diff --git a/.gitattributes b/.gitattributes
old mode 100644
new mode 100755
diff --git a/.gitignore b/.gitignore
old mode 100644
new mode 100755
index 20a3894a65..0b1472706b
--- a/.gitignore
+++ b/.gitignore
@@ -1,3 +1,8 @@
 .DS_Store
 /build
 /tags
+.idea
+.ycm_extra_conf.py
+.ycm_extra_conf.pyc
+Release
+Debug
diff --git a/CMakeLists.txt b/CMakeLists.txt
old mode 100644
new mode 100755
index 102399c4cc..5bc30372cb
--- a/CMakeLists.txt
+++ b/CMakeLists.txt
@@ -4,44 +4,58 @@ set(VERSION "0.1")
 # $Format:Packaged from commit %H%nset(COMMIT %h)%nset(REFS "%d")$
 
 set_property(GLOBAL PROPERTY USE_FOLDERS ON)
-set(CMAKE_CONFIGURATION_TYPES "Debug;Release")
+set(CMAKE_CONFIGURATION_TYPES Debug RelWithDebInfo Release CACHE TYPE INTERNAL)
+set(CMAKE_SKIP_INSTALL_RULES ON)
+set(CMAKE_SKIP_PACKAGE_ALL_DEPENDENCY ON)
+set(CMAKE_SUPPRESS_REGENERATION ON)
 enable_testing()
+# copy CTestCustom.cmake to the build dir to disable long-running tests in 'make test'
+configure_file(${CMAKE_CURRENT_SOURCE_DIR}/CTestCustom.cmake ${CMAKE_CURRENT_BINARY_DIR})
 
-include_directories(include src contrib/epee/include external "${CMAKE_BINARY_DIR}/version")
+project(Bytecoin)
+
+include_directories(include src external "${CMAKE_CURRENT_BINARY_DIR}/version")
 if(APPLE)
   include_directories(SYSTEM /usr/include/malloc)
+  enable_language(ASM)
 endif()
 
 if(MSVC)
-include_directories(src/Platform/Windows)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src/Platform/Windows)
 elseif(APPLE)
-include_directories(src/Platform/OSX)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src/Platform/OSX)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src/Platform/Posix)
 else()
-include_directories(src/Platform/Linux)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src/Platform/Linux)
+include_directories(${CMAKE_CURRENT_SOURCE_DIR}/src/Platform/Posix)
 endif()
-
 set(STATIC ${MSVC} CACHE BOOL "Link libraries statically")
 
 if(MSVC)
   add_definitions("/bigobj /MP /W3 /GS- /D_CRT_SECURE_NO_WARNINGS /wd4996 /wd4345 /D_WIN32_WINNT=0x0600 /DWIN32_LEAN_AND_MEAN /DGTEST_HAS_TR1_TUPLE=0 /D_VARIADIC_MAX=8 /D__SSE4_1__")
   set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /STACK:10485760")
   if(STATIC)
-    foreach(VAR CMAKE_C_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELEASE CMAKE_C_FLAGS_RELWITHDEBINFO CMAKE_CXX_FLAGS_RELWITHDEBINFO)
+    foreach(VAR CMAKE_C_FLAGS_DEBUG CMAKE_CXX_FLAGS_DEBUG CMAKE_C_FLAGS_RELWITHDEBINFO CMAKE_CXX_FLAGS_RELWITHDEBINFO CMAKE_C_FLAGS_RELEASE CMAKE_CXX_FLAGS_RELEASE)
       string(REPLACE "/MD" "/MT" ${VAR} "${${VAR}}")
     endforeach()
  endif()
-  include_directories(SYSTEM src/platform/msc)
+  include_directories(SYSTEM ${CMAKE_CURRENT_SOURCE_DIR}/src/platform/msc)
 else()
+  if (${CMAKE_SYSTEM_NAME} STREQUAL "Linux")
+    # This option has no effect in glibc versions before 2.20.
+    # Since glibc 2.20, _BSD_SOURCE is deprecated; this macro is recommended instead.
+    add_definitions("-D_DEFAULT_SOURCE -D_GNU_SOURCE")
+  endif()
   set(ARCH native CACHE STRING "CPU to build for: -march value or default")
   if("${ARCH}" STREQUAL "default")
     set(ARCH_FLAG "")
   else()
     set(ARCH_FLAG "-march=${ARCH}")
   endif()
-  set(WARNINGS "-Wall -Wextra -Wpointer-arith -Wundef -Wvla -Wwrite-strings -Werror -Wno-error=extra -Wno-error=deprecated-declarations -Wno-error=sign-compare -Wno-error=strict-aliasing -Wno-error=type-limits -Wno-unused-parameter -Wno-error=unused-variable -Wno-error=undef -Wno-error=uninitialized -Wno-error=unused-result")
+  set(WARNINGS "-Wall -Wextra -Wpointer-arith -Wundef -Wvla -Wwrite-strings -Werror -Wno-error=extra -Wno-error=unused-function -Wno-error=deprecated-declarations -Wno-error=sign-compare -Wno-error=strict-aliasing -Wno-error=type-limits -Wno-unused-parameter -Wno-error=unused-variable -Wno-error=undef -Wno-error=uninitialized -Wno-error=unused-result")
   if(CMAKE_C_COMPILER_ID STREQUAL "Clang")
-    set(WARNINGS "${WARNINGS} -Wno-error=mismatched-tags -Wno-error=null-conversion -Wno-overloaded-shift-op-parentheses -Wno-error=shift-count-overflow -Wno-error=tautological-constant-out-of-range-compare -Wno-error=unused-private-field -Wno-error=unneeded-internal-declaration -Wno-error=unused-function")
+    set(WARNINGS "${WARNINGS} -Wno-error=mismatched-tags -Wno-error=null-conversion -Wno-overloaded-shift-op-parentheses -Wno-error=shift-count-overflow -Wno-error=tautological-constant-out-of-range-compare -Wno-error=unused-private-field -Wno-error=unneeded-internal-declaration -Wno-error=unused-function -Wno-error=missing-braces")
   else()
     set(WARNINGS "${WARNINGS} -Wlogical-op -Wno-error=maybe-uninitialized -Wno-error=clobbered -Wno-error=unused-but-set-variable")
   endif()
@@ -52,22 +66,34 @@ else()
   else()
     set(MINGW_FLAG "")
   endif()
+  if(CMAKE_C_COMPILER_ID STREQUAL "GNU" AND NOT (CMAKE_C_COMPILER_VERSION VERSION_LESS 5.1))
+    set(WARNINGS "${WARNINGS} -Wno-error=odr")
+  endif()
   set(C_WARNINGS "-Waggregate-return -Wnested-externs -Wold-style-definition -Wstrict-prototypes")
   set(CXX_WARNINGS "-Wno-reorder -Wno-missing-field-initializers")
-  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -D_GNU_SOURCE ${MINGW_FLAG} ${WARNINGS} ${C_WARNINGS} ${ARCH_FLAG} -maes")
-  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -D_GNU_SOURCE ${MINGW_FLAG} ${WARNINGS} ${CXX_WARNINGS} ${ARCH_FLAG} -maes")
+  set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 ${MINGW_FLAG} ${WARNINGS} ${C_WARNINGS} ${ARCH_FLAG} -maes")
+  if(NOT APPLE)
+    set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -pthread")
+  endif()
+  set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 ${MINGW_FLAG} ${WARNINGS} ${CXX_WARNINGS} ${ARCH_FLAG} -maes")
   if(APPLE)
     set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -DGTEST_HAS_TR1_TUPLE=0")
   endif()
   if(CMAKE_C_COMPILER_ID STREQUAL "GNU" AND NOT (CMAKE_C_COMPILER_VERSION VERSION_LESS 4.8))
-    set(DEBUG_FLAGS "-g3 -Og")
+    set(DEBUG_FLAGS "-g3 -Og -gdwarf-4 -fvar-tracking -fvar-tracking-assignments -fno-inline -fno-omit-frame-pointer")
   else()
-    set(DEBUG_FLAGS "-g3 -O0")
+    set(DEBUG_FLAGS "-g3 -O0 -fno-omit-frame-pointer")
   endif()
   set(RELEASE_FLAGS "-Ofast -DNDEBUG -Wno-unused-variable")
   if(NOT APPLE)
     # There is a clang bug that does not allow to compile code that uses AES-NI intrinsics if -flto is enabled
-    set(RELEASE_FLAGS "${RELEASE_FLAGS} -flto")
+    if (CMAKE_C_COMPILER_ID STREQUAL "GNU" AND CMAKE_SYSTEM_NAME STREQUAL "Linux"
+        AND CMAKE_BUILD_TYPE STREQUAL "Release" AND ((CMAKE_C_COMPILER_VERSION VERSION_GREATER 4.9) OR (CMAKE_C_COMPILER_VERSION VERSION_EQUAL 4.9)))
+      # On Linux, to build in LTO mode, make sure the ld.gold linker is used: 'update-alternatives --install /usr/bin/ld ld /usr/bin/ld.gold HIGHEST_PRIORITY'
+      set(CMAKE_AR gcc-ar)
+      set(CMAKE_RANLIB gcc-ranlib)
+    endif()
+    #set(RELEASE_FLAGS "${RELEASE_FLAGS} -flto")
   endif()
   #if(CMAKE_C_COMPILER_ID STREQUAL "GNU" AND NOT MINGW)
   #  set(RELEASE_FLAGS "${RELEASE_FLAGS} -fno-fat-lto-objects")
@@ -85,10 +111,7 @@ if(STATIC)
   set(Boost_USE_STATIC_LIBS ON)
   set(Boost_USE_STATIC_RUNTIME ON)
 endif()
-find_package(Boost 1.53 REQUIRED COMPONENTS system filesystem thread date_time chrono regex serialization program_options coroutine context)
-if((${Boost_MAJOR_VERSION} EQUAL 1) AND (${Boost_MINOR_VERSION} EQUAL 54))
-  message(SEND_ERROR "Boost version 1.54 is unsupported, more details are available here http://goo.gl/RrCFmA")
-endif()
+find_package(Boost 1.55 REQUIRED COMPONENTS system filesystem thread date_time chrono regex serialization program_options)
 include_directories(SYSTEM ${Boost_INCLUDE_DIRS})
 if(MINGW)
   set(Boost_LIBRARIES "${Boost_LIBRARIES};ws2_32;mswsock")
@@ -99,32 +122,31 @@ elseif(NOT MSVC)
 endif()
 
 set(COMMIT_ID_IN_VERSION ON CACHE BOOL "Include commit ID in version")
-file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/version")
+file(MAKE_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}/version")
 if (NOT COMMIT_ID_IN_VERSION)
   set(VERSION "${VERSION}-unknown")
-  configure_file("src/version.h.in" "version/version.h")
+  configure_file("${CMAKE_CURRENT_SOURCE_DIR}/src/version.h.in" "${CMAKE_CURRENT_BINARY_DIR}/version/version.h")
   add_custom_target(version ALL)
 elseif(DEFINED COMMIT)
   string(REPLACE "." "\\." VERSION_RE "${VERSION}")
   if(NOT REFS MATCHES "(\\(|, )tag: v${VERSION_RE}(\\)|, )")
     set(VERSION "${VERSION}-g${COMMIT}")
   endif()
-  configure_file("src/version.h.in" "version/version.h")
+  configure_file("${CMAKE_CURRENT_SOURCE_DIR}/src/version.h.in" "${CMAKE_CURRENT_BINARY_DIR}/version/version.h")
   add_custom_target(version ALL)
 else()
   find_package(Git QUIET)
   if(Git_FOUND OR GIT_FOUND)
     message(STATUS "Found Git: ${GIT_EXECUTABLE}")
-    add_custom_target(version ALL "${CMAKE_COMMAND}" "-D" "VERSION=${VERSION}" "-D" "GIT=${GIT_EXECUTABLE}" "-D" "TO=${CMAKE_BINARY_DIR}/version/version.h" "-P" "src/version.cmake" WORKING_DIRECTORY "${CMAKE_SOURCE_DIR}")
+    add_custom_target(version ALL "${CMAKE_COMMAND}" "-D" "VERSION=${VERSION}" "-D" "GIT=${GIT_EXECUTABLE}" "-D" "TO=${CMAKE_CURRENT_BINARY_DIR}/version/version.h" "-P" "${CMAKE_CURRENT_SOURCE_DIR}/src/version.cmake" WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}")
   else()
     message(STATUS "WARNING: Git was not found!")
     set(VERSION "${VERSION}-unknown")
-    configure_file("src/version.h.in" "version/version.h")
+    configure_file("${CMAKE_CURRENT_SOURCE_DIR}/src/version.h.in" "${CMAKE_CURRENT_BINARY_DIR}/version/version.h")
     add_custom_target(version ALL)
   endif()
 endif()
 
-add_subdirectory(contrib)
 add_subdirectory(external)
 add_subdirectory(src)
 add_subdirectory(tests)
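A note on the LTO change above: `gcc-ar`/`gcc-ranlib` only pay off when the final link also goes through a plugin-aware linker, which is what the ld.gold comment is pointing at. A minimal sketch of that switch on a Debian/Ubuntu-style system, with 50 standing in for the HIGHEST_PRIORITY placeholder (any priority above the existing `ld` alternative will do):

```sh
# Register ld.gold as an alternative for /usr/bin/ld; "50" is an arbitrary
# stand-in for the HIGHEST_PRIORITY placeholder quoted in the CMake comment.
sudo update-alternatives --install /usr/bin/ld ld /usr/bin/ld.gold 50
# Confirm gold is now the active linker before configuring a Release build.
ld --version | head -n 1
```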
diff --git a/CTestCustom.cmake b/CTestCustom.cmake
new file mode 100755
index 0000000000..088261fad1
--- /dev/null
+++ b/CTestCustom.cmake
@@ -0,0 +1,11 @@
+set(CTEST_CUSTOM_TESTS_IGNORE
+  CoreTests
+  IntegrationTestLibrary
+  TestGenerator
+  CryptoTests
+  IntegrationTests
+  NodeRpcProxyTests
+  PerformanceTests
+  TransfersTests
+  )
+
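Because the top-level CMakeLists.txt now copies CTestCustom.cmake into the build directory, `ctest` (and therefore `make test`) picks up this ignore list automatically and skips the long-running suites. A sketch of the expected workflow, assuming an out-of-source build in `build/`:

```sh
cd build && cmake .. && make
make test   # short tests only; suites in CTEST_CUSTOM_TESTS_IGNORE are skipped
# The ignored suites can still be run by invoking their binaries directly,
# e.g. (hypothetical path, depending on where the test targets are emitted):
./tests/CoreTests
```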
diff --git a/LICENSE b/LICENSE
old mode 100644
new mode 100755
diff --git a/Makefile b/Makefile
old mode 100644
new mode 100755
diff --git a/README b/README
old mode 100644
new mode 100755
index 75c2dce57c..237c3b97ca
--- a/README
+++ b/README
@@ -1,3 +1,7 @@
+IMPORTANT ANNOUNCEMENT
+
+Official development moved to https://github.com/bcndev/bytecoin. This repository won't be updated any more.
+
 -= Building CryptoNote =-
 
 On *nix:
@@ -18,7 +22,7 @@ Test suite: run `make test-release' to run tests in addition to building. Runnin
 Building with Clang: it may be possible to use Clang instead of GCC, but this may not work everywhere. To build, run `export CC=clang CXX=clang++' before running `make'.
 
 On Windows:
-Dependencies: MSVC 2012 or later, CMake 2.8.6 or later, and Boost 1.55. You may download them from:
+Dependencies: MSVC 2013 or later, CMake 2.8.6 or later, and Boost 1.55. You may download them from:
 http://www.microsoft.com/
 http://www.cmake.org/
 http://www.boost.org/
@@ -26,7 +30,7 @@ http://www.boost.org/
 To build, change to a directory where this file is located, and run this commands:
 mkdir build
 cd build
-cmake -G "Visual Studio 11 Win64" ..
+cmake -G "Visual Studio 12 Win64" ..
 And then do Build.
-Good luck!
\ No newline at end of file
+Good luck!
diff --git a/README.md b/README.md
new file mode 100755
index 0000000000..362bd09a49
--- /dev/null
+++ b/README.md
@@ -0,0 +1,61 @@
+### About
+Forknote is an innovative way to create Cryptonote (https://cryptonote.org) based cryptotokens. It gives users the ability to create cryptotokens just by creating a simple configuration file.
+
+### Dependencies
+* GCC 4.7.3 or later (http://gcc.gnu.org/)
+* CMake 2.8.6 or later (http://www.cmake.org/)
+* Boost 1.55 or later (http://www.boost.org/)
+* MSVC 2013 (Windows only)
+
+Step-by-step Windows instructions:
+https://github.com/forknote/cryptonote-generator/blob/master/docs/windows-requirements-install.md
+
+Ubuntu (tested on Ubuntu 14.04) / Mac dependencies install script:
+https://github.com/forknote/cryptonote-generator/blob/master/configure.sh
+
+
+### Usage
+1. Download or compile the binaries
+2. Create a configuration file. The easiest way is to use the form on http://forknote.net
+3. Start the daemon:
+```
+./forknoted --config-file PATH_TO_YOUR_CONFIG
+```
+
+### Configuration parameters
+Use http://forknote.net to create configuration files.
+
+All fields supported:
+```
+GENESIS_COINBASE_TX_HEX= // REQUIRED. Use " ./forknoted --config-file PATH_TO_CONFIG --print-genesis-tx " to generate
+CRYPTONOTE_NAME=my_coin
+p2p-bind-port=33669
+rpc-bind-port=33670
+seed-node=127.0.0.1:33669 // format: IP:PORT
+seed-node=127.0.0.2:33669
+UPGRADE_HEIGHT_V2=1 // REQUIRED. Use 1 for new cryptotokens
+UPGRADE_HEIGHT_V3=2 // REQUIRED. Use 2 for new cryptotokens
+MONEY_SUPPLY=18446744073709551615
+EMISSION_SPEED_FACTOR=18
+GENESIS_BLOCK_REWARD=0 // premined coins. Default: 0
+DIFFICULTY_TARGET=120
+CRYPTONOTE_DISPLAY_DECIMAL_POINT=12
+DEFAULT_DUST_THRESHOLD=1000000
+MINIMUM_FEE=1000000
+CRYPTONOTE_MINED_MONEY_UNLOCK_WINDOW=10
+CRYPTONOTE_BLOCK_GRANTED_FULL_REWARD_ZONE=20000
+CRYPTONOTE_PUBLIC_ADDRESS_BASE58_PREFIX=120
+BYTECOIN_NETWORK=b2889400-e6f5-b62d-8d37-8fa91779dc7e
+P2P_STAT_TRUSTED_PUB_KEY=4d26c4df7f4ca7037950ad026f9ab36dd05d881952662992f2e4dcfcafbe57eb
+CHECKPOINT=10000:70d2531151529ac00bf875281e15f51324934bc85e5733dcd92e1ccb1a665ff8 // format: HEIGHT:BLOCK_ID
+CHECKPOINT=20000:80d2dd05d8819526629235722e15f5f9ab36dd05d881952662992f2e4dcfcafb
+
+// simplewallet parameters
+wallet-rpc-bind-ip=127.0.0.1 // used instead of rpc-bind-ip
+wallet-rpc-bind-port=33671 // used instead of rpc-bind-port
+SYNC_FROM_ZERO=1 // to sync the wallet from block 0. Used for premined coins or brain wallets
+```
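To make the reference above concrete, here is a minimal hypothetical configuration assembled only from the defaults listed in it; the genesis transaction hex, network id, and checkpoints are per-coin values that must be generated as described above:

```
GENESIS_COINBASE_TX_HEX=   // generate first: ./forknoted --config-file my_coin.conf --print-genesis-tx
CRYPTONOTE_NAME=my_coin
p2p-bind-port=33669
rpc-bind-port=33670
seed-node=127.0.0.1:33669
UPGRADE_HEIGHT_V2=1
UPGRADE_HEIGHT_V3=2
MONEY_SUPPLY=18446744073709551615
EMISSION_SPEED_FACTOR=18
DIFFICULTY_TARGET=120
CRYPTONOTE_DISPLAY_DECIMAL_POINT=12
```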
+
+---
+This code is generated by the Cryptonote generator - https://github.com/forknote/cryptonote-generator
+Seed source - https://github.com/amjuarez/bytecoin
\ No newline at end of file
diff --git a/ReleaseNotes.txt b/ReleaseNotes.txt
old mode 100644
new mode 100755
index d00ac9aaf4..412fe2fe3c
--- a/ReleaseNotes.txt
+++ b/ReleaseNotes.txt
@@ -1,3 +1,109 @@
+Release notes 2.1.2
+
+- Updated crawler public key
+- Daemon stability improvements
+
+Release notes 2.1.1
+
+- Updated RocksDB to release 4.11.2
+- Added two more seed nodes
+
+Release notes 2.1.0
+
+- Transaction pool issue fixes
+- Daemon synchronization improvements
+
+Release notes 2.0.7
+
+- Wallet synchronization enhancements
+- Daemon synchronization issue fix
+- Daemon stability enhancements
+
+Release notes 2.0.6
+
+- Wallet synchronization enhancements
+- Correct application closing
+- Daemon stability enhancements
+
+Release notes 2.0.5
+
+- Fixed Bytecoin RPC Wallet hang issue
+
+Release notes 2.0.4
+
+- Fixed an issue where some users experienced daemon hanging
+- Fixed block difficulty calculation issue
+
+Release notes 2.0.3
+
+- Stability enhancement
+- Various improvements
+
+Release notes 2.0.2
+
+- Security algorithm enhancement
+- Daemon synchronization issue fix
+- Wallet minor bug fixes
+- Bytecoin RPC Wallet log optimization
+
+Release notes 2.0.1
+
+- Bytecoin RPC Wallet API improvements
+
+Release notes 2.0.0
+
+- Completely revamped CryptoNote core
+- Blockchain storage changed to DB
+
+Release notes 1.0.11
+
+- New Bytecoin Wallet file format
+- Daemon loading optimization
+
+Release notes 1.0.10
+
+- Blocksize increase
+- Fusion transactions for Bytecoin RPC Wallet
+- Bytecoin RPC Wallet API improvements
+
+Release notes 1.0.9
+
+- New API for Bytecoin RPC Wallet
+- Various improvements
+
+Release notes 1.0.8
+
+- Fusion transactions for Bytecoin Wallet
+- IWallet high-level API update
+- JSON RPC improvements
+- Synchronization improvements for OS X
+
+Release notes 1.0.7
+
+- Fusion transactions support
+- Various simplewallet improvements
+
+Release notes 1.0.6
+
+- High-level API update
+- Aggregate multi-addresses for Bytecoin RPC Wallet
+- Wallet synchronization speed increase
+
+Release notes 1.0.5
+
+- High-level API for blockchain explorer
+- Full network layer refactoring
+- Transactions pool synchronization
+- list_transactions method for RPC Wallet
+- Various improvements
+
+Release notes 1.0.4
+
+- Bytecoin RPC Wallet
+- New multithreading library
+- Improved console logging
+- Further optimizations
+
 Release notes 1.0.3
 
 - Multisignature API
diff --git a/contrib/CMakeLists.txt b/contrib/CMakeLists.txt
deleted file mode 100644
index 7222cce5ee..0000000000
--- a/contrib/CMakeLists.txt
+++ /dev/null
@@ -1,7 +0,0 @@
-file(GLOB_RECURSE EPEE epee/include/*)
-
-source_group(epee FILES ${EPEE})
-
-add_library(epee ${EPEE})
-
-set_property(TARGET epee PROPERTY FOLDER "external")
diff --git a/contrib/epee/LICENSE.txt b/contrib/epee/LICENSE.txt
deleted file mode 100644
index 4a6b529e5d..0000000000
--- a/contrib/epee/LICENSE.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-    * Redistributions of source code must retain the above copyright
-    notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of the Andrey N. Sabelnikov nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL Andrey N. Sabelnikov BE LIABLE FOR ANY -DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. \ No newline at end of file diff --git a/contrib/epee/README.md b/contrib/epee/README.md deleted file mode 100644 index a69884f570..0000000000 --- a/contrib/epee/README.md +++ /dev/null @@ -1 +0,0 @@ -epee - is a small library of helpers, wrappers, tools and and so on, used to make my life easier. \ No newline at end of file diff --git a/contrib/epee/demo/.gitignore b/contrib/epee/demo/.gitignore deleted file mode 100644 index d9b4f015d3..0000000000 --- a/contrib/epee/demo/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/build/* diff --git a/contrib/epee/demo/CMakeLists.txt b/contrib/epee/demo/CMakeLists.txt deleted file mode 100644 index b4ac2cc8ba..0000000000 --- a/contrib/epee/demo/CMakeLists.txt +++ /dev/null @@ -1,49 +0,0 @@ -cmake_minimum_required(VERSION 2.8) -set(Boost_USE_MULTITHREADED ON) -#set(Boost_DEBUG 1) -find_package(Boost COMPONENTS system filesystem thread date_time chrono regex ) - -include_directories( ${Boost_INCLUDE_DIRS} ) - - -IF (MSVC) - add_definitions( "/W3 /D_CRT_SECURE_NO_WARNINGS /wd4996 /wd4345 /nologo /D_WIN32_WINNT=0x0600 /DWIN32_LEAN_AND_MEAN /bigobj" ) -ELSE() - # set stuff for other systems - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Wno-reorder -D_GNU_SOURCE") -ENDIF() - - -include_directories(.) 
-include_directories(../include) -include_directories(iface) - - -# Add folders to filters -file(GLOB_RECURSE LEVIN_GENERAL_SECTION RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/demo_levin_server/*.h - ${CMAKE_CURRENT_SOURCE_DIR}/demo_levin_server/*.inl - ${CMAKE_CURRENT_SOURCE_DIR}/demo_levin_server/*.cpp) - -file(GLOB_RECURSE HTTP_GENERAL_SECTION RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/demo_http_server/*.h - ${CMAKE_CURRENT_SOURCE_DIR}/demo_http_server/*.inl - ${CMAKE_CURRENT_SOURCE_DIR}/demo_http_server/*.cpp) - - - -source_group(general FILES ${LEVIN_GENERAL_SECTION} FILES ${HTTP_GENERAL_SECTION}) -#source_group(general FILES ${HTTP_GENERAL_SECTION}) - -add_executable(demo_http_server ${HTTP_GENERAL_SECTION} ) -add_executable(demo_levin_server ${LEVIN_GENERAL_SECTION} ) - -target_link_libraries( demo_http_server ${Boost_LIBRARIES} ) -target_link_libraries( demo_levin_server ${Boost_LIBRARIES} ) - -IF (NOT WIN32) - target_link_libraries (demo_http_server rt) - target_link_libraries (demo_levin_server rt) -ENDIF() - - diff --git a/contrib/epee/demo/README.txt b/contrib/epee/demo/README.txt deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/contrib/epee/demo/demo_http_server/demo_http_server.cpp b/contrib/epee/demo/demo_http_server/demo_http_server.cpp deleted file mode 100644 index 85547e4c9e..0000000000 --- a/contrib/epee/demo/demo_http_server/demo_http_server.cpp +++ /dev/null @@ -1,217 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// - -#include "stdafx.h" -#include "console_handler.h" -#include "demo_http_server.h" -#include "net/http_client.h" -#include "storages/http_abstract_invoke.h" - - -template -bool communicate(const std::string url, t_request& req, t_response& rsp, const std::string& ip, const std::string& port, bool use_json, bool use_jrpc = false) -{ - epee::net_utils::http::http_simple_client http_client; - bool r = http_client.connect(ip, port, 1000); - CHECK_AND_ASSERT_MES(r, false, "failed to connect"); - if(use_json) - { - if(use_jrpc) - { - epee::json_rpc::request req_t = AUTO_VAL_INIT(req_t); - req_t.jsonrpc = "2.0"; - req_t.id = epee::serialization::storage_entry(10); - req_t.method = "command_example_1"; - req_t.params = req; - epee::json_rpc::response resp_t = AUTO_VAL_INIT(resp_t); - if(!epee::net_utils::invoke_http_json_remote_command2("/request_json_rpc", req_t, resp_t, http_client)) - { - return false; - } - rsp = resp_t.result; - return true; - }else - return epee::net_utils::invoke_http_json_remote_command2(url, req, rsp, http_client); - } - else - return epee::net_utils::invoke_http_bin_remote_command2(url, req, rsp, http_client); -} - - -int main(int argc, char* argv[]) -{ - TRY_ENTRY(); - string_tools::set_module_name_and_folder(argv[0]); - - //set up logging options - log_space::get_set_log_detalisation_level(true, LOG_LEVEL_2); - log_space::log_singletone::add_logger(LOGGER_CONSOLE, NULL, NULL); - log_space::log_singletone::add_logger(LOGGER_FILE, - log_space::log_singletone::get_default_log_file().c_str(), - log_space::log_singletone::get_default_log_folder().c_str()); - - - - LOG_PRINT("Demo server starting ...", LOG_LEVEL_0); - - - demo::demo_http_server srv; - - start_default_console(&srv, "#"); - - std::string bind_param = "0.0.0.0"; - std::string port = "83"; - - if(!srv.init(port, bind_param)) - { - LOG_ERROR("Failed to initialize srv!"); - return 1; - } - - //log loop - srv.run(); - size_t count = 0; - while (!srv.is_stop()) - { - - demo::COMMAND_EXAMPLE_1::request req; - req.sub = demo::get_test_data(); - demo::COMMAND_EXAMPLE_1::response rsp; - bool r = false; - if(count%2) - {//invoke json - r = communicate("/request_api_json_1", req, rsp, "127.0.0.1", port, true, true); - }else{ - r = communicate("/request_api_bin_1", req, rsp, "127.0.0.1", port, false); - } - CHECK_AND_ASSERT_MES(r, false, "failed to invoke http request"); - CHECK_AND_ASSERT_MES(rsp.m_success, false, "wrong response"); - CHECK_AND_ASSERT_MES(rsp.subs.size()==1, false, "wrong response"); - CHECK_AND_ASSERT_MES(rsp.subs.front() == demo::get_test_data(), false, "wrong response"); - //misc_utils::sleep_no_w(1000); - ++count; - } - bool r = srv.wait_stop(); - CHECK_AND_ASSERT_MES(r, 1, "failed to wait server stop"); - srv.deinit(); - - LOG_PRINT("Demo server stoped.", LOG_LEVEL_0); - return 1; - - CATCH_ENTRY_L0("main", 1); -} - -/************************************************************************/ -/* */ -/************************************************************************/ -namespace demo -{ - bool demo_http_server::init(const std::string& bind_port, const std::string& bind_ip) - { - - - //set self as callback handler - m_net_server.get_config_object().m_phandler = this; - - //here set folder for hosting reqests - m_net_server.get_config_object().m_folder = ""; - - LOG_PRINT_L0("Binding on " << bind_ip << ":" << bind_port); - return m_net_server.init_server(bind_port, bind_ip); - } - - bool demo_http_server::run() - { - m_stop = false; - //here you can set worker threads count - int thrds_count = 
4; - - //go to loop - LOG_PRINT("Run net_service loop( " << thrds_count << " threads)...", LOG_LEVEL_0); - if(!m_net_server.run_server(thrds_count, false)) - { - LOG_ERROR("Failed to run net tcp server!"); - } - - return true; - } - - bool demo_http_server::deinit() - { - return m_net_server.deinit_server(); - } - - bool demo_http_server::send_stop_signal() - { - m_stop = true; - m_net_server.send_stop_signal(); - return true; - } - - bool demo_http_server::on_requestr_uri_1(const net_utils::http::http_request_info& query_info, - net_utils::http::http_response_info& response, - const net_utils::connection_context_base& m_conn_context) - { - return true; - } - - - bool demo_http_server::on_requestr_uri_2(const net_utils::http::http_request_info& query_info, - net_utils::http::http_response_info& response, - const net_utils::connection_context_base& m_conn_context) - { - return true; - } - - - bool demo_http_server::on_hosting_request( const net_utils::http::http_request_info& query_info, - net_utils::http::http_response_info& response, - const net_utils::connection_context_base& m_conn_context) - { - //read file from filesystem here - return true; - } - - bool demo_http_server::on_request_api_1(const COMMAND_EXAMPLE_1::request& req, COMMAND_EXAMPLE_1::response& res, connection_context& ctxt) - { - CHECK_AND_ASSERT_MES(req.sub == demo::get_test_data(), false, "wrong request"); - res.m_success = true; - res.subs.push_back(req.sub); - return true; - } - - bool demo_http_server::on_request_api_1_with_error(const COMMAND_EXAMPLE_1::request& req, COMMAND_EXAMPLE_1::response& res, epee::json_rpc::error& error_resp, connection_context& ctxt) - { - error_resp.code = 232432; - error_resp.message = "bla bla bla"; - return false; - } - - bool demo_http_server::on_request_api_2(const COMMAND_EXAMPLE_2::request& req, COMMAND_EXAMPLE_2::response& res, connection_context& ctxt) - { - return true; - } -} diff --git a/contrib/epee/demo/demo_http_server/demo_http_server.h b/contrib/epee/demo/demo_http_server/demo_http_server.h deleted file mode 100644 index 088ead548e..0000000000 --- a/contrib/epee/demo/demo_http_server/demo_http_server.h +++ /dev/null @@ -1,103 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#pragma once -#include -#include - -#include "net/http_server_cp2.h" -#include "transport_defs.h" -#include "net/http_server_handlers_map2.h" - -using namespace epee; - -namespace demo -{ - - class demo_http_server: public net_utils::http::i_http_server_handler - { - public: - typedef epee::net_utils::connection_context_base connection_context; - - demo_http_server():m_stop(false){} - bool run(); - bool init(const std::string& bind_port = "11112", const std::string& bind_ip = "0.0.0.0"); - bool deinit(); - bool send_stop_signal(); - bool is_stop(){return m_stop;} - bool wait_stop(){return m_net_server.timed_wait_server_stop(100000);} - private: - - - CHAIN_HTTP_TO_MAP2(connection_context); //forward http requests to uri map - - BEGIN_URI_MAP2() - MAP_URI2("/requestr_uri_1", on_requestr_uri_1) - MAP_URI2("/requestr_uri_2", on_requestr_uri_1) - //MAP_URI_AUTO_XML2("/request_api_xml_1", on_request_api_1, COMMAND_EXAMPLE_1) - //MAP_URI_AUTO_XML2("/request_api_xml_2", on_request_api_2, COMMAND_EXAMPLE_2) - MAP_URI_AUTO_JON2("/request_api_json_1", on_request_api_1, COMMAND_EXAMPLE_1) - MAP_URI_AUTO_JON2("/request_api_json_2", on_request_api_2, COMMAND_EXAMPLE_2) - MAP_URI_AUTO_BIN2("/request_api_bin_1", on_request_api_1, COMMAND_EXAMPLE_1) - MAP_URI_AUTO_BIN2("/request_api_bin_2", on_request_api_2, COMMAND_EXAMPLE_2) - BEGIN_JSON_RPC_MAP("/request_json_rpc") - MAP_JON_RPC("command_example_1", on_request_api_1, COMMAND_EXAMPLE_1) - MAP_JON_RPC("command_example_2", on_request_api_2, COMMAND_EXAMPLE_2) - MAP_JON_RPC_WE("command_example_1_we", on_request_api_1_with_error, COMMAND_EXAMPLE_1) - END_JSON_RPC_MAP() - CHAIN_URI_MAP2(on_hosting_request) - END_URI_MAP2() - - - - bool on_requestr_uri_1(const net_utils::http::http_request_info& query_info, - net_utils::http::http_response_info& response, - const net_utils::connection_context_base& m_conn_context); - - - bool on_requestr_uri_2(const net_utils::http::http_request_info& query_info, - net_utils::http::http_response_info& response, - const net_utils::connection_context_base& m_conn_context); - - - - - bool on_hosting_request( const net_utils::http::http_request_info& query_info, - net_utils::http::http_response_info& response, - const net_utils::connection_context_base& m_conn_context); - - bool on_request_api_1(const COMMAND_EXAMPLE_1::request& req, COMMAND_EXAMPLE_1::response& res, connection_context& ctxt); - bool on_request_api_2(const COMMAND_EXAMPLE_2::request& req, COMMAND_EXAMPLE_2::response& res, connection_context& ctxt); - - bool on_request_api_1_with_error(const COMMAND_EXAMPLE_1::request& req, COMMAND_EXAMPLE_1::response& res, epee::json_rpc::error& error_resp, connection_context& ctxt); - - net_utils::boosted_http_server_custum_handling m_net_server; - std::atomic m_stop; - }; -} - diff --git a/contrib/epee/demo/demo_http_server/stdafx.cpp b/contrib/epee/demo/demo_http_server/stdafx.cpp deleted file mode 100644 index ecec246572..0000000000 --- a/contrib/epee/demo/demo_http_server/stdafx.cpp +++ /dev/null @@ 
-1,8 +0,0 @@ -// stdafx.cpp : source file that includes just the standard includes -// demo_http_server.pch will be the pre-compiled header -// stdafx.obj will contain the pre-compiled type information - -#include "stdafx.h" - -// TODO: reference any additional headers you need in STDAFX.H -// and not in this file diff --git a/contrib/epee/demo/demo_http_server/stdafx.h b/contrib/epee/demo/demo_http_server/stdafx.h deleted file mode 100644 index e288832024..0000000000 --- a/contrib/epee/demo/demo_http_server/stdafx.h +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#pragma once - -#include "targetver.h" - - -#include - - -#define BOOST_FILESYSTEM_VERSION 3 -#define ENABLE_RELEASE_LOGGING -#include "misc_log_ex.h" - - diff --git a/contrib/epee/demo/demo_http_server/targetver.h b/contrib/epee/demo/demo_http_server/targetver.h deleted file mode 100644 index 6fe8eb79e1..0000000000 --- a/contrib/epee/demo/demo_http_server/targetver.h +++ /dev/null @@ -1,13 +0,0 @@ -#pragma once - -// The following macros define the minimum required platform. The minimum required platform -// is the earliest version of Windows, Internet Explorer etc. that has the necessary features to run -// your application. The macros work by enabling all features available on platform versions up to and -// including the version specified. - -// Modify the following defines if you have to target a platform prior to the ones specified below. -// Refer to MSDN for the latest info on corresponding values for different platforms. -#ifndef _WIN32_WINNT // Specifies that the minimum required platform is Windows Vista. -#define _WIN32_WINNT 0x0600 // Change this to the appropriate value to target other versions of Windows. 
-#endif - diff --git a/contrib/epee/demo/demo_levin_server/demo_levin_server.cpp b/contrib/epee/demo/demo_levin_server/demo_levin_server.cpp deleted file mode 100644 index a99a1f564c..0000000000 --- a/contrib/epee/demo/demo_levin_server/demo_levin_server.cpp +++ /dev/null @@ -1,200 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#include "stdafx.h" -#include "demo_levin_server.h" -#include "console_handler.h" - - -template -bool communicate(net_utils::boosted_levin_async_server& transport, int id, t_request& req, const std::string& ip, const std::string& port, bool use_async) -{ - if(use_async) - { - //IMPORTANT: do not pass local parameters from stack by reference! connect_async returns immediately, and callback will call in any thread later - transport.connect_async(ip, port, 10000, [&transport, id, req, ip, port](net_utils::connection_context_base& ctx, const boost::system::error_code& ec_) - { - if(!!ec_) - { - LOG_ERROR("Failed to connect to " << ip << ":" << port); - }else - {//connected ok! 
- - epee::net_utils::async_invoke_remote_command2(ctx.m_connection_id, id, req, transport.get_config_object(), [&transport, ip, port](int res_code, demo::COMMAND_EXAMPLE_1::response& rsp, net_utils::connection_context_base& ctx) - { - if(res_code < 0) - { - LOG_ERROR("Failed to invoke to " << ip << ":" << port); - }else - {//invoked ok - CHECK_AND_ASSERT_MES(rsp.m_success, false, "wrong response"); - CHECK_AND_ASSERT_MES(rsp.subs.size()==1, false, "wrong response"); - CHECK_AND_ASSERT_MES(rsp.subs.front() == demo::get_test_data(), false, "wrong response"); - LOG_PRINT_GREEN("Client COMMAND_EXAMPLE_1 async invoked ok", LOG_LEVEL_0); - } - transport.get_config_object().close(ctx.m_connection_id); - return true; - }); - LOG_PRINT_GREEN("Client COMMAND_EXAMPLE_1 async invoke requested", LOG_LEVEL_0); - } - }); - }else - { - net_utils::connection_context_base ctx = AUTO_VAL_INIT(ctx); - bool r = transport.connect(ip, port, 10000, ctx); - CHECK_AND_ASSERT_MES(r, false, "failed to connect to " << ip << ":" << port); - demo::COMMAND_EXAMPLE_1::response rsp = AUTO_VAL_INIT(rsp); - LOG_PRINT_GREEN("Client COMMAND_EXAMPLE_1 sync invoke requested", LOG_LEVEL_0); - r = epee::net_utils::invoke_remote_command2(ctx.m_connection_id, id, req, rsp, transport.get_config_object()); - CHECK_AND_ASSERT_MES(r, false, "failed to invoke levin request"); - CHECK_AND_ASSERT_MES(rsp.m_success, false, "wrong response"); - CHECK_AND_ASSERT_MES(rsp.subs.size()==1, false, "wrong response"); - CHECK_AND_ASSERT_MES(rsp.subs.front() == demo::get_test_data(), false, "wrong response"); - transport.get_config_object().close(ctx.m_connection_id); - LOG_PRINT_GREEN("Client COMMAND_EXAMPLE_1 sync invoked ok", LOG_LEVEL_0); - } - return true; -} - - -int main(int argc, char* argv[]) -{ - TRY_ENTRY(); - string_tools::set_module_name_and_folder(argv[0]); - - //set up logging options - log_space::get_set_log_detalisation_level(true, LOG_LEVEL_2); - log_space::log_singletone::add_logger(LOGGER_CONSOLE, NULL, NULL); - log_space::log_singletone::add_logger(LOGGER_FILE, - log_space::log_singletone::get_default_log_file().c_str(), - log_space::log_singletone::get_default_log_folder().c_str()); - - - - LOG_PRINT("Demo server starting ...", LOG_LEVEL_0); - - - demo::demo_levin_server srv; - - start_default_console(&srv, "#"); - - std::string bind_param = "0.0.0.0"; - std::string port = "12345"; - - if(!srv.init(port, bind_param)) - { - LOG_ERROR("Failed to initialize srv!"); - return 1; - } - - srv.run(); - - size_t c = 1; - while (!srv.is_stop()) - { - - demo::COMMAND_EXAMPLE_1::request req; - req.sub = demo::get_test_data(); - bool r = communicate(srv.get_server(), demo::COMMAND_EXAMPLE_1::ID, req, "127.0.0.1", port, (c%2 == 0)); - misc_utils::sleep_no_w(1000); - ++c; - } - bool r = srv.wait_stop(); - CHECK_AND_ASSERT_MES(r, 1, "failed to wait server stop"); - - - srv.deinit(); - - LOG_PRINT("Demo server stoped.", LOG_LEVEL_0); - return 1; - - CATCH_ENTRY_L0("main", 1); -} - -/************************************************************************/ -/* */ -/************************************************************************/ -namespace demo -{ - bool demo_levin_server::init(const std::string& bind_port, const std::string& bind_ip) - { - m_net_server.get_config_object().m_pcommands_handler = this; - LOG_PRINT_L0("Binding on " << bind_ip << ":" << bind_port); - return m_net_server.init_server(bind_port, bind_ip); - } - - bool demo_levin_server::run() - { - m_stop = false; - //here you can set worker threads count - int thrds_count = 4; 
- m_net_server.get_config_object().m_invoke_timeout = 10000; - m_net_server.get_config_object().m_pcommands_handler = this; - - //go to loop - LOG_PRINT("Run net_service loop( " << thrds_count << " threads)...", LOG_LEVEL_0); - if(!m_net_server.run_server(thrds_count, false)) - { - LOG_ERROR("Failed to run net tcp server!"); - } - - LOG_PRINT("net_service loop stopped.", LOG_LEVEL_0); - return true; - } - - bool demo_levin_server::deinit() - { - return m_net_server.deinit_server(); - } - - bool demo_levin_server::send_stop_signal() - { - m_net_server.send_stop_signal(); - return true; - } - int demo_levin_server::handle_command_1(int command, COMMAND_EXAMPLE_1::request& arg, COMMAND_EXAMPLE_1::response& rsp, const net_utils::connection_context_base& context) - { - CHECK_AND_ASSERT_MES(arg.sub == demo::get_test_data(), false, "wrong request"); - rsp.m_success = true; - rsp.subs.push_back(arg.sub); - LOG_PRINT_GREEN("Server COMMAND_EXAMPLE_1 ok", LOG_LEVEL_0); - return 1; - } - int demo_levin_server::handle_command_2(int command, COMMAND_EXAMPLE_2::request& arg, COMMAND_EXAMPLE_2::response& rsp, const net_utils::connection_context_base& context) - { - return 1; - } - int demo_levin_server::handle_notify_1(int command, COMMAND_EXAMPLE_1::request& arg, const net_utils::connection_context_base& context) - { - return 1; - } - int demo_levin_server::handle_notify_2(int command, COMMAND_EXAMPLE_2::request& arg, const net_utils::connection_context_base& context) - { - return 1; - } -} diff --git a/contrib/epee/demo/demo_levin_server/demo_levin_server.h b/contrib/epee/demo/demo_levin_server/demo_levin_server.h deleted file mode 100644 index 5a6f68f2d4..0000000000 --- a/contrib/epee/demo/demo_levin_server/demo_levin_server.h +++ /dev/null @@ -1,76 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// - - - -#pragma once -#include -#include - -#include "net/levin_server_cp2.h" -#include "transport_defs.h" -#include "storages/levin_abstract_invoke2.h" - -using namespace epee; - -namespace demo -{ - - class demo_levin_server: public levin::levin_commands_handler<> - { - public: - bool run(); - bool init(const std::string& bind_port = "11112", const std::string& bind_ip = "0.0.0.0"); - bool deinit(); - bool send_stop_signal(); - bool is_stop(){return m_stop;} - bool wait_stop(){return m_net_server.timed_wait_server_stop(100000);} - net_utils::boosted_levin_async_server& get_server(){return m_net_server;} - private: - - - CHAIN_LEVIN_INVOKE_MAP(); //move levin_commands_handler interface invoke(...) callbacks into invoke map - CHAIN_LEVIN_NOTIFY_STUB(); //move levin_commands_handler interface notify(...) callbacks into nothing - - BEGIN_INVOKE_MAP2(demo_levin_server) - HANDLE_INVOKE_T2(COMMAND_EXAMPLE_1, &demo_levin_server::handle_command_1) - HANDLE_INVOKE_T2(COMMAND_EXAMPLE_2, &demo_levin_server::handle_command_2) - HANDLE_NOTIFY_T2(COMMAND_EXAMPLE_1, &demo_levin_server::handle_notify_1) - HANDLE_NOTIFY_T2(COMMAND_EXAMPLE_2, &demo_levin_server::handle_notify_2) - END_INVOKE_MAP2() - - //----------------- commands handlers ---------------------------------------------- - int handle_command_1(int command, COMMAND_EXAMPLE_1::request& arg, COMMAND_EXAMPLE_1::response& rsp, const net_utils::connection_context_base& context); - int handle_command_2(int command, COMMAND_EXAMPLE_2::request& arg, COMMAND_EXAMPLE_2::response& rsp, const net_utils::connection_context_base& context); - int handle_notify_1(int command, COMMAND_EXAMPLE_1::request& arg, const net_utils::connection_context_base& context); - int handle_notify_2(int command, COMMAND_EXAMPLE_2::request& arg, const net_utils::connection_context_base& context); - //---------------------------------------------------------------------------------- - net_utils::boosted_levin_async_server m_net_server; - std::atomic m_stop; - - }; -} - diff --git a/contrib/epee/demo/demo_levin_server/stdafx.cpp b/contrib/epee/demo/demo_levin_server/stdafx.cpp deleted file mode 100644 index d6ea1c6f2d..0000000000 --- a/contrib/epee/demo/demo_levin_server/stdafx.cpp +++ /dev/null @@ -1,30 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#include "stdafx.h" - diff --git a/contrib/epee/demo/demo_levin_server/stdafx.h b/contrib/epee/demo/demo_levin_server/stdafx.h deleted file mode 100644 index cc4558434d..0000000000 --- a/contrib/epee/demo/demo_levin_server/stdafx.h +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#pragma once - -#include "targetver.h" - - -#include - - -#define BOOST_FILESYSTEM_VERSION 3 -#define ENABLE_RELEASE_LOGGING -#include "misc_log_ex.h" diff --git a/contrib/epee/demo/demo_levin_server/targetver.h b/contrib/epee/demo/demo_levin_server/targetver.h deleted file mode 100644 index 6fe8eb79e1..0000000000 --- a/contrib/epee/demo/demo_levin_server/targetver.h +++ /dev/null @@ -1,13 +0,0 @@ -#pragma once - -// The following macros define the minimum required platform. The minimum required platform -// is the earliest version of Windows, Internet Explorer etc. that has the necessary features to run -// your application. The macros work by enabling all features available on platform versions up to and -// including the version specified. - -// Modify the following defines if you have to target a platform prior to the ones specified below. -// Refer to MSDN for the latest info on corresponding values for different platforms. -#ifndef _WIN32_WINNT // Specifies that the minimum required platform is Windows Vista. -#define _WIN32_WINNT 0x0600 // Change this to the appropriate value to target other versions of Windows. 
-#endif - diff --git a/contrib/epee/demo/generate_gcc.sh b/contrib/epee/demo/generate_gcc.sh deleted file mode 100644 index fcd0a8a7e7..0000000000 --- a/contrib/epee/demo/generate_gcc.sh +++ /dev/null @@ -1,4 +0,0 @@ -mkdir build -cd build -cmake .. -#cmake -DBOOST_ROOT=/usr/local/proj/boost_1_49_0 -DBOOST_LIBRARYDIR=/usr/local/proj/boost_1_49_0/stage/lib .. diff --git a/contrib/epee/demo/generate_vc_proj.bat b/contrib/epee/demo/generate_vc_proj.bat deleted file mode 100644 index 111405981f..0000000000 --- a/contrib/epee/demo/generate_vc_proj.bat +++ /dev/null @@ -1,7 +0,0 @@ -mkdir build - -cd build - -cmake "-DBoost_USE_STATIC_LIBS=TRUE" -G "Visual Studio 11 Win64" .. -cd .. -pause \ No newline at end of file diff --git a/contrib/epee/demo/iface/transport_defs.h b/contrib/epee/demo/iface/transport_defs.h deleted file mode 100644 index 8463eb903a..0000000000 --- a/contrib/epee/demo/iface/transport_defs.h +++ /dev/null @@ -1,221 +0,0 @@ -#pragma once - -#include "serialization/keyvalue_serialization.h" -#include "storages/portable_storage_base.h" - -namespace demo -{ - - struct some_test_subdata - { - std::string m_str; - - BEGIN_KV_SERIALIZE_MAP() - KV_SERIALIZE(m_str) - END_KV_SERIALIZE_MAP() - }; - - struct some_test_data - { - std::string m_str; - uint64_t m_uint64; - uint32_t m_uint32; - uint16_t m_uint16; - uint8_t m_uint8; - int64_t m_int64; - int32_t m_int32; - int16_t m_int16; - int8_t m_int8; - double m_double; - bool m_bool; - std::list m_list_of_str; - std::list m_list_of_uint64_t; - std::list m_list_of_uint32_t; - std::list m_list_of_uint16_t; - std::list m_list_of_uint8_t; - std::list m_list_of_int64_t; - std::list m_list_of_int32_t; - std::list m_list_of_int16_t; - std::list m_list_of_int8_t; - std::list m_list_of_double; - std::list m_list_of_bool; - some_test_subdata m_subobj; - std::list m_list_of_self; - epee::serialization::storage_entry m_storage_entry_int; - epee::serialization::storage_entry m_storage_entry_string; - - BEGIN_KV_SERIALIZE_MAP() - KV_SERIALIZE(m_str) - KV_SERIALIZE(m_uint64) - KV_SERIALIZE(m_uint32) - KV_SERIALIZE(m_uint16) - KV_SERIALIZE(m_uint8) - KV_SERIALIZE(m_int64) - KV_SERIALIZE(m_int32) - KV_SERIALIZE(m_int16) - KV_SERIALIZE(m_int8) - KV_SERIALIZE(m_double) - KV_SERIALIZE(m_bool) - KV_SERIALIZE(m_subobj) - KV_SERIALIZE(m_list_of_str) - KV_SERIALIZE(m_list_of_uint64_t) - KV_SERIALIZE(m_list_of_uint32_t) - KV_SERIALIZE(m_list_of_uint16_t) - KV_SERIALIZE(m_list_of_uint8_t) - KV_SERIALIZE(m_list_of_int64_t) - KV_SERIALIZE(m_list_of_int32_t) - KV_SERIALIZE(m_list_of_int16_t) - KV_SERIALIZE(m_list_of_int8_t) - KV_SERIALIZE(m_list_of_double) - KV_SERIALIZE(m_list_of_bool) - KV_SERIALIZE(m_list_of_self) - KV_SERIALIZE(m_storage_entry_int) - KV_SERIALIZE(m_storage_entry_string) - END_KV_SERIALIZE_MAP() - }; - - /************************************************************************/ - /* */ - /************************************************************************/ - struct COMMAND_EXAMPLE_1 - { - const static int ID = 1000; - - struct request - { - std::string example_string_data; - some_test_data sub; - - BEGIN_KV_SERIALIZE_MAP() - KV_SERIALIZE(example_string_data) - KV_SERIALIZE(sub) - END_KV_SERIALIZE_MAP() - }; - - - struct response - { - bool m_success; - std::list subs; - - BEGIN_KV_SERIALIZE_MAP() - KV_SERIALIZE(m_success) - KV_SERIALIZE(subs) - END_KV_SERIALIZE_MAP() - }; - }; - - - - struct COMMAND_EXAMPLE_2 - { - const static int ID = 1001; - - struct request - { - std::string example_string_data2; - - BEGIN_KV_SERIALIZE_MAP() - 
KV_SERIALIZE(example_string_data2) - END_KV_SERIALIZE_MAP() - }; - - struct response - { - bool m_success; - - - BEGIN_KV_SERIALIZE_MAP() - KV_SERIALIZE(m_success) - END_KV_SERIALIZE_MAP() - }; - }; - - - //------------------------------------------------------------------------------------- - //------------------------------------------------------------------------------------- - //in debug purpose - bool operator != (const some_test_subdata& a, const some_test_subdata& b) - { - return b.m_str != a.m_str; - } - - bool operator == (const some_test_data& a, const some_test_data& b) - { - if( b.m_str != a.m_str - || b.m_uint64 != a.m_uint64 - || b.m_uint32 != a.m_uint32 - || b.m_uint16 != a.m_uint16 - || b.m_uint8 != a.m_uint8 - || b.m_int64 != a.m_int64 - || b.m_int32 != a.m_int32 - || b.m_int16 != a.m_int16 - || b.m_int8 != a.m_int8 - || b.m_double != a.m_double - || b.m_bool != a.m_bool - || b.m_list_of_str != a.m_list_of_str - || b.m_list_of_uint64_t != a.m_list_of_uint64_t - || b.m_list_of_uint32_t != a.m_list_of_uint32_t - || b.m_list_of_uint16_t != a.m_list_of_uint16_t - || b.m_list_of_uint8_t != a.m_list_of_uint8_t - || b.m_list_of_int64_t != a.m_list_of_int64_t - || b.m_list_of_int32_t != a.m_list_of_int32_t - || b.m_list_of_int16_t != a.m_list_of_int16_t - || b.m_list_of_int8_t != a.m_list_of_int8_t - || b.m_list_of_double != a.m_list_of_double - || b.m_list_of_bool != a.m_list_of_bool - || b.m_subobj != a.m_subobj - || b.m_list_of_self != a.m_list_of_self - || b.m_storage_entry_int.which() != a.m_storage_entry_int.which() - || b.m_storage_entry_string.which() != a.m_storage_entry_string.which() - ) - return false; - return true; - } - - inline some_test_data get_test_data() - { - some_test_data s; - s.m_str = "zuzuzuzuzuz"; - s.m_uint64 = 111111111111111; - s.m_uint32 = 2222222; - s.m_uint16 = 2222; - s.m_uint8 = 22; - s.m_int64 = -111111111111111; - s.m_int32 = -2222222; - s.m_int16 = -2222; - s.m_int8 = -24; - s.m_double = 0.11111; - s.m_bool = true; - s.m_list_of_str.push_back("1112121"); - s.m_list_of_uint64_t.push_back(1111111111); - s.m_list_of_uint64_t.push_back(2222222222); - s.m_list_of_uint32_t.push_back(1111111); - s.m_list_of_uint32_t.push_back(2222222); - s.m_list_of_uint16_t.push_back(1111); - s.m_list_of_uint16_t.push_back(2222); - s.m_list_of_uint8_t.push_back(11); - s.m_list_of_uint8_t.push_back(22); - - - s.m_list_of_int64_t.push_back(-1111111111); - s.m_list_of_int64_t.push_back(-222222222); - s.m_list_of_int32_t.push_back(-1111111); - s.m_list_of_int32_t.push_back(-2222222); - s.m_list_of_int16_t.push_back(-1111); - s.m_list_of_int16_t.push_back(-2222); - s.m_list_of_int8_t.push_back(-11); - s.m_list_of_int8_t.push_back(-22); - - s.m_list_of_double.push_back(0.11111); - s.m_list_of_double.push_back(0.22222); - s.m_list_of_bool.push_back(true); - s.m_list_of_bool.push_back(false); - - s.m_subobj.m_str = "subszzzzzzzz"; - s.m_list_of_self.push_back(s); - s.m_storage_entry_int = epee::serialization::storage_entry(uint64_t(22222));; - s.m_storage_entry_string = epee::serialization::storage_entry(std::string("sdsvsdvs")); - return s; - } -} \ No newline at end of file diff --git a/contrib/epee/include/ado_db_helper.h b/contrib/epee/include/ado_db_helper.h deleted file mode 100644 index ed4e5b30f8..0000000000 --- a/contrib/epee/include/ado_db_helper.h +++ /dev/null @@ -1,1095 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#ifndef _DB_ADO_HELPER_H_ -#define _DB_ADO_HELPER_H_ - -#include -#include -#include "string_coding.h" -#include "math_helper.h" -#include "file_io_utils.h" -#include "global_stream_operators.h" - - - -#define BEGIN_TRY_SECTION() try { - -#define CATCH_TRY_SECTION(ret_val) CATCH_TRY_SECTION_MESS(ret_val, "") - -#define CATCH_TRY_SECTION_MESS(ret_val, mess_where) }\ - catch(const std::exception&ex)\ - {\ - LOG_PRINT_J("DB_ERROR: " << ex.what(), LOG_LEVEL_0);\ - return ret_val;\ - }\ - catch(const _com_error& comm_err)\ - {\ - const TCHAR* pstr = comm_err.Description();\ - std::string descr = string_encoding::convert_to_ansii(pstr?pstr:TEXT(""));\ - const TCHAR* pmessage = comm_err.ErrorMessage();\ - pstr = comm_err.Source();\ - std::string source = string_encoding::convert_to_ansii(pstr?pstr:TEXT(""));\ - LOG_PRINT_J("COM_ERROR " << mess_where << ":\n\tDescriprion:" << descr << ", \n\t Message: " << string_encoding::convert_to_ansii(pmessage) << "\n\t Source: " << source, LOG_LEVEL_0);\ - return ret_val;\ - }\ - catch(...)\ - {\ - LOG_PRINT_J("..._ERROR: Unknown error.", LOG_LEVEL_0);\ - return ret_val;\ - }\ - -namespace epee -{ -namespace ado_db_helper -{ - - struct profile_entry - { - profile_entry():m_call_count(0), m_max_time(0), m_min_time(0) - {} - //std::string m_sql; - math_helper::average m_avrg; - size_t m_call_count; - DWORD m_max_time; - DWORD m_min_time; - }; - - class profiler_manager - { - public: - typedef std::map sqls_map; - profiler_manager(){} - - static bool sort_by_timing(const sqls_map::iterator& a, const sqls_map::iterator& b) - { - return a->second.m_avrg.get_avg() > b->second.m_avrg.get_avg(); - } - - bool flush_log(const std::string& path) - { - CRITICAL_REGION_BEGIN(m_sqls_lock); - std::stringstream strm; - strm << "SQL PROFILE:\r\nStatements: " << m_sqls.size() << "\r\n"; - std::list m_sorted_by_time_sqls; - for(std::map::iterator it = m_sqls.begin();it!=m_sqls.end();it++) - m_sorted_by_time_sqls.push_back(it); - - m_sorted_by_time_sqls.sort(sort_by_timing); - - for(std::list::iterator 
it = m_sorted_by_time_sqls.begin();it!=m_sorted_by_time_sqls.end();it++) - { - strm << "---------------------------------------------------------------------------------------------------------\r\nSQL: " << (*it)->first << "\r\n"; - strm << "\tavrg: " << (*it)->second.m_avrg.get_avg() << "\r\n\tmax: " << (*it)->second.m_max_time << "\r\n\tmin: " << (*it)->second.m_min_time << "\r\n\tcount: " << (*it)->second.m_call_count << "\r\n"; - } - - return file_io_utils::save_string_to_file(path.c_str(), strm.str()); - CRITICAL_REGION_END(); - } - - bool push_entry(const std::string sql, DWORD time) - { - CRITICAL_REGION_BEGIN(m_sqls_lock); - profile_entry& entry_ref = m_sqls[sql]; - entry_ref.m_avrg.push(time); - entry_ref.m_call_count++; - if(time > entry_ref.m_max_time) entry_ref.m_max_time = time; - if(time < entry_ref.m_min_time || entry_ref.m_min_time == 0) entry_ref.m_min_time = time; - CRITICAL_REGION_END(); - return true; - } - - bool get_entry_avarege(const std::string sql, DWORD& time) - { - CRITICAL_REGION_BEGIN(m_sqls_lock); - sqls_map::iterator it = m_sqls.find(sql); - if(it==m_sqls.end()) - return false; - - time = static_cast(it->second.m_avrg.get_avg()); - CRITICAL_REGION_END(); - return true; - } - - private: - - sqls_map m_sqls; - critical_section m_sqls_lock; - }; -inline - profiler_manager* get_set_profiler(bool need_to_set = false, profiler_manager** pprofiler = NULL) - { - static profiler_manager* pmanager = NULL; - if(need_to_set) - pmanager = *pprofiler; - //else - // *pprofiler = pmanager; - - return pmanager; - } -inline - bool init() // INIT and DEINIT are NOT THREAD SAFE operations, CALL it BEFOR u start using this wrapper. - { - profiler_manager* pmanager = new profiler_manager(); - get_set_profiler(true, &pmanager); - return true; - } -inline - bool deinit() - { - profiler_manager* pmanager = get_set_profiler(); - //get_set_profiler(false, &pmanager); - if(pmanager) - delete pmanager; - return true; - } - inline bool push_timing(const std::string sql, DWORD time) - { - profiler_manager* pmanager = get_set_profiler(); - //get_set_profiler(false, &pmanager); - if(pmanager) - return pmanager->push_entry(sql, time); - return true; - } - - inline bool flush_profiler(const std::string path) - { - profiler_manager* pmanager = get_set_profiler(); - //get_set_profiler(false, &pmanager); - if(pmanager) - return pmanager->flush_log(path); - return true; - } - - class timing_guard - { - DWORD m_start_time; - std::string m_sql; - - public: - timing_guard(const std::string& sql) - { - m_start_time = ::GetTickCount(); - m_sql = sql; - } - - ~timing_guard() - { - DWORD timing = ::GetTickCount() - m_start_time; - push_timing(m_sql, timing); - } - }; -#define PROFILE_SQL(sql) timing_guard local_timing(sql) - - - typedef std::vector > table; - - inline bool add_parametr(ADODB::_CommandPtr cmd, const std::string& parametr) - { - _variant_t param(parametr.c_str()); - ADODB::ADO_LONGPTR size = sizeof(parametr); - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("", ADODB::adVarChar, ADODB::adParamInput, static_cast(parametr.size()+1), param); - cmd->Parameters->Append(param_obj); - return true; - } - - inline bool add_parametr(ADODB::_CommandPtr cmd, const std::wstring& parametr) - { - _variant_t param(parametr.c_str()); - ADODB::ADO_LONGPTR size = sizeof(parametr); - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("", ADODB::adVarWChar, ADODB::adParamInput, static_cast(parametr.size()+2), param); - cmd->Parameters->Append(param_obj); - return true; - } - - inline bool 
add_parametr(ADODB::_CommandPtr cmd, const __int64 parametr) - { - _variant_t param(parametr); - ADODB::ADO_LONGPTR size = static_cast(sizeof(parametr)); - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("parametr", ADODB::adBigInt, ADODB::adParamInput, static_cast(size), param); - cmd->Parameters->Append(param_obj); - return true; - } - - inline bool add_parametr(ADODB::_CommandPtr cmd, const unsigned __int64 parametr) - { - _variant_t param(parametr); - ADODB::ADO_LONGPTR size = static_cast(sizeof(parametr)); - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("parametr", ADODB::adUnsignedBigInt, ADODB::adParamInput, static_cast(size), param); - cmd->Parameters->Append(param_obj); - return true; - } - - - inline bool add_parametr(ADODB::_CommandPtr cmd, const int parametr) - { - _variant_t param(parametr); - ADODB::ADO_LONGPTR size = static_cast(sizeof(parametr)); - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("parametr", ADODB::adInteger, ADODB::adParamInput, static_cast(size), param); - cmd->Parameters->Append(param_obj); - return true; - } - - inline bool add_parametr(ADODB::_CommandPtr cmd, const unsigned int parametr) - { - _variant_t param(parametr); - ADODB::ADO_LONGPTR size = static_cast(sizeof(parametr)); - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("parametr", ADODB::adUnsignedInt, ADODB::adParamInput, static_cast(size), param); - cmd->Parameters->Append(param_obj); - return true; - } - - inline bool add_parametr(ADODB::_CommandPtr cmd, float parametr) - { - _variant_t param; - param.ChangeType(VT_R4); - param.fltVal = parametr; - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("parametr", ADODB::adSingle, ADODB::adParamInput, static_cast(sizeof(float)), param); - cmd->Parameters->Append(param_obj); - return true; - } - - inline bool add_parametr(ADODB::_CommandPtr cmd, bool parametr) - { - _variant_t param; - param = parametr; - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("parametr", ADODB::adBoolean, ADODB::adParamInput, sizeof(parametr), param); - cmd->Parameters->Append(param_obj); - return true; - } - - - inline bool add_parametr(ADODB::_CommandPtr cmd, _variant_t parametr) - { - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("parametr", ADODB::adDBTimeStamp, ADODB::adParamInput, sizeof(parametr), parametr); - cmd->Parameters->Append(param_obj); - return true; - } - - - inline bool add_parametr_as_double(ADODB::_CommandPtr cmd, const DATE parametr) - { - _variant_t param; - param.ChangeType(VT_R8); - param.dblVal = parametr; - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("parametr", ADODB::adDouble, ADODB::adParamInput, sizeof(float), param); - cmd->Parameters->Append(param_obj); - return true; - } - - template - inline bool add_parametr(ADODB::_CommandPtr cmd, const std::list params) - { - for(std::list::const_iterator it = params.begin(); it!=params.end(); it++) - if(!add_parametr(cmd, *it)) - return false; - return true; - } - - /* - inline bool add_parametr(ADODB::_CommandPtr cmd, const size_t parametr) - { - _variant_t param; - param.ChangeType(VT_I4); - param.intVal = parametr; - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("parametr", ADODB::adInteger, ADODB::adParamInput, sizeof(parametr), param); - cmd->Parameters->Append(param_obj); - return true; - }*/ - - - inline bool add_parametr(ADODB::_CommandPtr cmd, const DATE parametr) - { - /*_variant_t param; - param.ChangeType(VT_R8); - param.dblVal = parametr; - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("parametr", ADODB::adDouble, 
ADODB::adParamInput, sizeof(float), param); - cmd->Parameters->Append(param_obj);*/ - - _variant_t param; - param.ChangeType(VT_DATE); - param.date = parametr; - ADODB::_ParameterPtr param_obj = cmd->CreateParameter("parametr", ADODB::adDBDate, ADODB::adParamInput, sizeof(parametr), param); - cmd->Parameters->Append(param_obj); - - return true; - } - - - inline bool execute_helper(ADODB::_CommandPtr cmd, _variant_t* pcount_processed = NULL) - { - //BEGIN_TRY_SECTION(); - - cmd->Execute(pcount_processed, NULL, ADODB::adExecuteNoRecords); - - - //CATCH_TRY_SECTION(false); - - return true; - } - - - inline bool select_helper(ADODB::_CommandPtr cmd, table& result_vector) - { - result_vector.clear(); - //BEGIN_TRY_SECTION(); - - ADODB::_RecordsetPtr precordset = cmd->Execute(NULL, NULL, NULL); - if(!precordset) - { - LOG_ERROR("DB_ERROR: cmd->Execute returned NULL!!!"); - return false; - } - - //if(precordset->EndOfFile == EOF) - //{ - // return true; - //} - /*try - { - if(precordset->MoveFirst()!= S_OK) - { - LOG_ERROR("DB_ERROR: Filed to move first!!!"); - return false; - } - } - catch (...) - { - return true; - }*/ - - size_t current_record_index = 0; - while(precordset->EndOfFile != EOF) - { - result_vector.push_back(table::value_type()); - size_t fields_count = precordset->Fields->Count; - result_vector[current_record_index].resize(fields_count); - for(size_t current_field_index = 0; current_field_index < fields_count; current_field_index++) - { - _variant_t var; - var.ChangeType(VT_I2); - var.intVal = static_cast(current_field_index); - result_vector[current_record_index][current_field_index] = precordset->Fields->GetItem(var)->Value; - } - precordset->MoveNext(); - current_record_index++; - } - //CATCH_TRY_SECTION(false); - return true; - } - - - template - struct adapter_zero - { - - }; - - template - struct adapter_single - { - TParam1 tparam1; - }; - template - struct adapter_double - { - TParam1 tparam1; - TParam2 tparam2; - }; - - - template - struct adapter_triple - { - TParam1 tparam1; - TParam2 tparam2; - TParam3 tparam3; - }; - - template - struct adapter_quad - { - TParam1 tparam1; - TParam2 tparam2; - TParam3 tparam3; - TParam4 tparam4; - }; - - template - struct adapter_quanto - { - TParam1 tparam1; - TParam2 tparam2; - TParam3 tparam3; - TParam4 tparam4; - TParam5 tparam5; - }; - - template - struct adapter_sixto - { - TParam1 tparam1; - TParam2 tparam2; - TParam3 tparam3; - TParam4 tparam4; - TParam5 tparam5; - TParam6 tparam6; - }; - - template - struct adapter_sevento - { - TParam1 tparam1; - TParam2 tparam2; - TParam3 tparam3; - TParam4 tparam4; - TParam5 tparam5; - TParam6 tparam6; - TParam7 tparam7; - }; - - template - struct adapter_nine - { - TParam1 tparam1; - TParam2 tparam2; - TParam3 tparam3; - TParam4 tparam4; - TParam5 tparam5; - TParam6 tparam6; - TParam7 tparam7; - TParam8 tparam8; - TParam9 tparam9; - }; - - template - bool add_parametrs_multi(ADODB::_CommandPtr cmd, const adapter_zero& params) - { - return true; - } - - template - bool add_parametrs_multi(ADODB::_CommandPtr cmd, const adapter_single& params) - { - return add_parametr(cmd, params.tparam1); - } - - template - bool add_parametrs_multi(ADODB::_CommandPtr cmd, const adapter_double& params) - { - if(!add_parametr(cmd, params.tparam1)) return false; - return add_parametr(cmd, params.tparam2); - } - - template - bool add_parametrs_multi(ADODB::_CommandPtr cmd, const adapter_triple& params) - { - if(!add_parametr(cmd, params.tparam1)) return false; - if(!add_parametr(cmd, params.tparam2)) return 
false; - return add_parametr(cmd, params.tparam3); - } - - template - bool add_parametrs_multi(ADODB::_CommandPtr cmd, const adapter_quad& params) - { - if(!add_parametr(cmd, params.tparam1)) return false; - if(!add_parametr(cmd, params.tparam2)) return false; - if(!add_parametr(cmd, params.tparam3)) return false; - return add_parametr(cmd, params.tparam4); - } - - template - bool add_parametrs_multi(ADODB::_CommandPtr cmd, const adapter_quanto& params) - { - if(!add_parametr(cmd, params.tparam1)) return false; - if(!add_parametr(cmd, params.tparam2)) return false; - if(!add_parametr(cmd, params.tparam3)) return false; - if(!add_parametr(cmd, params.tparam4)) return false; - return add_parametr(cmd, params.tparam5); - } - - template - bool add_parametrs_multi(ADODB::_CommandPtr cmd, const adapter_sixto& params) - { - if(!add_parametr(cmd, params.tparam1)) return false; - if(!add_parametr(cmd, params.tparam2)) return false; - if(!add_parametr(cmd, params.tparam3)) return false; - if(!add_parametr(cmd, params.tparam4)) return false; - if(!add_parametr(cmd, params.tparam5)) return false; - return add_parametr(cmd, params.tparam6); - } - - template - bool add_parametrs_multi(ADODB::_CommandPtr cmd, const adapter_sevento& params) - { - if(!add_parametr(cmd, params.tparam1)) return false; - if(!add_parametr(cmd, params.tparam2)) return false; - if(!add_parametr(cmd, params.tparam3)) return false; - if(!add_parametr(cmd, params.tparam4)) return false; - if(!add_parametr(cmd, params.tparam5)) return false; - if(!add_parametr(cmd, params.tparam6)) return false; - return add_parametr(cmd, params.tparam7); - } - - template - bool add_parametrs_multi(ADODB::_CommandPtr cmd, const adapter_nine& params) - { - if(!add_parametr(cmd, params.tparam1)) return false; - if(!add_parametr(cmd, params.tparam2)) return false; - if(!add_parametr(cmd, params.tparam3)) return false; - if(!add_parametr(cmd, params.tparam4)) return false; - if(!add_parametr(cmd, params.tparam5)) return false; - if(!add_parametr(cmd, params.tparam6)) return false; - if(!add_parametr(cmd, params.tparam7)) return false; - if(!add_parametr(cmd, params.tparam8)) return false; - return add_parametr(cmd, params.tparam9); - } - - template - std::string print_parameters_multi(const adapter_sevento& params) - { - std::stringstream strm; - strm << params.tparam1 << ", " << params.tparam2 << ", " << params.tparam3 << ", " << params.tparam4 << ", " << params.tparam5 << ", " << params.tparam6 << ", " << params.tparam7; - return strm.str(); - } - - template - std::string print_parameters_multi(const adapter_nine& params) - { - std::stringstream strm; - strm << params.tparam1 << ", " << params.tparam2 << ", " << params.tparam3 << ", " << params.tparam4 << ", " << params.tparam5 << ", " << params.tparam6 << ", " << params.tparam7 << ", " << params.tparam8 << ", " << params.tparam9; - return strm.str(); - } - - template - std::string print_parameters_multi(const adapter_sixto& params) - { - std::stringstream strm; - strm << params.tparam1 << ", " << params.tparam2 << ", " << params.tparam3 << ", " << params.tparam4 << ", " << params.tparam5 << ", " << params.tparam6; - return strm.str(); - } - - template - std::string print_parameters_multi(const adapter_quanto& params) - { - std::stringstream strm; - strm << params.tparam1 << ", " << params.tparam2 << ", " << params.tparam3 << ", " << params.tparam4 << ", " << params.tparam5; - return strm.str(); - } - - - template - std::string print_parameters_multi(const adapter_quad& params) - { - std::stringstream 
strm; - strm << params.tparam1 << ", " << params.tparam2 << ", " << params.tparam3 << ", " << params.tparam4; - return strm.str(); - } - - template - std::string print_parameters_multi(const adapter_triple& params) - { - std::stringstream strm; - strm << params.tparam1 << ", " << params.tparam2 << ", " << params.tparam3; - return strm.str(); - } - - template - std::string get_str_param(const TParam& prm) - { - std::stringstream strm; - strm << prm; - return strm.str(); - } - - template - std::string get_str_param(const std::list& prm_lst) - { - std::stringstream strm; - for(std::list::const_iterator it = prm_lst.begin();it!=prm_lst.end();it++) - strm << get_str_param(*it) << ", "; - return strm.str(); - } - - - template - std::string print_parameters_multi(const adapter_double& params) - { - std::stringstream strm; - strm << get_str_param(params.tparam1) << ", " << get_str_param(params.tparam2); - return strm.str(); - } - - template - std::string print_parameters_multi(const adapter_single& params) - { - std::stringstream strm; - strm << get_str_param(params.tparam1); - return strm.str(); - } - - template - std::string print_parameters_multi(const adapter_zero& params) - { - std::stringstream strm; - strm << "(no parametrs)"; - return strm.str(); - } - - - template - bool execute_helper_multiparam(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParams& parametrs, _variant_t* pcount_processed = NULL) - { - PROFILE_SQL(sql_statment); - bool res = false; - BEGIN_TRY_SECTION(); - - ADODB::_CommandPtr cmd; - cmd.CreateInstance(__uuidof(ADODB::Command)); - cmd->CommandText = _bstr_t(sql_statment.c_str()); - - if(!add_parametrs_multi(cmd, parametrs)) - return false; - - cmd->ActiveConnection = pconnection; - res = execute_helper(cmd, pcount_processed); - - CATCH_TRY_SECTION_MESS(false, "while statment: " << sql_statment << " [params]: " << print_parameters_multi(parametrs)); - return res; - } - - - template - inline - bool select_helper_multiparam(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParams& parametrs, table& result_vector) - { - PROFILE_SQL(sql_statment); - bool res = false; - BEGIN_TRY_SECTION(); - ADODB::_CommandPtr cmd; - cmd.CreateInstance(__uuidof(ADODB::Command)); - cmd->CommandText = _bstr_t(sql_statment.c_str()); - - - if(!add_parametrs_multi(cmd, parametrs)) - return false; - - cmd->ActiveConnection = pconnection; - res = select_helper(cmd, result_vector); - CATCH_TRY_SECTION_MESS(false, "while statment: " << sql_statment << " [params]: " << print_parameters_multi(parametrs)); - return res; - } - - - template - inline - bool select_helper_param_container(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParams& parametrs, table& result_vector) - { - PROFILE_SQL(sql_statment); - bool res = false; - BEGIN_TRY_SECTION(); - ADODB::_CommandPtr cmd; - cmd.CreateInstance(__uuidof(ADODB::Command)); - cmd->CommandText = _bstr_t(sql_statment.c_str()); - - - for(TParams::const_iterator it = parametrs.begin(); it!=parametrs.end(); it++) - { - add_parametr(cmd, *it); - } - - cmd->ActiveConnection = pconnection; - res = select_helper(cmd, result_vector); - - CATCH_TRY_SECTION(false); - return res; - } - - - inline - bool execute_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, _variant_t* pvt = NULL) - { - adapter_zero params; - return execute_helper_multiparam(pconnection, sql_statment, params, pvt); - } - - template - bool execute_helper(ADODB::_ConnectionPtr pconnection, const 
std::string& sql_statment, const TParam& parametr) - { - adapter_single params; - params.tparam1 = parametr; - return execute_helper_multiparam(pconnection, sql_statment, params); - } - - - template - bool execute_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam1& parametr1, const TParam2& parametr2) - { - adapter_double params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - return execute_helper_multiparam(pconnection, sql_statment, params); - - } - - template - bool execute_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam1& parametr1, const TParam2& parametr2, const TParam3& parametr3) - { - adapter_triple params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - params.tparam3 = parametr3; - return execute_helper_multiparam(pconnection, sql_statment, params); - } - - template - bool execute_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam1& parametr1, const TParam2& parametr2, const TParam3& parametr3, const TParam4& parametr4) - { - adapter_quad params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - params.tparam3 = parametr3; - params.tparam4 = parametr4; - return execute_helper_multiparam(pconnection, sql_statment, params); - } - - template - bool execute_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam1& parametr1, const TParam2& parametr2, const TParam3& parametr3, const TParam4& parametr4, const TParam5& parametr5) - { - adapter_quanto params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - params.tparam3 = parametr3; - params.tparam4 = parametr4; - params.tparam5 = parametr5; - return execute_helper_multiparam(pconnection, sql_statment, params); - } - - template - bool execute_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam1& parametr1, const TParam2& parametr2, const TParam3& parametr3, const TParam4& parametr4, const TParam5& parametr5, const TParam6& parametr6) - { - adapter_sixto params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - params.tparam3 = parametr3; - params.tparam4 = parametr4; - params.tparam5 = parametr5; - params.tparam6 = parametr6; - return execute_helper_multiparam(pconnection, sql_statment, params); - } - - - template - bool execute_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam1& parametr1, const TParam2& parametr2, const TParam3& parametr3, const TParam4& parametr4, const TParam5& parametr5, const TParam6& parametr6, const TParam7& parametr7) - { - adapter_sevento params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - params.tparam3 = parametr3; - params.tparam4 = parametr4; - params.tparam5 = parametr5; - params.tparam6 = parametr6; - params.tparam7 = parametr7; - return execute_helper_multiparam(pconnection, sql_statment, params); - } - - inline - bool select_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, table& result_vector) - { - adapter_zero params; - return select_helper_multiparam(pconnection, sql_statment, params, result_vector); - } - - - template - bool select_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam& parametr, table& result_vector) - { - adapter_single params; - params.tparam1 = parametr; - return select_helper_multiparam(pconnection, sql_statment, params, result_vector); - } - - template - bool select_helper(ADODB::_ConnectionPtr pconnection, const 
std::string& sql_statment, const TParam1 parametr1, const TParam2 parametr2, table& result_vector) - { - adapter_double params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - return select_helper_multiparam(pconnection, sql_statment, params, result_vector); - - } - - template - bool select_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam1 parametr1, const TParam2 parametr2, const TParam3 parametr3, table& result_vector) - { - adapter_triple params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - params.tparam3 = parametr3; - return select_helper_multiparam(pconnection, sql_statment, params, result_vector); - - } - - template - bool select_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam1 parametr1, const TParam2 parametr2, const TParam3 parametr3, const TParam4 parametr4, table& result_vector) - { - adapter_quad params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - params.tparam3 = parametr3; - params.tparam4 = parametr4; - return select_helper_multiparam(pconnection, sql_statment, params, result_vector); - } - - template - bool select_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam1 parametr1, const TParam2 parametr2, const TParam3 parametr3, const TParam4 parametr4, const TParam5 parametr5, table& result_vector) - { - adapter_quanto params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - params.tparam3 = parametr3; - params.tparam4 = parametr4; - params.tparam5 = parametr5; - return select_helper_multiparam(pconnection, sql_statment, params, result_vector); - } - - - template - bool select_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam1 parametr1, const TParam2 parametr2, const TParam3 parametr3, const TParam4 parametr4, const TParam5 parametr5, const TParam6 parametr6, table& result_vector) - { - adapter_sixto params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - params.tparam3 = parametr3; - params.tparam4 = parametr4; - params.tparam5 = parametr5; - params.tparam6 = parametr6; - return select_helper_multiparam(pconnection, sql_statment, params, result_vector); - } - - template - bool select_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam1 parametr1, const TParam2 parametr2, const TParam3 parametr3, const TParam4 parametr4, const TParam5 parametr5, const TParam6 parametr6, const TParam7 parametr7, table& result_vector) - { - adapter_sevento params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - params.tparam3 = parametr3; - params.tparam4 = parametr4; - params.tparam5 = parametr5; - params.tparam6 = parametr6; - params.tparam7 = parametr7; - return select_helper_multiparam(pconnection, sql_statment, params, result_vector); - } - - template - bool select_helper(ADODB::_ConnectionPtr pconnection, const std::string& sql_statment, const TParam1 parametr1, const TParam2 parametr2, const TParam3 parametr3, const TParam4 parametr4, const TParam5 parametr5, const TParam6 parametr6, const TParam7 parametr7,const TParam8 parametr8,const TParam9 parametr9, table& result_vector) - { - adapter_nine params; - params.tparam1 = parametr1; - params.tparam2 = parametr2; - params.tparam3 = parametr3; - params.tparam4 = parametr4; - params.tparam5 = parametr5; - params.tparam6 = parametr6; - params.tparam7 = parametr7; - params.tparam8 = parametr8; - params.tparam9 = parametr9; - return 
select_helper_multiparam(pconnection, sql_statment, params, result_vector); - } - - - - - /************************************************************************/ - /* */ - /************************************************************************/ - - class per_thread_connection_pool - { - public: - bool init(const std::string& connection_string, const std::string& login, const std::string& pass) - { - m_connection_string = connection_string; - m_login = login; - m_password = pass; - if(!get_db_connection().GetInterfacePtr()) - return false; - - return true; - } - - ADODB::_ConnectionPtr& get_db_connection() - { - - //soci::session - - m_db_connections_lock.lock(); - boost::shared_ptr& conn_ptr = m_db_connections[::GetCurrentThreadId()]; - m_db_connections_lock.unlock(); - if(!conn_ptr.get()) - { - conn_ptr.reset(new ADODB::_ConnectionPtr()); - ADODB::_ConnectionPtr& conn = *conn_ptr.get(); - //init new connection - - BEGIN_TRY_SECTION(); - //_bstr_t str = _bstr_t("Provider=SQLOLEDB;Data Source=SRV1;Integrated Security=SSPI;Initial Catalog=dispatcher;"); - - if(S_OK != conn.CreateInstance(__uuidof(ADODB::Connection))) - { - LOG_ERROR("Failed to Create, instance, was CoInitialize called ???!"); - return conn; - } - - HRESULT res = conn->Open(_bstr_t(m_connection_string.c_str()), _bstr_t(m_login.c_str()), _bstr_t(m_password.c_str()), NULL); - if(res != S_OK) - { - LOG_ERROR("Failed to connect do DB, connection str:" << m_connection_string); - return conn; - } - CATCH_TRY_SECTION_MESS(conn, "while creating another connection"); - LOG_PRINT("New DB Connection added for threadid=" << ::GetCurrentThreadId(), LOG_LEVEL_0); - ado_db_helper::execute_helper(conn, "set enable_seqscan=false;"); - return conn; - } - - return *conn_ptr.get(); - } - - //---------------------------------------------------------------------------------------------- - bool check_status() - { - ADODB::_ConnectionPtr& rconn = get_db_connection(); - if(!ado_db_helper::execute_helper(rconn, "SET CLIENT_ENCODING TO 'SQL_ASCII'")) - { - - try{ - HRESULT res = rconn->Close(); - } - catch(...) 
- { - - }; - BEGIN_TRY_SECTION(); - - HRESULT res = rconn->Open(_bstr_t(m_connection_string.c_str()), _bstr_t(m_login.c_str()), _bstr_t(m_password.c_str()), NULL); - if(res != S_OK) - { - LOG_PRINT("Failed to restore connection to local AI DB", LOG_LEVEL_1); - return false; - } - CATCH_TRY_SECTION(false); - } - - return true; - } - - protected: - private: - std::map > m_db_connections; - critical_section m_db_connections_lock; - std::string m_connection_string; - std::string m_login; - std::string m_password; - }; - - - template - bool find_or_add_t(const std::string& sql_select_statment, const std::string& sql_insert_statment, OUT default_id_type& id, OUT bool& new_object_added, TParam1 parametr_1, t_conn& c) - { - ado_db_helper::adapter_single params; - params.tparam1 = parametr_1; - return find_or_add_t_multiparametred(sql_select_statment, sql_insert_statment, id, new_object_added, params, c); - } - - - template - bool find_or_add_t(const std::string& sql_select_statment, const std::string& sql_insert_statment, OUT default_id_type& id, OUT bool& new_object_added, TParam1 parametr_1, TParam2 parametr_2, t_conn& c) - { - ado_db_helper::adapter_double params; - params.tparam1 = parametr_1; - params.tparam2 = parametr_2; - return find_or_add_t_multiparametred(sql_select_statment, sql_insert_statment, id, new_object_added, params, c); - } - - - template - bool find_or_add_t(const std::string& sql_select_statment, const std::string& sql_insert_statment, OUT default_id_type& id, OUT bool& new_object_added, TParam1 parametr_1, TParam2 parametr_2, TParam3 parametr_3, t_conn& c) - { - ado_db_helper::adapter_triple params; - params.tparam1 = parametr_1; - params.tparam2 = parametr_2; - params.tparam3 = parametr_3; - return find_or_add_t_multiparametred(sql_select_statment, sql_insert_statment, id, new_object_added, params, c); - } - - template - bool find_or_add_t(const std::string& sql_select_statment, const std::string& sql_insert_statment, OUT default_id_type& id, OUT bool& new_object_added, TParam1 parametr_1, TParam2 parametr_2, TParam3 parametr_3, TParam4 parametr_4, t_conn& c) - { - ado_db_helper::adapter_quad params; - params.tparam1 = parametr_1; - params.tparam2 = parametr_2; - params.tparam3 = parametr_3; - params.tparam4 = parametr_4; - return find_or_add_t_multiparametred(sql_select_statment, sql_insert_statment, id, new_object_added, params, c); - } - - template - bool find_or_add_t_multiparametred(const std::string& sql_select_statment, const std::string& sql_insert_statment, OUT default_id_type& id, OUT bool& new_object_added, TParams params, t_conn& c) - { - - //CHECK_CONNECTION(false); - - new_object_added = false; - ado_db_helper::table result_table; - - bool res = select_helper_multiparam(c.get_db_connection(), sql_select_statment, params, result_table); - if(!result_table.size()) - { - res = select_helper_multiparam(c.get_db_connection(), sql_insert_statment, params, result_table); - if(!res || !result_table.size()) - { - //last time try to select - res = select_helper_multiparam(c.get_db_connection(), sql_select_statment, params, result_table); - CHECK_AND_ASSERT_MES(res, false, "Failed to execute statment: " << sql_select_statment); - CHECK_AND_ASSERT_MES(result_table.size(), false, "No records returned from statment: " << sql_select_statment); - }else - { - new_object_added = true; - } - } - - BEGIN_TRY_SECTION() - id = result_table[0][0]; - CATCH_TRY_SECTION_MESS(false, "while converting returned value [find_or_add_t_multiparametred()]"); - - return true; - } - -} -} 
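The adapter_zero through adapter_nine structs, and the matching ladders of execute_helper/select_helper overloads closed out above, exist only because this code predates C++11. With variadic templates the whole family collapses into one recursive binder; a standalone sketch, where command_stub and the printing add_parametr are illustrative stand-ins for the ADO types and the real bound overloads:

    #include <iostream>
    #include <string>

    struct command_stub {};                        // stand-in for ADODB::_CommandPtr

    template<typename T>
    bool add_parametr(command_stub&, const T& v)   // stand-in for the ADO-bound overloads
    {
      std::cout << "bound parameter: " << v << "\n";
      return true;
    }

    inline bool add_parametrs(command_stub&) { return true; }  // base case: nothing left to bind

    template<typename T, typename... Rest>
    bool add_parametrs(command_stub& cmd, const T& first, const Rest&... rest)
    {
      return add_parametr(cmd, first) && add_parametrs(cmd, rest...);
    }

    int main()
    {
      command_stub cmd;                            // one call replaces all nine adapter_N structs
      return add_parametrs(cmd, 42, std::string("abc"), 3.14) ? 0 : 1;
    }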
-#endif //!_DB_HELPER_H_ diff --git a/contrib/epee/include/console_handler.h b/contrib/epee/include/console_handler.h deleted file mode 100644 index a1df78395f..0000000000 --- a/contrib/epee/include/console_handler.h +++ /dev/null @@ -1,429 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -#pragma once - -#include -#include -#include -#include -#include - -#include -#include - -#include "string_tools.h" - -namespace epee -{ - class async_stdin_reader - { - public: - async_stdin_reader() - : m_run(true) - , m_has_read_request(false) - , m_read_status(state_init) - { - m_reader_thread = std::thread(std::bind(&async_stdin_reader::reader_thread_func, this)); - } - - ~async_stdin_reader() - { - stop(); - } - - // Not thread safe. Only one thread can call this method at once. 
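The reader thread below never blocks indefinitely on stdin: wait_stdin_data() polls with select() and a 100 ms timeout so that a cleared m_run flag is noticed promptly. A standalone, POSIX-only sketch of that polling idiom:

    #include <sys/select.h>
    #include <sys/time.h>
    #include <unistd.h>

    // Returns true if stdin has data within ~100 ms, false on timeout or error.
    bool stdin_ready_within_100ms()
    {
      fd_set read_set;
      FD_ZERO(&read_set);
      FD_SET(STDIN_FILENO, &read_set);

      timeval tv;
      tv.tv_sec = 0;
      tv.tv_usec = 100 * 1000;   // the same 100 ms slice the deleted code uses

      int retval = ::select(STDIN_FILENO + 1, &read_set, nullptr, nullptr, &tv);
      return retval > 0;         // 0 = timeout, <0 = error
    }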
- bool get_line(std::string& line) - { - if (!start_read()) - return false; - - std::unique_lock<std::mutex> lock(m_response_mutex); - while (state_init == m_read_status) - { - m_response_cv.wait(lock); - } - - bool res = false; - if (state_success == m_read_status) - { - line = m_line; - res = true; - } - - m_read_status = state_init; - - return res; - } - - void stop() - { - if (m_run) - { - m_run.store(false, std::memory_order_relaxed); - -#if defined(WIN32) - ::CloseHandle(::GetStdHandle(STD_INPUT_HANDLE)); -#endif - - m_request_cv.notify_one(); - m_reader_thread.join(); - } - } - - private: - bool start_read() - { - std::unique_lock<std::mutex> lock(m_request_mutex); - if (!m_run.load(std::memory_order_relaxed) || m_has_read_request) - return false; - - m_has_read_request = true; - m_request_cv.notify_one(); - return true; - } - - bool wait_read() - { - std::unique_lock<std::mutex> lock(m_request_mutex); - while (m_run.load(std::memory_order_relaxed) && !m_has_read_request) - { - m_request_cv.wait(lock); - } - - if (m_has_read_request) - { - m_has_read_request = false; - return true; - } - - return false; - } - - bool wait_stdin_data() - { -#if !defined(WIN32) - int stdin_fileno = ::fileno(stdin); - - while (m_run.load(std::memory_order_relaxed)) - { - fd_set read_set; - FD_ZERO(&read_set); - FD_SET(stdin_fileno, &read_set); - - struct timeval tv; - tv.tv_sec = 0; - tv.tv_usec = 100 * 1000; - - int retval = ::select(stdin_fileno + 1, &read_set, NULL, NULL, &tv); - if (retval < 0) - return false; - else if (0 < retval) - return true; - } -#endif - - return true; - } - - void reader_thread_func() - { - while (true) - { - if (!wait_read()) - break; - - std::string line; - bool read_ok = true; - if (wait_stdin_data()) - { - if (m_run.load(std::memory_order_relaxed)) - { - std::getline(std::cin, line); - read_ok = !std::cin.eof() && !std::cin.fail(); - } - } - else - { - read_ok = false; - } - - { - std::unique_lock<std::mutex> lock(m_response_mutex); - if (m_run.load(std::memory_order_relaxed)) - { - m_line = std::move(line); - m_read_status = read_ok ?
state_success : state_error; - } - else - { - m_read_status = state_cancelled; - } - m_response_cv.notify_one(); - } - } - } - - enum t_state - { - state_init, - state_success, - state_error, - state_cancelled - }; - - private: - std::thread m_reader_thread; - std::atomic m_run; - - std::string m_line; - bool m_has_read_request; - t_state m_read_status; - - std::mutex m_request_mutex; - std::mutex m_response_mutex; - std::condition_variable m_request_cv; - std::condition_variable m_response_cv; - }; - - - template - bool empty_commands_handler(t_server* psrv, const std::string& command) - { - return true; - } - - - class async_console_handler - { - public: - async_console_handler() - { - } - - template - bool run(t_server* psrv, chain_handler ch_handler, const std::string& prompt = "#", const std::string& usage = "") - { - return run(prompt, usage, [&](const std::string& cmd) { return ch_handler(psrv, cmd); }, [&] { psrv->send_stop_signal(); }); - } - - template - bool run(chain_handler ch_handler, const std::string& prompt = "#", const std::string& usage = "") - { - return run(prompt, usage, [&](const std::string& cmd) { return ch_handler(cmd); }, [] { }); - } - - void stop() - { - m_stdin_reader.stop(); - } - - private: - template - bool run(const std::string& prompt, const std::string& usage, const t_cmd_handler& cmd_handler, const t_exit_handler& exit_handler) - { - TRY_ENTRY(); - bool continue_handle = true; - while(continue_handle) - { - if (!prompt.empty()) - { - epee::log_space::set_console_color(epee::log_space::console_color_yellow, true); - std::cout << prompt; - if (' ' != prompt.back()) - std::cout << ' '; - epee::log_space::reset_console_color(); - std::cout.flush(); - } - - std::string command; - if(!m_stdin_reader.get_line(command)) - { - LOG_PRINT("Failed to read line. 
Stopping...", LOG_LEVEL_0); - continue_handle = false; - break; - } - string_tools::trim(command); - - LOG_PRINT_L2("Read command: " << command); - if(0 == command.compare("exit") || 0 == command.compare("q")) - { - continue_handle = false; - }else if (command.empty()) - { - continue; - } - else if(cmd_handler(command)) - { - continue; - } else - { - std::cout << "unknown command: " << command << std::endl; - std::cout << usage; - } - } - exit_handler(); - return true; - CATCH_ENTRY_L0("console_handler", false); - } - - private: - async_stdin_reader m_stdin_reader; - }; - - - template - bool start_default_console(t_server* ptsrv, t_handler handlr, const std::string& prompt, const std::string& usage = "") - { - std::shared_ptr console_handler = std::make_shared(); - std::thread([=](){console_handler->run(ptsrv, handlr, prompt, usage);}).detach(); - return true; - } - - template - bool start_default_console(t_server* ptsrv, const std::string& prompt, const std::string& usage = "") - { - return start_default_console(ptsrv, empty_commands_handler, prompt, usage); - } - - template - bool no_srv_param_adapter(t_server* ptsrv, const std::string& cmd, t_handler handlr) - { - return handlr(cmd); - } - - template - bool run_default_console_handler_no_srv_param(t_server* ptsrv, t_handler handlr, const std::string& prompt, const std::string& usage = "") - { - async_console_handler console_handler; - return console_handler.run(ptsrv, std::bind(no_srv_param_adapter, std::placeholders::_1, std::placeholders::_2, handlr), prompt, usage); - } - - template - bool start_default_console_handler_no_srv_param(t_server* ptsrv, t_handler handlr, const std::string& prompt, const std::string& usage = "") - { - std::thread( std::bind(run_default_console_handler_no_srv_param, ptsrv, handlr, prompt, usage) ); - return true; - } - - /************************************************************************/ - /* */ - /************************************************************************/ - class console_handlers_binder - { - typedef std::function &)> console_command_handler; - typedef std::map > command_handlers_map; - std::unique_ptr m_console_thread; - command_handlers_map m_command_handlers; - async_console_handler m_console_handler; - public: - std::string get_usage() - { - std::stringstream ss; - size_t max_command_len = 0; - for(auto& x:m_command_handlers) - if(x.first.size() > max_command_len) - max_command_len = x.first.size(); - - for(auto& x:m_command_handlers) - { - ss.width(max_command_len + 3); - ss << std::left << x.first << x.second.second << ENDL; - } - return ss.str(); - } - void set_handler(const std::string& cmd, const console_command_handler& hndlr, const std::string& usage = "") - { - command_handlers_map::mapped_type & vt = m_command_handlers[cmd]; - vt.first = hndlr; - vt.second = usage; - } - bool process_command_vec(const std::vector& cmd) - { - if(!cmd.size()) - return false; - auto it = m_command_handlers.find(cmd.front()); - if(it == m_command_handlers.end()) - return false; - std::vector cmd_local(cmd.begin()+1, cmd.end()); - return it->second.first(cmd_local); - } - - bool process_command_str(const std::string& cmd) - { - std::vector cmd_v; - boost::split(cmd_v,cmd,boost::is_any_of(" "), boost::token_compress_on); - return process_command_vec(cmd_v); - } - - bool start_handling(const std::string& prompt, const std::string& usage_string = "") - { - m_console_thread.reset(new std::thread(std::bind(&console_handlers_binder::run_handling, this, prompt, usage_string))); - 
m_console_thread->detach(); - return true; - } - - void stop_handling() - { - m_console_handler.stop(); - } - - bool run_handling(const std::string& prompt, const std::string& usage_string) - { - return m_console_handler.run(std::bind(&console_handlers_binder::process_command_str, this, std::placeholders::_1), prompt, usage_string); - } - }; - - /* work around because of broken boost bind */ - template - class srv_console_handlers_binder: public console_handlers_binder - { - bool process_command_str(t_server* /*psrv*/, const std::string& cmd) - { - return console_handlers_binder::process_command_str(cmd); - } - public: - bool start_handling(t_server* psrv, const std::string& prompt, const std::string& usage_string = "") - { - std::thread(std::bind(&srv_console_handlers_binder::run_handling, this, psrv, prompt, usage_string)).detach(); - return true; - } - - bool run_handling(t_server* psrv, const std::string& prompt, const std::string& usage_string) - { - return m_console_handler.run(psrv, std::bind(&srv_console_handlers_binder::process_command_str, this, - std::placeholders::_1, std::placeholders::_2), prompt, usage_string); - } - - void stop_handling() - { - m_console_handler.stop(); - } - - private: - async_console_handler m_console_handler; - }; -} diff --git a/contrib/epee/include/copyable_atomic.h b/contrib/epee/include/copyable_atomic.h deleted file mode 100644 index 6b5691ab11..0000000000 --- a/contrib/epee/include/copyable_atomic.h +++ /dev/null @@ -1,54 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
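The console_handlers_binder removed above is, at its core, a map from command word to callback plus a whitespace tokenizer. A self-contained sketch of that dispatch pattern using only the standard library (the "echo" command is invented for illustration):

    #include <functional>
    #include <iostream>
    #include <iterator>
    #include <map>
    #include <sstream>
    #include <string>
    #include <vector>

    int main()
    {
      // command word -> handler, mirroring command_handlers_map in the deleted class
      std::map<std::string, std::function<bool(const std::vector<std::string>&)>> handlers;
      handlers["echo"] = [](const std::vector<std::string>& args)
      {
        for (const auto& a : args) std::cout << a << ' ';
        std::cout << '\n';
        return true;
      };

      std::string line;
      while (std::getline(std::cin, line) && line != "exit")
      {
        std::istringstream iss(line);
        std::vector<std::string> words((std::istream_iterator<std::string>(iss)),
                                       std::istream_iterator<std::string>());
        if (words.empty())
          continue;
        auto it = handlers.find(words.front());
        if (it == handlers.end())
        {
          std::cout << "unknown command: " << words.front() << '\n';
          continue;
        }
        std::vector<std::string> args(words.begin() + 1, words.end());
        it->second(args);
      }
    }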
-// - -#pragma once - -#include <atomic> - -namespace epee -{ - class copyable_atomic: public std::atomic<uint32_t> - { - public: - copyable_atomic() - {}; - copyable_atomic(const copyable_atomic& a):std::atomic<uint32_t>(a.load()) - {} - copyable_atomic& operator= (const copyable_atomic& a) - { - store(a.load()); - return *this; - } - uint32_t operator++() - { - return std::atomic<uint32_t>::operator++(); - } - uint32_t operator++(int fake) - { - return std::atomic<uint32_t>::operator++(fake); - } - }; -} \ No newline at end of file diff --git a/contrib/epee/include/file_io_utils.h b/contrib/epee/include/file_io_utils.h deleted file mode 100644 index 01d70db3ef..0000000000 --- a/contrib/epee/include/file_io_utils.h +++ /dev/null @@ -1,452 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#ifndef _FILE_IO_UTILS_H_ -#define _FILE_IO_UTILS_H_ - -#include -#include -#include - - -#ifndef MAKE64 - #define MAKE64(low,high) ((__int64)(((DWORD)(low)) | ((__int64)((DWORD)(high))) << 32)) -#endif - -#ifdef WINDOWS_PLATFORM -#include -#include -#include -#include - -#endif - - - -namespace epee -{ -namespace file_io_utils -{ -#ifdef WINDOWS_PLATFORM - - inline - std::string get_temp_file_name_a() - { - std::string str_result; - char sz_temp[MAX_PATH*2] = {0}; - if(!::GetTempPathA( sizeof( sz_temp ), sz_temp )) - return str_result; - - char sz_temp_file[MAX_PATH*2] = {0}; - if(!::GetTempFileNameA( sz_temp, "mail", 0, sz_temp_file)) - return str_result; - sz_temp_file[sizeof(sz_temp_file)-1] = 0; //be happy!
- str_result = sz_temp_file; - return str_result; - } - - -#ifdef BOOST_LEXICAL_CAST_INCLUDED - inline - bool get_not_used_filename(const std::string& folder, std::string& result_name) - { - DWORD folder_attr = ::GetFileAttributesA(folder.c_str()); - if(folder_attr == INVALID_FILE_ATTRIBUTES) - return false; - if(!(folder_attr&FILE_ATTRIBUTE_DIRECTORY)) - return false; - - - std::string base_name = folder + "\\tmp"; - std::string tmp_name; - bool name_found = false; - int current_index = 0; - tmp_name = base_name + boost::lexical_cast(current_index) + ".tmp"; - while(!name_found) - { - if(INVALID_FILE_ATTRIBUTES == ::GetFileAttributesA(tmp_name.c_str())) - name_found = true; - else - { - current_index++; - tmp_name = base_name + boost::lexical_cast(current_index) + ".tmp"; - } - } - result_name = tmp_name; - return true; - } -#endif - - inline - std::string get_temp_folder_a() - { - std::string str_result; - char sz_temp[MAX_PATH*2] = {0}; - if(!::GetTempPathA( sizeof( sz_temp ), sz_temp )) - return str_result; - sz_temp[(sizeof(sz_temp)/sizeof(sz_temp[0])) -1] = 0; - str_result = sz_temp; - return str_result; - } - - std::string convert_from_device_path_to_standart(const std::string& path) - { - - - STRSAFE_LPSTR pszFilename = (STRSAFE_LPSTR)path.c_str(); - - // Translate path with device name to drive letters. - char szTemp[4000] = {0}; - - if (::GetLogicalDriveStringsA(sizeof(szTemp)-1, szTemp)) - { - char szName[MAX_PATH]; - char szDrive[3] = " :"; - BOOL bFound = FALSE; - char* p = szTemp; - - do - { - // Copy the drive letter to the template string - *szDrive = *p; - - // Look up each device name - if (::QueryDosDeviceA(szDrive, szName, sizeof(szName))) - { - UINT uNameLen = strlen(szName); - - if (uNameLen < MAX_PATH) - { - bFound = _mbsnbicmp((const unsigned char*)pszFilename, (const unsigned char*)szName, - uNameLen) == 0; - - if (bFound) - { - // Reconstruct pszFilename using szTempFile - // Replace device path with DOS path - char szTempFile[MAX_PATH] = {0}; - StringCchPrintfA(szTempFile, - MAX_PATH, - "%s%s", - szDrive, - pszFilename+uNameLen); - return szTempFile; - //::StringCchCopyNA(pszFilename, MAX_PATH+1, szTempFile, strlen(szTempFile)); - } - } - } - - // Go to the next NULL character. - while (*p++); - } while (!bFound && *p); // end of string - } - - return ""; - } - - inline - std::string get_process_path_by_pid(DWORD pid) - { - std::string res; - - HANDLE hprocess = 0; - if( hprocess = ::OpenProcess( PROCESS_QUERY_INFORMATION|PROCESS_VM_READ, FALSE, pid) ) - { - char buff[MAX_PATH]= {0}; - if(!::GetModuleFileNameExA( hprocess, 0, buff, MAX_PATH - 1 )) - res = "Unknown_b"; - else - { - buff[MAX_PATH - 1]=0; //be happy! - res = buff; - std::string::size_type a = res.rfind( '\\' ); - if ( a != std::string::npos ) - res.erase( 0, a+1); - - } - ::CloseHandle( hprocess ); - }else - res = "Unknown_a"; - - return res; - } - - - - - - inline - std::wstring get_temp_file_name_w() - { - std::wstring str_result; - wchar_t sz_temp[MAX_PATH*2] = {0}; - if(!::GetTempPathW( sizeof(sz_temp)/sizeof(sz_temp[0]), sz_temp )) - return str_result; - - wchar_t sz_temp_file[MAX_PATH+1] = {0}; - if(!::GetTempFileNameW( sz_temp, L"mail", 0, sz_temp_file)) - return str_result; - - sz_temp_file[(sizeof(sz_temp_file)/sizeof(sz_temp_file[0]))-1] = 0; //be happy! 
- str_result = sz_temp_file; - return str_result; - } -#endif - inline - bool is_file_exist(const std::string& path) - { - boost::filesystem::path p(path); - return boost::filesystem::exists(p); - } - - /* - inline - bool save_string_to_handle(HANDLE hfile, const std::string& str) - { - - - - if( INVALID_HANDLE_VALUE != hfile ) - { - DWORD dw; - if( !::WriteFile( hfile, str.data(), (DWORD) str.size(), &dw, NULL) ) - { - int err_code = GetLastError(); - //LOG_PRINT("Failed to write to file handle: " << hfile<< " Last error code:" << err_code << " : " << log_space::get_win32_err_descr(err_code), LOG_LEVEL_2); - return false; - } - ::CloseHandle(hfile); - return true; - }else - { - //LOG_WIN32_ERROR(::GetLastError()); - return false; - } - - return false; - }*/ - - - - inline - bool save_string_to_file(const std::string& path_to_file, const std::string& str) - { - - try - { - std::ofstream fstream; - fstream.exceptions(std::ifstream::failbit | std::ifstream::badbit); - fstream.open(path_to_file, std::ios_base::binary | std::ios_base::out | std::ios_base::trunc); - fstream << str; - fstream.close(); - return true; - } - - catch(...) - { - return false; - } - } - - /* - inline - bool load_form_handle(HANDLE hfile, std::string& str) - { - if( INVALID_HANDLE_VALUE != hfile ) - { - bool res = true; - DWORD dw = 0; - DWORD fsize = ::GetFileSize(hfile, &dw); - if(fsize > 300000000) - { - ::CloseHandle(hfile); - return false; - } - if(fsize) - { - str.resize(fsize); - if(!::ReadFile( hfile, (LPVOID)str.data(), (DWORD)str.size(), &dw, NULL)) - res = false; - } - ::CloseHandle(hfile); - return res; - } - return false; - } - */ - inline - bool get_file_time(const std::string& path_to_file, time_t& ft) - { - boost::system::error_code ec; - ft = boost::filesystem::last_write_time(boost::filesystem::path(path_to_file), ec); - if(!ec) - return true; - else - return false; - } - - inline - bool set_file_time(const std::string& path_to_file, const time_t& ft) - { - boost::system::error_code ec; - boost::filesystem::last_write_time(boost::filesystem::path(path_to_file), ft, ec); - if(!ec) - return true; - else - return false; - } - - - inline - bool load_file_to_string(const std::string& path_to_file, std::string& target_str) - { - try - { - std::ifstream fstream; - fstream.exceptions(std::ifstream::failbit | std::ifstream::badbit); - fstream.open(path_to_file, std::ios_base::binary | std::ios_base::in | std::ios::ate); - - std::ifstream::pos_type file_size = fstream.tellg(); - - if(file_size > 1000000000) - return false;//don't go crazy - size_t file_size_t = static_cast(file_size); - - target_str.resize(file_size_t); - - fstream.seekg (0, std::ios::beg); - fstream.read((char*)target_str.data(), target_str.size()); - fstream.close(); - return true; - } - - catch(...) - { - return false; - } - } - - inline - bool append_string_to_file(const std::string& path_to_file, const std::string& str) - { - try - { - std::ofstream fstream; - fstream.exceptions(std::ifstream::failbit | std::ifstream::badbit); - fstream.open(path_to_file.c_str(), std::ios_base::binary | std::ios_base::out | std::ios_base::app); - fstream << str; - fstream.close(); - return true; - } - - catch(...) 
- { - return false; - } - } - - /* - bool remove_dir_and_subirs(const char* path_to_dir); - - inline - bool clean_dir(const char* path_to_dir) - { - if(!path_to_dir) - return false; - - std::string folder = path_to_dir; - WIN32_FIND_DATAA find_data = {0}; - HANDLE hfind = ::FindFirstFileA((folder + "\\*.*").c_str(), &find_data); - if(INVALID_HANDLE_VALUE == hfind) - return false; - do{ - if(!strcmp("..", find_data.cFileName) || (!strcmp(".", find_data.cFileName))) - continue; - - if(find_data.dwFileAttributes&FILE_ATTRIBUTE_DIRECTORY) - { - if(!remove_dir_and_subirs((folder + "\\" + find_data.cFileName).c_str())) - return false; - }else - { - if(!::DeleteFileA((folder + "\\" + find_data.cFileName).c_str())) - return false; - } - - - }while(::FindNextFileA(hfind, &find_data)); - ::FindClose(hfind); - - return true; - } - */ -#ifdef WINDOWS_PLATFORM - inline bool get_folder_content(const std::string& path, std::list& target_list) - { - WIN32_FIND_DATAA find_data = {0}; - HANDLE hfind = ::FindFirstFileA((path + "\\*.*").c_str(), &find_data); - if(INVALID_HANDLE_VALUE == hfind) - return false; - do{ - if(!strcmp("..", find_data.cFileName) || (!strcmp(".", find_data.cFileName))) - continue; - - target_list.push_back(find_data); - - }while(::FindNextFileA(hfind, &find_data)); - ::FindClose(hfind); - - return true; - } -#endif - inline bool get_folder_content(const std::string& path, std::list& target_list, bool only_files = false) - { - try - { - - boost::filesystem::directory_iterator end_itr; // default construction yields past-the-end - for ( boost::filesystem::directory_iterator itr( path ); itr != end_itr; ++itr ) - { - if ( only_files && boost::filesystem::is_directory(itr->status()) ) - { - continue; - } - target_list.push_back(itr->path().filename().string()); - } - - } - - catch(...) - { - return false; - } - return true; - } -} -} - -#endif //_FILE_IO_UTILS_H_ diff --git a/contrib/epee/include/global_stream_operators.h b/contrib/epee/include/global_stream_operators.h deleted file mode 100644 index 6fbdbc2edb..0000000000 --- a/contrib/epee/include/global_stream_operators.h +++ /dev/null @@ -1,35 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
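The file_io_utils helpers above lean on one idiom throughout: enable iostream exceptions, then convert any failure into a bool through try/catch. A standalone sketch of the save/load round trip (the file name is invented for illustration):

    #include <fstream>
    #include <iostream>
    #include <string>

    static bool save_string_to_file(const std::string& path, const std::string& s)
    {
      try
      {
        std::ofstream f;
        f.exceptions(std::ios::failbit | std::ios::badbit);  // make stream errors throw
        f.open(path, std::ios::binary | std::ios::trunc);
        f << s;
        return true;
      }
      catch (...) { return false; }
    }

    static bool load_file_to_string(const std::string& path, std::string& out)
    {
      try
      {
        std::ifstream f;
        f.exceptions(std::ios::failbit | std::ios::badbit);
        f.open(path, std::ios::binary | std::ios::ate);      // open at end to learn the size
        const std::streamoff size = f.tellg();
        out.resize(static_cast<size_t>(size));
        f.seekg(0, std::ios::beg);
        f.read(&out[0], out.size());
        return true;
      }
      catch (...) { return false; }
    }

    int main()
    {
      std::string round_trip;
      bool ok = save_string_to_file("demo.tmp", "hello") &&
                load_file_to_string("demo.tmp", round_trip);
      std::cout << (ok && round_trip == "hello" ? "ok" : "failed") << '\n';
    }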
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#pragma once - -std::stringstream& operator<<(std::stringstream& out, const std::wstring& ws) -{ - std::string as = string_encoding::convert_to_ansii(ws); - out << as; - return out; -} diff --git a/contrib/epee/include/gzip_encoding.h b/contrib/epee/include/gzip_encoding.h deleted file mode 100644 index 2be51e77d6..0000000000 --- a/contrib/epee/include/gzip_encoding.h +++ /dev/null @@ -1,227 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - - -#ifndef _GZIP_ENCODING_H_ -#define _GZIP_ENCODING_H_ -#include "net/http_client_base.h" -#include "zlib/zlib.h" -//#include "http.h" - - -namespace epee -{ -namespace net_utils -{ - - - - class content_encoding_gzip: public i_sub_handler - { - public: - /*! \brief - * Function content_encoding_gzip : Constructor - * - */ - inline - content_encoding_gzip(i_target_handler* powner_filter, bool is_deflate_mode = false):m_powner_filter(powner_filter), - m_is_stream_ended(false), - m_is_deflate_mode(is_deflate_mode), - m_is_first_update_in(true) - { - memset(&m_zstream_in, 0, sizeof(m_zstream_in)); - memset(&m_zstream_out, 0, sizeof(m_zstream_out)); - int ret = 0; - if(is_deflate_mode) - { - ret = inflateInit(&m_zstream_in); - ret = deflateInit(&m_zstream_out, Z_DEFAULT_COMPRESSION); - }else - { - ret = inflateInit2(&m_zstream_in, 0x1F); - ret = deflateInit2(&m_zstream_out, Z_DEFAULT_COMPRESSION, Z_DEFLATED, 0x1F, 8, Z_DEFAULT_STRATEGY); - } - } - /*! 
\brief - * Function content_encoding_gzip : Destructor - * - */ - inline - ~content_encoding_gzip() - { - inflateEnd(& m_zstream_in ); - deflateEnd(& m_zstream_out ); - } - /*! \brief - * Function update_in : Entry point for income data - * - */ - inline - virtual bool update_in( std::string& piece_of_transfer) - { - - bool is_first_time_here = m_is_first_update_in; - m_is_first_update_in = false; - - if(m_pre_decode.size()) - m_pre_decode += piece_of_transfer; - else - m_pre_decode.swap(piece_of_transfer); - piece_of_transfer.clear(); - - std::string decode_summary_buff; - - size_t ungzip_size = m_pre_decode.size() * 0x30; - std::string current_decode_buff(ungzip_size, 'X'); - - //Here the cycle is introduced where we unpack the buffer, the cycle is required - //because of the case where if after unpacking the data will exceed the awaited size, we will not halt with error - bool continue_unpacking = true; - bool first_step = true; - while(m_pre_decode.size() && continue_unpacking) - { - - //fill buffers - m_zstream_in.next_in = (Bytef*)m_pre_decode.data(); - m_zstream_in.avail_in = (uInt)m_pre_decode.size(); - m_zstream_in.next_out = (Bytef*)current_decode_buff.data(); - m_zstream_in.avail_out = (uInt)ungzip_size; - - int flag = Z_SYNC_FLUSH; - int ret = inflate(&m_zstream_in, flag); - CHECK_AND_ASSERT_MES(ret>=0 || m_zstream_in.avail_out ||m_is_deflate_mode, false, "content_encoding_gzip::update_in() Failed to inflate. err = " << ret); - - if(Z_STREAM_END == ret) - m_is_stream_ended = true; - else if(Z_DATA_ERROR == ret && is_first_time_here && m_is_deflate_mode&& first_step) - { - // some servers (notably Apache with mod_deflate) don't generate zlib headers - // insert a dummy header and try again - static char dummy_head[2] = - { - 0x8 + 0x7 * 0x10, - (((0x8 + 0x7 * 0x10) * 0x100 + 30) / 31 * 31) & 0xFF, - }; - inflateReset(&m_zstream_in); - m_zstream_in.next_in = (Bytef*) dummy_head; - m_zstream_in.avail_in = sizeof(dummy_head); - - ret = inflate(&m_zstream_in, Z_NO_FLUSH); - if (ret != Z_OK) - { - LOCAL_ASSERT(0); - m_pre_decode.swap(piece_of_transfer); - return false; - } - m_zstream_in.next_in = (Bytef*)m_pre_decode.data(); - m_zstream_in.avail_in = (uInt)m_pre_decode.size(); - - ret = inflate(&m_zstream_in, Z_NO_FLUSH); - if (ret != Z_OK) - { - LOCAL_ASSERT(0); - m_pre_decode.swap(piece_of_transfer); - return false; - } - } - - - //leave only unpacked part in the output buffer to start with it the next time - m_pre_decode.erase(0, m_pre_decode.size()-m_zstream_in.avail_in); - //if decoder gave nothing to return, then everything is ahead, now simply break - if(ungzip_size == m_zstream_in.avail_out) - break; - - //decode_buff currently stores data parts that were unpacked, fix this size - current_decode_buff.resize(ungzip_size - m_zstream_in.avail_out); - if(decode_summary_buff.size()) - decode_summary_buff += current_decode_buff; - else - current_decode_buff.swap(decode_summary_buff); - - current_decode_buff.resize(ungzip_size); - first_step = false; - } - - //Process these data if required - bool res = true; - - res = m_powner_filter->handle_target_data(decode_summary_buff); - - return true; - - } - /*! \brief - * Function stop : Entry point for stop signal and flushing cached data buffer. - * - */ - inline - virtual void stop(std::string& OUT collect_remains) - { - } - protected: - private: - /*! \brief - * Pointer to parent HTTP-parser - */ - i_target_handler* m_powner_filter; - /*! \brief - * ZLIB object for income stream - */ - z_stream m_zstream_in; - /*! 
\brief - * ZLIB object for outcome stream - */ - z_stream m_zstream_out; - /*! \brief - * Data that could not be unpacked immediately, left to wait for the next packet of data - */ - std::string m_pre_decode; - /*! \brief - * The data are accumulated for a package in the buffer to send the web client - */ - std::string m_pre_encode; - /*! \brief - * Signals that stream looks like ended - */ - bool m_is_stream_ended; - /*! \brief - * If this flag is set, income data is in HTTP-deflate mode - */ - bool m_is_deflate_mode; - /*! \brief - * Marks that it is a first data packet - */ - bool m_is_first_update_in; - }; -} -} - - - -#endif //_GZIP_ENCODING_H_ diff --git a/contrib/epee/include/hmac-md5.h b/contrib/epee/include/hmac-md5.h deleted file mode 100644 index 2a4e0d401a..0000000000 --- a/contrib/epee/include/hmac-md5.h +++ /dev/null @@ -1,93 +0,0 @@ -/* - * libEtPan! -- a mail stuff library - * - * Copyright (C) 2001, 2005 - DINH Viet Hoa - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the libEtPan! project nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. 
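In the gzip decoder removed above, the windowBits value 0x1F passed to inflateInit2 is 15 + 16: a full 32 KB window plus zlib's built-in gzip-wrapper handling. A standalone sketch of the same streaming-inflate loop, with error handling reduced to a bool for brevity (gunzip is an illustrative name, not epee API):

#include <string>
#include <zlib.h>

static bool gunzip(const std::string& in, std::string& out)
{
    z_stream zs{};                               // zeroed: zalloc/zfree/opaque = Z_NULL
    if (inflateInit2(&zs, 0x1F) != Z_OK)         // 15 + 16 -> gzip decoding
        return false;
    zs.next_in  = reinterpret_cast<Bytef*>(const_cast<char*>(in.data()));
    zs.avail_in = static_cast<uInt>(in.size());
    int ret = Z_OK;
    char buf[16384];
    while (ret != Z_STREAM_END)                  // grow output until the stream ends
    {
        zs.next_out  = reinterpret_cast<Bytef*>(buf);
        zs.avail_out = sizeof(buf);
        ret = inflate(&zs, Z_SYNC_FLUSH);
        if (ret != Z_OK && ret != Z_STREAM_END)  // truncated input ends up here (Z_BUF_ERROR)
        {
            inflateEnd(&zs);
            return false;
        }
        out.append(buf, sizeof(buf) - zs.avail_out);
    }
    inflateEnd(&zs);
    return true;
}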
- */ - -/* hmac-md5.h -- HMAC_MD5 functions - */ - -/* - * $Id: hmac-md5.h,v 1.1.1.1 2005/03/18 20:17:28 zautrix Exp $ - */ - -#ifndef HMAC_MD5_H -#define HMAC_MD5_H 1 - -namespace md5 -{ - - - -#define HMAC_MD5_SIZE 16 - - /* intermediate MD5 context */ - typedef struct HMAC_MD5_CTX_s { - MD5_CTX ictx, octx; - } HMAC_MD5_CTX; - - /* intermediate HMAC state - * values stored in network byte order (Big Endian) - */ - typedef struct HMAC_MD5_STATE_s { - UINT4 istate[4]; - UINT4 ostate[4]; - } HMAC_MD5_STATE; - - /* One step hmac computation - * - * digest may be same as text or key - */ - void hmac_md5(const unsigned char *text, int text_len, - const unsigned char *key, int key_len, - unsigned char digest[HMAC_MD5_SIZE]); - - /* create context from key - */ - void hmac_md5_init(HMAC_MD5_CTX *hmac, - const unsigned char *key, int key_len); - - /* precalculate intermediate state from key - */ - void hmac_md5_precalc(HMAC_MD5_STATE *hmac, - const unsigned char *key, int key_len); - - /* initialize context from intermediate state - */ - void hmac_md5_import(HMAC_MD5_CTX *hmac, HMAC_MD5_STATE *state); - -#define hmac_md5_update(hmac, text, text_len) MD5Update(&(hmac)->ictx, (text), (text_len)) - - /* finish hmac from intermediate result. Intermediate result is zeroed. - */ - void hmac_md5_final(unsigned char digest[HMAC_MD5_SIZE], - HMAC_MD5_CTX *hmac); - -} - -#endif /* HMAC_MD5_H */ diff --git a/contrib/epee/include/include_base_utils.h b/contrib/epee/include/include_base_utils.h deleted file mode 100644 index 8412a0083c..0000000000 --- a/contrib/epee/include/include_base_utils.h +++ /dev/null @@ -1,34 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
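Hypothetical usage of the HMAC-MD5 interface declared above (key and message values are made up): a one-shot digest, then the precalc/import path that lets a server cache the 0x36/0x5c-padded key state instead of the raw key:

void demo_hmac()
{
    unsigned char digest[HMAC_MD5_SIZE];
    const unsigned char key[] = "secret";
    const unsigned char msg[] = "challenge";

    // One-step computation.
    md5::hmac_md5(msg, sizeof(msg) - 1, key, sizeof(key) - 1, digest);

    // Cached-state path: derive state once, rebuild a context per use.
    md5::HMAC_MD5_STATE state;
    md5::hmac_md5_precalc(&state, key, sizeof(key) - 1);
    md5::HMAC_MD5_CTX ctx;
    md5::hmac_md5_import(&ctx, &state);
    hmac_md5_update(&ctx, msg, sizeof(msg) - 1);  // macro from the header above
    md5::hmac_md5_final(digest, &ctx);
}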
-// - -#pragma once - -#define BOOST_FILESYSTEM_VERSION 3 -#define ENABLE_RELEASE_LOGGING - -#include "misc_log_ex.h" - - diff --git a/contrib/epee/include/math_helper.h b/contrib/epee/include/math_helper.h deleted file mode 100644 index 11faa9762f..0000000000 --- a/contrib/epee/include/math_helper.h +++ /dev/null @@ -1,273 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
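The math_helper templates deleted just below (average, speed, once_a_time_seconds) are small rate/statistics utilities. A hypothetical usage sketch, assuming the template parameter lists as reconstructed in the following hunk; average keeps a bounded sample window, once_a_time_seconds rate-limits a functor:

epee::math_helper::average<double, 10> ping_avg;     // rolling mean over the last 10 samples
epee::math_helper::once_a_time_seconds<5> idle_task; // runs the functor at most once per 5 s

void on_tick(double last_ping_ms)
{
    ping_avg.update(last_ping_ms);                   // push + recompute the mean
    idle_task.do_call([]{ /* periodic work */ return true; });
}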
-// - - - - -#pragma once - - -#include <list> -#include <numeric> -#include <boost/timer.hpp> -#include <boost/uuid/uuid.hpp> -#include <boost/uuid/uuid_generators.hpp> - -#include "misc_os_dependent.h" -#include "pragma_comp_defs.h" - -namespace epee -{ -namespace math_helper -{ - - template<typename val, size_t default_base> - class average - { - public: - - average() - { - m_base = default_base; - m_last_avg_val = 0; - } - - bool set_base() - { - CRITICAL_REGION_LOCAL(m_lock); - - m_base = default_base; - if(m_list.size() > m_base) - m_list.resize(m_base); - - return true; - } - - typedef val value_type; - - void push(const value_type& vl) - { - CRITICAL_REGION_LOCAL(m_lock); - -//#ifndef DEBUG_STUB - m_list.push_back(vl); - if(m_list.size() > m_base ) - m_list.pop_front(); -//#endif - } - - double update(const value_type& vl) - { - CRITICAL_REGION_LOCAL(m_lock); -//#ifndef DEBUG_STUB - push(vl); -//#endif - - return get_avg(); - } - - double get_avg() - { - CRITICAL_REGION_LOCAL(m_lock); - - value_type vl = std::accumulate(m_list.begin(), m_list.end(), value_type(0)); - if(m_list.size()) - return m_last_avg_val = (double)(vl/m_list.size()); - - return m_last_avg_val = (double)vl; - } - - value_type get_last_val() - { - CRITICAL_REGION_LOCAL(m_lock); - if(m_list.size()) - return m_list.back(); - - return 0; - } - - private: - unsigned int m_base; - double m_last_avg_val; - std::list<value_type> m_list; - critical_section m_lock; - }; - - -#ifdef WINDOWS_PLATFORM - - /************************************************************************/ - /* */ - /************************************************************************/ - class timing_guard_base - { - public: - virtual ~timing_guard_base(){}; - }; - - template<class T> - class timing_guard: public timing_guard_base - { - public: - timing_guard(T& avrg):m_avrg(avrg) - { - m_start_ticks = ::GetTickCount(); - } - - ~timing_guard() - { - m_avrg.push(::GetTickCount()-m_start_ticks); - } - - private: - T& m_avrg; - DWORD m_start_ticks; - }; - - template<class t_timing> - timing_guard_base* create_timing_guard(t_timing& timing){return new timing_guard<t_timing>(timing);} - -#define BEGIN_TIMING_ZONE(timing_var) { boost::shared_ptr<timing_guard_base> local_timing_guard_ptr(math_helper::create_timing_guard(timing_var)); -#define END_TIMING_ZONE() } -#endif - -//#ifdef WINDOWS_PLATFORM_EX - template<uint64_t default_time_window> - class speed - { - public: - - speed() - { - m_time_window = default_time_window; - m_last_speed_value = 0; - } - bool chick() - { -#ifndef DEBUG_STUB - uint64_t ticks = misc_utils::get_tick_count(); - CRITICAL_REGION_BEGIN(m_lock); - m_chicks.push_back(ticks); - CRITICAL_REGION_END(); - //flush(ticks); -#endif - return true; - } - - bool chick(size_t count) - { - for(size_t s = 0; s != count; s++) - chick(); - - return true; - } - - - size_t get_speed() - { - flush(misc_utils::get_tick_count()); - return m_last_speed_value = m_chicks.size(); - } - private: - - bool flush(uint64_t ticks) - { - CRITICAL_REGION_BEGIN(m_lock); - std::list<uint64_t>::iterator it = m_chicks.begin(); - while(it != m_chicks.end()) - { - if(*it + m_time_window < ticks) - m_chicks.erase(it++); - else - break; - } - CRITICAL_REGION_END(); - return true; - } - - std::list<uint64_t> m_chicks; - uint64_t m_time_window; - size_t m_last_speed_value; - critical_section m_lock; - }; -//#endif - - template<typename tlist> - void randomize_list(tlist& t_list) - { - for(typename tlist::iterator it = t_list.begin();it!=t_list.end();it++) - { - size_t offset = rand()%t_list.size(); - typename tlist::iterator it_2 = t_list.begin(); - for(size_t local_offset = 0;local_offset!=offset;local_offset++) - it_2++; - if(it_2 == it) - continue; - std::swap(*it_2, *it); - } - - } -PRAGMA_WARNING_PUSH -PRAGMA_GCC("GCC diagnostic ignored \"-Wstrict-aliasing\"") - inline - uint64_t generated_random_uint64() - { - boost::uuids::uuid id___ = boost::uuids::random_generator()(); - return *reinterpret_cast<uint64_t*>(&id___.data[0]); //(*reinterpret_cast<uint64_t*>(&id___.data[0]) ^ *reinterpret_cast<uint64_t*>(&id___.data[8])); - } -PRAGMA_WARNING_POP - template<int default_interval, bool start_immediate = false> - class once_a_time_seconds - { - public: - once_a_time_seconds():m_interval(default_interval) - { - m_last_worked_time = 0; - if(!start_immediate) - time(&m_last_worked_time); - } - - template<class functor_t> - bool do_call(functor_t functr) - { - time_t current_time = 0; - time(&current_time); - - if(current_time - m_last_worked_time > m_interval) - { - bool res = functr(); - time(&m_last_worked_time); - return res; - } - return true; - } - - private: - time_t m_last_worked_time; - time_t m_interval; - }; -} -} \ No newline at end of file diff --git a/contrib/epee/include/md5_l.h b/contrib/epee/include/md5_l.h deleted file mode 100644 index fe4c67db62..0000000000 --- a/contrib/epee/include/md5_l.h +++ /dev/null @@ -1,97 +0,0 @@ -/* - * libEtPan! -- a mail stuff library - * - * Copyright (C) 2001, 2005 - DINH Viet Hoa - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the libEtPan! project nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * $Id: md5.h,v 1.1.1.1 2005/03/18 20:17:28 zautrix Exp $ - */ - -/* MD5.H - header file for MD5C.C - */ - -/* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All -rights reserved. - -License to copy and use this software is granted provided that it -is identified as the "RSA Data Security, Inc. MD5 Message-Digest -Algorithm" in all material mentioning or referencing this software -or this function. - -License is also granted to make and use derivative works provided -that such works are identified as "derived from the RSA Data -Security, Inc. MD5 Message-Digest Algorithm" in all material -mentioning or referencing the derived work. - -RSA Data Security, Inc. makes no representations concerning either -the merchantability of this software or the suitability of this -software for any particular purpose.
It is provided "as is" -without express or implied warranty of any kind. -These notices must be retained in any copies of any part of this -documentation and/or software. - */ -#ifndef MD5_H -#define MD5_H - - -#include "md5global.h" - -namespace md5 -{ - /* MD5 context. */ - typedef struct { - UINT4 state[4]; /* state (ABCD) */ - UINT4 count[2]; /* number of bits, modulo 2^64 (lsb first) */ - unsigned char buffer[64]; /* input buffer */ - } MD5_CTX; - - static void MD5Init(MD5_CTX * context); - static void MD5Update( MD5_CTX *context, const unsigned char *input, unsigned int inputLen ); - static void MD5Final ( unsigned char digest[16], MD5_CTX *context ); - static void hmac_md5(const unsigned char* text, int text_len, const unsigned char* key, int key_len, unsigned char *digest); - - - inline bool md5( unsigned char *input, int ilen, unsigned char output[16] ) - { - MD5_CTX ctx; - - MD5Init( &ctx ); - MD5Update( &ctx, input, ilen ); - MD5Final( output, &ctx); - - memset( &ctx, 0, sizeof( MD5_CTX) ); - return true; - } - - -} - -#include "md5_l.inl" - -#endif diff --git a/contrib/epee/include/md5_l.inl b/contrib/epee/include/md5_l.inl deleted file mode 100644 index c3da1a3b07..0000000000 --- a/contrib/epee/include/md5_l.inl +++ /dev/null @@ -1,563 +0,0 @@ -/* -* libEtPan! -- a mail stuff library -* -* Copyright (C) 2001, 2005 - DINH Viet Hoa -* All rights reserved. -* -* Redistribution and use in source and binary forms, with or without -* modification, are permitted provided that the following conditions -* are met: -* 1. Redistributions of source code must retain the above copyright -* notice, this list of conditions and the following disclaimer. -* 2. Redistributions in binary form must reproduce the above copyright -* notice, this list of conditions and the following disclaimer in the -* documentation and/or other materials provided with the distribution. -* 3. Neither the name of the libEtPan! project nor the names of its -* contributors may be used to endorse or promote products derived -* from this software without specific prior written permission. -* -* THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND -* ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -* ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE -* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS -* OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) -* HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY -* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF -* SUCH DAMAGE. -*/ - -/* -* $Id: md5.c,v 1.1.1.1 2005/03/18 20:17:27 zautrix Exp $ -*/ - -/* MD5C.C - RSA Data Security, Inc., MD5 message-digest algorithm -*/ - -/* Copyright (C) 1991-2, RSA Data Security, Inc. Created 1991. All -rights reserved. - -License to copy and use this software is granted provided that it -is identified as the "RSA Data Security, Inc. MD5 Message-Digest -Algorithm" in all material mentioning or referencing this software -or this function. - -License is also granted to make and use derivative works provided -that such works are identified as "derived from the RSA Data -Security, Inc. 
MD5 Message-Digest Algorithm" in all material -mentioning or referencing the derived work. - -RSA Data Security, Inc. makes no representations concerning either -the merchantability of this software or the suitability of this -software for any particular purpose. It is provided "as is" -without express or implied warranty of any kind. - -These notices must be retained in any copies of any part of this -documentation and/or software. -*/ - -/* do i need all of this just for htonl()? damn. */ -//#include -//#include -//#include -//#include - - - -#include "md5global.h" -#include "md5_l.h" -#include "hmac-md5.h" - -namespace md5 -{ - /* Constants for MD5Transform routine. - */ - -#define S11 7 -#define S12 12 -#define S13 17 -#define S14 22 -#define S21 5 -#define S22 9 -#define S23 14 -#define S24 20 -#define S31 4 -#define S32 11 -#define S33 16 -#define S34 23 -#define S41 6 -#define S42 10 -#define S43 15 -#define S44 21 - - /* - static void MD5Transform PROTO_LIST ((UINT4 [4], unsigned char [64])); - static void Encode PROTO_LIST - ((unsigned char *, UINT4 *, unsigned int)); - static void Decode PROTO_LIST - ((UINT4 *, unsigned char *, unsigned int)); - static void MD5_memcpy PROTO_LIST ((POINTER, POINTER, unsigned int)); - static void MD5_memset PROTO_LIST ((POINTER, int, unsigned int)); - */ - - static void MD5_memcpy (POINTER output, POINTER input, unsigned int len) - { - unsigned int i; - - for (i = 0; i < len; i++) - output[i] = input[i]; - } - - /* Note: Replace "for loop" with standard memset if possible. - */ - - static void MD5_memset (POINTER output, int value, unsigned int len) - { - unsigned int i; - - for (i = 0; i < len; i++) - ((char *)output)[i] = (char)value; - } - - static void MD5Transform (UINT4 state[4], unsigned char block[64]); - - static unsigned char* PADDING() - { - static unsigned char local_PADDING[64] = { - 0x80, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, - 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 - }; - - return local_PADDING; - - } - - - - /* F, G, H and I are basic MD5 functions. - - */ -#ifdef I - /* This might be defined via NANA */ -#undef I -#endif - -#define MD5_M_F(x, y, z) (((x) & (y)) | ((~x) & (z))) -#define MD5_M_G(x, y, z) (((x) & (z)) | ((y) & (~z))) -#define MD5_M_H(x, y, z) ((x) ^ (y) ^ (z)) -#define MD5_M_I(x, y, z) ((y) ^ ((x) | (~z))) - - /* ROTATE_LEFT rotates x left n bits. - - */ - -#define ROTATE_LEFT(x, n) (((x) << (n)) | ((x) >> (32-(n)))) - - /* FF, GG, HH, and II transformations for rounds 1, 2, 3, and 4. - Rotation is separate from addition to prevent recomputation. - */ - -#define FF(a, b, c, d, x, s, ac) { (a) += MD5_M_F ((b), (c), (d)) + (x) + (UINT4)(ac); (a) = ROTATE_LEFT ((a), (s)); (a) += (b); } -#define GG(a, b, c, d, x, s, ac) { (a) += MD5_M_G ((b), (c), (d)) + (x) + (UINT4)(ac); (a) = ROTATE_LEFT ((a), (s)); (a) += (b); } -#define HH(a, b, c, d, x, s, ac) { (a) += MD5_M_H ((b), (c), (d)) + (x) + (UINT4)(ac); (a) = ROTATE_LEFT ((a), (s)); (a) += (b); } -#define II(a, b, c, d, x, s, ac) { (a) += MD5_M_I ((b), (c), (d)) + (x) + (UINT4)(ac); (a) = ROTATE_LEFT ((a), (s)); (a) += (b); } - - /* MD5 initialization. Begins an MD5 operation, writing a new context. - */ - - static void MD5Init(MD5_CTX * context) - { - context->count[0] = context->count[1] = 0; - - /* Load magic initialization constants. 
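The FF/GG/HH/II macros above are where MD5 mixes input words with the round constants; each step is add, rotate, add. A worked first step, checkable in isolation (rotl32 is a stand-in for ROTATE_LEFT; the all-zero state is an assumption for the example, not the real IV):

// With a = b = c = d = 0, x[0] = 0 and ac = 0xd76aa478, round 1 step 1 gives:
//   a += F(b,c,d) + x + ac  ->  0xd76aa478
//   a  = rotl(a, 7)         ->  0xb5523c6b
//   a += b                  ->  0xb5523c6b
#include <cstdint>
constexpr uint32_t rotl32(uint32_t x, int n) { return (x << n) | (x >> (32 - n)); }
static_assert(rotl32(0xd76aa478u, 7) == 0xb5523c6bu, "ROTATE_LEFT check");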
- - */ - context->state[0] = 0x67452301; - context->state[1] = 0xefcdab89; - context->state[2] = 0x98badcfe; - context->state[3] = 0x10325476; - } - - /* MD5 block update operation. Continues an MD5 message-digest - operation, processing another message block, and updating the context. - */ - - static void MD5Update( MD5_CTX *context, const unsigned char *input, unsigned int inputLen ) - { - unsigned int i, index, partLen; - - /* Compute number of bytes mod 64 */ - index = (unsigned int)((context->count[0] >> 3) & 0x3F); - - /* Update number of bits */ - if ((context->count[0] += ((UINT4)inputLen << 3)) - < ((UINT4)inputLen << 3)) - context->count[1]++; - context->count[1] += ((UINT4)inputLen >> 29); - - partLen = 64 - index; - - /* Transform as many times as possible. - - */ - if (inputLen >= partLen) - { - MD5_memcpy( (POINTER)&context->buffer[index], (POINTER)input, partLen ); - MD5Transform( context->state, context->buffer ); - - for (i = partLen; i + 63 < inputLen; i += 64) - MD5Transform (context->state, (unsigned char*)&input[i]); - - index = 0; - } - else - i = 0; - - /* Buffer remaining input */ - MD5_memcpy( (POINTER)&context->buffer[index], (POINTER)&input[i], inputLen-i ); - - } - - /* Encodes input (UINT4) into output (unsigned char). Assumes len is - a multiple of 4. - - */ - - static void Encode (unsigned char *output, UINT4 *input, unsigned int len) - { - unsigned int i, j; - - for (i = 0, j = 0; j < len; i++, j += 4) { - output[j] = (unsigned char)(input[i] & 0xff); - output[j+1] = (unsigned char)((input[i] >> 8) & 0xff); - output[j+2] = (unsigned char)((input[i] >> 16) & 0xff); - output[j+3] = (unsigned char)((input[i] >> 24) & 0xff); - } - } - - /* Decodes input (unsigned char) into output (UINT4). Assumes len is - a multiple of 4. - - */ - - static void Decode (UINT4 *output, unsigned char *input, unsigned int len) - { - unsigned int i, j; - - for (i = 0, j = 0; j < len; i++, j += 4) - output[i] = ((UINT4)input[j]) | (((UINT4)input[j+1]) << 8) | (((UINT4)input[j+2]) << 16) - | (((UINT4)input[j+3]) << 24); - } - - /* MD5 finalization. Ends an MD5 message-digest operation, writing the - the message digest and zeroizing the context. - - */ - - static void MD5Final ( unsigned char digest[16], MD5_CTX *context ) - { - unsigned char bits[8]; - unsigned int index, padLen; - - /* Save number of bits */ - Encode (bits, context->count, 8); - - /* Pad out to 56 mod 64. - - */ - index = (unsigned int)((context->count[0] >> 3) & 0x3f); - padLen = (index < 56) ? (56 - index) : (120 - index); - MD5Update (context, PADDING(), padLen); - - /* Append length (before padding) */ - MD5Update (context, bits, 8); - - /* Store state in digest */ - Encode (digest, context->state, 16); - - /* Zeroize sensitive information. - - */ - MD5_memset ((POINTER)context, 0, sizeof (*context)); - } - - /* MD5 basic transformation. Transforms state based on block. 
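The padLen computed in MD5Final above always lands the message at 56 mod 64, so the trailing 8-byte bit count completes a block. Two worked cases, with pad_len as an illustrative helper mirroring the expression in the source:

// After 10 input bytes: index = 10, padLen = 56 - 10 = 46, and 10 + 46 + 8 = 64 (one block).
// After 60 input bytes: index = 60, padLen = 120 - 60 = 60, and 60 + 60 + 8 = 128 (two blocks).
static unsigned pad_len(unsigned index) { return index < 56 ? 56 - index : 120 - index; }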
- - */ - - static void MD5Transform (UINT4 state[4], unsigned char block[64]) - { - UINT4 a = state[0], b = state[1], c = state[2], d = state[3], x[16]; - - Decode (x, block, 64); - - /* Round 1 */ - FF (a, b, c, d, x[ 0], S11, 0xd76aa478); /* 1 */ - FF (d, a, b, c, x[ 1], S12, 0xe8c7b756); /* 2 */ - FF (c, d, a, b, x[ 2], S13, 0x242070db); /* 3 */ - FF (b, c, d, a, x[ 3], S14, 0xc1bdceee); /* 4 */ - FF (a, b, c, d, x[ 4], S11, 0xf57c0faf); /* 5 */ - FF (d, a, b, c, x[ 5], S12, 0x4787c62a); /* 6 */ - FF (c, d, a, b, x[ 6], S13, 0xa8304613); /* 7 */ - FF (b, c, d, a, x[ 7], S14, 0xfd469501); /* 8 */ - FF (a, b, c, d, x[ 8], S11, 0x698098d8); /* 9 */ - FF (d, a, b, c, x[ 9], S12, 0x8b44f7af); /* 10 */ - FF (c, d, a, b, x[10], S13, 0xffff5bb1); /* 11 */ - FF (b, c, d, a, x[11], S14, 0x895cd7be); /* 12 */ - FF (a, b, c, d, x[12], S11, 0x6b901122); /* 13 */ - FF (d, a, b, c, x[13], S12, 0xfd987193); /* 14 */ - FF (c, d, a, b, x[14], S13, 0xa679438e); /* 15 */ - FF (b, c, d, a, x[15], S14, 0x49b40821); /* 16 */ - - /* Round 2 */ - GG (a, b, c, d, x[ 1], S21, 0xf61e2562); /* 17 */ - GG (d, a, b, c, x[ 6], S22, 0xc040b340); /* 18 */ - GG (c, d, a, b, x[11], S23, 0x265e5a51); /* 19 */ - GG (b, c, d, a, x[ 0], S24, 0xe9b6c7aa); /* 20 */ - GG (a, b, c, d, x[ 5], S21, 0xd62f105d); /* 21 */ - GG (d, a, b, c, x[10], S22, 0x2441453); /* 22 */ - GG (c, d, a, b, x[15], S23, 0xd8a1e681); /* 23 */ - GG (b, c, d, a, x[ 4], S24, 0xe7d3fbc8); /* 24 */ - GG (a, b, c, d, x[ 9], S21, 0x21e1cde6); /* 25 */ - GG (d, a, b, c, x[14], S22, 0xc33707d6); /* 26 */ - GG (c, d, a, b, x[ 3], S23, 0xf4d50d87); /* 27 */ - GG (b, c, d, a, x[ 8], S24, 0x455a14ed); /* 28 */ - GG (a, b, c, d, x[13], S21, 0xa9e3e905); /* 29 */ - GG (d, a, b, c, x[ 2], S22, 0xfcefa3f8); /* 30 */ - GG (c, d, a, b, x[ 7], S23, 0x676f02d9); /* 31 */ - GG (b, c, d, a, x[12], S24, 0x8d2a4c8a); /* 32 */ - - /* Round 3 */ - HH (a, b, c, d, x[ 5], S31, 0xfffa3942); /* 33 */ - HH (d, a, b, c, x[ 8], S32, 0x8771f681); /* 34 */ - HH (c, d, a, b, x[11], S33, 0x6d9d6122); /* 35 */ - HH (b, c, d, a, x[14], S34, 0xfde5380c); /* 36 */ - HH (a, b, c, d, x[ 1], S31, 0xa4beea44); /* 37 */ - HH (d, a, b, c, x[ 4], S32, 0x4bdecfa9); /* 38 */ - HH (c, d, a, b, x[ 7], S33, 0xf6bb4b60); /* 39 */ - HH (b, c, d, a, x[10], S34, 0xbebfbc70); /* 40 */ - HH (a, b, c, d, x[13], S31, 0x289b7ec6); /* 41 */ - HH (d, a, b, c, x[ 0], S32, 0xeaa127fa); /* 42 */ - HH (c, d, a, b, x[ 3], S33, 0xd4ef3085); /* 43 */ - HH (b, c, d, a, x[ 6], S34, 0x4881d05); /* 44 */ - HH (a, b, c, d, x[ 9], S31, 0xd9d4d039); /* 45 */ - HH (d, a, b, c, x[12], S32, 0xe6db99e5); /* 46 */ - HH (c, d, a, b, x[15], S33, 0x1fa27cf8); /* 47 */ - HH (b, c, d, a, x[ 2], S34, 0xc4ac5665); /* 48 */ - - /* Round 4 */ - II (a, b, c, d, x[ 0], S41, 0xf4292244); /* 49 */ - II (d, a, b, c, x[ 7], S42, 0x432aff97); /* 50 */ - II (c, d, a, b, x[14], S43, 0xab9423a7); /* 51 */ - II (b, c, d, a, x[ 5], S44, 0xfc93a039); /* 52 */ - II (a, b, c, d, x[12], S41, 0x655b59c3); /* 53 */ - II (d, a, b, c, x[ 3], S42, 0x8f0ccc92); /* 54 */ - II (c, d, a, b, x[10], S43, 0xffeff47d); /* 55 */ - II (b, c, d, a, x[ 1], S44, 0x85845dd1); /* 56 */ - II (a, b, c, d, x[ 8], S41, 0x6fa87e4f); /* 57 */ - II (d, a, b, c, x[15], S42, 0xfe2ce6e0); /* 58 */ - II (c, d, a, b, x[ 6], S43, 0xa3014314); /* 59 */ - II (b, c, d, a, x[13], S44, 0x4e0811a1); /* 60 */ - II (a, b, c, d, x[ 4], S41, 0xf7537e82); /* 61 */ - II (d, a, b, c, x[11], S42, 0xbd3af235); /* 62 */ - II (c, d, a, b, x[ 2], S43, 0x2ad7d2bb); /* 63 */ - II (b, c, d, a, x[ 9], S44, 
0xeb86d391); /* 64 */ - - state[0] += a; - state[1] += b; - state[2] += c; - state[3] += d; - - /* Zeroize sensitive information. - */ - MD5_memset ((POINTER)x, 0, sizeof (x)); - } - - /* Note: Replace "for loop" with standard memcpy if possible. - - */ - inline - void hmac_md5_init(HMAC_MD5_CTX *hmac, - const unsigned char *key, - int key_len) - { - unsigned char k_ipad[65]; /* inner padding - - * key XORd with ipad - */ - unsigned char k_opad[65]; /* outer padding - - * key XORd with opad - */ - unsigned char tk[16]; - int i; - /* if key is longer than 64 bytes reset it to key=MD5(key) */ - if (key_len > 64) { - - MD5_CTX tctx; - - MD5Init(&tctx); - MD5Update(&tctx, key, key_len); - MD5Final(tk, &tctx); - - key = tk; - key_len = 16; - } - - /* - * the HMAC_MD5 transform looks like: - * - * MD5(K XOR opad, MD5(K XOR ipad, text)) - * - * where K is an n byte key - * ipad is the byte 0x36 repeated 64 times - * opad is the byte 0x5c repeated 64 times - * and text is the data being protected - */ - - /* start out by storing key in pads */ - MD5_memset(k_ipad, '\0', sizeof k_ipad); - MD5_memset(k_opad, '\0', sizeof k_opad); - MD5_memcpy( k_ipad, (POINTER)key, key_len); - MD5_memcpy( k_opad, (POINTER)key, key_len); - - /* XOR key with ipad and opad values */ - for (i=0; i<64; i++) { - k_ipad[i] ^= 0x36; - k_opad[i] ^= 0x5c; - } - - MD5Init(&hmac->ictx); /* init inner context */ - MD5Update(&hmac->ictx, k_ipad, 64); /* apply inner pad */ - - MD5Init(&hmac->octx); /* init outer context */ - MD5Update(&hmac->octx, k_opad, 64); /* apply outer pad */ - - /* scrub the pads and key context (if used) */ - MD5_memset( (POINTER)&k_ipad, 0, sizeof(k_ipad)); - MD5_memset( (POINTER)&k_opad, 0, sizeof(k_opad)); - MD5_memset( (POINTER)&tk, 0, sizeof(tk)); - - /* and we're done. */ - } - - /* The precalc and import routines here rely on the fact that we pad - * the key out to 64 bytes and use that to initialize the md5 - * contexts, and that updating an md5 context with 64 bytes of data - * leaves nothing left over; all of the interesting state is contained - * in the state field, and none of it is left over in the count and - * buffer fields. So all we have to do is save the state field; we - * can zero the others when we reload it. Which is why the decision - * was made to pad the key out to 64 bytes in the first place. */ - inline - void hmac_md5_precalc(HMAC_MD5_STATE *state, - const unsigned char *key, - int key_len) - { - HMAC_MD5_CTX hmac; - unsigned lupe; - - hmac_md5_init(&hmac, key, key_len); - for (lupe = 0; lupe < 4; lupe++) { - state->istate[lupe] = htonl(hmac.ictx.state[lupe]); - state->ostate[lupe] = htonl(hmac.octx.state[lupe]); - } - MD5_memset( (POINTER)&hmac, 0, sizeof(hmac)); - } - - - inline - void hmac_md5_import(HMAC_MD5_CTX *hmac, - HMAC_MD5_STATE *state) - { - unsigned lupe; - MD5_memset( (POINTER)hmac, 0, sizeof(HMAC_MD5_CTX)); - for (lupe = 0; lupe < 4; lupe++) { - hmac->ictx.state[lupe] = ntohl(state->istate[lupe]); - hmac->octx.state[lupe] = ntohl(state->ostate[lupe]); - } - /* Init the counts to account for our having applied - * 64 bytes of key; this works out to 0x200 (64 << 3; see - * MD5Update above...) 
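The arithmetic in that remark checks out mechanically: MD5Update tracks context->count in bits, and one padded 64-byte key block is 64 << 3 = 512 = 0x200 bits, which is exactly the count hmac_md5_import restores below.

static_assert((64 << 3) == 0x200, "one 64-byte key block is 512 bits");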
*/ - hmac->ictx.count[0] = hmac->octx.count[0] = 0x200; - } - - inline - void hmac_md5_final(unsigned char digest[HMAC_MD5_SIZE], - HMAC_MD5_CTX *hmac) - { - MD5Final(digest, &hmac->ictx); /* Finalize inner md5 */ - MD5Update(&hmac->octx, digest, 16); /* Update outer ctx */ - MD5Final(digest, &hmac->octx); /* Finalize outer md5 */ - } - - - void hmac_md5(const unsigned char* text, int text_len, const unsigned char* key, int key_len, unsigned char *digest) - { - MD5_CTX context; - - unsigned char k_ipad[65]; /* inner padding - - * key XORd with ipad - */ - unsigned char k_opad[65]; /* outer padding - - * key XORd with opad - */ - unsigned char tk[16]; - int i; - /* if key is longer than 64 bytes reset it to key=MD5(key) */ - if (key_len > 64) { - - MD5_CTX tctx; - - MD5Init(&tctx); - MD5Update(&tctx, key, key_len); - MD5Final(tk, &tctx); - - key = tk; - key_len = 16; - } - - /* - * the HMAC_MD5 transform looks like: - * - * MD5(K XOR opad, MD5(K XOR ipad, text)) - * - * where K is an n byte key - * ipad is the byte 0x36 repeated 64 times - * opad is the byte 0x5c repeated 64 times - * and text is the data being protected - */ - - /* start out by storing key in pads */ - MD5_memset(k_ipad, '\0', sizeof k_ipad); - MD5_memset(k_opad, '\0', sizeof k_opad); - MD5_memcpy( k_ipad, (POINTER)key, key_len); - MD5_memcpy( k_opad, (POINTER)key, key_len); - - /* XOR key with ipad and opad values */ - for (i=0; i<64; i++) { - k_ipad[i] ^= 0x36; - k_opad[i] ^= 0x5c; - } - /* - * perform inner MD5 - */ - - MD5Init(&context); /* init context for 1st - * pass */ - MD5Update(&context, k_ipad, 64); /* start with inner pad */ - MD5Update(&context, text, text_len); /* then text of datagram */ - MD5Final(digest, &context); /* finish up 1st pass */ - - /* - * perform outer MD5 - */ - MD5Init(&context); /* init context for 2nd - * pass */ - MD5Update(&context, k_opad, 64); /* start with outer pad */ - MD5Update(&context, digest, 16); /* then results of 1st - * hash */ - MD5Final(digest, &context); /* finish up 2nd pass */ - - } -} \ No newline at end of file diff --git a/contrib/epee/include/md5global.h b/contrib/epee/include/md5global.h deleted file mode 100644 index afc2290193..0000000000 --- a/contrib/epee/include/md5global.h +++ /dev/null @@ -1,77 +0,0 @@ -/* - * libEtPan! -- a mail stuff library - * - * Copyright (C) 2001, 2005 - DINH Viet Hoa - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * 3. Neither the name of the libEtPan! project nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND - * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE - * ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE - * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL - * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS - * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) - * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT - * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY - * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF - * SUCH DAMAGE. - */ - -/* - * $Id: md5global.h,v 1.1.1.1 2005/03/18 20:17:28 zautrix Exp $ - */ - -/* GLOBAL.H - RSAREF types and constants - */ - -#ifndef MD5GLOBAL_H -#define MD5GLOBAL_H - -namespace md5 -{ - - - /* PROTOTYPES should be set to one if and only if the compiler supports - function argument prototyping. - The following makes PROTOTYPES default to 0 if it has not already - been defined with C compiler flags. - */ -#ifndef PROTOTYPES -#define PROTOTYPES 0 -#endif - - /* POINTER defines a generic pointer type */ - typedef unsigned char *POINTER; - - /* UINT2 defines a two byte word */ - typedef unsigned short int UINT2; - - /* UINT4 defines a four byte word */ - //typedef unsigned long int UINT4; - typedef unsigned int UINT4; - - /* PROTO_LIST is defined depending on how PROTOTYPES is defined above. - If using PROTOTYPES, then PROTO_LIST returns the list, otherwise it - returns an empty list. - */ -#if PROTOTYPES -#define PROTO_LIST(list) list -#else -#define PROTO_LIST(list) () -#endif - -} - -#endif diff --git a/contrib/epee/include/misc_language.h b/contrib/epee/include/misc_language.h deleted file mode 100644 index d5157365c8..0000000000 --- a/contrib/epee/include/misc_language.h +++ /dev/null @@ -1,162 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
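The RSAREF-era typedefs in md5global.h above predate <cstdint>; on the platforms this code targeted they line up with today's fixed-width types, which is what the hand-written Encode/Decode byte-order helpers rely on. A compile-time check of that assumption:

#include <cstdint>
static_assert(sizeof(unsigned short int) == sizeof(uint16_t), "UINT2 is 16-bit here");
static_assert(sizeof(unsigned int) == sizeof(uint32_t), "UINT4 is 32-bit here");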
-// - - - -#pragma once - -#include <limits> -#include <boost/thread.hpp> -#include <boost/utility/value_init.hpp> -namespace epee -{ -#define STD_TRY_BEGIN() try { - -#define STD_TRY_CATCH(where_, ret_val) \ - } \ - catch (const std::exception &e) \ - { \ - LOG_ERROR("EXCEPTION: " << where_ << ", mes: "<< e.what()); \ - return ret_val; \ - } \ - catch (...) \ - { \ - LOG_ERROR("EXCEPTION: " << where_ ); \ - return ret_val; \ - } - - - -#define AUTO_VAL_INIT(v) boost::value_initialized<v>() - -namespace misc_utils -{ - template<typename t_type> - t_type get_max_t_val(t_type t) - { - return (std::numeric_limits<t_type>::max)(); - } - - - template<class t_iterator> - t_iterator move_it_forward(t_iterator it, size_t count) - { - while(count--) - it++; - return it; - } - - template<class t_iterator> - t_iterator move_it_backward(t_iterator it, size_t count) - { - while(count--) - it--; - return it; - } - - - // TEMPLATE STRUCT less - template<class _Ty> - struct less_as_pod - : public std::binary_function<_Ty, _Ty, bool> - { // functor for operator< - bool operator()(const _Ty& _Left, const _Ty& _Right) const - { // apply operator< to operands - return memcmp(&_Left, &_Right, sizeof(_Left)) < 0; - } - }; - - template<class _Ty> - bool is_less_as_pod(const _Ty& _Left, const _Ty& _Right) - { // apply operator< to operands - return memcmp(&_Left, &_Right, sizeof(_Left)) < 0; - } - - - inline - bool sleep_no_w(long ms ) - { - boost::this_thread::sleep( - boost::get_system_time() + - boost::posix_time::milliseconds( std::max(ms,0) ) ); - - return true; - } - - template<typename type_vec_type> - type_vec_type median(std::vector<type_vec_type> &v) - { - if(v.empty()) - return boost::value_initialized<type_vec_type>(); - if(v.size() == 1) - return v[0]; - - size_t n = (v.size()) / 2; - std::sort(v.begin(), v.end()); - //nth_element(v.begin(), v.begin()+n-1, v.end()); - if(v.size()%2) - {//1, 3, 5... - return v[n]; - }else - {//2, 4, 6... - return (v[n-1] + v[n])/2; - } - - } - - /************************************************************************/ - /* */ - /************************************************************************/ - - struct call_befor_die_base - { - virtual ~call_befor_die_base(){} - }; - - typedef boost::shared_ptr<call_befor_die_base> auto_scope_leave_caller; - - - template<class t_scope_leave_handler> - struct call_befor_die: public call_befor_die_base - { - t_scope_leave_handler m_func; - call_befor_die(t_scope_leave_handler f):m_func(f) - {} - ~call_befor_die() - { - m_func(); - } - }; - - template<class t_scope_leave_handler> - auto_scope_leave_caller create_scope_leave_handler(t_scope_leave_handler f) - { - auto_scope_leave_caller slc(new call_befor_die<t_scope_leave_handler>(f)); - return slc; - } - -} -} diff --git a/contrib/epee/include/misc_log_ex.cpp b/contrib/epee/include/misc_log_ex.cpp deleted file mode 100644 index 0c0b441b4f..0000000000 --- a/contrib/epee/include/misc_log_ex.cpp +++ /dev/null @@ -1,1029 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission.
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -#include "misc_log_ex.h" - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#if defined(WIN32) -#include <io.h> -#else -#include <unistd.h> -#endif - -#include "static_initializer.h" -#include "string_tools.h" -#include "time_helper.h" -#include "misc_os_dependent.h" - -#include "syncobj.h" - - -#define LOG_LEVEL_SILENT -1 -#define LOG_LEVEL_0 0 -#define LOG_LEVEL_1 1 -#define LOG_LEVEL_2 2 -#define LOG_LEVEL_3 3 -#define LOG_LEVEL_4 4 -#define LOG_LEVEL_MIN LOG_LEVEL_SILENT -#define LOG_LEVEL_MAX LOG_LEVEL_4 - - -#define LOGGER_NULL 0 -#define LOGGER_FILE 1 -#define LOGGER_DEBUGGER 2 -#define LOGGER_CONSOLE 3 -#define LOGGER_DUMP 4 - - -#ifndef LOCAL_ASSERT -#include <assert.h> -#if (defined _MSC_VER) -#define LOCAL_ASSERT(expr) {if(epee::debug::get_set_enable_assert()){_ASSERTE(expr);}} -#else -#define LOCAL_ASSERT(expr) -#endif - -#endif - -namespace epee { -namespace log_space { - //---------------------------------------------------------------------------- - bool is_stdout_a_tty() - { - static std::atomic<bool> initialized(false); - static std::atomic<bool> is_a_tty(false); - - if (!initialized.load(std::memory_order_acquire)) - { -#if defined(WIN32) - is_a_tty.store(0 != _isatty(_fileno(stdout)), std::memory_order_relaxed); -#else - is_a_tty.store(0 != isatty(fileno(stdout)), std::memory_order_relaxed); -#endif - initialized.store(true, std::memory_order_release); - } - - return is_a_tty.load(std::memory_order_relaxed); - } - //---------------------------------------------------------------------------- - void set_console_color(int color, bool bright) - { - if (!is_stdout_a_tty()) - return; - - switch(color) - { - case console_color_default: - { -#ifdef WIN32 - HANDLE h_stdout = GetStdHandle(STD_OUTPUT_HANDLE); - SetConsoleTextAttribute(h_stdout, FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE| (bright ? FOREGROUND_INTENSITY:0)); -#else - if(bright) - std::cout << "\033[1;37m"; - else - std::cout << "\033[0m"; -#endif - } - break; - case console_color_white: - { -#ifdef WIN32 - HANDLE h_stdout = GetStdHandle(STD_OUTPUT_HANDLE); - SetConsoleTextAttribute(h_stdout, FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE | (bright ? FOREGROUND_INTENSITY:0)); -#else - if(bright) - std::cout << "\033[1;37m"; - else - std::cout << "\033[0;37m"; -#endif - } - break; - case console_color_red: - { -#ifdef WIN32 - HANDLE h_stdout = GetStdHandle(STD_OUTPUT_HANDLE); - SetConsoleTextAttribute(h_stdout, FOREGROUND_RED | (bright ?
FOREGROUND_INTENSITY:0)); -#else - if(bright) - std::cout << "\033[1;31m"; - else - std::cout << "\033[0;31m"; -#endif - } - break; - case console_color_green: - { -#ifdef WIN32 - HANDLE h_stdout = GetStdHandle(STD_OUTPUT_HANDLE); - SetConsoleTextAttribute(h_stdout, FOREGROUND_GREEN | (bright ? FOREGROUND_INTENSITY:0)); -#else - if(bright) - std::cout << "\033[1;32m"; - else - std::cout << "\033[0;32m"; -#endif - } - break; - - case console_color_blue: - { -#ifdef WIN32 - HANDLE h_stdout = GetStdHandle(STD_OUTPUT_HANDLE); - SetConsoleTextAttribute(h_stdout, FOREGROUND_BLUE | FOREGROUND_INTENSITY);//(bright ? FOREGROUND_INTENSITY:0)); -#else - if(bright) - std::cout << "\033[1;34m"; - else - std::cout << "\033[0;34m"; -#endif - } - break; - - case console_color_cyan: - { -#ifdef WIN32 - HANDLE h_stdout = GetStdHandle(STD_OUTPUT_HANDLE); - SetConsoleTextAttribute(h_stdout, FOREGROUND_GREEN | FOREGROUND_BLUE | (bright ? FOREGROUND_INTENSITY:0)); -#else - if(bright) - std::cout << "\033[1;36m"; - else - std::cout << "\033[0;36m"; -#endif - } - break; - - case console_color_magenta: - { -#ifdef WIN32 - HANDLE h_stdout = GetStdHandle(STD_OUTPUT_HANDLE); - SetConsoleTextAttribute(h_stdout, FOREGROUND_BLUE | FOREGROUND_RED | (bright ? FOREGROUND_INTENSITY:0)); -#else - if(bright) - std::cout << "\033[1;35m"; - else - std::cout << "\033[0;35m"; -#endif - } - break; - - case console_color_yellow: - { -#ifdef WIN32 - HANDLE h_stdout = GetStdHandle(STD_OUTPUT_HANDLE); - SetConsoleTextAttribute(h_stdout, FOREGROUND_RED | FOREGROUND_GREEN | (bright ? FOREGROUND_INTENSITY:0)); -#else - if(bright) - std::cout << "\033[1;33m"; - else - std::cout << "\033[0;33m"; -#endif - } - break; - - } - } - //---------------------------------------------------------------------------- - void reset_console_color() { - if (!is_stdout_a_tty()) - return; - -#ifdef WIN32 - HANDLE h_stdout = GetStdHandle(STD_OUTPUT_HANDLE); - SetConsoleTextAttribute(h_stdout, FOREGROUND_RED | FOREGROUND_GREEN | FOREGROUND_BLUE); -#else - std::cout << "\033[0m"; - std::cout.flush(); -#endif - } - //---------------------------------------------------------------------------- - bool rotate_log_file(const char* pfile_path) - { -#ifdef _MSC_VER - if(!pfile_path) - return false; - - std::string file_path = pfile_path; - std::string::size_type a = file_path .rfind('.'); - if ( a != std::string::npos ) - file_path .erase( a, file_path .size()); - - ::DeleteFileA( (file_path + ".0").c_str() ); - ::MoveFileA( (file_path + ".log").c_str(), (file_path + ".0").c_str() ); -#else - return false;//not implemented yet -#endif - return true; - } - //---------------------------------------------------------------------------- -#ifdef _MSC_VER - bool debug_output_stream::out_buffer( const char* buffer, int buffer_len , int log_level, int color, const char* plog_name/* = NULL*/) - { - for ( int i = 0; i < buffer_len; i = i + max_dbg_str_len ) - { - std::string s( buffer + i, buffer_len- i < max_dbg_str_len ? 
- buffer_len - i : max_dbg_str_len ); - - ::OutputDebugStringA( s.c_str() ); - } - return true; - } -#endif - //---------------------------------------------------------------------------- - console_output_stream::console_output_stream() - { -#ifdef _MSC_VER - - if(!::GetStdHandle(STD_OUTPUT_HANDLE)) - m_have_to_kill_console = true; - else - m_have_to_kill_console = false; - - ::AllocConsole(); -#endif - } - //---------------------------------------------------------------------------- - console_output_stream::~console_output_stream() - { -#ifdef _MSC_VER - if(m_have_to_kill_console) - ::FreeConsole(); -#endif - } - //---------------------------------------------------------------------------- - bool console_output_stream::out_buffer( const char* buffer, int buffer_len , int log_level, int color, const char* plog_name/* = NULL*/) - { - if(plog_name) - return true; //skip alternative logs from console - - set_console_color(color, log_level < 1); - -#ifdef _MSC_VER - const char* ptarget_buf = NULL; - char* pallocated_buf = NULL; - - // - int i = 0; - for(; i < buffer_len; i++) - if(buffer[i] == '\a') break; - if(i == buffer_len) - ptarget_buf = buffer; - else - { - pallocated_buf = new char[buffer_len]; - ptarget_buf = pallocated_buf; - for(i = 0; i < buffer_len; i++) - { - if(buffer[i] == '\a') - pallocated_buf[i] = '^'; - else - pallocated_buf[i] = buffer[i]; - } - } - - //uint32_t b = 0; - //::WriteConsoleA(::GetStdHandle(STD_OUTPUT_HANDLE), ptarget_buf, buffer_len, (DWORD*)&b, 0); - std::cout << ptarget_buf; - if(pallocated_buf) delete [] pallocated_buf; -#else - std::string buf(buffer, buffer_len); - for(size_t i = 0; i!= buf.size(); i++) - { - if(buf[i] == 7 || buf[i] == -107) - buf[i] = '^'; - } - - std::cout << buf; -#endif - reset_console_color(); - return true; - } - //---------------------------------------------------------------------------- - file_output_stream::file_output_stream(std::string default_log_file_name, std::string log_path) - { - m_default_log_filename = default_log_file_name; - m_max_logfile_size = 0; - m_default_log_path = log_path; - m_pdefault_file_stream = add_new_stream_and_open(default_log_file_name.c_str()); - } - //---------------------------------------------------------------------------- - file_output_stream::~file_output_stream() - { - for(named_log_streams::iterator it = m_log_file_names.begin(); it!=m_log_file_names.end(); it++) - { - if ( it->second->is_open() ) - { - it->second->flush(); - it->second->close(); - } - delete it->second; - } - } - //---------------------------------------------------------------------------- - std::ofstream* file_output_stream::add_new_stream_and_open(const char* pstream_name) - { - //log_space::rotate_log_file((m_default_log_path + "\\" + pstream_name).c_str()); - - std::ofstream* pstream = (m_log_file_names[pstream_name] = new std::ofstream); - std::string target_path = m_default_log_path + "/" + pstream_name; - pstream->open( target_path.c_str(), std::ios_base::out | std::ios::app /*ios_base::trunc */); - if(pstream->fail()) - return NULL; - return pstream; - } - //---------------------------------------------------------------------------- - bool file_output_stream::set_max_logfile_size(uint64_t max_size) - { - m_max_logfile_size = max_size; - return true; - } - //---------------------------------------------------------------------------- - bool file_output_stream::set_log_rotate_cmd(const std::string& cmd) - { - m_log_rotate_cmd = cmd; - return true; - } - 
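// add_new_stream_and_open above implements lazily created per-name log files keyed
// by string. The same pattern in miniature, with illustrative names (no rotation,
// no levels); a sketch, not the epee implementation:
#include <fstream>
#include <map>
#include <memory>
#include <string>

class named_files
{
    std::map<std::string, std::unique_ptr<std::ofstream>> m_files;
    std::string m_dir;
public:
    explicit named_files(std::string dir) : m_dir(std::move(dir)) {}
    bool write(const std::string& name, const std::string& line)
    {
        auto& f = m_files[name];                 // creates the slot on first use
        if (!f)
        {
            f.reset(new std::ofstream(m_dir + "/" + name, std::ios::app));
            if (!f->is_open())
                return false;
        }
        (*f) << line << '\n';
        f->flush();
        return true;
    }
};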
//---------------------------------------------------------------------------- - bool file_output_stream::out_buffer(const char* buffer, int buffer_len, int log_level, int color, const char* plog_name/* = NULL*/) - { - std::ofstream* m_target_file_stream = m_pdefault_file_stream; - if(plog_name) - { //find named stream - named_log_streams::iterator it = m_log_file_names.find(plog_name); - if(it == m_log_file_names.end()) - m_target_file_stream = add_new_stream_and_open(plog_name); - else - m_target_file_stream = it->second; - } - if(!m_target_file_stream || !m_target_file_stream->is_open()) - return false;//TODO: add assert here - - m_target_file_stream->write(buffer, buffer_len ); - m_target_file_stream->flush(); - - if(m_max_logfile_size) - { - std::ofstream::pos_type pt = m_target_file_stream->tellp(); - uint64_t current_sz = pt; - if(current_sz > m_max_logfile_size) - { - std::cout << "current_sz= " << current_sz << " m_max_logfile_size= " << m_max_logfile_size << std::endl; - std::string log_file_name; - if(!plog_name) - log_file_name = m_default_log_filename; - else - log_file_name = plog_name; - - m_target_file_stream->close(); - std::string new_log_file_name = log_file_name; - - time_t tm = 0; - time(&tm); - - int err_count = 0; - boost::system::error_code ec; - do - { - new_log_file_name = string_tools::cut_off_extension(log_file_name); - if(err_count) - new_log_file_name += misc_utils::get_time_str_v2(tm) + "(" + boost::lexical_cast<std::string>(err_count) + ")" + ".log"; - else - new_log_file_name += misc_utils::get_time_str_v2(tm) + ".log"; - - err_count++; - }while(boost::filesystem::exists(m_default_log_path + "/" + new_log_file_name, ec)); - - std::string new_log_file_path = m_default_log_path + "/" + new_log_file_name; - boost::filesystem::rename(m_default_log_path + "/" + log_file_name, new_log_file_path, ec); - if(ec) - { - std::cout << "Failed to rename, ec = " << ec.message() << std::endl; - } - - if(m_log_rotate_cmd.size()) - { - - std::string m_log_rotate_cmd_local_copy = m_log_rotate_cmd; - //boost::replace_all(m_log_rotate_cmd, "[*SOURCE*]", new_log_file_path); - boost::replace_all(m_log_rotate_cmd_local_copy, "[*TARGET*]", new_log_file_path); - - misc_utils::call_sys_cmd(m_log_rotate_cmd_local_copy); - } - - m_target_file_stream->open( (m_default_log_path + "/" + log_file_name).c_str(), std::ios_base::out | std::ios::app /*ios_base::trunc */); - if(m_target_file_stream->fail()) - return false; - } - } - - return true; - } - //---------------------------------------------------------------------------- - log_stream_splitter::~log_stream_splitter() - { - //free pointers - std::for_each(m_log_streams.begin(), m_log_streams.end(), delete_ptr()); - } - //---------------------------------------------------------------------------- - bool log_stream_splitter::set_max_logfile_size(uint64_t max_size) - { - for(streams_container::iterator it = m_log_streams.begin(); it!=m_log_streams.end();it++) - it->first->set_max_logfile_size(max_size); - return true; - } - //---------------------------------------------------------------------------- - bool log_stream_splitter::set_log_rotate_cmd(const std::string& cmd) - { - for(streams_container::iterator it = m_log_streams.begin(); it!=m_log_streams.end();it++) - it->first->set_log_rotate_cmd(cmd); - return true; - } - //---------------------------------------------------------------------------- - bool log_stream_splitter::do_log_message(const std::string& rlog_mes, int log_level, int color, const char* plog_name/* = NULL*/) - { - std::string
str_mess = rlog_mes; - size_t str_len = str_mess.size(); - const char* pstr = str_mess.c_str(); - for(streams_container::iterator it = m_log_streams.begin(); it!=m_log_streams.end();it++) - if(it->second >= log_level) - it->first->out_buffer(pstr, (int)str_len, log_level, color, plog_name); - return true; - } - //---------------------------------------------------------------------------- - bool log_stream_splitter::add_logger(int type, const char* pdefault_file_name, const char* pdefault_log_folder, int log_level_limit/* = LOG_LEVEL_4*/) - { - ibase_log_stream* ls = NULL; - - switch( type ) - { - case LOGGER_FILE: - ls = new file_output_stream( pdefault_file_name, pdefault_log_folder ); - break; - - case LOGGER_DEBUGGER: -#ifdef _MSC_VER - ls = new debug_output_stream( ); -#else - return false;//not implemented yet -#endif - break; - case LOGGER_CONSOLE: - ls = new console_output_stream( ); - break; - } - - if ( ls ) { - m_log_streams.push_back(streams_container::value_type(ls, log_level_limit)); - return true; - } - return ls ? true:false; - } - //---------------------------------------------------------------------------- - bool log_stream_splitter::add_logger(ibase_log_stream* pstream, int log_level_limit/* = LOG_LEVEL_4*/) - { - m_log_streams.push_back(streams_container::value_type(pstream, log_level_limit) ); - return true; - } - //---------------------------------------------------------------------------- - bool log_stream_splitter::remove_logger(int type) - { - streams_container::iterator it = m_log_streams.begin(); - for(;it!=m_log_streams.end(); it++) - { - if(it->first->get_type() == type) - { - delete it->first; - m_log_streams.erase(it); - return true; - } - } - return false; - } - //---------------------------------------------------------------------------- - std::string get_daytime_string2() - { - boost::posix_time::ptime p = boost::posix_time::microsec_clock::local_time(); - return misc_utils::get_time_str_v3(p); - } - //---------------------------------------------------------------------------- - std::string get_day_time_string() - { - return get_daytime_string2(); - //time_t tm = 0; - //time(&tm); - //return misc_utils::get_time_str(tm); - } - //---------------------------------------------------------------------------- - std::string get_time_string() - { - return get_daytime_string2(); - } - //---------------------------------------------------------------------------- -#ifdef _MSC_VER - std::string get_time_string_adv(SYSTEMTIME* pst/* = NULL*/) - { - SYSTEMTIME st = {0}; - if(!pst) - { - pst = &st; - GetSystemTime(&st); - } - std::stringstream str_str; - str_str.fill('0'); - str_str << std::setw(2) << pst->wHour << "_" - << std::setw(2) << pst->wMinute << "_" - << std::setw(2) << pst->wSecond << "_" - << std::setw(3) << pst->wMilliseconds; - return str_str.str(); - } -#endif - //---------------------------------------------------------------------------- - logger::logger() - { - CRITICAL_REGION_BEGIN(m_critical_sec); - init(); - CRITICAL_REGION_END(); - } - //---------------------------------------------------------------------------- - bool logger::set_max_logfile_size(uint64_t max_size) - { - CRITICAL_REGION_BEGIN(m_critical_sec); - m_log_target.set_max_logfile_size(max_size); - CRITICAL_REGION_END(); - return true; - } - //---------------------------------------------------------------------------- - bool logger::set_log_rotate_cmd(const std::string& cmd) - { - CRITICAL_REGION_BEGIN(m_critical_sec); - m_log_target.set_log_rotate_cmd(cmd); - 
CRITICAL_REGION_END(); - return true; - } - //---------------------------------------------------------------------------- - bool logger::take_away_journal(std::list<std::string>& journal) - { - CRITICAL_REGION_BEGIN(m_critical_sec); - m_journal.swap(journal); - CRITICAL_REGION_END(); - return true; - } - //---------------------------------------------------------------------------- - bool logger::do_log_message(const std::string& rlog_mes, int log_level, int color, bool add_to_journal/* = false*/, const char* plog_name/* = NULL*/) - { - CRITICAL_REGION_BEGIN(m_critical_sec); - m_log_target.do_log_message(rlog_mes, log_level, color, plog_name); - if(add_to_journal) - m_journal.push_back(rlog_mes); - - return true; - CRITICAL_REGION_END(); - } - //---------------------------------------------------------------------------- - bool logger::add_logger( int type, const char* pdefault_file_name, const char* pdefault_log_folder, int log_level_limit/* = LOG_LEVEL_4*/) - { - CRITICAL_REGION_BEGIN(m_critical_sec); - return m_log_target.add_logger( type, pdefault_file_name, pdefault_log_folder, log_level_limit); - CRITICAL_REGION_END(); - } - //---------------------------------------------------------------------------- - bool logger::add_logger( ibase_log_stream* pstream, int log_level_limit/* = LOG_LEVEL_4*/) - { - CRITICAL_REGION_BEGIN(m_critical_sec); - return m_log_target.add_logger(pstream, log_level_limit); - CRITICAL_REGION_END(); - } - //---------------------------------------------------------------------------- - bool logger::remove_logger(int type) - { - CRITICAL_REGION_BEGIN(m_critical_sec); - return m_log_target.remove_logger(type); - CRITICAL_REGION_END(); - } - //---------------------------------------------------------------------------- - bool logger::set_thread_prefix(const std::string& prefix) - { - CRITICAL_REGION_BEGIN(m_critical_sec); - m_thr_prefix_strings[misc_utils::get_thread_string_id()] = prefix; - CRITICAL_REGION_END(); - return true; - } - //---------------------------------------------------------------------------- - bool logger::init() - { - m_process_name = string_tools::get_current_module_name(); - - init_log_path_by_default(); - - //init default set of loggers - init_default_loggers(); - - std::stringstream ss; - ss << get_time_string() << " Init logging.
Level=" << get_set_log_detalisation_level() - << " Log path=" << m_default_log_folder << std::endl; - this->do_log_message(ss.str(), LOG_LEVEL_0, console_color_white); - return true; - } - //---------------------------------------------------------------------------- - bool logger::init_default_loggers() - { - return true; - } - //---------------------------------------------------------------------------- - bool logger::init_log_path_by_default() - { - //load process name - m_default_log_folder = string_tools::get_current_module_folder(); - - m_default_log_file = m_process_name; - std::string::size_type a = m_default_log_file.rfind('.'); - if ( a != std::string::npos ) - m_default_log_file.erase( a, m_default_log_file.size()); - m_default_log_file += ".log"; - - return true; - } - //---------------------------------------------------------------------------- - int log_singletone::get_log_detalisation_level() - { - get_or_create_instance();//to initialize logger, if it is not initialized - return get_set_log_detalisation_level(); - } - //---------------------------------------------------------------------------- - bool log_singletone::is_filter_error(int error_code) - { - return false; - } - //---------------------------------------------------------------------------- - bool log_singletone::do_log_message(const std::string& rlog_mes, int log_level, int color, bool keep_in_journal, const char* plog_name/* = NULL*/) - { - logger* plogger = get_or_create_instance(); - bool res = false; - if(plogger) - res = plogger->do_log_message(rlog_mes, log_level, color, keep_in_journal, plog_name); - else - { //globally uninitialized, create new logger for each call of do_log_message() and then delete it - plogger = new logger(); - //TODO: some extra initialization - res = plogger->do_log_message(rlog_mes, log_level, color, keep_in_journal, plog_name); - delete plogger; - plogger = NULL; - } - return res; - } - //---------------------------------------------------------------------------- - bool log_singletone::take_away_journal(std::list<std::string>& journal) - { - logger* plogger = get_or_create_instance(); - bool res = false; - if(plogger) - res = plogger->take_away_journal(journal); - - return res; - } - //---------------------------------------------------------------------------- - bool log_singletone::set_max_logfile_size(uint64_t file_size) - { - logger* plogger = get_or_create_instance(); - if(!plogger) return false; - return plogger->set_max_logfile_size(file_size); - } - //---------------------------------------------------------------------------- - bool log_singletone::set_log_rotate_cmd(const std::string& cmd) - { - logger* plogger = get_or_create_instance(); - if(!plogger) return false; - return plogger->set_log_rotate_cmd(cmd); - } - //---------------------------------------------------------------------------- - bool log_singletone::add_logger( int type, const char* pdefault_file_name, const char* pdefault_log_folder, int log_level_limit/* = LOG_LEVEL_4*/) - { - logger* plogger = get_or_create_instance(); - if(!plogger) return false; - return plogger->add_logger(type, pdefault_file_name, pdefault_log_folder, log_level_limit); - } - //---------------------------------------------------------------------------- - std::string log_singletone::get_default_log_file() - { - logger* plogger = get_or_create_instance(); - if(plogger) - return plogger->get_default_log_file(); - - return ""; - } - //---------------------------------------------------------------------------- - std::string
log_singletone::get_default_log_folder() - { - logger* plogger = get_or_create_instance(); - if(plogger) - return plogger->get_default_log_folder(); - - return ""; - } - //---------------------------------------------------------------------------- - bool log_singletone::add_logger(ibase_log_stream* pstream, int log_level_limit/* = LOG_LEVEL_4*/) - { - logger* plogger = get_or_create_instance(); - if(!plogger) return false; - return plogger->add_logger(pstream, log_level_limit); - } - //---------------------------------------------------------------------------- - bool log_singletone::remove_logger(int type) - { - logger* plogger = get_or_create_instance(); - if(!plogger) return false; - return plogger->remove_logger(type); - } - //---------------------------------------------------------------------------- -PUSH_WARNINGS -DISABLE_GCC_WARNING(maybe-uninitialized) - int log_singletone::get_set_log_detalisation_level(bool is_need_set/* = false*/, int log_level_to_set/* = LOG_LEVEL_1*/) - { - static int log_detalisation_level = LOG_LEVEL_1; - if(is_need_set) - log_detalisation_level = log_level_to_set; - return log_detalisation_level; - } -POP_WARNINGS - //---------------------------------------------------------------------------- - int log_singletone::get_set_time_level(bool is_need_set/* = false*/, int time_log_level/* = LOG_LEVEL_0*/) - { - static int val_time_log_level = LOG_LEVEL_0; - if(is_need_set) - val_time_log_level = time_log_level; - - return val_time_log_level; - } - //---------------------------------------------------------------------------- - int log_singletone::get_set_process_level(bool is_need_set/* = false*/, int process_log_level/* = LOG_LEVEL_0*/) - { - static int val_process_log_level = LOG_LEVEL_0; - if(is_need_set) - val_process_log_level = process_log_level; - - return val_process_log_level; - } - //---------------------------------------------------------------------------- - bool log_singletone::get_set_need_thread_id(bool is_need_set/* = false*/, bool is_need_val/* = false*/) - { - static bool is_need = false; - if(is_need_set) - is_need = is_need_val; - - return is_need; - } - //---------------------------------------------------------------------------- - bool log_singletone::get_set_need_proc_name(bool is_need_set/* = false*/, bool is_need_val/* = false*/) - { - static bool is_need = true; - if(is_need_set) - is_need = is_need_val; - - return is_need; - } - //---------------------------------------------------------------------------- - uint64_t log_singletone::get_set_err_count(bool is_need_set/* = false*/, uint64_t err_val/* = false*/) - { - static uint64_t err_count = 0; - if(is_need_set) - err_count = err_val; - - return err_count; - } - //---------------------------------------------------------------------------- -#ifdef _MSC_VER - void log_singletone::SetThreadName( DWORD dwThreadID, const char* threadName) - { -#define MS_VC_EXCEPTION 0x406D1388 - -#pragma pack(push,8) - typedef struct tagTHREADNAME_INFO - { - DWORD dwType; // Must be 0x1000. - LPCSTR szName; // Pointer to name (in user addr space). - DWORD dwThreadID; // Thread ID (-1=caller thread). - DWORD dwFlags; // Reserved for future use, must be zero. 
- } THREADNAME_INFO; -#pragma pack(pop) - - Sleep(10); - THREADNAME_INFO info; - info.dwType = 0x1000; - info.szName = (char*)threadName; - info.dwThreadID = dwThreadID; - info.dwFlags = 0; - - __try - { - RaiseException( MS_VC_EXCEPTION, 0, sizeof(info)/sizeof(ULONG_PTR), (ULONG_PTR*)&info ); - } - __except(EXCEPTION_EXECUTE_HANDLER) - { - } - } -#endif - //---------------------------------------------------------------------------- - bool log_singletone::set_thread_log_prefix(const std::string& prefix) - { -#ifdef _MSC_VER - SetThreadName(-1, prefix.c_str()); -#endif - - logger* plogger = get_or_create_instance(); - if(!plogger) return false; - return plogger->set_thread_prefix(prefix); - } - //---------------------------------------------------------------------------- - std::string log_singletone::get_prefix_entry() - { - std::stringstream str_prefix; - //write time entry - if ( get_set_time_level() <= get_set_log_detalisation_level() ) - str_prefix << get_day_time_string() << " "; - - //write process info - logger* plogger = get_or_create_instance(); - //bool res = false; - if(!plogger) - { //globally uninitialized, create new logger for each call of get_prefix_entry() and then delete it - plogger = new logger(); - } - - //if ( get_set_need_proc_name() && get_set_process_level() <= get_set_log_detalisation_level() ) - // str_prefix << "[" << plogger->m_process_name << " (id=" << GetCurrentProcessId() << ")] "; -//#ifdef _MSC_VER_EX - if ( get_set_need_thread_id() /*&& get_set_tid_level() <= get_set_log_detalisation_level()*/ ) - str_prefix << "tid:" << misc_utils::get_thread_string_id() << " "; -//#endif - - if(plogger->m_thr_prefix_strings.size()) - { - CRITICAL_REGION_LOCAL(plogger->m_critical_sec); - std::string thr_str = misc_utils::get_thread_string_id(); - std::map<std::string, std::string>::iterator it = plogger->m_thr_prefix_strings.find(thr_str); - if(it!=plogger->m_thr_prefix_strings.end()) - { - str_prefix << it->second; - } - } - - if(get_set_is_uninitialized()) - delete plogger; - - return str_prefix.str(); - } - //---------------------------------------------------------------------------- - bool log_singletone::init() - { - return true;/*do nothing here*/ - } - //---------------------------------------------------------------------------- - bool log_singletone::un_init() - { - //delete object - logger* plogger = get_set_instance_internal(); - if(plogger) delete plogger; - //set uninitialized - get_set_is_uninitialized(true, true); - get_set_instance_internal(true, NULL); - return true; - } - //---------------------------------------------------------------------------- - logger* log_singletone::get_or_create_instance() - { - logger* plogger = get_set_instance_internal(); - if(!plogger) - if(!get_set_is_uninitialized()) - get_set_instance_internal(true, plogger = new logger); - - return plogger; - } - //---------------------------------------------------------------------------- - logger* log_singletone::get_set_instance_internal(bool is_need_set/* = false*/, logger* pnew_logger_val/* = NULL*/) - { - static logger* val_plogger = NULL; - - if(is_need_set) - val_plogger = pnew_logger_val; - - return val_plogger; - } - //---------------------------------------------------------------------------- - bool log_singletone::get_set_is_uninitialized(bool is_need_set/* = false*/, bool is_uninitialized/* = false*/) - { - static bool val_is_uninitialized = false; - - if(is_need_set) - val_is_uninitialized = is_uninitialized; - - return val_is_uninitialized; - } -
//---------------------------------------------------------------------------- - log_frame::log_frame(const std::string& name, int dlevel/* = LOG_LEVEL_2*/, const char* plog_name/* = NULL*/) - { -#ifdef _MSC_VER - int lasterr=::GetLastError(); -#endif - m_plog_name = plog_name; - if ( dlevel <= log_singletone::get_log_detalisation_level() ) - { - m_name = name; - std::stringstream ss; - ss << log_space::log_singletone::get_prefix_entry() << "-->>" << m_name << std::endl; - log_singletone::do_log_message(ss.str(), dlevel, console_color_default, false, m_plog_name); - } - m_level = dlevel; -#ifdef _MSC_VER - ::SetLastError(lasterr); -#endif - } - //---------------------------------------------------------------------------- - log_frame::~log_frame() - { -#ifdef _MSC_VER - int lasterr=::GetLastError(); -#endif - - if (m_level <= log_singletone::get_log_detalisation_level() ) - { - std::stringstream ss; - ss << log_space::log_singletone::get_prefix_entry() << "<<--" << m_name << std::endl; - log_singletone::do_log_message(ss.str(), m_level, console_color_default, false,m_plog_name); - } -#ifdef _MSC_VER - ::SetLastError(lasterr); -#endif - } - //---------------------------------------------------------------------------- - std::string get_win32_err_descr(int err_no) - { -#ifdef _MSC_VER - LPVOID lpMsgBuf; - - FormatMessageA( - FORMAT_MESSAGE_ALLOCATE_BUFFER | - FORMAT_MESSAGE_FROM_SYSTEM, - NULL, - err_no, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - (char*) &lpMsgBuf, - 0, NULL ); - - std::string fix_sys_message = "(null)"; - if(lpMsgBuf) fix_sys_message = (char*)lpMsgBuf; - std::string::size_type a; - if ( (a = fix_sys_message.rfind( '\n' )) != std::string::npos ) - fix_sys_message.erase(a); - if ( (a = fix_sys_message.rfind( '\r' )) != std::string::npos ) - fix_sys_message.erase(a); - - LocalFree(lpMsgBuf); - return fix_sys_message; -#else - return "Not implemented yet"; -#endif - } - //---------------------------------------------------------------------------- - bool getwin32_err_text(std::stringstream& ref_message, int error_no) - { - ref_message << "win32 error:" << get_win32_err_descr(error_no); - return true; - } -} -} diff --git a/contrib/epee/include/misc_log_ex.h b/contrib/epee/include/misc_log_ex.h deleted file mode 100644 index 918bfcf2d2..0000000000 --- a/contrib/epee/include/misc_log_ex.h +++ /dev/null @@ -1,507 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -#pragma once - -#ifndef _MISC_LOG_EX_H_ -#define _MISC_LOG_EX_H_ - -#if defined(WIN32) -#if !defined(WIN32_LEAN_AND_MEAN) -#define WIN32_LEAN_AND_MEAN -#endif -#endif - -#include -#include -#include -#include -#include - -#include "misc_os_dependent.h" -#include "static_initializer.h" -#include "syncobj.h" -#include "warnings.h" - - -#define LOG_LEVEL_SILENT -1 -#define LOG_LEVEL_0 0 -#define LOG_LEVEL_1 1 -#define LOG_LEVEL_2 2 -#define LOG_LEVEL_3 3 -#define LOG_LEVEL_4 4 -#define LOG_LEVEL_MIN LOG_LEVEL_SILENT -#define LOG_LEVEL_MAX LOG_LEVEL_4 - - -#define LOGGER_NULL 0 -#define LOGGER_FILE 1 -#define LOGGER_DEBUGGER 2 -#define LOGGER_CONSOLE 3 -#define LOGGER_DUMP 4 - - -#ifndef LOCAL_ASSERT -#include -#if (defined _MSC_VER) -#define LOCAL_ASSERT(expr) {if(epee::debug::get_set_enable_assert()){_ASSERTE(expr);}} -#else -#define LOCAL_ASSERT(expr) -#endif - -#endif - -namespace epee -{ -namespace debug -{ - inline bool get_set_enable_assert(bool set = false, bool v = false) - { - static bool e = true; - if(set) - e = v; - return e; - } -} -namespace log_space -{ - class logger; - class log_message; - class log_singletone; - - /************************************************************************/ - /* */ - /************************************************************************/ - enum console_colors - { - console_color_default, - console_color_white, - console_color_red, - console_color_green, - console_color_blue, - console_color_cyan, - console_color_magenta, - console_color_yellow - }; - - - struct ibase_log_stream - { - ibase_log_stream() {} - virtual ~ibase_log_stream() {} - virtual bool out_buffer(const char* buffer, int buffer_len , int log_level, int color, const char* plog_name = NULL)=0; - virtual int get_type() const { return 0; } - - virtual bool set_max_logfile_size(uint64_t max_size) { return true; } - virtual bool set_log_rotate_cmd(const std::string& cmd) { return true; } - }; - - /************************************************************************/ - /* */ - /************************************************************************/ - struct delete_ptr - { - template - void operator() (P p) - { - delete p.first; - } - }; - - /************************************************************************/ - /* */ - /************************************************************************/ - - bool is_stdout_a_tty(); - void set_console_color(int color, bool bright); - void reset_console_color(); - bool rotate_log_file(const char* pfile_path); - - //------------------------------------------------------------------------ -#define max_dbg_str_len 80 -#ifdef _MSC_VER - class debug_output_stream: public ibase_log_stream - { - virtual bool out_buffer(const char* buffer, int buffer_len , int log_level, int color, const char* plog_name = NULL) override; - }; -#endif - - class console_output_stream: public ibase_log_stream - { -#ifdef _MSC_VER - bool m_have_to_kill_console; -#endif - - public: - console_output_stream(); - virtual ~console_output_stream(); - 
- virtual int get_type() const override { return LOGGER_CONSOLE; } - virtual bool out_buffer( const char* buffer, int buffer_len , int log_level, int color, const char* plog_name = NULL) override; - }; - - //--------------------------------------------------------------------------// - class file_output_stream : public ibase_log_stream - { - public: - typedef std::map<std::string, std::ofstream*> named_log_streams; - - file_output_stream(std::string default_log_file_name, std::string log_path); - ~file_output_stream(); - - private: - named_log_streams m_log_file_names; - std::string m_default_log_path; - std::ofstream* m_pdefault_file_stream; - std::string m_log_rotate_cmd; - std::string m_default_log_filename; - uint64_t m_max_logfile_size; - - virtual int get_type() const override { return LOGGER_FILE; } - virtual bool out_buffer(const char* buffer, int buffer_len, int log_level, int color, const char* plog_name = NULL) override; - virtual bool set_max_logfile_size(uint64_t max_size) override; - virtual bool set_log_rotate_cmd(const std::string& cmd) override; - - std::ofstream* add_new_stream_and_open(const char* pstream_name); - }; - - /************************************************************************/ - /* */ - /************************************************************************/ - class log_stream_splitter - { - public: - typedef std::list<std::pair<ibase_log_stream*, int> > streams_container; - - log_stream_splitter() { } - ~log_stream_splitter(); - - bool set_max_logfile_size(uint64_t max_size); - bool set_log_rotate_cmd(const std::string& cmd); - bool do_log_message(const std::string& rlog_mes, int log_level, int color, const char* plog_name = NULL); - bool add_logger(int type, const char* pdefault_file_name, const char* pdefault_log_folder, int log_level_limit = LOG_LEVEL_4); - bool add_logger(ibase_log_stream* pstream, int log_level_limit = LOG_LEVEL_4); - bool remove_logger(int type); - - private: - streams_container m_log_streams; - }; - - /************************************************************************/ - /* */ - /************************************************************************/ - int get_set_log_detalisation_level(bool is_need_set = false, int log_level_to_set = LOG_LEVEL_1); - int get_set_time_level(bool is_need_set = false, int time_log_level = LOG_LEVEL_0); - bool get_set_need_thread_id(bool is_need_set = false, bool is_need_val = false); - bool get_set_need_proc_name(bool is_need_set = false, bool is_need_val = false); - - std::string get_daytime_string2(); - std::string get_day_time_string(); - std::string get_time_string(); - -#ifdef _MSC_VER - inline std::string get_time_string_adv(SYSTEMTIME* pst = NULL); -#endif - - class logger - { - public: - friend class log_singletone; - - logger(); - ~logger() { } - - bool set_max_logfile_size(uint64_t max_size); - bool set_log_rotate_cmd(const std::string& cmd); - bool take_away_journal(std::list<std::string>& journal); - bool do_log_message(const std::string& rlog_mes, int log_level, int color, bool add_to_journal = false, const char* plog_name = NULL); - bool add_logger(int type, const char* pdefault_file_name, const char* pdefault_log_folder , int log_level_limit = LOG_LEVEL_4); - bool add_logger(ibase_log_stream* pstream, int log_level_limit = LOG_LEVEL_4); - bool remove_logger(int type); - bool set_thread_prefix(const std::string& prefix); - std::string get_default_log_file() { return m_default_log_file; } - std::string get_default_log_folder() { return m_default_log_folder; } - - private: - bool init(); - bool init_default_loggers(); - bool init_log_path_by_default();
- - log_stream_splitter m_log_target; - - std::string m_default_log_folder; - std::string m_default_log_file; - std::string m_process_name; - std::map<std::string, std::string> m_thr_prefix_strings; - std::list<std::string> m_journal; - critical_section m_critical_sec; - }; - - /************************************************************************/ - /* */ - /************************************************************************/ - class log_singletone - { - public: - friend class initializer<log_singletone>; - friend class logger; - - static int get_log_detalisation_level(); - static bool is_filter_error(int error_code); - static bool do_log_message(const std::string& rlog_mes, int log_level, int color, bool keep_in_journal, const char* plog_name = NULL); - static bool take_away_journal(std::list<std::string>& journal); - static bool set_max_logfile_size(uint64_t file_size); - static bool set_log_rotate_cmd(const std::string& cmd); - static bool add_logger(int type, const char* pdefault_file_name, const char* pdefault_log_folder, int log_level_limit = LOG_LEVEL_4); - static std::string get_default_log_file(); - static std::string get_default_log_folder(); - static bool add_logger( ibase_log_stream* pstream, int log_level_limit = LOG_LEVEL_4); - static bool remove_logger(int type); - -PUSH_WARNINGS -DISABLE_GCC_WARNING(maybe-uninitialized) - static int get_set_log_detalisation_level(bool is_need_set = false, int log_level_to_set = LOG_LEVEL_1); -POP_WARNINGS - - static int get_set_time_level(bool is_need_set = false, int time_log_level = LOG_LEVEL_0); - static int get_set_process_level(bool is_need_set = false, int process_log_level = LOG_LEVEL_0); - static bool get_set_need_thread_id(bool is_need_set = false, bool is_need_val = false); - static bool get_set_need_proc_name(bool is_need_set = false, bool is_need_val = false); - static uint64_t get_set_err_count(bool is_need_set = false, uint64_t err_val = false); - -#ifdef _MSC_VER - static void SetThreadName( DWORD dwThreadID, const char* threadName); -#endif - - static bool set_thread_log_prefix(const std::string& prefix); - static std::string get_prefix_entry(); - - private: - log_singletone() { } //restrict creating an instance - //static initializer<log_singletone> m_log_initializer;//must be in one .cpp file (for example main.cpp) via DEFINE_LOGGING macro - - static bool init(); - static bool un_init(); - - static logger* get_or_create_instance(); - static logger* get_set_instance_internal(bool is_need_set = false, logger* pnew_logger_val = NULL); - static bool get_set_is_uninitialized(bool is_need_set = false, bool is_uninitialized = false); - }; - - const static initializer<log_singletone> log_initializer; - - class log_frame - { - std::string m_name; - int m_level; - const char* m_plog_name; - - public: - log_frame(const std::string& name, int dlevel = LOG_LEVEL_2 , const char* plog_name = NULL); - ~log_frame(); - }; - - inline int get_set_time_level(bool is_need_set, int time_log_level) - { - return log_singletone::get_set_time_level(is_need_set, time_log_level); - } - inline int get_set_log_detalisation_level(bool is_need_set, int log_level_to_set) - { - return log_singletone::get_set_log_detalisation_level(is_need_set, log_level_to_set); - } - inline std::string get_prefix_entry() - { - return log_singletone::get_prefix_entry(); - } - inline bool get_set_need_thread_id(bool is_need_set, bool is_need_val) - { - return log_singletone::get_set_need_thread_id(is_need_set, is_need_val); - } - inline bool get_set_need_proc_name(bool is_need_set, bool is_need_val ) - { - return log_singletone::get_set_need_proc_name(is_need_set,
is_need_val); - } - - inline std::string get_win32_err_descr(int err_no); - inline bool getwin32_err_text(std::stringstream& ref_message, int error_no); -} - -#if defined(_DEBUG) || defined(__GNUC__) - #define ENABLE_LOGGING_INTERNAL -#endif - -#if defined(ENABLE_RELEASE_LOGGING) - #define ENABLE_LOGGING_INTERNAL -#endif - - -#if defined(ENABLE_LOGGING_INTERNAL) - -#define LOG_PRINT_NO_PREFIX2(log_name, x, y) {if ( y <= epee::log_space::log_singletone::get_log_detalisation_level() )\ - {std::stringstream ss________; ss________ << x << std::endl; epee::log_space::log_singletone::do_log_message(ss________.str() , y, epee::log_space::console_color_default, false, log_name);}} - -#define LOG_PRINT_NO_PREFIX_NO_POSTFIX2(log_name, x, y) {if ( y <= epee::log_space::log_singletone::get_log_detalisation_level() )\ - {std::stringstream ss________; ss________ << x; epee::log_space::log_singletone::do_log_message(ss________.str(), y, epee::log_space::console_color_default, false, log_name);}} - - -#define LOG_PRINT_NO_POSTFIX2(log_name, x, y) {if ( y <= epee::log_space::log_singletone::get_log_detalisation_level() )\ - {std::stringstream ss________; ss________ << epee::log_space::log_singletone::get_prefix_entry() << x; epee::log_space::log_singletone::do_log_message(ss________.str(), y, epee::log_space::console_color_default, false, log_name);}} - - -#define LOG_PRINT2(log_name, x, y) {if ( y <= epee::log_space::log_singletone::get_log_detalisation_level() )\ - {std::stringstream ss________; ss________ << epee::log_space::log_singletone::get_prefix_entry() << x << std::endl;epee::log_space::log_singletone::do_log_message(ss________.str(), y, epee::log_space::console_color_default, false, log_name);}} - -#define LOG_PRINT_COLOR2(log_name, x, y, color) {if ( y <= epee::log_space::log_singletone::get_log_detalisation_level() )\ - {std::stringstream ss________; ss________ << epee::log_space::log_singletone::get_prefix_entry() << x << std::endl;epee::log_space::log_singletone::do_log_message(ss________.str(), y, color, false, log_name);}} - - -#define LOG_PRINT2_JORNAL(log_name, x, y) {if ( y <= epee::log_space::log_singletone::get_log_detalisation_level() )\ - {std::stringstream ss________; ss________ << epee::log_space::log_singletone::get_prefix_entry() << x << std::endl;epee::log_space::log_singletone::do_log_message(ss________.str(), y, epee::log_space::console_color_default, true, log_name);}} - - -#define LOG_ERROR2(log_name, x) { \ - std::stringstream ss________; ss________ << epee::log_space::log_singletone::get_prefix_entry() << "ERROR " << __FILE__ << ":" << __LINE__ << " " << x << std::endl; epee::log_space::log_singletone::do_log_message(ss________.str(), LOG_LEVEL_0, epee::log_space::console_color_red, true, log_name);LOCAL_ASSERT(0); epee::log_space::log_singletone::get_set_err_count(true, epee::log_space::log_singletone::get_set_err_count()+1);} - -#define LOG_FRAME2(log_name, x, y) epee::log_space::log_frame frame(x, y, log_name) - -#define LOG_WARNING2(log_name, x, y) {if ( y <= epee::log_space::log_singletone::get_log_detalisation_level() )\ - {std::stringstream ss________; ss________ << epee::log_space::log_singletone::get_prefix_entry() << "WARNING " << __FILE__ << ":" << __LINE__ << " " << x << std::endl; epee::log_space::log_singletone::do_log_message(ss________.str(), y, epee::log_space::console_color_red, true, log_name);LOCAL_ASSERT(0); epee::log_space::log_singletone::get_set_err_count(true, epee::log_space::log_singletone::get_set_err_count()+1);}} - -#else - - -#define 
LOG_PRINT_NO_PREFIX2(log_name, x, y) - -#define LOG_PRINT_NO_PREFIX_NO_POSTFIX2(log_name, x, y) - -#define LOG_PRINT_NO_POSTFIX2(log_name, x, y) - -#define LOG_PRINT_COLOR2(log_name, x, y, color) - -#define LOG_PRINT2_JORNAL(log_name, x, y) - -#define LOG_PRINT2(log_name, x, y) - -#define LOG_ERROR2(log_name, x) - - -#define LOG_FRAME2(log_name, x, y) - -#define LOG_WARNING2(log_name, x, level) - - -#endif - - -#ifndef LOG_DEFAULT_TARGET - #define LOG_DEFAULT_TARGET NULL -#endif - - -#define LOG_PRINT_NO_POSTFIX(mess, level) LOG_PRINT_NO_POSTFIX2(LOG_DEFAULT_TARGET, mess, level) -#define LOG_PRINT_NO_PREFIX(mess, level) LOG_PRINT_NO_PREFIX2(LOG_DEFAULT_TARGET, mess, level) -#define LOG_PRINT_NO_PREFIX_NO_POSTFIX(mess, level) LOG_PRINT_NO_PREFIX_NO_POSTFIX2(LOG_DEFAULT_TARGET, mess, level) -#define LOG_PRINT(mess, level) LOG_PRINT2(LOG_DEFAULT_TARGET, mess, level) - -#define LOG_PRINT_COLOR(mess, level, color) LOG_PRINT_COLOR2(LOG_DEFAULT_TARGET, mess, level, color) -#define LOG_PRINT_RED(mess, level) LOG_PRINT_COLOR2(LOG_DEFAULT_TARGET, mess, level, epee::log_space::console_color_red) -#define LOG_PRINT_GREEN(mess, level) LOG_PRINT_COLOR2(LOG_DEFAULT_TARGET, mess, level, epee::log_space::console_color_green) -#define LOG_PRINT_BLUE(mess, level) LOG_PRINT_COLOR2(LOG_DEFAULT_TARGET, mess, level, epee::log_space::console_color_blue) -#define LOG_PRINT_YELLOW(mess, level) LOG_PRINT_COLOR2(LOG_DEFAULT_TARGET, mess, level, epee::log_space::console_color_yellow) -#define LOG_PRINT_CYAN(mess, level) LOG_PRINT_COLOR2(LOG_DEFAULT_TARGET, mess, level, epee::log_space::console_color_cyan) -#define LOG_PRINT_MAGENTA(mess, level) LOG_PRINT_COLOR2(LOG_DEFAULT_TARGET, mess, level, epee::log_space::console_color_magenta) - -#define LOG_PRINT_RED_L0(mess) LOG_PRINT_COLOR2(LOG_DEFAULT_TARGET, mess, LOG_LEVEL_0, epee::log_space::console_color_red) - -#define LOG_PRINT_L0(mess) LOG_PRINT(mess, LOG_LEVEL_0) -#define LOG_PRINT_L1(mess) LOG_PRINT(mess, LOG_LEVEL_1) -#define LOG_PRINT_L2(mess) LOG_PRINT(mess, LOG_LEVEL_2) -#define LOG_PRINT_L3(mess) LOG_PRINT(mess, LOG_LEVEL_3) -#define LOG_PRINT_L4(mess) LOG_PRINT(mess, LOG_LEVEL_4) -#define LOG_PRINT_J(mess, level) LOG_PRINT2_JORNAL(LOG_DEFAULT_TARGET, mess, level) - -#define LOG_ERROR(mess) LOG_ERROR2(LOG_DEFAULT_TARGET, mess) -#define LOG_FRAME(mess, level) LOG_FRAME2(LOG_DEFAULT_TARGET, mess, level) -#define LOG_VALUE(mess, level) LOG_VALUE2(LOG_DEFAULT_TARGET, mess, level) -#define LOG_ARRAY(mess, level) LOG_ARRAY2(LOG_DEFAULT_TARGET, mess, level) -//#define LOGWIN_PLATFORM_ERROR(err_no) LOGWINDWOS_PLATFORM_ERROR2(LOG_DEFAULT_TARGET, err_no) -#define LOG_SOCKET_ERROR(err_no) LOG_SOCKET_ERROR2(LOG_DEFAULT_TARGET, err_no) -//#define LOGWIN_PLATFORM_ERROR_UNCRITICAL(mess) LOGWINDWOS_PLATFORM_ERROR_UNCRITICAL2(LOG_DEFAULT_TARGET, mess) -#define LOG_WARNING(mess, level) LOG_WARNING2(LOG_DEFAULT_TARGET, mess, level) - -#define ENDL std::endl - -#define TRY_ENTRY() try { -#define CATCH_ENTRY(location, return_val) } \ - catch(const std::exception& ex) \ -{ \ - (void)(ex); \ - LOG_ERROR("Exception at [" << location << "], what=" << ex.what()); \ - return return_val; \ -}\ - catch(...)\ -{\ - LOG_ERROR("Exception at [" << location << "], generic exception \"...\"");\ - return return_val; \ -} - -#define CATCH_ENTRY_L0(lacation, return_val) CATCH_ENTRY(lacation, return_val) -#define CATCH_ENTRY_L1(lacation, return_val) CATCH_ENTRY(lacation, return_val) -#define CATCH_ENTRY_L2(lacation, return_val) CATCH_ENTRY(lacation, return_val) -#define CATCH_ENTRY_L3(lacation, 
return_val) CATCH_ENTRY(lacation, return_val) -#define CATCH_ENTRY_L4(lacation, return_val) CATCH_ENTRY(lacation, return_val) - - -#define ASSERT_MES_AND_THROW(message) {LOG_ERROR(message); std::stringstream ss; ss << message; throw std::runtime_error(ss.str());} -#define CHECK_AND_ASSERT_THROW_MES(expr, message) {if(!(expr)) ASSERT_MES_AND_THROW(message);} - - -#ifndef CHECK_AND_ASSERT -#define CHECK_AND_ASSERT(expr, fail_ret_val) do{if(!(expr)){LOCAL_ASSERT(expr); return fail_ret_val;};}while(0) -#endif - -#define NOTHING - -#ifndef CHECK_AND_ASSERT_MES -#define CHECK_AND_ASSERT_MES(expr, fail_ret_val, message) do{if(!(expr)) {LOG_ERROR(message); return fail_ret_val;};}while(0) -#endif - -#ifndef CHECK_AND_NO_ASSERT_MES -#define CHECK_AND_NO_ASSERT_MES(expr, fail_ret_val, message) do{if(!(expr)) {LOG_PRINT_L0(message); /*LOCAL_ASSERT(expr);*/ return fail_ret_val;};}while(0) -#endif - - -#ifndef CHECK_AND_ASSERT_MES_NO_RET -#define CHECK_AND_ASSERT_MES_NO_RET(expr, message) do{if(!(expr)) {LOG_ERROR(message); return;};}while(0) -#endif - - -#ifndef CHECK_AND_ASSERT_MES2 -#define CHECK_AND_ASSERT_MES2(expr, message) do{if(!(expr)) {LOG_ERROR(message); };}while(0) -#endif - -} -#endif //_MISC_LOG_EX_H_ diff --git a/contrib/epee/include/misc_os_dependent.cpp b/contrib/epee/include/misc_os_dependent.cpp deleted file mode 100644 index 3bff65853c..0000000000 --- a/contrib/epee/include/misc_os_dependent.cpp +++ /dev/null @@ -1,97 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// - -#include "misc_os_dependent.h" - -#include <iostream> - -#include <boost/lexical_cast.hpp> - -#ifdef __MACH__ -#include <mach/clock.h> -#include <mach/mach.h> -#endif - -namespace epee -{ -namespace misc_utils -{ - - uint64_t get_tick_count() - { -#if defined(_MSC_VER) - return ::GetTickCount64(); -#elif defined(__MACH__) - clock_serv_t cclock; - mach_timespec_t mts; - - host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock); - clock_get_time(cclock, &mts); - mach_port_deallocate(mach_task_self(), cclock); - - return (mts.tv_sec * 1000) + (mts.tv_nsec/1000000); -#else - struct timespec ts; - if(clock_gettime(CLOCK_MONOTONIC, &ts) != 0) { - return 0; - } - return (ts.tv_sec * 1000) + (ts.tv_nsec/1000000); -#endif - } - - int call_sys_cmd(const std::string& cmd) - { - std::cout << "# " << cmd << std::endl; - - FILE * fp ; - //char tstCommand[] ="ls *"; - char path[1000] = {0}; -#if !defined(__GNUC__) - fp = _popen(cmd.c_str(), "r"); -#else - fp = popen(cmd.c_str(), "r"); -#endif - while ( fgets( path, 1000, fp ) != NULL ) - std::cout << path; - -#if !defined(__GNUC__) - _pclose(fp); -#else - pclose(fp); -#endif - return 0; - } - - std::string get_thread_string_id() - { -#if defined(_MSC_VER) - return boost::lexical_cast<std::string>(GetCurrentThreadId()); -#elif defined(__GNUC__) - return boost::lexical_cast<std::string>(pthread_self()); -#endif - } -} -} diff --git a/contrib/epee/include/misc_os_dependent.h b/contrib/epee/include/misc_os_dependent.h deleted file mode 100644 index eb4a3a8a75..0000000000 --- a/contrib/epee/include/misc_os_dependent.h +++ /dev/null @@ -1,52 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// - -#pragma once - -#include -#include - -#ifdef WIN32 - #ifndef WIN32_LEAN_AND_MEAN - #define WIN32_LEAN_AND_MEAN - #endif - - #if !defined(NOMINMAX) - #define NOMINMAX 1 - #endif // !defined(NOMINMAX) - - #include -#endif - -namespace epee -{ -namespace misc_utils -{ - uint64_t get_tick_count(); - int call_sys_cmd(const std::string& cmd); - std::string get_thread_string_id(); -} -} diff --git a/contrib/epee/include/net/abstract_tcp_server.h b/contrib/epee/include/net/abstract_tcp_server.h deleted file mode 100644 index c74444c8e0..0000000000 --- a/contrib/epee/include/net/abstract_tcp_server.h +++ /dev/null @@ -1,316 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// - - - -#ifndef _ABSTRACT_TCP_SERVER_H_ -#define _ABSTRACT_TCP_SERVER_H_ - -#include -#include -#include -#include "winobj.h" -//#include "threads_helper.h" -#include "net_utils_base.h" - -#pragma comment(lib, "Ws2_32.lib") - -namespace epee -{ -namespace net_utils -{ - /************************************************************************/ - /* */ - /************************************************************************/ - class soket_sender: public i_service_endpoint - { - public: - soket_sender(SOCKET sock):m_sock(sock){} - private: - virtual bool handle_send(const void* ptr, size_t cb) - { - if(cb != send(m_sock, (char*)ptr, (int)cb, 0)) - { - int sock_err = WSAGetLastError(); - LOG_ERROR("soket_sender: Failed to send " << cb << " bytes, Error=" << sock_err); - return false; - } - return true; - - } - - SOCKET m_sock; - }; - - - - /************************************************************************/ - /* */ - /************************************************************************/ - template - class abstract_tcp_server - { - public: - abstract_tcp_server(); - - bool init_server(int port_no); - bool deinit_server(); - bool run_server(); - bool send_stop_signal(); - - typename THandler::config_type& get_config_object(){return m_config;} - - private: - bool invoke_connection(SOCKET hnew_sock, long ip_from, int post_from); - static unsigned __stdcall ConnectionHandlerProc(void* lpParameter); - - class thread_context; - typedef std::list connections_container; - typedef typename connections_container::iterator connections_iterator; - - struct thread_context - { - HANDLE m_htread; - SOCKET m_socket; - abstract_tcp_server* powner; - connection_context m_context; - typename connections_iterator m_self_it; - }; - - SOCKET m_listen_socket; - int m_port; - bool m_initialized; - volatile LONG m_stop_server; - volatile LONG m_threads_count; - typename THandler::config_type m_config; - connections_container m_connections; - critical_section m_connections_lock; - }; - - template - unsigned __stdcall abstract_tcp_server::ConnectionHandlerProc(void* lpParameter) - { - - thread_context* pthread_context = (thread_context*)lpParameter; - if(!pthread_context) - return 0; - abstract_tcp_server* pthis = pthread_context->powner; - - ::InterlockedIncrement(&pthis->m_threads_count); - - ::CoInitialize(NULL); - - - LOG_PRINT("Handler thread STARTED with socket=" << pthread_context->m_socket, LOG_LEVEL_2); - int res = 0; - - soket_sender sndr(pthread_context->m_socket); - THandler srv(&sndr, pthread_context->powner->m_config, pthread_context->m_context); - - - srv.after_init_connection(); - - char buff[1000] = {0}; - std::string ansver; - while ( (res = recv(pthread_context->m_socket, (char*)buff, 1000, 0)) > 0) - { - LOG_PRINT("Data in, " << res << " bytes", LOG_LEVEL_3); - if(!srv.handle_recv(buff, res)) - break; - } - shutdown(pthread_context->m_socket, SD_BOTH); - closesocket(pthread_context->m_socket); - - abstract_tcp_server* powner = pthread_context->powner; - LOG_PRINT("Handler thread with socket=" << pthread_context->m_socket << " STOPPED", LOG_LEVEL_2); - powner->m_connections_lock.lock(); - ::CloseHandle(pthread_context->m_htread); - pthread_context->powner->m_connections.erase(pthread_context->m_self_it); - powner->m_connections_lock.unlock(); - CoUninitialize(); - ::InterlockedDecrement(&pthis->m_threads_count); - return 1; - } - //---------------------------------------------------------------------------------------- - template - 
abstract_tcp_server::abstract_tcp_server():m_listen_socket(INVALID_SOCKET), - m_initialized(false), - m_stop_server(0), m_port(0), m_threads_count(0) - { - - } - - //---------------------------------------------------------------------------------------- - template - bool abstract_tcp_server::init_server(int port_no) - { - m_port = port_no; - WSADATA wsad = {0}; - int err = ::WSAStartup(MAKEWORD(2,2), &wsad); - if ( err != 0 || LOBYTE( wsad.wVersion ) != 2 || HIBYTE( wsad.wVersion ) != 2 ) - { - LOG_ERROR("Could not find a usable WinSock DLL, err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - return false; - } - - m_initialized = true; - - m_listen_socket = ::WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, 0); - if(INVALID_SOCKET == m_listen_socket) - { - err = ::WSAGetLastError(); - LOG_ERROR("Failed to create socket, err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - return false; - } - - int opt = 1; - setsockopt (m_listen_socket, SOL_SOCKET,SO_REUSEADDR, reinterpret_cast(&opt), sizeof(int)); - - sockaddr_in adr = {0}; - adr.sin_family = AF_INET; - adr.sin_addr.s_addr = htonl(INADDR_ANY); - adr.sin_port = (u_short)htons(port_no); - - err = bind(m_listen_socket, (const sockaddr*)&adr, sizeof(adr )); - if(SOCKET_ERROR == err ) - { - err = ::WSAGetLastError(); - LOG_PRINT("Failed to Bind, err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\"", LOG_LEVEL_2); - deinit_server(); - return false; - } - - ::InterlockedExchange(&m_stop_server, 0); - - return true; - } - //---------------------------------------------------------------------------------------- - template - bool abstract_tcp_server::deinit_server() - { - - if(!m_initialized) - return true; - - if(INVALID_SOCKET != m_listen_socket) - { - shutdown(m_listen_socket, SD_BOTH); - int res = closesocket(m_listen_socket); - if(SOCKET_ERROR == res) - { - int err = ::WSAGetLastError(); - LOG_ERROR("Failed to closesocket(), err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - } - m_listen_socket = INVALID_SOCKET; - } - - int res = ::WSACleanup(); - if(SOCKET_ERROR == res) - { - int err = ::WSAGetLastError(); - LOG_ERROR("Failed to WSACleanup(), err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - } - m_initialized = false; - - return true; - } - //---------------------------------------------------------------------------------------- - template - bool abstract_tcp_server::send_stop_signal() - { - InterlockedExchange(&m_stop_server, 1); - return true; - } - //---------------------------------------------------------------------------------------- - template - bool abstract_tcp_server::run_server() - { - int err = listen(m_listen_socket, 10000); - if(SOCKET_ERROR == err ) - { - err = ::WSAGetLastError(); - LOG_ERROR("Failed to listen, err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - return false; - } - - LOG_PRINT("Listening port "<< m_port << "...." 
, LOG_LEVEL_2); - - while(!m_stop_server) - { - sockaddr_in adr_from = {0}; - int adr_len = sizeof(adr_from); - fd_set read_fs = {0}; - read_fs.fd_count = 1; - read_fs.fd_array[0] = m_listen_socket; - TIMEVAL tv = {0}; - tv.tv_usec = 100; - int select_res = select(0, &read_fs, NULL, NULL, &tv); - if(!select_res) - continue; - SOCKET new_sock = WSAAccept(m_listen_socket, (sockaddr *)&adr_from, &adr_len, NULL, NULL); - LOG_PRINT("Accepted connection on socket=" << new_sock, LOG_LEVEL_2); - invoke_connection(new_sock, adr_from.sin_addr.s_addr, adr_from.sin_port); - } - - deinit_server(); - -#define ABSTR_TCP_SRV_WAIT_COUNT_MAX 5000 -#define ABSTR_TCP_SRV_WAIT_COUNT_INTERVAL 1000 - - int wait_count = 0; - - while(m_threads_count && wait_count*1000 < ABSTR_TCP_SRV_WAIT_COUNT_MAX) - { - ::Sleep(ABSTR_TCP_SRV_WAIT_COUNT_INTERVAL); - wait_count++; - } - LOG_PRINT("abstract_tcp_server exit with wait count=" << wait_count*ABSTR_TCP_SRV_WAIT_COUNT_INTERVAL << "(max=" << ABSTR_TCP_SRV_WAIT_COUNT_MAX <<")", LOG_LEVEL_0); - - return true; - } - //---------------------------------------------------------------------------------------- - template - bool abstract_tcp_server::invoke_connection(SOCKET hnew_sock, long ip_from, int post_from) - { - m_connections_lock.lock(); - m_connections.push_back(thread_context()); - m_connections_lock.unlock(); - m_connections.back().m_socket = hnew_sock; - m_connections.back().powner = this; - m_connections.back().m_self_it = --m_connections.end(); - m_connections.back().m_context.m_remote_ip = ip_from; - m_connections.back().m_context.m_remote_port = post_from; - m_connections.back().m_htread = threads_helper::create_thread(ConnectionHandlerProc, &m_connections.back()); - - return true; - } - //---------------------------------------------------------------------------------------- - - //---------------------------------------------------------------------------------------- - //---------------------------------------------------------------------------------------- -} -} -#endif //_ABSTRACT_TCP_SERVER_H_ diff --git a/contrib/epee/include/net/abstract_tcp_server2.h b/contrib/epee/include/net/abstract_tcp_server2.h deleted file mode 100644 index b8e291c327..0000000000 --- a/contrib/epee/include/net/abstract_tcp_server2.h +++ /dev/null @@ -1,276 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#ifndef _ABSTRACT_TCP_SERVER2_H_ -#define _ABSTRACT_TCP_SERVER2_H_ - - -#include -#include -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include -#include -#include "net_utils_base.h" -#include "syncobj.h" - - -#define ABSTRACT_SERVER_SEND_QUE_MAX_COUNT 100 - -namespace epee -{ -namespace net_utils -{ - - struct i_connection_filter - { - virtual bool is_remote_ip_allowed(uint32_t adress)=0; - protected: - virtual ~i_connection_filter(){} - }; - - /************************************************************************/ - /* */ - /************************************************************************/ - /// Represents a single connection from a client. - template - class connection - : public boost::enable_shared_from_this >, - private boost::noncopyable, - public i_service_endpoint - { - public: - typedef typename t_protocol_handler::connection_context t_connection_context; - /// Construct a connection with the given io_service. - explicit connection(boost::asio::io_service& io_service, - typename t_protocol_handler::config_type& config, volatile uint32_t& sock_count, i_connection_filter * &pfilter); - - virtual ~connection(); - /// Get the socket associated with the connection. - boost::asio::ip::tcp::socket& socket(); - - /// Start the first asynchronous operation for the connection. - bool start(bool is_income, bool is_multithreaded); - - void get_context(t_connection_context& context_){context_ = context;} - - void call_back_starter(); - private: - //----------------- i_service_endpoint --------------------- - virtual bool do_send(const void* ptr, size_t cb); - virtual bool close(); - virtual bool call_run_once_service_io(); - virtual bool request_callback(); - virtual boost::asio::io_service& get_io_service(); - virtual bool add_ref(); - virtual bool release(); - //------------------------------------------------------ - boost::shared_ptr > safe_shared_from_this(); - bool shutdown(); - /// Handle completion of a read operation. - void handle_read(const boost::system::error_code& e, - std::size_t bytes_transferred); - - /// Handle completion of a write operation. - void handle_write(const boost::system::error_code& e, size_t cb); - - /// Strand to ensure the connection's handlers are not called concurrently. - boost::asio::io_service::strand strand_; - - /// Socket for the connection. - boost::asio::ip::tcp::socket socket_; - - /// Buffer for incoming data. 
- boost::array buffer_; - - t_connection_context context; - volatile uint32_t m_want_close_connection; - std::atomic m_was_shutdown; - critical_section m_send_que_lock; - std::list m_send_que; - volatile uint32_t& m_ref_sockets_count; - i_connection_filter* &m_pfilter; - volatile bool m_is_multithreaded; - - //this should be the last one, because it could be wait on destructor, while other activities possible on other threads - t_protocol_handler m_protocol_handler; - //typename t_protocol_handler::config_type m_dummy_config; - std::list > > m_self_refs; // add_ref/release support - critical_section m_self_refs_lock; - }; - - - /************************************************************************/ - /* */ - /************************************************************************/ - template - class boosted_tcp_server - : private boost::noncopyable - { - public: - typedef boost::shared_ptr > connection_ptr; - typedef typename t_protocol_handler::connection_context t_connection_context; - /// Construct the server to listen on the specified TCP address and port, and - /// serve up files from the given directory. - boosted_tcp_server(); - explicit boosted_tcp_server(boost::asio::io_service& external_io_service); - ~boosted_tcp_server(); - - bool init_server(uint32_t port, const std::string address = "0.0.0.0"); - bool init_server(const std::string port, const std::string& address = "0.0.0.0"); - - /// Run the server's io_service loop. - bool run_server(size_t threads_count, bool wait = true, const boost::thread::attributes& attrs = boost::thread::attributes()); - - /// wait for service workers stop - bool timed_wait_server_stop(uint64_t wait_mseconds); - - /// Stop the server. - void send_stop_signal(); - - bool is_stop_signal_sent(); - - void set_threads_prefix(const std::string& prefix_name); - - bool deinit_server(){return true;} - - size_t get_threads_count(){return m_threads_count;} - - void set_connection_filter(i_connection_filter* pfilter); - - bool connect(const std::string& adr, const std::string& port, uint32_t conn_timeot, t_connection_context& cn, const std::string& bind_ip = "0.0.0.0"); - template - bool connect_async(const std::string& adr, const std::string& port, uint32_t conn_timeot, t_callback cb, const std::string& bind_ip = "0.0.0.0"); - - typename t_protocol_handler::config_type& get_config_object(){return m_config;} - - int get_binded_port(){return m_port;} - - boost::asio::io_service& get_io_service(){return io_service_;} - - struct idle_callback_conext_base - { - virtual ~idle_callback_conext_base(){} - - virtual bool call_handler(){return true;} - - idle_callback_conext_base(boost::asio::io_service& io_serice): - m_timer(io_serice) - {} - boost::asio::deadline_timer m_timer; - uint64_t m_period; - }; - - template - struct idle_callback_conext: public idle_callback_conext_base - { - idle_callback_conext(boost::asio::io_service& io_serice, t_handler& h, uint64_t period): - idle_callback_conext_base(io_serice), - m_handler(h) - {this->m_period = period;} - - t_handler m_handler; - virtual bool call_handler() - { - return m_handler(); - } - }; - - template - bool add_idle_handler(t_handler t_callback, uint64_t timeout_ms) - { - boost::shared_ptr ptr(new idle_callback_conext(io_service_, t_callback, timeout_ms)); - //needed call handler here ?... 
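// A minimal sketch of the re-arming timer pattern used by add_idle_handler()/
// global_timer_handler() here, assuming plain boost::asio and boost::function
// (arm/handler are illustrative names, not epee's). A handler returning false
// stops the cycle, matching the "don't want to be called anymore" convention
// below:
//
//   void arm(boost::asio::deadline_timer& timer, uint64_t period_ms,
//            boost::function<bool()> handler)
//   {
//     timer.expires_from_now(boost::posix_time::milliseconds(period_ms));
//     timer.async_wait([&timer, period_ms, handler](const boost::system::error_code&)
//     {
//       if(handler())                       // false: stop rescheduling
//         arm(timer, period_ms, handler);   // true: re-arm for the next period
//     });
//   }
//
// The timer must outlive the chain of waits; here that is handled by the
// shared_ptr to the idle_callback_conext kept alive inside the bound handler.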
- ptr->m_timer.expires_from_now(boost::posix_time::milliseconds(ptr->m_period)); - ptr->m_timer.async_wait(boost::bind(&boosted_tcp_server::global_timer_handler, this, ptr)); - return true; - } - - bool global_timer_handler(/*const boost::system::error_code& err, */boost::shared_ptr ptr) - { - //if handler return false - he don't want to be called anymore - if(!ptr->call_handler()) - return true; - ptr->m_timer.expires_from_now(boost::posix_time::milliseconds(ptr->m_period)); - ptr->m_timer.async_wait(boost::bind(&boosted_tcp_server::global_timer_handler, this, ptr)); - return true; - } - - template - bool async_call(t_handler t_callback) - { - io_service_.post(t_callback); - return true; - } - - protected: - typename t_protocol_handler::config_type m_config; - - private: - /// Run the server's io_service loop. - bool worker_thread(); - /// Handle completion of an asynchronous accept operation. - void handle_accept(const boost::system::error_code& e); - - bool is_thread_worker(); - - /// The io_service used to perform asynchronous operations. - std::unique_ptr m_io_service_local_instance; - boost::asio::io_service& io_service_; - - /// Acceptor used to listen for incoming connections. - boost::asio::ip::tcp::acceptor acceptor_; - - /// The next connection to be accepted. - connection_ptr new_connection_; - std::atomic m_stop_signal_sent; - uint32_t m_port; - volatile uint32_t m_sockets_count; - std::string m_address; - std::string m_thread_name_prefix; - size_t m_threads_count; - i_connection_filter* m_pfilter; - std::vector > m_threads; - boost::thread::id m_main_thread_id; - critical_section m_threads_lock; - volatile uint32_t m_thread_index; - }; -} -} - -#include "abstract_tcp_server2.inl" - -#endif diff --git a/contrib/epee/include/net/abstract_tcp_server2.inl b/contrib/epee/include/net/abstract_tcp_server2.inl deleted file mode 100644 index 0265d57ee9..0000000000 --- a/contrib/epee/include/net/abstract_tcp_server2.inl +++ /dev/null @@ -1,816 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#include "net_utils_base.h" -#include -#include -#include -#include -#include -#include -#include -#include -#include "include_base_utils.h" -#include "misc_language.h" -#include "pragma_comp_defs.h" - -PRAGMA_WARNING_PUSH -namespace epee -{ -namespace net_utils -{ - /************************************************************************/ - /* */ - /************************************************************************/ -PRAGMA_WARNING_DISABLE_VS(4355) - - template - connection::connection(boost::asio::io_service& io_service, - typename t_protocol_handler::config_type& config, volatile uint32_t& sock_count, i_connection_filter* &pfilter) - : strand_(io_service), - socket_(io_service), - m_want_close_connection(0), - m_was_shutdown(0), - m_ref_sockets_count(sock_count), - m_pfilter(pfilter), - m_protocol_handler(this, config, context) - { - boost::interprocess::ipcdetail::atomic_inc32(&m_ref_sockets_count); - } -PRAGMA_WARNING_DISABLE_VS(4355) - //--------------------------------------------------------------------------------- - template - connection::~connection() - { - if(!m_was_shutdown) - { - LOG_PRINT_L3("[sock " << socket_.native_handle() << "] Socket destroyed without shutdown."); - shutdown(); - } - - LOG_PRINT_L3("[sock " << socket_.native_handle() << "] Socket destroyed"); - boost::interprocess::ipcdetail::atomic_dec32(&m_ref_sockets_count); - } - //--------------------------------------------------------------------------------- - template - boost::asio::ip::tcp::socket& connection::socket() - { - return socket_; - } - //--------------------------------------------------------------------------------- - template - boost::shared_ptr > connection::safe_shared_from_this() - { - try - { - return connection::shared_from_this(); - } - catch (const boost::bad_weak_ptr&) - { - // It happens when the connection is being deleted - return boost::shared_ptr >(); - } - } - //--------------------------------------------------------------------------------- - template - bool connection::start(bool is_income, bool is_multithreaded) - { - TRY_ENTRY(); - - // Use safe_shared_from_this, because of this is public method and it can be called on the object being deleted - auto self = safe_shared_from_this(); - if(!self) - return false; - - m_is_multithreaded = is_multithreaded; - - boost::system::error_code ec; - auto remote_ep = socket_.remote_endpoint(ec); - CHECK_AND_NO_ASSERT_MES(!ec, false, "Failed to get remote endpoint: " << ec.message() << ':' << ec.value()); - - auto local_ep = socket_.local_endpoint(ec); - CHECK_AND_NO_ASSERT_MES(!ec, false, "Failed to get local endpoint: " << ec.message() << ':' << ec.value()); - - context = boost::value_initialized(); - long ip_ = boost::asio::detail::socket_ops::host_to_network_long(remote_ep.address().to_v4().to_ulong()); - - context.set_details(boost::uuids::random_generator()(), ip_, remote_ep.port(), is_income); - LOG_PRINT_L3("[sock " << socket_.native_handle() << "] new connection from " << 
print_connection_context_short(context) << - " to " << local_ep.address().to_string() << ':' << local_ep.port() << - ", total sockets objects " << m_ref_sockets_count); - - if(m_pfilter && !m_pfilter->is_remote_ip_allowed(context.m_remote_ip)) - { - LOG_PRINT_L2("[sock " << socket_.native_handle() << "] ip denied " << string_tools::get_ip_string_from_int32(context.m_remote_ip) << ", shutdowning connection"); - close(); - return false; - } - - m_protocol_handler.after_init_connection(); - - socket_.async_read_some(boost::asio::buffer(buffer_), - strand_.wrap( - boost::bind(&connection::handle_read, self, - boost::asio::placeholders::error, - boost::asio::placeholders::bytes_transferred))); - - return true; - - CATCH_ENTRY_L0("connection::start()", false); - } - //--------------------------------------------------------------------------------- - template - bool connection::request_callback() - { - TRY_ENTRY(); - LOG_PRINT_L2("[" << print_connection_context_short(context) << "] request_callback"); - // Use safe_shared_from_this, because of this is public method and it can be called on the object being deleted - auto self = safe_shared_from_this(); - if(!self) - return false; - - strand_.post(boost::bind(&connection::call_back_starter, self)); - CATCH_ENTRY_L0("connection::request_callback()", false); - return true; - } - //--------------------------------------------------------------------------------- - template - boost::asio::io_service& connection::get_io_service() - { - return socket_.get_io_service(); - } - //--------------------------------------------------------------------------------- - template - bool connection::add_ref() - { - TRY_ENTRY(); - LOG_PRINT_L4("[sock " << socket_.native_handle() << "] add_ref"); - CRITICAL_REGION_LOCAL(m_self_refs_lock); - - // Use safe_shared_from_this, because of this is public method and it can be called on the object being deleted - auto self = safe_shared_from_this(); - if(!self) - return false; - if(m_was_shutdown) - return false; - m_self_refs.push_back(self); - return true; - CATCH_ENTRY_L0("connection::add_ref()", false); - } - //--------------------------------------------------------------------------------- - template - bool connection::release() - { - TRY_ENTRY(); - boost::shared_ptr > back_connection_copy; - LOG_PRINT_L4("[sock " << socket_.native_handle() << "] release"); - CRITICAL_REGION_BEGIN(m_self_refs_lock); - CHECK_AND_ASSERT_MES(m_self_refs.size(), false, "[sock " << socket_.native_handle() << "] m_self_refs empty at connection::release() call"); - //erasing from container without additional copy can cause start deleting object, including m_self_refs - back_connection_copy = m_self_refs.back(); - m_self_refs.pop_back(); - CRITICAL_REGION_END(); - return true; - CATCH_ENTRY_L0("connection::release()", false); - } - //--------------------------------------------------------------------------------- - template - void connection::call_back_starter() - { - TRY_ENTRY(); - LOG_PRINT_L2("[" << print_connection_context_short(context) << "] fired_callback"); - m_protocol_handler.handle_qued_callback(); - CATCH_ENTRY_L0("connection::call_back_starter()", void()); - } - //--------------------------------------------------------------------------------- - template - void connection::handle_read(const boost::system::error_code& e, - std::size_t bytes_transferred) - { - TRY_ENTRY(); - LOG_PRINT_L4("[sock " << socket_.native_handle() << "] Async read calledback."); - - if (!e) - { - LOG_PRINT("[sock " << socket_.native_handle() << "] RECV " << 
bytes_transferred, LOG_LEVEL_4); - context.m_last_recv = time(NULL); - context.m_recv_cnt += bytes_transferred; - bool recv_res = m_protocol_handler.handle_recv(buffer_.data(), bytes_transferred); - if(!recv_res) - { - LOG_PRINT("[sock " << socket_.native_handle() << "] protocol_want_close", LOG_LEVEL_4); - - //some error in protocol, protocol handler ask to close connection - boost::interprocess::ipcdetail::atomic_write32(&m_want_close_connection, 1); - bool do_shutdown = false; - CRITICAL_REGION_BEGIN(m_send_que_lock); - if(!m_send_que.size()) - do_shutdown = true; - CRITICAL_REGION_END(); - if(do_shutdown) - shutdown(); - }else - { - socket_.async_read_some(boost::asio::buffer(buffer_), - strand_.wrap( - boost::bind(&connection::handle_read, connection::shared_from_this(), - boost::asio::placeholders::error, - boost::asio::placeholders::bytes_transferred))); - LOG_PRINT_L4("[sock " << socket_.native_handle() << "]Async read requested."); - } - }else - { - LOG_PRINT_L3("[sock " << socket_.native_handle() << "] Some not success at read: " << e.message() << ':' << e.value()); - if(e.value() != 2) - { - LOG_PRINT_L3("[sock " << socket_.native_handle() << "] Some problems at read: " << e.message() << ':' << e.value()); - shutdown(); - } - } - // If an error occurs then no new asynchronous operations are started. This - // means that all shared_ptr references to the connection object will - // disappear and the object will be destroyed automatically after this - // handler returns. The connection class's destructor closes the socket. - CATCH_ENTRY_L0("connection::handle_read", void()); - } - //--------------------------------------------------------------------------------- - template - bool connection::call_run_once_service_io() - { - TRY_ENTRY(); - if(!m_is_multithreaded) - { - //single thread model, we can wait in blocked call - size_t cnt = socket_.get_io_service().run_one(); - if(!cnt)//service is going to quit - return false; - }else - { - //multi thread model, we can't(!) wait in blocked call - //so we make non blocking call and releasing CPU by calling sleep(0); - //if no handlers were called - //TODO: Maybe we need to have have critical section + event + callback to upper protocol to - //ask it inside(!) critical region if we still able to go in event wait... 
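// The comment above describes the two service-pump modes; a minimal sketch of
// the same logic over a bare io_service, assuming boost::asio and boost::thread
// (pump_once is an illustrative name):
//
//   bool pump_once(boost::asio::io_service& ios, bool multithreaded)
//   {
//     if(!multithreaded)
//       return 0 != ios.run_one();    // blocking: wait until one handler runs
//     if(0 == ios.poll_one())         // non-blocking: run one ready handler
//       boost::this_thread::sleep(boost::posix_time::milliseconds(0)); // yield CPU
//     return true;
//   }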
- size_t cnt = socket_.get_io_service().poll_one(); - if(!cnt) - misc_utils::sleep_no_w(0); - } - - return true; - CATCH_ENTRY_L0("connection::call_run_once_service_io", false); - } - //--------------------------------------------------------------------------------- - template - bool connection::do_send(const void* ptr, size_t cb) - { - TRY_ENTRY(); - // Use safe_shared_from_this, because of this is public method and it can be called on the object being deleted - auto self = safe_shared_from_this(); - if(!self) - return false; - if(m_was_shutdown) - return false; - - LOG_PRINT("[sock " << socket_.native_handle() << "] SEND " << cb, LOG_LEVEL_4); - context.m_last_send = time(NULL); - context.m_send_cnt += cb; - //some data should be wrote to stream - //request complete - - epee::critical_region_t send_guard(m_send_que_lock); - if(m_send_que.size() > ABSTRACT_SERVER_SEND_QUE_MAX_COUNT) - { - send_guard.unlock(); - LOG_WARNING("send que size is more than ABSTRACT_SERVER_SEND_QUE_MAX_COUNT(" << ABSTRACT_SERVER_SEND_QUE_MAX_COUNT << "), shutting down connection", LOG_LEVEL_2); - close(); - return false; - } - - m_send_que.resize(m_send_que.size()+1); - m_send_que.back().assign((const char*)ptr, cb); - - if(m_send_que.size() > 1) - { - //active operation should be in progress, nothing to do, just wait last operation callback - }else - { - //no active operation - if(m_send_que.size()!=1) - { - LOG_ERROR("Looks like no active operations, but send que size != 1!!"); - return false; - } - - boost::asio::async_write(socket_, boost::asio::buffer(m_send_que.front().data(), m_send_que.front().size()), - //strand_.wrap( - boost::bind(&connection::handle_write, self, _1, _2) - //) - ); - - LOG_PRINT_L4("[sock " << socket_.native_handle() << "] Async send requested " << m_send_que.front().size()); - } - - return true; - - CATCH_ENTRY_L0("connection::do_send", false); - } - //--------------------------------------------------------------------------------- - template - bool connection::shutdown() - { - // Initiate graceful connection closure. 
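// do_send() above and handle_write() below implement a single-writer send
// queue: a buffer is appended under the queue lock, a new async_write() is
// started only when the queue was empty, and each completion pops the finished
// buffer and chains the next one. A stripped-down sketch of that pattern,
// assuming plain boost::asio (send/write_next are illustrative names; locking
// and error reporting omitted):
//
//   void write_next(boost::asio::ip::tcp::socket& s, std::deque<std::string>& q);
//
//   void send(boost::asio::ip::tcp::socket& s, std::deque<std::string>& q,
//             std::string data)
//   {
//     bool was_idle = q.empty();
//     q.push_back(std::move(data));
//     if(was_idle)              // otherwise a write is already in flight
//       write_next(s, q);
//   }
//
//   void write_next(boost::asio::ip::tcp::socket& s, std::deque<std::string>& q)
//   {
//     boost::asio::async_write(s, boost::asio::buffer(q.front()),
//       [&s, &q](const boost::system::error_code& e, std::size_t)
//       {
//         q.pop_front();
//         if(!e && !q.empty())
//           write_next(s, q);   // chain the next queued buffer
//       });
//   }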
- boost::system::error_code ignored_ec; - socket_.shutdown(boost::asio::ip::tcp::socket::shutdown_both, ignored_ec); - m_was_shutdown = true; - m_protocol_handler.release_protocol(); - return true; - } - //--------------------------------------------------------------------------------- - template - bool connection::close() - { - TRY_ENTRY(); - LOG_PRINT_L4("[sock " << socket_.native_handle() << "] Que Shutdown called."); - size_t send_que_size = 0; - CRITICAL_REGION_BEGIN(m_send_que_lock); - send_que_size = m_send_que.size(); - CRITICAL_REGION_END(); - boost::interprocess::ipcdetail::atomic_write32(&m_want_close_connection, 1); - if(!send_que_size) - { - shutdown(); - } - - return true; - CATCH_ENTRY_L0("connection::close", false); - } - //--------------------------------------------------------------------------------- - template - void connection::handle_write(const boost::system::error_code& e, size_t cb) - { - TRY_ENTRY(); - LOG_PRINT_L4("[sock " << socket_.native_handle() << "] Async send calledback " << cb); - - if (e) - { - LOG_PRINT_L0("[sock " << socket_.native_handle() << "] Some problems at write: " << e.message() << ':' << e.value()); - shutdown(); - return; - } - - bool do_shutdown = false; - CRITICAL_REGION_BEGIN(m_send_que_lock); - if(m_send_que.empty()) - { - LOG_ERROR("[sock " << socket_.native_handle() << "] m_send_que.size() == 0 at handle_write!"); - return; - } - - m_send_que.pop_front(); - if(m_send_que.empty()) - { - if(boost::interprocess::ipcdetail::atomic_read32(&m_want_close_connection)) - { - do_shutdown = true; - } - }else - { - //have more data to send - boost::asio::async_write(socket_, boost::asio::buffer(m_send_que.front().data(), m_send_que.front().size()), - //strand_.wrap( - boost::bind(&connection::handle_write, connection::shared_from_this(), _1, _2)); - //); - } - CRITICAL_REGION_END(); - - if(do_shutdown) - { - shutdown(); - } - CATCH_ENTRY_L0("connection::handle_write", void()); - } - /************************************************************************/ - /* */ - /************************************************************************/ - template - boosted_tcp_server::boosted_tcp_server(): - m_io_service_local_instance(new boost::asio::io_service()), - io_service_(*m_io_service_local_instance.get()), - acceptor_(io_service_), - new_connection_(new connection(io_service_, m_config, m_sockets_count, m_pfilter)), - m_stop_signal_sent(false), m_port(0), m_sockets_count(0), m_threads_count(0), m_pfilter(NULL), m_thread_index(0) - { - m_thread_name_prefix = "NET"; - } - - template - boosted_tcp_server::boosted_tcp_server(boost::asio::io_service& extarnal_io_service): - io_service_(extarnal_io_service), - acceptor_(io_service_), - new_connection_(new connection(io_service_, m_config, m_sockets_count, m_pfilter)), - m_stop_signal_sent(false), m_port(0), m_sockets_count(0), m_threads_count(0), m_pfilter(NULL), m_thread_index(0) - { - m_thread_name_prefix = "NET"; - } - //--------------------------------------------------------------------------------- - template - boosted_tcp_server::~boosted_tcp_server() - { - this->send_stop_signal(); - timed_wait_server_stop(10000); - } - //--------------------------------------------------------------------------------- - template - bool boosted_tcp_server::init_server(uint32_t port, const std::string address) - { - TRY_ENTRY(); - m_stop_signal_sent = false; - m_port = port; - m_address = address; - // Open the acceptor with the option to reuse the address (i.e. SO_REUSEADDR). 
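// init_server() below follows the canonical boost::asio acceptor sequence:
// open, set SO_REUSEADDR, bind, listen. Reading local_endpoint() back after
// bind() is what makes "port 0" work: the OS picks a free port and the server
// records the real one. A minimal sketch of the same steps (ios is an
// illustrative name for an io_service):
//
//   boost::asio::ip::tcp::endpoint ep(boost::asio::ip::address_v4::any(), 0);
//   boost::asio::ip::tcp::acceptor acc(ios);
//   acc.open(ep.protocol());
//   acc.set_option(boost::asio::ip::tcp::acceptor::reuse_address(true));
//   acc.bind(ep);
//   acc.listen();
//   unsigned short actual_port = acc.local_endpoint().port(); // OS-chosen port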
- boost::asio::ip::tcp::resolver resolver(io_service_); - boost::asio::ip::tcp::resolver::query query(address, boost::lexical_cast(port)); - boost::asio::ip::tcp::endpoint endpoint = *resolver.resolve(query); - acceptor_.open(endpoint.protocol()); - acceptor_.set_option(boost::asio::ip::tcp::acceptor::reuse_address(true)); - acceptor_.bind(endpoint); - acceptor_.listen(); - boost::asio::ip::tcp::endpoint binded_endpoint = acceptor_.local_endpoint(); - m_port = binded_endpoint.port(); - acceptor_.async_accept(new_connection_->socket(), - boost::bind(&boosted_tcp_server::handle_accept, this, - boost::asio::placeholders::error)); - - return true; - CATCH_ENTRY_L0("boosted_tcp_server::init_server", false); - } - //----------------------------------------------------------------------------- -PUSH_WARNINGS -DISABLE_GCC_WARNING(maybe-uninitialized) - template - bool boosted_tcp_server::init_server(const std::string port, const std::string& address) - { - uint32_t p = 0; - - if (port.size() && !string_tools::get_xtype_from_string(p, port)) { - LOG_ERROR("Failed to convert port no = " << port); - return false; - } - return this->init_server(p, address); - } -POP_WARNINGS - //--------------------------------------------------------------------------------- - template - bool boosted_tcp_server::worker_thread() - { - TRY_ENTRY(); - uint32_t local_thr_index = boost::interprocess::ipcdetail::atomic_inc32(&m_thread_index); - std::string thread_name = std::string("[") + m_thread_name_prefix; - thread_name += boost::to_string(local_thr_index) + "]"; - log_space::log_singletone::set_thread_log_prefix(thread_name); - while(!m_stop_signal_sent) - { - try - { - io_service_.run(); - } - catch(const std::exception& ex) - { - LOG_ERROR("Exception at server worker thread, what=" << ex.what()); - } - catch(...) - { - LOG_ERROR("Exception at server worker thread, unknown execption"); - } - } - LOG_PRINT_L4("Worker thread finished"); - return true; - CATCH_ENTRY_L0("boosted_tcp_server::worker_thread", false); - } - //--------------------------------------------------------------------------------- - template - void boosted_tcp_server::set_threads_prefix(const std::string& prefix_name) - { - m_thread_name_prefix = prefix_name; - } - //--------------------------------------------------------------------------------- - template - void boosted_tcp_server::set_connection_filter(i_connection_filter* pfilter) - { - m_pfilter = pfilter; - } - //--------------------------------------------------------------------------------- - template - bool boosted_tcp_server::run_server(size_t threads_count, bool wait, const boost::thread::attributes& attrs) - { - TRY_ENTRY(); - m_threads_count = threads_count; - m_main_thread_id = boost::this_thread::get_id(); - log_space::log_singletone::set_thread_log_prefix("[SRV_MAIN]"); - while(!m_stop_signal_sent) - { - - // Create a pool of threads to run all of the io_services. - CRITICAL_REGION_BEGIN(m_threads_lock); - for (std::size_t i = 0; i < threads_count; ++i) - { - boost::shared_ptr thread(new boost::thread( - attrs, boost::bind(&boosted_tcp_server::worker_thread, this))); - m_threads.push_back(thread); - } - CRITICAL_REGION_END(); - // Wait for all threads in the pool to exit. - if(wait) - { - for (std::size_t i = 0; i < m_threads.size(); ++i) - m_threads[i]->join(); - m_threads.clear(); - - }else - { - return true; - } - - if(wait && !m_stop_signal_sent) - { - //some problems with the listening socket ?.. 
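// run_server() reaches this branch because io_service::run() returns once the
// service runs out of work, which can happen here without a stop request. A
// common alternative (not used in this file) is to hold an
// io_service::work guard so run() blocks until an explicit stop; a sketch,
// with n as an illustrative thread count:
//
//   boost::asio::io_service ios;
//   boost::asio::io_service::work keep_alive(ios); // keeps run() from returning
//   std::vector<boost::shared_ptr<boost::thread> > pool;
//   for(std::size_t i = 0; i < n; ++i)
//     pool.push_back(boost::shared_ptr<boost::thread>(
//       new boost::thread([&ios]{ ios.run(); })));
//   // ... later: ios.stop(); then join the pool
//   for(std::size_t i = 0; i < pool.size(); ++i)
//     pool[i]->join();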
- LOG_PRINT_L0("Net service stopped without stop request, restarting..."); - if(!this->init_server(m_port, m_address)) - { - LOG_PRINT_L0("Reiniting service failed, exit."); - return false; - }else - { - LOG_PRINT_L0("Reiniting OK."); - } - } - } - return true; - CATCH_ENTRY_L0("boosted_tcp_server::run_server", false); - } - //--------------------------------------------------------------------------------- - template - bool boosted_tcp_server::is_thread_worker() - { - TRY_ENTRY(); - CRITICAL_REGION_LOCAL(m_threads_lock); - BOOST_FOREACH(boost::shared_ptr& thp, m_threads) - { - if(thp->get_id() == boost::this_thread::get_id()) - return true; - } - if(m_threads_count == 1 && boost::this_thread::get_id() == m_main_thread_id) - return true; - return false; - CATCH_ENTRY_L0("boosted_tcp_server::is_thread_worker", false); - } - //--------------------------------------------------------------------------------- - template - bool boosted_tcp_server::timed_wait_server_stop(uint64_t wait_mseconds) - { - TRY_ENTRY(); - boost::chrono::milliseconds ms(wait_mseconds); - for (std::size_t i = 0; i < m_threads.size(); ++i) - { - if(m_threads[i]->joinable() && !m_threads[i]->try_join_for(ms)) - { - LOG_PRINT_L0("Interrupting thread " << m_threads[i]->native_handle()); - m_threads[i]->interrupt(); - } - } - return true; - CATCH_ENTRY_L0("boosted_tcp_server::timed_wait_server_stop", false); - } - //--------------------------------------------------------------------------------- - template - void boosted_tcp_server::send_stop_signal() - { - m_stop_signal_sent = true; - TRY_ENTRY(); - io_service_.stop(); - CATCH_ENTRY_L0("boosted_tcp_server::send_stop_signal()", void()); - } - //--------------------------------------------------------------------------------- - template - bool boosted_tcp_server::is_stop_signal_sent() - { - return m_stop_signal_sent; - } - //--------------------------------------------------------------------------------- - template - void boosted_tcp_server::handle_accept(const boost::system::error_code& e) - { - TRY_ENTRY(); - if (!e) - { - connection_ptr conn(std::move(new_connection_)); - - new_connection_.reset(new connection(io_service_, m_config, m_sockets_count, m_pfilter)); - acceptor_.async_accept(new_connection_->socket(), - boost::bind(&boosted_tcp_server::handle_accept, this, - boost::asio::placeholders::error)); - - bool r = conn->start(true, 1 < m_threads_count); - if (!r) - LOG_ERROR("[sock " << conn->socket().native_handle() << "] Failed to start connection, connections_count = " << m_sockets_count); - }else - { - LOG_ERROR("Some problems at accept: " << e.message() << ", connections_count = " << m_sockets_count); - } - CATCH_ENTRY_L0("boosted_tcp_server::handle_accept", void()); - } - //--------------------------------------------------------------------------------- - template - bool boosted_tcp_server::connect(const std::string& adr, const std::string& port, uint32_t conn_timeout, t_connection_context& conn_context, const std::string& bind_ip) - { - TRY_ENTRY(); - - connection_ptr new_connection_l(new connection(io_service_, m_config, m_sockets_count, m_pfilter) ); - boost::asio::ip::tcp::socket& sock_ = new_connection_l->socket(); - - ////////////////////////////////////////////////////////////////////////// - boost::asio::ip::tcp::resolver resolver(io_service_); - boost::asio::ip::tcp::resolver::query query(boost::asio::ip::tcp::v4(), adr, port); - boost::asio::ip::tcp::resolver::iterator iterator = resolver.resolve(query); - boost::asio::ip::tcp::resolver::iterator end; 
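// A default-constructed resolver iterator is the end-of-results sentinel that
// the check just below relies on. A sketch of walking every resolved endpoint
// instead of taking only the first, reusing the resolver/query declared above
// (pre-1.66 iterator API, as used throughout this file; assumes <iostream>):
//
//   boost::asio::ip::tcp::resolver::iterator it = resolver.resolve(query);
//   boost::asio::ip::tcp::resolver::iterator end;   // default-constructed
//   for(; it != end; ++it)
//     std::cout << it->endpoint() << std::endl;     // each candidate endpoint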
- if(iterator == end) - { - LOG_ERROR("Failed to resolve " << adr); - return false; - } - ////////////////////////////////////////////////////////////////////////// - - - //boost::asio::ip::tcp::endpoint remote_endpoint(boost::asio::ip::address::from_string(addr.c_str()), port); - boost::asio::ip::tcp::endpoint remote_endpoint(*iterator); - - sock_.open(remote_endpoint.protocol()); - if(bind_ip != "0.0.0.0" && bind_ip != "0" && bind_ip != "" ) - { - boost::asio::ip::tcp::endpoint local_endpoint(boost::asio::ip::address::from_string(adr.c_str()), 0); - sock_.bind(local_endpoint); - } - - /* - NOTICE: be careful to make sync connection from event handler: in case if all threads suddenly do sync connect, there will be no thread to dispatch events from io service. - */ - - boost::system::error_code ec = boost::asio::error::would_block; - - //have another free thread(s), work in wait mode, without event handling - struct local_async_context - { - boost::system::error_code ec; - boost::mutex connect_mut; - boost::condition_variable cond; - }; - - boost::shared_ptr local_shared_context(new local_async_context()); - local_shared_context->ec = boost::asio::error::would_block; - boost::unique_lock lock(local_shared_context->connect_mut); - auto connect_callback = [](boost::system::error_code ec_, boost::shared_ptr shared_context) - { - shared_context->connect_mut.lock(); shared_context->ec = ec_; shared_context->connect_mut.unlock(); shared_context->cond.notify_one(); - }; - - sock_.async_connect(remote_endpoint, boost::bind(connect_callback, _1, local_shared_context)); - while(local_shared_context->ec == boost::asio::error::would_block) - { - bool r = local_shared_context->cond.timed_wait(lock, boost::get_system_time() + boost::posix_time::milliseconds(conn_timeout)); - if(local_shared_context->ec == boost::asio::error::would_block && !r) - { - //timeout - sock_.close(); - LOG_PRINT_L3("Failed to connect to " << adr << ":" << port << ", because of timeout (" << conn_timeout << ")"); - return false; - } - } - ec = local_shared_context->ec; - - if (ec || !sock_.is_open()) - { - LOG_PRINT("Some problems at connect, message: " << ec.message(), LOG_LEVEL_3); - return false; - } - - LOG_PRINT_L3("Connected success to " << adr << ':' << port); - - bool r = new_connection_l->start(false, 1 < m_threads_count); - if (r) - { - new_connection_l->get_context(conn_context); - //new_connection_l.reset(new connection(io_service_, m_config, m_sockets_count, m_pfilter)); - } - else - { - LOG_ERROR("[sock " << new_connection_->socket().native_handle() << "] Failed to start connection, connections_count = " << m_sockets_count); - } - - return r; - - CATCH_ENTRY_L0("boosted_tcp_server::connect", false); - } - //--------------------------------------------------------------------------------- - template template - bool boosted_tcp_server::connect_async(const std::string& adr, const std::string& port, uint32_t conn_timeout, t_callback cb, const std::string& bind_ip) - { - TRY_ENTRY(); - connection_ptr new_connection_l(new connection(io_service_, m_config, m_sockets_count, m_pfilter) ); - boost::asio::ip::tcp::socket& sock_ = new_connection_l->socket(); - - ////////////////////////////////////////////////////////////////////////// - boost::asio::ip::tcp::resolver resolver(io_service_); - boost::asio::ip::tcp::resolver::query query(boost::asio::ip::tcp::v4(), adr, port); - boost::asio::ip::tcp::resolver::iterator iterator = resolver.resolve(query); - boost::asio::ip::tcp::resolver::iterator end; - if(iterator == end) - { - 
LOG_ERROR("Failed to resolve " << adr); - return false; - } - ////////////////////////////////////////////////////////////////////////// - boost::asio::ip::tcp::endpoint remote_endpoint(*iterator); - - sock_.open(remote_endpoint.protocol()); - if(bind_ip != "0.0.0.0" && bind_ip != "0" && bind_ip != "" ) - { - boost::asio::ip::tcp::endpoint local_endpoint(boost::asio::ip::address::from_string(adr.c_str()), 0); - sock_.bind(local_endpoint); - } - - boost::shared_ptr sh_deadline(new boost::asio::deadline_timer(io_service_)); - //start deadline - sh_deadline->expires_from_now(boost::posix_time::milliseconds(conn_timeout)); - sh_deadline->async_wait([=](const boost::system::error_code& error) - { - if(error != boost::asio::error::operation_aborted) - { - LOG_PRINT_L3("Failed to connect to " << adr << ':' << port << ", because of timeout (" << conn_timeout << ")"); - new_connection_l->socket().close(); - } - }); - //start async connect - sock_.async_connect(remote_endpoint, [=](const boost::system::error_code& ec_) - { - t_connection_context conn_context = AUTO_VAL_INIT(conn_context); - boost::system::error_code ignored_ec; - boost::asio::ip::tcp::socket::endpoint_type lep = new_connection_l->socket().local_endpoint(ignored_ec); - if(!ec_) - {//success - if(!sh_deadline->cancel()) - { - cb(conn_context, boost::asio::error::operation_aborted);//this mean that deadline timer already queued callback with cancel operation, rare situation - }else - { - LOG_PRINT_L3("[sock " << new_connection_l->socket().native_handle() << "] Connected success to " << adr << ':' << port << - " from " << lep.address().to_string() << ':' << lep.port()); - bool r = new_connection_l->start(false, 1 < m_threads_count); - if (r) - { - new_connection_l->get_context(conn_context); - cb(conn_context, ec_); - } - else - { - LOG_PRINT_L3("[sock " << new_connection_l->socket().native_handle() << "] Failed to start connection to " << adr << ':' << port); - cb(conn_context, boost::asio::error::fault); - } - } - }else - { - LOG_PRINT_L3("[sock " << new_connection_l->socket().native_handle() << "] Failed to connect to " << adr << ':' << port << - " from " << lep.address().to_string() << ':' << lep.port() << ": " << ec_.message() << ':' << ec_.value()); - cb(conn_context, ec_); - } - }); - return true; - CATCH_ENTRY_L0("boosted_tcp_server::connect_async", false); - } -} -} -PRAGMA_WARNING_POP diff --git a/contrib/epee/include/net/abstract_tcp_server_cp.h b/contrib/epee/include/net/abstract_tcp_server_cp.h deleted file mode 100644 index b6410e120f..0000000000 --- a/contrib/epee/include/net/abstract_tcp_server_cp.h +++ /dev/null @@ -1,233 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#ifndef _LEVIN_CP_SERVER_H_ -#define _LEVIN_CP_SERVER_H_ - -#include -#include -#include -#include -#include - -#include "misc_log_ex.h" -//#include "threads_helper.h" -#include "syncobj.h" -#define ENABLE_PROFILING -#include "profile_tools.h" -#include "net_utils_base.h" -#include "pragma_comp_defs.h" - -#define LEVIN_DEFAULT_DATA_BUFF_SIZE 2000 - -namespace epee -{ -namespace net_utils -{ - - template - class cp_server_impl//: public abstract_handler - { - public: - cp_server_impl(/*abstract_handler* phandler = NULL*/); - virtual ~cp_server_impl(); - - bool init_server(int port_no); - bool deinit_server(); - bool run_server(int threads_count = 0); - bool send_stop_signal(); - bool is_stop_signal(); - virtual bool on_net_idle(){return true;} - size_t get_active_connections_num(); - typename TProtocol::config_type& get_config_object(){return m_config;} - private: - enum overlapped_operation_type - { - op_type_recv, - op_type_send, - op_type_stop - }; - - struct io_data_base - { - OVERLAPPED m_overlapped; - WSABUF DataBuf; - overlapped_operation_type m_op_type; - DWORD TotalBuffBytes; - volatile LONG m_is_in_use; - char Buffer[1]; - }; - -PRAGMA_WARNING_PUSH -PRAGMA_WARNING_DISABLE_VS(4355) - template - struct connection: public net_utils::i_service_endpoint - { - connection(typename TProtocol::config_type& ref_config):m_sock(INVALID_SOCKET), m_tprotocol_handler(this, ref_config, context), m_psend_data(NULL), m_precv_data(NULL), m_asked_to_shutdown(0), m_connection_shutwoned(0) - { - } - - //connection():m_sock(INVALID_SOCKET), m_tprotocol_handler(this, m_dummy_config, context), m_psend_data(NULL), m_precv_data(NULL), m_asked_to_shutdown(0), m_connection_shutwoned(0) - //{ - //} - - connection& operator=(const connection& obj) - { - return *this; - } - - bool init_buffers() - { - m_psend_data = (io_data_base*)new char[sizeof(io_data_base) + LEVIN_DEFAULT_DATA_BUFF_SIZE-1]; - m_psend_data->TotalBuffBytes = LEVIN_DEFAULT_DATA_BUFF_SIZE; - m_precv_data = (io_data_base*)new char[sizeof(io_data_base) + LEVIN_DEFAULT_DATA_BUFF_SIZE-1]; - m_precv_data->TotalBuffBytes = LEVIN_DEFAULT_DATA_BUFF_SIZE; - return true; - } - - bool query_shutdown() - { - if(!::InterlockedCompareExchange(&m_asked_to_shutdown, 1, 0)) - { - m_psend_data->m_op_type = op_type_stop; - ::PostQueuedCompletionStatus(m_completion_port, 0, (ULONG_PTR)this, &m_psend_data->m_overlapped); - } - return true; - } - - //bool set_config(typename TProtocol::config_type& config) - //{ - // this->~connection(); - // new(this) connection(config); - // return true; - //} - ~connection() - { - if(m_psend_data) - delete m_psend_data; - - if(m_precv_data) - delete m_precv_data; - } - virtual bool handle_send(const void* ptr, size_t cb) - { - 
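// handle_send() below posts a single overlapped WSASend(); WSA_IO_PENDING is
// the normal asynchronous outcome, and the result is later delivered to
// whichever worker is blocked in GetQueuedCompletionStatus() on the associated
// completion port. A minimal sketch of that flow, assuming Winsock2
// (sock/data/size are illustrative names):
//
//   WSABUF buf;
//   buf.buf = (CHAR*)data;
//   buf.len = (ULONG)size;
//   OVERLAPPED ov = {0};
//   DWORD sent = 0;
//   if(::WSASend(sock, &buf, 1, &sent, 0, &ov, NULL) == SOCKET_ERROR
//      && ::WSAGetLastError() != WSA_IO_PENDING)
//   {
//     // hard failure: tear the connection down
//   }
//   // immediate success or WSA_IO_PENDING: a completion packet is still
//   // queued to the IOCP the socket was associated with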
PROFILE_FUNC("[handle_send]"); - if(m_psend_data->TotalBuffBytes < cb) - resize_send_buff((DWORD)cb); - - ZeroMemory(&m_psend_data->m_overlapped, sizeof(OVERLAPPED)); - m_psend_data->DataBuf.len = (u_long)cb;//m_psend_data->TotalBuffBytes; - m_psend_data->DataBuf.buf = m_psend_data->Buffer; - memcpy(m_psend_data->DataBuf.buf, ptr, cb); - m_psend_data->m_op_type = op_type_send; - InterlockedExchange(&m_psend_data->m_is_in_use, 1); - DWORD bytes_sent = 0; - DWORD flags = 0; - int res = 0; - { - PROFILE_FUNC("[handle_send] ::WSASend"); - res = ::WSASend(m_sock, &(m_psend_data->DataBuf), 1, &bytes_sent, flags, &(m_psend_data->m_overlapped), NULL); - } - - if(res == SOCKET_ERROR ) - { - int err = ::WSAGetLastError(); - if(WSA_IO_PENDING == err ) - return true; - - LOG_ERROR("BIG FAIL: WSASend error code not correct, res=" << res << " last_err=" << err); - ::InterlockedExchange(&m_psend_data->m_is_in_use, 0); - query_shutdown(); - //closesocket(m_psend_data); - return false; - }else if(0 == res) - { - ::InterlockedExchange(&m_psend_data->m_is_in_use, 0); - if(!bytes_sent || bytes_sent != cb) - { - int err = ::WSAGetLastError(); - LOG_ERROR("BIG FAIL: WSASend immediatly complete? but bad results, res=" << res << " last_err=" << err); - query_shutdown(); - return false; - }else - { - return true; - } - } - - return true; - } - bool resize_send_buff(DWORD new_size) - { - if(m_psend_data->TotalBuffBytes >= new_size) - return true; - - delete m_psend_data; - m_psend_data = (io_data_base*)new char[sizeof(io_data_base) + new_size-1]; - m_psend_data->TotalBuffBytes = new_size; - LOG_PRINT("Connection buffer resized up to " << new_size, LOG_LEVEL_3); - return true; - } - - - SOCKET m_sock; - net_utils::connection_context_base context; - TProtocol m_tprotocol_handler; - typename TProtocol::config_type m_dummy_config; - io_data_base* m_precv_data; - io_data_base* m_psend_data; - HANDLE m_completion_port; - volatile LONG m_asked_to_shutdown; - volatile LONG m_connection_shutwoned; - }; -PRAGMA_WARNING_POP - - bool worker_thread_member(); - static unsigned CALLBACK worker_thread(void* param); - - bool add_new_connection(SOCKET new_sock, long ip_from, int port_from); - bool shutdown_connection(connection* pconn); - - - typedef std::map > > connections_container; - SOCKET m_listen_socket; - HANDLE m_completion_port; - connections_container m_connections; - critical_section m_connections_lock; - int m_port; - volatile LONG m_stop; - //abstract_handler* m_phandler; - bool m_initialized; - volatile LONG m_worker_thread_counter; - typename TProtocol::config_type m_config; - }; -} -} -#include "abstract_tcp_server_cp.inl" - - -#endif //_LEVIN_SERVER_H_ diff --git a/contrib/epee/include/net/abstract_tcp_server_cp.inl b/contrib/epee/include/net/abstract_tcp_server_cp.inl deleted file mode 100644 index 5673c50be0..0000000000 --- a/contrib/epee/include/net/abstract_tcp_server_cp.inl +++ /dev/null @@ -1,605 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#pragma comment(lib, "Ws2_32.lib") - -namespace epee -{ -namespace net_utils -{ -template -cp_server_impl::cp_server_impl(): - m_port(0), m_stop(false), - m_worker_thread_counter(0), m_listen_socket(INVALID_SOCKET) -{ -} -//------------------------------------------------------------- -template -cp_server_impl::~cp_server_impl() -{ - deinit_server(); -} -//------------------------------------------------------------- -template -bool cp_server_impl::init_server(int port_no) -{ - m_port = port_no; - - WSADATA wsad = {0}; - int err = ::WSAStartup(MAKEWORD(2,2), &wsad); - if ( err != 0 || LOBYTE( wsad.wVersion ) != 2 || HIBYTE( wsad.wVersion ) != 2 ) - { - LOG_ERROR("Could not find a usable WinSock DLL, err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - return false; - } - - m_initialized = true; - - m_listen_socket = ::WSASocket(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL, 0, WSA_FLAG_OVERLAPPED); - if(INVALID_SOCKET == m_listen_socket) - { - err = ::WSAGetLastError(); - LOG_ERROR("Failed to create socket, err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - return false; - } - - - int opt = 1; - err = setsockopt (m_listen_socket, SOL_SOCKET,SO_REUSEADDR, reinterpret_cast(&opt), sizeof(int)); - if(SOCKET_ERROR == err ) - { - err = ::WSAGetLastError(); - LOG_PRINT("Failed to setsockopt(SO_REUSEADDR), err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\"", LOG_LEVEL_1); - deinit_server(); - return false; - } - - - sockaddr_in adr = {0}; - adr.sin_family = AF_INET; - adr.sin_addr.s_addr = htonl(INADDR_ANY); - adr.sin_port = (u_short)htons(m_port); - - //binding - err = bind(m_listen_socket, (const sockaddr*)&adr, sizeof(adr )); - if(SOCKET_ERROR == err ) - { - err = ::WSAGetLastError(); - LOG_PRINT("Failed to Bind, err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\"", LOG_LEVEL_1); - deinit_server(); - return false; - } - - - m_completion_port = ::CreateIoCompletionPort(INVALID_HANDLE_VALUE, NULL, 0, 0); - if(INVALID_HANDLE_VALUE == m_completion_port) - { - err = ::WSAGetLastError(); - LOG_PRINT("Failed to CreateIoCompletionPort, err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\"", LOG_LEVEL_1); - deinit_server(); - return false; - } - - - return true; -} -//------------------------------------------------------------- - -//------------------------------------------------------------- -static int CALLBACK CPConditionFunc( - IN LPWSABUF lpCallerId, - IN LPWSABUF lpCallerData, - IN OUT LPQOS lpSQOS, - 
IN OUT LPQOS lpGQOS, - IN LPWSABUF lpCalleeId, - OUT LPWSABUF lpCalleeData, - OUT GROUP FAR *g, - IN DWORD_PTR dwCallbackData - ) -{ - - /*cp_server_impl* pthis = (cp_server_impl*)dwCallbackData; - if(!pthis) - return CF_REJECT;*/ - /*if(pthis->get_active_connections_num()>=FD_SETSIZE-1) - { - LOG_PRINT("Maximum connections count overfull.", LOG_LEVEL_2); - return CF_REJECT; - }*/ - - return CF_ACCEPT; -} -//------------------------------------------------------------- -template -size_t cp_server_impl::get_active_connections_num() -{ - return m_connections.size(); -} -//------------------------------------------------------------- -template -unsigned CALLBACK cp_server_impl::worker_thread(void* param) -{ - if(!param) - return 0; - - cp_server_impl* pthis = (cp_server_impl*)param; - pthis->worker_thread_member(); - return 1; -} -//------------------------------------------------------------- -template -bool cp_server_impl::worker_thread_member() -{ - LOG_PRINT("Worker thread STARTED", LOG_LEVEL_1); - bool stop_handling = false; - while(!stop_handling) - { - PROFILE_FUNC("[worker_thread]Worker Loop"); - DWORD bytes_transfered = 0; - connection* pconnection = 0; - io_data_base* pio_data = 0; - - { - PROFILE_FUNC("[worker_thread]GetQueuedCompletionStatus"); - BOOL res = ::GetQueuedCompletionStatus (m_completion_port, &bytes_transfered , (PULONG_PTR)&pconnection, (LPOVERLAPPED *)&pio_data, INFINITE); - if (res == 0) - { - // check return code for error - int err = GetLastError(); - LOG_PRINT("GetQueuedCompletionStatus failed with error " << err << " " << log_space::get_win32_err_descr(err), LOG_LEVEL_1); - - if(pio_data) - ::InterlockedExchange(&pio_data->m_is_in_use, 0); - - - continue; - } - } - - if(pio_data) - ::InterlockedExchange(&pio_data->m_is_in_use, 0); - - - - if(!bytes_transfered && !pconnection && !pio_data) - { - //signal to stop - break; - } - if(!pconnection || !pio_data) - { - LOG_PRINT("BIG FAIL: pconnection or pio_data is empty: pconnection=" << pconnection << " pio_data=" << pio_data, LOG_LEVEL_0); - break; - } - - - - if(::InterlockedCompareExchange(&pconnection->m_connection_shutwoned, 0, 0)) - { - LOG_ERROR("InterlockedCompareExchange(&pconnection->m_connection_shutwoned, 0, 0)"); - //DebugBreak(); - } - - if(pio_data->m_op_type == op_type_stop) - { - if(!pconnection) - { - LOG_ERROR("op_type=op_type_stop, but pconnection is empty!!!"); - continue; - } - shutdown_connection(pconnection); - continue;// - } - else if(pio_data->m_op_type == op_type_send) - { - continue; - //do nothing, just queuing request - }else if(pio_data->m_op_type == op_type_recv) - { - PROFILE_FUNC("[worker_thread]m_tprotocol_handler.handle_recv"); - if(bytes_transfered) - { - bool res = pconnection->m_tprotocol_handler.handle_recv(pio_data->Buffer, bytes_transfered); - if(!res) - pconnection->query_shutdown(); - } - else - { - pconnection->query_shutdown(); - continue; - } - - } - - //preparing new request, - - { - PROFILE_FUNC("[worker_thread]RECV Request small loop"); - int res = 0; - while(true) - { - LOG_PRINT("Prepearing data for WSARecv....", LOG_LEVEL_3); - ZeroMemory(&pio_data->m_overlapped, sizeof(OVERLAPPED)); - pio_data->DataBuf.len = pio_data->TotalBuffBytes; - pio_data->DataBuf.buf = pio_data->Buffer; - pio_data->m_op_type = op_type_recv; - //calling WSARecv() and go to completion waiting - DWORD bytes_recvd = 0; - DWORD flags = 0; - - LOG_PRINT("Calling WSARecv....", LOG_LEVEL_3); - ::InterlockedExchange(&pio_data->m_is_in_use, 1); - res = WSARecv(pconnection->m_sock, 
&(pio_data->DataBuf), 1, &bytes_recvd , &flags, &(pio_data->m_overlapped), NULL); - if(res == SOCKET_ERROR ) - { - int err = ::WSAGetLastError(); - if(WSA_IO_PENDING == err ) - {//go pending, ok - LOG_PRINT("WSARecv return WSA_IO_PENDING", LOG_LEVEL_3); - break; - } - LOG_ERROR("BIG FAIL: WSARecv error code not correct, res=" << res << " last_err=" << err); - ::InterlockedExchange(&pio_data->m_is_in_use, 0); - pconnection->query_shutdown(); - break; - } - break; - /*else if(0 == res) - { - if(!bytes_recvd) - { - ::InterlockedExchange(&pio_data->m_is_in_use, 0); - LOG_PRINT("WSARecv return 0, bytes_recvd=0, graceful close.", LOG_LEVEL_3); - int err = ::WSAGetLastError(); - //LOG_ERROR("BIG FAIL: WSARecv error code not correct, res=" << res << " last_err=" << err); - //pconnection->query_shutdown(); - break; - }else - { - LOG_PRINT("WSARecv return immediatily 0, bytes_recvd=" << bytes_recvd, LOG_LEVEL_3); - //pconnection->m_tprotocol_handler.handle_recv(pio_data->Buffer, bytes_recvd); - } - }*/ - } - } - } - - - LOG_PRINT("Worker thread STOPED", LOG_LEVEL_1); - ::InterlockedDecrement(&m_worker_thread_counter); - return true; -} -//------------------------------------------------------------- -template -bool cp_server_impl::shutdown_connection(connection* pconn) -{ - PROFILE_FUNC("[shutdown_connection]"); - - if(!pconn) - { - LOG_ERROR("Attempt to remove null pptr connection!"); - return false; - } - else - { - LOG_PRINT("Shutting down connection ("<< pconn << ")", LOG_LEVEL_3); - } - m_connections_lock.lock(); - connections_container::iterator it = m_connections.find(pconn->m_sock); - m_connections_lock.unlock(); - if(it == m_connections.end()) - { - LOG_ERROR("Failed to find closing socket=" << pconn->m_sock); - return false; - } - SOCKET sock = it->second->m_sock; - { - PROFILE_FUNC("[shutdown_connection] shutdown, close"); - ::shutdown(it->second->m_sock, SD_SEND ); - } - size_t close_sock_wait_count = 0; - { - LOG_PRINT("Entered to 'in_use wait zone'", LOG_LEVEL_3); - PROFILE_FUNC("[shutdown_connection] wait for in_use"); - while(::InterlockedCompareExchange(&it->second->m_precv_data->m_is_in_use, 1, 1)) - { - - Sleep(100); - close_sock_wait_count++; - } - LOG_PRINT("First step to 'in_use wait zone'", LOG_LEVEL_3); - - - while(::InterlockedCompareExchange(&it->second->m_psend_data->m_is_in_use, 1, 1)) - { - Sleep(100); - close_sock_wait_count++; - } - LOG_PRINT("Leaved 'in_use wait zone'", LOG_LEVEL_3); - } - - ::closesocket(it->second->m_sock); - - ::InterlockedExchange(&it->second->m_connection_shutwoned, 1); - m_connections_lock.lock(); - m_connections.erase(it); - m_connections_lock.unlock(); - LOG_PRINT("Socked " << sock << " closed, wait_count=" << close_sock_wait_count, LOG_LEVEL_2); - return true; -} -//------------------------------------------------------------- -template -bool cp_server_impl::run_server(int threads_count = 0) -{ - int err = listen(m_listen_socket, 100); - if(SOCKET_ERROR == err ) - { - err = ::WSAGetLastError(); - LOG_ERROR("Failed to listen, err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - return false; - } - - if(!threads_count) - { - SYSTEM_INFO si = {0}; - ::GetSystemInfo(&si); - threads_count = si.dwNumberOfProcessors + 2; - } - for(int i = 0; i != threads_count; i++) - { - boost::thread(boost::bind(&cp_server_impl::worker_thread_member, this)); - //HANDLE h_thread = threads_helper::create_thread(worker_thread, this); - InterlockedIncrement(&m_worker_thread_counter); - //::CloseHandle(h_thread); - } - - LOG_PRINT("Numbers of 
worker threads started: " << threads_count, LOG_LEVEL_1); - - m_stop = false; - while(!m_stop) - { - PROFILE_FUNC("[run_server] main_loop"); - TIMEVAL tv = {0}; - tv.tv_sec = 0; - tv.tv_usec = 100; - fd_set sock_set; - sock_set.fd_count = 1; - sock_set.fd_array[0] = m_listen_socket; - int select_res = 0; - { - PROFILE_FUNC("[run_server] select"); - select_res = select(0, &sock_set, &sock_set, NULL, &tv); - } - - if(SOCKET_ERROR == select_res) - { - err = ::WSAGetLastError(); - LOG_ERROR("Failed to select, err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - return false; - } - if(!select_res) - { - on_net_idle(); - continue; - } - else - { - sockaddr_in adr_from = {0}; - int adr_len = sizeof(adr_from); - SOCKET new_sock = INVALID_SOCKET; - { - PROFILE_FUNC("[run_server] WSAAccept"); - new_sock = ::WSAAccept(m_listen_socket, (sockaddr *)&adr_from, &adr_len, CPConditionFunc, (DWORD_PTR)this); - } - - if(INVALID_SOCKET == new_sock) - { - if(m_stop) - break; - int err = ::WSAGetLastError(); - LOG_PRINT("Failed to WSAAccept, err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\"", LOG_LEVEL_2); - continue; - } - LOG_PRINT("Accepted connection (new socket=" << new_sock << ")", LOG_LEVEL_2); - { - PROFILE_FUNC("[run_server] Add new connection"); - add_new_connection(new_sock, adr_from.sin_addr.s_addr, adr_from.sin_port); - } - - } - - } - LOG_PRINT("Closing connections("<< m_connections.size() << ") and waiting...", LOG_LEVEL_2); - m_connections_lock.lock(); - for(connections_container::iterator it = m_connections.begin(); it != m_connections.end(); it++) - { - ::shutdown(it->second->m_sock, SD_BOTH); - ::closesocket(it->second->m_sock); - } - m_connections_lock.unlock(); - size_t wait_count = 0; - while(m_connections.size() && wait_count < 100) - { - ::Sleep(100); - wait_count++; - } - LOG_PRINT("Connections closed OK (wait_count=" << wait_count << ")", LOG_LEVEL_2); - - - LOG_PRINT("Stopping worker threads("<< m_worker_thread_counter << ").", LOG_LEVEL_2); - for(int i = 0; i > ptr; - ptr.reset(new connection(m_config)); - - connection& conn = *ptr.get(); - m_connections[new_sock] = ptr; - LOG_PRINT("Add new connection zone: leaving lock", LOG_LEVEL_3); - m_connections_lock.unlock(); - conn.init_buffers(); - conn.m_sock = new_sock; - conn.context.m_remote_ip = ip_from; - conn.context.m_remote_port = port_from; - conn.m_completion_port = m_completion_port; - { - PROFILE_FUNC("[add_new_connection] CreateIoCompletionPort"); - ::CreateIoCompletionPort((HANDLE)new_sock, m_completion_port, (ULONG_PTR)&conn, 0); - } - - //if(NULL == ::CreateIoCompletionPort((HANDLE)new_sock, m_completion_port, (ULONG_PTR)&conn, 0)) - //{ - // int err = ::GetLastError(); - // LOG_PRINT("Failed to CreateIoCompletionPort(associate socket and completion port), err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\"", LOG_LEVEL_2); - // return false; - //} - - conn.m_tprotocol_handler.after_init_connection(); - { - PROFILE_FUNC("[add_new_connection] starting loop"); - int res = 0; - while(true)//res!=SOCKET_ERROR) - { - PROFILE_FUNC("[add_new_connection] in loop time"); - conn.m_precv_data->TotalBuffBytes = LEVIN_DEFAULT_DATA_BUFF_SIZE; - ZeroMemory(&conn.m_precv_data->m_overlapped, sizeof(OVERLAPPED)); - conn.m_precv_data->DataBuf.len = conn.m_precv_data->TotalBuffBytes; - conn.m_precv_data->DataBuf.buf = conn.m_precv_data->Buffer; - conn.m_precv_data->m_op_type = op_type_recv; - InterlockedExchange(&conn.m_precv_data->m_is_in_use, 1); - DWORD bytes_recvd = 
0;
-        DWORD flags = 0;
-
-        ::InterlockedExchange(&conn.m_precv_data->m_is_in_use, 1);
-        {
-          PROFILE_FUNC("[add_new_connection] ::WSARecv");
-          res = ::WSARecv(conn.m_sock, &(conn.m_precv_data->DataBuf), 1, &bytes_recvd , &flags, &(conn.m_precv_data->m_overlapped), NULL);
-        }
-        if(res == SOCKET_ERROR )
-        {
-          int err = ::WSAGetLastError();
-          if(WSA_IO_PENDING == err )
-          {
-            break;
-          }
-          LOG_ERROR("BIG FAIL: WSARecv error code not correct, res=" << res << " last_err=" << err << " " << log_space::get_win32_err_descr(err));
-          ::InterlockedExchange(&conn.m_precv_data->m_is_in_use, 0);
-          conn.query_shutdown();
-          //shutdown_connection(&conn);
-          break;
-        }
-
-
-        break;
-        /*else if(0 == res)
-        {
-          if(!bytes_recvd)
-          {
-            PROFILE_FUNC("[add_new_connection] shutdown_connection");
-            ::InterlockedExchange(&conn.m_precv_data->m_is_in_use, 0);
-            conn.query_shutdown();
-            //shutdown_connection(&conn);
-            break;
-          }else
-          {
-            PROFILE_FUNC("[add_new_connection] handle_recv");
-          }
-        }*/
-      }
-    }
-
-
-
-    return true;
-}
-//-------------------------------------------------------------
-template<class TProtocol>
-bool cp_server_impl<TProtocol>::deinit_server()
-{
-  if(!m_initialized)
-    return true;
-
-  if(INVALID_SOCKET != m_listen_socket)
-  {
-    shutdown(m_listen_socket, SD_BOTH);
-    int res = closesocket(m_listen_socket);
-    if(SOCKET_ERROR == res)
-    {
-      int err = ::WSAGetLastError();
-      LOG_ERROR("Failed to closesocket(), err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\"");
-    }
-    m_listen_socket = INVALID_SOCKET;
-  }
-
-  int res = ::WSACleanup();
-  if(SOCKET_ERROR == res)
-  {
-    int err = ::WSAGetLastError();
-    LOG_ERROR("Failed to WSACleanup(), err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\"");
-  }
-  m_initialized = false;
-
-  return true;
-}
-
-//-------------------------------------------------------------
-template<class TProtocol>
-bool cp_server_impl<TProtocol>::send_stop_signal()
-{
-  ::InterlockedExchange(&m_stop, 1);
-  return true;
-}
-//-------------------------------------------------------------
-template<class TProtocol>
-bool cp_server_impl<TProtocol>::is_stop_signal()
-{
-  return m_stop?true:false;
-}
-//-------------------------------------------------------------
-}
-}
\ No newline at end of file
diff --git a/contrib/epee/include/net/http_base.h b/contrib/epee/include/net/http_base.h
deleted file mode 100644
index 49b0839b2b..0000000000
--- a/contrib/epee/include/net/http_base.h
+++ /dev/null
@@ -1,184 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#pragma once -#include -#include - -#include "string_tools.h" -namespace epee -{ -namespace net_utils -{ - namespace http - { - - enum http_method{ - http_method_get, - http_method_post, - http_method_put, - http_method_head, - http_method_etc, - http_method_unknown - }; - - enum http_content_type - { - http_content_type_text_html, - http_content_type_image_gif, - http_content_type_other, - http_content_type_not_set - }; - - typedef std::list > fields_list; - - inline - std::string get_value_from_fields_list(const std::string& param_name, const net_utils::http::fields_list& fields) - { - fields_list::const_iterator it = fields.begin(); - for(; it != fields.end(); it++) - if(!string_tools::compare_no_case(param_name, it->first)) - break; - - if(it==fields.end()) - return std::string(); - - return it->second; - } - - - inline - std::string get_value_from_uri_line(const std::string& param_name, const std::string& uri) - { - std::string buff = "([\\?|&])"; - buff += param_name + "=([^&]*)"; - boost::regex match_param(buff.c_str(), boost::regex::icase | boost::regex::normal); - boost::smatch result; - if(boost::regex_search(uri, result, match_param, boost::match_default) && result[0].matched) - { - return result[2]; - } - return std::string(); - } - - - - struct http_header_info - { - std::string m_connection; //"Connection:" - std::string m_referer; //"Referer:" - std::string m_content_length; //"Content-Length:" - std::string m_content_type; //"Content-Type:" - std::string m_transfer_encoding;//"Transfer-Encoding:" - std::string m_content_encoding; //"Content-Encoding:" - std::string m_host; //"Host:" - std::string m_cookie; //"Cookie:" - fields_list m_etc_fields; - - void clear() - { - m_connection.clear(); - m_referer.clear(); - m_content_length.clear(); - m_content_type.clear(); - m_transfer_encoding.clear(); - m_content_encoding.clear(); - m_host.clear(); - m_cookie.clear(); - m_etc_fields.clear(); - } - }; - - struct uri_content - { - std::string m_path; - std::string m_query; - std::string m_fragment; - std::list > m_query_params; - }; - - struct url_content - { - std::string schema; - std::string host; - std::string uri; - uint64_t port; - uri_content m_uri_content; - }; - - - struct http_request_info - { - http_request_info():m_http_method(http_method_unknown), - m_http_ver_hi(0), - m_http_ver_lo(0), - m_have_to_block(false) - {} - - http_method m_http_method; - std::string m_URI; - std::string m_http_method_str; - std::string m_full_request_str; - std::string m_replace_html; - std::string m_request_head; - int m_http_ver_hi; - int m_http_ver_lo; - bool m_have_to_block; - http_header_info m_header_info; - uri_content m_uri_content; - size_t m_full_request_buf_size; - std::string m_body; - - void clear() - { - this->~http_request_info(); - new(this) http_request_info(); - } - }; - - - struct http_response_info - { - int m_response_code; - std::string m_response_comment; - fields_list m_additional_fields; - std::string m_body; - std::string m_mime_tipe; - 
http_header_info m_header_info;
-      int m_http_ver_hi;// OUT paramter only
-      int m_http_ver_lo;// OUT paramter only
-
-      void clear()
-      {
-        this->~http_response_info();
-        new(this) http_response_info();
-      }
-    };
-  }
-}
-}
\ No newline at end of file
diff --git a/contrib/epee/include/net/http_client.h b/contrib/epee/include/net/http_client.h
deleted file mode 100644
index 5897a017e7..0000000000
--- a/contrib/epee/include/net/http_client.h
+++ /dev/null
@@ -1,875 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
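// The http_request_info and http_response_info structures deleted above reset
// themselves with an explicit destructor call followed by placement new. A
// minimal self-contained sketch of that idiom (response_like is a made-up
// stand-in, not an epee type):

#include <new>
#include <string>

struct response_like
{
    int code = 0;
    std::string body;

    // Same reset idiom as the deleted clear() methods: run the destructor,
    // then default-construct a fresh object in the same storage.
    void clear()
    {
        this->~response_like();
        new(this) response_like();
    }
};

int main()
{
    response_like r;
    r.code = 200;
    r.body = "hello";
    r.clear();        // r is default-constructed again: code == 0, body empty
    return r.code;
}

// Note the idiom is only well-defined for types without const or reference
// members; the conventional modern spelling would be *this = response_like().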
-// - - - -#pragma once -#include -#include -#include -//#include -#include -#include -#include - -#include "net_helper.h" -#include "http_client_base.h" - -#ifdef HTTP_ENABLE_GZIP -#include "gzip_encoding.h" -#endif - -#include "string_tools.h" -#include "reg_exp_definer.h" -#include "http_base.h" -#include "to_nonconst_iterator.h" -#include "net_parse_helpers.h" - -//#include "shlwapi.h" - -//#pragma comment(lib, "shlwapi.lib") - -extern epee::critical_section gregexp_lock; - - -namespace epee -{ -namespace net_utils -{ - -using namespace std; - - /*struct url - { - public: - void parse(const std::string& url_s) - { - const string prot_end("://"); - string::const_iterator prot_i = search(url_s.begin(), url_s.end(), - prot_end.begin(), prot_end.end()); - protocol_.reserve(distance(url_s.begin(), prot_i)); - transform(url_s.begin(), prot_i, - back_inserter(protocol_), - ptr_fun(tolower)); // protocol is icase - if( prot_i == url_s.end() ) - return; - advance(prot_i, prot_end.length()); - string::const_iterator path_i = find(prot_i, url_s.end(), '/'); - host_.reserve(distance(prot_i, path_i)); - transform(prot_i, path_i, - back_inserter(host_), - ptr_fun(tolower)); // host is icase - string::const_iterator query_i = find(path_i, url_s.end(), '?'); - path_.assign(path_i, query_i); - if( query_i != url_s.end() ) - ++query_i; - query_.assign(query_i, url_s.end()); - } - - std::string protocol_; - std::string host_; - std::string path_; - std::string query_; - };*/ - - - - - //--------------------------------------------------------------------------- - static inline const char* get_hex_vals() - { - static char hexVals[16] = {'0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F'}; - return hexVals; - } - - static inline const char* get_unsave_chars() - { - //static char unsave_chars[] = "\"<>%\\^[]`+$,@:;/!#?=&"; - static char unsave_chars[] = "\"<>%\\^[]`+$,@:;!#&"; - return unsave_chars; - } - - static inline bool is_unsafe(unsigned char compare_char) - { - if(compare_char <= 32 || compare_char >= 123) - return true; - - const char* punsave = get_unsave_chars(); - - for(int ichar_pos = 0; 0!=punsave[ichar_pos] ;ichar_pos++) - if(compare_char == punsave[ichar_pos]) - return true; - - return false; - } - - static inline - std::string dec_to_hex(char num, int radix) - { - int temp=0; - std::string csTmp; - int num_char; - - num_char = (int) num; - if (num_char < 0) - num_char = 256 + num_char; - - while (num_char >= radix) - { - temp = num_char % radix; - num_char = (int)floor((float)num_char / (float)radix); - csTmp = get_hex_vals()[temp]; - } - - csTmp += get_hex_vals()[num_char]; - - if(csTmp.size() < 2) - { - csTmp += '0'; - } - - std::reverse(csTmp.begin(), csTmp.end()); - //_mbsrev((unsigned char*)csTmp.data()); - - return csTmp; - } - - static inline std::string convert(char val) - { - std::string csRet; - csRet += "%"; - csRet += dec_to_hex(val, 16); - return csRet; - } - static inline std::string conver_to_url_format(const std::string& uri) - { - - std::string result; - - for(size_t i = 0; i!= uri.size(); i++) - { - if(is_unsafe(uri[i])) - result += convert(uri[i]); - else - result += uri[i]; - - } - - return result; - } - - static inline std::string convert_to_url_format_force_all(const std::string& uri) - { - - std::string result; - - for(size_t i = 0; i!= uri.size(); i++) - { - result += convert(uri[i]); - } - - return result; - } - - - - - - namespace http - { - - class http_simple_client: public i_target_handler - { - public: - - - private: - enum reciev_machine_state - { 
- reciev_machine_state_header, - reciev_machine_state_body_content_len, - reciev_machine_state_body_connection_close, - reciev_machine_state_body_chunked, - reciev_machine_state_done, - reciev_machine_state_error - }; - - - - enum chunked_state{ - http_chunked_state_chunk_head, - http_chunked_state_chunk_body, - http_chunked_state_done, - http_chunked_state_undefined - }; - - - blocked_mode_client m_net_client; - std::string m_host_buff; - std::string m_port; - unsigned int m_timeout; - std::string m_header_cache; - http_response_info m_response_info; - size_t m_len_in_summary; - size_t m_len_in_remain; - //std::string* m_ptarget_buffer; - boost::shared_ptr m_pcontent_encoding_handler; - reciev_machine_state m_state; - chunked_state m_chunked_state; - std::string m_chunked_cache; - critical_section m_lock; - - public: - void set_host_name(const std::string& name) - { - CRITICAL_REGION_LOCAL(m_lock); - m_host_buff = name; - } - bool connect(const std::string& host, int port, unsigned int timeout) - { - return connect(host, std::to_string(port), timeout); - } - bool connect(const std::string& host, const std::string& port, unsigned int timeout) - { - CRITICAL_REGION_LOCAL(m_lock); - m_host_buff = host; - m_port = port; - m_timeout = timeout; - - return m_net_client.connect(host, port, timeout, timeout); - } - //--------------------------------------------------------------------------- - bool disconnect() - { - CRITICAL_REGION_LOCAL(m_lock); - return m_net_client.disconnect(); - } - //--------------------------------------------------------------------------- - bool is_connected() - { - CRITICAL_REGION_LOCAL(m_lock); - return m_net_client.is_connected(); - } - //--------------------------------------------------------------------------- - virtual bool handle_target_data(std::string& piece_of_transfer) - { - CRITICAL_REGION_LOCAL(m_lock); - m_response_info.m_body += piece_of_transfer; - piece_of_transfer.clear(); - return true; - } - //--------------------------------------------------------------------------- - inline - bool invoke_get(const std::string& uri, const std::string& body = std::string(), const http_response_info** ppresponse_info = NULL, const fields_list& additional_params = fields_list()) - { - CRITICAL_REGION_LOCAL(m_lock); - return invoke(uri, "GET", body, ppresponse_info, additional_params); - } - - //--------------------------------------------------------------------------- - inline bool invoke(const std::string& uri, const std::string& method, const std::string& body, const http_response_info** ppresponse_info = NULL, const fields_list& additional_params = fields_list()) - { - CRITICAL_REGION_LOCAL(m_lock); - if(!is_connected()) - { - LOG_PRINT("Reconnecting...", LOG_LEVEL_3); - if(!connect(m_host_buff, m_port, m_timeout)) - { - LOG_PRINT("Failed to connect to " << m_host_buff << ":" << m_port, LOG_LEVEL_3); - return false; - } - } - m_response_info.clear(); - std::string req_buff = method + " "; - req_buff += uri + " HTTP/1.1\r\n" + - "Host: "+ m_host_buff +"\r\n" + "Content-Length: " + boost::lexical_cast(body.size()) + "\r\n"; - - - //handle "additional_params" - for(fields_list::const_iterator it = additional_params.begin(); it!=additional_params.end(); it++) - req_buff += it->first + ": " + it->second + "\r\n"; - req_buff += "\r\n"; - //-- - - bool res = m_net_client.send(req_buff); - CHECK_AND_ASSERT_MES(res, false, "HTTP_CLIENT: Failed to SEND"); - if(body.size()) - res = m_net_client.send(body); - CHECK_AND_ASSERT_MES(res, false, "HTTP_CLIENT: Failed to SEND"); - 
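// For reference, the request head assembled by invoke() above is plain
// HTTP/1.1 text. A compact sketch of the bytes that end up on the wire
// (the host, URI and body values here are hypothetical, not taken from epee):

#include <iostream>
#include <string>

int main()
{
    const std::string method = "POST";
    const std::string uri    = "/json_rpc";
    const std::string host   = "127.0.0.1";
    const std::string body   = "{\"method\":\"getinfo\"}";

    std::string req_buff = method + " " + uri + " HTTP/1.1\r\n"
        + "Host: " + host + "\r\n"
        + "Content-Length: " + std::to_string(body.size()) + "\r\n"
        + "\r\n";                      // empty line closes the header block

    std::cout << req_buff << body;     // invoke() sends head and body as two writes
    return 0;
}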
- if(ppresponse_info) - *ppresponse_info = &m_response_info; - - m_state = reciev_machine_state_header; - return handle_reciev(); - } - //--------------------------------------------------------------------------- - inline bool invoke_post(const std::string& uri, const std::string& body, const http_response_info** ppresponse_info = NULL, const fields_list& additional_params = fields_list()) - { - CRITICAL_REGION_LOCAL(m_lock); - return invoke(uri, "POST", body, ppresponse_info, additional_params); - } - private: - //--------------------------------------------------------------------------- - inline bool handle_reciev() - { - CRITICAL_REGION_LOCAL(m_lock); - bool keep_handling = true; - bool need_more_data = true; - std::string recv_buffer; - while(keep_handling) - { - if(need_more_data) - { - if(!m_net_client.recv(recv_buffer)) - { - LOG_PRINT("Unexpected reciec fail", LOG_LEVEL_3); - m_state = reciev_machine_state_error; - } - if(!recv_buffer.size()) - { - //connection is going to be closed - if(reciev_machine_state_body_connection_close != m_state) - { - m_state = reciev_machine_state_error; - } - } - need_more_data = false; - } - switch(m_state) - { - case reciev_machine_state_header: - keep_handling = handle_header(recv_buffer, need_more_data); - break; - case reciev_machine_state_body_content_len: - keep_handling = handle_body_content_len(recv_buffer, need_more_data); - break; - case reciev_machine_state_body_connection_close: - keep_handling = handle_body_connection_close(recv_buffer, need_more_data); - break; - case reciev_machine_state_body_chunked: - keep_handling = handle_body_body_chunked(recv_buffer, need_more_data); - break; - case reciev_machine_state_done: - keep_handling = false; - break; - case reciev_machine_state_error: - keep_handling = false; - break; - } - - } - m_header_cache.clear(); - if(m_state != reciev_machine_state_error) - { - if(m_response_info.m_header_info.m_connection.size() && !string_tools::compare_no_case("close", m_response_info.m_header_info.m_connection)) - disconnect(); - - return true; - } - else - { - LOG_PRINT_L3("Returning false because of wrong state machine. 
state: " << m_state); - return false; - } - } - //--------------------------------------------------------------------------- - inline - bool handle_header(std::string& recv_buff, bool& need_more_data) - { - - CRITICAL_REGION_LOCAL(m_lock); - if(!recv_buff.size()) - { - LOG_ERROR("Connection closed at handle_header"); - m_state = reciev_machine_state_error; - return false; - } - - m_header_cache += recv_buff; - recv_buff.clear(); - std::string::size_type pos = m_header_cache.find("\r\n\r\n"); - if(pos != std::string::npos) - { - recv_buff.assign(m_header_cache.begin()+pos+4, m_header_cache.end()); - m_header_cache.erase(m_header_cache.begin()+pos+4, m_header_cache.end()); - - analize_cached_header_and_invoke_state(); - m_header_cache.clear(); - if(!recv_buff.size() && (m_state != reciev_machine_state_error && m_state != reciev_machine_state_done)) - need_more_data = true; - - return true; - }else - need_more_data = true; - return true; - } - //--------------------------------------------------------------------------- - inline - bool handle_body_content_len(std::string& recv_buff, bool& need_more_data) - { - CRITICAL_REGION_LOCAL(m_lock); - if(!recv_buff.size()) - { - LOG_PRINT("Warning: Content-Len mode, but connection unexpectedly closed", LOG_LEVEL_3); - m_state = reciev_machine_state_done; - return true; - } - CHECK_AND_ASSERT_MES(m_len_in_remain >= recv_buff.size(), false, "m_len_in_remain >= recv_buff.size()"); - m_len_in_remain -= recv_buff.size(); - m_pcontent_encoding_handler->update_in(recv_buff); - - if(m_len_in_remain == 0) - m_state = reciev_machine_state_done; - else - need_more_data = true; - - return true; - } - //--------------------------------------------------------------------------- - inline - bool handle_body_connection_close(std::string& recv_buff, bool& need_more_data) - { - CRITICAL_REGION_LOCAL(m_lock); - if(!recv_buff.size()) - { - m_state = reciev_machine_state_done; - return true; - } - need_more_data = true; - m_pcontent_encoding_handler->update_in(recv_buff); - - - return true; - } - //--------------------------------------------------------------------------- - inline bool is_hex_symbol(char ch) - { - - if( (ch >= '0' && ch <='9')||(ch >= 'A' && ch <='F')||(ch >= 'a' && ch <='f')) - return true; - else - return false; - } - //--------------------------------------------------------------------------- - inline - bool get_len_from_chunk_head(const std::string &chunk_head, size_t& result_size) - { - std::stringstream str_stream; - str_stream << std::hex; - if(!(str_stream << chunk_head && str_stream >> result_size)) - return false; - - return true; - } - //--------------------------------------------------------------------------- - inline - bool get_chunk_head(std::string& buff, size_t& chunk_size, bool& is_matched) - { - is_matched = false; - size_t offset = 0; - for(std::string::iterator it = buff.begin(); it!= buff.end(); it++, offset++) - { - if(!is_hex_symbol(*it)) - { - if(*it == '\r' || *it == ' ' ) - { - offset--; - continue; - } - else if(*it == '\n') - { - std::string chunk_head = buff.substr(0, offset); - if(!get_len_from_chunk_head(chunk_head, chunk_size)) - return false; - - if(0 == chunk_size) - { - //Here is a small confusion - //In breif - if the chunk is the last one we need to get terminating sequence - //along with the cipher, generally in the "ddd\r\n\r\n" form - - for(it++;it != buff.end(); it++) - { - if('\r' == *it) - continue; - else if('\n' == *it) - break; - else - { - LOG_ERROR("http_stream_filter: Wrong last chunk terminator"); - 
return false; - } - } - - if(it == buff.end()) - return true; - } - - buff.erase(buff.begin(), ++it); - - is_matched = true; - return true; - } - else - return false; - } - } - - return true; - } - //--------------------------------------------------------------------------- - inline - bool handle_body_body_chunked(std::string& recv_buff, bool& need_more_data) - { - CRITICAL_REGION_LOCAL(m_lock); - if(!recv_buff.size()) - { - LOG_PRINT("Warning: CHUNKED mode, but connection unexpectedly closed", LOG_LEVEL_3); - m_state = reciev_machine_state_done; - return true; - } - m_chunked_cache += recv_buff; - recv_buff.clear(); - bool is_matched = false; - - while(true) - { - if(!m_chunked_cache.size()) - { - need_more_data = true; - break; - } - - switch(m_chunked_state) - { - case http_chunked_state_chunk_head: - if(m_chunked_cache[0] == '\n' || m_chunked_cache[0] == '\r') - { - //optimize a bit - if(m_chunked_cache[0] == '\r' && m_chunked_cache.size()>1 && m_chunked_cache[1] == '\n') - m_chunked_cache.erase(0, 2); - else - m_chunked_cache.erase(0, 1); - break; - } - if(!get_chunk_head(m_chunked_cache, m_len_in_remain, is_matched)) - { - LOG_ERROR("http_stream_filter::handle_chunked(*) Failed to get length from chunked head:" << m_chunked_cache); - m_state = reciev_machine_state_error; - return false; - } - - if(!is_matched) - { - need_more_data = true; - return true; - }else - { - m_chunked_state = http_chunked_state_chunk_body; - if(m_len_in_remain == 0) - {//last chunk, let stop the stream and fix the chunk queue. - m_state = reciev_machine_state_done; - return true; - } - m_chunked_state = http_chunked_state_chunk_body; - break; - } - break; - case http_chunked_state_chunk_body: - { - std::string chunk_body; - if(m_len_in_remain >= m_chunked_cache.size()) - { - m_len_in_remain -= m_chunked_cache.size(); - chunk_body.swap(m_chunked_cache); - }else - { - chunk_body.assign(m_chunked_cache, 0, m_len_in_remain); - m_chunked_cache.erase(0, m_len_in_remain); - m_len_in_remain = 0; - } - - m_pcontent_encoding_handler->update_in(chunk_body); - - if(!m_len_in_remain) - m_chunked_state = http_chunked_state_chunk_head; - } - break; - case http_chunked_state_done: - m_state = reciev_machine_state_done; - return true; - case http_chunked_state_undefined: - default: - LOG_ERROR("http_stream_filter::handle_chunked(): Wrong state" << m_chunked_state); - return false; - } - } - - return true; - } - //--------------------------------------------------------------------------- - inline - bool parse_header(http_header_info& body_info, const std::string& m_cache_to_process) - { - LOG_FRAME("http_stream_filter::parse_cached_header(*)", LOG_LEVEL_4); - - STATIC_REGEXP_EXPR_1(rexp_mach_field, - "\n?((Connection)|(Referer)|(Content-Length)|(Content-Type)|(Transfer-Encoding)|(Content-Encoding)|(Host)|(Cookie)" - // 12 3 4 5 6 7 8 9 - "|([\\w-]+?)) ?: ?((.*?)(\r?\n))[^\t ]", - //10 1112 13 - boost::regex::icase | boost::regex::normal); - - boost::smatch result; - std::string::const_iterator it_current_bound = m_cache_to_process.begin(); - std::string::const_iterator it_end_bound = m_cache_to_process.end(); - - - - //lookup all fields and fill well-known fields - while( boost::regex_search( it_current_bound, it_end_bound, result, rexp_mach_field, boost::match_default) && result[0].matched) - { - const size_t field_val = 12; - //const size_t field_etc_name = 10; - - int i = 2; //start position = 2 - if(result[i++].matched)//"Connection" - body_info.m_connection = result[field_val]; - else 
if(result[i++].matched)//"Referrer" - body_info.m_referer = result[field_val]; - else if(result[i++].matched)//"Content-Length" - body_info.m_content_length = result[field_val]; - else if(result[i++].matched)//"Content-Type" - body_info.m_content_type = result[field_val]; - else if(result[i++].matched)//"Transfer-Encoding" - body_info.m_transfer_encoding = result[field_val]; - else if(result[i++].matched)//"Content-Encoding" - body_info.m_content_encoding = result[field_val]; - else if(result[i++].matched)//"Host" - { body_info.m_host = result[field_val]; - string_tools::trim(body_info.m_host); - } - else if(result[i++].matched)//"Cookie" - body_info.m_cookie = result[field_val]; - else if(result[i++].matched)//e.t.c (HAVE TO BE MATCHED!) - {;} - else - {CHECK_AND_ASSERT_MES(false, false, "http_stream_filter::parse_cached_header() not matched last entry in:"<(result[1]); - m_response_info.m_http_ver_lo = boost::lexical_cast(result[2]); - m_response_info.m_response_code = boost::lexical_cast(result[3]); - - m_header_cache.erase(to_nonsonst_iterator(m_header_cache, result[0].first), to_nonsonst_iterator(m_header_cache, result[0].second)); - return true; - }else - { - LOG_ERROR("http_stream_filter::handle_invoke_reply_line(): Failed to match first response line:" << m_header_cache); - return false; - } - - } - inline - bool set_reply_content_encoder() - { - STATIC_REGEXP_EXPR_1(rexp_match_gzip, "^.*?((gzip)|(deflate))", boost::regex::icase | boost::regex::normal); - boost::smatch result; // 12 3 - if(boost::regex_search( m_response_info.m_header_info.m_content_encoding, result, rexp_match_gzip, boost::match_default) && result[0].matched) - { -#ifdef HTTP_ENABLE_GZIP - m_pcontent_encoding_handler.reset(new content_encoding_gzip(this, result[3].matched)); -#else - m_pcontent_encoding_handler.reset(new do_nothing_sub_handler(this)); - LOG_ERROR("GZIP encoding not supported in this build, please add zlib to your project and define HTTP_ENABLE_GZIP"); - return false; -#endif - } - else - { - m_pcontent_encoding_handler.reset(new do_nothing_sub_handler(this)); - } - - return true; - } - inline - bool analize_cached_header_and_invoke_state() - { - m_response_info.clear(); - analize_first_response_line(); - std::string fake_str; //gcc error workaround - - bool res = parse_header(m_response_info.m_header_info, m_header_cache); - CHECK_AND_ASSERT_MES(res, false, "http_stream_filter::analize_cached_reply_header_and_invoke_state(): failed to anilize reply header: " << m_header_cache); - - set_reply_content_encoder(); - - m_len_in_summary = 0; - bool content_len_valid = false; - if(m_response_info.m_header_info.m_content_length.size()) - content_len_valid = string_tools::get_xtype_from_string(m_len_in_summary, m_response_info.m_header_info.m_content_length); - - - - if(!m_len_in_summary && ((m_response_info.m_response_code>=100&&m_response_info.m_response_code<200) - || 204 == m_response_info.m_response_code - || 304 == m_response_info.m_response_code) ) - {//There will be no response body, server will display the local page with error - m_state = reciev_machine_state_done; - return true; - }else if(m_response_info.m_header_info.m_transfer_encoding.size()) - { - string_tools::trim(m_response_info.m_header_info.m_transfer_encoding); - if(string_tools::compare_no_case(m_response_info.m_header_info.m_transfer_encoding, "chunked")) - { - LOG_ERROR("Wrong Transfer-Encoding:" << m_response_info.m_header_info.m_transfer_encoding); - m_state = reciev_machine_state_error; - return false; - } - m_state = 
reciev_machine_state_body_chunked; - m_chunked_state = http_chunked_state_chunk_head; - return true; - } - else if(!m_response_info.m_header_info.m_content_length.empty()) - { - //In the response header the length was specified - if(!content_len_valid) - { - LOG_ERROR("http_stream_filter::analize_cached_reply_header_and_invoke_state(): Failed to get_len_from_content_lenght();, m_query_info.m_content_length="<(u_c.port), timeout); - CHECK_AND_ASSERT_MES(res, false, "failed to connect " << u_c.host << ":" << u_c.port); - } - - return tr.invoke(u_c.uri, method, body, ppresponse_info, additional_params); - } - - } -} -} \ No newline at end of file diff --git a/contrib/epee/include/net/http_client_abstract_invoke.h b/contrib/epee/include/net/http_client_abstract_invoke.h deleted file mode 100644 index 425a355eee..0000000000 --- a/contrib/epee/include/net/http_client_abstract_invoke.h +++ /dev/null @@ -1,98 +0,0 @@ - -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
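// The body-framing selection in analize_cached_header_and_invoke_state() above
// follows a fixed precedence: status codes that forbid a body (1xx, 204, 304)
// win, then "Transfer-Encoding: chunked", then a valid Content-Length, and
// reading until the connection closes is the fallback. A hedged restatement as
// a small pure function (the names are mine, not epee's):

#include <string>

enum class body_mode { none, chunked, content_length, until_close };

body_mode pick_body_mode(int status, const std::string& transfer_encoding,
                         const std::string& content_length)
{
    if((status >= 100 && status < 200) || status == 204 || status == 304)
        return body_mode::none;
    if(transfer_encoding == "chunked")
        return body_mode::chunked;
    if(!content_length.empty())
        return body_mode::content_length;
    return body_mode::until_close;
}

int main()
{
    return pick_body_mode(200, "chunked", "") == body_mode::chunked ? 0 : 1;
}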
-// - -#pragma once -#include "storages/serializeble_struct_helper.h" - -namespace epee -{ - namespace net_utils - { - namespace http - { - template - bool invoke_http_json_remote_command(const std::string& url, TArg& out_struct, TResult& result_struct, TTransport& transport, unsigned int timeout = 5000, const std::string& method = "GET") - { - std::string req_param; - StorageNamed::InMemStorageSpace::json::store_t_to_json(out_struct, req_param); - - const http_response_info* pri = NULL; - if(!invoke_request(url, transport, timeout, &pri, method, req_param)) - { - LOG_PRINT_L1("Failed to invoke http request to " << url); - return false; - } - - if(!pri->m_response_code) - { - LOG_PRINT_L1("Failed to invoke http request to " << url << ", internal error (null response ptr)"); - return false; - } - - if(pri->m_response_code != 200) - { - LOG_PRINT_L1("Failed to invoke http request to " << url << ", wrong response code: " << pri->m_response_code); - return false; - } - - return StorageNamed::InMemStorageSpace::json::load_t_from_json(result_struct, pri->m_body); - } - - - - template - bool invoke_http_bin_remote_command(const std::string& url, TArg& out_struct, TResult& result_struct, TTransport& transport, unsigned int timeout = 5000, const std::string& method = "GET") - { - std::string req_param; - epee::StorageNamed::save_struct_as_storage_to_buff(out_struct, req_param); - - const http_response_info* pri = NULL; - if(!invoke_request(url, transport, timeout, &pri, method, req_param)) - { - LOG_PRINT_L1("Failed to invoke http request to " << url); - return false; - } - - if(!pri->m_response_code) - { - LOG_PRINT_L1("Failed to invoke http request to " << url << ", internal error (null response ptr)"); - return false; - } - - if(pri->m_response_code != 200) - { - LOG_PRINT_L1("Failed to invoke http request to " << url << ", wrong response code: " << pri->m_response_code); - return false; - } - - return epee::StorageNamed::load_struct_from_storage_buff(result_struct, pri->m_body); - } - - - } - } -} diff --git a/contrib/epee/include/net/http_client_base.h b/contrib/epee/include/net/http_client_base.h deleted file mode 100644 index 571e27f73c..0000000000 --- a/contrib/epee/include/net/http_client_base.h +++ /dev/null @@ -1,73 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -#pragma once - -namespace epee -{ - namespace net_utils - { - struct i_sub_handler - { - virtual ~i_sub_handler(){} - - virtual bool update_in( std::string& piece_of_transfer)=0; - virtual void stop(std::string& OUT collect_remains)=0; - virtual bool update_and_stop(std::string& OUT collect_remains, bool& is_changed) - { - is_changed = true; - bool res = this->update_in(collect_remains); - if(res) - this->stop(collect_remains); - return res; - } - }; - - - struct i_target_handler - { - virtual ~i_target_handler(){} - virtual bool handle_target_data( std::string& piece_of_transfer)=0; - }; - - - class do_nothing_sub_handler: public i_sub_handler - { - public: - do_nothing_sub_handler(i_target_handler* powner_filter):m_powner_filter(powner_filter) - {} - virtual bool update_in( std::string& piece_of_transfer) - { - return m_powner_filter->handle_target_data(piece_of_transfer); - } - virtual void stop(std::string& OUT collect_remains) - { - - } - i_target_handler* m_powner_filter; - }; - } -} \ No newline at end of file diff --git a/contrib/epee/include/net/http_client_via_api_helper.h b/contrib/epee/include/net/http_client_via_api_helper.h deleted file mode 100644 index 45a70993b3..0000000000 --- a/contrib/epee/include/net/http_client_via_api_helper.h +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
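// The i_sub_handler / i_target_handler pair deleted just above forms a tiny
// decoding pipeline: the HTTP client pushes raw body bytes into a sub-handler
// (content_encoding_gzip or do_nothing_sub_handler), which forwards decoded
// data to the target. A sketch of a custom stage with the same shape, trimmed
// to the two pure-virtual methods (counting_sub_handler is hypothetical):

#include <cstddef>
#include <string>

struct i_target_handler
{
    virtual ~i_target_handler() {}
    virtual bool handle_target_data(std::string& piece_of_transfer) = 0;
};

struct i_sub_handler
{
    virtual ~i_sub_handler() {}
    virtual bool update_in(std::string& piece_of_transfer) = 0;
    virtual void stop(std::string& collect_remains) = 0;
};

class counting_sub_handler : public i_sub_handler
{
public:
    explicit counting_sub_handler(i_target_handler* owner) : m_owner(owner), m_total(0) {}
    virtual bool update_in(std::string& piece)
    {
        m_total += piece.size();                    // observe, then pass through
        return m_owner->handle_target_data(piece);  // target consumes and clears the piece
    }
    virtual void stop(std::string& remains)
    {
        if(!remains.empty())
            update_in(remains);
    }
    std::size_t total() const { return m_total; }
private:
    i_target_handler* m_owner;
    std::size_t m_total;
};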
-// - - - - -#pragma once -#include -#include -#pragma comment(lib, "Wininet.lib") - -namespace epee -{ -namespace net_utils -{ - inline - bool http_ssl_invoke(const std::string& url, const std::string usr, const std::string psw, std::string& http_response_body, bool use_post = false) - { - bool final_res = false; - - ATL::CUrl url_obj; - BOOL crack_rss = url_obj.CrackUrl(string_encoding::convert_to_t >(url).c_str()); - - HINTERNET hinet = ::InternetOpenA(SHARED_JOBSCOMMON_HTTP_AGENT, INTERNET_OPEN_TYPE_PRECONFIG, NULL, NULL, 0); - if(!hinet) - { - int err = ::GetLastError(); - LOG_PRINT("Failed to call InternetOpenA, \nError: " << err << " " << log_space::get_win32_err_descr(err), LOG_LEVEL_0); - return false; - } - - DWORD dwFlags = 0; - DWORD dwBuffLen = sizeof(dwFlags); - - if(usr.size()) - { - dwFlags |= INTERNET_FLAG_IGNORE_CERT_CN_INVALID|INTERNET_FLAG_IGNORE_CERT_DATE_INVALID| - INTERNET_FLAG_PRAGMA_NOCACHE | SECURITY_FLAG_IGNORE_UNKNOWN_CA|INTERNET_FLAG_SECURE; - }else - { - dwFlags |= INTERNET_FLAG_PRAGMA_NOCACHE; - } - - - int port = url_obj.GetPortNumber(); - BOOL res = FALSE; - - HINTERNET hsession = ::InternetConnectA(hinet, string_encoding::convert_to_ansii(url_obj.GetHostName()).c_str(), port/*INTERNET_DEFAULT_HTTPS_PORT*/, usr.c_str(), psw.c_str(), INTERNET_SERVICE_HTTP, dwFlags, NULL); - if(hsession) - { - const std::string uri = string_encoding::convert_to_ansii(url_obj.GetUrlPath()) + string_encoding::convert_to_ansii(url_obj.GetExtraInfo()); - - HINTERNET hrequest = ::HttpOpenRequestA(hsession, use_post?"POST":NULL, uri.c_str(), NULL, NULL,NULL, dwFlags, NULL); - if(hrequest) - { - while(true) - { - res = ::HttpSendRequestA(hrequest, NULL, 0, NULL, 0); - if(!res) - { - //ERROR_INTERNET_INVALID_CA 45 - //ERROR_INTERNET_INVALID_URL (INTERNET_ERROR_BASE + 5) - int err = ::GetLastError(); - LOG_PRINT("Failed to call HttpSendRequestA, \nError: " << log_space::get_win32_err_descr(err), LOG_LEVEL_0); - break; - } - - DWORD code = 0; - DWORD buf_len = sizeof(code); - DWORD index = 0; - res = ::HttpQueryInfo(hrequest, HTTP_QUERY_FLAG_NUMBER|HTTP_QUERY_STATUS_CODE, &code, &buf_len, &index); - if(!res) - { - //ERROR_INTERNET_INVALID_CA 45 - //ERROR_INTERNET_INVALID_URL (INTERNET_ERROR_BASE + 5) - int err = ::GetLastError(); - LOG_PRINT("Failed to call HttpQueryInfo, \nError: " << log_space::get_win32_err_descr(err), LOG_LEVEL_0); - break; - } - if(code < 200 || code > 299) - { - LOG_PRINT("Wrong server response, HttpQueryInfo returned statuse code" << code , LOG_LEVEL_0); - break; - } - - - char buff[100000] = {0}; - DWORD readed = 0; - while(true) - { - res = ::InternetReadFile(hrequest, buff, sizeof(buff), &readed); - if(!res) - { - int err = ::GetLastError(); - LOG_PRINT("Failed to call InternetReadFile, \nError: " << log_space::get_win32_err_descr(err), LOG_LEVEL_0); - break; - } - if(readed) - { - http_response_body.append(buff, readed); - } - else - break; - } - - if(!res) - break; - - - //we success - final_res = true; - - res = ::InternetCloseHandle(hrequest); - if(!res) - { - int err = ::GetLastError(); - LOG_PRINT("Failed to call InternetCloseHandle, \nError: " << log_space::get_win32_err_descr(err), LOG_LEVEL_0); - } - - break; - } - } - else - { - //ERROR_INTERNET_INVALID_CA - int err = ::GetLastError(); - LOG_PRINT("Failed to call InternetOpenUrlA, \nError: " << log_space::get_win32_err_descr(err), LOG_LEVEL_0); - return false; - } - - res = ::InternetCloseHandle(hsession); - if(!res) - { - int err = ::GetLastError(); - LOG_PRINT("Failed to call InternetCloseHandle, 
\nError: " << log_space::get_win32_err_descr(err), LOG_LEVEL_0); - } - }else - { - int err = ::GetLastError(); - LOG_PRINT("Failed to call InternetConnectA(" << string_encoding::convert_to_ansii(url_obj.GetHostName()) << ", port " << port << " \nError: " << log_space::get_win32_err_descr(err), LOG_LEVEL_0); - } - - - - res = ::InternetCloseHandle(hinet); - if(!res) - { - int err = ::GetLastError(); - LOG_PRINT("Failed to call InternetCloseHandle, \nError: " << log_space::get_win32_err_descr(err), LOG_LEVEL_0); - } - return final_res; - } -} -} \ No newline at end of file diff --git a/contrib/epee/include/net/http_protocol_handler.h b/contrib/epee/include/net/http_protocol_handler.h deleted file mode 100644 index 72b4b57569..0000000000 --- a/contrib/epee/include/net/http_protocol_handler.h +++ /dev/null @@ -1,212 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-//
-
-
-
-
-#ifndef _HTTP_SERVER_H_
-#define _HTTP_SERVER_H_
-
-#include <string>
-#include "include_base_utils.h"
-#include "net_utils_base.h"
-#include "to_nonconst_iterator.h"
-#include "http_base.h"
-
-namespace epee
-{
-namespace net_utils
-{
-  namespace http
-  {
-
-
-    /************************************************************************/
-    /*                                                                      */
-    /************************************************************************/
-    struct http_server_config
-    {
-      std::string m_folder;
-      critical_section m_lock;
-    };
-
-    /************************************************************************/
-    /*                                                                      */
-    /************************************************************************/
-    template<class t_connection_context>
-    class simple_http_connection_handler
-    {
-    public:
-      typedef t_connection_context connection_context;//t_connection_context net_utils::connection_context_base connection_context;
-      typedef http_server_config config_type;
-
-      simple_http_connection_handler(i_service_endpoint* psnd_hndlr, config_type& config);
-      virtual ~simple_http_connection_handler(){}
-
-      bool release_protocol()
-      {
-        return true;
-      }
-
-      virtual bool thread_init()
-      {
-        return true;
-      }
-
-      virtual bool thread_deinit()
-      {
-        return true;
-      }
-      bool after_init_connection()
-      {
-        return true;
-      }
-      virtual bool handle_recv(const void* ptr, size_t cb);
-      virtual bool handle_request(const http::http_request_info& query_info, http_response_info& response);
-
-    private:
-      enum machine_state{
-        http_state_retriving_comand_line,
-        http_state_retriving_header,
-        http_state_retriving_body,
-        http_state_connection_close,
-        http_state_error
-      };
-
-      enum body_transfer_type{
-        http_body_transfer_chunked,
-        http_body_transfer_measure,//mean "Content-Length" valid
-        http_body_transfer_chunked_instead_measure,
-        http_body_transfer_connection_close,
-        http_body_transfer_multipart,
-        http_body_transfer_undefined
-      };
-
-      bool handle_buff_in(std::string& buf);
-
-      bool analize_cached_request_header_and_invoke_state(size_t pos);
-
-      bool handle_invoke_query_line();
-      bool parse_cached_header(http_header_info& body_info, const std::string& m_cache_to_process, size_t pos);
-      std::string::size_type match_end_of_header(const std::string& buf);
-      bool get_len_from_content_lenght(const std::string& str, size_t& len);
-      bool handle_retriving_query_body();
-      bool handle_query_measure();
-      bool set_ready_state();
-      bool slash_to_back_slash(std::string& str);
-      std::string get_file_mime_tipe(const std::string& path);
-      std::string get_response_header(const http_response_info& response);
-
-      //major function
-      inline bool handle_request_and_send_response(const http::http_request_info& query_info);
-
-
-      std::string get_not_found_response_body(const std::string& URI);
-
-      std::string m_root_path;
-      std::string m_cache;
-      machine_state m_state;
-      body_transfer_type m_body_transfer_type;
-      bool m_is_stop_handling;
-      http::http_request_info m_query_info;
-      size_t m_len_summary, m_len_remain;
-      config_type& m_config;
-      bool m_want_close;
-    protected:
-      i_service_endpoint* m_psnd_hndlr;
-    };
-
-    template<class t_connection_context>
-    struct i_http_server_handler
-    {
-      virtual ~i_http_server_handler(){}
-      virtual bool handle_http_request(const http_request_info& query_info,
-                                       http_response_info& response,
-                                       t_connection_context& m_conn_context) = 0;
-      virtual bool init_server_thread(){return true;}
-      virtual bool deinit_server_thread(){return true;}
-    };
-
-    template<class t_connection_context>
-    struct custum_handler_config: public http_server_config
-    {
-      i_http_server_handler<t_connection_context>* m_phandler;
-    };
-
-
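// To serve custom responses, an implementation of i_http_server_handler is
// attached through custum_handler_config::m_phandler; http_custom_handler
// (declared below) fills in the 200/"text/plain" defaults before delegating.
// A minimal hypothetical handler against the interface above:

template<class t_connection_context>
struct hello_handler : public epee::net_utils::http::i_http_server_handler<t_connection_context>
{
    virtual bool handle_http_request(const epee::net_utils::http::http_request_info& query_info,
                                     epee::net_utils::http::http_response_info& response,
                                     t_connection_context& /*conn_context*/)
    {
        response.m_body = "you asked for " + query_info.m_URI;
        return true;    // returning false is reported as a request failure
    }
};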
/************************************************************************/ - /* */ - /************************************************************************/ - - template - class http_custom_handler: public simple_http_connection_handler - { - public: - typedef custum_handler_config config_type; - - http_custom_handler(i_service_endpoint* psnd_hndlr, config_type& config, t_connection_context& conn_context) - : simple_http_connection_handler(psnd_hndlr, config), - m_config(config), - m_conn_context(conn_context) - {} - inline bool handle_request(const http_request_info& query_info, http_response_info& response) - { - CHECK_AND_ASSERT_MES(m_config.m_phandler, false, "m_config.m_phandler is NULL!!!!"); - //fill with default values - response.m_mime_tipe = "text/plain"; - response.m_response_code = 200; - response.m_response_comment = "OK"; - response.m_body.clear(); - return m_config.m_phandler->handle_http_request(query_info, response, m_conn_context); - } - - virtual bool thread_init() - { - return m_config.m_phandler->init_server_thread();; - } - - virtual bool thread_deinit() - { - return m_config.m_phandler->deinit_server_thread(); - } - void handle_qued_callback() - {} - bool after_init_connection() - { - return true; - } - - private: - //simple_http_connection_handler::config_type m_stub_config; - config_type& m_config; - t_connection_context& m_conn_context; - }; - } -} -} - -#include "http_protocol_handler.inl" - -#endif //_HTTP_SERVER_H_ diff --git a/contrib/epee/include/net/http_protocol_handler.inl b/contrib/epee/include/net/http_protocol_handler.inl deleted file mode 100644 index fc091a2126..0000000000 --- a/contrib/epee/include/net/http_protocol_handler.inl +++ /dev/null @@ -1,682 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// - - -#include -#include -#include "http_protocol_handler.h" -#include "include_base_utils.h" -#include "reg_exp_definer.h" -#include "string_tools.h" -#include "time_helper.h" -#include "file_io_utils.h" -#include "net_parse_helpers.h" - -#define HTTP_MAX_URI_LEN 9000 -#define HTTP_MAX_HEADER_LEN 100000 - -namespace epee -{ -namespace net_utils -{ - namespace http - { - - struct multipart_entry - { - std::list > m_etc_header_fields; - std::string m_content_disposition; - std::string m_content_type; - std::string m_body; - }; - - inline - bool match_boundary(const std::string& content_type, std::string& boundary) - { - STATIC_REGEXP_EXPR_1(rexp_match_boundary, "boundary=(.*?)(($)|([;\\s,]))", boost::regex::icase | boost::regex::normal); - // 1 - boost::smatch result; - if(boost::regex_search(content_type, result, rexp_match_boundary, boost::match_default) && result[0].matched) - { - boundary = result[1]; - return true; - } - - return false; - } - - inline - bool parse_header(std::string::const_iterator it_begin, std::string::const_iterator it_end, multipart_entry& entry) - { - STATIC_REGEXP_EXPR_1(rexp_mach_field, - "\n?((Content-Disposition)|(Content-Type)" - // 12 3 - "|([\\w-]+?)) ?: ?((.*?)(\r?\n))[^\t ]", - //4 56 7 - boost::regex::icase | boost::regex::normal); - - boost::smatch result; - std::string::const_iterator it_current_bound = it_begin; - std::string::const_iterator it_end_bound = it_end; - - //lookup all fields and fill well-known fields - while( boost::regex_search( it_current_bound, it_end_bound, result, rexp_mach_field, boost::match_default) && result[0].matched) - { - const size_t field_val = 6; - const size_t field_etc_name = 4; - - int i = 2; //start position = 2 - if(result[i++].matched)//"Content-Disposition" - entry.m_content_disposition = result[field_val]; - else if(result[i++].matched)//"Content-Type" - entry.m_content_type = result[field_val]; - else if(result[i++].matched)//e.t.c (HAVE TO BE MATCHED!) 
- entry.m_etc_header_fields.push_back(std::pair(result[field_etc_name], result[field_val])); - else - { - LOG_ERROR("simple_http_connection_handler::parse_header() not matched last entry in:"<& out_values) - { - //bool res = file_io_utils::load_file_to_string("C:\\public\\multupart_data", body); - - std::string boundary; - if(!match_boundary(content_type, boundary)) - { - LOG_PRINT("Failed to match boundary in content type: " << content_type, LOG_LEVEL_0); - return false; - } - - boundary+="\r\n"; - bool is_stop = false; - bool first_step = true; - - std::string::const_iterator it_begin = body.begin(); - std::string::const_iterator it_end; - while(!is_stop) - { - std::string::size_type pos = body.find(boundary, std::distance(body.begin(), it_begin)); - - if(std::string::npos == pos) - { - is_stop = true; - boundary.erase(boundary.size()-2, 2); - boundary+= "--"; - pos = body.find(boundary, std::distance(body.begin(), it_begin)); - if(std::string::npos == pos) - { - LOG_PRINT("Error: Filed to match closing multipart tag", LOG_LEVEL_0); - it_end = body.end(); - }else - { - it_end = body.begin() + pos; - } - }else - it_end = body.begin() + pos; - - - if(first_step && !is_stop) - { - first_step = false; - it_begin = it_end + boundary.size(); - std::string temp = "\r\n--"; - boundary = temp + boundary; - continue; - } - - out_values.push_back(multipart_entry()); - if(!handle_part_of_multipart(it_begin, it_end, out_values.back())) - { - LOG_PRINT("Failed to handle_part_of_multipart", LOG_LEVEL_0); - return false; - } - - it_begin = it_end + boundary.size(); - } - - return true; - } - - - - - //-------------------------------------------------------------------------------------------- - template - simple_http_connection_handler::simple_http_connection_handler(i_service_endpoint* psnd_hndlr, config_type& config): - m_state(http_state_retriving_comand_line), - m_body_transfer_type(http_body_transfer_undefined), - m_is_stop_handling(false), - m_len_summary(0), - m_len_remain(0), - m_config(config), - m_want_close(false), - m_psnd_hndlr(psnd_hndlr) - { - - } - //-------------------------------------------------------------------------------------------- - template - bool simple_http_connection_handler::set_ready_state() - { - m_is_stop_handling = false; - m_state = http_state_retriving_comand_line; - m_body_transfer_type = http_body_transfer_undefined; - m_query_info.clear(); - m_len_summary = 0; - return true; - } - //-------------------------------------------------------------------------------------------- - template - bool simple_http_connection_handler::handle_recv(const void* ptr, size_t cb) - { - std::string buf((const char*)ptr, cb); - //LOG_PRINT_L0("HTTP_RECV: " << ptr << "\r\n" << buf); - //file_io_utils::save_string_to_file(string_tools::get_current_module_folder() + "/" + boost::lexical_cast(ptr), std::string((const char*)ptr, cb)); - - bool res = handle_buff_in(buf); - if(m_want_close/*m_state == http_state_connection_close || m_state == http_state_error*/) - return false; - return res; - } - //-------------------------------------------------------------------------------------------- - template - bool simple_http_connection_handler::handle_buff_in(std::string& buf) - { - - if(m_cache.size()) - m_cache += buf; - else - m_cache.swap(buf); - - m_is_stop_handling = false; - while(!m_is_stop_handling) - { - switch(m_state) - { - case http_state_retriving_comand_line: - //The HTTP protocol does not place any a priori limit on the length of a URI. 
(c)RFC2616 - //but we forebly restirct it len to HTTP_MAX_URI_LEN to make it more safely - if(!m_cache.size()) - break; - - //check_and_handle_fake_response(); - if((m_cache[0] == '\r' || m_cache[0] == '\n')) - { - //some times it could be that before query line cold be few line breaks - //so we have to be calm without panic with assers - m_cache.erase(0, 1); - break; - } - - if(std::string::npos != m_cache.find('\n', 0)) - handle_invoke_query_line(); - else - { - m_is_stop_handling = true; - if(m_cache.size() > HTTP_MAX_URI_LEN) - { - LOG_ERROR("simple_http_connection_handler::handle_buff_out: Too long URI line"); - m_state = http_state_error; - return false; - } - } - break; - case http_state_retriving_header: - { - std::string::size_type pos = match_end_of_header(m_cache); - if(std::string::npos == pos) - { - m_is_stop_handling = true; - if(m_cache.size() > HTTP_MAX_HEADER_LEN) - { - LOG_ERROR("simple_http_connection_handler::handle_buff_in: Too long header area"); - m_state = http_state_error; - return false; - } - break; - } - analize_cached_request_header_and_invoke_state(pos); - break; - } - case http_state_retriving_body: - return handle_retriving_query_body(); - case http_state_connection_close: - return false; - default: - LOG_ERROR("simple_http_connection_handler::handle_char_out: Wrong state: " << m_state); - return false; - case http_state_error: - LOG_ERROR("simple_http_connection_handler::handle_char_out: Error state!!!"); - return false; - } - - if(!m_cache.size()) - m_is_stop_handling = true; - } - - return true; - } - //-------------------------------------------------------------------------------------------- - inline bool analize_http_method(const boost::smatch& result, http::http_method& method, int& http_ver_major, int& http_ver_minor) - { - CHECK_AND_ASSERT_MES(result[0].matched, false, "simple_http_connection_handler::analize_http_method() assert failed..."); - http_ver_major = boost::lexical_cast(result[11]); - http_ver_minor = boost::lexical_cast(result[12]); - if(result[4].matched) - method = http::http_method_get; - else if(result[5].matched) - method = http::http_method_head; - else if(result[6].matched) - method = http::http_method_post; - else if(result[7].matched) - method = http::http_method_put; - else - method = http::http_method_etc; - - return true; - } - - //-------------------------------------------------------------------------------------------- - template - bool simple_http_connection_handler::handle_invoke_query_line() - { - LOG_FRAME("simple_http_connection_handler::handle_recognize_protocol_out(*)", LOG_LEVEL_3); - - STATIC_REGEXP_EXPR_1(rexp_match_command_line, "^(((OPTIONS)|(GET)|(HEAD)|(POST)|(PUT)|(DELETE)|(TRACE)) (\\S+) HTTP/(\\d+).(\\d+))\r?\n", boost::regex::icase | boost::regex::normal); - // 123 4 5 6 7 8 9 10 11 12 - //size_t match_len = 0; - boost::smatch result; - if(boost::regex_search(m_cache, result, rexp_match_command_line, boost::match_default) && result[0].matched) - { - analize_http_method(result, m_query_info.m_http_method, m_query_info.m_http_ver_hi, m_query_info.m_http_ver_hi); - m_query_info.m_URI = result[10]; - parse_uri(m_query_info.m_URI, m_query_info.m_uri_content); - m_query_info.m_http_method_str = result[2]; - m_query_info.m_full_request_str = result[0]; - - m_cache.erase(m_cache.begin(), to_nonsonst_iterator(m_cache, result[0].second)); - - m_state = http_state_retriving_header; - - return true; - }else - { - m_state = http_state_error; - LOG_ERROR("simple_http_connection_handler::handle_invoke_query_line(): 
-    inline bool analize_http_method(const boost::smatch& result, http::http_method& method, int& http_ver_major, int& http_ver_minor)
-    {
-      CHECK_AND_ASSERT_MES(result[0].matched, false, "simple_http_connection_handler::analize_http_method() assert failed...");
-      http_ver_major = boost::lexical_cast<int>(result[11]);
-      http_ver_minor = boost::lexical_cast<int>(result[12]);
-      if(result[4].matched)
-        method = http::http_method_get;
-      else if(result[5].matched)
-        method = http::http_method_head;
-      else if(result[6].matched)
-        method = http::http_method_post;
-      else if(result[7].matched)
-        method = http::http_method_put;
-      else
-        method = http::http_method_etc;
-
-      return true;
-    }
-
-    //--------------------------------------------------------------------------------------------
-    template<class t_connection_context>
-    bool simple_http_connection_handler<t_connection_context>::handle_invoke_query_line()
-    {
-      LOG_FRAME("simple_http_connection_handler::handle_recognize_protocol_out(*)", LOG_LEVEL_3);
-
-      STATIC_REGEXP_EXPR_1(rexp_match_command_line, "^(((OPTIONS)|(GET)|(HEAD)|(POST)|(PUT)|(DELETE)|(TRACE)) (\\S+) HTTP/(\\d+).(\\d+))\r?\n", boost::regex::icase | boost::regex::normal);
-      //                                            123         4     5      6      7     8        9        10     11      12
-      //size_t match_len = 0;
-      boost::smatch result;
-      if(boost::regex_search(m_cache, result, rexp_match_command_line, boost::match_default) && result[0].matched)
-      {
-        analize_http_method(result, m_query_info.m_http_method, m_query_info.m_http_ver_hi, m_query_info.m_http_ver_lo);
-        m_query_info.m_URI = result[10];
-        parse_uri(m_query_info.m_URI, m_query_info.m_uri_content);
-        m_query_info.m_http_method_str = result[2];
-        m_query_info.m_full_request_str = result[0];
-
-        m_cache.erase(m_cache.begin(), to_nonsonst_iterator(m_cache, result[0].second));
-
-        m_state = http_state_retriving_header;
-
-        return true;
-      }else
-      {
-        m_state = http_state_error;
-        LOG_ERROR("simple_http_connection_handler::handle_invoke_query_line(): Failed to match first line: " << m_cache);
-        return false;
-      }
-
-      return false;
-    }
-    //--------------------------------------------------------------------------------------------
-    template<class t_connection_context>
-    std::string::size_type simple_http_connection_handler<t_connection_context>::match_end_of_header(const std::string& buf)
-    {
-
-      //Here we return the head size, including the terminating sequence (\r\n\r\n or \n\n)
-      std::string::size_type res = buf.find("\r\n\r\n");
-      if(std::string::npos != res)
-        return res+4;
-      res = buf.find("\n\n");
-      if(std::string::npos != res)
-        return res+2;
-      return res;
-    }
-    //--------------------------------------------------------------------------------------------
-    template<class t_connection_context>
-    bool simple_http_connection_handler<t_connection_context>::analize_cached_request_header_and_invoke_state(size_t pos)
-    {
-      //LOG_PRINT_L4("HTTP HEAD:\r\n" << m_cache.substr(0, pos));
-
-      LOG_FRAME("simple_http_connection_handler::analize_cached_request_header_and_invoke_state(*)", LOG_LEVEL_3);
-
-      m_query_info.m_full_request_buf_size = pos;
-      m_query_info.m_request_head.assign(m_cache.begin(), m_cache.begin()+pos);
-
-      if(!parse_cached_header(m_query_info.m_header_info, m_cache, pos))
-      {
-        LOG_ERROR("simple_http_connection_handler::analize_cached_request_header_and_invoke_state(): failed to analize request header: " << m_cache);
-        m_state = http_state_error;
-      }
-
-      m_cache.erase(0, pos);
-
-      std::string req_command_str = m_query_info.m_full_request_str;
-      //if we have a POST or PUT command, it is very possible that we will get a body,
-      //but for now we suppose that we have a body only when we have "Content-Length"
-      if(m_query_info.m_header_info.m_content_length.size())
-      {
-        m_state = http_state_retriving_body;
-        m_body_transfer_type = http_body_transfer_measure;
-        if(!get_len_from_content_lenght(m_query_info.m_header_info.m_content_length, m_len_summary))
-        {
-          LOG_ERROR("simple_http_connection_handler::analize_cached_request_header_and_invoke_state(): Failed to get_len_from_content_lenght();, m_query_info.m_content_length=" << m_query_info.m_header_info.m_content_length);
-          m_state = http_state_error;
-          return false;
-        }
-        m_len_remain = m_len_summary;
-      }else
-      {//no body expected: handle the request right away
-        if(handle_request_and_send_response(m_query_info))
-          set_ready_state();
-        else
-          m_state = http_state_error;
-      }
-      return true;
-    }
-    //-----------------------------------------------------------------------------------
-    template<class t_connection_context>
-    bool simple_http_connection_handler<t_connection_context>::handle_retriving_query_body()
-    {
-      switch(m_body_transfer_type)
-      {
-      case http_body_transfer_measure:
-        return handle_query_measure();
-      case http_body_transfer_chunked:
-      case http_body_transfer_connection_close:
-      case http_body_transfer_multipart:
-      case http_body_transfer_undefined:
-      default:
-        LOG_ERROR("simple_http_connection_handler::handle_retriving_query_body(): Unexpected m_body_query_type state:" << m_body_transfer_type);
-        m_state = http_state_error;
-        return false;
-      }
-
-      return true;
-    }
-    //-----------------------------------------------------------------------------------
-    template<class t_connection_context>
-    bool simple_http_connection_handler<t_connection_context>::handle_query_measure()
-    {
-
-      if(m_len_remain >= m_cache.size())
-      {
-        m_len_remain -= m_cache.size();
-        m_query_info.m_body += m_cache;
-        m_cache.clear();
-      }else
-      {
-        m_query_info.m_body.append(m_cache.begin(), m_cache.begin() + m_len_remain);
-        m_cache.erase(0, m_len_remain);
-        m_len_remain = 0;
-      }
-
-      if(!m_len_remain)
-      {
-        if(handle_request_and_send_response(m_query_info))
-          set_ready_state();
-        else
-          m_state = http_state_error;
-      }
-      return true;
-    }
-    //--------------------------------------------------------------------------------------------
-    template<class t_connection_context>
-    bool simple_http_connection_handler<t_connection_context>::parse_cached_header(http_header_info& body_info, const std::string& m_cache_to_process, size_t pos)
-    {
-      LOG_FRAME("http_stream_filter::parse_cached_header(*)", LOG_LEVEL_3);
-
-      STATIC_REGEXP_EXPR_1(rexp_mach_field,
-        "\n?((Connection)|(Referer)|(Content-Length)|(Content-Type)|(Transfer-Encoding)|(Content-Encoding)|(Host)|(Cookie)"
-        // 12            3         4                5              6                   7                  8      9
-        "|([\\w-]+?)) ?: ?((.*?)(\r?\n))[^\t ]",
-        //10               1112  13
-        boost::regex::icase | boost::regex::normal);
-
-      boost::smatch result;
-      std::string::const_iterator it_current_bound = m_cache_to_process.begin();
-      std::string::const_iterator it_end_bound = m_cache_to_process.begin()+pos;
-
-      body_info.clear();
-
-      //lookup all fields and fill well-known fields
-      while( boost::regex_search( it_current_bound, it_end_bound, result, rexp_mach_field, boost::match_default) && result[0].matched)
-      {
-        const size_t field_val = 12;
-        const size_t field_etc_name = 10;
-
-        int i = 2; //start position = 2
-        if(result[i++].matched)//"Connection"
-          body_info.m_connection = result[field_val];
-        else if(result[i++].matched)//"Referer"
-          body_info.m_referer = result[field_val];
-        else if(result[i++].matched)//"Content-Length"
-          body_info.m_content_length = result[field_val];
-        else if(result[i++].matched)//"Content-Type"
-          body_info.m_content_type = result[field_val];
-        else if(result[i++].matched)//"Transfer-Encoding"
-          body_info.m_transfer_encoding = result[field_val];
-        else if(result[i++].matched)//"Content-Encoding"
-          body_info.m_content_encoding = result[field_val];
-        else if(result[i++].matched)//"Host"
-          body_info.m_host = result[field_val];
-        else if(result[i++].matched)//"Cookie"
-          body_info.m_cookie = result[field_val];
-        else if(result[i++].matched)//e.t.c (HAVE TO BE MATCHED!)
-          body_info.m_etc_fields.push_back(std::pair<std::string, std::string>(result[field_etc_name], result[field_val]));
-        else
-        {
-          LOG_ERROR("simple_http_connection_handler::parse_cached_header() not matched last entry in:" << m_cache_to_process);
-        }
-
-        it_current_bound = result[(int)result.size()-1].first;
-      }
-      return true;
-    }
-    //-----------------------------------------------------------------------------------
-    template<class t_connection_context>
-    bool simple_http_connection_handler<t_connection_context>::get_len_from_content_lenght(const std::string& str, size_t& OUT len)
-    {
-      STATIC_REGEXP_EXPR_1(rexp_mach_field, "\\d+", boost::regex::normal);
-      std::string res;
-      boost::smatch result;
-      if(!(boost::regex_search( str, result, rexp_mach_field, boost::match_default) && result[0].matched))
-        return false;
-
-      len = boost::lexical_cast<size_t>(result[0]);
-      return true;
-    }
-    //-----------------------------------------------------------------------------------
LOG_PRINT("URI \""<< query_info.m_full_request_str.substr(0, query_info.m_full_request_str.size()-2) << "\" [" << destination_file_path << "] Not Found (404 )" , LOG_LEVEL_1); - response.m_body = get_not_found_response_body(query_info.m_URI); - response.m_response_code = 404; - response.m_response_comment = "Not found"; - response.m_mime_tipe = "text/html"; - return true; - } - - LOG_PRINT(" -->> " << query_info.m_full_request_str << "\r\n<<--OK" , LOG_LEVEL_3); - response.m_response_code = 200; - response.m_response_comment = "OK"; - response.m_mime_tipe = get_file_mime_tipe(uri_to_path); - - - return true; - } - //----------------------------------------------------------------------------------- - template - std::string simple_http_connection_handler::get_response_header(const http_response_info& response) - { - std::string buf = "HTTP/1.1 "; - buf += boost::lexical_cast(response.m_response_code) + " " + response.m_response_comment + "\r\n" + - "Server: Epee-based\r\n" - "Content-Length: "; - buf += boost::lexical_cast(response.m_body.size()) + "\r\n"; - buf += "Content-Type: "; - buf += response.m_mime_tipe + "\r\n"; - - buf += "Last-Modified: "; - time_t tm; - time(&tm); - buf += misc_utils::get_internet_time_str(tm) + "\r\n"; - buf += "Accept-Ranges: bytes\r\n"; - //Wed, 01 Dec 2010 03:27:41 GMT" - - string_tools::trim(m_query_info.m_header_info.m_connection); - if(m_query_info.m_header_info.m_connection.size()) - { - if(!string_tools::compare_no_case("close", m_query_info.m_header_info.m_connection)) - { - //closing connection after sending - buf += "Connection: close\r\n"; - m_state = http_state_connection_close; - m_want_close = true; - } - } - //add additional fields, if it is - for(fields_list::const_iterator it = response.m_additional_fields.begin(); it!=response.m_additional_fields.end(); it++) - buf += it->first + ":" + it->second + "\r\n"; - - buf+="\r\n"; - - return buf; - } - //----------------------------------------------------------------------------------- - template - std::string simple_http_connection_handler::get_file_mime_tipe(const std::string& path) - { - std::string result; - std::string ext = string_tools::get_extension(path); - if(!string_tools::compare_no_case(ext, "gif")) - result = "image/gif"; - else if(!string_tools::compare_no_case(ext, "jpg")) - result = "image/jpeg"; - else if(!string_tools::compare_no_case(ext, "html")) - result = "text/html"; - else if(!string_tools::compare_no_case(ext, "htm")) - result = "text/html"; - else if(!string_tools::compare_no_case(ext, "js")) - result = "application/x-javascript"; - else if(!string_tools::compare_no_case(ext, "css")) - result = "text/css"; - else if(!string_tools::compare_no_case(ext, "xml")) - result = "application/xml"; - else if(!string_tools::compare_no_case(ext, "svg")) - result = "image/svg+xml"; - - - return result; - } - //----------------------------------------------------------------------------------- - template - std::string simple_http_connection_handler::get_not_found_response_body(const std::string& URI) - { - std::string body = - "\r\n" - "\r\n" - "404 Not Found\r\n" - "\r\n" - "

Not Found

\r\n" - "

The requested URL \r\n"; - body += URI; - body += "was not found on this server.

\r\n" - "\r\n"; - - return body; - } - //-------------------------------------------------------------------------------------------- - template - bool simple_http_connection_handler::slash_to_back_slash(std::string& str) - { - for(std::string::iterator it = str.begin(); it!=str.end(); it++) - if('/' == *it) - *it = '\\'; - return true; - } - } -} -} - -//-------------------------------------------------------------------------------------------- -//-------------------------------------------------------------------------------------------- -//-------------------------------------------------------------------------------------------- \ No newline at end of file diff --git a/contrib/epee/include/net/http_server_cp.h b/contrib/epee/include/net/http_server_cp.h deleted file mode 100644 index bbb167f9f3..0000000000 --- a/contrib/epee/include/net/http_server_cp.h +++ /dev/null @@ -1,48 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - - -#ifndef _HTTP_SERVER_CP_H_ -#define _HTTP_SERVER_CP_H_ - -#include "abstract_tcp_server_cp.h" -#include "http_server.h" -namespace epee -{ -namespace net_utils -{ - typedef cp_server_impl cp_http_server_file_system; - typedef cp_server_impl cp_http_server_custum_handling; -} -} - - - -#endif - - diff --git a/contrib/epee/include/net/http_server_cp2.h b/contrib/epee/include/net/http_server_cp2.h deleted file mode 100644 index 1a503a4de2..0000000000 --- a/contrib/epee/include/net/http_server_cp2.h +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - - -#ifndef _HTTP_SERVER_CP2_H_ -#define _HTTP_SERVER_CP2_H_ - -#include "abstract_tcp_server2.h" -#include "http_protocol_handler.h" -namespace epee -{ -namespace net_utils -{ - typedef boosted_tcp_server > boosted_http_server_file_system; - typedef boosted_tcp_server > boosted_http_server_custum_handling; -} -} - - -#endif - - diff --git a/contrib/epee/include/net/http_server_handlers_map2.h b/contrib/epee/include/net/http_server_handlers_map2.h deleted file mode 100644 index 12ad9d9731..0000000000 --- a/contrib/epee/include/net/http_server_handlers_map2.h +++ /dev/null @@ -1,230 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-//
-
-
-#pragma once
-#include "http_base.h"
-#include "jsonrpc_structs.h"
-#include "misc_os_dependent.h"
-#include "storages/portable_storage.h"
-#include "storages/portable_storage_template_helper.h"
-
-
-#define CHAIN_HTTP_TO_MAP2(context_type) bool handle_http_request(const epee::net_utils::http::http_request_info& query_info, \
-              epee::net_utils::http::http_response_info& response, \
-              context_type& m_conn_context) \
-{\
-  LOG_PRINT_L2("HTTP [" << epee::string_tools::get_ip_string_from_int32(m_conn_context.m_remote_ip ) << "] " << query_info.m_http_method_str << " " << query_info.m_URI); \
-  response.m_response_code = 200; \
-  response.m_response_comment = "Ok"; \
-  if(!handle_http_request_map(query_info, response, m_conn_context)) \
-  {response.m_response_code = 404;response.m_response_comment = "Not found";} \
-  return true; \
-}
-
-
-#define BEGIN_URI_MAP2() template<class t_context> bool handle_http_request_map(const epee::net_utils::http::http_request_info& query_info, \
-  epee::net_utils::http::http_response_info& response_info, \
-  t_context& m_conn_context) { \
-  bool handled = false; \
-  if(false) return true; //just a stub to have "else if"
-
-#define MAP_URI2(pattern, callback)  else if(std::string::npos != query_info.m_URI.find(pattern)) return callback(query_info, response_info, m_conn_context);
-
-#define MAP_URI_AUTO_XML2(s_pattern, callback_f, command_type) //TODO: don't think i ever again will use xml - ambiguous and "overtagged" format
-
-#define MAP_URI_AUTO_JON2(s_pattern, callback_f, command_type) \
-    else if(query_info.m_URI == s_pattern) \
-    { \
-      handled = true; \
-      uint64_t ticks = epee::misc_utils::get_tick_count(); \
-      boost::value_initialized<command_type::request> req; \
-      bool parse_res = epee::serialization::load_t_from_json(static_cast<command_type::request&>(req), query_info.m_body); \
-      CHECK_AND_ASSERT_MES(parse_res, false, "Failed to parse json: \r\n" << query_info.m_body); \
-      uint64_t ticks1 = epee::misc_utils::get_tick_count(); \
-      boost::value_initialized<command_type::response> resp;\
-      if(!callback_f(static_cast<command_type::request&>(req), static_cast<command_type::response&>(resp), m_conn_context)) \
-      { \
-        LOG_ERROR("Failed to " << #callback_f << "()"); \
-        response_info.m_response_code = 500; \
-        response_info.m_response_comment = "Internal Server Error"; \
-        return true; \
-      } \
-      uint64_t ticks2 = epee::misc_utils::get_tick_count(); \
-      epee::serialization::store_t_to_json(static_cast<command_type::response&>(resp), response_info.m_body); \
-      uint64_t ticks3 = epee::misc_utils::get_tick_count(); \
-      response_info.m_mime_tipe = "application/json"; \
-      response_info.m_header_info.m_content_type = " application/json"; \
-      LOG_PRINT( s_pattern << " processed with " << ticks1-ticks << "/"<< ticks2-ticks1 << "/" << ticks3-ticks2 << "ms", LOG_LEVEL_2); \
-    }
-
-#define MAP_URI_AUTO_BIN2(s_pattern, callback_f, command_type) \
-    else if(query_info.m_URI == s_pattern) \
-    { \
-      handled = true; \
-      uint64_t ticks = epee::misc_utils::get_tick_count(); \
-      boost::value_initialized<command_type::request> req; \
-      bool parse_res = epee::serialization::load_t_from_binary(static_cast<command_type::request&>(req), query_info.m_body); \
-      CHECK_AND_ASSERT_MES(parse_res, false, "Failed to parse bin body data, body size=" << query_info.m_body.size()); \
-      uint64_t ticks1 = epee::misc_utils::get_tick_count(); \
-      boost::value_initialized<command_type::response> resp;\
-      if(!callback_f(static_cast<command_type::request&>(req), static_cast<command_type::response&>(resp), m_conn_context)) \
-      { \
-        LOG_ERROR("Failed to " << #callback_f << "()"); \
-        response_info.m_response_code = 500; \
-        response_info.m_response_comment = "Internal Server Error"; \
-        return true; \
-      } \
-      uint64_t ticks2 = epee::misc_utils::get_tick_count(); \
-      epee::serialization::store_t_to_binary(static_cast<command_type::response&>(resp), response_info.m_body); \
-      uint64_t ticks3 = epee::misc_utils::get_tick_count(); \
-      response_info.m_mime_tipe = " application/octet-stream"; \
-      response_info.m_header_info.m_content_type = " application/octet-stream"; \
-      LOG_PRINT( s_pattern << "() processed with " << ticks1-ticks << "/"<< ticks2-ticks1 << "/" << ticks3-ticks2 << "ms", LOG_LEVEL_2); \
-    }
-
-#define CHAIN_URI_MAP2(callback) else {callback(query_info, response_info, m_conn_context);handled = true;}
-
-#define END_URI_MAP2() return handled;}
-
-
-#define BEGIN_JSON_RPC_MAP(uri)    else if(query_info.m_URI == uri) \
-    { \
-    uint64_t ticks = epee::misc_utils::get_tick_count(); \
-    epee::serialization::portable_storage ps; \
-    if(!ps.load_from_json(query_info.m_body)) \
-    { \
-      boost::value_initialized<epee::json_rpc::error_response> rsp; \
-      static_cast<epee::json_rpc::error_response&>(rsp).error.code = -32700; \
-      static_cast<epee::json_rpc::error_response&>(rsp).error.message = "Parse error"; \
-      epee::serialization::store_t_to_json(static_cast<epee::json_rpc::error_response&>(rsp), response_info.m_body); \
-      return true; \
-    } \
-    epee::serialization::storage_entry id_; \
-    id_ = epee::serialization::storage_entry(std::string()); \
-    ps.get_value("id", id_, nullptr); \
-    std::string callback_name; \
-    if(!ps.get_value("method", callback_name, nullptr)) \
-    { \
-      epee::json_rpc::error_response rsp; \
-      rsp.jsonrpc = "2.0"; \
-      rsp.error.code = -32600; \
-      rsp.error.message = "Invalid Request"; \
-      epee::serialization::store_t_to_json(static_cast<epee::json_rpc::error_response&>(rsp), response_info.m_body); \
-      return true; \
-    } \
-    if(false) return true; //just a stub to have "else if"
-
-
-#define PREPARE_OBJECTS_FROM_JSON(command_type) \
-  handled = true; \
-  boost::value_initialized<epee::json_rpc::request<command_type::request> > req_; \
-  epee::json_rpc::request<command_type::request>& req = static_cast<epee::json_rpc::request<command_type::request>&>(req_);\
-  if(!req.load(ps)) \
-  { \
-    epee::json_rpc::error_response fail_resp = AUTO_VAL_INIT(fail_resp); \
-    fail_resp.jsonrpc = "2.0"; \
-    fail_resp.id = req.id; \
-    fail_resp.error.code = -32602; \
-    fail_resp.error.message = "Invalid params"; \
-    epee::serialization::store_t_to_json(static_cast<epee::json_rpc::error_response&>(fail_resp), response_info.m_body); \
-    return true; \
-  } \
-  uint64_t ticks1 = epee::misc_utils::get_tick_count(); \
-  boost::value_initialized<epee::json_rpc::response<command_type::response, epee::json_rpc::dummy_error> > resp_; \
-  epee::json_rpc::response<command_type::response, epee::json_rpc::dummy_error>& resp = static_cast<epee::json_rpc::response<command_type::response, epee::json_rpc::dummy_error> &>(resp_); \
-  resp.jsonrpc = "2.0"; \
-  resp.id = req.id;
-
-#define FINALIZE_OBJECTS_TO_JSON(method_name) \
-  uint64_t ticks2 = epee::misc_utils::get_tick_count(); \
-  epee::serialization::store_t_to_json(resp, response_info.m_body); \
-  uint64_t ticks3 = epee::misc_utils::get_tick_count(); \
-  response_info.m_mime_tipe = "application/json"; \
-  response_info.m_header_info.m_content_type = " application/json"; \
-  LOG_PRINT( query_info.m_URI << "[" << method_name << "] processed with " << ticks1-ticks << "/"<< ticks2-ticks1 << "/" << ticks3-ticks2 << "ms", LOG_LEVEL_2);
-
-#define MAP_JON_RPC_WE(method_name, callback_f, command_type) \
-    else if(callback_name == method_name) \
-{ \
-  PREPARE_OBJECTS_FROM_JSON(command_type) \
-  epee::json_rpc::error_response fail_resp = AUTO_VAL_INIT(fail_resp); \
-  fail_resp.jsonrpc = "2.0"; \
-  fail_resp.id = req.id; \
-  if(!callback_f(req.params, resp.result, fail_resp.error, m_conn_context)) \
-  { \
-    epee::serialization::store_t_to_json(static_cast<epee::json_rpc::error_response&>(fail_resp), response_info.m_body); \
-    return true; \
-  } \
-  FINALIZE_OBJECTS_TO_JSON(method_name) \
-  return true;\
-}
-
-#define MAP_JON_RPC_WERI(method_name, callback_f, command_type) \
-    else if(callback_name == method_name) \
-{ \
-  PREPARE_OBJECTS_FROM_JSON(command_type) \
-  epee::json_rpc::error_response fail_resp = AUTO_VAL_INIT(fail_resp); \
-  fail_resp.jsonrpc = "2.0"; \
-  fail_resp.id = req.id; \
-  if(!callback_f(req.params, resp.result, fail_resp.error, m_conn_context, response_info)) \
-  { \
-    epee::serialization::store_t_to_json(static_cast<epee::json_rpc::error_response&>(fail_resp), response_info.m_body); \
-    return true; \
-  } \
-  FINALIZE_OBJECTS_TO_JSON(method_name) \
-  return true;\
-}
-
-#define MAP_JON_RPC(method_name, callback_f, command_type) \
-    else if(callback_name == method_name) \
-{ \
-  PREPARE_OBJECTS_FROM_JSON(command_type) \
-  if(!callback_f(req.params, resp.result, m_conn_context)) \
-  { \
-    epee::json_rpc::error_response fail_resp = AUTO_VAL_INIT(fail_resp); \
-    fail_resp.jsonrpc = "2.0"; \
-    fail_resp.id = req.id; \
-    fail_resp.error.code = -32603; \
-    fail_resp.error.message = "Internal error"; \
-    epee::serialization::store_t_to_json(static_cast<epee::json_rpc::error_response&>(fail_resp), response_info.m_body); \
-    return true; \
-  } \
-  FINALIZE_OBJECTS_TO_JSON(method_name) \
-  return true;\
-}
-
-#define END_JSON_RPC_MAP() \
-  epee::json_rpc::error_response rsp; \
-  rsp.id = id_; \
-  rsp.jsonrpc = "2.0"; \
-  rsp.error.code = -32601; \
-  rsp.error.message = "Method not found"; \
-  epee::serialization::store_t_to_json(static_cast<epee::json_rpc::error_response&>(rsp), response_info.m_body); \
-  return true; \
-}
-
-
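// A hedged usage sketch, not from the deleted file: how a handler class would
// typically consume the URI-map macros above (modeled on cryptonote RPC
// servers; COMMAND_EXAMPLE, example_rpc_server, and the URI are hypothetical).
// CHAIN_HTTP_TO_MAP2 generates handle_http_request(), which dispatches into
// the map; MAP_URI_AUTO_JON2 does the JSON (de)serialization around the callback.
struct COMMAND_EXAMPLE
{
  struct request
  {
    BEGIN_KV_SERIALIZE_MAP()
    END_KV_SERIALIZE_MAP()
  };
  struct response
  {
    uint64_t height;
    BEGIN_KV_SERIALIZE_MAP()
      KV_SERIALIZE(height)
    END_KV_SERIALIZE_MAP()
  };
};

class example_rpc_server
{
public:
  CHAIN_HTTP_TO_MAP2(epee::net_utils::connection_context_base)
  BEGIN_URI_MAP2()
    MAP_URI_AUTO_JON2("/get_height", on_get_height, COMMAND_EXAMPLE)
  END_URI_MAP2()

private:
  bool on_get_height(const COMMAND_EXAMPLE::request& req, COMMAND_EXAMPLE::response& res,
                     epee::net_utils::connection_context_base& ctx)
  {
    res.height = 123456; // hypothetical value
    return true;
  }
};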
diff --git a/contrib/epee/include/net/http_server_impl_base.h b/contrib/epee/include/net/http_server_impl_base.h
deleted file mode 100644
index c02475c34f..0000000000
--- a/contrib/epee/include/net/http_server_impl_base.h
+++ /dev/null
@@ -1,112 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-
-
-
-#pragma once
-
-
-#include <boost/thread.hpp>
-#include <boost/bind.hpp>
-
-#include "net/http_server_cp2.h"
-#include "net/http_server_handlers_map2.h"
-
-namespace epee
-{
-
-  template<class t_child_class, class t_connection_context = epee::net_utils::connection_context_base>
-  class http_server_impl_base: public net_utils::http::i_http_server_handler<t_connection_context>
-  {
-
-  public:
-    http_server_impl_base()
-        : m_net_server()
-    {}
-
-    explicit http_server_impl_base(boost::asio::io_service& external_io_service)
-        : m_net_server(external_io_service)
-    {}
-
-    bool init(const std::string& bind_port = "0", const std::string& bind_ip = "0.0.0.0")
-    {
-
-      //set self as callback handler
-      m_net_server.get_config_object().m_phandler = static_cast<t_child_class*>(this);
-
-      //here set folder for hosting requests
-      m_net_server.get_config_object().m_folder = "";
-
-      LOG_PRINT_L0("Binding on " << bind_ip << ":" << bind_port);
-      bool res = m_net_server.init_server(bind_port, bind_ip);
-      if(!res)
-      {
-        LOG_ERROR("Failed to bind server");
-        return false;
-      }
-      return true;
-    }
-
-    bool run(size_t threads_count, bool wait = true)
-    {
-      //go to loop
-      LOG_PRINT("Run net_service loop( " << threads_count << " threads)...", LOG_LEVEL_0);
-      if(!m_net_server.run_server(threads_count, wait))
-      {
-        LOG_ERROR("Failed to run net tcp server!");
-      }
-
-      if(wait)
-        LOG_PRINT("net_service loop stopped.", LOG_LEVEL_0);
-      return true;
-    }
-
-    bool deinit()
-    {
-      return m_net_server.deinit_server();
-    }
-
-    bool timed_wait_server_stop(uint64_t ms)
-    {
-      return m_net_server.timed_wait_server_stop(ms);
-    }
-
-    bool send_stop_signal()
-    {
-      m_net_server.send_stop_signal();
-      return true;
-    }
-
-    int get_binded_port()
-    {
-      return m_net_server.get_binded_port();
-    }
-
-  protected:
-    net_utils::boosted_tcp_server<net_utils::http::http_custom_handler<t_connection_context> > m_net_server;
-  };
-}
\ No newline at end of file
diff --git a/contrib/epee/include/net/http_server_thread_per_connect.h b/contrib/epee/include/net/http_server_thread_per_connect.h
deleted file mode 100644
index bec43b726e..0000000000
--- a/contrib/epee/include/net/http_server_thread_per_connect.h
+++ /dev/null
@@ -1,48 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-
-
-#ifndef _HTTP_SERVER_CP_H_
-#define _HTTP_SERVER_CP_H_
-
-#include "abstract_tcp_server.h"
-#include "http_server.h"
-
-namespace epee
-{
-namespace net_utils
-{
-	typedef abstract_tcp_server mt_http_server_file_system;
-	typedef abstract_tcp_server mt_http_server_custum_handling;
-
-}
-}
-
-
-#endif
-
-
diff --git a/contrib/epee/include/net/jsonrpc_protocol_handler.h b/contrib/epee/include/net/jsonrpc_protocol_handler.h
deleted file mode 100644
index b224c34292..0000000000
--- a/contrib/epee/include/net/jsonrpc_protocol_handler.h
+++ /dev/null
@@ -1,167 +0,0 @@
-#ifndef JSONRPC_PROTOCOL_HANDLER_H
-#define JSONRPC_PROTOCOL_HANDLER_H
-
-#include <cstdint>
-#include <string>
-
-#include "net/net_utils_base.h"
-#include "jsonrpc_structs.h"
-#include "storages/portable_storage.h"
-#include "storages/portable_storage_template_helper.h"
-
-namespace epee
-{
-namespace net_utils
-{
-  namespace jsonrpc2
-  {
-    inline
-    std::string& make_error_resp_json(int64_t code, const std::string& message,
-                                      std::string& response_data,
-                                      const epee::serialization::storage_entry& id = nullptr)
-    {
-      epee::json_rpc::error_response rsp;
-      rsp.id = id;
-      rsp.jsonrpc = "2.0";
-      rsp.error.code = code;
-      rsp.error.message = message;
-      epee::serialization::store_t_to_json(static_cast<epee::json_rpc::error_response&>(rsp), response_data, 0, false);
-      response_data += "\n";
-      return response_data;
-    }
-
-    template<class t_connection_context>
-    struct i_jsonrpc2_server_handler
-    {
-      virtual ~i_jsonrpc2_server_handler()
-      {}
-      virtual bool handle_rpc_request(const std::string& req_data,
-                                      std::string& resp_data,
-                                      t_connection_context& conn_context) = 0;
-      virtual bool init_server_thread()
-      { return true; }
-      virtual bool deinit_server_thread()
-      { return true; }
-    };
-
-    template<class t_connection_context>
-    struct jsonrpc2_server_config
-    {
-      i_jsonrpc2_server_handler<t_connection_context>* m_phandler;
-      critical_section m_lock;
-    };
-
-    template<class t_connection_context>
-    class jsonrpc2_connection_handler
-    {
-    public:
-      typedef t_connection_context connection_context;
-      typedef jsonrpc2_server_config<t_connection_context> config_type;
-
-      jsonrpc2_connection_handler(i_service_endpoint* psnd_hndlr,
-                                  config_type& config,
-                                  t_connection_context& conn_context)
-        : m_psnd_hndlr(psnd_hndlr),
-          m_config(config),
-          m_conn_context(conn_context),
-          m_is_stop_handling(false)
-      {}
-      virtual ~jsonrpc2_connection_handler()
-      {}
-
-      bool release_protocol()
-      {
-        return true;
-      }
-      virtual bool thread_init()
-      {
-        return true;
-      }
-      virtual bool thread_deinit()
-      {
-        return true;
-      }
-      void handle_qued_callback()
-      {}
-      bool after_init_connection()
-      {
-        return true;
-      }
-      virtual bool handle_recv(const void* ptr, size_t cb)
-      {
-        std::string buf((const char*)ptr, cb);
-        LOG_PRINT_L0("JSONRPC2_RECV: " << ptr << "\r\n" << buf);
-
-        bool res = handle_buff_in(buf);
-        return res;
-      }
-    private:
-      bool handle_buff_in(std::string& buf)
-      {
-        if(m_cache.size())
-          m_cache += buf;
-        else
-          m_cache.swap(buf);
-
-        m_is_stop_handling = false;
-        while (!m_is_stop_handling) {
-          std::string::size_type pos = match_end_of_request(m_cache);
-          if (std::string::npos == pos) {
-            m_is_stop_handling = true;
-            if (m_cache.size() > 4096) {
-              LOG_ERROR("jsonrpc2_connection_handler::handle_buff_in: Too long request");
-              return false;
-            }
-            break;
-          } else {
-            extract_cached_request_and_handle(pos);
-          }
-
-          if (!m_cache.size()) {
-            m_is_stop_handling = true;
-          }
-        }
-
-        return true;
-      }
-      bool extract_cached_request_and_handle(std::string::size_type pos)
-      {
-        std::string request_data(m_cache.begin(), m_cache.begin() + pos);
-        m_cache.erase(0, pos);
-        return handle_request_and_send_response(request_data);
-      }
-      bool handle_request_and_send_response(const std::string& request_data)
-      {
-        CHECK_AND_ASSERT_MES(m_config.m_phandler, false, "m_config.m_phandler is NULL!!!!");
-        std::string response_data;
-
-        LOG_PRINT_L3("JSONRPC2_REQUEST: >> \r\n" << request_data);
-        bool rpc_result = m_config.m_phandler->handle_rpc_request(request_data, response_data, m_conn_context);
-        LOG_PRINT_L3("JSONRPC2_RESPONSE: << \r\n" << response_data);
-
-        m_psnd_hndlr->do_send((void*)response_data.data(), response_data.size());
-        return rpc_result;
-      }
-      std::string::size_type match_end_of_request(const std::string& buf)
-      {
-        std::string::size_type res = buf.find("\n");
-        if(std::string::npos != res) {
-          return res + 2;
-        }
-        return res;
-      }
-
-    protected:
-      i_service_endpoint* m_psnd_hndlr;
-
-    private:
-      config_type& m_config;
-      t_connection_context& m_conn_context;
-      std::string m_cache;
-      bool m_is_stop_handling;
-    };
-  }
-}
-}
-
-#endif /* JSONRPC_PROTOCOL_HANDLER_H */
diff --git a/contrib/epee/include/net/jsonrpc_server_handlers_map.h b/contrib/epee/include/net/jsonrpc_server_handlers_map.h
deleted file mode 100644
index 8c747d1af4..0000000000
--- a/contrib/epee/include/net/jsonrpc_server_handlers_map.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#ifndef JSONRPC_SERVER_HANDLERS_MAP_H
-#define JSONRPC_SERVER_HANDLERS_MAP_H
-
-#include
-#include "serialization/keyvalue_serialization.h"
-#include "storages/portable_storage_template_helper.h"
-#include "storages/portable_storage_base.h"
-#include "jsonrpc_structs.h"
-#include "jsonrpc_protocol_handler.h"
-
-#define BEGIN_JSONRPC2_MAP(t_connection_context) \
-bool handle_rpc_request(const std::string& req_data, \
-                        std::string& resp_data, \
-                        t_connection_context& m_conn_context) \
-{ \
-  bool handled = false; \
-  uint64_t ticks = epee::misc_utils::get_tick_count(); \
-  epee::serialization::portable_storage ps; \
-  if (!ps.load_from_json(req_data)) \
-  { \
-    epee::net_utils::jsonrpc2::make_error_resp_json(-32700, "Parse error", resp_data); \
-    return true; \
-  } \
-  epee::serialization::storage_entry id_; \
-  id_ = epee::serialization::storage_entry(std::string()); \
-  if (!ps.get_value("id", id_, nullptr)) \
-  { \
-    epee::net_utils::jsonrpc2::make_error_resp_json(-32600, "Invalid Request", resp_data); \
-    return true; \
-  } \
-  std::string callback_name; \
-  if (!ps.get_value("method", callback_name, nullptr)) \
-  { \
-    epee::net_utils::jsonrpc2::make_error_resp_json(-32600, "Invalid Request", resp_data, id_); \
-    return true; \
-  } \
-  if (false) return true; //just a stub to have "else if"
-
-
-
-#define PREPARE_JSONRPC2_OBJECTS_FROM_JSON(command_type) \
-  handled = true; \
-  boost::value_initialized<epee::json_rpc::request<command_type::request> > req_; \
-  epee::json_rpc::request<command_type::request>& req = static_cast<epee::json_rpc::request<command_type::request>&>(req_);\
-  if(!req.load(ps)) \
-  { \
-    epee::net_utils::jsonrpc2::make_error_resp_json(-32602, "Invalid params", resp_data, req.id); \
-    return true; \
-  } \
-  uint64_t ticks1 = epee::misc_utils::get_tick_count(); \
-  boost::value_initialized<epee::json_rpc::response<command_type::response, epee::json_rpc::dummy_error> > resp_; \
-  epee::json_rpc::response<command_type::response, epee::json_rpc::dummy_error>& resp = static_cast<epee::json_rpc::response<command_type::response, epee::json_rpc::dummy_error> &>(resp_); \
-  resp.jsonrpc = "2.0"; \
-  resp.id = req.id;
-
-#define FINALIZE_JSONRPC2_OBJECTS_TO_JSON(method_name) \
-  uint64_t ticks2 = epee::misc_utils::get_tick_count(); \
-  epee::serialization::store_t_to_json(resp, resp_data, 0, false); \
-  resp_data += "\n"; \
-  uint64_t ticks3 = epee::misc_utils::get_tick_count(); \
-  LOG_PRINT("[" << method_name << "] processed with " << ticks1-ticks << "/"<< ticks2-ticks1 << "/" << ticks3-ticks2 << "ms", LOG_LEVEL_2);
-
-
-#define MAP_JSONRPC2_WE(method_name, callback_f, command_type) \
-  else if (callback_name == method_name) \
-  { \
-    PREPARE_JSONRPC2_OBJECTS_FROM_JSON(command_type) \
-    epee::json_rpc::error_response fail_resp = AUTO_VAL_INIT(fail_resp); \
-    fail_resp.jsonrpc = "2.0"; \
-    fail_resp.id = req.id; \
-    if(!callback_f(req.params, resp.result, fail_resp.error, m_conn_context)) \
-    { \
-      epee::serialization::store_t_to_json(static_cast<epee::json_rpc::error_response&>(fail_resp), resp_data, 0, false); \
-      resp_data += "\n"; \
-      return true; \
-    } \
-    FINALIZE_JSONRPC2_OBJECTS_TO_JSON(method_name) \
-    return true; \
-  }
-
-#define END_JSONRPC2_MAP() \
-  epee::net_utils::jsonrpc2::make_error_resp_json(-32601, "Method not found", resp_data, id_); \
-  return true; \
-}
-
-#endif /* JSONRPC_SERVER_HANDLERS_MAP_H */
diff --git a/contrib/epee/include/net/jsonrpc_server_impl_base.h b/contrib/epee/include/net/jsonrpc_server_impl_base.h
deleted file mode 100644
index 8a5a9a5b68..0000000000
--- a/contrib/epee/include/net/jsonrpc_server_impl_base.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#ifndef JSONRPC_SERVER_IMPL_BASE_H
-#define JSONRPC_SERVER_IMPL_BASE_H
-
-#include
-#include
-
-#include "net/jsonrpc_protocol_handler.h"
-#include "net/jsonrpc_server_handlers_map.h"
-#include "net/abstract_tcp_server2.h"
-
-namespace epee
-{
-
-template<class t_child_class, class t_connection_context = net_utils::connection_context_base>
-  class jsonrpc_server_impl_base: public net_utils::jsonrpc2::i_jsonrpc2_server_handler<t_connection_context>
-  {
-
-  public:
-    jsonrpc_server_impl_base()
-      : m_net_server()
-    {}
-
-    explicit jsonrpc_server_impl_base(boost::asio::io_service& external_io_service)
-      : m_net_server(external_io_service)
-    {}
-
-    bool init(const std::string& bind_port = "0", const std::string& bind_ip = "0.0.0.0")
-    {
-      //set self as callback handler
-      m_net_server.get_config_object().m_phandler = static_cast<t_child_class*>(this);
-
-      LOG_PRINT_L0("Binding on " << bind_ip << ":" << bind_port);
-      bool res = m_net_server.init_server(bind_port, bind_ip);
-      if (!res)
-      {
-        LOG_ERROR("Failed to bind server");
-        return false;
-      }
-      return true;
-    }
-
-    bool run(size_t threads_count, bool wait = true)
-    {
-      //go to loop
-      LOG_PRINT("Run net_service loop( " << threads_count << " threads)...", LOG_LEVEL_0);
-      if(!m_net_server.run_server(threads_count, wait))
-      {
-        LOG_ERROR("Failed to run net tcp server!");
-      }
-
-      if(wait)
-        LOG_PRINT("net_service loop stopped.", LOG_LEVEL_0);
-      return true;
-    }
-
-    bool deinit()
-    {
-      return m_net_server.deinit_server();
-    }
-
-    bool timed_wait_server_stop(uint64_t ms)
-    {
-      return m_net_server.timed_wait_server_stop(ms);
-    }
-
-    bool send_stop_signal()
-    {
-      m_net_server.send_stop_signal();
-      return true;
-    }
-
-    int get_binded_port()
-    {
-      return m_net_server.get_binded_port();
-    }
-
-  protected:
-    net_utils::boosted_tcp_server<net_utils::jsonrpc2::jsonrpc2_connection_handler<t_connection_context> > m_net_server;
-  };
-
-}
-
-#endif /* JSONRPC_SERVER_IMPL_BASE_H */
-
diff --git a/contrib/epee/include/net/jsonrpc_structs.h b/contrib/epee/include/net/jsonrpc_structs.h
deleted file mode 100644
index 9df9e25961..0000000000
--- a/contrib/epee/include/net/jsonrpc_structs.h
+++ /dev/null
@@ -1,96 +0,0 @@
-#ifndef JSONRPC_STRUCTS_H
-#define JSONRPC_STRUCTS_H
-
-#include <string>
-#include <cstdint>
-#include "serialization/keyvalue_serialization.h"
-#include "storages/portable_storage_base.h"
-
-namespace epee
-{
-  namespace json_rpc
-  {
-    template<typename t_param>
-    struct request
-    {
-      std::string jsonrpc;
-      std::string method;
-      epee::serialization::storage_entry id;
-      t_param params;
-
-      BEGIN_KV_SERIALIZE_MAP()
-        KV_SERIALIZE(jsonrpc)
-        KV_SERIALIZE(id)
-        KV_SERIALIZE(method)
-        KV_SERIALIZE(params)
-      END_KV_SERIALIZE_MAP()
-    };
-
-    struct error
-    {
-      int64_t code;
-      std::string message;
-      BEGIN_KV_SERIALIZE_MAP()
-        KV_SERIALIZE(code)
-        KV_SERIALIZE(message)
-      END_KV_SERIALIZE_MAP()
-    };
-
-    struct dummy_error
-    {
-      BEGIN_KV_SERIALIZE_MAP()
-      END_KV_SERIALIZE_MAP()
-    };
-
-    struct dummy_result
-    {
-      BEGIN_KV_SERIALIZE_MAP()
-      END_KV_SERIALIZE_MAP()
-    };
-
-    template<typename t_param, typename t_error>
-    struct response
-    {
-      std::string jsonrpc;
-      t_param result;
-      epee::serialization::storage_entry id;
-      t_error error;
-      BEGIN_KV_SERIALIZE_MAP()
-        KV_SERIALIZE(jsonrpc)
-        KV_SERIALIZE(id)
-        KV_SERIALIZE(result)
-        KV_SERIALIZE(error)
-      END_KV_SERIALIZE_MAP()
-    };
-
-    template<typename t_param>
-    struct response<t_param, dummy_error>
-    {
-      std::string jsonrpc;
-      t_param result;
-      epee::serialization::storage_entry id;
-      BEGIN_KV_SERIALIZE_MAP()
-        KV_SERIALIZE(jsonrpc)
-        KV_SERIALIZE(id)
-        KV_SERIALIZE(result)
-      END_KV_SERIALIZE_MAP()
-    };
-
-    template<typename t_error>
-    struct response<dummy_result, t_error>
-    {
-      std::string jsonrpc;
-      t_error error;
-      epee::serialization::storage_entry id;
-      BEGIN_KV_SERIALIZE_MAP()
-        KV_SERIALIZE(jsonrpc)
-        KV_SERIALIZE(id)
-        KV_SERIALIZE(error)
-      END_KV_SERIALIZE_MAP()
-    };
-
-    typedef response<dummy_result, error> error_response;
-  }
-}
-
-#endif /* JSONRPC_STRUCTS_H */
diff --git a/contrib/epee/include/net/levin_base.h b/contrib/epee/include/net/levin_base.h
deleted file mode 100644
index d630bff198..0000000000
--- a/contrib/epee/include/net/levin_base.h
+++ /dev/null
@@ -1,125 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-
-
-#ifndef _LEVIN_BASE_H_
-#define _LEVIN_BASE_H_
-
-#include "net_utils_base.h"
-
-#define LEVIN_SIGNATURE 0x0101010101012101LL //Bender's nightmare
-
-namespace epee
-{
-namespace levin
-{
-#pragma pack(push)
-#pragma pack(1)
-  struct bucket_head
-  {
-    uint64_t m_signature;
-    uint64_t m_cb;
-    bool     m_have_to_return_data;
-    uint32_t m_command;
-    int32_t  m_return_code;
-    uint32_t m_reservedA; //probably some flags in future
-    uint32_t m_reservedB; //probably some check sum in future
-  };
-#pragma pack(pop)
-
-
-#pragma pack(push)
-#pragma pack(1)
-  struct bucket_head2
-  {
-    uint64_t m_signature;
-    uint64_t m_cb;
-    bool     m_have_to_return_data;
-    uint32_t m_command;
-    int32_t  m_return_code;
-    uint32_t m_flags;
-    uint32_t m_protocol_version;
-  };
-#pragma pack(pop)
-
-
-#define LEVIN_DEFAULT_TIMEOUT_PRECONFIGURED 0
-#define LEVIN_DEFAULT_MAX_PACKET_SIZE 100000000 //100MB by default
-
-#define LEVIN_PACKET_REQUEST 0x00000001
-#define LEVIN_PACKET_RESPONSE 0x00000002
-
-
-#define LEVIN_PROTOCOL_VER_0 0
-#define LEVIN_PROTOCOL_VER_1 1
-
-  template<class t_connection_context>
-  struct levin_commands_handler
-  {
-    virtual int invoke(int command, const std::string& in_buff, std::string& buff_out, t_connection_context& context)=0;
-    virtual int notify(int command, const std::string& in_buff, t_connection_context& context)=0;
-    virtual void callback(t_connection_context& context){};
-
-    virtual void on_connection_new(t_connection_context& context){};
-    virtual void on_connection_close(t_connection_context& context){};
-
-  };
-
-#define LEVIN_OK 0
-#define LEVIN_ERROR_CONNECTION -1
-#define LEVIN_ERROR_CONNECTION_NOT_FOUND -2
-#define LEVIN_ERROR_CONNECTION_DESTROYED -3
-#define LEVIN_ERROR_CONNECTION_TIMEDOUT -4
-#define LEVIN_ERROR_CONNECTION_NO_DUPLEX_PROTOCOL -5
-#define LEVIN_ERROR_CONNECTION_HANDLER_NOT_DEFINED -6
-#define LEVIN_ERROR_FORMAT -7
-
-#define DESCRIBE_RET_CODE(code) case code: return #code;
-  inline
-  const char* get_err_descr(int err)
-  {
-    switch(err)
-    {
-      DESCRIBE_RET_CODE(LEVIN_OK);
-      DESCRIBE_RET_CODE(LEVIN_ERROR_CONNECTION);
-      DESCRIBE_RET_CODE(LEVIN_ERROR_CONNECTION_NOT_FOUND);
-      DESCRIBE_RET_CODE(LEVIN_ERROR_CONNECTION_DESTROYED);
-      DESCRIBE_RET_CODE(LEVIN_ERROR_CONNECTION_TIMEDOUT);
-      DESCRIBE_RET_CODE(LEVIN_ERROR_CONNECTION_NO_DUPLEX_PROTOCOL);
-      DESCRIBE_RET_CODE(LEVIN_ERROR_CONNECTION_HANDLER_NOT_DEFINED);
-      DESCRIBE_RET_CODE(LEVIN_ERROR_FORMAT);
-    default:
-      return "unknown code";
-    }
-  }
-
-
-}
-}
-
-
-#endif //_LEVIN_BASE_H_
diff --git a/contrib/epee/include/net/levin_client.h b/contrib/epee/include/net/levin_client.h
deleted file mode 100644
index 335f6ba02f..0000000000
--- a/contrib/epee/include/net/levin_client.h
+++ /dev/null
@@ -1,89 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - - - -#ifndef _LEVIN_CLIENT_H_ -#define _LEVIN_CLIENT_H_ - -#include "net_helper.h" -#include "levin_base.h" - - -#ifndef MAKE_IP -#define MAKE_IP( a1, a2, a3, a4 ) (a1|(a2<<8)|(a3<<16)|(a4<<24)) -#endif - -namespace epee -{ -namespace levin -{ - /************************************************************************/ - /* */ - /************************************************************************/ - class levin_client_impl - { - public: - levin_client_impl(); - virtual ~levin_client_impl(); - - bool connect(u_long ip, int port, unsigned int timeout, const std::string& bind_ip = "0.0.0.0"); - bool connect(const std::string& addr, int port, unsigned int timeout, const std::string& bind_ip = "0.0.0.0"); - bool is_connected(); - bool disconnect(); - - virtual int invoke(int command, const std::string& in_buff, std::string& buff_out); - virtual int notify(int command, const std::string& in_buff); - - protected: - net_utils::blocked_mode_client m_transport; - }; - - - /************************************************************************/ - /* */ - /************************************************************************/ - class levin_client_impl2: public levin_client_impl - { - public: - - int invoke(int command, const std::string& in_buff, std::string& buff_out); - int notify(int command, const std::string& in_buff); - }; - -} -namespace net_utils -{ - typedef levin::levin_client_impl levin_client; - typedef levin::levin_client_impl2 levin_client2; -} -} - -#include "levin_client.inl" - -#endif //_LEVIN_CLIENT_H_ diff --git a/contrib/epee/include/net/levin_client.inl b/contrib/epee/include/net/levin_client.inl deleted file mode 100644 index ae159da6e5..0000000000 --- a/contrib/epee/include/net/levin_client.inl +++ /dev/null @@ -1,194 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - - -//------------------------------------------------------------------------------ -//------------------------------------------------------------------------------ -#include "string_tools.h" -namespace epee -{ -namespace levin -{ -inline -bool levin_client_impl::connect(u_long ip, int port, unsigned int timeout, const std::string& bind_ip) -{ - return m_transport.connect(string_tools::get_ip_string_from_int32(ip), port, timeout, timeout, bind_ip); -} -//------------------------------------------------------------------------------ -inline - bool levin_client_impl::connect(const std::string& addr, int port, unsigned int timeout, const std::string& bind_ip) -{ - return m_transport.connect(addr, port, timeout, timeout, bind_ip); -} -//------------------------------------------------------------------------------ -inline -bool levin_client_impl::is_connected() -{ - return m_transport.is_connected(); -} -//------------------------------------------------------------------------------ -inline -bool levin_client_impl::disconnect() -{ - return m_transport.disconnect(); -} -//------------------------------------------------------------------------------ -inline -levin_client_impl::levin_client_impl() -{ -} -//------------------------------------------------------------------------------ -inline -levin_client_impl::~levin_client_impl() -{ - disconnect(); -} -//------------------------------------------------------------------------------ -inline -int levin_client_impl::invoke(int command, const std::string& in_buff, std::string& buff_out) -{ - if(!is_connected()) - return -1; - - bucket_head head = {0}; - head.m_signature = LEVIN_SIGNATURE; - head.m_cb = in_buff.size(); - head.m_have_to_return_data = true; - head.m_command = command; - if(!m_transport.send(&head, sizeof(head))) - return -1; - - if(!m_transport.send(in_buff)) - return -1; - - std::string local_buff; - if(!m_transport.recv_n(local_buff, sizeof(bucket_head))) - return -1; - - head = *(bucket_head*)local_buff.data(); - - - if(head.m_signature!=LEVIN_SIGNATURE) - { - LOG_PRINT_L0("Signature missmatch in response"); - return -1; - } - - if(!m_transport.recv_n(buff_out, head.m_cb)) - return -1; - - return head.m_return_code; -} -//------------------------------------------------------------------------------ -inline -int levin_client_impl::notify(int command, const std::string& in_buff) -{ - if(!is_connected()) - return -1; - - bucket_head head = {0}; - head.m_signature = LEVIN_SIGNATURE; - head.m_cb = in_buff.size(); - head.m_have_to_return_data = false; - head.m_command = command; - - if(!m_transport.send((const char*)&head, sizeof(head))) - return -1; - - if(!m_transport.send(in_buff)) - return -1; - - return 1; -} - 
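// A hedged usage sketch, not from the deleted files: driving the
// levin_client_impl API above. The host, port, command id, and payload are
// hypothetical; the call signatures match levin_client.h earlier in this
// diff. invoke() frames a bucket_head with LEVIN_SIGNATURE and
// m_have_to_return_data = true, then blocks until the peer's reply header
// and body are read back.
#include <string>
#include "net/levin_client.h"

int query_peer(const std::string& host)
{
  epee::net_utils::levin_client client;
  if(!client.connect(host, 18080, 5000)) // 5000 ms connect/recv timeout
    return LEVIN_ERROR_CONNECTION;

  std::string request = "ping"; // hypothetical payload
  std::string response;
  int rc = client.invoke(1001, request, response); // rc is the peer's m_return_code

  client.disconnect();
  return rc;
}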
-//------------------------------------------------------------------------------ -//------------------------------------------------------------------------------ -inline - int levin_client_impl2::invoke(int command, const std::string& in_buff, std::string& buff_out) -{ - if(!is_connected()) - return -1; - - bucket_head2 head = {0}; - head.m_signature = LEVIN_SIGNATURE; - head.m_cb = in_buff.size(); - head.m_have_to_return_data = true; - head.m_command = command; - head.m_protocol_version = LEVIN_PROTOCOL_VER_1; - head.m_flags = LEVIN_PACKET_REQUEST; - if(!m_transport.send(&head, sizeof(head))) - return -1; - - if(!m_transport.send(in_buff)) - return -1; - - std::string local_buff; - if(!m_transport.recv_n(local_buff, sizeof(bucket_head2))) - return -1; - - head = *(bucket_head2*)local_buff.data(); - - - if(head.m_signature!=LEVIN_SIGNATURE) - { - LOG_PRINT_L0("Signature missmatch in response"); - return -1; - } - - if(!m_transport.recv_n(buff_out, head.m_cb)) - return -1; - - return head.m_return_code; -} -//------------------------------------------------------------------------------ -inline - int levin_client_impl2::notify(int command, const std::string& in_buff) -{ - if(!is_connected()) - return -1; - - bucket_head2 head = {0}; - head.m_signature = LEVIN_SIGNATURE; - head.m_cb = in_buff.size(); - head.m_have_to_return_data = false; - head.m_command = command; - head.m_protocol_version = LEVIN_PROTOCOL_VER_1; - head.m_flags = LEVIN_PACKET_REQUEST; - - if(!m_transport.send((const char*)&head, sizeof(head))) - return -1; - - if(!m_transport.send(in_buff)) - return -1; - - return 1; -} - -} -} -//------------------------------------------------------------------------------ \ No newline at end of file diff --git a/contrib/epee/include/net/levin_client_async.h b/contrib/epee/include/net/levin_client_async.h deleted file mode 100644 index 9e76cd5099..0000000000 --- a/contrib/epee/include/net/levin_client_async.h +++ /dev/null @@ -1,577 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#pragma once - -#include "" -#include "net_helper.h" -#include "levin_base.h" - - -namespace epee -{ -namespace levin -{ - - /************************************************************************ - * levin_client_async - probably not a really fast implementation; - * each handler thread can add up to 30 ms of latency. - * But handling events on the reader thread would cause deadlocks in - * case of a recursive call (calling invoke() on the same connection - * from the reader thread inside a remote invoke() handler) - ***********************************************************************/ - - - class levin_client_async - { - levin_commands_handler* m_pcommands_handler; - volatile uint32_t m_is_stop; - volatile uint32_t m_threads_count; - ::critical_section m_send_lock; - - std::string m_local_invoke_buff; - ::critical_section m_local_invoke_buff_lock; - volatile int m_invoke_res; - - volatile uint32_t m_invoke_data_ready; - volatile uint32_t m_invoke_is_active; - - boost::mutex m_invoke_event; - boost::condition_variable m_invoke_cond; - size_t m_timeout; - - ::critical_section m_recieved_packets_lock; - struct packet_entry - { - bucket_head m_hd; - std::string m_body; - uint32_t m_connection_index; - }; - std::list m_recieved_packets; - /* - m_current_connection_index is needed when a connection was broken and reconnected - in this - case we could have some received packets in the queue which should not be handled - */ - volatile uint32_t m_current_connection_index; - ::critical_section m_invoke_lock; - ::critical_section m_reciev_packet_lock; - ::critical_section m_connection_lock; - net_utils::blocked_mode_client m_transport; - public: - levin_client_async():m_pcommands_handler(NULL), m_is_stop(0), m_threads_count(0), m_invoke_data_ready(0), m_invoke_is_active(0) - {} - levin_client_async(const levin_client_async& /*v*/):m_pcommands_handler(NULL), m_is_stop(0), m_threads_count(0), m_invoke_data_ready(0), m_invoke_is_active(0) - {} - ~levin_client_async() - { - boost::interprocess::ipcdetail::atomic_write32(&m_is_stop, 1); - disconnect(); - - - while(boost::interprocess::ipcdetail::atomic_read32(&m_threads_count)) - ::Sleep(100); - } - - void set_handler(levin_commands_handler* phandler) - { - m_pcommands_handler = phandler; - } - - bool connect(uint32_t ip, uint32_t port, uint32_t timeout) - { - loop_call_guard(); - critical_region cr(m_connection_lock); - - m_timeout = timeout; - bool res = false; - CRITICAL_REGION_BEGIN(m_reciev_packet_lock); - CRITICAL_REGION_BEGIN(m_send_lock); - res = levin_client_impl::connect(ip, port, timeout); - boost::interprocess::ipcdetail::atomic_inc32(&m_current_connection_index); - CRITICAL_REGION_END(); - CRITICAL_REGION_END(); - if(res && !boost::interprocess::ipcdetail::atomic_read32(&m_threads_count) ) - { - //boost::interprocess::ipcdetail::atomic_write32(&m_is_stop, 0);//m_is_stop = false; - boost::thread( boost::bind(&levin_duplex_client::reciever_thread, this) ); - boost::thread( boost::bind(&levin_duplex_client::handler_thread, this) ); - 
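// Why connect() spawns dedicated workers at this point: one reciever_thread
// reads frames off the socket while two handler_thread workers dispatch them,
// so a remote invoke() handler that calls invoke() back on the same connection
// never blocks the reader on its own response, which is the deadlock the class
// comment above warns about. The price is latency: an idle handler_thread
// polls the received-packet queue with Sleep(30) (see handler_thread below),
// so each dispatched command can pick up roughly that much delay, consistent
// with the "up to 30 ms" note in the class comment.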
boost::thread( boost::bind(&levin_duplex_client::handler_thread, this) ); - } - - return res; - } - bool is_connected() - { - loop_call_guard(); - critical_region cr(m_cs); - return levin_client_impl::is_connected(); - } - - inline - bool check_connection() - { - loop_call_guard(); - critical_region cr(m_cs); - - if(!is_connected()) - { - if( !reconnect() ) - { - LOG_ERROR("Reconnect failed. Failed to invoke() because not connected!"); - return false; - } - } - return true; - } - - //------------------------------------------------------------------------------ - inline - bool recv_n(SOCKET s, char* pbuff, size_t cb) - { - while(cb) - { - int res = ::recv(m_socket, pbuff, (int)cb, 0); - - if(SOCKET_ERROR == res) - { - if(!m_connected) - return false; - - int err = ::WSAGetLastError(); - LOG_ERROR("Failed to recv(), err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - disconnect(); - //reconnect(); - return false; - }else if(res == 0) - { - disconnect(); - //reconnect(); - return false; - } - LOG_PRINT_L4("[" << m_socket <<"] RECV " << res); - cb -= res; - pbuff += res; - } - - return true; - } - - //------------------------------------------------------------------------------ - inline - bool recv_n(SOCKET s, std::string& buff) - { - size_t cb_remain = buff.size(); - char* m_current_ptr = (char*)buff.data(); - return recv_n(s, m_current_ptr, cb_remain); - } - - bool disconnect() - { - //boost::interprocess::ipcdetail::atomic_write32(&m_is_stop, 1);//m_is_stop = true; - loop_call_guard(); - critical_region cr(m_cs); - levin_client_impl::disconnect(); - - CRITICAL_REGION_BEGIN(m_local_invoke_buff_lock); - m_local_invoke_buff.clear(); - m_invoke_res = LEVIN_ERROR_CONNECTION_DESTROYED; - CRITICAL_REGION_END(); - boost::interprocess::ipcdetail::atomic_write32(&m_invoke_data_ready, 1); //m_invoke_data_ready = true; - m_invoke_cond.notify_all(); - return true; - } - - void loop_call_guard() - { - - } - - void on_leave_invoke() - { - boost::interprocess::ipcdetail::atomic_write32(&m_invoke_is_active, 0); - } - - int invoke(const GUID& target, int command, const std::string& in_buff, std::string& buff_out) - { - - critical_region cr_invoke(m_invoke_lock); - - boost::interprocess::ipcdetail::atomic_write32(&m_invoke_is_active, 1); - boost::interprocess::ipcdetail::atomic_write32(&m_invoke_data_ready, 0); - misc_utils::destr_ptr hdlr = misc_utils::add_exit_scope_handler(boost::bind(&levin_duplex_client::on_leave_invoke, this)); - - loop_call_guard(); - - if(!check_connection()) - return LEVIN_ERROR_CONNECTION_DESTROYED; - - - bucket_head head = {0}; - head.m_signature = LEVIN_SIGNATURE; - head.m_cb = in_buff.size(); - head.m_have_to_return_data = true; - head.m_id = target; -#ifdef TRACE_LEVIN_PACKETS_BY_GUIDS - ::UuidCreate(&head.m_id); -#endif - head.m_command = command; - head.m_protocol_version = LEVIN_PROTOCOL_VER_1; - head.m_flags = LEVIN_PACKET_REQUEST; - LOG_PRINT("[" << m_socket <<"] Sending invoke data", LOG_LEVEL_4); - - CRITICAL_REGION_BEGIN(m_send_lock); - LOG_PRINT_L4("[" << m_socket <<"] SEND " << sizeof(head)); - int res = ::send(m_socket, (const char*)&head, sizeof(head), 0); - if(SOCKET_ERROR == res) - { - int err = ::WSAGetLastError(); - LOG_ERROR("Failed to send(), err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - disconnect(); - return LEVIN_ERROR_CONNECTION_DESTROYED; - } - LOG_PRINT_L4("[" << m_socket <<"] SEND " << (int)in_buff.size()); - res = ::send(m_socket, in_buff.data(), (int)in_buff.size(), 0); - if(SOCKET_ERROR 
== res) - { - int err = ::WSAGetLastError(); - LOG_ERROR("Failed to send(), err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - disconnect(); - return LEVIN_ERROR_CONNECTION_DESTROYED; - } - CRITICAL_REGION_END(); - LOG_PRINT_L4("LEVIN_PACKET_SENT. [len=" << head.m_cb << ", flags=" << head.m_flags << ", is_cmd=" << head.m_have_to_return_data <<", cmd_id = " << head.m_command << ", pr_v=" << head.m_protocol_version << ", uid=" << string_tools::get_str_from_guid_a(head.m_id) << "]"); - - //hard coded timeout in 10 minutes for maximum invoke period. if it happens, it could mean only some real troubles. - boost::system_time timeout = boost::get_system_time()+ boost::posix_time::milliseconds(100); - size_t timeout_count = 0; - boost::unique_lock lock(m_invoke_event); - - while(!boost::interprocess::ipcdetail::atomic_read32(&m_invoke_data_ready)) - { - if(!m_invoke_cond.timed_wait(lock, timeout)) - { - if(timeout_count < 10) - { - //workaround to avoid freezing at timed_wait called after notify_all. - timeout = boost::get_system_time()+ boost::posix_time::milliseconds(100); - ++timeout_count; - continue; - }else if(timeout_count == 10) - { - //workaround to avoid freezing at timed_wait called after notify_all. - timeout = boost::get_system_time()+ boost::posix_time::minutes(10); - ++timeout_count; - continue; - }else - { - LOG_PRINT("[" << m_socket <<"] Timeout on waiting invoke result. ", LOG_LEVEL_0); - //disconnect(); - return LEVIN_ERROR_CONNECTION_TIMEDOUT; - } - } - } - - - CRITICAL_REGION_BEGIN(m_local_invoke_buff_lock); - buff_out.swap(m_local_invoke_buff); - m_local_invoke_buff.clear(); - CRITICAL_REGION_END(); - return m_invoke_res; - } - - int notify(const GUID& target, int command, const std::string& in_buff) - { - if(!check_connection()) - return LEVIN_ERROR_CONNECTION_DESTROYED; - - bucket_head head = {0}; - head.m_signature = LEVIN_SIGNATURE; - head.m_cb = in_buff.size(); - head.m_have_to_return_data = false; - head.m_id = target; -#ifdef TRACE_LEVIN_PACKETS_BY_GUIDS - ::UuidCreate(&head.m_id); -#endif - head.m_command = command; - head.m_protocol_version = LEVIN_PROTOCOL_VER_1; - head.m_flags = LEVIN_PACKET_REQUEST; - CRITICAL_REGION_BEGIN(m_send_lock); - LOG_PRINT_L4("[" << m_socket <<"] SEND " << sizeof(head)); - int res = ::send(m_socket, (const char*)&head, sizeof(head), 0); - if(SOCKET_ERROR == res) - { - int err = ::WSAGetLastError(); - LOG_ERROR("Failed to send(), err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - disconnect(); - return LEVIN_ERROR_CONNECTION_DESTROYED; - } - LOG_PRINT_L4("[" << m_socket <<"] SEND " << (int)in_buff.size()); - res = ::send(m_socket, in_buff.data(), (int)in_buff.size(), 0); - if(SOCKET_ERROR == res) - { - int err = ::WSAGetLastError(); - LOG_ERROR("Failed to send(), err = " << err << " \"" << socket_errors::get_socket_error_text(err) <<"\""); - disconnect(); - return LEVIN_ERROR_CONNECTION_DESTROYED; - } - CRITICAL_REGION_END(); - LOG_PRINT_L4("LEVIN_PACKET_SENT. 
[len=" << head.m_cb << ", flags=" << head.m_flags << ", is_cmd=" << head.m_have_to_return_data <<", cmd_id = " << head.m_command << ", pr_v=" << head.m_protocol_version << ", uid=" << string_tools::get_str_from_guid_a(head.m_id) << "]"); - - return 1; - } - - - private: - bool have_some_data(SOCKET sock, int interval = 1) - { - fd_set fds; - FD_ZERO(&fds); - FD_SET(sock, &fds); - - fd_set fdse; - FD_ZERO(&fdse); - FD_SET(sock, &fdse); - - - timeval tv; - tv.tv_sec = interval; - tv.tv_usec = 0; - - int sel_res = select(0, &fds, 0, &fdse, &tv); - if(0 == sel_res) - return false; - else if(sel_res == SOCKET_ERROR) - { - if(m_is_stop) - return false; - int err_code = ::WSAGetLastError(); - LOG_ERROR("Filed to call select, err code = " << err_code); - disconnect(); - }else - { - if(fds.fd_array[0]) - {//some read operations was performed - return true; - }else if(fdse.fd_array[0]) - {//some error was at the socket - return true; - } - } - return false; - } - - - bool reciev_and_process_incoming_data() - { - bucket_head head = {0}; - uint32_t conn_index = 0; - bool is_request = false; - std::string local_buff; - CRITICAL_REGION_BEGIN(m_reciev_packet_lock);//to protect from socket reconnect between head and body - - if(!recv_n(m_socket, (char*)&head, sizeof(head))) - { - if(m_is_stop) - return false; - LOG_ERROR("Failed to recv_n"); - return false; - } - - conn_index = boost::interprocess::ipcdetail::atomic_read32(&m_current_connection_index); - - if(head.m_signature!=LEVIN_SIGNATURE) - { - LOG_ERROR("Signature missmatch in response"); - return false; - } - - is_request = (head.m_protocol_version == LEVIN_PROTOCOL_VER_1 && head.m_flags&LEVIN_PACKET_REQUEST); - - - local_buff.resize((size_t)head.m_cb); - if(!recv_n(m_socket, local_buff)) - { - if(m_is_stop) - return false; - LOG_ERROR("Filed to reciev"); - return false; - } - CRITICAL_REGION_END(); - - LOG_PRINT_L4("LEVIN_PACKET_RECIEVED. [len=" << head.m_cb << ", flags=" << head.m_flags << ", is_cmd=" << head.m_have_to_return_data <<", cmd_id = " << head.m_command << ", pr_v=" << head.m_protocol_version << ", uid=" << string_tools::get_str_from_guid_a(head.m_id) << "]"); - - if(is_request) - { - CRITICAL_REGION_BEGIN(m_recieved_packets_lock); - m_recieved_packets.resize(m_recieved_packets.size() + 1); - m_recieved_packets.back().m_hd = head; - m_recieved_packets.back().m_body.swap(local_buff); - m_recieved_packets.back().m_connection_index = conn_index; - CRITICAL_REGION_END(); - /* - - */ - }else - {//this is some response - - CRITICAL_REGION_BEGIN(m_local_invoke_buff_lock); - m_local_invoke_buff.swap(local_buff); - m_invoke_res = head.m_return_code; - CRITICAL_REGION_END(); - boost::interprocess::ipcdetail::atomic_write32(&m_invoke_data_ready, 1); //m_invoke_data_ready = true; - m_invoke_cond.notify_all(); - - } - return true; - } - - bool reciever_thread() - { - LOG_PRINT_L3("[" << m_socket <<"] Socket reciever thread started.[m_threads_count=" << m_threads_count << "]"); - log_space::log_singletone::set_thread_log_prefix("RECIEVER_WORKER"); - boost::interprocess::ipcdetail::atomic_inc32(&m_threads_count); - - while(!m_is_stop) - { - if(!m_connected) - { - Sleep(100); - continue; - } - - if(have_some_data(m_socket, 1)) - { - if(!reciev_and_process_incoming_data()) - { - if(m_is_stop) - { - break;//boost::interprocess::ipcdetail::atomic_dec32(&m_threads_count); - //return true; - } - LOG_ERROR("Failed to reciev_and_process_incoming_data. 
shutting down"); - //boost::interprocess::ipcdetail::atomic_dec32(&m_threads_count); - //disconnect_no_wait(); - //break; - } - } - } - - boost::interprocess::ipcdetail::atomic_dec32(&m_threads_count); - LOG_PRINT_L3("[" << m_socket <<"] Socket reciever thread stopped.[m_threads_count=" << m_threads_count << "]"); - return true; - } - - bool process_recieved_packet(bucket_head& head, const std::string& local_buff, uint32_t conn_index) - { - - net_utils::connection_context_base conn_context; - conn_context.m_remote_ip = m_ip; - conn_context.m_remote_port = m_port; - if(head.m_have_to_return_data) - { - std::string return_buff; - if(m_pcommands_handler) - head.m_return_code = m_pcommands_handler->invoke(head.m_id, head.m_command, local_buff, return_buff, conn_context); - else - head.m_return_code = LEVIN_ERROR_CONNECTION_HANDLER_NOT_DEFINED; - - - - head.m_cb = return_buff.size(); - head.m_have_to_return_data = false; - head.m_protocol_version = LEVIN_PROTOCOL_VER_1; - head.m_flags = LEVIN_PACKET_RESPONSE; - - std::string send_buff((const char*)&head, sizeof(head)); - send_buff += return_buff; - CRITICAL_REGION_BEGIN(m_send_lock); - if(conn_index != boost::interprocess::ipcdetail::atomic_read32(&m_current_connection_index)) - {//there was reconnect, send response back is not allowed - return true; - } - int res = ::send(m_socket, (const char*)send_buff.data(), send_buff.size(), 0); - if(res == SOCKET_ERROR) - { - int err_code = ::WSAGetLastError(); - LOG_ERROR("Failed to send, err = " << err_code); - return false; - } - CRITICAL_REGION_END(); - LOG_PRINT_L4("LEVIN_PACKET_SENT. [len=" << head.m_cb << ", flags=" << head.m_flags << ", is_cmd=" << head.m_have_to_return_data <<", cmd_id = " << head.m_command << ", pr_v=" << head.m_protocol_version << ", uid=" << string_tools::get_str_from_guid_a(head.m_id) << "]"); - - } - else - { - if(m_pcommands_handler) - m_pcommands_handler->notify(head.m_id, head.m_command, local_buff, conn_context); - } - - return true; - } - - bool handler_thread() - { - LOG_PRINT_L3("[" << m_socket <<"] Socket handler thread started.[m_threads_count=" << m_threads_count << "]"); - log_space::log_singletone::set_thread_log_prefix("HANDLER_WORKER"); - boost::interprocess::ipcdetail::atomic_inc32(&m_threads_count); - - while(!m_is_stop) - { - bool have_some_work = false; - std::string local_buff; - bucket_head bh = {0}; - uint32_t conn_index = 0; - - CRITICAL_REGION_BEGIN(m_recieved_packets_lock); - if(m_recieved_packets.size()) - { - bh = m_recieved_packets.begin()->m_hd; - conn_index = m_recieved_packets.begin()->m_connection_index; - local_buff.swap(m_recieved_packets.begin()->m_body); - have_some_work = true; - m_recieved_packets.pop_front(); - } - CRITICAL_REGION_END(); - - if(have_some_work) - { - process_recieved_packet(bh, local_buff, conn_index); - }else - { - //Idle when no work - Sleep(30); - } - } - - boost::interprocess::ipcdetail::atomic_dec32(&m_threads_count); - LOG_PRINT_L3("[" << m_socket <<"] Socket handler thread stopped.[m_threads_count=" << m_threads_count << "]"); - return true; - } - }; - -} -} \ No newline at end of file diff --git a/contrib/epee/include/net/levin_client_async.inl b/contrib/epee/include/net/levin_client_async.inl deleted file mode 100644 index e69de29bb2..0000000000 diff --git a/contrib/epee/include/net/levin_helper.h b/contrib/epee/include/net/levin_helper.h deleted file mode 100644 index a8406103ca..0000000000 --- a/contrib/epee/include/net/levin_helper.h +++ /dev/null @@ -1,137 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. 
Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#pragma once - -#include "levin_base.h" -#include "serializeble_struct_helper.h" - -namespace epee -{ -namespace levin -{ - template - bool pack_struct_to_levin_message(const t_struct& t, std::string& buff, int command_id) - { - buff.resize(sizeof(levin::bucket_head)); - levin::bucket_head& head = *(levin::bucket_head*)(&buff[0]); - head.m_signature = LEVIN_SIGNATURE; - head.m_cb = 0; - head.m_have_to_return_data = true; - head.m_command = command_id; - head.m_return_code = 1; - head.m_reservedA = rand(); //probably some flags in the future - head.m_reservedB = rand(); //probably some checksum in the future - - std::string buff_strg; - if(!StorageNamed::save_struct_as_storage_to_buff_t(t, buff_strg)) - return false; - - head.m_cb = buff_strg.size(); - buff.append(buff_strg); - return true; - } - - - bool pack_data_to_levin_message(const std::string& data, std::string& buff, int command_id) - { - buff.resize(sizeof(levin::bucket_head)); - levin::bucket_head& head = *(levin::bucket_head*)(&buff[0]); - head.m_signature = LEVIN_SIGNATURE; - head.m_cb = 0; - head.m_have_to_return_data = true; - head.m_command = command_id; - head.m_return_code = 1; - head.m_reservedA = rand(); //probably some flags in the future - head.m_reservedB = rand(); //probably some checksum in the future - - head.m_cb = data.size(); - buff.append(data); - return true; - } - - bool load_levin_data_from_levin_message(std::string& levin_data, const std::string& buff, int& command) - { - if(buff.size() < sizeof(levin::bucket_head) ) - { - LOG_PRINT_L3("size of buff(" << buff.size() << ") is too small, at load_struct_from_levin_message"); - return false; - } - - levin::bucket_head& head = *(levin::bucket_head*)(&buff[0]); - if(head.m_signature != LEVIN_SIGNATURE) - { - LOG_PRINT_L3("Failed to read signature in levin message, at load_struct_from_levin_message"); - return false; - } - if(head.m_cb != buff.size()-sizeof(levin::bucket_head)) - { - LOG_PRINT_L3("sizes mismatch, at 
load_struct_from_levin_message"); - return false; - } - - //std::string buff_strg; - levin_data.assign(&buff[sizeof(levin::bucket_head)], buff.size()-sizeof(levin::bucket_head)); - command = head.m_command; - return true; - } - - template - bool load_struct_from_levin_message(t_struct& t, const std::string& buff, int& command) - { - if(buff.size() < sizeof(levin::bucket_head) ) - { - LOG_ERROR("size of buff(" << buff.size() << ") is too small, at load_struct_from_levin_message"); - return false; - } - - levin::bucket_head& head = *(levin::bucket_head*)(&buff[0]); - if(head.m_signature != LEVIN_SIGNATURE) - { - LOG_ERROR("Failed to read signature in levin message, at load_struct_from_levin_message"); - return false; - } - if(head.m_cb != buff.size()-sizeof(levin::bucket_head)) - { - LOG_ERROR("sizes missmatch, at load_struct_from_levin_message"); - return false; - } - - std::string buff_strg; - buff_strg.assign(&buff[sizeof(levin::bucket_head)], buff.size()-sizeof(levin::bucket_head)); - - if(!StorageNamed::load_struct_from_storage_buff_t(t, buff_strg)) - { - LOG_ERROR("Failed to read storage, at load_struct_from_levin_message"); - return false; - } - command = head.m_command; - return true; - } -} -} \ No newline at end of file diff --git a/contrib/epee/include/net/levin_protocol_handler.h b/contrib/epee/include/net/levin_protocol_handler.h deleted file mode 100644 index 512ba1c3ca..0000000000 --- a/contrib/epee/include/net/levin_protocol_handler.h +++ /dev/null @@ -1,178 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// - - - -#ifndef _LEVIN_PROTOCOL_HANDLER_H_ -#define _LEVIN_PROTOCOL_HANDLER_H_ - -#include -#include "levin_base.h" - -namespace epee -{ -namespace levin -{ - template - struct protocl_handler_config - { - levin_commands_handler* m_pcommands_handler; - }; - - template - class protocol_handler - { - public: - typedef t_connection_context connection_context; - typedef protocl_handler_config config_type; - - protocol_handler(net_utils::i_service_endpoint* psnd_hndlr, config_type& config, t_connection_context& conn_context); - virtual ~protocol_handler(){} - - virtual bool handle_recv(const void* ptr, size_t cb); - - bool after_init_connection(){return true;} - private: - enum connection_data_state - { - conn_state_reading_head, - conn_state_reading_body - }; - - - config_type& m_config; - t_connection_context& m_conn_context; - net_utils::i_service_endpoint* m_psnd_hndlr; - std::string m_cach_in_buffer; - connection_data_state m_state; - bucket_head m_current_head; - }; - - template - protocol_handler::protocol_handler(net_utils::i_service_endpoint* psnd_hndlr, config_type& config, t_connection_context& conn_context): - m_config(config), - m_conn_context(conn_context), - m_psnd_hndlr(psnd_hndlr), - m_state(conn_state_reading_head), - m_current_head(bucket_head()) - {} - - template - bool protocol_handler::handle_recv(const void* ptr, size_t cb) - { - if(!m_config.m_pcommands_handler) - { - LOG_ERROR("Command handler not set!"); - return false; - } - m_cach_in_buffer.append((const char*)ptr, cb); - - bool is_continue = true; - while(is_continue) - { - switch(m_state) - { - case conn_state_reading_head: - if(m_cach_in_buffer.size() < sizeof(bucket_head)) - { - if(m_cach_in_buffer.size() >= sizeof(uint64_t) && *((uint64_t*)m_cach_in_buffer.data()) != LEVIN_SIGNATURE) - { - LOG_ERROR("Signature mismatch on accepted connection"); - return false; - } - is_continue = false; - break; - } - { - bucket_head* phead = (bucket_head*)m_cach_in_buffer.data(); - if(LEVIN_SIGNATURE != phead->m_signature) - { - LOG_ERROR("Signature mismatch on accepted connection"); - return false; - } - m_current_head = *phead; - } - m_cach_in_buffer.erase(0, sizeof(bucket_head)); - m_state = conn_state_reading_body; - break; - case conn_state_reading_body: - if(m_cach_in_buffer.size() < m_current_head.m_cb) - { - is_continue = false; - break; - } - { - std::string buff_to_invoke; - if(m_cach_in_buffer.size() == m_current_head.m_cb) - buff_to_invoke.swap(m_cach_in_buffer); - else - { - buff_to_invoke.assign(m_cach_in_buffer, 0, (std::string::size_type)m_current_head.m_cb); - m_cach_in_buffer.erase(0, (std::string::size_type)m_current_head.m_cb); - } - - - if(m_current_head.m_have_to_return_data) - { - std::string return_buff; - m_current_head.m_return_code = m_config.m_pcommands_handler->invoke(m_current_head.m_command, buff_to_invoke, return_buff, m_conn_context); - m_current_head.m_cb = return_buff.size(); - m_current_head.m_have_to_return_data = false; - std::string send_buff((const char*)&m_current_head, sizeof(m_current_head)); - send_buff += return_buff; - - if(!m_psnd_hndlr->do_send(send_buff.data(), send_buff.size())) - return false; - - } - else - m_config.m_pcommands_handler->notify(m_current_head.m_command, buff_to_invoke, m_conn_context); - } - m_state = conn_state_reading_head; - break; - default: - LOG_ERROR("Undefined state in levin_server_impl::connection_handler, m_state=" << m_state); - return false; - } - } - - return true; - } - - - - - - - -} -} - - - - -#endif //_LEVIN_PROTOCOL_HANDLER_H_ - diff 
--git a/contrib/epee/include/net/levin_protocol_handler_async.h b/contrib/epee/include/net/levin_protocol_handler_async.h deleted file mode 100644 index 406d92b285..0000000000 --- a/contrib/epee/include/net/levin_protocol_handler_async.h +++ /dev/null @@ -1,779 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
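The header removed below replaces the blocking client with a fully asynchronous handler: handle_recv() appends whatever bytes arrive to a cache and runs a two-state parser over it, first a bucket_head2, then the body it announces. A condensed sketch of that loop follows; the simplified header layout is an assumption (the real bucket_head2 lives in levin_base.h), and dispatch() stands in for the invoke/notify/response branching visible further down.

#include <cstdint>
#include <cstring>
#include <string>

#pragma pack(push, 1)
struct bucket_head2_sketch         // hypothetical stand-in for levin::bucket_head2
{
  uint64_t m_signature;
  uint64_t m_cb;                   // announced body size
  bool     m_have_to_return_data;
  uint32_t m_command;
  int32_t  m_return_code;
  uint32_t m_flags;                // LEVIN_PACKET_REQUEST / LEVIN_PACKET_RESPONSE
  uint32_t m_protocol_version;     // LEVIN_PROTOCOL_VER_1
};
#pragma pack(pop)

enum stream_state { stream_state_head, stream_state_body };

// Feed arbitrarily sized chunks; returns false on a framing error
// (the caller closes the connection), true otherwise.
template <typename dispatch_t>
bool feed(std::string& cache, stream_state& state, bucket_head2_sketch& head,
          uint64_t signature, const char* data, size_t len, dispatch_t dispatch)
{
  cache.append(data, len);
  for (;;)
  {
    if (state == stream_state_head)
    {
      if (cache.size() < sizeof(head))
        return true;                          // wait for the rest of the header
      std::memcpy(&head, cache.data(), sizeof(head));
      if (head.m_signature != signature)
        return false;                         // signature mismatch
      cache.erase(0, sizeof(head));
      state = stream_state_body;
    }
    else
    {
      if (cache.size() < head.m_cb)
        return true;                          // body not complete yet
      dispatch(head, cache.substr(0, (size_t)head.m_cb));
      cache.erase(0, (size_t)head.m_cb);
      state = stream_state_head;
    }
  }
}

The real handler additionally enforces m_max_packet_size against both the cache and the announced m_cb, as the code below shows.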
-// - -#pragma once -#include -#include -#include -#include - -#include - -#include "levin_base.h" -#include "misc_language.h" - - -namespace epee -{ -namespace levin -{ - -/************************************************************************/ -/* */ -/************************************************************************/ -template -class async_protocol_handler; - -template -class async_protocol_handler_config -{ - typedef std::map* > connections_map; - critical_section m_connects_lock; - connections_map m_connects; - - void add_connection(async_protocol_handler* pc); - void del_connection(async_protocol_handler* pc); - - async_protocol_handler* find_connection(boost::uuids::uuid connection_id) const; - int find_and_lock_connection(boost::uuids::uuid connection_id, async_protocol_handler*& aph); - - friend class async_protocol_handler; - -public: - typedef t_connection_context connection_context; - levin_commands_handler* m_pcommands_handler; - uint64_t m_max_packet_size; - uint64_t m_invoke_timeout; - - int invoke(int command, const std::string& in_buff, std::string& buff_out, boost::uuids::uuid connection_id); - template - int invoke_async(int command, const std::string& in_buff, boost::uuids::uuid connection_id, callback_t cb, size_t timeout = LEVIN_DEFAULT_TIMEOUT_PRECONFIGURED); - - int notify(int command, const std::string& in_buff, boost::uuids::uuid connection_id); - bool close(boost::uuids::uuid connection_id); - bool update_connection_context(const t_connection_context& contxt); - bool request_callback(boost::uuids::uuid connection_id); - template - bool foreach_connection(callback_t cb); - size_t get_connections_count(); - - async_protocol_handler_config():m_pcommands_handler(NULL), m_max_packet_size(LEVIN_DEFAULT_MAX_PACKET_SIZE) - {} -}; - - -/************************************************************************/ -/* */ -/************************************************************************/ -template -class async_protocol_handler -{ -public: - typedef t_connection_context connection_context; - typedef async_protocol_handler_config config_type; - - enum stream_state - { - stream_state_head, - stream_state_body - }; - - std::atomic m_deletion_initiated; - std::atomic m_protocol_released; - volatile uint32_t m_invoke_buf_ready; - - volatile int m_invoke_result_code; - - critical_section m_local_inv_buff_lock; - std::string m_local_inv_buff; - - critical_section m_send_lock; - critical_section m_call_lock; - - volatile uint32_t m_wait_count; - volatile uint32_t m_close_called; - bucket_head2 m_current_head; - net_utils::i_service_endpoint* m_pservice_endpoint; - config_type& m_config; - t_connection_context& m_connection_context; - - std::string m_cache_in_buffer; - stream_state m_state; - - int32_t m_oponent_protocol_ver; - bool m_connection_initialized; - - struct invoke_response_handler_base - { - virtual bool handle(int res, const std::string& buff, connection_context& context)=0; - virtual bool is_timer_started() const=0; - virtual void cancel()=0; - virtual bool cancel_timer()=0; - }; - template - struct anvoke_handler: invoke_response_handler_base - { - anvoke_handler(const callback_t& cb, uint64_t timeout, async_protocol_handler& con, int command) - :m_cb(cb), m_con(con), m_timer(con.m_pservice_endpoint->get_io_service()), m_timer_started(false), - m_cancel_timer_called(false), m_timer_cancelled(false), m_command(command) - { - if(m_con.start_outer_call()) - { - m_timer.expires_from_now(boost::posix_time::milliseconds(timeout)); - m_timer.async_wait([&con, 
command, cb](const boost::system::error_code& ec) - { - if(ec == boost::asio::error::operation_aborted) - return; - LOG_PRINT_CC(con.get_context_ref(), "Timeout on invoke operation happened, command: " << command, LOG_LEVEL_2); - std::string fake; - cb(LEVIN_ERROR_CONNECTION_TIMEDOUT, fake, con.get_context_ref()); - con.close(); - con.finish_outer_call(); - }); - m_timer_started = true; - } - } - virtual ~anvoke_handler() - {} - callback_t m_cb; - async_protocol_handler& m_con; - boost::asio::deadline_timer m_timer; - bool m_timer_started; - bool m_cancel_timer_called; - bool m_timer_cancelled; - int m_command; - virtual bool handle(int res, const std::string& buff, typename async_protocol_handler::connection_context& context) - { - if(!cancel_timer()) - return false; - m_cb(res, buff, context); - m_con.finish_outer_call(); - return true; - } - virtual bool is_timer_started() const - { - return m_timer_started; - } - virtual void cancel() - { - if(cancel_timer()) - { - std::string fake; - m_cb(LEVIN_ERROR_CONNECTION_DESTROYED, fake, m_con.get_context_ref()); - m_con.finish_outer_call(); - } - } - virtual bool cancel_timer() - { - if(!m_cancel_timer_called) - { - m_cancel_timer_called = true; - boost::system::error_code ignored_ec; - m_timer_cancelled = 1 == m_timer.cancel(ignored_ec); - } - return m_timer_cancelled; - } - }; - critical_section m_invoke_response_handlers_lock; - std::list > m_invoke_response_handlers; - - template - bool add_invoke_response_handler(callback_t cb, uint64_t timeout, async_protocol_handler& con, int command) - { - CRITICAL_REGION_LOCAL(m_invoke_response_handlers_lock); - boost::shared_ptr handler(boost::make_shared>(cb, timeout, con, command)); - m_invoke_response_handlers.push_back(handler); - return handler->is_timer_started(); - } - template friend struct anvoke_handler; -public: - async_protocol_handler(net_utils::i_service_endpoint* psnd_hndlr, - config_type& config, - t_connection_context& conn_context): - m_current_head(bucket_head2()), - m_pservice_endpoint(psnd_hndlr), - m_config(config), - m_connection_context(conn_context), - m_state(stream_state_head) - { - m_close_called = 0; - m_deletion_initiated = false; - m_protocol_released = false; - m_wait_count = 0; - m_oponent_protocol_ver = 0; - m_connection_initialized = false; - } - virtual ~async_protocol_handler() - { - m_deletion_initiated = true; - if(m_connection_initialized) - { - m_config.del_connection(this); - } - - for (size_t i = 0; i < 60 * 1000 / 100 && 0 != boost::interprocess::ipcdetail::atomic_read32(&m_wait_count); ++i) - { - misc_utils::sleep_no_w(100); - } - CHECK_AND_ASSERT_MES_NO_RET(0 == boost::interprocess::ipcdetail::atomic_read32(&m_wait_count), "Failed to wait for operation completion. 
m_wait_count = " << m_wait_count); - - LOG_PRINT_CC(m_connection_context, "~async_protocol_handler()", LOG_LEVEL_4); - } - - bool start_outer_call() - { - LOG_PRINT_CC_L4(m_connection_context, "[levin_protocol] -->> start_outer_call"); - if(!m_pservice_endpoint->add_ref()) - { - LOG_PRINT_CC_RED(m_connection_context, "[levin_protocol] -->> start_outer_call failed", LOG_LEVEL_4); - return false; - } - boost::interprocess::ipcdetail::atomic_inc32(&m_wait_count); - return true; - } - bool finish_outer_call() - { - LOG_PRINT_CC_L4(m_connection_context, "[levin_protocol] <<-- finish_outer_call"); - boost::interprocess::ipcdetail::atomic_dec32(&m_wait_count); - m_pservice_endpoint->release(); - return true; - } - - bool release_protocol() - { - decltype(m_invoke_response_handlers) local_invoke_response_handlers; - CRITICAL_REGION_BEGIN(m_invoke_response_handlers_lock); - local_invoke_response_handlers.swap(m_invoke_response_handlers); - m_protocol_released = true; - CRITICAL_REGION_END(); - - // Never call callback inside critical section, that can cause deadlock. Callback can be called when - // invoke_response_handler_base is cancelled - std::for_each(local_invoke_response_handlers.begin(), local_invoke_response_handlers.end(), [](const boost::shared_ptr& pinv_resp_hndlr) { - pinv_resp_hndlr->cancel(); - }); - - return true; - } - - bool close() - { - boost::interprocess::ipcdetail::atomic_inc32(&m_close_called); - - m_pservice_endpoint->close(); - return true; - } - - void update_connection_context(const connection_context& contxt) - { - m_connection_context = contxt; - } - - void request_callback() - { - misc_utils::auto_scope_leave_caller scope_exit_handler = misc_utils::create_scope_leave_handler( - boost::bind(&async_protocol_handler::finish_outer_call, this)); - - m_pservice_endpoint->request_callback(); - } - - void handle_qued_callback() - { - m_config.m_pcommands_handler->callback(m_connection_context); - } - - virtual bool handle_recv(const void* ptr, size_t cb) - { - if(boost::interprocess::ipcdetail::atomic_read32(&m_close_called)) - return false; //closing connections - - if(!m_config.m_pcommands_handler) - { - LOG_ERROR_CC(m_connection_context, "Commands handler not set!"); - return false; - } - - if(m_cache_in_buffer.size() + cb > m_config.m_max_packet_size) - { - LOG_ERROR_CC(m_connection_context, "Maximum packet size exceed!, m_max_packet_size = " << m_config.m_max_packet_size - << ", packet received " << m_cache_in_buffer.size() + cb - << ", connection will be closed."); - return false; - } - - m_cache_in_buffer.append((const char*)ptr, cb); - - bool is_continue = true; - while(is_continue) - { - switch(m_state) - { - case stream_state_body: - if(m_cache_in_buffer.size() < m_current_head.m_cb) - { - is_continue = false; - break; - } - { - std::string buff_to_invoke; - if(m_cache_in_buffer.size() == m_current_head.m_cb) - buff_to_invoke.swap(m_cache_in_buffer); - else - { - buff_to_invoke.assign(m_cache_in_buffer, 0, (std::string::size_type)m_current_head.m_cb); - m_cache_in_buffer.erase(0, (std::string::size_type)m_current_head.m_cb); - } - - bool is_response = (m_oponent_protocol_ver == LEVIN_PROTOCOL_VER_1 && m_current_head.m_flags&LEVIN_PACKET_RESPONSE); - - LOG_PRINT_CC_L4(m_connection_context, "LEVIN_PACKET_RECIEVED. 
[len=" << m_current_head.m_cb - << ", flags" << m_current_head.m_flags - << ", r?=" << m_current_head.m_have_to_return_data - <<", cmd = " << m_current_head.m_command - << ", v=" << m_current_head.m_protocol_version); - - if(is_response) - {//response to some invoke - - epee::critical_region_t invoke_response_handlers_guard(m_invoke_response_handlers_lock); - if(!m_invoke_response_handlers.empty()) - {//async call scenario - boost::shared_ptr response_handler = m_invoke_response_handlers.front(); - bool timer_cancelled = response_handler->cancel_timer(); - // Don't pop handler, to avoid destroying it - if(timer_cancelled) - m_invoke_response_handlers.pop_front(); - invoke_response_handlers_guard.unlock(); - - if(timer_cancelled) - response_handler->handle(m_current_head.m_command, buff_to_invoke, m_connection_context); - } - else - { - invoke_response_handlers_guard.unlock(); - //use sync call scenario - if(!boost::interprocess::ipcdetail::atomic_read32(&m_wait_count) && !boost::interprocess::ipcdetail::atomic_read32(&m_close_called)) - { - LOG_ERROR_CC(m_connection_context, "no active invoke when response came, wtf?"); - return false; - }else - { - CRITICAL_REGION_BEGIN(m_local_inv_buff_lock); - buff_to_invoke.swap(m_local_inv_buff); - buff_to_invoke.clear(); - m_invoke_result_code = m_current_head.m_return_code; - CRITICAL_REGION_END(); - boost::interprocess::ipcdetail::atomic_write32(&m_invoke_buf_ready, 1); - } - } - }else - { - if(m_current_head.m_have_to_return_data) - { - std::string return_buff; - m_current_head.m_return_code = m_config.m_pcommands_handler->invoke( - m_current_head.m_command, - buff_to_invoke, - return_buff, - m_connection_context); - m_current_head.m_cb = return_buff.size(); - m_current_head.m_have_to_return_data = false; - m_current_head.m_protocol_version = LEVIN_PROTOCOL_VER_1; - m_current_head.m_flags = LEVIN_PACKET_RESPONSE; - std::string send_buff((const char*)&m_current_head, sizeof(m_current_head)); - send_buff += return_buff; - CRITICAL_REGION_BEGIN(m_send_lock); - if(!m_pservice_endpoint->do_send(send_buff.data(), send_buff.size())) - return false; - CRITICAL_REGION_END(); - LOG_PRINT_CC_L4(m_connection_context, "LEVIN_PACKET_SENT. 
[len=" << m_current_head.m_cb - << ", flags" << m_current_head.m_flags - << ", r?=" << m_current_head.m_have_to_return_data - <<", cmd = " << m_current_head.m_command - << ", ver=" << m_current_head.m_protocol_version); - } - else - m_config.m_pcommands_handler->notify(m_current_head.m_command, buff_to_invoke, m_connection_context); - } - } - m_state = stream_state_head; - break; - case stream_state_head: - { - if(m_cache_in_buffer.size() < sizeof(bucket_head2)) - { - if(m_cache_in_buffer.size() >= sizeof(uint64_t) && *((uint64_t*)m_cache_in_buffer.data()) != LEVIN_SIGNATURE) - { - LOG_ERROR_CC(m_connection_context, "Signature mismatch, connection will be closed"); - return false; - } - is_continue = false; - break; - } - - bucket_head2* phead = (bucket_head2*)m_cache_in_buffer.data(); - if(LEVIN_SIGNATURE != phead->m_signature) - { - LOG_ERROR_CC(m_connection_context, "Signature mismatch, connection will be closed"); - return false; - } - m_current_head = *phead; - - m_cache_in_buffer.erase(0, sizeof(bucket_head2)); - m_state = stream_state_body; - m_oponent_protocol_ver = m_current_head.m_protocol_version; - if(m_current_head.m_cb > m_config.m_max_packet_size) - { - LOG_ERROR_CC(m_connection_context, "Maximum packet size exceed!, m_max_packet_size = " << m_config.m_max_packet_size - << ", packet header received " << m_current_head.m_cb - << ", connection will be closed."); - return false; - } - } - break; - default: - LOG_ERROR_CC(m_connection_context, "Undefined state in levin_server_impl::connection_handler, m_state=" << m_state); - return false; - } - } - - return true; - } - - bool after_init_connection() - { - if (!m_connection_initialized) - { - m_connection_initialized = true; - m_config.add_connection(this); - } - return true; - } - - template - bool async_invoke(int command, const std::string& in_buff, callback_t cb, size_t timeout = LEVIN_DEFAULT_TIMEOUT_PRECONFIGURED) - { - misc_utils::auto_scope_leave_caller scope_exit_handler = misc_utils::create_scope_leave_handler( - boost::bind(&async_protocol_handler::finish_outer_call, this)); - - if(timeout == LEVIN_DEFAULT_TIMEOUT_PRECONFIGURED) - timeout = m_config.m_invoke_timeout; - - int err_code = LEVIN_OK; - do - { - if(m_deletion_initiated) - { - err_code = LEVIN_ERROR_CONNECTION_DESTROYED; - break; - } - - CRITICAL_REGION_LOCAL(m_call_lock); - - if(m_deletion_initiated) - { - err_code = LEVIN_ERROR_CONNECTION_DESTROYED; - break; - } - - bucket_head2 head = {0}; - head.m_signature = LEVIN_SIGNATURE; - head.m_cb = in_buff.size(); - head.m_have_to_return_data = true; - - head.m_flags = LEVIN_PACKET_REQUEST; - head.m_command = command; - head.m_protocol_version = LEVIN_PROTOCOL_VER_1; - - boost::interprocess::ipcdetail::atomic_write32(&m_invoke_buf_ready, 0); - CRITICAL_REGION_BEGIN(m_send_lock); - CRITICAL_REGION_LOCAL1(m_invoke_response_handlers_lock); - if(!m_pservice_endpoint->do_send(&head, sizeof(head))) - { -// LOG_ERROR_CC(m_connection_context, "Failed to do_send"); - err_code = LEVIN_ERROR_CONNECTION; - break; - } - - if(!m_pservice_endpoint->do_send(in_buff.data(), (int)in_buff.size())) - { - LOG_ERROR_CC(m_connection_context, "Failed to do_send"); - err_code = LEVIN_ERROR_CONNECTION; - break; - } - - if(!add_invoke_response_handler(cb, timeout, *this, command)) - { - err_code = LEVIN_ERROR_CONNECTION_DESTROYED; - break; - } - CRITICAL_REGION_END(); - } while (false); - - if (LEVIN_OK != err_code) - { - std::string stub_buff; - // Never call callback inside critical section, that can cause deadlock - cb(err_code, 
stub_buff, m_connection_context); - return false; - } - - return true; - } - - int invoke(int command, const std::string& in_buff, std::string& buff_out) - { - misc_utils::auto_scope_leave_caller scope_exit_handler = misc_utils::create_scope_leave_handler( - boost::bind(&async_protocol_handler::finish_outer_call, this)); - - if(m_deletion_initiated) - return LEVIN_ERROR_CONNECTION_DESTROYED; - - CRITICAL_REGION_LOCAL(m_call_lock); - - if(m_deletion_initiated) - return LEVIN_ERROR_CONNECTION_DESTROYED; - - bucket_head2 head = {0}; - head.m_signature = LEVIN_SIGNATURE; - head.m_cb = in_buff.size(); - head.m_have_to_return_data = true; - - head.m_flags = LEVIN_PACKET_REQUEST; - head.m_command = command; - head.m_protocol_version = LEVIN_PROTOCOL_VER_1; - - boost::interprocess::ipcdetail::atomic_write32(&m_invoke_buf_ready, 0); - CRITICAL_REGION_BEGIN(m_send_lock); - if(!m_pservice_endpoint->do_send(&head, sizeof(head))) - { - LOG_ERROR_CC(m_connection_context, "Failed to do_send"); - return LEVIN_ERROR_CONNECTION; - } - - if(!m_pservice_endpoint->do_send(in_buff.data(), (int)in_buff.size())) - { - LOG_ERROR_CC(m_connection_context, "Failed to do_send"); - return LEVIN_ERROR_CONNECTION; - } - CRITICAL_REGION_END(); - - LOG_PRINT_CC_L4(m_connection_context, "LEVIN_PACKET_SENT. [len=" << head.m_cb - << ", f=" << head.m_flags - << ", r?=" << head.m_have_to_return_data - << ", cmd = " << head.m_command - << ", ver=" << head.m_protocol_version); - - uint64_t ticks_start = misc_utils::get_tick_count(); - - while(!boost::interprocess::ipcdetail::atomic_read32(&m_invoke_buf_ready) && !m_deletion_initiated && !m_protocol_released) - { - if(misc_utils::get_tick_count() - ticks_start > m_config.m_invoke_timeout) - { - LOG_PRINT_CC_L2(m_connection_context, "invoke timeout (" << m_config.m_invoke_timeout << "), closing connection "); - close(); - return LEVIN_ERROR_CONNECTION_TIMEDOUT; - } - if(!m_pservice_endpoint->call_run_once_service_io()) - return LEVIN_ERROR_CONNECTION_DESTROYED; - } - - if(m_deletion_initiated || m_protocol_released) - return LEVIN_ERROR_CONNECTION_DESTROYED; - - CRITICAL_REGION_BEGIN(m_local_inv_buff_lock); - buff_out.swap(m_local_inv_buff); - m_local_inv_buff.clear(); - CRITICAL_REGION_END(); - - return m_invoke_result_code; - } - - int notify(int command, const std::string& in_buff) - { - misc_utils::auto_scope_leave_caller scope_exit_handler = misc_utils::create_scope_leave_handler( - boost::bind(&async_protocol_handler::finish_outer_call, this)); - - if(m_deletion_initiated) - return LEVIN_ERROR_CONNECTION_DESTROYED; - - CRITICAL_REGION_LOCAL(m_call_lock); - - if(m_deletion_initiated) - return LEVIN_ERROR_CONNECTION_DESTROYED; - - bucket_head2 head = {0}; - head.m_signature = LEVIN_SIGNATURE; - head.m_have_to_return_data = false; - head.m_cb = in_buff.size(); - - head.m_command = command; - head.m_protocol_version = LEVIN_PROTOCOL_VER_1; - head.m_flags = LEVIN_PACKET_REQUEST; - CRITICAL_REGION_BEGIN(m_send_lock); - if(!m_pservice_endpoint->do_send(&head, sizeof(head))) - { -// LOG_ERROR_CC(m_connection_context, "Failed to do_send()"); - return -1; - } - - if(!m_pservice_endpoint->do_send(in_buff.data(), (int)in_buff.size())) - { - LOG_ERROR("Failed to do_send()"); - return -1; - } - CRITICAL_REGION_END(); - LOG_PRINT_CC_L4(m_connection_context, "LEVIN_PACKET_SENT. 
[len=" << head.m_cb << - ", f=" << head.m_flags << - ", r?=" << head.m_have_to_return_data << - ", cmd = " << head.m_command << - ", ver=" << head.m_protocol_version); - - return 1; - } - //------------------------------------------------------------------------------------------ - boost::uuids::uuid get_connection_id() {return m_connection_context.m_connection_id;} - //------------------------------------------------------------------------------------------ - t_connection_context& get_context_ref() {return m_connection_context;} -}; -//------------------------------------------------------------------------------------------ -template -void async_protocol_handler_config::del_connection(async_protocol_handler* pconn) -{ - CRITICAL_REGION_BEGIN(m_connects_lock); - m_connects.erase(pconn->get_connection_id()); - CRITICAL_REGION_END(); - m_pcommands_handler->on_connection_close(pconn->m_connection_context); -} -//------------------------------------------------------------------------------------------ -template -void async_protocol_handler_config::add_connection(async_protocol_handler* pconn) -{ - CRITICAL_REGION_BEGIN(m_connects_lock); - m_connects[pconn->get_connection_id()] = pconn; - CRITICAL_REGION_END(); - m_pcommands_handler->on_connection_new(pconn->m_connection_context); -} -//------------------------------------------------------------------------------------------ -template -async_protocol_handler* async_protocol_handler_config::find_connection(boost::uuids::uuid connection_id) const -{ - auto it = m_connects.find(connection_id); - return it == m_connects.end() ? 0 : it->second; -} -//------------------------------------------------------------------------------------------ -template -int async_protocol_handler_config::find_and_lock_connection(boost::uuids::uuid connection_id, async_protocol_handler*& aph) -{ - CRITICAL_REGION_LOCAL(m_connects_lock); - aph = find_connection(connection_id); - if(0 == aph) - return LEVIN_ERROR_CONNECTION_NOT_FOUND; - if(!aph->start_outer_call()) - return LEVIN_ERROR_CONNECTION_DESTROYED; - return LEVIN_OK; -} -//------------------------------------------------------------------------------------------ -template -int async_protocol_handler_config::invoke(int command, const std::string& in_buff, std::string& buff_out, boost::uuids::uuid connection_id) -{ - async_protocol_handler* aph; - int r = find_and_lock_connection(connection_id, aph); - return LEVIN_OK == r ? aph->invoke(command, in_buff, buff_out) : r; -} -//------------------------------------------------------------------------------------------ -template template -int async_protocol_handler_config::invoke_async(int command, const std::string& in_buff, boost::uuids::uuid connection_id, callback_t cb, size_t timeout) -{ - async_protocol_handler* aph; - int r = find_and_lock_connection(connection_id, aph); - return LEVIN_OK == r ? 
aph->async_invoke(command, in_buff, cb, timeout) : r; -} -//------------------------------------------------------------------------------------------ -template template -bool async_protocol_handler_config::foreach_connection(callback_t cb) -{ - CRITICAL_REGION_LOCAL(m_connects_lock); - for(auto& c: m_connects) - { - async_protocol_handler* aph = c.second; - if(!cb(aph->get_context_ref())) - return false; - } - return true; -} -//------------------------------------------------------------------------------------------ -template -size_t async_protocol_handler_config::get_connections_count() -{ - CRITICAL_REGION_LOCAL(m_connects_lock); - return m_connects.size(); -} -//------------------------------------------------------------------------------------------ -template -int async_protocol_handler_config::notify(int command, const std::string& in_buff, boost::uuids::uuid connection_id) -{ - async_protocol_handler* aph; - int r = find_and_lock_connection(connection_id, aph); - return LEVIN_OK == r ? aph->notify(command, in_buff) : r; -} -//------------------------------------------------------------------------------------------ -template -bool async_protocol_handler_config::close(boost::uuids::uuid connection_id) -{ - CRITICAL_REGION_LOCAL(m_connects_lock); - async_protocol_handler* aph = find_connection(connection_id); - return 0 != aph ? aph->close() : false; -} -//------------------------------------------------------------------------------------------ -template -bool async_protocol_handler_config::update_connection_context(const t_connection_context& contxt) -{ - CRITICAL_REGION_LOCAL(m_connects_lock); - async_protocol_handler* aph = find_connection(contxt.m_connection_id); - if(0 == aph) - return false; - aph->update_connection_context(contxt); - return true; -} -//------------------------------------------------------------------------------------------ -template -bool async_protocol_handler_config::request_callback(boost::uuids::uuid connection_id) -{ - async_protocol_handler* aph; - int r = find_and_lock_connection(connection_id, aph); - if(LEVIN_OK == r) - { - aph->request_callback(); - return true; - } - else - { - return false; - } -} -} -} diff --git a/contrib/epee/include/net/levin_server_cp.h b/contrib/epee/include/net/levin_server_cp.h deleted file mode 100644 index 8ece350595..0000000000 --- a/contrib/epee/include/net/levin_server_cp.h +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - - -#ifndef _HTTP_SERVER_CP_H_ -#define _HTTP_SERVER_CP_H_ - -#include "abstract_tcp_server_cp.h" -#include "levin_protocol_handler.h" -namespace epee -{ -namespace net_utils -{ - typedef cp_server_impl cp_levin_server; -} -} - - - -#endif - - diff --git a/contrib/epee/include/net/levin_server_cp2.h b/contrib/epee/include/net/levin_server_cp2.h deleted file mode 100644 index b29d49bf8f..0000000000 --- a/contrib/epee/include/net/levin_server_cp2.h +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#ifndef _HTTP_SERVER_CP_H_ -#define _HTTP_SERVER_CP_H_ - -#include "abstract_tcp_server2.h" -#include "levin_protocol_handler.h" -#include "levin_protocol_handler_async.h" - -namespace epee -{ -namespace net_utils -{ - typedef boosted_tcp_server > boosted_levin_server; - typedef boosted_tcp_server > boosted_levin_async_server; -} -} - - - -#endif - - diff --git a/contrib/epee/include/net/local_ip.h b/contrib/epee/include/net/local_ip.h deleted file mode 100644 index 0d458963c6..0000000000 --- a/contrib/epee/include/net/local_ip.h +++ /dev/null @@ -1,72 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#pragma once - -namespace epee -{ - namespace net_utils - { - inline - bool is_ip_local(uint32_t ip) - { - /* - local ip area - 10.0.0.0 — 10.255.255.255 - 172.16.0.0 — 172.31.255.255 - 192.168.0.0 — 192.168.255.255 - */ - if( (ip | 0xffffff00) == 0xffffff0a) - return true; - - if( (ip | 0xffff0000) == 0xffffa8c0) - return true; - - if( (ip | 0xffffff00) == 0xffffffac) - { - uint32_t second_num = (ip << 8) & 0xff000000; - if(second_num >= 16 && second_num <= 31 ) - return true; - } - return false; - } - inline - bool is_ip_loopback(uint32_t ip) - { - if( (ip | 0xffffff00) == 0xffffff7f) - return true; - //MAKE_IP - /* - loopback ip - 127.0.0.0 — 127.255.255.255 - */ - return false; - } - - } -} - diff --git a/contrib/epee/include/net/multiprotocols_server.h b/contrib/epee/include/net/multiprotocols_server.h deleted file mode 100644 index 4807a44218..0000000000 --- a/contrib/epee/include/net/multiprotocols_server.h +++ /dev/null @@ -1,47 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#ifndef _MULTIPROTOCOLS_SERVER_H_ -#define _MULTIPROTOCOLS_SERVER_H_ - -//#include "abstract_tcp_server_cp.h" -#include "protocol_switcher.h" -#include "abstract_tcp_server2.h" - -namespace epee -{ -namespace net_utils -{ - //typedef cp_server_impl multiprotocol_server; - typedef boosted_tcp_server boosted_multiprotocol_server; -} -} - - -#endif //_MULTIPROTOCOLS_SERVER_H_ - diff --git a/contrib/epee/include/net/munin_connection_handler.h b/contrib/epee/include/net/munin_connection_handler.h deleted file mode 100644 index 8579339c57..0000000000 --- a/contrib/epee/include/net/munin_connection_handler.h +++ /dev/null @@ -1,376 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
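One detail worth flagging in the deleted local_ip.h above: the helpers treat the address as a little-endian uint32_t (first octet in the low byte), which the 10.0.0.0/8 and 192.168.0.0/16 masks handle correctly, but the 172.16.0.0/12 branch shifts the second octet into the top byte ((ip << 8) & 0xff000000) and then compares it against 16..31, so that branch can never match. A hedged corrected sketch for the same byte layout:

    #include <cstdint>

    // Sketch: 172.16.0.0 - 172.31.255.255 with the first octet in the low byte.
    inline bool is_ip_local_172(uint32_t ip)
    {
      if((ip & 0xff) != 0xac)             // first octet must be 172
        return false;
      uint32_t second = (ip >> 8) & 0xff; // second octet as a plain value
      return second >= 16 && second <= 31;
    }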
-// - - - -#ifndef _MUNIN_CONNECTION_HANDLER_H_ -#define _MUNIN_CONNECTION_HANDLER_H_ - -#include -#include "net_utils_base.h" -#include "to_nonconst_iterator.h" -#include "http_base.h" -#include "reg_exp_definer.h" - -#define MUNIN_ARGS_DEFAULT(vertial_lable_str) "graph_args --base 1000 -l 0 --vertical-label " vertial_lable_str " \n" -#define MUNIN_ARGS_FORCE_AUPPER_LIMIT(vertial_lable_str, limit) "graph_args --base 1000 -l 0 --vertical-label " vertial_lable_str " --rigid --upper-limit " limit " \n" -#define MUNIN_TITLE(title_str) "graph_title " title_str "\n" -#define MUNIN_CATEGORY(category_str) "graph_category " category_str "\n" -#define MUNIN_INFO(info_str) "graph_info " info_str "\n" -#define MUNIN_ENTRY(var_name) #var_name".label " #var_name "\n" #var_name".info "#var_name".\n" -#define MUNIN_ENTRY_AREA(var_name) #var_name".label " #var_name "\n" #var_name".info "#var_name".\n" #var_name".draw AREASTACK\n" -#define MUNIN_ENTRY_ALIAS(var_name, alias) #var_name".label " #alias"\n" #var_name".info "#alias".\n" -#define BEGIN_MUNIN_SERVICE(servivece_name_str) if(servivece_name_str == pservice->m_service_name) { -#define END_MUNIN_SERVICE() } -#define MUNIN_SERVICE_PARAM(munin_var_name_str, variable) paramters_text += std::string() + munin_var_name_str ".value " + boost::lexical_cast(variable) + "\n" - - - - -namespace epee -{ -namespace net_utils -{ - namespace munin - { - - - /************************************************************************/ - /* */ - /************************************************************************/ - struct munin_service; - - struct munin_service_data_provider - { - virtual bool update_service_data(munin_service* pservice, std::string& paramters_text)=0; - }; - - struct munin_service - { - std::string m_service_name; - std::string m_service_config_string; - munin_service_data_provider* m_pdata_provider; - }; - - struct node_server_config - { - std::list m_services; - //TODO: - }; - - struct fake_send_handler: public i_service_endpoint - { - virtual bool do_send(const void* ptr, size_t cb) - { - m_cache += std::string((const char*)ptr, cb); - return true; - } - public: - - std::string m_cache; - }; - - /************************************************************************/ - /* */ - /************************************************************************/ - class munin_node_server_connection_handler - { - public: - typedef node_server_config config_type; - typedef connection_context_base connection_context; - - munin_node_server_connection_handler(i_service_endpoint* psnd_hndlr, config_type& config, const connection_context_base& context):m_psnd_hndlr(psnd_hndlr), - m_machine_state(http_state_retriving_comand_line), - m_config(config) - { - init(); - } - virtual ~munin_node_server_connection_handler() - { - - } - - bool release_protocol() - { - return true; - } - bool after_init_connection() - { - std::string hello_str = "# munin node at "; - hello_str += m_host_name + "\n"; - send_hook(hello_str); - return true; - } - - virtual bool thread_init() - { - return true; - } - - virtual bool thread_deinit() - { - return true; - } - - void handle_qued_callback() - { - - } - - virtual bool handle_recv(const void* ptr, size_t cb) - { - - const char* pbuff = (const char*)ptr; - std::string recvd_buff(pbuff, cb); - LOG_PRINT("munin_recv: \n" << recvd_buff, LOG_LEVEL_3); - - m_cache += recvd_buff; - - bool stop_handling = false; - while(!stop_handling) - { - switch(m_machine_state) - { - case http_state_retriving_comand_line: - { - - std::string::size_type 
fpos = m_cache.find('\n'); - if(std::string::npos != fpos ) - { - bool res = handle_command(m_cache); - if(!res) - return false; - m_cache.erase(0, fpos+1); - continue; - } - stop_handling = true; - } - break; - case http_state_error: - stop_handling = true; - return false; - default: - LOG_ERROR("Error in munin state machine! Unkonwon state=" << m_machine_state); - stop_handling = true; - m_machine_state = http_state_error; - return false; - } - - } - - return true; - } - - private: - - - bool init() - { - char hostname[64] = {0}; - int res = gethostname(hostname, 64); - hostname[63] = 0;//be happy - m_host_name = hostname; - return true; - } - bool handle_command(const std::string& command) - { - // list, nodes, config, fetch, version or quit - STATIC_REGEXP_EXPR_1(rexp_match_command_line, "^((list)|(nodes)|(config)|(fetch)|(version)|(quit))(\\s+(\\S+))?", boost::regex::icase | boost::regex::normal); - // 12 3 4 5 6 7 8 9 - size_t match_len = 0; - boost::smatch result; - if(boost::regex_search(command, result, rexp_match_command_line, boost::match_default) && result[0].matched) - { - if(result[2].matched) - {//list command - return handle_list_command(); - }else if(result[3].matched) - {//nodes command - return handle_nodes_command(); - }else if(result[4].matched) - {//config command - if(result[9].matched) - return handle_config_command(result[9]); - else - { - send_hook("Unknown service\n"); - } - }else if(result[5].matched) - {//fetch command - if(result[9].matched) - return handle_fetch_command(result[9]); - else - { - send_hook("Unknown service\n"); - } - }else if(result[6].matched) - {//version command - return handle_version_command(); - }else if(result[7].matched) - {//quit command - return handle_quit_command(); - } - else - return send_hook("Unknown command. Try list, nodes, config, fetch, version or quit\n"); - } - - return send_hook("Unknown command. 
Try list, nodes, config, fetch, version or quit\n");; - } - - bool handle_list_command() - { - std::string buff_to_send; - for(std::list::const_iterator it = m_config.m_services.begin(); it!=m_config.m_services.end();it++) - { - buff_to_send += it->m_service_name + " "; - } - buff_to_send+='\n'; - return send_hook(buff_to_send); - } - bool handle_nodes_command() - { - //supports only one node - host name - send_hook(m_host_name + "\n.\n"); - return true; - } - bool handle_config_command(const std::string& service_name) - { - munin_service* psrv = get_service_by_name(service_name); - if(!psrv) - return send_hook(std::string() + "Unknown service\n"); - - - return send_hook(psrv->m_service_config_string + ".\n"); - } - - bool handle_fetch_command(const std::string& service_name) - { - munin_service* psrv = get_service_by_name(service_name); - if(!psrv) - return send_hook(std::string() + "Unknown service\n"); - - std::string buff; - psrv->m_pdata_provider->update_service_data(psrv, buff); - - buff += ".\n"; - return send_hook(buff); - } - bool handle_version_command() - { - return send_hook("Munin node component by Andrey Sabelnikov\n"); - } - bool handle_quit_command() - { - return false; - } - - bool send_hook(const std::string& buff) - { - LOG_PRINT("munin_send: \n" << buff, LOG_LEVEL_3); - - if(m_psnd_hndlr) - return m_psnd_hndlr->do_send(buff.data(), buff.size()); - else - return false; - } - - - munin_service* get_service_by_name(const std::string& srv_name) - { - std::list::iterator it = m_config.m_services.begin(); - for(; it!=m_config.m_services.end(); it++) - if(it->m_service_name == srv_name) - break; - - if(it==m_config.m_services.end()) - return NULL; - - return &(*it); - } - - enum machine_state{ - http_state_retriving_comand_line, - http_state_error - }; - - - config_type& m_config; - machine_state m_machine_state; - std::string m_cache; - std::string m_host_name; - protected: - i_service_endpoint* m_psnd_hndlr; - }; - - - inline bool test_self() - { - /*WSADATA w; - ::WSAStartup(MAKEWORD(1, 1), &w); - node_server_config sc; - sc.m_services.push_back(munin_service()); - sc.m_services.back().m_service_name = "test_service"; - - sc.m_services.back().m_service_config_string = - "graph_args --base 1000 -l 0 --vertical-label N --upper-limit 329342976\n" - "graph_title REPORTS STATICTICS\n" - "graph_category bind\n" - "graph_info This graph shows how many reports came in fixed time period.\n" - "graph_order apps free swap\n" - "apps.label apps\n" - "apps.draw AREA\n" - "apps.info Memory used by user-space applications.\n" - "swap.label swap\n" - "swap.draw STACK\n" - "swap.info Swap space used.\n" - "free.label unused\n" - "free.draw STACK\n" - "free.info Wasted memory. 
Memory that is not used for anything at all.\n" - "committed.label committed\n" - "committed.draw LINE2\n" - "committed.warn 625410048\n" - "committed.info The amount of memory that would be used if all the memory that's been allocated were to be used.\n"; - - - sc.m_services.push_back(munin_service()); - sc.m_services.back().m_service_name = "test_service1"; - fake_send_handler fh; - munin_node_server_connection_handler mh(&fh, sc); - - std::string buff = "list\n"; - mh.handle_recv(buff.data(), buff.size()); - - - buff = "nodes\n"; - mh.handle_recv(buff.data(), buff.size()); -*/ - return true; - } - - } -} -} -#endif//!_MUNIN_CONNECTION_HANDLER_H_ \ No newline at end of file diff --git a/contrib/epee/include/net/munin_node_server.h b/contrib/epee/include/net/munin_node_server.h deleted file mode 100644 index 07637f550d..0000000000 --- a/contrib/epee/include/net/munin_node_server.h +++ /dev/null @@ -1,49 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#ifndef _MUNIN_NODE_SERVER_H_ -#define _MUNIN_NODE_SERVER_H_ - -#include -//#include "net_utils_base.h" -#include "munin_connection_handler.h" -//#include "abstract_tcp_server.h" -//#include "abstract_tcp_server_cp.h" -#include "abstract_tcp_server2.h" -namespace epee -{ -namespace net_utils -{ - namespace munin - { - typedef boosted_tcp_server munin_node_server; - //typedef cp_server_impl munin_node_cp_server; - } -} -} -#endif//!_MUNIN_NODE_SERVER_H_ \ No newline at end of file diff --git a/contrib/epee/include/net/net_helper.h b/contrib/epee/include/net/net_helper.h deleted file mode 100644 index 4f7ebfa044..0000000000 --- a/contrib/epee/include/net/net_helper.h +++ /dev/null @@ -1,683 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. 
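The munin connection handler deleted above is a line-framed state machine: handle_recv() appends everything to m_cache, carves off one '\n'-terminated command at a time, and keeps any partial tail for the next read. The same loop in isolation, with illustrative names (line_framer, on_line) rather than the deleted class:

    #include <cstddef>
    #include <functional>
    #include <string>

    class line_framer
    {
      std::string m_cache; // partial line carried across reads
    public:
      // Returns false when the per-line handler asks to stop (e.g. "quit").
      bool feed(const char* ptr, std::size_t cb,
                const std::function<bool(const std::string&)>& on_line)
      {
        m_cache.append(ptr, cb);
        std::string::size_type fpos;
        while((fpos = m_cache.find('\n')) != std::string::npos)
        {
          std::string line = m_cache.substr(0, fpos);
          m_cache.erase(0, fpos + 1);
          if(!on_line(line))
            return false;
        }
        return true;
      }
    };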
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - - -#pragma once - -//#include -//#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "net/net_utils_base.h" -#include "misc_language.h" -//#include "profile_tools.h" -#include "../string_tools.h" - -#ifndef MAKE_IP -#define MAKE_IP( a1, a2, a3, a4 ) (a1|(a2<<8)|(a3<<16)|(a4<<24)) -#endif - - -namespace epee -{ -namespace net_utils -{ - - class blocked_mode_client - { - - - struct handler_obj - { - handler_obj(boost::system::error_code& error, size_t& bytes_transferred):ref_error(error), ref_bytes_transferred(bytes_transferred) - {} - handler_obj(const handler_obj& other_obj):ref_error(other_obj.ref_error), ref_bytes_transferred(other_obj.ref_bytes_transferred) - {} - - boost::system::error_code& ref_error; - size_t& ref_bytes_transferred; - - void operator()(const boost::system::error_code& error, // Result of operation. - std::size_t bytes_transferred // Number of bytes read. - ) - { - ref_error = error; - ref_bytes_transferred = bytes_transferred; - } - }; - - public: - inline - blocked_mode_client():m_socket(m_io_service), - m_initialized(false), - m_connected(false), - m_deadline(m_io_service), - m_shutdowned(0) - { - - - m_initialized = true; - - - // No deadline is required until the first socket operation is started. We - // set the deadline to positive infinity so that the actor takes no action - // until a specific deadline is set. - m_deadline.expires_at(boost::posix_time::pos_infin); - - // Start the persistent actor that checks for deadline expiry. 
- check_deadline(); - - } - inline - ~blocked_mode_client() - { - //profile_tools::local_coast lc("~blocked_mode_client()", 3); - shutdown(); - } - - inline void set_recv_timeout(int reciev_timeout) - { - m_reciev_timeout = reciev_timeout; - } - - inline - bool connect(const std::string& addr, int port, unsigned int connect_timeout, unsigned int reciev_timeout, const std::string& bind_ip = "0.0.0.0") - { - return connect(addr, std::to_string(port), connect_timeout, reciev_timeout, bind_ip); - } - - inline - bool connect(const std::string& addr, const std::string& port, unsigned int connect_timeout, unsigned int reciev_timeout, const std::string& bind_ip = "0.0.0.0") - { - m_connect_timeout = connect_timeout; - m_reciev_timeout = reciev_timeout; - m_connected = false; - if(!m_reciev_timeout) - m_reciev_timeout = m_connect_timeout; - - try - { - m_socket.close(); - // Get a list of endpoints corresponding to the server name. - - - ////////////////////////////////////////////////////////////////////////// - - boost::asio::ip::tcp::resolver resolver(m_io_service); - boost::asio::ip::tcp::resolver::query query(boost::asio::ip::tcp::v4(), addr, port); - boost::asio::ip::tcp::resolver::iterator iterator = resolver.resolve(query); - boost::asio::ip::tcp::resolver::iterator end; - if(iterator == end) - { - LOG_ERROR("Failed to resolve " << addr); - return false; - } - - ////////////////////////////////////////////////////////////////////////// - - - //boost::asio::ip::tcp::endpoint remote_endpoint(boost::asio::ip::address::from_string(addr.c_str()), port); - boost::asio::ip::tcp::endpoint remote_endpoint(*iterator); - - - m_socket.open(remote_endpoint.protocol()); - if(bind_ip != "0.0.0.0" && bind_ip != "0" && bind_ip != "" ) - { - boost::asio::ip::tcp::endpoint local_endpoint(boost::asio::ip::address::from_string(addr.c_str()), 0); - m_socket.bind(local_endpoint); - } - - - m_deadline.expires_from_now(boost::posix_time::milliseconds(m_connect_timeout)); - - - boost::system::error_code ec = boost::asio::error::would_block; - - //m_socket.connect(remote_endpoint); - m_socket.async_connect(remote_endpoint, boost::lambda::var(ec) = boost::lambda::_1); - while (ec == boost::asio::error::would_block) - { - m_io_service.run_one(); - } - - if (!ec && m_socket.is_open()) - { - m_connected = true; - m_deadline.expires_at(boost::posix_time::pos_infin); - return true; - }else - { - LOG_PRINT("Some problems at connect, message: " << ec.message(), LOG_LEVEL_3); - return false; - } - - } - catch(const boost::system::system_error& er) - { - LOG_PRINT("Some problems at connect, message: " << er.what(), LOG_LEVEL_4); - return false; - } - catch(...) - { - LOG_PRINT("Some fatal problems.", LOG_LEVEL_4); - return false; - } - - return true; - } - - - inline - bool disconnect() - { - try - { - if(m_connected) - { - m_connected = false; - m_socket.shutdown(boost::asio::ip::tcp::socket::shutdown_both); - - } - } - - catch(const boost::system::system_error& /*er*/) - { - //LOG_ERROR("Some problems at disconnect, message: " << er.what()); - return false; - } - catch(...) - { - //LOG_ERROR("Some fatal problems."); - return false; - } - return true; - } - - - inline - bool send(const std::string& buff) - { - - try - { - m_deadline.expires_from_now(boost::posix_time::milliseconds(m_reciev_timeout)); - - // Set up the variable that receives the result of the asynchronous - // operation. The error code is set to would_block to signal that the - // operation is incomplete. 
Asio guarantees that its asynchronous - // operations will never fail with would_block, so any other value in - // ec indicates completion. - boost::system::error_code ec = boost::asio::error::would_block; - - // Start the asynchronous operation itself. The boost::lambda function - // object is used as a callback and will update the ec variable when the - // operation completes. The blocking_udp_client.cpp example shows how you - // can use boost::bind rather than boost::lambda. - boost::asio::async_write(m_socket, boost::asio::buffer(buff), boost::lambda::var(ec) = boost::lambda::_1); - - // Block until the asynchronous operation has completed. - while (ec == boost::asio::error::would_block) - { - m_io_service.run_one(); - } - - if (ec) - { - LOG_PRINT_L3("Problems at write: " << ec.message()); - m_connected = false; - return false; - }else - { - m_deadline.expires_at(boost::posix_time::pos_infin); - } - } - - catch(const boost::system::system_error& er) - { - LOG_ERROR("Some problems at connect, message: " << er.what()); - return false; - } - catch(...) - { - LOG_ERROR("Some fatal problems."); - return false; - } - - return true; - } - - inline - bool send(const void* data, size_t sz) - { - try - { - /* - m_deadline.expires_from_now(boost::posix_time::milliseconds(m_reciev_timeout)); - - // Set up the variable that receives the result of the asynchronous - // operation. The error code is set to would_block to signal that the - // operation is incomplete. Asio guarantees that its asynchronous - // operations will never fail with would_block, so any other value in - // ec indicates completion. - boost::system::error_code ec = boost::asio::error::would_block; - - // Start the asynchronous operation itself. The boost::lambda function - // object is used as a callback and will update the ec variable when the - // operation completes. The blocking_udp_client.cpp example shows how you - // can use boost::bind rather than boost::lambda. - boost::asio::async_write(m_socket, boost::asio::buffer(data, sz), boost::lambda::var(ec) = boost::lambda::_1); - - // Block until the asynchronous operation has completed. - while (ec == boost::asio::error::would_block) - { - m_io_service.run_one(); - } - */ - boost::system::error_code ec; - - size_t writen = m_socket.write_some(boost::asio::buffer(data, sz), ec); - - - - if (!writen || ec) - { - LOG_PRINT_L3("Problems at write: " << ec.message()); - m_connected = false; - return false; - }else - { - m_deadline.expires_at(boost::posix_time::pos_infin); - } - } - - catch(const boost::system::system_error& er) - { - LOG_ERROR("Some problems at send, message: " << er.what()); - m_connected = false; - return false; - } - catch(...) - { - LOG_ERROR("Some fatal problems."); - return false; - } - - return true; - } - - bool is_connected() - { - return m_connected && m_socket.is_open(); - //TRY_ENTRY() - //return m_socket.is_open(); - //CATCH_ENTRY_L0("is_connected", false) - } - - inline - bool recv(std::string& buff) - { - - try - { - // Set a deadline for the asynchronous operation. Since this function uses - // a composed operation (async_read_until), the deadline applies to the - // entire operation, rather than individual reads from the socket. - m_deadline.expires_from_now(boost::posix_time::milliseconds(m_reciev_timeout)); - - // Set up the variable that receives the result of the asynchronous - // operation. The error code is set to would_block to signal that the - // operation is incomplete. 
Asio guarantees that its asynchronous - // operations will never fail with would_block, so any other value in - // ec indicates completion. - //boost::system::error_code ec = boost::asio::error::would_block; - - // Start the asynchronous operation itself. The boost::lambda function - // object is used as a callback and will update the ec variable when the - // operation completes. The blocking_udp_client.cpp example shows how you - // can use boost::bind rather than boost::lambda. - - boost::system::error_code ec = boost::asio::error::would_block; - size_t bytes_transfered = 0; - - handler_obj hndlr(ec, bytes_transfered); - - char local_buff[10000] = {0}; - //m_socket.async_read_some(boost::asio::buffer(local_buff, sizeof(local_buff)), hndlr); - boost::asio::async_read(m_socket, boost::asio::buffer(local_buff, sizeof(local_buff)), boost::asio::transfer_at_least(1), hndlr); - - // Block until the asynchronous operation has completed. - while (ec == boost::asio::error::would_block && !boost::interprocess::ipcdetail::atomic_read32(&m_shutdowned)) - { - m_io_service.run_one(); - } - - - if (ec) - { - LOG_PRINT_L4("READ ENDS: Connection err_code " << ec.value()); - if(ec == boost::asio::error::eof) - { - LOG_PRINT_L4("Connection err_code eof."); - //connection closed there, empty - return true; - } - - LOG_PRINT_L3("Problems at read: " << ec.message()); - m_connected = false; - return false; - }else - { - LOG_PRINT_L4("READ ENDS: Success. bytes_tr: " << bytes_transfered); - m_deadline.expires_at(boost::posix_time::pos_infin); - } - - /*if(!bytes_transfered) - return false;*/ - - buff.assign(local_buff, bytes_transfered); - return true; - } - - catch(const boost::system::system_error& er) - { - LOG_ERROR("Some problems at read, message: " << er.what()); - m_connected = false; - return false; - } - catch(...) - { - LOG_ERROR("Some fatal problems at read."); - return false; - } - - - - return false; - - } - - inline bool recv_n(std::string& buff, int64_t sz) - { - - try - { - // Set a deadline for the asynchronous operation. Since this function uses - // a composed operation (async_read_until), the deadline applies to the - // entire operation, rather than individual reads from the socket. - m_deadline.expires_from_now(boost::posix_time::milliseconds(m_reciev_timeout)); - - // Set up the variable that receives the result of the asynchronous - // operation. The error code is set to would_block to signal that the - // operation is incomplete. Asio guarantees that its asynchronous - // operations will never fail with would_block, so any other value in - // ec indicates completion. - //boost::system::error_code ec = boost::asio::error::would_block; - - // Start the asynchronous operation itself. The boost::lambda function - // object is used as a callback and will update the ec variable when the - // operation completes. The blocking_udp_client.cpp example shows how you - // can use boost::bind rather than boost::lambda. - - buff.resize(static_cast(sz)); - boost::system::error_code ec = boost::asio::error::would_block; - size_t bytes_transfered = 0; - - - handler_obj hndlr(ec, bytes_transfered); - - //char local_buff[10000] = {0}; - boost::asio::async_read(m_socket, boost::asio::buffer((char*)buff.data(), buff.size()), boost::asio::transfer_at_least(buff.size()), hndlr); - - // Block until the asynchronous operation has completed. 
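// (Aside on the idiom at this point, as a hedged sketch rather than the
// deleted code: ec is seeded with would_block because Asio never hands that
// value to a completion handler, so the loop below can safely use it as a
// "still pending" sentinel while it pumps the io_service:
//
//   boost::system::error_code ec = boost::asio::error::would_block;
//   boost::asio::async_read(sock, boost::asio::buffer(buf),
//       [&ec](const boost::system::error_code& e, std::size_t){ ec = e; });
//   while(ec == boost::asio::error::would_block)
//     io.run_one(); // also services the deadline actor that can close the socket
//
// sock/buf/io are placeholders. Any other value left in ec afterwards means
// the operation completed, or the timeout fired and failed it.)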
- while (ec == boost::asio::error::would_block && !boost::interprocess::ipcdetail::atomic_read32(&m_shutdowned)) - { - m_io_service.run_one(); - } - - if (ec) - { - LOG_PRINT_L3("Problems at read: " << ec.message()); - m_connected = false; - return false; - }else - { - m_deadline.expires_at(boost::posix_time::pos_infin); - } - - if(bytes_transfered != buff.size()) - { - LOG_ERROR("Transferred missmatch with transfer_at_least value: m_bytes_transferred=" << bytes_transfered << " at_least value=" << buff.size()); - return false; - } - - return true; - } - - catch(const boost::system::system_error& er) - { - LOG_ERROR("Some problems at read, message: " << er.what()); - m_connected = false; - return false; - } - catch(...) - { - LOG_ERROR("Some fatal problems at read."); - return false; - } - - - - return false; - } - - bool shutdown() - { - m_deadline.cancel(); - boost::system::error_code ignored_ec; - m_socket.cancel(ignored_ec); - m_socket.shutdown(boost::asio::ip::tcp::socket::shutdown_both, ignored_ec); - m_socket.close(ignored_ec); - boost::interprocess::ipcdetail::atomic_write32(&m_shutdowned, 1); - m_connected = false; - return true; - } - - void set_connected(bool connected) - { - m_connected = connected; - } - boost::asio::io_service& get_io_service() - { - return m_io_service; - } - - boost::asio::ip::tcp::socket& get_socket() - { - return m_socket; - } - - private: - - void check_deadline() - { - // Check whether the deadline has passed. We compare the deadline against - // the current time since a new asynchronous operation may have moved the - // deadline before this actor had a chance to run. - if (m_deadline.expires_at() <= boost::asio::deadline_timer::traits_type::now()) - { - // The deadline has passed. The socket is closed so that any outstanding - // asynchronous operations are cancelled. This allows the blocked - // connect(), read_line() or write_line() functions to return. - LOG_PRINT_L3("Timed out socket"); - m_connected = false; - m_socket.close(); - - // There is no longer an active deadline. The expiry is set to positive - // infinity so that the actor takes no action until a new deadline is set. - m_deadline.expires_at(boost::posix_time::pos_infin); - } - - // Put the actor back to sleep. - m_deadline.async_wait(boost::bind(&blocked_mode_client::check_deadline, this)); - } - - - - protected: - boost::asio::io_service m_io_service; - boost::asio::ip::tcp::socket m_socket; - int m_connect_timeout; - int m_reciev_timeout; - bool m_initialized; - bool m_connected; - boost::asio::deadline_timer m_deadline; - volatile uint32_t m_shutdowned; - }; - - - /************************************************************************/ - /* */ - /************************************************************************/ - class async_blocked_mode_client: public blocked_mode_client - { - public: - async_blocked_mode_client():m_send_deadline(blocked_mode_client::m_io_service) - { - - // No deadline is required until the first socket operation is started. We - // set the deadline to positive infinity so that the actor takes no action - // until a specific deadline is set. - m_send_deadline.expires_at(boost::posix_time::pos_infin); - - // Start the persistent actor that checks for deadline expiry. 
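// (This constructor repeats the watchdog idiom from blocked_mode_client:
// park the timer at pos_infin while idle, start a self-re-arming wait, and
// let the expiry handler close the socket so any blocked run_one() loop
// falls out. The arming step in isolation, hedged:
//
//   m_send_deadline.expires_at(boost::posix_time::pos_infin); // idle
//   m_send_deadline.async_wait(
//       boost::bind(&async_blocked_mode_client::check_send_deadline, this));
//
// Only shutdown() finally cancels the timer for good.)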
- check_send_deadline(); - } - ~async_blocked_mode_client() - { - m_send_deadline.cancel(); - } - - bool shutdown() - { - blocked_mode_client::shutdown(); - m_send_deadline.cancel(); - return true; - } - - inline - bool send(const void* data, size_t sz) - { - try - { - /* - m_send_deadline.expires_from_now(boost::posix_time::milliseconds(m_reciev_timeout)); - - // Set up the variable that receives the result of the asynchronous - // operation. The error code is set to would_block to signal that the - // operation is incomplete. Asio guarantees that its asynchronous - // operations will never fail with would_block, so any other value in - // ec indicates completion. - boost::system::error_code ec = boost::asio::error::would_block; - - // Start the asynchronous operation itself. The boost::lambda function - // object is used as a callback and will update the ec variable when the - // operation completes. The blocking_udp_client.cpp example shows how you - // can use boost::bind rather than boost::lambda. - boost::asio::async_write(m_socket, boost::asio::buffer(data, sz), boost::lambda::var(ec) = boost::lambda::_1); - - // Block until the asynchronous operation has completed. - while(ec == boost::asio::error::would_block) - { - m_io_service.run_one(); - }*/ - - boost::system::error_code ec; - - size_t writen = m_socket.write_some(boost::asio::buffer(data, sz), ec); - - if (!writen || ec) - { - LOG_PRINT_L3("Problems at write: " << ec.message()); - return false; - }else - { - m_send_deadline.expires_at(boost::posix_time::pos_infin); - } - } - - catch(const boost::system::system_error& er) - { - LOG_ERROR("Some problems at connect, message: " << er.what()); - return false; - } - catch(...) - { - LOG_ERROR("Some fatal problems."); - return false; - } - - return true; - } - - - private: - - boost::asio::deadline_timer m_send_deadline; - - void check_send_deadline() - { - // Check whether the deadline has passed. We compare the deadline against - // the current time since a new asynchronous operation may have moved the - // deadline before this actor had a chance to run. - if (m_send_deadline.expires_at() <= boost::asio::deadline_timer::traits_type::now()) - { - // The deadline has passed. The socket is closed so that any outstanding - // asynchronous operations are cancelled. This allows the blocked - // connect(), read_line() or write_line() functions to return. - LOG_PRINT_L3("Timed out socket"); - m_socket.close(); - - // There is no longer an active deadline. The expiry is set to positive - // infinity so that the actor takes no action until a new deadline is set. - m_send_deadline.expires_at(boost::posix_time::pos_infin); - } - - // Put the actor back to sleep. - m_send_deadline.async_wait(boost::bind(&async_blocked_mode_client::check_send_deadline, this)); - } - }; -} -} diff --git a/contrib/epee/include/net/net_parse_helpers.h b/contrib/epee/include/net/net_parse_helpers.h deleted file mode 100644 index 40c3d935d2..0000000000 --- a/contrib/epee/include/net/net_parse_helpers.h +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - - -#pragma once -#include "http_base.h" -#include "include_base_utils.h" -#include "reg_exp_definer.h" - - -namespace epee -{ -namespace net_utils -{ - - inline bool parse_uri_query(const std::string& query, std::list >& params) - { - enum state - { - st_param_name, - st_param_val - }; - state st = st_param_name; - std::string::const_iterator start_it = query.begin(); - std::pair e; - for(std::string::const_iterator it = query.begin(); it != query.end(); it++) - { - switch(st) - { - case st_param_name: - if(*it == '=') - { - e.first.assign(start_it, it); - start_it = it;++start_it; - st = st_param_val; - } - break; - case st_param_val: - if(*it == '&') - { - e.second.assign(start_it, it); - start_it = it;++start_it; - params.push_back(e); - e.first.clear();e.second.clear(); - st = st_param_name; - } - break; - default: - LOG_ERROR("Unknown state " << (int)st); - return false; - } - } - if(st == st_param_name) - { - if(start_it != query.end()) - { - e.first.assign(start_it, query.end()); - params.push_back(e); - } - }else - { - if(start_it != query.end()) - e.second.assign(start_it, query.end()); - - if(e.first.size()) - params.push_back(e); - } - return true; - } - - inline - bool parse_uri(const std::string uri, http::uri_content& content) - { - - ///iframe_test.html?api_url=http://api.vk.com/api.php&api_id=3289090&api_settings=1&viewer_id=562964060&viewer_type=0&sid=0aad8d1c5713130f9ca0076f2b7b47e532877424961367d81e7fa92455f069be7e21bc3193cbd0be11895&secret=368ebbc0ef&access_token=668bc03f43981d883f73876ffff4aa8564254b359cc745dfa1b3cde7bdab2e94105d8f6d8250717569c0a7&user_id=0&group_id=0&is_app_user=1&auth_key=d2f7a895ca5ff3fdb2a2a8ae23fe679a&language=0&parent_language=0&ad_info=ElsdCQBaQlxiAQRdFUVUXiN2AVBzBx5pU1BXIgZUJlIEAWcgAUoLQg==&referrer=unknown&lc_name=9834b6a3&hash= - content.m_query_params.clear(); - STATIC_REGEXP_EXPR_1(rexp_match_uri, "^([^?#]*)(\\?([^#]*))?(#(.*))?", boost::regex::icase | boost::regex::normal); - - boost::smatch result; - if(!boost::regex_search(uri, result, rexp_match_uri, boost::match_default) && result[0].matched) - { - LOG_PRINT_L0("[PARSE URI] regex not matched for uri: " << uri); - content.m_path = uri; - return true; - } - if(result[1].matched) - { - content.m_path = result[1]; - } - if(result[3].matched) - { - content.m_query = result[3]; - } - 
if(result[5].matched) - { - content.m_fragment = result[5]; - } - if(content.m_query.size()) - { - parse_uri_query(content.m_query, content.m_query_params); - } - return true; - } - - - inline - bool parse_url(const std::string url_str, http::url_content& content) - { - - ///iframe_test.html?api_url=http://api.vk.com/api.php&api_id=3289090&api_settings=1&viewer_id=562964060&viewer_type=0&sid=0aad8d1c5713130f9ca0076f2b7b47e532877424961367d81e7fa92455f069be7e21bc3193cbd0be11895&secret=368ebbc0ef&access_token=668bc03f43981d883f73876ffff4aa8564254b359cc745dfa1b3cde7bdab2e94105d8f6d8250717569c0a7&user_id=0&group_id=0&is_app_user=1&auth_key=d2f7a895ca5ff3fdb2a2a8ae23fe679a&language=0&parent_language=0&ad_info=ElsdCQBaQlxiAQRdFUVUXiN2AVBzBx5pU1BXIgZUJlIEAWcgAUoLQg==&referrer=unknown&lc_name=9834b6a3&hash= - //STATIC_REGEXP_EXPR_1(rexp_match_uri, "^([^?#]*)(\\?([^#]*))?(#(.*))?", boost::regex::icase | boost::regex::normal); - STATIC_REGEXP_EXPR_1(rexp_match_uri, "^((.*?)://)?(([^/:]*)(:(\\d+))?)(.*)?", boost::regex::icase | boost::regex::normal); - // 12 34 5 6 7 - content.port = 0; - boost::smatch result; - if(!boost::regex_search(url_str, result, rexp_match_uri, boost::match_default) && result[0].matched) - { - LOG_PRINT_L0("[PARSE URI] regex not matched for uri: " << rexp_match_uri); - //content.m_path = uri; - return true; - } - if(result[2].matched) - { - content.schema = result[2]; - } - if(result[4].matched) - { - content.host = result[4]; - } - if(result[6].matched) - { - content.port = boost::lexical_cast(result[6]); - } - if(result[7].matched) - { - content.uri = result[7]; - return parse_uri(result[7], content.m_uri_content); - } - - return true; - } - -} -} \ No newline at end of file diff --git a/contrib/epee/include/net/net_utils_base.h b/contrib/epee/include/net/net_utils_base.h deleted file mode 100644 index b0e44f5dd0..0000000000 --- a/contrib/epee/include/net/net_utils_base.h +++ /dev/null @@ -1,177 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
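Both parse_uri() and parse_url() in the deleted net_parse_helpers.h above test if(!boost::regex_search(...) && result[0].matched): the ! binds only to regex_search, and when regex_search returns false result[0].matched is false as well, so the no-match branch is effectively dead. The presumably intended guard, as a sketch:

    // Sketch of the intended no-match guard for parse_url(); same shape for parse_uri().
    boost::smatch result;
    if(!(boost::regex_search(url_str, result, rexp_match_uri, boost::match_default)
         && result[0].matched))
    {
      LOG_PRINT_L0("[PARSE URI] regex not matched for uri: " << url_str);
      return true; // keep the deleted code's permissive fallback
    }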
-// - - - -#ifndef _NET_UTILS_BASE_H_ -#define _NET_UTILS_BASE_H_ - -#include -#include "string_tools.h" - -#ifndef MAKE_IP -#define MAKE_IP( a1, a2, a3, a4 ) (a1|(a2<<8)|(a3<<16)|(a4<<24)) -#endif - -namespace boost { - namespace asio { - class io_service; - } -} - -namespace epee -{ -namespace net_utils -{ - /************************************************************************/ - /* */ - /************************************************************************/ - struct connection_context_base - { - const boost::uuids::uuid m_connection_id; - const uint32_t m_remote_ip; - const uint32_t m_remote_port; - const bool m_is_income; - const time_t m_started; - time_t m_last_recv; - time_t m_last_send; - uint64_t m_recv_cnt; - uint64_t m_send_cnt; - - connection_context_base(boost::uuids::uuid connection_id, - long remote_ip, int remote_port, bool is_income, - time_t last_recv = 0, time_t last_send = 0, - uint64_t recv_cnt = 0, uint64_t send_cnt = 0): - m_connection_id(connection_id), - m_remote_ip(remote_ip), - m_remote_port(remote_port), - m_is_income(is_income), - m_started(time(NULL)), - m_last_recv(last_recv), - m_last_send(last_send), - m_recv_cnt(recv_cnt), - m_send_cnt(send_cnt) - {} - - connection_context_base(): m_connection_id(), - m_remote_ip(0), - m_remote_port(0), - m_is_income(false), - m_started(time(NULL)), - m_last_recv(0), - m_last_send(0), - m_recv_cnt(0), - m_send_cnt(0) - {} - - connection_context_base& operator=(const connection_context_base& a) - { - set_details(a.m_connection_id, a.m_remote_ip, a.m_remote_port, a.m_is_income); - return *this; - } - - private: - template - friend class connection; - void set_details(boost::uuids::uuid connection_id, long remote_ip, int remote_port, bool is_income) - { - this->~connection_context_base(); - new(this) connection_context_base(connection_id, remote_ip, remote_port, is_income); - } - - }; - - /************************************************************************/ - /* */ - /************************************************************************/ - struct i_service_endpoint - { - virtual bool do_send(const void* ptr, size_t cb)=0; - virtual bool close()=0; - virtual bool call_run_once_service_io()=0; - virtual bool request_callback()=0; - virtual boost::asio::io_service& get_io_service()=0; - //protect from deletion connection object(with protocol instance) during external call "invoke" - virtual bool add_ref()=0; - virtual bool release()=0; - protected: - virtual ~i_service_endpoint(){} - }; - - - //some helpers - - - inline - std::string print_connection_context(const connection_context_base& ctx) - { - std::stringstream ss; - ss << epee::string_tools::get_ip_string_from_int32(ctx.m_remote_ip) << ":" << ctx.m_remote_port << " " << epee::string_tools::get_str_from_guid_a(ctx.m_connection_id) << (ctx.m_is_income ? " INC":" OUT"); - return ss.str(); - } - - inline - std::string print_connection_context_short(const connection_context_base& ctx) - { - std::stringstream ss; - ss << epee::string_tools::get_ip_string_from_int32(ctx.m_remote_ip) << ":" << ctx.m_remote_port << (ctx.m_is_income ? 
" INC":" OUT"); - return ss.str(); - } - -#define LOG_PRINT_CC(ct, message, log_level) LOG_PRINT("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message, log_level) -#define LOG_PRINT_CC_GREEN(ct, message, log_level) LOG_PRINT_GREEN("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message, log_level) -#define LOG_PRINT_CC_RED(ct, message, log_level) LOG_PRINT_RED("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message, log_level) -#define LOG_PRINT_CC_BLUE(ct, message, log_level) LOG_PRINT_BLUE("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message, log_level) -#define LOG_PRINT_CC_YELLOW(ct, message, log_level) LOG_PRINT_YELLOW("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message, log_level) -#define LOG_PRINT_CC_CYAN(ct, message, log_level) LOG_PRINT_CYAN("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message, log_level) -#define LOG_PRINT_CC_MAGENTA(ct, message, log_level) LOG_PRINT_MAGENTA("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message, log_level) -#define LOG_ERROR_CC(ct, message) LOG_ERROR("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message) - -#define LOG_PRINT_CC_L0(ct, message) LOG_PRINT_L0("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message) -#define LOG_PRINT_CC_L1(ct, message) LOG_PRINT_L1("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message) -#define LOG_PRINT_CC_L2(ct, message) LOG_PRINT_L2("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message) -#define LOG_PRINT_CC_L3(ct, message) LOG_PRINT_L3("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message) -#define LOG_PRINT_CC_L4(ct, message) LOG_PRINT_L4("[" << epee::net_utils::print_connection_context_short(ct) << "]" << message) - -#define LOG_PRINT_CCONTEXT_L0(message) LOG_PRINT_CC_L0(context, message) -#define LOG_PRINT_CCONTEXT_L1(message) LOG_PRINT_CC_L1(context, message) -#define LOG_PRINT_CCONTEXT_L2(message) LOG_PRINT_CC_L2(context, message) -#define LOG_PRINT_CCONTEXT_L3(message) LOG_PRINT_CC_L3(context, message) -#define LOG_ERROR_CCONTEXT(message) LOG_ERROR_CC(context, message) - -#define LOG_PRINT_CCONTEXT_GREEN(message, log_level) LOG_PRINT_CC_GREEN(context, message, log_level) -#define LOG_PRINT_CCONTEXT_RED(message, log_level) LOG_PRINT_CC_RED(context, message, log_level) -#define LOG_PRINT_CCONTEXT_BLUE(message, log_level) LOG_PRINT_CC_BLUE(context, message, log_level) -#define LOG_PRINT_CCONTEXT_YELLOW(message, log_level) LOG_PRINT_CC_YELLOW(context, message, log_level) -#define LOG_PRINT_CCONTEXT_CYAN(message, log_level) LOG_PRINT_CC_CYAN(context, message, log_level) -#define LOG_PRINT_CCONTEXT_MAGENTA(message, log_level) LOG_PRINT_CC_MAGENTA(context, message, log_level) - -#define CHECK_AND_ASSERT_MES_CC(condition, return_val, err_message) CHECK_AND_ASSERT_MES(condition, return_val, "[" << epee::net_utils::print_connection_context_short(context) << "]" << err_message) - -} -} - -#endif //_NET_UTILS_BASE_H_ diff --git a/contrib/epee/include/net/protocol_switcher.h b/contrib/epee/include/net/protocol_switcher.h deleted file mode 100644 index ca0ce6f97e..0000000000 --- a/contrib/epee/include/net/protocol_switcher.h +++ /dev/null @@ -1,121 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#ifndef _PROTOCOL_SWITCHER_H_ -#define _PROTOCOL_SWITCHER_H_ - -#include "levin_base.h" -#include "http_server.h" -#include "levin_protocol_handler.h" -//#include "abstract_tcp_server.h" - -namespace epee -{ -namespace net_utils -{ - struct protocl_switcher_config - { - http::http_custom_handler::config_type m_http_config; - levin::protocol_handler::config_type m_levin_config; - }; - - - struct i_protocol_handler - { - virtual bool handle_recv(const void* ptr, size_t cb)=0; - }; - - template - class t_protocol_handler: public i_protocol_handler - { - public: - typedef t t_type; - t_protocol_handler(i_service_endpoint* psnd_hndlr, typename t_type::config_type& config, const connection_context& conn_context):m_hadler(psnd_hndlr, config, conn_context) - {} - private: - bool handle_recv(const void* ptr, size_t cb) - { - return m_hadler.handle_recv(ptr, cb); - } - t_type m_hadler; - }; - - - class protocol_switcher - { - public: - typedef protocl_switcher_config config_type; - - protocol_switcher(net_utils::i_service_endpoint* psnd_hndlr, config_type& config, const net_utils::connection_context_base& conn_context); - virtual ~protocol_switcher(){} - - virtual bool handle_recv(const void* ptr, size_t cb); - - bool after_init_connection(){return true;} - private: - t_protocol_handler m_http_handler; - t_protocol_handler m_levin_handler; - i_protocol_handler* pcurrent_handler; - - std::string m_cached_buff; - }; - - protocol_switcher::protocol_switcher(net_utils::i_service_endpoint* psnd_hndlr, config_type& config, const net_utils::connection_context_base& conn_context):m_http_handler(psnd_hndlr, config.m_http_config, conn_context), m_levin_handler(psnd_hndlr, config.m_levin_config, conn_context), pcurrent_handler(NULL) - {} - - bool protocol_switcher::handle_recv(const void* ptr, size_t cb) - { - if(pcurrent_handler) - return pcurrent_handler->handle_recv(ptr, cb); - else - { - m_cached_buff.append((const char*)ptr, cb); - if(m_cached_buff.size() < sizeof(uint64_t)) - return true; - - if(*((uint64_t*)&m_cached_buff[0]) 
== LEVIN_SIGNATURE) - { - pcurrent_handler = &m_levin_handler; - return pcurrent_handler->handle_recv(m_cached_buff.data(), m_cached_buff.size()); - } - if(m_cached_buff.substr(0, 4) == "GET " || m_cached_buff.substr(0, 4) == "POST") - { - pcurrent_handler = &m_http_handler; - return pcurrent_handler->handle_recv(m_cached_buff.data(), m_cached_buff.size()); - }else - { - LOG_ERROR("Wrong protocol accepted on port..."); - return false; - } - } - - return true; - } -} -} -#endif //_PROTOCOL_SWITCHER_H_ \ No newline at end of file diff --git a/contrib/epee/include/net/rpc_method_name.h b/contrib/epee/include/net/rpc_method_name.h deleted file mode 100644 index c226639c4b..0000000000 --- a/contrib/epee/include/net/rpc_method_name.h +++ /dev/null @@ -1,31 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#pragma once - - -#define RPC_METHOD_NAME(name) static inline const char* methodname(){return name;} \ No newline at end of file diff --git a/contrib/epee/include/net/smtp.h b/contrib/epee/include/net/smtp.h deleted file mode 100644 index d2e8598fd7..0000000000 --- a/contrib/epee/include/net/smtp.h +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. 
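protocol_switcher::handle_recv() above buffers bytes until at least sizeof(uint64_t) have arrived, then dispatches on the prefix: LEVIN_SIGNATURE selects the levin handler, "GET "/"POST" selects HTTP, anything else is rejected. The probe reads the buffer through an unaligned uint64_t* cast; a portable sketch of the same test (LEVIN_SIGNATURE comes from levin_base.h):

    #include <cstdint>
    #include <cstring>
    #include <string>

    inline bool looks_like_levin(const std::string& buf)
    {
      if(buf.size() < sizeof(uint64_t))
        return false; // not enough bytes to decide yet
      uint64_t sig = 0;
      std::memcpy(&sig, buf.data(), sizeof sig); // avoids the unaligned cast
      return sig == LEVIN_SIGNATURE;
    }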
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - - -#pragma once -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - - -namespace epee -{ -namespace net_utils -{ - namespace smtp - { - - using boost::asio::ip::tcp; - using namespace boost::archive::iterators; - typedef base64_from_binary > base64_text; - - /************************************************************************/ - /* */ - /************************************************************************/ - class smtp_client - { - public: - smtp_client(std::string pServer,unsigned int pPort,std::string pUser,std::string pPassword): - mServer(pServer),mPort(pPort),mUserName(pUser),mPassword(pPassword),mSocket(mIOService),mResolver(mIOService) - { - tcp::resolver::query qry(mServer,boost::lexical_cast( mPort )); - mResolver.async_resolve(qry,boost::bind(&smtp_client::handleResolve,this,boost::asio::placeholders::error, - boost::asio::placeholders::iterator)); - } - bool Send(std::string pFrom,std::string pTo,std::string pSubject,std::string pMessage) - { - mHasError = true; - mFrom=pFrom; - mTo=pTo; - mSubject=pSubject; - mMessage=pMessage; - mIOService.run(); - return !mHasError; - } - private: - std::string encodeBase64(std::string pData) - { - std::stringstream os; - size_t sz=pData.size(); - std::copy(base64_text(pData.c_str()),base64_text(pData.c_str()+sz),std::ostream_iterator(os)); - return os.str(); - } - void handleResolve(const boost::system::error_code& err,tcp::resolver::iterator endpoint_iterator) - { - if(!err) - { - tcp::endpoint endpoint=*endpoint_iterator; - mSocket.async_connect(endpoint, - boost::bind(&smtp_client::handleConnect,this,boost::asio::placeholders::error,++endpoint_iterator)); - } - else - { - mHasError=true; - mErrorMsg= err.message(); - } - } - void writeLine(std::string pData) - { - std::ostream req_strm(&mRequest); - req_strm << pData << "\r\n"; - boost::asio::write(mSocket,mRequest); - req_strm.clear(); - } - void readLine(std::string& pData) - { - boost::asio::streambuf response; - boost::asio::read_until(mSocket, response, "\r\n"); - std::istream response_stream(&response); - response_stream >> pData; - } - void handleConnect(const boost::system::error_code& err,tcp::resolver::iterator endpoint_iterator) - { - if (!err) - { - std::string read_buff; - // The connection was successful. Send the request. 
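A note on the handshake driven below: handleConnect writes EHLO immediately and only then reads a line (the `//220` comment marks where the server greeting is expected), readLine extracts a single whitespace-delimited token, so the final `read_buff == "250"` test compares only the reply code, and no replies are read between MAIL FROM and DATA. The conversation it is aiming for looks roughly like this (host names and payloads illustrative):

    // S: 220 mail.example.com ESMTP          (greeting)
    // C: EHLO mail.example.com
    // S: 250-mail.example.com ... 250 AUTH LOGIN PLAIN
    // C: AUTH LOGIN
    // S: 334 VXNlcm5hbWU6                    (base64 "Username:")
    // C: dXNlckBleGFtcGxlLmNvbQ==            (base64 username)
    // S: 334 UGFzc3dvcmQ6                    (base64 "Password:")
    // C: c2VjcmV0                            (base64 password)
    // S: 235 Authentication successful
    // C: MAIL FROM:<...> / RCPT TO:<...> / DATA / message / "."
    // S: 250 OK                              (the read_buff == "250" check)
    // C: QUIT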
- std::ostream req_strm(&mRequest); - writeLine("EHLO "+mServer); - readLine(read_buff);//220 - writeLine("AUTH LOGIN"); - readLine(read_buff);// - writeLine(encodeBase64(mUserName)); - readLine(read_buff); - writeLine(encodeBase64(mPassword)); - readLine(read_buff); - writeLine( "MAIL FROM:<"+mFrom+">"); - writeLine( "RCPT TO:<"+mTo+">"); - writeLine( "DATA"); - writeLine( "SUBJECT:"+mSubject); - writeLine( "From:"+mFrom); - writeLine( "To:"+mTo); - writeLine( ""); - writeLine( mMessage ); - writeLine( "\r\n.\r\n"); - readLine(read_buff); - if(read_buff == "250") - mHasError = false; - writeLine( "QUIT"); - } - else - { - mHasError=true; - mErrorMsg= err.message(); - } - } - std::string mServer; - std::string mUserName; - std::string mPassword; - std::string mFrom; - std::string mTo; - std::string mSubject; - std::string mMessage; - unsigned int mPort; - boost::asio::io_service mIOService; - tcp::resolver mResolver; - tcp::socket mSocket; - boost::asio::streambuf mRequest; - boost::asio::streambuf mResponse; - bool mHasError; - std::string mErrorMsg; - }; - - - bool send_mail(const std::string& server, int port, const std::string& login, const std::string& pass, const std::string& from_email, /*"STIL CRAWLER",*/ - const std::string& maillist, const std::string& subject, const std::string& body) - { - STD_TRY_BEGIN(); - //smtp_client mailc("yoursmtpserver.com",25,"user@yourdomain.com","password"); - //mailc.Send("from@yourdomain.com","to@somewhere.com","subject","Hello from C++ SMTP Client!"); - smtp_client mailc(server,port,login,pass); - return mailc.Send(from_email,maillist,subject,body); - STD_TRY_CATCH("at send_mail", false); - } - - } -} -} - -//#include "smtp.inl" \ No newline at end of file diff --git a/contrib/epee/include/net/smtp.inl b/contrib/epee/include/net/smtp.inl deleted file mode 100644 index d42c8b950c..0000000000 --- a/contrib/epee/include/net/smtp.inl +++ /dev/null @@ -1,1569 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
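The encodeBase64 helper in the smtp_client above uses the canonical Boost iterator stack (base64_from_binary over transform_width) but emits no '=' padding, and some servers reject unpadded AUTH LOGIN tokens. A self-contained padded variant of the same approach (function name is ours):

    #include <boost/archive/iterators/base64_from_binary.hpp>
    #include <boost/archive/iterators/transform_width.hpp>
    #include <string>

    std::string encode_base64(const std::string& in)
    {
        using namespace boost::archive::iterators;
        // 8-bit bytes -> 6-bit groups -> base64 alphabet
        typedef base64_from_binary<transform_width<std::string::const_iterator, 6, 8> > b64;
        std::string out(b64(in.begin()), b64(in.end()));
        out.append((3 - in.size() % 3) % 3, '=');   // restore RFC 4648 padding
        return out;
    }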
-// - - - -#include "md5.h" - -namespace epee -{ -namespace net_utils -{ - namespace smtp - { - - - ////////////////////////////////////////////////////////////////////////// - inline char * convert_hex( unsigned char *in, int len ) - { - static char hex[] = "0123456789abcdef"; - char * out; - int i; - - out = (char *) malloc(len * 2 + 1); - if (out == NULL) - return NULL; - - for (i = 0; i < len; i++) { - out[i * 2] = hex[in[i] >> 4]; - out[i * 2 + 1] = hex[in[i] & 15]; - } - - out[i*2] = 0; - - return out; - } - - ////////////////////////////////////////////////////////////////////////// - inline char * hash_md5(const char * sec_key, const char * data, int len) - { - char key[65], digest[24]; - char * hash_hex; - - int sec_len, i; - - sec_len = strlen(sec_key); - - if (sec_len < 64) { - memcpy(key, sec_key, sec_len); - for (i = sec_len; i < 64; i++) { - key[i] = 0; - } - } else { - memcpy(key, sec_key, 64); - } - - md5::hmac_md5( (const unsigned char*)data, len, (const unsigned char*)key, 64, (unsigned char*)digest ); - hash_hex = convert_hex( (unsigned char*)digest, 16 ); - - return hash_hex; - } - ////////////////////////////////////////////////////////////////////////// - ////////////////////////////////////////////////////////////////////////// - ////////////////////////////////////////////////////////////////////////// - ////////////////////////////////////////////////////////////////////////// - inline CSMTPClient::CSMTPClient(void) - { - m_dwSupportedAuthModesCount = 0; - m_bConnected = FALSE; - m_hSocket = INVALID_SOCKET; - m_pErrorText = NULL; - - // Initialize WinSock - WORD wVer = MAKEWORD( 2, 2 ); - if ( WSAStartup( wVer, &m_wsaData ) != NO_ERROR ) - { - SetErrorText( "WSAStartup.", WSAGetLastError() ); - throw; - } - if ( LOBYTE( m_wsaData.wVersion ) != 2 || HIBYTE( m_wsaData.wVersion ) != 2 ) - { - SetErrorText( "Can't find a useable WinSock DLL." 
); - WSACleanup(); - throw; - } - } - - ////////////////////////////////////////////////////////////////////////// - inline CSMTPClient::~CSMTPClient(void) - { - if ( m_pErrorText ) - { - free( m_pErrorText ); - m_pErrorText = NULL; - } - - if ( m_bConnected ) - ServerDisconnect(); - - // Cleanup - WSACleanup(); - } - - ////////////////////////////////////////////////////////////////////////// - inline void CSMTPClient::SetErrorText( LPCSTR szErrorText, DWORD dwErrorCode ) - { - if ( m_pErrorText ) - { - free( m_pErrorText ); - m_pErrorText = NULL; - } - - LPVOID lpMsgBuf = NULL; - if ( dwErrorCode ) - { - FormatMessageA( - FORMAT_MESSAGE_ALLOCATE_BUFFER | - FORMAT_MESSAGE_FROM_SYSTEM, - NULL, - dwErrorCode, - MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT), - (LPSTR) &lpMsgBuf, - 0, NULL ); - } - - if ( szErrorText && strlen( szErrorText ) ) - { - m_pErrorText = (LPBYTE)malloc( strlen( szErrorText ) + 1 ); - strcpy( (char*)m_pErrorText, szErrorText ); - - if ( lpMsgBuf ) - { - strcat( (char*)m_pErrorText, " " ); - strcpy( (char*)m_pErrorText, (char*)lpMsgBuf ); - - LocalFree( lpMsgBuf ); - } - } - } - - inline void CSMTPClient::SetErrorText( PBYTE szErrorText, DWORD dwErrorCode ) - { - SetErrorText( (LPCSTR)szErrorText, dwErrorCode ); - } - - ////////////////////////////////////////////////////////////////////////// - inline char* CSMTPClient::GetLastErrorText() - { - return (char*)m_pErrorText; - } - - ////////////////////////////////////////////////////////////////////////// - inline DWORD CSMTPClient::ReceiveData( SOCKET hSocket, PBYTE pReceiveBuffer, DWORD dwReceiveBufferSize ) - { - DWORD dwReceivedDataSize = 0; - - if ( hSocket != INVALID_SOCKET && pReceiveBuffer && dwReceiveBufferSize ) - { - int iReceived = 0; - int iLength = 0; - - iLength = recv( hSocket, (LPSTR)pReceiveBuffer + iReceived, dwReceiveBufferSize - iReceived, - NO_FLAGS ); - - if ( iLength != 0 && iLength != SOCKET_ERROR ) - iReceived += iLength; - - dwReceivedDataSize = iReceived; - - pReceiveBuffer[ iReceived ] = 0; - } - - return dwReceivedDataSize; - } - - inline ////////////////////////////////////////////////////////////////////////// - DWORD CSMTPClient::SendData( SOCKET hSocket, PBYTE pSendBuffer, DWORD dwSendBufferSize ) - { - DWORD dwSended = 0; - - if ( hSocket != INVALID_SOCKET && pSendBuffer && dwSendBufferSize ) - { - int iSended = 0; - int iLength = 0; - - while ( iLength != SOCKET_ERROR && dwSendBufferSize - iSended > 0 ) - { - iLength = send( hSocket, (LPSTR)pSendBuffer + iSended, dwSendBufferSize - iSended, - NO_FLAGS ); - - if ( iLength != 0 && iLength != SOCKET_ERROR ) - iSended += iLength; - } - - dwSended = iSended; - } - - //if ( dwSended ) - // printf( "C: %s", pSendBuffer ); - - return dwSended; - } - - ////////////////////////////////////////////////////////////////////////// - inline unsigned short CSMTPClient::GetResponseCode( LPBYTE pBuffer, DWORD dwBufferSize ) - { - unsigned short iCode = 0; - - if ( dwBufferSize >= 3 ) - { - CHAR szResponseCode[ 4 ] = { 0 }; - memcpy( szResponseCode, pBuffer, 3 ); - szResponseCode[ 3 ] = 0; - iCode = atoi( szResponseCode ); - } - - return iCode; - } - - ////////////////////////////////////////////////////////////////////////// - inline void CSMTPClient::ParseESMTPExtensions( LPBYTE pBuffer, DWORD dwBufferSize ) - { - const char *szSubstring = strstr( (const char*)pBuffer, "250-AUTH " ); - if ( !szSubstring ) - { - szSubstring = strstr( (const char*)pBuffer, "250 AUTH " ); - } - - if ( szSubstring ) - { - const char *szSubstringEnd = strstr( (const 
char*)szSubstring, "\r\n" ); - if ( szSubstringEnd ) - { - szSubstring += 9; - char szAuthMode[ 256 ] = { 0 }; - for ( ; szSubstring < szSubstringEnd + 1 ; szSubstring++ ) - { - if ( *szSubstring == ' ' || *szSubstring == '\r' ) - { - if ( _strcmpi( szAuthMode, SMTP_COMMAND_AUTH_PLAIN ) == 0 ) - { - m_aSupportedAuthModes[ m_dwSupportedAuthModesCount ] = AUTH_MODE_PLAIN; - m_dwSupportedAuthModesCount++; - } - else if ( _strcmpi( szAuthMode, SMTP_COMMAND_AUTH_LOGIN ) == 0 ) - { - m_aSupportedAuthModes[ m_dwSupportedAuthModesCount ] = AUTH_MODE_LOGIN; - m_dwSupportedAuthModesCount++; - } - else if ( _strcmpi( szAuthMode, SMTP_COMMAND_AUTH_CRAM_MD5 ) == 0 ) - { - m_aSupportedAuthModes[ m_dwSupportedAuthModesCount ] = AUTH_MODE_CRAM_MD5; - m_dwSupportedAuthModesCount++; - } - - szAuthMode[ 0 ] = 0; - - if ( m_dwSupportedAuthModesCount == MAX_AUTH_MODES_COUND ) - break; - } - else - { - szAuthMode[ strlen( szAuthMode ) + 1 ] = 0; - szAuthMode[ strlen( szAuthMode ) ] = *szSubstring; - } - } - } - } - } - - ////////////////////////////////////////////////////////////////////////// - inline BOOL CSMTPClient::ServerConnect( LPCSTR szServerAddress, const unsigned short iPortNumber ) - { - if ( m_bConnected ) - ServerDisconnect(); - - m_bConnected = FALSE; - m_hSocket = INVALID_SOCKET; - - m_hSocket = _connectServerSocket( szServerAddress, iPortNumber ); - - if ( m_hSocket != INVALID_SOCKET ) - { - DWORD dwReceiveBufferSize = 1024*16; - PBYTE pReceiveBuffer = (PBYTE)malloc( dwReceiveBufferSize ); - if ( pReceiveBuffer ) - { - // Connected. Wait server hello string. - DWORD iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - { - // Check 220 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode != 220 ) - { - SetErrorText( pReceiveBuffer ); - free( pReceiveBuffer ); - ServerDisconnect(); - return FALSE; - } - } - else - { - SetErrorText( "ReceiveData error. 
", WSAGetLastError() ); - free( pReceiveBuffer ); - ServerDisconnect(); - return FALSE; - } - - // EHLO / HELO - BYTE szHelloBuffer[ 256 ]; - sprintf( (char*)szHelloBuffer, "%s %s\r\n", (char*)SMTP_COMMAND_EHLO, (char*)szServerAddress ); - if ( SendData( m_hSocket, (PBYTE)szHelloBuffer, strlen( (const char*)szHelloBuffer ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - ServerDisconnect(); - return FALSE; - } - - iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - { - // Check 250 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode == 500 ) - { - SetErrorText( pReceiveBuffer ); - - sprintf( (char*)szHelloBuffer, "%s %s\r\n", (char*)SMTP_COMMAND_HELO, (char*)szServerAddress ); - if ( SendData( m_hSocket, (PBYTE)szHelloBuffer, strlen( (const char*)szHelloBuffer ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - ServerDisconnect(); - return FALSE; - } - - iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode != 250 ) - { - SetErrorText( pReceiveBuffer ); - free( pReceiveBuffer ); - ServerDisconnect(); - return FALSE; - } - } - else if ( iResponseCode != 250 ) - { - SetErrorText( pReceiveBuffer ); - free( pReceiveBuffer ); - ServerDisconnect(); - return FALSE; - } - - // Parse AUTH supported modes - ParseESMTPExtensions( pReceiveBuffer, iReceived ); - } - else - { - SetErrorText( "ReceiveData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - ServerDisconnect(); - return FALSE; - } - - free( pReceiveBuffer ); - } - } - else - { - return FALSE; - } - - m_bConnected = TRUE; - - return TRUE; - } - - ////////////////////////////////////////////////////////////////////////// - inline BOOL CSMTPClient::ServerConnect( LPCSTR szServerAddress, const unsigned short iPortNumber, LPCSTR szUsername, LPCSTR szPassword ) - { - BOOL bSuccess = FALSE; - - bSuccess = ServerConnect( szServerAddress, iPortNumber ); - if ( bSuccess ) - { - if ( GetAuthModeIsSupported( AUTH_MODE_CRAM_MD5 ) ) - { - ServerLogin( szUsername, szPassword, AUTH_MODE_CRAM_MD5 ); - } - else - if ( GetAuthModeIsSupported( AUTH_MODE_PLAIN ) ) - { - ServerLogin( szUsername, szPassword, AUTH_MODE_PLAIN ); - } - else - if ( GetAuthModeIsSupported( AUTH_MODE_LOGIN ) ) - { - ServerLogin( szUsername, szPassword, AUTH_MODE_LOGIN ); - } - } - - return bSuccess; - } - - ////////////////////////////////////////////////////////////////////////// - inline SOCKET CSMTPClient::_connectServerSocket( LPCSTR szServerAddress, const unsigned short iPortNumber ) - { - int nConnect; - short nProtocolPort = iPortNumber; - LPHOSTENT lpHostEnt; - SOCKADDR_IN sockAddr; - - SOCKET hServerSocket = INVALID_SOCKET; - - lpHostEnt = gethostbyname( szServerAddress ); - if (lpHostEnt) - { - hServerSocket = socket( AF_INET, SOCK_STREAM, IPPROTO_TCP ); - if (hServerSocket != INVALID_SOCKET) - { - sockAddr.sin_family = AF_INET; - sockAddr.sin_port = htons( nProtocolPort ); - sockAddr.sin_addr = *((LPIN_ADDR)*lpHostEnt->h_addr_list); - - nConnect = connect( hServerSocket, (PSOCKADDR)&sockAddr, - sizeof(sockAddr) ); - - if ( nConnect != 0 ) - { - SetErrorText( "connect error.", WSAGetLastError() ); - hServerSocket = INVALID_SOCKET; - } - } - else - { - SetErrorText( "Invalid socket." 
); - throw; - } - } - else - { - SetErrorText( "Error retrieving host by name.", WSAGetLastError() ); - } - - return hServerSocket ; - } - - ////////////////////////////////////////////////////////////////////////// - inline void CSMTPClient::ServerDisconnect() - { - if ( m_hSocket != INVALID_SOCKET ) - { - if ( SendData( m_hSocket, (PBYTE)SMTP_COMMAND_QUIT, strlen( SMTP_COMMAND_QUIT ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - return; - } - - DWORD dwReceiveBufferSize = 1024*16; - PBYTE pReceiveBuffer = (PBYTE)malloc( dwReceiveBufferSize ); - if ( pReceiveBuffer ) - { - DWORD iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - - if ( iReceived ) - SetErrorText( pReceiveBuffer ); - - free( pReceiveBuffer ); - } - - m_hSocket = INVALID_SOCKET; - } - - m_bConnected = FALSE; - } - - ////////////////////////////////////////////////////////////////////////// - inline BOOL CSMTPClient::GetAuthModeIsSupported( int iMode ) - { - BOOL bSupported = FALSE; - - for ( int i = 0 ; i < m_dwSupportedAuthModesCount ; i++ ) - { - if ( m_aSupportedAuthModes[ i ] == iMode ) - { - bSupported = TRUE; - break; - } - } - - return bSupported; - } - - ////////////////////////////////////////////////////////////////////////// - inline BOOL CSMTPClient::ServerLogin( LPCSTR szUsername, LPCSTR szPassword, int iAuthMode ) - { - BOOL bSuccess = FALSE; - - if ( iAuthMode == AUTH_MODE_PLAIN ) - { - bSuccess = ServerLoginMethodPlain( szUsername, szPassword ); - } - else if ( iAuthMode == AUTH_MODE_LOGIN ) - { - bSuccess = ServerLoginMethodLogin( szUsername, szPassword ); - } - else if ( iAuthMode == AUTH_MODE_CRAM_MD5 ) - { - bSuccess = ServerLoginMethodCramMD5( szUsername, szPassword ); - } - - return bSuccess; - } - - ////////////////////////////////////////////////////////////////////////// - inline BOOL CSMTPClient::ServerLogin( LPCSTR szUsername, LPCSTR szPassword ) - { - BOOL bSuccess = FALSE; - - if ( GetAuthModeIsSupported( AUTH_MODE_CRAM_MD5 ) ) - { - bSuccess = ServerLogin( szUsername, szPassword, AUTH_MODE_CRAM_MD5 ); - } - else - if ( GetAuthModeIsSupported( AUTH_MODE_PLAIN ) ) - { - bSuccess = ServerLogin( szUsername, szPassword, AUTH_MODE_PLAIN ); - } - else - if ( GetAuthModeIsSupported( AUTH_MODE_LOGIN ) ) - { - bSuccess = ServerLogin( szUsername, szPassword, AUTH_MODE_LOGIN ); - } - - return bSuccess; - } - - ////////////////////////////////////////////////////////////////////////// - inline BOOL CSMTPClient::ServerLoginMethodPlain( LPCSTR szUsername, LPCSTR szPassword ) - { - BOOL bSuccess = FALSE; - - BYTE szCommandBuffer[ 256 ]; - sprintf( (char*)szCommandBuffer, "%s %s\r\n", (char*)SMTP_COMMAND_AUTH, (char*)SMTP_COMMAND_AUTH_PLAIN ); - if ( SendData( m_hSocket, (PBYTE)szCommandBuffer, strlen( (const char*)szCommandBuffer ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - return FALSE; - } - - DWORD dwReceiveBufferSize = 1024*16; - PBYTE pReceiveBuffer = (PBYTE)malloc( dwReceiveBufferSize ); - if ( pReceiveBuffer ) - { - // Connected. Wait server hello string. - DWORD iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - { - SetErrorText( pReceiveBuffer ); - - // Check 334 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode != 334 ) - { - free( pReceiveBuffer ); - return FALSE; - } - } - else - { - SetErrorText( "ReceiveData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - return FALSE; - } - - // Encode. 
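For orientation in the encoding step that follows: SASL PLAIN (RFC 4616) is one base64-encoded blob of the form authzid NUL authcid NUL password, with the authorization identity left empty here, and that is exactly what the malloc/ZeroMemory/strcpy sequence below assembles. A bounds-safe sketch of the same blob (helper name is ours):

    #include <string>

    // Build the RFC 4616 PLAIN message: '\0' + user + '\0' + password.
    // The result still has to be base64-encoded before "AUTH PLAIN".
    std::string make_plain_blob(const std::string& user, const std::string& pass)
    {
        std::string blob;
        blob.push_back('\0');   // empty authzid
        blob += user;
        blob.push_back('\0');
        blob += pass;
        return blob;
    }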
- DWORD dwLoginBuffer = strlen( szUsername ) + strlen( szPassword ) + 3; - char *pLoginBuffer = (char*)malloc( dwLoginBuffer ); - if ( pLoginBuffer ) - { - ZeroMemory( pLoginBuffer, dwLoginBuffer ); - strcpy( pLoginBuffer + 1, szUsername ); - strcpy( pLoginBuffer + 1 + strlen( szUsername ) + 1, szPassword ); - - Base64Coder coder; - coder.Encode( (const PBYTE)pLoginBuffer, dwLoginBuffer - 1 ); - LPCSTR szLoginBufferEncoded = coder.EncodedMessage(); - - if ( szLoginBufferEncoded && strlen( szLoginBufferEncoded ) > 0 ) - { - DWORD dwSendBufferSize = strlen( szLoginBufferEncoded ) + 4; - char* pSendBuffer = (char*)malloc( dwSendBufferSize ); - if ( pSendBuffer ) - { - strcpy( pSendBuffer, szLoginBufferEncoded ); - strcat( pSendBuffer, "\r\n" ); - - if ( SendData( m_hSocket, (PBYTE)pSendBuffer, strlen( (const char*)pSendBuffer ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - free( pSendBuffer ); - free( pLoginBuffer ); - free( pReceiveBuffer ); - return FALSE; - } - - free( pSendBuffer ); - } - } - - free( pLoginBuffer ); - - // check result - iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - { - SetErrorText( pReceiveBuffer ); - - // Check 235 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode != 235 ) - { - free( pReceiveBuffer ); - return FALSE; - } - - bSuccess = TRUE; - } - else - { - SetErrorText( "ReceiveData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - return FALSE; - } - } - - free( pReceiveBuffer ); - } - - return bSuccess; - } - - ////////////////////////////////////////////////////////////////////////// - inline BOOL CSMTPClient::ServerLoginMethodLogin( LPCSTR szUsername, LPCSTR szPassword ) - { - BOOL bSuccess = FALSE; - - BYTE szCommandBuffer[ 256 ]; - sprintf( (char*)szCommandBuffer, "%s %s\r\n", (char*)SMTP_COMMAND_AUTH, (char*)SMTP_COMMAND_AUTH_LOGIN ); - if ( SendData( m_hSocket, (PBYTE)szCommandBuffer, strlen( (const char*)szCommandBuffer ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - return FALSE; - } - - DWORD dwReceiveBufferSize = 1024*16; - PBYTE pReceiveBuffer = (PBYTE)malloc( dwReceiveBufferSize ); - if ( pReceiveBuffer ) - { - DWORD iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - { - SetErrorText( pReceiveBuffer ); - - // Check 334 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode != 334 ) - { - free( pReceiveBuffer ); - return FALSE; - } - - // Check request - if ( iReceived > 6 ) - { - Base64Coder coder; - coder.Decode( pReceiveBuffer + 4, iReceived - 6 ); - LPCSTR szRequest = coder.DecodedMessage(); - if ( szRequest && strlen( szRequest ) > 0 ) - { - if ( strcmpi( szRequest, "Username:" ) == 0 ) - { - coder.Encode( (const PBYTE)szUsername, strlen( szUsername ) ); - LPCSTR szUsernameEncoded = coder.EncodedMessage(); - - char* szLoginUsernameBuffer = (char*)malloc( strlen( szUsernameEncoded ) + 4 ); - if ( szLoginUsernameBuffer ) - { - strcpy( szLoginUsernameBuffer, szUsernameEncoded ); - strcat( szLoginUsernameBuffer, "\r\n" ); - - if ( SendData( m_hSocket, (PBYTE)szLoginUsernameBuffer, strlen( (const char*)szLoginUsernameBuffer ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - return FALSE; - } - - free( szLoginUsernameBuffer ); - } - else - { - free( pReceiveBuffer ); - return FALSE; - } - - iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - 
{ - SetErrorText( pReceiveBuffer ); - - // Check 334 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode != 334 ) - { - free( pReceiveBuffer ); - return FALSE; - } - - // Check request - if ( iReceived > 6 ) - { - coder.Decode( pReceiveBuffer + 4, iReceived - 6 ); - LPCSTR szRequest2 = coder.DecodedMessage(); - if ( szRequest2 && strlen( szRequest2 ) > 0 ) - { - if ( strcmpi( szRequest2, "Password:" ) == 0 ) - { - coder.Encode( (const PBYTE)szPassword, strlen( szPassword ) ); - LPCSTR szPasswordEncoded = coder.EncodedMessage(); - - char* szLoginPasswordBuffer = (char*)malloc( strlen( szPasswordEncoded ) + 4 ); - if ( szLoginPasswordBuffer ) - { - strcpy( szLoginPasswordBuffer, szPasswordEncoded ); - strcat( szLoginPasswordBuffer, "\r\n" ); - - if ( SendData( m_hSocket, (PBYTE)szLoginPasswordBuffer, strlen( (const char*)szLoginPasswordBuffer ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - return FALSE; - } - - free( szLoginPasswordBuffer ); - } - else - { - free( pReceiveBuffer ); - return FALSE; - } - - iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - { - SetErrorText( pReceiveBuffer ); - - // Check 235 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode != 235 ) - { - free( pReceiveBuffer ); - return FALSE; - } - - bSuccess = TRUE; - } - else - { - SetErrorText( "ReceiveData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - return FALSE; - } - } - } - } - } - else - { - free( pReceiveBuffer ); - return FALSE; - } - } - } - else - { - free( pReceiveBuffer ); - return FALSE; - } - } - else - { - free( pReceiveBuffer ); - return FALSE; - } - } - else - { - SetErrorText( "ReceiveData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - return FALSE; - } - - free( pReceiveBuffer ); - } - - return bSuccess; - } - - ////////////////////////////////////////////////////////////////////////// - inline BOOL CSMTPClient::ServerLoginMethodCramMD5( LPCSTR szUsername, LPCSTR szPassword ) - { - BOOL bSuccess = FALSE; - - BYTE szCommandBuffer[ 256 ]; - sprintf( (char*)szCommandBuffer, "%s %s\r\n", (char*)SMTP_COMMAND_AUTH, (char*)SMTP_COMMAND_AUTH_CRAM_MD5 ); - if ( SendData( m_hSocket, (PBYTE)szCommandBuffer, strlen( (const char*)szCommandBuffer ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - return FALSE; - } - - DWORD dwReceiveBufferSize = 1024*16; - PBYTE pReceiveBuffer = (PBYTE)malloc( dwReceiveBufferSize ); - if ( pReceiveBuffer ) - { - // Connected. Wait server hello string. 
- DWORD iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - { - SetErrorText( pReceiveBuffer ); - - // Check 334 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode != 334 ) - { - free( pReceiveBuffer ); - return FALSE; - } - - // Check request - if ( iReceived > 6 ) - { - Base64Coder coder; - coder.Decode( pReceiveBuffer + 4, iReceived - 6 ); - LPCSTR szResponse = coder.DecodedMessage(); - if ( szResponse && strlen( szResponse ) > 0 ) - { - char *auth_hex = hash_md5( szPassword, szResponse, strlen(szResponse) ); - if ( !auth_hex ) - { - free( pReceiveBuffer ); - return FALSE; - } - - char *szCommand = (char*)malloc( strlen( szUsername ) + strlen( auth_hex ) + 5 ); - if ( szCommand ) - { - sprintf( szCommand, "%s %s", szUsername, auth_hex ); - - free( auth_hex ); - - coder.Encode( (const PBYTE)szCommand, strlen( szCommand ) ); - - free( szCommand ); - - LPCSTR szAuthEncoded = coder.EncodedMessage(); - if ( szAuthEncoded == NULL ) - { - free( pReceiveBuffer ); - return FALSE; - } - - char *szAuthCommand = (char*)malloc( strlen( szAuthEncoded ) + 4 ); - if ( szAuthCommand ) - { - strcpy( szAuthCommand, szAuthEncoded ); - strcat( szAuthCommand, "\r\n" ); - - // Send auth data - if ( SendData( m_hSocket, (PBYTE)szAuthCommand, strlen( (const char*)szAuthCommand ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - free( szAuthCommand ); - free( pReceiveBuffer ); - return FALSE; - } - - // Check response - iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - { - SetErrorText( pReceiveBuffer ); - - // Check 235 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode != 235 ) - { - free( pReceiveBuffer ); - return FALSE; - } - - bSuccess = TRUE; - } - else - { - SetErrorText( "ReceiveData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - return FALSE; - } - - free( szAuthCommand ); - } - else - { - free( pReceiveBuffer ); - return FALSE; - } - } - else - { - free( auth_hex ); - free( pReceiveBuffer ); - return FALSE; - } - } - else - { - free( pReceiveBuffer ); - return FALSE; - } - } - - } - else - { - SetErrorText( "ReceiveData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - return FALSE; - } - - free( pReceiveBuffer ); - } - else - { - SetErrorText( "malloc() failed.", GetLastError() ); - } - - return bSuccess; - } - - ////////////////////////////////////////////////////////////////////////// - inline BOOL CSMTPClient::SendMessage( LPCSTR szFromAddress, LPCSTR szFromName, LPCSTR szToAddresses, LPCSTR szSubject, LPCSTR szXMailer, LPBYTE pBodyBuffer, DWORD dwBodySize ) - { - BOOL bSuccess = FALSE; - - // Format Header - if ( !szFromAddress ) - { - SetErrorText( "SendMessage. Invalid Parameters!" 
); - return NULL; - } - - char *szHeaderBuffer = (char*)malloc( 1024 * 16 ); - if ( szHeaderBuffer ) - { - // get the current date and time - char szDate[ 500 ]; - char sztTime[ 500 ]; - - SYSTEMTIME st = { 0 }; - ::GetSystemTime(&st); - - ::GetDateFormatA( MAKELCID( MAKELANGID( LANG_ENGLISH, SUBLANG_ENGLISH_US), SORT_DEFAULT), 0, &st, "ddd',' dd MMM yyyy", szDate , sizeof( szDate ) ); - ::GetTimeFormatA( MAKELCID( MAKELANGID( LANG_ENGLISH, SUBLANG_ENGLISH_US), SORT_DEFAULT), TIME_FORCE24HOURFORMAT, &st, "HH':'mm':'ss", sztTime, sizeof( sztTime ) ); - - sprintf( szHeaderBuffer, "DATE: %s %s\r\n", szDate, sztTime ); - - // X-Mailer Field - if ( szXMailer && strlen( szXMailer ) ) - { - strcat( szHeaderBuffer, "X-Mailer: " ); - strcat( szHeaderBuffer, szXMailer ); - strcat( szHeaderBuffer, "\r\n" ); - } - - // From: - strcat( szHeaderBuffer, "From: " ); - if ( szFromName ) - { - strcat( szHeaderBuffer, "\"" ); - strcat( szHeaderBuffer, szFromName ); - strcat( szHeaderBuffer, "\" <" ); - strcat( szHeaderBuffer, szFromAddress ); - strcat( szHeaderBuffer, ">\r\n" ); - } - else - { - strcat( szHeaderBuffer, "<" ); - strcat( szHeaderBuffer, szFromAddress ); - strcat( szHeaderBuffer, ">\r\n" ); - } - - // Subject: - if ( szSubject && strlen( szSubject ) ) - { - strcat( szHeaderBuffer, "Subject: " ); - strcat( szHeaderBuffer, szSubject ); - strcat( szHeaderBuffer, "\r\n" ); - } - - // To Fields - strcat( szHeaderBuffer, "To: " ); - strcat( szHeaderBuffer, szToAddresses ); - strcat( szHeaderBuffer, "\r\n" ); - - // MIME - strcat( szHeaderBuffer, "MIME-Version: 1.0\r\nContent-type: text/plain; charset=US-ASCII\r\n" ); - - // End Header - strcat( szHeaderBuffer, "\r\n" ); - } - else - { - SetErrorText( "malloc error.", GetLastError() ); - return FALSE; - } - - - BYTE szCommandBuffer[ 256 ]; - sprintf( (char*)szCommandBuffer, "MAIL FROM:<%s> SIZE=%u\r\n", (char*)szFromAddress, strlen( szHeaderBuffer ) + dwBodySize + 2 ); - if ( SendData( m_hSocket, (PBYTE)szCommandBuffer, strlen( (const char*)szCommandBuffer ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - free( szHeaderBuffer ); - return FALSE; - } - - DWORD dwReceiveBufferSize = 1024*16; - PBYTE pReceiveBuffer = (PBYTE)malloc( dwReceiveBufferSize ); - if ( pReceiveBuffer ) - { - DWORD iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - { - SetErrorText( pReceiveBuffer ); - - // Check 250 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode != 250 ) - { - free( szHeaderBuffer ); - free( pReceiveBuffer ); - return FALSE; - } - } - else - { - SetErrorText( "ReceiveData error.", WSAGetLastError() ); - free( szHeaderBuffer ); - free( pReceiveBuffer ); - return FALSE; - } - - // Post "RCTP TO:" - char *szCurrentAddr = (char*)malloc( strlen( szToAddresses ) + 1 ); - if ( !szCurrentAddr ) - { - SetErrorText( "malloc error.", GetLastError() ); - free( szHeaderBuffer ); - free( pReceiveBuffer ); - return FALSE; - } - - const char* szToOffset = szToAddresses; - char* szZap = NULL; - - BOOL bRCPTAccepted = FALSE; - do - { - strcpy( szCurrentAddr, szToOffset ); - char *szExtractedAdress = szCurrentAddr; - szZap = strchr( szCurrentAddr, ',' ); - - if ( szZap ) - { - *szZap = 0; - szToOffset = szZap + 1; - } - - char *pSkobka1 = strchr( szCurrentAddr, '<' ); - char *pSkobka2 = strchr( szCurrentAddr, '>' ); - - if ( pSkobka1 && pSkobka2 && pSkobka2 > pSkobka1 ) - { - szExtractedAdress = pSkobka1 + 1; - *pSkobka2 = NULL; - } - - if ( szExtractedAdress && strlen( 
szExtractedAdress ) > 0 ) - { - sprintf( (char*)szCommandBuffer, "RCPT TO:<%s>\r\n", (char*)szExtractedAdress ); - if ( SendData( m_hSocket, (PBYTE)szCommandBuffer, strlen( (const char*)szCommandBuffer ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - free( szCurrentAddr ); - free( pReceiveBuffer ); - free( szHeaderBuffer ); - return FALSE; - } - - iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - { - SetErrorText( pReceiveBuffer ); - - // Check 250 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode == 250 ) - { - bRCPTAccepted = TRUE; - } - } - else - { - SetErrorText( "ReceiveData error.", WSAGetLastError() ); - free( szCurrentAddr ); - free( pReceiveBuffer ); - free( szHeaderBuffer ); - return FALSE; - } - } - - } while( szZap ); - - free( szCurrentAddr ); - - if ( bRCPTAccepted ) - { - sprintf( (char*)szCommandBuffer, "DATA\r\n" ); - if ( SendData( m_hSocket, (PBYTE)szCommandBuffer, strlen( (const char*)szCommandBuffer ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - free( szHeaderBuffer ); - return FALSE; - } - - iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - { - SetErrorText( pReceiveBuffer ); - - // Check 354 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode != 354 ) - { - free( pReceiveBuffer ); - free( szHeaderBuffer ); - return FALSE; - } - } - else - { - SetErrorText( "ReceiveData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - free( szHeaderBuffer ); - return FALSE; - } - - // Send message data (header + body + .) - if ( SendData( m_hSocket, (PBYTE)szHeaderBuffer, strlen( (const char*)szHeaderBuffer ) ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - free( szHeaderBuffer ); - return FALSE; - } - - if ( SendData( m_hSocket, (PBYTE)pBodyBuffer, dwBodySize ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - free( szHeaderBuffer ); - return FALSE; - } - - if ( SendData( m_hSocket, (PBYTE)"\r\n.\r\n", 5 ) == 0 ) - { - SetErrorText( "SendData error.", WSAGetLastError() ); - free( pReceiveBuffer ); - free( szHeaderBuffer ); - return FALSE; - } - - iReceived = ReceiveData( m_hSocket, pReceiveBuffer, dwReceiveBufferSize ); - if ( iReceived ) - { - SetErrorText( pReceiveBuffer ); - - // Check 250 - int iResponseCode = GetResponseCode( pReceiveBuffer, iReceived ); - if ( iResponseCode == 250 ) - { - bSuccess = TRUE; - } - } - else - { - SetErrorText( "ReceiveData error.", WSAGetLastError() ); - } - } - - free( pReceiveBuffer ); - } - else - { - SetErrorText( "malloc error.", GetLastError() ); - } - - if ( szHeaderBuffer ) - free( szHeaderBuffer ); - - return bSuccess; - } - - - - ////////////////////////////////////////////////////////////////////////// - ////////////////////////////////////////////////////////////////////////// - ////////////////////////////////////////////////////////////////////////// - ////////////////////////////////////////////////////////////////////////// - - -#ifndef PAGESIZE -#define PAGESIZE 4096 -#endif - -#ifndef ROUNDTOPAGE -#define ROUNDTOPAGE(a) (((a/4096)+1)*4096) -#endif - - ////////////////////////////////////////////////////////////////////// - // Construction/Destruction - ////////////////////////////////////////////////////////////////////// - - inline Base64Coder::Base64Coder() - : m_pDBuffer(NULL), - 
m_pEBuffer(NULL), - m_nDBufLen(0), - m_nEBufLen(0) - { - - } - - inline Base64Coder::~Base64Coder() - { - if(m_pDBuffer != NULL) - delete [] m_pDBuffer; - - if(m_pEBuffer != NULL) - delete [] m_pEBuffer; - } - - inline LPCSTR Base64Coder::DecodedMessage() const - { - return (LPCSTR) m_pDBuffer; - } - - inline LPCSTR Base64Coder::EncodedMessage() const - { - return (LPCSTR) m_pEBuffer; - } - - inline void Base64Coder::AllocEncode(DWORD nSize) - { - if(m_nEBufLen < nSize) - { - if(m_pEBuffer != NULL) - delete [] m_pEBuffer; - - m_nEBufLen = ROUNDTOPAGE(nSize); - m_pEBuffer = new BYTE[m_nEBufLen]; - } - - ::ZeroMemory(m_pEBuffer, m_nEBufLen); - m_nEDataLen = 0; - } - - inline void Base64Coder::AllocDecode(DWORD nSize) - { - if(m_nDBufLen < nSize) - { - if(m_pDBuffer != NULL) - delete [] m_pDBuffer; - - m_nDBufLen = ROUNDTOPAGE(nSize); - m_pDBuffer = new BYTE[m_nDBufLen]; - } - - ::ZeroMemory(m_pDBuffer, m_nDBufLen); - m_nDDataLen = 0; - } - - inline void Base64Coder::SetEncodeBuffer(const PBYTE pBuffer, DWORD nBufLen) - { - DWORD i = 0; - - AllocEncode(nBufLen); - while(i < nBufLen) - { - if(!_IsBadMimeChar(pBuffer[i])) - { - m_pEBuffer[m_nEDataLen] = pBuffer[i]; - m_nEDataLen++; - } - - i++; - } - } - - inline void Base64Coder::SetDecodeBuffer(const PBYTE pBuffer, DWORD nBufLen) - { - AllocDecode(nBufLen); - ::CopyMemory(m_pDBuffer, pBuffer, nBufLen); - m_nDDataLen = nBufLen; - } - - inline void Base64Coder::Encode(const PBYTE pBuffer, DWORD nBufLen) - { - SetDecodeBuffer(pBuffer, nBufLen); - AllocEncode(nBufLen * 2); - - TempBucket Raw; - DWORD nIndex = 0; - - while((nIndex + 3) <= nBufLen) - { - Raw.Clear(); - ::CopyMemory(&Raw, m_pDBuffer + nIndex, 3); - Raw.nSize = 3; - _EncodeToBuffer(Raw, m_pEBuffer + m_nEDataLen); - nIndex += 3; - m_nEDataLen += 4; - } - - if(nBufLen > nIndex) - { - Raw.Clear(); - Raw.nSize = (BYTE) (nBufLen - nIndex); - ::CopyMemory(&Raw, m_pDBuffer + nIndex, nBufLen - nIndex); - _EncodeToBuffer(Raw, m_pEBuffer + m_nEDataLen); - m_nEDataLen += 4; - } - } - - inline void Base64Coder::Encode(LPCSTR szMessage) - { - if(szMessage != NULL) - Base64Coder::Encode((const PBYTE)szMessage, strlen( (const char*)szMessage)); - } - - inline void Base64Coder::Decode(const PBYTE pBuffer, DWORD dwBufLen) - { - if(is_init()) - _Init(); - - SetEncodeBuffer(pBuffer, dwBufLen); - - AllocDecode(dwBufLen); - - TempBucket Raw; - - DWORD nIndex = 0; - - while((nIndex + 4) <= m_nEDataLen) - { - Raw.Clear(); - Raw.nData[0] = DecodeTable()[m_pEBuffer[nIndex]]; - Raw.nData[1] = DecodeTable()[m_pEBuffer[nIndex + 1]]; - Raw.nData[2] = DecodeTable()[m_pEBuffer[nIndex + 2]]; - Raw.nData[3] = DecodeTable()[m_pEBuffer[nIndex + 3]]; - - if(Raw.nData[2] == 255) - Raw.nData[2] = 0; - if(Raw.nData[3] == 255) - Raw.nData[3] = 0; - - Raw.nSize = 4; - _DecodeToBuffer(Raw, m_pDBuffer + m_nDDataLen); - nIndex += 4; - m_nDDataLen += 3; - } - - // If nIndex < m_nEDataLen, then we got a decode message without padding. - // We may want to throw some kind of warning here, but we are still required - // to handle the decoding as if it was properly padded. 
- if(nIndex < m_nEDataLen) - { - Raw.Clear(); - for(DWORD i = nIndex; i < m_nEDataLen; i++) - { - Raw.nData[i - nIndex] = DecodeTable()[m_pEBuffer[i]]; - Raw.nSize++; - if(Raw.nData[i - nIndex] == 255) - Raw.nData[i - nIndex] = 0; - } - - _DecodeToBuffer(Raw, m_pDBuffer + m_nDDataLen); - m_nDDataLen += (m_nEDataLen - nIndex); - } - } - - inline void Base64Coder::Decode(LPCSTR szMessage) - { - if(szMessage != NULL) - Base64Coder::Decode((const PBYTE)szMessage, strlen((const char*)szMessage)); - } - - inline DWORD Base64Coder::_DecodeToBuffer(const TempBucket &Decode, PBYTE pBuffer) - { - TempBucket Data; - DWORD nCount = 0; - - _DecodeRaw(Data, Decode); - - for(int i = 0; i < 3; i++) - { - pBuffer[i] = Data.nData[i]; - if(pBuffer[i] != 255) - nCount++; - } - - return nCount; - } - - - inline void Base64Coder::_EncodeToBuffer(const TempBucket &Decode, PBYTE pBuffer) - { - TempBucket Data; - - _EncodeRaw(Data, Decode); - - for(int i = 0; i < 4; i++) - pBuffer[i] = Base64Digits()[Data.nData[i]]; - - switch(Decode.nSize) - { - case 1: - pBuffer[2] = '='; - case 2: - pBuffer[3] = '='; - } - } - - inline void Base64Coder::_DecodeRaw(TempBucket &Data, const TempBucket &Decode) - { - BYTE nTemp; - - Data.nData[0] = Decode.nData[0]; - Data.nData[0] <<= 2; - - nTemp = Decode.nData[1]; - nTemp >>= 4; - nTemp &= 0x03; - Data.nData[0] |= nTemp; - - Data.nData[1] = Decode.nData[1]; - Data.nData[1] <<= 4; - - nTemp = Decode.nData[2]; - nTemp >>= 2; - nTemp &= 0x0F; - Data.nData[1] |= nTemp; - - Data.nData[2] = Decode.nData[2]; - Data.nData[2] <<= 6; - nTemp = Decode.nData[3]; - nTemp &= 0x3F; - Data.nData[2] |= nTemp; - } - - inline void Base64Coder::_EncodeRaw(TempBucket &Data, const TempBucket &Decode) - { - BYTE nTemp; - - Data.nData[0] = Decode.nData[0]; - Data.nData[0] >>= 2; - - Data.nData[1] = Decode.nData[0]; - Data.nData[1] <<= 4; - nTemp = Decode.nData[1]; - nTemp >>= 4; - Data.nData[1] |= nTemp; - Data.nData[1] &= 0x3F; - - Data.nData[2] = Decode.nData[1]; - Data.nData[2] <<= 2; - - nTemp = Decode.nData[2]; - nTemp >>= 6; - - Data.nData[2] |= nTemp; - Data.nData[2] &= 0x3F; - - Data.nData[3] = Decode.nData[2]; - Data.nData[3] &= 0x3F; - } - - inline BOOL Base64Coder::_IsBadMimeChar(BYTE nData) - { - switch(nData) - { - case '\r': case '\n': case '\t': case ' ' : - case '\b': case '\a': case '\f': case '\v': - return TRUE; - default: - return FALSE; - } - } - - inline void Base64Coder::_Init() - { // Initialize Decoding table. - - int i; - - for(i = 0; i < 256; i++) - DecodeTable()[i] = -2; - - for(i = 0; i < 64; i++) - { - DecodeTable()[Base64Digits()[i]] = i; - DecodeTable()[Base64Digits()[i]|0x80] = i; - } - - DecodeTable()['='] = -1; - DecodeTable()['='|0x80] = -1; - - is_init() = TRUE; - } - - - } -} -} \ No newline at end of file diff --git a/contrib/epee/include/net/smtp_helper.h b/contrib/epee/include/net/smtp_helper.h deleted file mode 100644 index b8252e1cff..0000000000 --- a/contrib/epee/include/net/smtp_helper.h +++ /dev/null @@ -1,88 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - - -#pragma once -#include "smtp.h" - -namespace epee -{ -namespace net_utils -{ - namespace smtp - { - - inline bool send_mail(const std::string& server, int port, const std::string& login, const std::string& pass, const std::string& from_addres, const std::string& from_name, const std::string& maillist, const std::string& subject, const std::string& mail_body) - { - net_utils::smtp::CSMTPClient smtp; - - if ( !smtp.ServerConnect( server.c_str(), port ) ) - { - LOG_PRINT("Reporting: Failed to connect to server " << server <<":"<< port, LOG_LEVEL_0); - return false; - } - - if(login.size() && pass.size()) - { - if ( !smtp.ServerLogin( login.c_str(), pass.c_str()) ) - { - LOG_PRINT("Reporting: Failed to auth on server " << server <<":"<< port, LOG_LEVEL_0); - return false; - - } - } - - if ( !smtp.SendMessage( from_addres.c_str(), - from_name.c_str(), - maillist.c_str(), - subject.c_str(), - "bicycle-client", - (LPBYTE)mail_body.data(), - mail_body.size())) - { - char *szErrorText = smtp.GetLastErrorText(); - if ( szErrorText ) - { - LOG_PRINT("Failed to send message, error text: " << szErrorText, LOG_LEVEL_0); - } - else - { - LOG_PRINT("Failed to send message, error text: null", LOG_LEVEL_0); - } - return false; - } - - smtp.ServerDisconnect(); - - return true; - - - } - } -} -} \ No newline at end of file diff --git a/contrib/epee/include/pragma_comp_defs.h b/contrib/epee/include/pragma_comp_defs.h deleted file mode 100644 index f4ef7057e4..0000000000 --- a/contrib/epee/include/pragma_comp_defs.h +++ /dev/null @@ -1,14 +0,0 @@ -#pragma once - -#if defined(__GNUC__) - #define PRAGMA_WARNING_PUSH _Pragma("GCC diagnostic push") - #define PRAGMA_WARNING_POP _Pragma("GCC diagnostic pop") - #define PRAGMA_WARNING_DISABLE_VS(w) - #define PRAGMA_GCC(w) _Pragma(w) -#elif defined(_MSC_VER) - #define PRAGMA_WARNING_PUSH __pragma(warning( push )) - #define PRAGMA_WARNING_POP __pragma(warning( pop )) - #define PRAGMA_WARNING_DISABLE_VS(w) __pragma( warning ( disable: w )) - //#define PRAGMA_WARNING_DISABLE_GCC(w) - #define PRAGMA_GCC(w) -#endif diff --git a/contrib/epee/include/profile_tools.h b/contrib/epee/include/profile_tools.h deleted file mode 100644 index 49180c6a3c..0000000000 --- a/contrib/epee/include/profile_tools.h +++ /dev/null @@ -1,113 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. 
Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#ifndef _PROFILE_TOOLS_H_ -#define _PROFILE_TOOLS_H_ - -#include - -namespace epee -{ - -#ifdef ENABLE_PROFILING -#define PROFILE_FUNC(immortal_ptr_str) static profile_tools::local_call_account lcl_acc(immortal_ptr_str); \ - profile_tools::call_frame cf(lcl_acc); - -#define PROFILE_FUNC_SECOND(immortal_ptr_str) static profile_tools::local_call_account lcl_acc2(immortal_ptr_str); \ - profile_tools::call_frame cf2(lcl_acc2); - -#define PROFILE_FUNC_THIRD(immortal_ptr_str) static profile_tools::local_call_account lcl_acc3(immortal_ptr_str); \ - profile_tools::call_frame cf3(lcl_acc3); - -#define PROFILE_FUNC_ACC(acc) \ - profile_tools::call_frame cf(acc); - - -#else -#define PROFILE_FUNC(immortal_ptr_str) -#define PROFILE_FUNC_SECOND(immortal_ptr_str) -#define PROFILE_FUNC_THIRD(immortal_ptr_str) -#endif - -#define START_WAY_POINTS() uint64_t _____way_point_time = epee::misc_utils::get_tick_count(); -#define WAY_POINT(name) {uint64_t delta = epee::misc_utils::get_tick_count()-_____way_point_time; LOG_PRINT("Way point " << name << ": " << delta, LOG_LEVEL_2);_____way_point_time = misc_utils::get_tick_count();} -#define WAY_POINT2(name, avrg_obj) {uint64_t delta = epee::misc_utils::get_tick_count()-_____way_point_time; avrg_obj.push(delta); LOG_PRINT("Way point " << name << ": " << delta, LOG_LEVEL_2);_____way_point_time = misc_utils::get_tick_count();} - - -#define TIME_MEASURE_START(var_name) uint64_t var_name = epee::misc_utils::get_tick_count(); -#define TIME_MEASURE_FINISH(var_name) var_name = epee::misc_utils::get_tick_count() - var_name; - -namespace profile_tools -{ - struct local_call_account - { - local_call_account(const char* pstr):m_count_of_call(0), m_summary_time_used(0),m_pname(pstr) - {} - ~local_call_account() - { - LOG_PRINT2("profile_details.log", "PROFILE "< - - -namespace epee -{ - class global_regexp_critical_section - { - private: - mutable critical_section regexp_lock; - public: - global_regexp_critical_section(){} - critical_section& 
get_lock()const {return regexp_lock;} - }; - - const static global_regexp_critical_section gregexplock; - -#define STATIC_REGEXP_EXPR_1(var_name, xpr_text, reg_exp_flags) \ - static volatile uint32_t regexp_initialized_1 = 0;\ - volatile uint32_t local_is_initialized_1 = regexp_initialized_1;\ - if(!local_is_initialized_1)\ - gregexplock.get_lock().lock();\ - static const boost::regex var_name(xpr_text , reg_exp_flags);\ - if(!local_is_initialized_1)\ -{\ - boost::interprocess::ipcdetail::atomic_write32(®exp_initialized_1, 1);\ - gregexplock.get_lock().unlock();\ -} - -#define STATIC_REGEXP_EXPR_2(var_name, xpr_text, reg_exp_flags) \ - static volatile uint32_t regexp_initialized_2 = 0;\ - volatile uint32_t local_is_initialized_2 = regexp_initialized_2;\ - if(!local_is_initialized_2)\ - gregexplock.get_lock().lock().lock();\ - static const boost::regex var_name(xpr_text , reg_exp_flags);\ - if(!local_is_initialized_2)\ -{\ - boost::interprocess::ipcdetail::atomic_write32(®exp_initialized_2, 1);\ - gregexplock.get_lock().lock().unlock();\ -} - -#define STATIC_REGEXP_EXPR_3(var_name, xpr_text, reg_exp_flags) \ - static volatile uint32_t regexp_initialized_3 = 0;\ - volatile uint32_t local_is_initialized_3 = regexp_initialized_3;\ - if(!local_is_initialized_3)\ - gregexplock.get_lock().lock().lock();\ - static const boost::regex var_name(xpr_text , reg_exp_flags);\ - if(!local_is_initialized_3)\ -{\ - boost::interprocess::ipcdetail::atomic_write32(®exp_initialized_3, 1);\ - gregexplock.get_lock().lock().unlock();\ -} -} - -#endif //_REG_EXP_DEFINER_H_ diff --git a/contrib/epee/include/reg_utils.h b/contrib/epee/include/reg_utils.h deleted file mode 100644 index 22227a9b27..0000000000 --- a/contrib/epee/include/reg_utils.h +++ /dev/null @@ -1,249 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
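The STATIC_REGEXP_EXPR_* macros deleted above hand-roll double-checked locking around a function-local static boost::regex; note the copy-paste slip in the _2 and _3 variants, which chain get_lock().lock().lock() and get_lock().lock().unlock(). Since C++11, initialization of a function-local static is itself thread-safe, so the same guarantee needs no explicit lock; a sketch (the pattern is illustrative):

    #include <boost/regex.hpp>

    // One-time, thread-safe construction of a shared regex under C++11:
    // the runtime serializes first entry into the function for us.
    const boost::regex& uri_regex()
    {
        static const boost::regex rx("^https?://\\S+$", boost::regex::icase);
        return rx;
    }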
-// - - -#ifndef _MUSC_UTILS_EX_H_ -#define _MUSC_UTILS_EX_H_ - -namespace epee -{ -namespace reg_utils -{ - //----------------------------------------------------------------------------------------------------------------------------------- - template - bool RegSetPODValue(HKEY hParentKey, const char* pSubKey, const char* pValName, const T& valToSave, bool force_create = true) - { - HKEY hRegKey = 0; - DWORD dw = 0; - - if( ::RegOpenKeyExA(hParentKey, pSubKey, 0, KEY_WRITE, &hRegKey) != ERROR_SUCCESS ) - if(force_create && (::RegCreateKeyExA(hParentKey, pSubKey, 0, "", REG_OPTION_NON_VOLATILE, KEY_WRITE, NULL, &hRegKey, &dw) != ERROR_SUCCESS) ) - return false; - - - DWORD val_type = (sizeof(valToSave) == sizeof(DWORD)) ? REG_DWORD:REG_BINARY; - - BOOL res = ::RegSetValueExA( hRegKey, pValName, 0, val_type, (LPBYTE)&valToSave, sizeof(valToSave)) == ERROR_SUCCESS; - - ::RegCloseKey(hRegKey); - return ERROR_SUCCESS==res ? true:false; - } - //----------------------------------------------------------------------------------------------------------------------------------- - template - bool RegGetPODValue(HKEY hParentKey, const char* pSubKey, const char* pValName, T& valToSave) - { - HKEY hRegKey = 0; - LONG res = 0; - - - if(::RegOpenKeyExA(hParentKey, pSubKey, 0, KEY_READ, &hRegKey) == ERROR_SUCCESS ) - { - DWORD dwType, lSize = 0; - res = ::RegQueryValueExA(hRegKey, pValName, 0, &dwType, NULL, &lSize); - if(ERROR_SUCCESS!=res || (sizeof(valToSave) < lSize) ) - { - ::RegCloseKey(hRegKey); - return false; - } - res = ::RegQueryValueExA(hRegKey, pValName, 0, &dwType, (LPBYTE)&valToSave, &lSize); - } - return ERROR_SUCCESS==res ? true:false; - } - //----------------------------------------------------------------------------------------------------------------------------------- - inline - bool RegSetANSIString(HKEY hParentKey, const char* pSubKey, const char* pValName, const std::string& strToSave) - { - HKEY hRegKey = 0; - DWORD dw = 0; - DWORD res_ = 0; - if( (res_ = ::RegCreateKeyExA(hParentKey, pSubKey, 0, "", REG_OPTION_NON_VOLATILE, KEY_WRITE, NULL, &hRegKey, &dw)) != ERROR_SUCCESS ) - if( (res_= ::RegOpenKeyExA(hParentKey, pSubKey, 0, KEY_WRITE, &hRegKey)) != ERROR_SUCCESS ) - return false; - - DWORD valType = REG_SZ; - const char* pStr = strToSave.c_str(); - DWORD sizeOfStr = (DWORD)strToSave.size()+1; - LSTATUS res = ::RegSetValueExA(hRegKey, pValName, 0, valType, (LPBYTE)pStr, sizeOfStr); - - ::RegCloseKey(hRegKey); - return ERROR_SUCCESS==res ? true:false; - } - //----------------------------------------------------------------------------------------------------------------------------------- - inline - bool RegGetANSIString(HKEY hParentKey, const char* pSubKey, const char* pValName, std::string& strToSave) - { - HKEY hRegKey = 0; - LONG res = 0; - - - if((res = ::RegOpenKeyExA(hParentKey, pSubKey, 0, KEY_READ, &hRegKey)) == ERROR_SUCCESS ) - { - DWORD dwType, lSize = 0; - res = ::RegQueryValueExA(hRegKey, pValName, 0, &dwType, NULL, &lSize); - if(ERROR_SUCCESS!=res) - { - - ::RegCloseKey(hRegKey); - return false; - } - char* pTmpStr = new char[lSize+2]; - memset(pTmpStr, 0, lSize+2); - res = ::RegQueryValueExA(hRegKey, pValName, 0, &dwType, (LPBYTE)pTmpStr, &lSize); - pTmpStr[lSize+1] = 0; //be happy ;) - strToSave = pTmpStr; - delete [] pTmpStr; - ::RegCloseKey(hRegKey); - } - return ERROR_SUCCESS==res ? 
true:false; - } - //----------------------------------------------------------------------------------------------------------------------------------- - template<class TMemoryObject> - bool RegSetRAWValue(HKEY hKey, const char* pValName, const TMemoryObject& valToSave, DWORD valType = REG_BINARY) - { - LONG res = ::RegSetValueExA( hKey, pValName, 0, valType, (CONST BYTE*)valToSave.get(0), (DWORD)valToSave.get_size()); - - return ERROR_SUCCESS==res ? true:false; - } - //---------------------------------------------------------------------------------------------------------------------------------- - bool RegSetRAWValue(HKEY hKey, const char* pValName, const std::string & valToSave, DWORD valType = REG_BINARY) - { - LONG res = ::RegSetValueExA( hKey, pValName, 0, valType, (CONST BYTE*)valToSave.data(), (DWORD)valToSave.size()); - - return ERROR_SUCCESS==res ? true:false; - } - //----------------------------------------------------------------------------------------------------------------------------------- - template<class TMemoryObject> - bool RegGetRAWValue(HKEY hKey, const char* pValName, TMemoryObject& valToSave, DWORD* pRegType) - { - DWORD dwType, lSize = 0; - LONG res = ::RegQueryValueExA(hKey, pValName, 0, &dwType, NULL, &lSize); - if(ERROR_SUCCESS!=res || 0 >= lSize) - { - valToSave.release(); - return false; - } - if(valToSave.get_size() < lSize) - valToSave.alloc_buff(lSize); - res = ::RegQueryValueExA(hKey, pValName, 0, &dwType, (LPBYTE)valToSave.get(0), &lSize); - if(pRegType) *pRegType = dwType; - - return ERROR_SUCCESS==res ? true:false; - } - //----------------------------------------------------------------------------------------------------------------------------------- - bool RegGetRAWValue(HKEY hKey, const char* pValName, std::string& valToSave, DWORD* pRegType) - { - DWORD dwType, lSize = 0; - LONG res = ::RegQueryValueExA(hKey, pValName, 0, &dwType, NULL, &lSize); - if(ERROR_SUCCESS!=res || 0 >= lSize) - { - return false; - } - - valToSave.resize(lSize); - res = ::RegQueryValueExA(hKey, pValName, 0, &dwType, (LPBYTE)valToSave.data(), &lSize); - if(pRegType) *pRegType = dwType; - - return ERROR_SUCCESS==res ?
true:false; - } - //----------------------------------------------------------------------------------------------------------------------------------- - template<class TMemoryObject> - bool RegSetRAWValue(HKEY hParentKey, const char* pSubKey, const char* pValName, const TMemoryObject& valToSave, DWORD valType = REG_BINARY) - { - HKEY hRegKey = 0; - DWORD dw = 0; - bool res = false; - - if( ::RegCreateKeyExA(hParentKey, pSubKey, 0, "", REG_OPTION_NON_VOLATILE, KEY_WRITE, NULL, &hRegKey, &dw) != ERROR_SUCCESS ) - if( ::RegOpenKeyExA(hParentKey, pSubKey, 0, KEY_WRITE, &hRegKey) != ERROR_SUCCESS ) - return false; - - res = RegSetRAWValue(hRegKey, pValName, valToSave, valType); - - ::RegCloseKey(hRegKey); - return res; - } - //----------------------------------------------------------------------------------------------------------------------------------- - bool RegSetRAWValue(HKEY hParentKey, const char* pSubKey, const char* pValName, const std::string& valToSave, DWORD valType = REG_BINARY) - { - HKEY hRegKey = 0; - DWORD dw = 0; - bool res = false; - - if( ::RegCreateKeyExA(hParentKey, pSubKey, 0, "", REG_OPTION_NON_VOLATILE, KEY_WRITE, NULL, &hRegKey, &dw) != ERROR_SUCCESS ) - if( ::RegOpenKeyExA(hParentKey, pSubKey, 0, KEY_WRITE, &hRegKey) != ERROR_SUCCESS ) - return false; - - res = RegSetRAWValue(hRegKey, pValName, valToSave, valType); - - ::RegCloseKey(hRegKey); - return res; - } - //----------------------------------------------------------------------------------------------------------------------------------- - template<class TMemoryObject> - bool RegGetRAWValue(HKEY hParentKey, const char* pSubKey, const char* pValName, TMemoryObject& valToSave, DWORD* pRegType) - { - HKEY hRegKey = 0; - bool res = false; - - if(::RegOpenKeyExA(hParentKey, pSubKey, 0, KEY_READ, &hRegKey) == ERROR_SUCCESS ) - { - res = RegGetRAWValue(hRegKey, pValName, valToSave, pRegType); - ::RegCloseKey(hRegKey); - } - return res; - } - //----------------------------------------------------------------------------------------------------------------------------------- - inline - bool RegGetRAWValue(HKEY hParentKey, const char* pSubKey, const char* pValName, std::string& valToSave, DWORD* pRegType) - { - HKEY hRegKey = 0; - bool res = false; - - if(::RegOpenKeyExA(hParentKey, pSubKey, 0, KEY_READ, &hRegKey) == ERROR_SUCCESS ) - { - res = RegGetRAWValue(hRegKey, pValName, valToSave, pRegType); - ::RegCloseKey(hRegKey); - } - return res; - } - //----------------------------------------------------------------------------------------------------------------------------------- - inline - bool RegRemoveValue(HKEY hParentKey, const char* pValName) - { - //CHECK_AND_ASSERT(hParentKey&&pValName, false); - return ::RegDeleteValueA(hParentKey, pValName)==ERROR_SUCCESS ? true:false; - } - //----------------------------------------------------------------------------------------------------------------------------------- - inline - bool RegRemoveKey(HKEY hParentKey, const char* pKeyName) - { - //CHECK_AND_ASSERT(hParentKey&&pKeyName, false); - return ::RegDeleteKeyA(hParentKey, pKeyName)==ERROR_SUCCESS ? true:false; - } - -} -} -#endif //_MUSC_UTILS_EX_H_ diff --git a/contrib/epee/include/serialization/enableable.h b/contrib/epee/include/serialization/enableable.h deleted file mode 100644 index ab1d799e64..0000000000 --- a/contrib/epee/include/serialization/enableable.h +++ /dev/null @@ -1,53 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved.
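The deleted registry helpers all follow one pattern: open or create the key, read/write the value, close the key, and collapse any failure to a bool. A hedged sketch of intended call sites (the key path and value names are invented for illustration; the helper signatures are from the file above):

// Hypothetical key path and value names, illustration only.
DWORD counter = 42;
epee::reg_utils::RegSetPODValue(HKEY_CURRENT_USER, "Software\\MyApp", "counter", counter);
DWORD restored = 0;
epee::reg_utils::RegGetPODValue(HKEY_CURRENT_USER, "Software\\MyApp", "counter", restored);
std::string note;
epee::reg_utils::RegSetANSIString(HKEY_CURRENT_USER, "Software\\MyApp", "note", "hello");
epee::reg_utils::RegGetANSIString(HKEY_CURRENT_USER, "Software\\MyApp", "note", note);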
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -#pragma once - -namespace epee -{ - - template<class t_obj> - struct enableable - { - t_obj v; - bool enabled; - - enableable() - : v(t_obj()), enabled(true) - { // construct from defaults - } - - enableable(const t_obj& _v) - : v(_v), enabled(true) - { // construct from specified values - } - - enableable(const enableable& _v) - : v(_v.v), enabled(_v.enabled) - { // construct from specified values - } - }; -} \ No newline at end of file diff --git a/contrib/epee/include/serialization/keyvalue_serialization.h b/contrib/epee/include/serialization/keyvalue_serialization.h deleted file mode 100644 index bf2c8dacd0..0000000000 --- a/contrib/epee/include/serialization/keyvalue_serialization.h +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -#pragma once - -#include -#include -#include "misc_log_ex.h" -#include "enableable.h" -#include "keyvalue_serialization_overloads.h" -#include "serialization/serialization.h" - -namespace epee -{ - /************************************************************************/ - /* Serialize map declarations */ - /************************************************************************/ -#define BEGIN_KV_SERIALIZE_MAP() \ -public: \ - template<class t_storage> \ - bool store( t_storage& st, typename t_storage::hsection hparent_section = nullptr) const\ - {\ - return serialize_map<true>(*this, st, hparent_section);\ - }\ - template<class t_storage> \ - bool _load( t_storage& stg, typename t_storage::hsection hparent_section = nullptr)\ - {\ - return serialize_map<false>(*this, stg, hparent_section);\ - }\ - template<class t_storage> \ - bool load( t_storage& stg, typename t_storage::hsection hparent_section = nullptr)\ - {\ - try{\ - return serialize_map<false>(*this, stg, hparent_section);\ - }\ - catch(const std::exception& err) \ - { \ - (void)(err); \ - LOG_ERROR("Exception on unserializing: " << err.what());\ - return false; \ - }\ - }\ - template<bool is_store, class this_type, class t_storage> \ - static bool serialize_map(this_type& this_ref, t_storage& stg, typename t_storage::hsection hparent_section) \ - { - -#define KV_SERIALIZE_N(varialble, val_name) \ - epee::serialization::selector<is_store>::serialize(this_ref.varialble, stg, hparent_section, val_name); - -#define KV_SERIALIZE_VAL_POD_AS_BLOB_FORCE_N(varialble, val_name) \ - epee::serialization::selector<is_store>::serialize_t_val_as_blob(this_ref.varialble, stg, hparent_section, val_name); - -#define KV_SERIALIZE_VAL_POD_AS_BLOB_N(varialble, val_name) \ - static_assert(std::is_pod<decltype(this_ref.varialble)>::value, "t_type must be a POD type."); \ - KV_SERIALIZE_VAL_POD_AS_BLOB_FORCE_N(varialble, val_name) - -#define KV_SERIALIZE_CONTAINER_POD_AS_BLOB_N(varialble, val_name) \ - epee::serialization::selector<is_store>::serialize_stl_container_pod_val_as_blob(this_ref.varialble, stg, hparent_section, val_name); - -#define END_KV_SERIALIZE_MAP() return true;} - -#define KV_SERIALIZE(varialble) KV_SERIALIZE_N(varialble, #varialble) -#define KV_SERIALIZE_VAL_POD_AS_BLOB(varialble) KV_SERIALIZE_VAL_POD_AS_BLOB_N(varialble, #varialble) -#define KV_SERIALIZE_VAL_POD_AS_BLOB_FORCE(varialble) KV_SERIALIZE_VAL_POD_AS_BLOB_FORCE_N(varialble, #varialble) //skip is_pod compile time check -#define KV_SERIALIZE_CONTAINER_POD_AS_BLOB(varialble) KV_SERIALIZE_CONTAINER_POD_AS_BLOB_N(varialble, #varialble) - -} - - - - diff --git a/contrib/epee/include/serialization/keyvalue_serialization_overloads.h b/contrib/epee/include/serialization/keyvalue_serialization_overloads.h deleted file mode 100644 index d53f167fb0..0000000000 --- a/contrib/epee/include/serialization/keyvalue_serialization_overloads.h +++ /dev/null @@ -1,374 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved.
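The macros deleted above were consumed by declaring a "serialize map" inside a struct. A hedged usage sketch (the struct and field names are invented; the macro names come from the deleted header):

// Hypothetical message struct using the deleted KV-serialization macros.
struct wallet_status
{
  std::string address;
  uint64_t balance;
  std::vector<uint64_t> out_amounts;

  BEGIN_KV_SERIALIZE_MAP()
    KV_SERIALIZE(address)
    KV_SERIALIZE(balance)
    KV_SERIALIZE(out_amounts)
  END_KV_SERIALIZE_MAP()
};
// Any t_storage back end could then drive wallet_status::store()/load().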
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -#pragma once - -#include -#include - -#include -#include - -namespace epee -{ - namespace serialization - { - - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool serialize_t_val(const t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return stg.set_value(pname, d, hparent_section); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool unserialize_t_val(t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return stg.get_value(pname, d, hparent_section); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool serialize_t_val_as_blob(const t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - std::string blob((const char *)&d, sizeof(d)); - return stg.set_value(pname, blob, hparent_section); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool unserialize_t_val_as_blob(t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - std::string blob; - if(!stg.get_value(pname, blob, hparent_section)) - return false; - CHECK_AND_ASSERT_MES(blob.size() == sizeof(d), false, "unserialize_t_val_as_blob: size of " << typeid(t_type).name() << " = " << sizeof(t_type) << ", but stored blod size = " << blob.size() << ", value name = " << pname); - d = *(const t_type*)blob.data(); - return true; - } - //------------------------------------------------------------------------------------------------------------------- - template<class serializible_type, class t_storage> - static bool serialize_t_obj(const serializible_type& obj, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - typename
t_storage::hsection hchild_section = stg.open_section(pname, hparent_section, true); - CHECK_AND_ASSERT_MES(hchild_section, false, "serialize_t_obj: failed to open/create section " << pname); - return obj.store(stg, hchild_section); - } - //------------------------------------------------------------------------------------------------------------------- - template<class serializible_type, class t_storage> - static bool unserialize_t_obj(serializible_type& obj, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - typename t_storage::hsection hchild_section = stg.open_section(pname, hparent_section, true); - if(!hchild_section) return false; - return obj._load(stg, hchild_section); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_obj, class t_storage> - static bool serialize_t_obj(enableable<t_obj>& obj, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - if(!obj.enabled) - return true; - return serialize_t_obj(obj.v, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_obj, class t_storage> - static bool unserialize_t_obj(enableable<t_obj>& obj, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - obj.enabled = false; - typename t_storage::hsection hchild_section = stg.open_section(pname, hparent_section, true); - if(!hchild_section) return false; - obj.enabled = true; - return obj.v._load(stg, hchild_section); - } - //------------------------------------------------------------------------------------------------------------------- - template<class stl_container, class t_storage> - static bool serialize_stl_container_t_val (const stl_container& container, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - if(!container.size()) return true; - typename stl_container::const_iterator it = container.begin(); - typename t_storage::harray hval_array = stg.insert_first_value(pname, *it, hparent_section); - CHECK_AND_ASSERT_MES(hval_array, false, "failed to insert first value to storage"); - it++; - for(;it!= container.end();it++) - stg.insert_next_value(hval_array, *it); - - return true; - } - //-------------------------------------------------------------------------------------------------------------------- - template<class stl_container, class t_storage> - static bool unserialize_stl_container_t_val(stl_container& container, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - container.clear(); - typename stl_container::value_type exchange_val; - typename t_storage::harray hval_array = stg.get_first_value(pname, exchange_val, hparent_section); - if(!hval_array) return false; - container.push_back(std::move(exchange_val)); - while(stg.get_next_value(hval_array, exchange_val)) - container.push_back(std::move(exchange_val)); - return true; - }//-------------------------------------------------------------------------------------------------------------------- - template<class stl_container, class t_storage> - static bool serialize_stl_container_pod_val_as_blob(const stl_container& container, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - if(!container.size()) return true; - typename stl_container::const_iterator it = container.begin(); - std::string mb; - mb.resize(sizeof(typename stl_container::value_type)*container.size()); - typename stl_container::value_type* p_elem = (typename stl_container::value_type*)mb.data(); - BOOST_FOREACH(const typename stl_container::value_type& v, container) - { -
*p_elem = v; - p_elem++; - } - return stg.set_value(pname, mb, hparent_section); - } - //-------------------------------------------------------------------------------------------------------------------- - template<class stl_container, class t_storage> - static bool unserialize_stl_container_pod_val_as_blob(stl_container& container, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - container.clear(); - std::string buff; - bool res = stg.get_value(pname, buff, hparent_section); - if(res) - { - size_t loaded_size = buff.size(); - typename stl_container::value_type* pelem = (typename stl_container::value_type*)buff.data(); - CHECK_AND_ASSERT_MES(!(loaded_size%sizeof(typename stl_container::value_type)), - false, - "size in blob " << loaded_size << " not have not zero modulo for sizeof(value_type) = " << sizeof(typename stl_container::value_type)); - size_t count = (loaded_size/sizeof(typename stl_container::value_type)); - for(size_t i = 0; i < count; i++) - container.push_back(*(pelem++)); - } - return res; - } - //-------------------------------------------------------------------------------------------------------------------- - template<class stl_container, class t_storage> - static bool serialize_stl_container_t_obj (const stl_container& container, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - bool res = false; - if(!container.size()) return true; - typename stl_container::const_iterator it = container.begin(); - typename t_storage::hsection hchild_section = nullptr; - typename t_storage::harray hsec_array = stg.insert_first_section(pname, hchild_section, hparent_section); - CHECK_AND_ASSERT_MES(hsec_array && hchild_section, false, "failed to insert first section with section name " << pname); - res = it->store(stg, hchild_section); - it++; - for(;it!= container.end();it++) - { - stg.insert_next_section(hsec_array, hchild_section); - res |= it->store(stg, hchild_section); - } - return res; - } - //-------------------------------------------------------------------------------------------------------------------- - template<class stl_container, class t_storage> - static bool unserialize_stl_container_t_obj(stl_container& container, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - bool res = false; - container.clear(); - typename stl_container::value_type val = typename stl_container::value_type(); - typename t_storage::hsection hchild_section = nullptr; - typename t_storage::harray hsec_array = stg.get_first_section(pname, hchild_section, hparent_section); - if(!hsec_array || !hchild_section) return false; - res = val._load(stg, hchild_section); - container.push_back(val); - while(stg.get_next_section(hsec_array, hchild_section)) - { - typename stl_container::value_type val_l = typename stl_container::value_type(); - res |= val_l._load(stg, hchild_section); - container.push_back(std::move(val_l)); - } - return res; - } - //-------------------------------------------------------------------------------------------------------------------- - template<bool> - struct kv_serialization_overloads_impl_is_base_serializable_types; - - template<> - struct kv_serialization_overloads_impl_is_base_serializable_types<true> - { - template<class t_type, class t_storage> - static bool kv_serialize(const t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return stg.set_value(pname, d, hparent_section); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool kv_unserialize(t_type& d, t_storage& stg,
typename t_storage::hsection hparent_section, const char* pname) - { - return stg.get_value(pname, d, hparent_section); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool kv_serialize(const std::vector<t_type>& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return serialize_stl_container_t_val(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool kv_unserialize(std::vector<t_type>& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return unserialize_stl_container_t_val(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool kv_serialize(const std::list<t_type>& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return serialize_stl_container_t_val(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool kv_unserialize(std::list<t_type>& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return unserialize_stl_container_t_val(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - }; - template<> - struct kv_serialization_overloads_impl_is_base_serializable_types<false> - { - template<class t_type, class t_storage> - static bool kv_serialize(const t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return serialize_t_obj(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool kv_unserialize(t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return unserialize_t_obj(d, stg, hparent_section, pname); - } - - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool kv_serialize(const std::vector<t_type>& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return serialize_stl_container_t_obj(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool kv_unserialize(std::vector<t_type>& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return unserialize_stl_container_t_obj(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool kv_serialize(const std::list<t_type>& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return serialize_stl_container_t_obj(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - static bool kv_unserialize(std::list<t_type>& d, t_storage& stg,
hparent_section, pname); - } - }; - template<class t_storage> - struct base_serializable_types: public boost::mpl::vector<uint64_t, uint32_t, uint16_t, uint8_t, int64_t, int32_t, int16_t, int8_t, double, bool, std::string, typename t_storage::meta_entry>::type - {}; - //------------------------------------------------------------------------------------------------------------------- - template<bool is_store> struct selector; - template<> - struct selector<true> - { - template<class t_type, class t_storage> - static bool serialize(const t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return kv_serialize(d, stg, hparent_section, pname); - } - - template<class t_type, class t_storage> - static bool serialize_stl_container_pod_val_as_blob(const t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return epee::serialization::serialize_stl_container_pod_val_as_blob(d, stg, hparent_section, pname); - } - - template<class t_type, class t_storage> - static bool serialize_t_val_as_blob(const t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return epee::serialization::serialize_t_val_as_blob(d, stg, hparent_section, pname); - } - - - }; - template<> - struct selector<false> - { - template<class t_type, class t_storage> - static bool serialize(t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return kv_unserialize(d, stg, hparent_section, pname); - } - template<class t_type, class t_storage> - static bool serialize_stl_container_pod_val_as_blob(t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return epee::serialization::unserialize_stl_container_pod_val_as_blob(d, stg, hparent_section, pname); - } - - template<class t_type, class t_storage> - static bool serialize_t_val_as_blob(t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return epee::serialization::unserialize_t_val_as_blob(d, stg, hparent_section, pname); - } - }; - - template<class t_type, class t_storage> - bool kv_serialize(const t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return kv_serialization_overloads_impl_is_base_serializable_types<boost::mpl::contains<base_serializable_types<t_storage>, typename std::remove_const<t_type>::type>::value>::kv_serialize(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - bool kv_unserialize(t_type& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return kv_serialization_overloads_impl_is_base_serializable_types<boost::mpl::contains<base_serializable_types<t_storage>, typename std::remove_const<t_type>::type>::value>::kv_unserialize(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - bool kv_serialize(const std::vector<t_type>& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return kv_serialization_overloads_impl_is_base_serializable_types<boost::mpl::contains<base_serializable_types<t_storage>, typename std::remove_const<t_type>::type>::value>::kv_serialize(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - bool kv_unserialize(std::vector<t_type>& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return kv_serialization_overloads_impl_is_base_serializable_types<boost::mpl::contains<base_serializable_types<t_storage>, typename std::remove_const<t_type>::type>::value>::kv_unserialize(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - bool kv_serialize(const std::list<t_type>& d, t_storage& stg, typename t_storage::hsection hparent_section, const
char* pname) - { - return kv_serialization_overloads_impl_is_base_serializable_types<boost::mpl::contains<base_serializable_types<t_storage>, typename std::remove_const<t_type>::type>::value>::kv_serialize(d, stg, hparent_section, pname); - } - //------------------------------------------------------------------------------------------------------------------- - template<class t_type, class t_storage> - bool kv_unserialize(std::list<t_type>& d, t_storage& stg, typename t_storage::hsection hparent_section, const char* pname) - { - return kv_serialization_overloads_impl_is_base_serializable_types<boost::mpl::contains<base_serializable_types<t_storage>, typename std::remove_const<t_type>::type>::value>::kv_unserialize(d, stg, hparent_section, pname); - } - } -} diff --git a/contrib/epee/include/serialization/serialize_base.h b/contrib/epee/include/serialization/serialize_base.h deleted file mode 100644 index 84a1624cb6..0000000000 --- a/contrib/epee/include/serialization/serialize_base.h +++ /dev/null @@ -1,2 +0,0 @@ -#pragma once - diff --git a/contrib/epee/include/service_impl_base.h b/contrib/epee/include/service_impl_base.h deleted file mode 100644 index 6e9aefc461..0000000000 --- a/contrib/epee/include/service_impl_base.h +++ /dev/null @@ -1,323 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
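Editorial note on the dispatch just removed: the free kv_serialize/kv_unserialize functions choose between the "base type" and "object" specializations at compile time via boost::mpl::contains over base_serializable_types. A standalone analogue (not epee code; std::is_arithmetic stands in for the mpl membership test, and all names are invented) shows the same two-path technique:

// Compile-time routing between a direct value path and an object path.
#include <iostream>
#include <string>
#include <type_traits>

template<bool is_base> struct kv_dispatch;

template<> struct kv_dispatch<true>   // base types: emit the value directly
{
  template<class T> static void serialize(const T& v) { std::cout << "value: " << v << '\n'; }
};

template<> struct kv_dispatch<false>  // objects: recurse into their own map
{
  template<class T> static void serialize(const T& v) { v.store(); }
};

template<class T>
void kv_serialize_demo(const T& v)
{
  kv_dispatch<std::is_arithmetic<T>::value>::serialize(v);
}

struct point
{
  int x, y;
  point() : x(1), y(2) {}
  void store() const { std::cout << "object: " << x << ',' << y << '\n'; }
};

int main()
{
  kv_serialize_demo(42);      // takes the <true> (value) path
  point p;
  kv_serialize_demo(p);       // takes the <false> (object) path
}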
-// - - -#ifndef _SERVICE_IMPL_BASE_H_ -#define _SERVICE_IMPL_BASE_H_ - -#pragma comment(lib, "advapi32.lib") - - -namespace epee -{ -class service_impl_base { - public: - service_impl_base(); - virtual ~service_impl_base(); - - virtual const char *get_name() = 0; - virtual const char *get_caption() = 0; - virtual const char *get_description() = 0; - - bool run_service(); - virtual bool install(); - virtual bool remove(); - virtual bool init(); - void set_control_accepted(unsigned controls); - void set_status(unsigned state, unsigned pending = 0); - unsigned get_control_accepted(); - - private: - virtual void service_main() = 0; - virtual unsigned service_handler(unsigned control, unsigned event_code, - void *pdata) = 0; - //------------------------------------------------------------------------- - static service_impl_base*& instance(); - //------------------------------------------------------------------------- - static DWORD __stdcall _service_handler(DWORD control, DWORD event, - void *pdata, void *pcontext); - static void __stdcall service_entry(DWORD argc, char **pargs); - virtual SERVICE_FAILURE_ACTIONSA* get_failure_actions(); - - private: - SC_HANDLE m_manager; - SC_HANDLE m_service; - SERVICE_STATUS_HANDLE m_status_handle; - DWORD m_accepted_control; -}; - -inline service_impl_base::service_impl_base() { - m_manager = 0; - m_service = 0; - m_status_handle = 0; - m_accepted_control = SERVICE_ACCEPT_STOP | SERVICE_ACCEPT_SHUTDOWN - | SERVICE_ACCEPT_PAUSE_CONTINUE; - - instance() = this; -} -//----------------------------------------------------------------------------- -inline service_impl_base::~service_impl_base() { - if (m_service) { - ::CloseServiceHandle(m_service); - } - m_service = 0; - if (m_manager) { - ::CloseServiceHandle(m_manager); - } - m_manager = 0; - instance() = 0; -} -//----------------------------------------------------------------------------- -inline service_impl_base*& service_impl_base::instance() { - static service_impl_base *pservice = NULL; - return pservice; -} -//----------------------------------------------------------------------------- -inline -bool service_impl_base::install() { - CHECK_AND_ASSERT(!m_service, false); - const char *psz_descr = get_description(); - SERVICE_FAILURE_ACTIONSA* fail_acts = get_failure_actions(); - - char sz_path[MAX_PATH]; - ::GetModuleFileNameA(0, sz_path, sizeof(sz_path)); - ::GetShortPathNameA(sz_path, sz_path, sizeof(sz_path)); - - while (TRUE) { - if (!m_manager) { - m_manager = ::OpenSCManager(NULL, NULL, GENERIC_ALL); - if (!m_manager) { - int err = GetLastError(); - LOG_ERROR( - "Failed to OpenSCManager(), last err=" - << log_space::get_win32_err_descr(err)); - break; - } - } - m_service = ::CreateServiceA(m_manager, get_name(), get_caption(), - SERVICE_ALL_ACCESS, SERVICE_WIN32_OWN_PROCESS, SERVICE_DEMAND_START, - SERVICE_ERROR_IGNORE, sz_path, 0, 0, 0, 0, 0); - if (!m_service) { - int err = GetLastError(); - LOG_ERROR( - "Failed to CreateService(), last err=" - << log_space::get_win32_err_descr(err)); - break; - } - - if (psz_descr) { - SERVICE_DESCRIPTIONA sd = { (char*) psz_descr }; - if (!::ChangeServiceConfig2A(m_service, SERVICE_CONFIG_DESCRIPTION, - &sd)) { - int err = GetLastError(); - LOG_ERROR( - "Failed to ChangeServiceConfig2(SERVICE_CONFIG_DESCRIPTION), last err=" - << log_space::get_win32_err_descr(err)); - break; - } - } - - if (fail_acts) { - if (!::ChangeServiceConfig2A(m_service, SERVICE_CONFIG_FAILURE_ACTIONS, - fail_acts)) { - int err = GetLastError(); - LOG_ERROR( - "Failed to 
ChangeServiceConfig2(SERVICE_CONFIG_FAILURE_ACTIONS), last err=" - << log_space::get_win32_err_descr(err)); - break; - } - } - LOG_PRINT("Installed succesfully.", LOG_LEVEL_0); - return true; - } - LOG_PRINT("Failed to install.", LOG_LEVEL_0); - return false; -} -//----------------------------------------------------------------------------- -inline -bool service_impl_base::remove() { - CHECK_AND_ASSERT(!m_service, false); - - while (TRUE) { - if (!m_manager) { - m_manager = ::OpenSCManager(0, 0, GENERIC_ALL); - if (!m_manager) { - int err = GetLastError(); - LOG_ERROR( - "Failed to OpenSCManager(), last err=" - << log_space::get_win32_err_descr(err)); - break; - } - } - - if (!m_service) { - m_service = ::OpenServiceA(m_manager, get_name(), SERVICE_STOP | DELETE); - if (!m_service) { - int err = GetLastError(); - LOG_ERROR( - "Failed to OpenService(), last err=" - << log_space::get_win32_err_descr(err)); - break; - } - } - - SERVICE_STATUS status = { }; - if (!::ControlService(m_service, SERVICE_CONTROL_STOP, &status)) { - int err = ::GetLastError(); - if (err == ERROR_SHUTDOWN_IN_PROGRESS) - continue; - else if (err != ERROR_SERVICE_NOT_ACTIVE) { - LOG_ERROR( - "Failed to ControlService(SERVICE_CONTROL_STOP), last err=" - << log_space::get_win32_err_descr(err)); - break; - } - } - - if (!::DeleteService(m_service)) { - int err = ::GetLastError(); - LOG_ERROR( - "Failed to ControlService(SERVICE_CONTROL_STOP), last err=" - << log_space::get_win32_err_descr(err)); - break; - } - - LOG_PRINT("Removed successfully.", LOG_LEVEL_0); - break; - } - - return true; -} -//----------------------------------------------------------------------------- -inline -bool service_impl_base::init() { - return true; -} -//----------------------------------------------------------------------------- -inline -bool service_impl_base::run_service() { - CHECK_AND_ASSERT(!m_service, false); - - long error_code = 0; - - SERVICE_TABLE_ENTRYA service_table[2]; - ZeroMemory(&service_table, sizeof(service_table)); - - service_table->lpServiceName = (char*) get_name(); - service_table->lpServiceProc = service_entry; - - LOG_PRINT("[+] Start service control dispatcher for \"" << get_name() << "\"", - LOG_LEVEL_1); - - error_code = 1; - BOOL res = ::StartServiceCtrlDispatcherA(service_table); - if (!res) { - int err = GetLastError(); - LOG_PRINT( - "[+] Error starting service control dispatcher, err=" - << log_space::get_win32_err_descr(err), LOG_LEVEL_1); - return false; - } else { - LOG_PRINT("[+] End service control dispatcher for \"" << get_name() << "\"", - LOG_LEVEL_1); - } - return true; -} -//----------------------------------------------------------------------------- -inline DWORD __stdcall service_impl_base::_service_handler(DWORD control, - DWORD event, void *pdata, void *pcontext) { - CHECK_AND_ASSERT(pcontext, ERROR_CALL_NOT_IMPLEMENTED); - - service_impl_base *pservice = (service_impl_base*) pcontext; - return pservice->service_handler(control, event, pdata); -} -//----------------------------------------------------------------------------- -inline -void __stdcall service_impl_base::service_entry(DWORD argc, char **pargs) { - service_impl_base *pme = instance(); - LOG_PRINT("instance: " << pme, LOG_LEVEL_4); - if (!pme) { - LOG_ERROR("Error: at service_entry() pme = NULL"); - return; - } - pme->m_status_handle = ::RegisterServiceCtrlHandlerExA(pme->get_name(), - _service_handler, pme); - - pme->set_status(SERVICE_RUNNING); - pme->service_main(); - pme->set_status(SERVICE_STOPPED); -} 
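Taken together, install(), remove(), run_service() and service_entry() above cover the whole Windows-service lifecycle. A hedged sketch of the intended subclassing pattern (the class name and strings are invented; the virtuals and set_status() come from the deleted class):

// Hypothetical service built on epee::service_impl_base.
class my_service : public epee::service_impl_base
{
public:
  const char* get_name() override { return "my_service"; }
  const char* get_caption() override { return "My Service"; }
  const char* get_description() override { return "Example epee-based service"; }
private:
  void service_main() override
  {
    // invoked by service_entry() after it reports SERVICE_RUNNING;
    // the real work loop goes here
  }
  unsigned service_handler(unsigned control, unsigned /*event*/, void* /*pdata*/) override
  {
    if(control == SERVICE_CONTROL_STOP)
      set_status(SERVICE_STOP_PENDING);
    return NO_ERROR;
  }
};
// Typical flow: my_service s; s.install();  ...later...  s.run_service(); or s.remove();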
-//----------------------------------------------------------------------------- -inline -void service_impl_base::set_status(unsigned state, unsigned pending) { - if (!m_status_handle) - return; - - SERVICE_STATUS status = { 0 }; - status.dwServiceType = SERVICE_WIN32_OWN_PROCESS; - status.dwCurrentState = state; - status.dwControlsAccepted = m_accepted_control; - /*status.dwWin32ExitCode = NO_ERROR; - status.dwServiceSpecificExitCode = ERROR_SERVICE_SPECIFIC_ERROR; - status.dwCheckPoint = 0; - status.dwWaitHint = 0; - - status.dwCurrentState = state;*/ - - if (state == SERVICE_START_PENDING || state == SERVICE_STOP_PENDING - || state == SERVICE_CONTINUE_PENDING || state == SERVICE_PAUSE_PENDING) { - status.dwWaitHint = 2000; - status.dwCheckPoint = pending; - } - ::SetServiceStatus(m_status_handle, &status); -} -//----------------------------------------------------------------------------------------- -inline -void service_impl_base::set_control_accepted(unsigned controls) { - m_accepted_control = controls; -} -//----------------------------------------------------------------------------------------- -inline -unsigned service_impl_base::get_control_accepted() { - return m_accepted_control; -} -//----------------------------------------------------------------------------------------- -inline SERVICE_FAILURE_ACTIONSA* service_impl_base::get_failure_actions() { - // first 3 failures in 30 minutes. Service will be restarted. - // do nothing for next failures - static SC_ACTION sa[] = { { SC_ACTION_RESTART, 3 * 1000 }, { - SC_ACTION_RESTART, 3 * 1000 }, { SC_ACTION_RESTART, 3 * 1000 }, { - SC_ACTION_NONE, 0 } }; - - static SERVICE_FAILURE_ACTIONSA sfa = { 1800, // interval for failures counter - 30 min - "", NULL, 4, (SC_ACTION*) &sa }; - - // TODO: refactor this code, really unsafe! - return &sfa; -} -} - -#endif //_SERVICE_IMPL_BASE_H_ diff --git a/contrib/epee/include/sha1.h b/contrib/epee/include/sha1.h deleted file mode 100644 index ce42082f8e..0000000000 --- a/contrib/epee/include/sha1.h +++ /dev/null @@ -1,51 +0,0 @@ - -/* - Copyright (c) 2011, Micael Hildenborg - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Micael Hildenborg nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY Micael Hildenborg ''AS IS'' AND ANY - EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL Micael Hildenborg BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
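Before the sha1.h/sha1.inl pair deleted next, a usage sketch of its two-function API. Buffer sizes follow the doc comments in the header (20 bytes for the hash, 41 for the zero-terminated hex string); the digest shown for "abc" is the well-known SHA-1 test vector:

#include <cstring>
// sha1.h / sha1.inl as deleted below

int main()
{
  const char msg[] = "abc";
  unsigned char hash[20];
  char hex[41];
  sha1::calc(msg, (int)strlen(msg), hash);
  sha1::toHexString(hash, hex);
  // hex now holds "a9993e364706816aba3e25717850c26c9cd0d89d"
  return 0;
}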
- */ - -#ifndef SHA1_DEFINED -#define SHA1_DEFINED - -namespace sha1 { - - /** - @param src points to any kind of data to be hashed. - @param bytelength the number of bytes to hash from the src pointer. - @param hash should point to a buffer of at least 20 bytes of size for storing the sha1 result in. - */ - void calc(const void* src, const int bytelength, unsigned char* hash); - - /** - @param hash is 20 bytes of sha1 hash. This is the same data that is the result from the calc function. - @param hexstring should point to a buffer of at least 41 bytes of size for storing the hexadecimal representation of the hash. A zero will be written at position 40, so the buffer will be a valid zero ended string. - */ - void toHexString(const unsigned char* hash, char* hexstring); - -} // namespace sha1 - -#include "sha1.inl" - -#endif // SHA1_DEFINED diff --git a/contrib/epee/include/sha1.inl b/contrib/epee/include/sha1.inl deleted file mode 100644 index d332027240..0000000000 --- a/contrib/epee/include/sha1.inl +++ /dev/null @@ -1,179 +0,0 @@ - -/* - Copyright (c) 2011, Micael Hildenborg - All rights reserved. - - Redistribution and use in source and binary forms, with or without - modification, are permitted provided that the following conditions are met: - * Redistributions of source code must retain the above copyright - notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above copyright - notice, this list of conditions and the following disclaimer in the - documentation and/or other materials provided with the distribution. - * Neither the name of Micael Hildenborg nor the - names of its contributors may be used to endorse or promote products - derived from this software without specific prior written permission. - - THIS SOFTWARE IS PROVIDED BY Micael Hildenborg ''AS IS'' AND ANY - EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED - WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE - DISCLAIMED. IN NO EVENT SHALL Micael Hildenborg BE LIABLE FOR ANY - DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES - (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; - LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND - ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS - SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -/* - Contributors: - Gustav - Several members in the gamedev.se forum. - Gregory Petrosyan - */ - -#include "sha1.h" - -namespace sha1 { -namespace {// local -// Rotate an integer value to left. -inline const unsigned int rol(const unsigned int value, - const unsigned int steps) { - return ((value << steps) | (value >> (32 - steps))); -} - -// Sets the first 16 integers in the buffert to zero. -// Used for clearing the W buffert. 
-inline void clearWBuffert(unsigned int* buffert) { - for (int pos = 16; --pos >= 0;) - { - buffert[pos] = 0; - } -} - -inline -void innerHash(unsigned int* result, unsigned int* w) { - unsigned int a = result[0]; - unsigned int b = result[1]; - unsigned int c = result[2]; - unsigned int d = result[3]; - unsigned int e = result[4]; - - int round = 0; - -#define sha1macro(func,val) \ - { \ - const unsigned int t = rol(a, 5) + (func) + e + val + w[round]; \ - e = d; \ - d = c; \ - c = rol(b, 30); \ - b = a; \ - a = t; \ - } - - while (round < 16) { - sha1macro((b & c) | (~b & d), 0x5a827999) - ++round; - } - while (round < 20) { - w[round] = rol( - (w[round - 3] ^ w[round - 8] ^ w[round - 14] ^ w[round - 16]), 1); - sha1macro((b & c) | (~b & d), 0x5a827999) - ++round; - } - while (round < 40) { - w[round] = rol( - (w[round - 3] ^ w[round - 8] ^ w[round - 14] ^ w[round - 16]), 1); - sha1macro(b ^ c ^ d, 0x6ed9eba1) - ++round; - } - while (round < 60) { - w[round] = rol( - (w[round - 3] ^ w[round - 8] ^ w[round - 14] ^ w[round - 16]), 1); - sha1macro((b & c) | (b & d) | (c & d), 0x8f1bbcdc) - ++round; - } - while (round < 80) { - w[round] = rol( - (w[round - 3] ^ w[round - 8] ^ w[round - 14] ^ w[round - 16]), 1); - sha1macro(b ^ c ^ d, 0xca62c1d6) - ++round; - } - -#undef sha1macro - - result[0] += a; - result[1] += b; - result[2] += c; - result[3] += d; - result[4] += e; -} -} // namespace - -inline -void calc(const void* src, const int bytelength, unsigned char* hash) { - // Init the result array. - unsigned int result[5] = { 0x67452301, 0xefcdab89, 0x98badcfe, 0x10325476, - 0xc3d2e1f0 }; - - // Cast the void src pointer to be the byte array we can work with. - const unsigned char* sarray = (const unsigned char*) src; - - // The reusable round buffer - unsigned int w[80]; - - // Loop through all complete 64byte blocks. - const int endOfFullBlocks = bytelength - 64; - int endCurrentBlock; - int currentBlock(0); - - while (currentBlock <= endOfFullBlocks) { - endCurrentBlock = currentBlock + 64; - - // Init the round buffer with the 64 byte block data. - for (int roundPos = 0; currentBlock < endCurrentBlock; currentBlock += 4) - { - // This line will swap endian on big endian and keep endian on little endian. - w[roundPos++] = (unsigned int) sarray[currentBlock + 3] - | (((unsigned int) sarray[currentBlock + 2]) << 8) - | (((unsigned int) sarray[currentBlock + 1]) << 16) - | (((unsigned int) sarray[currentBlock]) << 24); - } - innerHash(result, w); - } - - // Handle the last and not full 64 byte block if existing. - endCurrentBlock = bytelength - currentBlock; - clearWBuffert(w); - int lastBlockBytes = 0; - for (; lastBlockBytes < endCurrentBlock; ++lastBlockBytes) { - w[lastBlockBytes >> 2] |= (unsigned int) sarray[lastBlockBytes - + currentBlock] << ((3 - (lastBlockBytes & 3)) << 3); - } - w[lastBlockBytes >> 2] |= 0x80 << ((3 - (lastBlockBytes & 3)) << 3); - if (endCurrentBlock >= 56) { - innerHash(result, w); - clearWBuffert(w); - } - w[15] = bytelength << 3; - innerHash(result, w); - - // Store hash in result pointer, and make sure we get in in the correct order on both endian models. 
- for (int hashByte = 20; --hashByte >= 0;) { - hash[hashByte] = (result[hashByte >> 2] >> (((3 - hashByte) & 0x3) << 3)) - & 0xff; - } -} -inline -void toHexString(const unsigned char* hash, char* hexstring) { - const char hexDigits[] = { "0123456789abcdef" }; - - for (int hashByte = 20; --hashByte >= 0;) - { - hexstring[hashByte << 1] = hexDigits[(hash[hashByte] >> 4) & 0xf]; - hexstring[(hashByte << 1) + 1] = hexDigits[hash[hashByte] & 0xf]; - } - hexstring[40] = 0; -} -} // namespace sha1 diff --git a/contrib/epee/include/soci_helper.h b/contrib/epee/include/soci_helper.h deleted file mode 100644 index 813edc1fcf..0000000000 --- a/contrib/epee/include/soci_helper.h +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#pragma once -#include "soci.h" -#include "soci-postgresql.h" - -using namespace epee; -namespace soci -{ - - template <> - struct type_conversion<uint64_t> - { - typedef long long base_type; - - static void from_base(base_type a_, indicator ind, uint64_t & mi) - { - if (ind == i_null) - { - mi = 0; - //throw soci_error("Null value not allowed for this type"); - } - mi = (uint64_t)a_; - //mi.set(i); - } - - static void to_base(const uint64_t & mi, base_type & i, indicator & ind) - { - i = (base_type)mi; - ind = i_ok; - } - }; - - - - template <> - struct type_conversion<bool> - { - typedef int base_type; - - static void from_base(base_type a_, indicator ind, bool& mi) - { - if (ind == i_null) - { - mi = false; - //throw soci_error("Null value not allowed for this type"); - } - mi = a_? true:false; - //mi.set(i); - } - - static void to_base(const bool & mi, base_type & i, indicator & ind) - { - i = mi?
1:0; - ind = i_ok; - } - }; - - - - class per_thread_session - { - public: - bool init(const std::string& connection_string) - { - m_connection_string = connection_string; - - return true; - } - - soci::session& get() - { - - //soci::session - - m_db_connections_lock.lock(); - boost::shared_ptr<soci::session>& conn_ptr = m_db_connections[epee::misc_utils::get_thread_string_id()]; - m_db_connections_lock.unlock(); - if(!conn_ptr.get()) - { - conn_ptr.reset(new soci::session(soci::postgresql, m_connection_string)); - } - //init new connection - return *conn_ptr.get(); - } - - bool reopen() - { - //soci::session - - m_db_connections_lock.lock(); - boost::shared_ptr<soci::session>& conn_ptr = m_db_connections[misc_utils::get_thread_string_id()]; - m_db_connections_lock.unlock(); - if(conn_ptr.get()) - { - conn_ptr->close(); - conn_ptr.reset(new soci::session(soci::postgresql, m_connection_string)); - } - - //init new connection - return true; - } - - //---------------------------------------------------------------------------------------------- - bool check_status() - { - return true; - } - - protected: - private: - std::map<std::string, boost::shared_ptr<soci::session> > m_db_connections; - epee::critical_section m_db_connections_lock; - std::string m_connection_string; - }; -} -/*}*/ \ No newline at end of file diff --git a/contrib/epee/include/static_initializer.h b/contrib/epee/include/static_initializer.h deleted file mode 100644 index 1510805c25..0000000000 --- a/contrib/epee/include/static_initializer.h +++ /dev/null @@ -1,55 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
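The per_thread_session just removed keeps one lazily opened PostgreSQL session per thread, keyed by the thread's string id, and the two type_conversion specializations let uint64_t and bool round-trip through SOCI's long long/int base types. A hedged usage sketch (the connection string, table and column are invented; the SOCI streaming API is standard):

// Hypothetical query through the deleted helper.
soci::per_thread_session db;
db.init("dbname=test user=postgres");
soci::session& sql = db.get();   // opens this thread's session on first use
uint64_t height = 0;
sql << "select max(height) from blocks", soci::into(height); // uses type_conversion<uint64_t>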
-// - - - -#ifndef _STATIC_INITIALIZER_H_ -#define _STATIC_INITIALIZER_H_ - -namespace epee -{ -/*********************************************************************** -class initializer - useful to initialize some static classes - which have init() and un_init() static members -************************************************************************/ - -template<class to_initialize> -class initializer -{ -public: - initializer() - { - to_initialize::init(); - } - ~initializer() - { - to_initialize::un_init(); - } -}; - -} -#endif //_STATIC_INITIALIZER_H_ diff --git a/contrib/epee/include/storages/activity_notifier.h b/contrib/epee/include/storages/activity_notifier.h deleted file mode 100644 index 14b6ebbfbc..0000000000 --- a/contrib/epee/include/storages/activity_notifier.h +++ /dev/null @@ -1,132 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
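The initializer template just removed is a small RAII shim: constructing one calls T::init(), destroying it calls T::un_init(), so a namespace-scope instance brackets main(). A hedged sketch (the subsystem struct is invented):

// Hypothetical subsystem tied to a static object's lifetime.
struct winsock_subsystem
{
  static void init()    { /* e.g. WSAStartup */ }
  static void un_init() { /* e.g. WSACleanup */ }
};

// Runs init() during static initialization, un_init() at static destruction.
static epee::initializer<winsock_subsystem> g_winsock_initializer;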
diff --git a/contrib/epee/include/storages/activity_notifier.h b/contrib/epee/include/storages/activity_notifier.h
deleted file mode 100644
index 14b6ebbfbc..0000000000
--- a/contrib/epee/include/storages/activity_notifier.h
+++ /dev/null
@@ -1,132 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-// [BSD 3-clause license text, identical to the header reproduced in full above]
-//
-
-
-#pragma once
-
-#include "inmemtoxml.h"
-
-//#include "levin/levin_server.h"
-
-namespace epee
-{
-
-class activity_printer_base
-{
-public:
-  activity_printer_base(){}
-  virtual ~activity_printer_base(){}
-};
-
-template<class A>
-class notify_activity_printer: public activity_printer_base
-{
-public:
-  notify_activity_printer(int level, A& arg, bool is_notify_mode = true):m_ref_arg(arg), m_level(level), m_is_notify_mode(is_notify_mode)
-  {
-    m_command_name = typeid(m_ref_arg).name();
-    m_command_name.erase(0, 7);
-    m_command_name.erase(m_command_name.size()-10, m_command_name.size()-1);
-    if(level == log_space::get_set_log_detalisation_level())
-    {
-      LOG_PRINT(m_command_name, level);
-    }
-    else if(level+1 == log_space::get_set_log_detalisation_level())
-    {
-      LOG_PRINT(" -->>" << m_command_name, level);
-    }
-    else if(level+2 == log_space::get_set_log_detalisation_level())
-    {
-      LOG_PRINT(" -->>" << m_command_name << "\n" << StorageNamed::xml::get_t_as_xml(m_ref_arg), level);
-    }
-  }
-
-  virtual ~notify_activity_printer()
-  {
-    if(m_is_notify_mode)
-    {
-      if(m_level+1 == log_space::get_set_log_detalisation_level())
-      {
-        LOG_PRINT(" <<--" << m_command_name, m_level);
-      }
-    }
-  }
-protected:
-  std::string m_command_name;
-  A& m_ref_arg;
-  int m_level;
-  bool m_is_notify_mode;
-};
-
-template<class A, class R>
-class command_activity_printer: public notify_activity_printer<A>
-{
-public:
-  command_activity_printer(int level, A& arg, R& rsp):notify_activity_printer<A>(level, arg, false), m_ref_rsp(rsp)
-  {
-  }
-
-  virtual ~command_activity_printer()
-  {
-    if(m_level+1 == log_space::get_set_log_detalisation_level())
-    {
-      LOG_PRINT(" <<--" << m_command_name, m_level);
-    }
-    else if(m_level+2 == log_space::get_set_log_detalisation_level())
-    {
-      LOG_PRINT(" <<--" << m_command_name << "\n" << StorageNamed::trace_as_xml(m_ref_rsp), m_level);
-    }
-  }
-private:
-  R& m_ref_rsp;
-};
-
-template<class A, class R>
-activity_printer_base* create_activity_printer(int level, A& arg, R& rsp)
-{
-  return new command_activity_printer<A, R>(level, arg, rsp);
-}
-
-template<class A>
-activity_printer_base* create_activity_printer(int level, A& arg)
-{
-  return new notify_activity_printer<A>(level, arg);
-}
-
-}
-
-#define PRINT_COMMAND_ACTIVITY(level) boost::shared_ptr<activity_printer_base> local_activity_printer(create_activity_printer(level, in_struct, out_struct));
-#define PRINT_NOTIFY_ACTIVITY(level) boost::shared_ptr<activity_printer_base> local_activity_printer(create_activity_printer(level, in_struct));
-
-#define PRINT_ACTIVITY(level) \
-{std::string some_str = typeid(in_struct).name(); \
-  some_str.erase(0, 7); \
-  some_str.erase(some_str.size()-10, some_str.size()-1); \
-  LOG_PRINT(some_str, level);}
-
-}
-
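notify_activity_printer logs " -->>" on construction and " <<--" on destruction, so a handler gets enter/exit tracing from a single stack object. A minimal standalone sketch of the same RAII idea (plain std::cout in place of epee's LOG_PRINT machinery):

    #include <iostream>
    #include <string>

    struct scope_tracer
    {
      std::string name;
      explicit scope_tracer(const std::string& n): name(n) { std::cout << " -->>" << name << "\n"; }
      ~scope_tracer() { std::cout << " <<--" << name << "\n"; }  // fires on every exit path
    };

    void handle_request()
    {
      scope_tracer tracer("handle_request");
      // ... handler body ...
    }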
diff --git a/contrib/epee/include/storages/crypted_storage.h b/contrib/epee/include/storages/crypted_storage.h
deleted file mode 100644
index d6e6edcba8..0000000000
--- a/contrib/epee/include/storages/crypted_storage.h
+++ /dev/null
@@ -1,62 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-// [BSD 3-clause license text, identical to the header reproduced in full above]
-//
-
-
-#ifndef _CRYPTED_STORAGE_H_
-#define _CRYPTED_STORAGE_H_
-
-#include "cryptopp_helper.h"
-
-namespace epee
-{
-template<class t_base_storage, class crypt_provider, class t_key_provider>
-class crypted_storage: public t_base_storage
-{
-public:
-  size_t PackToSolidBuffer(std::string& targetObj)
-  {
-    size_t res = t_base_storage::PackToSolidBuffer(targetObj);
-    if(res <= 0)
-      return res;
-
-    if(!crypt_provider::encrypt(targetObj, t_key_provider::get_storage_default_key()))
-      return 0;
-
-    return targetObj.size();
-  }
-
-  size_t LoadFromSolidBuffer(const std::string& pTargetObj)
-  {
-    std::string buff_to_decrypt = pTargetObj;
-    if(crypt_provider::decrypt(buff_to_decrypt, t_key_provider::get_storage_default_key()))
-      return t_base_storage::LoadFromSolidBuffer(buff_to_decrypt);
-
-    return 0;
-  }
-};
-}
-
-#endif //_CRYPTED_STORAGE_H_
\ No newline at end of file
diff --git a/contrib/epee/include/storages/gzipped_inmemstorage.h b/contrib/epee/include/storages/gzipped_inmemstorage.h
deleted file mode 100644
index 5c53fffa7b..0000000000
--- a/contrib/epee/include/storages/gzipped_inmemstorage.h
+++ /dev/null
@@ -1,68 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-// [BSD 3-clause license text, identical to the header reproduced in full above]
-//
-
-
-#ifndef _GZIPPED_INMEMSTORAGE_H_
-#define _GZIPPED_INMEMSTORAGE_H_
-
-#include "zlib_helper.h"
-namespace epee
-{
-namespace StorageNamed
-{
-
-  template<class t_base_storage>
-  class gziped_storage: public t_base_storage
-  {
-  public:
-    size_t PackToSolidBuffer(std::string& targetObj)
-    {
-      size_t res = t_base_storage::PackToSolidBuffer(targetObj);
-      if(res <= 0)
-        return res;
-
-      if(!zlib_helper::pack(targetObj))
-        return 0;
-
-      return targetObj.size();
-    }
-
-    size_t LoadFromSolidBuffer(const std::string& pTargetObj)
-    {
-      std::string buff_to_ungzip = pTargetObj;
-      if(zlib_helper::unpack(buff_to_ungzip))
-        return t_base_storage::LoadFromSolidBuffer(buff_to_ungzip);
-
-      return 0;
-    }
-
-  private:
-  };
-
-}
-}
-
-#endif
\ No newline at end of file
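crypted_storage and gziped_storage are stacking decorators: each overrides PackToSolidBuffer/LoadFromSolidBuffer, delegates to the base implementation, then transforms the buffer in place. Layering them would look roughly like this (base_storage, aes_provider and default_key_provider are hypothetical placeholders for the real policy classes, which are not shown in this diff):

    // pack: serialize -> gzip -> encrypt; load runs the inverse chain
    typedef epee::crypted_storage<
              epee::StorageNamed::gziped_storage<base_storage>,
              aes_provider,
              default_key_provider
            > packed_encrypted_storage;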
diff --git a/contrib/epee/include/storages/http_abstract_invoke.h b/contrib/epee/include/storages/http_abstract_invoke.h
deleted file mode 100644
index 00ee8a4ad3..0000000000
--- a/contrib/epee/include/storages/http_abstract_invoke.h
+++ /dev/null
@@ -1,126 +0,0 @@
-
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-// [BSD 3-clause license text, identical to the header reproduced in full above]
-//
-
-#pragma once
-#include "portable_storage_template_helper.h"
-#include "net/http_base.h"
-#include "net/http_server_handlers_map2.h"
-
-namespace epee
-{
-  namespace net_utils
-  {
-    template<class t_request, class t_response, class t_transport>
-    bool invoke_http_json_remote_command2(const std::string& url, t_request& out_struct, t_response& result_struct, t_transport& transport, unsigned int timeout = 5000, const std::string& method = "GET")
-    {
-      std::string req_param;
-      if(!serialization::store_t_to_json(out_struct, req_param))
-        return false;
-
-      const http::http_response_info* pri = NULL;
-      if(!invoke_request(url, transport, timeout, &pri, method, req_param))
-      {
-        LOG_PRINT_L1("Failed to invoke http request to " << url);
-        return false;
-      }
-
-      if(!pri->m_response_code)
-      {
-        LOG_PRINT_L1("Failed to invoke http request to " << url << ", internal error (null response ptr)");
-        return false;
-      }
-
-      if(pri->m_response_code != 200)
-      {
-        LOG_PRINT_L1("Failed to invoke http request to " << url << ", wrong response code: " << pri->m_response_code);
-        return false;
-      }
-
-      return serialization::load_t_from_json(result_struct, pri->m_body);
-    }
-
-
-
-    template<class t_request, class t_response, class t_transport>
-    bool invoke_http_bin_remote_command2(const std::string& url, t_request& out_struct, t_response& result_struct, t_transport& transport, unsigned int timeout = 5000, const std::string& method = "GET")
-    {
-      std::string req_param;
-      if(!serialization::store_t_to_binary(out_struct, req_param))
-        return false;
-
-      const http::http_response_info* pri = NULL;
-      if(!invoke_request(url, transport, timeout, &pri, method, req_param))
-      {
-        LOG_PRINT_L1("Failed to invoke http request to " << url);
-        return false;
-      }
-
-      if(!pri->m_response_code)
-      {
-        LOG_PRINT_L1("Failed to invoke http request to " << url << ", internal error (null response ptr)");
-        return false;
-      }
-
-      if(pri->m_response_code != 200)
-      {
-        LOG_PRINT_L1("Failed to invoke http request to " << url << ", wrong response code: " << pri->m_response_code);
-        return false;
-      }
-
-      return serialization::load_t_from_binary(result_struct, pri->m_body);
-    }
-
-    template<class t_request, class t_response, class t_transport>
-    bool invoke_http_json_rpc(const std::string& url, const std::string& method_name, t_request& out_struct, t_response& result_struct, t_transport& transport, unsigned int timeout = 5000, const std::string& http_method = "GET", const std::string& req_id = "0")
-    {
-      epee::json_rpc::request<t_request> req_t = AUTO_VAL_INIT(req_t);
-      req_t.jsonrpc = "2.0";
-      req_t.id = req_id;
-      req_t.method = method_name;
-      req_t.params = out_struct;
-      epee::json_rpc::response<t_response, epee::json_rpc::error> resp_t = AUTO_VAL_INIT(resp_t);
-      if(!epee::net_utils::invoke_http_json_remote_command2(url, req_t, resp_t, transport, timeout, http_method))
-      {
-        return false;
-      }
-      if(resp_t.error.code || resp_t.error.message.size())
-      {
-        LOG_ERROR("RPC call of \"" << method_name << "\" returned error: " << resp_t.error.code << ", message: " << resp_t.error.message);
-        return false;
-      }
-      result_struct = resp_t.result;
-      return true;
-    }
-
-    template<class t_command, class t_transport>
-    bool invoke_http_json_rpc(const std::string& url, typename t_command::request& out_struct, typename t_command::response& result_struct, t_transport& transport, unsigned int timeout = 5000, const std::string& http_method = "GET", const std::string& req_id = "0")
-    {
-      return invoke_http_json_rpc(url, t_command::methodname(), out_struct, result_struct, transport, timeout, http_method, req_id);
-    }
-
-  }
-}
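invoke_http_json_rpc wraps the caller's request struct in a JSON-RPC 2.0 envelope and unwraps "result" from the reply, failing on any non-200 HTTP code or non-empty "error". On the wire the exchange looks roughly like this (the method name and result fields are illustrative, not taken from this diff):

    --> {"jsonrpc": "2.0", "id": "0", "method": "getblockcount", "params": {}}
    <-- {"jsonrpc": "2.0", "id": "0", "result": {"count": 123456, "status": "OK"}}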
diff --git a/contrib/epee/include/storages/levin_abstract_invoke2.h b/contrib/epee/include/storages/levin_abstract_invoke2.h
deleted file mode 100644
index da12c10607..0000000000
--- a/contrib/epee/include/storages/levin_abstract_invoke2.h
+++ /dev/null
@@ -1,291 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-// [BSD 3-clause license text, identical to the header reproduced in full above]
-//
-
-#pragma once
-
-#include "portable_storage_template_helper.h"
-#include <boost/utility/value_init.hpp>
-#include "net/levin_base.h"
-
-namespace epee
-{
-  namespace net_utils
-  {
-    template<class t_arg, class t_result, class t_transport>
-    bool invoke_remote_command2(int command, const t_arg& out_struct, t_result& result_struct, t_transport& transport)
-    {
-      if(!transport.is_connected())
-        return false;
-
-      serialization::portable_storage stg;
-      out_struct.store(stg);
-      std::string buff_to_send, buff_to_recv;
-      stg.store_to_binary(buff_to_send);
-
-      int res = transport.invoke(command, buff_to_send, buff_to_recv);
-      if( res <=0 )
-      {
-        LOG_PRINT_RED("Failed to invoke command " << command << " return code " << res, LOG_LEVEL_1);
-        return false;
-      }
-      serialization::portable_storage stg_ret;
-      if(!stg_ret.load_from_binary(buff_to_recv))
-      {
-        LOG_ERROR("Failed to load_from_binary on command " << command);
-        return false;
-      }
-      result_struct.load(stg_ret);
-      return true;
-    }
-
-    template<class t_arg, class t_transport>
-    bool notify_remote_command2(int command, const t_arg& out_struct, t_transport& transport)
-    {
-      if(!transport.is_connected())
-        return false;
-
-      serialization::portable_storage stg;
-      out_struct.store(&stg);
-      std::string buff_to_send;
-      stg.store_to_binary(buff_to_send);
-
-      int res = transport.notify(command, buff_to_send);
-      if(res <=0 )
-      {
-        LOG_ERROR("Failed to notify command " << command << " return code " << res);
-        return false;
-      }
-      return true;
-    }
-
-    template<class t_arg, class t_result, class t_transport>
-    bool invoke_remote_command2(boost::uuids::uuid conn_id, int command, const t_arg& out_struct, t_result& result_struct, t_transport& transport)
-    {
-
-      typename serialization::portable_storage stg;
-      out_struct.store(stg);
-      std::string buff_to_send, buff_to_recv;
-      stg.store_to_binary(buff_to_send);
-
-      int res = transport.invoke(command, buff_to_send, buff_to_recv, conn_id);
-      if( res <=0 )
-      {
-        LOG_PRINT_L1("Failed to invoke command " << command << " return code " << res);
-        return false;
-      }
-      typename serialization::portable_storage stg_ret;
-      if(!stg_ret.load_from_binary(buff_to_recv))
-      {
-        LOG_ERROR("Failed to load_from_binary on command " << command);
-        return false;
-      }
-      result_struct.load(stg_ret);
-
-      return true;
-    }
-
-    template<class t_result, class t_arg, class callback_t, class t_transport>
-    bool async_invoke_remote_command2(boost::uuids::uuid conn_id, int command, const t_arg& out_struct, t_transport& transport, callback_t cb, size_t inv_timeout = LEVIN_DEFAULT_TIMEOUT_PRECONFIGURED)
-    {
-      typename serialization::portable_storage stg;
-      const_cast<t_arg&>(out_struct).store(stg);//TODO: add true const support to searilzation
-      std::string buff_to_send, buff_to_recv;
-      stg.store_to_binary(buff_to_send);
-
-      int res = transport.invoke_async(command, buff_to_send, conn_id, [cb, command](int code, const std::string& buff, typename t_transport::connection_context& context)->bool
-      {
-        t_result result_struct = AUTO_VAL_INIT(result_struct);
-        if( code <=0 )
-        {
-          LOG_PRINT_L1("Failed to invoke command " << command << " return code " << code);
-          cb(code, result_struct, context);
-          return false;
-        }
-        serialization::portable_storage stg_ret;
-        if(!stg_ret.load_from_binary(buff))
-        {
-          LOG_ERROR("Failed to load_from_binary on command " << command);
-          cb(LEVIN_ERROR_FORMAT, result_struct, context);
-          return false;
-        }
-        result_struct.load(stg_ret);
-
-        cb(code, result_struct, context);
-        return true;
-      }, inv_timeout);
-      if( res <=0 )
-      {
-        LOG_PRINT_L1("Failed to invoke command " << command << " return code " << res);
-        return false;
-      }
-      return true;
-    }
-
-    template<class t_arg, class t_transport>
-    bool notify_remote_command2(boost::uuids::uuid conn_id, int command, const t_arg& out_struct, t_transport& transport)
-    {
-
-      serialization::portable_storage stg;
-      out_struct.store(stg);
-      std::string buff_to_send, buff_to_recv;
-      stg.store_to_binary(buff_to_send);
-
-      int res = transport.notify(command, buff_to_send, conn_id);
-      if(res <=0 )
-      {
-        LOG_PRINT_RED_L0("Failed to notify command " << command << " return code " << res);
-        return false;
-      }
-      return true;
-    }
-    //----------------------------------------------------------------------------------------------------
-    //----------------------------------------------------------------------------------------------------
-    template<class t_owner, class t_in_type, class t_out_type, class t_context, class callback_t>
-    int buff_to_t_adapter(int command, const std::string& in_buff, std::string& buff_out, callback_t cb, t_context& context )
-    {
-      serialization::portable_storage strg;
-      if(!strg.load_from_binary(in_buff))
-      {
-        LOG_ERROR("Failed to load_from_binary in command " << command);
-        return -1;
-      }
-      boost::value_initialized<t_in_type> in_struct;
-      boost::value_initialized<t_out_type> out_struct;
-
-      static_cast<t_in_type&>(in_struct).load(strg);
-      int res = cb(command, static_cast<t_in_type&>(in_struct), static_cast<t_out_type&>(out_struct), context);
-      serialization::portable_storage strg_out;
-      static_cast<t_out_type&>(out_struct).store(strg_out);
-
-      if(!strg_out.store_to_binary(buff_out))
-      {
-        LOG_ERROR("Failed to store_to_binary in command" << command);
-        return -1;
-      }
-
-      return res;
-    };
-
-    template<class t_owner, class t_in_type, class t_context, class callback_t>
-    int buff_to_t_adapter(t_owner* powner, int command, const std::string& in_buff, callback_t cb, t_context& context)
-    {
-      serialization::portable_storage strg;
-      if(!strg.load_from_binary(in_buff))
-      {
-        LOG_ERROR("Failed to load_from_binary in notify " << command);
-        return -1;
-      }
-      boost::value_initialized<t_in_type> in_struct;
-      static_cast<t_in_type&>(in_struct).load(strg);
-      return cb(command, in_struct, context);
-    };
-
-#define CHAIN_LEVIN_INVOKE_MAP2(context_type) \
-  int invoke(int command, const std::string& in_buff, std::string& buff_out, context_type& context) \
-  { \
-    bool handled = false; \
-    return handle_invoke_map(false, command, in_buff, buff_out, context, handled); \
-  }
-
-#define CHAIN_LEVIN_NOTIFY_MAP2(context_type) \
-  int notify(int command, const std::string& in_buff, context_type& context) \
-  { \
-    bool handled = false; std::string fake_str;\
-    return handle_invoke_map(true, command, in_buff, fake_str, context, handled); \
-  }
-
-
-#define CHAIN_LEVIN_INVOKE_MAP() \
-  int invoke(int command, const std::string& in_buff, std::string& buff_out, epee::net_utils::connection_context_base& context) \
-  { \
-    bool handled = false; \
-    return handle_invoke_map(false, command, in_buff, buff_out, context, handled); \
-  }
-
-#define CHAIN_LEVIN_NOTIFY_MAP() \
-  int notify(int command, const std::string& in_buff, epee::net_utils::connection_context_base& context) \
-  { \
-    bool handled = false; std::string fake_str;\
-    return handle_invoke_map(true, command, in_buff, fake_str, context, handled); \
-  }
-
-#define CHAIN_LEVIN_NOTIFY_STUB() \
-  int notify(int command, const std::string& in_buff, epee::net_utils::connection_context_base& context) \
-  { \
-    return -1; \
-  }
-
-#define BEGIN_INVOKE_MAP2(owner_type) \
-  template<class t_context> int handle_invoke_map(bool is_notify, int command, const std::string& in_buff, std::string& buff_out, t_context& context, bool& handled) \
-  { \
-    typedef owner_type internal_owner_type_name;
-
-#define HANDLE_INVOKE2(command_id, func, type_name_in, typename_out) \
-  if(!is_notify && command_id == command) \
-  {handled=true;return epee::net_utils::buff_to_t_adapter<internal_owner_type_name, type_name_in, typename_out>(this, command, in_buff, buff_out, boost::bind(func, this, _1, _2, _3, _4), context);}
-
-#define HANDLE_INVOKE_T2(COMMAND, func) \
-  if(!is_notify && COMMAND::ID == command) \
-  {handled=true;return epee::net_utils::buff_to_t_adapter<internal_owner_type_name, typename COMMAND::request, typename COMMAND::response>(command, in_buff, buff_out, boost::bind(func, this, _1, _2, _3, _4), context);}
-
-
-#define HANDLE_NOTIFY2(command_id, func, type_name_in) \
-  if(is_notify && command_id == command) \
-  {handled=true;return epee::net_utils::buff_to_t_adapter<internal_owner_type_name, type_name_in>(this, command, in_buff, boost::bind(func, this, _1, _2, _3), context);}
-
-#define HANDLE_NOTIFY_T2(NOTIFY, func) \
-  if(is_notify && NOTIFY::ID == command) \
-  {handled=true;return epee::net_utils::buff_to_t_adapter<internal_owner_type_name, typename NOTIFY::request>(this, command, in_buff, boost::bind(func, this, _1, _2, _3), context);}
-
-
-#define CHAIN_INVOKE_MAP2(func) \
-  { \
-    int res = func(is_notify, command, in_buff, buff_out, context, handled); \
-    if(handled) \
-      return res; \
-  }
-
-#define CHAIN_INVOKE_MAP_TO_OBJ2(obj) \
-  { \
-    int res = obj.handle_invoke_map(is_notify, command, in_buff, buff_out, context, handled); \
-    if(handled) \
-      return res; \
-  }
-
-#define CHAIN_INVOKE_MAP_TO_OBJ_FORCE_CONTEXT(obj, context_type) \
-  { \
-    int res = obj.handle_invoke_map(is_notify, command, in_buff, buff_out, static_cast<context_type&>(context), handled); \
-    if(handled) return res; \
-  }
-
-
-#define END_INVOKE_MAP2() \
-  LOG_ERROR("Unkonown command:" << command); \
-  return LEVIN_ERROR_CONNECTION_HANDLER_NOT_DEFINED; \
-  }
-  }
-}
-
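Both sides of these helpers assume a command type exposing a numeric ID plus request/response structs that implement store()/load() against portable_storage. A skeletal command definition in that style (names and the ID value are illustrative, not from the tree):

    struct COMMAND_EXAMPLE_PING
    {
      const static int ID = 1001;

      struct request
      {
        uint64_t nonce;
        // store()/load() would normally be generated by the epee serialization macros
        bool store(epee::serialization::portable_storage& st) const;
        bool load(epee::serialization::portable_storage& st);
      };

      struct response
      {
        uint64_t nonce;
        bool store(epee::serialization::portable_storage& st) const;
        bool load(epee::serialization::portable_storage& st);
      };
    };

    // caller side:
    //   COMMAND_EXAMPLE_PING::request req;  COMMAND_EXAMPLE_PING::response rsp;
    //   epee::net_utils::invoke_remote_command2(COMMAND_EXAMPLE_PING::ID, req, rsp, transport);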
diff --git a/contrib/epee/include/storages/parserse_base_utils.h b/contrib/epee/include/storages/parserse_base_utils.h
deleted file mode 100644
index baafb56230..0000000000
--- a/contrib/epee/include/storages/parserse_base_utils.h
+++ /dev/null
@@ -1,260 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-// [BSD 3-clause license text, identical to the header reproduced in full above]
-//
-
-
-
-#pragma once
-
-namespace epee
-{
-namespace misc_utils
-{
-  namespace parse
-  {
-    inline std::string transform_to_escape_sequence(const std::string& src)
-    {
-      //std::stringstream res;
-      std::string res;
-      for(std::string::const_iterator it = src.begin(); it!=src.end(); ++it)
-      {
-        switch(*it)
-        {
-        case '\b': //Backspace (ascii code 08)
-          res+="\\b"; break;
-        case '\f': //Form feed (ascii code 0C)
-          res+="\\f"; break;
-        case '\n': //New line
-          res+="\\n"; break;
-        case '\r': //Carriage return
-          res+="\\r"; break;
-        case '\t': //Tab
-          res+="\\t"; break;
-        case '\v': //Vertical tab
-          res+="\\v"; break;
-        //case '\'': //Apostrophe or single quote
-        //  res+="\\'"; break;
-        case '"': //Double quote
-          res+="\\\""; break;
-        case '\\': //Backslash caracter
-          res+="\\\\"; break;
-        case '/': //Backslash caracter
-          res+="\\/"; break;
-        default:
-          res.push_back(*it);
-        }
-      }
-      return res;
-    }
-    /*
-
-      \b  Backspace (ascii code 08)
-      \f  Form feed (ascii code 0C)
-      \n  New line
-      \r  Carriage return
-      \t  Tab
-      \v  Vertical tab
-      \'  Apostrophe or single quote
-      \"  Double quote
-      \\  Backslash character
-
-    */
-    inline void match_string2(std::string::const_iterator& star_end_string, std::string::const_iterator buf_end, std::string& val)
-    {
-      val.clear();
-      bool escape_mode = false;
-      std::string::const_iterator it = star_end_string;
-      ++it;
-      for(;it != buf_end;it++)
-      {
-        if(escape_mode/*prev_ch == '\\'*/)
-        {
-          switch(*it)
-          {
-          case 'b': //Backspace (ascii code 08)
-            val.push_back(0x08);break;
-          case 'f': //Form feed (ascii code 0C)
-            val.push_back(0x0C);break;
-          case 'n': //New line
-            val.push_back('\n');break;
-          case 'r': //Carriage return
-            val.push_back('\r');break;
-          case 't': //Tab
-            val.push_back('\t');break;
-          case 'v': //Vertical tab
-            val.push_back('\v');break;
-          case '\'': //Apostrophe or single quote
-            val.push_back('\'');break;
-          case '"': //Double quote
-            val.push_back('"');break;
-          case '\\': //Backslash character
-            val.push_back('\\');break;
-          case '/': //Slash character
-            val.push_back('/');break;
-          default:
-            val.push_back(*it);
-            LOG_PRINT_L0("Unknown escape sequence :\"\\" << *it << "\"");
-          }
-          escape_mode = false;
-        }else if(*it == '"')
-        {
-          star_end_string = it;
-          return;
-        }else if(*it == '\\')
-        {
-          escape_mode = true;
-        }
-        else
-        {
-          val.push_back(*it);
-        }
-      }
-      ASSERT_MES_AND_THROW("Failed to match string in json entry: " << std::string(star_end_string, buf_end));
-    }
-    inline bool match_string(std::string::const_iterator& star_end_string, std::string::const_iterator buf_end, std::string& val)
-    {
-      try
-      {
-
-        match_string2(star_end_string, buf_end, val);
-        return true;
-      }
-      catch(...)
-      {
-        return false;
-      }
-    }
-    inline void match_number2(std::string::const_iterator& star_end_string, std::string::const_iterator buf_end, std::string& val, bool& is_float_val, bool& is_signed_val)
-    {
-      val.clear();
-      is_float_val = false;
-      for(std::string::const_iterator it = star_end_string;it != buf_end;it++)
-      {
-        if(isdigit(*it) || (it == star_end_string && *it == '-') || (val.size() && *it == '.' ) || (is_float_val && (*it == 'e' || *it == 'E' || *it == '-' || *it == '+' )) )
-        {
-          if(!val.size() && *it == '-')
-            is_signed_val = true;
-          if(*it == '.' )
-            is_float_val = true;
-          val.push_back(*it);
-        }
-        else
-        {
-          if(val.size())
-          {
-            star_end_string = --it;
-            return;
-          }
-          else
-            ASSERT_MES_AND_THROW("wrong number in json entry: " << std::string(star_end_string, buf_end));
-        }
-      }
-      ASSERT_MES_AND_THROW("wrong number in json entry: " << std::string(star_end_string, buf_end));
-    }
-    inline bool match_number(std::string::const_iterator& star_end_string, std::string::const_iterator buf_end, std::string& val)
-    {
-      try
-      {
-        bool is_v_float = false;bool is_signed_val = false;
-        match_number2(star_end_string, buf_end, val, is_v_float, is_signed_val);
-        return !is_v_float;
-      }
-      catch(...)
-      {
-        return false;
-      }
-    }
-    inline void match_word2(std::string::const_iterator& star_end_string, std::string::const_iterator buf_end, std::string& val)
-    {
-      val.clear();
-
-      for(std::string::const_iterator it = star_end_string;it != buf_end;it++)
-      {
-        if(!isalpha(*it))
-        {
-          val.assign(star_end_string, it);
-          if(val.size())
-          {
-            star_end_string = --it;
-            return;
-          }else
-            ASSERT_MES_AND_THROW("failed to match word number in json entry: " << std::string(star_end_string, buf_end));
-        }
-      }
-      ASSERT_MES_AND_THROW("failed to match word number in json entry: " << std::string(star_end_string, buf_end));
-    }
-    inline bool match_word(std::string::const_iterator& star_end_string, std::string::const_iterator buf_end, std::string& val)
-    {
-      try
-      {
-        match_word2(star_end_string, buf_end, val);
-        return true;
-      }
-      catch(...)
-      {
-        return false;
-      }
-    }
-    inline bool match_word_with_extrasymb(std::string::const_iterator& star_end_string, std::string::const_iterator buf_end, std::string& val)
-    {
-      val.clear();
-
-      for(std::string::const_iterator it = star_end_string;it != buf_end;it++)
-      {
-        if(!isalnum(*it) && *it != '-' && *it != '_')
-        {
-          val.assign(star_end_string, it);
-          if(val.size())
-          {
-            star_end_string = --it;
-            return true;
-          }else
-            return false;
-        }
-      }
-      return false;
-    }
-    inline bool match_word_til_equal_mark(std::string::const_iterator& star_end_string, std::string::const_iterator buf_end, std::string::const_iterator& word_end)
-    {
-      word_end = star_end_string;
-
-      for(std::string::const_iterator it = star_end_string;it != buf_end;it++)
-      {
-        if(isspace(*it))
-        {
-
-          continue;
-        }else if( *it == '=' )
-        {
-          star_end_string = it;
-          word_end = it;
-          return true;
-        }
-      }
-      return false;
-    }
-  }
-}
-}
\ No newline at end of file
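match_string2 is handed an iterator positioned at an opening quote, decodes the escape sequences listed above, and leaves the iterator on the closing quote (throwing if no closing quote is found). A small usage sketch:

    #include <string>

    // buf holds a JSON fragment; it starts at the opening '"'
    std::string buf = "\"line1\\nline2\"";
    std::string::const_iterator it = buf.begin();
    std::string val;
    epee::misc_utils::parse::match_string2(it, buf.end(), val);
    // val == "line1\nline2", and it now points at the closing quote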
diff --git a/contrib/epee/include/storages/portable_storage.h b/contrib/epee/include/storages/portable_storage.h
deleted file mode 100644
index 87c6edb2b9..0000000000
--- a/contrib/epee/include/storages/portable_storage.h
+++ /dev/null
@@ -1,480 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-// [BSD 3-clause license text, identical to the header reproduced in full above]
-//
-
-
-
-#pragma once
-
-#include "misc_language.h"
-#include "portable_storage_base.h"
-#include "portable_storage_to_bin.h"
-#include "portable_storage_from_bin.h"
-#include "portable_storage_to_json.h"
-#include "portable_storage_from_json.h"
-#include "portable_storage_val_converters.h"
-
-namespace epee
-{
-  namespace serialization
-  {
-    /************************************************************************/
-    /*                                                                      */
-    /************************************************************************/
-    class portable_storage
-    {
-    public:
-      typedef epee::serialization::hsection hsection;
-      typedef epee::serialization::harray harray;
-      typedef storage_entry meta_entry;
-
-      portable_storage(){}
-      virtual ~portable_storage(){}
-      hsection open_section(const std::string& section_name, hsection hparent_section, bool create_if_notexist = false);
-      template<class t_value>
-      bool get_value(const std::string& value_name, t_value& val, hsection hparent_section);
-      bool get_value(const std::string& value_name, storage_entry& val, hsection hparent_section);
-      template<class t_value>
-      bool set_value(const std::string& value_name, const t_value& target, hsection hparent_section);
-
-      //serial access for arrays of values --------------------------------------
-      //values
-      template<class t_value>
-      harray get_first_value(const std::string& value_name, t_value& target, hsection hparent_section);
-      template<class t_value>
-      bool get_next_value(harray hval_array, t_value& target);
-      template<class t_value>
-      harray insert_first_value(const std::string& value_name, const t_value& target, hsection hparent_section);
-      template<class t_value>
-      bool insert_next_value(harray hval_array, const t_value& target);
-      //sections
-      harray get_first_section(const std::string& pSectionName, hsection& h_child_section, hsection hparent_section);
-      bool get_next_section(harray hSecArray, hsection& h_child_section);
-      harray insert_first_section(const std::string& pSectionName, hsection& hinserted_childsection, hsection hparent_section);
-      bool insert_next_section(harray hSecArray, hsection& hinserted_childsection);
-      //------------------------------------------------------------------------
-      //delete entry (section, value or array)
-      bool delete_entry(const std::string& pentry_name, hsection hparent_section = nullptr);
-
-      //-------------------------------------------------------------------------------
-      bool store_to_binary(binarybuffer& target);
-      bool load_from_binary(const binarybuffer& target);
-      template<class trace_policy>
-      bool dump_as_xml(std::string& targetObj, const std::string& root_name = "");
-      bool dump_as_json(std::string& targetObj, size_t indent = 0, bool insert_newlines = true);
-      bool load_from_json(const std::string& source);
-
-    private:
-      section m_root;
-      hsection get_root_section() {return &m_root;}
-      storage_entry* find_storage_entry(const std::string& pentry_name, hsection psection);
-      template<class entry_type>
-      storage_entry* insert_new_entry_get_storage_entry(const std::string& pentry_name, hsection psection, const entry_type& entry);
-
-      hsection insert_new_section(const std::string& pentry_name, hsection psection);
-
-#pragma pack(push)
-#pragma pack(1)
-      struct storage_block_header
-      {
-        uint32_t m_signature_a;
-        uint32_t m_signature_b;
-        uint8_t m_ver;
-      };
-#pragma pack(pop)
-    };
-    inline
-    bool portable_storage::dump_as_json(std::string& buff, size_t indent, bool insert_newlines)
-    {
-      TRY_ENTRY();
-      std::stringstream ss;
-      epee::serialization::dump_as_json(ss, m_root, indent, insert_newlines);
-      buff = ss.str();
-      return true;
-      CATCH_ENTRY("portable_storage::dump_as_json", false)
-    }
-    inline
-    bool portable_storage::load_from_json(const std::string& source)
-    {
-      TRY_ENTRY();
-      return json::load_from_json(source, *this);
-      CATCH_ENTRY("portable_storage::load_from_json", false)
-    }
-
-    template<class trace_policy>
-    bool portable_storage::dump_as_xml(std::string& targetObj, const std::string& root_name)
-    {
-      return false;//TODO: don't think i ever again will use xml - ambiguous and "overtagged" format
-    }
-
-    inline
-    bool portable_storage::store_to_binary(binarybuffer& target)
-    {
-      TRY_ENTRY();
-      std::stringstream ss;
-      storage_block_header sbh = AUTO_VAL_INIT(sbh);
-      sbh.m_signature_a = PORTABLE_STORAGE_SIGNATUREA;
-      sbh.m_signature_b = PORTABLE_STORAGE_SIGNATUREB;
-      sbh.m_ver = PORTABLE_STORAGE_FORMAT_VER;
-      ss.write((const char*)&sbh, sizeof(storage_block_header));
-      pack_entry_to_buff(ss, m_root);
-      target = ss.str();
-      return true;
-      CATCH_ENTRY("portable_storage::store_to_binary", false)
-    }
-    inline
-    bool portable_storage::load_from_binary(const binarybuffer& source)
-    {
-      m_root.m_entries.clear();
-      if(source.size() < sizeof(storage_block_header))
-      {
-        LOG_WARNING("portable_storage: wrong binary format, packet size = " << source.size() << " less than expected sizeof(storage_block_header)="
-          << sizeof(storage_block_header), LOG_LEVEL_2)
-        return false;
-      }
-      storage_block_header* pbuff = (storage_block_header*)source.data();
-      if(pbuff->m_signature_a != PORTABLE_STORAGE_SIGNATUREA ||
-        pbuff->m_signature_b != PORTABLE_STORAGE_SIGNATUREB
-        )
-      {
-        LOG_WARNING("portable_storage: wrong binary format - signature missmatch", LOG_LEVEL_2);
-        return false;
-      }
-      if(pbuff->m_ver != PORTABLE_STORAGE_FORMAT_VER)
-      {
-        LOG_WARNING("portable_storage: wrong binary format - unknown format ver = " << pbuff->m_ver, LOG_LEVEL_2);
-        return false;
-      }
-      TRY_ENTRY();
-      throwable_buffer_reader buf_reader(source.data()+sizeof(storage_block_header), source.size()-sizeof(storage_block_header));
-      buf_reader.read(m_root);
-      return true;//TODO:
-      CATCH_ENTRY("portable_storage::load_from_binary", false);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    inline
-    hsection portable_storage::open_section(const std::string& section_name, hsection hparent_section, bool create_if_notexist)
-    {
-      TRY_ENTRY();
-      hparent_section = hparent_section ? hparent_section:&m_root;
-      storage_entry* pentry = find_storage_entry(section_name, hparent_section);
-      if(!pentry)
-      {
-        if(!create_if_notexist)
-          return nullptr;
-        return insert_new_section(section_name, hparent_section);
-      }
-      CHECK_AND_ASSERT(pentry , nullptr);
-      //check that section_entry we find is real "CSSection"
-      if(pentry->type() != typeid(section))
-      {
-        if(create_if_notexist)
-          *pentry = storage_entry(section());//replace
-        else
-          return nullptr;
-      }
-      return &boost::get<section>(*pentry);
-      CATCH_ENTRY("portable_storage::open_section", nullptr);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    template<class to_type>
-    struct get_value_visitor: boost::static_visitor<void>
-    {
-      to_type& m_target;
-      get_value_visitor(to_type& target):m_target(target){}
-      template<class from_type>
-      void operator()(const from_type& v){convert_t(v, m_target);}
-    };
-
-    template<class t_value>
-    bool portable_storage::get_value(const std::string& value_name, t_value& val, hsection hparent_section)
-    {
-      BOOST_MPL_ASSERT(( boost::mpl::contains<storage_entry::types, t_value> ));
-      //TRY_ENTRY();
-      if(!hparent_section) hparent_section = &m_root;
-      storage_entry* pentry = find_storage_entry(value_name, hparent_section);
-      if(!pentry)
-        return false;
-
-      get_value_visitor<t_value> gvv(val);
-      boost::apply_visitor(gvv, *pentry);
-      return true;
-      //CATCH_ENTRY("portable_storage::template<>get_value", false);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    inline
-    bool portable_storage::get_value(const std::string& value_name, storage_entry& val, hsection hparent_section)
-    {
-      //TRY_ENTRY();
-      if(!hparent_section) hparent_section = &m_root;
-      storage_entry* pentry = find_storage_entry(value_name, hparent_section);
-      if(!pentry)
-        return false;
-
-      val = *pentry;
-      return true;
-      //CATCH_ENTRY("portable_storage::template<>get_value", false);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    template<class t_value>
-    bool portable_storage::set_value(const std::string& value_name, const t_value& v, hsection hparent_section)
-    {
-      BOOST_MPL_ASSERT(( boost::mpl::contains<storage_entry::types, t_value> ));
-      TRY_ENTRY();
-      if(!hparent_section)
-        hparent_section = &m_root;
-      storage_entry* pentry = find_storage_entry(value_name, hparent_section);
-      if(!pentry)
-      {
-        pentry = insert_new_entry_get_storage_entry(value_name, hparent_section, v);
-        if(!pentry)
-          return false;
-        return true;
-      }
-      *pentry = storage_entry(v);
-      return true;
-      CATCH_ENTRY("portable_storage::template<>set_value", false);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    inline
-    storage_entry* portable_storage::find_storage_entry(const std::string& pentry_name, hsection psection)
-    {
-      TRY_ENTRY();
-      CHECK_AND_ASSERT(psection, nullptr);
-      auto it = psection->m_entries.find(pentry_name);
-      if(it == psection->m_entries.end())
-        return nullptr;
-
-      return &it->second;
-      CATCH_ENTRY("portable_storage::find_storage_entry", nullptr);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    template<class entry_type>
-    storage_entry* portable_storage::insert_new_entry_get_storage_entry(const std::string& pentry_name, hsection psection, const entry_type& entry)
-    {
-      TRY_ENTRY();
-      CHECK_AND_ASSERT(psection, nullptr);
-      auto ins_res = psection->m_entries.insert(std::pair<std::string, storage_entry>(pentry_name, entry));
-      return &ins_res.first->second;
-      CATCH_ENTRY("portable_storage::insert_new_entry_get_storage_entry", nullptr);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    inline
-    hsection portable_storage::insert_new_section(const std::string& pentry_name, hsection psection)
-    {
-      TRY_ENTRY();
-      storage_entry* pse = insert_new_entry_get_storage_entry(pentry_name, psection, section());
-      if(!pse) return nullptr;
-      return &boost::get<section>(*pse);
-      CATCH_ENTRY("portable_storage::insert_new_section", nullptr);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    template<class to_type>
-    struct get_first_value_visitor: boost::static_visitor<bool>
-    {
-      to_type& m_target;
-      get_first_value_visitor(to_type& target):m_target(target){}
-      template<class from_type>
-      bool operator()(const array_entry_t<from_type>& a)
-      {
-        const from_type* pv = a.get_first_val();
-        if(!pv)
-          return false;
-        convert_t(*pv, m_target);
-        return true;
-      }
-    };
-    //---------------------------------------------------------------------------------------------------------------
-    template<class t_value>
-    harray portable_storage::get_first_value(const std::string& value_name, t_value& target, hsection hparent_section)
-    {
-      BOOST_MPL_ASSERT(( boost::mpl::contains<storage_entry::types, t_value> ));
-      //TRY_ENTRY();
-      if(!hparent_section) hparent_section = &m_root;
-      storage_entry* pentry = find_storage_entry(value_name, hparent_section);
-      if(!pentry)
-        return nullptr;
-      if(pentry->type() != typeid(array_entry))
-        return nullptr;
-      array_entry& ar_entry = boost::get<array_entry>(*pentry);
-
-      get_first_value_visitor<t_value> gfv(target);
-      if(!boost::apply_visitor(gfv, ar_entry))
-        return nullptr;
-      return &ar_entry;
-      //CATCH_ENTRY("portable_storage::get_first_value", nullptr);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    template<class to_type>
-    struct get_next_value_visitor: boost::static_visitor<bool>
-    {
-      to_type& m_target;
-      get_next_value_visitor(to_type& target):m_target(target){}
-      template<class from_type>
-      bool operator()(const array_entry_t<from_type>& a)
-      {
-        //TODO: optimize code here: work without get_next_val function
-        const from_type* pv = a.get_next_val();
-        if(!pv)
-          return false;
-        convert_t(*pv, m_target);
-        return true;
-      }
-    };
-
-
-    template<class t_value>
-    bool portable_storage::get_next_value(harray hval_array, t_value& target)
-    {
-      BOOST_MPL_ASSERT(( boost::mpl::contains<storage_entry::types, t_value> ));
-      //TRY_ENTRY();
-      CHECK_AND_ASSERT(hval_array, false);
-      array_entry& ar_entry = *hval_array;
-      get_next_value_visitor<t_value> gnv(target);
-      if(!boost::apply_visitor(gnv, ar_entry))
-        return false;
-      return true;
-      //CATCH_ENTRY("portable_storage::get_next_value", false);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    template<class t_value>
-    harray portable_storage::insert_first_value(const std::string& value_name, const t_value& target, hsection hparent_section)
-    {
-      TRY_ENTRY();
-      if(!hparent_section) hparent_section = &m_root;
-      storage_entry* pentry = find_storage_entry(value_name, hparent_section);
-      if(!pentry)
-      {
-        pentry = insert_new_entry_get_storage_entry(value_name, hparent_section, array_entry(array_entry_t<t_value>()));
-        if(!pentry)
-          return nullptr;
-      }
-      if(pentry->type() != typeid(array_entry))
-        *pentry = storage_entry(array_entry(array_entry_t<t_value>()));
-
-      array_entry& arr = boost::get<array_entry>(*pentry);
-      if(arr.type() != typeid(array_entry_t<t_value>))
-        arr = array_entry(array_entry_t<t_value>());
-
-      array_entry_t<t_value>& arr_typed = boost::get<array_entry_t<t_value> >(arr);
-      arr_typed.insert_first_val(target);
-      return &arr;
-      CATCH_ENTRY("portable_storage::insert_first_value", nullptr);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    template<class t_value>
-    bool portable_storage::insert_next_value(harray hval_array, const t_value& target)
-    {
-      TRY_ENTRY();
-      CHECK_AND_ASSERT(hval_array, false);
-
-      CHECK_AND_ASSERT_MES(hval_array->type() == typeid(array_entry_t<t_value>),
-        false, "unexpected type in insert_next_value: " << typeid(array_entry_t<t_value>).name());
-
-      array_entry_t<t_value>& arr_typed = boost::get<array_entry_t<t_value> >(*hval_array);
-      arr_typed.insert_next_value(target);
-      return true;
-      CATCH_ENTRY("portable_storage::insert_next_value", false);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    //sections
-    inline
-    harray portable_storage::get_first_section(const std::string& sec_name, hsection& h_child_section, hsection hparent_section)
-    {
-      TRY_ENTRY();
-      if(!hparent_section) hparent_section = &m_root;
-      storage_entry* pentry = find_storage_entry(sec_name, hparent_section);
-      if(!pentry)
-        return nullptr;
-      if(pentry->type() != typeid(array_entry))
-        return nullptr;
-      array_entry& ar_entry = boost::get<array_entry>(*pentry);
-      if(ar_entry.type() != typeid(array_entry_t<section>))
-        return nullptr;
-      array_entry_t<section>& sec_array = boost::get<array_entry_t<section> >(ar_entry);
-      section* psec = sec_array.get_first_val();
-      if(!psec)
-        return nullptr;
-      h_child_section = psec;
-      return &ar_entry;
-      CATCH_ENTRY("portable_storage::get_first_section", nullptr);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    inline
-    bool portable_storage::get_next_section(harray hsec_array, hsection& h_child_section)
-    {
-      TRY_ENTRY();
-      CHECK_AND_ASSERT(hsec_array, false);
-      if(hsec_array->type() != typeid(array_entry_t<section>))
-        return nullptr;
-      array_entry_t<section>& sec_array = boost::get<array_entry_t<section> >(*hsec_array);
-      h_child_section = sec_array.get_next_val();
-      if(!h_child_section)
-        return false;
-      return true;
-      CATCH_ENTRY("portable_storage::get_next_section", false);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    inline
-    harray portable_storage::insert_first_section(const std::string& sec_name, hsection& hinserted_childsection, hsection hparent_section)
-    {
-      TRY_ENTRY();
-      if(!hparent_section) hparent_section = &m_root;
-      storage_entry* pentry = find_storage_entry(sec_name, hparent_section);
-      if(!pentry)
-      {
-        pentry = insert_new_entry_get_storage_entry(sec_name, hparent_section, array_entry(array_entry_t<section>()));
-        if(!pentry)
-          return nullptr;
-      }
-      if(pentry->type() != typeid(array_entry))
-        *pentry = storage_entry(array_entry(array_entry_t<section>()));
-
-      array_entry& ar_entry = boost::get<array_entry>(*pentry);
-      if(ar_entry.type() != typeid(array_entry_t<section>))
-        ar_entry = array_entry(array_entry_t<section>());
-
-      array_entry_t<section>& sec_array = boost::get<array_entry_t<section> >(ar_entry);
-      hinserted_childsection = &sec_array.insert_first_val(section());
-      return &ar_entry;
-      CATCH_ENTRY("portable_storage::insert_first_section", nullptr);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-    inline
-    bool portable_storage::insert_next_section(harray hsec_array, hsection& hinserted_childsection)
-    {
-      TRY_ENTRY();
-      CHECK_AND_ASSERT(hsec_array, false);
-      CHECK_AND_ASSERT_MES(hsec_array->type() == typeid(array_entry_t<section>),
-        false, "unexpected type(not 'section') in insert_next_section, type: " << hsec_array->type().name());
-
-      array_entry_t<section>& sec_array = boost::get<array_entry_t<section> >(*hsec_array);
-      hinserted_childsection = &sec_array.insert_next_value(section());
-      return true;
-      CATCH_ENTRY("portable_storage::insert_next_section", false);
-    }
-    //---------------------------------------------------------------------------------------------------------------
-  }
-}
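Taken together, the portable_storage API above gives a schema-less tree: named scalars and arrays hang off sections, and the whole tree round-trips through the binary format or JSON. A minimal round-trip sketch using only the calls declared above:

    epee::serialization::portable_storage st;
    st.set_value("version", uint32_t(1), nullptr);           // nullptr = root section
    epee::serialization::hsection peer = st.open_section("peer", nullptr, true);
    st.set_value("host", std::string("127.0.0.1"), peer);

    std::string blob;
    st.store_to_binary(blob);                                // header signatures + packed entries

    epee::serialization::portable_storage st2;
    uint32_t ver = 0;
    if(st2.load_from_binary(blob))
      st2.get_value("version", ver, nullptr);                // ver == 1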
diff --git a/contrib/epee/include/storages/portable_storage_base.h b/contrib/epee/include/storages/portable_storage_base.h
deleted file mode 100644
index 3f16375388..0000000000
--- a/contrib/epee/include/storages/portable_storage_base.h
+++ /dev/null
@@ -1,160 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-// [BSD 3-clause license text, identical to the header reproduced in full above]
-//
-
-
-
-#pragma once
-
-#include <string>
-#include <map>
-#include <list>
-#include <boost/variant.hpp>
-
-#define PORTABLE_STORAGE_SIGNATUREA 0x01011101
-#define PORTABLE_STORAGE_SIGNATUREB 0x01020101 // bender's nightmare
-#define PORTABLE_STORAGE_FORMAT_VER 1
-
-#define PORTABLE_RAW_SIZE_MARK_MASK 0x03
-#define PORTABLE_RAW_SIZE_MARK_BYTE 0
-#define PORTABLE_RAW_SIZE_MARK_WORD 1
-#define PORTABLE_RAW_SIZE_MARK_DWORD 2
-#define PORTABLE_RAW_SIZE_MARK_INT64 3
-
-#ifndef MAX_STRING_LEN_POSSIBLE
-#define MAX_STRING_LEN_POSSIBLE 2000000000 //do not let string be so big
-#endif
-
-//data types
-#define SERIALIZE_TYPE_INT64 1
-#define SERIALIZE_TYPE_INT32 2
-#define SERIALIZE_TYPE_INT16 3
-#define SERIALIZE_TYPE_INT8 4
-#define SERIALIZE_TYPE_UINT64 5
-#define SERIALIZE_TYPE_UINT32 6
-#define SERIALIZE_TYPE_UINT16 7
-#define SERIALIZE_TYPE_UINT8 8
-#define SERIALIZE_TYPE_DUOBLE 9
-#define SERIALIZE_TYPE_STRING 10
-#define SERIALIZE_TYPE_BOOL 11
-#define SERIALIZE_TYPE_OBJECT 12
-#define SERIALIZE_TYPE_ARRAY 13
-
-#define SERIALIZE_FLAG_ARRAY 0x80
-
-
-namespace epee
-{
-  namespace serialization
-  {
-    struct section;
-
-    /************************************************************************/
-    /*                                                                      */
-    /************************************************************************/
-    template<class t_entry_type>
-    struct array_entry_t
-    {
-      array_entry_t():m_it(m_array.end()){}
-
-      const t_entry_type* get_first_val() const
-      {
-        m_it = m_array.begin();
-        return get_next_val();
-      }
-
-      t_entry_type* get_first_val()
-      {
-        m_it = m_array.begin();
-        return get_next_val();
-      }
-
-
-      const t_entry_type* get_next_val() const
-      {
-        if(m_it == m_array.end())
-          return nullptr;
-        return &(*(m_it++));
-      }
-
-      t_entry_type* get_next_val()
-      {
-        if(m_it == m_array.end())
-          return nullptr;
-        return (t_entry_type*)&(*(m_it++));//fuckoff
-      }
-
-      t_entry_type& insert_first_val(const t_entry_type& v)
-      {
-        m_array.clear();
-        m_it = m_array.end();
-        return insert_next_value(v);
-      }
-
-      t_entry_type& insert_next_value(const t_entry_type& v)
-      {
-        m_array.push_back(v);
-        return m_array.back();
-      }
-
-      std::list<t_entry_type> m_array;
-      mutable typename std::list<t_entry_type>::const_iterator m_it;
-    };
-
-
-    typedef boost::make_recursive_variant<
-      array_entry_t<section>,
-      array_entry_t<uint64_t>,
-      array_entry_t<uint32_t>,
-      array_entry_t<uint16_t>,
-      array_entry_t<uint8_t>,
-      array_entry_t<int64_t>,
-      array_entry_t<int32_t>,
-      array_entry_t<int16_t>,
-      array_entry_t<int8_t>,
-      array_entry_t<double>,
-      array_entry_t<bool>,
-      array_entry_t<std::string>,
-      array_entry_t<boost::recursive_variant_>
-      >::type array_entry;
-
-    typedef boost::variant<uint64_t, uint32_t, uint16_t, uint8_t, int64_t, int32_t, int16_t, int8_t, double, bool, std::string, section, array_entry> storage_entry;
-
-    typedef std::string binarybuffer;//it's ok
-
-    /************************************************************************/
-    /*                                                                      */
-    /************************************************************************/
-    struct section
-    {
-      std::map<std::string, storage_entry> m_entries;
-    };
-
-    //handle-like aliases
-    typedef section* hsection;
-    typedef array_entry* harray;
-  }
-}
\ No newline at end of file
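Every binary blob begins with the packed 9-byte storage_block_header defined earlier: two little-endian 32-bit signatures followed by a version byte. Writing it out by hand, as a sketch (load_from_binary above performs the matching check):

    #include <cstdint>
    #include <cstring>
    #include <string>

    std::string make_storage_header()
    {
      const uint32_t sig_a = 0x01011101;  // PORTABLE_STORAGE_SIGNATUREA
      const uint32_t sig_b = 0x01020101;  // PORTABLE_STORAGE_SIGNATUREB
      const uint8_t  ver   = 1;           // PORTABLE_STORAGE_FORMAT_VER
      std::string h(9, '\0');
      std::memcpy(&h[0], &sig_a, 4);      // assumes a little-endian host, as the format does
      std::memcpy(&h[4], &sig_b, 4);
      h[8] = static_cast<char>(ver);
      return h;
    }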
-// - - - -#pragma once - -#include "misc_language.h" -#include "portable_storage_base.h" - -#ifdef EPEE_PORTABLE_STORAGE_RECURSION_LIMIT -#define EPEE_PORTABLE_STORAGE_RECURSION_LIMIT_INTERNAL EPEE_PORTABLE_STORAGE_RECURSION_LIMIT -#else -#define EPEE_PORTABLE_STORAGE_RECURSION_LIMIT_INTERNAL 100 -#endif - -namespace epee -{ - namespace serialization - { - struct throwable_buffer_reader - { - throwable_buffer_reader(const void* ptr, size_t sz); - void read(void* target, size_t count); - void read_sec_name(std::string& sce_name); - template - void read(t_pod_type& pod_val); - template - t_type read(); - template - storage_entry read_ae(); - storage_entry load_storage_array_entry(uint8_t type); - size_t read_varint(); - template - storage_entry read_se(); - storage_entry load_storage_entry(); - void read(section& sec); - void read(std::string& str); - private: - struct recursuion_limitation_guard - { - size_t& m_counter_ref; - recursuion_limitation_guard(size_t& counter):m_counter_ref(counter) - { - ++m_counter_ref; - CHECK_AND_ASSERT_THROW_MES(m_counter_ref < EPEE_PORTABLE_STORAGE_RECURSION_LIMIT_INTERNAL, "Wrong blob data in portable storage: recursion limitation (" << EPEE_PORTABLE_STORAGE_RECURSION_LIMIT_INTERNAL << ") exceeded"); - } - ~recursuion_limitation_guard() - { - CHECK_AND_ASSERT_THROW_MES(m_counter_ref != 0, "Internal error: m_counter_ref == 0 while ~recursuion_limitation_guard()"); - --m_counter_ref; - } - }; -#define RECURSION_LIMITATION() recursuion_limitation_guard rl(m_recursion_count) - - const uint8_t* m_ptr; - size_t m_count; - size_t m_recursion_count; - }; - - inline throwable_buffer_reader::throwable_buffer_reader(const void* ptr, size_t sz) - { - if(!ptr) - throw std::runtime_error("throwable_buffer_reader: ptr==nullptr"); - if(!sz) - throw std::runtime_error("throwable_buffer_reader: sz==0"); - m_ptr = (uint8_t*)ptr; - m_count = sz; - m_recursion_count = 0; - } - inline - void throwable_buffer_reader::read(void* target, size_t count) - { - RECURSION_LIMITATION(); - CHECK_AND_ASSERT_THROW_MES(m_count >= count, " attempt to read " << count << " bytes from buffer with " << m_count << " bytes remained"); - memcpy(target, m_ptr, count); - m_ptr += count; - m_count -= count; - } - inline - void throwable_buffer_reader::read_sec_name(std::string& sce_name) - { - RECURSION_LIMITATION(); - uint8_t name_len = 0; - read(name_len); - sce_name.resize(name_len); - read((void*)sce_name.data(), name_len); - } - - template - void throwable_buffer_reader::read(t_pod_type& pod_val) - { - RECURSION_LIMITATION(); - read(&pod_val, sizeof(pod_val)); - } - - template - t_type throwable_buffer_reader::read() - { - RECURSION_LIMITATION(); - t_type v; - read(v); - return v; - } - - - template - storage_entry throwable_buffer_reader::read_ae() - { - RECURSION_LIMITATION(); - //for pod types - array_entry_t sa; - size_t size = read_varint(); - //TODO: add some optimization here later - while(size--) - sa.m_array.push_back(read()); - return storage_entry(array_entry(sa)); - } - - inline - storage_entry throwable_buffer_reader::load_storage_array_entry(uint8_t type) - { - RECURSION_LIMITATION(); - type &= ~SERIALIZE_FLAG_ARRAY; - switch(type) - { - case SERIALIZE_TYPE_INT64: return read_ae(); - case SERIALIZE_TYPE_INT32: return read_ae(); - case SERIALIZE_TYPE_INT16: return read_ae(); - case SERIALIZE_TYPE_INT8: return read_ae(); - case SERIALIZE_TYPE_UINT64: return read_ae(); - case SERIALIZE_TYPE_UINT32: return read_ae(); - case SERIALIZE_TYPE_UINT16: return read_ae(); - case 
SERIALIZE_TYPE_UINT8: return read_ae<uint8_t>(); - case SERIALIZE_TYPE_DUOBLE: return read_ae<double>(); - case SERIALIZE_TYPE_BOOL: return read_ae<bool>(); - case SERIALIZE_TYPE_STRING: return read_ae<std::string>(); - case SERIALIZE_TYPE_OBJECT: return read_ae<section>
(); - case SERIALIZE_TYPE_ARRAY: return read_ae(); - default: - CHECK_AND_ASSERT_THROW_MES(false, "unknown entry_type code = " << type); - } - } - - inline - size_t throwable_buffer_reader::read_varint() - { - RECURSION_LIMITATION(); - CHECK_AND_ASSERT_THROW_MES(m_count >= 1, "empty buff, expected place for varint"); - size_t v = 0; - uint8_t size_mask = (*(uint8_t*)m_ptr) &PORTABLE_RAW_SIZE_MARK_MASK; - switch (size_mask) - { - case PORTABLE_RAW_SIZE_MARK_BYTE: v = read();break; - case PORTABLE_RAW_SIZE_MARK_WORD: v = read();break; - case PORTABLE_RAW_SIZE_MARK_DWORD: v = read();break; - case PORTABLE_RAW_SIZE_MARK_INT64: v = read();break; - default: - CHECK_AND_ASSERT_THROW_MES(false, "unknown varint size_mask = " << size_mask); - } - v >>= 2; - return v; - } - - template - storage_entry throwable_buffer_reader::read_se() - { - RECURSION_LIMITATION(); - t_type v; - read(v); - return storage_entry(v); - } - - template<> - inline storage_entry throwable_buffer_reader::read_se() - { - RECURSION_LIMITATION(); - return storage_entry(read()); - } - - - template<> - inline storage_entry throwable_buffer_reader::read_se
<section>() - { - RECURSION_LIMITATION(); - section s;//use extra variable due to vs bug, line "storage_entry se(section()); " can't be compiled in visual studio - storage_entry se(s); - section& section_entry = boost::get<section>
(se); - read(section_entry); - return se; - } - - template<> - inline storage_entry throwable_buffer_reader::read_se() - { - RECURSION_LIMITATION(); - uint8_t ent_type = 0; - read(ent_type); - CHECK_AND_ASSERT_THROW_MES(ent_type&SERIALIZE_FLAG_ARRAY, "wrong type sequenses"); - return load_storage_array_entry(ent_type); - } - - inline - storage_entry throwable_buffer_reader::load_storage_entry() - { - RECURSION_LIMITATION(); - uint8_t ent_type = 0; - read(ent_type); - if(ent_type&SERIALIZE_FLAG_ARRAY) - return load_storage_array_entry(ent_type); - - switch(ent_type) - { - case SERIALIZE_TYPE_INT64: return read_se(); - case SERIALIZE_TYPE_INT32: return read_se(); - case SERIALIZE_TYPE_INT16: return read_se(); - case SERIALIZE_TYPE_INT8: return read_se(); - case SERIALIZE_TYPE_UINT64: return read_se(); - case SERIALIZE_TYPE_UINT32: return read_se(); - case SERIALIZE_TYPE_UINT16: return read_se(); - case SERIALIZE_TYPE_UINT8: return read_se(); - case SERIALIZE_TYPE_DUOBLE: return read_se(); - case SERIALIZE_TYPE_BOOL: return read_se(); - case SERIALIZE_TYPE_STRING: return read_se(); - case SERIALIZE_TYPE_OBJECT: return read_se
(); - case SERIALIZE_TYPE_ARRAY: return read_se(); - default: - CHECK_AND_ASSERT_THROW_MES(false, "unknown entry_type code = " << ent_type); - } - } - inline - void throwable_buffer_reader::read(section& sec) - { - RECURSION_LIMITATION(); - sec.m_entries.clear(); - size_t count = read_varint(); - while(count--) - { - //read section name string - std::string sec_name; - read_sec_name(sec_name); - sec.m_entries.insert(std::make_pair(sec_name, load_storage_entry())); - } - } - inline - void throwable_buffer_reader::read(std::string& str) - { - RECURSION_LIMITATION(); - size_t len = read_varint(); - CHECK_AND_ASSERT_THROW_MES(len < MAX_STRING_LEN_POSSIBLE, "to big string len value in storage: " << len); - CHECK_AND_ASSERT_THROW_MES(m_count >= len, "string len count value " << len << " goes out of remain storage len " << m_count); - //do this manually to avoid double memory write in huge strings (first time at resize, second at read) - str.assign((const char*)m_ptr, len); - m_ptr+=len; - m_count -= len; - } - } -} \ No newline at end of file diff --git a/contrib/epee/include/storages/portable_storage_from_json.h b/contrib/epee/include/storages/portable_storage_from_json.h deleted file mode 100644 index 3ca61c02de..0000000000 --- a/contrib/epee/include/storages/portable_storage_from_json.h +++ /dev/null @@ -1,384 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// - -#pragma once - -#include -#include - -#include "parserse_base_utils.h" -#include "file_io_utils.h" - -namespace epee -{ - using namespace misc_utils::parse; - namespace serialization - { - namespace json - { -#define CHECK_ISSPACE() if(!isspace(*it)){ ASSERT_MES_AND_THROW("Wrong JSON character at: " << std::string(it, buf_end));} - - /*inline void parse_error() - { - ASSERT_MES_AND_THROW("json parse error"); - }*/ - template - inline void run_handler(typename t_storage::hsection current_section, std::string::const_iterator& sec_buf_begin, std::string::const_iterator buf_end, t_storage& stg) - { - - std::string::const_iterator sub_element_start; - std::string name; - typename t_storage::harray h_array = nullptr; - enum match_state - { - match_state_lookup_for_section_start, - match_state_lookup_for_name, - match_state_waiting_separator, - match_state_wonder_after_separator, - match_state_wonder_after_value, - match_state_wonder_array, - match_state_array_after_value, - match_state_array_waiting_value, - match_state_error - }; - - enum array_mode - { - array_mode_undifined = 0, - array_mode_sections, - array_mode_string, - array_mode_numbers, - array_mode_booleans - }; - - match_state state = match_state_lookup_for_section_start; - array_mode array_md = array_mode_undifined; - std::string::const_iterator it = sec_buf_begin; - for(;it != buf_end;it++) - { - switch (state) - { - case match_state_lookup_for_section_start: - if(*it == '{') - state = match_state_lookup_for_name; - else CHECK_ISSPACE(); - break; - case match_state_lookup_for_name: - switch(*it) - { - case '"': - match_string2(it, buf_end, name); - state = match_state_waiting_separator; - break; - case '}': - //this is it! section ends here. - //seems that it is empty section - sec_buf_begin = it; - return; - default: - CHECK_ISSPACE(); - } - break; - case match_state_waiting_separator: - if(*it == ':') - state = match_state_wonder_after_separator; - else CHECK_ISSPACE(); - break; - case match_state_wonder_after_separator: - if(*it == '"') - {//just a named string value started - std::string val; - match_string2(it, buf_end, val); - //insert text value - stg.set_value(name, val, current_section); - state = match_state_wonder_after_value; - }else if (isdigit(*it) || *it == '-') - {//just a named number value started - std::string val; - bool is_v_float = false;bool is_signed = false; - match_number2(it, buf_end, val, is_v_float, is_signed); - if(!is_v_float) - { - if(is_signed) - { - int64_t nval = boost::lexical_cast(val); - stg.set_value(name, nval, current_section); - }else - { - uint64_t nval = boost::lexical_cast(val); - stg.set_value(name, nval, current_section); - } - }else - { - double nval = boost::lexical_cast(val); - stg.set_value(name, nval, current_section); - } - state = match_state_wonder_after_value; - }else if(isalpha(*it) ) - {// could be null, true or false - std::string word; - match_word2(it, buf_end, word); - if(boost::iequals(word, "null")) - { - state = match_state_wonder_after_value; - //just skip this, - }else if(boost::iequals(word, "true")) - { - stg.set_value(name, true, current_section); - state = match_state_wonder_after_value; - }else if(boost::iequals(word, "false")) - { - stg.set_value(name, false, current_section); - state = match_state_wonder_after_value; - }else ASSERT_MES_AND_THROW("Unknown value keyword " << word); - }else if(*it == '{') - { - //sub section here - typename t_storage::hsection new_sec = stg.open_section(name, current_section, true); - CHECK_AND_ASSERT_THROW_MES(new_sec, 
"Failed to insert new section in json: " << std::string(it, buf_end)); - run_handler(new_sec, it, buf_end, stg); - state = match_state_wonder_after_value; - }else if(*it == '[') - {//array of something - state = match_state_wonder_array; - }else CHECK_ISSPACE(); - break; - case match_state_wonder_after_value: - if(*it == ',') - state = match_state_lookup_for_name; - else if(*it == '}') - { - //this is it! section ends here. - sec_buf_begin = it; - return; - }else CHECK_ISSPACE(); - break; - case match_state_wonder_array: - if(*it == '[') - { - ASSERT_MES_AND_THROW("array of array not suppoerted yet :( sorry"); - //mean array of array - } - if(*it == '{') - { - //mean array of sections - typename t_storage::hsection new_sec = nullptr; - h_array = stg.insert_first_section(name, new_sec, current_section); - CHECK_AND_ASSERT_THROW_MES(h_array&&new_sec, "failed to create new section"); - run_handler(new_sec, it, buf_end, stg); - state = match_state_array_after_value; - array_md = array_mode_sections; - }else if(*it == '"') - { - //mean array of strings - std::string val; - match_string2(it, buf_end, val); - h_array = stg.insert_first_value(name, val, current_section); - CHECK_AND_ASSERT_THROW_MES(h_array, " failed to insert values entry"); - state = match_state_array_after_value; - array_md = array_mode_string; - }else if (isdigit(*it) || *it == '-') - {//array of numbers value started - std::string val; - bool is_v_float = false;bool is_signed_val = false; - match_number2(it, buf_end, val, is_v_float, is_signed_val); - if(!is_v_float) - { - int64_t nval = boost::lexical_cast(val);//bool res = string_tools::string_to_num_fast(val, nval); - h_array = stg.insert_first_value(name, nval, current_section); - CHECK_AND_ASSERT_THROW_MES(h_array, " failed to insert values section entry"); - }else - { - double nval = boost::lexical_cast(val);//bool res = string_tools::string_to_num_fast(val, nval); - h_array = stg.insert_first_value(name, nval, current_section); - CHECK_AND_ASSERT_THROW_MES(h_array, " failed to insert values section entry"); - } - - state = match_state_array_after_value; - array_md = array_mode_numbers; - }else if(*it == ']')//empty array - { - array_md = array_mode_undifined; - state = match_state_wonder_after_value; - }else if(isalpha(*it) ) - {// array of booleans - std::string word; - match_word2(it, buf_end, word); - if(boost::iequals(word, "true")) - { - h_array = stg.insert_first_value(name, true, current_section); - CHECK_AND_ASSERT_THROW_MES(h_array, " failed to insert values section entry"); - state = match_state_array_after_value; - array_md = array_mode_booleans; - }else if(boost::iequals(word, "false")) - { - h_array = stg.insert_first_value(name, false, current_section); - CHECK_AND_ASSERT_THROW_MES(h_array, " failed to insert values section entry"); - state = match_state_array_after_value; - array_md = array_mode_booleans; - - }else ASSERT_MES_AND_THROW("Unknown value keyword " << word) - }else CHECK_ISSPACE(); - break; - case match_state_array_after_value: - if(*it == ',') - state = match_state_array_waiting_value; - else if(*it == ']') - { - h_array = nullptr; - array_md = array_mode_undifined; - state = match_state_wonder_after_value; - }else CHECK_ISSPACE(); - break; - case match_state_array_waiting_value: - switch(array_md) - { - case array_mode_sections: - if(*it == '{') - { - typename t_storage::hsection new_sec = NULL; - bool res = stg.insert_next_section(h_array, new_sec); - CHECK_AND_ASSERT_THROW_MES(res&&new_sec, "failed to insert next section"); - 
run_handler(new_sec, it, buf_end, stg); - state = match_state_array_after_value; - }else CHECK_ISSPACE(); - break; - case array_mode_string: - if(*it == '"') - { - std::string val; - match_string2(it, buf_end, val); - bool res = stg.insert_next_value(h_array, val); - CHECK_AND_ASSERT_THROW_MES(res, "failed to insert values"); - state = match_state_array_after_value; - }else CHECK_ISSPACE(); - break; - case array_mode_numbers: - if (isdigit(*it) || *it == '-') - {//array of numbers value started - std::string val; - bool is_v_float = false;bool is_signed_val = false; - match_number2(it, buf_end, val, is_v_float, is_signed_val); - bool insert_res = false; - if(!is_v_float) - { - int64_t nval = boost::lexical_cast(val); //bool res = string_tools::string_to_num_fast(val, nval); - insert_res = stg.insert_next_value(h_array, nval); - - }else - { - //TODO: optimize here if need - double nval = boost::lexical_cast(val); //string_tools::string_to_num_fast(val, nval); - insert_res = stg.insert_next_value(h_array, nval); - } - CHECK_AND_ASSERT_THROW_MES(insert_res, "Failed to insert next value"); - state = match_state_array_after_value; - array_md = array_mode_numbers; - }else CHECK_ISSPACE(); - break; - case array_mode_booleans: - if(isalpha(*it) ) - {// array of booleans - std::string word; - match_word2(it, buf_end, word); - if(boost::iequals(word, "true")) - { - bool r = stg.insert_next_value(h_array, true); - CHECK_AND_ASSERT_THROW_MES(r, " failed to insert values section entry"); - state = match_state_array_after_value; - }else if(boost::iequals(word, "false")) - { - bool r = stg.insert_next_value(h_array, false); - CHECK_AND_ASSERT_THROW_MES(r, " failed to insert values section entry"); - state = match_state_array_after_value; - } - else ASSERT_MES_AND_THROW("Unknown value keyword " << word); - }else CHECK_ISSPACE(); - break; - case array_mode_undifined: - default: - ASSERT_MES_AND_THROW("Bad array state"); - } - break; - case match_state_error: - default: - ASSERT_MES_AND_THROW("WRONG JSON STATE"); - } - } - } -/* -{ - "firstName": "John", - "lastName": "Smith", - "age": 25, - "address": { - "streetAddress": "21 2nd Street", - "city": "New York", - "state": "NY", - "postalCode": -10021, - "have_boobs": true, - "have_balls": false - }, - "phoneNumber": [ - { - "type": "home", - "number": "212 555-1234" - }, - { - "type": "fax", - "number": "646 555-4567" - } - ], - "phoneNumbers": [ - "812 123-1234", - "916 123-4567" - ] -} -*/ - template - inline bool load_from_json(const std::string& buff_json, t_storage& stg) - { - std::string::const_iterator sec_buf_begin = buff_json.begin(); - try - { - run_handler(nullptr, sec_buf_begin, buff_json.end(), stg); - return true; - } - catch(const std::exception& ex) - { - (void)(ex); - LOG_PRINT_RED_L0("Failed to parse json, what: " << ex.what()); - return false; - } - catch(...) - { - LOG_PRINT_RED_L0("Failed to parse json"); - return false; - } - } - } - } -} \ No newline at end of file diff --git a/contrib/epee/include/storages/portable_storage_template_helper.h b/contrib/epee/include/storages/portable_storage_template_helper.h deleted file mode 100644 index 008f443211..0000000000 --- a/contrib/epee/include/storages/portable_storage_template_helper.h +++ /dev/null @@ -1,123 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. 
-// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -#pragma once - -#include - -#include "parserse_base_utils.h" -#include "portable_storage.h" -#include "file_io_utils.h" - -namespace epee -{ - namespace serialization - { - //----------------------------------------------------------------------------------------------------------- - template - bool load_t_from_json(t_struct& out, const std::string& json_buff) - { - portable_storage ps; - bool rs = ps.load_from_json(json_buff); - if(!rs) - return false; - - return out.load(ps); - } - //----------------------------------------------------------------------------------------------------------- - template - bool load_t_from_json_file(t_struct& out, const std::string& json_file) - { - std::string f_buff; - if(!file_io_utils::load_file_to_string(json_file, f_buff)) - return false; - - return load_t_from_json(out, f_buff); - } - //----------------------------------------------------------------------------------------------------------- - template - bool store_t_to_json(t_struct& str_in, std::string& json_buff, size_t indent = 0, bool insert_newlines = true) - { - portable_storage ps; - str_in.store(ps); - ps.dump_as_json(json_buff, indent, insert_newlines); - return true; - } - //----------------------------------------------------------------------------------------------------------- - template - std::string store_t_to_json(t_struct& str_in, size_t indent = 0, bool insert_newlines = true) - { - std::string json_buff; - store_t_to_json(str_in, json_buff, indent, insert_newlines); - return std::move(json_buff); - } - //----------------------------------------------------------------------------------------------------------- - template - bool store_t_to_json_file(t_struct& str_in, const std::string& fpath) - { - std::string json_buff; - store_t_to_json(str_in, json_buff); - return file_io_utils::save_string_to_file(fpath, json_buff); - } - //----------------------------------------------------------------------------------------------------------- - template - bool load_t_from_binary(t_struct& out, const std::string& 
binary_buff) - { - portable_storage ps; - bool rs = ps.load_from_binary(binary_buff); - if(!rs) - return false; - - return out.load(ps); - } - //----------------------------------------------------------------------------------------------------------- - template - bool load_t_from_binary_file(t_struct& out, const std::string& binary_file) - { - std::string f_buff; - if(!file_io_utils::load_file_to_string(binary_file, f_buff)) - return false; - - return load_t_from_binary(out, f_buff); - } - //----------------------------------------------------------------------------------------------------------- - template - bool store_t_to_binary(t_struct& str_in, std::string& binary_buff, size_t indent = 0) - { - portable_storage ps; - str_in.store(ps); - return ps.store_to_binary(binary_buff); - } - //----------------------------------------------------------------------------------------------------------- - template - std::string store_t_to_binary(t_struct& str_in, size_t indent = 0) - { - std::string binary_buff; - store_t_to_binary(str_in, binary_buff, indent); - return std::move(binary_buff); - } - } -} \ No newline at end of file diff --git a/contrib/epee/include/storages/portable_storage_to_bin.h b/contrib/epee/include/storages/portable_storage_to_bin.h deleted file mode 100644 index 6743a60879..0000000000 --- a/contrib/epee/include/storages/portable_storage_to_bin.h +++ /dev/null @@ -1,213 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
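The template helpers deleted above reduce struct-to-JSON and struct-to-binary conversion to two member functions on the user type, store(portable_storage&) and load(portable_storage&). In the real tree those members come from epee's KV_SERIALIZE macros; the hand-written struct below only illustrates the contract, using the set_value/get_value accessors visible elsewhere in this patch (treat the exact get_value signature as an assumption).

#include <cstdint>
#include <string>

struct get_height_request // hypothetical example type
{
  uint64_t height;
  std::string tx_id;

  // Contract expected by store_t_to_json/store_t_to_binary above.
  template<class t_storage>
  bool store(t_storage& ps) const
  {
    ps.set_value("height", height, nullptr); // nullptr == root section
    ps.set_value("tx_id", tx_id, nullptr);
    return true;
  }
  // Contract expected by load_t_from_json/load_t_from_binary above.
  template<class t_storage>
  bool load(t_storage& ps)
  {
    return ps.get_value("height", height, nullptr)
        && ps.get_value("tx_id", tx_id, nullptr);
  }
};

// Round trip:
//   get_height_request req{1234, "abc"};
//   std::string json = epee::serialization::store_t_to_json(req);
//   get_height_request back;
//   bool ok = epee::serialization::load_t_from_json(back, json);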
-// - - - -#pragma once - -#include "misc_language.h" -#include "portable_storage_base.h" -#include "pragma_comp_defs.h" - -namespace epee -{ - namespace serialization - { - - template - size_t pack_varint_t(t_stream& strm, uint8_t type_or, size_t& pv) - { - pack_value v = (*((pack_value*)&pv)) << 2; - v |= type_or; - strm.write((const char*)&v, sizeof(pack_value)); - return sizeof(pack_value); - } - - PRAGMA_WARNING_PUSH - PRAGMA_GCC("GCC diagnostic ignored \"-Wstrict-aliasing\"") - template - size_t pack_varint(t_stream& strm, size_t val) - { //the first two bits always reserved for size information - - if(val <= 63) - {//mean enough one byte - return pack_varint_t(strm, PORTABLE_RAW_SIZE_MARK_BYTE, val); - } - else if(val <= 16383) - {//mean need word - return pack_varint_t(strm, PORTABLE_RAW_SIZE_MARK_WORD, val); - }else if(val <= 1073741823) - {//mean need dword - return pack_varint_t(strm, PORTABLE_RAW_SIZE_MARK_DWORD, val); - }else - { - CHECK_AND_ASSERT_THROW_MES(val <= 4611686018427387903, "failed to pack varint - too big amount = " << val); - return pack_varint_t(strm, PORTABLE_RAW_SIZE_MARK_INT64, val); - } - } - PRAGMA_WARNING_POP - - template - bool put_string(t_stream& strm, const std::string& v) - { - pack_varint(strm, v.size()); - if(v.size()) - strm.write((const char*)v.data(), v.size()); - return true; - } - - template - struct array_entry_store_visitor: public boost::static_visitor - { - t_stream& m_strm; - - template - bool pack_pod_array_type(uint8_t contained_type, const array_entry_t& arr_pod) - { - uint8_t type = contained_type|SERIALIZE_FLAG_ARRAY; - m_strm.write((const char*)&type, 1); - pack_varint(m_strm, arr_pod.m_array.size()); - for(const t_pod_type& x: arr_pod.m_array) - m_strm.write((const char*)&x, sizeof(t_pod_type)); - return true; - } - - array_entry_store_visitor(t_stream& strm):m_strm(strm){} - bool operator()(const array_entry_t& v){ return pack_pod_array_type(SERIALIZE_TYPE_UINT64, v);} - bool operator()(const array_entry_t& v){ return pack_pod_array_type(SERIALIZE_TYPE_UINT32, v);} - bool operator()(const array_entry_t& v){ return pack_pod_array_type(SERIALIZE_TYPE_UINT16, v);} - bool operator()(const array_entry_t& v) { return pack_pod_array_type(SERIALIZE_TYPE_UINT8, v);} - bool operator()(const array_entry_t& v) { return pack_pod_array_type(SERIALIZE_TYPE_INT64, v);} - bool operator()(const array_entry_t& v) { return pack_pod_array_type(SERIALIZE_TYPE_INT32, v);} - bool operator()(const array_entry_t& v) { return pack_pod_array_type(SERIALIZE_TYPE_INT16, v);} - bool operator()(const array_entry_t& v) { return pack_pod_array_type(SERIALIZE_TYPE_INT8, v);} - bool operator()(const array_entry_t& v) { return pack_pod_array_type(SERIALIZE_TYPE_DUOBLE, v);} - bool operator()(const array_entry_t& v) { return pack_pod_array_type(SERIALIZE_TYPE_BOOL, v);} - bool operator()(const array_entry_t& arr_str) - { - uint8_t type = SERIALIZE_TYPE_STRING|SERIALIZE_FLAG_ARRAY; - m_strm.write((const char*)&type, 1); - pack_varint(m_strm, arr_str.m_array.size()); - for(const std::string& s: arr_str.m_array) - put_string(m_strm, s); - return true; - } - bool operator()(const array_entry_t
& arr_sec) - { - uint8_t type = SERIALIZE_TYPE_OBJECT|SERIALIZE_FLAG_ARRAY; - m_strm.write((const char*)&type, 1); - pack_varint(m_strm, arr_sec.m_array.size()); - for(const section& s: arr_sec.m_array) - pack_entry_to_buff(m_strm, s); - return true; - } - bool operator()(const array_entry_t& arra_ar) - { - uint8_t type = SERIALIZE_TYPE_ARRAY|SERIALIZE_FLAG_ARRAY; - m_strm.write((const char*)&type, 1); - pack_varint(m_strm, arra_ar.m_array.size()); - for(const array_entry& s: arra_ar.m_array) - pack_entry_to_buff(m_strm, s); - return true; - } - }; - - template - struct storage_entry_store_visitor: public boost::static_visitor - { - t_stream& m_strm; - storage_entry_store_visitor(t_stream& strm):m_strm(strm){} - template - bool pack_pod_type(uint8_t type, const pod_type& v) - { - m_strm.write((const char*)&type, 1); - m_strm.write((const char*)&v, sizeof(pod_type)); - return true; - } - //section, array_entry - bool operator()(const uint64_t& v){ return pack_pod_type(SERIALIZE_TYPE_UINT64, v);} - bool operator()(const uint32_t& v){ return pack_pod_type(SERIALIZE_TYPE_UINT32, v);} - bool operator()(const uint16_t& v){ return pack_pod_type(SERIALIZE_TYPE_UINT16, v);} - bool operator()(const uint8_t& v) { return pack_pod_type(SERIALIZE_TYPE_UINT8, v);} - bool operator()(const int64_t& v) { return pack_pod_type(SERIALIZE_TYPE_INT64, v);} - bool operator()(const int32_t& v) { return pack_pod_type(SERIALIZE_TYPE_INT32, v);} - bool operator()(const int16_t& v) { return pack_pod_type(SERIALIZE_TYPE_INT16, v);} - bool operator()(const int8_t& v) { return pack_pod_type(SERIALIZE_TYPE_INT8, v);} - bool operator()(const double& v) { return pack_pod_type(SERIALIZE_TYPE_DUOBLE, v);} - bool operator()(const bool& v) { return pack_pod_type(SERIALIZE_TYPE_BOOL, v);} - bool operator()(const std::string& v) - { - uint8_t type = SERIALIZE_TYPE_STRING; - m_strm.write((const char*)&type, 1); - put_string(m_strm, v); - return true; - } - bool operator()(const section& v) - { - uint8_t type = SERIALIZE_TYPE_OBJECT; - m_strm.write((const char*)&type, 1); - return pack_entry_to_buff(m_strm, v); - } - - bool operator()(const array_entry& v) - { - //uint8_t type = SERIALIZE_TYPE_ARRAY; - //m_strm.write((const char*)&type, 1); - return pack_entry_to_buff(m_strm, v); - } - }; - - template - bool pack_entry_to_buff(t_stream& strm, const array_entry& ae) - { - array_entry_store_visitor aesv(strm); - return boost::apply_visitor(aesv, ae); - } - - template - bool pack_entry_to_buff(t_stream& strm, const storage_entry& se) - { - storage_entry_store_visitor sv(strm); - return boost::apply_visitor(sv, se); - } - - template - bool pack_entry_to_buff(t_stream& strm, const section& sec) - { - typedef std::map::value_type section_pair; - pack_varint(strm, sec.m_entries.size()); - for(const section_pair& se: sec.m_entries) - { - CHECK_AND_ASSERT_THROW_MES(se.first.size() < std::numeric_limits::max(), "storage_entry_name is too long: " << se.first.size() << ", val: " << se.first); - uint8_t len = static_cast(se.first.size()); - strm.write((const char*)&len, sizeof(len)); - strm.write(se.first.data(), size_t(len)); - pack_entry_to_buff(strm, se.second); - } - return true; - } - } -} \ No newline at end of file diff --git a/contrib/epee/include/storages/portable_storage_to_json.h b/contrib/epee/include/storages/portable_storage_to_json.h deleted file mode 100644 index e3fdcec29c..0000000000 --- a/contrib/epee/include/storages/portable_storage_to_json.h +++ /dev/null @@ -1,181 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. 
Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#pragma once - -#include "misc_language.h" -#include "portable_storage_base.h" -#include "parserse_base_utils.h" - -namespace epee -{ - namespace serialization - { - - template - void dump_as_json(t_stream& strm, const array_entry& ae, size_t indent, bool insert_newlines); - template - void dump_as_json(t_stream& strm, const storage_entry& se, size_t indent, bool insert_newlines); - template - void dump_as_json(t_stream& strm, const std::string& v, size_t indent, bool insert_newlines); - template - void dump_as_json(t_stream& strm, const int8_t& v, size_t indent, bool insert_newlines); - template - void dump_as_json(t_stream& strm, const uint8_t& v, size_t indent, bool insert_newlines); - template - void dump_as_json(t_stream& strm, const bool& v, size_t indent, bool insert_newlines); - template - void dump_as_json(t_stream& strm, const t_type& v, size_t indent, bool insert_newlines); - template - void dump_as_json(t_stream& strm, const section& sec, size_t indent, bool insert_newlines); - - - inline std::string make_indent(size_t indent) - { - return std::string(indent*2, ' '); - } - - template - struct array_entry_store_to_json_visitor: public boost::static_visitor - { - t_stream& m_strm; - size_t m_indent; - bool m_insert_newlines; - array_entry_store_to_json_visitor(t_stream& strm, size_t indent, - bool insert_newlines = true) - : m_strm(strm), m_indent(indent), m_insert_newlines(insert_newlines) - {} - - template - void operator()(const array_entry_t& a) - { - m_strm << "["; - if(a.m_array.size()) - { - auto last_it = --a.m_array.end(); - for(auto it = a.m_array.begin(); it != a.m_array.end(); it++) - { - dump_as_json(m_strm, *it, m_indent, m_insert_newlines); - if(it != last_it) - m_strm << ","; - } - } - m_strm << "]"; - } - }; - - template - struct storage_entry_store_to_json_visitor: public boost::static_visitor - { - t_stream& m_strm; - size_t m_indent; - bool m_insert_newlines; - storage_entry_store_to_json_visitor(t_stream& strm, size_t indent, 
- bool insert_newlines = true) - : m_strm(strm), m_indent(indent), m_insert_newlines(insert_newlines) - {} - //section, array_entry - template - void operator()(const visited_type& v) - { - dump_as_json(m_strm, v, m_indent, m_insert_newlines); - } - }; - - template - void dump_as_json(t_stream& strm, const array_entry& ae, size_t indent, bool insert_newlines) - { - array_entry_store_to_json_visitor aesv(strm, indent, insert_newlines); - boost::apply_visitor(aesv, ae); - } - - template - void dump_as_json(t_stream& strm, const storage_entry& se, size_t indent, bool insert_newlines) - { - storage_entry_store_to_json_visitor sv(strm, indent, insert_newlines); - boost::apply_visitor(sv, se); - } - - template - void dump_as_json(t_stream& strm, const std::string& v, size_t indent, bool insert_newlines) - { - strm << "\"" << misc_utils::parse::transform_to_escape_sequence(v) << "\""; - } - - template - void dump_as_json(t_stream& strm, const int8_t& v, size_t indent, bool insert_newlines) - { - strm << static_cast(v); - } - - template - void dump_as_json(t_stream& strm, const uint8_t& v, size_t indent, bool insert_newlines) - { - strm << static_cast(v); - } - - template - void dump_as_json(t_stream& strm, const bool& v, size_t indent, bool insert_newlines) - { - if(v) - strm << "true"; - else - strm << "false"; - } - - - - template - void dump_as_json(t_stream& strm, const t_type& v, size_t indent, bool insert_newlines) - { - strm << v; - } - - template - void dump_as_json(t_stream& strm, const section& sec, size_t indent, bool insert_newlines) - { - size_t local_indent = indent + 1; - std::string newline = insert_newlines ? "\r\n" : ""; - strm << "{" << newline; - std::string indent_str = make_indent(local_indent); - if(sec.m_entries.size()) - { - auto it_last = --sec.m_entries.end(); - for(auto it = sec.m_entries.begin(); it!= sec.m_entries.end();it++) - { - strm << indent_str << "\"" << misc_utils::parse::transform_to_escape_sequence(it->first) << "\"" << ": "; - dump_as_json(strm, it->second, local_indent, insert_newlines); - if(it_last != it) - strm << ","; - strm << newline; - } - } - strm << make_indent(indent) << "}"; - } - } -} \ No newline at end of file diff --git a/contrib/epee/include/storages/portable_storage_val_converters.h b/contrib/epee/include/storages/portable_storage_val_converters.h deleted file mode 100644 index 73d339f5d0..0000000000 --- a/contrib/epee/include/storages/portable_storage_val_converters.h +++ /dev/null @@ -1,169 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -#pragma once - -#include - -#include "misc_language.h" -#include "portable_storage_base.h" -#include "warnings.h" - -namespace epee -{ - namespace serialization - { -#define ASSERT_AND_THROW_WRONG_CONVERSION() ASSERT_MES_AND_THROW("WRONG DATA CONVERSION: from type=" << typeid(from).name() << " to type " << typeid(to).name()) - - template - void convert_int_to_uint(const from_type& from, to_type& to) - { -PUSH_WARNINGS -DISABLE_VS_WARNINGS(4018) - CHECK_AND_ASSERT_THROW_MES(from >=0, "unexpected int value with signed storage value less than 0, and unsigned receiver value"); -DISABLE_GCC_AND_CLANG_WARNING(sign-compare) - CHECK_AND_ASSERT_THROW_MES(from <= std::numeric_limits::max(), "int value overhead: try to set value " << from << " to type " << typeid(to_type).name() << " with max possible value = " << std::numeric_limits::max()); - to = static_cast(from); -POP_WARNINGS - } - template - void convert_int_to_int(const from_type& from, to_type& to) - { - CHECK_AND_ASSERT_THROW_MES(from >= boost::numeric::bounds::lowest(), "int value overhead: try to set value " << from << " to type " << typeid(to_type).name() << " with lowest possible value = " << boost::numeric::bounds::lowest()); -PUSH_WARNINGS -DISABLE_CLANG_WARNING(tautological-constant-out-of-range-compare) - CHECK_AND_ASSERT_THROW_MES(from <= std::numeric_limits::max(), "int value overhead: try to set value " << from << " to type " << typeid(to_type).name() << " with max possible value = " << std::numeric_limits::max()); -POP_WARNINGS - to = static_cast(from); - } - template - void convert_uint_to_any_int(const from_type& from, to_type& to) - { -PUSH_WARNINGS -DISABLE_VS_WARNINGS(4018) -DISABLE_CLANG_WARNING(tautological-constant-out-of-range-compare) - CHECK_AND_ASSERT_THROW_MES(from <= std::numeric_limits::max(), "uint value overhead: try to set value " << from << " to type " << typeid(to_type).name() << " with max possible value = " << std::numeric_limits::max()); - to = static_cast(from); -POP_WARNINGS - } - - template //is from signed, is from to signed - struct convert_to_signed_unsigned; - - template - struct convert_to_signed_unsigned - { - static void convert(const from_type& from, to_type& to) - {//from signed to signed - convert_int_to_int(from, to); - } - }; - - template - struct convert_to_signed_unsigned - { - static void convert(const from_type& from, to_type& to) - {//from signed to unsigned - convert_int_to_uint(from, to); - } - }; - - template - struct convert_to_signed_unsigned - { - static void convert(const from_type& from, to_type& to) - {//from unsigned to signed - convert_uint_to_any_int(from, to); - } - }; - - template - struct convert_to_signed_unsigned - { - static void convert(const from_type& from, to_type& to) - { - //from unsigned 
to unsigned - convert_uint_to_any_int(from, to); - } - }; - - template - struct convert_to_integral; - - template - struct convert_to_integral - { - static void convert(const from_type& from, to_type& to) - { - convert_to_signed_unsigned::value, std::is_signed::value>::convert(from, to); - } - }; - - template - struct convert_to_integral - { - static void convert(const from_type& from, to_type& to) - { - ASSERT_AND_THROW_WRONG_CONVERSION(); - } - }; - - template - struct is_convertable: std::integral_constant::value && - std::is_integral::value && - !std::is_same::value && - !std::is_same::value > {}; - - template - struct convert_to_same; - - template - struct convert_to_same - { - static void convert(const from_type& from, to_type& to) - { - to = from; - } - }; - - template - struct convert_to_same - { - static void convert(const from_type& from, to_type& to) - { - convert_to_integral::value>::convert(from, to);// ASSERT_AND_THROW_WRONG_CONVERSION(); - } - }; - - - template - void convert_t(const from_type& from, to_type& to) - { - convert_to_same::value>::convert(from, to); - } - } -} \ No newline at end of file diff --git a/contrib/epee/include/string_coding.h b/contrib/epee/include/string_coding.h deleted file mode 100644 index a2e3d6eb2d..0000000000 --- a/contrib/epee/include/string_coding.h +++ /dev/null @@ -1,295 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
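The converter machinery deleted above dispatches on std::is_signed through partial specializations so that only the runtime check relevant to each signed/unsigned pairing is instantiated. The same behavior collapses into a single function; a minimal C++11 sketch with illustrative names:

#include <limits>
#include <stdexcept>
#include <type_traits>

// Single-function equivalent of the checked conversions above: reject
// negatives headed into unsigned types, then range-check the magnitude.
template<class To, class From>
To checked_int_cast(From from)
{
  static_assert(std::is_integral<From>::value && std::is_integral<To>::value,
                "integral types only");
  if (std::is_signed<From>::value && from < From(0))
  {
    if (!std::is_signed<To>::value)
      throw std::runtime_error("negative value into unsigned destination");
    if (static_cast<long long>(from) <
        static_cast<long long>(std::numeric_limits<To>::min()))
      throw std::runtime_error("value below destination minimum");
  }
  else if (static_cast<unsigned long long>(from) >
           static_cast<unsigned long long>(std::numeric_limits<To>::max()))
  {
    throw std::runtime_error("value above destination maximum");
  }
  return static_cast<To>(from);
}

// e.g. checked_int_cast<uint16_t>(int64_t(70000)) throws;
//      checked_int_cast<int32_t>(uint64_t(42)) returns 42.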
-// - - -#ifndef _STRING_CODING_H_ -#define _STRING_CODING_H_ - -#include -//#include "md5_l.h" -namespace epee -{ -namespace string_encoding -{ - inline std::string convert_to_ansii(const std::wstring& str_from) - { - - std::string res(str_from.begin(), str_from.end()); - return res; - /* - std::string result; - std::locale loc; - for(unsigned int i= 0; i < str_from.size(); ++i) - { - result += std::use_facet >(loc).narrow(str_from[i]); - } - return result; - */ - - //return boost::lexical_cast(str_from); - /* - std::string str_trgt; - if(!str_from.size()) - return str_trgt; - int cb = ::WideCharToMultiByte( code_page, 0, str_from.data(), (__int32)str_from.size(), 0, 0, 0, 0 ); - if(!cb) - return str_trgt; - str_trgt.resize(cb); - ::WideCharToMultiByte( code_page, 0, str_from.data(), (int)str_from.size(), - (char*)str_trgt.data(), (int)str_trgt.size(), 0, 0); - return str_trgt;*/ - } -#ifdef WINDOWS_PLATFORM_EX - inline std::string convert_to_ansii_win(const std::wstring& str_from) - { - - int code_page = CP_ACP; - std::string str_trgt; - if(!str_from.size()) - return str_trgt; - int cb = ::WideCharToMultiByte( code_page, 0, str_from.data(), (__int32)str_from.size(), 0, 0, 0, 0 ); - if(!cb) - return str_trgt; - str_trgt.resize(cb); - ::WideCharToMultiByte( code_page, 0, str_from.data(), (int)str_from.size(), - (char*)str_trgt.data(), (int)str_trgt.size(), 0, 0); - return str_trgt; - } -#endif - - inline std::string convert_to_ansii(const std::string& str_from) - { - return str_from; - } - - inline std::wstring convert_to_unicode(const std::string& str_from) - { - std::wstring result; - std::locale loc; - for(unsigned int i= 0; i < str_from.size(); ++i) - { - result += std::use_facet >(loc).widen(str_from[i]); - } - return result; - - //return boost::lexical_cast(str_from); - /* - std::wstring str_trgt; - if(!str_from.size()) - return str_trgt; - - int cb = ::MultiByteToWideChar( code_page, 0, str_from.data(), (int)str_from.size(), 0, 0 ); - if(!cb) - return str_trgt; - - str_trgt.resize(cb); - ::MultiByteToWideChar( code_page, 0, str_from.data(),(int)str_from.size(), - (wchar_t*)str_trgt.data(),(int)str_trgt.size()); - return str_trgt;*/ - } - inline std::wstring convert_to_unicode(const std::wstring& str_from) - { - return str_from; - } - - template - inline target_string convert_to_t(const std::wstring& str_from); - - template<> - inline std::string convert_to_t(const std::wstring& str_from) - { - return convert_to_ansii(str_from); - } - - template<> - inline std::wstring convert_to_t(const std::wstring& str_from) - { - return str_from; - } - - template - inline target_string convert_to_t(const std::string& str_from); - - template<> - inline std::string convert_to_t(const std::string& str_from) - { - return str_from; - } - - template<> - inline std::wstring convert_to_t(const std::string& str_from) - { - return convert_to_unicode(str_from); - } - - inline - std::string& base64_chars() - { - - static std::string chars = "ABCDEFGHIJKLMNOPQRSTUVWXYZ" - "abcdefghijklmnopqrstuvwxyz" - "0123456789+/"; - - return chars; - - } - - inline - std::string base64_encode(unsigned char const* bytes_to_encode, size_t in_len) { - std::string ret; - int i = 0; - int j = 0; - unsigned char char_array_3[3]; - unsigned char char_array_4[4]; - - while (in_len--) { - char_array_3[i++] = *(bytes_to_encode++); - if (i == 3) { - char_array_4[0] = (char_array_3[0] & 0xfc) >> 2; - char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4); - char_array_4[2] = ((char_array_3[1] & 0x0f) << 
2) + ((char_array_3[2] & 0xc0) >> 6); - char_array_4[3] = char_array_3[2] & 0x3f; - - for(i = 0; (i <4) ; i++) - ret += base64_chars()[char_array_4[i]]; - i = 0; - } - } - - if (i) - { - for(j = i; j < 3; j++) - char_array_3[j] = '\0'; - - char_array_4[0] = (char_array_3[0] & 0xfc) >> 2; - char_array_4[1] = ((char_array_3[0] & 0x03) << 4) + ((char_array_3[1] & 0xf0) >> 4); - char_array_4[2] = ((char_array_3[1] & 0x0f) << 2) + ((char_array_3[2] & 0xc0) >> 6); - char_array_4[3] = char_array_3[2] & 0x3f; - - for (j = 0; (j < i + 1); j++) - ret += base64_chars()[char_array_4[j]]; - - while((i++ < 3)) - ret += '='; - - } - - return ret; - - } - - inline - std::string base64_encode(const std::string& str) - { - return base64_encode((unsigned char const* )str.data(), str.size()); - } - - inline bool is_base64(unsigned char c) { - return (isalnum(c) || (c == '+') || (c == '/')); - } - - - inline - std::string base64_decode(std::string const& encoded_string) { - size_t in_len = encoded_string.size(); - size_t i = 0; - size_t j = 0; - size_t in_ = 0; - unsigned char char_array_4[4], char_array_3[3]; - std::string ret; - - while (in_len-- && ( encoded_string[in_] != '=') && is_base64(encoded_string[in_])) { - char_array_4[i++] = encoded_string[in_]; in_++; - if (i ==4) { - for (i = 0; i <4; i++) - char_array_4[i] = (unsigned char)base64_chars().find(char_array_4[i]); - - char_array_3[0] = (char_array_4[0] << 2) + ((char_array_4[1] & 0x30) >> 4); - char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2); - char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3]; - - for (i = 0; (i < 3); i++) - ret += char_array_3[i]; - i = 0; - } - } - - if (i) { - for (j = i; j <4; j++) - char_array_4[j] = 0; - - for (j = 0; j <4; j++) - char_array_4[j] = (unsigned char)base64_chars().find(char_array_4[j]); - - char_array_3[0] = (char_array_4[0] << 2) + ((char_array_4[1] & 0x30) >> 4); - char_array_3[1] = ((char_array_4[1] & 0xf) << 4) + ((char_array_4[2] & 0x3c) >> 2); - char_array_3[2] = ((char_array_4[2] & 0x3) << 6) + char_array_4[3]; - - for (j = 0; (j < i - 1); j++) ret += char_array_3[j]; - } - - return ret; - } - - //md5 -#ifdef MD5_H - inline - std::string get_buf_as_hex_string(const void* pbuf, size_t len) - { - std::ostringstream result; - - const unsigned char* p_buff = (const unsigned char*)pbuf; - - for(unsigned int i=0;i(len), output); - return get_buf_as_hex_string(output, sizeof(output)); - } - - inline - std::string get_md5_as_hexstring(const std::string& src) - { - return get_md5_as_hexstring(src.data(), src.size()); - } -#endif - - -} -} - -#endif //_STRING_CODING_H_ diff --git a/contrib/epee/include/string_tools.cpp b/contrib/epee/include/string_tools.cpp deleted file mode 100644 index 8181bc1021..0000000000 --- a/contrib/epee/include/string_tools.cpp +++ /dev/null @@ -1,487 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. 
Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - -#include "string_tools.h" - -#include -#include -#include -#include - -#include -#include -#include -#include -#include -#include - - -#ifdef WINDOWS_PLATFORM -#pragma comment (lib, "Rpcrt4.lib") -#endif - -namespace epee -{ -namespace string_tools -{ - std::wstring get_str_from_guid(const boost::uuids::uuid& rid) - { - return boost::lexical_cast(rid); - } - //---------------------------------------------------------------------------- - std::string get_str_from_guid_a(const boost::uuids::uuid& rid) - { - return boost::lexical_cast(rid); - } - //---------------------------------------------------------------------------- - bool get_guid_from_string( boost::uuids::uuid& inetifer, std::wstring str_id) - { - if(str_id.size() < 36) - return false; - - if('{' == *str_id.begin()) - str_id.erase(0, 1); - - if('}' == *(--str_id.end())) - str_id.erase(--str_id.end()); - - try - { - inetifer = boost::lexical_cast(str_id); - return true; - } - catch(...) - { - return false; - } - } - //---------------------------------------------------------------------------- - bool get_guid_from_string(OUT boost::uuids::uuid& inetifer, const std::string& str_id) - { - std::string local_str_id = str_id; - if(local_str_id.size() < 36) - return false; - - if('{' == *local_str_id.begin()) - local_str_id.erase(0, 1); - - if('}' == *(--local_str_id.end())) - local_str_id.erase(--local_str_id.end()); - - try - { - inetifer = boost::lexical_cast(local_str_id); - return true; - } - catch(...) 
- { - return false; - } - } - //---------------------------------------------------------------------------- -//#ifdef _WINSOCK2API_ - std::string get_ip_string_from_int32(uint32_t ip) - { - in_addr adr; - adr.s_addr = ip; - const char* pbuf = inet_ntoa(adr); - if(pbuf) - return pbuf; - else - return "[failed]"; - } - //---------------------------------------------------------------------------- - bool get_ip_int32_from_string(uint32_t& ip, const std::string& ip_str) - { - ip = inet_addr(ip_str.c_str()); - if(INADDR_NONE == ip) - return false; - - return true; - } - //---------------------------------------------------------------------------- - bool parse_peer_from_string(uint32_t& ip, uint32_t& port, const std::string& addres) - { - //parse ip and address - std::string::size_type p = addres.find(':'); - if(p == std::string::npos) - { - return false; - } - std::string ip_str = addres.substr(0, p); - std::string port_str = addres.substr(p+1, addres.size()); - - if(!get_ip_int32_from_string(ip, ip_str)) - { - return false; - } - - if(!get_xtype_from_string(port, port_str)) - { - return false; - } - return true; - } - - //---------------------------------------------------------------------------- - std::string num_to_string_fast(int64_t val) - { - /* - char buff[30] = {0}; - i64toa_s(val, buff, sizeof(buff)-1, 10); - return buff;*/ - return boost::lexical_cast(val); - } - //---------------------------------------------------------------------------- - bool string_to_num_fast(const std::string& buff, int64_t& val) - { - //return get_xtype_from_string(val, buff); -#if (defined _MSC_VER) - val = _atoi64(buff.c_str()); -#else - val = atoll(buff.c_str()); -#endif - /* - * val = atoi64(buff.c_str()); - */ - if(buff != "0" && val == 0) - return false; - return true; - } - //---------------------------------------------------------------------------- - bool string_to_num_fast(const std::string& buff, int& val) - { - val = atoi(buff.c_str()); - if(buff != "0" && val == 0) - return false; - - return true; - } - //---------------------------------------------------------------------------- -#ifdef WINDOWS_PLATFORM - std::string system_time_to_string(const SYSTEMTIME& st) - { - - /* - TIME_ZONE_INFORMATION tzi; - GetTimeZoneInformation(&tzi); - SystemTimeToTzSpecificLocalTime(&tzi, &stUTC, &stLocal); - */ - - char szTime[25], szDate[25]; - ::GetTimeFormatA( - LOCALE_USER_DEFAULT, // locale - TIME_FORCE24HOURFORMAT, // options - &st, // time - NULL, // time format string - szTime, // formatted string buffer - 25 // size of string buffer - ); - - ::GetDateFormatA( - LOCALE_USER_DEFAULT, // locale - NULL, // options - &st, // date - NULL, // date format - szDate, // formatted string buffer - 25 // size of buffer - ); - szTime[24] = szDate[24] = 0; //be happy :) - - std::string res = szDate; - (res += " " )+= szTime; - return res; - - } -#endif - //---------------------------------------------------------------------------- - - bool compare_no_case(const std::string& str1, const std::string& str2) - { - - return !boost::iequals(str1, str2); - } - //---------------------------------------------------------------------------- - bool compare_no_case(const std::wstring& str1, const std::wstring& str2) - { - return !boost::iequals(str1, str2); - } - //---------------------------------------------------------------------------- - bool is_match_prefix(const std::wstring& str1, const std::wstring& prefix) - { - if(prefix.size()>str1.size()) - return false; - - if(!compare_no_case(str1.substr(0, prefix.size()), 
prefix)) - return true; - else - return false; - } - //---------------------------------------------------------------------------- - bool is_match_prefix(const std::string& str1, const std::string& prefix) - { - if(prefix.size()>str1.size()) - return false; - - if(!compare_no_case(str1.substr(0, prefix.size()), prefix)) - return true; - else - return false; - } - //---------------------------------------------------------------------------- - std::string& get_current_module_name() - { - static std::string module_name; - return module_name; - } - //---------------------------------------------------------------------------- - std::string& get_current_module_folder() - { - static std::string module_folder; - return module_folder; - } - //---------------------------------------------------------------------------- -#ifdef _WIN32 - std::string get_current_module_path() - { - char pname [5000] = {0}; - GetModuleFileNameA( NULL, pname, sizeof(pname)); - pname[sizeof(pname)-1] = 0; //be happy ;) - return pname; - } -#endif - //---------------------------------------------------------------------------- - bool set_module_name_and_folder(const std::string& path_to_process_) - { - std::string path_to_process = path_to_process_; -#ifdef _WIN32 - path_to_process = get_current_module_path(); -#endif - std::string::size_type a = path_to_process.rfind( '\\' ); - if(a == std::string::npos ) - { - a = path_to_process.rfind( '/' ); - } - if ( a != std::string::npos ) - { - get_current_module_name() = path_to_process.substr(a+1, path_to_process.size()); - get_current_module_folder() = path_to_process.substr(0, a); - return true; - }else - return false; - - } - - //---------------------------------------------------------------------------- - bool trim_left(std::string& str) - { - for(std::string::iterator it = str.begin(); it!= str.end() && isspace(static_cast(*it));) - str.erase(str.begin()); - - return true; - } - //---------------------------------------------------------------------------- - bool trim_right(std::string& str) - { - - for(std::string::reverse_iterator it = str.rbegin(); it!= str.rend() && isspace(static_cast(*it));) - str.erase( --((it++).base())); - - return true; - } - //---------------------------------------------------------------------------- - std::string& trim(std::string& str) - { - - trim_left(str); - trim_right(str); - return str; - } - //---------------------------------------------------------------------------- - std::string trim(const std::string& str_) - { - std::string str = str_; - trim_left(str); - trim_right(str); - return str; - } - //---------------------------------------------------------------------------- - std::string get_extension(const std::string& str) - { - std::string res; - std::string::size_type pos = str.rfind('.'); - if(std::string::npos == pos) - return res; - - res = str.substr(pos+1, str.size()-pos); - return res; - } - //---------------------------------------------------------------------------- - std::string get_filename_from_path(const std::string& str) - { - std::string res; - std::string::size_type pos = str.rfind('\\'); - if(std::string::npos == pos) - return str; - - res = str.substr(pos+1, str.size()-pos); - return res; - } - //---------------------------------------------------------------------------- - - - - std::string cut_off_extension(const std::string& str) - { - std::string res; - std::string::size_type pos = str.rfind('.'); - if(std::string::npos == pos) - return str; - - res = str.substr(0, pos); - return res; - } - - 
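parse_peer_from_string earlier in this file splits an "ip:port" string on the first ':' and delegates to inet_addr plus get_xtype_from_string. A self-contained sketch of the same split-and-validate shape using only the standard library (names illustrative):

#include <cstdint>
#include <sstream>
#include <string>

// Split "host:port" and range-check the port before narrowing it.
bool parse_host_port(const std::string& address, std::string& host, uint16_t& port)
{
  const std::string::size_type p = address.find(':');
  if (p == std::string::npos || p == 0 || p + 1 >= address.size())
    return false;                    // no separator, empty host, or empty port
  host = address.substr(0, p);
  std::istringstream iss(address.substr(p + 1));
  unsigned long value = 0;
  if (!(iss >> value) || !iss.eof() || value > 65535)
    return false;                    // non-numeric, trailing junk, or out of range
  port = static_cast<uint16_t>(value);
  return true;
}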
//---------------------------------------------------------------------------- -#ifdef _WININET_ - std::string get_string_from_systemtime(const SYSTEMTIME& sys_time) - { - std::string result_string; - - char buff[100] = {0}; - BOOL res = ::InternetTimeFromSystemTimeA(&sys_time, INTERNET_RFC1123_FORMAT, buff, 99); - if(!res) - { - LOG_ERROR("Failed to load SytemTime to string"); - } - - result_string = buff; - return result_string; - - } - //------------------------------------------------------------------------------------- - SYSTEMTIME get_systemtime_from_string(const std::string& buff) - { - SYSTEMTIME result_time = {0}; - - BOOL res = ::InternetTimeToSystemTimeA(buff.c_str(), &result_time, NULL); - if(!res) - { - LOG_ERROR("Failed to load SytemTime from string " << buff << "interval set to 15 minutes"); - } - - return result_time; - } -#endif - -#ifdef WINDOWS_PLATFORM - const wchar_t* get_pc_name() - { - static wchar_t info[INFO_BUFFER_SIZE]; - static DWORD bufCharCount = INFO_BUFFER_SIZE; - static bool init = false; - - if (!init) { - if (!GetComputerNameW( info, &bufCharCount )) - info[0] = 0; - else - init = true; - } - - return info; - } - - const wchar_t* get_user_name() - { - static wchar_t info[INFO_BUFFER_SIZE]; - static DWORD bufCharCount = INFO_BUFFER_SIZE; - static bool init = false; - - if (!init) { - if (!GetUserNameW( info, &bufCharCount )) - info[0] = 0; - else - init = true; - } - - return info; - } -#endif - -#ifdef _LM_ - const wchar_t* get_domain_name() - { - static wchar_t info[INFO_BUFFER_SIZE]; - static DWORD bufCharCount = 0; - static bool init = false; - - if (!init) { - LPWSTR domain( NULL ); - NETSETUP_JOIN_STATUS status; - info[0] = 0; - - if (NET_API_STATUS result = NetGetJoinInformation( NULL, &domain, &status )) - { - LOG_ERROR("get_domain_name error: " << log_space::get_win32_err_descr(result)); - } else - { - StringCchCopyW( info, sizeof(info)/sizeof( info[0] ), domain ); - NetApiBufferFree((void*)domain); - init = true; - } - } - - return info; - } -#endif -#ifdef WINDOWS_PLATFORM - inline - std::string load_resource_string_a(int id, const char* pmodule_name = NULL) - { - //slow realization - HMODULE h = ::GetModuleHandleA( pmodule_name ); - - char buff[2000] = {0}; - - ::LoadStringA( h, id, buff, sizeof(buff)); - buff[sizeof(buff)-1] = 0; //be happy :) - return buff; - } - inline - std::wstring load_resource_string_w(int id, const char* pmodule_name = NULL) - { - //slow realization - HMODULE h = ::GetModuleHandleA( pmodule_name ); - - wchar_t buff[2000] = {0}; - - ::LoadStringW( h, id, buff, sizeof(buff) / sizeof( buff[0] ) ); - buff[(sizeof(buff)/sizeof(buff[0]))-1] = 0; //be happy :) - return buff; - } -#endif -} -} diff --git a/contrib/epee/include/string_tools.h b/contrib/epee/include/string_tools.h deleted file mode 100644 index c5d09421b5..0000000000 --- a/contrib/epee/include/string_tools.h +++ /dev/null @@ -1,360 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. 
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-#pragma once
-
-#ifndef _STRING_TOOLS_H_
-#define _STRING_TOOLS_H_
-
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-#include
-
-#include
-
-#include "warnings.h"
-
-#ifndef OUT
-  #define OUT
-#endif
-
-#ifdef WINDOWS_PLATFORM
-#pragma comment (lib, "Rpcrt4.lib")
-#endif
-
-// Don't include lexical_cast.hpp, to reduce compilation time
-//#include <boost/lexical_cast.hpp>
-
-namespace boost {
-  namespace uuids {
-    struct uuid;
-  }
-
-  template<typename Target, typename Source>
-  inline Target lexical_cast(const Source &arg);
-}
-
-namespace epee
-{
-namespace string_tools
-{
-  std::wstring get_str_from_guid(const boost::uuids::uuid& rid);
-  std::string get_str_from_guid_a(const boost::uuids::uuid& rid);
-  bool get_guid_from_string( boost::uuids::uuid& inetifer, std::wstring str_id);
-  bool get_guid_from_string(OUT boost::uuids::uuid& inetifer, const std::string& str_id);
-  //----------------------------------------------------------------------------
-  template<class CharT>
-  std::basic_string<CharT> buff_to_hex(const std::basic_string<CharT>& s)
-  {
-    std::basic_stringstream<CharT> hexStream;
-    hexStream << std::hex << std::noshowbase << std::setw(2);
-
-    for(typename std::basic_string<CharT>::const_iterator it = s.begin(); it != s.end(); it++)
-    {
-      hexStream << "0x"<< static_cast<unsigned int>(static_cast<unsigned char>(*it)) << " ";
-    }
-    return hexStream.str();
-  }
-  //----------------------------------------------------------------------------
-  template<class CharT>
-  std::basic_string<CharT> buff_to_hex_nodelimer(const std::basic_string<CharT>& s)
-  {
-    std::basic_stringstream<CharT> hexStream;
-    hexStream << std::hex << std::noshowbase;
-
-    for(typename std::basic_string<CharT>::const_iterator it = s.begin(); it != s.end(); it++)
-    {
-      hexStream << std::setw(2) << std::setfill('0') << static_cast<unsigned int>(static_cast<unsigned char>(*it));
-    }
-    return hexStream.str();
-  }
-  //----------------------------------------------------------------------------
-  template<class CharT>
-  bool parse_hexstr_to_binbuff(const std::basic_string<CharT>& s, std::basic_string<CharT>& res)
-  {
-    res.clear();
-    try
-    {
-      long v = 0;
-      for(size_t i = 0; i < (s.size() + 1) / 2; i++)
-      {
-        CharT byte_str[3];
-        size_t copied = s.copy(byte_str, 2, 2 * i);
-        byte_str[copied] = CharT(0);
-        CharT* endptr;
-        v = strtoul(byte_str, &endptr, 16);
-        if (v < 0 || 0xFF < v || endptr != byte_str + copied)
-        {
-          return false;
-        }
-        res.push_back(static_cast<unsigned char>(v));
-      }
-
-      return true;
-    }catch(...)
-    {
-      return false;
-    }
-  }
-  //----------------------------------------------------------------------------
-  template<class t_pod_type>
-  bool parse_tpod_from_hex_string(const std::string& str_hash, t_pod_type& t_pod)
-  {
-    std::string buf;
-    bool res = epee::string_tools::parse_hexstr_to_binbuff(str_hash, buf);
-    if (!res || buf.size() != sizeof(t_pod_type))
-    {
-      return false;
-    }
-    else
-    {
-      buf.copy(reinterpret_cast<char*>(&t_pod), sizeof(t_pod_type));
-      return true;
-    }
-  }
-  //----------------------------------------------------------------------------
-PUSH_WARNINGS
-DISABLE_GCC_WARNING(maybe-uninitialized)
-  template<typename XType>
-  inline bool get_xtype_from_string(OUT XType& val, const std::string& str_id)
-  {
-    if (std::is_integral<XType>::value && !std::numeric_limits<XType>::is_signed && !std::is_same<XType, bool>::value)
-    {
-      for (char c : str_id)
-      {
-        if (!std::isdigit(c))
-          return false;
-      }
-    }
-
-    try
-    {
-      val = boost::lexical_cast<XType>(str_id);
-      return true;
-    }
-    catch(std::exception& /*e*/)
-    {
-      //const char* pmsg = e.what();
-      return false;
-    }
-    catch(...)
-    {
-      return false;
-    }
-
-    return true;
-  }
-POP_WARNINGS
-  //---------------------------------------------------
-  template<class int_t>
-  bool get_xnum_from_hex_string(const std::string str, int_t& res )
-  {
-    try
-    {
-      std::stringstream ss;
-      ss << std::hex << str;
-      ss >> res;
-      return true;
-    }
-    catch(...)
-    {
-      return false;
-    }
-  }
-  //----------------------------------------------------------------------------
-  template<typename XType>
-  inline bool xtype_to_string(const XType& val, std::string& str)
-  {
-    try
-    {
-      str = boost::lexical_cast<std::string>(val);
-    }
-    catch(...)
-    {
-      return false;
-    }
-
-    return true;
-  }
-
-  typedef std::map<std::string, std::string> command_line_params_a;
-  typedef std::map<std::wstring, std::wstring> command_line_params_w;
-
-  template<class t_string>
-  bool parse_commandline(std::map<t_string, t_string>& res, int argc, char** argv)
-  {
-    t_string key;
-    for(int i = 1; i < argc; i++)
-    {
-      if(!argv[i])
-        break;
-      t_string s = argv[i];
-      std::string::size_type p = s.find('=');
-      if(std::string::npos == p)
-      {
-        res[s] = "";
-      }else
-      {
-        std::string ss;
-        t_string nm = s.substr(0, p);
-        t_string vl = s.substr(p+1, s.size());
-        res[nm] = vl;
-      }
-    }
-    return true;
-  }
-
-  template<class t_string, class t_type>
-  bool get_xparam_from_command_line(const std::map<t_string, t_string>& res, const t_string & key, t_type& val)
-  {
-    typename std::map<t_string, t_string>::const_iterator it = res.find(key);
-    if(it == res.end())
-      return false;
-
-    if(it->second.size())
-    {
-      return get_xtype_from_string(val, it->second);
-    }
-
-    return true;
-  }
-
-  template<class t_string, class t_type>
-  t_type get_xparam_from_command_line(const std::map<t_string, t_string>& res, const t_string & key, const t_type& default_value)
-  {
-    typename std::map<t_string, t_string>::const_iterator it = res.find(key);
-    if(it == res.end())
-      return default_value;
-
-    if(it->second.size())
-    {
-      t_type s;
-      get_xtype_from_string(s, it->second);
-      return s;
-    }
-
-    return default_value;
-  }
-
-  template<class t_char_type>
-  bool have_in_command_line(const std::map<std::basic_string<t_char_type>, std::basic_string<t_char_type> >& res, const std::basic_string<t_char_type>& key)
-  {
-    typename std::map<std::basic_string<t_char_type>, std::basic_string<t_char_type> >::const_iterator it = res.find(key);
-    if(it == res.end())
-      return false;
-
-    return true;
-  }
-
-  //----------------------------------------------------------------------------
-  std::string get_ip_string_from_int32(uint32_t ip);
-  bool get_ip_int32_from_string(uint32_t& ip, const std::string& ip_str);
-  bool parse_peer_from_string(uint32_t& ip, uint32_t& port, const std::string& addres);
-  //----------------------------------------------------------------------------
-  template<class t>
-  inline std::string get_t_as_hex_nwidth(const t& v, std::streamsize w = 8)
-  {
-    std::stringstream ss;
-    ss << std::setfill ('0') << std::setw (w) << std::hex << std::noshowbase;
-    ss << v;
-    return ss.str();
-  }
-  //----------------------------------------------------------------------------
-  std::string num_to_string_fast(int64_t val);
-  bool string_to_num_fast(const std::string& buff, int64_t& val);
-  bool string_to_num_fast(const std::string& buff, int& val);
-#ifdef WINDOWS_PLATFORM
-  std::string system_time_to_string(const SYSTEMTIME& st);
-#endif
-  bool compare_no_case(const std::string& str1, const std::string& str2);
-  bool compare_no_case(const std::wstring& str1, const std::wstring& str2);
-  bool is_match_prefix(const std::wstring& str1, const std::wstring& prefix);
-  bool is_match_prefix(const std::string& str1, const std::string& prefix);
-  std::string& get_current_module_name();
-  std::string& get_current_module_folder();
-#ifdef _WIN32
-  std::string get_current_module_path();
-#endif
-  bool set_module_name_and_folder(const std::string& path_to_process_);
-  bool trim_left(std::string& str);
-  bool trim_right(std::string& str);
-  std::string& trim(std::string& str);
-  std::string trim(const std::string& str_);
-  //----------------------------------------------------------------------------
-  template<class t_pod_type>
-  std::string pod_to_hex(const t_pod_type& s)
-  {
-    std::string buff;
-    buff.assign(reinterpret_cast<const char*>(&s), sizeof(s));
-    return buff_to_hex_nodelimer(buff);
-  }
-  //----------------------------------------------------------------------------
-  template<class t_pod_type>
-  bool hex_to_pod(const std::string& hex_str, t_pod_type& s)
-  {
-    std::string hex_str_tr = trim(hex_str);
-    if(sizeof(s)*2 != hex_str.size())
-      return false;
-    std::string bin_buff;
-    if(!parse_hexstr_to_binbuff(hex_str_tr, bin_buff))
-      return false;
-    if(bin_buff.size()!=sizeof(s))
-      return false;
-
-    s = *(t_pod_type*)bin_buff.data();
-    return true;
-  }
-  //----------------------------------------------------------------------------
-  std::string get_extension(const std::string& str);
-  std::string get_filename_from_path(const std::string& str);
-  std::string cut_off_extension(const std::string& str);
-#ifdef _WININET_
-  std::string get_string_from_systemtime(const SYSTEMTIME& sys_time);
-  SYSTEMTIME get_systemtime_from_string(const std::string& buff);
-#endif
-
-#ifdef WINDOWS_PLATFORM
-  const DWORD INFO_BUFFER_SIZE = 10000;
-
-  const wchar_t* get_pc_name();
-  const wchar_t* get_user_name();
-#endif
-
-#ifdef _LM_
-  const wchar_t* get_domain_name();
-#endif
-#ifdef WINDOWS_PLATFORM
-  std::string load_resource_string_a(int id, const char* pmodule_name = NULL);
-  std::wstring load_resource_string_w(int id, const char* pmodule_name = NULL);
-#endif
-}
-}
-#endif //_STRING_TOOLS_H_
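Since the header above is being deleted, here is a round-trip sketch of its pod_to_hex()/hex_to_pod() pair for reference (key32 is a hypothetical fixed-size POD; note that hex_to_pod() checks the length of the untrimmed input, so surrounding whitespace makes it fail):

#include <cassert>
#include <cstdint>
#include <string>
#include "string_tools.h"

#pragma pack(push, 1)
struct key32 { uint8_t bytes[32]; }; // hypothetical POD, e.g. a hash or key
#pragma pack(pop)

int main()
{
  key32 k = {};
  k.bytes[0] = 0xde;
  k.bytes[31] = 0xad;

  // 32 bytes -> 64 hex characters, no delimiters
  std::string hex = epee::string_tools::pod_to_hex(k);
  assert(hex.size() == 64);

  key32 back = {};
  assert(epee::string_tools::hex_to_pod(hex, back)); // false on any size/format mismatch
  assert(back.bytes[0] == 0xde && back.bytes[31] == 0xad);
  return 0;
}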
diff --git a/contrib/epee/include/syncobj.h b/contrib/epee/include/syncobj.h
deleted file mode 100644
index 52cb70e609..0000000000
--- a/contrib/epee/include/syncobj.h
+++ /dev/null
@@ -1,238 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-
-
-
-#ifndef __WINH_OBJ_H__
-#define __WINH_OBJ_H__
-
-#include <mutex>
-#include <condition_variable>
-
-namespace epee
-{
-  struct simple_event
-  {
-    simple_event() : m_rised(false)
-    {
-    }
-
-    void raise()
-    {
-      std::unique_lock<std::mutex> lock(m_mx);
-      m_rised = true;
-      m_cond_var.notify_one();
-    }
-
-    void wait()
-    {
-      std::unique_lock<std::mutex> lock(m_mx);
-      while (!m_rised)
-        m_cond_var.wait(lock);
-      m_rised = false;
-    }
-
-  private:
-    std::mutex m_mx;
-    std::condition_variable m_cond_var;
-    bool m_rised;
-  };
-
-  class critical_region;
-
-  class critical_section
-  {
-    std::recursive_mutex m_section;
-
-  public:
-    //to make copy fake!
-    critical_section(const critical_section& section)
-    {
-    }
-
-    critical_section()
-    {
-    }
-
-    ~critical_section()
-    {
-    }
-
-    void lock()
-    {
-      m_section.lock();
-    }
-
-    void unlock()
-    {
-      m_section.unlock();
-    }
-
-    bool tryLock()
-    {
-      return m_section.try_lock();
-    }
-
-    // to make copy fake
-    critical_section& operator=(const critical_section& section)
-    {
-      return *this;
-    }
-  };
-
-
-  template<class t_lock>
-  class critical_region_t
-  {
-    t_lock& m_locker;
-    bool m_unlocked;
-
-    critical_region_t(const critical_region_t&) {}
-
-  public:
-    critical_region_t(t_lock& cs): m_locker(cs), m_unlocked(false)
-    {
-      m_locker.lock();
-    }
-
-    ~critical_region_t()
-    {
-      unlock();
-    }
-
-    void unlock()
-    {
-      if (!m_unlocked)
-      {
-        m_locker.unlock();
-        m_unlocked = true;
-      }
-    }
-  };
-
-
-#if defined(WINDWOS_PLATFORM)
-  class shared_critical_section
-  {
-  public:
-    shared_critical_section()
-    {
-      ::InitializeSRWLock(&m_srw_lock);
-    }
-    ~shared_critical_section()
-    {}
-
-    bool lock_shared()
-    {
-      AcquireSRWLockShared(&m_srw_lock);
-      return true;
-    }
-    bool unlock_shared()
-    {
-      ReleaseSRWLockShared(&m_srw_lock);
-      return true;
-    }
-    bool lock_exclusive()
-    {
-      ::AcquireSRWLockExclusive(&m_srw_lock);
-      return true;
-    }
-    bool unlock_exclusive()
-    {
-      ::ReleaseSRWLockExclusive(&m_srw_lock);
-      return true;
-    }
-  private:
-    SRWLOCK m_srw_lock;
-  };
-
-
-  class shared_guard
-  {
-  public:
-    shared_guard(shared_critical_section& ref_sec):m_ref_sec(ref_sec)
-    {
-      m_ref_sec.lock_shared();
-    }
-
-    ~shared_guard()
-    {
-      m_ref_sec.unlock_shared();
-    }
-
-  private:
-    shared_critical_section& m_ref_sec;
-  };
-
-
-  class exclusive_guard
-  {
-  public:
-    exclusive_guard(shared_critical_section& ref_sec):m_ref_sec(ref_sec)
-    {
-      m_ref_sec.lock_exclusive();
-    }
-
-    ~exclusive_guard()
-    {
-      m_ref_sec.unlock_exclusive();
-    }
-
-  private:
-    shared_critical_section& m_ref_sec;
-  };
-#endif
-
-#define SHARED_CRITICAL_REGION_BEGIN(x) { shared_guard critical_region_var(x)
-#define EXCLUSIVE_CRITICAL_REGION_BEGIN(x) { exclusive_guard critical_region_var(x)
-
-#define CRITICAL_REGION_LOCAL(x) epee::critical_region_t<decltype(x)> critical_region_var(x)
-#define CRITICAL_REGION_BEGIN(x) { epee::critical_region_t<decltype(x)> critical_region_var(x)
-#define CRITICAL_REGION_LOCAL1(x) epee::critical_region_t<decltype(x)> critical_region_var1(x)
-#define CRITICAL_REGION_BEGIN1(x) { epee::critical_region_t<decltype(x)> critical_region_var1(x)
-
-#define CRITICAL_REGION_END() }
-
-
-#if defined(WINDWOS_PLATFORM)
-  inline const char* get_wait_for_result_as_text(DWORD res)
-  {
-    switch(res)
-    {
-    case WAIT_ABANDONED: return "WAIT_ABANDONED";
-    case WAIT_TIMEOUT: return "WAIT_TIMEOUT";
-    case WAIT_OBJECT_0: return "WAIT_OBJECT_0";
-    case WAIT_OBJECT_0+1: return "WAIT_OBJECT_1";
-    case WAIT_OBJECT_0+2: return "WAIT_OBJECT_2";
-    default: return "UNKNOWN CODE";
-    }
-  }
-#endif
-
-}
-
-#endif
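For reference, the CRITICAL_REGION_* macros above expand to the RAII guard critical_region_t<decltype(x)>, so the lock is released by the destructor on every exit path. A minimal sketch of the idiom, assuming the header as it existed before this deletion and a hypothetical shared counter:

#include "syncobj.h"

static epee::critical_section g_lock; // recursive-mutex wrapper from syncobj.h
static int g_counter = 0;

int bump()
{
  // Expands to: epee::critical_region_t<decltype(g_lock)> critical_region_var(g_lock);
  CRITICAL_REGION_LOCAL(g_lock);
  return ++g_counter; // g_lock is unlocked when critical_region_var goes out of scope
}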
diff --git a/contrib/epee/include/time_helper.h b/contrib/epee/include/time_helper.h
deleted file mode 100644
index 958176da6f..0000000000
--- a/contrib/epee/include/time_helper.h
+++ /dev/null
@@ -1,159 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-
-
-#pragma once
-
-//#include
-//#include
-#include <boost/date_time/posix_time/posix_time.hpp>
-#include <boost/lexical_cast.hpp>
-#include "pragma_comp_defs.h"
-
-namespace epee
-{
-namespace misc_utils
-{
-
-#ifdef __ATLTIME_H__
-
-  inline
-  bool get_time_t_from_ole_date(DATE src, time_t& res)
-  {
-    SYSTEMTIME st = {0};
-    if(TRUE != ::VariantTimeToSystemTime(src, &st))
-      return false;
-    ATL::CTime ss(st);
-    res = ss.GetTime();
-    return true;
-  }
-#endif
-  inline
-  std::string get_time_str(const time_t& time_)
-  {
-
-
-    char tmpbuf[200] = {0};
-    tm* pt = NULL;
-PRAGMA_WARNING_PUSH
-PRAGMA_WARNING_DISABLE_VS(4996)
-    pt = localtime(&time_);
-PRAGMA_WARNING_POP
-
-    if(pt)
-      strftime( tmpbuf, 199, "%d.%m.%Y %H:%M:%S", pt );
-    else
-    {
-      std::stringstream strs;
-      strs << "[wrong_time: " << std::hex << time_ << "]";
-      return strs.str();
-    }
-    return tmpbuf;
-  }
-
-  inline
-  std::string get_time_str_v2(const time_t& time_)
-  {
-
-    char tmpbuf[200] = {0};
-    tm* pt = NULL;
-PRAGMA_WARNING_PUSH
-PRAGMA_WARNING_DISABLE_VS(4996)
-    pt = localtime(&time_);
-PRAGMA_WARNING_POP
-
-    if(pt)
-      strftime( tmpbuf, 199, "%Y_%m_%d %H_%M_%S", pt );
-    else
-    {
-      std::stringstream strs;
-      strs << "[wrong_time: " << std::hex << time_ << "]";
-      return strs.str();
-    }
-    return tmpbuf;
-  }
-
-  inline
-  std::string get_time_str_v3(const boost::posix_time::ptime& time_)
-  {
-    return boost::posix_time::to_simple_string(time_);
-  }
-
-
-
-  inline std::string get_internet_time_str(const time_t& time_)
-  {
-    char tmpbuf[200] = {0};
-    tm* pt = NULL;
-PRAGMA_WARNING_PUSH
-PRAGMA_WARNING_DISABLE_VS(4996)
-    pt = gmtime(&time_);
-PRAGMA_WARNING_POP
-    strftime( tmpbuf, 199, "%a, %d %b %Y %H:%M:%S GMT", pt );
-    return tmpbuf;
-  }
-
-  inline std::string get_time_interval_string(const time_t& time_)
-  {
-    std::string res;
-    time_t tail = time_;
-PRAGMA_WARNING_PUSH
-PRAGMA_WARNING_DISABLE_VS(4244)
-    int days = tail/(60*60*24);
-    tail = tail%(60*60*24);
-    int hours = tail/(60*60);
-    tail = tail%(60*60);
-    int minutes = tail/(60);
-    tail = tail%(60);
-    int seconds = tail;
-PRAGMA_WARNING_POP
-    res = std::string() + "d" + boost::lexical_cast<std::string>(days) + ".h" + boost::lexical_cast<std::string>(hours) + ".m" + boost::lexical_cast<std::string>(minutes) + ".s" + boost::lexical_cast<std::string>(seconds);
-    return res;
-  }
-
-#ifdef __SQLEXT
-  inline
-  bool odbc_time_to_oledb_taime(const SQL_TIMESTAMP_STRUCT& odbc_timestamp, DATE& oledb_date)
-  {
-
-    SYSTEMTIME st = {0};
-    st.wYear = odbc_timestamp.year;
-    st.wDay = odbc_timestamp.day;
-    st.wHour = odbc_timestamp.hour ;
-    st.wMilliseconds = (WORD)odbc_timestamp.fraction ;
-    st.wMinute = odbc_timestamp.minute ;
-    st.wMonth = odbc_timestamp.month ;
-    st.wSecond = odbc_timestamp.second ;
-
-    if(TRUE != ::SystemTimeToVariantTime(&st, &oledb_date))
-      return false;
-    return true;
-  }
-
-#endif
-}
-}
\ No newline at end of file
diff --git a/contrib/epee/include/tiny_ini.h b/contrib/epee/include/tiny_ini.h
deleted file mode 100644
index 2bc71fc1a4..0000000000
--- a/contrib/epee/include/tiny_ini.h
+++ /dev/null
@@ -1,75 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-
-#ifndef _TINY_INI_H_
-#define _TINY_INI_H_
-
-#include <string>
-#include <boost/regex.hpp>
-#include "string_tools.h"
-
-namespace epee
-{
-namespace tiny_ini
-{
-
-  inline
-  bool get_param_value(const std::string& param_name, const std::string& ini_entry, std::string& res)
-  {
-    std::string expr_str = std::string() + "^("+ param_name +") *=(.*?)$";
-    const boost::regex match_ini_entry( expr_str, boost::regex::icase | boost::regex::normal);
-    boost::smatch result;
-    if(!boost::regex_search(ini_entry, result, match_ini_entry, boost::match_default))
-      return false;
-    res = result[2];
-    string_tools::trim(res);
-    return true;
-  }
-  inline
-  std::string get_param_value(const std::string& param_name, const std::string& ini_entry)
-  {
-    std::string buff;
-    get_param_value(param_name, ini_entry, buff);
-    return buff;
-  }
-
-  template<class T>
-  bool get_param_value_as_t(const std::string& param_name, const std::string& ini_entry, T& res)
-  {
-    std::string str_res = get_param_value(param_name, ini_entry);
-
-    string_tools::trim(str_res);
-    if(!str_res.size())
-      return false;
-
-    return string_tools::get_xtype_from_string(res, str_res);
-  }
-
-}
-}
-
-#endif //_TINY_INI_H_
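A short usage sketch of the tiny_ini helpers being removed here (hypothetical entries; get_param_value() extracts the value by regex and trims it, and get_param_value_as_t() additionally converts it via string_tools::get_xtype_from_string()):

#include <cassert>
#include <string>
#include "tiny_ini.h"

int main()
{
  std::string entry = "name = node-01";
  assert(epee::tiny_ini::get_param_value("name", entry) == "node-01");

  int level = 0;
  assert(epee::tiny_ini::get_param_value_as_t("log_level", std::string("log_level=2"), level));
  assert(level == 2);
  return 0;
}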
diff --git a/contrib/epee/include/to_nonconst_iterator.h b/contrib/epee/include/to_nonconst_iterator.h
deleted file mode 100644
index 729b0e8b29..0000000000
--- a/contrib/epee/include/to_nonconst_iterator.h
+++ /dev/null
@@ -1,52 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-//
-
-
-
-#ifndef _TO_NONCONST_ITERATOR_H_
-#define _TO_NONCONST_ITERATOR_H_
-
-namespace epee
-{
-
-template<class Type>
-typename Type::iterator to_nonsonst_iterator(Type& obj, typename Type::const_iterator it)
-{
-  typename Type::difference_type dist = std::distance(static_cast<typename Type::const_iterator>(obj.begin()), it);
-  typename Type::iterator res_it = obj.begin()+dist;
-  return res_it;
-}
-
-
-template<class Type>
-typename Type::iterator to_nonsonst_iterator(typename Type::iterator base_it, typename Type::const_iterator it)
-{
-  typename Type::difference_type dist = std::distance(static_cast<typename Type::const_iterator>(base_it), it);
-  typename Type::iterator res_it = base_it+dist;
-  return res_it;
-}
-}//namespace epee
-#endif //_TO_NONCONST_ITERATOR_H_
diff --git a/contrib/epee/include/warnings.h b/contrib/epee/include/warnings.h
deleted file mode 100644
index 37d7a29004..0000000000
--- a/contrib/epee/include/warnings.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#pragma once
-
-#if defined(_MSC_VER)
-
-#define PUSH_WARNINGS __pragma(warning(push))
-#define POP_WARNINGS __pragma(warning(pop))
-#define DISABLE_VS_WARNINGS(w) __pragma(warning(disable: w))
-#define DISABLE_GCC_WARNING(w)
-#define DISABLE_CLANG_WARNING(w)
-#define DISABLE_GCC_AND_CLANG_WARNING(w)
-
-#else
-
-#include <boost/preprocessor/stringize.hpp>
-
-#define PUSH_WARNINGS _Pragma("GCC diagnostic push")
-#define POP_WARNINGS _Pragma("GCC diagnostic pop")
-#define DISABLE_VS_WARNINGS(w)
-
-#if defined(__clang__)
-#define DISABLE_GCC_WARNING(w)
-#define DISABLE_CLANG_WARNING DISABLE_GCC_AND_CLANG_WARNING
-#else
-#define DISABLE_GCC_WARNING DISABLE_GCC_AND_CLANG_WARNING
-#define DISABLE_CLANG_WARNING(w)
-#endif
-
-#define DISABLE_GCC_AND_CLANG_WARNING(w) _Pragma(BOOST_PP_STRINGIZE(GCC diagnostic ignored BOOST_PP_STRINGIZE(-W##w)))
-
-#endif
\ No newline at end of file
diff --git a/contrib/epee/include/winobj.h b/contrib/epee/include/winobj.h
deleted file mode 100644
index 3279cdac6e..0000000000
--- a/contrib/epee/include/winobj.h
+++ /dev/null
@@ -1,227 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#ifndef __WINH_OBJ_H__ -#define __WINH_OBJ_H__ - -#include - -namespace epee -{ -class critical_region; - -class critical_section { - - boost::mutex m_section; - -public: - - critical_section( const critical_section& section ) { - InitializeCriticalSection( &m_section ); - } - - critical_section() { - InitializeCriticalSection( &m_section ); - } - - ~critical_section() { - DeleteCriticalSection( &m_section ); - } - - void lock() { - EnterCriticalSection( &m_section ); - } - - void unlock() { - LeaveCriticalSection( &m_section ); - } - - bool tryLock() { - return TryEnterCriticalSection( &m_section )? true:false; - } - - critical_section& operator=( const critical_section& section ) - { - return *this; - } - - -}; - -class critical_region { - - ::critical_section *m_locker; - - critical_region( const critical_region& ){} - -public: - - critical_region(critical_section &cs ) { - m_locker = &cs; - cs.lock(); - } - - ~critical_region() - { - m_locker->unlock(); - } -}; - - -class shared_critical_section -{ -public: - shared_critical_section() - { - ::InitializeSRWLock(&m_srw_lock); - } - ~shared_critical_section() - {} - - bool lock_shared() - { - AcquireSRWLockShared(&m_srw_lock); - return true; - } - bool unlock_shared() - { - ReleaseSRWLockShared(&m_srw_lock); - return true; - } - bool lock_exclusive() - { - ::AcquireSRWLockExclusive(&m_srw_lock); - return true; - } - bool unlock_exclusive() - { - ::ReleaseSRWLockExclusive(&m_srw_lock); - return true; - } -private: - SRWLOCK m_srw_lock; - -}; - - -class shared_guard -{ -public: - shared_guard(shared_critical_section& ref_sec):m_ref_sec(ref_sec) - { - m_ref_sec.lock_shared(); - } - - ~shared_guard() - { - m_ref_sec.unlock_shared(); - } - -private: - shared_critical_section& m_ref_sec; -}; - - -class exclusive_guard -{ -public: - exclusive_guard(shared_critical_section& ref_sec):m_ref_sec(ref_sec) - { - m_ref_sec.lock_exclusive(); - } - - ~exclusive_guard() - { - m_ref_sec.unlock_exclusive(); - } - -private: - shared_critical_section& m_ref_sec; -}; - - -class event -{ -public: - event() - { - m_hevent = ::CreateEvent(NULL, FALSE, FALSE, NULL); - } - ~event() - { - ::CloseHandle(m_hevent); - - } - - bool set() - { - return ::SetEvent(m_hevent) ? true:false; - } - - bool reset() - { - return ::ResetEvent(m_hevent) ? 
true:false; - } - - HANDLE get_handle() - { - return m_hevent; - } -private: - HANDLE m_hevent; - -}; - - -#define SHARED_CRITICAL_REGION_BEGIN(x) { shared_guard critical_region_var(x) -#define EXCLUSIVE_CRITICAL_REGION_BEGIN(x) { exclusive_guard critical_region_var(x) - - - -#define CRITICAL_REGION_LOCAL(x) critical_region critical_region_var(x) -#define CRITICAL_REGION_BEGIN(x) { critical_region critical_region_var(x) -#define CRITICAL_REGION_END() } - - - inline const char* get_wait_for_result_as_text(DWORD res) - { - switch(res) - { - case WAIT_ABANDONED: return "WAIT_ABANDONED"; - case WAIT_TIMEOUT: return "WAIT_TIMEOUT"; - case WAIT_OBJECT_0: return "WAIT_OBJECT_0"; - case WAIT_OBJECT_0+1: return "WAIT_OBJECT_1"; - case WAIT_OBJECT_0+2: return "WAIT_OBJECT_2"; - default: - return "UNKNOWN CODE"; - } - - } - -}// namespace epee - -#endif diff --git a/contrib/epee/include/zlib_helper.h b/contrib/epee/include/zlib_helper.h deleted file mode 100644 index 46c7f48e61..0000000000 --- a/contrib/epee/include/zlib_helper.h +++ /dev/null @@ -1,139 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - -#pragma once -extern "C" { -#include "zlib/zlib.h" -} -#pragma comment(lib, "zlibstat.lib") - -namespace epee -{ -namespace zlib_helper -{ - inline - bool pack(std::string& target){ - std::string result_packed_buff; - - z_stream zstream = {0}; - int ret = deflateInit(&zstream, Z_DEFAULT_COMPRESSION); - if(target.size()) - { - - - result_packed_buff.resize(target.size()*2, 'X'); - - zstream.next_in = (Bytef*)target.data(); - zstream.avail_in = (uInt)target.size(); - zstream.next_out = (Bytef*)result_packed_buff.data(); - zstream.avail_out = (uInt)result_packed_buff.size(); - - ret = deflate(&zstream, Z_FINISH); - CHECK_AND_ASSERT_MES(ret>=0, false, "Failed to deflate. 
err = " << ret); - - if(result_packed_buff.size() != zstream.avail_out) - result_packed_buff.resize(result_packed_buff.size()-zstream.avail_out); - - - result_packed_buff.erase(0, 2); - target.swap(result_packed_buff); - } - - deflateEnd(& zstream ); - return true; - } - - inline bool unpack(std::string& target) - { - z_stream zstream = {0}; - int ret = inflateInit(&zstream);// - - std::string decode_summary_buff; - size_t ungzip_buff_size = target.size() * 0x30; - std::string current_decode_buff(ungzip_buff_size, 'X'); - - while(target.size()) - { - - - zstream.next_out = (Bytef*)current_decode_buff.data(); - zstream.avail_out = (uInt)ungzip_buff_size; - - int flag = Z_SYNC_FLUSH; - - static char dummy_head[2] = - { - 0x8 + 0x7 * 0x10, - (((0x8 + 0x7 * 0x10) * 0x100 + 30) / 31 * 31) & 0xFF, - }; - zstream.next_in = (Bytef*) dummy_head; - zstream.avail_in = sizeof(dummy_head); - ret = inflate(&zstream, Z_NO_FLUSH); - if (ret != Z_OK) - { - LOCAL_ASSERT(0); - return false; - } - - zstream.next_in = (Bytef*)target.data(); - zstream.avail_in = (uInt)target.size(); - - ret = inflate(&zstream, Z_SYNC_FLUSH); - if (ret != Z_OK && ret != Z_STREAM_END) - { - LOCAL_ASSERT(0); - return false; - } - - - target.erase(0, target.size()-zstream.avail_in); - - - if(ungzip_buff_size == zstream.avail_out) - { - LOG_ERROR("Can't unpack buffer"); - return false; - } - - - current_decode_buff.resize(ungzip_buff_size - zstream.avail_out); - if(decode_summary_buff.size()) - decode_summary_buff += current_decode_buff; - else - current_decode_buff.swap(decode_summary_buff); - - current_decode_buff.resize(ungzip_buff_size); - } - - inflateEnd(&zstream ); - - decode_summary_buff.swap(target); - return 1; - } - -}; -}//namespace epee diff --git a/contrib/epee/tests/.gitignore b/contrib/epee/tests/.gitignore deleted file mode 100644 index d9b4f015d3..0000000000 --- a/contrib/epee/tests/.gitignore +++ /dev/null @@ -1 +0,0 @@ -/build/* diff --git a/contrib/epee/tests/data/storages/invalid_storage_1.bin b/contrib/epee/tests/data/storages/invalid_storage_1.bin deleted file mode 100644 index fac7b3e97c..0000000000 Binary files a/contrib/epee/tests/data/storages/invalid_storage_1.bin and /dev/null differ diff --git a/contrib/epee/tests/data/storages/invalid_storage_2.bin b/contrib/epee/tests/data/storages/invalid_storage_2.bin deleted file mode 100644 index a8c29f155a..0000000000 Binary files a/contrib/epee/tests/data/storages/invalid_storage_2.bin and /dev/null differ diff --git a/contrib/epee/tests/data/storages/invalid_storage_3.bin b/contrib/epee/tests/data/storages/invalid_storage_3.bin deleted file mode 100644 index b5c31aa058..0000000000 --- a/contrib/epee/tests/data/storages/invalid_storage_3.bin +++ /dev/null @@ -1 +0,0 @@ -¢IMóÙŸˆm_bo \ No newline at end of file diff --git a/contrib/epee/tests/data/storages/invalid_storage_4.bin b/contrib/epee/tests/data/storages/invalid_storage_4.bin deleted file mode 100644 index 4f8372d197..0000000000 Binary files a/contrib/epee/tests/data/storages/invalid_storage_4.bin and /dev/null differ diff --git a/contrib/epee/tests/data/storages/valid_storage.bin b/contrib/epee/tests/data/storages/valid_storage.bin deleted file mode 100644 index e13f780b17..0000000000 Binary files a/contrib/epee/tests/data/storages/valid_storage.bin and /dev/null differ diff --git a/contrib/epee/tests/generate_vc_proj.bat b/contrib/epee/tests/generate_vc_proj.bat deleted file mode 100644 index a81bdce052..0000000000 --- a/contrib/epee/tests/generate_vc_proj.bat +++ /dev/null @@ -1,5 +0,0 @@ -mkdir build 
-cd build -cmake "-DBoost_USE_STATIC_LIBS=TRUE" -G "Visual Studio 11 Win64" ../src -cd .. -pause \ No newline at end of file diff --git a/contrib/epee/tests/src/CMakeLists.txt b/contrib/epee/tests/src/CMakeLists.txt deleted file mode 100644 index c7d31735ba..0000000000 --- a/contrib/epee/tests/src/CMakeLists.txt +++ /dev/null @@ -1,33 +0,0 @@ - -cmake_minimum_required(VERSION 2.8) - -set(Boost_USE_MULTITHREADED ON) - -include_directories(.) -include_directories(../../include) - -find_package(Boost COMPONENTS system filesystem thread date_time chrono regex) -include_directories( ${Boost_INCLUDE_DIRS} ) - -IF (MSVC) - add_definitions( "/W3 /D_CRT_SECURE_NO_WARNINGS /wd4996 /wd4345 /nologo /D_WIN32_WINNT=0x0600 /DWIN32_LEAN_AND_MEAN /bigobj" ) - include_directories(SYSTEM platform/msvc) -ELSE() - # set stuff for other systems - SET(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -std=c11 -Wall -Werror") - SET(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11 -Wall -Werror -Wno-reorder") -ENDIF() - - -# Add folders to filters -file(GLOB_RECURSE SRC RELATIVE ${CMAKE_CURRENT_SOURCE_DIR} - ${CMAKE_CURRENT_SOURCE_DIR}/*.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/*.inl - ${CMAKE_CURRENT_SOURCE_DIR}/*.h) - -source_group(general FILES ${SRC}) - - -add_executable(tests ${SRC} ) -target_link_libraries( tests ${Boost_LIBRARIES} ) - diff --git a/contrib/epee/tests/src/misc/test_math.h b/contrib/epee/tests/src/misc/test_math.h deleted file mode 100644 index 8b3064c2ad..0000000000 --- a/contrib/epee/tests/src/misc/test_math.h +++ /dev/null @@ -1,82 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-//
-
-
-
-#pragma once
-
-#include "misc_language.h"
-
-namespace epee
-{
-  namespace tests
-  {
-    bool test_median()
-    {
-      LOG_PRINT_L0("Testing median");
-      std::vector<size_t> sz;
-      size_t m = misc_utils::median(sz);
-      CHECK_AND_ASSERT_MES(m == 0, false, "test failed");
-      sz.push_back(1);
-      m = misc_utils::median(sz);
-      CHECK_AND_ASSERT_MES(m == 1, false, "test failed");
-      sz.push_back(10);
-      m = misc_utils::median(sz);
-      CHECK_AND_ASSERT_MES(m == 5, false, "test failed");
-
-      sz.clear();
-      sz.resize(3);
-      sz[0] = 0;
-      sz[1] = 9;
-      sz[2] = 3;
-      m = misc_utils::median(sz);
-      CHECK_AND_ASSERT_MES(m == 3, false, "test failed");
-
-      sz.clear();
-      sz.resize(4);
-      sz[0] = 77;
-      sz[1] = 9;
-      sz[2] = 22;
-      sz[3] = 60;
-      m = misc_utils::median(sz);
-      CHECK_AND_ASSERT_MES(m == 41, false, "test failed");
-
-
-
-      sz.clear();
-      sz.resize(5);
-      sz[0] = 77;
-      sz[1] = 9;
-      sz[2] = 22;
-      sz[3] = 60;
-      sz[4] = 11;
-      m = misc_utils::median(sz);
-      CHECK_AND_ASSERT_MES(m == 22, false, "test failed");
-      return true;
-    }
-  }
-}
-
diff --git a/contrib/epee/tests/src/net/test_net.h b/contrib/epee/tests/src/net/test_net.h
deleted file mode 100644
index 0b6dc1f73b..0000000000
--- a/contrib/epee/tests/src/net/test_net.h
+++ /dev/null
@@ -1,403 +0,0 @@
-// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are met:
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above copyright
-// notice, this list of conditions and the following disclaimer in the
-// documentation and/or other materials provided with the distribution.
-// * Neither the name of the Andrey N. Sabelnikov nor the
-// names of its contributors may be used to endorse or promote products
-// derived from this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
-// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
-// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY
-// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
-// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
-// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
-// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-// - -#pragma once - -#include -#include - -#include "net/levin_server_cp2.h" -#include "storages/abstract_invoke.h" - -namespace epee -{ -namespace StorageNamed -{ - typedef CInMemStorage DefaultStorageType; -} -namespace tests -{ - struct some_subdata - { - - std::string str1; - std::list array_of_id; - - BEGIN_NAMED_SERIALIZE_MAP() - SERIALIZE_STL_ANSI_STRING(str1) - SERIALIZE_STL_CONTAINER_POD(array_of_id) - END_NAMED_SERIALIZE_MAP() - }; - - - /************************************************************************/ - /* */ - /************************************************************************/ - struct COMMAND_EXAMPLE_1 - { - const static int ID = 1000; - - struct request - { - - std::string example_string_data; - uint64_t example_id_data; - some_subdata sub; - - BEGIN_NAMED_SERIALIZE_MAP() - SERIALIZE_STL_ANSI_STRING(example_string_data) - SERIALIZE_POD(example_id_data) - SERIALIZE_T(sub) - END_NAMED_SERIALIZE_MAP() - }; - - - struct response - { - bool m_success; - uint64_t example_id_data; - std::list subs; - - BEGIN_NAMED_SERIALIZE_MAP() - SERIALIZE_POD(m_success) - SERIALIZE_POD(example_id_data) - SERIALIZE_STL_CONTAINER_T(subs) - END_NAMED_SERIALIZE_MAP() - }; - }; - - struct COMMAND_EXAMPLE_2 - { - const static int ID = 1001; - - struct request - { - std::string example_string_data2; - uint64_t example_id_data; - - BEGIN_NAMED_SERIALIZE_MAP() - SERIALIZE_POD(example_id_data) - SERIALIZE_STL_ANSI_STRING(example_string_data2) - END_NAMED_SERIALIZE_MAP() - }; - - struct response - { - bool m_success; - uint64_t example_id_data; - - BEGIN_NAMED_SERIALIZE_MAP() - SERIALIZE_POD(example_id_data) - SERIALIZE_POD(m_success) - END_NAMED_SERIALIZE_MAP() - }; - }; - typedef boost::uuids::uuid uuid; - - class test_levin_server: public levin::levin_commands_handler<> - { - test_levin_server(const test_levin_server&){} - public: - test_levin_server(){} - void set_thread_prefix(const std::string& pref) - { - m_net_server.set_threads_prefix(pref); - } - template - bool connect_async(const std::string adr, const std::string& port, uint32_t conn_timeot, calback_t cb, const std::string& bind_ip = "0.0.0.0") - { - return m_net_server.connect_async(adr, port, conn_timeot, cb, bind_ip); - } - - bool connect(const std::string adr, const std::string& port, uint32_t conn_timeot, net_utils::connection_context_base& cn, const std::string& bind_ip = "0.0.0.0") - { - return m_net_server.connect(adr, port, conn_timeot, cn, bind_ip); - } - void close(net_utils::connection_context_base& cn) - { - m_net_server.get_config_object().close(cn.m_connection_id); - } - - template - bool invoke(uuid con_id, int command, t_request& req, t_response& resp) - { - return invoke_remote_command(con_id, command, req, resp, m_net_server.get_config_object()); - } - - template< class t_response, class t_request, class callback_t> - bool invoke_async(uuid con_id, int command, t_request& req, callback_t cb) - { - return async_invoke_remote_command(con_id, command, req, m_net_server.get_config_object(), cb); - } - - bool init(const std::string& bind_port = "", const std::string& bind_ip = "0.0.0.0") - { - m_net_server.get_config_object().m_pcommands_handler = this; - m_net_server.get_config_object().m_invoke_timeout = 1000; - LOG_PRINT_L0("Binding on " << bind_ip << ":" << bind_port); - return m_net_server.init_server(bind_port, bind_ip); - } - - bool run() - { - //here you can set worker threads count - int thrds_count = 4; - - //go to loop - LOG_PRINT("Run net_service loop( " << thrds_count << " threads)...", LOG_LEVEL_0); 
- if(!m_net_server.run_server(thrds_count)) - { - LOG_ERROR("Failed to run net tcp server!"); - } - - LOG_PRINT("net_service loop stopped.", LOG_LEVEL_0); - return true; - } - - bool deinit() - { - return m_net_server.deinit_server(); - } - - bool send_stop_signal() - { - m_net_server.send_stop_signal(); - return true; - } - - uint32_t get_binded_port() - { - return m_net_server.get_binded_port(); - } - private: - - - CHAIN_LEVIN_INVOKE_TO_MAP(); //move levin_commands_handler interface invoke(...) callbacks into invoke map - CHAIN_LEVIN_NOTIFY_TO_STUB(); //move levin_commands_handler interface notify(...) callbacks into nothing - - BEGIN_INVOKE_MAP(test_levin_server) - HANDLE_INVOKE_T(COMMAND_EXAMPLE_1, &test_levin_server::handle_1) - HANDLE_INVOKE_T(COMMAND_EXAMPLE_2, &test_levin_server::handle_2) - END_INVOKE_MAP() - - //----------------- commands handlers ---------------------------------------------- - int handle_1(int command, COMMAND_EXAMPLE_1::request& arg, COMMAND_EXAMPLE_1::response& rsp, const net_utils::connection_context_base& context) - { - LOG_PRINT_L0("on_command_1: id " << arg.example_id_data << "---->>"); - COMMAND_EXAMPLE_2::request arg_ = AUTO_VAL_INIT(arg_); - arg_.example_id_data = arg.example_id_data; - COMMAND_EXAMPLE_2::response rsp_ = AUTO_VAL_INIT(rsp_); - invoke_async(context.m_connection_id, COMMAND_EXAMPLE_2::ID, arg_, [](int code, const COMMAND_EXAMPLE_2::response& rsp, const net_utils::connection_context_base& context) - { - if(code < 0) - {LOG_PRINT_RED_L0("on_command_1: command_2 failed to invoke");} - else - {LOG_PRINT_L0("on_command_1: command_2 response " << rsp.example_id_data);} - }); - rsp.example_id_data = arg.example_id_data; - LOG_PRINT_L0("on_command_1: id " << arg.example_id_data << "<<----"); - return true; - } - int handle_2(int command, COMMAND_EXAMPLE_2::request& arg, COMMAND_EXAMPLE_2::response& rsp, const net_utils::connection_context_base& context) - { - LOG_PRINT_L0("on_command_2: id "<< arg.example_id_data); - rsp.example_id_data = arg.example_id_data; - //misc_utils::sleep_no_w(6000); - return true; - } - //---------------------------------------------------------------------------------- - net_utils::boosted_levin_async_server m_net_server; - }; - - - inline - bool do_run_test_server() - { - - test_levin_server srv1, srv2; - - - std::string bind_param = "0.0.0.0"; - std::string port = ""; - - if(!srv1.init(port, bind_param)) - { - LOG_ERROR("Failed to initialize srv!"); - return 1; - } - - if(!srv2.init(port, bind_param)) - { - LOG_ERROR("Failed to initialize srv!"); - return 1; - } - - srv1.set_thread_prefix("SRV_A"); - srv2.set_thread_prefix("SRV_B"); - - boost::thread th1( boost::bind(&test_levin_server::run, &srv1)); - boost::thread th2( boost::bind(&test_levin_server::run, &srv2)); - - LOG_PRINT_L0("Initalized servers, waiting for worker threads started..."); - misc_utils::sleep_no_w(1000); - - - LOG_PRINT_L0("Connecting to each other..."); - uint32_t port1 = srv1.get_binded_port(); - uint32_t port2 = srv2.get_binded_port(); - - COMMAND_EXAMPLE_1::request arg; - COMMAND_EXAMPLE_1::request resp; - - net_utils::connection_context_base cntxt_1; - bool r = srv1.connect("127.0.0.1", string_tools::num_to_string_fast(port2), 5000, cntxt_1); - CHECK_AND_ASSERT_MES(r, false, "connect to server failed"); - - net_utils::connection_context_base cntxt_2; - r = srv2.connect("127.0.0.1", string_tools::num_to_string_fast(port1), 5000, cntxt_2); - CHECK_AND_ASSERT_MES(r, false, "connect to server failed"); - - while(true) - { - 
LOG_PRINT_L0("Invoking from A to B..."); - int r = srv1.invoke(cntxt_1.m_connection_id, COMMAND_EXAMPLE_1::ID, arg, resp); - if(r<=0) - { - LOG_ERROR("Failed tp invoke A to B"); - break; - } - - LOG_PRINT_L0("Invoking from B to A..."); - r = srv2.invoke(cntxt_2.m_connection_id, COMMAND_EXAMPLE_1::ID, arg, resp); - if(r<=0) - { - LOG_ERROR("Failed tp invoke B to A"); - break; - } - } - srv1.send_stop_signal(); - srv2.send_stop_signal(); - th1.join(); - th1.join(); - - return true; - } - - - - inline bool do_test2_work_with_srv(test_levin_server& srv, int port) - { - uint64_t i = 0; - boost::mutex wait_event; - wait_event.lock(); - while(true) - { - net_utils::connection_context_base cntxt_local = AUTO_VAL_INIT(cntxt_local); - bool r = srv.connect_async("127.0.0.1", string_tools::num_to_string_fast(port), 5000, [&srv, &port, &wait_event, &i, &cntxt_local](const net_utils::connection_context_base& cntxt, const boost::system::error_code& ec) - { - CHECK_AND_ASSERT_MES(!ec, void(), "Some problems at connect, message: " << ec.message() ); - cntxt_local = cntxt; - LOG_PRINT_L0("Invoking command 1 to " << port); - COMMAND_EXAMPLE_1::request arg = AUTO_VAL_INIT(arg); - arg.example_id_data = i; - /*vc2010 workaround*/ - int port_ = port; - boost::mutex& wait_event_ = wait_event; - int r = srv.invoke_async(cntxt.m_connection_id, COMMAND_EXAMPLE_1::ID, arg, [port_, &wait_event_](int code, const COMMAND_EXAMPLE_1::request& rsp, const net_utils::connection_context_base& cntxt) - { - CHECK_AND_ASSERT_MES(code > 0, void(), "Failed to invoke"); - LOG_PRINT_L0("command 1 invoke to " << port_ << " OK."); - wait_event_.unlock(); - }); - }); - wait_event.lock(); - srv.close(cntxt_local); - ++i; - } - return true; - } - - inline - bool do_run_test_server_async_connect() - { - test_levin_server srv1, srv2; - - - std::string bind_param = "0.0.0.0"; - std::string port = ""; - - if(!srv1.init(port, bind_param)) - { - LOG_ERROR("Failed to initialize srv!"); - return 1; - } - - if(!srv2.init(port, bind_param)) - { - LOG_ERROR("Failed to initialize srv!"); - return 1; - } - - srv1.set_thread_prefix("SRV_A"); - srv2.set_thread_prefix("SRV_B"); - - boost::thread thmain1( boost::bind(&test_levin_server::run, &srv1)); - boost::thread thmain2( boost::bind(&test_levin_server::run, &srv2)); - - LOG_PRINT_L0("Initalized servers, waiting for worker threads started..."); - misc_utils::sleep_no_w(1000); - - - LOG_PRINT_L0("Connecting to each other..."); - uint32_t port1 = srv1.get_binded_port(); - uint32_t port2 = srv2.get_binded_port(); - - COMMAND_EXAMPLE_1::request arg; - COMMAND_EXAMPLE_1::request resp; - - - boost::thread work_1( boost::bind(do_test2_work_with_srv, boost::ref(srv1), port2)); - boost::thread work_2( boost::bind(do_test2_work_with_srv, boost::ref(srv2), port1)); - boost::thread work_3( boost::bind(do_test2_work_with_srv, boost::ref(srv1), port2)); - boost::thread work_4( boost::bind(do_test2_work_with_srv, boost::ref(srv2), port1)); - boost::thread work_5( boost::bind(do_test2_work_with_srv, boost::ref(srv1), port2)); - boost::thread work_6( boost::bind(do_test2_work_with_srv, boost::ref(srv2), port1)); - boost::thread work_7( boost::bind(do_test2_work_with_srv, boost::ref(srv1), port2)); - boost::thread work_8( boost::bind(do_test2_work_with_srv, boost::ref(srv2), port1)); - - - work_1.join(); - work_2.join(); - srv1.send_stop_signal(); - srv2.send_stop_signal(); - thmain1.join(); - thmain2.join(); - - return true; - } - -} -} \ No newline at end of file diff --git 
a/contrib/epee/tests/src/storages/portable_storages_test.h b/contrib/epee/tests/src/storages/portable_storages_test.h deleted file mode 100644 index ecded8dada..0000000000 --- a/contrib/epee/tests/src/storages/portable_storages_test.h +++ /dev/null @@ -1,232 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-// - - - -#pragma once - -#include <string> -#include <list> -#include "storages/serializeble_struct_helper.h" -#include "serialization/keyvalue_serialization.h" -#include "storages/portable_storage.h" -#include "storages/portable_storage_template_helper.h" - -namespace epee -{ - namespace tests - { - - struct port_test_struct_sub - { - std::string m_str; - - BEGIN_KV_SERIALIZE_MAP() - KV_SERIALIZE_VAL(m_str) - END_KV_SERIALIZE_MAP() - }; - -#pragma pack (push, 1) - struct some_pod_struct - { - uint64_t a; - int32_t b; - }; -#pragma pack(pop) - - struct port_test_struct - { - std::string m_str; - uint64_t m_uint64; - uint32_t m_uint32; - uint16_t m_uint16; - uint8_t m_uint8; - int64_t m_int64; - int32_t m_int32; - int16_t m_int16; - int8_t m_int8; - double m_double; - bool m_bool; - some_pod_struct m_pod; - std::list<std::string> m_list_of_str; - std::list<uint64_t> m_list_of_uint64_t; - std::list<uint32_t> m_list_of_uint32_t; - std::list<uint16_t> m_list_of_uint16_t; - std::list<uint8_t> m_list_of_uint8_t; - std::list<int64_t> m_list_of_int64_t; - std::list<int32_t> m_list_of_int32_t; - std::list<int16_t> m_list_of_int16_t; - std::list<int8_t> m_list_of_int8_t; - std::list<double> m_list_of_double; - std::list<bool> m_list_of_bool; - port_test_struct_sub m_subobj; - std::list<port_test_struct> m_list_of_self; - - BEGIN_KV_SERIALIZE_MAP() - KV_SERIALIZE_VAL(m_str) - KV_SERIALIZE_VAL(m_uint64) - KV_SERIALIZE_VAL(m_uint32) - KV_SERIALIZE_VAL(m_uint16) - KV_SERIALIZE_VAL(m_uint8) - KV_SERIALIZE_VAL(m_int64) - KV_SERIALIZE_VAL(m_int32) - KV_SERIALIZE_VAL(m_int16) - KV_SERIALIZE_VAL(m_int8) - KV_SERIALIZE_VAL(m_double) - KV_SERIALIZE_VAL(m_bool) - KV_SERIALIZE_VAL_POD_AS_BLOB(m_pod) - KV_SERIALIZE_OBJ(m_subobj) - KV_SERIALIZE_CONTAINER_VAL(m_list_of_str) - KV_SERIALIZE_CONTAINER_VAL(m_list_of_uint64_t) - KV_SERIALIZE_CONTAINER_VAL(m_list_of_uint32_t) - KV_SERIALIZE_CONTAINER_VAL(m_list_of_uint16_t) - KV_SERIALIZE_CONTAINER_VAL(m_list_of_uint8_t) - KV_SERIALIZE_CONTAINER_VAL(m_list_of_int64_t) - KV_SERIALIZE_CONTAINER_VAL(m_list_of_int32_t) - KV_SERIALIZE_CONTAINER_VAL(m_list_of_int16_t) - KV_SERIALIZE_CONTAINER_VAL(m_list_of_int8_t) - KV_SERIALIZE_CONTAINER_VAL(m_list_of_double) - KV_SERIALIZE_CONTAINER_VAL(m_list_of_bool) - KV_SERIALIZE_CONTAINER_OBJ(m_list_of_self) - END_KV_SERIALIZE_MAP() - }; - - bool operator != (const port_test_struct_sub& a, const port_test_struct_sub& b) - { - return b.m_str != a.m_str; - } - - bool operator == (const port_test_struct& a, const port_test_struct& b) - { - if( b.m_str != a.m_str - || b.m_uint64 != a.m_uint64 - || b.m_uint32 != a.m_uint32 - || b.m_uint16 != a.m_uint16 - || b.m_uint8 != a.m_uint8 - || b.m_int64 != a.m_int64 - || b.m_int32 != a.m_int32 - || b.m_int16 != a.m_int16 - || b.m_int8 != a.m_int8 - || b.m_double != a.m_double - || b.m_bool != a.m_bool - || b.m_pod.a != a.m_pod.a - || b.m_pod.b != a.m_pod.b - || b.m_list_of_str != a.m_list_of_str - || b.m_list_of_uint64_t != a.m_list_of_uint64_t - || b.m_list_of_uint32_t != a.m_list_of_uint32_t - || b.m_list_of_uint16_t != a.m_list_of_uint16_t - || b.m_list_of_uint8_t != a.m_list_of_uint8_t - || b.m_list_of_int64_t != a.m_list_of_int64_t - || b.m_list_of_int32_t != a.m_list_of_int32_t - || b.m_list_of_int16_t != a.m_list_of_int16_t - || b.m_list_of_int8_t != a.m_list_of_int8_t - || b.m_list_of_double != a.m_list_of_double - || b.m_list_of_bool != a.m_list_of_bool - || b.m_subobj != a.m_subobj - || b.m_list_of_self != a.m_list_of_self - ) - return false; - return true; - } - - void fill_struct_with_test_values(port_test_struct& s) - { - s.m_str = "zuzuzuzuzuz"; - s.m_uint64 = 111111111111111; - s.m_uint32 = 2222222; - s.m_uint16 = 
2222; - s.m_uint8 = 22; - s.m_int64 = -111111111111111; - s.m_int32 = -2222222; - s.m_int16 = -2222; - s.m_int8 = -24; - s.m_double = 0.11111; - s.m_bool = true; - s.m_pod.a = 32342342342342; - s.m_pod.b = -342342; - s.m_list_of_str.push_back("1112121"); - s.m_list_of_uint64_t.push_back(1111111111); - s.m_list_of_uint64_t.push_back(2222222222); - s.m_list_of_uint32_t.push_back(1111111); - s.m_list_of_uint32_t.push_back(2222222); - s.m_list_of_uint16_t.push_back(1111); - s.m_list_of_uint16_t.push_back(2222); - s.m_list_of_uint8_t.push_back(11); - s.m_list_of_uint8_t.push_back(22); - - - s.m_list_of_int64_t.push_back(-1111111111); - s.m_list_of_int64_t.push_back(-222222222); - s.m_list_of_int32_t.push_back(-1111111); - s.m_list_of_int32_t.push_back(-2222222); - s.m_list_of_int16_t.push_back(-1111); - s.m_list_of_int16_t.push_back(-2222); - s.m_list_of_int8_t.push_back(-11); - s.m_list_of_int8_t.push_back(-22); - - s.m_list_of_double.push_back(0.11111); - s.m_list_of_double.push_back(0.22222); - s.m_list_of_bool.push_back(true); - s.m_list_of_bool.push_back(false); - - s.m_subobj.m_str = "subszzzzzzzz"; - s.m_list_of_self.push_back(s); - } - - bool test_portable_storages(const std::string& tests_folder) - { - serialization::portable_storage ps, ps2; - port_test_struct s1, s2; - fill_struct_with_test_values(s1); - - s1.store(ps); - std::string binbuf; - bool r = ps.store_to_binary(binbuf); - - ps2.load_from_binary(binbuf); - s2.load(ps2); - if(!(s1 == s2)) - { - LOG_ERROR("Portable storage test failed!"); - return false; - } - - - port_test_struct ss1, ss2; - fill_struct_with_test_values(ss1); - std::string json_buff = epee::serialization::store_t_to_json(ss1); - epee::serialization::load_t_from_json(ss2, json_buff); - if(!(ss1 == ss2)) - { - LOG_ERROR("Portable storage test failed!"); - return false; - } - - return true; - } - - } -} \ No newline at end of file diff --git a/contrib/epee/tests/src/storages/storage_tests.h b/contrib/epee/tests/src/storages/storage_tests.h deleted file mode 100644 index 522e589c4b..0000000000 --- a/contrib/epee/tests/src/storages/storage_tests.h +++ /dev/null @@ -1,142 +0,0 @@ -// Copyright (c) 2006-2013, Andrey N. Sabelnikov, www.sabelnikov.net -// All rights reserved. -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are met: -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above copyright -// notice, this list of conditions and the following disclaimer in the -// documentation and/or other materials provided with the distribution. -// * Neither the name of the Andrey N. Sabelnikov nor the -// names of its contributors may be used to endorse or promote products -// derived from this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND -// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -// WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -// DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT OWNER BE LIABLE FOR ANY -// DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -// (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -// LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// - - - -#pragma once - -#include "storages/serializeble_struct_helper.h" -#include "storages/portable_storage.h" - -namespace epee -{ - namespace tests - { - - - struct test_struct - { - - std::string m_str; - unsigned int m_uint; - bool m_bool; - std::list<std::string> m_list_of_str; - std::list<int> m_list_of_int; - std::list<test_struct> m_list_of_self; - - - BEGIN_NAMED_SERIALIZE_MAP() - SERIALIZE_STL_ANSI_STRING(m_str) - SERIALIZE_POD(m_uint) - SERIALIZE_POD(m_bool) - SERIALIZE_STL_CONTAINER_ANSII_STRING(m_list_of_str) - SERIALIZE_STL_CONTAINER_POD(m_list_of_int) - SERIALIZE_STL_CONTAINER_T(m_list_of_self) - END_NAMED_SERIALIZE_MAP() - - }; - - - bool operator == (const test_struct& a, const test_struct& b) - { - if( b.m_str != a.m_str - || b.m_uint != a.m_uint - || b.m_bool != a.m_bool - || b.m_list_of_str != a.m_list_of_str - || b.m_list_of_int != a.m_list_of_int - || b.m_list_of_self != a.m_list_of_self - ) - return false; - return true; - } - - inline test_struct get_test_struct() - { - test_struct t = boost::value_initialized<test_struct>(); - t.m_bool = true; - t.m_str = "ackamdc'kmecemcececmacmecmcm[aicm[oeicm[oeicm[qaicm[qoe"; - t.m_uint = 233242; - for(int i = 0; i!=500; i++) - t.m_list_of_int.push_back(i); - - for(int i = 0; i!=500; i++) - t.m_list_of_str.push_back("ssccd"); - - for(int i = 0; i!=5; i++) - { - t.m_list_of_self.push_back(t); - } - return t; - } - - bool test_storages(const std::string& tests_folder) - { - - epee::serialization::portable_storage ps; - auto s = ps.open_section("zzz", nullptr); - uint64_t i = 0; - ps.get_value("afdsdf", i, s); - - - LOG_PRINT_L0("Generating test struct..."); - boost::filesystem::path storage_folder = tests_folder; - storage_folder /= "storages"; - - - test_struct t = get_test_struct(); - - LOG_PRINT_L0("Loading test struct from storage..."); - test_struct t2; - bool res = epee::StorageNamed::load_struct_from_storage_file(t2, (storage_folder /+ "valid_storage.bin").string()); - CHECK_AND_ASSERT_MES(res, false, "Failed to load valid_storage.bin"); - - LOG_PRINT_L0("Comparing generated and loaded test struct..."); - if(!(t == t2)) - return false; - - LOG_PRINT_L0("Loading broken archive 1..."); - test_struct t3; - res = epee::StorageNamed::load_struct_from_storage_file(t3, (storage_folder /+ "invalid_storage_1.bin").string()); - CHECK_AND_ASSERT_MES(!res, false, "invalid_storage_1.bin loaded, but should not "); - - - LOG_PRINT_L0("Loading broken archive 2..."); - res = epee::StorageNamed::load_struct_from_storage_file(t3, (storage_folder /+ "invalid_storage_2.bin").string()); - CHECK_AND_ASSERT_MES(!res, false, "invalid_storage_2.bin loaded, but should not "); - - LOG_PRINT_L0("Loading broken archive 3..."); - res = epee::StorageNamed::load_struct_from_storage_file(t3, (storage_folder /+ "invalid_storage_3.bin").string()); - CHECK_AND_ASSERT_MES(!res, false, "invalid_storage_3.bin loaded, but should not "); - - LOG_PRINT_L0("Loading broken archive 4..."); - res = epee::StorageNamed::load_struct_from_storage_file(t3, (storage_folder /+ 
"invalid_storage_4.bin").string()); - CHECK_AND_ASSERT_MES(!res, false, "invalid_storage_3.bin loaded, but should not "); - - return true; - } - } -} - diff --git a/contrib/epee/tests/src/tests.cpp b/contrib/epee/tests/src/tests.cpp deleted file mode 100644 index ed045d8331..0000000000 --- a/contrib/epee/tests/src/tests.cpp +++ /dev/null @@ -1,59 +0,0 @@ - -#include "include_base_utils.h" -#include "storages/storage_tests.h" -#include "misc/test_math.h" -#include "storages/portable_storages_test.h" -#include "net/test_net.h" - -using namespace epee; - -int main(int argc, char* argv[]) -{ - - string_tools::set_module_name_and_folder(argv[0]); - - //set up logging options - log_space::get_set_log_detalisation_level(true, LOG_LEVEL_2); - log_space::log_singletone::add_logger(LOGGER_CONSOLE, NULL, NULL); - log_space::log_singletone::add_logger(LOGGER_FILE, - log_space::log_singletone::get_default_log_file().c_str(), - log_space::log_singletone::get_default_log_folder().c_str()); - - - string_tools::command_line_params_a start_params; - string_tools::parse_commandline(start_params, argc, argv); - std::string tests_data_path; - string_tools::get_xparam_from_command_line(start_params, std::string("/tests_folder"), tests_data_path); - - if(string_tools::have_in_command_line(start_params, std::string("/run_net_tests"))) - { - if(!tests::do_run_test_server()) - { - LOG_ERROR("net tests failed"); - return 1; - } - if(!tests::do_run_test_server_async_connect() ) - { - LOG_ERROR("net tests failed"); - return 1; - } - }else if(string_tools::have_in_command_line(start_params, std::string("/run_unit_tests"))) - { - if(!tests::test_median()) - { - LOG_ERROR("median test failed"); - return 1; - } - - - if(!tests::test_storages(tests_data_path)) - { - LOG_ERROR("storage test failed"); - return 1; - } - }else if(string_tools::have_in_command_line(start_params, std::string("/run_portable_storage_test"))) - { - tests::test_portable_storages(tests_data_path); - } - return 1; -} \ No newline at end of file diff --git a/external/CMakeLists.txt b/external/CMakeLists.txt index 0a7a5d1ae8..a4ed8d4860 100755 --- a/external/CMakeLists.txt +++ b/external/CMakeLists.txt @@ -1,11 +1,29 @@ set(UPNPC_BUILD_STATIC ON CACHE BOOL "Build static library") set(UPNPC_BUILD_SHARED OFF CACHE BOOL "Build shared library") set(UPNPC_BUILD_TESTS OFF CACHE BOOL "Build test executables") + add_subdirectory(miniupnpc) +add_subdirectory(gtest) + +if(MSVC) + include_directories(${gtest_SOURCE_DIR}/include ${gtest_SOURCE_DIR} ../version) + add_subdirectory(rocksDB EXCLUDE_FROM_ALL) + set_property(TARGET upnpc-static gtest gtest_main rocksdblib PROPERTY FOLDER "external") +elseif(NOT MSVC) + set_property(TARGET upnpc-static gtest gtest_main PROPERTY FOLDER "external") + add_custom_target( + rocksdb + COMMAND make static_lib + WORKING_DIRECTORY ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/ + ) + add_library(rocksdblib STATIC IMPORTED GLOBAL) + set_target_properties(rocksdblib PROPERTIES IMPORTED_LOCATION ${CMAKE_CURRENT_SOURCE_DIR}/rocksdb/librocksdb.a) + add_dependencies(rocksdblib rocksdb) +endif() -set_property(TARGET upnpc-static PROPERTY FOLDER "external") if(MSVC) set_property(TARGET upnpc-static APPEND_STRING PROPERTY COMPILE_FLAGS " -wd4244 -wd4267") elseif(NOT MSVC) set_property(TARGET upnpc-static APPEND_STRING PROPERTY COMPILE_FLAGS " -Wno-undef -Wno-unused-result -Wno-unused-value") endif() + diff --git a/external/google/dense_hash_map b/external/google/dense_hash_map old mode 100644 new mode 100755 diff --git 
a/external/google/dense_hash_set b/external/google/dense_hash_set old mode 100644 new mode 100755 diff --git a/external/google/sparse_hash_map b/external/google/sparse_hash_map old mode 100644 new mode 100755 diff --git a/external/google/sparse_hash_set b/external/google/sparse_hash_set old mode 100644 new mode 100755 diff --git a/external/google/sparsehash/densehashtable.h b/external/google/sparsehash/densehashtable.h old mode 100644 new mode 100755 diff --git a/external/google/sparsehash/hashtable-common.h b/external/google/sparsehash/hashtable-common.h old mode 100644 new mode 100755 diff --git a/external/google/sparsehash/libc_allocator_with_realloc.h b/external/google/sparsehash/libc_allocator_with_realloc.h old mode 100644 new mode 100755 diff --git a/external/google/sparsehash/os_config.h b/external/google/sparsehash/os_config.h old mode 100644 new mode 100755 diff --git a/external/google/sparsehash/sparseconfig.h b/external/google/sparsehash/sparseconfig.h old mode 100644 new mode 100755 diff --git a/external/google/sparsehash/sparseconfig_win.h b/external/google/sparsehash/sparseconfig_win.h old mode 100644 new mode 100755 diff --git a/external/google/sparsehash/sparsehashtable.h b/external/google/sparsehash/sparsehashtable.h old mode 100644 new mode 100755 diff --git a/external/google/sparsetable b/external/google/sparsetable old mode 100644 new mode 100755 diff --git a/external/google/type_traits.h b/external/google/type_traits.h old mode 100644 new mode 100755 diff --git a/external/gtest/CHANGES b/external/gtest/CHANGES new file mode 100755 index 0000000000..0552132421 --- /dev/null +++ b/external/gtest/CHANGES @@ -0,0 +1,157 @@ +Changes for 1.7.0: + +* New feature: death tests are supported on OpenBSD and in iOS + simulator now. +* New feature: Google Test now implements a protocol to allow + a test runner to detect that a test program has exited + prematurely and report it as a failure (before it would be + falsely reported as a success if the exit code is 0). +* New feature: Test::RecordProperty() can now be used outside of the + lifespan of a test method, in which case it will be attributed to + the current test case or the test program in the XML report. +* New feature (potentially breaking): --gtest_list_tests now prints + the type parameters and value parameters for each test. +* Improvement: char pointers and char arrays are now escaped properly + in failure messages. +* Improvement: failure summary in XML reports now includes file and + line information. +* Improvement: the <testsuites> XML element now has a timestamp attribute. +* Improvement: When --gtest_filter is specified, XML report now doesn't + contain information about tests that are filtered out. +* Fixed the bug where long --gtest_filter flag values are truncated in + death tests. +* Potentially breaking change: RUN_ALL_TESTS() is now implemented as a + function instead of a macro in order to work better with Clang. +* Compatibility fixes with C++ 11 and various platforms. +* Bug/warning fixes. + +Changes for 1.6.0: + +* New feature: ADD_FAILURE_AT() for reporting a test failure at the + given source location -- useful for writing testing utilities. +* New feature: the universal value printer is moved from Google Mock + to Google Test. +* New feature: type parameters and value parameters are reported in + the XML report now. +* A gtest_disable_pthreads CMake option. +* Colored output works in GNU Screen sessions now. +* Parameters of value-parameterized tests are now printed in the + textual output. 
+* Failures from ad hoc test assertions run before RUN_ALL_TESTS() are + now correctly reported. +* Arguments of ASSERT_XY and EXPECT_XY no longer need to support << to + ostream. +* More complete handling of exceptions. +* GTEST_ASSERT_XY can be used instead of ASSERT_XY in case the latter + name is already used by another library. +* --gtest_catch_exceptions is now true by default, allowing a test + program to continue after an exception is thrown. +* Value-parameterized test fixtures can now derive from Test and + WithParamInterface separately, easing conversion of legacy tests. +* Death test messages are clearly marked to make them more + distinguishable from other messages. +* Compatibility fixes for Android, Google Native Client, MinGW, HP UX, + PowerPC, Lucid autotools, libCStd, Sun C++, Borland C++ Builder (Code Gear), + IBM XL C++ (Visual Age C++), and C++0x. +* Bug fixes and implementation clean-ups. +* Potentially incompatible changes: disables the harmful 'make install' + command in autotools. + +Changes for 1.5.0: + + * New feature: assertions can be safely called in multiple threads + where the pthreads library is available. + * New feature: predicates used inside EXPECT_TRUE() and friends + can now generate custom failure messages. + * New feature: Google Test can now be compiled as a DLL. + * New feature: fused source files are included. + * New feature: prints help when encountering unrecognized Google Test flags. + * Experimental feature: CMake build script (requires CMake 2.6.4+). + * Experimental feature: the Pump script for meta programming. + * double values streamed to an assertion are printed with enough precision + to differentiate any two different values. + * Google Test now works on Solaris and AIX. + * Build and test script improvements. + * Bug fixes and implementation clean-ups. + + Potentially breaking changes: + + * Stopped supporting VC++ 7.1 with exceptions disabled. + * Dropped support for 'make install'. + +Changes for 1.4.0: + + * New feature: the event listener API + * New feature: test shuffling + * New feature: the XML report format is closer to junitreport and can + be parsed by Hudson now. + * New feature: when a test runs under Visual Studio, its failures are + integrated in the IDE. + * New feature: /MD(d) versions of VC++ projects. + * New feature: elapsed time for the tests is printed by default. + * New feature: comes with a TR1 tuple implementation such that Boost + is no longer needed for Combine(). + * New feature: EXPECT_DEATH_IF_SUPPORTED macro and friends. + * New feature: the Xcode project can now produce static gtest + libraries in addition to a framework. + * Compatibility fixes for Solaris, Cygwin, minGW, Windows Mobile, + Symbian, gcc, and C++Builder. + * Bug fixes and implementation clean-ups. + +Changes for 1.3.0: + + * New feature: death tests on Windows, Cygwin, and Mac. + * New feature: ability to use Google Test assertions in other testing + frameworks. + * New feature: ability to run disabled test via + --gtest_also_run_disabled_tests. + * New feature: the --help flag for printing the usage. + * New feature: access to Google Test flag values in user code. + * New feature: a script that packs Google Test into one .h and one + .cc file for easy deployment. + * New feature: support for distributing test functions to multiple + machines (requires support from the test runner). + * Bug fixes and implementation clean-ups. + +Changes for 1.2.1: + + * Compatibility fixes for Linux IA-64 and IBM z/OS. 
+ * Added support for using Boost and other TR1 implementations. + * Changes to the build scripts to support upcoming release of Google C++ + Mocking Framework. + * Added Makefile to the distribution package. + * Improved build instructions in README. + +Changes for 1.2.0: + + * New feature: value-parameterized tests. + * New feature: the ASSERT/EXPECT_(NON)FATAL_FAILURE(_ON_ALL_THREADS) + macros. + * Changed the XML report format to match JUnit/Ant's. + * Added tests to the Xcode project. + * Added scons/SConscript for building with SCons. + * Added src/gtest-all.cc for building Google Test from a single file. + * Fixed compatibility with Solaris and z/OS. + * Enabled running Python tests on systems with python 2.3 installed, + e.g. Mac OS X 10.4. + * Bug fixes. + +Changes for 1.1.0: + + * New feature: type-parameterized tests. + * New feature: exception assertions. + * New feature: printing elapsed time of tests. + * Improved the robustness of death tests. + * Added an Xcode project and samples. + * Adjusted the output format on Windows to be understandable by Visual Studio. + * Minor bug fixes. + +Changes for 1.0.1: + + * Added project files for Visual Studio 7.1. + * Fixed issues with compiling on Mac OS X. + * Fixed issues with compiling on Cygwin. + +Changes for 1.0.0: + + * Initial Open Source release of Google Test diff --git a/external/gtest/CMakeLists.txt b/external/gtest/CMakeLists.txt new file mode 100755 index 0000000000..57470c84f3 --- /dev/null +++ b/external/gtest/CMakeLists.txt @@ -0,0 +1,252 @@ +######################################################################## +# CMake build script for Google Test. +# +# To run the tests for Google Test itself on Linux, use 'make test' or +# ctest. You can select which tests to run using 'ctest -R regex'. +# For more options, run 'ctest --help'. + +# BUILD_SHARED_LIBS is a standard CMake variable, but we declare it here to +# make it prominent in the GUI. +option(BUILD_SHARED_LIBS "Build shared libraries (DLLs)." OFF) + +# When other libraries are using a shared version of runtime libraries, +# Google Test also has to use one. +option( + gtest_force_shared_crt + "Use shared (DLL) run-time lib even when Google Test is built as static lib." + OFF) + +option(gtest_build_tests "Build all of gtest's own tests." OFF) + +option(gtest_build_samples "Build gtest's sample programs." OFF) + +option(gtest_disable_pthreads "Disable uses of pthreads in gtest." OFF) + +# Defines pre_project_set_up_hermetic_build() and set_up_hermetic_build(). +include(cmake/hermetic_build.cmake OPTIONAL) + +if (COMMAND pre_project_set_up_hermetic_build) + pre_project_set_up_hermetic_build() +endif() + +######################################################################## +# +# Project-wide settings + +# Name of the project. +# +# CMake files in this project can refer to the root source directory +# as ${gtest_SOURCE_DIR} and to the root binary directory as +# ${gtest_BINARY_DIR}. +# Language "C" is required for find_package(Threads). +project(gtest CXX C) +cmake_minimum_required(VERSION 2.6.2) + +if (COMMAND set_up_hermetic_build) + set_up_hermetic_build() +endif() + +# Define helper functions and macros used by Google Test. +include(cmake/internal_utils.cmake) + +config_compiler_and_linker() # Defined in internal_utils.cmake. + +# Where Google Test's .h files can be found. +include_directories( + ${gtest_SOURCE_DIR}/include + ${gtest_SOURCE_DIR}) + +# Where Google Test's libraries can be found. 
+link_directories(${gtest_BINARY_DIR}/src) + +######################################################################## +# +# Defines the gtest & gtest_main libraries. User tests should link +# with one of them. + +# Google Test libraries. We build them using more strict warnings than what +# are used for other targets, to ensure that gtest can be compiled by a user +# aggressive about warnings. +cxx_library(gtest "${cxx_strict}" src/gtest-all.cc) +cxx_library(gtest_main "${cxx_strict}" src/gtest_main.cc) +target_link_libraries(gtest_main gtest) + +######################################################################## +# +# Samples on how to link user tests with gtest or gtest_main. +# +# They are not built by default. To build them, set the +# gtest_build_samples option to ON. You can do it by running ccmake +# or specifying the -Dgtest_build_samples=ON flag when running cmake. + +if (gtest_build_samples) + cxx_executable(sample1_unittest samples gtest_main samples/sample1.cc) + cxx_executable(sample2_unittest samples gtest_main samples/sample2.cc) + cxx_executable(sample3_unittest samples gtest_main) + cxx_executable(sample4_unittest samples gtest_main samples/sample4.cc) + cxx_executable(sample5_unittest samples gtest_main samples/sample1.cc) + cxx_executable(sample6_unittest samples gtest_main) + cxx_executable(sample7_unittest samples gtest_main) + cxx_executable(sample8_unittest samples gtest_main) + cxx_executable(sample9_unittest samples gtest) + cxx_executable(sample10_unittest samples gtest) +endif() + +######################################################################## +# +# Google Test's own tests. +# +# You can skip this section if you aren't interested in testing +# Google Test itself. +# +# The tests are not built by default. To build them, set the +# gtest_build_tests option to ON. You can do it by running ccmake +# or specifying the -Dgtest_build_tests=ON flag when running cmake. + +if (gtest_build_tests) + # This must be set in the root directory for the tests to be run by + # 'make test' or ctest. + enable_testing() + + ############################################################ + # C++ tests built with standard compiler flags. + + cxx_test(gtest-death-test_test gtest_main) + cxx_test(gtest_environment_test gtest) + cxx_test(gtest-filepath_test gtest_main) + cxx_test(gtest-linked_ptr_test gtest_main) + cxx_test(gtest-listener_test gtest_main) + cxx_test(gtest_main_unittest gtest_main) + cxx_test(gtest-message_test gtest_main) + cxx_test(gtest_no_test_unittest gtest) + cxx_test(gtest-options_test gtest_main) + cxx_test(gtest-param-test_test gtest + test/gtest-param-test2_test.cc) + cxx_test(gtest-port_test gtest_main) + cxx_test(gtest_pred_impl_unittest gtest_main) + cxx_test(gtest_premature_exit_test gtest + test/gtest_premature_exit_test.cc) + cxx_test(gtest-printers_test gtest_main) + cxx_test(gtest_prod_test gtest_main + test/production.cc) + cxx_test(gtest_repeat_test gtest) + cxx_test(gtest_sole_header_test gtest_main) + cxx_test(gtest_stress_test gtest) + cxx_test(gtest-test-part_test gtest_main) + cxx_test(gtest_throw_on_failure_ex_test gtest) + cxx_test(gtest-typed-test_test gtest_main + test/gtest-typed-test2_test.cc) + cxx_test(gtest_unittest gtest_main) + cxx_test(gtest-unittest-api_test gtest) + + ############################################################ + # C++ tests built with non-standard compiler flags. + + # MSVC 7.1 does not support STL with exceptions disabled. 
+ if (NOT MSVC OR MSVC_VERSION GREATER 1310) + cxx_library(gtest_no_exception "${cxx_no_exception}" + src/gtest-all.cc) + cxx_library(gtest_main_no_exception "${cxx_no_exception}" + src/gtest-all.cc src/gtest_main.cc) + endif() + cxx_library(gtest_main_no_rtti "${cxx_no_rtti}" + src/gtest-all.cc src/gtest_main.cc) + + cxx_test_with_flags(gtest-death-test_ex_nocatch_test + "${cxx_exception} -DGTEST_ENABLE_CATCH_EXCEPTIONS_=0" + gtest test/gtest-death-test_ex_test.cc) + cxx_test_with_flags(gtest-death-test_ex_catch_test + "${cxx_exception} -DGTEST_ENABLE_CATCH_EXCEPTIONS_=1" + gtest test/gtest-death-test_ex_test.cc) + + cxx_test_with_flags(gtest_no_rtti_unittest "${cxx_no_rtti}" + gtest_main_no_rtti test/gtest_unittest.cc) + + cxx_shared_library(gtest_dll "${cxx_default}" + src/gtest-all.cc src/gtest_main.cc) + + cxx_executable_with_flags(gtest_dll_test_ "${cxx_default}" + gtest_dll test/gtest_all_test.cc) + set_target_properties(gtest_dll_test_ + PROPERTIES + COMPILE_DEFINITIONS "GTEST_LINKED_AS_SHARED_LIBRARY=1") + + if (NOT MSVC OR NOT MSVC_VERSION EQUAL 1600) + # The C++ Standard specifies tuple_element<int, class>. + # Yet MSVC 10's <utility> declares tuple_element<size_t, class>. + # That declaration conflicts with our own standard-conforming + # tuple implementation. Therefore using our own tuple with + # MSVC 10 doesn't compile. + cxx_library(gtest_main_use_own_tuple "${cxx_use_own_tuple}" + src/gtest-all.cc src/gtest_main.cc) + + cxx_test_with_flags(gtest-tuple_test "${cxx_use_own_tuple}" + gtest_main_use_own_tuple test/gtest-tuple_test.cc) + + cxx_test_with_flags(gtest_use_own_tuple_test "${cxx_use_own_tuple}" + gtest_main_use_own_tuple + test/gtest-param-test_test.cc test/gtest-param-test2_test.cc) + endif() + + ############################################################ + # Python tests. + + cxx_executable(gtest_break_on_failure_unittest_ test gtest) + py_test(gtest_break_on_failure_unittest) + + # MSVC 7.1 does not support STL with exceptions disabled. + if (NOT MSVC OR MSVC_VERSION GREATER 1310) + cxx_executable_with_flags( + gtest_catch_exceptions_no_ex_test_ + "${cxx_no_exception}" + gtest_main_no_exception + test/gtest_catch_exceptions_test_.cc) + endif() + + cxx_executable_with_flags( + gtest_catch_exceptions_ex_test_ + "${cxx_exception}" + gtest_main + test/gtest_catch_exceptions_test_.cc) + py_test(gtest_catch_exceptions_test) + + cxx_executable(gtest_color_test_ test gtest) + py_test(gtest_color_test) + + cxx_executable(gtest_env_var_test_ test gtest) + py_test(gtest_env_var_test) + + cxx_executable(gtest_filter_unittest_ test gtest) + py_test(gtest_filter_unittest) + + cxx_executable(gtest_help_test_ test gtest_main) + py_test(gtest_help_test) + + cxx_executable(gtest_list_tests_unittest_ test gtest) + py_test(gtest_list_tests_unittest) + + cxx_executable(gtest_output_test_ test gtest) + py_test(gtest_output_test) + + cxx_executable(gtest_shuffle_test_ test gtest) + py_test(gtest_shuffle_test) + + # MSVC 7.1 does not support STL with exceptions disabled. 
+ if (NOT MSVC OR MSVC_VERSION GREATER 1310) + cxx_executable(gtest_throw_on_failure_test_ test gtest_no_exception) + set_target_properties(gtest_throw_on_failure_test_ + PROPERTIES + COMPILE_FLAGS "${cxx_no_exception}") + py_test(gtest_throw_on_failure_test) + endif() + + cxx_executable(gtest_uninitialized_test_ test gtest) + py_test(gtest_uninitialized_test) + + cxx_executable(gtest_xml_outfile1_test_ test gtest_main) + cxx_executable(gtest_xml_outfile2_test_ test gtest_main) + py_test(gtest_xml_outfiles_test) + + cxx_executable(gtest_xml_output_unittest_ test gtest) + py_test(gtest_xml_output_unittest) +endif() diff --git a/external/gtest/CONTRIBUTORS b/external/gtest/CONTRIBUTORS new file mode 100755 index 0000000000..feae2fc044 --- /dev/null +++ b/external/gtest/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This file contains a list of people who've made non-trivial +# contribution to the Google C++ Testing Framework project. People +# who commit code to the project are encouraged to add their names +# here. Please keep the list sorted by first names. + +Ajay Joshi +Balázs Dán +Bharat Mediratta +Chandler Carruth +Chris Prince +Chris Taylor +Dan Egnor +Eric Roman +Hady Zalek +Jeffrey Yasskin +Jói Sigurðsson +Keir Mierle +Keith Ray +Kenton Varda +Manuel Klimek +Markus Heule +Mika Raento +Miklós Fazekas +Pasi Valminen +Patrick Hanna +Patrick Riley +Peter Kaminski +Preston Jackson +Rainer Klaffenboeck +Russ Cox +Russ Rufer +Sean Mcafee +Sigurður Ásgeirsson +Tracy Bialik +Vadim Berman +Vlad Losev +Zhanyong Wan diff --git a/external/gtest/LICENSE b/external/gtest/LICENSE new file mode 100755 index 0000000000..1941a11f8c --- /dev/null +++ b/external/gtest/LICENSE @@ -0,0 +1,28 @@ +Copyright 2008, Google Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
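The gtest build scripts imported above define two link targets, gtest and gtest_main; as the CMakeLists.txt comments note, user tests link against one of them, and gtest_main additionally supplies a main() that parses the --gtest_* flags and calls RUN_ALL_TESTS(). A minimal sketch of such a user test (file and test names here are illustrative, not part of this tree):

    #include <gtest/gtest.h>

    // Registered statically by the TEST macro; gtest_main provides main(),
    // so linking the library is the only integration step required.
    TEST(SanityCheck, Addition) {
      EXPECT_EQ(4, 2 + 2);  // non-fatal: records a failure and continues
      ASSERT_TRUE(1 < 2);   // fatal: aborts this test on failure
    }

Under the CMake build this would be wired up with cxx_test()/cxx_executable() from cmake/internal_utils.cmake; under the automake build below it would be listed in check_PROGRAMS and linked against lib/libgtest_main.la.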
diff --git a/external/gtest/Makefile.am b/external/gtest/Makefile.am new file mode 100755 index 0000000000..9c96b42572 --- /dev/null +++ b/external/gtest/Makefile.am @@ -0,0 +1,306 @@ +# Automake file + +ACLOCAL_AMFLAGS = -I m4 + +# Nonstandard package files for distribution +EXTRA_DIST = \ + CHANGES \ + CONTRIBUTORS \ + LICENSE \ + include/gtest/gtest-param-test.h.pump \ + include/gtest/internal/gtest-param-util-generated.h.pump \ + include/gtest/internal/gtest-tuple.h.pump \ + include/gtest/internal/gtest-type-util.h.pump \ + make/Makefile \ + scripts/fuse_gtest_files.py \ + scripts/gen_gtest_pred_impl.py \ + scripts/pump.py \ + scripts/test/Makefile + +# gtest source files that we don't compile directly. They are +# #included by gtest-all.cc. +GTEST_SRC = \ + src/gtest-death-test.cc \ + src/gtest-filepath.cc \ + src/gtest-internal-inl.h \ + src/gtest-port.cc \ + src/gtest-printers.cc \ + src/gtest-test-part.cc \ + src/gtest-typed-test.cc \ + src/gtest.cc + +EXTRA_DIST += $(GTEST_SRC) + +# Sample files that we don't compile. +EXTRA_DIST += \ + samples/prime_tables.h \ + samples/sample2_unittest.cc \ + samples/sample3_unittest.cc \ + samples/sample4_unittest.cc \ + samples/sample5_unittest.cc \ + samples/sample6_unittest.cc \ + samples/sample7_unittest.cc \ + samples/sample8_unittest.cc \ + samples/sample9_unittest.cc + +# C++ test files that we don't compile directly. +EXTRA_DIST += \ + test/gtest-death-test_ex_test.cc \ + test/gtest-death-test_test.cc \ + test/gtest-filepath_test.cc \ + test/gtest-linked_ptr_test.cc \ + test/gtest-listener_test.cc \ + test/gtest-message_test.cc \ + test/gtest-options_test.cc \ + test/gtest-param-test2_test.cc \ + test/gtest-param-test2_test.cc \ + test/gtest-param-test_test.cc \ + test/gtest-param-test_test.cc \ + test/gtest-param-test_test.h \ + test/gtest-port_test.cc \ + test/gtest_premature_exit_test.cc \ + test/gtest-printers_test.cc \ + test/gtest-test-part_test.cc \ + test/gtest-tuple_test.cc \ + test/gtest-typed-test2_test.cc \ + test/gtest-typed-test_test.cc \ + test/gtest-typed-test_test.h \ + test/gtest-unittest-api_test.cc \ + test/gtest_break_on_failure_unittest_.cc \ + test/gtest_catch_exceptions_test_.cc \ + test/gtest_color_test_.cc \ + test/gtest_env_var_test_.cc \ + test/gtest_environment_test.cc \ + test/gtest_filter_unittest_.cc \ + test/gtest_help_test_.cc \ + test/gtest_list_tests_unittest_.cc \ + test/gtest_main_unittest.cc \ + test/gtest_no_test_unittest.cc \ + test/gtest_output_test_.cc \ + test/gtest_pred_impl_unittest.cc \ + test/gtest_prod_test.cc \ + test/gtest_repeat_test.cc \ + test/gtest_shuffle_test_.cc \ + test/gtest_sole_header_test.cc \ + test/gtest_stress_test.cc \ + test/gtest_throw_on_failure_ex_test.cc \ + test/gtest_throw_on_failure_test_.cc \ + test/gtest_uninitialized_test_.cc \ + test/gtest_unittest.cc \ + test/gtest_unittest.cc \ + test/gtest_xml_outfile1_test_.cc \ + test/gtest_xml_outfile2_test_.cc \ + test/gtest_xml_output_unittest_.cc \ + test/production.cc \ + test/production.h + +# Python tests that we don't run. 
+EXTRA_DIST += \ + test/gtest_break_on_failure_unittest.py \ + test/gtest_catch_exceptions_test.py \ + test/gtest_color_test.py \ + test/gtest_env_var_test.py \ + test/gtest_filter_unittest.py \ + test/gtest_help_test.py \ + test/gtest_list_tests_unittest.py \ + test/gtest_output_test.py \ + test/gtest_output_test_golden_lin.txt \ + test/gtest_shuffle_test.py \ + test/gtest_test_utils.py \ + test/gtest_throw_on_failure_test.py \ + test/gtest_uninitialized_test.py \ + test/gtest_xml_outfiles_test.py \ + test/gtest_xml_output_unittest.py \ + test/gtest_xml_test_utils.py + +# CMake script +EXTRA_DIST += \ + CMakeLists.txt \ + cmake/internal_utils.cmake + +# MSVC project files +EXTRA_DIST += \ + msvc/gtest-md.sln \ + msvc/gtest-md.vcproj \ + msvc/gtest.sln \ + msvc/gtest.vcproj \ + msvc/gtest_main-md.vcproj \ + msvc/gtest_main.vcproj \ + msvc/gtest_prod_test-md.vcproj \ + msvc/gtest_prod_test.vcproj \ + msvc/gtest_unittest-md.vcproj \ + msvc/gtest_unittest.vcproj + +# xcode project files +EXTRA_DIST += \ + xcode/Config/DebugProject.xcconfig \ + xcode/Config/FrameworkTarget.xcconfig \ + xcode/Config/General.xcconfig \ + xcode/Config/ReleaseProject.xcconfig \ + xcode/Config/StaticLibraryTarget.xcconfig \ + xcode/Config/TestTarget.xcconfig \ + xcode/Resources/Info.plist \ + xcode/Scripts/runtests.sh \ + xcode/Scripts/versiongenerate.py \ + xcode/gtest.xcodeproj/project.pbxproj + +# xcode sample files +EXTRA_DIST += \ + xcode/Samples/FrameworkSample/Info.plist \ + xcode/Samples/FrameworkSample/WidgetFramework.xcodeproj/project.pbxproj \ + xcode/Samples/FrameworkSample/runtests.sh \ + xcode/Samples/FrameworkSample/widget.cc \ + xcode/Samples/FrameworkSample/widget.h \ + xcode/Samples/FrameworkSample/widget_test.cc + +# C++Builder project files +EXTRA_DIST += \ + codegear/gtest.cbproj \ + codegear/gtest.groupproj \ + codegear/gtest_all.cc \ + codegear/gtest_link.cc \ + codegear/gtest_main.cbproj \ + codegear/gtest_unittest.cbproj + +# Distribute and install M4 macro +m4datadir = $(datadir)/aclocal +m4data_DATA = m4/gtest.m4 +EXTRA_DIST += $(m4data_DATA) + +# We define the global AM_CPPFLAGS as everything we compile includes from these +# directories. +AM_CPPFLAGS = -I$(srcdir) -I$(srcdir)/include + +# Modifies compiler and linker flags for pthreads compatibility. +if HAVE_PTHREADS + AM_CXXFLAGS = @PTHREAD_CFLAGS@ -DGTEST_HAS_PTHREAD=1 + AM_LIBS = @PTHREAD_LIBS@ +else + AM_CXXFLAGS = -DGTEST_HAS_PTHREAD=0 +endif + +# Build rules for libraries. 
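+# (lib_LTLIBRARIES lists the libtool archives to build and install; each .la wraps the underlying static/shared library and is what the sample and test programs below reference in their LDADD lines.)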
+lib_LTLIBRARIES = lib/libgtest.la lib/libgtest_main.la + +lib_libgtest_la_SOURCES = src/gtest-all.cc + +pkginclude_HEADERS = \ + include/gtest/gtest-death-test.h \ + include/gtest/gtest-message.h \ + include/gtest/gtest-param-test.h \ + include/gtest/gtest-printers.h \ + include/gtest/gtest-spi.h \ + include/gtest/gtest-test-part.h \ + include/gtest/gtest-typed-test.h \ + include/gtest/gtest.h \ + include/gtest/gtest_pred_impl.h \ + include/gtest/gtest_prod.h + +pkginclude_internaldir = $(pkgincludedir)/internal +pkginclude_internal_HEADERS = \ + include/gtest/internal/gtest-death-test-internal.h \ + include/gtest/internal/gtest-filepath.h \ + include/gtest/internal/gtest-internal.h \ + include/gtest/internal/gtest-linked_ptr.h \ + include/gtest/internal/gtest-param-util-generated.h \ + include/gtest/internal/gtest-param-util.h \ + include/gtest/internal/gtest-port.h \ + include/gtest/internal/gtest-string.h \ + include/gtest/internal/gtest-tuple.h \ + include/gtest/internal/gtest-type-util.h + +lib_libgtest_main_la_SOURCES = src/gtest_main.cc +lib_libgtest_main_la_LIBADD = lib/libgtest.la + +# Build rules for samples and tests. Automake's naming for some of +# these variables isn't terribly obvious, so this is a brief +# reference: +# +# TESTS -- Programs run automatically by "make check" +# check_PROGRAMS -- Programs built by "make check" but not necessarily run + +noinst_LTLIBRARIES = samples/libsamples.la + +samples_libsamples_la_SOURCES = \ + samples/sample1.cc \ + samples/sample1.h \ + samples/sample2.cc \ + samples/sample2.h \ + samples/sample3-inl.h \ + samples/sample4.cc \ + samples/sample4.h + +TESTS= +TESTS_ENVIRONMENT = GTEST_SOURCE_DIR="$(srcdir)/test" \ + GTEST_BUILD_DIR="$(top_builddir)/test" +check_PROGRAMS= + +# A simple sample on using gtest. +TESTS += samples/sample1_unittest +check_PROGRAMS += samples/sample1_unittest +samples_sample1_unittest_SOURCES = samples/sample1_unittest.cc +samples_sample1_unittest_LDADD = lib/libgtest_main.la \ + lib/libgtest.la \ + samples/libsamples.la + +# Another sample. It also verifies that libgtest works. +TESTS += samples/sample10_unittest +check_PROGRAMS += samples/sample10_unittest +samples_sample10_unittest_SOURCES = samples/sample10_unittest.cc +samples_sample10_unittest_LDADD = lib/libgtest.la + +# This tests most constructs of gtest and verifies that libgtest_main +# and libgtest work. +TESTS += test/gtest_all_test +check_PROGRAMS += test/gtest_all_test +test_gtest_all_test_SOURCES = test/gtest_all_test.cc +test_gtest_all_test_LDADD = lib/libgtest_main.la \ + lib/libgtest.la + +# Tests that fused gtest files compile and work. +FUSED_GTEST_SRC = \ + fused-src/gtest/gtest-all.cc \ + fused-src/gtest/gtest.h \ + fused-src/gtest/gtest_main.cc + +if HAVE_PYTHON +TESTS += test/fused_gtest_test +check_PROGRAMS += test/fused_gtest_test +test_fused_gtest_test_SOURCES = $(FUSED_GTEST_SRC) \ + samples/sample1.cc samples/sample1_unittest.cc +test_fused_gtest_test_CPPFLAGS = -I"$(srcdir)/fused-src" + +# Build rules for putting fused Google Test files into the distribution +# package. The user can also create those files by manually running +# scripts/fuse_gtest_files.py. 
+$(test_fused_gtest_test_SOURCES): fused-gtest + +fused-gtest: $(pkginclude_HEADERS) $(pkginclude_internal_HEADERS) \ + $(GTEST_SRC) src/gtest-all.cc src/gtest_main.cc \ + scripts/fuse_gtest_files.py + mkdir -p "$(srcdir)/fused-src" + chmod -R u+w "$(srcdir)/fused-src" + rm -f "$(srcdir)/fused-src/gtest/gtest-all.cc" + rm -f "$(srcdir)/fused-src/gtest/gtest.h" + "$(srcdir)/scripts/fuse_gtest_files.py" "$(srcdir)/fused-src" + cp -f "$(srcdir)/src/gtest_main.cc" "$(srcdir)/fused-src/gtest/" + +maintainer-clean-local: + rm -rf "$(srcdir)/fused-src" +endif + +# Death tests may produce core dumps in the build directory. In case +# this happens, clean them to keep distcleancheck happy. +CLEANFILES = core + +# Disables 'make install' as installing a compiled version of Google +# Test can lead to undefined behavior due to violation of the +# One-Definition Rule. + +install-exec-local: + echo "'make install' is dangerous and not supported. Instead, see README for how to integrate Google Test into your build system." + false + +install-data-local: + echo "'make install' is dangerous and not supported. Instead, see README for how to integrate Google Test into your build system." + false diff --git a/external/gtest/Makefile.in b/external/gtest/Makefile.in new file mode 100755 index 0000000000..874de7473f --- /dev/null +++ b/external/gtest/Makefile.in @@ -0,0 +1,1360 @@ +# Makefile.in generated by automake 1.11.3 from Makefile.am. +# @configure_input@ + +# Copyright (C) 1994, 1995, 1996, 1997, 1998, 1999, 2000, 2001, 2002, +# 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# This Makefile.in is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +@SET_MAKE@ + +# Automake file + + + +VPATH = @srcdir@ +pkgdatadir = $(datadir)/@PACKAGE@ +pkgincludedir = $(includedir)/@PACKAGE@ +pkglibdir = $(libdir)/@PACKAGE@ +pkglibexecdir = $(libexecdir)/@PACKAGE@ +am__cd = CDPATH="$${ZSH_VERSION+.}$(PATH_SEPARATOR)" && cd +install_sh_DATA = $(install_sh) -c -m 644 +install_sh_PROGRAM = $(install_sh) -c +install_sh_SCRIPT = $(install_sh) -c +INSTALL_HEADER = $(INSTALL_DATA) +transform = $(program_transform_name) +NORMAL_INSTALL = : +PRE_INSTALL = : +POST_INSTALL = : +NORMAL_UNINSTALL = : +PRE_UNINSTALL = : +POST_UNINSTALL = : +build_triplet = @build@ +host_triplet = @host@ +TESTS = samples/sample1_unittest$(EXEEXT) \ + samples/sample10_unittest$(EXEEXT) \ + test/gtest_all_test$(EXEEXT) $(am__EXEEXT_1) +check_PROGRAMS = samples/sample1_unittest$(EXEEXT) \ + samples/sample10_unittest$(EXEEXT) \ + test/gtest_all_test$(EXEEXT) $(am__EXEEXT_1) +@HAVE_PYTHON_TRUE@am__append_1 = test/fused_gtest_test +@HAVE_PYTHON_TRUE@am__append_2 = test/fused_gtest_test +subdir = . 
+DIST_COMMON = README $(am__configure_deps) $(pkginclude_HEADERS) \ + $(pkginclude_internal_HEADERS) $(srcdir)/Makefile.am \ + $(srcdir)/Makefile.in $(top_srcdir)/build-aux/config.h.in \ + $(top_srcdir)/configure $(top_srcdir)/scripts/gtest-config.in \ + build-aux/config.guess build-aux/config.sub build-aux/depcomp \ + build-aux/install-sh build-aux/ltmain.sh build-aux/missing +ACLOCAL_M4 = $(top_srcdir)/aclocal.m4 +am__aclocal_m4_deps = $(top_srcdir)/m4/libtool.m4 \ + $(top_srcdir)/m4/ltoptions.m4 $(top_srcdir)/m4/ltsugar.m4 \ + $(top_srcdir)/m4/ltversion.m4 $(top_srcdir)/m4/lt~obsolete.m4 \ + $(top_srcdir)/m4/acx_pthread.m4 $(top_srcdir)/configure.ac +am__configure_deps = $(am__aclocal_m4_deps) $(CONFIGURE_DEPENDENCIES) \ + $(ACLOCAL_M4) +am__CONFIG_DISTCLEAN_FILES = config.status config.cache config.log \ + configure.lineno config.status.lineno +mkinstalldirs = $(install_sh) -d +CONFIG_HEADER = $(top_builddir)/build-aux/config.h +CONFIG_CLEAN_FILES = scripts/gtest-config +CONFIG_CLEAN_VPATH_FILES = +am__vpath_adj_setup = srcdirstrip=`echo "$(srcdir)" | sed 's|.|.|g'`; +am__vpath_adj = case $$p in \ + $(srcdir)/*) f=`echo "$$p" | sed "s|^$$srcdirstrip/||"`;; \ + *) f=$$p;; \ + esac; +am__strip_dir = f=`echo $$p | sed -e 's|^.*/||'`; +am__install_max = 40 +am__nobase_strip_setup = \ + srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*|]/\\\\&/g'` +am__nobase_strip = \ + for p in $$list; do echo "$$p"; done | sed -e "s|$$srcdirstrip/||" +am__nobase_list = $(am__nobase_strip_setup); \ + for p in $$list; do echo "$$p $$p"; done | \ + sed "s| $$srcdirstrip/| |;"' / .*\//!s/ .*/ ./; s,\( .*\)/[^/]*$$,\1,' | \ + $(AWK) 'BEGIN { files["."] = "" } { files[$$2] = files[$$2] " " $$1; \ + if (++n[$$2] == $(am__install_max)) \ + { print $$2, files[$$2]; n[$$2] = 0; files[$$2] = "" } } \ + END { for (dir in files) print dir, files[dir] }' +am__base_list = \ + sed '$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;$$!N;s/\n/ /g' | \ + sed '$$!N;$$!N;$$!N;$$!N;s/\n/ /g' +am__uninstall_files_from_dir = { \ + test -z "$$files" \ + || { test ! -d "$$dir" && test ! -f "$$dir" && test ! 
-r "$$dir"; } \ + || { echo " ( cd '$$dir' && rm -f" $$files ")"; \ + $(am__cd) "$$dir" && rm -f $$files; }; \ + } +am__installdirs = "$(DESTDIR)$(libdir)" "$(DESTDIR)$(m4datadir)" \ + "$(DESTDIR)$(pkgincludedir)" \ + "$(DESTDIR)$(pkginclude_internaldir)" +LTLIBRARIES = $(lib_LTLIBRARIES) $(noinst_LTLIBRARIES) +lib_libgtest_la_LIBADD = +am__dirstamp = $(am__leading_dot)dirstamp +am_lib_libgtest_la_OBJECTS = src/gtest-all.lo +lib_libgtest_la_OBJECTS = $(am_lib_libgtest_la_OBJECTS) +lib_libgtest_main_la_DEPENDENCIES = lib/libgtest.la +am_lib_libgtest_main_la_OBJECTS = src/gtest_main.lo +lib_libgtest_main_la_OBJECTS = $(am_lib_libgtest_main_la_OBJECTS) +samples_libsamples_la_LIBADD = +am_samples_libsamples_la_OBJECTS = samples/sample1.lo \ + samples/sample2.lo samples/sample4.lo +samples_libsamples_la_OBJECTS = $(am_samples_libsamples_la_OBJECTS) +@HAVE_PYTHON_TRUE@am__EXEEXT_1 = test/fused_gtest_test$(EXEEXT) +am_samples_sample10_unittest_OBJECTS = \ + samples/sample10_unittest.$(OBJEXT) +samples_sample10_unittest_OBJECTS = \ + $(am_samples_sample10_unittest_OBJECTS) +samples_sample10_unittest_DEPENDENCIES = lib/libgtest.la +am_samples_sample1_unittest_OBJECTS = \ + samples/sample1_unittest.$(OBJEXT) +samples_sample1_unittest_OBJECTS = \ + $(am_samples_sample1_unittest_OBJECTS) +samples_sample1_unittest_DEPENDENCIES = lib/libgtest_main.la \ + lib/libgtest.la samples/libsamples.la +am__test_fused_gtest_test_SOURCES_DIST = fused-src/gtest/gtest-all.cc \ + fused-src/gtest/gtest.h fused-src/gtest/gtest_main.cc \ + samples/sample1.cc samples/sample1_unittest.cc +am__objects_1 = \ + fused-src/gtest/test_fused_gtest_test-gtest-all.$(OBJEXT) \ + fused-src/gtest/test_fused_gtest_test-gtest_main.$(OBJEXT) +@HAVE_PYTHON_TRUE@am_test_fused_gtest_test_OBJECTS = $(am__objects_1) \ +@HAVE_PYTHON_TRUE@ samples/test_fused_gtest_test-sample1.$(OBJEXT) \ +@HAVE_PYTHON_TRUE@ samples/test_fused_gtest_test-sample1_unittest.$(OBJEXT) +test_fused_gtest_test_OBJECTS = $(am_test_fused_gtest_test_OBJECTS) +test_fused_gtest_test_LDADD = $(LDADD) +am_test_gtest_all_test_OBJECTS = test/gtest_all_test.$(OBJEXT) +test_gtest_all_test_OBJECTS = $(am_test_gtest_all_test_OBJECTS) +test_gtest_all_test_DEPENDENCIES = lib/libgtest_main.la \ + lib/libgtest.la +DEFAULT_INCLUDES = -I.@am__isrc@ -I$(top_builddir)/build-aux +depcomp = $(SHELL) $(top_srcdir)/build-aux/depcomp +am__depfiles_maybe = depfiles +am__mv = mv -f +CXXCOMPILE = $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +LTCXXCOMPILE = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) +CXXLD = $(CXX) +CXXLINK = $(LIBTOOL) --tag=CXX $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CXXLD) $(AM_CXXFLAGS) $(CXXFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +COMPILE = $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(AM_CPPFLAGS) \ + $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +LTCOMPILE = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=compile $(CC) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) \ + $(AM_CPPFLAGS) $(CPPFLAGS) $(AM_CFLAGS) $(CFLAGS) +CCLD = $(CC) +LINK = $(LIBTOOL) --tag=CC $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) \ + --mode=link $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) \ + $(LDFLAGS) -o $@ +SOURCES = $(lib_libgtest_la_SOURCES) $(lib_libgtest_main_la_SOURCES) \ + $(samples_libsamples_la_SOURCES) \ + $(samples_sample10_unittest_SOURCES) \ + $(samples_sample1_unittest_SOURCES) \ + 
$(test_fused_gtest_test_SOURCES) \ + $(test_gtest_all_test_SOURCES) +DIST_SOURCES = $(lib_libgtest_la_SOURCES) \ + $(lib_libgtest_main_la_SOURCES) \ + $(samples_libsamples_la_SOURCES) \ + $(samples_sample10_unittest_SOURCES) \ + $(samples_sample1_unittest_SOURCES) \ + $(am__test_fused_gtest_test_SOURCES_DIST) \ + $(test_gtest_all_test_SOURCES) +DATA = $(m4data_DATA) +HEADERS = $(pkginclude_HEADERS) $(pkginclude_internal_HEADERS) +ETAGS = etags +CTAGS = ctags +am__tty_colors = \ +red=; grn=; lgn=; blu=; std= +DISTFILES = $(DIST_COMMON) $(DIST_SOURCES) $(TEXINFOS) $(EXTRA_DIST) +distdir = $(PACKAGE)-$(VERSION) +top_distdir = $(distdir) +am__remove_distdir = \ + if test -d "$(distdir)"; then \ + find "$(distdir)" -type d ! -perm -200 -exec chmod u+w {} ';' \ + && rm -rf "$(distdir)" \ + || { sleep 5 && rm -rf "$(distdir)"; }; \ + else :; fi +DIST_ARCHIVES = $(distdir).tar.gz $(distdir).tar.bz2 $(distdir).zip +GZIP_ENV = --best +distuninstallcheck_listfiles = find . -type f -print +am__distuninstallcheck_listfiles = $(distuninstallcheck_listfiles) \ + | sed 's|^\./|$(prefix)/|' | grep -v '$(infodir)/dir$$' +distcleancheck_listfiles = find . -type f -print +ACLOCAL = @ACLOCAL@ +AMTAR = @AMTAR@ +AR = @AR@ +AUTOCONF = @AUTOCONF@ +AUTOHEADER = @AUTOHEADER@ +AUTOMAKE = @AUTOMAKE@ +AWK = @AWK@ +CC = @CC@ +CCDEPMODE = @CCDEPMODE@ +CFLAGS = @CFLAGS@ +CPP = @CPP@ +CPPFLAGS = @CPPFLAGS@ +CXX = @CXX@ +CXXCPP = @CXXCPP@ +CXXDEPMODE = @CXXDEPMODE@ +CXXFLAGS = @CXXFLAGS@ +CYGPATH_W = @CYGPATH_W@ +DEFS = @DEFS@ +DEPDIR = @DEPDIR@ +DLLTOOL = @DLLTOOL@ +DSYMUTIL = @DSYMUTIL@ +DUMPBIN = @DUMPBIN@ +ECHO_C = @ECHO_C@ +ECHO_N = @ECHO_N@ +ECHO_T = @ECHO_T@ +EGREP = @EGREP@ +EXEEXT = @EXEEXT@ +FGREP = @FGREP@ +GREP = @GREP@ +INSTALL = @INSTALL@ +INSTALL_DATA = @INSTALL_DATA@ +INSTALL_PROGRAM = @INSTALL_PROGRAM@ +INSTALL_SCRIPT = @INSTALL_SCRIPT@ +INSTALL_STRIP_PROGRAM = @INSTALL_STRIP_PROGRAM@ +LD = @LD@ +LDFLAGS = @LDFLAGS@ +LIBOBJS = @LIBOBJS@ +LIBS = @LIBS@ +LIBTOOL = @LIBTOOL@ +LIPO = @LIPO@ +LN_S = @LN_S@ +LTLIBOBJS = @LTLIBOBJS@ +MAKEINFO = @MAKEINFO@ +MANIFEST_TOOL = @MANIFEST_TOOL@ +MKDIR_P = @MKDIR_P@ +NM = @NM@ +NMEDIT = @NMEDIT@ +OBJDUMP = @OBJDUMP@ +OBJEXT = @OBJEXT@ +OTOOL = @OTOOL@ +OTOOL64 = @OTOOL64@ +PACKAGE = @PACKAGE@ +PACKAGE_BUGREPORT = @PACKAGE_BUGREPORT@ +PACKAGE_NAME = @PACKAGE_NAME@ +PACKAGE_STRING = @PACKAGE_STRING@ +PACKAGE_TARNAME = @PACKAGE_TARNAME@ +PACKAGE_URL = @PACKAGE_URL@ +PACKAGE_VERSION = @PACKAGE_VERSION@ +PATH_SEPARATOR = @PATH_SEPARATOR@ +PTHREAD_CC = @PTHREAD_CC@ +PTHREAD_CFLAGS = @PTHREAD_CFLAGS@ +PTHREAD_LIBS = @PTHREAD_LIBS@ +PYTHON = @PYTHON@ +RANLIB = @RANLIB@ +SED = @SED@ +SET_MAKE = @SET_MAKE@ +SHELL = @SHELL@ +STRIP = @STRIP@ +VERSION = @VERSION@ +abs_builddir = @abs_builddir@ +abs_srcdir = @abs_srcdir@ +abs_top_builddir = @abs_top_builddir@ +abs_top_srcdir = @abs_top_srcdir@ +ac_ct_AR = @ac_ct_AR@ +ac_ct_CC = @ac_ct_CC@ +ac_ct_CXX = @ac_ct_CXX@ +ac_ct_DUMPBIN = @ac_ct_DUMPBIN@ +acx_pthread_config = @acx_pthread_config@ +am__include = @am__include@ +am__leading_dot = @am__leading_dot@ +am__quote = @am__quote@ +am__tar = @am__tar@ +am__untar = @am__untar@ +bindir = @bindir@ +build = @build@ +build_alias = @build_alias@ +build_cpu = @build_cpu@ +build_os = @build_os@ +build_vendor = @build_vendor@ +builddir = @builddir@ +datadir = @datadir@ +datarootdir = @datarootdir@ +docdir = @docdir@ +dvidir = @dvidir@ +exec_prefix = @exec_prefix@ +host = @host@ +host_alias = @host_alias@ +host_cpu = @host_cpu@ +host_os = @host_os@ +host_vendor = @host_vendor@ +htmldir = @htmldir@ 
+includedir = @includedir@ +infodir = @infodir@ +install_sh = @install_sh@ +libdir = @libdir@ +libexecdir = @libexecdir@ +localedir = @localedir@ +localstatedir = @localstatedir@ +mandir = @mandir@ +mkdir_p = @mkdir_p@ +oldincludedir = @oldincludedir@ +pdfdir = @pdfdir@ +prefix = @prefix@ +program_transform_name = @program_transform_name@ +psdir = @psdir@ +sbindir = @sbindir@ +sharedstatedir = @sharedstatedir@ +srcdir = @srcdir@ +sysconfdir = @sysconfdir@ +target_alias = @target_alias@ +top_build_prefix = @top_build_prefix@ +top_builddir = @top_builddir@ +top_srcdir = @top_srcdir@ +ACLOCAL_AMFLAGS = -I m4 + +# Nonstandard package files for distribution + +# Sample files that we don't compile. + +# C++ test files that we don't compile directly. + +# Python tests that we don't run. + +# CMake script + +# MSVC project files + +# xcode project files + +# xcode sample files + +# C++Builder project files +EXTRA_DIST = CHANGES CONTRIBUTORS LICENSE \ + include/gtest/gtest-param-test.h.pump \ + include/gtest/internal/gtest-param-util-generated.h.pump \ + include/gtest/internal/gtest-tuple.h.pump \ + include/gtest/internal/gtest-type-util.h.pump make/Makefile \ + scripts/fuse_gtest_files.py scripts/gen_gtest_pred_impl.py \ + scripts/pump.py scripts/test/Makefile $(GTEST_SRC) \ + samples/prime_tables.h samples/sample2_unittest.cc \ + samples/sample3_unittest.cc samples/sample4_unittest.cc \ + samples/sample5_unittest.cc samples/sample6_unittest.cc \ + samples/sample7_unittest.cc samples/sample8_unittest.cc \ + samples/sample9_unittest.cc test/gtest-death-test_ex_test.cc \ + test/gtest-death-test_test.cc test/gtest-filepath_test.cc \ + test/gtest-linked_ptr_test.cc test/gtest-listener_test.cc \ + test/gtest-message_test.cc test/gtest-options_test.cc \ + test/gtest-param-test2_test.cc test/gtest-param-test2_test.cc \ + test/gtest-param-test_test.cc test/gtest-param-test_test.cc \ + test/gtest-param-test_test.h test/gtest-port_test.cc \ + test/gtest_premature_exit_test.cc test/gtest-printers_test.cc \ + test/gtest-test-part_test.cc test/gtest-tuple_test.cc \ + test/gtest-typed-test2_test.cc test/gtest-typed-test_test.cc \ + test/gtest-typed-test_test.h test/gtest-unittest-api_test.cc \ + test/gtest_break_on_failure_unittest_.cc \ + test/gtest_catch_exceptions_test_.cc test/gtest_color_test_.cc \ + test/gtest_env_var_test_.cc test/gtest_environment_test.cc \ + test/gtest_filter_unittest_.cc test/gtest_help_test_.cc \ + test/gtest_list_tests_unittest_.cc test/gtest_main_unittest.cc \ + test/gtest_no_test_unittest.cc test/gtest_output_test_.cc \ + test/gtest_pred_impl_unittest.cc test/gtest_prod_test.cc \ + test/gtest_repeat_test.cc test/gtest_shuffle_test_.cc \ + test/gtest_sole_header_test.cc test/gtest_stress_test.cc \ + test/gtest_throw_on_failure_ex_test.cc \ + test/gtest_throw_on_failure_test_.cc \ + test/gtest_uninitialized_test_.cc test/gtest_unittest.cc \ + test/gtest_unittest.cc test/gtest_xml_outfile1_test_.cc \ + test/gtest_xml_outfile2_test_.cc \ + test/gtest_xml_output_unittest_.cc test/production.cc \ + test/production.h test/gtest_break_on_failure_unittest.py \ + test/gtest_catch_exceptions_test.py test/gtest_color_test.py \ + test/gtest_env_var_test.py test/gtest_filter_unittest.py \ + test/gtest_help_test.py test/gtest_list_tests_unittest.py \ + test/gtest_output_test.py \ + test/gtest_output_test_golden_lin.txt \ + test/gtest_shuffle_test.py test/gtest_test_utils.py \ + test/gtest_throw_on_failure_test.py \ + test/gtest_uninitialized_test.py \ + test/gtest_xml_outfiles_test.py \ + 
test/gtest_xml_output_unittest.py test/gtest_xml_test_utils.py \
+	CMakeLists.txt cmake/internal_utils.cmake msvc/gtest-md.sln \
+	msvc/gtest-md.vcproj msvc/gtest.sln msvc/gtest.vcproj \
+	msvc/gtest_main-md.vcproj msvc/gtest_main.vcproj \
+	msvc/gtest_prod_test-md.vcproj msvc/gtest_prod_test.vcproj \
+	msvc/gtest_unittest-md.vcproj msvc/gtest_unittest.vcproj \
+	xcode/Config/DebugProject.xcconfig \
+	xcode/Config/FrameworkTarget.xcconfig \
+	xcode/Config/General.xcconfig \
+	xcode/Config/ReleaseProject.xcconfig \
+	xcode/Config/StaticLibraryTarget.xcconfig \
+	xcode/Config/TestTarget.xcconfig xcode/Resources/Info.plist \
+	xcode/Scripts/runtests.sh xcode/Scripts/versiongenerate.py \
+	xcode/gtest.xcodeproj/project.pbxproj \
+	xcode/Samples/FrameworkSample/Info.plist \
+	xcode/Samples/FrameworkSample/WidgetFramework.xcodeproj/project.pbxproj \
+	xcode/Samples/FrameworkSample/runtests.sh \
+	xcode/Samples/FrameworkSample/widget.cc \
+	xcode/Samples/FrameworkSample/widget.h \
+	xcode/Samples/FrameworkSample/widget_test.cc \
+	codegear/gtest.cbproj codegear/gtest.groupproj \
+	codegear/gtest_all.cc codegear/gtest_link.cc \
+	codegear/gtest_main.cbproj codegear/gtest_unittest.cbproj \
+	$(m4data_DATA)
+
+# gtest source files that we don't compile directly.  They are
+# #included by gtest-all.cc.
+GTEST_SRC = \
+	src/gtest-death-test.cc \
+	src/gtest-filepath.cc \
+	src/gtest-internal-inl.h \
+	src/gtest-port.cc \
+	src/gtest-printers.cc \
+	src/gtest-test-part.cc \
+	src/gtest-typed-test.cc \
+	src/gtest.cc
+
+
+# Distribute and install M4 macro
+m4datadir = $(datadir)/aclocal
+m4data_DATA = m4/gtest.m4
+
+# We define the global AM_CPPFLAGS as everything we compile includes from these
+# directories.
+AM_CPPFLAGS = -I$(srcdir) -I$(srcdir)/include
+@HAVE_PTHREADS_FALSE@AM_CXXFLAGS = -DGTEST_HAS_PTHREAD=0
+
+# Modifies compiler and linker flags for pthreads compatibility.
+@HAVE_PTHREADS_TRUE@AM_CXXFLAGS = @PTHREAD_CFLAGS@ -DGTEST_HAS_PTHREAD=1
+@HAVE_PTHREADS_TRUE@AM_LIBS = @PTHREAD_LIBS@
+
+# Build rules for libraries.
+lib_LTLIBRARIES = lib/libgtest.la lib/libgtest_main.la
+lib_libgtest_la_SOURCES = src/gtest-all.cc
+pkginclude_HEADERS = \
+	include/gtest/gtest-death-test.h \
+	include/gtest/gtest-message.h \
+	include/gtest/gtest-param-test.h \
+	include/gtest/gtest-printers.h \
+	include/gtest/gtest-spi.h \
+	include/gtest/gtest-test-part.h \
+	include/gtest/gtest-typed-test.h \
+	include/gtest/gtest.h \
+	include/gtest/gtest_pred_impl.h \
+	include/gtest/gtest_prod.h
+
+pkginclude_internaldir = $(pkgincludedir)/internal
+pkginclude_internal_HEADERS = \
+	include/gtest/internal/gtest-death-test-internal.h \
+	include/gtest/internal/gtest-filepath.h \
+	include/gtest/internal/gtest-internal.h \
+	include/gtest/internal/gtest-linked_ptr.h \
+	include/gtest/internal/gtest-param-util-generated.h \
+	include/gtest/internal/gtest-param-util.h \
+	include/gtest/internal/gtest-port.h \
+	include/gtest/internal/gtest-string.h \
+	include/gtest/internal/gtest-tuple.h \
+	include/gtest/internal/gtest-type-util.h
+
+lib_libgtest_main_la_SOURCES = src/gtest_main.cc
+lib_libgtest_main_la_LIBADD = lib/libgtest.la
+
+# Build rules for samples and tests.
Automake's naming for some of +# these variables isn't terribly obvious, so this is a brief +# reference: +# +# TESTS -- Programs run automatically by "make check" +# check_PROGRAMS -- Programs built by "make check" but not necessarily run +noinst_LTLIBRARIES = samples/libsamples.la +samples_libsamples_la_SOURCES = \ + samples/sample1.cc \ + samples/sample1.h \ + samples/sample2.cc \ + samples/sample2.h \ + samples/sample3-inl.h \ + samples/sample4.cc \ + samples/sample4.h + +TESTS_ENVIRONMENT = GTEST_SOURCE_DIR="$(srcdir)/test" \ + GTEST_BUILD_DIR="$(top_builddir)/test" + +samples_sample1_unittest_SOURCES = samples/sample1_unittest.cc +samples_sample1_unittest_LDADD = lib/libgtest_main.la \ + lib/libgtest.la \ + samples/libsamples.la + +samples_sample10_unittest_SOURCES = samples/sample10_unittest.cc +samples_sample10_unittest_LDADD = lib/libgtest.la +test_gtest_all_test_SOURCES = test/gtest_all_test.cc +test_gtest_all_test_LDADD = lib/libgtest_main.la \ + lib/libgtest.la + + +# Tests that fused gtest files compile and work. +FUSED_GTEST_SRC = \ + fused-src/gtest/gtest-all.cc \ + fused-src/gtest/gtest.h \ + fused-src/gtest/gtest_main.cc + +@HAVE_PYTHON_TRUE@test_fused_gtest_test_SOURCES = $(FUSED_GTEST_SRC) \ +@HAVE_PYTHON_TRUE@ samples/sample1.cc samples/sample1_unittest.cc + +@HAVE_PYTHON_TRUE@test_fused_gtest_test_CPPFLAGS = -I"$(srcdir)/fused-src" + +# Death tests may produce core dumps in the build directory. In case +# this happens, clean them to keep distcleancheck happy. +CLEANFILES = core +all: all-am + +.SUFFIXES: +.SUFFIXES: .cc .lo .o .obj +am--refresh: Makefile + @: +$(srcdir)/Makefile.in: $(srcdir)/Makefile.am $(am__configure_deps) + @for dep in $?; do \ + case '$(am__configure_deps)' in \ + *$$dep*) \ + echo ' cd $(srcdir) && $(AUTOMAKE) --foreign'; \ + $(am__cd) $(srcdir) && $(AUTOMAKE) --foreign \ + && exit 0; \ + exit 1;; \ + esac; \ + done; \ + echo ' cd $(top_srcdir) && $(AUTOMAKE) --foreign Makefile'; \ + $(am__cd) $(top_srcdir) && \ + $(AUTOMAKE) --foreign Makefile +.PRECIOUS: Makefile +Makefile: $(srcdir)/Makefile.in $(top_builddir)/config.status + @case '$?' in \ + *config.status*) \ + echo ' $(SHELL) ./config.status'; \ + $(SHELL) ./config.status;; \ + *) \ + echo ' cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe)'; \ + cd $(top_builddir) && $(SHELL) ./config.status $@ $(am__depfiles_maybe);; \ + esac; + +$(top_builddir)/config.status: $(top_srcdir)/configure $(CONFIG_STATUS_DEPENDENCIES) + $(SHELL) ./config.status --recheck + +$(top_srcdir)/configure: $(am__configure_deps) + $(am__cd) $(srcdir) && $(AUTOCONF) +$(ACLOCAL_M4): $(am__aclocal_m4_deps) + $(am__cd) $(srcdir) && $(ACLOCAL) $(ACLOCAL_AMFLAGS) +$(am__aclocal_m4_deps): + +build-aux/config.h: build-aux/stamp-h1 + @if test ! -f $@; then rm -f build-aux/stamp-h1; else :; fi + @if test ! 
-f $@; then $(MAKE) $(AM_MAKEFLAGS) build-aux/stamp-h1; else :; fi + +build-aux/stamp-h1: $(top_srcdir)/build-aux/config.h.in $(top_builddir)/config.status + @rm -f build-aux/stamp-h1 + cd $(top_builddir) && $(SHELL) ./config.status build-aux/config.h +$(top_srcdir)/build-aux/config.h.in: $(am__configure_deps) + ($(am__cd) $(top_srcdir) && $(AUTOHEADER)) + rm -f build-aux/stamp-h1 + touch $@ + +distclean-hdr: + -rm -f build-aux/config.h build-aux/stamp-h1 +scripts/gtest-config: $(top_builddir)/config.status $(top_srcdir)/scripts/gtest-config.in + cd $(top_builddir) && $(SHELL) ./config.status $@ +install-libLTLIBRARIES: $(lib_LTLIBRARIES) + @$(NORMAL_INSTALL) + test -z "$(libdir)" || $(MKDIR_P) "$(DESTDIR)$(libdir)" + @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ + list2=; for p in $$list; do \ + if test -f $$p; then \ + list2="$$list2 $$p"; \ + else :; fi; \ + done; \ + test -z "$$list2" || { \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 '$(DESTDIR)$(libdir)'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=install $(INSTALL) $(INSTALL_STRIP_FLAG) $$list2 "$(DESTDIR)$(libdir)"; \ + } + +uninstall-libLTLIBRARIES: + @$(NORMAL_UNINSTALL) + @list='$(lib_LTLIBRARIES)'; test -n "$(libdir)" || list=; \ + for p in $$list; do \ + $(am__strip_dir) \ + echo " $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f '$(DESTDIR)$(libdir)/$$f'"; \ + $(LIBTOOL) $(AM_LIBTOOLFLAGS) $(LIBTOOLFLAGS) --mode=uninstall rm -f "$(DESTDIR)$(libdir)/$$f"; \ + done + +clean-libLTLIBRARIES: + -test -z "$(lib_LTLIBRARIES)" || rm -f $(lib_LTLIBRARIES) + @list='$(lib_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done + +clean-noinstLTLIBRARIES: + -test -z "$(noinst_LTLIBRARIES)" || rm -f $(noinst_LTLIBRARIES) + @list='$(noinst_LTLIBRARIES)'; for p in $$list; do \ + dir="`echo $$p | sed -e 's|/[^/]*$$||'`"; \ + test "$$dir" != "$$p" || dir=.; \ + echo "rm -f \"$${dir}/so_locations\""; \ + rm -f "$${dir}/so_locations"; \ + done +src/$(am__dirstamp): + @$(MKDIR_P) src + @: > src/$(am__dirstamp) +src/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) src/$(DEPDIR) + @: > src/$(DEPDIR)/$(am__dirstamp) +src/gtest-all.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +lib/$(am__dirstamp): + @$(MKDIR_P) lib + @: > lib/$(am__dirstamp) +lib/libgtest.la: $(lib_libgtest_la_OBJECTS) $(lib_libgtest_la_DEPENDENCIES) $(EXTRA_lib_libgtest_la_DEPENDENCIES) lib/$(am__dirstamp) + $(CXXLINK) -rpath $(libdir) $(lib_libgtest_la_OBJECTS) $(lib_libgtest_la_LIBADD) $(LIBS) +src/gtest_main.lo: src/$(am__dirstamp) src/$(DEPDIR)/$(am__dirstamp) +lib/libgtest_main.la: $(lib_libgtest_main_la_OBJECTS) $(lib_libgtest_main_la_DEPENDENCIES) $(EXTRA_lib_libgtest_main_la_DEPENDENCIES) lib/$(am__dirstamp) + $(CXXLINK) -rpath $(libdir) $(lib_libgtest_main_la_OBJECTS) $(lib_libgtest_main_la_LIBADD) $(LIBS) +samples/$(am__dirstamp): + @$(MKDIR_P) samples + @: > samples/$(am__dirstamp) +samples/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) samples/$(DEPDIR) + @: > samples/$(DEPDIR)/$(am__dirstamp) +samples/sample1.lo: samples/$(am__dirstamp) \ + samples/$(DEPDIR)/$(am__dirstamp) +samples/sample2.lo: samples/$(am__dirstamp) \ + samples/$(DEPDIR)/$(am__dirstamp) +samples/sample4.lo: samples/$(am__dirstamp) \ + samples/$(DEPDIR)/$(am__dirstamp) +samples/libsamples.la: $(samples_libsamples_la_OBJECTS) 
$(samples_libsamples_la_DEPENDENCIES) $(EXTRA_samples_libsamples_la_DEPENDENCIES) samples/$(am__dirstamp) + $(CXXLINK) $(samples_libsamples_la_OBJECTS) $(samples_libsamples_la_LIBADD) $(LIBS) + +clean-checkPROGRAMS: + @list='$(check_PROGRAMS)'; test -n "$$list" || exit 0; \ + echo " rm -f" $$list; \ + rm -f $$list || exit $$?; \ + test -n "$(EXEEXT)" || exit 0; \ + list=`for p in $$list; do echo "$$p"; done | sed 's/$(EXEEXT)$$//'`; \ + echo " rm -f" $$list; \ + rm -f $$list +samples/sample10_unittest.$(OBJEXT): samples/$(am__dirstamp) \ + samples/$(DEPDIR)/$(am__dirstamp) +samples/sample10_unittest$(EXEEXT): $(samples_sample10_unittest_OBJECTS) $(samples_sample10_unittest_DEPENDENCIES) $(EXTRA_samples_sample10_unittest_DEPENDENCIES) samples/$(am__dirstamp) + @rm -f samples/sample10_unittest$(EXEEXT) + $(CXXLINK) $(samples_sample10_unittest_OBJECTS) $(samples_sample10_unittest_LDADD) $(LIBS) +samples/sample1_unittest.$(OBJEXT): samples/$(am__dirstamp) \ + samples/$(DEPDIR)/$(am__dirstamp) +samples/sample1_unittest$(EXEEXT): $(samples_sample1_unittest_OBJECTS) $(samples_sample1_unittest_DEPENDENCIES) $(EXTRA_samples_sample1_unittest_DEPENDENCIES) samples/$(am__dirstamp) + @rm -f samples/sample1_unittest$(EXEEXT) + $(CXXLINK) $(samples_sample1_unittest_OBJECTS) $(samples_sample1_unittest_LDADD) $(LIBS) +fused-src/gtest/$(am__dirstamp): + @$(MKDIR_P) fused-src/gtest + @: > fused-src/gtest/$(am__dirstamp) +fused-src/gtest/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) fused-src/gtest/$(DEPDIR) + @: > fused-src/gtest/$(DEPDIR)/$(am__dirstamp) +fused-src/gtest/test_fused_gtest_test-gtest-all.$(OBJEXT): \ + fused-src/gtest/$(am__dirstamp) \ + fused-src/gtest/$(DEPDIR)/$(am__dirstamp) +fused-src/gtest/test_fused_gtest_test-gtest_main.$(OBJEXT): \ + fused-src/gtest/$(am__dirstamp) \ + fused-src/gtest/$(DEPDIR)/$(am__dirstamp) +samples/test_fused_gtest_test-sample1.$(OBJEXT): \ + samples/$(am__dirstamp) samples/$(DEPDIR)/$(am__dirstamp) +samples/test_fused_gtest_test-sample1_unittest.$(OBJEXT): \ + samples/$(am__dirstamp) samples/$(DEPDIR)/$(am__dirstamp) +test/$(am__dirstamp): + @$(MKDIR_P) test + @: > test/$(am__dirstamp) +test/fused_gtest_test$(EXEEXT): $(test_fused_gtest_test_OBJECTS) $(test_fused_gtest_test_DEPENDENCIES) $(EXTRA_test_fused_gtest_test_DEPENDENCIES) test/$(am__dirstamp) + @rm -f test/fused_gtest_test$(EXEEXT) + $(CXXLINK) $(test_fused_gtest_test_OBJECTS) $(test_fused_gtest_test_LDADD) $(LIBS) +test/$(DEPDIR)/$(am__dirstamp): + @$(MKDIR_P) test/$(DEPDIR) + @: > test/$(DEPDIR)/$(am__dirstamp) +test/gtest_all_test.$(OBJEXT): test/$(am__dirstamp) \ + test/$(DEPDIR)/$(am__dirstamp) +test/gtest_all_test$(EXEEXT): $(test_gtest_all_test_OBJECTS) $(test_gtest_all_test_DEPENDENCIES) $(EXTRA_test_gtest_all_test_DEPENDENCIES) test/$(am__dirstamp) + @rm -f test/gtest_all_test$(EXEEXT) + $(CXXLINK) $(test_gtest_all_test_OBJECTS) $(test_gtest_all_test_LDADD) $(LIBS) + +mostlyclean-compile: + -rm -f *.$(OBJEXT) + -rm -f fused-src/gtest/test_fused_gtest_test-gtest-all.$(OBJEXT) + -rm -f fused-src/gtest/test_fused_gtest_test-gtest_main.$(OBJEXT) + -rm -f samples/sample1.$(OBJEXT) + -rm -f samples/sample1.lo + -rm -f samples/sample10_unittest.$(OBJEXT) + -rm -f samples/sample1_unittest.$(OBJEXT) + -rm -f samples/sample2.$(OBJEXT) + -rm -f samples/sample2.lo + -rm -f samples/sample4.$(OBJEXT) + -rm -f samples/sample4.lo + -rm -f samples/test_fused_gtest_test-sample1.$(OBJEXT) + -rm -f samples/test_fused_gtest_test-sample1_unittest.$(OBJEXT) + -rm -f src/gtest-all.$(OBJEXT) + -rm -f src/gtest-all.lo + -rm 
-f src/gtest_main.$(OBJEXT) + -rm -f src/gtest_main.lo + -rm -f test/gtest_all_test.$(OBJEXT) + +distclean-compile: + -rm -f *.tab.c + +@AMDEP_TRUE@@am__include@ @am__quote@fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest-all.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest_main.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@samples/$(DEPDIR)/sample1.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@samples/$(DEPDIR)/sample10_unittest.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@samples/$(DEPDIR)/sample1_unittest.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@samples/$(DEPDIR)/sample2.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@samples/$(DEPDIR)/sample4.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@samples/$(DEPDIR)/test_fused_gtest_test-sample1.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@samples/$(DEPDIR)/test_fused_gtest_test-sample1_unittest.Po@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/gtest-all.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@src/$(DEPDIR)/gtest_main.Plo@am__quote@ +@AMDEP_TRUE@@am__include@ @am__quote@test/$(DEPDIR)/gtest_all_test.Po@am__quote@ + +.cc.o: +@am__fastdepCXX_TRUE@ depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.o$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ $< + +.cc.obj: +@am__fastdepCXX_TRUE@ depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.obj$$||'`;\ +@am__fastdepCXX_TRUE@ $(CXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ `$(CYGPATH_W) '$<'` &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXXCOMPILE) -c -o $@ `$(CYGPATH_W) '$<'` + +.cc.lo: +@am__fastdepCXX_TRUE@ depbase=`echo $@ | sed 's|[^/]*$$|$(DEPDIR)/&|;s|\.lo$$||'`;\ +@am__fastdepCXX_TRUE@ $(LTCXXCOMPILE) -MT $@ -MD -MP -MF $$depbase.Tpo -c -o $@ $< &&\ +@am__fastdepCXX_TRUE@ $(am__mv) $$depbase.Tpo $$depbase.Plo +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='$<' object='$@' libtool=yes @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(LTCXXCOMPILE) -c -o $@ $< + +fused-src/gtest/test_fused_gtest_test-gtest-all.o: fused-src/gtest/gtest-all.cc +@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT fused-src/gtest/test_fused_gtest_test-gtest-all.o -MD -MP -MF fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest-all.Tpo -c -o fused-src/gtest/test_fused_gtest_test-gtest-all.o `test -f 'fused-src/gtest/gtest-all.cc' || echo '$(srcdir)/'`fused-src/gtest/gtest-all.cc +@am__fastdepCXX_TRUE@ $(am__mv) fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest-all.Tpo fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest-all.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='fused-src/gtest/gtest-all.cc' object='fused-src/gtest/test_fused_gtest_test-gtest-all.o' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) 
$(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o fused-src/gtest/test_fused_gtest_test-gtest-all.o `test -f 'fused-src/gtest/gtest-all.cc' || echo '$(srcdir)/'`fused-src/gtest/gtest-all.cc + +fused-src/gtest/test_fused_gtest_test-gtest-all.obj: fused-src/gtest/gtest-all.cc +@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT fused-src/gtest/test_fused_gtest_test-gtest-all.obj -MD -MP -MF fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest-all.Tpo -c -o fused-src/gtest/test_fused_gtest_test-gtest-all.obj `if test -f 'fused-src/gtest/gtest-all.cc'; then $(CYGPATH_W) 'fused-src/gtest/gtest-all.cc'; else $(CYGPATH_W) '$(srcdir)/fused-src/gtest/gtest-all.cc'; fi` +@am__fastdepCXX_TRUE@ $(am__mv) fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest-all.Tpo fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest-all.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='fused-src/gtest/gtest-all.cc' object='fused-src/gtest/test_fused_gtest_test-gtest-all.obj' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o fused-src/gtest/test_fused_gtest_test-gtest-all.obj `if test -f 'fused-src/gtest/gtest-all.cc'; then $(CYGPATH_W) 'fused-src/gtest/gtest-all.cc'; else $(CYGPATH_W) '$(srcdir)/fused-src/gtest/gtest-all.cc'; fi` + +fused-src/gtest/test_fused_gtest_test-gtest_main.o: fused-src/gtest/gtest_main.cc +@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT fused-src/gtest/test_fused_gtest_test-gtest_main.o -MD -MP -MF fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest_main.Tpo -c -o fused-src/gtest/test_fused_gtest_test-gtest_main.o `test -f 'fused-src/gtest/gtest_main.cc' || echo '$(srcdir)/'`fused-src/gtest/gtest_main.cc +@am__fastdepCXX_TRUE@ $(am__mv) fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest_main.Tpo fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest_main.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='fused-src/gtest/gtest_main.cc' object='fused-src/gtest/test_fused_gtest_test-gtest_main.o' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o fused-src/gtest/test_fused_gtest_test-gtest_main.o `test -f 'fused-src/gtest/gtest_main.cc' || echo '$(srcdir)/'`fused-src/gtest/gtest_main.cc + +fused-src/gtest/test_fused_gtest_test-gtest_main.obj: fused-src/gtest/gtest_main.cc +@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT fused-src/gtest/test_fused_gtest_test-gtest_main.obj -MD -MP -MF fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest_main.Tpo -c -o fused-src/gtest/test_fused_gtest_test-gtest_main.obj `if test -f 'fused-src/gtest/gtest_main.cc'; then $(CYGPATH_W) 'fused-src/gtest/gtest_main.cc'; else $(CYGPATH_W) '$(srcdir)/fused-src/gtest/gtest_main.cc'; fi` +@am__fastdepCXX_TRUE@ $(am__mv) 
fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest_main.Tpo fused-src/gtest/$(DEPDIR)/test_fused_gtest_test-gtest_main.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='fused-src/gtest/gtest_main.cc' object='fused-src/gtest/test_fused_gtest_test-gtest_main.obj' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o fused-src/gtest/test_fused_gtest_test-gtest_main.obj `if test -f 'fused-src/gtest/gtest_main.cc'; then $(CYGPATH_W) 'fused-src/gtest/gtest_main.cc'; else $(CYGPATH_W) '$(srcdir)/fused-src/gtest/gtest_main.cc'; fi` + +samples/test_fused_gtest_test-sample1.o: samples/sample1.cc +@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT samples/test_fused_gtest_test-sample1.o -MD -MP -MF samples/$(DEPDIR)/test_fused_gtest_test-sample1.Tpo -c -o samples/test_fused_gtest_test-sample1.o `test -f 'samples/sample1.cc' || echo '$(srcdir)/'`samples/sample1.cc +@am__fastdepCXX_TRUE@ $(am__mv) samples/$(DEPDIR)/test_fused_gtest_test-sample1.Tpo samples/$(DEPDIR)/test_fused_gtest_test-sample1.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='samples/sample1.cc' object='samples/test_fused_gtest_test-sample1.o' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o samples/test_fused_gtest_test-sample1.o `test -f 'samples/sample1.cc' || echo '$(srcdir)/'`samples/sample1.cc + +samples/test_fused_gtest_test-sample1.obj: samples/sample1.cc +@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT samples/test_fused_gtest_test-sample1.obj -MD -MP -MF samples/$(DEPDIR)/test_fused_gtest_test-sample1.Tpo -c -o samples/test_fused_gtest_test-sample1.obj `if test -f 'samples/sample1.cc'; then $(CYGPATH_W) 'samples/sample1.cc'; else $(CYGPATH_W) '$(srcdir)/samples/sample1.cc'; fi` +@am__fastdepCXX_TRUE@ $(am__mv) samples/$(DEPDIR)/test_fused_gtest_test-sample1.Tpo samples/$(DEPDIR)/test_fused_gtest_test-sample1.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='samples/sample1.cc' object='samples/test_fused_gtest_test-sample1.obj' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o samples/test_fused_gtest_test-sample1.obj `if test -f 'samples/sample1.cc'; then $(CYGPATH_W) 'samples/sample1.cc'; else $(CYGPATH_W) '$(srcdir)/samples/sample1.cc'; fi` + +samples/test_fused_gtest_test-sample1_unittest.o: samples/sample1_unittest.cc +@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT samples/test_fused_gtest_test-sample1_unittest.o -MD -MP -MF samples/$(DEPDIR)/test_fused_gtest_test-sample1_unittest.Tpo -c -o samples/test_fused_gtest_test-sample1_unittest.o `test -f 'samples/sample1_unittest.cc' || echo '$(srcdir)/'`samples/sample1_unittest.cc +@am__fastdepCXX_TRUE@ $(am__mv) 
samples/$(DEPDIR)/test_fused_gtest_test-sample1_unittest.Tpo samples/$(DEPDIR)/test_fused_gtest_test-sample1_unittest.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='samples/sample1_unittest.cc' object='samples/test_fused_gtest_test-sample1_unittest.o' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o samples/test_fused_gtest_test-sample1_unittest.o `test -f 'samples/sample1_unittest.cc' || echo '$(srcdir)/'`samples/sample1_unittest.cc + +samples/test_fused_gtest_test-sample1_unittest.obj: samples/sample1_unittest.cc +@am__fastdepCXX_TRUE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -MT samples/test_fused_gtest_test-sample1_unittest.obj -MD -MP -MF samples/$(DEPDIR)/test_fused_gtest_test-sample1_unittest.Tpo -c -o samples/test_fused_gtest_test-sample1_unittest.obj `if test -f 'samples/sample1_unittest.cc'; then $(CYGPATH_W) 'samples/sample1_unittest.cc'; else $(CYGPATH_W) '$(srcdir)/samples/sample1_unittest.cc'; fi` +@am__fastdepCXX_TRUE@ $(am__mv) samples/$(DEPDIR)/test_fused_gtest_test-sample1_unittest.Tpo samples/$(DEPDIR)/test_fused_gtest_test-sample1_unittest.Po +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ source='samples/sample1_unittest.cc' object='samples/test_fused_gtest_test-sample1_unittest.obj' libtool=no @AMDEPBACKSLASH@ +@AMDEP_TRUE@@am__fastdepCXX_FALSE@ DEPDIR=$(DEPDIR) $(CXXDEPMODE) $(depcomp) @AMDEPBACKSLASH@ +@am__fastdepCXX_FALSE@ $(CXX) $(DEFS) $(DEFAULT_INCLUDES) $(INCLUDES) $(test_fused_gtest_test_CPPFLAGS) $(CPPFLAGS) $(AM_CXXFLAGS) $(CXXFLAGS) -c -o samples/test_fused_gtest_test-sample1_unittest.obj `if test -f 'samples/sample1_unittest.cc'; then $(CYGPATH_W) 'samples/sample1_unittest.cc'; else $(CYGPATH_W) '$(srcdir)/samples/sample1_unittest.cc'; fi` + +mostlyclean-libtool: + -rm -f *.lo + +clean-libtool: + -rm -rf .libs _libs + -rm -rf lib/.libs lib/_libs + -rm -rf samples/.libs samples/_libs + -rm -rf src/.libs src/_libs + -rm -rf test/.libs test/_libs + +distclean-libtool: + -rm -f libtool config.lt +install-m4dataDATA: $(m4data_DATA) + @$(NORMAL_INSTALL) + test -z "$(m4datadir)" || $(MKDIR_P) "$(DESTDIR)$(m4datadir)" + @list='$(m4data_DATA)'; test -n "$(m4datadir)" || list=; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_DATA) $$files '$(DESTDIR)$(m4datadir)'"; \ + $(INSTALL_DATA) $$files "$(DESTDIR)$(m4datadir)" || exit $$?; \ + done + +uninstall-m4dataDATA: + @$(NORMAL_UNINSTALL) + @list='$(m4data_DATA)'; test -n "$(m4datadir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(m4datadir)'; $(am__uninstall_files_from_dir) +install-pkgincludeHEADERS: $(pkginclude_HEADERS) + @$(NORMAL_INSTALL) + test -z "$(pkgincludedir)" || $(MKDIR_P) "$(DESTDIR)$(pkgincludedir)" + @list='$(pkginclude_HEADERS)'; test -n "$(pkgincludedir)" || list=; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(pkgincludedir)'"; \ + $(INSTALL_HEADER) $$files "$(DESTDIR)$(pkgincludedir)" || exit $$?; \ + done + +uninstall-pkgincludeHEADERS: + @$(NORMAL_UNINSTALL) + 
@list='$(pkginclude_HEADERS)'; test -n "$(pkgincludedir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pkgincludedir)'; $(am__uninstall_files_from_dir) +install-pkginclude_internalHEADERS: $(pkginclude_internal_HEADERS) + @$(NORMAL_INSTALL) + test -z "$(pkginclude_internaldir)" || $(MKDIR_P) "$(DESTDIR)$(pkginclude_internaldir)" + @list='$(pkginclude_internal_HEADERS)'; test -n "$(pkginclude_internaldir)" || list=; \ + for p in $$list; do \ + if test -f "$$p"; then d=; else d="$(srcdir)/"; fi; \ + echo "$$d$$p"; \ + done | $(am__base_list) | \ + while read files; do \ + echo " $(INSTALL_HEADER) $$files '$(DESTDIR)$(pkginclude_internaldir)'"; \ + $(INSTALL_HEADER) $$files "$(DESTDIR)$(pkginclude_internaldir)" || exit $$?; \ + done + +uninstall-pkginclude_internalHEADERS: + @$(NORMAL_UNINSTALL) + @list='$(pkginclude_internal_HEADERS)'; test -n "$(pkginclude_internaldir)" || list=; \ + files=`for p in $$list; do echo $$p; done | sed -e 's|^.*/||'`; \ + dir='$(DESTDIR)$(pkginclude_internaldir)'; $(am__uninstall_files_from_dir) + +ID: $(HEADERS) $(SOURCES) $(LISP) $(TAGS_FILES) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + mkid -fID $$unique +tags: TAGS + +TAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + set x; \ + here=`pwd`; \ + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + shift; \ + if test -z "$(ETAGS_ARGS)$$*$$unique"; then :; else \ + test -n "$$unique" || unique=$$empty_fix; \ + if test $$# -gt 0; then \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + "$$@" $$unique; \ + else \ + $(ETAGS) $(ETAGSFLAGS) $(AM_ETAGSFLAGS) $(ETAGS_ARGS) \ + $$unique; \ + fi; \ + fi +ctags: CTAGS +CTAGS: $(HEADERS) $(SOURCES) $(TAGS_DEPENDENCIES) \ + $(TAGS_FILES) $(LISP) + list='$(SOURCES) $(HEADERS) $(LISP) $(TAGS_FILES)'; \ + unique=`for i in $$list; do \ + if test -f "$$i"; then echo $$i; else echo $(srcdir)/$$i; fi; \ + done | \ + $(AWK) '{ files[$$0] = 1; nonempty = 1; } \ + END { if (nonempty) { for (i in files) print i; }; }'`; \ + test -z "$(CTAGS_ARGS)$$unique" \ + || $(CTAGS) $(CTAGSFLAGS) $(AM_CTAGSFLAGS) $(CTAGS_ARGS) \ + $$unique + +GTAGS: + here=`$(am__cd) $(top_builddir) && pwd` \ + && $(am__cd) $(top_srcdir) \ + && gtags -i $(GTAGS_ARGS) "$$here" + +distclean-tags: + -rm -f TAGS ID GTAGS GRTAGS GSYMS GPATH tags + +check-TESTS: $(TESTS) + @failed=0; all=0; xfail=0; xpass=0; skip=0; \ + srcdir=$(srcdir); export srcdir; \ + list=' $(TESTS) '; \ + $(am__tty_colors); \ + if test -n "$$list"; then \ + for tst in $$list; do \ + if test -f ./$$tst; then dir=./; \ + elif test -f $$tst; then dir=; \ + else dir="$(srcdir)/"; fi; \ + if $(TESTS_ENVIRONMENT) $${dir}$$tst; then \ + all=`expr $$all + 1`; \ + case " $(XFAIL_TESTS) " in \ + *[\ \ ]$$tst[\ \ ]*) \ + xpass=`expr $$xpass + 1`; \ + failed=`expr $$failed + 1`; \ + col=$$red; res=XPASS; \ + ;; \ + *) \ + col=$$grn; res=PASS; \ + ;; \ + esac; \ + elif test $$? 
-ne 77; then \ + all=`expr $$all + 1`; \ + case " $(XFAIL_TESTS) " in \ + *[\ \ ]$$tst[\ \ ]*) \ + xfail=`expr $$xfail + 1`; \ + col=$$lgn; res=XFAIL; \ + ;; \ + *) \ + failed=`expr $$failed + 1`; \ + col=$$red; res=FAIL; \ + ;; \ + esac; \ + else \ + skip=`expr $$skip + 1`; \ + col=$$blu; res=SKIP; \ + fi; \ + echo "$${col}$$res$${std}: $$tst"; \ + done; \ + if test "$$all" -eq 1; then \ + tests="test"; \ + All=""; \ + else \ + tests="tests"; \ + All="All "; \ + fi; \ + if test "$$failed" -eq 0; then \ + if test "$$xfail" -eq 0; then \ + banner="$$All$$all $$tests passed"; \ + else \ + if test "$$xfail" -eq 1; then failures=failure; else failures=failures; fi; \ + banner="$$All$$all $$tests behaved as expected ($$xfail expected $$failures)"; \ + fi; \ + else \ + if test "$$xpass" -eq 0; then \ + banner="$$failed of $$all $$tests failed"; \ + else \ + if test "$$xpass" -eq 1; then passes=pass; else passes=passes; fi; \ + banner="$$failed of $$all $$tests did not behave as expected ($$xpass unexpected $$passes)"; \ + fi; \ + fi; \ + dashes="$$banner"; \ + skipped=""; \ + if test "$$skip" -ne 0; then \ + if test "$$skip" -eq 1; then \ + skipped="($$skip test was not run)"; \ + else \ + skipped="($$skip tests were not run)"; \ + fi; \ + test `echo "$$skipped" | wc -c` -le `echo "$$banner" | wc -c` || \ + dashes="$$skipped"; \ + fi; \ + report=""; \ + if test "$$failed" -ne 0 && test -n "$(PACKAGE_BUGREPORT)"; then \ + report="Please report to $(PACKAGE_BUGREPORT)"; \ + test `echo "$$report" | wc -c` -le `echo "$$banner" | wc -c` || \ + dashes="$$report"; \ + fi; \ + dashes=`echo "$$dashes" | sed s/./=/g`; \ + if test "$$failed" -eq 0; then \ + col="$$grn"; \ + else \ + col="$$red"; \ + fi; \ + echo "$${col}$$dashes$${std}"; \ + echo "$${col}$$banner$${std}"; \ + test -z "$$skipped" || echo "$${col}$$skipped$${std}"; \ + test -z "$$report" || echo "$${col}$$report$${std}"; \ + echo "$${col}$$dashes$${std}"; \ + test "$$failed" -eq 0; \ + else :; fi + +distdir: $(DISTFILES) + $(am__remove_distdir) + test -d "$(distdir)" || mkdir "$(distdir)" + @srcdirstrip=`echo "$(srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + topsrcdirstrip=`echo "$(top_srcdir)" | sed 's/[].[^$$\\*]/\\\\&/g'`; \ + list='$(DISTFILES)'; \ + dist_files=`for file in $$list; do echo $$file; done | \ + sed -e "s|^$$srcdirstrip/||;t" \ + -e "s|^$$topsrcdirstrip/|$(top_builddir)/|;t"`; \ + case $$dist_files in \ + */*) $(MKDIR_P) `echo "$$dist_files" | \ + sed '/\//!d;s|^|$(distdir)/|;s,/[^/]*$$,,' | \ + sort -u` ;; \ + esac; \ + for file in $$dist_files; do \ + if test -f $$file || test -d $$file; then d=.; else d=$(srcdir); fi; \ + if test -d $$d/$$file; then \ + dir=`echo "/$$file" | sed -e 's,/[^/]*$$,,'`; \ + if test -d "$(distdir)/$$file"; then \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + if test -d $(srcdir)/$$file && test $$d != $(srcdir); then \ + cp -fpR $(srcdir)/$$file "$(distdir)$$dir" || exit 1; \ + find "$(distdir)/$$file" -type d ! -perm -700 -exec chmod u+rwx {} \;; \ + fi; \ + cp -fpR $$d/$$file "$(distdir)$$dir" || exit 1; \ + else \ + test -f "$(distdir)/$$file" \ + || cp -p $$d/$$file "$(distdir)/$$file" \ + || exit 1; \ + fi; \ + done + -test -n "$(am__skip_mode_fix)" \ + || find "$(distdir)" -type d ! -perm -755 \ + -exec chmod u+rwx,go+rx {} \; -o \ + ! -type d ! -perm -444 -links 1 -exec chmod a+r {} \; -o \ + ! -type d ! -perm -400 -exec chmod a+r {} \; -o \ + ! -type d ! 
-perm -444 -exec $(install_sh) -c -m a+r {} {} \; \ + || chmod -R a+r "$(distdir)" +dist-gzip: distdir + tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz + $(am__remove_distdir) +dist-bzip2: distdir + tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 + $(am__remove_distdir) + +dist-lzip: distdir + tardir=$(distdir) && $(am__tar) | lzip -c $${LZIP_OPT--9} >$(distdir).tar.lz + $(am__remove_distdir) + +dist-lzma: distdir + tardir=$(distdir) && $(am__tar) | lzma -9 -c >$(distdir).tar.lzma + $(am__remove_distdir) + +dist-xz: distdir + tardir=$(distdir) && $(am__tar) | XZ_OPT=$${XZ_OPT--e} xz -c >$(distdir).tar.xz + $(am__remove_distdir) + +dist-tarZ: distdir + tardir=$(distdir) && $(am__tar) | compress -c >$(distdir).tar.Z + $(am__remove_distdir) + +dist-shar: distdir + shar $(distdir) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).shar.gz + $(am__remove_distdir) +dist-zip: distdir + -rm -f $(distdir).zip + zip -rq $(distdir).zip $(distdir) + $(am__remove_distdir) + +dist dist-all: distdir + tardir=$(distdir) && $(am__tar) | GZIP=$(GZIP_ENV) gzip -c >$(distdir).tar.gz + tardir=$(distdir) && $(am__tar) | BZIP2=$${BZIP2--9} bzip2 -c >$(distdir).tar.bz2 + -rm -f $(distdir).zip + zip -rq $(distdir).zip $(distdir) + $(am__remove_distdir) + +# This target untars the dist file and tries a VPATH configuration. Then +# it guarantees that the distribution is self-contained by making another +# tarfile. +distcheck: dist + case '$(DIST_ARCHIVES)' in \ + *.tar.gz*) \ + GZIP=$(GZIP_ENV) gzip -dc $(distdir).tar.gz | $(am__untar) ;;\ + *.tar.bz2*) \ + bzip2 -dc $(distdir).tar.bz2 | $(am__untar) ;;\ + *.tar.lzma*) \ + lzma -dc $(distdir).tar.lzma | $(am__untar) ;;\ + *.tar.lz*) \ + lzip -dc $(distdir).tar.lz | $(am__untar) ;;\ + *.tar.xz*) \ + xz -dc $(distdir).tar.xz | $(am__untar) ;;\ + *.tar.Z*) \ + uncompress -c $(distdir).tar.Z | $(am__untar) ;;\ + *.shar.gz*) \ + GZIP=$(GZIP_ENV) gzip -dc $(distdir).shar.gz | unshar ;;\ + *.zip*) \ + unzip $(distdir).zip ;;\ + esac + chmod -R a-w $(distdir); chmod a+w $(distdir) + mkdir $(distdir)/_build + mkdir $(distdir)/_inst + chmod a-w $(distdir) + test -d $(distdir)/_build || exit 0; \ + dc_install_base=`$(am__cd) $(distdir)/_inst && pwd | sed -e 's,^[^:\\/]:[\\/],/,'` \ + && dc_destdir="$${TMPDIR-/tmp}/am-dc-$$$$/" \ + && am__cwd=`pwd` \ + && $(am__cd) $(distdir)/_build \ + && ../configure --srcdir=.. --prefix="$$dc_install_base" \ + $(AM_DISTCHECK_CONFIGURE_FLAGS) \ + $(DISTCHECK_CONFIGURE_FLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) \ + && $(MAKE) $(AM_MAKEFLAGS) dvi \ + && $(MAKE) $(AM_MAKEFLAGS) check \ + && $(MAKE) $(AM_MAKEFLAGS) install \ + && $(MAKE) $(AM_MAKEFLAGS) installcheck \ + && $(MAKE) $(AM_MAKEFLAGS) uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) distuninstallcheck_dir="$$dc_install_base" \ + distuninstallcheck \ + && chmod -R a-w "$$dc_install_base" \ + && ({ \ + (cd ../.. 
&& umask 077 && mkdir "$$dc_destdir") \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" install \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" uninstall \ + && $(MAKE) $(AM_MAKEFLAGS) DESTDIR="$$dc_destdir" \ + distuninstallcheck_dir="$$dc_destdir" distuninstallcheck; \ + } || { rm -rf "$$dc_destdir"; exit 1; }) \ + && rm -rf "$$dc_destdir" \ + && $(MAKE) $(AM_MAKEFLAGS) dist \ + && rm -rf $(DIST_ARCHIVES) \ + && $(MAKE) $(AM_MAKEFLAGS) distcleancheck \ + && cd "$$am__cwd" \ + || exit 1 + $(am__remove_distdir) + @(echo "$(distdir) archives ready for distribution: "; \ + list='$(DIST_ARCHIVES)'; for i in $$list; do echo $$i; done) | \ + sed -e 1h -e 1s/./=/g -e 1p -e 1x -e '$$p' -e '$$x' +distuninstallcheck: + @test -n '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: trying to run $@ with an empty' \ + '$$(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + $(am__cd) '$(distuninstallcheck_dir)' || { \ + echo 'ERROR: cannot chdir into $(distuninstallcheck_dir)' >&2; \ + exit 1; \ + }; \ + test `$(am__distuninstallcheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left after uninstall:" ; \ + if test -n "$(DESTDIR)"; then \ + echo " (check DESTDIR support)"; \ + fi ; \ + $(distuninstallcheck_listfiles) ; \ + exit 1; } >&2 +distcleancheck: distclean + @if test '$(srcdir)' = . ; then \ + echo "ERROR: distcleancheck can only run from a VPATH build" ; \ + exit 1 ; \ + fi + @test `$(distcleancheck_listfiles) | wc -l` -eq 0 \ + || { echo "ERROR: files left in build directory after distclean:" ; \ + $(distcleancheck_listfiles) ; \ + exit 1; } >&2 +check-am: all-am + $(MAKE) $(AM_MAKEFLAGS) $(check_PROGRAMS) + $(MAKE) $(AM_MAKEFLAGS) check-TESTS +check: check-am +all-am: Makefile $(LTLIBRARIES) $(DATA) $(HEADERS) +installdirs: + for dir in "$(DESTDIR)$(libdir)" "$(DESTDIR)$(m4datadir)" "$(DESTDIR)$(pkgincludedir)" "$(DESTDIR)$(pkginclude_internaldir)"; do \ + test -z "$$dir" || $(MKDIR_P) "$$dir"; \ + done +install: install-am +install-exec: install-exec-am +install-data: install-data-am +uninstall: uninstall-am + +install-am: all-am + @$(MAKE) $(AM_MAKEFLAGS) install-exec-am install-data-am + +installcheck: installcheck-am +install-strip: + if test -z '$(STRIP)'; then \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + install; \ + else \ + $(MAKE) $(AM_MAKEFLAGS) INSTALL_PROGRAM="$(INSTALL_STRIP_PROGRAM)" \ + install_sh_PROGRAM="$(INSTALL_STRIP_PROGRAM)" INSTALL_STRIP_FLAG=-s \ + "INSTALL_PROGRAM_ENV=STRIPPROG='$(STRIP)'" install; \ + fi +mostlyclean-generic: + +clean-generic: + -test -z "$(CLEANFILES)" || rm -f $(CLEANFILES) + +distclean-generic: + -test -z "$(CONFIG_CLEAN_FILES)" || rm -f $(CONFIG_CLEAN_FILES) + -test . = "$(srcdir)" || test -z "$(CONFIG_CLEAN_VPATH_FILES)" || rm -f $(CONFIG_CLEAN_VPATH_FILES) + -rm -f fused-src/gtest/$(DEPDIR)/$(am__dirstamp) + -rm -f fused-src/gtest/$(am__dirstamp) + -rm -f lib/$(am__dirstamp) + -rm -f samples/$(DEPDIR)/$(am__dirstamp) + -rm -f samples/$(am__dirstamp) + -rm -f src/$(DEPDIR)/$(am__dirstamp) + -rm -f src/$(am__dirstamp) + -rm -f test/$(DEPDIR)/$(am__dirstamp) + -rm -f test/$(am__dirstamp) + +maintainer-clean-generic: + @echo "This command is intended for maintainers to use" + @echo "it deletes files that may require special tools to rebuild." 
+@HAVE_PYTHON_FALSE@maintainer-clean-local: +clean: clean-am + +clean-am: clean-checkPROGRAMS clean-generic clean-libLTLIBRARIES \ + clean-libtool clean-noinstLTLIBRARIES mostlyclean-am + +distclean: distclean-am + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf fused-src/gtest/$(DEPDIR) samples/$(DEPDIR) src/$(DEPDIR) test/$(DEPDIR) + -rm -f Makefile +distclean-am: clean-am distclean-compile distclean-generic \ + distclean-hdr distclean-libtool distclean-tags + +dvi: dvi-am + +dvi-am: + +html: html-am + +html-am: + +info: info-am + +info-am: + +install-data-am: install-data-local install-m4dataDATA \ + install-pkgincludeHEADERS install-pkginclude_internalHEADERS + +install-dvi: install-dvi-am + +install-dvi-am: + +install-exec-am: install-exec-local install-libLTLIBRARIES + +install-html: install-html-am + +install-html-am: + +install-info: install-info-am + +install-info-am: + +install-man: + +install-pdf: install-pdf-am + +install-pdf-am: + +install-ps: install-ps-am + +install-ps-am: + +installcheck-am: + +maintainer-clean: maintainer-clean-am + -rm -f $(am__CONFIG_DISTCLEAN_FILES) + -rm -rf $(top_srcdir)/autom4te.cache + -rm -rf fused-src/gtest/$(DEPDIR) samples/$(DEPDIR) src/$(DEPDIR) test/$(DEPDIR) + -rm -f Makefile +maintainer-clean-am: distclean-am maintainer-clean-generic \ + maintainer-clean-local + +mostlyclean: mostlyclean-am + +mostlyclean-am: mostlyclean-compile mostlyclean-generic \ + mostlyclean-libtool + +pdf: pdf-am + +pdf-am: + +ps: ps-am + +ps-am: + +uninstall-am: uninstall-libLTLIBRARIES uninstall-m4dataDATA \ + uninstall-pkgincludeHEADERS \ + uninstall-pkginclude_internalHEADERS + +.MAKE: check-am install-am install-strip + +.PHONY: CTAGS GTAGS all all-am am--refresh check check-TESTS check-am \ + clean clean-checkPROGRAMS clean-generic clean-libLTLIBRARIES \ + clean-libtool clean-noinstLTLIBRARIES ctags dist dist-all \ + dist-bzip2 dist-gzip dist-lzip dist-lzma dist-shar dist-tarZ \ + dist-xz dist-zip distcheck distclean distclean-compile \ + distclean-generic distclean-hdr distclean-libtool \ + distclean-tags distcleancheck distdir distuninstallcheck dvi \ + dvi-am html html-am info info-am install install-am \ + install-data install-data-am install-data-local install-dvi \ + install-dvi-am install-exec install-exec-am install-exec-local \ + install-html install-html-am install-info install-info-am \ + install-libLTLIBRARIES install-m4dataDATA install-man \ + install-pdf install-pdf-am install-pkgincludeHEADERS \ + install-pkginclude_internalHEADERS install-ps install-ps-am \ + install-strip installcheck installcheck-am installdirs \ + maintainer-clean maintainer-clean-generic \ + maintainer-clean-local mostlyclean mostlyclean-compile \ + mostlyclean-generic mostlyclean-libtool pdf pdf-am ps ps-am \ + tags uninstall uninstall-am uninstall-libLTLIBRARIES \ + uninstall-m4dataDATA uninstall-pkgincludeHEADERS \ + uninstall-pkginclude_internalHEADERS + + +# Build rules for putting fused Google Test files into the distribution +# package. The user can also create those files by manually running +# scripts/fuse_gtest_files.py. 
+@HAVE_PYTHON_TRUE@$(test_fused_gtest_test_SOURCES): fused-gtest
+
+@HAVE_PYTHON_TRUE@fused-gtest: $(pkginclude_HEADERS) $(pkginclude_internal_HEADERS) \
+@HAVE_PYTHON_TRUE@             $(GTEST_SRC) src/gtest-all.cc src/gtest_main.cc \
+@HAVE_PYTHON_TRUE@             scripts/fuse_gtest_files.py
+@HAVE_PYTHON_TRUE@	mkdir -p "$(srcdir)/fused-src"
+@HAVE_PYTHON_TRUE@	chmod -R u+w "$(srcdir)/fused-src"
+@HAVE_PYTHON_TRUE@	rm -f "$(srcdir)/fused-src/gtest/gtest-all.cc"
+@HAVE_PYTHON_TRUE@	rm -f "$(srcdir)/fused-src/gtest/gtest.h"
+@HAVE_PYTHON_TRUE@	"$(srcdir)/scripts/fuse_gtest_files.py" "$(srcdir)/fused-src"
+@HAVE_PYTHON_TRUE@	cp -f "$(srcdir)/src/gtest_main.cc" "$(srcdir)/fused-src/gtest/"
+
+@HAVE_PYTHON_TRUE@maintainer-clean-local:
+@HAVE_PYTHON_TRUE@	rm -rf "$(srcdir)/fused-src"
+
+# Disables 'make install' as installing a compiled version of Google
+# Test can lead to undefined behavior due to violation of the
+# One-Definition Rule.
+
+install-exec-local:
+	echo "'make install' is dangerous and not supported. Instead, see README for how to integrate Google Test into your build system."
+	false
+
+install-data-local:
+	echo "'make install' is dangerous and not supported. Instead, see README for how to integrate Google Test into your build system."
+	false
+
+# Tell versions [3.59,3.63) of GNU make to not export all variables.
+# Otherwise a system limit (for SysV at least) may be exceeded.
+.NOEXPORT:
diff --git a/external/gtest/README b/external/gtest/README
new file mode 100755
index 0000000000..26f35a8479
--- /dev/null
+++ b/external/gtest/README
@@ -0,0 +1,435 @@
+Google C++ Testing Framework
+============================
+
+http://code.google.com/p/googletest/
+
+Overview
+--------
+
+Google's framework for writing C++ tests on a variety of platforms
+(Linux, Mac OS X, Windows, Windows CE, Symbian, etc).  Based on the
+xUnit architecture.  Supports automatic test discovery, a rich set of
+assertions, user-defined assertions, death tests, fatal and non-fatal
+failures, various options for running the tests, and XML test report
+generation.
+
+Please see the project page above for more information as well as the
+mailing list for questions, discussions, and development.  There is
+also an IRC channel on OFTC (irc.oftc.net) #gtest available.  Please
+join us!
+
+Requirements for End Users
+--------------------------
+
+Google Test is designed to have fairly minimal requirements to build
+and use with your projects, but there are some.  Currently, we support
+Linux, Windows, Mac OS X, and Cygwin.  We will also make our best
+effort to support other platforms (e.g. Solaris, AIX, and z/OS).
+However, since core members of the Google Test project have no access
+to these platforms, Google Test may have outstanding issues there.  If
+you notice any problems on your platform, please notify
+googletestframework@googlegroups.com.  Patches for fixing them are
+even more welcome!
+
+### Linux Requirements ###
+
+These are the base requirements to build and use Google Test from a
+source package (as described below):
+  * GNU-compatible Make or gmake
+  * POSIX-standard shell
+  * POSIX(-2) Regular Expressions (regex.h)
+  * A C++98-standard-compliant compiler
+
+### Windows Requirements ###
+
+  * Microsoft Visual C++ 7.1 or newer
+
+### Cygwin Requirements ###
+
+  * Cygwin 1.5.25-14 or newer
+
+### Mac OS X Requirements ###
+
+  * Mac OS X 10.4 Tiger or newer
+  * Developer Tools Installed
+
+Also, you'll need CMake 2.6.4 or higher if you want to build the
+samples using the provided CMake script, regardless of the platform.
+
+Requirements for Contributors
+-----------------------------
+
+We welcome patches.  If you plan to contribute a patch, you need to
+build Google Test and its own tests from an SVN checkout (described
+below), which has further requirements:
+
+  * Python version 2.3 or newer (for running some of the tests and
+    re-generating certain source files from templates)
+  * CMake 2.6.4 or newer
+
+Getting the Source
+------------------
+
+There are two primary ways of getting Google Test's source code: you
+can download a stable source release in your preferred archive format,
+or directly check out the source from our Subversion (SVN) repository.
+The SVN checkout requires a few extra steps and some extra software
+packages on your system, but lets you track the latest development and
+make patches much more easily, so we highly encourage it.
+
+### Source Package ###
+
+Google Test is released in versioned source packages which can be
+downloaded from the download page [1].  Several different archive
+formats are provided, but the only difference is the tools used to
+manipulate them, and the size of the resulting file.  Download
+whichever you are most comfortable with.
+
+  [1] http://code.google.com/p/googletest/downloads/list
+
+Once the package is downloaded, expand it using whichever tools you
+prefer for that type.  This will result in a new directory with the
+name "gtest-X.Y.Z" which contains all of the source code.  Here are
+some examples on Linux:
+
+  tar -xvzf gtest-X.Y.Z.tar.gz
+  tar -xvjf gtest-X.Y.Z.tar.bz2
+  unzip gtest-X.Y.Z.zip
+
+### SVN Checkout ###
+
+To check out the main branch (also known as the "trunk") of Google
+Test, run the following Subversion command:
+
+  svn checkout http://googletest.googlecode.com/svn/trunk/ gtest-svn
+
+Setting up the Build
+--------------------
+
+To build Google Test and your tests that use it, you need to tell your
+build system where to find its headers and source files.  The exact
+way to do it depends on which build system you use, and is usually
+straightforward.
+
+### Generic Build Instructions ###
+
+Suppose you put Google Test in directory ${GTEST_DIR}.  To build it,
+create a library build target (or a project as called by Visual Studio
+and Xcode) to compile
+
+  ${GTEST_DIR}/src/gtest-all.cc
+
+with ${GTEST_DIR}/include in the system header search path and
+${GTEST_DIR} in the normal header search path.  Assuming a Linux-like
+system and gcc, something like the following will do:
+
+  g++ -isystem ${GTEST_DIR}/include -I${GTEST_DIR} \
+      -pthread -c ${GTEST_DIR}/src/gtest-all.cc
+  ar -rv libgtest.a gtest-all.o
+
+(We need -pthread as Google Test uses threads.)
+
+Next, you should compile your test source file with
+${GTEST_DIR}/include in the system header search path, and link it
+with gtest and any other necessary libraries:
+
+  g++ -isystem ${GTEST_DIR}/include -pthread path/to/your_test.cc libgtest.a \
+      -o your_test
+
+As an example, the make/ directory contains a Makefile that you can
+use to build Google Test on systems where GNU make is available
+(e.g. Linux, Mac OS X, and Cygwin).  It doesn't try to build Google
+Test's own tests.  Instead, it just builds the Google Test library and
+a sample test.  You can use it as a starting point for your own build
+script.
+
+If the default settings are correct for your environment, the
+following commands should succeed:
+
+  cd ${GTEST_DIR}/make
+  make
+  ./sample1_unittest
+
+If you see errors, try to tweak the contents of make/Makefile to make
+them go away.  There are instructions in make/Makefile on how to do it.
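+
+As a minimal sketch (the file name your_test.cc and the test names are
+placeholders, not something Google Test mandates), a test source that
+builds with the commands above could be as small as:
+
+  // your_test.cc -- a hypothetical minimal test program.
+  #include "gtest/gtest.h"
+
+  // One trivial test case, enough to verify that everything links.
+  TEST(YourTestSuite, TwoPlusTwo) {
+    EXPECT_EQ(4, 2 + 2);
+  }
+
+  // Google Test supplies no main() unless you link libgtest_main, so
+  // define one that runs all registered tests.
+  int main(int argc, char **argv) {
+    ::testing::InitGoogleTest(&argc, argv);
+    return RUN_ALL_TESTS();
+  }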
+
+### Using CMake ###
+
+Google Test comes with a CMake build script (CMakeLists.txt) that can
+be used on a wide range of platforms ("C" stands for cross-platform).
+If you don't have CMake installed already, you can download it for
+free from http://www.cmake.org/.
+
+CMake works by generating native makefiles or build projects that can
+be used in the compiler environment of your choice.  The typical
+workflow starts with:
+
+  mkdir mybuild       # Create a directory to hold the build output.
+  cd mybuild
+  cmake ${GTEST_DIR}  # Generate native build scripts.
+
+If you want to build Google Test's samples, you should replace the
+last command with
+
+  cmake -Dgtest_build_samples=ON ${GTEST_DIR}
+
+If you are on a *nix system, you should now see a Makefile in the
+current directory.  Just type 'make' to build gtest.
+
+If you use Windows and have Visual Studio installed, a gtest.sln file
+and several .vcproj files will be created.  You can then build them
+using Visual Studio.
+
+On Mac OS X with Xcode installed, a .xcodeproj file will be generated.
+
+### Legacy Build Scripts ###
+
+Before settling on CMake, we had been providing hand-maintained build
+projects/scripts for Visual Studio, Xcode, and Autotools.  While we
+continue to provide them for convenience, they are not actively
+maintained any more.  We highly recommend that you follow the
+instructions in the previous two sections to integrate Google Test
+with your existing build system.
+
+If you still need to use the legacy build scripts, here's how:
+
+The msvc\ folder contains two solutions with Visual C++ projects.
+Open the gtest.sln or gtest-md.sln file using Visual Studio, and you
+are ready to build Google Test the same way you build any Visual
+Studio project.  Files that have names ending with -md use DLL
+versions of Microsoft runtime libraries (the /MD or the /MDd compiler
+option).  Files without that suffix use static versions of the runtime
+libraries (the /MT or the /MTd option).  Please note that one must use
+the same option to compile both gtest and the test code.  If you use
+Visual Studio 2005 or above, we recommend the -md version as /MD is
+the default for new projects in these versions of Visual Studio.
+
+On Mac OS X, open the gtest.xcodeproj in the xcode/ folder using
+Xcode.  Build the "gtest" target.  The universal binary framework will
+end up in your selected build directory (selected in the Xcode
+"Preferences..." -> "Building" pane and defaults to xcode/build).
+Alternatively, at the command line, enter:
+
+  xcodebuild
+
+This will build the "Release" configuration of gtest.framework in your
+default build location.  See the "xcodebuild" man page for more
+information about building different configurations and building in
+different locations.
+
+If you wish to use the Google Test Xcode project with Xcode 4.x and
+above, you need to either:
+  * update the SDK configuration options in
+    xcode/Config/General.xcconfig.  Comment out the options SDKROOT,
+    MACOS_DEPLOYMENT_TARGET, and GCC_VERSION.  If you choose this
+    route you lose the ability to target earlier versions of Mac OS X.
+  * install an SDK for an earlier version.  This doesn't appear to be
+    supported by Apple, but has been reported to work
+    (http://stackoverflow.com/questions/5378518).
+
+Tweaking Google Test
+--------------------
+
+Google Test can be used in diverse environments.  The default
+configuration may not work (or may not work well) out of the box in
+some environments.
+However, you can easily tweak Google Test by defining control macros
+on the compiler command line.  Generally, these macros are named like
+GTEST_XYZ and you define them to either 1 or 0 to enable or disable a
+certain feature.
+
+We list the most frequently used macros below.  For a complete list,
+see file include/gtest/internal/gtest-port.h.
+
+### Choosing a TR1 Tuple Library ###
+
+Some Google Test features require the C++ Technical Report 1 (TR1)
+tuple library, which is not yet available with all compilers.  The
+good news is that Google Test implements a subset of TR1 tuple that's
+enough for its own needs, and will automatically use this when the
+compiler doesn't provide TR1 tuple.
+
+Usually you don't need to care about which tuple library Google Test
+uses.  However, if your project already uses TR1 tuple, you need to
+tell Google Test to use the same TR1 tuple library the rest of your
+project uses, or the two tuple implementations will clash.  To do
+that, add
+
+  -DGTEST_USE_OWN_TR1_TUPLE=0
+
+to the compiler flags while compiling Google Test and your tests.  If
+you want to force Google Test to use its own tuple library, just add
+
+  -DGTEST_USE_OWN_TR1_TUPLE=1
+
+to the compiler flags instead.
+
+If you don't want Google Test to use tuple at all, add
+
+  -DGTEST_HAS_TR1_TUPLE=0
+
+and all features using tuple will be disabled.
+
+### Multi-threaded Tests ###
+
+Google Test is thread-safe where the pthread library is available.
+After #include "gtest/gtest.h", you can check the GTEST_IS_THREADSAFE
+macro to see whether this is the case (yes if the macro is #defined to
+1, no if it's undefined).
+
+If Google Test doesn't correctly detect whether pthread is available
+in your environment, you can force it with
+
+  -DGTEST_HAS_PTHREAD=1
+
+or
+
+  -DGTEST_HAS_PTHREAD=0
+
+When Google Test uses pthread, you may need to add flags to your
+compiler and/or linker to select the pthread library, or you'll get
+link errors.  If you use the CMake script or the deprecated Autotools
+script, this is taken care of for you.  If you use your own build
+script, you'll need to read your compiler and linker's manual to
+figure out what flags to add.
+
+### As a Shared Library (DLL) ###
+
+Google Test is compact, so most users can build and link it as a
+static library for simplicity.  You can choose to use Google Test as
+a shared library (known as a DLL on Windows) if you prefer.
+
+To compile *gtest* as a shared library, add
+
+  -DGTEST_CREATE_SHARED_LIBRARY=1
+
+to the compiler flags.  You'll also need to tell the linker to produce
+a shared library instead - consult your linker's manual for how to do
+it.
+
+To compile your *tests* that use the gtest shared library, add
+
+  -DGTEST_LINKED_AS_SHARED_LIBRARY=1
+
+to the compiler flags.
+
+Note: while the above steps aren't technically necessary today when
+using some compilers (e.g. GCC), they may become necessary in the
+future, if we decide to improve the speed of loading the library (see
+http://gcc.gnu.org/wiki/Visibility for details).  Therefore we
+recommend that you always add the above flags when using Google Test
+as a shared library.  Otherwise a future release of Google Test may
+break your build script.
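+
+As a rough sketch of those two steps (exact flags vary by toolchain;
+this assumes GCC on Linux, and libgtest.so and your_test are
+placeholder names), the build could look like:
+
+  # Build gtest itself as a shared library.
+  g++ -isystem ${GTEST_DIR}/include -I${GTEST_DIR} \
+      -DGTEST_CREATE_SHARED_LIBRARY=1 \
+      -fPIC -pthread -shared -o libgtest.so \
+      ${GTEST_DIR}/src/gtest-all.cc
+
+  # Build a test program against that shared library.
+  g++ -isystem ${GTEST_DIR}/include \
+      -DGTEST_LINKED_AS_SHARED_LIBRARY=1 \
+      -pthread path/to/your_test.cc -L. -lgtest -o your_test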
+
+Specifically, if both Google Test and some other code define macro
+FOO, you can add
+
+    -DGTEST_DONT_DEFINE_FOO=1
+
+to the compiler flags to tell Google Test to change the macro's name
+from FOO to GTEST_FOO. Currently FOO can be FAIL, SUCCEED, or TEST.
+For example, with -DGTEST_DONT_DEFINE_TEST=1, you'll need to write
+
+    GTEST_TEST(SomeTest, DoesThis) { ... }
+
+instead of
+
+    TEST(SomeTest, DoesThis) { ... }
+
+in order to define a test.
+
+Upgrading from an Earlier Version
+---------------------------------
+
+We strive to keep Google Test releases backward compatible.
+Sometimes, though, we have to make some breaking changes for the
+users' long-term benefits. This section describes what you'll need to
+do if you are upgrading from an earlier version of Google Test.
+
+### Upgrading from 1.3.0 or Earlier ###
+
+You may need to explicitly enable or disable Google Test's own TR1
+tuple library. See the instructions in section "Choosing a TR1 Tuple
+Library".
+
+### Upgrading from 1.4.0 or Earlier ###
+
+The Autotools build script (configure + make) is no longer officially
+supported. You are encouraged to migrate to your own build system or
+use CMake. If you still need to use Autotools, you can find
+instructions in the README file from Google Test 1.4.0.
+
+On platforms where the pthread library is available, Google Test uses
+it in order to be thread-safe. See the "Multi-threaded Tests" section
+for what this means to your build script.
+
+If you use Microsoft Visual C++ 7.1 with exceptions disabled, Google
+Test will no longer compile. This should affect very few people, as a
+large portion of STL (including <string>) doesn't compile in this mode
+anyway. We decided to stop supporting it in order to greatly simplify
+Google Test's implementation.
+
+Developing Google Test
+----------------------
+
+This section discusses how to make your own changes to Google Test.
+
+### Testing Google Test Itself ###
+
+To make sure your changes work as intended and don't break existing
+functionality, you'll want to compile and run Google Test's own tests.
+For that you can use CMake:
+
+    mkdir mybuild
+    cd mybuild
+    cmake -Dgtest_build_tests=ON ${GTEST_DIR}
+
+Make sure you have Python installed, as some of Google Test's tests
+are written in Python. If the cmake command complains about not being
+able to find Python ("Could NOT find PythonInterp (missing:
+PYTHON_EXECUTABLE)"), try telling it explicitly where your Python
+executable can be found:
+
+    cmake -DPYTHON_EXECUTABLE=path/to/python -Dgtest_build_tests=ON ${GTEST_DIR}
+
+Next, you can build Google Test and all of its own tests. On *nix,
+this is usually done by 'make'. To run the tests, do
+
+    make test
+
+All tests should pass.
+
+### Regenerating Source Files ###
+
+Some of Google Test's source files are generated from templates (not
+in the C++ sense) using a script. A template file is named FOO.pump,
+where FOO is the name of the file it will generate. For example, the
+file include/gtest/internal/gtest-type-util.h.pump is used to generate
+gtest-type-util.h in the same directory.
+
+Normally you don't need to worry about regenerating the source files,
+unless you need to modify them. In that case, you should modify the
+corresponding .pump files instead and run the pump.py Python script to
+regenerate them. You can find pump.py in the scripts/ directory.
+Read the Pump manual [2] for how to use it.
+
+  [2] http://code.google.com/p/googletest/wiki/PumpManual
+
+### Contributing a Patch ###
+
+We welcome patches.
Please read the Google Test developer's guide [3] +for how you can contribute. In particular, make sure you have signed +the Contributor License Agreement, or we won't be able to accept the +patch. + + [3] http://code.google.com/p/googletest/wiki/GoogleTestDevGuide + +Happy testing! diff --git a/external/gtest/aclocal.m4 b/external/gtest/aclocal.m4 new file mode 100755 index 0000000000..e7df9fe0ec --- /dev/null +++ b/external/gtest/aclocal.m4 @@ -0,0 +1,1198 @@ +# generated automatically by aclocal 1.11.3 -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, +# 2005, 2006, 2007, 2008, 2009, 2010, 2011 Free Software Foundation, +# Inc. +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY, to the extent permitted by law; without +# even the implied warranty of MERCHANTABILITY or FITNESS FOR A +# PARTICULAR PURPOSE. + +m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +m4_if(m4_defn([AC_AUTOCONF_VERSION]), [2.68],, +[m4_warning([this file was generated for autoconf 2.68. +You have another version of autoconf. It may work, but is not guaranteed to. +If you have problems, you may need to regenerate the build system entirely. +To do so, use the procedure documented by the package, typically `autoreconf'.])]) + +# Copyright (C) 2002, 2003, 2005, 2006, 2007, 2008, 2011 Free Software +# Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 1 + +# AM_AUTOMAKE_VERSION(VERSION) +# ---------------------------- +# Automake X.Y traces this macro to ensure aclocal.m4 has been +# generated from the m4 files accompanying Automake X.Y. +# (This private macro should not be called outside this file.) +AC_DEFUN([AM_AUTOMAKE_VERSION], +[am__api_version='1.11' +dnl Some users find AM_AUTOMAKE_VERSION and mistake it for a way to +dnl require some minimum version. Point them to the right macro. +m4_if([$1], [1.11.3], [], + [AC_FATAL([Do not call $0, use AM_INIT_AUTOMAKE([$1]).])])dnl +]) + +# _AM_AUTOCONF_VERSION(VERSION) +# ----------------------------- +# aclocal traces this macro to find the Autoconf version. +# This is a private macro too. Using m4_define simplifies +# the logic in aclocal, which can simply ignore this definition. +m4_define([_AM_AUTOCONF_VERSION], []) + +# AM_SET_CURRENT_AUTOMAKE_VERSION +# ------------------------------- +# Call AM_AUTOMAKE_VERSION and AM_AUTOMAKE_VERSION so they can be traced. +# This function is AC_REQUIREd by AM_INIT_AUTOMAKE. +AC_DEFUN([AM_SET_CURRENT_AUTOMAKE_VERSION], +[AM_AUTOMAKE_VERSION([1.11.3])dnl +m4_ifndef([AC_AUTOCONF_VERSION], + [m4_copy([m4_PACKAGE_VERSION], [AC_AUTOCONF_VERSION])])dnl +_AM_AUTOCONF_VERSION(m4_defn([AC_AUTOCONF_VERSION]))]) + +# AM_AUX_DIR_EXPAND -*- Autoconf -*- + +# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 1 + +# For projects using AC_CONFIG_AUX_DIR([foo]), Autoconf sets +# $ac_aux_dir to `$srcdir/foo'. 
In other projects, it is set to +# `$srcdir', `$srcdir/..', or `$srcdir/../..'. +# +# Of course, Automake must honor this variable whenever it calls a +# tool from the auxiliary directory. The problem is that $srcdir (and +# therefore $ac_aux_dir as well) can be either absolute or relative, +# depending on how configure is run. This is pretty annoying, since +# it makes $ac_aux_dir quite unusable in subdirectories: in the top +# source directory, any form will work fine, but in subdirectories a +# relative path needs to be adjusted first. +# +# $ac_aux_dir/missing +# fails when called from a subdirectory if $ac_aux_dir is relative +# $top_srcdir/$ac_aux_dir/missing +# fails if $ac_aux_dir is absolute, +# fails when called from a subdirectory in a VPATH build with +# a relative $ac_aux_dir +# +# The reason of the latter failure is that $top_srcdir and $ac_aux_dir +# are both prefixed by $srcdir. In an in-source build this is usually +# harmless because $srcdir is `.', but things will broke when you +# start a VPATH build or use an absolute $srcdir. +# +# So we could use something similar to $top_srcdir/$ac_aux_dir/missing, +# iff we strip the leading $srcdir from $ac_aux_dir. That would be: +# am_aux_dir='\$(top_srcdir)/'`expr "$ac_aux_dir" : "$srcdir//*\(.*\)"` +# and then we would define $MISSING as +# MISSING="\${SHELL} $am_aux_dir/missing" +# This will work as long as MISSING is not called from configure, because +# unfortunately $(top_srcdir) has no meaning in configure. +# However there are other variables, like CC, which are often used in +# configure, and could therefore not use this "fixed" $ac_aux_dir. +# +# Another solution, used here, is to always expand $ac_aux_dir to an +# absolute PATH. The drawback is that using absolute paths prevent a +# configured tree to be moved without reconfiguration. + +AC_DEFUN([AM_AUX_DIR_EXPAND], +[dnl Rely on autoconf to set up CDPATH properly. +AC_PREREQ([2.50])dnl +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` +]) + +# AM_CONDITIONAL -*- Autoconf -*- + +# Copyright (C) 1997, 2000, 2001, 2003, 2004, 2005, 2006, 2008 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 9 + +# AM_CONDITIONAL(NAME, SHELL-CONDITION) +# ------------------------------------- +# Define a conditional. +AC_DEFUN([AM_CONDITIONAL], +[AC_PREREQ(2.52)dnl + ifelse([$1], [TRUE], [AC_FATAL([$0: invalid condition: $1])], + [$1], [FALSE], [AC_FATAL([$0: invalid condition: $1])])dnl +AC_SUBST([$1_TRUE])dnl +AC_SUBST([$1_FALSE])dnl +_AM_SUBST_NOTMAKE([$1_TRUE])dnl +_AM_SUBST_NOTMAKE([$1_FALSE])dnl +m4_define([_AM_COND_VALUE_$1], [$2])dnl +if $2; then + $1_TRUE= + $1_FALSE='#' +else + $1_TRUE='#' + $1_FALSE= +fi +AC_CONFIG_COMMANDS_PRE( +[if test -z "${$1_TRUE}" && test -z "${$1_FALSE}"; then + AC_MSG_ERROR([[conditional "$1" was never defined. +Usually this means the macro was only invoked conditionally.]]) +fi])]) + +# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2009, +# 2010, 2011 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +# serial 12 + +# There are a few dirty hacks below to avoid letting `AC_PROG_CC' be +# written in clear, in which case automake, when reading aclocal.m4, +# will think it sees a *use*, and therefore will trigger all it's +# C support machinery. Also note that it means that autoscan, seeing +# CC etc. in the Makefile, will ask for an AC_PROG_CC use... + + +# _AM_DEPENDENCIES(NAME) +# ---------------------- +# See how the compiler implements dependency checking. +# NAME is "CC", "CXX", "GCJ", or "OBJC". +# We try a few techniques and use that to set a single cache variable. +# +# We don't AC_REQUIRE the corresponding AC_PROG_CC since the latter was +# modified to invoke _AM_DEPENDENCIES(CC); we would have a circular +# dependency, and given that the user is not expected to run this macro, +# just rely on AC_PROG_CC. +AC_DEFUN([_AM_DEPENDENCIES], +[AC_REQUIRE([AM_SET_DEPDIR])dnl +AC_REQUIRE([AM_OUTPUT_DEPENDENCY_COMMANDS])dnl +AC_REQUIRE([AM_MAKE_INCLUDE])dnl +AC_REQUIRE([AM_DEP_TRACK])dnl + +ifelse([$1], CC, [depcc="$CC" am_compiler_list=], + [$1], CXX, [depcc="$CXX" am_compiler_list=], + [$1], OBJC, [depcc="$OBJC" am_compiler_list='gcc3 gcc'], + [$1], UPC, [depcc="$UPC" am_compiler_list=], + [$1], GCJ, [depcc="$GCJ" am_compiler_list='gcc3 gcc'], + [depcc="$$1" am_compiler_list=]) + +AC_CACHE_CHECK([dependency style of $depcc], + [am_cv_$1_dependencies_compiler_type], +[if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_$1_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n ['s/^#*\([a-zA-Z0-9]*\))$/\1/p'] < ./depcomp` + fi + am__universal=false + m4_case([$1], [CC], + [case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac], + [CXX], + [case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac]) + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. + touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. 
It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok `-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_$1_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_$1_dependencies_compiler_type=none +fi +]) +AC_SUBST([$1DEPMODE], [depmode=$am_cv_$1_dependencies_compiler_type]) +AM_CONDITIONAL([am__fastdep$1], [ + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_$1_dependencies_compiler_type" = gcc3]) +]) + + +# AM_SET_DEPDIR +# ------------- +# Choose a directory name for dependency files. +# This macro is AC_REQUIREd in _AM_DEPENDENCIES +AC_DEFUN([AM_SET_DEPDIR], +[AC_REQUIRE([AM_SET_LEADING_DOT])dnl +AC_SUBST([DEPDIR], ["${am__leading_dot}deps"])dnl +]) + + +# AM_DEP_TRACK +# ------------ +AC_DEFUN([AM_DEP_TRACK], +[AC_ARG_ENABLE(dependency-tracking, +[ --disable-dependency-tracking speeds up one-time build + --enable-dependency-tracking do not reject slow dependency extractors]) +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' + am__nodep='_no' +fi +AM_CONDITIONAL([AMDEP], [test "x$enable_dependency_tracking" != xno]) +AC_SUBST([AMDEPBACKSLASH])dnl +_AM_SUBST_NOTMAKE([AMDEPBACKSLASH])dnl +AC_SUBST([am__nodep])dnl +_AM_SUBST_NOTMAKE([am__nodep])dnl +]) + +# Generate code to set up dependency tracking. -*- Autoconf -*- + +# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. 
+ +#serial 5 + +# _AM_OUTPUT_DEPENDENCY_COMMANDS +# ------------------------------ +AC_DEFUN([_AM_OUTPUT_DEPENDENCY_COMMANDS], +[{ + # Autoconf 2.62 quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + case $CONFIG_FILES in + *\'*) eval set x "$CONFIG_FILES" ;; + *) set x $CONFIG_FILES ;; + esac + shift + for mf + do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named `Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. + # Grep'ing the whole file is not good either: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then + dirpart=`AS_DIRNAME("$mf")` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running `make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # When using ansi2knr, U may be empty or an underscore; expand it + U=`sed -n 's/^U = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. + for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`AS_DIRNAME(["$file"])` + AS_MKDIR_P([$dirpart/$fdir]) + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done + done +} +])# _AM_OUTPUT_DEPENDENCY_COMMANDS + + +# AM_OUTPUT_DEPENDENCY_COMMANDS +# ----------------------------- +# This macro should only be invoked once -- use via AC_REQUIRE. +# +# This code is only required when automatic dependency tracking +# is enabled. FIXME. This creates each `.P' file that we will +# need in order to bootstrap the dependency handling code. +AC_DEFUN([AM_OUTPUT_DEPENDENCY_COMMANDS], +[AC_CONFIG_COMMANDS([depfiles], + [test x"$AMDEP_TRUE" != x"" || _AM_OUTPUT_DEPENDENCY_COMMANDS], + [AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir"]) +]) + +# Do all the work for Automake. -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2002, 2003, 2004, +# 2005, 2006, 2008, 2009 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 16 + +# This macro actually does too much. Some checks are only needed if +# your package does certain things. But this isn't really a big deal. + +# AM_INIT_AUTOMAKE(PACKAGE, VERSION, [NO-DEFINE]) +# AM_INIT_AUTOMAKE([OPTIONS]) +# ----------------------------------------------- +# The call with PACKAGE and VERSION arguments is the old style +# call (pre autoconf-2.50), which is being phased out. 
PACKAGE +# and VERSION should now be passed to AC_INIT and removed from +# the call to AM_INIT_AUTOMAKE. +# We support both call styles for the transition. After +# the next Automake release, Autoconf can make the AC_INIT +# arguments mandatory, and then we can depend on a new Autoconf +# release and drop the old call support. +AC_DEFUN([AM_INIT_AUTOMAKE], +[AC_PREREQ([2.62])dnl +dnl Autoconf wants to disallow AM_ names. We explicitly allow +dnl the ones we care about. +m4_pattern_allow([^AM_[A-Z]+FLAGS$])dnl +AC_REQUIRE([AM_SET_CURRENT_AUTOMAKE_VERSION])dnl +AC_REQUIRE([AC_PROG_INSTALL])dnl +if test "`cd $srcdir && pwd`" != "`pwd`"; then + # Use -I$(srcdir) only when $(srcdir) != ., so that make's output + # is not polluted with repeated "-I." + AC_SUBST([am__isrc], [' -I$(srcdir)'])_AM_SUBST_NOTMAKE([am__isrc])dnl + # test to see if srcdir already configured + if test -f $srcdir/config.status; then + AC_MSG_ERROR([source directory already configured; run "make distclean" there first]) + fi +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi +AC_SUBST([CYGPATH_W]) + +# Define the identity of the package. +dnl Distinguish between old-style and new-style calls. +m4_ifval([$2], +[m4_ifval([$3], [_AM_SET_OPTION([no-define])])dnl + AC_SUBST([PACKAGE], [$1])dnl + AC_SUBST([VERSION], [$2])], +[_AM_SET_OPTIONS([$1])dnl +dnl Diagnose old-style AC_INIT with new-style AM_AUTOMAKE_INIT. +m4_if(m4_ifdef([AC_PACKAGE_NAME], 1)m4_ifdef([AC_PACKAGE_VERSION], 1), 11,, + [m4_fatal([AC_INIT should be called with package and version arguments])])dnl + AC_SUBST([PACKAGE], ['AC_PACKAGE_TARNAME'])dnl + AC_SUBST([VERSION], ['AC_PACKAGE_VERSION'])])dnl + +_AM_IF_OPTION([no-define],, +[AC_DEFINE_UNQUOTED(PACKAGE, "$PACKAGE", [Name of package]) + AC_DEFINE_UNQUOTED(VERSION, "$VERSION", [Version number of package])])dnl + +# Some tools Automake needs. +AC_REQUIRE([AM_SANITY_CHECK])dnl +AC_REQUIRE([AC_ARG_PROGRAM])dnl +AM_MISSING_PROG(ACLOCAL, aclocal-${am__api_version}) +AM_MISSING_PROG(AUTOCONF, autoconf) +AM_MISSING_PROG(AUTOMAKE, automake-${am__api_version}) +AM_MISSING_PROG(AUTOHEADER, autoheader) +AM_MISSING_PROG(MAKEINFO, makeinfo) +AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +AC_REQUIRE([AM_PROG_INSTALL_STRIP])dnl +AC_REQUIRE([AM_PROG_MKDIR_P])dnl +# We need awk for the "check" target. The system "awk" is bad on +# some platforms. +AC_REQUIRE([AC_PROG_AWK])dnl +AC_REQUIRE([AC_PROG_MAKE_SET])dnl +AC_REQUIRE([AM_SET_LEADING_DOT])dnl +_AM_IF_OPTION([tar-ustar], [_AM_PROG_TAR([ustar])], + [_AM_IF_OPTION([tar-pax], [_AM_PROG_TAR([pax])], + [_AM_PROG_TAR([v7])])]) +_AM_IF_OPTION([no-dependencies],, +[AC_PROVIDE_IFELSE([AC_PROG_CC], + [_AM_DEPENDENCIES(CC)], + [define([AC_PROG_CC], + defn([AC_PROG_CC])[_AM_DEPENDENCIES(CC)])])dnl +AC_PROVIDE_IFELSE([AC_PROG_CXX], + [_AM_DEPENDENCIES(CXX)], + [define([AC_PROG_CXX], + defn([AC_PROG_CXX])[_AM_DEPENDENCIES(CXX)])])dnl +AC_PROVIDE_IFELSE([AC_PROG_OBJC], + [_AM_DEPENDENCIES(OBJC)], + [define([AC_PROG_OBJC], + defn([AC_PROG_OBJC])[_AM_DEPENDENCIES(OBJC)])])dnl +]) +_AM_IF_OPTION([silent-rules], [AC_REQUIRE([AM_SILENT_RULES])])dnl +dnl The `parallel-tests' driver may need to know about EXEEXT, so add the +dnl `am__EXEEXT' conditional if _AM_COMPILER_EXEEXT was seen. This macro +dnl is hooked onto _AC_COMPILER_EXEEXT early, see below. 
+AC_CONFIG_COMMANDS_PRE(dnl +[m4_provide_if([_AM_COMPILER_EXEEXT], + [AM_CONDITIONAL([am__EXEEXT], [test -n "$EXEEXT"])])])dnl +]) + +dnl Hook into `_AC_COMPILER_EXEEXT' early to learn its expansion. Do not +dnl add the conditional right here, as _AC_COMPILER_EXEEXT may be further +dnl mangled by Autoconf and run in a shell conditional statement. +m4_define([_AC_COMPILER_EXEEXT], +m4_defn([_AC_COMPILER_EXEEXT])[m4_provide([_AM_COMPILER_EXEEXT])]) + + +# When config.status generates a header, we must update the stamp-h file. +# This file resides in the same directory as the config header +# that is generated. The stamp files are numbered to have different names. + +# Autoconf calls _AC_AM_CONFIG_HEADER_HOOK (when defined) in the +# loop where config.status creates the headers, so we can generate +# our stamp files there. +AC_DEFUN([_AC_AM_CONFIG_HEADER_HOOK], +[# Compute $1's index in $config_headers. +_am_arg=$1 +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`AS_DIRNAME(["$_am_arg"])`/stamp-h[]$_am_stamp_count]) + +# Copyright (C) 2001, 2003, 2005, 2008, 2011 Free Software Foundation, +# Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 1 + +# AM_PROG_INSTALL_SH +# ------------------ +# Define $install_sh. +AC_DEFUN([AM_PROG_INSTALL_SH], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +if test x"${install_sh}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; + *) + install_sh="\${SHELL} $am_aux_dir/install-sh" + esac +fi +AC_SUBST(install_sh)]) + +# Copyright (C) 2003, 2005 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 2 + +# Check whether the underlying file-system supports filenames +# with a leading dot. For instance MS-DOS doesn't. +AC_DEFUN([AM_SET_LEADING_DOT], +[rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null +AC_SUBST([am__leading_dot])]) + +# Check to see how 'make' treats includes. -*- Autoconf -*- + +# Copyright (C) 2001, 2002, 2003, 2005, 2009 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 4 + +# AM_MAKE_INCLUDE() +# ----------------- +# Check to see how make treats includes. +AC_DEFUN([AM_MAKE_INCLUDE], +[am_make=${MAKE-make} +cat > confinc << 'END' +am__doit: + @echo this is the am__doit target +.PHONY: am__doit +END +# If we don't find an include directive, just comment out the code. +AC_MSG_CHECKING([for style of include used by $am_make]) +am__include="#" +am__quote= +_am_result=none +# First try GNU make style include. +echo "include confinc" > confmf +# Ignore all kinds of additional output from `make'. +case `$am_make -s -f confmf 2> /dev/null` in #( +*the\ am__doit\ target*) + am__include=include + am__quote= + _am_result=GNU + ;; +esac +# Now try BSD make style include. 
+if test "$am__include" = "#"; then + echo '.include "confinc"' > confmf + case `$am_make -s -f confmf 2> /dev/null` in #( + *the\ am__doit\ target*) + am__include=.include + am__quote="\"" + _am_result=BSD + ;; + esac +fi +AC_SUBST([am__include]) +AC_SUBST([am__quote]) +AC_MSG_RESULT([$_am_result]) +rm -f confinc confmf +]) + +# Fake the existence of programs that GNU maintainers use. -*- Autoconf -*- + +# Copyright (C) 1997, 1999, 2000, 2001, 2003, 2004, 2005, 2008 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 6 + +# AM_MISSING_PROG(NAME, PROGRAM) +# ------------------------------ +AC_DEFUN([AM_MISSING_PROG], +[AC_REQUIRE([AM_MISSING_HAS_RUN]) +$1=${$1-"${am_missing_run}$2"} +AC_SUBST($1)]) + + +# AM_MISSING_HAS_RUN +# ------------------ +# Define MISSING if not defined so far and test if it supports --run. +# If it does, set am_missing_run to use it, otherwise, to nothing. +AC_DEFUN([AM_MISSING_HAS_RUN], +[AC_REQUIRE([AM_AUX_DIR_EXPAND])dnl +AC_REQUIRE_AUX_FILE([missing])dnl +if test x"${MISSING+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; + *) + MISSING="\${SHELL} $am_aux_dir/missing" ;; + esac +fi +# Use eval to expand $SHELL +if eval "$MISSING --run true"; then + am_missing_run="$MISSING --run " +else + am_missing_run= + AC_MSG_WARN([`missing' script is too old or missing]) +fi +]) + +# Copyright (C) 2003, 2004, 2005, 2006, 2011 Free Software Foundation, +# Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 1 + +# AM_PROG_MKDIR_P +# --------------- +# Check for `mkdir -p'. +AC_DEFUN([AM_PROG_MKDIR_P], +[AC_PREREQ([2.60])dnl +AC_REQUIRE([AC_PROG_MKDIR_P])dnl +dnl Automake 1.8 to 1.9.6 used to define mkdir_p. We now use MKDIR_P, +dnl while keeping a definition of mkdir_p for backward compatibility. +dnl @MKDIR_P@ is magic: AC_OUTPUT adjusts its value for each Makefile. +dnl However we cannot define mkdir_p as $(MKDIR_P) for the sake of +dnl Makefile.ins that do not define MKDIR_P, so we do our own +dnl adjustment using top_builddir (which is defined more often than +dnl MKDIR_P). +AC_SUBST([mkdir_p], ["$MKDIR_P"])dnl +case $mkdir_p in + [[\\/$]]* | ?:[[\\/]]*) ;; + */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; +esac +]) + +# Helper functions for option handling. -*- Autoconf -*- + +# Copyright (C) 2001, 2002, 2003, 2005, 2008, 2010 Free Software +# Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 5 + +# _AM_MANGLE_OPTION(NAME) +# ----------------------- +AC_DEFUN([_AM_MANGLE_OPTION], +[[_AM_OPTION_]m4_bpatsubst($1, [[^a-zA-Z0-9_]], [_])]) + +# _AM_SET_OPTION(NAME) +# -------------------- +# Set option NAME. Presently that only means defining a flag for this option. +AC_DEFUN([_AM_SET_OPTION], +[m4_define(_AM_MANGLE_OPTION([$1]), 1)]) + +# _AM_SET_OPTIONS(OPTIONS) +# ------------------------ +# OPTIONS is a space-separated list of Automake options. 
+AC_DEFUN([_AM_SET_OPTIONS], +[m4_foreach_w([_AM_Option], [$1], [_AM_SET_OPTION(_AM_Option)])]) + +# _AM_IF_OPTION(OPTION, IF-SET, [IF-NOT-SET]) +# ------------------------------------------- +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. +AC_DEFUN([_AM_IF_OPTION], +[m4_ifset(_AM_MANGLE_OPTION([$1]), [$2], [$3])]) + +# Copyright (C) 1999, 2000, 2001, 2002, 2003, 2004, 2005, 2008, 2009, +# 2011 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 2 + +# AM_PATH_PYTHON([MINIMUM-VERSION], [ACTION-IF-FOUND], [ACTION-IF-NOT-FOUND]) +# --------------------------------------------------------------------------- +# Adds support for distributing Python modules and packages. To +# install modules, copy them to $(pythondir), using the python_PYTHON +# automake variable. To install a package with the same name as the +# automake package, install to $(pkgpythondir), or use the +# pkgpython_PYTHON automake variable. +# +# The variables $(pyexecdir) and $(pkgpyexecdir) are provided as +# locations to install python extension modules (shared libraries). +# Another macro is required to find the appropriate flags to compile +# extension modules. +# +# If your package is configured with a different prefix to python, +# users will have to add the install directory to the PYTHONPATH +# environment variable, or create a .pth file (see the python +# documentation for details). +# +# If the MINIMUM-VERSION argument is passed, AM_PATH_PYTHON will +# cause an error if the version of python installed on the system +# doesn't meet the requirement. MINIMUM-VERSION should consist of +# numbers and dots only. +AC_DEFUN([AM_PATH_PYTHON], + [ + dnl Find a Python interpreter. Python versions prior to 2.0 are not + dnl supported. (2.0 was released on October 16, 2000). + m4_define_default([_AM_PYTHON_INTERPRETER_LIST], +[python python2 python3 python3.2 python3.1 python3.0 python2.7 dnl + python2.6 python2.5 python2.4 python2.3 python2.2 python2.1 python2.0]) + + AC_ARG_VAR([PYTHON], [the Python interpreter]) + + m4_if([$1],[],[ + dnl No version check is needed. + # Find any Python interpreter. + if test -z "$PYTHON"; then + AC_PATH_PROGS([PYTHON], _AM_PYTHON_INTERPRETER_LIST, :) + fi + am_display_PYTHON=python + ], [ + dnl A version check is needed. + if test -n "$PYTHON"; then + # If the user set $PYTHON, use it and don't search something else. + AC_MSG_CHECKING([whether $PYTHON version >= $1]) + AM_PYTHON_CHECK_VERSION([$PYTHON], [$1], + [AC_MSG_RESULT(yes)], + [AC_MSG_ERROR(too old)]) + am_display_PYTHON=$PYTHON + else + # Otherwise, try each interpreter until we find one that satisfies + # VERSION. + AC_CACHE_CHECK([for a Python interpreter with version >= $1], + [am_cv_pathless_PYTHON],[ + for am_cv_pathless_PYTHON in _AM_PYTHON_INTERPRETER_LIST none; do + test "$am_cv_pathless_PYTHON" = none && break + AM_PYTHON_CHECK_VERSION([$am_cv_pathless_PYTHON], [$1], [break]) + done]) + # Set $PYTHON to the absolute path of $am_cv_pathless_PYTHON. + if test "$am_cv_pathless_PYTHON" = none; then + PYTHON=: + else + AC_PATH_PROG([PYTHON], [$am_cv_pathless_PYTHON]) + fi + am_display_PYTHON=$am_cv_pathless_PYTHON + fi + ]) + + if test "$PYTHON" = :; then + dnl Run any user-specified action, or abort. + m4_default([$3], [AC_MSG_ERROR([no suitable Python interpreter found])]) + else + + dnl Query Python for its version number. 
Getting [:3] seems to be + dnl the best way to do this; it's what "site.py" does in the standard + dnl library. + + AC_CACHE_CHECK([for $am_display_PYTHON version], [am_cv_python_version], + [am_cv_python_version=`$PYTHON -c "import sys; sys.stdout.write(sys.version[[:3]])"`]) + AC_SUBST([PYTHON_VERSION], [$am_cv_python_version]) + + dnl Use the values of $prefix and $exec_prefix for the corresponding + dnl values of PYTHON_PREFIX and PYTHON_EXEC_PREFIX. These are made + dnl distinct variables so they can be overridden if need be. However, + dnl general consensus is that you shouldn't need this ability. + + AC_SUBST([PYTHON_PREFIX], ['${prefix}']) + AC_SUBST([PYTHON_EXEC_PREFIX], ['${exec_prefix}']) + + dnl At times (like when building shared libraries) you may want + dnl to know which OS platform Python thinks this is. + + AC_CACHE_CHECK([for $am_display_PYTHON platform], [am_cv_python_platform], + [am_cv_python_platform=`$PYTHON -c "import sys; sys.stdout.write(sys.platform)"`]) + AC_SUBST([PYTHON_PLATFORM], [$am_cv_python_platform]) + + + dnl Set up 4 directories: + + dnl pythondir -- where to install python scripts. This is the + dnl site-packages directory, not the python standard library + dnl directory like in previous automake betas. This behavior + dnl is more consistent with lispdir.m4 for example. + dnl Query distutils for this directory. + AC_CACHE_CHECK([for $am_display_PYTHON script directory], + [am_cv_python_pythondir], + [if test "x$prefix" = xNONE + then + am_py_prefix=$ac_default_prefix + else + am_py_prefix=$prefix + fi + am_cv_python_pythondir=`$PYTHON -c "import sys; from distutils import sysconfig; sys.stdout.write(sysconfig.get_python_lib(0,0,prefix='$am_py_prefix'))" 2>/dev/null` + case $am_cv_python_pythondir in + $am_py_prefix*) + am__strip_prefix=`echo "$am_py_prefix" | sed 's|.|.|g'` + am_cv_python_pythondir=`echo "$am_cv_python_pythondir" | sed "s,^$am__strip_prefix,$PYTHON_PREFIX,"` + ;; + *) + case $am_py_prefix in + /usr|/System*) ;; + *) + am_cv_python_pythondir=$PYTHON_PREFIX/lib/python$PYTHON_VERSION/site-packages + ;; + esac + ;; + esac + ]) + AC_SUBST([pythondir], [$am_cv_python_pythondir]) + + dnl pkgpythondir -- $PACKAGE directory under pythondir. Was + dnl PYTHON_SITE_PACKAGE in previous betas, but this naming is + dnl more consistent with the rest of automake. + + AC_SUBST([pkgpythondir], [\${pythondir}/$PACKAGE]) + + dnl pyexecdir -- directory for installing python extension modules + dnl (shared libraries) + dnl Query distutils for this directory. + AC_CACHE_CHECK([for $am_display_PYTHON extension module directory], + [am_cv_python_pyexecdir], + [if test "x$exec_prefix" = xNONE + then + am_py_exec_prefix=$am_py_prefix + else + am_py_exec_prefix=$exec_prefix + fi + am_cv_python_pyexecdir=`$PYTHON -c "import sys; from distutils import sysconfig; sys.stdout.write(sysconfig.get_python_lib(1,0,prefix='$am_py_exec_prefix'))" 2>/dev/null` + case $am_cv_python_pyexecdir in + $am_py_exec_prefix*) + am__strip_prefix=`echo "$am_py_exec_prefix" | sed 's|.|.|g'` + am_cv_python_pyexecdir=`echo "$am_cv_python_pyexecdir" | sed "s,^$am__strip_prefix,$PYTHON_EXEC_PREFIX,"` + ;; + *) + case $am_py_exec_prefix in + /usr|/System*) ;; + *) + am_cv_python_pyexecdir=$PYTHON_EXEC_PREFIX/lib/python$PYTHON_VERSION/site-packages + ;; + esac + ;; + esac + ]) + AC_SUBST([pyexecdir], [$am_cv_python_pyexecdir]) + + dnl pkgpyexecdir -- $(pyexecdir)/$(PACKAGE) + + AC_SUBST([pkgpyexecdir], [\${pyexecdir}/$PACKAGE]) + + dnl Run any user-specified action. 
+ $2 + fi + +]) + + +# AM_PYTHON_CHECK_VERSION(PROG, VERSION, [ACTION-IF-TRUE], [ACTION-IF-FALSE]) +# --------------------------------------------------------------------------- +# Run ACTION-IF-TRUE if the Python interpreter PROG has version >= VERSION. +# Run ACTION-IF-FALSE otherwise. +# This test uses sys.hexversion instead of the string equivalent (first +# word of sys.version), in order to cope with versions such as 2.2c1. +# This supports Python 2.0 or higher. (2.0 was released on October 16, 2000). +AC_DEFUN([AM_PYTHON_CHECK_VERSION], + [prog="import sys +# split strings by '.' and convert to numeric. Append some zeros +# because we need at least 4 digits for the hex conversion. +# map returns an iterator in Python 3.0 and a list in 2.x +minver = list(map(int, '$2'.split('.'))) + [[0, 0, 0]] +minverhex = 0 +# xrange is not present in Python 3.0 and range returns an iterator +for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[[i]] +sys.exit(sys.hexversion < minverhex)" + AS_IF([AM_RUN_LOG([$1 -c "$prog"])], [$3], [$4])]) + +# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 1 + +# AM_RUN_LOG(COMMAND) +# ------------------- +# Run COMMAND, save the exit status in ac_status, and log it. +# (This has been adapted from Autoconf's _AC_RUN_LOG macro.) +AC_DEFUN([AM_RUN_LOG], +[{ echo "$as_me:$LINENO: $1" >&AS_MESSAGE_LOG_FD + ($1) >&AS_MESSAGE_LOG_FD 2>&AS_MESSAGE_LOG_FD + ac_status=$? + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + (exit $ac_status); }]) + +# Check to make sure that the build environment is sane. -*- Autoconf -*- + +# Copyright (C) 1996, 1997, 2000, 2001, 2003, 2005, 2008 +# Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 5 + +# AM_SANITY_CHECK +# --------------- +AC_DEFUN([AM_SANITY_CHECK], +[AC_MSG_CHECKING([whether build environment is sane]) +# Just in case +sleep 1 +echo timestamp > conftest.file +# Reject unsafe characters in $srcdir or the absolute working directory +# name. Accept space and tab only in the latter. +am_lf=' +' +case `pwd` in + *[[\\\"\#\$\&\'\`$am_lf]]*) + AC_MSG_ERROR([unsafe absolute working directory name]);; +esac +case $srcdir in + *[[\\\"\#\$\&\'\`$am_lf\ \ ]]*) + AC_MSG_ERROR([unsafe srcdir value: `$srcdir']);; +esac + +# Do `set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` + if test "$[*]" = "X"; then + # -L didn't work. + set X `ls -t "$srcdir/configure" conftest.file` + fi + rm -f conftest.file + if test "$[*]" != "X $srcdir/configure conftest.file" \ + && test "$[*]" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + AC_MSG_ERROR([ls -t appears to fail. 
Make sure there is not a broken +alias in your environment]) + fi + + test "$[2]" = conftest.file + ) +then + # Ok. + : +else + AC_MSG_ERROR([newly created file is older than distributed files! +Check your system clock]) +fi +AC_MSG_RESULT(yes)]) + +# Copyright (C) 2001, 2003, 2005, 2011 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 1 + +# AM_PROG_INSTALL_STRIP +# --------------------- +# One issue with vendor `install' (even GNU) is that you can't +# specify the program used to strip binaries. This is especially +# annoying in cross-compiling environments, where the build's strip +# is unlikely to handle the host's binaries. +# Fortunately install-sh will honor a STRIPPROG variable, so we +# always use install-sh in `make install-strip', and initialize +# STRIPPROG with the value of the STRIP variable (set by the user). +AC_DEFUN([AM_PROG_INSTALL_STRIP], +[AC_REQUIRE([AM_PROG_INSTALL_SH])dnl +# Installed binaries are usually stripped using `strip' when the user +# run `make install-strip'. However `strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the `STRIP' environment variable to overrule this program. +dnl Don't test for $cross_compiling = yes, because it might be `maybe'. +if test "$cross_compiling" != no; then + AC_CHECK_TOOL([STRIP], [strip], :) +fi +INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" +AC_SUBST([INSTALL_STRIP_PROGRAM])]) + +# Copyright (C) 2006, 2008, 2010 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 3 + +# _AM_SUBST_NOTMAKE(VARIABLE) +# --------------------------- +# Prevent Automake from outputting VARIABLE = @VARIABLE@ in Makefile.in. +# This macro is traced by Automake. +AC_DEFUN([_AM_SUBST_NOTMAKE]) + +# AM_SUBST_NOTMAKE(VARIABLE) +# -------------------------- +# Public sister of _AM_SUBST_NOTMAKE. +AC_DEFUN([AM_SUBST_NOTMAKE], [_AM_SUBST_NOTMAKE($@)]) + +# Check how to create a tarball. -*- Autoconf -*- + +# Copyright (C) 2004, 2005, 2012 Free Software Foundation, Inc. +# +# This file is free software; the Free Software Foundation +# gives unlimited permission to copy and/or distribute it, +# with or without modifications, as long as this notice is preserved. + +# serial 2 + +# _AM_PROG_TAR(FORMAT) +# -------------------- +# Check how to create a tarball in format FORMAT. +# FORMAT should be one of `v7', `ustar', or `pax'. +# +# Substitute a variable $(am__tar) that is a command +# writing to stdout a FORMAT-tarball containing the directory +# $tardir. +# tardir=directory && $(am__tar) > result.tar +# +# Substitute a variable $(am__untar) that extract such +# a tarball read from stdin. +# $(am__untar) < result.tar +AC_DEFUN([_AM_PROG_TAR], +[# Always define AMTAR for backward compatibility. Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... +AC_SUBST([AMTAR], ['$${TAR-tar}']) +m4_if([$1], [v7], + [am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -'], + [m4_case([$1], [ustar],, [pax],, + [m4_fatal([Unknown tar format])]) +AC_MSG_CHECKING([how to create a $1 tar archive]) +# Loop over all known methods to create a tar archive until one works. 
_am_tools='gnutar m4_if([$1], [ustar], [plaintar]) pax cpio none'
+_am_tools=${am_cv_prog_tar_$1-$_am_tools}
+# Do not fold the above two line into one, because Tru64 sh and
+# Solaris sh will not grok spaces in the rhs of `-'.
+for _am_tool in $_am_tools
+do
+  case $_am_tool in
+  gnutar)
+    for _am_tar in tar gnutar gtar;
+    do
+      AM_RUN_LOG([$_am_tar --version]) && break
+    done
+    am__tar="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$$tardir"'
+    am__tar_="$_am_tar --format=m4_if([$1], [pax], [posix], [$1]) -chf - "'"$tardir"'
+    am__untar="$_am_tar -xf -"
+    ;;
+  plaintar)
+    # Must skip GNU tar: if it does not support --format= it doesn't create
+    # ustar tarball either.
+    (tar --version) >/dev/null 2>&1 && continue
+    am__tar='tar chf - "$$tardir"'
+    am__tar_='tar chf - "$tardir"'
+    am__untar='tar xf -'
+    ;;
+  pax)
+    am__tar='pax -L -x $1 -w "$$tardir"'
+    am__tar_='pax -L -x $1 -w "$tardir"'
+    am__untar='pax -r'
+    ;;
+  cpio)
+    am__tar='find "$$tardir" -print | cpio -o -H $1 -L'
+    am__tar_='find "$tardir" -print | cpio -o -H $1 -L'
+    am__untar='cpio -i -H $1 -d'
+    ;;
+  none)
+    am__tar=false
+    am__tar_=false
+    am__untar=false
+    ;;
+  esac
+
+  # If the value was cached, stop now.  We just wanted to have am__tar
+  # and am__untar set.
+  test -n "${am_cv_prog_tar_$1}" && break
+
+  # tar/untar a dummy directory, and stop if the command works
+  rm -rf conftest.dir
+  mkdir conftest.dir
+  echo GrepMe > conftest.dir/file
+  AM_RUN_LOG([tardir=conftest.dir && eval $am__tar_ >conftest.tar])
+  rm -rf conftest.dir
+  if test -s conftest.tar; then
+    AM_RUN_LOG([$am__untar <conftest.tar])
+    grep GrepMe conftest.dir/file >/dev/null 2>&1 && break
+  fi
+done
+rm -rf conftest.dir
+
+AC_CACHE_VAL([am_cv_prog_tar_$1], [am_cv_prog_tar_$1=$_am_tool])
+AC_MSG_RESULT([$am_cv_prog_tar_$1])])
+AC_SUBST([am__tar])
+AC_SUBST([am__untar])
+]) # _AM_PROG_TAR
+
+m4_include([m4/libtool.m4])
+m4_include([m4/ltoptions.m4])
+m4_include([m4/ltsugar.m4])
+m4_include([m4/ltversion.m4])
+m4_include([m4/lt~obsolete.m4])
diff --git a/external/gtest/build-aux/config.guess b/external/gtest/build-aux/config.guess
new file mode 100755
index 0000000000..d622a44e55
--- /dev/null
+++ b/external/gtest/build-aux/config.guess
@@ -0,0 +1,1530 @@
+#! /bin/sh
+# Attempt to guess a canonical system name.
+#   Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999,
+#   2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
+#   2011, 2012 Free Software Foundation, Inc.
+
+timestamp='2012-02-10'
+
+# This file is free software; you can redistribute it and/or modify it
+# under the terms of the GNU General Public License as published by
+# the Free Software Foundation; either version 2 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful, but
+# WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, see <http://www.gnu.org/licenses/>.
+#
+# As a special exception to the GNU General Public License, if you
+# distribute this file as part of a program that contains a
+# configuration script generated by Autoconf, you may include it under
+# the same distribution terms that you use for the rest of that program.
+
+
+# Originally written by Per Bothner.  Please send patches (context
+# diff format) to <config-patches@gnu.org> and include a ChangeLog
+# +# This script attempts to guess a canonical system name similar to +# config.sub. If it succeeds, it prints the system name on stdout, and +# exits with 0. Otherwise, it exits with 1. +# +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] + +Output the configuration name of the system \`$me' is run on. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to ." + +version="\ +GNU config.guess ($timestamp) + +Originally written by Per Bothner. +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, +2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" >&2 + exit 1 ;; + * ) + break ;; + esac +done + +if test $# != 0; then + echo "$me: too many arguments$help" >&2 + exit 1 +fi + +trap 'exit 1' 1 2 15 + +# CC_FOR_BUILD -- compiler used by this script. Note that the use of a +# compiler to aid in system detection is discouraged as it requires +# temporary files to be created and, as you can see below, it is a +# headache to deal with in a portable fashion. + +# Historically, `CC_FOR_BUILD' used to be named `HOST_CC'. We still +# use `HOST_CC' if defined, but it is deprecated. + +# Portable tmp directory creation inspired by the Autoconf team. + +set_cc_for_build=' +trap "exitcode=\$?; (rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null) && exit \$exitcode" 0 ; +trap "rm -f \$tmpfiles 2>/dev/null; rmdir \$tmp 2>/dev/null; exit 1" 1 2 13 15 ; +: ${TMPDIR=/tmp} ; + { tmp=`(umask 077 && mktemp -d "$TMPDIR/cgXXXXXX") 2>/dev/null` && test -n "$tmp" && test -d "$tmp" ; } || + { test -n "$RANDOM" && tmp=$TMPDIR/cg$$-$RANDOM && (umask 077 && mkdir $tmp) ; } || + { tmp=$TMPDIR/cg-$$ && (umask 077 && mkdir $tmp) && echo "Warning: creating insecure temp directory" >&2 ; } || + { echo "$me: cannot create a temporary directory in $TMPDIR" >&2 ; exit 1 ; } ; +dummy=$tmp/dummy ; +tmpfiles="$dummy.c $dummy.o $dummy.rel $dummy" ; +case $CC_FOR_BUILD,$HOST_CC,$CC in + ,,) echo "int x;" > $dummy.c ; + for c in cc gcc c89 c99 ; do + if ($c -c -o $dummy.o $dummy.c) >/dev/null 2>&1 ; then + CC_FOR_BUILD="$c"; break ; + fi ; + done ; + if test x"$CC_FOR_BUILD" = x ; then + CC_FOR_BUILD=no_compiler_found ; + fi + ;; + ,,*) CC_FOR_BUILD=$CC ;; + ,*,*) CC_FOR_BUILD=$HOST_CC ;; +esac ; set_cc_for_build= ;' + +# This is needed to find uname on a Pyramid OSx when run in the BSD universe. 
+# (ghazi@noc.rutgers.edu 1994-08-24) +if (test -f /.attbin/uname) >/dev/null 2>&1 ; then + PATH=$PATH:/.attbin ; export PATH +fi + +UNAME_MACHINE=`(uname -m) 2>/dev/null` || UNAME_MACHINE=unknown +UNAME_RELEASE=`(uname -r) 2>/dev/null` || UNAME_RELEASE=unknown +UNAME_SYSTEM=`(uname -s) 2>/dev/null` || UNAME_SYSTEM=unknown +UNAME_VERSION=`(uname -v) 2>/dev/null` || UNAME_VERSION=unknown + +# Note: order is significant - the case branches are not exclusive. + +case "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" in + *:NetBSD:*:*) + # NetBSD (nbsd) targets should (where applicable) match one or + # more of the tuples: *-*-netbsdelf*, *-*-netbsdaout*, + # *-*-netbsdecoff* and *-*-netbsd*. For targets that recently + # switched to ELF, *-*-netbsd* would select the old + # object file format. This provides both forward + # compatibility and a consistent mechanism for selecting the + # object file format. + # + # Note: NetBSD doesn't particularly care about the vendor + # portion of the name. We always set it to "unknown". + sysctl="sysctl -n hw.machine_arch" + UNAME_MACHINE_ARCH=`(/sbin/$sysctl 2>/dev/null || \ + /usr/sbin/$sysctl 2>/dev/null || echo unknown)` + case "${UNAME_MACHINE_ARCH}" in + armeb) machine=armeb-unknown ;; + arm*) machine=arm-unknown ;; + sh3el) machine=shl-unknown ;; + sh3eb) machine=sh-unknown ;; + sh5el) machine=sh5le-unknown ;; + *) machine=${UNAME_MACHINE_ARCH}-unknown ;; + esac + # The Operating System including object format, if it has switched + # to ELF recently, or will in the future. + case "${UNAME_MACHINE_ARCH}" in + arm*|i386|m68k|ns32k|sh3*|sparc|vax) + eval $set_cc_for_build + if echo __ELF__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ELF__ + then + # Once all utilities can be ECOFF (netbsdecoff) or a.out (netbsdaout). + # Return netbsd for either. FIX? + os=netbsd + else + os=netbsdelf + fi + ;; + *) + os=netbsd + ;; + esac + # The OS release + # Debian GNU/NetBSD machines have a different userland, and + # thus, need a distinct triplet. However, they do not need + # kernel version information, so it can be replaced with a + # suitable tag, in the style of linux-gnu. + case "${UNAME_VERSION}" in + Debian*) + release='-gnu' + ;; + *) + release=`echo ${UNAME_RELEASE}|sed -e 's/[-_].*/\./'` + ;; + esac + # Since CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM: + # contains redundant information, the shorter form: + # CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM is used. + echo "${machine}-${os}${release}" + exit ;; + *:OpenBSD:*:*) + UNAME_MACHINE_ARCH=`arch | sed 's/OpenBSD.//'` + echo ${UNAME_MACHINE_ARCH}-unknown-openbsd${UNAME_RELEASE} + exit ;; + *:ekkoBSD:*:*) + echo ${UNAME_MACHINE}-unknown-ekkobsd${UNAME_RELEASE} + exit ;; + *:SolidBSD:*:*) + echo ${UNAME_MACHINE}-unknown-solidbsd${UNAME_RELEASE} + exit ;; + macppc:MirBSD:*:*) + echo powerpc-unknown-mirbsd${UNAME_RELEASE} + exit ;; + *:MirBSD:*:*) + echo ${UNAME_MACHINE}-unknown-mirbsd${UNAME_RELEASE} + exit ;; + alpha:OSF1:*:*) + case $UNAME_RELEASE in + *4.0) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $3}'` + ;; + *5.*) + UNAME_RELEASE=`/usr/sbin/sizer -v | awk '{print $4}'` + ;; + esac + # According to Compaq, /usr/sbin/psrinfo has been available on + # OSF/1 and Tru64 systems produced since 1995. I hope that + # covers most systems running today. This code pipes the CPU + # types through head -n 1, so we only detect the type of CPU 0. 
+ ALPHA_CPU_TYPE=`/usr/sbin/psrinfo -v | sed -n -e 's/^ The alpha \(.*\) processor.*$/\1/p' | head -n 1` + case "$ALPHA_CPU_TYPE" in + "EV4 (21064)") + UNAME_MACHINE="alpha" ;; + "EV4.5 (21064)") + UNAME_MACHINE="alpha" ;; + "LCA4 (21066/21068)") + UNAME_MACHINE="alpha" ;; + "EV5 (21164)") + UNAME_MACHINE="alphaev5" ;; + "EV5.6 (21164A)") + UNAME_MACHINE="alphaev56" ;; + "EV5.6 (21164PC)") + UNAME_MACHINE="alphapca56" ;; + "EV5.7 (21164PC)") + UNAME_MACHINE="alphapca57" ;; + "EV6 (21264)") + UNAME_MACHINE="alphaev6" ;; + "EV6.7 (21264A)") + UNAME_MACHINE="alphaev67" ;; + "EV6.8CB (21264C)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8AL (21264B)") + UNAME_MACHINE="alphaev68" ;; + "EV6.8CX (21264D)") + UNAME_MACHINE="alphaev68" ;; + "EV6.9A (21264/EV69A)") + UNAME_MACHINE="alphaev69" ;; + "EV7 (21364)") + UNAME_MACHINE="alphaev7" ;; + "EV7.9 (21364A)") + UNAME_MACHINE="alphaev79" ;; + esac + # A Pn.n version is a patched version. + # A Vn.n version is a released version. + # A Tn.n version is a released field test version. + # A Xn.n version is an unreleased experimental baselevel. + # 1.2 uses "1.2" for uname -r. + echo ${UNAME_MACHINE}-dec-osf`echo ${UNAME_RELEASE} | sed -e 's/^[PVTX]//' | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + # Reset EXIT trap before exiting to avoid spurious non-zero exit code. + exitcode=$? + trap '' 0 + exit $exitcode ;; + Alpha\ *:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # Should we change UNAME_MACHINE based on the output of uname instead + # of the specific Alpha model? + echo alpha-pc-interix + exit ;; + 21064:Windows_NT:50:3) + echo alpha-dec-winnt3.5 + exit ;; + Amiga*:UNIX_System_V:4.0:*) + echo m68k-unknown-sysv4 + exit ;; + *:[Aa]miga[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-amigaos + exit ;; + *:[Mm]orph[Oo][Ss]:*:*) + echo ${UNAME_MACHINE}-unknown-morphos + exit ;; + *:OS/390:*:*) + echo i370-ibm-openedition + exit ;; + *:z/VM:*:*) + echo s390-ibm-zvmoe + exit ;; + *:OS400:*:*) + echo powerpc-ibm-os400 + exit ;; + arm:RISC*:1.[012]*:*|arm:riscix:1.[012]*:*) + echo arm-acorn-riscix${UNAME_RELEASE} + exit ;; + arm:riscos:*:*|arm:RISCOS:*:*) + echo arm-unknown-riscos + exit ;; + SR2?01:HI-UX/MPP:*:* | SR8000:HI-UX/MPP:*:*) + echo hppa1.1-hitachi-hiuxmpp + exit ;; + Pyramid*:OSx*:*:* | MIS*:OSx*:*:* | MIS*:SMP_DC-OSx*:*:*) + # akee@wpdis03.wpafb.af.mil (Earle F. Ake) contributed MIS and NILE. + if test "`(/bin/universe) 2>/dev/null`" = att ; then + echo pyramid-pyramid-sysv3 + else + echo pyramid-pyramid-bsd + fi + exit ;; + NILE*:*:*:dcosx) + echo pyramid-pyramid-svr4 + exit ;; + DRS?6000:unix:4.0:6*) + echo sparc-icl-nx6 + exit ;; + DRS?6000:UNIX_SV:4.2*:7* | DRS?6000:isis:4.2*:7*) + case `/usr/bin/uname -p` in + sparc) echo sparc-icl-nx7; exit ;; + esac ;; + s390x:SunOS:*:*) + echo ${UNAME_MACHINE}-ibm-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4H:SunOS:5.*:*) + echo sparc-hal-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:5.*:* | tadpole*:SunOS:5.*:*) + echo sparc-sun-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + i86pc:AuroraUX:5.*:* | i86xen:AuroraUX:5.*:*) + echo i386-pc-auroraux${UNAME_RELEASE} + exit ;; + i86pc:SunOS:5.*:* | i86xen:SunOS:5.*:*) + eval $set_cc_for_build + SUN_ARCH="i386" + # If there is a compiler, see if it is configured for 64-bit objects. + # Note that the Sun cc does not turn __LP64__ into 1 like gcc does. + # This test works for both compilers. 
+ if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __amd64'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + SUN_ARCH="x86_64" + fi + fi + echo ${SUN_ARCH}-pc-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:6*:*) + # According to config.sub, this is the proper way to canonicalize + # SunOS6. Hard to guess exactly what SunOS6 will be like, but + # it's likely to be more like Solaris than SunOS4. + echo sparc-sun-solaris3`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + sun4*:SunOS:*:*) + case "`/usr/bin/arch -k`" in + Series*|S4*) + UNAME_RELEASE=`uname -v` + ;; + esac + # Japanese Language versions have a version number like `4.1.3-JL'. + echo sparc-sun-sunos`echo ${UNAME_RELEASE}|sed -e 's/-/_/'` + exit ;; + sun3*:SunOS:*:*) + echo m68k-sun-sunos${UNAME_RELEASE} + exit ;; + sun*:*:4.2BSD:*) + UNAME_RELEASE=`(sed 1q /etc/motd | awk '{print substr($5,1,3)}') 2>/dev/null` + test "x${UNAME_RELEASE}" = "x" && UNAME_RELEASE=3 + case "`/bin/arch`" in + sun3) + echo m68k-sun-sunos${UNAME_RELEASE} + ;; + sun4) + echo sparc-sun-sunos${UNAME_RELEASE} + ;; + esac + exit ;; + aushp:SunOS:*:*) + echo sparc-auspex-sunos${UNAME_RELEASE} + exit ;; + # The situation for MiNT is a little confusing. The machine name + # can be virtually everything (everything which is not + # "atarist" or "atariste" at least should have a processor + # > m68000). The system name ranges from "MiNT" over "FreeMiNT" + # to the lowercase version "mint" (or "freemint"). Finally + # the system name "TOS" denotes a system which is actually not + # MiNT. But MiNT is downward compatible to TOS, so this should + # be no problem. + atarist[e]:*MiNT:*:* | atarist[e]:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + atari*:*MiNT:*:* | atari*:*mint:*:* | atarist[e]:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + *falcon*:*MiNT:*:* | *falcon*:*mint:*:* | *falcon*:*TOS:*:*) + echo m68k-atari-mint${UNAME_RELEASE} + exit ;; + milan*:*MiNT:*:* | milan*:*mint:*:* | *milan*:*TOS:*:*) + echo m68k-milan-mint${UNAME_RELEASE} + exit ;; + hades*:*MiNT:*:* | hades*:*mint:*:* | *hades*:*TOS:*:*) + echo m68k-hades-mint${UNAME_RELEASE} + exit ;; + *:*MiNT:*:* | *:*mint:*:* | *:*TOS:*:*) + echo m68k-unknown-mint${UNAME_RELEASE} + exit ;; + m68k:machten:*:*) + echo m68k-apple-machten${UNAME_RELEASE} + exit ;; + powerpc:machten:*:*) + echo powerpc-apple-machten${UNAME_RELEASE} + exit ;; + RISC*:Mach:*:*) + echo mips-dec-mach_bsd4.3 + exit ;; + RISC*:ULTRIX:*:*) + echo mips-dec-ultrix${UNAME_RELEASE} + exit ;; + VAX*:ULTRIX*:*:*) + echo vax-dec-ultrix${UNAME_RELEASE} + exit ;; + 2020:CLIX:*:* | 2430:CLIX:*:*) + echo clipper-intergraph-clix${UNAME_RELEASE} + exit ;; + mips:*:*:UMIPS | mips:*:*:RISCos) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c +#ifdef __cplusplus +#include /* for printf() prototype */ + int main (int argc, char *argv[]) { +#else + int main (argc, argv) int argc; char *argv[]; { +#endif + #if defined (host_mips) && defined (MIPSEB) + #if defined (SYSTYPE_SYSV) + printf ("mips-mips-riscos%ssysv\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_SVR4) + printf ("mips-mips-riscos%ssvr4\n", argv[1]); exit (0); + #endif + #if defined (SYSTYPE_BSD43) || defined(SYSTYPE_BSD) + printf ("mips-mips-riscos%sbsd\n", argv[1]); exit (0); + #endif + #endif + exit (-1); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && + dummyarg=`echo "${UNAME_RELEASE}" | sed -n 
's/\([0-9]*\).*/\1/p'` && + SYSTEM_NAME=`$dummy $dummyarg` && + { echo "$SYSTEM_NAME"; exit; } + echo mips-mips-riscos${UNAME_RELEASE} + exit ;; + Motorola:PowerMAX_OS:*:*) + echo powerpc-motorola-powermax + exit ;; + Motorola:*:4.3:PL8-*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:*:*:PowerMAX_OS | Synergy:PowerMAX_OS:*:*) + echo powerpc-harris-powermax + exit ;; + Night_Hawk:Power_UNIX:*:*) + echo powerpc-harris-powerunix + exit ;; + m88k:CX/UX:7*:*) + echo m88k-harris-cxux7 + exit ;; + m88k:*:4*:R4*) + echo m88k-motorola-sysv4 + exit ;; + m88k:*:3*:R3*) + echo m88k-motorola-sysv3 + exit ;; + AViiON:dgux:*:*) + # DG/UX returns AViiON for all architectures + UNAME_PROCESSOR=`/usr/bin/uname -p` + if [ $UNAME_PROCESSOR = mc88100 ] || [ $UNAME_PROCESSOR = mc88110 ] + then + if [ ${TARGET_BINARY_INTERFACE}x = m88kdguxelfx ] || \ + [ ${TARGET_BINARY_INTERFACE}x = x ] + then + echo m88k-dg-dgux${UNAME_RELEASE} + else + echo m88k-dg-dguxbcs${UNAME_RELEASE} + fi + else + echo i586-dg-dgux${UNAME_RELEASE} + fi + exit ;; + M88*:DolphinOS:*:*) # DolphinOS (SVR3) + echo m88k-dolphin-sysv3 + exit ;; + M88*:*:R3*:*) + # Delta 88k system running SVR3 + echo m88k-motorola-sysv3 + exit ;; + XD88*:*:*:*) # Tektronix XD88 system running UTekV (SVR3) + echo m88k-tektronix-sysv3 + exit ;; + Tek43[0-9][0-9]:UTek:*:*) # Tektronix 4300 system running UTek (BSD) + echo m68k-tektronix-bsd + exit ;; + *:IRIX*:*:*) + echo mips-sgi-irix`echo ${UNAME_RELEASE}|sed -e 's/-/_/g'` + exit ;; + ????????:AIX?:[12].1:2) # AIX 2.2.1 or AIX 2.1.1 is RT/PC AIX. + echo romp-ibm-aix # uname -m gives an 8 hex-code CPU id + exit ;; # Note that: echo "'`uname -s`'" gives 'AIX ' + i*86:AIX:*:*) + echo i386-ibm-aix + exit ;; + ia64:AIX:*:*) + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${UNAME_MACHINE}-ibm-aix${IBM_REV} + exit ;; + *:AIX:2:3) + if grep bos325 /usr/include/stdio.h >/dev/null 2>&1; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include <sys/systemcfg.h> + + main() + { + if (!__power_pc()) + exit(1); + puts("powerpc-ibm-aix3.2.5"); + exit(0); + } +EOF + if $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` + then + echo "$SYSTEM_NAME" + else + echo rs6000-ibm-aix3.2.5 + fi + elif grep bos324 /usr/include/stdio.h >/dev/null 2>&1; then + echo rs6000-ibm-aix3.2.4 + else + echo rs6000-ibm-aix3.2 + fi + exit ;; + *:AIX:*:[4567]) + IBM_CPU_ID=`/usr/sbin/lsdev -C -c processor -S available | sed 1q | awk '{ print $1 }'` + if /usr/sbin/lsattr -El ${IBM_CPU_ID} | grep ' POWER' >/dev/null 2>&1; then + IBM_ARCH=rs6000 + else + IBM_ARCH=powerpc + fi + if [ -x /usr/bin/oslevel ] ; then + IBM_REV=`/usr/bin/oslevel` + else + IBM_REV=${UNAME_VERSION}.${UNAME_RELEASE} + fi + echo ${IBM_ARCH}-ibm-aix${IBM_REV} + exit ;; + *:AIX:*:*) + echo rs6000-ibm-aix + exit ;; + ibmrt:4.4BSD:*|romp-ibm:BSD:*) + echo romp-ibm-bsd4.4 + exit ;; + ibmrt:*BSD:*|romp-ibm:BSD:*) # covers RT/PC BSD and + echo romp-ibm-bsd${UNAME_RELEASE} # 4.3 with uname added to + exit ;; # report: romp-ibm BSD 4.3 + *:BOSX:*:*) + echo rs6000-bull-bosx + exit ;; + DPX/2?00:B.O.S.:*:*) + echo m68k-bull-sysv3 + exit ;; + 9000/[34]??:4.3bsd:1.*:*) + echo m68k-hp-bsd + exit ;; + hp300:4.4BSD:*:* | 9000/[34]??:4.3bsd:2.*:*) + echo m68k-hp-bsd4.4 + exit ;; + 9000/[34678]??:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + case "${UNAME_MACHINE}" in + 9000/31? ) HP_ARCH=m68000 ;; + 9000/[34]?? 
) HP_ARCH=m68k ;; + 9000/[678][0-9][0-9]) + if [ -x /usr/bin/getconf ]; then + sc_cpu_version=`/usr/bin/getconf SC_CPU_VERSION 2>/dev/null` + sc_kernel_bits=`/usr/bin/getconf SC_KERNEL_BITS 2>/dev/null` + case "${sc_cpu_version}" in + 523) HP_ARCH="hppa1.0" ;; # CPU_PA_RISC1_0 + 528) HP_ARCH="hppa1.1" ;; # CPU_PA_RISC1_1 + 532) # CPU_PA_RISC2_0 + case "${sc_kernel_bits}" in + 32) HP_ARCH="hppa2.0n" ;; + 64) HP_ARCH="hppa2.0w" ;; + '') HP_ARCH="hppa2.0" ;; # HP-UX 10.20 + esac ;; + esac + fi + if [ "${HP_ARCH}" = "" ]; then + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + + #define _HPUX_SOURCE + #include <stdlib.h> + #include <unistd.h> + + int main () + { + #if defined(_SC_KERNEL_BITS) + long bits = sysconf(_SC_KERNEL_BITS); + #endif + long cpu = sysconf (_SC_CPU_VERSION); + + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1"); break; + case CPU_PA_RISC2_0: + #if defined(_SC_KERNEL_BITS) + switch (bits) + { + case 64: puts ("hppa2.0w"); break; + case 32: puts ("hppa2.0n"); break; + default: puts ("hppa2.0"); break; + } break; + #else /* !defined(_SC_KERNEL_BITS) */ + puts ("hppa2.0"); break; + #endif + default: puts ("hppa1.0"); break; + } + exit (0); + } +EOF + (CCOPTS= $CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null) && HP_ARCH=`$dummy` + test -z "$HP_ARCH" && HP_ARCH=hppa + fi ;; + esac + if [ ${HP_ARCH} = "hppa2.0w" ] + then + eval $set_cc_for_build + + # hppa2.0w-hp-hpux* has a 64-bit kernel and a compiler generating + # 32-bit code. hppa64-hp-hpux* has the same kernel and a compiler + # generating 64-bit code. GNU and HP use different nomenclature: + # + # $ CC_FOR_BUILD=cc ./config.guess + # => hppa2.0w-hp-hpux11.23 + # $ CC_FOR_BUILD="cc +DA2.0w" ./config.guess + # => hppa64-hp-hpux11.23 + + if echo __LP64__ | (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | + grep -q __LP64__ + then + HP_ARCH="hppa2.0w" + else + HP_ARCH="hppa64" + fi + fi + echo ${HP_ARCH}-hp-hpux${HPUX_REV} + exit ;; + ia64:HP-UX:*:*) + HPUX_REV=`echo ${UNAME_RELEASE}|sed -e 's/[^.]*.[0B]*//'` + echo ia64-hp-hpux${HPUX_REV} + exit ;; + 3050*:HI-UX:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #include <unistd.h> + int + main () + { + long cpu = sysconf (_SC_CPU_VERSION); + /* The order matters, because CPU_IS_HP_MC68K erroneously returns + true for CPU_PA_RISC1_0. CPU_IS_PA_RISC returns correct + results, however. 
*/ + if (CPU_IS_PA_RISC (cpu)) + { + switch (cpu) + { + case CPU_PA_RISC1_0: puts ("hppa1.0-hitachi-hiuxwe2"); break; + case CPU_PA_RISC1_1: puts ("hppa1.1-hitachi-hiuxwe2"); break; + case CPU_PA_RISC2_0: puts ("hppa2.0-hitachi-hiuxwe2"); break; + default: puts ("hppa-hitachi-hiuxwe2"); break; + } + } + else if (CPU_IS_HP_MC68K (cpu)) + puts ("m68k-hitachi-hiuxwe2"); + else puts ("unknown-hitachi-hiuxwe2"); + exit (0); + } +EOF + $CC_FOR_BUILD -o $dummy $dummy.c && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + echo unknown-hitachi-hiuxwe2 + exit ;; + 9000/7??:4.3bsd:*:* | 9000/8?[79]:4.3bsd:*:* ) + echo hppa1.1-hp-bsd + exit ;; + 9000/8??:4.3bsd:*:*) + echo hppa1.0-hp-bsd + exit ;; + *9??*:MPE/iX:*:* | *3000*:MPE/iX:*:*) + echo hppa1.0-hp-mpeix + exit ;; + hp7??:OSF1:*:* | hp8?[79]:OSF1:*:* ) + echo hppa1.1-hp-osf + exit ;; + hp8??:OSF1:*:*) + echo hppa1.0-hp-osf + exit ;; + i*86:OSF1:*:*) + if [ -x /usr/sbin/sysversion ] ; then + echo ${UNAME_MACHINE}-unknown-osf1mk + else + echo ${UNAME_MACHINE}-unknown-osf1 + fi + exit ;; + parisc*:Lites*:*:*) + echo hppa1.1-hp-lites + exit ;; + C1*:ConvexOS:*:* | convex:ConvexOS:C1*:*) + echo c1-convex-bsd + exit ;; + C2*:ConvexOS:*:* | convex:ConvexOS:C2*:*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + C34*:ConvexOS:*:* | convex:ConvexOS:C34*:*) + echo c34-convex-bsd + exit ;; + C38*:ConvexOS:*:* | convex:ConvexOS:C38*:*) + echo c38-convex-bsd + exit ;; + C4*:ConvexOS:*:* | convex:ConvexOS:C4*:*) + echo c4-convex-bsd + exit ;; + CRAY*Y-MP:*:*:*) + echo ymp-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*[A-Z]90:*:*:*) + echo ${UNAME_MACHINE}-cray-unicos${UNAME_RELEASE} \ + | sed -e 's/CRAY.*\([A-Z]90\)/\1/' \ + -e y/ABCDEFGHIJKLMNOPQRSTUVWXYZ/abcdefghijklmnopqrstuvwxyz/ \ + -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*TS:*:*:*) + echo t90-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*T3E:*:*:*) + echo alphaev5-cray-unicosmk${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + CRAY*SV1:*:*:*) + echo sv1-cray-unicos${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + *:UNICOS/mp:*:*) + echo craynv-cray-unicosmp${UNAME_RELEASE} | sed -e 's/\.[^.]*$/.X/' + exit ;; + F30[01]:UNIX_System_V:*:* | F700:UNIX_System_V:*:*) + FUJITSU_PROC=`uname -m | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz'` + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | sed -e 's/ /_/'` + echo "${FUJITSU_PROC}-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + 5000:UNIX_System_V:4.*:*) + FUJITSU_SYS=`uname -p | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/\///'` + FUJITSU_REL=`echo ${UNAME_RELEASE} | tr 'ABCDEFGHIJKLMNOPQRSTUVWXYZ' 'abcdefghijklmnopqrstuvwxyz' | sed -e 's/ /_/'` + echo "sparc-fujitsu-${FUJITSU_SYS}${FUJITSU_REL}" + exit ;; + i*86:BSD/386:*:* | i*86:BSD/OS:*:* | *:Ascend\ Embedded/OS:*:*) + echo ${UNAME_MACHINE}-pc-bsdi${UNAME_RELEASE} + exit ;; + sparc*:BSD/OS:*:*) + echo sparc-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:BSD/OS:*:*) + echo ${UNAME_MACHINE}-unknown-bsdi${UNAME_RELEASE} + exit ;; + *:FreeBSD:*:*) + UNAME_PROCESSOR=`/usr/bin/uname -p` + case ${UNAME_PROCESSOR} in + amd64) + echo x86_64-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + *) + echo ${UNAME_PROCESSOR}-unknown-freebsd`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` ;; + esac + exit ;; + i*:CYGWIN*:*) + echo ${UNAME_MACHINE}-pc-cygwin + exit ;; 
+ *:MINGW*:*) + echo ${UNAME_MACHINE}-pc-mingw32 + exit ;; + i*:MSYS*:*) + echo ${UNAME_MACHINE}-pc-msys + exit ;; + i*:windows32*:*) + # uname -m includes "-pc" on this system. + echo ${UNAME_MACHINE}-mingw32 + exit ;; + i*:PW*:*) + echo ${UNAME_MACHINE}-pc-pw32 + exit ;; + *:Interix*:*) + case ${UNAME_MACHINE} in + x86) + echo i586-pc-interix${UNAME_RELEASE} + exit ;; + authenticamd | genuineintel | EM64T) + echo x86_64-unknown-interix${UNAME_RELEASE} + exit ;; + IA64) + echo ia64-unknown-interix${UNAME_RELEASE} + exit ;; + esac ;; + [345]86:Windows_95:* | [345]86:Windows_98:* | [345]86:Windows_NT:*) + echo i${UNAME_MACHINE}-pc-mks + exit ;; + 8664:Windows_NT:*) + echo x86_64-pc-mks + exit ;; + i*:Windows_NT*:* | Pentium*:Windows_NT*:*) + # How do we know it's Interix rather than the generic POSIX subsystem? + # It also conflicts with pre-2.0 versions of AT&T UWIN. Should we + # UNAME_MACHINE based on the output of uname instead of i386? + echo i586-pc-interix + exit ;; + i*:UWIN*:*) + echo ${UNAME_MACHINE}-pc-uwin + exit ;; + amd64:CYGWIN*:*:* | x86_64:CYGWIN*:*:*) + echo x86_64-unknown-cygwin + exit ;; + p*:CYGWIN*:*) + echo powerpcle-unknown-cygwin + exit ;; + prep*:SunOS:5.*:*) + echo powerpcle-unknown-solaris2`echo ${UNAME_RELEASE}|sed -e 's/[^.]*//'` + exit ;; + *:GNU:*:*) + # the GNU system + echo `echo ${UNAME_MACHINE}|sed -e 's,[-/].*$,,'`-unknown-gnu`echo ${UNAME_RELEASE}|sed -e 's,/.*$,,'` + exit ;; + *:GNU/*:*:*) + # other systems with GNU libc and userland + echo ${UNAME_MACHINE}-unknown-`echo ${UNAME_SYSTEM} | sed 's,^[^/]*/,,' | tr '[A-Z]' '[a-z]'``echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'`-gnu + exit ;; + i*86:Minix:*:*) + echo ${UNAME_MACHINE}-pc-minix + exit ;; + aarch64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + aarch64_be:Linux:*:*) + UNAME_MACHINE=aarch64_be + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + alpha:Linux:*:*) + case `sed -n '/^cpu model/s/^.*: \(.*\)/\1/p' < /proc/cpuinfo` in + EV5) UNAME_MACHINE=alphaev5 ;; + EV56) UNAME_MACHINE=alphaev56 ;; + PCA56) UNAME_MACHINE=alphapca56 ;; + PCA57) UNAME_MACHINE=alphapca56 ;; + EV6) UNAME_MACHINE=alphaev6 ;; + EV67) UNAME_MACHINE=alphaev67 ;; + EV68*) UNAME_MACHINE=alphaev68 ;; + esac + objdump --private-headers /bin/sh | grep -q ld.so.1 + if test "$?" 
= 0 ; then LIBC="libc1" ; else LIBC="" ; fi + echo ${UNAME_MACHINE}-unknown-linux-gnu${LIBC} + exit ;; + arm*:Linux:*:*) + eval $set_cc_for_build + if echo __ARM_EABI__ | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_EABI__ + then + echo ${UNAME_MACHINE}-unknown-linux-gnu + else + if echo __ARM_PCS_VFP | $CC_FOR_BUILD -E - 2>/dev/null \ + | grep -q __ARM_PCS_VFP + then + echo ${UNAME_MACHINE}-unknown-linux-gnueabi + else + echo ${UNAME_MACHINE}-unknown-linux-gnueabihf + fi + fi + exit ;; + avr32*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + cris:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-gnu + exit ;; + crisv32:Linux:*:*) + echo ${UNAME_MACHINE}-axis-linux-gnu + exit ;; + frv:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + hexagon:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + i*86:Linux:*:*) + LIBC=gnu + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #ifdef __dietlibc__ + LIBC=dietlibc + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^LIBC'` + echo "${UNAME_MACHINE}-pc-linux-${LIBC}" + exit ;; + ia64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + m32r*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + m68*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + mips:Linux:*:* | mips64:Linux:*:*) + eval $set_cc_for_build + sed 's/^ //' << EOF >$dummy.c + #undef CPU + #undef ${UNAME_MACHINE} + #undef ${UNAME_MACHINE}el + #if defined(__MIPSEL__) || defined(__MIPSEL) || defined(_MIPSEL) || defined(MIPSEL) + CPU=${UNAME_MACHINE}el + #else + #if defined(__MIPSEB__) || defined(__MIPSEB) || defined(_MIPSEB) || defined(MIPSEB) + CPU=${UNAME_MACHINE} + #else + CPU= + #endif + #endif +EOF + eval `$CC_FOR_BUILD -E $dummy.c 2>/dev/null | grep '^CPU'` + test x"${CPU}" != x && { echo "${CPU}-unknown-linux-gnu"; exit; } + ;; + or32:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + padre:Linux:*:*) + echo sparc-unknown-linux-gnu + exit ;; + parisc64:Linux:*:* | hppa64:Linux:*:*) + echo hppa64-unknown-linux-gnu + exit ;; + parisc:Linux:*:* | hppa:Linux:*:*) + # Look for CPU level + case `grep '^cpu[^a-z]*:' /proc/cpuinfo 2>/dev/null | cut -d' ' -f2` in + PA7*) echo hppa1.1-unknown-linux-gnu ;; + PA8*) echo hppa2.0-unknown-linux-gnu ;; + *) echo hppa-unknown-linux-gnu ;; + esac + exit ;; + ppc64:Linux:*:*) + echo powerpc64-unknown-linux-gnu + exit ;; + ppc:Linux:*:*) + echo powerpc-unknown-linux-gnu + exit ;; + s390:Linux:*:* | s390x:Linux:*:*) + echo ${UNAME_MACHINE}-ibm-linux + exit ;; + sh64*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + sh*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + sparc:Linux:*:* | sparc64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + tile*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + vax:Linux:*:*) + echo ${UNAME_MACHINE}-dec-linux-gnu + exit ;; + x86_64:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + xtensa*:Linux:*:*) + echo ${UNAME_MACHINE}-unknown-linux-gnu + exit ;; + i*86:DYNIX/ptx:4*:*) + # ptx 4.0 does uname -s correctly, with DYNIX/ptx in there. + # earlier versions are messed up and put the nodename in both + # sysname and nodename. + echo i386-sequent-sysv4 + exit ;; + i*86:UNIX_SV:4.2MP:2.*) + # Unixware is an offshoot of SVR4, but it has its own version + # number series starting with 2... + # I am not positive that other SVR4 systems won't match this, + # I just have to hope. -- rms. + # Use sysv4.2uw... 
so that sysv4* matches it. + echo ${UNAME_MACHINE}-pc-sysv4.2uw${UNAME_VERSION} + exit ;; + i*86:OS/2:*:*) + # If we were able to find `uname', then EMX Unix compatibility + # is probably installed. + echo ${UNAME_MACHINE}-pc-os2-emx + exit ;; + i*86:XTS-300:*:STOP) + echo ${UNAME_MACHINE}-unknown-stop + exit ;; + i*86:atheos:*:*) + echo ${UNAME_MACHINE}-unknown-atheos + exit ;; + i*86:syllable:*:*) + echo ${UNAME_MACHINE}-pc-syllable + exit ;; + i*86:LynxOS:2.*:* | i*86:LynxOS:3.[01]*:* | i*86:LynxOS:4.[02]*:*) + echo i386-unknown-lynxos${UNAME_RELEASE} + exit ;; + i*86:*DOS:*:*) + echo ${UNAME_MACHINE}-pc-msdosdjgpp + exit ;; + i*86:*:4.*:* | i*86:SYSTEM_V:4.*:*) + UNAME_REL=`echo ${UNAME_RELEASE} | sed 's/\/MP$//'` + if grep Novell /usr/include/link.h >/dev/null 2>/dev/null; then + echo ${UNAME_MACHINE}-univel-sysv${UNAME_REL} + else + echo ${UNAME_MACHINE}-pc-sysv${UNAME_REL} + fi + exit ;; + i*86:*:5:[678]*) + # UnixWare 7.x, OpenUNIX and OpenServer 6. + case `/bin/uname -X | grep "^Machine"` in + *486*) UNAME_MACHINE=i486 ;; + *Pentium) UNAME_MACHINE=i586 ;; + *Pent*|*Celeron) UNAME_MACHINE=i686 ;; + esac + echo ${UNAME_MACHINE}-unknown-sysv${UNAME_RELEASE}${UNAME_SYSTEM}${UNAME_VERSION} + exit ;; + i*86:*:3.2:*) + if test -f /usr/options/cb.name; then + UNAME_REL=`sed -n 's/.*Version //p' </usr/options/cb.name` + echo ${UNAME_MACHINE}-pc-isc$UNAME_REL + elif /bin/uname -X 2>/dev/null >/dev/null ; then + UNAME_REL=`(/bin/uname -X|grep Release|sed -e 's/.*= //')` + (/bin/uname -X|grep i80486 >/dev/null) && UNAME_MACHINE=i486 + (/bin/uname -X|grep '^Machine.*Pentium' >/dev/null) \ + && UNAME_MACHINE=i586 + (/bin/uname -X|grep '^Machine.*Pent *II' >/dev/null) \ + && UNAME_MACHINE=i686 + (/bin/uname -X|grep '^Machine.*Pentium Pro' >/dev/null) \ + && UNAME_MACHINE=i686 + echo ${UNAME_MACHINE}-pc-sco$UNAME_REL + else + echo ${UNAME_MACHINE}-pc-sysv32 + fi + exit ;; + pc:*:*:*) + # Left here for compatibility: + # uname -m prints for DJGPP always 'pc', but it prints nothing about + # the processor, so we play safe by assuming i586. + # Note: whatever this is, it MUST be the same as what config.sub + # prints for the "djgpp" host, or else GDB configury will decide that + # this is a cross-build. + echo i586-pc-msdosdjgpp + exit ;; + Intel:Mach:3*:*) + echo i386-pc-mach3 + exit ;; + paragon:*:*:*) + echo i860-intel-osf1 + exit ;; + i860:*:4.*:*) # i860-SVR4 + if grep Stardent /usr/include/sys/uadmin.h >/dev/null 2>&1 ; then + echo i860-stardent-sysv${UNAME_RELEASE} # Stardent Vistra i860-SVR4 + else # Add other i860-SVR4 vendors below as they are discovered. 
+ echo i860-unknown-sysv${UNAME_RELEASE} # Unknown i860-SVR4 + fi + exit ;; + mini*:CTIX:SYS*5:*) + # "miniframe" + echo m68010-convergent-sysv + exit ;; + mc68k:UNIX:SYSTEM5:3.51m) + echo m68k-convergent-sysv + exit ;; + M680?0:D-NIX:5.3:*) + echo m68k-diab-dnix + exit ;; + M68*:*:R3V[5678]*:*) + test -r /sysV68 && { echo 'm68k-motorola-sysv'; exit; } ;; + 3[345]??:*:4.0:3.0 | 3[34]??A:*:4.0:3.0 | 3[34]??,*:*:4.0:3.0 | 3[34]??/*:*:4.0:3.0 | 4400:*:4.0:3.0 | 4850:*:4.0:3.0 | SKA40:*:4.0:3.0 | SDS2:*:4.0:3.0 | SHG2:*:4.0:3.0 | S7501*:*:4.0:3.0) + OS_REL='' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + 3[34]??:*:4.0:* | 3[34]??,*:*:4.0:*) + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4; exit; } ;; + NCR*:*:4.2:* | MPRAS*:*:4.2:*) + OS_REL='.3' + test -r /etc/.relid \ + && OS_REL=.`sed -n 's/[^ ]* [^ ]* \([0-9][0-9]\).*/\1/p' < /etc/.relid` + /bin/uname -p 2>/dev/null | grep 86 >/dev/null \ + && { echo i486-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep entium >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } + /bin/uname -p 2>/dev/null | /bin/grep pteron >/dev/null \ + && { echo i586-ncr-sysv4.3${OS_REL}; exit; } ;; + m68*:LynxOS:2.*:* | m68*:LynxOS:3.0*:*) + echo m68k-unknown-lynxos${UNAME_RELEASE} + exit ;; + mc68030:UNIX_System_V:4.*:*) + echo m68k-atari-sysv4 + exit ;; + TSUNAMI:LynxOS:2.*:*) + echo sparc-unknown-lynxos${UNAME_RELEASE} + exit ;; + rs6000:LynxOS:2.*:*) + echo rs6000-unknown-lynxos${UNAME_RELEASE} + exit ;; + PowerPC:LynxOS:2.*:* | PowerPC:LynxOS:3.[01]*:* | PowerPC:LynxOS:4.[02]*:*) + echo powerpc-unknown-lynxos${UNAME_RELEASE} + exit ;; + SM[BE]S:UNIX_SV:*:*) + echo mips-dde-sysv${UNAME_RELEASE} + exit ;; + RM*:ReliantUNIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + RM*:SINIX-*:*:*) + echo mips-sni-sysv4 + exit ;; + *:SINIX-*:*:*) + if uname -p 2>/dev/null >/dev/null ; then + UNAME_MACHINE=`(uname -p) 2>/dev/null` + echo ${UNAME_MACHINE}-sni-sysv4 + else + echo ns32k-sni-sysv + fi + exit ;; + PENTIUM:*:4.0*:*) # Unisys `ClearPath HMP IX 4000' SVR4/MP effort + # says <Richard.M.Bartel@ccMail.Census.GOV> + echo i586-unisys-sysv4 + exit ;; + *:UNIX_System_V:4*:FTX*) + # From Gerald Hewes <hewes@openmarket.com>. + # How about differentiating between stratus architectures? -djm + echo hppa1.1-stratus-sysv4 + exit ;; + *:*:*:FTX*) + # From seanf@swdc.stratus.com. + echo i860-stratus-sysv4 + exit ;; + i*86:VOS:*:*) + # From Paul.Green@stratus.com. + echo ${UNAME_MACHINE}-stratus-vos + exit ;; + *:VOS:*:*) + # From Paul.Green@stratus.com. + echo hppa1.1-stratus-vos + exit ;; + mc68*:A/UX:*:*) + echo m68k-apple-aux${UNAME_RELEASE} + exit ;; + news*:NEWS-OS:6*:*) + echo mips-sony-newsos6 + exit ;; + R[34]000:*System_V*:*:* | R4000:UNIX_SYSV:*:* | R*000:UNIX_SV:*:*) + if [ -d /usr/nec ]; then + echo mips-nec-sysv${UNAME_RELEASE} + else + echo mips-unknown-sysv${UNAME_RELEASE} + fi + exit ;; + BeBox:BeOS:*:*) # BeOS running on hardware made by Be, PPC only. + echo powerpc-be-beos + exit ;; + BeMac:BeOS:*:*) # BeOS running on Mac or Mac clone, PPC only. + echo powerpc-apple-beos + exit ;; + BePC:BeOS:*:*) # BeOS running on Intel PC compatible. + echo i586-pc-beos + exit ;; + BePC:Haiku:*:*) # Haiku running on Intel PC compatible. 
+ echo i586-pc-haiku + exit ;; + SX-4:SUPER-UX:*:*) + echo sx4-nec-superux${UNAME_RELEASE} + exit ;; + SX-5:SUPER-UX:*:*) + echo sx5-nec-superux${UNAME_RELEASE} + exit ;; + SX-6:SUPER-UX:*:*) + echo sx6-nec-superux${UNAME_RELEASE} + exit ;; + SX-7:SUPER-UX:*:*) + echo sx7-nec-superux${UNAME_RELEASE} + exit ;; + SX-8:SUPER-UX:*:*) + echo sx8-nec-superux${UNAME_RELEASE} + exit ;; + SX-8R:SUPER-UX:*:*) + echo sx8r-nec-superux${UNAME_RELEASE} + exit ;; + Power*:Rhapsody:*:*) + echo powerpc-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Rhapsody:*:*) + echo ${UNAME_MACHINE}-apple-rhapsody${UNAME_RELEASE} + exit ;; + *:Darwin:*:*) + UNAME_PROCESSOR=`uname -p` || UNAME_PROCESSOR=unknown + case $UNAME_PROCESSOR in + i386) + eval $set_cc_for_build + if [ "$CC_FOR_BUILD" != 'no_compiler_found' ]; then + if (echo '#ifdef __LP64__'; echo IS_64BIT_ARCH; echo '#endif') | \ + (CCOPTS= $CC_FOR_BUILD -E - 2>/dev/null) | \ + grep IS_64BIT_ARCH >/dev/null + then + UNAME_PROCESSOR="x86_64" + fi + fi ;; + unknown) UNAME_PROCESSOR=powerpc ;; + esac + echo ${UNAME_PROCESSOR}-apple-darwin${UNAME_RELEASE} + exit ;; + *:procnto*:*:* | *:QNX:[0123456789]*:*) + UNAME_PROCESSOR=`uname -p` + if test "$UNAME_PROCESSOR" = "x86"; then + UNAME_PROCESSOR=i386 + UNAME_MACHINE=pc + fi + echo ${UNAME_PROCESSOR}-${UNAME_MACHINE}-nto-qnx${UNAME_RELEASE} + exit ;; + *:QNX:*:4*) + echo i386-pc-qnx + exit ;; + NEO-?:NONSTOP_KERNEL:*:*) + echo neo-tandem-nsk${UNAME_RELEASE} + exit ;; + NSE-?:NONSTOP_KERNEL:*:*) + echo nse-tandem-nsk${UNAME_RELEASE} + exit ;; + NSR-?:NONSTOP_KERNEL:*:*) + echo nsr-tandem-nsk${UNAME_RELEASE} + exit ;; + *:NonStop-UX:*:*) + echo mips-compaq-nonstopux + exit ;; + BS2000:POSIX*:*:*) + echo bs2000-siemens-sysv + exit ;; + DS/*:UNIX_System_V:*:*) + echo ${UNAME_MACHINE}-${UNAME_SYSTEM}-${UNAME_RELEASE} + exit ;; + *:Plan9:*:*) + # "uname -m" is not consistent, so use $cputype instead. 386 + # is converted to i386 for consistency with other x86 + # operating systems. + if test "$cputype" = "386"; then + UNAME_MACHINE=i386 + else + UNAME_MACHINE="$cputype" + fi + echo ${UNAME_MACHINE}-unknown-plan9 + exit ;; + *:TOPS-10:*:*) + echo pdp10-unknown-tops10 + exit ;; + *:TENEX:*:*) + echo pdp10-unknown-tenex + exit ;; + KS10:TOPS-20:*:* | KL10:TOPS-20:*:* | TYPE4:TOPS-20:*:*) + echo pdp10-dec-tops20 + exit ;; + XKL-1:TOPS-20:*:* | TYPE5:TOPS-20:*:*) + echo pdp10-xkl-tops20 + exit ;; + *:TOPS-20:*:*) + echo pdp10-unknown-tops20 + exit ;; + *:ITS:*:*) + echo pdp10-unknown-its + exit ;; + SEI:*:*:SEIUX) + echo mips-sei-seiux${UNAME_RELEASE} + exit ;; + *:DragonFly:*:*) + echo ${UNAME_MACHINE}-unknown-dragonfly`echo ${UNAME_RELEASE}|sed -e 's/[-(].*//'` + exit ;; + *:*VMS:*:*) + UNAME_MACHINE=`(uname -p) 2>/dev/null` + case "${UNAME_MACHINE}" in + A*) echo alpha-dec-vms ; exit ;; + I*) echo ia64-dec-vms ; exit ;; + V*) echo vax-dec-vms ; exit ;; + esac ;; + *:XENIX:*:SysV) + echo i386-pc-xenix + exit ;; + i*86:skyos:*:*) + echo ${UNAME_MACHINE}-pc-skyos`echo ${UNAME_RELEASE}` | sed -e 's/ .*$//' + exit ;; + i*86:rdos:*:*) + echo ${UNAME_MACHINE}-pc-rdos + exit ;; + i*86:AROS:*:*) + echo ${UNAME_MACHINE}-pc-aros + exit ;; + x86_64:VMkernel:*:*) + echo ${UNAME_MACHINE}-unknown-esx + exit ;; +esac + +#echo '(No uname command or uname output not recognized.)' 1>&2 +#echo "${UNAME_MACHINE}:${UNAME_SYSTEM}:${UNAME_RELEASE}:${UNAME_VERSION}" 1>&2 + +eval $set_cc_for_build +cat >$dummy.c <<EOF +#ifdef _SEQUENT_ +# include <sys/types.h> +# include <sys/utsname.h> +#endif +main () +{ +#if defined (sony) +#if defined (MIPSEB) + /* BFD wants "bsd" instead of "newsos". 
Perhaps BFD should be changed, + I don't know.... */ + printf ("mips-sony-bsd\n"); exit (0); +#else +#include <sys/param.h> + printf ("m68k-sony-newsos%s\n", +#ifdef NEWSOS4 + "4" +#else + "" +#endif + ); exit (0); +#endif +#endif + +#if defined (__arm) && defined (__acorn) && defined (__unix) + printf ("arm-acorn-riscix\n"); exit (0); +#endif + +#if defined (hp300) && !defined (hpux) + printf ("m68k-hp-bsd\n"); exit (0); +#endif + +#if defined (NeXT) +#if !defined (__ARCHITECTURE__) +#define __ARCHITECTURE__ "m68k" +#endif + int version; + version=`(hostinfo | sed -n 's/.*NeXT Mach \([0-9]*\).*/\1/p') 2>/dev/null`; + if (version < 4) + printf ("%s-next-nextstep%d\n", __ARCHITECTURE__, version); + else + printf ("%s-next-openstep%d\n", __ARCHITECTURE__, version); + exit (0); +#endif + +#if defined (MULTIMAX) || defined (n16) +#if defined (UMAXV) + printf ("ns32k-encore-sysv\n"); exit (0); +#else +#if defined (CMU) + printf ("ns32k-encore-mach\n"); exit (0); +#else + printf ("ns32k-encore-bsd\n"); exit (0); +#endif +#endif +#endif + +#if defined (__386BSD__) + printf ("i386-pc-bsd\n"); exit (0); +#endif + +#if defined (sequent) +#if defined (i386) + printf ("i386-sequent-dynix\n"); exit (0); +#endif +#if defined (ns32000) + printf ("ns32k-sequent-dynix\n"); exit (0); +#endif +#endif + +#if defined (_SEQUENT_) + struct utsname un; + + uname(&un); + + if (strncmp(un.version, "V2", 2) == 0) { + printf ("i386-sequent-ptx2\n"); exit (0); + } + if (strncmp(un.version, "V1", 2) == 0) { /* XXX is V1 correct? */ + printf ("i386-sequent-ptx1\n"); exit (0); + } + printf ("i386-sequent-ptx\n"); exit (0); + +#endif + +#if defined (vax) +# if !defined (ultrix) +# include <sys/param.h> +# if defined (BSD) +# if BSD == 43 + printf ("vax-dec-bsd4.3\n"); exit (0); +# else +# if BSD == 199006 + printf ("vax-dec-bsd4.3reno\n"); exit (0); +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# endif +# else + printf ("vax-dec-bsd\n"); exit (0); +# endif +# else + printf ("vax-dec-ultrix\n"); exit (0); +# endif +#endif + +#if defined (alliant) && defined (i860) + printf ("i860-alliant-bsd\n"); exit (0); +#endif + + exit (1); +} +EOF + +$CC_FOR_BUILD -o $dummy $dummy.c 2>/dev/null && SYSTEM_NAME=`$dummy` && + { echo "$SYSTEM_NAME"; exit; } + +# Apollos put the system type in the environment. + +test -d /usr/apollo && { echo ${ISP}-apollo-${SYSTYPE}; exit; } + +# Convex versions that predate uname can use getsysinfo(1) + +if [ -x /usr/convex/getsysinfo ] +then + case `getsysinfo -f cpu_type` in + c1*) + echo c1-convex-bsd + exit ;; + c2*) + if getsysinfo -f scalar_acc + then echo c32-convex-bsd + else echo c2-convex-bsd + fi + exit ;; + c34*) + echo c34-convex-bsd + exit ;; + c38*) + echo c38-convex-bsd + exit ;; + c4*) + echo c4-convex-bsd + exit ;; + esac +fi + +cat >&2 <<EOF +$0: unable to guess system type + +This script, last modified $timestamp, has failed to recognize +the operating system you are using. It is advised that you +download the most up to date version of the config scripts from + + http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.guess;hb=HEAD +and + http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD + +If the version you run ($0) is already up to date, please +send the following data and any information you think might be +pertinent to <config-patches@gnu.org> in order to provide the needed +information to handle your system. 
+ +config.guess timestamp = $timestamp + +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null` + +hostinfo = `(hostinfo) 2>/dev/null` +/bin/universe = `(/bin/universe) 2>/dev/null` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null` +/bin/arch = `(/bin/arch) 2>/dev/null` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null` + +UNAME_MACHINE = ${UNAME_MACHINE} +UNAME_RELEASE = ${UNAME_RELEASE} +UNAME_SYSTEM = ${UNAME_SYSTEM} +UNAME_VERSION = ${UNAME_VERSION} +EOF + +exit 1 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/external/gtest/build-aux/config.h.in b/external/gtest/build-aux/config.h.in new file mode 100755 index 0000000000..843b5b10cb --- /dev/null +++ b/external/gtest/build-aux/config.h.in @@ -0,0 +1,69 @@ +/* build-aux/config.h.in. Generated from configure.ac by autoheader. */ + +/* Define to 1 if you have the <dlfcn.h> header file. */ +#undef HAVE_DLFCN_H + +/* Define to 1 if you have the <inttypes.h> header file. */ +#undef HAVE_INTTYPES_H + +/* Define to 1 if you have the <memory.h> header file. */ +#undef HAVE_MEMORY_H + +/* Define if you have POSIX threads libraries and header files. */ +#undef HAVE_PTHREAD + +/* Define to 1 if you have the <stdint.h> header file. */ +#undef HAVE_STDINT_H + +/* Define to 1 if you have the <stdlib.h> header file. */ +#undef HAVE_STDLIB_H + +/* Define to 1 if you have the <strings.h> header file. */ +#undef HAVE_STRINGS_H + +/* Define to 1 if you have the <string.h> header file. */ +#undef HAVE_STRING_H + +/* Define to 1 if you have the <sys/stat.h> header file. */ +#undef HAVE_SYS_STAT_H + +/* Define to 1 if you have the <sys/types.h> header file. */ +#undef HAVE_SYS_TYPES_H + +/* Define to 1 if you have the <unistd.h> header file. */ +#undef HAVE_UNISTD_H + +/* Define to the sub-directory in which libtool stores uninstalled libraries. + */ +#undef LT_OBJDIR + +/* Name of package */ +#undef PACKAGE + +/* Define to the address where bug reports for this package should be sent. */ +#undef PACKAGE_BUGREPORT + +/* Define to the full name of this package. */ +#undef PACKAGE_NAME + +/* Define to the full name and version of this package. */ +#undef PACKAGE_STRING + +/* Define to the one symbol short name of this package. */ +#undef PACKAGE_TARNAME + +/* Define to the home page for this package. */ +#undef PACKAGE_URL + +/* Define to the version of this package. */ +#undef PACKAGE_VERSION + +/* Define to necessary symbol if this constant uses a non-standard name on + your system. */ +#undef PTHREAD_CREATE_JOINABLE + +/* Define to 1 if you have the ANSI C header files. */ +#undef STDC_HEADERS + +/* Version number of package */ +#undef VERSION diff --git a/external/gtest/build-aux/config.sub b/external/gtest/build-aux/config.sub new file mode 100755 index 0000000000..c894da4550 --- /dev/null +++ b/external/gtest/build-aux/config.sub @@ -0,0 +1,1773 @@ +#! /bin/sh +# Configuration validation subroutine script. +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, +# 2000, 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, +# 2011, 2012 Free Software Foundation, Inc. + +timestamp='2012-02-10' + +# This file is (in principle) common to ALL GNU software. 
+# The presence of a machine in this file suggests that SOME GNU software +# can handle that machine. It does not imply ALL GNU software can. +# +# This file is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program; if not, see <http://www.gnu.org/licenses/>. +# +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + + +# Please send patches to <config-patches@gnu.org>. Submit a context +# diff and a properly formatted GNU ChangeLog entry. +# +# Configuration subroutine to validate and canonicalize a configuration type. +# Supply the specified configuration type as an argument. +# If it is invalid, we print an error message on stderr and exit with code 1. +# Otherwise, we print the canonical config type on stdout and succeed. + +# You can get the latest version of this script from: +# http://git.savannah.gnu.org/gitweb/?p=config.git;a=blob_plain;f=config.sub;hb=HEAD + +# This file is supposed to be the same for all GNU packages +# and recognize all the CPU types, system types and aliases +# that are meaningful with *any* GNU software. +# Each package is responsible for reporting which valid configurations +# it does not support. The user should be able to distinguish +# a failure to support a valid configuration from a meaningless +# configuration. + +# The goal of this file is to map all the various variations of a given +# machine specification into a single specification in the form: +# CPU_TYPE-MANUFACTURER-OPERATING_SYSTEM +# or in some cases, the newer four-part form: +# CPU_TYPE-MANUFACTURER-KERNEL-OPERATING_SYSTEM +# It is wrong to echo any other type of specification. + +me=`echo "$0" | sed -e 's,.*/,,'` + +usage="\ +Usage: $0 [OPTION] CPU-MFR-OPSYS + $0 [OPTION] ALIAS + +Canonicalize a configuration name. + +Operation modes: + -h, --help print this help, then exit + -t, --time-stamp print date of last modification, then exit + -v, --version print version number, then exit + +Report bugs and patches to <config-patches@gnu.org>." + +version="\ +GNU config.sub ($timestamp) + +Copyright (C) 1992, 1993, 1994, 1995, 1996, 1997, 1998, 1999, 2000, +2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012 +Free Software Foundation, Inc. + +This is free software; see the source for copying conditions. There is NO +warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE." + +help=" +Try \`$me --help' for more information." + +# Parse command line +while test $# -gt 0 ; do + case $1 in + --time-stamp | --time* | -t ) + echo "$timestamp" ; exit ;; + --version | -v ) + echo "$version" ; exit ;; + --help | --h* | -h ) + echo "$usage"; exit ;; + -- ) # Stop option processing + shift; break ;; + - ) # Use stdin as input. + break ;; + -* ) + echo "$me: invalid option $1$help" + exit 1 ;; + + *local*) + # First pass through any local machine types. 
+ echo $1 + exit ;; + + * ) + break ;; + esac +done + +case $# in + 0) echo "$me: missing argument$help" >&2 + exit 1;; + 1) ;; + *) echo "$me: too many arguments$help" >&2 + exit 1;; +esac + +# Separate what the user gave into CPU-COMPANY and OS or KERNEL-OS (if any). +# Here we must recognize all the valid KERNEL-OS combinations. +maybe_os=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\2/'` +case $maybe_os in + nto-qnx* | linux-gnu* | linux-android* | linux-dietlibc | linux-newlib* | \ + linux-uclibc* | uclinux-uclibc* | uclinux-gnu* | kfreebsd*-gnu* | \ + knetbsd*-gnu* | netbsd*-gnu* | \ + kopensolaris*-gnu* | \ + storm-chaos* | os2-emx* | rtmk-nova*) + os=-$maybe_os + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'` + ;; + android-linux) + os=-linux-android + basic_machine=`echo $1 | sed 's/^\(.*\)-\([^-]*-[^-]*\)$/\1/'`-unknown + ;; + *) + basic_machine=`echo $1 | sed 's/-[^-]*$//'` + if [ $basic_machine != $1 ] + then os=`echo $1 | sed 's/.*-/-/'` + else os=; fi + ;; +esac + +### Let's recognize common machines as not being operating systems so +### that things like config.sub decstation-3100 work. We also +### recognize some manufacturers as not being operating systems, so we +### can provide default operating systems below. +case $os in + -sun*os*) + # Prevent following clause from handling this invalid input. + ;; + -dec* | -mips* | -sequent* | -encore* | -pc532* | -sgi* | -sony* | \ + -att* | -7300* | -3300* | -delta* | -motorola* | -sun[234]* | \ + -unicom* | -ibm* | -next | -hp | -isi* | -apollo | -altos* | \ + -convergent* | -ncr* | -news | -32* | -3600* | -3100* | -hitachi* |\ + -c[123]* | -convex* | -sun | -crds | -omron* | -dg | -ultra | -tti* | \ + -harris | -dolphin | -highlevel | -gould | -cbm | -ns | -masscomp | \ + -apple | -axis | -knuth | -cray | -microblaze) + os= + basic_machine=$1 + ;; + -bluegene*) + os=-cnk + ;; + -sim | -cisco | -oki | -wec | -winbond) + os= + basic_machine=$1 + ;; + -scout) + ;; + -wrs) + os=-vxworks + basic_machine=$1 + ;; + -chorusos*) + os=-chorusos + basic_machine=$1 + ;; + -chorusrdb) + os=-chorusrdb + basic_machine=$1 + ;; + -hiux*) + os=-hiuxwe2 + ;; + -sco6) + os=-sco5v6 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5) + os=-sco3.2v5 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco4) + os=-sco3.2v4 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2.[4-9]*) + os=`echo $os | sed -e 's/sco3.2./sco3.2v/'` + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco3.2v[4-9]*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco5v6*) + # Don't forget version if it is 3.2v4 or newer. + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -sco*) + os=-sco3.2v2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -udk*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -isc) + os=-isc2.2 + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -clix*) + basic_machine=clipper-intergraph + ;; + -isc*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-pc/'` + ;; + -lynx*) + os=-lynxos + ;; + -ptx*) + basic_machine=`echo $1 | sed -e 's/86-.*/86-sequent/'` + ;; + -windowsnt*) + os=`echo $os | sed -e 's/windowsnt/winnt/'` + ;; + -psos*) + os=-psos + ;; + -mint | -mint[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; +esac + +# Decode aliases for certain CPU-COMPANY combinations. +case $basic_machine in + # Recognize the basic CPU types without company name. 
+ # Some are omitted here because they have special meanings below. + 1750a | 580 \ + | a29k \ + | aarch64 | aarch64_be \ + | alpha | alphaev[4-8] | alphaev56 | alphaev6[78] | alphapca5[67] \ + | alpha64 | alpha64ev[4-8] | alpha64ev56 | alpha64ev6[78] | alpha64pca5[67] \ + | am33_2.0 \ + | arc | arm | arm[bl]e | arme[lb] | armv[2345] | armv[345][lb] | avr | avr32 \ + | be32 | be64 \ + | bfin \ + | c4x | clipper \ + | d10v | d30v | dlx | dsp16xx \ + | epiphany \ + | fido | fr30 | frv \ + | h8300 | h8500 | hppa | hppa1.[01] | hppa2.0 | hppa2.0[nw] | hppa64 \ + | hexagon \ + | i370 | i860 | i960 | ia64 \ + | ip2k | iq2000 \ + | le32 | le64 \ + | lm32 \ + | m32c | m32r | m32rle | m68000 | m68k | m88k \ + | maxq | mb | microblaze | mcore | mep | metag \ + | mips | mipsbe | mipseb | mipsel | mipsle \ + | mips16 \ + | mips64 | mips64el \ + | mips64octeon | mips64octeonel \ + | mips64orion | mips64orionel \ + | mips64r5900 | mips64r5900el \ + | mips64vr | mips64vrel \ + | mips64vr4100 | mips64vr4100el \ + | mips64vr4300 | mips64vr4300el \ + | mips64vr5000 | mips64vr5000el \ + | mips64vr5900 | mips64vr5900el \ + | mipsisa32 | mipsisa32el \ + | mipsisa32r2 | mipsisa32r2el \ + | mipsisa64 | mipsisa64el \ + | mipsisa64r2 | mipsisa64r2el \ + | mipsisa64sb1 | mipsisa64sb1el \ + | mipsisa64sr71k | mipsisa64sr71kel \ + | mipstx39 | mipstx39el \ + | mn10200 | mn10300 \ + | moxie \ + | mt \ + | msp430 \ + | nds32 | nds32le | nds32be \ + | nios | nios2 \ + | ns16k | ns32k \ + | open8 \ + | or32 \ + | pdp10 | pdp11 | pj | pjl \ + | powerpc | powerpc64 | powerpc64le | powerpcle \ + | pyramid \ + | rl78 | rx \ + | score \ + | sh | sh[1234] | sh[24]a | sh[24]aeb | sh[23]e | sh[34]eb | sheb | shbe | shle | sh[1234]le | sh3ele \ + | sh64 | sh64le \ + | sparc | sparc64 | sparc64b | sparc64v | sparc86x | sparclet | sparclite \ + | sparcv8 | sparcv9 | sparcv9b | sparcv9v \ + | spu \ + | tahoe | tic4x | tic54x | tic55x | tic6x | tic80 | tron \ + | ubicom32 \ + | v850 | v850e | v850e1 | v850e2 | v850es | v850e2v3 \ + | we32k \ + | x86 | xc16x | xstormy16 | xtensa \ + | z8k | z80) + basic_machine=$basic_machine-unknown + ;; + c54x) + basic_machine=tic54x-unknown + ;; + c55x) + basic_machine=tic55x-unknown + ;; + c6x) + basic_machine=tic6x-unknown + ;; + m6811 | m68hc11 | m6812 | m68hc12 | m68hcs12x | picochip) + basic_machine=$basic_machine-unknown + os=-none + ;; + m88110 | m680[12346]0 | m683?2 | m68360 | m5200 | v70 | w65 | z8k) + ;; + ms1) + basic_machine=mt-unknown + ;; + + strongarm | thumb | xscale) + basic_machine=arm-unknown + ;; + xgate) + basic_machine=$basic_machine-unknown + os=-none + ;; + xscaleeb) + basic_machine=armeb-unknown + ;; + + xscaleel) + basic_machine=armel-unknown + ;; + + # We use `pc' rather than `unknown' + # because (1) that's what they normally are, and + # (2) the word "unknown" tends to confuse beginning users. + i*86 | x86_64) + basic_machine=$basic_machine-pc + ;; + # Object if more than one company name word. + *-*-*) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; + # Recognize the basic CPU types with company name. 
+ 580-* \ + | a29k-* \ + | aarch64-* | aarch64_be-* \ + | alpha-* | alphaev[4-8]-* | alphaev56-* | alphaev6[78]-* \ + | alpha64-* | alpha64ev[4-8]-* | alpha64ev56-* | alpha64ev6[78]-* \ + | alphapca5[67]-* | alpha64pca5[67]-* | arc-* \ + | arm-* | armbe-* | armle-* | armeb-* | armv*-* \ + | avr-* | avr32-* \ + | be32-* | be64-* \ + | bfin-* | bs2000-* \ + | c[123]* | c30-* | [cjt]90-* | c4x-* \ + | clipper-* | craynv-* | cydra-* \ + | d10v-* | d30v-* | dlx-* \ + | elxsi-* \ + | f30[01]-* | f700-* | fido-* | fr30-* | frv-* | fx80-* \ + | h8300-* | h8500-* \ + | hppa-* | hppa1.[01]-* | hppa2.0-* | hppa2.0[nw]-* | hppa64-* \ + | hexagon-* \ + | i*86-* | i860-* | i960-* | ia64-* \ + | ip2k-* | iq2000-* \ + | le32-* | le64-* \ + | lm32-* \ + | m32c-* | m32r-* | m32rle-* \ + | m68000-* | m680[012346]0-* | m68360-* | m683?2-* | m68k-* \ + | m88110-* | m88k-* | maxq-* | mcore-* | metag-* | microblaze-* \ + | mips-* | mipsbe-* | mipseb-* | mipsel-* | mipsle-* \ + | mips16-* \ + | mips64-* | mips64el-* \ + | mips64octeon-* | mips64octeonel-* \ + | mips64orion-* | mips64orionel-* \ + | mips64r5900-* | mips64r5900el-* \ + | mips64vr-* | mips64vrel-* \ + | mips64vr4100-* | mips64vr4100el-* \ + | mips64vr4300-* | mips64vr4300el-* \ + | mips64vr5000-* | mips64vr5000el-* \ + | mips64vr5900-* | mips64vr5900el-* \ + | mipsisa32-* | mipsisa32el-* \ + | mipsisa32r2-* | mipsisa32r2el-* \ + | mipsisa64-* | mipsisa64el-* \ + | mipsisa64r2-* | mipsisa64r2el-* \ + | mipsisa64sb1-* | mipsisa64sb1el-* \ + | mipsisa64sr71k-* | mipsisa64sr71kel-* \ + | mipstx39-* | mipstx39el-* \ + | mmix-* \ + | mt-* \ + | msp430-* \ + | nds32-* | nds32le-* | nds32be-* \ + | nios-* | nios2-* \ + | none-* | np1-* | ns16k-* | ns32k-* \ + | open8-* \ + | orion-* \ + | pdp10-* | pdp11-* | pj-* | pjl-* | pn-* | power-* \ + | powerpc-* | powerpc64-* | powerpc64le-* | powerpcle-* \ + | pyramid-* \ + | rl78-* | romp-* | rs6000-* | rx-* \ + | sh-* | sh[1234]-* | sh[24]a-* | sh[24]aeb-* | sh[23]e-* | sh[34]eb-* | sheb-* | shbe-* \ + | shle-* | sh[1234]le-* | sh3ele-* | sh64-* | sh64le-* \ + | sparc-* | sparc64-* | sparc64b-* | sparc64v-* | sparc86x-* | sparclet-* \ + | sparclite-* \ + | sparcv8-* | sparcv9-* | sparcv9b-* | sparcv9v-* | sv1-* | sx?-* \ + | tahoe-* \ + | tic30-* | tic4x-* | tic54x-* | tic55x-* | tic6x-* | tic80-* \ + | tile*-* \ + | tron-* \ + | ubicom32-* \ + | v850-* | v850e-* | v850e1-* | v850es-* | v850e2-* | v850e2v3-* \ + | vax-* \ + | we32k-* \ + | x86-* | x86_64-* | xc16x-* | xps100-* \ + | xstormy16-* | xtensa*-* \ + | ymp-* \ + | z8k-* | z80-*) + ;; + # Recognize the basic CPU types without company name, with glob match. + xtensa*) + basic_machine=$basic_machine-unknown + ;; + # Recognize the various machine names and aliases which stand + # for a CPU type and a company and sometimes even an OS. 
+ 386bsd) + basic_machine=i386-unknown + os=-bsd + ;; + 3b1 | 7300 | 7300-att | att-7300 | pc7300 | safari | unixpc) + basic_machine=m68000-att + ;; + 3b*) + basic_machine=we32k-att + ;; + a29khif) + basic_machine=a29k-amd + os=-udi + ;; + abacus) + basic_machine=abacus-unknown + ;; + adobe68k) + basic_machine=m68010-adobe + os=-scout + ;; + alliant | fx80) + basic_machine=fx80-alliant + ;; + altos | altos3068) + basic_machine=m68k-altos + ;; + am29k) + basic_machine=a29k-none + os=-bsd + ;; + amd64) + basic_machine=x86_64-pc + ;; + amd64-*) + basic_machine=x86_64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + amdahl) + basic_machine=580-amdahl + os=-sysv + ;; + amiga | amiga-*) + basic_machine=m68k-unknown + ;; + amigaos | amigados) + basic_machine=m68k-unknown + os=-amigaos + ;; + amigaunix | amix) + basic_machine=m68k-unknown + os=-sysv4 + ;; + apollo68) + basic_machine=m68k-apollo + os=-sysv + ;; + apollo68bsd) + basic_machine=m68k-apollo + os=-bsd + ;; + aros) + basic_machine=i386-pc + os=-aros + ;; + aux) + basic_machine=m68k-apple + os=-aux + ;; + balance) + basic_machine=ns32k-sequent + os=-dynix + ;; + blackfin) + basic_machine=bfin-unknown + os=-linux + ;; + blackfin-*) + basic_machine=bfin-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + bluegene*) + basic_machine=powerpc-ibm + os=-cnk + ;; + c54x-*) + basic_machine=tic54x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c55x-*) + basic_machine=tic55x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c6x-*) + basic_machine=tic6x-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + c90) + basic_machine=c90-cray + os=-unicos + ;; + cegcc) + basic_machine=arm-unknown + os=-cegcc + ;; + convex-c1) + basic_machine=c1-convex + os=-bsd + ;; + convex-c2) + basic_machine=c2-convex + os=-bsd + ;; + convex-c32) + basic_machine=c32-convex + os=-bsd + ;; + convex-c34) + basic_machine=c34-convex + os=-bsd + ;; + convex-c38) + basic_machine=c38-convex + os=-bsd + ;; + cray | j90) + basic_machine=j90-cray + os=-unicos + ;; + craynv) + basic_machine=craynv-cray + os=-unicosmp + ;; + cr16 | cr16-*) + basic_machine=cr16-unknown + os=-elf + ;; + crds | unos) + basic_machine=m68k-crds + ;; + crisv32 | crisv32-* | etraxfs*) + basic_machine=crisv32-axis + ;; + cris | cris-* | etrax*) + basic_machine=cris-axis + ;; + crx) + basic_machine=crx-unknown + os=-elf + ;; + da30 | da30-*) + basic_machine=m68k-da30 + ;; + decstation | decstation-3100 | pmax | pmax-* | pmin | dec3100 | decstatn) + basic_machine=mips-dec + ;; + decsystem10* | dec10*) + basic_machine=pdp10-dec + os=-tops10 + ;; + decsystem20* | dec20*) + basic_machine=pdp10-dec + os=-tops20 + ;; + delta | 3300 | motorola-3300 | motorola-delta \ + | 3300-motorola | delta-motorola) + basic_machine=m68k-motorola + ;; + delta88) + basic_machine=m88k-motorola + os=-sysv3 + ;; + dicos) + basic_machine=i686-pc + os=-dicos + ;; + djgpp) + basic_machine=i586-pc + os=-msdosdjgpp + ;; + dpx20 | dpx20-*) + basic_machine=rs6000-bull + os=-bosx + ;; + dpx2* | dpx2*-bull) + basic_machine=m68k-bull + os=-sysv3 + ;; + ebmon29k) + basic_machine=a29k-amd + os=-ebmon + ;; + elxsi) + basic_machine=elxsi-elxsi + os=-bsd + ;; + encore | umax | mmax) + basic_machine=ns32k-encore + ;; + es1800 | OSE68k | ose68k | ose | OSE) + basic_machine=m68k-ericsson + os=-ose + ;; + fx2800) + basic_machine=i860-alliant + ;; + genix) + basic_machine=ns32k-ns + ;; + gmicro) + basic_machine=tron-gmicro + os=-sysv + ;; + go32) + basic_machine=i386-pc + os=-go32 + ;; + h3050r* | hiux*) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 
+ ;; + h8300hms) + basic_machine=h8300-hitachi + os=-hms + ;; + h8300xray) + basic_machine=h8300-hitachi + os=-xray + ;; + h8500hms) + basic_machine=h8500-hitachi + os=-hms + ;; + harris) + basic_machine=m88k-harris + os=-sysv3 + ;; + hp300-*) + basic_machine=m68k-hp + ;; + hp300bsd) + basic_machine=m68k-hp + os=-bsd + ;; + hp300hpux) + basic_machine=m68k-hp + os=-hpux + ;; + hp3k9[0-9][0-9] | hp9[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k2[0-9][0-9] | hp9k31[0-9]) + basic_machine=m68000-hp + ;; + hp9k3[2-9][0-9]) + basic_machine=m68k-hp + ;; + hp9k6[0-9][0-9] | hp6[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hp9k7[0-79][0-9] | hp7[0-79][0-9]) + basic_machine=hppa1.1-hp + ;; + hp9k78[0-9] | hp78[0-9]) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[67]1 | hp8[67]1 | hp9k80[24] | hp80[24] | hp9k8[78]9 | hp8[78]9 | hp9k893 | hp893) + # FIXME: really hppa2.0-hp + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][13679] | hp8[0-9][13679]) + basic_machine=hppa1.1-hp + ;; + hp9k8[0-9][0-9] | hp8[0-9][0-9]) + basic_machine=hppa1.0-hp + ;; + hppa-next) + os=-nextstep3 + ;; + hppaosf) + basic_machine=hppa1.1-hp + os=-osf + ;; + hppro) + basic_machine=hppa1.1-hp + os=-proelf + ;; + i370-ibm* | ibm*) + basic_machine=i370-ibm + ;; + i*86v32) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv32 + ;; + i*86v4*) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv4 + ;; + i*86v) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-sysv + ;; + i*86sol2) + basic_machine=`echo $1 | sed -e 's/86.*/86-pc/'` + os=-solaris2 + ;; + i386mach) + basic_machine=i386-mach + os=-mach + ;; + i386-vsta | vsta) + basic_machine=i386-unknown + os=-vsta + ;; + iris | iris4d) + basic_machine=mips-sgi + case $os in + -irix*) + ;; + *) + os=-irix4 + ;; + esac + ;; + isi68 | isi) + basic_machine=m68k-isi + os=-sysv + ;; + m68knommu) + basic_machine=m68k-unknown + os=-linux + ;; + m68knommu-*) + basic_machine=m68k-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + m88k-omron*) + basic_machine=m88k-omron + ;; + magnum | m3230) + basic_machine=mips-mips + os=-sysv + ;; + merlin) + basic_machine=ns32k-utek + os=-sysv + ;; + microblaze) + basic_machine=microblaze-xilinx + ;; + mingw32) + basic_machine=i386-pc + os=-mingw32 + ;; + mingw32ce) + basic_machine=arm-unknown + os=-mingw32ce + ;; + miniframe) + basic_machine=m68000-convergent + ;; + *mint | -mint[0-9]* | *MiNT | *MiNT[0-9]*) + basic_machine=m68k-atari + os=-mint + ;; + mips3*-*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'` + ;; + mips3*) + basic_machine=`echo $basic_machine | sed -e 's/mips3/mips64/'`-unknown + ;; + monitor) + basic_machine=m68k-rom68k + os=-coff + ;; + morphos) + basic_machine=powerpc-unknown + os=-morphos + ;; + msdos) + basic_machine=i386-pc + os=-msdos + ;; + ms1-*) + basic_machine=`echo $basic_machine | sed -e 's/ms1-/mt-/'` + ;; + msys) + basic_machine=i386-pc + os=-msys + ;; + mvs) + basic_machine=i370-ibm + os=-mvs + ;; + nacl) + basic_machine=le32-unknown + os=-nacl + ;; + ncr3000) + basic_machine=i486-ncr + os=-sysv4 + ;; + netbsd386) + basic_machine=i386-unknown + os=-netbsd + ;; + netwinder) + basic_machine=armv4l-rebel + os=-linux + ;; + news | news700 | news800 | news900) + basic_machine=m68k-sony + os=-newsos + ;; + news1000) + basic_machine=m68030-sony + os=-newsos + ;; + news-3600 | risc-news) + basic_machine=mips-sony + os=-newsos + ;; + necv70) + basic_machine=v70-nec + os=-sysv + ;; + next | m*-next ) + basic_machine=m68k-next + case $os in + -nextstep* ) + ;; + 
-ns2*) + os=-nextstep2 + ;; + *) + os=-nextstep3 + ;; + esac + ;; + nh3000) + basic_machine=m68k-harris + os=-cxux + ;; + nh[45]000) + basic_machine=m88k-harris + os=-cxux + ;; + nindy960) + basic_machine=i960-intel + os=-nindy + ;; + mon960) + basic_machine=i960-intel + os=-mon960 + ;; + nonstopux) + basic_machine=mips-compaq + os=-nonstopux + ;; + np1) + basic_machine=np1-gould + ;; + neo-tandem) + basic_machine=neo-tandem + ;; + nse-tandem) + basic_machine=nse-tandem + ;; + nsr-tandem) + basic_machine=nsr-tandem + ;; + op50n-* | op60c-*) + basic_machine=hppa1.1-oki + os=-proelf + ;; + openrisc | openrisc-*) + basic_machine=or32-unknown + ;; + os400) + basic_machine=powerpc-ibm + os=-os400 + ;; + OSE68000 | ose68000) + basic_machine=m68000-ericsson + os=-ose + ;; + os68k) + basic_machine=m68k-none + os=-os68k + ;; + pa-hitachi) + basic_machine=hppa1.1-hitachi + os=-hiuxwe2 + ;; + paragon) + basic_machine=i860-intel + os=-osf + ;; + parisc) + basic_machine=hppa-unknown + os=-linux + ;; + parisc-*) + basic_machine=hppa-`echo $basic_machine | sed 's/^[^-]*-//'` + os=-linux + ;; + pbd) + basic_machine=sparc-tti + ;; + pbb) + basic_machine=m68k-tti + ;; + pc532 | pc532-*) + basic_machine=ns32k-pc532 + ;; + pc98) + basic_machine=i386-pc + ;; + pc98-*) + basic_machine=i386-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium | p5 | k5 | k6 | nexgen | viac3) + basic_machine=i586-pc + ;; + pentiumpro | p6 | 6x86 | athlon | athlon_*) + basic_machine=i686-pc + ;; + pentiumii | pentium2 | pentiumiii | pentium3) + basic_machine=i686-pc + ;; + pentium4) + basic_machine=i786-pc + ;; + pentium-* | p5-* | k5-* | k6-* | nexgen-* | viac3-*) + basic_machine=i586-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumpro-* | p6-* | 6x86-* | athlon-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentiumii-* | pentium2-* | pentiumiii-* | pentium3-*) + basic_machine=i686-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pentium4-*) + basic_machine=i786-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + pn) + basic_machine=pn-gould + ;; + power) basic_machine=power-ibm + ;; + ppc | ppcbe) basic_machine=powerpc-unknown + ;; + ppc-* | ppcbe-*) + basic_machine=powerpc-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppcle | powerpclittle | ppc-le | powerpc-little) + basic_machine=powerpcle-unknown + ;; + ppcle-* | powerpclittle-*) + basic_machine=powerpcle-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64) basic_machine=powerpc64-unknown + ;; + ppc64-*) basic_machine=powerpc64-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ppc64le | powerpc64little | ppc64-le | powerpc64-little) + basic_machine=powerpc64le-unknown + ;; + ppc64le-* | powerpc64little-*) + basic_machine=powerpc64le-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + ps2) + basic_machine=i386-ibm + ;; + pw32) + basic_machine=i586-unknown + os=-pw32 + ;; + rdos) + basic_machine=i386-pc + os=-rdos + ;; + rom68k) + basic_machine=m68k-rom68k + os=-coff + ;; + rm[46]00) + basic_machine=mips-siemens + ;; + rtpc | rtpc-*) + basic_machine=romp-ibm + ;; + s390 | s390-*) + basic_machine=s390-ibm + ;; + s390x | s390x-*) + basic_machine=s390x-ibm + ;; + sa29200) + basic_machine=a29k-amd + os=-udi + ;; + sb1) + basic_machine=mipsisa64sb1-unknown + ;; + sb1el) + basic_machine=mipsisa64sb1el-unknown + ;; + sde) + basic_machine=mipsisa32-sde + os=-elf + ;; + sei) + basic_machine=mips-sei + os=-seiux + ;; + sequent) + basic_machine=i386-sequent + ;; + sh) + basic_machine=sh-hitachi + os=-hms + ;; + sh5el) + basic_machine=sh5le-unknown + 
;; + sh64) + basic_machine=sh64-unknown + ;; + sparclite-wrs | simso-wrs) + basic_machine=sparclite-wrs + os=-vxworks + ;; + sps7) + basic_machine=m68k-bull + os=-sysv2 + ;; + spur) + basic_machine=spur-unknown + ;; + st2000) + basic_machine=m68k-tandem + ;; + stratus) + basic_machine=i860-stratus + os=-sysv4 + ;; + strongarm-* | thumb-*) + basic_machine=arm-`echo $basic_machine | sed 's/^[^-]*-//'` + ;; + sun2) + basic_machine=m68000-sun + ;; + sun2os3) + basic_machine=m68000-sun + os=-sunos3 + ;; + sun2os4) + basic_machine=m68000-sun + os=-sunos4 + ;; + sun3os3) + basic_machine=m68k-sun + os=-sunos3 + ;; + sun3os4) + basic_machine=m68k-sun + os=-sunos4 + ;; + sun4os3) + basic_machine=sparc-sun + os=-sunos3 + ;; + sun4os4) + basic_machine=sparc-sun + os=-sunos4 + ;; + sun4sol2) + basic_machine=sparc-sun + os=-solaris2 + ;; + sun3 | sun3-*) + basic_machine=m68k-sun + ;; + sun4) + basic_machine=sparc-sun + ;; + sun386 | sun386i | roadrunner) + basic_machine=i386-sun + ;; + sv1) + basic_machine=sv1-cray + os=-unicos + ;; + symmetry) + basic_machine=i386-sequent + os=-dynix + ;; + t3e) + basic_machine=alphaev5-cray + os=-unicos + ;; + t90) + basic_machine=t90-cray + os=-unicos + ;; + tile*) + basic_machine=$basic_machine-unknown + os=-linux-gnu + ;; + tx39) + basic_machine=mipstx39-unknown + ;; + tx39el) + basic_machine=mipstx39el-unknown + ;; + toad1) + basic_machine=pdp10-xkl + os=-tops20 + ;; + tower | tower-32) + basic_machine=m68k-ncr + ;; + tpf) + basic_machine=s390x-ibm + os=-tpf + ;; + udi29k) + basic_machine=a29k-amd + os=-udi + ;; + ultra3) + basic_machine=a29k-nyu + os=-sym1 + ;; + v810 | necv810) + basic_machine=v810-nec + os=-none + ;; + vaxv) + basic_machine=vax-dec + os=-sysv + ;; + vms) + basic_machine=vax-dec + os=-vms + ;; + vpp*|vx|vx-*) + basic_machine=f301-fujitsu + ;; + vxworks960) + basic_machine=i960-wrs + os=-vxworks + ;; + vxworks68) + basic_machine=m68k-wrs + os=-vxworks + ;; + vxworks29k) + basic_machine=a29k-wrs + os=-vxworks + ;; + w65*) + basic_machine=w65-wdc + os=-none + ;; + w89k-*) + basic_machine=hppa1.1-winbond + os=-proelf + ;; + xbox) + basic_machine=i686-pc + os=-mingw32 + ;; + xps | xps100) + basic_machine=xps100-honeywell + ;; + xscale-* | xscalee[bl]-*) + basic_machine=`echo $basic_machine | sed 's/^xscale/arm/'` + ;; + ymp) + basic_machine=ymp-cray + os=-unicos + ;; + z8k-*-coff) + basic_machine=z8k-unknown + os=-sim + ;; + z80-*-coff) + basic_machine=z80-unknown + os=-sim + ;; + none) + basic_machine=none-none + os=-none + ;; + +# Here we handle the default manufacturer of certain CPU types. It is in +# some cases the only manufacturer, in others, it is the most popular. 
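For illustration (not part of the vendored script): given the default-manufacturer entries below, plus the default-OS table near the end of the script, a bare CPU name canonicalizes like this when config.sub is run directly under a POSIX sh:

    $ sh config.sub vax
    vax-dec-ultrix4.2
    $ sh config.sub rs6000
    rs6000-ibm-aix

The vendor (dec, ibm) comes from this table; the OS (-ultrix4.2, -aix) is filled in by the per-vendor defaults further down.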
+ w89k) + basic_machine=hppa1.1-winbond + ;; + op50n) + basic_machine=hppa1.1-oki + ;; + op60c) + basic_machine=hppa1.1-oki + ;; + romp) + basic_machine=romp-ibm + ;; + mmix) + basic_machine=mmix-knuth + ;; + rs6000) + basic_machine=rs6000-ibm + ;; + vax) + basic_machine=vax-dec + ;; + pdp10) + # there are many clones, so DEC is not a safe bet + basic_machine=pdp10-unknown + ;; + pdp11) + basic_machine=pdp11-dec + ;; + we32k) + basic_machine=we32k-att + ;; + sh[1234] | sh[24]a | sh[24]aeb | sh[34]eb | sh[1234]le | sh[23]ele) + basic_machine=sh-unknown + ;; + sparc | sparcv8 | sparcv9 | sparcv9b | sparcv9v) + basic_machine=sparc-sun + ;; + cydra) + basic_machine=cydra-cydrome + ;; + orion) + basic_machine=orion-highlevel + ;; + orion105) + basic_machine=clipper-highlevel + ;; + mac | mpw | mac-mpw) + basic_machine=m68k-apple + ;; + pmac | pmac-mpw) + basic_machine=powerpc-apple + ;; + *-unknown) + # Make sure to match an already-canonicalized machine name. + ;; + *) + echo Invalid configuration \`$1\': machine \`$basic_machine\' not recognized 1>&2 + exit 1 + ;; +esac + +# Here we canonicalize certain aliases for manufacturers. +case $basic_machine in + *-digital*) + basic_machine=`echo $basic_machine | sed 's/digital.*/dec/'` + ;; + *-commodore*) + basic_machine=`echo $basic_machine | sed 's/commodore.*/cbm/'` + ;; + *) + ;; +esac + +# Decode manufacturer-specific aliases for certain operating systems. + +if [ x"$os" != x"" ] +then +case $os in + # First match some system type aliases + # that might get confused with valid system types. + # -solaris* is a basic system type, with this one exception. + -auroraux) + os=-auroraux + ;; + -solaris1 | -solaris1.*) + os=`echo $os | sed -e 's|solaris1|sunos4|'` + ;; + -solaris) + os=-solaris2 + ;; + -svr4*) + os=-sysv4 + ;; + -unixware*) + os=-sysv4.2uw + ;; + -gnu/linux*) + os=`echo $os | sed -e 's|gnu/linux|linux-gnu|'` + ;; + # First accept the basic system types. + # The portable systems comes first. + # Each alternative MUST END IN A *, to match a version number. + # -sysv* is not here because it comes later, after sysvr4. 
+ -gnu* | -bsd* | -mach* | -minix* | -genix* | -ultrix* | -irix* \ + | -*vms* | -sco* | -esix* | -isc* | -aix* | -cnk* | -sunos | -sunos[34]*\ + | -hpux* | -unos* | -osf* | -luna* | -dgux* | -auroraux* | -solaris* \ + | -sym* | -kopensolaris* \ + | -amigaos* | -amigados* | -msdos* | -newsos* | -unicos* | -aof* \ + | -aos* | -aros* \ + | -nindy* | -vxsim* | -vxworks* | -ebmon* | -hms* | -mvs* \ + | -clix* | -riscos* | -uniplus* | -iris* | -rtu* | -xenix* \ + | -hiux* | -386bsd* | -knetbsd* | -mirbsd* | -netbsd* \ + | -openbsd* | -solidbsd* \ + | -ekkobsd* | -kfreebsd* | -freebsd* | -riscix* | -lynxos* \ + | -bosx* | -nextstep* | -cxux* | -aout* | -elf* | -oabi* \ + | -ptx* | -coff* | -ecoff* | -winnt* | -domain* | -vsta* \ + | -udi* | -eabi* | -lites* | -ieee* | -go32* | -aux* \ + | -chorusos* | -chorusrdb* | -cegcc* \ + | -cygwin* | -msys* | -pe* | -psos* | -moss* | -proelf* | -rtems* \ + | -mingw32* | -linux-gnu* | -linux-android* \ + | -linux-newlib* | -linux-uclibc* \ + | -uxpv* | -beos* | -mpeix* | -udk* \ + | -interix* | -uwin* | -mks* | -rhapsody* | -darwin* | -opened* \ + | -openstep* | -oskit* | -conix* | -pw32* | -nonstopux* \ + | -storm-chaos* | -tops10* | -tenex* | -tops20* | -its* \ + | -os2* | -vos* | -palmos* | -uclinux* | -nucleus* \ + | -morphos* | -superux* | -rtmk* | -rtmk-nova* | -windiss* \ + | -powermax* | -dnix* | -nx6 | -nx7 | -sei* | -dragonfly* \ + | -skyos* | -haiku* | -rdos* | -toppers* | -drops* | -es*) + # Remember, each alternative MUST END IN *, to match a version number. + ;; + -qnx*) + case $basic_machine in + x86-* | i*86-*) + ;; + *) + os=-nto$os + ;; + esac + ;; + -nto-qnx*) + ;; + -nto*) + os=`echo $os | sed -e 's|nto|nto-qnx|'` + ;; + -sim | -es1800* | -hms* | -xray | -os68k* | -none* | -v88r* \ + | -windows* | -osx | -abug | -netware* | -os9* | -beos* | -haiku* \ + | -macos* | -mpw* | -magic* | -mmixware* | -mon960* | -lnews*) + ;; + -mac*) + os=`echo $os | sed -e 's|mac|macos|'` + ;; + -linux-dietlibc) + os=-linux-dietlibc + ;; + -linux*) + os=`echo $os | sed -e 's|linux|linux-gnu|'` + ;; + -sunos5*) + os=`echo $os | sed -e 's|sunos5|solaris2|'` + ;; + -sunos6*) + os=`echo $os | sed -e 's|sunos6|solaris3|'` + ;; + -opened*) + os=-openedition + ;; + -os400*) + os=-os400 + ;; + -wince*) + os=-wince + ;; + -osfrose*) + os=-osfrose + ;; + -osf*) + os=-osf + ;; + -utek*) + os=-bsd + ;; + -dynix*) + os=-bsd + ;; + -acis*) + os=-aos + ;; + -atheos*) + os=-atheos + ;; + -syllable*) + os=-syllable + ;; + -386bsd) + os=-bsd + ;; + -ctix* | -uts*) + os=-sysv + ;; + -nova*) + os=-rtmk-nova + ;; + -ns2 ) + os=-nextstep2 + ;; + -nsk*) + os=-nsk + ;; + # Preserve the version number of sinix5. + -sinix5.*) + os=`echo $os | sed -e 's|sinix|sysv|'` + ;; + -sinix*) + os=-sysv4 + ;; + -tpf*) + os=-tpf + ;; + -triton*) + os=-sysv3 + ;; + -oss*) + os=-sysv3 + ;; + -svr4) + os=-sysv4 + ;; + -svr3) + os=-sysv3 + ;; + -sysvr4) + os=-sysv4 + ;; + # This must come after -sysvr4. + -sysv*) + ;; + -ose*) + os=-ose + ;; + -es1800*) + os=-ose + ;; + -xenix) + os=-xenix + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + os=-mint + ;; + -aros*) + os=-aros + ;; + -kaos*) + os=-kaos + ;; + -zvmoe) + os=-zvmoe + ;; + -dicos*) + os=-dicos + ;; + -nacl*) + ;; + -none) + ;; + *) + # Get rid of the `-' at the beginning of $os. + os=`echo $os | sed 's/[^-]*-//'` + echo Invalid configuration \`$1\': system \`$os\' not recognized 1>&2 + exit 1 + ;; +esac +else + +# Here we handle the default operating systems that come with various machines. 
+# The value should be what the vendor currently ships out the door with their +# machine or put another way, the most popular os provided with the machine. + +# Note that if you're going to try to match "-MANUFACTURER" here (say, +# "-sun"), then you have to tell the case statement up towards the top +# that MANUFACTURER isn't an operating system. Otherwise, code above +# will signal an error saying that MANUFACTURER isn't an operating +# system, and we'll never get to this point. + +case $basic_machine in + score-*) + os=-elf + ;; + spu-*) + os=-elf + ;; + *-acorn) + os=-riscix1.2 + ;; + arm*-rebel) + os=-linux + ;; + arm*-semi) + os=-aout + ;; + c4x-* | tic4x-*) + os=-coff + ;; + tic54x-*) + os=-coff + ;; + tic55x-*) + os=-coff + ;; + tic6x-*) + os=-coff + ;; + # This must come before the *-dec entry. + pdp10-*) + os=-tops20 + ;; + pdp11-*) + os=-none + ;; + *-dec | vax-*) + os=-ultrix4.2 + ;; + m68*-apollo) + os=-domain + ;; + i386-sun) + os=-sunos4.0.2 + ;; + m68000-sun) + os=-sunos3 + ;; + m68*-cisco) + os=-aout + ;; + mep-*) + os=-elf + ;; + mips*-cisco) + os=-elf + ;; + mips*-*) + os=-elf + ;; + or32-*) + os=-coff + ;; + *-tti) # must be before sparc entry or we get the wrong os. + os=-sysv3 + ;; + sparc-* | *-sun) + os=-sunos4.1.1 + ;; + *-be) + os=-beos + ;; + *-haiku) + os=-haiku + ;; + *-ibm) + os=-aix + ;; + *-knuth) + os=-mmixware + ;; + *-wec) + os=-proelf + ;; + *-winbond) + os=-proelf + ;; + *-oki) + os=-proelf + ;; + *-hp) + os=-hpux + ;; + *-hitachi) + os=-hiux + ;; + i860-* | *-att | *-ncr | *-altos | *-motorola | *-convergent) + os=-sysv + ;; + *-cbm) + os=-amigaos + ;; + *-dg) + os=-dgux + ;; + *-dolphin) + os=-sysv3 + ;; + m68k-ccur) + os=-rtu + ;; + m88k-omron*) + os=-luna + ;; + *-next ) + os=-nextstep + ;; + *-sequent) + os=-ptx + ;; + *-crds) + os=-unos + ;; + *-ns) + os=-genix + ;; + i370-*) + os=-mvs + ;; + *-next) + os=-nextstep3 + ;; + *-gould) + os=-sysv + ;; + *-highlevel) + os=-bsd + ;; + *-encore) + os=-bsd + ;; + *-sgi) + os=-irix + ;; + *-siemens) + os=-sysv4 + ;; + *-masscomp) + os=-rtu + ;; + f30[01]-fujitsu | f700-fujitsu) + os=-uxpv + ;; + *-rom68k) + os=-coff + ;; + *-*bug) + os=-coff + ;; + *-apple) + os=-macos + ;; + *-atari*) + os=-mint + ;; + *) + os=-none + ;; +esac +fi + +# Here we handle the case where we know the os, and the CPU type, but not the +# manufacturer. We pick the logical manufacturer. 
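A sketch of this last inference step (illustrative only): a machine whose vendor field canonicalized to `unknown' gets a vendor picked from the OS by the case block that follows, for example:

    $ sh config.sub hppa1.1-hpux
    hppa1.1-hp-hpux

Here hppa1.1 is a recognized CPU that carries no vendor, and the -hpux* arm below substitutes `hp' into the triplet.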
+vendor=unknown +case $basic_machine in + *-unknown) + case $os in + -riscix*) + vendor=acorn + ;; + -sunos*) + vendor=sun + ;; + -cnk*|-aix*) + vendor=ibm + ;; + -beos*) + vendor=be + ;; + -hpux*) + vendor=hp + ;; + -mpeix*) + vendor=hp + ;; + -hiux*) + vendor=hitachi + ;; + -unos*) + vendor=crds + ;; + -dgux*) + vendor=dg + ;; + -luna*) + vendor=omron + ;; + -genix*) + vendor=ns + ;; + -mvs* | -opened*) + vendor=ibm + ;; + -os400*) + vendor=ibm + ;; + -ptx*) + vendor=sequent + ;; + -tpf*) + vendor=ibm + ;; + -vxsim* | -vxworks* | -windiss*) + vendor=wrs + ;; + -aux*) + vendor=apple + ;; + -hms*) + vendor=hitachi + ;; + -mpw* | -macos*) + vendor=apple + ;; + -*mint | -mint[0-9]* | -*MiNT | -MiNT[0-9]*) + vendor=atari + ;; + -vos*) + vendor=stratus + ;; + esac + basic_machine=`echo $basic_machine | sed "s/unknown/$vendor/"` + ;; +esac + +echo $basic_machine$os +exit + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "timestamp='" +# time-stamp-format: "%:y-%02m-%02d" +# time-stamp-end: "'" +# End: diff --git a/external/gtest/build-aux/depcomp b/external/gtest/build-aux/depcomp new file mode 100755 index 0000000000..bd0ac08958 --- /dev/null +++ b/external/gtest/build-aux/depcomp @@ -0,0 +1,688 @@ +#! /bin/sh +# depcomp - compile a program generating dependencies as side-effects + +scriptversion=2011-12-04.11; # UTC + +# Copyright (C) 1999, 2000, 2003, 2004, 2005, 2006, 2007, 2009, 2010, +# 2011 Free Software Foundation, Inc. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +# Originally written by Alexandre Oliva <oliva@dcc.unicamp.br>. + +case $1 in + '') + echo "$0: No command. Try \`$0 --help' for more information." 1>&2 + exit 1; + ;; + -h | --h*) + cat <<\EOF +Usage: depcomp [--help] [--version] PROGRAM [ARGS] + +Run PROGRAMS ARGS to compile a file, generating dependencies +as side-effects. + +Environment variables: + depmode Dependency tracking mode. + source Source file read by `PROGRAMS ARGS'. + object Object file output by `PROGRAMS ARGS'. + DEPDIR directory where to store dependencies. + depfile Dependency file to output. + tmpdepfile Temporary file to use when outputting dependencies. + libtool Whether libtool is used (yes/no). + +Report bugs to <bug-automake@gnu.org>. +EOF + exit $? + ;; + -v | --v*) + echo "depcomp $scriptversion" + exit $? + ;; +esac + +if test -z "$depmode" || test -z "$source" || test -z "$object"; then + echo "depcomp: Variables source, object and depmode must be set" 1>&2 + exit 1 +fi + +# Dependencies for sub/bar.o or sub/bar.obj go into sub/.deps/bar.Po.
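The two sed expressions that follow can be exercised standalone; this sketch (illustrative only, mirroring the code below) shows the names computed for an object in a subdirectory:

    object=sub/bar.o
    # DEPDIR is unset here, so the '.deps' default is used.
    depfile=`echo "$object" | sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`
    tmpdepfile=`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`
    echo "$depfile"     # sub/.deps/bar.Po
    echo "$tmpdepfile"  # sub/.deps/bar.TPo

The trailing Pobj-to-Po substitution is what folds the sub/bar.obj case onto the same sub/.deps/bar.Po name.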
+depfile=${depfile-`echo "$object" | + sed 's|[^\\/]*$|'${DEPDIR-.deps}'/&|;s|\.\([^.]*\)$|.P\1|;s|Pobj$|Po|'`} +tmpdepfile=${tmpdepfile-`echo "$depfile" | sed 's/\.\([^.]*\)$/.T\1/'`} + +rm -f "$tmpdepfile" + +# Some modes work just like other modes, but use different flags. We +# parameterize here, but still list the modes in the big case below, +# to make depend.m4 easier to write. Note that we *cannot* use a case +# here, because this file can only contain one case statement. +if test "$depmode" = hp; then + # HP compiler uses -M and no extra arg. + gccflag=-M + depmode=gcc +fi + +if test "$depmode" = dashXmstdout; then + # This is just like dashmstdout with a different argument. + dashmflag=-xM + depmode=dashmstdout +fi + +cygpath_u="cygpath -u -f -" +if test "$depmode" = msvcmsys; then + # This is just like msvisualcpp but w/o cygpath translation. + # Just convert the backslash-escaped backslashes to single forward + # slashes to satisfy depend.m4 + cygpath_u='sed s,\\\\,/,g' + depmode=msvisualcpp +fi + +if test "$depmode" = msvc7msys; then + # This is just like msvc7 but w/o cygpath translation. + # Just convert the backslash-escaped backslashes to single forward + # slashes to satisfy depend.m4 + cygpath_u='sed s,\\\\,/,g' + depmode=msvc7 +fi + +case "$depmode" in +gcc3) +## gcc 3 implements dependency tracking that does exactly what +## we want. Yay! Note: for some reason libtool 1.4 doesn't like +## it if -MD -MP comes after the -MF stuff. Hmm. +## Unfortunately, FreeBSD c89 acceptance of flags depends upon +## the command line argument order; so add the flags where they +## appear in depend2.am. Note that the slowdown incurred here +## affects only configure: in makefiles, %FASTDEP% shortcuts this. + for arg + do + case $arg in + -c) set fnord "$@" -MT "$object" -MD -MP -MF "$tmpdepfile" "$arg" ;; + *) set fnord "$@" "$arg" ;; + esac + shift # fnord + shift # $arg + done + "$@" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + mv "$tmpdepfile" "$depfile" + ;; + +gcc) +## There are various ways to get dependency output from gcc. Here's +## why we pick this rather obscure method: +## - Don't want to use -MD because we'd like the dependencies to end +## up in a subdir. Having to rename by hand is ugly. +## (We might end up doing this anyway to support other compilers.) +## - The DEPENDENCIES_OUTPUT environment variable makes gcc act like +## -MM, not -M (despite what the docs say). +## - Using -M directly means running the compiler twice (even worse +## than renaming). + if test -z "$gccflag"; then + gccflag=-MD, + fi + "$@" -Wp,"$gccflag$tmpdepfile" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + alpha=ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz +## The second -e expression handles DOS-style file names with drive letters. + sed -e 's/^[^:]*: / /' \ + -e 's/^['$alpha']:\/[^:]*: / /' < "$tmpdepfile" >> "$depfile" +## This next piece of magic avoids the `deleted header file' problem. +## The problem is that when a header file which appears in a .P file +## is deleted, the dependency causes make to die (because there is +## typically no way to rebuild the header). We avoid this by adding +## dummy dependencies for each header file. Too bad gcc doesn't do +## this for us directly. + tr ' ' ' +' < "$tmpdepfile" | +## Some versions of gcc put a space before the `:'. 
On the theory +## that the space means something, we add a space to the output as +## well. hp depmode also adds that space, but also prefixes the VPATH +## to the object. Take care to not repeat it in the output. +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + sed -e 's/^\\$//' -e '/^$/d' -e "s|.*$object$||" -e '/:$/d' \ + | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +sgi) + if test "$libtool" = yes; then + "$@" "-Wp,-MDupdate,$tmpdepfile" + else + "$@" -MDupdate "$tmpdepfile" + fi + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + + if test -f "$tmpdepfile"; then # yes, the sourcefile depend on other files + echo "$object : \\" > "$depfile" + + # Clip off the initial element (the dependent). Don't try to be + # clever and replace this with sed code, as IRIX sed won't handle + # lines with more than a fixed number of characters (4096 in + # IRIX 6.2 sed, 8192 in IRIX 6.5). We also remove comment lines; + # the IRIX cc adds comments like `#:fec' to the end of the + # dependency line. + tr ' ' ' +' < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' | \ + tr ' +' ' ' >> "$depfile" + echo >> "$depfile" + + # The second pass generates a dummy entry for each header file. + tr ' ' ' +' < "$tmpdepfile" \ + | sed -e 's/^.*\.o://' -e 's/#.*$//' -e '/^$/ d' -e 's/$/:/' \ + >> "$depfile" + else + # The sourcefile does not contain any dependencies, so just + # store a dummy comment line, to avoid errors with the Makefile + # "include basename.Plo" scheme. + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +aix) + # The C for AIX Compiler uses -M and outputs the dependencies + # in a .u file. In older versions, this file always lives in the + # current directory. Also, the AIX compiler puts `$object:' at the + # start of each line; $object doesn't have directory information. + # Version 6 uses the directory in both cases. + dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` + test "x$dir" = "x$object" && dir= + base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` + if test "$libtool" = yes; then + tmpdepfile1=$dir$base.u + tmpdepfile2=$base.u + tmpdepfile3=$dir.libs/$base.u + "$@" -Wc,-M + else + tmpdepfile1=$dir$base.u + tmpdepfile2=$dir$base.u + tmpdepfile3=$dir$base.u + "$@" -M + fi + stat=$? + + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + # Each line is of the form `foo.o: dependent.h'. + # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" + # That's a tab and a space in the []. + sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" + else + # The sourcefile does not contain any dependencies, so just + # store a dummy comment line, to avoid errors with the Makefile + # "include basename.Plo" scheme. + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +icc) + # Intel's C compiler understands `-MD -MF file'. 
However on + # icc -MD -MF foo.d -c -o sub/foo.o sub/foo.c + # ICC 7.0 will fill foo.d with something like + # foo.o: sub/foo.c + # foo.o: sub/foo.h + # which is wrong. We want: + # sub/foo.o: sub/foo.c + # sub/foo.o: sub/foo.h + # sub/foo.c: + # sub/foo.h: + # ICC 7.1 will output + # foo.o: sub/foo.c sub/foo.h + # and will wrap long lines using \ : + # foo.o: sub/foo.c ... \ + # sub/foo.h ... \ + # ... + + "$@" -MD -MF "$tmpdepfile" + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + # Each line is of the form `foo.o: dependent.h', + # or `foo.o: dep1.h dep2.h \', or ` dep3.h dep4.h \'. + # Do two passes, one to just change these to + # `$object: dependent.h' and one to simply `dependent.h:'. + sed "s,^[^:]*:,$object :," < "$tmpdepfile" > "$depfile" + # Some versions of the HPUX 10.20 sed can't process this invocation + # correctly. Breaking it into two sed invocations is a workaround. + sed 's,^[^:]*: \(.*\)$,\1,;s/^\\$//;/^$/d;/:$/d' < "$tmpdepfile" | + sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +hp2) + # The "hp" stanza above does not work with aCC (C++) and HP's ia64 + # compilers, which have integrated preprocessors. The correct option + # to use with these is +Maked; it writes dependencies to a file named + # 'foo.d', which lands next to the object file, wherever that + # happens to be. + # Much of this is similar to the tru64 case; see comments there. + dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` + test "x$dir" = "x$object" && dir= + base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` + if test "$libtool" = yes; then + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir.libs/$base.d + "$@" -Wc,+Maked + else + tmpdepfile1=$dir$base.d + tmpdepfile2=$dir$base.d + "$@" +Maked + fi + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile1" "$tmpdepfile2" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + sed -e "s,^.*\.[a-z]*:,$object:," "$tmpdepfile" > "$depfile" + # Add `dependent.h:' lines. + sed -ne '2,${ + s/^ *// + s/ \\*$// + s/$/:/ + p + }' "$tmpdepfile" >> "$depfile" + else + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" "$tmpdepfile2" + ;; + +tru64) + # The Tru64 compiler uses -MD to generate dependencies as a side + # effect. `cc -MD -o foo.o ...' puts the dependencies into `foo.o.d'. + # At least on Alpha/Redhat 6.1, Compaq CCC V6.2-504 seems to put + # dependencies in `foo.d' instead, so we check for that too. + # Subdirectories are respected. + dir=`echo "$object" | sed -e 's|/[^/]*$|/|'` + test "x$dir" = "x$object" && dir= + base=`echo "$object" | sed -e 's|^.*/||' -e 's/\.o$//' -e 's/\.lo$//'` + + if test "$libtool" = yes; then + # With Tru64 cc, shared objects can also be used to make a + # static library. This mechanism is used in libtool 1.4 series to + # handle both shared and static libraries in a single compilation. + # With libtool 1.4, dependencies were output in $dir.libs/$base.lo.d. + # + # With libtool 1.5 this exception was removed, and libtool now + # generates 2 separate objects for the 2 libraries. These two + # compilations output dependencies in $dir.libs/$base.o.d and + # in $dir$base.o.d. We have to check for both files, because + # one of the two compilations can be disabled. 
We should prefer + # $dir$base.o.d over $dir.libs/$base.o.d because the latter is + # automatically cleaned when .libs/ is deleted, while ignoring + # the former would cause a distcleancheck panic. + tmpdepfile1=$dir.libs/$base.lo.d # libtool 1.4 + tmpdepfile2=$dir$base.o.d # libtool 1.5 + tmpdepfile3=$dir.libs/$base.o.d # libtool 1.5 + tmpdepfile4=$dir.libs/$base.d # Compaq CCC V6.2-504 + "$@" -Wc,-MD + else + tmpdepfile1=$dir$base.o.d + tmpdepfile2=$dir$base.d + tmpdepfile3=$dir$base.d + tmpdepfile4=$dir$base.d + "$@" -MD + fi + + stat=$? + if test $stat -eq 0; then : + else + rm -f "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" + exit $stat + fi + + for tmpdepfile in "$tmpdepfile1" "$tmpdepfile2" "$tmpdepfile3" "$tmpdepfile4" + do + test -f "$tmpdepfile" && break + done + if test -f "$tmpdepfile"; then + sed -e "s,^.*\.[a-z]*:,$object:," < "$tmpdepfile" > "$depfile" + # That's a tab and a space in the []. + sed -e 's,^.*\.[a-z]*:[ ]*,,' -e 's,$,:,' < "$tmpdepfile" >> "$depfile" + else + echo "#dummy" > "$depfile" + fi + rm -f "$tmpdepfile" + ;; + +msvc7) + if test "$libtool" = yes; then + showIncludes=-Wc,-showIncludes + else + showIncludes=-showIncludes + fi + "$@" $showIncludes > "$tmpdepfile" + stat=$? + grep -v '^Note: including file: ' "$tmpdepfile" + if test "$stat" = 0; then : + else + rm -f "$tmpdepfile" + exit $stat + fi + rm -f "$depfile" + echo "$object : \\" > "$depfile" + # The first sed program below extracts the file names and escapes + # backslashes for cygpath. The second sed program outputs the file + # name when reading, but also accumulates all include files in the + # hold buffer in order to output them again at the end. This only + # works with sed implementations that can handle large buffers. + sed < "$tmpdepfile" -n ' +/^Note: including file: *\(.*\)/ { + s//\1/ + s/\\/\\\\/g + p +}' | $cygpath_u | sort -u | sed -n ' +s/ /\\ /g +s/\(.*\)/ \1 \\/p +s/.\(.*\) \\/\1:/ +H +$ { + s/.*/ / + G + p +}' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvc7msys) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. + exit 1 + ;; + +#nosideeffect) + # This comment above is used by automake to tell side-effect + # dependency tracking mechanisms from slower ones. + +dashmstdout) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout, regardless of -o. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + # Remove `-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + test -z "$dashmflag" && dashmflag=-M + # Require at least two characters before searching for `:' + # in the target name. This is to cope with DOS-style filenames: + # a dependency such as `c:/foo/bar' could be seen as target `c' otherwise. + "$@" $dashmflag | + sed 's:^[ ]*[^: ][^:][^:]*\:[ ]*:'"$object"'\: :' > "$tmpdepfile" + rm -f "$depfile" + cat < "$tmpdepfile" > "$depfile" + tr ' ' ' +' < "$tmpdepfile" | \ +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. 
+ sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +dashXmstdout) + # This case only exists to satisfy depend.m4. It is never actually + # run, as this mode is specially recognized in the preamble. + exit 1 + ;; + +makedepend) + "$@" || exit $? + # Remove any Libtool call + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + # X makedepend + shift + cleared=no eat=no + for arg + do + case $cleared in + no) + set ""; shift + cleared=yes ;; + esac + if test $eat = yes; then + eat=no + continue + fi + case "$arg" in + -D*|-I*) + set fnord "$@" "$arg"; shift ;; + # Strip any option that makedepend may not understand. Remove + # the object too, otherwise makedepend will parse it as a source file. + -arch) + eat=yes ;; + -*|$object) + ;; + *) + set fnord "$@" "$arg"; shift ;; + esac + done + obj_suffix=`echo "$object" | sed 's/^.*\././'` + touch "$tmpdepfile" + ${MAKEDEPEND-makedepend} -o"$obj_suffix" -f"$tmpdepfile" "$@" + rm -f "$depfile" + # makedepend may prepend the VPATH from the source file name to the object. + # No need to regex-escape $object, excess matching of '.' is harmless. + sed "s|^.*\($object *:\)|\1|" "$tmpdepfile" > "$depfile" + sed '1,2d' "$tmpdepfile" | tr ' ' ' +' | \ +## Some versions of the HPUX 10.20 sed can't process this invocation +## correctly. Breaking it into two sed invocations is a workaround. + sed -e 's/^\\$//' -e '/^$/d' -e '/:$/d' | sed -e 's/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" "$tmpdepfile".bak + ;; + +cpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + # Remove `-o $object'. + IFS=" " + for arg + do + case $arg in + -o) + shift + ;; + $object) + shift + ;; + *) + set fnord "$@" "$arg" + shift # fnord + shift # $arg + ;; + esac + done + + "$@" -E | + sed -n -e '/^# [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' \ + -e '/^#line [0-9][0-9]* "\([^"]*\)".*/ s:: \1 \\:p' | + sed '$ s: \\$::' > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + cat < "$tmpdepfile" >> "$depfile" + sed < "$tmpdepfile" '/^$/d;s/^ //;s/ \\$//;s/$/ :/' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvisualcpp) + # Important note: in order to support this mode, a compiler *must* + # always write the preprocessed file to stdout. + "$@" || exit $? + + # Remove the call to Libtool. + if test "$libtool" = yes; then + while test "X$1" != 'X--mode=compile'; do + shift + done + shift + fi + + IFS=" " + for arg + do + case "$arg" in + -o) + shift + ;; + $object) + shift + ;; + "-Gm"|"/Gm"|"-Gi"|"/Gi"|"-ZI"|"/ZI") + set fnord "$@" + shift + shift + ;; + *) + set fnord "$@" "$arg" + shift + shift + ;; + esac + done + "$@" -E 2>/dev/null | + sed -n '/^#line [0-9][0-9]* "\([^"]*\)"/ s::\1:p' | $cygpath_u | sort -u > "$tmpdepfile" + rm -f "$depfile" + echo "$object : \\" > "$depfile" + sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s:: \1 \\:p' >> "$depfile" + echo " " >> "$depfile" + sed < "$tmpdepfile" -n -e 's% %\\ %g' -e '/^\(.*\)$/ s::\1\::p' >> "$depfile" + rm -f "$tmpdepfile" + ;; + +msvcmsys) + # This case exists only to let depend.m4 do its work. It works by + # looking at the text of this script. This case will never be run, + # since it is checked for above. 
+ exit 1 + ;; + +none) + exec "$@" + ;; + +*) + echo "Unknown depmode $depmode" 1>&2 + exit 1 + ;; +esac + +exit 0 + +# Local Variables: +# mode: shell-script +# sh-indentation: 2 +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/external/gtest/build-aux/install-sh b/external/gtest/build-aux/install-sh new file mode 100755 index 0000000000..a9244eb078 --- /dev/null +++ b/external/gtest/build-aux/install-sh @@ -0,0 +1,527 @@ +#!/bin/sh +# install - install a program, script, or datafile + +scriptversion=2011-01-19.21; # UTC + +# This originates from X11R5 (mit/util/scripts/install.sh), which was +# later released in X11R6 (xc/config/util/install.sh) with the +# following copyright and license. +# +# Copyright (C) 1994 X Consortium +# +# Permission is hereby granted, free of charge, to any person obtaining a copy +# of this software and associated documentation files (the "Software"), to +# deal in the Software without restriction, including without limitation the +# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or +# sell copies of the Software, and to permit persons to whom the Software is +# furnished to do so, subject to the following conditions: +# +# The above copyright notice and this permission notice shall be included in +# all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +# X CONSORTIUM BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN +# AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNEC- +# TION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. +# +# Except as contained in this notice, the name of the X Consortium shall not +# be used in advertising or otherwise to promote the sale, use or other deal- +# ings in this Software without prior written authorization from the X Consor- +# tium. +# +# +# FSF changes to this file are in the public domain. +# +# Calling this script install-sh is preferred over install.sh, to prevent +# `make' implicit rules from creating a file called install from it +# when there is no Makefile. +# +# This script is compatible with the BSD install script, but was written +# from scratch. + +nl=' +' +IFS=" "" $nl" + +# set DOITPROG to echo to test this script + +# Don't use :- since 4.3BSD and earlier shells don't like it. +doit=${DOITPROG-} +if test -z "$doit"; then + doit_exec=exec +else + doit_exec=$doit +fi + +# Put in absolute file names if you don't have them in your path; +# or use environment vars. + +chgrpprog=${CHGRPPROG-chgrp} +chmodprog=${CHMODPROG-chmod} +chownprog=${CHOWNPROG-chown} +cmpprog=${CMPPROG-cmp} +cpprog=${CPPROG-cp} +mkdirprog=${MKDIRPROG-mkdir} +mvprog=${MVPROG-mv} +rmprog=${RMPROG-rm} +stripprog=${STRIPPROG-strip} + +posix_glob='?' +initialize_posix_glob=' + test "$posix_glob" != "?" || { + if (set -f) 2>/dev/null; then + posix_glob= + else + posix_glob=: + fi + } +' + +posix_mkdir= + +# Desired mode of installed file. +mode=0755 + +chgrpcmd= +chmodcmd=$chmodprog +chowncmd= +mvcmd=$mvprog +rmcmd="$rmprog -f" +stripcmd= + +src= +dst= +dir_arg= +dst_arg= + +copy_on_change=false +no_target_directory= + +usage="\ +Usage: $0 [OPTION]... 
[-T] SRCFILE DSTFILE + or: $0 [OPTION]... SRCFILES... DIRECTORY + or: $0 [OPTION]... -t DIRECTORY SRCFILES... + or: $0 [OPTION]... -d DIRECTORIES... + +In the 1st form, copy SRCFILE to DSTFILE. +In the 2nd and 3rd, copy all SRCFILES to DIRECTORY. +In the 4th, create DIRECTORIES. + +Options: + --help display this help and exit. + --version display version info and exit. + + -c (ignored) + -C install only if different (preserve the last data modification time) + -d create directories instead of installing files. + -g GROUP $chgrpprog installed files to GROUP. + -m MODE $chmodprog installed files to MODE. + -o USER $chownprog installed files to USER. + -s $stripprog installed files. + -t DIRECTORY install into DIRECTORY. + -T report an error if DSTFILE is a directory. + +Environment variables override the default commands: + CHGRPPROG CHMODPROG CHOWNPROG CMPPROG CPPROG MKDIRPROG MVPROG + RMPROG STRIPPROG +" + +while test $# -ne 0; do + case $1 in + -c) ;; + + -C) copy_on_change=true;; + + -d) dir_arg=true;; + + -g) chgrpcmd="$chgrpprog $2" + shift;; + + --help) echo "$usage"; exit $?;; + + -m) mode=$2 + case $mode in + *' '* | *' '* | *' +'* | *'*'* | *'?'* | *'['*) + echo "$0: invalid mode: $mode" >&2 + exit 1;; + esac + shift;; + + -o) chowncmd="$chownprog $2" + shift;; + + -s) stripcmd=$stripprog;; + + -t) dst_arg=$2 + # Protect names problematic for `test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + shift;; + + -T) no_target_directory=true;; + + --version) echo "$0 $scriptversion"; exit $?;; + + --) shift + break;; + + -*) echo "$0: invalid option: $1" >&2 + exit 1;; + + *) break;; + esac + shift +done + +if test $# -ne 0 && test -z "$dir_arg$dst_arg"; then + # When -d is used, all remaining arguments are directories to create. + # When -t is used, the destination is already specified. + # Otherwise, the last argument is the destination. Remove it from $@. + for arg + do + if test -n "$dst_arg"; then + # $@ is not empty: it contains at least $arg. + set fnord "$@" "$dst_arg" + shift # fnord + fi + shift # arg + dst_arg=$arg + # Protect names problematic for `test' and other utilities. + case $dst_arg in + -* | [=\(\)!]) dst_arg=./$dst_arg;; + esac + done +fi + +if test $# -eq 0; then + if test -z "$dir_arg"; then + echo "$0: no input file specified." >&2 + exit 1 + fi + # It's OK to call `install-sh -d' without argument. + # This can happen when creating conditional directories. + exit 0 +fi + +if test -z "$dir_arg"; then + do_exit='(exit $ret); exit $ret' + trap "ret=129; $do_exit" 1 + trap "ret=130; $do_exit" 2 + trap "ret=141; $do_exit" 13 + trap "ret=143; $do_exit" 15 + + # Set umask so as not to create temps with too-generous modes. + # However, 'strip' requires both read and write access to temps. + case $mode in + # Optimize common cases. + *644) cp_umask=133;; + *755) cp_umask=22;; + + *[0-7]) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw='% 200' + fi + cp_umask=`expr '(' 777 - $mode % 1000 ')' $u_plus_rw`;; + *) + if test -z "$stripcmd"; then + u_plus_rw= + else + u_plus_rw=,u+rw + fi + cp_umask=$mode$u_plus_rw;; + esac +fi + +for src +do + # Protect names problematic for `test' and other utilities. + case $src in + -* | [=\(\)!]) src=./$src;; + esac + + if test -n "$dir_arg"; then + dst=$src + dstdir=$dst + test -d "$dstdir" + dstdir_status=$? 
+ else + + # Waiting for this to be detected by the "$cpprog $src $dsttmp" command + # might cause directories to be created, which would be especially bad + # if $src (and thus $dsttmp) contains '*'. + if test ! -f "$src" && test ! -d "$src"; then + echo "$0: $src does not exist." >&2 + exit 1 + fi + + if test -z "$dst_arg"; then + echo "$0: no destination specified." >&2 + exit 1 + fi + dst=$dst_arg + + # If destination is a directory, append the input filename; won't work + # if double slashes aren't ignored. + if test -d "$dst"; then + if test -n "$no_target_directory"; then + echo "$0: $dst_arg: Is a directory" >&2 + exit 1 + fi + dstdir=$dst + dst=$dstdir/`basename "$src"` + dstdir_status=0 + else + # Prefer dirname, but fall back on a substitute if dirname fails. + dstdir=` + (dirname "$dst") 2>/dev/null || + expr X"$dst" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$dst" : 'X\(//\)[^/]' \| \ + X"$dst" : 'X\(//\)$' \| \ + X"$dst" : 'X\(/\)' \| . 2>/dev/null || + echo X"$dst" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q' + ` + + test -d "$dstdir" + dstdir_status=$? + fi + fi + + obsolete_mkdir_used=false + + if test $dstdir_status != 0; then + case $posix_mkdir in + '') + # Create intermediate dirs using mode 755 as modified by the umask. + # This is like FreeBSD 'install' as of 1997-10-28. + umask=`umask` + case $stripcmd.$umask in + # Optimize common cases. + *[2367][2367]) mkdir_umask=$umask;; + .*0[02][02] | .[02][02] | .[02]) mkdir_umask=22;; + + *[0-7]) + mkdir_umask=`expr $umask + 22 \ + - $umask % 100 % 40 + $umask % 20 \ + - $umask % 10 % 4 + $umask % 2 + `;; + *) mkdir_umask=$umask,go-w;; + esac + + # With -d, create the new directory with the user-specified mode. + # Otherwise, rely on $mkdir_umask. + if test -n "$dir_arg"; then + mkdir_mode=-m$mode + else + mkdir_mode= + fi + + posix_mkdir=false + case $umask in + *[123567][0-7][0-7]) + # POSIX mkdir -p sets u+wx bits regardless of umask, which + # is incompatible with FreeBSD 'install' when (umask & 300) != 0. + ;; + *) + tmpdir=${TMPDIR-/tmp}/ins$RANDOM-$$ + trap 'ret=$?; rmdir "$tmpdir/d" "$tmpdir" 2>/dev/null; exit $ret' 0 + + if (umask $mkdir_umask && + exec $mkdirprog $mkdir_mode -p -- "$tmpdir/d") >/dev/null 2>&1 + then + if test -z "$dir_arg" || { + # Check for POSIX incompatibilities with -m. + # HP-UX 11.23 and IRIX 6.5 mkdir -m -p sets group- or + # other-writeable bit of parent directory when it shouldn't. + # FreeBSD 6.1 mkdir -m -p sets mode of existing directory. + ls_ld_tmpdir=`ls -ld "$tmpdir"` + case $ls_ld_tmpdir in + d????-?r-*) different_mode=700;; + d????-?--*) different_mode=755;; + *) false;; + esac && + $mkdirprog -m$different_mode -p -- "$tmpdir" && { + ls_ld_tmpdir_1=`ls -ld "$tmpdir"` + test "$ls_ld_tmpdir" = "$ls_ld_tmpdir_1" + } + } + then posix_mkdir=: + fi + rmdir "$tmpdir/d" "$tmpdir" + else + # Remove any dirs left behind by ancient mkdir implementations. + rmdir ./$mkdir_mode ./-p ./-- 2>/dev/null + fi + trap '' 0;; + esac;; + esac + + if + $posix_mkdir && ( + umask $mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir" + ) + then : + else + + # The umask is ridiculous, or mkdir does not conform to POSIX, + # or it failed possibly due to a race condition. Create the + # directory the slow way, step by step, checking for races as we go. 
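A stripped-down sketch of that slow path (illustrative, assuming a POSIX sh and an absolute $dstdir; the real code below also handles umask, quoting of odd names, and the $posix_mkdir probe):

    oIFS=$IFS; IFS=/
    set -f                    # no globbing while the path is word-split
    set fnord $dstdir; shift  # split $dstdir on '/'
    set +f; IFS=$oIFS
    prefix=/
    for d
    do
      test -z "$d" && continue
      prefix=$prefix$d
      # Tolerate a concurrent mkdir by another installer instance.
      test -d "$prefix" || mkdir "$prefix" || test -d "$prefix" || exit 1
      prefix=$prefix/
    done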
+ + case $dstdir in + /*) prefix='/';; + [-=\(\)!]*) prefix='./';; + *) prefix='';; + esac + + eval "$initialize_posix_glob" + + oIFS=$IFS + IFS=/ + $posix_glob set -f + set fnord $dstdir + shift + $posix_glob set +f + IFS=$oIFS + + prefixes= + + for d + do + test X"$d" = X && continue + + prefix=$prefix$d + if test -d "$prefix"; then + prefixes= + else + if $posix_mkdir; then + (umask=$mkdir_umask && + $doit_exec $mkdirprog $mkdir_mode -p -- "$dstdir") && break + # Don't fail if two instances are running concurrently. + test -d "$prefix" || exit 1 + else + case $prefix in + *\'*) qprefix=`echo "$prefix" | sed "s/'/'\\\\\\\\''/g"`;; + *) qprefix=$prefix;; + esac + prefixes="$prefixes '$qprefix'" + fi + fi + prefix=$prefix/ + done + + if test -n "$prefixes"; then + # Don't fail if two instances are running concurrently. + (umask $mkdir_umask && + eval "\$doit_exec \$mkdirprog $prefixes") || + test -d "$dstdir" || exit 1 + obsolete_mkdir_used=true + fi + fi + fi + + if test -n "$dir_arg"; then + { test -z "$chowncmd" || $doit $chowncmd "$dst"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dst"; } && + { test "$obsolete_mkdir_used$chowncmd$chgrpcmd" = false || + test -z "$chmodcmd" || $doit $chmodcmd $mode "$dst"; } || exit 1 + else + + # Make a couple of temp file names in the proper directory. + dsttmp=$dstdir/_inst.$$_ + rmtmp=$dstdir/_rm.$$_ + + # Trap to clean up those temp files at exit. + trap 'ret=$?; rm -f "$dsttmp" "$rmtmp" && exit $ret' 0 + + # Copy the file name to the temp name. + (umask $cp_umask && $doit_exec $cpprog "$src" "$dsttmp") && + + # and set any options; do chmod last to preserve setuid bits. + # + # If any of these fail, we abort the whole thing. If we want to + # ignore errors from any of these, just make sure not to ignore + # errors from the above "$doit $cpprog $src $dsttmp" command. + # + { test -z "$chowncmd" || $doit $chowncmd "$dsttmp"; } && + { test -z "$chgrpcmd" || $doit $chgrpcmd "$dsttmp"; } && + { test -z "$stripcmd" || $doit $stripcmd "$dsttmp"; } && + { test -z "$chmodcmd" || $doit $chmodcmd $mode "$dsttmp"; } && + + # If -C, don't bother to copy if it wouldn't change the file. + if $copy_on_change && + old=`LC_ALL=C ls -dlL "$dst" 2>/dev/null` && + new=`LC_ALL=C ls -dlL "$dsttmp" 2>/dev/null` && + + eval "$initialize_posix_glob" && + $posix_glob set -f && + set X $old && old=:$2:$4:$5:$6 && + set X $new && new=:$2:$4:$5:$6 && + $posix_glob set +f && + + test "$old" = "$new" && + $cmpprog "$dst" "$dsttmp" >/dev/null 2>&1 + then + rm -f "$dsttmp" + else + # Rename the file to the real destination. + $doit $mvcmd -f "$dsttmp" "$dst" 2>/dev/null || + + # The rename failed, perhaps because mv can't rename something else + # to itself, or perhaps because mv is so ancient that it does not + # support -f. + { + # Now remove or move aside any old file at destination location. + # We try this two ways since rm can't unlink itself on some + # systems and the destination file might be busy for other + # reasons. In this case, the final cleanup might fail but the new + # file should still install successfully. + { + test ! -f "$dst" || + $doit $rmcmd -f "$dst" 2>/dev/null || + { $doit $mvcmd -f "$dst" "$rmtmp" 2>/dev/null && + { $doit $rmcmd -f "$rmtmp" 2>/dev/null; :; } + } || + { echo "$0: cannot unlink or rename $dst" >&2 + (exit 1); exit 1 + } + } && + + # Now rename the file to the real destination. 
+ $doit $mvcmd "$dsttmp" "$dst" + } + fi || exit 1 + + trap '' 0 + fi +done + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/external/gtest/build-aux/ltmain.sh b/external/gtest/build-aux/ltmain.sh new file mode 100755 index 0000000000..c2852d8561 --- /dev/null +++ b/external/gtest/build-aux/ltmain.sh @@ -0,0 +1,9661 @@ + +# libtool (GNU libtool) 2.4.2 +# Written by Gordon Matzigkeit <gord@gnu.ai.mit.edu>, 1996 + +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, 2006, +# 2007, 2008, 2009, 2010, 2011 Free Software Foundation, Inc. +# This is free software; see the source for copying conditions. There is NO +# warranty; not even for MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. + +# GNU Libtool is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2 of the License, or +# (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, but +# WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU +# General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, +# or obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + +# Usage: $progname [OPTION]... [MODE-ARG]... +# +# Provide generalized library-building support services. +# +# --config show all configuration variables +# --debug enable verbose shell tracing +# -n, --dry-run display commands without modifying any files +# --features display basic configuration information and exit +# --mode=MODE use operation mode MODE +# --preserve-dup-deps don't remove duplicate dependency libraries +# --quiet, --silent don't print informational messages +# --no-quiet, --no-silent +# print informational messages (default) +# --no-warn don't display warning messages +# --tag=TAG use configuration variables from tag TAG +# -v, --verbose print more informational messages than default +# --no-verbose don't print the extra informational messages +# --version print version information +# -h, --help, --help-all print short, long, or detailed help message +# +# MODE must be one of the following: +# +# clean remove files from the build directory +# compile compile a source file into a libtool object +# execute automatically set library path, then run a program +# finish complete the installation of libtool libraries +# install install libraries or executables +# link create a library or an executable +# uninstall remove libraries from an installed directory +# +# MODE-ARGS vary depending on the MODE. When passed as first option, +# `--mode=MODE' may be abbreviated as `MODE' or a unique abbreviation of that. +# Try `$progname --help --mode=MODE' for a more detailed description of MODE.
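For orientation, the two most commonly used modes wrap an ordinary compiler command line; these are the stock examples from the libtool manual, not specific to this vendored copy:

    libtool --mode=compile gcc -g -O -c foo.c
    libtool --mode=link gcc -g -O -o libfoo.la foo.lo -rpath /usr/local/lib

compile emits a libtool object (foo.lo); link emits a libtool library (libfoo.la) that records the static and/or shared artifacts actually built.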
+# +# When reporting a bug, please describe a test case to reproduce it and +# include the following information: +# +# host-triplet: $host +# shell: $SHELL +# compiler: $LTCC +# compiler flags: $LTCFLAGS +# linker: $LD (gnu? $with_gnu_ld) +# $progname: (GNU libtool) 2.4.2 Debian-2.4.2-1ubuntu1 +# automake: $automake_version +# autoconf: $autoconf_version +# +# Report bugs to <bug-libtool@gnu.org>. +# GNU libtool home page: <http://www.gnu.org/software/libtool/>. +# General help using GNU software: <http://www.gnu.org/gethelp/>. + +PROGRAM=libtool +PACKAGE=libtool +VERSION="2.4.2 Debian-2.4.2-1ubuntu1" +TIMESTAMP="" +package_revision=1.3337 + +# Be Bourne compatible +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' +} + +# NLS nuisances: We save the old values to restore during execute mode. +lt_user_locale= +lt_safe_locale= +for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES +do + eval "if test \"\${$lt_var+set}\" = set; then + save_$lt_var=\$$lt_var + $lt_var=C + export $lt_var + lt_user_locale=\"$lt_var=\\\$save_\$lt_var; \$lt_user_locale\" + lt_safe_locale=\"$lt_var=C; \$lt_safe_locale\" + fi" +done +LC_ALL=C +LANGUAGE=C +export LANGUAGE LC_ALL + +$lt_unset CDPATH + + +# Work around backward compatibility issue on IRIX 6.5. On IRIX 6.4+, sh +# is ksh but when the shell is invoked as "sh" and the current value of +# the _XPG environment variable is not equal to 1 (one), the special +# positional parameter $0, within a function call, is the name of the +# function. +progpath="$0" + + + +: ${CP="cp -f"} +test "${ECHO+set}" = set || ECHO=${as_echo-'printf %s\n'} +: ${MAKE="make"} +: ${MKDIR="mkdir"} +: ${MV="mv -f"} +: ${RM="rm -f"} +: ${SHELL="${CONFIG_SHELL-/bin/sh}"} +: ${Xsed="$SED -e 1s/^X//"} + +# Global variables: +EXIT_SUCCESS=0 +EXIT_FAILURE=1 +EXIT_MISMATCH=63 # $? = 63 is used to indicate version mismatch to missing. +EXIT_SKIP=77 # $? = 77 is used to indicate a skipped test to automake. + +exit_status=$EXIT_SUCCESS + +# Make sure IFS has a sensible default +lt_nl=' +' +IFS=" $lt_nl" + +dirname="s,/[^/]*$,," +basename="s,^.*/,," + +# func_dirname file append nondir_replacement +# Compute the dirname of FILE. If nonempty, add APPEND to the result, +# otherwise set result to NONDIR_REPLACEMENT. +func_dirname () +{ + func_dirname_result=`$ECHO "${1}" | $SED "$dirname"` + if test "X$func_dirname_result" = "X${1}"; then + func_dirname_result="${3}" + else + func_dirname_result="$func_dirname_result${2}" + fi +} # func_dirname may be replaced by extended shell implementation + + +# func_basename file +func_basename () +{ + func_basename_result=`$ECHO "${1}" | $SED "$basename"` +} # func_basename may be replaced by extended shell implementation + + +# func_dirname_and_basename file append nondir_replacement +# perform func_basename and func_dirname in a single function +# call: +# dirname: Compute the dirname of FILE. If nonempty, +# add APPEND to the result, otherwise set result +# to NONDIR_REPLACEMENT. +# value returned in "$func_dirname_result" +# basename: Compute filename of FILE.
+# value returned in "$func_basename_result" +# Implementation must be kept synchronized with func_dirname +# and func_basename. For efficiency, we do not delegate to +# those functions but instead duplicate the functionality here. +func_dirname_and_basename () +{ + # Extract subdirectory from the argument. + func_dirname_result=`$ECHO "${1}" | $SED -e "$dirname"` + if test "X$func_dirname_result" = "X${1}"; then + func_dirname_result="${3}" + else + func_dirname_result="$func_dirname_result${2}" + fi + func_basename_result=`$ECHO "${1}" | $SED -e "$basename"` +} # func_dirname_and_basename may be replaced by extended shell implementation + + +# func_stripname prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +# func_strip_suffix prefix name +func_stripname () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; + esac +} # func_stripname may be replaced by extended shell implementation + + +# These SED scripts presuppose an absolute path with a trailing slash. +pathcar='s,^/\([^/]*\).*$,\1,' +pathcdr='s,^/[^/]*,,' +removedotparts=':dotsl + s@/\./@/@g + t dotsl + s,/\.$,/,' +collapseslashes='s@/\{1,\}@/@g' +finalslash='s,/*$,/,' + +# func_normal_abspath PATH +# Remove doubled-up and trailing slashes, "." path components, +# and cancel out any ".." path components in PATH after making +# it an absolute path. +# value returned in "$func_normal_abspath_result" +func_normal_abspath () +{ + # Start from root dir and reassemble the path. + func_normal_abspath_result= + func_normal_abspath_tpath=$1 + func_normal_abspath_altnamespace= + case $func_normal_abspath_tpath in + "") + # Empty path, that just means $cwd. + func_stripname '' '/' "`pwd`" + func_normal_abspath_result=$func_stripname_result + return + ;; + # The next three entries are used to spot a run of precisely + # two leading slashes without using negated character classes; + # we take advantage of case's first-match behaviour. + ///*) + # Unusual form of absolute path, do nothing. + ;; + //*) + # Not necessarily an ordinary path; POSIX reserves leading '//' + # and for example Cygwin uses it to access remote file shares + # over CIFS/SMB, so we conserve a leading double slash if found. + func_normal_abspath_altnamespace=/ + ;; + /*) + # Absolute path, do nothing. + ;; + *) + # Relative path, prepend $cwd. + func_normal_abspath_tpath=`pwd`/$func_normal_abspath_tpath + ;; + esac + # Cancel out all the simple stuff to save iterations. We also want + # the path to end with a slash for ease of parsing, so make sure + # there is one (and only one) here. + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$removedotparts" -e "$collapseslashes" -e "$finalslash"` + while :; do + # Processed it all yet? + if test "$func_normal_abspath_tpath" = / ; then + # If we ascended to the root using ".." the result may be empty now.
+ if test -z "$func_normal_abspath_result" ; then + func_normal_abspath_result=/ + fi + break + fi + func_normal_abspath_tcomponent=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$pathcar"` + func_normal_abspath_tpath=`$ECHO "$func_normal_abspath_tpath" | $SED \ + -e "$pathcdr"` + # Figure out what to do with it + case $func_normal_abspath_tcomponent in + "") + # Trailing empty path component, ignore it. + ;; + ..) + # Parent dir; strip last assembled component from result. + func_dirname "$func_normal_abspath_result" + func_normal_abspath_result=$func_dirname_result + ;; + *) + # Actual path component, append it. + func_normal_abspath_result=$func_normal_abspath_result/$func_normal_abspath_tcomponent + ;; + esac + done + # Restore leading double-slash if one was found on entry. + func_normal_abspath_result=$func_normal_abspath_altnamespace$func_normal_abspath_result +} + +# func_relative_path SRCDIR DSTDIR +# generates a relative path from SRCDIR to DSTDIR, with a trailing +# slash if non-empty, suitable for immediately appending a filename +# without needing to append a separator. +# value returned in "$func_relative_path_result" +func_relative_path () +{ + func_relative_path_result= + func_normal_abspath "$1" + func_relative_path_tlibdir=$func_normal_abspath_result + func_normal_abspath "$2" + func_relative_path_tbindir=$func_normal_abspath_result + + # Ascend the tree starting from libdir + while :; do + # check if we have found a prefix of bindir + case $func_relative_path_tbindir in + $func_relative_path_tlibdir) + # found an exact match + func_relative_path_tcancelled= + break + ;; + $func_relative_path_tlibdir*) + # found a matching prefix + func_stripname "$func_relative_path_tlibdir" '' "$func_relative_path_tbindir" + func_relative_path_tcancelled=$func_stripname_result + if test -z "$func_relative_path_result"; then + func_relative_path_result=. + fi + break + ;; + *) + func_dirname $func_relative_path_tlibdir + func_relative_path_tlibdir=${func_dirname_result} + if test "x$func_relative_path_tlibdir" = x ; then + # Have to descend all the way to the root! + func_relative_path_result=../$func_relative_path_result + func_relative_path_tcancelled=$func_relative_path_tbindir + break + fi + func_relative_path_result=../$func_relative_path_result + ;; + esac + done + + # Now calculate path; take care to avoid doubling-up slashes. + func_stripname '' '/' "$func_relative_path_result" + func_relative_path_result=$func_stripname_result + func_stripname '/' '/' "$func_relative_path_tcancelled" + if test "x$func_stripname_result" != x ; then + func_relative_path_result=${func_relative_path_result}/${func_stripname_result} + fi + + # Normalisation. If bindir is libdir, return empty string, + # else relative path ending with a slash; either way, target + # file name can be directly appended. + if test ! 
-z "$func_relative_path_result"; then + func_stripname './' '' "$func_relative_path_result/" + func_relative_path_result=$func_stripname_result + fi +} + +# The name of this program: +func_dirname_and_basename "$progpath" +progname=$func_basename_result + +# Make sure we have an absolute path for reexecution: +case $progpath in + [\\/]*|[A-Za-z]:\\*) ;; + *[\\/]*) + progdir=$func_dirname_result + progdir=`cd "$progdir" && pwd` + progpath="$progdir/$progname" + ;; + *) + save_IFS="$IFS" + IFS=${PATH_SEPARATOR-:} + for progdir in $PATH; do + IFS="$save_IFS" + test -x "$progdir/$progname" && break + done + IFS="$save_IFS" + test -n "$progdir" || progdir=`pwd` + progpath="$progdir/$progname" + ;; +esac + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. +Xsed="${SED}"' -e 1s/^X//' +sed_quote_subst='s/\([`"$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution that turns a string into a regex matching for the +# string literally. +sed_make_literal_regex='s,[].[^$\\*\/],\\&,g' + +# Sed substitution that converts a w32 file name or path +# which contains forward slashes, into one that contains +# (escaped) backslashes. A very naive implementation. +lt_sed_naive_backslashify='s|\\\\*|\\|g;s|/|\\|g;s|\\|\\\\|g' + +# Re-`\' parameter expansions in output of double_quote_subst that were +# `\'-ed in input to the same. If an odd number of `\' preceded a '$' +# in input to double_quote_subst, that '$' was protected from expansion. +# Since each input `\' is now two `\'s, look for any number of runs of +# four `\'s followed by two `\'s and then a '$'. `\' that '$'. +bs='\\' +bs2='\\\\' +bs4='\\\\\\\\' +dollar='\$' +sed_double_backslash="\ + s/$bs4/&\\ +/g + s/^$bs2$dollar/$bs&/ + s/\\([^$bs]\\)$bs2$dollar/\\1$bs2$bs$dollar/g + s/\n//g" + +# Standard options: +opt_dry_run=false +opt_help=false +opt_quiet=false +opt_verbose=false +opt_warning=: + +# func_echo arg... +# Echo program name prefixed message, along with the current mode +# name if it has been set yet. +func_echo () +{ + $ECHO "$progname: ${opt_mode+$opt_mode: }$*" +} + +# func_verbose arg... +# Echo program name prefixed message in verbose mode only. +func_verbose () +{ + $opt_verbose && func_echo ${1+"$@"} + + # A bug in bash halts the script if the last line of a function + # fails when set -e is in force, so we need another command to + # work around that: + : +} + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "$*" +} + +# func_error arg... +# Echo program name prefixed message to standard error. +func_error () +{ + $ECHO "$progname: ${opt_mode+$opt_mode: }"${1+"$@"} 1>&2 +} + +# func_warning arg... +# Echo program name prefixed warning message to standard error. +func_warning () +{ + $opt_warning && $ECHO "$progname: ${opt_mode+$opt_mode: }warning: "${1+"$@"} 1>&2 + + # bash bug again: + : +} + +# func_fatal_error arg... +# Echo program name prefixed message to standard error, and exit. +func_fatal_error () +{ + func_error ${1+"$@"} + exit $EXIT_FAILURE +} + +# func_fatal_help arg... +# Echo program name prefixed message to standard error, followed by +# a help hint, and exit. +func_fatal_help () +{ + func_error ${1+"$@"} + func_fatal_error "$help" +} +help="Try \`$progname --help' for more information." 
+
+
+# func_grep expression filename
+# Check whether EXPRESSION matches any line of FILENAME, without output.
+func_grep ()
+{
+    $GREP "$1" "$2" >/dev/null 2>&1
+}
+
+
+# func_mkdir_p directory-path
+# Make sure the entire path to DIRECTORY-PATH is available.
+func_mkdir_p ()
+{
+    my_directory_path="$1"
+    my_dir_list=
+
+    if test -n "$my_directory_path" && test "$opt_dry_run" != ":"; then
+
+      # Protect directory names starting with `-'
+      case $my_directory_path in
+        -*) my_directory_path="./$my_directory_path" ;;
+      esac
+
+      # While some portion of DIR does not yet exist...
+      while test ! -d "$my_directory_path"; do
+        # ...make a list in topmost first order.  Use a colon delimited
+        # list in case some portion of path contains whitespace.
+        my_dir_list="$my_directory_path:$my_dir_list"
+
+        # If the last portion added has no slash in it, the list is done
+        case $my_directory_path in */*) ;; *) break ;; esac
+
+        # ...otherwise throw away the child directory and loop
+        my_directory_path=`$ECHO "$my_directory_path" | $SED -e "$dirname"`
+      done
+      my_dir_list=`$ECHO "$my_dir_list" | $SED 's,:*$,,'`
+
+      save_mkdir_p_IFS="$IFS"; IFS=':'
+      for my_dir in $my_dir_list; do
+        IFS="$save_mkdir_p_IFS"
+        # mkdir can fail with a `File exists' error if two processes
+        # try to create one of the directories concurrently.  Don't
+        # stop in that case!
+        $MKDIR "$my_dir" 2>/dev/null || :
+      done
+      IFS="$save_mkdir_p_IFS"
+
+      # Bail out if we (or some other process) failed to create a directory.
+      test -d "$my_directory_path" || \
+        func_fatal_error "Failed to create \`$1'"
+    fi
+}
+
+
+# func_mktempdir [string]
+# Make a temporary directory that won't clash with other running
+# libtool processes, and avoids race conditions if possible.  If
+# given, STRING is the basename for that directory.
+func_mktempdir ()
+{
+    my_template="${TMPDIR-/tmp}/${1-$progname}"
+
+    if test "$opt_dry_run" = ":"; then
+      # Return a directory name, but don't create it in dry-run mode
+      my_tmpdir="${my_template}-$$"
+    else
+
+      # If mktemp works, use that first and foremost
+      my_tmpdir=`mktemp -d "${my_template}-XXXXXXXX" 2>/dev/null`
+
+      if test ! -d "$my_tmpdir"; then
+        # Failing that, at least try and use $RANDOM to avoid a race
+        my_tmpdir="${my_template}-${RANDOM-0}$$"
+
+        save_mktempdir_umask=`umask`
+        umask 0077
+        $MKDIR "$my_tmpdir"
+        umask $save_mktempdir_umask
+      fi
+
+      # If we're not in dry-run mode, bomb out on failure
+      test -d "$my_tmpdir" || \
+        func_fatal_error "cannot create temporary directory \`$my_tmpdir'"
+    fi
+
+    $ECHO "$my_tmpdir"
+}
+
+
+# func_quote_for_eval arg
+# Aesthetically quote ARG to be evaled later.
+# This function returns two values: FUNC_QUOTE_FOR_EVAL_RESULT
+# is double-quoted, suitable for a subsequent eval, whereas
+# FUNC_QUOTE_FOR_EVAL_UNQUOTED_RESULT has merely all characters
+# which are still active within double quotes backslashified.
+func_quote_for_eval ()
+{
+    case $1 in
+      *[\\\`\"\$]*)
+        func_quote_for_eval_unquoted_result=`$ECHO "$1" | $SED "$sed_quote_subst"` ;;
+      *)
+        func_quote_for_eval_unquoted_result="$1" ;;
+    esac
+
+    case $func_quote_for_eval_unquoted_result in
+      # Double-quote args containing shell metacharacters to delay
+      # word splitting, command substitution and variable
+      # expansion for a subsequent eval.
+      # Many Bourne shells cannot handle close brackets correctly
+      # in scan sets, so we specify it separately.
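+      # (Illustrative: an argument such as 'a b$c' is first backslashified
+      # to 'a b\$c' and, containing a space, is then wrapped so that
+      # $func_quote_for_eval_result holds "a b\$c", quotes included.)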
+ *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + func_quote_for_eval_result="\"$func_quote_for_eval_unquoted_result\"" + ;; + *) + func_quote_for_eval_result="$func_quote_for_eval_unquoted_result" + esac +} + + +# func_quote_for_expand arg +# Aesthetically quote ARG to be evaled later; same as above, +# but do not quote variable references. +func_quote_for_expand () +{ + case $1 in + *[\\\`\"]*) + my_arg=`$ECHO "$1" | $SED \ + -e "$double_quote_subst" -e "$sed_double_backslash"` ;; + *) + my_arg="$1" ;; + esac + + case $my_arg in + # Double-quote args containing shell metacharacters to delay + # word splitting and command substitution for a subsequent eval. + # Many Bourne shells cannot handle close brackets correctly + # in scan sets, so we specify it separately. + *[\[\~\#\^\&\*\(\)\{\}\|\;\<\>\?\'\ \ ]*|*]*|"") + my_arg="\"$my_arg\"" + ;; + esac + + func_quote_for_expand_result="$my_arg" +} + + +# func_show_eval cmd [fail_exp] +# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. +func_show_eval () +{ + my_cmd="$1" + my_fail_exp="${2-:}" + + ${opt_silent-false} || { + func_quote_for_expand "$my_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + if ${opt_dry_run-false}; then :; else + eval "$my_cmd" + my_status=$? + if test "$my_status" -eq 0; then :; else + eval "(exit $my_status); $my_fail_exp" + fi + fi +} + + +# func_show_eval_locale cmd [fail_exp] +# Unless opt_silent is true, then output CMD. Then, if opt_dryrun is +# not true, evaluate CMD. If the evaluation of CMD fails, and FAIL_EXP +# is given, then evaluate it. Use the saved locale for evaluation. +func_show_eval_locale () +{ + my_cmd="$1" + my_fail_exp="${2-:}" + + ${opt_silent-false} || { + func_quote_for_expand "$my_cmd" + eval "func_echo $func_quote_for_expand_result" + } + + if ${opt_dry_run-false}; then :; else + eval "$lt_user_locale + $my_cmd" + my_status=$? + eval "$lt_safe_locale" + if test "$my_status" -eq 0; then :; else + eval "(exit $my_status); $my_fail_exp" + fi + fi +} + +# func_tr_sh +# Turn $1 into a string suitable for a shell variable name. +# Result is stored in $func_tr_sh_result. All characters +# not in the set a-zA-Z0-9_ are replaced with '_'. Further, +# if $1 begins with a digit, a '_' is prepended as well. +func_tr_sh () +{ + case $1 in + [0-9]* | *[!a-zA-Z0-9_]*) + func_tr_sh_result=`$ECHO "$1" | $SED 's/^\([0-9]\)/_\1/; s/[^a-zA-Z0-9_]/_/g'` + ;; + * ) + func_tr_sh_result=$1 + ;; + esac +} + + +# func_version +# Echo version message to standard output and exit. +func_version () +{ + $opt_debug + + $SED -n '/(C)/!b go + :more + /\./!{ + N + s/\n# / / + b more + } + :go + /^# '$PROGRAM' (GNU /,/# warranty; / { + s/^# // + s/^# *$// + s/\((C)\)[ 0-9,-]*\( [1-9][0-9]*\)/\1\2/ + p + }' < "$progpath" + exit $? +} + +# func_usage +# Echo short help message to standard output and exit. +func_usage () +{ + $opt_debug + + $SED -n '/^# Usage:/,/^# *.*--help/ { + s/^# // + s/^# *$// + s/\$progname/'$progname'/ + p + }' < "$progpath" + echo + $ECHO "run \`$progname --help | more' for full usage" + exit $? +} + +# func_help [NOEXIT] +# Echo long help message to standard output and exit, +# unless 'noexit' is passed as argument. 
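+# (So `func_help noexit' prints the long help text but returns to the
+# caller; the mode-specific help code further down relies on this.)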
+func_help () +{ + $opt_debug + + $SED -n '/^# Usage:/,/# Report bugs to/ { + :print + s/^# // + s/^# *$// + s*\$progname*'$progname'* + s*\$host*'"$host"'* + s*\$SHELL*'"$SHELL"'* + s*\$LTCC*'"$LTCC"'* + s*\$LTCFLAGS*'"$LTCFLAGS"'* + s*\$LD*'"$LD"'* + s/\$with_gnu_ld/'"$with_gnu_ld"'/ + s/\$automake_version/'"`(${AUTOMAKE-automake} --version) 2>/dev/null |$SED 1q`"'/ + s/\$autoconf_version/'"`(${AUTOCONF-autoconf} --version) 2>/dev/null |$SED 1q`"'/ + p + d + } + /^# .* home page:/b print + /^# General help using/b print + ' < "$progpath" + ret=$? + if test -z "$1"; then + exit $ret + fi +} + +# func_missing_arg argname +# Echo program name prefixed message to standard error and set global +# exit_cmd. +func_missing_arg () +{ + $opt_debug + + func_error "missing argument for $1." + exit_cmd=exit +} + + +# func_split_short_opt shortopt +# Set func_split_short_opt_name and func_split_short_opt_arg shell +# variables after splitting SHORTOPT after the 2nd character. +func_split_short_opt () +{ + my_sed_short_opt='1s/^\(..\).*$/\1/;q' + my_sed_short_rest='1s/^..\(.*\)$/\1/;q' + + func_split_short_opt_name=`$ECHO "$1" | $SED "$my_sed_short_opt"` + func_split_short_opt_arg=`$ECHO "$1" | $SED "$my_sed_short_rest"` +} # func_split_short_opt may be replaced by extended shell implementation + + +# func_split_long_opt longopt +# Set func_split_long_opt_name and func_split_long_opt_arg shell +# variables after splitting LONGOPT at the `=' sign. +func_split_long_opt () +{ + my_sed_long_opt='1s/^\(--[^=]*\)=.*/\1/;q' + my_sed_long_arg='1s/^--[^=]*=//' + + func_split_long_opt_name=`$ECHO "$1" | $SED "$my_sed_long_opt"` + func_split_long_opt_arg=`$ECHO "$1" | $SED "$my_sed_long_arg"` +} # func_split_long_opt may be replaced by extended shell implementation + +exit_cmd=: + + + + + +magic="%%%MAGIC variable%%%" +magic_exe="%%%MAGIC EXE variable%%%" + +# Global variables. +nonopt= +preserve_args= +lo2o="s/\\.lo\$/.${objext}/" +o2lo="s/\\.${objext}\$/.lo/" +extracted_archives= +extracted_serial=0 + +# If this variable is set in any of the actions, the command in it +# will be execed at the end. This prevents here-documents from being +# left over by shells. +exec_cmd= + +# func_append var value +# Append VALUE to the end of shell variable VAR. +func_append () +{ + eval "${1}=\$${1}\${2}" +} # func_append may be replaced by extended shell implementation + +# func_append_quoted var value +# Quote VALUE and append to the end of shell variable VAR, separated +# by a space. +func_append_quoted () +{ + func_quote_for_eval "${2}" + eval "${1}=\$${1}\\ \$func_quote_for_eval_result" +} # func_append_quoted may be replaced by extended shell implementation + + +# func_arith arithmetic-term... +func_arith () +{ + func_arith_result=`expr "${@}"` +} # func_arith may be replaced by extended shell implementation + + +# func_len string +# STRING may not start with a hyphen. +func_len () +{ + func_len_result=`expr "${1}" : ".*" 2>/dev/null || echo $max_cmd_len` +} # func_len may be replaced by extended shell implementation + + +# func_lo2o object +func_lo2o () +{ + func_lo2o_result=`$ECHO "${1}" | $SED "$lo2o"` +} # func_lo2o may be replaced by extended shell implementation + + +# func_xform libobj-or-source +func_xform () +{ + func_xform_result=`$ECHO "${1}" | $SED 's/\.[^.]*$/.lo/'` +} # func_xform may be replaced by extended shell implementation + + +# func_fatal_configuration arg... +# Echo program name prefixed message to standard error, followed by +# a configuration failure hint, and exit. 
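+# (Illustrative: calling it as
+#   func_fatal_configuration "not configured to build any kind of library"
+# prints that message, points at the $PACKAGE documentation, and exits.)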
+func_fatal_configuration () +{ + func_error ${1+"$@"} + func_error "See the $PACKAGE documentation for more information." + func_fatal_error "Fatal configuration error." +} + + +# func_config +# Display the configuration for all the tags in this script. +func_config () +{ + re_begincf='^# ### BEGIN LIBTOOL' + re_endcf='^# ### END LIBTOOL' + + # Default configuration. + $SED "1,/$re_begincf CONFIG/d;/$re_endcf CONFIG/,\$d" < "$progpath" + + # Now print the configurations for the tags. + for tagname in $taglist; do + $SED -n "/$re_begincf TAG CONFIG: $tagname\$/,/$re_endcf TAG CONFIG: $tagname\$/p" < "$progpath" + done + + exit $? +} + +# func_features +# Display the features supported by this script. +func_features () +{ + echo "host: $host" + if test "$build_libtool_libs" = yes; then + echo "enable shared libraries" + else + echo "disable shared libraries" + fi + if test "$build_old_libs" = yes; then + echo "enable static libraries" + else + echo "disable static libraries" + fi + + exit $? +} + +# func_enable_tag tagname +# Verify that TAGNAME is valid, and either flag an error and exit, or +# enable the TAGNAME tag. We also add TAGNAME to the global $taglist +# variable here. +func_enable_tag () +{ + # Global variable: + tagname="$1" + + re_begincf="^# ### BEGIN LIBTOOL TAG CONFIG: $tagname\$" + re_endcf="^# ### END LIBTOOL TAG CONFIG: $tagname\$" + sed_extractcf="/$re_begincf/,/$re_endcf/p" + + # Validate tagname. + case $tagname in + *[!-_A-Za-z0-9,/]*) + func_fatal_error "invalid tag name: $tagname" + ;; + esac + + # Don't test for the "default" C tag, as we know it's + # there but not specially marked. + case $tagname in + CC) ;; + *) + if $GREP "$re_begincf" "$progpath" >/dev/null 2>&1; then + taglist="$taglist $tagname" + + # Evaluate the configuration. Be careful to quote the path + # and the sed script, to avoid splitting on whitespace, but + # also don't use non-portable quotes within backquotes within + # quotes we have to do it in 2 steps: + extractedcf=`$SED -n -e "$sed_extractcf" < "$progpath"` + eval "$extractedcf" + else + func_error "ignoring unknown tag $tagname" + fi + ;; + esac +} + +# func_check_version_match +# Ensure that we are using m4 macros, and libtool script from the same +# release of libtool. +func_check_version_match () +{ + if test "$package_revision" != "$macro_revision"; then + if test "$VERSION" != "$macro_version"; then + if test -z "$macro_version"; then + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from an older release. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, but the +$progname: definition of this LT_INIT comes from $PACKAGE $macro_version. +$progname: You should recreate aclocal.m4 with macros from $PACKAGE $VERSION +$progname: and run autoconf again. +_LT_EOF + fi + else + cat >&2 <<_LT_EOF +$progname: Version mismatch error. This is $PACKAGE $VERSION, revision $package_revision, +$progname: but the definition of this LT_INIT comes from revision $macro_revision. +$progname: You should recreate aclocal.m4 with macros from revision $package_revision +$progname: of $PACKAGE $VERSION and run autoconf again. 
+_LT_EOF + fi + + exit $EXIT_MISMATCH + fi +} + + +# Shorthand for --mode=foo, only valid as the first argument +case $1 in +clean|clea|cle|cl) + shift; set dummy --mode clean ${1+"$@"}; shift + ;; +compile|compil|compi|comp|com|co|c) + shift; set dummy --mode compile ${1+"$@"}; shift + ;; +execute|execut|execu|exec|exe|ex|e) + shift; set dummy --mode execute ${1+"$@"}; shift + ;; +finish|finis|fini|fin|fi|f) + shift; set dummy --mode finish ${1+"$@"}; shift + ;; +install|instal|insta|inst|ins|in|i) + shift; set dummy --mode install ${1+"$@"}; shift + ;; +link|lin|li|l) + shift; set dummy --mode link ${1+"$@"}; shift + ;; +uninstall|uninstal|uninsta|uninst|unins|unin|uni|un|u) + shift; set dummy --mode uninstall ${1+"$@"}; shift + ;; +esac + + + +# Option defaults: +opt_debug=: +opt_dry_run=false +opt_config=false +opt_preserve_dup_deps=false +opt_features=false +opt_finish=false +opt_help=false +opt_help_all=false +opt_silent=: +opt_warning=: +opt_verbose=: +opt_silent=false +opt_verbose=false + + +# Parse options once, thoroughly. This comes as soon as possible in the +# script to make things like `--version' happen as quickly as we can. +{ + # this just eases exit handling + while test $# -gt 0; do + opt="$1" + shift + case $opt in + --debug|-x) opt_debug='set -x' + func_echo "enabling shell trace mode" + $opt_debug + ;; + --dry-run|--dryrun|-n) + opt_dry_run=: + ;; + --config) + opt_config=: +func_config + ;; + --dlopen|-dlopen) + optarg="$1" + opt_dlopen="${opt_dlopen+$opt_dlopen +}$optarg" + shift + ;; + --preserve-dup-deps) + opt_preserve_dup_deps=: + ;; + --features) + opt_features=: +func_features + ;; + --finish) + opt_finish=: +set dummy --mode finish ${1+"$@"}; shift + ;; + --help) + opt_help=: + ;; + --help-all) + opt_help_all=: +opt_help=': help-all' + ;; + --mode) + test $# = 0 && func_missing_arg $opt && break + optarg="$1" + opt_mode="$optarg" +case $optarg in + # Valid mode arguments: + clean|compile|execute|finish|install|link|relink|uninstall) ;; + + # Catch anything else as an error + *) func_error "invalid argument for $opt" + exit_cmd=exit + break + ;; +esac + shift + ;; + --no-silent|--no-quiet) + opt_silent=false +func_append preserve_args " $opt" + ;; + --no-warning|--no-warn) + opt_warning=false +func_append preserve_args " $opt" + ;; + --no-verbose) + opt_verbose=false +func_append preserve_args " $opt" + ;; + --silent|--quiet) + opt_silent=: +func_append preserve_args " $opt" + opt_verbose=false + ;; + --verbose|-v) + opt_verbose=: +func_append preserve_args " $opt" +opt_silent=false + ;; + --tag) + test $# = 0 && func_missing_arg $opt && break + optarg="$1" + opt_tag="$optarg" +func_append preserve_args " $opt $optarg" +func_enable_tag "$optarg" + shift + ;; + + -\?|-h) func_usage ;; + --help) func_help ;; + --version) func_version ;; + + # Separate optargs to long options: + --*=*) + func_split_long_opt "$opt" + set dummy "$func_split_long_opt_name" "$func_split_long_opt_arg" ${1+"$@"} + shift + ;; + + # Separate non-argument short options: + -\?*|-h*|-n*|-v*) + func_split_short_opt "$opt" + set dummy "$func_split_short_opt_name" "-$func_split_short_opt_arg" ${1+"$@"} + shift + ;; + + --) break ;; + -*) func_fatal_help "unrecognized option \`$opt'" ;; + *) set dummy "$opt" ${1+"$@"}; shift; break ;; + esac + done + + # Validate options: + + # save first non-option argument + if test "$#" -gt 0; then + nonopt="$opt" + shift + fi + + # preserve --debug + test "$opt_debug" = : || func_append preserve_args " --debug" + + case $host in + *cygwin* | *mingw* | 
*pw32* | *cegcc*) + # don't eliminate duplications in $postdeps and $predeps + opt_duplicate_compiler_generated_deps=: + ;; + *) + opt_duplicate_compiler_generated_deps=$opt_preserve_dup_deps + ;; + esac + + $opt_help || { + # Sanity checks first: + func_check_version_match + + if test "$build_libtool_libs" != yes && test "$build_old_libs" != yes; then + func_fatal_configuration "not configured to build any kind of library" + fi + + # Darwin sucks + eval std_shrext=\"$shrext_cmds\" + + # Only execute mode is allowed to have -dlopen flags. + if test -n "$opt_dlopen" && test "$opt_mode" != execute; then + func_error "unrecognized option \`-dlopen'" + $ECHO "$help" 1>&2 + exit $EXIT_FAILURE + fi + + # Change the help message to a mode-specific one. + generic_help="$help" + help="Try \`$progname --help --mode=$opt_mode' for more information." + } + + + # Bail if the options were screwed + $exit_cmd $EXIT_FAILURE +} + + + + +## ----------- ## +## Main. ## +## ----------- ## + +# func_lalib_p file +# True iff FILE is a libtool `.la' library or `.lo' object file. +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_lalib_p () +{ + test -f "$1" && + $SED -e 4q "$1" 2>/dev/null \ + | $GREP "^# Generated by .*$PACKAGE" > /dev/null 2>&1 +} + +# func_lalib_unsafe_p file +# True iff FILE is a libtool `.la' library or `.lo' object file. +# This function implements the same check as func_lalib_p without +# resorting to external programs. To this end, it redirects stdin and +# closes it afterwards, without saving the original file descriptor. +# As a safety measure, use it only where a negative result would be +# fatal anyway. Works if `file' does not exist. +func_lalib_unsafe_p () +{ + lalib_p=no + if test -f "$1" && test -r "$1" && exec 5<&0 <"$1"; then + for lalib_p_l in 1 2 3 4 + do + read lalib_p_line + case "$lalib_p_line" in + \#\ Generated\ by\ *$PACKAGE* ) lalib_p=yes; break;; + esac + done + exec 0<&5 5<&- + fi + test "$lalib_p" = yes +} + +# func_ltwrapper_script_p file +# True iff FILE is a libtool wrapper script +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_script_p () +{ + func_lalib_p "$1" +} + +# func_ltwrapper_executable_p file +# True iff FILE is a libtool wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_executable_p () +{ + func_ltwrapper_exec_suffix= + case $1 in + *.exe) ;; + *) func_ltwrapper_exec_suffix=.exe ;; + esac + $GREP "$magic_exe" "$1$func_ltwrapper_exec_suffix" >/dev/null 2>&1 +} + +# func_ltwrapper_scriptname file +# Assumes file is an ltwrapper_executable +# uses $file to determine the appropriate filename for a +# temporary ltwrapper_script. +func_ltwrapper_scriptname () +{ + func_dirname_and_basename "$1" "" "." + func_stripname '' '.exe' "$func_basename_result" + func_ltwrapper_scriptname_result="$func_dirname_result/$objdir/${func_stripname_result}_ltshwrapper" +} + +# func_ltwrapper_p file +# True iff FILE is a libtool wrapper script or wrapper executable +# This function is only a basic sanity check; it will hardly flush out +# determined imposters. +func_ltwrapper_p () +{ + func_ltwrapper_script_p "$1" || func_ltwrapper_executable_p "$1" +} + + +# func_execute_cmds commands fail_cmd +# Execute tilde-delimited COMMANDS. +# If FAIL_CMD is given, eval that upon failure. +# FAIL_CMD may read-access the current command in variable CMD! 
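+# (Illustrative: func_execute_cmds 'cmd1~cmd2' 'exit 1' expands and runs
+# cmd1, then cmd2, evaluating `exit 1' as soon as either of them fails.)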
+func_execute_cmds () +{ + $opt_debug + save_ifs=$IFS; IFS='~' + for cmd in $1; do + IFS=$save_ifs + eval cmd=\"$cmd\" + func_show_eval "$cmd" "${2-:}" + done + IFS=$save_ifs +} + + +# func_source file +# Source FILE, adding directory component if necessary. +# Note that it is not necessary on cygwin/mingw to append a dot to +# FILE even if both FILE and FILE.exe exist: automatic-append-.exe +# behavior happens only for exec(3), not for open(2)! Also, sourcing +# `FILE.' does not work on cygwin managed mounts. +func_source () +{ + $opt_debug + case $1 in + */* | *\\*) . "$1" ;; + *) . "./$1" ;; + esac +} + + +# func_resolve_sysroot PATH +# Replace a leading = in PATH with a sysroot. Store the result into +# func_resolve_sysroot_result +func_resolve_sysroot () +{ + func_resolve_sysroot_result=$1 + case $func_resolve_sysroot_result in + =*) + func_stripname '=' '' "$func_resolve_sysroot_result" + func_resolve_sysroot_result=$lt_sysroot$func_stripname_result + ;; + esac +} + +# func_replace_sysroot PATH +# If PATH begins with the sysroot, replace it with = and +# store the result into func_replace_sysroot_result. +func_replace_sysroot () +{ + case "$lt_sysroot:$1" in + ?*:"$lt_sysroot"*) + func_stripname "$lt_sysroot" '' "$1" + func_replace_sysroot_result="=$func_stripname_result" + ;; + *) + # Including no sysroot. + func_replace_sysroot_result=$1 + ;; + esac +} + +# func_infer_tag arg +# Infer tagged configuration to use if any are available and +# if one wasn't chosen via the "--tag" command line option. +# Only attempt this if the compiler in the base compile +# command doesn't match the default compiler. +# arg is usually of the form 'gcc ...' +func_infer_tag () +{ + $opt_debug + if test -n "$available_tags" && test -z "$tagname"; then + CC_quoted= + for arg in $CC; do + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case $@ in + # Blanks in the command may have been stripped by the calling shell, + # but not from the CC environment variable when configure was run. + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) ;; + # Blanks at the start of $base_compile will cause this to fail + # if we don't check for them as well. + *) + for z in $available_tags; do + if $GREP "^# ### BEGIN LIBTOOL TAG CONFIG: $z$" < "$progpath" > /dev/null; then + # Evaluate the configuration. + eval "`${SED} -n -e '/^# ### BEGIN LIBTOOL TAG CONFIG: '$z'$/,/^# ### END LIBTOOL TAG CONFIG: '$z'$/p' < $progpath`" + CC_quoted= + for arg in $CC; do + # Double-quote args containing other shell metacharacters. + func_append_quoted CC_quoted "$arg" + done + CC_expanded=`func_echo_all $CC` + CC_quoted_expanded=`func_echo_all $CC_quoted` + case "$@ " in + " $CC "* | "$CC "* | " $CC_expanded "* | "$CC_expanded "* | \ + " $CC_quoted"* | "$CC_quoted "* | " $CC_quoted_expanded "* | "$CC_quoted_expanded "*) + # The compiler in the base compile command matches + # the one in the tagged configuration. + # Assume this is the tagged configuration we want. + tagname=$z + break + ;; + esac + fi + done + # If $tagname still isn't set, then no tagged configuration + # was found and let the user know that the "--tag" command + # line option must be used. 
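+      # (E.g. a cross compiler that matches neither the default CC
+      # configuration nor any tagged one would end up here.)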
+ if test -z "$tagname"; then + func_echo "unable to infer tagged configuration" + func_fatal_error "specify a tag with \`--tag'" +# else +# func_verbose "using $tagname tagged configuration" + fi + ;; + esac + fi +} + + + +# func_write_libtool_object output_name pic_name nonpic_name +# Create a libtool object file (analogous to a ".la" file), +# but don't create it if we're doing a dry run. +func_write_libtool_object () +{ + write_libobj=${1} + if test "$build_libtool_libs" = yes; then + write_lobj=\'${2}\' + else + write_lobj=none + fi + + if test "$build_old_libs" = yes; then + write_oldobj=\'${3}\' + else + write_oldobj=none + fi + + $opt_dry_run || { + cat >${write_libobj}T </dev/null` + if test "$?" -eq 0 && test -n "${func_convert_core_file_wine_to_w32_tmp}"; then + func_convert_core_file_wine_to_w32_result=`$ECHO "$func_convert_core_file_wine_to_w32_tmp" | + $SED -e "$lt_sed_naive_backslashify"` + else + func_convert_core_file_wine_to_w32_result= + fi + fi +} +# end: func_convert_core_file_wine_to_w32 + + +# func_convert_core_path_wine_to_w32 ARG +# Helper function used by path conversion functions when $build is *nix, and +# $host is mingw, cygwin, or some other w32 environment. Relies on a correctly +# configured wine environment available, with the winepath program in $build's +# $PATH. Assumes ARG has no leading or trailing path separator characters. +# +# ARG is path to be converted from $build format to win32. +# Result is available in $func_convert_core_path_wine_to_w32_result. +# Unconvertible file (directory) names in ARG are skipped; if no directory names +# are convertible, then the result may be empty. +func_convert_core_path_wine_to_w32 () +{ + $opt_debug + # unfortunately, winepath doesn't convert paths, only file names + func_convert_core_path_wine_to_w32_result="" + if test -n "$1"; then + oldIFS=$IFS + IFS=: + for func_convert_core_path_wine_to_w32_f in $1; do + IFS=$oldIFS + func_convert_core_file_wine_to_w32 "$func_convert_core_path_wine_to_w32_f" + if test -n "$func_convert_core_file_wine_to_w32_result" ; then + if test -z "$func_convert_core_path_wine_to_w32_result"; then + func_convert_core_path_wine_to_w32_result="$func_convert_core_file_wine_to_w32_result" + else + func_append func_convert_core_path_wine_to_w32_result ";$func_convert_core_file_wine_to_w32_result" + fi + fi + done + IFS=$oldIFS + fi +} +# end: func_convert_core_path_wine_to_w32 + + +# func_cygpath ARGS... +# Wrapper around calling the cygpath program via LT_CYGPATH. This is used when +# when (1) $build is *nix and Cygwin is hosted via a wine environment; or (2) +# $build is MSYS and $host is Cygwin, or (3) $build is Cygwin. In case (1) or +# (2), returns the Cygwin file name or path in func_cygpath_result (input +# file name or path is assumed to be in w32 format, as previously converted +# from $build's *nix or MSYS format). In case (3), returns the w32 file name +# or path in func_cygpath_result (input file name or path is assumed to be in +# Cygwin format). Returns an empty string on error. +# +# ARGS are passed to cygpath, with the last one being the file name or path to +# be converted. +# +# Specify the absolute *nix (or w32) name to cygpath in the LT_CYGPATH +# environment variable; do not put it in $PATH. +func_cygpath () +{ + $opt_debug + if test -n "$LT_CYGPATH" && test -f "$LT_CYGPATH"; then + func_cygpath_result=`$LT_CYGPATH "$@" 2>/dev/null` + if test "$?" 
-ne 0; then + # on failure, ensure result is empty + func_cygpath_result= + fi + else + func_cygpath_result= + func_error "LT_CYGPATH is empty or specifies non-existent file: \`$LT_CYGPATH'" + fi +} +#end: func_cygpath + + +# func_convert_core_msys_to_w32 ARG +# Convert file name or path ARG from MSYS format to w32 format. Return +# result in func_convert_core_msys_to_w32_result. +func_convert_core_msys_to_w32 () +{ + $opt_debug + # awkward: cmd appends spaces to result + func_convert_core_msys_to_w32_result=`( cmd //c echo "$1" ) 2>/dev/null | + $SED -e 's/[ ]*$//' -e "$lt_sed_naive_backslashify"` +} +#end: func_convert_core_msys_to_w32 + + +# func_convert_file_check ARG1 ARG2 +# Verify that ARG1 (a file name in $build format) was converted to $host +# format in ARG2. Otherwise, emit an error message, but continue (resetting +# func_to_host_file_result to ARG1). +func_convert_file_check () +{ + $opt_debug + if test -z "$2" && test -n "$1" ; then + func_error "Could not determine host file name corresponding to" + func_error " \`$1'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback: + func_to_host_file_result="$1" + fi +} +# end func_convert_file_check + + +# func_convert_path_check FROM_PATHSEP TO_PATHSEP FROM_PATH TO_PATH +# Verify that FROM_PATH (a path in $build format) was converted to $host +# format in TO_PATH. Otherwise, emit an error message, but continue, resetting +# func_to_host_file_result to a simplistic fallback value (see below). +func_convert_path_check () +{ + $opt_debug + if test -z "$4" && test -n "$3"; then + func_error "Could not determine the host path corresponding to" + func_error " \`$3'" + func_error "Continuing, but uninstalled executables may not work." + # Fallback. This is a deliberately simplistic "conversion" and + # should not be "improved". See libtool.info. + if test "x$1" != "x$2"; then + lt_replace_pathsep_chars="s|$1|$2|g" + func_to_host_path_result=`echo "$3" | + $SED -e "$lt_replace_pathsep_chars"` + else + func_to_host_path_result="$3" + fi + fi +} +# end func_convert_path_check + + +# func_convert_path_front_back_pathsep FRONTPAT BACKPAT REPL ORIG +# Modifies func_to_host_path_result by prepending REPL if ORIG matches FRONTPAT +# and appending REPL if ORIG matches BACKPAT. +func_convert_path_front_back_pathsep () +{ + $opt_debug + case $4 in + $1 ) func_to_host_path_result="$3$func_to_host_path_result" + ;; + esac + case $4 in + $2 ) func_append func_to_host_path_result "$3" + ;; + esac +} +# end func_convert_path_front_back_pathsep + + +################################################## +# $build to $host FILE NAME CONVERSION FUNCTIONS # +################################################## +# invoked via `$to_host_file_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# Result will be available in $func_to_host_file_result. + + +# func_to_host_file ARG +# Converts the file name ARG from $build format to $host format. Return result +# in func_to_host_file_result. +func_to_host_file () +{ + $opt_debug + $to_host_file_cmd "$1" +} +# end func_to_host_file + + +# func_to_tool_file ARG LAZY +# converts the file name ARG from $build format to toolchain format. Return +# result in func_to_tool_file_result. If the conversion in use is listed +# in (the comma separated) LAZY, no conversion takes place. 
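+# (Illustrative: compile mode below invokes
+#   func_to_tool_file "$srcfile" func_convert_file_msys_to_w32
+# which leaves $srcfile untouched whenever $to_tool_file_cmd is itself
+# func_convert_file_msys_to_w32.)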
+func_to_tool_file () +{ + $opt_debug + case ,$2, in + *,"$to_tool_file_cmd",*) + func_to_tool_file_result=$1 + ;; + *) + $to_tool_file_cmd "$1" + func_to_tool_file_result=$func_to_host_file_result + ;; + esac +} +# end func_to_tool_file + + +# func_convert_file_noop ARG +# Copy ARG to func_to_host_file_result. +func_convert_file_noop () +{ + func_to_host_file_result="$1" +} +# end func_convert_file_noop + + +# func_convert_file_msys_to_w32 ARG +# Convert file name ARG from (mingw) MSYS to (mingw) w32 format; automatic +# conversion to w32 is not available inside the cwrapper. Returns result in +# func_to_host_file_result. +func_convert_file_msys_to_w32 () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_to_host_file_result="$func_convert_core_msys_to_w32_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_w32 + + +# func_convert_file_cygwin_to_w32 ARG +# Convert file name ARG from Cygwin to w32 format. Returns result in +# func_to_host_file_result. +func_convert_file_cygwin_to_w32 () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + # because $build is cygwin, we call "the" cygpath in $PATH; no need to use + # LT_CYGPATH in this case. + func_to_host_file_result=`cygpath -m "$1"` + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_cygwin_to_w32 + + +# func_convert_file_nix_to_w32 ARG +# Convert file name ARG from *nix to w32 format. Requires a wine environment +# and a working winepath. Returns result in func_to_host_file_result. +func_convert_file_nix_to_w32 () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + func_convert_core_file_wine_to_w32 "$1" + func_to_host_file_result="$func_convert_core_file_wine_to_w32_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_w32 + + +# func_convert_file_msys_to_cygwin ARG +# Convert file name ARG from MSYS to Cygwin format. Requires LT_CYGPATH set. +# Returns result in func_to_host_file_result. +func_convert_file_msys_to_cygwin () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + func_convert_core_msys_to_w32 "$1" + func_cygpath -u "$func_convert_core_msys_to_w32_result" + func_to_host_file_result="$func_cygpath_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_msys_to_cygwin + + +# func_convert_file_nix_to_cygwin ARG +# Convert file name ARG from *nix to Cygwin format. Requires Cygwin installed +# in a wine environment, working winepath, and LT_CYGPATH set. Returns result +# in func_to_host_file_result. +func_convert_file_nix_to_cygwin () +{ + $opt_debug + func_to_host_file_result="$1" + if test -n "$1"; then + # convert from *nix to w32, then use cygpath to convert from w32 to cygwin. + func_convert_core_file_wine_to_w32 "$1" + func_cygpath -u "$func_convert_core_file_wine_to_w32_result" + func_to_host_file_result="$func_cygpath_result" + fi + func_convert_file_check "$1" "$func_to_host_file_result" +} +# end func_convert_file_nix_to_cygwin + + +############################################# +# $build to $host PATH CONVERSION FUNCTIONS # +############################################# +# invoked via `$to_host_path_cmd ARG' +# +# In each case, ARG is the path to be converted from $build to $host format. +# The result will be available in $func_to_host_path_result. 
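+# (For example, under a cygwin-to-w32 conversion a path such as
+# "/usr/bin:/usr/lib" would typically come back as something like
+# "C:\cygwin\usr\bin;C:\cygwin\usr\lib".)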
+#
+# Path separators are also converted from $build format to $host format. If
+# ARG begins or ends with a path separator character, it is preserved (but
+# converted to $host format) on output.
+#
+# All path conversion functions are named using the following convention:
+#   file name conversion function    : func_convert_file_X_to_Y ()
+#   path conversion function         : func_convert_path_X_to_Y ()
+# where, for any given $build/$host combination the 'X_to_Y' value is the
+# same.  If conversion functions are added for new $build/$host combinations,
+# the two new functions must follow this pattern, or func_init_to_host_path_cmd
+# will break.
+
+
+# func_init_to_host_path_cmd
+# Ensures that function "pointer" variable $to_host_path_cmd is set to the
+# appropriate value, based on the value of $to_host_file_cmd.
+to_host_path_cmd=
+func_init_to_host_path_cmd ()
+{
+  $opt_debug
+  if test -z "$to_host_path_cmd"; then
+    func_stripname 'func_convert_file_' '' "$to_host_file_cmd"
+    to_host_path_cmd="func_convert_path_${func_stripname_result}"
+  fi
+}
+
+
+# func_to_host_path ARG
+# Converts the path ARG from $build format to $host format. Return result
+# in func_to_host_path_result.
+func_to_host_path ()
+{
+  $opt_debug
+  func_init_to_host_path_cmd
+  $to_host_path_cmd "$1"
+}
+# end func_to_host_path
+
+
+# func_convert_path_noop ARG
+# Copy ARG to func_to_host_path_result.
+func_convert_path_noop ()
+{
+  func_to_host_path_result="$1"
+}
+# end func_convert_path_noop
+
+
+# func_convert_path_msys_to_w32 ARG
+# Convert path ARG from (mingw) MSYS to (mingw) w32 format; automatic
+# conversion to w32 is not available inside the cwrapper.  Returns result in
+# func_to_host_path_result.
+func_convert_path_msys_to_w32 ()
+{
+  $opt_debug
+  func_to_host_path_result="$1"
+  if test -n "$1"; then
+    # Remove leading and trailing path separator characters from ARG.  MSYS
+    # behavior is inconsistent here; cygpath turns them into '.;' and ';.';
+    # and winepath ignores them completely.
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
+    func_to_host_path_result="$func_convert_core_msys_to_w32_result"
+    func_convert_path_check : ";" \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+  fi
+}
+# end func_convert_path_msys_to_w32
+
+
+# func_convert_path_cygwin_to_w32 ARG
+# Convert path ARG from Cygwin to w32 format.  Returns result in
+# func_to_host_path_result.
+func_convert_path_cygwin_to_w32 ()
+{
+  $opt_debug
+  func_to_host_path_result="$1"
+  if test -n "$1"; then
+    # See func_convert_path_msys_to_w32:
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_to_host_path_result=`cygpath -m -p "$func_to_host_path_tmp1"`
+    func_convert_path_check : ";" \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+  fi
+}
+# end func_convert_path_cygwin_to_w32
+
+
+# func_convert_path_nix_to_w32 ARG
+# Convert path ARG from *nix to w32 format.  Requires a wine environment and
+# a working winepath. Returns result in func_to_host_path_result.
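+# (Illustrative: with a default wine prefix, "/foo:/bar" would typically
+# be returned as something like "Z:\foo;Z:\bar".)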
+func_convert_path_nix_to_w32 ()
+{
+  $opt_debug
+  func_to_host_path_result="$1"
+  if test -n "$1"; then
+    # See func_convert_path_msys_to_w32:
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
+    func_to_host_path_result="$func_convert_core_path_wine_to_w32_result"
+    func_convert_path_check : ";" \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" ";" "$1"
+  fi
+}
+# end func_convert_path_nix_to_w32
+
+
+# func_convert_path_msys_to_cygwin ARG
+# Convert path ARG from MSYS to Cygwin format.  Requires LT_CYGPATH set.
+# Returns result in func_to_host_path_result.
+func_convert_path_msys_to_cygwin ()
+{
+  $opt_debug
+  func_to_host_path_result="$1"
+  if test -n "$1"; then
+    # See func_convert_path_msys_to_w32:
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_msys_to_w32 "$func_to_host_path_tmp1"
+    func_cygpath -u -p "$func_convert_core_msys_to_w32_result"
+    func_to_host_path_result="$func_cygpath_result"
+    func_convert_path_check : : \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" : "$1"
+  fi
+}
+# end func_convert_path_msys_to_cygwin
+
+
+# func_convert_path_nix_to_cygwin ARG
+# Convert path ARG from *nix to Cygwin format.  Requires Cygwin installed in
+# a wine environment, working winepath, and LT_CYGPATH set.  Returns result in
+# func_to_host_path_result.
+func_convert_path_nix_to_cygwin ()
+{
+  $opt_debug
+  func_to_host_path_result="$1"
+  if test -n "$1"; then
+    # Remove leading and trailing path separator characters from
+    # ARG.  msys behavior is inconsistent here, cygpath turns them
+    # into '.;' and ';.', and winepath ignores them completely.
+    func_stripname : : "$1"
+    func_to_host_path_tmp1=$func_stripname_result
+    func_convert_core_path_wine_to_w32 "$func_to_host_path_tmp1"
+    func_cygpath -u -p "$func_convert_core_path_wine_to_w32_result"
+    func_to_host_path_result="$func_cygpath_result"
+    func_convert_path_check : : \
+      "$func_to_host_path_tmp1" "$func_to_host_path_result"
+    func_convert_path_front_back_pathsep ":*" "*:" : "$1"
+  fi
+}
+# end func_convert_path_nix_to_cygwin
+
+
+# func_mode_compile arg...
+func_mode_compile ()
+{
+    $opt_debug
+    # Get the compilation command and the source file.
+    base_compile=
+    srcfile="$nonopt"  #  always keep a non-empty value in "srcfile"
+    suppress_opt=yes
+    suppress_output=
+    arg_mode=normal
+    libobj=
+    later=
+    pie_flag=
+
+    for arg
+    do
+      case $arg_mode in
+      arg  )
+        # do not "continue".  Instead, add this to base_compile
+        lastarg="$arg"
+        arg_mode=normal
+        ;;
+
+      target )
+        libobj="$arg"
+        arg_mode=normal
+        continue
+        ;;
+
+      normal )
+        # Accept any command-line options.
+        case $arg in
+        -o)
+          test -n "$libobj" && \
+            func_fatal_error "you cannot specify \`-o' more than once"
+          arg_mode=target
+          continue
+          ;;
+
+        -pie | -fpie | -fPIE)
+          func_append pie_flag " $arg"
+          continue
+          ;;
+
+        -shared | -static | -prefer-pic | -prefer-non-pic)
+          func_append later " $arg"
+          continue
+          ;;
+
+        -no-suppress)
+          suppress_opt=no
+          continue
+          ;;
+
+        -Xcompiler)
+          arg_mode=arg  #  the next one goes into the "base_compile" arg list
+          continue      #  The current "srcfile" will either be retained or
+          ;;            #  replaced later.  I would guess that would be a bug.
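+        # (Illustrative: an argument such as -Wc,-O2,-pipe is split on the
+        # commas by the next arm and appends " -O2 -pipe" to $base_compile.)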
+
+        -Wc,*)
+          func_stripname '-Wc,' '' "$arg"
+          args=$func_stripname_result
+          lastarg=
+          save_ifs="$IFS"; IFS=','
+          for arg in $args; do
+            IFS="$save_ifs"
+            func_append_quoted lastarg "$arg"
+          done
+          IFS="$save_ifs"
+          func_stripname ' ' '' "$lastarg"
+          lastarg=$func_stripname_result
+
+          # Add the arguments to base_compile.
+          func_append base_compile " $lastarg"
+          continue
+          ;;
+
+        *)
+          # Accept the current argument as the source file.
+          # The previous "srcfile" becomes the current argument.
+          #
+          lastarg="$srcfile"
+          srcfile="$arg"
+          ;;
+        esac  #  case $arg
+        ;;
+      esac    #  case $arg_mode
+
+      # Aesthetically quote the previous argument.
+      func_append_quoted base_compile "$lastarg"
+    done # for arg
+
+    case $arg_mode in
+    arg)
+      func_fatal_error "you must specify an argument for -Xcompiler"
+      ;;
+    target)
+      func_fatal_error "you must specify a target with \`-o'"
+      ;;
+    *)
+      # Get the name of the library object.
+      test -z "$libobj" && {
+        func_basename "$srcfile"
+        libobj="$func_basename_result"
+      }
+      ;;
+    esac
+
+    # Recognize several different file suffixes.
+    # If the user specifies -o file.o, it is replaced with file.lo
+    case $libobj in
+    *.[cCFSifmso] | \
+    *.ada | *.adb | *.ads | *.asm | \
+    *.c++ | *.cc | *.ii | *.class | *.cpp | *.cxx | \
+    *.[fF][09]? | *.for | *.java | *.go | *.obj | *.sx | *.cu | *.cup)
+      func_xform "$libobj"
+      libobj=$func_xform_result
+      ;;
+    esac
+
+    case $libobj in
+    *.lo) func_lo2o "$libobj"; obj=$func_lo2o_result ;;
+    *)
+      func_fatal_error "cannot determine name of library object from \`$libobj'"
+      ;;
+    esac
+
+    func_infer_tag $base_compile
+
+    for arg in $later; do
+      case $arg in
+      -shared)
+        test "$build_libtool_libs" != yes && \
+          func_fatal_configuration "cannot build a shared library"
+        build_old_libs=no
+        continue
+        ;;
+
+      -static)
+        build_libtool_libs=no
+        build_old_libs=yes
+        continue
+        ;;
+
+      -prefer-pic)
+        pic_mode=yes
+        continue
+        ;;
+
+      -prefer-non-pic)
+        pic_mode=no
+        continue
+        ;;
+      esac
+    done
+
+    func_quote_for_eval "$libobj"
+    test "X$libobj" != "X$func_quote_for_eval_result" \
+      && $ECHO "X$libobj" | $GREP '[]~#^*{};<>?"'"'"' &()|`$[]' \
+      && func_warning "libobj name \`$libobj' may not contain shell special characters."
+    func_dirname_and_basename "$obj" "/" ""
+    objname="$func_basename_result"
+    xdir="$func_dirname_result"
+    lobj=${xdir}$objdir/$objname
+
+    test -z "$base_compile" && \
+      func_fatal_help "you must specify a compilation command"
+
+    # Delete any leftover library objects.
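+    # (E.g. for srcfile foo.c and the usual $objdir of `.libs', the list
+    # below is "foo.o .libs/foo.o foo.lo foo.loT" when static libraries
+    # are also being built.)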
+ if test "$build_old_libs" = yes; then + removelist="$obj $lobj $libobj ${libobj}T" + else + removelist="$lobj $libobj ${libobj}T" + fi + + # On Cygwin there's no "real" PIC flag so we must build both object types + case $host_os in + cygwin* | mingw* | pw32* | os2* | cegcc*) + pic_mode=default + ;; + esac + if test "$pic_mode" = no && test "$deplibs_check_method" != pass_all; then + # non-PIC code in shared libraries is not supported + pic_mode=default + fi + + # Calculate the filename of the output object if compiler does + # not support -o with -c + if test "$compiler_c_o" = no; then + output_obj=`$ECHO "$srcfile" | $SED 's%^.*/%%; s%\.[^.]*$%%'`.${objext} + lockfile="$output_obj.lock" + else + output_obj= + need_locks=no + lockfile= + fi + + # Lock this critical section if it is needed + # We use this script file to make the link, it avoids creating a new file + if test "$need_locks" = yes; then + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + elif test "$need_locks" = warn; then + if test -f "$lockfile"; then + $ECHO "\ +*** ERROR, $lockfile exists and contains: +`cat $lockfile 2>/dev/null` + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + func_append removelist " $output_obj" + $ECHO "$srcfile" > "$lockfile" + fi + + $opt_dry_run || $RM $removelist + func_append removelist " $lockfile" + trap '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' 1 2 15 + + func_to_tool_file "$srcfile" func_convert_file_msys_to_w32 + srcfile=$func_to_tool_file_result + func_quote_for_eval "$srcfile" + qsrcfile=$func_quote_for_eval_result + + # Only build a PIC object if we are building libtool libraries. + if test "$build_libtool_libs" = yes; then + # Without this assignment, base_compile gets emptied. + fbsd_hideous_sh_bug=$base_compile + + if test "$pic_mode" != no; then + command="$base_compile $qsrcfile $pic_flag" + else + # Don't build PIC code + command="$base_compile $qsrcfile" + fi + + func_mkdir_p "$xdir$objdir" + + if test -z "$output_obj"; then + # Place PIC objects in $objdir + func_append command " -o $lobj" + fi + + func_show_eval_locale "$command" \ + 'test -n "$output_obj" && $RM $removelist; exit $EXIT_FAILURE' + + if test "$need_locks" = warn && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed, then go on to compile the next one + if test -n "$output_obj" && test "X$output_obj" != "X$lobj"; then + func_show_eval '$MV "$output_obj" "$lobj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + + # Allow error messages only from the first compilation. 
+ if test "$suppress_opt" = yes; then + suppress_output=' >/dev/null 2>&1' + fi + fi + + # Only build a position-dependent object if we build old libraries. + if test "$build_old_libs" = yes; then + if test "$pic_mode" != yes; then + # Don't build PIC code + command="$base_compile $qsrcfile$pie_flag" + else + command="$base_compile $qsrcfile $pic_flag" + fi + if test "$compiler_c_o" = yes; then + func_append command " -o $obj" + fi + + # Suppress compiler output if we already did a PIC compilation. + func_append command "$suppress_output" + func_show_eval_locale "$command" \ + '$opt_dry_run || $RM $removelist; exit $EXIT_FAILURE' + + if test "$need_locks" = warn && + test "X`cat $lockfile 2>/dev/null`" != "X$srcfile"; then + $ECHO "\ +*** ERROR, $lockfile contains: +`cat $lockfile 2>/dev/null` + +but it should contain: +$srcfile + +This indicates that another process is trying to use the same +temporary object file, and libtool could not work around it because +your compiler does not support \`-c' and \`-o' together. If you +repeat this compilation, it may succeed, by chance, but you had better +avoid parallel builds (make -j) in this platform, or get a better +compiler." + + $opt_dry_run || $RM $removelist + exit $EXIT_FAILURE + fi + + # Just move the object if needed + if test -n "$output_obj" && test "X$output_obj" != "X$obj"; then + func_show_eval '$MV "$output_obj" "$obj"' \ + 'error=$?; $opt_dry_run || $RM $removelist; exit $error' + fi + fi + + $opt_dry_run || { + func_write_libtool_object "$libobj" "$objdir/$objname" "$objname" + + # Unlock the critical section if it was locked + if test "$need_locks" != no; then + removelist=$lockfile + $RM "$lockfile" + fi + } + + exit $EXIT_SUCCESS +} + +$opt_help || { + test "$opt_mode" = compile && func_mode_compile ${1+"$@"} +} + +func_mode_help () +{ + # We need to display help for each of the modes. + case $opt_mode in + "") + # Generic help is extracted from the usage comments + # at the start of this file. + func_help + ;; + + clean) + $ECHO \ +"Usage: $progname [OPTION]... --mode=clean RM [RM-OPTION]... FILE... + +Remove files from the build directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, object or program, all the files associated +with it are deleted. Otherwise, only FILE itself is deleted using RM." + ;; + + compile) + $ECHO \ +"Usage: $progname [OPTION]... --mode=compile COMPILE-COMMAND... SOURCEFILE + +Compile a source file into a libtool library object. + +This mode accepts the following additional options: + + -o OUTPUT-FILE set the output file name to OUTPUT-FILE + -no-suppress do not suppress compiler output for multiple passes + -prefer-pic try to build PIC objects only + -prefer-non-pic try to build non-PIC objects only + -shared do not build a \`.o' file suitable for static linking + -static only build a \`.o' file suitable for static linking + -Wc,FLAG pass FLAG directly to the compiler + +COMPILE-COMMAND is a command to be used in creating a \`standard' object file +from the given SOURCEFILE. + +The output file name is determined by removing the directory component from +SOURCEFILE, then substituting the C source code suffix \`.c' with the +library object suffix, \`.lo'." + ;; + + execute) + $ECHO \ +"Usage: $progname [OPTION]... --mode=execute COMMAND [ARGS]... + +Automatically set library path, then run a program. 
+ +This mode accepts the following additional options: + + -dlopen FILE add the directory containing FILE to the library path + +This mode sets the library path environment variable according to \`-dlopen' +flags. + +If any of the ARGS are libtool executable wrappers, then they are translated +into their corresponding uninstalled binary, and any of their required library +directories are added to the library path. + +Then, COMMAND is executed, with ARGS as arguments." + ;; + + finish) + $ECHO \ +"Usage: $progname [OPTION]... --mode=finish [LIBDIR]... + +Complete the installation of libtool libraries. + +Each LIBDIR is a directory that contains libtool libraries. + +The commands that this mode executes may require superuser privileges. Use +the \`--dry-run' option if you just want to see what would be executed." + ;; + + install) + $ECHO \ +"Usage: $progname [OPTION]... --mode=install INSTALL-COMMAND... + +Install executables or libraries. + +INSTALL-COMMAND is the installation command. The first component should be +either the \`install' or \`cp' program. + +The following components of INSTALL-COMMAND are treated specially: + + -inst-prefix-dir PREFIX-DIR Use PREFIX-DIR as a staging area for installation + +The rest of the components are interpreted as arguments to that command (only +BSD-compatible install options are recognized)." + ;; + + link) + $ECHO \ +"Usage: $progname [OPTION]... --mode=link LINK-COMMAND... + +Link object files or libraries together to form another library, or to +create an executable program. + +LINK-COMMAND is a command using the C compiler that you would use to create +a program from several object files. + +The following components of LINK-COMMAND are treated specially: + + -all-static do not do any dynamic linking at all + -avoid-version do not add a version suffix if possible + -bindir BINDIR specify path to binaries directory (for systems where + libraries must be found in the PATH setting at runtime) + -dlopen FILE \`-dlpreopen' FILE if it cannot be dlopened at runtime + -dlpreopen FILE link in FILE and add its symbols to lt_preloaded_symbols + -export-dynamic allow symbols from OUTPUT-FILE to be resolved with dlsym(3) + -export-symbols SYMFILE + try to export only the symbols listed in SYMFILE + -export-symbols-regex REGEX + try to export only the symbols matching REGEX + -LLIBDIR search LIBDIR for required installed libraries + -lNAME OUTPUT-FILE requires the installed library libNAME + -module build a library that can dlopened + -no-fast-install disable the fast-install mode + -no-install link a not-installable executable + -no-undefined declare that a library does not refer to external symbols + -o OUTPUT-FILE create OUTPUT-FILE from the specified objects + -objectlist FILE Use a list of object files found in FILE to specify objects + -precious-files-regex REGEX + don't remove output files matching REGEX + -release RELEASE specify package release information + -rpath LIBDIR the created library will eventually be installed in LIBDIR + -R[ ]LIBDIR add LIBDIR to the runtime path of programs and libraries + -shared only do dynamic linking of libtool libraries + -shrext SUFFIX override the standard shared library file extension + -static do not do any dynamic linking of uninstalled libtool libraries + -static-libtool-libs + do not do any dynamic linking of libtool libraries + -version-info CURRENT[:REVISION[:AGE]] + specify library version info [each variable defaults to 0] + -weak LIBNAME declare that the target provides the LIBNAME interface + -Wc,FLAG + 
-Xcompiler FLAG pass linker-specific FLAG directly to the compiler + -Wl,FLAG + -Xlinker FLAG pass linker-specific FLAG directly to the linker + -XCClinker FLAG pass link-specific FLAG to the compiler driver (CC) + +All other options (arguments beginning with \`-') are ignored. + +Every other argument is treated as a filename. Files ending in \`.la' are +treated as uninstalled libtool libraries, other files are standard or library +object files. + +If the OUTPUT-FILE ends in \`.la', then a libtool library is created, +only library objects (\`.lo' files) may be specified, and \`-rpath' is +required, except when creating a convenience library. + +If OUTPUT-FILE ends in \`.a' or \`.lib', then a standard library is created +using \`ar' and \`ranlib', or on Windows using \`lib'. + +If OUTPUT-FILE ends in \`.lo' or \`.${objext}', then a reloadable object file +is created, otherwise an executable program is created." + ;; + + uninstall) + $ECHO \ +"Usage: $progname [OPTION]... --mode=uninstall RM [RM-OPTION]... FILE... + +Remove libraries from an installation directory. + +RM is the name of the program to use to delete files associated with each FILE +(typically \`/bin/rm'). RM-OPTIONS are options (such as \`-f') to be passed +to RM. + +If FILE is a libtool library, all the files associated with it are deleted. +Otherwise, only FILE itself is deleted using RM." + ;; + + *) + func_fatal_help "invalid operation mode \`$opt_mode'" + ;; + esac + + echo + $ECHO "Try \`$progname --help' for more information about other modes." +} + +# Now that we've collected a possible --mode arg, show help if necessary +if $opt_help; then + if test "$opt_help" = :; then + func_mode_help + else + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + func_mode_help + done + } | sed -n '1p; 2,$s/^Usage:/ or: /p' + { + func_help noexit + for opt_mode in compile link execute install finish uninstall clean; do + echo + func_mode_help + done + } | + sed '1d + /^When reporting/,/^Report/{ + H + d + } + $x + /information about other modes/d + /more detailed .*MODE/d + s/^Usage:.*--mode=\([^ ]*\) .*/Description of \1 mode:/' + fi + exit $? +fi + + +# func_mode_execute arg... +func_mode_execute () +{ + $opt_debug + # The first argument is the command name. + cmd="$nonopt" + test -z "$cmd" && \ + func_fatal_help "you must specify a COMMAND" + + # Handle -dlopen flags immediately. + for file in $opt_dlopen; do + test -f "$file" \ + || func_fatal_help "\`$file' is not a file" + + dir= + case $file in + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$lib' is not a valid libtool archive" + + # Read the libtool library. + dlname= + library_names= + func_source "$file" + + # Skip this library if it cannot be dlopened. + if test -z "$dlname"; then + # Warn if it was a shared library. + test -n "$library_names" && \ + func_warning "\`$file' was not linked with \`-export-dynamic'" + continue + fi + + func_dirname "$file" "" "." + dir="$func_dirname_result" + + if test -f "$dir/$objdir/$dlname"; then + func_append dir "/$objdir" + else + if test ! -f "$dir/$dlname"; then + func_fatal_error "cannot find \`$dlname' in \`$dir' or \`$dir/$objdir'" + fi + fi + ;; + + *.lo) + # Just add the directory containing the .lo file. + func_dirname "$file" "" "." 
+ dir="$func_dirname_result" + ;; + + *) + func_warning "\`-dlopen' is ignored for non-libtool libraries and objects" + continue + ;; + esac + + # Get the absolute pathname. + absdir=`cd "$dir" && pwd` + test -n "$absdir" && dir="$absdir" + + # Now add the directory to shlibpath_var. + if eval "test -z \"\$$shlibpath_var\""; then + eval "$shlibpath_var=\"\$dir\"" + else + eval "$shlibpath_var=\"\$dir:\$$shlibpath_var\"" + fi + done + + # This variable tells wrapper scripts just to set shlibpath_var + # rather than running their programs. + libtool_execute_magic="$magic" + + # Check if any of the arguments is a wrapper script. + args= + for file + do + case $file in + -* | *.la | *.lo ) ;; + *) + # Do a test to see if this is really a libtool program. + if func_ltwrapper_script_p "$file"; then + func_source "$file" + # Transform arg to wrapped name. + file="$progdir/$program" + elif func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + func_source "$func_ltwrapper_scriptname_result" + # Transform arg to wrapped name. + file="$progdir/$program" + fi + ;; + esac + # Quote arguments (to preserve shell metacharacters). + func_append_quoted args "$file" + done + + if test "X$opt_dry_run" = Xfalse; then + if test -n "$shlibpath_var"; then + # Export the shlibpath_var. + eval "export $shlibpath_var" + fi + + # Restore saved environment variables + for lt_var in LANG LANGUAGE LC_ALL LC_CTYPE LC_COLLATE LC_MESSAGES + do + eval "if test \"\${save_$lt_var+set}\" = set; then + $lt_var=\$save_$lt_var; export $lt_var + else + $lt_unset $lt_var + fi" + done + + # Now prepare to actually exec the command. + exec_cmd="\$cmd$args" + else + # Display what would be done. + if test -n "$shlibpath_var"; then + eval "\$ECHO \"\$shlibpath_var=\$$shlibpath_var\"" + echo "export $shlibpath_var" + fi + $ECHO "$cmd$args" + exit $EXIT_SUCCESS + fi +} + +test "$opt_mode" = execute && func_mode_execute ${1+"$@"} + + +# func_mode_finish arg... +func_mode_finish () +{ + $opt_debug + libs= + libdirs= + admincmds= + + for opt in "$nonopt" ${1+"$@"} + do + if test -d "$opt"; then + func_append libdirs " $opt" + + elif test -f "$opt"; then + if func_lalib_unsafe_p "$opt"; then + func_append libs " $opt" + else + func_warning "\`$opt' is not a valid libtool archive" + fi + + else + func_fatal_error "invalid argument \`$opt'" + fi + done + + if test -n "$libs"; then + if test -n "$lt_sysroot"; then + sysroot_regex=`$ECHO "$lt_sysroot" | $SED "$sed_make_literal_regex"` + sysroot_cmd="s/\([ ']\)$sysroot_regex/\1/g;" + else + sysroot_cmd= + fi + + # Remove sysroot references + if $opt_dry_run; then + for lib in $libs; do + echo "removing references to $lt_sysroot and \`=' prefixes from $lib" + done + else + tmpdir=`func_mktempdir` + for lib in $libs; do + sed -e "${sysroot_cmd} s/\([ ']-[LR]\)=/\1/g; s/\([ ']\)=/\1/g" $lib \ + > $tmpdir/tmp-la + mv -f $tmpdir/tmp-la $lib + done + ${RM}r "$tmpdir" + fi + fi + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + for libdir in $libdirs; do + if test -n "$finish_cmds"; then + # Do each command in the finish commands. + func_execute_cmds "$finish_cmds" 'admincmds="$admincmds +'"$cmd"'"' + fi + if test -n "$finish_eval"; then + # Do the single finish_eval. + eval cmds=\"$finish_eval\" + $opt_dry_run || eval "$cmds" || func_append admincmds " + $cmds" + fi + done + fi + + # Exit here if they wanted silent mode. 
+ $opt_silent && exit $EXIT_SUCCESS + + if test -n "$finish_cmds$finish_eval" && test -n "$libdirs"; then + echo "----------------------------------------------------------------------" + echo "Libraries have been installed in:" + for libdir in $libdirs; do + $ECHO " $libdir" + done + echo + echo "If you ever happen to want to link against installed libraries" + echo "in a given directory, LIBDIR, you must either use libtool, and" + echo "specify the full pathname of the library, or use the \`-LLIBDIR'" + echo "flag during linking and do at least one of the following:" + if test -n "$shlibpath_var"; then + echo " - add LIBDIR to the \`$shlibpath_var' environment variable" + echo " during execution" + fi + if test -n "$runpath_var"; then + echo " - add LIBDIR to the \`$runpath_var' environment variable" + echo " during linking" + fi + if test -n "$hardcode_libdir_flag_spec"; then + libdir=LIBDIR + eval flag=\"$hardcode_libdir_flag_spec\" + + $ECHO " - use the \`$flag' linker flag" + fi + if test -n "$admincmds"; then + $ECHO " - have your system administrator run these commands:$admincmds" + fi + if test -f /etc/ld.so.conf; then + echo " - have your system administrator add LIBDIR to \`/etc/ld.so.conf'" + fi + echo + + echo "See any operating system documentation about shared libraries for" + case $host in + solaris2.[6789]|solaris2.1[0-9]) + echo "more information, such as the ld(1), crle(1) and ld.so(8) manual" + echo "pages." + ;; + *) + echo "more information, such as the ld(1) and ld.so(8) manual pages." + ;; + esac + echo "----------------------------------------------------------------------" + fi + exit $EXIT_SUCCESS +} + +test "$opt_mode" = finish && func_mode_finish ${1+"$@"} + + +# func_mode_install arg... +func_mode_install () +{ + $opt_debug + # There may be an optional sh(1) argument at the beginning of + # install_prog (especially on Windows NT). + if test "$nonopt" = "$SHELL" || test "$nonopt" = /bin/sh || + # Allow the use of GNU shtool's install command. + case $nonopt in *shtool*) :;; *) false;; esac; then + # Aesthetically quote it. + func_quote_for_eval "$nonopt" + install_prog="$func_quote_for_eval_result " + arg=$1 + shift + else + install_prog= + arg=$nonopt + fi + + # The real first argument should be the name of the installation program. + # Aesthetically quote it. + func_quote_for_eval "$arg" + func_append install_prog "$func_quote_for_eval_result" + install_shared_prog=$install_prog + case " $install_prog " in + *[\\\ /]cp\ *) install_cp=: ;; + *) install_cp=false ;; + esac + + # We need to accept at least all the BSD install flags. + dest= + files= + opts= + prev= + install_type= + isdir=no + stripme= + no_mode=: + for arg + do + arg2= + if test -n "$dest"; then + func_append files " $dest" + dest=$arg + continue + fi + + case $arg in + -d) isdir=yes ;; + -f) + if $install_cp; then :; else + prev=$arg + fi + ;; + -g | -m | -o) + prev=$arg + ;; + -s) + stripme=" -s" + continue + ;; + -*) + ;; + *) + # If the previous option needed an argument, then skip it. + if test -n "$prev"; then + if test "x$prev" = x-m && test -n "$install_override_mode"; then + arg2=$install_override_mode + no_mode=false + fi + prev= + else + dest=$arg + continue + fi + ;; + esac + + # Aesthetically quote the argument. 
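+ # (Illustrative: a filename argument containing spaces or other shell
+ # metacharacters is re-quoted here so that the later eval of the
+ # assembled install command still sees it as a single word.)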
+ func_quote_for_eval "$arg" + func_append install_prog " $func_quote_for_eval_result" + if test -n "$arg2"; then + func_quote_for_eval "$arg2" + fi + func_append install_shared_prog " $func_quote_for_eval_result" + done + + test -z "$install_prog" && \ + func_fatal_help "you must specify an install program" + + test -n "$prev" && \ + func_fatal_help "the \`$prev' option requires an argument" + + if test -n "$install_override_mode" && $no_mode; then + if $install_cp; then :; else + func_quote_for_eval "$install_override_mode" + func_append install_shared_prog " -m $func_quote_for_eval_result" + fi + fi + + if test -z "$files"; then + if test -z "$dest"; then + func_fatal_help "no file or destination specified" + else + func_fatal_help "you must specify a destination" + fi + fi + + # Strip any trailing slash from the destination. + func_stripname '' '/' "$dest" + dest=$func_stripname_result + + # Check to see that the destination is a directory. + test -d "$dest" && isdir=yes + if test "$isdir" = yes; then + destdir="$dest" + destname= + else + func_dirname_and_basename "$dest" "" "." + destdir="$func_dirname_result" + destname="$func_basename_result" + + # Not a directory, so check to see that there is only one file specified. + set dummy $files; shift + test "$#" -gt 1 && \ + func_fatal_help "\`$dest' is not a directory" + fi + case $destdir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + for file in $files; do + case $file in + *.lo) ;; + *) + func_fatal_help "\`$destdir' must be an absolute directory name" + ;; + esac + done + ;; + esac + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + staticlibs= + future_libdirs= + current_libdirs= + for file in $files; do + + # Do each installation. + case $file in + *.$libext) + # Do the static libraries later. + func_append staticlibs " $file" + ;; + + *.la) + func_resolve_sysroot "$file" + file=$func_resolve_sysroot_result + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$file" \ + || func_fatal_help "\`$file' is not a valid libtool archive" + + library_names= + old_library= + relink_command= + func_source "$file" + + # Add the libdir to current_libdirs if it is the destination. + if test "X$destdir" = "X$libdir"; then + case "$current_libdirs " in + *" $libdir "*) ;; + *) func_append current_libdirs " $libdir" ;; + esac + else + # Note the libdir as a future libdir. + case "$future_libdirs " in + *" $libdir "*) ;; + *) func_append future_libdirs " $libdir" ;; + esac + fi + + func_dirname "$file" "/" "" + dir="$func_dirname_result" + func_append dir "$objdir" + + if test -n "$relink_command"; then + # Determine the prefix the user has applied to our future dir. + inst_prefix_dir=`$ECHO "$destdir" | $SED -e "s%$libdir\$%%"` + + # Don't allow the user to place us outside of our expected + # location b/c this prevents finding dependent libraries that + # are installed to the same prefix. + # At present, this check doesn't affect windows .dll's that + # are installed into $libdir/../bin (currently, that works fine) + # but it's something to keep an eye on. + test "$inst_prefix_dir" = "$destdir" && \ + func_fatal_error "error: cannot install \`$file' to a directory not ending in $libdir" + + if test -n "$inst_prefix_dir"; then + # Stick the inst_prefix_dir data into the link command. 
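+ # (Illustrative DESTDIR-style staging example: with libdir=/usr/lib
+ # and destdir=/tmp/stage/usr/lib, inst_prefix_dir becomes /tmp/stage
+ # and is substituted below as `-inst-prefix-dir /tmp/stage'.)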
+ relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%-inst-prefix-dir $inst_prefix_dir%"` + else + relink_command=`$ECHO "$relink_command" | $SED "s%@inst_prefix_dir@%%"` + fi + + func_warning "relinking \`$file'" + func_show_eval "$relink_command" \ + 'func_fatal_error "error: relink \`$file'\'' with the above command before installing it"' + fi + + # See the names of the shared library. + set dummy $library_names; shift + if test -n "$1"; then + realname="$1" + shift + + srcname="$realname" + test -n "$relink_command" && srcname="$realname"T + + # Install the shared library and build the symlinks. + func_show_eval "$install_shared_prog $dir/$srcname $destdir/$realname" \ + 'exit $?' + tstripme="$stripme" + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + case $realname in + *.dll.a) + tstripme="" + ;; + esac + ;; + esac + if test -n "$tstripme" && test -n "$striplib"; then + func_show_eval "$striplib $destdir/$realname" 'exit $?' + fi + + if test "$#" -gt 0; then + # Delete the old symlinks, and create new ones. + # Try `ln -sf' first, because the `ln' binary might depend on + # the symlink we replace! Solaris /bin/ln does not understand -f, + # so we also need to try rm && ln -s. + for linkname + do + test "$linkname" != "$realname" \ + && func_show_eval "(cd $destdir && { $LN_S -f $realname $linkname || { $RM $linkname && $LN_S $realname $linkname; }; })" + done + fi + + # Do each command in the postinstall commands. + lib="$destdir/$realname" + func_execute_cmds "$postinstall_cmds" 'exit $?' + fi + + # Install the pseudo-library for information purposes. + func_basename "$file" + name="$func_basename_result" + instname="$dir/$name"i + func_show_eval "$install_prog $instname $destdir/$name" 'exit $?' + + # Maybe install the static library, too. + test -n "$old_library" && func_append staticlibs " $dir/$old_library" + ;; + + *.lo) + # Install (i.e. copy) a libtool object. + + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + func_basename "$file" + destfile="$func_basename_result" + destfile="$destdir/$destfile" + fi + + # Deduce the name of the destination old-style object file. + case $destfile in + *.lo) + func_lo2o "$destfile" + staticdest=$func_lo2o_result + ;; + *.$objext) + staticdest="$destfile" + destfile= + ;; + *) + func_fatal_help "cannot copy a libtool object to \`$destfile'" + ;; + esac + + # Install the libtool object if requested. + test -n "$destfile" && \ + func_show_eval "$install_prog $file $destfile" 'exit $?' + + # Install the old object if enabled. + if test "$build_old_libs" = yes; then + # Deduce the name of the old-style object file. + func_lo2o "$file" + staticobj=$func_lo2o_result + func_show_eval "$install_prog \$staticobj \$staticdest" 'exit $?' + fi + exit $EXIT_SUCCESS + ;; + + *) + # Figure out destination file name, if it wasn't already specified. + if test -n "$destname"; then + destfile="$destdir/$destname" + else + func_basename "$file" + destfile="$func_basename_result" + destfile="$destdir/$destfile" + fi + + # If the file is missing, and there is a .exe on the end, strip it + # because it is most likely a libtool script we actually want to + # install + stripped_ext="" + case $file in + *.exe) + if test ! -f "$file"; then + func_stripname '' '.exe' "$file" + file=$func_stripname_result + stripped_ext=".exe" + fi + ;; + esac + + # Do a test to see if this is really a libtool program. 
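+ # (Uninstalled programs are usually shell wrapper scripts, or wrapper
+ # executables on cygwin/mingw, while the real binary lives under
+ # $objdir; the case statement below picks the right artifact to probe.)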
+ case $host in + *cygwin* | *mingw*) + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + wrapper=$func_ltwrapper_scriptname_result + else + func_stripname '' '.exe' "$file" + wrapper=$func_stripname_result + fi + ;; + *) + wrapper=$file + ;; + esac + if func_ltwrapper_script_p "$wrapper"; then + notinst_deplibs= + relink_command= + + func_source "$wrapper" + + # Check the variables that should have been set. + test -z "$generated_by_libtool_version" && \ + func_fatal_error "invalid libtool wrapper script \`$wrapper'" + + finalize=yes + for lib in $notinst_deplibs; do + # Check to see that each library is installed. + libdir= + if test -f "$lib"; then + func_source "$lib" + fi + libfile="$libdir/"`$ECHO "$lib" | $SED 's%^.*/%%g'` ### testsuite: skip nested quoting test + if test -n "$libdir" && test ! -f "$libfile"; then + func_warning "\`$lib' has not been installed in \`$libdir'" + finalize=no + fi + done + + relink_command= + func_source "$wrapper" + + outputname= + if test "$fast_install" = no && test -n "$relink_command"; then + $opt_dry_run || { + if test "$finalize" = yes; then + tmpdir=`func_mktempdir` + func_basename "$file$stripped_ext" + file="$func_basename_result" + outputname="$tmpdir/$file" + # Replace the output file specification. + relink_command=`$ECHO "$relink_command" | $SED 's%@OUTPUT@%'"$outputname"'%g'` + + $opt_silent || { + func_quote_for_expand "$relink_command" + eval "func_echo $func_quote_for_expand_result" + } + if eval "$relink_command"; then : + else + func_error "error: relink \`$file' with the above command before installing it" + $opt_dry_run || ${RM}r "$tmpdir" + continue + fi + file="$outputname" + else + func_warning "cannot relink \`$file'" + fi + } + else + # Install the binary that we compiled earlier. + file=`$ECHO "$file$stripped_ext" | $SED "s%\([^/]*\)$%$objdir/\1%"` + fi + fi + + # remove .exe since cygwin /usr/bin/install will append another + # one anyway + case $install_prog,$host in + */usr/bin/install*,*cygwin*) + case $file:$destfile in + *.exe:*.exe) + # this is ok + ;; + *.exe:*) + destfile=$destfile.exe + ;; + *:*.exe) + func_stripname '' '.exe' "$destfile" + destfile=$func_stripname_result + ;; + esac + ;; + esac + func_show_eval "$install_prog\$stripme \$file \$destfile" 'exit $?' + $opt_dry_run || if test -n "$outputname"; then + ${RM}r "$tmpdir" + fi + ;; + esac + done + + for file in $staticlibs; do + func_basename "$file" + name="$func_basename_result" + + # Set up the ranlib parameters. + oldlib="$destdir/$name" + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + + func_show_eval "$install_prog \$file \$oldlib" 'exit $?' + + if test -n "$stripme" && test -n "$old_striplib"; then + func_show_eval "$old_striplib $tool_oldlib" 'exit $?' + fi + + # Do each command in the postinstall commands. + func_execute_cmds "$old_postinstall_cmds" 'exit $?' + done + + test -n "$future_libdirs" && \ + func_warning "remember to run \`$progname --finish$future_libdirs'" + + if test -n "$current_libdirs"; then + # Maybe just do a dry run. + $opt_dry_run && current_libdirs=" -n$current_libdirs" + exec_cmd='$SHELL $progpath $preserve_args --finish$current_libdirs' + else + exit $EXIT_SUCCESS + fi +} + +test "$opt_mode" = install && func_mode_install ${1+"$@"} + + +# func_generate_dlsyms outputname originator pic_p +# Extract symbols from dlprefiles and create ${outputname}S.o with +# a dlpreopen symbol table. 
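+# For example (an illustrative sketch): linking a program `main' with
+# `-dlpreopen libfoo.la' generates mainS.c, whose
+# lt_main_LTX_preloaded_symbols[] table maps symbol names to addresses
+# so that dlopen()/dlsym() can be emulated for the preopened module.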
+func_generate_dlsyms () +{ + $opt_debug + my_outputname="$1" + my_originator="$2" + my_pic_p="${3-no}" + my_prefix=`$ECHO "$my_originator" | sed 's%[^a-zA-Z0-9]%_%g'` + my_dlsyms= + + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + if test -n "$NM" && test -n "$global_symbol_pipe"; then + my_dlsyms="${my_outputname}S.c" + else + func_error "not configured to extract global symbols from dlpreopened files" + fi + fi + + if test -n "$my_dlsyms"; then + case $my_dlsyms in + "") ;; + *.c) + # Discover the nlist of each of the dlfiles. + nlist="$output_objdir/${my_outputname}.nm" + + func_show_eval "$RM $nlist ${nlist}S ${nlist}T" + + # Parse the name list into a source file. + func_verbose "creating $output_objdir/$my_dlsyms" + + $opt_dry_run || $ECHO > "$output_objdir/$my_dlsyms" "\ +/* $my_dlsyms - symbol resolution table for \`$my_outputname' dlsym emulation. */ +/* Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION */ + +#ifdef __cplusplus +extern \"C\" { +#endif + +#if defined(__GNUC__) && (((__GNUC__ == 4) && (__GNUC_MINOR__ >= 4)) || (__GNUC__ > 4)) +#pragma GCC diagnostic ignored \"-Wstrict-prototypes\" +#endif + +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) +/* DATA imports from DLLs on WIN32 con't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined(__osf__) +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +/* External symbol declarations for the compiler. */\ +" + + if test "$dlself" = yes; then + func_verbose "generating symbol list for \`$output'" + + $opt_dry_run || echo ': @PROGRAM@ ' > "$nlist" + + # Add our own program objects to the symbol list. 
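+ # ($SP2NL and $NL2SP convert between space- and newline-separated
+ # lists, and the $lo2o sed expression rewrites each foo.lo to foo.o,
+ # so $NM below operates on real object files.)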
+ progfiles=`$ECHO "$objs$old_deplibs" | $SP2NL | $SED "$lo2o" | $NL2SP` + for progfile in $progfiles; do + func_to_tool_file "$progfile" func_convert_file_msys_to_w32 + func_verbose "extracting global C symbols from \`$func_to_tool_file_result'" + $opt_dry_run || eval "$NM $func_to_tool_file_result | $global_symbol_pipe >> '$nlist'" + done + + if test -n "$exclude_expsyms"; then + $opt_dry_run || { + eval '$EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + if test -n "$export_symbols_regex"; then + $opt_dry_run || { + eval '$EGREP -e "$export_symbols_regex" "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + } + fi + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + export_symbols="$output_objdir/$outputname.exp" + $opt_dry_run || { + $RM $export_symbols + eval "${SED} -n -e '/^: @PROGRAM@ $/d' -e 's/^.* \(.*\)$/\1/p' "'< "$nlist" > "$export_symbols"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$export_symbols" >> "$output_objdir/$outputname.def"' + ;; + esac + } + else + $opt_dry_run || { + eval "${SED} -e 's/\([].[*^$]\)/\\\\\1/g' -e 's/^/ /' -e 's/$/$/'"' < "$export_symbols" > "$output_objdir/$outputname.exp"' + eval '$GREP -f "$output_objdir/$outputname.exp" < "$nlist" > "$nlist"T' + eval '$MV "$nlist"T "$nlist"' + case $host in + *cygwin* | *mingw* | *cegcc* ) + eval "echo EXPORTS "'> "$output_objdir/$outputname.def"' + eval 'cat "$nlist" >> "$output_objdir/$outputname.def"' + ;; + esac + } + fi + fi + + for dlprefile in $dlprefiles; do + func_verbose "extracting global C symbols from \`$dlprefile'" + func_basename "$dlprefile" + name="$func_basename_result" + case $host in + *cygwin* | *mingw* | *cegcc* ) + # if an import library, we need to obtain dlname + if func_win32_import_lib_p "$dlprefile"; then + func_tr_sh "$dlprefile" + eval "curr_lafile=\$libfile_$func_tr_sh_result" + dlprefile_dlbasename="" + if test -n "$curr_lafile" && func_lalib_p "$curr_lafile"; then + # Use subshell, to avoid clobbering current variable values + dlprefile_dlname=`source "$curr_lafile" && echo "$dlname"` + if test -n "$dlprefile_dlname" ; then + func_basename "$dlprefile_dlname" + dlprefile_dlbasename="$func_basename_result" + else + # no lafile. user explicitly requested -dlpreopen . + $sharedlib_from_linklib_cmd "$dlprefile" + dlprefile_dlbasename=$sharedlib_from_linklib_result + fi + fi + $opt_dry_run || { + if test -n "$dlprefile_dlbasename" ; then + eval '$ECHO ": $dlprefile_dlbasename" >> "$nlist"' + else + func_warning "Could not compute DLL name from $name" + eval '$ECHO ": $name " >> "$nlist"' + fi + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe | + $SED -e '/I __imp/d' -e 's/I __nm_/D /;s/_nm__//' >> '$nlist'" + } + else # not an import lib + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + fi + ;; + *) + $opt_dry_run || { + eval '$ECHO ": $name " >> "$nlist"' + func_to_tool_file "$dlprefile" func_convert_file_msys_to_w32 + eval "$NM \"$func_to_tool_file_result\" 2>/dev/null | $global_symbol_pipe >> '$nlist'" + } + ;; + esac + done + + $opt_dry_run || { + # Make sure we have at least an empty file. 
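+ # (`: > "$nlist"' portably creates an empty file in case no symbols
+ # at all were extracted above.)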
+ test -f "$nlist" || : > "$nlist" + + if test -n "$exclude_expsyms"; then + $EGREP -v " ($exclude_expsyms)$" "$nlist" > "$nlist"T + $MV "$nlist"T "$nlist" + fi + + # Try sorting and uniquifying the output. + if $GREP -v "^: " < "$nlist" | + if sort -k 3 /dev/null 2>&1; then + sort -k 3 + else + sort +2 + fi | + uniq > "$nlist"S; then + : + else + $GREP -v "^: " < "$nlist" > "$nlist"S + fi + + if test -f "$nlist"S; then + eval "$global_symbol_to_cdecl"' < "$nlist"S >> "$output_objdir/$my_dlsyms"' + else + echo '/* NONE */' >> "$output_objdir/$my_dlsyms" + fi + + echo >> "$output_objdir/$my_dlsyms" "\ + +/* The mapping between symbol names and symbols. */ +typedef struct { + const char *name; + void *address; +} lt_dlsymlist; +extern LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[]; +LT_DLSYM_CONST lt_dlsymlist +lt_${my_prefix}_LTX_preloaded_symbols[] = +{\ + { \"$my_originator\", (void *) 0 }," + + case $need_lib_prefix in + no) + eval "$global_symbol_to_c_name_address" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + *) + eval "$global_symbol_to_c_name_address_lib_prefix" < "$nlist" >> "$output_objdir/$my_dlsyms" + ;; + esac + echo >> "$output_objdir/$my_dlsyms" "\ + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt_${my_prefix}_LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif\ +" + } # !$opt_dry_run + + pic_flag_for_symtable= + case "$compile_command " in + *" -static "*) ;; + *) + case $host in + # compiling the symbol table file with pic_flag works around + # a FreeBSD bug that causes programs to crash when -lm is + # linked before any other PIC object. But we must not use + # pic_flag when linking with -static. The problem exists in + # FreeBSD 2.2.6 and is fixed in FreeBSD 3.1. + *-*-freebsd2.*|*-*-freebsd3.0*|*-*-freebsdelf3.0*) + pic_flag_for_symtable=" $pic_flag -DFREEBSD_WORKAROUND" ;; + *-*-hpux*) + pic_flag_for_symtable=" $pic_flag" ;; + *) + if test "X$my_pic_p" != Xno; then + pic_flag_for_symtable=" $pic_flag" + fi + ;; + esac + ;; + esac + symtab_cflags= + for arg in $LTCFLAGS; do + case $arg in + -pie | -fpie | -fPIE) ;; + *) func_append symtab_cflags " $arg" ;; + esac + done + + # Now compile the dynamic symbol file. + func_show_eval '(cd $output_objdir && $LTCC$symtab_cflags -c$no_builtin_flag$pic_flag_for_symtable "$my_dlsyms")' 'exit $?' + + # Clean up the generated files. + func_show_eval '$RM "$output_objdir/$my_dlsyms" "$nlist" "${nlist}S" "${nlist}T"' + + # Transform the symbol file into the correct name. + symfileobj="$output_objdir/${my_outputname}S.$objext" + case $host in + *cygwin* | *mingw* | *cegcc* ) + if test -f "$output_objdir/$my_outputname.def"; then + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$output_objdir/$my_outputname.def $symfileobj%"` + else + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + fi + ;; + *) + compile_command=`$ECHO "$compile_command" | $SED "s%@SYMFILE@%$symfileobj%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s%@SYMFILE@%$symfileobj%"` + ;; + esac + ;; + *) + func_fatal_error "unknown suffix for \`$my_dlsyms'" + ;; + esac + else + # We keep going just in case the user didn't refer to + # lt_preloaded_symbols. 
The linker will fail if global_symbol_pipe + # really was required. + + # Nullify the symbol file. + compile_command=`$ECHO "$compile_command" | $SED "s% @SYMFILE@%%"` + finalize_command=`$ECHO "$finalize_command" | $SED "s% @SYMFILE@%%"` + fi +} + +# func_win32_libid arg +# return the library type of file 'arg' +# +# Need a lot of goo to handle *both* DLLs and import libs +# Has to be a shell function in order to 'eat' the argument +# that is supplied when $file_magic_command is called. +# Despite the name, also deal with 64 bit binaries. +func_win32_libid () +{ + $opt_debug + win32_libid_type="unknown" + win32_fileres=`file -L $1 2>/dev/null` + case $win32_fileres in + *ar\ archive\ import\ library*) # definitely import + win32_libid_type="x86 archive import" + ;; + *ar\ archive*) # could be an import, or static + # Keep the egrep pattern in sync with the one in _LT_CHECK_MAGIC_METHOD. + if eval $OBJDUMP -f $1 | $SED -e '10q' 2>/dev/null | + $EGREP 'file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' >/dev/null; then + func_to_tool_file "$1" func_convert_file_msys_to_w32 + win32_nmres=`eval $NM -f posix -A \"$func_to_tool_file_result\" | + $SED -n -e ' + 1,100{ + / I /{ + s,.*,import, + p + q + } + }'` + case $win32_nmres in + import*) win32_libid_type="x86 archive import";; + *) win32_libid_type="x86 archive static";; + esac + fi + ;; + *DLL*) + win32_libid_type="x86 DLL" + ;; + *executable*) # but shell scripts are "executable" too... + case $win32_fileres in + *MS\ Windows\ PE\ Intel*) + win32_libid_type="x86 DLL" + ;; + esac + ;; + esac + $ECHO "$win32_libid_type" +} + +# func_cygming_dll_for_implib ARG +# +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib () +{ + $opt_debug + sharedlib_from_linklib_result=`$DLLTOOL --identify-strict --identify "$1"` +} + +# func_cygming_dll_for_implib_fallback_core SECTION_NAME LIBNAMEs +# +# The is the core of a fallback implementation of a +# platform-specific function to extract the name of the +# DLL associated with the specified import library LIBNAME. +# +# SECTION_NAME is either .idata$6 or .idata$7, depending +# on the platform and compiler that created the implib. +# +# Echos the name of the DLL associated with the +# specified import library. +func_cygming_dll_for_implib_fallback_core () +{ + $opt_debug + match_literal=`$ECHO "$1" | $SED "$sed_make_literal_regex"` + $OBJDUMP -s --section "$1" "$2" 2>/dev/null | + $SED '/^Contents of section '"$match_literal"':/{ + # Place marker at beginning of archive member dllname section + s/.*/====MARK====/ + p + d + } + # These lines can sometimes be longer than 43 characters, but + # are always uninteresting + /:[ ]*file format pe[i]\{,1\}-/d + /^In archive [^:]*:/d + # Ensure marker is printed + /^====MARK====/p + # Remove all lines with less than 43 characters + /^.\{43\}/!d + # From remaining lines, remove first 43 characters + s/^.\{43\}//' | + $SED -n ' + # Join marker and all lines until next marker into a single line + /^====MARK====/ b para + H + $ b para + b + :para + x + s/\n//g + # Remove the marker + s/^====MARK====// + # Remove trailing dots and whitespace + s/[\. 
\t]*$// + # Print + /./p' | + # we now have a list, one entry per line, of the stringified + # contents of the appropriate section of all members of the + # archive which possess that section. Heuristic: eliminate + # all those which have a first or second character that is + # a '.' (that is, objdump's representation of an unprintable + # character.) This should work for all archives with less than + # 0x302f exports -- but will fail for DLLs whose name actually + # begins with a literal '.' or a single character followed by + # a '.'. + # + # Of those that remain, print the first one. + $SED -e '/^\./d;/^.\./d;q' +} + +# func_cygming_gnu_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is a GNU/binutils-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_gnu_implib_p () +{ + $opt_debug + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_gnu_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $EGREP ' (_head_[A-Za-z0-9_]+_[ad]l*|[A-Za-z0-9_]+_[ad]l*_iname)$'` + test -n "$func_cygming_gnu_implib_tmp" +} + +# func_cygming_ms_implib_p ARG +# This predicate returns with zero status (TRUE) if +# ARG is an MS-style import library. Returns +# with nonzero status (FALSE) otherwise. +func_cygming_ms_implib_p () +{ + $opt_debug + func_to_tool_file "$1" func_convert_file_msys_to_w32 + func_cygming_ms_implib_tmp=`$NM "$func_to_tool_file_result" | eval "$global_symbol_pipe" | $GREP '_NULL_IMPORT_DESCRIPTOR'` + test -n "$func_cygming_ms_implib_tmp" +} + +# func_cygming_dll_for_implib_fallback ARG +# Platform-specific function to extract the +# name of the DLL associated with the specified +# import library ARG. +# +# This fallback implementation is for use when $DLLTOOL +# does not support the --identify-strict option. +# Invoked by eval'ing the libtool variable +# $sharedlib_from_linklib_cmd +# Result is available in the variable +# $sharedlib_from_linklib_result +func_cygming_dll_for_implib_fallback () +{ + $opt_debug + if func_cygming_gnu_implib_p "$1" ; then + # binutils import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$7' "$1"` + elif func_cygming_ms_implib_p "$1" ; then + # ms-generated import library + sharedlib_from_linklib_result=`func_cygming_dll_for_implib_fallback_core '.idata$6' "$1"` + else + # unknown + sharedlib_from_linklib_result="" + fi +} + + +# func_extract_an_archive dir oldlib +func_extract_an_archive () +{ + $opt_debug + f_ex_an_ar_dir="$1"; shift + f_ex_an_ar_oldlib="$1" + if test "$lock_old_archive_extraction" = yes; then + lockfile=$f_ex_an_ar_oldlib.lock + until $opt_dry_run || ln "$progpath" "$lockfile" 2>/dev/null; do + func_echo "Waiting for $lockfile to be removed" + sleep 2 + done + fi + func_show_eval "(cd \$f_ex_an_ar_dir && $AR x \"\$f_ex_an_ar_oldlib\")" \ + 'stat=$?; rm -f "$lockfile"; exit $stat' + if test "$lock_old_archive_extraction" = yes; then + $opt_dry_run || rm -f "$lockfile" + fi + if ($AR t "$f_ex_an_ar_oldlib" | sort | sort -uc >/dev/null 2>&1); then + : + else + func_fatal_error "object name conflicts in archive: $f_ex_an_ar_dir/$f_ex_an_ar_oldlib" + fi +} + + +# func_extract_archives gentop oldlib ... +func_extract_archives () +{ + $opt_debug + my_gentop="$1"; shift + my_oldlibs=${1+"$@"} + my_oldobjs="" + my_xlib="" + my_xabs="" + my_xdir="" + + for my_xlib in $my_oldlibs; do + # Extract the objects. 
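+ # (Each archive is unpacked into its own directory under $my_gentop so
+ # its members can be relinked into the new library; clashing archive
+ # basenames get a unique lt<serial>- prefix, and on Darwin fat
+ # archives are split per architecture with $LIPO, extracted, and the
+ # thin objects re-merged.)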
+ case $my_xlib in + [\\/]* | [A-Za-z]:[\\/]*) my_xabs="$my_xlib" ;; + *) my_xabs=`pwd`"/$my_xlib" ;; + esac + func_basename "$my_xlib" + my_xlib="$func_basename_result" + my_xlib_u=$my_xlib + while :; do + case " $extracted_archives " in + *" $my_xlib_u "*) + func_arith $extracted_serial + 1 + extracted_serial=$func_arith_result + my_xlib_u=lt$extracted_serial-$my_xlib ;; + *) break ;; + esac + done + extracted_archives="$extracted_archives $my_xlib_u" + my_xdir="$my_gentop/$my_xlib_u" + + func_mkdir_p "$my_xdir" + + case $host in + *-darwin*) + func_verbose "Extracting $my_xabs" + # Do not bother doing anything if just a dry run + $opt_dry_run || { + darwin_orig_dir=`pwd` + cd $my_xdir || exit $? + darwin_archive=$my_xabs + darwin_curdir=`pwd` + darwin_base_archive=`basename "$darwin_archive"` + darwin_arches=`$LIPO -info "$darwin_archive" 2>/dev/null | $GREP Architectures 2>/dev/null || true` + if test -n "$darwin_arches"; then + darwin_arches=`$ECHO "$darwin_arches" | $SED -e 's/.*are://'` + darwin_arch= + func_verbose "$darwin_base_archive has multiple architectures $darwin_arches" + for darwin_arch in $darwin_arches ; do + func_mkdir_p "unfat-$$/${darwin_base_archive}-${darwin_arch}" + $LIPO -thin $darwin_arch -output "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" "${darwin_archive}" + cd "unfat-$$/${darwin_base_archive}-${darwin_arch}" + func_extract_an_archive "`pwd`" "${darwin_base_archive}" + cd "$darwin_curdir" + $RM "unfat-$$/${darwin_base_archive}-${darwin_arch}/${darwin_base_archive}" + done # $darwin_arches + ## Okay now we've a bunch of thin objects, gotta fatten them up :) + darwin_filelist=`find unfat-$$ -type f -name \*.o -print -o -name \*.lo -print | $SED -e "$basename" | sort -u` + darwin_file= + darwin_files= + for darwin_file in $darwin_filelist; do + darwin_files=`find unfat-$$ -name $darwin_file -print | sort | $NL2SP` + $LIPO -create -output "$darwin_file" $darwin_files + done # $darwin_filelist + $RM -rf unfat-$$ + cd "$darwin_orig_dir" + else + cd $darwin_orig_dir + func_extract_an_archive "$my_xdir" "$my_xabs" + fi # $darwin_arches + } # !$opt_dry_run + ;; + *) + func_extract_an_archive "$my_xdir" "$my_xabs" + ;; + esac + my_oldobjs="$my_oldobjs "`find $my_xdir -name \*.$objext -print -o -name \*.lo -print | sort | $NL2SP` + done + + func_extract_archives_result="$my_oldobjs" +} + + +# func_emit_wrapper [arg=no] +# +# Emit a libtool wrapper script on stdout. +# Don't directly open a file because we may want to +# incorporate the script contents within a cygwin/mingw +# wrapper executable. Must ONLY be called from within +# func_mode_link because it depends on a number of variables +# set therein. +# +# ARG is the value that the WRAPPER_SCRIPT_BELONGS_IN_OBJDIR +# variable will take. If 'yes', then the emitted script +# will assume that the directory in which it is stored is +# the $objdir directory. This is a cygwin/mingw-specific +# behavior. +func_emit_wrapper () +{ + func_emit_wrapper_arg1=${1-no} + + $ECHO "\ +#! $SHELL + +# $output - temporary wrapper script for $objdir/$outputname +# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION +# +# The $output program cannot be directly executed until all the libtool +# libraries that it depends on are installed. +# +# This wrapper script should never be moved out of the build directory. +# If it is, it will not operate correctly. + +# Sed substitution that helps us do robust quoting. It backslashifies +# metacharacters that are still active within double-quoted strings. 
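+# (Illustrative: sed_quote_subst backslashifies dollar signs,
+# backquotes, backslashes and double quotes in the values substituted
+# below, so that e.g. relink_command survives re-expansion intact.)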
+sed_quote_subst='$sed_quote_subst' + +# Be Bourne compatible +if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then + emulate sh + NULLCMD=: + # Zsh 3.x and 4.x performs word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. + alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in *posix*) set -o posix;; esac +fi +BIN_SH=xpg4; export BIN_SH # for Tru64 +DUALCASE=1; export DUALCASE # for MKS sh + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +relink_command=\"$relink_command\" + +# This environment variable determines our operation mode. +if test \"\$libtool_install_magic\" = \"$magic\"; then + # install mode needs the following variables: + generated_by_libtool_version='$macro_version' + notinst_deplibs='$notinst_deplibs' +else + # When we are sourced in execute mode, \$file and \$ECHO are already set. + if test \"\$libtool_execute_magic\" != \"$magic\"; then + file=\"\$0\"" + + qECHO=`$ECHO "$ECHO" | $SED "$sed_quote_subst"` + $ECHO "\ + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + ECHO=\"$qECHO\" + fi + +# Very basic option parsing. These options are (a) specific to +# the libtool wrapper, (b) are identical between the wrapper +# /script/ and the wrapper /executable/ which is used only on +# windows platforms, and (c) all begin with the string "--lt-" +# (application programs are unlikely to have options which match +# this pattern). +# +# There are only two supported options: --lt-debug and +# --lt-dump-script. There is, deliberately, no --lt-help. +# +# The first argument to this parsing function should be the +# script's $0 value, followed by "$@". +lt_option_debug= +func_parse_lt_options () +{ + lt_script_arg0=\$0 + shift + for lt_opt + do + case \"\$lt_opt\" in + --lt-debug) lt_option_debug=1 ;; + --lt-dump-script) + lt_dump_D=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%/[^/]*$%%'\` + test \"X\$lt_dump_D\" = \"X\$lt_script_arg0\" && lt_dump_D=. + lt_dump_F=\`\$ECHO \"X\$lt_script_arg0\" | $SED -e 's/^X//' -e 's%^.*/%%'\` + cat \"\$lt_dump_D/\$lt_dump_F\" + exit 0 + ;; + --lt-*) + \$ECHO \"Unrecognized --lt- option: '\$lt_opt'\" 1>&2 + exit 1 + ;; + esac + done + + # Print the debug banner immediately: + if test -n \"\$lt_option_debug\"; then + echo \"${outputname}:${output}:\${LINENO}: libtool wrapper (GNU $PACKAGE$TIMESTAMP) $VERSION\" 1>&2 + fi +} + +# Used when --lt-debug. 
Prints its arguments to stdout +# (redirection is the responsibility of the caller) +func_lt_dump_args () +{ + lt_dump_args_N=1; + for lt_arg + do + \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[\$lt_dump_args_N]: \$lt_arg\" + lt_dump_args_N=\`expr \$lt_dump_args_N + 1\` + done +} + +# Core function for launching the target application +func_exec_program_core () +{ +" + case $host in + # Backslashes separate directories on plain windows + *-*-mingw | *-*-os2* | *-cegcc*) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir\\\\\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir\\\\\$program\" \${1+\"\$@\"} +" + ;; + + *) + $ECHO "\ + if test -n \"\$lt_option_debug\"; then + \$ECHO \"${outputname}:${output}:\${LINENO}: newargv[0]: \$progdir/\$program\" 1>&2 + func_lt_dump_args \${1+\"\$@\"} 1>&2 + fi + exec \"\$progdir/\$program\" \${1+\"\$@\"} +" + ;; + esac + $ECHO "\ + \$ECHO \"\$0: cannot exec \$program \$*\" 1>&2 + exit 1 +} + +# A function to encapsulate launching the target application +# Strips options in the --lt-* namespace from \$@ and +# launches target application with the remaining arguments. +func_exec_program () +{ + case \" \$* \" in + *\\ --lt-*) + for lt_wr_arg + do + case \$lt_wr_arg in + --lt-*) ;; + *) set x \"\$@\" \"\$lt_wr_arg\"; shift;; + esac + shift + done ;; + esac + func_exec_program_core \${1+\"\$@\"} +} + + # Parse options + func_parse_lt_options \"\$0\" \${1+\"\$@\"} + + # Find the directory that this script lives in. + thisdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*$%%'\` + test \"x\$thisdir\" = \"x\$file\" && thisdir=. + + # Follow symbolic links until we get to the real thisdir. + file=\`ls -ld \"\$file\" | $SED -n 's/.*-> //p'\` + while test -n \"\$file\"; do + destdir=\`\$ECHO \"\$file\" | $SED 's%/[^/]*\$%%'\` + + # If there was a directory component, then change thisdir. + if test \"x\$destdir\" != \"x\$file\"; then + case \"\$destdir\" in + [\\\\/]* | [A-Za-z]:[\\\\/]*) thisdir=\"\$destdir\" ;; + *) thisdir=\"\$thisdir/\$destdir\" ;; + esac + fi + + file=\`\$ECHO \"\$file\" | $SED 's%^.*/%%'\` + file=\`ls -ld \"\$thisdir/\$file\" | $SED -n 's/.*-> //p'\` + done + + # Usually 'no', except on cygwin/mingw when embedded into + # the cwrapper. + WRAPPER_SCRIPT_BELONGS_IN_OBJDIR=$func_emit_wrapper_arg1 + if test \"\$WRAPPER_SCRIPT_BELONGS_IN_OBJDIR\" = \"yes\"; then + # special case for '.' + if test \"\$thisdir\" = \".\"; then + thisdir=\`pwd\` + fi + # remove .libs from thisdir + case \"\$thisdir\" in + *[\\\\/]$objdir ) thisdir=\`\$ECHO \"\$thisdir\" | $SED 's%[\\\\/][^\\\\/]*$%%'\` ;; + $objdir ) thisdir=. ;; + esac + fi + + # Try to get the absolute directory name. + absdir=\`cd \"\$thisdir\" && pwd\` + test -n \"\$absdir\" && thisdir=\"\$absdir\" +" + + if test "$fast_install" = yes; then + $ECHO "\ + program=lt-'$outputname'$exeext + progdir=\"\$thisdir/$objdir\" + + if test ! -f \"\$progdir/\$program\" || + { file=\`ls -1dt \"\$progdir/\$program\" \"\$progdir/../\$program\" 2>/dev/null | ${SED} 1q\`; \\ + test \"X\$file\" != \"X\$progdir/\$program\"; }; then + + file=\"\$\$-\$program\" + + if test ! 
-d \"\$progdir\"; then + $MKDIR \"\$progdir\" + else + $RM \"\$progdir/\$file\" + fi" + + $ECHO "\ + + # relink executable if necessary + if test -n \"\$relink_command\"; then + if relink_command_output=\`eval \$relink_command 2>&1\`; then : + else + $ECHO \"\$relink_command_output\" >&2 + $RM \"\$progdir/\$file\" + exit 1 + fi + fi + + $MV \"\$progdir/\$file\" \"\$progdir/\$program\" 2>/dev/null || + { $RM \"\$progdir/\$program\"; + $MV \"\$progdir/\$file\" \"\$progdir/\$program\"; } + $RM \"\$progdir/\$file\" + fi" + else + $ECHO "\ + program='$outputname' + progdir=\"\$thisdir/$objdir\" +" + fi + + $ECHO "\ + + if test -f \"\$progdir/\$program\"; then" + + # fixup the dll searchpath if we need to. + # + # Fix the DLL searchpath if we need to. Do this before prepending + # to shlibpath, because on Windows, both are PATH and uninstalled + # libraries must come first. + if test -n "$dllsearchpath"; then + $ECHO "\ + # Add the dll search path components to the executable PATH + PATH=$dllsearchpath:\$PATH +" + fi + + # Export our shlibpath_var if we have one. + if test "$shlibpath_overrides_runpath" = yes && test -n "$shlibpath_var" && test -n "$temp_rpath"; then + $ECHO "\ + # Add our own library path to $shlibpath_var + $shlibpath_var=\"$temp_rpath\$$shlibpath_var\" + + # Some systems cannot cope with colon-terminated $shlibpath_var + # The second colon is a workaround for a bug in BeOS R4 sed + $shlibpath_var=\`\$ECHO \"\$$shlibpath_var\" | $SED 's/::*\$//'\` + + export $shlibpath_var +" + fi + + $ECHO "\ + if test \"\$libtool_execute_magic\" != \"$magic\"; then + # Run the actual program with our arguments. + func_exec_program \${1+\"\$@\"} + fi + else + # The program doesn't exist. + \$ECHO \"\$0: error: \\\`\$progdir/\$program' does not exist\" 1>&2 + \$ECHO \"This script is just a wrapper for \$program.\" 1>&2 + \$ECHO \"See the $PACKAGE documentation for more information.\" 1>&2 + exit 1 + fi +fi\ +" +} + + +# func_emit_cwrapperexe_src +# emit the source code for a wrapper executable on stdout +# Must ONLY be called from within func_mode_link because +# it depends on a number of variable set therein. +func_emit_cwrapperexe_src () +{ + cat < +#include +#ifdef _MSC_VER +# include +# include +# include +#else +# include +# include +# ifdef __CYGWIN__ +# include +# endif +#endif +#include +#include +#include +#include +#include +#include +#include +#include + +/* declarations of non-ANSI functions */ +#if defined(__MINGW32__) +# ifdef __STRICT_ANSI__ +int _putenv (const char *); +# endif +#elif defined(__CYGWIN__) +# ifdef __STRICT_ANSI__ +char *realpath (const char *, char *); +int putenv (char *); +int setenv (const char *, const char *, int); +# endif +/* #elif defined (other platforms) ... */ +#endif + +/* portability defines, excluding path handling macros */ +#if defined(_MSC_VER) +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +# define S_IXUSR _S_IEXEC +# ifndef _INTPTR_T_DEFINED +# define _INTPTR_T_DEFINED +# define intptr_t int +# endif +#elif defined(__MINGW32__) +# define setmode _setmode +# define stat _stat +# define chmod _chmod +# define getcwd _getcwd +# define putenv _putenv +#elif defined(__CYGWIN__) +# define HAVE_SETENV +# define FOPEN_WB "wb" +/* #elif defined (other platforms) ... 
*/ +#endif + +#if defined(PATH_MAX) +# define LT_PATHMAX PATH_MAX +#elif defined(MAXPATHLEN) +# define LT_PATHMAX MAXPATHLEN +#else +# define LT_PATHMAX 1024 +#endif + +#ifndef S_IXOTH +# define S_IXOTH 0 +#endif +#ifndef S_IXGRP +# define S_IXGRP 0 +#endif + +/* path handling portability macros */ +#ifndef DIR_SEPARATOR +# define DIR_SEPARATOR '/' +# define PATH_SEPARATOR ':' +#endif + +#if defined (_WIN32) || defined (__MSDOS__) || defined (__DJGPP__) || \ + defined (__OS2__) +# define HAVE_DOS_BASED_FILE_SYSTEM +# define FOPEN_WB "wb" +# ifndef DIR_SEPARATOR_2 +# define DIR_SEPARATOR_2 '\\' +# endif +# ifndef PATH_SEPARATOR_2 +# define PATH_SEPARATOR_2 ';' +# endif +#endif + +#ifndef DIR_SEPARATOR_2 +# define IS_DIR_SEPARATOR(ch) ((ch) == DIR_SEPARATOR) +#else /* DIR_SEPARATOR_2 */ +# define IS_DIR_SEPARATOR(ch) \ + (((ch) == DIR_SEPARATOR) || ((ch) == DIR_SEPARATOR_2)) +#endif /* DIR_SEPARATOR_2 */ + +#ifndef PATH_SEPARATOR_2 +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR) +#else /* PATH_SEPARATOR_2 */ +# define IS_PATH_SEPARATOR(ch) ((ch) == PATH_SEPARATOR_2) +#endif /* PATH_SEPARATOR_2 */ + +#ifndef FOPEN_WB +# define FOPEN_WB "w" +#endif +#ifndef _O_BINARY +# define _O_BINARY 0 +#endif + +#define XMALLOC(type, num) ((type *) xmalloc ((num) * sizeof(type))) +#define XFREE(stale) do { \ + if (stale) { free ((void *) stale); stale = 0; } \ +} while (0) + +#if defined(LT_DEBUGWRAPPER) +static int lt_debug = 1; +#else +static int lt_debug = 0; +#endif + +const char *program_name = "libtool-wrapper"; /* in case xstrdup fails */ + +void *xmalloc (size_t num); +char *xstrdup (const char *string); +const char *base_name (const char *name); +char *find_executable (const char *wrapper); +char *chase_symlinks (const char *pathspec); +int make_executable (const char *path); +int check_executable (const char *path); +char *strendzap (char *str, const char *pat); +void lt_debugprintf (const char *file, int line, const char *fmt, ...); +void lt_fatal (const char *file, int line, const char *message, ...); +static const char *nonnull (const char *s); +static const char *nonempty (const char *s); +void lt_setenv (const char *name, const char *value); +char *lt_extend_str (const char *orig_value, const char *add, int to_end); +void lt_update_exe_path (const char *name, const char *value); +void lt_update_lib_path (const char *name, const char *value); +char **prepare_spawn (char **argv); +void lt_dump_script (FILE *f); +EOF + + cat <= 0) + && (st.st_mode & (S_IXUSR | S_IXGRP | S_IXOTH))) + return 1; + else + return 0; +} + +int +make_executable (const char *path) +{ + int rval = 0; + struct stat st; + + lt_debugprintf (__FILE__, __LINE__, "(make_executable): %s\n", + nonempty (path)); + if ((!path) || (!*path)) + return 0; + + if (stat (path, &st) >= 0) + { + rval = chmod (path, st.st_mode | S_IXOTH | S_IXGRP | S_IXUSR); + } + return rval; +} + +/* Searches for the full path of the wrapper. Returns + newly allocated full path name if found, NULL otherwise + Does not chase symlinks, even on platforms that support them. +*/ +char * +find_executable (const char *wrapper) +{ + int has_slash = 0; + const char *p; + const char *p_next; + /* static buffer for getcwd */ + char tmp[LT_PATHMAX + 1]; + int tmp_len; + char *concat_name; + + lt_debugprintf (__FILE__, __LINE__, "(find_executable): %s\n", + nonempty (wrapper)); + + if ((wrapper == NULL) || (*wrapper == '\0')) + return NULL; + + /* Absolute path? 
*/ +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + if (isalpha ((unsigned char) wrapper[0]) && wrapper[1] == ':') + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + else + { +#endif + if (IS_DIR_SEPARATOR (wrapper[0])) + { + concat_name = xstrdup (wrapper); + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } +#if defined (HAVE_DOS_BASED_FILE_SYSTEM) + } +#endif + + for (p = wrapper; *p; p++) + if (*p == '/') + { + has_slash = 1; + break; + } + if (!has_slash) + { + /* no slashes; search PATH */ + const char *path = getenv ("PATH"); + if (path != NULL) + { + for (p = path; *p; p = p_next) + { + const char *q; + size_t p_len; + for (q = p; *q; q++) + if (IS_PATH_SEPARATOR (*q)) + break; + p_len = q - p; + p_next = (*q == '\0' ? q : q + 1); + if (p_len == 0) + { + /* empty path: current directory */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = + XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + } + else + { + concat_name = + XMALLOC (char, p_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, p, p_len); + concat_name[p_len] = '/'; + strcpy (concat_name + p_len + 1, wrapper); + } + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + } + } + /* not found in PATH; assume curdir */ + } + /* Relative path | not found in path: prepend cwd */ + if (getcwd (tmp, LT_PATHMAX) == NULL) + lt_fatal (__FILE__, __LINE__, "getcwd failed: %s", + nonnull (strerror (errno))); + tmp_len = strlen (tmp); + concat_name = XMALLOC (char, tmp_len + 1 + strlen (wrapper) + 1); + memcpy (concat_name, tmp, tmp_len); + concat_name[tmp_len] = '/'; + strcpy (concat_name + tmp_len + 1, wrapper); + + if (check_executable (concat_name)) + return concat_name; + XFREE (concat_name); + return NULL; +} + +char * +chase_symlinks (const char *pathspec) +{ +#ifndef S_ISLNK + return xstrdup (pathspec); +#else + char buf[LT_PATHMAX]; + struct stat s; + char *tmp_pathspec = xstrdup (pathspec); + char *p; + int has_symlinks = 0; + while (strlen (tmp_pathspec) && !has_symlinks) + { + lt_debugprintf (__FILE__, __LINE__, + "checking path component for symlinks: %s\n", + tmp_pathspec); + if (lstat (tmp_pathspec, &s) == 0) + { + if (S_ISLNK (s.st_mode) != 0) + { + has_symlinks = 1; + break; + } + + /* search backwards for last DIR_SEPARATOR */ + p = tmp_pathspec + strlen (tmp_pathspec) - 1; + while ((p > tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + p--; + if ((p == tmp_pathspec) && (!IS_DIR_SEPARATOR (*p))) + { + /* no more DIR_SEPARATORS left */ + break; + } + *p = '\0'; + } + else + { + lt_fatal (__FILE__, __LINE__, + "error accessing file \"%s\": %s", + tmp_pathspec, nonnull (strerror (errno))); + } + } + XFREE (tmp_pathspec); + + if (!has_symlinks) + { + return xstrdup (pathspec); + } + + tmp_pathspec = realpath (pathspec, buf); + if (tmp_pathspec == 0) + { + lt_fatal (__FILE__, __LINE__, + "could not follow symlinks for %s", pathspec); + } + return xstrdup (tmp_pathspec); +#endif +} + +char * +strendzap (char *str, const char *pat) +{ + size_t len, patlen; + + assert (str != NULL); + assert (pat != NULL); + + len = strlen (str); + patlen = strlen (pat); + + if (patlen <= len) + { + str += len - patlen; + if (strcmp (str, pat) == 0) + *str = '\0'; + } + return 
str; +} + +void +lt_debugprintf (const char *file, int line, const char *fmt, ...) +{ + va_list args; + if (lt_debug) + { + (void) fprintf (stderr, "%s:%s:%d: ", program_name, file, line); + va_start (args, fmt); + (void) vfprintf (stderr, fmt, args); + va_end (args); + } +} + +static void +lt_error_core (int exit_status, const char *file, + int line, const char *mode, + const char *message, va_list ap) +{ + fprintf (stderr, "%s:%s:%d: %s: ", program_name, file, line, mode); + vfprintf (stderr, message, ap); + fprintf (stderr, ".\n"); + + if (exit_status >= 0) + exit (exit_status); +} + +void +lt_fatal (const char *file, int line, const char *message, ...) +{ + va_list ap; + va_start (ap, message); + lt_error_core (EXIT_FAILURE, file, line, "FATAL", message, ap); + va_end (ap); +} + +static const char * +nonnull (const char *s) +{ + return s ? s : "(null)"; +} + +static const char * +nonempty (const char *s) +{ + return (s && !*s) ? "(empty)" : nonnull (s); +} + +void +lt_setenv (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_setenv) setting '%s' to '%s'\n", + nonnull (name), nonnull (value)); + { +#ifdef HAVE_SETENV + /* always make a copy, for consistency with !HAVE_SETENV */ + char *str = xstrdup (value); + setenv (name, str, 1); +#else + int len = strlen (name) + 1 + strlen (value) + 1; + char *str = XMALLOC (char, len); + sprintf (str, "%s=%s", name, value); + if (putenv (str) != EXIT_SUCCESS) + { + XFREE (str); + } +#endif + } +} + +char * +lt_extend_str (const char *orig_value, const char *add, int to_end) +{ + char *new_value; + if (orig_value && *orig_value) + { + int orig_value_len = strlen (orig_value); + int add_len = strlen (add); + new_value = XMALLOC (char, add_len + orig_value_len + 1); + if (to_end) + { + strcpy (new_value, orig_value); + strcpy (new_value + orig_value_len, add); + } + else + { + strcpy (new_value, add); + strcpy (new_value + add_len, orig_value); + } + } + else + { + new_value = xstrdup (add); + } + return new_value; +} + +void +lt_update_exe_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_exe_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + /* some systems can't cope with a ':'-terminated path #' */ + int len = strlen (new_value); + while (((len = strlen (new_value)) > 0) && IS_PATH_SEPARATOR (new_value[len-1])) + { + new_value[len-1] = '\0'; + } + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +void +lt_update_lib_path (const char *name, const char *value) +{ + lt_debugprintf (__FILE__, __LINE__, + "(lt_update_lib_path) modifying '%s' by prepending '%s'\n", + nonnull (name), nonnull (value)); + + if (name && *name && value && *value) + { + char *new_value = lt_extend_str (getenv (name), value, 0); + lt_setenv (name, new_value); + XFREE (new_value); + } +} + +EOF + case $host_os in + mingw*) + cat <<"EOF" + +/* Prepares an argument vector before calling spawn(). + Note that spawn() does not by itself call the command interpreter + (getenv ("COMSPEC") != NULL ? getenv ("COMSPEC") : + ({ OSVERSIONINFO v; v.dwOSVersionInfoSize = sizeof(OSVERSIONINFO); + GetVersionEx(&v); + v.dwPlatformId == VER_PLATFORM_WIN32_NT; + }) ? "cmd.exe" : "command.com"). + Instead it simply concatenates the arguments, separated by ' ', and calls + CreateProcess(). 
We must quote the arguments since Win32 CreateProcess() + interprets characters like ' ', '\t', '\\', '"' (but not '<' and '>') in a + special way: + - Space and tab are interpreted as delimiters. They are not treated as + delimiters if they are surrounded by double quotes: "...". + - Unescaped double quotes are removed from the input. Their only effect is + that within double quotes, space and tab are treated like normal + characters. + - Backslashes not followed by double quotes are not special. + - But 2*n+1 backslashes followed by a double quote become + n backslashes followed by a double quote (n >= 0): + \" -> " + \\\" -> \" + \\\\\" -> \\" + */ +#define SHELL_SPECIAL_CHARS "\"\\ \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +#define SHELL_SPACE_CHARS " \001\002\003\004\005\006\007\010\011\012\013\014\015\016\017\020\021\022\023\024\025\026\027\030\031\032\033\034\035\036\037" +char ** +prepare_spawn (char **argv) +{ + size_t argc; + char **new_argv; + size_t i; + + /* Count number of arguments. */ + for (argc = 0; argv[argc] != NULL; argc++) + ; + + /* Allocate new argument vector. */ + new_argv = XMALLOC (char *, argc + 1); + + /* Put quoted arguments into the new argument vector. */ + for (i = 0; i < argc; i++) + { + const char *string = argv[i]; + + if (string[0] == '\0') + new_argv[i] = xstrdup ("\"\""); + else if (strpbrk (string, SHELL_SPECIAL_CHARS) != NULL) + { + int quote_around = (strpbrk (string, SHELL_SPACE_CHARS) != NULL); + size_t length; + unsigned int backslashes; + const char *s; + char *quoted_string; + char *p; + + length = 0; + backslashes = 0; + if (quote_around) + length++; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + length += backslashes + 1; + length++; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + length += backslashes + 1; + + quoted_string = XMALLOC (char, length + 1); + + p = quoted_string; + backslashes = 0; + if (quote_around) + *p++ = '"'; + for (s = string; *s != '\0'; s++) + { + char c = *s; + if (c == '"') + { + unsigned int j; + for (j = backslashes + 1; j > 0; j--) + *p++ = '\\'; + } + *p++ = c; + if (c == '\\') + backslashes++; + else + backslashes = 0; + } + if (quote_around) + { + unsigned int j; + for (j = backslashes; j > 0; j--) + *p++ = '\\'; + *p++ = '"'; + } + *p = '\0'; + + new_argv[i] = quoted_string; + } + else + new_argv[i] = (char *) string; + } + new_argv[argc] = NULL; + + return new_argv; +} +EOF + ;; + esac + + cat <<"EOF" +void lt_dump_script (FILE* f) +{ +EOF + func_emit_wrapper yes | + $SED -n -e ' +s/^\(.\{79\}\)\(..*\)/\1\ +\2/ +h +s/\([\\"]\)/\\\1/g +s/$/\\n/ +s/\([^\n]*\).*/ fputs ("\1", f);/p +g +D' + cat <<"EOF" +} +EOF +} +# end: func_emit_cwrapperexe_src + +# func_win32_import_lib_p ARG +# True if ARG is an import lib, as indicated by $file_magic_cmd +func_win32_import_lib_p () +{ + $opt_debug + case `eval $file_magic_cmd \"\$1\" 2>/dev/null | $SED -e 10q` in + *import*) : ;; + *) false ;; + esac +} + +# func_mode_link arg... +func_mode_link () +{ + $opt_debug + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + # It is impossible to link a dll without this setting, and + # we shouldn't force the makefile maintainer to figure out + # which system we are compiling for in order to pass an extra + # flag for every libtool invocation. 
+ # allow_undefined=no + + # FIXME: Unfortunately, there are problems with the above when trying + # to make a dll which has undefined symbols, in which case not + # even a static library is built. For now, we need to specify + # -no-undefined on the libtool link line when we can be certain + # that all symbols are satisfied, otherwise we get a static library. + allow_undefined=yes + ;; + *) + allow_undefined=yes + ;; + esac + libtool_args=$nonopt + base_compile="$nonopt $@" + compile_command=$nonopt + finalize_command=$nonopt + + compile_rpath= + finalize_rpath= + compile_shlibpath= + finalize_shlibpath= + convenience= + old_convenience= + deplibs= + old_deplibs= + compiler_flags= + linker_flags= + dllsearchpath= + lib_search_path=`pwd` + inst_prefix_dir= + new_inherited_linker_flags= + + avoid_version=no + bindir= + dlfiles= + dlprefiles= + dlself=no + export_dynamic=no + export_symbols= + export_symbols_regex= + generated= + libobjs= + ltlibs= + module=no + no_install=no + objs= + non_pic_objects= + precious_files_regex= + prefer_static_libs=no + preload=no + prev= + prevarg= + release= + rpath= + xrpath= + perm_rpath= + temp_rpath= + thread_safe=no + vinfo= + vinfo_number=no + weak_libs= + single_module="${wl}-single_module" + func_infer_tag $base_compile + + # We need to know -static, to get the right output filenames. + for arg + do + case $arg in + -shared) + test "$build_libtool_libs" != yes && \ + func_fatal_configuration "can not build a shared library" + build_old_libs=no + break + ;; + -all-static | -static | -static-libtool-libs) + case $arg in + -all-static) + if test "$build_libtool_libs" = yes && test -z "$link_static_flag"; then + func_warning "complete static linking is impossible in this configuration" + fi + if test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + -static) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=built + ;; + -static-libtool-libs) + if test -z "$pic_flag" && test -n "$link_static_flag"; then + dlopen_self=$dlopen_self_static + fi + prefer_static_libs=yes + ;; + esac + build_libtool_libs=no + build_old_libs=yes + break + ;; + esac + done + + # See if our shared archives depend on static archives. + test -n "$old_archive_from_new_cmds" && build_old_libs=yes + + # Go through the arguments, transforming them on the way. + while test "$#" -gt 0; do + arg="$1" + shift + func_quote_for_eval "$arg" + qarg=$func_quote_for_eval_unquoted_result + func_append libtool_args " $func_quote_for_eval_result" + + # If the previous option needs an argument, assign it. + if test -n "$prev"; then + case $prev in + output) + func_append compile_command " @OUTPUT@" + func_append finalize_command " @OUTPUT@" + ;; + esac + + case $prev in + bindir) + bindir="$arg" + prev= + continue + ;; + dlfiles|dlprefiles) + if test "$preload" = no; then + # Add the symbol object into the linking commands. + func_append compile_command " @SYMFILE@" + func_append finalize_command " @SYMFILE@" + preload=yes + fi + case $arg in + *.la | *.lo) ;; # We handle these cases below. 
+ force) + if test "$dlself" = no; then + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + self) + if test "$prev" = dlprefiles; then + dlself=yes + elif test "$prev" = dlfiles && test "$dlopen_self" != yes; then + dlself=yes + else + dlself=needless + export_dynamic=yes + fi + prev= + continue + ;; + *) + if test "$prev" = dlfiles; then + func_append dlfiles " $arg" + else + func_append dlprefiles " $arg" + fi + prev= + continue + ;; + esac + ;; + expsyms) + export_symbols="$arg" + test -f "$arg" \ + || func_fatal_error "symbol file \`$arg' does not exist" + prev= + continue + ;; + expsyms_regex) + export_symbols_regex="$arg" + prev= + continue + ;; + framework) + case $host in + *-*-darwin*) + case "$deplibs " in + *" $qarg.ltframework "*) ;; + *) func_append deplibs " $qarg.ltframework" # this is fixed later + ;; + esac + ;; + esac + prev= + continue + ;; + inst_prefix) + inst_prefix_dir="$arg" + prev= + continue + ;; + objectlist) + if test -f "$arg"; then + save_arg=$arg + moreargs= + for fil in `cat "$save_arg"` + do +# func_append moreargs " $fil" + arg=$fil + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test "$pic_object" = none && + test "$non_pic_object" = none; then + func_fatal_error "cannot find name of object for \`$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "\`$arg' is not a valid libtool object" + fi + fi + done + else + func_fatal_error "link input file \`$arg' does not exist" + fi + arg=$save_arg + prev= + continue + ;; + precious_regex) + precious_files_regex="$arg" + prev= + continue + ;; + release) + release="-$arg" + prev= + continue + ;; + rpath | xrpath) + # We need an absolute path. 
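+ # e.g. `-rpath /usr/local/lib' passes the check below, while a
+ # relative `-rpath lib' is rejected with a fatal error.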
+ case $arg in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + if test "$prev" = rpath; then + case "$rpath " in + *" $arg "*) ;; + *) func_append rpath " $arg" ;; + esac + else + case "$xrpath " in + *" $arg "*) ;; + *) func_append xrpath " $arg" ;; + esac + fi + prev= + continue + ;; + shrext) + shrext_cmds="$arg" + prev= + continue + ;; + weak) + func_append weak_libs " $arg" + prev= + continue + ;; + xcclinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xcompiler) + func_append compiler_flags " $qarg" + prev= + func_append compile_command " $qarg" + func_append finalize_command " $qarg" + continue + ;; + xlinker) + func_append linker_flags " $qarg" + func_append compiler_flags " $wl$qarg" + prev= + func_append compile_command " $wl$qarg" + func_append finalize_command " $wl$qarg" + continue + ;; + *) + eval "$prev=\"\$arg\"" + prev= + continue + ;; + esac + fi # test -n "$prev" + + prevarg="$arg" + + case $arg in + -all-static) + if test -n "$link_static_flag"; then + # See comment for -static flag below, for more details. + func_append compile_command " $link_static_flag" + func_append finalize_command " $link_static_flag" + fi + continue + ;; + + -allow-undefined) + # FIXME: remove this flag sometime in the future. + func_fatal_error "\`-allow-undefined' must not be used because it is the default" + ;; + + -avoid-version) + avoid_version=yes + continue + ;; + + -bindir) + prev=bindir + continue + ;; + + -dlopen) + prev=dlfiles + continue + ;; + + -dlpreopen) + prev=dlprefiles + continue + ;; + + -export-dynamic) + export_dynamic=yes + continue + ;; + + -export-symbols | -export-symbols-regex) + if test -n "$export_symbols" || test -n "$export_symbols_regex"; then + func_fatal_error "more than one -exported-symbols argument is not allowed" + fi + if test "X$arg" = "X-export-symbols"; then + prev=expsyms + else + prev=expsyms_regex + fi + continue + ;; + + -framework) + prev=framework + continue + ;; + + -inst-prefix-dir) + prev=inst_prefix + continue + ;; + + # The native IRIX linker understands -LANG:*, -LIST:* and -LNO:* + # so, if we see these flags be careful not to treat them like -L + -L[A-Z][A-Z]*:*) + case $with_gcc/$host in + no/*-*-irix* | /*-*-irix*) + func_append compile_command " $arg" + func_append finalize_command " $arg" + ;; + esac + continue + ;; + + -L*) + func_stripname "-L" '' "$arg" + if test -z "$func_stripname_result"; then + if test "$#" -gt 0; then + func_fatal_error "require no space between \`-L' and \`$1'" + else + func_fatal_error "need path for \`-L' option" + fi + fi + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # We need an absolute path. 
+ case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + *) + absdir=`cd "$dir" && pwd` + test -z "$absdir" && \ + func_fatal_error "cannot determine absolute directory name of \`$dir'" + dir="$absdir" + ;; + esac + case "$deplibs " in + *" -L$dir "* | *" $arg "*) + # Will only happen for absolute or sysroot arguments + ;; + *) + # Preserve sysroot, but never include relative directories + case $dir in + [\\/]* | [A-Za-z]:[\\/]* | =*) func_append deplibs " $arg" ;; + *) func_append deplibs " -L$dir" ;; + esac + func_append lib_search_path " $dir" + ;; + esac + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`$ECHO "$dir" | $SED 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$dir:"*) ;; + ::) dllsearchpath=$dir;; + *) func_append dllsearchpath ":$dir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + continue + ;; + + -l*) + if test "X$arg" = "X-lc" || test "X$arg" = "X-lm"; then + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-beos* | *-cegcc* | *-*-haiku*) + # These systems don't actually have a C or math library (as such) + continue + ;; + *-*-os2*) + # These systems don't actually have a C library (as such) + test "X$arg" = "X-lc" && continue + ;; + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc due to us having libc/libc_r. + test "X$arg" = "X-lc" && continue + ;; + *-*-rhapsody* | *-*-darwin1.[012]) + # Rhapsody C and math libraries are in the System framework + func_append deplibs " System.ltframework" + continue + ;; + *-*-sco3.2v5* | *-*-sco5v6*) + # Causes problems with __ctype + test "X$arg" = "X-lc" && continue + ;; + *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*) + # Compiler inserts libc in the correct place for threads to work + test "X$arg" = "X-lc" && continue + ;; + esac + elif test "X$arg" = "X-lc_r"; then + case $host in + *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*) + # Do not include libc_r directly, use -pthread flag. + continue + ;; + esac + fi + func_append deplibs " $arg" + continue + ;; + + -module) + module=yes + continue + ;; + + # Tru64 UNIX uses -model [arg] to determine the layout of C++ + # classes, name mangling, and exception handling. + # Darwin uses the -arch flag to determine output architecture. + -model|-arch|-isysroot|--sysroot) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + prev=xcompiler + continue + ;; + + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + func_append compiler_flags " $arg" + func_append compile_command " $arg" + func_append finalize_command " $arg" + case "$new_inherited_linker_flags " in + *" $arg "*) ;; + * ) func_append new_inherited_linker_flags " $arg" ;; + esac + continue + ;; + + -multi_module) + single_module="${wl}-multi_module" + continue + ;; + + -no-fast-install) + fast_install=no + continue + ;; + + -no-install) + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-darwin* | *-cegcc*) + # The PATH hackery in wrapper scripts is required on Windows + # and Darwin in order for the loader to find any dlls it needs. 
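+ # (disabling the wrapper entirely would leave an uninstalled
+ # program unable to locate those dlls, so only fast-install is
+ # turned off)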
+ func_warning "\`-no-install' is ignored for $host" + func_warning "assuming \`-no-fast-install' instead" + fast_install=no + ;; + *) no_install=yes ;; + esac + continue + ;; + + -no-undefined) + allow_undefined=no + continue + ;; + + -objectlist) + prev=objectlist + continue + ;; + + -o) prev=output ;; + + -precious-files-regex) + prev=precious_regex + continue + ;; + + -release) + prev=release + continue + ;; + + -rpath) + prev=rpath + continue + ;; + + -R) + prev=xrpath + continue + ;; + + -R*) + func_stripname '-R' '' "$arg" + dir=$func_stripname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) ;; + =*) + func_stripname '=' '' "$dir" + dir=$lt_sysroot$func_stripname_result + ;; + *) + func_fatal_error "only absolute run-paths are allowed" + ;; + esac + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + continue + ;; + + -shared) + # The effects of -shared are defined in a previous loop. + continue + ;; + + -shrext) + prev=shrext + continue + ;; + + -static | -static-libtool-libs) + # The effects of -static are defined in a previous loop. + # We used to do the same as -all-static on platforms that + # didn't have a PIC flag, but the assumption that the effects + # would be equivalent was wrong. It would break on at least + # Digital Unix and AIX. + continue + ;; + + -thread-safe) + thread_safe=yes + continue + ;; + + -version-info) + prev=vinfo + continue + ;; + + -version-number) + prev=vinfo + vinfo_number=yes + continue + ;; + + -weak) + prev=weak + continue + ;; + + -Wc,*) + func_stripname '-Wc,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" + func_append arg " $func_quote_for_eval_result" + func_append compiler_flags " $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Wl,*) + func_stripname '-Wl,' '' "$arg" + args=$func_stripname_result + arg= + save_ifs="$IFS"; IFS=',' + for flag in $args; do + IFS="$save_ifs" + func_quote_for_eval "$flag" + func_append arg " $wl$func_quote_for_eval_result" + func_append compiler_flags " $wl$func_quote_for_eval_result" + func_append linker_flags " $func_quote_for_eval_result" + done + IFS="$save_ifs" + func_stripname ' ' '' "$arg" + arg=$func_stripname_result + ;; + + -Xcompiler) + prev=xcompiler + continue + ;; + + -Xlinker) + prev=xlinker + continue + ;; + + -XCClinker) + prev=xcclinker + continue + ;; + + # -msg_* for osf cc + -msg_*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + + # Flags to be passed through unchanged, with rationale: + # -64, -mips[0-9] enable 64-bit mode for the SGI compiler + # -r[0-9][0-9]* specify processor for the SGI compiler + # -xarch=*, -xtarget=* enable 64-bit mode for the Sun compiler + # +DA*, +DD* enable 64-bit mode for the HP compiler + # -q* compiler args for the IBM compiler + # -m*, -t[45]*, -txscale* architecture-specific flags for GCC + # -F/path path to uninstalled frameworks, gcc on darwin + # -p, -pg, --coverage, -fprofile-* profiling flags for GCC + # @file GCC response files + # -tp=* Portland pgcc target processor selection + # --sysroot=* for sysroot support + # -O*, -flto*, -fwhopr*, -fuse-linker-plugin GCC link-time optimization + -64|-mips[0-9]|-r[0-9][0-9]*|-xarch=*|-xtarget=*|+DA*|+DD*|-q*|-m*| \ + -t[45]*|-txscale*|-p|-pg|--coverage|-fprofile-*|-F*|@*|-tp=*|--sysroot=*| \ + -O*|-flto*|-fwhopr*|-fuse-linker-plugin) + func_quote_for_eval 
"$arg" + arg="$func_quote_for_eval_result" + func_append compile_command " $arg" + func_append finalize_command " $arg" + func_append compiler_flags " $arg" + continue + ;; + + # Some other compiler flag. + -* | +*) + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + + *.$objext) + # A standard object. + func_append objs " $arg" + ;; + + *.lo) + # A libtool-controlled object. + + # Check to see that this really is a libtool object. + if func_lalib_unsafe_p "$arg"; then + pic_object= + non_pic_object= + + # Read the .lo file + func_source "$arg" + + if test -z "$pic_object" || + test -z "$non_pic_object" || + test "$pic_object" = none && + test "$non_pic_object" = none; then + func_fatal_error "cannot find name of object for \`$arg'" + fi + + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + if test "$pic_object" != none; then + # Prepend the subdirectory the object is found in. + pic_object="$xdir$pic_object" + + if test "$prev" = dlfiles; then + if test "$build_libtool_libs" = yes && test "$dlopen_support" = yes; then + func_append dlfiles " $pic_object" + prev= + continue + else + # If libtool objects are unsupported, then we need to preload. + prev=dlprefiles + fi + fi + + # CHECK ME: I think I busted this. -Ossama + if test "$prev" = dlprefiles; then + # Preload the old-style object. + func_append dlprefiles " $pic_object" + prev= + fi + + # A PIC object. + func_append libobjs " $pic_object" + arg="$pic_object" + fi + + # Non-PIC object. + if test "$non_pic_object" != none; then + # Prepend the subdirectory the object is found in. + non_pic_object="$xdir$non_pic_object" + + # A standard non-PIC object + func_append non_pic_objects " $non_pic_object" + if test -z "$pic_object" || test "$pic_object" = none ; then + arg="$non_pic_object" + fi + else + # If the PIC object exists, use it instead. + # $xdir was prepended to $pic_object above. + non_pic_object="$pic_object" + func_append non_pic_objects " $non_pic_object" + fi + else + # Only an error if not doing a dry-run. + if $opt_dry_run; then + # Extract subdirectory from the argument. + func_dirname "$arg" "/" "" + xdir="$func_dirname_result" + + func_lo2o "$arg" + pic_object=$xdir$objdir/$func_lo2o_result + non_pic_object=$xdir$func_lo2o_result + func_append libobjs " $pic_object" + func_append non_pic_objects " $non_pic_object" + else + func_fatal_error "\`$arg' is not a valid libtool object" + fi + fi + ;; + + *.$libext) + # An archive. + func_append deplibs " $arg" + func_append old_deplibs " $arg" + continue + ;; + + *.la) + # A libtool-controlled library. + + func_resolve_sysroot "$arg" + if test "$prev" = dlfiles; then + # This library was specified with -dlopen. + func_append dlfiles " $func_resolve_sysroot_result" + prev= + elif test "$prev" = dlprefiles; then + # The library was specified with -dlpreopen. + func_append dlprefiles " $func_resolve_sysroot_result" + prev= + else + func_append deplibs " $func_resolve_sysroot_result" + fi + continue + ;; + + # Some other compiler argument. + *) + # Unknown arguments in both finalize_command and compile_command need + # to be aesthetically quoted because they are evaled later. + func_quote_for_eval "$arg" + arg="$func_quote_for_eval_result" + ;; + esac # arg + + # Now actually substitute the argument into the commands. 
+ if test -n "$arg"; then + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + done # argument parsing loop + + test -n "$prev" && \ + func_fatal_help "the \`$prevarg' option requires an argument" + + if test "$export_dynamic" = yes && test -n "$export_dynamic_flag_spec"; then + eval arg=\"$export_dynamic_flag_spec\" + func_append compile_command " $arg" + func_append finalize_command " $arg" + fi + + oldlibs= + # calculate the name of the file, without its directory + func_basename "$output" + outputname="$func_basename_result" + libobjs_save="$libobjs" + + if test -n "$shlibpath_var"; then + # get the directories listed in $shlibpath_var + eval shlib_search_path=\`\$ECHO \"\${$shlibpath_var}\" \| \$SED \'s/:/ /g\'\` + else + shlib_search_path= + fi + eval sys_lib_search_path=\"$sys_lib_search_path_spec\" + eval sys_lib_dlsearch_path=\"$sys_lib_dlsearch_path_spec\" + + func_dirname "$output" "/" "" + output_objdir="$func_dirname_result$objdir" + func_to_tool_file "$output_objdir/" + tool_output_objdir=$func_to_tool_file_result + # Create the object directory. + func_mkdir_p "$output_objdir" + + # Determine the type of output + case $output in + "") + func_fatal_help "you must specify an output file" + ;; + *.$libext) linkmode=oldlib ;; + *.lo | *.$objext) linkmode=obj ;; + *.la) linkmode=lib ;; + *) linkmode=prog ;; # Anything else should be a program. + esac + + specialdeplibs= + + libs= + # Find all interdependent deplibs by searching for libraries + # that are linked more than once (e.g. -la -lb -la) + for deplib in $deplibs; do + if $opt_preserve_dup_deps ; then + case "$libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append libs " $deplib" + done + + if test "$linkmode" = lib; then + libs="$predeps $libs $compiler_lib_search_path $postdeps" + + # Compute libraries that are listed more than once in $predeps + # $postdeps and mark them as special (i.e., whose duplicates are + # not to be eliminated). + pre_post_deps= + if $opt_duplicate_compiler_generated_deps; then + for pre_post_dep in $predeps $postdeps; do + case "$pre_post_deps " in + *" $pre_post_dep "*) func_append specialdeplibs " $pre_post_deps" ;; + esac + func_append pre_post_deps " $pre_post_dep" + done + fi + pre_post_deps= + fi + + deplibs= + newdependency_libs= + newlib_search_path= + need_relink=no # whether we're linking any uninstalled libtool libraries + notinst_deplibs= # not-installed libtool libraries + notinst_path= # paths that contain not-installed libtool libraries + + case $linkmode in + lib) + passes="conv dlpreopen link" + for file in $dlfiles $dlprefiles; do + case $file in + *.la) ;; + *) + func_fatal_help "libraries can \`-dlopen' only libtool libraries: $file" + ;; + esac + done + ;; + prog) + compile_deplibs= + finalize_deplibs= + alldeplibs=no + newdlfiles= + newdlprefiles= + passes="conv scan dlopen dlpreopen link" + ;; + *) passes="conv" + ;; + esac + + for pass in $passes; do + # The preopen pass in lib mode reverses $deplibs; put it back here + # so that -L comes before libs that need it for instance... 
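+ # (e.g. a `-L/opt/lib -lfoo' pair collected in that order must
+ # come back out with the -L flag still in front of -lfoo)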
+ if test "$linkmode,$pass" = "lib,link"; then + ## FIXME: Find the place where the list is rebuilt in the wrong + ## order, and fix it there properly + tmp_deplibs= + for deplib in $deplibs; do + tmp_deplibs="$deplib $tmp_deplibs" + done + deplibs="$tmp_deplibs" + fi + + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan"; then + libs="$deplibs" + deplibs= + fi + if test "$linkmode" = prog; then + case $pass in + dlopen) libs="$dlfiles" ;; + dlpreopen) libs="$dlprefiles" ;; + link) + libs="$deplibs %DEPLIBS%" + test "X$link_all_deplibs" != Xno && libs="$libs $dependency_libs" + ;; + esac + fi + if test "$linkmode,$pass" = "lib,dlpreopen"; then + # Collect and forward deplibs of preopened libtool libs + for lib in $dlprefiles; do + # Ignore non-libtool-libs + dependency_libs= + func_resolve_sysroot "$lib" + case $lib in + *.la) func_source "$func_resolve_sysroot_result" ;; + esac + + # Collect preopened libtool deplibs, except any this library + # has declared as weak libs + for deplib in $dependency_libs; do + func_basename "$deplib" + deplib_base=$func_basename_result + case " $weak_libs " in + *" $deplib_base "*) ;; + *) func_append deplibs " $deplib" ;; + esac + done + done + libs="$dlprefiles" + fi + if test "$pass" = dlopen; then + # Collect dlpreopened libraries + save_deplibs="$deplibs" + deplibs= + fi + + for deplib in $libs; do + lib= + found=no + case $deplib in + -mt|-mthreads|-kthread|-Kthread|-pthread|-pthreads|--thread-safe \ + |-threads|-fopenmp|-openmp|-mp|-xopenmp|-omp|-qsmp=*) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append compiler_flags " $deplib" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -l*) + if test "$linkmode" != lib && test "$linkmode" != prog; then + func_warning "\`-l' is ignored for archives/objects" + continue + fi + func_stripname '-l' '' "$deplib" + name=$func_stripname_result + if test "$linkmode" = lib; then + searchdirs="$newlib_search_path $lib_search_path $compiler_lib_search_dirs $sys_lib_search_path $shlib_search_path" + else + searchdirs="$newlib_search_path $lib_search_path $sys_lib_search_path $shlib_search_path" + fi + for searchdir in $searchdirs; do + for search_ext in .la $std_shrext .so .a; do + # Search the libtool library + lib="$searchdir/lib${name}${search_ext}" + if test -f "$lib"; then + if test "$search_ext" = ".la"; then + found=yes + else + found=no + fi + break 2 + fi + done + done + if test "$found" != yes; then + # deplib doesn't seem to be a libtool library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + else # deplib is a libtool library + # If $allow_libtool_libs_with_static_runtimes && $deplib is a stdlib, + # We need to do some special things here, and not later. 
+ if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $deplib "*) + if func_lalib_p "$lib"; then + library_names= + old_library= + func_source "$lib" + for l in $old_library $library_names; do + ll="$l" + done + if test "X$ll" = "X$old_library" ; then # only static version available + found=no + func_dirname "$lib" "" "." + ladir="$func_dirname_result" + lib=$ladir/$old_library + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + test "$linkmode" = lib && newdependency_libs="$deplib $newdependency_libs" + fi + continue + fi + fi + ;; + *) ;; + esac + fi + fi + ;; # -l + *.ltframework) + if test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + deplibs="$deplib $deplibs" + if test "$linkmode" = lib ; then + case "$new_inherited_linker_flags " in + *" $deplib "*) ;; + * ) func_append new_inherited_linker_flags " $deplib" ;; + esac + fi + fi + continue + ;; + -L*) + case $linkmode in + lib) + deplibs="$deplib $deplibs" + test "$pass" = conv && continue + newdependency_libs="$deplib $newdependency_libs" + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + prog) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + if test "$pass" = scan; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + *) + func_warning "\`-L' is ignored for archives/objects" + ;; + esac # linkmode + continue + ;; # -L + -R*) + if test "$pass" = link; then + func_stripname '-R' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + dir=$func_resolve_sysroot_result + # Make sure the xrpath contains only unique directories. + case "$xrpath " in + *" $dir "*) ;; + *) func_append xrpath " $dir" ;; + esac + fi + deplibs="$deplib $deplibs" + continue + ;; + *.la) + func_resolve_sysroot "$deplib" + lib=$func_resolve_sysroot_result + ;; + *.$libext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + continue + fi + case $linkmode in + lib) + # Linking convenience modules into shared libraries is allowed, + # but linking other static libraries is non-portable. + case " $dlpreconveniencelibs " in + *" $deplib "*) ;; + *) + valid_a_lib=no + case $deplibs_check_method in + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + if eval "\$ECHO \"$deplib\"" 2>/dev/null | $SED 10q \ + | $EGREP "$match_pattern_regex" > /dev/null; then + valid_a_lib=yes + fi + ;; + pass_all) + valid_a_lib=yes + ;; + esac + if test "$valid_a_lib" != yes; then + echo + $ECHO "*** Warning: Trying to link with static lib archive $deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because the file extensions .$libext of this argument makes me believe" + echo "*** that it is just a static archive that I should not use here." 
+ else + echo + $ECHO "*** Warning: Linking the shared library $output against the" + $ECHO "*** static library $deplib is not portable!" + deplibs="$deplib $deplibs" + fi + ;; + esac + continue + ;; + prog) + if test "$pass" != link; then + deplibs="$deplib $deplibs" + else + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + fi + continue + ;; + esac # linkmode + ;; # *.$libext + *.lo | *.$objext) + if test "$pass" = conv; then + deplibs="$deplib $deplibs" + elif test "$linkmode" = prog; then + if test "$pass" = dlpreopen || test "$dlopen_support" != yes || test "$build_libtool_libs" = no; then + # If there is no dlopen support or we're linking statically, + # we need to preload. + func_append newdlprefiles " $deplib" + compile_deplibs="$deplib $compile_deplibs" + finalize_deplibs="$deplib $finalize_deplibs" + else + func_append newdlfiles " $deplib" + fi + fi + continue + ;; + %DEPLIBS%) + alldeplibs=yes + continue + ;; + esac # case $deplib + + if test "$found" = yes || test -f "$lib"; then : + else + func_fatal_error "cannot find the library \`$lib' or unhandled argument \`$deplib'" + fi + + # Check to see that this really is a libtool archive. + func_lalib_unsafe_p "$lib" \ + || func_fatal_error "\`$lib' is not a valid libtool archive" + + func_dirname "$lib" "" "." + ladir="$func_dirname_result" + + dlname= + dlopen= + dlpreopen= + libdir= + library_names= + old_library= + inherited_linker_flags= + # If the library was installed with an old release of libtool, + # it will not redefine variables installed, or shouldnotlink + installed=yes + shouldnotlink=no + avoidtemprpath= + + + # Read the .la file + func_source "$lib" + + # Convert "-framework foo" to "foo.ltframework" + if test -n "$inherited_linker_flags"; then + tmp_inherited_linker_flags=`$ECHO "$inherited_linker_flags" | $SED 's/-framework \([^ $]*\)/\1.ltframework/g'` + for tmp_inherited_linker_flag in $tmp_inherited_linker_flags; do + case " $new_inherited_linker_flags " in + *" $tmp_inherited_linker_flag "*) ;; + *) func_append new_inherited_linker_flags " $tmp_inherited_linker_flag";; + esac + done + fi + dependency_libs=`$ECHO " $dependency_libs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + if test "$linkmode,$pass" = "lib,link" || + test "$linkmode,$pass" = "prog,scan" || + { test "$linkmode" != prog && test "$linkmode" != lib; }; then + test -n "$dlopen" && func_append dlfiles " $dlopen" + test -n "$dlpreopen" && func_append dlprefiles " $dlpreopen" + fi + + if test "$pass" = conv; then + # Only check for convenience libraries + deplibs="$lib $deplibs" + if test -z "$libdir"; then + if test -z "$old_library"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi + # It is a libtool convenience library, so add in its objects. + func_append convenience " $ladir/$objdir/$old_library" + func_append old_convenience " $ladir/$objdir/$old_library" + tmp_libs= + for deplib in $dependency_libs; do + deplibs="$deplib $deplibs" + if $opt_preserve_dup_deps ; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done + elif test "$linkmode" != prog && test "$linkmode" != lib; then + func_fatal_error "\`$lib' is not a convenience library" + fi + continue + fi # $pass = conv + + + # Get the name of the library we link against. 
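+ # e.g. with library_names=`libfoo.so.1.0.0 libfoo.so.1 libfoo.so'
+ # the loop below leaves linklib=libfoo.so, the last name listed.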
+ linklib= + if test -n "$old_library" && + { test "$prefer_static_libs" = yes || + test "$prefer_static_libs,$installed" = "built,no"; }; then + linklib=$old_library + else + for l in $old_library $library_names; do + linklib="$l" + done + fi + if test -z "$linklib"; then + func_fatal_error "cannot find name of link library for \`$lib'" + fi + + # This library was specified with -dlopen. + if test "$pass" = dlopen; then + if test -z "$libdir"; then + func_fatal_error "cannot -dlopen a convenience library: \`$lib'" + fi + if test -z "$dlname" || + test "$dlopen_support" != yes || + test "$build_libtool_libs" = no; then + # If there is no dlname, no dlopen support or we're linking + # statically, we need to preload. We also need to preload any + # dependent libraries so libltdl's deplib preloader doesn't + # bomb out in the load deplibs phase. + func_append dlprefiles " $lib $dependency_libs" + else + func_append newdlfiles " $lib" + fi + continue + fi # $pass = dlopen + + # We need an absolute path. + case $ladir in + [\\/]* | [A-Za-z]:[\\/]*) abs_ladir="$ladir" ;; + *) + abs_ladir=`cd "$ladir" && pwd` + if test -z "$abs_ladir"; then + func_warning "cannot determine absolute directory name of \`$ladir'" + func_warning "passing it literally to the linker, although it might fail" + abs_ladir="$ladir" + fi + ;; + esac + func_basename "$lib" + laname="$func_basename_result" + + # Find the relevant object directory and library name. + if test "X$installed" = Xyes; then + if test ! -f "$lt_sysroot$libdir/$linklib" && test -f "$abs_ladir/$linklib"; then + func_warning "library \`$lib' was moved." + dir="$ladir" + absdir="$abs_ladir" + libdir="$abs_ladir" + else + dir="$lt_sysroot$libdir" + absdir="$lt_sysroot$libdir" + fi + test "X$hardcode_automatic" = Xyes && avoidtemprpath=yes + else + if test ! -f "$ladir/$objdir/$linklib" && test -f "$abs_ladir/$linklib"; then + dir="$ladir" + absdir="$abs_ladir" + # Remove this search path later + func_append notinst_path " $abs_ladir" + else + dir="$ladir/$objdir" + absdir="$abs_ladir/$objdir" + # Remove this search path later + func_append notinst_path " $abs_ladir" + fi + fi # $installed = yes + func_stripname 'lib' '.la' "$laname" + name=$func_stripname_result + + # This library was specified with -dlpreopen. + if test "$pass" = dlpreopen; then + if test -z "$libdir" && test "$linkmode" = prog; then + func_fatal_error "only libraries may -dlpreopen a convenience library: \`$lib'" + fi + case "$host" in + # special handling for platforms with PE-DLLs. + *cygwin* | *mingw* | *cegcc* ) + # Linker will automatically link against shared library if both + # static and shared are present. Therefore, ensure we extract + # symbols from the import library if a shared library is present + # (otherwise, the dlopen module name will be incorrect). We do + # this by putting the import library name into $newdlprefiles. + # We recover the dlopen module name by 'saving' the la file + # name in a special purpose variable, and (later) extracting the + # dlname from the la file. + if test -n "$dlname"; then + func_tr_sh "$dir/$linklib" + eval "libfile_$func_tr_sh_result=\$abs_ladir/\$laname" + func_append newdlprefiles " $dir/$linklib" + else + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. 
+ test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + fi + ;; + * ) + # Prefer using a static library (so that no silly _DYNAMIC symbols + # are required to link). + if test -n "$old_library"; then + func_append newdlprefiles " $dir/$old_library" + # Keep a list of preopened convenience libraries to check + # that they are being used correctly in the link pass. + test -z "$libdir" && \ + func_append dlpreconveniencelibs " $dir/$old_library" + # Otherwise, use the dlname, so that lt_dlopen finds it. + elif test -n "$dlname"; then + func_append newdlprefiles " $dir/$dlname" + else + func_append newdlprefiles " $dir/$linklib" + fi + ;; + esac + fi # $pass = dlpreopen + + if test -z "$libdir"; then + # Link the convenience library + if test "$linkmode" = lib; then + deplibs="$dir/$old_library $deplibs" + elif test "$linkmode,$pass" = "prog,link"; then + compile_deplibs="$dir/$old_library $compile_deplibs" + finalize_deplibs="$dir/$old_library $finalize_deplibs" + else + deplibs="$lib $deplibs" # used for prog,scan pass + fi + continue + fi + + + if test "$linkmode" = prog && test "$pass" != link; then + func_append newlib_search_path " $ladir" + deplibs="$lib $deplibs" + + linkalldeplibs=no + if test "$link_all_deplibs" != no || test -z "$library_names" || + test "$build_libtool_libs" = no; then + linkalldeplibs=yes + fi + + tmp_libs= + for deplib in $dependency_libs; do + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result" + func_append newlib_search_path " $func_resolve_sysroot_result" + ;; + esac + # Need to link against all dependency_libs? + if test "$linkalldeplibs" = yes; then + deplibs="$deplib $deplibs" + else + # Need to hardcode shared library paths + # or/and link against static libraries + newdependency_libs="$deplib $newdependency_libs" + fi + if $opt_preserve_dup_deps ; then + case "$tmp_libs " in + *" $deplib "*) func_append specialdeplibs " $deplib" ;; + esac + fi + func_append tmp_libs " $deplib" + done # for deplib + continue + fi # $linkmode = prog... + + if test "$linkmode,$pass" = "prog,link"; then + if test -n "$library_names" && + { { test "$prefer_static_libs" = no || + test "$prefer_static_libs,$installed" = "built,yes"; } || + test -z "$old_library"; }; then + # We need to hardcode the library path + if test -n "$shlibpath_var" && test -z "$avoidtemprpath" ; then + # Make sure the rpath contains only unique directories. + case "$temp_rpath:" in + *"$absdir:"*) ;; + *) func_append temp_rpath "$absdir:" ;; + esac + fi + + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi # $linkmode,$pass = prog,link... 
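+ # At this point $compile_rpath (for the uninstalled binary) and
+ # $finalize_rpath (for the installed one) list every directory
+ # that must be hardcoded, minus those already on the system
+ # default run-time search path.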
+ + if test "$alldeplibs" = yes && + { test "$deplibs_check_method" = pass_all || + { test "$build_libtool_libs" = yes && + test -n "$library_names"; }; }; then + # We only need to search for static libraries + continue + fi + fi + + link_static=no # Whether the deplib will be linked statically + use_static_libs=$prefer_static_libs + if test "$use_static_libs" = built && test "$installed" = yes; then + use_static_libs=no + fi + if test -n "$library_names" && + { test "$use_static_libs" = no || test -z "$old_library"; }; then + case $host in + *cygwin* | *mingw* | *cegcc*) + # No point in relinking DLLs because paths are not encoded + func_append notinst_deplibs " $lib" + need_relink=no + ;; + *) + if test "$installed" = no; then + func_append notinst_deplibs " $lib" + need_relink=yes + fi + ;; + esac + # This is a shared library + + # Warn about portability, can't link against -module's on some + # systems (darwin). Don't bleat about dlopened modules though! + dlopenmodule="" + for dlpremoduletest in $dlprefiles; do + if test "X$dlpremoduletest" = "X$lib"; then + dlopenmodule="$dlpremoduletest" + break + fi + done + if test -z "$dlopenmodule" && test "$shouldnotlink" = yes && test "$pass" = link; then + echo + if test "$linkmode" = prog; then + $ECHO "*** Warning: Linking the executable $output against the loadable module" + else + $ECHO "*** Warning: Linking the shared library $output against the loadable module" + fi + $ECHO "*** $linklib is not portable!" + fi + if test "$linkmode" = lib && + test "$hardcode_into_libs" = yes; then + # Hardcode the library path. + # Skip directories that are in the system default run-time + # search path. + case " $sys_lib_dlsearch_path " in + *" $absdir "*) ;; + *) + case "$compile_rpath " in + *" $absdir "*) ;; + *) func_append compile_rpath " $absdir" ;; + esac + ;; + esac + case " $sys_lib_dlsearch_path " in + *" $libdir "*) ;; + *) + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + ;; + esac + fi + + if test -n "$old_archive_from_expsyms_cmds"; then + # figure out the soname + set dummy $library_names + shift + realname="$1" + shift + libname=`eval "\\$ECHO \"$libname_spec\""` + # use dlname if we got it. it's perfectly good, no? + if test -n "$dlname"; then + soname="$dlname" + elif test -n "$soname_spec"; then + # bleh windows + case $host in + *cygwin* | mingw* | *cegcc*) + func_arith $current - $age + major=$func_arith_result + versuffix="-$major" + ;; + esac + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + + # Make a new name for the extract_expsyms_cmds to use + soroot="$soname" + func_basename "$soroot" + soname="$func_basename_result" + func_stripname 'lib' '.dll' "$soname" + newlib=libimp-$func_stripname_result.a + + # If the library has no export list, then create one now + if test -f "$output_objdir/$soname-def"; then : + else + func_verbose "extracting exported symbol list from \`$soname'" + func_execute_cmds "$extract_expsyms_cmds" 'exit $?' + fi + + # Create $newlib + if test -f "$output_objdir/$newlib"; then :; else + func_verbose "generating import library for \`$soname'" + func_execute_cmds "$old_archive_from_expsyms_cmds" 'exit $?' 
+ fi + # make sure the library variables are pointing to the new library + dir=$output_objdir + linklib=$newlib + fi # test -n "$old_archive_from_expsyms_cmds" + + if test "$linkmode" = prog || test "$opt_mode" != relink; then + add_shlibpath= + add_dir= + add= + lib_linked=yes + case $hardcode_action in + immediate | unsupported) + if test "$hardcode_direct" = no; then + add="$dir/$linklib" + case $host in + *-*-sco3.2v5.0.[024]*) add_dir="-L$dir" ;; + *-*-sysv4*uw2*) add_dir="-L$dir" ;; + *-*-sysv5OpenUNIX* | *-*-sysv5UnixWare7.[01].[10]* | \ + *-*-unixware7*) add_dir="-L$dir" ;; + *-*-darwin* ) + # if the lib is a (non-dlopened) module then we can not + # link against it, someone is ignoring the earlier warnings + if /usr/bin/file -L $add 2> /dev/null | + $GREP ": [^:]* bundle" >/dev/null ; then + if test "X$dlopenmodule" != "X$lib"; then + $ECHO "*** Warning: lib $linklib is a module, not a shared library" + if test -z "$old_library" ; then + echo + echo "*** And there doesn't seem to be a static archive available" + echo "*** The link will probably fail, sorry" + else + add="$dir/$old_library" + fi + elif test -n "$old_library"; then + add="$dir/$old_library" + fi + fi + esac + elif test "$hardcode_minus_L" = no; then + case $host in + *-*-sunos*) add_shlibpath="$dir" ;; + esac + add_dir="-L$dir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = no; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + relink) + if test "$hardcode_direct" = yes && + test "$hardcode_direct_absolute" = no; then + add="$dir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$absdir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + add_shlibpath="$dir" + add="-l$name" + else + lib_linked=no + fi + ;; + *) lib_linked=no ;; + esac + + if test "$lib_linked" != yes; then + func_fatal_configuration "unsupported hardcode properties" + fi + + if test -n "$add_shlibpath"; then + case :$compile_shlibpath: in + *":$add_shlibpath:"*) ;; + *) func_append compile_shlibpath "$add_shlibpath:" ;; + esac + fi + if test "$linkmode" = prog; then + test -n "$add_dir" && compile_deplibs="$add_dir $compile_deplibs" + test -n "$add" && compile_deplibs="$add $compile_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + if test "$hardcode_direct" != yes && + test "$hardcode_minus_L" != yes && + test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + fi + fi + fi + + if test "$linkmode" = prog || test "$opt_mode" = relink; then + add_shlibpath= + add_dir= + add= + # Finalize command for both is simple: just hardcode it. 
+ if test "$hardcode_direct" = yes && + test "$hardcode_direct_absolute" = no; then + add="$libdir/$linklib" + elif test "$hardcode_minus_L" = yes; then + add_dir="-L$libdir" + add="-l$name" + elif test "$hardcode_shlibpath_var" = yes; then + case :$finalize_shlibpath: in + *":$libdir:"*) ;; + *) func_append finalize_shlibpath "$libdir:" ;; + esac + add="-l$name" + elif test "$hardcode_automatic" = yes; then + if test -n "$inst_prefix_dir" && + test -f "$inst_prefix_dir$libdir/$linklib" ; then + add="$inst_prefix_dir$libdir/$linklib" + else + add="$libdir/$linklib" + fi + else + # We cannot seem to hardcode it, guess we'll fake it. + add_dir="-L$libdir" + # Try looking first in the location we're being installed to. + if test -n "$inst_prefix_dir"; then + case $libdir in + [\\/]*) + func_append add_dir " -L$inst_prefix_dir$libdir" + ;; + esac + fi + add="-l$name" + fi + + if test "$linkmode" = prog; then + test -n "$add_dir" && finalize_deplibs="$add_dir $finalize_deplibs" + test -n "$add" && finalize_deplibs="$add $finalize_deplibs" + else + test -n "$add_dir" && deplibs="$add_dir $deplibs" + test -n "$add" && deplibs="$add $deplibs" + fi + fi + elif test "$linkmode" = prog; then + # Here we assume that one of hardcode_direct or hardcode_minus_L + # is not unsupported. This is valid on all known static and + # shared platforms. + if test "$hardcode_direct" != unsupported; then + test -n "$old_library" && linklib="$old_library" + compile_deplibs="$dir/$linklib $compile_deplibs" + finalize_deplibs="$dir/$linklib $finalize_deplibs" + else + compile_deplibs="-l$name -L$dir $compile_deplibs" + finalize_deplibs="-l$name -L$dir $finalize_deplibs" + fi + elif test "$build_libtool_libs" = yes; then + # Not a shared library + if test "$deplibs_check_method" != pass_all; then + # We're trying link a shared library against a static one + # but the system doesn't support it. + + # Just print a warning and add the library to dependency_libs so + # that the program can be linked against the static library. + echo + $ECHO "*** Warning: This system can not link to static lib archive $lib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have." + if test "$module" = yes; then + echo "*** But as you try to build a module library, libtool will still create " + echo "*** a static module, that should work as long as the dlopening application" + echo "*** is linked with the -dlopen flag to resolve symbols at runtime." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + else + deplibs="$dir/$old_library $deplibs" + link_static=yes + fi + fi # link shared/static library? 
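+ # To recap: for each deplib the logic above produces up to three
+ # artifacts -- $add (a file name or -lNAME for the link line),
+ # $add_dir (an -L flag for its directory) and $add_shlibpath (a
+ # directory to append to $shlibpath_var at run time); which ones
+ # are used depends on the hardcode_* capabilities of the host.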
+ + if test "$linkmode" = lib; then + if test -n "$dependency_libs" && + { test "$hardcode_into_libs" != yes || + test "$build_old_libs" = yes || + test "$link_static" = yes; }; then + # Extract -R from dependency_libs + temp_deplibs= + for libdir in $dependency_libs; do + case $libdir in + -R*) func_stripname '-R' '' "$libdir" + temp_xrpath=$func_stripname_result + case " $xrpath " in + *" $temp_xrpath "*) ;; + *) func_append xrpath " $temp_xrpath";; + esac;; + *) func_append temp_deplibs " $libdir";; + esac + done + dependency_libs="$temp_deplibs" + fi + + func_append newlib_search_path " $absdir" + # Link against this library + test "$link_static" = no && newdependency_libs="$abs_ladir/$laname $newdependency_libs" + # ... and its dependency_libs + tmp_libs= + for deplib in $dependency_libs; do + newdependency_libs="$deplib $newdependency_libs" + case $deplib in + -L*) func_stripname '-L' '' "$deplib" + func_resolve_sysroot "$func_stripname_result";; + *) func_resolve_sysroot "$deplib" ;; + esac + if $opt_preserve_dup_deps ; then + case "$tmp_libs " in + *" $func_resolve_sysroot_result "*) + func_append specialdeplibs " $func_resolve_sysroot_result" ;; + esac + fi + func_append tmp_libs " $func_resolve_sysroot_result" + done + + if test "$link_all_deplibs" != no; then + # Add the search paths of all dependency libraries + for deplib in $dependency_libs; do + path= + case $deplib in + -L*) path="$deplib" ;; + *.la) + func_resolve_sysroot "$deplib" + deplib=$func_resolve_sysroot_result + func_dirname "$deplib" "" "." + dir=$func_dirname_result + # We need an absolute path. + case $dir in + [\\/]* | [A-Za-z]:[\\/]*) absdir="$dir" ;; + *) + absdir=`cd "$dir" && pwd` + if test -z "$absdir"; then + func_warning "cannot determine absolute directory name of \`$dir'" + absdir="$dir" + fi + ;; + esac + if $GREP "^installed=no" $deplib > /dev/null; then + case $host in + *-*-darwin*) + depdepl= + eval deplibrary_names=`${SED} -n -e 's/^library_names=\(.*\)$/\1/p' $deplib` + if test -n "$deplibrary_names" ; then + for tmp in $deplibrary_names ; do + depdepl=$tmp + done + if test -f "$absdir/$objdir/$depdepl" ; then + depdepl="$absdir/$objdir/$depdepl" + darwin_install_name=`${OTOOL} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + if test -z "$darwin_install_name"; then + darwin_install_name=`${OTOOL64} -L $depdepl | awk '{if (NR == 2) {print $1;exit}}'` + fi + func_append compiler_flags " ${wl}-dylib_file ${wl}${darwin_install_name}:${depdepl}" + func_append linker_flags " -dylib_file ${darwin_install_name}:${depdepl}" + path= + fi + fi + ;; + *) + path="-L$absdir/$objdir" + ;; + esac + else + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $deplib` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + test "$absdir" != "$libdir" && \ + func_warning "\`$deplib' seems to be moved" + + path="-L$absdir" + fi + ;; + esac + case " $deplibs " in + *" $path "*) ;; + *) deplibs="$path $deplibs" ;; + esac + done + fi # link_all_deplibs != no + fi # linkmode = lib + done # for deplib in $libs + if test "$pass" = link; then + if test "$linkmode" = "prog"; then + compile_deplibs="$new_inherited_linker_flags $compile_deplibs" + finalize_deplibs="$new_inherited_linker_flags $finalize_deplibs" + else + compiler_flags="$compiler_flags "`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + fi + fi + dependency_libs="$newdependency_libs" + if test "$pass" = dlpreopen; then + # Link the dlpreopened libraries before other libraries + 
for deplib in $save_deplibs; do + deplibs="$deplib $deplibs" + done + fi + if test "$pass" != dlopen; then + if test "$pass" != conv; then + # Make sure lib_search_path contains only unique directories. + lib_search_path= + for dir in $newlib_search_path; do + case "$lib_search_path " in + *" $dir "*) ;; + *) func_append lib_search_path " $dir" ;; + esac + done + newlib_search_path= + fi + + if test "$linkmode,$pass" != "prog,link"; then + vars="deplibs" + else + vars="compile_deplibs finalize_deplibs" + fi + for var in $vars dependency_libs; do + # Add libraries to $var in reverse order + eval tmp_libs=\"\$$var\" + new_libs= + for deplib in $tmp_libs; do + # FIXME: Pedantically, this is the right thing to do, so + # that some nasty dependency loop isn't accidentally + # broken: + #new_libs="$deplib $new_libs" + # Pragmatically, this seems to cause very few problems in + # practice: + case $deplib in + -L*) new_libs="$deplib $new_libs" ;; + -R*) ;; + *) + # And here is the reason: when a library appears more + # than once as an explicit dependence of a library, or + # is implicitly linked in more than once by the + # compiler, it is considered special, and multiple + # occurrences thereof are not removed. Compare this + # with having the same library being listed as a + # dependency of multiple other libraries: in this case, + # we know (pedantically, we assume) the library does not + # need to be listed more than once, so we keep only the + # last copy. This is not always right, but it is rare + # enough that we require users that really mean to play + # such unportable linking tricks to link the library + # using -Wl,-lname, so that libtool does not consider it + # for duplicate removal. + case " $specialdeplibs " in + *" $deplib "*) new_libs="$deplib $new_libs" ;; + *) + case " $new_libs " in + *" $deplib "*) ;; + *) new_libs="$deplib $new_libs" ;; + esac + ;; + esac + ;; + esac + done + tmp_libs= + for deplib in $new_libs; do + case $deplib in + -L*) + case " $tmp_libs " in + *" $deplib "*) ;; + *) func_append tmp_libs " $deplib" ;; + esac + ;; + *) func_append tmp_libs " $deplib" ;; + esac + done + eval $var=\"$tmp_libs\" + done # for var + fi + # Last step: remove runtime libs from dependency_libs + # (they stay in deplibs) + tmp_libs= + for i in $dependency_libs ; do + case " $predeps $postdeps $compiler_lib_search_path " in + *" $i "*) + i="" + ;; + esac + if test -n "$i" ; then + func_append tmp_libs " $i" + fi + done + dependency_libs=$tmp_libs + done # for pass + if test "$linkmode" = prog; then + dlfiles="$newdlfiles" + fi + if test "$linkmode" = prog || test "$linkmode" = lib; then + dlprefiles="$newdlprefiles" + fi + + case $linkmode in + oldlib) + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + func_warning "\`-dlopen' is ignored for archives" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "\`-l' and \`-L' are ignored for archives" ;; + esac + + test -n "$rpath" && \ + func_warning "\`-rpath' is ignored for archives" + + test -n "$xrpath" && \ + func_warning "\`-R' is ignored for archives" + + test -n "$vinfo" && \ + func_warning "\`-version-info/-version-number' is ignored for archives" + + test -n "$release" && \ + func_warning "\`-release' is ignored for archives" + + test -n "$export_symbols$export_symbols_regex" && \ + func_warning "\`-export-symbols' is ignored for archives" + + # Now set the variables for building old libraries. 
+ build_libtool_libs=no + oldlibs="$output" + func_append objs "$old_deplibs" + ;; + + lib) + # Make sure we only generate libraries of the form `libNAME.la'. + case $outputname in + lib*) + func_stripname 'lib' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + ;; + *) + test "$module" = no && \ + func_fatal_help "libtool library \`$output' must begin with \`lib'" + + if test "$need_lib_prefix" != no; then + # Add the "lib" prefix for modules if required + func_stripname '' '.la' "$outputname" + name=$func_stripname_result + eval shared_ext=\"$shrext_cmds\" + eval libname=\"$libname_spec\" + else + func_stripname '' '.la' "$outputname" + libname=$func_stripname_result + fi + ;; + esac + + if test -n "$objs"; then + if test "$deplibs_check_method" != pass_all; then + func_fatal_error "cannot build libtool library \`$output' from non-libtool objects on this host:$objs" + else + echo + $ECHO "*** Warning: Linking the shared library $output against the non-libtool" + $ECHO "*** objects $objs is not portable!" + func_append libobjs " $objs" + fi + fi + + test "$dlself" != no && \ + func_warning "\`-dlopen self' is ignored for libtool libraries" + + set dummy $rpath + shift + test "$#" -gt 1 && \ + func_warning "ignoring multiple \`-rpath's for a libtool library" + + install_libdir="$1" + + oldlibs= + if test -z "$rpath"; then + if test "$build_libtool_libs" = yes; then + # Building a libtool convenience library. + # Some compilers have problems with a `.al' extension so + # convenience libraries should have the same extension an + # archive normally would. + oldlibs="$output_objdir/$libname.$libext $oldlibs" + build_libtool_libs=convenience + build_old_libs=yes + fi + + test -n "$vinfo" && \ + func_warning "\`-version-info/-version-number' is ignored for convenience libraries" + + test -n "$release" && \ + func_warning "\`-release' is ignored for convenience libraries" + else + + # Parse the version information argument. + save_ifs="$IFS"; IFS=':' + set dummy $vinfo 0 0 0 + shift + IFS="$save_ifs" + + test -n "$7" && \ + func_fatal_help "too many parameters to \`-version-info'" + + # convert absolute version numbers to libtool ages + # this retains compatibility with .la files and attempts + # to make the code below a bit more comprehensible + + case $vinfo_number in + yes) + number_major="$1" + number_minor="$2" + number_revision="$3" + # + # There are really only two kinds -- those that + # use the current revision as the major version + # and those that subtract age and use age as + # a minor version. But, then there is irix + # which has an extra 1 added just for fun + # + case $version_type in + # correct linux to gnu/linux during the next big refactor + darwin|linux|osf|windows|none) + func_arith $number_major + $number_minor + current=$func_arith_result + age="$number_minor" + revision="$number_revision" + ;; + freebsd-aout|freebsd-elf|qnx|sunos) + current="$number_major" + revision="$number_minor" + age="0" + ;; + irix|nonstopux) + func_arith $number_major + $number_minor + current=$func_arith_result + age="$number_minor" + revision="$number_minor" + lt_irix_increment=no + ;; + *) + func_fatal_configuration "$modename: unknown library version type \`$version_type'" + ;; + esac + ;; + no) + current="$1" + revision="$2" + age="$3" + ;; + esac + + # Check that each of the things are valid numbers. 
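+ # --- Illustrative aside, not part of the vendored libtool script: for
+ # version_type=linux the vinfo_number branch above folds
+ # `-version-number MAJOR:MINOR:REV' into CURRENT:REVISION:AGE as
+ # current=MAJOR+MINOR, age=MINOR, revision=REV. Worked sketch with
+ # hypothetical numbers:
+ demo_major=3 demo_minor=2 demo_revision=1 # i.e. -version-number 3:2:1
+ demo_current=`expr $demo_major + $demo_minor` # 5
+ demo_age=$demo_minor # 2
+ # Since the linux) case below computes major as current - age = 3, the
+ # shared object is still named libfoo.so.3, matching the MAJOR given.
+ # --- end aside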
+ case $current in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "CURRENT \`$current' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + case $revision in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "REVISION \`$revision' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + case $age in + 0|[1-9]|[1-9][0-9]|[1-9][0-9][0-9]|[1-9][0-9][0-9][0-9]|[1-9][0-9][0-9][0-9][0-9]) ;; + *) + func_error "AGE \`$age' must be a nonnegative integer" + func_fatal_error "\`$vinfo' is not valid version information" + ;; + esac + + if test "$age" -gt "$current"; then + func_error "AGE \`$age' is greater than the current interface number \`$current'" + func_fatal_error "\`$vinfo' is not valid version information" + fi + + # Calculate the version variables. + major= + versuffix= + verstring= + case $version_type in + none) ;; + + darwin) + # Like Linux, but with the current version available in + # verstring for coding it into the library header + func_arith $current - $age + major=.$func_arith_result + versuffix="$major.$age.$revision" + # Darwin ld doesn't like 0 for these options... + func_arith $current + 1 + minor_current=$func_arith_result + xlcverstring="${wl}-compatibility_version ${wl}$minor_current ${wl}-current_version ${wl}$minor_current.$revision" + verstring="-compatibility_version $minor_current -current_version $minor_current.$revision" + ;; + + freebsd-aout) + major=".$current" + versuffix=".$current.$revision"; + ;; + + freebsd-elf) + major=".$current" + versuffix=".$current" + ;; + + irix | nonstopux) + if test "X$lt_irix_increment" = "Xno"; then + func_arith $current - $age + else + func_arith $current - $age + 1 + fi + major=$func_arith_result + + case $version_type in + nonstopux) verstring_prefix=nonstopux ;; + *) verstring_prefix=sgi ;; + esac + verstring="$verstring_prefix$major.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$revision + while test "$loop" -ne 0; do + func_arith $revision - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring="$verstring_prefix$major.$iface:$verstring" + done + + # Before this point, $major must not contain `.'. + major=.$major + versuffix="$major.$revision" + ;; + + linux) # correct to gnu/linux during the next big refactor + func_arith $current - $age + major=.$func_arith_result + versuffix="$major.$age.$revision" + ;; + + osf) + func_arith $current - $age + major=.$func_arith_result + versuffix=".$current.$age.$revision" + verstring="$current.$age.$revision" + + # Add in all the interfaces that we are compatible with. + loop=$age + while test "$loop" -ne 0; do + func_arith $current - $loop + iface=$func_arith_result + func_arith $loop - 1 + loop=$func_arith_result + verstring="$verstring:${iface}.0" + done + + # Make executables depend on our current version. + func_append verstring ":${current}.0" + ;; + + qnx) + major=".$current" + versuffix=".$current" + ;; + + sunos) + major=".$current" + versuffix=".$current.$revision" + ;; + + windows) + # Use '-' rather than '.', since we only want one + # extension on DOS 8.3 filesystems. 
+ func_arith $current - $age + major=$func_arith_result + versuffix="-$major" + ;; + + *) + func_fatal_configuration "unknown library version type \`$version_type'" + ;; + esac + + # Clear the version info if we defaulted, and they specified a release. + if test -z "$vinfo" && test -n "$release"; then + major= + case $version_type in + darwin) + # we can't check for "0.0" in archive_cmds due to quoting + # problems, so we reset it completely + verstring= + ;; + *) + verstring="0.0" + ;; + esac + if test "$need_version" = no; then + versuffix= + else + versuffix=".0.0" + fi + fi + + # Remove version info from name if versioning should be avoided + if test "$avoid_version" = yes && test "$need_version" = no; then + major= + versuffix= + verstring="" + fi + + # Check to see if the archive will have undefined symbols. + if test "$allow_undefined" = yes; then + if test "$allow_undefined_flag" = unsupported; then + func_warning "undefined symbols not allowed in $host shared libraries" + build_libtool_libs=no + build_old_libs=yes + fi + else + # Don't allow undefined symbols. + allow_undefined_flag="$no_undefined_flag" + fi + + fi + + func_generate_dlsyms "$libname" "$libname" "yes" + func_append libobjs " $symfileobj" + test "X$libobjs" = "X " && libobjs= + + if test "$opt_mode" != relink; then + # Remove our outputs, but don't remove object files since they + # may have been created when compiling PIC objects. + removelist= + tempremovelist=`$ECHO "$output_objdir/*"` + for p in $tempremovelist; do + case $p in + *.$objext | *.gcno) + ;; + $output_objdir/$outputname | $output_objdir/$libname.* | $output_objdir/${libname}${release}.*) + if test "X$precious_files_regex" != "X"; then + if $ECHO "$p" | $EGREP -e "$precious_files_regex" >/dev/null 2>&1 + then + continue + fi + fi + func_append removelist " $p" + ;; + *) ;; + esac + done + test -n "$removelist" && \ + func_show_eval "${RM}r \$removelist" + fi + + # Now set the variables for building old libraries. + if test "$build_old_libs" = yes && test "$build_libtool_libs" != convenience ; then + func_append oldlibs " $output_objdir/$libname.$libext" + + # Transform .lo files to .o files. + oldobjs="$objs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; $lo2o" | $NL2SP` + fi + + # Eliminate all temporary directories. + #for path in $notinst_path; do + # lib_search_path=`$ECHO "$lib_search_path " | $SED "s% $path % %g"` + # deplibs=`$ECHO "$deplibs " | $SED "s% -L$path % %g"` + # dependency_libs=`$ECHO "$dependency_libs " | $SED "s% -L$path % %g"` + #done + + if test -n "$xrpath"; then + # If the user specified any rpath flags, then add them. 
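+ # --- Illustrative aside, not part of the vendored libtool script: the
+ # $SP2NL/$lo2o/$NL2SP pipeline above amounts to "drop archives, rename
+ # .lo to .o". An equivalent plain-sed sketch over a hypothetical list:
+ demo_libobjs="a.lo sub/b.lo libx.a c.lo"
+ demo_oldobjs=`printf '%s\n' $demo_libobjs | sed -e '/\.a$/d' -e 's/\.lo$/.o/' | tr '\n' ' '`
+ # demo_oldobjs is now "a.o sub/b.o c.o "
+ # --- end aside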
+ temp_xrpath=
+ for libdir in $xrpath; do
+ func_replace_sysroot "$libdir"
+ func_append temp_xrpath " -R$func_replace_sysroot_result"
+ case "$finalize_rpath " in
+ *" $libdir "*) ;;
+ *) func_append finalize_rpath " $libdir" ;;
+ esac
+ done
+ if test "$hardcode_into_libs" != yes || test "$build_old_libs" = yes; then
+ dependency_libs="$temp_xrpath $dependency_libs"
+ fi
+ fi
+
+ # Make sure dlfiles contains only unique files that won't be dlpreopened
+ old_dlfiles="$dlfiles"
+ dlfiles=
+ for lib in $old_dlfiles; do
+ case " $dlprefiles $dlfiles " in
+ *" $lib "*) ;;
+ *) func_append dlfiles " $lib" ;;
+ esac
+ done
+
+ # Make sure dlprefiles contains only unique files
+ old_dlprefiles="$dlprefiles"
+ dlprefiles=
+ for lib in $old_dlprefiles; do
+ case "$dlprefiles " in
+ *" $lib "*) ;;
+ *) func_append dlprefiles " $lib" ;;
+ esac
+ done
+
+ if test "$build_libtool_libs" = yes; then
+ if test -n "$rpath"; then
+ case $host in
+ *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-*-beos* | *-cegcc* | *-*-haiku*)
+ # these systems don't actually have a c library (as such)!
+ ;;
+ *-*-rhapsody* | *-*-darwin1.[012])
+ # Rhapsody C library is in the System framework
+ func_append deplibs " System.ltframework"
+ ;;
+ *-*-netbsd*)
+ # Don't link with libc until the a.out ld.so is fixed.
+ ;;
+ *-*-openbsd* | *-*-freebsd* | *-*-dragonfly*)
+ # Do not include libc due to us having libc/libc_r.
+ ;;
+ *-*-sco3.2v5* | *-*-sco5v6*)
+ # Causes problems with __ctype
+ ;;
+ *-*-sysv4.2uw2* | *-*-sysv5* | *-*-unixware* | *-*-OpenUNIX*)
+ # Compiler inserts libc in the correct place for threads to work
+ ;;
+ *)
+ # Add libc to deplibs on all other systems if necessary.
+ if test "$build_libtool_need_lc" = "yes"; then
+ func_append deplibs " -lc"
+ fi
+ ;;
+ esac
+ fi
+
+ # Transform deplibs into only deplibs that can be linked in shared.
+ name_save=$name
+ libname_save=$libname
+ release_save=$release
+ versuffix_save=$versuffix
+ major_save=$major
+ # I'm not sure if I'm treating the release correctly. I think
+ # release should show up in the -l (ie -lgmp5) so we don't want to
+ # add it in twice. Is that correct?
+ release=""
+ versuffix=""
+ major=""
+ newdeplibs=
+ droppeddeps=no
+ case $deplibs_check_method in
+ pass_all)
+ # Don't check for shared/static. Everything works.
+ # This might be a little naive. We might want to check
+ # whether the library exists or not. But this is on
+ # osf3 & osf4 and I'm not really sure... Just
+ # implementing what was already the behavior.
+ newdeplibs=$deplibs
+ ;;
+ test_compile)
+ # This code stresses the "libraries are programs" paradigm to its
+ # limits. Maybe even breaks it. We compile a program, linking it
+ # against the deplibs as a proxy for the library. Then we can check
+ # whether they linked in statically or dynamically with ldd.
+ $opt_dry_run || $RM conftest.c
+ cat > conftest.c <<EOF
+ int main() { return 0; }
+ EOF
+ # [... remainder of the test_compile branch and head of the file_magic
+ # branch missing from this capture; the text resumes inside the
+ # file_magic candidate-library search ...]
+ if test "$want_nocaseglob" = yes; then
+ shopt -s nocaseglob
+ potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null`
+ $nocaseglob
+ else
+ potential_libs=`ls $i/$libnameglob[.-]* 2>/dev/null`
+ fi
+ for potent_lib in $potential_libs; do
+ # Follow soft links.
+ if ls -lLd "$potent_lib" 2>/dev/null |
+ $GREP " -> " >/dev/null; then
+ continue
+ fi
+ # The statement above tries to avoid entering an
+ # endless loop below, in case of cyclic links.
+ # We might still enter an endless loop, since a link
+ # loop can be closed while we follow links,
+ # but so what?
+ potlib="$potent_lib" + while test -h "$potlib" 2>/dev/null; do + potliblink=`ls -ld $potlib | ${SED} 's/.* -> //'` + case $potliblink in + [\\/]* | [A-Za-z]:[\\/]*) potlib="$potliblink";; + *) potlib=`$ECHO "$potlib" | $SED 's,[^/]*$,,'`"$potliblink";; + esac + done + if eval $file_magic_cmd \"\$potlib\" 2>/dev/null | + $SED -e 10q | + $EGREP "$file_magic_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $ECHO "*** with $libname but no candidates were found. (...for file magic test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a file magic. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. + ;; + match_pattern*) + set dummy $deplibs_check_method; shift + match_pattern_regex=`expr "$deplibs_check_method" : "$1 \(.*\)"` + for a_deplib in $deplibs; do + case $a_deplib in + -l*) + func_stripname -l '' "$a_deplib" + name=$func_stripname_result + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + case " $predeps $postdeps " in + *" $a_deplib "*) + func_append newdeplibs " $a_deplib" + a_deplib="" + ;; + esac + fi + if test -n "$a_deplib" ; then + libname=`eval "\\$ECHO \"$libname_spec\""` + for i in $lib_search_path $sys_lib_search_path $shlib_search_path; do + potential_libs=`ls $i/$libname[.-]* 2>/dev/null` + for potent_lib in $potential_libs; do + potlib="$potent_lib" # see symlink-check above in file_magic test + if eval "\$ECHO \"$potent_lib\"" 2>/dev/null | $SED 10q | \ + $EGREP "$match_pattern_regex" > /dev/null; then + func_append newdeplibs " $a_deplib" + a_deplib="" + break 2 + fi + done + done + fi + if test -n "$a_deplib" ; then + droppeddeps=yes + echo + $ECHO "*** Warning: linker path does not have real file for library $a_deplib." + echo "*** I have the capability to make that library automatically link in when" + echo "*** you link to this library. But I can only do this if you have a" + echo "*** shared version of the library, which you do not appear to have" + echo "*** because I did check the linker path looking for a file starting" + if test -z "$potlib" ; then + $ECHO "*** with $libname but no candidates were found. (...for regex pattern test)" + else + $ECHO "*** with $libname and none of the candidates passed a file format test" + $ECHO "*** using a regex pattern. Last file checked: $potlib" + fi + fi + ;; + *) + # Add a -L argument. + func_append newdeplibs " $a_deplib" + ;; + esac + done # Gone through all deplibs. 
+ ;; + none | unknown | *) + newdeplibs="" + tmp_deplibs=`$ECHO " $deplibs" | $SED 's/ -lc$//; s/ -[LR][^ ]*//g'` + if test "X$allow_libtool_libs_with_static_runtimes" = "Xyes" ; then + for i in $predeps $postdeps ; do + # can't use Xsed below, because $i might contain '/' + tmp_deplibs=`$ECHO " $tmp_deplibs" | $SED "s,$i,,"` + done + fi + case $tmp_deplibs in + *[!\ \ ]*) + echo + if test "X$deplibs_check_method" = "Xnone"; then + echo "*** Warning: inter-library dependencies are not supported in this platform." + else + echo "*** Warning: inter-library dependencies are not known to be supported." + fi + echo "*** All declared inter-library dependencies are being dropped." + droppeddeps=yes + ;; + esac + ;; + esac + versuffix=$versuffix_save + major=$major_save + release=$release_save + libname=$libname_save + name=$name_save + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library with the System framework + newdeplibs=`$ECHO " $newdeplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + if test "$droppeddeps" = yes; then + if test "$module" = yes; then + echo + echo "*** Warning: libtool could not satisfy all declared inter-library" + $ECHO "*** dependencies of module $libname. Therefore, libtool will create" + echo "*** a static module, that should work as long as the dlopening" + echo "*** application is linked with the -dlopen flag." + if test -z "$global_symbol_pipe"; then + echo + echo "*** However, this would only work if libtool was able to extract symbol" + echo "*** lists from a program, using \`nm' or equivalent, but libtool could" + echo "*** not find such a program. So, this module is probably useless." + echo "*** \`nm' from GNU binutils and a full rebuild may help." + fi + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + else + echo "*** The inter-library dependencies that have been dropped here will be" + echo "*** automatically added whenever a program is linked with this library" + echo "*** or is declared to -dlopen it." + + if test "$allow_undefined" = no; then + echo + echo "*** Since this library must not contain undefined symbols," + echo "*** because either the platform does not support them or" + echo "*** it was explicitly requested with -no-undefined," + echo "*** libtool will only create a static version of it." + if test "$build_old_libs" = no; then + oldlibs="$output_objdir/$libname.$libext" + build_libtool_libs=module + build_old_libs=yes + else + build_libtool_libs=no + fi + fi + fi + fi + # Done checking deplibs! 
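+ # --- Illustrative aside, not part of the vendored libtool script: the
+ # scrub in the none|unknown branch above keeps only real inter-library
+ # dependencies by stripping -L/-R flags and one trailing -lc. Sketch:
+ demo_deplibs="-L/usr/lib -lm -R/opt/lib -lpthread -lc"
+ demo_tmp=`echo " $demo_deplibs" | sed 's/ -lc$//; s/ -[LR][^ ]*//g'`
+ # demo_tmp is now " -lm -lpthread"; the case pattern then merely asks
+ # whether any non-blank character is left.
+ # --- end aside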
+ deplibs=$newdeplibs + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + case $host in + *-*-darwin*) + newdeplibs=`$ECHO " $newdeplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + new_inherited_linker_flags=`$ECHO " $new_inherited_linker_flags" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + deplibs=`$ECHO " $deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + deplibs="$new_libs" + + # All the library-specific variables (install_libdir is set above). + library_names= + old_library= + dlname= + + # Test again, we may have decided not to build it any more + if test "$build_libtool_libs" = yes; then + # Remove ${wl} instances when linking with ld. + # FIXME: should test the right _cmds variable. + case $archive_cmds in + *\$LD\ *) wl= ;; + esac + if test "$hardcode_into_libs" = yes; then + # Hardcode the library paths + hardcode_libdirs= + dep_rpath= + rpath="$finalize_rpath" + test "$opt_mode" != relink && rpath="$compile_rpath$rpath" + for libdir in $rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + func_replace_sysroot "$libdir" + libdir=$func_replace_sysroot_result + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append dep_rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval "dep_rpath=\"$hardcode_libdir_flag_spec\"" + fi + if test -n "$runpath_var" && test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + eval "$runpath_var='$rpath\$$runpath_var'; export $runpath_var" + fi + test -n "$dep_rpath" && deplibs="$dep_rpath $deplibs" + fi + + shlibpath="$finalize_shlibpath" + test "$opt_mode" != relink && shlibpath="$compile_shlibpath$shlibpath" + if test -n "$shlibpath"; then + eval "$shlibpath_var='$shlibpath\$$shlibpath_var'; export $shlibpath_var" + fi + + # Get the real and link names of the library. 
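+ # --- Illustrative aside, not part of the vendored libtool script: the
+ # hardcode_libdirs accumulation above joins unique directories with the
+ # platform's $hardcode_libdir_separator so the flag spec is expanded only
+ # once. Sketch assuming a ':' separator:
+ demo_sep=: demo_dirs=
+ for demo_d in /usr/local/lib /opt/lib /usr/local/lib; do
+ if test -z "$demo_dirs"; then
+ demo_dirs=$demo_d
+ else
+ case $demo_sep$demo_dirs$demo_sep in
+ *"$demo_sep$demo_d$demo_sep"*) ;; # already recorded
+ *) demo_dirs=$demo_dirs$demo_sep$demo_d ;;
+ esac
+ fi
+ done
+ # demo_dirs is now "/usr/local/lib:/opt/lib".
+ # --- end aside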
+ eval shared_ext=\"$shrext_cmds\" + eval library_names=\"$library_names_spec\" + set dummy $library_names + shift + realname="$1" + shift + + if test -n "$soname_spec"; then + eval soname=\"$soname_spec\" + else + soname="$realname" + fi + if test -z "$dlname"; then + dlname=$soname + fi + + lib="$output_objdir/$realname" + linknames= + for link + do + func_append linknames " $link" + done + + # Use standard objects if they are pic + test -z "$pic_flag" && libobjs=`$ECHO "$libobjs" | $SP2NL | $SED "$lo2o" | $NL2SP` + test "X$libobjs" = "X " && libobjs= + + delfiles= + if test -n "$export_symbols" && test -n "$include_expsyms"; then + $opt_dry_run || cp "$export_symbols" "$output_objdir/$libname.uexp" + export_symbols="$output_objdir/$libname.uexp" + func_append delfiles " $export_symbols" + fi + + orig_export_symbols= + case $host_os in + cygwin* | mingw* | cegcc*) + if test -n "$export_symbols" && test -z "$export_symbols_regex"; then + # exporting using user supplied symfile + if test "x`$SED 1q $export_symbols`" != xEXPORTS; then + # and it's NOT already a .def file. Must figure out + # which of the given symbols are data symbols and tag + # them as such. So, trigger use of export_symbols_cmds. + # export_symbols gets reassigned inside the "prepare + # the list of exported symbols" if statement, so the + # include_expsyms logic still works. + orig_export_symbols="$export_symbols" + export_symbols= + always_export_symbols=yes + fi + fi + ;; + esac + + # Prepare the list of exported symbols + if test -z "$export_symbols"; then + if test "$always_export_symbols" = yes || test -n "$export_symbols_regex"; then + func_verbose "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $opt_dry_run || $RM $export_symbols + cmds=$export_symbols_cmds + save_ifs="$IFS"; IFS='~' + for cmd1 in $cmds; do + IFS="$save_ifs" + # Take the normal branch if the nm_file_list_spec branch + # doesn't work or if tool conversion is not needed. + case $nm_file_list_spec~$to_tool_file_cmd in + *~func_convert_file_noop | *~func_convert_file_msys_to_w32 | ~*) + try_normal_branch=yes + eval cmd=\"$cmd1\" + func_len " $cmd" + len=$func_len_result + ;; + *) + try_normal_branch=no + ;; + esac + if test "$try_normal_branch" = yes \ + && { test "$len" -lt "$max_cmd_len" \ + || test "$max_cmd_len" -le -1; } + then + func_show_eval "$cmd" 'exit $?' + skipped_export=false + elif test -n "$nm_file_list_spec"; then + func_basename "$output" + output_la=$func_basename_result + save_libobjs=$libobjs + save_output=$output + output=${output_objdir}/${output_la}.nm + func_to_tool_file "$output" + libobjs=$nm_file_list_spec$func_to_tool_file_result + func_append delfiles " $output" + func_verbose "creating $NM input file list: $output" + for obj in $save_libobjs; do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > "$output" + eval cmd=\"$cmd1\" + func_show_eval "$cmd" 'exit $?' + output=$save_output + libobjs=$save_libobjs + skipped_export=false + else + # The command line is too long to execute in one step. + func_verbose "using reloadable object file for export list..." + skipped_export=: + # Break out early, otherwise skipped_export may be + # set to false by a later but shorter cmd. 
+ break + fi + done + IFS="$save_ifs" + if test -n "$export_symbols_regex" && test "X$skipped_export" != "X:"; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + fi + + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test "X$skipped_export" != "X:" && test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + + tmp_deplibs= + for test_deplib in $deplibs; do + case " $convenience " in + *" $test_deplib "*) ;; + *) + func_append tmp_deplibs " $test_deplib" + ;; + esac + done + deplibs="$tmp_deplibs" + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec" && + test "$compiler_needs_object" = yes && + test -z "$libobjs"; then + # extract the archives, so we have objects to list. + # TODO: could optimize this to just extract one archive. + whole_archive_flag_spec= + fi + if test -n "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + else + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + fi + + if test "$thread_safe" = yes && test -n "$thread_safe_flag_spec"; then + eval flag=\"$thread_safe_flag_spec\" + func_append linker_flags " $flag" + fi + + # Make a backup of the uninstalled library when relinking + if test "$opt_mode" = relink; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}U && $MV $realname ${realname}U)' || exit $? + fi + + # Do each of the archive commands. + if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + eval test_cmds=\"$module_expsym_cmds\" + cmds=$module_expsym_cmds + else + eval test_cmds=\"$module_cmds\" + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + eval test_cmds=\"$archive_expsym_cmds\" + cmds=$archive_expsym_cmds + else + eval test_cmds=\"$archive_cmds\" + cmds=$archive_cmds + fi + fi + + if test "X$skipped_export" != "X:" && + func_len " $test_cmds" && + len=$func_len_result && + test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + : + else + # The command line is too long to link in one step, link piecewise + # or, if using GNU ld and skipped_export is not :, use a linker + # script. 
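+ # --- Illustrative aside, not part of the vendored libtool script: the GNU
+ # ld fallback below sidesteps argv length limits by listing every object
+ # inside an `INPUT (...)' linker script and handing the linker that one
+ # file instead. Sketch of the file it generates, with hypothetical objects:
+ demo_script=/tmp/demo.lnkscript
+ echo 'INPUT (' > $demo_script
+ for demo_obj in a.o b.o c.o; do echo "$demo_obj" >> $demo_script; done
+ echo ')' >> $demo_script
+ # $demo_script can now replace "a.o b.o c.o" on a GNU ld command line.
+ rm -f $demo_script
+ # --- end aside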
+ + # Save the value of $output and $libobjs because we want to + # use them later. If we have whole_archive_flag_spec, we + # want to use save_libobjs as it was before + # whole_archive_flag_spec was expanded, because we can't + # assume the linker understands whole_archive_flag_spec. + # This may have to be revisited, in case too many + # convenience libraries get linked in and end up exceeding + # the spec. + if test -z "$convenience" || test -z "$whole_archive_flag_spec"; then + save_libobjs=$libobjs + fi + save_output=$output + func_basename "$output" + output_la=$func_basename_result + + # Clear the reloadable object creation command queue and + # initialize k to one. + test_cmds= + concat_cmds= + objlist= + last_robj= + k=1 + + if test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "$with_gnu_ld" = yes; then + output=${output_objdir}/${output_la}.lnkscript + func_verbose "creating GNU ld script: $output" + echo 'INPUT (' > $output + for obj in $save_libobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + echo ')' >> $output + func_append delfiles " $output" + func_to_tool_file "$output" + output=$func_to_tool_file_result + elif test -n "$save_libobjs" && test "X$skipped_export" != "X:" && test "X$file_list_spec" != X; then + output=${output_objdir}/${output_la}.lnk + func_verbose "creating linker input file list: $output" + : > $output + set x $save_libobjs + shift + firstobj= + if test "$compiler_needs_object" = yes; then + firstobj="$1 " + shift + fi + for obj + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" >> $output + done + func_append delfiles " $output" + func_to_tool_file "$output" + output=$firstobj\"$file_list_spec$func_to_tool_file_result\" + else + if test -n "$save_libobjs"; then + func_verbose "creating reloadable object files..." + output=$output_objdir/$output_la-${k}.$objext + eval test_cmds=\"$reload_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + + # Loop over the list of objects to be linked. + for obj in $save_libobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + if test "X$objlist" = X || + test "$len" -lt "$max_cmd_len"; then + func_append objlist " $obj" + else + # The command $test_cmds is almost too long, add a + # command to the queue. + if test "$k" -eq 1 ; then + # The first file doesn't have a previous command to add. + reload_objs=$objlist + eval concat_cmds=\"$reload_cmds\" + else + # All subsequent reloadable object files will link in + # the last one created. + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\$concat_cmds~$reload_cmds~\$RM $last_robj\" + fi + last_robj=$output_objdir/$output_la-${k}.$objext + func_arith $k + 1 + k=$func_arith_result + output=$output_objdir/$output_la-${k}.$objext + objlist=" $obj" + func_len " $last_robj" + func_arith $len0 + $func_len_result + len=$func_arith_result + fi + done + # Handle the remaining objects by creating one last + # reloadable object file. All subsequent reloadable object + # files will link in the last one created. 
+ test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + reload_objs="$objlist $last_robj" + eval concat_cmds=\"\${concat_cmds}$reload_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\${concat_cmds}~\$RM $last_robj\" + fi + func_append delfiles " $output" + + else + output= + fi + + if ${skipped_export-false}; then + func_verbose "generating symbol list for \`$libname.la'" + export_symbols="$output_objdir/$libname.exp" + $opt_dry_run || $RM $export_symbols + libobjs=$output + # Append the command to create the export file. + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\$concat_cmds$export_symbols_cmds\" + if test -n "$last_robj"; then + eval concat_cmds=\"\$concat_cmds~\$RM $last_robj\" + fi + fi + + test -n "$save_libobjs" && + func_verbose "creating a temporary reloadable object file: $output" + + # Loop through the commands generated above and execute them. + save_ifs="$IFS"; IFS='~' + for cmd in $concat_cmds; do + IFS="$save_ifs" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$opt_mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + if test -n "$export_symbols_regex" && ${skipped_export-false}; then + func_show_eval '$EGREP -e "$export_symbols_regex" "$export_symbols" > "${export_symbols}T"' + func_show_eval '$MV "${export_symbols}T" "$export_symbols"' + fi + fi + + if ${skipped_export-false}; then + if test -n "$export_symbols" && test -n "$include_expsyms"; then + tmp_export_symbols="$export_symbols" + test -n "$orig_export_symbols" && tmp_export_symbols="$orig_export_symbols" + $opt_dry_run || eval '$ECHO "$include_expsyms" | $SP2NL >> "$tmp_export_symbols"' + fi + + if test -n "$orig_export_symbols"; then + # The given exports_symbols file has to be filtered, so filter it. + func_verbose "filter symbol list for \`$libname.la' to tag DATA exports" + # FIXME: $output_objdir/$libname.filter potentially contains lots of + # 's' commands which not all seds can handle. GNU sed should be fine + # though. Also, the filter scales superlinearly with the number of + # global variables. join(1) would be nice here, but unfortunately + # isn't a blessed tool. + $opt_dry_run || $SED -e '/[ ,]DATA/!d;s,\(.*\)\([ \,].*\),s|^\1$|\1\2|,' < $export_symbols > $output_objdir/$libname.filter + func_append delfiles " $export_symbols $output_objdir/$libname.filter" + export_symbols=$output_objdir/$libname.def + $opt_dry_run || $SED -f $output_objdir/$libname.filter < $orig_export_symbols > $export_symbols + fi + fi + + libobjs=$output + # Restore the value of output. + output=$save_output + + if test -n "$convenience" && test -n "$whole_archive_flag_spec"; then + eval libobjs=\"\$libobjs $whole_archive_flag_spec\" + test "X$libobjs" = "X " && libobjs= + fi + # Expand the library linking commands again to reset the + # value of $libobjs for piecewise linking. + + # Do each of the archive commands. 
+ if test "$module" = yes && test -n "$module_cmds" ; then + if test -n "$export_symbols" && test -n "$module_expsym_cmds"; then + cmds=$module_expsym_cmds + else + cmds=$module_cmds + fi + else + if test -n "$export_symbols" && test -n "$archive_expsym_cmds"; then + cmds=$archive_expsym_cmds + else + cmds=$archive_cmds + fi + fi + fi + + if test -n "$delfiles"; then + # Append the command to remove temporary files to $cmds. + eval cmds=\"\$cmds~\$RM $delfiles\" + fi + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append libobjs " $func_extract_archives_result" + test "X$libobjs" = "X " && libobjs= + fi + + save_ifs="$IFS"; IFS='~' + for cmd in $cmds; do + IFS="$save_ifs" + eval cmd=\"$cmd\" + $opt_silent || { + func_quote_for_expand "$cmd" + eval "func_echo $func_quote_for_expand_result" + } + $opt_dry_run || eval "$cmd" || { + lt_exit=$? + + # Restore the uninstalled library and exit + if test "$opt_mode" = relink; then + ( cd "$output_objdir" && \ + $RM "${realname}T" && \ + $MV "${realname}U" "$realname" ) + fi + + exit $lt_exit + } + done + IFS="$save_ifs" + + # Restore the uninstalled library and exit + if test "$opt_mode" = relink; then + $opt_dry_run || eval '(cd $output_objdir && $RM ${realname}T && $MV $realname ${realname}T && $MV ${realname}U $realname)' || exit $? + + if test -n "$convenience"; then + if test -z "$whole_archive_flag_spec"; then + func_show_eval '${RM}r "$gentop"' + fi + fi + + exit $EXIT_SUCCESS + fi + + # Create links to the real library. + for linkname in $linknames; do + if test "$realname" != "$linkname"; then + func_show_eval '(cd "$output_objdir" && $RM "$linkname" && $LN_S "$realname" "$linkname")' 'exit $?' + fi + done + + # If -module or -export-dynamic was specified, set the dlname. + if test "$module" = yes || test "$export_dynamic" = yes; then + # On all known operating systems, these are identical. + dlname="$soname" + fi + fi + ;; + + obj) + if test -n "$dlfiles$dlprefiles" || test "$dlself" != no; then + func_warning "\`-dlopen' is ignored for objects" + fi + + case " $deplibs" in + *\ -l* | *\ -L*) + func_warning "\`-l' and \`-L' are ignored for objects" ;; + esac + + test -n "$rpath" && \ + func_warning "\`-rpath' is ignored for objects" + + test -n "$xrpath" && \ + func_warning "\`-R' is ignored for objects" + + test -n "$vinfo" && \ + func_warning "\`-version-info' is ignored for objects" + + test -n "$release" && \ + func_warning "\`-release' is ignored for objects" + + case $output in + *.lo) + test -n "$objs$old_deplibs" && \ + func_fatal_error "cannot build library object \`$output' from non-libtool objects" + + libobj=$output + func_lo2o "$libobj" + obj=$func_lo2o_result + ;; + *) + libobj= + obj="$output" + ;; + esac + + # Delete the old objects. + $opt_dry_run || $RM $obj $libobj + + # Objects from convenience libraries. This assumes + # single-version convenience libraries. Whenever we create + # different ones for PIC/non-PIC, this we'll have to duplicate + # the extraction. + reload_conv_objs= + gentop= + # reload_cmds runs $LD directly, so let us get rid of + # -Wl from whole_archive_flag_spec and hope we can get by with + # turning comma into space.. 
+ wl= + + if test -n "$convenience"; then + if test -n "$whole_archive_flag_spec"; then + eval tmp_whole_archive_flags=\"$whole_archive_flag_spec\" + reload_conv_objs=$reload_objs\ `$ECHO "$tmp_whole_archive_flags" | $SED 's|,| |g'` + else + gentop="$output_objdir/${obj}x" + func_append generated " $gentop" + + func_extract_archives $gentop $convenience + reload_conv_objs="$reload_objs $func_extract_archives_result" + fi + fi + + # If we're not building shared, we need to use non_pic_objs + test "$build_libtool_libs" != yes && libobjs="$non_pic_objects" + + # Create the old-style object. + reload_objs="$objs$old_deplibs "`$ECHO "$libobjs" | $SP2NL | $SED "/\.${libext}$/d; /\.lib$/d; $lo2o" | $NL2SP`" $reload_conv_objs" ### testsuite: skip nested quoting test + + output="$obj" + func_execute_cmds "$reload_cmds" 'exit $?' + + # Exit if we aren't doing a library object file. + if test -z "$libobj"; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + fi + + if test "$build_libtool_libs" != yes; then + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + # Create an invalid libtool object if no PIC, so that we don't + # accidentally link it into a program. + # $show "echo timestamp > $libobj" + # $opt_dry_run || eval "echo timestamp > $libobj" || exit $? + exit $EXIT_SUCCESS + fi + + if test -n "$pic_flag" || test "$pic_mode" != default; then + # Only do commands if we really have different PIC objects. + reload_objs="$libobjs $reload_conv_objs" + output="$libobj" + func_execute_cmds "$reload_cmds" 'exit $?' + fi + + if test -n "$gentop"; then + func_show_eval '${RM}r "$gentop"' + fi + + exit $EXIT_SUCCESS + ;; + + prog) + case $host in + *cygwin*) func_stripname '' '.exe' "$output" + output=$func_stripname_result.exe;; + esac + test -n "$vinfo" && \ + func_warning "\`-version-info' is ignored for programs" + + test -n "$release" && \ + func_warning "\`-release' is ignored for programs" + + test "$preload" = yes \ + && test "$dlopen_support" = unknown \ + && test "$dlopen_self" = unknown \ + && test "$dlopen_self_static" = unknown && \ + func_warning "\`LT_INIT([dlopen])' not used. Assuming no dlopen support." + + case $host in + *-*-rhapsody* | *-*-darwin1.[012]) + # On Rhapsody replace the C library is the System framework + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's/ -lc / System.ltframework /'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's/ -lc / System.ltframework /'` + ;; + esac + + case $host in + *-*-darwin*) + # Don't allow lazy linking, it breaks C++ global constructors + # But is supposedly fixed on 10.4 or later (yay!). 
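+ # --- Illustrative aside, not part of the vendored libtool script: the
+ # check below fires only for deployment targets 10.0-10.3, where Darwin's
+ # lazy binding still broke C++ static constructors. Sketch of the match:
+ demo_target=${MACOSX_DEPLOYMENT_TARGET-10.0}
+ case $demo_target in
+ 10.[0123]) demo_bind_at_load=yes ;; # old dyld: pass -bind_at_load
+ *) demo_bind_at_load=no ;;
+ esac
+ # --- end aside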
+ if test "$tagname" = CXX ; then + case ${MACOSX_DEPLOYMENT_TARGET-10.0} in + 10.[0123]) + func_append compile_command " ${wl}-bind_at_load" + func_append finalize_command " ${wl}-bind_at_load" + ;; + esac + fi + # Time to change all our "foo.ltframework" stuff back to "-framework foo" + compile_deplibs=`$ECHO " $compile_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + finalize_deplibs=`$ECHO " $finalize_deplibs" | $SED 's% \([^ $]*\).ltframework% -framework \1%g'` + ;; + esac + + + # move library search paths that coincide with paths to not yet + # installed libraries to the beginning of the library search list + new_libs= + for path in $notinst_path; do + case " $new_libs " in + *" -L$path/$objdir "*) ;; + *) + case " $compile_deplibs " in + *" -L$path/$objdir "*) + func_append new_libs " -L$path/$objdir" ;; + esac + ;; + esac + done + for deplib in $compile_deplibs; do + case $deplib in + -L*) + case " $new_libs " in + *" $deplib "*) ;; + *) func_append new_libs " $deplib" ;; + esac + ;; + *) func_append new_libs " $deplib" ;; + esac + done + compile_deplibs="$new_libs" + + + func_append compile_command " $compile_deplibs" + func_append finalize_command " $finalize_deplibs" + + if test -n "$rpath$xrpath"; then + # If the user specified any rpath flags, then add them. + for libdir in $rpath $xrpath; do + # This is the magic to use -rpath. + case "$finalize_rpath " in + *" $libdir "*) ;; + *) func_append finalize_rpath " $libdir" ;; + esac + done + fi + + # Now hardcode the library paths + rpath= + hardcode_libdirs= + for libdir in $compile_rpath $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. + case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$perm_rpath " in + *" $libdir "*) ;; + *) func_append perm_rpath " $libdir" ;; + esac + fi + case $host in + *-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-os2* | *-cegcc*) + testbindir=`${ECHO} "$libdir" | ${SED} -e 's*/lib$*/bin*'` + case :$dllsearchpath: in + *":$libdir:"*) ;; + ::) dllsearchpath=$libdir;; + *) func_append dllsearchpath ":$libdir";; + esac + case :$dllsearchpath: in + *":$testbindir:"*) ;; + ::) dllsearchpath=$testbindir;; + *) func_append dllsearchpath ":$testbindir";; + esac + ;; + esac + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + compile_rpath="$rpath" + + rpath= + hardcode_libdirs= + for libdir in $finalize_rpath; do + if test -n "$hardcode_libdir_flag_spec"; then + if test -n "$hardcode_libdir_separator"; then + if test -z "$hardcode_libdirs"; then + hardcode_libdirs="$libdir" + else + # Just accumulate the unique libdirs. 
+ case $hardcode_libdir_separator$hardcode_libdirs$hardcode_libdir_separator in + *"$hardcode_libdir_separator$libdir$hardcode_libdir_separator"*) + ;; + *) + func_append hardcode_libdirs "$hardcode_libdir_separator$libdir" + ;; + esac + fi + else + eval flag=\"$hardcode_libdir_flag_spec\" + func_append rpath " $flag" + fi + elif test -n "$runpath_var"; then + case "$finalize_perm_rpath " in + *" $libdir "*) ;; + *) func_append finalize_perm_rpath " $libdir" ;; + esac + fi + done + # Substitute the hardcoded libdirs into the rpath. + if test -n "$hardcode_libdir_separator" && + test -n "$hardcode_libdirs"; then + libdir="$hardcode_libdirs" + eval rpath=\" $hardcode_libdir_flag_spec\" + fi + finalize_rpath="$rpath" + + if test -n "$libobjs" && test "$build_old_libs" = yes; then + # Transform all the library objects into standard objects. + compile_command=`$ECHO "$compile_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + finalize_command=`$ECHO "$finalize_command" | $SP2NL | $SED "$lo2o" | $NL2SP` + fi + + func_generate_dlsyms "$outputname" "@PROGRAM@" "no" + + # template prelinking step + if test -n "$prelink_cmds"; then + func_execute_cmds "$prelink_cmds" 'exit $?' + fi + + wrappers_required=yes + case $host in + *cegcc* | *mingw32ce*) + # Disable wrappers for cegcc and mingw32ce hosts, we are cross compiling anyway. + wrappers_required=no + ;; + *cygwin* | *mingw* ) + if test "$build_libtool_libs" != yes; then + wrappers_required=no + fi + ;; + *) + if test "$need_relink" = no || test "$build_libtool_libs" != yes; then + wrappers_required=no + fi + ;; + esac + if test "$wrappers_required" = no; then + # Replace the output file specification. + compile_command=`$ECHO "$compile_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + link_command="$compile_command$compile_rpath" + + # We have no uninstalled library dependencies, so finalize right now. + exit_status=0 + func_show_eval "$link_command" 'exit_status=$?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Delete the generated files. + if test -f "$output_objdir/${outputname}S.${objext}"; then + func_show_eval '$RM "$output_objdir/${outputname}S.${objext}"' + fi + + exit $exit_status + fi + + if test -n "$compile_shlibpath$finalize_shlibpath"; then + compile_command="$shlibpath_var=\"$compile_shlibpath$finalize_shlibpath\$$shlibpath_var\" $compile_command" + fi + if test -n "$finalize_shlibpath"; then + finalize_command="$shlibpath_var=\"$finalize_shlibpath\$$shlibpath_var\" $finalize_command" + fi + + compile_var= + finalize_var= + if test -n "$runpath_var"; then + if test -n "$perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $perm_rpath; do + func_append rpath "$dir:" + done + compile_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + if test -n "$finalize_perm_rpath"; then + # We should set the runpath_var. + rpath= + for dir in $finalize_perm_rpath; do + func_append rpath "$dir:" + done + finalize_var="$runpath_var=\"$rpath\$$runpath_var\" " + fi + fi + + if test "$no_install" = yes; then + # We don't need to create a wrapper script. + link_command="$compile_var$compile_command$compile_rpath" + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output"'%g'` + # Delete the old output file. 
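+ # --- Illustrative aside, not part of the vendored libtool script: link
+ # commands are carried around as templates with an @OUTPUT@ placeholder and
+ # specialized by the $SED substitutions used above and below. Sketch:
+ demo_template='gcc -o @OUTPUT@ main.o'
+ demo_output=hello
+ demo_cmd=`echo "$demo_template" | sed 's%@OUTPUT@%'"$demo_output"'%g'`
+ # demo_cmd is now "gcc -o hello main.o".
+ # --- end aside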
+ $opt_dry_run || $RM $output + # Link the executable and exit + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + exit $EXIT_SUCCESS + fi + + if test "$hardcode_action" = relink; then + # Fast installation is not supported + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + + func_warning "this platform does not like uninstalled shared libraries" + func_warning "\`$output' will be relinked during installation" + else + if test "$fast_install" != no; then + link_command="$finalize_var$compile_command$finalize_rpath" + if test "$fast_install" = yes; then + relink_command=`$ECHO "$compile_var$compile_command$compile_rpath" | $SED 's%@OUTPUT@%\$progdir/\$file%g'` + else + # fast_install is set to needless + relink_command= + fi + else + link_command="$compile_var$compile_command$compile_rpath" + relink_command="$finalize_var$finalize_command$finalize_rpath" + fi + fi + + # Replace the output file specification. + link_command=`$ECHO "$link_command" | $SED 's%@OUTPUT@%'"$output_objdir/$outputname"'%g'` + + # Delete the old output files. + $opt_dry_run || $RM $output $output_objdir/$outputname $output_objdir/lt-$outputname + + func_show_eval "$link_command" 'exit $?' + + if test -n "$postlink_cmds"; then + func_to_tool_file "$output_objdir/$outputname" + postlink_cmds=`func_echo_all "$postlink_cmds" | $SED -e 's%@OUTPUT@%'"$output_objdir/$outputname"'%g' -e 's%@TOOL_OUTPUT@%'"$func_to_tool_file_result"'%g'` + func_execute_cmds "$postlink_cmds" 'exit $?' + fi + + # Now create the wrapper script. + func_verbose "creating $output" + + # Quote the relink command for shipping. + if test -n "$relink_command"; then + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + relink_command="(cd `pwd`; $relink_command)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + fi + + # Only actually do things if not in dry run mode. + $opt_dry_run || { + # win32 will think the script is a binary if it has + # a .exe suffix, so we strip it off here. + case $output in + *.exe) func_stripname '' '.exe' "$output" + output=$func_stripname_result ;; + esac + # test for cygwin because mv fails w/o .exe extensions + case $host in + *cygwin*) + exeext=.exe + func_stripname '' '.exe' "$outputname" + outputname=$func_stripname_result ;; + *) exeext= ;; + esac + case $host in + *cygwin* | *mingw* ) + func_dirname_and_basename "$output" "" "." 
+ output_name=$func_basename_result + output_path=$func_dirname_result + cwrappersource="$output_path/$objdir/lt-$output_name.c" + cwrapper="$output_path/$output_name.exe" + $RM $cwrappersource $cwrapper + trap "$RM $cwrappersource $cwrapper; exit $EXIT_FAILURE" 1 2 15 + + func_emit_cwrapperexe_src > $cwrappersource + + # The wrapper executable is built using the $host compiler, + # because it contains $host paths and files. If cross- + # compiling, it, like the target executable, must be + # executed on the $host or under an emulation environment. + $opt_dry_run || { + $LTCC $LTCFLAGS -o $cwrapper $cwrappersource + $STRIP $cwrapper + } + + # Now, create the wrapper script for func_source use: + func_ltwrapper_scriptname $cwrapper + $RM $func_ltwrapper_scriptname_result + trap "$RM $func_ltwrapper_scriptname_result; exit $EXIT_FAILURE" 1 2 15 + $opt_dry_run || { + # note: this script will not be executed, so do not chmod. + if test "x$build" = "x$host" ; then + $cwrapper --lt-dump-script > $func_ltwrapper_scriptname_result + else + func_emit_wrapper no > $func_ltwrapper_scriptname_result + fi + } + ;; + * ) + $RM $output + trap "$RM $output; exit $EXIT_FAILURE" 1 2 15 + + func_emit_wrapper no > $output + chmod +x $output + ;; + esac + } + exit $EXIT_SUCCESS + ;; + esac + + # See if we need to build an old-fashioned archive. + for oldlib in $oldlibs; do + + if test "$build_libtool_libs" = convenience; then + oldobjs="$libobjs_save $symfileobj" + addlibs="$convenience" + build_libtool_libs=no + else + if test "$build_libtool_libs" = module; then + oldobjs="$libobjs_save" + build_libtool_libs=no + else + oldobjs="$old_deplibs $non_pic_objects" + if test "$preload" = yes && test -f "$symfileobj"; then + func_append oldobjs " $symfileobj" + fi + fi + addlibs="$old_convenience" + fi + + if test -n "$addlibs"; then + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $addlibs + func_append oldobjs " $func_extract_archives_result" + fi + + # Do each command in the archive commands. + if test -n "$old_archive_from_new_cmds" && test "$build_libtool_libs" = yes; then + cmds=$old_archive_from_new_cmds + else + + # Add any objects from preloaded convenience libraries + if test -n "$dlprefiles"; then + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + + func_extract_archives $gentop $dlprefiles + func_append oldobjs " $func_extract_archives_result" + fi + + # POSIX demands no paths to be encoded in archives. We have + # to avoid creating archives with duplicate basenames if we + # might have to extract them afterwards, e.g., when creating a + # static archive out of a convenience library, or when linking + # the entirety of a libtool archive into another (currently + # not supported by libtool). + if (for obj in $oldobjs + do + func_basename "$obj" + $ECHO "$func_basename_result" + done | sort | sort -uc >/dev/null 2>&1); then + : + else + echo "copying selected object files to avoid basename conflicts..." + gentop="$output_objdir/${outputname}x" + func_append generated " $gentop" + func_mkdir_p "$gentop" + save_oldobjs=$oldobjs + oldobjs= + counter=1 + for obj in $save_oldobjs + do + func_basename "$obj" + objbase="$func_basename_result" + case " $oldobjs " in + " ") oldobjs=$obj ;; + *[\ /]"$objbase "*) + while :; do + # Make sure we don't pick an alternate name that also + # overlaps. 
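+ # --- Illustrative aside, not part of the vendored libtool script: the
+ # `sort | sort -uc' probe above succeeds only when every object basename
+ # is unique, because `sort -uc' exits non-zero on the first duplicate in
+ # its already-sorted input. Sketch (func_basename approximated with sed):
+ if printf '%s\n' a.o sub/a.o | sed 's,.*/,,' | sort | sort -uc >/dev/null 2>&1; then
+ demo_unique=yes
+ else
+ demo_unique=no # a.o occurs twice once directories are stripped
+ fi
+ # --- end aside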
+ newobj=lt$counter-$objbase + func_arith $counter + 1 + counter=$func_arith_result + case " $oldobjs " in + *[\ /]"$newobj "*) ;; + *) if test ! -f "$gentop/$newobj"; then break; fi ;; + esac + done + func_show_eval "ln $obj $gentop/$newobj || cp $obj $gentop/$newobj" + func_append oldobjs " $gentop/$newobj" + ;; + *) func_append oldobjs " $obj" ;; + esac + done + fi + func_to_tool_file "$oldlib" func_convert_file_msys_to_w32 + tool_oldlib=$func_to_tool_file_result + eval cmds=\"$old_archive_cmds\" + + func_len " $cmds" + len=$func_len_result + if test "$len" -lt "$max_cmd_len" || test "$max_cmd_len" -le -1; then + cmds=$old_archive_cmds + elif test -n "$archiver_list_spec"; then + func_verbose "using command file archive linking..." + for obj in $oldobjs + do + func_to_tool_file "$obj" + $ECHO "$func_to_tool_file_result" + done > $output_objdir/$libname.libcmd + func_to_tool_file "$output_objdir/$libname.libcmd" + oldobjs=" $archiver_list_spec$func_to_tool_file_result" + cmds=$old_archive_cmds + else + # the command line is too long to link in one step, link in parts + func_verbose "using piecewise archive linking..." + save_RANLIB=$RANLIB + RANLIB=: + objlist= + concat_cmds= + save_oldobjs=$oldobjs + oldobjs= + # Is there a better way of finding the last object in the list? + for obj in $save_oldobjs + do + last_oldobj=$obj + done + eval test_cmds=\"$old_archive_cmds\" + func_len " $test_cmds" + len0=$func_len_result + len=$len0 + for obj in $save_oldobjs + do + func_len " $obj" + func_arith $len + $func_len_result + len=$func_arith_result + func_append objlist " $obj" + if test "$len" -lt "$max_cmd_len"; then + : + else + # the above command should be used before it gets too long + oldobjs=$objlist + if test "$obj" = "$last_oldobj" ; then + RANLIB=$save_RANLIB + fi + test -z "$concat_cmds" || concat_cmds=$concat_cmds~ + eval concat_cmds=\"\${concat_cmds}$old_archive_cmds\" + objlist= + len=$len0 + fi + done + RANLIB=$save_RANLIB + oldobjs=$objlist + if test "X$oldobjs" = "X" ; then + eval cmds=\"\$concat_cmds\" + else + eval cmds=\"\$concat_cmds~\$old_archive_cmds\" + fi + fi + fi + func_execute_cmds "$cmds" 'exit $?' + done + + test -n "$generated" && \ + func_show_eval "${RM}r$generated" + + # Now create the libtool archive. + case $output in + *.la) + old_library= + test "$build_old_libs" = yes && old_library="$libname.$libext" + func_verbose "creating $output" + + # Preserve any variables that may affect compiler behavior + for var in $variables_saved_for_relink; do + if eval test -z \"\${$var+set}\"; then + relink_command="{ test -z \"\${$var+set}\" || $lt_unset $var || { $var=; export $var; }; }; $relink_command" + elif eval var_value=\$$var; test -z "$var_value"; then + relink_command="$var=; export $var; $relink_command" + else + func_quote_for_eval "$var_value" + relink_command="$var=$func_quote_for_eval_result; export $var; $relink_command" + fi + done + # Quote the link command for shipping. + relink_command="(cd `pwd`; $SHELL $progpath $preserve_args --mode=relink $libtool_args @inst_prefix_dir@)" + relink_command=`$ECHO "$relink_command" | $SED "$sed_quote_subst"` + if test "$hardcode_automatic" = yes ; then + relink_command= + fi + + # Only create the output if not a dry run. 
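+ # --- Illustrative aside, not part of the vendored libtool script: the
+ # variables_saved_for_relink loop above bakes the current values of
+ # compiler-affecting variables (PATH and friends) into $relink_command so
+ # that a later --mode=relink runs under the same environment. Simplified
+ # sketch of the per-variable prefix it prepends:
+ demo_var=CC demo_value='gcc -m64'
+ demo_prefix="$demo_var=\"$demo_value\"; export $demo_var; "
+ # demo_prefix is now: CC="gcc -m64"; export CC;
+ # (the real code quotes with func_quote_for_eval instead of plain quotes)
+ # --- end aside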
+ $opt_dry_run || { + for installed in no yes; do + if test "$installed" = yes; then + if test -z "$install_libdir"; then + break + fi + output="$output_objdir/$outputname"i + # Replace all uninstalled libtool libraries with the installed ones + newdependency_libs= + for deplib in $dependency_libs; do + case $deplib in + *.la) + func_basename "$deplib" + name="$func_basename_result" + func_resolve_sysroot "$deplib" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $func_resolve_sysroot_result` + test -z "$libdir" && \ + func_fatal_error "\`$deplib' is not a valid libtool archive" + func_append newdependency_libs " ${lt_sysroot:+=}$libdir/$name" + ;; + -L*) + func_stripname -L '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -L$func_replace_sysroot_result" + ;; + -R*) + func_stripname -R '' "$deplib" + func_replace_sysroot "$func_stripname_result" + func_append newdependency_libs " -R$func_replace_sysroot_result" + ;; + *) func_append newdependency_libs " $deplib" ;; + esac + done + dependency_libs="$newdependency_libs" + newdlfiles= + + for lib in $dlfiles; do + case $lib in + *.la) + func_basename "$lib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" + func_append newdlfiles " ${lt_sysroot:+=}$libdir/$name" + ;; + *) func_append newdlfiles " $lib" ;; + esac + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + *.la) + # Only pass preopened files to the pseudo-archive (for + # eventual linking with the app. that links it) if we + # didn't already link the preopened objects directly into + # the library: + func_basename "$lib" + name="$func_basename_result" + eval libdir=`${SED} -n -e 's/^libdir=\(.*\)$/\1/p' $lib` + test -z "$libdir" && \ + func_fatal_error "\`$lib' is not a valid libtool archive" + func_append newdlprefiles " ${lt_sysroot:+=}$libdir/$name" + ;; + esac + done + dlprefiles="$newdlprefiles" + else + newdlfiles= + for lib in $dlfiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlfiles " $abs" + done + dlfiles="$newdlfiles" + newdlprefiles= + for lib in $dlprefiles; do + case $lib in + [\\/]* | [A-Za-z]:[\\/]*) abs="$lib" ;; + *) abs=`pwd`"/$lib" ;; + esac + func_append newdlprefiles " $abs" + done + dlprefiles="$newdlprefiles" + fi + $RM $output + # place dlname in correct position for cygwin + # In fact, it would be nice if we could use this code for all target + # systems that can't hard-code library paths into their executables + # and that have no shared library path variable independent of PATH, + # but it turns out we can't easily determine that from inspecting + # libtool variables, so we have to hard-code the OSs to which it + # applies here; at the moment, that means platforms that use the PE + # object format with DLL files. See the long comment at the top of + # tests/bindir.at for full details. + tdlname=$dlname + case $host,$output,$installed,$module,$dlname in + *cygwin*,*lai,yes,no,*.dll | *mingw*,*lai,yes,no,*.dll | *cegcc*,*lai,yes,no,*.dll) + # If a -bindir argument was supplied, place the dll there. + if test "x$bindir" != x ; + then + func_relative_path "$install_libdir" "$bindir" + tdlname=$func_relative_path_result$dlname + else + # Otherwise fall back on heuristic. 
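+ # For illustration (assumed conventional layout, not upstream text): with
+ # install_libdir=/usr/lib and dlname=libfoo-1.dll, the fallback below
+ # records ../bin/libfoo-1.dll, i.e. the DLL is expected to sit beside the
+ # executables in /usr/bin.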
+ tdlname=../bin/$dlname + fi + ;; + esac + $ECHO > $output "\ +# $outputname - a libtool library file +# Generated by $PROGRAM (GNU $PACKAGE$TIMESTAMP) $VERSION +# +# Please DO NOT delete this file! +# It is necessary for linking the library. + +# The name that we can dlopen(3). +dlname='$tdlname' + +# Names of this library. +library_names='$library_names' + +# The name of the static archive. +old_library='$old_library' + +# Linker flags that can not go in dependency_libs. +inherited_linker_flags='$new_inherited_linker_flags' + +# Libraries that this one depends upon. +dependency_libs='$dependency_libs' + +# Names of additional weak libraries provided by this library +weak_library_names='$weak_libs' + +# Version information for $libname. +current=$current +age=$age +revision=$revision + +# Is this an already installed library? +installed=$installed + +# Should we warn about portability when linking against -modules? +shouldnotlink=$module + +# Files to dlopen/dlpreopen +dlopen='$dlfiles' +dlpreopen='$dlprefiles' + +# Directory that this library needs to be installed in: +libdir='$install_libdir'" + if test "$installed" = no && test "$need_relink" = yes; then + $ECHO >> $output "\ +relink_command=\"$relink_command\"" + fi + done + } + + # Do a symbolic link so that the libtool archive can be found in + # LD_LIBRARY_PATH before the program is installed. + func_show_eval '( cd "$output_objdir" && $RM "$outputname" && $LN_S "../$outputname" "$outputname" )' 'exit $?' + ;; + esac + exit $EXIT_SUCCESS +} + +{ test "$opt_mode" = link || test "$opt_mode" = relink; } && + func_mode_link ${1+"$@"} + + +# func_mode_uninstall arg... +func_mode_uninstall () +{ + $opt_debug + RM="$nonopt" + files= + rmforce= + exit_status=0 + + # This variable tells wrapper scripts just to set variables rather + # than running their programs. + libtool_install_magic="$magic" + + for arg + do + case $arg in + -f) func_append RM " $arg"; rmforce=yes ;; + -*) func_append RM " $arg" ;; + *) func_append files " $arg" ;; + esac + done + + test -z "$RM" && \ + func_fatal_help "you must specify an RM program" + + rmdirs= + + for file in $files; do + func_dirname "$file" "" "." + dir="$func_dirname_result" + if test "X$dir" = X.; then + odir="$objdir" + else + odir="$dir/$objdir" + fi + func_basename "$file" + name="$func_basename_result" + test "$opt_mode" = uninstall && odir="$dir" + + # Remember odir for removal later, being careful to avoid duplicates + if test "$opt_mode" = clean; then + case " $rmdirs " in + *" $odir "*) ;; + *) func_append rmdirs " $odir" ;; + esac + fi + + # Don't error if the file doesn't exist and rm -f was used. + if { test -L "$file"; } >/dev/null 2>&1 || + { test -h "$file"; } >/dev/null 2>&1 || + test -f "$file"; then + : + elif test -d "$file"; then + exit_status=1 + continue + elif test "$rmforce" = yes; then + continue + fi + + rmfiles="$file" + + case $name in + *.la) + # Possibly a libtool archive, so verify it. + if func_lalib_p "$file"; then + func_source $dir/$name + + # Delete the libtool libraries and symlinks. + for n in $library_names; do + func_append rmfiles " $odir/$n" + done + test -n "$old_library" && func_append rmfiles " $odir/$old_library" + + case "$opt_mode" in + clean) + case " $library_names " in + *" $dlname "*) ;; + *) test -n "$dlname" && func_append rmfiles " $odir/$dlname" ;; + esac + test -n "$libdir" && func_append rmfiles " $odir/$name $odir/${name}i" + ;; + uninstall) + if test -n "$library_names"; then + # Do each command in the postuninstall commands. 
+ func_execute_cmds "$postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' + fi + + if test -n "$old_library"; then + # Do each command in the old_postuninstall commands. + func_execute_cmds "$old_postuninstall_cmds" 'test "$rmforce" = yes || exit_status=1' + fi + # FIXME: should reinstall the best remaining shared library. + ;; + esac + fi + ;; + + *.lo) + # Possibly a libtool object, so verify it. + if func_lalib_p "$file"; then + + # Read the .lo file + func_source $dir/$name + + # Add PIC object to the list of files to remove. + if test -n "$pic_object" && + test "$pic_object" != none; then + func_append rmfiles " $dir/$pic_object" + fi + + # Add non-PIC object to the list of files to remove. + if test -n "$non_pic_object" && + test "$non_pic_object" != none; then + func_append rmfiles " $dir/$non_pic_object" + fi + fi + ;; + + *) + if test "$opt_mode" = clean ; then + noexename=$name + case $file in + *.exe) + func_stripname '' '.exe' "$file" + file=$func_stripname_result + func_stripname '' '.exe' "$name" + noexename=$func_stripname_result + # $file with .exe has already been added to rmfiles, + # add $file without .exe + func_append rmfiles " $file" + ;; + esac + # Do a test to see if this is a libtool program. + if func_ltwrapper_p "$file"; then + if func_ltwrapper_executable_p "$file"; then + func_ltwrapper_scriptname "$file" + relink_command= + func_source $func_ltwrapper_scriptname_result + func_append rmfiles " $func_ltwrapper_scriptname_result" + else + relink_command= + func_source $dir/$noexename + fi + + # note $name still contains .exe if it was in $file originally + # as does the version of $file that was added into $rmfiles + func_append rmfiles " $odir/$name $odir/${name}S.${objext}" + if test "$fast_install" = yes && test -n "$relink_command"; then + func_append rmfiles " $odir/lt-$name" + fi + if test "X$noexename" != "X$name" ; then + func_append rmfiles " $odir/lt-${noexename}.c" + fi + fi + fi + ;; + esac + func_show_eval "$RM $rmfiles" 'exit_status=1' + done + + # Try to remove the ${objdir}s in the directories where we deleted files + for dir in $rmdirs; do + if test -d "$dir"; then + func_show_eval "rmdir $dir >/dev/null 2>&1" + fi + done + + exit $exit_status +} + +{ test "$opt_mode" = uninstall || test "$opt_mode" = clean; } && + func_mode_uninstall ${1+"$@"} + +test -z "$opt_mode" && { + help="$generic_help" + func_fatal_help "you must specify a MODE" +} + +test -z "$exec_cmd" && \ + func_fatal_help "invalid operation mode \`$opt_mode'" + +if test -n "$exec_cmd"; then + eval exec "$exec_cmd" + exit $EXIT_FAILURE +fi + +exit $exit_status + + +# The TAGs below are defined such that we never get into a situation +# in which we disable both kinds of libraries. Given conflicting +# choices, we go for a static library, that is the most portable, +# since we can't tell whether shared libraries were disabled because +# the user asked for that or because the platform doesn't support +# them. This is particularly important on AIX, because we don't +# support having both static and shared libraries enabled at the same +# time on that platform, so we default to a shared-only configuration. +# If a disable-shared tag is given, we'll fallback to a static-only +# configuration. But we'll never go from static-only to shared-only. 
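+# A worked illustration of the precedence described above (values are
+# hypothetical): after the disable-shared tag runs, build_libtool_libs=no;
+# the disable-static tag then evaluates
+#   build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac`
+# which yields build_old_libs=yes, so a static library always survives and
+# the two tags can never disable both kinds at once.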
+ +# ### BEGIN LIBTOOL TAG CONFIG: disable-shared +build_libtool_libs=no +build_old_libs=yes +# ### END LIBTOOL TAG CONFIG: disable-shared + +# ### BEGIN LIBTOOL TAG CONFIG: disable-static +build_old_libs=`case $build_libtool_libs in yes) echo no;; *) echo yes;; esac` +# ### END LIBTOOL TAG CONFIG: disable-static + +# Local Variables: +# mode:shell-script +# sh-indentation:2 +# End: +# vi:sw=2 + diff --git a/external/gtest/build-aux/missing b/external/gtest/build-aux/missing new file mode 100755 index 0000000000..86a8fc31e3 --- /dev/null +++ b/external/gtest/build-aux/missing @@ -0,0 +1,331 @@ +#! /bin/sh +# Common stub for a few missing GNU programs while installing. + +scriptversion=2012-01-06.13; # UTC + +# Copyright (C) 1996, 1997, 1999, 2000, 2002, 2003, 2004, 2005, 2006, +# 2008, 2009, 2010, 2011, 2012 Free Software Foundation, Inc. +# Originally by François Pinard <pinard@iro.umontreal.ca>, 1996. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation; either version 2, or (at your option) +# any later version. + +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. + +# You should have received a copy of the GNU General Public License +# along with this program. If not, see <http://www.gnu.org/licenses/>. + +# As a special exception to the GNU General Public License, if you +# distribute this file as part of a program that contains a +# configuration script generated by Autoconf, you may include it under +# the same distribution terms that you use for the rest of that program. + +if test $# -eq 0; then + echo 1>&2 "Try \`$0 --help' for more information" + exit 1 +fi + +run=: +sed_output='s/.* --output[ =]\([^ ]*\).*/\1/p' +sed_minuso='s/.* -o \([^ ]*\).*/\1/p' + +# In the cases where this matters, `missing' is being run in the +# srcdir already. +if test -f configure.ac; then + configure_ac=configure.ac +else + configure_ac=configure.in +fi + +msg="missing on your system" + +case $1 in +--run) + # Try to run requested program, and just exit if it succeeds. + run= + shift + "$@" && exit 0 + # Exit code 63 means version mismatch. This often happens + # when the user tries to use an ancient version of a tool on + # a file that requires a minimum version. In this case + # we should proceed as if the program had been absent, or + # if --run hadn't been passed. + if test $? = 63; then + run=: + msg="probably too old" + fi + ;; + + -h|--h|--he|--hel|--help) + echo "\ +$0 [OPTION]... PROGRAM [ARGUMENT]... + +Handle \`PROGRAM [ARGUMENT]...' for when PROGRAM is missing, or return an +error status if there is no known handling for PROGRAM.
+ +Options: + -h, --help display this help and exit + -v, --version output version information and exit + --run try to run the given command, and emulate it if it fails + +Supported PROGRAM values: + aclocal touch file \`aclocal.m4' + autoconf touch file \`configure' + autoheader touch file \`config.h.in' + autom4te touch the output file, or create a stub one + automake touch all \`Makefile.in' files + bison create \`y.tab.[ch]', if possible, from existing .[ch] + flex create \`lex.yy.c', if possible, from existing .c + help2man touch the output file + lex create \`lex.yy.c', if possible, from existing .c + makeinfo touch the output file + yacc create \`y.tab.[ch]', if possible, from existing .[ch] + +Version suffixes to PROGRAM as well as the prefixes \`gnu-', \`gnu', and +\`g' are ignored when checking the name. + +Send bug reports to <bug-automake@gnu.org>." + exit $? + ;; + + -v|--v|--ve|--ver|--vers|--versi|--versio|--version) + echo "missing $scriptversion (GNU Automake)" + exit $? + ;; + + -*) + echo 1>&2 "$0: Unknown \`$1' option" + echo 1>&2 "Try \`$0 --help' for more information" + exit 1 + ;; + +esac + +# normalize program name to check for. +program=`echo "$1" | sed ' + s/^gnu-//; t + s/^gnu//; t + s/^g//; t'` + +# Now exit if we have it, but it failed. Also exit now if we +# don't have it and --version was passed (most likely to detect +# the program). This is about non-GNU programs, so use $1 not +# $program. +case $1 in + lex*|yacc*) + # Not GNU programs, they don't have --version. + ;; + + *) + if test -z "$run" && ($1 --version) > /dev/null 2>&1; then + # We have it, but it failed. + exit 1 + elif test "x$2" = "x--version" || test "x$2" = "x--help"; then + # Could not run --version or --help. This is probably someone + # running `$TOOL --version' or `$TOOL --help' to check whether + # $TOOL exists and not knowing $TOOL uses missing. + exit 1 + fi + ;; +esac + +# If it does not exist, or fails to run (possibly an outdated version), +# try to emulate it. +case $program in + aclocal*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`acinclude.m4' or \`${configure_ac}'. You might want + to install the \`Automake' and \`Perl' packages. Grab them from + any GNU archive site." + touch aclocal.m4 + ;; + + autoconf*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`${configure_ac}'. You might want to install the + \`Autoconf' and \`GNU m4' packages. Grab them from any GNU + archive site." + touch configure + ;; + + autoheader*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`acconfig.h' or \`${configure_ac}'. You might want + to install the \`Autoconf' and \`GNU m4' packages. Grab them + from any GNU archive site." + files=`sed -n 's/^[ ]*A[CM]_CONFIG_HEADER(\([^)]*\)).*/\1/p' ${configure_ac}` + test -z "$files" && files="config.h" + touch_files= + for f in $files; do + case $f in + *:*) touch_files="$touch_files "`echo "$f" | + sed -e 's/^[^:]*://' -e 's/:.*//'`;; + *) touch_files="$touch_files $f.in";; + esac + done + touch $touch_files + ;; + + automake*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified \`Makefile.am', \`acinclude.m4' or \`${configure_ac}'. + You might want to install the \`Automake' and \`Perl' packages. + Grab them from any GNU archive site." + find . -type f -name Makefile.am -print | + sed 's/\.am$/.in/' | + while read f; do touch "$f"; done + ;; + + autom4te*) + echo 1>&2 "\ +WARNING: \`$1' is needed, but is $msg.
+ You might have modified some files without having the + proper tools for further handling them. + You can get \`$1' as part of \`Autoconf' from any GNU + archive site." + + file=`echo "$*" | sed -n "$sed_output"` + test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` + if test -f "$file"; then + touch $file + else + test -z "$file" || exec >$file + echo "#! /bin/sh" + echo "# Created by GNU Automake missing as a replacement of" + echo "# $ $@" + echo "exit 0" + chmod +x $file + exit 1 + fi + ;; + + bison*|yacc*) + echo 1>&2 "\ +WARNING: \`$1' $msg. You should only need it if + you modified a \`.y' file. You may need the \`Bison' package + in order for those modifications to take effect. You can get + \`Bison' from any GNU archive site." + rm -f y.tab.c y.tab.h + if test $# -ne 1; then + eval LASTARG=\${$#} + case $LASTARG in + *.y) + SRCFILE=`echo "$LASTARG" | sed 's/y$/c/'` + if test -f "$SRCFILE"; then + cp "$SRCFILE" y.tab.c + fi + SRCFILE=`echo "$LASTARG" | sed 's/y$/h/'` + if test -f "$SRCFILE"; then + cp "$SRCFILE" y.tab.h + fi + ;; + esac + fi + if test ! -f y.tab.h; then + echo >y.tab.h + fi + if test ! -f y.tab.c; then + echo 'main() { return 0; }' >y.tab.c + fi + ;; + + lex*|flex*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a \`.l' file. You may need the \`Flex' package + in order for those modifications to take effect. You can get + \`Flex' from any GNU archive site." + rm -f lex.yy.c + if test $# -ne 1; then + eval LASTARG=\${$#} + case $LASTARG in + *.l) + SRCFILE=`echo "$LASTARG" | sed 's/l$/c/'` + if test -f "$SRCFILE"; then + cp "$SRCFILE" lex.yy.c + fi + ;; + esac + fi + if test ! -f lex.yy.c; then + echo 'main() { return 0; }' >lex.yy.c + fi + ;; + + help2man*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a dependency of a manual page. You may need the + \`Help2man' package in order for those modifications to take + effect. You can get \`Help2man' from any GNU archive site." + + file=`echo "$*" | sed -n "$sed_output"` + test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` + if test -f "$file"; then + touch $file + else + test -z "$file" || exec >$file + echo ".ab help2man is required to generate this page" + exit $? + fi + ;; + + makeinfo*) + echo 1>&2 "\ +WARNING: \`$1' is $msg. You should only need it if + you modified a \`.texi' or \`.texinfo' file, or any other file + indirectly affecting the aspect of the manual. The spurious + call might also be the consequence of using a buggy \`make' (AIX, + DU, IRIX). You might want to install the \`Texinfo' package or + the \`GNU make' package. Grab either from any GNU archive site." + # The file to touch is that specified with -o ... + file=`echo "$*" | sed -n "$sed_output"` + test -z "$file" && file=`echo "$*" | sed -n "$sed_minuso"` + if test -z "$file"; then + # ... or it is the one specified with @setfilename ... + infile=`echo "$*" | sed 's/.* \([^ ]*\) *$/\1/'` + file=`sed -n ' + /^@setfilename/{ + s/.* \([^ ]*\) *$/\1/ + p + q + }' $infile` + # ... or it is derived from the source name (dir/f.texi becomes f.info) + test -z "$file" && file=`echo "$infile" | sed 's,.*/,,;s,.[^.]*$,,'`.info + fi + # If the file does not exist, the user really needs makeinfo; + # let's fail without touching anything. + test -f $file || exit 1 + touch $file + ;; + + *) + echo 1>&2 "\ +WARNING: \`$1' is needed, and is $msg. + You might have modified some files without having the + proper tools for further handling them. 
Check the \`README' file, + it often tells you about the needed prerequisites for installing + this package. You may also peek at any GNU archive site, in case + some other package would contain this missing \`$1' program." + exit 1 + ;; +esac + +exit 0 + +# Local variables: +# eval: (add-hook 'write-file-hooks 'time-stamp) +# time-stamp-start: "scriptversion=" +# time-stamp-format: "%:y-%02m-%02d.%02H" +# time-stamp-time-zone: "UTC" +# time-stamp-end: "; # UTC" +# End: diff --git a/tests/gtest/cmake/internal_utils.cmake b/external/gtest/cmake/internal_utils.cmake old mode 100644 new mode 100755 similarity index 93% rename from tests/gtest/cmake/internal_utils.cmake rename to external/gtest/cmake/internal_utils.cmake index 7efc2ac797..8cb21894ce --- a/tests/gtest/cmake/internal_utils.cmake +++ b/external/gtest/cmake/internal_utils.cmake @@ -56,6 +56,16 @@ macro(config_compiler_and_linker) # Newlines inside flags variables break CMake's NMake generator. # TODO(vladl@google.com): Add -RTCs and -RTCu to debug builds. set(cxx_base_flags "-GS -W4 -WX -wd4127 -wd4251 -wd4275 -nologo -J -Zi") + if (MSVC_VERSION LESS 1400) + # Suppress spurious warnings MSVC 7.1 sometimes issues. + # Forcing value to bool. + set(cxx_base_flags "${cxx_base_flags} -wd4800") + # Copy constructor and assignment operator could not be generated. + set(cxx_base_flags "${cxx_base_flags} -wd4511 -wd4512") + # Compatibility warnings not applicable to Google Test. + # Resolved overload was found by argument-dependent lookup. + set(cxx_base_flags "${cxx_base_flags} -wd4675") + endif() set(cxx_base_flags "${cxx_base_flags} -D_UNICODE -DUNICODE -DWIN32 -D_WIN32") set(cxx_base_flags "${cxx_base_flags} -DSTRICT -DWIN32_LEAN_AND_MEAN") set(cxx_exception_flags "-EHsc -D_HAS_EXCEPTIONS=1") @@ -69,7 +79,8 @@ macro(config_compiler_and_linker) # whether RTTI is enabled. Therefore we define GTEST_HAS_RTTI # explicitly. 
set(cxx_no_rtti_flags "-fno-rtti -DGTEST_HAS_RTTI=0") - set(cxx_strict_flags "-Wextra") + set(cxx_strict_flags + "-Wextra -Wno-unused-parameter -Wno-missing-field-initializers") elseif (CMAKE_CXX_COMPILER_ID STREQUAL "SunPro") set(cxx_exception_flags "-features=except") # Sun Pro doesn't provide macros to indicate whether exceptions and diff --git a/external/gtest/codegear/gtest.cbproj b/external/gtest/codegear/gtest.cbproj new file mode 100755 index 0000000000..285bb2a87b --- /dev/null +++ b/external/gtest/codegear/gtest.cbproj @@ -0,0 +1,138 @@ + + + + {bca37a72-5b07-46cf-b44e-89f8e06451a2} + Release + + + true + + + true + true + Base + + + true + true + Base + + + true + lib + JPHNE + NO_STRICT + true + true + CppStaticLibrary + true + rtl.bpi;vcl.bpi;bcbie.bpi;vclx.bpi;vclactnband.bpi;xmlrtl.bpi;bcbsmp.bpi;dbrtl.bpi;vcldb.bpi;bdertl.bpi;vcldbx.bpi;dsnap.bpi;dsnapcon.bpi;vclib.bpi;ibxpress.bpi;adortl.bpi;dbxcds.bpi;dbexpress.bpi;DbxCommonDriver.bpi;websnap.bpi;vclie.bpi;webdsnap.bpi;inet.bpi;inetdbbde.bpi;inetdbxpress.bpi;soaprtl.bpi;Rave75VCL.bpi;teeUI.bpi;tee.bpi;teedb.bpi;IndyCore.bpi;IndySystem.bpi;IndyProtocols.bpi;IntrawebDB_90_100.bpi;Intraweb_90_100.bpi;dclZipForged11.bpi;vclZipForged11.bpi;GR32_BDS2006.bpi;GR32_DSGN_BDS2006.bpi;Jcl.bpi;JclVcl.bpi;JvCoreD11R.bpi;JvSystemD11R.bpi;JvStdCtrlsD11R.bpi;JvAppFrmD11R.bpi;JvBandsD11R.bpi;JvDBD11R.bpi;JvDlgsD11R.bpi;JvBDED11R.bpi;JvCmpD11R.bpi;JvCryptD11R.bpi;JvCtrlsD11R.bpi;JvCustomD11R.bpi;JvDockingD11R.bpi;JvDotNetCtrlsD11R.bpi;JvEDID11R.bpi;JvGlobusD11R.bpi;JvHMID11R.bpi;JvInterpreterD11R.bpi;JvJansD11R.bpi;JvManagedThreadsD11R.bpi;JvMMD11R.bpi;JvNetD11R.bpi;JvPageCompsD11R.bpi;JvPluginD11R.bpi;JvPrintPreviewD11R.bpi;JvRuntimeDesignD11R.bpi;JvTimeFrameworkD11R.bpi;JvValidatorsD11R.bpi;JvWizardD11R.bpi;JvXPCtrlsD11R.bpi;VclSmp.bpi;CExceptionExpert11.bpi + false + $(BDS)\include;$(BDS)\include\dinkumware;$(BDS)\include\vcl;..\src;..\include;.. 
+ rtl.lib;vcl.lib + 32 + $(BDS)\lib;$(BDS)\lib\obj;$(BDS)\lib\psdk + + + false + false + true + _DEBUG;$(Defines) + true + false + true + None + DEBUG + true + Debug + true + true + true + $(BDS)\lib\debug;$(ILINK_LibraryPath) + Full + true + + + NDEBUG;$(Defines) + Release + $(BDS)\lib\release;$(ILINK_LibraryPath) + None + + + CPlusPlusBuilder.Personality + CppStaticLibrary + +FalseFalse1000FalseFalseFalseFalseFalse103312521.0.0.01.0.0.0FalseFalseFalseTrueFalse + + + CodeGear C++Builder Office 2000 Servers Package + CodeGear C++Builder Office XP Servers Package + FalseTrueTrue3$(BDS)\include;$(BDS)\include\dinkumware;$(BDS)\include\vcl;..\src;..\include;..$(BDS)\include;$(BDS)\include\dinkumware;$(BDS)\include\vcl;..\src;..\include;..$(BDS)\include;$(BDS)\include\dinkumware;$(BDS)\include\vcl;..\src;..\src;..\include1$(BDS)\lib;$(BDS)\lib\obj;$(BDS)\lib\psdk1NO_STRICT13216 + + + + + 3 + + + 4 + + + 5 + + + 6 + + + 7 + + + 8 + + + 0 + + + 1 + + + 2 + + + 9 + + + 10 + + + 11 + + + 12 + + + 14 + + + 13 + + + 15 + + + 16 + + + 17 + + + 18 + + + Cfg_1 + + + Cfg_2 + + + \ No newline at end of file diff --git a/external/gtest/codegear/gtest.groupproj b/external/gtest/codegear/gtest.groupproj new file mode 100755 index 0000000000..849f4c4b81 --- /dev/null +++ b/external/gtest/codegear/gtest.groupproj @@ -0,0 +1,54 @@ + + + {c1d923e0-6cba-4332-9b6f-3420acbf5091} + + + + + + + + + Default.Personality + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/external/gtest/codegear/gtest_all.cc b/external/gtest/codegear/gtest_all.cc new file mode 100755 index 0000000000..ba7ad68ad1 --- /dev/null +++ b/external/gtest/codegear/gtest_all.cc @@ -0,0 +1,38 @@ +// Copyright 2009, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: Josh Kelley (joshkel@gmail.com) +// +// Google C++ Testing Framework (Google Test) +// +// C++Builder's IDE cannot build a static library from files with hyphens +// in their name. 
See http://qc.codegear.com/wc/qcmain.aspx?d=70977 . +// This file serves as a workaround. + +#include "src/gtest-all.cc" diff --git a/external/gtest/codegear/gtest_link.cc b/external/gtest/codegear/gtest_link.cc new file mode 100755 index 0000000000..b955ebf2f9 --- /dev/null +++ b/external/gtest/codegear/gtest_link.cc @@ -0,0 +1,40 @@ +// Copyright 2009, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: Josh Kelley (joshkel@gmail.com) +// +// Google C++ Testing Framework (Google Test) +// +// Links gtest.lib and gtest_main.lib into the current project in C++Builder. +// This means that these libraries can't be renamed, but it's the only way to +// ensure that Debug versus Release test builds are linked against the +// appropriate Debug or Release build of the libraries. 
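+// Usage note (assumed workflow, not stated in this file): a C++Builder test
+// project compiles gtest_link.cc among its own sources; the #pragma link
+// directives below then resolve gtest.lib and gtest_main.lib through the
+// project's per-configuration library search path, so Debug builds pick up
+// the Debug archives and Release builds the Release ones.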
+ +#pragma link "gtest.lib" +#pragma link "gtest_main.lib" diff --git a/external/gtest/codegear/gtest_main.cbproj b/external/gtest/codegear/gtest_main.cbproj new file mode 100755 index 0000000000..fae32cb29b --- /dev/null +++ b/external/gtest/codegear/gtest_main.cbproj @@ -0,0 +1,82 @@ + + + + {bca37a72-5b07-46cf-b44e-89f8e06451a2} + Release + + + true + + + true + true + Base + + + true + true + Base + + + true + lib + JPHNE + NO_STRICT + true + true + CppStaticLibrary + true + rtl.bpi;vcl.bpi;bcbie.bpi;vclx.bpi;vclactnband.bpi;xmlrtl.bpi;bcbsmp.bpi;dbrtl.bpi;vcldb.bpi;bdertl.bpi;vcldbx.bpi;dsnap.bpi;dsnapcon.bpi;vclib.bpi;ibxpress.bpi;adortl.bpi;dbxcds.bpi;dbexpress.bpi;DbxCommonDriver.bpi;websnap.bpi;vclie.bpi;webdsnap.bpi;inet.bpi;inetdbbde.bpi;inetdbxpress.bpi;soaprtl.bpi;Rave75VCL.bpi;teeUI.bpi;tee.bpi;teedb.bpi;IndyCore.bpi;IndySystem.bpi;IndyProtocols.bpi;IntrawebDB_90_100.bpi;Intraweb_90_100.bpi;dclZipForged11.bpi;vclZipForged11.bpi;GR32_BDS2006.bpi;GR32_DSGN_BDS2006.bpi;Jcl.bpi;JclVcl.bpi;JvCoreD11R.bpi;JvSystemD11R.bpi;JvStdCtrlsD11R.bpi;JvAppFrmD11R.bpi;JvBandsD11R.bpi;JvDBD11R.bpi;JvDlgsD11R.bpi;JvBDED11R.bpi;JvCmpD11R.bpi;JvCryptD11R.bpi;JvCtrlsD11R.bpi;JvCustomD11R.bpi;JvDockingD11R.bpi;JvDotNetCtrlsD11R.bpi;JvEDID11R.bpi;JvGlobusD11R.bpi;JvHMID11R.bpi;JvInterpreterD11R.bpi;JvJansD11R.bpi;JvManagedThreadsD11R.bpi;JvMMD11R.bpi;JvNetD11R.bpi;JvPageCompsD11R.bpi;JvPluginD11R.bpi;JvPrintPreviewD11R.bpi;JvRuntimeDesignD11R.bpi;JvTimeFrameworkD11R.bpi;JvValidatorsD11R.bpi;JvWizardD11R.bpi;JvXPCtrlsD11R.bpi;VclSmp.bpi;CExceptionExpert11.bpi + false + $(BDS)\include;$(BDS)\include\dinkumware;$(BDS)\include\vcl;..\src;..\include;.. + rtl.lib;vcl.lib + 32 + $(BDS)\lib;$(BDS)\lib\obj;$(BDS)\lib\psdk + + + false + false + true + _DEBUG;$(Defines) + true + false + true + None + DEBUG + true + Debug + true + true + true + $(BDS)\lib\debug;$(ILINK_LibraryPath) + Full + true + + + NDEBUG;$(Defines) + Release + $(BDS)\lib\release;$(ILINK_LibraryPath) + None + + + CPlusPlusBuilder.Personality + CppStaticLibrary + +FalseFalse1000FalseFalseFalseFalseFalse103312521.0.0.01.0.0.0FalseFalseFalseTrueFalse + CodeGear C++Builder Office 2000 Servers Package + CodeGear C++Builder Office XP Servers Package + FalseTrueTrue3$(BDS)\include;$(BDS)\include\dinkumware;$(BDS)\include\vcl;..\src;..\include;..$(BDS)\include;$(BDS)\include\dinkumware;$(BDS)\include\vcl;..\src;..\include;..$(BDS)\include;$(BDS)\include\dinkumware;$(BDS)\include\vcl;..\src;..\src;..\include1$(BDS)\lib;$(BDS)\lib\obj;$(BDS)\lib\psdk1NO_STRICT13216 + + + + + 0 + + + Cfg_1 + + + Cfg_2 + + + diff --git a/external/gtest/codegear/gtest_unittest.cbproj b/external/gtest/codegear/gtest_unittest.cbproj new file mode 100755 index 0000000000..33f7056346 --- /dev/null +++ b/external/gtest/codegear/gtest_unittest.cbproj @@ -0,0 +1,88 @@ + + + + {eea63393-5ac5-4b9c-8909-d75fef2daa41} + Release + + + true + + + true + true + Base + + + true + true + Base + + + exe + true + NO_STRICT + JPHNE + true + ..\test + true + CppConsoleApplication + true + true + 
rtl.bpi;vcl.bpi;bcbie.bpi;vclx.bpi;vclactnband.bpi;xmlrtl.bpi;bcbsmp.bpi;dbrtl.bpi;vcldb.bpi;bdertl.bpi;vcldbx.bpi;dsnap.bpi;dsnapcon.bpi;vclib.bpi;ibxpress.bpi;adortl.bpi;dbxcds.bpi;dbexpress.bpi;DbxCommonDriver.bpi;websnap.bpi;vclie.bpi;webdsnap.bpi;inet.bpi;inetdbbde.bpi;inetdbxpress.bpi;soaprtl.bpi;Rave75VCL.bpi;teeUI.bpi;tee.bpi;teedb.bpi;IndyCore.bpi;IndySystem.bpi;IndyProtocols.bpi;IntrawebDB_90_100.bpi;Intraweb_90_100.bpi;Jcl.bpi;JclVcl.bpi;JvCoreD11R.bpi;JvSystemD11R.bpi;JvStdCtrlsD11R.bpi;JvAppFrmD11R.bpi;JvBandsD11R.bpi;JvDBD11R.bpi;JvDlgsD11R.bpi;JvBDED11R.bpi;JvCmpD11R.bpi;JvCryptD11R.bpi;JvCtrlsD11R.bpi;JvCustomD11R.bpi;JvDockingD11R.bpi;JvDotNetCtrlsD11R.bpi;JvEDID11R.bpi;JvGlobusD11R.bpi;JvHMID11R.bpi;JvInterpreterD11R.bpi;JvJansD11R.bpi;JvManagedThreadsD11R.bpi;JvMMD11R.bpi;JvNetD11R.bpi;JvPageCompsD11R.bpi;JvPluginD11R.bpi;JvPrintPreviewD11R.bpi;JvRuntimeDesignD11R.bpi;JvTimeFrameworkD11R.bpi;JvValidatorsD11R.bpi;JvWizardD11R.bpi;JvXPCtrlsD11R.bpi;VclSmp.bpi + false + $(BDS)\include;$(BDS)\include\dinkumware;$(BDS)\include\vcl;..\include;..\test;.. + $(BDS)\lib;$(BDS)\lib\obj;$(BDS)\lib\psdk;..\test + true + + + false + false + true + _DEBUG;$(Defines) + true + false + true + None + DEBUG + true + Debug + true + true + true + $(BDS)\lib\debug;$(ILINK_LibraryPath) + Full + true + + + NDEBUG;$(Defines) + Release + $(BDS)\lib\release;$(ILINK_LibraryPath) + None + + + CPlusPlusBuilder.Personality + CppConsoleApplication + +FalseFalse1000FalseFalseFalseFalseFalse103312521.0.0.01.0.0.0FalseFalseFalseTrueFalse + + + CodeGear C++Builder Office 2000 Servers Package + CodeGear C++Builder Office XP Servers Package + FalseTrueTrue3$(BDS)\include;$(BDS)\include\dinkumware;$(BDS)\include\vcl;..\include;..\test;..$(BDS)\include;$(BDS)\include\dinkumware;$(BDS)\include\vcl;..\include;..\test$(BDS)\include;$(BDS)\include\dinkumware;$(BDS)\include\vcl;..\include1$(BDS)\lib;$(BDS)\lib\obj;$(BDS)\lib\psdk;..\test$(BDS)\lib;$(BDS)\lib\obj;$(BDS)\lib\psdk;..\test$(BDS)\lib;$(BDS)\lib\obj;$(BDS)\lib\psdk;$(OUTPUTDIR);..\test2NO_STRICTSTRICT + + + + + 0 + + + 1 + + + Cfg_1 + + + Cfg_2 + + + \ No newline at end of file diff --git a/external/gtest/configure b/external/gtest/configure new file mode 100755 index 0000000000..582a9a05f5 --- /dev/null +++ b/external/gtest/configure @@ -0,0 +1,18222 @@ +#! /bin/sh +# Guess values for system-dependent variables and create Makefiles. +# Generated by GNU Autoconf 2.68 for Google C++ Testing Framework 1.7.0. +# +# Report bugs to . +# +# +# Copyright (C) 1992, 1993, 1994, 1995, 1996, 1998, 1999, 2000, 2001, +# 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010 Free Software +# Foundation, Inc. +# +# +# This configure script is free software; the Free Software Foundation +# gives unlimited permission to copy, distribute and modify it. +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. 
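+# (How the probe below works, informally: as_echo is first built up into a
+# long run of backslashes; each candidate printer -- ksh's `print -r --`,
+# `printf %s`, /usr/ucb/echo, or an expr-based fallback -- is accepted only
+# if it reproduces that backslash-heavy string byte-for-byte.)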
+as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. +as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +if test "x$CONFIG_SHELL" = x; then + as_bourne_compatible="if test -n \"\${ZSH_VERSION+set}\" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on \${1+\"\$@\"}, which + # is contrary to our usage. Disable this feature. 
+ alias -g '\${1+\"\$@\"}'='\"\$@\"' + setopt NO_GLOB_SUBST +else + case \`(set -o) 2>/dev/null\` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi +" + as_required="as_fn_return () { (exit \$1); } +as_fn_success () { as_fn_return 0; } +as_fn_failure () { as_fn_return 1; } +as_fn_ret_success () { return 0; } +as_fn_ret_failure () { return 1; } + +exitcode=0 +as_fn_success || { exitcode=1; echo as_fn_success failed.; } +as_fn_failure && { exitcode=1; echo as_fn_failure succeeded.; } +as_fn_ret_success || { exitcode=1; echo as_fn_ret_success failed.; } +as_fn_ret_failure && { exitcode=1; echo as_fn_ret_failure succeeded.; } +if ( set x; as_fn_ret_success y && test x = \"\$1\" ); then : + +else + exitcode=1; echo positional parameters were not saved. +fi +test x\$exitcode = x0 || exit 1" + as_suggested=" as_lineno_1=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_1a=\$LINENO + as_lineno_2=";as_suggested=$as_suggested$LINENO;as_suggested=$as_suggested" as_lineno_2a=\$LINENO + eval 'test \"x\$as_lineno_1'\$as_run'\" != \"x\$as_lineno_2'\$as_run'\" && + test \"x\`expr \$as_lineno_1'\$as_run' + 1\`\" = \"x\$as_lineno_2'\$as_run'\"' || exit 1 + + test -n \"\${ZSH_VERSION+set}\${BASH_VERSION+set}\" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + ECHO=\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO\$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test \"X\`printf %s \$ECHO\`\" = \"X\$ECHO\" \\ + || test \"X\`print -r -- \$ECHO\`\" = \"X\$ECHO\" ) || exit 1 +test \$(( 1 + 1 )) = 2 || exit 1" + if (eval "$as_required") 2>/dev/null; then : + as_have_required=yes +else + as_have_required=no +fi + if test x$as_have_required = xyes && (eval "$as_suggested") 2>/dev/null; then : + +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +as_found=false +for as_dir in /bin$PATH_SEPARATOR/usr/bin$PATH_SEPARATOR$PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + as_found=: + case $as_dir in #( + /*) + for as_base in sh bash ksh sh5; do + # Try only shells that exist, to save several forks. + as_shell=$as_dir/$as_base + if { test -f "$as_shell" || test -f "$as_shell.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$as_shell"; } 2>/dev/null; then : + CONFIG_SHELL=$as_shell as_have_required=yes + if { $as_echo "$as_bourne_compatible""$as_suggested" | as_run=a "$as_shell"; } 2>/dev/null; then : + break 2 +fi +fi + done;; + esac + as_found=false +done +$as_found || { if { test -f "$SHELL" || test -f "$SHELL.exe"; } && + { $as_echo "$as_bourne_compatible""$as_required" | as_run=a "$SHELL"; } 2>/dev/null; then : + CONFIG_SHELL=$SHELL as_have_required=yes +fi; } +IFS=$as_save_IFS + + + if test "x$CONFIG_SHELL" != x; then : + # We cannot yet assume a decent shell, so we have to provide a + # neutralization value for shells without unset; and this also + # works around shells that cannot unset nonexistent variables. + # Preserve -v and -x to the replacement shell. 
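+ # (Informal sketch: when the running shell failed the probes above, the
+ # loop searched /bin, /usr/bin and $PATH for sh, bash, ksh and sh5 and
+ # recorded the first adequate one in CONFIG_SHELL; the exec below restarts
+ # this same script under it, re-passing -v/-x, e.g.
+ #   exec /bin/bash -x ./configure --prefix=/usr)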
+ BASH_ENV=/dev/null + ENV=/dev/null + (unset BASH_ENV) >/dev/null 2>&1 && unset BASH_ENV ENV + export CONFIG_SHELL + case $- in # (((( + *v*x* | *x*v* ) as_opts=-vx ;; + *v* ) as_opts=-v ;; + *x* ) as_opts=-x ;; + * ) as_opts= ;; + esac + exec "$CONFIG_SHELL" $as_opts "$as_myself" ${1+"$@"} +fi + + if test x$as_have_required = xno; then : + $as_echo "$0: This script requires a shell more modern than all" + $as_echo "$0: the shells that I found on your system." + if test x${ZSH_VERSION+set} = xset ; then + $as_echo "$0: In particular, zsh $ZSH_VERSION has bugs and should" + $as_echo "$0: be upgraded to zsh 4.3.4 or later." + else + $as_echo "$0: Please tell bug-autoconf@gnu.org and +$0: googletestframework@googlegroups.com about your system, +$0: including any error possibly output before this +$0: message. Then install a modern shell, or manually run +$0: the script under such a shell if you do have one." + fi + exit 1 +fi +fi +fi +SHELL=${CONFIG_SHELL-/bin/sh} +export SHELL +# Unset more variables known to interfere with behavior of common tools. +CLICOLOR_FORCE= GREP_OPTIONS= +unset CLICOLOR_FORCE GREP_OPTIONS + +## --------------------- ## +## M4sh Shell Functions. ## +## --------------------- ## +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. 
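+# (Worked example, not upstream text: `as_fn_arith 10 + 5` leaves as_val=15
+# on either branch. The expr fallback needs the trailing `|| test $? -eq 1`
+# because expr exits with status 1 whenever its result is 0 or null, and
+# that exit code must not be mistaken for a hard failure.)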
+if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? -eq 1` + } +fi # as_fn_arith + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + + + as_lineno_1=$LINENO as_lineno_1a=$LINENO + as_lineno_2=$LINENO as_lineno_2a=$LINENO + eval 'test "x$as_lineno_1'$as_run'" != "x$as_lineno_2'$as_run'" && + test "x`expr $as_lineno_1'$as_run' + 1`" = "x$as_lineno_2'$as_run'"' || { + # Blame Lee E. McMahon (1931-1989) for sed's syntax. :-) + sed -n ' + p + /[$]LINENO/= + ' <$as_myself | + sed ' + s/[$]LINENO.*/&-/ + t lineno + b + :lineno + N + :loop + s/[$]LINENO\([^'$as_cr_alnum'_].*\n\)\(.*\)/\2\1\2/ + t loop + s/-\n.*// + ' >$as_me.lineno && + chmod +x "$as_me.lineno" || + { $as_echo "$as_me: error: cannot create $as_me.lineno; rerun with a POSIX shell" >&2; as_fn_exit 1; } + + # Don't try to exec as it changes $[0], causing all sort of problems + # (the dirname of $[0] is not the place where we might find the + # original and so on. Autoconf is especially sensitive to this). + . "./$as_me.lineno" + # Exit status is that of the last command. + exit +} + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! 
-f conf$$.exe || + as_ln_s='cp -p' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -p' + fi +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + +if mkdir -p . 2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in #( + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + +SHELL=${CONFIG_SHELL-/bin/sh} + + +test -n "$DJDIR" || exec 7<&0 </dev/null 6>&1 + +# Name of the host. +# hostname on some systems (SVR3.2, old GNU/Linux) returns a bogus exit status, +# so uname gets run too. +ac_hostname=`(hostname || uname -n) 2>/dev/null | sed 1q` + +# +# Initializations. +# +ac_default_prefix=/usr/local +ac_clean_files= +ac_config_libobj_dir=. +LIBOBJS= +cross_compiling=no +subdirs= +MFLAGS= +MAKEFLAGS= + +# Identity of this package. +PACKAGE_NAME='Google C++ Testing Framework' +PACKAGE_TARNAME='gtest' +PACKAGE_VERSION='1.7.0' +PACKAGE_STRING='Google C++ Testing Framework 1.7.0' +PACKAGE_BUGREPORT='googletestframework@googlegroups.com' +PACKAGE_URL='' + +ac_unique_file="./LICENSE" +# Factoring default headers for most tests.
+ac_includes_default="\ +#include <stdio.h> +#ifdef HAVE_SYS_TYPES_H +# include <sys/types.h> +#endif +#ifdef HAVE_SYS_STAT_H +# include <sys/stat.h> +#endif +#ifdef STDC_HEADERS +# include <stdlib.h> +# include <stddef.h> +#else +# ifdef HAVE_STDLIB_H +# include <stdlib.h> +# endif +#endif +#ifdef HAVE_STRING_H +# if !defined STDC_HEADERS && defined HAVE_MEMORY_H +# include <memory.h> +# endif +# include <string.h> +#endif +#ifdef HAVE_STRINGS_H +# include <strings.h> +#endif +#ifdef HAVE_INTTYPES_H +# include <inttypes.h> +#endif +#ifdef HAVE_STDINT_H +# include <stdint.h> +#endif +#ifdef HAVE_UNISTD_H +# include <unistd.h> +#endif" + +ac_subst_vars='am__EXEEXT_FALSE +am__EXEEXT_TRUE +LTLIBOBJS +LIBOBJS +HAVE_PTHREADS_FALSE +HAVE_PTHREADS_TRUE +PTHREAD_CFLAGS +PTHREAD_LIBS +PTHREAD_CC +acx_pthread_config +HAVE_PYTHON_FALSE +HAVE_PYTHON_TRUE +PYTHON +CXXCPP +CPP +OTOOL64 +OTOOL +LIPO +NMEDIT +DSYMUTIL +MANIFEST_TOOL +RANLIB +ac_ct_AR +AR +DLLTOOL +OBJDUMP +LN_S +NM +ac_ct_DUMPBIN +DUMPBIN +LD +FGREP +EGREP +GREP +SED +host_os +host_vendor +host_cpu +host +build_os +build_vendor +build_cpu +build +LIBTOOL +am__fastdepCXX_FALSE +am__fastdepCXX_TRUE +CXXDEPMODE +ac_ct_CXX +CXXFLAGS +CXX +am__fastdepCC_FALSE +am__fastdepCC_TRUE +CCDEPMODE +am__nodep +AMDEPBACKSLASH +AMDEP_FALSE +AMDEP_TRUE +am__quote +am__include +DEPDIR +OBJEXT +EXEEXT +ac_ct_CC +CPPFLAGS +LDFLAGS +CFLAGS +CC +am__untar +am__tar +AMTAR +am__leading_dot +SET_MAKE +AWK +mkdir_p +MKDIR_P +INSTALL_STRIP_PROGRAM +STRIP +install_sh +MAKEINFO +AUTOHEADER +AUTOMAKE +AUTOCONF +ACLOCAL +VERSION +PACKAGE +CYGPATH_W +am__isrc +INSTALL_DATA +INSTALL_SCRIPT +INSTALL_PROGRAM +target_alias +host_alias +build_alias +LIBS +ECHO_T +ECHO_N +ECHO_C +DEFS +mandir +localedir +libdir +psdir +pdfdir +dvidir +htmldir +infodir +docdir +oldincludedir +includedir +localstatedir +sharedstatedir +sysconfdir +datadir +datarootdir +libexecdir +sbindir +bindir +program_transform_name +prefix +exec_prefix +PACKAGE_URL +PACKAGE_BUGREPORT +PACKAGE_STRING +PACKAGE_VERSION +PACKAGE_TARNAME +PACKAGE_NAME +PATH_SEPARATOR +SHELL' +ac_subst_files='' +ac_user_opts=' +enable_option_checking +enable_dependency_tracking +enable_shared +enable_static +with_pic +enable_fast_install +with_gnu_ld +with_sysroot +enable_libtool_lock +with_pthreads +' + ac_precious_vars='build_alias +host_alias +target_alias +CC +CFLAGS +LDFLAGS +LIBS +CPPFLAGS +CXX +CXXFLAGS +CCC +CPP +CXXCPP' + + +# Initialize some variables set by options. +ac_init_help= +ac_init_version=false +ac_unrecognized_opts= +ac_unrecognized_sep= +# The variables have the same names as the options, with +# dashes changed to underlines. +cache_file=/dev/null +exec_prefix=NONE +no_create= +no_recursion= +prefix=NONE +program_prefix=NONE +program_suffix=NONE +program_transform_name=s,x,x, +silent= +site= +srcdir= +verbose= +x_includes=NONE +x_libraries=NONE + +# Installation directory options. +# These are left unexpanded so users can "make install exec_prefix=/foo" +# and all the variables that are supposed to be based on exec_prefix +# by default will actually change. +# Use braces instead of parens because sh, perl, etc. also accept them. +# (The list follows the same order as the GNU Coding Standards.)
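+# (Illustration with assumed values: because the assignments below are
+# single-quoted, "make install exec_prefix=/opt/foo" re-expands them at
+# install time, so bindir='${exec_prefix}/bin' resolves to /opt/foo/bin
+# without re-running configure.)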
+bindir='${exec_prefix}/bin' +sbindir='${exec_prefix}/sbin' +libexecdir='${exec_prefix}/libexec' +datarootdir='${prefix}/share' +datadir='${datarootdir}' +sysconfdir='${prefix}/etc' +sharedstatedir='${prefix}/com' +localstatedir='${prefix}/var' +includedir='${prefix}/include' +oldincludedir='/usr/include' +docdir='${datarootdir}/doc/${PACKAGE_TARNAME}' +infodir='${datarootdir}/info' +htmldir='${docdir}' +dvidir='${docdir}' +pdfdir='${docdir}' +psdir='${docdir}' +libdir='${exec_prefix}/lib' +localedir='${datarootdir}/locale' +mandir='${datarootdir}/man' + +ac_prev= +ac_dashdash= +for ac_option +do + # If the previous option needs an argument, assign it. + if test -n "$ac_prev"; then + eval $ac_prev=\$ac_option + ac_prev= + continue + fi + + case $ac_option in + *=?*) ac_optarg=`expr "X$ac_option" : '[^=]*=\(.*\)'` ;; + *=) ac_optarg= ;; + *) ac_optarg=yes ;; + esac + + # Accept the important Cygnus configure options, so we can diagnose typos. + + case $ac_dashdash$ac_option in + --) + ac_dashdash=yes ;; + + -bindir | --bindir | --bindi | --bind | --bin | --bi) + ac_prev=bindir ;; + -bindir=* | --bindir=* | --bindi=* | --bind=* | --bin=* | --bi=*) + bindir=$ac_optarg ;; + + -build | --build | --buil | --bui | --bu) + ac_prev=build_alias ;; + -build=* | --build=* | --buil=* | --bui=* | --bu=*) + build_alias=$ac_optarg ;; + + -cache-file | --cache-file | --cache-fil | --cache-fi \ + | --cache-f | --cache- | --cache | --cach | --cac | --ca | --c) + ac_prev=cache_file ;; + -cache-file=* | --cache-file=* | --cache-fil=* | --cache-fi=* \ + | --cache-f=* | --cache-=* | --cache=* | --cach=* | --cac=* | --ca=* | --c=*) + cache_file=$ac_optarg ;; + + --config-cache | -C) + cache_file=config.cache ;; + + -datadir | --datadir | --datadi | --datad) + ac_prev=datadir ;; + -datadir=* | --datadir=* | --datadi=* | --datad=*) + datadir=$ac_optarg ;; + + -datarootdir | --datarootdir | --datarootdi | --datarootd | --dataroot \ + | --dataroo | --dataro | --datar) + ac_prev=datarootdir ;; + -datarootdir=* | --datarootdir=* | --datarootdi=* | --datarootd=* \ + | --dataroot=* | --dataroo=* | --dataro=* | --datar=*) + datarootdir=$ac_optarg ;; + + -disable-* | --disable-*) + ac_useropt=`expr "x$ac_option" : 'x-*disable-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--disable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=no ;; + + -docdir | --docdir | --docdi | --doc | --do) + ac_prev=docdir ;; + -docdir=* | --docdir=* | --docdi=* | --doc=* | --do=*) + docdir=$ac_optarg ;; + + -dvidir | --dvidir | --dvidi | --dvid | --dvi | --dv) + ac_prev=dvidir ;; + -dvidir=* | --dvidir=* | --dvidi=* | --dvid=* | --dvi=* | --dv=*) + dvidir=$ac_optarg ;; + + -enable-* | --enable-*) + ac_useropt=`expr "x$ac_option" : 'x-*enable-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? 
"invalid feature name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"enable_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--enable-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval enable_$ac_useropt=\$ac_optarg ;; + + -exec-prefix | --exec_prefix | --exec-prefix | --exec-prefi \ + | --exec-pref | --exec-pre | --exec-pr | --exec-p | --exec- \ + | --exec | --exe | --ex) + ac_prev=exec_prefix ;; + -exec-prefix=* | --exec_prefix=* | --exec-prefix=* | --exec-prefi=* \ + | --exec-pref=* | --exec-pre=* | --exec-pr=* | --exec-p=* | --exec-=* \ + | --exec=* | --exe=* | --ex=*) + exec_prefix=$ac_optarg ;; + + -gas | --gas | --ga | --g) + # Obsolete; use --with-gas. + with_gas=yes ;; + + -help | --help | --hel | --he | -h) + ac_init_help=long ;; + -help=r* | --help=r* | --hel=r* | --he=r* | -hr*) + ac_init_help=recursive ;; + -help=s* | --help=s* | --hel=s* | --he=s* | -hs*) + ac_init_help=short ;; + + -host | --host | --hos | --ho) + ac_prev=host_alias ;; + -host=* | --host=* | --hos=* | --ho=*) + host_alias=$ac_optarg ;; + + -htmldir | --htmldir | --htmldi | --htmld | --html | --htm | --ht) + ac_prev=htmldir ;; + -htmldir=* | --htmldir=* | --htmldi=* | --htmld=* | --html=* | --htm=* \ + | --ht=*) + htmldir=$ac_optarg ;; + + -includedir | --includedir | --includedi | --included | --include \ + | --includ | --inclu | --incl | --inc) + ac_prev=includedir ;; + -includedir=* | --includedir=* | --includedi=* | --included=* | --include=* \ + | --includ=* | --inclu=* | --incl=* | --inc=*) + includedir=$ac_optarg ;; + + -infodir | --infodir | --infodi | --infod | --info | --inf) + ac_prev=infodir ;; + -infodir=* | --infodir=* | --infodi=* | --infod=* | --info=* | --inf=*) + infodir=$ac_optarg ;; + + -libdir | --libdir | --libdi | --libd) + ac_prev=libdir ;; + -libdir=* | --libdir=* | --libdi=* | --libd=*) + libdir=$ac_optarg ;; + + -libexecdir | --libexecdir | --libexecdi | --libexecd | --libexec \ + | --libexe | --libex | --libe) + ac_prev=libexecdir ;; + -libexecdir=* | --libexecdir=* | --libexecdi=* | --libexecd=* | --libexec=* \ + | --libexe=* | --libex=* | --libe=*) + libexecdir=$ac_optarg ;; + + -localedir | --localedir | --localedi | --localed | --locale) + ac_prev=localedir ;; + -localedir=* | --localedir=* | --localedi=* | --localed=* | --locale=*) + localedir=$ac_optarg ;; + + -localstatedir | --localstatedir | --localstatedi | --localstated \ + | --localstate | --localstat | --localsta | --localst | --locals) + ac_prev=localstatedir ;; + -localstatedir=* | --localstatedir=* | --localstatedi=* | --localstated=* \ + | --localstate=* | --localstat=* | --localsta=* | --localst=* | --locals=*) + localstatedir=$ac_optarg ;; + + -mandir | --mandir | --mandi | --mand | --man | --ma | --m) + ac_prev=mandir ;; + -mandir=* | --mandir=* | --mandi=* | --mand=* | --man=* | --ma=* | --m=*) + mandir=$ac_optarg ;; + + -nfp | --nfp | --nf) + # Obsolete; use --without-fp. 
+ with_fp=no ;; + + -no-create | --no-create | --no-creat | --no-crea | --no-cre \ + | --no-cr | --no-c | -n) + no_create=yes ;; + + -no-recursion | --no-recursion | --no-recursio | --no-recursi \ + | --no-recurs | --no-recur | --no-recu | --no-rec | --no-re | --no-r) + no_recursion=yes ;; + + -oldincludedir | --oldincludedir | --oldincludedi | --oldincluded \ + | --oldinclude | --oldinclud | --oldinclu | --oldincl | --oldinc \ + | --oldin | --oldi | --old | --ol | --o) + ac_prev=oldincludedir ;; + -oldincludedir=* | --oldincludedir=* | --oldincludedi=* | --oldincluded=* \ + | --oldinclude=* | --oldinclud=* | --oldinclu=* | --oldincl=* | --oldinc=* \ + | --oldin=* | --oldi=* | --old=* | --ol=* | --o=*) + oldincludedir=$ac_optarg ;; + + -prefix | --prefix | --prefi | --pref | --pre | --pr | --p) + ac_prev=prefix ;; + -prefix=* | --prefix=* | --prefi=* | --pref=* | --pre=* | --pr=* | --p=*) + prefix=$ac_optarg ;; + + -program-prefix | --program-prefix | --program-prefi | --program-pref \ + | --program-pre | --program-pr | --program-p) + ac_prev=program_prefix ;; + -program-prefix=* | --program-prefix=* | --program-prefi=* \ + | --program-pref=* | --program-pre=* | --program-pr=* | --program-p=*) + program_prefix=$ac_optarg ;; + + -program-suffix | --program-suffix | --program-suffi | --program-suff \ + | --program-suf | --program-su | --program-s) + ac_prev=program_suffix ;; + -program-suffix=* | --program-suffix=* | --program-suffi=* \ + | --program-suff=* | --program-suf=* | --program-su=* | --program-s=*) + program_suffix=$ac_optarg ;; + + -program-transform-name | --program-transform-name \ + | --program-transform-nam | --program-transform-na \ + | --program-transform-n | --program-transform- \ + | --program-transform | --program-transfor \ + | --program-transfo | --program-transf \ + | --program-trans | --program-tran \ + | --progr-tra | --program-tr | --program-t) + ac_prev=program_transform_name ;; + -program-transform-name=* | --program-transform-name=* \ + | --program-transform-nam=* | --program-transform-na=* \ + | --program-transform-n=* | --program-transform-=* \ + | --program-transform=* | --program-transfor=* \ + | --program-transfo=* | --program-transf=* \ + | --program-trans=* | --program-tran=* \ + | --progr-tra=* | --program-tr=* | --program-t=*) + program_transform_name=$ac_optarg ;; + + -pdfdir | --pdfdir | --pdfdi | --pdfd | --pdf | --pd) + ac_prev=pdfdir ;; + -pdfdir=* | --pdfdir=* | --pdfdi=* | --pdfd=* | --pdf=* | --pd=*) + pdfdir=$ac_optarg ;; + + -psdir | --psdir | --psdi | --psd | --ps) + ac_prev=psdir ;; + -psdir=* | --psdir=* | --psdi=* | --psd=* | --ps=*) + psdir=$ac_optarg ;; + + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + silent=yes ;; + + -sbindir | --sbindir | --sbindi | --sbind | --sbin | --sbi | --sb) + ac_prev=sbindir ;; + -sbindir=* | --sbindir=* | --sbindi=* | --sbind=* | --sbin=* \ + | --sbi=* | --sb=*) + sbindir=$ac_optarg ;; + + -sharedstatedir | --sharedstatedir | --sharedstatedi \ + | --sharedstated | --sharedstate | --sharedstat | --sharedsta \ + | --sharedst | --shareds | --shared | --share | --shar \ + | --sha | --sh) + ac_prev=sharedstatedir ;; + -sharedstatedir=* | --sharedstatedir=* | --sharedstatedi=* \ + | --sharedstated=* | --sharedstate=* | --sharedstat=* | --sharedsta=* \ + | --sharedst=* | --shareds=* | --shared=* | --share=* | --shar=* \ + | --sha=* | --sh=*) + sharedstatedir=$ac_optarg ;; + + -site | --site | --sit) + ac_prev=site ;; + -site=* | --site=* | --sit=*) + 
site=$ac_optarg ;; + + -srcdir | --srcdir | --srcdi | --srcd | --src | --sr) + ac_prev=srcdir ;; + -srcdir=* | --srcdir=* | --srcdi=* | --srcd=* | --src=* | --sr=*) + srcdir=$ac_optarg ;; + + -sysconfdir | --sysconfdir | --sysconfdi | --sysconfd | --sysconf \ + | --syscon | --sysco | --sysc | --sys | --sy) + ac_prev=sysconfdir ;; + -sysconfdir=* | --sysconfdir=* | --sysconfdi=* | --sysconfd=* | --sysconf=* \ + | --syscon=* | --sysco=* | --sysc=* | --sys=* | --sy=*) + sysconfdir=$ac_optarg ;; + + -target | --target | --targe | --targ | --tar | --ta | --t) + ac_prev=target_alias ;; + -target=* | --target=* | --targe=* | --targ=* | --tar=* | --ta=* | --t=*) + target_alias=$ac_optarg ;; + + -v | -verbose | --verbose | --verbos | --verbo | --verb) + verbose=yes ;; + + -version | --version | --versio | --versi | --vers | -V) + ac_init_version=: ;; + + -with-* | --with-*) + ac_useropt=`expr "x$ac_option" : 'x-*with-\([^=]*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--with-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=\$ac_optarg ;; + + -without-* | --without-*) + ac_useropt=`expr "x$ac_option" : 'x-*without-\(.*\)'` + # Reject names that are not valid shell variable names. + expr "x$ac_useropt" : ".*[^-+._$as_cr_alnum]" >/dev/null && + as_fn_error $? "invalid package name: $ac_useropt" + ac_useropt_orig=$ac_useropt + ac_useropt=`$as_echo "$ac_useropt" | sed 's/[-+.]/_/g'` + case $ac_user_opts in + *" +"with_$ac_useropt" +"*) ;; + *) ac_unrecognized_opts="$ac_unrecognized_opts$ac_unrecognized_sep--without-$ac_useropt_orig" + ac_unrecognized_sep=', ';; + esac + eval with_$ac_useropt=no ;; + + --x) + # Obsolete; use --with-x. + with_x=yes ;; + + -x-includes | --x-includes | --x-include | --x-includ | --x-inclu \ + | --x-incl | --x-inc | --x-in | --x-i) + ac_prev=x_includes ;; + -x-includes=* | --x-includes=* | --x-include=* | --x-includ=* | --x-inclu=* \ + | --x-incl=* | --x-inc=* | --x-in=* | --x-i=*) + x_includes=$ac_optarg ;; + + -x-libraries | --x-libraries | --x-librarie | --x-librari \ + | --x-librar | --x-libra | --x-libr | --x-lib | --x-li | --x-l) + ac_prev=x_libraries ;; + -x-libraries=* | --x-libraries=* | --x-librarie=* | --x-librari=* \ + | --x-librar=* | --x-libra=* | --x-libr=* | --x-lib=* | --x-li=* | --x-l=*) + x_libraries=$ac_optarg ;; + + -*) as_fn_error $? "unrecognized option: \`$ac_option' +Try \`$0 --help' for more information" + ;; + + *=*) + ac_envvar=`expr "x$ac_option" : 'x\([^=]*\)='` + # Reject names that are not valid shell variable names. + case $ac_envvar in #( + '' | [0-9]* | *[!_$as_cr_alnum]* ) + as_fn_error $? "invalid variable name: \`$ac_envvar'" ;; + esac + eval $ac_envvar=\$ac_optarg + export $ac_envvar ;; + + *) + # FIXME: should be removed in autoconf 3.0. + $as_echo "$as_me: WARNING: you should use --build, --host, --target" >&2 + expr "x$ac_option" : ".*[^-._$as_cr_alnum]" >/dev/null && + $as_echo "$as_me: WARNING: invalid host type: $ac_option" >&2 + : "${build_alias=$ac_option} ${host_alias=$ac_option} ${target_alias=$ac_option}" + ;; + + esac +done + +if test -n "$ac_prev"; then + ac_option=--`echo $ac_prev | sed 's/_/-/g'` + as_fn_error $? 
"missing argument to $ac_option" +fi + +if test -n "$ac_unrecognized_opts"; then + case $enable_option_checking in + no) ;; + fatal) as_fn_error $? "unrecognized options: $ac_unrecognized_opts" ;; + *) $as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2 ;; + esac +fi + +# Check all directory arguments for consistency. +for ac_var in exec_prefix prefix bindir sbindir libexecdir datarootdir \ + datadir sysconfdir sharedstatedir localstatedir includedir \ + oldincludedir docdir infodir htmldir dvidir pdfdir psdir \ + libdir localedir mandir +do + eval ac_val=\$$ac_var + # Remove trailing slashes. + case $ac_val in + */ ) + ac_val=`expr "X$ac_val" : 'X\(.*[^/]\)' \| "X$ac_val" : 'X\(.*\)'` + eval $ac_var=\$ac_val;; + esac + # Be sure to have absolute directory names. + case $ac_val in + [\\/$]* | ?:[\\/]* ) continue;; + NONE | '' ) case $ac_var in *prefix ) continue;; esac;; + esac + as_fn_error $? "expected an absolute directory name for --$ac_var: $ac_val" +done + +# There might be people who depend on the old broken behavior: `$host' +# used to hold the argument of --host etc. +# FIXME: To remove some day. +build=$build_alias +host=$host_alias +target=$target_alias + +# FIXME: To remove some day. +if test "x$host_alias" != x; then + if test "x$build_alias" = x; then + cross_compiling=maybe + $as_echo "$as_me: WARNING: if you wanted to set the --build type, don't use --host. + If a cross compiler is detected then cross compile mode will be used" >&2 + elif test "x$build_alias" != "x$host_alias"; then + cross_compiling=yes + fi +fi + +ac_tool_prefix= +test -n "$host_alias" && ac_tool_prefix=$host_alias- + +test "$silent" = yes && exec 6>/dev/null + + +ac_pwd=`pwd` && test -n "$ac_pwd" && +ac_ls_di=`ls -di .` && +ac_pwd_ls_di=`cd "$ac_pwd" && ls -di .` || + as_fn_error $? "working directory cannot be determined" +test "X$ac_ls_di" = "X$ac_pwd_ls_di" || + as_fn_error $? "pwd does not report name of working directory" + + +# Find the source files, if location was not specified. +if test -z "$srcdir"; then + ac_srcdir_defaulted=yes + # Try the directory containing this script, then the parent directory. + ac_confdir=`$as_dirname -- "$as_myself" || +$as_expr X"$as_myself" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_myself" : 'X\(//\)[^/]' \| \ + X"$as_myself" : 'X\(//\)$' \| \ + X"$as_myself" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_myself" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + srcdir=$ac_confdir + if test ! -r "$srcdir/$ac_unique_file"; then + srcdir=.. + fi +else + ac_srcdir_defaulted=no +fi +if test ! -r "$srcdir/$ac_unique_file"; then + test "$ac_srcdir_defaulted" = yes && srcdir="$ac_confdir or .." + as_fn_error $? "cannot find sources ($ac_unique_file) in $srcdir" +fi +ac_msg="sources are in $srcdir, but \`cd $srcdir' does not work" +ac_abs_confdir=`( + cd "$srcdir" && test -r "./$ac_unique_file" || as_fn_error $? "$ac_msg" + pwd)` +# When building in place, set srcdir=. +if test "$ac_abs_confdir" = "$ac_pwd"; then + srcdir=. +fi +# Remove unnecessary trailing slashes from srcdir. +# Double slashes in file names in object file debugging info +# mess up M-x gdb in Emacs. 
+case $srcdir in +*/) srcdir=`expr "X$srcdir" : 'X\(.*[^/]\)' \| "X$srcdir" : 'X\(.*\)'`;; +esac +for ac_var in $ac_precious_vars; do + eval ac_env_${ac_var}_set=\${${ac_var}+set} + eval ac_env_${ac_var}_value=\$${ac_var} + eval ac_cv_env_${ac_var}_set=\${${ac_var}+set} + eval ac_cv_env_${ac_var}_value=\$${ac_var} +done + +# +# Report the --help message. +# +if test "$ac_init_help" = "long"; then + # Omit some internal or obsolete options to make the list less imposing. + # This message is too long to be a string in the A/UX 3.1 sh. + cat <<_ACEOF +\`configure' configures Google C++ Testing Framework 1.7.0 to adapt to many kinds of systems. + +Usage: $0 [OPTION]... [VAR=VALUE]... + +To assign environment variables (e.g., CC, CFLAGS...), specify them as +VAR=VALUE. See below for descriptions of some of the useful variables. + +Defaults for the options are specified in brackets. + +Configuration: + -h, --help display this help and exit + --help=short display options specific to this package + --help=recursive display the short help of all the included packages + -V, --version display version information and exit + -q, --quiet, --silent do not print \`checking ...' messages + --cache-file=FILE cache test results in FILE [disabled] + -C, --config-cache alias for \`--cache-file=config.cache' + -n, --no-create do not create output files + --srcdir=DIR find the sources in DIR [configure dir or \`..'] + +Installation directories: + --prefix=PREFIX install architecture-independent files in PREFIX + [$ac_default_prefix] + --exec-prefix=EPREFIX install architecture-dependent files in EPREFIX + [PREFIX] + +By default, \`make install' will install all the files in +\`$ac_default_prefix/bin', \`$ac_default_prefix/lib' etc. You can specify +an installation prefix other than \`$ac_default_prefix' using \`--prefix', +for instance \`--prefix=\$HOME'. + +For better control, use the options below. 
+
+Fine tuning of the installation directories:
+  --bindir=DIR            user executables [EPREFIX/bin]
+  --sbindir=DIR           system admin executables [EPREFIX/sbin]
+  --libexecdir=DIR        program executables [EPREFIX/libexec]
+  --sysconfdir=DIR        read-only single-machine data [PREFIX/etc]
+  --sharedstatedir=DIR    modifiable architecture-independent data [PREFIX/com]
+  --localstatedir=DIR     modifiable single-machine data [PREFIX/var]
+  --libdir=DIR            object code libraries [EPREFIX/lib]
+  --includedir=DIR        C header files [PREFIX/include]
+  --oldincludedir=DIR     C header files for non-gcc [/usr/include]
+  --datarootdir=DIR       read-only arch.-independent data root [PREFIX/share]
+  --datadir=DIR           read-only architecture-independent data [DATAROOTDIR]
+  --infodir=DIR           info documentation [DATAROOTDIR/info]
+  --localedir=DIR         locale-dependent data [DATAROOTDIR/locale]
+  --mandir=DIR            man documentation [DATAROOTDIR/man]
+  --docdir=DIR            documentation root [DATAROOTDIR/doc/gtest]
+  --htmldir=DIR           html documentation [DOCDIR]
+  --dvidir=DIR            dvi documentation [DOCDIR]
+  --pdfdir=DIR            pdf documentation [DOCDIR]
+  --psdir=DIR             ps documentation [DOCDIR]
+_ACEOF
+
+  cat <<\_ACEOF
+
+Program names:
+  --program-prefix=PREFIX            prepend PREFIX to installed program names
+  --program-suffix=SUFFIX            append SUFFIX to installed program names
+  --program-transform-name=PROGRAM   run sed PROGRAM on installed program names
+
+System types:
+  --build=BUILD     configure for building on BUILD [guessed]
+  --host=HOST       cross-compile to build programs to run on HOST [BUILD]
+_ACEOF
+fi
+
+if test -n "$ac_init_help"; then
+  case $ac_init_help in
+     short | recursive ) echo "Configuration of Google C++ Testing Framework 1.7.0:";;
+   esac
+  cat <<\_ACEOF
+
+Optional Features:
+  --disable-option-checking  ignore unrecognized --enable/--with options
+  --disable-FEATURE       do not include FEATURE (same as --enable-FEATURE=no)
+  --enable-FEATURE[=ARG]  include FEATURE [ARG=yes]
+  --disable-dependency-tracking  speeds up one-time build
+  --enable-dependency-tracking   do not reject slow dependency extractors
+  --enable-shared[=PKGS]  build shared libraries [default=yes]
+  --enable-static[=PKGS]  build static libraries [default=yes]
+  --enable-fast-install[=PKGS]
+                          optimize for fast installation [default=yes]
+  --disable-libtool-lock  avoid locking (might break parallel builds)
+
+Optional Packages:
+  --with-PACKAGE[=ARG]    use PACKAGE [ARG=yes]
+  --without-PACKAGE       do not use PACKAGE (same as --with-PACKAGE=no)
+  --with-pic[=PKGS]       try to use only PIC/non-PIC objects [default=use
+                          both]
+  --with-gnu-ld           assume the C compiler uses GNU ld [default=no]
+  --with-sysroot=DIR Search for dependent libraries within DIR
+                        (or the compiler's sysroot if not specified).
+  --with-pthreads         use pthreads (default is yes)
+
+Some influential environment variables:
+  CC          C compiler command
+  CFLAGS      C compiler flags
+  LDFLAGS     linker flags, e.g. -L<lib dir> if you have libraries in a
+              nonstandard directory <lib dir>
+  LIBS        libraries to pass to the linker, e.g. -l<library>
+  CPPFLAGS    (Objective) C/C++ preprocessor flags, e.g. -I<include dir> if
+              you have headers in a nonstandard directory <include dir>
+  CXX         C++ compiler command
+  CXXFLAGS    C++ compiler flags
+  CPP         C preprocessor
+  CXXCPP      C++ preprocessor
+
+Use these variables to override the choices made by `configure' or to help
+it to find libraries and programs with nonstandard names/locations.
+
+Report bugs to <googletestframework@googlegroups.com>.
+_ACEOF
+ac_status=$?
+fi
+
+if test "$ac_init_help" = "recursive"; then
+  # If there are subdirs, report their specific --help.
+ for ac_dir in : $ac_subdirs_all; do test "x$ac_dir" = x: && continue + test -d "$ac_dir" || + { cd "$srcdir" && ac_pwd=`pwd` && srcdir=. && test -d "$ac_dir"; } || + continue + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. + ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + cd "$ac_dir" || { ac_status=$?; continue; } + # Check for guested configure. + if test -f "$ac_srcdir/configure.gnu"; then + echo && + $SHELL "$ac_srcdir/configure.gnu" --help=recursive + elif test -f "$ac_srcdir/configure"; then + echo && + $SHELL "$ac_srcdir/configure" --help=recursive + else + $as_echo "$as_me: WARNING: no configuration information is in $ac_dir" >&2 + fi || ac_status=$? + cd "$ac_pwd" || { ac_status=$?; break; } + done +fi + +test -n "$ac_init_help" && exit $ac_status +if $ac_init_version; then + cat <<\_ACEOF +Google C++ Testing Framework configure 1.7.0 +generated by GNU Autoconf 2.68 + +Copyright (C) 2010 Free Software Foundation, Inc. +This configure script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it. +_ACEOF + exit +fi + +## ------------------------ ## +## Autoconf initialization. ## +## ------------------------ ## + +# ac_fn_c_try_compile LINENO +# -------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_compile + +# ac_fn_cxx_try_compile LINENO +# ---------------------------- +# Try to compile conftest.$ac_ext, and return whether this succeeded. 
+ac_fn_cxx_try_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext + if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_cxx_werror_flag" || + test ! -s conftest.err + } && test -s conftest.$ac_objext; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_compile + +# ac_fn_c_try_link LINENO +# ----------------------- +# Try to link conftest.$ac_ext, and return whether this succeeded. +ac_fn_c_try_link () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + rm -f conftest.$ac_objext conftest$ac_exeext + if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + grep -v '^ *+' conftest.err >conftest.er1 + cat conftest.er1 >&5 + mv -f conftest.er1 conftest.err + fi + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && { + test -z "$ac_c_werror_flag" || + test ! -s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_c_try_link + +# ac_fn_c_check_header_compile LINENO HEADER VAR INCLUDES +# ------------------------------------------------------- +# Tests whether HEADER exists and can be compiled using the include files in +# INCLUDES, setting the cache variable VAR accordingly. +ac_fn_c_check_header_compile () +{ + as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5 +$as_echo_n "checking for $2... " >&6; } +if eval \${$3+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/
+$4
+#include <$2>
+_ACEOF
+if ac_fn_c_try_compile "$LINENO"; then :
+  eval "$3=yes"
+else
+  eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_header_compile
+
+# ac_fn_c_try_cpp LINENO
+# ----------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_c_try_cpp ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } > conftest.i && {
+     test -z "$ac_c_preproc_warn_flag$ac_c_werror_flag" ||
+     test ! -s conftest.err
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+  ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_cpp
+
+# ac_fn_c_try_run LINENO
+# ----------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded. Assumes
+# that executables *can* be run.
+ac_fn_c_try_run ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && { ac_try='./conftest$ac_exeext'
+  { { case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_try") 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; }; }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: program exited with status $ac_status" >&5
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+  ac_retval=$ac_status
+fi
+  rm -rf conftest.dSYM conftest_ipa8_conftest.oo
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_c_try_run
+
+# ac_fn_c_check_func LINENO FUNC VAR
+# ----------------------------------
+# Tests whether FUNC exists, setting the cache variable VAR accordingly
+ac_fn_c_check_func ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for $2" >&5
+$as_echo_n "checking for $2... " >&6; }
+if eval \${$3+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  cat confdefs.h - <<_ACEOF >conftest.$ac_ext
+/* end confdefs.h.  */
+/* Define $2 to an innocuous variant, in case <limits.h> declares $2.
+   For example, HP-UX 11i <limits.h> declares gettimeofday.  */
+#define $2 innocuous_$2
+
+/* System header to define __stub macros and hopefully few prototypes,
+    which can conflict with char $2 (); below.
+    Prefer <limits.h> to <assert.h> if __STDC__ is defined, since
+    <limits.h> exists even on freestanding compilers.  */
+
+#ifdef __STDC__
+# include <limits.h>
+#else
+# include <assert.h>
+#endif
+
+#undef $2
+
+/* Override any GCC internal prototype to avoid an error.
+   Use char because int might match the return type of a GCC
+   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char $2 ();
+/* The GNU C library defines this for functions which it implements
+    to always fail with ENOSYS.  Some functions are actually named
+    something starting with __ and the normal name is an alias.  */
+#if defined __stub_$2 || defined __stub___$2
+choke me
+#endif
+
+int
+main ()
+{
+return $2 ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  eval "$3=yes"
+else
+  eval "$3=no"
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+fi
+eval ac_res=\$$3
+       { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_res" >&5
+$as_echo "$ac_res" >&6; }
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+
+} # ac_fn_c_check_func
+
+# ac_fn_cxx_try_cpp LINENO
+# ------------------------
+# Try to preprocess conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_cpp ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  if { { ac_try="$ac_cpp conftest.$ac_ext"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_cpp conftest.$ac_ext") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } > conftest.i && {
+     test -z "$ac_cxx_preproc_warn_flag$ac_cxx_werror_flag" ||
+     test ! -s conftest.err
+       }; then :
+  ac_retval=0
+else
+  $as_echo "$as_me: failed program was:" >&5
+sed 's/^/| /' conftest.$ac_ext >&5
+
+  ac_retval=1
+fi
+  eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno
+  as_fn_set_status $ac_retval
+
+} # ac_fn_cxx_try_cpp
+
+# ac_fn_cxx_try_link LINENO
+# -------------------------
+# Try to link conftest.$ac_ext, and return whether this succeeded.
+ac_fn_cxx_try_link ()
+{
+  as_lineno=${as_lineno-"$1"} as_lineno_stack=as_lineno_stack=$as_lineno_stack
+  rm -f conftest.$ac_objext conftest$ac_exeext
+  if { { ac_try="$ac_link"
+case "(($ac_try" in
+  *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;;
+  *) ac_try_echo=$ac_try;;
+esac
+eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\""
+$as_echo "$ac_try_echo"; } >&5
+  (eval "$ac_link") 2>conftest.err
+  ac_status=$?
+  if test -s conftest.err; then
+    grep -v '^ *+' conftest.err >conftest.er1
+    cat conftest.er1 >&5
+    mv -f conftest.er1 conftest.err
+  fi
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && {
+     test -z "$ac_cxx_werror_flag" ||
+     test !
-s conftest.err + } && test -s conftest$ac_exeext && { + test "$cross_compiling" = yes || + $as_test_x conftest$ac_exeext + }; then : + ac_retval=0 +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + + ac_retval=1 +fi + # Delete the IPA/IPO (Inter Procedural Analysis/Optimization) information + # created by the PGI compiler (conftest_ipa8_conftest.oo), as it would + # interfere with the next link command; also delete a directory that is + # left behind by Apple's compiler. We do this before executing the actions. + rm -rf conftest.dSYM conftest_ipa8_conftest.oo + eval $as_lineno_stack; ${as_lineno_stack:+:} unset as_lineno + as_fn_set_status $ac_retval + +} # ac_fn_cxx_try_link +cat >config.log <<_ACEOF +This file contains any messages produced by compilers while +running configure, to aid debugging if configure makes a mistake. + +It was created by Google C++ Testing Framework $as_me 1.7.0, which was +generated by GNU Autoconf 2.68. Invocation command line was + + $ $0 $@ + +_ACEOF +exec 5>>config.log +{ +cat <<_ASUNAME +## --------- ## +## Platform. ## +## --------- ## + +hostname = `(hostname || uname -n) 2>/dev/null | sed 1q` +uname -m = `(uname -m) 2>/dev/null || echo unknown` +uname -r = `(uname -r) 2>/dev/null || echo unknown` +uname -s = `(uname -s) 2>/dev/null || echo unknown` +uname -v = `(uname -v) 2>/dev/null || echo unknown` + +/usr/bin/uname -p = `(/usr/bin/uname -p) 2>/dev/null || echo unknown` +/bin/uname -X = `(/bin/uname -X) 2>/dev/null || echo unknown` + +/bin/arch = `(/bin/arch) 2>/dev/null || echo unknown` +/usr/bin/arch -k = `(/usr/bin/arch -k) 2>/dev/null || echo unknown` +/usr/convex/getsysinfo = `(/usr/convex/getsysinfo) 2>/dev/null || echo unknown` +/usr/bin/hostinfo = `(/usr/bin/hostinfo) 2>/dev/null || echo unknown` +/bin/machine = `(/bin/machine) 2>/dev/null || echo unknown` +/usr/bin/oslevel = `(/usr/bin/oslevel) 2>/dev/null || echo unknown` +/bin/universe = `(/bin/universe) 2>/dev/null || echo unknown` + +_ASUNAME + +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + $as_echo "PATH: $as_dir" + done +IFS=$as_save_IFS + +} >&5 + +cat >&5 <<_ACEOF + + +## ----------- ## +## Core tests. ## +## ----------- ## + +_ACEOF + + +# Keep a trace of the command line. +# Strip out --no-create and --no-recursion so they do not pile up. +# Strip out --silent because we don't want to record it for future runs. +# Also quote any args containing shell meta-characters. +# Make two passes to allow for proper duplicate-argument suppression. +ac_configure_args= +ac_configure_args0= +ac_configure_args1= +ac_must_keep_next=false +for ac_pass in 1 2 +do + for ac_arg + do + case $ac_arg in + -no-create | --no-c* | -n | -no-recursion | --no-r*) continue ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil) + continue ;; + *\'*) + ac_arg=`$as_echo "$ac_arg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + case $ac_pass in + 1) as_fn_append ac_configure_args0 " '$ac_arg'" ;; + 2) + as_fn_append ac_configure_args1 " '$ac_arg'" + if test $ac_must_keep_next = true; then + ac_must_keep_next=false # Got value, back to normal. 
+ else + case $ac_arg in + *=* | --config-cache | -C | -disable-* | --disable-* \ + | -enable-* | --enable-* | -gas | --g* | -nfp | --nf* \ + | -q | -quiet | --q* | -silent | --sil* | -v | -verb* \ + | -with-* | --with-* | -without-* | --without-* | --x) + case "$ac_configure_args0 " in + "$ac_configure_args1"*" '$ac_arg' "* ) continue ;; + esac + ;; + -* ) ac_must_keep_next=true ;; + esac + fi + as_fn_append ac_configure_args " '$ac_arg'" + ;; + esac + done +done +{ ac_configure_args0=; unset ac_configure_args0;} +{ ac_configure_args1=; unset ac_configure_args1;} + +# When interrupted or exit'd, cleanup temporary files, and complete +# config.log. We remove comments because anyway the quotes in there +# would cause problems or look ugly. +# WARNING: Use '\'' to represent an apostrophe within the trap. +# WARNING: Do not start the trap code with a newline, due to a FreeBSD 4.0 bug. +trap 'exit_status=$? + # Save into config.log some information that might help in debugging. + { + echo + + $as_echo "## ---------------- ## +## Cache variables. ## +## ---------------- ##" + echo + # The following way of writing the cache mishandles newlines in values, +( + for ac_var in `(set) 2>&1 | sed -n '\''s/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'\''`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + (set) 2>&1 | + case $as_nl`(ac_space='\'' '\''; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + sed -n \ + "s/'\''/'\''\\\\'\'''\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\''\\2'\''/p" + ;; #( + *) + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) + echo + + $as_echo "## ----------------- ## +## Output variables. ## +## ----------------- ##" + echo + for ac_var in $ac_subst_vars + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + + if test -n "$ac_subst_files"; then + $as_echo "## ------------------- ## +## File substitutions. ## +## ------------------- ##" + echo + for ac_var in $ac_subst_files + do + eval ac_val=\$$ac_var + case $ac_val in + *\'\''*) ac_val=`$as_echo "$ac_val" | sed "s/'\''/'\''\\\\\\\\'\'''\''/g"`;; + esac + $as_echo "$ac_var='\''$ac_val'\''" + done | sort + echo + fi + + if test -s confdefs.h; then + $as_echo "## ----------- ## +## confdefs.h. ## +## ----------- ##" + echo + cat confdefs.h + echo + fi + test "$ac_signal" != 0 && + $as_echo "$as_me: caught signal $ac_signal" + $as_echo "$as_me: exit $exit_status" + } >&5 + rm -f core *.core core.conftest.* && + rm -f -r conftest* confdefs* conf$$* $ac_clean_files && + exit $exit_status +' 0 +for ac_signal in 1 2 13 15; do + trap 'ac_signal='$ac_signal'; as_fn_exit 1' $ac_signal +done +ac_signal=0 + +# confdefs.h avoids OS command line length limits that DEFS can exceed. +rm -f -r conftest* confdefs.h + +$as_echo "/* confdefs.h */" > confdefs.h + +# Predefined preprocessor variables. 
+ +cat >>confdefs.h <<_ACEOF +#define PACKAGE_NAME "$PACKAGE_NAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_TARNAME "$PACKAGE_TARNAME" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_VERSION "$PACKAGE_VERSION" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_STRING "$PACKAGE_STRING" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_BUGREPORT "$PACKAGE_BUGREPORT" +_ACEOF + +cat >>confdefs.h <<_ACEOF +#define PACKAGE_URL "$PACKAGE_URL" +_ACEOF + + +# Let the site file select an alternate cache file if it wants to. +# Prefer an explicitly selected file to automatically selected ones. +ac_site_file1=NONE +ac_site_file2=NONE +if test -n "$CONFIG_SITE"; then + # We do not want a PATH search for config.site. + case $CONFIG_SITE in #(( + -*) ac_site_file1=./$CONFIG_SITE;; + */*) ac_site_file1=$CONFIG_SITE;; + *) ac_site_file1=./$CONFIG_SITE;; + esac +elif test "x$prefix" != xNONE; then + ac_site_file1=$prefix/share/config.site + ac_site_file2=$prefix/etc/config.site +else + ac_site_file1=$ac_default_prefix/share/config.site + ac_site_file2=$ac_default_prefix/etc/config.site +fi +for ac_site_file in "$ac_site_file1" "$ac_site_file2" +do + test "x$ac_site_file" = xNONE && continue + if test /dev/null != "$ac_site_file" && test -r "$ac_site_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading site script $ac_site_file" >&5 +$as_echo "$as_me: loading site script $ac_site_file" >&6;} + sed 's/^/| /' "$ac_site_file" >&5 + . "$ac_site_file" \ + || { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "failed to load site script $ac_site_file +See \`config.log' for more details" "$LINENO" 5; } + fi +done + +if test -r "$cache_file"; then + # Some versions of bash will fail to source /dev/null (special files + # actually), so we avoid doing that. DJGPP emulates it as a regular file. + if test /dev/null != "$cache_file" && test -f "$cache_file"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: loading cache $cache_file" >&5 +$as_echo "$as_me: loading cache $cache_file" >&6;} + case $cache_file in + [\\/]* | ?:[\\/]* ) . "$cache_file";; + *) . "./$cache_file";; + esac + fi +else + { $as_echo "$as_me:${as_lineno-$LINENO}: creating cache $cache_file" >&5 +$as_echo "$as_me: creating cache $cache_file" >&6;} + >$cache_file +fi + +# Check that the precious variables saved in the cache have kept the same +# value. +ac_cache_corrupted=false +for ac_var in $ac_precious_vars; do + eval ac_old_set=\$ac_cv_env_${ac_var}_set + eval ac_new_set=\$ac_env_${ac_var}_set + eval ac_old_val=\$ac_cv_env_${ac_var}_value + eval ac_new_val=\$ac_env_${ac_var}_value + case $ac_old_set,$ac_new_set in + set,) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was set to \`$ac_old_val' in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,set) + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' was not set in the previous run" >&5 +$as_echo "$as_me: error: \`$ac_var' was not set in the previous run" >&2;} + ac_cache_corrupted=: ;; + ,);; + *) + if test "x$ac_old_val" != "x$ac_new_val"; then + # differences in whitespace do not lead to failure. 
+ ac_old_val_w=`echo x $ac_old_val` + ac_new_val_w=`echo x $ac_new_val` + if test "$ac_old_val_w" != "$ac_new_val_w"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: \`$ac_var' has changed since the previous run:" >&5 +$as_echo "$as_me: error: \`$ac_var' has changed since the previous run:" >&2;} + ac_cache_corrupted=: + else + { $as_echo "$as_me:${as_lineno-$LINENO}: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&5 +$as_echo "$as_me: warning: ignoring whitespace changes in \`$ac_var' since the previous run:" >&2;} + eval $ac_var=\$ac_old_val + fi + { $as_echo "$as_me:${as_lineno-$LINENO}: former value: \`$ac_old_val'" >&5 +$as_echo "$as_me: former value: \`$ac_old_val'" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: current value: \`$ac_new_val'" >&5 +$as_echo "$as_me: current value: \`$ac_new_val'" >&2;} + fi;; + esac + # Pass precious variables to config.status. + if test "$ac_new_set" = set; then + case $ac_new_val in + *\'*) ac_arg=$ac_var=`$as_echo "$ac_new_val" | sed "s/'/'\\\\\\\\''/g"` ;; + *) ac_arg=$ac_var=$ac_new_val ;; + esac + case " $ac_configure_args " in + *" '$ac_arg' "*) ;; # Avoid dups. Use of quotes ensures accuracy. + *) as_fn_append ac_configure_args " '$ac_arg'" ;; + esac + fi +done +if $ac_cache_corrupted; then + { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} + { $as_echo "$as_me:${as_lineno-$LINENO}: error: changes in the environment can compromise the build" >&5 +$as_echo "$as_me: error: changes in the environment can compromise the build" >&2;} + as_fn_error $? "run \`make distclean' and/or \`rm $cache_file' and start over" "$LINENO" 5 +fi +## -------------------- ## +## Main body of script. ## +## -------------------- ## + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + + +# Provide various options to initialize the Autoconf and configure processes. + + + +ac_aux_dir= +for ac_dir in build-aux "$srcdir"/build-aux; do + if test -f "$ac_dir/install-sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install-sh -c" + break + elif test -f "$ac_dir/install.sh"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/install.sh -c" + break + elif test -f "$ac_dir/shtool"; then + ac_aux_dir=$ac_dir + ac_install_sh="$ac_aux_dir/shtool install -c" + break + fi +done +if test -z "$ac_aux_dir"; then + as_fn_error $? "cannot find install-sh, install.sh, or shtool in build-aux \"$srcdir\"/build-aux" "$LINENO" 5 +fi + +# These three variables are undocumented and unsupported, +# and are intended to be withdrawn in a future Autoconf release. +# They can cause serious problems if a builder's source tree is in a directory +# whose full name contains unusual characters. +ac_config_guess="$SHELL $ac_aux_dir/config.guess" # Please don't use this var. +ac_config_sub="$SHELL $ac_aux_dir/config.sub" # Please don't use this var. +ac_configure="$SHELL $ac_aux_dir/configure" # Please don't use this var. + + +ac_config_headers="$ac_config_headers build-aux/config.h" + +ac_config_files="$ac_config_files Makefile" + +ac_config_files="$ac_config_files scripts/gtest-config" + + +# Initialize Automake with various options. We require at least v1.9, prevent +# pedantic complaints about package files, and enable various distribution +# targets. +am__api_version='1.11' + +# Find a good install program. 
We prefer a C program (faster), +# so one script is as good as another. But avoid the broken or +# incompatible versions: +# SysV /etc/install, /usr/sbin/install +# SunOS /usr/etc/install +# IRIX /sbin/install +# AIX /bin/install +# AmigaOS /C/install, which installs bootblocks on floppy discs +# AIX 4 /usr/bin/installbsd, which doesn't work without a -g flag +# AFS /usr/afsws/bin/install, which mishandles nonexistent args +# SVR4 /usr/ucb/install, which tries to use the nonexistent group "staff" +# OS/2's system install, which has a completely different semantic +# ./install, which can be erroneously created by make from ./install.sh. +# Reject install programs that cannot install multiple files. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a BSD-compatible install" >&5 +$as_echo_n "checking for a BSD-compatible install... " >&6; } +if test -z "$INSTALL"; then +if ${ac_cv_path_install+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + # Account for people who put trailing slashes in PATH elements. +case $as_dir/ in #(( + ./ | .// | /[cC]/* | \ + /etc/* | /usr/sbin/* | /usr/etc/* | /sbin/* | /usr/afsws/bin/* | \ + ?:[\\/]os2[\\/]install[\\/]* | ?:[\\/]OS2[\\/]INSTALL[\\/]* | \ + /usr/ucb/* ) ;; + *) + # OSF1 and SCO ODT 3.0 have their own names for install. + # Don't use installbsd from OSF since it installs stuff as root + # by default. + for ac_prog in ginstall scoinst install; do + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; }; then + if test $ac_prog = install && + grep dspmsg "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # AIX install. It has an incompatible calling convention. + : + elif test $ac_prog = install && + grep pwplus "$as_dir/$ac_prog$ac_exec_ext" >/dev/null 2>&1; then + # program-specific install script used by HP pwplus--don't use. + : + else + rm -rf conftest.one conftest.two conftest.dir + echo one > conftest.one + echo two > conftest.two + mkdir conftest.dir + if "$as_dir/$ac_prog$ac_exec_ext" -c conftest.one conftest.two "`pwd`/conftest.dir" && + test -s conftest.one && test -s conftest.two && + test -s conftest.dir/conftest.one && + test -s conftest.dir/conftest.two + then + ac_cv_path_install="$as_dir/$ac_prog$ac_exec_ext -c" + break 3 + fi + fi + fi + done + done + ;; +esac + + done +IFS=$as_save_IFS + +rm -rf conftest.one conftest.two conftest.dir + +fi + if test "${ac_cv_path_install+set}" = set; then + INSTALL=$ac_cv_path_install + else + # As a last resort, use the slow shell script. Don't cache a + # value for INSTALL within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + INSTALL=$ac_install_sh + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $INSTALL" >&5 +$as_echo "$INSTALL" >&6; } + +# Use test -z because SunOS4 sh mishandles braces in ${var-val}. +# It thinks the first close brace ends the variable substitution. +test -z "$INSTALL_PROGRAM" && INSTALL_PROGRAM='${INSTALL}' + +test -z "$INSTALL_SCRIPT" && INSTALL_SCRIPT='${INSTALL}' + +test -z "$INSTALL_DATA" && INSTALL_DATA='${INSTALL} -m 644' + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether build environment is sane" >&5 +$as_echo_n "checking whether build environment is sane... 
" >&6; } +# Just in case +sleep 1 +echo timestamp > conftest.file +# Reject unsafe characters in $srcdir or the absolute working directory +# name. Accept space and tab only in the latter. +am_lf=' +' +case `pwd` in + *[\\\"\#\$\&\'\`$am_lf]*) + as_fn_error $? "unsafe absolute working directory name" "$LINENO" 5;; +esac +case $srcdir in + *[\\\"\#\$\&\'\`$am_lf\ \ ]*) + as_fn_error $? "unsafe srcdir value: \`$srcdir'" "$LINENO" 5;; +esac + +# Do `set' in a subshell so we don't clobber the current shell's +# arguments. Must try -L first in case configure is actually a +# symlink; some systems play weird games with the mod time of symlinks +# (eg FreeBSD returns the mod time of the symlink's containing +# directory). +if ( + set X `ls -Lt "$srcdir/configure" conftest.file 2> /dev/null` + if test "$*" = "X"; then + # -L didn't work. + set X `ls -t "$srcdir/configure" conftest.file` + fi + rm -f conftest.file + if test "$*" != "X $srcdir/configure conftest.file" \ + && test "$*" != "X conftest.file $srcdir/configure"; then + + # If neither matched, then we have a broken ls. This can happen + # if, for instance, CONFIG_SHELL is bash and it inherits a + # broken ls alias from the environment. This has actually + # happened. Such a system could not be considered "sane". + as_fn_error $? "ls -t appears to fail. Make sure there is not a broken +alias in your environment" "$LINENO" 5 + fi + + test "$2" = conftest.file + ) +then + # Ok. + : +else + as_fn_error $? "newly created file is older than distributed files! +Check your system clock" "$LINENO" 5 +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +test "$program_prefix" != NONE && + program_transform_name="s&^&$program_prefix&;$program_transform_name" +# Use a double $ so make ignores it. +test "$program_suffix" != NONE && + program_transform_name="s&\$&$program_suffix&;$program_transform_name" +# Double any \ or $. +# By default was `s,x,x', remove it if useless. +ac_script='s/[\\$]/&&/g;s/;s,x,x,$//' +program_transform_name=`$as_echo "$program_transform_name" | sed "$ac_script"` + +# expand $ac_aux_dir to an absolute path +am_aux_dir=`cd $ac_aux_dir && pwd` + +if test x"${MISSING+set}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + MISSING="\${SHELL} \"$am_aux_dir/missing\"" ;; + *) + MISSING="\${SHELL} $am_aux_dir/missing" ;; + esac +fi +# Use eval to expand $SHELL +if eval "$MISSING --run true"; then + am_missing_run="$MISSING --run " +else + am_missing_run= + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`missing' script is too old or missing" >&5 +$as_echo "$as_me: WARNING: \`missing' script is too old or missing" >&2;} +fi + +if test x"${install_sh}" != xset; then + case $am_aux_dir in + *\ * | *\ *) + install_sh="\${SHELL} '$am_aux_dir/install-sh'" ;; + *) + install_sh="\${SHELL} $am_aux_dir/install-sh" + esac +fi + +# Installed binaries are usually stripped using `strip' when the user +# run `make install-strip'. However `strip' might not be the right +# tool to use in cross-compilation environments, therefore Automake +# will honor the `STRIP' environment variable to overrule this program. +if test "$cross_compiling" != no; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +fi +INSTALL_STRIP_PROGRAM="\$(install_sh) -c -s" + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a thread-safe mkdir -p" >&5 +$as_echo_n "checking for a thread-safe mkdir -p... " >&6; } +if test -z "$MKDIR_P"; then + if ${ac_cv_path_mkdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/opt/sfw/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in mkdir gmkdir; do + for ac_exec_ext in '' $ac_executable_extensions; do + { test -f "$as_dir/$ac_prog$ac_exec_ext" && $as_test_x "$as_dir/$ac_prog$ac_exec_ext"; } || continue + case `"$as_dir/$ac_prog$ac_exec_ext" --version 2>&1` in #( + 'mkdir (GNU coreutils) '* | \ + 'mkdir (coreutils) '* | \ + 'mkdir (fileutils) '4.1*) + ac_cv_path_mkdir=$as_dir/$ac_prog$ac_exec_ext + break 3;; + esac + done + done + done +IFS=$as_save_IFS + +fi + + test -d ./--version && rmdir ./--version + if test "${ac_cv_path_mkdir+set}" = set; then + MKDIR_P="$ac_cv_path_mkdir -p" + else + # As a last resort, use the slow shell script. 
Don't cache a + # value for MKDIR_P within a source directory, because that will + # break other packages using the cache if that directory is + # removed, or if the value is a relative name. + MKDIR_P="$ac_install_sh -d" + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $MKDIR_P" >&5 +$as_echo "$MKDIR_P" >&6; } + +mkdir_p="$MKDIR_P" +case $mkdir_p in + [\\/$]* | ?:[\\/]*) ;; + */*) mkdir_p="\$(top_builddir)/$mkdir_p" ;; +esac + +for ac_prog in gawk mawk nawk awk +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AWK+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AWK"; then + ac_cv_prog_AWK="$AWK" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_AWK="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AWK=$ac_cv_prog_AWK +if test -n "$AWK"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AWK" >&5 +$as_echo "$AWK" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AWK" && break +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ${MAKE-make} sets \$(MAKE)" >&5 +$as_echo_n "checking whether ${MAKE-make} sets \$(MAKE)... " >&6; } +set x ${MAKE-make} +ac_make=`$as_echo "$2" | sed 's/+/p/g; s/[^a-zA-Z0-9_]/_/g'` +if eval \${ac_cv_prog_make_${ac_make}_set+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat >conftest.make <<\_ACEOF +SHELL = /bin/sh +all: + @echo '@@@%%%=$(MAKE)=@@@%%%' +_ACEOF +# GNU make sometimes prints "make[1]: Entering ...", which would confuse us. +case `${MAKE-make} -f conftest.make 2>/dev/null` in + *@@@%%%=?*=@@@%%%*) + eval ac_cv_prog_make_${ac_make}_set=yes;; + *) + eval ac_cv_prog_make_${ac_make}_set=no;; +esac +rm -f conftest.make +fi +if eval test \$ac_cv_prog_make_${ac_make}_set = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + SET_MAKE= +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + SET_MAKE="MAKE=${MAKE-make}" +fi + +rm -rf .tst 2>/dev/null +mkdir .tst 2>/dev/null +if test -d .tst; then + am__leading_dot=. +else + am__leading_dot=_ +fi +rmdir .tst 2>/dev/null + +if test "`cd $srcdir && pwd`" != "`pwd`"; then + # Use -I$(srcdir) only when $(srcdir) != ., so that make's output + # is not polluted with repeated "-I." + am__isrc=' -I$(srcdir)' + # test to see if srcdir already configured + if test -f $srcdir/config.status; then + as_fn_error $? "source directory already configured; run \"make distclean\" there first" "$LINENO" 5 + fi +fi + +# test whether we have cygpath +if test -z "$CYGPATH_W"; then + if (cygpath --version) >/dev/null 2>/dev/null; then + CYGPATH_W='cygpath -w' + else + CYGPATH_W=echo + fi +fi + + +# Define the identity of the package. + PACKAGE='gtest' + VERSION='1.7.0' + + +cat >>confdefs.h <<_ACEOF +#define PACKAGE "$PACKAGE" +_ACEOF + + +cat >>confdefs.h <<_ACEOF +#define VERSION "$VERSION" +_ACEOF + +# Some tools Automake needs. 
+ +ACLOCAL=${ACLOCAL-"${am_missing_run}aclocal-${am__api_version}"} + + +AUTOCONF=${AUTOCONF-"${am_missing_run}autoconf"} + + +AUTOMAKE=${AUTOMAKE-"${am_missing_run}automake-${am__api_version}"} + + +AUTOHEADER=${AUTOHEADER-"${am_missing_run}autoheader"} + + +MAKEINFO=${MAKEINFO-"${am_missing_run}makeinfo"} + +# We need awk for the "check" target. The system "awk" is bad on +# some platforms. +# Always define AMTAR for backward compatibility. Yes, it's still used +# in the wild :-( We should find a proper way to deprecate it ... +AMTAR='$${TAR-tar}' + +am__tar='$${TAR-tar} chof - "$$tardir"' am__untar='$${TAR-tar} xf -' + + + + + + +# Check for programs used in building Google Test. +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}gcc", so it can be a program name with args. +set dummy ${ac_tool_prefix}gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="${ac_tool_prefix}gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_CC"; then + ac_ct_CC=$CC + # Extract the first word of "gcc", so it can be a program name with args. +set dummy gcc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CC="gcc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +else + CC="$ac_cv_prog_CC" +fi + +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}cc", so it can be a program name with args. +set dummy ${ac_tool_prefix}cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="${ac_tool_prefix}cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + fi +fi +if test -z "$CC"; then + # Extract the first word of "cc", so it can be a program name with args. +set dummy cc; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else + ac_prog_rejected=no +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + if test "$as_dir/$ac_word$ac_exec_ext" = "/usr/ucb/cc"; then + ac_prog_rejected=yes + continue + fi + ac_cv_prog_CC="cc" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +if test $ac_prog_rejected = yes; then + # We found a bogon in the path, so make sure we never use it. + set dummy $ac_cv_prog_CC + shift + if test $# != 0; then + # We chose a different compiler from the bogus one. + # However, it has the same basename, so the bogon will be chosen + # first if we set CC to just the basename; use the full file name. 
+ shift + ac_cv_prog_CC="$as_dir/$ac_word${1+' '}$@" + fi +fi +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$CC"; then + if test -n "$ac_tool_prefix"; then + for ac_prog in cl.exe + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CC"; then + ac_cv_prog_CC="$CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CC="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CC=$ac_cv_prog_CC +if test -n "$CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CC" >&5 +$as_echo "$CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CC" && break + done +fi +if test -z "$CC"; then + ac_ct_CC=$CC + for ac_prog in cl.exe +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CC"; then + ac_cv_prog_ac_ct_CC="$ac_ct_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CC=$ac_cv_prog_ac_ct_CC +if test -n "$ac_ct_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CC" >&5 +$as_echo "$ac_ct_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CC" && break +done + + if test "x$ac_ct_CC" = x; then + CC="" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CC=$ac_ct_CC + fi +fi + +fi + + +test -z "$CC" && { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "no acceptable C compiler found in \$PATH +See \`config.log' for more details" "$LINENO" 5; } + +# Provide some information about the compiler. 
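# Illustrative aside (a standalone sketch; not part of the vendored
# configure script). The compiler search above repeats one idiom: walk
# $PATH, test each candidate for an executable file, and cache the first
# hit in an ac_cv_prog_* variable so reruns skip the probe. The function
# name and cache variable below are ours, not autoconf's.
find_in_path () {
  prog=$1
  save_IFS=$IFS; IFS=${PATH_SEPARATOR-:}
  for dir in $PATH; do
    IFS=$save_IFS
    test -z "$dir" && dir=.
    if test -f "$dir/$prog" && test -x "$dir/$prog"; then
      echo "$dir/$prog"; return 0
    fi
  done
  IFS=$save_IFS; return 1
}
# Usage, with configure-style caching: probe once, reuse thereafter.
if test -z "${my_cv_path_cc+set}"; then
  my_cv_path_cc=`find_in_path gcc || find_in_path cc`
fi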
+$as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files a.out a.out.dSYM a.exe b.out" +# Try to create an executable without -o first, disregard a.out. +# It will help us diagnose broken compilers, and finding out an intuition +# of exeext. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler works" >&5 +$as_echo_n "checking whether the C compiler works... " >&6; } +ac_link_default=`$as_echo "$ac_link" | sed 's/ -o *conftest[^ ]*//'` + +# The possible output files: +ac_files="a.out conftest.exe conftest a.exe a_out.exe b.out conftest.*" + +ac_rmfiles= +for ac_file in $ac_files +do + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + * ) ac_rmfiles="$ac_rmfiles $ac_file";; + esac +done +rm -f $ac_rmfiles + +if { { ac_try="$ac_link_default" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link_default") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # Autoconf-2.13 could set the ac_cv_exeext variable to `no'. +# So ignore a value of `no', otherwise this would lead to `EXEEXT = no' +# in a Makefile. We should not override ac_cv_exeext if it was cached, +# so that the user can short-circuit this test for compilers unknown to +# Autoconf. +for ac_file in $ac_files '' +do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) + ;; + [ab].out ) + # We found the default executable, but exeext='' is most + # certainly right. + break;; + *.* ) + if test "${ac_cv_exeext+set}" = set && test "$ac_cv_exeext" != no; + then :; else + ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + fi + # We set ac_cv_exeext here because the later test for it is not + # safe: cross compilers may not add the suffix if given an `-o' + # argument, so we may need to know it at that point already. + # Even if this section looks crufty: it has the advantage of + # actually working. 
+ break;; + * ) + break;; + esac +done +test "$ac_cv_exeext" = no && ac_cv_exeext= + +else + ac_file='' +fi +if test -z "$ac_file"; then : + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +$as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error 77 "C compiler cannot create executables +See \`config.log' for more details" "$LINENO" 5; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for C compiler default output file name" >&5 +$as_echo_n "checking for C compiler default output file name... " >&6; } +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_file" >&5 +$as_echo "$ac_file" >&6; } +ac_exeext=$ac_cv_exeext + +rm -f -r a.out a.out.dSYM a.exe conftest$ac_cv_exeext b.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of executables" >&5 +$as_echo_n "checking for suffix of executables... " >&6; } +if { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + # If both `conftest.exe' and `conftest' are `present' (well, observable) +# catch `conftest.exe'. For instance with Cygwin, `ls conftest' will +# work properly (i.e., refer to `conftest.exe'), while it won't with +# `rm'. +for ac_file in conftest.exe conftest conftest.*; do + test -f "$ac_file" || continue + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM | *.o | *.obj ) ;; + *.* ) ac_cv_exeext=`expr "$ac_file" : '[^.]*\(\..*\)'` + break;; + * ) break;; + esac +done +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of executables: cannot compile and link +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest conftest$ac_cv_exeext +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_exeext" >&5 +$as_echo "$ac_cv_exeext" >&6; } + +rm -f conftest.$ac_ext +EXEEXT=$ac_cv_exeext +ac_exeext=$EXEEXT +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <stdio.h> +int +main () +{ +FILE *f = fopen ("conftest.out", "w"); + return ferror (f) || fclose (f) != 0; + + ; + return 0; +} +_ACEOF +ac_clean_files="$ac_clean_files conftest.out" +# Check that the compiler produces executables we can run. If not, either +# the compiler is broken, or we cross compile. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are cross compiling" >&5 +$as_echo_n "checking whether we are cross compiling... " >&6; } +if test "$cross_compiling" != yes; then + { { ac_try="$ac_link" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_link") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + if { ac_try='./conftest$ac_cv_exeext' + { { case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_try") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; }; then + cross_compiling=no + else + if test "$cross_compiling" = maybe; then + cross_compiling=yes + else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot run C compiled programs. +If you meant to cross compile, use \`--host'. +See \`config.log' for more details" "$LINENO" 5; } + fi + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $cross_compiling" >&5 +$as_echo "$cross_compiling" >&6; } + +rm -f conftest.$ac_ext conftest$ac_cv_exeext conftest.out +ac_clean_files=$ac_clean_files_save +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for suffix of object files" >&5 +$as_echo_n "checking for suffix of object files... " >&6; } +if ${ac_cv_objext+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +rm -f conftest.o conftest.obj +if { { ac_try="$ac_compile" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compile") 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then : + for ac_file in conftest.o conftest.obj conftest.*; do + test -f "$ac_file" || continue; + case $ac_file in + *.$ac_ext | *.xcoff | *.tds | *.d | *.pdb | *.xSYM | *.bb | *.bbg | *.map | *.inf | *.dSYM ) ;; + *) ac_cv_objext=`expr "$ac_file" : '.*\.\(.*\)'` + break;; + esac +done +else + $as_echo "$as_me: failed program was:" >&5 +sed 's/^/| /' conftest.$ac_ext >&5 + +{ { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "cannot compute suffix of object files: cannot compile +See \`config.log' for more details" "$LINENO" 5; } +fi +rm -f conftest.$ac_cv_objext conftest.$ac_ext +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_objext" >&5 +$as_echo "$ac_cv_objext" >&6; } +OBJEXT=$ac_cv_objext +ac_objext=$OBJEXT +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C compiler" >&5 +$as_echo_n "checking whether we are using the GNU C compiler... " >&6; } +if ${ac_cv_c_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_c_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_c_compiler_gnu" >&5 +$as_echo "$ac_cv_c_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GCC=yes +else + GCC= +fi +ac_test_CFLAGS=${CFLAGS+set} +ac_save_CFLAGS=$CFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CC accepts -g" >&5 +$as_echo_n "checking whether $CC accepts -g... 
" >&6; } +if ${ac_cv_prog_cc_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_c_werror_flag=$ac_c_werror_flag + ac_c_werror_flag=yes + ac_cv_prog_cc_g=no + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +else + CFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + +else + ac_c_werror_flag=$ac_save_c_werror_flag + CFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_c_werror_flag=$ac_save_c_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_g" >&5 +$as_echo "$ac_cv_prog_cc_g" >&6; } +if test "$ac_test_CFLAGS" = set; then + CFLAGS=$ac_save_CFLAGS +elif test $ac_cv_prog_cc_g = yes; then + if test "$GCC" = yes; then + CFLAGS="-g -O2" + else + CFLAGS="-g" + fi +else + if test "$GCC" = yes; then + CFLAGS="-O2" + else + CFLAGS= + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $CC option to accept ISO C89" >&5 +$as_echo_n "checking for $CC option to accept ISO C89... " >&6; } +if ${ac_cv_prog_cc_c89+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_cv_prog_cc_c89=no +ac_save_CC=$CC +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include +/* Most of the following tests are stolen from RCS 5.7's src/conf.sh. */ +struct buf { int x; }; +FILE * (*rcsopen) (struct buf *, struct stat *, int); +static char *e (p, i) + char **p; + int i; +{ + return p[i]; +} +static char *f (char * (*g) (char **, int), char **p, ...) +{ + char *s; + va_list v; + va_start (v,p); + s = g (p, va_arg (v,int)); + va_end (v); + return s; +} + +/* OSF 4.0 Compaq cc is some sort of almost-ANSI by default. It has + function prototypes and stuff, but not '\xHH' hex character constants. + These don't provoke an error unfortunately, instead are silently treated + as 'x'. The following induces an error, until -std is added to get + proper ANSI mode. Curiously '\x00'!='x' always comes out true, for an + array size at least. It's necessary to write '\x00'==0 to get something + that's true only with -std. */ +int osf4_cc_array ['\x00' == 0 ? 1 : -1]; + +/* IBM C 6 for AIX is almost-ANSI by default, but it replaces macro parameters + inside strings and character constants. */ +#define FOO(x) 'x' +int xlc6_cc_array[FOO(a) == 'x' ? 
1 : -1]; + +int test (int i, double x); +struct s1 {int (*f) (int a);}; +struct s2 {int (*f) (double a);}; +int pairnames (int, char **, FILE *(*)(struct buf *, struct stat *, int), int, int); +int argc; +char **argv; +int +main () +{ +return f (e, argv, 0) != argv[0] || f (e, argv, 1) != argv[1]; + ; + return 0; +} +_ACEOF +for ac_arg in '' -qlanglvl=extc89 -qlanglvl=ansi -std \ + -Ae "-Aa -D_HPUX_SOURCE" "-Xc -D__EXTENSIONS__" +do + CC="$ac_save_CC $ac_arg" + if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_prog_cc_c89=$ac_arg +fi +rm -f core conftest.err conftest.$ac_objext + test "x$ac_cv_prog_cc_c89" != "xno" && break +done +rm -f conftest.$ac_ext +CC=$ac_save_CC + +fi +# AC_CACHE_VAL +case "x$ac_cv_prog_cc_c89" in + x) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none needed" >&5 +$as_echo "none needed" >&6; } ;; + xno) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: unsupported" >&5 +$as_echo "unsupported" >&6; } ;; + *) + CC="$CC $ac_cv_prog_cc_c89" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cc_c89" >&5 +$as_echo "$ac_cv_prog_cc_c89" >&6; } ;; +esac +if test "x$ac_cv_prog_cc_c89" != xno; then : + +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +DEPDIR="${am__leading_dot}deps" + +ac_config_commands="$ac_config_commands depfiles" + + +am_make=${MAKE-make} +cat > confinc << 'END' +am__doit: + @echo this is the am__doit target +.PHONY: am__doit +END +# If we don't find an include directive, just comment out the code. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for style of include used by $am_make" >&5 +$as_echo_n "checking for style of include used by $am_make... " >&6; } +am__include="#" +am__quote= +_am_result=none +# First try GNU make style include. +echo "include confinc" > confmf +# Ignore all kinds of additional output from `make'. +case `$am_make -s -f confmf 2> /dev/null` in #( +*the\ am__doit\ target*) + am__include=include + am__quote= + _am_result=GNU + ;; +esac +# Now try BSD make style include. +if test "$am__include" = "#"; then + echo '.include "confinc"' > confmf + case `$am_make -s -f confmf 2> /dev/null` in #( + *the\ am__doit\ target*) + am__include=.include + am__quote="\"" + _am_result=BSD + ;; + esac +fi + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $_am_result" >&5 +$as_echo "$_am_result" >&6; } +rm -f confinc confmf + +# Check whether --enable-dependency-tracking was given. +if test "${enable_dependency_tracking+set}" = set; then : + enableval=$enable_dependency_tracking; +fi + +if test "x$enable_dependency_tracking" != xno; then + am_depcomp="$ac_aux_dir/depcomp" + AMDEPBACKSLASH='\' + am__nodep='_no' +fi + if test "x$enable_dependency_tracking" != xno; then + AMDEP_TRUE= + AMDEP_FALSE='#' +else + AMDEP_TRUE='#' + AMDEP_FALSE= +fi + + + +depcc="$CC" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... " >&6; } +if ${am_cv_CC_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. 
For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CC_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. + touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. + test "$am__universal" = false || continue + ;; + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok `-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. 
+ # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CC_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_CC_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CC_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CC_dependencies_compiler_type" >&6; } +CCDEPMODE=depmode=$am_cv_CC_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CC_dependencies_compiler_type" = gcc3; then + am__fastdepCC_TRUE= + am__fastdepCC_FALSE='#' +else + am__fastdepCC_TRUE='#' + am__fastdepCC_FALSE= +fi + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +if test -z "$CXX"; then + if test -n "$CCC"; then + CXX=$CCC + else + if test -n "$ac_tool_prefix"; then + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$CXX"; then + ac_cv_prog_CXX="$CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_CXX="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +CXX=$ac_cv_prog_CXX +if test -n "$CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXX" >&5 +$as_echo "$CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$CXX" && break + done +fi +if test -z "$CXX"; then + ac_ct_CXX=$CXX + for ac_prog in g++ c++ gpp aCC CC cxx cc++ cl.exe FCC KCC RCC xlC_r xlC +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_CXX"; then + ac_cv_prog_ac_ct_CXX="$ac_ct_CXX" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_CXX="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_CXX=$ac_cv_prog_ac_ct_CXX +if test -n "$ac_ct_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_CXX" >&5 +$as_echo "$ac_ct_CXX" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_CXX" && break +done + + if test "x$ac_ct_CXX" = x; then + CXX="g++" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + CXX=$ac_ct_CXX + fi +fi + + fi +fi +# Provide some information about the compiler. +$as_echo "$as_me:${as_lineno-$LINENO}: checking for C++ compiler version" >&5 +set X $ac_compile +ac_compiler=$2 +for ac_option in --version -v -V -qversion; do + { { ac_try="$ac_compiler $ac_option >&5" +case "(($ac_try" in + *\"* | *\`* | *\\*) ac_try_echo=\$ac_try;; + *) ac_try_echo=$ac_try;; +esac +eval ac_try_echo="\"\$as_me:${as_lineno-$LINENO}: $ac_try_echo\"" +$as_echo "$ac_try_echo"; } >&5 + (eval "$ac_compiler $ac_option >&5") 2>conftest.err + ac_status=$? + if test -s conftest.err; then + sed '10a\ +... rest of stderr output deleted ... + 10q' conftest.err >conftest.er1 + cat conftest.er1 >&5 + fi + rm -f conftest.er1 conftest.err + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } +done + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether we are using the GNU C++ compiler" >&5 +$as_echo_n "checking whether we are using the GNU C++ compiler... " >&6; } +if ${ac_cv_cxx_compiler_gnu+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ +#ifndef __GNUC__ + choke me +#endif + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_compiler_gnu=yes +else + ac_compiler_gnu=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +ac_cv_cxx_compiler_gnu=$ac_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_cxx_compiler_gnu" >&5 +$as_echo "$ac_cv_cxx_compiler_gnu" >&6; } +if test $ac_compiler_gnu = yes; then + GXX=yes +else + GXX= +fi +ac_test_CXXFLAGS=${CXXFLAGS+set} +ac_save_CXXFLAGS=$CXXFLAGS +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether $CXX accepts -g" >&5 +$as_echo_n "checking whether $CXX accepts -g... " >&6; } +if ${ac_cv_prog_cxx_g+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_save_cxx_werror_flag=$ac_cxx_werror_flag + ac_cxx_werror_flag=yes + ac_cv_prog_cxx_g=no + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +else + CXXFLAGS="" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + +else + ac_cxx_werror_flag=$ac_save_cxx_werror_flag + CXXFLAGS="-g" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + ac_cv_prog_cxx_g=yes +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + ac_cxx_werror_flag=$ac_save_cxx_werror_flag +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_prog_cxx_g" >&5 +$as_echo "$ac_cv_prog_cxx_g" >&6; } +if test "$ac_test_CXXFLAGS" = set; then + CXXFLAGS=$ac_save_CXXFLAGS +elif test $ac_cv_prog_cxx_g = yes; then + if test "$GXX" = yes; then + CXXFLAGS="-g -O2" + else + CXXFLAGS="-g" + fi +else + if test "$GXX" = yes; then + CXXFLAGS="-O2" + else + CXXFLAGS= + fi +fi +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +depcc="$CXX" am_compiler_list= + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking dependency style of $depcc" >&5 +$as_echo_n "checking dependency style of $depcc... " >&6; } +if ${am_cv_CXX_dependencies_compiler_type+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$AMDEP_TRUE" && test -f "$am_depcomp"; then + # We make a subdir and do the tests there. Otherwise we can end up + # making bogus files that we don't know about and never remove. For + # instance it was reported that on HP-UX the gcc test will end up + # making a dummy file named `D' -- because `-MD' means `put the output + # in D'. + rm -rf conftest.dir + mkdir conftest.dir + # Copy depcomp to subdir because otherwise we won't find it if we're + # using a relative directory. + cp "$am_depcomp" conftest.dir + cd conftest.dir + # We will build objects and dependencies in a subdirectory because + # it helps to detect inapplicable dependency modes. For instance + # both Tru64's cc and ICC support -MD to output dependencies as a + # side effect of compilation, but ICC will put the dependencies in + # the current directory while Tru64 will put them in the object + # directory. + mkdir sub + + am_cv_CXX_dependencies_compiler_type=none + if test "$am_compiler_list" = ""; then + am_compiler_list=`sed -n 's/^#*\([a-zA-Z0-9]*\))$/\1/p' < ./depcomp` + fi + am__universal=false + case " $depcc " in #( + *\ -arch\ *\ -arch\ *) am__universal=true ;; + esac + + for depmode in $am_compiler_list; do + # Setup a source with many dependencies, because some compilers + # like to wrap large dependency lists on column 80 (with \), and + # we should not choose a depcomp mode which is confused by this. + # + # We need to recreate these files for each test, as the compiler may + # overwrite some of them when testing with obscure command lines. + # This happens at least with the AIX C compiler. + : > sub/conftest.c + for i in 1 2 3 4 5 6; do + echo '#include "conftst'$i'.h"' >> sub/conftest.c + # Using `: > sub/conftst$i.h' creates only sub/conftst1.h with + # Solaris 8's {/usr,}/bin/sh. + touch sub/conftst$i.h + done + echo "${am__include} ${am__quote}sub/conftest.Po${am__quote}" > confmf + + # We check with `-c' and `-o' for the sake of the "dashmstdout" + # mode. It turns out that the SunPro C++ compiler does not properly + # handle `-M -o', and we need to detect this. Also, some Intel + # versions had trouble with output in subdirs + am__obj=sub/conftest.${OBJEXT-o} + am__minus_obj="-o $am__obj" + case $depmode in + gcc) + # This depmode causes a compiler race in universal mode. 
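# Illustrative aside (a standalone sketch; not part of the vendored
# configure script). The depmode probe surrounding this case statement
# boils down to: compile a source whose headers are known, ask the
# compiler to emit a dependency file as a side effect, and grep that file
# for the headers. A hand-rolled gcc3-style version; the compiler name
# and file names here are assumptions for the sketch:
printf '#include "dep1.h"\nint main (void) { return 0; }\n' > probe.c
: > dep1.h
if gcc -MT probe.o -MD -MP -MF probe.d -c -o probe.o probe.c 2>/dev/null &&
   grep dep1.h probe.d >/dev/null 2>&1; then
  echo "gcc3-style dependency tracking works"
fi
rm -f probe.c probe.o probe.d dep1.h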
+ test "$am__universal" = false || continue + ;; + nosideeffect) + # after this tag, mechanisms are not by side-effect, so they'll + # only be used when explicitly requested + if test "x$enable_dependency_tracking" = xyes; then + continue + else + break + fi + ;; + msvc7 | msvc7msys | msvisualcpp | msvcmsys) + # This compiler won't grok `-c -o', but also, the minuso test has + # not run yet. These depmodes are late enough in the game, and + # so weak that their functioning should not be impacted. + am__obj=conftest.${OBJEXT-o} + am__minus_obj= + ;; + none) break ;; + esac + if depmode=$depmode \ + source=sub/conftest.c object=$am__obj \ + depfile=sub/conftest.Po tmpdepfile=sub/conftest.TPo \ + $SHELL ./depcomp $depcc -c $am__minus_obj sub/conftest.c \ + >/dev/null 2>conftest.err && + grep sub/conftst1.h sub/conftest.Po > /dev/null 2>&1 && + grep sub/conftst6.h sub/conftest.Po > /dev/null 2>&1 && + grep $am__obj sub/conftest.Po > /dev/null 2>&1 && + ${MAKE-make} -s -f confmf > /dev/null 2>&1; then + # icc doesn't choke on unknown options, it will just issue warnings + # or remarks (even with -Werror). So we grep stderr for any message + # that says an option was ignored or not supported. + # When given -MP, icc 7.0 and 7.1 complain thusly: + # icc: Command line warning: ignoring option '-M'; no argument required + # The diagnosis changed in icc 8.0: + # icc: Command line remark: option '-MP' not supported + if (grep 'ignoring option' conftest.err || + grep 'not supported' conftest.err) >/dev/null 2>&1; then :; else + am_cv_CXX_dependencies_compiler_type=$depmode + break + fi + fi + done + + cd .. + rm -rf conftest.dir +else + am_cv_CXX_dependencies_compiler_type=none +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $am_cv_CXX_dependencies_compiler_type" >&5 +$as_echo "$am_cv_CXX_dependencies_compiler_type" >&6; } +CXXDEPMODE=depmode=$am_cv_CXX_dependencies_compiler_type + + if + test "x$enable_dependency_tracking" != xno \ + && test "$am_cv_CXX_dependencies_compiler_type" = gcc3; then + am__fastdepCXX_TRUE= + am__fastdepCXX_FALSE='#' +else + am__fastdepCXX_TRUE='#' + am__fastdepCXX_FALSE= +fi + + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +case `pwd` in + *\ * | *\ *) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&5 +$as_echo "$as_me: WARNING: Libtool does not cope well with whitespace in \`pwd\`" >&2;} ;; +esac + + + +macro_version='2.4.2' +macro_revision='1.3337' + + + + + + + + + + + + + +ltmain="$ac_aux_dir/ltmain.sh" + +# Make sure we can run config.sub. +$SHELL "$ac_aux_dir/config.sub" sun4 >/dev/null 2>&1 || + as_fn_error $? "cannot run $SHELL $ac_aux_dir/config.sub" "$LINENO" 5 + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking build system type" >&5 +$as_echo_n "checking build system type... " >&6; } +if ${ac_cv_build+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_build_alias=$build_alias +test "x$ac_build_alias" = x && + ac_build_alias=`$SHELL "$ac_aux_dir/config.guess"` +test "x$ac_build_alias" = x && + as_fn_error $? "cannot guess build type; you must specify one" "$LINENO" 5 +ac_cv_build=`$SHELL "$ac_aux_dir/config.sub" $ac_build_alias` || + as_fn_error $? 
"$SHELL $ac_aux_dir/config.sub $ac_build_alias failed" "$LINENO" 5 + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_build" >&5 +$as_echo "$ac_cv_build" >&6; } +case $ac_cv_build in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical build" "$LINENO" 5;; +esac +build=$ac_cv_build +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_build +shift +build_cpu=$1 +build_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +build_os=$* +IFS=$ac_save_IFS +case $build_os in *\ *) build_os=`echo "$build_os" | sed 's/ /-/g'`;; esac + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking host system type" >&5 +$as_echo_n "checking host system type... " >&6; } +if ${ac_cv_host+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test "x$host_alias" = x; then + ac_cv_host=$ac_cv_build +else + ac_cv_host=`$SHELL "$ac_aux_dir/config.sub" $host_alias` || + as_fn_error $? "$SHELL $ac_aux_dir/config.sub $host_alias failed" "$LINENO" 5 +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_host" >&5 +$as_echo "$ac_cv_host" >&6; } +case $ac_cv_host in +*-*-*) ;; +*) as_fn_error $? "invalid value of canonical host" "$LINENO" 5;; +esac +host=$ac_cv_host +ac_save_IFS=$IFS; IFS='-' +set x $ac_cv_host +shift +host_cpu=$1 +host_vendor=$2 +shift; shift +# Remember, the first character of IFS is used to create $*, +# except with old shells: +host_os=$* +IFS=$ac_save_IFS +case $host_os in *\ *) host_os=`echo "$host_os" | sed 's/ /-/g'`;; esac + + +# Backslashify metacharacters that are still active within +# double-quoted strings. +sed_quote_subst='s/\(["`$\\]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\(["`\\]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. +delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' + +ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to print strings" >&5 +$as_echo_n "checking how to print strings... " >&6; } +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' +else + # Use this function as a fallback that always works. + func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' +fi + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. +func_echo_all () +{ + $ECHO "" +} + +case "$ECHO" in + printf*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: printf" >&5 +$as_echo "printf" >&6; } ;; + print*) { $as_echo "$as_me:${as_lineno-$LINENO}: result: print -r" >&5 +$as_echo "print -r" >&6; } ;; + *) { $as_echo "$as_me:${as_lineno-$LINENO}: result: cat" >&5 +$as_echo "cat" >&6; } ;; +esac + + + + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for a sed that does not truncate output" >&5 +$as_echo_n "checking for a sed that does not truncate output... 
" >&6; } +if ${ac_cv_path_SED+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_script=s/aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa/bbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbbb/ + for ac_i in 1 2 3 4 5 6 7; do + ac_script="$ac_script$as_nl$ac_script" + done + echo "$ac_script" 2>/dev/null | sed 99q >conftest.sed + { ac_script=; unset ac_script;} + if test -z "$SED"; then + ac_path_SED_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_SED="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_SED" && $as_test_x "$ac_path_SED"; } || continue +# Check for GNU ac_path_SED and select it if it is found. + # Check for GNU $ac_path_SED +case `"$ac_path_SED" --version 2>&1` in +*GNU*) + ac_cv_path_SED="$ac_path_SED" ac_path_SED_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo '' >> "conftest.nl" + "$ac_path_SED" -f conftest.sed < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_SED_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_SED="$ac_path_SED" + ac_path_SED_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_SED_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_SED"; then + as_fn_error $? "no acceptable sed could be found in \$PATH" "$LINENO" 5 + fi +else + ac_cv_path_SED=$SED +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_SED" >&5 +$as_echo "$ac_cv_path_SED" >&6; } + SED="$ac_cv_path_SED" + rm -f conftest.sed + +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for grep that handles long lines and -e" >&5 +$as_echo_n "checking for grep that handles long lines and -e... " >&6; } +if ${ac_cv_path_GREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$GREP"; then + ac_path_GREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in grep ggrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_GREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_GREP" && $as_test_x "$ac_path_GREP"; } || continue +# Check for GNU ac_path_GREP and select it if it is found. 
+ # Check for GNU $ac_path_GREP +case `"$ac_path_GREP" --version 2>&1` in +*GNU*) + ac_cv_path_GREP="$ac_path_GREP" ac_path_GREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'GREP' >> "conftest.nl" + "$ac_path_GREP" -e 'GREP$' -e '-(cannot match)-' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_GREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_GREP="$ac_path_GREP" + ac_path_GREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_GREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_GREP"; then + as_fn_error $? "no acceptable grep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_GREP=$GREP +fi + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_GREP" >&5 +$as_echo "$ac_cv_path_GREP" >&6; } + GREP="$ac_cv_path_GREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for egrep" >&5 +$as_echo_n "checking for egrep... " >&6; } +if ${ac_cv_path_EGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo a | $GREP -E '(a|b)' >/dev/null 2>&1 + then ac_cv_path_EGREP="$GREP -E" + else + if test -z "$EGREP"; then + ac_path_EGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in egrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_EGREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_EGREP" && $as_test_x "$ac_path_EGREP"; } || continue +# Check for GNU ac_path_EGREP and select it if it is found. + # Check for GNU $ac_path_EGREP +case `"$ac_path_EGREP" --version 2>&1` in +*GNU*) + ac_cv_path_EGREP="$ac_path_EGREP" ac_path_EGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'EGREP' >> "conftest.nl" + "$ac_path_EGREP" 'EGREP$' < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_EGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_EGREP="$ac_path_EGREP" + ac_path_EGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_EGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_EGREP"; then + as_fn_error $? "no acceptable egrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_EGREP=$EGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_EGREP" >&5 +$as_echo "$ac_cv_path_EGREP" >&6; } + EGREP="$ac_cv_path_EGREP" + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for fgrep" >&5 +$as_echo_n "checking for fgrep... 
" >&6; } +if ${ac_cv_path_FGREP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if echo 'ab*c' | $GREP -F 'ab*c' >/dev/null 2>&1 + then ac_cv_path_FGREP="$GREP -F" + else + if test -z "$FGREP"; then + ac_path_FGREP_found=false + # Loop through the user's path and test for each of PROGNAME-LIST + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH$PATH_SEPARATOR/usr/xpg4/bin +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_prog in fgrep; do + for ac_exec_ext in '' $ac_executable_extensions; do + ac_path_FGREP="$as_dir/$ac_prog$ac_exec_ext" + { test -f "$ac_path_FGREP" && $as_test_x "$ac_path_FGREP"; } || continue +# Check for GNU ac_path_FGREP and select it if it is found. + # Check for GNU $ac_path_FGREP +case `"$ac_path_FGREP" --version 2>&1` in +*GNU*) + ac_cv_path_FGREP="$ac_path_FGREP" ac_path_FGREP_found=:;; +*) + ac_count=0 + $as_echo_n 0123456789 >"conftest.in" + while : + do + cat "conftest.in" "conftest.in" >"conftest.tmp" + mv "conftest.tmp" "conftest.in" + cp "conftest.in" "conftest.nl" + $as_echo 'FGREP' >> "conftest.nl" + "$ac_path_FGREP" FGREP < "conftest.nl" >"conftest.out" 2>/dev/null || break + diff "conftest.out" "conftest.nl" >/dev/null 2>&1 || break + as_fn_arith $ac_count + 1 && ac_count=$as_val + if test $ac_count -gt ${ac_path_FGREP_max-0}; then + # Best one so far, save it but keep looking for a better one + ac_cv_path_FGREP="$ac_path_FGREP" + ac_path_FGREP_max=$ac_count + fi + # 10*(2^10) chars as input seems more than enough + test $ac_count -gt 10 && break + done + rm -f conftest.in conftest.tmp conftest.nl conftest.out;; +esac + + $ac_path_FGREP_found && break 3 + done + done + done +IFS=$as_save_IFS + if test -z "$ac_cv_path_FGREP"; then + as_fn_error $? "no acceptable fgrep could be found in $PATH$PATH_SEPARATOR/usr/xpg4/bin" "$LINENO" 5 + fi +else + ac_cv_path_FGREP=$FGREP +fi + + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_path_FGREP" >&5 +$as_echo "$ac_cv_path_FGREP" >&6; } + FGREP="$ac_cv_path_FGREP" + + +test -z "$GREP" && GREP=grep + + + + + + + + + + + + + + + + + + + +# Check whether --with-gnu-ld was given. +if test "${with_gnu_ld+set}" = set; then : + withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes +else + with_gnu_ld=no +fi + +ac_prog=ld +if test "$GCC" = yes; then + # Check if gcc -print-prog-name=ld gives a path. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5 +$as_echo_n "checking for ld used by $CC... " >&6; } + case $host in + *-*-mingw*) + # gcc leaves a trailing carriage return which upsets mingw + ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;; + *) + ac_prog=`($CC -print-prog-name=ld) 2>&5` ;; + esac + case $ac_prog in + # Accept absolute paths. + [\\/]* | ?:[\\/]*) + re_direlt='/[^/][^/]*/\.\./' + # Canonicalize the pathname of ld + ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'` + while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do + ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"` + done + test -z "$LD" && LD="$ac_prog" + ;; + "") + # If it fails, then pretend we aren't using GCC. + ac_prog=ld + ;; + *) + # If it is relative, then search for the first ld in PATH. + with_gnu_ld=unknown + ;; + esac +elif test "$with_gnu_ld" = yes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5 +$as_echo_n "checking for GNU ld... " >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5 +$as_echo_n "checking for non-GNU ld... 
" >&6; } +fi +if ${lt_cv_path_LD+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -z "$LD"; then + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then + lt_cv_path_LD="$ac_dir/$ac_prog" + # Check to see if the program is GNU ld. I'd rather use --version, + # but apparently some variants of GNU ld only accept -v. + # Break only if it was the GNU/non-GNU ld that we prefer. + case `"$lt_cv_path_LD" -v 2>&1 &5 +$as_echo "$LD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi +test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5 +$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; } +if ${lt_cv_prog_gnu_ld+:} false; then : + $as_echo_n "(cached) " >&6 +else + # I'd rather use --version here, but apparently some GNU lds only accept -v. +case `$LD -v 2>&1 &5 +$as_echo "$lt_cv_prog_gnu_ld" >&6; } +with_gnu_ld=$lt_cv_prog_gnu_ld + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for BSD- or MS-compatible name lister (nm)" >&5 +$as_echo_n "checking for BSD- or MS-compatible name lister (nm)... " >&6; } +if ${lt_cv_path_NM+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$NM"; then + # Let the user override the test. + lt_cv_path_NM="$NM" +else + lt_nm_to_check="${ac_tool_prefix}nm" + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/$lt_tmp_nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. + # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + : ${lt_cv_path_NM=no} +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_NM" >&5 +$as_echo "$lt_cv_path_NM" >&6; } +if test "$lt_cv_path_NM" != "no"; then + NM="$lt_cv_path_NM" +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. + else + if test -n "$ac_tool_prefix"; then + for ac_prog in dumpbin "link -dump" + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. +set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DUMPBIN+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DUMPBIN"; then + ac_cv_prog_DUMPBIN="$DUMPBIN" # Let the user override the test. 
+else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_DUMPBIN="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DUMPBIN=$ac_cv_prog_DUMPBIN +if test -n "$DUMPBIN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DUMPBIN" >&5 +$as_echo "$DUMPBIN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$DUMPBIN" && break + done +fi +if test -z "$DUMPBIN"; then + ac_ct_DUMPBIN=$DUMPBIN + for ac_prog in dumpbin "link -dump" +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DUMPBIN+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DUMPBIN"; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_ct_DUMPBIN" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_DUMPBIN="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DUMPBIN=$ac_cv_prog_ac_ct_DUMPBIN +if test -n "$ac_ct_DUMPBIN"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DUMPBIN" >&5 +$as_echo "$ac_ct_DUMPBIN" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_DUMPBIN" && break +done + + if test "x$ac_ct_DUMPBIN" = x; then + DUMPBIN=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DUMPBIN=$ac_ct_DUMPBIN + fi +fi + + case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols" + ;; + *) + DUMPBIN=: + ;; + esac + fi + + if test "$DUMPBIN" != ":"; then + NM="$DUMPBIN" + fi +fi +test -z "$NM" && NM=nm + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the name lister ($NM) interface" >&5 +$as_echo_n "checking the name lister ($NM) interface... 
" >&6; } +if ${lt_cv_nm_interface+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&5) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&5) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&5 + (eval echo "\"\$as_me:$LINENO: output\"" >&5) + cat conftest.out >&5 + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_nm_interface" >&5 +$as_echo "$lt_cv_nm_interface" >&6; } + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether ln -s works" >&5 +$as_echo_n "checking whether ln -s works... " >&6; } +LN_S=$as_ln_s +if test "$LN_S" = "ln -s"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no, using $LN_S" >&5 +$as_echo "no, using $LN_S" >&6; } +fi + +# find the maximum length of command line arguments +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking the maximum length of command line arguments" >&5 +$as_echo_n "checking the maximum length of command line arguments... " >&6; } +if ${lt_cv_sys_max_cmd_len+:} false; then : + $as_echo_n "(cached) " >&6 +else + i=0 + teststring="ABCD" + + case $build_os in + msdosdjgpp*) + # On DJGPP, this test can blow up pretty badly due to problems in libc + # (any single argument exceeding 2000 bytes causes a buffer overrun + # during glob expansion). Even if it were fixed, the result of this + # check would be larger than it should be. + lt_cv_sys_max_cmd_len=12288; # 12K is about right + ;; + + gnu*) + # Under GNU Hurd, this test is not required because there is + # no limit to the length of command line arguments. + # Libtool will interpret -1 as no limit whatsoever + lt_cv_sys_max_cmd_len=-1; + ;; + + cygwin* | mingw* | cegcc*) + # On Win9x/ME, this test blows up -- it succeeds, but takes + # about 5 minutes as the teststring grows exponentially. + # Worse, since 9x/ME are not pre-emptively multitasking, + # you end up with a "frozen" computer, even though with patience + # the test eventually succeeds (with a max line length of 256k). + # Instead, let's just punt: use the minimum linelength reported by + # all of the supported platforms: 8192 (on NT/2K/XP). + lt_cv_sys_max_cmd_len=8192; + ;; + + mint*) + # On MiNT this can take a long time and run out of memory. + lt_cv_sys_max_cmd_len=8192; + ;; + + amigaos*) + # On AmigaOS with pdksh, this test takes hours, literally. + # So we just punt and use a minimum line length of 8192. + lt_cv_sys_max_cmd_len=8192; + ;; + + netbsd* | freebsd* | openbsd* | darwin* | dragonfly*) + # This has been around since 386BSD, at least. Likely further. + if test -x /sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax` + elif test -x /usr/sbin/sysctl; then + lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax` + else + lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs + fi + # And add a safety zone + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + ;; + + interix*) + # We know the value 262144 and hardcode it with a safety zone (like BSD) + lt_cv_sys_max_cmd_len=196608 + ;; + + os2*) + # The test takes a long time on OS/2. 
+ lt_cv_sys_max_cmd_len=8192 + ;; + + osf*) + # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure + # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not + # nice to cause kernel panics so lets avoid the loop below. + # First set a reasonable default. + lt_cv_sys_max_cmd_len=16384 + # + if test -x /sbin/sysconfig; then + case `/sbin/sysconfig -q proc exec_disable_arg_limit` in + *1*) lt_cv_sys_max_cmd_len=-1 ;; + esac + fi + ;; + sco3.2v5*) + lt_cv_sys_max_cmd_len=102400 + ;; + sysv5* | sco5v6* | sysv4.2uw2*) + kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null` + if test -n "$kargmax"; then + lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[ ]//'` + else + lt_cv_sys_max_cmd_len=32768 + fi + ;; + *) + lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null` + if test -n "$lt_cv_sys_max_cmd_len"; then + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4` + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3` + else + # Make teststring a little bigger before we do anything with it. + # a 1K string should be a reasonable start. + for i in 1 2 3 4 5 6 7 8 ; do + teststring=$teststring$teststring + done + SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}} + # If test is not a shell built-in, we'll probably end up computing a + # maximum length that is only half of the actual maximum length, but + # we can't tell. + while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \ + = "X$teststring$teststring"; } >/dev/null 2>&1 && + test $i != 17 # 1/2 MB should be enough + do + i=`expr $i + 1` + teststring=$teststring$teststring + done + # Only check the string length outside the loop. + lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1` + teststring= + # Add a significant safety factor because C++ compilers can tack on + # massive amounts of additional arguments before passing them to the + # linker. It appears as though 1/2 is a usable value. + lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2` + fi + ;; + esac + +fi + +if test -n $lt_cv_sys_max_cmd_len ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sys_max_cmd_len" >&5 +$as_echo "$lt_cv_sys_max_cmd_len" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: none" >&5 +$as_echo "none" >&6; } +fi +max_cmd_len=$lt_cv_sys_max_cmd_len + + + + + + +: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands some XSI constructs" >&5 +$as_echo_n "checking whether the shell understands some XSI constructs... " >&6; } +# Try some XSI features +xsi_shell=no +( _lt_dummy="a/b/c" + test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ + = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $xsi_shell" >&5 +$as_echo "$xsi_shell" >&6; } + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the shell understands \"+=\"" >&5 +$as_echo_n "checking whether the shell understands \"+=\"... 
" >&6; } +lt_shell_append=no +( foo=bar; set foo baz; eval "$1+=\$2" && test "$foo" = barbaz ) \ + >/dev/null 2>&1 \ + && lt_shell_append=yes +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_shell_append" >&5 +$as_echo "$lt_shell_append" >&6; } + + +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi + + + + + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to $host format" >&5 +$as_echo_n "checking how to convert $build file names to $host format... " >&6; } +if ${lt_cv_to_host_file_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac + +fi + +to_host_file_cmd=$lt_cv_to_host_file_cmd +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_host_file_cmd" >&5 +$as_echo "$lt_cv_to_host_file_cmd" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to convert $build file names to toolchain format" >&5 +$as_echo_n "checking how to convert $build file names to toolchain format... " >&6; } +if ${lt_cv_to_tool_file_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + #assume ordinary cross tools, or native build. +lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac + +fi + +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_to_tool_file_cmd" >&5 +$as_echo "$lt_cv_to_tool_file_cmd" >&6; } + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $LD option to reload object files" >&5 +$as_echo_n "checking for $LD option to reload object files... 
" >&6; } +if ${lt_cv_ld_reload_flag+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_reload_flag='-r' +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_reload_flag" >&5 +$as_echo "$lt_cv_ld_reload_flag" >&6; } +reload_flag=$lt_cv_ld_reload_flag +case $reload_flag in +"" | " "*) ;; +*) reload_flag=" $reload_flag" ;; +esac +reload_cmds='$LD$reload_flag -o $output$reload_objs' +case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + if test "$GCC" != yes; then + reload_cmds=false + fi + ;; + darwin*) + if test "$GCC" = yes; then + reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs' + else + reload_cmds='$LD$reload_flag -o $output$reload_objs' + fi + ;; +esac + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}objdump", so it can be a program name with args. +set dummy ${ac_tool_prefix}objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OBJDUMP"; then + ac_cv_prog_OBJDUMP="$OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_OBJDUMP="${ac_tool_prefix}objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OBJDUMP=$ac_cv_prog_OBJDUMP +if test -n "$OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OBJDUMP" >&5 +$as_echo "$OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OBJDUMP"; then + ac_ct_OBJDUMP=$OBJDUMP + # Extract the first word of "objdump", so it can be a program name with args. +set dummy objdump; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OBJDUMP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OBJDUMP"; then + ac_cv_prog_ac_ct_OBJDUMP="$ac_ct_OBJDUMP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_OBJDUMP="objdump" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OBJDUMP=$ac_cv_prog_ac_ct_OBJDUMP +if test -n "$ac_ct_OBJDUMP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OBJDUMP" >&5 +$as_echo "$ac_ct_OBJDUMP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OBJDUMP" = x; then + OBJDUMP="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OBJDUMP=$ac_ct_OBJDUMP + fi +else + OBJDUMP="$ac_cv_prog_OBJDUMP" +fi + +test -z "$OBJDUMP" && OBJDUMP=objdump + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to recognize dependent libraries" >&5 +$as_echo_n "checking how to recognize dependent libraries... " >&6; } +if ${lt_cv_deplibs_check_method+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_file_magic_cmd='$MAGIC_CMD' +lt_cv_file_magic_test_file= +lt_cv_deplibs_check_method='unknown' +# Need to set the preceding variable on all platforms that support +# interlibrary dependencies. +# 'none' -- dependencies not supported. +# `unknown' -- same as none, but documents that we really don't know. +# 'pass_all' -- all dependencies passed with no checks. +# 'test_compile' -- check by making test program. +# 'file_magic [[regex]]' -- check by looking for files in library path +# which responds to the $file_magic_cmd with a given extended regex. +# If you have `file' or equivalent on your system and you're not sure +# whether `pass_all' will *always* work, you probably want this one. + +case $host_os in +aix[4-9]*) + lt_cv_deplibs_check_method=pass_all + ;; + +beos*) + lt_cv_deplibs_check_method=pass_all + ;; + +bsdi[45]*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib)' + lt_cv_file_magic_cmd='/usr/bin/file -L' + lt_cv_file_magic_test_file=/shlib/libc.so + ;; + +cygwin*) + # func_win32_libid is a shell function defined in ltmain.sh + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + ;; + +mingw* | pw32*) + # Base MSYS/MinGW do not provide the 'file' command needed by + # func_win32_libid shell function, so use a weaker test based on 'objdump', + # unless we find 'file', for example because we are cross-compiling. + # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin. + if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then + lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL' + lt_cv_file_magic_cmd='func_win32_libid' + else + # Keep this pattern in sync with the one in func_win32_libid. + lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)' + lt_cv_file_magic_cmd='$OBJDUMP -f' + fi + ;; + +cegcc*) + # use the weaker test based on 'objdump'. See mingw*. + lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' 
+ lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[3-9]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF-[0-9][0-9]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]' + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|PA-RISC[0-9]\.[0-9]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[3-9]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be glibc/ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[^/]+(\.so\.[0-9]+\.[0-9]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [ML]SB (shared object|dynamic lib) M[0-9][0-9]* Version [0-9]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [0-9][0-9]*-bit [LM]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +esac + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_deplibs_check_method" >&5 +$as_echo "$lt_cv_deplibs_check_method" >&6; } + +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[\1]\/[\1]\/g;/g"` + fi + ;; + esac +fi + +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + + + + + + + + + + + + + + + + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dlltool", so it can be a program name with args. +set dummy ${ac_tool_prefix}dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DLLTOOL"; then + ac_cv_prog_DLLTOOL="$DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_DLLTOOL="${ac_tool_prefix}dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DLLTOOL=$ac_cv_prog_DLLTOOL +if test -n "$DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DLLTOOL" >&5 +$as_echo "$DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DLLTOOL"; then + ac_ct_DLLTOOL=$DLLTOOL + # Extract the first word of "dlltool", so it can be a program name with args. +set dummy dlltool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_DLLTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DLLTOOL"; then + ac_cv_prog_ac_ct_DLLTOOL="$ac_ct_DLLTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_DLLTOOL="dlltool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DLLTOOL=$ac_cv_prog_ac_ct_DLLTOOL +if test -n "$ac_ct_DLLTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DLLTOOL" >&5 +$as_echo "$ac_ct_DLLTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DLLTOOL" = x; then + DLLTOOL="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DLLTOOL=$ac_ct_DLLTOOL + fi +else + DLLTOOL="$ac_cv_prog_DLLTOOL" +fi + +test -z "$DLLTOOL" && DLLTOOL=dlltool + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to associate runtime and link libraries" >&5 +$as_echo_n "checking how to associate runtime and link libraries... " >&6; } +if ${lt_cv_sharedlib_from_linklib_cmd+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_sharedlib_from_linklib_cmd='unknown' + +case $host_os in +cygwin* | mingw* | pw32* | cegcc*) + # two different shell functions defined in ltmain.sh + # decide which to use based on capabilities of $DLLTOOL + case `$DLLTOOL --help 2>&1` in + *--identify-strict*) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib + ;; + *) + lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback + ;; + esac + ;; +*) + # fallback: assume linklib IS sharedlib + lt_cv_sharedlib_from_linklib_cmd="$ECHO" + ;; +esac + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_sharedlib_from_linklib_cmd" >&5 +$as_echo "$lt_cv_sharedlib_from_linklib_cmd" >&6; } +sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd +test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO + + + + + + + + +if test -n "$ac_tool_prefix"; then + for ac_prog in ar + do + # Extract the first word of "$ac_tool_prefix$ac_prog", so it can be a program name with args. 
+set dummy $ac_tool_prefix$ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$AR"; then + ac_cv_prog_AR="$AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_AR="$ac_tool_prefix$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +AR=$ac_cv_prog_AR +if test -n "$AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $AR" >&5 +$as_echo "$AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$AR" && break + done +fi +if test -z "$AR"; then + ac_ct_AR=$AR + for ac_prog in ar +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_AR+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_AR"; then + ac_cv_prog_ac_ct_AR="$ac_ct_AR" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_AR="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_AR=$ac_cv_prog_ac_ct_AR +if test -n "$ac_ct_AR"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_AR" >&5 +$as_echo "$ac_ct_AR" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$ac_ct_AR" && break +done + + if test "x$ac_ct_AR" = x; then + AR="false" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + AR=$ac_ct_AR + fi +fi + +: ${AR=ar} +: ${AR_FLAGS=cru} + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for archiver @FILE support" >&5 +$as_echo_n "checking for archiver @FILE support... " >&6; } +if ${lt_cv_ar_at_file+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ar_at_file=no + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_compile "$LINENO"; then : + echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&5' + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test "$ac_status" -eq 0; then + # Ensure the archiver fails upon bogus file names. 
+ rm -f conftest.$ac_objext libconftest.a + { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$lt_ar_try\""; } >&5 + (eval $lt_ar_try) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + if test "$ac_status" -ne 0; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a + +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ar_at_file" >&5 +$as_echo "$lt_cv_ar_at_file" >&6; } + +if test "x$lt_cv_ar_at_file" = xno; then + archiver_list_spec= +else + archiver_list_spec=$lt_cv_ar_at_file +fi + + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}strip", so it can be a program name with args. +set dummy ${ac_tool_prefix}strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$STRIP"; then + ac_cv_prog_STRIP="$STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_STRIP="${ac_tool_prefix}strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +STRIP=$ac_cv_prog_STRIP +if test -n "$STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $STRIP" >&5 +$as_echo "$STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_STRIP"; then + ac_ct_STRIP=$STRIP + # Extract the first word of "strip", so it can be a program name with args. +set dummy strip; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_STRIP+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_STRIP"; then + ac_cv_prog_ac_ct_STRIP="$ac_ct_STRIP" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_STRIP="strip" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_STRIP=$ac_cv_prog_ac_ct_STRIP +if test -n "$ac_ct_STRIP"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_STRIP" >&5 +$as_echo "$ac_ct_STRIP" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_STRIP" = x; then + STRIP=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + STRIP=$ac_ct_STRIP + fi +else + STRIP="$ac_cv_prog_STRIP" +fi + +test -z "$STRIP" && STRIP=: + + + + + + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}ranlib", so it can be a program name with args. 
+set dummy ${ac_tool_prefix}ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$RANLIB"; then + ac_cv_prog_RANLIB="$RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_RANLIB="${ac_tool_prefix}ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +RANLIB=$ac_cv_prog_RANLIB +if test -n "$RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $RANLIB" >&5 +$as_echo "$RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_RANLIB"; then + ac_ct_RANLIB=$RANLIB + # Extract the first word of "ranlib", so it can be a program name with args. +set dummy ranlib; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_RANLIB+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_RANLIB"; then + ac_cv_prog_ac_ct_RANLIB="$ac_ct_RANLIB" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_RANLIB="ranlib" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_RANLIB=$ac_cv_prog_ac_ct_RANLIB +if test -n "$ac_ct_RANLIB"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_RANLIB" >&5 +$as_echo "$ac_ct_RANLIB" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_RANLIB" = x; then + RANLIB=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + RANLIB=$ac_ct_RANLIB + fi +else + RANLIB="$ac_cv_prog_RANLIB" +fi + +test -z "$RANLIB" && RANLIB=: + + + + + + +# Determine commands to create old-style static archives. +old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" +fi + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. 
+compiler=$CC + + +# Check for command to grab the raw symbol name followed by C symbol from nm. +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking command to parse $NM output from $compiler object" >&5 +$as_echo_n "checking command to parse $NM output from $compiler object... " >&6; } +if ${lt_cv_sys_global_symbol_pipe+:} false; then : + $as_echo_n "(cached) " >&6 +else + +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[BCDEGRST]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([_A-Za-z][_A-Za-z0-9]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[BCDT]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[ABCDGISTW]' + ;; +hpux*) + if test "$host_cpu" = ia64; then + symcode='[ABCDEGRST]' + fi + ;; +irix* | nonstopux*) + symcode='[BCDEGRST]' + ;; +osf*) + symcode='[BCDEGQRST]' + ;; +solaris*) + symcode='[BDRT]' + ;; +sco3.2v5*) + symcode='[DT]' + ;; +sysv4.2uw2*) + symcode='[DT]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[ABDT]' + ;; +sysv4) + symcode='[DFNSTU]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[ABCDGIRSTW]' ;; +esac + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"\2\", (void *) \&\2},/p'" +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([^ ]*\)[ ]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([^ ]*\) \(lib[^ ]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([^ ]*\) \([^ ]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function + # and D for any global variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK '"\ +" {last_section=section; section=\$ 3};"\ +" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? 
\"T \" : \"D \"};"\ +" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\ +" s[1]~/^[@?]/{print s[1], s[1]; next};"\ +" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\ +" ' prfx=^$ac_symprfx" + else + lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[ ]\($symcode$symcode*\)[ ][ ]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'" + fi + lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'" + + # Check to see that the pipe works correctly. + pipe_works=no + + rm -f conftest* + cat > conftest.$ac_ext <<_LT_EOF +#ifdef __cplusplus +extern "C" { +#endif +char nm_test_var; +void nm_test_func(void); +void nm_test_func(void){} +#ifdef __cplusplus +} +#endif +int main(){nm_test_var='a';nm_test_func();return(0);} +_LT_EOF + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Now try to grab the symbols. + nlist=conftest.nm + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist\""; } >&5 + (eval $NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } && test -s "$nlist"; then + # Try sorting and uniquifying the output. + if sort "$nlist" | uniq > "$nlist"T; then + mv -f "$nlist"T "$nlist" + else + rm -f "$nlist"T + fi + + # Make sure that we snagged all the symbols we need. + if $GREP ' nm_test_var$' "$nlist" >/dev/null; then + if $GREP ' nm_test_func$' "$nlist" >/dev/null; then + cat <<_LT_EOF > conftest.$ac_ext +/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */ +#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE) +/* DATA imports from DLLs on WIN32 con't be const, because runtime + relocations are performed -- see ld's documentation on pseudo-relocs. */ +# define LT_DLSYM_CONST +#elif defined(__osf__) +/* This system does not cope well with relocations in const data. */ +# define LT_DLSYM_CONST +#else +# define LT_DLSYM_CONST const +#endif + +#ifdef __cplusplus +extern "C" { +#endif + +_LT_EOF + # Now generate the symbol file. + eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext' + + cat <<_LT_EOF >> conftest.$ac_ext + +/* The mapping between symbol names and symbols. */ +LT_DLSYM_CONST struct { + const char *name; + void *address; +} +lt__PROGRAM__LTX_preloaded_symbols[] = +{ + { "@PROGRAM@", (void *) 0 }, +_LT_EOF + $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext + cat <<\_LT_EOF >> conftest.$ac_ext + {0, (void *) 0} +}; + +/* This works around a problem in FreeBSD linker */ +#ifdef FREEBSD_WORKAROUND +static const void *lt_preloaded_setup() { + return lt__PROGRAM__LTX_preloaded_symbols; +} +#endif + +#ifdef __cplusplus +} +#endif +_LT_EOF + # Now try linking the two files. + mv conftest.$ac_objext conftstm.$ac_objext + lt_globsym_save_LIBS=$LIBS + lt_globsym_save_CFLAGS=$CFLAGS + LIBS="conftstm.$ac_objext" + CFLAGS="$CFLAGS$lt_prog_compiler_no_builtin_flag" + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5 + (eval $ac_link) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } && test -s conftest${ac_exeext}; then + pipe_works=yes + fi + LIBS=$lt_globsym_save_LIBS + CFLAGS=$lt_globsym_save_CFLAGS + else + echo "cannot find nm_test_func in $nlist" >&5 + fi + else + echo "cannot find nm_test_var in $nlist" >&5 + fi + else + echo "cannot run $lt_cv_sys_global_symbol_pipe" >&5 + fi + else + echo "$progname: failed program was:" >&5 + cat conftest.$ac_ext >&5 + fi + rm -rf conftest* conftst* + + # Do not use the global_symbol_pipe unless it works. + if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done + +fi + +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: failed" >&5 +$as_echo "failed" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ok" >&5 +$as_echo "ok" >&6; } +fi + +# Response file support. +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[@]FILE' >/dev/null; then + nm_file_list_spec='@' +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for sysroot" >&5 +$as_echo_n "checking for sysroot... " >&6; } + +# Check whether --with-sysroot was given. +if test "${with_sysroot+set}" = set; then : + withval=$with_sysroot; +else + with_sysroot=no +fi + + +lt_sysroot= +case ${with_sysroot} in #( + yes) + if test "$GCC" = yes; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${with_sysroot}" >&5 +$as_echo "${with_sysroot}" >&6; } + as_fn_error $? "The sysroot must be an absolute path." "$LINENO" 5 + ;; +esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${lt_sysroot:-no}" >&5 +$as_echo "${lt_sysroot:-no}" >&6; } + + + + + + +# Check whether --enable-libtool-lock was given. +if test "${enable_libtool_lock+set}" = set; then : + enableval=$enable_libtool_lock; +fi + +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '#line '$LINENO' "configure"' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the C compiler needs -belf" >&5 +$as_echo_n "checking whether the C compiler needs -belf... " >&6; } +if ${lt_cv_cc_needs_belf+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_cc_needs_belf=yes +else + lt_cv_cc_needs_belf=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_cc_needs_belf" >&5 +$as_echo "$lt_cv_cc_needs_belf" >&6; } + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; }; then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) + case $host in + i?86-*-solaris*) + LD="${LD-ld} -m elf_x86_64" + ;; + sparc*-*-solaris*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + # GNU ld 2.21 introduced _sol2 emulations. Use them if available. + if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then + LD="${LD-ld}_sol2" + fi + ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks="$enable_libtool_lock" + +if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}mt", so it can be a program name with args. +set dummy ${ac_tool_prefix}mt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_MANIFEST_TOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$MANIFEST_TOOL"; then + ac_cv_prog_MANIFEST_TOOL="$MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_MANIFEST_TOOL="${ac_tool_prefix}mt" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +MANIFEST_TOOL=$ac_cv_prog_MANIFEST_TOOL +if test -n "$MANIFEST_TOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MANIFEST_TOOL" >&5 +$as_echo "$MANIFEST_TOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_MANIFEST_TOOL"; then + ac_ct_MANIFEST_TOOL=$MANIFEST_TOOL + # Extract the first word of "mt", so it can be a program name with args. +set dummy mt; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_MANIFEST_TOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_MANIFEST_TOOL"; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="$ac_ct_MANIFEST_TOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_MANIFEST_TOOL="mt" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_MANIFEST_TOOL=$ac_cv_prog_ac_ct_MANIFEST_TOOL +if test -n "$ac_ct_MANIFEST_TOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_MANIFEST_TOOL" >&5 +$as_echo "$ac_ct_MANIFEST_TOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_MANIFEST_TOOL" = x; then + MANIFEST_TOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + MANIFEST_TOOL=$ac_ct_MANIFEST_TOOL + fi +else + MANIFEST_TOOL="$ac_cv_prog_MANIFEST_TOOL" +fi + +test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $MANIFEST_TOOL is a manifest tool" >&5 +$as_echo_n "checking if $MANIFEST_TOOL is a manifest tool... " >&6; } +if ${lt_cv_path_mainfest_tool+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_path_mainfest_tool=no + echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&5 + $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out + cat conftest.err >&5 + if $GREP 'Manifest Tool' conftest.out > /dev/null; then + lt_cv_path_mainfest_tool=yes + fi + rm -f conftest* +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_path_mainfest_tool" >&5 +$as_echo "$lt_cv_path_mainfest_tool" >&6; } +if test "x$lt_cv_path_mainfest_tool" != xyes; then + MANIFEST_TOOL=: +fi + + + + + + + case $host_os in + rhapsody* | darwin*) + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}dsymutil", so it can be a program name with args. +set dummy ${ac_tool_prefix}dsymutil; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_DSYMUTIL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$DSYMUTIL"; then + ac_cv_prog_DSYMUTIL="$DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_DSYMUTIL="${ac_tool_prefix}dsymutil" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +DSYMUTIL=$ac_cv_prog_DSYMUTIL +if test -n "$DSYMUTIL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $DSYMUTIL" >&5 +$as_echo "$DSYMUTIL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_DSYMUTIL"; then + ac_ct_DSYMUTIL=$DSYMUTIL + # Extract the first word of "dsymutil", so it can be a program name with args. +set dummy dsymutil; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... 
" >&6; } +if ${ac_cv_prog_ac_ct_DSYMUTIL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_DSYMUTIL"; then + ac_cv_prog_ac_ct_DSYMUTIL="$ac_ct_DSYMUTIL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_DSYMUTIL="dsymutil" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_DSYMUTIL=$ac_cv_prog_ac_ct_DSYMUTIL +if test -n "$ac_ct_DSYMUTIL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_DSYMUTIL" >&5 +$as_echo "$ac_ct_DSYMUTIL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_DSYMUTIL" = x; then + DSYMUTIL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + DSYMUTIL=$ac_ct_DSYMUTIL + fi +else + DSYMUTIL="$ac_cv_prog_DSYMUTIL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}nmedit", so it can be a program name with args. +set dummy ${ac_tool_prefix}nmedit; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_NMEDIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$NMEDIT"; then + ac_cv_prog_NMEDIT="$NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_NMEDIT="${ac_tool_prefix}nmedit" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +NMEDIT=$ac_cv_prog_NMEDIT +if test -n "$NMEDIT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $NMEDIT" >&5 +$as_echo "$NMEDIT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_NMEDIT"; then + ac_ct_NMEDIT=$NMEDIT + # Extract the first word of "nmedit", so it can be a program name with args. +set dummy nmedit; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_NMEDIT+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_NMEDIT"; then + ac_cv_prog_ac_ct_NMEDIT="$ac_ct_NMEDIT" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_NMEDIT="nmedit" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_NMEDIT=$ac_cv_prog_ac_ct_NMEDIT +if test -n "$ac_ct_NMEDIT"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_NMEDIT" >&5 +$as_echo "$ac_ct_NMEDIT" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_NMEDIT" = x; then + NMEDIT=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + NMEDIT=$ac_ct_NMEDIT + fi +else + NMEDIT="$ac_cv_prog_NMEDIT" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}lipo", so it can be a program name with args. +set dummy ${ac_tool_prefix}lipo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_LIPO+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$LIPO"; then + ac_cv_prog_LIPO="$LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_LIPO="${ac_tool_prefix}lipo" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +LIPO=$ac_cv_prog_LIPO +if test -n "$LIPO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LIPO" >&5 +$as_echo "$LIPO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_LIPO"; then + ac_ct_LIPO=$LIPO + # Extract the first word of "lipo", so it can be a program name with args. +set dummy lipo; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_LIPO+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_LIPO"; then + ac_cv_prog_ac_ct_LIPO="$ac_ct_LIPO" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_LIPO="lipo" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_LIPO=$ac_cv_prog_ac_ct_LIPO +if test -n "$ac_ct_LIPO"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_LIPO" >&5 +$as_echo "$ac_ct_LIPO" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_LIPO" = x; then + LIPO=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + LIPO=$ac_ct_LIPO + fi +else + LIPO="$ac_cv_prog_LIPO" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL"; then + ac_cv_prog_OTOOL="$OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_OTOOL="${ac_tool_prefix}otool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL=$ac_cv_prog_OTOOL +if test -n "$OTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL" >&5 +$as_echo "$OTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL"; then + ac_ct_OTOOL=$OTOOL + # Extract the first word of "otool", so it can be a program name with args. +set dummy otool; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OTOOL+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL"; then + ac_cv_prog_ac_ct_OTOOL="$ac_ct_OTOOL" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_OTOOL="otool" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL=$ac_cv_prog_ac_ct_OTOOL +if test -n "$ac_ct_OTOOL"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL" >&5 +$as_echo "$ac_ct_OTOOL" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OTOOL" = x; then + OTOOL=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL=$ac_ct_OTOOL + fi +else + OTOOL="$ac_cv_prog_OTOOL" +fi + + if test -n "$ac_tool_prefix"; then + # Extract the first word of "${ac_tool_prefix}otool64", so it can be a program name with args. +set dummy ${ac_tool_prefix}otool64; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_OTOOL64+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$OTOOL64"; then + ac_cv_prog_OTOOL64="$OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_OTOOL64="${ac_tool_prefix}otool64" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +OTOOL64=$ac_cv_prog_OTOOL64 +if test -n "$OTOOL64"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $OTOOL64" >&5 +$as_echo "$OTOOL64" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +fi +if test -z "$ac_cv_prog_OTOOL64"; then + ac_ct_OTOOL64=$OTOOL64 + # Extract the first word of "otool64", so it can be a program name with args. +set dummy otool64; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_ac_ct_OTOOL64+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$ac_ct_OTOOL64"; then + ac_cv_prog_ac_ct_OTOOL64="$ac_ct_OTOOL64" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
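
The six probes in this stretch (MANIFEST_TOOL, DSYMUTIL, NMEDIT, LIPO, OTOOL, OTOOL64) are all instances of one generated pattern: look for a host-prefixed tool first, fall back to the bare name with a cross-compile warning, and finally substitute ":" (the shell no-op) so later code can invoke the variable unconditionally. A condensed sketch of that logic follows; find_tool is a hypothetical helper, not part of the patch:

    # Hypothetical condensation of the generated probe. "command -v" is POSIX;
    # the generated script walks $PATH by hand for portability to very old shells.
    find_tool() {                 # $1 = bare tool name, e.g. "otool"
      for t in "${ac_tool_prefix}$1" "$1"; do
        command -v "$t" >/dev/null 2>&1 && { echo "$t"; return; }
      done
      echo ":"                    # no-op fallback: running "$OTOOL file" then does nothing
    }
    OTOOL=$(find_tool otool)
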
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_ac_ct_OTOOL64="otool64" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +ac_ct_OTOOL64=$ac_cv_prog_ac_ct_OTOOL64 +if test -n "$ac_ct_OTOOL64"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_ct_OTOOL64" >&5 +$as_echo "$ac_ct_OTOOL64" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + if test "x$ac_ct_OTOOL64" = x; then + OTOOL64=":" + else + case $cross_compiling:$ac_tool_warned in +yes:) +{ $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: using cross tools not prefixed with host triplet" >&5 +$as_echo "$as_me: WARNING: using cross tools not prefixed with host triplet" >&2;} +ac_tool_warned=yes ;; +esac + OTOOL64=$ac_ct_OTOOL64 + fi +else + OTOOL64="$ac_cv_prog_OTOOL64" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -single_module linker flag" >&5 +$as_echo_n "checking for -single_module linker flag... " >&6; } +if ${lt_cv_apple_cc_single_mod+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + # If there is a non-empty error log, and "single_module" + # appears in it, assume the flag caused a linker warning + if test -s conftest.err && $GREP single_module conftest.err; then + cat conftest.err >&5 + # Otherwise, if the output was created with a 0 exit code from + # the compiler, it worked. + elif test -f libconftest.dylib && test $_lt_result -eq 0; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&5 + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_apple_cc_single_mod" >&5 +$as_echo "$lt_cv_apple_cc_single_mod" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -exported_symbols_list linker flag" >&5 +$as_echo_n "checking for -exported_symbols_list linker flag... " >&6; } +if ${lt_cv_ld_exported_symbols_list+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_ld_exported_symbols_list=yes +else + lt_cv_ld_exported_symbols_list=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_exported_symbols_list" >&5 +$as_echo "$lt_cv_ld_exported_symbols_list" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for -force_load linker flag" >&5 +$as_echo_n "checking for -force_load linker flag... 
" >&6; } +if ${lt_cv_ld_force_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&5 + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&5 + echo "$AR cru libconftest.a conftest.o" >&5 + $AR cru libconftest.a conftest.o 2>&5 + echo "$RANLIB libconftest.a" >&5 + $RANLIB libconftest.a 2>&5 + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&5 + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? + if test -s conftest.err && $GREP force_load conftest.err; then + cat conftest.err >&5 + elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&5 + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_ld_force_load" >&5 +$as_echo "$lt_cv_ld_force_load" >&6; } + case $host_os in + rhapsody* | darwin1.[012]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[91]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[012]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C preprocessor" >&5 +$as_echo_n "checking how to run the C preprocessor... " >&6; } +# On Suns, sometimes $CPP names a directory. +if test -n "$CPP" && test -d "$CPP"; then + CPP= +fi +if test -z "$CPP"; then + if ${ac_cv_prog_CPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CPP needs to be expanded + for CPP in "$CC -E" "$CC -E -traditional-cpp" "/lib/cpp" + do + ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. 
+ cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CPP=$CPP + +fi + CPP=$ac_cv_prog_CPP +else + ac_cv_prog_CPP=$CPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CPP" >&5 +$as_echo "$CPP" >&6; } +ac_preproc_ok=false +for ac_c_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_c_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C preprocessor \"$CPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for ANSI C header files" >&5 +$as_echo_n "checking for ANSI C header files... " >&6; } +if ${ac_cv_header_stdc+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#include +#include + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_compile "$LINENO"; then : + ac_cv_header_stdc=yes +else + ac_cv_header_stdc=no +fi +rm -f core conftest.err conftest.$ac_objext conftest.$ac_ext + +if test $ac_cv_header_stdc = yes; then + # SunOS 4.x string.h does not declare mem*, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "memchr" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # ISC 2.0.2 stdlib.h does not declare free, contrary to ANSI. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include + +_ACEOF +if (eval "$ac_cpp conftest.$ac_ext") 2>&5 | + $EGREP "free" >/dev/null 2>&1; then : + +else + ac_cv_header_stdc=no +fi +rm -f conftest* + +fi + +if test $ac_cv_header_stdc = yes; then + # /bin/cc in Irix-4.0.5 gets non-ANSI ctype macros unless using -ansi. + if test "$cross_compiling" = yes; then : + : +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +#include +#if ((' ' & 0x0FF) == 0x020) +# define ISLOWER(c) ('a' <= (c) && (c) <= 'z') +# define TOUPPER(c) (ISLOWER(c) ? 'A' + ((c) - 'a') : (c)) +#else +# define ISLOWER(c) \ + (('a' <= (c) && (c) <= 'i') \ + || ('j' <= (c) && (c) <= 'r') \ + || ('s' <= (c) && (c) <= 'z')) +# define TOUPPER(c) (ISLOWER(c) ? ((c) | 0x40) : (c)) +#endif + +#define XOR(e, f) (((e) && !(f)) || (!(e) && (f))) +int +main () +{ + int i; + for (i = 0; i < 256; i++) + if (XOR (islower (i), ISLOWER (i)) + || toupper (i) != TOUPPER (i)) + return 2; + return 0; +} +_ACEOF +if ac_fn_c_try_run "$LINENO"; then : + +else + ac_cv_header_stdc=no +fi +rm -f core *.core core.conftest.* gmon.out bb.out conftest$ac_exeext \ + conftest.$ac_objext conftest.beam conftest.$ac_ext +fi + +fi +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_header_stdc" >&5 +$as_echo "$ac_cv_header_stdc" >&6; } +if test $ac_cv_header_stdc = yes; then + +$as_echo "#define STDC_HEADERS 1" >>confdefs.h + +fi + +# On IRIX 5.3, sys/types and inttypes.h are conflicting. +for ac_header in sys/types.h sys/stat.h stdlib.h string.h memory.h strings.h \ + inttypes.h stdint.h unistd.h +do : + as_ac_Header=`$as_echo "ac_cv_header_$ac_header" | $as_tr_sh` +ac_fn_c_check_header_compile "$LINENO" "$ac_header" "$as_ac_Header" "$ac_includes_default +" +if eval test \"x\$"$as_ac_Header"\" = x"yes"; then : + cat >>confdefs.h <<_ACEOF +#define `$as_echo "HAVE_$ac_header" | $as_tr_cpp` 1 +_ACEOF + +fi + +done + + +for ac_header in dlfcn.h +do : + ac_fn_c_check_header_compile "$LINENO" "dlfcn.h" "ac_cv_header_dlfcn_h" "$ac_includes_default +" +if test "x$ac_cv_header_dlfcn_h" = xyes; then : + cat >>confdefs.h <<_ACEOF +#define HAVE_DLFCN_H 1 +_ACEOF + +fi + +done + + + +func_stripname_cnf () +{ + case ${2} in + .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;; + *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;; + esac +} # func_stripname_cnf + + + + + +# Set options + + + + enable_dlopen=no + + + enable_win32_dll=no + + + # Check whether --enable-shared was given. +if test "${enable_shared+set}" = set; then : + enableval=$enable_shared; p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_shared=yes +fi + + + + + + + + + + # Check whether --enable-static was given. 
+if test "${enable_static+set}" = set; then : + enableval=$enable_static; p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_static=yes +fi + + + + + + + + + + +# Check whether --with-pic was given. +if test "${with_pic+set}" = set; then : + withval=$with_pic; lt_p=${PACKAGE-default} + case $withval in + yes|no) pic_mode=$withval ;; + *) + pic_mode=default + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for lt_pkg in $withval; do + IFS="$lt_save_ifs" + if test "X$lt_pkg" = "X$lt_p"; then + pic_mode=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + pic_mode=default +fi + + +test -z "$pic_mode" && pic_mode=default + + + + + + + + # Check whether --enable-fast-install was given. +if test "${enable_fast_install+set}" = set; then : + enableval=$enable_fast_install; p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac +else + enable_fast_install=yes +fi + + + + + + + + + + + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ltmain" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +test -z "$LN_S" && LN_S="ln -s" + + + + + + + + + + + + + + +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for objdir" >&5 +$as_echo_n "checking for objdir... " >&6; } +if ${lt_cv_objdir+:} false; then : + $as_echo_n "(cached) " >&6 +else + rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_objdir" >&5 +$as_echo "$lt_cv_objdir" >&6; } +objdir=$lt_cv_objdir + + + + + +cat >>confdefs.h <<_ACEOF +#define LT_OBJDIR "$lt_cv_objdir/" +_ACEOF + + + + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). 
+libext=a + +with_gnu_ld="$lt_cv_prog_gnu_ld" + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +for cc_temp in $compiler""; do + case $cc_temp in + compile | *[\\/]compile | ccache | *[\\/]ccache ) ;; + distcc | *[\\/]distcc | purify | *[\\/]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` + + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ${ac_tool_prefix}file" >&5 +$as_echo_n "checking for ${ac_tool_prefix}file... " >&6; } +if ${lt_cv_path_MAGIC_CMD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/${ac_tool_prefix}file; then + lt_cv_path_MAGIC_CMD="$ac_dir/${ac_tool_prefix}file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + + + +if test -z "$lt_cv_path_MAGIC_CMD"; then + if test -n "$ac_tool_prefix"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for file" >&5 +$as_echo_n "checking for file... " >&6; } +if ${lt_cv_path_MAGIC_CMD+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $MAGIC_CMD in +[\\/*] | ?:[\\/]*) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + ac_dummy="/usr/bin$PATH_SEPARATOR$PATH" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. 
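
When deplibs_check_method is of the form "file_magic <regex>", libtool verifies candidate dependencies by running the located file(1) binary on them and grepping the output against the regex; the surrounding loop merely finds a usable file(1), trying /usr/bin ahead of the rest of $PATH. A stand-alone illustration of the verification step, where both the regex and the library path are representative rather than taken from this patch:

    # Illustrative only: the regex approximates a typical Linux
    # deplibs_check_method, and /usr/lib/libz.so is a placeholder path.
    file_magic_regex='ELF [0-9][0-9]*-bit [LM]SB (shared object|dynamic lib)'
    if file /usr/lib/libz.so 2>/dev/null | grep -E "$file_magic_regex" >/dev/null; then
      echo "accepted as a shared library"
    fi
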
+ if test -f $ac_dir/file; then + lt_cv_path_MAGIC_CMD="$ac_dir/file" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. Nevertheless, you +*** may want to report the problem to your system manager and/or to +*** bug-libtool@gnu.org + +_LT_EOF + fi ;; + esac + fi + break + fi + done + IFS="$lt_save_ifs" + MAGIC_CMD="$lt_save_MAGIC_CMD" + ;; +esac +fi + +MAGIC_CMD="$lt_cv_path_MAGIC_CMD" +if test -n "$MAGIC_CMD"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $MAGIC_CMD" >&5 +$as_echo "$MAGIC_CMD" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + else + MAGIC_CMD=: + fi +fi + + fi + ;; +esac + +# Use C for the default configuration in the libtool script + +lt_save_CC="$CC" +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + + +# Source file extension for C test sources. +ac_ext=c + +# Object file extension for compiled C test sources. +objext=o +objext=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="int some_variable = 0;" + +# Code to be used in simple link tests +lt_simple_link_test_code='int main(){return(0);}' + + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + +# Save the default compiler, since it gets overwritten when the other +# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP. +compiler_DEFAULT=$CC + +# save warnings/boilerplate of simple test code +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + +lt_prog_compiler_no_builtin_flag= + +if test "$GCC" = yes; then + case $cc_basename in + nvcc*) + lt_prog_compiler_no_builtin_flag=' -Xcompiler -fno-builtin' ;; + *) + lt_prog_compiler_no_builtin_flag=' -fno-builtin' ;; + esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -fno-rtti -fno-exceptions" >&5 +$as_echo_n "checking if $compiler supports -fno-rtti -fno-exceptions... 
" >&6; } +if ${lt_cv_prog_compiler_rtti_exceptions+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_rtti_exceptions=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="-fno-rtti -fno-exceptions" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_rtti_exceptions=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_rtti_exceptions" >&5 +$as_echo "$lt_cv_prog_compiler_rtti_exceptions" >&6; } + +if test x"$lt_cv_prog_compiler_rtti_exceptions" = xyes; then + lt_prog_compiler_no_builtin_flag="$lt_prog_compiler_no_builtin_flag -fno-rtti -fno-exceptions" +else + : +fi + +fi + + + + + + + lt_prog_compiler_wl= +lt_prog_compiler_pic= +lt_prog_compiler_static= + + + if test "$GCC" = yes; then + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_static='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic='-fno-common' + ;; + + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static= + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. 
+ case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + ;; + + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + lt_prog_compiler_can_build_shared=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic=-Kconform_pic + fi + ;; + + *) + lt_prog_compiler_pic='-fPIC' + ;; + esac + + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + lt_prog_compiler_wl='-Xlinker ' + if test -n "$lt_prog_compiler_pic"; then + lt_prog_compiler_pic="-Xcompiler $lt_prog_compiler_pic" + fi + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + lt_prog_compiler_wl='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static='-Bstatic' + else + lt_prog_compiler_static='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + lt_prog_compiler_pic='-DDLL_EXPORT' + ;; + + hpux9* | hpux10* | hpux11*) + lt_prog_compiler_wl='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + lt_prog_compiler_static='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + lt_prog_compiler_wl='-Wl,' + # PIC (with -KPIC) is the default. + lt_prog_compiler_static='-non_shared' + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + # old Intel for x86_64 which still supported -KPIC. + ecc*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + # Lahey Fortran 8.1. + lf95*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='--shared' + lt_prog_compiler_static='--static' + ;; + nagfor*) + # NAG Fortran compiler + lt_prog_compiler_wl='-Wl,-Wl,,' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + ccc*) + lt_prog_compiler_wl='-Wl,' + # All Alpha code is PIC. 
+ lt_prog_compiler_static='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-qpic' + lt_prog_compiler_static='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [1-7].* | *Sun*Fortran*\ 8.[0-3]*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='' + ;; + *Sun\ F* | *Sun*Fortran*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Qoption ld ' + ;; + *Sun\ C*) + # Sun C 5.9 + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + lt_prog_compiler_wl='-Wl,' + ;; + *Intel*\ [CF]*Compiler*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fPIC' + lt_prog_compiler_static='-static' + ;; + *Portland\ Group*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-fpic' + lt_prog_compiler_static='-Bstatic' + ;; + esac + ;; + esac + ;; + + newsos6) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + lt_prog_compiler_wl='-Wl,' + # All OSF/1 code is PIC. + lt_prog_compiler_static='-non_shared' + ;; + + rdos*) + lt_prog_compiler_static='-non_shared' + ;; + + solaris*) + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + lt_prog_compiler_wl='-Qoption ld ';; + *) + lt_prog_compiler_wl='-Wl,';; + esac + ;; + + sunos4*) + lt_prog_compiler_wl='-Qoption ld ' + lt_prog_compiler_pic='-PIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + lt_prog_compiler_pic='-Kconform_pic' + lt_prog_compiler_static='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_pic='-KPIC' + lt_prog_compiler_static='-Bstatic' + ;; + + unicos*) + lt_prog_compiler_wl='-Wl,' + lt_prog_compiler_can_build_shared=no + ;; + + uts4*) + lt_prog_compiler_pic='-pic' + lt_prog_compiler_static='-Bstatic' + ;; + + *) + lt_prog_compiler_can_build_shared=no + ;; + esac + fi + +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic= + ;; + *) + lt_prog_compiler_pic="$lt_prog_compiler_pic -DPIC" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +$as_echo_n "checking for $compiler option to produce PIC... " >&6; } +if ${lt_cv_prog_compiler_pic+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic=$lt_prog_compiler_pic +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic" >&5 +$as_echo "$lt_cv_prog_compiler_pic" >&6; } +lt_prog_compiler_pic=$lt_cv_prog_compiler_pic + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic works" >&5 +$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic works... 
" >&6; } +if ${lt_cv_prog_compiler_pic_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_works=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works" >&5 +$as_echo "$lt_cv_prog_compiler_pic_works" >&6; } + +if test x"$lt_cv_prog_compiler_pic_works" = xyes; then + case $lt_prog_compiler_pic in + "" | " "*) ;; + *) lt_prog_compiler_pic=" $lt_prog_compiler_pic" ;; + esac +else + lt_prog_compiler_pic= + lt_prog_compiler_can_build_shared=no +fi + +fi + + + + + + + + + + + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl eval lt_tmp_static_flag=\"$lt_prog_compiler_static\" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if ${lt_cv_prog_compiler_static_works+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_static_works=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works=yes + fi + else + lt_cv_prog_compiler_static_works=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works" >&5 +$as_echo "$lt_cv_prog_compiler_static_works" >&6; } + +if test x"$lt_cv_prog_compiler_static_works" = xyes; then + : +else + lt_prog_compiler_static= +fi + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... 
" >&6; } +if ${lt_cv_prog_compiler_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. 
+ $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o" >&5 +$as_echo "$lt_cv_prog_compiler_c_o" >&6; } + + + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +$as_echo_n "checking if we can lock with hard links... " >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +$as_echo "$hard_links" >&6; } + if test "$hard_links" = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + runpath_var= + allow_undefined_flag= + always_export_symbols=no + archive_cmds= + archive_expsym_cmds= + compiler_needs_object=no + enable_shared_with_static_runtimes=no + export_dynamic_flag_spec= + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + hardcode_automatic=no + hardcode_direct=no + hardcode_direct_absolute=no + hardcode_libdir_flag_spec= + hardcode_libdir_separator= + hardcode_minus_L=no + hardcode_shlibpath_var=unsupported + inherit_rpath=no + link_all_deplibs=unknown + module_cmds= + module_expsym_cmds= + old_archive_from_new_cmds= + old_archive_from_expsyms_cmds= + thread_safe_flag_spec= + whole_archive_flag_spec= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + include_expsyms= + # exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + exclude_expsyms='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. + extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + linux* | k*bsd*-gnu | gnu*) + link_all_deplibs=no + ;; + esac + + ld_shlibs=yes + + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. 
+ lt_use_gnu_ld_interface=no + if test "$with_gnu_ld" = yes; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[2-9]*) ;; + *\ \(GNU\ Binutils\)\ [3-9]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test "$lt_use_gnu_ld_interface" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + export_dynamic_flag_spec='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + whole_archive_flag_spec="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + whole_archive_flag_spec= + fi + supports_anon_versioning=no + case `$LD -v 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [01].* | *\ 2.[0-9].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[3-9]*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. +*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. + +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + archive_cmds='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, ) is actually meaningless, + # as there is no search path for DLLs. 
+ hardcode_libdir_flag_spec='-L$libdir' + export_dynamic_flag_spec='${wl}--export-all-symbols' + allow_undefined_flag=unsupported + always_export_symbols=no + enable_shared_with_static_runtimes=yes + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... + archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs=no + fi + ;; + + haiku*) + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + link_all_deplibs=yes + ;; + + interix[3-9]*) + hardcode_direct=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. 
+ archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test "$host_os" = linux-dietlibc; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test "$tmp_diet" = no + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + whole_archive_flag_spec= + tmp_sharedflag='--shared' ;; + xl[cC]* | bgxl[cC]* | mpixl[cC]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + whole_archive_flag_spec='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object=yes + ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + whole_archive_flag_spec='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + archive_cmds='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + whole_archive_flag_spec='--whole-archive$convenience --no-whole-archive' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + archive_cmds='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o 
$lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + ld_shlibs=no + fi + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [01].* | *\ 2.[0-9].* | *\ 2.1[0-5].*) + ld_shlibs=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. 
+ if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + ;; + + sunos4*) + archive_cmds='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + ld_shlibs=no + fi + ;; + esac + + if test "$ld_shlibs" = no; then + runpath_var= + hardcode_libdir_flag_spec= + export_dynamic_flag_spec= + whole_archive_flag_spec= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + allow_undefined_flag=unsupported + always_export_symbols=yes + archive_expsym_cmds='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + hardcode_minus_L=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + hardcode_direct=unsupported + fi + ;; + + aix[4-9]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global + # defined symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. 
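[Editor's aside: note that this hunk detects linker features by grepping the linker's own output rather than test-linking. A standalone sketch of the probe pattern, assuming a GNU-flavoured ld on PATH:

    LD=${LD-ld}
    # Same grep the hunk applies before enabling '$CC -shared ... -soname':
    if $LD --help 2>&1 | grep ': supported targets:.* elf' >/dev/null; then
      echo 'ELF linker detected'
    fi
    # Version sniffing, as in the supports_anon_versioning case earlier:
    case `$LD -v 2>&1` in
      *GNU\ gold*) echo 'gold: anonymous version scripts are safe' ;;
      *)           echo 'consult the binutils version checks above' ;;
    esac

The hunk resumes below.]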
+ + archive_cmds='' + hardcode_direct=yes + hardcode_direct_absolute=yes + hardcode_libdir_separator=':' + link_all_deplibs=yes + file_list_spec='${wl}-f,' + + if test "$GCC" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L=yes + hardcode_libdir_flag_spec='-L$libdir' + hardcode_libdir_separator= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + link_all_deplibs=no + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + export_dynamic_flag_spec='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. + always_export_symbols=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath_+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. 
+ if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + archive_expsym_cmds='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag="-z nodefs" + archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath_+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath_=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath_"; then + lt_cv_aix_libpath_="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath_ +fi + + hardcode_libdir_flag_spec='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag=' ${wl}-bernotok' + allow_undefined_flag=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec='$convenience' + fi + archive_cmds_need_lc=yes + # This is similar to how AIX traditionally builds its shared libraries. 
+ archive_expsym_cmds="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds='' + ;; + m68k) + archive_cmds='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + ;; + esac + ;; + + bsdi[45]*) + export_dynamic_flag_spec=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + case $cc_basename in + cl*) + # Native MSVC + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + always_export_symbols=yes + file_list_spec='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + archive_expsym_cmds='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, )='true' + enable_shared_with_static_runtimes=yes + exclude_expsyms='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + export_symbols_cmds='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1,DATA/'\'' | $SED -e '\''/^[AITW][ ]/s/.*[ ]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + old_postinstall_cmds='chmod 644 $oldlib' + postlink_cmds='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC wrapper + hardcode_libdir_flag_spec=' ' + allow_undefined_flag=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ archive_cmds='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + old_archive_from_new_cmds='true' + # FIXME: Should let the user specify the lib program. + old_archive_cmds='lib -OUT:$oldlib$oldobjs$old_deplibs' + enable_shared_with_static_runtimes=yes + ;; + esac + ;; + + darwin* | rhapsody*) + + + archive_cmds_need_lc=no + hardcode_direct=no + hardcode_automatic=yes + hardcode_shlibpath_var=unsupported + if test "$lt_cv_ld_force_load" = "yes"; then + whole_archive_flag_spec='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec='' + fi + link_all_deplibs=yes + allow_undefined_flag="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all + archive_cmds="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + module_cmds="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + archive_expsym_cmds="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + module_expsym_cmds="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + + else + ld_shlibs=no + fi + + ;; + + dgux*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). + freebsd2.2*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2.*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. 
+ freebsd* | dragonfly*) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + hpux9*) + if test "$GCC" = yes; then + archive_cmds='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + archive_cmds='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + export_dynamic_flag_spec='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + archive_cmds='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $CC understands -b" >&5 +$as_echo_n "checking if $CC understands -b... " >&6; } +if ${lt_cv_prog_compiler__b+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler__b=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -b" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
+ cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler__b=yes + fi + else + lt_cv_prog_compiler__b=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler__b" >&5 +$as_echo "$lt_cv_prog_compiler__b" >&6; } + +if test x"$lt_cv_prog_compiler__b" = xyes; then + archive_cmds='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' +else + archive_cmds='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' +fi + + ;; + esac + fi + if test "$with_gnu_ld" = no; then + hardcode_libdir_flag_spec='${wl}+b ${wl}$libdir' + hardcode_libdir_separator=: + + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct=no + hardcode_shlibpath_var=no + ;; + *) + hardcode_direct=yes + hardcode_direct_absolute=yes + export_dynamic_flag_spec='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + hardcode_minus_L=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + archive_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $host_os linker accepts -exported_symbol" >&5 +$as_echo_n "checking whether the $host_os linker accepts -exported_symbol... " >&6; } +if ${lt_cv_irix_exported_symbol+:} false; then : + $as_echo_n "(cached) " >&6 +else + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +int foo (void) { return 0; } +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + lt_cv_irix_exported_symbol=yes +else + lt_cv_irix_exported_symbol=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS="$save_LDFLAGS" +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_irix_exported_symbol" >&5 +$as_echo "$lt_cv_irix_exported_symbol" >&6; } + if test "$lt_cv_irix_exported_symbol" = yes; then + archive_expsym_cmds='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' + fi + else + archive_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + inherit_rpath=yes + link_all_deplibs=yes + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + archive_cmds='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_direct=yes + hardcode_shlibpath_var=no + ;; + + newsos6) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + hardcode_shlibpath_var=no + ;; + + *nto* | *qnx*) + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + hardcode_direct=yes + hardcode_shlibpath_var=no + hardcode_direct_absolute=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + export_dynamic_flag_spec='${wl}-E' + else + case $host_os in + openbsd[01].* | openbsd2.[0-7] | openbsd2.[0-7].*) + archive_cmds='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-R$libdir' + ;; + *) + archive_cmds='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + hardcode_libdir_flag_spec='${wl}-rpath,$libdir' + ;; + esac + fi + else + ld_shlibs=no + fi + ;; + + os2*) + hardcode_libdir_flag_spec='-L$libdir' + hardcode_minus_L=yes + allow_undefined_flag=unsupported + archive_cmds='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + old_archive_from_new_cmds='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = 
yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + fi + archive_cmds_need_lc='no' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + allow_undefined_flag=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec='${wl}-rpath ${wl}$libdir' + else + allow_undefined_flag=' -expect_unresolved \*' + archive_cmds='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + hardcode_libdir_flag_spec='-rpath $libdir' + fi + archive_cmds_need_lc='no' + hardcode_libdir_separator=: + ;; + + solaris*) + no_undefined_flag=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' + archive_cmds='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + archive_cmds='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='${wl}' + archive_cmds='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + hardcode_libdir_flag_spec='-R$libdir' + hardcode_shlibpath_var=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but 
understands `-z linker_flag'. GCC discards it without `$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test "$GCC" = yes; then + whole_archive_flag_spec='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + else + whole_archive_flag_spec='-z allextract$convenience -z defaultextract' + fi + ;; + esac + link_all_deplibs=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + archive_cmds='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + hardcode_libdir_flag_spec='-L$libdir' + hardcode_direct=yes + hardcode_minus_L=yes + hardcode_shlibpath_var=no + ;; + + sysv4) + case $host_vendor in + sni) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. + archive_cmds='$LD -G -o $lib $libobjs $deplibs $linker_flags' + reload_cmds='$CC -r -o $output$reload_objs' + hardcode_direct=no + ;; + motorola) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_direct=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + hardcode_shlibpath_var=no + ;; + + sysv4.3*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + export_dynamic_flag_spec='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_shlibpath_var=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + ld_shlibs=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag='${wl}-z,text' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
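[Editor's aside: several branches above (the anonymous-versioning case for GNU ld and the solaris* mapfile case) wrap the export-symbols list in a linker version script. A minimal sketch, assuming a hypothetical one-symbol-per-line export.sym and an object demo.o:

    { echo '{ global:'
      sed -e 's/\(.*\)/\1;/' export.sym     # foo -> foo;
      echo 'local: *; };'
    } > libdemo.ver
    # GNU ld spelling; Solaris ld takes the same file via -M instead:
    cc -shared -Wl,-soname,libdemo.so.1 \
       -Wl,--version-script,libdemo.ver -o libdemo.so.1 demo.o

The hunk resumes below.]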
+ no_undefined_flag='${wl}-z,text' + allow_undefined_flag='${wl}-z,nodefs' + archive_cmds_need_lc=no + hardcode_shlibpath_var=no + hardcode_libdir_flag_spec='${wl}-R,$libdir' + hardcode_libdir_separator=':' + link_all_deplibs=yes + export_dynamic_flag_spec='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + archive_cmds='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + archive_cmds='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + archive_cmds='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + hardcode_libdir_flag_spec='-L$libdir' + hardcode_shlibpath_var=no + ;; + + *) + ld_shlibs=no + ;; + esac + + if test x$host_vendor = xsni; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + export_dynamic_flag_spec='${wl}-Blargedynsym' + ;; + esac + fi + fi + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs" >&5 +$as_echo "$ld_shlibs" >&6; } +test "$ld_shlibs" = no && can_build_shared=no + +with_gnu_ld=$with_gnu_ld + + + + + + + + + + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +$as_echo_n "checking whether -lc should be explicitly linked in... " >&6; } +if ${lt_cv_archive_cmds_need_lc+:} false; then : + $as_echo_n "(cached) " >&6 +else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl + pic_flag=$lt_prog_compiler_pic + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag + allow_undefined_flag= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? 
= $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc=no + else + lt_cv_archive_cmds_need_lc=yes + fi + allow_undefined_flag=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc" >&5 +$as_echo "$lt_cv_archive_cmds_need_lc" >&6; } + archive_cmds_need_lc=$lt_cv_archive_cmds_need_lc + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +$as_echo_n "checking dynamic linker characteristics... " >&6; } + +if test "$GCC" = yes; then + case $host_os in + darwin*) lt_awk_arg="/^libraries:/,/LR/" ;; + *) lt_awk_arg="/^libraries:/" ;; + esac + case $host_os in + mingw* | cegcc*) lt_sed_strip_eq="s,=\([A-Za-z]:\),\1,g" ;; + *) lt_sed_strip_eq="s,=/,/,g" ;; + esac + lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq` + case $lt_search_path_spec in + *\;*) + # if the path contains ";" then we assume it to be the separator + # otherwise default to the standard path separator (i.e. ":") - it is + # assumed that no part of a normal pathname contains ";" but that should + # okay in the real world where ";" in dirpaths is itself problematic. + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'` + ;; + *) + lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"` + ;; + esac + # Ok, now we have the path, separated by spaces, we can step through it + # and add multilib dir if necessary. + lt_tmp_lt_search_path_spec= + lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path/$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" + else + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS=" "; FS="/|\n";} { + lt_foo=""; + lt_count=0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo="/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[lt_foo]++; } + if (lt_freq[lt_foo] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. 
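[Editor's aside: the dynamic-linker block above derives the library search path from the compiler rather than hardcoding it. A sketch of the same extraction, assuming gcc and ':' as PATH_SEPARATOR:

    # strip the 'libraries:' prefix and '=' sysroot markers, one dir per word
    gcc -print-search-dirs | sed -n 's/^libraries: *//p' \
      | sed -e 's,=/,/,g' -e 's/:/ /g'
    gcc -print-multi-os-directory 2>/dev/null   # multilib suffix, e.g. ../lib64

The hunk resumes below.]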
+ case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's,/\([A-Za-z]:\),\1,g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. 
+ finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api" + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + library_names_spec='${libname}.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) 
so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec="$LIB" + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib" + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. 
+ if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... + postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be glibc/ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if ${lt_cv_shlibpath_overrides_runpath+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
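A note on the DT_RUNPATH probe above (sketch, not patch content): it links a trivial program with the hardcode flag pointing at a dummy libdir and greps the dynamic section for RUNPATH, because binutils configured with new-style dtags emits DT_RUNPATH instead of DT_RPATH. The same observation can be made by hand on any linked binary (file names here are hypothetical):

    cc -Wl,-rpath,/opt/dummy -o demo demo.c
    objdump -p demo | grep -E 'RPATH|RUNPATH'
    # RUNPATH present: new-style dtags; LD_LIBRARY_PATH is searched before the
    #                  baked-in path, so shlibpath_overrides_runpath=yes
    # RPATH present:   old-style dtags; the baked-in path wins, so =no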
+ dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
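The netbsd* branch above and the openbsd* branch below both lean on the `echo __ELF__ | $CC -E -` idiom, which is worth spelling out: an ELF toolchain predefines the `__ELF__` macro, so the preprocessor rewrites the token and it does not survive to the output, while an a.out toolchain passes it through verbatim. A standalone version of the same probe (compiler name assumed):

    if echo __ELF__ | cc -E - | grep __ELF__ >/dev/null; then
      echo "a.out toolchain: __ELF__ not predefined"
    else
      echo "ELF toolchain: cpp expanded __ELF__ away"
    fi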
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib 
/usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +$as_echo "$dynamic_linker" >&6; } +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +$as_echo_n "checking how to hardcode library paths into programs... " >&6; } +hardcode_action= +if test -n "$hardcode_libdir_flag_spec" || + test -n "$runpath_var" || + test "X$hardcode_automatic" = "Xyes" ; then + + # We can hardcode non-existent directories. + if test "$hardcode_direct" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, )" != no && + test "$hardcode_minus_L" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
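To summarize the decision being completed here (a schematic gloss, not patch content): hardcode_action ends up as one of three values, and only the relink case later forces enable_fast_install=no:

    case $hardcode_action in
      relink)      : 'hardcoding works only via -L, so the build-tree link bakes
                      in temporary paths; installed programs must be relinked' ;;
      immediate)   : 'rpaths (even to not-yet-existing dirs) can be baked in at
                      the first link; no relink needed' ;;
      unsupported) : 'no hardcoding mechanism; rely on the runtime search path' ;;
    esac

The remaining branch below records the unsupported case.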
+ hardcode_action=unsupported +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action" >&5 +$as_echo "$hardcode_action" >&6; } + +if test "$hardcode_action" = relink || + test "$inherit_rpath" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if ${ac_cv_lib_dl_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +else + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + +fi + + ;; + + *) + ac_fn_c_check_func "$LINENO" "shl_load" "ac_cv_func_shl_load" +if test "x$ac_cv_func_shl_load" = xyes; then : + lt_cv_dlopen="shl_load" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for shl_load in -ldld" >&5 +$as_echo_n "checking for shl_load in -ldld... " >&6; } +if ${ac_cv_lib_dld_shl_load+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
*/ +#ifdef __cplusplus +extern "C" +#endif +char shl_load (); +int +main () +{ +return shl_load (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dld_shl_load=yes +else + ac_cv_lib_dld_shl_load=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_shl_load" >&5 +$as_echo "$ac_cv_lib_dld_shl_load" >&6; } +if test "x$ac_cv_lib_dld_shl_load" = xyes; then : + lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld" +else + ac_fn_c_check_func "$LINENO" "dlopen" "ac_cv_func_dlopen" +if test "x$ac_cv_func_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -ldl" >&5 +$as_echo_n "checking for dlopen in -ldl... " >&6; } +if ${ac_cv_lib_dl_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldl $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_dl_dlopen=yes +else + ac_cv_lib_dl_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dl_dlopen" >&5 +$as_echo "$ac_cv_lib_dl_dlopen" >&6; } +if test "x$ac_cv_lib_dl_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dlopen in -lsvld" >&5 +$as_echo_n "checking for dlopen in -lsvld... " >&6; } +if ${ac_cv_lib_svld_dlopen+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-lsvld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char dlopen (); +int +main () +{ +return dlopen (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + ac_cv_lib_svld_dlopen=yes +else + ac_cv_lib_svld_dlopen=no +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext +LIBS=$ac_check_lib_save_LIBS +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_svld_dlopen" >&5 +$as_echo "$ac_cv_lib_svld_dlopen" >&6; } +if test "x$ac_cv_lib_svld_dlopen" = xyes; then : + lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld" +else + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for dld_link in -ldld" >&5 +$as_echo_n "checking for dld_link in -ldld... " >&6; } +if ${ac_cv_lib_dld_dld_link+:} false; then : + $as_echo_n "(cached) " >&6 +else + ac_check_lib_save_LIBS=$LIBS +LIBS="-ldld $LIBS" +cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. 
   builtin and then its argument prototype would still apply.  */
+#ifdef __cplusplus
+extern "C"
+#endif
+char dld_link ();
+int
+main ()
+{
+return dld_link ();
+  ;
+  return 0;
+}
+_ACEOF
+if ac_fn_c_try_link "$LINENO"; then :
+  ac_cv_lib_dld_dld_link=yes
+else
+  ac_cv_lib_dld_dld_link=no
+fi
+rm -f core conftest.err conftest.$ac_objext \
+    conftest$ac_exeext conftest.$ac_ext
+LIBS=$ac_check_lib_save_LIBS
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ac_cv_lib_dld_dld_link" >&5
+$as_echo "$ac_cv_lib_dld_dld_link" >&6; }
+if test "x$ac_cv_lib_dld_dld_link" = xyes; then :
+  lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+
+fi
+
+    ;;
+  esac
+
+  if test "x$lt_cv_dlopen" != xno; then
+    enable_dlopen=yes
+  else
+    enable_dlopen=no
+  fi
+
+  case $lt_cv_dlopen in
+  dlopen)
+    save_CPPFLAGS="$CPPFLAGS"
+    test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H"
+
+    save_LDFLAGS="$LDFLAGS"
+    wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\"
+
+    save_LIBS="$LIBS"
+    LIBS="$lt_cv_dlopen_libs $LIBS"
+
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a program can dlopen itself" >&5
+$as_echo_n "checking whether a program can dlopen itself... " >&6; }
+if ${lt_cv_dlopen_self+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  	  if test "$cross_compiling" = yes; then :
+  lt_cv_dlopen_self=cross
+else
+  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+  lt_status=$lt_dlunknown
+  cat > conftest.$ac_ext <<_LT_EOF
+#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+#  define LT_DLGLOBAL		RTLD_GLOBAL
+#else
+#  ifdef DL_GLOBAL
+#    define LT_DLGLOBAL		DL_GLOBAL
+#  else
+#    define LT_DLGLOBAL		0
+#  endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+   find out it does not work in some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+#  ifdef RTLD_LAZY
+#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
+#  else
+#    ifdef DL_LAZY
+#      define LT_DLLAZY_OR_NOW		DL_LAZY
+#    else
+#      ifdef RTLD_NOW
+#        define LT_DLLAZY_OR_NOW	RTLD_NOW
+#      else
+#        ifdef DL_NOW
+#          define LT_DLLAZY_OR_NOW	DL_NOW
+#        else
+#          define LT_DLLAZY_OR_NOW	0
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+
+/* When -fvisbility=hidden is used, assume the code has been annotated
+   correspondingly for the symbols needed.  */
+#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+  int status = $lt_dlunknown;
+
+  if (self)
+    {
+      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
+      else
+        {
+	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
+          else puts (dlerror ());
+	}
+      /* dlclose (self); */
+    }
+  else
+    puts (dlerror ());
+
+  return status;
+}
+_LT_EOF
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+  (eval $ac_link) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then
+    (./conftest; exit; ) >&5 2>/dev/null
+    lt_status=$?
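For reference (sketch, not patch content): the conftest program above encodes its findings in its exit status, which the case statement that follows decodes. Rerunning the probe by hand looks roughly like this, assuming the heredoc body was saved as conftest.c and that, as on glibc, -ldl and an export-dynamic link are needed:

    cc -rdynamic -o conftest conftest.c -ldl && ./conftest
    case $? in
      1) echo "dlsym found fnord: self-dlopen works" ;;              # lt_dlno_uscore
      2) echo "dlsym found _fnord: works, with leading underscore" ;; # lt_dlneed_uscore
      *) echo "dlopen/dlsym failed: self-dlopen unsupported" ;;       # lt_dlunknown
    esac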
+  case x$lt_status in
+    x$lt_dlno_uscore) lt_cv_dlopen_self=yes ;;
+    x$lt_dlneed_uscore) lt_cv_dlopen_self=yes ;;
+    x$lt_dlunknown|x*) lt_cv_dlopen_self=no ;;
+  esac
+  else :
+    # compilation failed
+    lt_cv_dlopen_self=no
+  fi
+fi
+rm -fr conftest*
+
+
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self" >&5
+$as_echo "$lt_cv_dlopen_self" >&6; }
+
+    if test "x$lt_cv_dlopen_self" = xyes; then
+      wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\"
+      { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether a statically linked program can dlopen itself" >&5
+$as_echo_n "checking whether a statically linked program can dlopen itself... " >&6; }
+if ${lt_cv_dlopen_self_static+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  	  if test "$cross_compiling" = yes; then :
+  lt_cv_dlopen_self_static=cross
+else
+  lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+  lt_status=$lt_dlunknown
+  cat > conftest.$ac_ext <<_LT_EOF
+#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+#  define LT_DLGLOBAL		RTLD_GLOBAL
+#else
+#  ifdef DL_GLOBAL
+#    define LT_DLGLOBAL		DL_GLOBAL
+#  else
+#    define LT_DLGLOBAL		0
+#  endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+   find out it does not work in some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+#  ifdef RTLD_LAZY
+#    define LT_DLLAZY_OR_NOW		RTLD_LAZY
+#  else
+#    ifdef DL_LAZY
+#      define LT_DLLAZY_OR_NOW		DL_LAZY
+#    else
+#      ifdef RTLD_NOW
+#        define LT_DLLAZY_OR_NOW	RTLD_NOW
+#      else
+#        ifdef DL_NOW
+#          define LT_DLLAZY_OR_NOW	DL_NOW
+#        else
+#          define LT_DLLAZY_OR_NOW	0
+#        endif
+#      endif
+#    endif
+#  endif
+#endif
+
+/* When -fvisbility=hidden is used, assume the code has been annotated
+   correspondingly for the symbols needed.  */
+#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3))
+int fnord () __attribute__((visibility("default")));
+#endif
+
+int fnord () { return 42; }
+int main ()
+{
+  void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW);
+  int status = $lt_dlunknown;
+
+  if (self)
+    {
+      if (dlsym (self,"fnord"))       status = $lt_dlno_uscore;
+      else
+        {
+	  if (dlsym( self,"_fnord"))  status = $lt_dlneed_uscore;
+          else puts (dlerror ());
+	}
+      /* dlclose (self); */
+    }
+  else
+    puts (dlerror ());
+
+  return status;
+}
+_LT_EOF
+  if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_link\""; } >&5
+  (eval $ac_link) 2>&5
+  ac_status=$?
+  $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5
+  test $ac_status = 0; } && test -s conftest${ac_exeext} 2>/dev/null; then
+    (./conftest; exit; ) >&5 2>/dev/null
+    lt_status=$?
+ case x$lt_status in + x$lt_dlno_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlneed_uscore) lt_cv_dlopen_self_static=yes ;; + x$lt_dlunknown|x*) lt_cv_dlopen_self_static=no ;; + esac + else : + # compilation failed + lt_cv_dlopen_self_static=no + fi +fi +rm -fr conftest* + + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_dlopen_self_static" >&5 +$as_echo "$lt_cv_dlopen_self_static" >&6; } + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi + + + + + + + + + + + + + + + + + +striplib= +old_striplib= +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking whether stripping libraries is possible" >&5 +$as_echo_n "checking whether stripping libraries is possible... " >&6; } +if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then + test -z "$old_striplib" && old_striplib="$STRIP --strip-debug" + test -z "$striplib" && striplib="$STRIP --strip-unneeded" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } +else +# FIXME - insert some real tests, host_os isn't really good enough + case $host_os in + darwin*) + if test -n "$STRIP" ; then + striplib="$STRIP -x" + old_striplib="$STRIP -S" + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + ;; + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + ;; + esac +fi + + + + + + + + + + + + + # Report which library types will actually be built + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if libtool supports shared libraries" >&5 +$as_echo_n "checking if libtool supports shared libraries... " >&6; } + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $can_build_shared" >&5 +$as_echo "$can_build_shared" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build shared libraries" >&5 +$as_echo_n "checking whether to build shared libraries... " >&6; } + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[4-9]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_shared" >&5 +$as_echo "$enable_shared" >&6; } + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to build static libraries" >&5 +$as_echo_n "checking whether to build static libraries... " >&6; } + # Make sure either enable_shared or enable_static is yes. 
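A note on the strip flags just probed (sketch, hypothetical file names): with GNU binutils the safe combination is --strip-debug for static archives and --strip-unneeded for shared objects, while Apple's strip uses different spellings, which the darwin* fallback above hard-codes:

    strip --strip-debug    libfoo.a       # old_striplib: static archives
    strip --strip-unneeded libfoo.so.1    # striplib: shared objects
    # Apple strip equivalents (darwin* branch):
    strip -S libfoo.a
    strip -x libfoo.1.dylib

The guard below then makes sure at least one of the two library types stays enabled.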
+ test "$enable_shared" = yes || enable_static=yes + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $enable_static" >&5 +$as_echo "$enable_static" >&6; } + + + + +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +CC="$lt_save_CC" + + if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking how to run the C++ preprocessor" >&5 +$as_echo_n "checking how to run the C++ preprocessor... " >&6; } +if test -z "$CXXCPP"; then + if ${ac_cv_prog_CXXCPP+:} false; then : + $as_echo_n "(cached) " >&6 +else + # Double quotes because CXXCPP needs to be expanded + for CXXCPP in "$CXX -E" "/lib/cpp" + do + ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. +continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + break +fi + + done + ac_cv_prog_CXXCPP=$CXXCPP + +fi + CXXCPP=$ac_cv_prog_CXXCPP +else + ac_cv_prog_CXXCPP=$CXXCPP +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $CXXCPP" >&5 +$as_echo "$CXXCPP" >&6; } +ac_preproc_ok=false +for ac_cxx_preproc_warn_flag in '' yes +do + # Use a header file that comes with gcc, so configuring glibc + # with a fresh cross-compiler works. + # Prefer to if __STDC__ is defined, since + # exists even on freestanding compilers. + # On the NeXT, cc -E runs the code through the compiler's parser, + # not just through cpp. "Syntax error" is here to catch this case. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#ifdef __STDC__ +# include +#else +# include +#endif + Syntax error +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + +else + # Broken: fails on valid input. +continue +fi +rm -f conftest.err conftest.i conftest.$ac_ext + + # OK, works on sane cases. Now check whether nonexistent headers + # can be detected and how. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include +_ACEOF +if ac_fn_cxx_try_cpp "$LINENO"; then : + # Broken: success on invalid input. 
+continue +else + # Passes both tests. +ac_preproc_ok=: +break +fi +rm -f conftest.err conftest.i conftest.$ac_ext + +done +# Because of `break', _AC_PREPROC_IFELSE's cleaning code was skipped. +rm -f conftest.i conftest.err conftest.$ac_ext +if $ac_preproc_ok; then : + +else + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "C++ preprocessor \"$CXXCPP\" fails sanity check +See \`config.log' for more details" "$LINENO" 5; } +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +else + _lt_caught_CXX_error=yes +fi + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + +archive_cmds_need_lc_CXX=no +allow_undefined_flag_CXX= +always_export_symbols_CXX=no +archive_expsym_cmds_CXX= +compiler_needs_object_CXX=no +export_dynamic_flag_spec_CXX= +hardcode_direct_CXX=no +hardcode_direct_absolute_CXX=no +hardcode_libdir_flag_spec_CXX= +hardcode_libdir_separator_CXX= +hardcode_minus_L_CXX=no +hardcode_shlibpath_var_CXX=unsupported +hardcode_automatic_CXX=no +inherit_rpath_CXX=no +module_cmds_CXX= +module_expsym_cmds_CXX= +link_all_deplibs_CXX=unknown +old_archive_cmds_CXX=$old_archive_cmds +reload_flag_CXX=$reload_flag +reload_cmds_CXX=$reload_cmds +no_undefined_flag_CXX= +whole_archive_flag_spec_CXX= +enable_shared_with_static_runtimes_CXX=no + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +objext_CXX=$objext + +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_caught_CXX_error" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + + + + + + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC + + + # save warnings/boilerplate of simple test code + ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* + + ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* + + + # Allow CC to be a program name with arguments. 
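The boilerplate capture just above deserves a gloss (sketch, not patch content): configure compiles and links a trivial file once and saves whatever the compiler prints to stderr, so that later flag probes can ignore that constant chatter and treat only new output as failure. Schematically, with a hypothetical candidate flag:

    echo 'int some_variable = 0;' > conftest.c
    cc -c conftest.c 2>baseline.err                 # noise emitted on every compile
    cc -c -fcandidate-flag conftest.c 2>probe.err   # hypothetical flag under test
    # accept the flag only if it added nothing beyond the baseline
    cmp -s baseline.err probe.err && echo "flag accepted"

This is simplified against the $SED filtering used above. With the baseline saved, the C++ tag below swaps the compiler variables over to $CXX.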
+  lt_save_CC=$CC
+  lt_save_CFLAGS=$CFLAGS
+  lt_save_LD=$LD
+  lt_save_GCC=$GCC
+  GCC=$GXX
+  lt_save_with_gnu_ld=$with_gnu_ld
+  lt_save_path_LD=$lt_cv_path_LD
+  if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then
+    lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx
+  else
+    $as_unset lt_cv_prog_gnu_ld
+  fi
+  if test -n "${lt_cv_path_LDCXX+set}"; then
+    lt_cv_path_LD=$lt_cv_path_LDCXX
+  else
+    $as_unset lt_cv_path_LD
+  fi
+  test -z "${LDCXX+set}" || LD=$LDCXX
+  CC=${CXX-"c++"}
+  CFLAGS=$CXXFLAGS
+  compiler=$CC
+  compiler_CXX=$CC
+  for cc_temp in $compiler""; do
+  case $cc_temp in
+    compile | *[\\/]compile | ccache | *[\\/]ccache ) ;;
+    distcc | *[\\/]distcc | purify | *[\\/]purify ) ;;
+    \-*) ;;
+    *) break;;
+  esac
+done
+cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"`
+
+
+  if test -n "$compiler"; then
+    # We don't want -fno-exception when compiling C++ code, so set the
+    # no_builtin_flag separately
+    if test "$GXX" = yes; then
+      lt_prog_compiler_no_builtin_flag_CXX=' -fno-builtin'
+    else
+      lt_prog_compiler_no_builtin_flag_CXX=
+    fi
+
+    if test "$GXX" = yes; then
+      # Set up default GNU C++ configuration
+
+
+
+# Check whether --with-gnu-ld was given.
+if test "${with_gnu_ld+set}" = set; then :
+  withval=$with_gnu_ld; test "$withval" = no || with_gnu_ld=yes
+else
+  with_gnu_ld=no
+fi
+
+ac_prog=ld
+if test "$GCC" = yes; then
+  # Check if gcc -print-prog-name=ld gives a path.
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for ld used by $CC" >&5
+$as_echo_n "checking for ld used by $CC... " >&6; }
+  case $host in
+  *-*-mingw*)
+    # gcc leaves a trailing carriage return which upsets mingw
+    ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+  *)
+    ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+  esac
+  case $ac_prog in
+    # Accept absolute paths.
+    [\\/]* | ?:[\\/]*)
+      re_direlt='/[^/][^/]*/\.\./'
+      # Canonicalize the pathname of ld
+      ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+      while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+	ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+      done
+      test -z "$LD" && LD="$ac_prog"
+      ;;
+  "")
+    # If it fails, then pretend we aren't using GCC.
+    ac_prog=ld
+    ;;
+  *)
+    # If it is relative, then search for the first ld in PATH.
+    with_gnu_ld=unknown
+    ;;
+  esac
+elif test "$with_gnu_ld" = yes; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for GNU ld" >&5
+$as_echo_n "checking for GNU ld... " >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: checking for non-GNU ld" >&5
+$as_echo_n "checking for non-GNU ld... " >&6; }
+fi
+if ${lt_cv_path_LD+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  if test -z "$LD"; then
+  lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+  for ac_dir in $PATH; do
+    IFS="$lt_save_ifs"
+    test -z "$ac_dir" && ac_dir=.
+    if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+      lt_cv_path_LD="$ac_dir/$ac_prog"
+      # Check to see if the program is GNU ld.  I'd rather use --version,
+      # but apparently some variants of GNU ld only accept -v.
+      # Break only if it was the GNU/non-GNU ld that we prefer.
+      case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+      *GNU* | *'with BFD'*)
+	test "$with_gnu_ld" != no && break
+	;;
+      *)
+	test "$with_gnu_ld" != yes && break
+	;;
+      esac
+    fi
+  done
+  IFS="$lt_save_ifs"
+else
+  lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi
+fi
+
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: $LD" >&5
+$as_echo "$LD" >&6; }
+else
+  { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5
+$as_echo "no" >&6; }
+fi
+test -z "$LD" && as_fn_error $? "no acceptable ld found in \$PATH" "$LINENO" 5
+{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if the linker ($LD) is GNU ld" >&5
+$as_echo_n "checking if the linker ($LD) is GNU ld... " >&6; }
+if ${lt_cv_prog_gnu_ld+:} false; then :
+  $as_echo_n "(cached) " >&6
+else
+  # I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+  lt_cv_prog_gnu_ld=yes
+  ;;
+*)
+  lt_cv_prog_gnu_ld=no
+  ;;
+esac
+fi
+{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_gnu_ld" >&5
+$as_echo "$lt_cv_prog_gnu_ld" >&6; }
+with_gnu_ld=$lt_cv_prog_gnu_ld
+
+
+
+
+
+
+      # Check if GNU C++ uses GNU ld as the underlying linker, since the
+      # archiving commands below assume that GNU ld is being used.
+      if test "$with_gnu_ld" = yes; then
+        archive_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+        archive_expsym_cmds_CXX='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+
+        hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir'
+        export_dynamic_flag_spec_CXX='${wl}--export-dynamic'
+
+        # If archive_cmds runs LD, not CC, wlarc should be empty
+        # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to
+        #     investigate it a little bit more. (MM)
+        wlarc='${wl}'
+
+        # ancient GNU ld didn't support --whole-archive et. al.
+        if eval "`$CC -print-prog-name=ld` --help 2>&1" |
+	  $GREP 'no-whole-archive' > /dev/null; then
+          whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive'
+        else
+          whole_archive_flag_spec_CXX=
+        fi
+      else
+        with_gnu_ld=no
+        wlarc=
+
+        # A generic and very simple default shared library creation
+        # command for GNU C++ for the case where it uses the native
+        # linker, instead of GNU ld.  If possible, this setting should
+        # overridden to take advantage of the native linker features on
+        # the platform it is being used on.
+        archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib'
+      fi
+
+      # Commands to make compiler produce verbose output that lists
+      # what "hidden" libraries, object files and flags are used when
+      # linking a shared library.
+      output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"'
+
+    else
+      GXX=no
+      with_gnu_ld=no
+      wlarc=
+    fi
+
+    # PORTME: fill in a description of your system's C++ link characteristics
+    { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5
+$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; }
+    ld_shlibs_CXX=yes
+    case $host_os in
+      aix3*)
+        # FIXME: insert proper C++ library support
+        ld_shlibs_CXX=no
+        ;;
+      aix[4-9]*)
+        if test "$host_cpu" = ia64; then
+          # On IA64, the linker does run time linking by default, so we don't
+          # have to do anything special.
+          aix_use_runtimelinking=no
+          exp_sym_flag='-Bexport'
+          no_entry_flag=""
+        else
+          aix_use_runtimelinking=no
+
+          # Test if we are trying to use run time linking or normal
+          # AIX style linking. If -brtl is somewhere in LDFLAGS, we
+          # need to do runtime linking.
+          case $host_os in aix4.[23]|aix4.[23].*|aix[5-9]*)
+	    for ld_flag in $LDFLAGS; do
+	      case $ld_flag in
+	      *-brtl*)
+	        aix_use_runtimelinking=yes
+	        break
+	        ;;
+	      esac
+	    done
+	    ;;
+          esac
+
+          exp_sym_flag='-bexport'
+          no_entry_flag='-bnoentry'
+        fi
+
+        # When large executables or shared objects are built, AIX ld can
+        # have problems creating the table of contents.  If linking a library
+        # or program results in "error TOC overflow" add -mminimal-toc to
+        # CXXFLAGS/CFLAGS for g++/gcc.
In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + archive_cmds_CXX='' + hardcode_direct_CXX=yes + hardcode_direct_absolute_CXX=yes + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + file_list_spec_CXX='${wl}-f,' + + if test "$GXX" = yes; then + case $host_os in aix4.[012]|aix4.[012].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + hardcode_direct_CXX=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + hardcode_minus_L_CXX=yes + hardcode_libdir_flag_spec_CXX='-L$libdir' + hardcode_libdir_separator_CXX= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + export_dynamic_flag_spec_CXX='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + always_export_symbols_CXX=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + allow_undefined_flag_CXX='-berok' + # Determine the default libpath from the value encoded in an empty + # executable. + if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath__CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. 
+ if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath__CXX +fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + + archive_expsym_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + hardcode_libdir_flag_spec_CXX='${wl}-R $libdir:/usr/lib:/lib' + allow_undefined_flag_CXX="-z nodefs" + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + if ${lt_cv_aix_libpath__CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + + lt_aix_libpath_sed=' + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }' + lt_cv_aix_libpath__CXX=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + if test -z "$lt_cv_aix_libpath__CXX"; then + lt_cv_aix_libpath__CXX="/usr/lib:/lib" + fi + +fi + + aix_libpath=$lt_cv_aix_libpath__CXX +fi + + hardcode_libdir_flag_spec_CXX='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + no_undefined_flag_CXX=' ${wl}-bernotok' + allow_undefined_flag_CXX=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + whole_archive_flag_spec_CXX='$convenience' + fi + archive_cmds_need_lc_CXX=yes + # This is similar to how AIX traditionally builds its shared + # libraries. + archive_expsym_cmds_CXX="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + allow_undefined_flag_CXX=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. 
FIXME + archive_cmds_CXX='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + ld_shlibs_CXX=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + case $GXX,$cc_basename in + ,cl* | no,cl*) + # Native MSVC + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + hardcode_libdir_flag_spec_CXX=' ' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=yes + file_list_spec_CXX='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + archive_cmds_CXX='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, CXX)='true' + enable_shared_with_static_runtimes_CXX=yes + # Don't use ranlib + old_postinstall_cmds_CXX='chmod 644 $oldlib' + postlink_cmds_CXX='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + func_to_tool_file "$lt_outputfile"~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # g++ + # _LT_TAGVAR(hardcode_libdir_flag_spec, CXX) is actually meaningless, + # as there is no search path for DLLs. + hardcode_libdir_flag_spec_CXX='-L$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-all-symbols' + allow_undefined_flag_CXX=unsupported + always_export_symbols_CXX=no + enable_shared_with_static_runtimes_CXX=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + archive_cmds_CXX='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ archive_expsym_cmds_CXX='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + ld_shlibs_CXX=no + fi + ;; + esac + ;; + darwin* | rhapsody*) + + + archive_cmds_need_lc_CXX=no + hardcode_direct_CXX=no + hardcode_automatic_CXX=yes + hardcode_shlibpath_var_CXX=unsupported + if test "$lt_cv_ld_force_load" = "yes"; then + whole_archive_flag_spec_CXX='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + + else + whole_archive_flag_spec_CXX='' + fi + link_all_deplibs_CXX=yes + allow_undefined_flag_CXX="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all + archive_cmds_CXX="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + module_cmds_CXX="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + module_expsym_cmds_CXX="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + if test "$lt_cv_apple_cc_single_mod" != "yes"; then + archive_cmds_CXX="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" + archive_expsym_cmds_CXX="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" + fi + + else + ld_shlibs_CXX=no + fi + + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + freebsd2.*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + ld_shlibs_CXX=no + ;; + + freebsd-elf*) + archive_cmds_need_lc_CXX=no + ;; + + freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + ld_shlibs_CXX=yes + ;; + + gnu*) + ;; + + haiku*) + archive_cmds_CXX='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + link_all_deplibs_CXX=yes + ;; + + hpux9*) + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + 
hardcode_libdir_separator_CXX=: + export_dynamic_flag_spec_CXX='${wl}-E' + hardcode_direct_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + archive_cmds_CXX='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + archive_cmds_CXX='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + hardcode_libdir_flag_spec_CXX='${wl}+b ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + export_dynamic_flag_spec_CXX='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + ;; + *) + hardcode_direct_CXX=yes + hardcode_direct_absolute_CXX=yes + hardcode_minus_L_CXX=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + archive_cmds_CXX='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + interix[3-9]*) + hardcode_direct_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + archive_cmds_CXX='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + archive_expsym_cmds_CXX='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + archive_cmds_CXX='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' + fi + fi + link_all_deplibs_CXX=yes + ;; + esac + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + inherit_rpath_CXX=yes + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. 
(KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + archive_expsym_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc* | ecpc* ) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
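+	# (Editor's note, illustrative: the case below keys off the banner
+	# printed by `$CC -V`; a 7.x icpc is assumed to identify itself
+	# with "Version 7." somewhere in that banner, and anything else
+	# falls through to the 8.0-and-newer branch, which also passes
+	# -i_dynamic on ia64.)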
+ case `$CC -V 2>&1` in + *"Version 7."*) + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + *) # Version 8.0 or newer + tmp_idyn= + case $host_cpu in + ia64*) tmp_idyn=' -i_dynamic';; + esac + archive_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + ;; + esac + archive_cmds_need_lc_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + whole_archive_flag_spec_CXX='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + case `$CC -V` in + *pgCC\ [1-5].* | *pgcpp\ [1-5].*) + prelink_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~ + compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"' + old_archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~ + $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~ + $RANLIB $oldlib' + archive_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + archive_expsym_cmds_CXX='tpldir=Template.dir~ + rm -rf $tpldir~ + $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~ + $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + *) # Version 6 and above use weak symbols + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib' + ;; + esac + + hardcode_libdir_flag_spec_CXX='${wl}--rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + whole_archive_flag_spec_CXX='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + ;; + cxx*) + # Compaq C++ + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + archive_expsym_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols' + + runpath_var=LD_RUN_PATH + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that 
lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed' + ;; + xl* | mpixl* | bgxl*) + # IBM XL 8.0 on PPC, with GNU ld + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + export_dynamic_flag_spec_CXX='${wl}--export-dynamic' + archive_cmds_CXX='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + archive_expsym_cmds_CXX='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols' + hardcode_libdir_flag_spec_CXX='-R$libdir' + whole_archive_flag_spec_CXX='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + compiler_needs_object_CXX=yes + + # Not sure whether something based on + # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 + # would be better. + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
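+	    # (Editor's note, assumed example: `CC -xar -o libfoo.a a.o b.o`
+	    # stands in for the usual `ar cru libfoo.a a.o b.o`, letting the
+	    # compiler instantiate any pending templates before archiving.)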
+ old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + esac + ;; + esac + ;; + + lynxos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + m88k*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + archive_cmds_CXX='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + ld_shlibs_CXX=yes + ;; + + openbsd2*) + # C++ shared libraries are fairly broken + ld_shlibs_CXX=no + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + hardcode_direct_CXX=yes + hardcode_shlibpath_var_CXX=no + hardcode_direct_absolute_CXX=yes + archive_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + archive_expsym_cmds_CXX='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + export_dynamic_flag_spec_CXX='${wl}-E' + whole_archive_flag_spec_CXX="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd=func_echo_all + else + ld_shlibs_CXX=no + fi + ;; + + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + archive_cmds_CXX='tempext=`echo $shared_ext | $SED -e '\''s/\([^()0-9A-Za-z{}]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + hardcode_libdir_flag_spec_CXX='${wl}-rpath,$libdir' + hardcode_libdir_separator_CXX=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ case $host in + osf3*) old_archive_cmds_CXX='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) old_archive_cmds_CXX='$CC -o $oldlib $oldobjs' ;; + esac + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + cxx*) + case $host in + osf3*) + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + ;; + *) + allow_undefined_flag_CXX=' -expect_unresolved \*' + archive_cmds_CXX='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + archive_expsym_cmds_CXX='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~ + $RM $lib.exp' + hardcode_libdir_flag_spec_CXX='-rpath $libdir' + ;; + esac + + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + allow_undefined_flag_CXX=' ${wl}-expect_unresolved ${wl}\*' + case $host in + osf3*) + archive_cmds_CXX='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + *) + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + esac + + hardcode_libdir_flag_spec_CXX='${wl}-rpath ${wl}$libdir' + hardcode_libdir_separator_CXX=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + fi + ;; + esac + ;; + + psos*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + archive_cmds_need_lc_CXX=yes + no_undefined_flag_CXX=' -zdefs' + archive_cmds_CXX='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + hardcode_libdir_flag_spec_CXX='-R$libdir' + hardcode_shlibpath_var_CXX=no + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) + whole_archive_flag_spec_CXX='-z allextract$convenience -z defaultextract' + ;; + esac + link_all_deplibs_CXX=yes + + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + old_archive_cmds_CXX='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + archive_cmds_CXX='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + old_archive_cmds_CXX='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + no_undefined_flag_CXX=' ${wl}-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + archive_cmds_CXX='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. 
+ archive_cmds_CXX='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + archive_expsym_cmds_CXX='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + fi + + hardcode_libdir_flag_spec_CXX='${wl}-R $wl$libdir' + case $host_os in + solaris2.[0-5] | solaris2.[0-5].*) ;; + *) + whole_archive_flag_spec_CXX='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + ;; + esac + fi + ;; + esac + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[01].[10]* | unixware7* | sco3.2v5.0.[024]*) + no_undefined_flag_CXX='${wl}-z,text' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
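+      # (Editor's hedged summary: `-z defs` would make every symbol left
+      # for libc to resolve at run time a fatal error, while `-z text`
+      # only rejects relocations remaining in the read-only text segment,
+      # so it still flags common mistakes without breaking the link.)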
+ no_undefined_flag_CXX='${wl}-z,text' + allow_undefined_flag_CXX='${wl}-z,nodefs' + archive_cmds_need_lc_CXX=no + hardcode_shlibpath_var_CXX=no + hardcode_libdir_flag_spec_CXX='${wl}-R,$libdir' + hardcode_libdir_separator_CXX=':' + link_all_deplibs_CXX=yes + export_dynamic_flag_spec_CXX='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + archive_cmds_CXX='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + old_archive_cmds_CXX='$CC -Tprelink_objects $oldobjs~ + '"$old_archive_cmds_CXX" + reload_cmds_CXX='$CC -Tprelink_objects $reload_objs~ + '"$reload_cmds_CXX" + ;; + *) + archive_cmds_CXX='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + archive_expsym_cmds_CXX='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + ;; + + vxworks*) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + + *) + # FIXME: insert proper C++ library support + ld_shlibs_CXX=no + ;; + esac + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +$as_echo "$ld_shlibs_CXX" >&6; } + test "$ld_shlibs_CXX" = no && can_build_shared=no + + GCC_CXX="$GXX" + LD_CXX="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + # Dependencies to place before and after the object being linked: +predep_objects_CXX= +postdep_objects_CXX= +predeps_CXX= +postdeps_CXX= +compiler_lib_search_path_CXX= + +cat > conftest.$ac_ext <<_LT_EOF +class Foo +{ +public: + Foo (void) { a = 0; } +private: + int a; +}; +_LT_EOF + + +_lt_libdeps_save_CFLAGS=$CFLAGS +case "$CC $CFLAGS " in #( +*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;; +*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;; +*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;; +esac + +if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; }; then + # Parse the compiler output and extract the necessary + # objects, libraries and library flags. + + # Sentinel used to keep track of whether or not we are before + # the conftest object file. + pre_test_object_deps_done=no + + for p in `eval "$output_verbose_link_cmd"`; do + case ${prev}${p} in + + -L* | -R* | -l*) + # Some compilers place space between "-{L,R}" and the path. + # Remove the space. + if test $p = "-L" || + test $p = "-R"; then + prev=$p + continue + fi + + # Expand the sysroot to ease extracting the directories later. 
+	 if test -z "$prev"; then
+	   case $p in
+	   -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+	   -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+	   -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+	   esac
+	 fi
+	 case $p in
+	 =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+	 esac
+	 if test "$pre_test_object_deps_done" = no; then
+	   case ${prev} in
+	   -L | -R)
+	     # Internal compiler library paths should come after those
+	     # provided by the user. The postdeps already come after the
+	     # user-supplied libs so there is no need to process them.
+	     if test -z "$compiler_lib_search_path_CXX"; then
+	       compiler_lib_search_path_CXX="${prev}${p}"
+	     else
+	       compiler_lib_search_path_CXX="${compiler_lib_search_path_CXX} ${prev}${p}"
+	     fi
+	     ;;
+	   # The "-l" case would never come before the object being
+	   # linked, so don't bother handling this case.
+	   esac
+	 else
+	   if test -z "$postdeps_CXX"; then
+	     postdeps_CXX="${prev}${p}"
+	   else
+	     postdeps_CXX="${postdeps_CXX} ${prev}${p}"
+	   fi
+	 fi
+	 prev=
+	 ;;
+
+       *.lto.$objext) ;; # Ignore GCC LTO objects
+       *.$objext)
+	 # This assumes that the test object file only shows up
+	 # once in the compiler output.
+	 if test "$p" = "conftest.$objext"; then
+	   pre_test_object_deps_done=yes
+	   continue
+	 fi
+
+	 if test "$pre_test_object_deps_done" = no; then
+	   if test -z "$predep_objects_CXX"; then
+	     predep_objects_CXX="$p"
+	   else
+	     predep_objects_CXX="$predep_objects_CXX $p"
+	   fi
+	 else
+	   if test -z "$postdep_objects_CXX"; then
+	     postdep_objects_CXX="$p"
+	   else
+	     postdep_objects_CXX="$postdep_objects_CXX $p"
+	   fi
+	 fi
+	 ;;
+
+       *) ;; # Ignore the rest.
+
+       esac
+  done
+
+  # Clean up.
+  rm -f a.out a.exe
+else
+  echo "libtool.m4: error: problem compiling CXX test program"
+fi
+
+$RM -f conftest.$objext
+CFLAGS=$_lt_libdeps_save_CFLAGS
+
+# PORTME: override above test on systems where it is broken
+case $host_os in
+interix[3-9]*)
+  # Interix 3.5 installs completely hosed .la files for C++, so rather than
+  # hack all around it, let's just trust "g++" to DTRT.
+  predep_objects_CXX=
+  postdep_objects_CXX=
+  postdeps_CXX=
+  ;;
+
+linux*)
+  case `$CC -V 2>&1 | sed 5q` in
+  *Sun\ C*)
+    # Sun C++ 5.9
+
+    # The more standards-conforming stlport4 library is
+    # incompatible with the Cstd library. Avoid specifying
+    # it if it's in CXXFLAGS. Ignore libCrun as
+    # -library=stlport4 depends on it.
+    case " $CXX $CXXFLAGS " in
+    *" -library=stlport4 "*)
+      solaris_use_stlport4=yes
+      ;;
+    esac
+
+    if test "$solaris_use_stlport4" != yes; then
+      postdeps_CXX='-library=Cstd -library=Crun'
+    fi
+    ;;
+  esac
+  ;;
+
+solaris*)
+  case $cc_basename in
+  CC* | sunCC*)
+    # The more standards-conforming stlport4 library is
+    # incompatible with the Cstd library. Avoid specifying
+    # it if it's in CXXFLAGS. Ignore libCrun as
+    # -library=stlport4 depends on it.
+    case " $CXX $CXXFLAGS " in
+    *" -library=stlport4 "*)
+      solaris_use_stlport4=yes
+      ;;
+    esac
+
+    # Adding this requires a known-good setup of shared libraries for
+    # Sun compiler versions before 5.6, else PIC objects from an old
+    # archive will be linked into the output, leading to subtle bugs.
+ if test "$solaris_use_stlport4" != yes; then + postdeps_CXX='-library=Cstd -library=Crun' + fi + ;; + esac + ;; +esac + + +case " $postdeps_CXX " in +*" -lc "*) archive_cmds_need_lc_CXX=no ;; +esac + compiler_lib_search_dirs_CXX= +if test -n "${compiler_lib_search_path_CXX}"; then + compiler_lib_search_dirs_CXX=`echo " ${compiler_lib_search_path_CXX}" | ${SED} -e 's! -L! !g' -e 's!^ !!'` +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + lt_prog_compiler_wl_CXX= +lt_prog_compiler_pic_CXX= +lt_prog_compiler_static_CXX= + + + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + lt_prog_compiler_pic_CXX='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + lt_prog_compiler_pic_CXX='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + lt_prog_compiler_pic_CXX='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + lt_prog_compiler_pic_CXX= + ;; + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + lt_prog_compiler_static_CXX= + ;; + interix[3-9]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + sysv4*MP*) + if test -d /usr/nec; then + lt_prog_compiler_pic_CXX=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic_CXX='-fPIC -shared' + ;; + *) + lt_prog_compiler_pic_CXX='-fPIC' + ;; + esac + else + case $host_os in + aix[4-9]*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + lt_prog_compiler_static_CXX='-Bstatic' + else + lt_prog_compiler_static_CXX='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, CXX)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). 
+ lt_prog_compiler_pic_CXX='-DDLL_EXPORT' + ;; + dgux*) + case $cc_basename in + ec++*) + lt_prog_compiler_pic_CXX='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + lt_prog_compiler_pic_CXX='+Z' + fi + ;; + aCC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + lt_prog_compiler_pic_CXX='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_static_CXX='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + KCC*) + # KAI C++ Compiler + lt_prog_compiler_wl_CXX='--backend -Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64 which still supported -KPIC. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fPIC' + lt_prog_compiler_static_CXX='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-fpic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + xlc* | xlC* | bgxl[cC]* | mpixl[cC]*) + # IBM XL 8.0, 9.0 on PPC and BlueGene + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-qpic' + lt_prog_compiler_static_CXX='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + esac + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + lt_prog_compiler_pic_CXX='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd* | netbsdelf*-gnu) + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + lt_prog_compiler_pic_CXX='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + lt_prog_compiler_wl_CXX='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + lt_prog_compiler_pic_CXX='-pic' + ;; + cxx*) + # Digital/Compaq C++ + lt_prog_compiler_wl_CXX='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. 
+ lt_prog_compiler_pic_CXX= + lt_prog_compiler_static_CXX='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + lt_prog_compiler_wl_CXX='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + lt_prog_compiler_pic_CXX='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + lt_prog_compiler_pic_CXX='-pic' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + lcc*) + # Lucid + lt_prog_compiler_pic_CXX='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + lt_prog_compiler_wl_CXX='-Wl,' + lt_prog_compiler_pic_CXX='-KPIC' + lt_prog_compiler_static_CXX='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + lt_prog_compiler_pic_CXX='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + lt_prog_compiler_can_build_shared_CXX=no + ;; + esac + fi + +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + lt_prog_compiler_pic_CXX= + ;; + *) + lt_prog_compiler_pic_CXX="$lt_prog_compiler_pic_CXX -DPIC" + ;; +esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $compiler option to produce PIC" >&5 +$as_echo_n "checking for $compiler option to produce PIC... " >&6; } +if ${lt_cv_prog_compiler_pic_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_CXX=$lt_prog_compiler_pic_CXX +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_pic_CXX" >&6; } +lt_prog_compiler_pic_CXX=$lt_cv_prog_compiler_pic_CXX + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$lt_prog_compiler_pic_CXX"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works" >&5 +$as_echo_n "checking if $compiler PIC flag $lt_prog_compiler_pic_CXX works... " >&6; } +if ${lt_cv_prog_compiler_pic_works_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_pic_works_CXX=no + ac_outfile=conftest.$ac_objext + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$lt_prog_compiler_pic_CXX -DPIC" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! 
-s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_pic_works_CXX=yes + fi + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_pic_works_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_pic_works_CXX" >&6; } + +if test x"$lt_cv_prog_compiler_pic_works_CXX" = xyes; then + case $lt_prog_compiler_pic_CXX in + "" | " "*) ;; + *) lt_prog_compiler_pic_CXX=" $lt_prog_compiler_pic_CXX" ;; + esac +else + lt_prog_compiler_pic_CXX= + lt_prog_compiler_can_build_shared_CXX=no +fi + +fi + + + + + +# +# Check to make sure the static flag actually works. +# +wl=$lt_prog_compiler_wl_CXX eval lt_tmp_static_flag=\"$lt_prog_compiler_static_CXX\" +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler static flag $lt_tmp_static_flag works" >&5 +$as_echo_n "checking if $compiler static flag $lt_tmp_static_flag works... " >&6; } +if ${lt_cv_prog_compiler_static_works_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_static_works_CXX=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $lt_tmp_static_flag" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. + cat conftest.err 1>&5 + $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if diff conftest.exp conftest.er2 >/dev/null; then + lt_cv_prog_compiler_static_works_CXX=yes + fi + else + lt_cv_prog_compiler_static_works_CXX=yes + fi + fi + $RM -r conftest* + LDFLAGS="$save_LDFLAGS" + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_static_works_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_static_works_CXX" >&6; } + +if test x"$lt_cv_prog_compiler_static_works_CXX" = xyes; then + : +else + lt_prog_compiler_static_CXX= +fi + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! 
-s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if $compiler supports -c -o file.$ac_objext" >&5 +$as_echo_n "checking if $compiler supports -c -o file.$ac_objext... " >&6; } +if ${lt_cv_prog_compiler_c_o_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_prog_compiler_c_o_CXX=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [^ ]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&5) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&5 + echo "$as_me:$LINENO: \$? = $ac_status" >&5 + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + lt_cv_prog_compiler_c_o_CXX=yes + fi + fi + chmod u+w . 2>&5 + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. + $RM -r conftest + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_prog_compiler_c_o_CXX" >&5 +$as_echo "$lt_cv_prog_compiler_c_o_CXX" >&6; } + + + + +hard_links="nottested" +if test "$lt_cv_prog_compiler_c_o_CXX" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if we can lock with hard links" >&5 +$as_echo_n "checking if we can lock with hard links... 
" >&6; } + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $hard_links" >&5 +$as_echo "$hard_links" >&6; } + if test "$hard_links" = no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&5 +$as_echo "$as_me: WARNING: \`$CC' does not support \`-c -o', so \`make -j' may be unsafe" >&2;} + need_locks=warn + fi +else + need_locks=no +fi + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether the $compiler linker ($LD) supports shared libraries" >&5 +$as_echo_n "checking whether the $compiler linker ($LD) supports shared libraries... " >&6; } + + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms_CXX='_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*' + case $host_os in + aix[4-9]*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global defined + # symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + export_symbols_cmds_CXX='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + export_symbols_cmds_CXX='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && (substr(\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + export_symbols_cmds_CXX="$ltdll_cmds" + ;; + cygwin* | mingw* | cegcc*) + case $cc_basename in + cl*) + exclude_expsyms_CXX='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[BCDGRS][ ]/s/.*[ ]\([^ ]*\)/\1 DATA/;s/^.*[ ]__nm__\([^ ]*\)[ ][^ ]*/\1 DATA/;/^I[ ]/d;/^[AITW][ ]/s/.* //'\'' | sort | uniq > $export_symbols' + exclude_expsyms_CXX='[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname' + ;; + esac + ;; + linux* | k*bsd*-gnu | gnu*) + link_all_deplibs_CXX=no + ;; + *) + export_symbols_cmds_CXX='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac + +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $ld_shlibs_CXX" >&5 +$as_echo "$ld_shlibs_CXX" >&6; } +test "$ld_shlibs_CXX" = no && can_build_shared=no + +with_gnu_ld_CXX=$with_gnu_ld + + + + + + +# +# Do we need to explicitly link libc? +# +case "x$archive_cmds_need_lc_CXX" in +x|xyes) + # Assume -lc should be added + archive_cmds_need_lc_CXX=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $archive_cmds_CXX in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc should be explicitly linked in" >&5 +$as_echo_n "checking whether -lc should be explicitly linked in... 
" >&6; } +if ${lt_cv_archive_cmds_need_lc_CXX+:} false; then : + $as_echo_n "(cached) " >&6 +else + $RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$ac_compile\""; } >&5 + (eval $ac_compile) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$lt_prog_compiler_wl_CXX + pic_flag=$lt_prog_compiler_pic_CXX + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. + libname=conftest + lt_save_allow_undefined_flag=$allow_undefined_flag_CXX + allow_undefined_flag_CXX= + if { { eval echo "\"\$as_me\":${as_lineno-$LINENO}: \"$archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1\""; } >&5 + (eval $archive_cmds_CXX 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) 2>&5 + ac_status=$? + $as_echo "$as_me:${as_lineno-$LINENO}: \$? = $ac_status" >&5 + test $ac_status = 0; } + then + lt_cv_archive_cmds_need_lc_CXX=no + else + lt_cv_archive_cmds_need_lc_CXX=yes + fi + allow_undefined_flag_CXX=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $lt_cv_archive_cmds_need_lc_CXX" >&5 +$as_echo "$lt_cv_archive_cmds_need_lc_CXX" >&6; } + archive_cmds_need_lc_CXX=$lt_cv_archive_cmds_need_lc_CXX + ;; + esac + fi + ;; +esac + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking dynamic linker characteristics" >&5 +$as_echo_n "checking dynamic linker characteristics... " >&6; } + +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[4-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. 
+ case $host_os in + aix4 | aix4.[01] | aix4.[01].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([^/]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[45]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext}' + library_names_spec='${libname}.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([a-zA-Z]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec="$LIB" + if $ECHO "$sys_lib_search_path_spec" | $GREP ';[c-zC-Z]:/' >/dev/null; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[.]/-/g'`${versuffix}${shared_ext} $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' + + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[23].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[01]* | freebsdelf3.[01]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[2-9]* | freebsdelf3.[2-9]* | \ + freebsd4.[0-5] | freebsdelf4.[0-5] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... + postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[3-9]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be glibc/ELF. 
+linux* | k*bsd*-gnu | kopensolaris*-gnu) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + if ${lt_cv_shlibpath_overrides_runpath+:} false; then : + $as_echo_n "(cached) " >&6 +else + lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$lt_prog_compiler_wl_CXX\"; \ + LDFLAGS=\"\$LDFLAGS $hardcode_libdir_flag_spec_CXX\"" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_cxx_try_link "$LINENO"; then : + if ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null; then : + lt_cv_shlibpath_overrides_runpath=yes +fi +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + +fi + + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \$2)); skip = 1; } { if (!skip) print \$0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
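+# [Editor's note: illustrative sketch, not autoconf output.] The cached test
+# above links a throwaway program with a known rpath and asks objdump whether
+# the linker recorded DT_RUNPATH (new dtags, which LD_LIBRARY_PATH overrides)
+# or DT_RPATH (old dtags, which wins over it). A standalone equivalent,
+# assuming gcc and objdump on an ELF system (rp_probe.* are scratch names):
+echo 'int main (void) { return 0; }' > rp_probe.c
+gcc -Wl,-rpath,/nonexistent rp_probe.c -o rp_probe
+if objdump -p rp_probe | grep 'RUNPATH.*/nonexistent' >/dev/null; then
+  echo 'DT_RUNPATH: LD_LIBRARY_PATH overrides the embedded run path'
+else
+  echo 'DT_RPATH: the embedded run path overrides LD_LIBRARY_PATH'
+fi
+rm -f rp_probe.c rp_probe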
+ dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[89] | openbsd2.[89].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib /usr/gnu/lib 
/usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $dynamic_linker" >&5 +$as_echo "$dynamic_linker" >&6; } +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking how to hardcode library paths into programs" >&5 +$as_echo_n "checking how to hardcode library paths into programs... " >&6; } +hardcode_action_CXX= +if test -n "$hardcode_libdir_flag_spec_CXX" || + test -n "$runpath_var_CXX" || + test "X$hardcode_automatic_CXX" = "Xyes" ; then + + # We can hardcode non-existent directories. + if test "$hardcode_direct_CXX" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, CXX)" != no && + test "$hardcode_minus_L_CXX" != no; then + # Linking always hardcodes the temporary library directory. + hardcode_action_CXX=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + hardcode_action_CXX=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
+ hardcode_action_CXX=unsupported +fi +{ $as_echo "$as_me:${as_lineno-$LINENO}: result: $hardcode_action_CXX" >&5 +$as_echo "$hardcode_action_CXX" >&6; } + +if test "$hardcode_action_CXX" = relink || + test "$inherit_rpath_CXX" = yes; then + # Fast installation is not supported + enable_fast_install=no +elif test "$shlibpath_overrides_runpath" = yes || + test "$enable_shared" = no; then + # Fast installation is not necessary + enable_fast_install=needless +fi + + + + + + + + fi # test -n "$compiler" + + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test "$_lt_caught_CXX_error" != yes + +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + + + + + + + + + + + + + + ac_config_commands="$ac_config_commands libtool" + + + + +# Only expand once: + + + +# TODO(chandlerc@google.com): Currently we aren't running the Python tests +# against the interpreter detected by AM_PATH_PYTHON, and so we condition +# HAVE_PYTHON by requiring "python" to be in the PATH, and that interpreter's +# version to be >= 2.3. This will allow the scripts to use a "/usr/bin/env" +# hashbang. +PYTHON= # We *do not* allow the user to specify a python interpreter +# Extract the first word of "python", so it can be a program name with args. +set dummy python; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_path_PYTHON+:} false; then : + $as_echo_n "(cached) " >&6 +else + case $PYTHON in + [\\/]* | ?:[\\/]*) + ac_cv_path_PYTHON="$PYTHON" # Let the user override the test with a path. + ;; + *) + as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_path_PYTHON="$as_dir/$ac_word$ac_exec_ext" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_path_PYTHON" && ac_cv_path_PYTHON=":" + ;; +esac +fi +PYTHON=$ac_cv_path_PYTHON +if test -n "$PYTHON"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PYTHON" >&5 +$as_echo "$PYTHON" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + +if test "$PYTHON" != ":"; then : + prog="import sys +# split strings by '.' and convert to numeric. Append some zeros +# because we need at least 4 digits for the hex conversion. +# map returns an iterator in Python 3.0 and a list in 2.x +minver = list(map(int, '2.3'.split('.'))) + [0, 0, 0] +minverhex = 0 +# xrange is not present in Python 3.0 and range returns an iterator +for i in list(range(0, 4)): minverhex = (minverhex << 8) + minver[i] +sys.exit(sys.hexversion < minverhex)" + if { echo "$as_me:$LINENO: $PYTHON -c "$prog"" >&5 + ($PYTHON -c "$prog") >&5 2>&5 + ac_status=$? + echo "$as_me:$LINENO: \$? 
= $ac_status" >&5 + (exit $ac_status); }; then : + : +else + PYTHON=":" +fi +fi + if test "$PYTHON" != ":"; then + HAVE_PYTHON_TRUE= + HAVE_PYTHON_FALSE='#' +else + HAVE_PYTHON_TRUE='#' + HAVE_PYTHON_FALSE= +fi + + +# Configure pthreads. + +# Check whether --with-pthreads was given. +if test "${with_pthreads+set}" = set; then : + withval=$with_pthreads; with_pthreads=$withval +else + with_pthreads=check +fi + + +have_pthreads=no +if test "x$with_pthreads" != "xno"; then : + + + +ac_ext=c +ac_cpp='$CPP $CPPFLAGS' +ac_compile='$CC -c $CFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CC -o conftest$ac_exeext $CFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_c_compiler_gnu + +acx_pthread_ok=no + +# We used to check for pthread.h first, but this fails if pthread.h +# requires special compiler flags (e.g. on True64 or Sequent). +# It gets checked for in the link test anyway. + +# First of all, check if the user has set any of the PTHREAD_LIBS, +# etcetera environment variables, and if threads linking works using +# them: +if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS" >&5 +$as_echo_n "checking for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +/* Override any GCC internal prototype to avoid an error. + Use char because int might match the return type of a GCC + builtin and then its argument prototype would still apply. */ +#ifdef __cplusplus +extern "C" +#endif +char pthread_join (); +int +main () +{ +return pthread_join (); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + acx_pthread_ok=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_pthread_ok" >&5 +$as_echo "$acx_pthread_ok" >&6; } + if test x"$acx_pthread_ok" = xno; then + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" + fi + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" +fi + +# We must check for the threads library under a number of different +# names; the ordering is very important because some systems +# (e.g. DEC) have both -lpthread and -lpthreads, where one of the +# libraries is broken (non-POSIX). + +# Create a list of thread flags to try. Items starting with a "-" are +# C compiler flags, and other items are library names, except for "none" +# which indicates that we try without any flags at all, and "pthread-config" +# which is a program returning the flags for the Pth emulation library. + +acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" + +# The ordering *is* (sometimes) important. 
Some notes on the +# individual items follow: + +# pthreads: AIX (must check this before -lpthread) +# none: in case threads are in libc; should be tried before -Kthread and +# other compiler flags to prevent continual compiler warnings +# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) +# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) +# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) +# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) +# -pthreads: Solaris/gcc +# -mthreads: Mingw32/gcc, Lynx/gcc +# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it +# doesn't hurt to check since this sometimes defines pthreads too; +# also defines -D_REENTRANT) +# ... -mt is also the pthreads flag for HP/aCC +# pthread: Linux, etcetera +# --thread-safe: KAI C++ +# pthread-config: use pthread-config program (for GNU Pth library) + +case "${host_cpu}-${host_os}" in + *solaris*) + + # On Solaris (at least, for some versions), libc contains stubbed + # (non-functional) versions of the pthreads routines, so link-based + # tests will erroneously succeed. (We need to link with -pthreads/-mt/ + # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather + # a function called by this macro, so we could check for that, but + # who knows whether they'll stub that too in a future libc.) So, + # we'll just look for -pthreads and -lpthread first: + + acx_pthread_flags="-pthreads pthread -mt -pthread $acx_pthread_flags" + ;; +esac + +if test x"$acx_pthread_ok" = xno; then +for flag in $acx_pthread_flags; do + + case $flag in + none) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work without any flags" >&5 +$as_echo_n "checking whether pthreads work without any flags... " >&6; } + ;; + + -*) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether pthreads work with $flag" >&5 +$as_echo_n "checking whether pthreads work with $flag... " >&6; } + PTHREAD_CFLAGS="$flag" + ;; + + pthread-config) + # Extract the first word of "pthread-config", so it can be a program name with args. +set dummy pthread-config; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_acx_pthread_config+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$acx_pthread_config"; then + ac_cv_prog_acx_pthread_config="$acx_pthread_config" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
+ for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_acx_pthread_config="yes" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + + test -z "$ac_cv_prog_acx_pthread_config" && ac_cv_prog_acx_pthread_config="no" +fi +fi +acx_pthread_config=$ac_cv_prog_acx_pthread_config +if test -n "$acx_pthread_config"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_pthread_config" >&5 +$as_echo "$acx_pthread_config" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + if test x"$acx_pthread_config" = xno; then continue; fi + PTHREAD_CFLAGS="`pthread-config --cflags`" + PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" + ;; + + *) + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for the pthreads library -l$flag" >&5 +$as_echo_n "checking for the pthreads library -l$flag... " >&6; } + PTHREAD_LIBS="-l$flag" + ;; + esac + + save_LIBS="$LIBS" + save_CFLAGS="$CFLAGS" + LIBS="$PTHREAD_LIBS $LIBS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + + # Check for various functions. We must include pthread.h, + # since some functions may be macros. (On the Sequent, we + # need a special flag -Kthread to make this header compile.) + # We check for pthread_join because it is in -lpthread on IRIX + # while pthread_create is in libc. We check for pthread_attr_init + # due to DEC craziness with -lpthreads. We check for + # pthread_cleanup_push because it is one of the few pthread + # functions on Solaris that doesn't have a non-functional libc stub. + # We try pthread_create on general principles. + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <pthread.h> +int +main () +{ +pthread_t th; pthread_join(th, 0); + pthread_attr_init(0); pthread_cleanup_push(0, 0); + pthread_create(0,0,0,0); pthread_cleanup_pop(0); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + acx_pthread_ok=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $acx_pthread_ok" >&5 +$as_echo "$acx_pthread_ok" >&6; } + if test "x$acx_pthread_ok" = xyes; then + break; + fi + + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" +done +fi + +# Various other checks: +if test "x$acx_pthread_ok" = xyes; then + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + + # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. + { $as_echo "$as_me:${as_lineno-$LINENO}: checking for joinable pthread attribute" >&5 +$as_echo_n "checking for joinable pthread attribute... " >&6; } + attr_name=unknown + for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include <pthread.h> +int +main () +{ +int attr=$attr; return attr; + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + attr_name=$attr; break +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + done + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $attr_name" >&5 +$as_echo "$attr_name" >&6; } + if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then + +cat >>confdefs.h <<_ACEOF +#define PTHREAD_CREATE_JOINABLE $attr_name +_ACEOF + + fi + + { $as_echo "$as_me:${as_lineno-$LINENO}: checking if more special flags are required for pthreads" >&5 +$as_echo_n "checking if more special flags are required for pthreads... " >&6; } + flag=no + case "${host_cpu}-${host_os}" in + *-aix* | *-freebsd* | *-darwin*) flag="-D_THREAD_SAFE";; + *solaris* | *-osf* | *-hpux*) flag="-D_REENTRANT";; + esac + { $as_echo "$as_me:${as_lineno-$LINENO}: result: ${flag}" >&5 +$as_echo "${flag}" >&6; } + if test "x$flag" != xno; then + PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" + fi + + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + # More AIX lossage: must compile with xlc_r or cc_r + if test x"$GCC" != xyes; then + for ac_prog in xlc_r cc_r +do + # Extract the first word of "$ac_prog", so it can be a program name with args. +set dummy $ac_prog; ac_word=$2 +{ $as_echo "$as_me:${as_lineno-$LINENO}: checking for $ac_word" >&5 +$as_echo_n "checking for $ac_word... " >&6; } +if ${ac_cv_prog_PTHREAD_CC+:} false; then : + $as_echo_n "(cached) " >&6 +else + if test -n "$PTHREAD_CC"; then + ac_cv_prog_PTHREAD_CC="$PTHREAD_CC" # Let the user override the test. +else +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + for ac_exec_ext in '' $ac_executable_extensions; do + if { test -f "$as_dir/$ac_word$ac_exec_ext" && $as_test_x "$as_dir/$ac_word$ac_exec_ext"; }; then + ac_cv_prog_PTHREAD_CC="$ac_prog" + $as_echo "$as_me:${as_lineno-$LINENO}: found $as_dir/$ac_word$ac_exec_ext" >&5 + break 2 + fi +done + done +IFS=$as_save_IFS + +fi +fi +PTHREAD_CC=$ac_cv_prog_PTHREAD_CC +if test -n "$PTHREAD_CC"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: $PTHREAD_CC" >&5 +$as_echo "$PTHREAD_CC" >&6; } +else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } +fi + + + test -n "$PTHREAD_CC" && break +done +test -n "$PTHREAD_CC" || PTHREAD_CC="${CC}" + + else + PTHREAD_CC=$CC + fi + + # The next part tries to detect GCC inconsistency with -shared on some + # architectures and systems. The problem is that in certain + # configurations, when -shared is specified, GCC "forgets" to + # internally use various flags which are still necessary. + + # + # Prepare the flags + # + save_CFLAGS="$CFLAGS" + save_LIBS="$LIBS" + save_CC="$CC" + + # Try with the flags determined by the earlier checks. + # + # -Wl,-z,defs forces link-time symbol resolution, so that the + # linking checks with -shared actually have any value + # + # FIXME: -fPIC is required for -shared on many architectures, + # so we specify it here, but the right way would probably be to + # properly detect whether it is actually required. + CFLAGS="-shared -fPIC -Wl,-z,defs $CFLAGS $PTHREAD_CFLAGS" + LIBS="$PTHREAD_LIBS $LIBS" + CC="$PTHREAD_CC" + + # In order not to create several levels of indentation, we test + # the value of "$done" until we find the cure or run out of ideas. + done="no" + + # First, make sure the CFLAGS we added are actually accepted by our + # compiler. 
If not (and OS X's ld, for instance, does not accept -z), + # then we can't do this test. + if test x"$done" = xno; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether to check for GCC pthread/shared inconsistencies" >&5 +$as_echo_n "checking whether to check for GCC pthread/shared inconsistencies... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ + +int +main () +{ + + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + +else + done=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + + if test "x$done" = xyes ; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + fi + fi + + if test x"$done" = xno; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -pthread is sufficient with -shared" >&5 +$as_echo_n "checking whether -pthread is sufficient with -shared... " >&6; } + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <pthread.h> +int +main () +{ +pthread_t th; pthread_join(th, 0); + pthread_attr_init(0); pthread_cleanup_push(0, 0); + pthread_create(0,0,0,0); pthread_cleanup_pop(0); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + done=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + + if test "x$done" = xyes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + fi + + # + # Linux gcc on some architectures such as mips/mipsel forgets + # about -lpthread + # + if test x"$done" = xno; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lpthread fixes that" >&5 +$as_echo_n "checking whether -lpthread fixes that... " >&6; } + LIBS="-lpthread $PTHREAD_LIBS $save_LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. */ +#include <pthread.h> +int +main () +{ +pthread_t th; pthread_join(th, 0); + pthread_attr_init(0); pthread_cleanup_push(0, 0); + pthread_create(0,0,0,0); pthread_cleanup_pop(0); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + done=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + + if test "x$done" = xyes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + PTHREAD_LIBS="-lpthread $PTHREAD_LIBS" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + fi + # + # FreeBSD 4.10 gcc forgets to use -lc_r instead of -lc + # + if test x"$done" = xno; then + { $as_echo "$as_me:${as_lineno-$LINENO}: checking whether -lc_r fixes that" >&5 +$as_echo_n "checking whether -lc_r fixes that... " >&6; } + LIBS="-lc_r $PTHREAD_LIBS $save_LIBS" + cat confdefs.h - <<_ACEOF >conftest.$ac_ext +/* end confdefs.h. 
*/ +#include <pthread.h> +int +main () +{ +pthread_t th; pthread_join(th, 0); + pthread_attr_init(0); pthread_cleanup_push(0, 0); + pthread_create(0,0,0,0); pthread_cleanup_pop(0); + ; + return 0; +} +_ACEOF +if ac_fn_c_try_link "$LINENO"; then : + done=yes +fi +rm -f core conftest.err conftest.$ac_objext \ + conftest$ac_exeext conftest.$ac_ext + + if test "x$done" = xyes; then + { $as_echo "$as_me:${as_lineno-$LINENO}: result: yes" >&5 +$as_echo "yes" >&6; } + PTHREAD_LIBS="-lc_r $PTHREAD_LIBS" + else + { $as_echo "$as_me:${as_lineno-$LINENO}: result: no" >&5 +$as_echo "no" >&6; } + fi + fi + if test x"$done" = xno; then + # OK, we have run out of ideas + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Impossible to determine how to use pthreads with shared libraries" >&5 +$as_echo "$as_me: WARNING: Impossible to determine how to use pthreads with shared libraries" >&2;} + + # so it's not safe to assume that we may use pthreads + acx_pthread_ok=no + fi + + CFLAGS="$save_CFLAGS" + LIBS="$save_LIBS" + CC="$save_CC" +else + PTHREAD_CC="$CC" +fi + + + + + +# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: +if test x"$acx_pthread_ok" = xyes; then + +$as_echo "#define HAVE_PTHREAD 1" >>confdefs.h + + : +else + acx_pthread_ok=no + if test "x$with_pthreads" != "xcheck"; then : + { { $as_echo "$as_me:${as_lineno-$LINENO}: error: in \`$ac_pwd':" >&5 +$as_echo "$as_me: error: in \`$ac_pwd':" >&2;} +as_fn_error $? "--with-pthreads was specified, but unable to be used +See \`config.log' for more details" "$LINENO" 5; } +fi +fi +ac_ext=cpp +ac_cpp='$CXXCPP $CPPFLAGS' +ac_compile='$CXX -c $CXXFLAGS $CPPFLAGS conftest.$ac_ext >&5' +ac_link='$CXX -o conftest$ac_exeext $CXXFLAGS $CPPFLAGS $LDFLAGS conftest.$ac_ext $LIBS >&5' +ac_compiler_gnu=$ac_cv_cxx_compiler_gnu + + + have_pthreads="$acx_pthread_ok" +fi + if test "x$have_pthreads" = "xyes"; then + HAVE_PTHREADS_TRUE= + HAVE_PTHREADS_FALSE='#' +else + HAVE_PTHREADS_TRUE='#' + HAVE_PTHREADS_FALSE= +fi + + + + +# TODO(chandlerc@google.com) Check for the necessary system headers. + +# TODO(chandlerc@google.com) Check the types, structures, and other compiler +# and architecture characteristics. + +# Output the generated files. No further autoconf macros may be used. +cat >confcache <<\_ACEOF +# This file is a shell script that caches the results of configure +# tests run on this system so they can be shared between configure +# scripts and configure runs, see configure's option --config-cache. +# It is not useful on other systems. If it contains results you don't +# want to keep, you may remove or edit it. +# +# config.status only pays attention to the cache file if you give it +# the --recheck option to rerun configure. +# +# `ac_cv_env_foo' variables (set or unset) will be overridden when +# loading this file, other *unset* `ac_cv_foo' will be assigned the +# following values. + +_ACEOF + +# The following way of writing the cache mishandles newlines in values, +# but we know of no workaround that is simple, portable, and efficient. +# So, we kill variables containing newlines. +# Ultrix sh set writes to stderr and can't be redirected directly, +# and sets the high bit in the cache file unless we assign to the vars. 
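+# [Editor's note: illustrative sketch, not autoconf output.] The pthread
+# detection that just completed is, at heart, "try each candidate flag until
+# one links a real pthread program". A reduced standalone equivalent,
+# assuming a working cc (pt_probe.* are scratch names):
+cat > pt_probe.c <<'EOF'
+#include <pthread.h>
+void *fn (void *p) { return p; }
+int main (void)
+{
+  pthread_t th;
+  pthread_create (&th, 0, fn, 0);
+  pthread_join (th, 0);
+  return 0;
+}
+EOF
+found=
+for flag in none -pthread -pthreads -mt -lpthread; do
+  test "$flag" = none && flag=
+  if cc $flag pt_probe.c -o pt_probe 2>/dev/null; then
+    found=${flag:-'(no flag needed)'}
+    break
+  fi
+done
+echo "pthread flag: ${found:-not found}"
+rm -f pt_probe.c pt_probe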
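+# [Editor's note: illustrative sketch, not autoconf output.] The block below
+# serializes every *_cv_* shell variable into eval-able name='value' lines.
+# Ignoring the newline and quoting edge cases the real code handles, the
+# round trip reduces to this (demo.cache is a hypothetical file name):
+ac_cv_demo_have_pthread=yes
+(set) 2>&1 | sed -n "s/^\(ac_cv_demo_[A-Za-z0-9_]*\)=\(.*\)/\1='\2'/p" > demo.cache
+unset ac_cv_demo_have_pthread
+. ./demo.cache                    # a later run sources the cache...
+echo "$ac_cv_demo_have_pthread"   # ...and sees 'yes' without re-running the test
+rm -f demo.cache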
+( + for ac_var in `(set) 2>&1 | sed -n 's/^\([a-zA-Z_][a-zA-Z0-9_]*\)=.*/\1/p'`; do + eval ac_val=\$$ac_var + case $ac_val in #( + *${as_nl}*) + case $ac_var in #( + *_cv_*) { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: cache variable $ac_var contains a newline" >&5 +$as_echo "$as_me: WARNING: cache variable $ac_var contains a newline" >&2;} ;; + esac + case $ac_var in #( + _ | IFS | as_nl) ;; #( + BASH_ARGV | BASH_SOURCE) eval $ac_var= ;; #( + *) { eval $ac_var=; unset $ac_var;} ;; + esac ;; + esac + done + + (set) 2>&1 | + case $as_nl`(ac_space=' '; set) 2>&1` in #( + *${as_nl}ac_space=\ *) + # `set' does not quote correctly, so add quotes: double-quote + # substitution turns \\\\ into \\, and sed turns \\ into \. + sed -n \ + "s/'/'\\\\''/g; + s/^\\([_$as_cr_alnum]*_cv_[_$as_cr_alnum]*\\)=\\(.*\\)/\\1='\\2'/p" + ;; #( + *) + # `set' quotes correctly as required by POSIX, so do not add quotes. + sed -n "/^[_$as_cr_alnum]*_cv_[_$as_cr_alnum]*=/p" + ;; + esac | + sort +) | + sed ' + /^ac_cv_env_/b end + t clear + :clear + s/^\([^=]*\)=\(.*[{}].*\)$/test "${\1+set}" = set || &/ + t end + s/^\([^=]*\)=\(.*\)$/\1=${\1=\2}/ + :end' >>confcache +if diff "$cache_file" confcache >/dev/null 2>&1; then :; else + if test -w "$cache_file"; then + if test "x$cache_file" != "x/dev/null"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: updating cache $cache_file" >&5 +$as_echo "$as_me: updating cache $cache_file" >&6;} + if test ! -f "$cache_file" || test -h "$cache_file"; then + cat confcache >"$cache_file" + else + case $cache_file in #( + */* | ?:*) + mv -f confcache "$cache_file"$$ && + mv -f "$cache_file"$$ "$cache_file" ;; #( + *) + mv -f confcache "$cache_file" ;; + esac + fi + fi + else + { $as_echo "$as_me:${as_lineno-$LINENO}: not updating unwritable cache $cache_file" >&5 +$as_echo "$as_me: not updating unwritable cache $cache_file" >&6;} + fi +fi +rm -f confcache + +test "x$prefix" = xNONE && prefix=$ac_default_prefix +# Let make expand exec_prefix. +test "x$exec_prefix" = xNONE && exec_prefix='${prefix}' + +DEFS=-DHAVE_CONFIG_H + +ac_libobjs= +ac_ltlibobjs= +U= +for ac_i in : $LIBOBJS; do test "x$ac_i" = x: && continue + # 1. Remove the extension, and $U if already installed. + ac_script='s/\$U\././;s/\.o$//;s/\.obj$//' + ac_i=`$as_echo "$ac_i" | sed "$ac_script"` + # 2. Prepend LIBOBJDIR. When used with automake>=1.10 LIBOBJDIR + # will be set to the directory where LIBOBJS objects are built. + as_fn_append ac_libobjs " \${LIBOBJDIR}$ac_i\$U.$ac_objext" + as_fn_append ac_ltlibobjs " \${LIBOBJDIR}$ac_i"'$U.lo' +done +LIBOBJS=$ac_libobjs + +LTLIBOBJS=$ac_ltlibobjs + + + if test -n "$EXEEXT"; then + am__EXEEXT_TRUE= + am__EXEEXT_FALSE='#' +else + am__EXEEXT_TRUE='#' + am__EXEEXT_FALSE= +fi + +if test -z "${AMDEP_TRUE}" && test -z "${AMDEP_FALSE}"; then + as_fn_error $? "conditional \"AMDEP\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCC_TRUE}" && test -z "${am__fastdepCC_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCC\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${am__fastdepCXX_TRUE}" && test -z "${am__fastdepCXX_FALSE}"; then + as_fn_error $? "conditional \"am__fastdepCXX\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HAVE_PYTHON_TRUE}" && test -z "${HAVE_PYTHON_FALSE}"; then + as_fn_error $? "conditional \"HAVE_PYTHON\" was never defined. 
+Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi +if test -z "${HAVE_PTHREADS_TRUE}" && test -z "${HAVE_PTHREADS_FALSE}"; then + as_fn_error $? "conditional \"HAVE_PTHREADS\" was never defined. +Usually this means the macro was only invoked conditionally." "$LINENO" 5 +fi + +: "${CONFIG_STATUS=./config.status}" +ac_write_fail=0 +ac_clean_files_save=$ac_clean_files +ac_clean_files="$ac_clean_files $CONFIG_STATUS" +{ $as_echo "$as_me:${as_lineno-$LINENO}: creating $CONFIG_STATUS" >&5 +$as_echo "$as_me: creating $CONFIG_STATUS" >&6;} +as_write_fail=0 +cat >$CONFIG_STATUS <<_ASEOF || as_write_fail=1 +#! $SHELL +# Generated by $as_me. +# Run this file to recreate the current configuration. +# Compiler output produced by configure, useful for debugging +# configure, is in config.log if it exists. + +debug=false +ac_cs_recheck=false +ac_cs_silent=false + +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$CONFIG_STATUS <<\_ASEOF || as_write_fail=1 +## -------------------- ## +## M4sh Initialization. ## +## -------------------- ## + +# Be more Bourne compatible +DUALCASE=1; export DUALCASE # for MKS sh +if test -n "${ZSH_VERSION+set}" && (emulate sh) >/dev/null 2>&1; then : + emulate sh + NULLCMD=: + # Pre-4.2 versions of Zsh do word splitting on ${1+"$@"}, which + # is contrary to our usage. Disable this feature. + alias -g '${1+"$@"}'='"$@"' + setopt NO_GLOB_SUBST +else + case `(set -o) 2>/dev/null` in #( + *posix*) : + set -o posix ;; #( + *) : + ;; +esac +fi + + +as_nl=' +' +export as_nl +# Printing a long string crashes Solaris 7 /usr/bin/printf. +as_echo='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo +as_echo=$as_echo$as_echo$as_echo$as_echo$as_echo$as_echo +# Prefer a ksh shell builtin over an external printf program on Solaris, +# but without wasting forks for bash or zsh. +if test -z "$BASH_VERSION$ZSH_VERSION" \ + && (test "X`print -r -- $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='print -r --' + as_echo_n='print -rn --' +elif (test "X`printf %s $as_echo`" = "X$as_echo") 2>/dev/null; then + as_echo='printf %s\n' + as_echo_n='printf %s' +else + if test "X`(/usr/ucb/echo -n -n $as_echo) 2>/dev/null`" = "X-n $as_echo"; then + as_echo_body='eval /usr/ucb/echo -n "$1$as_nl"' + as_echo_n='/usr/ucb/echo -n' + else + as_echo_body='eval expr "X$1" : "X\\(.*\\)"' + as_echo_n_body='eval + arg=$1; + case $arg in #( + *"$as_nl"*) + expr "X$arg" : "X\\(.*\\)$as_nl"; + arg=`expr "X$arg" : ".*$as_nl\\(.*\\)"`;; + esac; + expr "X$arg" : "X\\(.*\\)" | tr -d "$as_nl" + ' + export as_echo_n_body + as_echo_n='sh -c $as_echo_n_body as_echo' + fi + export as_echo_body + as_echo='sh -c $as_echo_body as_echo' +fi + +# The user is always right. +if test "${PATH_SEPARATOR+set}" != set; then + PATH_SEPARATOR=: + (PATH='/bin;/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 && { + (PATH='/bin:/bin'; FPATH=$PATH; sh -c :) >/dev/null 2>&1 || + PATH_SEPARATOR=';' + } +fi + + +# IFS +# We need space, tab and new line, in precisely that order. Quoting is +# there to prevent editors from complaining about space-tab. +# (If _AS_PATH_WALK were called with IFS unset, it would disable word +# splitting by setting IFS to empty value.) +IFS=" "" $as_nl" + +# Find who we are. Look in the path if we contain no directory separator. 
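+# [Editor's note: illustrative sketch, not autoconf output.] The self-location
+# code below reduces to: if $0 already contains a slash, use it; otherwise
+# walk $PATH until a readable copy of the script turns up. Standalone:
+me=
+case $0 in
+  */*) me=$0 ;;
+  *)
+    save_IFS=$IFS; IFS=:
+    for d in $PATH; do
+      IFS=$save_IFS
+      test -r "${d:-.}/$0" && me=${d:-.}/$0 && break
+    done
+    IFS=$save_IFS
+    ;;
+esac
+echo "running as: ${me:-$0}"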
+as_myself= +case $0 in #(( + *[\\/]* ) as_myself=$0 ;; + *) as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. + test -r "$as_dir/$0" && as_myself=$as_dir/$0 && break + done +IFS=$as_save_IFS + + ;; +esac +# We did not find ourselves, most probably we were run as `sh COMMAND' +# in which case we are not to be found in the path. +if test "x$as_myself" = x; then + as_myself=$0 +fi +if test ! -f "$as_myself"; then + $as_echo "$as_myself: error: cannot find myself; rerun with an absolute file name" >&2 + exit 1 +fi + +# Unset variables that we do not need and which cause bugs (e.g. in +# pre-3.0 UWIN ksh). But do not cause bugs in bash 2.01; the "|| exit 1" +# suppresses any "Segmentation fault" message there. '((' could +# trigger a bug in pdksh 5.2.14. +for as_var in BASH_ENV ENV MAIL MAILPATH +do eval test x\${$as_var+set} = xset \ + && ( (unset $as_var) || exit 1) >/dev/null 2>&1 && unset $as_var || : +done +PS1='$ ' +PS2='> ' +PS4='+ ' + +# NLS nuisances. +LC_ALL=C +export LC_ALL +LANGUAGE=C +export LANGUAGE + +# CDPATH. +(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + + +# as_fn_error STATUS ERROR [LINENO LOG_FD] +# ---------------------------------------- +# Output "`basename $0`: error: ERROR" to stderr. If LINENO and LOG_FD are +# provided, also output the error to LOG_FD, referencing LINENO. Then exit the +# script with STATUS, using 1 if that was 0. +as_fn_error () +{ + as_status=$1; test $as_status -eq 0 && as_status=1 + if test "$4"; then + as_lineno=${as_lineno-"$3"} as_lineno_stack=as_lineno_stack=$as_lineno_stack + $as_echo "$as_me:${as_lineno-$LINENO}: error: $2" >&$4 + fi + $as_echo "$as_me: error: $2" >&2 + as_fn_exit $as_status +} # as_fn_error + + +# as_fn_set_status STATUS +# ----------------------- +# Set $? to STATUS, without forking. +as_fn_set_status () +{ + return $1 +} # as_fn_set_status + +# as_fn_exit STATUS +# ----------------- +# Exit the shell with STATUS, even in a "trap 0" or "set -e" context. +as_fn_exit () +{ + set +e + as_fn_set_status $1 + exit $1 +} # as_fn_exit + +# as_fn_unset VAR +# --------------- +# Portably unset VAR. +as_fn_unset () +{ + { eval $1=; unset $1;} +} +as_unset=as_fn_unset +# as_fn_append VAR VALUE +# ---------------------- +# Append the text in VALUE to the end of the definition contained in VAR. Take +# advantage of any shell optimizations that allow amortized linear growth over +# repeated appends, instead of the typical quadratic growth present in naive +# implementations. +if (eval "as_var=1; as_var+=2; test x\$as_var = x12") 2>/dev/null; then : + eval 'as_fn_append () + { + eval $1+=\$2 + }' +else + as_fn_append () + { + eval $1=\$$1\$2 + } +fi # as_fn_append + +# as_fn_arith ARG... +# ------------------ +# Perform arithmetic evaluation on the ARGs, and store the result in the +# global $as_val. Take advantage of shells that can avoid forks. The arguments +# must be portable across $(()) and expr. +if (eval "test \$(( 1 + 1 )) = 2") 2>/dev/null; then : + eval 'as_fn_arith () + { + as_val=$(( $* )) + }' +else + as_fn_arith () + { + as_val=`expr "$@" || test $? 
-eq 1` + } +fi # as_fn_arith + + +if expr a : '\(a\)' >/dev/null 2>&1 && + test "X`expr 00001 : '.*\(...\)'`" = X001; then + as_expr=expr +else + as_expr=false +fi + +if (basename -- /) >/dev/null 2>&1 && test "X`basename -- / 2>&1`" = "X/"; then + as_basename=basename +else + as_basename=false +fi + +if (as_dir=`dirname -- /` && test "X$as_dir" = X/) >/dev/null 2>&1; then + as_dirname=dirname +else + as_dirname=false +fi + +as_me=`$as_basename -- "$0" || +$as_expr X/"$0" : '.*/\([^/][^/]*\)/*$' \| \ + X"$0" : 'X\(//\)$' \| \ + X"$0" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X/"$0" | + sed '/^.*\/\([^/][^/]*\)\/*$/{ + s//\1/ + q + } + /^X\/\(\/\/\)$/{ + s//\1/ + q + } + /^X\/\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + +# Avoid depending upon Character Ranges. +as_cr_letters='abcdefghijklmnopqrstuvwxyz' +as_cr_LETTERS='ABCDEFGHIJKLMNOPQRSTUVWXYZ' +as_cr_Letters=$as_cr_letters$as_cr_LETTERS +as_cr_digits='0123456789' +as_cr_alnum=$as_cr_Letters$as_cr_digits + +ECHO_C= ECHO_N= ECHO_T= +case `echo -n x` in #((((( +-n*) + case `echo 'xy\c'` in + *c*) ECHO_T=' ';; # ECHO_T is single tab character. + xy) ECHO_C='\c';; + *) echo `echo ksh88 bug on AIX 6.1` > /dev/null + ECHO_T=' ';; + esac;; +*) + ECHO_N='-n';; +esac + +rm -f conf$$ conf$$.exe conf$$.file +if test -d conf$$.dir; then + rm -f conf$$.dir/conf$$.file +else + rm -f conf$$.dir + mkdir conf$$.dir 2>/dev/null +fi +if (echo >conf$$.file) 2>/dev/null; then + if ln -s conf$$.file conf$$ 2>/dev/null; then + as_ln_s='ln -s' + # ... but there are two gotchas: + # 1) On MSYS, both `ln -s file dir' and `ln file dir' fail. + # 2) DJGPP < 2.04 has no symlinks; `ln -s' creates a wrapper executable. + # In both cases, we have to default to `cp -p'. + ln -s conf$$.file conf$$.dir 2>/dev/null && test ! -f conf$$.exe || + as_ln_s='cp -p' + elif ln conf$$.file conf$$ 2>/dev/null; then + as_ln_s=ln + else + as_ln_s='cp -p' + fi +else + as_ln_s='cp -p' +fi +rm -f conf$$ conf$$.exe conf$$.dir/conf$$.file conf$$.file +rmdir conf$$.dir 2>/dev/null + + +# as_fn_mkdir_p +# ------------- +# Create "$as_dir" as a directory, including parents if necessary. +as_fn_mkdir_p () +{ + + case $as_dir in #( + -*) as_dir=./$as_dir;; + esac + test -d "$as_dir" || eval $as_mkdir_p || { + as_dirs= + while :; do + case $as_dir in #( + *\'*) as_qdir=`$as_echo "$as_dir" | sed "s/'/'\\\\\\\\''/g"`;; #'( + *) as_qdir=$as_dir;; + esac + as_dirs="'$as_qdir' $as_dirs" + as_dir=`$as_dirname -- "$as_dir" || +$as_expr X"$as_dir" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$as_dir" : 'X\(//\)[^/]' \| \ + X"$as_dir" : 'X\(//\)$' \| \ + X"$as_dir" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$as_dir" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + test -d "$as_dir" && break + done + test -z "$as_dirs" || eval "mkdir $as_dirs" + } || test -d "$as_dir" || as_fn_error $? "cannot create directory $as_dir" + + +} # as_fn_mkdir_p +if mkdir -p . 
2>/dev/null; then + as_mkdir_p='mkdir -p "$as_dir"' +else + test -d ./-p && rmdir ./-p + as_mkdir_p=false +fi + +if test -x / >/dev/null 2>&1; then + as_test_x='test -x' +else + if ls -dL / >/dev/null 2>&1; then + as_ls_L_option=L + else + as_ls_L_option= + fi + as_test_x=' + eval sh -c '\'' + if test -d "$1"; then + test -d "$1/."; + else + case $1 in #( + -*)set "./$1";; + esac; + case `ls -ld'$as_ls_L_option' "$1" 2>/dev/null` in #(( + ???[sx]*):;;*)false;;esac;fi + '\'' sh + ' +fi +as_executable_p=$as_test_x + +# Sed expression to map a string onto a valid CPP name. +as_tr_cpp="eval sed 'y%*$as_cr_letters%P$as_cr_LETTERS%;s%[^_$as_cr_alnum]%_%g'" + +# Sed expression to map a string onto a valid variable name. +as_tr_sh="eval sed 'y%*+%pp%;s%[^_$as_cr_alnum]%_%g'" + + +exec 6>&1 +## ----------------------------------- ## +## Main body of $CONFIG_STATUS script. ## +## ----------------------------------- ## +_ASEOF +test $as_write_fail = 0 && chmod +x $CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# Save the log message, to keep $0 and so on meaningful, and to +# report actual input values of CONFIG_FILES etc. instead of their +# values after options handling. +ac_log=" +This file was extended by Google C++ Testing Framework $as_me 1.7.0, which was +generated by GNU Autoconf 2.68. Invocation command line was + + CONFIG_FILES = $CONFIG_FILES + CONFIG_HEADERS = $CONFIG_HEADERS + CONFIG_LINKS = $CONFIG_LINKS + CONFIG_COMMANDS = $CONFIG_COMMANDS + $ $0 $@ + +on `(hostname || uname -n) 2>/dev/null | sed 1q` +" + +_ACEOF + +case $ac_config_files in *" +"*) set x $ac_config_files; shift; ac_config_files=$*;; +esac + +case $ac_config_headers in *" +"*) set x $ac_config_headers; shift; ac_config_headers=$*;; +esac + + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# Files that config.status was made for. +config_files="$ac_config_files" +config_headers="$ac_config_headers" +config_commands="$ac_config_commands" + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +ac_cs_usage="\ +\`$as_me' instantiates files and other configuration actions +from templates according to the current configuration. Unless the files +and actions are specified as TAGs, all are instantiated by default. + +Usage: $0 [OPTION]... [TAG]... + + -h, --help print this help, then exit + -V, --version print version number and configuration settings, then exit + --config print configuration, then exit + -q, --quiet, --silent + do not print progress messages + -d, --debug don't remove temporary files + --recheck update $as_me by reconfiguring in the same conditions + --file=FILE[:TEMPLATE] + instantiate the configuration file FILE + --header=FILE[:TEMPLATE] + instantiate the configuration header FILE + +Configuration files: +$config_files + +Configuration headers: +$config_headers + +Configuration commands: +$config_commands + +Report bugs to <googletestframework@googlegroups.com>." + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_cs_config="`$as_echo "$ac_configure_args" | sed 's/^ //; s/[\\""\`\$]/\\\\&/g'`" +ac_cs_version="\\ +Google C++ Testing Framework config.status 1.7.0 +configured by $0, generated by GNU Autoconf 2.68, + with options \\"\$ac_cs_config\\" + +Copyright (C) 2010 Free Software Foundation, Inc. +This config.status script is free software; the Free Software Foundation +gives unlimited permission to copy, distribute and modify it." 
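+# [Editor's note: illustrative, not autoconf output.] Once config.status
+# exists, the option parser below lets you regenerate individual outputs
+# without re-running any configure tests; the file names are examples of
+# whatever was registered with AC_CONFIG_FILES/AC_CONFIG_HEADERS:
+#
+#   ./config.status --file=Makefile     # re-instantiate one file from its .in
+#   ./config.status --header=config.h   # re-instantiate one header
+#   ./config.status --recheck           # re-run configure with the same arguments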
+ +ac_pwd='$ac_pwd' +srcdir='$srcdir' +INSTALL='$INSTALL' +MKDIR_P='$MKDIR_P' +AWK='$AWK' +test -n "\$AWK" || AWK=awk +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# The default lists apply if the user does not specify any file. +ac_need_defaults=: +while test $# != 0 +do + case $1 in + --*=?*) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg=`expr "X$1" : 'X[^=]*=\(.*\)'` + ac_shift=: + ;; + --*=) + ac_option=`expr "X$1" : 'X\([^=]*\)='` + ac_optarg= + ac_shift=: + ;; + *) + ac_option=$1 + ac_optarg=$2 + ac_shift=shift + ;; + esac + + case $ac_option in + # Handling of the options. + -recheck | --recheck | --rechec | --reche | --rech | --rec | --re | --r) + ac_cs_recheck=: ;; + --version | --versio | --versi | --vers | --ver | --ve | --v | -V ) + $as_echo "$ac_cs_version"; exit ;; + --config | --confi | --conf | --con | --co | --c ) + $as_echo "$ac_cs_config"; exit ;; + --debug | --debu | --deb | --de | --d | -d ) + debug=: ;; + --file | --fil | --fi | --f ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + '') as_fn_error $? "missing file argument" ;; + esac + as_fn_append CONFIG_FILES " '$ac_optarg'" + ac_need_defaults=false;; + --header | --heade | --head | --hea ) + $ac_shift + case $ac_optarg in + *\'*) ac_optarg=`$as_echo "$ac_optarg" | sed "s/'/'\\\\\\\\''/g"` ;; + esac + as_fn_append CONFIG_HEADERS " '$ac_optarg'" + ac_need_defaults=false;; + --he | --h) + # Conflict between --help and --header + as_fn_error $? "ambiguous option: \`$1' +Try \`$0 --help' for more information.";; + --help | --hel | -h ) + $as_echo "$ac_cs_usage"; exit ;; + -q | -quiet | --quiet | --quie | --qui | --qu | --q \ + | -silent | --silent | --silen | --sile | --sil | --si | --s) + ac_cs_silent=: ;; + + # This is an error. + -*) as_fn_error $? "unrecognized option: \`$1' +Try \`$0 --help' for more information." ;; + + *) as_fn_append ac_config_targets " $1" + ac_need_defaults=false ;; + + esac + shift +done + +ac_configure_extra_args= + +if $ac_cs_silent; then + exec 6>/dev/null + ac_configure_extra_args="$ac_configure_extra_args --silent" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +if \$ac_cs_recheck; then + set X '$SHELL' '$0' $ac_configure_args \$ac_configure_extra_args --no-create --no-recursion + shift + \$as_echo "running CONFIG_SHELL=$SHELL \$*" >&6 + CONFIG_SHELL='$SHELL' + export CONFIG_SHELL + exec "\$@" +fi + +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +exec 5>>config.log +{ + echo + sed 'h;s/./-/g;s/^.../## /;s/...$/ ##/;p;x;p;x' <<_ASBOX +## Running $as_me. ## +_ASBOX + $as_echo "$ac_log" +} >&5 + +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +# +# INIT-COMMANDS +# +AMDEP_TRUE="$AMDEP_TRUE" ac_aux_dir="$ac_aux_dir" + + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
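+# [Editor's note: illustrative sketch, not autoconf output.] Every libtool
+# variable captured below is piped through $delay_single_quote_subst, a sed
+# script that rewrites each ' as '\'' so the value survives inside a
+# single-quoted assignment. A minimal equivalent (sq, v, esc are demo names):
+sq="s/'/'\\\\''/g"
+v="it's a test"
+esc=`printf '%s' "$v" | sed "$sq"`
+echo "v='$esc'"
+# prints: v='it'\''s a test' -- which eval restores to the original string.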
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +macro_version='`$ECHO "$macro_version" | $SED "$delay_single_quote_subst"`' +macro_revision='`$ECHO "$macro_revision" | $SED "$delay_single_quote_subst"`' +enable_shared='`$ECHO "$enable_shared" | $SED "$delay_single_quote_subst"`' +enable_static='`$ECHO "$enable_static" | $SED "$delay_single_quote_subst"`' +pic_mode='`$ECHO "$pic_mode" | $SED "$delay_single_quote_subst"`' +enable_fast_install='`$ECHO "$enable_fast_install" | $SED "$delay_single_quote_subst"`' +SHELL='`$ECHO "$SHELL" | $SED "$delay_single_quote_subst"`' +ECHO='`$ECHO "$ECHO" | $SED "$delay_single_quote_subst"`' +PATH_SEPARATOR='`$ECHO "$PATH_SEPARATOR" | $SED "$delay_single_quote_subst"`' +host_alias='`$ECHO "$host_alias" | $SED "$delay_single_quote_subst"`' +host='`$ECHO "$host" | $SED "$delay_single_quote_subst"`' +host_os='`$ECHO "$host_os" | $SED "$delay_single_quote_subst"`' +build_alias='`$ECHO "$build_alias" | $SED "$delay_single_quote_subst"`' +build='`$ECHO "$build" | $SED "$delay_single_quote_subst"`' +build_os='`$ECHO "$build_os" | $SED "$delay_single_quote_subst"`' +SED='`$ECHO "$SED" | $SED "$delay_single_quote_subst"`' +Xsed='`$ECHO "$Xsed" | $SED "$delay_single_quote_subst"`' +GREP='`$ECHO "$GREP" | $SED "$delay_single_quote_subst"`' +EGREP='`$ECHO "$EGREP" | $SED "$delay_single_quote_subst"`' +FGREP='`$ECHO "$FGREP" | $SED "$delay_single_quote_subst"`' +LD='`$ECHO "$LD" | $SED "$delay_single_quote_subst"`' +NM='`$ECHO "$NM" | $SED "$delay_single_quote_subst"`' +LN_S='`$ECHO "$LN_S" | $SED "$delay_single_quote_subst"`' +max_cmd_len='`$ECHO "$max_cmd_len" | $SED "$delay_single_quote_subst"`' +ac_objext='`$ECHO "$ac_objext" | $SED "$delay_single_quote_subst"`' +exeext='`$ECHO "$exeext" | $SED "$delay_single_quote_subst"`' +lt_unset='`$ECHO "$lt_unset" | $SED "$delay_single_quote_subst"`' +lt_SP2NL='`$ECHO "$lt_SP2NL" | $SED "$delay_single_quote_subst"`' +lt_NL2SP='`$ECHO "$lt_NL2SP" | $SED "$delay_single_quote_subst"`' +lt_cv_to_host_file_cmd='`$ECHO "$lt_cv_to_host_file_cmd" | $SED "$delay_single_quote_subst"`' +lt_cv_to_tool_file_cmd='`$ECHO "$lt_cv_to_tool_file_cmd" | $SED "$delay_single_quote_subst"`' +reload_flag='`$ECHO "$reload_flag" | $SED "$delay_single_quote_subst"`' +reload_cmds='`$ECHO "$reload_cmds" | $SED "$delay_single_quote_subst"`' +OBJDUMP='`$ECHO "$OBJDUMP" | $SED "$delay_single_quote_subst"`' +deplibs_check_method='`$ECHO "$deplibs_check_method" | $SED "$delay_single_quote_subst"`' +file_magic_cmd='`$ECHO "$file_magic_cmd" | $SED "$delay_single_quote_subst"`' +file_magic_glob='`$ECHO "$file_magic_glob" | $SED "$delay_single_quote_subst"`' +want_nocaseglob='`$ECHO "$want_nocaseglob" | $SED "$delay_single_quote_subst"`' +DLLTOOL='`$ECHO "$DLLTOOL" | $SED "$delay_single_quote_subst"`' +sharedlib_from_linklib_cmd='`$ECHO "$sharedlib_from_linklib_cmd" | $SED "$delay_single_quote_subst"`' +AR='`$ECHO "$AR" | $SED "$delay_single_quote_subst"`' +AR_FLAGS='`$ECHO "$AR_FLAGS" | $SED "$delay_single_quote_subst"`' +archiver_list_spec='`$ECHO "$archiver_list_spec" | $SED "$delay_single_quote_subst"`' +STRIP='`$ECHO "$STRIP" | $SED "$delay_single_quote_subst"`' +RANLIB='`$ECHO "$RANLIB" | $SED "$delay_single_quote_subst"`' +old_postinstall_cmds='`$ECHO "$old_postinstall_cmds" | $SED "$delay_single_quote_subst"`' +old_postuninstall_cmds='`$ECHO "$old_postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_cmds='`$ECHO 
"$old_archive_cmds" | $SED "$delay_single_quote_subst"`' +lock_old_archive_extraction='`$ECHO "$lock_old_archive_extraction" | $SED "$delay_single_quote_subst"`' +CC='`$ECHO "$CC" | $SED "$delay_single_quote_subst"`' +CFLAGS='`$ECHO "$CFLAGS" | $SED "$delay_single_quote_subst"`' +compiler='`$ECHO "$compiler" | $SED "$delay_single_quote_subst"`' +GCC='`$ECHO "$GCC" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_pipe='`$ECHO "$lt_cv_sys_global_symbol_pipe" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_cdecl='`$ECHO "$lt_cv_sys_global_symbol_to_cdecl" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address" | $SED "$delay_single_quote_subst"`' +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix='`$ECHO "$lt_cv_sys_global_symbol_to_c_name_address_lib_prefix" | $SED "$delay_single_quote_subst"`' +nm_file_list_spec='`$ECHO "$nm_file_list_spec" | $SED "$delay_single_quote_subst"`' +lt_sysroot='`$ECHO "$lt_sysroot" | $SED "$delay_single_quote_subst"`' +objdir='`$ECHO "$objdir" | $SED "$delay_single_quote_subst"`' +MAGIC_CMD='`$ECHO "$MAGIC_CMD" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag='`$ECHO "$lt_prog_compiler_no_builtin_flag" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic='`$ECHO "$lt_prog_compiler_pic" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_wl='`$ECHO "$lt_prog_compiler_wl" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_static='`$ECHO "$lt_prog_compiler_static" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o='`$ECHO "$lt_cv_prog_compiler_c_o" | $SED "$delay_single_quote_subst"`' +need_locks='`$ECHO "$need_locks" | $SED "$delay_single_quote_subst"`' +MANIFEST_TOOL='`$ECHO "$MANIFEST_TOOL" | $SED "$delay_single_quote_subst"`' +DSYMUTIL='`$ECHO "$DSYMUTIL" | $SED "$delay_single_quote_subst"`' +NMEDIT='`$ECHO "$NMEDIT" | $SED "$delay_single_quote_subst"`' +LIPO='`$ECHO "$LIPO" | $SED "$delay_single_quote_subst"`' +OTOOL='`$ECHO "$OTOOL" | $SED "$delay_single_quote_subst"`' +OTOOL64='`$ECHO "$OTOOL64" | $SED "$delay_single_quote_subst"`' +libext='`$ECHO "$libext" | $SED "$delay_single_quote_subst"`' +shrext_cmds='`$ECHO "$shrext_cmds" | $SED "$delay_single_quote_subst"`' +extract_expsyms_cmds='`$ECHO "$extract_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc='`$ECHO "$archive_cmds_need_lc" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes='`$ECHO "$enable_shared_with_static_runtimes" | $SED "$delay_single_quote_subst"`' +export_dynamic_flag_spec='`$ECHO "$export_dynamic_flag_spec" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec='`$ECHO "$whole_archive_flag_spec" | $SED "$delay_single_quote_subst"`' +compiler_needs_object='`$ECHO "$compiler_needs_object" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds='`$ECHO "$old_archive_from_new_cmds" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds='`$ECHO "$old_archive_from_expsyms_cmds" | $SED "$delay_single_quote_subst"`' +archive_cmds='`$ECHO "$archive_cmds" | $SED "$delay_single_quote_subst"`' +archive_expsym_cmds='`$ECHO "$archive_expsym_cmds" | $SED "$delay_single_quote_subst"`' +module_cmds='`$ECHO "$module_cmds" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds='`$ECHO "$module_expsym_cmds" | $SED "$delay_single_quote_subst"`' +with_gnu_ld='`$ECHO "$with_gnu_ld" | $SED "$delay_single_quote_subst"`' +allow_undefined_flag='`$ECHO "$allow_undefined_flag" | $SED 
"$delay_single_quote_subst"`' +no_undefined_flag='`$ECHO "$no_undefined_flag" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec='`$ECHO "$hardcode_libdir_flag_spec" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator='`$ECHO "$hardcode_libdir_separator" | $SED "$delay_single_quote_subst"`' +hardcode_direct='`$ECHO "$hardcode_direct" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute='`$ECHO "$hardcode_direct_absolute" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L='`$ECHO "$hardcode_minus_L" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var='`$ECHO "$hardcode_shlibpath_var" | $SED "$delay_single_quote_subst"`' +hardcode_automatic='`$ECHO "$hardcode_automatic" | $SED "$delay_single_quote_subst"`' +inherit_rpath='`$ECHO "$inherit_rpath" | $SED "$delay_single_quote_subst"`' +link_all_deplibs='`$ECHO "$link_all_deplibs" | $SED "$delay_single_quote_subst"`' +always_export_symbols='`$ECHO "$always_export_symbols" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds='`$ECHO "$export_symbols_cmds" | $SED "$delay_single_quote_subst"`' +exclude_expsyms='`$ECHO "$exclude_expsyms" | $SED "$delay_single_quote_subst"`' +include_expsyms='`$ECHO "$include_expsyms" | $SED "$delay_single_quote_subst"`' +prelink_cmds='`$ECHO "$prelink_cmds" | $SED "$delay_single_quote_subst"`' +postlink_cmds='`$ECHO "$postlink_cmds" | $SED "$delay_single_quote_subst"`' +file_list_spec='`$ECHO "$file_list_spec" | $SED "$delay_single_quote_subst"`' +variables_saved_for_relink='`$ECHO "$variables_saved_for_relink" | $SED "$delay_single_quote_subst"`' +need_lib_prefix='`$ECHO "$need_lib_prefix" | $SED "$delay_single_quote_subst"`' +need_version='`$ECHO "$need_version" | $SED "$delay_single_quote_subst"`' +version_type='`$ECHO "$version_type" | $SED "$delay_single_quote_subst"`' +runpath_var='`$ECHO "$runpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_var='`$ECHO "$shlibpath_var" | $SED "$delay_single_quote_subst"`' +shlibpath_overrides_runpath='`$ECHO "$shlibpath_overrides_runpath" | $SED "$delay_single_quote_subst"`' +libname_spec='`$ECHO "$libname_spec" | $SED "$delay_single_quote_subst"`' +library_names_spec='`$ECHO "$library_names_spec" | $SED "$delay_single_quote_subst"`' +soname_spec='`$ECHO "$soname_spec" | $SED "$delay_single_quote_subst"`' +install_override_mode='`$ECHO "$install_override_mode" | $SED "$delay_single_quote_subst"`' +postinstall_cmds='`$ECHO "$postinstall_cmds" | $SED "$delay_single_quote_subst"`' +postuninstall_cmds='`$ECHO "$postuninstall_cmds" | $SED "$delay_single_quote_subst"`' +finish_cmds='`$ECHO "$finish_cmds" | $SED "$delay_single_quote_subst"`' +finish_eval='`$ECHO "$finish_eval" | $SED "$delay_single_quote_subst"`' +hardcode_into_libs='`$ECHO "$hardcode_into_libs" | $SED "$delay_single_quote_subst"`' +sys_lib_search_path_spec='`$ECHO "$sys_lib_search_path_spec" | $SED "$delay_single_quote_subst"`' +sys_lib_dlsearch_path_spec='`$ECHO "$sys_lib_dlsearch_path_spec" | $SED "$delay_single_quote_subst"`' +hardcode_action='`$ECHO "$hardcode_action" | $SED "$delay_single_quote_subst"`' +enable_dlopen='`$ECHO "$enable_dlopen" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self='`$ECHO "$enable_dlopen_self" | $SED "$delay_single_quote_subst"`' +enable_dlopen_self_static='`$ECHO "$enable_dlopen_self_static" | $SED "$delay_single_quote_subst"`' +old_striplib='`$ECHO "$old_striplib" | $SED "$delay_single_quote_subst"`' +striplib='`$ECHO "$striplib" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_dirs='`$ECHO 
"$compiler_lib_search_dirs" | $SED "$delay_single_quote_subst"`' +predep_objects='`$ECHO "$predep_objects" | $SED "$delay_single_quote_subst"`' +postdep_objects='`$ECHO "$postdep_objects" | $SED "$delay_single_quote_subst"`' +predeps='`$ECHO "$predeps" | $SED "$delay_single_quote_subst"`' +postdeps='`$ECHO "$postdeps" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_path='`$ECHO "$compiler_lib_search_path" | $SED "$delay_single_quote_subst"`' +LD_CXX='`$ECHO "$LD_CXX" | $SED "$delay_single_quote_subst"`' +reload_flag_CXX='`$ECHO "$reload_flag_CXX" | $SED "$delay_single_quote_subst"`' +reload_cmds_CXX='`$ECHO "$reload_cmds_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_cmds_CXX='`$ECHO "$old_archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' +compiler_CXX='`$ECHO "$compiler_CXX" | $SED "$delay_single_quote_subst"`' +GCC_CXX='`$ECHO "$GCC_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_no_builtin_flag_CXX='`$ECHO "$lt_prog_compiler_no_builtin_flag_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_pic_CXX='`$ECHO "$lt_prog_compiler_pic_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_wl_CXX='`$ECHO "$lt_prog_compiler_wl_CXX" | $SED "$delay_single_quote_subst"`' +lt_prog_compiler_static_CXX='`$ECHO "$lt_prog_compiler_static_CXX" | $SED "$delay_single_quote_subst"`' +lt_cv_prog_compiler_c_o_CXX='`$ECHO "$lt_cv_prog_compiler_c_o_CXX" | $SED "$delay_single_quote_subst"`' +archive_cmds_need_lc_CXX='`$ECHO "$archive_cmds_need_lc_CXX" | $SED "$delay_single_quote_subst"`' +enable_shared_with_static_runtimes_CXX='`$ECHO "$enable_shared_with_static_runtimes_CXX" | $SED "$delay_single_quote_subst"`' +export_dynamic_flag_spec_CXX='`$ECHO "$export_dynamic_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +whole_archive_flag_spec_CXX='`$ECHO "$whole_archive_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +compiler_needs_object_CXX='`$ECHO "$compiler_needs_object_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_from_new_cmds_CXX='`$ECHO "$old_archive_from_new_cmds_CXX" | $SED "$delay_single_quote_subst"`' +old_archive_from_expsyms_cmds_CXX='`$ECHO "$old_archive_from_expsyms_cmds_CXX" | $SED "$delay_single_quote_subst"`' +archive_cmds_CXX='`$ECHO "$archive_cmds_CXX" | $SED "$delay_single_quote_subst"`' +archive_expsym_cmds_CXX='`$ECHO "$archive_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' +module_cmds_CXX='`$ECHO "$module_cmds_CXX" | $SED "$delay_single_quote_subst"`' +module_expsym_cmds_CXX='`$ECHO "$module_expsym_cmds_CXX" | $SED "$delay_single_quote_subst"`' +with_gnu_ld_CXX='`$ECHO "$with_gnu_ld_CXX" | $SED "$delay_single_quote_subst"`' +allow_undefined_flag_CXX='`$ECHO "$allow_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' +no_undefined_flag_CXX='`$ECHO "$no_undefined_flag_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_flag_spec_CXX='`$ECHO "$hardcode_libdir_flag_spec_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_libdir_separator_CXX='`$ECHO "$hardcode_libdir_separator_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_direct_CXX='`$ECHO "$hardcode_direct_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_direct_absolute_CXX='`$ECHO "$hardcode_direct_absolute_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_minus_L_CXX='`$ECHO "$hardcode_minus_L_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_shlibpath_var_CXX='`$ECHO "$hardcode_shlibpath_var_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_automatic_CXX='`$ECHO "$hardcode_automatic_CXX" | $SED "$delay_single_quote_subst"`' 
+inherit_rpath_CXX='`$ECHO "$inherit_rpath_CXX" | $SED "$delay_single_quote_subst"`' +link_all_deplibs_CXX='`$ECHO "$link_all_deplibs_CXX" | $SED "$delay_single_quote_subst"`' +always_export_symbols_CXX='`$ECHO "$always_export_symbols_CXX" | $SED "$delay_single_quote_subst"`' +export_symbols_cmds_CXX='`$ECHO "$export_symbols_cmds_CXX" | $SED "$delay_single_quote_subst"`' +exclude_expsyms_CXX='`$ECHO "$exclude_expsyms_CXX" | $SED "$delay_single_quote_subst"`' +include_expsyms_CXX='`$ECHO "$include_expsyms_CXX" | $SED "$delay_single_quote_subst"`' +prelink_cmds_CXX='`$ECHO "$prelink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +postlink_cmds_CXX='`$ECHO "$postlink_cmds_CXX" | $SED "$delay_single_quote_subst"`' +file_list_spec_CXX='`$ECHO "$file_list_spec_CXX" | $SED "$delay_single_quote_subst"`' +hardcode_action_CXX='`$ECHO "$hardcode_action_CXX" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_dirs_CXX='`$ECHO "$compiler_lib_search_dirs_CXX" | $SED "$delay_single_quote_subst"`' +predep_objects_CXX='`$ECHO "$predep_objects_CXX" | $SED "$delay_single_quote_subst"`' +postdep_objects_CXX='`$ECHO "$postdep_objects_CXX" | $SED "$delay_single_quote_subst"`' +predeps_CXX='`$ECHO "$predeps_CXX" | $SED "$delay_single_quote_subst"`' +postdeps_CXX='`$ECHO "$postdeps_CXX" | $SED "$delay_single_quote_subst"`' +compiler_lib_search_path_CXX='`$ECHO "$compiler_lib_search_path_CXX" | $SED "$delay_single_quote_subst"`' + +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$1 +_LTECHO_EOF' +} + +# Quote evaled strings. +for var in SHELL \ +ECHO \ +PATH_SEPARATOR \ +SED \ +GREP \ +EGREP \ +FGREP \ +LD \ +NM \ +LN_S \ +lt_SP2NL \ +lt_NL2SP \ +reload_flag \ +OBJDUMP \ +deplibs_check_method \ +file_magic_cmd \ +file_magic_glob \ +want_nocaseglob \ +DLLTOOL \ +sharedlib_from_linklib_cmd \ +AR \ +AR_FLAGS \ +archiver_list_spec \ +STRIP \ +RANLIB \ +CC \ +CFLAGS \ +compiler \ +lt_cv_sys_global_symbol_pipe \ +lt_cv_sys_global_symbol_to_cdecl \ +lt_cv_sys_global_symbol_to_c_name_address \ +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix \ +nm_file_list_spec \ +lt_prog_compiler_no_builtin_flag \ +lt_prog_compiler_pic \ +lt_prog_compiler_wl \ +lt_prog_compiler_static \ +lt_cv_prog_compiler_c_o \ +need_locks \ +MANIFEST_TOOL \ +DSYMUTIL \ +NMEDIT \ +LIPO \ +OTOOL \ +OTOOL64 \ +shrext_cmds \ +export_dynamic_flag_spec \ +whole_archive_flag_spec \ +compiler_needs_object \ +with_gnu_ld \ +allow_undefined_flag \ +no_undefined_flag \ +hardcode_libdir_flag_spec \ +hardcode_libdir_separator \ +exclude_expsyms \ +include_expsyms \ +file_list_spec \ +variables_saved_for_relink \ +libname_spec \ +library_names_spec \ +soname_spec \ +install_override_mode \ +finish_eval \ +old_striplib \ +striplib \ +compiler_lib_search_dirs \ +predep_objects \ +postdep_objects \ +predeps \ +postdeps \ +compiler_lib_search_path \ +LD_CXX \ +reload_flag_CXX \ +compiler_CXX \ +lt_prog_compiler_no_builtin_flag_CXX \ +lt_prog_compiler_pic_CXX \ +lt_prog_compiler_wl_CXX \ +lt_prog_compiler_static_CXX \ +lt_cv_prog_compiler_c_o_CXX \ +export_dynamic_flag_spec_CXX \ +whole_archive_flag_spec_CXX \ +compiler_needs_object_CXX \ +with_gnu_ld_CXX \ +allow_undefined_flag_CXX \ +no_undefined_flag_CXX \ +hardcode_libdir_flag_spec_CXX \ +hardcode_libdir_separator_CXX \ +exclude_expsyms_CXX \ +include_expsyms_CXX \ +file_list_spec_CXX \ +compiler_lib_search_dirs_CXX \ +predep_objects_CXX \ +postdep_objects_CXX \ 
+predeps_CXX \ +postdeps_CXX \ +compiler_lib_search_path_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. +for var in reload_cmds \ +old_postinstall_cmds \ +old_postuninstall_cmds \ +old_archive_cmds \ +extract_expsyms_cmds \ +old_archive_from_new_cmds \ +old_archive_from_expsyms_cmds \ +archive_cmds \ +archive_expsym_cmds \ +module_cmds \ +module_expsym_cmds \ +export_symbols_cmds \ +prelink_cmds \ +postlink_cmds \ +postinstall_cmds \ +postuninstall_cmds \ +finish_cmds \ +sys_lib_search_path_spec \ +sys_lib_dlsearch_path_spec \ +reload_cmds_CXX \ +old_archive_cmds_CXX \ +old_archive_from_new_cmds_CXX \ +old_archive_from_expsyms_cmds_CXX \ +archive_cmds_CXX \ +archive_expsym_cmds_CXX \ +module_cmds_CXX \ +module_expsym_cmds_CXX \ +export_symbols_cmds_CXX \ +prelink_cmds_CXX \ +postlink_cmds_CXX; do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[\\\\\\\`\\"\\\$]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +ac_aux_dir='$ac_aux_dir' +xsi_shell='$xsi_shell' +lt_shell_append='$lt_shell_append' + +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + + + PACKAGE='$PACKAGE' + VERSION='$VERSION' + TIMESTAMP='$TIMESTAMP' + RM='$RM' + ofile='$ofile' + + + + + + +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + +# Handling of arguments. +for ac_config_target in $ac_config_targets +do + case $ac_config_target in + "build-aux/config.h") CONFIG_HEADERS="$CONFIG_HEADERS build-aux/config.h" ;; + "Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;; + "scripts/gtest-config") CONFIG_FILES="$CONFIG_FILES scripts/gtest-config" ;; + "depfiles") CONFIG_COMMANDS="$CONFIG_COMMANDS depfiles" ;; + "libtool") CONFIG_COMMANDS="$CONFIG_COMMANDS libtool" ;; + + *) as_fn_error $? "invalid argument: \`$ac_config_target'" "$LINENO" 5;; + esac +done + + +# If the user did not use the arguments to specify the items to instantiate, +# then the envvar interface is used. Set only those that are not. +# We use the long form for the default assignment because of an extremely +# bizarre bug on SunOS 4.1.3. +if $ac_need_defaults; then + test "${CONFIG_FILES+set}" = set || CONFIG_FILES=$config_files + test "${CONFIG_HEADERS+set}" = set || CONFIG_HEADERS=$config_headers + test "${CONFIG_COMMANDS+set}" = set || CONFIG_COMMANDS=$config_commands +fi + +# Have a temporary directory for convenience. Make it in the build tree +# simply because there is no reason against having it here, and in addition, +# creating and moving files from /tmp can sometimes cause problems. +# Hook for its removal unless debugging. +# Note that there is a small window in which the directory will not be cleaned: +# after its creation but before its name has been assigned to `$tmp'. +$debug || +{ + tmp= ac_tmp= + trap 'exit_status=$? + : "${ac_tmp:=$tmp}" + { test ! -d "$ac_tmp" || rm -fr "$ac_tmp"; } && exit $exit_status +' 0 + trap 'as_fn_exit 1' 1 2 13 15 +} +# Create a (secure) tmp directory for tmp files. 
+ +{ + tmp=`(umask 077 && mktemp -d "./confXXXXXX") 2>/dev/null` && + test -d "$tmp" +} || +{ + tmp=./conf$$-$RANDOM + (umask 077 && mkdir "$tmp") +} || as_fn_error $? "cannot create a temporary directory in ." "$LINENO" 5 +ac_tmp=$tmp + +# Set up the scripts for CONFIG_FILES section. +# No need to generate them if there are no CONFIG_FILES. +# This happens for instance with `./config.status config.h'. +if test -n "$CONFIG_FILES"; then + + +ac_cr=`echo X | tr X '\015'` +# On cygwin, bash can eat \r inside `` if the user requested igncr. +# But we know of no other shell where ac_cr would be empty at this +# point, so we can use a bashism as a fallback. +if test "x$ac_cr" = x; then + eval ac_cr=\$\'\\r\' +fi +ac_cs_awk_cr=`$AWK 'BEGIN { print "a\rb" }' /dev/null` +if test "$ac_cs_awk_cr" = "a${ac_cr}b"; then + ac_cs_awk_cr='\\r' +else + ac_cs_awk_cr=$ac_cr +fi + +echo 'BEGIN {' >"$ac_tmp/subs1.awk" && +_ACEOF + + +{ + echo "cat >conf$$subs.awk <<_ACEOF" && + echo "$ac_subst_vars" | sed 's/.*/&!$&$ac_delim/' && + echo "_ACEOF" +} >conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 +ac_delim_num=`echo "$ac_subst_vars" | grep -c '^'` +ac_delim='%!_!# ' +for ac_last_try in false false false false false :; do + . ./conf$$subs.sh || + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + + ac_delim_n=`sed -n "s/.*$ac_delim\$/X/p" conf$$subs.awk | grep -c X` + if test $ac_delim_n = $ac_delim_num; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_STATUS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done +rm -f conf$$subs.sh + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +cat >>"\$ac_tmp/subs1.awk" <<\\_ACAWK && +_ACEOF +sed -n ' +h +s/^/S["/; s/!.*/"]=/ +p +g +s/^[^!]*!// +:repl +t repl +s/'"$ac_delim"'$// +t delim +:nl +h +s/\(.\{148\}\)..*/\1/ +t more1 +s/["\\]/\\&/g; s/^/"/; s/$/\\n"\\/ +p +n +b repl +:more1 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t nl +:delim +h +s/\(.\{148\}\)..*/\1/ +t more2 +s/["\\]/\\&/g; s/^/"/; s/$/"/ +p +b +:more2 +s/["\\]/\\&/g; s/^/"/; s/$/"\\/ +p +g +s/.\{148\}// +t delim +' >$CONFIG_STATUS || ac_write_fail=1 +rm -f conf$$subs.awk +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +_ACAWK +cat >>"\$ac_tmp/subs1.awk" <<_ACAWK && + for (key in S) S_is_set[key] = 1 + FS = "" + +} +{ + line = $ 0 + nfields = split(line, field, "@") + substed = 0 + len = length(field[1]) + for (i = 2; i < nfields; i++) { + key = field[i] + keylen = length(key) + if (S_is_set[key]) { + value = S[key] + line = substr(line, 1, len) "" value "" substr(line, len + keylen + 3) + len += length(value) + length(field[++i]) + substed = 1 + } else + len += 1 + keylen + } + + print line +} + +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +if sed "s/$ac_cr//" < /dev/null > /dev/null 2>&1; then + sed "s/$ac_cr\$//; s/$ac_cr/$ac_cs_awk_cr/g" +else + cat +fi < "$ac_tmp/subs1.awk" > "$ac_tmp/subs.awk" \ + || as_fn_error $? "could not setup config files machinery" "$LINENO" 5 +_ACEOF + +# VPATH may cause trouble with some makes, so we remove sole $(srcdir), +# ${srcdir} and @srcdir@ entries from VPATH if srcdir is ".", strip leading and +# trailing colons and then remove the whole line if VPATH becomes empty +# (actually we leave an empty line to preserve line numbers). 
+if test "x$srcdir" = x.; then + ac_vpsub='/^[ ]*VPATH[ ]*=[ ]*/{ +h +s/// +s/^/:/ +s/[ ]*$/:/ +s/:\$(srcdir):/:/g +s/:\${srcdir}:/:/g +s/:@srcdir@:/:/g +s/^:*// +s/:*$// +x +s/\(=[ ]*\).*/\1/ +G +s/\n// +s/^[^=]*=[ ]*$// +}' +fi + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +fi # test -n "$CONFIG_FILES" + +# Set up the scripts for CONFIG_HEADERS section. +# No need to generate them if there are no CONFIG_HEADERS. +# This happens for instance with `./config.status Makefile'. +if test -n "$CONFIG_HEADERS"; then +cat >"$ac_tmp/defines.awk" <<\_ACAWK || +BEGIN { +_ACEOF + +# Transform confdefs.h into an awk script `defines.awk', embedded as +# here-document in config.status, that substitutes the proper values into +# config.h.in to produce config.h. + +# Create a delimiter string that does not exist in confdefs.h, to ease +# handling of long lines. +ac_delim='%!_!# ' +for ac_last_try in false false :; do + ac_tt=`sed -n "/$ac_delim/p" confdefs.h` + if test -z "$ac_tt"; then + break + elif $ac_last_try; then + as_fn_error $? "could not make $CONFIG_HEADERS" "$LINENO" 5 + else + ac_delim="$ac_delim!$ac_delim _$ac_delim!! " + fi +done + +# For the awk script, D is an array of macro values keyed by name, +# likewise P contains macro parameters if any. Preserve backslash +# newline sequences. + +ac_word_re=[_$as_cr_Letters][_$as_cr_alnum]* +sed -n ' +s/.\{148\}/&'"$ac_delim"'/g +t rset +:rset +s/^[ ]*#[ ]*define[ ][ ]*/ / +t def +d +:def +s/\\$// +t bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3"/p +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2"/p +d +:bsnl +s/["\\]/\\&/g +s/^ \('"$ac_word_re"'\)\(([^()]*)\)[ ]*\(.*\)/P["\1"]="\2"\ +D["\1"]=" \3\\\\\\n"\\/p +t cont +s/^ \('"$ac_word_re"'\)[ ]*\(.*\)/D["\1"]=" \2\\\\\\n"\\/p +t cont +d +:cont +n +s/.\{148\}/&'"$ac_delim"'/g +t clear +:clear +s/\\$// +t bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/"/p +d +:bsnlc +s/["\\]/\\&/g; s/^/"/; s/$/\\\\\\n"\\/p +b cont +' >$CONFIG_STATUS || ac_write_fail=1 + +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + for (key in D) D_is_set[key] = 1 + FS = "" +} +/^[\t ]*#[\t ]*(define|undef)[\t ]+$ac_word_re([\t (]|\$)/ { + line = \$ 0 + split(line, arg, " ") + if (arg[1] == "#") { + defundef = arg[2] + mac1 = arg[3] + } else { + defundef = substr(arg[1], 2) + mac1 = arg[2] + } + split(mac1, mac2, "(") #) + macro = mac2[1] + prefix = substr(line, 1, index(line, defundef) - 1) + if (D_is_set[macro]) { + # Preserve the white space surrounding the "#". + print prefix "define", macro P[macro] D[macro] + next + } else { + # Replace #undef with comments. This is necessary, for example, + # in the case of _POSIX_SOURCE, which is predefined and required + # on some systems where configure will not decide to define it. + if (defundef == "undef") { + print "/*", prefix defundef, macro, "*/" + next + } + } +} +{ print } +_ACAWK +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 + as_fn_error $? "could not setup config headers machinery" "$LINENO" 5 +fi # test -n "$CONFIG_HEADERS" + + +eval set X " :F $CONFIG_FILES :H $CONFIG_HEADERS :C $CONFIG_COMMANDS" +shift +for ac_tag +do + case $ac_tag in + :[FHLC]) ac_mode=$ac_tag; continue;; + esac + case $ac_mode$ac_tag in + :[FHL]*:*);; + :L* | :C*:*) as_fn_error $? 
"invalid tag \`$ac_tag'" "$LINENO" 5;; + :[FH]-) ac_tag=-:-;; + :[FH]*) ac_tag=$ac_tag:$ac_tag.in;; + esac + ac_save_IFS=$IFS + IFS=: + set x $ac_tag + IFS=$ac_save_IFS + shift + ac_file=$1 + shift + + case $ac_mode in + :L) ac_source=$1;; + :[FH]) + ac_file_inputs= + for ac_f + do + case $ac_f in + -) ac_f="$ac_tmp/stdin";; + *) # Look for the file first in the build tree, then in the source tree + # (if the path is not absolute). The absolute path cannot be DOS-style, + # because $ac_f cannot contain `:'. + test -f "$ac_f" || + case $ac_f in + [\\/$]*) false;; + *) test -f "$srcdir/$ac_f" && ac_f="$srcdir/$ac_f";; + esac || + as_fn_error 1 "cannot find input file: \`$ac_f'" "$LINENO" 5;; + esac + case $ac_f in *\'*) ac_f=`$as_echo "$ac_f" | sed "s/'/'\\\\\\\\''/g"`;; esac + as_fn_append ac_file_inputs " '$ac_f'" + done + + # Let's still pretend it is `configure' which instantiates (i.e., don't + # use $as_me), people would be surprised to read: + # /* config.h. Generated by config.status. */ + configure_input='Generated from '` + $as_echo "$*" | sed 's|^[^:]*/||;s|:[^:]*/|, |g' + `' by configure.' + if test x"$ac_file" != x-; then + configure_input="$ac_file. $configure_input" + { $as_echo "$as_me:${as_lineno-$LINENO}: creating $ac_file" >&5 +$as_echo "$as_me: creating $ac_file" >&6;} + fi + # Neutralize special characters interpreted by sed in replacement strings. + case $configure_input in #( + *\&* | *\|* | *\\* ) + ac_sed_conf_input=`$as_echo "$configure_input" | + sed 's/[\\\\&|]/\\\\&/g'`;; #( + *) ac_sed_conf_input=$configure_input;; + esac + + case $ac_tag in + *:-:* | *:-) cat >"$ac_tmp/stdin" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 ;; + esac + ;; + esac + + ac_dir=`$as_dirname -- "$ac_file" || +$as_expr X"$ac_file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$ac_file" : 'X\(//\)[^/]' \| \ + X"$ac_file" : 'X\(//\)$' \| \ + X"$ac_file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$ac_file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir="$ac_dir"; as_fn_mkdir_p + ac_builddir=. + +case "$ac_dir" in +.) ac_dir_suffix= ac_top_builddir_sub=. ac_top_build_prefix= ;; +*) + ac_dir_suffix=/`$as_echo "$ac_dir" | sed 's|^\.[\\/]||'` + # A ".." for each directory in $ac_dir_suffix. + ac_top_builddir_sub=`$as_echo "$ac_dir_suffix" | sed 's|/[^\\/]*|/..|g;s|/||'` + case $ac_top_builddir_sub in + "") ac_top_builddir_sub=. ac_top_build_prefix= ;; + *) ac_top_build_prefix=$ac_top_builddir_sub/ ;; + esac ;; +esac +ac_abs_top_builddir=$ac_pwd +ac_abs_builddir=$ac_pwd$ac_dir_suffix +# for backward compatibility: +ac_top_builddir=$ac_top_build_prefix + +case $srcdir in + .) # We are building in place. + ac_srcdir=. + ac_top_srcdir=$ac_top_builddir_sub + ac_abs_top_srcdir=$ac_pwd ;; + [\\/]* | ?:[\\/]* ) # Absolute name. + ac_srcdir=$srcdir$ac_dir_suffix; + ac_top_srcdir=$srcdir + ac_abs_top_srcdir=$srcdir ;; + *) # Relative name. 
+ ac_srcdir=$ac_top_build_prefix$srcdir$ac_dir_suffix + ac_top_srcdir=$ac_top_build_prefix$srcdir + ac_abs_top_srcdir=$ac_pwd/$srcdir ;; +esac +ac_abs_srcdir=$ac_abs_top_srcdir$ac_dir_suffix + + + case $ac_mode in + :F) + # + # CONFIG_FILE + # + + case $INSTALL in + [\\/$]* | ?:[\\/]* ) ac_INSTALL=$INSTALL ;; + *) ac_INSTALL=$ac_top_build_prefix$INSTALL ;; + esac + ac_MKDIR_P=$MKDIR_P + case $MKDIR_P in + [\\/$]* | ?:[\\/]* ) ;; + */*) ac_MKDIR_P=$ac_top_build_prefix$MKDIR_P ;; + esac +_ACEOF + +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +# If the template does not know about datarootdir, expand it. +# FIXME: This hack should be removed a few years after 2.60. +ac_datarootdir_hack=; ac_datarootdir_seen= +ac_sed_dataroot=' +/datarootdir/ { + p + q +} +/@datadir@/p +/@docdir@/p +/@infodir@/p +/@localedir@/p +/@mandir@/p' +case `eval "sed -n \"\$ac_sed_dataroot\" $ac_file_inputs"` in +*datarootdir*) ac_datarootdir_seen=yes;; +*@datadir@*|*@docdir@*|*@infodir@*|*@localedir@*|*@mandir@*) + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&5 +$as_echo "$as_me: WARNING: $ac_file_inputs seems to ignore the --datarootdir setting" >&2;} +_ACEOF +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 + ac_datarootdir_hack=' + s&@datadir@&$datadir&g + s&@docdir@&$docdir&g + s&@infodir@&$infodir&g + s&@localedir@&$localedir&g + s&@mandir@&$mandir&g + s&\\\${datarootdir}&$datarootdir&g' ;; +esac +_ACEOF + +# Neutralize VPATH when `$srcdir' = `.'. +# Shell code in configure.ac might set extrasub. +# FIXME: do we really want to maintain this feature? +cat >>$CONFIG_STATUS <<_ACEOF || ac_write_fail=1 +ac_sed_extra="$ac_vpsub +$extrasub +_ACEOF +cat >>$CONFIG_STATUS <<\_ACEOF || ac_write_fail=1 +:t +/@[a-zA-Z_][a-zA-Z_0-9]*@/!b +s|@configure_input@|$ac_sed_conf_input|;t t +s&@top_builddir@&$ac_top_builddir_sub&;t t +s&@top_build_prefix@&$ac_top_build_prefix&;t t +s&@srcdir@&$ac_srcdir&;t t +s&@abs_srcdir@&$ac_abs_srcdir&;t t +s&@top_srcdir@&$ac_top_srcdir&;t t +s&@abs_top_srcdir@&$ac_abs_top_srcdir&;t t +s&@builddir@&$ac_builddir&;t t +s&@abs_builddir@&$ac_abs_builddir&;t t +s&@abs_top_builddir@&$ac_abs_top_builddir&;t t +s&@INSTALL@&$ac_INSTALL&;t t +s&@MKDIR_P@&$ac_MKDIR_P&;t t +$ac_datarootdir_hack +" +eval sed \"\$ac_sed_extra\" "$ac_file_inputs" | $AWK -f "$ac_tmp/subs.awk" \ + >$ac_tmp/out || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + +test -z "$ac_datarootdir_hack$ac_datarootdir_seen" && + { ac_out=`sed -n '/\${datarootdir}/p' "$ac_tmp/out"`; test -n "$ac_out"; } && + { ac_out=`sed -n '/^[ ]*datarootdir[ ]*:*=/p' \ + "$ac_tmp/out"`; test -z "$ac_out"; } && + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&5 +$as_echo "$as_me: WARNING: $ac_file contains a reference to the variable \`datarootdir' +which seems to be undefined. Please make sure it is defined" >&2;} + + rm -f "$ac_tmp/stdin" + case $ac_file in + -) cat "$ac_tmp/out" && rm -f "$ac_tmp/out";; + *) rm -f "$ac_file" && mv "$ac_tmp/out" "$ac_file";; + esac \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + ;; + :H) + # + # CONFIG_HEADER + # + if test x"$ac_file" != x-; then + { + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" + } >"$ac_tmp/config.h" \ + || as_fn_error $? 
"could not create $ac_file" "$LINENO" 5 + if diff "$ac_file" "$ac_tmp/config.h" >/dev/null 2>&1; then + { $as_echo "$as_me:${as_lineno-$LINENO}: $ac_file is unchanged" >&5 +$as_echo "$as_me: $ac_file is unchanged" >&6;} + else + rm -f "$ac_file" + mv "$ac_tmp/config.h" "$ac_file" \ + || as_fn_error $? "could not create $ac_file" "$LINENO" 5 + fi + else + $as_echo "/* $configure_input */" \ + && eval '$AWK -f "$ac_tmp/defines.awk"' "$ac_file_inputs" \ + || as_fn_error $? "could not create -" "$LINENO" 5 + fi +# Compute "$ac_file"'s index in $config_headers. +_am_arg="$ac_file" +_am_stamp_count=1 +for _am_header in $config_headers :; do + case $_am_header in + $_am_arg | $_am_arg:* ) + break ;; + * ) + _am_stamp_count=`expr $_am_stamp_count + 1` ;; + esac +done +echo "timestamp for $_am_arg" >`$as_dirname -- "$_am_arg" || +$as_expr X"$_am_arg" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$_am_arg" : 'X\(//\)[^/]' \| \ + X"$_am_arg" : 'X\(//\)$' \| \ + X"$_am_arg" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$_am_arg" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'`/stamp-h$_am_stamp_count + ;; + + :C) { $as_echo "$as_me:${as_lineno-$LINENO}: executing $ac_file commands" >&5 +$as_echo "$as_me: executing $ac_file commands" >&6;} + ;; + esac + + + case $ac_file$ac_mode in + "scripts/gtest-config":F) chmod +x scripts/gtest-config ;; + "depfiles":C) test x"$AMDEP_TRUE" != x"" || { + # Autoconf 2.62 quotes --file arguments for eval, but not when files + # are listed without --file. Let's play safe and only enable the eval + # if we detect the quoting. + case $CONFIG_FILES in + *\'*) eval set x "$CONFIG_FILES" ;; + *) set x $CONFIG_FILES ;; + esac + shift + for mf + do + # Strip MF so we end up with the name of the file. + mf=`echo "$mf" | sed -e 's/:.*$//'` + # Check whether this is an Automake generated Makefile or not. + # We used to match only the files named `Makefile.in', but + # some people rename them; so instead we look at the file content. + # Grep'ing the first line is not enough: some people post-process + # each Makefile.in and add a new line on top of each file to say so. + # Grep'ing the whole file is not good either: AIX grep has a line + # limit of 2048, but all sed's we know have understand at least 4000. + if sed -n 's,^#.*generated by automake.*,X,p' "$mf" | grep X >/dev/null 2>&1; then + dirpart=`$as_dirname -- "$mf" || +$as_expr X"$mf" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$mf" : 'X\(//\)[^/]' \| \ + X"$mf" : 'X\(//\)$' \| \ + X"$mf" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$mf" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + else + continue + fi + # Extract the definition of DEPDIR, am__include, and am__quote + # from the Makefile without running `make'. + DEPDIR=`sed -n 's/^DEPDIR = //p' < "$mf"` + test -z "$DEPDIR" && continue + am__include=`sed -n 's/^am__include = //p' < "$mf"` + test -z "am__include" && continue + am__quote=`sed -n 's/^am__quote = //p' < "$mf"` + # When using ansi2knr, U may be empty or an underscore; expand it + U=`sed -n 's/^U = //p' < "$mf"` + # Find all dependency output files, they are included files with + # $(DEPDIR) in their names. We invoke sed twice because it is the + # simplest approach to changing $(DEPDIR) to its actual value in the + # expansion. 
+ for file in `sed -n " + s/^$am__include $am__quote\(.*(DEPDIR).*\)$am__quote"'$/\1/p' <"$mf" | \ + sed -e 's/\$(DEPDIR)/'"$DEPDIR"'/g' -e 's/\$U/'"$U"'/g'`; do + # Make sure the directory exists. + test -f "$dirpart/$file" && continue + fdir=`$as_dirname -- "$file" || +$as_expr X"$file" : 'X\(.*[^/]\)//*[^/][^/]*/*$' \| \ + X"$file" : 'X\(//\)[^/]' \| \ + X"$file" : 'X\(//\)$' \| \ + X"$file" : 'X\(/\)' \| . 2>/dev/null || +$as_echo X"$file" | + sed '/^X\(.*[^/]\)\/\/*[^/][^/]*\/*$/{ + s//\1/ + q + } + /^X\(\/\/\)[^/].*/{ + s//\1/ + q + } + /^X\(\/\/\)$/{ + s//\1/ + q + } + /^X\(\/\).*/{ + s//\1/ + q + } + s/.*/./; q'` + as_dir=$dirpart/$fdir; as_fn_mkdir_p + # echo "creating $dirpart/$file" + echo '# dummy' > "$dirpart/$file" + done + done +} + ;; + "libtool":C) + + # See if we are running on zsh, and set the options which allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + + cfgfile="${ofile}T" + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL + +# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is part of GNU Libtool. +# +# GNU Libtool is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. +# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, or +# obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. + + +# The names of the tagged configurations supported by this script. +available_tags="CXX " + +# ### BEGIN LIBTOOL CONFIG + +# Which release of libtool.m4 was used? +macro_version=$macro_version +macro_revision=$macro_revision + +# Whether or not to build shared libraries. +build_libtool_libs=$enable_shared + +# Whether or not to build static libraries. +build_old_libs=$enable_static + +# What type of objects to build. +pic_mode=$pic_mode + +# Whether or not to optimize for fast installation. +fast_install=$enable_fast_install + +# Shell to use when invoking shell scripts. +SHELL=$lt_SHELL + +# An echo program that protects backslashes. +ECHO=$lt_ECHO + +# The PATH separator for the build system. +PATH_SEPARATOR=$lt_PATH_SEPARATOR + +# The host system. 
+host_alias=$host_alias +host=$host +host_os=$host_os + +# The build system. +build_alias=$build_alias +build=$build +build_os=$build_os + +# A sed program that does not truncate output. +SED=$lt_SED + +# Sed that helps us avoid accidentally triggering echo(1) options like -n. +Xsed="\$SED -e 1s/^X//" + +# A grep program that handles long lines. +GREP=$lt_GREP + +# An ERE matcher. +EGREP=$lt_EGREP + +# A literal string matcher. +FGREP=$lt_FGREP + +# A BSD- or MS-compatible name lister. +NM=$lt_NM + +# Whether we need soft or hard links. +LN_S=$lt_LN_S + +# What is the maximum length of a command? +max_cmd_len=$max_cmd_len + +# Object file suffix (normally "o"). +objext=$ac_objext + +# Executable file suffix (normally ""). +exeext=$exeext + +# whether the shell understands "unset". +lt_unset=$lt_unset + +# turn spaces into newlines. +SP2NL=$lt_lt_SP2NL + +# turn newlines into spaces. +NL2SP=$lt_lt_NL2SP + +# convert \$build file names to \$host format. +to_host_file_cmd=$lt_cv_to_host_file_cmd + +# convert \$build files to toolchain format. +to_tool_file_cmd=$lt_cv_to_tool_file_cmd + +# An object symbol dumper. +OBJDUMP=$lt_OBJDUMP + +# Method to check whether dependent libraries are shared objects. +deplibs_check_method=$lt_deplibs_check_method + +# Command to use when deplibs_check_method = "file_magic". +file_magic_cmd=$lt_file_magic_cmd + +# How to find potential files when deplibs_check_method = "file_magic". +file_magic_glob=$lt_file_magic_glob + +# Find potential files using nocaseglob when deplibs_check_method = "file_magic". +want_nocaseglob=$lt_want_nocaseglob + +# DLL creation program. +DLLTOOL=$lt_DLLTOOL + +# Command to associate shared and link libraries. +sharedlib_from_linklib_cmd=$lt_sharedlib_from_linklib_cmd + +# The archiver. +AR=$lt_AR + +# Flags to create an archive. +AR_FLAGS=$lt_AR_FLAGS + +# How to feed a file listing to the archiver. +archiver_list_spec=$lt_archiver_list_spec + +# A symbol stripping program. +STRIP=$lt_STRIP + +# Commands used to install an old-style archive. +RANLIB=$lt_RANLIB +old_postinstall_cmds=$lt_old_postinstall_cmds +old_postuninstall_cmds=$lt_old_postuninstall_cmds + +# Whether to use a lock for old archive extraction. +lock_old_archive_extraction=$lock_old_archive_extraction + +# A C compiler. +LTCC=$lt_CC + +# LTCC compiler flags. +LTCFLAGS=$lt_CFLAGS + +# Take the output of nm and produce a listing of raw symbols and C names. +global_symbol_pipe=$lt_lt_cv_sys_global_symbol_pipe + +# Transform the output of nm in a proper C declaration. +global_symbol_to_cdecl=$lt_lt_cv_sys_global_symbol_to_cdecl + +# Transform the output of nm in a C name address pair. +global_symbol_to_c_name_address=$lt_lt_cv_sys_global_symbol_to_c_name_address + +# Transform the output of nm in a C name address pair when lib prefix is needed. +global_symbol_to_c_name_address_lib_prefix=$lt_lt_cv_sys_global_symbol_to_c_name_address_lib_prefix + +# Specify filename containing input files for \$NM. +nm_file_list_spec=$lt_nm_file_list_spec + +# The root where to search for dependent libraries,and in which our libraries should be installed. +lt_sysroot=$lt_sysroot + +# The name of the directory that contains temporary libtool files. +objdir=$objdir + +# Used to examine libraries when file_magic_cmd begins with "file". +MAGIC_CMD=$MAGIC_CMD + +# Must we lock files when doing compilation? +need_locks=$lt_need_locks + +# Manifest tool. +MANIFEST_TOOL=$lt_MANIFEST_TOOL + +# Tool to manipulate archived DWARF debug symbol files on Mac OS X. 
+DSYMUTIL=$lt_DSYMUTIL + +# Tool to change global to local symbols on Mac OS X. +NMEDIT=$lt_NMEDIT + +# Tool to manipulate fat objects and archives on Mac OS X. +LIPO=$lt_LIPO + +# ldd/readelf like tool for Mach-O binaries on Mac OS X. +OTOOL=$lt_OTOOL + +# ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4. +OTOOL64=$lt_OTOOL64 + +# Old archive suffix (normally "a"). +libext=$libext + +# Shared library suffix (normally ".so"). +shrext_cmds=$lt_shrext_cmds + +# The commands to extract the exported symbol list from a shared archive. +extract_expsyms_cmds=$lt_extract_expsyms_cmds + +# Variables whose values should be saved in libtool wrapper scripts and +# restored at link time. +variables_saved_for_relink=$lt_variables_saved_for_relink + +# Do we need the "lib" prefix for modules? +need_lib_prefix=$need_lib_prefix + +# Do we need a version for libraries? +need_version=$need_version + +# Library versioning type. +version_type=$version_type + +# Shared library runtime path variable. +runpath_var=$runpath_var + +# Shared library path variable. +shlibpath_var=$shlibpath_var + +# Is shlibpath searched before the hard-coded library search path? +shlibpath_overrides_runpath=$shlibpath_overrides_runpath + +# Format of library name prefix. +libname_spec=$lt_libname_spec + +# List of archive names. First name is the real one, the rest are links. +# The last name is the one that the linker finds with -lNAME +library_names_spec=$lt_library_names_spec + +# The coded name of the library, if different from the real name. +soname_spec=$lt_soname_spec + +# Permission mode override for installation of shared libraries. +install_override_mode=$lt_install_override_mode + +# Command to use after installation of a shared archive. +postinstall_cmds=$lt_postinstall_cmds + +# Command to use after uninstallation of a shared archive. +postuninstall_cmds=$lt_postuninstall_cmds + +# Commands used to finish a libtool library installation in a directory. +finish_cmds=$lt_finish_cmds + +# As "finish_cmds", except a single script fragment to be evaled but +# not shown. +finish_eval=$lt_finish_eval + +# Whether we should hardcode library paths into libraries. +hardcode_into_libs=$hardcode_into_libs + +# Compile-time system search path for libraries. +sys_lib_search_path_spec=$lt_sys_lib_search_path_spec + +# Run-time system search path for libraries. +sys_lib_dlsearch_path_spec=$lt_sys_lib_dlsearch_path_spec + +# Whether dlopen is supported. +dlopen_support=$enable_dlopen + +# Whether dlopen of programs is supported. +dlopen_self=$enable_dlopen_self + +# Whether dlopen of statically linked programs is supported. +dlopen_self_static=$enable_dlopen_self_static + +# Commands to strip libraries. +old_striplib=$lt_old_striplib +striplib=$lt_striplib + + +# The linker used to build libraries. +LD=$lt_LD + +# How to create reloadable object files. +reload_flag=$lt_reload_flag +reload_cmds=$lt_reload_cmds + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds + +# A language specific compiler. +CC=$lt_compiler + +# Is the compiler the GNU compiler? +with_gcc=$GCC + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl + +# Compiler flag to prevent dynamic linking. 
+link_static_flag=$lt_lt_prog_compiler_static + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds +archive_expsym_cmds=$lt_archive_expsym_cmds + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds +module_expsym_cmds=$lt_module_expsym_cmds + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec + +# Whether we need a single "-rpath" flag with a separated argument. +hardcode_libdir_separator=$lt_hardcode_libdir_separator + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \${shlibpath_var} if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms + +# Commands necessary for linking programs (against libraries) with templates. 
+prelink_cmds=$lt_prelink_cmds + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects +postdep_objects=$lt_postdep_objects +predeps=$lt_predeps +postdeps=$lt_postdeps + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path + +# ### END LIBTOOL CONFIG + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + +ltmain="$ac_aux_dir/ltmain.sh" + + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? + sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + if test x"$xsi_shell" = xyes; then + sed -e '/^func_dirname ()$/,/^} # func_dirname /c\ +func_dirname ()\ +{\ +\ case ${1} in\ +\ */*) func_dirname_result="${1%/*}${2}" ;;\ +\ * ) func_dirname_result="${3}" ;;\ +\ esac\ +} # Extended-shell func_dirname implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_basename ()$/,/^} # func_basename /c\ +func_basename ()\ +{\ +\ func_basename_result="${1##*/}"\ +} # Extended-shell func_basename implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_dirname_and_basename ()$/,/^} # func_dirname_and_basename /c\ +func_dirname_and_basename ()\ +{\ +\ case ${1} in\ +\ */*) func_dirname_result="${1%/*}${2}" ;;\ +\ * ) func_dirname_result="${3}" ;;\ +\ esac\ +\ func_basename_result="${1##*/}"\ +} # Extended-shell func_dirname_and_basename implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_stripname ()$/,/^} # func_stripname /c\ +func_stripname ()\ +{\ +\ # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are\ +\ # positional parameters, so assign one to ordinary parameter first.\ +\ func_stripname_result=${3}\ +\ func_stripname_result=${func_stripname_result#"${1}"}\ +\ func_stripname_result=${func_stripname_result%"${2}"}\ +} # Extended-shell func_stripname implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? 
|| _lt_function_replace_fail=: + + + sed -e '/^func_split_long_opt ()$/,/^} # func_split_long_opt /c\ +func_split_long_opt ()\ +{\ +\ func_split_long_opt_name=${1%%=*}\ +\ func_split_long_opt_arg=${1#*=}\ +} # Extended-shell func_split_long_opt implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_split_short_opt ()$/,/^} # func_split_short_opt /c\ +func_split_short_opt ()\ +{\ +\ func_split_short_opt_arg=${1#??}\ +\ func_split_short_opt_name=${1%"$func_split_short_opt_arg"}\ +} # Extended-shell func_split_short_opt implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_lo2o ()$/,/^} # func_lo2o /c\ +func_lo2o ()\ +{\ +\ case ${1} in\ +\ *.lo) func_lo2o_result=${1%.lo}.${objext} ;;\ +\ *) func_lo2o_result=${1} ;;\ +\ esac\ +} # Extended-shell func_lo2o implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_xform ()$/,/^} # func_xform /c\ +func_xform ()\ +{\ + func_xform_result=${1%.*}.lo\ +} # Extended-shell func_xform implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_arith ()$/,/^} # func_arith /c\ +func_arith ()\ +{\ + func_arith_result=$(( $* ))\ +} # Extended-shell func_arith implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_len ()$/,/^} # func_len /c\ +func_len ()\ +{\ + func_len_result=${#1}\ +} # Extended-shell func_len implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + +fi + +if test x"$lt_shell_append" = xyes; then + sed -e '/^func_append ()$/,/^} # func_append /c\ +func_append ()\ +{\ + eval "${1}+=\\${2}"\ +} # Extended-shell func_append implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + sed -e '/^func_append_quoted ()$/,/^} # func_append_quoted /c\ +func_append_quoted ()\ +{\ +\ func_quote_for_eval "${2}"\ +\ eval "${1}+=\\\\ \\$func_quote_for_eval_result"\ +} # Extended-shell func_append_quoted implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: + + + # Save a `func_append' function call where possible by direct use of '+=' + sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? 
|| _lt_function_replace_fail=: +else + # Save a `func_append' function call even when '+=' is not available + sed -e 's%func_append \([a-zA-Z_]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? || _lt_function_replace_fail=: +fi + +if test x"$_lt_function_replace_fail" = x":"; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: Unable to substitute extended shell functions in $ofile" >&5 +$as_echo "$as_me: WARNING: Unable to substitute extended shell functions in $ofile" >&2;} +fi + + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" + + + cat <<_LT_EOF >> "$ofile" + +# ### BEGIN LIBTOOL TAG CONFIG: CXX + +# The linker used to build libraries. +LD=$lt_LD_CXX + +# How to create reloadable object files. +reload_flag=$lt_reload_flag_CXX +reload_cmds=$lt_reload_cmds_CXX + +# Commands used to build an old-style archive. +old_archive_cmds=$lt_old_archive_cmds_CXX + +# A language specific compiler. +CC=$lt_compiler_CXX + +# Is the compiler the GNU compiler? +with_gcc=$GCC_CXX + +# Compiler flag to turn off builtin functions. +no_builtin_flag=$lt_lt_prog_compiler_no_builtin_flag_CXX + +# Additional compiler flags for building library objects. +pic_flag=$lt_lt_prog_compiler_pic_CXX + +# How to pass a linker flag through the compiler. +wl=$lt_lt_prog_compiler_wl_CXX + +# Compiler flag to prevent dynamic linking. +link_static_flag=$lt_lt_prog_compiler_static_CXX + +# Does compiler simultaneously support -c and -o options? +compiler_c_o=$lt_lt_cv_prog_compiler_c_o_CXX + +# Whether or not to add -lc for building shared libraries. +build_libtool_need_lc=$archive_cmds_need_lc_CXX + +# Whether or not to disallow shared libs when runtime libs are static. +allow_libtool_libs_with_static_runtimes=$enable_shared_with_static_runtimes_CXX + +# Compiler flag to allow reflexive dlopens. +export_dynamic_flag_spec=$lt_export_dynamic_flag_spec_CXX + +# Compiler flag to generate shared objects directly from archives. +whole_archive_flag_spec=$lt_whole_archive_flag_spec_CXX + +# Whether the compiler copes with passing no objects directly. +compiler_needs_object=$lt_compiler_needs_object_CXX + +# Create an old-style archive from a shared archive. +old_archive_from_new_cmds=$lt_old_archive_from_new_cmds_CXX + +# Create a temporary old-style archive to link instead of a shared archive. +old_archive_from_expsyms_cmds=$lt_old_archive_from_expsyms_cmds_CXX + +# Commands used to build a shared archive. +archive_cmds=$lt_archive_cmds_CXX +archive_expsym_cmds=$lt_archive_expsym_cmds_CXX + +# Commands used to build a loadable module if different from building +# a shared archive. +module_cmds=$lt_module_cmds_CXX +module_expsym_cmds=$lt_module_expsym_cmds_CXX + +# Whether we are building with GNU ld or not. +with_gnu_ld=$lt_with_gnu_ld_CXX + +# Flag that allows shared libraries with undefined symbols to be built. +allow_undefined_flag=$lt_allow_undefined_flag_CXX + +# Flag that enforces no undefined symbols. +no_undefined_flag=$lt_no_undefined_flag_CXX + +# Flag to hardcode \$libdir into a binary during linking. +# This must work even if \$libdir does not exist +hardcode_libdir_flag_spec=$lt_hardcode_libdir_flag_spec_CXX + +# Whether we need a single "-rpath" flag with a separated argument. 
+hardcode_libdir_separator=$lt_hardcode_libdir_separator_CXX + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary. +hardcode_direct=$hardcode_direct_CXX + +# Set to "yes" if using DIR/libNAME\${shared_ext} during linking hardcodes +# DIR into the resulting binary and the resulting library dependency is +# "absolute",i.e impossible to change by setting \${shlibpath_var} if the +# library is relocated. +hardcode_direct_absolute=$hardcode_direct_absolute_CXX + +# Set to "yes" if using the -LDIR flag during linking hardcodes DIR +# into the resulting binary. +hardcode_minus_L=$hardcode_minus_L_CXX + +# Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR +# into the resulting binary. +hardcode_shlibpath_var=$hardcode_shlibpath_var_CXX + +# Set to "yes" if building a shared library automatically hardcodes DIR +# into the library and all subsequent libraries and executables linked +# against it. +hardcode_automatic=$hardcode_automatic_CXX + +# Set to yes if linker adds runtime paths of dependent libraries +# to runtime path list. +inherit_rpath=$inherit_rpath_CXX + +# Whether libtool must link a program against all its dependency libraries. +link_all_deplibs=$link_all_deplibs_CXX + +# Set to "yes" if exported symbols are required. +always_export_symbols=$always_export_symbols_CXX + +# The commands to list exported symbols. +export_symbols_cmds=$lt_export_symbols_cmds_CXX + +# Symbols that should not be listed in the preloaded symbols. +exclude_expsyms=$lt_exclude_expsyms_CXX + +# Symbols that must always be exported. +include_expsyms=$lt_include_expsyms_CXX + +# Commands necessary for linking programs (against libraries) with templates. +prelink_cmds=$lt_prelink_cmds_CXX + +# Commands necessary for finishing linking programs. +postlink_cmds=$lt_postlink_cmds_CXX + +# Specify filename containing input files. +file_list_spec=$lt_file_list_spec_CXX + +# How to hardcode a shared library path into an executable. +hardcode_action=$hardcode_action_CXX + +# The directories searched by this compiler when creating a shared library. +compiler_lib_search_dirs=$lt_compiler_lib_search_dirs_CXX + +# Dependencies to place before and after the objects being linked to +# create a shared library. +predep_objects=$lt_predep_objects_CXX +postdep_objects=$lt_postdep_objects_CXX +predeps=$lt_predeps_CXX +postdeps=$lt_postdeps_CXX + +# The library search path used internally by the compiler when linking +# a shared library. +compiler_lib_search_path=$lt_compiler_lib_search_path_CXX + +# ### END LIBTOOL TAG CONFIG: CXX +_LT_EOF + + ;; + + esac +done # for ac_tag + + +as_fn_exit 0 +_ACEOF +ac_clean_files=$ac_clean_files_save + +test $ac_write_fail = 0 || + as_fn_error $? "write failure creating $CONFIG_STATUS" "$LINENO" 5 + + +# configure is writing to config.log, and then calls config.status. +# config.status does its own redirection, appending to config.log. +# Unfortunately, on DOS this fails, as config.log is still kept open +# by configure, so config.status won't be able to write to it; its +# output is simply discarded. So we exec the FD to /dev/null, +# effectively closing config.log, so it can be properly (re)opened and +# appended to by config.status. When coming back to configure, we +# need to make the FD available again. 
+if test "$no_create" != yes; then + ac_cs_success=: + ac_config_status_args= + test "$silent" = yes && + ac_config_status_args="$ac_config_status_args --quiet" + exec 5>/dev/null + $SHELL $CONFIG_STATUS $ac_config_status_args || ac_cs_success=false + exec 5>>config.log + # Use ||, not &&, to avoid exiting from the if with $? = 1, which + # would make configure fail if this is the last instruction. + $ac_cs_success || as_fn_exit 1 +fi +if test -n "$ac_unrecognized_opts" && test "$enable_option_checking" != no; then + { $as_echo "$as_me:${as_lineno-$LINENO}: WARNING: unrecognized options: $ac_unrecognized_opts" >&5 +$as_echo "$as_me: WARNING: unrecognized options: $ac_unrecognized_opts" >&2;} +fi + diff --git a/external/gtest/configure.ac b/external/gtest/configure.ac new file mode 100755 index 0000000000..cc592e1583 --- /dev/null +++ b/external/gtest/configure.ac @@ -0,0 +1,68 @@ +m4_include(m4/acx_pthread.m4) + +# At this point, the Xcode project assumes the version string will be three +# integers separated by periods and surrounded by square brackets (e.g. +# "[1.0.1]"). It also asumes that there won't be any closing parenthesis +# between "AC_INIT(" and the closing ")" including comments and strings. +AC_INIT([Google C++ Testing Framework], + [1.7.0], + [googletestframework@googlegroups.com], + [gtest]) + +# Provide various options to initialize the Autoconf and configure processes. +AC_PREREQ([2.59]) +AC_CONFIG_SRCDIR([./LICENSE]) +AC_CONFIG_MACRO_DIR([m4]) +AC_CONFIG_AUX_DIR([build-aux]) +AC_CONFIG_HEADERS([build-aux/config.h]) +AC_CONFIG_FILES([Makefile]) +AC_CONFIG_FILES([scripts/gtest-config], [chmod +x scripts/gtest-config]) + +# Initialize Automake with various options. We require at least v1.9, prevent +# pedantic complaints about package files, and enable various distribution +# targets. +AM_INIT_AUTOMAKE([1.9 dist-bzip2 dist-zip foreign subdir-objects]) + +# Check for programs used in building Google Test. +AC_PROG_CC +AC_PROG_CXX +AC_LANG([C++]) +AC_PROG_LIBTOOL + +# TODO(chandlerc@google.com): Currently we aren't running the Python tests +# against the interpreter detected by AM_PATH_PYTHON, and so we condition +# HAVE_PYTHON by requiring "python" to be in the PATH, and that interpreter's +# version to be >= 2.3. This will allow the scripts to use a "/usr/bin/env" +# hashbang. +PYTHON= # We *do not* allow the user to specify a python interpreter +AC_PATH_PROG([PYTHON],[python],[:]) +AS_IF([test "$PYTHON" != ":"], + [AM_PYTHON_CHECK_VERSION([$PYTHON],[2.3],[:],[PYTHON=":"])]) +AM_CONDITIONAL([HAVE_PYTHON],[test "$PYTHON" != ":"]) + +# Configure pthreads. +AC_ARG_WITH([pthreads], + [AS_HELP_STRING([--with-pthreads], + [use pthreads (default is yes)])], + [with_pthreads=$withval], + [with_pthreads=check]) + +have_pthreads=no +AS_IF([test "x$with_pthreads" != "xno"], + [ACX_PTHREAD( + [], + [AS_IF([test "x$with_pthreads" != "xcheck"], + [AC_MSG_FAILURE( + [--with-pthreads was specified, but unable to be used])])]) + have_pthreads="$acx_pthread_ok"]) +AM_CONDITIONAL([HAVE_PTHREADS],[test "x$have_pthreads" = "xyes"]) +AC_SUBST(PTHREAD_CFLAGS) +AC_SUBST(PTHREAD_LIBS) + +# TODO(chandlerc@google.com) Check for the necessary system headers. + +# TODO(chandlerc@google.com) Check the types, structures, and other compiler +# and architecture characteristics. + +# Output the generated files. No further autoconf macros may be used. 
+AC_OUTPUT diff --git a/external/gtest/fused-src/gtest/gtest-all.cc b/external/gtest/fused-src/gtest/gtest-all.cc new file mode 100755 index 0000000000..a9a03b2e3b --- /dev/null +++ b/external/gtest/fused-src/gtest/gtest-all.cc @@ -0,0 +1,9592 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// +// Google C++ Testing Framework (Google Test) +// +// Sometimes it's desirable to build Google Test by compiling a single file. +// This file serves this purpose. + +// This line ensures that gtest.h can be compiled on its own, even +// when it's fused. +#include "gtest/gtest.h" + +// The following lines pull in the real gtest *.cc files. +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) + +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Utilities for testing Google Test itself and code that uses Google Test +// (e.g. frameworks built on top of Google Test). + +#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_ +#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_ + + +namespace testing { + +// This helper class can be used to mock out Google Test failure reporting +// so that we can test Google Test or code that builds on Google Test. +// +// An object of this class appends a TestPartResult object to the +// TestPartResultArray object given in the constructor whenever a Google Test +// failure is reported. It can either intercept only failures that are +// generated in the same thread that created this object or it can intercept +// all generated failures. The scope of this mock object can be controlled with +// the second argument to the two arguments constructor. +class GTEST_API_ ScopedFakeTestPartResultReporter + : public TestPartResultReporterInterface { + public: + // The two possible mocking modes of this object. + enum InterceptMode { + INTERCEPT_ONLY_CURRENT_THREAD, // Intercepts only thread local failures. + INTERCEPT_ALL_THREADS // Intercepts all failures. + }; + + // The c'tor sets this object as the test part result reporter used + // by Google Test. 
The 'result' parameter specifies where to report the + // results. This reporter will only catch failures generated in the current + // thread. DEPRECATED + explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result); + + // Same as above, but you can choose the interception scope of this object. + ScopedFakeTestPartResultReporter(InterceptMode intercept_mode, + TestPartResultArray* result); + + // The d'tor restores the previous test part result reporter. + virtual ~ScopedFakeTestPartResultReporter(); + + // Appends the TestPartResult object to the TestPartResultArray + // received in the constructor. + // + // This method is from the TestPartResultReporterInterface + // interface. + virtual void ReportTestPartResult(const TestPartResult& result); + private: + void Init(); + + const InterceptMode intercept_mode_; + TestPartResultReporterInterface* old_reporter_; + TestPartResultArray* const result_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter); +}; + +namespace internal { + +// A helper class for implementing EXPECT_FATAL_FAILURE() and +// EXPECT_NONFATAL_FAILURE(). Its destructor verifies that the given +// TestPartResultArray contains exactly one failure that has the given +// type and contains the given substring. If that's not the case, a +// non-fatal failure will be generated. +class GTEST_API_ SingleFailureChecker { + public: + // The constructor remembers the arguments. + SingleFailureChecker(const TestPartResultArray* results, + TestPartResult::Type type, + const string& substr); + ~SingleFailureChecker(); + private: + const TestPartResultArray* const results_; + const TestPartResult::Type type_; + const string substr_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker); +}; + +} // namespace internal + +} // namespace testing + +// A set of macros for testing Google Test assertions or code that's expected +// to generate Google Test fatal failures. It verifies that the given +// statement will cause exactly one fatal Google Test failure with 'substr' +// being part of the failure message. +// +// There are two different versions of this macro. EXPECT_FATAL_FAILURE only +// affects and considers failures generated in the current thread and +// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads. +// +// The verification of the assertion is done correctly even when the statement +// throws an exception or aborts the current function. +// +// Known restrictions: +// - 'statement' cannot reference local non-static variables or +// non-static members of the current object. +// - 'statement' cannot return a value. +// - You cannot stream a failure message to this macro. +// +// Note that even though the implementations of the following two +// macros are much alike, we cannot refactor them to use a common +// helper macro, due to some peculiarity in how the preprocessor +// works. The AcceptsMacroThatExpandsToUnprotectedComma test in +// gtest_unittest.cc will fail to compile if we do that. 
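To make the contract above concrete, here is a minimal usage sketch (hypothetical test and helper names; not part of the fused sources). Because 'statement' may not reference locals, the failing code lives in a free function:

    #include "gtest/gtest.h"
    #include "gtest/gtest-spi.h"

    namespace {

    // FAIL() generates a fatal failure; the helper keeps the statement
    // free of local-variable references, as the restrictions above require.
    void FailsFatally() {
      FAIL() << "expected fatal failure";
    }

    TEST(FatalFailureDemo, CatchesExactlyOneFatalFailure) {
      // Passes iff FailsFatally() produces exactly one fatal failure whose
      // message contains the given substring.
      EXPECT_FATAL_FAILURE(FailsFatally(), "expected fatal failure");
    }

    }  // namespace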
+#define EXPECT_FATAL_FAILURE(statement, substr) \ + do { \ + class GTestExpectFatalFailureHelper {\ + public:\ + static void Execute() { statement; }\ + };\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\ + GTestExpectFatalFailureHelper::Execute();\ + }\ + } while (::testing::internal::AlwaysFalse()) + +#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ + do { \ + class GTestExpectFatalFailureHelper {\ + public:\ + static void Execute() { statement; }\ + };\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ALL_THREADS, &gtest_failures);\ + GTestExpectFatalFailureHelper::Execute();\ + }\ + } while (::testing::internal::AlwaysFalse()) + +// A macro for testing Google Test assertions or code that's expected to +// generate Google Test non-fatal failures. It asserts that the given +// statement will cause exactly one non-fatal Google Test failure with 'substr' +// being part of the failure message. +// +// There are two different versions of this macro. EXPECT_NONFATAL_FAILURE only +// affects and considers failures generated in the current thread and +// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads. +// +// 'statement' is allowed to reference local variables and members of +// the current object. +// +// The verification of the assertion is done correctly even when the statement +// throws an exception or aborts the current function. +// +// Known restrictions: +// - You cannot stream a failure message to this macro. +// +// Note that even though the implementations of the following two +// macros are much alike, we cannot refactor them to use a common +// helper macro, due to some peculiarity in how the preprocessor +// works. If we do that, the code won't compile when the user gives +// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that +// expands to code containing an unprotected comma. The +// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc +// catches that. +// +// For the same reason, we have to write +// if (::testing::internal::AlwaysTrue()) { statement; } +// instead of +// GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) +// to avoid an MSVC warning on unreachable code.
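By contrast, a sketch of the non-fatal variant (again with hypothetical names): since the statement is executed inline rather than through a helper class, it may reference local variables:

    // The exact failure text is an assumption based on EXPECT_EQ's usual
    // "Actual: ..." output; only a substring has to match.
    TEST(NonfatalFailureDemo, CatchesExactlyOneNonfatalFailure) {
      int actual = 5;
      EXPECT_NONFATAL_FAILURE(EXPECT_EQ(4, actual), "Actual: 5");
    }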
+#define EXPECT_NONFATAL_FAILURE(statement, substr) \ + do {\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \ + (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter:: \ + INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\ + if (::testing::internal::AlwaysTrue()) { statement; }\ + }\ + } while (::testing::internal::AlwaysFalse()) + +#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \ + do {\ + ::testing::TestPartResultArray gtest_failures;\ + ::testing::internal::SingleFailureChecker gtest_checker(\ + &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \ + (substr));\ + {\ + ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ + ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \ + &gtest_failures);\ + if (::testing::internal::AlwaysTrue()) { statement; }\ + }\ + } while (::testing::internal::AlwaysFalse()) + +#endif // GTEST_INCLUDE_GTEST_GTEST_SPI_H_ + +#include <ctype.h> +#include <math.h> +#include <stdarg.h> +#include <stdio.h> +#include <stdlib.h> +#include <time.h> +#include <wchar.h> +#include <wctype.h> + +#include <algorithm> +#include <iomanip> +#include <limits> +#include <ostream> // NOLINT +#include <sstream> +#include <vector> + +#if GTEST_OS_LINUX + +// TODO(kenton@google.com): Use autoconf to detect availability of +// gettimeofday(). +# define GTEST_HAS_GETTIMEOFDAY_ 1 + +# include <fcntl.h> // NOLINT +# include <limits.h> // NOLINT +# include <sched.h> // NOLINT +// Declares vsnprintf(). This header is not available on Windows. +# include <strings.h> // NOLINT +# include <sys/mman.h> // NOLINT +# include <sys/time.h> // NOLINT +# include <unistd.h> // NOLINT +# include <string> + +#elif GTEST_OS_SYMBIAN +# define GTEST_HAS_GETTIMEOFDAY_ 1 +# include <sys/time.h> // NOLINT + +#elif GTEST_OS_ZOS +# define GTEST_HAS_GETTIMEOFDAY_ 1 +# include <sys/time.h> // NOLINT + +// On z/OS we additionally need strings.h for strcasecmp. +# include <strings.h> // NOLINT + +#elif GTEST_OS_WINDOWS_MOBILE // We are on Windows CE. + +# include <windows.h> // NOLINT + +#elif GTEST_OS_WINDOWS // We are on Windows proper. + +# include <io.h> // NOLINT +# include <sys/timeb.h> // NOLINT +# include <sys/types.h> // NOLINT +# include <sys/stat.h> // NOLINT + +# if GTEST_OS_WINDOWS_MINGW +// MinGW has gettimeofday() but not _ftime64(). +// TODO(kenton@google.com): Use autoconf to detect availability of +// gettimeofday(). +// TODO(kenton@google.com): There are other ways to get the time on +// Windows, like GetTickCount() or GetSystemTimeAsFileTime(). MinGW +// supports these. Consider using them instead. +# define GTEST_HAS_GETTIMEOFDAY_ 1 +# include <sys/time.h> // NOLINT +# endif // GTEST_OS_WINDOWS_MINGW + +// cpplint thinks that the header is already included, so we want to +// silence it. +# include <windows.h> // NOLINT + +#else + +// Assume other platforms have gettimeofday(). +// TODO(kenton@google.com): Use autoconf to detect availability of +// gettimeofday(). +# define GTEST_HAS_GETTIMEOFDAY_ 1 + +// cpplint thinks that the header is already included, so we want to +// silence it. +# include <sys/time.h> // NOLINT +# include <unistd.h> // NOLINT + +#endif // GTEST_OS_LINUX + +#if GTEST_HAS_EXCEPTIONS +# include <stdexcept> +#endif + +#if GTEST_CAN_STREAM_RESULTS_ +# include <arpa/inet.h> // NOLINT +# include <netdb.h> // NOLINT +#endif + +// Indicates that this translation unit is part of Google Test's +// implementation. It must come before gtest-internal-inl.h is +// included, or there will be a compiler error. This trick is to +// prevent a user from accidentally including gtest-internal-inl.h in +// his code. +#define GTEST_IMPLEMENTATION_ 1 +// Copyright 2005, Google Inc. +// All rights reserved.
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Utility functions and classes used by the Google C++ testing framework. +// +// Author: wan@google.com (Zhanyong Wan) +// +// This file contains purely Google Test's internal implementation. Please +// DO NOT #INCLUDE IT IN A USER PROGRAM. + +#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_ +#define GTEST_SRC_GTEST_INTERNAL_INL_H_ + +// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is +// part of Google Test's implementation; otherwise it's undefined. +#if !GTEST_IMPLEMENTATION_ +// A user is trying to include this from his code - just say no. +# error "gtest-internal-inl.h is part of Google Test's internal implementation." +# error "It must not be included except by Google Test itself." +#endif // GTEST_IMPLEMENTATION_ + +#ifndef _WIN32_WCE +# include <errno.h> +#endif // !_WIN32_WCE +#include <stddef.h> +#include <stdlib.h> // For strtoll/_strtoul64/malloc/free. +#include <string.h> // For memmove. + +#include <algorithm> +#include <string> +#include <vector> + + +#if GTEST_CAN_STREAM_RESULTS_ +# include <arpa/inet.h> // NOLINT +# include <netdb.h> // NOLINT +#endif + +#if GTEST_OS_WINDOWS +# include <windows.h> // NOLINT +#endif // GTEST_OS_WINDOWS + + +namespace testing { + +// Declares the flags. +// +// We don't want the users to modify this flag in the code, but want +// Google Test's own unit tests to be able to access it. Therefore we +// declare it here as opposed to in gtest.h. +GTEST_DECLARE_bool_(death_test_use_fork); + +namespace internal { + +// The value of GetTestTypeId() as seen from within the Google Test +// library. This is solely for testing GetTestTypeId(). +GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest; + +// Names of the flags (needed for parsing Google Test flags).
+const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests"; +const char kBreakOnFailureFlag[] = "break_on_failure"; +const char kCatchExceptionsFlag[] = "catch_exceptions"; +const char kColorFlag[] = "color"; +const char kFilterFlag[] = "filter"; +const char kListTestsFlag[] = "list_tests"; +const char kOutputFlag[] = "output"; +const char kPrintTimeFlag[] = "print_time"; +const char kRandomSeedFlag[] = "random_seed"; +const char kRepeatFlag[] = "repeat"; +const char kShuffleFlag[] = "shuffle"; +const char kStackTraceDepthFlag[] = "stack_trace_depth"; +const char kStreamResultToFlag[] = "stream_result_to"; +const char kThrowOnFailureFlag[] = "throw_on_failure"; + +// A valid random seed must be in [1, kMaxRandomSeed]. +const int kMaxRandomSeed = 99999; + +// g_help_flag is true iff the --help flag or an equivalent form is +// specified on the command line. +GTEST_API_ extern bool g_help_flag; + +// Returns the current time in milliseconds. +GTEST_API_ TimeInMillis GetTimeInMillis(); + +// Returns true iff Google Test should use colors in the output. +GTEST_API_ bool ShouldUseColor(bool stdout_is_tty); + +// Formats the given time in milliseconds as seconds. +GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms); + +// Converts the given time in milliseconds to a date string in the ISO 8601 +// format, without the timezone information. N.B.: due to the use of the +// non-reentrant localtime() function, this function is not thread safe. Do +// not use it in any code that can be called from multiple threads. +GTEST_API_ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms); + +// Parses a string for an Int32 flag, in the form of "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +GTEST_API_ bool ParseInt32Flag( + const char* str, const char* flag, Int32* value); + +// Returns a random seed in range [1, kMaxRandomSeed] based on the +// given --gtest_random_seed flag value. +inline int GetRandomSeedFromFlag(Int32 random_seed_flag) { + const unsigned int raw_seed = (random_seed_flag == 0) ? + static_cast<unsigned int>(GetTimeInMillis()) : + static_cast<unsigned int>(random_seed_flag); + + // Normalizes the actual seed to range [1, kMaxRandomSeed] such that + // it's easy to type. + const int normalized_seed = + static_cast<int>((raw_seed - 1U) % + static_cast<unsigned int>(kMaxRandomSeed)) + 1; + return normalized_seed; +} + +// Returns the first valid random seed after 'seed'. The behavior is +// undefined if 'seed' is invalid. The seed after kMaxRandomSeed is +// considered to be 1. +inline int GetNextRandomSeed(int seed) { + GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed) + << "Invalid random seed " << seed << " - must be in [1, " + << kMaxRandomSeed << "]."; + const int next_seed = seed + 1; + return (next_seed > kMaxRandomSeed) ? 1 : next_seed; +} + +// This class saves the values of all Google Test flags in its c'tor, and +// restores them in its d'tor. +class GTestFlagSaver { + public: + // The c'tor.
+ GTestFlagSaver() { + also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests); + break_on_failure_ = GTEST_FLAG(break_on_failure); + catch_exceptions_ = GTEST_FLAG(catch_exceptions); + color_ = GTEST_FLAG(color); + death_test_style_ = GTEST_FLAG(death_test_style); + death_test_use_fork_ = GTEST_FLAG(death_test_use_fork); + filter_ = GTEST_FLAG(filter); + internal_run_death_test_ = GTEST_FLAG(internal_run_death_test); + list_tests_ = GTEST_FLAG(list_tests); + output_ = GTEST_FLAG(output); + print_time_ = GTEST_FLAG(print_time); + random_seed_ = GTEST_FLAG(random_seed); + repeat_ = GTEST_FLAG(repeat); + shuffle_ = GTEST_FLAG(shuffle); + stack_trace_depth_ = GTEST_FLAG(stack_trace_depth); + stream_result_to_ = GTEST_FLAG(stream_result_to); + throw_on_failure_ = GTEST_FLAG(throw_on_failure); + } + + // The d'tor is not virtual. DO NOT INHERIT FROM THIS CLASS. + ~GTestFlagSaver() { + GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_; + GTEST_FLAG(break_on_failure) = break_on_failure_; + GTEST_FLAG(catch_exceptions) = catch_exceptions_; + GTEST_FLAG(color) = color_; + GTEST_FLAG(death_test_style) = death_test_style_; + GTEST_FLAG(death_test_use_fork) = death_test_use_fork_; + GTEST_FLAG(filter) = filter_; + GTEST_FLAG(internal_run_death_test) = internal_run_death_test_; + GTEST_FLAG(list_tests) = list_tests_; + GTEST_FLAG(output) = output_; + GTEST_FLAG(print_time) = print_time_; + GTEST_FLAG(random_seed) = random_seed_; + GTEST_FLAG(repeat) = repeat_; + GTEST_FLAG(shuffle) = shuffle_; + GTEST_FLAG(stack_trace_depth) = stack_trace_depth_; + GTEST_FLAG(stream_result_to) = stream_result_to_; + GTEST_FLAG(throw_on_failure) = throw_on_failure_; + } + + private: + // Fields for saving the original values of flags. + bool also_run_disabled_tests_; + bool break_on_failure_; + bool catch_exceptions_; + std::string color_; + std::string death_test_style_; + bool death_test_use_fork_; + std::string filter_; + std::string internal_run_death_test_; + bool list_tests_; + std::string output_; + bool print_time_; + internal::Int32 random_seed_; + internal::Int32 repeat_; + bool shuffle_; + internal::Int32 stack_trace_depth_; + std::string stream_result_to_; + bool throw_on_failure_; +} GTEST_ATTRIBUTE_UNUSED_; + +// Converts a Unicode code point to a narrow string in UTF-8 encoding. +// code_point parameter is of type UInt32 because wchar_t may not be +// wide enough to contain a code point. +// If the code_point is not a valid Unicode code point +// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted +// to "(Invalid Unicode 0xXXXXXXXX)". +GTEST_API_ std::string CodePointToUtf8(UInt32 code_point); + +// Converts a wide string to a narrow string in UTF-8 encoding. +// The wide string is assumed to have the following encoding: +// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS) +// UTF-32 if sizeof(wchar_t) == 4 (on Linux) +// Parameter str points to a null-terminated wide string. +// Parameter num_chars may additionally limit the number +// of wchar_t characters processed. -1 is used when the entire string +// should be processed. +// If the string contains code points that are not valid Unicode code points +// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output +// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding +// and contains invalid UTF-16 surrogate pairs, values in those pairs +// will be encoded as individual Unicode characters from Basic Normal Plane. 
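A quick illustration of the conversion contract described above (the expected bytes follow standard UTF-8; they are not taken from this diff):

    // U+00E9 (LATIN SMALL LETTER E WITH ACUTE) encodes as the two bytes
    // 0xC3 0xA9; an out-of-range input such as 0x110000 would instead
    // produce the "(Invalid Unicode 0xXXXXXXXX)" placeholder text.
    const std::string e_acute = testing::internal::CodePointToUtf8(0xE9);
    // e_acute == "\xC3\xA9"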
+GTEST_API_ std::string WideStringToUtf8(const wchar_t* str, int num_chars); + +// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file +// if the variable is present. If a file already exists at this location, this +// function will write over it. If the variable is present, but the file cannot +// be created, prints an error and exits. +void WriteToShardStatusFileIfNeeded(); + +// Checks whether sharding is enabled by examining the relevant +// environment variable values. If the variables are present, +// but inconsistent (e.g., shard_index >= total_shards), prints +// an error and exits. If in_subprocess_for_death_test, sharding is +// disabled because it must only be applied to the original test +// process. Otherwise, we could filter out death tests we intended to execute. +GTEST_API_ bool ShouldShard(const char* total_shards_str, + const char* shard_index_str, + bool in_subprocess_for_death_test); + +// Parses the environment variable var as an Int32. If it is unset, +// returns default_val. If it is not an Int32, prints an error +// and aborts. +GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val); + +// Given the total number of shards, the shard index, and the test id, +// returns true iff the test should be run on this shard. The test id is +// some arbitrary but unique non-negative integer assigned to each test +// method. Assumes that 0 <= shard_index < total_shards. +GTEST_API_ bool ShouldRunTestOnShard( + int total_shards, int shard_index, int test_id); + +// STL container utilities. + +// Returns the number of elements in the given container that satisfy +// the given predicate. +template <class Container, class Predicate> +inline int CountIf(const Container& c, Predicate predicate) { + // Implemented as an explicit loop since std::count_if() in libCstd on + // Solaris has a non-standard signature. + int count = 0; + for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) { + if (predicate(*it)) + ++count; + } + return count; +} + +// Applies a function/functor to each element in the container. +template <class Container, typename Functor> +void ForEach(const Container& c, Functor functor) { + std::for_each(c.begin(), c.end(), functor); +} + +// Returns the i-th element of the vector, or default_value if i is not +// in range [0, v.size()). +template <typename E> +inline E GetElementOr(const std::vector<E>& v, int i, E default_value) { + return (i < 0 || i >= static_cast<int>(v.size())) ? default_value : v[i]; +} + +// Performs an in-place shuffle of a range of the vector's elements. +// 'begin' and 'end' are element indices as an STL-style range; +// i.e. [begin, end) are shuffled, where 'end' == size() means to +// shuffle to the end of the vector. +template <typename E> +void ShuffleRange(internal::Random* random, int begin, int end, + std::vector<E>* v) { + const int size = static_cast<int>(v->size()); + GTEST_CHECK_(0 <= begin && begin <= size) + << "Invalid shuffle range start " << begin << ": must be in range [0, " + << size << "]."; + GTEST_CHECK_(begin <= end && end <= size) + << "Invalid shuffle range finish " << end << ": must be in range [" + << begin << ", " << size << "]."; + + // Fisher-Yates shuffle, from + // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle + for (int range_width = end - begin; range_width >= 2; range_width--) { + const int last_in_range = begin + range_width - 1; + const int selected = begin + random->Generate(range_width); + std::swap((*v)[selected], (*v)[last_in_range]); + } +} + +// Performs an in-place shuffle of the vector's elements.
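The shuffle above is the classic Fisher-Yates algorithm. A self-contained sketch, with std::rand standing in for gtest's internal::Random (illustration only):

    #include <cstdlib>
    #include <utility>
    #include <vector>

    // Walks the range back to front, swapping each slot with a uniformly
    // chosen earlier (or same) slot, exactly as ShuffleRange() does.
    void FisherYatesSketch(std::vector<int>* v) {
      for (int range_width = static_cast<int>(v->size()); range_width >= 2;
           range_width--) {
        const int last_in_range = range_width - 1;
        const int selected = std::rand() % range_width;  // slight modulo bias;
                                                         // fine for a sketch
        std::swap((*v)[selected], (*v)[last_in_range]);
      }
    }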
+template <typename E> +inline void Shuffle(internal::Random* random, std::vector<E>* v) { + ShuffleRange(random, 0, static_cast<int>(v->size()), v); +} + +// A function for deleting an object. Handy for being used as a +// functor. +template <typename T> +static void Delete(T* x) { + delete x; +} + +// A predicate that checks the key of a TestProperty against a known key. +// +// TestPropertyKeyIs is copyable. +class TestPropertyKeyIs { + public: + // Constructor. + // + // TestPropertyKeyIs has NO default constructor. + explicit TestPropertyKeyIs(const std::string& key) : key_(key) {} + + // Returns true iff the test name of test property matches on key_. + bool operator()(const TestProperty& test_property) const { + return test_property.key() == key_; + } + + private: + std::string key_; +}; + +// Class UnitTestOptions. +// +// This class contains functions for processing options the user +// specifies when running the tests. It has only static members. +// +// In most cases, the user can specify an option using either an +// environment variable or a command line flag. E.g. you can set the +// test filter using either GTEST_FILTER or --gtest_filter. If both +// the variable and the flag are present, the latter overrides the +// former. +class GTEST_API_ UnitTestOptions { + public: + // Functions for processing the gtest_output flag. + + // Returns the output format, or "" for normal printed output. + static std::string GetOutputFormat(); + + // Returns the absolute path of the requested output file, or the + // default (test_detail.xml in the original working directory) if + // none was explicitly specified. + static std::string GetAbsolutePathToOutputFile(); + + // Functions for processing the gtest_filter flag. + + // Returns true iff the wildcard pattern matches the string. The + // first ':' or '\0' character in pattern marks the end of it. + // + // This recursive algorithm isn't very efficient, but is clear and + // works well enough for matching test names, which are short. + static bool PatternMatchesString(const char *pattern, const char *str); + + // Returns true iff the user-specified filter matches the test case + // name and the test name. + static bool FilterMatchesTest(const std::string &test_case_name, + const std::string &test_name); + +#if GTEST_OS_WINDOWS + // Function for supporting the gtest_catch_exception flag. + + // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the + // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise. + // This function is useful as an __except condition. + static int GTestShouldProcessSEH(DWORD exception_code); +#endif // GTEST_OS_WINDOWS + + // Returns true if "name" matches the ':' separated list of glob-style + // filters in "filter". + static bool MatchesFilter(const std::string& name, const char* filter); +}; + +// Returns the current application's name, removing directory path if that +// is present. Used by UnitTestOptions::GetOutputFile. +GTEST_API_ FilePath GetCurrentExecutableName(); + +// The role interface for getting the OS stack trace as a string. +class OsStackTraceGetterInterface { + public: + OsStackTraceGetterInterface() {} + virtual ~OsStackTraceGetterInterface() {} + + // Returns the current OS stack trace as an std::string. Parameters: + // + // max_depth - the maximum number of stack frames to be included + // in the trace. + // skip_count - the number of top frames to be skipped; doesn't count + // against max_depth.
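Returning to UnitTestOptions::PatternMatchesString above: the recursive wildcard match it documents can be approximated as follows (a standalone sketch, not the gtest implementation itself):

    // '?' matches any single character; '*' matches any run (possibly
    // empty); ':' or '\0' ends the pattern, as in a --gtest_filter list.
    bool GlobMatches(const char* pattern, const char* str) {
      switch (*pattern) {
        case '\0':
        case ':':
          return *str == '\0';
        case '?':
          return *str != '\0' && GlobMatches(pattern + 1, str + 1);
        case '*':
          return (*str != '\0' && GlobMatches(pattern, str + 1)) ||
                 GlobMatches(pattern + 1, str);
        default:
          return *pattern == *str && GlobMatches(pattern + 1, str + 1);
      }
    }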
+ virtual string CurrentStackTrace(int max_depth, int skip_count) = 0; + + // UponLeavingGTest() should be called immediately before Google Test calls + // user code. It saves some information about the current stack that + // CurrentStackTrace() will use to find and hide Google Test stack frames. + virtual void UponLeavingGTest() = 0; + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface); +}; + +// A working implementation of the OsStackTraceGetterInterface interface. +class OsStackTraceGetter : public OsStackTraceGetterInterface { + public: + OsStackTraceGetter() : caller_frame_(NULL) {} + + virtual string CurrentStackTrace(int max_depth, int skip_count) + GTEST_LOCK_EXCLUDED_(mutex_); + + virtual void UponLeavingGTest() GTEST_LOCK_EXCLUDED_(mutex_); + + // This string is inserted in place of stack frames that are part of + // Google Test's implementation. + static const char* const kElidedFramesMarker; + + private: + Mutex mutex_; // protects all internal state + + // We save the stack frame below the frame that calls user code. + // We do this because the address of the frame immediately below + // the user code changes between the call to UponLeavingGTest() + // and any calls to CurrentStackTrace() from within the user code. + void* caller_frame_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter); +}; + +// Information about a Google Test trace point. +struct TraceInfo { + const char* file; + int line; + std::string message; +}; + +// This is the default global test part result reporter used in UnitTestImpl. +// This class should only be used by UnitTestImpl. +class DefaultGlobalTestPartResultReporter + : public TestPartResultReporterInterface { + public: + explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test); + // Implements the TestPartResultReporterInterface. Reports the test part + // result in the current test. + virtual void ReportTestPartResult(const TestPartResult& result); + + private: + UnitTestImpl* const unit_test_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter); +}; + +// This is the default per thread test part result reporter used in +// UnitTestImpl. This class should only be used by UnitTestImpl. +class DefaultPerThreadTestPartResultReporter + : public TestPartResultReporterInterface { + public: + explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test); + // Implements the TestPartResultReporterInterface. The implementation just + // delegates to the current global test part result reporter of *unit_test_. + virtual void ReportTestPartResult(const TestPartResult& result); + + private: + UnitTestImpl* const unit_test_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter); +}; + +// The private implementation of the UnitTest class. We don't protect +// the methods under a mutex, as this class is not accessible by a +// user and the UnitTest class that delegates work to this class does +// proper locking. +class GTEST_API_ UnitTestImpl { + public: + explicit UnitTestImpl(UnitTest* parent); + virtual ~UnitTestImpl(); + + // There are two different ways to register your own TestPartResultReporter. + // You can register your own repoter to listen either only for test results + // from the current thread or for results from all threads. + // By default, each per-thread test result repoter just passes a new + // TestPartResult to the global test result reporter, which registers the + // test part result for the currently running test. 
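A sketch of what a reporter plugged into this mechanism can look like (hypothetical class; in practice it would be installed through ScopedFakeTestPartResultReporter from gtest-spi.h above, or through the setters that follow):

    // Counts every failed TestPartResult routed to it.
    class CountingReporter : public testing::TestPartResultReporterInterface {
     public:
      CountingReporter() : failures_(0) {}
      virtual void ReportTestPartResult(const testing::TestPartResult& result) {
        if (result.failed()) ++failures_;
      }
      int failures() const { return failures_; }

     private:
      int failures_;  // number of failed results observed so far
    };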
+ + // Returns the global test part result reporter. + TestPartResultReporterInterface* GetGlobalTestPartResultReporter(); + + // Sets the global test part result reporter. + void SetGlobalTestPartResultReporter( + TestPartResultReporterInterface* reporter); + + // Returns the test part result reporter for the current thread. + TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread(); + + // Sets the test part result reporter for the current thread. + void SetTestPartResultReporterForCurrentThread( + TestPartResultReporterInterface* reporter); + + // Gets the number of successful test cases. + int successful_test_case_count() const; + + // Gets the number of failed test cases. + int failed_test_case_count() const; + + // Gets the number of all test cases. + int total_test_case_count() const; + + // Gets the number of all test cases that contain at least one test + // that should run. + int test_case_to_run_count() const; + + // Gets the number of successful tests. + int successful_test_count() const; + + // Gets the number of failed tests. + int failed_test_count() const; + + // Gets the number of disabled tests that will be reported in the XML report. + int reportable_disabled_test_count() const; + + // Gets the number of disabled tests. + int disabled_test_count() const; + + // Gets the number of tests to be printed in the XML report. + int reportable_test_count() const; + + // Gets the number of all tests. + int total_test_count() const; + + // Gets the number of tests that should run. + int test_to_run_count() const; + + // Gets the time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp() const { return start_timestamp_; } + + // Gets the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns true iff the unit test passed (i.e. all test cases passed). + bool Passed() const { return !Failed(); } + + // Returns true iff the unit test failed (i.e. some test case failed + // or something outside of all tests failed). + bool Failed() const { + return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed(); + } + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + const TestCase* GetTestCase(int i) const { + const int index = GetElementOr(test_case_indices_, i, -1); + return index < 0 ? NULL : test_cases_[i]; + } + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + TestCase* GetMutableTestCase(int i) { + const int index = GetElementOr(test_case_indices_, i, -1); + return index < 0 ? NULL : test_cases_[index]; + } + + // Provides access to the event listener list. + TestEventListeners* listeners() { return &listeners_; } + + // Returns the TestResult for the test that's currently running, or + // the TestResult for the ad hoc test if no test is running. + TestResult* current_test_result(); + + // Returns the TestResult for the ad hoc test. + const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; } + + // Sets the OS stack trace getter. + // + // Does nothing if the input and the current OS stack trace getter + // are the same; otherwise, deletes the old getter and makes the + // input the current getter. 
+ void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter); + + // Returns the current OS stack trace getter if it is not NULL; + // otherwise, creates an OsStackTraceGetter, makes it the current + // getter, and returns it. + OsStackTraceGetterInterface* os_stack_trace_getter(); + + // Returns the current OS stack trace as an std::string. + // + // The maximum number of stack frames to be included is specified by + // the gtest_stack_trace_depth flag. The skip_count parameter + // specifies the number of top frames to be skipped, which doesn't + // count against the number of frames to be included. + // + // For example, if Foo() calls Bar(), which in turn calls + // CurrentOsStackTraceExceptTop(1), Foo() will be included in the + // trace but Bar() and CurrentOsStackTraceExceptTop() won't. + std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_; + + // Finds and returns a TestCase with the given name. If one doesn't + // exist, creates one and returns it. + // + // Arguments: + // + // test_case_name: name of the test case + // type_param: the name of the test's type parameter, or NULL if + // this is not a typed or a type-parameterized test. + // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + TestCase* GetTestCase(const char* test_case_name, + const char* type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc); + + // Adds a TestInfo to the unit test. + // + // Arguments: + // + // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + // test_info: the TestInfo object + void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc, + TestInfo* test_info) { + // In order to support thread-safe death tests, we need to + // remember the original working directory when the test program + // was first invoked. We cannot do this in RUN_ALL_TESTS(), as + // the user may have changed the current directory before calling + // RUN_ALL_TESTS(). Therefore we capture the current directory in + // AddTestInfo(), which is called to register a TEST or TEST_F + // before main() is reached. + if (original_working_dir_.IsEmpty()) { + original_working_dir_.Set(FilePath::GetCurrentDir()); + GTEST_CHECK_(!original_working_dir_.IsEmpty()) + << "Failed to get the current working directory."; + } + + GetTestCase(test_info->test_case_name(), + test_info->type_param(), + set_up_tc, + tear_down_tc)->AddTestInfo(test_info); + } + +#if GTEST_HAS_PARAM_TEST + // Returns ParameterizedTestCaseRegistry object used to keep track of + // value-parameterized tests and instantiate and register them. + internal::ParameterizedTestCaseRegistry& parameterized_test_registry() { + return parameterized_test_registry_; + } +#endif // GTEST_HAS_PARAM_TEST + + // Sets the TestCase object for the test that's currently running. + void set_current_test_case(TestCase* a_current_test_case) { + current_test_case_ = a_current_test_case; + } + + // Sets the TestInfo object for the test that's currently running. If + // current_test_info is NULL, the assertion results will be stored in + // ad_hoc_test_result_. 
+ void set_current_test_info(TestInfo* a_current_test_info) { + current_test_info_ = a_current_test_info; + } + + // Registers all parameterized tests defined using TEST_P and + // INSTANTIATE_TEST_CASE_P, creating regular tests for each test/parameter + // combination. This method can be called more than once; it has guards + // protecting from registering the tests more than once. If + // value-parameterized tests are disabled, RegisterParameterizedTests is + // present but does nothing. + void RegisterParameterizedTests(); + + // Runs all tests in this UnitTest object, prints the result, and + // returns true if all tests are successful. If any exception is + // thrown during a test, this test is considered to be failed, but + // the rest of the tests will still be run. + bool RunAllTests(); + + // Clears the results of all tests, except the ad hoc tests. + void ClearNonAdHocTestResult() { + ForEach(test_cases_, TestCase::ClearTestCaseResult); + } + + // Clears the results of ad-hoc test assertions. + void ClearAdHocTestResult() { + ad_hoc_test_result_.Clear(); + } + + // Adds a TestProperty to the current TestResult object when invoked in a + // context of a test or a test case, or to the global property set. If the + // result already contains a property with the same key, the value will be + // updated. + void RecordProperty(const TestProperty& test_property); + + enum ReactionToSharding { + HONOR_SHARDING_PROTOCOL, + IGNORE_SHARDING_PROTOCOL + }; + + // Matches the full name of each test against the user-specified + // filter to decide whether the test should run, then records the + // result in each TestCase and TestInfo object. + // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests + // based on sharding variables in the environment. + // Returns the number of tests that should run. + int FilterTests(ReactionToSharding shard_tests); + + // Prints the names of the tests matching the user-specified filter flag. + void ListTestsMatchingFilter(); + + const TestCase* current_test_case() const { return current_test_case_; } + TestInfo* current_test_info() { return current_test_info_; } + const TestInfo* current_test_info() const { return current_test_info_; } + + // Returns the vector of environments that need to be set-up/torn-down + // before/after the tests are run. + std::vector<Environment*>& environments() { return environments_; } + + // Getters for the per-thread Google Test trace stack. + std::vector<TraceInfo>& gtest_trace_stack() { + return *(gtest_trace_stack_.pointer()); + } + const std::vector<TraceInfo>& gtest_trace_stack() const { + return gtest_trace_stack_.get(); + } + +#if GTEST_HAS_DEATH_TEST + void InitDeathTestSubprocessControlInfo() { + internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag()); + } + // Returns a pointer to the parsed --gtest_internal_run_death_test + // flag, or NULL if that flag was not specified. + // This information is useful only in a death test child process. + // Must not be called before a call to InitGoogleTest. + const InternalRunDeathTestFlag* internal_run_death_test_flag() const { + return internal_run_death_test_flag_.get(); + } + + // Returns a pointer to the current death test factory. + internal::DeathTestFactory* death_test_factory() { + return death_test_factory_.get(); + } + + void SuppressTestEventsIfInSubprocess(); + + friend class ReplaceDeathTestFactory; +#endif // GTEST_HAS_DEATH_TEST + + // Initializes the event listener performing XML output as specified by + // UnitTestOptions. Must not be called before InitGoogleTest.
+ void ConfigureXmlOutput(); + +#if GTEST_CAN_STREAM_RESULTS_ + // Initializes the event listener for streaming test results to a socket. + // Must not be called before InitGoogleTest. + void ConfigureStreamingOutput(); +#endif + + // Performs initialization dependent upon flag values obtained in + // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to + // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest + // this function is also called from RunAllTests. Since this function can be + // called more than once, it has to be idempotent. + void PostFlagParsingInit(); + + // Gets the random seed used at the start of the current test iteration. + int random_seed() const { return random_seed_; } + + // Gets the random number generator. + internal::Random* random() { return &random_; } + + // Shuffles all test cases, and the tests within each test case, + // making sure that death tests are still run first. + void ShuffleTests(); + + // Restores the test cases and tests to their order before the first shuffle. + void UnshuffleTests(); + + // Returns the value of GTEST_FLAG(catch_exceptions) at the moment + // UnitTest::Run() starts. + bool catch_exceptions() const { return catch_exceptions_; } + + private: + friend class ::testing::UnitTest; + + // Used by UnitTest::Run() to capture the state of + // GTEST_FLAG(catch_exceptions) at the moment it starts. + void set_catch_exceptions(bool value) { catch_exceptions_ = value; } + + // The UnitTest object that owns this implementation object. + UnitTest* const parent_; + + // The working directory when the first TEST() or TEST_F() was + // executed. + internal::FilePath original_working_dir_; + + // The default test part result reporters. + DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_; + DefaultPerThreadTestPartResultReporter + default_per_thread_test_part_result_reporter_; + + // Points to (but doesn't own) the global test part result reporter. + TestPartResultReporterInterface* global_test_part_result_repoter_; + + // Protects read and write access to global_test_part_result_reporter_. + internal::Mutex global_test_part_result_reporter_mutex_; + + // Points to (but doesn't own) the per-thread test part result reporter. + internal::ThreadLocal<TestPartResultReporterInterface*> + per_thread_test_part_result_reporter_; + + // The vector of environments that need to be set-up/torn-down + // before/after the tests are run. + std::vector<Environment*> environments_; + + // The vector of TestCases in their original order. It owns the + // elements in the vector. + std::vector<TestCase*> test_cases_; + + // Provides a level of indirection for the test case list to allow + // easy shuffling and restoring the test case order. The i-th + // element of this vector is the index of the i-th test case in the + // shuffled order. + std::vector<int> test_case_indices_; + +#if GTEST_HAS_PARAM_TEST + // ParameterizedTestRegistry object used to register value-parameterized + // tests. + internal::ParameterizedTestCaseRegistry parameterized_test_registry_; + + // Indicates whether RegisterParameterizedTests() has been called already. + bool parameterized_tests_registered_; +#endif // GTEST_HAS_PARAM_TEST + + // Index of the last death test case registered. Initially -1. + int last_death_test_case_; + + // This points to the TestCase for the currently running test. It + // changes as Google Test goes through one test case after another. + // When no test is running, this is set to NULL and Google Test + // stores assertion results in ad_hoc_test_result_.
Initially NULL. + TestCase* current_test_case_; + + // This points to the TestInfo for the currently running test. It + // changes as Google Test goes through one test after another. When + // no test is running, this is set to NULL and Google Test stores + // assertion results in ad_hoc_test_result_. Initially NULL. + TestInfo* current_test_info_; + + // Normally, a user only writes assertions inside a TEST or TEST_F, + // or inside a function called by a TEST or TEST_F. Since Google + // Test keeps track of which test is currently running, it can + // associate such an assertion with the test it belongs to. + // + // If an assertion is encountered when no TEST or TEST_F is running, + // Google Test attributes the assertion result to an imaginary "ad hoc" + // test, and records the result in ad_hoc_test_result_. + TestResult ad_hoc_test_result_; + + // The list of event listeners that can be used to track events inside + // Google Test. + TestEventListeners listeners_; + + // The OS stack trace getter. Will be deleted when the UnitTest + // object is destructed. By default, an OsStackTraceGetter is used, + // but the user can set this field to use a custom getter if that is + // desired. + OsStackTraceGetterInterface* os_stack_trace_getter_; + + // True iff PostFlagParsingInit() has been called. + bool post_flag_parse_init_performed_; + + // The random number seed used at the beginning of the test run. + int random_seed_; + + // Our random number generator. + internal::Random random_; + + // The time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp_; + + // How long the test took to run, in milliseconds. + TimeInMillis elapsed_time_; + +#if GTEST_HAS_DEATH_TEST + // The decomposed components of the gtest_internal_run_death_test flag, + // parsed when RUN_ALL_TESTS is called. + internal::scoped_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_; + internal::scoped_ptr<DeathTestFactory> death_test_factory_; +#endif // GTEST_HAS_DEATH_TEST + + // A per-thread stack of traces created by the SCOPED_TRACE() macro. + internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_; + + // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests() + // starts. + bool catch_exceptions_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl); +}; // class UnitTestImpl + +// Convenience function for accessing the global UnitTest +// implementation object. +inline UnitTestImpl* GetUnitTestImpl() { + return UnitTest::GetInstance()->impl(); +} + +#if GTEST_USES_SIMPLE_RE + +// Internal helper functions for implementing the simple regular +// expression matcher. +GTEST_API_ bool IsInSet(char ch, const char* str); +GTEST_API_ bool IsAsciiDigit(char ch); +GTEST_API_ bool IsAsciiPunct(char ch); +GTEST_API_ bool IsRepeat(char ch); +GTEST_API_ bool IsAsciiWhiteSpace(char ch); +GTEST_API_ bool IsAsciiWordChar(char ch); +GTEST_API_ bool IsValidEscape(char ch); +GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch); +GTEST_API_ bool ValidateRegex(const char* regex); +GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str); +GTEST_API_ bool MatchRepetitionAndRegexAtHead( + bool escaped, char ch, char repeat, const char* regex, const char* str); +GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str); + +#endif // GTEST_USES_SIMPLE_RE + +// Parses the command line for Google Test flags, without initializing +// other parts of Google Test.
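For reference, the standard entry point that drives this flag parsing (ordinary gtest boilerplate, shown as a usage sketch): InitGoogleTest() consumes the gtest-recognized flags from argv before the program parses its own:

    #include "gtest/gtest.h"

    int main(int argc, char** argv) {
      // Removes --gtest_* flags from argv and initializes the framework;
      // whatever remains is left for the program's own flag handling.
      testing::InitGoogleTest(&argc, argv);
      return RUN_ALL_TESTS();
    }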
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv);
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv);
+
+#if GTEST_HAS_DEATH_TEST
+
+// Returns the message describing the last system error, regardless of the
+// platform.
+GTEST_API_ std::string GetLastErrnoDescription();
+
+# if GTEST_OS_WINDOWS
+// Provides leak-safe Windows kernel handle ownership.
+class AutoHandle {
+ public:
+ AutoHandle() : handle_(INVALID_HANDLE_VALUE) {}
+ explicit AutoHandle(HANDLE handle) : handle_(handle) {}
+
+ ~AutoHandle() { Reset(); }
+
+ HANDLE Get() const { return handle_; }
+ void Reset() { Reset(INVALID_HANDLE_VALUE); }
+ void Reset(HANDLE handle) {
+ if (handle != handle_) {
+ if (handle_ != INVALID_HANDLE_VALUE)
+ ::CloseHandle(handle_);
+ handle_ = handle;
+ }
+ }
+
+ private:
+ HANDLE handle_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle);
+};
+# endif // GTEST_OS_WINDOWS
+
+// Attempts to parse a string into a positive integer pointed to by the
+// number parameter. Returns true if that is possible.
+// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use
+// it here.
+template <typename Integer>
+bool ParseNaturalNumber(const ::std::string& str, Integer* number) {
+ // Fail fast if the given string does not begin with a digit;
+ // this bypasses strtoXXX's "optional leading whitespace and plus
+ // or minus sign" semantics, which are undesirable here.
+ if (str.empty() || !IsDigit(str[0])) {
+ return false;
+ }
+ errno = 0;
+
+ char* end;
+ // BiggestConvertible is the largest integer type that system-provided
+ // string-to-number conversion routines can return.
+
+# if GTEST_OS_WINDOWS && !defined(__GNUC__)
+
+ // MSVC and C++ Builder define __int64 instead of the standard long long.
+ typedef unsigned __int64 BiggestConvertible;
+ const BiggestConvertible parsed = _strtoui64(str.c_str(), &end, 10);
+
+# else
+
+ typedef unsigned long long BiggestConvertible; // NOLINT
+ const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10);
+
+# endif // GTEST_OS_WINDOWS && !defined(__GNUC__)
+
+ const bool parse_success = *end == '\0' && errno == 0;
+
+ // TODO(vladl@google.com): Convert this to compile time assertion when it is
+ // available.
+ GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed));
+
+ const Integer result = static_cast<Integer>(parsed);
+ if (parse_success && static_cast<BiggestConvertible>(result) == parsed) {
+ *number = result;
+ return true;
+ }
+ return false;
+}
+#endif // GTEST_HAS_DEATH_TEST
+
+// TestResult contains some private methods that should be hidden from
+// Google Test user but are required for testing. This class allows our tests
+// to access them.
+//
+// This class is supplied only for the purpose of testing Google Test's own
+// constructs. Do not use it in user tests, either directly or indirectly.
+class TestResultAccessor {
+ public:
+ static void RecordProperty(TestResult* test_result,
+ const std::string& xml_element,
+ const TestProperty& property) {
+ test_result->RecordProperty(xml_element, property);
+ }
+
+ static void ClearTestPartResults(TestResult* test_result) {
+ test_result->ClearTestPartResults();
+ }
+
+ static const std::vector<testing::TestPartResult>& test_part_results(
+ const TestResult& test_result) {
+ return test_result.test_part_results();
+ }
+};
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Streams test results to the given port on the given host machine.
+class StreamingListener : public EmptyTestEventListener {
+ public:
+ // Abstract base class for writing strings to a socket.
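+ // Illustrative sketch, not part of the upstream Google Test sources:
+ // because the writer is abstract, a test can substitute an in-memory fake
+ // and assert on what would have been streamed, e.g.
+ //
+ //   class FakeSocketWriter : public AbstractSocketWriter {
+ //    public:
+ //     virtual void Send(const string& message) { log_ += message; }
+ //     string log_;
+ //   };
+ //
+ // passed to the StreamingListener(AbstractSocketWriter*) constructor
+ // declared further below.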
+ class AbstractSocketWriter { + public: + virtual ~AbstractSocketWriter() {} + + // Sends a string to the socket. + virtual void Send(const string& message) = 0; + + // Closes the socket. + virtual void CloseConnection() {} + + // Sends a string and a newline to the socket. + void SendLn(const string& message) { + Send(message + "\n"); + } + }; + + // Concrete class for actually writing strings to a socket. + class SocketWriter : public AbstractSocketWriter { + public: + SocketWriter(const string& host, const string& port) + : sockfd_(-1), host_name_(host), port_num_(port) { + MakeConnection(); + } + + virtual ~SocketWriter() { + if (sockfd_ != -1) + CloseConnection(); + } + + // Sends a string to the socket. + virtual void Send(const string& message) { + GTEST_CHECK_(sockfd_ != -1) + << "Send() can be called only when there is a connection."; + + const int len = static_cast(message.length()); + if (write(sockfd_, message.c_str(), len) != len) { + GTEST_LOG_(WARNING) + << "stream_result_to: failed to stream to " + << host_name_ << ":" << port_num_; + } + } + + private: + // Creates a client socket and connects to the server. + void MakeConnection(); + + // Closes the socket. + void CloseConnection() { + GTEST_CHECK_(sockfd_ != -1) + << "CloseConnection() can be called only when there is a connection."; + + close(sockfd_); + sockfd_ = -1; + } + + int sockfd_; // socket file descriptor + const string host_name_; + const string port_num_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter); + }; // class SocketWriter + + // Escapes '=', '&', '%', and '\n' characters in str as "%xx". + static string UrlEncode(const char* str); + + StreamingListener(const string& host, const string& port) + : socket_writer_(new SocketWriter(host, port)) { Start(); } + + explicit StreamingListener(AbstractSocketWriter* socket_writer) + : socket_writer_(socket_writer) { Start(); } + + void OnTestProgramStart(const UnitTest& /* unit_test */) { + SendLn("event=TestProgramStart"); + } + + void OnTestProgramEnd(const UnitTest& unit_test) { + // Note that Google Test current only report elapsed time for each + // test iteration, not for the entire test program. + SendLn("event=TestProgramEnd&passed=" + FormatBool(unit_test.Passed())); + + // Notify the streaming server to stop. 
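+ // Illustrative note, not part of the upstream sources: each event is
+ // sent as one URL-encoded "key=value&key=value" line, so this
+ // program-end event arrives at the receiver as, e.g.
+ //
+ //   event=TestProgramEnd&passed=1
+ //
+ // A receiver can split on '&' and '=' to reconstruct the fields.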
+ socket_writer_->CloseConnection(); + } + + void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) { + SendLn("event=TestIterationStart&iteration=" + + StreamableToString(iteration)); + } + + void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) { + SendLn("event=TestIterationEnd&passed=" + + FormatBool(unit_test.Passed()) + "&elapsed_time=" + + StreamableToString(unit_test.elapsed_time()) + "ms"); + } + + void OnTestCaseStart(const TestCase& test_case) { + SendLn(std::string("event=TestCaseStart&name=") + test_case.name()); + } + + void OnTestCaseEnd(const TestCase& test_case) { + SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed()) + + "&elapsed_time=" + StreamableToString(test_case.elapsed_time()) + + "ms"); + } + + void OnTestStart(const TestInfo& test_info) { + SendLn(std::string("event=TestStart&name=") + test_info.name()); + } + + void OnTestEnd(const TestInfo& test_info) { + SendLn("event=TestEnd&passed=" + + FormatBool((test_info.result())->Passed()) + + "&elapsed_time=" + + StreamableToString((test_info.result())->elapsed_time()) + "ms"); + } + + void OnTestPartResult(const TestPartResult& test_part_result) { + const char* file_name = test_part_result.file_name(); + if (file_name == NULL) + file_name = ""; + SendLn("event=TestPartResult&file=" + UrlEncode(file_name) + + "&line=" + StreamableToString(test_part_result.line_number()) + + "&message=" + UrlEncode(test_part_result.message())); + } + + private: + // Sends the given message and a newline to the socket. + void SendLn(const string& message) { socket_writer_->SendLn(message); } + + // Called at the start of streaming to notify the receiver what + // protocol we are using. + void Start() { SendLn("gtest_streaming_protocol_version=1.0"); } + + string FormatBool(bool value) { return value ? "1" : "0"; } + + const scoped_ptr socket_writer_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener); +}; // class StreamingListener + +#endif // GTEST_CAN_STREAM_RESULTS_ + +} // namespace internal +} // namespace testing + +#endif // GTEST_SRC_GTEST_INTERNAL_INL_H_ +#undef GTEST_IMPLEMENTATION_ + +#if GTEST_OS_WINDOWS +# define vsnprintf _vsnprintf +#endif // GTEST_OS_WINDOWS + +namespace testing { + +using internal::CountIf; +using internal::ForEach; +using internal::GetElementOr; +using internal::Shuffle; + +// Constants. + +// A test whose test case name or test name matches this filter is +// disabled and not run. +static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*"; + +// A test case whose name matches this filter is considered a death +// test case and will be run before test cases whose name doesn't +// match this filter. +static const char kDeathTestCaseFilter[] = "*DeathTest:*DeathTest/*"; + +// A test filter that matches everything. +static const char kUniversalFilter[] = "*"; + +// The default output file for XML output. +static const char kDefaultOutputFile[] = "test_detail.xml"; + +// The environment variable name for the test shard index. +static const char kTestShardIndex[] = "GTEST_SHARD_INDEX"; +// The environment variable name for the total number of test shards. +static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS"; +// The environment variable name for the test shard status file. +static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE"; + +namespace internal { + +// The text used in failure messages to indicate the start of the +// stack trace. 
+const char kStackTraceMarker[] = "\nStack trace:\n"; + +// g_help_flag is true iff the --help flag or an equivalent form is +// specified on the command line. +bool g_help_flag = false; + +} // namespace internal + +static const char* GetDefaultFilter() { + return kUniversalFilter; +} + +GTEST_DEFINE_bool_( + also_run_disabled_tests, + internal::BoolFromGTestEnv("also_run_disabled_tests", false), + "Run disabled tests too, in addition to the tests normally being run."); + +GTEST_DEFINE_bool_( + break_on_failure, + internal::BoolFromGTestEnv("break_on_failure", false), + "True iff a failed assertion should be a debugger break-point."); + +GTEST_DEFINE_bool_( + catch_exceptions, + internal::BoolFromGTestEnv("catch_exceptions", true), + "True iff " GTEST_NAME_ + " should catch exceptions and treat them as test failures."); + +GTEST_DEFINE_string_( + color, + internal::StringFromGTestEnv("color", "auto"), + "Whether to use colors in the output. Valid values: yes, no, " + "and auto. 'auto' means to use colors if the output is " + "being sent to a terminal and the TERM environment variable " + "is set to a terminal type that supports colors."); + +GTEST_DEFINE_string_( + filter, + internal::StringFromGTestEnv("filter", GetDefaultFilter()), + "A colon-separated list of glob (not regex) patterns " + "for filtering the tests to run, optionally followed by a " + "'-' and a : separated list of negative patterns (tests to " + "exclude). A test is run if it matches one of the positive " + "patterns and does not match any of the negative patterns."); + +GTEST_DEFINE_bool_(list_tests, false, + "List all tests without running them."); + +GTEST_DEFINE_string_( + output, + internal::StringFromGTestEnv("output", ""), + "A format (currently must be \"xml\"), optionally followed " + "by a colon and an output file name or directory. A directory " + "is indicated by a trailing pathname separator. " + "Examples: \"xml:filename.xml\", \"xml::directoryname/\". " + "If a directory is specified, output files will be created " + "within that directory, with file-names based on the test " + "executable's name and, if necessary, made unique by adding " + "digits."); + +GTEST_DEFINE_bool_( + print_time, + internal::BoolFromGTestEnv("print_time", true), + "True iff " GTEST_NAME_ + " should display elapsed time in text output."); + +GTEST_DEFINE_int32_( + random_seed, + internal::Int32FromGTestEnv("random_seed", 0), + "Random number seed to use when shuffling test orders. Must be in range " + "[1, 99999], or 0 to use a seed based on the current time."); + +GTEST_DEFINE_int32_( + repeat, + internal::Int32FromGTestEnv("repeat", 1), + "How many times to repeat each test. Specify a negative number " + "for repeating forever. Useful for shaking out flaky tests."); + +GTEST_DEFINE_bool_( + show_internal_stack_frames, false, + "True iff " GTEST_NAME_ " should include internal stack frames when " + "printing test failure stack traces."); + +GTEST_DEFINE_bool_( + shuffle, + internal::BoolFromGTestEnv("shuffle", false), + "True iff " GTEST_NAME_ + " should randomize tests' order on every run."); + +GTEST_DEFINE_int32_( + stack_trace_depth, + internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth), + "The maximum number of stack frames to print when an " + "assertion fails. The valid range is 0 through 100, inclusive."); + +GTEST_DEFINE_string_( + stream_result_to, + internal::StringFromGTestEnv("stream_result_to", ""), + "This flag specifies the host name and the port number on which to stream " + "test results. 
Example: \"localhost:555\". The flag is effective only on " + "Linux."); + +GTEST_DEFINE_bool_( + throw_on_failure, + internal::BoolFromGTestEnv("throw_on_failure", false), + "When this flag is specified, a failed assertion will throw an exception " + "if exceptions are enabled or exit the program with a non-zero code " + "otherwise."); + +namespace internal { + +// Generates a random number from [0, range), using a Linear +// Congruential Generator (LCG). Crashes if 'range' is 0 or greater +// than kMaxRange. +UInt32 Random::Generate(UInt32 range) { + // These constants are the same as are used in glibc's rand(3). + state_ = (1103515245U*state_ + 12345U) % kMaxRange; + + GTEST_CHECK_(range > 0) + << "Cannot generate a number in the range [0, 0)."; + GTEST_CHECK_(range <= kMaxRange) + << "Generation of a number in [0, " << range << ") was requested, " + << "but this can only generate numbers in [0, " << kMaxRange << ")."; + + // Converting via modulus introduces a bit of downward bias, but + // it's simple, and a linear congruential generator isn't too good + // to begin with. + return state_ % range; +} + +// GTestIsInitialized() returns true iff the user has initialized +// Google Test. Useful for catching the user mistake of not initializing +// Google Test before calling RUN_ALL_TESTS(). +// +// A user must call testing::InitGoogleTest() to initialize Google +// Test. g_init_gtest_count is set to the number of times +// InitGoogleTest() has been called. We don't protect this variable +// under a mutex as it is only accessed in the main thread. +GTEST_API_ int g_init_gtest_count = 0; +static bool GTestIsInitialized() { return g_init_gtest_count != 0; } + +// Iterates over a vector of TestCases, keeping a running sum of the +// results of calling a given int-returning method on each. +// Returns the sum. +static int SumOverTestCaseList(const std::vector& case_list, + int (TestCase::*method)() const) { + int sum = 0; + for (size_t i = 0; i < case_list.size(); i++) { + sum += (case_list[i]->*method)(); + } + return sum; +} + +// Returns true iff the test case passed. +static bool TestCasePassed(const TestCase* test_case) { + return test_case->should_run() && test_case->Passed(); +} + +// Returns true iff the test case failed. +static bool TestCaseFailed(const TestCase* test_case) { + return test_case->should_run() && test_case->Failed(); +} + +// Returns true iff test_case contains at least one test that should +// run. +static bool ShouldRunTestCase(const TestCase* test_case) { + return test_case->should_run(); +} + +// AssertHelper constructor. +AssertHelper::AssertHelper(TestPartResult::Type type, + const char* file, + int line, + const char* message) + : data_(new AssertHelperData(type, file, line, message)) { +} + +AssertHelper::~AssertHelper() { + delete data_; +} + +// Message assignment, for assertion streaming support. +void AssertHelper::operator=(const Message& message) const { + UnitTest::GetInstance()-> + AddTestPartResult(data_->type, data_->file, data_->line, + AppendUserMessage(data_->message, message), + UnitTest::GetInstance()->impl() + ->CurrentOsStackTraceExceptTop(1) + // Skips the stack frame for this function itself. + ); // NOLINT +} + +// Mutex for linked pointers. +GTEST_API_ GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex); + +// Application pathname gotten in InitGoogleTest. +std::string g_executable_path; + +// Returns the current application's name, removing directory path if that +// is present. 
+FilePath GetCurrentExecutableName() { + FilePath result; + +#if GTEST_OS_WINDOWS + result.Set(FilePath(g_executable_path).RemoveExtension("exe")); +#else + result.Set(FilePath(g_executable_path)); +#endif // GTEST_OS_WINDOWS + + return result.RemoveDirectoryName(); +} + +// Functions for processing the gtest_output flag. + +// Returns the output format, or "" for normal printed output. +std::string UnitTestOptions::GetOutputFormat() { + const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); + if (gtest_output_flag == NULL) return std::string(""); + + const char* const colon = strchr(gtest_output_flag, ':'); + return (colon == NULL) ? + std::string(gtest_output_flag) : + std::string(gtest_output_flag, colon - gtest_output_flag); +} + +// Returns the name of the requested output file, or the default if none +// was explicitly specified. +std::string UnitTestOptions::GetAbsolutePathToOutputFile() { + const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); + if (gtest_output_flag == NULL) + return ""; + + const char* const colon = strchr(gtest_output_flag, ':'); + if (colon == NULL) + return internal::FilePath::ConcatPaths( + internal::FilePath( + UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(kDefaultOutputFile)).string(); + + internal::FilePath output_name(colon + 1); + if (!output_name.IsAbsolutePath()) + // TODO(wan@google.com): on Windows \some\path is not an absolute + // path (as its meaning depends on the current drive), yet the + // following logic for turning it into an absolute path is wrong. + // Fix it. + output_name = internal::FilePath::ConcatPaths( + internal::FilePath(UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(colon + 1)); + + if (!output_name.IsDirectory()) + return output_name.string(); + + internal::FilePath result(internal::FilePath::GenerateUniqueFileName( + output_name, internal::GetCurrentExecutableName(), + GetOutputFormat().c_str())); + return result.string(); +} + +// Returns true iff the wildcard pattern matches the string. The +// first ':' or '\0' character in pattern marks the end of it. +// +// This recursive algorithm isn't very efficient, but is clear and +// works well enough for matching test names, which are short. +bool UnitTestOptions::PatternMatchesString(const char *pattern, + const char *str) { + switch (*pattern) { + case '\0': + case ':': // Either ':' or '\0' marks the end of the pattern. + return *str == '\0'; + case '?': // Matches any single character. + return *str != '\0' && PatternMatchesString(pattern + 1, str + 1); + case '*': // Matches any string (possibly empty) of characters. + return (*str != '\0' && PatternMatchesString(pattern, str + 1)) || + PatternMatchesString(pattern + 1, str); + default: // Non-special character. Matches itself. + return *pattern == *str && + PatternMatchesString(pattern + 1, str + 1); + } +} + +bool UnitTestOptions::MatchesFilter( + const std::string& name, const char* filter) { + const char *cur_pattern = filter; + for (;;) { + if (PatternMatchesString(cur_pattern, name.c_str())) { + return true; + } + + // Finds the next pattern in the filter. + cur_pattern = strchr(cur_pattern, ':'); + + // Returns if no more pattern can be found. + if (cur_pattern == NULL) { + return false; + } + + // Skips the pattern separater (the ':' character). + cur_pattern++; + } +} + +// Returns true iff the user-specified filter matches the test case +// name and the test name. 
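+// Illustrative example, not part of the upstream sources. Given
+//
+//   --gtest_filter=FooTest.*:BarTest.*-FooTest.Slow
+//
+// the positive patterns are "FooTest.*:BarTest.*" and the negative pattern
+// is "FooTest.Slow", so FooTest.Fast and BarTest.Baz run while FooTest.Slow
+// is excluded.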
+bool UnitTestOptions::FilterMatchesTest(const std::string &test_case_name, + const std::string &test_name) { + const std::string& full_name = test_case_name + "." + test_name.c_str(); + + // Split --gtest_filter at '-', if there is one, to separate into + // positive filter and negative filter portions + const char* const p = GTEST_FLAG(filter).c_str(); + const char* const dash = strchr(p, '-'); + std::string positive; + std::string negative; + if (dash == NULL) { + positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter + negative = ""; + } else { + positive = std::string(p, dash); // Everything up to the dash + negative = std::string(dash + 1); // Everything after the dash + if (positive.empty()) { + // Treat '-test1' as the same as '*-test1' + positive = kUniversalFilter; + } + } + + // A filter is a colon-separated list of patterns. It matches a + // test if any pattern in it matches the test. + return (MatchesFilter(full_name, positive.c_str()) && + !MatchesFilter(full_name, negative.c_str())); +} + +#if GTEST_HAS_SEH +// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the +// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise. +// This function is useful as an __except condition. +int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) { + // Google Test should handle a SEH exception if: + // 1. the user wants it to, AND + // 2. this is not a breakpoint exception, AND + // 3. this is not a C++ exception (VC++ implements them via SEH, + // apparently). + // + // SEH exception code for C++ exceptions. + // (see http://support.microsoft.com/kb/185294 for more information). + const DWORD kCxxExceptionCode = 0xe06d7363; + + bool should_handle = true; + + if (!GTEST_FLAG(catch_exceptions)) + should_handle = false; + else if (exception_code == EXCEPTION_BREAKPOINT) + should_handle = false; + else if (exception_code == kCxxExceptionCode) + should_handle = false; + + return should_handle ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH; +} +#endif // GTEST_HAS_SEH + +} // namespace internal + +// The c'tor sets this object as the test part result reporter used by +// Google Test. The 'result' parameter specifies where to report the +// results. Intercepts only failures from the current thread. +ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( + TestPartResultArray* result) + : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD), + result_(result) { + Init(); +} + +// The c'tor sets this object as the test part result reporter used by +// Google Test. The 'result' parameter specifies where to report the +// results. +ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( + InterceptMode intercept_mode, TestPartResultArray* result) + : intercept_mode_(intercept_mode), + result_(result) { + Init(); +} + +void ScopedFakeTestPartResultReporter::Init() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + if (intercept_mode_ == INTERCEPT_ALL_THREADS) { + old_reporter_ = impl->GetGlobalTestPartResultReporter(); + impl->SetGlobalTestPartResultReporter(this); + } else { + old_reporter_ = impl->GetTestPartResultReporterForCurrentThread(); + impl->SetTestPartResultReporterForCurrentThread(this); + } +} + +// The d'tor restores the test part result reporter used by Google Test +// before. 
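+// Illustrative sketch, not part of the upstream sources. The reporter is
+// used RAII-style:
+//
+//   {
+//     TestPartResultArray results;
+//     ScopedFakeTestPartResultReporter reporter(&results);
+//     EXPECT_TRUE(false);  // captured in 'results' instead of reported
+//   }  // the destructor below restores the previous reporter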
+ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + if (intercept_mode_ == INTERCEPT_ALL_THREADS) { + impl->SetGlobalTestPartResultReporter(old_reporter_); + } else { + impl->SetTestPartResultReporterForCurrentThread(old_reporter_); + } +} + +// Increments the test part result count and remembers the result. +// This method is from the TestPartResultReporterInterface interface. +void ScopedFakeTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + result_->Append(result); +} + +namespace internal { + +// Returns the type ID of ::testing::Test. We should always call this +// instead of GetTypeId< ::testing::Test>() to get the type ID of +// testing::Test. This is to work around a suspected linker bug when +// using Google Test as a framework on Mac OS X. The bug causes +// GetTypeId< ::testing::Test>() to return different values depending +// on whether the call is from the Google Test framework itself or +// from user test code. GetTestTypeId() is guaranteed to always +// return the same value, as it always calls GetTypeId<>() from the +// gtest.cc, which is within the Google Test framework. +TypeId GetTestTypeId() { + return GetTypeId(); +} + +// The value of GetTestTypeId() as seen from within the Google Test +// library. This is solely for testing GetTestTypeId(). +extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId(); + +// This predicate-formatter checks that 'results' contains a test part +// failure of the given type and that the failure message contains the +// given substring. +AssertionResult HasOneFailure(const char* /* results_expr */, + const char* /* type_expr */, + const char* /* substr_expr */, + const TestPartResultArray& results, + TestPartResult::Type type, + const string& substr) { + const std::string expected(type == TestPartResult::kFatalFailure ? + "1 fatal failure" : + "1 non-fatal failure"); + Message msg; + if (results.size() != 1) { + msg << "Expected: " << expected << "\n" + << " Actual: " << results.size() << " failures"; + for (int i = 0; i < results.size(); i++) { + msg << "\n" << results.GetTestPartResult(i); + } + return AssertionFailure() << msg; + } + + const TestPartResult& r = results.GetTestPartResult(0); + if (r.type() != type) { + return AssertionFailure() << "Expected: " << expected << "\n" + << " Actual:\n" + << r; + } + + if (strstr(r.message(), substr.c_str()) == NULL) { + return AssertionFailure() << "Expected: " << expected << " containing \"" + << substr << "\"\n" + << " Actual:\n" + << r; + } + + return AssertionSuccess(); +} + +// The constructor of SingleFailureChecker remembers where to look up +// test part results, what type of failure we expect, and what +// substring the failure message should contain. +SingleFailureChecker:: SingleFailureChecker( + const TestPartResultArray* results, + TestPartResult::Type type, + const string& substr) + : results_(results), + type_(type), + substr_(substr) {} + +// The destructor of SingleFailureChecker verifies that the given +// TestPartResultArray contains exactly one failure that has the given +// type and contains the given substring. If that's not the case, a +// non-fatal failure will be generated. 
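+// Illustrative note, not part of the upstream sources: this checker is the
+// machinery behind gtest-spi.h self-test macros such as
+//
+//   EXPECT_NONFATAL_FAILURE({ ADD_FAILURE() << "oops"; }, "oops");
+//
+// which demands exactly one non-fatal failure whose message contains
+// "oops".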
+SingleFailureChecker::~SingleFailureChecker() { + EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_); +} + +DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter( + UnitTestImpl* unit_test) : unit_test_(unit_test) {} + +void DefaultGlobalTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + unit_test_->current_test_result()->AddTestPartResult(result); + unit_test_->listeners()->repeater()->OnTestPartResult(result); +} + +DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter( + UnitTestImpl* unit_test) : unit_test_(unit_test) {} + +void DefaultPerThreadTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result); +} + +// Returns the global test part result reporter. +TestPartResultReporterInterface* +UnitTestImpl::GetGlobalTestPartResultReporter() { + internal::MutexLock lock(&global_test_part_result_reporter_mutex_); + return global_test_part_result_repoter_; +} + +// Sets the global test part result reporter. +void UnitTestImpl::SetGlobalTestPartResultReporter( + TestPartResultReporterInterface* reporter) { + internal::MutexLock lock(&global_test_part_result_reporter_mutex_); + global_test_part_result_repoter_ = reporter; +} + +// Returns the test part result reporter for the current thread. +TestPartResultReporterInterface* +UnitTestImpl::GetTestPartResultReporterForCurrentThread() { + return per_thread_test_part_result_reporter_.get(); +} + +// Sets the test part result reporter for the current thread. +void UnitTestImpl::SetTestPartResultReporterForCurrentThread( + TestPartResultReporterInterface* reporter) { + per_thread_test_part_result_reporter_.set(reporter); +} + +// Gets the number of successful test cases. +int UnitTestImpl::successful_test_case_count() const { + return CountIf(test_cases_, TestCasePassed); +} + +// Gets the number of failed test cases. +int UnitTestImpl::failed_test_case_count() const { + return CountIf(test_cases_, TestCaseFailed); +} + +// Gets the number of all test cases. +int UnitTestImpl::total_test_case_count() const { + return static_cast(test_cases_.size()); +} + +// Gets the number of all test cases that contain at least one test +// that should run. +int UnitTestImpl::test_case_to_run_count() const { + return CountIf(test_cases_, ShouldRunTestCase); +} + +// Gets the number of successful tests. +int UnitTestImpl::successful_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count); +} + +// Gets the number of failed tests. +int UnitTestImpl::failed_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count); +} + +// Gets the number of disabled tests that will be reported in the XML report. +int UnitTestImpl::reportable_disabled_test_count() const { + return SumOverTestCaseList(test_cases_, + &TestCase::reportable_disabled_test_count); +} + +// Gets the number of disabled tests. +int UnitTestImpl::disabled_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count); +} + +// Gets the number of tests to be printed in the XML report. +int UnitTestImpl::reportable_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::reportable_test_count); +} + +// Gets the number of all tests. +int UnitTestImpl::total_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::total_test_count); +} + +// Gets the number of tests that should run. 
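+// Illustrative note, not part of the upstream sources: like the counters
+// above, the counter below reuses SumOverTestCaseList() with a pointer to
+// member function, e.g.
+//
+//   SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count)
+//
+// calls (case->*method)() on every registered TestCase and sums the
+// results.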
+int UnitTestImpl::test_to_run_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count); +} + +// Returns the current OS stack trace as an std::string. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// CurrentOsStackTraceExceptTop(1), Foo() will be included in the +// trace but Bar() and CurrentOsStackTraceExceptTop() won't. +std::string UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) { + (void)skip_count; + return ""; +} + +// Returns the current time in milliseconds. +TimeInMillis GetTimeInMillis() { +#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__) + // Difference between 1970-01-01 and 1601-01-01 in milliseconds. + // http://analogous.blogspot.com/2005/04/epoch.html + const TimeInMillis kJavaEpochToWinFileTimeDelta = + static_cast(116444736UL) * 100000UL; + const DWORD kTenthMicrosInMilliSecond = 10000; + + SYSTEMTIME now_systime; + FILETIME now_filetime; + ULARGE_INTEGER now_int64; + // TODO(kenton@google.com): Shouldn't this just use + // GetSystemTimeAsFileTime()? + GetSystemTime(&now_systime); + if (SystemTimeToFileTime(&now_systime, &now_filetime)) { + now_int64.LowPart = now_filetime.dwLowDateTime; + now_int64.HighPart = now_filetime.dwHighDateTime; + now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) - + kJavaEpochToWinFileTimeDelta; + return now_int64.QuadPart; + } + return 0; +#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_ + __timeb64 now; + +# ifdef _MSC_VER + + // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996 + // (deprecated function) there. + // TODO(kenton@google.com): Use GetTickCount()? Or use + // SystemTimeToFileTime() +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4996) // Temporarily disables warning 4996. + _ftime64(&now); +# pragma warning(pop) // Restores the warning state. +# else + + _ftime64(&now); + +# endif // _MSC_VER + + return static_cast(now.time) * 1000 + now.millitm; +#elif GTEST_HAS_GETTIMEOFDAY_ + struct timeval now; + gettimeofday(&now, NULL); + return static_cast(now.tv_sec) * 1000 + now.tv_usec / 1000; +#else +# error "Don't know how to get the current time on your system." +#endif +} + +// Utilities + +// class String. + +#if GTEST_OS_WINDOWS_MOBILE +// Creates a UTF-16 wide string from the given ANSI string, allocating +// memory using new. The caller is responsible for deleting the return +// value using delete[]. Returns the wide string, or NULL if the +// input is NULL. +LPCWSTR String::AnsiToUtf16(const char* ansi) { + if (!ansi) return NULL; + const int length = strlen(ansi); + const int unicode_length = + MultiByteToWideChar(CP_ACP, 0, ansi, length, + NULL, 0); + WCHAR* unicode = new WCHAR[unicode_length + 1]; + MultiByteToWideChar(CP_ACP, 0, ansi, length, + unicode, unicode_length); + unicode[unicode_length] = 0; + return unicode; +} + +// Creates an ANSI string from the given wide string, allocating +// memory using new. The caller is responsible for deleting the return +// value using delete[]. Returns the ANSI string, or NULL if the +// input is NULL. 
+const char* String::Utf16ToAnsi(LPCWSTR utf16_str) { + if (!utf16_str) return NULL; + const int ansi_length = + WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, + NULL, 0, NULL, NULL); + char* ansi = new char[ansi_length + 1]; + WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, + ansi, ansi_length, NULL, NULL); + ansi[ansi_length] = 0; + return ansi; +} + +#endif // GTEST_OS_WINDOWS_MOBILE + +// Compares two C strings. Returns true iff they have the same content. +// +// Unlike strcmp(), this function can handle NULL argument(s). A NULL +// C string is considered different to any non-NULL C string, +// including the empty string. +bool String::CStringEquals(const char * lhs, const char * rhs) { + if ( lhs == NULL ) return rhs == NULL; + + if ( rhs == NULL ) return false; + + return strcmp(lhs, rhs) == 0; +} + +#if GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING + +// Converts an array of wide chars to a narrow string using the UTF-8 +// encoding, and streams the result to the given Message object. +static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length, + Message* msg) { + for (size_t i = 0; i != length; ) { // NOLINT + if (wstr[i] != L'\0') { + *msg << WideStringToUtf8(wstr + i, static_cast(length - i)); + while (i != length && wstr[i] != L'\0') + i++; + } else { + *msg << '\0'; + i++; + } + } +} + +#endif // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING + +} // namespace internal + +// Constructs an empty Message. +// We allocate the stringstream separately because otherwise each use of +// ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's +// stack frame leading to huge stack frames in some cases; gcc does not reuse +// the stack space. +Message::Message() : ss_(new ::std::stringstream) { + // By default, we want there to be enough precision when printing + // a double to a Message. + *ss_ << std::setprecision(std::numeric_limits::digits10 + 2); +} + +// These two overloads allow streaming a wide C string to a Message +// using the UTF-8 encoding. +Message& Message::operator <<(const wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); +} +Message& Message::operator <<(wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); +} + +#if GTEST_HAS_STD_WSTRING +// Converts the given wide string to a narrow string using the UTF-8 +// encoding, and streams the result to this Message object. +Message& Message::operator <<(const ::std::wstring& wstr) { + internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this); + return *this; +} +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_GLOBAL_WSTRING +// Converts the given wide string to a narrow string using the UTF-8 +// encoding, and streams the result to this Message object. +Message& Message::operator <<(const ::wstring& wstr) { + internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this); + return *this; +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +// Gets the text streamed to this object so far as an std::string. +// Each '\0' character in the buffer is replaced with "\\0". +std::string Message::GetString() const { + return internal::StringStreamToString(ss_.get()); +} + +// AssertionResult constructors. +// Used in EXPECT_TRUE/FALSE(assertion_result). +AssertionResult::AssertionResult(const AssertionResult& other) + : success_(other.success_), + message_(other.message_.get() != NULL ? + new ::std::string(*other.message_) : + static_cast< ::std::string*>(NULL)) { +} + +// Returns the assertion's negation. 
Used with EXPECT/ASSERT_FALSE. +AssertionResult AssertionResult::operator!() const { + AssertionResult negation(!success_); + if (message_.get() != NULL) + negation << *message_; + return negation; +} + +// Makes a successful assertion result. +AssertionResult AssertionSuccess() { + return AssertionResult(true); +} + +// Makes a failed assertion result. +AssertionResult AssertionFailure() { + return AssertionResult(false); +} + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << message. +AssertionResult AssertionFailure(const Message& message) { + return AssertionFailure() << message; +} + +namespace internal { + +// Constructs and returns the message for an equality assertion +// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. +// +// The first four parameters are the expressions used in the assertion +// and their values, as strings. For example, for ASSERT_EQ(foo, bar) +// where foo is 5 and bar is 6, we have: +// +// expected_expression: "foo" +// actual_expression: "bar" +// expected_value: "5" +// actual_value: "6" +// +// The ignoring_case parameter is true iff the assertion is a +// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will +// be inserted into the message. +AssertionResult EqFailure(const char* expected_expression, + const char* actual_expression, + const std::string& expected_value, + const std::string& actual_value, + bool ignoring_case) { + Message msg; + msg << "Value of: " << actual_expression; + if (actual_value != actual_expression) { + msg << "\n Actual: " << actual_value; + } + + msg << "\nExpected: " << expected_expression; + if (ignoring_case) { + msg << " (ignoring case)"; + } + if (expected_value != expected_expression) { + msg << "\nWhich is: " << expected_value; + } + + return AssertionFailure() << msg; +} + +// Constructs a failure message for Boolean assertions such as EXPECT_TRUE. +std::string GetBoolAssertionFailureMessage( + const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value) { + const char* actual_message = assertion_result.message(); + Message msg; + msg << "Value of: " << expression_text + << "\n Actual: " << actual_predicate_value; + if (actual_message[0] != '\0') + msg << " (" << actual_message << ")"; + msg << "\nExpected: " << expected_predicate_value; + return msg.GetString(); +} + +// Helper function for implementing ASSERT_NEAR. +AssertionResult DoubleNearPredFormat(const char* expr1, + const char* expr2, + const char* abs_error_expr, + double val1, + double val2, + double abs_error) { + const double diff = fabs(val1 - val2); + if (diff <= abs_error) return AssertionSuccess(); + + // TODO(wan): do not print the value of an expression if it's + // already a literal. + return AssertionFailure() + << "The difference between " << expr1 << " and " << expr2 + << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n" + << expr1 << " evaluates to " << val1 << ",\n" + << expr2 << " evaluates to " << val2 << ", and\n" + << abs_error_expr << " evaluates to " << abs_error << "."; +} + + +// Helper template for implementing FloatLE() and DoubleLE(). +template +AssertionResult FloatingPointLE(const char* expr1, + const char* expr2, + RawType val1, + RawType val2) { + // Returns success if val1 is less than val2, + if (val1 < val2) { + return AssertionSuccess(); + } + + // or if val1 is almost equal to val2. 
+ const FloatingPoint lhs(val1), rhs(val2); + if (lhs.AlmostEquals(rhs)) { + return AssertionSuccess(); + } + + // Note that the above two checks will both fail if either val1 or + // val2 is NaN, as the IEEE floating-point standard requires that + // any predicate involving a NaN must return false. + + ::std::stringstream val1_ss; + val1_ss << std::setprecision(std::numeric_limits::digits10 + 2) + << val1; + + ::std::stringstream val2_ss; + val2_ss << std::setprecision(std::numeric_limits::digits10 + 2) + << val2; + + return AssertionFailure() + << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n" + << " Actual: " << StringStreamToString(&val1_ss) << " vs " + << StringStreamToString(&val2_ss); +} + +} // namespace internal + +// Asserts that val1 is less than, or almost equal to, val2. Fails +// otherwise. In particular, it fails if either val1 or val2 is NaN. +AssertionResult FloatLE(const char* expr1, const char* expr2, + float val1, float val2) { + return internal::FloatingPointLE(expr1, expr2, val1, val2); +} + +// Asserts that val1 is less than, or almost equal to, val2. Fails +// otherwise. In particular, it fails if either val1 or val2 is NaN. +AssertionResult DoubleLE(const char* expr1, const char* expr2, + double val1, double val2) { + return internal::FloatingPointLE(expr1, expr2, val1, val2); +} + +namespace internal { + +// The helper function for {ASSERT|EXPECT}_EQ with int or enum +// arguments. +AssertionResult CmpHelperEQ(const char* expected_expression, + const char* actual_expression, + BiggestInt expected, + BiggestInt actual) { + if (expected == actual) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + FormatForComparisonFailureMessage(expected, actual), + FormatForComparisonFailureMessage(actual, expected), + false); +} + +// A macro for implementing the helper functions needed to implement +// ASSERT_?? and EXPECT_?? with integer or enum arguments. It is here +// just to avoid copy-and-paste of similar code. +#define GTEST_IMPL_CMP_HELPER_(op_name, op)\ +AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \ + BiggestInt val1, BiggestInt val2) {\ + if (val1 op val2) {\ + return AssertionSuccess();\ + } else {\ + return AssertionFailure() \ + << "Expected: (" << expr1 << ") " #op " (" << expr2\ + << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\ + << " vs " << FormatForComparisonFailureMessage(val2, val1);\ + }\ +} + +// Implements the helper function for {ASSERT|EXPECT}_NE with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(NE, !=) +// Implements the helper function for {ASSERT|EXPECT}_LE with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(LE, <=) +// Implements the helper function for {ASSERT|EXPECT}_LT with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(LT, < ) +// Implements the helper function for {ASSERT|EXPECT}_GE with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(GE, >=) +// Implements the helper function for {ASSERT|EXPECT}_GT with int or +// enum arguments. +GTEST_IMPL_CMP_HELPER_(GT, > ) + +#undef GTEST_IMPL_CMP_HELPER_ + +// The helper function for {ASSERT|EXPECT}_STREQ. 
+AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual) { + if (String::CStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + PrintToString(expected), + PrintToString(actual), + false); +} + +// The helper function for {ASSERT|EXPECT}_STRCASEEQ. +AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual) { + if (String::CaseInsensitiveCStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + PrintToString(expected), + PrintToString(actual), + true); +} + +// The helper function for {ASSERT|EXPECT}_STRNE. +AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2) { + if (!String::CStringEquals(s1, s2)) { + return AssertionSuccess(); + } else { + return AssertionFailure() << "Expected: (" << s1_expression << ") != (" + << s2_expression << "), actual: \"" + << s1 << "\" vs \"" << s2 << "\""; + } +} + +// The helper function for {ASSERT|EXPECT}_STRCASENE. +AssertionResult CmpHelperSTRCASENE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2) { + if (!String::CaseInsensitiveCStringEquals(s1, s2)) { + return AssertionSuccess(); + } else { + return AssertionFailure() + << "Expected: (" << s1_expression << ") != (" + << s2_expression << ") (ignoring case), actual: \"" + << s1 << "\" vs \"" << s2 << "\""; + } +} + +} // namespace internal + +namespace { + +// Helper functions for implementing IsSubString() and IsNotSubstring(). + +// This group of overloaded functions return true iff needle is a +// substring of haystack. NULL is considered a substring of itself +// only. + +bool IsSubstringPred(const char* needle, const char* haystack) { + if (needle == NULL || haystack == NULL) + return needle == haystack; + + return strstr(haystack, needle) != NULL; +} + +bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) { + if (needle == NULL || haystack == NULL) + return needle == haystack; + + return wcsstr(haystack, needle) != NULL; +} + +// StringType here can be either ::std::string or ::std::wstring. +template +bool IsSubstringPred(const StringType& needle, + const StringType& haystack) { + return haystack.find(needle) != StringType::npos; +} + +// This function implements either IsSubstring() or IsNotSubstring(), +// depending on the value of the expected_to_be_substring parameter. +// StringType here can be const char*, const wchar_t*, ::std::string, +// or ::std::wstring. +template +AssertionResult IsSubstringImpl( + bool expected_to_be_substring, + const char* needle_expr, const char* haystack_expr, + const StringType& needle, const StringType& haystack) { + if (IsSubstringPred(needle, haystack) == expected_to_be_substring) + return AssertionSuccess(); + + const bool is_wide_string = sizeof(needle[0]) > 1; + const char* const begin_string_quote = is_wide_string ? "L\"" : "\""; + return AssertionFailure() + << "Value of: " << needle_expr << "\n" + << " Actual: " << begin_string_quote << needle << "\"\n" + << "Expected: " << (expected_to_be_substring ? 
"" : "not ") + << "a substring of " << haystack_expr << "\n" + << "Which is: " << begin_string_quote << haystack << "\""; +} + +} // namespace + +// IsSubstring() and IsNotSubstring() check whether needle is a +// substring of haystack (NULL is considered a substring of itself +// only), and return an appropriate error message when they fail. + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +#if GTEST_HAS_STD_WSTRING +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} +#endif // GTEST_HAS_STD_WSTRING + +namespace internal { + +#if GTEST_OS_WINDOWS + +namespace { + +// Helper function for IsHRESULT{SuccessFailure} predicates +AssertionResult HRESULTFailureHelper(const char* expr, + const char* expected, + long hr) { // NOLINT +# if GTEST_OS_WINDOWS_MOBILE + + // Windows CE doesn't support FormatMessage. + const char error_text[] = ""; + +# else + + // Looks up the human-readable system message for the HRESULT code + // and since we're not passing any params to FormatMessage, we don't + // want inserts expanded. + const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS; + const DWORD kBufSize = 4096; + // Gets the system's human readable message string for this HRESULT. 
+ char error_text[kBufSize] = { '\0' };
+ DWORD message_length = ::FormatMessageA(kFlags,
+ 0, // no source, we're asking system
+ hr, // the error
+ 0, // no line width restrictions
+ error_text, // output buffer
+ kBufSize, // buf size
+ NULL); // no arguments for inserts
+ // Trims trailing white space (FormatMessage leaves a trailing CR-LF)
+ for (; message_length && IsSpace(error_text[message_length - 1]);
+ --message_length) {
+ error_text[message_length - 1] = '\0';
+ }
+
+# endif // GTEST_OS_WINDOWS_MOBILE
+
+ const std::string error_hex("0x" + String::FormatHexInt(hr));
+ return ::testing::AssertionFailure()
+ << "Expected: " << expr << " " << expected << ".\n"
+ << " Actual: " << error_hex << " " << error_text << "\n";
+}
+
+} // namespace
+
+AssertionResult IsHRESULTSuccess(const char* expr, long hr) { // NOLINT
+ if (SUCCEEDED(hr)) {
+ return AssertionSuccess();
+ }
+ return HRESULTFailureHelper(expr, "succeeds", hr);
+}
+
+AssertionResult IsHRESULTFailure(const char* expr, long hr) { // NOLINT
+ if (FAILED(hr)) {
+ return AssertionSuccess();
+ }
+ return HRESULTFailureHelper(expr, "fails", hr);
+}
+
+#endif // GTEST_OS_WINDOWS
+
+// Utility functions for encoding Unicode text (wide strings) in
+// UTF-8.
+
+// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8
+// like this:
+//
+// Code-point length   Encoding
+//   0 -  7 bits       0xxxxxxx
+//   8 - 11 bits       110xxxxx 10xxxxxx
+//  12 - 16 bits       1110xxxx 10xxxxxx 10xxxxxx
+//  17 - 21 bits       11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+
+// The maximum code-point a one-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint1 = (static_cast<UInt32>(1) << 7) - 1;
+
+// The maximum code-point a two-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint2 = (static_cast<UInt32>(1) << (5 + 6)) - 1;
+
+// The maximum code-point a three-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint3 = (static_cast<UInt32>(1) << (4 + 2*6)) - 1;
+
+// The maximum code-point a four-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint4 = (static_cast<UInt32>(1) << (3 + 3*6)) - 1;
+
+// Chops off the n lowest bits from a bit pattern. Returns the n
+// lowest bits. As a side effect, the original bit pattern will be
+// shifted to the right by n bits.
+inline UInt32 ChopLowBits(UInt32* bits, int n) {
+ const UInt32 low_bits = *bits & ((static_cast<UInt32>(1) << n) - 1);
+ *bits >>= n;
+ return low_bits;
+}
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
+// to "(Invalid Unicode 0xXXXXXXXX)".
+std::string CodePointToUtf8(UInt32 code_point) {
+ if (code_point > kMaxCodePoint4) {
+ return "(Invalid Unicode 0x" + String::FormatHexInt(code_point) + ")";
+ }
+
+ char str[5]; // Big enough for the largest valid code point.
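+ // Illustrative note, not part of the upstream sources: e.g. U+00E9,
+ // LATIN SMALL LETTER E WITH ACUTE, is above kMaxCodePoint1 (U+007F), so
+ // it takes the two-byte branch below and encodes as 0xC3 0xA9.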
+ if (code_point <= kMaxCodePoint1) {
+ str[1] = '\0';
+ str[0] = static_cast<char>(code_point); // 0xxxxxxx
+ } else if (code_point <= kMaxCodePoint2) {
+ str[2] = '\0';
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xC0 | code_point); // 110xxxxx
+ } else if (code_point <= kMaxCodePoint3) {
+ str[3] = '\0';
+ str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xE0 | code_point); // 1110xxxx
+ } else { // code_point <= kMaxCodePoint4
+ str[4] = '\0';
+ str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx
+ str[0] = static_cast<char>(0xF0 | code_point); // 11110xxx
+ }
+ return str;
+}
+
+// The following two functions only make sense if the system
+// uses UTF-16 for wide string encoding. All supported systems
+// with 16 bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.
+
+// Determines if the arguments constitute UTF-16 surrogate pair
+// and thus should be combined into a single Unicode code point
+// using CreateCodePointFromUtf16SurrogatePair.
+inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
+ return sizeof(wchar_t) == 2 &&
+ (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
+}
+
+// Creates a Unicode code point from UTF16 surrogate pair.
+inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,
+ wchar_t second) {
+ const UInt32 mask = (1 << 10) - 1;
+ return (sizeof(wchar_t) == 2) ?
+ (((first & mask) << 10) | (second & mask)) + 0x10000 :
+ // This function should not be called when the condition is
+ // false, but we provide a sensible default in case it is.
+ static_cast<UInt32>(first);
+}
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+// UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+// UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from the Basic
+// Multilingual Plane.
+std::string WideStringToUtf8(const wchar_t* str, int num_chars) {
+ if (num_chars == -1)
+ num_chars = static_cast<int>(wcslen(str));
+
+ ::std::stringstream stream;
+ for (int i = 0; i < num_chars; ++i) {
+ UInt32 unicode_code_point;
+
+ if (str[i] == L'\0') {
+ break;
+ } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) {
+ unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i],
+ str[i + 1]);
+ i++;
+ } else {
+ unicode_code_point = static_cast<UInt32>(str[i]);
+ }
+
+ stream << CodePointToUtf8(unicode_code_point);
+ }
+ return StringStreamToString(&stream);
+}
+
+// Converts a wide C string to an std::string using the UTF-8 encoding.
+// NULL will be converted to "(null)".
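+// Illustrative note, not part of the upstream sources: under the UTF-16
+// path above, a code point such as U+1F600 is stored as the surrogate pair
+// 0xD83D 0xDE00; WideStringToUtf8() recombines the pair into the original
+// code point before emitting its four-byte UTF-8 form.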
+std::string String::ShowWideCString(const wchar_t * wide_c_str) { + if (wide_c_str == NULL) return "(null)"; + + return internal::WideStringToUtf8(wide_c_str, -1); +} + +// Compares two wide C strings. Returns true iff they have the same +// content. +// +// Unlike wcscmp(), this function can handle NULL argument(s). A NULL +// C string is considered different to any non-NULL C string, +// including the empty string. +bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) { + if (lhs == NULL) return rhs == NULL; + + if (rhs == NULL) return false; + + return wcscmp(lhs, rhs) == 0; +} + +// Helper function for *_STREQ on wide strings. +AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const wchar_t* expected, + const wchar_t* actual) { + if (String::WideCStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + PrintToString(expected), + PrintToString(actual), + false); +} + +// Helper function for *_STRNE on wide strings. +AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const wchar_t* s1, + const wchar_t* s2) { + if (!String::WideCStringEquals(s1, s2)) { + return AssertionSuccess(); + } + + return AssertionFailure() << "Expected: (" << s1_expression << ") != (" + << s2_expression << "), actual: " + << PrintToString(s1) + << " vs " << PrintToString(s2); +} + +// Compares two C strings, ignoring case. Returns true iff they have +// the same content. +// +// Unlike strcasecmp(), this function can handle NULL argument(s). A +// NULL C string is considered different to any non-NULL C string, +// including the empty string. +bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) { + if (lhs == NULL) + return rhs == NULL; + if (rhs == NULL) + return false; + return posix::StrCaseCmp(lhs, rhs) == 0; +} + + // Compares two wide C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike wcscasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL wide C string, + // including the empty string. + // NB: The implementations on different platforms slightly differ. + // On windows, this method uses _wcsicmp which compares according to LC_CTYPE + // environment variable. On GNU platform this method uses wcscasecmp + // which compares according to LC_CTYPE category of the current locale. + // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // current locale. +bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs, + const wchar_t* rhs) { + if (lhs == NULL) return rhs == NULL; + + if (rhs == NULL) return false; + +#if GTEST_OS_WINDOWS + return _wcsicmp(lhs, rhs) == 0; +#elif GTEST_OS_LINUX && !GTEST_OS_LINUX_ANDROID + return wcscasecmp(lhs, rhs) == 0; +#else + // Android, Mac OS X and Cygwin don't define wcscasecmp. + // Other unknown OSes may not define it either. + wint_t left, right; + do { + left = towlower(*lhs++); + right = towlower(*rhs++); + } while (left && left == right); + return left == right; +#endif // OS selector +} + +// Returns true iff str ends with the given suffix, ignoring case. +// Any string is considered to end with an empty suffix. 
+bool String::EndsWithCaseInsensitive(
+    const std::string& str, const std::string& suffix) {
+  const size_t str_len = str.length();
+  const size_t suffix_len = suffix.length();
+  return (str_len >= suffix_len) &&
+         CaseInsensitiveCStringEquals(str.c_str() + str_len - suffix_len,
+                                      suffix.c_str());
+}
+
+// Formats an int value as "%02d".
+std::string String::FormatIntWidth2(int value) {
+  std::stringstream ss;
+  ss << std::setfill('0') << std::setw(2) << value;
+  return ss.str();
+}
+
+// Formats an int value as "%X".
+std::string String::FormatHexInt(int value) {
+  std::stringstream ss;
+  ss << std::hex << std::uppercase << value;
+  return ss.str();
+}
+
+// Formats a byte as "%02X".
+std::string String::FormatByte(unsigned char value) {
+  std::stringstream ss;
+  ss << std::setfill('0') << std::setw(2) << std::hex << std::uppercase
+     << static_cast<unsigned int>(value);
+  return ss.str();
+}
+
+// Converts the buffer in a stringstream to an std::string, converting NUL
+// bytes to "\\0" along the way.
+std::string StringStreamToString(::std::stringstream* ss) {
+  const ::std::string& str = ss->str();
+  const char* const start = str.c_str();
+  const char* const end = start + str.length();
+
+  std::string result;
+  result.reserve(2 * (end - start));
+  for (const char* ch = start; ch != end; ++ch) {
+    if (*ch == '\0') {
+      result += "\\0";  // Replaces NUL with "\\0".
+    } else {
+      result += *ch;
+    }
+  }
+
+  return result;
+}
+
+// Appends the user-supplied message to the Google-Test-generated message.
+std::string AppendUserMessage(const std::string& gtest_msg,
+                              const Message& user_msg) {
+  // Appends the user message if it's non-empty.
+  const std::string user_msg_string = user_msg.GetString();
+  if (user_msg_string.empty()) {
+    return gtest_msg;
+  }
+
+  return gtest_msg + "\n" + user_msg_string;
+}
+
+}  // namespace internal
+
+// class TestResult
+
+// Creates an empty TestResult.
+TestResult::TestResult()
+    : death_test_count_(0),
+      elapsed_time_(0) {
+}
+
+// D'tor.
+TestResult::~TestResult() {
+}
+
+// Returns the i-th test part result among all the results. i can
+// range from 0 to total_part_count() - 1. If i is not in that range,
+// aborts the program.
+const TestPartResult& TestResult::GetTestPartResult(int i) const {
+  if (i < 0 || i >= total_part_count())
+    internal::posix::Abort();
+  return test_part_results_.at(i);
+}
+
+// Returns the i-th test property. i can range from 0 to
+// test_property_count() - 1. If i is not in that range, aborts the
+// program.
+const TestProperty& TestResult::GetTestProperty(int i) const {
+  if (i < 0 || i >= test_property_count())
+    internal::posix::Abort();
+  return test_properties_.at(i);
+}
+
+// Clears the test part results.
+void TestResult::ClearTestPartResults() {
+  test_part_results_.clear();
+}
+
+// Adds a test part result to the list.
+void TestResult::AddTestPartResult(const TestPartResult& test_part_result) {
+  test_part_results_.push_back(test_part_result);
+}
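+
+// --- Editor's illustration, not part of the upstream gtest sources: the
+// expected output of the formatting helpers defined above. The function
+// name is hypothetical. ---
+#if 0  // illustration only; kept out of the build
+void FormatHelpersSketch() {
+  // FormatIntWidth2 zero-pads to two digits, as "%02d" would.
+  std::string a = internal::String::FormatIntWidth2(7);    // "07"
+  // FormatHexInt prints uppercase hex without padding, as "%X" would.
+  std::string b = internal::String::FormatHexInt(255);     // "FF"
+  // FormatByte always yields exactly two uppercase hex digits.
+  std::string c = internal::String::FormatByte(0x0A);      // "0A"
+}
+#endif
+
+// Adds a test property to the list. If a property with the same key as the
+// supplied property is already represented, the value of this test_property
+// replaces the old value for that key.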
+void TestResult::RecordProperty(const std::string& xml_element,
+                                const TestProperty& test_property) {
+  if (!ValidateTestProperty(xml_element, test_property)) {
+    return;
+  }
+  internal::MutexLock lock(&test_properites_mutex_);
+  const std::vector<TestProperty>::iterator property_with_matching_key =
+      std::find_if(test_properties_.begin(), test_properties_.end(),
+                   internal::TestPropertyKeyIs(test_property.key()));
+  if (property_with_matching_key == test_properties_.end()) {
+    test_properties_.push_back(test_property);
+    return;
+  }
+  property_with_matching_key->SetValue(test_property.value());
+}
+
+// The list of reserved attributes used in the <testsuites> element of XML
+// output.
+static const char* const kReservedTestSuitesAttributes[] = {
+  "disabled",
+  "errors",
+  "failures",
+  "name",
+  "random_seed",
+  "tests",
+  "time",
+  "timestamp"
+};
+
+// The list of reserved attributes used in the <testsuite> element of XML
+// output.
+static const char* const kReservedTestSuiteAttributes[] = {
+  "disabled",
+  "errors",
+  "failures",
+  "name",
+  "tests",
+  "time"
+};
+
+// The list of reserved attributes used in the <testcase> element of XML
+// output.
+static const char* const kReservedTestCaseAttributes[] = {
+  "classname",
+  "name",
+  "status",
+  "time",
+  "type_param",
+  "value_param"
+};
+
+template <int kSize>
+std::vector<std::string> ArrayAsVector(const char* const (&array)[kSize]) {
+  return std::vector<std::string>(array, array + kSize);
+}
+
+static std::vector<std::string> GetReservedAttributesForElement(
+    const std::string& xml_element) {
+  if (xml_element == "testsuites") {
+    return ArrayAsVector(kReservedTestSuitesAttributes);
+  } else if (xml_element == "testsuite") {
+    return ArrayAsVector(kReservedTestSuiteAttributes);
+  } else if (xml_element == "testcase") {
+    return ArrayAsVector(kReservedTestCaseAttributes);
+  } else {
+    GTEST_CHECK_(false) << "Unrecognized xml_element provided: " << xml_element;
+  }
+  // This code is unreachable but some compilers may not realize that.
+  return std::vector<std::string>();
+}
+
+static std::string FormatWordList(const std::vector<std::string>& words) {
+  Message word_list;
+  for (size_t i = 0; i < words.size(); ++i) {
+    if (i > 0 && words.size() > 2) {
+      word_list << ", ";
+    }
+    if (i == words.size() - 1) {
+      word_list << "and ";
+    }
+    word_list << "'" << words[i] << "'";
+  }
+  return word_list.GetString();
+}
+
+bool ValidateTestPropertyName(const std::string& property_name,
+                              const std::vector<std::string>& reserved_names) {
+  if (std::find(reserved_names.begin(), reserved_names.end(), property_name) !=
+          reserved_names.end()) {
+    ADD_FAILURE() << "Reserved key used in RecordProperty(): " << property_name
+                  << " (" << FormatWordList(reserved_names)
+                  << " are reserved by " << GTEST_NAME_ << ")";
+    return false;
+  }
+  return true;
+}
+
+// Adds a failure if the key is a reserved attribute of the element named
+// xml_element. Returns true if the property is valid.
+bool TestResult::ValidateTestProperty(const std::string& xml_element,
+                                      const TestProperty& test_property) {
+  return ValidateTestPropertyName(test_property.key(),
+                                  GetReservedAttributesForElement(xml_element));
+}
+
+// Clears the object.
+void TestResult::Clear() {
+  test_part_results_.clear();
+  test_properties_.clear();
+  death_test_count_ = 0;
+  elapsed_time_ = 0;
+}
+
+// Returns true iff the test failed.
+bool TestResult::Failed() const {
+  for (int i = 0; i < total_part_count(); ++i) {
+    if (GetTestPartResult(i).failed())
+      return true;
+  }
+  return false;
+}
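+
+// --- Editor's illustration, not part of the upstream gtest sources: how the
+// reserved-attribute check above plays out in a test body. The test and key
+// names are made up. ---
+#if 0  // illustration only; kept out of the build
+TEST(RecordPropertySketch, ReservedKeys) {
+  // Fine: "bug_id" is not reserved for the <testcase> element, so it is
+  // emitted as bug_id="42" on this test's <testcase> node in the XML report.
+  RecordProperty("bug_id", 42);
+  // Not fine: "name" appears in kReservedTestCaseAttributes, so
+  // ValidateTestPropertyName() adds a failure and the property is dropped.
+  RecordProperty("name", "oops");
+}
+#endif
+
+// Returns true iff the test part fatally failed.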
+static bool TestPartFatallyFailed(const TestPartResult& result) { + return result.fatally_failed(); +} + +// Returns true iff the test fatally failed. +bool TestResult::HasFatalFailure() const { + return CountIf(test_part_results_, TestPartFatallyFailed) > 0; +} + +// Returns true iff the test part non-fatally failed. +static bool TestPartNonfatallyFailed(const TestPartResult& result) { + return result.nonfatally_failed(); +} + +// Returns true iff the test has a non-fatal failure. +bool TestResult::HasNonfatalFailure() const { + return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0; +} + +// Gets the number of all test parts. This is the sum of the number +// of successful test parts and the number of failed test parts. +int TestResult::total_part_count() const { + return static_cast(test_part_results_.size()); +} + +// Returns the number of the test properties. +int TestResult::test_property_count() const { + return static_cast(test_properties_.size()); +} + +// class Test + +// Creates a Test object. + +// The c'tor saves the values of all Google Test flags. +Test::Test() + : gtest_flag_saver_(new internal::GTestFlagSaver) { +} + +// The d'tor restores the values of all Google Test flags. +Test::~Test() { + delete gtest_flag_saver_; +} + +// Sets up the test fixture. +// +// A sub-class may override this. +void Test::SetUp() { +} + +// Tears down the test fixture. +// +// A sub-class may override this. +void Test::TearDown() { +} + +// Allows user supplied key value pairs to be recorded for later output. +void Test::RecordProperty(const std::string& key, const std::string& value) { + UnitTest::GetInstance()->RecordProperty(key, value); +} + +// Allows user supplied key value pairs to be recorded for later output. +void Test::RecordProperty(const std::string& key, int value) { + Message value_message; + value_message << value; + RecordProperty(key, value_message.GetString().c_str()); +} + +namespace internal { + +void ReportFailureInUnknownLocation(TestPartResult::Type result_type, + const std::string& message) { + // This function is a friend of UnitTest and as such has access to + // AddTestPartResult. + UnitTest::GetInstance()->AddTestPartResult( + result_type, + NULL, // No info about the source file where the exception occurred. + -1, // We have no info on which line caused the exception. + message, + ""); // No stack trace, either. +} + +} // namespace internal + +// Google Test requires all tests in the same test case to use the same test +// fixture class. This function checks if the current test has the +// same fixture class as the first test in the current test case. If +// yes, it returns true; otherwise it generates a Google Test failure and +// returns false. +bool Test::HasSameFixtureClass() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + const TestCase* const test_case = impl->current_test_case(); + + // Info about the first test in the current test case. + const TestInfo* const first_test_info = test_case->test_info_list()[0]; + const internal::TypeId first_fixture_id = first_test_info->fixture_class_id_; + const char* const first_test_name = first_test_info->name(); + + // Info about the current test. + const TestInfo* const this_test_info = impl->current_test_info(); + const internal::TypeId this_fixture_id = this_test_info->fixture_class_id_; + const char* const this_test_name = this_test_info->name(); + + if (this_fixture_id != first_fixture_id) { + // Is the first test defined using TEST? 
+ const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId(); + // Is this test defined using TEST? + const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId(); + + if (first_is_TEST || this_is_TEST) { + // The user mixed TEST and TEST_F in this test case - we'll tell + // him/her how to fix it. + + // Gets the name of the TEST and the name of the TEST_F. Note + // that first_is_TEST and this_is_TEST cannot both be true, as + // the fixture IDs are different for the two tests. + const char* const TEST_name = + first_is_TEST ? first_test_name : this_test_name; + const char* const TEST_F_name = + first_is_TEST ? this_test_name : first_test_name; + + ADD_FAILURE() + << "All tests in the same test case must use the same test fixture\n" + << "class, so mixing TEST_F and TEST in the same test case is\n" + << "illegal. In test case " << this_test_info->test_case_name() + << ",\n" + << "test " << TEST_F_name << " is defined using TEST_F but\n" + << "test " << TEST_name << " is defined using TEST. You probably\n" + << "want to change the TEST to TEST_F or move it to another test\n" + << "case."; + } else { + // The user defined two fixture classes with the same name in + // two namespaces - we'll tell him/her how to fix it. + ADD_FAILURE() + << "All tests in the same test case must use the same test fixture\n" + << "class. However, in test case " + << this_test_info->test_case_name() << ",\n" + << "you defined test " << first_test_name + << " and test " << this_test_name << "\n" + << "using two different test fixture classes. This can happen if\n" + << "the two classes are from different namespaces or translation\n" + << "units and have the same name. You should probably rename one\n" + << "of the classes to put the tests into different test cases."; + } + return false; + } + + return true; +} + +#if GTEST_HAS_SEH + +// Adds an "exception thrown" fatal failure to the current test. This +// function returns its result via an output parameter pointer because VC++ +// prohibits creation of objects with destructors on stack in functions +// using __try (see error C2712). +static std::string* FormatSehExceptionMessage(DWORD exception_code, + const char* location) { + Message message; + message << "SEH exception with code 0x" << std::setbase(16) << + exception_code << std::setbase(10) << " thrown in " << location << "."; + + return new std::string(message.GetString()); +} + +#endif // GTEST_HAS_SEH + +namespace internal { + +#if GTEST_HAS_EXCEPTIONS + +// Adds an "exception thrown" fatal failure to the current test. +static std::string FormatCxxExceptionMessage(const char* description, + const char* location) { + Message message; + if (description != NULL) { + message << "C++ exception with description \"" << description << "\""; + } else { + message << "Unknown C++ exception"; + } + message << " thrown in " << location << "."; + + return message.GetString(); +} + +static std::string PrintTestPartResultToString( + const TestPartResult& test_part_result); + +GoogleTestFailureException::GoogleTestFailureException( + const TestPartResult& failure) + : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {} + +#endif // GTEST_HAS_EXCEPTIONS + +// We put these helper functions in the internal namespace as IBM's xlC +// compiler rejects the code if they were declared static. + +// Runs the given method and handles SEH exceptions it throws, when +// SEH is supported; returns the 0-value for type Result in case of an +// SEH exception. 
(Microsoft compilers cannot handle SEH and C++
+// exceptions in the same function. Therefore, we provide a separate
+// wrapper function for handling SEH exceptions.)
+template <class T, typename Result>
+Result HandleSehExceptionsInMethodIfSupported(
+    T* object, Result (T::*method)(), const char* location) {
+#if GTEST_HAS_SEH
+  __try {
+    return (object->*method)();
+  } __except (internal::UnitTestOptions::GTestShouldProcessSEH(  // NOLINT
+      GetExceptionCode())) {
+    // We create the exception message on the heap because VC++ prohibits
+    // creation of objects with destructors on stack in functions using __try
+    // (see error C2712).
+    std::string* exception_message = FormatSehExceptionMessage(
+        GetExceptionCode(), location);
+    internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure,
+                                             *exception_message);
+    delete exception_message;
+    return static_cast<Result>(0);
+  }
+#else
+  (void)location;
+  return (object->*method)();
+#endif  // GTEST_HAS_SEH
+}
+
+// Runs the given method and catches and reports C++ and/or SEH-style
+// exceptions, if they are supported; returns the 0-value for type
+// Result in case of an SEH exception.
+template <class T, typename Result>
+Result HandleExceptionsInMethodIfSupported(
+    T* object, Result (T::*method)(), const char* location) {
+  // NOTE: The user code can affect the way in which Google Test handles
+  // exceptions by setting GTEST_FLAG(catch_exceptions), but only before
+  // RUN_ALL_TESTS() starts. It is technically possible to check the flag
+  // after the exception is caught and either report or re-throw the
+  // exception based on the flag's value:
+  //
+  // try {
+  //   // Perform the test method.
+  // } catch (...) {
+  //   if (GTEST_FLAG(catch_exceptions))
+  //     // Report the exception as failure.
+  //   else
+  //     throw;  // Re-throws the original exception.
+  // }
+  //
+  // However, the purpose of this flag is to allow the program to drop into
+  // the debugger when the exception is thrown. On most platforms, once the
+  // control enters the catch block, the exception origin information is
+  // lost and the debugger will stop the program at the point of the
+  // re-throw in this function -- instead of at the point of the original
+  // throw statement in the code under test. For this reason, we perform
+  // the check early, sacrificing the ability to affect Google Test's
+  // exception handling in the method where the exception is thrown.
+  if (internal::GetUnitTestImpl()->catch_exceptions()) {
+#if GTEST_HAS_EXCEPTIONS
+    try {
+      return HandleSehExceptionsInMethodIfSupported(object, method, location);
+    } catch (const internal::GoogleTestFailureException&) {  // NOLINT
+      // This exception type can only be thrown by a failed Google
+      // Test assertion with the intention of letting another testing
+      // framework catch it. Therefore we just re-throw it.
+      throw;
+    } catch (const std::exception& e) {  // NOLINT
+      internal::ReportFailureInUnknownLocation(
+          TestPartResult::kFatalFailure,
+          FormatCxxExceptionMessage(e.what(), location));
+    } catch (...) {  // NOLINT
+      internal::ReportFailureInUnknownLocation(
+          TestPartResult::kFatalFailure,
+          FormatCxxExceptionMessage(NULL, location));
+    }
+    return static_cast<Result>(0);
+#else
+    return HandleSehExceptionsInMethodIfSupported(object, method, location);
+#endif  // GTEST_HAS_EXCEPTIONS
+  } else {
+    return (object->*method)();
+  }
+}
+
+}  // namespace internal
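+
+// --- Editor's illustration, not part of the upstream gtest sources: how a
+// throwing method is funneled through the wrapper above. The struct and
+// function names are made up. ---
+#if 0  // illustration only; kept out of the build
+struct ThrowingCase {
+  void Boom() { throw std::runtime_error("disk full"); }
+};
+
+void ExceptionWrapperSketch() {
+  ThrowingCase t;
+  // With catch_exceptions enabled (the default), the std::exception is
+  // caught and reported as the fatal failure:
+  //   C++ exception with description "disk full" thrown in Boom().
+  // The wrapper then returns static_cast<Result>(0) instead of propagating.
+  internal::HandleExceptionsInMethodIfSupported(&t, &ThrowingCase::Boom,
+                                                "Boom()");
+}
+#endif
+
+// Runs the test and updates the test result.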
+void Test::Run() { + if (!HasSameFixtureClass()) return; + + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported(this, &Test::SetUp, "SetUp()"); + // We will run the test only if SetUp() was successful. + if (!HasFatalFailure()) { + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &Test::TestBody, "the test body"); + } + + // However, we want to clean up as much as possible. Hence we will + // always call TearDown(), even if SetUp() or the test body has + // failed. + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &Test::TearDown, "TearDown()"); +} + +// Returns true iff the current test has a fatal failure. +bool Test::HasFatalFailure() { + return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure(); +} + +// Returns true iff the current test has a non-fatal failure. +bool Test::HasNonfatalFailure() { + return internal::GetUnitTestImpl()->current_test_result()-> + HasNonfatalFailure(); +} + +// class TestInfo + +// Constructs a TestInfo object. It assumes ownership of the test factory +// object. +TestInfo::TestInfo(const std::string& a_test_case_name, + const std::string& a_name, + const char* a_type_param, + const char* a_value_param, + internal::TypeId fixture_class_id, + internal::TestFactoryBase* factory) + : test_case_name_(a_test_case_name), + name_(a_name), + type_param_(a_type_param ? new std::string(a_type_param) : NULL), + value_param_(a_value_param ? new std::string(a_value_param) : NULL), + fixture_class_id_(fixture_class_id), + should_run_(false), + is_disabled_(false), + matches_filter_(false), + factory_(factory), + result_() {} + +// Destructs a TestInfo object. +TestInfo::~TestInfo() { delete factory_; } + +namespace internal { + +// Creates a new TestInfo object and registers it with Google Test; +// returns the created object. +// +// Arguments: +// +// test_case_name: name of the test case +// name: name of the test +// type_param: the name of the test's type parameter, or NULL if +// this is not a typed or a type-parameterized test. +// value_param: text representation of the test's value parameter, +// or NULL if this is not a value-parameterized test. +// fixture_class_id: ID of the test fixture class +// set_up_tc: pointer to the function that sets up the test case +// tear_down_tc: pointer to the function that tears down the test case +// factory: pointer to the factory that creates a test object. +// The newly created TestInfo instance will assume +// ownership of the factory object. +TestInfo* MakeAndRegisterTestInfo( + const char* test_case_name, + const char* name, + const char* type_param, + const char* value_param, + TypeId fixture_class_id, + SetUpTestCaseFunc set_up_tc, + TearDownTestCaseFunc tear_down_tc, + TestFactoryBase* factory) { + TestInfo* const test_info = + new TestInfo(test_case_name, name, type_param, value_param, + fixture_class_id, factory); + GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info); + return test_info; +} + +#if GTEST_HAS_PARAM_TEST +void ReportInvalidTestCaseType(const char* test_case_name, + const char* file, int line) { + Message errors; + errors + << "Attempted redefinition of test case " << test_case_name << ".\n" + << "All tests in the same test case must use the same test fixture\n" + << "class. 
However, in test case " << test_case_name << ", you tried\n" + << "to define a test using a fixture class different from the one\n" + << "used earlier. This can happen if the two fixture classes are\n" + << "from different namespaces and have the same name. You should\n" + << "probably rename one of the classes to put the tests into different\n" + << "test cases."; + + fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(), + errors.GetString().c_str()); +} +#endif // GTEST_HAS_PARAM_TEST + +} // namespace internal + +namespace { + +// A predicate that checks the test name of a TestInfo against a known +// value. +// +// This is used for implementation of the TestCase class only. We put +// it in the anonymous namespace to prevent polluting the outer +// namespace. +// +// TestNameIs is copyable. +class TestNameIs { + public: + // Constructor. + // + // TestNameIs has NO default constructor. + explicit TestNameIs(const char* name) + : name_(name) {} + + // Returns true iff the test name of test_info matches name_. + bool operator()(const TestInfo * test_info) const { + return test_info && test_info->name() == name_; + } + + private: + std::string name_; +}; + +} // namespace + +namespace internal { + +// This method expands all parameterized tests registered with macros TEST_P +// and INSTANTIATE_TEST_CASE_P into regular tests and registers those. +// This will be done just once during the program runtime. +void UnitTestImpl::RegisterParameterizedTests() { +#if GTEST_HAS_PARAM_TEST + if (!parameterized_tests_registered_) { + parameterized_test_registry_.RegisterTests(); + parameterized_tests_registered_ = true; + } +#endif +} + +} // namespace internal + +// Creates the test object, runs it, records its result, and then +// deletes it. +void TestInfo::Run() { + if (!should_run_) return; + + // Tells UnitTest where to store test result. + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->set_current_test_info(this); + + TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); + + // Notifies the unit test event listeners that a test is about to start. + repeater->OnTestStart(*this); + + const TimeInMillis start = internal::GetTimeInMillis(); + + impl->os_stack_trace_getter()->UponLeavingGTest(); + + // Creates the test object. + Test* const test = internal::HandleExceptionsInMethodIfSupported( + factory_, &internal::TestFactoryBase::CreateTest, + "the test fixture's constructor"); + + // Runs the test only if the test object was created and its + // constructor didn't generate a fatal failure. + if ((test != NULL) && !Test::HasFatalFailure()) { + // This doesn't throw as all user code that can throw are wrapped into + // exception handling code. + test->Run(); + } + + // Deletes the test object. + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + test, &Test::DeleteSelf_, "the test fixture's destructor"); + + result_.set_elapsed_time(internal::GetTimeInMillis() - start); + + // Notifies the unit test event listener that a test has just finished. + repeater->OnTestEnd(*this); + + // Tells UnitTest to stop associating assertion results to this + // test. + impl->set_current_test_info(NULL); +} + +// class TestCase + +// Gets the number of successful tests in this test case. +int TestCase::successful_test_count() const { + return CountIf(test_info_list_, TestPassed); +} + +// Gets the number of failed tests in this test case. 
+int TestCase::failed_test_count() const { + return CountIf(test_info_list_, TestFailed); +} + +// Gets the number of disabled tests that will be reported in the XML report. +int TestCase::reportable_disabled_test_count() const { + return CountIf(test_info_list_, TestReportableDisabled); +} + +// Gets the number of disabled tests in this test case. +int TestCase::disabled_test_count() const { + return CountIf(test_info_list_, TestDisabled); +} + +// Gets the number of tests to be printed in the XML report. +int TestCase::reportable_test_count() const { + return CountIf(test_info_list_, TestReportable); +} + +// Get the number of tests in this test case that should run. +int TestCase::test_to_run_count() const { + return CountIf(test_info_list_, ShouldRunTest); +} + +// Gets the number of all tests. +int TestCase::total_test_count() const { + return static_cast(test_info_list_.size()); +} + +// Creates a TestCase with the given name. +// +// Arguments: +// +// name: name of the test case +// a_type_param: the name of the test case's type parameter, or NULL if +// this is not a typed or a type-parameterized test case. +// set_up_tc: pointer to the function that sets up the test case +// tear_down_tc: pointer to the function that tears down the test case +TestCase::TestCase(const char* a_name, const char* a_type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc) + : name_(a_name), + type_param_(a_type_param ? new std::string(a_type_param) : NULL), + set_up_tc_(set_up_tc), + tear_down_tc_(tear_down_tc), + should_run_(false), + elapsed_time_(0) { +} + +// Destructor of TestCase. +TestCase::~TestCase() { + // Deletes every Test in the collection. + ForEach(test_info_list_, internal::Delete); +} + +// Returns the i-th test among all the tests. i can range from 0 to +// total_test_count() - 1. If i is not in that range, returns NULL. +const TestInfo* TestCase::GetTestInfo(int i) const { + const int index = GetElementOr(test_indices_, i, -1); + return index < 0 ? NULL : test_info_list_[index]; +} + +// Returns the i-th test among all the tests. i can range from 0 to +// total_test_count() - 1. If i is not in that range, returns NULL. +TestInfo* TestCase::GetMutableTestInfo(int i) { + const int index = GetElementOr(test_indices_, i, -1); + return index < 0 ? NULL : test_info_list_[index]; +} + +// Adds a test to this test case. Will delete the test upon +// destruction of the TestCase object. +void TestCase::AddTestInfo(TestInfo * test_info) { + test_info_list_.push_back(test_info); + test_indices_.push_back(static_cast(test_indices_.size())); +} + +// Runs every test in this TestCase. 
+void TestCase::Run() { + if (!should_run_) return; + + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->set_current_test_case(this); + + TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); + + repeater->OnTestCaseStart(*this); + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &TestCase::RunSetUpTestCase, "SetUpTestCase()"); + + const internal::TimeInMillis start = internal::GetTimeInMillis(); + for (int i = 0; i < total_test_count(); i++) { + GetMutableTestInfo(i)->Run(); + } + elapsed_time_ = internal::GetTimeInMillis() - start; + + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &TestCase::RunTearDownTestCase, "TearDownTestCase()"); + + repeater->OnTestCaseEnd(*this); + impl->set_current_test_case(NULL); +} + +// Clears the results of all tests in this test case. +void TestCase::ClearResult() { + ad_hoc_test_result_.Clear(); + ForEach(test_info_list_, TestInfo::ClearTestResult); +} + +// Shuffles the tests in this test case. +void TestCase::ShuffleTests(internal::Random* random) { + Shuffle(random, &test_indices_); +} + +// Restores the test order to before the first shuffle. +void TestCase::UnshuffleTests() { + for (size_t i = 0; i < test_indices_.size(); i++) { + test_indices_[i] = static_cast(i); + } +} + +// Formats a countable noun. Depending on its quantity, either the +// singular form or the plural form is used. e.g. +// +// FormatCountableNoun(1, "formula", "formuli") returns "1 formula". +// FormatCountableNoun(5, "book", "books") returns "5 books". +static std::string FormatCountableNoun(int count, + const char * singular_form, + const char * plural_form) { + return internal::StreamableToString(count) + " " + + (count == 1 ? singular_form : plural_form); +} + +// Formats the count of tests. +static std::string FormatTestCount(int test_count) { + return FormatCountableNoun(test_count, "test", "tests"); +} + +// Formats the count of test cases. +static std::string FormatTestCaseCount(int test_case_count) { + return FormatCountableNoun(test_case_count, "test case", "test cases"); +} + +// Converts a TestPartResult::Type enum to human-friendly string +// representation. Both kNonFatalFailure and kFatalFailure are translated +// to "Failure", as the user usually doesn't care about the difference +// between the two when viewing the test result. +static const char * TestPartResultTypeToString(TestPartResult::Type type) { + switch (type) { + case TestPartResult::kSuccess: + return "Success"; + + case TestPartResult::kNonFatalFailure: + case TestPartResult::kFatalFailure: +#ifdef _MSC_VER + return "error: "; +#else + return "Failure\n"; +#endif + default: + return "Unknown result type"; + } +} + +namespace internal { + +// Prints a TestPartResult to an std::string. +static std::string PrintTestPartResultToString( + const TestPartResult& test_part_result) { + return (Message() + << internal::FormatFileLocation(test_part_result.file_name(), + test_part_result.line_number()) + << " " << TestPartResultTypeToString(test_part_result.type()) + << test_part_result.message()).GetString(); +} + +// Prints a TestPartResult. 
+static void PrintTestPartResult(const TestPartResult& test_part_result) { + const std::string& result = + PrintTestPartResultToString(test_part_result); + printf("%s\n", result.c_str()); + fflush(stdout); + // If the test program runs in Visual Studio or a debugger, the + // following statements add the test part result message to the Output + // window such that the user can double-click on it to jump to the + // corresponding source code location; otherwise they do nothing. +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + // We don't call OutputDebugString*() on Windows Mobile, as printing + // to stdout is done by OutputDebugString() there already - we don't + // want the same message printed twice. + ::OutputDebugStringA(result.c_str()); + ::OutputDebugStringA("\n"); +#endif +} + +// class PrettyUnitTestResultPrinter + +enum GTestColor { + COLOR_DEFAULT, + COLOR_RED, + COLOR_GREEN, + COLOR_YELLOW +}; + +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + +// Returns the character attribute for the given color. +WORD GetColorAttribute(GTestColor color) { + switch (color) { + case COLOR_RED: return FOREGROUND_RED; + case COLOR_GREEN: return FOREGROUND_GREEN; + case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN; + default: return 0; + } +} + +#else + +// Returns the ANSI color code for the given color. COLOR_DEFAULT is +// an invalid input. +const char* GetAnsiColorCode(GTestColor color) { + switch (color) { + case COLOR_RED: return "1"; + case COLOR_GREEN: return "2"; + case COLOR_YELLOW: return "3"; + default: return NULL; + }; +} + +#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + +// Returns true iff Google Test should use colors in the output. +bool ShouldUseColor(bool stdout_is_tty) { + const char* const gtest_color = GTEST_FLAG(color).c_str(); + + if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) { +#if GTEST_OS_WINDOWS + // On Windows the TERM variable is usually not set, but the + // console there does support colors. + return stdout_is_tty; +#else + // On non-Windows platforms, we rely on the TERM variable. + const char* const term = posix::GetEnv("TERM"); + const bool term_supports_color = + String::CStringEquals(term, "xterm") || + String::CStringEquals(term, "xterm-color") || + String::CStringEquals(term, "xterm-256color") || + String::CStringEquals(term, "screen") || + String::CStringEquals(term, "screen-256color") || + String::CStringEquals(term, "linux") || + String::CStringEquals(term, "cygwin"); + return stdout_is_tty && term_supports_color; +#endif // GTEST_OS_WINDOWS + } + + return String::CaseInsensitiveCStringEquals(gtest_color, "yes") || + String::CaseInsensitiveCStringEquals(gtest_color, "true") || + String::CaseInsensitiveCStringEquals(gtest_color, "t") || + String::CStringEquals(gtest_color, "1"); + // We take "yes", "true", "t", and "1" as meaning "yes". If the + // value is neither one of these nor "auto", we treat it as "no" to + // be conservative. +} + +// Helpers for printing colored strings to stdout. Note that on Windows, we +// cannot simply emit special characters and have the terminal change colors. +// This routine must actually emit the characters rather than return a string +// that would be colored when printed, as can be done on Linux. +void ColoredPrintf(GTestColor color, const char* fmt, ...) 
{ + va_list args; + va_start(args, fmt); + +#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS || GTEST_OS_IOS + const bool use_color = false; +#else + static const bool in_color_mode = + ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0); + const bool use_color = in_color_mode && (color != COLOR_DEFAULT); +#endif // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS + // The '!= 0' comparison is necessary to satisfy MSVC 7.1. + + if (!use_color) { + vprintf(fmt, args); + va_end(args); + return; + } + +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); + + // Gets the current text color. + CONSOLE_SCREEN_BUFFER_INFO buffer_info; + GetConsoleScreenBufferInfo(stdout_handle, &buffer_info); + const WORD old_color_attrs = buffer_info.wAttributes; + + // We need to flush the stream buffers into the console before each + // SetConsoleTextAttribute call lest it affect the text that is already + // printed but has not yet reached the console. + fflush(stdout); + SetConsoleTextAttribute(stdout_handle, + GetColorAttribute(color) | FOREGROUND_INTENSITY); + vprintf(fmt, args); + + fflush(stdout); + // Restores the text color. + SetConsoleTextAttribute(stdout_handle, old_color_attrs); +#else + printf("\033[0;3%sm", GetAnsiColorCode(color)); + vprintf(fmt, args); + printf("\033[m"); // Resets the terminal to default. +#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + va_end(args); +} + +// Text printed in Google Test's text output and --gunit_list_tests +// output to label the type parameter and value parameter for a test. +static const char kTypeParamLabel[] = "TypeParam"; +static const char kValueParamLabel[] = "GetParam()"; + +void PrintFullTestCommentIfPresent(const TestInfo& test_info) { + const char* const type_param = test_info.type_param(); + const char* const value_param = test_info.value_param(); + + if (type_param != NULL || value_param != NULL) { + printf(", where "); + if (type_param != NULL) { + printf("%s = %s", kTypeParamLabel, type_param); + if (value_param != NULL) + printf(" and "); + } + if (value_param != NULL) { + printf("%s = %s", kValueParamLabel, value_param); + } + } +} + +// This class implements the TestEventListener interface. +// +// Class PrettyUnitTestResultPrinter is copyable. +class PrettyUnitTestResultPrinter : public TestEventListener { + public: + PrettyUnitTestResultPrinter() {} + static void PrintTestName(const char * test_case, const char * test) { + printf("%s.%s", test_case, test); + } + + // The following methods override what's in the TestEventListener class. 
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration); + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test); + virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestCaseStart(const TestCase& test_case); + virtual void OnTestStart(const TestInfo& test_info); + virtual void OnTestPartResult(const TestPartResult& result); + virtual void OnTestEnd(const TestInfo& test_info); + virtual void OnTestCaseEnd(const TestCase& test_case); + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test); + virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); + virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {} + + private: + static void PrintFailedTests(const UnitTest& unit_test); +}; + + // Fired before each iteration of tests starts. +void PrettyUnitTestResultPrinter::OnTestIterationStart( + const UnitTest& unit_test, int iteration) { + if (GTEST_FLAG(repeat) != 1) + printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1); + + const char* const filter = GTEST_FLAG(filter).c_str(); + + // Prints the filter if it's not *. This reminds the user that some + // tests may be skipped. + if (!String::CStringEquals(filter, kUniversalFilter)) { + ColoredPrintf(COLOR_YELLOW, + "Note: %s filter = %s\n", GTEST_NAME_, filter); + } + + if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) { + const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1); + ColoredPrintf(COLOR_YELLOW, + "Note: This is test shard %d of %s.\n", + static_cast(shard_index) + 1, + internal::posix::GetEnv(kTestTotalShards)); + } + + if (GTEST_FLAG(shuffle)) { + ColoredPrintf(COLOR_YELLOW, + "Note: Randomizing tests' orders with a seed of %d .\n", + unit_test.random_seed()); + } + + ColoredPrintf(COLOR_GREEN, "[==========] "); + printf("Running %s from %s.\n", + FormatTestCount(unit_test.test_to_run_count()).c_str(), + FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str()); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart( + const UnitTest& /*unit_test*/) { + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("Global test environment set-up.\n"); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) { + const std::string counts = + FormatCountableNoun(test_case.test_to_run_count(), "test", "tests"); + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("%s from %s", counts.c_str(), test_case.name()); + if (test_case.type_param() == NULL) { + printf("\n"); + } else { + printf(", where %s = %s\n", kTypeParamLabel, test_case.type_param()); + } + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) { + ColoredPrintf(COLOR_GREEN, "[ RUN ] "); + PrintTestName(test_info.test_case_name(), test_info.name()); + printf("\n"); + fflush(stdout); +} + +// Called after an assertion failure. +void PrettyUnitTestResultPrinter::OnTestPartResult( + const TestPartResult& result) { + // If the test part succeeded, we don't need to do anything. + if (result.type() == TestPartResult::kSuccess) + return; + + // Print failure message from the assertion (e.g. expected this and got that). 
+ PrintTestPartResult(result); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) { + if (test_info.result()->Passed()) { + ColoredPrintf(COLOR_GREEN, "[ OK ] "); + } else { + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + } + PrintTestName(test_info.test_case_name(), test_info.name()); + if (test_info.result()->Failed()) + PrintFullTestCommentIfPresent(test_info); + + if (GTEST_FLAG(print_time)) { + printf(" (%s ms)\n", internal::StreamableToString( + test_info.result()->elapsed_time()).c_str()); + } else { + printf("\n"); + } + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) { + if (!GTEST_FLAG(print_time)) return; + + const std::string counts = + FormatCountableNoun(test_case.test_to_run_count(), "test", "tests"); + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("%s from %s (%s ms total)\n\n", + counts.c_str(), test_case.name(), + internal::StreamableToString(test_case.elapsed_time()).c_str()); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart( + const UnitTest& /*unit_test*/) { + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("Global test environment tear-down\n"); + fflush(stdout); +} + +// Internal helper for printing the list of failed tests. +void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) { + const int failed_test_count = unit_test.failed_test_count(); + if (failed_test_count == 0) { + return; + } + + for (int i = 0; i < unit_test.total_test_case_count(); ++i) { + const TestCase& test_case = *unit_test.GetTestCase(i); + if (!test_case.should_run() || (test_case.failed_test_count() == 0)) { + continue; + } + for (int j = 0; j < test_case.total_test_count(); ++j) { + const TestInfo& test_info = *test_case.GetTestInfo(j); + if (!test_info.should_run() || test_info.result()->Passed()) { + continue; + } + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + printf("%s.%s", test_case.name(), test_info.name()); + PrintFullTestCommentIfPresent(test_info); + printf("\n"); + } + } +} + +void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, + int /*iteration*/) { + ColoredPrintf(COLOR_GREEN, "[==========] "); + printf("%s from %s ran.", + FormatTestCount(unit_test.test_to_run_count()).c_str(), + FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str()); + if (GTEST_FLAG(print_time)) { + printf(" (%s ms total)", + internal::StreamableToString(unit_test.elapsed_time()).c_str()); + } + printf("\n"); + ColoredPrintf(COLOR_GREEN, "[ PASSED ] "); + printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str()); + + int num_failures = unit_test.failed_test_count(); + if (!unit_test.Passed()) { + const int failed_test_count = unit_test.failed_test_count(); + ColoredPrintf(COLOR_RED, "[ FAILED ] "); + printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str()); + PrintFailedTests(unit_test); + printf("\n%2d FAILED %s\n", num_failures, + num_failures == 1 ? "TEST" : "TESTS"); + } + + int num_disabled = unit_test.reportable_disabled_test_count(); + if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) { + if (!num_failures) { + printf("\n"); // Add a spacer if no FAILURE banner is displayed. + } + ColoredPrintf(COLOR_YELLOW, + " YOU HAVE %d DISABLED %s\n\n", + num_disabled, + num_disabled == 1 ? "TEST" : "TESTS"); + } + // Ensure that Google Test output is printed before, e.g., heapchecker output. 
+  fflush(stdout);
+}
+
+// End PrettyUnitTestResultPrinter
+
+// class TestEventRepeater
+//
+// This class forwards events to other event listeners.
+class TestEventRepeater : public TestEventListener {
+ public:
+  TestEventRepeater() : forwarding_enabled_(true) {}
+  virtual ~TestEventRepeater();
+  void Append(TestEventListener *listener);
+  TestEventListener* Release(TestEventListener* listener);
+
+  // Controls whether events will be forwarded to listeners_. Set to false
+  // in death test child processes.
+  bool forwarding_enabled() const { return forwarding_enabled_; }
+  void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; }
+
+  virtual void OnTestProgramStart(const UnitTest& unit_test);
+  virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
+  virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
+  virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test);
+  virtual void OnTestCaseStart(const TestCase& test_case);
+  virtual void OnTestStart(const TestInfo& test_info);
+  virtual void OnTestPartResult(const TestPartResult& result);
+  virtual void OnTestEnd(const TestInfo& test_info);
+  virtual void OnTestCaseEnd(const TestCase& test_case);
+  virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
+  virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test);
+  virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+  virtual void OnTestProgramEnd(const UnitTest& unit_test);
+
+ private:
+  // Controls whether events will be forwarded to listeners_. Set to false
+  // in death test child processes.
+  bool forwarding_enabled_;
+  // The list of listeners that receive events.
+  std::vector<TestEventListener*> listeners_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater);
+};
+
+TestEventRepeater::~TestEventRepeater() {
+  ForEach(listeners_, Delete<TestEventListener>);
+}
+
+void TestEventRepeater::Append(TestEventListener *listener) {
+  listeners_.push_back(listener);
+}
+
+// TODO(vladl@google.com): Factor the search functionality into Vector::Find.
+TestEventListener* TestEventRepeater::Release(TestEventListener *listener) {
+  for (size_t i = 0; i < listeners_.size(); ++i) {
+    if (listeners_[i] == listener) {
+      listeners_.erase(listeners_.begin() + i);
+      return listener;
+    }
+  }
+
+  return NULL;
+}
+
+// Since most methods are very similar, use macros to reduce boilerplate.
+// This defines a member that forwards the call to all listeners.
+#define GTEST_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+  if (forwarding_enabled_) { \
+    for (size_t i = 0; i < listeners_.size(); i++) { \
+      listeners_[i]->Name(parameter); \
+    } \
+  } \
+}
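+
+// --- Editor's illustration, not part of the upstream gtest sources: how the
+// repeater fans events out, and why Release() exists. The function name is
+// made up. ---
+#if 0  // illustration only; kept out of the build
+void RepeaterSketch(TestEventRepeater* repeater, TestEventListener* printer) {
+  repeater->Append(printer);           // The repeater now owns printer.
+  // ... every event arriving at the repeater is forwarded to printer ...
+  TestEventListener* mine = repeater->Release(printer);
+  delete mine;                         // Ownership came back to the caller.
+}
+#endif
+
+// This defines a member that forwards the call to all listeners in reverse
+// order.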
+#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \ +void TestEventRepeater::Name(const Type& parameter) { \ + if (forwarding_enabled_) { \ + for (int i = static_cast(listeners_.size()) - 1; i >= 0; i--) { \ + listeners_[i]->Name(parameter); \ + } \ + } \ +} + +GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest) +GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest) +GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase) +GTEST_REPEATER_METHOD_(OnTestStart, TestInfo) +GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult) +GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest) +GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest) +GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest) +GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo) +GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestCase) +GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest) + +#undef GTEST_REPEATER_METHOD_ +#undef GTEST_REVERSE_REPEATER_METHOD_ + +void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test, + int iteration) { + if (forwarding_enabled_) { + for (size_t i = 0; i < listeners_.size(); i++) { + listeners_[i]->OnTestIterationStart(unit_test, iteration); + } + } +} + +void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test, + int iteration) { + if (forwarding_enabled_) { + for (int i = static_cast(listeners_.size()) - 1; i >= 0; i--) { + listeners_[i]->OnTestIterationEnd(unit_test, iteration); + } + } +} + +// End TestEventRepeater + +// This class generates an XML output file. +class XmlUnitTestResultPrinter : public EmptyTestEventListener { + public: + explicit XmlUnitTestResultPrinter(const char* output_file); + + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); + + private: + // Is c a whitespace character that is normalized to a space character + // when it appears in an XML attribute value? + static bool IsNormalizableWhitespace(char c) { + return c == 0x9 || c == 0xA || c == 0xD; + } + + // May c appear in a well-formed XML document? + static bool IsValidXmlCharacter(char c) { + return IsNormalizableWhitespace(c) || c >= 0x20; + } + + // Returns an XML-escaped copy of the input string str. If + // is_attribute is true, the text is meant to appear as an attribute + // value, and normalizable whitespace is preserved by replacing it + // with character references. + static std::string EscapeXml(const std::string& str, bool is_attribute); + + // Returns the given string with all characters invalid in XML removed. + static std::string RemoveInvalidXmlCharacters(const std::string& str); + + // Convenience wrapper around EscapeXml when str is an attribute value. + static std::string EscapeXmlAttribute(const std::string& str) { + return EscapeXml(str, true); + } + + // Convenience wrapper around EscapeXml when str is not an attribute value. + static std::string EscapeXmlText(const char* str) { + return EscapeXml(str, false); + } + + // Verifies that the given attribute belongs to the given element and + // streams the attribute as XML. + static void OutputXmlAttribute(std::ostream* stream, + const std::string& element_name, + const std::string& name, + const std::string& value); + + // Streams an XML CDATA section, escaping invalid CDATA sequences as needed. + static void OutputXmlCDataSection(::std::ostream* stream, const char* data); + + // Streams an XML representation of a TestInfo object. 
+ static void OutputXmlTestInfo(::std::ostream* stream, + const char* test_case_name, + const TestInfo& test_info); + + // Prints an XML representation of a TestCase object + static void PrintXmlTestCase(::std::ostream* stream, + const TestCase& test_case); + + // Prints an XML summary of unit_test to output stream out. + static void PrintXmlUnitTest(::std::ostream* stream, + const UnitTest& unit_test); + + // Produces a string representing the test properties in a result as space + // delimited XML attributes based on the property key="value" pairs. + // When the std::string is not empty, it includes a space at the beginning, + // to delimit this attribute from prior attributes. + static std::string TestPropertiesAsXmlAttributes(const TestResult& result); + + // The output file. + const std::string output_file_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter); +}; + +// Creates a new XmlUnitTestResultPrinter. +XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file) + : output_file_(output_file) { + if (output_file_.c_str() == NULL || output_file_.empty()) { + fprintf(stderr, "XML output file may not be null\n"); + fflush(stderr); + exit(EXIT_FAILURE); + } +} + +// Called after the unit test ends. +void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, + int /*iteration*/) { + FILE* xmlout = NULL; + FilePath output_file(output_file_); + FilePath output_dir(output_file.RemoveFileName()); + + if (output_dir.CreateDirectoriesRecursively()) { + xmlout = posix::FOpen(output_file_.c_str(), "w"); + } + if (xmlout == NULL) { + // TODO(wan): report the reason of the failure. + // + // We don't do it for now as: + // + // 1. There is no urgent need for it. + // 2. It's a bit involved to make the errno variable thread-safe on + // all three operating systems (Linux, Windows, and Mac OS). + // 3. To interpret the meaning of errno in a thread-safe way, + // we need the strerror_r() function, which is not available on + // Windows. + fprintf(stderr, + "Unable to open file \"%s\"\n", + output_file_.c_str()); + fflush(stderr); + exit(EXIT_FAILURE); + } + std::stringstream stream; + PrintXmlUnitTest(&stream, unit_test); + fprintf(xmlout, "%s", StringStreamToString(&stream).c_str()); + fclose(xmlout); +} + +// Returns an XML-escaped copy of the input string str. If is_attribute +// is true, the text is meant to appear as an attribute value, and +// normalizable whitespace is preserved by replacing it with character +// references. +// +// Invalid XML characters in str, if any, are stripped from the output. +// It is expected that most, if not all, of the text processed by this +// module will consist of ordinary English text. +// If this module is ever modified to produce version 1.1 XML output, +// most invalid characters can be retained using character references. +// TODO(wan): It might be nice to have a minimally invasive, human-readable +// escaping scheme for invalid characters, rather than dropping them. 
+std::string XmlUnitTestResultPrinter::EscapeXml(
+    const std::string& str, bool is_attribute) {
+  Message m;
+
+  for (size_t i = 0; i < str.size(); ++i) {
+    const char ch = str[i];
+    switch (ch) {
+      case '<':
+        m << "&lt;";
+        break;
+      case '>':
+        m << "&gt;";
+        break;
+      case '&':
+        m << "&amp;";
+        break;
+      case '\'':
+        if (is_attribute)
+          m << "&apos;";
+        else
+          m << '\'';
+        break;
+      case '"':
+        if (is_attribute)
+          m << "&quot;";
+        else
+          m << '"';
+        break;
+      default:
+        if (IsValidXmlCharacter(ch)) {
+          if (is_attribute && IsNormalizableWhitespace(ch))
+            m << "&#x" << String::FormatByte(static_cast<unsigned char>(ch))
+              << ";";
+          else
+            m << ch;
+        }
+        break;
+    }
+  }
+
+  return m.GetString();
+}
+
+// Returns the given string with all characters invalid in XML removed.
+// Currently invalid characters are dropped from the string. An
+// alternative is to replace them with certain characters such as . or ?.
+std::string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(
+    const std::string& str) {
+  std::string output;
+  output.reserve(str.size());
+  for (std::string::const_iterator it = str.begin(); it != str.end(); ++it)
+    if (IsValidXmlCharacter(*it))
+      output.push_back(*it);
+
+  return output;
+}
+
+// The following routines generate an XML representation of a UnitTest
+// object.
+//
+// This is how Google Test concepts map to the DTD:
+//
+// <testsuites name="AllTests">        <-- corresponds to a UnitTest object
+//   <testsuite name="testcase-name">  <-- corresponds to a TestCase object
+//     <testcase name="test-name">     <-- corresponds to a TestInfo object
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//                                     <-- individual assertion failures
+//     </testcase>
+//   </testsuite>
+// </testsuites>
+
+// Formats the given time in milliseconds as seconds.
+std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
+  ::std::stringstream ss;
+  ss << ms/1000.0;
+  return ss.str();
+}
+
+// Converts the given epoch time in milliseconds to a date string in the ISO
+// 8601 format, without the timezone information.
+std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms) {
+  // Using non-reentrant version as localtime_r is not portable.
+  time_t seconds = static_cast<time_t>(ms / 1000);
+#ifdef _MSC_VER
+# pragma warning(push)          // Saves the current warning state.
+# pragma warning(disable:4996)  // Temporarily disables warning 4996
+                                // (function or variable may be unsafe).
+  const struct tm* const time_struct = localtime(&seconds);  // NOLINT
+# pragma warning(pop)           // Restores the warning state again.
+#else
+  const struct tm* const time_struct = localtime(&seconds);  // NOLINT
+#endif
+  if (time_struct == NULL)
+    return "";  // Invalid ms value
+
+  // YYYY-MM-DDThh:mm:ss
+  return StreamableToString(time_struct->tm_year + 1900) + "-" +
+      String::FormatIntWidth2(time_struct->tm_mon + 1) + "-" +
+      String::FormatIntWidth2(time_struct->tm_mday) + "T" +
+      String::FormatIntWidth2(time_struct->tm_hour) + ":" +
+      String::FormatIntWidth2(time_struct->tm_min) + ":" +
+      String::FormatIntWidth2(time_struct->tm_sec);
+}
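+
+// --- Editor's note, not part of the upstream gtest sources: for reference,
+// EscapeXml maps, in attribute mode (is_attribute == true),
+//   a<b & "q"<TAB>   ==>   a&lt;b &amp; &quot;q&quot;&#x09;
+// whereas in element-text mode quotes pass through and the tab stays literal:
+//   a<b & "q"<TAB>   ==>   a&lt;b &amp; "q"<TAB>
+// A tab (0x9) is "normalizable whitespace", hence the &#x09; character
+// reference in the attribute case. ---
+
+// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.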
+void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
+                                                     const char* data) {
+  const char* segment = data;
+  *stream << "<![CDATA[";
+  for (;;) {
+    const char* const next_segment = strstr(segment, "]]>");
+    if (next_segment != NULL) {
+      stream->write(
+          segment, static_cast<std::streamsize>(next_segment - segment));
+      *stream << "]]>]]&gt;<![CDATA[";
+      segment = next_segment + strlen("]]>");
+    } else {
+      *stream << segment;
+      break;
+    }
+  }
+  *stream << "]]>";
+}
+
+void XmlUnitTestResultPrinter::OutputXmlAttribute(
+    std::ostream* stream,
+    const std::string& element_name,
+    const std::string& name,
+    const std::string& value) {
+  const std::vector<std::string>& allowed_names =
+      GetReservedAttributesForElement(element_name);
+
+  GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) !=
+                   allowed_names.end())
+      << "Attribute " << name << " is not allowed for element <"
+      << element_name << ">.";
+
+  *stream << " " << name << "=\"" << EscapeXmlAttribute(value) << "\"";
+}
+
+// Prints an XML representation of a TestInfo object.
+// TODO(wan): There is also value in printing properties with the plain printer.
+void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
+                                                 const char* test_case_name,
+                                                 const TestInfo& test_info) {
+  const TestResult& result = *test_info.result();
+  const std::string kTestcase = "testcase";
+
+  *stream << "    <testcase";
+  OutputXmlAttribute(stream, kTestcase, "name", test_info.name());
+
+  if (test_info.value_param() != NULL) {
+    OutputXmlAttribute(stream, kTestcase, "value_param",
+                       test_info.value_param());
+  }
+  if (test_info.type_param() != NULL) {
+    OutputXmlAttribute(stream, kTestcase, "type_param",
+                       test_info.type_param());
+  }
+
+  OutputXmlAttribute(stream, kTestcase, "status",
+                     test_info.should_run() ? "run" : "notrun");
+  OutputXmlAttribute(stream, kTestcase, "time",
+                     FormatTimeInMillisAsSeconds(result.elapsed_time()));
+  OutputXmlAttribute(stream, kTestcase, "classname", test_case_name);
+  *stream << TestPropertiesAsXmlAttributes(result);
+
+  int failures = 0;
+  for (int i = 0; i < result.total_part_count(); ++i) {
+    const TestPartResult& part = result.GetTestPartResult(i);
+    if (part.failed()) {
+      if (++failures == 1) {
+        *stream << ">\n";
+      }
+      const string location = internal::FormatCompilerIndependentFileLocation(
+          part.file_name(), part.line_number());
+      const string summary = location + "\n" + part.summary();
+      *stream << "      <failure message=\""
+              << EscapeXmlAttribute(summary.c_str())
+              << "\" type=\"\">";
+      const string detail = location + "\n" + part.message();
+      OutputXmlCDataSection(stream, RemoveInvalidXmlCharacters(detail).c_str());
+      *stream << "</failure>\n";
+    }
+  }
+
+  if (failures == 0)
+    *stream << " />\n";
+  else
+    *stream << "    </testcase>\n";
+}
+
+// Prints an XML representation of a TestCase object
+void XmlUnitTestResultPrinter::PrintXmlTestCase(std::ostream* stream,
+                                                const TestCase& test_case) {
+  const std::string kTestsuite = "testsuite";
+  *stream << "  <" << kTestsuite;
+  OutputXmlAttribute(stream, kTestsuite, "name", test_case.name());
+  OutputXmlAttribute(stream, kTestsuite, "tests",
+                     StreamableToString(test_case.reportable_test_count()));
+  OutputXmlAttribute(stream, kTestsuite, "failures",
+                     StreamableToString(test_case.failed_test_count()));
+  OutputXmlAttribute(
+      stream, kTestsuite, "disabled",
+      StreamableToString(test_case.reportable_disabled_test_count()));
+  OutputXmlAttribute(stream, kTestsuite, "errors", "0");
+  OutputXmlAttribute(stream, kTestsuite, "time",
+                     FormatTimeInMillisAsSeconds(test_case.elapsed_time()));
+  *stream << TestPropertiesAsXmlAttributes(test_case.ad_hoc_test_result())
+          << ">\n";
+
+  for (int i = 0; i < test_case.total_test_count(); ++i) {
+    if (test_case.GetTestInfo(i)->is_reportable())
+      OutputXmlTestInfo(stream, test_case.name(), *test_case.GetTestInfo(i));
+  }
+  *stream << "  </" << kTestsuite << ">\n";
+}
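+
+// --- Editor's note, not part of the upstream gtest sources: the shape of the
+// <testcase> element emitted above for one failing assertion; the names and
+// values are made up for illustration.
+//
+//   <testcase name="Fails" status="run" time="0.002" classname="Demo">
+//     <failure message="demo.cc:7&#x0A;Expected: 1 == 2" type=""><![CDATA[
+// demo.cc:7
+// Expected: 1 == 2]]></failure>
+//   </testcase>
+//
+// A passing test collapses to the short form <testcase ... />, because
+// `failures` stays 0 and the opening tag is closed with " />". ---
+
+// Prints an XML summary of unit_test to output stream out.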
+void XmlUnitTestResultPrinter::PrintXmlUnitTest(std::ostream* stream,
+                                                const UnitTest& unit_test) {
+  const std::string kTestsuites = "testsuites";
+
+  *stream << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
+  *stream << "<" << kTestsuites;
+
+  OutputXmlAttribute(stream, kTestsuites, "tests",
+                     StreamableToString(unit_test.reportable_test_count()));
+  OutputXmlAttribute(stream, kTestsuites, "failures",
+                     StreamableToString(unit_test.failed_test_count()));
+  OutputXmlAttribute(
+      stream, kTestsuites, "disabled",
+      StreamableToString(unit_test.reportable_disabled_test_count()));
+  OutputXmlAttribute(stream, kTestsuites, "errors", "0");
+  OutputXmlAttribute(
+      stream, kTestsuites, "timestamp",
+      FormatEpochTimeInMillisAsIso8601(unit_test.start_timestamp()));
+  OutputXmlAttribute(stream, kTestsuites, "time",
+                     FormatTimeInMillisAsSeconds(unit_test.elapsed_time()));
+
+  if (GTEST_FLAG(shuffle)) {
+    OutputXmlAttribute(stream, kTestsuites, "random_seed",
+                       StreamableToString(unit_test.random_seed()));
+  }
+
+  *stream << TestPropertiesAsXmlAttributes(unit_test.ad_hoc_test_result());
+
+  OutputXmlAttribute(stream, kTestsuites, "name", "AllTests");
+  *stream << ">\n";
+
+  for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
+    if (unit_test.GetTestCase(i)->reportable_test_count() > 0)
+      PrintXmlTestCase(stream, *unit_test.GetTestCase(i));
+  }
+  *stream << "</" << kTestsuites << ">\n";
+}
+
+// Produces a string representing the test properties in a result as space
+// delimited XML attributes based on the property key="value" pairs.
+std::string XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
+    const TestResult& result) {
+  Message attributes;
+  for (int i = 0; i < result.test_property_count(); ++i) {
+    const TestProperty& property = result.GetTestProperty(i);
+    attributes << " " << property.key() << "="
+        << "\"" << EscapeXmlAttribute(property.value()) << "\"";
+  }
+  return attributes.GetString();
+}
+
+// End XmlUnitTestResultPrinter
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Checks if str contains '=', '&', '%' or '\n' characters. If yes,
+// replaces them by "%xx" where xx is their hexadecimal value. For
+// example, replaces "=" with "%3D". This algorithm is O(strlen(str))
+// in both time and space -- important as the input str may contain an
+// arbitrarily long test failure message and stack trace.
+string StreamingListener::UrlEncode(const char* str) {
+  string result;
+  result.reserve(strlen(str) + 1);
+  for (char ch = *str; ch != '\0'; ch = *++str) {
+    switch (ch) {
+      case '%':
+      case '=':
+      case '&':
+      case '\n':
+        result.append("%" + String::FormatByte(static_cast<unsigned char>(ch)));
+        break;
+      default:
+        result.push_back(ch);
+        break;
+    }
+  }
+  return result;
+}
+
+void StreamingListener::SocketWriter::MakeConnection() {
+  GTEST_CHECK_(sockfd_ == -1)
+      << "MakeConnection() can't be called when there is already a connection.";
+
+  addrinfo hints;
+  memset(&hints, 0, sizeof(hints));
+  hints.ai_family = AF_UNSPEC;    // To allow both IPv4 and IPv6 addresses.
+  hints.ai_socktype = SOCK_STREAM;
+  addrinfo* servinfo = NULL;
+
+  // Use the getaddrinfo() to get a linked list of IP addresses for
+  // the given host name.
+  const int error_num = getaddrinfo(
+      host_name_.c_str(), port_num_.c_str(), &hints, &servinfo);
+  if (error_num != 0) {
+    GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: "
+                        << gai_strerror(error_num);
+  }
+
+  // Loop through all the results and connect to the first we can.
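+  // (Illustrative note: because ai_family is AF_UNSPEC, a target such as
+  // --gtest_stream_result_to=localhost:2010 may resolve to both an IPv6 and
+  // an IPv4 candidate; the loop below tries each until connect() succeeds.)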
+  for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != NULL;
+       cur_addr = cur_addr->ai_next) {
+    sockfd_ = socket(
+        cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol);
+    if (sockfd_ != -1) {
+      // Connect the client socket to the server socket.
+      if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) {
+        close(sockfd_);
+        sockfd_ = -1;
+      }
+    }
+  }
+
+  freeaddrinfo(servinfo);  // all done with this structure
+
+  if (sockfd_ == -1) {
+    GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to "
+                        << host_name_ << ":" << port_num_;
+  }
+}
+
+// End of class StreamingListener
+#endif  // GTEST_CAN_STREAM_RESULTS_
+
+// Class ScopedTrace
+
+// Pushes the given source file location and message onto a per-thread
+// trace stack maintained by Google Test.
+ScopedTrace::ScopedTrace(const char* file, int line, const Message& message)
+    GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) {
+  TraceInfo trace;
+  trace.file = file;
+  trace.line = line;
+  trace.message = message.GetString();
+
+  UnitTest::GetInstance()->PushGTestTrace(trace);
+}
+
+// Pops the info pushed by the c'tor.
+ScopedTrace::~ScopedTrace()
+    GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) {
+  UnitTest::GetInstance()->PopGTestTrace();
+}
+
+
+// class OsStackTraceGetter
+
+// Returns the current OS stack trace as an std::string. Parameters:
+//
+//   max_depth  - the maximum number of stack frames to be included
+//                in the trace.
+//   skip_count - the number of top frames to be skipped; doesn't count
+//                against max_depth.
+//
+string OsStackTraceGetter::CurrentStackTrace(int /* max_depth */,
+                                             int /* skip_count */)
+    GTEST_LOCK_EXCLUDED_(mutex_) {
+  return "";
+}
+
+void OsStackTraceGetter::UponLeavingGTest()
+    GTEST_LOCK_EXCLUDED_(mutex_) {
+}
+
+const char* const
+OsStackTraceGetter::kElidedFramesMarker =
+    "... " GTEST_NAME_ " internal frames ...";
+
+// A helper class that creates the premature-exit file in its
+// constructor and deletes the file in its destructor.
+class ScopedPrematureExitFile {
+ public:
+  explicit ScopedPrematureExitFile(const char* premature_exit_filepath)
+      : premature_exit_filepath_(premature_exit_filepath) {
+    // If a path to the premature-exit file is specified...
+    if (premature_exit_filepath != NULL && *premature_exit_filepath != '\0') {
+      // create the file with a single "0" character in it. I/O
+      // errors are ignored as there's nothing better we can do and we
+      // don't want to fail the test because of this.
+      FILE* pfile = posix::FOpen(premature_exit_filepath, "w");
+      if (pfile != NULL) {  // Guards against FOpen() failing, e.g. on a bad path.
+        fwrite("0", 1, 1, pfile);
+        fclose(pfile);
+      }
+    }
+  }
+
+  ~ScopedPrematureExitFile() {
+    if (premature_exit_filepath_ != NULL && *premature_exit_filepath_ != '\0') {
+      remove(premature_exit_filepath_);
+    }
+  }
+
+ private:
+  const char* const premature_exit_filepath_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedPrematureExitFile);
+};
+
+}  // namespace internal
+
+// class TestEventListeners
+
+TestEventListeners::TestEventListeners()
+    : repeater_(new internal::TestEventRepeater()),
+      default_result_printer_(NULL),
+      default_xml_generator_(NULL) {
+}
+
+TestEventListeners::~TestEventListeners() { delete repeater_; }
+
+// Returns the standard listener responsible for the default console
+// output. Can be removed from the listeners list to shut down default
+// console output. Note that removing this object from the listener list
+// with Release transfers its ownership to the user.
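+// (Illustrative usage sketch, not part of the upstream gtest sources:
+//   TestEventListeners& listeners = UnitTest::GetInstance()->listeners();
+//   delete listeners.Release(listeners.default_result_printer());
+// removes and frees the default console printer before RUN_ALL_TESTS().)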
+void TestEventListeners::Append(TestEventListener* listener) { + repeater_->Append(listener); +} + +// Removes the given event listener from the list and returns it. It then +// becomes the caller's responsibility to delete the listener. Returns +// NULL if the listener is not found in the list. +TestEventListener* TestEventListeners::Release(TestEventListener* listener) { + if (listener == default_result_printer_) + default_result_printer_ = NULL; + else if (listener == default_xml_generator_) + default_xml_generator_ = NULL; + return repeater_->Release(listener); +} + +// Returns repeater that broadcasts the TestEventListener events to all +// subscribers. +TestEventListener* TestEventListeners::repeater() { return repeater_; } + +// Sets the default_result_printer attribute to the provided listener. +// The listener is also added to the listener list and previous +// default_result_printer is removed from it and deleted. The listener can +// also be NULL in which case it will not be added to the list. Does +// nothing if the previous and the current listener objects are the same. +void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) { + if (default_result_printer_ != listener) { + // It is an error to pass this method a listener that is already in the + // list. + delete Release(default_result_printer_); + default_result_printer_ = listener; + if (listener != NULL) + Append(listener); + } +} + +// Sets the default_xml_generator attribute to the provided listener. The +// listener is also added to the listener list and previous +// default_xml_generator is removed from it and deleted. The listener can +// also be NULL in which case it will not be added to the list. Does +// nothing if the previous and the current listener objects are the same. +void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) { + if (default_xml_generator_ != listener) { + // It is an error to pass this method a listener that is already in the + // list. + delete Release(default_xml_generator_); + default_xml_generator_ = listener; + if (listener != NULL) + Append(listener); + } +} + +// Controls whether events will be forwarded by the repeater to the +// listeners in the list. +bool TestEventListeners::EventForwardingEnabled() const { + return repeater_->forwarding_enabled(); +} + +void TestEventListeners::SuppressEventForwarding() { + repeater_->set_forwarding_enabled(false); +} + +// class UnitTest + +// Gets the singleton UnitTest object. The first time this method is +// called, a UnitTest object is constructed and returned. Consecutive +// calls will return the same object. +// +// We don't protect this under mutex_ as a user is not supposed to +// call this before main() starts, from which point on the return +// value will never change. +UnitTest* UnitTest::GetInstance() { + // When compiled with MSVC 7.1 in optimized mode, destroying the + // UnitTest object upon exiting the program messes up the exit code, + // causing successful tests to appear failed. We have to use a + // different implementation in this case to bypass the compiler bug. + // This implementation makes the compiler happy, at the cost of + // leaking the UnitTest object. + + // CodeGear C++Builder insists on a public destructor for the + // default implementation. Use this implementation to keep good OO + // design with private destructor. 
+ +#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__) + static UnitTest* const instance = new UnitTest; + return instance; +#else + static UnitTest instance; + return &instance; +#endif // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__) +} + +// Gets the number of successful test cases. +int UnitTest::successful_test_case_count() const { + return impl()->successful_test_case_count(); +} + +// Gets the number of failed test cases. +int UnitTest::failed_test_case_count() const { + return impl()->failed_test_case_count(); +} + +// Gets the number of all test cases. +int UnitTest::total_test_case_count() const { + return impl()->total_test_case_count(); +} + +// Gets the number of all test cases that contain at least one test +// that should run. +int UnitTest::test_case_to_run_count() const { + return impl()->test_case_to_run_count(); +} + +// Gets the number of successful tests. +int UnitTest::successful_test_count() const { + return impl()->successful_test_count(); +} + +// Gets the number of failed tests. +int UnitTest::failed_test_count() const { return impl()->failed_test_count(); } + +// Gets the number of disabled tests that will be reported in the XML report. +int UnitTest::reportable_disabled_test_count() const { + return impl()->reportable_disabled_test_count(); +} + +// Gets the number of disabled tests. +int UnitTest::disabled_test_count() const { + return impl()->disabled_test_count(); +} + +// Gets the number of tests to be printed in the XML report. +int UnitTest::reportable_test_count() const { + return impl()->reportable_test_count(); +} + +// Gets the number of all tests. +int UnitTest::total_test_count() const { return impl()->total_test_count(); } + +// Gets the number of tests that should run. +int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); } + +// Gets the time of the test program start, in ms from the start of the +// UNIX epoch. +internal::TimeInMillis UnitTest::start_timestamp() const { + return impl()->start_timestamp(); +} + +// Gets the elapsed time, in milliseconds. +internal::TimeInMillis UnitTest::elapsed_time() const { + return impl()->elapsed_time(); +} + +// Returns true iff the unit test passed (i.e. all test cases passed). +bool UnitTest::Passed() const { return impl()->Passed(); } + +// Returns true iff the unit test failed (i.e. some test case failed +// or something outside of all tests failed). +bool UnitTest::Failed() const { return impl()->Failed(); } + +// Gets the i-th test case among all the test cases. i can range from 0 to +// total_test_case_count() - 1. If i is not in that range, returns NULL. +const TestCase* UnitTest::GetTestCase(int i) const { + return impl()->GetTestCase(i); +} + +// Returns the TestResult containing information on test failures and +// properties logged outside of individual test cases. +const TestResult& UnitTest::ad_hoc_test_result() const { + return *impl()->ad_hoc_test_result(); +} + +// Gets the i-th test case among all the test cases. i can range from 0 to +// total_test_case_count() - 1. If i is not in that range, returns NULL. +TestCase* UnitTest::GetMutableTestCase(int i) { + return impl()->GetMutableTestCase(i); +} + +// Returns the list of event listeners that can be used to track events +// inside Google Test. +TestEventListeners& UnitTest::listeners() { + return *impl()->listeners(); +} + +// Registers and returns a global test environment. 
When a test
+// program is run, all global test environments will be set-up in the
+// order they were registered. After all tests in the program have
+// finished, all global test environments will be torn-down in the
+// *reverse* order they were registered.
+//
+// The UnitTest object takes ownership of the given environment.
+//
+// We don't protect this under mutex_, as we only support calling it
+// from the main thread.
+Environment* UnitTest::AddEnvironment(Environment* env) {
+  if (env == NULL) {
+    return NULL;
+  }
+
+  impl_->environments().push_back(env);
+  return env;
+}
+
+// Adds a TestPartResult to the current TestResult object. All Google Test
+// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call
+// this to report their results. The user code should use the
+// assertion macros instead of calling this directly.
+void UnitTest::AddTestPartResult(
+    TestPartResult::Type result_type,
+    const char* file_name,
+    int line_number,
+    const std::string& message,
+    const std::string& os_stack_trace) GTEST_LOCK_EXCLUDED_(mutex_) {
+  Message msg;
+  msg << message;
+
+  internal::MutexLock lock(&mutex_);
+  if (impl_->gtest_trace_stack().size() > 0) {
+    msg << "\n" << GTEST_NAME_ << " trace:";
+
+    for (int i = static_cast<int>(impl_->gtest_trace_stack().size());
+         i > 0; --i) {
+      const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1];
+      msg << "\n" << internal::FormatFileLocation(trace.file, trace.line)
+          << " " << trace.message;
+    }
+  }
+
+  if (os_stack_trace.c_str() != NULL && !os_stack_trace.empty()) {
+    msg << internal::kStackTraceMarker << os_stack_trace;
+  }
+
+  const TestPartResult result =
+    TestPartResult(result_type, file_name, line_number,
+                   msg.GetString().c_str());
+  impl_->GetTestPartResultReporterForCurrentThread()->
+      ReportTestPartResult(result);
+
+  if (result_type != TestPartResult::kSuccess) {
+    // gtest_break_on_failure takes precedence over
+    // gtest_throw_on_failure. This allows a user to set the latter
+    // in the code (perhaps in order to use Google Test assertions
+    // with another testing framework) and specify the former on the
+    // command line for debugging.
+    if (GTEST_FLAG(break_on_failure)) {
+#if GTEST_OS_WINDOWS
+      // Using DebugBreak on Windows allows gtest to still break into a debugger
+      // when a failure happens and both the --gtest_break_on_failure and
+      // the --gtest_catch_exceptions flags are specified.
+      DebugBreak();
+#else
+      // Dereference NULL through a volatile pointer to prevent the compiler
+      // from removing. We use this rather than abort() or __builtin_trap() for
+      // portability: Symbian doesn't implement abort() well, and some debuggers
+      // don't correctly trap abort().
+      *static_cast<volatile int*>(NULL) = 1;
+#endif  // GTEST_OS_WINDOWS
+    } else if (GTEST_FLAG(throw_on_failure)) {
+#if GTEST_HAS_EXCEPTIONS
+      throw internal::GoogleTestFailureException(result);
+#else
+      // We cannot call abort() as it generates a pop-up in debug mode
+      // that cannot be suppressed in VC 7.1 or below.
+      exit(1);
+#endif
+    }
+  }
+}
+
+// Adds a TestProperty to the current TestResult object when invoked from
+// inside a test, to current TestCase's ad_hoc_test_result_ when invoked
+// from SetUpTestCase or TearDownTestCase, or to the global property set
+// when invoked elsewhere. If the result already contains a property with
+// the same key, the value will be updated.
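+// (Illustrative note: RecordProperty("cpu", "x86") called from inside a test
+// body adds cpu="x86" to that test's <testcase> element in the XML report;
+// see UnitTestImpl::RecordProperty further below for how a property is routed
+// to the testcase, testsuite or testsuites element.)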
+void UnitTest::RecordProperty(const std::string& key, + const std::string& value) { + impl_->RecordProperty(TestProperty(key, value)); +} + +// Runs all tests in this UnitTest object and prints the result. +// Returns 0 if successful, or 1 otherwise. +// +// We don't protect this under mutex_, as we only support calling it +// from the main thread. +int UnitTest::Run() { + const bool in_death_test_child_process = + internal::GTEST_FLAG(internal_run_death_test).length() > 0; + + // Google Test implements this protocol for catching that a test + // program exits before returning control to Google Test: + // + // 1. Upon start, Google Test creates a file whose absolute path + // is specified by the environment variable + // TEST_PREMATURE_EXIT_FILE. + // 2. When Google Test has finished its work, it deletes the file. + // + // This allows a test runner to set TEST_PREMATURE_EXIT_FILE before + // running a Google-Test-based test program and check the existence + // of the file at the end of the test execution to see if it has + // exited prematurely. + + // If we are in the child process of a death test, don't + // create/delete the premature exit file, as doing so is unnecessary + // and will confuse the parent process. Otherwise, create/delete + // the file upon entering/leaving this function. If the program + // somehow exits before this function has a chance to return, the + // premature-exit file will be left undeleted, causing a test runner + // that understands the premature-exit-file protocol to report the + // test as having failed. + const internal::ScopedPrematureExitFile premature_exit_file( + in_death_test_child_process ? + NULL : internal::posix::GetEnv("TEST_PREMATURE_EXIT_FILE")); + + // Captures the value of GTEST_FLAG(catch_exceptions). This value will be + // used for the duration of the program. + impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions)); + +#if GTEST_HAS_SEH + // Either the user wants Google Test to catch exceptions thrown by the + // tests or this is executing in the context of death test child + // process. In either case the user does not want to see pop-up dialogs + // about crashes - they are expected. + if (impl()->catch_exceptions() || in_death_test_child_process) { +# if !GTEST_OS_WINDOWS_MOBILE + // SetErrorMode doesn't exist on CE. + SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT | + SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX); +# endif // !GTEST_OS_WINDOWS_MOBILE + +# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE + // Death test children can be terminated with _abort(). On Windows, + // _abort() can show a dialog with a warning message. This forces the + // abort message to go to stderr instead. + _set_error_mode(_OUT_TO_STDERR); +# endif + +# if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE + // In the debug version, Visual Studio pops up a separate dialog + // offering a choice to debug the aborted program. We need to suppress + // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement + // executed. Google Test will notify the user of any unexpected + // failure via stderr. + // + // VC++ doesn't define _set_abort_behavior() prior to the version 8.0. + // Users of prior VC versions shall suffer the agony and pain of + // clicking through the countless debug dialogs. + // TODO(vladl@google.com): find a way to suppress the abort dialog() in the + // debug mode when compiled with VC 7.1 or lower. 
+ if (!GTEST_FLAG(break_on_failure)) + _set_abort_behavior( + 0x0, // Clear the following flags: + _WRITE_ABORT_MSG | _CALL_REPORTFAULT); // pop-up window, core dump. +# endif + } +#endif // GTEST_HAS_SEH + + return internal::HandleExceptionsInMethodIfSupported( + impl(), + &internal::UnitTestImpl::RunAllTests, + "auxiliary test code (environments or event listeners)") ? 0 : 1; +} + +// Returns the working directory when the first TEST() or TEST_F() was +// executed. +const char* UnitTest::original_working_dir() const { + return impl_->original_working_dir_.c_str(); +} + +// Returns the TestCase object for the test that's currently running, +// or NULL if no test is running. +const TestCase* UnitTest::current_test_case() const + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + return impl_->current_test_case(); +} + +// Returns the TestInfo object for the test that's currently running, +// or NULL if no test is running. +const TestInfo* UnitTest::current_test_info() const + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + return impl_->current_test_info(); +} + +// Returns the random seed used at the start of the current test run. +int UnitTest::random_seed() const { return impl_->random_seed(); } + +#if GTEST_HAS_PARAM_TEST +// Returns ParameterizedTestCaseRegistry object used to keep track of +// value-parameterized tests and instantiate and register them. +internal::ParameterizedTestCaseRegistry& + UnitTest::parameterized_test_registry() + GTEST_LOCK_EXCLUDED_(mutex_) { + return impl_->parameterized_test_registry(); +} +#endif // GTEST_HAS_PARAM_TEST + +// Creates an empty UnitTest. +UnitTest::UnitTest() { + impl_ = new internal::UnitTestImpl(this); +} + +// Destructor of UnitTest. +UnitTest::~UnitTest() { + delete impl_; +} + +// Pushes a trace defined by SCOPED_TRACE() on to the per-thread +// Google Test trace stack. +void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + impl_->gtest_trace_stack().push_back(trace); +} + +// Pops a trace from the per-thread Google Test trace stack. +void UnitTest::PopGTestTrace() + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + impl_->gtest_trace_stack().pop_back(); +} + +namespace internal { + +UnitTestImpl::UnitTestImpl(UnitTest* parent) + : parent_(parent), +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4355) // Temporarily disables warning 4355 + // (using this in initializer). + default_global_test_part_result_reporter_(this), + default_per_thread_test_part_result_reporter_(this), +# pragma warning(pop) // Restores the warning state again. +#else + default_global_test_part_result_reporter_(this), + default_per_thread_test_part_result_reporter_(this), +#endif // _MSC_VER + global_test_part_result_repoter_( + &default_global_test_part_result_reporter_), + per_thread_test_part_result_reporter_( + &default_per_thread_test_part_result_reporter_), +#if GTEST_HAS_PARAM_TEST + parameterized_test_registry_(), + parameterized_tests_registered_(false), +#endif // GTEST_HAS_PARAM_TEST + last_death_test_case_(-1), + current_test_case_(NULL), + current_test_info_(NULL), + ad_hoc_test_result_(), + os_stack_trace_getter_(NULL), + post_flag_parse_init_performed_(false), + random_seed_(0), // Will be overridden by the flag before first use. + random_(0), // Will be reseeded before first use. 
+      start_timestamp_(0),
+      elapsed_time_(0),
+#if GTEST_HAS_DEATH_TEST
+      death_test_factory_(new DefaultDeathTestFactory),
+#endif
+      // Will be overridden by the flag before first use.
+      catch_exceptions_(false) {
+  listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter);
+}
+
+UnitTestImpl::~UnitTestImpl() {
+  // Deletes every TestCase.
+  ForEach(test_cases_, internal::Delete<TestCase>);
+
+  // Deletes every Environment.
+  ForEach(environments_, internal::Delete<Environment>);
+
+  delete os_stack_trace_getter_;
+}
+
+// Adds a TestProperty to the current TestResult object when invoked in a
+// context of a test, to current test case's ad_hoc_test_result when invoked
+// from SetUpTestCase/TearDownTestCase, or to the global property set
+// otherwise. If the result already contains a property with the same key,
+// the value will be updated.
+void UnitTestImpl::RecordProperty(const TestProperty& test_property) {
+  std::string xml_element;
+  TestResult* test_result;  // TestResult appropriate for property recording.
+
+  if (current_test_info_ != NULL) {
+    xml_element = "testcase";
+    test_result = &(current_test_info_->result_);
+  } else if (current_test_case_ != NULL) {
+    xml_element = "testsuite";
+    test_result = &(current_test_case_->ad_hoc_test_result_);
+  } else {
+    xml_element = "testsuites";
+    test_result = &ad_hoc_test_result_;
+  }
+  test_result->RecordProperty(xml_element, test_property);
+}
+
+#if GTEST_HAS_DEATH_TEST
+// Disables event forwarding if the control is currently in a death test
+// subprocess. Must not be called before InitGoogleTest.
+void UnitTestImpl::SuppressTestEventsIfInSubprocess() {
+  if (internal_run_death_test_flag_.get() != NULL)
+    listeners()->SuppressEventForwarding();
+}
+#endif  // GTEST_HAS_DEATH_TEST
+
+// Initializes event listeners performing XML output as specified by
+// UnitTestOptions. Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureXmlOutput() {
+  const std::string& output_format = UnitTestOptions::GetOutputFormat();
+  if (output_format == "xml") {
+    listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter(
+        UnitTestOptions::GetAbsolutePathToOutputFile().c_str()));
+  } else if (output_format != "") {
+    printf("WARNING: unrecognized output format \"%s\" ignored.\n",
+           output_format.c_str());
+    fflush(stdout);
+  }
+}
+
+#if GTEST_CAN_STREAM_RESULTS_
+// Initializes event listeners for streaming test results in string form.
+// Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureStreamingOutput() {
+  const std::string& target = GTEST_FLAG(stream_result_to);
+  if (!target.empty()) {
+    const size_t pos = target.find(':');
+    if (pos != std::string::npos) {
+      listeners()->Append(new StreamingListener(target.substr(0, pos),
+                                                target.substr(pos+1)));
+    } else {
+      printf("WARNING: unrecognized streaming target \"%s\" ignored.\n",
+             target.c_str());
+      fflush(stdout);
+    }
+  }
+}
+#endif  // GTEST_CAN_STREAM_RESULTS_
+
+// Performs initialization dependent upon flag values obtained in
+// ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to
+// ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest
+// this function is also called from RunAllTests. Since this function can be
+// called more than once, it has to be idempotent.
+void UnitTestImpl::PostFlagParsingInit() {
+  // Ensures that this function does not execute more than once.
+  if (!post_flag_parse_init_performed_) {
+    post_flag_parse_init_performed_ = true;
+
+#if GTEST_HAS_DEATH_TEST
+    InitDeathTestSubprocessControlInfo();
+    SuppressTestEventsIfInSubprocess();
+#endif  // GTEST_HAS_DEATH_TEST
+
+    // Registers parameterized tests. This makes parameterized tests
+    // available to the UnitTest reflection API without running
+    // RUN_ALL_TESTS.
+    RegisterParameterizedTests();
+
+    // Configures listeners for XML output. This makes it possible for users
+    // to shut down the default XML output before invoking RUN_ALL_TESTS.
+    ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+    // Configures listeners for streaming test results to the specified server.
+    ConfigureStreamingOutput();
+#endif  // GTEST_CAN_STREAM_RESULTS_
+  }
+}
+
+// A predicate that checks the name of a TestCase against a known
+// value.
+//
+// This is used for implementation of the UnitTest class only. We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestCaseNameIs is copyable.
+class TestCaseNameIs {
+ public:
+  // Constructor.
+  explicit TestCaseNameIs(const std::string& name)
+      : name_(name) {}
+
+  // Returns true iff the name of test_case matches name_.
+  bool operator()(const TestCase* test_case) const {
+    return test_case != NULL && strcmp(test_case->name(), name_.c_str()) == 0;
+  }
+
+ private:
+  std::string name_;
+};
+
+// Finds and returns a TestCase with the given name. If one doesn't
+// exist, creates one and returns it. It's the CALLER'S
+// RESPONSIBILITY to ensure that this function is only called WHEN THE
+// TESTS ARE NOT SHUFFLED.
+//
+// Arguments:
+//
+//   test_case_name: name of the test case
+//   type_param:     the name of the test case's type parameter, or NULL if
+//                   this is not a typed or a type-parameterized test case.
+//   set_up_tc:      pointer to the function that sets up the test case
+//   tear_down_tc:   pointer to the function that tears down the test case
+TestCase* UnitTestImpl::GetTestCase(const char* test_case_name,
+                                    const char* type_param,
+                                    Test::SetUpTestCaseFunc set_up_tc,
+                                    Test::TearDownTestCaseFunc tear_down_tc) {
+  // Can we find a TestCase with the given name?
+  const std::vector<TestCase*>::const_iterator test_case =
+      std::find_if(test_cases_.begin(), test_cases_.end(),
+                   TestCaseNameIs(test_case_name));
+
+  if (test_case != test_cases_.end())
+    return *test_case;
+
+  // No. Let's create one.
+  TestCase* const new_test_case =
+      new TestCase(test_case_name, type_param, set_up_tc, tear_down_tc);
+
+  // Is this a death test case?
+  if (internal::UnitTestOptions::MatchesFilter(test_case_name,
+                                               kDeathTestCaseFilter)) {
+    // Yes. Inserts the test case after the last death test case
+    // defined so far. This only works when the test cases haven't
+    // been shuffled. Otherwise we may end up running a death test
+    // after a non-death test.
+    ++last_death_test_case_;
+    test_cases_.insert(test_cases_.begin() + last_death_test_case_,
+                       new_test_case);
+  } else {
+    // No. Appends to the end of the list.
+    test_cases_.push_back(new_test_case);
+  }
+
+  test_case_indices_.push_back(static_cast<int>(test_case_indices_.size()));
+  return new_test_case;
+}
+
+// Helpers for setting up / tearing down the given environment. They
+// are for use in the ForEach() function.
+static void SetUpEnvironment(Environment* env) { env->SetUp(); }
+static void TearDownEnvironment(Environment* env) { env->TearDown(); }
+
+// Runs all tests in this UnitTest object, prints the result, and
+// returns true if all tests are successful. 
If any exception is +// thrown during a test, the test is considered to be failed, but the +// rest of the tests will still be run. +// +// When parameterized tests are enabled, it expands and registers +// parameterized tests first in RegisterParameterizedTests(). +// All other functions called from RunAllTests() may safely assume that +// parameterized tests are ready to be counted and run. +bool UnitTestImpl::RunAllTests() { + // Makes sure InitGoogleTest() was called. + if (!GTestIsInitialized()) { + printf("%s", + "\nThis test program did NOT call ::testing::InitGoogleTest " + "before calling RUN_ALL_TESTS(). Please fix it.\n"); + return false; + } + + // Do not run any test if the --help flag was specified. + if (g_help_flag) + return true; + + // Repeats the call to the post-flag parsing initialization in case the + // user didn't call InitGoogleTest. + PostFlagParsingInit(); + + // Even if sharding is not on, test runners may want to use the + // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding + // protocol. + internal::WriteToShardStatusFileIfNeeded(); + + // True iff we are in a subprocess for running a thread-safe-style + // death test. + bool in_subprocess_for_death_test = false; + +#if GTEST_HAS_DEATH_TEST + in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL); +#endif // GTEST_HAS_DEATH_TEST + + const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex, + in_subprocess_for_death_test); + + // Compares the full test names with the filter to decide which + // tests to run. + const bool has_tests_to_run = FilterTests(should_shard + ? HONOR_SHARDING_PROTOCOL + : IGNORE_SHARDING_PROTOCOL) > 0; + + // Lists the tests and exits if the --gtest_list_tests flag was specified. + if (GTEST_FLAG(list_tests)) { + // This must be called *after* FilterTests() has been called. + ListTestsMatchingFilter(); + return true; + } + + random_seed_ = GTEST_FLAG(shuffle) ? + GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0; + + // True iff at least one test has failed. + bool failed = false; + + TestEventListener* repeater = listeners()->repeater(); + + start_timestamp_ = GetTimeInMillis(); + repeater->OnTestProgramStart(*parent_); + + // How many times to repeat the tests? We don't want to repeat them + // when we are inside the subprocess of a death test. + const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat); + // Repeats forever if the repeat count is negative. + const bool forever = repeat < 0; + for (int i = 0; forever || i != repeat; i++) { + // We want to preserve failures generated by ad-hoc test + // assertions executed before RUN_ALL_TESTS(). + ClearNonAdHocTestResult(); + + const TimeInMillis start = GetTimeInMillis(); + + // Shuffles test cases and tests if requested. + if (has_tests_to_run && GTEST_FLAG(shuffle)) { + random()->Reseed(random_seed_); + // This should be done before calling OnTestIterationStart(), + // such that a test event listener can see the actual test order + // in the event. + ShuffleTests(); + } + + // Tells the unit test event listeners that the tests are about to start. + repeater->OnTestIterationStart(*parent_, i); + + // Runs each test case if there is at least one test to run. + if (has_tests_to_run) { + // Sets up all environments beforehand. + repeater->OnEnvironmentsSetUpStart(*parent_); + ForEach(environments_, SetUpEnvironment); + repeater->OnEnvironmentsSetUpEnd(*parent_); + + // Runs the tests only if there was no fatal failure during global + // set-up. 
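+      // (Illustrative note: a fatal failure in an Environment::SetUp() is
+      // recorded in the ad hoc test result, so Test::HasFatalFailure()
+      // returns true here, all test cases are skipped, and only the
+      // environment tear-down pass below still runs.)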
+ if (!Test::HasFatalFailure()) { + for (int test_index = 0; test_index < total_test_case_count(); + test_index++) { + GetMutableTestCase(test_index)->Run(); + } + } + + // Tears down all environments in reverse order afterwards. + repeater->OnEnvironmentsTearDownStart(*parent_); + std::for_each(environments_.rbegin(), environments_.rend(), + TearDownEnvironment); + repeater->OnEnvironmentsTearDownEnd(*parent_); + } + + elapsed_time_ = GetTimeInMillis() - start; + + // Tells the unit test event listener that the tests have just finished. + repeater->OnTestIterationEnd(*parent_, i); + + // Gets the result and clears it. + if (!Passed()) { + failed = true; + } + + // Restores the original test order after the iteration. This + // allows the user to quickly repro a failure that happens in the + // N-th iteration without repeating the first (N - 1) iterations. + // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in + // case the user somehow changes the value of the flag somewhere + // (it's always safe to unshuffle the tests). + UnshuffleTests(); + + if (GTEST_FLAG(shuffle)) { + // Picks a new random seed for each iteration. + random_seed_ = GetNextRandomSeed(random_seed_); + } + } + + repeater->OnTestProgramEnd(*parent_); + + return !failed; +} + +// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file +// if the variable is present. If a file already exists at this location, this +// function will write over it. If the variable is present, but the file cannot +// be created, prints an error and exits. +void WriteToShardStatusFileIfNeeded() { + const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile); + if (test_shard_file != NULL) { + FILE* const file = posix::FOpen(test_shard_file, "w"); + if (file == NULL) { + ColoredPrintf(COLOR_RED, + "Could not write to the test shard status file \"%s\" " + "specified by the %s environment variable.\n", + test_shard_file, kTestShardStatusFile); + fflush(stdout); + exit(EXIT_FAILURE); + } + fclose(file); + } +} + +// Checks whether sharding is enabled by examining the relevant +// environment variable values. If the variables are present, +// but inconsistent (i.e., shard_index >= total_shards), prints +// an error and exits. If in_subprocess_for_death_test, sharding is +// disabled because it must only be applied to the original test +// process. Otherwise, we could filter out death tests we intended to execute. 
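+// (Illustrative example: with GTEST_TOTAL_SHARDS=3 and GTEST_SHARD_INDEX=1 in
+// the environment, ShouldShard() returns true and this process runs only the
+// runnable tests whose sequence id satisfies id % 3 == 1; see
+// ShouldRunTestOnShard() below.)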
+bool ShouldShard(const char* total_shards_env, + const char* shard_index_env, + bool in_subprocess_for_death_test) { + if (in_subprocess_for_death_test) { + return false; + } + + const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1); + const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1); + + if (total_shards == -1 && shard_index == -1) { + return false; + } else if (total_shards == -1 && shard_index != -1) { + const Message msg = Message() + << "Invalid environment variables: you have " + << kTestShardIndex << " = " << shard_index + << ", but have left " << kTestTotalShards << " unset.\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } else if (total_shards != -1 && shard_index == -1) { + const Message msg = Message() + << "Invalid environment variables: you have " + << kTestTotalShards << " = " << total_shards + << ", but have left " << kTestShardIndex << " unset.\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } else if (shard_index < 0 || shard_index >= total_shards) { + const Message msg = Message() + << "Invalid environment variables: we require 0 <= " + << kTestShardIndex << " < " << kTestTotalShards + << ", but you have " << kTestShardIndex << "=" << shard_index + << ", " << kTestTotalShards << "=" << total_shards << ".\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } + + return total_shards > 1; +} + +// Parses the environment variable var as an Int32. If it is unset, +// returns default_val. If it is not an Int32, prints an error +// and aborts. +Int32 Int32FromEnvOrDie(const char* var, Int32 default_val) { + const char* str_val = posix::GetEnv(var); + if (str_val == NULL) { + return default_val; + } + + Int32 result; + if (!ParseInt32(Message() << "The value of environment variable " << var, + str_val, &result)) { + exit(EXIT_FAILURE); + } + return result; +} + +// Given the total number of shards, the shard index, and the test id, +// returns true iff the test should be run on this shard. The test id is +// some arbitrary but unique non-negative integer assigned to each test +// method. Assumes that 0 <= shard_index < total_shards. +bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) { + return (test_id % total_shards) == shard_index; +} + +// Compares the name of each test with the user-specified filter to +// decide whether the test should be run, then records the result in +// each TestCase and TestInfo object. +// If shard_tests == true, further filters tests based on sharding +// variables in the environment - see +// http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide. +// Returns the number of tests that should run. +int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) { + const Int32 total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ? + Int32FromEnvOrDie(kTestTotalShards, -1) : -1; + const Int32 shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ? + Int32FromEnvOrDie(kTestShardIndex, -1) : -1; + + // num_runnable_tests are the number of tests that will + // run across all shards (i.e., match filter and are not disabled). + // num_selected_tests are the number of tests to be run on + // this shard. 
+ int num_runnable_tests = 0; + int num_selected_tests = 0; + for (size_t i = 0; i < test_cases_.size(); i++) { + TestCase* const test_case = test_cases_[i]; + const std::string &test_case_name = test_case->name(); + test_case->set_should_run(false); + + for (size_t j = 0; j < test_case->test_info_list().size(); j++) { + TestInfo* const test_info = test_case->test_info_list()[j]; + const std::string test_name(test_info->name()); + // A test is disabled if test case name or test name matches + // kDisableTestFilter. + const bool is_disabled = + internal::UnitTestOptions::MatchesFilter(test_case_name, + kDisableTestFilter) || + internal::UnitTestOptions::MatchesFilter(test_name, + kDisableTestFilter); + test_info->is_disabled_ = is_disabled; + + const bool matches_filter = + internal::UnitTestOptions::FilterMatchesTest(test_case_name, + test_name); + test_info->matches_filter_ = matches_filter; + + const bool is_runnable = + (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) && + matches_filter; + + const bool is_selected = is_runnable && + (shard_tests == IGNORE_SHARDING_PROTOCOL || + ShouldRunTestOnShard(total_shards, shard_index, + num_runnable_tests)); + + num_runnable_tests += is_runnable; + num_selected_tests += is_selected; + + test_info->should_run_ = is_selected; + test_case->set_should_run(test_case->should_run() || is_selected); + } + } + return num_selected_tests; +} + +// Prints the given C-string on a single line by replacing all '\n' +// characters with string "\\n". If the output takes more than +// max_length characters, only prints the first max_length characters +// and "...". +static void PrintOnOneLine(const char* str, int max_length) { + if (str != NULL) { + for (int i = 0; *str != '\0'; ++str) { + if (i >= max_length) { + printf("..."); + break; + } + if (*str == '\n') { + printf("\\n"); + i += 2; + } else { + printf("%c", *str); + ++i; + } + } + } +} + +// Prints the names of the tests matching the user-specified filter flag. +void UnitTestImpl::ListTestsMatchingFilter() { + // Print at most this many characters for each type/value parameter. + const int kMaxParamLength = 250; + + for (size_t i = 0; i < test_cases_.size(); i++) { + const TestCase* const test_case = test_cases_[i]; + bool printed_test_case_name = false; + + for (size_t j = 0; j < test_case->test_info_list().size(); j++) { + const TestInfo* const test_info = + test_case->test_info_list()[j]; + if (test_info->matches_filter_) { + if (!printed_test_case_name) { + printed_test_case_name = true; + printf("%s.", test_case->name()); + if (test_case->type_param() != NULL) { + printf(" # %s = ", kTypeParamLabel); + // We print the type parameter on a single line to make + // the output easy to parse by a program. + PrintOnOneLine(test_case->type_param(), kMaxParamLength); + } + printf("\n"); + } + printf(" %s", test_info->name()); + if (test_info->value_param() != NULL) { + printf(" # %s = ", kValueParamLabel); + // We print the value parameter on a single line to make the + // output easy to parse by a program. + PrintOnOneLine(test_info->value_param(), kMaxParamLength); + } + printf("\n"); + } + } + } + fflush(stdout); +} + +// Sets the OS stack trace getter. +// +// Does nothing if the input and the current OS stack trace getter are +// the same; otherwise, deletes the old getter and makes the input the +// current getter. 
+void UnitTestImpl::set_os_stack_trace_getter(
+    OsStackTraceGetterInterface* getter) {
+  if (os_stack_trace_getter_ != getter) {
+    delete os_stack_trace_getter_;
+    os_stack_trace_getter_ = getter;
+  }
+}
+
+// Returns the current OS stack trace getter if it is not NULL;
+// otherwise, creates an OsStackTraceGetter, makes it the current
+// getter, and returns it.
+OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() {
+  if (os_stack_trace_getter_ == NULL) {
+    os_stack_trace_getter_ = new OsStackTraceGetter;
+  }
+
+  return os_stack_trace_getter_;
+}
+
+// Returns the TestResult for the test that's currently running, or
+// the TestResult for the ad hoc test if no test is running.
+TestResult* UnitTestImpl::current_test_result() {
+  return current_test_info_ ?
+      &(current_test_info_->result_) : &ad_hoc_test_result_;
+}
+
+// Shuffles all test cases, and the tests within each test case,
+// making sure that death tests are still run first.
+void UnitTestImpl::ShuffleTests() {
+  // Shuffles the death test cases.
+  ShuffleRange(random(), 0, last_death_test_case_ + 1, &test_case_indices_);
+
+  // Shuffles the non-death test cases.
+  ShuffleRange(random(), last_death_test_case_ + 1,
+               static_cast<int>(test_cases_.size()), &test_case_indices_);
+
+  // Shuffles the tests inside each test case.
+  for (size_t i = 0; i < test_cases_.size(); i++) {
+    test_cases_[i]->ShuffleTests(random());
+  }
+}
+
+// Restores the test cases and tests to their order before the first shuffle.
+void UnitTestImpl::UnshuffleTests() {
+  for (size_t i = 0; i < test_cases_.size(); i++) {
+    // Unshuffles the tests in each test case.
+    test_cases_[i]->UnshuffleTests();
+    // Resets the index of each test case.
+    test_case_indices_[i] = static_cast<int>(i);
+  }
+}
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag. The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+std::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/,
+                                            int skip_count) {
+  // We pass skip_count + 1 to skip this wrapper function in addition
+  // to what the user really wants to skip.
+  return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1);
+}
+
+// Used by the GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_ macro to
+// suppress unreachable code warnings.
+namespace {
+class ClassUniqueToAlwaysTrue {};
+}
+
+bool IsTrue(bool condition) { return condition; }
+
+bool AlwaysTrue() {
+#if GTEST_HAS_EXCEPTIONS
+  // This condition is always false so AlwaysTrue() never actually throws,
+  // but it makes the compiler think that it may throw.
+  if (IsTrue(false))
+    throw ClassUniqueToAlwaysTrue();
+#endif  // GTEST_HAS_EXCEPTIONS
+  return true;
+}
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+bool SkipPrefix(const char* prefix, const char** pstr) {
+  const size_t prefix_len = strlen(prefix);
+  if (strncmp(*pstr, prefix, prefix_len) == 0) {
+    *pstr += prefix_len;
+    return true;
+  }
+  return false;
+}
+
+// Parses a string as a command line flag. 
The string should have +// the format "--flag=value". When def_optional is true, the "=value" +// part can be omitted. +// +// Returns the value of the flag, or NULL if the parsing failed. +const char* ParseFlagValue(const char* str, + const char* flag, + bool def_optional) { + // str and flag must not be NULL. + if (str == NULL || flag == NULL) return NULL; + + // The flag must start with "--" followed by GTEST_FLAG_PREFIX_. + const std::string flag_str = std::string("--") + GTEST_FLAG_PREFIX_ + flag; + const size_t flag_len = flag_str.length(); + if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL; + + // Skips the flag name. + const char* flag_end = str + flag_len; + + // When def_optional is true, it's OK to not have a "=value" part. + if (def_optional && (flag_end[0] == '\0')) { + return flag_end; + } + + // If def_optional is true and there are more characters after the + // flag name, or if def_optional is false, there must be a '=' after + // the flag name. + if (flag_end[0] != '=') return NULL; + + // Returns the string after "=". + return flag_end + 1; +} + +// Parses a string for a bool flag, in the form of either +// "--flag=value" or "--flag". +// +// In the former case, the value is taken as true as long as it does +// not start with '0', 'f', or 'F'. +// +// In the latter case, the value is taken as true. +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseBoolFlag(const char* str, const char* flag, bool* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, true); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Converts the string value to a bool. + *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F'); + return true; +} + +// Parses a string for an Int32 flag, in the form of +// "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseInt32Flag(const char* str, const char* flag, Int32* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Sets *value to the value of the flag. + return ParseInt32(Message() << "The value of flag --" << flag, + value_str, value); +} + +// Parses a string for a string flag, in the form of +// "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseStringFlag(const char* str, const char* flag, std::string* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Sets *value to the value of the flag. + *value = value_str; + return true; +} + +// Determines whether a string has a prefix that Google Test uses for its +// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_. +// If Google Test detects that a command line flag has its prefix but is not +// recognized, it will print its help message. Flags starting with +// GTEST_INTERNAL_PREFIX_ followed by "internal_" are considered Google Test +// internal flags and do not trigger the help message. 
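+// (Illustrative note: "--gtest_repeat=2", "-gtest_color=no" and
+// "/gtest_shuffle" all carry the Google Test prefix, while
+// "--gtest_internal_run_death_test=..." is internal and is excluded by the
+// check below.)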
+static bool HasGoogleTestFlagPrefix(const char* str) {
+  return (SkipPrefix("--", &str) ||
+          SkipPrefix("-", &str) ||
+          SkipPrefix("/", &str)) &&
+         !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) &&
+         (SkipPrefix(GTEST_FLAG_PREFIX_, &str) ||
+          SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str));
+}
+
+// Prints a string containing code-encoded text. The following escape
+// sequences can be used in the string to control the text color:
+//
+//   @@    prints a single '@' character.
+//   @R    changes the color to red.
+//   @G    changes the color to green.
+//   @Y    changes the color to yellow.
+//   @D    changes to the default terminal text color.
+//
+// TODO(wan@google.com): Write tests for this once we add stdout
+// capturing to Google Test.
+static void PrintColorEncoded(const char* str) {
+  GTestColor color = COLOR_DEFAULT;  // The current color.
+
+  // Conceptually, we split the string into segments divided by escape
+  // sequences. Then we print one segment at a time. At the end of
+  // each iteration, the str pointer advances to the beginning of the
+  // next segment.
+  for (;;) {
+    const char* p = strchr(str, '@');
+    if (p == NULL) {
+      ColoredPrintf(color, "%s", str);
+      return;
+    }
+
+    ColoredPrintf(color, "%s", std::string(str, p).c_str());
+
+    const char ch = p[1];
+    str = p + 2;
+    if (ch == '@') {
+      ColoredPrintf(color, "@");
+    } else if (ch == 'D') {
+      color = COLOR_DEFAULT;
+    } else if (ch == 'R') {
+      color = COLOR_RED;
+    } else if (ch == 'G') {
+      color = COLOR_GREEN;
+    } else if (ch == 'Y') {
+      color = COLOR_YELLOW;
+    } else {
+      --str;
+    }
+  }
+}
+
+static const char kColorEncodedHelpMessage[] =
+"This program contains tests written using " GTEST_NAME_ ". You can use the\n"
+"following command line flags to control its behavior:\n"
+"\n"
+"Test Selection:\n"
+"  @G--" GTEST_FLAG_PREFIX_ "list_tests@D\n"
+"      List the names of all tests instead of running them. The name of\n"
+"      TEST(Foo, Bar) is \"Foo.Bar\".\n"
+"  @G--" GTEST_FLAG_PREFIX_ "filter=@YPOSITIVE_PATTERNS"
+    "[@G-@YNEGATIVE_PATTERNS]@D\n"
+"      Run only the tests whose name matches one of the positive patterns but\n"
+"      none of the negative patterns. '?' matches any single character; '*'\n"
+"      matches any substring; ':' separates two patterns.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n"
+"      Run all disabled tests too.\n"
+"\n"
+"Test Execution:\n"
+"  @G--" GTEST_FLAG_PREFIX_ "repeat=@Y[COUNT]@D\n"
+"      Run the tests repeatedly; use a negative count to repeat forever.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "shuffle@D\n"
+"      Randomize tests' orders on every iteration.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "random_seed=@Y[NUMBER]@D\n"
+"      Random number seed to use for shuffling test orders (between 1 and\n"
+"      99999, or 0 to use a seed based on the current time).\n"
+"\n"
+"Test Output:\n"
+"  @G--" GTEST_FLAG_PREFIX_ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n"
+"      Enable/disable colored output. The default is @Gauto@D.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "print_time=0@D\n"
+"      Don't print the elapsed time of each test.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "output=xml@Y[@G:@YDIRECTORY_PATH@G"
+    GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n"
+"      Generate an XML report in the given directory or with the given file\n"
+"      name. 
@YFILE_PATH@D defaults to @Gtest_details.xml@D.\n"
+#if GTEST_CAN_STREAM_RESULTS_
+"  @G--" GTEST_FLAG_PREFIX_ "stream_result_to=@YHOST@G:@YPORT@D\n"
+"      Stream test results to the given server.\n"
+#endif  // GTEST_CAN_STREAM_RESULTS_
+"\n"
+"Assertion Behavior:\n"
+#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+"  @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n"
+"      Set the default death test style.\n"
+#endif  // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+"  @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n"
+"      Turn assertion failures into debugger break-points.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n"
+"      Turn assertion failures into C++ exceptions.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "catch_exceptions=0@D\n"
+"      Do not report exceptions as test failures. Instead, allow them\n"
+"      to crash the program or throw a pop-up (on Windows).\n"
+"\n"
+"Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set "
+    "the corresponding\n"
+"environment variable of a flag (all letters in upper-case). For example, to\n"
+"disable colored text output, you can either specify @G--" GTEST_FLAG_PREFIX_
+    "color=no@D or set\n"
+"the @G" GTEST_FLAG_PREFIX_UPPER_ "COLOR@D environment variable to @Gno@D.\n"
+"\n"
+"For more information, please read the " GTEST_NAME_ " documentation at\n"
+"@G" GTEST_PROJECT_URL_ "@D. If you find a bug in " GTEST_NAME_ "\n"
+"(not one in your own code or tests), please report it to\n"
+"@G<" GTEST_DEV_EMAIL_ ">@D.\n";
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test. The type parameter CharType can be
+// instantiated to either char or wchar_t.
+template <typename CharType>
+void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
+  for (int i = 1; i < *argc; i++) {
+    const std::string arg_string = StreamableToString(argv[i]);
+    const char* const arg = arg_string.c_str();
+
+    using internal::ParseBoolFlag;
+    using internal::ParseInt32Flag;
+    using internal::ParseStringFlag;
+
+    // Do we see a Google Test flag?
+    if (ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag,
+                      &GTEST_FLAG(also_run_disabled_tests)) ||
+        ParseBoolFlag(arg, kBreakOnFailureFlag,
+                      &GTEST_FLAG(break_on_failure)) ||
+        ParseBoolFlag(arg, kCatchExceptionsFlag,
+                      &GTEST_FLAG(catch_exceptions)) ||
+        ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||
+        ParseStringFlag(arg, kDeathTestStyleFlag,
+                        &GTEST_FLAG(death_test_style)) ||
+        ParseBoolFlag(arg, kDeathTestUseFork,
+                      &GTEST_FLAG(death_test_use_fork)) ||
+        ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
+        ParseStringFlag(arg, kInternalRunDeathTestFlag,
+                        &GTEST_FLAG(internal_run_death_test)) ||
+        ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
+        ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||
+        ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||
+        ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) ||
+        ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) ||
+        ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||
+        ParseInt32Flag(arg, kStackTraceDepthFlag,
+                       &GTEST_FLAG(stack_trace_depth)) ||
+        ParseStringFlag(arg, kStreamResultToFlag,
+                        &GTEST_FLAG(stream_result_to)) ||
+        ParseBoolFlag(arg, kThrowOnFailureFlag,
+                      &GTEST_FLAG(throw_on_failure))
+        ) {
+      // Yes. Shift the remainder of the argv list left by one. Note
+      // that argv has (*argc + 1) elements, the last one always being
+      // NULL. The following loop moves the trailing NULL element as
+      // well.
+      for (int j = i; j != *argc; j++) {
+        argv[j] = argv[j + 1];
+      }
+
+      // Decrements the argument count.
+      (*argc)--;
+
+      // We also need to decrement the iterator as we just removed
+      // an element.
+      i--;
+    } else if (arg_string == "--help" || arg_string == "-h" ||
+               arg_string == "-?" || arg_string == "/?" ||
+               HasGoogleTestFlagPrefix(arg)) {
+      // Both help flag and unrecognized Google Test flags (excluding
+      // internal ones) trigger help display.
+      g_help_flag = true;
+    }
+  }
+
+  if (g_help_flag) {
+    // We print the help here instead of in RUN_ALL_TESTS(), as the
+    // latter may not be called at all if the user is using Google
+    // Test with another testing framework.
+    PrintColorEncoded(kColorEncodedHelpMessage);
+  }
+}
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.
+void ParseGoogleTestFlagsOnly(int* argc, char** argv) {
+  ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) {
+  ParseGoogleTestFlagsOnlyImpl(argc, argv);
+}
+
+// The internal implementation of InitGoogleTest().
+//
+// The type parameter CharType can be instantiated to either char or
+// wchar_t.
+template <typename CharType>
+void InitGoogleTestImpl(int* argc, CharType** argv) {
+  g_init_gtest_count++;
+
+  // We don't want to run the initialization code twice.
+  if (g_init_gtest_count != 1) return;
+
+  if (*argc <= 0) return;
+
+  internal::g_executable_path = internal::StreamableToString(argv[0]);
+
+#if GTEST_HAS_DEATH_TEST
+
+  g_argvs.clear();
+  for (int i = 0; i != *argc; i++) {
+    g_argvs.push_back(StreamableToString(argv[i]));
+  }
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+  ParseGoogleTestFlagsOnly(argc, argv);
+  GetUnitTestImpl()->PostFlagParsingInit();
+}
+
+}  // namespace internal
+
+// Initializes Google Test.  This must be called before calling
+// RUN_ALL_TESTS().  In particular, it parses a command line for the
+// flags that Google Test recognizes.  Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned.  Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function for the second time has no user-visible effect.
+void InitGoogleTest(int* argc, char** argv) {
+  internal::InitGoogleTestImpl(argc, argv);
+}
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+void InitGoogleTest(int* argc, wchar_t** argv) {
+  internal::InitGoogleTestImpl(argc, argv);
+}
+
+}  // namespace testing
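To make the flag-consumption behavior above concrete: InitGoogleTest removes every --gtest_* argument it recognizes from argv and decrements *argc, so the arguments your own code sees afterwards are only the unrecognized ones. A minimal sketch of a typical test binary's main() (the --my-app-flag argument is hypothetical):

#include "gtest/gtest.h"
#include <cstdio>

int main(int argc, char** argv) {
  // Consumes e.g. --gtest_filter=Foo.*; leaves --my-app-flag in argv.
  ::testing::InitGoogleTest(&argc, argv);
  for (int i = 1; i < argc; i++) {
    std::printf("unconsumed arg: %s\n", argv[i]);  // application flags only
  }
  return RUN_ALL_TESTS();
}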
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan), vladl@google.com (Vlad Losev)
+//
+// This file implements death tests.
+
+
+#if GTEST_HAS_DEATH_TEST
+
+# if GTEST_OS_MAC
+#  include <crt_externs.h>
+# endif  // GTEST_OS_MAC
+
+# include <errno.h>
+# include <fcntl.h>
+# include <limits.h>
+
+# if GTEST_OS_LINUX
+#  include <signal.h>
+# endif  // GTEST_OS_LINUX
+
+# include <stdarg.h>
+
+# if GTEST_OS_WINDOWS
+#  include <windows.h>
+# else
+#  include <sys/mman.h>
+#  include <sys/wait.h>
+# endif  // GTEST_OS_WINDOWS
+
+# if GTEST_OS_QNX
+#  include <spawn.h>
+# endif  // GTEST_OS_QNX
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+// Constants.
+
+// The default death test style.
+static const char kDefaultDeathTestStyle[] = "fast";
+
+GTEST_DEFINE_string_(
+    death_test_style,
+    internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle),
+    "Indicates how to run a death test in a forked child process: "
+    "\"threadsafe\" (child process re-executes the test binary "
+    "from the beginning, running only the specific death test) or "
+    "\"fast\" (child process runs the death test immediately "
+    "after forking).");
+
+GTEST_DEFINE_bool_(
+    death_test_use_fork,
+    internal::BoolFromGTestEnv("death_test_use_fork", false),
+    "Instructs to use fork()/_exit() instead of clone() in death tests. "
+    "Ignored and always uses fork() on POSIX systems where clone() is not "
+    "implemented. Useful when running under valgrind or similar tools if "
+    "those do not support clone(). Valgrind 3.3.1 will just fail if "
+    "it sees an unsupported combination of clone() flags. "
+    "It is not recommended to use this flag w/o valgrind though it will "
+    "work in 99% of the cases. Once valgrind is fixed, this flag will "
+    "most likely be removed.");
+
+namespace internal {
+GTEST_DEFINE_string_(
+    internal_run_death_test, "",
+    "Indicates the file, line number, temporal index of "
+    "the single death test to run, and a file descriptor to "
+    "which a success code may be sent, all separated by "
+    "the '|' characters.  This flag is specified if and only if the current "
+    "process is a sub-process launched for running a thread-safe "
+    "death test.  FOR INTERNAL USE ONLY.");
+}  // namespace internal
+
+#if GTEST_HAS_DEATH_TEST
+
+namespace internal {
+
+// Valid only for fast death tests. Indicates the code is running in the
+// child process of a fast style death test.
+static bool g_in_fast_death_test_child = false;
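The two knobs defined above can be set from the command line, from the environment, or programmatically. A short illustration (values are the documented ones; Google Test saves and restores flag values around each test, so the assignment below does not leak into other tests):

// Command line:   ./my_tests --gtest_death_test_style=threadsafe
// Environment:    GTEST_DEATH_TEST_STYLE=threadsafe ./my_tests
// In code:
#include <cstdlib>
#include "gtest/gtest.h"

TEST(MyDeathTest, UsesThreadSafeStyle) {
  // Re-executes the binary for this death test instead of plain fork().
  ::testing::FLAGS_gtest_death_test_style = "threadsafe";
  EXPECT_DEATH(abort(), "");
}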
+// Returns a Boolean value indicating whether the caller is currently
+// executing in the context of the death test child process.  Tools such as
+// Valgrind heap checkers may need this to modify their behavior in death
+// tests.  IMPORTANT: This is an internal utility.  Using it may break the
+// implementation of death tests.  User code MUST NOT use it.
+bool InDeathTestChild() {
+# if GTEST_OS_WINDOWS
+
+  // On Windows, death tests are thread-safe regardless of the value of the
+  // death_test_style flag.
+  return !GTEST_FLAG(internal_run_death_test).empty();
+
+# else
+
+  if (GTEST_FLAG(death_test_style) == "threadsafe")
+    return !GTEST_FLAG(internal_run_death_test).empty();
+  else
+    return g_in_fast_death_test_child;
+#endif
+}
+
+}  // namespace internal
+
+// ExitedWithCode constructor.
+ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) {
+}
+
+// ExitedWithCode function-call operator.
+bool ExitedWithCode::operator()(int exit_status) const {
+# if GTEST_OS_WINDOWS
+
+  return exit_status == exit_code_;
+
+# else
+
+  return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_;
+
+# endif  // GTEST_OS_WINDOWS
+}
+
+# if !GTEST_OS_WINDOWS
+// KilledBySignal constructor.
+KilledBySignal::KilledBySignal(int signum) : signum_(signum) {
+}
+
+// KilledBySignal function-call operator.
+bool KilledBySignal::operator()(int exit_status) const {
+  return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_;
+}
+# endif  // !GTEST_OS_WINDOWS
+
+namespace internal {
+
+// Utilities needed for death tests.
+
+// Generates a textual description of a given exit code, in the format
+// specified by wait(2).
+static std::string ExitSummary(int exit_code) {
+  Message m;
+
+# if GTEST_OS_WINDOWS
+
+  m << "Exited with exit status " << exit_code;
+
+# else
+
+  if (WIFEXITED(exit_code)) {
+    m << "Exited with exit status " << WEXITSTATUS(exit_code);
+  } else if (WIFSIGNALED(exit_code)) {
+    m << "Terminated by signal " << WTERMSIG(exit_code);
+  }
+#  ifdef WCOREDUMP
+  if (WCOREDUMP(exit_code)) {
+    m << " (core dumped)";
+  }
+#  endif
+# endif  // GTEST_OS_WINDOWS
+
+  return m.GetString();
+}
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+bool ExitedUnsuccessfully(int exit_status) {
+  return !ExitedWithCode(0)(exit_status);
+}
+
+# if !GTEST_OS_WINDOWS
+// Generates a textual failure message when a death test finds more than
+// one thread running, or cannot determine the number of threads, prior
+// to executing the given statement.  It is the responsibility of the
+// caller not to pass a thread_count of 1.
+static std::string DeathTestThreadWarning(size_t thread_count) {
+  Message msg;
+  msg << "Death tests use fork(), which is unsafe particularly"
+      << " in a threaded context. For this test, " << GTEST_NAME_ << " ";
+  if (thread_count == 0)
+    msg << "couldn't detect the number of threads.";
+  else
+    msg << "detected " << thread_count << " threads.";
+  return msg.GetString();
+}
+# endif  // !GTEST_OS_WINDOWS
+
+// Flag characters for reporting a death test that did not die.
+static const char kDeathTestLived = 'L';
+static const char kDeathTestReturned = 'R';
+static const char kDeathTestThrew = 'T';
+static const char kDeathTestInternalError = 'I';
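The ExitedWithCode and KilledBySignal predicates above are the ones users pass to EXPECT_EXIT; a short sketch of both (the signal case is POSIX-only, matching the #if guard above):

#include <csignal>
#include <cstdlib>
#include "gtest/gtest.h"

TEST(MyDeathTest, ExitPredicates) {
  // Passes iff the child really exits with status 2.
  EXPECT_EXIT(exit(2), ::testing::ExitedWithCode(2), "");
#if !GTEST_OS_WINDOWS
  // Passes iff the child is terminated by SIGKILL.
  EXPECT_EXIT(raise(SIGKILL), ::testing::KilledBySignal(SIGKILL), "");
#endif
}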
+// An enumeration describing all of the possible ways that a death test can
+// conclude.  DIED means that the process died while executing the test
+// code; LIVED means that process lived beyond the end of the test code;
+// RETURNED means that the test statement attempted to execute a return
+// statement, which is not allowed; THREW means that the test statement
+// returned control by throwing an exception.  IN_PROGRESS means the test
+// has not yet concluded.
+// TODO(vladl@google.com): Unify names and possibly values for
+// AbortReason, DeathTestOutcome, and flag characters above.
+enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW };
+
+// Routine for aborting the program which is safe to call from an
+// exec-style death test child process, in which case the error
+// message is propagated back to the parent process.  Otherwise, the
+// message is simply printed to stderr.  In either case, the program
+// then exits with status 1.
+void DeathTestAbort(const std::string& message) {
+  // On a POSIX system, this function may be called from a threadsafe-style
+  // death test child process, which operates on a very small stack.  Use
+  // the heap for any additional non-minuscule memory requirements.
+  const InternalRunDeathTestFlag* const flag =
+      GetUnitTestImpl()->internal_run_death_test_flag();
+  if (flag != NULL) {
+    FILE* parent = posix::FDOpen(flag->write_fd(), "w");
+    fputc(kDeathTestInternalError, parent);
+    fprintf(parent, "%s", message.c_str());
+    fflush(parent);
+    _exit(1);
+  } else {
+    fprintf(stderr, "%s", message.c_str());
+    fflush(stderr);
+    posix::Abort();
+  }
+}
+
+// A replacement for CHECK that calls DeathTestAbort if the assertion
+// fails.
+# define GTEST_DEATH_TEST_CHECK_(expression) \
+  do { \
+    if (!::testing::internal::IsTrue(expression)) { \
+      DeathTestAbort( \
+          ::std::string("CHECK failed: File ") + __FILE__ +  ", line " \
+          + ::testing::internal::StreamableToString(__LINE__) + ": " \
+          + #expression); \
+    } \
+  } while (::testing::internal::AlwaysFalse())
+
+// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for
+// evaluating any system call that fulfills two conditions: it must return
+// -1 on failure, and set errno to EINTR when it is interrupted and
+// should be tried again.  The macro expands to a loop that repeatedly
+// evaluates the expression as long as it evaluates to -1 and sets
+// errno to EINTR.  If the expression evaluates to -1 but errno is
+// something other than EINTR, DeathTestAbort is called.
+# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \
+  do { \
+    int gtest_retval; \
+    do { \
+      gtest_retval = (expression); \
+    } while (gtest_retval == -1 && errno == EINTR); \
+    if (gtest_retval == -1) { \
+      DeathTestAbort( \
+          ::std::string("CHECK failed: File ") + __FILE__ + ", line " \
+          + ::testing::internal::StreamableToString(__LINE__) + ": " \
+          + #expression + " != -1"); \
+    } \
+  } while (::testing::internal::AlwaysFalse())
+
+// Returns the message describing the last system error in errno.
+std::string GetLastErrnoDescription() {
+  return errno == 0 ? "" : posix::StrError(errno);
+}
+
+// This is called from a death test parent process to read a failure
+// message from the death test child process and log it with the FATAL
+// severity.  On Windows, the message is read from a pipe handle.  On other
+// platforms, it is read from a file descriptor.
+static void FailFromInternalError(int fd) {
+  Message error;
+  char buffer[256];
+  int num_read;
+
+  do {
+    while ((num_read = posix::Read(fd, buffer, 255)) > 0) {
+      buffer[num_read] = '\0';
+      error << buffer;
+    }
+  } while (num_read == -1 && errno == EINTR);
+
+  if (num_read == 0) {
+    GTEST_LOG_(FATAL) << error.GetString();
+  } else {
+    const int last_error = errno;
+    GTEST_LOG_(FATAL) << "Error while reading death test internal: "
+                      << GetLastErrnoDescription() << " [" << last_error << "]";
+  }
+}
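The EINTR-retry idiom that GTEST_DEATH_TEST_CHECK_SYSCALL_ wraps in a macro is worth seeing as a plain function; a minimal standalone sketch (POSIX; the function name is illustrative):

#include <cerrno>
#include <unistd.h>

// Retries a read() that may be interrupted by a signal.  Returns the
// byte count on success, or -1 with errno set on a real failure.
ssize_t ReadRetryingOnEintr(int fd, void* buf, size_t count) {
  ssize_t n;
  do {
    n = read(fd, buf, count);           // may fail spuriously with EINTR
  } while (n == -1 && errno == EINTR);  // retry only the interrupted case
  return n;                             // other errors propagate to caller
}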
+// Death test constructor.  Increments the running death test count
+// for the current test.
+DeathTest::DeathTest() {
+  TestInfo* const info = GetUnitTestImpl()->current_test_info();
+  if (info == NULL) {
+    DeathTestAbort("Cannot run a death test outside of a TEST or "
+                   "TEST_F construct");
+  }
+}
+
+// Creates and returns a death test by dispatching to the current
+// death test factory.
+bool DeathTest::Create(const char* statement, const RE* regex,
+                       const char* file, int line, DeathTest** test) {
+  return GetUnitTestImpl()->death_test_factory()->Create(
+      statement, regex, file, line, test);
+}
+
+const char* DeathTest::LastMessage() {
+  return last_death_test_message_.c_str();
+}
+
+void DeathTest::set_last_death_test_message(const std::string& message) {
+  last_death_test_message_ = message;
+}
+
+std::string DeathTest::last_death_test_message_;
+
+// Provides cross platform implementation for some death functionality.
+class DeathTestImpl : public DeathTest {
+ protected:
+  DeathTestImpl(const char* a_statement, const RE* a_regex)
+      : statement_(a_statement),
+        regex_(a_regex),
+        spawned_(false),
+        status_(-1),
+        outcome_(IN_PROGRESS),
+        read_fd_(-1),
+        write_fd_(-1) {}
+
+  // read_fd_ is expected to be closed and cleared by a derived class.
+  ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); }
+
+  void Abort(AbortReason reason);
+  virtual bool Passed(bool status_ok);
+
+  const char* statement() const { return statement_; }
+  const RE* regex() const { return regex_; }
+  bool spawned() const { return spawned_; }
+  void set_spawned(bool is_spawned) { spawned_ = is_spawned; }
+  int status() const { return status_; }
+  void set_status(int a_status) { status_ = a_status; }
+  DeathTestOutcome outcome() const { return outcome_; }
+  void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; }
+  int read_fd() const { return read_fd_; }
+  void set_read_fd(int fd) { read_fd_ = fd; }
+  int write_fd() const { return write_fd_; }
+  void set_write_fd(int fd) { write_fd_ = fd; }
+
+  // Called in the parent process only. Reads the result code of the death
+  // test child process via a pipe, interprets it to set the outcome_
+  // member, and closes read_fd_.  Outputs diagnostics and terminates in
+  // case of unexpected codes.
+  void ReadAndInterpretStatusByte();
+
+ private:
+  // The textual content of the code this object is testing.  This class
+  // doesn't own this string and should not attempt to delete it.
+  const char* const statement_;
+  // The regular expression which test output must match.  DeathTestImpl
+  // doesn't own this object and should not attempt to delete it.
+  const RE* const regex_;
+  // True if the death test child process has been successfully spawned.
+  bool spawned_;
+  // The exit status of the child process.
+  int status_;
+  // How the death test concluded.
+  DeathTestOutcome outcome_;
+  // Descriptor to the read end of the pipe to the child process.  It is
+  // always -1 in the child process.  The child keeps its write end of the
+  // pipe in write_fd_.
+  int read_fd_;
+  // Descriptor to the child's write end of the pipe to the parent process.
+  // It is always -1 in the parent process.  The parent keeps its end of the
+  // pipe in read_fd_.
+  int write_fd_;
+};
+// Called in the parent process only. Reads the result code of the death
+// test child process via a pipe, interprets it to set the outcome_
+// member, and closes read_fd_.  Outputs diagnostics and terminates in
+// case of unexpected codes.
+void DeathTestImpl::ReadAndInterpretStatusByte() {
+  char flag;
+  int bytes_read;
+
+  // The read() here blocks until data is available (signifying the
+  // failure of the death test) or until the pipe is closed (signifying
+  // its success), so it's okay to call this in the parent before
+  // the child process has exited.
+  do {
+    bytes_read = posix::Read(read_fd(), &flag, 1);
+  } while (bytes_read == -1 && errno == EINTR);
+
+  if (bytes_read == 0) {
+    set_outcome(DIED);
+  } else if (bytes_read == 1) {
+    switch (flag) {
+      case kDeathTestReturned:
+        set_outcome(RETURNED);
+        break;
+      case kDeathTestThrew:
+        set_outcome(THREW);
+        break;
+      case kDeathTestLived:
+        set_outcome(LIVED);
+        break;
+      case kDeathTestInternalError:
+        FailFromInternalError(read_fd());  // Does not return.
+        break;
+      default:
+        GTEST_LOG_(FATAL) << "Death test child process reported "
+                          << "unexpected status byte ("
+                          << static_cast<unsigned int>(flag) << ")";
+    }
+  } else {
+    GTEST_LOG_(FATAL) << "Read from death test child process failed: "
+                      << GetLastErrnoDescription();
+  }
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd()));
+  set_read_fd(-1);
+}
+
+// Signals that the death test code which should have exited, didn't.
+// Should be called only in a death test child process.
+// Writes a status byte to the child's status file descriptor, then
+// calls _exit(1).
+void DeathTestImpl::Abort(AbortReason reason) {
+  // The parent process considers the death test to be a failure if
+  // it finds any data in our pipe.  So, here we write a single flag byte
+  // to the pipe, then exit.
+  const char status_ch =
+      reason == TEST_DID_NOT_DIE ? kDeathTestLived :
+      reason == TEST_THREW_EXCEPTION ? kDeathTestThrew : kDeathTestReturned;
+
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1));
+  // We are leaking the descriptor here because on some platforms (i.e.,
+  // when built as Windows DLL), destructors of global objects will still
+  // run after calling _exit(). On such systems, write_fd_ will be
+  // indirectly closed from the destructor of UnitTestImpl, causing double
+  // close if it is also closed here. On debug configurations, double close
+  // may assert. As there are no in-process buffers to flush here, we are
+  // relying on the OS to close the descriptor after the process terminates
+  // when the destructors are not run.
+  _exit(1);  // Exits w/o any normal exit hooks (we were supposed to crash)
+}
+
+// Returns an indented copy of stderr output for a death test.
+// This makes distinguishing death test output lines from regular log lines
+// much easier.
+static ::std::string FormatDeathTestOutput(const ::std::string& output) {
+  ::std::string ret;
+  for (size_t at = 0; ; ) {
+    const size_t line_end = output.find('\n', at);
+    ret += "[  DEATH   ] ";
+    if (line_end == ::std::string::npos) {
+      ret += output.substr(at);
+      break;
+    }
+    ret += output.substr(at, line_end + 1 - at);
+    at = line_end + 1;
+  }
+  return ret;
+}
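The parent/child contract implemented by ReadAndInterpretStatusByte and Abort is simple: an empty pipe at child exit (EOF) means the child died as expected, while a single flag byte means it survived. A standalone sketch of that protocol, much simplified from the code above (POSIX):

#include <sys/wait.h>
#include <unistd.h>
#include <cstdio>
#include <cstdlib>

int main() {
  int fd[2];
  if (pipe(fd) != 0) return 1;
  const pid_t pid = fork();
  if (pid == 0) {              // child: runs the "death" statement
    close(fd[0]);
    abort();                   // dies before reaching the write below
    const char lived = 'L';
    write(fd[1], &lived, 1);   // reached only if the statement survived
    _exit(1);
  }
  close(fd[1]);                // parent: EOF on the pipe == child died
  char flag;
  const ssize_t n = read(fd[0], &flag, 1);
  std::printf(n == 0 ? "child died (pass)\n" : "child lived (fail)\n");
  waitpid(pid, NULL, 0);
  return 0;
}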
+// Assesses the success or failure of a death test, using both private
+// members which have previously been set, and one argument:
+//
+// Private data members:
+//   outcome:  An enumeration describing how the death test
+//             concluded: DIED, LIVED, THREW, or RETURNED.  The death test
+//             fails in the latter three cases.
+//   status:   The exit status of the child process. On *nix, it is in the
+//             in the format specified by wait(2). On Windows, this is the
+//             value supplied to the ExitProcess() API or a numeric code
+//             of the exception that terminated the program.
+//   regex:    A regular expression object to be applied to
+//             the test's captured standard error output; the death test
+//             fails if it does not match.
+//
+// Argument:
+//   status_ok: true if exit_status is acceptable in the context of
+//              this particular death test, which fails if it is false
+//
+// Returns true iff all of the above conditions are met.  Otherwise, the
+// first failing condition, in the order given above, is the one that is
+// reported. Also sets the last death test message string.
+bool DeathTestImpl::Passed(bool status_ok) {
+  if (!spawned())
+    return false;
+
+  const std::string error_message = GetCapturedStderr();
+
+  bool success = false;
+  Message buffer;
+
+  buffer << "Death test: " << statement() << "\n";
+  switch (outcome()) {
+    case LIVED:
+      buffer << "    Result: failed to die.\n"
+             << " Error msg:\n" << FormatDeathTestOutput(error_message);
+      break;
+    case THREW:
+      buffer << "    Result: threw an exception.\n"
+             << " Error msg:\n" << FormatDeathTestOutput(error_message);
+      break;
+    case RETURNED:
+      buffer << "    Result: illegal return in test statement.\n"
+             << " Error msg:\n" << FormatDeathTestOutput(error_message);
+      break;
+    case DIED:
+      if (status_ok) {
+        const bool matched = RE::PartialMatch(error_message.c_str(), *regex());
+        if (matched) {
+          success = true;
+        } else {
+          buffer << "    Result: died but not with expected error.\n"
+                 << "  Expected: " << regex()->pattern() << "\n"
+                 << "Actual msg:\n" << FormatDeathTestOutput(error_message);
+        }
+      } else {
+        buffer << "    Result: died but not with expected exit code:\n"
+               << "            " << ExitSummary(status()) << "\n"
+               << "Actual msg:\n" << FormatDeathTestOutput(error_message);
+      }
+      break;
+    case IN_PROGRESS:
+    default:
+      GTEST_LOG_(FATAL)
+          << "DeathTest::Passed somehow called before conclusion of test";
+  }
+
+  DeathTest::set_last_death_test_message(buffer.GetString());
+  return success;
+}
+
+# if GTEST_OS_WINDOWS
+// WindowsDeathTest implements death tests on Windows. Due to the
+// specifics of starting new processes on Windows, death tests there are
+// always threadsafe, and Google Test considers the
+// --gtest_death_test_style=fast setting to be equivalent to
+// --gtest_death_test_style=threadsafe there.
+//
+// A few implementation notes: Like the Linux version, the Windows
+// implementation uses pipes for child-to-parent communication. But due to
+// the specifics of pipes on Windows, some extra steps are required:
+//
+// 1. The parent creates a communication pipe and stores handles to both
+//    ends of it.
+// 2. The parent starts the child and provides it with the information
+//    necessary to acquire the handle to the write end of the pipe.
+// 3. The child acquires the write end of the pipe and signals the parent
+//    using a Windows event.
+// 4. Now the parent can release the write end of the pipe on its side. If
+//    this is done before step 3, the object's reference count goes down to
+//    0 and it is destroyed, preventing the child from acquiring it. The
+//    parent now has to release it, or read operations on the read end of
+//    the pipe will not return when the child terminates.
+// 5. The parent reads child's output through the pipe (outcome code and
+//    any possible error messages) from the pipe, and its stderr and then
+//    determines whether to fail the test.
+//
+// Note: to distinguish Win32 API calls from the local method and function
+// calls, the former are explicitly resolved in the global namespace.
+//
+class WindowsDeathTest : public DeathTestImpl {
+ public:
+  WindowsDeathTest(const char* a_statement,
+                   const RE* a_regex,
+                   const char* file,
+                   int line)
+      : DeathTestImpl(a_statement, a_regex), file_(file), line_(line) {}
+
+  // All of these virtual functions are inherited from DeathTest.
+  virtual int Wait();
+  virtual TestRole AssumeRole();
+
+ private:
+  // The name of the file in which the death test is located.
+  const char* const file_;
+  // The line number on which the death test is located.
+  const int line_;
+  // Handle to the write end of the pipe to the child process.
+  AutoHandle write_handle_;
+  // Child process handle.
+  AutoHandle child_handle_;
+  // Event the child process uses to signal the parent that it has
+  // acquired the handle to the write end of the pipe. After seeing this
+  // event the parent can release its own handles to make sure its
+  // ReadFile() calls return when the child terminates.
+  AutoHandle event_handle_;
+};
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists.  As a side effect, sets the
+// outcome data member.
+int WindowsDeathTest::Wait() {
+  if (!spawned())
+    return 0;
+
+  // Wait until the child either signals that it has acquired the write end
+  // of the pipe or it dies.
+  const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() };
+  switch (::WaitForMultipleObjects(2,
+                                   wait_handles,
+                                   FALSE,  // Waits for any of the handles.
+                                   INFINITE)) {
+    case WAIT_OBJECT_0:
+    case WAIT_OBJECT_0 + 1:
+      break;
+    default:
+      GTEST_DEATH_TEST_CHECK_(false);  // Should not get here.
+  }
+
+  // The child has acquired the write end of the pipe or exited.
+  // We release the handle on our side and continue.
+  write_handle_.Reset();
+  event_handle_.Reset();
+
+  ReadAndInterpretStatusByte();
+
+  // Waits for the child process to exit if it hasn't already.  This
+  // returns immediately if the child has already exited, regardless of
+  // whether previous calls to WaitForMultipleObjects synchronized on this
+  // handle or not.
+  GTEST_DEATH_TEST_CHECK_(
+      WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(),
+                                             INFINITE));
+  DWORD status_code;
+  GTEST_DEATH_TEST_CHECK_(
+      ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE);
+  child_handle_.Reset();
+  set_status(static_cast<int>(status_code));
+  return status();
+}
+
+// The AssumeRole process for a Windows death test.  It creates a child
+// process with the same executable as the current process to run the
+// death test.  The child process is given the --gtest_filter and
+// --gtest_internal_run_death_test flags such that it knows to run the
+// current death test only.
+DeathTest::TestRole WindowsDeathTest::AssumeRole() {
+  const UnitTestImpl* const impl = GetUnitTestImpl();
+  const InternalRunDeathTestFlag* const flag =
+      impl->internal_run_death_test_flag();
+  const TestInfo* const info = impl->current_test_info();
+  const int death_test_index = info->result()->death_test_count();
+
+  if (flag != NULL) {
+    // ParseInternalRunDeathTestFlag() has performed all the necessary
+    // processing.
+    set_write_fd(flag->write_fd());
+    return EXECUTE_TEST;
+  }
+
+  // WindowsDeathTest uses an anonymous pipe to communicate results of
+  // a death test.
+  SECURITY_ATTRIBUTES handles_are_inheritable = {
+    sizeof(SECURITY_ATTRIBUTES), NULL, TRUE };
+  HANDLE read_handle, write_handle;
+  GTEST_DEATH_TEST_CHECK_(
+      ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable,
+                   0)  // Default buffer size.
+      != FALSE);
+  set_read_fd(::_open_osfhandle(reinterpret_cast<intptr_t>(read_handle),
+                                O_RDONLY));
+  write_handle_.Reset(write_handle);
+  event_handle_.Reset(::CreateEvent(
+      &handles_are_inheritable,
+      TRUE,    // The event is manual-reset; it stays signaled once set.
+      FALSE,   // The initial state is non-signalled.
+      NULL));  // The event is unnamed.
+  GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL);
+  const std::string filter_flag =
+      std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "=" +
+      info->test_case_name() + "." + info->name();
+  const std::string internal_flag =
+      std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag +
+      "=" + file_ + "|" + StreamableToString(line_) + "|" +
+      StreamableToString(death_test_index) + "|" +
+      StreamableToString(static_cast<unsigned int>(::GetCurrentProcessId())) +
+      // size_t has the same width as pointers on both 32-bit and 64-bit
+      // Windows platforms.
+      // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx.
+      "|" + StreamableToString(reinterpret_cast<size_t>(write_handle)) +
+      "|" + StreamableToString(reinterpret_cast<size_t>(event_handle_.Get()));
+
+  char executable_path[_MAX_PATH + 1];  // NOLINT
+  GTEST_DEATH_TEST_CHECK_(
+      _MAX_PATH + 1 != ::GetModuleFileNameA(NULL,
+                                            executable_path,
+                                            _MAX_PATH));
+
+  std::string command_line =
+      std::string(::GetCommandLineA()) + " " + filter_flag + " \"" +
+      internal_flag + "\"";
+
+  DeathTest::set_last_death_test_message("");
+
+  CaptureStderr();
+  // Flush the log buffers since the log streams are shared with the child.
+  FlushInfoLog();
+
+  // The child process will share the standard handles with the parent.
+  STARTUPINFOA startup_info;
+  memset(&startup_info, 0, sizeof(STARTUPINFO));
+  startup_info.dwFlags = STARTF_USESTDHANDLES;
+  startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE);
+  startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
+  startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
+
+  PROCESS_INFORMATION process_info;
+  GTEST_DEATH_TEST_CHECK_(::CreateProcessA(
+      executable_path,
+      const_cast<char*>(command_line.c_str()),
+      NULL,   // Returned process handle is not inheritable.
+      NULL,   // Returned thread handle is not inheritable.
+      TRUE,   // Child inherits all inheritable handles (for write_handle_).
+      0x0,    // Default creation flags.
+      NULL,   // Inherit the parent's environment.
+      UnitTest::GetInstance()->original_working_dir(),
+      &startup_info,
+      &process_info) != FALSE);
+  child_handle_.Reset(process_info.hProcess);
+  ::CloseHandle(process_info.hThread);
+  set_spawned(true);
+  return OVERSEE_TEST;
+}
+# else  // We are not on Windows.
+
+// ForkingDeathTest provides implementations for most of the abstract
+// methods of the DeathTest interface.  Only the AssumeRole method is
+// left undefined.
+class ForkingDeathTest : public DeathTestImpl {
+ public:
+  ForkingDeathTest(const char* statement, const RE* regex);
+
+  // All of these virtual functions are inherited from DeathTest.
+  virtual int Wait();
+
+ protected:
+  void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
+
+ private:
+  // PID of child process during death test; 0 in the child process itself.
+  pid_t child_pid_;
+};
+
+// Constructs a ForkingDeathTest.
+ForkingDeathTest::ForkingDeathTest(const char* a_statement, const RE* a_regex)
+    : DeathTestImpl(a_statement, a_regex),
+      child_pid_(-1) {}
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists.  As a side effect, sets the
+// outcome data member.
+int ForkingDeathTest::Wait() {
+  if (!spawned())
+    return 0;
+
+  ReadAndInterpretStatusByte();
+
+  int status_value;
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0));
+  set_status(status_value);
+  return status_value;
+}
+
+// A concrete death test class that forks, then immediately runs the test
+// in the child process.
+class NoExecDeathTest : public ForkingDeathTest {
+ public:
+  NoExecDeathTest(const char* a_statement, const RE* a_regex) :
+      ForkingDeathTest(a_statement, a_regex) { }
+  virtual TestRole AssumeRole();
+};
+
+// The AssumeRole process for a fork-and-run death test.  It implements a
+// straightforward fork, with a simple pipe to transmit the status byte.
+DeathTest::TestRole NoExecDeathTest::AssumeRole() {
+  const size_t thread_count = GetThreadCount();
+  if (thread_count != 1) {
+    GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count);
+  }
+
+  int pipe_fd[2];
+  GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+
+  DeathTest::set_last_death_test_message("");
+  CaptureStderr();
+  // When we fork the process below, the log file buffers are copied, but the
+  // file descriptors are shared.  We flush all log files here so that closing
+  // the file descriptors in the child process doesn't throw off the
+  // synchronization between descriptors and buffers in the parent process.
+  // This is as close to the fork as possible to avoid a race condition in case
+  // there are multiple threads running before the death test, and another
+  // thread writes to the log file.
+  FlushInfoLog();
+
+  const pid_t child_pid = fork();
+  GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+  set_child_pid(child_pid);
+  if (child_pid == 0) {
+    GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0]));
+    set_write_fd(pipe_fd[1]);
+    // Redirects all logging to stderr in the child process to prevent
+    // concurrent writes to the log files.  We capture stderr in the parent
+    // process and append the child process' output to a log.
+    LogToStderr();
+    // Event forwarding to the listeners of event listener API must be shut
+    // down in death test subprocesses.
+    GetUnitTestImpl()->listeners()->SuppressEventForwarding();
+    g_in_fast_death_test_child = true;
+    return EXECUTE_TEST;
+  } else {
+    GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+    set_read_fd(pipe_fd[0]);
+    set_spawned(true);
+    return OVERSEE_TEST;
+  }
+}
+
+// A concrete death test class that forks and re-executes the main
+// program from the beginning, with command-line flags set that cause
+// only this specific death test to be run.
+class ExecDeathTest : public ForkingDeathTest {
+ public:
+  ExecDeathTest(const char* a_statement, const RE* a_regex,
+                const char* file, int line) :
+      ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { }
+  virtual TestRole AssumeRole();
+ private:
+  static ::std::vector<testing::internal::string>
+  GetArgvsForDeathTestChildProcess() {
+    ::std::vector<testing::internal::string> args = GetInjectableArgvs();
+    return args;
+  }
+  // The name of the file in which the death test is located.
+  const char* const file_;
+  // The line number on which the death test is located.
+  const int line_;
+};
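Wait() above stores the raw waitpid() status, which ExitSummary and the ExitedWithCode/KilledBySignal predicates decode with the wait(2) macros; a small standalone sketch of that decoding:

#include <sys/wait.h>
#include <cstdio>

// Prints a human-readable summary of a raw wait(2) status value.
void PrintExitSummary(int status) {
  if (WIFEXITED(status)) {
    std::printf("exited with status %d\n", WEXITSTATUS(status));
  } else if (WIFSIGNALED(status)) {
    std::printf("terminated by signal %d\n", WTERMSIG(status));
  }
}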
+// Utility class for accumulating command-line arguments.
+class Arguments {
+ public:
+  Arguments() {
+    args_.push_back(NULL);
+  }
+
+  ~Arguments() {
+    for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
+         ++i) {
+      free(*i);
+    }
+  }
+  void AddArgument(const char* argument) {
+    args_.insert(args_.end() - 1, posix::StrDup(argument));
+  }
+
+  template <typename Str>
+  void AddArguments(const ::std::vector<Str>& arguments) {
+    for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
+         i != arguments.end();
+         ++i) {
+      args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));
+    }
+  }
+  char* const* Argv() {
+    return &args_[0];
+  }
+
+ private:
+  std::vector<char*> args_;
+};
+
+// A struct that encompasses the arguments to the child process of a
+// threadsafe-style death test process.
+struct ExecDeathTestArgs {
+  char* const* argv;  // Command-line arguments for the child's call to exec
+  int close_fd;       // File descriptor to close; the read end of a pipe
+};
+
+# if GTEST_OS_MAC
+inline char** GetEnviron() {
+  // When Google Test is built as a framework on MacOS X, the environ variable
+  // is unavailable. Apple's documentation (man environ) recommends using
+  // _NSGetEnviron() instead.
+  return *_NSGetEnviron();
+}
+# else
+// Some POSIX platforms expect you to declare environ. extern "C" makes
+// it reside in the global namespace.
+extern "C" char** environ;
+inline char** GetEnviron() { return environ; }
+# endif  // GTEST_OS_MAC
+
+# if !GTEST_OS_QNX
+// The main function for a threadsafe-style death test child process.
+// This function is called in a clone()-ed process and thus must avoid
+// any potentially unsafe operations like malloc or libc functions.
+static int ExecDeathTestChildMain(void* child_arg) {
+  ExecDeathTestArgs* const args = static_cast<ExecDeathTestArgs*>(child_arg);
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd));
+
+  // We need to execute the test program in the same environment where
+  // it was originally invoked.  Therefore we change to the original
+  // working directory first.
+  const char* const original_dir =
+      UnitTest::GetInstance()->original_working_dir();
+  // We can safely call chdir() as it's a direct system call.
+  if (chdir(original_dir) != 0) {
+    DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " +
+                   GetLastErrnoDescription());
+    return EXIT_FAILURE;
+  }
+
+  // We can safely call execve() as it's a direct system call.  We
+  // cannot use execvp() as it's a libc function and thus potentially
+  // unsafe.  Since execve() doesn't search the PATH, the user must
+  // invoke the test program via a valid path that contains at least
+  // one path separator.
+  execve(args->argv[0], args->argv, GetEnviron());
+  DeathTestAbort(std::string("execve(") + args->argv[0] + ", ...) in " +
+                 original_dir + " failed: " +
+                 GetLastErrnoDescription());
+  return EXIT_FAILURE;
+}
+# endif  // !GTEST_OS_QNX
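The Arguments class above exists because execve() requires a NULL-terminated char* array; keeping the trailing NULL in the vector at all times and inserting new entries just before it is the whole trick. A minimal sketch of the same idea (POSIX; /bin/echo is just a convenient target):

#include <unistd.h>
#include <cstdlib>
#include <cstring>
#include <vector>

int main() {
  std::vector<char*> argv;
  argv.push_back(NULL);                             // trailing NULL sentinel
  const char* const words[] = { "/bin/echo", "hello" };
  for (size_t i = 0; i < 2; i++)
    argv.insert(argv.end() - 1, strdup(words[i]));  // insert before the NULL
  char* envp[] = { NULL };
  execve(argv[0], &argv[0], envp);                  // argv is NULL-terminated
  return EXIT_FAILURE;                              // reached only on failure
}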
+// Two utility routines that together determine the direction the stack
+// grows.
+// This could be accomplished more elegantly by a single recursive
+// function, but we want to guard against the unlikely possibility of
+// a smart compiler optimizing the recursion away.
+//
+// GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining
+// StackLowerThanAddress into StackGrowsDown, which then doesn't give
+// correct answer.
+void StackLowerThanAddress(const void* ptr, bool* result) GTEST_NO_INLINE_;
+void StackLowerThanAddress(const void* ptr, bool* result) {
+  int dummy;
+  *result = (&dummy < ptr);
+}
+
+bool StackGrowsDown() {
+  int dummy;
+  bool result;
+  StackLowerThanAddress(&dummy, &result);
+  return result;
+}
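A quick way to see what StackGrowsDown() computes: compare a local's address in a callee frame against one in the caller's frame. On mainstream x86/ARM ABIs this prints "down". Note this sketch omits the no-inline attribute, so compile without optimization (-O0) or an inlining compiler may distort the comparison:

#include <cstdio>

static void LowerThan(const void* caller_local, bool* result) {
  int callee_local;
  *result = (&callee_local < caller_local);  // callee frame vs. caller frame
}

int main() {
  int caller_local;
  bool grows_down;
  LowerThan(&caller_local, &grows_down);
  std::printf("stack grows %s\n", grows_down ? "down" : "up");
  return 0;
}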
+// Spawns a child process with the same executable as the current process in
+// a thread-safe manner and instructs it to run the death test.  The
+// implementation uses fork(2) + exec.  On systems where clone(2) is
+// available, it is used instead, being slightly more thread-safe.  On QNX,
+// fork supports only single-threaded environments, so this function uses
+// spawn(2) there instead.  The function dies with an error message if
+// anything goes wrong.
+static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) {
+  ExecDeathTestArgs args = { argv, close_fd };
+  pid_t child_pid = -1;
+
+# if GTEST_OS_QNX
+  // Obtains the current directory and sets it to be closed in the child
+  // process.
+  const int cwd_fd = open(".", O_RDONLY);
+  GTEST_DEATH_TEST_CHECK_(cwd_fd != -1);
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(cwd_fd, F_SETFD, FD_CLOEXEC));
+  // We need to execute the test program in the same environment where
+  // it was originally invoked.  Therefore we change to the original
+  // working directory first.
+  const char* const original_dir =
+      UnitTest::GetInstance()->original_working_dir();
+  // We can safely call chdir() as it's a direct system call.
+  if (chdir(original_dir) != 0) {
+    DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " +
+                   GetLastErrnoDescription());
+    return EXIT_FAILURE;
+  }
+
+  int fd_flags;
+  // Set close_fd to be closed after spawn.
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(fd_flags = fcntl(close_fd, F_GETFD));
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(close_fd, F_SETFD,
+                                        fd_flags | FD_CLOEXEC));
+  struct inheritance inherit = {0};
+  // spawn is a system call.
+  child_pid = spawn(args.argv[0], 0, NULL, &inherit, args.argv, GetEnviron());
+  // Restores the current working directory.
+  GTEST_DEATH_TEST_CHECK_(fchdir(cwd_fd) != -1);
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(cwd_fd));
+
+# else   // GTEST_OS_QNX
+#  if GTEST_OS_LINUX
+  // When a SIGPROF signal is received while fork() or clone() are executing,
+  // the process may hang. To avoid this, we ignore SIGPROF here and re-enable
+  // it after the call to fork()/clone() is complete.
+  struct sigaction saved_sigprof_action;
+  struct sigaction ignore_sigprof_action;
+  memset(&ignore_sigprof_action, 0, sizeof(ignore_sigprof_action));
+  sigemptyset(&ignore_sigprof_action.sa_mask);
+  ignore_sigprof_action.sa_handler = SIG_IGN;
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(sigaction(
+      SIGPROF, &ignore_sigprof_action, &saved_sigprof_action));
+#  endif  // GTEST_OS_LINUX
+
+#  if GTEST_HAS_CLONE
+  const bool use_fork = GTEST_FLAG(death_test_use_fork);
+
+  if (!use_fork) {
+    static const bool stack_grows_down = StackGrowsDown();
+    const size_t stack_size = getpagesize();
+    // MMAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead.
+    void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE,
+                             MAP_ANON | MAP_PRIVATE, -1, 0);
+    GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED);
+
+    // Maximum stack alignment in bytes:  For a downward-growing stack, this
+    // amount is subtracted from size of the stack space to get an address
+    // that is within the stack space and is aligned on all systems we care
+    // about.  As far as I know there is no ABI with stack alignment greater
+    // than 64.  We assume stack and stack_size already have alignment of
+    // kMaxStackAlignment.
+    const size_t kMaxStackAlignment = 64;
+    void* const stack_top =
+        static_cast<char*>(stack) +
+            (stack_grows_down ? stack_size - kMaxStackAlignment : 0);
+    GTEST_DEATH_TEST_CHECK_(stack_size > kMaxStackAlignment &&
+        reinterpret_cast<intptr_t>(stack_top) % kMaxStackAlignment == 0);
+
+    child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args);
+
+    GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1);
+  }
+#  else
+  const bool use_fork = true;
+#  endif  // GTEST_HAS_CLONE
+
+  if (use_fork && (child_pid = fork()) == 0) {
+      ExecDeathTestChildMain(&args);
+      _exit(0);
+  }
+# endif  // GTEST_OS_QNX
+# if GTEST_OS_LINUX
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(
+      sigaction(SIGPROF, &saved_sigprof_action, NULL));
+# endif  // GTEST_OS_LINUX
+
+  GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+  return child_pid;
+}
+
+// The AssumeRole process for a fork-and-exec death test.  It re-executes the
+// main program from the beginning, setting the --gtest_filter
+// and --gtest_internal_run_death_test flags to cause only the current
+// death test to be re-run.
+DeathTest::TestRole ExecDeathTest::AssumeRole() {
+  const UnitTestImpl* const impl = GetUnitTestImpl();
+  const InternalRunDeathTestFlag* const flag =
+      impl->internal_run_death_test_flag();
+  const TestInfo* const info = impl->current_test_info();
+  const int death_test_index = info->result()->death_test_count();
+
+  if (flag != NULL) {
+    set_write_fd(flag->write_fd());
+    return EXECUTE_TEST;
+  }
+
+  int pipe_fd[2];
+  GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+  // Clear the close-on-exec flag on the write end of the pipe, lest
+  // it be closed when the child process does an exec:
+  GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1);
+
+  const std::string filter_flag =
+      std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "="
+      + info->test_case_name() + "." + info->name();
+  const std::string internal_flag =
+      std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + "="
+      + file_ + "|" + StreamableToString(line_) + "|"
+      + StreamableToString(death_test_index) + "|"
+      + StreamableToString(pipe_fd[1]);
+  Arguments args;
+  args.AddArguments(GetArgvsForDeathTestChildProcess());
+  args.AddArgument(filter_flag.c_str());
+  args.AddArgument(internal_flag.c_str());
+
+  DeathTest::set_last_death_test_message("");
+
+  CaptureStderr();
+  // See the comment in NoExecDeathTest::AssumeRole for why the next line
+  // is necessary.
+  FlushInfoLog();
+
+  const pid_t child_pid = ExecDeathTestSpawnChild(args.Argv(), pipe_fd[0]);
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+  set_child_pid(child_pid);
+  set_read_fd(pipe_fd[0]);
+  set_spawned(true);
+  return OVERSEE_TEST;
+}
+
+# endif  // !GTEST_OS_WINDOWS
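The --gtest_internal_run_death_test value built in AssumeRole above is just a '|'-separated record, parsed back by ParseInternalRunDeathTestFlag further down: four fields on POSIX (file, line, test index, status-pipe write fd) and six on Windows (file, line, test index, parent PID, write-handle, event-handle). A sketch mirroring the POSIX composition, with hypothetical values:

#include <sstream>
#include <string>

// Illustrative only: same field order as the POSIX internal_flag above.
std::string MakeInternalFlag(const std::string& file, int line,
                             int index, int write_fd) {
  std::ostringstream os;
  os << "--gtest_internal_run_death_test=" << file << "|" << line
     << "|" << index << "|" << write_fd;
  return os.str();  // e.g. "--gtest_internal_run_death_test=foo.cc|42|0|5"
}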
+// Creates a concrete DeathTest-derived class that depends on the
+// --gtest_death_test_style flag, and sets the pointer pointed to
+// by the "test" argument to its address.  If the test should be
+// skipped, sets that pointer to NULL.  Returns true, unless the
+// flag is set to an invalid value.
+bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex,
+                                     const char* file, int line,
+                                     DeathTest** test) {
+  UnitTestImpl* const impl = GetUnitTestImpl();
+  const InternalRunDeathTestFlag* const flag =
+      impl->internal_run_death_test_flag();
+  const int death_test_index = impl->current_test_info()
+      ->increment_death_test_count();
+
+  if (flag != NULL) {
+    if (death_test_index > flag->index()) {
+      DeathTest::set_last_death_test_message(
+          "Death test count (" + StreamableToString(death_test_index)
+          + ") somehow exceeded expected maximum ("
+          + StreamableToString(flag->index()) + ")");
+      return false;
+    }
+
+    if (!(flag->file() == file && flag->line() == line &&
+          flag->index() == death_test_index)) {
+      *test = NULL;
+      return true;
+    }
+  }
+
+# if GTEST_OS_WINDOWS
+
+  if (GTEST_FLAG(death_test_style) == "threadsafe" ||
+      GTEST_FLAG(death_test_style) == "fast") {
+    *test = new WindowsDeathTest(statement, regex, file, line);
+  }
+
+# else
+
+  if (GTEST_FLAG(death_test_style) == "threadsafe") {
+    *test = new ExecDeathTest(statement, regex, file, line);
+  } else if (GTEST_FLAG(death_test_style) == "fast") {
+    *test = new NoExecDeathTest(statement, regex);
+  }
+
+# endif  // GTEST_OS_WINDOWS
+
+  else {  // NOLINT - this is more readable than unbalanced brackets inside #if.
+    DeathTest::set_last_death_test_message(
+        "Unknown death test style \"" + GTEST_FLAG(death_test_style)
+        + "\" encountered");
+    return false;
+  }
+
+  return true;
+}
+
+// Splits a given string on a given delimiter, populating a given
+// vector with the fields.  GTEST_HAS_DEATH_TEST implies that we have
+// ::std::string, so we can use it here.
+static void SplitString(const ::std::string& str, char delimiter,
+                        ::std::vector< ::std::string>* dest) {
+  ::std::vector< ::std::string> parsed;
+  ::std::string::size_type pos = 0;
+  while (::testing::internal::AlwaysTrue()) {
+    const ::std::string::size_type colon = str.find(delimiter, pos);
+    if (colon == ::std::string::npos) {
+      parsed.push_back(str.substr(pos));
+      break;
+    } else {
+      parsed.push_back(str.substr(pos, colon - pos));
+      pos = colon + 1;
+    }
+  }
+  dest->swap(parsed);
+}
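Note that SplitString keeps empty fields, which matters when a value has adjacent delimiters; a quick standalone illustration of the same splitting behavior:

#include <cstdio>
#include <string>
#include <vector>

int main() {
  const std::string str = "foo.cc|42||5";
  std::vector<std::string> fields;
  std::string::size_type pos = 0;
  for (;;) {
    const std::string::size_type sep = str.find('|', pos);
    if (sep == std::string::npos) { fields.push_back(str.substr(pos)); break; }
    fields.push_back(str.substr(pos, sep - pos));
    pos = sep + 1;
  }
  // Prints 4 fields: "foo.cc", "42", "" (empty field kept), "5".
  for (size_t i = 0; i < fields.size(); i++)
    std::printf("[%s]\n", fields[i].c_str());
  return 0;
}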
+# if GTEST_OS_WINDOWS
+// Recreates the pipe and event handles from the provided parameters,
+// signals the event, and returns a file descriptor wrapped around the pipe
+// handle. This function is called in the child process only.
+int GetStatusFileDescriptor(unsigned int parent_process_id,
+                            size_t write_handle_as_size_t,
+                            size_t event_handle_as_size_t) {
+  AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE,
+                                                 FALSE,  // Non-inheritable.
+                                                 parent_process_id));
+  if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) {
+    DeathTestAbort("Unable to open parent process " +
+                   StreamableToString(parent_process_id));
+  }
+
+  // TODO(vladl@google.com): Replace the following check with a
+  // compile-time assertion when available.
+  GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t));
+
+  const HANDLE write_handle =
+      reinterpret_cast<HANDLE>(write_handle_as_size_t);
+  HANDLE dup_write_handle;
+
+  // The newly initialized handle is accessible only in the parent
+  // process. To obtain one accessible within the child, we need to use
+  // DuplicateHandle.
+  if (!::DuplicateHandle(parent_process_handle.Get(), write_handle,
+                         ::GetCurrentProcess(), &dup_write_handle,
+                         0x0,    // Requested privileges ignored since
+                                 // DUPLICATE_SAME_ACCESS is used.
+                         FALSE,  // Request non-inheritable handle.
+                         DUPLICATE_SAME_ACCESS)) {
+    DeathTestAbort("Unable to duplicate the pipe handle " +
+                   StreamableToString(write_handle_as_size_t) +
+                   " from the parent process " +
+                   StreamableToString(parent_process_id));
+  }
+
+  const HANDLE event_handle = reinterpret_cast<HANDLE>(event_handle_as_size_t);
+  HANDLE dup_event_handle;
+
+  if (!::DuplicateHandle(parent_process_handle.Get(), event_handle,
+                         ::GetCurrentProcess(), &dup_event_handle,
+                         0x0,
+                         FALSE,
+                         DUPLICATE_SAME_ACCESS)) {
+    DeathTestAbort("Unable to duplicate the event handle " +
+                   StreamableToString(event_handle_as_size_t) +
+                   " from the parent process " +
+                   StreamableToString(parent_process_id));
+  }
+
+  const int write_fd =
+      ::_open_osfhandle(reinterpret_cast<intptr_t>(dup_write_handle), O_APPEND);
+  if (write_fd == -1) {
+    DeathTestAbort("Unable to convert pipe handle " +
+                   StreamableToString(write_handle_as_size_t) +
+                   " to a file descriptor");
+  }
+
+  // Signals the parent that the write end of the pipe has been acquired
+  // so the parent can release its own write end.
+  ::SetEvent(dup_event_handle);
+
+  return write_fd;
+}
+# endif  // GTEST_OS_WINDOWS
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {
+  if (GTEST_FLAG(internal_run_death_test) == "") return NULL;
+
+  // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
+  // can use it here.
+  int line = -1;
+  int index = -1;
+  ::std::vector< ::std::string> fields;
+  SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields);
+  int write_fd = -1;
+
+# if GTEST_OS_WINDOWS
+
+  unsigned int parent_process_id = 0;
+  size_t write_handle_as_size_t = 0;
+  size_t event_handle_as_size_t = 0;
+
+  if (fields.size() != 6
+      || !ParseNaturalNumber(fields[1], &line)
+      || !ParseNaturalNumber(fields[2], &index)
+      || !ParseNaturalNumber(fields[3], &parent_process_id)
+      || !ParseNaturalNumber(fields[4], &write_handle_as_size_t)
+      || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) {
+    DeathTestAbort("Bad --gtest_internal_run_death_test flag: " +
+                   GTEST_FLAG(internal_run_death_test));
+  }
+  write_fd = GetStatusFileDescriptor(parent_process_id,
+                                     write_handle_as_size_t,
+                                     event_handle_as_size_t);
+# else
+
+  if (fields.size() != 4
+      || !ParseNaturalNumber(fields[1], &line)
+      || !ParseNaturalNumber(fields[2], &index)
+      || !ParseNaturalNumber(fields[3], &write_fd)) {
+    DeathTestAbort("Bad --gtest_internal_run_death_test flag: "
+        + GTEST_FLAG(internal_run_death_test));
+  }
+
+# endif  // GTEST_OS_WINDOWS
+
+  return new InternalRunDeathTestFlag(fields[0], line, index, write_fd);
+}
+
+}  // namespace internal
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+}  // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: keith.ray@gmail.com (Keith Ray)
+
+
+#include <stdlib.h>
+
+#if GTEST_OS_WINDOWS_MOBILE
+# include <windows.h>
+#elif GTEST_OS_WINDOWS
+# include <direct.h>
+# include <io.h>
+#elif GTEST_OS_SYMBIAN
+// Symbian OpenC has PATH_MAX in sys/syslimits.h
+# include <sys/syslimits.h>
+#else
+# include <limits.h>
+# include <climits>  // Some Linux distributions define PATH_MAX here.
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+#if GTEST_OS_WINDOWS
+# define GTEST_PATH_MAX_ _MAX_PATH
+#elif defined(PATH_MAX)
+# define GTEST_PATH_MAX_ PATH_MAX
+#elif defined(_XOPEN_PATH_MAX)
+# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX
+#else
+# define GTEST_PATH_MAX_ _POSIX_PATH_MAX
+#endif  // GTEST_OS_WINDOWS
+
+
+namespace testing {
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+// On Windows, '\\' is the standard path separator, but many tools and the
+// Windows API also accept '/' as an alternate path separator. Unless otherwise
+// noted, a file path can contain either kind of path separators, or a mixture
+// of them.
+const char kPathSeparator = '\\';
+const char kAlternatePathSeparator = '/';
+const char kPathSeparatorString[] = "\\";
+const char kAlternatePathSeparatorString[] = "/";
+# if GTEST_OS_WINDOWS_MOBILE
+// Windows CE doesn't have a current directory. You should not use
+// the current directory in tests on Windows CE, but this at least
+// provides a reasonable fallback.
+const char kCurrentDirectoryString[] = "\\";
+// Windows CE doesn't define INVALID_FILE_ATTRIBUTES
+const DWORD kInvalidFileAttributes = 0xffffffff;
+# else
+const char kCurrentDirectoryString[] = ".\\";
+# endif  // GTEST_OS_WINDOWS_MOBILE
+#else
+const char kPathSeparator = '/';
+const char kPathSeparatorString[] = "/";
+const char kCurrentDirectoryString[] = "./";
+#endif  // GTEST_OS_WINDOWS
+
+// Returns whether the given character is a valid path separator.
+static bool IsPathSeparator(char c) {
+#if GTEST_HAS_ALT_PATH_SEP_
+  return (c == kPathSeparator) || (c == kAlternatePathSeparator);
+#else
+  return c == kPathSeparator;
+#endif
+}
+
+// Returns the current working directory, or "" if unsuccessful.
+FilePath FilePath::GetCurrentDir() {
+#if GTEST_OS_WINDOWS_MOBILE
+  // Windows CE doesn't have a current directory, so we just return
+  // something reasonable.
+  return FilePath(kCurrentDirectoryString);
+#elif GTEST_OS_WINDOWS
+  char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+  return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
+#else
+  char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+  return FilePath(getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
+#endif  // GTEST_OS_WINDOWS_MOBILE
+}
"" : cwd); +#endif // GTEST_OS_WINDOWS_MOBILE +} + +// Returns a copy of the FilePath with the case-insensitive extension removed. +// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns +// FilePath("dir/file"). If a case-insensitive extension is not +// found, returns a copy of the original FilePath. +FilePath FilePath::RemoveExtension(const char* extension) const { + const std::string dot_extension = std::string(".") + extension; + if (String::EndsWithCaseInsensitive(pathname_, dot_extension)) { + return FilePath(pathname_.substr( + 0, pathname_.length() - dot_extension.length())); + } + return *this; +} + +// Returns a pointer to the last occurence of a valid path separator in +// the FilePath. On Windows, for example, both '/' and '\' are valid path +// separators. Returns NULL if no path separator was found. +const char* FilePath::FindLastPathSeparator() const { + const char* const last_sep = strrchr(c_str(), kPathSeparator); +#if GTEST_HAS_ALT_PATH_SEP_ + const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator); + // Comparing two pointers of which only one is NULL is undefined. + if (last_alt_sep != NULL && + (last_sep == NULL || last_alt_sep > last_sep)) { + return last_alt_sep; + } +#endif + return last_sep; +} + +// Returns a copy of the FilePath with the directory part removed. +// Example: FilePath("path/to/file").RemoveDirectoryName() returns +// FilePath("file"). If there is no directory part ("just_a_file"), it returns +// the FilePath unmodified. If there is no file part ("just_a_dir/") it +// returns an empty FilePath (""). +// On Windows platform, '\' is the path separator, otherwise it is '/'. +FilePath FilePath::RemoveDirectoryName() const { + const char* const last_sep = FindLastPathSeparator(); + return last_sep ? FilePath(last_sep + 1) : *this; +} + +// RemoveFileName returns the directory path with the filename removed. +// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/". +// If the FilePath is "a_file" or "/a_file", RemoveFileName returns +// FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does +// not have a file, like "just/a/dir/", it returns the FilePath unmodified. +// On Windows platform, '\' is the path separator, otherwise it is '/'. +FilePath FilePath::RemoveFileName() const { + const char* const last_sep = FindLastPathSeparator(); + std::string dir; + if (last_sep) { + dir = std::string(c_str(), last_sep + 1 - c_str()); + } else { + dir = kCurrentDirectoryString; + } + return FilePath(dir); +} + +// Helper functions for naming files in a directory for xml output. + +// Given directory = "dir", base_name = "test", number = 0, +// extension = "xml", returns "dir/test.xml". If number is greater +// than zero (e.g., 12), returns "dir/test_12.xml". +// On Windows platform, uses \ as the separator rather than /. +FilePath FilePath::MakeFileName(const FilePath& directory, + const FilePath& base_name, + int number, + const char* extension) { + std::string file; + if (number == 0) { + file = base_name.string() + "." + extension; + } else { + file = base_name.string() + "_" + StreamableToString(number) + + "." + extension; + } + return ConcatPaths(directory, FilePath(file)); +} + +// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml". +// On Windows, uses \ as the separator rather than /. 
+FilePath FilePath::ConcatPaths(const FilePath& directory, + const FilePath& relative_path) { + if (directory.IsEmpty()) + return relative_path; + const FilePath dir(directory.RemoveTrailingPathSeparator()); + return FilePath(dir.string() + kPathSeparator + relative_path.string()); +} + +// Returns true if pathname describes something findable in the file-system, +// either a file, directory, or whatever. +bool FilePath::FileOrDirectoryExists() const { +#if GTEST_OS_WINDOWS_MOBILE + LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str()); + const DWORD attributes = GetFileAttributes(unicode); + delete [] unicode; + return attributes != kInvalidFileAttributes; +#else + posix::StatStruct file_stat; + return posix::Stat(pathname_.c_str(), &file_stat) == 0; +#endif // GTEST_OS_WINDOWS_MOBILE +} + +// Returns true if pathname describes a directory in the file-system +// that exists. +bool FilePath::DirectoryExists() const { + bool result = false; +#if GTEST_OS_WINDOWS + // Don't strip off trailing separator if path is a root directory on + // Windows (like "C:\\"). + const FilePath& path(IsRootDirectory() ? *this : + RemoveTrailingPathSeparator()); +#else + const FilePath& path(*this); +#endif + +#if GTEST_OS_WINDOWS_MOBILE + LPCWSTR unicode = String::AnsiToUtf16(path.c_str()); + const DWORD attributes = GetFileAttributes(unicode); + delete [] unicode; + if ((attributes != kInvalidFileAttributes) && + (attributes & FILE_ATTRIBUTE_DIRECTORY)) { + result = true; + } +#else + posix::StatStruct file_stat; + result = posix::Stat(path.c_str(), &file_stat) == 0 && + posix::IsDir(file_stat); +#endif // GTEST_OS_WINDOWS_MOBILE + + return result; +} + +// Returns true if pathname describes a root directory. (Windows has one +// root directory per disk drive.) +bool FilePath::IsRootDirectory() const { +#if GTEST_OS_WINDOWS + // TODO(wan@google.com): on Windows a network share like + // \\server\share can be a root directory, although it cannot be the + // current directory. Handle this properly. + return pathname_.length() == 3 && IsAbsolutePath(); +#else + return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]); +#endif +} + +// Returns true if pathname describes an absolute path. +bool FilePath::IsAbsolutePath() const { + const char* const name = pathname_.c_str(); +#if GTEST_OS_WINDOWS + return pathname_.length() >= 3 && + ((name[0] >= 'a' && name[0] <= 'z') || + (name[0] >= 'A' && name[0] <= 'Z')) && + name[1] == ':' && + IsPathSeparator(name[2]); +#else + return IsPathSeparator(name[0]); +#endif +} + +// Returns a pathname for a file that does not currently exist. The pathname +// will be directory/base_name.extension or +// directory/base_name_.extension if directory/base_name.extension +// already exists. The number will be incremented until a pathname is found +// that does not already exist. +// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'. +// There could be a race condition if two or more processes are calling this +// function at the same time -- they could both pick the same filename. +FilePath FilePath::GenerateUniqueFileName(const FilePath& directory, + const FilePath& base_name, + const char* extension) { + FilePath full_pathname; + int number = 0; + do { + full_pathname.Set(MakeFileName(directory, base_name, number++, extension)); + } while (full_pathname.FileOrDirectoryExists()); + return full_pathname; +} + +// Returns true if FilePath ends with a path separator, which indicates that +// it is intended to represent a directory. Returns false otherwise. 
+// This does NOT check that a directory (or file) actually exists. +bool FilePath::IsDirectory() const { + return !pathname_.empty() && + IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]); +} + +// Create directories so that path exists. Returns true if successful or if +// the directories already exist; returns false if unable to create directories +// for any reason. +bool FilePath::CreateDirectoriesRecursively() const { + if (!this->IsDirectory()) { + return false; + } + + if (pathname_.length() == 0 || this->DirectoryExists()) { + return true; + } + + const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName()); + return parent.CreateDirectoriesRecursively() && this->CreateFolder(); +} + +// Create the directory so that path exists. Returns true if successful or +// if the directory already exists; returns false if unable to create the +// directory for any reason, including if the parent directory does not +// exist. Not named "CreateDirectory" because that's a macro on Windows. +bool FilePath::CreateFolder() const { +#if GTEST_OS_WINDOWS_MOBILE + FilePath removed_sep(this->RemoveTrailingPathSeparator()); + LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str()); + int result = CreateDirectory(unicode, NULL) ? 0 : -1; + delete [] unicode; +#elif GTEST_OS_WINDOWS + int result = _mkdir(pathname_.c_str()); +#else + int result = mkdir(pathname_.c_str(), 0777); +#endif // GTEST_OS_WINDOWS_MOBILE + + if (result == -1) { + return this->DirectoryExists(); // An error is OK if the directory exists. + } + return true; // No error. +} + +// If input name has a trailing separator character, remove it and return the +// name, otherwise return the name string unmodified. +// On Windows platform, uses \ as the separator, other platforms use /. +FilePath FilePath::RemoveTrailingPathSeparator() const { + return IsDirectory() + ? FilePath(pathname_.substr(0, pathname_.length() - 1)) + : *this; +} + +// Removes any redundant separators that might be in the pathname. +// For example, "bar///foo" becomes "bar/foo". Does not eliminate other +// redundancies that might be in a pathname involving "." or "..". +// TODO(wan@google.com): handle Windows network shares (e.g. \\server\share). +void FilePath::Normalize() { + if (pathname_.c_str() == NULL) { + pathname_ = ""; + return; + } + const char* src = pathname_.c_str(); + char* const dest = new char[pathname_.length() + 1]; + char* dest_ptr = dest; + memset(dest_ptr, 0, pathname_.length() + 1); + + while (*src != '\0') { + *dest_ptr = *src; + if (!IsPathSeparator(*src)) { + src++; + } else { +#if GTEST_HAS_ALT_PATH_SEP_ + if (*dest_ptr == kAlternatePathSeparator) { + *dest_ptr = kPathSeparator; + } +#endif + while (IsPathSeparator(*src)) + src++; + } + dest_ptr++; + } + *dest_ptr = '\0'; + pathname_ = dest; + delete[] dest; +} + +} // namespace internal +} // namespace testing +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+#include <limits.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#if GTEST_OS_WINDOWS_MOBILE
+# include <windows.h>  // For TerminateProcess()
+#elif GTEST_OS_WINDOWS
+# include <io.h>
+# include <sys/stat.h>
+#else
+# include <unistd.h>
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+#if GTEST_OS_MAC
+# include <mach/mach_init.h>
+# include <mach/task.h>
+# include <mach/vm_map.h>
+#endif  // GTEST_OS_MAC
+
+#if GTEST_OS_QNX
+# include <devctl.h>
+# include <sys/procfs.h>
+#endif  // GTEST_OS_QNX
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+namespace internal {
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC and C++Builder do not provide a definition of STDERR_FILENO.
+const int kStdOutFileno = 1;
+const int kStdErrFileno = 2;
+#else
+const int kStdOutFileno = STDOUT_FILENO;
+const int kStdErrFileno = STDERR_FILENO;
+#endif  // _MSC_VER
+
+#if GTEST_OS_MAC
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+  const task_t task = mach_task_self();
+  mach_msg_type_number_t thread_count;
+  thread_act_array_t thread_list;
+  const kern_return_t status = task_threads(task, &thread_list, &thread_count);
+  if (status == KERN_SUCCESS) {
+    // task_threads allocates resources in thread_list and we need to free them
+    // to avoid leaks.
+    vm_deallocate(task,
+                  reinterpret_cast<vm_address_t>(thread_list),
+                  sizeof(thread_t) * thread_count);
+    return static_cast<size_t>(thread_count);
+  } else {
+    return 0;
+  }
+}
+
+#elif GTEST_OS_QNX
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+  const int fd = open("/proc/self/as", O_RDONLY);
+  if (fd < 0) {
+    return 0;
+  }
+  procfs_info process_info;
+  const int status =
+      devctl(fd, DCMD_PROC_INFO, &process_info, sizeof(process_info), NULL);
+  close(fd);
+  if (status == EOK) {
+    return static_cast<size_t>(process_info.num_threads);
+  } else {
+    return 0;
+  }
+}
+
+#else
+
+size_t GetThreadCount() {
+  // There's no portable way to detect the number of threads, so we just
+  // return 0 to indicate that we cannot detect it.
+  return 0;
+}
+
+#endif  // GTEST_OS_MAC
+
+#if GTEST_USES_POSIX_RE
+
+// Implements RE. Currently only needed for death tests.
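+//
+// Usage sketch (illustrative, not part of the original source; RE is an
+// internal class declared in gtest-port.h). On this branch patterns use
+// the POSIX Extended syntax:
+//
+//   using testing::internal::RE;
+//   const RE re("a.*c");
+//   RE::FullMatch("abc", re);       // true: the entire string matches.
+//   RE::FullMatch("xabcx", re);     // false: only a substring matches.
+//   RE::PartialMatch("xabcx", re);  // true: a substring match suffices.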
+
+RE::~RE() {
+  if (is_valid_) {
+    // regfree'ing an invalid regex might crash because the content
+    // of the regex is undefined. Since the regexes are essentially
+    // the same, one cannot be valid (or invalid) without the other
+    // being so too.
+    regfree(&partial_regex_);
+    regfree(&full_regex_);
+  }
+  free(const_cast<char*>(pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+  if (!re.is_valid_) return false;
+
+  regmatch_t match;
+  return regexec(&re.full_regex_, str, 1, &match, 0) == 0;
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+  if (!re.is_valid_) return false;
+
+  regmatch_t match;
+  return regexec(&re.partial_regex_, str, 1, &match, 0) == 0;
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+  pattern_ = posix::StrDup(regex);
+
+  // Reserves enough bytes to hold the regular expression used for a
+  // full match.
+  const size_t full_regex_len = strlen(regex) + 10;
+  char* const full_pattern = new char[full_regex_len];
+
+  snprintf(full_pattern, full_regex_len, "^(%s)$", regex);
+  is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0;
+  // We want to call regcomp(&partial_regex_, ...) even if the
+  // previous expression returns false. Otherwise partial_regex_ may
+  // not be properly initialized and may cause trouble when it's
+  // freed.
+  //
+  // Some implementations of POSIX regex (e.g. on at least some
+  // versions of Cygwin) don't accept the empty string as a valid
+  // regex. We change it to an equivalent form "()" to be safe.
+  if (is_valid_) {
+    const char* const partial_regex = (*regex == '\0') ? "()" : regex;
+    is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0;
+  }
+  EXPECT_TRUE(is_valid_)
+      << "Regular expression \"" << regex
+      << "\" is not a valid POSIX Extended regular expression.";
+
+  delete[] full_pattern;
+}
+
+#elif GTEST_USES_SIMPLE_RE
+
+// Returns true iff ch appears anywhere in str (excluding the
+// terminating '\0' character).
+bool IsInSet(char ch, const char* str) {
+  return ch != '\0' && strchr(str, ch) != NULL;
+}
+
+// Returns true iff ch belongs to the given classification. Unlike
+// similar functions in <ctype.h>, these aren't affected by the
+// current locale.
+bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
+bool IsAsciiPunct(char ch) {
+  return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~");
+}
+bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
+bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
+bool IsAsciiWordChar(char ch) {
+  return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') ||
+      ('0' <= ch && ch <= '9') || ch == '_';
+}
+
+// Returns true iff "\\c" is a supported escape sequence.
+bool IsValidEscape(char c) {
+  return (IsAsciiPunct(c) || IsInSet(c, "dDfnrsStvwW"));
+}
+
+// Returns true iff the given atom (specified by escaped and pattern)
+// matches ch. The result is undefined if the atom is invalid.
+bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
+  if (escaped) {  // "\\p" where p is pattern_char.
+    switch (pattern_char) {
+      case 'd': return IsAsciiDigit(ch);
+      case 'D': return !IsAsciiDigit(ch);
+      case 'f': return ch == '\f';
+      case 'n': return ch == '\n';
+      case 'r': return ch == '\r';
+      case 's': return IsAsciiWhiteSpace(ch);
+      case 'S': return !IsAsciiWhiteSpace(ch);
+      case 't': return ch == '\t';
+      case 'v': return ch == '\v';
+      case 'w': return IsAsciiWordChar(ch);
+      case 'W': return !IsAsciiWordChar(ch);
+    }
+    return IsAsciiPunct(pattern_char) && pattern_char == ch;
+  }
+
+  return (pattern_char == '.' && ch != '\n') || pattern_char == ch;
+}
+
+// Helper function used by ValidateRegex() to format error messages.
+std::string FormatRegexSyntaxError(const char* regex, int index) {
+  return (Message() << "Syntax error at index " << index
+          << " in simple regular expression \"" << regex << "\": ").GetString();
+}
+
+// Generates non-fatal failures and returns false if regex is invalid;
+// otherwise returns true.
+bool ValidateRegex(const char* regex) {
+  if (regex == NULL) {
+    // TODO(wan@google.com): fix the source file location in the
+    // assertion failures to match where the regex is used in user
+    // code.
+    ADD_FAILURE() << "NULL is not a valid simple regular expression.";
+    return false;
+  }
+
+  bool is_valid = true;
+
+  // True iff ?, *, or + can follow the previous atom.
+  bool prev_repeatable = false;
+  for (int i = 0; regex[i]; i++) {
+    if (regex[i] == '\\') {  // An escape sequence
+      i++;
+      if (regex[i] == '\0') {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+                      << "'\\' cannot appear at the end.";
+        return false;
+      }
+
+      if (!IsValidEscape(regex[i])) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+                      << "invalid escape sequence \"\\" << regex[i] << "\".";
+        is_valid = false;
+      }
+      prev_repeatable = true;
+    } else {  // Not an escape sequence.
+      const char ch = regex[i];
+
+      if (ch == '^' && i > 0) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'^' can only appear at the beginning.";
+        is_valid = false;
+      } else if (ch == '$' && regex[i + 1] != '\0') {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'$' can only appear at the end.";
+        is_valid = false;
+      } else if (IsInSet(ch, "()[]{}|")) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'" << ch << "' is unsupported.";
+        is_valid = false;
+      } else if (IsRepeat(ch) && !prev_repeatable) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'" << ch << "' can only follow a repeatable token.";
+        is_valid = false;
+      }
+
+      prev_repeatable = !IsInSet(ch, "^$?*+");
+    }
+  }
+
+  return is_valid;
+}
+
+// Matches a repeated regex atom followed by a valid simple regular
+// expression. The regex atom is defined as c if escaped is false,
+// or \c otherwise. repeat is the repetition meta character (?, *,
+// or +). The behavior is undefined if str contains too many
+// characters to be indexable by size_t, in which case the test will
+// probably time out anyway. We are fine with this limitation as
+// std::string has it too.
+bool MatchRepetitionAndRegexAtHead(
+    bool escaped, char c, char repeat, const char* regex,
+    const char* str) {
+  const size_t min_count = (repeat == '+') ? 1 : 0;
+  const size_t max_count = (repeat == '?') ? 1 :
+      static_cast<size_t>(-1) - 1;
+  // We cannot call numeric_limits<size_t>::max() as it conflicts with the
+  // max() macro on Windows.
+
+  for (size_t i = 0; i <= max_count; ++i) {
+    // We know that the atom matches each of the first i characters in str.
+    if (i >= min_count && MatchRegexAtHead(regex, str + i)) {
+      // We have enough matches at the head, and the tail matches too.
+      // Since we only care about *whether* the pattern matches str
+      // (as opposed to *how* it matches), there is no need to find a
+      // greedy match.
+      return true;
+    }
+    if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i]))
+      return false;
+  }
+  return false;
+}
+
+// Returns true iff regex matches a prefix of str. regex must be a
+// valid simple regular expression and not start with "^", or the
+// result is undefined.
+bool MatchRegexAtHead(const char* regex, const char* str) {
+  if (*regex == '\0')  // An empty regex matches a prefix of anything.
+    return true;
+
+  // "$" only matches the end of a string. Note that regex being
+  // valid guarantees that there's nothing after "$" in it.
+  if (*regex == '$')
+    return *str == '\0';
+
+  // Is the first thing in regex an escape sequence?
+  const bool escaped = *regex == '\\';
+  if (escaped)
+    ++regex;
+  if (IsRepeat(regex[1])) {
+    // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so
+    // here's an indirect recursion. It terminates as the regex gets
+    // shorter in each recursion.
+    return MatchRepetitionAndRegexAtHead(
+        escaped, regex[0], regex[1], regex + 2, str);
+  } else {
+    // regex isn't empty, isn't "$", and doesn't start with a
+    // repetition. We match the first atom of regex with the first
+    // character of str and recurse.
+    return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) &&
+        MatchRegexAtHead(regex + 1, str + 1);
+  }
+}
+
+// Returns true iff regex matches any substring of str. regex must be
+// a valid simple regular expression, or the result is undefined.
+//
+// The algorithm is recursive, but the recursion depth doesn't exceed
+// the regex length, so we won't need to worry about running out of
+// stack space normally. In rare cases the time complexity can be
+// exponential with respect to the regex length + the string length,
+// but usually it's much faster (often close to linear).
+bool MatchRegexAnywhere(const char* regex, const char* str) {
+  if (regex == NULL || str == NULL)
+    return false;
+
+  if (*regex == '^')
+    return MatchRegexAtHead(regex + 1, str);
+
+  // A successful match can be anywhere in str.
+  do {
+    if (MatchRegexAtHead(regex, str))
+      return true;
+  } while (*str++ != '\0');
+  return false;
+}
+
+// Implements the RE class.
+
+RE::~RE() {
+  free(const_cast<char*>(pattern_));
+  free(const_cast<char*>(full_pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+  return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str);
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+  return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str);
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+  pattern_ = full_pattern_ = NULL;
+  if (regex != NULL) {
+    pattern_ = posix::StrDup(regex);
+  }
+
+  is_valid_ = ValidateRegex(regex);
+  if (!is_valid_) {
+    // No need to calculate the full pattern when the regex is invalid.
+    return;
+  }
+
+  const size_t len = strlen(regex);
+  // Reserves enough bytes to hold the regular expression used for a
+  // full match: we need space to prepend a '^', append a '$', and
+  // terminate the string with '\0'.
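+  // (For example, the pattern "abc" with len == 3 becomes "^abc$" plus the
+  // terminating '\0', which is exactly len + 3 == 6 bytes.)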
+  char* buffer = static_cast<char*>(malloc(len + 3));
+  full_pattern_ = buffer;
+
+  if (*regex != '^')
+    *buffer++ = '^';  // Makes sure full_pattern_ starts with '^'.
+
+  // We don't use snprintf or strncpy, as they trigger a warning when
+  // compiled with VC++ 8.0.
+  memcpy(buffer, regex, len);
+  buffer += len;
+
+  if (len == 0 || regex[len - 1] != '$')
+    *buffer++ = '$';  // Makes sure full_pattern_ ends with '$'.
+
+  *buffer = '\0';
+}
+
+#endif  // GTEST_USES_POSIX_RE
+
+const char kUnknownFile[] = "unknown file";
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) {
+  const std::string file_name(file == NULL ? kUnknownFile : file);
+
+  if (line < 0) {
+    return file_name + ":";
+  }
+#ifdef _MSC_VER
+  return file_name + "(" + StreamableToString(line) + "):";
+#else
+  return file_name + ":" + StreamableToString(line) + ":";
+#endif  // _MSC_VER
+}
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+// Note that FormatCompilerIndependentFileLocation() does NOT append colon
+// to the file location it produces, unlike FormatFileLocation().
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(
+    const char* file, int line) {
+  const std::string file_name(file == NULL ? kUnknownFile : file);
+
+  if (line < 0)
+    return file_name;
+  else
+    return file_name + ":" + StreamableToString(line);
+}
+
+
+GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line)
+    : severity_(severity) {
+  const char* const marker =
+      severity == GTEST_INFO ?    "[  INFO ]" :
+      severity == GTEST_WARNING ? "[WARNING]" :
+      severity == GTEST_ERROR ?   "[ ERROR ]" : "[ FATAL ]";
+  GetStream() << ::std::endl << marker << " "
+              << FormatFileLocation(file, line).c_str() << ": ";
+}
+
+// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+GTestLog::~GTestLog() {
+  GetStream() << ::std::endl;
+  if (severity_ == GTEST_FATAL) {
+    fflush(stderr);
+    posix::Abort();
+  }
+}
+// Disable Microsoft deprecation warnings for POSIX functions called from
+// this class (creat, dup, dup2, and close)
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable: 4996)
+#endif  // _MSC_VER
+
+#if GTEST_HAS_STREAM_REDIRECTION
+
+// Object that captures an output stream (stdout/stderr).
+class CapturedStream {
+ public:
+  // The ctor redirects the stream to a temporary file.
+  explicit CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) {
+# if GTEST_OS_WINDOWS
+    char temp_dir_path[MAX_PATH + 1] = { '\0' };  // NOLINT
+    char temp_file_path[MAX_PATH + 1] = { '\0' };  // NOLINT
+
+    ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path);
+    const UINT success = ::GetTempFileNameA(temp_dir_path,
+                                            "gtest_redir",
+                                            0,  // Generate unique file name.
+                                            temp_file_path);
+    GTEST_CHECK_(success != 0)
+        << "Unable to create a temporary file in " << temp_dir_path;
+    const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE);
+    GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file "
+                                    << temp_file_path;
+    filename_ = temp_file_path;
+# else
+    // There's no guarantee that a test has write access to the current
+    // directory, so we create the temporary file in the /tmp directory
+    // instead. We use /tmp on most systems, and /sdcard on Android.
+    // That's because Android doesn't have /tmp.
+# if GTEST_OS_LINUX_ANDROID + // Note: Android applications are expected to call the framework's + // Context.getExternalStorageDirectory() method through JNI to get + // the location of the world-writable SD Card directory. However, + // this requires a Context handle, which cannot be retrieved + // globally from native code. Doing so also precludes running the + // code as part of a regular standalone executable, which doesn't + // run in a Dalvik process (e.g. when running it through 'adb shell'). + // + // The location /sdcard is directly accessible from native code + // and is the only location (unofficially) supported by the Android + // team. It's generally a symlink to the real SD Card mount point + // which can be /mnt/sdcard, /mnt/sdcard0, /system/media/sdcard, or + // other OEM-customized locations. Never rely on these, and always + // use /sdcard. + char name_template[] = "/sdcard/gtest_captured_stream.XXXXXX"; +# else + char name_template[] = "/tmp/captured_stream.XXXXXX"; +# endif // GTEST_OS_LINUX_ANDROID + const int captured_fd = mkstemp(name_template); + filename_ = name_template; +# endif // GTEST_OS_WINDOWS + fflush(NULL); + dup2(captured_fd, fd_); + close(captured_fd); + } + + ~CapturedStream() { + remove(filename_.c_str()); + } + + std::string GetCapturedString() { + if (uncaptured_fd_ != -1) { + // Restores the original stream. + fflush(NULL); + dup2(uncaptured_fd_, fd_); + close(uncaptured_fd_); + uncaptured_fd_ = -1; + } + + FILE* const file = posix::FOpen(filename_.c_str(), "r"); + const std::string content = ReadEntireFile(file); + posix::FClose(file); + return content; + } + + private: + // Reads the entire content of a file as an std::string. + static std::string ReadEntireFile(FILE* file); + + // Returns the size (in bytes) of a file. + static size_t GetFileSize(FILE* file); + + const int fd_; // A stream to capture. + int uncaptured_fd_; + // Name of the temporary file holding the stderr output. + ::std::string filename_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream); +}; + +// Returns the size (in bytes) of a file. +size_t CapturedStream::GetFileSize(FILE* file) { + fseek(file, 0, SEEK_END); + return static_cast(ftell(file)); +} + +// Reads the entire content of a file as a string. +std::string CapturedStream::ReadEntireFile(FILE* file) { + const size_t file_size = GetFileSize(file); + char* const buffer = new char[file_size]; + + size_t bytes_last_read = 0; // # of bytes read in the last fread() + size_t bytes_read = 0; // # of bytes read so far + + fseek(file, 0, SEEK_SET); + + // Keeps reading the file until we cannot read further or the + // pre-determined file size is reached. + do { + bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file); + bytes_read += bytes_last_read; + } while (bytes_last_read > 0 && bytes_read < file_size); + + const std::string content(buffer, bytes_read); + delete[] buffer; + + return content; +} + +# ifdef _MSC_VER +# pragma warning(pop) +# endif // _MSC_VER + +static CapturedStream* g_captured_stderr = NULL; +static CapturedStream* g_captured_stdout = NULL; + +// Starts capturing an output stream (stdout/stderr). +void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) { + if (*stream != NULL) { + GTEST_LOG_(FATAL) << "Only one " << stream_name + << " capturer can exist at a time."; + } + *stream = new CapturedStream(fd); +} + +// Stops capturing the output stream and returns the captured string. 
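+//
+// Typical use from a test body (sketch; CaptureStdout() and
+// GetCapturedStdout() below are the public wrappers over these helpers):
+//
+//   testing::internal::CaptureStdout();
+//   printf("hello\n");
+//   const std::string output = testing::internal::GetCapturedStdout();
+//   EXPECT_EQ("hello\n", output);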
+std::string GetCapturedStream(CapturedStream** captured_stream) {
+  const std::string content = (*captured_stream)->GetCapturedString();
+
+  delete *captured_stream;
+  *captured_stream = NULL;
+
+  return content;
+}
+
+// Starts capturing stdout.
+void CaptureStdout() {
+  CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout);
+}
+
+// Starts capturing stderr.
+void CaptureStderr() {
+  CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr);
+}
+
+// Stops capturing stdout and returns the captured string.
+std::string GetCapturedStdout() {
+  return GetCapturedStream(&g_captured_stdout);
+}
+
+// Stops capturing stderr and returns the captured string.
+std::string GetCapturedStderr() {
+  return GetCapturedStream(&g_captured_stderr);
+}
+
+#endif  // GTEST_HAS_STREAM_REDIRECTION
+
+#if GTEST_HAS_DEATH_TEST
+
+// A copy of all command line arguments. Set by InitGoogleTest().
+::std::vector<testing::internal::string> g_argvs;
+
+static const ::std::vector<testing::internal::string>* g_injected_test_argvs =
+    NULL;  // Owned.
+
+void SetInjectableArgvs(const ::std::vector<testing::internal::string>* argvs) {
+  if (g_injected_test_argvs != argvs)
+    delete g_injected_test_argvs;
+  g_injected_test_argvs = argvs;
+}
+
+const ::std::vector<testing::internal::string>& GetInjectableArgvs() {
+  if (g_injected_test_argvs != NULL) {
+    return *g_injected_test_argvs;
+  }
+  return g_argvs;
+}
+#endif  // GTEST_HAS_DEATH_TEST
+
+#if GTEST_OS_WINDOWS_MOBILE
+namespace posix {
+void Abort() {
+  DebugBreak();
+  TerminateProcess(GetCurrentProcess(), 1);
+}
+}  // namespace posix
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+// Returns the name of the environment variable corresponding to the
+// given flag. For example, FlagToEnvVar("foo") will return
+// "GTEST_FOO" in the open-source version.
+static std::string FlagToEnvVar(const char* flag) {
+  const std::string full_flag =
+      (Message() << GTEST_FLAG_PREFIX_ << flag).GetString();
+
+  Message env_var;
+  for (size_t i = 0; i != full_flag.length(); i++) {
+    env_var << ToUpper(full_flag.c_str()[i]);
+  }
+
+  return env_var.GetString();
+}
+
+// Parses 'str' for a 32-bit signed integer. If successful, writes
+// the result to *value and returns true; otherwise leaves *value
+// unchanged and returns false.
+bool ParseInt32(const Message& src_text, const char* str, Int32* value) {
+  // Parses the environment variable as a decimal integer.
+  char* end = NULL;
+  const long long_value = strtol(str, &end, 10);  // NOLINT
+
+  // Has strtol() consumed all characters in the string?
+  if (*end != '\0') {
+    // No - an invalid character was encountered.
+    Message msg;
+    msg << "WARNING: " << src_text
+        << " is expected to be a 32-bit integer, but actually"
+        << " has value \"" << str << "\".\n";
+    printf("%s", msg.GetString().c_str());
+    fflush(stdout);
+    return false;
+  }
+
+  // Is the parsed value in the range of an Int32?
+  const Int32 result = static_cast<Int32>(long_value);
+  if (long_value == LONG_MAX || long_value == LONG_MIN ||
+      // The parsed value overflows as a long. (strtol() returns
+      // LONG_MAX or LONG_MIN when the input overflows.)
+      result != long_value
+      // The parsed value overflows as an Int32.
+      ) {
+    Message msg;
+    msg << "WARNING: " << src_text
+        << " is expected to be a 32-bit integer, but actually"
+        << " has value " << str << ", which overflows.\n";
+    printf("%s", msg.GetString().c_str());
+    fflush(stdout);
+    return false;
+  }
+
+  *value = result;
+  return true;
+}
+
+// Reads and returns the Boolean environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+//
+// The value is considered true iff it's not "0".
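+//
+// For example (sketch): with GTEST_FLAG_PREFIX_ being "gtest_",
+// BoolFromGTestEnv("break_on_failure", false) consults the environment
+// variable GTEST_BREAK_ON_FAILURE; unset yields the default (false),
+// "0" yields false, and any other value (e.g. "1") yields true.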
+bool BoolFromGTestEnv(const char* flag, bool default_value) { + const std::string env_var = FlagToEnvVar(flag); + const char* const string_value = posix::GetEnv(env_var.c_str()); + return string_value == NULL ? + default_value : strcmp(string_value, "0") != 0; +} + +// Reads and returns a 32-bit integer stored in the environment +// variable corresponding to the given flag; if it isn't set or +// doesn't represent a valid 32-bit integer, returns default_value. +Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) { + const std::string env_var = FlagToEnvVar(flag); + const char* const string_value = posix::GetEnv(env_var.c_str()); + if (string_value == NULL) { + // The environment variable is not set. + return default_value; + } + + Int32 result = default_value; + if (!ParseInt32(Message() << "Environment variable " << env_var, + string_value, &result)) { + printf("The default value %s is used.\n", + (Message() << default_value).GetString().c_str()); + fflush(stdout); + return default_value; + } + + return result; +} + +// Reads and returns the string environment variable corresponding to +// the given flag; if it's not set, returns default_value. +const char* StringFromGTestEnv(const char* flag, const char* default_value) { + const std::string env_var = FlagToEnvVar(flag); + const char* const value = posix::GetEnv(env_var.c_str()); + return value == NULL ? default_value : value; +} + +} // namespace internal +} // namespace testing +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Test - The Google C++ Testing Framework +// +// This file implements a universal value printer that can print a +// value of any type T: +// +// void ::testing::internal::UniversalPrinter::Print(value, ostream_ptr); +// +// It uses the << operator when possible, and prints the bytes in the +// object otherwise. 
A user can override its behavior for a class
+// type Foo by defining either operator<<(::std::ostream&, const Foo&)
+// or void PrintTo(const Foo&, ::std::ostream*) in the namespace that
+// defines Foo.
+
+#include <ctype.h>
+#include <stdio.h>
+#include <ostream>  // NOLINT
+#include <string>
+
+namespace testing {
+
+namespace {
+
+using ::std::ostream;
+
+// Prints a segment of bytes in the given object.
+void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start,
+                                size_t count, ostream* os) {
+  char text[5] = "";
+  for (size_t i = 0; i != count; i++) {
+    const size_t j = start + i;
+    if (i != 0) {
+      // Organizes the bytes into groups of 2 for easy parsing by
+      // human.
+      if ((j % 2) == 0)
+        *os << ' ';
+      else
+        *os << '-';
+    }
+    GTEST_SNPRINTF_(text, sizeof(text), "%02X", obj_bytes[j]);
+    *os << text;
+  }
+}
+
+// Prints the bytes in the given value to the given ostream.
+void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count,
+                              ostream* os) {
+  // Tells the user how big the object is.
+  *os << count << "-byte object <";
+
+  const size_t kThreshold = 132;
+  const size_t kChunkSize = 64;
+  // If the object size is bigger than kThreshold, we'll have to omit
+  // some details by printing only the first and the last kChunkSize
+  // bytes.
+  // TODO(wan): let the user control the threshold using a flag.
+  if (count < kThreshold) {
+    PrintByteSegmentInObjectTo(obj_bytes, 0, count, os);
+  } else {
+    PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os);
+    *os << " ... ";
+    // Rounds up to 2-byte boundary.
+    const size_t resume_pos = (count - kChunkSize + 1)/2*2;
+    PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os);
+  }
+  *os << ">";
+}
+
+}  // namespace
+
+namespace internal2 {
+
+// Delegates to PrintBytesInObjectToImpl() to print the bytes in the
+// given object. The delegation simplifies the implementation, which
+// uses the << operator and thus is easier done outside of the
+// ::testing::internal namespace, which contains a << operator that
+// sometimes conflicts with the one in STL.
+void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count,
+                          ostream* os) {
+  PrintBytesInObjectToImpl(obj_bytes, count, os);
+}
+
+}  // namespace internal2
+
+namespace internal {
+
+// Depending on the value of a char (or wchar_t), we print it in one
+// of three formats:
+//   - as is if it's a printable ASCII (e.g. 'a', '2', ' '),
+//   - as a hexadecimal escape sequence (e.g. '\x7F'), or
+//   - as a special escape sequence (e.g. '\r', '\n').
+enum CharFormat {
+  kAsIs,
+  kHexEscape,
+  kSpecialEscape
+};
+
+// Returns true if c is a printable ASCII character. We test the
+// value of c directly instead of calling isprint(), which is buggy on
+// Windows Mobile.
+inline bool IsPrintableAscii(wchar_t c) {
+  return 0x20 <= c && c <= 0x7E;
+}
+
+// Prints a wide or narrow char c as a character literal without the
+// quotes, escaping it when necessary; returns how c was formatted.
+// The template argument UnsignedChar is the unsigned version of Char,
+// which is the type of c.
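+//
+// For instance (sketch of the three possible outcomes):
+//
+//   PrintAsCharLiteralTo<unsigned char>('a', os);     // prints a,    kAsIs
+//   PrintAsCharLiteralTo<unsigned char>('\n', os);    // prints \n,   kSpecialEscape
+//   PrintAsCharLiteralTo<unsigned char>('\x7F', os);  // prints \x7F, kHexEscape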
+template <typename UnsignedChar, typename Char>
+static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
+  switch (static_cast<wchar_t>(c)) {
+    case L'\0':
+      *os << "\\0";
+      break;
+    case L'\'':
+      *os << "\\'";
+      break;
+    case L'\\':
+      *os << "\\\\";
+      break;
+    case L'\a':
+      *os << "\\a";
+      break;
+    case L'\b':
+      *os << "\\b";
+      break;
+    case L'\f':
+      *os << "\\f";
+      break;
+    case L'\n':
+      *os << "\\n";
+      break;
+    case L'\r':
+      *os << "\\r";
+      break;
+    case L'\t':
+      *os << "\\t";
+      break;
+    case L'\v':
+      *os << "\\v";
+      break;
+    default:
+      if (IsPrintableAscii(c)) {
+        *os << static_cast<char>(c);
+        return kAsIs;
+      } else {
+        *os << "\\x" + String::FormatHexInt(static_cast<UnsignedChar>(c));
+        return kHexEscape;
+      }
+  }
+  return kSpecialEscape;
+}
+
+// Prints a wchar_t c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsStringLiteralTo(wchar_t c, ostream* os) {
+  switch (c) {
+    case L'\'':
+      *os << "'";
+      return kAsIs;
+    case L'"':
+      *os << "\\\"";
+      return kSpecialEscape;
+    default:
+      return PrintAsCharLiteralTo<wchar_t>(c, os);
+  }
+}
+
+// Prints a char c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsStringLiteralTo(char c, ostream* os) {
+  return PrintAsStringLiteralTo(
+      static_cast<wchar_t>(static_cast<unsigned char>(c)), os);
+}
+
+// Prints a wide or narrow character c and its code. '\0' is printed
+// as "'\\0'", other unprintable characters are also properly escaped
+// using the standard C++ escape sequence. The template argument
+// UnsignedChar is the unsigned version of Char, which is the type of c.
+template <typename UnsignedChar, typename Char>
+void PrintCharAndCodeTo(Char c, ostream* os) {
+  // First, print c as a literal in the most readable form we can find.
+  *os << ((sizeof(c) > 1) ? "L'" : "'");
+  const CharFormat format = PrintAsCharLiteralTo<UnsignedChar>(c, os);
+  *os << "'";
+
+  // To aid user debugging, we also print c's code in decimal, unless
+  // it's 0 (in which case c was printed as '\\0', making the code
+  // obvious).
+  if (c == 0)
+    return;
+  *os << " (" << static_cast<int>(c);
+
+  // For more convenience, we print c's code again in hexadecimal,
+  // unless c was already printed in the form '\x##' or the code is in
+  // [1, 9].
+  if (format == kHexEscape || (1 <= c && c <= 9)) {
+    // Do nothing.
+  } else {
+    *os << ", 0x" << String::FormatHexInt(static_cast<UnsignedChar>(c));
+  }
+  *os << ")";
+}
+
+void PrintTo(unsigned char c, ::std::ostream* os) {
+  PrintCharAndCodeTo<unsigned char>(c, os);
+}
+void PrintTo(signed char c, ::std::ostream* os) {
+  PrintCharAndCodeTo<unsigned char>(c, os);
+}
+
+// Prints a wchar_t as a symbol if it is printable or as its internal
+// code otherwise and also as its code. L'\0' is printed as "L'\\0'".
+void PrintTo(wchar_t wc, ostream* os) {
+  PrintCharAndCodeTo<wchar_t>(wc, os);
+}
+
+// Prints the given array of characters to the ostream. CharType must be either
+// char or wchar_t.
+// The array starts at begin, the length is len, it may include '\0' characters
+// and may not be NUL-terminated.
+template <typename CharType>
+static void PrintCharsAsStringTo(
+    const CharType* begin, size_t len, ostream* os) {
+  const char* const kQuoteBegin = sizeof(CharType) == 1 ? "\"" : "L\"";
+  *os << kQuoteBegin;
+  bool is_previous_hex = false;
+  for (size_t index = 0; index < len; ++index) {
+    const CharType cur = begin[index];
+    if (is_previous_hex && IsXDigit(cur)) {
+      // Previous character is of '\x..' form and this character can be
+      // interpreted as another hexadecimal digit in its number. Break string to
+      // disambiguate.
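+      // E.g. the bytes { '\x1A', 'b' } print as "\x1A" "b" rather than as
+      // the ambiguous "\x1Ab".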
+ *os << "\" " << kQuoteBegin; + } + is_previous_hex = PrintAsStringLiteralTo(cur, os) == kHexEscape; + } + *os << "\""; +} + +// Prints a (const) char/wchar_t array of 'len' elements, starting at address +// 'begin'. CharType must be either char or wchar_t. +template +static void UniversalPrintCharArray( + const CharType* begin, size_t len, ostream* os) { + // The code + // const char kFoo[] = "foo"; + // generates an array of 4, not 3, elements, with the last one being '\0'. + // + // Therefore when printing a char array, we don't print the last element if + // it's '\0', such that the output matches the string literal as it's + // written in the source code. + if (len > 0 && begin[len - 1] == '\0') { + PrintCharsAsStringTo(begin, len - 1, os); + return; + } + + // If, however, the last element in the array is not '\0', e.g. + // const char kFoo[] = { 'f', 'o', 'o' }; + // we must print the entire array. We also print a message to indicate + // that the array is not NUL-terminated. + PrintCharsAsStringTo(begin, len, os); + *os << " (no terminating NUL)"; +} + +// Prints a (const) char array of 'len' elements, starting at address 'begin'. +void UniversalPrintArray(const char* begin, size_t len, ostream* os) { + UniversalPrintCharArray(begin, len, os); +} + +// Prints a (const) wchar_t array of 'len' elements, starting at address +// 'begin'. +void UniversalPrintArray(const wchar_t* begin, size_t len, ostream* os) { + UniversalPrintCharArray(begin, len, os); +} + +// Prints the given C string to the ostream. +void PrintTo(const char* s, ostream* os) { + if (s == NULL) { + *os << "NULL"; + } else { + *os << ImplicitCast_(s) << " pointing to "; + PrintCharsAsStringTo(s, strlen(s), os); + } +} + +// MSVC compiler can be configured to define whar_t as a typedef +// of unsigned short. Defining an overload for const wchar_t* in that case +// would cause pointers to unsigned shorts be printed as wide strings, +// possibly accessing more memory than intended and causing invalid +// memory accesses. MSVC defines _NATIVE_WCHAR_T_DEFINED symbol when +// wchar_t is implemented as a native type. +#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED) +// Prints the given wide C string to the ostream. +void PrintTo(const wchar_t* s, ostream* os) { + if (s == NULL) { + *os << "NULL"; + } else { + *os << ImplicitCast_(s) << " pointing to "; + PrintCharsAsStringTo(s, wcslen(s), os); + } +} +#endif // wchar_t is native + +// Prints a ::string object. +#if GTEST_HAS_GLOBAL_STRING +void PrintStringTo(const ::string& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_GLOBAL_STRING + +void PrintStringTo(const ::std::string& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} + +// Prints a ::wstring object. +#if GTEST_HAS_GLOBAL_WSTRING +void PrintWideStringTo(const ::wstring& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +#if GTEST_HAS_STD_WSTRING +void PrintWideStringTo(const ::std::wstring& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_STD_WSTRING + +} // namespace internal + +} // namespace testing +// Copyright 2008, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// +// The Google C++ Testing Framework (Google Test) + + +// Indicates that this translation unit is part of Google Test's +// implementation. It must come before gtest-internal-inl.h is +// included, or there will be a compiler error. This trick is to +// prevent a user from accidentally including gtest-internal-inl.h in +// his code. +#define GTEST_IMPLEMENTATION_ 1 +#undef GTEST_IMPLEMENTATION_ + +namespace testing { + +using internal::GetUnitTestImpl; + +// Gets the summary of the failure message by omitting the stack trace +// in it. +std::string TestPartResult::ExtractSummary(const char* message) { + const char* const stack_trace = strstr(message, internal::kStackTraceMarker); + return stack_trace == NULL ? message : + std::string(message, stack_trace); +} + +// Prints a TestPartResult object. +std::ostream& operator<<(std::ostream& os, const TestPartResult& result) { + return os + << result.file_name() << ":" << result.line_number() << ": " + << (result.type() == TestPartResult::kSuccess ? "Success" : + result.type() == TestPartResult::kFatalFailure ? "Fatal failure" : + "Non-fatal failure") << ":\n" + << result.message() << std::endl; +} + +// Appends a TestPartResult to the array. +void TestPartResultArray::Append(const TestPartResult& result) { + array_.push_back(result); +} + +// Returns the TestPartResult at the given index (0-based). +const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const { + if (index < 0 || index >= size()) { + printf("\nInvalid index (%d) into TestPartResultArray.\n", index); + internal::posix::Abort(); + } + + return array_[index]; +} + +// Returns the number of TestPartResult objects in the array. 
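+//
+// Together with Append() and GetTestPartResult(), this supports simple
+// iteration over captured results (sketch):
+//
+//   for (int i = 0; i < results.size(); i++)
+//     std::cout << results.GetTestPartResult(i);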
+int TestPartResultArray::size() const { + return static_cast(array_.size()); +} + +namespace internal { + +HasNewFatalFailureHelper::HasNewFatalFailureHelper() + : has_new_fatal_failure_(false), + original_reporter_(GetUnitTestImpl()-> + GetTestPartResultReporterForCurrentThread()) { + GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this); +} + +HasNewFatalFailureHelper::~HasNewFatalFailureHelper() { + GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread( + original_reporter_); +} + +void HasNewFatalFailureHelper::ReportTestPartResult( + const TestPartResult& result) { + if (result.fatally_failed()) + has_new_fatal_failure_ = true; + original_reporter_->ReportTestPartResult(result); +} + +} // namespace internal + +} // namespace testing +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + + +namespace testing { +namespace internal { + +#if GTEST_HAS_TYPED_TEST_P + +// Skips to the first non-space char in str. Returns an empty string if str +// contains only whitespace characters. +static const char* SkipSpaces(const char* str) { + while (IsSpace(*str)) + str++; + return str; +} + +// Verifies that registered_tests match the test names in +// defined_test_names_; returns registered_tests if successful, or +// aborts the program otherwise. +const char* TypedTestCasePState::VerifyRegisteredTestNames( + const char* file, int line, const char* registered_tests) { + typedef ::std::set::const_iterator DefinedTestIter; + registered_ = true; + + // Skip initial whitespace in registered_tests since some + // preprocessors prefix stringizied literals with whitespace. 
+ registered_tests = SkipSpaces(registered_tests); + + Message errors; + ::std::set tests; + for (const char* names = registered_tests; names != NULL; + names = SkipComma(names)) { + const std::string name = GetPrefixUntilComma(names); + if (tests.count(name) != 0) { + errors << "Test " << name << " is listed more than once.\n"; + continue; + } + + bool found = false; + for (DefinedTestIter it = defined_test_names_.begin(); + it != defined_test_names_.end(); + ++it) { + if (name == *it) { + found = true; + break; + } + } + + if (found) { + tests.insert(name); + } else { + errors << "No test named " << name + << " can be found in this test case.\n"; + } + } + + for (DefinedTestIter it = defined_test_names_.begin(); + it != defined_test_names_.end(); + ++it) { + if (tests.count(*it) == 0) { + errors << "You forgot to list test " << *it << ".\n"; + } + } + + const std::string& errors_str = errors.GetString(); + if (errors_str != "") { + fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(), + errors_str.c_str()); + fflush(stderr); + posix::Abort(); + } + + return registered_tests; +} + +#endif // GTEST_HAS_TYPED_TEST_P + +} // namespace internal +} // namespace testing diff --git a/external/gtest/fused-src/gtest/gtest.h b/external/gtest/fused-src/gtest/gtest.h new file mode 100755 index 0000000000..4f3804f703 --- /dev/null +++ b/external/gtest/fused-src/gtest/gtest.h @@ -0,0 +1,20061 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the public API for Google Test. It should be +// included by any test program that uses Google Test. +// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. +// They are clearly marked by comments like this: +// +// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. 
+//
+// Such code is NOT meant to be used by a user directly, and is subject
+// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user
+// program!
+//
+// Acknowledgment: Google Test borrowed the idea of automatic test
+// registration from Barthelemy Dagenais' (barthelemy@prologique.com)
+// easyUnit framework.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_H_
+
+#include <limits>
+#include <ostream>
+#include <vector>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares functions and macros used internally by
+// Google Test. They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan) +// +// Low-level types and utilities for porting Google Test to various +// platforms. They are subject to change without notice. DO NOT USE +// THEM IN USER CODE. +// +// This file is fundamental to Google Test. All other Google Test source +// files are expected to #include this. Therefore, it cannot #include +// any other Google Test header. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ + +// The user can define the following macros in the build script to +// control Google Test's behavior. If the user doesn't define a macro +// in this list, Google Test will define it. +// +// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2) +// is/isn't available. +// GTEST_HAS_EXCEPTIONS - Define it to 1/0 to indicate that exceptions +// are enabled. +// GTEST_HAS_GLOBAL_STRING - Define it to 1/0 to indicate that ::string +// is/isn't available (some systems define +// ::string, which is different to std::string). +// GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::string +// is/isn't available (some systems define +// ::wstring, which is different to std::wstring). +// GTEST_HAS_POSIX_RE - Define it to 1/0 to indicate that POSIX regular +// expressions are/aren't available. +// GTEST_HAS_PTHREAD - Define it to 1/0 to indicate that +// is/isn't available. +// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't +// enabled. +// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that +// std::wstring does/doesn't work (Google Test can +// be used where std::wstring is unavailable). +// GTEST_HAS_TR1_TUPLE - Define it to 1/0 to indicate tr1::tuple +// is/isn't available. +// GTEST_HAS_SEH - Define it to 1/0 to indicate whether the +// compiler supports Microsoft's "Structured +// Exception Handling". +// GTEST_HAS_STREAM_REDIRECTION +// - Define it to 1/0 to indicate whether the +// platform supports I/O stream redirection using +// dup() and dup2(). +// GTEST_USE_OWN_TR1_TUPLE - Define it to 1/0 to indicate whether Google +// Test's own tr1 tuple implementation should be +// used. Unused when the user sets +// GTEST_HAS_TR1_TUPLE to 0. +// GTEST_LANG_CXX11 - Define it to 1/0 to indicate that Google Test +// is building in C++11/C++98 mode. +// GTEST_LINKED_AS_SHARED_LIBRARY +// - Define to 1 when compiling tests that use +// Google Test as a shared library (known as +// DLL on Windows). +// GTEST_CREATE_SHARED_LIBRARY +// - Define to 1 when compiling Google Test itself +// as a shared library. 
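+//
+// For instance (editor's illustration, using the documented knobs above), a
+// build script could pass -DGTEST_HAS_PTHREAD=0 to compile Google Test
+// without its pthread-based constructs, or -DGTEST_HAS_RTTI=0 when building
+// with RTTI disabled.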
+ +// This header defines the following utilities: +// +// Macros indicating the current platform (defined to 1 if compiled on +// the given platform; otherwise undefined): +// GTEST_OS_AIX - IBM AIX +// GTEST_OS_CYGWIN - Cygwin +// GTEST_OS_HPUX - HP-UX +// GTEST_OS_LINUX - Linux +// GTEST_OS_LINUX_ANDROID - Google Android +// GTEST_OS_MAC - Mac OS X +// GTEST_OS_IOS - iOS +// GTEST_OS_IOS_SIMULATOR - iOS simulator +// GTEST_OS_NACL - Google Native Client (NaCl) +// GTEST_OS_OPENBSD - OpenBSD +// GTEST_OS_QNX - QNX +// GTEST_OS_SOLARIS - Sun Solaris +// GTEST_OS_SYMBIAN - Symbian +// GTEST_OS_WINDOWS - Windows (Desktop, MinGW, or Mobile) +// GTEST_OS_WINDOWS_DESKTOP - Windows Desktop +// GTEST_OS_WINDOWS_MINGW - MinGW +// GTEST_OS_WINDOWS_MOBILE - Windows Mobile +// GTEST_OS_ZOS - z/OS +// +// Among the platforms, Cygwin, Linux, Max OS X, and Windows have the +// most stable support. Since core members of the Google Test project +// don't have access to other platforms, support for them may be less +// stable. If you notice any problems on your platform, please notify +// googletestframework@googlegroups.com (patches for fixing them are +// even more welcome!). +// +// Note that it is possible that none of the GTEST_OS_* macros are defined. +// +// Macros indicating available Google Test features (defined to 1 if +// the corresponding feature is supported; otherwise undefined): +// GTEST_HAS_COMBINE - the Combine() function (for value-parameterized +// tests) +// GTEST_HAS_DEATH_TEST - death tests +// GTEST_HAS_PARAM_TEST - value-parameterized tests +// GTEST_HAS_TYPED_TEST - typed tests +// GTEST_HAS_TYPED_TEST_P - type-parameterized tests +// GTEST_USES_POSIX_RE - enhanced POSIX regex is used. Do not confuse with +// GTEST_HAS_POSIX_RE (see above) which users can +// define themselves. +// GTEST_USES_SIMPLE_RE - our own simple regex is used; +// the above two are mutually exclusive. +// GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ(). +// +// Macros for basic C++ coding: +// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning. +// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a +// variable don't have to be used. +// GTEST_DISALLOW_ASSIGN_ - disables operator=. +// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=. +// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used. +// +// Synchronization: +// Mutex, MutexLock, ThreadLocal, GetThreadCount() +// - synchronization primitives. +// GTEST_IS_THREADSAFE - defined to 1 to indicate that the above +// synchronization primitives have real implementations +// and Google Test is thread-safe; or 0 otherwise. +// +// Template meta programming: +// is_pointer - as in TR1; needed on Symbian and IBM XL C/C++ only. +// IteratorTraits - partial implementation of std::iterator_traits, which +// is not available in libCstd when compiled with Sun C++. +// +// Smart pointers: +// scoped_ptr - as in TR2. +// +// Regular expressions: +// RE - a simple regular expression class using the POSIX +// Extended Regular Expression syntax on UNIX-like +// platforms, or a reduced regular exception syntax on +// other platforms, including Windows. +// +// Logging: +// GTEST_LOG_() - logs messages at the specified severity level. +// LogToStderr() - directs all log messages to stderr. +// FlushInfoLog() - flushes informational log messages. +// +// Stdout and stderr capturing: +// CaptureStdout() - starts capturing stdout. 
+//   GetCapturedStdout()  - stops capturing stdout and returns the captured
+//                          string.
+//   CaptureStderr()      - starts capturing stderr.
+//   GetCapturedStderr()  - stops capturing stderr and returns the captured
+//                          string.
+//
+// Integer types:
+//   TypeWithSize   - maps an integer to an int type.
+//   Int32, UInt32, Int64, UInt64, TimeInMillis
+//                  - integers of known sizes.
+//   BiggestInt     - the biggest signed integer type.
+//
+// Command-line utilities:
+//   GTEST_FLAG()       - references a flag.
+//   GTEST_DECLARE_*()  - declares a flag.
+//   GTEST_DEFINE_*()   - defines a flag.
+//   GetInjectableArgvs() - returns the command line as a vector of strings.
+//
+// Environment variable utilities:
+//   GetEnv()             - gets the value of an environment variable.
+//   BoolFromGTestEnv()   - parses a bool environment variable.
+//   Int32FromGTestEnv()  - parses an Int32 environment variable.
+//   StringFromGTestEnv() - parses a string environment variable.
+
+#include <ctype.h>   // for isspace, etc
+#include <stddef.h>  // for ptrdiff_t
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#ifndef _WIN32_WCE
+# include <sys/types.h>
+# include <sys/stat.h>
+#endif  // !_WIN32_WCE
+
+#if defined __APPLE__
+# include <AvailabilityMacros.h>
+# include <TargetConditionals.h>
+#endif
+
+#include <iostream>  // NOLINT
+#include <sstream>  // NOLINT
+#include <string>  // NOLINT
+
+#define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com"
+#define GTEST_FLAG_PREFIX_ "gtest_"
+#define GTEST_FLAG_PREFIX_DASH_ "gtest-"
+#define GTEST_FLAG_PREFIX_UPPER_ "GTEST_"
+#define GTEST_NAME_ "Google Test"
+#define GTEST_PROJECT_URL_ "http://code.google.com/p/googletest/"
+
+// Determines the version of gcc that is used to compile this.
+#ifdef __GNUC__
+// 40302 means version 4.3.2.
+# define GTEST_GCC_VER_ \
+    (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__)
+#endif  // __GNUC__
+
+// Determines the platform on which Google Test is compiled.
+#ifdef __CYGWIN__
+# define GTEST_OS_CYGWIN 1
+#elif defined __SYMBIAN32__
+# define GTEST_OS_SYMBIAN 1
+#elif defined _WIN32
+# define GTEST_OS_WINDOWS 1
+# ifdef _WIN32_WCE
+#  define GTEST_OS_WINDOWS_MOBILE 1
+# elif defined(__MINGW__) || defined(__MINGW32__)
+#  define GTEST_OS_WINDOWS_MINGW 1
+# else
+#  define GTEST_OS_WINDOWS_DESKTOP 1
+# endif  // _WIN32_WCE
+#elif defined __APPLE__
+# define GTEST_OS_MAC 1
+# if TARGET_OS_IPHONE
+#  define GTEST_OS_IOS 1
+#  if TARGET_IPHONE_SIMULATOR
+#   define GTEST_OS_IOS_SIMULATOR 1
+#  endif
+# endif
+#elif defined __linux__
+# define GTEST_OS_LINUX 1
+# if defined __ANDROID__
+#  define GTEST_OS_LINUX_ANDROID 1
+# endif
+#elif defined __MVS__
+# define GTEST_OS_ZOS 1
+#elif defined(__sun) && defined(__SVR4)
+# define GTEST_OS_SOLARIS 1
+#elif defined(_AIX)
+# define GTEST_OS_AIX 1
+#elif defined(__hpux)
+# define GTEST_OS_HPUX 1
+#elif defined __native_client__
+# define GTEST_OS_NACL 1
+#elif defined __OpenBSD__
+# define GTEST_OS_OPENBSD 1
+#elif defined __QNX__
+# define GTEST_OS_QNX 1
+#endif  // __CYGWIN__
+
+#ifndef GTEST_LANG_CXX11
+// gcc and clang define __GXX_EXPERIMENTAL_CXX0X__ when
+// -std={c,gnu}++{0x,11} is passed.  The C++11 standard specifies a
+// value for __cplusplus, and recent versions of clang, gcc, and
+// probably other compilers set that too in C++11 mode.
+# if __GXX_EXPERIMENTAL_CXX0X__ || __cplusplus >= 201103L
+// Compiling in at least C++11 mode.
+#  define GTEST_LANG_CXX11 1
+# else
+#  define GTEST_LANG_CXX11 0
+# endif
+#endif
+
+// Brings in definitions for functions used in the testing::internal::posix
+// namespace (read, write, close, chdir, isatty, stat).  We do not currently
+// use them on Windows Mobile.
+#if !GTEST_OS_WINDOWS
+// This assumes that non-Windows OSes provide unistd.h. For OSes where this
+// is not the case, we need to include headers that provide the functions
+// mentioned above.
+# include <unistd.h>
+# include <strings.h>
+#elif !GTEST_OS_WINDOWS_MOBILE
+# include <direct.h>
+# include <io.h>
+#endif
+
+#if GTEST_OS_LINUX_ANDROID
+// Used to define __ANDROID_API__ matching the target NDK API level.
+# include <android/api-level.h>  // NOLINT
+#endif
+
+// Defines this to true iff Google Test can use POSIX regular expressions.
+#ifndef GTEST_HAS_POSIX_RE
+# if GTEST_OS_LINUX_ANDROID
+// On Android, <regex.h> is only available starting with Gingerbread.
+#  define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9)
+# else
+#  define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS)
+# endif
+#endif
+
+#if GTEST_HAS_POSIX_RE
+
+// On some platforms, <regex.h> needs someone to define size_t, and
+// won't compile otherwise.  We can #include it here as we already
+// included <stdlib.h>, which is guaranteed to define size_t through
+// <stddef.h>.
+# include <regex.h>  // NOLINT
+
+# define GTEST_USES_POSIX_RE 1
+
+#elif GTEST_OS_WINDOWS
+
+// <regex.h> is not available on Windows.  Use our own simple regex
+// implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#else
+
+// <regex.h> may not be available on this platform.  Use our own
+// simple regex implementation instead.
+# define GTEST_USES_SIMPLE_RE 1
+
+#endif  // GTEST_HAS_POSIX_RE
+
+#ifndef GTEST_HAS_EXCEPTIONS
+// The user didn't tell us whether exceptions are enabled, so we need
+// to figure it out.
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS
+// macro to enable exceptions, so we'll do the same.
+// Assumes that exceptions are enabled by default.
+#  ifndef _HAS_EXCEPTIONS
+#   define _HAS_EXCEPTIONS 1
+#  endif  // _HAS_EXCEPTIONS
+#  define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS
+# elif defined(__GNUC__) && __EXCEPTIONS
+// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled.
+#  define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__SUNPRO_CC)
+// Sun Pro CC supports exceptions.  However, there is no compile-time way of
+// detecting whether they are enabled or not.  Therefore, we assume that
+// they are enabled unless the user tells us otherwise.
+#  define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__IBMCPP__) && __EXCEPTIONS
+// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled.
+#  define GTEST_HAS_EXCEPTIONS 1
+# elif defined(__HP_aCC)
+// Exception handling is in effect by default in HP aCC compiler.  It has to
+// be turned off by +noeh compiler option if desired.
+#  define GTEST_HAS_EXCEPTIONS 1
+# else
+// For other compilers, we assume exceptions are disabled to be
+// conservative.
+#  define GTEST_HAS_EXCEPTIONS 0
+# endif  // defined(_MSC_VER) || defined(__BORLANDC__)
+#endif  // GTEST_HAS_EXCEPTIONS
+
+#if !defined(GTEST_HAS_STD_STRING)
+// Even though we don't use this macro any longer, we keep it in case
+// some clients still depend on it.
+# define GTEST_HAS_STD_STRING 1
+#elif !GTEST_HAS_STD_STRING
+// The user told us that ::std::string isn't available.
+# error "Google Test cannot be used where ::std::string isn't available."
+#endif  // !defined(GTEST_HAS_STD_STRING)
+
+#ifndef GTEST_HAS_GLOBAL_STRING
+// The user didn't tell us whether ::string is available, so we need
+// to figure it out.
+
+# define GTEST_HAS_GLOBAL_STRING 0
+
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+#ifndef GTEST_HAS_STD_WSTRING
+// The user didn't tell us whether ::std::wstring is available, so we need
+// to figure it out.
+// TODO(wan@google.com): uses autoconf to detect whether ::std::wstring +// is available. + +// Cygwin 1.7 and below doesn't support ::std::wstring. +// Solaris' libc++ doesn't support it either. Android has +// no support for it at least as recent as Froyo (2.2). +# define GTEST_HAS_STD_WSTRING \ + (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS)) + +#endif // GTEST_HAS_STD_WSTRING + +#ifndef GTEST_HAS_GLOBAL_WSTRING +// The user didn't tell us whether ::wstring is available, so we need +// to figure it out. +# define GTEST_HAS_GLOBAL_WSTRING \ + (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING) +#endif // GTEST_HAS_GLOBAL_WSTRING + +// Determines whether RTTI is available. +#ifndef GTEST_HAS_RTTI +// The user didn't tell us whether RTTI is enabled, so we need to +// figure it out. + +# ifdef _MSC_VER + +# ifdef _CPPRTTI // MSVC defines this macro iff RTTI is enabled. +# define GTEST_HAS_RTTI 1 +# else +# define GTEST_HAS_RTTI 0 +# endif + +// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled. +# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302) + +# ifdef __GXX_RTTI +// When building against STLport with the Android NDK and with +// -frtti -fno-exceptions, the build fails at link time with undefined +// references to __cxa_bad_typeid. Note sure if STL or toolchain bug, +// so disable RTTI when detected. +# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \ + !defined(__EXCEPTIONS) +# define GTEST_HAS_RTTI 0 +# else +# define GTEST_HAS_RTTI 1 +# endif // GTEST_OS_LINUX_ANDROID && __STLPORT_MAJOR && !__EXCEPTIONS +# else +# define GTEST_HAS_RTTI 0 +# endif // __GXX_RTTI + +// Clang defines __GXX_RTTI starting with version 3.0, but its manual recommends +// using has_feature instead. has_feature(cxx_rtti) is supported since 2.7, the +// first version with C++ support. +# elif defined(__clang__) + +# define GTEST_HAS_RTTI __has_feature(cxx_rtti) + +// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if +// both the typeid and dynamic_cast features are present. +# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900) + +# ifdef __RTTI_ALL__ +# define GTEST_HAS_RTTI 1 +# else +# define GTEST_HAS_RTTI 0 +# endif + +# else + +// For all other compilers, we assume RTTI is enabled. +# define GTEST_HAS_RTTI 1 + +# endif // _MSC_VER + +#endif // GTEST_HAS_RTTI + +// It's this header's responsibility to #include when RTTI +// is enabled. +#if GTEST_HAS_RTTI +# include +#endif + +// Determines whether Google Test can use the pthreads library. +#ifndef GTEST_HAS_PTHREAD +// The user didn't tell us explicitly, so we assume pthreads support is +// available on Linux and Mac. +// +// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0 +// to your compiler flags. +# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX \ + || GTEST_OS_QNX) +#endif // GTEST_HAS_PTHREAD + +#if GTEST_HAS_PTHREAD +// gtest-port.h guarantees to #include when GTEST_HAS_PTHREAD is +// true. +# include // NOLINT + +// For timespec and nanosleep, used below. +# include // NOLINT +#endif + +// Determines whether Google Test can use tr1/tuple. You can define +// this macro to 0 to prevent Google Test from using tuple (any +// feature depending on tuple with be disabled in this mode). +#ifndef GTEST_HAS_TR1_TUPLE +# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) +// STLport, provided with the Android NDK, has neither or . +# define GTEST_HAS_TR1_TUPLE 0 +# else +// The user didn't tell us not to do it, so we assume it's OK. 
+# define GTEST_HAS_TR1_TUPLE 1 +# endif +#endif // GTEST_HAS_TR1_TUPLE + +// Determines whether Google Test's own tr1 tuple implementation +// should be used. +#ifndef GTEST_USE_OWN_TR1_TUPLE +// The user didn't tell us, so we need to figure it out. + +// We use our own TR1 tuple if we aren't sure the user has an +// implementation of it already. At this time, libstdc++ 4.0.0+ and +// MSVC 2010 are the only mainstream standard libraries that come +// with a TR1 tuple implementation. NVIDIA's CUDA NVCC compiler +// pretends to be GCC by defining __GNUC__ and friends, but cannot +// compile GCC's tuple implementation. MSVC 2008 (9.0) provides TR1 +// tuple in a 323 MB Feature Pack download, which we cannot assume the +// user has. QNX's QCC compiler is a modified GCC but it doesn't +// support TR1 tuple. libc++ only provides std::tuple, in C++11 mode, +// and it can be used with some compilers that define __GNUC__. +# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000) \ + && !GTEST_OS_QNX && !defined(_LIBCPP_VERSION)) || _MSC_VER >= 1600 +# define GTEST_ENV_HAS_TR1_TUPLE_ 1 +# endif + +// C++11 specifies that provides std::tuple. Use that if gtest is used +// in C++11 mode and libstdc++ isn't very old (binaries targeting OS X 10.6 +// can build with clang but need to use gcc4.2's libstdc++). +# if GTEST_LANG_CXX11 && (!defined(__GLIBCXX__) || __GLIBCXX__ > 20110325) +# define GTEST_ENV_HAS_STD_TUPLE_ 1 +# endif + +# if GTEST_ENV_HAS_TR1_TUPLE_ || GTEST_ENV_HAS_STD_TUPLE_ +# define GTEST_USE_OWN_TR1_TUPLE 0 +# else +# define GTEST_USE_OWN_TR1_TUPLE 1 +# endif + +#endif // GTEST_USE_OWN_TR1_TUPLE + +// To avoid conditional compilation everywhere, we make it +// gtest-port.h's responsibility to #include the header implementing +// tr1/tuple. +#if GTEST_HAS_TR1_TUPLE + +# if GTEST_USE_OWN_TR1_TUPLE +// This file was GENERATED by command: +// pump.py gtest-tuple.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2009 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: wan@google.com (Zhanyong Wan) + +// Implements a subset of TR1 tuple needed by Google Test and Google Mock. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ + +#include // For ::std::pair. + +// The compiler used in Symbian has a bug that prevents us from declaring the +// tuple template as a friend (it complains that tuple is redefined). This +// hack bypasses the bug by declaring the members that should otherwise be +// private as public. +// Sun Studio versions < 12 also have the above bug. +#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590) +# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public: +#else +# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \ + template friend class tuple; \ + private: +#endif + +// GTEST_n_TUPLE_(T) is the type of an n-tuple. +#define GTEST_0_TUPLE_(T) tuple<> +#define GTEST_1_TUPLE_(T) tuple +#define GTEST_2_TUPLE_(T) tuple +#define GTEST_3_TUPLE_(T) tuple +#define GTEST_4_TUPLE_(T) tuple +#define GTEST_5_TUPLE_(T) tuple +#define GTEST_6_TUPLE_(T) tuple +#define GTEST_7_TUPLE_(T) tuple +#define GTEST_8_TUPLE_(T) tuple +#define GTEST_9_TUPLE_(T) tuple +#define GTEST_10_TUPLE_(T) tuple + +// GTEST_n_TYPENAMES_(T) declares a list of n typenames. +#define GTEST_0_TYPENAMES_(T) +#define GTEST_1_TYPENAMES_(T) typename T##0 +#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1 +#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2 +#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3 +#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4 +#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5 +#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6 +#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, typename T##7 +#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, \ + typename T##7, typename T##8 +#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, \ + typename T##7, typename T##8, typename T##9 + +// In theory, defining stuff in the ::std namespace is undefined +// behavior. We can do this as we are playing the role of a standard +// library vendor. +namespace std { +namespace tr1 { + +template +class tuple; + +// Anything in namespace gtest_internal is Google Test's INTERNAL +// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code. +namespace gtest_internal { + +// ByRef::type is T if T is a reference; otherwise it's const T&. +template +struct ByRef { typedef const T& type; }; // NOLINT +template +struct ByRef { typedef T& type; }; // NOLINT + +// A handy wrapper for ByRef. +#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef::type + +// AddRef::type is T if T is a reference; otherwise it's T&. This +// is the same as tr1::add_reference::type. +template +struct AddRef { typedef T& type; }; // NOLINT +template +struct AddRef { typedef T& type; }; // NOLINT + +// A handy wrapper for AddRef. +#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef::type + +// A helper for implementing get(). 
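+// Get<k>::Field(t) returns a reference to the k-th field of tuple t, and
+// Get<k>::ConstField(t) a const reference (editor's summary; the
+// specializations appear in section 6.1.3.4 below).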
+template class Get; + +// A helper for implementing tuple_element. kIndexValid is true +// iff k < the number of fields in tuple type T. +template +struct TupleElement; + +template +struct TupleElement { + typedef T0 type; +}; + +template +struct TupleElement { + typedef T1 type; +}; + +template +struct TupleElement { + typedef T2 type; +}; + +template +struct TupleElement { + typedef T3 type; +}; + +template +struct TupleElement { + typedef T4 type; +}; + +template +struct TupleElement { + typedef T5 type; +}; + +template +struct TupleElement { + typedef T6 type; +}; + +template +struct TupleElement { + typedef T7 type; +}; + +template +struct TupleElement { + typedef T8 type; +}; + +template +struct TupleElement { + typedef T9 type; +}; + +} // namespace gtest_internal + +template <> +class tuple<> { + public: + tuple() {} + tuple(const tuple& /* t */) {} + tuple& operator=(const tuple& /* t */) { return *this; } +}; + +template +class GTEST_1_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {} + + tuple(const tuple& t) : f0_(t.f0_) {} + + template + tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_1_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) { + f0_ = t.f0_; + return *this; + } + + T0 f0_; +}; + +template +class GTEST_2_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0), + f1_(f1) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {} + + template + tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {} + template + tuple(const ::std::pair& p) : f0_(p.first), f1_(p.second) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_2_TUPLE_(U)& t) { + return CopyFrom(t); + } + template + tuple& operator=(const ::std::pair& p) { + f0_ = p.first; + f1_ = p.second; + return *this; + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + return *this; + } + + T0 f0_; + T1 f1_; +}; + +template +class GTEST_3_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2) : f0_(f0), f1_(f1), f2_(f2) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {} + + template + tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_3_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; +}; + +template +class GTEST_4_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {} + + template + tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), 
f2_(t.f2_), + f3_(t.f3_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_4_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; +}; + +template +class GTEST_5_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, + GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_) {} + + template + tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_5_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; +}; + +template +class GTEST_6_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_) {} + + template + tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_6_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; +}; + +template +class GTEST_7_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3), f4_(f4), f5_(f5), f6_(f6) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {} + + template + tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_7_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; +}; + +template +class GTEST_8_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + 
tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, + GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5), f6_(f6), f7_(f7) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {} + + template + tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_8_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; +}; + +template +class GTEST_9_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7, + GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5), f6_(f6), f7_(f7), f8_(f8) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {} + + template + tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_9_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + f8_ = t.f8_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; + T8 f8_; +}; + +template +class tuple { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(), + f9_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7, + GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {} + + template + tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), + f9_(t.f9_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_10_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = 
t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + f8_ = t.f8_; + f9_ = t.f9_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; + T8 f8_; + T9 f9_; +}; + +// 6.1.3.2 Tuple creation functions. + +// Known limitations: we don't support passing an +// std::tr1::reference_wrapper to make_tuple(). And we don't +// implement tie(). + +inline tuple<> make_tuple() { return tuple<>(); } + +template +inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) { + return GTEST_1_TUPLE_(T)(f0); +} + +template +inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) { + return GTEST_2_TUPLE_(T)(f0, f1); +} + +template +inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) { + return GTEST_3_TUPLE_(T)(f0, f1, f2); +} + +template +inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3) { + return GTEST_4_TUPLE_(T)(f0, f1, f2, f3); +} + +template +inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4) { + return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4); +} + +template +inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5) { + return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5); +} + +template +inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6) { + return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6); +} + +template +inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) { + return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7); +} + +template +inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7, + const T8& f8) { + return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8); +} + +template +inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7, + const T8& f8, const T9& f9) { + return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9); +} + +// 6.1.3.3 Tuple helper classes. + +template struct tuple_size; + +template +struct tuple_size { + static const int value = 0; +}; + +template +struct tuple_size { + static const int value = 1; +}; + +template +struct tuple_size { + static const int value = 2; +}; + +template +struct tuple_size { + static const int value = 3; +}; + +template +struct tuple_size { + static const int value = 4; +}; + +template +struct tuple_size { + static const int value = 5; +}; + +template +struct tuple_size { + static const int value = 6; +}; + +template +struct tuple_size { + static const int value = 7; +}; + +template +struct tuple_size { + static const int value = 8; +}; + +template +struct tuple_size { + static const int value = 9; +}; + +template +struct tuple_size { + static const int value = 10; +}; + +template +struct tuple_element { + typedef typename gtest_internal::TupleElement< + k < (tuple_size::value), k, Tuple>::type type; +}; + +#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element::type + +// 6.1.3.4 Element access. 
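+//
+// Illustrative sketch (editor's example, not from the upstream sources):
+// with the helpers below, element access works as in TR1:
+//
+//   ::std::tr1::tuple<int, char> t = ::std::tr1::make_tuple(5, 'a');
+//   int i  = ::std::tr1::get<0>(t);  // i == 5
+//   char c = ::std::tr1::get<1>(t);  // c == 'a'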
+ +namespace gtest_internal { + +template <> +class Get<0> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple)) + Field(Tuple& t) { return t.f0_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple)) + ConstField(const Tuple& t) { return t.f0_; } +}; + +template <> +class Get<1> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple)) + Field(Tuple& t) { return t.f1_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple)) + ConstField(const Tuple& t) { return t.f1_; } +}; + +template <> +class Get<2> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple)) + Field(Tuple& t) { return t.f2_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple)) + ConstField(const Tuple& t) { return t.f2_; } +}; + +template <> +class Get<3> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple)) + Field(Tuple& t) { return t.f3_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple)) + ConstField(const Tuple& t) { return t.f3_; } +}; + +template <> +class Get<4> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple)) + Field(Tuple& t) { return t.f4_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple)) + ConstField(const Tuple& t) { return t.f4_; } +}; + +template <> +class Get<5> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple)) + Field(Tuple& t) { return t.f5_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple)) + ConstField(const Tuple& t) { return t.f5_; } +}; + +template <> +class Get<6> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple)) + Field(Tuple& t) { return t.f6_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple)) + ConstField(const Tuple& t) { return t.f6_; } +}; + +template <> +class Get<7> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple)) + Field(Tuple& t) { return t.f7_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple)) + ConstField(const Tuple& t) { return t.f7_; } +}; + +template <> +class Get<8> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple)) + Field(Tuple& t) { return t.f8_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple)) + ConstField(const Tuple& t) { return t.f8_; } +}; + +template <> +class Get<9> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple)) + Field(Tuple& t) { return t.f9_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple)) + ConstField(const Tuple& t) { return t.f9_; } +}; + +} // namespace gtest_internal + +template +GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T))) +get(GTEST_10_TUPLE_(T)& t) { + return gtest_internal::Get::Field(t); +} + +template +GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T))) +get(const GTEST_10_TUPLE_(T)& t) { + return gtest_internal::Get::ConstField(t); +} + +// 6.1.3.5 Relational operators + +// We only implement == and !=, as we don't have a need for the rest yet. + +namespace gtest_internal { + +// SameSizeTuplePrefixComparator::Eq(t1, t2) returns true if the +// first k fields of t1 equals the first k fields of t2. +// SameSizeTuplePrefixComparator(k1, k2) would be a compiler error if +// k1 != k2. 
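+// (Added note: Eq<k, k> below recurses on the first k - 1 fields and then
+// compares field k - 1 itself, bottoming out at the <0, 0> specialization,
+// which always returns true.)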
+template +struct SameSizeTuplePrefixComparator; + +template <> +struct SameSizeTuplePrefixComparator<0, 0> { + template + static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) { + return true; + } +}; + +template +struct SameSizeTuplePrefixComparator { + template + static bool Eq(const Tuple1& t1, const Tuple2& t2) { + return SameSizeTuplePrefixComparator::Eq(t1, t2) && + ::std::tr1::get(t1) == ::std::tr1::get(t2); + } +}; + +} // namespace gtest_internal + +template +inline bool operator==(const GTEST_10_TUPLE_(T)& t, + const GTEST_10_TUPLE_(U)& u) { + return gtest_internal::SameSizeTuplePrefixComparator< + tuple_size::value, + tuple_size::value>::Eq(t, u); +} + +template +inline bool operator!=(const GTEST_10_TUPLE_(T)& t, + const GTEST_10_TUPLE_(U)& u) { return !(t == u); } + +// 6.1.4 Pairs. +// Unimplemented. + +} // namespace tr1 +} // namespace std + +#undef GTEST_0_TUPLE_ +#undef GTEST_1_TUPLE_ +#undef GTEST_2_TUPLE_ +#undef GTEST_3_TUPLE_ +#undef GTEST_4_TUPLE_ +#undef GTEST_5_TUPLE_ +#undef GTEST_6_TUPLE_ +#undef GTEST_7_TUPLE_ +#undef GTEST_8_TUPLE_ +#undef GTEST_9_TUPLE_ +#undef GTEST_10_TUPLE_ + +#undef GTEST_0_TYPENAMES_ +#undef GTEST_1_TYPENAMES_ +#undef GTEST_2_TYPENAMES_ +#undef GTEST_3_TYPENAMES_ +#undef GTEST_4_TYPENAMES_ +#undef GTEST_5_TYPENAMES_ +#undef GTEST_6_TYPENAMES_ +#undef GTEST_7_TYPENAMES_ +#undef GTEST_8_TYPENAMES_ +#undef GTEST_9_TYPENAMES_ +#undef GTEST_10_TYPENAMES_ + +#undef GTEST_DECLARE_TUPLE_AS_FRIEND_ +#undef GTEST_BY_REF_ +#undef GTEST_ADD_REF_ +#undef GTEST_TUPLE_ELEMENT_ + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ +# elif GTEST_ENV_HAS_STD_TUPLE_ +# include +// C++11 puts its tuple into the ::std namespace rather than +// ::std::tr1. gtest expects tuple to live in ::std::tr1, so put it there. +// This causes undefined behavior, but supported compilers react in +// the way we intend. +namespace std { +namespace tr1 { +using ::std::get; +using ::std::make_tuple; +using ::std::tuple; +using ::std::tuple_element; +using ::std::tuple_size; +} +} + +# elif GTEST_OS_SYMBIAN + +// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to +// use STLport's tuple implementation, which unfortunately doesn't +// work as the copy of STLport distributed with Symbian is incomplete. +// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to +// use its own tuple implementation. +# ifdef BOOST_HAS_TR1_TUPLE +# undef BOOST_HAS_TR1_TUPLE +# endif // BOOST_HAS_TR1_TUPLE + +// This prevents , which defines +// BOOST_HAS_TR1_TUPLE, from being #included by Boost's . +# define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED +# include + +# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000) +// GCC 4.0+ implements tr1/tuple in the header. This does +// not conform to the TR1 spec, which requires the header to be . + +# if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302 +// Until version 4.3.2, gcc has a bug that causes , +// which is #included by , to not compile when RTTI is +// disabled. _TR1_FUNCTIONAL is the header guard for +// . Hence the following #define is a hack to prevent +// from being included. +# define _TR1_FUNCTIONAL 1 +# include +# undef _TR1_FUNCTIONAL // Allows the user to #include + // if he chooses to. +# else +# include // NOLINT +# endif // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302 + +# else +// If the compiler is not GCC 4.0+, we assume the user is using a +// spec-conforming TR1 implementation. 
+#  include <tuple>  // NOLINT
+# endif  // GTEST_USE_OWN_TR1_TUPLE
+
+#endif  // GTEST_HAS_TR1_TUPLE
+
+// Determines whether clone(2) is supported.
+// Usually it will only be available on Linux, excluding
+// Linux on the Itanium architecture.
+// Also see http://linux.die.net/man/2/clone.
+#ifndef GTEST_HAS_CLONE
+// The user didn't tell us, so we need to figure it out.
+
+# if GTEST_OS_LINUX && !defined(__ia64__)
+#  if GTEST_OS_LINUX_ANDROID
+// On Android, clone() is only available on ARM starting with Gingerbread.
+#   if defined(__arm__) && __ANDROID_API__ >= 9
+#    define GTEST_HAS_CLONE 1
+#   else
+#    define GTEST_HAS_CLONE 0
+#   endif
+#  else
+#   define GTEST_HAS_CLONE 1
+#  endif
+# else
+#  define GTEST_HAS_CLONE 0
+# endif  // GTEST_OS_LINUX && !defined(__ia64__)
+
+#endif  // GTEST_HAS_CLONE
+
+// Determines whether to support stream redirection. This is used to test
+// output correctness and to implement death tests.
+#ifndef GTEST_HAS_STREAM_REDIRECTION
+// By default, we assume that stream redirection is supported on all
+// platforms except known mobile ones.
+# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN
+#  define GTEST_HAS_STREAM_REDIRECTION 0
+# else
+#  define GTEST_HAS_STREAM_REDIRECTION 1
+# endif  // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN
+#endif  // GTEST_HAS_STREAM_REDIRECTION
+
+// Determines whether to support death tests.
+// Google Test does not support death tests for VC 7.1 and earlier as
+// abort() in a VC 7.1 application compiled as GUI in debug config
+// pops up a dialog window that cannot be suppressed programmatically.
+#if (GTEST_OS_LINUX || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+     (GTEST_OS_MAC && !GTEST_OS_IOS) || GTEST_OS_IOS_SIMULATOR || \
+     (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \
+     GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX || \
+     GTEST_OS_OPENBSD || GTEST_OS_QNX)
+# define GTEST_HAS_DEATH_TEST 1
+# include <vector>  // NOLINT
+#endif
+
+// We don't support MSVC 7.1 with exceptions disabled now.  Therefore
+// all the compilers we care about are adequate for supporting
+// value-parameterized tests.
+#define GTEST_HAS_PARAM_TEST 1
+
+// Determines whether to support type-driven tests.
+
+// Typed tests need <typeinfo> and variadic macros, which GCC, VC++ 8.0,
+// Sun Pro CC, IBM Visual Age, and HP aCC support.
+#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \
+    defined(__IBMCPP__) || defined(__HP_aCC)
+# define GTEST_HAS_TYPED_TEST 1
+# define GTEST_HAS_TYPED_TEST_P 1
+#endif
+
+// Determines whether to support Combine(). This only makes sense when
+// value-parameterized tests are enabled.  The implementation doesn't
+// work on Sun Studio since it doesn't understand templated conversion
+// operators.
+#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC)
+# define GTEST_HAS_COMBINE 1
+#endif
+
+// Determines whether the system compiler uses UTF-16 for encoding wide strings.
+#define GTEST_WIDE_STRING_USES_UTF16_ \
+    (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX)
+
+// Determines whether test results can be streamed to a socket.
+#if GTEST_OS_LINUX
+# define GTEST_CAN_STREAM_RESULTS_ 1
+#endif
+
+// Defines some utility macros.
+
+// The GNU compiler emits a warning if nested "if" statements are followed by
+// an "else" statement and braces are not used to explicitly disambiguate the
+// "else" binding.  This leads to problems with code like:
+//
+//   if (gate)
+//     ASSERT_*(condition) << "Some message";
+//
+// The "switch (0) case 0:" idiom is used to suppress this.
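+//
+// For example (editor's sketch, not upstream code), if ASSERT_*() expanded
+// to a bare if/else, a user's trailing 'else' would bind to the macro's
+// hidden 'if' rather than to 'if (gate)':
+//
+//   if (gate)
+//     ASSERT_TRUE(condition) << "Some message";
+//   else
+//     HandleClosedGate();  // oops: binds to the macro's 'if', not 'if (gate)'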
+#ifdef __INTEL_COMPILER +# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ +#else +# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default: // NOLINT +#endif + +// Use this annotation at the end of a struct/class definition to +// prevent the compiler from optimizing away instances that are never +// used. This is useful when all interesting logic happens inside the +// c'tor and / or d'tor. Example: +// +// struct Foo { +// Foo() { ... } +// } GTEST_ATTRIBUTE_UNUSED_; +// +// Also use it after a variable or parameter declaration to tell the +// compiler the variable/parameter does not have to be used. +#if defined(__GNUC__) && !defined(COMPILER_ICC) +# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused)) +#else +# define GTEST_ATTRIBUTE_UNUSED_ +#endif + +// A macro to disallow operator= +// This should be used in the private: declarations for a class. +#define GTEST_DISALLOW_ASSIGN_(type)\ + void operator=(type const &) + +// A macro to disallow copy constructor and operator= +// This should be used in the private: declarations for a class. +#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\ + type(type const &);\ + GTEST_DISALLOW_ASSIGN_(type) + +// Tell the compiler to warn about unused return values for functions declared +// with this macro. The macro should be used on function declarations +// following the argument list: +// +// Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_; +#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC) +# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result)) +#else +# define GTEST_MUST_USE_RESULT_ +#endif // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC + +// Determine whether the compiler supports Microsoft's Structured Exception +// Handling. This is supported by several Windows compilers but generally +// does not exist on any other system. +#ifndef GTEST_HAS_SEH +// The user didn't tell us, so we need to figure it out. + +# if defined(_MSC_VER) || defined(__BORLANDC__) +// These two compilers are known to support SEH. +# define GTEST_HAS_SEH 1 +# else +// Assume no SEH. +# define GTEST_HAS_SEH 0 +# endif + +#endif // GTEST_HAS_SEH + +#ifdef _MSC_VER + +# if GTEST_LINKED_AS_SHARED_LIBRARY +# define GTEST_API_ __declspec(dllimport) +# elif GTEST_CREATE_SHARED_LIBRARY +# define GTEST_API_ __declspec(dllexport) +# endif + +#endif // _MSC_VER + +#ifndef GTEST_API_ +# define GTEST_API_ +#endif + +#ifdef __GNUC__ +// Ask the compiler to never inline a given function. +# define GTEST_NO_INLINE_ __attribute__((noinline)) +#else +# define GTEST_NO_INLINE_ +#endif + +// _LIBCPP_VERSION is defined by the libc++ library from the LLVM project. +#if defined(__GLIBCXX__) || defined(_LIBCPP_VERSION) +# define GTEST_HAS_CXXABI_H_ 1 +#else +# define GTEST_HAS_CXXABI_H_ 0 +#endif + +namespace testing { + +class Message; + +namespace internal { + +// A secret type that Google Test users don't know about. It has no +// definition on purpose. Therefore it's impossible to create a +// Secret object, which is what we want. +class Secret; + +// The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time +// expression is true. For example, you could use it to verify the +// size of a static array: +// +// GTEST_COMPILE_ASSERT_(ARRAYSIZE(content_type_names) == CONTENT_NUM_TYPES, +// content_type_names_incorrect_size); +// +// or to make sure a struct is smaller than a certain size: +// +// GTEST_COMPILE_ASSERT_(sizeof(foo) < 128, foo_too_large); +// +// The second argument to the macro is the name of the variable. 
If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
+
+template <bool>
+struct CompileAssert {
+};
+
+#define GTEST_COMPILE_ASSERT_(expr, msg) \
+  typedef ::testing::internal::CompileAssert<(static_cast<bool>(expr))> \
+      msg[static_cast<bool>(expr) ? 1 : -1] GTEST_ATTRIBUTE_UNUSED_
+
+// Implementation details of GTEST_COMPILE_ASSERT_:
+//
+// - GTEST_COMPILE_ASSERT_ works by defining an array type that has -1
+//   elements (and thus is invalid) when the expression is false.
+//
+// - The simpler definition
+//
+//    #define GTEST_COMPILE_ASSERT_(expr, msg) typedef char msg[(expr) ? 1 : -1]
+//
+//   does not work, as gcc supports variable-length arrays whose sizes
+//   are determined at run-time (this is gcc's extension and not part
+//   of the C++ standard).  As a result, gcc fails to reject the
+//   following code with the simple definition:
+//
+//     int foo;
+//     GTEST_COMPILE_ASSERT_(foo, msg);  // not supposed to compile as foo is
+//                                       // not a compile-time constant.
+//
+// - By using the type CompileAssert<(bool(expr))>, we ensure that
+//   expr is a compile-time constant.  (Template arguments must be
+//   determined at compile-time.)
+//
+// - The outer parentheses in CompileAssert<(bool(expr))> are necessary
+//   to work around a bug in gcc 3.4.4 and 4.0.1.  If we had written
+//
+//     CompileAssert<bool(expr)>
+//
+//   instead, these compilers will refuse to compile
+//
+//     GTEST_COMPILE_ASSERT_(5 > 0, some_message);
+//
+//   (They seem to think the ">" in "5 > 0" marks the end of the
+//   template argument list.)
+//
+// - The array size is (bool(expr) ? 1 : -1), instead of simply
+//
+//     ((expr) ? 1 : -1).
+//
+//   This is to avoid running into a bug in MS VC 7.1, which
+//   causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1.
+
+// StaticAssertTypeEqHelper is used by StaticAssertTypeEq defined in gtest.h.
+//
+// This template is declared, but intentionally undefined.
+template <typename T1, typename T2>
+struct StaticAssertTypeEqHelper;
+
+template <typename T>
+struct StaticAssertTypeEqHelper<T, T> {};
+
+#if GTEST_HAS_GLOBAL_STRING
+typedef ::string string;
+#else
+typedef ::std::string string;
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+typedef ::wstring wstring;
+#elif GTEST_HAS_STD_WSTRING
+typedef ::std::wstring wstring;
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+// A helper for suppressing warnings on constant condition.  It just
+// returns 'condition'.
+GTEST_API_ bool IsTrue(bool condition);
+
+// Defines scoped_ptr.
+
+// This implementation of scoped_ptr is PARTIAL - it only contains
+// enough stuff to satisfy Google Test's need.
+template <typename T>
+class scoped_ptr {
+ public:
+  typedef T element_type;
+
+  explicit scoped_ptr(T* p = NULL) : ptr_(p) {}
+  ~scoped_ptr() { reset(); }
+
+  T& operator*() const { return *ptr_; }
+  T* operator->() const { return ptr_; }
+  T* get() const { return ptr_; }
+
+  T* release() {
+    T* const ptr = ptr_;
+    ptr_ = NULL;
+    return ptr;
+  }
+
+  void reset(T* p = NULL) {
+    if (p != ptr_) {
+      if (IsTrue(sizeof(T) > 0)) {  // Makes sure T is a complete type.
+        delete ptr_;
+      }
+      ptr_ = p;
+    }
+  }
+
+ private:
+  T* ptr_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr);
+};
+
+// Defines RE.
+
+// A simple C++ wrapper for <regex.h>.  It uses the POSIX Extended
+// Regular Expression syntax.
+class GTEST_API_ RE {
+ public:
+  // A copy constructor is required by the Standard to initialize object
+  // references from r-values.
+  RE(const RE& other) { Init(other.pattern()); }
+
+  // Constructs an RE from a string.
+ RE(const ::std::string& regex) { Init(regex.c_str()); } // NOLINT + +#if GTEST_HAS_GLOBAL_STRING + + RE(const ::string& regex) { Init(regex.c_str()); } // NOLINT + +#endif // GTEST_HAS_GLOBAL_STRING + + RE(const char* regex) { Init(regex); } // NOLINT + ~RE(); + + // Returns the string representation of the regex. + const char* pattern() const { return pattern_; } + + // FullMatch(str, re) returns true iff regular expression re matches + // the entire str. + // PartialMatch(str, re) returns true iff regular expression re + // matches a substring of str (including str itself). + // + // TODO(wan@google.com): make FullMatch() and PartialMatch() work + // when str contains NUL characters. + static bool FullMatch(const ::std::string& str, const RE& re) { + return FullMatch(str.c_str(), re); + } + static bool PartialMatch(const ::std::string& str, const RE& re) { + return PartialMatch(str.c_str(), re); + } + +#if GTEST_HAS_GLOBAL_STRING + + static bool FullMatch(const ::string& str, const RE& re) { + return FullMatch(str.c_str(), re); + } + static bool PartialMatch(const ::string& str, const RE& re) { + return PartialMatch(str.c_str(), re); + } + +#endif // GTEST_HAS_GLOBAL_STRING + + static bool FullMatch(const char* str, const RE& re); + static bool PartialMatch(const char* str, const RE& re); + + private: + void Init(const char* regex); + + // We use a const char* instead of an std::string, as Google Test used to be + // used where std::string is not available. TODO(wan@google.com): change to + // std::string. + const char* pattern_; + bool is_valid_; + +#if GTEST_USES_POSIX_RE + + regex_t full_regex_; // For FullMatch(). + regex_t partial_regex_; // For PartialMatch(). + +#else // GTEST_USES_SIMPLE_RE + + const char* full_pattern_; // For FullMatch(); + +#endif + + GTEST_DISALLOW_ASSIGN_(RE); +}; + +// Formats a source file path and a line number as they would appear +// in an error message from the compiler used to compile this code. +GTEST_API_ ::std::string FormatFileLocation(const char* file, int line); + +// Formats a file location for compiler-independent XML output. +// Although this function is not platform dependent, we put it next to +// FormatFileLocation in order to contrast the two functions. +GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file, + int line); + +// Defines logging utilities: +// GTEST_LOG_(severity) - logs messages at the specified severity level. The +// message itself is streamed into the macro. +// LogToStderr() - directs all log messages to stderr. +// FlushInfoLog() - flushes informational log messages. + +enum GTestLogSeverity { + GTEST_INFO, + GTEST_WARNING, + GTEST_ERROR, + GTEST_FATAL +}; + +// Formats log entry severity, provides a stream object for streaming the +// log message, and terminates the message with a newline when going out of +// scope. +class GTEST_API_ GTestLog { + public: + GTestLog(GTestLogSeverity severity, const char* file, int line); + + // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program. + ~GTestLog(); + + ::std::ostream& GetStream() { return ::std::cerr; } + + private: + const GTestLogSeverity severity_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog); +}; + +#define GTEST_LOG_(severity) \ + ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \ + __FILE__, __LINE__).GetStream() + +inline void LogToStderr() {} +inline void FlushInfoLog() { fflush(NULL); } + +// INTERNAL IMPLEMENTATION - DO NOT USE. +// +// GTEST_CHECK_ is an all-mode assert. 
It aborts the program if the condition
+// is not satisfied.
+//   Synopsis:
+//     GTEST_CHECK_(boolean_condition);
+//     or
+//     GTEST_CHECK_(boolean_condition) << "Additional message";
+//
+//   This checks the condition, and if the condition is not satisfied
+//   it prints a message about the condition violation, including the
+//   condition itself, plus any additional message streamed into it,
+//   and then it aborts the program.  It aborts the program irrespective of
+//   whether it is built in the debug mode or not.
+#define GTEST_CHECK_(condition) \
+    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+    if (::testing::internal::IsTrue(condition)) \
+      ; \
+    else \
+      GTEST_LOG_(FATAL) << "Condition " #condition " failed. "
+
+// An all-mode assert to verify that the given POSIX-style function
+// call returns 0 (indicating success).  Known limitation: this
+// doesn't expand to a balanced 'if' statement, so enclose the macro
+// in {} if you need to use it as the only statement in an 'if'
+// branch.
+#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \
+  if (const int gtest_error = (posix_call)) \
+    GTEST_LOG_(FATAL) << #posix_call << " failed with error " \
+                      << gtest_error
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Use ImplicitCast_ as a safe version of static_cast for upcasting in
+// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a
+// const Foo*).  When you use ImplicitCast_, the compiler checks that
+// the cast is safe.  Such explicit ImplicitCast_s are necessary in
+// surprisingly many situations where C++ demands an exact type match
+// instead of an argument type convertible to a target type.
+//
+// The syntax for using ImplicitCast_ is the same as for static_cast:
+//
+//   ImplicitCast_<ToType>(expr)
+//
+// ImplicitCast_ would have been part of the C++ standard library,
+// but the proposal was submitted too late.  It will probably make
+// its way into the language in the future.
+//
+// This relatively ugly name is intentional.  It prevents clashes with
+// similar functions users may have (e.g., implicit_cast).  The internal
+// namespace alone is not enough because the function can be found by ADL.
+template <typename To>
+inline To ImplicitCast_(To x) { return x; }
+
+// When you upcast (that is, cast a pointer from type Foo to type
+// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts
+// always succeed.  When you downcast (that is, cast a pointer from
+// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because
+// how do you know the pointer is really of type SubclassOfFoo?  It
+// could be a bare Foo, or of type DifferentSubclassOfFoo.  Thus,
+// when you downcast, you should use this macro.  In debug mode, we
+// use dynamic_cast<> to double-check the downcast is legal (we die
+// if it's not).  In normal mode, we do the efficient static_cast<>
+// instead.  Thus, it's important to test in debug mode to make sure
+// the cast is legal!
+//    This is the only place in the code we should use dynamic_cast<>.
+// In particular, you SHOULDN'T be using dynamic_cast<> in order to
+// do RTTI (e.g. code like this:
+//    if (dynamic_cast<Subclass1*>(foo)) HandleASubclass1Object(foo);
+//    if (dynamic_cast<Subclass2*>(foo)) HandleASubclass2Object(foo);
+// You should design the code some other way not to need this.
+//
+// This relatively ugly name is intentional.  It prevents clashes with
+// similar functions users may have (e.g., down_cast).  The internal
+// namespace alone is not enough because the function can be found by ADL.
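+//
+// To make the upcast/downcast distinction concrete, here is a minimal
+// illustrative sketch (Base and Derived are hypothetical names, not part
+// of Google Test):
+//
+//   class Base { public: virtual ~Base() {} };
+//   class Derived : public Base {};
+//
+//   Derived d;
+//   Base* b = ImplicitCast_<Base*>(&d);    // Upcast: always safe.
+//   Derived* d2 = DownCast_<Derived*>(b);  // Downcast: verified with
+//                                          // dynamic_cast<> in debug mode.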
+template // use like this: DownCast_(foo); +inline To DownCast_(From* f) { // so we only accept pointers + // Ensures that To is a sub-type of From *. This test is here only + // for compile-time type checking, and has no overhead in an + // optimized build at run-time, as it will be optimized away + // completely. + if (false) { + const To to = NULL; + ::testing::internal::ImplicitCast_(to); + } + +#if GTEST_HAS_RTTI + // RTTI: debug mode only! + GTEST_CHECK_(f == NULL || dynamic_cast(f) != NULL); +#endif + return static_cast(f); +} + +// Downcasts the pointer of type Base to Derived. +// Derived must be a subclass of Base. The parameter MUST +// point to a class of type Derived, not any subclass of it. +// When RTTI is available, the function performs a runtime +// check to enforce this. +template +Derived* CheckedDowncastToActualType(Base* base) { +#if GTEST_HAS_RTTI + GTEST_CHECK_(typeid(*base) == typeid(Derived)); + return dynamic_cast(base); // NOLINT +#else + return static_cast(base); // Poor man's downcast. +#endif +} + +#if GTEST_HAS_STREAM_REDIRECTION + +// Defines the stderr capturer: +// CaptureStdout - starts capturing stdout. +// GetCapturedStdout - stops capturing stdout and returns the captured string. +// CaptureStderr - starts capturing stderr. +// GetCapturedStderr - stops capturing stderr and returns the captured string. +// +GTEST_API_ void CaptureStdout(); +GTEST_API_ std::string GetCapturedStdout(); +GTEST_API_ void CaptureStderr(); +GTEST_API_ std::string GetCapturedStderr(); + +#endif // GTEST_HAS_STREAM_REDIRECTION + + +#if GTEST_HAS_DEATH_TEST + +const ::std::vector& GetInjectableArgvs(); +void SetInjectableArgvs(const ::std::vector* + new_argvs); + +// A copy of all command line arguments. Set by InitGoogleTest(). +extern ::std::vector g_argvs; + +#endif // GTEST_HAS_DEATH_TEST + +// Defines synchronization primitives. + +#if GTEST_HAS_PTHREAD + +// Sleeps for (roughly) n milli-seconds. This function is only for +// testing Google Test's own constructs. Don't use it in user tests, +// either directly or indirectly. +inline void SleepMilliseconds(int n) { + const timespec time = { + 0, // 0 seconds. + n * 1000L * 1000L, // And n ms. + }; + nanosleep(&time, NULL); +} + +// Allows a controller thread to pause execution of newly created +// threads until notified. Instances of this class must be created +// and destroyed in the controller thread. +// +// This class is only for testing Google Test's own constructs. Do not +// use it in user tests, either directly or indirectly. +class Notification { + public: + Notification() : notified_(false) { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL)); + } + ~Notification() { + pthread_mutex_destroy(&mutex_); + } + + // Notifies all threads created with this notification to start. Must + // be called from the controller thread. + void Notify() { + pthread_mutex_lock(&mutex_); + notified_ = true; + pthread_mutex_unlock(&mutex_); + } + + // Blocks until the controller thread notifies. Must be called from a test + // thread. + void WaitForNotification() { + for (;;) { + pthread_mutex_lock(&mutex_); + const bool notified = notified_; + pthread_mutex_unlock(&mutex_); + if (notified) + break; + SleepMilliseconds(10); + } + } + + private: + pthread_mutex_t mutex_; + bool notified_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification); +}; + +// As a C-function, ThreadFuncWithCLinkage cannot be templated itself. 
+// Consequently, it cannot select a correct instantiation of ThreadWithParam +// in order to call its Run(). Introducing ThreadWithParamBase as a +// non-templated base class for ThreadWithParam allows us to bypass this +// problem. +class ThreadWithParamBase { + public: + virtual ~ThreadWithParamBase() {} + virtual void Run() = 0; +}; + +// pthread_create() accepts a pointer to a function type with the C linkage. +// According to the Standard (7.5/1), function types with different linkages +// are different even if they are otherwise identical. Some compilers (for +// example, SunStudio) treat them as different types. Since class methods +// cannot be defined with C-linkage we need to define a free C-function to +// pass into pthread_create(). +extern "C" inline void* ThreadFuncWithCLinkage(void* thread) { + static_cast(thread)->Run(); + return NULL; +} + +// Helper class for testing Google Test's multi-threading constructs. +// To use it, write: +// +// void ThreadFunc(int param) { /* Do things with param */ } +// Notification thread_can_start; +// ... +// // The thread_can_start parameter is optional; you can supply NULL. +// ThreadWithParam thread(&ThreadFunc, 5, &thread_can_start); +// thread_can_start.Notify(); +// +// These classes are only for testing Google Test's own constructs. Do +// not use them in user tests, either directly or indirectly. +template +class ThreadWithParam : public ThreadWithParamBase { + public: + typedef void (*UserThreadFunc)(T); + + ThreadWithParam( + UserThreadFunc func, T param, Notification* thread_can_start) + : func_(func), + param_(param), + thread_can_start_(thread_can_start), + finished_(false) { + ThreadWithParamBase* const base = this; + // The thread can be created only after all fields except thread_ + // have been initialized. + GTEST_CHECK_POSIX_SUCCESS_( + pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base)); + } + ~ThreadWithParam() { Join(); } + + void Join() { + if (!finished_) { + GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0)); + finished_ = true; + } + } + + virtual void Run() { + if (thread_can_start_ != NULL) + thread_can_start_->WaitForNotification(); + func_(param_); + } + + private: + const UserThreadFunc func_; // User-supplied thread function. + const T param_; // User-supplied parameter to the thread function. + // When non-NULL, used to block execution until the controller thread + // notifies. + Notification* const thread_can_start_; + bool finished_; // true iff we know that the thread function has finished. + pthread_t thread_; // The native thread object. + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam); +}; + +// MutexBase and Mutex implement mutex on pthreads-based platforms. They +// are used in conjunction with class MutexLock: +// +// Mutex mutex; +// ... +// MutexLock lock(&mutex); // Acquires the mutex and releases it at the end +// // of the current scope. +// +// MutexBase implements behavior for both statically and dynamically +// allocated mutexes. Do not use MutexBase directly. Instead, write +// the following to define a static mutex: +// +// GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex); +// +// You can forward declare a static mutex like this: +// +// GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex); +// +// To create a dynamic mutex, just define an object of type Mutex. +class MutexBase { + public: + // Acquires this mutex. + void Lock() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_)); + owner_ = pthread_self(); + has_owner_ = true; + } + + // Releases this mutex. 
+ void Unlock() { + // Since the lock is being released the owner_ field should no longer be + // considered valid. We don't protect writing to has_owner_ here, as it's + // the caller's responsibility to ensure that the current thread holds the + // mutex when this is called. + has_owner_ = false; + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_)); + } + + // Does nothing if the current thread holds the mutex. Otherwise, crashes + // with high probability. + void AssertHeld() const { + GTEST_CHECK_(has_owner_ && pthread_equal(owner_, pthread_self())) + << "The current thread is not holding the mutex @" << this; + } + + // A static mutex may be used before main() is entered. It may even + // be used before the dynamic initialization stage. Therefore we + // must be able to initialize a static mutex object at link time. + // This means MutexBase has to be a POD and its member variables + // have to be public. + public: + pthread_mutex_t mutex_; // The underlying pthread mutex. + // has_owner_ indicates whether the owner_ field below contains a valid thread + // ID and is therefore safe to inspect (e.g., to use in pthread_equal()). All + // accesses to the owner_ field should be protected by a check of this field. + // An alternative might be to memset() owner_ to all zeros, but there's no + // guarantee that a zero'd pthread_t is necessarily invalid or even different + // from pthread_self(). + bool has_owner_; + pthread_t owner_; // The thread holding the mutex. +}; + +// Forward-declares a static mutex. +# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::MutexBase mutex + +// Defines and statically (i.e. at link time) initializes a static mutex. +// The initialization list here does not explicitly initialize each field, +// instead relying on default initialization for the unspecified fields. In +// particular, the owner_ field (a pthread_t) is not explicitly initialized. +// This allows initialization to work whether pthread_t is a scalar or struct. +// The flag -Wmissing-field-initializers must not be specified for this to work. +# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \ + ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, false } + +// The Mutex class can only be used for mutexes created at runtime. It +// shares its API with MutexBase otherwise. +class Mutex : public MutexBase { + public: + Mutex() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL)); + has_owner_ = false; + } + ~Mutex() { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_)); + } + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex); +}; + +// We cannot name this class MutexLock as the ctor declaration would +// conflict with a macro named MutexLock, which is defined on some +// platforms. Hence the typedef trick below. +class GTestMutexLock { + public: + explicit GTestMutexLock(MutexBase* mutex) + : mutex_(mutex) { mutex_->Lock(); } + + ~GTestMutexLock() { mutex_->Unlock(); } + + private: + MutexBase* const mutex_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock); +}; + +typedef GTestMutexLock MutexLock; + +// Helpers for ThreadLocal. + +// pthread_key_create() requires DeleteThreadLocalValue() to have +// C-linkage. Therefore it cannot be templatized to access +// ThreadLocal. Hence the need for class +// ThreadLocalValueHolderBase. +class ThreadLocalValueHolderBase { + public: + virtual ~ThreadLocalValueHolderBase() {} +}; + +// Called by pthread to delete thread-local data stored by +// pthread_setspecific(). 
+extern "C" inline void DeleteThreadLocalValue(void* value_holder) { + delete static_cast(value_holder); +} + +// Implements thread-local storage on pthreads-based systems. +// +// // Thread 1 +// ThreadLocal tl(100); // 100 is the default value for each thread. +// +// // Thread 2 +// tl.set(150); // Changes the value for thread 2 only. +// EXPECT_EQ(150, tl.get()); +// +// // Thread 1 +// EXPECT_EQ(100, tl.get()); // In thread 1, tl has the original value. +// tl.set(200); +// EXPECT_EQ(200, tl.get()); +// +// The template type argument T must have a public copy constructor. +// In addition, the default ThreadLocal constructor requires T to have +// a public default constructor. +// +// An object managed for a thread by a ThreadLocal instance is deleted +// when the thread exits. Or, if the ThreadLocal instance dies in +// that thread, when the ThreadLocal dies. It's the user's +// responsibility to ensure that all other threads using a ThreadLocal +// have exited when it dies, or the per-thread objects for those +// threads will not be deleted. +// +// Google Test only uses global ThreadLocal objects. That means they +// will die after main() has returned. Therefore, no per-thread +// object managed by Google Test will be leaked as long as all threads +// using Google Test have exited when main() returns. +template +class ThreadLocal { + public: + ThreadLocal() : key_(CreateKey()), + default_() {} + explicit ThreadLocal(const T& value) : key_(CreateKey()), + default_(value) {} + + ~ThreadLocal() { + // Destroys the managed object for the current thread, if any. + DeleteThreadLocalValue(pthread_getspecific(key_)); + + // Releases resources associated with the key. This will *not* + // delete managed objects for other threads. + GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_)); + } + + T* pointer() { return GetOrCreateValue(); } + const T* pointer() const { return GetOrCreateValue(); } + const T& get() const { return *pointer(); } + void set(const T& value) { *pointer() = value; } + + private: + // Holds a value of type T. + class ValueHolder : public ThreadLocalValueHolderBase { + public: + explicit ValueHolder(const T& value) : value_(value) {} + + T* pointer() { return &value_; } + + private: + T value_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder); + }; + + static pthread_key_t CreateKey() { + pthread_key_t key; + // When a thread exits, DeleteThreadLocalValue() will be called on + // the object managed for that thread. + GTEST_CHECK_POSIX_SUCCESS_( + pthread_key_create(&key, &DeleteThreadLocalValue)); + return key; + } + + T* GetOrCreateValue() const { + ThreadLocalValueHolderBase* const holder = + static_cast(pthread_getspecific(key_)); + if (holder != NULL) { + return CheckedDowncastToActualType(holder)->pointer(); + } + + ValueHolder* const new_holder = new ValueHolder(default_); + ThreadLocalValueHolderBase* const holder_base = new_holder; + GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base)); + return new_holder->pointer(); + } + + // A key pthreads uses for looking up per-thread values. + const pthread_key_t key_; + const T default_; // The default value for each thread. + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal); +}; + +# define GTEST_IS_THREADSAFE 1 + +#else // GTEST_HAS_PTHREAD + +// A dummy implementation of synchronization primitives (mutex, lock, +// and thread-local variable). Necessary for compiling Google Test where +// mutex is not supported - using Google Test in multiple threads is not +// supported on such platforms. 
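+//
+// Whichever implementation is compiled in, user-level code looks the same;
+// a minimal sketch (g_counter_mutex and the counter are illustrative, not
+// part of Google Test):
+//
+//   GTEST_DEFINE_STATIC_MUTEX_(g_counter_mutex);
+//   int g_counter = 0;
+//
+//   void Increment() {
+//     MutexLock lock(&g_counter_mutex);  // Acquired here, released when
+//     ++g_counter;                       // 'lock' goes out of scope.
+//   }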
+ +class Mutex { + public: + Mutex() {} + void Lock() {} + void Unlock() {} + void AssertHeld() const {} +}; + +# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::Mutex mutex + +# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex + +class GTestMutexLock { + public: + explicit GTestMutexLock(Mutex*) {} // NOLINT +}; + +typedef GTestMutexLock MutexLock; + +template +class ThreadLocal { + public: + ThreadLocal() : value_() {} + explicit ThreadLocal(const T& value) : value_(value) {} + T* pointer() { return &value_; } + const T* pointer() const { return &value_; } + const T& get() const { return value_; } + void set(const T& value) { value_ = value; } + private: + T value_; +}; + +// The above synchronization primitives have dummy implementations. +// Therefore Google Test is not thread-safe. +# define GTEST_IS_THREADSAFE 0 + +#endif // GTEST_HAS_PTHREAD + +// Returns the number of threads running in the process, or 0 to indicate that +// we cannot detect it. +GTEST_API_ size_t GetThreadCount(); + +// Passing non-POD classes through ellipsis (...) crashes the ARM +// compiler and generates a warning in Sun Studio. The Nokia Symbian +// and the IBM XL C/C++ compiler try to instantiate a copy constructor +// for objects passed through ellipsis (...), failing for uncopyable +// objects. We define this to ensure that only POD is passed through +// ellipsis on these systems. +#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC) +// We lose support for NULL detection where the compiler doesn't like +// passing non-POD classes through ellipsis (...). +# define GTEST_ELLIPSIS_NEEDS_POD_ 1 +#else +# define GTEST_CAN_COMPARE_NULL 1 +#endif + +// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between +// const T& and const T* in a function template. These compilers +// _can_ decide between class template specializations for T and T*, +// so a tr1::type_traits-like is_pointer works. +#if defined(__SYMBIAN32__) || defined(__IBMCPP__) +# define GTEST_NEEDS_IS_POINTER_ 1 +#endif + +template +struct bool_constant { + typedef bool_constant type; + static const bool value = bool_value; +}; +template const bool bool_constant::value; + +typedef bool_constant false_type; +typedef bool_constant true_type; + +template +struct is_pointer : public false_type {}; + +template +struct is_pointer : public true_type {}; + +template +struct IteratorTraits { + typedef typename Iterator::value_type value_type; +}; + +template +struct IteratorTraits { + typedef T value_type; +}; + +template +struct IteratorTraits { + typedef T value_type; +}; + +#if GTEST_OS_WINDOWS +# define GTEST_PATH_SEP_ "\\" +# define GTEST_HAS_ALT_PATH_SEP_ 1 +// The biggest signed integer type the compiler supports. +typedef __int64 BiggestInt; +#else +# define GTEST_PATH_SEP_ "/" +# define GTEST_HAS_ALT_PATH_SEP_ 0 +typedef long long BiggestInt; // NOLINT +#endif // GTEST_OS_WINDOWS + +// Utilities for char. + +// isspace(int ch) and friends accept an unsigned char or EOF. char +// may be signed, depending on the compiler (or compiler flags). +// Therefore we need to cast a char to unsigned char before calling +// isspace(), etc. 
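+//
+// For example, calling isspace() directly on a char that holds a negative
+// value is undefined behavior; the sketch below (the byte value is
+// arbitrary and illustrative) shows the pattern these wrappers follow:
+//
+//   char ch = '\xE9';                         // Negative if char is signed.
+//   // isspace(ch);                           // Possibly undefined behavior.
+//   isspace(static_cast<unsigned char>(ch));  // Always well-defined.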
+
+inline bool IsAlpha(char ch) {
+  return isalpha(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsAlNum(char ch) {
+  return isalnum(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsDigit(char ch) {
+  return isdigit(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsLower(char ch) {
+  return islower(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsSpace(char ch) {
+  return isspace(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsUpper(char ch) {
+  return isupper(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsXDigit(char ch) {
+  return isxdigit(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsXDigit(wchar_t ch) {
+  const unsigned char low_byte = static_cast<unsigned char>(ch);
+  return ch == low_byte && isxdigit(low_byte) != 0;
+}
+
+inline char ToLower(char ch) {
+  return static_cast<char>(tolower(static_cast<unsigned char>(ch)));
+}
+inline char ToUpper(char ch) {
+  return static_cast<char>(toupper(static_cast<unsigned char>(ch)));
+}
+
+// The testing::internal::posix namespace holds wrappers for common
+// POSIX functions.  These wrappers hide the differences between
+// Windows/MSVC and POSIX systems.  Since some compilers define these
+// standard functions as macros, the wrapper cannot have the same name
+// as the wrapped function.
+
+namespace posix {
+
+// Functions with a different name on Windows.
+
+#if GTEST_OS_WINDOWS
+
+typedef struct _stat StatStruct;
+
+# ifdef __BORLANDC__
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+  return stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+# else  // !__BORLANDC__
+#  if GTEST_OS_WINDOWS_MOBILE
+inline int IsATTY(int /* fd */) { return 0; }
+#  else
+inline int IsATTY(int fd) { return _isatty(fd); }
+#  endif  // GTEST_OS_WINDOWS_MOBILE
+inline int StrCaseCmp(const char* s1, const char* s2) {
+  return _stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return _strdup(src); }
+# endif  // __BORLANDC__
+
+# if GTEST_OS_WINDOWS_MOBILE
+inline int FileNo(FILE* file) { return reinterpret_cast<int>(_fileno(file)); }
+// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this
+// time and thus not defined there.
+# else
+inline int FileNo(FILE* file) { return _fileno(file); }
+inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); }
+inline int RmDir(const char* dir) { return _rmdir(dir); }
+inline bool IsDir(const StatStruct& st) {
+  return (_S_IFDIR & st.st_mode) != 0;
+}
+# endif  // GTEST_OS_WINDOWS_MOBILE
+
+#else
+
+typedef struct stat StatStruct;
+
+inline int FileNo(FILE* file) { return fileno(file); }
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+  return strcasecmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+inline int RmDir(const char* dir) { return rmdir(dir); }
+inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); }
+
+#endif  // GTEST_OS_WINDOWS
+
+// Functions deprecated by MSVC 8.0.
+
+#ifdef _MSC_VER
+// Temporarily disable warning 4996 (deprecated function).
+# pragma warning(push)
+# pragma warning(disable:4996)
+#endif
+
+inline const char* StrNCpy(char* dest, const char* src, size_t n) {
+  return strncpy(dest, src, n);
+}
+
+// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and
+// StrError() aren't needed on Windows CE at this time and thus not
+// defined there.
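+//
+// A minimal sketch of how these wrappers are intended to be used (the
+// file name is hypothetical):
+//
+//   FILE* f = posix::FOpen("notes.txt", "r");
+//   if (f != NULL) {
+//     char buf[64];
+//     posix::Read(posix::FileNo(f), buf, sizeof(buf));
+//     posix::FClose(f);
+//   }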
+ +#if !GTEST_OS_WINDOWS_MOBILE +inline int ChDir(const char* dir) { return chdir(dir); } +#endif +inline FILE* FOpen(const char* path, const char* mode) { + return fopen(path, mode); +} +#if !GTEST_OS_WINDOWS_MOBILE +inline FILE *FReopen(const char* path, const char* mode, FILE* stream) { + return freopen(path, mode, stream); +} +inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); } +#endif +inline int FClose(FILE* fp) { return fclose(fp); } +#if !GTEST_OS_WINDOWS_MOBILE +inline int Read(int fd, void* buf, unsigned int count) { + return static_cast(read(fd, buf, count)); +} +inline int Write(int fd, const void* buf, unsigned int count) { + return static_cast(write(fd, buf, count)); +} +inline int Close(int fd) { return close(fd); } +inline const char* StrError(int errnum) { return strerror(errnum); } +#endif +inline const char* GetEnv(const char* name) { +#if GTEST_OS_WINDOWS_MOBILE + // We are on Windows CE, which has no environment variables. + return NULL; +#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9) + // Environment variables which we programmatically clear will be set to the + // empty string rather than unset (NULL). Handle that case. + const char* const env = getenv(name); + return (env != NULL && env[0] != '\0') ? env : NULL; +#else + return getenv(name); +#endif +} + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif + +#if GTEST_OS_WINDOWS_MOBILE +// Windows CE has no C library. The abort() function is used in +// several places in Google Test. This implementation provides a reasonable +// imitation of standard behaviour. +void Abort(); +#else +inline void Abort() { abort(); } +#endif // GTEST_OS_WINDOWS_MOBILE + +} // namespace posix + +// MSVC "deprecates" snprintf and issues warnings wherever it is used. In +// order to avoid these warnings, we need to use _snprintf or _snprintf_s on +// MSVC-based platforms. We map the GTEST_SNPRINTF_ macro to the appropriate +// function in order to achieve that. We use macro definition here because +// snprintf is a variadic function. +#if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE +// MSVC 2005 and above support variadic macros. +# define GTEST_SNPRINTF_(buffer, size, format, ...) \ + _snprintf_s(buffer, size, size, format, __VA_ARGS__) +#elif defined(_MSC_VER) +// Windows CE does not define _snprintf_s and MSVC prior to 2005 doesn't +// complain about _snprintf. +# define GTEST_SNPRINTF_ _snprintf +#else +# define GTEST_SNPRINTF_ snprintf +#endif + +// The maximum number a BiggestInt can represent. This definition +// works no matter BiggestInt is represented in one's complement or +// two's complement. +// +// We cannot rely on numeric_limits in STL, as __int64 and long long +// are not part of standard C++ and numeric_limits doesn't need to be +// defined for them. +const BiggestInt kMaxBiggestInt = + ~(static_cast(1) << (8*sizeof(BiggestInt) - 1)); + +// This template class serves as a compile-time function from size to +// type. It maps a size in bytes to a primitive type with that +// size. e.g. +// +// TypeWithSize<4>::UInt +// +// is typedef-ed to be unsigned int (unsigned integer made up of 4 +// bytes). +// +// Such functionality should belong to STL, but I cannot find it +// there. +// +// Google Test uses this class in the implementation of floating-point +// comparison. +// +// For now it only handles UInt (unsigned int) as that's all Google Test +// needs. Other types can be easily added in the future if need +// arises. 
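+//
+// As an illustrative sketch of the idea (some_float is a hypothetical
+// variable), the bits of a float can be inspected through the matching
+// unsigned type:
+//
+//   typedef TypeWithSize<4>::UInt Bits;  // A 32-bit unsigned integer type.
+//   Bits bits = 0;
+//   memcpy(&bits, &some_float, sizeof(some_float));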
+template +class TypeWithSize { + public: + // This prevents the user from using TypeWithSize with incorrect + // values of N. + typedef void UInt; +}; + +// The specialization for size 4. +template <> +class TypeWithSize<4> { + public: + // unsigned int has size 4 in both gcc and MSVC. + // + // As base/basictypes.h doesn't compile on Windows, we cannot use + // uint32, uint64, and etc here. + typedef int Int; + typedef unsigned int UInt; +}; + +// The specialization for size 8. +template <> +class TypeWithSize<8> { + public: +#if GTEST_OS_WINDOWS + typedef __int64 Int; + typedef unsigned __int64 UInt; +#else + typedef long long Int; // NOLINT + typedef unsigned long long UInt; // NOLINT +#endif // GTEST_OS_WINDOWS +}; + +// Integer types of known sizes. +typedef TypeWithSize<4>::Int Int32; +typedef TypeWithSize<4>::UInt UInt32; +typedef TypeWithSize<8>::Int Int64; +typedef TypeWithSize<8>::UInt UInt64; +typedef TypeWithSize<8>::Int TimeInMillis; // Represents time in milliseconds. + +// Utilities for command line flags and environment variables. + +// Macro for referencing flags. +#define GTEST_FLAG(name) FLAGS_gtest_##name + +// Macros for declaring flags. +#define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name) +#define GTEST_DECLARE_int32_(name) \ + GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name) +#define GTEST_DECLARE_string_(name) \ + GTEST_API_ extern ::std::string GTEST_FLAG(name) + +// Macros for defining flags. +#define GTEST_DEFINE_bool_(name, default_val, doc) \ + GTEST_API_ bool GTEST_FLAG(name) = (default_val) +#define GTEST_DEFINE_int32_(name, default_val, doc) \ + GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val) +#define GTEST_DEFINE_string_(name, default_val, doc) \ + GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val) + +// Thread annotations +#define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks) +#define GTEST_LOCK_EXCLUDED_(locks) + +// Parses 'str' for a 32-bit signed integer. If successful, writes the result +// to *value and returns true; otherwise leaves *value unchanged and returns +// false. +// TODO(chandlerc): Find a better way to refactor flag and environment parsing +// out of both gtest-port.cc and gtest.cc to avoid exporting this utility +// function. +bool ParseInt32(const Message& src_text, const char* str, Int32* value); + +// Parses a bool/Int32/string from the environment variable +// corresponding to the given Google Test flag. +bool BoolFromGTestEnv(const char* flag, bool default_val); +GTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val); +const char* StringFromGTestEnv(const char* flag, const char* default_val); + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ + +#if GTEST_OS_LINUX +# include +# include +# include +# include +#endif // GTEST_OS_LINUX + +#if GTEST_HAS_EXCEPTIONS +# include +#endif + +#include +#include +#include +#include +#include +#include + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the Message class. +// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. +// They are clearly marked by comments like this: +// +// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +// +// Such code is NOT meant to be used by a user directly, and is subject +// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user +// program! + +#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ +#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ + +#include + + +// Ensures that there is at least one operator<< in the global namespace. +// See Message& operator<<(...) below for why. +void operator<<(const testing::internal::Secret&, int); + +namespace testing { + +// The Message class works like an ostream repeater. +// +// Typical usage: +// +// 1. You stream a bunch of values to a Message object. +// It will remember the text in a stringstream. +// 2. Then you stream the Message object to an ostream. +// This causes the text in the Message to be streamed +// to the ostream. +// +// For example; +// +// testing::Message foo; +// foo << 1 << " != " << 2; +// std::cout << foo; +// +// will print "1 != 2". +// +// Message is not intended to be inherited from. In particular, its +// destructor is not virtual. +// +// Note that stringstream behaves differently in gcc and in MSVC. You +// can stream a NULL char pointer to it in the former, but not in the +// latter (it causes an access violation if you do). The Message +// class hides this difference by treating a NULL char pointer as +// "(null)". +class GTEST_API_ Message { + private: + // The type of basic IO manipulators (endl, ends, and flush) for + // narrow streams. + typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&); + + public: + // Constructs an empty Message. + Message(); + + // Copy constructor. + Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT + *ss_ << msg.GetString(); + } + + // Constructs a Message from a C-string. + explicit Message(const char* str) : ss_(new ::std::stringstream) { + *ss_ << str; + } + +#if GTEST_OS_SYMBIAN + // Streams a value (either a pointer or not) to this object. 
+ template + inline Message& operator <<(const T& value) { + StreamHelper(typename internal::is_pointer::type(), value); + return *this; + } +#else + // Streams a non-pointer value to this object. + template + inline Message& operator <<(const T& val) { + // Some libraries overload << for STL containers. These + // overloads are defined in the global namespace instead of ::std. + // + // C++'s symbol lookup rule (i.e. Koenig lookup) says that these + // overloads are visible in either the std namespace or the global + // namespace, but not other namespaces, including the testing + // namespace which Google Test's Message class is in. + // + // To allow STL containers (and other types that has a << operator + // defined in the global namespace) to be used in Google Test + // assertions, testing::Message must access the custom << operator + // from the global namespace. With this using declaration, + // overloads of << defined in the global namespace and those + // visible via Koenig lookup are both exposed in this function. + using ::operator <<; + *ss_ << val; + return *this; + } + + // Streams a pointer value to this object. + // + // This function is an overload of the previous one. When you + // stream a pointer to a Message, this definition will be used as it + // is more specialized. (The C++ Standard, section + // [temp.func.order].) If you stream a non-pointer, then the + // previous definition will be used. + // + // The reason for this overload is that streaming a NULL pointer to + // ostream is undefined behavior. Depending on the compiler, you + // may get "0", "(nil)", "(null)", or an access violation. To + // ensure consistent result across compilers, we always treat NULL + // as "(null)". + template + inline Message& operator <<(T* const& pointer) { // NOLINT + if (pointer == NULL) { + *ss_ << "(null)"; + } else { + *ss_ << pointer; + } + return *this; + } +#endif // GTEST_OS_SYMBIAN + + // Since the basic IO manipulators are overloaded for both narrow + // and wide streams, we have to provide this specialized definition + // of operator <<, even though its body is the same as the + // templatized version above. Without this definition, streaming + // endl or other basic IO manipulators to Message will confuse the + // compiler. + Message& operator <<(BasicNarrowIoManip val) { + *ss_ << val; + return *this; + } + + // Instead of 1/0, we want to see true/false for bool values. + Message& operator <<(bool b) { + return *this << (b ? "true" : "false"); + } + + // These two overloads allow streaming a wide C string to a Message + // using the UTF-8 encoding. + Message& operator <<(const wchar_t* wide_c_str); + Message& operator <<(wchar_t* wide_c_str); + +#if GTEST_HAS_STD_WSTRING + // Converts the given wide string to a narrow string using the UTF-8 + // encoding, and streams the result to this Message object. + Message& operator <<(const ::std::wstring& wstr); +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_GLOBAL_WSTRING + // Converts the given wide string to a narrow string using the UTF-8 + // encoding, and streams the result to this Message object. + Message& operator <<(const ::wstring& wstr); +#endif // GTEST_HAS_GLOBAL_WSTRING + + // Gets the text streamed to this object so far as an std::string. + // Each '\0' character in the buffer is replaced with "\\0". + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. 
+ std::string GetString() const; + + private: + +#if GTEST_OS_SYMBIAN + // These are needed as the Nokia Symbian Compiler cannot decide between + // const T& and const T* in a function template. The Nokia compiler _can_ + // decide between class template specializations for T and T*, so a + // tr1::type_traits-like is_pointer works, and we can overload on that. + template + inline void StreamHelper(internal::true_type /*is_pointer*/, T* pointer) { + if (pointer == NULL) { + *ss_ << "(null)"; + } else { + *ss_ << pointer; + } + } + template + inline void StreamHelper(internal::false_type /*is_pointer*/, + const T& value) { + // See the comments in Message& operator <<(const T&) above for why + // we need this using statement. + using ::operator <<; + *ss_ << value; + } +#endif // GTEST_OS_SYMBIAN + + // We'll hold the text streamed to this object here. + const internal::scoped_ptr< ::std::stringstream> ss_; + + // We declare (but don't implement) this to prevent the compiler + // from implementing the assignment operator. + void operator=(const Message&); +}; + +// Streams a Message to an ostream. +inline std::ostream& operator <<(std::ostream& os, const Message& sb) { + return os << sb.GetString(); +} + +namespace internal { + +// Converts a streamable value to an std::string. A NULL pointer is +// converted to "(null)". When the input value is a ::string, +// ::std::string, ::wstring, or ::std::wstring object, each NUL +// character in it is replaced with "\\0". +template +std::string StreamableToString(const T& streamable) { + return (Message() << streamable).GetString(); +} + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file declares the String class and functions used internally by +// Google Test. They are subject to change without notice. 
They should not be used
+// by code external to Google Test.
+//
+// This header file is #included by <gtest/internal/gtest-internal.h>.
+// It should not be #included by other files.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+
+#ifdef __BORLANDC__
+// string.h is not guaranteed to provide strcpy on C++ Builder.
+# include <mem.h>
+#endif
+
+#include <string.h>
+#include <string>
+
+namespace testing {
+namespace internal {
+
+// String - an abstract class holding static string utilities.
+class GTEST_API_ String {
+ public:
+  // Static utility methods
+
+  // Clones a 0-terminated C string, allocating memory using new.  The
+  // caller is responsible for deleting the return value using
+  // delete[].  Returns the cloned string, or NULL if the input is
+  // NULL.
+  //
+  // This is different from strdup() in string.h, which allocates
+  // memory using malloc().
+  static const char* CloneCString(const char* c_str);
+
+#if GTEST_OS_WINDOWS_MOBILE
+  // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
+  // able to pass strings to Win32 APIs on CE we need to convert them
+  // to 'Unicode', UTF-16.
+
+  // Creates a UTF-16 wide string from the given ANSI string, allocating
+  // memory using new. The caller is responsible for deleting the return
+  // value using delete[]. Returns the wide string, or NULL if the
+  // input is NULL.
+  //
+  // The wide string is created using the ANSI codepage (CP_ACP) to
+  // match the behaviour of the ANSI versions of Win32 calls and the
+  // C runtime.
+  static LPCWSTR AnsiToUtf16(const char* c_str);
+
+  // Creates an ANSI string from the given wide string, allocating
+  // memory using new. The caller is responsible for deleting the return
+  // value using delete[]. Returns the ANSI string, or NULL if the
+  // input is NULL.
+  //
+  // The returned string is created using the ANSI codepage (CP_ACP) to
+  // match the behaviour of the ANSI versions of Win32 calls and the
+  // C runtime.
+  static const char* Utf16ToAnsi(LPCWSTR utf16_str);
+#endif
+
+  // Compares two C strings.  Returns true iff they have the same content.
+  //
+  // Unlike strcmp(), this function can handle NULL argument(s).  A
+  // NULL C string is considered different to any non-NULL C string,
+  // including the empty string.
+  static bool CStringEquals(const char* lhs, const char* rhs);
+
+  // Converts a wide C string to a String using the UTF-8 encoding.
+  // NULL will be converted to "(null)".  If an error occurred during
+  // the conversion, "(failed to convert from wide string)" is
+  // returned.
+  static std::string ShowWideCString(const wchar_t* wide_c_str);
+
+  // Compares two wide C strings.  Returns true iff they have the same
+  // content.
+  //
+  // Unlike wcscmp(), this function can handle NULL argument(s).  A
+  // NULL C string is considered different to any non-NULL C string,
+  // including the empty string.
+  static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs);
+
+  // Compares two C strings, ignoring case.  Returns true iff they
+  // have the same content.
+  //
+  // Unlike strcasecmp(), this function can handle NULL argument(s).
+  // A NULL C string is considered different to any non-NULL C string,
+  // including the empty string.
+  static bool CaseInsensitiveCStringEquals(const char* lhs,
+                                           const char* rhs);
+
+  // Compares two wide C strings, ignoring case.  Returns true iff they
+  // have the same content.
+  //
+  // Unlike wcscasecmp(), this function can handle NULL argument(s).
+ // A NULL C string is considered different to any non-NULL wide C string, + // including the empty string. + // NB: The implementations on different platforms slightly differ. + // On windows, this method uses _wcsicmp which compares according to LC_CTYPE + // environment variable. On GNU platform this method uses wcscasecmp + // which compares according to LC_CTYPE category of the current locale. + // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // current locale. + static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs, + const wchar_t* rhs); + + // Returns true iff the given string ends with the given suffix, ignoring + // case. Any string is considered to end with an empty suffix. + static bool EndsWithCaseInsensitive( + const std::string& str, const std::string& suffix); + + // Formats an int value as "%02d". + static std::string FormatIntWidth2(int value); // "%02d" for width == 2 + + // Formats an int value as "%X". + static std::string FormatHexInt(int value); + + // Formats a byte as "%02X". + static std::string FormatByte(unsigned char value); + + private: + String(); // Not meant to be instantiated. +}; // class String + +// Gets the content of the stringstream's buffer as an std::string. Each '\0' +// character in the buffer is replaced with "\\0". +GTEST_API_ std::string StringStreamToString(::std::stringstream* stream); + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: keith.ray@gmail.com (Keith Ray) +// +// Google Test filepath utilities +// +// This header file declares classes and functions used internally by +// Google Test. They are subject to change without notice. +// +// This file is #included in . +// Do not include this header file separately! 
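+//
+// A brief illustrative sketch of the value semantics described below
+// (the paths are hypothetical):
+//
+//   FilePath dir("reports/");  // Trailing separator => a directory.
+//   FilePath xml = FilePath::MakeFileName(
+//       dir, FilePath("widget_test"), 12, "xml");
+//   // xml.string() == "reports/widget_test_12.xml"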
+ +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ + + +namespace testing { +namespace internal { + +// FilePath - a class for file and directory pathname manipulation which +// handles platform-specific conventions (like the pathname separator). +// Used for helper functions for naming files in a directory for xml output. +// Except for Set methods, all methods are const or static, which provides an +// "immutable value object" -- useful for peace of mind. +// A FilePath with a value ending in a path separator ("like/this/") represents +// a directory, otherwise it is assumed to represent a file. In either case, +// it may or may not represent an actual file or directory in the file system. +// Names are NOT checked for syntax correctness -- no checking for illegal +// characters, malformed paths, etc. + +class GTEST_API_ FilePath { + public: + FilePath() : pathname_("") { } + FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { } + + explicit FilePath(const std::string& pathname) : pathname_(pathname) { + Normalize(); + } + + FilePath& operator=(const FilePath& rhs) { + Set(rhs); + return *this; + } + + void Set(const FilePath& rhs) { + pathname_ = rhs.pathname_; + } + + const std::string& string() const { return pathname_; } + const char* c_str() const { return pathname_.c_str(); } + + // Returns the current working directory, or "" if unsuccessful. + static FilePath GetCurrentDir(); + + // Given directory = "dir", base_name = "test", number = 0, + // extension = "xml", returns "dir/test.xml". If number is greater + // than zero (e.g., 12), returns "dir/test_12.xml". + // On Windows platform, uses \ as the separator rather than /. + static FilePath MakeFileName(const FilePath& directory, + const FilePath& base_name, + int number, + const char* extension); + + // Given directory = "dir", relative_path = "test.xml", + // returns "dir/test.xml". + // On Windows, uses \ as the separator rather than /. + static FilePath ConcatPaths(const FilePath& directory, + const FilePath& relative_path); + + // Returns a pathname for a file that does not currently exist. The pathname + // will be directory/base_name.extension or + // directory/base_name_.extension if directory/base_name.extension + // already exists. The number will be incremented until a pathname is found + // that does not already exist. + // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'. + // There could be a race condition if two or more processes are calling this + // function at the same time -- they could both pick the same filename. + static FilePath GenerateUniqueFileName(const FilePath& directory, + const FilePath& base_name, + const char* extension); + + // Returns true iff the path is "". + bool IsEmpty() const { return pathname_.empty(); } + + // If input name has a trailing separator character, removes it and returns + // the name, otherwise return the name string unmodified. + // On Windows platform, uses \ as the separator, other platforms use /. + FilePath RemoveTrailingPathSeparator() const; + + // Returns a copy of the FilePath with the directory part removed. + // Example: FilePath("path/to/file").RemoveDirectoryName() returns + // FilePath("file"). If there is no directory part ("just_a_file"), it returns + // the FilePath unmodified. If there is no file part ("just_a_dir/") it + // returns an empty FilePath (""). + // On Windows platform, '\' is the path separator, otherwise it is '/'. 
+ FilePath RemoveDirectoryName() const; + + // RemoveFileName returns the directory path with the filename removed. + // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/". + // If the FilePath is "a_file" or "/a_file", RemoveFileName returns + // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does + // not have a file, like "just/a/dir/", it returns the FilePath unmodified. + // On Windows platform, '\' is the path separator, otherwise it is '/'. + FilePath RemoveFileName() const; + + // Returns a copy of the FilePath with the case-insensitive extension removed. + // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns + // FilePath("dir/file"). If a case-insensitive extension is not + // found, returns a copy of the original FilePath. + FilePath RemoveExtension(const char* extension) const; + + // Creates directories so that path exists. Returns true if successful or if + // the directories already exist; returns false if unable to create + // directories for any reason. Will also return false if the FilePath does + // not represent a directory (that is, it doesn't end with a path separator). + bool CreateDirectoriesRecursively() const; + + // Create the directory so that path exists. Returns true if successful or + // if the directory already exists; returns false if unable to create the + // directory for any reason, including if the parent directory does not + // exist. Not named "CreateDirectory" because that's a macro on Windows. + bool CreateFolder() const; + + // Returns true if FilePath describes something in the file-system, + // either a file, directory, or whatever, and that something exists. + bool FileOrDirectoryExists() const; + + // Returns true if pathname describes a directory in the file-system + // that exists. + bool DirectoryExists() const; + + // Returns true if FilePath ends with a path separator, which indicates that + // it is intended to represent a directory. Returns false otherwise. + // This does NOT check that a directory (or file) actually exists. + bool IsDirectory() const; + + // Returns true if pathname describes a root directory. (Windows has one + // root directory per disk drive.) + bool IsRootDirectory() const; + + // Returns true if pathname describes an absolute path. + bool IsAbsolutePath() const; + + private: + // Replaces multiple consecutive separators with a single separator. + // For example, "bar///foo" becomes "bar/foo". Does not eliminate other + // redundancies that might be in a pathname involving "." or "..". + // + // A pathname with multiple consecutive separators may occur either through + // user error or as a result of some scripts or APIs that generate a pathname + // with a trailing separator. On other platforms the same API or script + // may NOT generate a pathname with a trailing "/". Then elsewhere that + // pathname may have another "/" and pathname components added to it, + // without checking for the separator already being there. + // The script language and operating system may allow paths like "foo//bar" + // but some of the functions in FilePath will not handle that correctly. In + // particular, RemoveTrailingPathSeparator() only removes one separator, and + // it is called in CreateDirectoriesRecursively() assuming that it will change + // a pathname from directory syntax (trailing separator) to filename syntax. 
+  //
+  // On Windows this method also replaces the alternate path separator '/'
+  // with the primary path separator '\\', so that for example "bar\\/\\foo"
+  // becomes "bar\\foo".
+
+  void Normalize();
+
+  // Returns a pointer to the last occurrence of a valid path separator in
+  // the FilePath. On Windows, for example, both '/' and '\' are valid path
+  // separators. Returns NULL if no path separator was found.
+  const char* FindLastPathSeparator() const;
+
+  std::string pathname_;
+};  // class FilePath
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+// This file was GENERATED by command:
+// pump.py gtest-type-util.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Type utilities needed for implementing typed and type-parameterized
+// tests.  This file is generated by a SCRIPT.  DO NOT EDIT BY HAND!
+//
+// Currently we support at most 50 types in a list, and at most 50
+// type-parameterized tests in one type-parameterized test case.
+// Please contact googletestframework@googlegroups.com if you need
+// more.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+// #ifdef __GNUC__ is too general here.  It is possible to use gcc without
+// using libstdc++ (which is where cxxabi.h comes from).
+# if GTEST_HAS_CXXABI_H_
+#  include <cxxabi.h>
+# elif defined(__HP_aCC)
+#  include <acxx_demangle.h>
+# endif  // GTEST_HAS_CXXABI_H_
+
+namespace testing {
+namespace internal {
+
+// GetTypeName<T>() returns a human-readable name of type T.
+// NB: This function is also used in Google Mock, so don't move it inside of
+// the typed-test-only section below.
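+//
+// For example (illustrative only; the exact spelling of the result is
+// compiler-dependent):
+//
+//   std::string name = GetTypeName<int>();
+//   // With RTTI and demangling available: "int".
+//   // Without RTTI: a fixed placeholder string.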
+template +std::string GetTypeName() { +# if GTEST_HAS_RTTI + + const char* const name = typeid(T).name(); +# if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC) + int status = 0; + // gcc's implementation of typeid(T).name() mangles the type name, + // so we have to demangle it. +# if GTEST_HAS_CXXABI_H_ + using abi::__cxa_demangle; +# endif // GTEST_HAS_CXXABI_H_ + char* const readable_name = __cxa_demangle(name, 0, 0, &status); + const std::string name_str(status == 0 ? readable_name : name); + free(readable_name); + return name_str; +# else + return name; +# endif // GTEST_HAS_CXXABI_H_ || __HP_aCC + +# else + + return ""; + +# endif // GTEST_HAS_RTTI +} + +#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// AssertyTypeEq::type is defined iff T1 and T2 are the same +// type. This can be used as a compile-time assertion to ensure that +// two types are equal. + +template +struct AssertTypeEq; + +template +struct AssertTypeEq { + typedef bool type; +}; + +// A unique type used as the default value for the arguments of class +// template Types. This allows us to simulate variadic templates +// (e.g. Types, Type, and etc), which C++ doesn't +// support directly. +struct None {}; + +// The following family of struct and struct templates are used to +// represent type lists. In particular, TypesN +// represents a type list with N types (T1, T2, ..., and TN) in it. +// Except for Types0, every struct in the family has two member types: +// Head for the first type in the list, and Tail for the rest of the +// list. + +// The empty type list. +struct Types0 {}; + +// Type lists of length 1, 2, 3, and so on. + +template +struct Types1 { + typedef T1 Head; + typedef Types0 Tail; +}; +template +struct Types2 { + typedef T1 Head; + typedef Types1 Tail; +}; + +template +struct Types3 { + typedef T1 Head; + typedef Types2 Tail; +}; + +template +struct Types4 { + typedef T1 Head; + typedef Types3 Tail; +}; + +template +struct Types5 { + typedef T1 Head; + typedef Types4 Tail; +}; + +template +struct Types6 { + typedef T1 Head; + typedef Types5 Tail; +}; + +template +struct Types7 { + typedef T1 Head; + typedef Types6 Tail; +}; + +template +struct Types8 { + typedef T1 Head; + typedef Types7 Tail; +}; + +template +struct Types9 { + typedef T1 Head; + typedef Types8 Tail; +}; + +template +struct Types10 { + typedef T1 Head; + typedef Types9 Tail; +}; + +template +struct Types11 { + typedef T1 Head; + typedef Types10 Tail; +}; + +template +struct Types12 { + typedef T1 Head; + typedef Types11 Tail; +}; + +template +struct Types13 { + typedef T1 Head; + typedef Types12 Tail; +}; + +template +struct Types14 { + typedef T1 Head; + typedef Types13 Tail; +}; + +template +struct Types15 { + typedef T1 Head; + typedef Types14 Tail; +}; + +template +struct Types16 { + typedef T1 Head; + typedef Types15 Tail; +}; + +template +struct Types17 { + typedef T1 Head; + typedef Types16 Tail; +}; + +template +struct Types18 { + typedef T1 Head; + typedef Types17 Tail; +}; + +template +struct Types19 { + typedef T1 Head; + typedef Types18 Tail; +}; + +template +struct Types20 { + typedef T1 Head; + typedef Types19 Tail; +}; + +template +struct Types21 { + typedef T1 Head; + typedef Types20 Tail; +}; + +template +struct Types22 { + typedef T1 Head; + typedef Types21 Tail; +}; + +template +struct Types23 { + typedef T1 Head; + typedef Types22 Tail; +}; + +template +struct Types24 { + typedef T1 Head; + typedef Types23 Tail; +}; + +template +struct Types25 { + typedef T1 Head; + typedef Types24 Tail; +}; + +template +struct 
Types26 { + typedef T1 Head; + typedef Types25 Tail; +}; + +template +struct Types27 { + typedef T1 Head; + typedef Types26 Tail; +}; + +template +struct Types28 { + typedef T1 Head; + typedef Types27 Tail; +}; + +template +struct Types29 { + typedef T1 Head; + typedef Types28 Tail; +}; + +template +struct Types30 { + typedef T1 Head; + typedef Types29 Tail; +}; + +template +struct Types31 { + typedef T1 Head; + typedef Types30 Tail; +}; + +template +struct Types32 { + typedef T1 Head; + typedef Types31 Tail; +}; + +template +struct Types33 { + typedef T1 Head; + typedef Types32 Tail; +}; + +template +struct Types34 { + typedef T1 Head; + typedef Types33 Tail; +}; + +template +struct Types35 { + typedef T1 Head; + typedef Types34 Tail; +}; + +template +struct Types36 { + typedef T1 Head; + typedef Types35 Tail; +}; + +template +struct Types37 { + typedef T1 Head; + typedef Types36 Tail; +}; + +template +struct Types38 { + typedef T1 Head; + typedef Types37 Tail; +}; + +template +struct Types39 { + typedef T1 Head; + typedef Types38 Tail; +}; + +template +struct Types40 { + typedef T1 Head; + typedef Types39 Tail; +}; + +template +struct Types41 { + typedef T1 Head; + typedef Types40 Tail; +}; + +template +struct Types42 { + typedef T1 Head; + typedef Types41 Tail; +}; + +template +struct Types43 { + typedef T1 Head; + typedef Types42 Tail; +}; + +template +struct Types44 { + typedef T1 Head; + typedef Types43 Tail; +}; + +template +struct Types45 { + typedef T1 Head; + typedef Types44 Tail; +}; + +template +struct Types46 { + typedef T1 Head; + typedef Types45 Tail; +}; + +template +struct Types47 { + typedef T1 Head; + typedef Types46 Tail; +}; + +template +struct Types48 { + typedef T1 Head; + typedef Types47 Tail; +}; + +template +struct Types49 { + typedef T1 Head; + typedef Types48 Tail; +}; + +template +struct Types50 { + typedef T1 Head; + typedef Types49 Tail; +}; + + +} // namespace internal + +// We don't want to require the users to write TypesN<...> directly, +// as that would require them to count the length. Types<...> is much +// easier to write, but generates horrible messages when there is a +// compiler error, as gcc insists on printing out each template +// argument, even if it has the default value (this means Types +// will appear as Types in the compiler +// errors). +// +// Our solution is to combine the best part of the two approaches: a +// user would write Types, and Google Test will translate +// that to TypesN internally to make error messages +// readable. The translation is done by the 'type' member of the +// Types template. 
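For orientation, this is the user-facing side of the translation just described: the Types<...> written in a TYPED_TEST_CASE() is what gets rewritten to a TypesN list. A minimal sketch of standard gtest usage (the fixture and test names are hypothetical); the Types front end that performs the translation follows in the next hunk:

```cpp
#include "gtest/gtest.h"

template <typename T>
class WidthTest : public ::testing::Test {};

// Types<char, int, long> becomes Types3<char, int, long> internally,
// exactly as described above.
typedef ::testing::Types<char, int, long> MyTypes;
TYPED_TEST_CASE(WidthTest, MyTypes);

TYPED_TEST(WidthTest, IsAtLeastOneByte) {
  EXPECT_GE(sizeof(TypeParam), 1u);  // instantiated once per type in MyTypes
}
```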
+template +struct Types { + typedef internal::Types50 type; +}; + +template <> +struct Types { + typedef internal::Types0 type; +}; +template +struct Types { + typedef internal::Types1 type; +}; +template +struct Types { + typedef internal::Types2 type; +}; +template +struct Types { + typedef internal::Types3 type; +}; +template +struct Types { + typedef internal::Types4 type; +}; +template +struct Types { + typedef internal::Types5 type; +}; +template +struct Types { + typedef internal::Types6 type; +}; +template +struct Types { + typedef internal::Types7 type; +}; +template +struct Types { + typedef internal::Types8 type; +}; +template +struct Types { + typedef internal::Types9 type; +}; +template +struct Types { + typedef internal::Types10 type; +}; +template +struct Types { + typedef internal::Types11 type; +}; +template +struct Types { + typedef internal::Types12 type; +}; +template +struct Types { + typedef internal::Types13 type; +}; +template +struct Types { + typedef internal::Types14 type; +}; +template +struct Types { + typedef internal::Types15 type; +}; +template +struct Types { + typedef internal::Types16 type; +}; +template +struct Types { + typedef internal::Types17 type; +}; +template +struct Types { + typedef internal::Types18 type; +}; +template +struct Types { + typedef internal::Types19 type; +}; +template +struct Types { + typedef internal::Types20 type; +}; +template +struct Types { + typedef internal::Types21 type; +}; +template +struct Types { + typedef internal::Types22 type; +}; +template +struct Types { + typedef internal::Types23 type; +}; +template +struct Types { + typedef internal::Types24 type; +}; +template +struct Types { + typedef internal::Types25 type; +}; +template +struct Types { + typedef internal::Types26 type; +}; +template +struct Types { + typedef internal::Types27 type; +}; +template +struct Types { + typedef internal::Types28 type; +}; +template +struct Types { + typedef internal::Types29 type; +}; +template +struct Types { + typedef internal::Types30 type; +}; +template +struct Types { + typedef internal::Types31 type; +}; +template +struct Types { + typedef internal::Types32 type; +}; +template +struct Types { + typedef internal::Types33 type; +}; +template +struct Types { + typedef internal::Types34 type; +}; +template +struct Types { + typedef internal::Types35 type; +}; +template +struct Types { + typedef internal::Types36 type; +}; +template +struct Types { + typedef internal::Types37 type; +}; +template +struct Types { + typedef internal::Types38 type; +}; +template +struct Types { + typedef internal::Types39 type; +}; +template +struct Types { + typedef internal::Types40 type; +}; +template +struct Types { + typedef internal::Types41 type; +}; +template +struct Types { + typedef internal::Types42 type; +}; +template +struct Types { + typedef internal::Types43 type; +}; +template +struct Types { + typedef internal::Types44 type; +}; +template +struct Types { + typedef internal::Types45 type; +}; +template +struct Types { + typedef internal::Types46 type; +}; +template +struct Types { + typedef internal::Types47 type; +}; +template +struct Types { + typedef internal::Types48 type; +}; +template +struct Types { + typedef internal::Types49 type; +}; + +namespace internal { + +# define GTEST_TEMPLATE_ template class + +// The template "selector" struct TemplateSel is used to +// represent Tmpl, which must be a class template with one type +// parameter, as a type. TemplateSel::Bind::type is defined +// as the type Tmpl. 
This allows us to actually instantiate the +// template "selected" by TemplateSel. +// +// This trick is necessary for simulating typedef for class templates, +// which C++ doesn't support directly. +template +struct TemplateSel { + template + struct Bind { + typedef Tmpl type; + }; +}; + +# define GTEST_BIND_(TmplSel, T) \ + TmplSel::template Bind::type + +// A unique struct template used as the default value for the +// arguments of class template Templates. This allows us to simulate +// variadic templates (e.g. Templates, Templates, +// and etc), which C++ doesn't support directly. +template +struct NoneT {}; + +// The following family of struct and struct templates are used to +// represent template lists. In particular, TemplatesN represents a list of N templates (T1, T2, ..., and TN). Except +// for Templates0, every struct in the family has two member types: +// Head for the selector of the first template in the list, and Tail +// for the rest of the list. + +// The empty template list. +struct Templates0 {}; + +// Template lists of length 1, 2, 3, and so on. + +template +struct Templates1 { + typedef TemplateSel Head; + typedef Templates0 Tail; +}; +template +struct Templates2 { + typedef TemplateSel Head; + typedef Templates1 Tail; +}; + +template +struct Templates3 { + typedef TemplateSel Head; + typedef Templates2 Tail; +}; + +template +struct Templates4 { + typedef TemplateSel Head; + typedef Templates3 Tail; +}; + +template +struct Templates5 { + typedef TemplateSel Head; + typedef Templates4 Tail; +}; + +template +struct Templates6 { + typedef TemplateSel Head; + typedef Templates5 Tail; +}; + +template +struct Templates7 { + typedef TemplateSel Head; + typedef Templates6 Tail; +}; + +template +struct Templates8 { + typedef TemplateSel Head; + typedef Templates7 Tail; +}; + +template +struct Templates9 { + typedef TemplateSel Head; + typedef Templates8 Tail; +}; + +template +struct Templates10 { + typedef TemplateSel Head; + typedef Templates9 Tail; +}; + +template +struct Templates11 { + typedef TemplateSel Head; + typedef Templates10 Tail; +}; + +template +struct Templates12 { + typedef TemplateSel Head; + typedef Templates11 Tail; +}; + +template +struct Templates13 { + typedef TemplateSel Head; + typedef Templates12 Tail; +}; + +template +struct Templates14 { + typedef TemplateSel Head; + typedef Templates13 Tail; +}; + +template +struct Templates15 { + typedef TemplateSel Head; + typedef Templates14 Tail; +}; + +template +struct Templates16 { + typedef TemplateSel Head; + typedef Templates15 Tail; +}; + +template +struct Templates17 { + typedef TemplateSel Head; + typedef Templates16 Tail; +}; + +template +struct Templates18 { + typedef TemplateSel Head; + typedef Templates17 Tail; +}; + +template +struct Templates19 { + typedef TemplateSel Head; + typedef Templates18 Tail; +}; + +template +struct Templates20 { + typedef TemplateSel Head; + typedef Templates19 Tail; +}; + +template +struct Templates21 { + typedef TemplateSel Head; + typedef Templates20 Tail; +}; + +template +struct Templates22 { + typedef TemplateSel Head; + typedef Templates21 Tail; +}; + +template +struct Templates23 { + typedef TemplateSel Head; + typedef Templates22 Tail; +}; + +template +struct Templates24 { + typedef TemplateSel Head; + typedef Templates23 Tail; +}; + +template +struct Templates25 { + typedef TemplateSel Head; + typedef Templates24 Tail; +}; + +template +struct Templates26 { + typedef TemplateSel Head; + typedef Templates25 Tail; +}; + +template +struct Templates27 { + typedef 
TemplateSel Head; + typedef Templates26 Tail; +}; + +template +struct Templates28 { + typedef TemplateSel Head; + typedef Templates27 Tail; +}; + +template +struct Templates29 { + typedef TemplateSel Head; + typedef Templates28 Tail; +}; + +template +struct Templates30 { + typedef TemplateSel Head; + typedef Templates29 Tail; +}; + +template +struct Templates31 { + typedef TemplateSel Head; + typedef Templates30 Tail; +}; + +template +struct Templates32 { + typedef TemplateSel Head; + typedef Templates31 Tail; +}; + +template +struct Templates33 { + typedef TemplateSel Head; + typedef Templates32 Tail; +}; + +template +struct Templates34 { + typedef TemplateSel Head; + typedef Templates33 Tail; +}; + +template +struct Templates35 { + typedef TemplateSel Head; + typedef Templates34 Tail; +}; + +template +struct Templates36 { + typedef TemplateSel Head; + typedef Templates35 Tail; +}; + +template +struct Templates37 { + typedef TemplateSel Head; + typedef Templates36 Tail; +}; + +template +struct Templates38 { + typedef TemplateSel Head; + typedef Templates37 Tail; +}; + +template +struct Templates39 { + typedef TemplateSel Head; + typedef Templates38 Tail; +}; + +template +struct Templates40 { + typedef TemplateSel Head; + typedef Templates39 Tail; +}; + +template +struct Templates41 { + typedef TemplateSel Head; + typedef Templates40 Tail; +}; + +template +struct Templates42 { + typedef TemplateSel Head; + typedef Templates41 Tail; +}; + +template +struct Templates43 { + typedef TemplateSel Head; + typedef Templates42 Tail; +}; + +template +struct Templates44 { + typedef TemplateSel Head; + typedef Templates43 Tail; +}; + +template +struct Templates45 { + typedef TemplateSel Head; + typedef Templates44 Tail; +}; + +template +struct Templates46 { + typedef TemplateSel Head; + typedef Templates45 Tail; +}; + +template +struct Templates47 { + typedef TemplateSel Head; + typedef Templates46 Tail; +}; + +template +struct Templates48 { + typedef TemplateSel Head; + typedef Templates47 Tail; +}; + +template +struct Templates49 { + typedef TemplateSel Head; + typedef Templates48 Tail; +}; + +template +struct Templates50 { + typedef TemplateSel Head; + typedef Templates49 Tail; +}; + + +// We don't want to require the users to write TemplatesN<...> directly, +// as that would require them to count the length. Templates<...> is much +// easier to write, but generates horrible messages when there is a +// compiler error, as gcc insists on printing out each template +// argument, even if it has the default value (this means Templates +// will appear as Templates in the compiler +// errors). +// +// Our solution is to combine the best part of the two approaches: a +// user would write Templates, and Google Test will translate +// that to TemplatesN internally to make error messages +// readable. The translation is done by the 'type' member of the +// Templates template. 
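The Templates front end follows in the next hunk. The TemplateSel/Bind trick it is built on is easiest to see in a reduced, self-contained form (illustrative names of my own, not gtest's):

```cpp
// A class template cannot be typedef'd directly; wrapping it as a template
// template parameter of an ordinary struct turns it into a type that can sit
// in a type list, and Bind re-applies it to a concrete argument later.
template <template <typename T> class Tmpl>
struct Selector {
  template <typename U>
  struct Bind {
    typedef Tmpl<U> type;
  };
};

template <typename T> struct Box { T value; };

typedef Selector<Box> BoxSel;            // the template, stored as a type
typedef BoxSel::Bind<int>::type IntBox;  // "applied" later: this is Box<int>

int main() {
  IntBox b;
  b.value = 42;
  return b.value == 42 ? 0 : 1;
}
```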
+template +struct Templates { + typedef Templates50 type; +}; + +template <> +struct Templates { + typedef Templates0 type; +}; +template +struct Templates { + typedef Templates1 type; +}; +template +struct Templates { + typedef Templates2 type; +}; +template +struct Templates { + typedef Templates3 type; +}; +template +struct Templates { + typedef Templates4 type; +}; +template +struct Templates { + typedef Templates5 type; +}; +template +struct Templates { + typedef Templates6 type; +}; +template +struct Templates { + typedef Templates7 type; +}; +template +struct Templates { + typedef Templates8 type; +}; +template +struct Templates { + typedef Templates9 type; +}; +template +struct Templates { + typedef Templates10 type; +}; +template +struct Templates { + typedef Templates11 type; +}; +template +struct Templates { + typedef Templates12 type; +}; +template +struct Templates { + typedef Templates13 type; +}; +template +struct Templates { + typedef Templates14 type; +}; +template +struct Templates { + typedef Templates15 type; +}; +template +struct Templates { + typedef Templates16 type; +}; +template +struct Templates { + typedef Templates17 type; +}; +template +struct Templates { + typedef Templates18 type; +}; +template +struct Templates { + typedef Templates19 type; +}; +template +struct Templates { + typedef Templates20 type; +}; +template +struct Templates { + typedef Templates21 type; +}; +template +struct Templates { + typedef Templates22 type; +}; +template +struct Templates { + typedef Templates23 type; +}; +template +struct Templates { + typedef Templates24 type; +}; +template +struct Templates { + typedef Templates25 type; +}; +template +struct Templates { + typedef Templates26 type; +}; +template +struct Templates { + typedef Templates27 type; +}; +template +struct Templates { + typedef Templates28 type; +}; +template +struct Templates { + typedef Templates29 type; +}; +template +struct Templates { + typedef Templates30 type; +}; +template +struct Templates { + typedef Templates31 type; +}; +template +struct Templates { + typedef Templates32 type; +}; +template +struct Templates { + typedef Templates33 type; +}; +template +struct Templates { + typedef Templates34 type; +}; +template +struct Templates { + typedef Templates35 type; +}; +template +struct Templates { + typedef Templates36 type; +}; +template +struct Templates { + typedef Templates37 type; +}; +template +struct Templates { + typedef Templates38 type; +}; +template +struct Templates { + typedef Templates39 type; +}; +template +struct Templates { + typedef Templates40 type; +}; +template +struct Templates { + typedef Templates41 type; +}; +template +struct Templates { + typedef Templates42 type; +}; +template +struct Templates { + typedef Templates43 type; +}; +template +struct Templates { + typedef Templates44 type; +}; +template +struct Templates { + typedef Templates45 type; +}; +template +struct Templates { + typedef Templates46 type; +}; +template +struct Templates { + typedef Templates47 type; +}; +template +struct Templates { + typedef Templates48 type; +}; +template +struct Templates { + typedef Templates49 type; +}; + +// The TypeList template makes it possible to use either a single type +// or a Types<...> list in TYPED_TEST_CASE() and +// INSTANTIATE_TYPED_TEST_CASE_P(). 
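Before the TypeList definition itself (next hunk), a reduced model of how these Head/Tail lists are walked by compile-time recursion, the same shape that TypeParameterizedTest::Register() uses further below (standalone sketch under my own names):

```cpp
#include <iostream>
#include <typeinfo>

struct Nil {};   // plays the role of Types0: terminates the list

template <typename H, typename T = Nil>
struct Cons {    // plays the role of TypesN: a Head plus the remaining Tail
  typedef H Head;
  typedef T Tail;
};

template <typename List>
struct Walker {
  static void Run() {
    std::cout << typeid(typename List::Head).name() << '\n';
    Walker<typename List::Tail>::Run();  // recurse on the tail
  }
};

template <>
struct Walker<Nil> {  // the base case ends the recursion, cf. Types0
  static void Run() {}
};

int main() {
  Walker<Cons<int, Cons<double, Cons<char> > > >::Run();
  return 0;
}
```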
+ +template +struct TypeList { + typedef Types1 type; +}; + +template +struct TypeList > { + typedef typename Types::type type; +}; + +#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ + +// Due to C++ preprocessor weirdness, we need double indirection to +// concatenate two tokens when one of them is __LINE__. Writing +// +// foo ## __LINE__ +// +// will result in the token foo__LINE__, instead of foo followed by +// the current line number. For more details, see +// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6 +#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar) +#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar + +class ProtocolMessage; +namespace proto2 { class Message; } + +namespace testing { + +// Forward declarations. + +class AssertionResult; // Result of an assertion. +class Message; // Represents a failure message. +class Test; // Represents a test. +class TestInfo; // Information about a test. +class TestPartResult; // Result of a test part. +class UnitTest; // A collection of test cases. + +template +::std::string PrintToString(const T& value); + +namespace internal { + +struct TraceInfo; // Information about a trace point. +class ScopedTrace; // Implements scoped trace. +class TestInfoImpl; // Opaque implementation of TestInfo +class UnitTestImpl; // Opaque implementation of UnitTest + +// How many times InitGoogleTest() has been called. +GTEST_API_ extern int g_init_gtest_count; + +// The text used in failure messages to indicate the start of the +// stack trace. +GTEST_API_ extern const char kStackTraceMarker[]; + +// Two overloaded helpers for checking at compile time whether an +// expression is a null pointer literal (i.e. NULL or any 0-valued +// compile-time integral constant). Their return values have +// different sizes, so we can use sizeof() to test which version is +// picked by the compiler. These helpers have no implementations, as +// we only need their signatures. +// +// Given IsNullLiteralHelper(x), the compiler will pick the first +// version if x can be implicitly converted to Secret*, and pick the +// second version otherwise. Since Secret is a secret and incomplete +// type, the only expression a user can write that has type Secret* is +// a null pointer literal. Therefore, we know that x is a null +// pointer literal if and only if the first version is picked by the +// compiler. +char IsNullLiteralHelper(Secret* p); +char (&IsNullLiteralHelper(...))[2]; // NOLINT + +// A compile-time bool constant that is true if and only if x is a +// null pointer literal (i.e. NULL or any 0-valued compile-time +// integral constant). +#ifdef GTEST_ELLIPSIS_NEEDS_POD_ +// We lose support for NULL detection where the compiler doesn't like +// passing non-POD classes through ellipsis (...). +# define GTEST_IS_NULL_LITERAL_(x) false +#else +# define GTEST_IS_NULL_LITERAL_(x) \ + (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1) +#endif // GTEST_ELLIPSIS_NEEDS_POD_ + +// Appends the user-supplied message to the Google-Test-generated message. +GTEST_API_ std::string AppendUserMessage( + const std::string& gtest_msg, const Message& user_msg); + +#if GTEST_HAS_EXCEPTIONS + +// This exception is thrown by (and only by) a failed Google Test +// assertion when GTEST_FLAG(throw_on_failure) is true (if exceptions +// are enabled). 
We derive it from std::runtime_error, which is for +// errors presumably detectable only at run time. Since +// std::runtime_error inherits from std::exception, many testing +// frameworks know how to extract and print the message inside it. +class GTEST_API_ GoogleTestFailureException : public ::std::runtime_error { + public: + explicit GoogleTestFailureException(const TestPartResult& failure); +}; + +#endif // GTEST_HAS_EXCEPTIONS + +// A helper class for creating scoped traces in user programs. +class GTEST_API_ ScopedTrace { + public: + // The c'tor pushes the given source file location and message onto + // a trace stack maintained by Google Test. + ScopedTrace(const char* file, int line, const Message& message); + + // The d'tor pops the info pushed by the c'tor. + // + // Note that the d'tor is not virtual in order to be efficient. + // Don't inherit from ScopedTrace! + ~ScopedTrace(); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace); +} GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its + // c'tor and d'tor. Therefore it doesn't + // need to be used otherwise. + +// Constructs and returns the message for an equality assertion +// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. +// +// The first four parameters are the expressions used in the assertion +// and their values, as strings. For example, for ASSERT_EQ(foo, bar) +// where foo is 5 and bar is 6, we have: +// +// expected_expression: "foo" +// actual_expression: "bar" +// expected_value: "5" +// actual_value: "6" +// +// The ignoring_case parameter is true iff the assertion is a +// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will +// be inserted into the message. +GTEST_API_ AssertionResult EqFailure(const char* expected_expression, + const char* actual_expression, + const std::string& expected_value, + const std::string& actual_value, + bool ignoring_case); + +// Constructs a failure message for Boolean assertions such as EXPECT_TRUE. +GTEST_API_ std::string GetBoolAssertionFailureMessage( + const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value); + +// This template class represents an IEEE floating-point number +// (either single-precision or double-precision, depending on the +// template parameters). +// +// The purpose of this class is to do more sophisticated number +// comparison. (Due to round-off error, etc, it's very unlikely that +// two floating-points will be equal exactly. Hence a naive +// comparison by the == operation often doesn't work.) +// +// Format of IEEE floating-point: +// +// The most-significant bit being the leftmost, an IEEE +// floating-point looks like +// +// sign_bit exponent_bits fraction_bits +// +// Here, sign_bit is a single bit that designates the sign of the +// number. +// +// For float, there are 8 exponent bits and 23 fraction bits. +// +// For double, there are 11 exponent bits and 52 fraction bits. +// +// More details can be found at +// http://en.wikipedia.org/wiki/IEEE_floating-point_standard. +// +// Template parameter: +// +// RawType: the raw floating-point type (either float or double) +template +class FloatingPoint { + public: + // Defines the unsigned integer type that has the same size as the + // floating point number. + typedef typename TypeWithSize::UInt Bits; + + // Constants. + + // # of bits in a number. + static const size_t kBitCount = 8*sizeof(RawType); + + // # of fraction bits in a number. 
+  static const size_t kFractionBitCount =
+      std::numeric_limits<RawType>::digits - 1;
+
+  // # of exponent bits in a number.
+  static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount;
+
+  // The mask for the sign bit.
+  static const Bits kSignBitMask = static_cast<Bits>(1) << (kBitCount - 1);
+
+  // The mask for the fraction bits.
+  static const Bits kFractionBitMask =
+      ~static_cast<Bits>(0) >> (kExponentBitCount + 1);
+
+  // The mask for the exponent bits.
+  static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask);
+
+  // How many ULP's (Units in the Last Place) we want to tolerate when
+  // comparing two numbers. The larger the value, the more error we
+  // allow. A 0 value means that two numbers must be exactly the same
+  // to be considered equal.
+  //
+  // The maximum error of a single floating-point operation is 0.5
+  // units in the last place. On Intel CPU's, all floating-point
+  // calculations are done with 80-bit precision, while double has 64
+  // bits. Therefore, 4 should be enough for ordinary use.
+  //
+  // See the following article for more details on ULP:
+  // http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
+  static const size_t kMaxUlps = 4;
+
+  // Constructs a FloatingPoint from a raw floating-point number.
+  //
+  // On an Intel CPU, passing a non-normalized NAN (Not a Number)
+  // around may change its bits, although the new value is guaranteed
+  // to be also a NAN. Therefore, don't expect this constructor to
+  // preserve the bits in x when x is a NAN.
+  explicit FloatingPoint(const RawType& x) { u_.value_ = x; }
+
+  // Static methods
+
+  // Reinterprets a bit pattern as a floating-point number.
+  //
+  // This function is needed to test the AlmostEquals() method.
+  static RawType ReinterpretBits(const Bits bits) {
+    FloatingPoint fp(0);
+    fp.u_.bits_ = bits;
+    return fp.u_.value_;
+  }
+
+  // Returns the floating-point number that represents positive infinity.
+  static RawType Infinity() {
+    return ReinterpretBits(kExponentBitMask);
+  }
+
+  // Returns the maximum representable finite floating-point number.
+  static RawType Max();
+
+  // Non-static methods
+
+  // Returns the bits that represent this number.
+  const Bits &bits() const { return u_.bits_; }
+
+  // Returns the exponent bits of this number.
+  Bits exponent_bits() const { return kExponentBitMask & u_.bits_; }
+
+  // Returns the fraction bits of this number.
+  Bits fraction_bits() const { return kFractionBitMask & u_.bits_; }
+
+  // Returns the sign bit of this number.
+  Bits sign_bit() const { return kSignBitMask & u_.bits_; }
+
+  // Returns true iff this is NAN (not a number).
+  bool is_nan() const {
+    // It's a NAN if the exponent bits are all ones and the fraction
+    // bits are not entirely zeros.
+    return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0);
+  }
+
+  // Returns true iff this number is at most kMaxUlps ULP's away from
+  // rhs. In particular, this function:
+  //
+  //   - returns false if either number is (or both are) NAN.
+  //   - treats really large numbers as almost equal to infinity.
+  //   - thinks +0.0 and -0.0 are 0 ULP's apart.
+  bool AlmostEquals(const FloatingPoint& rhs) const {
+    // The IEEE standard says that any comparison operation involving
+    // a NAN must return false.
+    if (is_nan() || rhs.is_nan()) return false;
+
+    return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_)
+        <= kMaxUlps;
+  }
+
+ private:
+  // The data type used to store the actual floating-point number.
+ union FloatingPointUnion { + RawType value_; // The raw floating-point number. + Bits bits_; // The bits that represent the number. + }; + + // Converts an integer from the sign-and-magnitude representation to + // the biased representation. More precisely, let N be 2 to the + // power of (kBitCount - 1), an integer x is represented by the + // unsigned number x + N. + // + // For instance, + // + // -N + 1 (the most negative number representable using + // sign-and-magnitude) is represented by 1; + // 0 is represented by N; and + // N - 1 (the biggest number representable using + // sign-and-magnitude) is represented by 2N - 1. + // + // Read http://en.wikipedia.org/wiki/Signed_number_representations + // for more details on signed number representations. + static Bits SignAndMagnitudeToBiased(const Bits &sam) { + if (kSignBitMask & sam) { + // sam represents a negative number. + return ~sam + 1; + } else { + // sam represents a positive number. + return kSignBitMask | sam; + } + } + + // Given two numbers in the sign-and-magnitude representation, + // returns the distance between them as an unsigned number. + static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1, + const Bits &sam2) { + const Bits biased1 = SignAndMagnitudeToBiased(sam1); + const Bits biased2 = SignAndMagnitudeToBiased(sam2); + return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1); + } + + FloatingPointUnion u_; +}; + +// We cannot use std::numeric_limits::max() as it clashes with the max() +// macro defined by . +template <> +inline float FloatingPoint::Max() { return FLT_MAX; } +template <> +inline double FloatingPoint::Max() { return DBL_MAX; } + +// Typedefs the instances of the FloatingPoint template class that we +// care to use. +typedef FloatingPoint Float; +typedef FloatingPoint Double; + +// In order to catch the mistake of putting tests that use different +// test fixture classes in the same test case, we need to assign +// unique IDs to fixture classes and compare them. The TypeId type is +// used to hold such IDs. The user should treat TypeId as an opaque +// type: the only operation allowed on TypeId values is to compare +// them for equality using the == operator. +typedef const void* TypeId; + +template +class TypeIdHelper { + public: + // dummy_ must not have a const type. Otherwise an overly eager + // compiler (e.g. MSVC 7.1 & 8.0) may try to merge + // TypeIdHelper::dummy_ for different Ts as an "optimization". + static bool dummy_; +}; + +template +bool TypeIdHelper::dummy_ = false; + +// GetTypeId() returns the ID of type T. Different values will be +// returned for different types. Calling the function twice with the +// same type argument is guaranteed to return the same ID. +template +TypeId GetTypeId() { + // The compiler is required to allocate a different + // TypeIdHelper::dummy_ variable for each T used to instantiate + // the template. Therefore, the address of dummy_ is guaranteed to + // be unique. + return &(TypeIdHelper::dummy_); +} + +// Returns the type ID of ::testing::Test. Always call this instead +// of GetTypeId< ::testing::Test>() to get the type ID of +// ::testing::Test, as the latter may give the wrong result due to a +// suspected linker bug when compiling Google Test as a Mac OS X +// framework. +GTEST_API_ TypeId GetTestTypeId(); + +// Defines the abstract factory interface that creates instances +// of a Test object. +class TestFactoryBase { + public: + virtual ~TestFactoryBase() {} + + // Creates a test instance to run. 
The instance is both created and destroyed
+  // within TestInfoImpl::Run()
+  virtual Test* CreateTest() = 0;
+
+ protected:
+  TestFactoryBase() {}
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase);
+};
+
+// This class provides implementation of TestFactoryBase interface.
+// It is used in TEST and TEST_F macros.
+template <class TestClass>
+class TestFactoryImpl : public TestFactoryBase {
+ public:
+  virtual Test* CreateTest() { return new TestClass; }
+};
+
+#if GTEST_OS_WINDOWS
+
+// Predicate-formatters for implementing the HRESULT checking macros
+// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}
+// We pass a long instead of HRESULT to avoid causing an
+// include dependency for the HRESULT type.
+GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr,
+                                            long hr);  // NOLINT
+GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr,
+                                            long hr);  // NOLINT
+
+#endif  // GTEST_OS_WINDOWS
+
+// Types of SetUpTestCase() and TearDownTestCase() functions.
+typedef void (*SetUpTestCaseFunc)();
+typedef void (*TearDownTestCaseFunc)();
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+//   test_case_name:   name of the test case
+//   name:             name of the test
+//   type_param        the name of the test's type parameter, or NULL if
+//                     this is not a typed or a type-parameterized test.
+//   value_param       text representation of the test's value parameter,
+//                     or NULL if this is not a type-parameterized test.
+//   fixture_class_id: ID of the test fixture class
+//   set_up_tc:        pointer to the function that sets up the test case
+//   tear_down_tc:     pointer to the function that tears down the test case
+//   factory:          pointer to the factory that creates a test object.
+//                     The newly created TestInfo instance will assume
+//                     ownership of the factory object.
+GTEST_API_ TestInfo* MakeAndRegisterTestInfo(
+    const char* test_case_name,
+    const char* name,
+    const char* type_param,
+    const char* value_param,
+    TypeId fixture_class_id,
+    SetUpTestCaseFunc set_up_tc,
+    TearDownTestCaseFunc tear_down_tc,
+    TestFactoryBase* factory);
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false. None of pstr, *pstr, and prefix can be NULL.
+GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr);
+
+#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
+
+// State of the definition of a type-parameterized test case.
+class GTEST_API_ TypedTestCasePState {
+ public:
+  TypedTestCasePState() : registered_(false) {}
+
+  // Adds the given test name to defined_test_names_ and returns true
+  // if the test case hasn't been registered; otherwise aborts the
+  // program.
+  bool AddTestName(const char* file, int line, const char* case_name,
+                   const char* test_name) {
+    if (registered_) {
+      fprintf(stderr, "%s Test %s must be defined before "
+              "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n",
+              FormatFileLocation(file, line).c_str(), test_name, case_name);
+      fflush(stderr);
+      posix::Abort();
+    }
+    defined_test_names_.insert(test_name);
+    return true;
+  }
+
+  // Verifies that registered_tests match the test names in
+  // defined_test_names_; returns registered_tests if successful, or
+  // aborts the program otherwise.
+ const char* VerifyRegisteredTestNames( + const char* file, int line, const char* registered_tests); + + private: + bool registered_; + ::std::set defined_test_names_; +}; + +// Skips to the first non-space char after the first comma in 'str'; +// returns NULL if no comma is found in 'str'. +inline const char* SkipComma(const char* str) { + const char* comma = strchr(str, ','); + if (comma == NULL) { + return NULL; + } + while (IsSpace(*(++comma))) {} + return comma; +} + +// Returns the prefix of 'str' before the first comma in it; returns +// the entire string if it contains no comma. +inline std::string GetPrefixUntilComma(const char* str) { + const char* comma = strchr(str, ','); + return comma == NULL ? str : std::string(str, comma); +} + +// TypeParameterizedTest::Register() +// registers a list of type-parameterized tests with Google Test. The +// return value is insignificant - we just need to return something +// such that we can call this function in a namespace scope. +// +// Implementation note: The GTEST_TEMPLATE_ macro declares a template +// template parameter. It's defined in gtest-type-util.h. +template +class TypeParameterizedTest { + public: + // 'index' is the index of the test in the type list 'Types' + // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase, + // Types). Valid values for 'index' are [0, N - 1] where N is the + // length of Types. + static bool Register(const char* prefix, const char* case_name, + const char* test_names, int index) { + typedef typename Types::Head Type; + typedef Fixture FixtureClass; + typedef typename GTEST_BIND_(TestSel, Type) TestClass; + + // First, registers the first type-parameterized test in the type + // list. + MakeAndRegisterTestInfo( + (std::string(prefix) + (prefix[0] == '\0' ? "" : "/") + case_name + "/" + + StreamableToString(index)).c_str(), + GetPrefixUntilComma(test_names).c_str(), + GetTypeName().c_str(), + NULL, // No value parameter. + GetTypeId(), + TestClass::SetUpTestCase, + TestClass::TearDownTestCase, + new TestFactoryImpl); + + // Next, recurses (at compile time) with the tail of the type list. + return TypeParameterizedTest + ::Register(prefix, case_name, test_names, index + 1); + } +}; + +// The base case for the compile time recursion. +template +class TypeParameterizedTest { + public: + static bool Register(const char* /*prefix*/, const char* /*case_name*/, + const char* /*test_names*/, int /*index*/) { + return true; + } +}; + +// TypeParameterizedTestCase::Register() +// registers *all combinations* of 'Tests' and 'Types' with Google +// Test. The return value is insignificant - we just need to return +// something such that we can call this function in a namespace scope. +template +class TypeParameterizedTestCase { + public: + static bool Register(const char* prefix, const char* case_name, + const char* test_names) { + typedef typename Tests::Head Head; + + // First, register the first test in 'Test' for each type in 'Types'. + TypeParameterizedTest::Register( + prefix, case_name, test_names, 0); + + // Next, recurses (at compile time) with the tail of the test list. + return TypeParameterizedTestCase + ::Register(prefix, case_name, SkipComma(test_names)); + } +}; + +// The base case for the compile time recursion. 
+template +class TypeParameterizedTestCase { + public: + static bool Register(const char* /*prefix*/, const char* /*case_name*/, + const char* /*test_names*/) { + return true; + } +}; + +#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// Returns the current OS stack trace as an std::string. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in +// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. +GTEST_API_ std::string GetCurrentOsStackTraceExceptTop( + UnitTest* unit_test, int skip_count); + +// Helpers for suppressing warnings on unreachable code or constant +// condition. + +// Always returns true. +GTEST_API_ bool AlwaysTrue(); + +// Always returns false. +inline bool AlwaysFalse() { return !AlwaysTrue(); } + +// Helper for suppressing false warning from Clang on a const char* +// variable declared in a conditional expression always being NULL in +// the else branch. +struct GTEST_API_ ConstCharPtr { + ConstCharPtr(const char* str) : value(str) {} + operator bool() const { return true; } + const char* value; +}; + +// A simple Linear Congruential Generator for generating random +// numbers with a uniform distribution. Unlike rand() and srand(), it +// doesn't use global state (and therefore can't interfere with user +// code). Unlike rand_r(), it's portable. An LCG isn't very random, +// but it's good enough for our purposes. +class GTEST_API_ Random { + public: + static const UInt32 kMaxRange = 1u << 31; + + explicit Random(UInt32 seed) : state_(seed) {} + + void Reseed(UInt32 seed) { state_ = seed; } + + // Generates a random number from [0, range). Crashes if 'range' is + // 0 or greater than kMaxRange. + UInt32 Generate(UInt32 range); + + private: + UInt32 state_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(Random); +}; + +// Defining a variable of type CompileAssertTypesEqual will cause a +// compiler error iff T1 and T2 are different types. +template +struct CompileAssertTypesEqual; + +template +struct CompileAssertTypesEqual { +}; + +// Removes the reference from a type if it is a reference type, +// otherwise leaves it unchanged. This is the same as +// tr1::remove_reference, which is not widely available yet. +template +struct RemoveReference { typedef T type; }; // NOLINT +template +struct RemoveReference { typedef T type; }; // NOLINT + +// A handy wrapper around RemoveReference that works when the argument +// T depends on template parameters. +#define GTEST_REMOVE_REFERENCE_(T) \ + typename ::testing::internal::RemoveReference::type + +// Removes const from a type if it is a const type, otherwise leaves +// it unchanged. This is the same as tr1::remove_const, which is not +// widely available yet. +template +struct RemoveConst { typedef T type; }; // NOLINT +template +struct RemoveConst { typedef T type; }; // NOLINT + +// MSVC 8.0, Sun C++, and IBM XL C++ have a bug which causes the above +// definition to fail to remove the const in 'const int[3]' and 'const +// char[3][4]'. The following specialization works around the bug. 
+template +struct RemoveConst { + typedef typename RemoveConst::type type[N]; +}; + +#if defined(_MSC_VER) && _MSC_VER < 1400 +// This is the only specialization that allows VC++ 7.1 to remove const in +// 'const int[3] and 'const int[3][4]'. However, it causes trouble with GCC +// and thus needs to be conditionally compiled. +template +struct RemoveConst { + typedef typename RemoveConst::type type[N]; +}; +#endif + +// A handy wrapper around RemoveConst that works when the argument +// T depends on template parameters. +#define GTEST_REMOVE_CONST_(T) \ + typename ::testing::internal::RemoveConst::type + +// Turns const U&, U&, const U, and U all into U. +#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \ + GTEST_REMOVE_CONST_(GTEST_REMOVE_REFERENCE_(T)) + +// Adds reference to a type if it is not a reference type, +// otherwise leaves it unchanged. This is the same as +// tr1::add_reference, which is not widely available yet. +template +struct AddReference { typedef T& type; }; // NOLINT +template +struct AddReference { typedef T& type; }; // NOLINT + +// A handy wrapper around AddReference that works when the argument T +// depends on template parameters. +#define GTEST_ADD_REFERENCE_(T) \ + typename ::testing::internal::AddReference::type + +// Adds a reference to const on top of T as necessary. For example, +// it transforms +// +// char ==> const char& +// const char ==> const char& +// char& ==> const char& +// const char& ==> const char& +// +// The argument T must depend on some template parameters. +#define GTEST_REFERENCE_TO_CONST_(T) \ + GTEST_ADD_REFERENCE_(const GTEST_REMOVE_REFERENCE_(T)) + +// ImplicitlyConvertible::value is a compile-time bool +// constant that's true iff type From can be implicitly converted to +// type To. +template +class ImplicitlyConvertible { + private: + // We need the following helper functions only for their types. + // They have no implementations. + + // MakeFrom() is an expression whose type is From. We cannot simply + // use From(), as the type From may not have a public default + // constructor. + static From MakeFrom(); + + // These two functions are overloaded. Given an expression + // Helper(x), the compiler will pick the first version if x can be + // implicitly converted to type To; otherwise it will pick the + // second version. + // + // The first version returns a value of size 1, and the second + // version returns a value of size 2. Therefore, by checking the + // size of Helper(x), which can be done at compile time, we can tell + // which version of Helper() is used, and hence whether x can be + // implicitly converted to type To. + static char Helper(To); + static char (&Helper(...))[2]; // NOLINT + + // We have to put the 'public' section after the 'private' section, + // or MSVC refuses to compile the code. + public: + // MSVC warns about implicitly converting from double to int for + // possible loss of data, so we need to temporarily disable the + // warning. +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4244) // Temporarily disables warning 4244. + + static const bool value = + sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1; +# pragma warning(pop) // Restores the warning state. +#elif defined(__BORLANDC__) + // C++Builder cannot use member overload resolution during template + // instantiation. The simplest workaround is to use its C++0x type traits + // functions (C++Builder 2009 and above only). 
+ static const bool value = __is_convertible(From, To); +#else + static const bool value = + sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1; +#endif // _MSV_VER +}; +template +const bool ImplicitlyConvertible::value; + +// IsAProtocolMessage::value is a compile-time bool constant that's +// true iff T is type ProtocolMessage, proto2::Message, or a subclass +// of those. +template +struct IsAProtocolMessage + : public bool_constant< + ImplicitlyConvertible::value || + ImplicitlyConvertible::value> { +}; + +// When the compiler sees expression IsContainerTest(0), if C is an +// STL-style container class, the first overload of IsContainerTest +// will be viable (since both C::iterator* and C::const_iterator* are +// valid types and NULL can be implicitly converted to them). It will +// be picked over the second overload as 'int' is a perfect match for +// the type of argument 0. If C::iterator or C::const_iterator is not +// a valid type, the first overload is not viable, and the second +// overload will be picked. Therefore, we can determine whether C is +// a container class by checking the type of IsContainerTest(0). +// The value of the expression is insignificant. +// +// Note that we look for both C::iterator and C::const_iterator. The +// reason is that C++ injects the name of a class as a member of the +// class itself (e.g. you can refer to class iterator as either +// 'iterator' or 'iterator::iterator'). If we look for C::iterator +// only, for example, we would mistakenly think that a class named +// iterator is an STL container. +// +// Also note that the simpler approach of overloading +// IsContainerTest(typename C::const_iterator*) and +// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++. +typedef int IsContainer; +template +IsContainer IsContainerTest(int /* dummy */, + typename C::iterator* /* it */ = NULL, + typename C::const_iterator* /* const_it */ = NULL) { + return 0; +} + +typedef char IsNotContainer; +template +IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; } + +// EnableIf::type is void when 'Cond' is true, and +// undefined when 'Cond' is false. To use SFINAE to make a function +// overload only apply when a particular expression is true, add +// "typename EnableIf::type* = 0" as the last parameter. +template struct EnableIf; +template<> struct EnableIf { typedef void type; }; // NOLINT + +// Utilities for native arrays. + +// ArrayEq() compares two k-dimensional native arrays using the +// elements' operator==, where k can be any integer >= 0. When k is +// 0, ArrayEq() degenerates into comparing a single pair of values. + +template +bool ArrayEq(const T* lhs, size_t size, const U* rhs); + +// This generic version is used when k is 0. +template +inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; } + +// This overload is used when k >= 1. +template +inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) { + return internal::ArrayEq(lhs, N, rhs); +} + +// This helper reduces code bloat. If we instead put its logic inside +// the previous ArrayEq() function, arrays with different sizes would +// lead to different copies of the template code. +template +bool ArrayEq(const T* lhs, size_t size, const U* rhs) { + for (size_t i = 0; i != size; i++) { + if (!internal::ArrayEq(lhs[i], rhs[i])) + return false; + } + return true; +} + +// Finds the first element in the iterator range [begin, end) that +// equals elem. Element may be a native array type itself. 
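ArrayAwareFind() is defined in the next hunk. The dimension-peeling recursion that ArrayEq() performs above can be seen in isolation like so (a self-contained sketch that mirrors, but is not, gtest's code):

```cpp
#include <cstddef>
#include <cstdio>

template <typename T, typename U>
bool Eq(const T& lhs, const U& rhs) { return lhs == rhs; }  // the k == 0 case

template <typename T, typename U, std::size_t N>
bool Eq(const T (&lhs)[N], const U (&rhs)[N]) {             // the k >= 1 case
  for (std::size_t i = 0; i != N; i++) {
    if (!Eq(lhs[i], rhs[i]))  // peels one array dimension per call
      return false;
  }
  return true;
}

int main() {
  const int a[2][3] = { {1, 2, 3}, {4, 5, 6} };
  const int b[2][3] = { {1, 2, 3}, {4, 5, 6} };
  std::printf("%s\n", Eq(a, b) ? "equal" : "not equal");
  return 0;
}
```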
+template +Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) { + for (Iter it = begin; it != end; ++it) { + if (internal::ArrayEq(*it, elem)) + return it; + } + return end; +} + +// CopyArray() copies a k-dimensional native array using the elements' +// operator=, where k can be any integer >= 0. When k is 0, +// CopyArray() degenerates into copying a single value. + +template +void CopyArray(const T* from, size_t size, U* to); + +// This generic version is used when k is 0. +template +inline void CopyArray(const T& from, U* to) { *to = from; } + +// This overload is used when k >= 1. +template +inline void CopyArray(const T(&from)[N], U(*to)[N]) { + internal::CopyArray(from, N, *to); +} + +// This helper reduces code bloat. If we instead put its logic inside +// the previous CopyArray() function, arrays with different sizes +// would lead to different copies of the template code. +template +void CopyArray(const T* from, size_t size, U* to) { + for (size_t i = 0; i != size; i++) { + internal::CopyArray(from[i], to + i); + } +} + +// The relation between an NativeArray object (see below) and the +// native array it represents. +enum RelationToSource { + kReference, // The NativeArray references the native array. + kCopy // The NativeArray makes a copy of the native array and + // owns the copy. +}; + +// Adapts a native array to a read-only STL-style container. Instead +// of the complete STL container concept, this adaptor only implements +// members useful for Google Mock's container matchers. New members +// should be added as needed. To simplify the implementation, we only +// support Element being a raw type (i.e. having no top-level const or +// reference modifier). It's the client's responsibility to satisfy +// this requirement. Element can be an array type itself (hence +// multi-dimensional arrays are supported). +template +class NativeArray { + public: + // STL-style container typedefs. + typedef Element value_type; + typedef Element* iterator; + typedef const Element* const_iterator; + + // Constructs from a native array. + NativeArray(const Element* array, size_t count, RelationToSource relation) { + Init(array, count, relation); + } + + // Copy constructor. + NativeArray(const NativeArray& rhs) { + Init(rhs.array_, rhs.size_, rhs.relation_to_source_); + } + + ~NativeArray() { + // Ensures that the user doesn't instantiate NativeArray with a + // const or reference type. + static_cast(StaticAssertTypeEqHelper()); + if (relation_to_source_ == kCopy) + delete[] array_; + } + + // STL-style container methods. + size_t size() const { return size_; } + const_iterator begin() const { return array_; } + const_iterator end() const { return array_ + size_; } + bool operator==(const NativeArray& rhs) const { + return size() == rhs.size() && + ArrayEq(begin(), size(), rhs.begin()); + } + + private: + // Initializes this object; makes a copy of the input array if + // 'relation' is kCopy. 
+ void Init(const Element* array, size_t a_size, RelationToSource relation) { + if (relation == kReference) { + array_ = array; + } else { + Element* const copy = new Element[a_size]; + CopyArray(array, a_size, copy); + array_ = copy; + } + size_ = a_size; + relation_to_source_ = relation; + } + + const Element* array_; + size_t size_; + RelationToSource relation_to_source_; + + GTEST_DISALLOW_ASSIGN_(NativeArray); +}; + +} // namespace internal +} // namespace testing + +#define GTEST_MESSAGE_AT_(file, line, message, result_type) \ + ::testing::internal::AssertHelper(result_type, file, line, message) \ + = ::testing::Message() + +#define GTEST_MESSAGE_(message, result_type) \ + GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type) + +#define GTEST_FATAL_FAILURE_(message) \ + return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure) + +#define GTEST_NONFATAL_FAILURE_(message) \ + GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure) + +#define GTEST_SUCCESS_(message) \ + GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess) + +// Suppresses MSVC warnings 4072 (unreachable code) for the code following +// statement if it returns or throws (or doesn't return or throw in some +// situations). +#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \ + if (::testing::internal::AlwaysTrue()) { statement; } + +#define GTEST_TEST_THROW_(statement, expected_exception, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::ConstCharPtr gtest_msg = "") { \ + bool gtest_caught_expected = false; \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (expected_exception const&) { \ + gtest_caught_expected = true; \ + } \ + catch (...) { \ + gtest_msg.value = \ + "Expected: " #statement " throws an exception of type " \ + #expected_exception ".\n Actual: it throws a different type."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \ + } \ + if (!gtest_caught_expected) { \ + gtest_msg.value = \ + "Expected: " #statement " throws an exception of type " \ + #expected_exception ".\n Actual: it throws nothing."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \ + fail(gtest_msg.value) + +#define GTEST_TEST_NO_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (...) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \ + fail("Expected: " #statement " doesn't throw an exception.\n" \ + " Actual: it throws.") + +#define GTEST_TEST_ANY_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + bool gtest_caught_any = false; \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (...) { \ + gtest_caught_any = true; \ + } \ + if (!gtest_caught_any) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \ + fail("Expected: " #statement " throws an exception.\n" \ + " Actual: it doesn't.") + + +// Implements Boolean test assertions such as EXPECT_TRUE. expression can be +// either a boolean expression or an AssertionResult. 
text is a textual +// represenation of expression as it was passed into the EXPECT_TRUE. +#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (const ::testing::AssertionResult gtest_ar_ = \ + ::testing::AssertionResult(expression)) \ + ; \ + else \ + fail(::testing::internal::GetBoolAssertionFailureMessage(\ + gtest_ar_, text, #actual, #expected).c_str()) + +#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \ + fail("Expected: " #statement " doesn't generate new fatal " \ + "failures in the current thread.\n" \ + " Actual: it does.") + +// Expands to the name of the class that implements the given test. +#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \ + test_case_name##_##test_name##_Test + +// Helper macro for defining tests. +#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\ +class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\ + public:\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\ + private:\ + virtual void TestBody();\ + static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\ + GTEST_DISALLOW_COPY_AND_ASSIGN_(\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\ +};\ +\ +::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\ + ::test_info_ =\ + ::testing::internal::MakeAndRegisterTestInfo(\ + #test_case_name, #test_name, NULL, NULL, \ + (parent_id), \ + parent_class::SetUpTestCase, \ + parent_class::TearDownTestCase, \ + new ::testing::internal::TestFactoryImpl<\ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\ +void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody() + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for death tests. It is
+// #included by gtest.h so a user doesn't need to include this
+// directly.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines internal utilities needed for implementing
+// death tests. They are subject to change without notice.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+
+#include <stdio.h>
+
+namespace testing {
+namespace internal {
+
+GTEST_DECLARE_string_(internal_run_death_test);
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kDeathTestStyleFlag[] = "death_test_style";
+const char kDeathTestUseFork[] = "death_test_use_fork";
+const char kInternalRunDeathTestFlag[] = "internal_run_death_test";
+
+#if GTEST_HAS_DEATH_TEST
+
+// DeathTest is a class that hides much of the complexity of the
+// GTEST_DEATH_TEST_ macro.
It is abstract; its static Create method +// returns a concrete class that depends on the prevailing death test +// style, as defined by the --gtest_death_test_style and/or +// --gtest_internal_run_death_test flags. + +// In describing the results of death tests, these terms are used with +// the corresponding definitions: +// +// exit status: The integer exit information in the format specified +// by wait(2) +// exit code: The integer code passed to exit(3), _exit(2), or +// returned from main() +class GTEST_API_ DeathTest { + public: + // Create returns false if there was an error determining the + // appropriate action to take for the current death test; for example, + // if the gtest_death_test_style flag is set to an invalid value. + // The LastMessage method will return a more detailed message in that + // case. Otherwise, the DeathTest pointer pointed to by the "test" + // argument is set. If the death test should be skipped, the pointer + // is set to NULL; otherwise, it is set to the address of a new concrete + // DeathTest object that controls the execution of the current test. + static bool Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test); + DeathTest(); + virtual ~DeathTest() { } + + // A helper class that aborts a death test when it's deleted. + class ReturnSentinel { + public: + explicit ReturnSentinel(DeathTest* test) : test_(test) { } + ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); } + private: + DeathTest* const test_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel); + } GTEST_ATTRIBUTE_UNUSED_; + + // An enumeration of possible roles that may be taken when a death + // test is encountered. EXECUTE means that the death test logic should + // be executed immediately. OVERSEE means that the program should prepare + // the appropriate environment for a child process to execute the death + // test, then wait for it to complete. + enum TestRole { OVERSEE_TEST, EXECUTE_TEST }; + + // An enumeration of the three reasons that a test might be aborted. + enum AbortReason { + TEST_ENCOUNTERED_RETURN_STATEMENT, + TEST_THREW_EXCEPTION, + TEST_DID_NOT_DIE + }; + + // Assumes one of the above roles. + virtual TestRole AssumeRole() = 0; + + // Waits for the death test to finish and returns its status. + virtual int Wait() = 0; + + // Returns true if the death test passed; that is, the test process + // exited during the test, its exit status matches a user-supplied + // predicate, and its stderr output matches a user-supplied regular + // expression. + // The user-supplied predicate may be a macro expression rather + // than a function pointer or functor, or else Wait and Passed could + // be combined. + virtual bool Passed(bool exit_status_ok) = 0; + + // Signals that the death test did not die as expected. + virtual void Abort(AbortReason reason) = 0; + + // Returns a human-readable outcome message regarding the outcome of + // the last death test. + static const char* LastMessage(); + + static void set_last_death_test_message(const std::string& message); + + private: + // A string containing a description of the outcome of the last death test. + static std::string last_death_test_message_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest); +}; + +// Factory interface for death tests. May be mocked out for testing. 
+class DeathTestFactory {
+ public:
+  virtual ~DeathTestFactory() { }
+  virtual bool Create(const char* statement, const RE* regex,
+                      const char* file, int line, DeathTest** test) = 0;
+};
+
+// A concrete DeathTestFactory implementation for normal use.
+class DefaultDeathTestFactory : public DeathTestFactory {
+ public:
+  virtual bool Create(const char* statement, const RE* regex,
+                      const char* file, int line, DeathTest** test);
+};
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+GTEST_API_ bool ExitedUnsuccessfully(int exit_status);
+
+// Traps C++ exceptions escaping statement and reports them as test
+// failures. Note that trapping SEH exceptions is not implemented here.
+# if GTEST_HAS_EXCEPTIONS
+# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+  try { \
+    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+  } catch (const ::std::exception& gtest_exception) { \
+    fprintf(\
+        stderr, \
+        "\n%s: Caught std::exception-derived exception escaping the " \
+        "death test statement. Exception message: %s\n", \
+        ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \
+        gtest_exception.what()); \
+    fflush(stderr); \
+    death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+  } catch (...) { \
+    death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+  }
+
+# else
+# define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+  GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+
+# endif
+
+// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
+// ASSERT_EXIT*, and EXPECT_EXIT*.
+# define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (::testing::internal::AlwaysTrue()) { \
+    const ::testing::internal::RE& gtest_regex = (regex); \
+    ::testing::internal::DeathTest* gtest_dt; \
+    if (!::testing::internal::DeathTest::Create(#statement, &gtest_regex, \
+        __FILE__, __LINE__, &gtest_dt)) { \
+      goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+    } \
+    if (gtest_dt != NULL) { \
+      ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \
+          gtest_dt_ptr(gtest_dt); \
+      switch (gtest_dt->AssumeRole()) { \
+        case ::testing::internal::DeathTest::OVERSEE_TEST: \
+          if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \
+            goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+          } \
+          break; \
+        case ::testing::internal::DeathTest::EXECUTE_TEST: { \
+          ::testing::internal::DeathTest::ReturnSentinel \
+              gtest_sentinel(gtest_dt); \
+          GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \
+          gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
+          break; \
+        } \
+        default: \
+          break; \
+      } \
+    } \
+  } else \
+    GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \
+      fail(::testing::internal::DeathTest::LastMessage())
+// The symbol "fail" here expands to something into which a message
+// can be streamed.
+
+// This macro is for implementing ASSERT/EXPECT_DEBUG_DEATH when compiled in
+// NDEBUG mode. In this case we need the statements to be executed, the regex is
+// ignored, and the macro must accept a streamed message even though the message
+// is never printed.
+# define GTEST_EXECUTE_STATEMENT_(statement, regex) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } else \ + ::testing::Message() + +// A class representing the parsed contents of the +// --gtest_internal_run_death_test flag, as it existed when +// RUN_ALL_TESTS was called. +class InternalRunDeathTestFlag { + public: + InternalRunDeathTestFlag(const std::string& a_file, + int a_line, + int an_index, + int a_write_fd) + : file_(a_file), line_(a_line), index_(an_index), + write_fd_(a_write_fd) {} + + ~InternalRunDeathTestFlag() { + if (write_fd_ >= 0) + posix::Close(write_fd_); + } + + const std::string& file() const { return file_; } + int line() const { return line_; } + int index() const { return index_; } + int write_fd() const { return write_fd_; } + + private: + std::string file_; + int line_; + int index_; + int write_fd_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag); +}; + +// Returns a newly created InternalRunDeathTestFlag object with fields +// initialized from the GTEST_FLAG(internal_run_death_test) flag if +// the flag is specified; otherwise returns NULL. +InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag(); + +#else // GTEST_HAS_DEATH_TEST + +// This macro is used for implementing macros such as +// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where +// death tests are not supported. Those macros must compile on such systems +// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on +// systems that support death tests. This allows one to write such a macro +// on a system that does not support death tests and be sure that it will +// compile on a death-test supporting system. +// +// Parameters: +// statement - A statement that a macro such as EXPECT_DEATH would test +// for program termination. This macro has to make sure this +// statement is compiled but not executed, to ensure that +// EXPECT_DEATH_IF_SUPPORTED compiles with a certain +// parameter iff EXPECT_DEATH compiles with it. +// regex - A regex that a macro such as EXPECT_DEATH would use to test +// the output of statement. This parameter has to be +// compiled but not evaluated by this macro, to ensure that +// this macro only accepts expressions that a macro such as +// EXPECT_DEATH would accept. +// terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED +// and a return statement for ASSERT_DEATH_IF_SUPPORTED. +// This ensures that ASSERT_DEATH_IF_SUPPORTED will not +// compile inside functions where ASSERT_DEATH doesn't +// compile. +// +// The branch that has an always false condition is used to ensure that +// statement and regex are compiled (and thus syntactically correct) but +// never executed. The unreachable code macro protects the terminator +// statement from generating an 'unreachable code' warning in case +// statement unconditionally returns or throws. The Message constructor at +// the end allows the syntax of streaming additional messages into the +// macro, for compilational compatibility with EXPECT_DEATH/ASSERT_DEATH. 
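+// [Editorial note, not upstream gtest text] A hedged usage sketch of the
+// macro documented above: a test written against the _IF_SUPPORTED wrappers
+// compiles everywhere and degrades to a logged warning on platforms without
+// death test support. Dereference() is a hypothetical helper, used only for
+// illustration:
+//
+//   TEST(NullDerefTest, Dies) {
+//     EXPECT_DEATH_IF_SUPPORTED(Dereference(NULL), "null")
+//         << "a streamed note is accepted even on unsupported platforms";
+//   }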
+# define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \
+    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+    if (::testing::internal::AlwaysTrue()) { \
+      GTEST_LOG_(WARNING) \
+          << "Death tests are not supported on this platform.\n" \
+          << "Statement '" #statement "' cannot be verified."; \
+    } else if (::testing::internal::AlwaysFalse()) { \
+      ::testing::internal::RE::PartialMatch(".*", (regex)); \
+      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+      terminator; \
+    } else \
+      ::testing::Message()
+
+#endif // GTEST_HAS_DEATH_TEST
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+namespace testing {
+
+// This flag controls the style of death tests. Valid values are "threadsafe",
+// meaning that the death test child process will re-execute the test binary
+// from the start, running only a single death test, or "fast",
+// meaning that the child process will execute the test logic immediately
+// after forking.
+GTEST_DECLARE_string_(death_test_style);
+
+#if GTEST_HAS_DEATH_TEST
+
+namespace internal {
+
+// Returns a Boolean value indicating whether the caller is currently
+// executing in the context of the death test child process. Tools such as
+// Valgrind heap checkers may need this to modify their behavior in death
+// tests. IMPORTANT: This is an internal utility. Using it may break the
+// implementation of death tests. User code MUST NOT use it.
+GTEST_API_ bool InDeathTestChild();
+
+} // namespace internal
+
+// The following macros are useful for writing death tests.
+
+// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is
+// executed:
+//
+// 1. It generates a warning if there is more than one active
+// thread. This is because it's safe to fork() or clone() only
+// when there is a single thread.
+//
+// 2. The parent process clone()s a sub-process and runs the death
+// test in it; the sub-process exits with code 0 at the end of the
+// death test, if it hasn't exited already.
+//
+// 3. The parent process waits for the sub-process to terminate.
+//
+// 4. The parent process checks the exit code and error message of
+// the sub-process.
+//
+// Examples:
+//
+// ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number");
+// for (int i = 0; i < 5; i++) {
+//   EXPECT_DEATH(server.ProcessRequest(i),
+//                "Invalid request .* in ProcessRequest()")
+//       << "Failed to die on request " << i;
+// }
+//
+// ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting");
+//
+// bool KilledBySIGHUP(int exit_code) {
+//   return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP;
+// }
+//
+// ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!");
+//
+// On the regular expressions used in death tests:
+//
+// On POSIX-compliant systems (*nix), we use the <regex.h> library,
+// which uses the POSIX extended regex syntax.
+//
+// On other platforms (e.g. Windows), we only support a simple regex
+// syntax implemented as part of Google Test. This limited
+// implementation should be enough most of the time when writing
+// death tests; though it lacks many features you can find in PCRE
+// or POSIX extended regex syntax. For example, we don't support
+// union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and
+// repetition count ("x{5,7}"), among others.
+//
+// Below is the syntax that we do support. We chose it to be a
+// subset of both PCRE and POSIX extended regex, so it's easy to
+// learn wherever you come from.
In the following: 'A' denotes a +// literal character, period (.), or a single \\ escape sequence; +// 'x' and 'y' denote regular expressions; 'm' and 'n' are for +// natural numbers. +// +// c matches any literal character c +// \\d matches any decimal digit +// \\D matches any character that's not a decimal digit +// \\f matches \f +// \\n matches \n +// \\r matches \r +// \\s matches any ASCII whitespace, including \n +// \\S matches any character that's not a whitespace +// \\t matches \t +// \\v matches \v +// \\w matches any letter, _, or decimal digit +// \\W matches any character that \\w doesn't match +// \\c matches any literal character c, which must be a punctuation +// . matches any single character except \n +// A? matches 0 or 1 occurrences of A +// A* matches 0 or many occurrences of A +// A+ matches 1 or many occurrences of A +// ^ matches the beginning of a string (not that of each line) +// $ matches the end of a string (not that of each line) +// xy matches x followed by y +// +// If you accidentally use PCRE or POSIX extended regex features +// not implemented by us, you will get a run-time failure. In that +// case, please try to rewrite your regular expression within the +// above syntax. +// +// This implementation is *not* meant to be as highly tuned or robust +// as a compiled regex library, but should perform well enough for a +// death test, which already incurs significant overhead by launching +// a child process. +// +// Known caveats: +// +// A "threadsafe" style death test obtains the path to the test +// program from argv[0] and re-executes it in the sub-process. For +// simplicity, the current implementation doesn't search the PATH +// when launching the sub-process. This means that the user must +// invoke the test program via a path that contains at least one +// path separator (e.g. path/to/foo_test and +// /absolute/path/to/bar_test are fine, but foo_test is not). This +// is rarely a problem as people usually don't put the test binary +// directory in PATH. +// +// TODO(wan@google.com): make thread-safe death tests search the PATH. + +// Asserts that a given statement causes the program to exit, with an +// integer exit status that satisfies predicate, and emitting error output +// that matches regex. +# define ASSERT_EXIT(statement, predicate, regex) \ + GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_) + +// Like ASSERT_EXIT, but continues on to successive tests in the +// test case, if any: +# define EXPECT_EXIT(statement, predicate, regex) \ + GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_) + +// Asserts that a given statement causes the program to exit, either by +// explicitly exiting with a nonzero exit code or being killed by a +// signal, and emitting error output that matches regex. +# define ASSERT_DEATH(statement, regex) \ + ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex) + +// Like ASSERT_DEATH, but continues on to successive tests in the +// test case, if any: +# define EXPECT_DEATH(statement, regex) \ + EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex) + +// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*: + +// Tests that an exit code describes a normal exit with a given exit code. +class GTEST_API_ ExitedWithCode { + public: + explicit ExitedWithCode(int exit_code); + bool operator()(int exit_status) const; + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ExitedWithCode& other); + + const int exit_code_; +}; + +# if !GTEST_OS_WINDOWS +// Tests that an exit code describes an exit due to termination by a +// given signal. +class GTEST_API_ KilledBySignal { + public: + explicit KilledBySignal(int signum); + bool operator()(int exit_status) const; + private: + const int signum_; +}; +# endif // !GTEST_OS_WINDOWS + +// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode. +// The death testing framework causes this to have interesting semantics, +// since the sideeffects of the call are only visible in opt mode, and not +// in debug mode. +// +// In practice, this can be used to test functions that utilize the +// LOG(DFATAL) macro using the following style: +// +// int DieInDebugOr12(int* sideeffect) { +// if (sideeffect) { +// *sideeffect = 12; +// } +// LOG(DFATAL) << "death"; +// return 12; +// } +// +// TEST(TestCase, TestDieOr12WorksInDgbAndOpt) { +// int sideeffect = 0; +// // Only asserts in dbg. +// EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death"); +// +// #ifdef NDEBUG +// // opt-mode has sideeffect visible. +// EXPECT_EQ(12, sideeffect); +// #else +// // dbg-mode no visible sideeffect. +// EXPECT_EQ(0, sideeffect); +// #endif +// } +// +// This will assert that DieInDebugReturn12InOpt() crashes in debug +// mode, usually due to a DCHECK or LOG(DFATAL), but returns the +// appropriate fallback value (12 in this case) in opt mode. If you +// need to test that a function has appropriate side-effects in opt +// mode, include assertions against the side-effects. A general +// pattern for this is: +// +// EXPECT_DEBUG_DEATH({ +// // Side-effects here will have an effect after this statement in +// // opt mode, but none in debug mode. +// EXPECT_EQ(12, DieInDebugOr12(&sideeffect)); +// }, "death"); +// +# ifdef NDEBUG + +# define EXPECT_DEBUG_DEATH(statement, regex) \ + GTEST_EXECUTE_STATEMENT_(statement, regex) + +# define ASSERT_DEBUG_DEATH(statement, regex) \ + GTEST_EXECUTE_STATEMENT_(statement, regex) + +# else + +# define EXPECT_DEBUG_DEATH(statement, regex) \ + EXPECT_DEATH(statement, regex) + +# define ASSERT_DEBUG_DEATH(statement, regex) \ + ASSERT_DEATH(statement, regex) + +# endif // NDEBUG for EXPECT_DEBUG_DEATH +#endif // GTEST_HAS_DEATH_TEST + +// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and +// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if +// death tests are supported; otherwise they just issue a warning. This is +// useful when you are combining death test assertions with normal test +// assertions in one test. +#if GTEST_HAS_DEATH_TEST +# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + EXPECT_DEATH(statement, regex) +# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + ASSERT_DEATH(statement, regex) +#else +# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, ) +# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return) +#endif + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ +// This file was GENERATED by command: +// pump.py gtest-param-test.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2008, Google Inc. +// All rights reserved. 
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: vladl@google.com (Vlad Losev)
+//
+// Macros and functions for implementing parameterized tests
+// in Google C++ Testing Framework (Google Test)
+//
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+
+
+// Value-parameterized tests allow you to test your code with different
+// parameters without writing multiple copies of the same test.
+//
+// Here is how you use value-parameterized tests:
+
+#if 0
+
+// To write value-parameterized tests, first you should define a fixture
+// class. It is usually derived from testing::TestWithParam<T> (see below for
+// another inheritance scheme that's sometimes useful in more complicated
+// class hierarchies), where T is the type of your parameter values.
+// TestWithParam<T> is itself derived from testing::Test. T can be any
+// copyable type. If it's a raw pointer, you are responsible for managing the
+// lifespan of the pointed values.
+
+class FooTest : public ::testing::TestWithParam<const char*> {
+  // You can implement all the usual class fixture members here.
+};
+
+// Then, use the TEST_P macro to define as many parameterized tests
+// for this fixture as you want. The _P suffix is for "parameterized"
+// or "pattern", whichever you prefer to think.
+
+TEST_P(FooTest, DoesBlah) {
+  // Inside a test, access the test parameter with the GetParam() method
+  // of the TestWithParam<T> class:
+  EXPECT_TRUE(foo.Blah(GetParam()));
+  ...
+}
+
+TEST_P(FooTest, HasBlahBlah) {
+  ...
+}
+
+// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test
+// case with any set of parameters you want. Google Test defines a number
+// of functions for generating test parameters. They return what we call
+// (surprise!) parameter generators. Here is a summary of them, which
+// are all in the testing namespace:
+//
+//
+// Range(begin, end [, step]) - Yields values {begin, begin+step,
+// begin+step+step, ...}. The values do not
+// include end.
+// step defaults to 1.
+// Values(v1, v2, ..., vN) - Yields values {v1, v2, ..., vN}.
+// ValuesIn(container) - Yields values from a C-style array, an STL
+// ValuesIn(begin,end) container, or an iterator range [begin, end).
+// Bool() - Yields sequence {false, true}.
+// Combine(g1, g2, ..., gN) - Yields all combinations (the Cartesian product
+// for the math savvy) of the values generated
+// by the N generators.
+//
+// For more details, see comments at the definitions of these functions below
+// in this file.
+//
+// The following statement will instantiate tests from the FooTest test case
+// each with parameter values "meeny", "miny", and "moe".
+
+INSTANTIATE_TEST_CASE_P(InstantiationName,
+                        FooTest,
+                        Values("meeny", "miny", "moe"));
+
+// To distinguish different instances of the pattern, (yes, you
+// can instantiate it more than once) the first argument to the
+// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the
+// actual test case name. Remember to pick unique prefixes for different
+// instantiations. The tests from the instantiation above will have
+// these names:
+//
+// * InstantiationName/FooTest.DoesBlah/0 for "meeny"
+// * InstantiationName/FooTest.DoesBlah/1 for "miny"
+// * InstantiationName/FooTest.DoesBlah/2 for "moe"
+// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny"
+// * InstantiationName/FooTest.HasBlahBlah/1 for "miny"
+// * InstantiationName/FooTest.HasBlahBlah/2 for "moe"
+//
+// You can use these names in --gtest_filter.
+//
+// This statement will instantiate all tests from FooTest again, each
+// with parameter values "cat" and "dog":
+
+const char* pets[] = {"cat", "dog"};
+INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
+
+// The tests from the instantiation above will have these names:
+//
+// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog"
+//
+// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests
+// in the given test case, whether their definitions come before or
+// AFTER the INSTANTIATE_TEST_CASE_P statement.
+//
+// Please also note that generator expressions (including parameters to the
+// generators) are evaluated in InitGoogleTest(), after main() has started.
+// This allows the user on one hand, to adjust generator parameters in order
+// to dynamically determine a set of tests to run and on the other hand,
+// give the user a chance to inspect the generated tests with Google Test
+// reflection API before RUN_ALL_TESTS() is executed.
+//
+// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
+// for more examples.
+//
+// In the future, we plan to publish the API for defining new parameter
+// generators. But for now this interface remains part of the internal
+// implementation and is subject to change.
+//
+//
+// A parameterized test fixture must be derived from testing::Test and from
+// testing::WithParamInterface<T>, where T is the type of the parameter
+// values. Inheriting from TestWithParam<T> satisfies that requirement because
+// TestWithParam<T> inherits from both Test and WithParamInterface<T>. In more
+// complicated hierarchies, however, it is occasionally useful to inherit
+// separately from Test and WithParamInterface<T>.
+// For example:
+
+class BaseTest : public ::testing::Test {
+  // You can inherit all the usual members for a non-parameterized test
+  // fixture here.
+};
+
+class DerivedTest : public BaseTest, public ::testing::WithParamInterface<int> {
+  // The usual test fixture members go here too.
+};
+
+TEST_F(BaseTest, HasFoo) {
+  // This is an ordinary non-parameterized test.
+}
+
+TEST_P(DerivedTest, DoesBlah) {
+  // GetParam works just the same here as if you inherit from TestWithParam<int>.
+  EXPECT_TRUE(foo.Blah(GetParam()));
+}
+
+#endif // 0
+
+
+#if !GTEST_OS_SYMBIAN
+# include <utility>
+#endif
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+
+#include <iterator>
+#include <utility>
+#include <vector>
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
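+// [Editorial note, not upstream gtest text] A brief sketch tying the
+// generators documented earlier to the DerivedTest fixture defined above;
+// the instantiation prefixes are arbitrary and must merely be unique:
+//
+//   INSTANTIATE_TEST_CASE_P(Small, DerivedTest, ::testing::Range(0, 3));
+//   INSTANTIATE_TEST_CASE_P(Picked, DerivedTest, ::testing::Values(7, 42));
+//
+// Range(0, 3) yields the parameters 0, 1 and 2 (end excluded, step 1).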
+// Copyright 2003 Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Dan Egnor (egnor@google.com)
+//
+// A "smart" pointer type with reference tracking. Every pointer to a
+// particular object is kept on a circular linked list. When the last pointer
+// to an object is destroyed or reassigned, the object is deleted.
+//
+// Used properly, this deletes the object when the last reference goes away.
+// There are several caveats:
+// - Like all reference counting schemes, cycles lead to leaks.
+// - Each smart pointer is actually two pointers (8 bytes instead of 4).
+// - Every time a pointer is assigned, the entire list of pointers to that
+// object is traversed. This class is therefore NOT SUITABLE when there
+// will often be more than two or three pointers to a particular object.
+// - References are only tracked as long as linked_ptr<> objects are copied.
+// If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
+// will happen (double deletion).
+//
+// A good use of this class is storing object references in STL containers.
+// You can safely put linked_ptr<> in a vector<>.
+// Other uses may not be as good.
+//
+// Note: If you use an incomplete type with linked_ptr<>, the class
+// *containing* linked_ptr<> must have a constructor and destructor (even
+// if they do nothing!).
+//
+// Bill Gibbons suggested we use something like this.
+//
+// Thread Safety:
+// Unlike other linked_ptr implementations, in this implementation
+// a linked_ptr object is thread-safe in the sense that:
+// - it's safe to copy linked_ptr objects concurrently,
+// - it's safe to copy *from* a linked_ptr and read its underlying
+// raw pointer (e.g. via get()) concurrently, and
+// - it's safe to write to two linked_ptrs that point to the same
+// shared object concurrently.
+// TODO(wan@google.com): rename this to safe_linked_ptr to avoid
+// confusion with normal linked_ptr.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+
+#include <stdlib.h>
+#include <assert.h>
+
+
+namespace testing {
+namespace internal {
+
+// Protects copying of all linked_ptr objects.
+GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex);
+
+// This is used internally by all instances of linked_ptr<>. It needs to be
+// a non-template class because different types of linked_ptr<> can refer to
+// the same object (linked_ptr<Base>(obj) vs linked_ptr<Derived>(obj)).
+// So, it needs to be possible for different types of linked_ptr to participate
+// in the same circular linked list, so we need a single class type here.
+//
+// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr<T>.
+class linked_ptr_internal {
+ public:
+  // Create a new circle that includes only this instance.
+  void join_new() {
+    next_ = this;
+  }
+
+  // Many linked_ptr operations may change p.link_ for some linked_ptr
+  // variable p in the same circle as this object. Therefore we need
+  // to prevent two such operations from occurring concurrently.
+  //
+  // Note that different types of linked_ptr objects can coexist in a
+  // circle (e.g. linked_ptr<Base>, linked_ptr<const Base>, and
+  // linked_ptr<Derived>). Therefore we must use a single mutex to
+  // protect all linked_ptr objects. This can create serious
+  // contention in production code, but is acceptable in a testing
+  // framework.
+
+  // Join an existing circle.
+  void join(linked_ptr_internal const* ptr)
+      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
+    MutexLock lock(&g_linked_ptr_mutex);
+
+    linked_ptr_internal const* p = ptr;
+    while (p->next_ != ptr) p = p->next_;
+    p->next_ = this;
+    next_ = ptr;
+  }
+
+  // Leave whatever circle we're part of. Returns true if we were the
+  // last member of the circle. Once this is done, you can join() another.
+  bool depart()
+      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
+    MutexLock lock(&g_linked_ptr_mutex);
+
+    if (next_ == this) return true;
+    linked_ptr_internal const* p = next_;
+    while (p->next_ != this) p = p->next_;
+    p->next_ = next_;
+    return false;
+  }
+
+ private:
+  mutable linked_ptr_internal const* next_;
+};
+
+template <typename T>
+class linked_ptr {
+ public:
+  typedef T element_type;
+
+  // Take over ownership of a raw pointer. This should happen as soon as
+  // possible after the object is created.
+  explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
+  ~linked_ptr() { depart(); }
+
+  // Copy an existing linked_ptr<>, adding ourselves to the list of references.
+  template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
+  linked_ptr(linked_ptr const& ptr) { // NOLINT
+    assert(&ptr != this);
+    copy(&ptr);
+  }
+
+  // Assignment releases the old value and acquires the new.
+  template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
+    depart();
+    copy(&ptr);
+    return *this;
+  }
+
+  linked_ptr& operator=(linked_ptr const& ptr) {
+    if (&ptr != this) {
+      depart();
+      copy(&ptr);
+    }
+    return *this;
+  }
+
+  // Smart pointer members.
+  void reset(T* ptr = NULL) {
+    depart();
+    capture(ptr);
+  }
+  T* get() const { return value_; }
+  T* operator->() const { return value_; }
+  T& operator*() const { return *value_; }
+
+  bool operator==(T* p) const { return value_ == p; }
+  bool operator!=(T* p) const { return value_ != p; }
+  template <typename U>
+  bool operator==(linked_ptr<U> const& ptr) const {
+    return value_ == ptr.get();
+  }
+  template <typename U>
+  bool operator!=(linked_ptr<U> const& ptr) const {
+    return value_ != ptr.get();
+  }
+
+ private:
+  template <typename U>
+  friend class linked_ptr;
+
+  T* value_;
+  linked_ptr_internal link_;
+
+  void depart() {
+    if (link_.depart()) delete value_;
+  }
+
+  void capture(T* ptr) {
+    value_ = ptr;
+    link_.join_new();
+  }
+
+  template <typename U> void copy(linked_ptr<U> const* ptr) {
+    value_ = ptr->get();
+    if (value_)
+      link_.join(&ptr->link_);
+    else
+      link_.join_new();
+  }
+};
+
+template<typename T> inline
+bool operator==(T* ptr, const linked_ptr<T>& x) {
+  return ptr == x.get();
+}
+
+template<typename T> inline
+bool operator!=(T* ptr, const linked_ptr<T>& x) {
+  return ptr != x.get();
+}
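+// [Editorial note, not upstream gtest text] A minimal usage sketch of the
+// reference-tracking semantics implemented above -- copies join one circle
+// and the last one to depart deletes the object:
+//
+//   linked_ptr<int> a(new int(5));
+//   linked_ptr<int> b = a;  // a and b now share the same circle.
+//   a.reset();              // b still keeps the int alive.
+//   // When b is destroyed, depart() returns true and the int is deleted.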
+// A function to convert T* into linked_ptr<T>
+// Doing e.g. make_linked_ptr(new FooBarBaz<T>(arg)) is a shorter notation
+// for linked_ptr<FooBarBaz<T> >(new FooBarBaz<T>(arg))
+template <typename T>
+linked_ptr<T> make_linked_ptr(T* ptr) {
+  return linked_ptr<T>(ptr);
+}
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Test - The Google C++ Testing Framework
+//
+// This file implements a universal value printer that can print a
+// value of any type T:
+//
+// void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
+//
+// A user can teach this function how to print a class type T by
+// defining either operator<<() or PrintTo() in the namespace that
+// defines T. More specifically, the FIRST defined function in the
+// following list will be used (assuming T is defined in namespace
+// foo):
+//
+// 1. foo::PrintTo(const T&, ostream*)
+// 2. operator<<(ostream&, const T&) defined in either foo or the
+// global namespace.
+//
+// If none of the above is defined, it will print the debug string of
+// the value if it is a protocol buffer, or print the raw bytes in the
+// value otherwise.
+//
+// To aid debugging: when T is a reference type, the address of the
+// value is also printed; when T is a (const) char pointer, both the
+// pointer value and the NUL-terminated string it points to are
+// printed.
+//
+// We also provide some convenient wrappers:
+//
+// // Prints a value to a string. For a (const or not) char
+// // pointer, the NUL-terminated string (but not the pointer) is
+// // printed.
+// std::string ::testing::PrintToString(const T& value);
+//
+// // Prints a value tersely: for a reference type, the referenced
+// // value (but not the address) is printed; for a (const or not) char
+// // pointer, the NUL-terminated string (but not the pointer) is
+// // printed.
+// void ::testing::internal::UniversalTersePrint(const T& value, ostream*);
+//
+// // Prints value using the type inferred by the compiler. The difference
+// // from UniversalTersePrint() is that this function prints both the
+// // pointer and the NUL-terminated string for a (const or not) char pointer.
+// void ::testing::internal::UniversalPrint(const T& value, ostream*);
+//
+// // Prints the fields of a tuple tersely to a string vector, one
+// // element for each field. Tuple support must be enabled in
+// // gtest-port.h.
+// std::vector<string> UniversalTersePrintTupleFieldsToStrings(
+//     const Tuple& value);
+//
+// Known limitation:
+//
+// The print primitives print the elements of an STL-style container
+// using the compiler-inferred type of *iter where iter is a
+// const_iterator of the container. When const_iterator is an input
+// iterator but not a forward iterator, this inferred type may not
+// match value_type, and the print output may be incorrect. In
+// practice, this is rarely a problem as for most containers
+// const_iterator is a forward iterator. We'll fix this if there's an
+// actual need for it. Note that this fix cannot rely on value_type
+// being defined as many user-defined container types don't have
+// value_type.
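+// [Editorial note, not upstream gtest text] A small sketch of the
+// PrintToString() wrapper described above, e.g. for enriching a failure
+// message; IsSorted() is a hypothetical predicate used only for
+// illustration:
+//
+//   std::vector<int> v(3, 1);
+//   EXPECT_TRUE(IsSorted(v)) << "for " << ::testing::PrintToString(v);
+//   // On failure this appends something like: for { 1, 1, 1 }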
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+
+#include <ostream> // NOLINT
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+namespace testing {
+
+// Definitions in the 'internal' and 'internal2' name spaces are
+// subject to change without notice. DO NOT USE THEM IN USER CODE!
+namespace internal2 {
+
+// Prints the given number of bytes in the given object to the given
+// ostream.
+GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes,
+                                     size_t count,
+                                     ::std::ostream* os);
+
+// For selecting which printer to use when a given type has neither <<
+// nor PrintTo().
+enum TypeKind {
+  kProtobuf, // a protobuf type
+  kConvertibleToInteger, // a type implicitly convertible to BiggestInt
+                         // (e.g. a named or unnamed enum type)
+  kOtherType // anything else
+};
+
+// TypeWithoutFormatter<T, kTypeKind>::PrintValue(value, os) is called
+// by the universal printer to print a value of type T when neither
+// operator<< nor PrintTo() is defined for T, where kTypeKind is the
+// "kind" of T as defined by enum TypeKind.
+template <typename T, TypeKind kTypeKind>
+class TypeWithoutFormatter {
+ public:
+  // This default version is called when kTypeKind is kOtherType.
+  static void PrintValue(const T& value, ::std::ostream* os) {
+    PrintBytesInObjectTo(reinterpret_cast<const unsigned char*>(&value),
+                         sizeof(value), os);
+  }
+};
+
+// We print a protobuf using its ShortDebugString() when the string
+// doesn't exceed this many characters; otherwise we print it using
+// DebugString() for better readability.
+const size_t kProtobufOneLinerMaxLength = 50;
+
+template <typename T>
+class TypeWithoutFormatter<T, kProtobuf> {
+ public:
+  static void PrintValue(const T& value, ::std::ostream* os) {
+    const ::testing::internal::string short_str = value.ShortDebugString();
+    const ::testing::internal::string pretty_str =
+        short_str.length() <= kProtobufOneLinerMaxLength ?
+        short_str : ("\n" + value.DebugString());
+    *os << ("<" + pretty_str + ">");
+  }
+};
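+// [Editorial note, not upstream gtest text] For instance, an enum with no
+// PrintTo() or operator<< of its own is implicitly convertible to
+// BiggestInt, so it takes the kConvertibleToInteger branch defined below
+// and is printed as its numeric value:
+//
+//   enum Color { kRed = 1, kGreen = 2 };
+//   // EXPECT_EQ(kRed, kGreen) reports the values as "1" and "2".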
+template <typename T>
+class TypeWithoutFormatter<T, kConvertibleToInteger> {
+ public:
+  // Since T has no << operator or PrintTo() but can be implicitly
+  // converted to BiggestInt, we print it as a BiggestInt.
+  //
+  // Most likely T is an enum type (either named or unnamed), in which
+  // case printing it as an integer is the desired behavior. In case
+  // T is not an enum, printing it as an integer is the best we can do
+  // given that it has no user-defined printer.
+  static void PrintValue(const T& value, ::std::ostream* os) {
+    const internal::BiggestInt kBigInt = value;
+    *os << kBigInt;
+  }
+};
+
+// Prints the given value to the given ostream. If the value is a
+// protocol message, its debug string is printed; if it's an enum or
+// of a type implicitly convertible to BiggestInt, it's printed as an
+// integer; otherwise the bytes in the value are printed. This is
+// what UniversalPrinter<T>::Print() does when it knows nothing about
+// type T and T has neither << operator nor PrintTo().
+//
+// A user can override this behavior for a class type Foo by defining
+// a << operator in the namespace where Foo is defined.
+//
+// We put this operator in namespace 'internal2' instead of 'internal'
+// to simplify the implementation, as much code in 'internal' needs to
+// use << in STL, which would conflict with our own << were it defined
+// in 'internal'.
+//
+// Note that this operator<< takes a generic std::basic_ostream<Char,
+// CharTraits> type instead of the more restricted std::ostream. If
+// we define it to take an std::ostream instead, we'll get an
+// "ambiguous overloads" compiler error when trying to print a type
+// Foo that supports streaming to std::basic_ostream<Char,
+// CharTraits>, as the compiler cannot tell whether
+// operator<<(std::ostream&, const T&) or
+// operator<<(std::basic_stream<Char, CharTraits>, const Foo&) is more
+// specific.
+template <typename Char, typename CharTraits, typename T>
+::std::basic_ostream<Char, CharTraits>& operator<<(
+    ::std::basic_ostream<Char, CharTraits>& os, const T& x) {
+  TypeWithoutFormatter<T,
+      (internal::IsAProtocolMessage<T>::value ? kProtobuf :
+       internal::ImplicitlyConvertible<const T&, internal::BiggestInt>::value ?
+       kConvertibleToInteger : kOtherType)>::PrintValue(x, &os);
+  return os;
+}
+
+} // namespace internal2
+} // namespace testing
+
+// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up
+// magic needed for implementing UniversalPrinter won't work.
+namespace testing_internal {
+
+// Used to print a value that is not an STL-style container when the
+// user doesn't define PrintTo() for it.
+template <typename T>
+void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) {
+  // With the following statement, during unqualified name lookup,
+  // testing::internal2::operator<< appears as if it was declared in
+  // the nearest enclosing namespace that contains both
+  // ::testing_internal and ::testing::internal2, i.e. the global
+  // namespace. For more details, refer to the C++ Standard section
+  // 7.3.4-1 [namespace.udir]. This allows us to fall back onto
+  // testing::internal2::operator<< in case T doesn't come with a <<
+  // operator.
+  //
+  // We cannot write 'using ::testing::internal2::operator<<;', which
+  // gcc 3.3 fails to compile due to a compiler bug.
+  using namespace ::testing::internal2; // NOLINT
+
+  // Assuming T is defined in namespace foo, in the next statement,
+  // the compiler will consider all of:
+  //
+  // 1. foo::operator<< (thanks to Koenig look-up),
+  // 2. ::operator<< (as the current namespace is enclosed in ::),
+  // 3. testing::internal2::operator<< (thanks to the using statement above).
+  //
+  // The operator<< whose type matches T best will be picked.
+  //
+  // We deliberately allow #2 to be a candidate, as sometimes it's
+  // impossible to define #1 (e.g. when foo is ::std, defining
+  // anything in it is undefined behavior unless you are a compiler
+  // vendor.).
+  *os << value;
+}
+
+} // namespace testing_internal
+
+namespace testing {
+namespace internal {
+
+// UniversalPrinter<T>::Print(value, ostream_ptr) prints the given
+// value to the given ostream. The caller must ensure that
+// 'ostream_ptr' is not NULL, or the behavior is undefined.
+//
+// We define UniversalPrinter as a class template (as opposed to a
+// function template), as we need to partially specialize it for
+// reference types, which cannot be done with function templates.
+template <typename T>
+class UniversalPrinter;
+
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os);
+
+// Used to print an STL-style container when the user doesn't define
+// a PrintTo() for it.
+template <typename C>
+void DefaultPrintTo(IsContainer /* dummy */,
+                    false_type /* is not a pointer */,
+                    const C& container, ::std::ostream* os) {
+  const size_t kMaxCount = 32; // The maximum number of elements to print.
+  *os << '{';
+  size_t count = 0;
+  for (typename C::const_iterator it = container.begin();
+       it != container.end(); ++it, ++count) {
+    if (count > 0) {
+      *os << ',';
+      if (count == kMaxCount) { // Enough has been printed.
+        *os << " ...";
+        break;
+      }
+    }
+    *os << ' ';
+    // We cannot call PrintTo(*it, os) here as PrintTo() doesn't
+    // handle *it being a native array.
+    internal::UniversalPrint(*it, os);
+  }
+
+  if (count > 0) {
+    *os << ' ';
+  }
+  *os << '}';
+}
+
+// Used to print a pointer that is neither a char pointer nor a member
+// pointer, when the user doesn't define PrintTo() for it. (A member
+// variable pointer or member function pointer doesn't really point to
+// a location in the address space. Their representation is
+// implementation-defined. Therefore they will be printed as raw
+// bytes.)
+template <typename T>
+void DefaultPrintTo(IsNotContainer /* dummy */,
+                    true_type /* is a pointer */,
+                    T* p, ::std::ostream* os) {
+  if (p == NULL) {
+    *os << "NULL";
+  } else {
+    // C++ doesn't allow casting from a function pointer to any object
+    // pointer.
+    //
+    // IsTrue() silences warnings: "Condition is always true",
+    // "unreachable code".
+    if (IsTrue(ImplicitlyConvertible<T*, const void*>::value)) {
+      // T is not a function type. We just call << to print p,
+      // relying on ADL to pick up user-defined << for their pointer
+      // types, if any.
+      *os << p;
+    } else {
+      // T is a function type, so '*os << p' doesn't do what we want
+      // (it just prints p as bool). We want to print p as a const
+      // void*. However, we cannot cast it to const void* directly,
+      // even using reinterpret_cast, as earlier versions of gcc
+      // (e.g. 3.4.5) cannot compile the cast when p is a function
+      // pointer. Casting to UInt64 first solves the problem.
+      *os << reinterpret_cast<const void*>(
+          reinterpret_cast<internal::UInt64>(p));
+    }
+  }
+}
+
+// Used to print a non-container, non-pointer value when the user
+// doesn't define PrintTo() for it.
+template <typename T>
+void DefaultPrintTo(IsNotContainer /* dummy */,
+                    false_type /* is not a pointer */,
+                    const T& value, ::std::ostream* os) {
+  ::testing_internal::DefaultPrintNonContainerTo(value, os);
+}
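+// [Editorial note, not upstream gtest text] A sketch of the customization
+// point documented below: defining PrintTo() in the type's own namespace
+// lets argument-dependent lookup pick it over the fallbacks above:
+//
+//   namespace foo {
+//   struct Point { int x; int y; };
+//   void PrintTo(const Point& p, ::std::ostream* os) {
+//     *os << "(" << p.x << ", " << p.y << ")";
+//   }
+//   }  // namespace foo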
+// Prints the given value using the << operator if it has one;
+// otherwise prints the bytes in it. This is what
+// UniversalPrinter<T>::Print() does when PrintTo() is not specialized
+// or overloaded for type T.
+//
+// A user can override this behavior for a class type Foo by defining
+// an overload of PrintTo() in the namespace where Foo is defined. We
+// give the user this option as sometimes defining a << operator for
+// Foo is not desirable (e.g. the coding style may prevent doing it,
+// or there is already a << operator but it doesn't do what the user
+// wants).
+template <typename T>
+void PrintTo(const T& value, ::std::ostream* os) {
+  // DefaultPrintTo() is overloaded. The type of its first two
+  // arguments determine which version will be picked. If T is an
+  // STL-style container, the version for container will be called; if
+  // T is a pointer, the pointer version will be called; otherwise the
+  // generic version will be called.
+  //
+  // Note that we check for container types here, prior to we check
+  // for protocol message types in our operator<<. The rationale is:
+  //
+  // For protocol messages, we want to give people a chance to
+  // override Google Mock's format by defining a PrintTo() or
+  // operator<<. For STL containers, other formats can be
+  // incompatible with Google Mock's format for the container
+  // elements; therefore we check for container types here to ensure
+  // that our format is used.
+  //
+  // The second argument of DefaultPrintTo() is needed to bypass a bug
+  // in Symbian's C++ compiler that prevents it from picking the right
+  // overload between:
+  //
+  // PrintTo(const T& x, ...);
+  // PrintTo(T* x, ...);
+  DefaultPrintTo(IsContainerTest<T>(0), is_pointer<T>(), value, os);
+}
+
+// The following list of PrintTo() overloads tells
+// UniversalPrinter<T>::Print() how to print standard types (built-in
+// types, strings, plain arrays, and pointers).
+
+// Overloads for various char types.
+GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os);
+GTEST_API_ void PrintTo(signed char c, ::std::ostream* os);
+inline void PrintTo(char c, ::std::ostream* os) {
+  // When printing a plain char, we always treat it as unsigned. This
+  // way, the output won't be affected by whether the compiler thinks
+  // char is signed or not.
+  PrintTo(static_cast<unsigned char>(c), os);
+}
+
+// Overloads for other simple built-in types.
+inline void PrintTo(bool x, ::std::ostream* os) {
+  *os << (x ? "true" : "false");
+}
+
+// Overload for wchar_t type.
+// Prints a wchar_t as a symbol if it is printable or as its internal
+// code otherwise and also as its decimal code (except for L'\0').
+// The L'\0' char is printed as "L'\\0'". The decimal code is printed
+// as signed integer when wchar_t is implemented by the compiler
+// as a signed type and is printed as an unsigned integer when wchar_t
+// is implemented as an unsigned type.
+GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os);
+
+// Overloads for C strings.
+GTEST_API_ void PrintTo(const char* s, ::std::ostream* os);
+inline void PrintTo(char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const char*>(s), os);
+}
+
+// signed/unsigned char is often used for representing binary data, so
+// we print pointers to it as void* to be safe.
+inline void PrintTo(const signed char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(signed char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(const unsigned char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(unsigned char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const void*>(s), os);
+}
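+// [Editorial note, not upstream gtest text] A consequence of the overloads
+// above: char-sized integer pointers print as addresses, not as strings,
+// while a plain char value always goes through the unsigned char overload:
+//
+//   signed char data[] = "ab";
+//   // PrintTo(data, &os) writes the pointer value, not "ab".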
+#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED) +// Overloads for wide C strings +GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os); +inline void PrintTo(wchar_t* s, ::std::ostream* os) { + PrintTo(ImplicitCast_(s), os); +} +#endif + +// Overload for C arrays. Multi-dimensional arrays are printed +// properly. + +// Prints the given number of elements in an array, without printing +// the curly braces. +template +void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) { + UniversalPrint(a[0], os); + for (size_t i = 1; i != count; i++) { + *os << ", "; + UniversalPrint(a[i], os); + } +} + +// Overloads for ::string and ::std::string. +#if GTEST_HAS_GLOBAL_STRING +GTEST_API_ void PrintStringTo(const ::string&s, ::std::ostream* os); +inline void PrintTo(const ::string& s, ::std::ostream* os) { + PrintStringTo(s, os); +} +#endif // GTEST_HAS_GLOBAL_STRING + +GTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os); +inline void PrintTo(const ::std::string& s, ::std::ostream* os) { + PrintStringTo(s, os); +} + +// Overloads for ::wstring and ::std::wstring. +#if GTEST_HAS_GLOBAL_WSTRING +GTEST_API_ void PrintWideStringTo(const ::wstring&s, ::std::ostream* os); +inline void PrintTo(const ::wstring& s, ::std::ostream* os) { + PrintWideStringTo(s, os); +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +#if GTEST_HAS_STD_WSTRING +GTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os); +inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) { + PrintWideStringTo(s, os); +} +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_TR1_TUPLE +// Overload for ::std::tr1::tuple. Needed for printing function arguments, +// which are packed as tuples. + +// Helper function for printing a tuple. T must be instantiated with +// a tuple type. +template +void PrintTupleTo(const T& t, ::std::ostream* os); + +// Overloaded PrintTo() for tuples of various arities. We support +// tuples of up-to 10 fields. The following implementation works +// regardless of whether tr1::tuple is implemented using the +// non-standard variadic template feature or not. + +inline void PrintTo(const ::std::tr1::tuple<>& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo(const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} + +template +void PrintTo( + const ::std::tr1::tuple& t, + ::std::ostream* os) { + PrintTupleTo(t, os); +} +#endif // GTEST_HAS_TR1_TUPLE + +// Overload for std::pair. +template +void PrintTo(const ::std::pair& value, ::std::ostream* os) { + *os << '('; + // We cannot use UniversalPrint(value.first, os) here, as T1 may be + // a reference type. The same for printing value.second. 
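+  // Illustrative example (editor's addition, assuming only the public
+  // PrintToString() helper declared below):
+  //   ::testing::PrintToString(::std::make_pair(3, true)) == "(3, true)"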
+ UniversalPrinter::Print(value.first, os); + *os << ", "; + UniversalPrinter::Print(value.second, os); + *os << ')'; +} + +// Implements printing a non-reference type T by letting the compiler +// pick the right overload of PrintTo() for T. +template +class UniversalPrinter { + public: + // MSVC warns about adding const to a function type, so we want to + // disable the warning. +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4180) // Temporarily disables warning 4180. +#endif // _MSC_VER + + // Note: we deliberately don't call this PrintTo(), as that name + // conflicts with ::testing::internal::PrintTo in the body of the + // function. + static void Print(const T& value, ::std::ostream* os) { + // By default, ::testing::internal::PrintTo() is used for printing + // the value. + // + // Thanks to Koenig look-up, if T is a class and has its own + // PrintTo() function defined in its namespace, that function will + // be visible here. Since it is more specific than the generic ones + // in ::testing::internal, it will be picked by the compiler in the + // following statement - exactly what we want. + PrintTo(value, os); + } + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. +#endif // _MSC_VER +}; + +// UniversalPrintArray(begin, len, os) prints an array of 'len' +// elements, starting at address 'begin'. +template +void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) { + if (len == 0) { + *os << "{}"; + } else { + *os << "{ "; + const size_t kThreshold = 18; + const size_t kChunkSize = 8; + // If the array has more than kThreshold elements, we'll have to + // omit some details by printing only the first and the last + // kChunkSize elements. + // TODO(wan@google.com): let the user control the threshold using a flag. + if (len <= kThreshold) { + PrintRawArrayTo(begin, len, os); + } else { + PrintRawArrayTo(begin, kChunkSize, os); + *os << ", ..., "; + PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os); + } + *os << " }"; + } +} +// This overload prints a (const) char array compactly. +GTEST_API_ void UniversalPrintArray( + const char* begin, size_t len, ::std::ostream* os); + +// This overload prints a (const) wchar_t array compactly. +GTEST_API_ void UniversalPrintArray( + const wchar_t* begin, size_t len, ::std::ostream* os); + +// Implements printing an array type T[N]. +template +class UniversalPrinter { + public: + // Prints the given array, omitting some elements when there are too + // many. + static void Print(const T (&a)[N], ::std::ostream* os) { + UniversalPrintArray(a, N, os); + } +}; + +// Implements printing a reference type T&. +template +class UniversalPrinter { + public: + // MSVC warns about adding const to a function type, so we want to + // disable the warning. +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4180) // Temporarily disables warning 4180. +#endif // _MSC_VER + + static void Print(const T& value, ::std::ostream* os) { + // Prints the address of the value. We use reinterpret_cast here + // as static_cast doesn't compile when T is a function type. + *os << "@" << reinterpret_cast(&value) << " "; + + // Then prints the value itself. + UniversalPrint(value, os); + } + +#ifdef _MSC_VER +# pragma warning(pop) // Restores the warning state. 
+#endif  // _MSC_VER
+};
+
+// Prints a value tersely: for a reference type, the referenced value
+// (but not the address) is printed; for a (const) char pointer, the
+// NUL-terminated string (but not the pointer) is printed.
+
+template <typename T>
+class UniversalTersePrinter {
+ public:
+  static void Print(const T& value, ::std::ostream* os) {
+    UniversalPrint(value, os);
+  }
+};
+template <typename T>
+class UniversalTersePrinter<T&> {
+ public:
+  static void Print(const T& value, ::std::ostream* os) {
+    UniversalPrint(value, os);
+  }
+};
+template <typename T, size_t N>
+class UniversalTersePrinter<T[N]> {
+ public:
+  static void Print(const T (&value)[N], ::std::ostream* os) {
+    UniversalPrinter<T[N]>::Print(value, os);
+  }
+};
+template <>
+class UniversalTersePrinter<const char*> {
+ public:
+  static void Print(const char* str, ::std::ostream* os) {
+    if (str == NULL) {
+      *os << "NULL";
+    } else {
+      UniversalPrint(string(str), os);
+    }
+  }
+};
+template <>
+class UniversalTersePrinter<char*> {
+ public:
+  static void Print(char* str, ::std::ostream* os) {
+    UniversalTersePrinter<const char*>::Print(str, os);
+  }
+};
+
+#if GTEST_HAS_STD_WSTRING
+template <>
+class UniversalTersePrinter<const wchar_t*> {
+ public:
+  static void Print(const wchar_t* str, ::std::ostream* os) {
+    if (str == NULL) {
+      *os << "NULL";
+    } else {
+      UniversalPrint(::std::wstring(str), os);
+    }
+  }
+};
+#endif
+
+template <>
+class UniversalTersePrinter<wchar_t*> {
+ public:
+  static void Print(wchar_t* str, ::std::ostream* os) {
+    UniversalTersePrinter<const wchar_t*>::Print(str, os);
+  }
+};
+
+template <typename T>
+void UniversalTersePrint(const T& value, ::std::ostream* os) {
+  UniversalTersePrinter<T>::Print(value, os);
+}
+
+// Prints a value using the type inferred by the compiler.  The
+// difference between this and UniversalTersePrint() is that for a
+// (const) char pointer, this prints both the pointer and the
+// NUL-terminated string.
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os) {
+  // A workaround for the bug in VC++ 7.1 that prevents us from instantiating
+  // UniversalPrinter with T directly.
+  typedef T T1;
+  UniversalPrinter<T1>::Print(value, os);
+}
+
+#if GTEST_HAS_TR1_TUPLE
+typedef ::std::vector<string> Strings;
+
+// This helper template allows PrintTo() for tuples and
+// UniversalTersePrintTupleFieldsToStrings() to be defined by
+// induction on the number of tuple fields.  The idea is that
+// TuplePrefixPrinter<N>::PrintPrefixTo(t, os) prints the first N
+// fields in tuple t, and can be defined in terms of
+// TuplePrefixPrinter<N - 1>.
+
+// The inductive case.
+template <size_t N>
+struct TuplePrefixPrinter {
+  // Prints the first N fields of a tuple.
+  template <typename Tuple>
+  static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) {
+    TuplePrefixPrinter<N - 1>::PrintPrefixTo(t, os);
+    *os << ", ";
+    UniversalPrinter<typename ::std::tr1::tuple_element<N - 1, Tuple>::type>
+        ::Print(::std::tr1::get<N - 1>(t), os);
+  }
+
+  // Tersely prints the first N fields of a tuple to a string vector,
+  // one element for each field.
+  template <typename Tuple>
+  static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) {
+    TuplePrefixPrinter<N - 1>::TersePrintPrefixToStrings(t, strings);
+    ::std::stringstream ss;
+    UniversalTersePrint(::std::tr1::get<N - 1>(t), &ss);
+    strings->push_back(ss.str());
+  }
+};
+
+// Base cases.
+template <> +struct TuplePrefixPrinter<0> { + template + static void PrintPrefixTo(const Tuple&, ::std::ostream*) {} + + template + static void TersePrintPrefixToStrings(const Tuple&, Strings*) {} +}; +// We have to specialize the entire TuplePrefixPrinter<> class +// template here, even though the definition of +// TersePrintPrefixToStrings() is the same as the generic version, as +// Embarcadero (formerly CodeGear, formerly Borland) C++ doesn't +// support specializing a method template of a class template. +template <> +struct TuplePrefixPrinter<1> { + template + static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) { + UniversalPrinter::type>:: + Print(::std::tr1::get<0>(t), os); + } + + template + static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) { + ::std::stringstream ss; + UniversalTersePrint(::std::tr1::get<0>(t), &ss); + strings->push_back(ss.str()); + } +}; + +// Helper function for printing a tuple. T must be instantiated with +// a tuple type. +template +void PrintTupleTo(const T& t, ::std::ostream* os) { + *os << "("; + TuplePrefixPrinter< ::std::tr1::tuple_size::value>:: + PrintPrefixTo(t, os); + *os << ")"; +} + +// Prints the fields of a tuple tersely to a string vector, one +// element for each field. See the comment before +// UniversalTersePrint() for how we define "tersely". +template +Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) { + Strings result; + TuplePrefixPrinter< ::std::tr1::tuple_size::value>:: + TersePrintPrefixToStrings(value, &result); + return result; +} +#endif // GTEST_HAS_TR1_TUPLE + +} // namespace internal + +template +::std::string PrintToString(const T& value) { + ::std::stringstream ss; + internal::UniversalTersePrinter::Print(value, &ss); + return ss.str(); +} + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_ + +#if GTEST_HAS_PARAM_TEST + +namespace testing { +namespace internal { + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Outputs a message explaining invalid registration of different +// fixture class for the same test case. This may happen when +// TEST_P macro is used to define two tests with the same name +// but in different namespaces. +GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name, + const char* file, int line); + +template class ParamGeneratorInterface; +template class ParamGenerator; + +// Interface for iterating over elements provided by an implementation +// of ParamGeneratorInterface. +template +class ParamIteratorInterface { + public: + virtual ~ParamIteratorInterface() {} + // A pointer to the base generator instance. + // Used only for the purposes of iterator comparison + // to make sure that two iterators belong to the same generator. + virtual const ParamGeneratorInterface* BaseGenerator() const = 0; + // Advances iterator to point to the next element + // provided by the generator. The caller is responsible + // for not calling Advance() on an iterator equal to + // BaseGenerator()->End(). + virtual void Advance() = 0; + // Clones the iterator object. Used for implementing copy semantics + // of ParamIterator. + virtual ParamIteratorInterface* Clone() const = 0; + // Dereferences the current iterator and provides (read-only) access + // to the pointed value. It is the caller's responsibility not to call + // Current() on an iterator equal to BaseGenerator()->End(). + // Used for implementing ParamGenerator::operator*(). 
+ virtual const T* Current() const = 0; + // Determines whether the given iterator and other point to the same + // element in the sequence generated by the generator. + // Used for implementing ParamGenerator::operator==(). + virtual bool Equals(const ParamIteratorInterface& other) const = 0; +}; + +// Class iterating over elements provided by an implementation of +// ParamGeneratorInterface. It wraps ParamIteratorInterface +// and implements the const forward iterator concept. +template +class ParamIterator { + public: + typedef T value_type; + typedef const T& reference; + typedef ptrdiff_t difference_type; + + // ParamIterator assumes ownership of the impl_ pointer. + ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {} + ParamIterator& operator=(const ParamIterator& other) { + if (this != &other) + impl_.reset(other.impl_->Clone()); + return *this; + } + + const T& operator*() const { return *impl_->Current(); } + const T* operator->() const { return impl_->Current(); } + // Prefix version of operator++. + ParamIterator& operator++() { + impl_->Advance(); + return *this; + } + // Postfix version of operator++. + ParamIterator operator++(int /*unused*/) { + ParamIteratorInterface* clone = impl_->Clone(); + impl_->Advance(); + return ParamIterator(clone); + } + bool operator==(const ParamIterator& other) const { + return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_); + } + bool operator!=(const ParamIterator& other) const { + return !(*this == other); + } + + private: + friend class ParamGenerator; + explicit ParamIterator(ParamIteratorInterface* impl) : impl_(impl) {} + scoped_ptr > impl_; +}; + +// ParamGeneratorInterface is the binary interface to access generators +// defined in other translation units. +template +class ParamGeneratorInterface { + public: + typedef T ParamType; + + virtual ~ParamGeneratorInterface() {} + + // Generator interface definition + virtual ParamIteratorInterface* Begin() const = 0; + virtual ParamIteratorInterface* End() const = 0; +}; + +// Wraps ParamGeneratorInterface and provides general generator syntax +// compatible with the STL Container concept. +// This class implements copy initialization semantics and the contained +// ParamGeneratorInterface instance is shared among all copies +// of the original object. This is possible because that instance is immutable. +template +class ParamGenerator { + public: + typedef ParamIterator iterator; + + explicit ParamGenerator(ParamGeneratorInterface* impl) : impl_(impl) {} + ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {} + + ParamGenerator& operator=(const ParamGenerator& other) { + impl_ = other.impl_; + return *this; + } + + iterator begin() const { return iterator(impl_->Begin()); } + iterator end() const { return iterator(impl_->End()); } + + private: + linked_ptr > impl_; +}; + +// Generates values from a range of two comparable values. Can be used to +// generate sequences of user-defined types that implement operator+() and +// operator<(). +// This class is used in the Range() function. 
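+// Illustrative sketch (editor's addition), assuming only the public Range()
+// factory declared in gtest-param-test.h:
+//
+//   ::testing::internal::ParamGenerator<int> gen = ::testing::Range(0, 10, 3);
+//   ::testing::internal::ParamGenerator<int>::iterator it = gen.begin();
+//   for (; it != gen.end(); ++it) {
+//     // *it takes the values 0, 3, 6, 9 in turn; 10 is excluded because
+//     // Range() generates the half-open interval [begin, end).
+//   }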
+template +class RangeGenerator : public ParamGeneratorInterface { + public: + RangeGenerator(T begin, T end, IncrementT step) + : begin_(begin), end_(end), + step_(step), end_index_(CalculateEndIndex(begin, end, step)) {} + virtual ~RangeGenerator() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, begin_, 0, step_); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, end_, end_index_, step_); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, T value, int index, + IncrementT step) + : base_(base), value_(value), index_(index), step_(step) {} + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + virtual void Advance() { + value_ = value_ + step_; + index_++; + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const T* Current() const { return &value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const int other_index = + CheckedDowncastToActualType(&other)->index_; + return index_ == other_index; + } + + private: + Iterator(const Iterator& other) + : ParamIteratorInterface(), + base_(other.base_), value_(other.value_), index_(other.index_), + step_(other.step_) {} + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + T value_; + int index_; + const IncrementT step_; + }; // class RangeGenerator::Iterator + + static int CalculateEndIndex(const T& begin, + const T& end, + const IncrementT& step) { + int end_index = 0; + for (T i = begin; i < end; i = i + step) + end_index++; + return end_index; + } + + // No implementation - assignment is unsupported. + void operator=(const RangeGenerator& other); + + const T begin_; + const T end_; + const IncrementT step_; + // The index for the end() iterator. All the elements in the generated + // sequence are indexed (0-based) to aid iterator comparison. + const int end_index_; +}; // class RangeGenerator + + +// Generates values from a pair of STL-style iterators. Used in the +// ValuesIn() function. The elements are copied from the source range +// since the source can be located on the stack, and the generator +// is likely to persist beyond that stack frame. 
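+// Illustrative sketch (editor's addition): because the elements are copied,
+// a generator built with the public ValuesIn() factory from
+// gtest-param-test.h stays valid after its source container is gone:
+//
+//   ::testing::internal::ParamGenerator<std::string> MakeGen() {
+//     std::vector<std::string> v;
+//     v.push_back("a");
+//     v.push_back("b");
+//     return ::testing::ValuesIn(v.begin(), v.end());  // copies "a" and "b"
+//   }  // v is destroyed here; the returned generator is unaffected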
+template <typename T>
+class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+  template <typename ForwardIterator>
+  ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
+      : container_(begin, end) {}
+  virtual ~ValuesInIteratorRangeGenerator() {}
+
+  virtual ParamIteratorInterface<T>* Begin() const {
+    return new Iterator(this, container_.begin());
+  }
+  virtual ParamIteratorInterface<T>* End() const {
+    return new Iterator(this, container_.end());
+  }
+
+ private:
+  typedef typename ::std::vector<T> ContainerType;
+
+  class Iterator : public ParamIteratorInterface<T> {
+   public:
+    Iterator(const ParamGeneratorInterface<T>* base,
+             typename ContainerType::const_iterator iterator)
+        : base_(base), iterator_(iterator) {}
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+      return base_;
+    }
+    virtual void Advance() {
+      ++iterator_;
+      value_.reset();
+    }
+    virtual ParamIteratorInterface<T>* Clone() const {
+      return new Iterator(*this);
+    }
+    // We need to use the cached value referenced by iterator_ because
+    // *iterator_ can return a temporary object (and of a type other than T),
+    // so just having "return &*iterator_;" doesn't work.
+    // value_ is updated here and not in Advance() because Advance()
+    // can advance iterator_ beyond the end of the range, and we cannot
+    // detect that fact. The client code, on the other hand, is
+    // responsible for not calling Current() on an out-of-range iterator.
+    virtual const T* Current() const {
+      if (value_.get() == NULL)
+        value_.reset(new T(*iterator_));
+      return value_.get();
+    }
+    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      return iterator_ ==
+          CheckedDowncastToActualType<const Iterator>(&other)->iterator_;
+    }
+
+   private:
+    Iterator(const Iterator& other)
+          // The explicit constructor call suppresses a false warning
+          // emitted by gcc when supplied with the -Wextra option.
+        : ParamIteratorInterface<T>(),
+          base_(other.base_),
+          iterator_(other.iterator_) {}
+
+    const ParamGeneratorInterface<T>* const base_;
+    typename ContainerType::const_iterator iterator_;
+    // A cached value of *iterator_. We keep it here to allow access by
+    // pointer in the wrapping iterator's operator->().
+    // value_ needs to be mutable to be accessed in Current().
+    // Use of scoped_ptr helps manage the cached value's lifetime,
+    // which is bound by the lifespan of the iterator itself.
+    mutable scoped_ptr<const T> value_;
+  };  // class ValuesInIteratorRangeGenerator::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const ValuesInIteratorRangeGenerator& other);
+
+  const ContainerType container_;
+};  // class ValuesInIteratorRangeGenerator
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Stores a parameter value and later creates tests parameterized with that
+// value.
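+// Illustrative end-to-end sketch (editor's addition, using only the public
+// TEST_P / INSTANTIATE_TEST_CASE_P API; FooTest and DoBar are hypothetical
+// names):
+//
+//   class FooTest : public ::testing::TestWithParam<int> {};
+//
+//   TEST_P(FooTest, DoBar) {
+//     int param = GetParam();  // the value stored via SetParam() in
+//                              // ParameterizedTestFactory::CreateTest() below
+//     EXPECT_GE(param, 0);
+//   }
+//
+//   INSTANTIATE_TEST_CASE_P(SequenceA, FooTest, ::testing::Values(1, 2));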
+template <class TestClass>
+class ParameterizedTestFactory : public TestFactoryBase {
+ public:
+  typedef typename TestClass::ParamType ParamType;
+  explicit ParameterizedTestFactory(ParamType parameter) :
+      parameter_(parameter) {}
+  virtual Test* CreateTest() {
+    TestClass::SetParam(&parameter_);
+    return new TestClass();
+  }
+
+ private:
+  const ParamType parameter_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactoryBase is a base class for meta-factories that create
+// test factories for passing into the MakeAndRegisterTestInfo function.
+template <class ParamType>
+class TestMetaFactoryBase {
+ public:
+  virtual ~TestMetaFactoryBase() {}
+
+  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0;
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactory creates test factories for passing into the
+// MakeAndRegisterTestInfo function. Since MakeAndRegisterTestInfo receives
+// ownership of the test factory pointer, the same factory object cannot be
+// passed into that method twice. But ParameterizedTestCaseInfo is going to
+// call it for each Test/Parameter value combination. Thus it needs a meta
+// factory creator class.
+template <class TestCase>
+class TestMetaFactory
+    : public TestMetaFactoryBase<typename TestCase::ParamType> {
+ public:
+  typedef typename TestCase::ParamType ParamType;
+
+  TestMetaFactory() {}
+
+  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) {
+    return new ParameterizedTestFactory<TestCase>(parameter);
+  }
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfoBase is a generic interface
+// to ParameterizedTestCaseInfo classes. ParameterizedTestCaseInfoBase
+// accumulates test information provided by TEST_P macro invocations
+// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations
+// and uses that information to register all resulting test instances
+// in the RegisterTests method. The ParameterizedTestCaseRegistry class holds
+// a collection of pointers to the ParameterizedTestCaseInfo objects
+// and calls RegisterTests() on each of them when asked.
+class ParameterizedTestCaseInfoBase {
+ public:
+  virtual ~ParameterizedTestCaseInfoBase() {}
+
+  // Base part of test case name for display purposes.
+  virtual const string& GetTestCaseName() const = 0;
+  // Test case id to verify identity.
+  virtual TypeId GetTestCaseTypeId() const = 0;
+  // UnitTest class invokes this method to register tests in this
+  // test case right before running them in RUN_ALL_TESTS macro.
+  // This method should not be called more than once on any single
+  // instance of a ParameterizedTestCaseInfoBase-derived class.
+  virtual void RegisterTests() = 0;
+
+ protected:
+  ParameterizedTestCaseInfoBase() {}
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P
+// macro invocations for a particular test case and generators
+// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that
+// test case. It registers tests with all values generated by all
+// generators when asked.
+template <class TestCase>
+class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
+ public:
+  // ParamType and GeneratorCreationFunc are private types but are required
+  // for declarations of public methods AddTestPattern() and
+  // AddTestCaseInstantiation().
+  typedef typename TestCase::ParamType ParamType;
+  // A function that returns an instance of the appropriate generator type.
+  typedef ParamGenerator<ParamType>(GeneratorCreationFunc)();
+
+  explicit ParameterizedTestCaseInfo(const char* name)
+      : test_case_name_(name) {}
+
+  // Test case base name for display purposes.
+  virtual const string& GetTestCaseName() const { return test_case_name_; }
+  // Test case id to verify identity.
+  virtual TypeId GetTestCaseTypeId() const { return GetTypeId<TestCase>(); }
+  // TEST_P macro uses AddTestPattern() to record information
+  // about a single test in a LocalTestInfo structure.
+  // test_case_name is the base name of the test case (without invocation
+  // prefix). test_base_name is the name of an individual test without
+  // parameter index. For the test SequenceA/FooTest.DoBar/1, FooTest is
+  // the test case base name and DoBar is the test base name.
+  void AddTestPattern(const char* test_case_name,
+                      const char* test_base_name,
+                      TestMetaFactoryBase<ParamType>* meta_factory) {
+    tests_.push_back(linked_ptr<TestInfo>(new TestInfo(test_case_name,
+                                                       test_base_name,
+                                                       meta_factory)));
+  }
+  // INSTANTIATE_TEST_CASE_P macro uses AddTestCaseInstantiation() to record
+  // information about a generator.
+  int AddTestCaseInstantiation(const string& instantiation_name,
+                               GeneratorCreationFunc* func,
+                               const char* /* file */,
+                               int /* line */) {
+    instantiations_.push_back(::std::make_pair(instantiation_name, func));
+    return 0;  // Return value used only to run this method in namespace scope.
+  }
+  // UnitTest class invokes this method to register tests in this test case
+  // right before running tests in RUN_ALL_TESTS macro.
+  // This method should not be called more than once on any single
+  // instance of a ParameterizedTestCaseInfoBase-derived class.
+  // UnitTest has a guard to prevent calling this method more than once.
+  virtual void RegisterTests() {
+    for (typename TestInfoContainer::iterator test_it = tests_.begin();
+         test_it != tests_.end(); ++test_it) {
+      linked_ptr<TestInfo> test_info = *test_it;
+      for (typename InstantiationContainer::iterator gen_it =
+               instantiations_.begin(); gen_it != instantiations_.end();
+               ++gen_it) {
+        const string& instantiation_name = gen_it->first;
+        ParamGenerator<ParamType> generator((*gen_it->second)());
+
+        string test_case_name;
+        if ( !instantiation_name.empty() )
+          test_case_name = instantiation_name + "/";
+        test_case_name += test_info->test_case_base_name;
+
+        int i = 0;
+        for (typename ParamGenerator<ParamType>::iterator param_it =
+                 generator.begin();
+             param_it != generator.end(); ++param_it, ++i) {
+          Message test_name_stream;
+          test_name_stream << test_info->test_base_name << "/" << i;
+          MakeAndRegisterTestInfo(
+              test_case_name.c_str(),
+              test_name_stream.GetString().c_str(),
+              NULL,  // No type parameter.
+              PrintToString(*param_it).c_str(),
+              GetTestCaseTypeId(),
+              TestCase::SetUpTestCase,
+              TestCase::TearDownTestCase,
+              test_info->test_meta_factory->CreateTestFactory(*param_it));
+        }  // for param_it
+      }  // for gen_it
+    }  // for test_it
+  }  // RegisterTests
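+  // Illustrative naming note (editor's addition): given
+  // INSTANTIATE_TEST_CASE_P(SequenceA, FooTest, ::testing::Values(1, 2)),
+  // RegisterTests() above registers two tests whose full names are
+  //   SequenceA/FooTest.DoBar/0   (parameter value 1)
+  //   SequenceA/FooTest.DoBar/1   (parameter value 2)
+  // following the <instantiation>/<test case>.<test>/<index> scheme.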
+
+ private:
+  // LocalTestInfo structure keeps information about a single test registered
+  // with TEST_P macro.
+  struct TestInfo {
+    TestInfo(const char* a_test_case_base_name,
+             const char* a_test_base_name,
+             TestMetaFactoryBase<ParamType>* a_test_meta_factory) :
+        test_case_base_name(a_test_case_base_name),
+        test_base_name(a_test_base_name),
+        test_meta_factory(a_test_meta_factory) {}
+
+    const string test_case_base_name;
+    const string test_base_name;
+    const scoped_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;
+  };
+  typedef ::std::vector<linked_ptr<TestInfo> > TestInfoContainer;
+  // Keeps pairs of <Instantiation name, Single parameter generating function>
+  // received from INSTANTIATE_TEST_CASE_P macros.
+  typedef ::std::vector<std::pair<string, GeneratorCreationFunc*> >
+      InstantiationContainer;
+
+  const string test_case_name_;
+  TestInfoContainer tests_;
+  InstantiationContainer instantiations_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo);
+};  // class ParameterizedTestCaseInfo
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseRegistry contains a map of
+// ParameterizedTestCaseInfoBase classes accessed by test case names. TEST_P
+// and INSTANTIATE_TEST_CASE_P macros use it to locate their corresponding
+// ParameterizedTestCaseInfo descriptors.
+class ParameterizedTestCaseRegistry {
+ public:
+  ParameterizedTestCaseRegistry() {}
+  ~ParameterizedTestCaseRegistry() {
+    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+         it != test_case_infos_.end(); ++it) {
+      delete *it;
+    }
+  }
+
+  // Looks up or creates and returns a structure containing information about
+  // tests and instantiations of a particular test case.
+  template <class TestCase>
+  ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
+      const char* test_case_name,
+      const char* file,
+      int line) {
+    ParameterizedTestCaseInfo<TestCase>* typed_test_info = NULL;
+    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+         it != test_case_infos_.end(); ++it) {
+      if ((*it)->GetTestCaseName() == test_case_name) {
+        if ((*it)->GetTestCaseTypeId() != GetTypeId<TestCase>()) {
+          // Complain about incorrect usage of Google Test facilities
+          // and terminate the program since we cannot guarantee correct
+          // test case setup and tear-down in this case.
+          ReportInvalidTestCaseType(test_case_name, file, line);
+          posix::Abort();
+        } else {
+          // At this point we are sure that the object we found is of the same
+          // type we are looking for, so we downcast it to that type
+          // without further checks.
+          typed_test_info = CheckedDowncastToActualType<
+              ParameterizedTestCaseInfo<TestCase> >(*it);
+        }
+        break;
+      }
+    }
+    if (typed_test_info == NULL) {
+      typed_test_info = new ParameterizedTestCaseInfo<TestCase>(
+          test_case_name);
+      test_case_infos_.push_back(typed_test_info);
+    }
+    return typed_test_info;
+  }
+  void RegisterTests() {
+    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+         it != test_case_infos_.end(); ++it) {
+      (*it)->RegisterTests();
+    }
+  }
+
+ private:
+  typedef ::std::vector<ParameterizedTestCaseInfoBase*> TestCaseInfoContainer;
+
+  TestCaseInfoContainer test_case_infos_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry);
+};
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_HAS_PARAM_TEST
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+// This file was GENERATED by command:
+//     pump.py gtest-param-util-generated.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: vladl@google.com (Vlad Losev) + +// Type and function utilities for implementing parameterized tests. +// This file is generated by a SCRIPT. DO NOT EDIT BY HAND! +// +// Currently Google Test supports at most 50 arguments in Values, +// and at most 10 arguments in Combine. Please contact +// googletestframework@googlegroups.com if you need more. +// Please note that the number of arguments to Combine is limited +// by the maximum arity of the implementation of tr1::tuple which is +// currently set at 10. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_ + +// scripts/fuse_gtest.py depends on gtest's own header being #included +// *unconditionally*. Therefore these #includes cannot be moved +// inside #if GTEST_HAS_PARAM_TEST. + +#if GTEST_HAS_PARAM_TEST + +namespace testing { + +// Forward declarations of ValuesIn(), which is implemented in +// include/gtest/gtest-param-test.h. +template +internal::ParamGenerator< + typename ::testing::internal::IteratorTraits::value_type> +ValuesIn(ForwardIterator begin, ForwardIterator end); + +template +internal::ParamGenerator ValuesIn(const T (&array)[N]); + +template +internal::ParamGenerator ValuesIn( + const Container& container); + +namespace internal { + +// Used in the Values() function to provide polymorphic capabilities. +template +class ValueArray1 { + public: + explicit ValueArray1(T1 v1) : v1_(v1) {} + + template + operator ParamGenerator() const { return ValuesIn(&v1_, &v1_ + 1); } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray1& other); + + const T1 v1_; +}; + +template +class ValueArray2 { + public: + ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray2& other); + + const T1 v1_; + const T2 v2_; +}; + +template +class ValueArray3 { + public: + ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray3& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; +}; + +template +class ValueArray4 { + public: + ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray4& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; +}; + +template +class ValueArray5 { + public: + ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray5& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; +}; + +template +class ValueArray6 { + public: + ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray6& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; +}; + +template +class ValueArray7 { + public: + ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray7& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; +}; + +template +class ValueArray8 { + public: + ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray8& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; +}; + +template +class ValueArray9 { + public: + ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray9& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; +}; + +template +class ValueArray10 { + public: + ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray10& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; +}; + +template +class ValueArray11 { + public: + ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray11& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; +}; + +template +class ValueArray12 { + public: + ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray12& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; +}; + +template +class ValueArray13 { + public: + ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray13& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; +}; + +template +class ValueArray14 { + public: + ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray14& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; +}; + +template +class ValueArray15 { + public: + ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray15& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; +}; + +template +class ValueArray16 { + public: + ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray16& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; +}; + +template +class ValueArray17 { + public: + ValueArray17(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray17& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; +}; + +template +class ValueArray18 { + public: + ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray18& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; +}; + +template +class ValueArray19 { + public: + ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray19& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; +}; + +template +class ValueArray20 { + public: + ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray20& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; +}; + +template +class ValueArray21 { + public: + ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray21& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; +}; + +template +class ValueArray22 { + public: + ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray22& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; +}; + +template +class ValueArray23 { + public: + ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray23& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; +}; + +template +class ValueArray24 { + public: + ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray24& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; +}; + +template +class ValueArray25 { + public: + ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray25& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; +}; + +template +class ValueArray26 { + public: + ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const ValueArray26& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; +}; + +template +class ValueArray27 { + public: + ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray27& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; +}; + +template +class ValueArray28 { + public: + ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray28& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; +}; + +template +class ValueArray29 { + public: + ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray29& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; +}; + +template +class ValueArray30 { + public: + ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray30& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; +}; + +template +class ValueArray31 { + public: + ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray31& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; +}; + +template +class ValueArray32 { + public: + ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray32& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; +}; + +template +class ValueArray33 { + public: + ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, + T33 v33) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray33& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; +}; + +template +class ValueArray34 { + public: + ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray34& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; +}; + +template +class ValueArray35 { + public: + ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), + v32_(v32), v33_(v33), v34_(v34), v35_(v35) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray35& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; +}; + +template +class ValueArray36 { + public: + ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), + v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray36& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; +}; + +template +class ValueArray37 { + public: + ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), + v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), + v36_(v36), v37_(v37) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray37& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; +}; + +template +class ValueArray38 { + public: + ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray38& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; +}; + +template +class ValueArray39 { + public: + ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray39& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; +}; + +template +class ValueArray40 { + public: + ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), + v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), + v40_(v40) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray40& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; +}; + +template +class ValueArray41 { + public: + ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, + T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray41& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; +}; + +template +class ValueArray42 { + public: + ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray42& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; +}; + +template +class ValueArray43 { + public: + ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), + v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), + v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray43& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; +}; + +template +class ValueArray44 { + public: + ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), + v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), + v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), + v43_(v43), v44_(v44) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray44& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; +}; + +template +class ValueArray45 { + public: + ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), + v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), + v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), + v42_(v42), v43_(v43), v44_(v44), v45_(v45) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray45& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; +}; + +template +class ValueArray46 { + public: + ValueArray46(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), + v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray46& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; +}; + +template +class ValueArray47 { + public: + ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), + v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46), + v47_(v47) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_), static_cast(v47_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray47& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; +}; + +template +class ValueArray48 { + public: + ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), + v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), + v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), + v46_(v46), v47_(v47), v48_(v48) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_), static_cast(v47_), + static_cast(v48_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray48& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; +}; + +template +class ValueArray49 { + public: + ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, + T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), + v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_), static_cast(v47_), + static_cast(v48_), static_cast(v49_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray49& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; + const T49 v49_; +}; + +template +class ValueArray50 { + public: + ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49, + T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), + v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_), static_cast(v47_), + static_cast(v48_), static_cast(v49_), static_cast(v50_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
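+
+  // Lifetime note (a reading of the code, not normative documentation):
+  // the conversion operator builds a local array and passes it to
+  // ValuesIn(), which copies the elements into the resulting generator,
+  // so the generator stays valid after the ValueArrayN temporary dies.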
+ void operator=(const ValueArray50& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; + const T49 v49_; + const T50 v50_; +}; + +# if GTEST_HAS_COMBINE +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Generates values from the Cartesian product of values produced +// by the argument generators. +// +template +class CartesianProductGenerator2 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator2(const ParamGenerator& g1, + const ParamGenerator& g2) + : g1_(g1), g2_(g2) {} + virtual ~CartesianProductGenerator2() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current2_; + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
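+      // Concretely, after a 2x3 product is exhausted, current1_ equals
+      // end1_ while current2_ has wrapped back to begin2_, so the
+      // component-wise comparison below would wrongly distinguish this
+      // iterator from End(); the AtEnd() short-circuit handles that case.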
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + ParamType current_value_; + }; // class CartesianProductGenerator2::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator2& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; +}; // class CartesianProductGenerator2 + + +template +class CartesianProductGenerator3 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator3(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3) + : g1_(g1), g2_(g2), g3_(g3) {} + virtual ~CartesianProductGenerator3() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
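+    // The Advance() below is a mixed-radix ("odometer") increment: the
+    // last component iterator moves fastest and every wrap-around carries
+    // into the iterator to its left. For two components over Values(0, 1)
+    // and Values('a', 'b'), an illustrative example, the visit order is
+    //   (0,'a'), (0,'b'), (1,'a'), (1,'b').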
+ virtual void Advance() { + assert(!AtEnd()); + ++current3_; + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + ParamType current_value_; + }; // class CartesianProductGenerator3::Iterator + + // No implementation - assignment is unsupported. 
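+
+  // A usage sketch for the three-generator case, assuming the public
+  // Combine()/Bool()/Range() entry points:
+  //
+  //   INSTANTIATE_TEST_CASE_P(
+  //       Grid, MyTupleTest,
+  //       ::testing::Combine(::testing::Values(1, 2),
+  //                          ::testing::Bool(),
+  //                          ::testing::Range(0, 3)));
+  //
+  // where MyTupleTest derives from
+  //   ::testing::TestWithParam< ::std::tr1::tuple<int, bool, int> >.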
+ void operator=(const CartesianProductGenerator3& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; +}; // class CartesianProductGenerator3 + + +template +class CartesianProductGenerator4 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator4(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {} + virtual ~CartesianProductGenerator4() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current4_; + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + ParamType current_value_; + }; // class CartesianProductGenerator4::Iterator + + // No implementation - assignment is unsupported. 
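+
+  // One consequence of the AtEnd() rule in the iterator above: if any
+  // component generator is empty, its begin equals its end, the whole
+  // product is empty, and the instantiation produces no tests at all.
+  // E.g. (illustrative) Combine(Values(1, 2), ValuesIn(std::vector<int>()))
+  // generates nothing.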
+ void operator=(const CartesianProductGenerator4& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; +}; // class CartesianProductGenerator4 + + +template +class CartesianProductGenerator5 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator5(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {} + virtual ~CartesianProductGenerator5() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current5_; + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + ParamType current_value_; + }; // class CartesianProductGenerator5::Iterator + + // No implementation - assignment is unsupported. 
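+
+  // The number of generated parameters is the product of the component
+  // generator sizes, so it grows quickly: sizes 2, 3 and 4 already yield
+  // 2 * 3 * 4 = 24 tuples. The generated support stops at ten component
+  // generators (CartesianProductGenerator10 below).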
+ void operator=(const CartesianProductGenerator5& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; +}; // class CartesianProductGenerator5 + + +template +class CartesianProductGenerator6 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator6(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {} + virtual ~CartesianProductGenerator6() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current6_; + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + ParamType current_value_; + }; // class CartesianProductGenerator6::Iterator + + // No implementation - assignment is unsupported. 
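+
+  // Note that the product is never materialized: ComputeCurrentValue()
+  // rebuilds current_value_ from the component iterators at each step,
+  // so memory use is proportional to the component generators, not to
+  // the number of tuples they can produce.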
+ void operator=(const CartesianProductGenerator6& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; +}; // class CartesianProductGenerator6 + + +template +class CartesianProductGenerator7 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator7(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {} + virtual ~CartesianProductGenerator7() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current7_; + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. 
+ GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + ParamType current_value_; + }; // class CartesianProductGenerator7::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator7& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; +}; // class CartesianProductGenerator7 + + +template +class CartesianProductGenerator8 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator8(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), + g8_(g8) {} + virtual ~CartesianProductGenerator8() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
+ virtual void Advance() { + assert(!AtEnd()); + ++current8_; + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. 
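+    // Keeping const copies of begin[i]_/end[i]_ inside the iterator,
+    // rather than re-querying the generators, makes each iterator
+    // self-contained, which is what lets Clone() be a plain copy; this
+    // is a reading of the code, not a documented guarantee.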
+ const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + ParamType current_value_; + }; // class CartesianProductGenerator8::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator8& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; +}; // class CartesianProductGenerator8 + + +template +class CartesianProductGenerator9 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator9(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8, const ParamGenerator& g9) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9) {} + virtual ~CartesianProductGenerator9() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end(), g9_, g9_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8, + const ParamGenerator& g9, + const typename ParamGenerator::iterator& current9) + : base_(base), + 
begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8), + begin9_(g9.begin()), end9_(g9.end()), current9_(current9) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current9_; + if (current9_ == end9_) { + current9_ = begin9_; + ++current8_; + } + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_ && + current9_ == typed_other->current9_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_), + begin9_(other.begin9_), + end9_(other.end9_), + current9_(other.current9_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_, + *current9_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_ || + current9_ == end9_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + const typename ParamGenerator::iterator begin9_; + const typename ParamGenerator::iterator end9_; + typename ParamGenerator::iterator current9_; + ParamType current_value_; + }; // class CartesianProductGenerator9::Iterator + + // No implementation - assignment is unsupported. 
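+
+  // The GTEST_CHECK_ in Equals() above turns an accidental comparison of
+  // iterators that came from two different Combine() expressions into a
+  // hard failure instead of an unchecked downcast.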
+ void operator=(const CartesianProductGenerator9& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; + const ParamGenerator g9_; +}; // class CartesianProductGenerator9 + + +template +class CartesianProductGenerator10 + : public ParamGeneratorInterface< ::std::tr1::tuple > { + public: + typedef ::std::tr1::tuple ParamType; + + CartesianProductGenerator10(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8, const ParamGenerator& g9, + const ParamGenerator& g10) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9), g10_(g10) {} + virtual ~CartesianProductGenerator10() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end(), g9_, g9_.end(), g10_, g10_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8, + const ParamGenerator& g9, + const typename ParamGenerator::iterator& current9, + const ParamGenerator& g10, + const typename ParamGenerator::iterator& current10) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8), + begin9_(g9.begin()), end9_(g9.end()), current9_(current9), + begin10_(g10.begin()), end10_(g10.end()), current10_(current10) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
+ virtual void Advance() { + assert(!AtEnd()); + ++current10_; + if (current10_ == end10_) { + current10_ = begin10_; + ++current9_; + } + if (current9_ == end9_) { + current9_ = begin9_; + ++current8_; + } + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_ && + current9_ == typed_other->current9_ && + current10_ == typed_other->current10_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_), + begin9_(other.begin9_), + end9_(other.end9_), + current9_(other.current9_), + begin10_(other.begin10_), + end10_(other.end10_), + current10_(other.current10_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_, + *current9_, *current10_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_ || + current9_ == end9_ || + current10_ == end10_; + } + + // No implementation - assignment is unsupported. 
+ void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + const typename ParamGenerator::iterator begin9_; + const typename ParamGenerator::iterator end9_; + typename ParamGenerator::iterator current9_; + const typename ParamGenerator::iterator begin10_; + const typename ParamGenerator::iterator end10_; + typename ParamGenerator::iterator current10_; + ParamType current_value_; + }; // class CartesianProductGenerator10::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator10& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; + const ParamGenerator g9_; + const ParamGenerator g10_; +}; // class CartesianProductGenerator10 + + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Helper classes providing Combine() with polymorphic features. They allow +// casting CartesianProductGeneratorN to ParamGenerator if T is +// convertible to U. +// +template +class CartesianProductHolder2 { + public: +CartesianProductHolder2(const Generator1& g1, const Generator2& g2) + : g1_(g1), g2_(g2) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator2( + static_cast >(g1_), + static_cast >(g2_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder2& other); + + const Generator1 g1_; + const Generator2 g2_; +}; // class CartesianProductHolder2 + +template +class CartesianProductHolder3 { + public: +CartesianProductHolder3(const Generator1& g1, const Generator2& g2, + const Generator3& g3) + : g1_(g1), g2_(g2), g3_(g3) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator3( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder3& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; +}; // class CartesianProductHolder3 + +template +class CartesianProductHolder4 { + public: +CartesianProductHolder4(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator4( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder4& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; +}; // class CartesianProductHolder4 + +template +class CartesianProductHolder5 { + public: +CartesianProductHolder5(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator5( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder5& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; +}; // class CartesianProductHolder5 + +template +class CartesianProductHolder6 { + public: +CartesianProductHolder6(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator6( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder6& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; +}; // class CartesianProductHolder6 + +template +class CartesianProductHolder7 { + public: +CartesianProductHolder7(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator7( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder7& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; +}; // class CartesianProductHolder7 + +template +class CartesianProductHolder8 { + public: +CartesianProductHolder8(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), + g8_(g8) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator8( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder8& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; +}; // class CartesianProductHolder8 + +template +class CartesianProductHolder9 { + public: +CartesianProductHolder9(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8, + const Generator9& g9) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator9( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_), + static_cast >(g9_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder9& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; + const Generator9 g9_; +}; // class CartesianProductHolder9 + +template +class CartesianProductHolder10 { + public: +CartesianProductHolder10(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8, + const Generator9& g9, const Generator10& g10) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9), g10_(g10) {} + template + operator ParamGenerator< ::std::tr1::tuple >() const { + return ParamGenerator< ::std::tr1::tuple >( + new CartesianProductGenerator10( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_), + static_cast >(g9_), + static_cast >(g10_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder10& other);
+
+ const Generator1 g1_;
+ const Generator2 g2_;
+ const Generator3 g3_;
+ const Generator4 g4_;
+ const Generator5 g5_;
+ const Generator6 g6_;
+ const Generator7 g7_;
+ const Generator8 g8_;
+ const Generator9 g9_;
+ const Generator10 g10_;
+}; // class CartesianProductHolder10
+
+# endif // GTEST_HAS_COMBINE
+
+} // namespace internal
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Functions producing parameter generators.
+//
+// Google Test uses these generators to produce parameters for value-
+// parameterized tests. When a parameterized test case is instantiated
+// with a particular generator, Google Test creates and runs tests
+// for each element in the sequence produced by the generator.
+//
+// In the following sample, tests from test case FooTest are instantiated
+// three times each, with parameter values 3, 5, and 8:
+//
+// class FooTest : public TestWithParam<int> { ... };
+//
+// TEST_P(FooTest, TestThis) {
+// }
+// TEST_P(FooTest, TestThat) {
+// }
+// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8));
+//
+
+// Range() returns generators providing sequences of values in a range.
+//
+// Synopsis:
+// Range(start, end)
+// - returns a generator producing a sequence of values {start, start+1,
+// start+2, ..., }.
+// Range(start, end, step)
+// - returns a generator producing a sequence of values {start, start+step,
+// start+step+step, ..., }.
+// Notes:
+// * The generated sequences never include end. For example, Range(1, 5)
+// returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2)
+// returns a generator producing {1, 3, 5, 7}.
+// * start and end must have the same type. That type may be any integral or
+// floating-point type or a user defined type satisfying these conditions:
+// * It must be assignable (have operator=() defined).
+// * It must have operator+() (operator+(int-compatible type) for
+// two-operand version).
+// * It must have operator<() defined.
+// Elements in the resulting sequences will also have that type.
+// * Condition start < end must be satisfied in order for resulting sequences
+// to contain any elements.
+//
+template <typename T, typename IncrementT>
+internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
+ return internal::ParamGenerator<T>(
+ new internal::RangeGenerator<T, IncrementT>(start, end, step));
+}
+
+template <typename T>
+internal::ParamGenerator<T> Range(T start, T end) {
+ return Range(start, end, 1);
+}
+
+// ValuesIn() function allows generation of tests with parameters coming from
+// a container.
+//
+// Synopsis:
+// ValuesIn(const T (&array)[N])
+// - returns a generator producing sequences with elements from
+// a C-style array.
+// ValuesIn(const Container& container)
+// - returns a generator producing sequences with elements from
+// an STL-style container.
+// ValuesIn(Iterator begin, Iterator end)
+// - returns a generator producing sequences with elements from
+// a range [begin, end) defined by a pair of STL-style iterators. These
+// iterators can also be plain C pointers.
+//
+// Please note that ValuesIn copies the values from the containers
+// passed in and keeps them to generate tests in RUN_ALL_TESTS().
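+//
+// The parameter type is deduced from the container's (or iterator's)
+// value_type, so any STL-style container works; e.g. a ::std::set<int>
+// yields int parameters. A minimal illustrative sketch (PortTest is a
+// hypothetical fixture assumed to derive from testing::TestWithParam<int>):
+//
+//   ::std::set<int> GetPorts() {
+//     ::std::set<int> ports;
+//     ports.insert(80);
+//     ports.insert(443);
+//     return ports;
+//   }
+//   INSTANTIATE_TEST_CASE_P(Ports, PortTest, ValuesIn(GetPorts()));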
+//
+// Examples:
+//
+// This instantiates tests from test case StringTest
+// each with C-string values of "foo", "bar", and "baz":
+//
+// const char* strings[] = {"foo", "bar", "baz"};
+// INSTANTIATE_TEST_CASE_P(StringSequence, StringTest, ValuesIn(strings));
+//
+// This instantiates tests from test case StlStringTest
+// each with STL strings with values "a" and "b":
+//
+// ::std::vector< ::std::string> GetParameterStrings() {
+// ::std::vector< ::std::string> v;
+// v.push_back("a");
+// v.push_back("b");
+// return v;
+// }
+//
+// INSTANTIATE_TEST_CASE_P(CharSequence,
+// StlStringTest,
+// ValuesIn(GetParameterStrings()));
+//
+//
+// This will also instantiate tests from CharTest
+// each with parameter values 'a' and 'b':
+//
+// ::std::list<char> GetParameterChars() {
+// ::std::list<char> list;
+// list.push_back('a');
+// list.push_back('b');
+// return list;
+// }
+// ::std::list<char> l = GetParameterChars();
+// INSTANTIATE_TEST_CASE_P(CharSequence2,
+// CharTest,
+// ValuesIn(l.begin(), l.end()));
+//
+template <typename ForwardIterator>
+internal::ParamGenerator<
+ typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end) {
+ typedef typename ::testing::internal::IteratorTraits<ForwardIterator>
+ ::value_type ParamType;
+ return internal::ParamGenerator<ParamType>(
+ new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
+}
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
+ return ValuesIn(array, array + N);
+}
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+ const Container& container) {
+ return ValuesIn(container.begin(), container.end());
+}
+
+// Values() allows generating tests from an explicitly specified list of
+// parameters.
+//
+// Synopsis:
+// Values(T v1, T v2, ..., T vN)
+// - returns a generator producing sequences with elements v1, v2, ..., vN.
+//
+// For example, this instantiates tests from test case BarTest each
+// with values "one", "two", and "three":
+//
+// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three"));
+//
+// This instantiates tests from test case BazTest each with values 1, 2, 3.5.
+// The exact type of values will depend on the type of parameter in BazTest.
+//
+// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
+//
+// Currently, Values() supports from 1 to 50 parameters.
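+//
+// Inside a TEST_P body the current parameter is read back with GetParam().
+// A minimal sketch (reusing BarTest from above, assumed to derive from
+// testing::TestWithParam<const char*>):
+//
+//   TEST_P(BarTest, ParamIsNonNull) {
+//     const char* param = GetParam();
+//     EXPECT_TRUE(param != NULL);
+//   }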
+// +template +internal::ValueArray1 Values(T1 v1) { + return internal::ValueArray1(v1); +} + +template +internal::ValueArray2 Values(T1 v1, T2 v2) { + return internal::ValueArray2(v1, v2); +} + +template +internal::ValueArray3 Values(T1 v1, T2 v2, T3 v3) { + return internal::ValueArray3(v1, v2, v3); +} + +template +internal::ValueArray4 Values(T1 v1, T2 v2, T3 v3, T4 v4) { + return internal::ValueArray4(v1, v2, v3, v4); +} + +template +internal::ValueArray5 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5) { + return internal::ValueArray5(v1, v2, v3, v4, v5); +} + +template +internal::ValueArray6 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6) { + return internal::ValueArray6(v1, v2, v3, v4, v5, v6); +} + +template +internal::ValueArray7 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7) { + return internal::ValueArray7(v1, v2, v3, v4, v5, + v6, v7); +} + +template +internal::ValueArray8 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) { + return internal::ValueArray8(v1, v2, v3, v4, + v5, v6, v7, v8); +} + +template +internal::ValueArray9 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) { + return internal::ValueArray9(v1, v2, v3, + v4, v5, v6, v7, v8, v9); +} + +template +internal::ValueArray10 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) { + return internal::ValueArray10(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10); +} + +template +internal::ValueArray11 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11) { + return internal::ValueArray11(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11); +} + +template +internal::ValueArray12 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12) { + return internal::ValueArray12(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12); +} + +template +internal::ValueArray13 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13) { + return internal::ValueArray13(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13); +} + +template +internal::ValueArray14 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) { + return internal::ValueArray14(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14); +} + +template +internal::ValueArray15 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) { + return internal::ValueArray15(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15); +} + +template +internal::ValueArray16 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16) { + return internal::ValueArray16(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16); +} + +template +internal::ValueArray17 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17) { + return internal::ValueArray17(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17); +} + +template +internal::ValueArray18 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18) { + return internal::ValueArray18(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18); +} + +template +internal::ValueArray19 
Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) { + return internal::ValueArray19(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19); +} + +template +internal::ValueArray20 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) { + return internal::ValueArray20(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20); +} + +template +internal::ValueArray21 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) { + return internal::ValueArray21(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21); +} + +template +internal::ValueArray22 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22) { + return internal::ValueArray22(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22); +} + +template +internal::ValueArray23 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23) { + return internal::ValueArray23(v1, v2, v3, + v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23); +} + +template +internal::ValueArray24 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24) { + return internal::ValueArray24(v1, v2, + v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, + v19, v20, v21, v22, v23, v24); +} + +template +internal::ValueArray25 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, + T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, + T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) { + return internal::ValueArray25(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, + v18, v19, v20, v21, v22, v23, v24, v25); +} + +template +internal::ValueArray26 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26) { + return internal::ValueArray26(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, + v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26); +} + +template +internal::ValueArray27 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27) { + return internal::ValueArray27(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, + v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27); +} + +template +internal::ValueArray28 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 
v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28) { + return internal::ValueArray28(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, + v28); +} + +template +internal::ValueArray29 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29) { + return internal::ValueArray29(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, + v27, v28, v29); +} + +template +internal::ValueArray30 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) { + return internal::ValueArray30(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, + v26, v27, v28, v29, v30); +} + +template +internal::ValueArray31 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) { + return internal::ValueArray31(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, + v25, v26, v27, v28, v29, v30, v31); +} + +template +internal::ValueArray32 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32) { + return internal::ValueArray32(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32); +} + +template +internal::ValueArray33 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33) { + return internal::ValueArray33(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33); +} + +template +internal::ValueArray34 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, + T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, + T31 v31, T32 v32, T33 v33, T34 v34) { + return internal::ValueArray34(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, + v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34); +} + +template +internal::ValueArray35 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, 
T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) { + return internal::ValueArray35(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, + v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35); +} + +template +internal::ValueArray36 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) { + return internal::ValueArray36(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36); +} + +template +internal::ValueArray37 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37) { + return internal::ValueArray37(v1, v2, v3, + v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36, v37); +} + +template +internal::ValueArray38 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37, T38 v38) { + return internal::ValueArray38(v1, v2, + v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, + v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, + v33, v34, v35, v36, v37, v38); +} + +template +internal::ValueArray39 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37, T38 v38, T39 v39) { + return internal::ValueArray39(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, + v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, + v32, v33, v34, v35, v36, v37, v38, v39); +} + +template +internal::ValueArray40 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, + T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, + T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, + T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, + T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) { + return internal::ValueArray40(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, + v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, + v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40); +} + +template +internal::ValueArray41 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + 
T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) { + return internal::ValueArray41(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, + v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, + v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41); +} + +template +internal::ValueArray42 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42) { + return internal::ValueArray42(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, + v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, + v42); +} + +template +internal::ValueArray43 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43) { + return internal::ValueArray43(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, + v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, + v41, v42, v43); +} + +template +internal::ValueArray44 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44) { + return internal::ValueArray44(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, + v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, + v40, v41, v42, v43, v44); +} + +template +internal::ValueArray45 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, + T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, + T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) { + return internal::ValueArray45(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, + v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, + v39, v40, v41, v42, v43, v44, v45); +} + +template +internal::ValueArray46 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 
v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) { + return internal::ValueArray46(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, + v38, v39, v40, v41, v42, v43, v44, v45, v46); +} + +template +internal::ValueArray47 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) { + return internal::ValueArray47(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, + v38, v39, v40, v41, v42, v43, v44, v45, v46, v47); +} + +template +internal::ValueArray48 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, + T48 v48) { + return internal::ValueArray48(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, + v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, + v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48); +} + +template +internal::ValueArray49 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, + T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, + T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, + T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, + T47 v47, T48 v48, T49 v49) { + return internal::ValueArray49(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, + v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, + v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49); +} + +template +internal::ValueArray50 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, + T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, + T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) { + return internal::ValueArray50(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, + v48, v49, v50); +} + +// Bool() allows generating tests with parameters in a set of (false, true). +// +// Synopsis: +// Bool() +// - returns a generator producing sequences with elements {false, true}. 
+//
+// It is useful when testing code that depends on Boolean flags. Combinations
+// of multiple flags can be tested when several Bool()'s are combined using
+// the Combine() function.
+//
+// In the following example, all tests in the test case FlagDependentTest
+// will be instantiated twice, with parameters false and true.
+//
+// class FlagDependentTest : public testing::TestWithParam<bool> {
+// virtual void SetUp() {
+// external_flag = GetParam();
+// }
+// };
+// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool());
+//
+inline internal::ParamGenerator<bool> Bool() {
+ return Values(false, true);
+}
+
+# if GTEST_HAS_COMBINE
+// Combine() allows the user to combine two or more sequences to produce
+// values of a Cartesian product of those sequences' elements.
+//
+// Synopsis:
+// Combine(gen1, gen2, ..., genN)
+// - returns a generator producing sequences with elements coming from
+// the Cartesian product of elements from the sequences generated by
+// gen1, gen2, ..., genN. The sequence elements will have a type of
+// tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types
+// of elements from sequences produced by gen1, gen2, ..., genN.
+//
+// Combine can have up to 10 arguments. This number is currently limited
+// by the maximum number of elements in the tuple implementation used by Google
+// Test.
+//
+// Example:
+//
+// This will instantiate tests in test case AnimalTest each one with
+// the parameter values tuple("cat", BLACK), tuple("cat", WHITE),
+// tuple("dog", BLACK), and tuple("dog", WHITE):
+//
+// enum Color { BLACK, GRAY, WHITE };
+// class AnimalTest
+// : public testing::TestWithParam<tuple<const char*, Color> > {...};
+//
+// TEST_P(AnimalTest, AnimalLooksNice) {...}
+//
+// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,
+// Combine(Values("cat", "dog"),
+// Values(BLACK, WHITE)));
+//
+// This will instantiate tests in FlagDependentTest with all variations of two
+// Boolean flags:
+//
+// class FlagDependentTest
+// : public testing::TestWithParam<tuple<bool, bool> > {
+// virtual void SetUp() {
+// // Assigns external_flag_1 and external_flag_2 values from the tuple.
+// tie(external_flag_1, external_flag_2) = GetParam();
+// }
+// };
+//
+// TEST_P(FlagDependentTest, TestFeature1) {
+// // Test your code using external_flag_1 and external_flag_2 here.
+// } +// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest, +// Combine(Bool(), Bool())); +// +template +internal::CartesianProductHolder2 Combine( + const Generator1& g1, const Generator2& g2) { + return internal::CartesianProductHolder2( + g1, g2); +} + +template +internal::CartesianProductHolder3 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3) { + return internal::CartesianProductHolder3( + g1, g2, g3); +} + +template +internal::CartesianProductHolder4 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4) { + return internal::CartesianProductHolder4( + g1, g2, g3, g4); +} + +template +internal::CartesianProductHolder5 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5) { + return internal::CartesianProductHolder5( + g1, g2, g3, g4, g5); +} + +template +internal::CartesianProductHolder6 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6) { + return internal::CartesianProductHolder6( + g1, g2, g3, g4, g5, g6); +} + +template +internal::CartesianProductHolder7 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7) { + return internal::CartesianProductHolder7( + g1, g2, g3, g4, g5, g6, g7); +} + +template +internal::CartesianProductHolder8 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7, const Generator8& g8) { + return internal::CartesianProductHolder8( + g1, g2, g3, g4, g5, g6, g7, g8); +} + +template +internal::CartesianProductHolder9 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7, const Generator8& g8, const Generator9& g9) { + return internal::CartesianProductHolder9( + g1, g2, g3, g4, g5, g6, g7, g8, g9); +} + +template +internal::CartesianProductHolder10 Combine( + const Generator1& g1, const Generator2& g2, const Generator3& g3, + const Generator4& g4, const Generator5& g5, const Generator6& g6, + const Generator7& g7, const Generator8& g8, const Generator9& g9, + const Generator10& g10) { + return internal::CartesianProductHolder10( + g1, g2, g3, g4, g5, g6, g7, g8, g9, g10); +} +# endif // GTEST_HAS_COMBINE + + + +# define TEST_P(test_case_name, test_name) \ + class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \ + : public test_case_name { \ + public: \ + GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \ + virtual void TestBody(); \ + private: \ + static int AddToRegistry() { \ + ::testing::UnitTest::GetInstance()->parameterized_test_registry(). 
\
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, __FILE__, __LINE__)->AddTestPattern(\
+ #test_case_name, \
+ #test_name, \
+ new ::testing::internal::TestMetaFactory< \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \
+ return 0; \
+ } \
+ static int gtest_registering_dummy_; \
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
+ }; \
+ int GTEST_TEST_CLASS_NAME_(test_case_name, \
+ test_name)::gtest_registering_dummy_ = \
+ GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
+ void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
+
+# define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) \
+ ::testing::internal::ParamGenerator<test_case_name::ParamType> \
+ gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \
+ int gtest_##prefix##test_case_name##_dummy_ = \
+ ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+ GetTestCasePatternHolder<test_case_name>(\
+ #test_case_name, __FILE__, __LINE__)->AddTestCaseInstantiation(\
+ #prefix, \
+ &gtest_##prefix##test_case_name##_EvalGenerator_, \
+ __FILE__, __LINE__)
+
+} // namespace testing
+
+#endif // GTEST_HAS_PARAM_TEST
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Google C++ Testing Framework definitions useful in production code.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+
+// When you need to test the private or protected members of a class,
+// use the FRIEND_TEST macro to declare your tests as friends of the
+// class. For example:
+//
+// class MyClass {
+// private:
+// void MyMethod();
+// FRIEND_TEST(MyClassTest, MyMethod);
+// };
+//
+// class MyClassTest : public testing::Test {
+// // ...
+// };
+//
+// TEST_F(MyClassTest, MyMethod) {
+// // Can call MyClass::MyMethod() here.
+// }
+
+#define FRIEND_TEST(test_case_name, test_name)\
+friend class test_case_name##_##test_name##_Test
+
+#endif // GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+
+#include <iosfwd>
+#include <vector>
+
+namespace testing {
+
+// A copyable object representing the result of a test part (i.e. an
+// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCEED()).
+//
+// Don't inherit from TestPartResult as its destructor is not virtual.
+class GTEST_API_ TestPartResult {
+ public:
+ // The possible outcomes of a test part (i.e. an assertion or an
+ // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
+ enum Type {
+ kSuccess, // Succeeded.
+ kNonFatalFailure, // Failed but the test can continue.
+ kFatalFailure // Failed and the test should be terminated.
+ };
+
+ // C'tor. TestPartResult does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestPartResult object.
+ TestPartResult(Type a_type,
+ const char* a_file_name,
+ int a_line_number,
+ const char* a_message)
+ : type_(a_type),
+ file_name_(a_file_name == NULL ? "" : a_file_name),
+ line_number_(a_line_number),
+ summary_(ExtractSummary(a_message)),
+ message_(a_message) {
+ }
+
+ // Gets the outcome of the test part.
+ Type type() const { return type_; }
+
+ // Gets the name of the source file where the test part took place, or
+ // NULL if it's unknown.
+ const char* file_name() const {
+ return file_name_.empty() ? NULL : file_name_.c_str();
+ }
+
+ // Gets the line in the source file where the test part took place,
+ // or -1 if it's unknown.
+ int line_number() const { return line_number_; }
+
+ // Gets the summary of the failure message.
+ const char* summary() const { return summary_.c_str(); }
+
+ // Gets the message associated with the test part.
+ const char* message() const { return message_.c_str(); } + + // Returns true iff the test part passed. + bool passed() const { return type_ == kSuccess; } + + // Returns true iff the test part failed. + bool failed() const { return type_ != kSuccess; } + + // Returns true iff the test part non-fatally failed. + bool nonfatally_failed() const { return type_ == kNonFatalFailure; } + + // Returns true iff the test part fatally failed. + bool fatally_failed() const { return type_ == kFatalFailure; } + + private: + Type type_; + + // Gets the summary of the failure message by omitting the stack + // trace in it. + static std::string ExtractSummary(const char* message); + + // The name of the source file where the test part took place, or + // "" if the source file is unknown. + std::string file_name_; + // The line in the source file where the test part took place, or -1 + // if the line number is unknown. + int line_number_; + std::string summary_; // The test failure summary. + std::string message_; // The test failure message. +}; + +// Prints a TestPartResult object. +std::ostream& operator<<(std::ostream& os, const TestPartResult& result); + +// An array of TestPartResult objects. +// +// Don't inherit from TestPartResultArray as its destructor is not +// virtual. +class GTEST_API_ TestPartResultArray { + public: + TestPartResultArray() {} + + // Appends the given TestPartResult to the array. + void Append(const TestPartResult& result); + + // Returns the TestPartResult at the given index (0-based). + const TestPartResult& GetTestPartResult(int index) const; + + // Returns the number of TestPartResult objects in the array. + int size() const; + + private: + std::vector array_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray); +}; + +// This interface knows how to report a test part result. +class TestPartResultReporterInterface { + public: + virtual ~TestPartResultReporterInterface() {} + + virtual void ReportTestPartResult(const TestPartResult& result) = 0; +}; + +namespace internal { + +// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a +// statement generates new fatal failures. To do so it registers itself as the +// current test part result reporter. Besides checking if fatal failures were +// reported, it only delegates the reporting to the former result reporter. +// The original result reporter is restored in the destructor. +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +class GTEST_API_ HasNewFatalFailureHelper + : public TestPartResultReporterInterface { + public: + HasNewFatalFailureHelper(); + virtual ~HasNewFatalFailureHelper(); + virtual void ReportTestPartResult(const TestPartResult& result); + bool has_new_fatal_failure() const { return has_new_fatal_failure_; } + private: + bool has_new_fatal_failure_; + TestPartResultReporterInterface* original_reporter_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper); +}; + +} // namespace internal + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// This header implements typed tests and type-parameterized tests.
+
+// Typed (aka type-driven) tests repeat the same test for types in a
+// list. You must know which types you want to test with when writing
+// typed tests. Here's how you do it:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ public:
+ ...
+ typedef std::list<T> List;
+ static T shared_;
+ T value_;
+};
+
+// Next, associate a list of types with the test case, which will be
+// repeated for each type in the list. The typedef is necessary for
+// the macro to parse correctly.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+TYPED_TEST_CASE(FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+// TYPED_TEST_CASE(FooTest, int);
+
+// Then, use TYPED_TEST() instead of TEST_F() to define as many typed
+// tests for this test case as you want.
+TYPED_TEST(FooTest, DoesBlah) {
+ // Inside a test, refer to TypeParam to get the type parameter.
+ // Since we are inside a derived class template, C++ requires us to
+ // visit the members of FooTest via 'this'.
+ TypeParam n = this->value_;
+
+ // To visit static members of the fixture, add the TestFixture::
+ // prefix.
+ n += TestFixture::shared_;
+
+ // To refer to typedefs in the fixture, add the "typename
+ // TestFixture::" prefix.
+ typename TestFixture::List values;
+ values.push_back(n);
+ ...
+}
+
+TYPED_TEST(FooTest, HasPropertyA) { ... }
+
+#endif // 0
+
+// Type-parameterized tests are abstract test patterns parameterized
+// by a type. Compared with typed tests, type-parameterized tests
+// allow you to define the test pattern without knowing what the type
+// parameters are. The defined pattern can be instantiated with
+// different types any number of times, in any number of translation
+// units.
+//
+// If you are designing an interface or concept, you can define a
+// suite of type-parameterized tests to verify properties that any
+// valid implementation of the interface/concept should have.
Then, +// each implementation can easily instantiate the test suite to verify +// that it conforms to the requirements, without having to write +// similar tests repeatedly. Here's an example: + +#if 0 + +// First, define a fixture class template. It should be parameterized +// by a type. Remember to derive it from testing::Test. +template +class FooTest : public testing::Test { + ... +}; + +// Next, declare that you will define a type-parameterized test case +// (the _P suffix is for "parameterized" or "pattern", whichever you +// prefer): +TYPED_TEST_CASE_P(FooTest); + +// Then, use TYPED_TEST_P() to define as many type-parameterized tests +// for this type-parameterized test case as you want. +TYPED_TEST_P(FooTest, DoesBlah) { + // Inside a test, refer to TypeParam to get the type parameter. + TypeParam n = 0; + ... +} + +TYPED_TEST_P(FooTest, HasPropertyA) { ... } + +// Now the tricky part: you need to register all test patterns before +// you can instantiate them. The first argument of the macro is the +// test case name; the rest are the names of the tests in this test +// case. +REGISTER_TYPED_TEST_CASE_P(FooTest, + DoesBlah, HasPropertyA); + +// Finally, you are free to instantiate the pattern with the types you +// want. If you put the above code in a header file, you can #include +// it in multiple C++ source files and instantiate it multiple times. +// +// To distinguish different instances of the pattern, the first +// argument to the INSTANTIATE_* macro is a prefix that will be added +// to the actual test case name. Remember to pick unique prefixes for +// different instances. +typedef testing::Types MyTypes; +INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes); + +// If the type list contains only one type, you can write that type +// directly without Types<...>: +// INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int); + +#endif // 0 + + +// Implements typed tests. + +#if GTEST_HAS_TYPED_TEST + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the name of the typedef for the type parameters of the +// given test case. +# define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_ + +// The 'Types' template argument below must have spaces around it +// since some compilers may choke on '>>' when passing a template +// instance (e.g. Types) +# define TYPED_TEST_CASE(CaseName, Types) \ + typedef ::testing::internal::TypeList< Types >::type \ + GTEST_TYPE_PARAMS_(CaseName) + +# define TYPED_TEST(CaseName, TestName) \ + template \ + class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \ + : public CaseName { \ + private: \ + typedef CaseName TestFixture; \ + typedef gtest_TypeParam_ TypeParam; \ + virtual void TestBody(); \ + }; \ + bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \ + ::testing::internal::TypeParameterizedTest< \ + CaseName, \ + ::testing::internal::TemplateSel< \ + GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \ + GTEST_TYPE_PARAMS_(CaseName)>::Register(\ + "", #CaseName, #TestName, 0); \ + template \ + void GTEST_TEST_CLASS_NAME_(CaseName, TestName)::TestBody() + +#endif // GTEST_HAS_TYPED_TEST + +// Implements type-parameterized tests. + +#if GTEST_HAS_TYPED_TEST_P + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Expands to the namespace name that the type-parameterized tests for +// the given type-parameterized test case are defined in. The exact +// name of the namespace is subject to change without notice. 
+
+#endif  // GTEST_HAS_TYPED_TEST
+
+// Implements type-parameterized tests.
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the namespace name that the type-parameterized tests for
+// the given type-parameterized test case are defined in. The exact
+// name of the namespace is subject to change without notice.
+# define GTEST_CASE_NAMESPACE_(TestCaseName) \
+  gtest_case_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the variable used to remember the names of
+// the defined tests in the given test case.
+# define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \
+  gtest_typed_test_case_p_state_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
+//
+// Expands to the name of the variable used to remember the names of
+// the registered tests in the given test case.
+# define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \
+  gtest_registered_test_names_##TestCaseName##_
+
+// The variables defined in the type-parameterized test macros are
+// static as typically these macros are used in a .h file that can be
+// #included in multiple translation units linked together.
+# define TYPED_TEST_CASE_P(CaseName) \
+  static ::testing::internal::TypedTestCasePState \
+      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName)
+
+# define TYPED_TEST_P(CaseName, TestName) \
+  namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+  template <typename gtest_TypeParam_> \
+  class TestName : public CaseName<gtest_TypeParam_> { \
+   private: \
+    typedef CaseName<gtest_TypeParam_> TestFixture; \
+    typedef gtest_TypeParam_ TypeParam; \
+    virtual void TestBody(); \
+  }; \
+  static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
+      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\
+          __FILE__, __LINE__, #CaseName, #TestName); \
+  } \
+  template <typename gtest_TypeParam_> \
+  void GTEST_CASE_NAMESPACE_(CaseName)::TestName<gtest_TypeParam_>::TestBody()
+
+# define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) \
+  namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+  typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
+  } \
+  static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \
+      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\
+          __FILE__, __LINE__, #__VA_ARGS__)
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+# define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \
+  bool gtest_##Prefix##_##CaseName GTEST_ATTRIBUTE_UNUSED_ = \
+      ::testing::internal::TypeParameterizedTestCase<CaseName, \
+          GTEST_CASE_NAMESPACE_(CaseName)::gtest_AllTests_, \
+          ::testing::internal::TypeList< Types >::type>::Register(\
+              #Prefix, #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName))
+
+#endif  // GTEST_HAS_TYPED_TEST_P
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// Depending on the platform, different string classes are available.
+// On Linux, in addition to ::std::string, Google also makes use of
+// class ::string, which has the same interface as ::std::string, but
+// has a different implementation.
+//
+// The user can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that
+// ::string is available AND is a distinct type to ::std::string, or
+// define it to 0 to indicate otherwise.
+//
+// If the user's ::std::string and ::string are the same class due to
+// aliasing, he should define GTEST_HAS_GLOBAL_STRING to 0.
+//
+// If the user doesn't define GTEST_HAS_GLOBAL_STRING, it is defined
+// heuristically.
+
+namespace testing {
+
+// Declares the flags.
+
+// This flag temporarily enables the disabled tests.
+GTEST_DECLARE_bool_(also_run_disabled_tests);
+
+// This flag brings the debugger on an assertion failure.
+GTEST_DECLARE_bool_(break_on_failure);
+
+// This flag controls whether Google Test catches all test-thrown exceptions
+// and logs them as failures.
+GTEST_DECLARE_bool_(catch_exceptions);
+
+// This flag enables using colors in terminal output. Available values are
+// "yes" to enable colors, "no" (disable colors), or "auto" (the default)
+// to let Google Test decide.
+GTEST_DECLARE_string_(color);
+
+// This flag sets up the filter to select, by name and using a glob pattern,
+// the tests to run. If the filter is not given, all tests are executed.
+GTEST_DECLARE_string_(filter);
+
+// This flag causes Google Test to list the tests. None of the listed tests
+// is actually run if the flag is provided.
+GTEST_DECLARE_bool_(list_tests);
+
+// This flag controls whether Google Test emits a detailed XML report to a file
+// in addition to its normal textual output.
+GTEST_DECLARE_string_(output);
+
+// This flag controls whether Google Test prints the elapsed time for each
+// test.
+GTEST_DECLARE_bool_(print_time);
+
+// This flag specifies the random number seed.
+GTEST_DECLARE_int32_(random_seed);
+
+// This flag sets how many times the tests are repeated. The default value
+// is 1. If the value is -1, the tests are repeated forever.
+GTEST_DECLARE_int32_(repeat);
+
+// This flag controls whether Google Test includes Google Test internal
+// stack frames in failure stack traces.
+GTEST_DECLARE_bool_(show_internal_stack_frames);
+
+// When this flag is specified, tests' order is randomized on every iteration.
+GTEST_DECLARE_bool_(shuffle);
+
+// This flag specifies the maximum number of stack frames to be
+// printed in a failure message.
+GTEST_DECLARE_int32_(stack_trace_depth);
+
+// When this flag is specified, a failed assertion will throw an
+// exception if exceptions are enabled, or exit the program with a
+// non-zero code otherwise.
+GTEST_DECLARE_bool_(throw_on_failure);
+
+// When this flag is set with a "host:port" string, on supported
+// platforms test results are streamed to the specified port on
+// the specified host machine.
+GTEST_DECLARE_string_(stream_result_to);
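+
+// Each flag above is exposed on the test binary's command line with a
+// "--gtest_" prefix. A few illustrative invocations (the binary name
+// "my_tests" is made up for the example):
+//
+//   ./my_tests --gtest_list_tests                      // list tests, run nothing
+//   ./my_tests --gtest_filter=FooTest.*-FooTest.Slow*  // FooTest.*, except Slow*
+//   ./my_tests --gtest_repeat=1000 --gtest_shuffle     // 1000 iterations, each
+//                                                      // in a fresh random order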
+
+// The upper limit for valid stack trace depths.
+const int kMaxStackTraceDepth = 100;
+
+namespace internal {
+
+class AssertHelper;
+class DefaultGlobalTestPartResultReporter;
+class ExecDeathTest;
+class NoExecDeathTest;
+class FinalSuccessChecker;
+class GTestFlagSaver;
+class StreamingListenerTest;
+class TestResultAccessor;
+class TestEventListenersAccessor;
+class TestEventRepeater;
+class UnitTestRecordPropertyTestHelper;
+class WindowsDeathTest;
+class UnitTestImpl* GetUnitTestImpl();
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+                                    const std::string& message);
+
+}  // namespace internal
+
+// The friend relationship of some of these classes is cyclic.
+// If we don't forward declare them, the compiler might confuse the classes
+// in friendship clauses with same-named classes in the scope.
+class Test;
+class TestCase;
+class TestInfo;
+class UnitTest;
+
+// A class for indicating whether an assertion was successful. When
+// the assertion wasn't successful, the AssertionResult object
+// remembers a non-empty message that describes how it failed.
+//
+// To create an instance of this class, use one of the factory functions
+// (AssertionSuccess() and AssertionFailure()).
+//
+// This class is useful for two purposes:
+//   1. Defining predicate functions to be used with Boolean test assertions
+//      EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts
+//   2. Defining predicate-format functions to be
+//      used with predicate assertions (ASSERT_PRED_FORMAT*, etc).
+// +// For example, if you define IsEven predicate: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5))) +// will print the message +// +// Value of: IsEven(Fib(5)) +// Actual: false (5 is odd) +// Expected: true +// +// instead of a more opaque +// +// Value of: IsEven(Fib(5)) +// Actual: false +// Expected: true +// +// in case IsEven is a simple Boolean predicate. +// +// If you expect your predicate to be reused and want to support informative +// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up +// about half as often as positive ones in our tests), supply messages for +// both success and failure cases: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess() << n << " is even"; +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print +// +// Value of: IsEven(Fib(6)) +// Actual: true (8 is even) +// Expected: false +// +// NB: Predicates that support negative Boolean assertions have reduced +// performance in positive ones so be careful not to use them in tests +// that have lots (tens of thousands) of positive Boolean assertions. +// +// To use this class with EXPECT_PRED_FORMAT assertions such as: +// +// // Verifies that Foo() returns an even number. +// EXPECT_PRED_FORMAT1(IsEven, Foo()); +// +// you need to define: +// +// testing::AssertionResult IsEven(const char* expr, int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() +// << "Expected: " << expr << " is even\n Actual: it's " << n; +// } +// +// If Foo() returns 5, you will see the following message: +// +// Expected: Foo() is even +// Actual: it's 5 +// +class GTEST_API_ AssertionResult { + public: + // Copy constructor. + // Used in EXPECT_TRUE/FALSE(assertion_result). + AssertionResult(const AssertionResult& other); + // Used in the EXPECT_TRUE/FALSE(bool_expression). + explicit AssertionResult(bool success) : success_(success) {} + + // Returns true iff the assertion succeeded. + operator bool() const { return success_; } // NOLINT + + // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. + AssertionResult operator!() const; + + // Returns the text streamed into this AssertionResult. Test assertions + // use it when they fail (i.e., the predicate's outcome doesn't match the + // assertion's expectation). When nothing has been streamed into the + // object, returns an empty string. + const char* message() const { + return message_.get() != NULL ? message_->c_str() : ""; + } + // TODO(vladl@google.com): Remove this after making sure no clients use it. + // Deprecated; please use message() instead. + const char* failure_message() const { return message(); } + + // Streams a custom failure message into this object. + template AssertionResult& operator<<(const T& value) { + AppendMessage(Message() << value); + return *this; + } + + // Allows streaming basic output manipulators such as endl or flush into + // this object. + AssertionResult& operator<<( + ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) { + AppendMessage(Message() << basic_manipulator); + return *this; + } + + private: + // Appends the contents of message to message_. 
+ void AppendMessage(const Message& a_message) { + if (message_.get() == NULL) + message_.reset(new ::std::string); + message_->append(a_message.GetString().c_str()); + } + + // Stores result of the assertion predicate. + bool success_; + // Stores the message describing the condition in case the expectation + // construct is not satisfied with the predicate's outcome. + // Referenced via a pointer to avoid taking too much stack frame space + // with test assertions. + internal::scoped_ptr< ::std::string> message_; + + GTEST_DISALLOW_ASSIGN_(AssertionResult); +}; + +// Makes a successful assertion result. +GTEST_API_ AssertionResult AssertionSuccess(); + +// Makes a failed assertion result. +GTEST_API_ AssertionResult AssertionFailure(); + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << msg. +GTEST_API_ AssertionResult AssertionFailure(const Message& msg); + +// The abstract class that all tests inherit from. +// +// In Google Test, a unit test program contains one or many TestCases, and +// each TestCase contains one or many Tests. +// +// When you define a test using the TEST macro, you don't need to +// explicitly derive from Test - the TEST macro automatically does +// this for you. +// +// The only time you derive from Test is when defining a test fixture +// to be used a TEST_F. For example: +// +// class FooTest : public testing::Test { +// protected: +// virtual void SetUp() { ... } +// virtual void TearDown() { ... } +// ... +// }; +// +// TEST_F(FooTest, Bar) { ... } +// TEST_F(FooTest, Baz) { ... } +// +// Test is not copyable. +class GTEST_API_ Test { + public: + friend class TestInfo; + + // Defines types for pointers to functions that set up and tear down + // a test case. + typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc; + typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc; + + // The d'tor is virtual as we intend to inherit from Test. + virtual ~Test(); + + // Sets up the stuff shared by all tests in this test case. + // + // Google Test will call Foo::SetUpTestCase() before running the first + // test in test case Foo. Hence a sub-class can define its own + // SetUpTestCase() method to shadow the one defined in the super + // class. + static void SetUpTestCase() {} + + // Tears down the stuff shared by all tests in this test case. + // + // Google Test will call Foo::TearDownTestCase() after running the last + // test in test case Foo. Hence a sub-class can define its own + // TearDownTestCase() method to shadow the one defined in the super + // class. + static void TearDownTestCase() {} + + // Returns true iff the current test has a fatal failure. + static bool HasFatalFailure(); + + // Returns true iff the current test has a non-fatal failure. + static bool HasNonfatalFailure(); + + // Returns true iff the current test has a (either fatal or + // non-fatal) failure. + static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); } + + // Logs a property for the current test, test case, or for the entire + // invocation of the test program when used outside of the context of a + // test case. Only the last value for a given key is remembered. These + // are public static so they can be called from utility functions that are + // not members of the test fixture. Calls to RecordProperty made during + // lifespan of the test (from the moment its constructor starts to the + // moment its destructor finishes) will be output in XML as attributes of + // the element. 
Properties recorded from fixture's + // SetUpTestCase or TearDownTestCase are logged as attributes of the + // corresponding element. Calls to RecordProperty made in the + // global context (before or after invocation of RUN_ALL_TESTS and from + // SetUp/TearDown method of Environment objects registered with Google + // Test) will be output as attributes of the element. + static void RecordProperty(const std::string& key, const std::string& value); + static void RecordProperty(const std::string& key, int value); + + protected: + // Creates a Test object. + Test(); + + // Sets up the test fixture. + virtual void SetUp(); + + // Tears down the test fixture. + virtual void TearDown(); + + private: + // Returns true iff the current test has the same fixture class as + // the first test in the current test case. + static bool HasSameFixtureClass(); + + // Runs the test after the test fixture has been set up. + // + // A sub-class must implement this to define the test logic. + // + // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM. + // Instead, use the TEST or TEST_F macro. + virtual void TestBody() = 0; + + // Sets up, executes, and tears down the test. + void Run(); + + // Deletes self. We deliberately pick an unusual name for this + // internal method to avoid clashing with names used in user TESTs. + void DeleteSelf_() { delete this; } + + // Uses a GTestFlagSaver to save and restore all Google Test flags. + const internal::GTestFlagSaver* const gtest_flag_saver_; + + // Often a user mis-spells SetUp() as Setup() and spends a long time + // wondering why it is never called by Google Test. The declaration of + // the following method is solely for catching such an error at + // compile time: + // + // - The return type is deliberately chosen to be not void, so it + // will be a conflict if a user declares void Setup() in his test + // fixture. + // + // - This method is private, so it will be another compiler error + // if a user calls it from his test fixture. + // + // DO NOT OVERRIDE THIS FUNCTION. + // + // If you see an error about overriding the following function or + // about it being private, you have mis-spelled SetUp() as Setup(). + struct Setup_should_be_spelled_SetUp {}; + virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; } + + // We disallow copying Tests. + GTEST_DISALLOW_COPY_AND_ASSIGN_(Test); +}; + +typedef internal::TimeInMillis TimeInMillis; + +// A copyable object representing a user specified test property which can be +// output as a key/value string pair. +// +// Don't inherit from TestProperty as its destructor is not virtual. +class TestProperty { + public: + // C'tor. TestProperty does NOT have a default constructor. + // Always use this constructor (with parameters) to create a + // TestProperty object. + TestProperty(const std::string& a_key, const std::string& a_value) : + key_(a_key), value_(a_value) { + } + + // Gets the user supplied key. + const char* key() const { + return key_.c_str(); + } + + // Gets the user supplied value. + const char* value() const { + return value_.c_str(); + } + + // Sets a new value, overriding the one supplied in the constructor. + void SetValue(const std::string& new_value) { + value_ = new_value; + } + + private: + // The key supplied by the user. + std::string key_; + // The value supplied by the user. + std::string value_; +}; + +// The result of a single Test. 
This includes a list of +// TestPartResults, a list of TestProperties, a count of how many +// death tests there are in the Test, and how much time it took to run +// the Test. +// +// TestResult is not copyable. +class GTEST_API_ TestResult { + public: + // Creates an empty TestResult. + TestResult(); + + // D'tor. Do not inherit from TestResult. + ~TestResult(); + + // Gets the number of all test parts. This is the sum of the number + // of successful test parts and the number of failed test parts. + int total_part_count() const; + + // Returns the number of the test properties. + int test_property_count() const; + + // Returns true iff the test passed (i.e. no test part failed). + bool Passed() const { return !Failed(); } + + // Returns true iff the test failed. + bool Failed() const; + + // Returns true iff the test fatally failed. + bool HasFatalFailure() const; + + // Returns true iff the test has a non-fatal failure. + bool HasNonfatalFailure() const; + + // Returns the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns the i-th test part result among all the results. i can range + // from 0 to test_property_count() - 1. If i is not in that range, aborts + // the program. + const TestPartResult& GetTestPartResult(int i) const; + + // Returns the i-th test property. i can range from 0 to + // test_property_count() - 1. If i is not in that range, aborts the + // program. + const TestProperty& GetTestProperty(int i) const; + + private: + friend class TestInfo; + friend class TestCase; + friend class UnitTest; + friend class internal::DefaultGlobalTestPartResultReporter; + friend class internal::ExecDeathTest; + friend class internal::TestResultAccessor; + friend class internal::UnitTestImpl; + friend class internal::WindowsDeathTest; + + // Gets the vector of TestPartResults. + const std::vector& test_part_results() const { + return test_part_results_; + } + + // Gets the vector of TestProperties. + const std::vector& test_properties() const { + return test_properties_; + } + + // Sets the elapsed time. + void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; } + + // Adds a test property to the list. The property is validated and may add + // a non-fatal failure if invalid (e.g., if it conflicts with reserved + // key names). If a property is already recorded for the same key, the + // value will be updated, rather than storing multiple values for the same + // key. xml_element specifies the element for which the property is being + // recorded and is used for validation. + void RecordProperty(const std::string& xml_element, + const TestProperty& test_property); + + // Adds a failure if the key is a reserved attribute of Google Test + // testcase tags. Returns true if the property is valid. + // TODO(russr): Validate attribute names are legal and human readable. + static bool ValidateTestProperty(const std::string& xml_element, + const TestProperty& test_property); + + // Adds a test part result to the list. + void AddTestPartResult(const TestPartResult& test_part_result); + + // Returns the death test count. + int death_test_count() const { return death_test_count_; } + + // Increments the death test count, returning the new count. + int increment_death_test_count() { return ++death_test_count_; } + + // Clears the test part results. + void ClearTestPartResults(); + + // Clears the object. 
+ void Clear(); + + // Protects mutable state of the property vector and of owned + // properties, whose values may be updated. + internal::Mutex test_properites_mutex_; + + // The vector of TestPartResults + std::vector test_part_results_; + // The vector of TestProperties + std::vector test_properties_; + // Running count of death tests. + int death_test_count_; + // The elapsed time, in milliseconds. + TimeInMillis elapsed_time_; + + // We disallow copying TestResult. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult); +}; // class TestResult + +// A TestInfo object stores the following information about a test: +// +// Test case name +// Test name +// Whether the test should be run +// A function pointer that creates the test object when invoked +// Test result +// +// The constructor of TestInfo registers itself with the UnitTest +// singleton such that the RUN_ALL_TESTS() macro knows which tests to +// run. +class GTEST_API_ TestInfo { + public: + // Destructs a TestInfo object. This function is not virtual, so + // don't inherit from TestInfo. + ~TestInfo(); + + // Returns the test case name. + const char* test_case_name() const { return test_case_name_.c_str(); } + + // Returns the test name. + const char* name() const { return name_.c_str(); } + + // Returns the name of the parameter type, or NULL if this is not a typed + // or a type-parameterized test. + const char* type_param() const { + if (type_param_.get() != NULL) + return type_param_->c_str(); + return NULL; + } + + // Returns the text representation of the value parameter, or NULL if this + // is not a value-parameterized test. + const char* value_param() const { + if (value_param_.get() != NULL) + return value_param_->c_str(); + return NULL; + } + + // Returns true if this test should run, that is if the test is not + // disabled (or it is disabled but the also_run_disabled_tests flag has + // been specified) and its full name matches the user-specified filter. + // + // Google Test allows the user to filter the tests by their full names. + // The full name of a test Bar in test case Foo is defined as + // "Foo.Bar". Only the tests that match the filter will run. + // + // A filter is a colon-separated list of glob (not regex) patterns, + // optionally followed by a '-' and a colon-separated list of + // negative patterns (tests to exclude). A test is run if it + // matches one of the positive patterns and does not match any of + // the negative patterns. + // + // For example, *A*:Foo.* is a filter that matches any string that + // contains the character 'A' or starts with "Foo.". + bool should_run() const { return should_run_; } + + // Returns true iff this test will appear in the XML report. + bool is_reportable() const { + // For now, the XML report includes all tests matching the filter. + // In the future, we may trim tests that are excluded because of + // sharding. + return matches_filter_; + } + + // Returns the result of the test. 
+ const TestResult* result() const { return &result_; } + + private: +#if GTEST_HAS_DEATH_TEST + friend class internal::DefaultDeathTestFactory; +#endif // GTEST_HAS_DEATH_TEST + friend class Test; + friend class TestCase; + friend class internal::UnitTestImpl; + friend class internal::StreamingListenerTest; + friend TestInfo* internal::MakeAndRegisterTestInfo( + const char* test_case_name, + const char* name, + const char* type_param, + const char* value_param, + internal::TypeId fixture_class_id, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc, + internal::TestFactoryBase* factory); + + // Constructs a TestInfo object. The newly constructed instance assumes + // ownership of the factory object. + TestInfo(const std::string& test_case_name, + const std::string& name, + const char* a_type_param, // NULL if not a type-parameterized test + const char* a_value_param, // NULL if not a value-parameterized test + internal::TypeId fixture_class_id, + internal::TestFactoryBase* factory); + + // Increments the number of death tests encountered in this test so + // far. + int increment_death_test_count() { + return result_.increment_death_test_count(); + } + + // Creates the test object, runs it, records its result, and then + // deletes it. + void Run(); + + static void ClearTestResult(TestInfo* test_info) { + test_info->result_.Clear(); + } + + // These fields are immutable properties of the test. + const std::string test_case_name_; // Test case name + const std::string name_; // Test name + // Name of the parameter type, or NULL if this is not a typed or a + // type-parameterized test. + const internal::scoped_ptr type_param_; + // Text representation of the value parameter, or NULL if this is not a + // value-parameterized test. + const internal::scoped_ptr value_param_; + const internal::TypeId fixture_class_id_; // ID of the test fixture class + bool should_run_; // True iff this test should run + bool is_disabled_; // True iff this test is disabled + bool matches_filter_; // True if this test matches the + // user-specified filter. + internal::TestFactoryBase* const factory_; // The factory that creates + // the test object + + // This field is mutable and needs to be reset before running the + // test for the second time. + TestResult result_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo); +}; + +// A test case, which consists of a vector of TestInfos. +// +// TestCase is not copyable. +class GTEST_API_ TestCase { + public: + // Creates a TestCase with the given name. + // + // TestCase does NOT have a default constructor. Always use this + // constructor to create a TestCase object. + // + // Arguments: + // + // name: name of the test case + // a_type_param: the name of the test's type parameter, or NULL if + // this is not a type-parameterized test. + // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + TestCase(const char* name, const char* a_type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc); + + // Destructor of TestCase. + virtual ~TestCase(); + + // Gets the name of the TestCase. + const char* name() const { return name_.c_str(); } + + // Returns the name of the parameter type, or NULL if this is not a + // type-parameterized test case. + const char* type_param() const { + if (type_param_.get() != NULL) + return type_param_->c_str(); + return NULL; + } + + // Returns true if any test in this test case should run. 
+ bool should_run() const { return should_run_; } + + // Gets the number of successful tests in this test case. + int successful_test_count() const; + + // Gets the number of failed tests in this test case. + int failed_test_count() const; + + // Gets the number of disabled tests that will be reported in the XML report. + int reportable_disabled_test_count() const; + + // Gets the number of disabled tests in this test case. + int disabled_test_count() const; + + // Gets the number of tests to be printed in the XML report. + int reportable_test_count() const; + + // Get the number of tests in this test case that should run. + int test_to_run_count() const; + + // Gets the number of all tests in this test case. + int total_test_count() const; + + // Returns true iff the test case passed. + bool Passed() const { return !Failed(); } + + // Returns true iff the test case failed. + bool Failed() const { return failed_test_count() > 0; } + + // Returns the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns the i-th test among all the tests. i can range from 0 to + // total_test_count() - 1. If i is not in that range, returns NULL. + const TestInfo* GetTestInfo(int i) const; + + // Returns the TestResult that holds test properties recorded during + // execution of SetUpTestCase and TearDownTestCase. + const TestResult& ad_hoc_test_result() const { return ad_hoc_test_result_; } + + private: + friend class Test; + friend class internal::UnitTestImpl; + + // Gets the (mutable) vector of TestInfos in this TestCase. + std::vector& test_info_list() { return test_info_list_; } + + // Gets the (immutable) vector of TestInfos in this TestCase. + const std::vector& test_info_list() const { + return test_info_list_; + } + + // Returns the i-th test among all the tests. i can range from 0 to + // total_test_count() - 1. If i is not in that range, returns NULL. + TestInfo* GetMutableTestInfo(int i); + + // Sets the should_run member. + void set_should_run(bool should) { should_run_ = should; } + + // Adds a TestInfo to this test case. Will delete the TestInfo upon + // destruction of the TestCase object. + void AddTestInfo(TestInfo * test_info); + + // Clears the results of all tests in this test case. + void ClearResult(); + + // Clears the results of all tests in the given test case. + static void ClearTestCaseResult(TestCase* test_case) { + test_case->ClearResult(); + } + + // Runs every test in this TestCase. + void Run(); + + // Runs SetUpTestCase() for this TestCase. This wrapper is needed + // for catching exceptions thrown from SetUpTestCase(). + void RunSetUpTestCase() { (*set_up_tc_)(); } + + // Runs TearDownTestCase() for this TestCase. This wrapper is + // needed for catching exceptions thrown from TearDownTestCase(). + void RunTearDownTestCase() { (*tear_down_tc_)(); } + + // Returns true iff test passed. + static bool TestPassed(const TestInfo* test_info) { + return test_info->should_run() && test_info->result()->Passed(); + } + + // Returns true iff test failed. + static bool TestFailed(const TestInfo* test_info) { + return test_info->should_run() && test_info->result()->Failed(); + } + + // Returns true iff the test is disabled and will be reported in the XML + // report. + static bool TestReportableDisabled(const TestInfo* test_info) { + return test_info->is_reportable() && test_info->is_disabled_; + } + + // Returns true iff test is disabled. 
+ static bool TestDisabled(const TestInfo* test_info) { + return test_info->is_disabled_; + } + + // Returns true iff this test will appear in the XML report. + static bool TestReportable(const TestInfo* test_info) { + return test_info->is_reportable(); + } + + // Returns true if the given test should run. + static bool ShouldRunTest(const TestInfo* test_info) { + return test_info->should_run(); + } + + // Shuffles the tests in this test case. + void ShuffleTests(internal::Random* random); + + // Restores the test order to before the first shuffle. + void UnshuffleTests(); + + // Name of the test case. + std::string name_; + // Name of the parameter type, or NULL if this is not a typed or a + // type-parameterized test. + const internal::scoped_ptr type_param_; + // The vector of TestInfos in their original order. It owns the + // elements in the vector. + std::vector test_info_list_; + // Provides a level of indirection for the test list to allow easy + // shuffling and restoring the test order. The i-th element in this + // vector is the index of the i-th test in the shuffled test list. + std::vector test_indices_; + // Pointer to the function that sets up the test case. + Test::SetUpTestCaseFunc set_up_tc_; + // Pointer to the function that tears down the test case. + Test::TearDownTestCaseFunc tear_down_tc_; + // True iff any test in this test case should run. + bool should_run_; + // Elapsed time, in milliseconds. + TimeInMillis elapsed_time_; + // Holds test properties recorded during execution of SetUpTestCase and + // TearDownTestCase. + TestResult ad_hoc_test_result_; + + // We disallow copying TestCases. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase); +}; + +// An Environment object is capable of setting up and tearing down an +// environment. The user should subclass this to define his own +// environment(s). +// +// An Environment object does the set-up and tear-down in virtual +// methods SetUp() and TearDown() instead of the constructor and the +// destructor, as: +// +// 1. You cannot safely throw from a destructor. This is a problem +// as in some cases Google Test is used where exceptions are enabled, and +// we may want to implement ASSERT_* using exceptions where they are +// available. +// 2. You cannot use ASSERT_* directly in a constructor or +// destructor. +class Environment { + public: + // The d'tor is virtual as we need to subclass Environment. + virtual ~Environment() {} + + // Override this to define how to set up the environment. + virtual void SetUp() {} + + // Override this to define how to tear down the environment. + virtual void TearDown() {} + private: + // If you see an error about overriding the following function or + // about it being private, you have mis-spelled SetUp() as Setup(). + struct Setup_should_be_spelled_SetUp {}; + virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; } +}; + +// The interface for tracing execution of tests. The methods are organized in +// the order the corresponding events are fired. +class TestEventListener { + public: + virtual ~TestEventListener() {} + + // Fired before any test activity starts. + virtual void OnTestProgramStart(const UnitTest& unit_test) = 0; + + // Fired before each iteration of tests starts. There may be more than + // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration + // index, starting from 0. + virtual void OnTestIterationStart(const UnitTest& unit_test, + int iteration) = 0; + + // Fired before environment set-up for each iteration of tests starts. 
+ virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0; + + // Fired after environment set-up for each iteration of tests ends. + virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0; + + // Fired before the test case starts. + virtual void OnTestCaseStart(const TestCase& test_case) = 0; + + // Fired before the test starts. + virtual void OnTestStart(const TestInfo& test_info) = 0; + + // Fired after a failed assertion or a SUCCEED() invocation. + virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0; + + // Fired after the test ends. + virtual void OnTestEnd(const TestInfo& test_info) = 0; + + // Fired after the test case ends. + virtual void OnTestCaseEnd(const TestCase& test_case) = 0; + + // Fired before environment tear-down for each iteration of tests starts. + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0; + + // Fired after environment tear-down for each iteration of tests ends. + virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0; + + // Fired after each iteration of tests finishes. + virtual void OnTestIterationEnd(const UnitTest& unit_test, + int iteration) = 0; + + // Fired after all test activities have ended. + virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0; +}; + +// The convenience class for users who need to override just one or two +// methods and are not concerned that a possible change to a signature of +// the methods they override will not be caught during the build. For +// comments about each method please see the definition of TestEventListener +// above. +class EmptyTestEventListener : public TestEventListener { + public: + virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationStart(const UnitTest& /*unit_test*/, + int /*iteration*/) {} + virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {} + virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestCaseStart(const TestCase& /*test_case*/) {} + virtual void OnTestStart(const TestInfo& /*test_info*/) {} + virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {} + virtual void OnTestEnd(const TestInfo& /*test_info*/) {} + virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {} + virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {} + virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/, + int /*iteration*/) {} + virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {} +}; + +// TestEventListeners lets users add listeners to track events in Google Test. +class GTEST_API_ TestEventListeners { + public: + TestEventListeners(); + ~TestEventListeners(); + + // Appends an event listener to the end of the list. Google Test assumes + // the ownership of the listener (i.e. it will delete the listener when + // the test program finishes). + void Append(TestEventListener* listener); + + // Removes the given event listener from the list and returns it. It then + // becomes the caller's responsibility to delete the listener. Returns + // NULL if the listener is not found in the list. + TestEventListener* Release(TestEventListener* listener); + + // Returns the standard listener responsible for the default console + // output. Can be removed from the listeners list to shut down default + // console output. 
Note that removing this object from the listener list + // with Release transfers its ownership to the caller and makes this + // function return NULL the next time. + TestEventListener* default_result_printer() const { + return default_result_printer_; + } + + // Returns the standard listener responsible for the default XML output + // controlled by the --gtest_output=xml flag. Can be removed from the + // listeners list by users who want to shut down the default XML output + // controlled by this flag and substitute it with custom one. Note that + // removing this object from the listener list with Release transfers its + // ownership to the caller and makes this function return NULL the next + // time. + TestEventListener* default_xml_generator() const { + return default_xml_generator_; + } + + private: + friend class TestCase; + friend class TestInfo; + friend class internal::DefaultGlobalTestPartResultReporter; + friend class internal::NoExecDeathTest; + friend class internal::TestEventListenersAccessor; + friend class internal::UnitTestImpl; + + // Returns repeater that broadcasts the TestEventListener events to all + // subscribers. + TestEventListener* repeater(); + + // Sets the default_result_printer attribute to the provided listener. + // The listener is also added to the listener list and previous + // default_result_printer is removed from it and deleted. The listener can + // also be NULL in which case it will not be added to the list. Does + // nothing if the previous and the current listener objects are the same. + void SetDefaultResultPrinter(TestEventListener* listener); + + // Sets the default_xml_generator attribute to the provided listener. The + // listener is also added to the listener list and previous + // default_xml_generator is removed from it and deleted. The listener can + // also be NULL in which case it will not be added to the list. Does + // nothing if the previous and the current listener objects are the same. + void SetDefaultXmlGenerator(TestEventListener* listener); + + // Controls whether events will be forwarded by the repeater to the + // listeners in the list. + bool EventForwardingEnabled() const; + void SuppressEventForwarding(); + + // The actual list of listeners. + internal::TestEventRepeater* repeater_; + // Listener responsible for the standard result output. + TestEventListener* default_result_printer_; + // Listener responsible for the creation of the XML output file. + TestEventListener* default_xml_generator_; + + // We disallow copying TestEventListeners. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners); +}; + +// A UnitTest consists of a vector of TestCases. +// +// This is a singleton class. The only instance of UnitTest is +// created when UnitTest::GetInstance() is first called. This +// instance is never deleted. +// +// UnitTest is not copyable. +// +// This class is thread-safe as long as the methods are called +// according to their specification. +class GTEST_API_ UnitTest { + public: + // Gets the singleton UnitTest object. The first time this method + // is called, a UnitTest object is constructed and returned. + // Consecutive calls will return the same object. + static UnitTest* GetInstance(); + + // Runs all tests in this UnitTest object and prints the result. + // Returns 0 if successful, or 1 otherwise. + // + // This method can only be called from the main thread. + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. 
+ int Run() GTEST_MUST_USE_RESULT_; + + // Returns the working directory when the first TEST() or TEST_F() + // was executed. The UnitTest object owns the string. + const char* original_working_dir() const; + + // Returns the TestCase object for the test that's currently running, + // or NULL if no test is running. + const TestCase* current_test_case() const + GTEST_LOCK_EXCLUDED_(mutex_); + + // Returns the TestInfo object for the test that's currently running, + // or NULL if no test is running. + const TestInfo* current_test_info() const + GTEST_LOCK_EXCLUDED_(mutex_); + + // Returns the random seed used at the start of the current test run. + int random_seed() const; + +#if GTEST_HAS_PARAM_TEST + // Returns the ParameterizedTestCaseRegistry object used to keep track of + // value-parameterized tests and instantiate and register them. + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + internal::ParameterizedTestCaseRegistry& parameterized_test_registry() + GTEST_LOCK_EXCLUDED_(mutex_); +#endif // GTEST_HAS_PARAM_TEST + + // Gets the number of successful test cases. + int successful_test_case_count() const; + + // Gets the number of failed test cases. + int failed_test_case_count() const; + + // Gets the number of all test cases. + int total_test_case_count() const; + + // Gets the number of all test cases that contain at least one test + // that should run. + int test_case_to_run_count() const; + + // Gets the number of successful tests. + int successful_test_count() const; + + // Gets the number of failed tests. + int failed_test_count() const; + + // Gets the number of disabled tests that will be reported in the XML report. + int reportable_disabled_test_count() const; + + // Gets the number of disabled tests. + int disabled_test_count() const; + + // Gets the number of tests to be printed in the XML report. + int reportable_test_count() const; + + // Gets the number of all tests. + int total_test_count() const; + + // Gets the number of tests that should run. + int test_to_run_count() const; + + // Gets the time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp() const; + + // Gets the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const; + + // Returns true iff the unit test passed (i.e. all test cases passed). + bool Passed() const; + + // Returns true iff the unit test failed (i.e. some test case failed + // or something outside of all tests failed). + bool Failed() const; + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + const TestCase* GetTestCase(int i) const; + + // Returns the TestResult containing information on test failures and + // properties logged outside of individual test cases. + const TestResult& ad_hoc_test_result() const; + + // Returns the list of event listeners that can be used to track events + // inside Google Test. + TestEventListeners& listeners(); + + private: + // Registers and returns a global test environment. When a test + // program is run, all global test environments will be set-up in + // the order they were registered. After all tests in the program + // have finished, all global test environments will be torn-down in + // the *reverse* order they were registered. + // + // The UnitTest object takes ownership of the given environment. + // + // This method can only be called from the main thread. 
+ Environment* AddEnvironment(Environment* env); + + // Adds a TestPartResult to the current TestResult object. All + // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) + // eventually call this to report their results. The user code + // should use the assertion macros instead of calling this directly. + void AddTestPartResult(TestPartResult::Type result_type, + const char* file_name, + int line_number, + const std::string& message, + const std::string& os_stack_trace) + GTEST_LOCK_EXCLUDED_(mutex_); + + // Adds a TestProperty to the current TestResult object when invoked from + // inside a test, to current TestCase's ad_hoc_test_result_ when invoked + // from SetUpTestCase or TearDownTestCase, or to the global property set + // when invoked elsewhere. If the result already contains a property with + // the same key, the value will be updated. + void RecordProperty(const std::string& key, const std::string& value); + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + TestCase* GetMutableTestCase(int i); + + // Accessors for the implementation object. + internal::UnitTestImpl* impl() { return impl_; } + const internal::UnitTestImpl* impl() const { return impl_; } + + // These classes and funcions are friends as they need to access private + // members of UnitTest. + friend class Test; + friend class internal::AssertHelper; + friend class internal::ScopedTrace; + friend class internal::StreamingListenerTest; + friend class internal::UnitTestRecordPropertyTestHelper; + friend Environment* AddGlobalTestEnvironment(Environment* env); + friend internal::UnitTestImpl* internal::GetUnitTestImpl(); + friend void internal::ReportFailureInUnknownLocation( + TestPartResult::Type result_type, + const std::string& message); + + // Creates an empty UnitTest. + UnitTest(); + + // D'tor + virtual ~UnitTest(); + + // Pushes a trace defined by SCOPED_TRACE() on to the per-thread + // Google Test trace stack. + void PushGTestTrace(const internal::TraceInfo& trace) + GTEST_LOCK_EXCLUDED_(mutex_); + + // Pops a trace from the per-thread Google Test trace stack. + void PopGTestTrace() + GTEST_LOCK_EXCLUDED_(mutex_); + + // Protects mutable state in *impl_. This is mutable as some const + // methods need to lock it too. + mutable internal::Mutex mutex_; + + // Opaque implementation object. This field is never changed once + // the object is constructed. We don't mark it as const here, as + // doing so will cause a warning in the constructor of UnitTest. + // Mutable state in *impl_ is protected by mutex_. + internal::UnitTestImpl* impl_; + + // We disallow copying UnitTest. + GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest); +}; + +// A convenient wrapper for adding an environment for the test +// program. +// +// You should call this before RUN_ALL_TESTS() is called, probably in +// main(). If you use gtest_main, you need to call this before main() +// starts for it to take effect. 
For example, you can define a global
+// variable like this:
+//
+//   testing::Environment* const foo_env =
+//       testing::AddGlobalTestEnvironment(new FooEnvironment);
+//
+// However, we strongly recommend that you write your own main() and
+// call AddGlobalTestEnvironment() there, as relying on initialization
+// of global variables makes the code harder to read and may cause
+// problems when you register multiple environments from different
+// translation units and the environments have dependencies among them
+// (remember that the compiler doesn't guarantee the order in which
+// global variables from different translation units are initialized).
+inline Environment* AddGlobalTestEnvironment(Environment* env) {
+  return UnitTest::GetInstance()->AddEnvironment(env);
+}
+
+// Initializes Google Test. This must be called before calling
+// RUN_ALL_TESTS(). In particular, it parses a command line for the
+// flags that Google Test recognizes. Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function for the second time has no user-visible effect.
+GTEST_API_ void InitGoogleTest(int* argc, char** argv);
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv);
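+
+// Putting InitGoogleTest(), AddGlobalTestEnvironment() and
+// RUN_ALL_TESTS() together, a hand-written main() typically looks like
+// the sketch below (FooEnvironment stands for any user-defined
+// Environment subclass):
+
+#if 0
+
+int main(int argc, char** argv) {
+  // Parses (and removes) the flags Google Test recognizes.
+  testing::InitGoogleTest(&argc, argv);
+  // Google Test takes ownership of the registered environment.
+  testing::AddGlobalTestEnvironment(new FooEnvironment);
+  return RUN_ALL_TESTS();  // returns 0 if all tests pass, 1 otherwise
+}
+
+#endif  // 0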
+
+namespace internal {
+
+// FormatForComparison<ToPrint, OtherOperand>::Format(value) formats a
+// value of type ToPrint that is an operand of a comparison assertion
+// (e.g. ASSERT_EQ). OtherOperand is the type of the other operand in
+// the comparison, and is used to help determine the best way to
+// format the value. In particular, when the value is a C string
+// (char pointer) and the other operand is an STL string object, we
+// want to format the C string as a string, since we know it is
+// compared by value with the string object. If the value is a char
+// pointer but the other operand is not an STL string object, we don't
+// know whether the pointer is supposed to point to a NUL-terminated
+// string, and thus want to print it as a pointer to be safe.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// The default case.
+template <typename ToPrint, typename OtherOperand>
+class FormatForComparison {
+ public:
+  static ::std::string Format(const ToPrint& value) {
+    return ::testing::PrintToString(value);
+  }
+};
+
+// Array.
+template <typename ToPrint, size_t N, typename OtherOperand>
+class FormatForComparison<ToPrint[N], OtherOperand> {
+ public:
+  static ::std::string Format(const ToPrint* value) {
+    return FormatForComparison<const ToPrint*, OtherOperand>::Format(value);
+  }
+};
+
+// By default, print C string as pointers to be safe, as we don't know
+// whether they actually point to a NUL-terminated string.
+
+#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType) \
+  template <typename OtherOperand> \
+  class FormatForComparison<CharType*, OtherOperand> { \
+   public: \
+    static ::std::string Format(CharType* value) { \
+      return ::testing::PrintToString(static_cast<const void*>(value)); \
+    } \
+  }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(wchar_t);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t);
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_
+
+// If a C string is compared with an STL string object, we know it's meant
+// to point to a NUL-terminated string, and thus can print it as a string.
+
+#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \
+  template <> \
+  class FormatForComparison<CharType*, OtherStringType> { \
+   public: \
+    static ::std::string Format(CharType* value) { \
+      return ::testing::PrintToString(value); \
+    } \
+  }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::std::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::std::string);
+
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::string);
+#endif
+
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::wstring);
+#endif
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::std::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring);
+#endif
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_STRING_
+
+// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, etc.)
+// operand to be used in a failure message. The type (but not value)
+// of the other operand may affect the format. This allows us to
+// print a char* as a raw pointer when it is compared against another
+// char* or void*, and print it as a C string when it is compared
+// against an std::string object, for example.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename T1, typename T2>
+std::string FormatForComparisonFailureMessage(
+    const T1& value, const T2& /* other_operand */) {
+  return FormatForComparison<T1, T2>::Format(value);
+}
+
+// The helper function for {ASSERT|EXPECT}_EQ.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQ(const char* expected_expression,
+                            const char* actual_expression,
+                            const T1& expected,
+                            const T2& actual) {
+#ifdef _MSC_VER
+# pragma warning(push)          // Saves the current warning state.
+# pragma warning(disable:4389)  // Temporarily disables warning on
+                                // signed/unsigned mismatch.
+#endif
+
+  if (expected == actual) {
+    return AssertionSuccess();
+  }
+
+#ifdef _MSC_VER
+# pragma warning(pop)           // Restores the warning state.
+#endif
+
+  return EqFailure(expected_expression,
+                   actual_expression,
+                   FormatForComparisonFailureMessage(expected, actual),
+                   FormatForComparisonFailureMessage(actual, expected),
+                   false);
+}
+
+// With this overloaded version, we allow anonymous enums to be used
+// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums
+// can be implicitly cast to BiggestInt.
+GTEST_API_ AssertionResult CmpHelperEQ(const char* expected_expression,
+                                       const char* actual_expression,
+                                       BiggestInt expected,
+                                       BiggestInt actual);
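+
+// A sketch of the net effect of the formatting machinery above, on
+// hypothetical values:
+//
+//   const char* p = "foo";
+//   const std::string s = "bar";
+//   EXPECT_EQ(p, s);      // on failure, p is formatted as the string "foo"
+//   EXPECT_EQ(p, p + 1);  // char* vs char*: both formatted as raw pointers
+//
+// (To compare C strings by content rather than by address, see the
+// CmpHelperSTREQ family declared further below.)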
+
+// The helper class for {ASSERT|EXPECT}_EQ. The template argument
+// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
+// is a null pointer literal. The following default implementation is
+// for lhs_is_null_literal being false.
+template <bool lhs_is_null_literal>
+class EqHelper {
+ public:
+  // This templatized version is for the general case.
+  template <typename T1, typename T2>
+  static AssertionResult Compare(const char* expected_expression,
+                                 const char* actual_expression,
+                                 const T1& expected,
+                                 const T2& actual) {
+    return CmpHelperEQ(expected_expression, actual_expression, expected,
+                       actual);
+  }
+
+  // With this overloaded version, we allow anonymous enums to be used
+  // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
+  // enums can be implicitly cast to BiggestInt.
+  //
+  // Even though its body looks the same as the above version, we
+  // cannot merge the two, as it will make anonymous enums unhappy.
+  static AssertionResult Compare(const char* expected_expression,
+                                 const char* actual_expression,
+                                 BiggestInt expected,
+                                 BiggestInt actual) {
+    return CmpHelperEQ(expected_expression, actual_expression, expected,
+                       actual);
+  }
+};
+
+// This specialization is used when the first argument to ASSERT_EQ()
+// is a null pointer literal, like NULL, false, or 0.
+template <>
+class EqHelper<true> {
+ public:
+  // We define two overloaded versions of Compare(). The first
+  // version will be picked when the second argument to ASSERT_EQ() is
+  // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or
+  // EXPECT_EQ(false, a_bool).
+  template <typename T1, typename T2>
+  static AssertionResult Compare(
+      const char* expected_expression,
+      const char* actual_expression,
+      const T1& expected,
+      const T2& actual,
+      // The following line prevents this overload from being considered if T2
+      // is not a pointer type. We need this because ASSERT_EQ(NULL, my_ptr)
+      // expands to Compare("", "", NULL, my_ptr), which requires a conversion
+      // to match the Secret* in the other overload, which would otherwise make
+      // this template match better.
+      typename EnableIf<!is_pointer<T2>::value>::type* = 0) {
+    return CmpHelperEQ(expected_expression, actual_expression, expected,
+                       actual);
+  }
+
+  // This version will be picked when the second argument to ASSERT_EQ() is a
+  // pointer, e.g. ASSERT_EQ(NULL, a_pointer).
+  template <typename T>
+  static AssertionResult Compare(
+      const char* expected_expression,
+      const char* actual_expression,
+      // We used to have a second template parameter instead of Secret*. That
+      // template parameter would deduce to 'long', making this a better match
+      // than the first overload even without the first overload's EnableIf.
+      // Unfortunately, gcc with -Wconversion-null warns when "passing NULL to
+      // non-pointer argument" (even a deduced integral argument), so the old
+      // implementation caused warnings in user code.
+      Secret* /* expected (NULL) */,
+      T* actual) {
+    // We already know that 'expected' is a null pointer.
+    return CmpHelperEQ(expected_expression, actual_expression,
+                       static_cast<T*>(NULL), actual);
+  }
+};
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_??. It is here just to avoid copy-and-paste
+// of similar code.
+//
+// For each templatized helper function, we also define an overloaded
+// version for BiggestInt in order to reduce code bloat and allow
+// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled
+// with gcc 4.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+template <typename T1, typename T2>\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+                                   const T1& val1, const T2& val2) {\
+  if (val1 op val2) {\
+    return AssertionSuccess();\
+  } else {\
+    return AssertionFailure() \
+        << "Expected: (" << expr1 << ") " #op " (" << expr2\
+        << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
+        << " vs " << FormatForComparisonFailureMessage(val2, val1);\
+  }\
+}\
+GTEST_API_ AssertionResult CmpHelper##op_name(\
+    const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2)
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
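+
+// A sketch of how the EqHelper dispatch above plays out (the values
+// are hypothetical):
+//
+//   int* ptr = NULL;
+//   ASSERT_EQ(NULL, ptr);  // lhs is a null literal and rhs is a pointer:
+//                          // the Secret* overload of EqHelper<true> runs,
+//                          // comparing static_cast<int*>(NULL) with ptr.
+//   ASSERT_EQ(0, 2 - 2);   // lhs is a null literal but rhs is no pointer:
+//                          // the EnableIf-constrained overload runs.
+//   ASSERT_EQ(2 + 2, 4);   // ordinary case: EqHelper<false>::Compare.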
+ +// Implements the helper function for {ASSERT|EXPECT}_NE +GTEST_IMPL_CMP_HELPER_(NE, !=); +// Implements the helper function for {ASSERT|EXPECT}_LE +GTEST_IMPL_CMP_HELPER_(LE, <=); +// Implements the helper function for {ASSERT|EXPECT}_LT +GTEST_IMPL_CMP_HELPER_(LT, <); +// Implements the helper function for {ASSERT|EXPECT}_GE +GTEST_IMPL_CMP_HELPER_(GE, >=); +// Implements the helper function for {ASSERT|EXPECT}_GT +GTEST_IMPL_CMP_HELPER_(GT, >); + +#undef GTEST_IMPL_CMP_HELPER_ + +// The helper function for {ASSERT|EXPECT}_STREQ. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual); + +// The helper function for {ASSERT|EXPECT}_STRCASEEQ. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression, + const char* actual_expression, + const char* expected, + const char* actual); + +// The helper function for {ASSERT|EXPECT}_STRNE. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2); + +// The helper function for {ASSERT|EXPECT}_STRCASENE. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression, + const char* s2_expression, + const char* s1, + const char* s2); + + +// Helper function for *_STREQ on wide strings. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const wchar_t* expected, + const wchar_t* actual); + +// Helper function for *_STRNE on wide strings. +// +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const wchar_t* s1, + const wchar_t* s2); + +} // namespace internal + +// IsSubstring() and IsNotSubstring() are intended to be used as the +// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by +// themselves. They check whether needle is a substring of haystack +// (NULL is considered a substring of itself only), and return an +// appropriate error message when they fail. +// +// The {needle,haystack}_expr arguments are the stringified +// expressions that generated the two real arguments. 
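A typical call site for the substring overloads declared just below, as a sketch:

    #include <string>
    #include "gtest/gtest.h"

    TEST(SubstringDemo, PredicateFormatUsage) {
      std::string haystack = "finding a needle here";
      // Both stringified argument expressions appear in any failure message.
      EXPECT_PRED_FORMAT2(::testing::IsSubstring, "needle", haystack);
      EXPECT_PRED_FORMAT2(::testing::IsNotSubstring, "pin", haystack);
    }
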
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::string& needle, const ::std::string& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::string& needle, const ::std::string& haystack);
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::wstring& needle, const ::std::wstring& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::wstring& needle, const ::std::wstring& haystack);
+#endif  // GTEST_HAS_STD_WSTRING
+
+namespace internal {
+
+// Helper template function for comparing floating-points.
+//
+// Template parameter:
+//
+//   RawType: the raw floating-point type (either float or double)
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename RawType>
+AssertionResult CmpHelperFloatingPointEQ(const char* expected_expression,
+                                         const char* actual_expression,
+                                         RawType expected,
+                                         RawType actual) {
+  const FloatingPoint<RawType> lhs(expected), rhs(actual);
+
+  if (lhs.AlmostEquals(rhs)) {
+    return AssertionSuccess();
+  }
+
+  ::std::stringstream expected_ss;
+  expected_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+              << expected;
+
+  ::std::stringstream actual_ss;
+  actual_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+            << actual;
+
+  return EqFailure(expected_expression,
+                   actual_expression,
+                   StringStreamToString(&expected_ss),
+                   StringStreamToString(&actual_ss),
+                   false);
+}
+
+// Helper function for implementing ASSERT_NEAR.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,
+                                                const char* expr2,
+                                                const char* abs_error_expr,
+                                                double val1,
+                                                double val2,
+                                                double abs_error);
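These two helpers back the floating-point macros defined later in this header: CmpHelperFloatingPointEQ drives {ASSERT|EXPECT}_{FLOAT,DOUBLE}_EQ and compares by ULPs (the FloatingPoint class allows 4 ULPs by default), while DoubleNearPredFormat drives {ASSERT|EXPECT}_NEAR with an explicit absolute error. A sketch of the difference:

    #include "gtest/gtest.h"

    TEST(FloatingPointDemo, UlpsVersusAbsoluteError) {
      // Passes: 0.1f + 0.2f lands within a few ULPs of 0.3f.
      EXPECT_FLOAT_EQ(0.3f, 0.1f + 0.2f);
      // Passes: the caller picks the tolerance explicitly.
      EXPECT_NEAR(0.3, 0.1 + 0.2, 1e-9);
    }
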
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+// A class that enables one to stream messages to assertion macros
+class GTEST_API_ AssertHelper {
+ public:
+  // Constructor.
+  AssertHelper(TestPartResult::Type type,
+               const char* file,
+               int line,
+               const char* message);
+  ~AssertHelper();
+
+  // Message assignment is a semantic trick to enable assertion
+  // streaming; see the GTEST_MESSAGE_ macro below.
+  void operator=(const Message& message) const;
+
+ private:
+  // We put our data in a struct so that the size of the AssertHelper class can
+  // be as small as possible.  This is important because gcc is incapable of
+  // re-using stack space even for temporary variables, so every EXPECT_EQ
+  // reserves stack space for another AssertHelper.
+  struct AssertHelperData {
+    AssertHelperData(TestPartResult::Type t,
+                     const char* srcfile,
+                     int line_num,
+                     const char* msg)
+        : type(t), file(srcfile), line(line_num), message(msg) { }
+
+    TestPartResult::Type const type;
+    const char* const file;
+    int const line;
+    std::string const message;
+
+   private:
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);
+  };
+
+  AssertHelperData* const data_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
+};
+
+}  // namespace internal
+
+#if GTEST_HAS_PARAM_TEST
+// The pure interface class that all value-parameterized tests inherit from.
+// A value-parameterized class must inherit from both ::testing::Test and
+// ::testing::WithParamInterface. In most cases that just means inheriting
+// from ::testing::TestWithParam, but more complicated test hierarchies
+// may need to inherit from Test and WithParamInterface at different levels.
+//
+// This interface has support for accessing the test parameter value via
+// the GetParam() method.
+//
+// Use it with one of the parameter generator defining functions, like Range(),
+// Values(), ValuesIn(), Bool(), and Combine().
+//
+// class FooTest : public ::testing::TestWithParam<int> {
+//  protected:
+//   FooTest() {
+//     // Can use GetParam() here.
+//   }
+//   virtual ~FooTest() {
+//     // Can use GetParam() here.
+//   }
+//   virtual void SetUp() {
+//     // Can use GetParam() here.
+//   }
+//   virtual void TearDown() {
+//     // Can use GetParam() here.
+//   }
+// };
+// TEST_P(FooTest, DoesBar) {
+//   // Can use GetParam() method here.
+//   Foo foo;
+//   ASSERT_TRUE(foo.DoesBar(GetParam()));
+// }
+// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));
+
+template <typename T>
+class WithParamInterface {
+ public:
+  typedef T ParamType;
+  virtual ~WithParamInterface() {}
+
+  // The current parameter value. Is also available in the test fixture's
+  // constructor. This member function is non-static, even though it only
+  // references static data, to reduce the opportunity for incorrect uses
+  // like writing 'WithParamInterface<bool>::GetParam()' for a test that
+  // uses a fixture whose parameter type is int.
+  const ParamType& GetParam() const {
+    GTEST_CHECK_(parameter_ != NULL)
+        << "GetParam() can only be called inside a value-parameterized test "
+        << "-- did you intend to write TEST_P instead of TEST_F?";
+    return *parameter_;
+  }
+
+ private:
+  // Sets parameter value. The caller is responsible for making sure the value
+  // remains alive and unchanged throughout the current test.
+  static void SetParam(const ParamType* parameter) {
+    parameter_ = parameter;
+  }
+
+  // Static value used for accessing parameter during a test lifetime.
+  static const ParamType* parameter_;
+
+  // TestClass must be a subclass of WithParamInterface<T> and Test.
+  template <class TestClass> friend class internal::ParameterizedTestFactory;
+};
+
+template <typename T>
+const T* WithParamInterface<T>::parameter_ = NULL;
+
+// Most value-parameterized classes can ignore the existence of
+// WithParamInterface, and can just inherit from ::testing::TestWithParam.
+
+template <typename T>
+class TestWithParam : public Test, public WithParamInterface<T> {
+};
+
+#endif  // GTEST_HAS_PARAM_TEST
+
+// Macros for indicating success/failure in test code.
+
+// ADD_FAILURE unconditionally adds a failure to the current test.
+// SUCCEED generates a success - it doesn't automatically make the
+// current test successful, as a test is only successful when it has
+// no failure.
+//
+// EXPECT_* verifies that a certain condition is satisfied.  If not,
+// it behaves like ADD_FAILURE.
In particular: +// +// EXPECT_TRUE verifies that a Boolean condition is true. +// EXPECT_FALSE verifies that a Boolean condition is false. +// +// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except +// that they will also abort the current function on failure. People +// usually want the fail-fast behavior of FAIL and ASSERT_*, but those +// writing data-driven tests often find themselves using ADD_FAILURE +// and EXPECT_* more. + +// Generates a nonfatal failure with a generic message. +#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed") + +// Generates a nonfatal failure at the given source file location with +// a generic message. +#define ADD_FAILURE_AT(file, line) \ + GTEST_MESSAGE_AT_(file, line, "Failed", \ + ::testing::TestPartResult::kNonFatalFailure) + +// Generates a fatal failure with a generic message. +#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed") + +// Define this macro to 1 to omit the definition of FAIL(), which is a +// generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_FAIL +# define FAIL() GTEST_FAIL() +#endif + +// Generates a success with a generic message. +#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded") + +// Define this macro to 1 to omit the definition of SUCCEED(), which +// is a generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_SUCCEED +# define SUCCEED() GTEST_SUCCEED() +#endif + +// Macros for testing exceptions. +// +// * {ASSERT|EXPECT}_THROW(statement, expected_exception): +// Tests that the statement throws the expected exception. +// * {ASSERT|EXPECT}_NO_THROW(statement): +// Tests that the statement doesn't throw any exception. +// * {ASSERT|EXPECT}_ANY_THROW(statement): +// Tests that the statement throws an exception. + +#define EXPECT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_) +#define EXPECT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define EXPECT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define ASSERT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_) +#define ASSERT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_) +#define ASSERT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_) + +// Boolean assertions. Condition can be either a Boolean expression or an +// AssertionResult. For more information on how to use AssertionResult with +// these macros see comments on that class. +#define EXPECT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_NONFATAL_FAILURE_) +#define EXPECT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_NONFATAL_FAILURE_) +#define ASSERT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_FATAL_FAILURE_) +#define ASSERT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_FATAL_FAILURE_) + +// Includes the auto-generated header that implements a family of +// generic predicate assertion macros. +// Copyright 2006, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file is AUTOMATICALLY GENERATED on 10/31/2011 by command +// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND! +// +// Implements a family of generic predicate assertion macros. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ + +// Makes sure this header is not included before gtest.h. +#ifndef GTEST_INCLUDE_GTEST_GTEST_H_ +# error Do not include gtest_pred_impl.h directly. Include gtest.h instead. +#endif // GTEST_INCLUDE_GTEST_GTEST_H_ + +// This header implements a family of generic predicate assertion +// macros: +// +// ASSERT_PRED_FORMAT1(pred_format, v1) +// ASSERT_PRED_FORMAT2(pred_format, v1, v2) +// ... +// +// where pred_format is a function or functor that takes n (in the +// case of ASSERT_PRED_FORMATn) values and their source expression +// text, and returns a testing::AssertionResult. See the definition +// of ASSERT_EQ in gtest.h for an example. +// +// If you don't care about formatting, you can use the more +// restrictive version: +// +// ASSERT_PRED1(pred, v1) +// ASSERT_PRED2(pred, v1, v2) +// ... +// +// where pred is an n-ary function or functor that returns bool, +// and the values v1, v2, ..., must support the << operator for +// streaming to std::ostream. +// +// We also define the EXPECT_* variations. +// +// For now we only support predicates whose arity is at most 5. +// Please email googletestframework@googlegroups.com if you need +// support for higher arities. + +// GTEST_ASSERT_ is the basic statement to which all of the assertions +// in this file reduce. Don't use this in your code. + +#define GTEST_ASSERT_(expression, on_failure) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (const ::testing::AssertionResult gtest_ar = (expression)) \ + ; \ + else \ + on_failure(gtest_ar.failure_message()) + + +// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use +// this in your code. 
+template <typename Pred,
+          typename T1>
+AssertionResult AssertPred1Helper(const char* pred_text,
+                                  const char* e1,
+                                  Pred pred,
+                                  const T1& v1) {
+  if (pred(v1)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, v1), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED1.  Don't use
+// this in your code.
+#define GTEST_PRED1_(pred, v1, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
+                                             #v1, \
+                                             pred, \
+                                             v1), on_failure)
+
+// Unary predicate assertion macros.
+#define EXPECT_PRED_FORMAT1(pred_format, v1) \
+  GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED1(pred, v1) \
+  GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT1(pred_format, v1) \
+  GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED1(pred, v1) \
+  GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED2.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2>
+AssertionResult AssertPred2Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2) {
+  if (pred(v1, v2)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED2.  Don't use
+// this in your code.
+#define GTEST_PRED2_(pred, v1, v2, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             pred, \
+                                             v1, \
+                                             v2), on_failure)
+
+// Binary predicate assertion macros.
+#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
+  GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED2(pred, v1, v2) \
+  GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
+  GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED2(pred, v1, v2) \
+  GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
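A usage sketch for the binary form; MutuallyPrime here is a hypothetical user predicate, and anything callable with the two values and convertible to bool works:

    #include "gtest/gtest.h"

    bool MutuallyPrime(int m, int n) {
      // Hypothetical helper: Euclid's algorithm, mutually prime iff gcd == 1.
      while (n != 0) { int t = m % n; m = n; n = t; }
      return m == 1 || m == -1;
    }

    TEST(PredDemo, BinaryPredicate) {
      // On failure, AssertPred2Helper above prints:
      //   MutuallyPrime(m, n) evaluates to false, where
      //   m evaluates to ... / n evaluates to ...
      EXPECT_PRED2(MutuallyPrime, 5, 6);
    }
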
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED3.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3>
+AssertionResult AssertPred3Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3) {
+  if (pred(v1, v2, v3)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ", "
+                            << e3 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2
+                            << "\n" << e3 << " evaluates to " << v3;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED3.  Don't use
+// this in your code.
+#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             #v3, \
+                                             pred, \
+                                             v1, \
+                                             v2, \
+                                             v3), on_failure)
+
+// Ternary predicate assertion macros.
+#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED3(pred, v1, v2, v3) \
+  GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED3(pred, v1, v2, v3) \
+  GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED4.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3,
+          typename T4>
+AssertionResult AssertPred4Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  const char* e4,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3,
+                                  const T4& v4) {
+  if (pred(v1, v2, v3, v4)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ", "
+                            << e3 << ", "
+                            << e4 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2
+                            << "\n" << e3 << " evaluates to " << v3
+                            << "\n" << e4 << " evaluates to " << v4;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED4.  Don't use
+// this in your code.
+#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             #v3, \
+                                             #v4, \
+                                             pred, \
+                                             v1, \
+                                             v2, \
+                                             v3, \
+                                             v4), on_failure)
+
+// 4-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
+  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
+  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED5.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3,
+          typename T4,
+          typename T5>
+AssertionResult AssertPred5Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  const char* e4,
+                                  const char* e5,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3,
+                                  const T4& v4,
+                                  const T5& v5) {
+  if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ", "
+                            << e3 << ", "
+                            << e4 << ", "
+                            << e5 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2
+                            << "\n" << e3 << " evaluates to " << v3
+                            << "\n" << e4 << " evaluates to " << v4
+                            << "\n" << e5 << " evaluates to " << v5;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED5.
Don't use +// this in your code. +#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \ + #v1, \ + #v2, \ + #v3, \ + #v4, \ + #v5, \ + pred, \ + v1, \ + v2, \ + v3, \ + v4, \ + v5), on_failure) + +// 5-ary predicate assertion macros. +#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ + GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \ + GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ + GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \ + GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) + + + +#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ + +// Macros for testing equalities and inequalities. +// +// * {ASSERT|EXPECT}_EQ(expected, actual): Tests that expected == actual +// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2 +// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2 +// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2 +// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2 +// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2 +// +// When they are not, Google Test prints both the tested expressions and +// their actual values. The values must be compatible built-in types, +// or you will get a compiler error. By "compatible" we mean that the +// values can be compared by the respective operator. +// +// Note: +// +// 1. It is possible to make a user-defined type work with +// {ASSERT|EXPECT}_??(), but that requires overloading the +// comparison operators and is thus discouraged by the Google C++ +// Usage Guide. Therefore, you are advised to use the +// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are +// equal. +// +// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on +// pointers (in particular, C strings). Therefore, if you use it +// with two C strings, you are testing how their locations in memory +// are related, not how their content is related. To compare two C +// strings by content, use {ASSERT|EXPECT}_STR*(). +// +// 3. {ASSERT|EXPECT}_EQ(expected, actual) is preferred to +// {ASSERT|EXPECT}_TRUE(expected == actual), as the former tells you +// what the actual value is when it fails, and similarly for the +// other comparisons. +// +// 4. Do not depend on the order in which {ASSERT|EXPECT}_??() +// evaluate their arguments, which is undefined. +// +// 5. These macros evaluate their arguments exactly once. 
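Note 2 in the list above is the most common trap, so a concrete sketch:

    #include "gtest/gtest.h"

    TEST(CStringDemo, CompareContentsNotAddresses) {
      char buffer[] = "hello";
      const char* p = buffer;
      // EXPECT_EQ(p, "hello") would compare two addresses and is almost
      // certain to fail even though the contents match.
      // EXPECT_STREQ compares the characters, per note 2:
      EXPECT_STREQ("hello", p);
    }
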
+
+//
+// Examples:
+//
+//   EXPECT_NE(5, Foo());
+//   EXPECT_EQ(NULL, a_pointer);
+//   ASSERT_LT(i, array_size);
+//   ASSERT_GT(records.size(), 0) << "There is no record left.";
+
+#define EXPECT_EQ(expected, actual) \
+  EXPECT_PRED_FORMAT2(::testing::internal:: \
+                      EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
+                      expected, actual)
+#define EXPECT_NE(expected, actual) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, expected, actual)
+#define EXPECT_LE(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define EXPECT_LT(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define EXPECT_GE(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define EXPECT_GT(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+#define GTEST_ASSERT_EQ(expected, actual) \
+  ASSERT_PRED_FORMAT2(::testing::internal:: \
+                      EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
+                      expected, actual)
+#define GTEST_ASSERT_NE(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
+#define GTEST_ASSERT_LE(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define GTEST_ASSERT_LT(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define GTEST_ASSERT_GE(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define GTEST_ASSERT_GT(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of
+// ASSERT_XY(), which clashes with some users' own code.
+
+#if !GTEST_DONT_DEFINE_ASSERT_EQ
+# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_NE
+# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LE
+# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LT
+# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GE
+# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GT
+# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2)
+#endif
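This is where the EqHelper machinery from earlier in the header pays off: GTEST_IS_NULL_LITERAL_ routes a literal NULL first argument to the EqHelper<true> specialization, so the pointer overload receives a correctly typed null. A sketch:

    #include <cstddef>
    #include "gtest/gtest.h"

    TEST(EqDemo, NullLiteralOnTheLeft) {
      int* ptr = NULL;
      // Dispatches to EqHelper<true>::Compare, which substitutes
      // static_cast<int*>(NULL) for the untyped literal:
      EXPECT_EQ(NULL, ptr);
      // A non-literal first argument uses EqHelper<false>:
      ASSERT_EQ(4, 2 + 2);
    }
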
+
+// C-string Comparisons.  All tests treat NULL and any non-NULL string
+// as different.  Two NULLs are equal.
+//
+//    * {ASSERT|EXPECT}_STREQ(s1, s2):     Tests that s1 == s2
+//    * {ASSERT|EXPECT}_STRNE(s1, s2):     Tests that s1 != s2
+//    * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case
+//    * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case
+//
+// For wide or narrow string objects, you can use the
+// {ASSERT|EXPECT}_??() macros.
+//
+// Don't depend on the order in which the arguments are evaluated,
+// which is undefined.
+//
+// These macros evaluate their arguments exactly once.
+
+#define EXPECT_STREQ(expected, actual) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
+#define EXPECT_STRNE(s1, s2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define EXPECT_STRCASEEQ(expected, actual) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
+#define EXPECT_STRCASENE(s1, s2)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+#define ASSERT_STREQ(expected, actual) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
+#define ASSERT_STRNE(s1, s2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define ASSERT_STRCASEEQ(expected, actual) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
+#define ASSERT_STRCASENE(s1, s2)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+// Macros for comparing floating-point numbers.
+//
+//    * {ASSERT|EXPECT}_FLOAT_EQ(expected, actual):
+//         Tests that two float values are almost equal.
+//    * {ASSERT|EXPECT}_DOUBLE_EQ(expected, actual):
+//         Tests that two double values are almost equal.
+//    * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):
+//         Tests that v1 and v2 are within the given distance to each other.
+//
+// Google Test uses ULP-based comparison to automatically pick a default
+// error bound that is appropriate for the operands.  See the
+// FloatingPoint template class in gtest-internal.h if you are
+// interested in the implementation details.
+
+#define EXPECT_FLOAT_EQ(expected, actual)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+                      expected, actual)
+
+#define EXPECT_DOUBLE_EQ(expected, actual)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+                      expected, actual)
+
+#define ASSERT_FLOAT_EQ(expected, actual)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+                      expected, actual)
+
+#define ASSERT_DOUBLE_EQ(expected, actual)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+                      expected, actual)
+
+#define EXPECT_NEAR(val1, val2, abs_error)\
+  EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+                      val1, val2, abs_error)
+
+#define ASSERT_NEAR(val1, val2, abs_error)\
+  ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+                      val1, val2, abs_error)
+
+// These predicate format functions work on floating-point values, and
+// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.
+//
+//   EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,
+                                   float val1, float val2);
+GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,
+                                    double val1, double val2);
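As the comment above suggests, FloatLE and DoubleLE are designed to be handed to the PRED_FORMAT2 macros; a sketch:

    #include "gtest/gtest.h"

    TEST(DoubleLeDemo, LessOrAlmostEqual) {
      double measured = 5.0 - 1e-15;  // almost exactly 5.0
      // Passes when measured is less than, or almost equal to, 5.0;
      // fails (printing both expressions) if either value is NaN.
      EXPECT_PRED_FORMAT2(::testing::DoubleLE, measured, 5.0);
    }
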
+
+
+#if GTEST_OS_WINDOWS
+
+// Macros that test for HRESULT failure and success, these are only useful
+// on Windows, and rely on Windows SDK macros and APIs to compile.
+//
+//    * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)
+//
+// When expr unexpectedly fails or succeeds, Google Test prints the
+// expected result and the actual result with both a human-readable
+// string representation of the error, if available, as well as the
+// hex result code.
+# define EXPECT_HRESULT_SUCCEEDED(expr) \
+    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define ASSERT_HRESULT_SUCCEEDED(expr) \
+    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define EXPECT_HRESULT_FAILED(expr) \
+    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+# define ASSERT_HRESULT_FAILED(expr) \
+    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+#endif  // GTEST_OS_WINDOWS
+
+// Macros that execute statement and check that it doesn't generate new fatal
+// failures in the current thread.
+//
+//   * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
+//
+// Examples:
+//
+//   EXPECT_NO_FATAL_FAILURE(Process());
+//   ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
+//
+#define ASSERT_NO_FATAL_FAILURE(statement) \
+    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
+#define EXPECT_NO_FATAL_FAILURE(statement) \
+    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)
+
+// Causes a trace (including the source file path, the current line
+// number, and the given message) to be included in every test failure
+// message generated by code in the current scope.  The effect is
+// undone when the control leaves the current scope.
+//
+// The message argument can be anything streamable to std::ostream.
+//
+// In the implementation, we include the current line number as part
+// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
+// to appear in the same block - as long as they are on different
+// lines.
+#define SCOPED_TRACE(message) \
+  ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
+    __FILE__, __LINE__, ::testing::Message() << (message))
+
+// Compile-time assertion for type equality.
+// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
+// the same type.  The value it returns is not interesting.
+//
+// Instead of making StaticAssertTypeEq a class template, we make it a
+// function template that invokes a helper class template.  This
+// prevents a user from misusing StaticAssertTypeEq<T1, T2> by
+// defining objects of that type.
+//
+// CAVEAT:
+//
+// When used inside a method of a class template,
+// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is
+// instantiated.  For example, given:
+//
+//   template <typename T> class Foo {
+//    public:
+//     void Bar() { testing::StaticAssertTypeEq<int, T>(); }
+//   };
+//
+// the code:
+//
+//   void Test1() { Foo<bool> foo; }
+//
+// will NOT generate a compiler error, as Foo<bool>::Bar() is never
+// actually instantiated.  Instead, you need:
+//
+//   void Test2() { Foo<bool> foo; foo.Bar(); }
+//
+// to cause a compiler error.
+template <typename T1, typename T2>
+bool StaticAssertTypeEq() {
+  (void)internal::StaticAssertTypeEqHelper<T1, T2>();
+  return true;
+}
+
+// Defines a test.
+//
+// The first parameter is the name of the test case, and the second
+// parameter is the name of the test within the test case.
+//
+// The convention is to end the test case name with "Test".  For
+// example, a test case for the Foo class can be named FooTest.
+//
+// The user should put his test code between braces after using this
+// macro.  Example:
+//
+//   TEST(FooTest, InitializesCorrectly) {
+//     Foo foo;
+//     EXPECT_TRUE(foo.StatusIsOK());
+//   }
+
+// Note that we call GetTestTypeId() instead of GetTypeId<
+// ::testing::Test>() here to get the type ID of testing::Test.  This
+// is to work around a suspected linker bug when using Google Test as
+// a framework on Mac OS X.
The bug causes GetTypeId< +// ::testing::Test>() to return different values depending on whether +// the call is from the Google Test framework itself or from user test +// code. GetTestTypeId() is guaranteed to always return the same +// value, as it always calls GetTypeId<>() from the Google Test +// framework. +#define GTEST_TEST(test_case_name, test_name)\ + GTEST_TEST_(test_case_name, test_name, \ + ::testing::Test, ::testing::internal::GetTestTypeId()) + +// Define this macro to 1 to omit the definition of TEST(), which +// is a generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_TEST +# define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name) +#endif + +// Defines a test that uses a test fixture. +// +// The first parameter is the name of the test fixture class, which +// also doubles as the test case name. The second parameter is the +// name of the test within the test case. +// +// A test fixture class must be declared earlier. The user should put +// his test code between braces after using this macro. Example: +// +// class FooTest : public testing::Test { +// protected: +// virtual void SetUp() { b_.AddElement(3); } +// +// Foo a_; +// Foo b_; +// }; +// +// TEST_F(FooTest, InitializesCorrectly) { +// EXPECT_TRUE(a_.StatusIsOK()); +// } +// +// TEST_F(FooTest, ReturnsElementCountCorrectly) { +// EXPECT_EQ(0, a_.size()); +// EXPECT_EQ(1, b_.size()); +// } + +#define TEST_F(test_fixture, test_name)\ + GTEST_TEST_(test_fixture, test_name, test_fixture, \ + ::testing::internal::GetTypeId()) + +} // namespace testing + +// Use this function in main() to run all tests. It returns 0 if all +// tests are successful, or 1 otherwise. +// +// RUN_ALL_TESTS() should be invoked after the command line has been +// parsed by InitGoogleTest(). +// +// This function was formerly a macro; thus, it is in the global +// namespace and has an all-caps name. +int RUN_ALL_TESTS() GTEST_MUST_USE_RESULT_; + +inline int RUN_ALL_TESTS() { + return ::testing::UnitTest::GetInstance()->Run(); +} + +#endif // GTEST_INCLUDE_GTEST_GTEST_H_ diff --git a/external/gtest/fused-src/gtest/gtest_main.cc b/external/gtest/fused-src/gtest/gtest_main.cc new file mode 100755 index 0000000000..f302822552 --- /dev/null +++ b/external/gtest/fused-src/gtest/gtest_main.cc @@ -0,0 +1,38 @@ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+#include <stdio.h>
+
+#include "gtest/gtest.h"
+
+GTEST_API_ int main(int argc, char **argv) {
+  printf("Running main() from gtest_main.cc\n");
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/tests/gtest/include/gtest/gtest-death-test.h b/external/gtest/include/gtest/gtest-death-test.h
old mode 100644
new mode 100755
similarity index 94%
rename from tests/gtest/include/gtest/gtest-death-test.h
rename to external/gtest/include/gtest/gtest-death-test.h
index a27883f0a4..957a69c6a9
--- a/tests/gtest/include/gtest/gtest-death-test.h
+++ b/external/gtest/include/gtest/gtest-death-test.h
@@ -51,6 +51,17 @@ GTEST_DECLARE_string_(death_test_style);
 
 #if GTEST_HAS_DEATH_TEST
 
+namespace internal {
+
+// Returns a Boolean value indicating whether the caller is currently
+// executing in the context of the death test child process.  Tools such as
+// Valgrind heap checkers may need this to modify their behavior in death
+// tests.  IMPORTANT: This is an internal utility.  Using it may break the
+// implementation of death tests.  User code MUST NOT use it.
+GTEST_API_ bool InDeathTestChild();
+
+}  // namespace internal
+
 // The following macros are useful for writing death tests.
 
 // Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is
@@ -75,7 +86,7 @@ GTEST_DECLARE_string_(death_test_style);
 //   for (int i = 0; i < 5; i++) {
 //     EXPECT_DEATH(server.ProcessRequest(i),
 //                  "Invalid request .* in ProcessRequest()")
-//         << "Failed to die on request " << i);
+//         << "Failed to die on request " << i;
 //   }
 //
 //   ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting");
@@ -245,10 +256,10 @@ class GTEST_API_ KilledBySignal {
 # ifdef NDEBUG
 
 # define EXPECT_DEBUG_DEATH(statement, regex) \
-  do { statement; } while (::testing::internal::AlwaysFalse())
+  GTEST_EXECUTE_STATEMENT_(statement, regex)
 
 # define ASSERT_DEBUG_DEATH(statement, regex) \
-  do { statement; } while (::testing::internal::AlwaysFalse())
+  GTEST_EXECUTE_STATEMENT_(statement, regex)
 
 # else
- // We allocate the stringstream separately because otherwise each use of - // ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's - // stack frame leading to huge stack frames in some cases; gcc does not reuse - // the stack space. - Message() : ss_(new ::std::stringstream) { - // By default, we want there to be enough precision when printing - // a double to a Message. - *ss_ << std::setprecision(std::numeric_limits::digits10 + 2); - } + Message(); // Copy constructor. Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT @@ -118,7 +113,22 @@ class GTEST_API_ Message { // Streams a non-pointer value to this object. template inline Message& operator <<(const T& val) { - ::GTestStreamToHelper(ss_.get(), val); + // Some libraries overload << for STL containers. These + // overloads are defined in the global namespace instead of ::std. + // + // C++'s symbol lookup rule (i.e. Koenig lookup) says that these + // overloads are visible in either the std namespace or the global + // namespace, but not other namespaces, including the testing + // namespace which Google Test's Message class is in. + // + // To allow STL containers (and other types that has a << operator + // defined in the global namespace) to be used in Google Test + // assertions, testing::Message must access the custom << operator + // from the global namespace. With this using declaration, + // overloads of << defined in the global namespace and those + // visible via Koenig lookup are both exposed in this function. + using ::operator <<; + *ss_ << val; return *this; } @@ -140,7 +150,7 @@ class GTEST_API_ Message { if (pointer == NULL) { *ss_ << "(null)"; } else { - ::GTestStreamToHelper(ss_.get(), pointer); + *ss_ << pointer; } return *this; } @@ -164,12 +174,8 @@ class GTEST_API_ Message { // These two overloads allow streaming a wide C string to a Message // using the UTF-8 encoding. - Message& operator <<(const wchar_t* wide_c_str) { - return *this << internal::String::ShowWideCString(wide_c_str); - } - Message& operator <<(wchar_t* wide_c_str) { - return *this << internal::String::ShowWideCString(wide_c_str); - } + Message& operator <<(const wchar_t* wide_c_str); + Message& operator <<(wchar_t* wide_c_str); #if GTEST_HAS_STD_WSTRING // Converts the given wide string to a narrow string using the UTF-8 @@ -183,13 +189,11 @@ class GTEST_API_ Message { Message& operator <<(const ::wstring& wstr); #endif // GTEST_HAS_GLOBAL_WSTRING - // Gets the text streamed to this object so far as a String. + // Gets the text streamed to this object so far as an std::string. // Each '\0' character in the buffer is replaced with "\\0". // // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. - internal::String GetString() const { - return internal::StringStreamToString(ss_.get()); - } + std::string GetString() const; private: @@ -199,16 +203,20 @@ class GTEST_API_ Message { // decide between class template specializations for T and T*, so a // tr1::type_traits-like is_pointer works, and we can overload on that. 
template - inline void StreamHelper(internal::true_type /*dummy*/, T* pointer) { + inline void StreamHelper(internal::true_type /*is_pointer*/, T* pointer) { if (pointer == NULL) { *ss_ << "(null)"; } else { - ::GTestStreamToHelper(ss_.get(), pointer); + *ss_ << pointer; } } template - inline void StreamHelper(internal::false_type /*dummy*/, const T& value) { - ::GTestStreamToHelper(ss_.get(), value); + inline void StreamHelper(internal::false_type /*is_pointer*/, + const T& value) { + // See the comments in Message& operator <<(const T&) above for why + // we need this using statement. + using ::operator <<; + *ss_ << value; } #endif // GTEST_OS_SYMBIAN @@ -225,6 +233,18 @@ inline std::ostream& operator <<(std::ostream& os, const Message& sb) { return os << sb.GetString(); } +namespace internal { + +// Converts a streamable value to an std::string. A NULL pointer is +// converted to "(null)". When the input value is a ::string, +// ::std::string, ::wstring, or ::std::wstring object, each NUL +// character in it is replaced with "\\0". +template +std::string StreamableToString(const T& streamable) { + return (Message() << streamable).GetString(); +} + +} // namespace internal } // namespace testing #endif // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ diff --git a/tests/gtest/include/gtest/gtest-param-test.h b/external/gtest/include/gtest/gtest-param-test.h old mode 100644 new mode 100755 similarity index 99% rename from tests/gtest/include/gtest/gtest-param-test.h rename to external/gtest/include/gtest/gtest-param-test.h index 6407cfd685..d6702c8f16 --- a/tests/gtest/include/gtest/gtest-param-test.h +++ b/external/gtest/include/gtest/gtest-param-test.h @@ -1257,7 +1257,7 @@ inline internal::ParamGenerator Bool() { // Boolean flags: // // class FlagDependentTest -// : public testing::TestWithParam > { +// : public testing::TestWithParam > { // virtual void SetUp() { // // Assigns external_flag_1 and external_flag_2 values from the tuple. // tie(external_flag_1, external_flag_2) = GetParam(); diff --git a/tests/gtest/include/gtest/gtest-param-test.h.pump b/external/gtest/include/gtest/gtest-param-test.h.pump old mode 100644 new mode 100755 similarity index 99% rename from tests/gtest/include/gtest/gtest-param-test.h.pump rename to external/gtest/include/gtest/gtest-param-test.h.pump index 401cb513a8..2dc9303b5e --- a/tests/gtest/include/gtest/gtest-param-test.h.pump +++ b/external/gtest/include/gtest/gtest-param-test.h.pump @@ -414,7 +414,7 @@ inline internal::ParamGenerator Bool() { // Boolean flags: // // class FlagDependentTest -// : public testing::TestWithParam > { +// : public testing::TestWithParam > { // virtual void SetUp() { // // Assigns external_flag_1 and external_flag_2 values from the tuple. // tie(external_flag_1, external_flag_2) = GetParam(); diff --git a/tests/gtest/include/gtest/gtest-printers.h b/external/gtest/include/gtest/gtest-printers.h old mode 100644 new mode 100755 similarity index 93% rename from tests/gtest/include/gtest/gtest-printers.h rename to external/gtest/include/gtest/gtest-printers.h index 9cbab3ff4b..0639d9f586 --- a/tests/gtest/include/gtest/gtest-printers.h +++ b/external/gtest/include/gtest/gtest-printers.h @@ -630,9 +630,12 @@ void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) { } } // This overload prints a (const) char array compactly. 
-GTEST_API_ void UniversalPrintArray(const char* begin, - size_t len, - ::std::ostream* os); +GTEST_API_ void UniversalPrintArray( + const char* begin, size_t len, ::std::ostream* os); + +// This overload prints a (const) wchar_t array compactly. +GTEST_API_ void UniversalPrintArray( + const wchar_t* begin, size_t len, ::std::ostream* os); // Implements printing an array type T[N]. template @@ -673,19 +676,72 @@ class UniversalPrinter { // Prints a value tersely: for a reference type, the referenced value // (but not the address) is printed; for a (const) char pointer, the // NUL-terminated string (but not the pointer) is printed. + template -void UniversalTersePrint(const T& value, ::std::ostream* os) { - UniversalPrint(value, os); -} -inline void UniversalTersePrint(const char* str, ::std::ostream* os) { - if (str == NULL) { - *os << "NULL"; - } else { - UniversalPrint(string(str), os); +class UniversalTersePrinter { + public: + static void Print(const T& value, ::std::ostream* os) { + UniversalPrint(value, os); } -} -inline void UniversalTersePrint(char* str, ::std::ostream* os) { - UniversalTersePrint(static_cast(str), os); +}; +template +class UniversalTersePrinter { + public: + static void Print(const T& value, ::std::ostream* os) { + UniversalPrint(value, os); + } +}; +template +class UniversalTersePrinter { + public: + static void Print(const T (&value)[N], ::std::ostream* os) { + UniversalPrinter::Print(value, os); + } +}; +template <> +class UniversalTersePrinter { + public: + static void Print(const char* str, ::std::ostream* os) { + if (str == NULL) { + *os << "NULL"; + } else { + UniversalPrint(string(str), os); + } + } +}; +template <> +class UniversalTersePrinter { + public: + static void Print(char* str, ::std::ostream* os) { + UniversalTersePrinter::Print(str, os); + } +}; + +#if GTEST_HAS_STD_WSTRING +template <> +class UniversalTersePrinter { + public: + static void Print(const wchar_t* str, ::std::ostream* os) { + if (str == NULL) { + *os << "NULL"; + } else { + UniversalPrint(::std::wstring(str), os); + } + } +}; +#endif + +template <> +class UniversalTersePrinter { + public: + static void Print(wchar_t* str, ::std::ostream* os) { + UniversalTersePrinter::Print(str, os); + } +}; + +template +void UniversalTersePrint(const T& value, ::std::ostream* os) { + UniversalTersePrinter::Print(value, os); } // Prints a value using the type inferred by the compiler. The @@ -694,7 +750,10 @@ inline void UniversalTersePrint(char* str, ::std::ostream* os) { // NUL-terminated string. template void UniversalPrint(const T& value, ::std::ostream* os) { - UniversalPrinter::Print(value, os); + // A workarond for the bug in VC++ 7.1 that prevents us from instantiating + // UniversalPrinter with T directly. 
+ typedef T T1; + UniversalPrinter::Print(value, os); } #if GTEST_HAS_TR1_TUPLE @@ -787,7 +846,7 @@ Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) { template ::std::string PrintToString(const T& value) { ::std::stringstream ss; - internal::UniversalTersePrint(value, &ss); + internal::UniversalTersePrinter::Print(value, &ss); return ss.str(); } diff --git a/tests/gtest/include/gtest/gtest-spi.h b/external/gtest/include/gtest/gtest-spi.h old mode 100644 new mode 100755 similarity index 99% rename from tests/gtest/include/gtest/gtest-spi.h rename to external/gtest/include/gtest/gtest-spi.h index b226e55048..f59ec6d71c --- a/tests/gtest/include/gtest/gtest-spi.h +++ b/external/gtest/include/gtest/gtest-spi.h @@ -75,7 +75,7 @@ class GTEST_API_ ScopedFakeTestPartResultReporter // // This method is from the TestPartResultReporterInterface // interface. - virtual void ReportTestPartResult(const TestPartResult& result); + virtual void ReportTestPartResult(const TestPartResult& result) override; private: void Init(); @@ -223,7 +223,7 @@ class GTEST_API_ SingleFailureChecker { (substr));\ {\ ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\ - ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS,\ + ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \ >est_failures);\ if (::testing::internal::AlwaysTrue()) { statement; }\ }\ diff --git a/tests/gtest/include/gtest/gtest-test-part.h b/external/gtest/include/gtest/gtest-test-part.h old mode 100644 new mode 100755 similarity index 93% rename from tests/gtest/include/gtest/gtest-test-part.h rename to external/gtest/include/gtest/gtest-test-part.h index 8aeea14984..b93b114fa2 --- a/tests/gtest/include/gtest/gtest-test-part.h +++ b/external/gtest/include/gtest/gtest-test-part.h @@ -62,7 +62,7 @@ class GTEST_API_ TestPartResult { int a_line_number, const char* a_message) : type_(a_type), - file_name_(a_file_name), + file_name_(a_file_name == NULL ? "" : a_file_name), line_number_(a_line_number), summary_(ExtractSummary(a_message)), message_(a_message) { @@ -73,7 +73,9 @@ class GTEST_API_ TestPartResult { // Gets the name of the source file where the test part took place, or // NULL if it's unknown. - const char* file_name() const { return file_name_.c_str(); } + const char* file_name() const { + return file_name_.empty() ? NULL : file_name_.c_str(); + } // Gets the line in the source file where the test part took place, // or -1 if it's unknown. @@ -96,21 +98,22 @@ class GTEST_API_ TestPartResult { // Returns true iff the test part fatally failed. bool fatally_failed() const { return type_ == kFatalFailure; } + private: Type type_; // Gets the summary of the failure message by omitting the stack // trace in it. - static internal::String ExtractSummary(const char* message); + static std::string ExtractSummary(const char* message); // The name of the source file where the test part took place, or - // NULL if the source file is unknown. - internal::String file_name_; + // "" if the source file is unknown. + std::string file_name_; // The line in the source file where the test part took place, or -1 // if the line number is unknown. int line_number_; - internal::String summary_; // The test failure summary. - internal::String message_; // The test failure message. + std::string summary_; // The test failure summary. + std::string message_; // The test failure message. }; // Prints a TestPartResult object. 
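The file_name_ change in the hunk above swaps internal::String for std::string, which cannot represent NULL; the constructor and getter therefore map NULL to "" on the way in and back to NULL on the way out. A sketch of that round-trip (assumes the public constructor shown in the hunk):

    #include "gtest/gtest-test-part.h"

    void FileNameRoundTrip() {
      ::testing::TestPartResult result(
          ::testing::TestPartResult::kNonFatalFailure,
          NULL,   // unknown source file: stored internally as ""
          -1, "some failure message");
      // The getter restores the NULL convention for callers:
      const char* name = result.file_name();  // == NULL
      (void)name;
    }
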
@@ -159,8 +162,8 @@ class GTEST_API_ HasNewFatalFailureHelper : public TestPartResultReporterInterface { public: HasNewFatalFailureHelper(); - virtual ~HasNewFatalFailureHelper(); - virtual void ReportTestPartResult(const TestPartResult& result); + virtual ~HasNewFatalFailureHelper() override; + virtual void ReportTestPartResult(const TestPartResult& result) override; bool has_new_fatal_failure() const { return has_new_fatal_failure_; } private: bool has_new_fatal_failure_; diff --git a/tests/gtest/include/gtest/gtest-typed-test.h b/external/gtest/include/gtest/gtest-typed-test.h old mode 100644 new mode 100755 similarity index 100% rename from tests/gtest/include/gtest/gtest-typed-test.h rename to external/gtest/include/gtest/gtest-typed-test.h diff --git a/external/gtest/include/gtest/gtest.h b/external/gtest/include/gtest/gtest.h new file mode 100755 index 0000000000..82b58dc7af --- /dev/null +++ b/external/gtest/include/gtest/gtest.h @@ -0,0 +1,2291 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the public API for Google Test. It should be +// included by any test program that uses Google Test. +// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. +// They are clearly marked by comments like this: +// +// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +// +// Such code is NOT meant to be used by a user directly, and is subject +// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user +// program! +// +// Acknowledgment: Google Test borrowed the idea of automatic test +// registration from Barthelemy Dagenais' (barthelemy@prologique.com) +// easyUnit framework. 
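For orientation before the declarations start, a minimal complete program against this public API (a sketch; it mirrors the fused gtest_main.cc added earlier in this diff):

    #include "gtest/gtest.h"

    TEST(SmokeTest, BasicAssertions) {
      EXPECT_TRUE(2 + 2 == 4);
      ASSERT_EQ(4, 2 + 2) << "integer arithmetic is broken";
    }

    int main(int argc, char** argv) {
      ::testing::InitGoogleTest(&argc, argv);
      return RUN_ALL_TESTS();
    }
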
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_H_
+
+#include <limits>
+#include <ostream>
+#include <vector>
+
+#include "gtest/internal/gtest-internal.h"
+#include "gtest/internal/gtest-string.h"
+#include "gtest/gtest-death-test.h"
+#include "gtest/gtest-message.h"
+#include "gtest/gtest-param-test.h"
+#include "gtest/gtest-printers.h"
+#include "gtest/gtest_prod.h"
+#include "gtest/gtest-test-part.h"
+#include "gtest/gtest-typed-test.h"
+
+// Depending on the platform, different string classes are available.
+// On Linux, in addition to ::std::string, Google also makes use of
+// class ::string, which has the same interface as ::std::string, but
+// has a different implementation.
+//
+// The user can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that
+// ::string is available AND is a distinct type to ::std::string, or
+// define it to 0 to indicate otherwise.
+//
+// If the user's ::std::string and ::string are the same class due to
+// aliasing, he should define GTEST_HAS_GLOBAL_STRING to 0.
+//
+// If the user doesn't define GTEST_HAS_GLOBAL_STRING, it is defined
+// heuristically.
+
+namespace testing {
+
+// Declares the flags.
+
+// This flag temporary enables the disabled tests.
+GTEST_DECLARE_bool_(also_run_disabled_tests);
+
+// This flag brings the debugger on an assertion failure.
+GTEST_DECLARE_bool_(break_on_failure);
+
+// This flag controls whether Google Test catches all test-thrown exceptions
+// and logs them as failures.
+GTEST_DECLARE_bool_(catch_exceptions);
+
+// This flag enables using colors in terminal output. Available values are
+// "yes" to enable colors, "no" (disable colors), or "auto" (the default)
+// to let Google Test decide.
+GTEST_DECLARE_string_(color);
+
+// This flag sets up the filter to select by name using a glob pattern
+// the tests to run. If the filter is not given all tests are executed.
+GTEST_DECLARE_string_(filter);
+
+// This flag causes the Google Test to list tests. None of the tests listed
+// are actually run if the flag is provided.
+GTEST_DECLARE_bool_(list_tests);
+
+// This flag controls whether Google Test emits a detailed XML report to a file
+// in addition to its normal textual output.
+GTEST_DECLARE_string_(output);
+
+// This flags control whether Google Test prints the elapsed time for each
+// test.
+GTEST_DECLARE_bool_(print_time);
+
+// This flag specifies the random number seed.
+GTEST_DECLARE_int32_(random_seed);
+
+// This flag sets how many times the tests are repeated. The default value
+// is 1. If the value is -1 the tests are repeating forever.
+GTEST_DECLARE_int32_(repeat);
+
+// This flag controls whether Google Test includes Google Test internal
+// stack frames in failure stack traces.
+GTEST_DECLARE_bool_(show_internal_stack_frames);
+
+// When this flag is specified, tests' order is randomized on every iteration.
+GTEST_DECLARE_bool_(shuffle);
+
+// This flag specifies the maximum number of stack frames to be
+// printed in a failure message.
+GTEST_DECLARE_int32_(stack_trace_depth);
+
+// When this flag is specified, a failed assertion will throw an
+// exception if exceptions are enabled, or exit the program with a
+// non-zero code otherwise.
+GTEST_DECLARE_bool_(throw_on_failure);
+
+// When this flag is set with a "host:port" string, on supported
+// platforms test results are streamed to the specified port on
+// the specified host machine.
+GTEST_DECLARE_string_(stream_result_to);
+
+// The upper limit for valid stack trace depths.
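// Each GTEST_DECLARE_* above is surfaced as a --gtest_* command-line flag
// once InitGoogleTest() has parsed argv. A few representative invocations
// (the binary name and test patterns are illustrative):
//
//   ./unit_tests --gtest_filter=WalletTest.*-WalletTest.Slow
//   ./unit_tests --gtest_repeat=100 --gtest_shuffle --gtest_random_seed=7
//   ./unit_tests --gtest_output=xml:report.xml --gtest_color=no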
+const int kMaxStackTraceDepth = 100; + +namespace internal { + +class AssertHelper; +class DefaultGlobalTestPartResultReporter; +class ExecDeathTest; +class NoExecDeathTest; +class FinalSuccessChecker; +class GTestFlagSaver; +class StreamingListenerTest; +class TestResultAccessor; +class TestEventListenersAccessor; +class TestEventRepeater; +class UnitTestRecordPropertyTestHelper; +class WindowsDeathTest; +class UnitTestImpl* GetUnitTestImpl(); +void ReportFailureInUnknownLocation(TestPartResult::Type result_type, + const std::string& message); + +} // namespace internal + +// The friend relationship of some of these classes is cyclic. +// If we don't forward declare them the compiler might confuse the classes +// in friendship clauses with same named classes on the scope. +class Test; +class TestCase; +class TestInfo; +class UnitTest; + +// A class for indicating whether an assertion was successful. When +// the assertion wasn't successful, the AssertionResult object +// remembers a non-empty message that describes how it failed. +// +// To create an instance of this class, use one of the factory functions +// (AssertionSuccess() and AssertionFailure()). +// +// This class is useful for two purposes: +// 1. Defining predicate functions to be used with Boolean test assertions +// EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts +// 2. Defining predicate-format functions to be +// used with predicate assertions (ASSERT_PRED_FORMAT*, etc). +// +// For example, if you define IsEven predicate: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5))) +// will print the message +// +// Value of: IsEven(Fib(5)) +// Actual: false (5 is odd) +// Expected: true +// +// instead of a more opaque +// +// Value of: IsEven(Fib(5)) +// Actual: false +// Expected: true +// +// in case IsEven is a simple Boolean predicate. +// +// If you expect your predicate to be reused and want to support informative +// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up +// about half as often as positive ones in our tests), supply messages for +// both success and failure cases: +// +// testing::AssertionResult IsEven(int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess() << n << " is even"; +// else +// return testing::AssertionFailure() << n << " is odd"; +// } +// +// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print +// +// Value of: IsEven(Fib(6)) +// Actual: true (8 is even) +// Expected: false +// +// NB: Predicates that support negative Boolean assertions have reduced +// performance in positive ones so be careful not to use them in tests +// that have lots (tens of thousands) of positive Boolean assertions. +// +// To use this class with EXPECT_PRED_FORMAT assertions such as: +// +// // Verifies that Foo() returns an even number. +// EXPECT_PRED_FORMAT1(IsEven, Foo()); +// +// you need to define: +// +// testing::AssertionResult IsEven(const char* expr, int n) { +// if ((n % 2) == 0) +// return testing::AssertionSuccess(); +// else +// return testing::AssertionFailure() +// << "Expected: " << expr << " is even\n Actual: it's " << n; +// } +// +// If Foo() returns 5, you will see the following message: +// +// Expected: Foo() is even +// Actual: it's 5 +// +class GTEST_API_ AssertionResult { + public: + // Copy constructor. + // Used in EXPECT_TRUE/FALSE(assertion_result). 
+ AssertionResult(const AssertionResult& other); + // Used in the EXPECT_TRUE/FALSE(bool_expression). + explicit AssertionResult(bool success) : success_(success) {} + + // Returns true iff the assertion succeeded. + operator bool() const { return success_; } // NOLINT + + // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. + AssertionResult operator!() const; + + // Returns the text streamed into this AssertionResult. Test assertions + // use it when they fail (i.e., the predicate's outcome doesn't match the + // assertion's expectation). When nothing has been streamed into the + // object, returns an empty string. + const char* message() const { + return message_.get() != NULL ? message_->c_str() : ""; + } + // TODO(vladl@google.com): Remove this after making sure no clients use it. + // Deprecated; please use message() instead. + const char* failure_message() const { return message(); } + + // Streams a custom failure message into this object. + template AssertionResult& operator<<(const T& value) { + AppendMessage(Message() << value); + return *this; + } + + // Allows streaming basic output manipulators such as endl or flush into + // this object. + AssertionResult& operator<<( + ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) { + AppendMessage(Message() << basic_manipulator); + return *this; + } + + private: + // Appends the contents of message to message_. + void AppendMessage(const Message& a_message) { + if (message_.get() == NULL) + message_.reset(new ::std::string); + message_->append(a_message.GetString().c_str()); + } + + // Stores result of the assertion predicate. + bool success_; + // Stores the message describing the condition in case the expectation + // construct is not satisfied with the predicate's outcome. + // Referenced via a pointer to avoid taking too much stack frame space + // with test assertions. + internal::scoped_ptr< ::std::string> message_; + + GTEST_DISALLOW_ASSIGN_(AssertionResult); +}; + +// Makes a successful assertion result. +GTEST_API_ AssertionResult AssertionSuccess(); + +// Makes a failed assertion result. +GTEST_API_ AssertionResult AssertionFailure(); + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << msg. +GTEST_API_ AssertionResult AssertionFailure(const Message& msg); + +// The abstract class that all tests inherit from. +// +// In Google Test, a unit test program contains one or many TestCases, and +// each TestCase contains one or many Tests. +// +// When you define a test using the TEST macro, you don't need to +// explicitly derive from Test - the TEST macro automatically does +// this for you. +// +// The only time you derive from Test is when defining a test fixture +// to be used a TEST_F. For example: +// +// class FooTest : public testing::Test { +// protected: +// virtual void SetUp() { ... } +// virtual void TearDown() { ... } +// ... +// }; +// +// TEST_F(FooTest, Bar) { ... } +// TEST_F(FooTest, Baz) { ... } +// +// Test is not copyable. +class GTEST_API_ Test { + public: + friend class TestInfo; + + // Defines types for pointers to functions that set up and tear down + // a test case. + typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc; + typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc; + + // The d'tor is virtual as we intend to inherit from Test. + virtual ~Test(); + + // Sets up the stuff shared by all tests in this test case. 
+ // + // Google Test will call Foo::SetUpTestCase() before running the first + // test in test case Foo. Hence a sub-class can define its own + // SetUpTestCase() method to shadow the one defined in the super + // class. + static void SetUpTestCase() {} + + // Tears down the stuff shared by all tests in this test case. + // + // Google Test will call Foo::TearDownTestCase() after running the last + // test in test case Foo. Hence a sub-class can define its own + // TearDownTestCase() method to shadow the one defined in the super + // class. + static void TearDownTestCase() {} + + // Returns true iff the current test has a fatal failure. + static bool HasFatalFailure(); + + // Returns true iff the current test has a non-fatal failure. + static bool HasNonfatalFailure(); + + // Returns true iff the current test has a (either fatal or + // non-fatal) failure. + static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); } + + // Logs a property for the current test, test case, or for the entire + // invocation of the test program when used outside of the context of a + // test case. Only the last value for a given key is remembered. These + // are public static so they can be called from utility functions that are + // not members of the test fixture. Calls to RecordProperty made during + // lifespan of the test (from the moment its constructor starts to the + // moment its destructor finishes) will be output in XML as attributes of + // the element. Properties recorded from fixture's + // SetUpTestCase or TearDownTestCase are logged as attributes of the + // corresponding element. Calls to RecordProperty made in the + // global context (before or after invocation of RUN_ALL_TESTS and from + // SetUp/TearDown method of Environment objects registered with Google + // Test) will be output as attributes of the element. + static void RecordProperty(const std::string& key, const std::string& value); + static void RecordProperty(const std::string& key, int value); + + protected: + // Creates a Test object. + Test(); + + // Sets up the test fixture. + virtual void SetUp(); + + // Tears down the test fixture. + virtual void TearDown(); + + private: + // Returns true iff the current test has the same fixture class as + // the first test in the current test case. + static bool HasSameFixtureClass(); + + // Runs the test after the test fixture has been set up. + // + // A sub-class must implement this to define the test logic. + // + // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM. + // Instead, use the TEST or TEST_F macro. + virtual void TestBody() = 0; + + // Sets up, executes, and tears down the test. + void Run(); + + // Deletes self. We deliberately pick an unusual name for this + // internal method to avoid clashing with names used in user TESTs. + void DeleteSelf_() { delete this; } + + // Uses a GTestFlagSaver to save and restore all Google Test flags. + const internal::GTestFlagSaver* const gtest_flag_saver_; + + // Often a user mis-spells SetUp() as Setup() and spends a long time + // wondering why it is never called by Google Test. The declaration of + // the following method is solely for catching such an error at + // compile time: + // + // - The return type is deliberately chosen to be not void, so it + // will be a conflict if a user declares void Setup() in his test + // fixture. + // + // - This method is private, so it will be another compiler error + // if a user calls it from his test fixture. + // + // DO NOT OVERRIDE THIS FUNCTION. 
+ // + // If you see an error about overriding the following function or + // about it being private, you have mis-spelled SetUp() as Setup(). + struct Setup_should_be_spelled_SetUp {}; + virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; } + + // We disallow copying Tests. + GTEST_DISALLOW_COPY_AND_ASSIGN_(Test); +}; + +typedef internal::TimeInMillis TimeInMillis; + +// A copyable object representing a user specified test property which can be +// output as a key/value string pair. +// +// Don't inherit from TestProperty as its destructor is not virtual. +class TestProperty { + public: + // C'tor. TestProperty does NOT have a default constructor. + // Always use this constructor (with parameters) to create a + // TestProperty object. + TestProperty(const std::string& a_key, const std::string& a_value) : + key_(a_key), value_(a_value) { + } + + // Gets the user supplied key. + const char* key() const { + return key_.c_str(); + } + + // Gets the user supplied value. + const char* value() const { + return value_.c_str(); + } + + // Sets a new value, overriding the one supplied in the constructor. + void SetValue(const std::string& new_value) { + value_ = new_value; + } + + private: + // The key supplied by the user. + std::string key_; + // The value supplied by the user. + std::string value_; +}; + +// The result of a single Test. This includes a list of +// TestPartResults, a list of TestProperties, a count of how many +// death tests there are in the Test, and how much time it took to run +// the Test. +// +// TestResult is not copyable. +class GTEST_API_ TestResult { + public: + // Creates an empty TestResult. + TestResult(); + + // D'tor. Do not inherit from TestResult. + ~TestResult(); + + // Gets the number of all test parts. This is the sum of the number + // of successful test parts and the number of failed test parts. + int total_part_count() const; + + // Returns the number of the test properties. + int test_property_count() const; + + // Returns true iff the test passed (i.e. no test part failed). + bool Passed() const { return !Failed(); } + + // Returns true iff the test failed. + bool Failed() const; + + // Returns true iff the test fatally failed. + bool HasFatalFailure() const; + + // Returns true iff the test has a non-fatal failure. + bool HasNonfatalFailure() const; + + // Returns the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns the i-th test part result among all the results. i can range + // from 0 to test_property_count() - 1. If i is not in that range, aborts + // the program. + const TestPartResult& GetTestPartResult(int i) const; + + // Returns the i-th test property. i can range from 0 to + // test_property_count() - 1. If i is not in that range, aborts the + // program. + const TestProperty& GetTestProperty(int i) const; + + private: + friend class TestInfo; + friend class TestCase; + friend class UnitTest; + friend class internal::DefaultGlobalTestPartResultReporter; + friend class internal::ExecDeathTest; + friend class internal::TestResultAccessor; + friend class internal::UnitTestImpl; + friend class internal::WindowsDeathTest; + + // Gets the vector of TestPartResults. + const std::vector& test_part_results() const { + return test_part_results_; + } + + // Gets the vector of TestProperties. + const std::vector& test_properties() const { + return test_properties_; + } + + // Sets the elapsed time. 
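// A short sketch (not from this header) of how Test::RecordProperty,
// declared above, produces the TestProperty key/value pairs that a
// TestResult stores and the XML report emits as attributes (key and
// value are illustrative):
//
//   TEST(ReportSketch, RecordsKeyValue) {
//     RecordProperty("build_commit", "abc123");  // becomes an XML attribute
//   }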
+ void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; } + + // Adds a test property to the list. The property is validated and may add + // a non-fatal failure if invalid (e.g., if it conflicts with reserved + // key names). If a property is already recorded for the same key, the + // value will be updated, rather than storing multiple values for the same + // key. xml_element specifies the element for which the property is being + // recorded and is used for validation. + void RecordProperty(const std::string& xml_element, + const TestProperty& test_property); + + // Adds a failure if the key is a reserved attribute of Google Test + // testcase tags. Returns true if the property is valid. + // TODO(russr): Validate attribute names are legal and human readable. + static bool ValidateTestProperty(const std::string& xml_element, + const TestProperty& test_property); + + // Adds a test part result to the list. + void AddTestPartResult(const TestPartResult& test_part_result); + + // Returns the death test count. + int death_test_count() const { return death_test_count_; } + + // Increments the death test count, returning the new count. + int increment_death_test_count() { return ++death_test_count_; } + + // Clears the test part results. + void ClearTestPartResults(); + + // Clears the object. + void Clear(); + + // Protects mutable state of the property vector and of owned + // properties, whose values may be updated. + internal::Mutex test_properites_mutex_; + + // The vector of TestPartResults + std::vector test_part_results_; + // The vector of TestProperties + std::vector test_properties_; + // Running count of death tests. + int death_test_count_; + // The elapsed time, in milliseconds. + TimeInMillis elapsed_time_; + + // We disallow copying TestResult. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult); +}; // class TestResult + +// A TestInfo object stores the following information about a test: +// +// Test case name +// Test name +// Whether the test should be run +// A function pointer that creates the test object when invoked +// Test result +// +// The constructor of TestInfo registers itself with the UnitTest +// singleton such that the RUN_ALL_TESTS() macro knows which tests to +// run. +class GTEST_API_ TestInfo { + public: + // Destructs a TestInfo object. This function is not virtual, so + // don't inherit from TestInfo. + ~TestInfo(); + + // Returns the test case name. + const char* test_case_name() const { return test_case_name_.c_str(); } + + // Returns the test name. + const char* name() const { return name_.c_str(); } + + // Returns the name of the parameter type, or NULL if this is not a typed + // or a type-parameterized test. + const char* type_param() const { + if (type_param_.get() != NULL) + return type_param_->c_str(); + return NULL; + } + + // Returns the text representation of the value parameter, or NULL if this + // is not a value-parameterized test. + const char* value_param() const { + if (value_param_.get() != NULL) + return value_param_->c_str(); + return NULL; + } + + // Returns true if this test should run, that is if the test is not + // disabled (or it is disabled but the also_run_disabled_tests flag has + // been specified) and its full name matches the user-specified filter. + // + // Google Test allows the user to filter the tests by their full names. + // The full name of a test Bar in test case Foo is defined as + // "Foo.Bar". Only the tests that match the filter will run. 
+ // + // A filter is a colon-separated list of glob (not regex) patterns, + // optionally followed by a '-' and a colon-separated list of + // negative patterns (tests to exclude). A test is run if it + // matches one of the positive patterns and does not match any of + // the negative patterns. + // + // For example, *A*:Foo.* is a filter that matches any string that + // contains the character 'A' or starts with "Foo.". + bool should_run() const { return should_run_; } + + // Returns true iff this test will appear in the XML report. + bool is_reportable() const { + // For now, the XML report includes all tests matching the filter. + // In the future, we may trim tests that are excluded because of + // sharding. + return matches_filter_; + } + + // Returns the result of the test. + const TestResult* result() const { return &result_; } + + private: +#if GTEST_HAS_DEATH_TEST + friend class internal::DefaultDeathTestFactory; +#endif // GTEST_HAS_DEATH_TEST + friend class Test; + friend class TestCase; + friend class internal::UnitTestImpl; + friend class internal::StreamingListenerTest; + friend TestInfo* internal::MakeAndRegisterTestInfo( + const char* test_case_name, + const char* name, + const char* type_param, + const char* value_param, + internal::TypeId fixture_class_id, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc, + internal::TestFactoryBase* factory); + + // Constructs a TestInfo object. The newly constructed instance assumes + // ownership of the factory object. + TestInfo(const std::string& test_case_name, + const std::string& name, + const char* a_type_param, // NULL if not a type-parameterized test + const char* a_value_param, // NULL if not a value-parameterized test + internal::TypeId fixture_class_id, + internal::TestFactoryBase* factory); + + // Increments the number of death tests encountered in this test so + // far. + int increment_death_test_count() { + return result_.increment_death_test_count(); + } + + // Creates the test object, runs it, records its result, and then + // deletes it. + void Run(); + + static void ClearTestResult(TestInfo* test_info) { + test_info->result_.Clear(); + } + + // These fields are immutable properties of the test. + const std::string test_case_name_; // Test case name + const std::string name_; // Test name + // Name of the parameter type, or NULL if this is not a typed or a + // type-parameterized test. + const internal::scoped_ptr type_param_; + // Text representation of the value parameter, or NULL if this is not a + // value-parameterized test. + const internal::scoped_ptr value_param_; + const internal::TypeId fixture_class_id_; // ID of the test fixture class + bool should_run_; // True iff this test should run + bool is_disabled_; // True iff this test is disabled + bool matches_filter_; // True if this test matches the + // user-specified filter. + internal::TestFactoryBase* const factory_; // The factory that creates + // the test object + + // This field is mutable and needs to be reset before running the + // test for the second time. + TestResult result_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo); +}; + +// A test case, which consists of a vector of TestInfos. +// +// TestCase is not copyable. +class GTEST_API_ TestCase { + public: + // Creates a TestCase with the given name. + // + // TestCase does NOT have a default constructor. Always use this + // constructor to create a TestCase object. 
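// Filter sketch, applying the glob rules described above (binary name and
// patterns illustrative): run every FooTest and BarTest except FooTest.Slow:
//
//   ./unit_tests --gtest_filter=FooTest.*:BarTest.*-FooTest.Slow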
+ // + // Arguments: + // + // name: name of the test case + // a_type_param: the name of the test's type parameter, or NULL if + // this is not a type-parameterized test. + // set_up_tc: pointer to the function that sets up the test case + // tear_down_tc: pointer to the function that tears down the test case + TestCase(const char* name, const char* a_type_param, + Test::SetUpTestCaseFunc set_up_tc, + Test::TearDownTestCaseFunc tear_down_tc); + + // Destructor of TestCase. + virtual ~TestCase(); + + // Gets the name of the TestCase. + const char* name() const { return name_.c_str(); } + + // Returns the name of the parameter type, or NULL if this is not a + // type-parameterized test case. + const char* type_param() const { + if (type_param_.get() != NULL) + return type_param_->c_str(); + return NULL; + } + + // Returns true if any test in this test case should run. + bool should_run() const { return should_run_; } + + // Gets the number of successful tests in this test case. + int successful_test_count() const; + + // Gets the number of failed tests in this test case. + int failed_test_count() const; + + // Gets the number of disabled tests that will be reported in the XML report. + int reportable_disabled_test_count() const; + + // Gets the number of disabled tests in this test case. + int disabled_test_count() const; + + // Gets the number of tests to be printed in the XML report. + int reportable_test_count() const; + + // Get the number of tests in this test case that should run. + int test_to_run_count() const; + + // Gets the number of all tests in this test case. + int total_test_count() const; + + // Returns true iff the test case passed. + bool Passed() const { return !Failed(); } + + // Returns true iff the test case failed. + bool Failed() const { return failed_test_count() > 0; } + + // Returns the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const { return elapsed_time_; } + + // Returns the i-th test among all the tests. i can range from 0 to + // total_test_count() - 1. If i is not in that range, returns NULL. + const TestInfo* GetTestInfo(int i) const; + + // Returns the TestResult that holds test properties recorded during + // execution of SetUpTestCase and TearDownTestCase. + const TestResult& ad_hoc_test_result() const { return ad_hoc_test_result_; } + + private: + friend class Test; + friend class internal::UnitTestImpl; + + // Gets the (mutable) vector of TestInfos in this TestCase. + std::vector& test_info_list() { return test_info_list_; } + + // Gets the (immutable) vector of TestInfos in this TestCase. + const std::vector& test_info_list() const { + return test_info_list_; + } + + // Returns the i-th test among all the tests. i can range from 0 to + // total_test_count() - 1. If i is not in that range, returns NULL. + TestInfo* GetMutableTestInfo(int i); + + // Sets the should_run member. + void set_should_run(bool should) { should_run_ = should; } + + // Adds a TestInfo to this test case. Will delete the TestInfo upon + // destruction of the TestCase object. + void AddTestInfo(TestInfo * test_info); + + // Clears the results of all tests in this test case. + void ClearResult(); + + // Clears the results of all tests in the given test case. + static void ClearTestCaseResult(TestCase* test_case) { + test_case->ClearResult(); + } + + // Runs every test in this TestCase. + void Run(); + + // Runs SetUpTestCase() for this TestCase. This wrapper is needed + // for catching exceptions thrown from SetUpTestCase(). 
+ void RunSetUpTestCase() { (*set_up_tc_)(); } + + // Runs TearDownTestCase() for this TestCase. This wrapper is + // needed for catching exceptions thrown from TearDownTestCase(). + void RunTearDownTestCase() { (*tear_down_tc_)(); } + + // Returns true iff test passed. + static bool TestPassed(const TestInfo* test_info) { + return test_info->should_run() && test_info->result()->Passed(); + } + + // Returns true iff test failed. + static bool TestFailed(const TestInfo* test_info) { + return test_info->should_run() && test_info->result()->Failed(); + } + + // Returns true iff the test is disabled and will be reported in the XML + // report. + static bool TestReportableDisabled(const TestInfo* test_info) { + return test_info->is_reportable() && test_info->is_disabled_; + } + + // Returns true iff test is disabled. + static bool TestDisabled(const TestInfo* test_info) { + return test_info->is_disabled_; + } + + // Returns true iff this test will appear in the XML report. + static bool TestReportable(const TestInfo* test_info) { + return test_info->is_reportable(); + } + + // Returns true if the given test should run. + static bool ShouldRunTest(const TestInfo* test_info) { + return test_info->should_run(); + } + + // Shuffles the tests in this test case. + void ShuffleTests(internal::Random* random); + + // Restores the test order to before the first shuffle. + void UnshuffleTests(); + + // Name of the test case. + std::string name_; + // Name of the parameter type, or NULL if this is not a typed or a + // type-parameterized test. + const internal::scoped_ptr type_param_; + // The vector of TestInfos in their original order. It owns the + // elements in the vector. + std::vector test_info_list_; + // Provides a level of indirection for the test list to allow easy + // shuffling and restoring the test order. The i-th element in this + // vector is the index of the i-th test in the shuffled test list. + std::vector test_indices_; + // Pointer to the function that sets up the test case. + Test::SetUpTestCaseFunc set_up_tc_; + // Pointer to the function that tears down the test case. + Test::TearDownTestCaseFunc tear_down_tc_; + // True iff any test in this test case should run. + bool should_run_; + // Elapsed time, in milliseconds. + TimeInMillis elapsed_time_; + // Holds test properties recorded during execution of SetUpTestCase and + // TearDownTestCase. + TestResult ad_hoc_test_result_; + + // We disallow copying TestCases. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase); +}; + +// An Environment object is capable of setting up and tearing down an +// environment. The user should subclass this to define his own +// environment(s). +// +// An Environment object does the set-up and tear-down in virtual +// methods SetUp() and TearDown() instead of the constructor and the +// destructor, as: +// +// 1. You cannot safely throw from a destructor. This is a problem +// as in some cases Google Test is used where exceptions are enabled, and +// we may want to implement ASSERT_* using exceptions where they are +// available. +// 2. You cannot use ASSERT_* directly in a constructor or +// destructor. +class Environment { + public: + // The d'tor is virtual as we need to subclass Environment. + virtual ~Environment() {} + + // Override this to define how to set up the environment. + virtual void SetUp() {} + + // Override this to define how to tear down the environment. 
+ virtual void TearDown() {} + private: + // If you see an error about overriding the following function or + // about it being private, you have mis-spelled SetUp() as Setup(). + struct Setup_should_be_spelled_SetUp {}; + virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; } +}; + +// The interface for tracing execution of tests. The methods are organized in +// the order the corresponding events are fired. +class TestEventListener { + public: + virtual ~TestEventListener() {} + + // Fired before any test activity starts. + virtual void OnTestProgramStart(const UnitTest& unit_test) = 0; + + // Fired before each iteration of tests starts. There may be more than + // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration + // index, starting from 0. + virtual void OnTestIterationStart(const UnitTest& unit_test, + int iteration) = 0; + + // Fired before environment set-up for each iteration of tests starts. + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0; + + // Fired after environment set-up for each iteration of tests ends. + virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0; + + // Fired before the test case starts. + virtual void OnTestCaseStart(const TestCase& test_case) = 0; + + // Fired before the test starts. + virtual void OnTestStart(const TestInfo& test_info) = 0; + + // Fired after a failed assertion or a SUCCEED() invocation. + virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0; + + // Fired after the test ends. + virtual void OnTestEnd(const TestInfo& test_info) = 0; + + // Fired after the test case ends. + virtual void OnTestCaseEnd(const TestCase& test_case) = 0; + + // Fired before environment tear-down for each iteration of tests starts. + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0; + + // Fired after environment tear-down for each iteration of tests ends. + virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0; + + // Fired after each iteration of tests finishes. + virtual void OnTestIterationEnd(const UnitTest& unit_test, + int iteration) = 0; + + // Fired after all test activities have ended. + virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0; +}; + +// The convenience class for users who need to override just one or two +// methods and are not concerned that a possible change to a signature of +// the methods they override will not be caught during the build. For +// comments about each method please see the definition of TestEventListener +// above. 
+class EmptyTestEventListener : public TestEventListener { + public: + virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) override {} + virtual void OnTestIterationStart(const UnitTest& /*unit_test*/, + int /*iteration*/) override {} + virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) override {} + virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) override {} + virtual void OnTestCaseStart(const TestCase& /*test_case*/) override {} + virtual void OnTestStart(const TestInfo& /*test_info*/) override {} + virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) override {} + virtual void OnTestEnd(const TestInfo& /*test_info*/) override {} + virtual void OnTestCaseEnd(const TestCase& /*test_case*/) override {} + virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) override {} + virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) override {} + virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/, + int /*iteration*/) override {} + virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) override {} +}; + +// TestEventListeners lets users add listeners to track events in Google Test. +class GTEST_API_ TestEventListeners { + public: + TestEventListeners(); + ~TestEventListeners(); + + // Appends an event listener to the end of the list. Google Test assumes + // the ownership of the listener (i.e. it will delete the listener when + // the test program finishes). + void Append(TestEventListener* listener); + + // Removes the given event listener from the list and returns it. It then + // becomes the caller's responsibility to delete the listener. Returns + // NULL if the listener is not found in the list. + TestEventListener* Release(TestEventListener* listener); + + // Returns the standard listener responsible for the default console + // output. Can be removed from the listeners list to shut down default + // console output. Note that removing this object from the listener list + // with Release transfers its ownership to the caller and makes this + // function return NULL the next time. + TestEventListener* default_result_printer() const { + return default_result_printer_; + } + + // Returns the standard listener responsible for the default XML output + // controlled by the --gtest_output=xml flag. Can be removed from the + // listeners list by users who want to shut down the default XML output + // controlled by this flag and substitute it with custom one. Note that + // removing this object from the listener list with Release transfers its + // ownership to the caller and makes this function return NULL the next + // time. + TestEventListener* default_xml_generator() const { + return default_xml_generator_; + } + + private: + friend class TestCase; + friend class TestInfo; + friend class internal::DefaultGlobalTestPartResultReporter; + friend class internal::NoExecDeathTest; + friend class internal::TestEventListenersAccessor; + friend class internal::UnitTestImpl; + + // Returns repeater that broadcasts the TestEventListener events to all + // subscribers. + TestEventListener* repeater(); + + // Sets the default_result_printer attribute to the provided listener. + // The listener is also added to the listener list and previous + // default_result_printer is removed from it and deleted. The listener can + // also be NULL in which case it will not be added to the list. Does + // nothing if the previous and the current listener objects are the same. 
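// Sketch of plugging in a custom listener; EmptyTestEventListener above
// exists precisely so that only the hooks of interest need overriding
// (MinimalPrinter is illustrative, not part of this header):
//
//   class MinimalPrinter : public testing::EmptyTestEventListener {
//     virtual void OnTestEnd(const testing::TestInfo& info) override {
//       printf("%s.%s finished\n", info.test_case_name(), info.name());
//     }
//   };
//
//   // In main(), after InitGoogleTest() and before RUN_ALL_TESTS():
//   testing::UnitTest::GetInstance()->listeners().Append(new MinimalPrinter);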
+ void SetDefaultResultPrinter(TestEventListener* listener); + + // Sets the default_xml_generator attribute to the provided listener. The + // listener is also added to the listener list and previous + // default_xml_generator is removed from it and deleted. The listener can + // also be NULL in which case it will not be added to the list. Does + // nothing if the previous and the current listener objects are the same. + void SetDefaultXmlGenerator(TestEventListener* listener); + + // Controls whether events will be forwarded by the repeater to the + // listeners in the list. + bool EventForwardingEnabled() const; + void SuppressEventForwarding(); + + // The actual list of listeners. + internal::TestEventRepeater* repeater_; + // Listener responsible for the standard result output. + TestEventListener* default_result_printer_; + // Listener responsible for the creation of the XML output file. + TestEventListener* default_xml_generator_; + + // We disallow copying TestEventListeners. + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners); +}; + +// A UnitTest consists of a vector of TestCases. +// +// This is a singleton class. The only instance of UnitTest is +// created when UnitTest::GetInstance() is first called. This +// instance is never deleted. +// +// UnitTest is not copyable. +// +// This class is thread-safe as long as the methods are called +// according to their specification. +class GTEST_API_ UnitTest { + public: + // Gets the singleton UnitTest object. The first time this method + // is called, a UnitTest object is constructed and returned. + // Consecutive calls will return the same object. + static UnitTest* GetInstance(); + + // Runs all tests in this UnitTest object and prints the result. + // Returns 0 if successful, or 1 otherwise. + // + // This method can only be called from the main thread. + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + int Run() GTEST_MUST_USE_RESULT_; + + // Returns the working directory when the first TEST() or TEST_F() + // was executed. The UnitTest object owns the string. + const char* original_working_dir() const; + + // Returns the TestCase object for the test that's currently running, + // or NULL if no test is running. + const TestCase* current_test_case() const + GTEST_LOCK_EXCLUDED_(mutex_); + + // Returns the TestInfo object for the test that's currently running, + // or NULL if no test is running. + const TestInfo* current_test_info() const + GTEST_LOCK_EXCLUDED_(mutex_); + + // Returns the random seed used at the start of the current test run. + int random_seed() const; + +#if GTEST_HAS_PARAM_TEST + // Returns the ParameterizedTestCaseRegistry object used to keep track of + // value-parameterized tests and instantiate and register them. + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + internal::ParameterizedTestCaseRegistry& parameterized_test_registry() + GTEST_LOCK_EXCLUDED_(mutex_); +#endif // GTEST_HAS_PARAM_TEST + + // Gets the number of successful test cases. + int successful_test_case_count() const; + + // Gets the number of failed test cases. + int failed_test_case_count() const; + + // Gets the number of all test cases. + int total_test_case_count() const; + + // Gets the number of all test cases that contain at least one test + // that should run. + int test_case_to_run_count() const; + + // Gets the number of successful tests. + int successful_test_count() const; + + // Gets the number of failed tests. 
+ int failed_test_count() const; + + // Gets the number of disabled tests that will be reported in the XML report. + int reportable_disabled_test_count() const; + + // Gets the number of disabled tests. + int disabled_test_count() const; + + // Gets the number of tests to be printed in the XML report. + int reportable_test_count() const; + + // Gets the number of all tests. + int total_test_count() const; + + // Gets the number of tests that should run. + int test_to_run_count() const; + + // Gets the time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp() const; + + // Gets the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const; + + // Returns true iff the unit test passed (i.e. all test cases passed). + bool Passed() const; + + // Returns true iff the unit test failed (i.e. some test case failed + // or something outside of all tests failed). + bool Failed() const; + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + const TestCase* GetTestCase(int i) const; + + // Returns the TestResult containing information on test failures and + // properties logged outside of individual test cases. + const TestResult& ad_hoc_test_result() const; + + // Returns the list of event listeners that can be used to track events + // inside Google Test. + TestEventListeners& listeners(); + + private: + // Registers and returns a global test environment. When a test + // program is run, all global test environments will be set-up in + // the order they were registered. After all tests in the program + // have finished, all global test environments will be torn-down in + // the *reverse* order they were registered. + // + // The UnitTest object takes ownership of the given environment. + // + // This method can only be called from the main thread. + Environment* AddEnvironment(Environment* env); + + // Adds a TestPartResult to the current TestResult object. All + // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) + // eventually call this to report their results. The user code + // should use the assertion macros instead of calling this directly. + void AddTestPartResult(TestPartResult::Type result_type, + const char* file_name, + int line_number, + const std::string& message, + const std::string& os_stack_trace) + GTEST_LOCK_EXCLUDED_(mutex_); + + // Adds a TestProperty to the current TestResult object when invoked from + // inside a test, to current TestCase's ad_hoc_test_result_ when invoked + // from SetUpTestCase or TearDownTestCase, or to the global property set + // when invoked elsewhere. If the result already contains a property with + // the same key, the value will be updated. + void RecordProperty(const std::string& key, const std::string& value); + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + TestCase* GetMutableTestCase(int i); + + // Accessors for the implementation object. + internal::UnitTestImpl* impl() { return impl_; } + const internal::UnitTestImpl* impl() const { return impl_; } + + // These classes and funcions are friends as they need to access private + // members of UnitTest. 
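// Sketch of post-run introspection through the public accessors above
// (assumes RUN_ALL_TESTS() has already executed):
//
//   const testing::UnitTest& ut = *testing::UnitTest::GetInstance();
//   printf("%d of %d tests passed\n",
//          ut.successful_test_count(), ut.total_test_count());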
+ friend class Test; + friend class internal::AssertHelper; + friend class internal::ScopedTrace; + friend class internal::StreamingListenerTest; + friend class internal::UnitTestRecordPropertyTestHelper; + friend Environment* AddGlobalTestEnvironment(Environment* env); + friend internal::UnitTestImpl* internal::GetUnitTestImpl(); + friend void internal::ReportFailureInUnknownLocation( + TestPartResult::Type result_type, + const std::string& message); + + // Creates an empty UnitTest. + UnitTest(); + + // D'tor + virtual ~UnitTest(); + + // Pushes a trace defined by SCOPED_TRACE() on to the per-thread + // Google Test trace stack. + void PushGTestTrace(const internal::TraceInfo& trace) + GTEST_LOCK_EXCLUDED_(mutex_); + + // Pops a trace from the per-thread Google Test trace stack. + void PopGTestTrace() + GTEST_LOCK_EXCLUDED_(mutex_); + + // Protects mutable state in *impl_. This is mutable as some const + // methods need to lock it too. + mutable internal::Mutex mutex_; + + // Opaque implementation object. This field is never changed once + // the object is constructed. We don't mark it as const here, as + // doing so will cause a warning in the constructor of UnitTest. + // Mutable state in *impl_ is protected by mutex_. + internal::UnitTestImpl* impl_; + + // We disallow copying UnitTest. + GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest); +}; + +// A convenient wrapper for adding an environment for the test +// program. +// +// You should call this before RUN_ALL_TESTS() is called, probably in +// main(). If you use gtest_main, you need to call this before main() +// starts for it to take effect. For example, you can define a global +// variable like this: +// +// testing::Environment* const foo_env = +// testing::AddGlobalTestEnvironment(new FooEnvironment); +// +// However, we strongly recommend you to write your own main() and +// call AddGlobalTestEnvironment() there, as relying on initialization +// of global variables makes the code harder to read and may cause +// problems when you register multiple environments from different +// translation units and the environments have dependencies among them +// (remember that the compiler doesn't guarantee the order in which +// global variables from different translation units are initialized). +inline Environment* AddGlobalTestEnvironment(Environment* env) { + return UnitTest::GetInstance()->AddEnvironment(env); +} + +// Initializes Google Test. This must be called before calling +// RUN_ALL_TESTS(). In particular, it parses a command line for the +// flags that Google Test recognizes. Whenever a Google Test flag is +// seen, it is removed from argv, and *argc is decremented. +// +// No value is returned. Instead, the Google Test flag variables are +// updated. +// +// Calling the function for the second time has no user-visible effect. +GTEST_API_ void InitGoogleTest(int* argc, char** argv); + +// This overloaded version can be used in Windows programs compiled in +// UNICODE mode. +GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv); + +namespace internal { + +// FormatForComparison::Format(value) formats a +// value of type ToPrint that is an operand of a comparison assertion +// (e.g. ASSERT_EQ). OtherOperand is the type of the other operand in +// the comparison, and is used to help determine the best way to +// format the value. 
In particular, when the value is a C string
+// (char pointer) and the other operand is an STL string object, we
+// want to format the C string as a string, since we know it is
+// compared by value with the string object.  If the value is a char
+// pointer but the other operand is not an STL string object, we don't
+// know whether the pointer is supposed to point to a NUL-terminated
+// string, and thus want to print it as a pointer to be safe.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// The default case.
+template <typename ToPrint, typename OtherOperand>
+class FormatForComparison {
+ public:
+  static ::std::string Format(const ToPrint& value) {
+    return ::testing::PrintToString(value);
+  }
+};
+
+// Array.
+template <typename ToPrint, size_t N, typename OtherOperand>
+class FormatForComparison<ToPrint[N], OtherOperand> {
+ public:
+  static ::std::string Format(const ToPrint* value) {
+    return FormatForComparison<const ToPrint*, OtherOperand>::Format(value);
+  }
+};
+
+// By default, print C string as pointers to be safe, as we don't know
+// whether they actually point to a NUL-terminated string.
+
+#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType)                \
+  template <typename OtherOperand>                                     \
+  class FormatForComparison<CharType*, OtherOperand> {                 \
+   public:                                                             \
+    static ::std::string Format(CharType* value) {                     \
+      return ::testing::PrintToString(static_cast<const void*>(value)); \
+    }                                                                  \
+  }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(wchar_t);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t);
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_
+
+// If a C string is compared with an STL string object, we know it's meant
+// to point to a NUL-terminated string, and thus can print it as a string.
+
+#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \
+  template <>                                                           \
+  class FormatForComparison<CharType*, OtherStringType> {               \
+   public:                                                              \
+    static ::std::string Format(CharType* value) {                      \
+      return ::testing::PrintToString(value);                           \
+    }                                                                   \
+  }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::std::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::std::string);
+
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::string);
+#endif
+
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::wstring);
+#endif
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::std::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring);
+#endif
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_STRING_
+
+// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, and etc)
+// operand to be used in a failure message.  The type (but not value)
+// of the other operand may affect the format.  This allows us to
+// print a char* as a raw pointer when it is compared against another
+// char* or void*, and print it as a C string when it is compared
+// against an std::string object, for example.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename T1, typename T2>
+std::string FormatForComparisonFailureMessage(
+    const T1& value, const T2& /* other_operand */) {
+  return FormatForComparison<T1, T2>::Format(value);
+}
+
+// The helper function for {ASSERT|EXPECT}_EQ.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQ(const char* expected_expression,
+                            const char* actual_expression,
+                            const T1& expected,
+                            const T2& actual) {
+#ifdef _MSC_VER
+# pragma warning(push)          // Saves the current warning state.
+# pragma warning(disable:4389)  // Temporarily disables warning on
+                                // signed/unsigned mismatch.
+#endif
+
+  if (expected == actual) {
+    return AssertionSuccess();
+  }
+
+#ifdef _MSC_VER
+# pragma warning(pop)          // Restores the warning state.
+#endif
+
+  return EqFailure(expected_expression,
+                   actual_expression,
+                   FormatForComparisonFailureMessage(expected, actual),
+                   FormatForComparisonFailureMessage(actual, expected),
+                   false);
+}
+
+// With this overloaded version, we allow anonymous enums to be used
+// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums
+// can be implicitly cast to BiggestInt.
+GTEST_API_ AssertionResult CmpHelperEQ(const char* expected_expression,
+                                       const char* actual_expression,
+                                       BiggestInt expected,
+                                       BiggestInt actual);
+
+// The helper class for {ASSERT|EXPECT}_EQ.  The template argument
+// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
+// is a null pointer literal.  The following default implementation is
+// for lhs_is_null_literal being false.
+template <bool lhs_is_null_literal>
+class EqHelper {
+ public:
+  // This templatized version is for the general case.
+  template <typename T1, typename T2>
+  static AssertionResult Compare(const char* expected_expression,
+                                 const char* actual_expression,
+                                 const T1& expected,
+                                 const T2& actual) {
+    return CmpHelperEQ(expected_expression, actual_expression, expected,
+                       actual);
+  }
+
+  // With this overloaded version, we allow anonymous enums to be used
+  // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
+  // enums can be implicitly cast to BiggestInt.
+  //
+  // Even though its body looks the same as the above version, we
+  // cannot merge the two, as it will make anonymous enums unhappy.
+  static AssertionResult Compare(const char* expected_expression,
+                                 const char* actual_expression,
+                                 BiggestInt expected,
+                                 BiggestInt actual) {
+    return CmpHelperEQ(expected_expression, actual_expression, expected,
+                       actual);
+  }
+};
+
+// This specialization is used when the first argument to ASSERT_EQ()
+// is a null pointer literal, like NULL, false, or 0.
+template <>
+class EqHelper<true> {
+ public:
+  // We define two overloaded versions of Compare().  The first
+  // version will be picked when the second argument to ASSERT_EQ() is
+  // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or
+  // EXPECT_EQ(false, a_bool).
+  template <typename T1, typename T2>
+  static AssertionResult Compare(
+      const char* expected_expression,
+      const char* actual_expression,
+      const T1& expected,
+      const T2& actual,
+      // The following line prevents this overload from being considered if T2
+      // is not a pointer type.  We need this because ASSERT_EQ(NULL, my_ptr)
+      // expands to Compare("", "", NULL, my_ptr), which requires a conversion
+      // to match the Secret* in the other overload, which would otherwise make
+      // this template match better.
+      typename EnableIf<!is_pointer<T2>::value>::type* = 0) {
+    return CmpHelperEQ(expected_expression, actual_expression, expected,
+                       actual);
+  }
+
+  // This version will be picked when the second argument to ASSERT_EQ() is a
+  // pointer, e.g. ASSERT_EQ(NULL, a_pointer).
+  template <typename T>
+  static AssertionResult Compare(
+      const char* expected_expression,
+      const char* actual_expression,
+      // We used to have a second template parameter instead of Secret*.  That
+      // template parameter would deduce to 'long', making this a better match
+      // than the first overload even without the first overload's EnableIf.
+      // Unfortunately, gcc with -Wconversion-null warns when "passing NULL to
+      // non-pointer argument" (even a deduced integral argument), so the old
+      // implementation caused warnings in user code.
+      Secret* /* expected (NULL) */,
+      T* actual) {
+    // We already know that 'expected' is a null pointer.
+    return CmpHelperEQ(expected_expression, actual_expression,
+                       static_cast<T*>(NULL), actual);
+  }
+};
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_??.  It is here just to avoid copy-and-paste
+// of similar code.
+//
+// For each templatized helper function, we also define an overloaded
+// version for BiggestInt in order to reduce code bloat and allow
+// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled
+// with gcc 4.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+template <typename T1, typename T2>\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+                                   const T1& val1, const T2& val2) {\
+  if (val1 op val2) {\
+    return AssertionSuccess();\
+  } else {\
+    return AssertionFailure() \
+        << "Expected: (" << expr1 << ") " #op " (" << expr2\
+        << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
+        << " vs " << FormatForComparisonFailureMessage(val2, val1);\
+  }\
+}\
+GTEST_API_ AssertionResult CmpHelper##op_name(\
+    const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2)
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// Implements the helper function for {ASSERT|EXPECT}_NE
+GTEST_IMPL_CMP_HELPER_(NE, !=);
+// Implements the helper function for {ASSERT|EXPECT}_LE
+GTEST_IMPL_CMP_HELPER_(LE, <=);
+// Implements the helper function for {ASSERT|EXPECT}_LT
+GTEST_IMPL_CMP_HELPER_(LT, <);
+// Implements the helper function for {ASSERT|EXPECT}_GE
+GTEST_IMPL_CMP_HELPER_(GE, >=);
+// Implements the helper function for {ASSERT|EXPECT}_GT
+GTEST_IMPL_CMP_HELPER_(GT, >);
+
+#undef GTEST_IMPL_CMP_HELPER_
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
+                                          const char* actual_expression,
+                                          const char* expected,
+                                          const char* actual);
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
+                                              const char* actual_expression,
+                                              const char* expected,
+                                              const char* actual);
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+                                          const char* s2_expression,
+                                          const char* s1,
+                                          const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+                                              const char* s2_expression,
+                                              const char* s1,
+                                              const char* s2);
+
+
+// Helper function for *_STREQ on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
+                                          const char* actual_expression,
+                                          const wchar_t* expected,
+                                          const wchar_t* actual);
+
+// Helper function for *_STRNE on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+                                          const char* s2_expression,
+                                          const wchar_t* s1,
+                                          const wchar_t* s2);
+
+}  // namespace internal
+
+// IsSubstring() and IsNotSubstring() are intended to be used as the
+// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by
+// themselves.  They check whether needle is a substring of haystack
+// (NULL is considered a substring of itself only), and return an
+// appropriate error message when they fail.
+//
+// The {needle,haystack}_expr arguments are the stringified
+// expressions that generated the two real arguments.
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::string& needle, const ::std::string& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::string& needle, const ::std::string& haystack);
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ AssertionResult IsSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::wstring& needle, const ::std::wstring& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+    const char* needle_expr, const char* haystack_expr,
+    const ::std::wstring& needle, const ::std::wstring& haystack);
+#endif  // GTEST_HAS_STD_WSTRING
+
+namespace internal {
+
+// Helper template function for comparing floating-points.
+//
+// Template parameter:
+//
+//   RawType: the raw floating-point type (either float or double)
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename RawType>
+AssertionResult CmpHelperFloatingPointEQ(const char* expected_expression,
+                                         const char* actual_expression,
+                                         RawType expected,
+                                         RawType actual) {
+  const FloatingPoint<RawType> lhs(expected), rhs(actual);
+
+  if (lhs.AlmostEquals(rhs)) {
+    return AssertionSuccess();
+  }
+
+  ::std::stringstream expected_ss;
+  expected_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+              << expected;
+
+  ::std::stringstream actual_ss;
+  actual_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+            << actual;
+
+  return EqFailure(expected_expression,
+                   actual_expression,
+                   StringStreamToString(&expected_ss),
+                   StringStreamToString(&actual_ss),
+                   false);
+}
+
+// Helper function for implementing ASSERT_NEAR.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,
+                                                const char* expr2,
+                                                const char* abs_error_expr,
+                                                double val1,
+                                                double val2,
+                                                double abs_error);
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+// A class that enables one to stream messages to assertion macros
+class GTEST_API_ AssertHelper {
+ public:
+  // Constructor.
+  AssertHelper(TestPartResult::Type type,
+               const char* file,
+               int line,
+               const char* message);
+  ~AssertHelper();
+
+  // Message assignment is a semantic trick to enable assertion
+  // streaming; see the GTEST_MESSAGE_ macro below.
+  void operator=(const Message& message) const;
+
+ private:
+  // We put our data in a struct so that the size of the AssertHelper class can
+  // be as small as possible.  This is important because gcc is incapable of
+  // re-using stack space even for temporary variables, so every EXPECT_EQ
+  // reserves stack space for another AssertHelper.
+  struct AssertHelperData {
+    AssertHelperData(TestPartResult::Type t,
+                     const char* srcfile,
+                     int line_num,
+                     const char* msg)
+        : type(t), file(srcfile), line(line_num), message(msg) { }
+
+    TestPartResult::Type const type;
+    const char* const file;
+    int const line;
+    std::string const message;
+
+   private:
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);
+  };
+
+  AssertHelperData* const data_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
+};
+
+}  // namespace internal
+
+#if GTEST_HAS_PARAM_TEST
+// The pure interface class that all value-parameterized tests inherit from.
+// A value-parameterized class must inherit from both ::testing::Test and
+// ::testing::WithParamInterface. In most cases that just means inheriting
+// from ::testing::TestWithParam, but more complicated test hierarchies
+// may need to inherit from Test and WithParamInterface at different levels.
+//
+// This interface has support for accessing the test parameter value via
+// the GetParam() method.
+//
+// Use it with one of the parameter generator defining functions, like Range(),
+// Values(), ValuesIn(), Bool(), and Combine().
+//
+// class FooTest : public ::testing::TestWithParam<int> {
+//  protected:
+//   FooTest() {
+//     // Can use GetParam() here.
+//   }
+//   virtual ~FooTest() {
+//     // Can use GetParam() here.
+//   }
+//   virtual void SetUp() {
+//     // Can use GetParam() here.
+//   }
+//   virtual void TearDown() {
+//     // Can use GetParam() here.
+//   }
+// };
+// TEST_P(FooTest, DoesBar) {
+//   // Can use GetParam() method here.
+//   Foo foo;
+//   ASSERT_TRUE(foo.DoesBar(GetParam()));
+// }
+// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));
+
+template <typename T>
+class WithParamInterface {
+ public:
+  typedef T ParamType;
+  virtual ~WithParamInterface() {}
+
+  // The current parameter value. Is also available in the test fixture's
+  // constructor. This member function is non-static, even though it only
+  // references static data, to reduce the opportunity for incorrect uses
+  // like writing 'WithParamInterface<bool>::GetParam()' for a test that
+  // uses a fixture whose parameter type is int.
+  const ParamType& GetParam() const {
+    GTEST_CHECK_(parameter_ != NULL)
+        << "GetParam() can only be called inside a value-parameterized test "
+        << "-- did you intend to write TEST_P instead of TEST_F?";
+    return *parameter_;
+  }
+
+ private:
+  // Sets parameter value. The caller is responsible for making sure the value
+  // remains alive and unchanged throughout the current test.
+  static void SetParam(const ParamType* parameter) {
+    parameter_ = parameter;
+  }
+
+  // Static value used for accessing parameter during a test lifetime.
+  static const ParamType* parameter_;
+
+  // TestClass must be a subclass of WithParamInterface<T> and Test.
+  template <class TestClass> friend class internal::ParameterizedTestFactory;
+};
+
+template <typename T>
+const T* WithParamInterface<T>::parameter_ = NULL;
+
+// Most value-parameterized classes can ignore the existence of
+// WithParamInterface, and can just inherit from ::testing::TestWithParam.
+
+template <typename T>
+class TestWithParam : public Test, public WithParamInterface<T> {
+};
+
+#endif  // GTEST_HAS_PARAM_TEST
+
+// Macros for indicating success/failure in test code.
+ +// ADD_FAILURE unconditionally adds a failure to the current test. +// SUCCEED generates a success - it doesn't automatically make the +// current test successful, as a test is only successful when it has +// no failure. +// +// EXPECT_* verifies that a certain condition is satisfied. If not, +// it behaves like ADD_FAILURE. In particular: +// +// EXPECT_TRUE verifies that a Boolean condition is true. +// EXPECT_FALSE verifies that a Boolean condition is false. +// +// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except +// that they will also abort the current function on failure. People +// usually want the fail-fast behavior of FAIL and ASSERT_*, but those +// writing data-driven tests often find themselves using ADD_FAILURE +// and EXPECT_* more. + +// Generates a nonfatal failure with a generic message. +#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed") + +// Generates a nonfatal failure at the given source file location with +// a generic message. +#define ADD_FAILURE_AT(file, line) \ + GTEST_MESSAGE_AT_(file, line, "Failed", \ + ::testing::TestPartResult::kNonFatalFailure) + +// Generates a fatal failure with a generic message. +#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed") + +// Define this macro to 1 to omit the definition of FAIL(), which is a +// generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_FAIL +# define FAIL() GTEST_FAIL() +#endif + +// Generates a success with a generic message. +#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded") + +// Define this macro to 1 to omit the definition of SUCCEED(), which +// is a generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_SUCCEED +# define SUCCEED() GTEST_SUCCEED() +#endif + +// Macros for testing exceptions. +// +// * {ASSERT|EXPECT}_THROW(statement, expected_exception): +// Tests that the statement throws the expected exception. +// * {ASSERT|EXPECT}_NO_THROW(statement): +// Tests that the statement doesn't throw any exception. +// * {ASSERT|EXPECT}_ANY_THROW(statement): +// Tests that the statement throws an exception. + +#define EXPECT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_) +#define EXPECT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define EXPECT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define ASSERT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_) +#define ASSERT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_) +#define ASSERT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_) + +// Boolean assertions. Condition can be either a Boolean expression or an +// AssertionResult. For more information on how to use AssertionResult with +// these macros see comments on that class. +#define EXPECT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_NONFATAL_FAILURE_) +#define EXPECT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_NONFATAL_FAILURE_) +#define ASSERT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_FATAL_FAILURE_) +#define ASSERT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_FATAL_FAILURE_) + +// Includes the auto-generated header that implements a family of +// generic predicate assertion macros. 
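The exception and Boolean assertions declared above are typically exercised like this; a minimal sketch, assuming a hypothetical Widget class that is not part of this patch:

#include <stdexcept>
#include "gtest/gtest.h"

// Hypothetical class used only for illustration.
class Widget {
 public:
  void Resize(int w) {
    if (w < 0) throw std::invalid_argument("negative width");
    width_ = w;
  }
  int width() const { return width_; }
 private:
  int width_ = 0;
};

TEST(WidgetTest, ResizeValidatesInput) {
  Widget w;
  EXPECT_NO_THROW(w.Resize(10));                      // must not throw
  EXPECT_THROW(w.Resize(-1), std::invalid_argument);  // must throw this type
  EXPECT_ANY_THROW(w.Resize(-2));                     // any exception will do
  ASSERT_TRUE(w.width() == 10) << "widget left in a bad state";
}

Note that the EXPECT_ variants record the failure and continue, while the ASSERT_TRUE at the end aborts the test body on failure, as the comments above describe.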
+#include "gtest/gtest_pred_impl.h" + +// Macros for testing equalities and inequalities. +// +// * {ASSERT|EXPECT}_EQ(expected, actual): Tests that expected == actual +// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2 +// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2 +// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2 +// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2 +// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2 +// +// When they are not, Google Test prints both the tested expressions and +// their actual values. The values must be compatible built-in types, +// or you will get a compiler error. By "compatible" we mean that the +// values can be compared by the respective operator. +// +// Note: +// +// 1. It is possible to make a user-defined type work with +// {ASSERT|EXPECT}_??(), but that requires overloading the +// comparison operators and is thus discouraged by the Google C++ +// Usage Guide. Therefore, you are advised to use the +// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are +// equal. +// +// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on +// pointers (in particular, C strings). Therefore, if you use it +// with two C strings, you are testing how their locations in memory +// are related, not how their content is related. To compare two C +// strings by content, use {ASSERT|EXPECT}_STR*(). +// +// 3. {ASSERT|EXPECT}_EQ(expected, actual) is preferred to +// {ASSERT|EXPECT}_TRUE(expected == actual), as the former tells you +// what the actual value is when it fails, and similarly for the +// other comparisons. +// +// 4. Do not depend on the order in which {ASSERT|EXPECT}_??() +// evaluate their arguments, which is undefined. +// +// 5. These macros evaluate their arguments exactly once. +// +// Examples: +// +// EXPECT_NE(5, Foo()); +// EXPECT_EQ(NULL, a_pointer); +// ASSERT_LT(i, array_size); +// ASSERT_GT(records.size(), 0) << "There is no record left."; + +#define EXPECT_EQ(expected, actual) \ + EXPECT_PRED_FORMAT2(::testing::internal:: \ + EqHelper::Compare, \ + expected, actual) +#define EXPECT_NE(expected, actual) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, expected, actual) +#define EXPECT_LE(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2) +#define EXPECT_LT(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2) +#define EXPECT_GE(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2) +#define EXPECT_GT(val1, val2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2) + +#define GTEST_ASSERT_EQ(expected, actual) \ + ASSERT_PRED_FORMAT2(::testing::internal:: \ + EqHelper::Compare, \ + expected, actual) +#define GTEST_ASSERT_NE(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2) +#define GTEST_ASSERT_LE(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2) +#define GTEST_ASSERT_LT(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2) +#define GTEST_ASSERT_GE(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2) +#define GTEST_ASSERT_GT(val1, val2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2) + +// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of +// ASSERT_XY(), which clashes with some users' own code. 
+ +#if !GTEST_DONT_DEFINE_ASSERT_EQ +# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_NE +# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_LE +# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_LT +# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_GE +# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2) +#endif + +#if !GTEST_DONT_DEFINE_ASSERT_GT +# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2) +#endif + +// C-string Comparisons. All tests treat NULL and any non-NULL string +// as different. Two NULLs are equal. +// +// * {ASSERT|EXPECT}_STREQ(s1, s2): Tests that s1 == s2 +// * {ASSERT|EXPECT}_STRNE(s1, s2): Tests that s1 != s2 +// * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case +// * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case +// +// For wide or narrow string objects, you can use the +// {ASSERT|EXPECT}_??() macros. +// +// Don't depend on the order in which the arguments are evaluated, +// which is undefined. +// +// These macros evaluate their arguments exactly once. + +#define EXPECT_STREQ(expected, actual) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual) +#define EXPECT_STRNE(s1, s2) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2) +#define EXPECT_STRCASEEQ(expected, actual) \ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual) +#define EXPECT_STRCASENE(s1, s2)\ + EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2) + +#define ASSERT_STREQ(expected, actual) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual) +#define ASSERT_STRNE(s1, s2) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2) +#define ASSERT_STRCASEEQ(expected, actual) \ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual) +#define ASSERT_STRCASENE(s1, s2)\ + ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2) + +// Macros for comparing floating-point numbers. +// +// * {ASSERT|EXPECT}_FLOAT_EQ(expected, actual): +// Tests that two float values are almost equal. +// * {ASSERT|EXPECT}_DOUBLE_EQ(expected, actual): +// Tests that two double values are almost equal. +// * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error): +// Tests that v1 and v2 are within the given distance to each other. +// +// Google Test uses ULP-based comparison to automatically pick a default +// error bound that is appropriate for the operands. See the +// FloatingPoint template class in gtest-internal.h if you are +// interested in the implementation details. 
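To make the difference between the ULP-based macros and the explicit-bound EXPECT_NEAR concrete, a minimal sketch (the numeric values are illustrative only):

#include "gtest/gtest.h"

TEST(FloatCompareTest, UlpVersusAbsoluteError) {
  // 0.1 + 0.2 is one ULP away from 0.3 in IEEE double, so a plain
  // EXPECT_EQ would fail here, but the ULP-based macro (which accepts
  // differences of up to 4 ULPs) passes.
  EXPECT_DOUBLE_EQ(0.3, 0.1 + 0.2);

  // When a quantity has a known absolute tolerance, state it explicitly.
  EXPECT_NEAR(3.14159, 355.0 / 113.0, 1e-5);
}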
+
+#define EXPECT_FLOAT_EQ(expected, actual)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+                      expected, actual)
+
+#define EXPECT_DOUBLE_EQ(expected, actual)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+                      expected, actual)
+
+#define ASSERT_FLOAT_EQ(expected, actual)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+                      expected, actual)
+
+#define ASSERT_DOUBLE_EQ(expected, actual)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+                      expected, actual)
+
+#define EXPECT_NEAR(val1, val2, abs_error)\
+  EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+                      val1, val2, abs_error)
+
+#define ASSERT_NEAR(val1, val2, abs_error)\
+  ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+                      val1, val2, abs_error)
+
+// These predicate format functions work on floating-point values, and
+// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.
+//
+//   EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,
+                                   float val1, float val2);
+GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,
+                                    double val1, double val2);
+
+
+#if GTEST_OS_WINDOWS
+
+// Macros that test for HRESULT failure and success, these are only useful
+// on Windows, and rely on Windows SDK macros and APIs to compile.
+//
+//    * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)
+//
+// When expr unexpectedly fails or succeeds, Google Test prints the
+// expected result and the actual result with both a human-readable
+// string representation of the error, if available, as well as the
+// hex result code.
+# define EXPECT_HRESULT_SUCCEEDED(expr) \
+    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define ASSERT_HRESULT_SUCCEEDED(expr) \
+    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define EXPECT_HRESULT_FAILED(expr) \
+    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+# define ASSERT_HRESULT_FAILED(expr) \
+    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+#endif  // GTEST_OS_WINDOWS
+
+// Macros that execute statement and check that it doesn't generate new fatal
+// failures in the current thread.
+//
+//   * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
+//
+// Examples:
+//
+//   EXPECT_NO_FATAL_FAILURE(Process());
+//   ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
+//
+#define ASSERT_NO_FATAL_FAILURE(statement) \
+    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
+#define EXPECT_NO_FATAL_FAILURE(statement) \
+    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)
+
+// Causes a trace (including the source file path, the current line
+// number, and the given message) to be included in every test failure
+// message generated by code in the current scope.  The effect is
+// undone when the control leaves the current scope.
+//
+// The message argument can be anything streamable to std::ostream.
+//
+// In the implementation, we include the current line number as part
+// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
+// to appear in the same block - as long as they are on different
+// lines.
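A short sketch of the scoping behavior described above, with a hypothetical helper called from several places:

#include "gtest/gtest.h"

// Hypothetical helper exercised from several call sites.
void CheckSquare(int input, int expected) {
  EXPECT_EQ(expected, input * input);
}

TEST(ScopedTraceTest, LabelsEachCallSite) {
  {
    SCOPED_TRACE("first batch");   // appended to failures in this scope
    CheckSquare(2, 4);
    CheckSquare(3, 9);
  }
  SCOPED_TRACE("second batch");    // active until the end of the test body
  CheckSquare(4, 16);
}

Without the traces, a failure inside CheckSquare() would point only at the helper's line, with no indication of which call site produced it.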
+#define SCOPED_TRACE(message) \
+  ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
+    __FILE__, __LINE__, ::testing::Message() << (message))
+
+// Compile-time assertion for type equality.
+// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
+// the same type.  The value it returns is not interesting.
+//
+// Instead of making StaticAssertTypeEq a class template, we make it a
+// function template that invokes a helper class template.  This
+// prevents a user from misusing StaticAssertTypeEq<T1, T2> by
+// defining objects of that type.
+//
+// CAVEAT:
+//
+// When used inside a method of a class template,
+// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is
+// instantiated.  For example, given:
+//
+//   template <typename T> class Foo {
+//    public:
+//     void Bar() { testing::StaticAssertTypeEq<int, T>(); }
+//   };
+//
+// the code:
+//
+//   void Test1() { Foo<bool> foo; }
+//
+// will NOT generate a compiler error, as Foo<bool>::Bar() is never
+// actually instantiated.  Instead, you need:
+//
+//   void Test2() { Foo<bool> foo; foo.Bar(); }
+//
+// to cause a compiler error.
+template <typename T1, typename T2>
+bool StaticAssertTypeEq() {
+  (void)internal::StaticAssertTypeEqHelper<T1, T2>();
+  return true;
+}
+
+// Defines a test.
+//
+// The first parameter is the name of the test case, and the second
+// parameter is the name of the test within the test case.
+//
+// The convention is to end the test case name with "Test".  For
+// example, a test case for the Foo class can be named FooTest.
+//
+// The user should put his test code between braces after using this
+// macro.  Example:
+//
+//   TEST(FooTest, InitializesCorrectly) {
+//     Foo foo;
+//     EXPECT_TRUE(foo.StatusIsOK());
+//   }
+
+// Note that we call GetTestTypeId() instead of GetTypeId<
+// ::testing::Test>() here to get the type ID of testing::Test.  This
+// is to work around a suspected linker bug when using Google Test as
+// a framework on Mac OS X.  The bug causes GetTypeId<
+// ::testing::Test>() to return different values depending on whether
+// the call is from the Google Test framework itself or from user test
+// code.  GetTestTypeId() is guaranteed to always return the same
+// value, as it always calls GetTypeId<>() from the Google Test
+// framework.
+#define GTEST_TEST(test_case_name, test_name)\
+  GTEST_TEST_(test_case_name, test_name, \
+              ::testing::Test, ::testing::internal::GetTestTypeId())
+
+// Define this macro to 1 to omit the definition of TEST(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_TEST
+# define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name)
+#endif
+
+// Defines a test that uses a test fixture.
+//
+// The first parameter is the name of the test fixture class, which
+// also doubles as the test case name.  The second parameter is the
+// name of the test within the test case.
+//
+// A test fixture class must be declared earlier.  The user should put
+// his test code between braces after using this macro.  Example:
+//
+//   class FooTest : public testing::Test {
+//    protected:
+//     virtual void SetUp() { b_.AddElement(3); }
+//
+//     Foo a_;
+//     Foo b_;
+//   };
+//
+//   TEST_F(FooTest, InitializesCorrectly) {
+//     EXPECT_TRUE(a_.StatusIsOK());
+//   }
+//
+//   TEST_F(FooTest, ReturnsElementCountCorrectly) {
+//     EXPECT_EQ(0, a_.size());
+//     EXPECT_EQ(1, b_.size());
+//   }
+
+#define TEST_F(test_fixture, test_name)\
+  GTEST_TEST_(test_fixture, test_name, test_fixture, \
+              ::testing::internal::GetTypeId<test_fixture>())
+
+}  // namespace testing
+
+// Use this function in main() to run all tests.  It returns 0 if all
+// tests are successful, or 1 otherwise.
+//
+// RUN_ALL_TESTS() should be invoked after the command line has been
+// parsed by InitGoogleTest().
+//
+// This function was formerly a macro; thus, it is in the global
+// namespace and has an all-caps name.
+int RUN_ALL_TESTS() GTEST_MUST_USE_RESULT_;
+
+inline int RUN_ALL_TESTS() {
+  return ::testing::UnitTest::GetInstance()->Run();
+}
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_H_
diff --git a/tests/gtest/include/gtest/gtest_pred_impl.h b/external/gtest/include/gtest/gtest_pred_impl.h
old mode 100644
new mode 100755
similarity index 98%
rename from tests/gtest/include/gtest/gtest_pred_impl.h
rename to external/gtest/include/gtest/gtest_pred_impl.h
index 3805f85bdb..30ae712f50
--- a/tests/gtest/include/gtest/gtest_pred_impl.h
+++ b/external/gtest/include/gtest/gtest_pred_impl.h
@@ -27,7 +27,7 @@
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
-// This file is AUTOMATICALLY GENERATED on 09/24/2010 by command
+// This file is AUTOMATICALLY GENERATED on 10/31/2011 by command
 // 'gen_gtest_pred_impl.py 5'.  DO NOT EDIT BY HAND!
 //
 // Implements a family of generic predicate assertion macros.
@@ -98,7 +98,7 @@ AssertionResult AssertPred1Helper(const char* pred_text,
 // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
 // Don't use this in your code.
 #define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
-  GTEST_ASSERT_(pred_format(#v1, v1),\
+  GTEST_ASSERT_(pred_format(#v1, v1), \
                 on_failure)
 
 // Internal macro for implementing {EXPECT|ASSERT}_PRED1.  Don't use
@@ -144,7 +144,7 @@ AssertionResult AssertPred2Helper(const char* pred_text,
 // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
 // Don't use this in your code.
 #define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
-  GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2),\
+  GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \
                 on_failure)
 
 // Internal macro for implementing {EXPECT|ASSERT}_PRED2.  Don't use
@@ -197,7 +197,7 @@ AssertionResult AssertPred3Helper(const char* pred_text,
 // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
 // Don't use this in your code.
 #define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\
-  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3),\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \
                 on_failure)
 
 // Internal macro for implementing {EXPECT|ASSERT}_PRED3.  Don't use
@@ -257,7 +257,7 @@ AssertionResult AssertPred4Helper(const char* pred_text,
 // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
 // Don't use this in your code.
 #define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
-  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4),\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \
                 on_failure)
 
 // Internal macro for implementing {EXPECT|ASSERT}_PRED4.  Don't use
@@ -324,7 +324,7 @@ AssertionResult AssertPred5Helper(const char* pred_text,
 // Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
 // Don't use this in your code.
 #define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
-  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5),\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
                 on_failure)
 
 // Internal macro for implementing {EXPECT|ASSERT}_PRED5.
Don't use diff --git a/tests/gtest/include/gtest/gtest_prod.h b/external/gtest/include/gtest/gtest_prod.h old mode 100644 new mode 100755 similarity index 100% rename from tests/gtest/include/gtest/gtest_prod.h rename to external/gtest/include/gtest/gtest_prod.h diff --git a/tests/gtest/include/gtest/internal/gtest-death-test-internal.h b/external/gtest/include/gtest/internal/gtest-death-test-internal.h old mode 100644 new mode 100755 similarity index 94% rename from tests/gtest/include/gtest/internal/gtest-death-test-internal.h rename to external/gtest/include/gtest/internal/gtest-death-test-internal.h index 1d9f83b652..a82aec1f9b --- a/tests/gtest/include/gtest/internal/gtest-death-test-internal.h +++ b/external/gtest/include/gtest/internal/gtest-death-test-internal.h @@ -127,11 +127,11 @@ class GTEST_API_ DeathTest { // the last death test. static const char* LastMessage(); - static void set_last_death_test_message(const String& message); + static void set_last_death_test_message(const std::string& message); private: // A string containing a description of the outcome of the last death test. - static String last_death_test_message_; + static std::string last_death_test_message_; GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest); }; @@ -148,7 +148,7 @@ class DeathTestFactory { class DefaultDeathTestFactory : public DeathTestFactory { public: virtual bool Create(const char* statement, const RE* regex, - const char* file, int line, DeathTest** test); + const char* file, int line, DeathTest** test) override; }; // Returns true if exit_status describes a process that was terminated @@ -217,12 +217,23 @@ GTEST_API_ bool ExitedUnsuccessfully(int exit_status); // The symbol "fail" here expands to something into which a message // can be streamed. +// This macro is for implementing ASSERT/EXPECT_DEBUG_DEATH when compiled in +// NDEBUG mode. In this case we need the statements to be executed, the regex is +// ignored, and the macro must accept a streamed message even though the message +// is never printed. +# define GTEST_EXECUTE_STATEMENT_(statement, regex) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } else \ + ::testing::Message() + // A class representing the parsed contents of the // --gtest_internal_run_death_test flag, as it existed when // RUN_ALL_TESTS was called. 
 class InternalRunDeathTestFlag {
  public:
-  InternalRunDeathTestFlag(const String& a_file,
+  InternalRunDeathTestFlag(const std::string& a_file,
                            int a_line,
                            int an_index,
                            int a_write_fd)
@@ -234,13 +245,13 @@ class InternalRunDeathTestFlag {
       posix::Close(write_fd_);
   }
 
-  String file() const { return file_; }
+  const std::string& file() const { return file_; }
   int line() const { return line_; }
   int index() const { return index_; }
   int write_fd() const { return write_fd_; }
 
  private:
-  String file_;
+  std::string file_;
   int line_;
   int index_;
   int write_fd_;
diff --git a/tests/gtest/include/gtest/internal/gtest-filepath.h b/external/gtest/include/gtest/internal/gtest-filepath.h
old mode 100644
new mode 100755
similarity index 96%
rename from tests/gtest/include/gtest/internal/gtest-filepath.h
rename to external/gtest/include/gtest/internal/gtest-filepath.h
index b36b3cf210..7a13b4b0de
--- a/tests/gtest/include/gtest/internal/gtest-filepath.h
+++ b/external/gtest/include/gtest/internal/gtest-filepath.h
@@ -61,11 +61,7 @@ class GTEST_API_ FilePath {
   FilePath() : pathname_("") { }
   FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { }
 
-  explicit FilePath(const char* pathname) : pathname_(pathname) {
-    Normalize();
-  }
-
-  explicit FilePath(const String& pathname) : pathname_(pathname) {
+  explicit FilePath(const std::string& pathname) : pathname_(pathname) {
     Normalize();
   }
 
@@ -78,7 +74,7 @@ class GTEST_API_ FilePath {
     pathname_ = rhs.pathname_;
   }
 
-  String ToString() const { return pathname_; }
+  const std::string& string() const { return pathname_; }
   const char* c_str() const { return pathname_.c_str(); }
 
   // Returns the current working directory, or "" if unsuccessful.
@@ -111,8 +107,8 @@ class GTEST_API_ FilePath {
                                         const FilePath& base_name,
                                         const char* extension);
 
-  // Returns true iff the path is NULL or "".
-  bool IsEmpty() const { return c_str() == NULL || *c_str() == '\0'; }
+  // Returns true iff the path is "".
+  bool IsEmpty() const { return pathname_.empty(); }
 
   // If input name has a trailing separator character, removes it and returns
   // the name, otherwise return the name string unmodified.
@@ -201,7 +197,7 @@ class GTEST_API_ FilePath {
   // separators.  Returns NULL if no path separator was found.
   const char* FindLastPathSeparator() const;
 
-  String pathname_;
+  std::string pathname_;
 };  // class FilePath
 
 }  // namespace internal
diff --git a/tests/gtest/include/gtest/internal/gtest-internal.h b/external/gtest/include/gtest/internal/gtest-internal.h
old mode 100644
new mode 100755
similarity index 88%
rename from tests/gtest/include/gtest/internal/gtest-internal.h
rename to external/gtest/include/gtest/internal/gtest-internal.h
index 7aa1197f15..064fcfa439
--- a/tests/gtest/include/gtest/internal/gtest-internal.h
+++ b/external/gtest/include/gtest/internal/gtest-internal.h
@@ -46,12 +46,18 @@
 # include <unistd.h>
 #endif  // GTEST_OS_LINUX
 
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
 #include <ctype.h>
+#include <float.h>
 #include <string.h>
 #include <iomanip>
 #include <limits>
 #include <set>
 
+#include "gtest/gtest-message.h"
 #include "gtest/internal/gtest-string.h"
 #include "gtest/internal/gtest-filepath.h"
 #include "gtest/internal/gtest-type-util.h"
@@ -67,36 +73,6 @@
 #define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar)
 #define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar
 
-// Google Test defines the testing::Message class to allow construction of
-// test messages via the << operator.  The idea is that anything
-// streamable to std::ostream can be streamed to a testing::Message.
-// This allows a user to use his own types in Google Test assertions by
-// overloading the << operator.
-//
-// util/gtl/stl_logging-inl.h overloads << for STL containers.  These
-// overloads cannot be defined in the std namespace, as that will be
-// undefined behavior.  Therefore, they are defined in the global
-// namespace instead.
-//
-// C++'s symbol lookup rule (i.e. Koenig lookup) says that these
-// overloads are visible in either the std namespace or the global
-// namespace, but not other namespaces, including the testing
-// namespace which Google Test's Message class is in.
-//
-// To allow STL containers (and other types that has a << operator
-// defined in the global namespace) to be used in Google Test assertions,
-// testing::Message must access the custom << operator from the global
-// namespace.  Hence this helper function.
-//
-// Note: Jeffrey Yasskin suggested an alternative fix by "using
-// ::operator<<;" in the definition of Message's operator<<.  That fix
-// doesn't require a helper function, but unfortunately doesn't
-// compile with MSVC.
-template <typename T>
-inline void GTestStreamToHelper(std::ostream* os, const T& val) {
-  *os << val;
-}
-
 class ProtocolMessage;
 namespace proto2 { class Message; }
 
@@ -122,17 +98,12 @@ class TestInfoImpl;                    // Opaque implementation of TestInfo
 class UnitTestImpl;                    // Opaque implementation of UnitTest
 
 // How many times InitGoogleTest() has been called.
-extern int g_init_gtest_count;
+GTEST_API_ extern int g_init_gtest_count;
 
 // The text used in failure messages to indicate the start of the
 // stack trace.
 GTEST_API_ extern const char kStackTraceMarker[];
 
-// A secret type that Google Test users don't know about.  It has no
-// definition on purpose.  Therefore it's impossible to create a
-// Secret object, which is what we want.
-class Secret;
-
 // Two overloaded helpers for checking at compile time whether an
 // expression is a null pointer literal (i.e. NULL or any 0-valued
 // compile-time integral constant).  Their return values have
@@ -163,8 +134,23 @@ char (&IsNullLiteralHelper(...))[2];  // NOLINT
 #endif  // GTEST_ELLIPSIS_NEEDS_POD_
 
 // Appends the user-supplied message to the Google-Test-generated message.
-GTEST_API_ String AppendUserMessage(const String& gtest_msg,
-                                    const Message& user_msg);
+GTEST_API_ std::string AppendUserMessage(
+    const std::string& gtest_msg, const Message& user_msg);
+
+#if GTEST_HAS_EXCEPTIONS
+
+// This exception is thrown by (and only by) a failed Google Test
+// assertion when GTEST_FLAG(throw_on_failure) is true (if exceptions
+// are enabled).  We derive it from std::runtime_error, which is for
+// errors presumably detectable only at run time.  Since
+// std::runtime_error inherits from std::exception, many testing
+// frameworks know how to extract and print the message inside it.
+class GTEST_API_ GoogleTestFailureException : public ::std::runtime_error {
+ public:
+  explicit GoogleTestFailureException(const TestPartResult& failure);
+};
+
+#endif  // GTEST_HAS_EXCEPTIONS
 
 // A helper class for creating scoped traces in user programs.
 class GTEST_API_ ScopedTrace {
@@ -185,77 +171,6 @@ class GTEST_API_ ScopedTrace {
                             // c'tor and d'tor.  Therefore it doesn't
                             // need to be used otherwise.
 
-// Converts a streamable value to a String.  A NULL pointer is
-// converted to "(null)".  When the input value is a ::string,
-// ::std::string, ::wstring, or ::std::wstring object, each NUL
-// character in it is replaced with "\\0".
-// Declared here but defined in gtest.h, so that it has access
-// to the definition of the Message class, required by the ARM
-// compiler.
-template <typename T>
-String StreamableToString(const T& streamable);
-
-// The Symbian compiler has a bug that prevents it from selecting the
-// correct overload of FormatForComparisonFailureMessage (see below)
-// unless we pass the first argument by reference.  If we do that,
-// however, Visual Age C++ 10.1 generates a compiler error.  Therefore
-// we only apply the work-around for Symbian.
-#if defined(__SYMBIAN32__)
-# define GTEST_CREF_WORKAROUND_ const&
-#else
-# define GTEST_CREF_WORKAROUND_
-#endif
-
-// When this operand is a const char* or char*, if the other operand
-// is a ::std::string or ::string, we print this operand as a C string
-// rather than a pointer (we do the same for wide strings); otherwise
-// we print it as a pointer to be safe.
-
-// This internal macro is used to avoid duplicated code.
-#define GTEST_FORMAT_IMPL_(operand2_type, operand1_printer)\
-inline String FormatForComparisonFailureMessage(\
-    operand2_type::value_type* GTEST_CREF_WORKAROUND_ str, \
-    const operand2_type& /*operand2*/) {\
-  return operand1_printer(str);\
-}\
-inline String FormatForComparisonFailureMessage(\
-    const operand2_type::value_type* GTEST_CREF_WORKAROUND_ str, \
-    const operand2_type& /*operand2*/) {\
-  return operand1_printer(str);\
-}
-
-GTEST_FORMAT_IMPL_(::std::string, String::ShowCStringQuoted)
-#if GTEST_HAS_STD_WSTRING
-GTEST_FORMAT_IMPL_(::std::wstring, String::ShowWideCStringQuoted)
-#endif  // GTEST_HAS_STD_WSTRING
-
-#if GTEST_HAS_GLOBAL_STRING
-GTEST_FORMAT_IMPL_(::string, String::ShowCStringQuoted)
-#endif  // GTEST_HAS_GLOBAL_STRING
-#if GTEST_HAS_GLOBAL_WSTRING
-GTEST_FORMAT_IMPL_(::wstring, String::ShowWideCStringQuoted)
-#endif  // GTEST_HAS_GLOBAL_WSTRING
-
-#undef GTEST_FORMAT_IMPL_
-
-// The next four overloads handle the case where the operand being
-// printed is a char/wchar_t pointer and the other operand is not a
-// string/wstring object.  In such cases, we just print the operand as
-// a pointer to be safe.
-#define GTEST_FORMAT_CHAR_PTR_IMPL_(CharType) \
-  template <typename T> \
-  String FormatForComparisonFailureMessage(CharType* GTEST_CREF_WORKAROUND_ p, \
-                                           const T&) { \
-    return PrintToString(static_cast<const void*>(p)); \
-  }
-
-GTEST_FORMAT_CHAR_PTR_IMPL_(char)
-GTEST_FORMAT_CHAR_PTR_IMPL_(const char)
-GTEST_FORMAT_CHAR_PTR_IMPL_(wchar_t)
-GTEST_FORMAT_CHAR_PTR_IMPL_(const wchar_t)
-
-#undef GTEST_FORMAT_CHAR_PTR_IMPL_
-
 // Constructs and returns the message for an equality assertion
 // (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure.
 //
@@ -273,12 +188,12 @@ GTEST_FORMAT_CHAR_PTR_IMPL_(const wchar_t)
 // be inserted into the message.
 GTEST_API_ AssertionResult EqFailure(const char* expected_expression,
                                      const char* actual_expression,
-                                     const String& expected_value,
-                                     const String& actual_value,
+                                     const std::string& expected_value,
+                                     const std::string& actual_value,
                                      bool ignoring_case);
 
 // Constructs a failure message for Boolean assertions such as EXPECT_TRUE.
-GTEST_API_ String GetBoolAssertionFailureMessage(
+GTEST_API_ std::string GetBoolAssertionFailureMessage(
     const AssertionResult& assertion_result,
     const char* expression_text,
     const char* actual_predicate_value,
@@ -353,7 +268,7 @@ class FloatingPoint {
   // bits.  Therefore, 4 should be enough for ordinary use.
   //
   // See the following article for more details on ULP:
-  // http://www.cygnus-software.com/papers/comparingfloats/comparingfloats.htm.
+  // http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/
   static const size_t kMaxUlps = 4;
 
   // Constructs a FloatingPoint from a raw floating-point number.
@@ -380,6 +295,9 @@ class FloatingPoint {
     return ReinterpretBits(kExponentBitMask);
   }
 
+  // Returns the maximum representable finite floating-point number.
+  static RawType Max();
+
   // Non-static methods
 
   // Returns the bits that represents this number.
@@ -460,6 +378,13 @@ class FloatingPoint {
   FloatingPointUnion u_;
 };
 
+// We cannot use std::numeric_limits<T>::max() as it clashes with the max()
+// macro defined by <windows.h>.
+template <>
+inline float FloatingPoint<float>::Max() { return FLT_MAX; }
+template <>
+inline double FloatingPoint<double>::Max() { return DBL_MAX; }
+
 // Typedefs the instances of the FloatingPoint template class that we
 // care to use.
 typedef FloatingPoint<float> Float;
@@ -526,7 +451,7 @@ class TestFactoryBase {
 template <class TestClass>
 class TestFactoryImpl : public TestFactoryBase {
  public:
-  virtual Test* CreateTest() { return new TestClass; }
+  virtual Test* CreateTest() override { return new TestClass; }
 };
 
 #if GTEST_OS_WINDOWS
@@ -554,7 +479,7 @@ typedef void (*TearDownTestCaseFunc)();
 //   test_case_name:   name of the test case
 //   name:             name of the test
 //   type_param        the name of the test's type parameter, or NULL if
-//                     this is not a typed or a type-parameterized test.
+//                     this is not a typed or a type-parameterized test.
 //   value_param       text representation of the test's value parameter,
 //                     or NULL if this is not a type-parameterized test.
 //   fixture_class_id: ID of the test fixture class
@@ -564,7 +489,8 @@ typedef void (*TearDownTestCaseFunc)();
 //                     The newly created TestInfo instance will assume
 //                     ownership of the factory object.
 GTEST_API_ TestInfo* MakeAndRegisterTestInfo(
-    const char* test_case_name, const char* name,
+    const char* test_case_name,
+    const char* name,
     const char* type_param,
     const char* value_param,
     TypeId fixture_class_id,
@@ -624,9 +550,9 @@ inline const char* SkipComma(const char* str) {
 
 // Returns the prefix of 'str' before the first comma in it; returns
 // the entire string if it contains no comma.
-inline String GetPrefixUntilComma(const char* str) {
+inline std::string GetPrefixUntilComma(const char* str) {
   const char* comma = strchr(str, ',');
-  return comma == NULL ? String(str) : String(str, comma - str);
+  return comma == NULL ? str : std::string(str, comma);
 }
 
 // TypeParameterizedTest<Fixture, TestSel, Types>::Register()
@@ -652,8 +578,8 @@ class TypeParameterizedTest {
     // First, registers the first type-parameterized test in the type
     // list.
     MakeAndRegisterTestInfo(
-        String::Format("%s%s%s/%d", prefix, prefix[0] == '\0' ? "" : "/",
-                       case_name, index).c_str(),
+        (std::string(prefix) + (prefix[0] == '\0' ? "" : "/") + case_name + "/"
+         + StreamableToString(index)).c_str(),
         GetPrefixUntilComma(test_names).c_str(),
         GetTypeName<Type>().c_str(),
         NULL,  // No value parameter.
@@ -711,7 +637,7 @@ class TypeParameterizedTestCase {
 
 #endif  // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P
 
-// Returns the current OS stack trace as a String.
+// Returns the current OS stack trace as an std::string.
 //
 // The maximum number of stack frames to be included is specified by
 // the gtest_stack_trace_depth flag.  The skip_count parameter
@@ -721,8 +647,8 @@ class TypeParameterizedTestCase {
 // For example, if Foo() calls Bar(), which in turn calls
 // GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
 // the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
-GTEST_API_ String GetCurrentOsStackTraceExceptTop(UnitTest* unit_test,
-                                                  int skip_count);
+GTEST_API_ std::string GetCurrentOsStackTraceExceptTop(
+    UnitTest* unit_test, int skip_count);
 
 // Helpers for suppressing warnings on unreachable code or constant
 // condition.
@@ -797,13 +723,19 @@ struct RemoveConst { typedef T type; };  // NOLINT
 // MSVC 8.0, Sun C++, and IBM XL C++ have a bug which causes the above
 // definition to fail to remove the const in 'const int[3]' and 'const
 // char[3][4]'.  The following specialization works around the bug.
-// However, it causes trouble with GCC and thus needs to be
-// conditionally compiled.
-#if defined(_MSC_VER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
 template <typename T, size_t N>
 struct RemoveConst<const T[N]> {
   typedef typename RemoveConst<T>::type type[N];
};
+
+#if defined(_MSC_VER) && _MSC_VER < 1400
+// This is the only specialization that allows VC++ 7.1 to remove const in
+// 'const int[3] and 'const int[3][4]'.  However, it causes trouble with GCC
+// and thus needs to be conditionally compiled.
+template <typename T, size_t N>
+struct RemoveConst<T[N]> {
+  typedef typename RemoveConst<T>::type type[N];
+};
 #endif
 
 // A handy wrapper around RemoveConst that works when the argument
@@ -1206,7 +1138,7 @@ class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\
  public:\
   GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\
  private:\
-  virtual void TestBody();\
+  virtual void TestBody() override;\
  static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\
  GTEST_DISALLOW_COPY_AND_ASSIGN_(\
      GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\
diff --git a/tests/gtest/include/gtest/internal/gtest-linked_ptr.h b/external/gtest/include/gtest/internal/gtest-linked_ptr.h
old mode 100644
new mode 100755
similarity index 98%
rename from tests/gtest/include/gtest/internal/gtest-linked_ptr.h
rename to external/gtest/include/gtest/internal/gtest-linked_ptr.h
index 57147b4e8b..b1362cd002
--- a/tests/gtest/include/gtest/internal/gtest-linked_ptr.h
+++ b/external/gtest/include/gtest/internal/gtest-linked_ptr.h
@@ -105,8 +105,8 @@ class linked_ptr_internal {
   // framework.
 
   // Join an existing circle.
-  // L < g_linked_ptr_mutex
-  void join(linked_ptr_internal const* ptr) {
+  void join(linked_ptr_internal const* ptr)
+      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
     MutexLock lock(&g_linked_ptr_mutex);
 
     linked_ptr_internal const* p = ptr;
@@ -117,8 +117,8 @@ class linked_ptr_internal {
 
   // Leave whatever circle we're part of.  Returns true if we were the
   // last member of the circle.  Once this is done, you can join() another.
- // L < g_linked_ptr_mutex - bool depart() { + bool depart() + GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) { MutexLock lock(&g_linked_ptr_mutex); if (next_ == this) return true; diff --git a/tests/gtest/include/gtest/internal/gtest-param-util-generated.h b/external/gtest/include/gtest/internal/gtest-param-util-generated.h old mode 100644 new mode 100755 similarity index 83% rename from tests/gtest/include/gtest/internal/gtest-param-util-generated.h rename to external/gtest/include/gtest/internal/gtest-param-util-generated.h index 258267500e..e80548592c --- a/tests/gtest/include/gtest/internal/gtest-param-util-generated.h +++ b/external/gtest/include/gtest/internal/gtest-param-util-generated.h @@ -95,7 +95,7 @@ class ValueArray2 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_}; + const T array[] = {static_cast(v1_), static_cast(v2_)}; return ValuesIn(array); } @@ -114,7 +114,8 @@ class ValueArray3 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_)}; return ValuesIn(array); } @@ -135,7 +136,8 @@ class ValueArray4 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_)}; return ValuesIn(array); } @@ -157,7 +159,8 @@ class ValueArray5 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_)}; return ValuesIn(array); } @@ -181,7 +184,9 @@ class ValueArray6 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_)}; return ValuesIn(array); } @@ -206,7 +211,9 @@ class ValueArray7 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_)}; return ValuesIn(array); } @@ -233,7 +240,9 @@ class ValueArray8 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_)}; return ValuesIn(array); } @@ -261,7 +270,10 @@ class ValueArray9 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_)}; return ValuesIn(array); } @@ -290,7 +302,10 @@ class ValueArray10 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_)}; return ValuesIn(array); } @@ -321,7 +336,10 @@ class ValueArray11 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_}; + const T array[] = {static_cast(v1_), 
static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_)}; return ValuesIn(array); } @@ -353,8 +371,11 @@ class ValueArray12 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, - v12_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_)}; return ValuesIn(array); } @@ -388,8 +409,11 @@ class ValueArray13 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, - v12_, v13_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_)}; return ValuesIn(array); } @@ -424,8 +448,11 @@ class ValueArray14 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, - v12_, v13_, v14_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_)}; return ValuesIn(array); } @@ -461,8 +488,12 @@ class ValueArray15 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, - v12_, v13_, v14_, v15_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_)}; return ValuesIn(array); } @@ -501,8 +532,12 @@ class ValueArray16 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, - v12_, v13_, v14_, v15_, v16_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_)}; return ValuesIn(array); } @@ -542,8 +577,12 @@ class ValueArray17 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, - v12_, v13_, v14_, v15_, v16_, v17_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_)}; return ValuesIn(array); } @@ -584,8 +623,13 @@ class ValueArray18 { template operator ParamGenerator() const { - const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_, - v12_, v13_, v14_, v15_, v16_, v17_, v18_}; + const T array[] = {static_cast(v1_), static_cast(v2_), + 
static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_)};
     return ValuesIn(array);
   }
 
@@ -627,8 +671,13 @@ class ValueArray19 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_)};
     return ValuesIn(array);
   }
 
@@ -672,8 +721,13 @@ class ValueArray20 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_)};
     return ValuesIn(array);
   }
 
@@ -719,8 +773,14 @@ class ValueArray21 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_)};
     return ValuesIn(array);
   }
 
@@ -767,8 +827,14 @@ class ValueArray22 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_)};
     return ValuesIn(array);
   }
 
@@ -817,9 +883,14 @@ class ValueArray23 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_,
-        v23_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_)};
     return ValuesIn(array);
   }
 
@@ -869,9 +940,15 @@ class ValueArray24 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_)};
     return ValuesIn(array);
  }
 
@@ -922,9 +999,15 @@ class ValueArray25 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_)};
     return ValuesIn(array);
   }
 
@@ -977,9 +1060,15 @@ class ValueArray26 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_)};
     return ValuesIn(array);
   }
 
@@ -1034,9 +1123,16 @@ class ValueArray27 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_)};
     return ValuesIn(array);
   }
 
@@ -1092,9 +1188,16 @@ class ValueArray28 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_)};
     return ValuesIn(array);
   }
 
@@ -1151,9 +1254,16 @@ class ValueArray29 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_)};
     return ValuesIn(array);
   }
 
@@ -1212,9 +1322,17 @@ class ValueArray30 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_)};
     return ValuesIn(array);
   }
 
@@ -1275,9 +1393,17 @@ class ValueArray31 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_)};
     return ValuesIn(array);
   }
 
@@ -1339,9 +1465,17 @@ class ValueArray32 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_)};
     return ValuesIn(array);
   }
 
@@ -1405,9 +1539,18 @@ class ValueArray33 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_)};
     return ValuesIn(array);
   }
 
@@ -1472,9 +1615,18 @@ class ValueArray34 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_)};
     return ValuesIn(array);
   }
 
@@ -1540,10 +1692,18 @@ class ValueArray35 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_,
-        v35_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_)};
     return ValuesIn(array);
   }
 
@@ -1611,10 +1771,19 @@ class ValueArray36 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_)};
     return ValuesIn(array);
   }
 
@@ -1684,10 +1853,19 @@ class ValueArray37 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_)};
     return ValuesIn(array);
   }
 
@@ -1758,10 +1936,19 @@ class ValueArray38 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_)};
     return ValuesIn(array);
   }
 
@@ -1833,10 +2020,20 @@ class ValueArray39 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_, v39_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_)};
     return ValuesIn(array);
   }
 
@@ -1910,10 +2107,20 @@ class ValueArray40 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_, v39_, v40_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_)};
     return ValuesIn(array);
   }
 
@@ -1989,10 +2196,20 @@ class ValueArray41 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_, v39_, v40_, v41_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_)};
     return ValuesIn(array);
   }
 
@@ -2069,10 +2286,21 @@ class ValueArray42 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_, v39_, v40_, v41_, v42_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_)};
     return ValuesIn(array);
   }
 
@@ -2150,10 +2378,21 @@ class ValueArray43 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_)};
     return ValuesIn(array);
   }
 
@@ -2233,10 +2472,21 @@ class ValueArray44 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_)};
     return ValuesIn(array);
   }
 
@@ -2317,10 +2567,22 @@ class ValueArray45 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+        static_cast<T>(v45_)};
     return ValuesIn(array);
   }
 
@@ -2403,10 +2665,22 @@ class ValueArray46 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+        static_cast<T>(v45_), static_cast<T>(v46_)};
     return ValuesIn(array);
   }
 
@@ -2491,11 +2765,22 @@ class ValueArray47 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_,
-        v47_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_)};
     return ValuesIn(array);
   }
 
@@ -2581,11 +2866,23 @@ class ValueArray48 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_,
-        v48_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
+        static_cast<T>(v48_)};
     return ValuesIn(array);
   }
 
@@ -2672,11 +2969,23 @@ class ValueArray49 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_,
-        v48_, v49_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
+        static_cast<T>(v48_), static_cast<T>(v49_)};
    return ValuesIn(array);
  }
 
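Every ValueArrayN hunk above makes the same mechanical change: the generated Values(v1, ..., vN) helper classes keep their arguments in their original types, and only the templated conversion operator coerces them to the test's parameter type T. Making that coercion an explicit static_cast<T> keeps heterogeneous Values(1, 2.5) lists compiling cleanly under strict warning flags. A reduced sketch of the pattern — MyValueArray2 below is illustrative, not the real generated ValueArray2:

    // Minimal sketch of the generated pattern, assuming only standard C++.
    #include <cstdio>

    template <typename T1, typename T2>
    class MyValueArray2 {
     public:
      MyValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {}

      // The explicit static_cast<T> is the change made in the diff above:
      // without it, converting {int, double} to T == float relies on
      // implicit conversions and can trigger conversion/narrowing warnings.
      template <typename T>
      void CopyTo(T (&out)[2]) const {
        const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_)};
        out[0] = array[0];
        out[1] = array[1];
      }

     private:
      const T1 v1_;
      const T2 v2_;
    };

    int main() {
      MyValueArray2<int, double> values(1, 2.5);
      float out[2];
      values.CopyTo(out);  // heterogeneous values coerced to float
      std::printf("%f %f\n", out[0], out[1]);
    }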
@@ -2764,11 +3073,23 @@ class ValueArray50 {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {v1_, v2_, v3_, v4_, v5_, v6_, v7_, v8_, v9_, v10_, v11_,
-        v12_, v13_, v14_, v15_, v16_, v17_, v18_, v19_, v20_, v21_, v22_, v23_,
-        v24_, v25_, v26_, v27_, v28_, v29_, v30_, v31_, v32_, v33_, v34_, v35_,
-        v36_, v37_, v38_, v39_, v40_, v41_, v42_, v43_, v44_, v45_, v46_, v47_,
-        v48_, v49_, v50_};
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_),
+        static_cast<T>(v30_), static_cast<T>(v31_), static_cast<T>(v32_),
+        static_cast<T>(v33_), static_cast<T>(v34_), static_cast<T>(v35_),
+        static_cast<T>(v36_), static_cast<T>(v37_), static_cast<T>(v38_),
+        static_cast<T>(v39_), static_cast<T>(v40_), static_cast<T>(v41_),
+        static_cast<T>(v42_), static_cast<T>(v43_), static_cast<T>(v44_),
+        static_cast<T>(v45_), static_cast<T>(v46_), static_cast<T>(v47_),
+        static_cast<T>(v48_), static_cast<T>(v49_), static_cast<T>(v50_)};
     return ValuesIn(array);
   }
 
diff --git a/tests/gtest/include/gtest/internal/gtest-param-util-generated.h.pump b/external/gtest/include/gtest/internal/gtest-param-util-generated.h.pump
old mode 100644
new mode 100755
similarity index 99%
rename from tests/gtest/include/gtest/internal/gtest-param-util-generated.h.pump
rename to external/gtest/include/gtest/internal/gtest-param-util-generated.h.pump
index dbe9386305..009206fd31
--- a/tests/gtest/include/gtest/internal/gtest-param-util-generated.h.pump
+++ b/external/gtest/include/gtest/internal/gtest-param-util-generated.h.pump
@@ -98,7 +98,7 @@ class ValueArray$i {
 
   template <typename T>
   operator ParamGenerator<T>() const {
-    const T array[] = {$for j, [[v$(j)_]]};
+    const T array[] = {$for j, [[static_cast<T>(v$(j)_)]]};
     return ValuesIn(array);
   }
 
diff --git a/tests/gtest/include/gtest/internal/gtest-param-util.h b/external/gtest/include/gtest/internal/gtest-param-util.h
old mode 100644
new mode 100755
similarity index 96%
rename from tests/gtest/include/gtest/internal/gtest-param-util.h
rename to external/gtest/include/gtest/internal/gtest-param-util.h
index 0ef9718cf4..86ac88f73f
--- a/tests/gtest/include/gtest/internal/gtest-param-util.h
+++ b/external/gtest/include/gtest/internal/gtest-param-util.h
@@ -270,12 +270,12 @@ class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
   template <typename ForwardIterator>
   ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
       : container_(begin, end) {}
-  virtual ~ValuesInIteratorRangeGenerator() {}
+  virtual ~ValuesInIteratorRangeGenerator() override {}
 
-  virtual ParamIteratorInterface<T>* Begin() const {
+  virtual ParamIteratorInterface<T>* Begin() const override {
     return new Iterator(this, container_.begin());
   }
-  virtual ParamIteratorInterface<T>* End() const {
+  virtual ParamIteratorInterface<T>* End() const override {
     return new Iterator(this, container_.end());
   }
 
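ValuesInIteratorRangeGenerator copies the supplied range into its container_ member, and each Begin()/End() call above mints a fresh Iterator over that copy. A short usage sketch of how this generator is typically reached from user code, assuming this vendored gtest (1.7-era API) is on the include path and linked together with gtest_main:

    #include "gtest/gtest.h"

    class RangeTest : public ::testing::TestWithParam<int> {};

    TEST_P(RangeTest, IsPositive) { EXPECT_GT(GetParam(), 0); }

    // ValuesIn(begin, end) constructs a ValuesInIteratorRangeGenerator<int>,
    // which copies [begin, end) into its container_ member.
    static const int kInputs[] = {1, 2, 3};
    INSTANTIATE_TEST_CASE_P(Small, RangeTest,
                            ::testing::ValuesIn(kInputs, kInputs + 3));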
@@ -287,16 +287,16 @@ class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
     Iterator(const ParamGeneratorInterface<T>* base,
              typename ContainerType::const_iterator iterator)
         : base_(base), iterator_(iterator) {}
-    virtual ~Iterator() {}
+    virtual ~Iterator() override {}
 
-    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+    virtual const ParamGeneratorInterface<T>* BaseGenerator() const override {
       return base_;
     }
-    virtual void Advance() {
+    virtual void Advance() override {
       ++iterator_;
       value_.reset();
     }
-    virtual ParamIteratorInterface<T>* Clone() const {
+    virtual ParamIteratorInterface<T>* Clone() const override {
      return new Iterator(*this);
    }
     // We need to use cached value referenced by iterator_ because *iterator_
@@ -306,12 +306,12 @@ class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
     // can advance iterator_ beyond the end of the range, and we cannot
     // detect that fact. The client code, on the other hand, is
     // responsible for not calling Current() on an out-of-range iterator.
-    virtual const T* Current() const {
+    virtual const T* Current() const override {
       if (value_.get() == NULL)
         value_.reset(new T(*iterator_));
       return value_.get();
     }
-    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+    virtual bool Equals(const ParamIteratorInterface<T>& other) const override {
       // Having the same base generator guarantees that the other
       // iterator is of the same type and we can downcast.
       GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
@@ -355,7 +355,7 @@ class ParameterizedTestFactory : public TestFactoryBase {
   typedef typename TestClass::ParamType ParamType;
   explicit ParameterizedTestFactory(ParamType parameter) :
       parameter_(parameter) {}
-  virtual Test* CreateTest() {
+  virtual Test* CreateTest() override {
     TestClass::SetParam(&parameter_);
     return new TestClass();
   }
 
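ParameterizedTestFactory above is the glue between instantiation and fixture: CreateTest() parks the parameter in a static slot on the test class, then default-constructs the test, which reads it back. The same two-step handshake, stripped of gtest types — MyTest and MyFactory are illustrative names only:

    #include <cassert>

    struct MyTest {
      static void SetParam(const int* p) { param_ = p; }
      MyTest() : value_(*param_) {}
      int value_;
      static const int* param_;
    };
    const int* MyTest::param_ = 0;

    struct MyFactory {
      explicit MyFactory(int parameter) : parameter_(parameter) {}
      MyTest* CreateTest() {
        MyTest::SetParam(&parameter_);  // same two steps as the diff above
        return new MyTest();
      }
      int parameter_;
    };

    int main() {
      MyFactory factory(42);
      MyTest* t = factory.CreateTest();
      assert(t->value_ == 42);
      delete t;
    }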
@@ -454,9 +454,9 @@ class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
       : test_case_name_(name) {}
 
   // Test case base name for display purposes.
-  virtual const string& GetTestCaseName() const { return test_case_name_; }
+  virtual const string& GetTestCaseName() const override { return test_case_name_; }
   // Test case id to verify identity.
-  virtual TypeId GetTestCaseTypeId() const { return GetTypeId<TestCase>(); }
+  virtual TypeId GetTestCaseTypeId() const override { return GetTypeId<TestCase>(); }
   // TEST_P macro uses AddTestPattern() to record information
   // about a single test in a LocalTestInfo structure.
   // test_case_name is the base name of the test case (without invocation
@@ -484,7 +484,7 @@ class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
   // This method should not be called more then once on any single
   // instance of a ParameterizedTestCaseInfoBase derived class.
   // UnitTest has a guard to prevent from calling this method more then once.
-  virtual void RegisterTests() {
+  virtual void RegisterTests() override {
     for (typename TestInfoContainer::iterator test_it = tests_.begin();
          test_it != tests_.end(); ++test_it) {
       linked_ptr<TestInfo> test_info = *test_it;
@@ -494,10 +494,10 @@ class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
         const string& instantiation_name = gen_it->first;
         ParamGenerator<ParamType> generator((*gen_it->second)());
 
-        Message test_case_name_stream;
+        string test_case_name;
         if ( !instantiation_name.empty() )
-          test_case_name_stream << instantiation_name << "/";
-        test_case_name_stream << test_info->test_case_base_name;
+          test_case_name = instantiation_name + "/";
+        test_case_name += test_info->test_case_base_name;
 
         int i = 0;
         for (typename ParamGenerator<ParamType>::iterator param_it =
@@ -506,7 +506,7 @@ class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
           Message test_name_stream;
           test_name_stream << test_info->test_base_name << "/" << i;
           MakeAndRegisterTestInfo(
-              test_case_name_stream.GetString().c_str(),
+              test_case_name.c_str(),
               test_name_stream.GetString().c_str(),
               NULL,  // No type parameter.
              PrintToString(*param_it).c_str(),
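The hunks above only change how the instantiation-qualified test case name is assembled — a plain std::string instead of a Message stream — while the resulting names are unchanged. A standalone sketch of the naming scheme, using illustrative values:

    #include <cstdio>
    #include <sstream>
    #include <string>

    int main() {
      const std::string instantiation_name = "Small";
      const std::string test_case_base_name = "RangeTest";
      const std::string test_base_name = "IsPositive";

      std::string test_case_name;
      if (!instantiation_name.empty())
        test_case_name = instantiation_name + "/";
      test_case_name += test_case_base_name;

      for (int i = 0; i < 3; ++i) {
        std::ostringstream test_name_stream;
        test_name_stream << test_base_name << "/" << i;
        // Prints Small/RangeTest.IsPositive/0 ... /2, the names gtest reports.
        std::printf("%s.%s\n", test_case_name.c_str(),
                    test_name_stream.str().c_str());
      }
    }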
diff --git a/tests/gtest/include/gtest/internal/gtest-port.h b/external/gtest/include/gtest/internal/gtest-port.h
old mode 100644
new mode 100755
similarity index 87%
rename from tests/gtest/include/gtest/internal/gtest-port.h
rename to external/gtest/include/gtest/internal/gtest-port.h
index 157b47f86a..2c87972d98
--- a/tests/gtest/include/gtest/internal/gtest-port.h
+++ b/external/gtest/include/gtest/internal/gtest-port.h
@@ -32,6 +32,10 @@
 // Low-level types and utilities for porting Google Test to various
 // platforms.  They are subject to change without notice.  DO NOT USE
 // THEM IN USER CODE.
+//
+// This file is fundamental to Google Test.  All other Google Test source
+// files are expected to #include this.  Therefore, it cannot #include
+// any other Google Test header.
 
 #ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
 #define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
@@ -72,6 +76,8 @@
 //                              Test's own tr1 tuple implementation should be
 //                              used.  Unused when the user sets
 //                              GTEST_HAS_TR1_TUPLE to 0.
+//   GTEST_LANG_CXX11         - Define it to 1/0 to indicate that Google Test
+//                              is building in C++11/C++98 mode.
 //   GTEST_LINKED_AS_SHARED_LIBRARY
 //                            - Define to 1 when compiling tests that use
 //                              Google Test as a shared library (known as
@@ -90,7 +96,11 @@
 //   GTEST_OS_LINUX           - Linux
 //     GTEST_OS_LINUX_ANDROID - Google Android
 //   GTEST_OS_MAC             - Mac OS X
+//     GTEST_OS_IOS           - iOS
+//       GTEST_OS_IOS_SIMULATOR - iOS simulator
 //   GTEST_OS_NACL            - Google Native Client (NaCl)
+//   GTEST_OS_OPENBSD         - OpenBSD
+//   GTEST_OS_QNX             - QNX
 //   GTEST_OS_SOLARIS         - Sun Solaris
 //   GTEST_OS_SYMBIAN         - Symbian
 //   GTEST_OS_WINDOWS         - Windows (Desktop, MinGW, or Mobile)
@@ -175,7 +185,7 @@
 //   GTEST_FLAG()       - references a flag.
 //   GTEST_DECLARE_*()  - declares a flag.
 //   GTEST_DEFINE_*()   - defines a flag.
-//   GetArgvs()         - returns the command line as a vector of strings.
+//   GetInjectableArgvs() - returns the command line as a vector of strings.
 //
 // Environment variable utilities:
 //   GetEnv()             - gets the value of an environment variable.
@@ -193,6 +203,11 @@
 # include <sys/stat.h>
 #endif  // !_WIN32_WCE
 
+#if defined __APPLE__
+# include <AvailabilityMacros.h>
+# include <TargetConditionals.h>
+#endif
+
 #include <iostream>  // NOLINT
 #include <sstream>  // NOLINT
 #include <string>  // NOLINT
@@ -227,11 +242,17 @@
 #  endif  // _WIN32_WCE
 #elif defined __APPLE__
 # define GTEST_OS_MAC 1
+# if TARGET_OS_IPHONE
+#  define GTEST_OS_IOS 1
+#  if TARGET_IPHONE_SIMULATOR
+#   define GTEST_OS_IOS_SIMULATOR 1
+#  endif
+# endif
 #elif defined __linux__
 # define GTEST_OS_LINUX 1
-# ifdef ANDROID
+# if defined __ANDROID__
 #  define GTEST_OS_LINUX_ANDROID 1
-# endif  // ANDROID
+# endif
 #elif defined __MVS__
 # define GTEST_OS_ZOS 1
 #elif defined(__sun) && defined(__SVR4)
@@ -242,8 +263,25 @@
 # define GTEST_OS_HPUX 1
 #elif defined __native_client__
 # define GTEST_OS_NACL 1
+#elif defined __OpenBSD__
+# define GTEST_OS_OPENBSD 1
+#elif defined __QNX__
+# define GTEST_OS_QNX 1
 #endif  // __CYGWIN__
 
+#ifndef GTEST_LANG_CXX11
+// gcc and clang define __GXX_EXPERIMENTAL_CXX0X__ when
+// -std={c,gnu}++{0x,11} is passed.  The C++11 standard specifies a
+// value for __cplusplus, and recent versions of clang, gcc, and
+// probably other compilers set that too in C++11 mode.
+# if __GXX_EXPERIMENTAL_CXX0X__ || __cplusplus >= 201103L
+// Compiling in at least C++11 mode.
+#  define GTEST_LANG_CXX11 1
+# else
+#  define GTEST_LANG_CXX11 0
+# endif
+#endif
+
 // Brings in definitions for functions used in the testing::internal::posix
 // namespace (read, write, close, chdir, isatty, stat).  We do not currently
 // use them on Windows Mobile.
@@ -252,20 +290,25 @@
 // is not the case, we need to include headers that provide the functions
 // mentioned above.
 # include <unistd.h>
-# if !GTEST_OS_NACL
-// TODO(vladl@google.com): Remove this condition when Native Client SDK adds
-// strings.h (tracked in
-// http://code.google.com/p/nativeclient/issues/detail?id=1175).
-#  include <strings.h>  // Native Client doesn't provide strings.h.
-# endif
+# include <strings.h>
 #elif !GTEST_OS_WINDOWS_MOBILE
 # include <direct.h>
 # include <io.h>
 #endif
 
+#if GTEST_OS_LINUX_ANDROID
+// Used to define __ANDROID_API__ matching the target NDK API level.
+# include <android/api-level.h>  // NOLINT
+#endif
+
 // Defines this to true iff Google Test can use POSIX regular expressions.
 #ifndef GTEST_HAS_POSIX_RE
-# define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS)
+# if GTEST_OS_LINUX_ANDROID
+// On Android, <regex.h> is only available starting with Gingerbread.
+#  define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9)
+# else
+#  define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS)
+# endif
 #endif
 
 #if GTEST_HAS_POSIX_RE
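The GTEST_LANG_CXX11 probe added above keys off __GXX_EXPERIMENTAL_CXX0X__ (gcc/clang with -std=c++0x/11) or the standard __cplusplus value. A self-contained illustration of the same probe — the macro name is gtest's, the demo program around it is not:

    #include <cstdio>

    #ifndef GTEST_LANG_CXX11
    # if __GXX_EXPERIMENTAL_CXX0X__ || __cplusplus >= 201103L
    #  define GTEST_LANG_CXX11 1
    # else
    #  define GTEST_LANG_CXX11 0
    # endif
    #endif

    int main() {
    #if GTEST_LANG_CXX11
      std::printf("compiled as C++11 or newer\n");
    #else
      std::printf("compiled as C++98/03\n");
    #endif
    }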
@@ -380,11 +423,27 @@
 
 # elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302)
 
 #  ifdef __GXX_RTTI
-#   define GTEST_HAS_RTTI 1
+// When building against STLport with the Android NDK and with
+// -frtti -fno-exceptions, the build fails at link time with undefined
+// references to __cxa_bad_typeid. Note sure if STL or toolchain bug,
+// so disable RTTI when detected.
+#   if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \
+       !defined(__EXCEPTIONS)
+#    define GTEST_HAS_RTTI 0
+#   else
+#    define GTEST_HAS_RTTI 1
+#   endif  // GTEST_OS_LINUX_ANDROID && __STLPORT_MAJOR && !__EXCEPTIONS
 #  else
 #   define GTEST_HAS_RTTI 0
 #  endif  // __GXX_RTTI
 
+// Clang defines __GXX_RTTI starting with version 3.0, but its manual recommends
+// using has_feature instead. has_feature(cxx_rtti) is supported since 2.7, the
+// first version with C++ support.
+# elif defined(__clang__)
+
+#  define GTEST_HAS_RTTI __has_feature(cxx_rtti)
+
 // Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if
 // both the typeid and dynamic_cast features are present.
 # elif defined(__IBMCPP__) && (__IBMCPP__ >= 900)
@@ -417,7 +476,8 @@
 //
 // To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0
 // to your compiler flags.
-# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX)
+# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX \
+    || GTEST_OS_QNX)
 #endif  // GTEST_HAS_PTHREAD
 
 #if GTEST_HAS_PTHREAD
@@ -433,8 +493,13 @@
 // this macro to 0 to prevent Google Test from using tuple (any
 // feature depending on tuple with be disabled in this mode).
 #ifndef GTEST_HAS_TR1_TUPLE
+# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR)
+// STLport, provided with the Android NDK, has neither <tr1/tuple> or <tuple>.
+#  define GTEST_HAS_TR1_TUPLE 0
+# else
 // The user didn't tell us not to do it, so we assume it's OK.
-# define GTEST_HAS_TR1_TUPLE 1
+#  define GTEST_HAS_TR1_TUPLE 1
+# endif
 #endif  // GTEST_HAS_TR1_TUPLE
 
 // Determines whether Google Test's own tr1 tuple implementation
@@ -443,14 +508,28 @@
 // The user didn't tell us, so we need to figure it out.
 
 // We use our own TR1 tuple if we aren't sure the user has an
-// implementation of it already.  At this time, GCC 4.0.0+ and MSVC
-// 2010 are the only mainstream compilers that come with a TR1 tuple
-// implementation.  NVIDIA's CUDA NVCC compiler pretends to be GCC by
-// defining __GNUC__ and friends, but cannot compile GCC's tuple
-// implementation.  MSVC 2008 (9.0) provides TR1 tuple in a 323 MB
-// Feature Pack download, which we cannot assume the user has.
-# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000)) \
-     || _MSC_VER >= 1600
+// implementation of it already.  At this time, libstdc++ 4.0.0+ and
+// MSVC 2010 are the only mainstream standard libraries that come
+// with a TR1 tuple implementation.  NVIDIA's CUDA NVCC compiler
+// pretends to be GCC by defining __GNUC__ and friends, but cannot
+// compile GCC's tuple implementation.  MSVC 2008 (9.0) provides TR1
+// tuple in a 323 MB Feature Pack download, which we cannot assume the
+// user has.  QNX's QCC compiler is a modified GCC but it doesn't
+// support TR1 tuple.  libc++ only provides std::tuple, in C++11 mode,
+// and it can be used with some compilers that define __GNUC__.
+# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000) \
+     && !GTEST_OS_QNX && !defined(_LIBCPP_VERSION)) || _MSC_VER >= 1600
+#  define GTEST_ENV_HAS_TR1_TUPLE_ 1
+# endif
+
+// C++11 specifies that <tuple> provides std::tuple. Use that if gtest is used
+// in C++11 mode and libstdc++ isn't very old (binaries targeting OS X 10.6
+// can build with clang but need to use gcc4.2's libstdc++).
+# if GTEST_LANG_CXX11 && (!defined(__GLIBCXX__) || __GLIBCXX__ > 20110325)
+#  define GTEST_ENV_HAS_STD_TUPLE_ 1
+# endif
+
+# if GTEST_ENV_HAS_TR1_TUPLE_ || GTEST_ENV_HAS_STD_TUPLE_
 #  define GTEST_USE_OWN_TR1_TUPLE 0
 # else
 #  define GTEST_USE_OWN_TR1_TUPLE 1
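When GTEST_ENV_HAS_STD_TUPLE_ wins, the hunk that follows aliases C++11's std::tuple into std::tr1 so older test code keeps compiling — formally undefined behavior, as gtest's own comment concedes. A sketch of what that buys, assuming a C++11 toolchain:

    // Re-opening namespace std is UB per the standard, but mainstream
    // compilers accept it; this mirrors the injection done in the diff below.
    #include <cassert>
    #include <tuple>

    namespace std {
    namespace tr1 {
    using ::std::get;
    using ::std::make_tuple;
    using ::std::tuple;
    }  // namespace tr1
    }  // namespace std

    int main() {
      std::tr1::tuple<int, char> t = std::tr1::make_tuple(7, 'x');
      assert(std::tr1::get<0>(t) == 7);
      assert(std::tr1::get<1>(t) == 'x');
    }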
@@ -465,6 +544,22 @@
 
 # if GTEST_USE_OWN_TR1_TUPLE
 #  include "gtest/internal/gtest-tuple.h"
+# elif GTEST_ENV_HAS_STD_TUPLE_
+#  include <tuple>
+// C++11 puts its tuple into the ::std namespace rather than
+// ::std::tr1.  gtest expects tuple to live in ::std::tr1, so put it there.
+// This causes undefined behavior, but supported compilers react in
+// the way we intend.
+namespace std {
+namespace tr1 {
+using ::std::get;
+using ::std::make_tuple;
+using ::std::tuple;
+using ::std::tuple_element;
+using ::std::tuple_size;
+}
+}
+
 # elif GTEST_OS_SYMBIAN
 
 // On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to
@@ -515,7 +610,16 @@
 // The user didn't tell us, so we need to figure it out.
 
 # if GTEST_OS_LINUX && !defined(__ia64__)
-#  define GTEST_HAS_CLONE 1
+#  if GTEST_OS_LINUX_ANDROID
+// On Android, clone() is only available on ARM starting with Gingerbread.
+#    if defined(__arm__) && __ANDROID_API__ >= 9
+#     define GTEST_HAS_CLONE 1
+#    else
+#     define GTEST_HAS_CLONE 0
+#    endif
+#  else
+#   define GTEST_HAS_CLONE 1
+#  endif
 # else
 #  define GTEST_HAS_CLONE 0
 # endif  // GTEST_OS_LINUX && !defined(__ia64__)
@@ -538,9 +642,11 @@
 // Google Test does not support death tests for VC 7.1 and earlier as
 // abort() in a VC 7.1 application compiled as GUI in debug config
 // pops up a dialog window that cannot be suppressed programmatically.
-#if (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+#if (GTEST_OS_LINUX || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+     (GTEST_OS_MAC && !GTEST_OS_IOS) || GTEST_OS_IOS_SIMULATOR || \
      (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \
-     GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX)
+     GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX || \
+     GTEST_OS_OPENBSD || GTEST_OS_QNX)
 # define GTEST_HAS_DEATH_TEST 1
 # include <vector>  // NOLINT
 #endif
@@ -669,13 +775,23 @@
 # define GTEST_NO_INLINE_
 #endif
 
+// _LIBCPP_VERSION is defined by the libc++ library from the LLVM project.
+#if defined(__GLIBCXX__) || defined(_LIBCPP_VERSION)
+# define GTEST_HAS_CXXABI_H_ 1
+#else
+# define GTEST_HAS_CXXABI_H_ 0
+#endif
+
 namespace testing {
 
 class Message;
 
 namespace internal {
 
-class String;
+// A secret type that Google Test users don't know about.  It has no
+// definition on purpose.  Therefore it's impossible to create a
+// Secret object, which is what we want.
+class Secret;
 
 // The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time
 // expression is true. For example, you could use it to verify the
@@ -697,8 +813,8 @@ struct CompileAssert {
 };
 
 #define GTEST_COMPILE_ASSERT_(expr, msg) \
-  typedef ::testing::internal::CompileAssert<(bool(expr))> \
-      msg[bool(expr) ? 1 : -1]
+  typedef ::testing::internal::CompileAssert<(static_cast<bool>(expr))> \
+      msg[static_cast<bool>(expr) ? 1 : -1] GTEST_ATTRIBUTE_UNUSED_
 
 // Implementation details of GTEST_COMPILE_ASSERT_:
 //
@@ -796,6 +912,7 @@ class scoped_ptr {
       ptr_ = p;
     }
   }
+
  private:
   T* ptr_;
 
@@ -858,10 +975,9 @@ class GTEST_API_ RE {
  private:
   void Init(const char* regex);
 
-  // We use a const char* instead of a string, as Google Test may be used
-  // where string is not available.  We also do not use Google Test's own
-  // String type here, in order to simplify dependencies between the
-  // files.
+  // We use a const char* instead of an std::string, as Google Test used to be
+  // used where std::string is not available.  TODO(wan@google.com): change to
+  // std::string.
   const char* pattern_;
   bool is_valid_;
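The GTEST_COMPILE_ASSERT_ rewrite above swaps bool(expr) for static_cast<bool>(expr) and marks the typedef unused; the underlying trick is unchanged: a negative array size makes the typedef ill-formed exactly when the condition is false. Shown standalone with a local stand-in macro (COMPILE_ASSERT here is ours, not gtest's):

    template <bool>
    struct CompileAssert {};

    #define COMPILE_ASSERT(expr, msg) \
      typedef CompileAssert<(static_cast<bool>(expr))> \
          msg[static_cast<bool>(expr) ? 1 : -1]

    COMPILE_ASSERT(sizeof(int) >= 2, int_is_at_least_16_bits);  // compiles
    // COMPILE_ASSERT(sizeof(int) == 1, would_fail);  // array of size -1

    int main() { return 0; }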
@@ -1044,20 +1160,21 @@ Derived* CheckedDowncastToActualType(Base* base) {
 //   GetCapturedStderr - stops capturing stderr and returns the captured string.
 //
 GTEST_API_ void CaptureStdout();
-GTEST_API_ String GetCapturedStdout();
+GTEST_API_ std::string GetCapturedStdout();
 GTEST_API_ void CaptureStderr();
-GTEST_API_ String GetCapturedStderr();
+GTEST_API_ std::string GetCapturedStderr();
 
 #endif  // GTEST_HAS_STREAM_REDIRECTION
 
 #if GTEST_HAS_DEATH_TEST
 
-// A copy of all command line arguments.  Set by InitGoogleTest().
-extern ::std::vector<testing::internal::string> g_argvs;
+const ::std::vector<testing::internal::string>& GetInjectableArgvs();
+void SetInjectableArgvs(const ::std::vector<testing::internal::string>*
+                             new_argvs);
 
-// GTEST_HAS_DEATH_TEST implies we have ::std::string.
-const ::std::vector<testing::internal::string>& GetArgvs();
+// A copy of all command line arguments.  Set by InitGoogleTest().
+extern ::std::vector<testing::internal::string> g_argvs;
 
 #endif  // GTEST_HAS_DEATH_TEST
 
@@ -1084,22 +1201,37 @@ inline void SleepMilliseconds(int n) {
 // use it in user tests, either directly or indirectly.
 class Notification {
  public:
-  Notification() : notified_(false) {}
+  Notification() : notified_(false) {
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+  }
+  ~Notification() {
+    pthread_mutex_destroy(&mutex_);
+  }
 
   // Notifies all threads created with this notification to start. Must
   // be called from the controller thread.
-  void Notify() { notified_ = true; }
+  void Notify() {
+    pthread_mutex_lock(&mutex_);
+    notified_ = true;
+    pthread_mutex_unlock(&mutex_);
+  }
 
   // Blocks until the controller thread notifies. Must be called from a test
   // thread.
   void WaitForNotification() {
-    while(!notified_) {
+    for (;;) {
+      pthread_mutex_lock(&mutex_);
+      const bool notified = notified_;
+      pthread_mutex_unlock(&mutex_);
+      if (notified)
+        break;
       SleepMilliseconds(10);
     }
   }
 
 private:
-  volatile bool notified_;
+  pthread_mutex_t mutex_;
+  bool notified_;
 
   GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification);
 };
@@ -1164,7 +1296,7 @@ class ThreadWithParam : public ThreadWithParamBase {
     }
   }
 
-  virtual void Run() {
+  virtual void Run() override {
     if (thread_can_start_ != NULL)
       thread_can_start_->WaitForNotification();
     func_(param_);
@@ -1207,21 +1339,23 @@ class MutexBase {
   void Lock() {
     GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));
     owner_ = pthread_self();
+    has_owner_ = true;
   }
 
   // Releases this mutex.
   void Unlock() {
-    // We don't protect writing to owner_ here, as it's the caller's
-    // responsibility to ensure that the current thread holds the
+    // Since the lock is being released the owner_ field should no longer be
+    // considered valid. We don't protect writing to has_owner_ here, as it's
+    // the caller's responsibility to ensure that the current thread holds the
     // mutex when this is called.
-    owner_ = 0;
+    has_owner_ = false;
     GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_));
   }
 
   // Does nothing if the current thread holds the mutex. Otherwise, crashes
   // with high probability.
   void AssertHeld() const {
-    GTEST_CHECK_(owner_ == pthread_self())
+    GTEST_CHECK_(has_owner_ && pthread_equal(owner_, pthread_self()))
         << "The current thread is not holding the mutex @" << this;
   }
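The Notification rewrite above replaces a data-racy volatile bool with a flag that is written and polled only under a pthread mutex. The same guarded-flag pattern in a standalone program — compile with -pthread; the 10 ms poll mirrors gtest's:

    #include <cstdio>
    #include <pthread.h>
    #include <unistd.h>

    static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
    static bool g_notified = false;

    static void Notify() {
      pthread_mutex_lock(&g_mutex);
      g_notified = true;                 // write is published under the lock
      pthread_mutex_unlock(&g_mutex);
    }

    static void WaitForNotification() {
      for (;;) {
        pthread_mutex_lock(&g_mutex);
        const bool notified = g_notified;  // read under the same lock
        pthread_mutex_unlock(&g_mutex);
        if (notified) break;
        usleep(10 * 1000);                 // poll every 10 ms, as gtest does
      }
    }

    static void* Worker(void*) { Notify(); return 0; }

    int main() {
      pthread_t thread;
      pthread_create(&thread, NULL, Worker, NULL);
      WaitForNotification();
      pthread_join(thread, NULL);
      std::puts("notified");
      return 0;
    }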
@@ -1232,7 +1366,14 @@ class MutexBase {
   // have to be public.
  public:
   pthread_mutex_t mutex_;  // The underlying pthread mutex.
-  pthread_t owner_;  // The thread holding the mutex; 0 means no one holds it.
+  // has_owner_ indicates whether the owner_ field below contains a valid thread
+  // ID and is therefore safe to inspect (e.g., to use in pthread_equal()). All
+  // accesses to the owner_ field should be protected by a check of this field.
+  // An alternative might be to memset() owner_ to all zeros, but there's no
+  // guarantee that a zero'd pthread_t is necessarily invalid or even different
+  // from pthread_self().
+  bool has_owner_;
+  pthread_t owner_;  // The thread holding the mutex.
 };
 
 // Forward-declares a static mutex.
@@ -1240,8 +1381,13 @@ class MutexBase {
     extern ::testing::internal::MutexBase mutex
 
 // Defines and statically (i.e. at link time) initializes a static mutex.
+// The initialization list here does not explicitly initialize each field,
+// instead relying on default initialization for the unspecified fields. In
+// particular, the owner_ field (a pthread_t) is not explicitly initialized.
+// This allows initialization to work whether pthread_t is a scalar or struct.
+// The flag -Wmissing-field-initializers must not be specified for this to work.
 # define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
-    ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, 0 }
+    ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, false }
 
 // The Mutex class can only be used for mutexes created at runtime. It
 // shares its API with MutexBase otherwise.
@@ -1249,7 +1395,7 @@ class Mutex : public MutexBase {
  public:
   Mutex() {
     GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
-    owner_ = 0;
+    has_owner_ = false;
   }
   ~Mutex() {
     GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_));
@@ -1399,6 +1545,8 @@ class ThreadLocal {
 class Mutex {
  public:
   Mutex() {}
+  void Lock() {}
+  void Unlock() {}
   void AssertHeld() const {}
 };
 
@@ -1529,6 +1677,10 @@ inline bool IsUpper(char ch) {
 inline bool IsXDigit(char ch) {
   return isxdigit(static_cast<unsigned char>(ch)) != 0;
 }
+inline bool IsXDigit(wchar_t ch) {
+  const unsigned char low_byte = static_cast<unsigned char>(ch);
+  return ch == low_byte && isxdigit(low_byte) != 0;
+}
 
 inline char ToLower(char ch) {
   return static_cast<char>(tolower(static_cast<unsigned char>(ch)));
@@ -1666,6 +1818,23 @@ inline void Abort() { abort(); }
 
 }  // namespace posix
 
+// MSVC "deprecates" snprintf and issues warnings wherever it is used.  In
+// order to avoid these warnings, we need to use _snprintf or _snprintf_s on
+// MSVC-based platforms.  We map the GTEST_SNPRINTF_ macro to the appropriate
+// function in order to achieve that.  We use macro definition here because
+// snprintf is a variadic function.
+#if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
+// MSVC 2005 and above support variadic macros.
+# define GTEST_SNPRINTF_(buffer, size, format, ...) \
+     _snprintf_s(buffer, size, size, format, __VA_ARGS__)
+#elif defined(_MSC_VER)
+// Windows CE does not define _snprintf_s and MSVC prior to 2005 doesn't
+// complain about _snprintf.
+# define GTEST_SNPRINTF_ _snprintf
+#else
+# define GTEST_SNPRINTF_ snprintf
+#endif
+
 // The maximum number a BiggestInt can represent.  This definition
 // works no matter BiggestInt is represented in one's complement or
 // two's complement.
@@ -1718,7 +1887,6 @@ class TypeWithSize<4> {
 template <>
 class TypeWithSize<8> {
 public:
-
 #if GTEST_OS_WINDOWS
   typedef __int64 Int;
   typedef unsigned __int64 UInt;
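TypeWithSize, touched by the hunk above, exists because this pre-C++11 codebase cannot assume <cstdint>; it selects fixed-width types by explicit specialization on the byte count. An equivalent sketch with a compile-time size check — the assumption that plain int is 32 bits matches gtest's own:

    template <int kSize> class TypeWithSize;

    template <> class TypeWithSize<4> {
     public:
      typedef int Int;             // assumes a 32-bit int, as gtest does
      typedef unsigned int UInt;
    };

    template <> class TypeWithSize<8> {
     public:
      typedef long long Int;       // the non-Windows branch of the diff
      typedef unsigned long long UInt;
    };

    // Compile-time check in the GTEST_COMPILE_ASSERT_ style:
    typedef char Int64IsEightBytes[sizeof(TypeWithSize<8>::Int) == 8 ? 1 : -1];

    int main() { return 0; }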
@@ -1745,7 +1913,7 @@ typedef TypeWithSize<8>::Int TimeInMillis;  // Represents time in milliseconds.
 #define GTEST_DECLARE_int32_(name) \
     GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name)
 #define GTEST_DECLARE_string_(name) \
-    GTEST_API_ extern ::testing::internal::String GTEST_FLAG(name)
+    GTEST_API_ extern ::std::string GTEST_FLAG(name)
 
 // Macros for defining flags.
 #define GTEST_DEFINE_bool_(name, default_val, doc) \
@@ -1753,7 +1921,11 @@ typedef TypeWithSize<8>::Int TimeInMillis;  // Represents time in milliseconds.
 #define GTEST_DEFINE_int32_(name, default_val, doc) \
     GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val)
 #define GTEST_DEFINE_string_(name, default_val, doc) \
-    GTEST_API_ ::testing::internal::String GTEST_FLAG(name) = (default_val)
+    GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val)
+
+// Thread annotations
+#define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)
+#define GTEST_LOCK_EXCLUDED_(locks)
 
 // Parses 'str' for a 32-bit signed integer.  If successful, writes the result
 // to *value and returns true; otherwise leaves *value unchanged and returns
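With gtest's internal String class removed, string-typed flags become plain ::std::string, so ordinary assignment and concatenation work on them directly. A reduced imitation of the flag machinery — the MY_-prefixed names are illustrative, not gtest's:

    #include <iostream>
    #include <string>

    #define MY_FLAG(name) FLAGS_##name
    #define MY_DEFINE_string_(name, default_val, doc) \
        ::std::string MY_FLAG(name) = (default_val)

    MY_DEFINE_string_(filter, "*", "Which tests to run.");

    int main() {
      MY_FLAG(filter) = "Small/*";  // plain std::string assignment
      std::cout << "filter = " << MY_FLAG(filter) << "\n";
    }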
diff --git a/external/gtest/include/gtest/internal/gtest-string.h b/external/gtest/include/gtest/internal/gtest-string.h
new file mode 100755
index 0000000000..97f1a7fdd2
--- /dev/null
+++ b/external/gtest/include/gtest/internal/gtest-string.h
@@ -0,0 +1,167 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares the String class and functions used internally by
+// Google Test.  They are subject to change without notice. They should not used
+// by code external to Google Test.
+//
+// This header file is #included by <gtest/internal/gtest-internal.h>.
+// It should not be #included by other files.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_
+
+#ifdef __BORLANDC__
+// string.h is not guaranteed to provide strcpy on C++ Builder.
+# include <mem.h>
+#endif
+
+#include <string.h>
+#include <string>
+
+#include "gtest/internal/gtest-port.h"
+
+namespace testing {
+namespace internal {
+
+// String - an abstract class holding static string utilities.
+class GTEST_API_ String {
+ public:
+  // Static utility methods
+
+  // Clones a 0-terminated C string, allocating memory using new.  The
+  // caller is responsible for deleting the return value using
+  // delete[].  Returns the cloned string, or NULL if the input is
+  // NULL.
+  //
+  // This is different from strdup() in string.h, which allocates
+  // memory using malloc().
+  static const char* CloneCString(const char* c_str);
+
+#if GTEST_OS_WINDOWS_MOBILE
+  // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be
+  // able to pass strings to Win32 APIs on CE we need to convert them
+  // to 'Unicode', UTF-16.
+
+  // Creates a UTF-16 wide string from the given ANSI string, allocating
+  // memory using new. The caller is responsible for deleting the return
+  // value using delete[]. Returns the wide string, or NULL if the
+  // input is NULL.
+  //
+  // The wide string is created using the ANSI codepage (CP_ACP) to
+  // match the behaviour of the ANSI versions of Win32 calls and the
+  // C runtime.
+  static LPCWSTR AnsiToUtf16(const char* c_str);
+
+  // Creates an ANSI string from the given wide string, allocating
+  // memory using new. The caller is responsible for deleting the return
+  // value using delete[]. Returns the ANSI string, or NULL if the
+  // input is NULL.
+  //
+  // The returned string is created using the ANSI codepage (CP_ACP) to
+  // match the behaviour of the ANSI versions of Win32 calls and the
+  // C runtime.
+  static const char* Utf16ToAnsi(LPCWSTR utf16_str);
+#endif
+
+  // Compares two C strings.  Returns true iff they have the same content.
+  //
+  // Unlike strcmp(), this function can handle NULL argument(s).  A
+  // NULL C string is considered different to any non-NULL C string,
+  // including the empty string.
+  static bool CStringEquals(const char* lhs, const char* rhs);
+
+  // Converts a wide C string to a String using the UTF-8 encoding.
+  // NULL will be converted to "(null)".  If an error occurred during
+  // the conversion, "(failed to convert from wide string)" is
+  // returned.
+  static std::string ShowWideCString(const wchar_t* wide_c_str);
+
+  // Compares two wide C strings.  Returns true iff they have the same
+  // content.
+  //
+  // Unlike wcscmp(), this function can handle NULL argument(s).  A
+  // NULL C string is considered different to any non-NULL C string,
+  // including the empty string.
+  static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs);
+
+  // Compares two C strings, ignoring case.  Returns true iff they
+  // have the same content.
+  //
+  // Unlike strcasecmp(), this function can handle NULL argument(s).
+  // A NULL C string is considered different to any non-NULL C string,
+  // including the empty string.
+  static bool CaseInsensitiveCStringEquals(const char* lhs,
+                                           const char* rhs);
+
+  // Compares two wide C strings, ignoring case.  Returns true iff they
+  // have the same content.
+  //
+  // Unlike wcscasecmp(), this function can handle NULL argument(s).
+  // A NULL C string is considered different to any non-NULL wide C string,
+  // including the empty string.
+  // NB: The implementations on different platforms slightly differ.
+  // On windows, this method uses _wcsicmp which compares according to LC_CTYPE
+  // environment variable. On GNU platform this method uses wcscasecmp
+  // which compares according to LC_CTYPE category of the current locale.
+  // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the
+  // current locale.
+  static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs,
+                                               const wchar_t* rhs);
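The comparison helpers declared above all share one contract that the raw C library functions lack: NULL is a legal argument that equals only NULL. A sketch of that contract for the case-insensitive variant — strcasecmp is POSIX, not ISO C++; MSVC spells it _stricmp:

    #include <cassert>
    #include <strings.h>  // strcasecmp

    static bool CaseInsensitiveCStringEquals(const char* lhs, const char* rhs) {
      if (lhs == NULL) return rhs == NULL;  // NULL equals only NULL
      if (rhs == NULL) return false;
      return strcasecmp(lhs, rhs) == 0;
    }

    int main() {
      assert(CaseInsensitiveCStringEquals("Hello", "hELLO"));
      assert(!CaseInsensitiveCStringEquals(NULL, ""));  // NULL != empty string
      assert(CaseInsensitiveCStringEquals(NULL, NULL));
      return 0;
    }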
Any string is considered to end with an empty suffix. + static bool EndsWithCaseInsensitive( + const std::string& str, const std::string& suffix); + + // Formats an int value as "%02d". + static std::string FormatIntWidth2(int value); // "%02d" for width == 2 + + // Formats an int value as "%X". + static std::string FormatHexInt(int value); + + // Formats a byte as "%02X". + static std::string FormatByte(unsigned char value); + + private: + String(); // Not meant to be instantiated. +}; // class String + +// Gets the content of the stringstream's buffer as an std::string. Each '\0' +// character in the buffer is replaced with "\\0". +GTEST_API_ std::string StringStreamToString(::std::stringstream* stream); + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ diff --git a/tests/gtest/include/gtest/internal/gtest-tuple.h b/external/gtest/include/gtest/internal/gtest-tuple.h old mode 100644 new mode 100755 similarity index 93% rename from tests/gtest/include/gtest/internal/gtest-tuple.h rename to external/gtest/include/gtest/internal/gtest-tuple.h index d1af50e188..7b3dfc312d --- a/tests/gtest/include/gtest/internal/gtest-tuple.h +++ b/external/gtest/include/gtest/internal/gtest-tuple.h @@ -1,4 +1,6 @@ -// This file was GENERATED by a script. DO NOT EDIT BY HAND!!! +// This file was GENERATED by command: +// pump.py gtest-tuple.h.pump +// DO NOT EDIT BY HAND!!! // Copyright 2009 Google Inc. // All Rights Reserved. @@ -140,34 +142,54 @@ template struct TupleElement; template -struct TupleElement { typedef T0 type; }; +struct TupleElement { + typedef T0 type; +}; template -struct TupleElement { typedef T1 type; }; +struct TupleElement { + typedef T1 type; +}; template -struct TupleElement { typedef T2 type; }; +struct TupleElement { + typedef T2 type; +}; template -struct TupleElement { typedef T3 type; }; +struct TupleElement { + typedef T3 type; +}; template -struct TupleElement { typedef T4 type; }; +struct TupleElement { + typedef T4 type; +}; template -struct TupleElement { typedef T5 type; }; +struct TupleElement { + typedef T5 type; +}; template -struct TupleElement { typedef T6 type; }; +struct TupleElement { + typedef T6 type; +}; template -struct TupleElement { typedef T7 type; }; +struct TupleElement { + typedef T7 type; +}; template -struct TupleElement { typedef T8 type; }; +struct TupleElement { + typedef T8 type; +}; template -struct TupleElement { typedef T9 type; }; +struct TupleElement { + typedef T9 type; +}; } // namespace gtest_internal @@ -708,37 +730,59 @@ inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, template struct tuple_size; template -struct tuple_size { static const int value = 0; }; +struct tuple_size { + static const int value = 0; +}; template -struct tuple_size { static const int value = 1; }; +struct tuple_size { + static const int value = 1; +}; template -struct tuple_size { static const int value = 2; }; +struct tuple_size { + static const int value = 2; +}; template -struct tuple_size { static const int value = 3; }; +struct tuple_size { + static const int value = 3; +}; template -struct tuple_size { static const int value = 4; }; +struct tuple_size { + static const int value = 4; +}; template -struct tuple_size { static const int value = 5; }; +struct tuple_size { + static const int value = 5; +}; template -struct tuple_size { static const int value = 6; }; +struct tuple_size { + static const int value = 6; +}; template -struct tuple_size { static const int value = 7; }; 
+struct tuple_size { + static const int value = 7; +}; template -struct tuple_size { static const int value = 8; }; +struct tuple_size { + static const int value = 8; +}; template -struct tuple_size { static const int value = 9; }; +struct tuple_size { + static const int value = 9; +}; template -struct tuple_size { static const int value = 10; }; +struct tuple_size { + static const int value = 10; +}; template struct tuple_element { @@ -922,8 +966,8 @@ template inline bool operator==(const GTEST_10_TUPLE_(T)& t, const GTEST_10_TUPLE_(U)& u) { return gtest_internal::SameSizeTuplePrefixComparator< - tuple_size::value, - tuple_size::value>::Eq(t, u); + tuple_size::value, + tuple_size::value>::Eq(t, u); } template diff --git a/tests/gtest/include/gtest/internal/gtest-tuple.h.pump b/external/gtest/include/gtest/internal/gtest-tuple.h.pump old mode 100644 new mode 100755 similarity index 97% rename from tests/gtest/include/gtest/internal/gtest-tuple.h.pump rename to external/gtest/include/gtest/internal/gtest-tuple.h.pump index ef519094a6..c7d9e039b1 --- a/tests/gtest/include/gtest/internal/gtest-tuple.h.pump +++ b/external/gtest/include/gtest/internal/gtest-tuple.h.pump @@ -118,8 +118,9 @@ struct TupleElement; $for i [[ template -struct TupleElement [[]] -{ typedef T$i type; }; +struct TupleElement { + typedef T$i type; +}; ]] @@ -220,7 +221,9 @@ template struct tuple_size; $for j [[ template -struct tuple_size { static const int value = $j; }; +struct tuple_size { + static const int value = $j; +}; ]] @@ -302,8 +305,8 @@ template inline bool operator==(const GTEST_$(n)_TUPLE_(T)& t, const GTEST_$(n)_TUPLE_(U)& u) { return gtest_internal::SameSizeTuplePrefixComparator< - tuple_size::value, - tuple_size::value>::Eq(t, u); + tuple_size::value, + tuple_size::value>::Eq(t, u); } template diff --git a/tests/gtest/include/gtest/internal/gtest-type-util.h b/external/gtest/include/gtest/internal/gtest-type-util.h old mode 100644 new mode 100755 similarity index 99% rename from tests/gtest/include/gtest/internal/gtest-type-util.h rename to external/gtest/include/gtest/internal/gtest-type-util.h index b7b01b0948..e46f7cfcb4 --- a/tests/gtest/include/gtest/internal/gtest-type-util.h +++ b/external/gtest/include/gtest/internal/gtest-type-util.h @@ -45,15 +45,14 @@ #define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ #include "gtest/internal/gtest-port.h" -#include "gtest/internal/gtest-string.h" // #ifdef __GNUC__ is too general here. It is possible to use gcc without using // libstdc++ (which is where cxxabi.h comes from). -# ifdef __GLIBCXX__ +# if GTEST_HAS_CXXABI_H_ # include # elif defined(__HP_aCC) # include -# endif // __GLIBCXX__ +# endif // GTEST_HASH_CXXABI_H_ namespace testing { namespace internal { @@ -62,24 +61,24 @@ namespace internal { // NB: This function is also used in Google Mock, so don't move it inside of // the typed-test-only section below. template -String GetTypeName() { +std::string GetTypeName() { # if GTEST_HAS_RTTI const char* const name = typeid(T).name(); -# if defined(__GLIBCXX__) || defined(__HP_aCC) +# if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC) int status = 0; // gcc's implementation of typeid(T).name() mangles the type name, // so we have to demangle it. -# ifdef __GLIBCXX__ +# if GTEST_HAS_CXXABI_H_ using abi::__cxa_demangle; -# endif // __GLIBCXX__ +# endif // GTEST_HAS_CXXABI_H_ char* const readable_name = __cxa_demangle(name, 0, 0, &status); - const String name_str(status == 0 ? readable_name : name); + const std::string name_str(status == 0 ? 
readable_name : name); free(readable_name); return name_str; # else return name; -# endif // __GLIBCXX__ || __HP_aCC +# endif // GTEST_HAS_CXXABI_H_ || __HP_aCC # else @@ -3300,7 +3299,9 @@ struct Templates -struct TypeList { typedef Types1 type; }; +struct TypeList { + typedef Types1 type; +}; template # elif defined(__HP_aCC) # include -# endif // __GLIBCXX__ +# endif // GTEST_HASH_CXXABI_H_ namespace testing { namespace internal { @@ -60,24 +59,24 @@ namespace internal { // NB: This function is also used in Google Mock, so don't move it inside of // the typed-test-only section below. template -String GetTypeName() { +std::string GetTypeName() { # if GTEST_HAS_RTTI const char* const name = typeid(T).name(); -# if defined(__GLIBCXX__) || defined(__HP_aCC) +# if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC) int status = 0; // gcc's implementation of typeid(T).name() mangles the type name, // so we have to demangle it. -# ifdef __GLIBCXX__ +# if GTEST_HAS_CXXABI_H_ using abi::__cxa_demangle; -# endif // __GLIBCXX__ +# endif // GTEST_HAS_CXXABI_H_ char* const readable_name = __cxa_demangle(name, 0, 0, &status); - const String name_str(status == 0 ? readable_name : name); + const std::string name_str(status == 0 ? readable_name : name); free(readable_name); return name_str; # else return name; -# endif // __GLIBCXX__ || __HP_aCC +# endif // GTEST_HAS_CXXABI_H_ || __HP_aCC # else @@ -279,7 +278,9 @@ struct Templates<$for j, [[T$j]]$for k[[, NoneT]]> { // INSTANTIATE_TYPED_TEST_CASE_P(). template -struct TypeList { typedef Types1 type; }; +struct TypeList { + typedef Types1 type; +}; $range i 1..n diff --git a/external/gtest/m4/acx_pthread.m4 b/external/gtest/m4/acx_pthread.m4 new file mode 100755 index 0000000000..2cf20de144 --- /dev/null +++ b/external/gtest/m4/acx_pthread.m4 @@ -0,0 +1,363 @@ +# This was retrieved from +# http://svn.0pointer.de/viewvc/trunk/common/acx_pthread.m4?revision=1277&root=avahi +# See also (perhaps for new versions?) +# http://svn.0pointer.de/viewvc/trunk/common/acx_pthread.m4?root=avahi +# +# We've rewritten the inconsistency check code (from avahi), to work +# more broadly. In particular, it no longer assumes ld accepts -zdefs. +# This caused a restructing of the code, but the functionality has only +# changed a little. + +dnl @synopsis ACX_PTHREAD([ACTION-IF-FOUND[, ACTION-IF-NOT-FOUND]]) +dnl +dnl @summary figure out how to build C programs using POSIX threads +dnl +dnl This macro figures out how to build C programs using POSIX threads. +dnl It sets the PTHREAD_LIBS output variable to the threads library and +dnl linker flags, and the PTHREAD_CFLAGS output variable to any special +dnl C compiler flags that are needed. (The user can also force certain +dnl compiler flags/libs to be tested by setting these environment +dnl variables.) +dnl +dnl Also sets PTHREAD_CC to any special C compiler that is needed for +dnl multi-threaded programs (defaults to the value of CC otherwise). +dnl (This is necessary on AIX to use the special cc_r compiler alias.) +dnl +dnl NOTE: You are assumed to not only compile your program with these +dnl flags, but also link it with them as well. e.g. you should link +dnl with $PTHREAD_CC $CFLAGS $PTHREAD_CFLAGS $LDFLAGS ... 
$PTHREAD_LIBS +dnl $LIBS +dnl +dnl If you are only building threads programs, you may wish to use +dnl these variables in your default LIBS, CFLAGS, and CC: +dnl +dnl LIBS="$PTHREAD_LIBS $LIBS" +dnl CFLAGS="$CFLAGS $PTHREAD_CFLAGS" +dnl CC="$PTHREAD_CC" +dnl +dnl In addition, if the PTHREAD_CREATE_JOINABLE thread-attribute +dnl constant has a nonstandard name, defines PTHREAD_CREATE_JOINABLE to +dnl that name (e.g. PTHREAD_CREATE_UNDETACHED on AIX). +dnl +dnl ACTION-IF-FOUND is a list of shell commands to run if a threads +dnl library is found, and ACTION-IF-NOT-FOUND is a list of commands to +dnl run it if it is not found. If ACTION-IF-FOUND is not specified, the +dnl default action will define HAVE_PTHREAD. +dnl +dnl Please let the authors know if this macro fails on any platform, or +dnl if you have any other suggestions or comments. This macro was based +dnl on work by SGJ on autoconf scripts for FFTW (www.fftw.org) (with +dnl help from M. Frigo), as well as ac_pthread and hb_pthread macros +dnl posted by Alejandro Forero Cuervo to the autoconf macro repository. +dnl We are also grateful for the helpful feedback of numerous users. +dnl +dnl @category InstalledPackages +dnl @author Steven G. Johnson +dnl @version 2006-05-29 +dnl @license GPLWithACException +dnl +dnl Checks for GCC shared/pthread inconsistency based on work by +dnl Marcin Owsiany + + +AC_DEFUN([ACX_PTHREAD], [ +AC_REQUIRE([AC_CANONICAL_HOST]) +AC_LANG_SAVE +AC_LANG_C +acx_pthread_ok=no + +# We used to check for pthread.h first, but this fails if pthread.h +# requires special compiler flags (e.g. on True64 or Sequent). +# It gets checked for in the link test anyway. + +# First of all, check if the user has set any of the PTHREAD_LIBS, +# etcetera environment variables, and if threads linking works using +# them: +if test x"$PTHREAD_LIBS$PTHREAD_CFLAGS" != x; then + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + AC_MSG_CHECKING([for pthread_join in LIBS=$PTHREAD_LIBS with CFLAGS=$PTHREAD_CFLAGS]) + AC_TRY_LINK_FUNC(pthread_join, acx_pthread_ok=yes) + AC_MSG_RESULT($acx_pthread_ok) + if test x"$acx_pthread_ok" = xno; then + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" + fi + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" +fi + +# We must check for the threads library under a number of different +# names; the ordering is very important because some systems +# (e.g. DEC) have both -lpthread and -lpthreads, where one of the +# libraries is broken (non-POSIX). + +# Create a list of thread flags to try. Items starting with a "-" are +# C compiler flags, and other items are library names, except for "none" +# which indicates that we try without any flags at all, and "pthread-config" +# which is a program returning the flags for the Pth emulation library. + +acx_pthread_flags="pthreads none -Kthread -kthread lthread -pthread -pthreads -mthreads pthread --thread-safe -mt pthread-config" + +# The ordering *is* (sometimes) important. 
Some notes on the +# individual items follow: + +# pthreads: AIX (must check this before -lpthread) +# none: in case threads are in libc; should be tried before -Kthread and +# other compiler flags to prevent continual compiler warnings +# -Kthread: Sequent (threads in libc, but -Kthread needed for pthread.h) +# -kthread: FreeBSD kernel threads (preferred to -pthread since SMP-able) +# lthread: LinuxThreads port on FreeBSD (also preferred to -pthread) +# -pthread: Linux/gcc (kernel threads), BSD/gcc (userland threads) +# -pthreads: Solaris/gcc +# -mthreads: Mingw32/gcc, Lynx/gcc +# -mt: Sun Workshop C (may only link SunOS threads [-lthread], but it +# doesn't hurt to check since this sometimes defines pthreads too; +# also defines -D_REENTRANT) +# ... -mt is also the pthreads flag for HP/aCC +# pthread: Linux, etcetera +# --thread-safe: KAI C++ +# pthread-config: use pthread-config program (for GNU Pth library) + +case "${host_cpu}-${host_os}" in + *solaris*) + + # On Solaris (at least, for some versions), libc contains stubbed + # (non-functional) versions of the pthreads routines, so link-based + # tests will erroneously succeed. (We need to link with -pthreads/-mt/ + # -lpthread.) (The stubs are missing pthread_cleanup_push, or rather + # a function called by this macro, so we could check for that, but + # who knows whether they'll stub that too in a future libc.) So, + # we'll just look for -pthreads and -lpthread first: + + acx_pthread_flags="-pthreads pthread -mt -pthread $acx_pthread_flags" + ;; +esac + +if test x"$acx_pthread_ok" = xno; then +for flag in $acx_pthread_flags; do + + case $flag in + none) + AC_MSG_CHECKING([whether pthreads work without any flags]) + ;; + + -*) + AC_MSG_CHECKING([whether pthreads work with $flag]) + PTHREAD_CFLAGS="$flag" + ;; + + pthread-config) + AC_CHECK_PROG(acx_pthread_config, pthread-config, yes, no) + if test x"$acx_pthread_config" = xno; then continue; fi + PTHREAD_CFLAGS="`pthread-config --cflags`" + PTHREAD_LIBS="`pthread-config --ldflags` `pthread-config --libs`" + ;; + + *) + AC_MSG_CHECKING([for the pthreads library -l$flag]) + PTHREAD_LIBS="-l$flag" + ;; + esac + + save_LIBS="$LIBS" + save_CFLAGS="$CFLAGS" + LIBS="$PTHREAD_LIBS $LIBS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + + # Check for various functions. We must include pthread.h, + # since some functions may be macros. (On the Sequent, we + # need a special flag -Kthread to make this header compile.) + # We check for pthread_join because it is in -lpthread on IRIX + # while pthread_create is in libc. We check for pthread_attr_init + # due to DEC craziness with -lpthreads. We check for + # pthread_cleanup_push because it is one of the few pthread + # functions on Solaris that doesn't have a non-functional libc stub. + # We try pthread_create on general principles. + AC_TRY_LINK([#include ], + [pthread_t th; pthread_join(th, 0); + pthread_attr_init(0); pthread_cleanup_push(0, 0); + pthread_create(0,0,0,0); pthread_cleanup_pop(0); ], + [acx_pthread_ok=yes]) + + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + + AC_MSG_RESULT($acx_pthread_ok) + if test "x$acx_pthread_ok" = xyes; then + break; + fi + + PTHREAD_LIBS="" + PTHREAD_CFLAGS="" +done +fi + +# Various other checks: +if test "x$acx_pthread_ok" = xyes; then + save_LIBS="$LIBS" + LIBS="$PTHREAD_LIBS $LIBS" + save_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS $PTHREAD_CFLAGS" + + # Detect AIX lossage: JOINABLE attribute is called UNDETACHED. 
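As an aside, the conftest program that AC_TRY_LINK assembles above only needs to compile and link, never to run. A rough, self-contained C++ equivalent of what each candidate flag must be able to build is sketched below; the file name and the use of a real worker function are illustrative, not part of the macro.

    // pthread_probe.cpp -- hypothetical stand-in for the configure probe:
    //   $PTHREAD_CC pthread_probe.cpp $PTHREAD_CFLAGS $PTHREAD_LIBS
    // It touches the same symbols the macro checks for: pthread_join
    // (in -lpthread on IRIX), pthread_attr_init (the DEC -lpthreads quirk),
    // and pthread_create (tried on general principles).
    #include <pthread.h>

    static void* worker(void*) { return nullptr; }

    int main() {
        pthread_t th;
        pthread_attr_t attr;
        if (pthread_attr_init(&attr) != 0) return 1;
        if (pthread_create(&th, &attr, worker, nullptr) != 0) return 1;
        pthread_join(th, nullptr);
        pthread_attr_destroy(&attr);
        return 0;
    }

The sketch omits pthread_cleanup_push/pop, which the real probe also references because they are among the few pthread entry points Solaris does not stub out in libc. With that aside, the AIX joinable-attribute check announced just above follows.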
+ AC_MSG_CHECKING([for joinable pthread attribute]) + attr_name=unknown + for attr in PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED; do + AC_TRY_LINK([#include ], [int attr=$attr; return attr;], + [attr_name=$attr; break]) + done + AC_MSG_RESULT($attr_name) + if test "$attr_name" != PTHREAD_CREATE_JOINABLE; then + AC_DEFINE_UNQUOTED(PTHREAD_CREATE_JOINABLE, $attr_name, + [Define to necessary symbol if this constant + uses a non-standard name on your system.]) + fi + + AC_MSG_CHECKING([if more special flags are required for pthreads]) + flag=no + case "${host_cpu}-${host_os}" in + *-aix* | *-freebsd* | *-darwin*) flag="-D_THREAD_SAFE";; + *solaris* | *-osf* | *-hpux*) flag="-D_REENTRANT";; + esac + AC_MSG_RESULT(${flag}) + if test "x$flag" != xno; then + PTHREAD_CFLAGS="$flag $PTHREAD_CFLAGS" + fi + + LIBS="$save_LIBS" + CFLAGS="$save_CFLAGS" + # More AIX lossage: must compile with xlc_r or cc_r + if test x"$GCC" != xyes; then + AC_CHECK_PROGS(PTHREAD_CC, xlc_r cc_r, ${CC}) + else + PTHREAD_CC=$CC + fi + + # The next part tries to detect GCC inconsistency with -shared on some + # architectures and systems. The problem is that in certain + # configurations, when -shared is specified, GCC "forgets" to + # internally use various flags which are still necessary. + + # + # Prepare the flags + # + save_CFLAGS="$CFLAGS" + save_LIBS="$LIBS" + save_CC="$CC" + + # Try with the flags determined by the earlier checks. + # + # -Wl,-z,defs forces link-time symbol resolution, so that the + # linking checks with -shared actually have any value + # + # FIXME: -fPIC is required for -shared on many architectures, + # so we specify it here, but the right way would probably be to + # properly detect whether it is actually required. + CFLAGS="-shared -fPIC -Wl,-z,defs $CFLAGS $PTHREAD_CFLAGS" + LIBS="$PTHREAD_LIBS $LIBS" + CC="$PTHREAD_CC" + + # In order not to create several levels of indentation, we test + # the value of "$done" until we find the cure or run out of ideas. + done="no" + + # First, make sure the CFLAGS we added are actually accepted by our + # compiler. If not (and OS X's ld, for instance, does not accept -z), + # then we can't do this test. 
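The practical payoff of the joinable-attribute check above is that client code can use the POSIX spelling unconditionally. A minimal sketch, assuming a config.h generated by a configure script that ran this macro (the include is hypothetical; the define shown is what AC_DEFINE_UNQUOTED would emit on old AIX):

    // joinable.cpp -- on old AIX, config.h is expected to contain
    //   #define PTHREAD_CREATE_JOINABLE PTHREAD_CREATE_UNDETACHED
    // so the portable spelling below still compiles there.
    #include <pthread.h>
    // #include "config.h"   // hypothetical: supplied by the build system

    int main() {
        pthread_attr_t attr;
        pthread_attr_init(&attr);
        pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
        pthread_attr_destroy(&attr);
        return 0;
    }

With the flag variables settled, the GCC -shared consistency probe prepared above proceeds through the "$done" tests below.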
+ if test x"$done" = xno; then + AC_MSG_CHECKING([whether to check for GCC pthread/shared inconsistencies]) + AC_TRY_LINK(,, , [done=yes]) + + if test "x$done" = xyes ; then + AC_MSG_RESULT([no]) + else + AC_MSG_RESULT([yes]) + fi + fi + + if test x"$done" = xno; then + AC_MSG_CHECKING([whether -pthread is sufficient with -shared]) + AC_TRY_LINK([#include ], + [pthread_t th; pthread_join(th, 0); + pthread_attr_init(0); pthread_cleanup_push(0, 0); + pthread_create(0,0,0,0); pthread_cleanup_pop(0); ], + [done=yes]) + + if test "x$done" = xyes; then + AC_MSG_RESULT([yes]) + else + AC_MSG_RESULT([no]) + fi + fi + + # + # Linux gcc on some architectures such as mips/mipsel forgets + # about -lpthread + # + if test x"$done" = xno; then + AC_MSG_CHECKING([whether -lpthread fixes that]) + LIBS="-lpthread $PTHREAD_LIBS $save_LIBS" + AC_TRY_LINK([#include ], + [pthread_t th; pthread_join(th, 0); + pthread_attr_init(0); pthread_cleanup_push(0, 0); + pthread_create(0,0,0,0); pthread_cleanup_pop(0); ], + [done=yes]) + + if test "x$done" = xyes; then + AC_MSG_RESULT([yes]) + PTHREAD_LIBS="-lpthread $PTHREAD_LIBS" + else + AC_MSG_RESULT([no]) + fi + fi + # + # FreeBSD 4.10 gcc forgets to use -lc_r instead of -lc + # + if test x"$done" = xno; then + AC_MSG_CHECKING([whether -lc_r fixes that]) + LIBS="-lc_r $PTHREAD_LIBS $save_LIBS" + AC_TRY_LINK([#include ], + [pthread_t th; pthread_join(th, 0); + pthread_attr_init(0); pthread_cleanup_push(0, 0); + pthread_create(0,0,0,0); pthread_cleanup_pop(0); ], + [done=yes]) + + if test "x$done" = xyes; then + AC_MSG_RESULT([yes]) + PTHREAD_LIBS="-lc_r $PTHREAD_LIBS" + else + AC_MSG_RESULT([no]) + fi + fi + if test x"$done" = xno; then + # OK, we have run out of ideas + AC_MSG_WARN([Impossible to determine how to use pthreads with shared libraries]) + + # so it's not safe to assume that we may use pthreads + acx_pthread_ok=no + fi + + CFLAGS="$save_CFLAGS" + LIBS="$save_LIBS" + CC="$save_CC" +else + PTHREAD_CC="$CC" +fi + +AC_SUBST(PTHREAD_LIBS) +AC_SUBST(PTHREAD_CFLAGS) +AC_SUBST(PTHREAD_CC) + +# Finally, execute ACTION-IF-FOUND/ACTION-IF-NOT-FOUND: +if test x"$acx_pthread_ok" = xyes; then + ifelse([$1],,AC_DEFINE(HAVE_PTHREAD,1,[Define if you have POSIX threads libraries and header files.]),[$1]) + : +else + acx_pthread_ok=no + $2 +fi +AC_LANG_RESTORE +])dnl ACX_PTHREAD diff --git a/external/gtest/m4/gtest.m4 b/external/gtest/m4/gtest.m4 new file mode 100755 index 0000000000..6598ba75a4 --- /dev/null +++ b/external/gtest/m4/gtest.m4 @@ -0,0 +1,74 @@ +dnl GTEST_LIB_CHECK([minimum version [, +dnl action if found [,action if not found]]]) +dnl +dnl Check for the presence of the Google Test library, optionally at a minimum +dnl version, and indicate a viable version with the HAVE_GTEST flag. It defines +dnl standard variables for substitution including GTEST_CPPFLAGS, +dnl GTEST_CXXFLAGS, GTEST_LDFLAGS, and GTEST_LIBS. It also defines +dnl GTEST_VERSION as the version of Google Test found. Finally, it provides +dnl optional custom action slots in the event GTEST is found or not. +AC_DEFUN([GTEST_LIB_CHECK], +[ +dnl Provide a flag to enable or disable Google Test usage. +AC_ARG_ENABLE([gtest], + [AS_HELP_STRING([--enable-gtest], + [Enable tests using the Google C++ Testing Framework. 
+ (Default is enabled.)])], + [], + [enable_gtest=]) +AC_ARG_VAR([GTEST_CONFIG], + [The exact path of Google Test's 'gtest-config' script.]) +AC_ARG_VAR([GTEST_CPPFLAGS], + [C-like preprocessor flags for Google Test.]) +AC_ARG_VAR([GTEST_CXXFLAGS], + [C++ compile flags for Google Test.]) +AC_ARG_VAR([GTEST_LDFLAGS], + [Linker path and option flags for Google Test.]) +AC_ARG_VAR([GTEST_LIBS], + [Library linking flags for Google Test.]) +AC_ARG_VAR([GTEST_VERSION], + [The version of Google Test available.]) +HAVE_GTEST="no" +AS_IF([test "x${enable_gtest}" != "xno"], + [AC_MSG_CHECKING([for 'gtest-config']) + AS_IF([test "x${enable_gtest}" != "xyes"], + [AS_IF([test -x "${enable_gtest}/scripts/gtest-config"], + [GTEST_CONFIG="${enable_gtest}/scripts/gtest-config"], + [GTEST_CONFIG="${enable_gtest}/bin/gtest-config"]) + AS_IF([test -x "${GTEST_CONFIG}"], [], + [AC_MSG_RESULT([no]) + AC_MSG_ERROR([dnl +Unable to locate either a built or installed Google Test. +The specific location '${enable_gtest}' was provided for a built or installed +Google Test, but no 'gtest-config' script could be found at this location.]) + ])], + [AC_PATH_PROG([GTEST_CONFIG], [gtest-config])]) + AS_IF([test -x "${GTEST_CONFIG}"], + [AC_MSG_RESULT([${GTEST_CONFIG}]) + m4_ifval([$1], + [_gtest_min_version="--min-version=$1" + AC_MSG_CHECKING([for Google Test at least version >= $1])], + [_gtest_min_version="--min-version=0" + AC_MSG_CHECKING([for Google Test])]) + AS_IF([${GTEST_CONFIG} ${_gtest_min_version}], + [AC_MSG_RESULT([yes]) + HAVE_GTEST='yes'], + [AC_MSG_RESULT([no])])], + [AC_MSG_RESULT([no])]) + AS_IF([test "x${HAVE_GTEST}" = "xyes"], + [GTEST_CPPFLAGS=`${GTEST_CONFIG} --cppflags` + GTEST_CXXFLAGS=`${GTEST_CONFIG} --cxxflags` + GTEST_LDFLAGS=`${GTEST_CONFIG} --ldflags` + GTEST_LIBS=`${GTEST_CONFIG} --libs` + GTEST_VERSION=`${GTEST_CONFIG} --version` + AC_DEFINE([HAVE_GTEST],[1],[Defined when Google Test is available.])], + [AS_IF([test "x${enable_gtest}" = "xyes"], + [AC_MSG_ERROR([dnl +Google Test was enabled, but no viable version could be found.]) + ])])]) +AC_SUBST([HAVE_GTEST]) +AM_CONDITIONAL([HAVE_GTEST],[test "x$HAVE_GTEST" = "xyes"]) +AS_IF([test "x$HAVE_GTEST" = "xyes"], + [m4_ifval([$2], [$2])], + [m4_ifval([$3], [$3])]) +]) diff --git a/external/gtest/m4/libtool.m4 b/external/gtest/m4/libtool.m4 new file mode 100755 index 0000000000..828104cfde --- /dev/null +++ b/external/gtest/m4/libtool.m4 @@ -0,0 +1,8001 @@ +# libtool.m4 - Configure libtool for the host system. -*-Autoconf-*- +# +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +m4_define([_LT_COPYING], [dnl +# Copyright (C) 1996, 1997, 1998, 1999, 2000, 2001, 2003, 2004, 2005, +# 2006, 2007, 2008, 2009, 2010, 2011 Free Software +# Foundation, Inc. +# Written by Gordon Matzigkeit, 1996 +# +# This file is part of GNU Libtool. +# +# GNU Libtool is free software; you can redistribute it and/or +# modify it under the terms of the GNU General Public License as +# published by the Free Software Foundation; either version 2 of +# the License, or (at your option) any later version. 
+# +# As a special exception to the GNU General Public License, +# if you distribute this file as part of a program or library that +# is built using GNU Libtool, you may include this file under the +# same distribution terms that you use for the rest of that program. +# +# GNU Libtool is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with GNU Libtool; see the file COPYING. If not, a copy +# can be downloaded from http://www.gnu.org/licenses/gpl.html, or +# obtained by writing to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. +]) + +# serial 57 LT_INIT + + +# LT_PREREQ(VERSION) +# ------------------ +# Complain and exit if this libtool version is less that VERSION. +m4_defun([LT_PREREQ], +[m4_if(m4_version_compare(m4_defn([LT_PACKAGE_VERSION]), [$1]), -1, + [m4_default([$3], + [m4_fatal([Libtool version $1 or higher is required], + 63)])], + [$2])]) + + +# _LT_CHECK_BUILDDIR +# ------------------ +# Complain if the absolute build directory name contains unusual characters +m4_defun([_LT_CHECK_BUILDDIR], +[case `pwd` in + *\ * | *\ *) + AC_MSG_WARN([Libtool does not cope well with whitespace in `pwd`]) ;; +esac +]) + + +# LT_INIT([OPTIONS]) +# ------------------ +AC_DEFUN([LT_INIT], +[AC_PREREQ([2.58])dnl We use AC_INCLUDES_DEFAULT +AC_REQUIRE([AC_CONFIG_AUX_DIR_DEFAULT])dnl +AC_BEFORE([$0], [LT_LANG])dnl +AC_BEFORE([$0], [LT_OUTPUT])dnl +AC_BEFORE([$0], [LTDL_INIT])dnl +m4_require([_LT_CHECK_BUILDDIR])dnl + +dnl Autoconf doesn't catch unexpanded LT_ macros by default: +m4_pattern_forbid([^_?LT_[A-Z_]+$])dnl +m4_pattern_allow([^(_LT_EOF|LT_DLGLOBAL|LT_DLLAZY_OR_NOW|LT_MULTI_MODULE)$])dnl +dnl aclocal doesn't pull ltoptions.m4, ltsugar.m4, or ltversion.m4 +dnl unless we require an AC_DEFUNed macro: +AC_REQUIRE([LTOPTIONS_VERSION])dnl +AC_REQUIRE([LTSUGAR_VERSION])dnl +AC_REQUIRE([LTVERSION_VERSION])dnl +AC_REQUIRE([LTOBSOLETE_VERSION])dnl +m4_require([_LT_PROG_LTMAIN])dnl + +_LT_SHELL_INIT([SHELL=${CONFIG_SHELL-/bin/sh}]) + +dnl Parse OPTIONS +_LT_SET_OPTIONS([$0], [$1]) + +# This can be used to rebuild libtool when needed +LIBTOOL_DEPS="$ltmain" + +# Always use our own libtool. +LIBTOOL='$(SHELL) $(top_builddir)/libtool' +AC_SUBST(LIBTOOL)dnl + +_LT_SETUP + +# Only expand once: +m4_define([LT_INIT]) +])# LT_INIT + +# Old names: +AU_ALIAS([AC_PROG_LIBTOOL], [LT_INIT]) +AU_ALIAS([AM_PROG_LIBTOOL], [LT_INIT]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_PROG_LIBTOOL], []) +dnl AC_DEFUN([AM_PROG_LIBTOOL], []) + + +# _LT_CC_BASENAME(CC) +# ------------------- +# Calculate cc_basename. Skip known compiler wrappers and cross-prefix. +m4_defun([_LT_CC_BASENAME], +[for cc_temp in $1""; do + case $cc_temp in + compile | *[[\\/]]compile | ccache | *[[\\/]]ccache ) ;; + distcc | *[[\\/]]distcc | purify | *[[\\/]]purify ) ;; + \-*) ;; + *) break;; + esac +done +cc_basename=`$ECHO "$cc_temp" | $SED "s%.*/%%; s%^$host_alias-%%"` +]) + + +# _LT_FILEUTILS_DEFAULTS +# ---------------------- +# It is okay to use these file commands and assume they have been set +# sensibly after `m4_require([_LT_FILEUTILS_DEFAULTS])'. 
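Since _LT_CC_BASENAME is pure string surgery, its effect is easy to restate outside m4. A sketch (not libtool's code; the function name, file name, and example inputs are invented for illustration):

    // cc_basename.cpp -- what _LT_CC_BASENAME computes: skip known compiler
    // wrappers and leading options, then strip any directory part and the
    // $host_alias- cross prefix from the first real word of $CC.
    #include <iostream>
    #include <sstream>
    #include <string>

    std::string cc_basename(const std::string& cc, const std::string& host_alias) {
        std::istringstream words(cc);
        std::string w, picked;
        while (words >> w) {
            std::string base = w.substr(w.find_last_of("/\\") + 1);
            // the wrappers the macro ignores, plus words starting with '-'
            if (base == "compile" || base == "ccache" || base == "distcc" ||
                base == "purify" || w[0] == '-')
                continue;
            picked = w;
            break;
        }
        std::string base = picked.substr(picked.find_last_of("/\\") + 1);
        const std::string prefix = host_alias + "-";   // cross-compile prefix
        if (base.compare(0, prefix.size(), prefix) == 0)
            base.erase(0, prefix.size());
        return base;
    }

    int main() {
        // CC="ccache /usr/bin/x86_64-linux-gnu-gcc" with that host alias -> "gcc"
        std::cout << cc_basename("ccache /usr/bin/x86_64-linux-gnu-gcc",
                                 "x86_64-linux-gnu") << "\n";
    }

The file-utility defaults whose header comment closes above are defined next.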
+m4_defun([_LT_FILEUTILS_DEFAULTS], +[: ${CP="cp -f"} +: ${MV="mv -f"} +: ${RM="rm -f"} +])# _LT_FILEUTILS_DEFAULTS + + +# _LT_SETUP +# --------- +m4_defun([_LT_SETUP], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_REQUIRE([_LT_PREPARE_SED_QUOTE_VARS])dnl +AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])dnl + +_LT_DECL([], [PATH_SEPARATOR], [1], [The PATH separator for the build system])dnl +dnl +_LT_DECL([], [host_alias], [0], [The host system])dnl +_LT_DECL([], [host], [0])dnl +_LT_DECL([], [host_os], [0])dnl +dnl +_LT_DECL([], [build_alias], [0], [The build system])dnl +_LT_DECL([], [build], [0])dnl +_LT_DECL([], [build_os], [0])dnl +dnl +AC_REQUIRE([AC_PROG_CC])dnl +AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +dnl +AC_REQUIRE([AC_PROG_LN_S])dnl +test -z "$LN_S" && LN_S="ln -s" +_LT_DECL([], [LN_S], [1], [Whether we need soft or hard links])dnl +dnl +AC_REQUIRE([LT_CMD_MAX_LEN])dnl +_LT_DECL([objext], [ac_objext], [0], [Object file suffix (normally "o")])dnl +_LT_DECL([], [exeext], [0], [Executable file suffix (normally "")])dnl +dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_CHECK_SHELL_FEATURES])dnl +m4_require([_LT_PATH_CONVERSION_FUNCTIONS])dnl +m4_require([_LT_CMD_RELOAD])dnl +m4_require([_LT_CHECK_MAGIC_METHOD])dnl +m4_require([_LT_CHECK_SHAREDLIB_FROM_LINKLIB])dnl +m4_require([_LT_CMD_OLD_ARCHIVE])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +m4_require([_LT_WITH_SYSROOT])dnl + +_LT_CONFIG_LIBTOOL_INIT([ +# See if we are running on zsh, and set the options which allow our +# commands through without removal of \ escapes INIT. +if test -n "\${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi +]) +if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST +fi + +_LT_CHECK_OBJDIR + +m4_require([_LT_TAG_COMPILER])dnl + +case $host_os in +aix3*) + # AIX sometimes has problems with the GCC collect2 program. For some + # reason, if we set the COLLECT_NAMES environment variable, the problems + # vanish in a puff of smoke. + if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES + fi + ;; +esac + +# Global variables: +ofile=libtool +can_build_shared=yes + +# All known linkers require a `.a' archive for static linking (except MSVC, +# which needs '.lib'). +libext=a + +with_gnu_ld="$lt_cv_prog_gnu_ld" + +old_CC="$CC" +old_CFLAGS="$CFLAGS" + +# Set sane defaults for various variables +test -z "$CC" && CC=cc +test -z "$LTCC" && LTCC=$CC +test -z "$LTCFLAGS" && LTCFLAGS=$CFLAGS +test -z "$LD" && LD=ld +test -z "$ac_objext" && ac_objext=o + +_LT_CC_BASENAME([$compiler]) + +# Only perform the check for file, if the check method requires it +test -z "$MAGIC_CMD" && MAGIC_CMD=file +case $deplibs_check_method in +file_magic*) + if test "$file_magic_cmd" = '$MAGIC_CMD'; then + _LT_PATH_MAGIC + fi + ;; +esac + +# Use C for the default configuration in the libtool script +LT_SUPPORTED_TAG([CC]) +_LT_LANG_C_CONFIG +_LT_LANG_DEFAULT_CONFIG +_LT_CONFIG_COMMANDS +])# _LT_SETUP + + +# _LT_PREPARE_SED_QUOTE_VARS +# -------------------------- +# Define a few sed substitution that help us do robust quoting. +m4_defun([_LT_PREPARE_SED_QUOTE_VARS], +[# Backslashify metacharacters that are still active within +# double-quoted strings. +sed_quote_subst='s/\([["`$\\]]\)/\\\1/g' + +# Same as above, but do not quote variable references. +double_quote_subst='s/\([["`\\]]\)/\\\1/g' + +# Sed substitution to delay expansion of an escaped shell variable in a +# double_quote_subst'ed string. 
+delay_variable_subst='s/\\\\\\\\\\\$/\\\\\\$/g' + +# Sed substitution to delay expansion of an escaped single quote. +delay_single_quote_subst='s/'\''/'\'\\\\\\\'\''/g' + +# Sed substitution to avoid accidental globbing in evaled expressions +no_glob_subst='s/\*/\\\*/g' +]) + +# _LT_PROG_LTMAIN +# --------------- +# Note that this code is called both from `configure', and `config.status' +# now that we use AC_CONFIG_COMMANDS to generate libtool. Notably, +# `config.status' has no value for ac_aux_dir unless we are using Automake, +# so we pass a copy along to make sure it has a sensible value anyway. +m4_defun([_LT_PROG_LTMAIN], +[m4_ifdef([AC_REQUIRE_AUX_FILE], [AC_REQUIRE_AUX_FILE([ltmain.sh])])dnl +_LT_CONFIG_LIBTOOL_INIT([ac_aux_dir='$ac_aux_dir']) +ltmain="$ac_aux_dir/ltmain.sh" +])# _LT_PROG_LTMAIN + + +## ------------------------------------- ## +## Accumulate code for creating libtool. ## +## ------------------------------------- ## + +# So that we can recreate a full libtool script including additional +# tags, we accumulate the chunks of code to send to AC_CONFIG_COMMANDS +# in macros and then make a single call at the end using the `libtool' +# label. + + +# _LT_CONFIG_LIBTOOL_INIT([INIT-COMMANDS]) +# ---------------------------------------- +# Register INIT-COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL_INIT], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_INIT], + [$1 +])])]) + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_INIT]) + + +# _LT_CONFIG_LIBTOOL([COMMANDS]) +# ------------------------------ +# Register COMMANDS to be passed to AC_CONFIG_COMMANDS later. +m4_define([_LT_CONFIG_LIBTOOL], +[m4_ifval([$1], + [m4_append([_LT_OUTPUT_LIBTOOL_COMMANDS], + [$1 +])])]) + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS]) + + +# _LT_CONFIG_SAVE_COMMANDS([COMMANDS], [INIT_COMMANDS]) +# ----------------------------------------------------- +m4_defun([_LT_CONFIG_SAVE_COMMANDS], +[_LT_CONFIG_LIBTOOL([$1]) +_LT_CONFIG_LIBTOOL_INIT([$2]) +]) + + +# _LT_FORMAT_COMMENT([COMMENT]) +# ----------------------------- +# Add leading comment marks to the start of each line, and a trailing +# full-stop to the whole comment if one is not present already. +m4_define([_LT_FORMAT_COMMENT], +[m4_ifval([$1], [ +m4_bpatsubst([m4_bpatsubst([$1], [^ *], [# ])], + [['`$\]], [\\\&])]m4_bmatch([$1], [[!?.]$], [], [.]) +)]) + + + +## ------------------------ ## +## FIXME: Eliminate VARNAME ## +## ------------------------ ## + + +# _LT_DECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION], [IS-TAGGED?]) +# ------------------------------------------------------------------- +# CONFIGNAME is the name given to the value in the libtool script. +# VARNAME is the (base) name used in the configure script. +# VALUE may be 0, 1 or 2 for a computed quote escaped value based on +# VARNAME. Any other value will be used directly. 
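The sed_quote_subst rule defined a little earlier is easier to read as an ordinary function. An illustrative C++ restatement of its s/\(["`$\\]\)/\\\1/g substitution; this is a reading aid, not code from this file:

    // quote_demo.cpp -- restates sed_quote_subst: backslash-escape the
    // metacharacters that stay active inside double-quoted shell strings.
    #include <iostream>
    #include <string>

    std::string sed_quote_subst(const std::string& s) {
        std::string out;
        for (char c : s) {
            if (c == '"' || c == '`' || c == '$' || c == '\\')
                out += '\\';
            out += c;
        }
        return out;
    }

    int main() {
        std::cout << sed_quote_subst("echo \"$HOME\" `date`") << "\n";
        // prints: echo \"\$HOME\" \`date\`
    }

(double_quote_subst is the same rule minus the variable-reference case, i.e. it leaves $ alone.) The _LT_DECL machinery whose contract is described just above is defined next.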
+m4_define([_LT_DECL], +[lt_if_append_uniq([lt_decl_varnames], [$2], [, ], + [lt_dict_add_subkey([lt_decl_dict], [$2], [libtool_name], + [m4_ifval([$1], [$1], [$2])]) + lt_dict_add_subkey([lt_decl_dict], [$2], [value], [$3]) + m4_ifval([$4], + [lt_dict_add_subkey([lt_decl_dict], [$2], [description], [$4])]) + lt_dict_add_subkey([lt_decl_dict], [$2], + [tagged?], [m4_ifval([$5], [yes], [no])])]) +]) + + +# _LT_TAGDECL([CONFIGNAME], VARNAME, VALUE, [DESCRIPTION]) +# -------------------------------------------------------- +m4_define([_LT_TAGDECL], [_LT_DECL([$1], [$2], [$3], [$4], [yes])]) + + +# lt_decl_tag_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_tag_varnames], +[_lt_decl_filter([tagged?], [yes], $@)]) + + +# _lt_decl_filter(SUBKEY, VALUE, [SEPARATOR], [VARNAME1..]) +# --------------------------------------------------------- +m4_define([_lt_decl_filter], +[m4_case([$#], + [0], [m4_fatal([$0: too few arguments: $#])], + [1], [m4_fatal([$0: too few arguments: $#: $1])], + [2], [lt_dict_filter([lt_decl_dict], [$1], [$2], [], lt_decl_varnames)], + [3], [lt_dict_filter([lt_decl_dict], [$1], [$2], [$3], lt_decl_varnames)], + [lt_dict_filter([lt_decl_dict], $@)])[]dnl +]) + + +# lt_decl_quote_varnames([SEPARATOR], [VARNAME1...]) +# -------------------------------------------------- +m4_define([lt_decl_quote_varnames], +[_lt_decl_filter([value], [1], $@)]) + + +# lt_decl_dquote_varnames([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_dquote_varnames], +[_lt_decl_filter([value], [2], $@)]) + + +# lt_decl_varnames_tagged([SEPARATOR], [VARNAME1...]) +# --------------------------------------------------- +m4_define([lt_decl_varnames_tagged], +[m4_assert([$# <= 2])dnl +_$0(m4_quote(m4_default([$1], [[, ]])), + m4_ifval([$2], [[$2]], [m4_dquote(lt_decl_tag_varnames)]), + m4_split(m4_normalize(m4_quote(_LT_TAGS)), [ ]))]) +m4_define([_lt_decl_varnames_tagged], +[m4_ifval([$3], [lt_combine([$1], [$2], [_], $3)])]) + + +# lt_decl_all_varnames([SEPARATOR], [VARNAME1...]) +# ------------------------------------------------ +m4_define([lt_decl_all_varnames], +[_$0(m4_quote(m4_default([$1], [[, ]])), + m4_if([$2], [], + m4_quote(lt_decl_varnames), + m4_quote(m4_shift($@))))[]dnl +]) +m4_define([_lt_decl_all_varnames], +[lt_join($@, lt_decl_varnames_tagged([$1], + lt_decl_tag_varnames([[, ]], m4_shift($@))))dnl +]) + + +# _LT_CONFIG_STATUS_DECLARE([VARNAME]) +# ------------------------------------ +# Quote a variable value, and forward it to `config.status' so that its +# declaration there will have the same value as in `configure'. VARNAME +# must have a single quote delimited value for this to work. +m4_define([_LT_CONFIG_STATUS_DECLARE], +[$1='`$ECHO "$][$1" | $SED "$delay_single_quote_subst"`']) + + +# _LT_CONFIG_STATUS_DECLARATIONS +# ------------------------------ +# We delimit libtool config variables with single quotes, so when +# we write them to config.status, we have to be sure to quote all +# embedded single quotes properly. 
In configure, this macro expands +# each variable declared with _LT_DECL (and _LT_TAGDECL) into: +# +# ='`$ECHO "$" | $SED "$delay_single_quote_subst"`' +m4_defun([_LT_CONFIG_STATUS_DECLARATIONS], +[m4_foreach([_lt_var], m4_quote(lt_decl_all_varnames), + [m4_n([_LT_CONFIG_STATUS_DECLARE(_lt_var)])])]) + + +# _LT_LIBTOOL_TAGS +# ---------------- +# Output comment and list of tags supported by the script +m4_defun([_LT_LIBTOOL_TAGS], +[_LT_FORMAT_COMMENT([The names of the tagged configurations supported by this script])dnl +available_tags="_LT_TAGS"dnl +]) + + +# _LT_LIBTOOL_DECLARE(VARNAME, [TAG]) +# ----------------------------------- +# Extract the dictionary values for VARNAME (optionally with TAG) and +# expand to a commented shell variable setting: +# +# # Some comment about what VAR is for. +# visible_name=$lt_internal_name +m4_define([_LT_LIBTOOL_DECLARE], +[_LT_FORMAT_COMMENT(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], + [description])))[]dnl +m4_pushdef([_libtool_name], + m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [libtool_name])))[]dnl +m4_case(m4_quote(lt_dict_fetch([lt_decl_dict], [$1], [value])), + [0], [_libtool_name=[$]$1], + [1], [_libtool_name=$lt_[]$1], + [2], [_libtool_name=$lt_[]$1], + [_libtool_name=lt_dict_fetch([lt_decl_dict], [$1], [value])])[]dnl +m4_ifval([$2], [_$2])[]m4_popdef([_libtool_name])[]dnl +]) + + +# _LT_LIBTOOL_CONFIG_VARS +# ----------------------- +# Produce commented declarations of non-tagged libtool config variables +# suitable for insertion in the LIBTOOL CONFIG section of the `libtool' +# script. Tagged libtool config variables (even for the LIBTOOL CONFIG +# section) are produced by _LT_LIBTOOL_TAG_VARS. +m4_defun([_LT_LIBTOOL_CONFIG_VARS], +[m4_foreach([_lt_var], + m4_quote(_lt_decl_filter([tagged?], [no], [], lt_decl_varnames)), + [m4_n([_LT_LIBTOOL_DECLARE(_lt_var)])])]) + + +# _LT_LIBTOOL_TAG_VARS(TAG) +# ------------------------- +m4_define([_LT_LIBTOOL_TAG_VARS], +[m4_foreach([_lt_var], m4_quote(lt_decl_tag_varnames), + [m4_n([_LT_LIBTOOL_DECLARE(_lt_var, [$1])])])]) + + +# _LT_TAGVAR(VARNAME, [TAGNAME]) +# ------------------------------ +m4_define([_LT_TAGVAR], [m4_ifval([$2], [$1_$2], [$1])]) + + +# _LT_CONFIG_COMMANDS +# ------------------- +# Send accumulated output to $CONFIG_STATUS. Thanks to the lists of +# variables for single and double quote escaping we saved from calls +# to _LT_DECL, we can put quote escaped variables declarations +# into `config.status', and then the shell code to quote escape them in +# for loops in `config.status'. Finally, any additional code accumulated +# from calls to _LT_CONFIG_LIBTOOL_INIT is expanded. +m4_defun([_LT_CONFIG_COMMANDS], +[AC_PROVIDE_IFELSE([LT_OUTPUT], + dnl If the libtool generation code has been placed in $CONFIG_LT, + dnl instead of duplicating it all over again into config.status, + dnl then we will have config.status run $CONFIG_LT later, so it + dnl needs to know what name is stored there: + [AC_CONFIG_COMMANDS([libtool], + [$SHELL $CONFIG_LT || AS_EXIT(1)], [CONFIG_LT='$CONFIG_LT'])], + dnl If the libtool generation code is destined for config.status, + dnl expand the accumulated commands and init code now: + [AC_CONFIG_COMMANDS([libtool], + [_LT_OUTPUT_LIBTOOL_COMMANDS], [_LT_OUTPUT_LIBTOOL_COMMANDS_INIT])]) +])#_LT_CONFIG_COMMANDS + + +# Initialize. +m4_define([_LT_OUTPUT_LIBTOOL_COMMANDS_INIT], +[ + +# The HP-UX ksh and POSIX shell print the target directory to stdout +# if CDPATH is set. 
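The single-quote discipline these declarations depend on is the standard shell idiom: to embed an arbitrary value in a single-quoted assignment, each embedded ' is rewritten as '\''. A C++ restatement of that general idiom (illustration only; the macros above additionally delay expansion via delay_single_quote_subst):

    // squote_demo.cpp -- the classic shell single-quoting idiom: close the
    // quote, emit an escaped quote, reopen. Illustration, not libtool code.
    #include <iostream>
    #include <string>

    std::string shell_single_quote(const std::string& s) {
        std::string out = "'";
        for (char c : s) {
            if (c == '\'') out += "'\\''";
            else out += c;
        }
        return out + "'";
    }

    int main() {
        std::cout << "LTCFLAGS=" << shell_single_quote("-g -O2 -DGREET='hi'") << "\n";
        // prints: LTCFLAGS='-g -O2 -DGREET='\''hi'\'''
    }

The accumulated init commands continue below, starting by neutralizing CDPATH as the comment above explains.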
+(unset CDPATH) >/dev/null 2>&1 && unset CDPATH + +sed_quote_subst='$sed_quote_subst' +double_quote_subst='$double_quote_subst' +delay_variable_subst='$delay_variable_subst' +_LT_CONFIG_STATUS_DECLARATIONS +LTCC='$LTCC' +LTCFLAGS='$LTCFLAGS' +compiler='$compiler_DEFAULT' + +# A function that is used when there is no print builtin or printf. +func_fallback_echo () +{ + eval 'cat <<_LTECHO_EOF +\$[]1 +_LTECHO_EOF' +} + +# Quote evaled strings. +for var in lt_decl_all_varnames([[ \ +]], lt_decl_quote_varnames); do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[[\\\\\\\`\\"\\\$]]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED \\"\\\$sed_quote_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +# Double-quote double-evaled strings. +for var in lt_decl_all_varnames([[ \ +]], lt_decl_dquote_varnames); do + case \`eval \\\\\$ECHO \\\\""\\\\\$\$var"\\\\"\` in + *[[\\\\\\\`\\"\\\$]]*) + eval "lt_\$var=\\\\\\"\\\`\\\$ECHO \\"\\\$\$var\\" | \\\$SED -e \\"\\\$double_quote_subst\\" -e \\"\\\$sed_quote_subst\\" -e \\"\\\$delay_variable_subst\\"\\\`\\\\\\"" + ;; + *) + eval "lt_\$var=\\\\\\"\\\$\$var\\\\\\"" + ;; + esac +done + +_LT_OUTPUT_LIBTOOL_INIT +]) + +# _LT_GENERATED_FILE_INIT(FILE, [COMMENT]) +# ------------------------------------ +# Generate a child script FILE with all initialization necessary to +# reuse the environment learned by the parent script, and make the +# file executable. If COMMENT is supplied, it is inserted after the +# `#!' sequence but before initialization text begins. After this +# macro, additional text can be appended to FILE to form the body of +# the child script. The macro ends with non-zero status if the +# file could not be fully written (such as if the disk is full). +m4_ifdef([AS_INIT_GENERATED], +[m4_defun([_LT_GENERATED_FILE_INIT],[AS_INIT_GENERATED($@)])], +[m4_defun([_LT_GENERATED_FILE_INIT], +[m4_require([AS_PREPARE])]dnl +[m4_pushdef([AS_MESSAGE_LOG_FD])]dnl +[lt_write_fail=0 +cat >$1 <<_ASEOF || lt_write_fail=1 +#! $SHELL +# Generated by $as_me. +$2 +SHELL=\${CONFIG_SHELL-$SHELL} +export SHELL +_ASEOF +cat >>$1 <<\_ASEOF || lt_write_fail=1 +AS_SHELL_SANITIZE +_AS_PREPARE +exec AS_MESSAGE_FD>&1 +_ASEOF +test $lt_write_fail = 0 && chmod +x $1[]dnl +m4_popdef([AS_MESSAGE_LOG_FD])])])# _LT_GENERATED_FILE_INIT + +# LT_OUTPUT +# --------- +# This macro allows early generation of the libtool script (before +# AC_OUTPUT is called), incase it is used in configure for compilation +# tests. +AC_DEFUN([LT_OUTPUT], +[: ${CONFIG_LT=./config.lt} +AC_MSG_NOTICE([creating $CONFIG_LT]) +_LT_GENERATED_FILE_INIT(["$CONFIG_LT"], +[# Run this file to recreate a libtool stub with the current configuration.]) + +cat >>"$CONFIG_LT" <<\_LTEOF +lt_cl_silent=false +exec AS_MESSAGE_LOG_FD>>config.log +{ + echo + AS_BOX([Running $as_me.]) +} >&AS_MESSAGE_LOG_FD + +lt_cl_help="\ +\`$as_me' creates a local libtool stub from the current configuration, +for use in further configure time tests before the real libtool is +generated. + +Usage: $[0] [[OPTIONS]] + + -h, --help print this help, then exit + -V, --version print version number, then exit + -q, --quiet do not print progress messages + -d, --debug don't remove temporary files + +Report bugs to ." + +lt_cl_version="\ +m4_ifset([AC_PACKAGE_NAME], [AC_PACKAGE_NAME ])config.lt[]dnl +m4_ifset([AC_PACKAGE_VERSION], [ AC_PACKAGE_VERSION]) +configured by $[0], generated by m4_PACKAGE_STRING. + +Copyright (C) 2011 Free Software Foundation, Inc. 
+This config.lt script is free software; the Free Software Foundation +gives unlimited permision to copy, distribute and modify it." + +while test $[#] != 0 +do + case $[1] in + --version | --v* | -V ) + echo "$lt_cl_version"; exit 0 ;; + --help | --h* | -h ) + echo "$lt_cl_help"; exit 0 ;; + --debug | --d* | -d ) + debug=: ;; + --quiet | --q* | --silent | --s* | -q ) + lt_cl_silent=: ;; + + -*) AC_MSG_ERROR([unrecognized option: $[1] +Try \`$[0] --help' for more information.]) ;; + + *) AC_MSG_ERROR([unrecognized argument: $[1] +Try \`$[0] --help' for more information.]) ;; + esac + shift +done + +if $lt_cl_silent; then + exec AS_MESSAGE_FD>/dev/null +fi +_LTEOF + +cat >>"$CONFIG_LT" <<_LTEOF +_LT_OUTPUT_LIBTOOL_COMMANDS_INIT +_LTEOF + +cat >>"$CONFIG_LT" <<\_LTEOF +AC_MSG_NOTICE([creating $ofile]) +_LT_OUTPUT_LIBTOOL_COMMANDS +AS_EXIT(0) +_LTEOF +chmod +x "$CONFIG_LT" + +# configure is writing to config.log, but config.lt does its own redirection, +# appending to config.log, which fails on DOS, as config.log is still kept +# open by configure. Here we exec the FD to /dev/null, effectively closing +# config.log, so it can be properly (re)opened and appended to by config.lt. +lt_cl_success=: +test "$silent" = yes && + lt_config_lt_args="$lt_config_lt_args --quiet" +exec AS_MESSAGE_LOG_FD>/dev/null +$SHELL "$CONFIG_LT" $lt_config_lt_args || lt_cl_success=false +exec AS_MESSAGE_LOG_FD>>config.log +$lt_cl_success || AS_EXIT(1) +])# LT_OUTPUT + + +# _LT_CONFIG(TAG) +# --------------- +# If TAG is the built-in tag, create an initial libtool script with a +# default configuration from the untagged config vars. Otherwise add code +# to config.status for appending the configuration named by TAG from the +# matching tagged config vars. +m4_defun([_LT_CONFIG], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_CONFIG_SAVE_COMMANDS([ + m4_define([_LT_TAG], m4_if([$1], [], [C], [$1]))dnl + m4_if(_LT_TAG, [C], [ + # See if we are running on zsh, and set the options which allow our + # commands through without removal of \ escapes. + if test -n "${ZSH_VERSION+set}" ; then + setopt NO_GLOB_SUBST + fi + + cfgfile="${ofile}T" + trap "$RM \"$cfgfile\"; exit 1" 1 2 15 + $RM "$cfgfile" + + cat <<_LT_EOF >> "$cfgfile" +#! $SHELL + +# `$ECHO "$ofile" | sed 's%^.*/%%'` - Provide generalized library-building support services. +# Generated automatically by $as_me ($PACKAGE$TIMESTAMP) $VERSION +# Libtool was configured on host `(hostname || uname -n) 2>/dev/null | sed 1q`: +# NOTE: Changes made to this file will be lost: look at ltmain.sh. +# +_LT_COPYING +_LT_LIBTOOL_TAGS + +# ### BEGIN LIBTOOL CONFIG +_LT_LIBTOOL_CONFIG_VARS +_LT_LIBTOOL_TAG_VARS +# ### END LIBTOOL CONFIG + +_LT_EOF + + case $host_os in + aix3*) + cat <<\_LT_EOF >> "$cfgfile" +# AIX sometimes has problems with the GCC collect2 program. For some +# reason, if we set the COLLECT_NAMES environment variable, the problems +# vanish in a puff of smoke. +if test "X${COLLECT_NAMES+set}" != Xset; then + COLLECT_NAMES= + export COLLECT_NAMES +fi +_LT_EOF + ;; + esac + + _LT_PROG_LTMAIN + + # We use sed instead of cat because bash on DJGPP gets confused if + # if finds mixed CR/LF and LF-only lines. Since sed operates in + # text mode, it properly converts lines to CR/LF. This bash problem + # is reportedly fixed, but why not run on old versions too? 
+ sed '$q' "$ltmain" >> "$cfgfile" \ + || (rm -f "$cfgfile"; exit 1) + + _LT_PROG_REPLACE_SHELLFNS + + mv -f "$cfgfile" "$ofile" || + (rm -f "$ofile" && cp "$cfgfile" "$ofile" && rm -f "$cfgfile") + chmod +x "$ofile" +], +[cat <<_LT_EOF >> "$ofile" + +dnl Unfortunately we have to use $1 here, since _LT_TAG is not expanded +dnl in a comment (ie after a #). +# ### BEGIN LIBTOOL TAG CONFIG: $1 +_LT_LIBTOOL_TAG_VARS(_LT_TAG) +# ### END LIBTOOL TAG CONFIG: $1 +_LT_EOF +])dnl /m4_if +], +[m4_if([$1], [], [ + PACKAGE='$PACKAGE' + VERSION='$VERSION' + TIMESTAMP='$TIMESTAMP' + RM='$RM' + ofile='$ofile'], []) +])dnl /_LT_CONFIG_SAVE_COMMANDS +])# _LT_CONFIG + + +# LT_SUPPORTED_TAG(TAG) +# --------------------- +# Trace this macro to discover what tags are supported by the libtool +# --tag option, using: +# autoconf --trace 'LT_SUPPORTED_TAG:$1' +AC_DEFUN([LT_SUPPORTED_TAG], []) + + +# C support is built-in for now +m4_define([_LT_LANG_C_enabled], []) +m4_define([_LT_TAGS], []) + + +# LT_LANG(LANG) +# ------------- +# Enable libtool support for the given language if not already enabled. +AC_DEFUN([LT_LANG], +[AC_BEFORE([$0], [LT_OUTPUT])dnl +m4_case([$1], + [C], [_LT_LANG(C)], + [C++], [_LT_LANG(CXX)], + [Go], [_LT_LANG(GO)], + [Java], [_LT_LANG(GCJ)], + [Fortran 77], [_LT_LANG(F77)], + [Fortran], [_LT_LANG(FC)], + [Windows Resource], [_LT_LANG(RC)], + [m4_ifdef([_LT_LANG_]$1[_CONFIG], + [_LT_LANG($1)], + [m4_fatal([$0: unsupported language: "$1"])])])dnl +])# LT_LANG + + +# _LT_LANG(LANGNAME) +# ------------------ +m4_defun([_LT_LANG], +[m4_ifdef([_LT_LANG_]$1[_enabled], [], + [LT_SUPPORTED_TAG([$1])dnl + m4_append([_LT_TAGS], [$1 ])dnl + m4_define([_LT_LANG_]$1[_enabled], [])dnl + _LT_LANG_$1_CONFIG($1)])dnl +])# _LT_LANG + + +m4_ifndef([AC_PROG_GO], [ +############################################################ +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_GO. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. # +############################################################ +m4_defun([AC_PROG_GO], +[AC_LANG_PUSH(Go)dnl +AC_ARG_VAR([GOC], [Go compiler command])dnl +AC_ARG_VAR([GOFLAGS], [Go compiler flags])dnl +_AC_ARG_VAR_LDFLAGS()dnl +AC_CHECK_TOOL(GOC, gccgo) +if test -z "$GOC"; then + if test -n "$ac_tool_prefix"; then + AC_CHECK_PROG(GOC, [${ac_tool_prefix}gccgo], [${ac_tool_prefix}gccgo]) + fi +fi +if test -z "$GOC"; then + AC_CHECK_PROG(GOC, gccgo, gccgo, false) +fi +])#m4_defun +])#m4_ifndef + + +# _LT_LANG_DEFAULT_CONFIG +# ----------------------- +m4_defun([_LT_LANG_DEFAULT_CONFIG], +[AC_PROVIDE_IFELSE([AC_PROG_CXX], + [LT_LANG(CXX)], + [m4_define([AC_PROG_CXX], defn([AC_PROG_CXX])[LT_LANG(CXX)])]) + +AC_PROVIDE_IFELSE([AC_PROG_F77], + [LT_LANG(F77)], + [m4_define([AC_PROG_F77], defn([AC_PROG_F77])[LT_LANG(F77)])]) + +AC_PROVIDE_IFELSE([AC_PROG_FC], + [LT_LANG(FC)], + [m4_define([AC_PROG_FC], defn([AC_PROG_FC])[LT_LANG(FC)])]) + +dnl The call to [A][M_PROG_GCJ] is quoted like that to stop aclocal +dnl pulling things in needlessly. 
+AC_PROVIDE_IFELSE([AC_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([A][M_PROG_GCJ], + [LT_LANG(GCJ)], + [AC_PROVIDE_IFELSE([LT_PROG_GCJ], + [LT_LANG(GCJ)], + [m4_ifdef([AC_PROG_GCJ], + [m4_define([AC_PROG_GCJ], defn([AC_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([A][M_PROG_GCJ], + [m4_define([A][M_PROG_GCJ], defn([A][M_PROG_GCJ])[LT_LANG(GCJ)])]) + m4_ifdef([LT_PROG_GCJ], + [m4_define([LT_PROG_GCJ], defn([LT_PROG_GCJ])[LT_LANG(GCJ)])])])])]) + +AC_PROVIDE_IFELSE([AC_PROG_GO], + [LT_LANG(GO)], + [m4_define([AC_PROG_GO], defn([AC_PROG_GO])[LT_LANG(GO)])]) + +AC_PROVIDE_IFELSE([LT_PROG_RC], + [LT_LANG(RC)], + [m4_define([LT_PROG_RC], defn([LT_PROG_RC])[LT_LANG(RC)])]) +])# _LT_LANG_DEFAULT_CONFIG + +# Obsolete macros: +AU_DEFUN([AC_LIBTOOL_CXX], [LT_LANG(C++)]) +AU_DEFUN([AC_LIBTOOL_F77], [LT_LANG(Fortran 77)]) +AU_DEFUN([AC_LIBTOOL_FC], [LT_LANG(Fortran)]) +AU_DEFUN([AC_LIBTOOL_GCJ], [LT_LANG(Java)]) +AU_DEFUN([AC_LIBTOOL_RC], [LT_LANG(Windows Resource)]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_CXX], []) +dnl AC_DEFUN([AC_LIBTOOL_F77], []) +dnl AC_DEFUN([AC_LIBTOOL_FC], []) +dnl AC_DEFUN([AC_LIBTOOL_GCJ], []) +dnl AC_DEFUN([AC_LIBTOOL_RC], []) + + +# _LT_TAG_COMPILER +# ---------------- +m4_defun([_LT_TAG_COMPILER], +[AC_REQUIRE([AC_PROG_CC])dnl + +_LT_DECL([LTCC], [CC], [1], [A C compiler])dnl +_LT_DECL([LTCFLAGS], [CFLAGS], [1], [LTCC compiler flags])dnl +_LT_TAGDECL([CC], [compiler], [1], [A language specific compiler])dnl +_LT_TAGDECL([with_gcc], [GCC], [0], [Is the compiler the GNU compiler?])dnl + +# If no C compiler was specified, use CC. +LTCC=${LTCC-"$CC"} + +# If no C compiler flags were specified, use CFLAGS. +LTCFLAGS=${LTCFLAGS-"$CFLAGS"} + +# Allow CC to be a program name with arguments. +compiler=$CC +])# _LT_TAG_COMPILER + + +# _LT_COMPILER_BOILERPLATE +# ------------------------ +# Check for compiler boilerplate output or warnings with +# the simple compiler test code. +m4_defun([_LT_COMPILER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_compile_test_code" >conftest.$ac_ext +eval "$ac_compile" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_compiler_boilerplate=`cat conftest.err` +$RM conftest* +])# _LT_COMPILER_BOILERPLATE + + +# _LT_LINKER_BOILERPLATE +# ---------------------- +# Check for linker boilerplate output or warnings with +# the simple link test code. 
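For context on the two boilerplate checks (the compiler one above, the linker one defined next): each builds a deliberately trivial source and records whatever the toolchain prints to stderr, so that later probes can discount that banner noise. The per-language probe strings are set elsewhere in this file; for the C/C++ tags they are, as far as this sketch assumes, essentially:

    // What lt_simple_compile_test_code boils down to: a translation unit
    // with nothing to warn about, so any stderr output is pure banner noise.
    int some_variable = 0;

    // And lt_simple_link_test_code: the smallest linkable program, used the
    // same way to capture linker banners. (Both shown here for illustration.)
    int main() { return 0; }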
+m4_defun([_LT_LINKER_BOILERPLATE], +[m4_require([_LT_DECL_SED])dnl +ac_outfile=conftest.$ac_objext +echo "$lt_simple_link_test_code" >conftest.$ac_ext +eval "$ac_link" 2>&1 >/dev/null | $SED '/^$/d; /^ *+/d' >conftest.err +_lt_linker_boilerplate=`cat conftest.err` +$RM -r conftest* +])# _LT_LINKER_BOILERPLATE + +# _LT_REQUIRED_DARWIN_CHECKS +# ------------------------- +m4_defun_once([_LT_REQUIRED_DARWIN_CHECKS],[ + case $host_os in + rhapsody* | darwin*) + AC_CHECK_TOOL([DSYMUTIL], [dsymutil], [:]) + AC_CHECK_TOOL([NMEDIT], [nmedit], [:]) + AC_CHECK_TOOL([LIPO], [lipo], [:]) + AC_CHECK_TOOL([OTOOL], [otool], [:]) + AC_CHECK_TOOL([OTOOL64], [otool64], [:]) + _LT_DECL([], [DSYMUTIL], [1], + [Tool to manipulate archived DWARF debug symbol files on Mac OS X]) + _LT_DECL([], [NMEDIT], [1], + [Tool to change global to local symbols on Mac OS X]) + _LT_DECL([], [LIPO], [1], + [Tool to manipulate fat objects and archives on Mac OS X]) + _LT_DECL([], [OTOOL], [1], + [ldd/readelf like tool for Mach-O binaries on Mac OS X]) + _LT_DECL([], [OTOOL64], [1], + [ldd/readelf like tool for 64 bit Mach-O binaries on Mac OS X 10.4]) + + AC_CACHE_CHECK([for -single_module linker flag],[lt_cv_apple_cc_single_mod], + [lt_cv_apple_cc_single_mod=no + if test -z "${LT_MULTI_MODULE}"; then + # By default we will add the -single_module flag. You can override + # by either setting the environment variable LT_MULTI_MODULE + # non-empty at configure time, or by adding -multi_module to the + # link flags. + rm -rf libconftest.dylib* + echo "int foo(void){return 1;}" > conftest.c + echo "$LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ +-dynamiclib -Wl,-single_module conftest.c" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS $LDFLAGS -o libconftest.dylib \ + -dynamiclib -Wl,-single_module conftest.c 2>conftest.err + _lt_result=$? + # If there is a non-empty error log, and "single_module" + # appears in it, assume the flag caused a linker warning + if test -s conftest.err && $GREP single_module conftest.err; then + cat conftest.err >&AS_MESSAGE_LOG_FD + # Otherwise, if the output was created with a 0 exit code from + # the compiler, it worked. + elif test -f libconftest.dylib && test $_lt_result -eq 0; then + lt_cv_apple_cc_single_mod=yes + else + cat conftest.err >&AS_MESSAGE_LOG_FD + fi + rm -rf libconftest.dylib* + rm -f conftest.* + fi]) + + AC_CACHE_CHECK([for -exported_symbols_list linker flag], + [lt_cv_ld_exported_symbols_list], + [lt_cv_ld_exported_symbols_list=no + save_LDFLAGS=$LDFLAGS + echo "_main" > conftest.sym + LDFLAGS="$LDFLAGS -Wl,-exported_symbols_list,conftest.sym" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [lt_cv_ld_exported_symbols_list=yes], + [lt_cv_ld_exported_symbols_list=no]) + LDFLAGS="$save_LDFLAGS" + ]) + + AC_CACHE_CHECK([for -force_load linker flag],[lt_cv_ld_force_load], + [lt_cv_ld_force_load=no + cat > conftest.c << _LT_EOF +int forced_loaded() { return 2;} +_LT_EOF + echo "$LTCC $LTCFLAGS -c -o conftest.o conftest.c" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS -c -o conftest.o conftest.c 2>&AS_MESSAGE_LOG_FD + echo "$AR cru libconftest.a conftest.o" >&AS_MESSAGE_LOG_FD + $AR cru libconftest.a conftest.o 2>&AS_MESSAGE_LOG_FD + echo "$RANLIB libconftest.a" >&AS_MESSAGE_LOG_FD + $RANLIB libconftest.a 2>&AS_MESSAGE_LOG_FD + cat > conftest.c << _LT_EOF +int main() { return 0;} +_LT_EOF + echo "$LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a" >&AS_MESSAGE_LOG_FD + $LTCC $LTCFLAGS $LDFLAGS -o conftest conftest.c -Wl,-force_load,./libconftest.a 2>conftest.err + _lt_result=$? 
+ if test -s conftest.err && $GREP force_load conftest.err; then + cat conftest.err >&AS_MESSAGE_LOG_FD + elif test -f conftest && test $_lt_result -eq 0 && $GREP forced_load conftest >/dev/null 2>&1 ; then + lt_cv_ld_force_load=yes + else + cat conftest.err >&AS_MESSAGE_LOG_FD + fi + rm -f conftest.err libconftest.a conftest conftest.c + rm -rf conftest.dSYM + ]) + case $host_os in + rhapsody* | darwin1.[[012]]) + _lt_dar_allow_undefined='${wl}-undefined ${wl}suppress' ;; + darwin1.*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + darwin*) # darwin 5.x on + # if running on 10.5 or later, the deployment target defaults + # to the OS version, if on x86, and 10.4, the deployment + # target defaults to 10.4. Don't you love it? + case ${MACOSX_DEPLOYMENT_TARGET-10.0},$host in + 10.0,*86*-darwin8*|10.0,*-darwin[[91]]*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + 10.[[012]]*) + _lt_dar_allow_undefined='${wl}-flat_namespace ${wl}-undefined ${wl}suppress' ;; + 10.*) + _lt_dar_allow_undefined='${wl}-undefined ${wl}dynamic_lookup' ;; + esac + ;; + esac + if test "$lt_cv_apple_cc_single_mod" = "yes"; then + _lt_dar_single_mod='$single_module' + fi + if test "$lt_cv_ld_exported_symbols_list" = "yes"; then + _lt_dar_export_syms=' ${wl}-exported_symbols_list,$output_objdir/${libname}-symbols.expsym' + else + _lt_dar_export_syms='~$NMEDIT -s $output_objdir/${libname}-symbols.expsym ${lib}' + fi + if test "$DSYMUTIL" != ":" && test "$lt_cv_ld_force_load" = "no"; then + _lt_dsymutil='~$DSYMUTIL $lib || :' + else + _lt_dsymutil= + fi + ;; + esac +]) + + +# _LT_DARWIN_LINKER_FEATURES([TAG]) +# --------------------------------- +# Checks for linker and compiler features on darwin +m4_defun([_LT_DARWIN_LINKER_FEATURES], +[ + m4_require([_LT_REQUIRED_DARWIN_CHECKS]) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_automatic, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + if test "$lt_cv_ld_force_load" = "yes"; then + _LT_TAGVAR(whole_archive_flag_spec, $1)='`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience ${wl}-force_load,$conv\"; done; func_echo_all \"$new_convenience\"`' + m4_case([$1], [F77], [_LT_TAGVAR(compiler_needs_object, $1)=yes], + [FC], [_LT_TAGVAR(compiler_needs_object, $1)=yes]) + else + _LT_TAGVAR(whole_archive_flag_spec, $1)='' + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)="$_lt_dar_allow_undefined" + case $cc_basename in + ifort*) _lt_dar_can_shared=yes ;; + *) _lt_dar_can_shared=$GCC ;; + esac + if test "$_lt_dar_can_shared" = "yes"; then + output_verbose_link_cmd=func_echo_all + _LT_TAGVAR(archive_cmds, $1)="\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring $_lt_dar_single_mod${_lt_dsymutil}" + _LT_TAGVAR(module_cmds, $1)="\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs \$compiler_flags${_lt_dsymutil}" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \$libobjs \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring ${_lt_dar_single_mod}${_lt_dar_export_syms}${_lt_dsymutil}" + _LT_TAGVAR(module_expsym_cmds, $1)="sed -e 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC \$allow_undefined_flag -o \$lib -bundle \$libobjs \$deplibs 
\$compiler_flags${_lt_dar_export_syms}${_lt_dsymutil}" + m4_if([$1], [CXX], +[ if test "$lt_cv_apple_cc_single_mod" != "yes"; then + _LT_TAGVAR(archive_cmds, $1)="\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dsymutil}" + _LT_TAGVAR(archive_expsym_cmds, $1)="sed 's,^,_,' < \$export_symbols > \$output_objdir/\${libname}-symbols.expsym~\$CC -r -keep_private_externs -nostdlib -o \${lib}-master.o \$libobjs~\$CC -dynamiclib \$allow_undefined_flag -o \$lib \${lib}-master.o \$deplibs \$compiler_flags -install_name \$rpath/\$soname \$verstring${_lt_dar_export_syms}${_lt_dsymutil}" + fi +],[]) + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi +]) + +# _LT_SYS_MODULE_PATH_AIX([TAGNAME]) +# ---------------------------------- +# Links a minimal program and checks the executable +# for the system default hardcoded library path. In most cases, +# this is /usr/lib:/lib, but when the MPI compilers are used +# the location of the communication and MPI libs are included too. +# If we don't find anything, use the default library path according +# to the aix ld manual. +# Store the results from the different compilers for each TAGNAME. +# Allow to override them for all tags through lt_cv_aix_libpath. +m4_defun([_LT_SYS_MODULE_PATH_AIX], +[m4_require([_LT_DECL_SED])dnl +if test "${lt_cv_aix_libpath+set}" = set; then + aix_libpath=$lt_cv_aix_libpath +else + AC_CACHE_VAL([_LT_TAGVAR([lt_cv_aix_libpath_], [$1])], + [AC_LINK_IFELSE([AC_LANG_PROGRAM],[ + lt_aix_libpath_sed='[ + /Import File Strings/,/^$/ { + /^0/ { + s/^0 *\([^ ]*\) *$/\1/ + p + } + }]' + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -H conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + # Check for a 64-bit object if we didn't find anything. + if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])=`dump -HX64 conftest$ac_exeext 2>/dev/null | $SED -n -e "$lt_aix_libpath_sed"` + fi],[]) + if test -z "$_LT_TAGVAR([lt_cv_aix_libpath_], [$1])"; then + _LT_TAGVAR([lt_cv_aix_libpath_], [$1])="/usr/lib:/lib" + fi + ]) + aix_libpath=$_LT_TAGVAR([lt_cv_aix_libpath_], [$1]) +fi +])# _LT_SYS_MODULE_PATH_AIX + + +# _LT_SHELL_INIT(ARG) +# ------------------- +m4_define([_LT_SHELL_INIT], +[m4_divert_text([M4SH-INIT], [$1 +])])# _LT_SHELL_INIT + + + +# _LT_PROG_ECHO_BACKSLASH +# ----------------------- +# Find how we can fake an echo command that does not interpret backslash. +# In particular, with Autoconf 2.60 or later we add some code to the start +# of the generated configure script which will find a shell with a builtin +# printf (which we can use as an echo command). +m4_defun([_LT_PROG_ECHO_BACKSLASH], +[ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO +ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + +AC_MSG_CHECKING([how to print strings]) +# Test print first, because it will be a builtin if present. +if test "X`( print -r -- -n ) 2>/dev/null`" = X-n && \ + test "X`print -r -- $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='print -r --' +elif test "X`printf %s $ECHO 2>/dev/null`" = "X$ECHO"; then + ECHO='printf %s\n' +else + # Use this function as a fallback that always works. + func_fallback_echo () + { + eval 'cat <<_LTECHO_EOF +$[]1 +_LTECHO_EOF' + } + ECHO='func_fallback_echo' +fi + +# func_echo_all arg... +# Invoke $ECHO with all args, space-separated. 
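+# (Editor's note, illustrative -- not upstream: after the detection above,
+# ECHO holds 'print -r --', 'printf %s\n', or the fallback function, so
+# the helper defined just below behaves like, e.g.:
+#   ECHO='printf %s\n'; func_echo_all 'a\b' 'c'   # prints: a\b c
+# plain 'echo' is avoided because some shells interpret the backslashes.)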
+func_echo_all () +{ + $ECHO "$*" +} + +case "$ECHO" in + printf*) AC_MSG_RESULT([printf]) ;; + print*) AC_MSG_RESULT([print -r]) ;; + *) AC_MSG_RESULT([cat]) ;; +esac + +m4_ifdef([_AS_DETECT_SUGGESTED], +[_AS_DETECT_SUGGESTED([ + test -n "${ZSH_VERSION+set}${BASH_VERSION+set}" || ( + ECHO='\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\\' + ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO + ECHO=$ECHO$ECHO$ECHO$ECHO$ECHO$ECHO + PATH=/empty FPATH=/empty; export PATH FPATH + test "X`printf %s $ECHO`" = "X$ECHO" \ + || test "X`print -r -- $ECHO`" = "X$ECHO" )])]) + +_LT_DECL([], [SHELL], [1], [Shell to use when invoking shell scripts]) +_LT_DECL([], [ECHO], [1], [An echo program that protects backslashes]) +])# _LT_PROG_ECHO_BACKSLASH + + +# _LT_WITH_SYSROOT +# ---------------- +AC_DEFUN([_LT_WITH_SYSROOT], +[AC_MSG_CHECKING([for sysroot]) +AC_ARG_WITH([sysroot], +[ --with-sysroot[=DIR] Search for dependent libraries within DIR + (or the compiler's sysroot if not specified).], +[], [with_sysroot=no]) + +dnl lt_sysroot will always be passed unquoted. We quote it here +dnl in case the user passed a directory name. +lt_sysroot= +case ${with_sysroot} in #( + yes) + if test "$GCC" = yes; then + lt_sysroot=`$CC --print-sysroot 2>/dev/null` + fi + ;; #( + /*) + lt_sysroot=`echo "$with_sysroot" | sed -e "$sed_quote_subst"` + ;; #( + no|'') + ;; #( + *) + AC_MSG_RESULT([${with_sysroot}]) + AC_MSG_ERROR([The sysroot must be an absolute path.]) + ;; +esac + + AC_MSG_RESULT([${lt_sysroot:-no}]) +_LT_DECL([], [lt_sysroot], [0], [The root where to search for ]dnl +[dependent libraries, and in which our libraries should be installed.])]) + +# _LT_ENABLE_LOCK +# --------------- +m4_defun([_LT_ENABLE_LOCK], +[AC_ARG_ENABLE([libtool-lock], + [AS_HELP_STRING([--disable-libtool-lock], + [avoid locking (might break parallel builds)])]) +test "x$enable_libtool_lock" != xno && enable_libtool_lock=yes + +# Some flags need to be propagated to the compiler or linker for good +# libtool support. +case $host in +ia64-*-hpux*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.$ac_objext` in + *ELF-32*) + HPUX_IA64_MODE="32" + ;; + *ELF-64*) + HPUX_IA64_MODE="64" + ;; + esac + fi + rm -rf conftest* + ;; +*-*-irix6*) + # Find out which ABI we are using. + echo '[#]line '$LINENO' "configure"' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + if test "$lt_cv_prog_gnu_ld" = yes; then + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -melf32bsmip" + ;; + *N32*) + LD="${LD-ld} -melf32bmipn32" + ;; + *64-bit*) + LD="${LD-ld} -melf64bmip" + ;; + esac + else + case `/usr/bin/file conftest.$ac_objext` in + *32-bit*) + LD="${LD-ld} -32" + ;; + *N32*) + LD="${LD-ld} -n32" + ;; + *64-bit*) + LD="${LD-ld} -64" + ;; + esac + fi + fi + rm -rf conftest* + ;; + +x86_64-*kfreebsd*-gnu|x86_64-*linux*|ppc*-*linux*|powerpc*-*linux*| \ +s390*-*linux*|s390*-*tpf*|sparc*-*linux*) + # Find out which ABI we are using. 
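+ # (Editor's note, illustrative -- not upstream: the probe below compiles
+ # a one-line C file and classifies the object with file(1), e.g.
+ #   echo 'int i;' > t.c; cc -c t.c; /usr/bin/file t.o
+ #   t.o: ELF 32-bit LSB relocatable, Intel 80386 ...
+ # and then picks the matching ld emulation such as '-m elf_i386'.)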
+ echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *32-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_i386_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_i386" + ;; + ppc64-*linux*|powerpc64-*linux*) + LD="${LD-ld} -m elf32ppclinux" + ;; + s390x-*linux*) + LD="${LD-ld} -m elf_s390" + ;; + sparc64-*linux*) + LD="${LD-ld} -m elf32_sparc" + ;; + esac + ;; + *64-bit*) + case $host in + x86_64-*kfreebsd*-gnu) + LD="${LD-ld} -m elf_x86_64_fbsd" + ;; + x86_64-*linux*) + LD="${LD-ld} -m elf_x86_64" + ;; + ppc*-*linux*|powerpc*-*linux*) + LD="${LD-ld} -m elf64ppc" + ;; + s390*-*linux*|s390*-*tpf*) + LD="${LD-ld} -m elf64_s390" + ;; + sparc*-*linux*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; + +*-*-sco3.2v5*) + # On SCO OpenServer 5, we need -belf to get full-featured binaries. + SAVE_CFLAGS="$CFLAGS" + CFLAGS="$CFLAGS -belf" + AC_CACHE_CHECK([whether the C compiler needs -belf], lt_cv_cc_needs_belf, + [AC_LANG_PUSH(C) + AC_LINK_IFELSE([AC_LANG_PROGRAM([[]],[[]])],[lt_cv_cc_needs_belf=yes],[lt_cv_cc_needs_belf=no]) + AC_LANG_POP]) + if test x"$lt_cv_cc_needs_belf" != x"yes"; then + # this is probably gcc 2.8.0, egcs 1.0 or newer; no need for -belf + CFLAGS="$SAVE_CFLAGS" + fi + ;; +*-*solaris*) + # Find out which ABI we are using. + echo 'int i;' > conftest.$ac_ext + if AC_TRY_EVAL(ac_compile); then + case `/usr/bin/file conftest.o` in + *64-bit*) + case $lt_cv_prog_gnu_ld in + yes*) + case $host in + i?86-*-solaris*) + LD="${LD-ld} -m elf_x86_64" + ;; + sparc*-*-solaris*) + LD="${LD-ld} -m elf64_sparc" + ;; + esac + # GNU ld 2.21 introduced _sol2 emulations. Use them if available. + if ${LD-ld} -V | grep _sol2 >/dev/null 2>&1; then + LD="${LD-ld}_sol2" + fi + ;; + *) + if ${LD-ld} -64 -r -o conftest2.o conftest.o >/dev/null 2>&1; then + LD="${LD-ld} -64" + fi + ;; + esac + ;; + esac + fi + rm -rf conftest* + ;; +esac + +need_locks="$enable_libtool_lock" +])# _LT_ENABLE_LOCK + + +# _LT_PROG_AR +# ----------- +m4_defun([_LT_PROG_AR], +[AC_CHECK_TOOLS(AR, [ar], false) +: ${AR=ar} +: ${AR_FLAGS=cru} +_LT_DECL([], [AR], [1], [The archiver]) +_LT_DECL([], [AR_FLAGS], [1], [Flags to create an archive]) + +AC_CACHE_CHECK([for archiver @FILE support], [lt_cv_ar_at_file], + [lt_cv_ar_at_file=no + AC_COMPILE_IFELSE([AC_LANG_PROGRAM], + [echo conftest.$ac_objext > conftest.lst + lt_ar_try='$AR $AR_FLAGS libconftest.a @conftest.lst >&AS_MESSAGE_LOG_FD' + AC_TRY_EVAL([lt_ar_try]) + if test "$ac_status" -eq 0; then + # Ensure the archiver fails upon bogus file names. + rm -f conftest.$ac_objext libconftest.a + AC_TRY_EVAL([lt_ar_try]) + if test "$ac_status" -ne 0; then + lt_cv_ar_at_file=@ + fi + fi + rm -f conftest.* libconftest.a + ]) + ]) + +if test "x$lt_cv_ar_at_file" = xno; then + archiver_list_spec= +else + archiver_list_spec=$lt_cv_ar_at_file +fi +_LT_DECL([], [archiver_list_spec], [1], + [How to feed a file listing to the archiver]) +])# _LT_PROG_AR + + +# _LT_CMD_OLD_ARCHIVE +# ------------------- +m4_defun([_LT_CMD_OLD_ARCHIVE], +[_LT_PROG_AR + +AC_CHECK_TOOL(STRIP, strip, :) +test -z "$STRIP" && STRIP=: +_LT_DECL([], [STRIP], [1], [A symbol stripping program]) + +AC_CHECK_TOOL(RANLIB, ranlib, :) +test -z "$RANLIB" && RANLIB=: +_LT_DECL([], [RANLIB], [1], + [Commands used to install an old-style archive]) + +# Determine commands to create old-style static archives. 
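+# (Editor's note, illustrative -- not upstream: with the AR_FLAGS=cru
+# default above, the templates below expand at link time to roughly
+#   ar cru libfoo.a foo.o bar.o
+#   ranlib libfoo.a
+# ranlib rebuilds the archive's symbol index; OpenBSD's 'ranlib -t'
+# special case merely touches it.)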
+old_archive_cmds='$AR $AR_FLAGS $oldlib$oldobjs' +old_postinstall_cmds='chmod 644 $oldlib' +old_postuninstall_cmds= + +if test -n "$RANLIB"; then + case $host_os in + openbsd*) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB -t \$tool_oldlib" + ;; + *) + old_postinstall_cmds="$old_postinstall_cmds~\$RANLIB \$tool_oldlib" + ;; + esac + old_archive_cmds="$old_archive_cmds~\$RANLIB \$tool_oldlib" +fi + +case $host_os in + darwin*) + lock_old_archive_extraction=yes ;; + *) + lock_old_archive_extraction=no ;; +esac +_LT_DECL([], [old_postinstall_cmds], [2]) +_LT_DECL([], [old_postuninstall_cmds], [2]) +_LT_TAGDECL([], [old_archive_cmds], [2], + [Commands used to build an old-style archive]) +_LT_DECL([], [lock_old_archive_extraction], [0], + [Whether to use a lock for old archive extraction]) +])# _LT_CMD_OLD_ARCHIVE + + +# _LT_COMPILER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [OUTPUT-FILE], [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------------------- +# Check whether the given compiler option works +AC_DEFUN([_LT_COMPILER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + m4_if([$4], , [ac_outfile=conftest.$ac_objext], [ac_outfile=$4]) + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + lt_compiler_flag="$3" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + # The option is referenced via a variable to avoid confusing sed. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>conftest.err) + ac_status=$? + cat conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s "$ac_outfile"; then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings other than the usual output. + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' >conftest.exp + $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2 + if test ! -s conftest.er2 || diff conftest.exp conftest.er2 >/dev/null; then + $2=yes + fi + fi + $RM conftest* +]) + +if test x"[$]$2" = xyes; then + m4_if([$5], , :, [$5]) +else + m4_if([$6], , :, [$6]) +fi +])# _LT_COMPILER_OPTION + +# Old name: +AU_ALIAS([AC_LIBTOOL_COMPILER_OPTION], [_LT_COMPILER_OPTION]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_COMPILER_OPTION], []) + + +# _LT_LINKER_OPTION(MESSAGE, VARIABLE-NAME, FLAGS, +# [ACTION-SUCCESS], [ACTION-FAILURE]) +# ---------------------------------------------------- +# Check whether the given linker option works +AC_DEFUN([_LT_LINKER_OPTION], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_SED])dnl +AC_CACHE_CHECK([$1], [$2], + [$2=no + save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS $3" + echo "$lt_simple_link_test_code" > conftest.$ac_ext + if (eval $ac_link 2>conftest.err) && test -s conftest$ac_exeext; then + # The linker can only warn and ignore the option if not recognized + # So say no if there are warnings + if test -s conftest.err; then + # Append any errors to the config.log. 
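+ # (Editor's sketch, illustrative -- not upstream: the lines below strip
+ # the "boilerplate" a linker always emits, so only new diagnostics count
+ # against the flag. Roughly, with hypothetical names want/got:
+ #   printf '%s\n' "$_lt_linker_boilerplate" | sed '/^$/d' > want
+ #   sed '/^$/d; /^ *+/d' conftest.err > got
+ #   diff want got >/dev/null && flag_ok=yes)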
+ cat conftest.err 1>&AS_MESSAGE_LOG_FD
+ $ECHO "$_lt_linker_boilerplate" | $SED '/^$/d' > conftest.exp
+ $SED '/^$/d; /^ *+/d' conftest.err >conftest.er2
+ if diff conftest.exp conftest.er2 >/dev/null; then
+ $2=yes
+ fi
+ else
+ $2=yes
+ fi
+ fi
+ $RM -r conftest*
+ LDFLAGS="$save_LDFLAGS"
+])
+
+if test x"[$]$2" = xyes; then
+ m4_if([$4], , :, [$4])
+else
+ m4_if([$5], , :, [$5])
+fi
+])# _LT_LINKER_OPTION
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_LINKER_OPTION], [_LT_LINKER_OPTION])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_LINKER_OPTION], [])
+
+
+# LT_CMD_MAX_LEN
+#---------------
+AC_DEFUN([LT_CMD_MAX_LEN],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+# find the maximum length of command line arguments
+AC_MSG_CHECKING([the maximum length of command line arguments])
+AC_CACHE_VAL([lt_cv_sys_max_cmd_len], [dnl
+ i=0
+ teststring="ABCD"
+
+ case $build_os in
+ msdosdjgpp*)
+ # On DJGPP, this test can blow up pretty badly due to problems in libc
+ # (any single argument exceeding 2000 bytes causes a buffer overrun
+ # during glob expansion). Even if it were fixed, the result of this
+ # check would be larger than it should be.
+ lt_cv_sys_max_cmd_len=12288; # 12K is about right
+ ;;
+
+ gnu*)
+ # Under GNU Hurd, this test is not required because there is
+ # no limit to the length of command line arguments.
+ # Libtool will interpret -1 as no limit whatsoever
+ lt_cv_sys_max_cmd_len=-1;
+ ;;
+
+ cygwin* | mingw* | cegcc*)
+ # On Win9x/ME, this test blows up -- it succeeds, but takes
+ # about 5 minutes as the teststring grows exponentially.
+ # Worse, since 9x/ME are not pre-emptively multitasking,
+ # you end up with a "frozen" computer, even though with patience
+ # the test eventually succeeds (with a max line length of 256k).
+ # Instead, let's just punt: use the minimum line length reported by
+ # all of the supported platforms: 8192 (on NT/2K/XP).
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ mint*)
+ # On MiNT this can take a long time and run out of memory.
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ amigaos*)
+ # On AmigaOS with pdksh, this test takes hours, literally.
+ # So we just punt and use a minimum line length of 8192.
+ lt_cv_sys_max_cmd_len=8192;
+ ;;
+
+ netbsd* | freebsd* | openbsd* | darwin* | dragonfly*)
+ # This has been around since 386BSD, at least. Likely further.
+ if test -x /sbin/sysctl; then
+ lt_cv_sys_max_cmd_len=`/sbin/sysctl -n kern.argmax`
+ elif test -x /usr/sbin/sysctl; then
+ lt_cv_sys_max_cmd_len=`/usr/sbin/sysctl -n kern.argmax`
+ else
+ lt_cv_sys_max_cmd_len=65536 # usable default for all BSDs
+ fi
+ # And add a safety zone
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+ ;;
+
+ interix*)
+ # We know the value 262144 and hardcode it with a safety zone (like BSD)
+ lt_cv_sys_max_cmd_len=196608
+ ;;
+
+ os2*)
+ # The test takes a long time on OS/2.
+ lt_cv_sys_max_cmd_len=8192
+ ;;
+
+ osf*)
+ # Dr. Hans Ekkehard Plesser reports seeing a kernel panic running configure
+ # due to this test when exec_disable_arg_limit is 1 on Tru64. It is not
+ # nice to cause kernel panics so let's avoid the loop below.
+ # First set a reasonable default.
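+ # (Editor's note, illustrative -- not upstream: platforms without a
+ # special case fall into the '*)' branch below, which first tries
+ #   getconf ARG_MAX
+ # and keeps 3/4 of the result; failing that, it doubles a test string
+ # until 'env echo' breaks and then halves the last working length.)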
+ lt_cv_sys_max_cmd_len=16384
+ #
+ if test -x /sbin/sysconfig; then
+ case `/sbin/sysconfig -q proc exec_disable_arg_limit` in
+ *1*) lt_cv_sys_max_cmd_len=-1 ;;
+ esac
+ fi
+ ;;
+ sco3.2v5*)
+ lt_cv_sys_max_cmd_len=102400
+ ;;
+ sysv5* | sco5v6* | sysv4.2uw2*)
+ kargmax=`grep ARG_MAX /etc/conf/cf.d/stune 2>/dev/null`
+ if test -n "$kargmax"; then
+ lt_cv_sys_max_cmd_len=`echo $kargmax | sed 's/.*[[ ]]//'`
+ else
+ lt_cv_sys_max_cmd_len=32768
+ fi
+ ;;
+ *)
+ lt_cv_sys_max_cmd_len=`(getconf ARG_MAX) 2> /dev/null`
+ if test -n "$lt_cv_sys_max_cmd_len"; then
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 4`
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \* 3`
+ else
+ # Make teststring a little bigger before we do anything with it.
+ # A 1K string should be a reasonable start.
+ for i in 1 2 3 4 5 6 7 8 ; do
+ teststring=$teststring$teststring
+ done
+ SHELL=${SHELL-${CONFIG_SHELL-/bin/sh}}
+ # If test is not a shell built-in, we'll probably end up computing a
+ # maximum length that is only half of the actual maximum length, but
+ # we can't tell.
+ while { test "X"`env echo "$teststring$teststring" 2>/dev/null` \
+ = "X$teststring$teststring"; } >/dev/null 2>&1 &&
+ test $i != 17 # 1/2 MB should be enough
+ do
+ i=`expr $i + 1`
+ teststring=$teststring$teststring
+ done
+ # Only check the string length outside the loop.
+ lt_cv_sys_max_cmd_len=`expr "X$teststring" : ".*" 2>&1`
+ teststring=
+ # Add a significant safety factor because C++ compilers can tack on
+ # massive amounts of additional arguments before passing them to the
+ # linker. It appears as though 1/2 is a usable value.
+ lt_cv_sys_max_cmd_len=`expr $lt_cv_sys_max_cmd_len \/ 2`
+ fi
+ ;;
+ esac
+])
+if test -n "$lt_cv_sys_max_cmd_len" ; then
+ AC_MSG_RESULT($lt_cv_sys_max_cmd_len)
+else
+ AC_MSG_RESULT(none)
+fi
+max_cmd_len=$lt_cv_sys_max_cmd_len
+_LT_DECL([], [max_cmd_len], [0],
+ [What is the maximum length of a command?])
+])# LT_CMD_MAX_LEN
+
+# Old name:
+AU_ALIAS([AC_LIBTOOL_SYS_MAX_CMD_LEN], [LT_CMD_MAX_LEN])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_LIBTOOL_SYS_MAX_CMD_LEN], [])
+
+
+# _LT_HEADER_DLFCN
+# ----------------
+m4_defun([_LT_HEADER_DLFCN],
+[AC_CHECK_HEADERS([dlfcn.h], [], [], [AC_INCLUDES_DEFAULT])dnl
+])# _LT_HEADER_DLFCN
+
+
+# _LT_TRY_DLOPEN_SELF (ACTION-IF-TRUE, ACTION-IF-TRUE-W-USCORE,
+# ACTION-IF-FALSE, ACTION-IF-CROSS-COMPILING)
+# ----------------------------------------------------------------
+m4_defun([_LT_TRY_DLOPEN_SELF],
+[m4_require([_LT_HEADER_DLFCN])dnl
+if test "$cross_compiling" = yes; then :
+ [$4]
+else
+ lt_dlunknown=0; lt_dlno_uscore=1; lt_dlneed_uscore=2
+ lt_status=$lt_dlunknown
+ cat > conftest.$ac_ext <<_LT_EOF
+[#line $LINENO "configure"
+#include "confdefs.h"
+
+#if HAVE_DLFCN_H
+#include <dlfcn.h>
+#endif
+
+#include <stdio.h>
+
+#ifdef RTLD_GLOBAL
+# define LT_DLGLOBAL RTLD_GLOBAL
+#else
+# ifdef DL_GLOBAL
+# define LT_DLGLOBAL DL_GLOBAL
+# else
+# define LT_DLGLOBAL 0
+# endif
+#endif
+
+/* We may have to define LT_DLLAZY_OR_NOW in the command line if we
+ find out it does not work in some platform. */
+#ifndef LT_DLLAZY_OR_NOW
+# ifdef RTLD_LAZY
+# define LT_DLLAZY_OR_NOW RTLD_LAZY
+# else
+# ifdef DL_LAZY
+# define LT_DLLAZY_OR_NOW DL_LAZY
+# else
+# ifdef RTLD_NOW
+# define LT_DLLAZY_OR_NOW RTLD_NOW
+# else
+# ifdef DL_NOW
+# define LT_DLLAZY_OR_NOW DL_NOW
+# else
+# define LT_DLLAZY_OR_NOW 0
+# endif
+# endif
+# endif
+# endif
+#endif
+
+/* When -fvisibility=hidden is used, assume the code has been annotated
+ correspondingly for the symbols needed.
*/ +#if defined(__GNUC__) && (((__GNUC__ == 3) && (__GNUC_MINOR__ >= 3)) || (__GNUC__ > 3)) +int fnord () __attribute__((visibility("default"))); +#endif + +int fnord () { return 42; } +int main () +{ + void *self = dlopen (0, LT_DLGLOBAL|LT_DLLAZY_OR_NOW); + int status = $lt_dlunknown; + + if (self) + { + if (dlsym (self,"fnord")) status = $lt_dlno_uscore; + else + { + if (dlsym( self,"_fnord")) status = $lt_dlneed_uscore; + else puts (dlerror ()); + } + /* dlclose (self); */ + } + else + puts (dlerror ()); + + return status; +}] +_LT_EOF + if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} 2>/dev/null; then + (./conftest; exit; ) >&AS_MESSAGE_LOG_FD 2>/dev/null + lt_status=$? + case x$lt_status in + x$lt_dlno_uscore) $1 ;; + x$lt_dlneed_uscore) $2 ;; + x$lt_dlunknown|x*) $3 ;; + esac + else : + # compilation failed + $3 + fi +fi +rm -fr conftest* +])# _LT_TRY_DLOPEN_SELF + + +# LT_SYS_DLOPEN_SELF +# ------------------ +AC_DEFUN([LT_SYS_DLOPEN_SELF], +[m4_require([_LT_HEADER_DLFCN])dnl +if test "x$enable_dlopen" != xyes; then + enable_dlopen=unknown + enable_dlopen_self=unknown + enable_dlopen_self_static=unknown +else + lt_cv_dlopen=no + lt_cv_dlopen_libs= + + case $host_os in + beos*) + lt_cv_dlopen="load_add_on" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ;; + + mingw* | pw32* | cegcc*) + lt_cv_dlopen="LoadLibrary" + lt_cv_dlopen_libs= + ;; + + cygwin*) + lt_cv_dlopen="dlopen" + lt_cv_dlopen_libs= + ;; + + darwin*) + # if libdl is installed we need to link against it + AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"],[ + lt_cv_dlopen="dyld" + lt_cv_dlopen_libs= + lt_cv_dlopen_self=yes + ]) + ;; + + *) + AC_CHECK_FUNC([shl_load], + [lt_cv_dlopen="shl_load"], + [AC_CHECK_LIB([dld], [shl_load], + [lt_cv_dlopen="shl_load" lt_cv_dlopen_libs="-ldld"], + [AC_CHECK_FUNC([dlopen], + [lt_cv_dlopen="dlopen"], + [AC_CHECK_LIB([dl], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-ldl"], + [AC_CHECK_LIB([svld], [dlopen], + [lt_cv_dlopen="dlopen" lt_cv_dlopen_libs="-lsvld"], + [AC_CHECK_LIB([dld], [dld_link], + [lt_cv_dlopen="dld_link" lt_cv_dlopen_libs="-ldld"]) + ]) + ]) + ]) + ]) + ]) + ;; + esac + + if test "x$lt_cv_dlopen" != xno; then + enable_dlopen=yes + else + enable_dlopen=no + fi + + case $lt_cv_dlopen in + dlopen) + save_CPPFLAGS="$CPPFLAGS" + test "x$ac_cv_header_dlfcn_h" = xyes && CPPFLAGS="$CPPFLAGS -DHAVE_DLFCN_H" + + save_LDFLAGS="$LDFLAGS" + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $export_dynamic_flag_spec\" + + save_LIBS="$LIBS" + LIBS="$lt_cv_dlopen_libs $LIBS" + + AC_CACHE_CHECK([whether a program can dlopen itself], + lt_cv_dlopen_self, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self=yes, lt_cv_dlopen_self=yes, + lt_cv_dlopen_self=no, lt_cv_dlopen_self=cross) + ]) + + if test "x$lt_cv_dlopen_self" = xyes; then + wl=$lt_prog_compiler_wl eval LDFLAGS=\"\$LDFLAGS $lt_prog_compiler_static\" + AC_CACHE_CHECK([whether a statically linked program can dlopen itself], + lt_cv_dlopen_self_static, [dnl + _LT_TRY_DLOPEN_SELF( + lt_cv_dlopen_self_static=yes, lt_cv_dlopen_self_static=yes, + lt_cv_dlopen_self_static=no, lt_cv_dlopen_self_static=cross) + ]) + fi + + CPPFLAGS="$save_CPPFLAGS" + LDFLAGS="$save_LDFLAGS" + LIBS="$save_LIBS" + ;; + esac + + case $lt_cv_dlopen_self in + yes|no) enable_dlopen_self=$lt_cv_dlopen_self ;; + *) enable_dlopen_self=unknown ;; + esac + + case $lt_cv_dlopen_self_static in + yes|no) enable_dlopen_self_static=$lt_cv_dlopen_self_static ;; + *) enable_dlopen_self_static=unknown ;; + esac +fi 
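+# (Editor's note, illustrative -- not upstream: the conftest program above
+# exits 1 when dlsym finds "fnord" as-is, 2 when only "_fnord" resolves,
+# i.e. an underscore prefix is required, and 0 when neither does; the two
+# cache checks map those exit codes onto lt_cv_dlopen_self and its
+# statically linked variant.)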
+_LT_DECL([dlopen_support], [enable_dlopen], [0], + [Whether dlopen is supported]) +_LT_DECL([dlopen_self], [enable_dlopen_self], [0], + [Whether dlopen of programs is supported]) +_LT_DECL([dlopen_self_static], [enable_dlopen_self_static], [0], + [Whether dlopen of statically linked programs is supported]) +])# LT_SYS_DLOPEN_SELF + +# Old name: +AU_ALIAS([AC_LIBTOOL_DLOPEN_SELF], [LT_SYS_DLOPEN_SELF]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN_SELF], []) + + +# _LT_COMPILER_C_O([TAGNAME]) +# --------------------------- +# Check to see if options -c and -o are simultaneously supported by compiler. +# This macro does not hard code the compiler like AC_PROG_CC_C_O. +m4_defun([_LT_COMPILER_C_O], +[m4_require([_LT_DECL_SED])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_CACHE_CHECK([if $compiler supports -c -o file.$ac_objext], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)], + [_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=no + $RM -r conftest 2>/dev/null + mkdir conftest + cd conftest + mkdir out + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + lt_compiler_flag="-o out/conftest2.$ac_objext" + # Insert the option either (1) after the last *FLAGS variable, or + # (2) before a word containing "conftest.", or (3) at the end. + # Note that $ac_compile itself does not contain backslashes and begins + # with a dollar sign (not a hyphen), so the echo should work correctly. + lt_compile=`echo "$ac_compile" | $SED \ + -e 's:.*FLAGS}\{0,1\} :&$lt_compiler_flag :; t' \ + -e 's: [[^ ]]*conftest\.: $lt_compiler_flag&:; t' \ + -e 's:$: $lt_compiler_flag:'` + (eval echo "\"\$as_me:$LINENO: $lt_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$lt_compile" 2>out/conftest.err) + ac_status=$? + cat out/conftest.err >&AS_MESSAGE_LOG_FD + echo "$as_me:$LINENO: \$? = $ac_status" >&AS_MESSAGE_LOG_FD + if (exit $ac_status) && test -s out/conftest2.$ac_objext + then + # The compiler can only warn and ignore the option if not recognized + # So say no if there are warnings + $ECHO "$_lt_compiler_boilerplate" | $SED '/^$/d' > out/conftest.exp + $SED '/^$/d; /^ *+/d' out/conftest.err >out/conftest.er2 + if test ! -s out/conftest.er2 || diff out/conftest.exp out/conftest.er2 >/dev/null; then + _LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + fi + fi + chmod u+w . 2>&AS_MESSAGE_LOG_FD + $RM conftest* + # SGI C++ compiler will create directory out/ii_files/ for + # template instantiation + test -d out/ii_files && $RM out/ii_files/* && rmdir out/ii_files + $RM out/* && rmdir out + cd .. 
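+ # (Editor's note, illustrative -- not upstream: the check above reduces
+ # to whether
+ #   cc -c conftest.c -o out/conftest2.o
+ # succeeds without new diagnostics; some older compilers reject -c
+ # combined with -o and insist on naming the object file themselves.)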
+ $RM -r conftest + $RM conftest* +]) +_LT_TAGDECL([compiler_c_o], [lt_cv_prog_compiler_c_o], [1], + [Does compiler simultaneously support -c and -o options?]) +])# _LT_COMPILER_C_O + + +# _LT_COMPILER_FILE_LOCKS([TAGNAME]) +# ---------------------------------- +# Check to see if we can do hard links to lock some files if needed +m4_defun([_LT_COMPILER_FILE_LOCKS], +[m4_require([_LT_ENABLE_LOCK])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +_LT_COMPILER_C_O([$1]) + +hard_links="nottested" +if test "$_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)" = no && test "$need_locks" != no; then + # do not overwrite the value of need_locks provided by the user + AC_MSG_CHECKING([if we can lock with hard links]) + hard_links=yes + $RM conftest* + ln conftest.a conftest.b 2>/dev/null && hard_links=no + touch conftest.a + ln conftest.a conftest.b 2>&5 || hard_links=no + ln conftest.a conftest.b 2>/dev/null && hard_links=no + AC_MSG_RESULT([$hard_links]) + if test "$hard_links" = no; then + AC_MSG_WARN([`$CC' does not support `-c -o', so `make -j' may be unsafe]) + need_locks=warn + fi +else + need_locks=no +fi +_LT_DECL([], [need_locks], [1], [Must we lock files when doing compilation?]) +])# _LT_COMPILER_FILE_LOCKS + + +# _LT_CHECK_OBJDIR +# ---------------- +m4_defun([_LT_CHECK_OBJDIR], +[AC_CACHE_CHECK([for objdir], [lt_cv_objdir], +[rm -f .libs 2>/dev/null +mkdir .libs 2>/dev/null +if test -d .libs; then + lt_cv_objdir=.libs +else + # MS-DOS does not allow filenames that begin with a dot. + lt_cv_objdir=_libs +fi +rmdir .libs 2>/dev/null]) +objdir=$lt_cv_objdir +_LT_DECL([], [objdir], [0], + [The name of the directory that contains temporary libtool files])dnl +m4_pattern_allow([LT_OBJDIR])dnl +AC_DEFINE_UNQUOTED(LT_OBJDIR, "$lt_cv_objdir/", + [Define to the sub-directory in which libtool stores uninstalled libraries.]) +])# _LT_CHECK_OBJDIR + + +# _LT_LINKER_HARDCODE_LIBPATH([TAGNAME]) +# -------------------------------------- +# Check hardcoding attributes. +m4_defun([_LT_LINKER_HARDCODE_LIBPATH], +[AC_MSG_CHECKING([how to hardcode library paths into programs]) +_LT_TAGVAR(hardcode_action, $1)= +if test -n "$_LT_TAGVAR(hardcode_libdir_flag_spec, $1)" || + test -n "$_LT_TAGVAR(runpath_var, $1)" || + test "X$_LT_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then + + # We can hardcode non-existent directories. + if test "$_LT_TAGVAR(hardcode_direct, $1)" != no && + # If the only mechanism to avoid hardcoding is shlibpath_var, we + # have to relink, otherwise we might link with an installed library + # when we should be linking with a yet-to-be-installed one + ## test "$_LT_TAGVAR(hardcode_shlibpath_var, $1)" != no && + test "$_LT_TAGVAR(hardcode_minus_L, $1)" != no; then + # Linking always hardcodes the temporary library directory. + _LT_TAGVAR(hardcode_action, $1)=relink + else + # We can link without hardcoding, and we can hardcode nonexisting dirs. + _LT_TAGVAR(hardcode_action, $1)=immediate + fi +else + # We cannot hardcode anything, or else we can only hardcode existing + # directories. 
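+ # (Editor's note, illustrative -- not upstream: hardcode_action ends up
+ # as 'immediate' when the run path can be baked in at first link,
+ # 'relink' when the library must be relinked against its final path at
+ # install time, or 'unsupported' when only the run-time shared-library
+ # path variable can help.)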
+ _LT_TAGVAR(hardcode_action, $1)=unsupported
+fi
+AC_MSG_RESULT([$_LT_TAGVAR(hardcode_action, $1)])
+
+if test "$_LT_TAGVAR(hardcode_action, $1)" = relink ||
+ test "$_LT_TAGVAR(inherit_rpath, $1)" = yes; then
+ # Fast installation is not supported
+ enable_fast_install=no
+elif test "$shlibpath_overrides_runpath" = yes ||
+ test "$enable_shared" = no; then
+ # Fast installation is not necessary
+ enable_fast_install=needless
+fi
+_LT_TAGDECL([], [hardcode_action], [0],
+ [How to hardcode a shared library path into an executable])
+])# _LT_LINKER_HARDCODE_LIBPATH
+
+
+# _LT_CMD_STRIPLIB
+# ----------------
+m4_defun([_LT_CMD_STRIPLIB],
+[m4_require([_LT_DECL_EGREP])
+striplib=
+old_striplib=
+AC_MSG_CHECKING([whether stripping libraries is possible])
+if test -n "$STRIP" && $STRIP -V 2>&1 | $GREP "GNU strip" >/dev/null; then
+ test -z "$old_striplib" && old_striplib="$STRIP --strip-debug"
+ test -z "$striplib" && striplib="$STRIP --strip-unneeded"
+ AC_MSG_RESULT([yes])
+else
+# FIXME - insert some real tests, host_os isn't really good enough
+ case $host_os in
+ darwin*)
+ if test -n "$STRIP" ; then
+ striplib="$STRIP -x"
+ old_striplib="$STRIP -S"
+ AC_MSG_RESULT([yes])
+ else
+ AC_MSG_RESULT([no])
+ fi
+ ;;
+ *)
+ AC_MSG_RESULT([no])
+ ;;
+ esac
+fi
+_LT_DECL([], [old_striplib], [1], [Commands to strip libraries])
+_LT_DECL([], [striplib], [1])
+])# _LT_CMD_STRIPLIB
+
+
+# _LT_SYS_DYNAMIC_LINKER([TAG])
+# -----------------------------
+# PORTME Fill in your ld.so characteristics
+m4_defun([_LT_SYS_DYNAMIC_LINKER],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+m4_require([_LT_DECL_OBJDUMP])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_CHECK_SHELL_FEATURES])dnl
+AC_MSG_CHECKING([dynamic linker characteristics])
+m4_if([$1],
+ [], [
+if test "$GCC" = yes; then
+ case $host_os in
+ darwin*) lt_awk_arg="/^libraries:/,/LR/" ;;
+ *) lt_awk_arg="/^libraries:/" ;;
+ esac
+ case $host_os in
+ mingw* | cegcc*) lt_sed_strip_eq="s,=\([[A-Za-z]]:\),\1,g" ;;
+ *) lt_sed_strip_eq="s,=/,/,g" ;;
+ esac
+ lt_search_path_spec=`$CC -print-search-dirs | awk $lt_awk_arg | $SED -e "s/^libraries://" -e $lt_sed_strip_eq`
+ case $lt_search_path_spec in
+ *\;*)
+ # if the path contains ";" then we assume it to be the separator
+ # otherwise default to the standard path separator (i.e. ":") - it is
+ # assumed that no part of a normal pathname contains ";" but that should
+ # be okay in the real world where ";" in dirpaths is itself problematic.
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED 's/;/ /g'`
+ ;;
+ *)
+ lt_search_path_spec=`$ECHO "$lt_search_path_spec" | $SED "s/$PATH_SEPARATOR/ /g"`
+ ;;
+ esac
+ # Ok, now we have the path, separated by spaces, we can step through it
+ # and add multilib dir if necessary.
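+ # (Editor's note, illustrative -- not upstream: on multilib systems
+ #   gcc -print-multi-os-directory
+ # prints a suffix such as '../lib64' which the loop below appends to
+ # each search directory, keeping 32- and 64-bit libraries apart.)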
+ lt_tmp_lt_search_path_spec= + lt_multi_os_dir=`$CC $CPPFLAGS $CFLAGS $LDFLAGS -print-multi-os-directory 2>/dev/null` + for lt_sys_path in $lt_search_path_spec; do + if test -d "$lt_sys_path/$lt_multi_os_dir"; then + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path/$lt_multi_os_dir" + else + test -d "$lt_sys_path" && \ + lt_tmp_lt_search_path_spec="$lt_tmp_lt_search_path_spec $lt_sys_path" + fi + done + lt_search_path_spec=`$ECHO "$lt_tmp_lt_search_path_spec" | awk ' +BEGIN {RS=" "; FS="/|\n";} { + lt_foo=""; + lt_count=0; + for (lt_i = NF; lt_i > 0; lt_i--) { + if ($lt_i != "" && $lt_i != ".") { + if ($lt_i == "..") { + lt_count++; + } else { + if (lt_count == 0) { + lt_foo="/" $lt_i lt_foo; + } else { + lt_count--; + } + } + } + } + if (lt_foo != "") { lt_freq[[lt_foo]]++; } + if (lt_freq[[lt_foo]] == 1) { print lt_foo; } +}'` + # AWK program above erroneously prepends '/' to C:/dos/paths + # for these hosts. + case $host_os in + mingw* | cegcc*) lt_search_path_spec=`$ECHO "$lt_search_path_spec" |\ + $SED 's,/\([[A-Za-z]]:\),\1,g'` ;; + esac + sys_lib_search_path_spec=`$ECHO "$lt_search_path_spec" | $lt_NL2SP` +else + sys_lib_search_path_spec="/lib /usr/lib /usr/local/lib" +fi]) +library_names_spec= +libname_spec='lib$name' +soname_spec= +shrext_cmds=".so" +postinstall_cmds= +postuninstall_cmds= +finish_cmds= +finish_eval= +shlibpath_var= +shlibpath_overrides_runpath=unknown +version_type=none +dynamic_linker="$host_os ld.so" +sys_lib_dlsearch_path_spec="/lib /usr/lib" +need_lib_prefix=unknown +hardcode_into_libs=no + +# when you set need_version to no, make sure it does not cause -set_version +# flags to be left without arguments +need_version=unknown + +case $host_os in +aix3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname.a' + shlibpath_var=LIBPATH + + # AIX 3 has no versioning support, so we append a major version to the name. + soname_spec='${libname}${release}${shared_ext}$major' + ;; + +aix[[4-9]]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + hardcode_into_libs=yes + if test "$host_cpu" = ia64; then + # AIX 5 supports IA64 + library_names_spec='${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext}$versuffix $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + else + # With GCC up to 2.95.x, collect2 would create an import file + # for dependence libraries. The import file would start with + # the line `#! .'. This would cause the generated library to + # depend on `.', always an invalid library. This was fixed in + # development snapshots of GCC prior to 3.0. + case $host_os in + aix4 | aix4.[[01]] | aix4.[[01]].*) + if { echo '#if __GNUC__ > 2 || (__GNUC__ == 2 && __GNUC_MINOR__ >= 97)' + echo ' yes ' + echo '#endif'; } | ${CC} -E - | $GREP yes > /dev/null; then + : + else + can_build_shared=no + fi + ;; + esac + # AIX (on Power*) has no versioning support, so currently we can not hardcode correct + # soname into executable. Probably we can add versioning support to + # collect2, so additional links can be useful in future. + if test "$aix_use_runtimelinking" = yes; then + # If using run time linking (on AIX 4.2 or later) use lib.so + # instead of lib.a to let people know that these are not + # typical AIX shared libraries. 
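+ # (Editor's note, illustrative -- not upstream: with run time linking
+ # the spec below yields versioned libfoo.so names plus a plain
+ # libfoo.so link; without it, the else branch keeps the shared object
+ # inside a classic libfoo.a archive, as is traditional on AIX.)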
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + else + # We preserve .a as extension for shared libraries through AIX4.2 + # and later when we are not doing run time linking. + library_names_spec='${libname}${release}.a $libname.a' + soname_spec='${libname}${release}${shared_ext}$major' + fi + shlibpath_var=LIBPATH + fi + ;; + +amigaos*) + case $host_cpu in + powerpc) + # Since July 2007 AmigaOS4 officially supports .so libraries. + # When compiling the executable, add -use-dynld -Lsobjs: to the compileline. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + ;; + m68k) + library_names_spec='$libname.ixlibrary $libname.a' + # Create ${libname}_ixlibrary.a entries in /sys/libs. + finish_eval='for lib in `ls $libdir/*.ixlibrary 2>/dev/null`; do libname=`func_echo_all "$lib" | $SED '\''s%^.*/\([[^/]]*\)\.ixlibrary$%\1%'\''`; test $RM /sys/libs/${libname}_ixlibrary.a; $show "cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a"; cd /sys/libs && $LN_S $lib ${libname}_ixlibrary.a || exit 1; done' + ;; + esac + ;; + +beos*) + library_names_spec='${libname}${shared_ext}' + dynamic_linker="$host_os ld.so" + shlibpath_var=LIBRARY_PATH + ;; + +bsdi[[45]]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/shlib /usr/lib /usr/X11/lib /usr/contrib/lib /lib /usr/local/lib" + sys_lib_dlsearch_path_spec="/shlib /usr/lib /usr/local/lib" + # the default ld.so.conf also contains /usr/contrib/lib and + # /usr/X11R6/lib (/usr/X11 is a link to /usr/X11R6), but let us allow + # libtool to hard-code these into programs + ;; + +cygwin* | mingw* | pw32* | cegcc*) + version_type=windows + shrext_cmds=".dll" + need_version=no + need_lib_prefix=no + + case $GCC,$cc_basename in + yes,*) + # gcc + library_names_spec='$libname.dll.a' + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname~ + chmod a+x \$dldir/$dlname~ + if test -n '\''$stripme'\'' && test -n '\''$striplib'\''; then + eval '\''$striplib \$dldir/$dlname'\'' || exit \$?; + fi' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. 
$file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + + case $host_os in + cygwin*) + # Cygwin DLLs use 'cyg' prefix rather than 'lib' + soname_spec='`echo ${libname} | sed -e 's/^lib/cyg/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' +m4_if([$1], [],[ + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/lib/w32api"]) + ;; + mingw* | cegcc*) + # MinGW DLLs use traditional 'lib' prefix + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + pw32*) + # pw32 DLLs use 'pw' prefix rather than 'lib' + library_names_spec='`echo ${libname} | sed -e 's/^lib/pw/'``echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + ;; + esac + dynamic_linker='Win32 ld.exe' + ;; + + *,cl*) + # Native MSVC + libname_spec='$name' + soname_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext}' + library_names_spec='${libname}.dll.lib' + + case $build_os in + mingw*) + sys_lib_search_path_spec= + lt_save_ifs=$IFS + IFS=';' + for lt_path in $LIB + do + IFS=$lt_save_ifs + # Let DOS variable expansion print the short 8.3 style file name. + lt_path=`cd "$lt_path" 2>/dev/null && cmd //C "for %i in (".") do @echo %~si"` + sys_lib_search_path_spec="$sys_lib_search_path_spec $lt_path" + done + IFS=$lt_save_ifs + # Convert to MSYS style. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | sed -e 's|\\\\|/|g' -e 's| \\([[a-zA-Z]]\\):| /\\1|g' -e 's|^ ||'` + ;; + cygwin*) + # Convert to unix form, then to dos form, then back to unix form + # but this time dos style (no spaces!) so that the unix form looks + # like /cygdrive/c/PROGRA~1:/cygdr... + sys_lib_search_path_spec=`cygpath --path --unix "$LIB"` + sys_lib_search_path_spec=`cygpath --path --dos "$sys_lib_search_path_spec" 2>/dev/null` + sys_lib_search_path_spec=`cygpath --path --unix "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + ;; + *) + sys_lib_search_path_spec="$LIB" + if $ECHO "$sys_lib_search_path_spec" | [$GREP ';[c-zC-Z]:/' >/dev/null]; then + # It is most probably a Windows format PATH. + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e 's/;/ /g'` + else + sys_lib_search_path_spec=`$ECHO "$sys_lib_search_path_spec" | $SED -e "s/$PATH_SEPARATOR/ /g"` + fi + # FIXME: find the short name or the path components, as spaces are + # common. (e.g. "Program Files" -> "PROGRA~1") + ;; + esac + + # DLL is installed to $(libdir)/../bin by postinstall_cmds + postinstall_cmds='base_file=`basename \${file}`~ + dlpath=`$SHELL 2>&1 -c '\''. $dir/'\''\${base_file}'\''i; echo \$dlname'\''`~ + dldir=$destdir/`dirname \$dlpath`~ + test -d \$dldir || mkdir -p \$dldir~ + $install_prog $dir/$dlname \$dldir/$dlname' + postuninstall_cmds='dldll=`$SHELL 2>&1 -c '\''. $file; echo \$dlname'\''`~ + dlpath=$dir/\$dldll~ + $RM \$dlpath' + shlibpath_overrides_runpath=yes + dynamic_linker='Win32 link.exe' + ;; + + *) + # Assume MSVC wrapper + library_names_spec='${libname}`echo ${release} | $SED -e 's/[[.]]/-/g'`${versuffix}${shared_ext} $libname.lib' + dynamic_linker='Win32 ld.exe' + ;; + esac + # FIXME: first we should search . 
and the directory the executable is in + shlibpath_var=PATH + ;; + +darwin* | rhapsody*) + dynamic_linker="$host_os dyld" + version_type=darwin + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${major}$shared_ext ${libname}$shared_ext' + soname_spec='${libname}${release}${major}$shared_ext' + shlibpath_overrides_runpath=yes + shlibpath_var=DYLD_LIBRARY_PATH + shrext_cmds='`test .$module = .yes && echo .so || echo .dylib`' +m4_if([$1], [],[ + sys_lib_search_path_spec="$sys_lib_search_path_spec /usr/local/lib"]) + sys_lib_dlsearch_path_spec='/usr/local/lib /lib /usr/lib' + ;; + +dgux*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname$shared_ext' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +freebsd* | dragonfly*) + # DragonFly does not have aout. When/if they implement a new + # versioning mechanism, adjust this. + if test -x /usr/bin/objformat; then + objformat=`/usr/bin/objformat` + else + case $host_os in + freebsd[[23]].*) objformat=aout ;; + *) objformat=elf ;; + esac + fi + version_type=freebsd-$objformat + case $version_type in + freebsd-elf*) + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + need_version=no + need_lib_prefix=no + ;; + freebsd-*) + library_names_spec='${libname}${release}${shared_ext}$versuffix $libname${shared_ext}$versuffix' + need_version=yes + ;; + esac + shlibpath_var=LD_LIBRARY_PATH + case $host_os in + freebsd2.*) + shlibpath_overrides_runpath=yes + ;; + freebsd3.[[01]]* | freebsdelf3.[[01]]*) + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + freebsd3.[[2-9]]* | freebsdelf3.[[2-9]]* | \ + freebsd4.[[0-5]] | freebsdelf4.[[0-5]] | freebsd4.1.1 | freebsdelf4.1.1) + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + *) # from 4.6 on, and DragonFly + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + esac + ;; + +gnu*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +haiku*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + dynamic_linker="$host_os runtime_loader" + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}${major} ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LIBRARY_PATH + shlibpath_overrides_runpath=yes + sys_lib_dlsearch_path_spec='/boot/home/config/lib /boot/common/lib /boot/system/lib' + hardcode_into_libs=yes + ;; + +hpux9* | hpux10* | hpux11*) + # Give a soname corresponding to the major version so that dld.sl refuses to + # link against other versions. + version_type=sunos + need_lib_prefix=no + need_version=no + case $host_cpu in + ia64*) + shrext_cmds='.so' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.so" + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. 
+ library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + if test "X$HPUX_IA64_MODE" = X32; then + sys_lib_search_path_spec="/usr/lib/hpux32 /usr/local/lib/hpux32 /usr/local/lib" + else + sys_lib_search_path_spec="/usr/lib/hpux64 /usr/local/lib/hpux64" + fi + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + hppa*64*) + shrext_cmds='.sl' + hardcode_into_libs=yes + dynamic_linker="$host_os dld.sl" + shlibpath_var=LD_LIBRARY_PATH # How should we handle SHLIB_PATH + shlibpath_overrides_runpath=yes # Unless +noenvvar is specified. + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + sys_lib_search_path_spec="/usr/lib/pa20_64 /usr/ccs/lib/pa20_64" + sys_lib_dlsearch_path_spec=$sys_lib_search_path_spec + ;; + *) + shrext_cmds='.sl' + dynamic_linker="$host_os dld.sl" + shlibpath_var=SHLIB_PATH + shlibpath_overrides_runpath=no # +s is required to enable SHLIB_PATH + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + ;; + esac + # HP-UX runs *really* slowly unless shared libraries are mode 555, ... + postinstall_cmds='chmod 555 $lib' + # or fails outright, so override atomically: + install_override_mode=555 + ;; + +interix[[3-9]]*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='Interix 3.x ld.so.1 (PE, like ELF)' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +irix5* | irix6* | nonstopux*) + case $host_os in + nonstopux*) version_type=nonstopux ;; + *) + if test "$lt_cv_prog_gnu_ld" = yes; then + version_type=linux # correct to gnu/linux during the next big refactor + else + version_type=irix + fi ;; + esac + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${release}${shared_ext} $libname${shared_ext}' + case $host_os in + irix5* | nonstopux*) + libsuff= shlibsuff= + ;; + *) + case $LD in # libtool.m4 will add one of these switches to LD + *-32|*"-32 "|*-melf32bsmip|*"-melf32bsmip ") + libsuff= shlibsuff= libmagic=32-bit;; + *-n32|*"-n32 "|*-melf32bmipn32|*"-melf32bmipn32 ") + libsuff=32 shlibsuff=N32 libmagic=N32;; + *-64|*"-64 "|*-melf64bmip|*"-melf64bmip ") + libsuff=64 shlibsuff=64 libmagic=64-bit;; + *) libsuff= shlibsuff= libmagic=never-match;; + esac + ;; + esac + shlibpath_var=LD_LIBRARY${shlibsuff}_PATH + shlibpath_overrides_runpath=no + sys_lib_search_path_spec="/usr/lib${libsuff} /lib${libsuff} /usr/local/lib${libsuff}" + sys_lib_dlsearch_path_spec="/usr/lib${libsuff} /lib${libsuff}" + hardcode_into_libs=yes + ;; + +# No shared lib support for Linux oldld, aout, or coff. +linux*oldld* | linux*aout* | linux*coff*) + dynamic_linker=no + ;; + +# This must be glibc/ELF. 
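+# (Editor's note, illustrative -- not upstream: for a library linked with
+#   libtool -version-info 3:2:1
+# the specs below expand to libfoo.so.2.1.2 with soname libfoo.so.2,
+# i.e. the major number is current minus age.)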
+linux* | k*bsd*-gnu | kopensolaris*-gnu) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -n $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + + # Some binutils ld are patched to set DT_RUNPATH + AC_CACHE_VAL([lt_cv_shlibpath_overrides_runpath], + [lt_cv_shlibpath_overrides_runpath=no + save_LDFLAGS=$LDFLAGS + save_libdir=$libdir + eval "libdir=/foo; wl=\"$_LT_TAGVAR(lt_prog_compiler_wl, $1)\"; \ + LDFLAGS=\"\$LDFLAGS $_LT_TAGVAR(hardcode_libdir_flag_spec, $1)\"" + AC_LINK_IFELSE([AC_LANG_PROGRAM([],[])], + [AS_IF([ ($OBJDUMP -p conftest$ac_exeext) 2>/dev/null | grep "RUNPATH.*$libdir" >/dev/null], + [lt_cv_shlibpath_overrides_runpath=yes])]) + LDFLAGS=$save_LDFLAGS + libdir=$save_libdir + ]) + shlibpath_overrides_runpath=$lt_cv_shlibpath_overrides_runpath + + # This implies no fast_install, which is unacceptable. + # Some rework will be needed to allow for fast_install + # before this can be enabled. + hardcode_into_libs=yes + + # Append ld.so.conf contents to the search path + if test -f /etc/ld.so.conf; then + lt_ld_extra=`awk '/^include / { system(sprintf("cd /etc; cat %s 2>/dev/null", \[$]2)); skip = 1; } { if (!skip) print \[$]0; skip = 0; }' < /etc/ld.so.conf | $SED -e 's/#.*//;/^[ ]*hwcap[ ]/d;s/[:, ]/ /g;s/=[^=]*$//;s/=[^= ]* / /g;s/"//g;/^$/d' | tr '\n' ' '` + sys_lib_dlsearch_path_spec="/lib /usr/lib $lt_ld_extra" + fi + + # We used to test for /lib/ld.so.1 and disable shared libraries on + # powerpc, because MkLinux only supported shared libraries with the + # GNU dynamic linker. Since this was broken with cross compilers, + # most powerpc-linux boxes support dynamic linking these days and + # people can always --disable-shared, the test was removed, and we + # assume the GNU/Linux dynamic linker is in use. 
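+ # (Editor's note, illustrative -- not upstream: the awk/sed pipeline
+ # above flattens /etc/ld.so.conf and the files it includes into one
+ # space-separated line, dropping comments and hwcap entries, e.g.
+ #   sys_lib_dlsearch_path_spec="/lib /usr/lib /usr/lib64"
+ # on a typical distribution.)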
+ dynamic_linker='GNU/Linux ld.so' + ;; + +netbsdelf*-gnu) + version_type=linux + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='NetBSD ld.elf_so' + ;; + +netbsd*) + version_type=sunos + need_lib_prefix=no + need_version=no + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + dynamic_linker='NetBSD (a.out) ld.so' + else + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major ${libname}${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + dynamic_linker='NetBSD ld.elf_so' + fi + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + ;; + +newsos6) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + ;; + +*nto* | *qnx*) + version_type=qnx + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + dynamic_linker='ldqnx.so' + ;; + +openbsd*) + version_type=sunos + sys_lib_dlsearch_path_spec="/usr/lib" + need_lib_prefix=no + # Some older versions of OpenBSD (3.3 at least) *do* need versioned libs. 
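+ # (Editor's note, illustrative -- not upstream: 'version_type=sunos'
+ # produces only versioned libfoo.so.N.M names with no unversioned
+ # symlink, hence the explicit need_version handling for OpenBSD 3.3
+ # in the case below.)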
+ case $host_os in + openbsd3.3 | openbsd3.3.*) need_version=yes ;; + *) need_version=no ;; + esac + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/sbin" ldconfig -m $libdir' + shlibpath_var=LD_LIBRARY_PATH + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + case $host_os in + openbsd2.[[89]] | openbsd2.[[89]].*) + shlibpath_overrides_runpath=no + ;; + *) + shlibpath_overrides_runpath=yes + ;; + esac + else + shlibpath_overrides_runpath=yes + fi + ;; + +os2*) + libname_spec='$name' + shrext_cmds=".dll" + need_lib_prefix=no + library_names_spec='$libname${shared_ext} $libname.a' + dynamic_linker='OS/2 ld.exe' + shlibpath_var=LIBPATH + ;; + +osf3* | osf4* | osf5*) + version_type=osf + need_lib_prefix=no + need_version=no + soname_spec='${libname}${release}${shared_ext}$major' + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + sys_lib_search_path_spec="/usr/shlib /usr/ccs/lib /usr/lib/cmplrs/cc /usr/lib /usr/local/lib /var/shlib" + sys_lib_dlsearch_path_spec="$sys_lib_search_path_spec" + ;; + +rdos*) + dynamic_linker=no + ;; + +solaris*) + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + # ldd complains unless libraries are executable + postinstall_cmds='chmod +x $lib' + ;; + +sunos4*) + version_type=sunos + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${shared_ext}$versuffix' + finish_cmds='PATH="\$PATH:/usr/etc" ldconfig $libdir' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + if test "$with_gnu_ld" = yes; then + need_lib_prefix=no + fi + need_version=yes + ;; + +sysv4 | sysv4.3*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + case $host_vendor in + sni) + shlibpath_overrides_runpath=no + need_lib_prefix=no + runpath_var=LD_RUN_PATH + ;; + siemens) + need_lib_prefix=no + ;; + motorola) + need_lib_prefix=no + need_version=no + shlibpath_overrides_runpath=no + sys_lib_search_path_spec='/lib /usr/lib /usr/ccs/lib' + ;; + esac + ;; + +sysv4*MP*) + if test -d /usr/nec ;then + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='$libname${shared_ext}.$versuffix $libname${shared_ext}.$major $libname${shared_ext}' + soname_spec='$libname${shared_ext}.$major' + shlibpath_var=LD_LIBRARY_PATH + fi + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + version_type=freebsd-elf + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext} $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=yes + hardcode_into_libs=yes + if test "$with_gnu_ld" = yes; then + sys_lib_search_path_spec='/usr/local/lib 
/usr/gnu/lib /usr/ccs/lib /usr/lib /lib' + else + sys_lib_search_path_spec='/usr/ccs/lib /usr/lib' + case $host_os in + sco3.2v5*) + sys_lib_search_path_spec="$sys_lib_search_path_spec /lib" + ;; + esac + fi + sys_lib_dlsearch_path_spec='/usr/lib' + ;; + +tpf*) + # TPF is a cross-target only. Preferred cross-host = GNU/Linux. + version_type=linux # correct to gnu/linux during the next big refactor + need_lib_prefix=no + need_version=no + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + shlibpath_var=LD_LIBRARY_PATH + shlibpath_overrides_runpath=no + hardcode_into_libs=yes + ;; + +uts4*) + version_type=linux # correct to gnu/linux during the next big refactor + library_names_spec='${libname}${release}${shared_ext}$versuffix ${libname}${release}${shared_ext}$major $libname${shared_ext}' + soname_spec='${libname}${release}${shared_ext}$major' + shlibpath_var=LD_LIBRARY_PATH + ;; + +*) + dynamic_linker=no + ;; +esac +AC_MSG_RESULT([$dynamic_linker]) +test "$dynamic_linker" = no && can_build_shared=no + +variables_saved_for_relink="PATH $shlibpath_var $runpath_var" +if test "$GCC" = yes; then + variables_saved_for_relink="$variables_saved_for_relink GCC_EXEC_PREFIX COMPILER_PATH LIBRARY_PATH" +fi + +if test "${lt_cv_sys_lib_search_path_spec+set}" = set; then + sys_lib_search_path_spec="$lt_cv_sys_lib_search_path_spec" +fi +if test "${lt_cv_sys_lib_dlsearch_path_spec+set}" = set; then + sys_lib_dlsearch_path_spec="$lt_cv_sys_lib_dlsearch_path_spec" +fi + +_LT_DECL([], [variables_saved_for_relink], [1], + [Variables whose values should be saved in libtool wrapper scripts and + restored at link time]) +_LT_DECL([], [need_lib_prefix], [0], + [Do we need the "lib" prefix for modules?]) +_LT_DECL([], [need_version], [0], [Do we need a version for libraries?]) +_LT_DECL([], [version_type], [0], [Library versioning type]) +_LT_DECL([], [runpath_var], [0], [Shared library runtime path variable]) +_LT_DECL([], [shlibpath_var], [0],[Shared library path variable]) +_LT_DECL([], [shlibpath_overrides_runpath], [0], + [Is shlibpath searched before the hard-coded library search path?]) +_LT_DECL([], [libname_spec], [1], [Format of library name prefix]) +_LT_DECL([], [library_names_spec], [1], + [[List of archive names. First name is the real one, the rest are links. 
+ The last name is the one that the linker finds with -lNAME]]) +_LT_DECL([], [soname_spec], [1], + [[The coded name of the library, if different from the real name]]) +_LT_DECL([], [install_override_mode], [1], + [Permission mode override for installation of shared libraries]) +_LT_DECL([], [postinstall_cmds], [2], + [Command to use after installation of a shared archive]) +_LT_DECL([], [postuninstall_cmds], [2], + [Command to use after uninstallation of a shared archive]) +_LT_DECL([], [finish_cmds], [2], + [Commands used to finish a libtool library installation in a directory]) +_LT_DECL([], [finish_eval], [1], + [[As "finish_cmds", except a single script fragment to be evaled but + not shown]]) +_LT_DECL([], [hardcode_into_libs], [0], + [Whether we should hardcode library paths into libraries]) +_LT_DECL([], [sys_lib_search_path_spec], [2], + [Compile-time system search path for libraries]) +_LT_DECL([], [sys_lib_dlsearch_path_spec], [2], + [Run-time system search path for libraries]) +])# _LT_SYS_DYNAMIC_LINKER + + +# _LT_PATH_TOOL_PREFIX(TOOL) +# -------------------------- +# find a file program which can recognize shared library +AC_DEFUN([_LT_PATH_TOOL_PREFIX], +[m4_require([_LT_DECL_EGREP])dnl +AC_MSG_CHECKING([for $1]) +AC_CACHE_VAL(lt_cv_path_MAGIC_CMD, +[case $MAGIC_CMD in +[[\\/*] | ?:[\\/]*]) + lt_cv_path_MAGIC_CMD="$MAGIC_CMD" # Let the user override the test with a path. + ;; +*) + lt_save_MAGIC_CMD="$MAGIC_CMD" + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR +dnl $ac_dummy forces splitting on constant user-supplied paths. +dnl POSIX.2 word splitting is done only on the output of word expansions, +dnl not every word. This closes a longstanding sh security hole. + ac_dummy="m4_if([$2], , $PATH, [$2])" + for ac_dir in $ac_dummy; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + if test -f $ac_dir/$1; then + lt_cv_path_MAGIC_CMD="$ac_dir/$1" + if test -n "$file_magic_test_file"; then + case $deplibs_check_method in + "file_magic "*) + file_magic_regex=`expr "$deplibs_check_method" : "file_magic \(.*\)"` + MAGIC_CMD="$lt_cv_path_MAGIC_CMD" + if eval $file_magic_cmd \$file_magic_test_file 2> /dev/null | + $EGREP "$file_magic_regex" > /dev/null; then + : + else + cat <<_LT_EOF 1>&2 + +*** Warning: the command libtool uses to detect shared libraries, +*** $file_magic_cmd, produces output that libtool cannot recognize. +*** The result is that libtool may fail to recognize shared libraries +*** as such. This will affect the creation of libtool libraries that +*** depend on shared libraries, but programs linked with such libtool +*** libraries will work regardless of this problem. 
Nevertheless, you
+*** may want to report the problem to your system manager and/or to
+*** bug-libtool@gnu.org
+
+_LT_EOF
+ fi ;;
+ esac
+ fi
+ break
+ fi
+ done
+ IFS="$lt_save_ifs"
+ MAGIC_CMD="$lt_save_MAGIC_CMD"
+ ;;
+esac])
+MAGIC_CMD="$lt_cv_path_MAGIC_CMD"
+if test -n "$MAGIC_CMD"; then
+ AC_MSG_RESULT($MAGIC_CMD)
+else
+ AC_MSG_RESULT(no)
+fi
+_LT_DECL([], [MAGIC_CMD], [0],
+ [Used to examine libraries when file_magic_cmd begins with "file"])dnl
+])# _LT_PATH_TOOL_PREFIX
+
+# Old name:
+AU_ALIAS([AC_PATH_TOOL_PREFIX], [_LT_PATH_TOOL_PREFIX])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_PATH_TOOL_PREFIX], [])
+
+
+# _LT_PATH_MAGIC
+# --------------
+# find a file program which can recognize a shared library
+m4_defun([_LT_PATH_MAGIC],
+[_LT_PATH_TOOL_PREFIX(${ac_tool_prefix}file, /usr/bin$PATH_SEPARATOR$PATH)
+if test -z "$lt_cv_path_MAGIC_CMD"; then
+ if test -n "$ac_tool_prefix"; then
+ _LT_PATH_TOOL_PREFIX(file, /usr/bin$PATH_SEPARATOR$PATH)
+ else
+ MAGIC_CMD=:
+ fi
+fi
+])# _LT_PATH_MAGIC
+
+
+# LT_PATH_LD
+# ----------
+# find the pathname to the GNU or non-GNU linker
+AC_DEFUN([LT_PATH_LD],
+[AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_CANONICAL_BUILD])dnl
+m4_require([_LT_DECL_SED])dnl
+m4_require([_LT_DECL_EGREP])dnl
+m4_require([_LT_PROG_ECHO_BACKSLASH])dnl
+
+AC_ARG_WITH([gnu-ld],
+ [AS_HELP_STRING([--with-gnu-ld],
+ [assume the C compiler uses GNU ld @<:@default=no@:>@])],
+ [test "$withval" = no || with_gnu_ld=yes],
+ [with_gnu_ld=no])dnl
+
+ac_prog=ld
+if test "$GCC" = yes; then
+ # Check if gcc -print-prog-name=ld gives a path.
+ AC_MSG_CHECKING([for ld used by $CC])
+ case $host in
+ *-*-mingw*)
+ # gcc leaves a trailing carriage return which upsets mingw
+ ac_prog=`($CC -print-prog-name=ld) 2>&5 | tr -d '\015'` ;;
+ *)
+ ac_prog=`($CC -print-prog-name=ld) 2>&5` ;;
+ esac
+ case $ac_prog in
+ # Accept absolute paths.
+ [[\\/]]* | ?:[[\\/]]*)
+ re_direlt='/[[^/]][[^/]]*/\.\./'
+ # Canonicalize the pathname of ld
+ ac_prog=`$ECHO "$ac_prog"| $SED 's%\\\\%/%g'`
+ while $ECHO "$ac_prog" | $GREP "$re_direlt" > /dev/null 2>&1; do
+ ac_prog=`$ECHO $ac_prog| $SED "s%$re_direlt%/%"`
+ done
+ test -z "$LD" && LD="$ac_prog"
+ ;;
+ "")
+ # If it fails, then pretend we aren't using GCC.
+ ac_prog=ld
+ ;;
+ *)
+ # If it is relative, then search for the first ld in PATH.
+ with_gnu_ld=unknown
+ ;;
+ esac
+elif test "$with_gnu_ld" = yes; then
+ AC_MSG_CHECKING([for GNU ld])
+else
+ AC_MSG_CHECKING([for non-GNU ld])
+fi
+AC_CACHE_VAL(lt_cv_path_LD,
+[if test -z "$LD"; then
+ lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR
+ for ac_dir in $PATH; do
+ IFS="$lt_save_ifs"
+ test -z "$ac_dir" && ac_dir=.
+ if test -f "$ac_dir/$ac_prog" || test -f "$ac_dir/$ac_prog$ac_exeext"; then
+ lt_cv_path_LD="$ac_dir/$ac_prog"
+ # Check to see if the program is GNU ld. I'd rather use --version,
+ # but apparently some variants of GNU ld only accept -v.
+ # Break only if it was the GNU/non-GNU ld that we prefer.
+ case `"$lt_cv_path_LD" -v 2>&1 </dev/null` in
+ *GNU* | *'with BFD'*)
+ test "$with_gnu_ld" != no && break
+ ;;
+ *)
+ test "$with_gnu_ld" != yes && break
+ ;;
+ esac
+ fi
+ done
+ IFS="$lt_save_ifs"
+else
+ lt_cv_path_LD="$LD" # Let the user override the test with a path.
+fi])
+LD="$lt_cv_path_LD"
+if test -n "$LD"; then
+ AC_MSG_RESULT($LD)
+else
+ AC_MSG_RESULT(no)
+fi
+test -z "$LD" && AC_MSG_ERROR([no acceptable ld found in \$PATH])
+_LT_PATH_LD_GNU
+AC_SUBST([LD])
+
+_LT_TAGDECL([], [LD], [1], [The linker used to build libraries])
+])# LT_PATH_LD
+
+# Old names:
+AU_ALIAS([AM_PROG_LD], [LT_PATH_LD])
+AU_ALIAS([AC_PROG_LD], [LT_PATH_LD])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AM_PROG_LD], [])
+dnl AC_DEFUN([AC_PROG_LD], [])
+
+
+# _LT_PATH_LD_GNU
+# ---------------
+m4_defun([_LT_PATH_LD_GNU],
+[AC_CACHE_CHECK([if the linker ($LD) is GNU ld], lt_cv_prog_gnu_ld,
+[# I'd rather use --version here, but apparently some GNU lds only accept -v.
+case `$LD -v 2>&1 </dev/null` in
+*GNU* | *'with BFD'*)
+ lt_cv_prog_gnu_ld=yes
+ ;;
+*)
+ lt_cv_prog_gnu_ld=no
+ ;;
+esac])
+with_gnu_ld=$lt_cv_prog_gnu_ld
+])# _LT_PATH_LD_GNU
+
+
+# _LT_CMD_RELOAD
+# --------------
+# find reload flag for linker
+# -- PORTME Some linkers may need a different reload flag.
+m4_defun([_LT_CMD_RELOAD],
+[AC_CACHE_CHECK([for $LD option to reload object files],
+ lt_cv_ld_reload_flag,
+ [lt_cv_ld_reload_flag='-r'])
+reload_flag=$lt_cv_ld_reload_flag
+case $reload_flag in
+"" | " "*) ;;
+*) reload_flag=" $reload_flag" ;;
+esac
+reload_cmds='$LD$reload_flag -o $output$reload_objs'
+case $host_os in
+ cygwin* | mingw* | pw32* | cegcc*)
+ if test "$GCC" != yes; then
+ reload_cmds=false
+ fi
+ ;;
+ darwin*)
+ if test "$GCC" = yes; then
+ reload_cmds='$LTCC $LTCFLAGS -nostdlib ${wl}-r -o $output$reload_objs'
+ else
+ reload_cmds='$LD$reload_flag -o $output$reload_objs'
+ fi
+ ;;
+esac
+_LT_TAGDECL([], [reload_flag], [1], [How to create reloadable object files])dnl
+_LT_TAGDECL([], [reload_cmds], [2])dnl
+])# _LT_CMD_RELOAD
+
+
+# _LT_CHECK_MAGIC_METHOD
+# ----------------------
+# how to check for library dependencies
+# -- PORTME fill in with the dynamic library characteristics
+m4_defun([_LT_CHECK_MAGIC_METHOD],
+[m4_require([_LT_DECL_EGREP])
+m4_require([_LT_DECL_OBJDUMP])
+AC_CACHE_CHECK([how to recognize dependent libraries],
+lt_cv_deplibs_check_method,
+[lt_cv_file_magic_cmd='$MAGIC_CMD'
+lt_cv_file_magic_test_file=
+lt_cv_deplibs_check_method='unknown'
+# Need to set the preceding variable on all platforms that support
+# interlibrary dependencies.
+# `none' -- dependencies not supported.
+# `unknown' -- same as none, but documents that we really don't know.
+# `pass_all' -- all dependencies passed with no checks.
+# `test_compile' -- check by making test program.
+# `file_magic [[regex]]' -- check by looking for files in library path
+# which responds to the $file_magic_cmd with a given extended regex.
+# If you have `file' or equivalent on your system and you're not sure
+# whether `pass_all' will *always* work, you probably want this one.
+
+case $host_os in
+aix[[4-9]]*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+beos*)
+ lt_cv_deplibs_check_method=pass_all
+ ;;
+
+bsdi[[45]]*)
+ lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib)'
+ lt_cv_file_magic_cmd='/usr/bin/file -L'
+ lt_cv_file_magic_test_file=/shlib/libc.so
+ ;;
+
+cygwin*)
+ # func_win32_libid is a shell function defined in ltmain.sh
+ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+ lt_cv_file_magic_cmd='func_win32_libid'
+ ;;
+
+mingw* | pw32*)
+ # Base MSYS/MinGW do not provide the 'file' command needed by
+ # func_win32_libid shell function, so use a weaker test based on 'objdump',
+ # unless we find 'file', for example because we are cross-compiling.
+ # func_win32_libid assumes BSD nm, so disallow it if using MS dumpbin.
+ if ( test "$lt_cv_nm_interface" = "BSD nm" && file / ) >/dev/null 2>&1; then
+ lt_cv_deplibs_check_method='file_magic ^x86 archive import|^x86 DLL'
+ lt_cv_file_magic_cmd='func_win32_libid'
+ else
+ # Keep this pattern in sync with the one in func_win32_libid.
+ lt_cv_deplibs_check_method='file_magic file format (pei*-i386(.*architecture: i386)?|pe-arm-wince|pe-x86-64)'
+ lt_cv_file_magic_cmd='$OBJDUMP -f'
+ fi
+ ;;
+
+cegcc*)
+ # use the weaker test based on 'objdump'. See mingw*.
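+ # Illustration (assumed tool output, not upstream text): `$OBJDUMP -f foo.dll'
+ # prints a header line such as `foo.dll: file format pe-arm-wince-little',
+ # which the file_magic regexp below is matched against.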
+ lt_cv_deplibs_check_method='file_magic file format pe-arm-.*little(.*architecture: arm)?' + lt_cv_file_magic_cmd='$OBJDUMP -f' + ;; + +darwin* | rhapsody*) + lt_cv_deplibs_check_method=pass_all + ;; + +freebsd* | dragonfly*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + case $host_cpu in + i*86 ) + # Not sure whether the presence of OpenBSD here was a mistake. + # Let's accept both of them until this is cleared up. + lt_cv_deplibs_check_method='file_magic (FreeBSD|OpenBSD|DragonFly)/i[[3-9]]86 (compact )?demand paged shared library' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so.*` + ;; + esac + else + lt_cv_deplibs_check_method=pass_all + fi + ;; + +gnu*) + lt_cv_deplibs_check_method=pass_all + ;; + +haiku*) + lt_cv_deplibs_check_method=pass_all + ;; + +hpux10.20* | hpux11*) + lt_cv_file_magic_cmd=/usr/bin/file + case $host_cpu in + ia64*) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|ELF-[[0-9]][[0-9]]) shared object file - IA64' + lt_cv_file_magic_test_file=/usr/lib/hpux32/libc.so + ;; + hppa*64*) + [lt_cv_deplibs_check_method='file_magic (s[0-9][0-9][0-9]|ELF[ -][0-9][0-9])(-bit)?( [LM]SB)? shared object( file)?[, -]* PA-RISC [0-9]\.[0-9]'] + lt_cv_file_magic_test_file=/usr/lib/pa20_64/libc.sl + ;; + *) + lt_cv_deplibs_check_method='file_magic (s[[0-9]][[0-9]][[0-9]]|PA-RISC[[0-9]]\.[[0-9]]) shared library' + lt_cv_file_magic_test_file=/usr/lib/libc.sl + ;; + esac + ;; + +interix[[3-9]]*) + # PIC code is broken on Interix 3.x, that's why |\.a not |_pic\.a here + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|\.a)$' + ;; + +irix5* | irix6* | nonstopux*) + case $LD in + *-32|*"-32 ") libmagic=32-bit;; + *-n32|*"-n32 ") libmagic=N32;; + *-64|*"-64 ") libmagic=64-bit;; + *) libmagic=never-match;; + esac + lt_cv_deplibs_check_method=pass_all + ;; + +# This must be glibc/ELF. 
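+# (Note: `pass_all' below performs no file-magic inspection at all; every
+# candidate dependency is accepted as a valid shared library on such hosts.)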
+linux* | k*bsd*-gnu | kopensolaris*-gnu) + lt_cv_deplibs_check_method=pass_all + ;; + +netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ > /dev/null; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so|_pic\.a)$' + fi + ;; + +newos6*) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (executable|dynamic lib)' + lt_cv_file_magic_cmd=/usr/bin/file + lt_cv_file_magic_test_file=/usr/lib/libnls.so + ;; + +*nto* | *qnx*) + lt_cv_deplibs_check_method=pass_all + ;; + +openbsd*) + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|\.so|_pic\.a)$' + else + lt_cv_deplibs_check_method='match_pattern /lib[[^/]]+(\.so\.[[0-9]]+\.[[0-9]]+|_pic\.a)$' + fi + ;; + +osf3* | osf4* | osf5*) + lt_cv_deplibs_check_method=pass_all + ;; + +rdos*) + lt_cv_deplibs_check_method=pass_all + ;; + +solaris*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX* | sysv4*uw2*) + lt_cv_deplibs_check_method=pass_all + ;; + +sysv4 | sysv4.3*) + case $host_vendor in + motorola) + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[ML]]SB (shared object|dynamic lib) M[[0-9]][[0-9]]* Version [[0-9]]' + lt_cv_file_magic_test_file=`echo /usr/lib/libc.so*` + ;; + ncr) + lt_cv_deplibs_check_method=pass_all + ;; + sequent) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method='file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB (shared object|dynamic lib )' + ;; + sni) + lt_cv_file_magic_cmd='/bin/file' + lt_cv_deplibs_check_method="file_magic ELF [[0-9]][[0-9]]*-bit [[LM]]SB dynamic lib" + lt_cv_file_magic_test_file=/lib/libc.so + ;; + siemens) + lt_cv_deplibs_check_method=pass_all + ;; + pc) + lt_cv_deplibs_check_method=pass_all + ;; + esac + ;; + +tpf*) + lt_cv_deplibs_check_method=pass_all + ;; +esac +]) + +file_magic_glob= +want_nocaseglob=no +if test "$build" = "$host"; then + case $host_os in + mingw* | pw32*) + if ( shopt | grep nocaseglob ) >/dev/null 2>&1; then + want_nocaseglob=yes + else + file_magic_glob=`echo aAbBcCdDeEfFgGhHiIjJkKlLmMnNoOpPqQrRsStTuUvVwWxXyYzZ | $SED -e "s/\(..\)/s\/[[\1]]\/[[\1]]\/g;/g"` + fi + ;; + esac +fi + +file_magic_cmd=$lt_cv_file_magic_cmd +deplibs_check_method=$lt_cv_deplibs_check_method +test -z "$deplibs_check_method" && deplibs_check_method=unknown + +_LT_DECL([], [deplibs_check_method], [1], + [Method to check whether dependent libraries are shared objects]) +_LT_DECL([], [file_magic_cmd], [1], + [Command to use when deplibs_check_method = "file_magic"]) +_LT_DECL([], [file_magic_glob], [1], + [How to find potential files when deplibs_check_method = "file_magic"]) +_LT_DECL([], [want_nocaseglob], [1], + [Find potential files using nocaseglob when deplibs_check_method = "file_magic"]) +])# _LT_CHECK_MAGIC_METHOD + + +# LT_PATH_NM +# ---------- +# find the pathname to a BSD- or MS-compatible name lister +AC_DEFUN([LT_PATH_NM], +[AC_REQUIRE([AC_PROG_CC])dnl +AC_CACHE_CHECK([for BSD- or MS-compatible name lister (nm)], lt_cv_path_NM, +[if test -n "$NM"; then + # Let the user override the test. 
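+ # (Usage sketch, not upstream text: pre-setting NM, e.g.
+ # NM="/opt/cross/bin/arm-linux-nm -B" ./configure
+ # takes this branch and skips the search below; the path is hypothetical.)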
+ lt_cv_path_NM="$NM" +else + lt_nm_to_check="${ac_tool_prefix}nm" + if test -n "$ac_tool_prefix" && test "$build" = "$host"; then + lt_nm_to_check="$lt_nm_to_check nm" + fi + for lt_tmp_nm in $lt_nm_to_check; do + lt_save_ifs="$IFS"; IFS=$PATH_SEPARATOR + for ac_dir in $PATH /usr/ccs/bin/elf /usr/ccs/bin /usr/ucb /bin; do + IFS="$lt_save_ifs" + test -z "$ac_dir" && ac_dir=. + tmp_nm="$ac_dir/$lt_tmp_nm" + if test -f "$tmp_nm" || test -f "$tmp_nm$ac_exeext" ; then + # Check to see if the nm accepts a BSD-compat flag. + # Adding the `sed 1q' prevents false positives on HP-UX, which says: + # nm: unknown option "B" ignored + # Tru64's nm complains that /dev/null is an invalid object file + case `"$tmp_nm" -B /dev/null 2>&1 | sed '1q'` in + */dev/null* | *'Invalid file or object type'*) + lt_cv_path_NM="$tmp_nm -B" + break + ;; + *) + case `"$tmp_nm" -p /dev/null 2>&1 | sed '1q'` in + */dev/null*) + lt_cv_path_NM="$tmp_nm -p" + break + ;; + *) + lt_cv_path_NM=${lt_cv_path_NM="$tmp_nm"} # keep the first match, but + continue # so that we can try to find one that supports BSD flags + ;; + esac + ;; + esac + fi + done + IFS="$lt_save_ifs" + done + : ${lt_cv_path_NM=no} +fi]) +if test "$lt_cv_path_NM" != "no"; then + NM="$lt_cv_path_NM" +else + # Didn't find any BSD compatible name lister, look for dumpbin. + if test -n "$DUMPBIN"; then : + # Let the user override the test. + else + AC_CHECK_TOOLS(DUMPBIN, [dumpbin "link -dump"], :) + case `$DUMPBIN -symbols /dev/null 2>&1 | sed '1q'` in + *COFF*) + DUMPBIN="$DUMPBIN -symbols" + ;; + *) + DUMPBIN=: + ;; + esac + fi + AC_SUBST([DUMPBIN]) + if test "$DUMPBIN" != ":"; then + NM="$DUMPBIN" + fi +fi +test -z "$NM" && NM=nm +AC_SUBST([NM]) +_LT_DECL([], [NM], [1], [A BSD- or MS-compatible name lister])dnl + +AC_CACHE_CHECK([the name lister ($NM) interface], [lt_cv_nm_interface], + [lt_cv_nm_interface="BSD nm" + echo "int some_variable = 0;" > conftest.$ac_ext + (eval echo "\"\$as_me:$LINENO: $ac_compile\"" >&AS_MESSAGE_LOG_FD) + (eval "$ac_compile" 2>conftest.err) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:$LINENO: $NM \\\"conftest.$ac_objext\\\"\"" >&AS_MESSAGE_LOG_FD) + (eval "$NM \"conftest.$ac_objext\"" 2>conftest.err > conftest.out) + cat conftest.err >&AS_MESSAGE_LOG_FD + (eval echo "\"\$as_me:$LINENO: output\"" >&AS_MESSAGE_LOG_FD) + cat conftest.out >&AS_MESSAGE_LOG_FD + if $GREP 'External.*some_variable' conftest.out > /dev/null; then + lt_cv_nm_interface="MS dumpbin" + fi + rm -f conftest*]) +])# LT_PATH_NM + +# Old names: +AU_ALIAS([AM_PROG_NM], [LT_PATH_NM]) +AU_ALIAS([AC_PROG_NM], [LT_PATH_NM]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_PROG_NM], []) +dnl AC_DEFUN([AC_PROG_NM], []) + +# _LT_CHECK_SHAREDLIB_FROM_LINKLIB +# -------------------------------- +# how to determine the name of the shared library +# associated with a specific link library. 
+# -- PORTME fill in with the dynamic library characteristics
+m4_defun([_LT_CHECK_SHAREDLIB_FROM_LINKLIB],
+[m4_require([_LT_DECL_EGREP])
+m4_require([_LT_DECL_OBJDUMP])
+m4_require([_LT_DECL_DLLTOOL])
+AC_CACHE_CHECK([how to associate runtime and link libraries],
+lt_cv_sharedlib_from_linklib_cmd,
+[lt_cv_sharedlib_from_linklib_cmd='unknown'
+
+case $host_os in
+cygwin* | mingw* | pw32* | cegcc*)
+ # two different shell functions defined in ltmain.sh
+ # decide which to use based on capabilities of $DLLTOOL
+ case `$DLLTOOL --help 2>&1` in
+ *--identify-strict*)
+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib
+ ;;
+ *)
+ lt_cv_sharedlib_from_linklib_cmd=func_cygming_dll_for_implib_fallback
+ ;;
+ esac
+ ;;
+*)
+ # fallback: assume linklib IS sharedlib
+ lt_cv_sharedlib_from_linklib_cmd="$ECHO"
+ ;;
+esac
+])
+sharedlib_from_linklib_cmd=$lt_cv_sharedlib_from_linklib_cmd
+test -z "$sharedlib_from_linklib_cmd" && sharedlib_from_linklib_cmd=$ECHO
+
+_LT_DECL([], [sharedlib_from_linklib_cmd], [1],
+ [Command to associate shared and link libraries])
+])# _LT_CHECK_SHAREDLIB_FROM_LINKLIB
+
+
+# _LT_PATH_MANIFEST_TOOL
+# ----------------------
+# locate the manifest tool
+m4_defun([_LT_PATH_MANIFEST_TOOL],
+[AC_CHECK_TOOL(MANIFEST_TOOL, mt, :)
+test -z "$MANIFEST_TOOL" && MANIFEST_TOOL=mt
+AC_CACHE_CHECK([if $MANIFEST_TOOL is a manifest tool], [lt_cv_path_mainfest_tool],
+ [lt_cv_path_mainfest_tool=no
+ echo "$as_me:$LINENO: $MANIFEST_TOOL '-?'" >&AS_MESSAGE_LOG_FD
+ $MANIFEST_TOOL '-?' 2>conftest.err > conftest.out
+ cat conftest.err >&AS_MESSAGE_LOG_FD
+ if $GREP 'Manifest Tool' conftest.out > /dev/null; then
+ lt_cv_path_mainfest_tool=yes
+ fi
+ rm -f conftest*])
+if test "x$lt_cv_path_mainfest_tool" != xyes; then
+ MANIFEST_TOOL=:
+fi
+_LT_DECL([], [MANIFEST_TOOL], [1], [Manifest tool])dnl
+])# _LT_PATH_MANIFEST_TOOL
+
+
+# LT_LIB_M
+# --------
+# check for math library
+AC_DEFUN([LT_LIB_M],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+LIBM=
+case $host in
+*-*-beos* | *-*-cegcc* | *-*-cygwin* | *-*-haiku* | *-*-pw32* | *-*-darwin*)
+ # These systems don't have libm, or don't need it
+ ;;
+*-ncr-sysv4.3*)
+ AC_CHECK_LIB(mw, _mwvalidcheckl, LIBM="-lmw")
+ AC_CHECK_LIB(m, cos, LIBM="$LIBM -lm")
+ ;;
+*)
+ AC_CHECK_LIB(m, cos, LIBM="-lm")
+ ;;
+esac
+AC_SUBST([LIBM])
+])# LT_LIB_M
+
+# Old name:
+AU_ALIAS([AC_CHECK_LIBM], [LT_LIB_M])
+dnl aclocal-1.4 backwards compatibility:
+dnl AC_DEFUN([AC_CHECK_LIBM], [])
+
+
+# _LT_COMPILER_NO_RTTI([TAGNAME])
+# -------------------------------
+m4_defun([_LT_COMPILER_NO_RTTI],
+[m4_require([_LT_TAG_COMPILER])dnl
+
+_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=
+
+if test "$GCC" = yes; then
+ case $cc_basename in
+ nvcc*)
+ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -Xcompiler -fno-builtin' ;;
+ *)
+ _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' ;;
+ esac
+
+ _LT_COMPILER_OPTION([if $compiler supports -fno-rtti -fno-exceptions],
+ lt_cv_prog_compiler_rtti_exceptions,
+ [-fno-rtti -fno-exceptions], [],
+ [_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)="$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1) -fno-rtti -fno-exceptions"])
+fi
+_LT_TAGDECL([no_builtin_flag], [lt_prog_compiler_no_builtin_flag], [1],
+ [Compiler flag to turn off builtin functions])
+])# _LT_COMPILER_NO_RTTI
+
+
+# _LT_CMD_GLOBAL_SYMBOLS
+# ----------------------
+m4_defun([_LT_CMD_GLOBAL_SYMBOLS],
+[AC_REQUIRE([AC_CANONICAL_HOST])dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_PROG_AWK])dnl
+AC_REQUIRE([LT_PATH_NM])dnl
+AC_REQUIRE([LT_PATH_LD])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_TAG_COMPILER])dnl + +# Check for command to grab the raw symbol name followed by C symbol from nm. +AC_MSG_CHECKING([command to parse $NM output from $compiler object]) +AC_CACHE_VAL([lt_cv_sys_global_symbol_pipe], +[ +# These are sane defaults that work on at least a few old systems. +# [They come from Ultrix. What could be older than Ultrix?!! ;)] + +# Character class describing NM global symbol codes. +symcode='[[BCDEGRST]]' + +# Regexp to match symbols that can be accessed directly from C. +sympat='\([[_A-Za-z]][[_A-Za-z0-9]]*\)' + +# Define system-specific variables. +case $host_os in +aix*) + symcode='[[BCDT]]' + ;; +cygwin* | mingw* | pw32* | cegcc*) + symcode='[[ABCDGISTW]]' + ;; +hpux*) + if test "$host_cpu" = ia64; then + symcode='[[ABCDEGRST]]' + fi + ;; +irix* | nonstopux*) + symcode='[[BCDEGRST]]' + ;; +osf*) + symcode='[[BCDEGQRST]]' + ;; +solaris*) + symcode='[[BDRT]]' + ;; +sco3.2v5*) + symcode='[[DT]]' + ;; +sysv4.2uw2*) + symcode='[[DT]]' + ;; +sysv5* | sco5v6* | unixware* | OpenUNIX*) + symcode='[[ABDT]]' + ;; +sysv4) + symcode='[[DFNSTU]]' + ;; +esac + +# If we're using GNU nm, then use its standard symbol codes. +case `$NM -V 2>&1` in +*GNU* | *'with BFD'*) + symcode='[[ABCDGIRSTW]]' ;; +esac + +# Transform an extracted symbol line into a proper C declaration. +# Some systems (esp. on ia64) link data and code symbols differently, +# so use this general approach. +lt_cv_sys_global_symbol_to_cdecl="sed -n -e 's/^T .* \(.*\)$/extern int \1();/p' -e 's/^$symcode* .* \(.*\)$/extern char \1;/p'" + +# Transform an extracted symbol line into symbol name and symbol address +lt_cv_sys_global_symbol_to_c_name_address="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p'" +lt_cv_sys_global_symbol_to_c_name_address_lib_prefix="sed -n -e 's/^: \([[^ ]]*\)[[ ]]*$/ {\\\"\1\\\", (void *) 0},/p' -e 's/^$symcode* \([[^ ]]*\) \(lib[[^ ]]*\)$/ {\"\2\", (void *) \&\2},/p' -e 's/^$symcode* \([[^ ]]*\) \([[^ ]]*\)$/ {\"lib\2\", (void *) \&\2},/p'" + +# Handle CRLF in mingw tool chain +opt_cr= +case $build_os in +mingw*) + opt_cr=`$ECHO 'x\{0,1\}' | tr x '\015'` # option cr in regexp + ;; +esac + +# Try without a prefix underscore, then with it. +for ac_symprfx in "" "_"; do + + # Transform symcode, sympat, and symprfx into a raw symbol and a C symbol. + symxfrm="\\1 $ac_symprfx\\2 \\2" + + # Write the raw and C identifiers. + if test "$lt_cv_nm_interface" = "MS dumpbin"; then + # Fake it for dumpbin and say T for any non-static function + # and D for any global variable. + # Also find C++ and __fastcall symbols from MSVC++, + # which start with @ or ?. + lt_cv_sys_global_symbol_pipe="$AWK ['"\ +" {last_section=section; section=\$ 3};"\ +" /^COFF SYMBOL TABLE/{for(i in hide) delete hide[i]};"\ +" /Section length .*#relocs.*(pick any)/{hide[last_section]=1};"\ +" \$ 0!~/External *\|/{next};"\ +" / 0+ UNDEF /{next}; / UNDEF \([^|]\)*()/{next};"\ +" {if(hide[section]) next};"\ +" {f=0}; \$ 0~/\(\).*\|/{f=1}; {printf f ? 
\"T \" : \"D \"};"\
+" {split(\$ 0, a, /\||\r/); split(a[2], s)};"\
+" s[1]~/^[@?]/{print s[1], s[1]; next};"\
+" s[1]~prfx {split(s[1],t,\"@\"); print t[1], substr(t[1],length(prfx))}"\
+" ' prfx=^$ac_symprfx]"
+ else
+ lt_cv_sys_global_symbol_pipe="sed -n -e 's/^.*[[ ]]\($symcode$symcode*\)[[ ]][[ ]]*$ac_symprfx$sympat$opt_cr$/$symxfrm/p'"
+ fi
+ lt_cv_sys_global_symbol_pipe="$lt_cv_sys_global_symbol_pipe | sed '/ __gnu_lto/d'"
+
+ # Check to see that the pipe works correctly.
+ pipe_works=no
+
+ rm -f conftest*
+ cat > conftest.$ac_ext <<_LT_EOF
+#ifdef __cplusplus
+extern "C" {
+#endif
+char nm_test_var;
+void nm_test_func(void);
+void nm_test_func(void){}
+#ifdef __cplusplus
+}
+#endif
+int main(){nm_test_var='a';nm_test_func();return(0);}
+_LT_EOF
+
+ if AC_TRY_EVAL(ac_compile); then
+ # Now try to grab the symbols.
+ nlist=conftest.nm
+ if AC_TRY_EVAL(NM conftest.$ac_objext \| "$lt_cv_sys_global_symbol_pipe" \> $nlist) && test -s "$nlist"; then
+ # Try sorting and uniquifying the output.
+ if sort "$nlist" | uniq > "$nlist"T; then
+ mv -f "$nlist"T "$nlist"
+ else
+ rm -f "$nlist"T
+ fi
+
+ # Make sure that we snagged all the symbols we need.
+ if $GREP ' nm_test_var$' "$nlist" >/dev/null; then
+ if $GREP ' nm_test_func$' "$nlist" >/dev/null; then
+ cat <<_LT_EOF > conftest.$ac_ext
+/* Keep this code in sync between libtool.m4, ltmain, lt_system.h, and tests. */
+#if defined(_WIN32) || defined(__CYGWIN__) || defined(_WIN32_WCE)
+/* DATA imports from DLLs on WIN32 can't be const, because runtime
+ relocations are performed -- see ld's documentation on pseudo-relocs. */
+# define LT@&t@_DLSYM_CONST
+#elif defined(__osf__)
+/* This system does not cope well with relocations in const data. */
+# define LT@&t@_DLSYM_CONST
+#else
+# define LT@&t@_DLSYM_CONST const
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+_LT_EOF
+ # Now generate the symbol file.
+ eval "$lt_cv_sys_global_symbol_to_cdecl"' < "$nlist" | $GREP -v main >> conftest.$ac_ext'
+
+ cat <<_LT_EOF >> conftest.$ac_ext
+
+/* The mapping between symbol names and symbols. */
+LT@&t@_DLSYM_CONST struct {
+ const char *name;
+ void *address;
+}
+lt__PROGRAM__LTX_preloaded_symbols[[]] =
+{
+ { "@PROGRAM@", (void *) 0 },
+_LT_EOF
+ $SED "s/^$symcode$symcode* \(.*\) \(.*\)$/ {\"\2\", (void *) \&\2},/" < "$nlist" | $GREP -v main >> conftest.$ac_ext
+ cat <<\_LT_EOF >> conftest.$ac_ext
+ {0, (void *) 0}
+};
+
+/* This works around a problem in the FreeBSD linker */
+#ifdef FREEBSD_WORKAROUND
+static const void *lt_preloaded_setup() {
+ return lt__PROGRAM__LTX_preloaded_symbols;
+}
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+_LT_EOF
+ # Now try linking the two files.
+ mv conftest.$ac_objext conftstm.$ac_objext
+ lt_globsym_save_LIBS=$LIBS
+ lt_globsym_save_CFLAGS=$CFLAGS
+ LIBS="conftstm.$ac_objext"
+ CFLAGS="$CFLAGS$_LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)"
+ if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext}; then
+ pipe_works=yes
+ fi
+ LIBS=$lt_globsym_save_LIBS
+ CFLAGS=$lt_globsym_save_CFLAGS
+ else
+ echo "cannot find nm_test_func in $nlist" >&AS_MESSAGE_LOG_FD
+ fi
+ else
+ echo "cannot find nm_test_var in $nlist" >&AS_MESSAGE_LOG_FD
+ fi
+ else
+ echo "cannot run $lt_cv_sys_global_symbol_pipe" >&AS_MESSAGE_LOG_FD
+ fi
+ else
+ echo "$progname: failed program was:" >&AS_MESSAGE_LOG_FD
+ cat conftest.$ac_ext >&5
+ fi
+ rm -rf conftest* conftst*
+
+ # Do not use the global_symbol_pipe unless it works.
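+ # Sketch of the data flow when the pipe works (assuming BSD nm output and
+ # no underscore prefix): nm emits `0000000000000000 D nm_test_var', the pipe
+ # rewrites it to `D nm_test_var nm_test_var' (symbol code, raw symbol, C
+ # name), and global_symbol_to_cdecl then yields `extern char nm_test_var;'.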
+ if test "$pipe_works" = yes; then + break + else + lt_cv_sys_global_symbol_pipe= + fi +done +]) +if test -z "$lt_cv_sys_global_symbol_pipe"; then + lt_cv_sys_global_symbol_to_cdecl= +fi +if test -z "$lt_cv_sys_global_symbol_pipe$lt_cv_sys_global_symbol_to_cdecl"; then + AC_MSG_RESULT(failed) +else + AC_MSG_RESULT(ok) +fi + +# Response file support. +if test "$lt_cv_nm_interface" = "MS dumpbin"; then + nm_file_list_spec='@' +elif $NM --help 2>/dev/null | grep '[[@]]FILE' >/dev/null; then + nm_file_list_spec='@' +fi + +_LT_DECL([global_symbol_pipe], [lt_cv_sys_global_symbol_pipe], [1], + [Take the output of nm and produce a listing of raw symbols and C names]) +_LT_DECL([global_symbol_to_cdecl], [lt_cv_sys_global_symbol_to_cdecl], [1], + [Transform the output of nm in a proper C declaration]) +_LT_DECL([global_symbol_to_c_name_address], + [lt_cv_sys_global_symbol_to_c_name_address], [1], + [Transform the output of nm in a C name address pair]) +_LT_DECL([global_symbol_to_c_name_address_lib_prefix], + [lt_cv_sys_global_symbol_to_c_name_address_lib_prefix], [1], + [Transform the output of nm in a C name address pair when lib prefix is needed]) +_LT_DECL([], [nm_file_list_spec], [1], + [Specify filename containing input files for $NM]) +]) # _LT_CMD_GLOBAL_SYMBOLS + + +# _LT_COMPILER_PIC([TAGNAME]) +# --------------------------- +m4_defun([_LT_COMPILER_PIC], +[m4_require([_LT_TAG_COMPILER])dnl +_LT_TAGVAR(lt_prog_compiler_wl, $1)= +_LT_TAGVAR(lt_prog_compiler_pic, $1)= +_LT_TAGVAR(lt_prog_compiler_static, $1)= + +m4_if([$1], [CXX], [ + # C++ specific cases for pic, static, wl, etc. + if test "$GXX" = yes; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + *djgpp*) + # DJGPP does not support shared libraries at all + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + _LT_TAGVAR(lt_prog_compiler_static, $1)= + ;; + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. 
+ ;; + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + else + case $host_os in + aix[[4-9]]*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + chorus*) + case $cc_basename in + cxch68*) + # Green Hills C++ Compiler + # _LT_TAGVAR(lt_prog_compiler_static, $1)="--no_auto_instantiation -u __main -u __premain -u _abort -r $COOL_DIR/lib/libOrb.a $MVME_DIR/lib/CC/libC.a $MVME_DIR/lib/classix/libcx.s.a" + ;; + esac + ;; + mingw* | cygwin* | os2* | pw32* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + dgux*) + case $cc_basename in + ec++*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + ghcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + freebsd* | dragonfly*) + # FreeBSD uses GNU C++ + ;; + hpux9* | hpux10* | hpux11*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + if test "$host_cpu" != ia64; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + fi + ;; + aCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + ;; + *) + ;; + esac + ;; + interix*) + # This is c89, which is MS Visual C++ (no shared libs) + # Anyone wants to do a port? + ;; + irix5* | irix6* | nonstopux*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + # CC pic flag -KPIC is the default. + ;; + *) + ;; + esac + ;; + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + KCC*) + # KAI C++ Compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + ecpc* ) + # old Intel C++ for x86_64 which still supported -KPIC. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + icpc* ) + # Intel C++, used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + pgCC* | pgcpp*) + # Portland Group C++ compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + cxx*) + # Compaq C++ + # Make sure the PIC flag is empty. 
It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xlc* | xlC* | bgxl[[cC]]* | mpixl[[cC]]*) + # IBM XL 8.0, 9.0 on PPC and BlueGene + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + esac + ;; + esac + ;; + lynxos*) + ;; + m88k*) + ;; + mvs*) + case $cc_basename in + cxx*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-W c,exportall' + ;; + *) + ;; + esac + ;; + netbsd* | netbsdelf*-gnu) + ;; + *qnx* | *nto*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='--backend -Wl,' + ;; + RCC*) + # Rational C++ 2.4.1 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + cxx*) + # Digital/Compaq C++ + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # Make sure the PIC flag is empty. It appears that all Alpha + # Linux and Compaq Tru64 Unix objects are PIC. + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + *) + ;; + esac + ;; + psos*) + ;; + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + ;; + *) + ;; + esac + ;; + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + lcc*) + # Lucid + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + ;; + *) + ;; + esac + ;; + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + case $cc_basename in + CC*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + ;; + *) + ;; + esac + ;; + vxworks*) + ;; + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +], +[ + if test "$GCC" = yes; then + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + + case $host_os in + aix*) + # All AIX code is PIC. + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + m68k) + # FIXME: we need at least 68020 code to build shared libraries, but + # adding the `-m68020' flag to GCC prevents building anything better, + # like `-m68040'. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-m68020 -resident32 -malways-restore-a4' + ;; + esac + ;; + + beos* | irix5* | irix6* | nonstopux* | osf3* | osf4* | osf5*) + # PIC is the default for these OSes. 
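+ # (lt_prog_compiler_pic is deliberately left unset in this branch: since
+ # PIC is already the default, no extra compiler flag is required.)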
+ ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + # Although the cygwin gcc ignores -fPIC, still need this for old-style + # (--disable-auto-import) libraries + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + + darwin* | rhapsody*) + # PIC is the default on this platform + # Common symbols not allowed in MH_DYLIB files + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fno-common' + ;; + + haiku*) + # PIC is the default for Haiku. + # The "-static" flag exists, but is broken. + _LT_TAGVAR(lt_prog_compiler_static, $1)= + ;; + + hpux*) + # PIC is the default for 64-bit PA HP-UX, but not for 32-bit + # PA HP-UX. On IA64 HP-UX, PIC is the default but the pic flag + # sets the default TLS model and affects inlining. + case $host_cpu in + hppa*64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + ;; + + interix[[3-9]]*) + # Interix 3.x gcc -fpic/-fPIC options generate broken code. + # Instead, we relocate shared libraries at runtime. + ;; + + msdosdjgpp*) + # Just because we use GCC doesn't mean we suddenly get shared libraries + # on systems that don't support them. + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + enable_shared=no + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)=-Kconform_pic + fi + ;; + + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + ;; + esac + + case $cc_basename in + nvcc*) # Cuda Compiler Driver 2.2 + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Xlinker ' + if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then + _LT_TAGVAR(lt_prog_compiler_pic, $1)="-Xcompiler $_LT_TAGVAR(lt_prog_compiler_pic, $1)" + fi + ;; + esac + else + # PORTME Check for flag to pass linker flags through the system compiler. + case $host_os in + aix*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + if test "$host_cpu" = ia64; then + # AIX 5 now supports IA64 processor + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + else + _LT_TAGVAR(lt_prog_compiler_static, $1)='-bnso -bI:/lib/syscalls.exp' + fi + ;; + + mingw* | cygwin* | pw32* | os2* | cegcc*) + # This hack is so that the source file can tell whether it is being + # built for inclusion in a dll (and should export symbols for example). + m4_if([$1], [GCJ], [], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)='-DDLL_EXPORT']) + ;; + + hpux9* | hpux10* | hpux11*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC is the default for IA64 HP-UX and 64-bit HP-UX, but + # not for PA HP-UX. + case $host_cpu in + hppa*64*|ia64*) + # +Z the default + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='+Z' + ;; + esac + # Is there a better lt_prog_compiler_static that works with the bundled CC? + _LT_TAGVAR(lt_prog_compiler_static, $1)='${wl}-a ${wl}archive' + ;; + + irix5* | irix6* | nonstopux*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # PIC (with -KPIC) is the default. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + # old Intel for x86_64 which still supported -KPIC. 
+ ecc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # icc used to be incompatible with GCC. + # ICC 10 doesn't accept -KPIC any more. + icc* | ifort*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + # Lahey Fortran 8.1. + lf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='--shared' + _LT_TAGVAR(lt_prog_compiler_static, $1)='--static' + ;; + nagfor*) + # NAG Fortran compiler + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,-Wl,,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + pgcc* | pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group compilers (*not* the Pentium gcc compiler, + # which looks to be a dead project) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + ccc*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All Alpha code is PIC. + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + xl* | bgxl* | bgf* | mpixl*) + # IBM XL C 8.0/Fortran 10.1, 11.1 on PPC and BlueGene + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-qpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-qstaticlink' + ;; + *) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ Ceres\ Fortran* | *Sun*Fortran*\ [[1-7]].* | *Sun*Fortran*\ 8.[[0-3]]*) + # Sun Fortran 8.3 passes all unrecognized flags to the linker + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='' + ;; + *Sun\ F* | *Sun*Fortran*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + ;; + *Sun\ C*) + # Sun C 5.9 + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + ;; + *Intel*\ [[CF]]*Compiler*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-static' + ;; + *Portland\ Group*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fpic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + esac + ;; + esac + ;; + + newsos6) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *nto* | *qnx*) + # QNX uses GNU C++, but need to define -shared option too, otherwise + # it will coredump. + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-fPIC -shared' + ;; + + osf3* | osf4* | osf5*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + # All OSF/1 code is PIC. 
+ _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + rdos*) + _LT_TAGVAR(lt_prog_compiler_static, $1)='-non_shared' + ;; + + solaris*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + case $cc_basename in + f77* | f90* | f95* | sunf77* | sunf90* | sunf95*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ';; + *) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,';; + esac + ;; + + sunos4*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Qoption ld ' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-PIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4 | sysv4.2uw2* | sysv4.3*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + sysv4*MP*) + if test -d /usr/nec ;then + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-Kconform_pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + fi + ;; + + sysv5* | unixware* | sco3.2v5* | sco5v6* | OpenUNIX*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-KPIC' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + unicos*) + _LT_TAGVAR(lt_prog_compiler_wl, $1)='-Wl,' + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + + uts4*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)='-pic' + _LT_TAGVAR(lt_prog_compiler_static, $1)='-Bstatic' + ;; + + *) + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no + ;; + esac + fi +]) +case $host_os in + # For platforms which do not support PIC, -DPIC is meaningless: + *djgpp*) + _LT_TAGVAR(lt_prog_compiler_pic, $1)= + ;; + *) + _LT_TAGVAR(lt_prog_compiler_pic, $1)="$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])" + ;; +esac + +AC_CACHE_CHECK([for $compiler option to produce PIC], + [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)], + [_LT_TAGVAR(lt_cv_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_prog_compiler_pic, $1)]) +_LT_TAGVAR(lt_prog_compiler_pic, $1)=$_LT_TAGVAR(lt_cv_prog_compiler_pic, $1) + +# +# Check to make sure the PIC flag actually works. +# +if test -n "$_LT_TAGVAR(lt_prog_compiler_pic, $1)"; then + _LT_COMPILER_OPTION([if $compiler PIC flag $_LT_TAGVAR(lt_prog_compiler_pic, $1) works], + [_LT_TAGVAR(lt_cv_prog_compiler_pic_works, $1)], + [$_LT_TAGVAR(lt_prog_compiler_pic, $1)@&t@m4_if([$1],[],[ -DPIC],[m4_if([$1],[CXX],[ -DPIC],[])])], [], + [case $_LT_TAGVAR(lt_prog_compiler_pic, $1) in + "" | " "*) ;; + *) _LT_TAGVAR(lt_prog_compiler_pic, $1)=" $_LT_TAGVAR(lt_prog_compiler_pic, $1)" ;; + esac], + [_LT_TAGVAR(lt_prog_compiler_pic, $1)= + _LT_TAGVAR(lt_prog_compiler_can_build_shared, $1)=no]) +fi +_LT_TAGDECL([pic_flag], [lt_prog_compiler_pic], [1], + [Additional compiler flags for building library objects]) + +_LT_TAGDECL([wl], [lt_prog_compiler_wl], [1], + [How to pass a linker flag through the compiler]) +# +# Check to make sure the static flag actually works. +# +wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) eval lt_tmp_static_flag=\"$_LT_TAGVAR(lt_prog_compiler_static, $1)\" +_LT_LINKER_OPTION([if $compiler static flag $lt_tmp_static_flag works], + _LT_TAGVAR(lt_cv_prog_compiler_static_works, $1), + $lt_tmp_static_flag, + [], + [_LT_TAGVAR(lt_prog_compiler_static, $1)=]) +_LT_TAGDECL([link_static_flag], [lt_prog_compiler_static], [1], + [Compiler flag to prevent dynamic linking]) +])# _LT_COMPILER_PIC + + +# _LT_LINKER_SHLIBS([TAGNAME]) +# ---------------------------- +# See if the linker supports building shared libraries. 
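+# Informal example (assumed typical output, not upstream text): on a
+# GNU/Linux host `$LD --help' includes a line such as
+# `ld: supported targets: elf64-x86-64 elf32-i386 ...', which the probes
+# below grep for `: supported targets:.* elf'.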
+m4_defun([_LT_LINKER_SHLIBS], +[AC_REQUIRE([LT_PATH_LD])dnl +AC_REQUIRE([LT_PATH_NM])dnl +m4_require([_LT_PATH_MANIFEST_TOOL])dnl +m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_DECL_SED])dnl +m4_require([_LT_CMD_GLOBAL_SYMBOLS])dnl +m4_require([_LT_TAG_COMPILER])dnl +AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) +m4_if([$1], [CXX], [ + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + case $host_os in + aix[[4-9]]*) + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global defined + # symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + ;; + pw32*) + _LT_TAGVAR(export_symbols_cmds, $1)="$ltdll_cmds" + ;; + cygwin* | mingw* | cegcc*) + case $cc_basename in + cl*) + _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + ;; + *) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] + ;; + esac + ;; + linux* | k*bsd*-gnu | gnu*) + _LT_TAGVAR(link_all_deplibs, $1)=no + ;; + *) + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + ;; + esac +], [ + runpath_var= + _LT_TAGVAR(allow_undefined_flag, $1)= + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(archive_cmds, $1)= + _LT_TAGVAR(archive_expsym_cmds, $1)= + _LT_TAGVAR(compiler_needs_object, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED '\''s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(hardcode_automatic, $1)=no + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(hardcode_libdir_separator, $1)= + _LT_TAGVAR(hardcode_minus_L, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported + _LT_TAGVAR(inherit_rpath, $1)=no + _LT_TAGVAR(link_all_deplibs, $1)=unknown + _LT_TAGVAR(module_cmds, $1)= + _LT_TAGVAR(module_expsym_cmds, $1)= + _LT_TAGVAR(old_archive_from_new_cmds, $1)= + _LT_TAGVAR(old_archive_from_expsyms_cmds, $1)= + _LT_TAGVAR(thread_safe_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + # include_expsyms should be a list of space-separated symbols to be *always* + # included in the symbol list + _LT_TAGVAR(include_expsyms, $1)= + # 
exclude_expsyms can be an extended regexp of symbols to exclude + # it will be wrapped by ` (' and `)$', so one must not match beginning or + # end of line. Example: `a|bc|.*d.*' will exclude the symbols `a' and `bc', + # as well as any symbol that contains `d'. + _LT_TAGVAR(exclude_expsyms, $1)=['_GLOBAL_OFFSET_TABLE_|_GLOBAL__F[ID]_.*'] + # Although _GLOBAL_OFFSET_TABLE_ is a valid symbol C name, most a.out + # platforms (ab)use it in PIC code, but their linkers get confused if + # the symbol is explicitly referenced. Since portable code cannot + # rely on this symbol name, it's probably fine to never include it in + # preloaded symbol tables. + # Exclude shared library initialization/finalization symbols. +dnl Note also adjust exclude_expsyms for C++ above. + extract_expsyms_cmds= + + case $host_os in + cygwin* | mingw* | pw32* | cegcc*) + # FIXME: the MSVC++ port hasn't been tested in a loooong time + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + if test "$GCC" != yes; then + with_gnu_ld=no + fi + ;; + interix*) + # we just hope/assume this is gcc and not c89 (= MSVC++) + with_gnu_ld=yes + ;; + openbsd*) + with_gnu_ld=no + ;; + linux* | k*bsd*-gnu | gnu*) + _LT_TAGVAR(link_all_deplibs, $1)=no + ;; + esac + + _LT_TAGVAR(ld_shlibs, $1)=yes + + # On some targets, GNU ld is compatible enough with the native linker + # that we're better off using the native interface for both. + lt_use_gnu_ld_interface=no + if test "$with_gnu_ld" = yes; then + case $host_os in + aix*) + # The AIX port of GNU ld has always aspired to compatibility + # with the native linker. However, as the warning in the GNU ld + # block says, versions before 2.19.5* couldn't really create working + # shared libraries, regardless of the interface used. + case `$LD -v 2>&1` in + *\ \(GNU\ Binutils\)\ 2.19.5*) ;; + *\ \(GNU\ Binutils\)\ 2.[[2-9]]*) ;; + *\ \(GNU\ Binutils\)\ [[3-9]]*) ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + ;; + *) + lt_use_gnu_ld_interface=yes + ;; + esac + fi + + if test "$lt_use_gnu_ld_interface" = yes; then + # If archive_cmds runs LD, not CC, wlarc should be empty + wlarc='${wl}' + + # Set some defaults for GNU ld with shared library support. These + # are reset later if shared libraries are not supported. Putting them + # here allows them to be overridden if necessary. + runpath_var=LD_RUN_PATH + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + # ancient GNU ld didn't support --whole-archive et. al. + if $LD --help 2>&1 | $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + supports_anon_versioning=no + case `$LD -v 2>&1` in + *GNU\ gold*) supports_anon_versioning=yes ;; + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.10.*) ;; # catch versions < 2.11 + *\ 2.11.93.0.2\ *) supports_anon_versioning=yes ;; # RH7.3 ... + *\ 2.11.92.0.12\ *) supports_anon_versioning=yes ;; # Mandrake 8.2 ... + *\ 2.11.*) ;; # other 2.11 versions + *) supports_anon_versioning=yes ;; + esac + + # See if GNU ld supports shared libraries. + case $host_os in + aix[[3-9]]*) + # On AIX/PPC, the GNU linker is very broken + if test "$host_cpu" != ia64; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: the GNU linker, at least up to release 2.19, is reported +*** to be unable to reliably create shared libraries on AIX. 
+*** Therefore, libtool is disabling shared libraries support. If you +*** really care for shared libraries, you may want to install binutils +*** 2.20 or above, or modify your PATH so that a non-GNU linker is found. +*** You will then need to restart the configuration process. + +_LT_EOF + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1 DATA/;s/^.*[[ ]]__nm__\([[^ ]]*\)[[ ]][[^ ]]*/\1 DATA/;/^I[[ ]]/d;/^[[AITW]][[ ]]/s/.* //'\'' | sort | uniq > $export_symbols' + _LT_TAGVAR(exclude_expsyms, $1)=['[_]+GLOBAL_OFFSET_TABLE_|[_]+GLOBAL__[FID]_.*|[_]+head_[A-Za-z0-9_]+_dll|[A-Za-z0-9_]+_dll_iname'] + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared $output_objdir/$soname.def $libobjs $deplibs $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + haiku*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + + gnu* | linux* | tpf* | k*bsd*-gnu | kopensolaris*-gnu) + tmp_diet=no + if test "$host_os" = linux-dietlibc; then + case $cc_basename in + diet\ *) tmp_diet=yes;; # linux-dietlibc with static linking (!diet-dyn) + esac + fi + if $LD --help 2>&1 | $EGREP ': supported targets:.* elf' > /dev/null \ + && test "$tmp_diet" = no + then + tmp_addflag=' $pic_flag' + tmp_sharedflag='-shared' + case $cc_basename,$host_cpu in + pgcc*) # Portland Group C compiler + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag' + ;; + pgf77* | pgf90* | pgf95* | pgfortran*) + # Portland Group f77 and f90 compilers + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + tmp_addflag=' $pic_flag -Mnomain' ;; + ecc*,ia64* | icc*,ia64*) # Intel C compiler on ia64 + tmp_addflag=' -i_dynamic' ;; + efc*,ia64* | ifort*,ia64*) # Intel Fortran compiler on ia64 + tmp_addflag=' -i_dynamic -nofor_main' ;; + ifc* | ifort*) # Intel Fortran compiler + tmp_addflag=' -nofor_main' ;; + lf95*) # Lahey Fortran 8.1 + _LT_TAGVAR(whole_archive_flag_spec, $1)= + tmp_sharedflag='--shared' ;; + xl[[cC]]* | bgxl[[cC]]* | mpixl[[cC]]*) # IBM XL C 8.0 on PPC (deal with xlf below) + tmp_sharedflag='-qmkshrobj' + tmp_addflag= ;; + nvcc*) # Cuda Compiler Driver 2.2 + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && 
new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + ;; + esac + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) # Sun C 5.9 + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive' + _LT_TAGVAR(compiler_needs_object, $1)=yes + tmp_sharedflag='-G' ;; + *Sun\ F*) # Sun Fortran 8.3 + tmp_sharedflag='-G' ;; + esac + _LT_TAGVAR(archive_cmds, $1)='$CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $CC '"$tmp_sharedflag""$tmp_addflag"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib' + fi + + case $cc_basename in + xlf* | bgf* | bgxlf* | mpixlf*) + # IBM XL Fortran 10.1 on PPC cannot create shared libs itself + _LT_TAGVAR(whole_archive_flag_spec, $1)='--whole-archive$convenience --no-whole-archive' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(archive_cmds, $1)='$LD -shared $libobjs $deplibs $linker_flags -soname $soname -o $lib' + if test "x$supports_anon_versioning" = xyes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~ + cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~ + echo "local: *; };" >> $output_objdir/$libname.ver~ + $LD -shared $libobjs $deplibs $linker_flags -soname $soname -version-script $output_objdir/$libname.ver -o $lib' + fi + ;; + esac + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable $libobjs $deplibs $linker_flags -o $lib' + wlarc= + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + fi + ;; + + solaris*) + if $LD -v 2>&1 | $GREP 'BFD 2\.8' > /dev/null; then + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: The releases 2.8.* of the GNU linker cannot reliably +*** create shared libraries on Solaris systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.9.1 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. 
+ +_LT_EOF + elif $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + sysv5* | sco3.2v5* | sco5v6* | unixware* | OpenUNIX*) + case `$LD -v 2>&1` in + *\ [[01]].* | *\ 2.[[0-9]].* | *\ 2.1[[0-5]].*) + _LT_TAGVAR(ld_shlibs, $1)=no + cat <<_LT_EOF 1>&2 + +*** Warning: Releases of the GNU linker prior to 2.16.91.0.3 can not +*** reliably create shared libraries on SCO systems. Therefore, libtool +*** is disabling shared libraries support. We urge you to upgrade GNU +*** binutils to release 2.16.91.0.3 or newer. Another option is to modify +*** your PATH or compiler configuration so that the native linker is +*** used, and then restart. + +_LT_EOF + ;; + *) + # For security reasons, it is highly recommended that you always + # use absolute paths for naming shared libraries, and exclude the + # DT_RUNPATH tag from executables and libraries. But doing so + # requires that you compile everything twice, which is a pain. + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + sunos4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bshareable -o $lib $libobjs $deplibs $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + + if test "$_LT_TAGVAR(ld_shlibs, $1)" = no; then + runpath_var= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)= + _LT_TAGVAR(export_dynamic_flag_spec, $1)= + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + # PORTME fill in a description of your system's linker (not GNU ld) + case $host_os in + aix3*) + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(archive_expsym_cmds, $1)='$LD -o $output_objdir/$soname $libobjs $deplibs $linker_flags -bE:$export_symbols -T512 -H512 -bM:SRE~$AR $AR_FLAGS $lib $output_objdir/$soname' + # Note: this linker hardcodes the directories in LIBPATH if there + # are no directories specified by -L. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + if test "$GCC" = yes && test -z "$lt_prog_compiler_static"; then + # Neither direct hardcoding nor static linking is supported with a + # broken collect2. + _LT_TAGVAR(hardcode_direct, $1)=unsupported + fi + ;; + + aix[[4-9]]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. 
+ aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + # If we're using GNU nm, then we don't want the "-C" option. + # -C means demangle to AIX nm, but means don't demangle with GNU nm + # Also, AIX nm treats weak defined symbols like other global + # defined symbols, whereas GNU nm marks them as "W". + if $NM -V 2>&1 | $GREP 'GNU' > /dev/null; then + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -Bpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B") || (\$ 2 == "W")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + else + _LT_TAGVAR(export_symbols_cmds, $1)='$NM -BCpg $libobjs $convenience | awk '\''{ if (((\$ 2 == "T") || (\$ 2 == "D") || (\$ 2 == "B")) && ([substr](\$ 3,1,1) != ".")) { print \$ 3 } }'\'' | sort -u > $export_symbols' + fi + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + if (test $ld_flag = "-brtl" || test $ld_flag = "-Wl,-brtl"); then + aix_use_runtimelinking=yes + break + fi + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' + + if test "$GCC" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + ;; + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + _LT_TAGVAR(link_all_deplibs, $1)=no + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to export. 
+ _LT_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared libraries. + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + amigaos*) + case $host_cpu in + powerpc) + # see comment about AmigaOS4 .so support + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='' + ;; + m68k) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/a2ixlibrary.data~$ECHO "#define NAME $libname" > $output_objdir/a2ixlibrary.data~$ECHO "#define LIBRARY_ID 1" >> $output_objdir/a2ixlibrary.data~$ECHO "#define VERSION $major" >> $output_objdir/a2ixlibrary.data~$ECHO "#define REVISION $revision" >> $output_objdir/a2ixlibrary.data~$AR $AR_FLAGS $lib $libobjs~$RANLIB $lib~(cd $output_objdir && a2ixlibrary -32)' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + ;; + + bsdi[[45]]*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)=-rdynamic + ;; + + cygwin* | mingw* | pw32* | cegcc*) + # When not using gcc, we currently assume that we are using + # Microsoft Visual C++. + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. 
+ case $cc_basename in + cl*) + # Native MSVC + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + sed -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + sed -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + _LT_TAGVAR(exclude_expsyms, $1)='_NULL_IMPORT_DESCRIPTOR|_IMPORT_DESCRIPTOR_.*' + _LT_TAGVAR(export_symbols_cmds, $1)='$NM $libobjs $convenience | $global_symbol_pipe | $SED -e '\''/^[[BCDGRS]][[ ]]/s/.*[[ ]]\([[^ ]]*\)/\1,DATA/'\'' | $SED -e '\''/^[[AITW]][[ ]]/s/.*[[ ]]//'\'' | sort | uniq > $export_symbols' + # Don't use ranlib + _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' + _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # Assume MSVC wrapper + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. + _LT_TAGVAR(archive_cmds, $1)='$CC -o $lib $libobjs $compiler_flags `func_echo_all "$deplibs" | $SED '\''s/ -lc$//'\''` -link -dll~linknames=' + # The linker will automatically build a .lib file if we build a DLL. + _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + # FIXME: Should let the user specify the lib program. + _LT_TAGVAR(old_archive_cmds, $1)='lib -OUT:$oldlib$oldobjs$old_deplibs' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + ;; + esac + ;; + + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; + + dgux*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 2.2.[012] allows us to include c++rt0.o to get C++ constructor + # support. Future versions do this automatically, but an explicit c++rt0.o + # does not break anything, and helps significantly (at the cost of a little + # extra space). 
+ freebsd2.2*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags /usr/lib/c++rt0.o' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # Unfortunately, older versions of FreeBSD 2 do not have this feature. + freebsd2.*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + # FreeBSD 3 and greater uses gcc -shared to do shared libraries. + freebsd* | dragonfly*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + hpux9*) + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $libobjs $deplibs $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$LD -b +b $install_libdir -o $output_objdir/$soname $libobjs $deplibs $linker_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + + hpux10*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags' + fi + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. 
+ _LT_TAGVAR(hardcode_minus_L, $1)=yes + fi + ;; + + hpux11*) + if test "$GCC" = yes && test "$with_gnu_ld" = no; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + else + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + m4_if($1, [], [ + # Older versions of the 11.00 compiler do not understand -b yet + # (HP92453-01 A.11.01.20 doesn't, HP92453-01 B.11.X.35175-35176.GP does) + _LT_LINKER_OPTION([if $CC understands -b], + _LT_TAGVAR(lt_cv_prog_compiler__b, $1), [-b], + [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags'], + [_LT_TAGVAR(archive_cmds, $1)='$LD -b +h $soname +b $install_libdir -o $lib $libobjs $deplibs $linker_flags'])], + [_LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $libobjs $deplibs $compiler_flags']) + ;; + esac + fi + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + + # hardcode_minus_L: Not really in the search PATH, + # but as the default location of the library. + _LT_TAGVAR(hardcode_minus_L, $1)=yes + ;; + esac + fi + ;; + + irix5* | irix6* | nonstopux*) + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + # Try to use the -exported_symbol ld option, if it does not + # work, assume that -exports_file does not work either and + # implicitly export all symbols. + # This should be the same for all languages, so no per-tag cache variable. 
+ AC_CACHE_CHECK([whether the $host_os linker accepts -exported_symbol], + [lt_cv_irix_exported_symbol], + [save_LDFLAGS="$LDFLAGS" + LDFLAGS="$LDFLAGS -shared ${wl}-exported_symbol ${wl}foo ${wl}-update_registry ${wl}/dev/null" + AC_LINK_IFELSE( + [AC_LANG_SOURCE( + [AC_LANG_CASE([C], [[int foo (void) { return 0; }]], + [C++], [[int foo (void) { return 0; }]], + [Fortran 77], [[ + subroutine foo + end]], + [Fortran], [[ + subroutine foo + end]])])], + [lt_cv_irix_exported_symbol=yes], + [lt_cv_irix_exported_symbol=no]) + LDFLAGS="$save_LDFLAGS"]) + if test "$lt_cv_irix_exported_symbol" = yes; then + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations ${wl}-exports_file ${wl}$export_symbols -o $lib' + fi + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -exports_file $export_symbols -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + netbsd* | netbsdelf*-gnu) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' # a.out + else + _LT_TAGVAR(archive_cmds, $1)='$LD -shared -o $lib $libobjs $deplibs $linker_flags' # ELF + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + newsos6) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *nto* | *qnx*) + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + if test -z "`echo __ELF__ | $CC -E - | $GREP __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags ${wl}-retain-symbols-file,$export_symbols' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + else + case $host_os in + openbsd[[01]].* | openbsd2.[[0-7]] | openbsd2.[[0-7]].*) + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + ;; + esac + fi + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + os2*) + 
_LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(archive_cmds, $1)='$ECHO "LIBRARY $libname INITINSTANCE" > $output_objdir/$libname.def~$ECHO "DESCRIPTION \"$libname\"" >> $output_objdir/$libname.def~echo DATA >> $output_objdir/$libname.def~echo " SINGLE NONSHARED" >> $output_objdir/$libname.def~echo EXPORTS >> $output_objdir/$libname.def~emxexp $libobjs >> $output_objdir/$libname.def~$CC -Zdll -Zcrtdll -o $lib $libobjs $deplibs $compiler_flags $output_objdir/$libname.def' + _LT_TAGVAR(old_archive_from_new_cmds, $1)='emximp -o $output_objdir/$libname.a $output_objdir/$libname.def' + ;; + + osf3*) + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + osf4* | osf5*) # as osf3* with the addition of -msym flag + if test "$GCC" = yes; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $pic_flag $libobjs $deplibs $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + else + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $libobjs $deplibs $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done; printf "%s\\n" "-hidden">> $lib.exp~ + $CC -shared${allow_undefined_flag} ${wl}-input ${wl}$lib.exp $compiler_flags $libobjs $deplibs -soname $soname `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~$RM $lib.exp' + + # Both c and cxx compiler support -rpath directly + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)='no' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + ;; + + solaris*) + _LT_TAGVAR(no_undefined_flag, $1)=' -z defs' + if test "$GCC" = yes; then + wlarc='${wl}' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag ${wl}-z ${wl}text ${wl}-M ${wl}$lib.exp ${wl}-h ${wl}$soname -o $lib $libobjs $deplibs 
$compiler_flags~$RM $lib.exp' + else + case `$CC -V 2>&1` in + *"Compilers 5.0"*) + wlarc='' + _LT_TAGVAR(archive_cmds, $1)='$LD -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $LD -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $linker_flags~$RM $lib.exp' + ;; + *) + wlarc='${wl}' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h $soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} -M $lib.exp -h $soname -o $lib $libobjs $deplibs $compiler_flags~$RM $lib.exp' + ;; + esac + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. GCC discards it without `$wl', + # but is careful enough not to reorder. + # Supported since Solaris 2.6 (maybe 2.5.1?) + if test "$GCC" = yes; then + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + fi + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + sunos4*) + if test "x$host_vendor" = xsequent; then + # Use $CC to link under sequent, because it throws in some extra .o + # files that make .init and .fini sections work. + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h $soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$LD -assert pure-text -Bstatic -o $lib $libobjs $deplibs $linker_flags' + fi + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4) + case $host_vendor in + sni) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=yes # is this really true??? + ;; + siemens) + ## LD is ld it makes a PLAMLIB + ## CC just makes a GrossModule. 
+ _LT_TAGVAR(archive_cmds, $1)='$LD -G -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(reload_cmds, $1)='$CC -r -o $output$reload_objs' + _LT_TAGVAR(hardcode_direct, $1)=no + ;; + motorola) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_direct, $1)=no #Motorola manual says yes, but my tests say they lie + ;; + esac + runpath_var='LD_RUN_PATH' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + sysv4.3*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(export_dynamic_flag_spec, $1)='-Bexport' + ;; + + sysv4*MP*) + if test -d /usr/nec; then + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var=LD_RUN_PATH + hardcode_runpath_var=yes + _LT_TAGVAR(ld_shlibs, $1)=yes + fi + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + if test "$GCC" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + fi + ;; + + uts4*) + _LT_TAGVAR(archive_cmds, $1)='$LD -G -h $soname -o $lib $libobjs $deplibs $linker_flags' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + + *) + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + + if test x$host_vendor = xsni; then + case $host in + sysv4 | sysv4.2uw2* | sysv4.3* | sysv5*) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Blargedynsym' + ;; + esac + fi + fi +]) +AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) +test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + +_LT_TAGVAR(with_gnu_ld, $1)=$with_gnu_ld + +_LT_DECL([], [libext], [0], [Old archive suffix (normally "a")])dnl +_LT_DECL([], [shrext_cmds], [1], [Shared library suffix (normally ".so")])dnl +_LT_DECL([], [extract_expsyms_cmds], [2], + [The commands to extract the exported symbol list from a shared archive]) + +# +# Do we need to explicitly link libc? +# +case "x$_LT_TAGVAR(archive_cmds_need_lc, $1)" in +x|xyes) + # Assume -lc should be added + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + + if test "$enable_shared" = yes && test "$GCC" = yes; then + case $_LT_TAGVAR(archive_cmds, $1) in + *'~'*) + # FIXME: we may have to deal with multi-command sequences. + ;; + '$CC '*) + # Test whether the compiler implicitly links with -lc since on some + # systems, -lgcc has to come before -lc. If gcc already passes -lc + # to ld, don't add -lc before -lgcc. + AC_CACHE_CHECK([whether -lc should be explicitly linked in], + [lt_cv_]_LT_TAGVAR(archive_cmds_need_lc, $1), + [$RM conftest* + echo "$lt_simple_compile_test_code" > conftest.$ac_ext + + if AC_TRY_EVAL(ac_compile) 2>conftest.err; then + soname=conftest + lib=conftest + libobjs=conftest.$ac_objext + deplibs= + wl=$_LT_TAGVAR(lt_prog_compiler_wl, $1) + pic_flag=$_LT_TAGVAR(lt_prog_compiler_pic, $1) + compiler_flags=-v + linker_flags=-v + verstring= + output_objdir=. 
+ libname=conftest + lt_save_allow_undefined_flag=$_LT_TAGVAR(allow_undefined_flag, $1) + _LT_TAGVAR(allow_undefined_flag, $1)= + if AC_TRY_EVAL(_LT_TAGVAR(archive_cmds, $1) 2\>\&1 \| $GREP \" -lc \" \>/dev/null 2\>\&1) + then + lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=no + else + lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1)=yes + fi + _LT_TAGVAR(allow_undefined_flag, $1)=$lt_save_allow_undefined_flag + else + cat conftest.err 1>&5 + fi + $RM conftest* + ]) + _LT_TAGVAR(archive_cmds_need_lc, $1)=$lt_cv_[]_LT_TAGVAR(archive_cmds_need_lc, $1) + ;; + esac + fi + ;; +esac + +_LT_TAGDECL([build_libtool_need_lc], [archive_cmds_need_lc], [0], + [Whether or not to add -lc for building shared libraries]) +_LT_TAGDECL([allow_libtool_libs_with_static_runtimes], + [enable_shared_with_static_runtimes], [0], + [Whether or not to disallow shared libs when runtime libs are static]) +_LT_TAGDECL([], [export_dynamic_flag_spec], [1], + [Compiler flag to allow reflexive dlopens]) +_LT_TAGDECL([], [whole_archive_flag_spec], [1], + [Compiler flag to generate shared objects directly from archives]) +_LT_TAGDECL([], [compiler_needs_object], [1], + [Whether the compiler copes with passing no objects directly]) +_LT_TAGDECL([], [old_archive_from_new_cmds], [2], + [Create an old-style archive from a shared archive]) +_LT_TAGDECL([], [old_archive_from_expsyms_cmds], [2], + [Create a temporary old-style archive to link instead of a shared archive]) +_LT_TAGDECL([], [archive_cmds], [2], [Commands used to build a shared archive]) +_LT_TAGDECL([], [archive_expsym_cmds], [2]) +_LT_TAGDECL([], [module_cmds], [2], + [Commands used to build a loadable module if different from building + a shared archive.]) +_LT_TAGDECL([], [module_expsym_cmds], [2]) +_LT_TAGDECL([], [with_gnu_ld], [1], + [Whether we are building with GNU ld or not]) +_LT_TAGDECL([], [allow_undefined_flag], [1], + [Flag that allows shared libraries with undefined symbols to be built]) +_LT_TAGDECL([], [no_undefined_flag], [1], + [Flag that enforces no undefined symbols]) +_LT_TAGDECL([], [hardcode_libdir_flag_spec], [1], + [Flag to hardcode $libdir into a binary during linking. 
+ This must work even if $libdir does not exist])
+_LT_TAGDECL([], [hardcode_libdir_separator], [1],
+ [Whether we need a single "-rpath" flag with a separated argument])
+_LT_TAGDECL([], [hardcode_direct], [0],
+ [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes
+ DIR into the resulting binary])
+_LT_TAGDECL([], [hardcode_direct_absolute], [0],
+ [Set to "yes" if using DIR/libNAME${shared_ext} during linking hardcodes
+ DIR into the resulting binary and the resulting library dependency is
+ "absolute", i.e. impossible to change by setting ${shlibpath_var} if the
+ library is relocated])
+_LT_TAGDECL([], [hardcode_minus_L], [0],
+ [Set to "yes" if using the -LDIR flag during linking hardcodes DIR
+ into the resulting binary])
+_LT_TAGDECL([], [hardcode_shlibpath_var], [0],
+ [Set to "yes" if using SHLIBPATH_VAR=DIR during linking hardcodes DIR
+ into the resulting binary])
+_LT_TAGDECL([], [hardcode_automatic], [0],
+ [Set to "yes" if building a shared library automatically hardcodes DIR
+ into the library and all subsequent libraries and executables linked
+ against it])
+_LT_TAGDECL([], [inherit_rpath], [0],
+ [Set to yes if linker adds runtime paths of dependent libraries
+ to runtime path list])
+_LT_TAGDECL([], [link_all_deplibs], [0],
+ [Whether libtool must link a program against all its dependency libraries])
+_LT_TAGDECL([], [always_export_symbols], [0],
+ [Set to "yes" if exported symbols are required])
+_LT_TAGDECL([], [export_symbols_cmds], [2],
+ [The commands to list exported symbols])
+_LT_TAGDECL([], [exclude_expsyms], [1],
+ [Symbols that should not be listed in the preloaded symbols])
+_LT_TAGDECL([], [include_expsyms], [1],
+ [Symbols that must always be exported])
+_LT_TAGDECL([], [prelink_cmds], [2],
+ [Commands necessary for linking programs (against libraries) with templates])
+_LT_TAGDECL([], [postlink_cmds], [2],
+ [Commands necessary for finishing linking programs])
+_LT_TAGDECL([], [file_list_spec], [1],
+ [Specify filename containing input files])
+dnl FIXME: Not yet implemented
+dnl _LT_TAGDECL([], [thread_safe_flag_spec], [1],
+dnl [Compiler flag to generate thread safe objects])
+])# _LT_LINKER_SHLIBS
+
+
+# _LT_LANG_C_CONFIG([TAG])
+# ------------------------
+# Ensure that the configuration variables for a C compiler are suitably
+# defined. These variables are subsequently used by _LT_CONFIG to write
+# the compiler configuration to `libtool'.
+m4_defun([_LT_LANG_C_CONFIG],
+[m4_require([_LT_DECL_EGREP])dnl
+lt_save_CC="$CC"
+AC_LANG_PUSH(C)
+
+# Source file extension for C test sources.
+ac_ext=c
+
+# Object file extension for compiled C test sources.
+objext=o
+_LT_TAGVAR(objext, $1)=$objext
+
+# Code to be used in simple compile tests
+lt_simple_compile_test_code="int some_variable = 0;"
+
+# Code to be used in simple link tests
+lt_simple_link_test_code='int main(){return(0);}'
+
+_LT_TAG_COMPILER
+# Save the default compiler, since it gets overwritten when the other
+# tags are being tested, and _LT_TAGVAR(compiler, []) is a NOP.
+compiler_DEFAULT=$CC
+
+# save warnings/boilerplate of simple test code
+_LT_COMPILER_BOILERPLATE
+_LT_LINKER_BOILERPLATE
+
+## CAVEAT EMPTOR:
+## There is no encapsulation within the following macros; do not change
+## the running order or otherwise move them around unless you know exactly
+## what you are doing... 
+if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + LT_SYS_DLOPEN_SELF + _LT_CMD_STRIPLIB + + # Report which library types will actually be built + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_CONFIG($1) +fi +AC_LANG_POP +CC="$lt_save_CC" +])# _LT_LANG_C_CONFIG + + +# _LT_LANG_CXX_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a C++ compiler are suitably +# defined. These variables are subsequently used by _LT_CONFIG to write +# the compiler configuration to `libtool'. +m4_defun([_LT_LANG_CXX_CONFIG], +[m4_require([_LT_FILEUTILS_DEFAULTS])dnl +m4_require([_LT_DECL_EGREP])dnl +m4_require([_LT_PATH_MANIFEST_TOOL])dnl +if test -n "$CXX" && ( test "X$CXX" != "Xno" && + ( (test "X$CXX" = "Xg++" && `g++ -v >/dev/null 2>&1` ) || + (test "X$CXX" != "Xg++"))) ; then + AC_PROG_CXXCPP +else + _lt_caught_CXX_error=yes +fi + +AC_LANG_PUSH(C++) +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(compiler_needs_object, $1)=no +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_shlibpath_var, $1)=unsupported +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for C++ test sources. +ac_ext=cpp + +# Object file extension for compiled C++ test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the CXX compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. 
+if test "$_lt_caught_CXX_error" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="int some_variable = 0;" + + # Code to be used in simple link tests + lt_simple_link_test_code='int main(int, char *[[]]) { return(0); }' + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC=$CC + lt_save_CFLAGS=$CFLAGS + lt_save_LD=$LD + lt_save_GCC=$GCC + GCC=$GXX + lt_save_with_gnu_ld=$with_gnu_ld + lt_save_path_LD=$lt_cv_path_LD + if test -n "${lt_cv_prog_gnu_ldcxx+set}"; then + lt_cv_prog_gnu_ld=$lt_cv_prog_gnu_ldcxx + else + $as_unset lt_cv_prog_gnu_ld + fi + if test -n "${lt_cv_path_LDCXX+set}"; then + lt_cv_path_LD=$lt_cv_path_LDCXX + else + $as_unset lt_cv_path_LD + fi + test -z "${LDCXX+set}" || LD=$LDCXX + CC=${CXX-"c++"} + CFLAGS=$CXXFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + # We don't want -fno-exception when compiling C++ code, so set the + # no_builtin_flag separately + if test "$GXX" = yes; then + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)=' -fno-builtin' + else + _LT_TAGVAR(lt_prog_compiler_no_builtin_flag, $1)= + fi + + if test "$GXX" = yes; then + # Set up default GNU C++ configuration + + LT_PATH_LD + + # Check if GNU C++ uses GNU ld as the underlying linker, since the + # archiving commands below assume that GNU ld is being used. + if test "$with_gnu_ld" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC $pic_flag -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # If archive_cmds runs LD, not CC, wlarc should be empty + # XXX I think wlarc can be eliminated in ltcf-cxx, but I need to + # investigate it a little bit more. (MM) + wlarc='${wl}' + + # ancient GNU ld didn't support --whole-archive et. al. + if eval "`$CC -print-prog-name=ld` --help 2>&1" | + $GREP 'no-whole-archive' > /dev/null; then + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + else + _LT_TAGVAR(whole_archive_flag_spec, $1)= + fi + else + with_gnu_ld=no + wlarc= + + # A generic and very simple default shared library creation + # command for GNU C++ for the case where it uses the native + # linker, instead of GNU ld. If possible, this setting should + # overridden to take advantage of the native linker features on + # the platform it is being used on. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + fi + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + GXX=no + with_gnu_ld=no + wlarc= + fi + + # PORTME: fill in a description of your system's C++ link characteristics + AC_MSG_CHECKING([whether the $compiler linker ($LD) supports shared libraries]) + _LT_TAGVAR(ld_shlibs, $1)=yes + case $host_os in + aix3*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aix[[4-9]]*) + if test "$host_cpu" = ia64; then + # On IA64, the linker does run time linking by default, so we don't + # have to do anything special. + aix_use_runtimelinking=no + exp_sym_flag='-Bexport' + no_entry_flag="" + else + aix_use_runtimelinking=no + + # Test if we are trying to use run time linking or normal + # AIX style linking. If -brtl is somewhere in LDFLAGS, we + # need to do runtime linking. + case $host_os in aix4.[[23]]|aix4.[[23]].*|aix[[5-9]]*) + for ld_flag in $LDFLAGS; do + case $ld_flag in + *-brtl*) + aix_use_runtimelinking=yes + break + ;; + esac + done + ;; + esac + + exp_sym_flag='-bexport' + no_entry_flag='-bnoentry' + fi + + # When large executables or shared objects are built, AIX ld can + # have problems creating the table of contents. If linking a library + # or program results in "error TOC overflow" add -mminimal-toc to + # CXXFLAGS/CFLAGS for g++/gcc. In the cases where that is not + # enough to fix the problem, add -Wl,-bbigtoc to LDFLAGS. + + _LT_TAGVAR(archive_cmds, $1)='' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='${wl}-f,' + + if test "$GXX" = yes; then + case $host_os in aix4.[[012]]|aix4.[[012]].*) + # We only want to do this on AIX 4.2 and lower, the check + # below for broken collect2 doesn't work under 4.3+ + collect2name=`${CC} -print-prog-name=collect2` + if test -f "$collect2name" && + strings "$collect2name" | $GREP resolve_lib_name >/dev/null + then + # We have reworked collect2 + : + else + # We have old collect2 + _LT_TAGVAR(hardcode_direct, $1)=unsupported + # It fails to find uninstalled libraries when the uninstalled + # path is not listed in the libpath. Setting hardcode_minus_L + # to unsupported forces relinking + _LT_TAGVAR(hardcode_minus_L, $1)=yes + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)= + fi + esac + shared_flag='-shared' + if test "$aix_use_runtimelinking" = yes; then + shared_flag="$shared_flag "'${wl}-G' + fi + else + # not using gcc + if test "$host_cpu" = ia64; then + # VisualAge C++, Version 5.5 for AIX 5L for IA-64, Beta 3 Release + # chokes on -Wl,-G. The following line is correct: + shared_flag='-G' + else + if test "$aix_use_runtimelinking" = yes; then + shared_flag='${wl}-G' + else + shared_flag='${wl}-bM:SRE' + fi + fi + fi + + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-bexpall' + # It seems that -bexpall does not export symbols beginning with + # underscore (_), so it is better to generate a list of symbols to + # export. + _LT_TAGVAR(always_export_symbols, $1)=yes + if test "$aix_use_runtimelinking" = yes; then + # Warning - without using the other runtime loading flags (-brtl), + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(allow_undefined_flag, $1)='-berok' + # Determine the default libpath from the value encoded in an empty + # executable. 
+ _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags `if test "x${allow_undefined_flag}" != "x"; then func_echo_all "${wl}${allow_undefined_flag}"; else :; fi` '"\${wl}$exp_sym_flag:\$export_symbols $shared_flag" + else + if test "$host_cpu" = ia64; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $libdir:/usr/lib:/lib' + _LT_TAGVAR(allow_undefined_flag, $1)="-z nodefs" + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs '"\${wl}$no_entry_flag"' $compiler_flags ${wl}${allow_undefined_flag} '"\${wl}$exp_sym_flag:\$export_symbols" + else + # Determine the default libpath from the value encoded in an + # empty executable. + _LT_SYS_MODULE_PATH_AIX([$1]) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-blibpath:$libdir:'"$aix_libpath" + # Warning - without using the other run time loading flags, + # -berok will link without error, but may produce a broken library. + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-bernotok' + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-berok' + if test "$with_gnu_ld" = yes; then + # We only use this code for GNU lds that support --whole-archive. + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive' + else + # Exported symbols can be pulled into shared objects from archives + _LT_TAGVAR(whole_archive_flag_spec, $1)='$convenience' + fi + _LT_TAGVAR(archive_cmds_need_lc, $1)=yes + # This is similar to how AIX traditionally builds its shared + # libraries. + _LT_TAGVAR(archive_expsym_cmds, $1)="\$CC $shared_flag"' -o $output_objdir/$soname $libobjs $deplibs ${wl}-bnoentry $compiler_flags ${wl}-bE:$export_symbols${allow_undefined_flag}~$AR $AR_FLAGS $output_objdir/$libname$release.a $output_objdir/$soname' + fi + fi + ;; + + beos*) + if $LD --help 2>&1 | $GREP ': supported targets:.* elf' > /dev/null; then + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + # Joseph Beckenbach says some releases of gcc + # support --undefined. This deserves some investigation. FIXME + _LT_TAGVAR(archive_cmds, $1)='$CC -nostart $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + chorus*) + case $cc_basename in + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + cygwin* | mingw* | pw32* | cegcc*) + case $GXX,$cc_basename in + ,cl* | no,cl*) + # Native MSVC + # hardcode_libdir_flag_spec is actually meaningless, as there is + # no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)=' ' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=yes + _LT_TAGVAR(file_list_spec, $1)='@' + # Tell ltmain to make .lib files, not .a files. + libext=lib + # Tell ltmain to make .dll files, not .so files. + shrext_cmds=".dll" + # FIXME: Setting linknames here is a bad hack. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -o $output_objdir/$soname $libobjs $compiler_flags $deplibs -Wl,-dll~linknames=' + _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + $SED -n -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' -e '1\\\!p' < $export_symbols > $output_objdir/$soname.exp; + else + $SED -e 's/\\\\\\\(.*\\\\\\\)/-link\\\ -EXPORT:\\\\\\\1/' < $export_symbols > $output_objdir/$soname.exp; + fi~ + $CC -o $tool_output_objdir$soname $libobjs $compiler_flags $deplibs "@$tool_output_objdir$soname.exp" -Wl,-DLL,-IMPLIB:"$tool_output_objdir$libname.dll.lib"~ + linknames=' + # The linker will not automatically build a static lib if we build a DLL. + # _LT_TAGVAR(old_archive_from_new_cmds, $1)='true' + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + # Don't use ranlib + _LT_TAGVAR(old_postinstall_cmds, $1)='chmod 644 $oldlib' + _LT_TAGVAR(postlink_cmds, $1)='lt_outputfile="@OUTPUT@"~ + lt_tool_outputfile="@TOOL_OUTPUT@"~ + case $lt_outputfile in + *.exe|*.EXE) ;; + *) + lt_outputfile="$lt_outputfile.exe" + lt_tool_outputfile="$lt_tool_outputfile.exe" + ;; + esac~ + func_to_tool_file "$lt_outputfile"~ + if test "$MANIFEST_TOOL" != ":" && test -f "$lt_outputfile.manifest"; then + $MANIFEST_TOOL -manifest "$lt_tool_outputfile.manifest" -outputresource:"$lt_tool_outputfile" || exit 1; + $RM "$lt_outputfile.manifest"; + fi' + ;; + *) + # g++ + # _LT_TAGVAR(hardcode_libdir_flag_spec, $1) is actually meaningless, + # as there is no search path for DLLs. + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-L$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-all-symbols' + _LT_TAGVAR(allow_undefined_flag, $1)=unsupported + _LT_TAGVAR(always_export_symbols, $1)=no + _LT_TAGVAR(enable_shared_with_static_runtimes, $1)=yes + + if $LD --help 2>&1 | $GREP 'auto-import' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + # If the export-symbols file already is a .def file (1st line + # is EXPORTS), use it as is; otherwise, prepend... 
+ _LT_TAGVAR(archive_expsym_cmds, $1)='if test "x`$SED 1q $export_symbols`" = xEXPORTS; then + cp $export_symbols $output_objdir/$soname.def; + else + echo EXPORTS > $output_objdir/$soname.def; + cat $export_symbols >> $output_objdir/$soname.def; + fi~ + $CC -shared -nostdlib $output_objdir/$soname.def $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $output_objdir/$soname ${wl}--enable-auto-image-base -Xlinker --out-implib -Xlinker $lib' + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + darwin* | rhapsody*) + _LT_DARWIN_LINKER_FEATURES($1) + ;; + + dgux*) + case $cc_basename in + ec++*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + ghcx*) + # Green Hills C++ Compiler + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + freebsd2.*) + # C++ shared libraries reported to be fairly broken before + # switch to ELF + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + freebsd-elf*) + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + ;; + + freebsd* | dragonfly*) + # FreeBSD 3 and later use GNU C++ and GNU ld with standard ELF + # conventions + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + gnu*) + ;; + + haiku*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib' + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + + hpux9*) + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -b ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
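+ # Reading of the pipeline below: re-run the aCC link with -v, keep
+ # only the output mentioning -L, then walk the words, keeping
+ # conftest.$objext (it later serves as the sentinel separating pre-
+ # from post-dependencies), dropping every other object file, and
+ # collecting the remaining flags and paths into $list.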
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $EGREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + _LT_TAGVAR(archive_cmds, $1)='$RM $output_objdir/$soname~$CC -shared -nostdlib $pic_flag ${wl}+b ${wl}$install_libdir -o $output_objdir/$soname $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~test $output_objdir/$soname = $lib || mv $output_objdir/$soname $lib' + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + hpux10*|hpux11*) + if test $with_gnu_ld = no; then + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}+b ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + case $host_cpu in + hppa*64*|ia64*) + ;; + *) + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + ;; + esac + fi + case $host_cpu in + hppa*64*|ia64*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + ;; + *) + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(hardcode_minus_L, $1)=yes # Not in the search PATH, + # but as the default + # location of the library. + ;; + esac + + case $cc_basename in + CC*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + aCC*) + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -b ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. 
+ output_verbose_link_cmd='templist=`($CC -b $CFLAGS -v conftest.$objext 2>&1) | $GREP "\-L"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes; then + if test $with_gnu_ld = no; then + case $host_cpu in + hppa*64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib -fPIC ${wl}+h ${wl}$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + ia64*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+nodefaultrpath -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib $pic_flag ${wl}+h ${wl}$soname ${wl}+b ${wl}$install_libdir -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + ;; + esac + fi + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + interix[[3-9]]*) + _LT_TAGVAR(hardcode_direct, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + # Hack: On Interix 3.x, we cannot compile PIC because of a broken gcc. + # Instead, shared libraries are loaded at an image base (0x10000000 by + # default) and relocated if they conflict, which is a slow very memory + # consuming and fragmenting process. To avoid this, we pick a random, + # 256 KiB-aligned image base between 0x50000000 and 0x6FFC0000 at link + # time. Moving up from 0x10000000 also allows more sbrk(2) space. + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='sed "s,^,_," $export_symbols >$output_objdir/$soname.expsym~$CC -shared $pic_flag $libobjs $deplibs $compiler_flags ${wl}-h,$soname ${wl}--retain-symbols-file,$output_objdir/$soname.expsym ${wl}--image-base,`expr ${RANDOM-$$} % 4096 / 2 \* 262144 + 1342177280` -o $lib' + ;; + irix5* | irix6*) + case $cc_basename in + CC*) + # SGI C++ + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -all -multigot $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + + # Archives containing C++ object files must be created using + # "CC -ar", where "CC" is the IRIX C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. 
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -ar -WR,-u -o $oldlib $oldobjs' + ;; + *) + if test "$GXX" = yes; then + if test "$with_gnu_ld" = no; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + else + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` -o $lib' + fi + fi + _LT_TAGVAR(link_all_deplibs, $1)=yes + ;; + esac + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + _LT_TAGVAR(inherit_rpath, $1)=yes + ;; + + linux* | k*bsd*-gnu | kopensolaris*-gnu) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo $lib | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib ${wl}-retain-symbols-file,$export_symbols; mv \$templib $lib' + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1 | $GREP "ld"`; rm -f libconftest$shared_ext; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic' + + # Archives containing C++ object files must be created using + # "CC -Bstatic", where "CC" is the KAI C++ compiler. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' + ;; + icpc* | ecpc* ) + # Intel C++ + with_gnu_ld=yes + # version 8.0 and above of icpc choke on multiply defined symbols + # if we add $predep_objects and $postdep_objects, however 7.1 and + # earlier do not add the objects themselves. 
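+ # The sniff below keys off the banner printed by `icpc -V`: a 7.x
+ # release is matched by the *"Version 7."* pattern, and anything
+ # else is treated as 8.0 or newer (that icpc actually prints such a
+ # banner is an assumption of this test, not verified here).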
+ case `$CC -V 2>&1` in
+ *"Version 7."*)
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ ;;
+ *) # Version 8.0 or newer
+ tmp_idyn=
+ case $host_cpu in
+ ia64*) tmp_idyn=' -i_dynamic';;
+ esac
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared'"$tmp_idyn"' $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-retain-symbols-file $wl$export_symbols -o $lib'
+ ;;
+ esac
+ _LT_TAGVAR(archive_cmds_need_lc, $1)=no
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive$convenience ${wl}--no-whole-archive'
+ ;;
+ pgCC* | pgcpp*)
+ # Portland Group C++ compiler
+ case `$CC -V` in
+ *pgCC\ [[1-5]].* | *pgcpp\ [[1-5]].*)
+ _LT_TAGVAR(prelink_cmds, $1)='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $objs $libobjs $compile_deplibs~
+ compile_command="$compile_command `find $tpldir -name \*.o | sort | $NL2SP`"'
+ _LT_TAGVAR(old_archive_cmds, $1)='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $oldobjs$old_deplibs~
+ $AR $AR_FLAGS $oldlib$oldobjs$old_deplibs `find $tpldir -name \*.o | sort | $NL2SP`~
+ $RANLIB $oldlib'
+ _LT_TAGVAR(archive_cmds, $1)='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='tpldir=Template.dir~
+ rm -rf $tpldir~
+ $CC --prelink_objects --instantiation_dir $tpldir $predep_objects $libobjs $deplibs $convenience $postdep_objects~
+ $CC -shared $pic_flag $predep_objects $libobjs $deplibs `find $tpldir -name \*.o | sort | $NL2SP` $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+ ;;
+ *) # Version 6 and above use weak symbols
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname ${wl}-retain-symbols-file ${wl}$export_symbols -o $lib'
+ ;;
+ esac
+
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}--rpath ${wl}$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`for conv in $convenience\"\"; do test -n \"$conv\" && new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ ;;
+ cxx*)
+ # Compaq C++
+ _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $wl$soname -o $lib ${wl}-retain-symbols-file $wl$export_symbols'
+
+ runpath_var=LD_RUN_PATH
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir'
+ _LT_TAGVAR(hardcode_libdir_separator, $1)=:
+
+ # Commands to make compiler produce verbose output that lists
+ # what "hidden" libraries, object files and flags are used when
+ # linking a shared library.
+ #
+ # There doesn't appear to be a way to prevent this compiler from
+ # explicitly linking system object files so we need to strip them
+ # from the output so that they don't get included in the library
+ # dependencies.
+ output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld .*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "X$list" | $Xsed'
+ ;;
+ xl* | mpixl* | bgxl*)
+ # IBM XL 8.0 on PPC, with GNU ld
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir'
+ _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}--export-dynamic'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname -o $lib'
+ if test "x$supports_anon_versioning" = xyes; then
+ _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $output_objdir/$libname.ver~
+ cat $export_symbols | sed -e "s/\(.*\)/\1;/" >> $output_objdir/$libname.ver~
+ echo "local: *; };" >> $output_objdir/$libname.ver~
+ $CC -qmkshrobj $libobjs $deplibs $compiler_flags ${wl}-soname $wl$soname ${wl}-version-script ${wl}$output_objdir/$libname.ver -o $lib'
+ fi
+ ;;
+ *)
+ case `$CC -V 2>&1 | sed 5q` in
+ *Sun\ C*)
+ # Sun C++ 5.9
+ _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs'
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags'
+ _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file ${wl}$export_symbols'
+ _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir'
+ _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}--whole-archive`new_convenience=; for conv in $convenience\"\"; do test -z \"$conv\" || new_convenience=\"$new_convenience,$conv\"; done; func_echo_all \"$new_convenience\"` ${wl}--no-whole-archive'
+ _LT_TAGVAR(compiler_needs_object, $1)=yes
+
+ # Not sure whether something based on
+ # $CC $CFLAGS -v conftest.$objext -o libconftest$shared_ext 2>&1
+ # would be better.
+ output_verbose_link_cmd='func_echo_all'
+
+ # Archives containing C++ object files must be created using
+ # "CC -xar", where "CC" is the Sun C++ compiler. This is
+ # necessary to make sure instantiated templates are included
+ # in the archive.
+ _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + esac + ;; + esac + ;; + + lynxos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + m88k*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + mvs*) + case $cc_basename in + cxx*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + netbsd*) + if echo __ELF__ | $CC -E - | $GREP __ELF__ >/dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$LD -Bshareable -o $lib $predep_objects $libobjs $deplibs $postdep_objects $linker_flags' + wlarc= + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + fi + # Workaround some broken pre-1.5 toolchains + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP conftest.$objext | $SED -e "s:-lgcc -lc -lgcc::"' + ;; + + *nto* | *qnx*) + _LT_TAGVAR(ld_shlibs, $1)=yes + ;; + + openbsd2*) + # C++ shared libraries are fairly broken + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + openbsd*) + if test -f /usr/libexec/ld.so; then + _LT_TAGVAR(hardcode_direct, $1)=yes + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_direct_absolute, $1)=yes + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + if test -z "`echo __ELF__ | $CC -E - | grep __ELF__`" || test "$host_os-$host_cpu" = "openbsd2.8-powerpc"; then + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared $pic_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-retain-symbols-file,$export_symbols -o $lib' + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-E' + _LT_TAGVAR(whole_archive_flag_spec, $1)="$wlarc"'--whole-archive$convenience '"$wlarc"'--no-whole-archive' + fi + output_verbose_link_cmd=func_echo_all + else + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + + osf3* | osf4* | osf5*) + case $cc_basename in + KCC*) + # Kuck and Associates, Inc. (KAI) C++ Compiler + + # KCC will only create a shared library if the output file + # ends with ".so" (or ".sl" for HP-UX), so rename the library + # to its proper name (with version) after linking. + _LT_TAGVAR(archive_cmds, $1)='tempext=`echo $shared_ext | $SED -e '\''s/\([[^()0-9A-Za-z{}]]\)/\\\\\1/g'\''`; templib=`echo "$lib" | $SED -e "s/\${tempext}\..*/.so/"`; $CC $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags --soname $soname -o \$templib; mv \$templib $lib' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Archives containing C++ object files must be created using + # the KAI C++ compiler. 
+ case $host in + osf3*) _LT_TAGVAR(old_archive_cmds, $1)='$CC -Bstatic -o $oldlib $oldobjs' ;; + *) _LT_TAGVAR(old_archive_cmds, $1)='$CC -o $oldlib $oldobjs' ;; + esac + ;; + RCC*) + # Rational C++ 2.4.1 + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + cxx*) + case $host in + osf3*) + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname $soname `test -n "$verstring" && func_echo_all "${wl}-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + ;; + *) + _LT_TAGVAR(allow_undefined_flag, $1)=' -expect_unresolved \*' + _LT_TAGVAR(archive_cmds, $1)='$CC -shared${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname `test -n "$verstring" && func_echo_all "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='for i in `cat $export_symbols`; do printf "%s %s\\n" -exported_symbol "\$i" >> $lib.exp; done~ + echo "-hidden">> $lib.exp~ + $CC -shared$allow_undefined_flag $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags -msym -soname $soname ${wl}-input ${wl}$lib.exp `test -n "$verstring" && $ECHO "-set_version $verstring"` -update_registry ${output_objdir}/so_locations -o $lib~ + $RM $lib.exp' + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-rpath $libdir' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + # + # There doesn't appear to be a way to prevent this compiler from + # explicitly linking system object files so we need to strip them + # from the output so that they don't get included in the library + # dependencies. + output_verbose_link_cmd='templist=`$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP "ld" | $GREP -v "ld:"`; templist=`func_echo_all "$templist" | $SED "s/\(^.*ld.*\)\( .*ld.*$\)/\1/"`; list=""; for z in $templist; do case $z in conftest.$objext) list="$list $z";; *.$objext);; *) list="$list $z";;esac; done; func_echo_all "$list"' + ;; + *) + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(allow_undefined_flag, $1)=' ${wl}-expect_unresolved ${wl}\*' + case $host in + osf3*) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib ${allow_undefined_flag} $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-msym ${wl}-soname ${wl}$soname `test -n "$verstring" && func_echo_all "${wl}-set_version ${wl}$verstring"` ${wl}-update_registry ${wl}${output_objdir}/so_locations -o $lib' + ;; + esac + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-rpath ${wl}$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=: + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. 
+ output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + + else + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + fi + ;; + esac + ;; + + psos*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + sunos4*) + case $cc_basename in + CC*) + # Sun C++ 4.x + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + lcc*) + # Lucid + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + solaris*) + case $cc_basename in + CC* | sunCC*) + # Sun C++ 4.2, 5.x and Centerline C++ + _LT_TAGVAR(archive_cmds_need_lc,$1)=yes + _LT_TAGVAR(no_undefined_flag, $1)=' -zdefs' + _LT_TAGVAR(archive_cmds, $1)='$CC -G${allow_undefined_flag} -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G${allow_undefined_flag} ${wl}-M ${wl}$lib.exp -h$soname -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='-R$libdir' + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + # The compiler driver will combine and reorder linker options, + # but understands `-z linker_flag'. + # Supported since Solaris 2.6 (maybe 2.5.1?) + _LT_TAGVAR(whole_archive_flag_spec, $1)='-z allextract$convenience -z defaultextract' + ;; + esac + _LT_TAGVAR(link_all_deplibs, $1)=yes + + output_verbose_link_cmd='func_echo_all' + + # Archives containing C++ object files must be created using + # "CC -xar", where "CC" is the Sun C++ compiler. This is + # necessary to make sure instantiated templates are included + # in the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC -xar -o $oldlib $oldobjs' + ;; + gcx*) + # Green Hills C++ Compiler + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + + # The C++ compiler must be used to create the archive. + _LT_TAGVAR(old_archive_cmds, $1)='$CC $LDFLAGS -archive -o $oldlib $oldobjs' + ;; + *) + # GNU C++ compiler with Solaris linker + if test "$GXX" = yes && test "$with_gnu_ld" = no; then + _LT_TAGVAR(no_undefined_flag, $1)=' ${wl}-z ${wl}defs' + if $CC --version | $GREP -v '^2\.7' > /dev/null; then + _LT_TAGVAR(archive_cmds, $1)='$CC -shared $pic_flag -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -shared $pic_flag -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -shared $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + else + # g++ 2.7 appears to require `-G' NOT `-shared' on this + # platform. 
+ _LT_TAGVAR(archive_cmds, $1)='$CC -G -nostdlib $LDFLAGS $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags ${wl}-h $wl$soname -o $lib' + _LT_TAGVAR(archive_expsym_cmds, $1)='echo "{ global:" > $lib.exp~cat $export_symbols | $SED -e "s/\(.*\)/\1;/" >> $lib.exp~echo "local: *; };" >> $lib.exp~ + $CC -G -nostdlib ${wl}-M $wl$lib.exp -o $lib $predep_objects $libobjs $deplibs $postdep_objects $compiler_flags~$RM $lib.exp' + + # Commands to make compiler produce verbose output that lists + # what "hidden" libraries, object files and flags are used when + # linking a shared library. + output_verbose_link_cmd='$CC -G $CFLAGS -v conftest.$objext 2>&1 | $GREP -v "^Configured with:" | $GREP "\-L"' + fi + + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R $wl$libdir' + case $host_os in + solaris2.[[0-5]] | solaris2.[[0-5]].*) ;; + *) + _LT_TAGVAR(whole_archive_flag_spec, $1)='${wl}-z ${wl}allextract$convenience ${wl}-z ${wl}defaultextract' + ;; + esac + fi + ;; + esac + ;; + + sysv4*uw2* | sysv5OpenUNIX* | sysv5UnixWare7.[[01]].[[10]]* | unixware7* | sco3.2v5.0.[[024]]*) + _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + sysv5* | sco3.2v5* | sco5v6*) + # Note: We can NOT use -z defs as we might desire, because we do not + # link with -lc, and that would cause any symbols used from libc to + # always be unresolved, which means just about no library would + # ever link correctly. If we're not using GNU ld we use -z text + # though, which does catch some bad symbols but isn't as heavy-handed + # as -z defs. 
+ _LT_TAGVAR(no_undefined_flag, $1)='${wl}-z,text' + _LT_TAGVAR(allow_undefined_flag, $1)='${wl}-z,nodefs' + _LT_TAGVAR(archive_cmds_need_lc, $1)=no + _LT_TAGVAR(hardcode_shlibpath_var, $1)=no + _LT_TAGVAR(hardcode_libdir_flag_spec, $1)='${wl}-R,$libdir' + _LT_TAGVAR(hardcode_libdir_separator, $1)=':' + _LT_TAGVAR(link_all_deplibs, $1)=yes + _LT_TAGVAR(export_dynamic_flag_spec, $1)='${wl}-Bexport' + runpath_var='LD_RUN_PATH' + + case $cc_basename in + CC*) + _LT_TAGVAR(archive_cmds, $1)='$CC -G ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -G ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(old_archive_cmds, $1)='$CC -Tprelink_objects $oldobjs~ + '"$_LT_TAGVAR(old_archive_cmds, $1)" + _LT_TAGVAR(reload_cmds, $1)='$CC -Tprelink_objects $reload_objs~ + '"$_LT_TAGVAR(reload_cmds, $1)" + ;; + *) + _LT_TAGVAR(archive_cmds, $1)='$CC -shared ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + _LT_TAGVAR(archive_expsym_cmds, $1)='$CC -shared ${wl}-Bexport:$export_symbols ${wl}-h,$soname -o $lib $libobjs $deplibs $compiler_flags' + ;; + esac + ;; + + tandem*) + case $cc_basename in + NCC*) + # NonStop-UX NCC 3.20 + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + ;; + + vxworks*) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + + *) + # FIXME: insert proper C++ library support + _LT_TAGVAR(ld_shlibs, $1)=no + ;; + esac + + AC_MSG_RESULT([$_LT_TAGVAR(ld_shlibs, $1)]) + test "$_LT_TAGVAR(ld_shlibs, $1)" = no && can_build_shared=no + + _LT_TAGVAR(GCC, $1)="$GXX" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS + LDCXX=$LD + LD=$lt_save_LD + GCC=$lt_save_GCC + with_gnu_ld=$lt_save_with_gnu_ld + lt_cv_path_LDCXX=$lt_cv_path_LD + lt_cv_path_LD=$lt_save_path_LD + lt_cv_prog_gnu_ldcxx=$lt_cv_prog_gnu_ld + lt_cv_prog_gnu_ld=$lt_save_with_gnu_ld +fi # test "$_lt_caught_CXX_error" != yes + +AC_LANG_POP +])# _LT_LANG_CXX_CONFIG + + +# _LT_FUNC_STRIPNAME_CNF +# ---------------------- +# func_stripname_cnf prefix suffix name +# strip PREFIX and SUFFIX off of NAME. +# PREFIX and SUFFIX must not contain globbing or regex special +# characters, hashes, percent signs, but SUFFIX may contain a leading +# dot (in which case that matches only a dot). +# +# This function is identical to the (non-XSI) version of func_stripname, +# except this one can be used by m4 code that may be executed by configure, +# rather than the libtool script. 
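+#
+# Illustrative calls (example values, not part of the upstream text):
+#   func_stripname_cnf 'lib' '.la' 'libfoo.la'   # func_stripname_result=foo
+#   func_stripname_cnf ''    '.lo' 'bar.lo'      # func_stripname_result=bar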
+m4_defun([_LT_FUNC_STRIPNAME_CNF],[dnl
+AC_REQUIRE([_LT_DECL_SED])
+AC_REQUIRE([_LT_PROG_ECHO_BACKSLASH])
+func_stripname_cnf ()
+{
+ case ${2} in
+ .*) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%\\\\${2}\$%%"`;;
+ *) func_stripname_result=`$ECHO "${3}" | $SED "s%^${1}%%; s%${2}\$%%"`;;
+ esac
+} # func_stripname_cnf
+])# _LT_FUNC_STRIPNAME_CNF
+
+# _LT_SYS_HIDDEN_LIBDEPS([TAGNAME])
+# ---------------------------------
+# Figure out "hidden" library dependencies from verbose
+# compiler output when linking a shared library.
+# Parse the compiler output and extract the necessary
+# objects, libraries and library flags.
+m4_defun([_LT_SYS_HIDDEN_LIBDEPS],
+[m4_require([_LT_FILEUTILS_DEFAULTS])dnl
+AC_REQUIRE([_LT_FUNC_STRIPNAME_CNF])dnl
+# Dependencies to place before and after the object being linked:
+_LT_TAGVAR(predep_objects, $1)=
+_LT_TAGVAR(postdep_objects, $1)=
+_LT_TAGVAR(predeps, $1)=
+_LT_TAGVAR(postdeps, $1)=
+_LT_TAGVAR(compiler_lib_search_path, $1)=
+
+dnl we can't use the lt_simple_compile_test_code here,
+dnl because it contains code intended for an executable,
+dnl not a library. It's possible we should let each
+dnl tag define a new lt_????_link_test_code variable,
+dnl but it's only used here...
+m4_if([$1], [], [cat > conftest.$ac_ext <<_LT_EOF
+int a;
+void foo (void) { a = 0; }
+_LT_EOF
+], [$1], [CXX], [cat > conftest.$ac_ext <<_LT_EOF
+class Foo
+{
+public:
+ Foo (void) { a = 0; }
+private:
+ int a;
+};
+_LT_EOF
+], [$1], [F77], [cat > conftest.$ac_ext <<_LT_EOF
+ subroutine foo
+ implicit none
+ integer*4 a
+ a=0
+ return
+ end
+_LT_EOF
+], [$1], [FC], [cat > conftest.$ac_ext <<_LT_EOF
+ subroutine foo
+ implicit none
+ integer a
+ a=0
+ return
+ end
+_LT_EOF
+], [$1], [GCJ], [cat > conftest.$ac_ext <<_LT_EOF
+public class foo {
+ private int a;
+ public void bar (void) {
+ a = 0;
+ }
+};
+_LT_EOF
+], [$1], [GO], [cat > conftest.$ac_ext <<_LT_EOF
+package foo
+func foo() {
+}
+_LT_EOF
+])
+
+_lt_libdeps_save_CFLAGS=$CFLAGS
+case "$CC $CFLAGS " in #(
+*\ -flto*\ *) CFLAGS="$CFLAGS -fno-lto" ;;
+*\ -fwhopr*\ *) CFLAGS="$CFLAGS -fno-whopr" ;;
+*\ -fuse-linker-plugin*\ *) CFLAGS="$CFLAGS -fno-use-linker-plugin" ;;
+esac
+
+dnl Parse the compiler output and extract the necessary
+dnl objects, libraries and library flags.
+if AC_TRY_EVAL(ac_compile); then
+ # Parse the compiler output and extract the necessary
+ # objects, libraries and library flags.
+
+ # Sentinel used to keep track of whether or not we are before
+ # the conftest object file.
+ pre_test_object_deps_done=no
+
+ for p in `eval "$output_verbose_link_cmd"`; do
+ case ${prev}${p} in
+
+ -L* | -R* | -l*)
+ # Some compilers place space between "-{L,R}" and the path.
+ # Remove the space.
+ if test $p = "-L" ||
+ test $p = "-R"; then
+ prev=$p
+ continue
+ fi
+
+ # Expand the sysroot to ease extracting the directories later.
+ if test -z "$prev"; then
+ case $p in
+ -L*) func_stripname_cnf '-L' '' "$p"; prev=-L; p=$func_stripname_result ;;
+ -R*) func_stripname_cnf '-R' '' "$p"; prev=-R; p=$func_stripname_result ;;
+ -l*) func_stripname_cnf '-l' '' "$p"; prev=-l; p=$func_stripname_result ;;
+ esac
+ fi
+ case $p in
+ =*) func_stripname_cnf '=' '' "$p"; p=$lt_sysroot$func_stripname_result ;;
+ esac
+ if test "$pre_test_object_deps_done" = no; then
+ case ${prev} in
+ -L | -R)
+ # Internal compiler library paths should come after those
+ # provided by the user. The postdeps already come after the
+ # user supplied libs so there is no need to process them.
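+ # Illustrative trace (paths made up, objext assumed to be o): for
+ # verbose link output
+ #   -L/opt/cc/lib crtbegin.o conftest.o -lstdc++ crtend.o
+ # this loop files -L/opt/cc/lib under compiler_lib_search_path and
+ # crtbegin.o under predep_objects, then, once the conftest.o
+ # sentinel has been seen, -lstdc++ under postdeps and crtend.o
+ # under postdep_objects.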
+ if test -z "$_LT_TAGVAR(compiler_lib_search_path, $1)"; then + _LT_TAGVAR(compiler_lib_search_path, $1)="${prev}${p}" + else + _LT_TAGVAR(compiler_lib_search_path, $1)="${_LT_TAGVAR(compiler_lib_search_path, $1)} ${prev}${p}" + fi + ;; + # The "-l" case would never come before the object being + # linked, so don't bother handling this case. + esac + else + if test -z "$_LT_TAGVAR(postdeps, $1)"; then + _LT_TAGVAR(postdeps, $1)="${prev}${p}" + else + _LT_TAGVAR(postdeps, $1)="${_LT_TAGVAR(postdeps, $1)} ${prev}${p}" + fi + fi + prev= + ;; + + *.lto.$objext) ;; # Ignore GCC LTO objects + *.$objext) + # This assumes that the test object file only shows up + # once in the compiler output. + if test "$p" = "conftest.$objext"; then + pre_test_object_deps_done=yes + continue + fi + + if test "$pre_test_object_deps_done" = no; then + if test -z "$_LT_TAGVAR(predep_objects, $1)"; then + _LT_TAGVAR(predep_objects, $1)="$p" + else + _LT_TAGVAR(predep_objects, $1)="$_LT_TAGVAR(predep_objects, $1) $p" + fi + else + if test -z "$_LT_TAGVAR(postdep_objects, $1)"; then + _LT_TAGVAR(postdep_objects, $1)="$p" + else + _LT_TAGVAR(postdep_objects, $1)="$_LT_TAGVAR(postdep_objects, $1) $p" + fi + fi + ;; + + *) ;; # Ignore the rest. + + esac + done + + # Clean up. + rm -f a.out a.exe +else + echo "libtool.m4: error: problem compiling $1 test program" +fi + +$RM -f confest.$objext +CFLAGS=$_lt_libdeps_save_CFLAGS + +# PORTME: override above test on systems where it is broken +m4_if([$1], [CXX], +[case $host_os in +interix[[3-9]]*) + # Interix 3.5 installs completely hosed .la files for C++, so rather than + # hack all around it, let's just trust "g++" to DTRT. + _LT_TAGVAR(predep_objects,$1)= + _LT_TAGVAR(postdep_objects,$1)= + _LT_TAGVAR(postdeps,$1)= + ;; + +linux*) + case `$CC -V 2>&1 | sed 5q` in + *Sun\ C*) + # Sun C++ 5.9 + + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. + case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + if test "$solaris_use_stlport4" != yes; then + _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' + fi + ;; + esac + ;; + +solaris*) + case $cc_basename in + CC* | sunCC*) + # The more standards-conforming stlport4 library is + # incompatible with the Cstd library. Avoid specifying + # it if it's in CXXFLAGS. Ignore libCrun as + # -library=stlport4 depends on it. + case " $CXX $CXXFLAGS " in + *" -library=stlport4 "*) + solaris_use_stlport4=yes + ;; + esac + + # Adding this requires a known-good setup of shared libraries for + # Sun compiler versions before 5.6, else PIC objects from an old + # archive will be linked into the output, leading to subtle bugs. + if test "$solaris_use_stlport4" != yes; then + _LT_TAGVAR(postdeps,$1)='-library=Cstd -library=Crun' + fi + ;; + esac + ;; +esac +]) + +case " $_LT_TAGVAR(postdeps, $1) " in +*" -lc "*) _LT_TAGVAR(archive_cmds_need_lc, $1)=no ;; +esac + _LT_TAGVAR(compiler_lib_search_dirs, $1)= +if test -n "${_LT_TAGVAR(compiler_lib_search_path, $1)}"; then + _LT_TAGVAR(compiler_lib_search_dirs, $1)=`echo " ${_LT_TAGVAR(compiler_lib_search_path, $1)}" | ${SED} -e 's! -L! 
!g' -e 's!^ !!'` +fi +_LT_TAGDECL([], [compiler_lib_search_dirs], [1], + [The directories searched by this compiler when creating a shared library]) +_LT_TAGDECL([], [predep_objects], [1], + [Dependencies to place before and after the objects being linked to + create a shared library]) +_LT_TAGDECL([], [postdep_objects], [1]) +_LT_TAGDECL([], [predeps], [1]) +_LT_TAGDECL([], [postdeps], [1]) +_LT_TAGDECL([], [compiler_lib_search_path], [1], + [The library search path used internally by the compiler when linking + a shared library]) +])# _LT_SYS_HIDDEN_LIBDEPS + + +# _LT_LANG_F77_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for a Fortran 77 compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_F77_CONFIG], +[AC_LANG_PUSH(Fortran 77) +if test -z "$F77" || test "X$F77" = "Xno"; then + _lt_disable_F77=yes +fi + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for f77 test sources. +ac_ext=f + +# Object file extension for compiled f77 test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the F77 compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_disable_F77" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. + lt_save_CC="$CC" + lt_save_GCC=$GCC + lt_save_CFLAGS=$CFLAGS + CC=${F77-"f77"} + CFLAGS=$FFLAGS + compiler=$CC + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + GCC=$G77 + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. 
+ case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)="$G77" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC="$lt_save_CC" + CFLAGS="$lt_save_CFLAGS" +fi # test "$_lt_disable_F77" != yes + +AC_LANG_POP +])# _LT_LANG_F77_CONFIG + + +# _LT_LANG_FC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for a Fortran compiler are +# suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_FC_CONFIG], +[AC_LANG_PUSH(Fortran) + +if test -z "$FC" || test "X$FC" = "Xno"; then + _lt_disable_FC=yes +fi + +_LT_TAGVAR(archive_cmds_need_lc, $1)=no +_LT_TAGVAR(allow_undefined_flag, $1)= +_LT_TAGVAR(always_export_symbols, $1)=no +_LT_TAGVAR(archive_expsym_cmds, $1)= +_LT_TAGVAR(export_dynamic_flag_spec, $1)= +_LT_TAGVAR(hardcode_direct, $1)=no +_LT_TAGVAR(hardcode_direct_absolute, $1)=no +_LT_TAGVAR(hardcode_libdir_flag_spec, $1)= +_LT_TAGVAR(hardcode_libdir_separator, $1)= +_LT_TAGVAR(hardcode_minus_L, $1)=no +_LT_TAGVAR(hardcode_automatic, $1)=no +_LT_TAGVAR(inherit_rpath, $1)=no +_LT_TAGVAR(module_cmds, $1)= +_LT_TAGVAR(module_expsym_cmds, $1)= +_LT_TAGVAR(link_all_deplibs, $1)=unknown +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds +_LT_TAGVAR(no_undefined_flag, $1)= +_LT_TAGVAR(whole_archive_flag_spec, $1)= +_LT_TAGVAR(enable_shared_with_static_runtimes, $1)=no + +# Source file extension for fc test sources. +ac_ext=${ac_fc_srcext-f} + +# Object file extension for compiled fc test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# No sense in running all these tests if we already determined that +# the FC compiler isn't working. Some variables (like enable_shared) +# are currently assumed to apply to all compilers on this platform, +# and will be corrupted by setting them based on a non-working compiler. +if test "$_lt_disable_FC" != yes; then + # Code to be used in simple compile tests + lt_simple_compile_test_code="\ + subroutine t + return + end +" + + # Code to be used in simple link tests + lt_simple_link_test_code="\ + program t + end +" + + # ltmain only uses $CC for tagged configurations so make sure $CC is set. + _LT_TAG_COMPILER + + # save warnings/boilerplate of simple test code + _LT_COMPILER_BOILERPLATE + _LT_LINKER_BOILERPLATE + + # Allow CC to be a program name with arguments. 
+ lt_save_CC="$CC" + lt_save_GCC=$GCC + lt_save_CFLAGS=$CFLAGS + CC=${FC-"f95"} + CFLAGS=$FCFLAGS + compiler=$CC + GCC=$ac_cv_fc_compiler_gnu + + _LT_TAGVAR(compiler, $1)=$CC + _LT_CC_BASENAME([$compiler]) + + if test -n "$compiler"; then + AC_MSG_CHECKING([if libtool supports shared libraries]) + AC_MSG_RESULT([$can_build_shared]) + + AC_MSG_CHECKING([whether to build shared libraries]) + test "$can_build_shared" = "no" && enable_shared=no + + # On AIX, shared libraries and static libraries use the same namespace, and + # are all built from PIC. + case $host_os in + aix3*) + test "$enable_shared" = yes && enable_static=no + if test -n "$RANLIB"; then + archive_cmds="$archive_cmds~\$RANLIB \$lib" + postinstall_cmds='$RANLIB $lib' + fi + ;; + aix[[4-9]]*) + if test "$host_cpu" != ia64 && test "$aix_use_runtimelinking" = no ; then + test "$enable_shared" = yes && enable_static=no + fi + ;; + esac + AC_MSG_RESULT([$enable_shared]) + + AC_MSG_CHECKING([whether to build static libraries]) + # Make sure either enable_shared or enable_static is yes. + test "$enable_shared" = yes || enable_static=yes + AC_MSG_RESULT([$enable_static]) + + _LT_TAGVAR(GCC, $1)="$ac_cv_fc_compiler_gnu" + _LT_TAGVAR(LD, $1)="$LD" + + ## CAVEAT EMPTOR: + ## There is no encapsulation within the following macros, do not change + ## the running order or otherwise move them around unless you know exactly + ## what you are doing... + _LT_SYS_HIDDEN_LIBDEPS($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_SYS_DYNAMIC_LINKER($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) + fi # test -n "$compiler" + + GCC=$lt_save_GCC + CC=$lt_save_CC + CFLAGS=$lt_save_CFLAGS +fi # test "$_lt_disable_FC" != yes + +AC_LANG_POP +])# _LT_LANG_FC_CONFIG + + +# _LT_LANG_GCJ_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for the GNU Java Compiler compiler +# are suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_GCJ_CONFIG], +[AC_REQUIRE([LT_PROG_GCJ])dnl +AC_LANG_SAVE + +# Source file extension for Java test sources. +ac_ext=java + +# Object file extension for compiled Java test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="class foo {}" + +# Code to be used in simple link tests +lt_simple_link_test_code='public class conftest { public static void main(String[[]] argv) {}; }' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC=yes +CC=${GCJ-"gcj"} +CFLAGS=$GCJFLAGS +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_TAGVAR(LD, $1)="$LD" +_LT_CC_BASENAME([$compiler]) + +# GCJ did not exist at the time GCC didn't implicitly link libc in. +_LT_TAGVAR(archive_cmds_need_lc, $1)=no + +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... 
+if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) +fi + +AC_LANG_RESTORE + +GCC=$lt_save_GCC +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_GCJ_CONFIG + + +# _LT_LANG_GO_CONFIG([TAG]) +# -------------------------- +# Ensure that the configuration variables for the GNU Go compiler +# are suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_GO_CONFIG], +[AC_REQUIRE([LT_PROG_GO])dnl +AC_LANG_SAVE + +# Source file extension for Go test sources. +ac_ext=go + +# Object file extension for compiled Go test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code="package main; func main() { }" + +# Code to be used in simple link tests +lt_simple_link_test_code='package main; func main() { }' + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. +lt_save_CC=$CC +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC=yes +CC=${GOC-"gccgo"} +CFLAGS=$GOFLAGS +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_TAGVAR(LD, $1)="$LD" +_LT_CC_BASENAME([$compiler]) + +# Go did not exist at the time GCC didn't implicitly link libc in. +_LT_TAGVAR(archive_cmds_need_lc, $1)=no + +_LT_TAGVAR(old_archive_cmds, $1)=$old_archive_cmds +_LT_TAGVAR(reload_flag, $1)=$reload_flag +_LT_TAGVAR(reload_cmds, $1)=$reload_cmds + +## CAVEAT EMPTOR: +## There is no encapsulation within the following macros, do not change +## the running order or otherwise move them around unless you know exactly +## what you are doing... +if test -n "$compiler"; then + _LT_COMPILER_NO_RTTI($1) + _LT_COMPILER_PIC($1) + _LT_COMPILER_C_O($1) + _LT_COMPILER_FILE_LOCKS($1) + _LT_LINKER_SHLIBS($1) + _LT_LINKER_HARDCODE_LIBPATH($1) + + _LT_CONFIG($1) +fi + +AC_LANG_RESTORE + +GCC=$lt_save_GCC +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_GO_CONFIG + + +# _LT_LANG_RC_CONFIG([TAG]) +# ------------------------- +# Ensure that the configuration variables for the Windows resource compiler +# are suitably defined. These variables are subsequently used by _LT_CONFIG +# to write the compiler configuration to `libtool'. +m4_defun([_LT_LANG_RC_CONFIG], +[AC_REQUIRE([LT_PROG_RC])dnl +AC_LANG_SAVE + +# Source file extension for RC test sources. +ac_ext=rc + +# Object file extension for compiled RC test sources. +objext=o +_LT_TAGVAR(objext, $1)=$objext + +# Code to be used in simple compile tests +lt_simple_compile_test_code='sample MENU { MENUITEM "&Soup", 100, CHECKED }' + +# Code to be used in simple link tests +lt_simple_link_test_code="$lt_simple_compile_test_code" + +# ltmain only uses $CC for tagged configurations so make sure $CC is set. +_LT_TAG_COMPILER + +# save warnings/boilerplate of simple test code +_LT_COMPILER_BOILERPLATE +_LT_LINKER_BOILERPLATE + +# Allow CC to be a program name with arguments. 
+lt_save_CC="$CC" +lt_save_CFLAGS=$CFLAGS +lt_save_GCC=$GCC +GCC= +CC=${RC-"windres"} +CFLAGS= +compiler=$CC +_LT_TAGVAR(compiler, $1)=$CC +_LT_CC_BASENAME([$compiler]) +_LT_TAGVAR(lt_cv_prog_compiler_c_o, $1)=yes + +if test -n "$compiler"; then + : + _LT_CONFIG($1) +fi + +GCC=$lt_save_GCC +AC_LANG_RESTORE +CC=$lt_save_CC +CFLAGS=$lt_save_CFLAGS +])# _LT_LANG_RC_CONFIG + + +# LT_PROG_GCJ +# ----------- +AC_DEFUN([LT_PROG_GCJ], +[m4_ifdef([AC_PROG_GCJ], [AC_PROG_GCJ], + [m4_ifdef([A][M_PROG_GCJ], [A][M_PROG_GCJ], + [AC_CHECK_TOOL(GCJ, gcj,) + test "x${GCJFLAGS+set}" = xset || GCJFLAGS="-g -O2" + AC_SUBST(GCJFLAGS)])])[]dnl +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_GCJ], [LT_PROG_GCJ]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_GCJ], []) + + +# LT_PROG_GO +# ---------- +AC_DEFUN([LT_PROG_GO], +[AC_CHECK_TOOL(GOC, gccgo,) +]) + + +# LT_PROG_RC +# ---------- +AC_DEFUN([LT_PROG_RC], +[AC_CHECK_TOOL(RC, windres,) +]) + +# Old name: +AU_ALIAS([LT_AC_PROG_RC], [LT_PROG_RC]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_RC], []) + + +# _LT_DECL_EGREP +# -------------- +# If we don't have a new enough Autoconf to choose the best grep +# available, choose the one first in the user's PATH. +m4_defun([_LT_DECL_EGREP], +[AC_REQUIRE([AC_PROG_EGREP])dnl +AC_REQUIRE([AC_PROG_FGREP])dnl +test -z "$GREP" && GREP=grep +_LT_DECL([], [GREP], [1], [A grep program that handles long lines]) +_LT_DECL([], [EGREP], [1], [An ERE matcher]) +_LT_DECL([], [FGREP], [1], [A literal string matcher]) +dnl Non-bleeding-edge autoconf doesn't subst GREP, so do it here too +AC_SUBST([GREP]) +]) + + +# _LT_DECL_OBJDUMP +# -------------- +# If we don't have a new enough Autoconf to choose the best objdump +# available, choose the one first in the user's PATH. +m4_defun([_LT_DECL_OBJDUMP], +[AC_CHECK_TOOL(OBJDUMP, objdump, false) +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [1], [An object symbol dumper]) +AC_SUBST([OBJDUMP]) +]) + +# _LT_DECL_DLLTOOL +# ---------------- +# Ensure DLLTOOL variable is set. +m4_defun([_LT_DECL_DLLTOOL], +[AC_CHECK_TOOL(DLLTOOL, dlltool, false) +test -z "$DLLTOOL" && DLLTOOL=dlltool +_LT_DECL([], [DLLTOOL], [1], [DLL creation program]) +AC_SUBST([DLLTOOL]) +]) + +# _LT_DECL_SED +# ------------ +# Check for a fully-functional sed program, that truncates +# as few characters as possible. Prefer GNU sed if found. +m4_defun([_LT_DECL_SED], +[AC_PROG_SED +test -z "$SED" && SED=sed +Xsed="$SED -e 1s/^X//" +_LT_DECL([], [SED], [1], [A sed program that does not truncate output]) +_LT_DECL([], [Xsed], ["\$SED -e 1s/^X//"], + [Sed that helps us avoid accidentally triggering echo(1) options like -n]) +])# _LT_DECL_SED + +m4_ifndef([AC_PROG_SED], [ +############################################################ +# NOTE: This macro has been submitted for inclusion into # +# GNU Autoconf as AC_PROG_SED. When it is available in # +# a released version of Autoconf we should remove this # +# macro and use it instead. # +############################################################ + +m4_defun([AC_PROG_SED], +[AC_MSG_CHECKING([for a sed that does not truncate output]) +AC_CACHE_VAL(lt_cv_path_SED, +[# Loop through the user's path and test for sed and gsed. +# Then use that list of sed's as ones to test for truncation. +as_save_IFS=$IFS; IFS=$PATH_SEPARATOR +for as_dir in $PATH +do + IFS=$as_save_IFS + test -z "$as_dir" && as_dir=. 
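+ # This pass only collects every sed/gsed candidate on $PATH; the
+ # doubling loop further down then feeds each candidate ever-growing
+ # input until it truncates, keeping the longest-surviving candidate
+ # (a GNU sed, if found, is taken immediately).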
+ for lt_ac_prog in sed gsed; do + for ac_exec_ext in '' $ac_executable_extensions; do + if $as_executable_p "$as_dir/$lt_ac_prog$ac_exec_ext"; then + lt_ac_sed_list="$lt_ac_sed_list $as_dir/$lt_ac_prog$ac_exec_ext" + fi + done + done +done +IFS=$as_save_IFS +lt_ac_max=0 +lt_ac_count=0 +# Add /usr/xpg4/bin/sed as it is typically found on Solaris +# along with /bin/sed that truncates output. +for lt_ac_sed in $lt_ac_sed_list /usr/xpg4/bin/sed; do + test ! -f $lt_ac_sed && continue + cat /dev/null > conftest.in + lt_ac_count=0 + echo $ECHO_N "0123456789$ECHO_C" >conftest.in + # Check for GNU sed and select it if it is found. + if "$lt_ac_sed" --version 2>&1 < /dev/null | grep 'GNU' > /dev/null; then + lt_cv_path_SED=$lt_ac_sed + break + fi + while true; do + cat conftest.in conftest.in >conftest.tmp + mv conftest.tmp conftest.in + cp conftest.in conftest.nl + echo >>conftest.nl + $lt_ac_sed -e 's/a$//' < conftest.nl >conftest.out || break + cmp -s conftest.out conftest.nl || break + # 10000 chars as input seems more than enough + test $lt_ac_count -gt 10 && break + lt_ac_count=`expr $lt_ac_count + 1` + if test $lt_ac_count -gt $lt_ac_max; then + lt_ac_max=$lt_ac_count + lt_cv_path_SED=$lt_ac_sed + fi + done +done +]) +SED=$lt_cv_path_SED +AC_SUBST([SED]) +AC_MSG_RESULT([$SED]) +])#AC_PROG_SED +])#m4_ifndef + +# Old name: +AU_ALIAS([LT_AC_PROG_SED], [AC_PROG_SED]) +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([LT_AC_PROG_SED], []) + + +# _LT_CHECK_SHELL_FEATURES +# ------------------------ +# Find out whether the shell is Bourne or XSI compatible, +# or has some other useful features. +m4_defun([_LT_CHECK_SHELL_FEATURES], +[AC_MSG_CHECKING([whether the shell understands some XSI constructs]) +# Try some XSI features +xsi_shell=no +( _lt_dummy="a/b/c" + test "${_lt_dummy##*/},${_lt_dummy%/*},${_lt_dummy#??}"${_lt_dummy%"$_lt_dummy"}, \ + = c,a/b,b/c, \ + && eval 'test $(( 1 + 1 )) -eq 2 \ + && test "${#_lt_dummy}" -eq 5' ) >/dev/null 2>&1 \ + && xsi_shell=yes +AC_MSG_RESULT([$xsi_shell]) +_LT_CONFIG_LIBTOOL_INIT([xsi_shell='$xsi_shell']) + +AC_MSG_CHECKING([whether the shell understands "+="]) +lt_shell_append=no +( foo=bar; set foo baz; eval "$[1]+=\$[2]" && test "$foo" = barbaz ) \ + >/dev/null 2>&1 \ + && lt_shell_append=yes +AC_MSG_RESULT([$lt_shell_append]) +_LT_CONFIG_LIBTOOL_INIT([lt_shell_append='$lt_shell_append']) + +if ( (MAIL=60; unset MAIL) || exit) >/dev/null 2>&1; then + lt_unset=unset +else + lt_unset=false +fi +_LT_DECL([], [lt_unset], [0], [whether the shell understands "unset"])dnl + +# test EBCDIC or ASCII +case `echo X|tr X '\101'` in + A) # ASCII based system + # \n is not interpreted correctly by Solaris 8 /usr/ucb/tr + lt_SP2NL='tr \040 \012' + lt_NL2SP='tr \015\012 \040\040' + ;; + *) # EBCDIC based system + lt_SP2NL='tr \100 \n' + lt_NL2SP='tr \r\n \100\100' + ;; +esac +_LT_DECL([SP2NL], [lt_SP2NL], [1], [turn spaces into newlines])dnl +_LT_DECL([NL2SP], [lt_NL2SP], [1], [turn newlines into spaces])dnl +])# _LT_CHECK_SHELL_FEATURES + + +# _LT_PROG_FUNCTION_REPLACE (FUNCNAME, REPLACEMENT-BODY) +# ------------------------------------------------------ +# In `$cfgfile', look for function FUNCNAME delimited by `^FUNCNAME ()$' and +# '^} FUNCNAME ', and replace its body with REPLACEMENT-BODY. 
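+#
+# Example of the mechanics (mirroring a replacement made further down):
+#   _LT_PROG_FUNCTION_REPLACE([func_basename], [func_basename_result="${1##*/}"])
+# rewrites everything between the `func_basename ()' line and the
+# `} # func_basename ' line in $cfgfile with the new one-line body.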
+m4_defun([_LT_PROG_FUNCTION_REPLACE], +[dnl { +sed -e '/^$1 ()$/,/^} # $1 /c\ +$1 ()\ +{\ +m4_bpatsubsts([$2], [$], [\\], [^\([ ]\)], [\\\1]) +} # Extended-shell $1 implementation' "$cfgfile" > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") +test 0 -eq $? || _lt_function_replace_fail=: +]) + + +# _LT_PROG_REPLACE_SHELLFNS +# ------------------------- +# Replace existing portable implementations of several shell functions with +# equivalent extended shell implementations where those features are available.. +m4_defun([_LT_PROG_REPLACE_SHELLFNS], +[if test x"$xsi_shell" = xyes; then + _LT_PROG_FUNCTION_REPLACE([func_dirname], [dnl + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac]) + + _LT_PROG_FUNCTION_REPLACE([func_basename], [dnl + func_basename_result="${1##*/}"]) + + _LT_PROG_FUNCTION_REPLACE([func_dirname_and_basename], [dnl + case ${1} in + */*) func_dirname_result="${1%/*}${2}" ;; + * ) func_dirname_result="${3}" ;; + esac + func_basename_result="${1##*/}"]) + + _LT_PROG_FUNCTION_REPLACE([func_stripname], [dnl + # pdksh 5.2.14 does not do ${X%$Y} correctly if both X and Y are + # positional parameters, so assign one to ordinary parameter first. + func_stripname_result=${3} + func_stripname_result=${func_stripname_result#"${1}"} + func_stripname_result=${func_stripname_result%"${2}"}]) + + _LT_PROG_FUNCTION_REPLACE([func_split_long_opt], [dnl + func_split_long_opt_name=${1%%=*} + func_split_long_opt_arg=${1#*=}]) + + _LT_PROG_FUNCTION_REPLACE([func_split_short_opt], [dnl + func_split_short_opt_arg=${1#??} + func_split_short_opt_name=${1%"$func_split_short_opt_arg"}]) + + _LT_PROG_FUNCTION_REPLACE([func_lo2o], [dnl + case ${1} in + *.lo) func_lo2o_result=${1%.lo}.${objext} ;; + *) func_lo2o_result=${1} ;; + esac]) + + _LT_PROG_FUNCTION_REPLACE([func_xform], [ func_xform_result=${1%.*}.lo]) + + _LT_PROG_FUNCTION_REPLACE([func_arith], [ func_arith_result=$(( $[*] ))]) + + _LT_PROG_FUNCTION_REPLACE([func_len], [ func_len_result=${#1}]) +fi + +if test x"$lt_shell_append" = xyes; then + _LT_PROG_FUNCTION_REPLACE([func_append], [ eval "${1}+=\\${2}"]) + + _LT_PROG_FUNCTION_REPLACE([func_append_quoted], [dnl + func_quote_for_eval "${2}" +dnl m4 expansion turns \\\\ into \\, and then the shell eval turns that into \ + eval "${1}+=\\\\ \\$func_quote_for_eval_result"]) + + # Save a `func_append' function call where possible by direct use of '+=' + sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1+="%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? || _lt_function_replace_fail=: +else + # Save a `func_append' function call even when '+=' is not available + sed -e 's%func_append \([[a-zA-Z_]]\{1,\}\) "%\1="$\1%g' $cfgfile > $cfgfile.tmp \ + && mv -f "$cfgfile.tmp" "$cfgfile" \ + || (rm -f "$cfgfile" && cp "$cfgfile.tmp" "$cfgfile" && rm -f "$cfgfile.tmp") + test 0 -eq $? || _lt_function_replace_fail=: +fi + +if test x"$_lt_function_replace_fail" = x":"; then + AC_MSG_WARN([Unable to substitute extended shell functions in $ofile]) +fi +]) + +# _LT_PATH_CONVERSION_FUNCTIONS +# ----------------------------- +# Determine which file name conversion functions should be used by +# func_to_host_file (and, implicitly, by func_to_host_path). These are needed +# for certain cross-compile configurations and native mingw. 
+m4_defun([_LT_PATH_CONVERSION_FUNCTIONS], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +AC_REQUIRE([AC_CANONICAL_BUILD])dnl +AC_MSG_CHECKING([how to convert $build file names to $host format]) +AC_CACHE_VAL(lt_cv_to_host_file_cmd, +[case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_w32 + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_cygwin_to_w32 + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_w32 + ;; + esac + ;; + *-*-cygwin* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_host_file_cmd=func_convert_file_msys_to_cygwin + ;; + *-*-cygwin* ) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; + * ) # otherwise, assume *nix + lt_cv_to_host_file_cmd=func_convert_file_nix_to_cygwin + ;; + esac + ;; + * ) # unhandled hosts (and "normal" native builds) + lt_cv_to_host_file_cmd=func_convert_file_noop + ;; +esac +]) +to_host_file_cmd=$lt_cv_to_host_file_cmd +AC_MSG_RESULT([$lt_cv_to_host_file_cmd]) +_LT_DECL([to_host_file_cmd], [lt_cv_to_host_file_cmd], + [0], [convert $build file names to $host format])dnl + +AC_MSG_CHECKING([how to convert $build file names to toolchain format]) +AC_CACHE_VAL(lt_cv_to_tool_file_cmd, +[#assume ordinary cross tools, or native build. +lt_cv_to_tool_file_cmd=func_convert_file_noop +case $host in + *-*-mingw* ) + case $build in + *-*-mingw* ) # actually msys + lt_cv_to_tool_file_cmd=func_convert_file_msys_to_w32 + ;; + esac + ;; +esac +]) +to_tool_file_cmd=$lt_cv_to_tool_file_cmd +AC_MSG_RESULT([$lt_cv_to_tool_file_cmd]) +_LT_DECL([to_tool_file_cmd], [lt_cv_to_tool_file_cmd], + [0], [convert $build files to toolchain format])dnl +])# _LT_PATH_CONVERSION_FUNCTIONS diff --git a/external/gtest/m4/ltoptions.m4 b/external/gtest/m4/ltoptions.m4 new file mode 100755 index 0000000000..5d9acd8e23 --- /dev/null +++ b/external/gtest/m4/ltoptions.m4 @@ -0,0 +1,384 @@ +# Helper functions for option handling. -*- Autoconf -*- +# +# Copyright (C) 2004, 2005, 2007, 2008, 2009 Free Software Foundation, +# Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 7 ltoptions.m4 + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTOPTIONS_VERSION], [m4_if([1])]) + + +# _LT_MANGLE_OPTION(MACRO-NAME, OPTION-NAME) +# ------------------------------------------ +m4_define([_LT_MANGLE_OPTION], +[[_LT_OPTION_]m4_bpatsubst($1__$2, [[^a-zA-Z0-9_]], [_])]) + + +# _LT_SET_OPTION(MACRO-NAME, OPTION-NAME) +# --------------------------------------- +# Set option OPTION-NAME for macro MACRO-NAME, and if there is a +# matching handler defined, dispatch to it. Other OPTION-NAMEs are +# saved as a flag. +m4_define([_LT_SET_OPTION], +[m4_define(_LT_MANGLE_OPTION([$1], [$2]))dnl +m4_ifdef(_LT_MANGLE_DEFUN([$1], [$2]), + _LT_MANGLE_DEFUN([$1], [$2]), + [m4_warning([Unknown $1 option `$2'])])[]dnl +]) + + +# _LT_IF_OPTION(MACRO-NAME, OPTION-NAME, IF-SET, [IF-NOT-SET]) +# ------------------------------------------------------------ +# Execute IF-SET if OPTION is set, IF-NOT-SET otherwise. 
+m4_define([_LT_IF_OPTION], +[m4_ifdef(_LT_MANGLE_OPTION([$1], [$2]), [$3], [$4])]) + + +# _LT_UNLESS_OPTIONS(MACRO-NAME, OPTION-LIST, IF-NOT-SET) +# ------------------------------------------------------- +# Execute IF-NOT-SET unless all options in OPTION-LIST for MACRO-NAME +# are set. +m4_define([_LT_UNLESS_OPTIONS], +[m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [m4_ifdef(_LT_MANGLE_OPTION([$1], _LT_Option), + [m4_define([$0_found])])])[]dnl +m4_ifdef([$0_found], [m4_undefine([$0_found])], [$3 +])[]dnl +]) + + +# _LT_SET_OPTIONS(MACRO-NAME, OPTION-LIST) +# ---------------------------------------- +# OPTION-LIST is a space-separated list of Libtool options associated +# with MACRO-NAME. If any OPTION has a matching handler declared with +# LT_OPTION_DEFINE, dispatch to that macro; otherwise complain about +# the unknown option and exit. +m4_defun([_LT_SET_OPTIONS], +[# Set options +m4_foreach([_LT_Option], m4_split(m4_normalize([$2])), + [_LT_SET_OPTION([$1], _LT_Option)]) + +m4_if([$1],[LT_INIT],[ + dnl + dnl Simply set some default values (i.e off) if boolean options were not + dnl specified: + _LT_UNLESS_OPTIONS([LT_INIT], [dlopen], [enable_dlopen=no + ]) + _LT_UNLESS_OPTIONS([LT_INIT], [win32-dll], [enable_win32_dll=no + ]) + dnl + dnl If no reference was made to various pairs of opposing options, then + dnl we run the default mode handler for the pair. For example, if neither + dnl `shared' nor `disable-shared' was passed, we enable building of shared + dnl archives by default: + _LT_UNLESS_OPTIONS([LT_INIT], [shared disable-shared], [_LT_ENABLE_SHARED]) + _LT_UNLESS_OPTIONS([LT_INIT], [static disable-static], [_LT_ENABLE_STATIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [pic-only no-pic], [_LT_WITH_PIC]) + _LT_UNLESS_OPTIONS([LT_INIT], [fast-install disable-fast-install], + [_LT_ENABLE_FAST_INSTALL]) + ]) +])# _LT_SET_OPTIONS + + +## --------------------------------- ## +## Macros to handle LT_INIT options. ## +## --------------------------------- ## + +# _LT_MANGLE_DEFUN(MACRO-NAME, OPTION-NAME) +# ----------------------------------------- +m4_define([_LT_MANGLE_DEFUN], +[[_LT_OPTION_DEFUN_]m4_bpatsubst(m4_toupper([$1__$2]), [[^A-Z0-9_]], [_])]) + + +# LT_OPTION_DEFINE(MACRO-NAME, OPTION-NAME, CODE) +# ----------------------------------------------- +m4_define([LT_OPTION_DEFINE], +[m4_define(_LT_MANGLE_DEFUN([$1], [$2]), [$3])[]dnl +])# LT_OPTION_DEFINE + + +# dlopen +# ------ +LT_OPTION_DEFINE([LT_INIT], [dlopen], [enable_dlopen=yes +]) + +AU_DEFUN([AC_LIBTOOL_DLOPEN], +[_LT_SET_OPTION([LT_INIT], [dlopen]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `dlopen' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_DLOPEN], []) + + +# win32-dll +# --------- +# Declare package support for building win32 dll's. 
+LT_OPTION_DEFINE([LT_INIT], [win32-dll], +[enable_win32_dll=yes + +case $host in +*-*-cygwin* | *-*-mingw* | *-*-pw32* | *-*-cegcc*) + AC_CHECK_TOOL(AS, as, false) + AC_CHECK_TOOL(DLLTOOL, dlltool, false) + AC_CHECK_TOOL(OBJDUMP, objdump, false) + ;; +esac + +test -z "$AS" && AS=as +_LT_DECL([], [AS], [1], [Assembler program])dnl + +test -z "$DLLTOOL" && DLLTOOL=dlltool +_LT_DECL([], [DLLTOOL], [1], [DLL creation program])dnl + +test -z "$OBJDUMP" && OBJDUMP=objdump +_LT_DECL([], [OBJDUMP], [1], [Object dumper program])dnl +])# win32-dll + +AU_DEFUN([AC_LIBTOOL_WIN32_DLL], +[AC_REQUIRE([AC_CANONICAL_HOST])dnl +_LT_SET_OPTION([LT_INIT], [win32-dll]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `win32-dll' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_WIN32_DLL], []) + + +# _LT_ENABLE_SHARED([DEFAULT]) +# ---------------------------- +# implement the --enable-shared flag, and supports the `shared' and +# `disable-shared' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_SHARED], +[m4_define([_LT_ENABLE_SHARED_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([shared], + [AS_HELP_STRING([--enable-shared@<:@=PKGS@:>@], + [build shared libraries @<:@default=]_LT_ENABLE_SHARED_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_shared=yes ;; + no) enable_shared=no ;; + *) + enable_shared=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_shared=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_shared=]_LT_ENABLE_SHARED_DEFAULT) + + _LT_DECL([build_libtool_libs], [enable_shared], [0], + [Whether or not to build shared libraries]) +])# _LT_ENABLE_SHARED + +LT_OPTION_DEFINE([LT_INIT], [shared], [_LT_ENABLE_SHARED([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-shared], [_LT_ENABLE_SHARED([no])]) + +# Old names: +AC_DEFUN([AC_ENABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[shared]) +]) + +AC_DEFUN([AC_DISABLE_SHARED], +[_LT_SET_OPTION([LT_INIT], [disable-shared]) +]) + +AU_DEFUN([AM_ENABLE_SHARED], [AC_ENABLE_SHARED($@)]) +AU_DEFUN([AM_DISABLE_SHARED], [AC_DISABLE_SHARED($@)]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_SHARED], []) +dnl AC_DEFUN([AM_DISABLE_SHARED], []) + + + +# _LT_ENABLE_STATIC([DEFAULT]) +# ---------------------------- +# implement the --enable-static flag, and support the `static' and +# `disable-static' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_STATIC], +[m4_define([_LT_ENABLE_STATIC_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([static], + [AS_HELP_STRING([--enable-static@<:@=PKGS@:>@], + [build static libraries @<:@default=]_LT_ENABLE_STATIC_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_static=yes ;; + no) enable_static=no ;; + *) + enable_static=no + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_static=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_static=]_LT_ENABLE_STATIC_DEFAULT) + + _LT_DECL([build_old_libs], [enable_static], [0], + [Whether or not to build static libraries]) +])# _LT_ENABLE_STATIC + +LT_OPTION_DEFINE([LT_INIT], [static], [_LT_ENABLE_STATIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-static], [_LT_ENABLE_STATIC([no])]) + +# Old names: +AC_DEFUN([AC_ENABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[static]) +]) + +AC_DEFUN([AC_DISABLE_STATIC], +[_LT_SET_OPTION([LT_INIT], [disable-static]) +]) + +AU_DEFUN([AM_ENABLE_STATIC], [AC_ENABLE_STATIC($@)]) +AU_DEFUN([AM_DISABLE_STATIC], [AC_DISABLE_STATIC($@)]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AM_ENABLE_STATIC], []) +dnl AC_DEFUN([AM_DISABLE_STATIC], []) + + + +# _LT_ENABLE_FAST_INSTALL([DEFAULT]) +# ---------------------------------- +# implement the --enable-fast-install flag, and support the `fast-install' +# and `disable-fast-install' LT_INIT options. +# DEFAULT is either `yes' or `no'. If omitted, it defaults to `yes'. +m4_define([_LT_ENABLE_FAST_INSTALL], +[m4_define([_LT_ENABLE_FAST_INSTALL_DEFAULT], [m4_if($1, no, no, yes)])dnl +AC_ARG_ENABLE([fast-install], + [AS_HELP_STRING([--enable-fast-install@<:@=PKGS@:>@], + [optimize for fast installation @<:@default=]_LT_ENABLE_FAST_INSTALL_DEFAULT[@:>@])], + [p=${PACKAGE-default} + case $enableval in + yes) enable_fast_install=yes ;; + no) enable_fast_install=no ;; + *) + enable_fast_install=no + # Look at the argument we got. We use all the common list separators. + lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for pkg in $enableval; do + IFS="$lt_save_ifs" + if test "X$pkg" = "X$p"; then + enable_fast_install=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [enable_fast_install=]_LT_ENABLE_FAST_INSTALL_DEFAULT) + +_LT_DECL([fast_install], [enable_fast_install], [0], + [Whether or not to optimize for fast installation])dnl +])# _LT_ENABLE_FAST_INSTALL + +LT_OPTION_DEFINE([LT_INIT], [fast-install], [_LT_ENABLE_FAST_INSTALL([yes])]) +LT_OPTION_DEFINE([LT_INIT], [disable-fast-install], [_LT_ENABLE_FAST_INSTALL([no])]) + +# Old names: +AU_DEFUN([AC_ENABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], m4_if([$1], [no], [disable-])[fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the `fast-install' option into LT_INIT's first parameter.]) +]) + +AU_DEFUN([AC_DISABLE_FAST_INSTALL], +[_LT_SET_OPTION([LT_INIT], [disable-fast-install]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you put +the `disable-fast-install' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_ENABLE_FAST_INSTALL], []) +dnl AC_DEFUN([AM_DISABLE_FAST_INSTALL], []) + + +# _LT_WITH_PIC([MODE]) +# -------------------- +# implement the --with-pic flag, and support the `pic-only' and `no-pic' +# LT_INIT options. +# MODE is either `yes' or `no'. If omitted, it defaults to `both'. +m4_define([_LT_WITH_PIC], +[AC_ARG_WITH([pic], + [AS_HELP_STRING([--with-pic@<:@=PKGS@:>@], + [try to use only PIC/non-PIC objects @<:@default=use both@:>@])], + [lt_p=${PACKAGE-default} + case $withval in + yes|no) pic_mode=$withval ;; + *) + pic_mode=default + # Look at the argument we got. We use all the common list separators. 
+ lt_save_ifs="$IFS"; IFS="${IFS}$PATH_SEPARATOR," + for lt_pkg in $withval; do + IFS="$lt_save_ifs" + if test "X$lt_pkg" = "X$lt_p"; then + pic_mode=yes + fi + done + IFS="$lt_save_ifs" + ;; + esac], + [pic_mode=default]) + +test -z "$pic_mode" && pic_mode=m4_default([$1], [default]) + +_LT_DECL([], [pic_mode], [0], [What type of objects to build])dnl +])# _LT_WITH_PIC + +LT_OPTION_DEFINE([LT_INIT], [pic-only], [_LT_WITH_PIC([yes])]) +LT_OPTION_DEFINE([LT_INIT], [no-pic], [_LT_WITH_PIC([no])]) + +# Old name: +AU_DEFUN([AC_LIBTOOL_PICMODE], +[_LT_SET_OPTION([LT_INIT], [pic-only]) +AC_DIAGNOSE([obsolete], +[$0: Remove this warning and the call to _LT_SET_OPTION when you +put the `pic-only' option into LT_INIT's first parameter.]) +]) + +dnl aclocal-1.4 backwards compatibility: +dnl AC_DEFUN([AC_LIBTOOL_PICMODE], []) + +## ----------------- ## +## LTDL_INIT Options ## +## ----------------- ## + +m4_define([_LTDL_MODE], []) +LT_OPTION_DEFINE([LTDL_INIT], [nonrecursive], + [m4_define([_LTDL_MODE], [nonrecursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [recursive], + [m4_define([_LTDL_MODE], [recursive])]) +LT_OPTION_DEFINE([LTDL_INIT], [subproject], + [m4_define([_LTDL_MODE], [subproject])]) + +m4_define([_LTDL_TYPE], []) +LT_OPTION_DEFINE([LTDL_INIT], [installable], + [m4_define([_LTDL_TYPE], [installable])]) +LT_OPTION_DEFINE([LTDL_INIT], [convenience], + [m4_define([_LTDL_TYPE], [convenience])]) diff --git a/external/gtest/m4/ltsugar.m4 b/external/gtest/m4/ltsugar.m4 new file mode 100755 index 0000000000..9000a057d3 --- /dev/null +++ b/external/gtest/m4/ltsugar.m4 @@ -0,0 +1,123 @@ +# ltsugar.m4 -- libtool m4 base layer. -*-Autoconf-*- +# +# Copyright (C) 2004, 2005, 2007, 2008 Free Software Foundation, Inc. +# Written by Gary V. Vaughan, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 6 ltsugar.m4 + +# This is to help aclocal find these macros, as it can't see m4_define. +AC_DEFUN([LTSUGAR_VERSION], [m4_if([0.1])]) + + +# lt_join(SEP, ARG1, [ARG2...]) +# ----------------------------- +# Produce ARG1SEPARG2...SEPARGn, omitting [] arguments and their +# associated separator. +# Needed until we can rely on m4_join from Autoconf 2.62, since all earlier +# versions in m4sugar had bugs. +m4_define([lt_join], +[m4_if([$#], [1], [], + [$#], [2], [[$2]], + [m4_if([$2], [], [], [[$2]_])$0([$1], m4_shift(m4_shift($@)))])]) +m4_define([_lt_join], +[m4_if([$#$2], [2], [], + [m4_if([$2], [], [], [[$1$2]])$0([$1], m4_shift(m4_shift($@)))])]) + + +# lt_car(LIST) +# lt_cdr(LIST) +# ------------ +# Manipulate m4 lists. +# These macros are necessary as long as will still need to support +# Autoconf-2.59 which quotes differently. +m4_define([lt_car], [[$1]]) +m4_define([lt_cdr], +[m4_if([$#], 0, [m4_fatal([$0: cannot be called without arguments])], + [$#], 1, [], + [m4_dquote(m4_shift($@))])]) +m4_define([lt_unquote], $1) + + +# lt_append(MACRO-NAME, STRING, [SEPARATOR]) +# ------------------------------------------ +# Redefine MACRO-NAME to hold its former content plus `SEPARATOR'`STRING'. +# Note that neither SEPARATOR nor STRING are expanded; they are appended +# to MACRO-NAME as is (leaving the expansion for when MACRO-NAME is invoked). +# No SEPARATOR is output if MACRO-NAME was previously undefined (different +# than defined and empty). 
+# +# This macro is needed until we can rely on Autoconf 2.62, since earlier +# versions of m4sugar mistakenly expanded SEPARATOR but not STRING. +m4_define([lt_append], +[m4_define([$1], + m4_ifdef([$1], [m4_defn([$1])[$3]])[$2])]) + + + +# lt_combine(SEP, PREFIX-LIST, INFIX, SUFFIX1, [SUFFIX2...]) +# ---------------------------------------------------------- +# Produce a SEP delimited list of all paired combinations of elements of +# PREFIX-LIST with SUFFIX1 through SUFFIXn. Each element of the list +# has the form PREFIXmINFIXSUFFIXn. +# Needed until we can rely on m4_combine added in Autoconf 2.62. +m4_define([lt_combine], +[m4_if(m4_eval([$# > 3]), [1], + [m4_pushdef([_Lt_sep], [m4_define([_Lt_sep], m4_defn([lt_car]))])]]dnl +[[m4_foreach([_Lt_prefix], [$2], + [m4_foreach([_Lt_suffix], + ]m4_dquote(m4_dquote(m4_shift(m4_shift(m4_shift($@)))))[, + [_Lt_sep([$1])[]m4_defn([_Lt_prefix])[$3]m4_defn([_Lt_suffix])])])])]) + + +# lt_if_append_uniq(MACRO-NAME, VARNAME, [SEPARATOR], [UNIQ], [NOT-UNIQ]) +# ----------------------------------------------------------------------- +# Iff MACRO-NAME does not yet contain VARNAME, then append it (delimited +# by SEPARATOR if supplied) and expand UNIQ, else NOT-UNIQ. +m4_define([lt_if_append_uniq], +[m4_ifdef([$1], + [m4_if(m4_index([$3]m4_defn([$1])[$3], [$3$2$3]), [-1], + [lt_append([$1], [$2], [$3])$4], + [$5])], + [lt_append([$1], [$2], [$3])$4])]) + + +# lt_dict_add(DICT, KEY, VALUE) +# ----------------------------- +m4_define([lt_dict_add], +[m4_define([$1($2)], [$3])]) + + +# lt_dict_add_subkey(DICT, KEY, SUBKEY, VALUE) +# -------------------------------------------- +m4_define([lt_dict_add_subkey], +[m4_define([$1($2:$3)], [$4])]) + + +# lt_dict_fetch(DICT, KEY, [SUBKEY]) +# ---------------------------------- +m4_define([lt_dict_fetch], +[m4_ifval([$3], + m4_ifdef([$1($2:$3)], [m4_defn([$1($2:$3)])]), + m4_ifdef([$1($2)], [m4_defn([$1($2)])]))]) + + +# lt_if_dict_fetch(DICT, KEY, [SUBKEY], VALUE, IF-TRUE, [IF-FALSE]) +# ----------------------------------------------------------------- +m4_define([lt_if_dict_fetch], +[m4_if(lt_dict_fetch([$1], [$2], [$3]), [$4], + [$5], + [$6])]) + + +# lt_dict_filter(DICT, [SUBKEY], VALUE, [SEPARATOR], KEY, [...]) +# -------------------------------------------------------------- +m4_define([lt_dict_filter], +[m4_if([$5], [], [], + [lt_join(m4_quote(m4_default([$4], [[, ]])), + lt_unquote(m4_split(m4_normalize(m4_foreach(_Lt_key, lt_car([m4_shiftn(4, $@)]), + [lt_if_dict_fetch([$1], _Lt_key, [$2], [$3], [_Lt_key ])])))))])[]dnl +]) diff --git a/external/gtest/m4/ltversion.m4 b/external/gtest/m4/ltversion.m4 new file mode 100755 index 0000000000..07a8602d48 --- /dev/null +++ b/external/gtest/m4/ltversion.m4 @@ -0,0 +1,23 @@ +# ltversion.m4 -- version numbers -*- Autoconf -*- +# +# Copyright (C) 2004 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004 +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. 
+ +# @configure_input@ + +# serial 3337 ltversion.m4 +# This file is part of GNU Libtool + +m4_define([LT_PACKAGE_VERSION], [2.4.2]) +m4_define([LT_PACKAGE_REVISION], [1.3337]) + +AC_DEFUN([LTVERSION_VERSION], +[macro_version='2.4.2' +macro_revision='1.3337' +_LT_DECL(, macro_version, 0, [Which release of libtool.m4 was used?]) +_LT_DECL(, macro_revision, 0) +]) diff --git a/external/gtest/m4/lt~obsolete.m4 b/external/gtest/m4/lt~obsolete.m4 new file mode 100755 index 0000000000..c573da90c5 --- /dev/null +++ b/external/gtest/m4/lt~obsolete.m4 @@ -0,0 +1,98 @@ +# lt~obsolete.m4 -- aclocal satisfying obsolete definitions. -*-Autoconf-*- +# +# Copyright (C) 2004, 2005, 2007, 2009 Free Software Foundation, Inc. +# Written by Scott James Remnant, 2004. +# +# This file is free software; the Free Software Foundation gives +# unlimited permission to copy and/or distribute it, with or without +# modifications, as long as this notice is preserved. + +# serial 5 lt~obsolete.m4 + +# These exist entirely to fool aclocal when bootstrapping libtool. +# +# In the past libtool.m4 has provided macros via AC_DEFUN (or AU_DEFUN) +# which have later been changed to m4_define as they aren't part of the +# exported API, or moved to Autoconf or Automake where they belong. +# +# The trouble is, aclocal is a bit thick. It'll see the old AC_DEFUN +# in /usr/share/aclocal/libtool.m4 and remember it, then when it sees us +# using a macro with the same name in our local m4/libtool.m4 it'll +# pull the old libtool.m4 in (it doesn't see our shiny new m4_define +# and doesn't know about Autoconf macros at all.) +# +# So we provide this file, which has a silly filename so it's always +# included after everything else. This provides aclocal with the +# AC_DEFUNs it wants, but when m4 processes it, it doesn't do anything +# because those macros already exist, or will be overwritten later. +# We use AC_DEFUN over AU_DEFUN for compatibility with aclocal-1.6. +# +# Anytime we withdraw an AC_DEFUN or AU_DEFUN, remember to add it here. +# Yes, that means every name once taken will need to remain here until +# we give up compatibility with versions before 1.7, at which point +# we need to keep only those names which we still refer to. + +# This is to help aclocal find these macros, as it can't see m4_define. 
+AC_DEFUN([LTOBSOLETE_VERSION], [m4_if([1])]) + +m4_ifndef([AC_LIBTOOL_LINKER_OPTION], [AC_DEFUN([AC_LIBTOOL_LINKER_OPTION])]) +m4_ifndef([AC_PROG_EGREP], [AC_DEFUN([AC_PROG_EGREP])]) +m4_ifndef([_LT_AC_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_AC_PROG_ECHO_BACKSLASH])]) +m4_ifndef([_LT_AC_SHELL_INIT], [AC_DEFUN([_LT_AC_SHELL_INIT])]) +m4_ifndef([_LT_AC_SYS_LIBPATH_AIX], [AC_DEFUN([_LT_AC_SYS_LIBPATH_AIX])]) +m4_ifndef([_LT_PROG_LTMAIN], [AC_DEFUN([_LT_PROG_LTMAIN])]) +m4_ifndef([_LT_AC_TAGVAR], [AC_DEFUN([_LT_AC_TAGVAR])]) +m4_ifndef([AC_LTDL_ENABLE_INSTALL], [AC_DEFUN([AC_LTDL_ENABLE_INSTALL])]) +m4_ifndef([AC_LTDL_PREOPEN], [AC_DEFUN([AC_LTDL_PREOPEN])]) +m4_ifndef([_LT_AC_SYS_COMPILER], [AC_DEFUN([_LT_AC_SYS_COMPILER])]) +m4_ifndef([_LT_AC_LOCK], [AC_DEFUN([_LT_AC_LOCK])]) +m4_ifndef([AC_LIBTOOL_SYS_OLD_ARCHIVE], [AC_DEFUN([AC_LIBTOOL_SYS_OLD_ARCHIVE])]) +m4_ifndef([_LT_AC_TRY_DLOPEN_SELF], [AC_DEFUN([_LT_AC_TRY_DLOPEN_SELF])]) +m4_ifndef([AC_LIBTOOL_PROG_CC_C_O], [AC_DEFUN([AC_LIBTOOL_PROG_CC_C_O])]) +m4_ifndef([AC_LIBTOOL_SYS_HARD_LINK_LOCKS], [AC_DEFUN([AC_LIBTOOL_SYS_HARD_LINK_LOCKS])]) +m4_ifndef([AC_LIBTOOL_OBJDIR], [AC_DEFUN([AC_LIBTOOL_OBJDIR])]) +m4_ifndef([AC_LTDL_OBJDIR], [AC_DEFUN([AC_LTDL_OBJDIR])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH], [AC_DEFUN([AC_LIBTOOL_PROG_LD_HARDCODE_LIBPATH])]) +m4_ifndef([AC_LIBTOOL_SYS_LIB_STRIP], [AC_DEFUN([AC_LIBTOOL_SYS_LIB_STRIP])]) +m4_ifndef([AC_PATH_MAGIC], [AC_DEFUN([AC_PATH_MAGIC])]) +m4_ifndef([AC_PROG_LD_GNU], [AC_DEFUN([AC_PROG_LD_GNU])]) +m4_ifndef([AC_PROG_LD_RELOAD_FLAG], [AC_DEFUN([AC_PROG_LD_RELOAD_FLAG])]) +m4_ifndef([AC_DEPLIBS_CHECK_METHOD], [AC_DEFUN([AC_DEPLIBS_CHECK_METHOD])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_NO_RTTI], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_NO_RTTI])]) +m4_ifndef([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE], [AC_DEFUN([AC_LIBTOOL_SYS_GLOBAL_SYMBOL_PIPE])]) +m4_ifndef([AC_LIBTOOL_PROG_COMPILER_PIC], [AC_DEFUN([AC_LIBTOOL_PROG_COMPILER_PIC])]) +m4_ifndef([AC_LIBTOOL_PROG_LD_SHLIBS], [AC_DEFUN([AC_LIBTOOL_PROG_LD_SHLIBS])]) +m4_ifndef([AC_LIBTOOL_POSTDEP_PREDEP], [AC_DEFUN([AC_LIBTOOL_POSTDEP_PREDEP])]) +m4_ifndef([LT_AC_PROG_EGREP], [AC_DEFUN([LT_AC_PROG_EGREP])]) +m4_ifndef([LT_AC_PROG_SED], [AC_DEFUN([LT_AC_PROG_SED])]) +m4_ifndef([_LT_CC_BASENAME], [AC_DEFUN([_LT_CC_BASENAME])]) +m4_ifndef([_LT_COMPILER_BOILERPLATE], [AC_DEFUN([_LT_COMPILER_BOILERPLATE])]) +m4_ifndef([_LT_LINKER_BOILERPLATE], [AC_DEFUN([_LT_LINKER_BOILERPLATE])]) +m4_ifndef([_AC_PROG_LIBTOOL], [AC_DEFUN([_AC_PROG_LIBTOOL])]) +m4_ifndef([AC_LIBTOOL_SETUP], [AC_DEFUN([AC_LIBTOOL_SETUP])]) +m4_ifndef([_LT_AC_CHECK_DLFCN], [AC_DEFUN([_LT_AC_CHECK_DLFCN])]) +m4_ifndef([AC_LIBTOOL_SYS_DYNAMIC_LINKER], [AC_DEFUN([AC_LIBTOOL_SYS_DYNAMIC_LINKER])]) +m4_ifndef([_LT_AC_TAGCONFIG], [AC_DEFUN([_LT_AC_TAGCONFIG])]) +m4_ifndef([AC_DISABLE_FAST_INSTALL], [AC_DEFUN([AC_DISABLE_FAST_INSTALL])]) +m4_ifndef([_LT_AC_LANG_CXX], [AC_DEFUN([_LT_AC_LANG_CXX])]) +m4_ifndef([_LT_AC_LANG_F77], [AC_DEFUN([_LT_AC_LANG_F77])]) +m4_ifndef([_LT_AC_LANG_GCJ], [AC_DEFUN([_LT_AC_LANG_GCJ])]) +m4_ifndef([AC_LIBTOOL_LANG_C_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_C_CONFIG])]) +m4_ifndef([_LT_AC_LANG_C_CONFIG], [AC_DEFUN([_LT_AC_LANG_C_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_CXX_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_CXX_CONFIG])]) +m4_ifndef([_LT_AC_LANG_CXX_CONFIG], [AC_DEFUN([_LT_AC_LANG_CXX_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_F77_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_F77_CONFIG])]) +m4_ifndef([_LT_AC_LANG_F77_CONFIG], [AC_DEFUN([_LT_AC_LANG_F77_CONFIG])]) 
+m4_ifndef([AC_LIBTOOL_LANG_GCJ_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_GCJ_CONFIG])]) +m4_ifndef([_LT_AC_LANG_GCJ_CONFIG], [AC_DEFUN([_LT_AC_LANG_GCJ_CONFIG])]) +m4_ifndef([AC_LIBTOOL_LANG_RC_CONFIG], [AC_DEFUN([AC_LIBTOOL_LANG_RC_CONFIG])]) +m4_ifndef([_LT_AC_LANG_RC_CONFIG], [AC_DEFUN([_LT_AC_LANG_RC_CONFIG])]) +m4_ifndef([AC_LIBTOOL_CONFIG], [AC_DEFUN([AC_LIBTOOL_CONFIG])]) +m4_ifndef([_LT_AC_FILE_LTDLL_C], [AC_DEFUN([_LT_AC_FILE_LTDLL_C])]) +m4_ifndef([_LT_REQUIRED_DARWIN_CHECKS], [AC_DEFUN([_LT_REQUIRED_DARWIN_CHECKS])]) +m4_ifndef([_LT_AC_PROG_CXXCPP], [AC_DEFUN([_LT_AC_PROG_CXXCPP])]) +m4_ifndef([_LT_PREPARE_SED_QUOTE_VARS], [AC_DEFUN([_LT_PREPARE_SED_QUOTE_VARS])]) +m4_ifndef([_LT_PROG_ECHO_BACKSLASH], [AC_DEFUN([_LT_PROG_ECHO_BACKSLASH])]) +m4_ifndef([_LT_PROG_F77], [AC_DEFUN([_LT_PROG_F77])]) +m4_ifndef([_LT_PROG_FC], [AC_DEFUN([_LT_PROG_FC])]) +m4_ifndef([_LT_PROG_CXX], [AC_DEFUN([_LT_PROG_CXX])]) diff --git a/external/gtest/make/Makefile b/external/gtest/make/Makefile new file mode 100755 index 0000000000..9ac74493ba --- /dev/null +++ b/external/gtest/make/Makefile @@ -0,0 +1,82 @@ +# A sample Makefile for building Google Test and using it in user +# tests. Please tweak it to suit your environment and project. You +# may want to move it to your project's root directory. +# +# SYNOPSIS: +# +# make [all] - makes everything. +# make TARGET - makes the given target. +# make clean - removes all files generated by make. + +# Please tweak the following variable definitions as needed by your +# project, except GTEST_HEADERS, which you can use in your own targets +# but shouldn't modify. + +# Points to the root of Google Test, relative to where this file is. +# Remember to tweak this if you move this file. +GTEST_DIR = .. + +# Where to find user code. +USER_DIR = ../samples + +# Flags passed to the preprocessor. +# Set Google Test's header directory as a system directory, such that +# the compiler doesn't generate warnings in Google Test headers. +CPPFLAGS += -isystem $(GTEST_DIR)/include + +# Flags passed to the C++ compiler. +CXXFLAGS += -g -Wall -Wextra -pthread + +# All tests produced by this Makefile. Remember to add new tests you +# created to the list. +TESTS = sample1_unittest + +# All Google Test headers. Usually you shouldn't change this +# definition. +GTEST_HEADERS = $(GTEST_DIR)/include/gtest/*.h \ + $(GTEST_DIR)/include/gtest/internal/*.h + +# House-keeping build targets. + +all : $(TESTS) + +clean : + rm -f $(TESTS) gtest.a gtest_main.a *.o + +# Builds gtest.a and gtest_main.a. + +# Usually you shouldn't tweak such internal variables, indicated by a +# trailing _. +GTEST_SRCS_ = $(GTEST_DIR)/src/*.cc $(GTEST_DIR)/src/*.h $(GTEST_HEADERS) + +# For simplicity and to avoid depending on Google Test's +# implementation details, the dependencies specified below are +# conservative and not optimized. This is fine as Google Test +# compiles fast and for ordinary users its source rarely changes. +gtest-all.o : $(GTEST_SRCS_) + $(CXX) $(CPPFLAGS) -I$(GTEST_DIR) $(CXXFLAGS) -c \ + $(GTEST_DIR)/src/gtest-all.cc + +gtest_main.o : $(GTEST_SRCS_) + $(CXX) $(CPPFLAGS) -I$(GTEST_DIR) $(CXXFLAGS) -c \ + $(GTEST_DIR)/src/gtest_main.cc + +gtest.a : gtest-all.o + $(AR) $(ARFLAGS) $@ $^ + +gtest_main.a : gtest-all.o gtest_main.o + $(AR) $(ARFLAGS) $@ $^ + +# Builds a sample test. A test should link with either gtest.a or +# gtest_main.a, depending on whether it defines its own main() +# function. 
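For readers building tests against this vendored gtest: the Makefile comment above says a test links gtest.a only when it supplies its own main(). A minimal sketch of such a main() (an illustration only, not part of the vendored sources; gtest_main.a ships an equivalent):

    #include "gtest/gtest.h"

    // What gtest_main.a otherwise provides: initialize the framework
    // (this consumes gtest's own --gtest_* flags from argv) and run
    // every registered test, returning 0 on success.
    int main(int argc, char **argv) {
      ::testing::InitGoogleTest(&argc, argv);
      return RUN_ALL_TESTS();
    }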
+
+sample1.o : $(USER_DIR)/sample1.cc $(USER_DIR)/sample1.h $(GTEST_HEADERS)
+	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(USER_DIR)/sample1.cc
+
+sample1_unittest.o : $(USER_DIR)/sample1_unittest.cc \
+                     $(USER_DIR)/sample1.h $(GTEST_HEADERS)
+	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(USER_DIR)/sample1_unittest.cc
+
+sample1_unittest : sample1.o sample1_unittest.o gtest_main.a
+	$(CXX) $(CPPFLAGS) $(CXXFLAGS) -lpthread $^ -o $@
diff --git a/external/gtest/msvc/gtest-md.sln b/external/gtest/msvc/gtest-md.sln
new file mode 100755
index 0000000000..829b4019ae
--- /dev/null
+++ b/external/gtest/msvc/gtest-md.sln
@@ -0,0 +1,45 @@
+Microsoft Visual Studio Solution File, Format Version 8.00
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "gtest-md", "gtest-md.vcproj", "{C8F6C172-56F2-4E76-B5FA-C3B423B31BE8}"
+	ProjectSection(ProjectDependencies) = postProject
+	EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "gtest_main-md", "gtest_main-md.vcproj", "{3AF54C8A-10BF-4332-9147-F68ED9862033}"
+	ProjectSection(ProjectDependencies) = postProject
+	EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "gtest_prod_test-md", "gtest_prod_test-md.vcproj", "{24848551-EF4F-47E8-9A9D-EA4D49BC3ECB}"
+	ProjectSection(ProjectDependencies) = postProject
+	EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "gtest_unittest-md", "gtest_unittest-md.vcproj", "{4D9FDFB5-986A-4139-823C-F4EE0ED481A2}"
+	ProjectSection(ProjectDependencies) = postProject
+	EndProjectSection
+EndProject
+Global
+	GlobalSection(SolutionConfiguration) = preSolution
+		Debug = Debug
+		Release = Release
+	EndGlobalSection
+	GlobalSection(ProjectConfiguration) = postSolution
+		{C8F6C172-56F2-4E76-B5FA-C3B423B31BE8}.Debug.ActiveCfg = Debug|Win32
+		{C8F6C172-56F2-4E76-B5FA-C3B423B31BE8}.Debug.Build.0 = Debug|Win32
+		{C8F6C172-56F2-4E76-B5FA-C3B423B31BE8}.Release.ActiveCfg = Release|Win32
+		{C8F6C172-56F2-4E76-B5FA-C3B423B31BE8}.Release.Build.0 = Release|Win32
+		{3AF54C8A-10BF-4332-9147-F68ED9862033}.Debug.ActiveCfg = Debug|Win32
+		{3AF54C8A-10BF-4332-9147-F68ED9862033}.Debug.Build.0 = Debug|Win32
+		{3AF54C8A-10BF-4332-9147-F68ED9862033}.Release.ActiveCfg = Release|Win32
+		{3AF54C8A-10BF-4332-9147-F68ED9862033}.Release.Build.0 = Release|Win32
+		{24848551-EF4F-47E8-9A9D-EA4D49BC3ECB}.Debug.ActiveCfg = Debug|Win32
+		{24848551-EF4F-47E8-9A9D-EA4D49BC3ECB}.Debug.Build.0 = Debug|Win32
+		{24848551-EF4F-47E8-9A9D-EA4D49BC3ECB}.Release.ActiveCfg = Release|Win32
+		{24848551-EF4F-47E8-9A9D-EA4D49BC3ECB}.Release.Build.0 = Release|Win32
+		{4D9FDFB5-986A-4139-823C-F4EE0ED481A2}.Debug.ActiveCfg = Debug|Win32
+		{4D9FDFB5-986A-4139-823C-F4EE0ED481A2}.Debug.Build.0 = Debug|Win32
+		{4D9FDFB5-986A-4139-823C-F4EE0ED481A2}.Release.ActiveCfg = Release|Win32
+		{4D9FDFB5-986A-4139-823C-F4EE0ED481A2}.Release.Build.0 = Release|Win32
+	EndGlobalSection
+	GlobalSection(ExtensibilityGlobals) = postSolution
+	EndGlobalSection
+	GlobalSection(ExtensibilityAddIns) = postSolution
+	EndGlobalSection
+EndGlobal
diff --git a/external/gtest/msvc/gtest-md.vcproj b/external/gtest/msvc/gtest-md.vcproj
new file mode 100755
index 0000000000..1c1496ccbf
--- /dev/null
+++ b/external/gtest/msvc/gtest-md.vcproj
@@ -0,0 +1,126 @@
+[gtest-md.vcproj: 126 lines of Visual Studio project XML; the markup was lost in extraction and only empty "+" lines survived]
diff --git a/external/gtest/msvc/gtest.sln b/external/gtest/msvc/gtest.sln
new file mode 100755
index 0000000000..c1b2929649
--- /dev/null
+++ b/external/gtest/msvc/gtest.sln
@@ -0,0 +1,45 @@
+Microsoft Visual Studio Solution File, Format Version 8.00
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "gtest", "gtest.vcproj", "{C8F6C172-56F2-4E76-B5FA-C3B423B31BE7}"
+	ProjectSection(ProjectDependencies) = postProject
+	EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "gtest_main", "gtest_main.vcproj", "{3AF54C8A-10BF-4332-9147-F68ED9862032}"
+	ProjectSection(ProjectDependencies) = postProject
+	EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "gtest_unittest", "gtest_unittest.vcproj", "{4D9FDFB5-986A-4139-823C-F4EE0ED481A1}"
+	ProjectSection(ProjectDependencies) = postProject
+	EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "gtest_prod_test", "gtest_prod_test.vcproj", "{24848551-EF4F-47E8-9A9D-EA4D49BC3ECA}"
+	ProjectSection(ProjectDependencies) = postProject
+	EndProjectSection
+EndProject
+Global
+	GlobalSection(SolutionConfiguration) = preSolution
+		Debug = Debug
+		Release = Release
+	EndGlobalSection
+	GlobalSection(ProjectConfiguration) = postSolution
+		{C8F6C172-56F2-4E76-B5FA-C3B423B31BE7}.Debug.ActiveCfg = Debug|Win32
+		{C8F6C172-56F2-4E76-B5FA-C3B423B31BE7}.Debug.Build.0 = Debug|Win32
+		{C8F6C172-56F2-4E76-B5FA-C3B423B31BE7}.Release.ActiveCfg = Release|Win32
+		{C8F6C172-56F2-4E76-B5FA-C3B423B31BE7}.Release.Build.0 = Release|Win32
+		{3AF54C8A-10BF-4332-9147-F68ED9862032}.Debug.ActiveCfg = Debug|Win32
+		{3AF54C8A-10BF-4332-9147-F68ED9862032}.Debug.Build.0 = Debug|Win32
+		{3AF54C8A-10BF-4332-9147-F68ED9862032}.Release.ActiveCfg = Release|Win32
+		{3AF54C8A-10BF-4332-9147-F68ED9862032}.Release.Build.0 = Release|Win32
+		{4D9FDFB5-986A-4139-823C-F4EE0ED481A1}.Debug.ActiveCfg = Debug|Win32
+		{4D9FDFB5-986A-4139-823C-F4EE0ED481A1}.Debug.Build.0 = Debug|Win32
+		{4D9FDFB5-986A-4139-823C-F4EE0ED481A1}.Release.ActiveCfg = Release|Win32
+		{4D9FDFB5-986A-4139-823C-F4EE0ED481A1}.Release.Build.0 = Release|Win32
+		{24848551-EF4F-47E8-9A9D-EA4D49BC3ECA}.Debug.ActiveCfg = Debug|Win32
+		{24848551-EF4F-47E8-9A9D-EA4D49BC3ECA}.Debug.Build.0 = Debug|Win32
+		{24848551-EF4F-47E8-9A9D-EA4D49BC3ECA}.Release.ActiveCfg = Release|Win32
+		{24848551-EF4F-47E8-9A9D-EA4D49BC3ECA}.Release.Build.0 = Release|Win32
+	EndGlobalSection
+	GlobalSection(ExtensibilityGlobals) = postSolution
+	EndGlobalSection
+	GlobalSection(ExtensibilityAddIns) = postSolution
+	EndGlobalSection
+EndGlobal
diff --git a/external/gtest/msvc/gtest.vcproj b/external/gtest/msvc/gtest.vcproj
new file mode 100755
index 0000000000..449e7e09ea
--- /dev/null
+++ b/external/gtest/msvc/gtest.vcproj
@@ -0,0 +1,126 @@
+[gtest.vcproj: 126 lines of Visual Studio project XML lost in extraction]
diff --git a/external/gtest/msvc/gtest_main-md.vcproj b/external/gtest/msvc/gtest_main-md.vcproj
new file mode 100755
index 0000000000..d00956cd3d
--- /dev/null
+++ b/external/gtest/msvc/gtest_main-md.vcproj
@@ -0,0 +1,129 @@
+[gtest_main-md.vcproj: 129 lines of Visual Studio project XML lost in extraction]
diff --git a/external/gtest/msvc/gtest_main.vcproj b/external/gtest/msvc/gtest_main.vcproj
new file mode 100755
index 0000000000..e7e9f41768
--- /dev/null
+++ b/external/gtest/msvc/gtest_main.vcproj
@@ -0,0 +1,129 @@
+[gtest_main.vcproj: 129 lines of Visual Studio project XML lost in extraction]
diff --git a/external/gtest/msvc/gtest_prod_test-md.vcproj b/external/gtest/msvc/gtest_prod_test-md.vcproj
new file mode 100755
index 0000000000..4071d28fed
--- /dev/null
+++ b/external/gtest/msvc/gtest_prod_test-md.vcproj
@@ -0,0 +1,164 @@
+[gtest_prod_test-md.vcproj: 164 lines of Visual Studio project XML lost in extraction]
diff --git a/external/gtest/msvc/gtest_prod_test.vcproj b/external/gtest/msvc/gtest_prod_test.vcproj
new file mode 100755
index 0000000000..998c75808a
--- /dev/null
+++ b/external/gtest/msvc/gtest_prod_test.vcproj
@@ -0,0 +1,164 @@
+[gtest_prod_test.vcproj: 164 lines of Visual Studio project XML lost in extraction]
diff --git a/external/gtest/msvc/gtest_unittest-md.vcproj b/external/gtest/msvc/gtest_unittest-md.vcproj
new file mode 100755
index 0000000000..1525939750
--- /dev/null
+++ b/external/gtest/msvc/gtest_unittest-md.vcproj
@@ -0,0 +1,147 @@
+[gtest_unittest-md.vcproj: 147 lines of Visual Studio project XML lost in extraction]
diff --git a/external/gtest/msvc/gtest_unittest.vcproj b/external/gtest/msvc/gtest_unittest.vcproj
new file mode 100755
index 0000000000..2b2d743457
--- /dev/null
+++ b/external/gtest/msvc/gtest_unittest.vcproj
@@ -0,0 +1,147 @@
+[gtest_unittest.vcproj: 147 lines of Visual Studio project XML lost in extraction]
diff --git a/external/gtest/samples/prime_tables.h b/external/gtest/samples/prime_tables.h
new file mode 100755
index 0000000000..92ce16a014
--- /dev/null
+++ b/external/gtest/samples/prime_tables.h
@@ -0,0 +1,123 @@
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+// Author: vladl@google.com (Vlad Losev)

+// This provides interface PrimeTable that determines whether a number is a
+// prime and determines a next prime number. This interface is used
+// in Google Test samples demonstrating use of parameterized tests.
+
+#ifndef GTEST_SAMPLES_PRIME_TABLES_H_
+#define GTEST_SAMPLES_PRIME_TABLES_H_
+
+#include <algorithm>
+
+// The prime table interface.
+class PrimeTable { + public: + virtual ~PrimeTable() {} + + // Returns true iff n is a prime number. + virtual bool IsPrime(int n) const = 0; + + // Returns the smallest prime number greater than p; or returns -1 + // if the next prime is beyond the capacity of the table. + virtual int GetNextPrime(int p) const = 0; +}; + +// Implementation #1 calculates the primes on-the-fly. +class OnTheFlyPrimeTable : public PrimeTable { + public: + virtual bool IsPrime(int n) const { + if (n <= 1) return false; + + for (int i = 2; i*i <= n; i++) { + // n is divisible by an integer other than 1 and itself. + if ((n % i) == 0) return false; + } + + return true; + } + + virtual int GetNextPrime(int p) const { + for (int n = p + 1; n > 0; n++) { + if (IsPrime(n)) return n; + } + + return -1; + } +}; + +// Implementation #2 pre-calculates the primes and stores the result +// in an array. +class PreCalculatedPrimeTable : public PrimeTable { + public: + // 'max' specifies the maximum number the prime table holds. + explicit PreCalculatedPrimeTable(int max) + : is_prime_size_(max + 1), is_prime_(new bool[max + 1]) { + CalculatePrimesUpTo(max); + } + virtual ~PreCalculatedPrimeTable() { delete[] is_prime_; } + + virtual bool IsPrime(int n) const { + return 0 <= n && n < is_prime_size_ && is_prime_[n]; + } + + virtual int GetNextPrime(int p) const { + for (int n = p + 1; n < is_prime_size_; n++) { + if (is_prime_[n]) return n; + } + + return -1; + } + + private: + void CalculatePrimesUpTo(int max) { + ::std::fill(is_prime_, is_prime_ + is_prime_size_, true); + is_prime_[0] = is_prime_[1] = false; + + for (int i = 2; i <= max; i++) { + if (!is_prime_[i]) continue; + + // Marks all multiples of i (except i itself) as non-prime. + for (int j = 2*i; j <= max; j += i) { + is_prime_[j] = false; + } + } + } + + const int is_prime_size_; + bool* const is_prime_; + + // Disables compiler warning "assignment operator could not be generated." + void operator=(const PreCalculatedPrimeTable& rhs); +}; + +#endif // GTEST_SAMPLES_PRIME_TABLES_H_ diff --git a/external/gtest/samples/sample1.cc b/external/gtest/samples/sample1.cc new file mode 100755 index 0000000000..f171e2609d --- /dev/null +++ b/external/gtest/samples/sample1.cc @@ -0,0 +1,68 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A sample program demonstrating using Google C++ testing framework.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#include "sample1.h"
+
+// Returns n! (the factorial of n). For negative n, n! is defined to be 1.
+int Factorial(int n) {
+  int result = 1;
+  for (int i = 1; i <= n; i++) {
+    result *= i;
+  }
+
+  return result;
+}
+
+// Returns true iff n is a prime number.
+bool IsPrime(int n) {
+  // Trivial case 1: small numbers
+  if (n <= 1) return false;
+
+  // Trivial case 2: even numbers
+  if (n % 2 == 0) return n == 2;
+
+  // Now, we have that n is odd and n >= 3.
+
+  // Try to divide n by every odd number i, starting from 3
+  for (int i = 3; ; i += 2) {
+    // We only have to try i up to the square root of n
+    if (i > n/i) break;
+
+    // Now, we have i <= n/i < n.
+    // If n is divisible by i, n is not prime.
+    if (n % i == 0) return false;
+  }
+
+  // n has no integer factor in the range (1, n), and thus is prime.
+  return true;
+}
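As a quick check of the two functions sample1.cc just defined, here is a hypothetical driver (an illustration only, not part of the vendored samples; the real exercise happens in sample1_unittest.cc further below):

    #include <stdio.h>
    #include "sample1.h"

    int main() {
      // 7! = 5040. IsPrime(97) only probes the odd divisors 3, 5, 7 and 9
      // before i > 97/i stops the loop, so 97 is reported prime.
      printf("Factorial(7) = %d\n", Factorial(7));   // prints 5040
      printf("IsPrime(97)  = %d\n", IsPrime(97));    // prints 1
      return 0;
    }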
diff --git a/external/gtest/samples/sample1.h b/external/gtest/samples/sample1.h
new file mode 100755
index 0000000000..3dfeb98c45
--- /dev/null
+++ b/external/gtest/samples/sample1.h
@@ -0,0 +1,43 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A sample program demonstrating using Google C++ testing framework.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#ifndef GTEST_SAMPLES_SAMPLE1_H_
+#define GTEST_SAMPLES_SAMPLE1_H_
+
+// Returns n! (the factorial of n). For negative n, n! is defined to be 1.
+int Factorial(int n);
+
+// Returns true iff n is a prime number.
+bool IsPrime(int n);
+
+#endif  // GTEST_SAMPLES_SAMPLE1_H_
diff --git a/external/gtest/samples/sample10_unittest.cc b/external/gtest/samples/sample10_unittest.cc
new file mode 100755
index 0000000000..0051cd5dcd
--- /dev/null
+++ b/external/gtest/samples/sample10_unittest.cc
@@ -0,0 +1,144 @@
+// Copyright 2009 Google Inc. All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)

+// This sample shows how to use Google Test listener API to implement
+// a primitive leak checker.
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "gtest/gtest.h"
+
+using ::testing::EmptyTestEventListener;
+using ::testing::InitGoogleTest;
+using ::testing::Test;
+using ::testing::TestCase;
+using ::testing::TestEventListeners;
+using ::testing::TestInfo;
+using ::testing::TestPartResult;
+using ::testing::UnitTest;
+
+namespace {
+
+// We will track memory used by this class.
+class Water {
+ public:
+  // Normal Water declarations go here.
+
+  // operator new and operator delete help us control water allocation.
+  void* operator new(size_t allocation_size) {
+    allocated_++;
+    return malloc(allocation_size);
+  }
+
+  void operator delete(void* block, size_t /* allocation_size */) {
+    allocated_--;
+    free(block);
+  }
+
+  static int allocated() { return allocated_; }
+
+ private:
+  static int allocated_;
+};
+
+int Water::allocated_ = 0;
+
+// This event listener monitors how many Water objects are created and
+// destroyed by each test, and reports a failure if a test leaks some Water
+// objects. It does this by comparing the number of live Water objects at
+// the beginning of a test and at the end of a test.
+class LeakChecker : public EmptyTestEventListener {
+ private:
+  // Called before a test starts.
+  virtual void OnTestStart(const TestInfo& /* test_info */) {
+    initially_allocated_ = Water::allocated();
+  }
+
+  // Called after a test ends.
+ virtual void OnTestEnd(const TestInfo& /* test_info */) { + int difference = Water::allocated() - initially_allocated_; + + // You can generate a failure in any event handler except + // OnTestPartResult. Just use an appropriate Google Test assertion to do + // it. + EXPECT_LE(difference, 0) << "Leaked " << difference << " unit(s) of Water!"; + } + + int initially_allocated_; +}; + +TEST(ListenersTest, DoesNotLeak) { + Water* water = new Water; + delete water; +} + +// This should fail when the --check_for_leaks command line flag is +// specified. +TEST(ListenersTest, LeaksWater) { + Water* water = new Water; + EXPECT_TRUE(water != NULL); +} + +} // namespace + +int main(int argc, char **argv) { + InitGoogleTest(&argc, argv); + + bool check_for_leaks = false; + if (argc > 1 && strcmp(argv[1], "--check_for_leaks") == 0 ) + check_for_leaks = true; + else + printf("%s\n", "Run this program with --check_for_leaks to enable " + "custom leak checking in the tests."); + + // If we are given the --check_for_leaks command line flag, installs the + // leak checker. + if (check_for_leaks) { + TestEventListeners& listeners = UnitTest::GetInstance()->listeners(); + + // Adds the leak checker to the end of the test event listener list, + // after the default text output printer and the default XML report + // generator. + // + // The order is important - it ensures that failures generated in the + // leak checker's OnTestEnd() method are processed by the text and XML + // printers *before* their OnTestEnd() methods are called, such that + // they are attributed to the right test. Remember that a listener + // receives an OnXyzStart event *after* listeners preceding it in the + // list received that event, and receives an OnXyzEnd event *before* + // listeners preceding it. + // + // We don't need to worry about deleting the new listener later, as + // Google Test will do it. + listeners.Append(new LeakChecker); + } + return RUN_ALL_TESTS(); +} diff --git a/external/gtest/samples/sample1_unittest.cc b/external/gtest/samples/sample1_unittest.cc new file mode 100755 index 0000000000..aefc4f1d86 --- /dev/null +++ b/external/gtest/samples/sample1_unittest.cc @@ -0,0 +1,153 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// A sample program demonstrating using Google C++ testing framework.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+// This sample shows how to write a simple unit test for a function,
+// using Google C++ testing framework.
+//
+// Writing a unit test using Google C++ testing framework is easy as 1-2-3:
+
+
+// Step 1. Include necessary header files such that the stuff your
+// test logic needs is declared.
+//
+// Don't forget gtest.h, which declares the testing framework.
+
+#include <limits.h>
+#include "sample1.h"
+#include "gtest/gtest.h"
+
+
+// Step 2. Use the TEST macro to define your tests.
+//
+// TEST has two parameters: the test case name and the test name.
+// After using the macro, you should define your test logic between a
+// pair of braces. You can use a bunch of macros to indicate the
+// success or failure of a test. EXPECT_TRUE and EXPECT_EQ are
+// examples of such macros. For a complete list, see gtest.h.
+//
+// <TechnicalDetails>
+//
+// In Google Test, tests are grouped into test cases. This is how we
+// keep test code organized. You should put logically related tests
+// into the same test case.
+//
+// The test case name and the test name should both be valid C++
+// identifiers. And you should not use underscore (_) in the names.
+//
+// Google Test guarantees that each test you define is run exactly
+// once, but it makes no guarantee on the order the tests are
+// executed. Therefore, you should write your tests in such a way
+// that their results don't depend on their order.
+//
+// </TechnicalDetails>
+
+
+// Tests Factorial().
+
+// Tests factorial of negative numbers.
+TEST(FactorialTest, Negative) {
+  // This test is named "Negative", and belongs to the "FactorialTest"
+  // test case.
+  EXPECT_EQ(1, Factorial(-5));
+  EXPECT_EQ(1, Factorial(-1));
+  EXPECT_GT(Factorial(-10), 0);
+
+  // <TechnicalDetails>
+  //
+  // EXPECT_EQ(expected, actual) is the same as
+  //
+  //   EXPECT_TRUE((expected) == (actual))
+  //
+  // except that it will print both the expected value and the actual
+  // value when the assertion fails. This is very helpful for
+  // debugging. Therefore in this case EXPECT_EQ is preferred.
+  //
+  // On the other hand, EXPECT_TRUE accepts any Boolean expression,
+  // and is thus more general.
+  //
+  // </TechnicalDetails>
+}
+
+// Tests factorial of 0.
+TEST(FactorialTest, Zero) {
+  EXPECT_EQ(1, Factorial(0));
+}
+
+// Tests factorial of positive numbers.
+TEST(FactorialTest, Positive) {
+  EXPECT_EQ(1, Factorial(1));
+  EXPECT_EQ(2, Factorial(2));
+  EXPECT_EQ(6, Factorial(3));
+  EXPECT_EQ(40320, Factorial(8));
+}
+
+
+// Tests IsPrime()
+
+// Tests negative input.
+TEST(IsPrimeTest, Negative) {
+  // This test belongs to the IsPrimeTest test case.
+
+  EXPECT_FALSE(IsPrime(-1));
+  EXPECT_FALSE(IsPrime(-2));
+  EXPECT_FALSE(IsPrime(INT_MIN));
+}
+
+// Tests some trivial cases.
+TEST(IsPrimeTest, Trivial) {
+  EXPECT_FALSE(IsPrime(0));
+  EXPECT_FALSE(IsPrime(1));
+  EXPECT_TRUE(IsPrime(2));
+  EXPECT_TRUE(IsPrime(3));
+}
+
+// Tests positive input.
+// Tests positive input. +TEST(IsPrimeTest, Positive) { + EXPECT_FALSE(IsPrime(4)); + EXPECT_TRUE(IsPrime(5)); + EXPECT_FALSE(IsPrime(6)); + EXPECT_TRUE(IsPrime(23)); +} + +// Step 3. Call RUN_ALL_TESTS() in main(). +// +// We do this by linking in src/gtest_main.cc file, which consists of +// a main() function which calls RUN_ALL_TESTS() for us. +// +// This runs all the tests you've defined, prints the result, and +// returns 0 if successful, or 1 otherwise. +// +// Did you notice that we didn't register the tests? The +// RUN_ALL_TESTS() macro magically knows about all the tests we +// defined. Isn't this convenient? diff --git a/external/gtest/samples/sample2.cc b/external/gtest/samples/sample2.cc new file mode 100755 index 0000000000..5f763b9bdf --- /dev/null +++ b/external/gtest/samples/sample2.cc @@ -0,0 +1,56 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A sample program demonstrating using Google C++ testing framework. +// +// Author: wan@google.com (Zhanyong Wan) + +#include "sample2.h" + +#include <string.h> + +// Clones a 0-terminated C string, allocating memory using new. +const char* MyString::CloneCString(const char* a_c_string) { + if (a_c_string == NULL) return NULL; + + const size_t len = strlen(a_c_string); + char* const clone = new char[ len + 1 ]; + memcpy(clone, a_c_string, len + 1); + + return clone; +} + +// Sets the 0-terminated C string this MyString object +// represents. +void MyString::Set(const char* a_c_string) { + // Makes sure this works when a_c_string == c_string_ + const char* const temp = MyString::CloneCString(a_c_string); + delete[] c_string_; + c_string_ = temp; +} diff --git a/external/gtest/samples/sample2.h b/external/gtest/samples/sample2.h new file mode 100755 index 0000000000..cb485c70fb --- /dev/null +++ b/external/gtest/samples/sample2.h @@ -0,0 +1,85 @@ +// Copyright 2005, Google Inc. +// All rights reserved.
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A sample program demonstrating using Google C++ testing framework. +// +// Author: wan@google.com (Zhanyong Wan) + +#ifndef GTEST_SAMPLES_SAMPLE2_H_ +#define GTEST_SAMPLES_SAMPLE2_H_ + +#include <string.h> + + +// A simple string class. +class MyString { + private: + const char* c_string_; + const MyString& operator=(const MyString& rhs); + + public: + // Clones a 0-terminated C string, allocating memory using new. + static const char* CloneCString(const char* a_c_string); + + //////////////////////////////////////////////////////////// + // + // C'tors + + // The default c'tor constructs a NULL string. + MyString() : c_string_(NULL) {} + + // Constructs a MyString by cloning a 0-terminated C string. + explicit MyString(const char* a_c_string) : c_string_(NULL) { + Set(a_c_string); + } + + // Copy c'tor + MyString(const MyString& string) : c_string_(NULL) { + Set(string.c_string_); + } + + //////////////////////////////////////////////////////////// + // + // D'tor. MyString is intended to be a final class, so the d'tor + // doesn't need to be virtual. + ~MyString() { delete[] c_string_; } + + // Gets the 0-terminated C string this MyString object represents. + const char* c_string() const { return c_string_; } + + size_t Length() const { + return c_string_ == NULL ? 0 : strlen(c_string_); + } + + // Sets the 0-terminated C string this MyString object represents. + void Set(const char* c_string); +}; + + +#endif // GTEST_SAMPLES_SAMPLE2_H_ diff --git a/external/gtest/samples/sample2_unittest.cc b/external/gtest/samples/sample2_unittest.cc new file mode 100755 index 0000000000..4fa19b71c7 --- /dev/null +++ b/external/gtest/samples/sample2_unittest.cc @@ -0,0 +1,109 @@ +// Copyright 2005, Google Inc. +// All rights reserved.
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A sample program demonstrating using Google C++ testing framework. +// +// Author: wan@google.com (Zhanyong Wan) + + +// This sample shows how to write a more complex unit test for a class +// that has multiple member functions. +// +// Usually, it's a good idea to have one test for each method in your +// class. You don't have to do that exactly, but it helps to keep +// your tests organized. You may also throw in additional tests as +// needed. + +#include "sample2.h" +#include "gtest/gtest.h" + +// In this example, we test the MyString class (a simple string). + +// Tests the default c'tor. +TEST(MyString, DefaultConstructor) { + const MyString s; + + // Asserts that s.c_string() returns NULL. + // + // <TechnicalDetails> + // + // If we write NULL instead of + // + // static_cast<const char *>(NULL) + // + // in this assertion, it will generate a warning on gcc 3.4. The + // reason is that EXPECT_EQ needs to know the types of its + // arguments in order to print them when it fails. Since NULL is + // #defined as 0, the compiler will use the formatter function for + // int to print it. However, gcc thinks that NULL should be used as + // a pointer, not an int, and therefore complains. + // + // The root of the problem is C++'s lack of distinction between the + // integer number 0 and the null pointer constant. Unfortunately, + // we have to live with this fact. + // + // </TechnicalDetails> + EXPECT_STREQ(NULL, s.c_string()); + + EXPECT_EQ(0u, s.Length()); +} + +const char kHelloString[] = "Hello, world!"; + +// Tests the c'tor that accepts a C string. +TEST(MyString, ConstructorFromCString) { + const MyString s(kHelloString); + EXPECT_EQ(0, strcmp(s.c_string(), kHelloString)); + EXPECT_EQ(sizeof(kHelloString)/sizeof(kHelloString[0]) - 1, + s.Length()); +} + +// Tests the copy c'tor. +TEST(MyString, CopyConstructor) { + const MyString s1(kHelloString); + const MyString s2 = s1; + EXPECT_EQ(0, strcmp(s2.c_string(), kHelloString)); +}
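// EXPECT_EQ on two C strings compares pointers, while EXPECT_STREQ compares
// contents. A minimal sketch of that distinction (illustrative only, not
// part of the original sample):
TEST(MyString, StreqComparesContentsNotPointers) {
  const char kGreeting[] = "hello";
  const MyString s(kGreeting);
  EXPECT_STREQ(kGreeting, s.c_string());  // same contents, so this passes
  EXPECT_NE(kGreeting, s.c_string());     // CloneCString() allocated a distinct buffer
}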
+// Tests the Set method. +TEST(MyString, Set) { + MyString s; + + s.Set(kHelloString); + EXPECT_EQ(0, strcmp(s.c_string(), kHelloString)); + + // Set should work when the input pointer is the same as the one + // already in the MyString object. + s.Set(s.c_string()); + EXPECT_EQ(0, strcmp(s.c_string(), kHelloString)); + + // Can we set the MyString to NULL? + s.Set(NULL); + EXPECT_STREQ(NULL, s.c_string()); +} diff --git a/external/gtest/samples/sample3-inl.h b/external/gtest/samples/sample3-inl.h new file mode 100755 index 0000000000..7e3084d638 --- /dev/null +++ b/external/gtest/samples/sample3-inl.h @@ -0,0 +1,172 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A sample program demonstrating using Google C++ testing framework. +// +// Author: wan@google.com (Zhanyong Wan) + +#ifndef GTEST_SAMPLES_SAMPLE3_INL_H_ +#define GTEST_SAMPLES_SAMPLE3_INL_H_ + +#include <stddef.h> + + +// Queue is a simple queue implemented as a singly-linked list. +// +// The element type must support the copy constructor. +template <typename E> // E is the element type +class Queue; + +// QueueNode is a node in a Queue, which consists of an element of +// type E and a pointer to the next node. +template <typename E> // E is the element type +class QueueNode { + friend class Queue<E>; + + public: + // Gets the element in this node. + const E& element() const { return element_; } + + // Gets the next node in the queue. + QueueNode* next() { return next_; } + const QueueNode* next() const { return next_; } + + private: + // Creates a node with a given element value. The next pointer is + // set to NULL. + explicit QueueNode(const E& an_element) : element_(an_element), next_(NULL) {} + + // We disable the default assignment operator and copy c'tor. + const QueueNode& operator = (const QueueNode&); + QueueNode(const QueueNode&); + + E element_; + QueueNode* next_; +}; + +template <typename E> // E is the element type. +class Queue { + public: + // Creates an empty queue. + Queue() : head_(NULL), last_(NULL), size_(0) {} + + // D'tor. Clears the queue.
+ ~Queue() { Clear(); } + + // Clears the queue. + void Clear() { + if (size_ > 0) { + // 1. Deletes every node. + QueueNode<E>* node = head_; + QueueNode<E>* next = node->next(); + for (; ;) { + delete node; + node = next; + if (node == NULL) break; + next = node->next(); + } + + // 2. Resets the member variables. + head_ = last_ = NULL; + size_ = 0; + } + } + + // Gets the number of elements. + size_t Size() const { return size_; } + + // Gets the first element of the queue, or NULL if the queue is empty. + QueueNode<E>* Head() { return head_; } + const QueueNode<E>* Head() const { return head_; } + + // Gets the last element of the queue, or NULL if the queue is empty. + QueueNode<E>* Last() { return last_; } + const QueueNode<E>* Last() const { return last_; } + + // Adds an element to the end of the queue. A copy of the element is + // created using the copy constructor, and then stored in the queue. + // Changes made to the element in the queue don't affect the source + // object, and vice versa. + void Enqueue(const E& element) { + QueueNode<E>* new_node = new QueueNode<E>(element); + + if (size_ == 0) { + head_ = last_ = new_node; + size_ = 1; + } else { + last_->next_ = new_node; + last_ = new_node; + size_++; + } + } + + // Removes the head of the queue and returns it. Returns NULL if + // the queue is empty. + E* Dequeue() { + if (size_ == 0) { + return NULL; + } + + const QueueNode<E>* const old_head = head_; + head_ = head_->next_; + size_--; + if (size_ == 0) { + last_ = NULL; + } + + E* element = new E(old_head->element()); + delete old_head; + + return element; + } + + // Applies a function/functor on each element of the queue, and + // returns the result in a new queue. The original queue is not + // affected. + template <typename F> + Queue* Map(F function) const { + Queue* new_queue = new Queue(); + for (const QueueNode<E>* node = head_; node != NULL; node = node->next_) { + new_queue->Enqueue(function(node->element())); + } + + return new_queue; + } + + private: + QueueNode<E>* head_; // The first node of the queue. + QueueNode<E>* last_; // The last node of the queue. + size_t size_; // The number of elements in the queue. + + // We disallow copying a queue. + Queue(const Queue&); + const Queue& operator = (const Queue&); +}; + +#endif // GTEST_SAMPLES_SAMPLE3_INL_H_ diff --git a/external/gtest/samples/sample3_unittest.cc b/external/gtest/samples/sample3_unittest.cc new file mode 100755 index 0000000000..bf3877d013 --- /dev/null +++ b/external/gtest/samples/sample3_unittest.cc @@ -0,0 +1,151 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A sample program demonstrating using Google C++ testing framework. +// +// Author: wan@google.com (Zhanyong Wan) + + +// In this example, we use a more advanced feature of Google Test called +// test fixture. +// +// A test fixture is a place to hold objects and functions shared by +// all tests in a test case. Using a test fixture avoids duplicating +// the test code necessary to initialize and cleanup those common +// objects for each test. It is also useful for defining sub-routines +// that your tests need to invoke a lot. +// +// <TechnicalDetails> +// +// The tests share the test fixture in the sense of code sharing, not +// data sharing. Each test is given its own fresh copy of the +// fixture. You cannot expect the data modified by one test to be +// passed on to another test, which is a bad idea. +// +// The reason for this design is that tests should be independent and +// repeatable. In particular, a test should not fail as the result of +// another test's failure. If one test depends on info produced by +// another test, then the two tests should really be one big test. +// +// The macros for indicating the success/failure of a test +// (EXPECT_TRUE, FAIL, etc) need to know what the current test is +// (when Google Test prints the test result, it tells you which test +// each failure belongs to). Technically, these macros invoke a +// member function of the Test class. Therefore, you cannot use them +// in a global function. That's why you should put test sub-routines +// in a test fixture. +// +// </TechnicalDetails> + +#include "sample3-inl.h" +#include "gtest/gtest.h" + +// To use a test fixture, derive a class from testing::Test. +class QueueTest : public testing::Test { + protected: // You should make the members protected so that they can be + // accessed from sub-classes. + + // virtual void SetUp() will be called before each test is run. You + // should define it if you need to initialize the variables. + // Otherwise, this can be skipped. + virtual void SetUp() { + q1_.Enqueue(1); + q2_.Enqueue(2); + q2_.Enqueue(3); + } + + // virtual void TearDown() will be called after each test is run. + // You should define it if there is cleanup work to do. Otherwise, + // you don't have to provide it. + // + // virtual void TearDown() { + // } + + // A helper function that some test uses. + static int Double(int n) { + return 2*n; + } + + // A helper function for testing Queue::Map(). + void MapTester(const Queue<int> * q) { + // Creates a new queue, where each element is twice as big as the + // corresponding one in q. + const Queue<int> * const new_q = q->Map(Double); + + // Verifies that the new queue has the same size as q. + ASSERT_EQ(q->Size(), new_q->Size()); + + // Verifies the relationship between the elements of the two queues. + for ( const QueueNode<int> * n1 = q->Head(), * n2 = new_q->Head(); + n1 != NULL; n1 = n1->next(), n2 = n2->next() ) { + EXPECT_EQ(2 * n1->element(), n2->element()); + } + + delete new_q; + } + + // Declares the variables your tests want to use.
+ Queue<int> q0_; + Queue<int> q1_; + Queue<int> q2_; +}; + +// When you have a test fixture, you define a test using TEST_F +// instead of TEST. + +// Tests the default c'tor. +TEST_F(QueueTest, DefaultConstructor) { + // You can access data in the test fixture here. + EXPECT_EQ(0u, q0_.Size()); +} + +// Tests Dequeue(). +TEST_F(QueueTest, Dequeue) { + int * n = q0_.Dequeue(); + EXPECT_TRUE(n == NULL); + + n = q1_.Dequeue(); + ASSERT_TRUE(n != NULL); + EXPECT_EQ(1, *n); + EXPECT_EQ(0u, q1_.Size()); + delete n; + + n = q2_.Dequeue(); + ASSERT_TRUE(n != NULL); + EXPECT_EQ(2, *n); + EXPECT_EQ(1u, q2_.Size()); + delete n; +} + +// Tests the Queue::Map() function. +TEST_F(QueueTest, Map) { + MapTester(&q0_); + MapTester(&q1_); + MapTester(&q2_); +} diff --git a/external/gtest/samples/sample4.cc b/external/gtest/samples/sample4.cc new file mode 100755 index 0000000000..ae44bda6f1 --- /dev/null +++ b/external/gtest/samples/sample4.cc @@ -0,0 +1,46 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A sample program demonstrating using Google C++ testing framework. +// +// Author: wan@google.com (Zhanyong Wan) + +#include <stdio.h> + +#include "sample4.h" + +// Returns the current counter value, and increments it. +int Counter::Increment() { + return counter_++; +} + +// Prints the current counter value to STDOUT. +void Counter::Print() const { + printf("%d", counter_); +} diff --git a/external/gtest/samples/sample4.h b/external/gtest/samples/sample4.h new file mode 100755 index 0000000000..cd60f0dd2d --- /dev/null +++ b/external/gtest/samples/sample4.h @@ -0,0 +1,53 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// A sample program demonstrating using Google C++ testing framework. +// +// Author: wan@google.com (Zhanyong Wan) + +#ifndef GTEST_SAMPLES_SAMPLE4_H_ +#define GTEST_SAMPLES_SAMPLE4_H_ + +// A simple monotonic counter. +class Counter { + private: + int counter_; + + public: + // Creates a counter that starts at 0. + Counter() : counter_(0) {} + + // Returns the current counter value, and increments it. + int Increment(); + + // Prints the current counter value to STDOUT. + void Print() const; +}; + +#endif // GTEST_SAMPLES_SAMPLE4_H_ diff --git a/external/gtest/samples/sample4_unittest.cc b/external/gtest/samples/sample4_unittest.cc new file mode 100755 index 0000000000..fa5afc7d5a --- /dev/null +++ b/external/gtest/samples/sample4_unittest.cc @@ -0,0 +1,45 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +#include "gtest/gtest.h" +#include "sample4.h" + +// Tests the Increment() method. +TEST(Counter, Increment) { + Counter c; + + // EXPECT_EQ() evaluates its arguments exactly once, so they + // can have side effects. + + EXPECT_EQ(0, c.Increment()); + EXPECT_EQ(1, c.Increment()); + EXPECT_EQ(2, c.Increment()); +} diff --git a/external/gtest/samples/sample5_unittest.cc b/external/gtest/samples/sample5_unittest.cc new file mode 100755 index 0000000000..43d8e57775 --- /dev/null +++ b/external/gtest/samples/sample5_unittest.cc @@ -0,0 +1,199 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// This sample teaches how to reuse a test fixture in multiple test +// cases by deriving sub-fixtures from it. +// +// When you define a test fixture, you specify the name of the test +// case that will use this fixture. Therefore, a test fixture can +// be used by only one test case. +// +// Sometimes, more than one test case may want to use the same or +// slightly different test fixtures. For example, you may want to +// make sure that all tests for a GUI library don't leak important +// system resources like fonts and brushes. In Google Test, you do +// this by putting the shared logic in a super (as in "super class") +// test fixture, and then have each test case use a fixture derived +// from this super fixture.
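// A minimal sketch of that super-fixture pattern (the resource-check names
// here are hypothetical, not part of the samples):
class ResourceCheckTest : public testing::Test {
 protected:
  virtual void TearDown() {
    // A real suite would assert here that fonts/brushes/handles opened by
    // the test were released, e.g. via some platform-specific counter.
  }
};

class FontTest : public ResourceCheckTest {};   // every FontTest test gets the check
class BrushTest : public ResourceCheckTest {};  // and so does every BrushTest test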
+ +#include <limits.h> +#include <time.h> +#include "sample3-inl.h" +#include "gtest/gtest.h" +#include "sample1.h" + +// In this sample, we want to ensure that every test finishes within +// ~5 seconds. If a test takes longer to run, we consider it a +// failure. +// +// We put the code for timing a test in a test fixture called +// "QuickTest". QuickTest is intended to be the super fixture that +// other fixtures derive from, therefore there is no test case with +// the name "QuickTest". This is OK. +// +// Later, we will derive multiple test fixtures from QuickTest. +class QuickTest : public testing::Test { + protected: + // Remember that SetUp() is run immediately before a test starts. + // This is a good place to record the start time. + virtual void SetUp() { + start_time_ = time(NULL); + } + + // TearDown() is invoked immediately after a test finishes. Here we + // check if the test was too slow. + virtual void TearDown() { + // Gets the time when the test finishes + const time_t end_time = time(NULL); + + // Asserts that the test took no more than ~5 seconds. Did you + // know that you can use assertions in SetUp() and TearDown() as + // well? + EXPECT_TRUE(end_time - start_time_ <= 5) << "The test took too long."; + } + + // The UTC time (in seconds) when the test starts + time_t start_time_; +}; + + +// We derive a fixture named IntegerFunctionTest from the QuickTest +// fixture. All tests using this fixture will be automatically +// required to be quick. +class IntegerFunctionTest : public QuickTest { + // We don't need any more logic than already in the QuickTest fixture. + // Therefore the body is empty. +}; + + +// Now we can write tests in the IntegerFunctionTest test case. + +// Tests Factorial() +TEST_F(IntegerFunctionTest, Factorial) { + // Tests factorial of negative numbers. + EXPECT_EQ(1, Factorial(-5)); + EXPECT_EQ(1, Factorial(-1)); + EXPECT_GT(Factorial(-10), 0); + + // Tests factorial of 0. + EXPECT_EQ(1, Factorial(0)); + + // Tests factorial of positive numbers. + EXPECT_EQ(1, Factorial(1)); + EXPECT_EQ(2, Factorial(2)); + EXPECT_EQ(6, Factorial(3)); + EXPECT_EQ(40320, Factorial(8)); +} + + +// Tests IsPrime() +TEST_F(IntegerFunctionTest, IsPrime) { + // Tests negative input. + EXPECT_FALSE(IsPrime(-1)); + EXPECT_FALSE(IsPrime(-2)); + EXPECT_FALSE(IsPrime(INT_MIN)); + + // Tests some trivial cases. + EXPECT_FALSE(IsPrime(0)); + EXPECT_FALSE(IsPrime(1)); + EXPECT_TRUE(IsPrime(2)); + EXPECT_TRUE(IsPrime(3)); + + // Tests positive input. + EXPECT_FALSE(IsPrime(4)); + EXPECT_TRUE(IsPrime(5)); + EXPECT_FALSE(IsPrime(6)); + EXPECT_TRUE(IsPrime(23)); +} + + +// The next test case (named "QueueTest") also needs to be quick, so +// we derive another fixture from QuickTest. +// +// The QueueTest test fixture has some logic and shared objects in +// addition to what's in QuickTest already. We define the additional +// stuff inside the body of the test fixture, as usual. +class QueueTest : public QuickTest { + protected: + virtual void SetUp() { + // First, we need to set up the super fixture (QuickTest). + QuickTest::SetUp(); + + // Second, some additional setup for this fixture. + q1_.Enqueue(1); + q2_.Enqueue(2); + q2_.Enqueue(3); + } + + // By default, TearDown() inherits the behavior of + // QuickTest::TearDown(). As we have no additional cleaning work + // for QueueTest, we omit it here. + // + // virtual void TearDown() { + // QuickTest::TearDown(); + // } + + Queue<int> q0_; + Queue<int> q1_; + Queue<int> q2_; +}; + + +// Now, let's write tests using the QueueTest fixture.
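// Because each test below gets a brand-new QueueTest object (and therefore a
// fresh run of SetUp()), mutations made by one test are invisible to the
// next. A small sketch relying on that isolation (illustrative, not part of
// the original sample):
TEST_F(QueueTest, EachTestGetsAFreshFixture) {
  delete q1_.Dequeue();       // consume the single element SetUp() enqueued
  EXPECT_EQ(0u, q1_.Size());  // other tests will still see q1_ with one element
}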
+ +// Tests the default constructor. +TEST_F(QueueTest, DefaultConstructor) { + EXPECT_EQ(0u, q0_.Size()); +} + +// Tests Dequeue(). +TEST_F(QueueTest, Dequeue) { + int* n = q0_.Dequeue(); + EXPECT_TRUE(n == NULL); + + n = q1_.Dequeue(); + EXPECT_TRUE(n != NULL); + EXPECT_EQ(1, *n); + EXPECT_EQ(0u, q1_.Size()); + delete n; + + n = q2_.Dequeue(); + EXPECT_TRUE(n != NULL); + EXPECT_EQ(2, *n); + EXPECT_EQ(1u, q2_.Size()); + delete n; +} + +// If necessary, you can derive further test fixtures from a derived +// fixture itself. For example, you can derive another fixture from +// QueueTest. Google Test imposes no limit on how deep the hierarchy +// can be. In practice, however, you probably don't want it to be too +// deep as to be confusing. diff --git a/external/gtest/samples/sample6_unittest.cc b/external/gtest/samples/sample6_unittest.cc new file mode 100755 index 0000000000..8f2036a516 --- /dev/null +++ b/external/gtest/samples/sample6_unittest.cc @@ -0,0 +1,224 @@ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// This sample shows how to test common properties of multiple +// implementations of the same interface (aka interface tests). + +// The interface and its implementations are in this header. +#include "prime_tables.h" + +#include "gtest/gtest.h" + +// First, we define some factory functions for creating instances of +// the implementations. You may be able to skip this step if all your +// implementations can be constructed the same way. + +template <class T> +PrimeTable* CreatePrimeTable(); + +template <> +PrimeTable* CreatePrimeTable<OnTheFlyPrimeTable>() { + return new OnTheFlyPrimeTable; +} + +template <> +PrimeTable* CreatePrimeTable<PreCalculatedPrimeTable>() { + return new PreCalculatedPrimeTable(10000); +} + +// Then we define a test fixture class template. +template <class T> +class PrimeTableTest : public testing::Test { + protected: + // The ctor calls the factory function to create a prime table + // implemented by T.
+ PrimeTableTest() : table_(CreatePrimeTable<T>()) {} + + virtual ~PrimeTableTest() { delete table_; } + + // Note that we test an implementation via the base interface + // instead of the actual implementation class. This is important + // for keeping the tests close to the real world scenario, where the + // implementation is invoked via the base interface. It avoids + // gotchas where the implementation class has a method that shadows + // a method with the same name (but slightly different argument + // types) in the base interface, for example. + PrimeTable* const table_; +}; + +#if GTEST_HAS_TYPED_TEST + +using testing::Types; + +// Google Test offers two ways for reusing tests for different types. +// The first is called "typed tests". You should use it if you +// already know *all* the types you are going to exercise when you write +// the tests. + +// To write a typed test case, first use +// +// TYPED_TEST_CASE(TestCaseName, TypeList); +// +// to declare it and specify the type parameters. As with TEST_F, +// TestCaseName must match the test fixture name. + +// The list of types we want to test. +typedef Types<OnTheFlyPrimeTable, PreCalculatedPrimeTable> Implementations; + +TYPED_TEST_CASE(PrimeTableTest, Implementations); + +// Then use TYPED_TEST(TestCaseName, TestName) to define a typed test, +// similar to TEST_F. +TYPED_TEST(PrimeTableTest, ReturnsFalseForNonPrimes) { + // Inside the test body, you can refer to the type parameter by + // TypeParam, and refer to the fixture class by TestFixture. We + // don't need them in this example. + + // Since we are in the template world, C++ requires explicitly + // writing 'this->' when referring to members of the fixture class. + // This is something you have to learn to live with. + EXPECT_FALSE(this->table_->IsPrime(-5)); + EXPECT_FALSE(this->table_->IsPrime(0)); + EXPECT_FALSE(this->table_->IsPrime(1)); + EXPECT_FALSE(this->table_->IsPrime(4)); + EXPECT_FALSE(this->table_->IsPrime(6)); + EXPECT_FALSE(this->table_->IsPrime(100)); +} + +TYPED_TEST(PrimeTableTest, ReturnsTrueForPrimes) { + EXPECT_TRUE(this->table_->IsPrime(2)); + EXPECT_TRUE(this->table_->IsPrime(3)); + EXPECT_TRUE(this->table_->IsPrime(5)); + EXPECT_TRUE(this->table_->IsPrime(7)); + EXPECT_TRUE(this->table_->IsPrime(11)); + EXPECT_TRUE(this->table_->IsPrime(131)); +} + +TYPED_TEST(PrimeTableTest, CanGetNextPrime) { + EXPECT_EQ(2, this->table_->GetNextPrime(0)); + EXPECT_EQ(3, this->table_->GetNextPrime(2)); + EXPECT_EQ(5, this->table_->GetNextPrime(3)); + EXPECT_EQ(7, this->table_->GetNextPrime(5)); + EXPECT_EQ(11, this->table_->GetNextPrime(7)); + EXPECT_EQ(131, this->table_->GetNextPrime(128)); +} + +// That's it! Google Test will repeat each TYPED_TEST for each type +// in the type list specified in TYPED_TEST_CASE. Sit back and be +// happy that you don't have to define them multiple times. + +#endif // GTEST_HAS_TYPED_TEST + +#if GTEST_HAS_TYPED_TEST_P + +using testing::Types; + +// Sometimes, however, you don't yet know all the types that you want +// to test when you write the tests. For example, if you are the +// author of an interface and expect other people to implement it, you +// might want to write a set of tests to make sure each implementation +// conforms to some basic requirements, but you don't know what +// implementations will be written in the future. +// +// How can you write the tests without committing to the type +// parameters? That's what "type-parameterized tests" can do for you.
+// It is a bit more involved than typed tests, but in return you get a +// test pattern that can be reused in many contexts, which is a big +// win. Here's how you do it: + +// First, define a test fixture class template. Here we just reuse +// the PrimeTableTest fixture defined earlier: + +template <class T> +class PrimeTableTest2 : public PrimeTableTest<T> { +}; + +// Then, declare the test case. The argument is the name of the test +// fixture, and also the name of the test case (as usual). The _P +// suffix is for "parameterized" or "pattern". +TYPED_TEST_CASE_P(PrimeTableTest2); + +// Next, use TYPED_TEST_P(TestCaseName, TestName) to define a test, +// similar to what you do with TEST_F. +TYPED_TEST_P(PrimeTableTest2, ReturnsFalseForNonPrimes) { + EXPECT_FALSE(this->table_->IsPrime(-5)); + EXPECT_FALSE(this->table_->IsPrime(0)); + EXPECT_FALSE(this->table_->IsPrime(1)); + EXPECT_FALSE(this->table_->IsPrime(4)); + EXPECT_FALSE(this->table_->IsPrime(6)); + EXPECT_FALSE(this->table_->IsPrime(100)); +} + +TYPED_TEST_P(PrimeTableTest2, ReturnsTrueForPrimes) { + EXPECT_TRUE(this->table_->IsPrime(2)); + EXPECT_TRUE(this->table_->IsPrime(3)); + EXPECT_TRUE(this->table_->IsPrime(5)); + EXPECT_TRUE(this->table_->IsPrime(7)); + EXPECT_TRUE(this->table_->IsPrime(11)); + EXPECT_TRUE(this->table_->IsPrime(131)); +} + +TYPED_TEST_P(PrimeTableTest2, CanGetNextPrime) { + EXPECT_EQ(2, this->table_->GetNextPrime(0)); + EXPECT_EQ(3, this->table_->GetNextPrime(2)); + EXPECT_EQ(5, this->table_->GetNextPrime(3)); + EXPECT_EQ(7, this->table_->GetNextPrime(5)); + EXPECT_EQ(11, this->table_->GetNextPrime(7)); + EXPECT_EQ(131, this->table_->GetNextPrime(128)); +} + +// Type-parameterized tests involve one extra step: you have to +// enumerate the tests you defined: +REGISTER_TYPED_TEST_CASE_P( + PrimeTableTest2, // The first argument is the test case name. + // The rest of the arguments are the test names. + ReturnsFalseForNonPrimes, ReturnsTrueForPrimes, CanGetNextPrime); + +// At this point the test pattern is done. However, you don't have +// any real test yet as you haven't said which types you want to run +// the tests with. + +// To turn the abstract test pattern into real tests, you instantiate +// it with a list of types. Usually the test pattern will be defined +// in a .h file, and anyone can #include and instantiate it. You can +// even instantiate it more than once in the same program. To tell +// different instances apart, you give each of them a name, which will +// become part of the test case name and can be used in test filters. + +// The list of types we want to test. Note that it doesn't have to be +// defined at the time we write the TYPED_TEST_P()s. +typedef Types<OnTheFlyPrimeTable, PreCalculatedPrimeTable> + PrimeTableImplementations; +INSTANTIATE_TYPED_TEST_CASE_P(OnTheFlyAndPreCalculated, // Instance name + PrimeTableTest2, // Test case name + PrimeTableImplementations); // Type list + +#endif // GTEST_HAS_TYPED_TEST_P diff --git a/external/gtest/samples/sample7_unittest.cc b/external/gtest/samples/sample7_unittest.cc new file mode 100755 index 0000000000..1b651a21d6 --- /dev/null +++ b/external/gtest/samples/sample7_unittest.cc @@ -0,0 +1,130 @@ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: vladl@google.com (Vlad Losev) + +// This sample shows how to test common properties of multiple +// implementations of an interface (aka interface tests) using +// value-parameterized tests. Each test in the test case has +// a parameter that is an interface pointer to an implementation +// tested. + +// The interface and its implementations are in this header. +#include "prime_tables.h" + +#include "gtest/gtest.h" + +#if GTEST_HAS_PARAM_TEST + +using ::testing::TestWithParam; +using ::testing::Values; + +// As a general rule, to prevent a test from affecting the tests that come +// after it, you should create and destroy the tested objects for each test +// instead of reusing them. In this sample we will define a simple factory +// function for PrimeTable objects. We will instantiate objects in the test's +// SetUp() method and delete them in its TearDown() method. +typedef PrimeTable* CreatePrimeTableFunc(); + +PrimeTable* CreateOnTheFlyPrimeTable() { + return new OnTheFlyPrimeTable(); +} + +template <size_t max_precalculated> +PrimeTable* CreatePreCalculatedPrimeTable() { + return new PreCalculatedPrimeTable(max_precalculated); +}
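// Before the real fixture below, a minimal self-contained sketch of the
// TEST_P / INSTANTIATE_TEST_CASE_P mechanics, parameterized directly by an
// int instead of a factory function (illustrative, not part of the sample):
class SmallPrimeTest : public ::testing::TestWithParam<int> {};

TEST_P(SmallPrimeTest, ReportsPrime) {
  OnTheFlyPrimeTable table;
  EXPECT_TRUE(table.IsPrime(GetParam()));  // runs once per value listed below
}

INSTANTIATE_TEST_CASE_P(FirstFewPrimes, SmallPrimeTest,
                        ::testing::Values(2, 3, 5, 7));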
+ +// Inside the test body, fixture constructor, SetUp(), and TearDown() you +// can refer to the test parameter by GetParam(). In this case, the test +// parameter is a factory function which we call in the fixture's SetUp() to +// create and store an instance of PrimeTable. +class PrimeTableTest : public TestWithParam<CreatePrimeTableFunc*> { + public: + virtual ~PrimeTableTest() { delete table_; } + virtual void SetUp() { table_ = (*GetParam())(); } + virtual void TearDown() { + delete table_; + table_ = NULL; + } + + protected: + PrimeTable* table_; +}; + +TEST_P(PrimeTableTest, ReturnsFalseForNonPrimes) { + EXPECT_FALSE(table_->IsPrime(-5)); + EXPECT_FALSE(table_->IsPrime(0)); + EXPECT_FALSE(table_->IsPrime(1)); + EXPECT_FALSE(table_->IsPrime(4)); + EXPECT_FALSE(table_->IsPrime(6)); + EXPECT_FALSE(table_->IsPrime(100)); +} + +TEST_P(PrimeTableTest, ReturnsTrueForPrimes) { + EXPECT_TRUE(table_->IsPrime(2)); + EXPECT_TRUE(table_->IsPrime(3)); + EXPECT_TRUE(table_->IsPrime(5)); + EXPECT_TRUE(table_->IsPrime(7)); + EXPECT_TRUE(table_->IsPrime(11)); + EXPECT_TRUE(table_->IsPrime(131)); +} + +TEST_P(PrimeTableTest, CanGetNextPrime) { + EXPECT_EQ(2, table_->GetNextPrime(0)); + EXPECT_EQ(3, table_->GetNextPrime(2)); + EXPECT_EQ(5, table_->GetNextPrime(3)); + EXPECT_EQ(7, table_->GetNextPrime(5)); + EXPECT_EQ(11, table_->GetNextPrime(7)); + EXPECT_EQ(131, table_->GetNextPrime(128)); +} + +// In order to run value-parameterized tests, you need to instantiate them, +// or bind them to a list of values which will be used as test parameters. +// You can instantiate them in a different translation module, or even +// instantiate them several times. +// +// Here, we instantiate our tests with a list of two PrimeTable object +// factory functions: +INSTANTIATE_TEST_CASE_P( + OnTheFlyAndPreCalculated, + PrimeTableTest, + Values(&CreateOnTheFlyPrimeTable, &CreatePreCalculatedPrimeTable<1000>)); + +#else + +// Google Test may not support value-parameterized tests with some +// compilers. If we use conditional compilation to compile out all +// code referring to the gtest_main library, MSVC linker will not link +// that library at all and consequently complain about missing entry +// point defined in that library (fatal error LNK1561: entry point +// must be defined). This dummy test keeps gtest_main linked in. +TEST(DummyTest, ValueParameterizedTestsAreNotSupportedOnThisPlatform) {} + +#endif // GTEST_HAS_PARAM_TEST diff --git a/external/gtest/samples/sample8_unittest.cc b/external/gtest/samples/sample8_unittest.cc new file mode 100755 index 0000000000..5ad2e2c9f1 --- /dev/null +++ b/external/gtest/samples/sample8_unittest.cc @@ -0,0 +1,173 @@ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: vladl@google.com (Vlad Losev) + +// This sample shows how to test code relying on some global flag variables. +// Combine() helps with generating all possible combinations of such flags, +// and each test is given one combination as a parameter. + +// Use class definitions to test from this header. +#include "prime_tables.h" + +#include "gtest/gtest.h" + +#if GTEST_HAS_COMBINE + +// Suppose we want to introduce a new, improved implementation of PrimeTable +// which combines speed of PrecalcPrimeTable and versatility of +// OnTheFlyPrimeTable (see prime_tables.h). Inside it instantiates both +// PrecalcPrimeTable and OnTheFlyPrimeTable and uses the one that is more +// appropriate under the circumstances. But in low memory conditions, it can be +// told to instantiate without PrecalcPrimeTable instance at all and use only +// OnTheFlyPrimeTable. +class HybridPrimeTable : public PrimeTable { + public: + HybridPrimeTable(bool force_on_the_fly, int max_precalculated) + : on_the_fly_impl_(new OnTheFlyPrimeTable), + precalc_impl_(force_on_the_fly ? NULL : + new PreCalculatedPrimeTable(max_precalculated)), + max_precalculated_(max_precalculated) {} + virtual ~HybridPrimeTable() { + delete on_the_fly_impl_; + delete precalc_impl_; + } + + virtual bool IsPrime(int n) const { + if (precalc_impl_ != NULL && n < max_precalculated_) + return precalc_impl_->IsPrime(n); + else + return on_the_fly_impl_->IsPrime(n); + } + + virtual int GetNextPrime(int p) const { + int next_prime = -1; + if (precalc_impl_ != NULL && p < max_precalculated_) + next_prime = precalc_impl_->GetNextPrime(p); + + return next_prime != -1 ? next_prime : on_the_fly_impl_->GetNextPrime(p); + } + + private: + OnTheFlyPrimeTable* on_the_fly_impl_; + PreCalculatedPrimeTable* precalc_impl_; + int max_precalculated_; +}; + +using ::testing::TestWithParam; +using ::testing::Bool; +using ::testing::Values; +using ::testing::Combine; + +// To test all code paths for HybridPrimeTable we must test it with numbers +// both within and outside PreCalculatedPrimeTable's capacity and also with +// PreCalculatedPrimeTable disabled. We do this by defining a fixture which will +// accept different combinations of parameters for instantiating a +// HybridPrimeTable instance. +class PrimeTableTest : public TestWithParam< ::std::tr1::tuple<bool, int> > { + protected: + virtual void SetUp() { + // This can be written as + // + // bool force_on_the_fly; + // int max_precalculated; + // tie(force_on_the_fly, max_precalculated) = GetParam(); + // + // once the Google C++ Style Guide allows use of ::std::tr1::tie.
+ // + bool force_on_the_fly = ::std::tr1::get<0>(GetParam()); + int max_precalculated = ::std::tr1::get<1>(GetParam()); + table_ = new HybridPrimeTable(force_on_the_fly, max_precalculated); + } + virtual void TearDown() { + delete table_; + table_ = NULL; + } + HybridPrimeTable* table_; +}; + +TEST_P(PrimeTableTest, ReturnsFalseForNonPrimes) { + // Inside the test body, you can refer to the test parameter by GetParam(). + // In this case, the test parameter is a PrimeTable interface pointer which + // we can use directly. + // Please note that you can also save it in the fixture's SetUp() method + // or constructor and use saved copy in the tests. + + EXPECT_FALSE(table_->IsPrime(-5)); + EXPECT_FALSE(table_->IsPrime(0)); + EXPECT_FALSE(table_->IsPrime(1)); + EXPECT_FALSE(table_->IsPrime(4)); + EXPECT_FALSE(table_->IsPrime(6)); + EXPECT_FALSE(table_->IsPrime(100)); +} + +TEST_P(PrimeTableTest, ReturnsTrueForPrimes) { + EXPECT_TRUE(table_->IsPrime(2)); + EXPECT_TRUE(table_->IsPrime(3)); + EXPECT_TRUE(table_->IsPrime(5)); + EXPECT_TRUE(table_->IsPrime(7)); + EXPECT_TRUE(table_->IsPrime(11)); + EXPECT_TRUE(table_->IsPrime(131)); +} + +TEST_P(PrimeTableTest, CanGetNextPrime) { + EXPECT_EQ(2, table_->GetNextPrime(0)); + EXPECT_EQ(3, table_->GetNextPrime(2)); + EXPECT_EQ(5, table_->GetNextPrime(3)); + EXPECT_EQ(7, table_->GetNextPrime(5)); + EXPECT_EQ(11, table_->GetNextPrime(7)); + EXPECT_EQ(131, table_->GetNextPrime(128)); +} + +// In order to run value-parameterized tests, you need to instantiate them, +// or bind them to a list of values which will be used as test parameters. +// You can instantiate them in a different translation module, or even +// instantiate them several times. +// +// Here, we instantiate our tests with a list of parameters. We must combine +// all variations of the boolean flag suppressing PrecalcPrimeTable and some +// meaningful values for tests. We choose a small value (1), and a value that +// will put some of the tested numbers beyond the capability of the +// PrecalcPrimeTable instance and some inside it (10). Combine will produce all +// possible combinations. +INSTANTIATE_TEST_CASE_P(MeaningfulTestParameters, + PrimeTableTest, + Combine(Bool(), Values(1, 10))); + +#else + +// Google Test may not support Combine() with some compilers. If we +// use conditional compilation to compile out all code referring to +// the gtest_main library, MSVC linker will not link that library at +// all and consequently complain about missing entry point defined in +// that library (fatal error LNK1561: entry point must be +// defined). This dummy test keeps gtest_main linked in. +TEST(DummyTest, CombineIsNotSupportedOnThisPlatform) {} + +#endif // GTEST_HAS_COMBINE diff --git a/external/gtest/samples/sample9_unittest.cc b/external/gtest/samples/sample9_unittest.cc new file mode 100755 index 0000000000..b2e2079bf3 --- /dev/null +++ b/external/gtest/samples/sample9_unittest.cc @@ -0,0 +1,160 @@ +// Copyright 2009 Google Inc. All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// This sample shows how to use Google Test listener API to implement
+// an alternative console output and how to use the UnitTest reflection API
+// to enumerate test cases and tests and to inspect their results.
+
+#include <stdio.h>
+
+#include "gtest/gtest.h"
+
+using ::testing::EmptyTestEventListener;
+using ::testing::InitGoogleTest;
+using ::testing::Test;
+using ::testing::TestCase;
+using ::testing::TestEventListeners;
+using ::testing::TestInfo;
+using ::testing::TestPartResult;
+using ::testing::UnitTest;
+
+namespace {
+
+// Provides alternative output mode which produces minimal amount of
+// information about tests.
+class TersePrinter : public EmptyTestEventListener {
+ private:
+  // Called before any test activity starts.
+  virtual void OnTestProgramStart(const UnitTest& /* unit_test */) {}
+
+  // Called after all test activities have ended.
+  virtual void OnTestProgramEnd(const UnitTest& unit_test) {
+    fprintf(stdout, "TEST %s\n", unit_test.Passed() ? "PASSED" : "FAILED");
+    fflush(stdout);
+  }
+
+  // Called before a test starts.
+  virtual void OnTestStart(const TestInfo& test_info) {
+    fprintf(stdout,
+            "*** Test %s.%s starting.\n",
+            test_info.test_case_name(),
+            test_info.name());
+    fflush(stdout);
+  }
+
+  // Called after a failed assertion or a SUCCEED() invocation.
+  virtual void OnTestPartResult(const TestPartResult& test_part_result) {
+    fprintf(stdout,
+            "%s in %s:%d\n%s\n",
+            test_part_result.failed() ? "*** Failure" : "Success",
+            test_part_result.file_name(),
+            test_part_result.line_number(),
+            test_part_result.summary());
+    fflush(stdout);
+  }
+
+  // Called after a test ends.
+ virtual void OnTestEnd(const TestInfo& test_info) { + fprintf(stdout, + "*** Test %s.%s ending.\n", + test_info.test_case_name(), + test_info.name()); + fflush(stdout); + } +}; // class TersePrinter + +TEST(CustomOutputTest, PrintsMessage) { + printf("Printing something from the test body...\n"); +} + +TEST(CustomOutputTest, Succeeds) { + SUCCEED() << "SUCCEED() has been invoked from here"; +} + +TEST(CustomOutputTest, Fails) { + EXPECT_EQ(1, 2) + << "This test fails in order to demonstrate alternative failure messages"; +} + +} // namespace + +int main(int argc, char **argv) { + InitGoogleTest(&argc, argv); + + bool terse_output = false; + if (argc > 1 && strcmp(argv[1], "--terse_output") == 0 ) + terse_output = true; + else + printf("%s\n", "Run this program with --terse_output to change the way " + "it prints its output."); + + UnitTest& unit_test = *UnitTest::GetInstance(); + + // If we are given the --terse_output command line flag, suppresses the + // standard output and attaches own result printer. + if (terse_output) { + TestEventListeners& listeners = unit_test.listeners(); + + // Removes the default console output listener from the list so it will + // not receive events from Google Test and won't print any output. Since + // this operation transfers ownership of the listener to the caller we + // have to delete it as well. + delete listeners.Release(listeners.default_result_printer()); + + // Adds the custom output listener to the list. It will now receive + // events from Google Test and print the alternative output. We don't + // have to worry about deleting it since Google Test assumes ownership + // over it after adding it to the list. + listeners.Append(new TersePrinter); + } + int ret_val = RUN_ALL_TESTS(); + + // This is an example of using the UnitTest reflection API to inspect test + // results. Here we discount failures from the tests we expected to fail. + int unexpectedly_failed_tests = 0; + for (int i = 0; i < unit_test.total_test_case_count(); ++i) { + const TestCase& test_case = *unit_test.GetTestCase(i); + for (int j = 0; j < test_case.total_test_count(); ++j) { + const TestInfo& test_info = *test_case.GetTestInfo(j); + // Counts failed tests that were not meant to fail (those without + // 'Fails' in the name). + if (test_info.result()->Failed() && + strcmp(test_info.name(), "Fails") != 0) { + unexpectedly_failed_tests++; + } + } + } + + // Test that were meant to fail should not affect the test program outcome. + if (unexpectedly_failed_tests == 0) + ret_val = 0; + + return ret_val; +} diff --git a/external/gtest/scripts/fuse_gtest_files.py b/external/gtest/scripts/fuse_gtest_files.py new file mode 100755 index 0000000000..57ef72f0e3 --- /dev/null +++ b/external/gtest/scripts/fuse_gtest_files.py @@ -0,0 +1,250 @@ +#!/usr/bin/env python +# +# Copyright 2009, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""fuse_gtest_files.py v0.2.0 +Fuses Google Test source code into a .h file and a .cc file. + +SYNOPSIS + fuse_gtest_files.py [GTEST_ROOT_DIR] OUTPUT_DIR + + Scans GTEST_ROOT_DIR for Google Test source code, and generates + two files: OUTPUT_DIR/gtest/gtest.h and OUTPUT_DIR/gtest/gtest-all.cc. + Then you can build your tests by adding OUTPUT_DIR to the include + search path and linking with OUTPUT_DIR/gtest/gtest-all.cc. These + two files contain everything you need to use Google Test. Hence + you can "install" Google Test by copying them to wherever you want. + + GTEST_ROOT_DIR can be omitted and defaults to the parent + directory of the directory holding this script. + +EXAMPLES + ./fuse_gtest_files.py fused_gtest + ./fuse_gtest_files.py path/to/unpacked/gtest fused_gtest + +This tool is experimental. In particular, it assumes that there is no +conditional inclusion of Google Test headers. Please report any +problems to googletestframework@googlegroups.com. You can read +http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide for +more information. +""" + +__author__ = 'wan@google.com (Zhanyong Wan)' + +import os +import re +import sets +import sys + +# We assume that this file is in the scripts/ directory in the Google +# Test root directory. +DEFAULT_GTEST_ROOT_DIR = os.path.join(os.path.dirname(__file__), '..') + +# Regex for matching '#include "gtest/..."'. +INCLUDE_GTEST_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(gtest/.+)"') + +# Regex for matching '#include "src/..."'. +INCLUDE_SRC_FILE_REGEX = re.compile(r'^\s*#\s*include\s*"(src/.+)"') + +# Where to find the source seed files. +GTEST_H_SEED = 'include/gtest/gtest.h' +GTEST_SPI_H_SEED = 'include/gtest/gtest-spi.h' +GTEST_ALL_CC_SEED = 'src/gtest-all.cc' + +# Where to put the generated files. +GTEST_H_OUTPUT = 'gtest/gtest.h' +GTEST_ALL_CC_OUTPUT = 'gtest/gtest-all.cc' + + +def VerifyFileExists(directory, relative_path): + """Verifies that the given file exists; aborts on failure. + + relative_path is the file path relative to the given directory. + """ + + if not os.path.isfile(os.path.join(directory, relative_path)): + print 'ERROR: Cannot find %s in directory %s.' % (relative_path, + directory) + print ('Please either specify a valid project root directory ' + 'or omit it on the command line.') + sys.exit(1) + + +def ValidateGTestRootDir(gtest_root): + """Makes sure gtest_root points to a valid gtest root directory. + + The function aborts the program on failure. 
+ """ + + VerifyFileExists(gtest_root, GTEST_H_SEED) + VerifyFileExists(gtest_root, GTEST_ALL_CC_SEED) + + +def VerifyOutputFile(output_dir, relative_path): + """Verifies that the given output file path is valid. + + relative_path is relative to the output_dir directory. + """ + + # Makes sure the output file either doesn't exist or can be overwritten. + output_file = os.path.join(output_dir, relative_path) + if os.path.exists(output_file): + # TODO(wan@google.com): The following user-interaction doesn't + # work with automated processes. We should provide a way for the + # Makefile to force overwriting the files. + print ('%s already exists in directory %s - overwrite it? (y/N) ' % + (relative_path, output_dir)) + answer = sys.stdin.readline().strip() + if answer not in ['y', 'Y']: + print 'ABORTED.' + sys.exit(1) + + # Makes sure the directory holding the output file exists; creates + # it and all its ancestors if necessary. + parent_directory = os.path.dirname(output_file) + if not os.path.isdir(parent_directory): + os.makedirs(parent_directory) + + +def ValidateOutputDir(output_dir): + """Makes sure output_dir points to a valid output directory. + + The function aborts the program on failure. + """ + + VerifyOutputFile(output_dir, GTEST_H_OUTPUT) + VerifyOutputFile(output_dir, GTEST_ALL_CC_OUTPUT) + + +def FuseGTestH(gtest_root, output_dir): + """Scans folder gtest_root to generate gtest/gtest.h in output_dir.""" + + output_file = file(os.path.join(output_dir, GTEST_H_OUTPUT), 'w') + processed_files = sets.Set() # Holds all gtest headers we've processed. + + def ProcessFile(gtest_header_path): + """Processes the given gtest header file.""" + + # We don't process the same header twice. + if gtest_header_path in processed_files: + return + + processed_files.add(gtest_header_path) + + # Reads each line in the given gtest header. + for line in file(os.path.join(gtest_root, gtest_header_path), 'r'): + m = INCLUDE_GTEST_FILE_REGEX.match(line) + if m: + # It's '#include "gtest/..."' - let's process it recursively. + ProcessFile('include/' + m.group(1)) + else: + # Otherwise we copy the line unchanged to the output file. + output_file.write(line) + + ProcessFile(GTEST_H_SEED) + output_file.close() + + +def FuseGTestAllCcToFile(gtest_root, output_file): + """Scans folder gtest_root to generate gtest/gtest-all.cc in output_file.""" + + processed_files = sets.Set() + + def ProcessFile(gtest_source_file): + """Processes the given gtest source file.""" + + # We don't process the same #included file twice. + if gtest_source_file in processed_files: + return + + processed_files.add(gtest_source_file) + + # Reads each line in the given gtest source file. + for line in file(os.path.join(gtest_root, gtest_source_file), 'r'): + m = INCLUDE_GTEST_FILE_REGEX.match(line) + if m: + if 'include/' + m.group(1) == GTEST_SPI_H_SEED: + # It's '#include "gtest/gtest-spi.h"'. This file is not + # #included by "gtest/gtest.h", so we need to process it. + ProcessFile(GTEST_SPI_H_SEED) + else: + # It's '#include "gtest/foo.h"' where foo is not gtest-spi. + # We treat it as '#include "gtest/gtest.h"', as all other + # gtest headers are being fused into gtest.h and cannot be + # #included directly. + + # There is no need to #include "gtest/gtest.h" more than once. + if not GTEST_H_SEED in processed_files: + processed_files.add(GTEST_H_SEED) + output_file.write('#include "%s"\n' % (GTEST_H_OUTPUT,)) + else: + m = INCLUDE_SRC_FILE_REGEX.match(line) + if m: + # It's '#include "src/foo"' - let's process it recursively. 
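+          # (For example, gtest-all.cc's '#include "src/gtest.cc"' is
+          # expanded in place here, so the fused gtest-all.cc needs no
+          # src/ directory at build time.)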
+ ProcessFile(m.group(1)) + else: + output_file.write(line) + + ProcessFile(GTEST_ALL_CC_SEED) + + +def FuseGTestAllCc(gtest_root, output_dir): + """Scans folder gtest_root to generate gtest/gtest-all.cc in output_dir.""" + + output_file = file(os.path.join(output_dir, GTEST_ALL_CC_OUTPUT), 'w') + FuseGTestAllCcToFile(gtest_root, output_file) + output_file.close() + + +def FuseGTest(gtest_root, output_dir): + """Fuses gtest.h and gtest-all.cc.""" + + ValidateGTestRootDir(gtest_root) + ValidateOutputDir(output_dir) + + FuseGTestH(gtest_root, output_dir) + FuseGTestAllCc(gtest_root, output_dir) + + +def main(): + argc = len(sys.argv) + if argc == 2: + # fuse_gtest_files.py OUTPUT_DIR + FuseGTest(DEFAULT_GTEST_ROOT_DIR, sys.argv[1]) + elif argc == 3: + # fuse_gtest_files.py GTEST_ROOT_DIR OUTPUT_DIR + FuseGTest(sys.argv[1], sys.argv[2]) + else: + print __doc__ + sys.exit(1) + + +if __name__ == '__main__': + main() diff --git a/external/gtest/scripts/gen_gtest_pred_impl.py b/external/gtest/scripts/gen_gtest_pred_impl.py new file mode 100755 index 0000000000..3e7ab042ea --- /dev/null +++ b/external/gtest/scripts/gen_gtest_pred_impl.py @@ -0,0 +1,730 @@ +#!/usr/bin/env python +# +# Copyright 2006, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""gen_gtest_pred_impl.py v0.1 + +Generates the implementation of Google Test predicate assertions and +accompanying tests. + +Usage: + + gen_gtest_pred_impl.py MAX_ARITY + +where MAX_ARITY is a positive integer. + +The command generates the implementation of up-to MAX_ARITY-ary +predicate assertions, and writes it to file gtest_pred_impl.h in the +directory where the script is. It also generates the accompanying +unit test in file gtest_pred_impl_unittest.cc. +""" + +__author__ = 'wan@google.com (Zhanyong Wan)' + +import os +import sys +import time + +# Where this script is. +SCRIPT_DIR = os.path.dirname(sys.argv[0]) + +# Where to store the generated header. +HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h') + +# Where to store the generated unit test. 
+UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc') + + +def HeaderPreamble(n): + """Returns the preamble for the header file. + + Args: + n: the maximum arity of the predicate macros to be generated. + """ + + # A map that defines the values used in the preamble template. + DEFS = { + 'today' : time.strftime('%m/%d/%Y'), + 'year' : time.strftime('%Y'), + 'command' : '%s %s' % (os.path.basename(sys.argv[0]), n), + 'n' : n + } + + return ( +"""// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file is AUTOMATICALLY GENERATED on %(today)s by command +// '%(command)s'. DO NOT EDIT BY HAND! +// +// Implements a family of generic predicate assertion macros. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ + +// Makes sure this header is not included before gtest.h. +#ifndef GTEST_INCLUDE_GTEST_GTEST_H_ +# error Do not include gtest_pred_impl.h directly. Include gtest.h instead. +#endif // GTEST_INCLUDE_GTEST_GTEST_H_ + +// This header implements a family of generic predicate assertion +// macros: +// +// ASSERT_PRED_FORMAT1(pred_format, v1) +// ASSERT_PRED_FORMAT2(pred_format, v1, v2) +// ... +// +// where pred_format is a function or functor that takes n (in the +// case of ASSERT_PRED_FORMATn) values and their source expression +// text, and returns a testing::AssertionResult. See the definition +// of ASSERT_EQ in gtest.h for an example. +// +// If you don't care about formatting, you can use the more +// restrictive version: +// +// ASSERT_PRED1(pred, v1) +// ASSERT_PRED2(pred, v1, v2) +// ... +// +// where pred is an n-ary function or functor that returns bool, +// and the values v1, v2, ..., must support the << operator for +// streaming to std::ostream. +// +// We also define the EXPECT_* variations. +// +// For now we only support predicates whose arity is at most %(n)s. +// Please email googletestframework@googlegroups.com if you need +// support for higher arities. 
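+//
+// A minimal usage sketch (MutuallyPrime is a hypothetical user-supplied
+// predicate, not part of this header):
+//
+//   bool MutuallyPrime(int m, int n);
+//   EXPECT_PRED2(MutuallyPrime, a, b);
+//
+// On failure this reports the predicate call text and the value each
+// argument evaluates to, using the helpers defined below.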
+
+// GTEST_ASSERT_ is the basic statement to which all of the assertions
+// in this file reduce. Don't use this in your code.
+
+#define GTEST_ASSERT_(expression, on_failure) \\
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
+  if (const ::testing::AssertionResult gtest_ar = (expression)) \\
+    ; \\
+  else \\
+    on_failure(gtest_ar.failure_message())
+""" % DEFS)
+
+
+def Arity(n):
+  """Returns the English name of the given arity."""
+
+  if n < 0:
+    return None
+  elif n <= 3:
+    return ['nullary', 'unary', 'binary', 'ternary'][n]
+  else:
+    return '%s-ary' % n
+
+
+def Title(word):
+  """Returns the given word in title case. The difference between
+  this and string's title() method is that Title('4-ary') is '4-ary'
+  while '4-ary'.title() is '4-Ary'."""
+
+  return word[0].upper() + word[1:]
+
+
+def OneTo(n):
+  """Returns the list [1, 2, 3, ..., n]."""
+
+  return range(1, n + 1)
+
+
+def Iter(n, format, sep=''):
+  """Given a positive integer n, a format string that contains 0 or
+  more '%s' format specs, and optionally a separator string, returns
+  the join of n strings, each formatted with the format string on an
+  iterator ranged from 1 to n.
+
+  Example:
+
+  Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
+  """
+
+  # How many '%s' specs are in format?
+  spec_count = len(format.split('%s')) - 1
+  return sep.join([format % (spec_count * (i,)) for i in OneTo(n)])
+
+
+def ImplementationForArity(n):
+  """Returns the implementation of n-ary predicate assertions."""
+
+  # A map the defines the values used in the implementation template.
+  DEFS = {
+    'n' : str(n),
+    'vs' : Iter(n, 'v%s', sep=', '),
+    'vts' : Iter(n, '#v%s', sep=', '),
+    'arity' : Arity(n),
+    'Arity' : Title(Arity(n))
+    }
+
+  impl = """
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
+// this in your code.
+template <typename Pred""" % DEFS
+
+  impl += Iter(n, """,
+          typename T%s""")
+
+  impl += """>
+AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
+
+  impl += Iter(n, """,
+                                  const char* e%s""")
+
+  impl += """,
+                                  Pred pred"""
+
+  impl += Iter(n, """,
+                                  const T%s& v%s""")
+
+  impl += """) {
+  if (pred(%(vs)s)) return AssertionSuccess();
+
+""" % DEFS
+
+  impl += '  return AssertionFailure() << pred_text << "("'
+
+  impl += Iter(n, """
+                            << e%s""", sep=' << ", "')
+
+  impl += ' << ") evaluates to false, where"'
+
+  impl += Iter(n, """
+      << "\\n" << e%s << " evaluates to " << v%s""")
+
+  impl += """;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
+  GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
+// this in your code.
+#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
+  GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
+
+  impl += Iter(n, """, \\
+                                             #v%s""")
+
+  impl += """, \\
+                                             pred"""
+
+  impl += Iter(n, """, \\
+                                             v%s""")
+
+  impl += """), on_failure)
+
+// %(Arity)s predicate assertion macros.
+#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\ + GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED%(n)s(pred, %(vs)s) \\ + GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\ + GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED%(n)s(pred, %(vs)s) \\ + GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_) + +""" % DEFS + + return impl + + +def HeaderPostamble(): + """Returns the postamble for the header file.""" + + return """ + +#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ +""" + + +def GenerateFile(path, content): + """Given a file path and a content string, overwrites it with the + given content.""" + + print 'Updating file %s . . .' % path + + f = file(path, 'w+') + print >>f, content, + f.close() + + print 'File %s has been updated.' % path + + +def GenerateHeader(n): + """Given the maximum arity n, updates the header file that implements + the predicate assertions.""" + + GenerateFile(HEADER, + HeaderPreamble(n) + + ''.join([ImplementationForArity(i) for i in OneTo(n)]) + + HeaderPostamble()) + + +def UnitTestPreamble(): + """Returns the preamble for the unit test file.""" + + # A map that defines the values used in the preamble template. + DEFS = { + 'today' : time.strftime('%m/%d/%Y'), + 'year' : time.strftime('%Y'), + 'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]), + } + + return ( +"""// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file is AUTOMATICALLY GENERATED on %(today)s by command +// '%(command)s'. DO NOT EDIT BY HAND! + +// Regression test for gtest_pred_impl.h +// +// This file is generated by a script and quite long. If you intend to +// learn how Google Test works by reading its unit tests, read +// gtest_unittest.cc instead. +// +// This is intended as a regression test for the Google Test predicate +// assertions. 
We compile it as part of the gtest_unittest target
+// only to keep the implementation tidy and compact, as it is quite
+// involved to set up the stage for testing Google Test using Google
+// Test itself.
+//
+// Currently, gtest_unittest takes ~11 seconds to run in the testing
+// daemon. In the future, if it grows too large and needs much more
+// time to finish, we should consider separating this file into a
+// stand-alone regression test.
+
+#include <iostream>
+
+#include "gtest/gtest.h"
+#include "gtest/gtest-spi.h"
+
+// A user-defined data type.
+struct Bool {
+  explicit Bool(int val) : value(val != 0) {}
+
+  bool operator>(int n) const { return value > Bool(n).value; }
+
+  Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
+
+  bool operator==(const Bool& rhs) const { return value == rhs.value; }
+
+  bool value;
+};
+
+// Enables Bool to be used in assertions.
+std::ostream& operator<<(std::ostream& os, const Bool& x) {
+  return os << (x.value ? "true" : "false");
+}
+
+""" % DEFS)
+
+
+def TestsForArity(n):
+  """Returns the tests for n-ary predicate assertions."""
+
+  # A map that defines the values used in the template for the tests.
+  DEFS = {
+    'n' : n,
+    'es' : Iter(n, 'e%s', sep=', '),
+    'vs' : Iter(n, 'v%s', sep=', '),
+    'vts' : Iter(n, '#v%s', sep=', '),
+    'tvs' : Iter(n, 'T%s v%s', sep=', '),
+    'int_vs' : Iter(n, 'int v%s', sep=', '),
+    'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
+    'types' : Iter(n, 'typename T%s', sep=', '),
+    'v_sum' : Iter(n, 'v%s', sep=' + '),
+    'arity' : Arity(n),
+    'Arity' : Title(Arity(n)),
+    }
+
+  tests = (
+"""// Sample functions/functors for testing %(arity)s predicate assertions.
+
+// A %(arity)s predicate function.
+template <%(types)s>
+bool PredFunction%(n)s(%(tvs)s) {
+  return %(v_sum)s > 0;
+}
+
+// The following two functions are needed to circumvent a bug in
+// gcc 2.95.3, which sometimes has problem with the above template
+// function.
+bool PredFunction%(n)sInt(%(int_vs)s) {
+  return %(v_sum)s > 0;
+}
+bool PredFunction%(n)sBool(%(Bool_vs)s) {
+  return %(v_sum)s > 0;
+}
+""" % DEFS)
+
+  tests += """
+// A %(arity)s predicate functor.
+struct PredFunctor%(n)s {
+  template <%(types)s>
+  bool operator()(""" % DEFS
+
+  tests += Iter(n, 'const T%s& v%s', sep=""",
+                  """)
+
+  tests += """) {
+    return %(v_sum)s > 0;
+  }
+};
+""" % DEFS
+
+  tests += """
+// A %(arity)s predicate-formatter function.
+template <%(types)s>
+testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
+
+  tests += Iter(n, 'const char* e%s', sep=""",
+                                             """)
+
+  tests += Iter(n, """,
+                                             const T%s& v%s""")
+
+  tests += """) {
+  if (PredFunction%(n)s(%(vs)s))
+    return testing::AssertionSuccess();
+
+  return testing::AssertionFailure()
+      << """ % DEFS
+
+  tests += Iter(n, 'e%s', sep=' << " + " << ')
+
+  tests += """
+      << " is expected to be positive, but evaluates to "
+      << %(v_sum)s << ".";
+}
+""" % DEFS
+
+  tests += """
+// A %(arity)s predicate-formatter functor.
+struct PredFormatFunctor%(n)s {
+  template <%(types)s>
+  testing::AssertionResult operator()(""" % DEFS
+
+  tests += Iter(n, 'const char* e%s', sep=""",
+                                      """)
+
+  tests += Iter(n, """,
+                                      const T%s& v%s""")
+
+  tests += """) const {
+    return PredFormatFunction%(n)s(%(es)s, %(vs)s);
+  }
+};
+""" % DEFS
+
+  tests += """
+// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
+ +class Predicate%(n)sTest : public testing::Test { + protected: + virtual void SetUp() { + expected_to_finish_ = true; + finished_ = false;""" % DEFS + + tests += """ + """ + Iter(n, 'n%s_ = ') + """0; + } +""" + + tests += """ + virtual void TearDown() { + // Verifies that each of the predicate's arguments was evaluated + // exactly once.""" + + tests += ''.join([""" + EXPECT_EQ(1, n%s_) << + "The predicate assertion didn't evaluate argument %s " + "exactly once.";""" % (i, i + 1) for i in OneTo(n)]) + + tests += """ + + // Verifies that the control flow in the test function is expected. + if (expected_to_finish_ && !finished_) { + FAIL() << "The predicate assertion unexpactedly aborted the test."; + } else if (!expected_to_finish_ && finished_) { + FAIL() << "The failed predicate assertion didn't abort the test " + "as expected."; + } + } + + // true iff the test function is expected to run to finish. + static bool expected_to_finish_; + + // true iff the test function did run to finish. + static bool finished_; +""" % DEFS + + tests += Iter(n, """ + static int n%s_;""") + + tests += """ +}; + +bool Predicate%(n)sTest::expected_to_finish_; +bool Predicate%(n)sTest::finished_; +""" % DEFS + + tests += Iter(n, """int Predicate%%(n)sTest::n%s_; +""") % DEFS + + tests += """ +typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest; +typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest; +typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest; +typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest; +""" % DEFS + + def GenTest(use_format, use_assert, expect_failure, + use_functor, use_user_type): + """Returns the test for a predicate assertion macro. + + Args: + use_format: true iff the assertion is a *_PRED_FORMAT*. + use_assert: true iff the assertion is a ASSERT_*. + expect_failure: true iff the assertion is expected to fail. + use_functor: true iff the first argument of the assertion is + a functor (as opposed to a function) + use_user_type: true iff the predicate functor/function takes + argument(s) of a user-defined type. + + Example: + + GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior + of a successful EXPECT_PRED_FORMATn() that takes a functor + whose arguments have built-in types.""" + + if use_assert: + assrt = 'ASSERT' # 'assert' is reserved, so we cannot use + # that identifier here. + else: + assrt = 'EXPECT' + + assertion = assrt + '_PRED' + + if use_format: + pred_format = 'PredFormat' + assertion += '_FORMAT' + else: + pred_format = 'Pred' + + assertion += '%(n)s' % DEFS + + if use_functor: + pred_format_type = 'functor' + pred_format += 'Functor%(n)s()' + else: + pred_format_type = 'function' + pred_format += 'Function%(n)s' + if not use_format: + if use_user_type: + pred_format += 'Bool' + else: + pred_format += 'Int' + + test_name = pred_format_type.title() + + if use_user_type: + arg_type = 'user-defined type (Bool)' + test_name += 'OnUserType' + if expect_failure: + arg = 'Bool(n%s_++)' + else: + arg = 'Bool(++n%s_)' + else: + arg_type = 'built-in type (int)' + test_name += 'OnBuiltInType' + if expect_failure: + arg = 'n%s_++' + else: + arg = '++n%s_' + + if expect_failure: + successful_or_failed = 'failed' + expected_or_not = 'expected.' + test_name += 'Failure' + else: + successful_or_failed = 'successful' + expected_or_not = 'UNEXPECTED!' + test_name += 'Success' + + # A map that defines the values used in the test template. 
+ defs = DEFS.copy() + defs.update({ + 'assert' : assrt, + 'assertion' : assertion, + 'test_name' : test_name, + 'pf_type' : pred_format_type, + 'pf' : pred_format, + 'arg_type' : arg_type, + 'arg' : arg, + 'successful' : successful_or_failed, + 'expected' : expected_or_not, + }) + + test = """ +// Tests a %(successful)s %(assertion)s where the +// predicate-formatter is a %(pf_type)s on a %(arg_type)s. +TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs + + indent = (len(assertion) + 3)*' ' + extra_indent = '' + + if expect_failure: + extra_indent = ' ' + if use_assert: + test += """ + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT""" + else: + test += """ + EXPECT_NONFATAL_FAILURE({ // NOLINT""" + + test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs + + test = test % defs + test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs) + test += ');\n' + extra_indent + ' finished_ = true;\n' + + if expect_failure: + test += ' }, "");\n' + + test += '}\n' + return test + + # Generates tests for all 2**6 = 64 combinations. + tests += ''.join([GenTest(use_format, use_assert, expect_failure, + use_functor, use_user_type) + for use_format in [0, 1] + for use_assert in [0, 1] + for expect_failure in [0, 1] + for use_functor in [0, 1] + for use_user_type in [0, 1] + ]) + + return tests + + +def UnitTestPostamble(): + """Returns the postamble for the tests.""" + + return '' + + +def GenerateUnitTest(n): + """Returns the tests for up-to n-ary predicate assertions.""" + + GenerateFile(UNIT_TEST, + UnitTestPreamble() + + ''.join([TestsForArity(i) for i in OneTo(n)]) + + UnitTestPostamble()) + + +def _Main(): + """The entry point of the script. Generates the header file and its + unit test.""" + + if len(sys.argv) != 2: + print __doc__ + print 'Author: ' + __author__ + sys.exit(1) + + n = int(sys.argv[1]) + GenerateHeader(n) + GenerateUnitTest(n) + + +if __name__ == '__main__': + _Main() diff --git a/external/gtest/scripts/gtest-config.in b/external/gtest/scripts/gtest-config.in new file mode 100755 index 0000000000..780f8432ef --- /dev/null +++ b/external/gtest/scripts/gtest-config.in @@ -0,0 +1,274 @@ +#!/bin/sh + +# These variables are automatically filled in by the configure script. +name="@PACKAGE_TARNAME@" +version="@PACKAGE_VERSION@" + +show_usage() +{ + echo "Usage: gtest-config [OPTIONS...]" +} + +show_help() +{ + show_usage + cat <<\EOF + +The `gtest-config' script provides access to the necessary compile and linking +flags to connect with Google C++ Testing Framework, both in a build prior to +installation, and on the system proper after installation. The installation +overrides may be issued in combination with any other queries, but will only +affect installation queries if called on a built but not installed gtest. The +installation queries may not be issued with any other types of queries, and +only one installation query may be made at a time. The version queries and +compiler flag queries may be combined as desired but not mixed. Different +version queries are always combined with logical "and" semantics, and only the +last of any particular query is used while all previous ones ignored. All +versions must be specified as a sequence of numbers separated by periods. +Compiler flag queries output the union of the sets of flags when combined. + + Examples: + gtest-config --min-version=1.0 || echo "Insufficient Google Test version." 
+ + g++ $(gtest-config --cppflags --cxxflags) -o foo.o -c foo.cpp + g++ $(gtest-config --ldflags --libs) -o foo foo.o + + # When using a built but not installed Google Test: + g++ $(../../my_gtest_build/scripts/gtest-config ...) ... + + # When using an installed Google Test, but with installation overrides: + export GTEST_PREFIX="/opt" + g++ $(gtest-config --libdir="/opt/lib64" ...) ... + + Help: + --usage brief usage information + --help display this help message + + Installation Overrides: + --prefix= overrides the installation prefix + --exec-prefix= overrides the executable installation prefix + --libdir= overrides the library installation prefix + --includedir= overrides the header file installation prefix + + Installation Queries: + --prefix installation prefix + --exec-prefix executable installation prefix + --libdir library installation directory + --includedir header file installation directory + --version the version of the Google Test installation + + Version Queries: + --min-version=VERSION return 0 if the version is at least VERSION + --exact-version=VERSION return 0 if the version is exactly VERSION + --max-version=VERSION return 0 if the version is at most VERSION + + Compilation Flag Queries: + --cppflags compile flags specific to the C-like preprocessors + --cxxflags compile flags appropriate for C++ programs + --ldflags linker flags + --libs libraries for linking + +EOF +} + +# This function bounds our version with a min and a max. It uses some clever +# POSIX-compliant variable expansion to portably do all the work in the shell +# and avoid any dependency on a particular "sed" or "awk" implementation. +# Notable is that it will only ever compare the first 3 components of versions. +# Further components will be cleanly stripped off. All versions must be +# unadorned, so "v1.0" will *not* work. The minimum version must be in $1, and +# the max in $2. TODO(chandlerc@google.com): If this ever breaks, we should +# investigate expanding this via autom4te from AS_VERSION_COMPARE rather than +# continuing to maintain our own shell version. 
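+# For example, version="1.7.0" parses below as major_version="1",
+# minor_version="7" and point_version="0" via the ${version%%.*} and
+# ${version#*.} expansions; "v1.0" would not, hence the restriction above.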
+check_versions() +{ + major_version=${version%%.*} + minor_version="0" + point_version="0" + if test "${version#*.}" != "${version}"; then + minor_version=${version#*.} + minor_version=${minor_version%%.*} + fi + if test "${version#*.*.}" != "${version}"; then + point_version=${version#*.*.} + point_version=${point_version%%.*} + fi + + min_version="$1" + min_major_version=${min_version%%.*} + min_minor_version="0" + min_point_version="0" + if test "${min_version#*.}" != "${min_version}"; then + min_minor_version=${min_version#*.} + min_minor_version=${min_minor_version%%.*} + fi + if test "${min_version#*.*.}" != "${min_version}"; then + min_point_version=${min_version#*.*.} + min_point_version=${min_point_version%%.*} + fi + + max_version="$2" + max_major_version=${max_version%%.*} + max_minor_version="0" + max_point_version="0" + if test "${max_version#*.}" != "${max_version}"; then + max_minor_version=${max_version#*.} + max_minor_version=${max_minor_version%%.*} + fi + if test "${max_version#*.*.}" != "${max_version}"; then + max_point_version=${max_version#*.*.} + max_point_version=${max_point_version%%.*} + fi + + test $(($major_version)) -lt $(($min_major_version)) && exit 1 + if test $(($major_version)) -eq $(($min_major_version)); then + test $(($minor_version)) -lt $(($min_minor_version)) && exit 1 + if test $(($minor_version)) -eq $(($min_minor_version)); then + test $(($point_version)) -lt $(($min_point_version)) && exit 1 + fi + fi + + test $(($major_version)) -gt $(($max_major_version)) && exit 1 + if test $(($major_version)) -eq $(($max_major_version)); then + test $(($minor_version)) -gt $(($max_minor_version)) && exit 1 + if test $(($minor_version)) -eq $(($max_minor_version)); then + test $(($point_version)) -gt $(($max_point_version)) && exit 1 + fi + fi + + exit 0 +} + +# Show the usage line when no arguments are specified. +if test $# -eq 0; then + show_usage + exit 1 +fi + +while test $# -gt 0; do + case $1 in + --usage) show_usage; exit 0;; + --help) show_help; exit 0;; + + # Installation overrides + --prefix=*) GTEST_PREFIX=${1#--prefix=};; + --exec-prefix=*) GTEST_EXEC_PREFIX=${1#--exec-prefix=};; + --libdir=*) GTEST_LIBDIR=${1#--libdir=};; + --includedir=*) GTEST_INCLUDEDIR=${1#--includedir=};; + + # Installation queries + --prefix|--exec-prefix|--libdir|--includedir|--version) + if test -n "${do_query}"; then + show_usage + exit 1 + fi + do_query=${1#--} + ;; + + # Version checking + --min-version=*) + do_check_versions=yes + min_version=${1#--min-version=} + ;; + --max-version=*) + do_check_versions=yes + max_version=${1#--max-version=} + ;; + --exact-version=*) + do_check_versions=yes + exact_version=${1#--exact-version=} + ;; + + # Compiler flag output + --cppflags) echo_cppflags=yes;; + --cxxflags) echo_cxxflags=yes;; + --ldflags) echo_ldflags=yes;; + --libs) echo_libs=yes;; + + # Everything else is an error + *) show_usage; exit 1;; + esac + shift +done + +# These have defaults filled in by the configure script but can also be +# overridden by environment variables or command line parameters. +prefix="${GTEST_PREFIX:-@prefix@}" +exec_prefix="${GTEST_EXEC_PREFIX:-@exec_prefix@}" +libdir="${GTEST_LIBDIR:-@libdir@}" +includedir="${GTEST_INCLUDEDIR:-@includedir@}" + +# We try and detect if our binary is not located at its installed location. If +# it's not, we provide variables pointing to the source and build tree rather +# than to the install tree. This allows building against a just-built gtest +# rather than an installed gtest. 
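+# For example, with bindir="/usr/bin" a copy run from a build tree such as
+# "/home/me/gtest-build/scripts" does not end in "${bindir}", the suffix
+# strip below leaves it unchanged, and the build-tree branch is taken.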
+bindir="@bindir@" +this_relative_bindir=`dirname $0` +this_bindir=`cd ${this_relative_bindir}; pwd -P` +if test "${this_bindir}" = "${this_bindir%${bindir}}"; then + # The path to the script doesn't end in the bindir sequence from Autoconf, + # assume that we are in a build tree. + build_dir=`dirname ${this_bindir}` + src_dir=`cd ${this_bindir}; cd @top_srcdir@; pwd -P` + + # TODO(chandlerc@google.com): This is a dangerous dependency on libtool, we + # should work to remove it, and/or remove libtool altogether, replacing it + # with direct references to the library and a link path. + gtest_libs="${build_dir}/lib/libgtest.la @PTHREAD_CFLAGS@ @PTHREAD_LIBS@" + gtest_ldflags="" + + # We provide hooks to include from either the source or build dir, where the + # build dir is always preferred. This will potentially allow us to write + # build rules for generated headers and have them automatically be preferred + # over provided versions. + gtest_cppflags="-I${build_dir}/include -I${src_dir}/include" + gtest_cxxflags="@PTHREAD_CFLAGS@" +else + # We're using an installed gtest, although it may be staged under some + # prefix. Assume (as our own libraries do) that we can resolve the prefix, + # and are present in the dynamic link paths. + gtest_ldflags="-L${libdir}" + gtest_libs="-l${name} @PTHREAD_CFLAGS@ @PTHREAD_LIBS@" + gtest_cppflags="-I${includedir}" + gtest_cxxflags="@PTHREAD_CFLAGS@" +fi + +# Do an installation query if requested. +if test -n "$do_query"; then + case $do_query in + prefix) echo $prefix; exit 0;; + exec-prefix) echo $exec_prefix; exit 0;; + libdir) echo $libdir; exit 0;; + includedir) echo $includedir; exit 0;; + version) echo $version; exit 0;; + *) show_usage; exit 1;; + esac +fi + +# Do a version check if requested. +if test "$do_check_versions" = "yes"; then + # Make sure we didn't receive a bad combination of parameters. + test "$echo_cppflags" = "yes" && show_usage && exit 1 + test "$echo_cxxflags" = "yes" && show_usage && exit 1 + test "$echo_ldflags" = "yes" && show_usage && exit 1 + test "$echo_libs" = "yes" && show_usage && exit 1 + + if test "$exact_version" != ""; then + check_versions $exact_version $exact_version + # unreachable + else + check_versions ${min_version:-0.0.0} ${max_version:-9999.9999.9999} + # unreachable + fi +fi + +# Do the output in the correct order so that these can be used in-line of +# a compiler invocation. +output="" +test "$echo_cppflags" = "yes" && output="$output $gtest_cppflags" +test "$echo_cxxflags" = "yes" && output="$output $gtest_cxxflags" +test "$echo_ldflags" = "yes" && output="$output $gtest_ldflags" +test "$echo_libs" = "yes" && output="$output $gtest_libs" +echo $output + +exit 0 diff --git a/external/gtest/scripts/pump.py b/external/gtest/scripts/pump.py new file mode 100755 index 0000000000..5efb653c20 --- /dev/null +++ b/external/gtest/scripts/pump.py @@ -0,0 +1,855 @@ +#!/usr/bin/env python +# +# Copyright 2008, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. 
nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""pump v0.2.0 - Pretty Useful for Meta Programming. + +A tool for preprocessor meta programming. Useful for generating +repetitive boilerplate code. Especially useful for writing C++ +classes, functions, macros, and templates that need to work with +various number of arguments. + +USAGE: + pump.py SOURCE_FILE + +EXAMPLES: + pump.py foo.cc.pump + Converts foo.cc.pump to foo.cc. + +GRAMMAR: + CODE ::= ATOMIC_CODE* + ATOMIC_CODE ::= $var ID = EXPRESSION + | $var ID = [[ CODE ]] + | $range ID EXPRESSION..EXPRESSION + | $for ID SEPARATOR [[ CODE ]] + | $($) + | $ID + | $(EXPRESSION) + | $if EXPRESSION [[ CODE ]] ELSE_BRANCH + | [[ CODE ]] + | RAW_CODE + SEPARATOR ::= RAW_CODE | EMPTY + ELSE_BRANCH ::= $else [[ CODE ]] + | $elif EXPRESSION [[ CODE ]] ELSE_BRANCH + | EMPTY + EXPRESSION has Python syntax. +""" + +__author__ = 'wan@google.com (Zhanyong Wan)' + +import os +import re +import sys + + +TOKEN_TABLE = [ + (re.compile(r'\$var\s+'), '$var'), + (re.compile(r'\$elif\s+'), '$elif'), + (re.compile(r'\$else\s+'), '$else'), + (re.compile(r'\$for\s+'), '$for'), + (re.compile(r'\$if\s+'), '$if'), + (re.compile(r'\$range\s+'), '$range'), + (re.compile(r'\$[_A-Za-z]\w*'), '$id'), + (re.compile(r'\$\(\$\)'), '$($)'), + (re.compile(r'\$'), '$'), + (re.compile(r'\[\[\n?'), '[['), + (re.compile(r'\]\]\n?'), ']]'), + ] + + +class Cursor: + """Represents a position (line and column) in a text file.""" + + def __init__(self, line=-1, column=-1): + self.line = line + self.column = column + + def __eq__(self, rhs): + return self.line == rhs.line and self.column == rhs.column + + def __ne__(self, rhs): + return not self == rhs + + def __lt__(self, rhs): + return self.line < rhs.line or ( + self.line == rhs.line and self.column < rhs.column) + + def __le__(self, rhs): + return self < rhs or self == rhs + + def __gt__(self, rhs): + return rhs < self + + def __ge__(self, rhs): + return rhs <= self + + def __str__(self): + if self == Eof(): + return 'EOF' + else: + return '%s(%s)' % (self.line + 1, self.column) + + def __add__(self, offset): + return Cursor(self.line, self.column + offset) + + def __sub__(self, offset): + return Cursor(self.line, self.column - offset) + + def Clone(self): + """Returns a copy of self.""" + + return Cursor(self.line, self.column) + + +# Special cursor to indicate the end-of-file. 
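+# (Token below defaults its start and end to Eof(), so an uninitialized
+# token compares as end-of-file.)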
+def Eof(): + """Returns the special cursor to denote the end-of-file.""" + return Cursor(-1, -1) + + +class Token: + """Represents a token in a Pump source file.""" + + def __init__(self, start=None, end=None, value=None, token_type=None): + if start is None: + self.start = Eof() + else: + self.start = start + if end is None: + self.end = Eof() + else: + self.end = end + self.value = value + self.token_type = token_type + + def __str__(self): + return 'Token @%s: \'%s\' type=%s' % ( + self.start, self.value, self.token_type) + + def Clone(self): + """Returns a copy of self.""" + + return Token(self.start.Clone(), self.end.Clone(), self.value, + self.token_type) + + +def StartsWith(lines, pos, string): + """Returns True iff the given position in lines starts with 'string'.""" + + return lines[pos.line][pos.column:].startswith(string) + + +def FindFirstInLine(line, token_table): + best_match_start = -1 + for (regex, token_type) in token_table: + m = regex.search(line) + if m: + # We found regex in lines + if best_match_start < 0 or m.start() < best_match_start: + best_match_start = m.start() + best_match_length = m.end() - m.start() + best_match_token_type = token_type + + if best_match_start < 0: + return None + + return (best_match_start, best_match_length, best_match_token_type) + + +def FindFirst(lines, token_table, cursor): + """Finds the first occurrence of any string in strings in lines.""" + + start = cursor.Clone() + cur_line_number = cursor.line + for line in lines[start.line:]: + if cur_line_number == start.line: + line = line[start.column:] + m = FindFirstInLine(line, token_table) + if m: + # We found a regex in line. + (start_column, length, token_type) = m + if cur_line_number == start.line: + start_column += start.column + found_start = Cursor(cur_line_number, start_column) + found_end = found_start + length + return MakeToken(lines, found_start, found_end, token_type) + cur_line_number += 1 + # We failed to find str in lines + return None + + +def SubString(lines, start, end): + """Returns a substring in lines.""" + + if end == Eof(): + end = Cursor(len(lines) - 1, len(lines[-1])) + + if start >= end: + return '' + + if start.line == end.line: + return lines[start.line][start.column:end.column] + + result_lines = ([lines[start.line][start.column:]] + + lines[start.line + 1:end.line] + + [lines[end.line][:end.column]]) + return ''.join(result_lines) + + +def StripMetaComments(str): + """Strip meta comments from each line in the given string.""" + + # First, completely remove lines containing nothing but a meta + # comment, including the trailing \n. + str = re.sub(r'^\s*\$\$.*\n', '', str) + + # Then, remove meta comments from contentful lines. + return re.sub(r'\s*\$\$.*', '', str) + + +def MakeToken(lines, start, end, token_type): + """Creates a new instance of Token.""" + + return Token(start, end, SubString(lines, start, end), token_type) + + +def ParseToken(lines, pos, regex, token_type): + line = lines[pos.line][pos.column:] + m = regex.search(line) + if m and not m.start(): + return MakeToken(lines, pos, pos + m.end(), token_type) + else: + print 'ERROR: %s expected at %s.' 
% (token_type, pos) + sys.exit(1) + + +ID_REGEX = re.compile(r'[_A-Za-z]\w*') +EQ_REGEX = re.compile(r'=') +REST_OF_LINE_REGEX = re.compile(r'.*?(?=$|\$\$)') +OPTIONAL_WHITE_SPACES_REGEX = re.compile(r'\s*') +WHITE_SPACE_REGEX = re.compile(r'\s') +DOT_DOT_REGEX = re.compile(r'\.\.') + + +def Skip(lines, pos, regex): + line = lines[pos.line][pos.column:] + m = re.search(regex, line) + if m and not m.start(): + return pos + m.end() + else: + return pos + + +def SkipUntil(lines, pos, regex, token_type): + line = lines[pos.line][pos.column:] + m = re.search(regex, line) + if m: + return pos + m.start() + else: + print ('ERROR: %s expected on line %s after column %s.' % + (token_type, pos.line + 1, pos.column)) + sys.exit(1) + + +def ParseExpTokenInParens(lines, pos): + def ParseInParens(pos): + pos = Skip(lines, pos, OPTIONAL_WHITE_SPACES_REGEX) + pos = Skip(lines, pos, r'\(') + pos = Parse(pos) + pos = Skip(lines, pos, r'\)') + return pos + + def Parse(pos): + pos = SkipUntil(lines, pos, r'\(|\)', ')') + if SubString(lines, pos, pos + 1) == '(': + pos = Parse(pos + 1) + pos = Skip(lines, pos, r'\)') + return Parse(pos) + else: + return pos + + start = pos.Clone() + pos = ParseInParens(pos) + return MakeToken(lines, start, pos, 'exp') + + +def RStripNewLineFromToken(token): + if token.value.endswith('\n'): + return Token(token.start, token.end, token.value[:-1], token.token_type) + else: + return token + + +def TokenizeLines(lines, pos): + while True: + found = FindFirst(lines, TOKEN_TABLE, pos) + if not found: + yield MakeToken(lines, pos, Eof(), 'code') + return + + if found.start == pos: + prev_token = None + prev_token_rstripped = None + else: + prev_token = MakeToken(lines, pos, found.start, 'code') + prev_token_rstripped = RStripNewLineFromToken(prev_token) + + if found.token_type == '$var': + if prev_token_rstripped: + yield prev_token_rstripped + yield found + id_token = ParseToken(lines, found.end, ID_REGEX, 'id') + yield id_token + pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX) + + eq_token = ParseToken(lines, pos, EQ_REGEX, '=') + yield eq_token + pos = Skip(lines, eq_token.end, r'\s*') + + if SubString(lines, pos, pos + 2) != '[[': + exp_token = ParseToken(lines, pos, REST_OF_LINE_REGEX, 'exp') + yield exp_token + pos = Cursor(exp_token.end.line + 1, 0) + elif found.token_type == '$for': + if prev_token_rstripped: + yield prev_token_rstripped + yield found + id_token = ParseToken(lines, found.end, ID_REGEX, 'id') + yield id_token + pos = Skip(lines, id_token.end, WHITE_SPACE_REGEX) + elif found.token_type == '$range': + if prev_token_rstripped: + yield prev_token_rstripped + yield found + id_token = ParseToken(lines, found.end, ID_REGEX, 'id') + yield id_token + pos = Skip(lines, id_token.end, OPTIONAL_WHITE_SPACES_REGEX) + + dots_pos = SkipUntil(lines, pos, DOT_DOT_REGEX, '..') + yield MakeToken(lines, pos, dots_pos, 'exp') + yield MakeToken(lines, dots_pos, dots_pos + 2, '..') + pos = dots_pos + 2 + new_pos = Cursor(pos.line + 1, 0) + yield MakeToken(lines, pos, new_pos, 'exp') + pos = new_pos + elif found.token_type == '$': + if prev_token: + yield prev_token + yield found + exp_token = ParseExpTokenInParens(lines, found.end) + yield exp_token + pos = exp_token.end + elif (found.token_type == ']]' or found.token_type == '$if' or + found.token_type == '$elif' or found.token_type == '$else'): + if prev_token_rstripped: + yield prev_token_rstripped + yield found + pos = found.end + else: + if prev_token: + yield prev_token + yield found + pos = found.end + + +def 
Tokenize(s): + """A generator that yields the tokens in the given string.""" + if s != '': + lines = s.splitlines(True) + for token in TokenizeLines(lines, Cursor(0, 0)): + yield token + + +class CodeNode: + def __init__(self, atomic_code_list=None): + self.atomic_code = atomic_code_list + + +class VarNode: + def __init__(self, identifier=None, atomic_code=None): + self.identifier = identifier + self.atomic_code = atomic_code + + +class RangeNode: + def __init__(self, identifier=None, exp1=None, exp2=None): + self.identifier = identifier + self.exp1 = exp1 + self.exp2 = exp2 + + +class ForNode: + def __init__(self, identifier=None, sep=None, code=None): + self.identifier = identifier + self.sep = sep + self.code = code + + +class ElseNode: + def __init__(self, else_branch=None): + self.else_branch = else_branch + + +class IfNode: + def __init__(self, exp=None, then_branch=None, else_branch=None): + self.exp = exp + self.then_branch = then_branch + self.else_branch = else_branch + + +class RawCodeNode: + def __init__(self, token=None): + self.raw_code = token + + +class LiteralDollarNode: + def __init__(self, token): + self.token = token + + +class ExpNode: + def __init__(self, token, python_exp): + self.token = token + self.python_exp = python_exp + + +def PopFront(a_list): + head = a_list[0] + a_list[:1] = [] + return head + + +def PushFront(a_list, elem): + a_list[:0] = [elem] + + +def PopToken(a_list, token_type=None): + token = PopFront(a_list) + if token_type is not None and token.token_type != token_type: + print 'ERROR: %s expected at %s' % (token_type, token.start) + print 'ERROR: %s found instead' % (token,) + sys.exit(1) + + return token + + +def PeekToken(a_list): + if not a_list: + return None + + return a_list[0] + + +def ParseExpNode(token): + python_exp = re.sub(r'([_A-Za-z]\w*)', r'self.GetValue("\1")', token.value) + return ExpNode(token, python_exp) + + +def ParseElseNode(tokens): + def Pop(token_type=None): + return PopToken(tokens, token_type) + + next = PeekToken(tokens) + if not next: + return None + if next.token_type == '$else': + Pop('$else') + Pop('[[') + code_node = ParseCodeNode(tokens) + Pop(']]') + return code_node + elif next.token_type == '$elif': + Pop('$elif') + exp = Pop('code') + Pop('[[') + code_node = ParseCodeNode(tokens) + Pop(']]') + inner_else_node = ParseElseNode(tokens) + return CodeNode([IfNode(ParseExpNode(exp), code_node, inner_else_node)]) + elif not next.value.strip(): + Pop('code') + return ParseElseNode(tokens) + else: + return None + + +def ParseAtomicCodeNode(tokens): + def Pop(token_type=None): + return PopToken(tokens, token_type) + + head = PopFront(tokens) + t = head.token_type + if t == 'code': + return RawCodeNode(head) + elif t == '$var': + id_token = Pop('id') + Pop('=') + next = PeekToken(tokens) + if next.token_type == 'exp': + exp_token = Pop() + return VarNode(id_token, ParseExpNode(exp_token)) + Pop('[[') + code_node = ParseCodeNode(tokens) + Pop(']]') + return VarNode(id_token, code_node) + elif t == '$for': + id_token = Pop('id') + next_token = PeekToken(tokens) + if next_token.token_type == 'code': + sep_token = next_token + Pop('code') + else: + sep_token = None + Pop('[[') + code_node = ParseCodeNode(tokens) + Pop(']]') + return ForNode(id_token, sep_token, code_node) + elif t == '$if': + exp_token = Pop('code') + Pop('[[') + code_node = ParseCodeNode(tokens) + Pop(']]') + else_node = ParseElseNode(tokens) + return IfNode(ParseExpNode(exp_token), code_node, else_node) + elif t == '$range': + id_token = Pop('id') + 
exp1_token = Pop('exp') + Pop('..') + exp2_token = Pop('exp') + return RangeNode(id_token, ParseExpNode(exp1_token), + ParseExpNode(exp2_token)) + elif t == '$id': + return ParseExpNode(Token(head.start + 1, head.end, head.value[1:], 'id')) + elif t == '$($)': + return LiteralDollarNode(head) + elif t == '$': + exp_token = Pop('exp') + return ParseExpNode(exp_token) + elif t == '[[': + code_node = ParseCodeNode(tokens) + Pop(']]') + return code_node + else: + PushFront(tokens, head) + return None + + +def ParseCodeNode(tokens): + atomic_code_list = [] + while True: + if not tokens: + break + atomic_code_node = ParseAtomicCodeNode(tokens) + if atomic_code_node: + atomic_code_list.append(atomic_code_node) + else: + break + return CodeNode(atomic_code_list) + + +def ParseToAST(pump_src_text): + """Convert the given Pump source text into an AST.""" + tokens = list(Tokenize(pump_src_text)) + code_node = ParseCodeNode(tokens) + return code_node + + +class Env: + def __init__(self): + self.variables = [] + self.ranges = [] + + def Clone(self): + clone = Env() + clone.variables = self.variables[:] + clone.ranges = self.ranges[:] + return clone + + def PushVariable(self, var, value): + # If value looks like an int, store it as an int. + try: + int_value = int(value) + if ('%s' % int_value) == value: + value = int_value + except Exception: + pass + self.variables[:0] = [(var, value)] + + def PopVariable(self): + self.variables[:1] = [] + + def PushRange(self, var, lower, upper): + self.ranges[:0] = [(var, lower, upper)] + + def PopRange(self): + self.ranges[:1] = [] + + def GetValue(self, identifier): + for (var, value) in self.variables: + if identifier == var: + return value + + print 'ERROR: meta variable %s is undefined.' % (identifier,) + sys.exit(1) + + def EvalExp(self, exp): + try: + result = eval(exp.python_exp) + except Exception, e: + print 'ERROR: caught exception %s: %s' % (e.__class__.__name__, e) + print ('ERROR: failed to evaluate meta expression %s at %s' % + (exp.python_exp, exp.token.start)) + sys.exit(1) + return result + + def GetRange(self, identifier): + for (var, lower, upper) in self.ranges: + if identifier == var: + return (lower, upper) + + print 'ERROR: range %s is undefined.' 
% (identifier,) + sys.exit(1) + + +class Output: + def __init__(self): + self.string = '' + + def GetLastLine(self): + index = self.string.rfind('\n') + if index < 0: + return '' + + return self.string[index + 1:] + + def Append(self, s): + self.string += s + + +def RunAtomicCode(env, node, output): + if isinstance(node, VarNode): + identifier = node.identifier.value.strip() + result = Output() + RunAtomicCode(env.Clone(), node.atomic_code, result) + value = result.string + env.PushVariable(identifier, value) + elif isinstance(node, RangeNode): + identifier = node.identifier.value.strip() + lower = int(env.EvalExp(node.exp1)) + upper = int(env.EvalExp(node.exp2)) + env.PushRange(identifier, lower, upper) + elif isinstance(node, ForNode): + identifier = node.identifier.value.strip() + if node.sep is None: + sep = '' + else: + sep = node.sep.value + (lower, upper) = env.GetRange(identifier) + for i in range(lower, upper + 1): + new_env = env.Clone() + new_env.PushVariable(identifier, i) + RunCode(new_env, node.code, output) + if i != upper: + output.Append(sep) + elif isinstance(node, RawCodeNode): + output.Append(node.raw_code.value) + elif isinstance(node, IfNode): + cond = env.EvalExp(node.exp) + if cond: + RunCode(env.Clone(), node.then_branch, output) + elif node.else_branch is not None: + RunCode(env.Clone(), node.else_branch, output) + elif isinstance(node, ExpNode): + value = env.EvalExp(node) + output.Append('%s' % (value,)) + elif isinstance(node, LiteralDollarNode): + output.Append('$') + elif isinstance(node, CodeNode): + RunCode(env.Clone(), node, output) + else: + print 'BAD' + print node + sys.exit(1) + + +def RunCode(env, code_node, output): + for atomic_code in code_node.atomic_code: + RunAtomicCode(env, atomic_code, output) + + +def IsSingleLineComment(cur_line): + return '//' in cur_line + + +def IsInPreprocessorDirective(prev_lines, cur_line): + if cur_line.lstrip().startswith('#'): + return True + return prev_lines and prev_lines[-1].endswith('\\') + + +def WrapComment(line, output): + loc = line.find('//') + before_comment = line[:loc].rstrip() + if before_comment == '': + indent = loc + else: + output.append(before_comment) + indent = len(before_comment) - len(before_comment.lstrip()) + prefix = indent*' ' + '// ' + max_len = 80 - len(prefix) + comment = line[loc + 2:].strip() + segs = [seg for seg in re.split(r'(\w+\W*)', comment) if seg != ''] + cur_line = '' + for seg in segs: + if len((cur_line + seg).rstrip()) < max_len: + cur_line += seg + else: + if cur_line.strip() != '': + output.append(prefix + cur_line.rstrip()) + cur_line = seg.lstrip() + if cur_line.strip() != '': + output.append(prefix + cur_line.strip()) + + +def WrapCode(line, line_concat, output): + indent = len(line) - len(line.lstrip()) + prefix = indent*' ' # Prefix of the current line + max_len = 80 - indent - len(line_concat) # Maximum length of the current line + new_prefix = prefix + 4*' ' # Prefix of a continuation line + new_max_len = max_len - 4 # Maximum length of a continuation line + # Prefers to wrap a line after a ',' or ';'. + segs = [seg for seg in re.split(r'([^,;]+[,;]?)', line.strip()) if seg != ''] + cur_line = '' # The current line without leading spaces. + for seg in segs: + # If the line is still too long, wrap at a space. 
+ while cur_line == '' and len(seg.strip()) > max_len: + seg = seg.lstrip() + split_at = seg.rfind(' ', 0, max_len) + output.append(prefix + seg[:split_at].strip() + line_concat) + seg = seg[split_at + 1:] + prefix = new_prefix + max_len = new_max_len + + if len((cur_line + seg).rstrip()) < max_len: + cur_line = (cur_line + seg).lstrip() + else: + output.append(prefix + cur_line.rstrip() + line_concat) + prefix = new_prefix + max_len = new_max_len + cur_line = seg.lstrip() + if cur_line.strip() != '': + output.append(prefix + cur_line.strip()) + + +def WrapPreprocessorDirective(line, output): + WrapCode(line, ' \\', output) + + +def WrapPlainCode(line, output): + WrapCode(line, '', output) + + +def IsMultiLineIWYUPragma(line): + return re.search(r'/\* IWYU pragma: ', line) + + +def IsHeaderGuardIncludeOrOneLineIWYUPragma(line): + return (re.match(r'^#(ifndef|define|endif\s*//)\s*[\w_]+\s*$', line) or + re.match(r'^#include\s', line) or + # Don't break IWYU pragmas, either; that causes iwyu.py problems. + re.search(r'// IWYU pragma: ', line)) + + +def WrapLongLine(line, output): + line = line.rstrip() + if len(line) <= 80: + output.append(line) + elif IsSingleLineComment(line): + if IsHeaderGuardIncludeOrOneLineIWYUPragma(line): + # The style guide made an exception to allow long header guard lines, + # includes and IWYU pragmas. + output.append(line) + else: + WrapComment(line, output) + elif IsInPreprocessorDirective(output, line): + if IsHeaderGuardIncludeOrOneLineIWYUPragma(line): + # The style guide made an exception to allow long header guard lines, + # includes and IWYU pragmas. + output.append(line) + else: + WrapPreprocessorDirective(line, output) + elif IsMultiLineIWYUPragma(line): + output.append(line) + else: + WrapPlainCode(line, output) + + +def BeautifyCode(string): + lines = string.splitlines() + output = [] + for line in lines: + WrapLongLine(line, output) + output2 = [line.rstrip() for line in output] + return '\n'.join(output2) + '\n' + + +def ConvertFromPumpSource(src_text): + """Return the text generated from the given Pump source text.""" + ast = ParseToAST(StripMetaComments(src_text)) + output = Output() + RunCode(Env(), ast, output) + return BeautifyCode(output.string) + + +def main(argv): + if len(argv) == 1: + print __doc__ + sys.exit(1) + + file_path = argv[-1] + output_str = ConvertFromPumpSource(file(file_path, 'r').read()) + if file_path.endswith('.pump'): + output_file_path = file_path[:-5] + else: + output_file_path = '-' + if output_file_path == '-': + print output_str, + else: + output_file = file(output_file_path, 'w') + output_file.write('// This file was GENERATED by command:\n') + output_file.write('// %s %s\n' % + (os.path.basename(__file__), os.path.basename(file_path))) + output_file.write('// DO NOT EDIT BY HAND!!!\n\n') + output_file.write(output_str) + output_file.close() + + +if __name__ == '__main__': + main(sys.argv) diff --git a/external/gtest/scripts/test/Makefile b/external/gtest/scripts/test/Makefile new file mode 100755 index 0000000000..cdff584637 --- /dev/null +++ b/external/gtest/scripts/test/Makefile @@ -0,0 +1,59 @@ +# A Makefile for fusing Google Test and building a sample test against it. +# +# SYNOPSIS: +# +# make [all] - makes everything. +# make TARGET - makes the given target. +# make check - makes everything and runs the built sample test. +# make clean - removes all files generated by make. + +# Points to the root of fused Google Test, relative to where this file is. 
+FUSED_GTEST_DIR = output + +# Paths to the fused gtest files. +FUSED_GTEST_H = $(FUSED_GTEST_DIR)/gtest/gtest.h +FUSED_GTEST_ALL_CC = $(FUSED_GTEST_DIR)/gtest/gtest-all.cc + +# Where to find the sample test. +SAMPLE_DIR = ../../samples + +# Where to find gtest_main.cc. +GTEST_MAIN_CC = ../../src/gtest_main.cc + +# Flags passed to the preprocessor. +# We have no idea here whether pthreads is available in the system, so +# disable its use. +CPPFLAGS += -I$(FUSED_GTEST_DIR) -DGTEST_HAS_PTHREAD=0 + +# Flags passed to the C++ compiler. +CXXFLAGS += -g + +all : sample1_unittest + +check : all + ./sample1_unittest + +clean : + rm -rf $(FUSED_GTEST_DIR) sample1_unittest *.o + +$(FUSED_GTEST_H) : + ../fuse_gtest_files.py $(FUSED_GTEST_DIR) + +$(FUSED_GTEST_ALL_CC) : + ../fuse_gtest_files.py $(FUSED_GTEST_DIR) + +gtest-all.o : $(FUSED_GTEST_H) $(FUSED_GTEST_ALL_CC) + $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(FUSED_GTEST_DIR)/gtest/gtest-all.cc + +gtest_main.o : $(FUSED_GTEST_H) $(GTEST_MAIN_CC) + $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(GTEST_MAIN_CC) + +sample1.o : $(SAMPLE_DIR)/sample1.cc $(SAMPLE_DIR)/sample1.h + $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(SAMPLE_DIR)/sample1.cc + +sample1_unittest.o : $(SAMPLE_DIR)/sample1_unittest.cc \ + $(SAMPLE_DIR)/sample1.h $(FUSED_GTEST_H) + $(CXX) $(CPPFLAGS) $(CXXFLAGS) -c $(SAMPLE_DIR)/sample1_unittest.cc + +sample1_unittest : sample1.o sample1_unittest.o gtest-all.o gtest_main.o + $(CXX) $(CPPFLAGS) $(CXXFLAGS) $^ -o $@ diff --git a/tests/gtest/src/gtest-all.cc b/external/gtest/src/gtest-all.cc old mode 100644 new mode 100755 similarity index 100% rename from tests/gtest/src/gtest-all.cc rename to external/gtest/src/gtest-all.cc diff --git a/tests/gtest/src/gtest-death-test.cc b/external/gtest/src/gtest-death-test.cc old mode 100644 new mode 100755 similarity index 82% rename from tests/gtest/src/gtest-death-test.cc rename to external/gtest/src/gtest-death-test.cc index 8b2e4131ca..c158d0b808 --- a/tests/gtest/src/gtest-death-test.cc +++ b/external/gtest/src/gtest-death-test.cc @@ -43,6 +43,11 @@ # include <errno.h> # include <fcntl.h> # include <limits.h> + +# if GTEST_OS_LINUX +# include <signal.h> +# endif // GTEST_OS_LINUX + # include <stdarg.h> # if GTEST_OS_WINDOWS @@ -52,6 +57,10 @@ # include <sys/wait.h> # endif // GTEST_OS_WINDOWS +# if GTEST_OS_QNX +# include <spawn.h> +# endif // GTEST_OS_QNX + #endif // GTEST_HAS_DEATH_TEST #include "gtest/gtest-message.h" @@ -100,13 +109,42 @@ GTEST_DEFINE_string_( "Indicates the file, line number, temporal index of " "the single death test to run, and a file descriptor to " "which a success code may be sent, all separated by " - "colons. This flag is specified if and only if the current " + "the '|' characters. This flag is specified if and only if the current " "process is a sub-process launched for running a thread-safe " "death test. FOR INTERNAL USE ONLY."); } // namespace internal #if GTEST_HAS_DEATH_TEST +namespace internal { + +// Valid only for fast death tests. Indicates the code is running in the +// child process of a fast style death test. +static bool g_in_fast_death_test_child = false; + +// Returns a Boolean value indicating whether the caller is currently +// executing in the context of the death test child process. Tools such as +// Valgrind heap checkers may need this to modify their behavior in death +// tests. IMPORTANT: This is an internal utility. Using it may break the +// implementation of death tests. User code MUST NOT use it.
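For orientation, the machinery patched in this file backs the public death-test macros; a canonical use (standard Google Test API, not part of this patch) looks like:

    #include <cstdlib>
    #include "gtest/gtest.h"

    // The tested statement runs in a child process (forked, cloned, or
    // spawned, as selected below) while the parent checks the child's exit
    // status and stderr against the expected pattern.
    TEST(MyDeathTest, AbortsOnBadInput) {
      EXPECT_DEATH(std::abort(), "");
    }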
+bool InDeathTestChild() { +# if GTEST_OS_WINDOWS + + // On Windows, death tests are thread-safe regardless of the value of the + // death_test_style flag. + return !GTEST_FLAG(internal_run_death_test).empty(); + +# else + + if (GTEST_FLAG(death_test_style) == "threadsafe") + return !GTEST_FLAG(internal_run_death_test).empty(); + else + return g_in_fast_death_test_child; +#endif +} + +} // namespace internal + // ExitedWithCode constructor. ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) { } @@ -141,7 +179,7 @@ namespace internal { // Generates a textual description of a given exit code, in the format // specified by wait(2). -static String ExitSummary(int exit_code) { +static std::string ExitSummary(int exit_code) { Message m; # if GTEST_OS_WINDOWS @@ -176,7 +214,7 @@ bool ExitedUnsuccessfully(int exit_status) { // one thread running, or cannot determine the number of threads, prior // to executing the given statement. It is the responsibility of the // caller not to pass a thread_count of 1. -static String DeathTestThreadWarning(size_t thread_count) { +static std::string DeathTestThreadWarning(size_t thread_count) { Message msg; msg << "Death tests use fork(), which is unsafe particularly" << " in a threaded context. For this test, " << GTEST_NAME_ << " "; @@ -210,7 +248,7 @@ enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }; // message is propagated back to the parent process. Otherwise, the // message is simply printed to stderr. In either case, the program // then exits with status 1. -void DeathTestAbort(const String& message) { +void DeathTestAbort(const std::string& message) { // On a POSIX system, this function may be called from a threadsafe-style // death test child process, which operates on a very small stack. Use // the heap for any additional non-minuscule memory requirements. @@ -234,9 +272,10 @@ void DeathTestAbort(const String& message) { # define GTEST_DEATH_TEST_CHECK_(expression) \ do { \ if (!::testing::internal::IsTrue(expression)) { \ - DeathTestAbort(::testing::internal::String::Format( \ - "CHECK failed: File %s, line %d: %s", \ - __FILE__, __LINE__, #expression)); \ + DeathTestAbort( \ + ::std::string("CHECK failed: File ") + __FILE__ + ", line " \ + + ::testing::internal::StreamableToString(__LINE__) + ": " \ + + #expression); \ } \ } while (::testing::internal::AlwaysFalse()) @@ -254,15 +293,16 @@ void DeathTestAbort(const String& message) { gtest_retval = (expression); \ } while (gtest_retval == -1 && errno == EINTR); \ if (gtest_retval == -1) { \ - DeathTestAbort(::testing::internal::String::Format( \ - "CHECK failed: File %s, line %d: %s != -1", \ - __FILE__, __LINE__, #expression)); \ + DeathTestAbort( \ + ::std::string("CHECK failed: File ") + __FILE__ + ", line " \ + + ::testing::internal::StreamableToString(__LINE__) + ": " \ + + #expression + " != -1"); \ } \ } while (::testing::internal::AlwaysFalse()) // Returns the message describing the last system error in errno. -String GetLastErrnoDescription() { - return String(errno == 0 ? "" : posix::StrError(errno)); +std::string GetLastErrnoDescription() { + return errno == 0 ? 
"" : posix::StrError(errno); } // This is called from a death test parent process to read a failure @@ -312,11 +352,11 @@ const char* DeathTest::LastMessage() { return last_death_test_message_.c_str(); } -void DeathTest::set_last_death_test_message(const String& message) { +void DeathTest::set_last_death_test_message(const std::string& message) { last_death_test_message_ = message; } -String DeathTest::last_death_test_message_; +std::string DeathTest::last_death_test_message_; // Provides cross platform implementation for some death functionality. class DeathTestImpl : public DeathTest { @@ -333,8 +373,8 @@ class DeathTestImpl : public DeathTest { // read_fd_ is expected to be closed and cleared by a derived class. ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); } - void Abort(AbortReason reason); - virtual bool Passed(bool status_ok); + void Abort(AbortReason reason) override; + virtual bool Passed(bool status_ok) override; const char* statement() const { return statement_; } const RE* regex() const { return regex_; } @@ -491,7 +531,7 @@ bool DeathTestImpl::Passed(bool status_ok) { if (!spawned()) return false; - const String error_message = GetCapturedStderr(); + const std::string error_message = GetCapturedStderr(); bool success = false; Message buffer; @@ -673,22 +713,19 @@ DeathTest::TestRole WindowsDeathTest::AssumeRole() { FALSE, // The initial state is non-signalled. NULL)); // The even is unnamed. GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL); - const String filter_flag = String::Format("--%s%s=%s.%s", - GTEST_FLAG_PREFIX_, kFilterFlag, - info->test_case_name(), - info->name()); - const String internal_flag = String::Format( - "--%s%s=%s|%d|%d|%u|%Iu|%Iu", - GTEST_FLAG_PREFIX_, - kInternalRunDeathTestFlag, - file_, line_, - death_test_index, - static_cast(::GetCurrentProcessId()), - // size_t has the same with as pointers on both 32-bit and 64-bit + const std::string filter_flag = + std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "=" + + info->test_case_name() + "." + info->name(); + const std::string internal_flag = + std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + + "=" + file_ + "|" + StreamableToString(line_) + "|" + + StreamableToString(death_test_index) + "|" + + StreamableToString(static_cast(::GetCurrentProcessId())) + + // size_t has the same width as pointers on both 32-bit and 64-bit // Windows platforms. // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx. - reinterpret_cast(write_handle), - reinterpret_cast(event_handle_.Get())); + "|" + StreamableToString(reinterpret_cast(write_handle)) + + "|" + StreamableToString(reinterpret_cast(event_handle_.Get())); char executable_path[_MAX_PATH + 1]; // NOLINT GTEST_DEATH_TEST_CHECK_( @@ -696,10 +733,9 @@ DeathTest::TestRole WindowsDeathTest::AssumeRole() { executable_path, _MAX_PATH)); - String command_line = String::Format("%s %s \"%s\"", - ::GetCommandLineA(), - filter_flag.c_str(), - internal_flag.c_str()); + std::string command_line = + std::string(::GetCommandLineA()) + " " + filter_flag + " \"" + + internal_flag + "\""; DeathTest::set_last_death_test_message(""); @@ -742,7 +778,7 @@ class ForkingDeathTest : public DeathTestImpl { ForkingDeathTest(const char* statement, const RE* regex); // All of these virtual functions are inherited from DeathTest. 
- virtual int Wait(); + virtual int Wait() override; protected: void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; } @@ -778,7 +814,7 @@ class NoExecDeathTest : public ForkingDeathTest { public: NoExecDeathTest(const char* a_statement, const RE* a_regex) : ForkingDeathTest(a_statement, a_regex) { } - virtual TestRole AssumeRole(); + virtual TestRole AssumeRole() override; }; // The AssumeRole process for a fork-and-run death test. It implements a @@ -816,6 +852,7 @@ DeathTest::TestRole NoExecDeathTest::AssumeRole() { // Event forwarding to the listeners of event listener API must be shut // down in death test subprocesses. GetUnitTestImpl()->listeners()->SuppressEventForwarding(); + g_in_fast_death_test_child = true; return EXECUTE_TEST; } else { GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1])); @@ -833,8 +870,13 @@ class ExecDeathTest : public ForkingDeathTest { ExecDeathTest(const char* a_statement, const RE* a_regex, const char* file, int line) : ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { } - virtual TestRole AssumeRole(); + virtual TestRole AssumeRole() override; private: + static ::std::vector<testing::internal::string> + GetArgvsForDeathTestChildProcess() { + ::std::vector<testing::internal::string> args = GetInjectableArgvs(); + return args; + } // The name of the file in which the death test is located. const char* const file_; // The line number on which the death test is located. @@ -869,6 +911,7 @@ class Arguments { char* const* Argv() { return &args_[0]; } + + private: std::vector<char*> args_; }; @@ -894,6 +937,7 @@ extern "C" char** environ; inline char** GetEnviron() { return environ; } # endif // GTEST_OS_MAC +# if !GTEST_OS_QNX // The main function for a threadsafe-style death test child process. // This function is called in a clone()-ed process and thus must avoid // any potentially unsafe operations like malloc or libc functions. @@ -908,9 +952,8 @@ static int ExecDeathTestChildMain(void* child_arg) { UnitTest::GetInstance()->original_working_dir(); // We can safely call chdir() as it's a direct system call. if (chdir(original_dir) != 0) { - DeathTestAbort(String::Format("chdir(\"%s\") failed: %s", - original_dir, - GetLastErrnoDescription().c_str())); + DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " + + GetLastErrnoDescription()); return EXIT_FAILURE; } @@ -920,12 +963,12 @@ // invoke the test program via a valid path that contains at least // one path separator. execve(args->argv[0], args->argv, GetEnviron()); - DeathTestAbort(String::Format("execve(%s, ...) in %s failed: %s", - args->argv[0], - original_dir, - GetLastErrnoDescription().c_str())); + DeathTestAbort(std::string("execve(") + args->argv[0] + ", ...) in " + + original_dir + " failed: " + + GetLastErrnoDescription()); return EXIT_FAILURE; } +# endif // !GTEST_OS_QNX // Two utility routines that together determine the direction the stack // grows. @@ -936,25 +979,75 @@ static int ExecDeathTestChildMain(void* child_arg) { // GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining // StackLowerThanAddress into StackGrowsDown, which then doesn't give // correct answer.
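The hunk below reworks that probe to use an out parameter instead of a return value. The underlying idea, in isolation (a sketch under the same no-inlining assumption, not the patched code itself): compare the address of a local variable in a nested frame against one in the caller's frame; if the nested local sits lower, the stack grows down.

    static void ProbeLowerThanAddress(const void* ptr, bool* result) {
      int dummy;
      *result = (static_cast<const void*>(&dummy) < ptr);
    }

    static bool ProbeStackGrowsDown() {
      int dummy;
      bool result;
      ProbeLowerThanAddress(&dummy, &result);
      return result;  // true on the common x86/ARM ABIs
    }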
-bool StackLowerThanAddress(const void* ptr) GTEST_NO_INLINE_; -bool StackLowerThanAddress(const void* ptr) { +void StackLowerThanAddress(const void* ptr, bool* result) GTEST_NO_INLINE_; +void StackLowerThanAddress(const void* ptr, bool* result) { int dummy; - return &dummy < ptr; + *result = (&dummy < ptr); } bool StackGrowsDown() { int dummy; - return StackLowerThanAddress(&dummy); + bool result; + StackLowerThanAddress(&dummy, &result); + return result; } -// A threadsafe implementation of fork(2) for threadsafe-style death tests -// that uses clone(2). It dies with an error message if anything goes -// wrong. -static pid_t ExecDeathTestFork(char* const* argv, int close_fd) { +// Spawns a child process with the same executable as the current process in +// a thread-safe manner and instructs it to run the death test. The +// implementation uses fork(2) + exec. On systems where clone(2) is +// available, it is used instead, being slightly more thread-safe. On QNX, +// fork supports only single-threaded environments, so this function uses +// spawn(2) there instead. The function dies with an error message if +// anything goes wrong. +static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) { ExecDeathTestArgs args = { argv, close_fd }; pid_t child_pid = -1; -# if GTEST_HAS_CLONE +# if GTEST_OS_QNX + // Obtains the current directory and sets it to be closed in the child + // process. + const int cwd_fd = open(".", O_RDONLY); + GTEST_DEATH_TEST_CHECK_(cwd_fd != -1); + GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(cwd_fd, F_SETFD, FD_CLOEXEC)); + // We need to execute the test program in the same environment where + // it was originally invoked. Therefore we change to the original + // working directory first. + const char* const original_dir = + UnitTest::GetInstance()->original_working_dir(); + // We can safely call chdir() as it's a direct system call. + if (chdir(original_dir) != 0) { + DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " + + GetLastErrnoDescription()); + return EXIT_FAILURE; + } + + int fd_flags; + // Set close_fd to be closed after spawn. + GTEST_DEATH_TEST_CHECK_SYSCALL_(fd_flags = fcntl(close_fd, F_GETFD)); + GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(close_fd, F_SETFD, + fd_flags | FD_CLOEXEC)); + struct inheritance inherit = {0}; + // spawn is a system call. + child_pid = spawn(args.argv[0], 0, NULL, &inherit, args.argv, GetEnviron()); + // Restores the current working directory. + GTEST_DEATH_TEST_CHECK_(fchdir(cwd_fd) != -1); + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(cwd_fd)); + +# else // GTEST_OS_QNX +# if GTEST_OS_LINUX + // When a SIGPROF signal is received while fork() or clone() are executing, + // the process may hang. To avoid this, we ignore SIGPROF here and re-enable + // it after the call to fork()/clone() is complete. 
+ struct sigaction saved_sigprof_action; + struct sigaction ignore_sigprof_action; + memset(&ignore_sigprof_action, 0, sizeof(ignore_sigprof_action)); + sigemptyset(&ignore_sigprof_action.sa_mask); + ignore_sigprof_action.sa_handler = SIG_IGN; + GTEST_DEATH_TEST_CHECK_SYSCALL_(sigaction( + SIGPROF, &ignore_sigprof_action, &saved_sigprof_action)); +# endif // GTEST_OS_LINUX + +# if GTEST_HAS_CLONE const bool use_fork = GTEST_FLAG(death_test_use_fork); if (!use_fork) { @@ -964,21 +1057,37 @@ static pid_t ExecDeathTestFork(char* const* argv, int close_fd) { void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE, MAP_ANON | MAP_PRIVATE, -1, 0); GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED); + + // Maximum stack alignment in bytes: For a downward-growing stack, this + // amount is subtracted from size of the stack space to get an address + // that is within the stack space and is aligned on all systems we care + // about. As far as I know there is no ABI with stack alignment greater + // than 64. We assume stack and stack_size already have alignment of + // kMaxStackAlignment. + const size_t kMaxStackAlignment = 64; void* const stack_top = - static_cast(stack) + (stack_grows_down ? stack_size : 0); + static_cast(stack) + + (stack_grows_down ? stack_size - kMaxStackAlignment : 0); + GTEST_DEATH_TEST_CHECK_(stack_size > kMaxStackAlignment && + reinterpret_cast(stack_top) % kMaxStackAlignment == 0); child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args); GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1); } -# else +# else const bool use_fork = true; -# endif // GTEST_HAS_CLONE +# endif // GTEST_HAS_CLONE if (use_fork && (child_pid = fork()) == 0) { ExecDeathTestChildMain(&args); _exit(0); } +# endif // GTEST_OS_QNX +# if GTEST_OS_LINUX + GTEST_DEATH_TEST_CHECK_SYSCALL_( + sigaction(SIGPROF, &saved_sigprof_action, NULL)); +# endif // GTEST_OS_LINUX GTEST_DEATH_TEST_CHECK_(child_pid != -1); return child_pid; @@ -1006,16 +1115,16 @@ DeathTest::TestRole ExecDeathTest::AssumeRole() { // it be closed when the child process does an exec: GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1); - const String filter_flag = - String::Format("--%s%s=%s.%s", - GTEST_FLAG_PREFIX_, kFilterFlag, - info->test_case_name(), info->name()); - const String internal_flag = - String::Format("--%s%s=%s|%d|%d|%d", - GTEST_FLAG_PREFIX_, kInternalRunDeathTestFlag, - file_, line_, death_test_index, pipe_fd[1]); + const std::string filter_flag = + std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "=" + + info->test_case_name() + "." + info->name(); + const std::string internal_flag = + std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + "=" + + file_ + "|" + StreamableToString(line_) + "|" + + StreamableToString(death_test_index) + "|" + + StreamableToString(pipe_fd[1]); Arguments args; - args.AddArguments(GetArgvs()); + args.AddArguments(GetArgvsForDeathTestChildProcess()); args.AddArgument(filter_flag.c_str()); args.AddArgument(internal_flag.c_str()); @@ -1026,7 +1135,7 @@ DeathTest::TestRole ExecDeathTest::AssumeRole() { // is necessary. 
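The filter and internal flags assembled just above re-execute the test binary so that only the selected death test runs in the child. A small self-contained sketch of the same string composition, with hypothetical values (file foo.cc, line 42, death-test index 1, pipe fd 5):

    #include <iostream>
    #include <string>

    int main() {
      const std::string internal_flag =
          std::string("--gtest_internal_run_death_test=") + "foo.cc" + "|" +
          std::to_string(42) + "|" + std::to_string(1) + "|" + std::to_string(5);
      std::cout << internal_flag << '\n';
      // Prints: --gtest_internal_run_death_test=foo.cc|42|1|5
      return 0;
    }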
FlushInfoLog(); - const pid_t child_pid = ExecDeathTestFork(args.Argv(), pipe_fd[0]); + const pid_t child_pid = ExecDeathTestSpawnChild(args.Argv(), pipe_fd[0]); GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1])); set_child_pid(child_pid); set_read_fd(pipe_fd[0]); @@ -1052,9 +1161,10 @@ bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex, if (flag != NULL) { if (death_test_index > flag->index()) { - DeathTest::set_last_death_test_message(String::Format( - "Death test count (%d) somehow exceeded expected maximum (%d)", - death_test_index, flag->index())); + DeathTest::set_last_death_test_message( + "Death test count (" + StreamableToString(death_test_index) + + ") somehow exceeded expected maximum (" + + StreamableToString(flag->index()) + ")"); return false; } @@ -1083,9 +1193,9 @@ bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex, # endif // GTEST_OS_WINDOWS else { // NOLINT - this is more readable than unbalanced brackets inside #if. - DeathTest::set_last_death_test_message(String::Format( - "Unknown death test style \"%s\" encountered", - GTEST_FLAG(death_test_style).c_str())); + DeathTest::set_last_death_test_message( + "Unknown death test style \"" + GTEST_FLAG(death_test_style) + + "\" encountered"); return false; } @@ -1123,8 +1233,8 @@ int GetStatusFileDescriptor(unsigned int parent_process_id, FALSE, // Non-inheritable. parent_process_id)); if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) { - DeathTestAbort(String::Format("Unable to open parent process %u", - parent_process_id)); + DeathTestAbort("Unable to open parent process " + + StreamableToString(parent_process_id)); } // TODO(vladl@google.com): Replace the following check with a @@ -1144,9 +1254,10 @@ int GetStatusFileDescriptor(unsigned int parent_process_id, // DUPLICATE_SAME_ACCESS is used. FALSE, // Request non-inheritable handler. 
DUPLICATE_SAME_ACCESS)) { - DeathTestAbort(String::Format( - "Unable to duplicate the pipe handle %Iu from the parent process %u", - write_handle_as_size_t, parent_process_id)); + DeathTestAbort("Unable to duplicate the pipe handle " + + StreamableToString(write_handle_as_size_t) + + " from the parent process " + + StreamableToString(parent_process_id)); } const HANDLE event_handle = reinterpret_cast(event_handle_as_size_t); @@ -1157,17 +1268,18 @@ int GetStatusFileDescriptor(unsigned int parent_process_id, 0x0, FALSE, DUPLICATE_SAME_ACCESS)) { - DeathTestAbort(String::Format( - "Unable to duplicate the event handle %Iu from the parent process %u", - event_handle_as_size_t, parent_process_id)); + DeathTestAbort("Unable to duplicate the event handle " + + StreamableToString(event_handle_as_size_t) + + " from the parent process " + + StreamableToString(parent_process_id)); } const int write_fd = ::_open_osfhandle(reinterpret_cast(dup_write_handle), O_APPEND); if (write_fd == -1) { - DeathTestAbort(String::Format( - "Unable to convert pipe handle %Iu to a file descriptor", - write_handle_as_size_t)); + DeathTestAbort("Unable to convert pipe handle " + + StreamableToString(write_handle_as_size_t) + + " to a file descriptor"); } // Signals the parent that the write end of the pipe has been acquired @@ -1204,9 +1316,8 @@ InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() { || !ParseNaturalNumber(fields[3], &parent_process_id) || !ParseNaturalNumber(fields[4], &write_handle_as_size_t) || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) { - DeathTestAbort(String::Format( - "Bad --gtest_internal_run_death_test flag: %s", - GTEST_FLAG(internal_run_death_test).c_str())); + DeathTestAbort("Bad --gtest_internal_run_death_test flag: " + + GTEST_FLAG(internal_run_death_test)); } write_fd = GetStatusFileDescriptor(parent_process_id, write_handle_as_size_t, @@ -1217,9 +1328,8 @@ InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() { || !ParseNaturalNumber(fields[1], &line) || !ParseNaturalNumber(fields[2], &index) || !ParseNaturalNumber(fields[3], &write_fd)) { - DeathTestAbort(String::Format( - "Bad --gtest_internal_run_death_test flag: %s", - GTEST_FLAG(internal_run_death_test).c_str())); + DeathTestAbort("Bad --gtest_internal_run_death_test flag: " + + GTEST_FLAG(internal_run_death_test)); } # endif // GTEST_OS_WINDOWS diff --git a/tests/gtest/src/gtest-filepath.cc b/external/gtest/src/gtest-filepath.cc old mode 100644 new mode 100755 similarity index 94% rename from tests/gtest/src/gtest-filepath.cc rename to external/gtest/src/gtest-filepath.cc index 91b2571380..6be58b6fca --- a/tests/gtest/src/gtest-filepath.cc +++ b/external/gtest/src/gtest-filepath.cc @@ -29,6 +29,7 @@ // // Authors: keith.ray@gmail.com (Keith Ray) +#include "gtest/gtest-message.h" #include "gtest/internal/gtest-filepath.h" #include "gtest/internal/gtest-port.h" @@ -39,8 +40,8 @@ #elif GTEST_OS_WINDOWS # include # include -#elif GTEST_OS_SYMBIAN || GTEST_OS_NACL -// Symbian OpenC and NaCl have PATH_MAX in sys/syslimits.h +#elif GTEST_OS_SYMBIAN +// Symbian OpenC has PATH_MAX in sys/syslimits.h # include #else # include @@ -116,9 +117,10 @@ FilePath FilePath::GetCurrentDir() { // FilePath("dir/file"). If a case-insensitive extension is not // found, returns a copy of the original FilePath. 
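Concretely, the behavior described in that comment (a sketch against gtest's internal FilePath API, assuming its headers are on the include path):

    #include "gtest/internal/gtest-filepath.h"

    using testing::internal::FilePath;

    // ".xml" is matched case-insensitively and stripped:
    const FilePath trimmed = FilePath("dir/test.XML").RemoveExtension("xml");
    // trimmed.string() == "dir/test"

    // No matching extension: the path is returned unchanged.
    const FilePath same = FilePath("dir/test.cc").RemoveExtension("xml");
    // same.string() == "dir/test.cc"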
FilePath FilePath::RemoveExtension(const char* extension) const { - String dot_extension(String::Format(".%s", extension)); - if (pathname_.EndsWithCaseInsensitive(dot_extension.c_str())) { - return FilePath(String(pathname_.c_str(), pathname_.length() - 4)); + const std::string dot_extension = std::string(".") + extension; + if (String::EndsWithCaseInsensitive(pathname_, dot_extension)) { + return FilePath(pathname_.substr( + 0, pathname_.length() - dot_extension.length())); } return *this; } @@ -147,7 +149,7 @@ const char* FilePath::FindLastPathSeparator() const { // On Windows platform, '\' is the path separator, otherwise it is '/'. FilePath FilePath::RemoveDirectoryName() const { const char* const last_sep = FindLastPathSeparator(); - return last_sep ? FilePath(String(last_sep + 1)) : *this; + return last_sep ? FilePath(last_sep + 1) : *this; } // RemoveFileName returns the directory path with the filename removed. @@ -158,9 +160,9 @@ FilePath FilePath::RemoveDirectoryName() const { // On Windows platform, '\' is the path separator, otherwise it is '/'. FilePath FilePath::RemoveFileName() const { const char* const last_sep = FindLastPathSeparator(); - String dir; + std::string dir; if (last_sep) { - dir = String(c_str(), last_sep + 1 - c_str()); + dir = std::string(c_str(), last_sep + 1 - c_str()); } else { dir = kCurrentDirectoryString; } @@ -177,11 +179,12 @@ FilePath FilePath::MakeFileName(const FilePath& directory, const FilePath& base_name, int number, const char* extension) { - String file; + std::string file; if (number == 0) { - file = String::Format("%s.%s", base_name.c_str(), extension); + file = base_name.string() + "." + extension; } else { - file = String::Format("%s_%d.%s", base_name.c_str(), number, extension); + file = base_name.string() + "_" + StreamableToString(number) + + "." + extension; } return ConcatPaths(directory, FilePath(file)); } @@ -193,8 +196,7 @@ FilePath FilePath::ConcatPaths(const FilePath& directory, if (directory.IsEmpty()) return relative_path; const FilePath dir(directory.RemoveTrailingPathSeparator()); - return FilePath(String::Format("%s%c%s", dir.c_str(), kPathSeparator, - relative_path.c_str())); + return FilePath(dir.string() + kPathSeparator + relative_path.string()); } // Returns true if pathname describes something findable in the file-system, @@ -338,7 +340,7 @@ bool FilePath::CreateFolder() const { // On Windows platform, uses \ as the separator, other platforms use /. FilePath FilePath::RemoveTrailingPathSeparator() const { return IsDirectory() - ? FilePath(String(pathname_.c_str(), pathname_.length() - 1)) + ? FilePath(pathname_.substr(0, pathname_.length() - 1)) : *this; } diff --git a/tests/gtest/src/gtest-internal-inl.h b/external/gtest/src/gtest-internal-inl.h old mode 100644 new mode 100755 similarity index 83% rename from tests/gtest/src/gtest-internal-inl.h rename to external/gtest/src/gtest-internal-inl.h index 65a2101a4d..65561806b0 --- a/tests/gtest/src/gtest-internal-inl.h +++ b/external/gtest/src/gtest-internal-inl.h @@ -58,6 +58,11 @@ #include "gtest/internal/gtest-port.h" +#if GTEST_CAN_STREAM_RESULTS_ +# include // NOLINT +# include // NOLINT +#endif + #if GTEST_OS_WINDOWS # include // NOLINT #endif // GTEST_OS_WINDOWS @@ -112,6 +117,12 @@ GTEST_API_ bool ShouldUseColor(bool stdout_is_tty); // Formats the given time in milliseconds as seconds. 
GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms); +// Converts the given time in milliseconds to a date string in the ISO 8601 +// format, without the timezone information. N.B.: due to the use of the +// non-reentrant localtime() function, this function is not thread safe. Do +// not use it in any code that can be called from multiple threads. +GTEST_API_ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms); + // Parses a string for an Int32 flag, in the form of "--flag=value". // // On success, stores the value of the flag in *value, and returns @@ -190,37 +201,35 @@ class GTestFlagSaver { GTEST_FLAG(stream_result_to) = stream_result_to_; GTEST_FLAG(throw_on_failure) = throw_on_failure_; } + private: // Fields for saving the original values of flags. bool also_run_disabled_tests_; bool break_on_failure_; bool catch_exceptions_; - String color_; - String death_test_style_; + std::string color_; + std::string death_test_style_; bool death_test_use_fork_; - String filter_; - String internal_run_death_test_; + std::string filter_; + std::string internal_run_death_test_; bool list_tests_; - String output_; + std::string output_; bool print_time_; - bool pretty_; internal::Int32 random_seed_; internal::Int32 repeat_; bool shuffle_; internal::Int32 stack_trace_depth_; - String stream_result_to_; + std::string stream_result_to_; bool throw_on_failure_; } GTEST_ATTRIBUTE_UNUSED_; // Converts a Unicode code point to a narrow string in UTF-8 encoding. // code_point parameter is of type UInt32 because wchar_t may not be // wide enough to contain a code point. -// The output buffer str must containt at least 32 characters. -// The function returns the address of the output buffer. // If the code_point is not a valid Unicode code point -// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output -// as '(Invalid Unicode 0xXXXXXXXX)'. -GTEST_API_ char* CodePointToUtf8(UInt32 code_point, char* str); +// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted +// to "(Invalid Unicode 0xXXXXXXXX)". +GTEST_API_ std::string CodePointToUtf8(UInt32 code_point); // Converts a wide string to a narrow string in UTF-8 encoding. // The wide string is assumed to have the following encoding: @@ -235,7 +244,7 @@ GTEST_API_ char* CodePointToUtf8(UInt32 code_point, char* str); // as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding // and contains invalid UTF-16 surrogate pairs, values in those pairs // will be encoded as individual Unicode characters from Basic Normal Plane. -GTEST_API_ String WideStringToUtf8(const wchar_t* str, int num_chars); +GTEST_API_ std::string WideStringToUtf8(const wchar_t* str, int num_chars); // Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file // if the variable is present. If a file already exists at this location, this @@ -339,16 +348,15 @@ class TestPropertyKeyIs { // Constructor. // // TestPropertyKeyIs has NO default constructor. - explicit TestPropertyKeyIs(const char* key) - : key_(key) {} + explicit TestPropertyKeyIs(const std::string& key) : key_(key) {} // Returns true iff the test name of test property matches on key_. bool operator()(const TestProperty& test_property) const { - return String(test_property.key()).Compare(key_) == 0; + return test_property.key() == key_; } private: - String key_; + std::string key_; }; // Class UnitTestOptions. @@ -366,12 +374,12 @@ class GTEST_API_ UnitTestOptions { // Functions for processing the gtest_output flag.
// Returns the output format, or "" for normal printed output. - static String GetOutputFormat(); + static std::string GetOutputFormat(); // Returns the absolute path of the requested output file, or the // default (test_detail.xml in the original working directory) if // none was explicitly specified. - static String GetAbsolutePathToOutputFile(); + static std::string GetAbsolutePathToOutputFile(); // Functions for processing the gtest_filter flag. @@ -384,8 +392,8 @@ class GTEST_API_ UnitTestOptions { // Returns true iff the user-specified filter matches the test case // name and the test name. - static bool FilterMatchesTest(const String &test_case_name, - const String &test_name); + static bool FilterMatchesTest(const std::string &test_case_name, + const std::string &test_name); #if GTEST_OS_WINDOWS // Function for supporting the gtest_catch_exception flag. @@ -398,7 +406,7 @@ class GTEST_API_ UnitTestOptions { // Returns true if "name" matches the ':' separated list of glob-style // filters in "filter". - static bool MatchesFilter(const String& name, const char* filter); + static bool MatchesFilter(const std::string& name, const char* filter); }; // Returns the current application's name, removing directory path if that @@ -411,13 +419,13 @@ class OsStackTraceGetterInterface { OsStackTraceGetterInterface() {} virtual ~OsStackTraceGetterInterface() {} - // Returns the current OS stack trace as a String. Parameters: + // Returns the current OS stack trace as an std::string. Parameters: // // max_depth - the maximum number of stack frames to be included // in the trace. // skip_count - the number of top frames to be skipped; doesn't count // against max_depth. - virtual String CurrentStackTrace(int max_depth, int skip_count) = 0; + virtual string CurrentStackTrace(int max_depth, int skip_count) = 0; // UponLeavingGTest() should be called immediately before Google Test calls // user code. It saves some information about the current stack that @@ -432,8 +440,11 @@ class OsStackTraceGetterInterface { class OsStackTraceGetter : public OsStackTraceGetterInterface { public: OsStackTraceGetter() : caller_frame_(NULL) {} - virtual String CurrentStackTrace(int max_depth, int skip_count); - virtual void UponLeavingGTest(); + + virtual string CurrentStackTrace(int max_depth, int skip_count) override + GTEST_LOCK_EXCLUDED_(mutex_); + + virtual void UponLeavingGTest() override GTEST_LOCK_EXCLUDED_(mutex_); // This string is inserted in place of stack frames that are part of // Google Test's implementation. @@ -455,7 +466,7 @@ class OsStackTraceGetter : public OsStackTraceGetterInterface { struct TraceInfo { const char* file; int line; - String message; + std::string message; }; // This is the default global test part result reporter used in UnitTestImpl. @@ -466,7 +477,7 @@ class DefaultGlobalTestPartResultReporter explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test); // Implements the TestPartResultReporterInterface. Reports the test part // result in the current test. - virtual void ReportTestPartResult(const TestPartResult& result); + virtual void ReportTestPartResult(const TestPartResult& result) override; private: UnitTestImpl* const unit_test_; @@ -482,7 +493,7 @@ class DefaultPerThreadTestPartResultReporter explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test); // Implements the TestPartResultReporterInterface. The implementation just // delegates to the current global test part result reporter of *unit_test_. 
- virtual void ReportTestPartResult(const TestPartResult& result); + virtual void ReportTestPartResult(const TestPartResult& result) override; private: UnitTestImpl* const unit_test_; @@ -539,15 +550,25 @@ class GTEST_API_ UnitTestImpl { // Gets the number of failed tests. int failed_test_count() const; + // Gets the number of disabled tests that will be reported in the XML report. + int reportable_disabled_test_count() const; + // Gets the number of disabled tests. int disabled_test_count() const; + // Gets the number of tests to be printed in the XML report. + int reportable_test_count() const; + // Gets the number of all tests. int total_test_count() const; // Gets the number of tests that should run. int test_to_run_count() const; + // Gets the time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp() const { return start_timestamp_; } + // Gets the elapsed time, in milliseconds. TimeInMillis elapsed_time() const { return elapsed_time_; } @@ -596,7 +617,7 @@ class GTEST_API_ UnitTestImpl { // getter, and returns it. OsStackTraceGetterInterface* os_stack_trace_getter(); - // Returns the current OS stack trace as a String. + // Returns the current OS stack trace as an std::string. // // The maximum number of stack frames to be included is specified by // the gtest_stack_trace_depth flag. The skip_count parameter @@ -606,7 +627,7 @@ class GTEST_API_ UnitTestImpl { // For example, if Foo() calls Bar(), which in turn calls // CurrentOsStackTraceExceptTop(1), Foo() will be included in the // trace but Bar() and CurrentOsStackTraceExceptTop() won't. - String CurrentOsStackTraceExceptTop(int skip_count); + std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_; // Finds and returns a TestCase with the given name. If one doesn't // exist, creates one and returns it. @@ -696,6 +717,12 @@ class GTEST_API_ UnitTestImpl { ad_hoc_test_result_.Clear(); } + // Adds a TestProperty to the current TestResult object when invoked in a + // context of a test or a test case, or to the global property set. If the + // result already contains a property with the same key, the value will be + // updated. + void RecordProperty(const TestProperty& test_property); + enum ReactionToSharding { HONOR_SHARDING_PROTOCOL, IGNORE_SHARDING_PROTOCOL @@ -880,6 +907,10 @@ class GTEST_API_ UnitTestImpl { // Our random number generator. internal::Random random_; + // The time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp_; + // How long the test took to run, in milliseconds. TimeInMillis elapsed_time_; @@ -935,7 +966,7 @@ GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv); // Returns the message describing the last system error, regardless of the // platform. -GTEST_API_ String GetLastErrnoDescription(); +GTEST_API_ std::string GetLastErrnoDescription(); # if GTEST_OS_WINDOWS // Provides leak-safe Windows kernel handle ownership. @@ -1018,8 +1049,9 @@ bool ParseNaturalNumber(const ::std::string& str, Integer* number) { class TestResultAccessor { public: static void RecordProperty(TestResult* test_result, + const std::string& xml_element, const TestProperty& property) { - test_result->RecordProperty(property); + test_result->RecordProperty(xml_element, property); } static void ClearTestPartResults(TestResult* test_result) { @@ -1032,6 +1064,154 @@ class TestResultAccessor { } }; +#if GTEST_CAN_STREAM_RESULTS_ + +// Streams test results to the given port on the given host machine. 
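Before the class itself, it may help to see what it emits: one URL-encoded key=value line per test event, written to the socket. An illustrative transcript for a single passing test (values invented, derived from the SendLn() calls in the class below):

    gtest_streaming_protocol_version=1.0
    event=TestProgramStart
    event=TestIterationStart&iteration=0
    event=TestCaseStart&name=MyCase
    event=TestStart&name=DoesFoo
    event=TestEnd&passed=1&elapsed_time=3ms
    event=TestCaseEnd&passed=1&elapsed_time=3ms
    event=TestIterationEnd&passed=1&elapsed_time=3ms
    event=TestProgramEnd&passed=1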
+class StreamingListener : public EmptyTestEventListener { + public: + // Abstract base class for writing strings to a socket. + class AbstractSocketWriter { + public: + virtual ~AbstractSocketWriter() {} + + // Sends a string to the socket. + virtual void Send(const string& message) = 0; + + // Closes the socket. + virtual void CloseConnection() {} + + // Sends a string and a newline to the socket. + void SendLn(const string& message) { + Send(message + "\n"); + } + }; + + // Concrete class for actually writing strings to a socket. + class SocketWriter : public AbstractSocketWriter { + public: + SocketWriter(const string& host, const string& port) + : sockfd_(-1), host_name_(host), port_num_(port) { + MakeConnection(); + } + + virtual ~SocketWriter() { + if (sockfd_ != -1) + CloseConnection(); + } + + // Sends a string to the socket. + virtual void Send(const string& message) override { + GTEST_CHECK_(sockfd_ != -1) + << "Send() can be called only when there is a connection."; + + const int len = static_cast<int>(message.length()); + if (write(sockfd_, message.c_str(), len) != len) { + GTEST_LOG_(WARNING) + << "stream_result_to: failed to stream to " + << host_name_ << ":" << port_num_; + } + } + + private: + // Creates a client socket and connects to the server. + void MakeConnection(); + + // Closes the socket. + void CloseConnection() override { + GTEST_CHECK_(sockfd_ != -1) + << "CloseConnection() can be called only when there is a connection."; + + close(sockfd_); + sockfd_ = -1; + } + + int sockfd_; // socket file descriptor + const string host_name_; + const string port_num_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter); + }; // class SocketWriter + + // Escapes '=', '&', '%', and '\n' characters in str as "%xx". + static string UrlEncode(const char* str); + + StreamingListener(const string& host, const string& port) + : socket_writer_(new SocketWriter(host, port)) { Start(); } + + explicit StreamingListener(AbstractSocketWriter* socket_writer) + : socket_writer_(socket_writer) { Start(); } + + void OnTestProgramStart(const UnitTest& /* unit_test */) override { + SendLn("event=TestProgramStart"); + } + + void OnTestProgramEnd(const UnitTest& unit_test) override { + // Note that Google Test currently only reports elapsed time for each + // test iteration, not for the entire test program. + SendLn("event=TestProgramEnd&passed=" + FormatBool(unit_test.Passed())); + + // Notify the streaming server to stop.
+ socket_writer_->CloseConnection(); + } + + void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) override { + SendLn("event=TestIterationStart&iteration=" + + StreamableToString(iteration)); + } + + void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) override { + SendLn("event=TestIterationEnd&passed=" + + FormatBool(unit_test.Passed()) + "&elapsed_time=" + + StreamableToString(unit_test.elapsed_time()) + "ms"); + } + + void OnTestCaseStart(const TestCase& test_case) override { + SendLn(std::string("event=TestCaseStart&name=") + test_case.name()); + } + + void OnTestCaseEnd(const TestCase& test_case) override { + SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed()) + + "&elapsed_time=" + StreamableToString(test_case.elapsed_time()) + + "ms"); + } + + void OnTestStart(const TestInfo& test_info) override { + SendLn(std::string("event=TestStart&name=") + test_info.name()); + } + + void OnTestEnd(const TestInfo& test_info) override { + SendLn("event=TestEnd&passed=" + + FormatBool((test_info.result())->Passed()) + + "&elapsed_time=" + + StreamableToString((test_info.result())->elapsed_time()) + "ms"); + } + + void OnTestPartResult(const TestPartResult& test_part_result) override { + const char* file_name = test_part_result.file_name(); + if (file_name == NULL) + file_name = ""; + SendLn("event=TestPartResult&file=" + UrlEncode(file_name) + + "&line=" + StreamableToString(test_part_result.line_number()) + + "&message=" + UrlEncode(test_part_result.message())); + } + + private: + // Sends the given message and a newline to the socket. + void SendLn(const string& message) { socket_writer_->SendLn(message); } + + // Called at the start of streaming to notify the receiver what + // protocol we are using. + void Start() { SendLn("gtest_streaming_protocol_version=1.0"); } + + string FormatBool(bool value) { return value ? "1" : "0"; } + + const scoped_ptr<AbstractSocketWriter> socket_writer_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener); +}; // class StreamingListener + +#endif // GTEST_CAN_STREAM_RESULTS_ + } // namespace internal } // namespace testing diff --git a/tests/gtest/src/gtest-port.cc b/external/gtest/src/gtest-port.cc old mode 100644 new mode 100755 similarity index 86% rename from tests/gtest/src/gtest-port.cc rename to external/gtest/src/gtest-port.cc index b860d4812e..0c4df5f29a --- a/tests/gtest/src/gtest-port.cc +++ b/external/gtest/src/gtest-port.cc @@ -51,6 +51,11 @@ # include <mach/vm_map.h> #endif // GTEST_OS_MAC +#if GTEST_OS_QNX +# include <devctl.h> +# include <sys/procfs.h> +#endif // GTEST_OS_QNX + #include "gtest/gtest-spi.h" #include "gtest/gtest-message.h" #include "gtest/internal/gtest-internal.h" @@ -98,6 +103,26 @@ size_t GetThreadCount() { } } +#elif GTEST_OS_QNX + +// Returns the number of threads running in the process, or 0 to indicate that +// we cannot detect it. +size_t GetThreadCount() { + const int fd = open("/proc/self/as", O_RDONLY); + if (fd < 0) { + return 0; + } + procfs_info process_info; + const int status = + devctl(fd, DCMD_PROC_INFO, &process_info, sizeof(process_info), NULL); + close(fd); + if (status == EOK) { + return static_cast<size_t>(process_info.num_threads); + } else { + return 0; + } +} + #else size_t GetThreadCount() { @@ -222,7 +247,7 @@ bool AtomMatchesChar(bool escaped, char pattern_char, char ch) { } // Helper function used by ValidateRegex() to format error messages.
-String FormatRegexSyntaxError(const char* regex, int index) { +std::string FormatRegexSyntaxError(const char* regex, int index) { return (Message() << "Syntax error at index " << index << " in simple regular expression \"" << regex << "\": ").GetString(); } @@ -429,15 +454,15 @@ const char kUnknownFile[] = "unknown file"; // Formats a source file path and a line number as they would appear // in an error message from the compiler used to compile this code. GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) { - const char* const file_name = file == NULL ? kUnknownFile : file; + const std::string file_name(file == NULL ? kUnknownFile : file); if (line < 0) { - return String::Format("%s:", file_name).c_str(); + return file_name + ":"; } #ifdef _MSC_VER - return String::Format("%s(%d):", file_name, line).c_str(); + return file_name + "(" + StreamableToString(line) + "):"; #else - return String::Format("%s:%d:", file_name, line).c_str(); + return file_name + ":" + StreamableToString(line) + ":"; #endif // _MSC_VER } @@ -448,12 +473,12 @@ GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) { // to the file location it produces, unlike FormatFileLocation(). GTEST_API_ ::std::string FormatCompilerIndependentFileLocation( const char* file, int line) { - const char* const file_name = file == NULL ? kUnknownFile : file; + const std::string file_name(file == NULL ? kUnknownFile : file); if (line < 0) return file_name; else - return String::Format("%s:%d", file_name, line).c_str(); + return file_name + ":" + StreamableToString(line); } @@ -488,8 +513,7 @@ GTestLog::~GTestLog() { class CapturedStream { public: // The ctor redirects the stream to a temporary file. - CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) { - + explicit CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) { # if GTEST_OS_WINDOWS char temp_dir_path[MAX_PATH + 1] = { '\0' }; // NOLINT char temp_file_path[MAX_PATH + 1] = { '\0' }; // NOLINT @@ -506,10 +530,29 @@ class CapturedStream { << temp_file_path; filename_ = temp_file_path; # else - // There's no guarantee that a test has write access to the - // current directory, so we create the temporary file in the /tmp - // directory instead. + // There's no guarantee that a test has write access to the current + // directory, so we create the temporary file in the /tmp directory + // instead. We use /tmp on most systems, and /sdcard on Android. + // That's because Android doesn't have /tmp. +# if GTEST_OS_LINUX_ANDROID + // Note: Android applications are expected to call the framework's + // Context.getExternalStorageDirectory() method through JNI to get + // the location of the world-writable SD Card directory. However, + // this requires a Context handle, which cannot be retrieved + // globally from native code. Doing so also precludes running the + // code as part of a regular standalone executable, which doesn't + // run in a Dalvik process (e.g. when running it through 'adb shell'). + // + // The location /sdcard is directly accessible from native code + // and is the only location (unofficially) supported by the Android + // team. It's generally a symlink to the real SD Card mount point + // which can be /mnt/sdcard, /mnt/sdcard0, /system/media/sdcard, or + // other OEM-customized locations. Never rely on these, and always + // use /sdcard. 
+ char name_template[] = "/sdcard/gtest_captured_stream.XXXXXX"; +# else char name_template[] = "/tmp/captured_stream.XXXXXX"; +# endif // GTEST_OS_LINUX_ANDROID const int captured_fd = mkstemp(name_template); filename_ = name_template; # endif // GTEST_OS_WINDOWS @@ -522,7 +565,7 @@ class CapturedStream { remove(filename_.c_str()); } - String GetCapturedString() { + std::string GetCapturedString() { if (uncaptured_fd_ != -1) { // Restores the original stream. fflush(NULL); @@ -532,14 +575,14 @@ class CapturedStream { } FILE* const file = posix::FOpen(filename_.c_str(), "r"); - const String content = ReadEntireFile(file); + const std::string content = ReadEntireFile(file); posix::FClose(file); return content; } private: - // Reads the entire content of a file as a String. - static String ReadEntireFile(FILE* file); + // Reads the entire content of a file as an std::string. + static std::string ReadEntireFile(FILE* file); // Returns the size (in bytes) of a file. static size_t GetFileSize(FILE* file); @@ -559,7 +602,7 @@ size_t CapturedStream::GetFileSize(FILE* file) { } // Reads the entire content of a file as a string. -String CapturedStream::ReadEntireFile(FILE* file) { +std::string CapturedStream::ReadEntireFile(FILE* file) { const size_t file_size = GetFileSize(file); char* const buffer = new char[file_size]; @@ -575,7 +618,7 @@ String CapturedStream::ReadEntireFile(FILE* file) { bytes_read += bytes_last_read; } while (bytes_last_read > 0 && bytes_read < file_size); - const String content(buffer, bytes_read); + const std::string content(buffer, bytes_read); delete[] buffer; return content; @@ -598,8 +641,8 @@ void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) { } // Stops capturing the output stream and returns the captured string. -String GetCapturedStream(CapturedStream** captured_stream) { - const String content = (*captured_stream)->GetCapturedString(); +std::string GetCapturedStream(CapturedStream** captured_stream) { + const std::string content = (*captured_stream)->GetCapturedString(); delete *captured_stream; *captured_stream = NULL; @@ -618,21 +661,37 @@ void CaptureStderr() { } // Stops capturing stdout and returns the captured string. -String GetCapturedStdout() { return GetCapturedStream(&g_captured_stdout); } +std::string GetCapturedStdout() { + return GetCapturedStream(&g_captured_stdout); +} // Stops capturing stderr and returns the captured string. -String GetCapturedStderr() { return GetCapturedStream(&g_captured_stderr); } +std::string GetCapturedStderr() { + return GetCapturedStream(&g_captured_stderr); +} #endif // GTEST_HAS_STREAM_REDIRECTION #if GTEST_HAS_DEATH_TEST // A copy of all command line arguments. Set by InitGoogleTest(). -::std::vector g_argvs; +::std::vector g_argvs; -// Returns the command line as a vector of strings. -const ::std::vector& GetArgvs() { return g_argvs; } +static const ::std::vector* g_injected_test_argvs = + NULL; // Owned. +void SetInjectableArgvs(const ::std::vector* argvs) { + if (g_injected_test_argvs != argvs) + delete g_injected_test_argvs; + g_injected_test_argvs = argvs; +} + +const ::std::vector& GetInjectableArgvs() { + if (g_injected_test_argvs != NULL) { + return *g_injected_test_argvs; + } + return g_argvs; +} #endif // GTEST_HAS_DEATH_TEST #if GTEST_OS_WINDOWS_MOBILE @@ -647,8 +706,8 @@ void Abort() { // Returns the name of the environment variable corresponding to the // given flag. For example, FlagToEnvVar("foo") will return // "GTEST_FOO" in the open-source version. 
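The mapping described above, sketched as a standalone helper (a hypothetical name, mirroring the real function: it prepends the "gtest_" flag prefix and upper-cases the result, so "foo" becomes "GTEST_FOO"):

    #include <cctype>
    #include <string>

    std::string FlagToEnvVarSketch(const std::string& flag) {
      std::string env_var = "gtest_" + flag;
      for (size_t i = 0; i != env_var.size(); ++i)
        env_var[i] = static_cast<char>(
            std::toupper(static_cast<unsigned char>(env_var[i])));
      return env_var;  // e.g. "death_test_style" -> "GTEST_DEATH_TEST_STYLE"
    }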
-static String FlagToEnvVar(const char* flag) { - const String full_flag = +static std::string FlagToEnvVar(const char* flag) { + const std::string full_flag = (Message() << GTEST_FLAG_PREFIX_ << flag).GetString(); Message env_var; @@ -705,7 +764,7 @@ bool ParseInt32(const Message& src_text, const char* str, Int32* value) { // // The value is considered true iff it's not "0". bool BoolFromGTestEnv(const char* flag, bool default_value) { - const String env_var = FlagToEnvVar(flag); + const std::string env_var = FlagToEnvVar(flag); const char* const string_value = posix::GetEnv(env_var.c_str()); return string_value == NULL ? default_value : strcmp(string_value, "0") != 0; @@ -715,7 +774,7 @@ bool BoolFromGTestEnv(const char* flag, bool default_value) { // variable corresponding to the given flag; if it isn't set or // doesn't represent a valid 32-bit integer, returns default_value. Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) { - const String env_var = FlagToEnvVar(flag); + const std::string env_var = FlagToEnvVar(flag); const char* const string_value = posix::GetEnv(env_var.c_str()); if (string_value == NULL) { // The environment variable is not set. @@ -737,7 +796,7 @@ Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) { // Reads and returns the string environment variable corresponding to // the given flag; if it's not set, returns default_value. const char* StringFromGTestEnv(const char* flag, const char* default_value) { - const String env_var = FlagToEnvVar(flag); + const std::string env_var = FlagToEnvVar(flag); const char* const value = posix::GetEnv(env_var.c_str()); return value == NULL ? default_value : value; } diff --git a/tests/gtest/src/gtest-printers.cc b/external/gtest/src/gtest-printers.cc old mode 100644 new mode 100755 similarity index 80% rename from tests/gtest/src/gtest-printers.cc rename to external/gtest/src/gtest-printers.cc index ed63c7b3b9..75fa408100 --- a/tests/gtest/src/gtest-printers.cc +++ b/external/gtest/src/gtest-printers.cc @@ -55,14 +55,6 @@ namespace { using ::std::ostream; -#if GTEST_OS_WINDOWS_MOBILE // Windows CE does not define _snprintf_s. -# define snprintf _snprintf -#elif _MSC_VER >= 1400 // VC 8.0 and later deprecate snprintf and _snprintf. -# define snprintf _snprintf_s -#elif _MSC_VER -# define snprintf _snprintf -#endif // GTEST_OS_WINDOWS_MOBILE - // Prints a segment of bytes in the given object. void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start, size_t count, ostream* os) { @@ -77,7 +69,7 @@ void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start, else *os << '-'; } - snprintf(text, sizeof(text), "%02X", obj_bytes[j]); + GTEST_SNPRINTF_(text, sizeof(text), "%02X", obj_bytes[j]); *os << text; } } @@ -184,16 +176,16 @@ static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) { *os << static_cast<char>(c); return kAsIs; } else { - *os << String::Format("\\x%X", static_cast<UnsignedChar>(c)); + *os << "\\x" + String::FormatHexInt(static_cast<UnsignedChar>(c)); return kHexEscape; } } return kSpecialEscape; } -// Prints a char c as if it's part of a string literal, escaping it when +// Prints a wchar_t c as if it's part of a string literal, escaping it when // necessary; returns how c was formatted.
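FlagToEnvVar derives the environment-variable name by prefixing GTEST_FLAG_PREFIX_ ("gtest_") and upper-casing the result, so BoolFromGTestEnv("foo", ...) consults GTEST_FOO. A minimal sketch of that mapping (the helper name is hypothetical):

#include <cctype>
#include <cstdlib>
#include <iostream>
#include <string>

std::string FlagToEnvVarSketch(const std::string& flag) {
  std::string env_var = "gtest_" + flag;  // stands in for GTEST_FLAG_PREFIX_
  for (size_t i = 0; i < env_var.size(); ++i)
    env_var[i] = static_cast<char>(toupper(static_cast<unsigned char>(env_var[i])));
  return env_var;
}

int main() {
  const std::string var = FlagToEnvVarSketch("filter");  // "GTEST_FILTER"
  const char* const value = getenv(var.c_str());
  std::cout << var << " = " << (value ? value : "(unset)") << "\n";
  return 0;
}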
-static CharFormat PrintAsWideStringLiteralTo(wchar_t c, ostream* os) { +static CharFormat PrintAsStringLiteralTo(wchar_t c, ostream* os) { switch (c) { case L'\'': *os << "'"; @@ -208,8 +200,9 @@ static CharFormat PrintAsWideStringLiteralTo(wchar_t c, ostream* os) { // Prints a char c as if it's part of a string literal, escaping it when // necessary; returns how c was formatted. -static CharFormat PrintAsNarrowStringLiteralTo(char c, ostream* os) { - return PrintAsWideStringLiteralTo(static_cast<wchar_t>(c), os); +static CharFormat PrintAsStringLiteralTo(char c, ostream* os) { + return PrintAsStringLiteralTo( + static_cast<wchar_t>(static_cast<unsigned char>(c)), os); } // Prints a wide or narrow character c and its code. '\0' is printed @@ -228,7 +221,7 @@ void PrintCharAndCodeTo(Char c, ostream* os) { // obvious). if (c == 0) return; - *os << " (" << String::Format("%d", c).c_str(); + *os << " (" << static_cast<int>(c); // For more convenience, we print c's code again in hexidecimal, // unless c was already printed in the form '\x##' or the code is in @@ -236,8 +229,7 @@ void PrintCharAndCodeTo(Char c, ostream* os) { if (format == kHexEscape || (1 <= c && c <= 9)) { // Do nothing. } else { - *os << String::Format(", 0x%X", - static_cast<UnsignedChar>(c)).c_str(); + *os << ", 0x" << String::FormatHexInt(static_cast<UnsignedChar>(c)); } *os << ")"; } @@ -255,48 +247,63 @@ void PrintTo(wchar_t wc, ostream* os) { PrintCharAndCodeTo(wc, os); } -// Prints the given array of characters to the ostream. -// The array starts at *begin, the length is len, it may include '\0' characters -// and may not be null-terminated. -static void PrintCharsAsStringTo(const char* begin, size_t len, ostream* os) { - *os << "\""; +// Prints the given array of characters to the ostream. CharType must be either +// char or wchar_t. +// The array starts at begin, the length is len, it may include '\0' characters +// and may not be NUL-terminated. +template <typename CharType> +static void PrintCharsAsStringTo( + const CharType* begin, size_t len, ostream* os) { + const char* const kQuoteBegin = sizeof(CharType) == 1 ? "\"" : "L\""; + *os << kQuoteBegin; bool is_previous_hex = false; for (size_t index = 0; index < len; ++index) { - const char cur = begin[index]; + const CharType cur = begin[index]; if (is_previous_hex && IsXDigit(cur)) { // Previous character is of '\x..' form and this character can be // interpreted as another hexadecimal digit in its number. Break string to // disambiguate. - *os << "\" \""; + *os << "\" " << kQuoteBegin; } - is_previous_hex = PrintAsNarrowStringLiteralTo(cur, os) == kHexEscape; + is_previous_hex = PrintAsStringLiteralTo(cur, os) == kHexEscape; } *os << "\""; } +// Prints a (const) char/wchar_t array of 'len' elements, starting at address +// 'begin'. CharType must be either char or wchar_t. +template <typename CharType> +static void UniversalPrintCharArray( + const CharType* begin, size_t len, ostream* os) { + // The code + // const char kFoo[] = "foo"; + // generates an array of 4, not 3, elements, with the last one being '\0'. + // + // Therefore when printing a char array, we don't print the last element if + // it's '\0', such that the output matches the string literal as it's + // written in the source code. + if (len > 0 && begin[len - 1] == '\0') { + PrintCharsAsStringTo(begin, len - 1, os); + return; + } + + // If, however, the last element in the array is not '\0', e.g. + // const char kFoo[] = { 'f', 'o', 'o' }; + // we must print the entire array. We also print a message to indicate + // that the array is not NUL-terminated.
+ PrintCharsAsStringTo(begin, len, os); + *os << " (no terminating NUL)"; +} + // Prints a (const) char array of 'len' elements, starting at address 'begin'. void UniversalPrintArray(const char* begin, size_t len, ostream* os) { - PrintCharsAsStringTo(begin, len, os); + UniversalPrintCharArray(begin, len, os); } -// Prints the given array of wide characters to the ostream. -// The array starts at *begin, the length is len, it may include L'\0' -// characters and may not be null-terminated. -static void PrintWideCharsAsStringTo(const wchar_t* begin, size_t len, - ostream* os) { - *os << "L\""; - bool is_previous_hex = false; - for (size_t index = 0; index < len; ++index) { - const wchar_t cur = begin[index]; - if (is_previous_hex && isascii(cur) && IsXDigit(static_cast<char>(cur))) { - // Previous character is of '\x..' form and this character can be - // interpreted as another hexadecimal digit in its number. Break string to - // disambiguate. - *os << "\" L\""; - } - is_previous_hex = PrintAsWideStringLiteralTo(cur, os) == kHexEscape; - } - *os << "\""; +// Prints a (const) wchar_t array of 'len' elements, starting at address +// 'begin'. +void UniversalPrintArray(const wchar_t* begin, size_t len, ostream* os) { + UniversalPrintCharArray(begin, len, os); } // Prints the given C string to the ostream. @@ -322,7 +329,7 @@ void PrintTo(const wchar_t* s, ostream* os) { *os << "NULL"; } else { *os << ImplicitCast_<const void*>(s) << " pointing to "; - PrintWideCharsAsStringTo(s, wcslen(s), os); + PrintCharsAsStringTo(s, wcslen(s), os); } } #endif // wchar_t is native @@ -341,13 +348,13 @@ void PrintStringTo(const ::std::string& s, ostream* os) { // Prints a ::wstring object. #if GTEST_HAS_GLOBAL_WSTRING void PrintWideStringTo(const ::wstring& s, ostream* os) { - PrintWideCharsAsStringTo(s.data(), s.size(), os); + PrintCharsAsStringTo(s.data(), s.size(), os); } #endif // GTEST_HAS_GLOBAL_WSTRING #if GTEST_HAS_STD_WSTRING void PrintWideStringTo(const ::std::wstring& s, ostream* os) { - PrintWideCharsAsStringTo(s.data(), s.size(), os); + PrintCharsAsStringTo(s.data(), s.size(), os); } #endif // GTEST_HAS_STD_WSTRING diff --git a/tests/gtest/src/gtest-test-part.cc b/external/gtest/src/gtest-test-part.cc old mode 100644 new mode 100755 similarity index 95% rename from tests/gtest/src/gtest-test-part.cc rename to external/gtest/src/gtest-test-part.cc index 5ddc67c1c9..c60eef3ab3 --- a/tests/gtest/src/gtest-test-part.cc +++ b/external/gtest/src/gtest-test-part.cc @@ -48,10 +48,10 @@ using internal::GetUnitTestImpl; // Gets the summary of the failure message by omitting the stack trace // in it. -internal::String TestPartResult::ExtractSummary(const char* message) { +std::string TestPartResult::ExtractSummary(const char* message) { const char* const stack_trace = strstr(message, internal::kStackTraceMarker); - return stack_trace == NULL ? internal::String(message) : - internal::String(message, stack_trace - message); + return stack_trace == NULL ? message : + std::string(message, stack_trace); } // Prints a TestPartResult object.
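UniversalPrintCharArray's NUL rule above is subtle: a string literal's hidden terminator is dropped so the output matches the source, while an explicitly sized array is printed whole and flagged. A standalone sketch of the same rule for narrow chars (simplified: no escaping, hypothetical helper name):

#include <cstddef>
#include <iostream>

void PrintArraySketch(const char* begin, size_t len, std::ostream* os) {
  const bool has_nul = len > 0 && begin[len - 1] == '\0';
  *os << '"';
  os->write(begin, static_cast<std::streamsize>(has_nul ? len - 1 : len));
  *os << '"';
  if (!has_nul) *os << " (no terminating NUL)";  // explicitly sized array
  *os << '\n';
}

int main() {
  const char kLiteral[] = "foo";            // 4 elements, ends in '\0'
  const char kRaw[] = { 'f', 'o', 'o' };    // 3 elements, no '\0'
  PrintArraySketch(kLiteral, sizeof(kLiteral), &std::cout);  // "foo"
  PrintArraySketch(kRaw, sizeof(kRaw), &std::cout);  // "foo" (no terminating NUL)
  return 0;
}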
diff --git a/tests/gtest/src/gtest-typed-test.cc b/external/gtest/src/gtest-typed-test.cc old mode 100644 new mode 100755 similarity index 96% rename from tests/gtest/src/gtest-typed-test.cc rename to external/gtest/src/gtest-typed-test.cc index a5cc88f920..f0079f407c --- a/tests/gtest/src/gtest-typed-test.cc +++ b/external/gtest/src/gtest-typed-test.cc @@ -58,10 +58,10 @@ const char* TypedTestCasePState::VerifyRegisteredTestNames( registered_tests = SkipSpaces(registered_tests); Message errors; - ::std::set<String> tests; + ::std::set<std::string> tests; for (const char* names = registered_tests; names != NULL; names = SkipComma(names)) { - const String name = GetPrefixUntilComma(names); + const std::string name = GetPrefixUntilComma(names); if (tests.count(name) != 0) { errors << "Test " << name << " is listed more than once.\n"; continue; @@ -93,7 +93,7 @@ const char* TypedTestCasePState::VerifyRegisteredTestNames( } } - const String& errors_str = errors.GetString(); + const std::string& errors_str = errors.GetString(); if (errors_str != "") { fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(), errors_str.c_str()); diff --git a/tests/gtest/src/gtest.cc b/external/gtest/src/gtest.cc old mode 100644 new mode 100755 similarity index 84% rename from tests/gtest/src/gtest.cc rename to external/gtest/src/gtest.cc index ee5eb1c495..e3ad4f35e4 --- a/tests/gtest/src/gtest.cc +++ b/external/gtest/src/gtest.cc @@ -39,10 +39,13 @@ #include <stdarg.h> #include <stdio.h> #include <stdlib.h> +#include <time.h> #include <wchar.h> #include <wctype.h> #include <algorithm> +#include <iomanip> +#include <limits> #include <ostream> // NOLINT #include <sstream> #include <vector> @@ -179,6 +182,10 @@ bool g_help_flag = false; } // namespace internal +static const char* GetDefaultFilter() { + return kUniversalFilter; +} + GTEST_DEFINE_bool_( also_run_disabled_tests, internal::BoolFromGTestEnv("also_run_disabled_tests", false), @@ -201,11 +208,11 @@ GTEST_DEFINE_string_( "Whether to use colors in the output. Valid values: yes, no, " "and auto. 'auto' means to use colors if the output is " "being sent to a terminal and the TERM environment variable " - "is set to xterm, xterm-color, xterm-256color, linux or cygwin."); + "is set to a terminal type that supports colors."); GTEST_DEFINE_string_( filter, - internal::StringFromGTestEnv("filter", kUniversalFilter), + internal::StringFromGTestEnv("filter", GetDefaultFilter()), "A colon-separated list of glob (not regex) patterns " "for filtering the tests to run, optionally followed by a " "'-' and a : separated list of negative patterns (tests to " @@ -305,7 +312,7 @@ UInt32 Random::Generate(UInt32 range) { // Test. g_init_gtest_count is set to the number of times // InitGoogleTest() has been called. We don't protect this variable // under a mutex as it is only accessed in the main thread. -int g_init_gtest_count = 0; +GTEST_API_ int g_init_gtest_count = 0; static bool GTestIsInitialized() { return g_init_gtest_count != 0; } // Iterates over a vector of TestCases, keeping a running sum of the @@ -360,10 +367,10 @@ void AssertHelper::operator=(const Message& message) const { } // Mutex for linked pointers. -GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex); +GTEST_API_ GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex); // Application pathname gotten in InitGoogleTest. -String g_executable_path; +std::string g_executable_path; // Returns the current application's name, removing directory path if that // is present. @@ -382,29 +389,29 @@ FilePath GetCurrentExecutableName() { // Functions for processing the gtest_output flag. // Returns the output format, or "" for normal printed output.
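GetOutputFormat, which follows, takes everything before the first ':' of --gtest_output, so "xml:report.xml" yields "xml". A minimal sketch of that split (hypothetical helper name):

#include <cstring>
#include <iostream>
#include <string>

std::string GetOutputFormatSketch(const char* gtest_output_flag) {
  const char* const colon = strchr(gtest_output_flag, ':');
  return (colon == NULL) ?
      std::string(gtest_output_flag) :
      std::string(gtest_output_flag, colon - gtest_output_flag);  // up to ':'
}

int main() {
  std::cout << GetOutputFormatSketch("xml:test_detail.xml") << "\n";  // "xml"
  std::cout << GetOutputFormatSketch("xml") << "\n";                  // "xml"
  return 0;
}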
-String UnitTestOptions::GetOutputFormat() { +std::string UnitTestOptions::GetOutputFormat() { const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); - if (gtest_output_flag == NULL) return String(""); + if (gtest_output_flag == NULL) return std::string(""); const char* const colon = strchr(gtest_output_flag, ':'); return (colon == NULL) ? - String(gtest_output_flag) : - String(gtest_output_flag, colon - gtest_output_flag); + std::string(gtest_output_flag) : + std::string(gtest_output_flag, colon - gtest_output_flag); } // Returns the name of the requested output file, or the default if none // was explicitly specified. -String UnitTestOptions::GetAbsolutePathToOutputFile() { +std::string UnitTestOptions::GetAbsolutePathToOutputFile() { const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); if (gtest_output_flag == NULL) - return String(""); + return ""; const char* const colon = strchr(gtest_output_flag, ':'); if (colon == NULL) - return String(internal::FilePath::ConcatPaths( - internal::FilePath( - UnitTest::GetInstance()->original_working_dir()), - internal::FilePath(kDefaultOutputFile)).ToString() ); + return internal::FilePath::ConcatPaths( + internal::FilePath( + UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(kDefaultOutputFile)).string(); internal::FilePath output_name(colon + 1); if (!output_name.IsAbsolutePath()) @@ -417,12 +424,12 @@ String UnitTestOptions::GetAbsolutePathToOutputFile() { internal::FilePath(colon + 1)); if (!output_name.IsDirectory()) - return output_name.ToString(); + return output_name.string(); internal::FilePath result(internal::FilePath::GenerateUniqueFileName( output_name, internal::GetCurrentExecutableName(), GetOutputFormat().c_str())); - return result.ToString(); + return result.string(); } // Returns true iff the wildcard pattern matches the string. The @@ -447,7 +454,8 @@ bool UnitTestOptions::PatternMatchesString(const char *pattern, } } -bool UnitTestOptions::MatchesFilter(const String& name, const char* filter) { +bool UnitTestOptions::MatchesFilter( + const std::string& name, const char* filter) { const char *cur_pattern = filter; for (;;) { if (PatternMatchesString(cur_pattern, name.c_str())) { @@ -467,28 +475,24 @@ bool UnitTestOptions::MatchesFilter(const String& name, const char* filter) { } } -// TODO(keithray): move String function implementations to gtest-string.cc. - // Returns true iff the user-specified filter matches the test case // name and the test name. -bool UnitTestOptions::FilterMatchesTest(const String &test_case_name, - const String &test_name) { - const String& full_name = String::Format("%s.%s", - test_case_name.c_str(), - test_name.c_str()); +bool UnitTestOptions::FilterMatchesTest(const std::string &test_case_name, + const std::string &test_name) { + const std::string& full_name = test_case_name + "." 
+ test_name.c_str(); // Split --gtest_filter at '-', if there is one, to separate into // positive filter and negative filter portions const char* const p = GTEST_FLAG(filter).c_str(); const char* const dash = strchr(p, '-'); - String positive; - String negative; + std::string positive; + std::string negative; if (dash == NULL) { positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter - negative = String(""); + negative = ""; } else { - positive = String(p, dash - p); // Everything up to the dash - negative = String(dash+1); // Everything after the dash + positive = std::string(p, dash); // Everything up to the dash + negative = std::string(dash + 1); // Everything after the dash if (positive.empty()) { // Treat '-test1' as the same as '*-test1' positive = kUniversalFilter; @@ -608,7 +612,7 @@ AssertionResult HasOneFailure(const char* /* results_expr */, const TestPartResultArray& results, TestPartResult::Type type, const string& substr) { - const String expected(type == TestPartResult::kFatalFailure ? + const std::string expected(type == TestPartResult::kFatalFailure ? "1 fatal failure" : "1 non-fatal failure"); Message msg; @@ -731,11 +735,22 @@ int UnitTestImpl::failed_test_count() const { return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count); } +// Gets the number of disabled tests that will be reported in the XML report. +int UnitTestImpl::reportable_disabled_test_count() const { + return SumOverTestCaseList(test_cases_, + &TestCase::reportable_disabled_test_count); +} + // Gets the number of disabled tests. int UnitTestImpl::disabled_test_count() const { return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count); } +// Gets the number of tests to be printed in the XML report. +int UnitTestImpl::reportable_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::reportable_test_count); +} + // Gets the number of all tests. int UnitTestImpl::total_test_count() const { return SumOverTestCaseList(test_cases_, &TestCase::total_test_count); @@ -746,7 +761,7 @@ int UnitTestImpl::test_to_run_count() const { return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count); } -// Returns the current OS stack trace as a String. +// Returns the current OS stack trace as an std::string. // // The maximum number of stack frames to be included is specified by // the gtest_stack_trace_depth flag. The skip_count parameter @@ -756,9 +771,9 @@ int UnitTestImpl::test_to_run_count() const { // For example, if Foo() calls Bar(), which in turn calls // CurrentOsStackTraceExceptTop(1), Foo() will be included in the // trace but Bar() and CurrentOsStackTraceExceptTop() won't. -String UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) { +std::string UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) { (void)skip_count; - return String(""); + return ""; } // Returns the current time in milliseconds. @@ -815,41 +830,7 @@ TimeInMillis GetTimeInMillis() { // Utilities -// class String - -// Returns the input enclosed in double quotes if it's not NULL; -// otherwise returns "(null)". For example, "\"Hello\"" is returned -// for input "Hello". -// -// This is useful for printing a C string in the syntax of a literal. -// -// Known issue: escape sequences are not handled yet. -String String::ShowCStringQuoted(const char* c_str) { - return c_str ? String::Format("\"%s\"", c_str) : String("(null)"); -} - -// Copies at most length characters from str into a newly-allocated -// piece of memory of size length+1. 
The memory is allocated with new[]. -// A terminating null byte is written to the memory, and a pointer to it -// is returned. If str is NULL, NULL is returned. -static char* CloneString(const char* str, size_t length) { - if (str == NULL) { - return NULL; - } else { - char* const clone = new char[length + 1]; - posix::StrNCpy(clone, str, length); - clone[length] = '\0'; - return clone; - } -} - -// Clones a 0-terminated C string, allocating memory using new. The -// caller is responsible for deleting[] the return value. Returns the -// cloned string, or NULL if the input is NULL. -const char * String::CloneCString(const char* c_str) { - return (c_str == NULL) ? - NULL : CloneString(c_str, strlen(c_str)); -} +// class String. #if GTEST_OS_WINDOWS_MOBILE // Creates a UTF-16 wide string from the given ANSI string, allocating @@ -906,11 +887,6 @@ bool String::CStringEquals(const char * lhs, const char * rhs) { // encoding, and streams the result to the given Message object. static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length, Message* msg) { - // TODO(wan): consider allowing a testing::String object to - // contain '\0'. This will make it behave more like std::string, - // and will allow ToUtf8String() to return the correct encoding - // for '\0' s.t. we can get rid of the conditional here (and in - // several other places). for (size_t i = 0; i != length; ) { // NOLINT if (wstr[i] != L'\0') { *msg << WideStringToUtf8(wstr + i, static_cast<int>(length - i)); @@ -927,6 +903,26 @@ static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length, } // namespace internal +// Constructs an empty Message. +// We allocate the stringstream separately because otherwise each use of +// ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's +// stack frame leading to huge stack frames in some cases; gcc does not reuse +// the stack space. +Message::Message() : ss_(new ::std::stringstream) { + // By default, we want there to be enough precision when printing + // a double to a Message. + *ss_ << std::setprecision(std::numeric_limits<double>::digits10 + 2); +} + +// These two overloads allow streaming a wide C string to a Message +// using the UTF-8 encoding. +Message& Message::operator <<(const wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); +} +Message& Message::operator <<(wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); +} + #if GTEST_HAS_STD_WSTRING // Converts the given wide string to a narrow string using the UTF-8 // encoding, and streams the result to this Message object. @@ -945,6 +941,12 @@ Message& Message::operator <<(const ::wstring& wstr) { } #endif // GTEST_HAS_GLOBAL_WSTRING +// Gets the text streamed to this object so far as an std::string. +// Each '\0' character in the buffer is replaced with "\\0". +std::string Message::GetString() const { + return internal::StringStreamToString(ss_.get()); +} + // AssertionResult constructors. // Used in EXPECT_TRUE/FALSE(assertion_result). AssertionResult::AssertionResult(const AssertionResult& other) @@ -997,8 +999,8 @@ namespace internal { // be inserted into the message.
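The new Message constructor's setprecision(digits10 + 2) matters for assertion messages: at the default ostream precision (6 significant digits), distinct doubles can print identically. A quick demonstration:

#include <iomanip>
#include <iostream>
#include <limits>
#include <sstream>

int main() {
  const double x = 1.0 / 3.0;
  std::stringstream plain, precise;
  plain << x;  // default precision: "0.333333"
  precise << std::setprecision(std::numeric_limits<double>::digits10 + 2) << x;
  std::cout << plain.str() << "\n" << precise.str() << "\n";  // 17 digits
  return 0;
}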
AssertionResult EqFailure(const char* expected_expression, const char* actual_expression, - const String& expected_value, - const String& actual_value, + const std::string& expected_value, + const std::string& actual_value, bool ignoring_case) { Message msg; msg << "Value of: " << actual_expression; @@ -1018,10 +1020,11 @@ AssertionResult EqFailure(const char* expected_expression, } // Constructs a failure message for Boolean assertions such as EXPECT_TRUE. -String GetBoolAssertionFailureMessage(const AssertionResult& assertion_result, - const char* expression_text, - const char* actual_predicate_value, - const char* expected_predicate_value) { +std::string GetBoolAssertionFailureMessage( + const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value) { const char* actual_message = assertion_result.message(); Message msg; msg << "Value of: " << expression_text @@ -1168,8 +1171,8 @@ AssertionResult CmpHelperSTREQ(const char* expected_expression, return EqFailure(expected_expression, actual_expression, - String::ShowCStringQuoted(expected), - String::ShowCStringQuoted(actual), + PrintToString(expected), + PrintToString(actual), false); } @@ -1184,8 +1187,8 @@ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression, return EqFailure(expected_expression, actual_expression, - String::ShowCStringQuoted(expected), - String::ShowCStringQuoted(actual), + PrintToString(expected), + PrintToString(actual), true); } @@ -1349,7 +1352,7 @@ AssertionResult HRESULTFailureHelper(const char* expr, // want inserts expanded. const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM | FORMAT_MESSAGE_IGNORE_INSERTS; - const DWORD kBufSize = 4096; // String::Format can't exceed this length. + const DWORD kBufSize = 4096; // Gets the system's human readable message string for this HRESULT. char error_text[kBufSize] = { '\0' }; DWORD message_length = ::FormatMessageA(kFlags, @@ -1359,7 +1362,7 @@ AssertionResult HRESULTFailureHelper(const char* expr, error_text, // output buffer kBufSize, // buf size NULL); // no arguments for inserts - // Trims tailing white space (FormatMessage leaves a trailing cr-lf) + // Trims tailing white space (FormatMessage leaves a trailing CR-LF) for (; message_length && IsSpace(error_text[message_length - 1]); --message_length) { error_text[message_length - 1] = '\0'; @@ -1367,10 +1370,10 @@ AssertionResult HRESULTFailureHelper(const char* expr, # endif // GTEST_OS_WINDOWS_MOBILE - const String error_hex(String::Format("0x%08X ", hr)); + const std::string error_hex("0x" + String::FormatHexInt(hr)); return ::testing::AssertionFailure() << "Expected: " << expr << " " << expected << ".\n" - << " Actual: " << error_hex << error_text << "\n"; + << " Actual: " << error_hex << " " << error_text << "\n"; } } // namespace @@ -1427,12 +1430,15 @@ inline UInt32 ChopLowBits(UInt32* bits, int n) { // Converts a Unicode code point to a narrow string in UTF-8 encoding. // code_point parameter is of type UInt32 because wchar_t may not be // wide enough to contain a code point. -// The output buffer str must containt at least 32 characters. -// The function returns the address of the output buffer. // If the code_point is not a valid Unicode code point -// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be output -// as '(Invalid Unicode 0xXXXXXXXX)'. -char* CodePointToUtf8(UInt32 code_point, char* str) { +// (i.e. 
outside of Unicode range U+0 to U+10FFFF) it will be converted +// to "(Invalid Unicode 0xXXXXXXXX)". +std::string CodePointToUtf8(UInt32 code_point) { + if (code_point > kMaxCodePoint4) { + return "(Invalid Unicode 0x" + String::FormatHexInt(code_point) + ")"; + } + + char str[5]; // Big enough for the largest valid code point. if (code_point <= kMaxCodePoint1) { str[1] = '\0'; str[0] = static_cast<char>(code_point); // 0xxxxxxx @@ -1445,22 +1451,12 @@ str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx str[0] = static_cast<char>(0xE0 | code_point); // 1110xxxx - } else if (code_point <= kMaxCodePoint4) { + } else { // code_point <= kMaxCodePoint4 str[4] = '\0'; str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6)); // 10xxxxxx str[0] = static_cast<char>(0xF0 | code_point); // 11110xxx - } else { - // The longest string String::Format can produce when invoked - // with these parameters is 28 character long (not including - // the terminating nul character). We are asking for 32 character - // buffer just in case. This is also enough for strncpy to - // null-terminate the destination string. - posix::StrNCpy( - str, String::Format("(Invalid Unicode 0x%X)", code_point).c_str(), 32); - str[31] = '\0'; // Makes sure no change in the format to strncpy leaves - // the result unterminated. } return str; } @@ -1501,7 +1497,7 @@ inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first, // as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding // and contains invalid UTF-16 surrogate pairs, values in those pairs // will be encoded as individual Unicode characters from Basic Normal Plane. -String WideStringToUtf8(const wchar_t* str, int num_chars) { +std::string WideStringToUtf8(const wchar_t* str, int num_chars) { if (num_chars == -1) num_chars = static_cast<int>(wcslen(str)); @@ -1519,27 +1515,17 @@ unicode_code_point = static_cast<UInt32>(str[i]); } - char buffer[32]; // CodePointToUtf8 requires a buffer this big. - stream << CodePointToUtf8(unicode_code_point, buffer); + stream << CodePointToUtf8(unicode_code_point); } return StringStreamToString(&stream); } -// Converts a wide C string to a String using the UTF-8 encoding. +// Converts a wide C string to an std::string using the UTF-8 encoding. // NULL will be converted to "(null)". -String String::ShowWideCString(const wchar_t * wide_c_str) { - if (wide_c_str == NULL) return String("(null)"); +std::string String::ShowWideCString(const wchar_t * wide_c_str) { + if (wide_c_str == NULL) return "(null)"; - return String(internal::WideStringToUtf8(wide_c_str, -1).c_str()); -} - -// Similar to ShowWideCString(), except that this function encloses -// the converted string in double quotes. -String String::ShowWideCStringQuoted(const wchar_t* wide_c_str) { - if (wide_c_str == NULL) return String("(null)"); - - return String::Format("L\"%s\"", - String::ShowWideCString(wide_c_str).c_str()); + return internal::WideStringToUtf8(wide_c_str, -1); } // Compares two wide C strings.
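CodePointToUtf8 above emits standard UTF-8: each continuation byte carries six payload bits (10xxxxxx) and the lead byte encodes the remainder. A sketch of the three-byte case, using plain shifts in place of ChopLowBits:

#include <cstdio>

int main() {
  unsigned code_point = 0x20AC;  // U+20AC, the Euro sign
  char str[4];
  str[3] = '\0';
  str[2] = static_cast<char>(0x80 | (code_point & 0x3F));  // low 6 bits  -> 10xxxxxx
  code_point >>= 6;
  str[1] = static_cast<char>(0x80 | (code_point & 0x3F));  // next 6 bits -> 10xxxxxx
  code_point >>= 6;
  str[0] = static_cast<char>(0xE0 | code_point);           // top 4 bits  -> 1110xxxx
  printf("%s\n", str);  // prints the Euro sign on a UTF-8 terminal
  return 0;
}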
Returns true iff they have the same @@ -1567,8 +1553,8 @@ AssertionResult CmpHelperSTREQ(const char* expected_expression, return EqFailure(expected_expression, actual_expression, - String::ShowWideCStringQuoted(expected), - String::ShowWideCStringQuoted(actual), + PrintToString(expected), + PrintToString(actual), false); } @@ -1583,8 +1569,8 @@ AssertionResult CmpHelperSTRNE(const char* s1_expression, return AssertionFailure() << "Expected: (" << s1_expression << ") != (" << s2_expression << "), actual: " - << String::ShowWideCStringQuoted(s1) - << " vs " << String::ShowWideCStringQuoted(s2); + << PrintToString(s1) + << " vs " << PrintToString(s2); } // Compares two C strings, ignoring case. Returns true iff they have @@ -1635,135 +1621,69 @@ bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs, #endif // OS selector } -// Compares this with another String. -// Returns < 0 if this is less than rhs, 0 if this is equal to rhs, or > 0 -// if this is greater than rhs. -int String::Compare(const String & rhs) const { - const char* const lhs_c_str = c_str(); - const char* const rhs_c_str = rhs.c_str(); - - if (lhs_c_str == NULL) { - return rhs_c_str == NULL ? 0 : -1; // NULL < anything except NULL - } else if (rhs_c_str == NULL) { - return 1; - } - - const size_t shorter_str_len = - length() <= rhs.length() ? length() : rhs.length(); - for (size_t i = 0; i != shorter_str_len; i++) { - if (lhs_c_str[i] < rhs_c_str[i]) { - return -1; - } else if (lhs_c_str[i] > rhs_c_str[i]) { - return 1; - } - } - return (length() < rhs.length()) ? -1 : - (length() > rhs.length()) ? 1 : 0; +// Returns true iff str ends with the given suffix, ignoring case. +// Any string is considered to end with an empty suffix. +bool String::EndsWithCaseInsensitive( + const std::string& str, const std::string& suffix) { + const size_t str_len = str.length(); + const size_t suffix_len = suffix.length(); + return (str_len >= suffix_len) && + CaseInsensitiveCStringEquals(str.c_str() + str_len - suffix_len, + suffix.c_str()); } -// Returns true iff this String ends with the given suffix. *Any* -// String is considered to end with a NULL or empty suffix. -bool String::EndsWith(const char* suffix) const { - if (suffix == NULL || CStringEquals(suffix, "")) return true; - - if (c_str() == NULL) return false; - - const size_t this_len = strlen(c_str()); - const size_t suffix_len = strlen(suffix); - return (this_len >= suffix_len) && - CStringEquals(c_str() + this_len - suffix_len, suffix); +// Formats an int value as "%02d". +std::string String::FormatIntWidth2(int value) { + std::stringstream ss; + ss << std::setfill('0') << std::setw(2) << value; + return ss.str(); } -// Returns true iff this String ends with the given suffix, ignoring case. -// Any String is considered to end with a NULL or empty suffix. -bool String::EndsWithCaseInsensitive(const char* suffix) const { - if (suffix == NULL || CStringEquals(suffix, "")) return true; - - if (c_str() == NULL) return false; - - const size_t this_len = strlen(c_str()); - const size_t suffix_len = strlen(suffix); - return (this_len >= suffix_len) && - CaseInsensitiveCStringEquals(c_str() + this_len - suffix_len, suffix); +// Formats an int value as "%X". +std::string String::FormatHexInt(int value) { + std::stringstream ss; + ss << std::hex << std::uppercase << value; + return ss.str(); } -// Formats a list of arguments to a String, using the same format -// spec string as for printf. -// -// We do not use the StringPrintf class as it is not universally -// available. 
-// -// The result is limited to 4096 characters (including the tailing 0). -// If 4096 characters are not enough to format the input, or if -// there's an error, "<formatting error or buffer exceeded>" is -// returned. -String String::Format(const char * format, ...) { - va_list args; - va_start(args, format); - - char buffer[4096]; - const int kBufferSize = sizeof(buffer)/sizeof(buffer[0]); - - // MSVC 8 deprecates vsnprintf(), so we want to suppress warning - // 4996 (deprecated function) there. -#ifdef _MSC_VER // We are using MSVC. -# pragma warning(push) // Saves the current warning state. -# pragma warning(disable:4996) // Temporarily disables warning 4996. - - const int size = vsnprintf(buffer, kBufferSize, format, args); - -# pragma warning(pop) // Restores the warning state. -#else // We are not using MSVC. - const int size = vsnprintf(buffer, kBufferSize, format, args); -#endif // _MSC_VER - va_end(args); - - // vsnprintf()'s behavior is not portable. When the buffer is not - // big enough, it returns a negative value in MSVC, and returns the - // needed buffer size on Linux. When there is an output error, it - // always returns a negative value. For simplicity, we lump the two - // error cases together. - if (size < 0 || size >= kBufferSize) { - return String("<formatting error or buffer exceeded>"); - } else { - return String(buffer, size); - } +// Formats a byte as "%02X". +std::string String::FormatByte(unsigned char value) { + std::stringstream ss; + ss << std::setfill('0') << std::setw(2) << std::hex << std::uppercase + << static_cast<unsigned int>(value); + return ss.str(); } -// Converts the buffer in a stringstream to a String, converting NUL +// Converts the buffer in a stringstream to an std::string, converting NUL // bytes to "\\0" along the way. -String StringStreamToString(::std::stringstream* ss) { +std::string StringStreamToString(::std::stringstream* ss) { const ::std::string& str = ss->str(); const char* const start = str.c_str(); const char* const end = start + str.length(); - // We need to use a helper stringstream to do this transformation - // because String doesn't support push_back(). - ::std::stringstream helper; + std::string result; + result.reserve(2 * (end - start)); for (const char* ch = start; ch != end; ++ch) { if (*ch == '\0') { - helper << "\\0"; // Replaces NUL with "\\0"; + result += "\\0"; // Replaces NUL with "\\0"; } else { - helper.put(*ch); + result += *ch; } } - return String(helper.str().c_str()); + return result; } // Appends the user-supplied message to the Google-Test-generated message. -String AppendUserMessage(const String& gtest_msg, - const Message& user_msg) { +std::string AppendUserMessage(const std::string& gtest_msg, + const Message& user_msg) { // Appends the user message if it's non-empty. - const String user_msg_string = user_msg.GetString(); + const std::string user_msg_string = user_msg.GetString(); if (user_msg_string.empty()) { return gtest_msg; } - Message msg; - msg << gtest_msg << "\n" << user_msg_string; - - return msg.GetString(); + return gtest_msg + "\n" + user_msg_string; } } // namespace internal @@ -1811,8 +1731,9 @@ void TestResult::AddTestPartResult(const TestPartResult& test_part_result) { // Adds a test property to the list. If a property with the same key as the // supplied property is already represented, the value of this test_property // replaces the old value for that key.
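StringStreamToString can now build its result with std::string::operator+= instead of a helper stringstream, because std::string (unlike the old String) holds embedded NULs; the visible behavior, replacing '\0' with the two characters "\\0", is unchanged. A standalone sketch:

#include <iostream>
#include <string>

std::string EscapeNulsSketch(const std::string& str) {
  std::string result;
  result.reserve(2 * str.size());
  for (size_t i = 0; i < str.size(); ++i) {
    if (str[i] == '\0')
      result += "\\0";   // embedded NUL becomes the two characters \0
    else
      result += str[i];
  }
  return result;
}

int main() {
  const std::string s("ab\0cd", 5);          // five bytes, one embedded NUL
  std::cout << EscapeNulsSketch(s) << "\n";  // prints ab\0cd
  return 0;
}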
-void TestResult::RecordProperty(const TestProperty& test_property) { - if (!ValidateTestProperty(test_property)) { +void TestResult::RecordProperty(const std::string& xml_element, + const TestProperty& test_property) { + if (!ValidateTestProperty(xml_element, test_property)) { return; } internal::MutexLock lock(&test_properites_mutex_); @@ -1826,21 +1747,94 @@ property_with_matching_key->SetValue(test_property.value()); } -// Adds a failure if the key is a reserved attribute of Google Test -// testcase tags. Returns true if the property is valid. -bool TestResult::ValidateTestProperty(const TestProperty& test_property) { - internal::String key(test_property.key()); - if (key == "name" || key == "status" || key == "time" || key == "classname") { - ADD_FAILURE() - << "Reserved key used in RecordProperty(): " - << key - << " ('name', 'status', 'time', and 'classname' are reserved by " - << GTEST_NAME_ << ")"; +// The list of reserved attributes used in the <testsuites> element of XML +// output. +static const char* const kReservedTestSuitesAttributes[] = { + "disabled", + "errors", + "failures", + "name", + "random_seed", + "tests", + "time", + "timestamp" +}; + +// The list of reserved attributes used in the <testsuite> element of XML +// output. +static const char* const kReservedTestSuiteAttributes[] = { + "disabled", + "errors", + "failures", + "name", + "tests", + "time" +}; + +// The list of reserved attributes used in the <testcase> element of XML output. +static const char* const kReservedTestCaseAttributes[] = { + "classname", + "name", + "status", + "time", + "type_param", + "value_param" +}; + +template <int kSize> +std::vector<std::string> ArrayAsVector(const char* const (&array)[kSize]) { + return std::vector<std::string>(array, array + kSize); +} + +static std::vector<std::string> GetReservedAttributesForElement( + const std::string& xml_element) { + if (xml_element == "testsuites") { + return ArrayAsVector(kReservedTestSuitesAttributes); + } else if (xml_element == "testsuite") { + return ArrayAsVector(kReservedTestSuiteAttributes); + } else if (xml_element == "testcase") { + return ArrayAsVector(kReservedTestCaseAttributes); + } else { + GTEST_CHECK_(false) << "Unrecognized xml_element provided: " << xml_element; + } + // This code is unreachable but some compilers may not realizes that. + return std::vector<std::string>(); +} + +static std::string FormatWordList(const std::vector<std::string>& words) { + Message word_list; + for (size_t i = 0; i < words.size(); ++i) { + if (i > 0 && words.size() > 2) { + word_list << ", "; + } + if (i == words.size() - 1) { + word_list << "and "; + } + word_list << "'" << words[i] << "'"; + } + return word_list.GetString(); +} + +bool ValidateTestPropertyName(const std::string& property_name, + const std::vector<std::string>& reserved_names) { + if (std::find(reserved_names.begin(), reserved_names.end(), property_name) != + reserved_names.end()) { + ADD_FAILURE() << "Reserved key used in RecordProperty(): " << property_name + << " (" << FormatWordList(reserved_names) + << " are reserved by " << GTEST_NAME_ << ")"; return false; } return true; } +// Adds a failure if the key is a reserved attribute of the element named +// xml_element. Returns true if the property is valid. +bool TestResult::ValidateTestProperty(const std::string& xml_element, + const TestProperty& test_property) { + return ValidateTestPropertyName(test_property.key(), + GetReservedAttributesForElement(xml_element)); +} + // Clears the object.
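The validation added above rejects property keys that collide with attributes gtest itself emits for the target XML element; from a test body this surfaces through the public Test::RecordProperty API. The reserved-name check is a plain std::find, as this sketch shows (abbreviated reserved list):

#include <algorithm>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> reserved;  // abbreviated <testcase> list
  reserved.push_back("classname");
  reserved.push_back("name");
  reserved.push_back("status");
  reserved.push_back("time");

  const std::string key = "name";
  if (std::find(reserved.begin(), reserved.end(), key) != reserved.end())
    std::cout << "Reserved key used in RecordProperty(): " << key << "\n";
  else
    std::cout << key << " is fine\n";
  return 0;
}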
void TestResult::Clear() { test_part_results_.clear(); @@ -1916,12 +1910,12 @@ void Test::TearDown() { } // Allows user supplied key value pairs to be recorded for later output. -void Test::RecordProperty(const char* key, const char* value) { - UnitTest::GetInstance()->RecordPropertyForCurrentTest(key, value); +void Test::RecordProperty(const std::string& key, const std::string& value) { + UnitTest::GetInstance()->RecordProperty(key, value); } // Allows user supplied key value pairs to be recorded for later output. -void Test::RecordProperty(const char* key, int value) { +void Test::RecordProperty(const std::string& key, int value) { Message value_message; value_message << value; RecordProperty(key, value_message.GetString().c_str()); @@ -1930,7 +1924,7 @@ void Test::RecordProperty(const char* key, int value) { namespace internal { void ReportFailureInUnknownLocation(TestPartResult::Type result_type, - const String& message) { + const std::string& message) { // This function is a friend of UnitTest and as such has access to // AddTestPartResult. UnitTest::GetInstance()->AddTestPartResult( @@ -1938,7 +1932,7 @@ void ReportFailureInUnknownLocation(TestPartResult::Type result_type, NULL, // No info about the source file where the exception occurred. -1, // We have no info on which line caused the exception. message, - String()); // No stack trace, either. + ""); // No stack trace, either. } } // namespace internal @@ -2015,22 +2009,24 @@ bool Test::HasSameFixtureClass() { // function returns its result via an output parameter pointer because VC++ // prohibits creation of objects with destructors on stack in functions // using __try (see error C2712). -static internal::String* FormatSehExceptionMessage(DWORD exception_code, - const char* location) { +static std::string* FormatSehExceptionMessage(DWORD exception_code, + const char* location) { Message message; message << "SEH exception with code 0x" << std::setbase(16) << exception_code << std::setbase(10) << " thrown in " << location << "."; - return new internal::String(message.GetString()); + return new std::string(message.GetString()); } #endif // GTEST_HAS_SEH +namespace internal { + #if GTEST_HAS_EXCEPTIONS // Adds an "exception thrown" fatal failure to the current test. -static internal::String FormatCxxExceptionMessage(const char* description, - const char* location) { +static std::string FormatCxxExceptionMessage(const char* description, + const char* location) { Message message; if (description != NULL) { message << "C++ exception with description \"" << description << "\""; @@ -2042,23 +2038,15 @@ static internal::String FormatCxxExceptionMessage(const char* description, return message.GetString(); } -static internal::String PrintTestPartResultToString( +static std::string PrintTestPartResultToString( const TestPartResult& test_part_result); -// A failed Google Test assertion will throw an exception of this type when -// GTEST_FLAG(throw_on_failure) is true (if exceptions are enabled). We -// derive it from std::runtime_error, which is for errors presumably -// detectable only at run time. Since std::runtime_error inherits from -// std::exception, many testing frameworks know how to extract and print the -// message inside it. 
-class GoogleTestFailureException : public ::std::runtime_error { - public: - explicit GoogleTestFailureException(const TestPartResult& failure) - : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {} -}; +GoogleTestFailureException::GoogleTestFailureException( + const TestPartResult& failure) + : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {} + #endif // GTEST_HAS_EXCEPTIONS -namespace internal { // We put these helper functions in the internal namespace as IBM's xlC // compiler rejects the code if they were declared static. @@ -2078,7 +2066,7 @@ Result HandleSehExceptionsInMethodIfSupported( // We create the exception message on the heap because VC++ prohibits // creation of objects with destructors on stack in functions using __try // (see error C2712). - internal::String* exception_message = FormatSehExceptionMessage( + std::string* exception_message = FormatSehExceptionMessage( GetExceptionCode(), location); internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure, *exception_message); @@ -2124,9 +2112,10 @@ Result HandleExceptionsInMethodIfSupported( #if GTEST_HAS_EXCEPTIONS try { return HandleSehExceptionsInMethodIfSupported(object, method, location); - } catch (const GoogleTestFailureException&) { // NOLINT - // This exception doesn't originate in code under test. It makes no - // sense to report it as a test failure. + } catch (const internal::GoogleTestFailureException&) { // NOLINT + // This exception type can only be thrown by a failed Google + // Test assertion with the intention of letting another testing + // framework catch it. Therefore we just re-throw it. throw; } catch (const std::exception& e) { // NOLINT internal::ReportFailureInUnknownLocation( @@ -2185,10 +2174,8 @@ bool Test::HasNonfatalFailure() { // Constructs a TestInfo object. It assumes ownership of the test factory // object. -// TODO(vladl@google.com): Make a_test_case_name and a_name const string&'s -// to signify they cannot be NULLs. -TestInfo::TestInfo(const char* a_test_case_name, - const char* a_name, +TestInfo::TestInfo(const std::string& a_test_case_name, + const std::string& a_name, const char* a_type_param, const char* a_value_param, internal::TypeId fixture_class_id, @@ -2227,7 +2214,8 @@ namespace internal { // The newly created TestInfo instance will assume // ownership of the factory object. TestInfo* MakeAndRegisterTestInfo( - const char* test_case_name, const char* name, + const char* test_case_name, + const char* name, const char* type_param, const char* value_param, TypeId fixture_class_id, @@ -2282,11 +2270,11 @@ class TestNameIs { // Returns true iff the test name of test_info matches name_. bool operator()(const TestInfo * test_info) const { - return test_info && internal::String(test_info->name()).Compare(name_) == 0; + return test_info && test_info->name() == name_; } private: - internal::String name_; + std::string name_; }; } // namespace @@ -2365,10 +2353,21 @@ int TestCase::failed_test_count() const { return CountIf(test_info_list_, TestFailed); } +// Gets the number of disabled tests that will be reported in the XML report. +int TestCase::reportable_disabled_test_count() const { + return CountIf(test_info_list_, TestReportableDisabled); +} + +// Gets the number of disabled tests in this test case. int TestCase::disabled_test_count() const { return CountIf(test_info_list_, TestDisabled); } +// Gets the number of tests to be printed in the XML report. 
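The routing policy above distinguishes gtest's own failure exception (re-thrown so an enclosing framework can report it, which works because it derives from std::runtime_error and thus std::exception) from foreign exceptions (converted into reported failures). A condensed sketch of both halves (all names hypothetical):

#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>

class OwnFailureException : public std::runtime_error {
 public:
  explicit OwnFailureException(const std::string& failure)
      : std::runtime_error(failure) {}  // what() carries the failure text
};

void ReportFailureSketch(const std::string& message) {
  std::cout << "reported: " << message << "\n";
}

void RunBodyGuarded(void (*body)(), const char* location) {
  try {
    body();
  } catch (const OwnFailureException&) {
    throw;  // our own failure: let an outer framework see it
  } catch (const std::exception& e) {
    ReportFailureSketch(std::string("C++ exception with description \"") +
                        e.what() + "\" thrown in " + location + ".");
  }
}

void ThrowingBody() { throw std::logic_error("boom"); }

int main() {
  RunBodyGuarded(&ThrowingBody, "the test body");
  return 0;
}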
+int TestCase::reportable_test_count() const { + return CountIf(test_info_list_, TestReportable); +} + // Get the number of tests in this test case that should run. int TestCase::test_to_run_count() const { return CountIf(test_info_list_, ShouldRunTest); @@ -2456,6 +2455,7 @@ void TestCase::Run() { // Clears the results of all tests in this test case. void TestCase::ClearResult() { + ad_hoc_test_result_.Clear(); ForEach(test_info_list_, TestInfo::ClearTestResult); } @@ -2476,20 +2476,20 @@ void TestCase::UnshuffleTests() { // // FormatCountableNoun(1, "formula", "formuli") returns "1 formula". // FormatCountableNoun(5, "book", "books") returns "5 books". -static internal::String FormatCountableNoun(int count, - const char * singular_form, - const char * plural_form) { - return internal::String::Format("%d %s", count, - count == 1 ? singular_form : plural_form); +static std::string FormatCountableNoun(int count, + const char * singular_form, + const char * plural_form) { + return internal::StreamableToString(count) + " " + + (count == 1 ? singular_form : plural_form); } // Formats the count of tests. -static internal::String FormatTestCount(int test_count) { +static std::string FormatTestCount(int test_count) { return FormatCountableNoun(test_count, "test", "tests"); } // Formats the count of test cases. -static internal::String FormatTestCaseCount(int test_case_count) { +static std::string FormatTestCaseCount(int test_case_count) { return FormatCountableNoun(test_case_count, "test case", "test cases"); } @@ -2514,8 +2514,10 @@ static const char * TestPartResultTypeToString(TestPartResult::Type type) { } } -// Prints a TestPartResult to a String. -static internal::String PrintTestPartResultToString( +namespace internal { + +// Prints a TestPartResult to an std::string. +static std::string PrintTestPartResultToString( const TestPartResult& test_part_result) { return (Message() << internal::FormatFileLocation(test_part_result.file_name(), @@ -2526,7 +2528,7 @@ static internal::String PrintTestPartResultToString( // Prints a TestPartResult. static void PrintTestPartResult(const TestPartResult& test_part_result) { - const internal::String& result = + const std::string& result = PrintTestPartResultToString(test_part_result); printf("%s\n", result.c_str()); fflush(stdout); @@ -2545,8 +2547,6 @@ static void PrintTestPartResult(const TestPartResult& test_part_result) { // class PrettyUnitTestResultPrinter -namespace internal { - enum GTestColor { COLOR_DEFAULT, COLOR_RED, @@ -2598,6 +2598,7 @@ bool ShouldUseColor(bool stdout_is_tty) { String::CStringEquals(term, "xterm-color") || String::CStringEquals(term, "xterm-256color") || String::CStringEquals(term, "screen") || + String::CStringEquals(term, "screen-256color") || String::CStringEquals(term, "linux") || String::CStringEquals(term, "cygwin"); return stdout_is_tty && term_supports_color; @@ -2621,7 +2622,7 @@ void ColoredPrintf(GTestColor color, const char* fmt, ...) { va_list args; va_start(args, fmt); -#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS +#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS || GTEST_OS_IOS const bool use_color = false; #else static const bool in_color_mode = @@ -2663,6 +2664,11 @@ void ColoredPrintf(GTestColor color, const char* fmt, ...) { va_end(args); } +// Text printed in Google Test's text output and --gunit_list_tests +// output to label the type parameter and value parameter for a test. 
+static const char kTypeParamLabel[] = "TypeParam"; +static const char kValueParamLabel[] = "GetParam()"; + void PrintFullTestCommentIfPresent(const TestInfo& test_info) { const char* const type_param = test_info.type_param(); const char* const value_param = test_info.value_param(); @@ -2670,12 +2676,12 @@ void PrintFullTestCommentIfPresent(const TestInfo& test_info) { if (type_param != NULL || value_param != NULL) { printf(", where "); if (type_param != NULL) { - printf("TypeParam = %s", type_param); + printf("%s = %s", kTypeParamLabel, type_param); if (value_param != NULL) printf(" and "); } if (value_param != NULL) { - printf("GetParam() = %s", value_param); + printf("%s = %s", kValueParamLabel, value_param); } } } @@ -2691,24 +2697,22 @@ class PrettyUnitTestResultPrinter : public TestEventListener { } // The following methods override what's in the TestEventListener class. - virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {} - virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration); - virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test); - virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {} - virtual void OnTestCaseStart(const TestCase& test_case); - virtual void OnTestStart(const TestInfo& test_info); - virtual void OnTestPartResult(const TestPartResult& result); - virtual void OnTestEnd(const TestInfo& test_info); - virtual void OnTestCaseEnd(const TestCase& test_case); - virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test); - virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {} - virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); - virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) override {} + virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration) override; + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) override; + virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) override {} + virtual void OnTestCaseStart(const TestCase& test_case) override; + virtual void OnTestStart(const TestInfo& test_info) override; + virtual void OnTestPartResult(const TestPartResult& result) override; + virtual void OnTestEnd(const TestInfo& test_info) override; + virtual void OnTestCaseEnd(const TestCase& test_case) override; + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) override; + virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) override {} + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override; + virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) override {} private: static void PrintFailedTests(const UnitTest& unit_test); - - internal::String test_case_name_; }; // Fired before each iteration of tests starts. @@ -2721,7 +2725,7 @@ void PrettyUnitTestResultPrinter::OnTestIterationStart( // Prints the filter if it's not *. This reminds the user that some // tests may be skipped. 
- if (!internal::String::CStringEquals(filter, kUniversalFilter)) { + if (!String::CStringEquals(filter, kUniversalFilter)) { ColoredPrintf(COLOR_YELLOW, "Note: %s filter = %s\n", GTEST_NAME_, filter); } @@ -2755,22 +2759,21 @@ void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart( } void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) { - test_case_name_ = test_case.name(); - const internal::String counts = + const std::string counts = FormatCountableNoun(test_case.test_to_run_count(), "test", "tests"); ColoredPrintf(COLOR_GREEN, "[----------] "); - printf("%s from %s", counts.c_str(), test_case_name_.c_str()); + printf("%s from %s", counts.c_str(), test_case.name()); if (test_case.type_param() == NULL) { printf("\n"); } else { - printf(", where TypeParam = %s\n", test_case.type_param()); + printf(", where %s = %s\n", kTypeParamLabel, test_case.type_param()); } fflush(stdout); } void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) { ColoredPrintf(COLOR_GREEN, "[ RUN ] "); - PrintTestName(test_case_name_.c_str(), test_info.name()); + PrintTestName(test_info.test_case_name(), test_info.name()); printf("\n"); fflush(stdout); } @@ -2793,7 +2796,7 @@ void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) { } else { ColoredPrintf(COLOR_RED, "[ FAILED ] "); } - PrintTestName(test_case_name_.c_str(), test_info.name()); + PrintTestName(test_info.test_case_name(), test_info.name()); if (test_info.result()->Failed()) PrintFullTestCommentIfPresent(test_info); @@ -2809,12 +2812,11 @@ void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) { void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) { if (!GTEST_FLAG(print_time)) return; - test_case_name_ = test_case.name(); - const internal::String counts = + const std::string counts = FormatCountableNoun(test_case.test_to_run_count(), "test", "tests"); ColoredPrintf(COLOR_GREEN, "[----------] "); printf("%s from %s (%s ms total)\n\n", - counts.c_str(), test_case_name_.c_str(), + counts.c_str(), test_case.name(), internal::StreamableToString(test_case.elapsed_time()).c_str()); fflush(stdout); } @@ -2875,7 +2877,7 @@ void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, num_failures == 1 ? "TEST" : "TESTS"); } - int num_disabled = unit_test.disabled_test_count(); + int num_disabled = unit_test.reportable_disabled_test_count(); if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) { if (!num_failures) { printf("\n"); // Add a spacer if no FAILURE banner is displayed. 
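The pretty printer above is one implementation of gtest's TestEventListener interface; the same hooks are open to user code through the public listener API. A minimal custom printer, following the pattern gtest's own samples use:

#include "gtest/gtest.h"
#include <cstdio>

class TersePrinter : public ::testing::EmptyTestEventListener {
  // Called after each test; print one line per test.
  virtual void OnTestEnd(const ::testing::TestInfo& test_info) {
    printf("%s.%s %s\n", test_info.test_case_name(), test_info.name(),
           test_info.result()->Failed() ? "FAILED" : "OK");
  }
};

TEST(ListenerDemo, Passes) { EXPECT_EQ(1, 1); }

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::testing::TestEventListeners& listeners =
      ::testing::UnitTest::GetInstance()->listeners();
  listeners.Append(new TersePrinter);  // the list takes ownership
  return RUN_ALL_TESTS();
}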
@@ -2906,19 +2908,19 @@ class TestEventRepeater : public TestEventListener { bool forwarding_enabled() const { return forwarding_enabled_; } void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; } - virtual void OnTestProgramStart(const UnitTest& unit_test); - virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration); - virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test); - virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test); - virtual void OnTestCaseStart(const TestCase& test_case); - virtual void OnTestStart(const TestInfo& test_info); - virtual void OnTestPartResult(const TestPartResult& result); - virtual void OnTestEnd(const TestInfo& test_info); - virtual void OnTestCaseEnd(const TestCase& test_case); - virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test); - virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test); - virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); - virtual void OnTestProgramEnd(const UnitTest& unit_test); + virtual void OnTestProgramStart(const UnitTest& unit_test) override; + virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration) override; + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) override; + virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) override; + virtual void OnTestCaseStart(const TestCase& test_case) override; + virtual void OnTestStart(const TestInfo& test_info) override; + virtual void OnTestPartResult(const TestPartResult& result) override; + virtual void OnTestEnd(const TestInfo& test_info) override; + virtual void OnTestCaseEnd(const TestCase& test_case) override; + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) override; + virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) override; + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override; + virtual void OnTestProgramEnd(const UnitTest& unit_test) override; private: // Controls whether events will be forwarded to listeners_. Set to false @@ -3011,7 +3013,7 @@ class XmlUnitTestResultPrinter : public EmptyTestEventListener { public: explicit XmlUnitTestResultPrinter(const char* output_file); - virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration) override; private: // Is c a whitespace character that is normalized to a space character @@ -3029,18 +3031,27 @@ class XmlUnitTestResultPrinter : public EmptyTestEventListener { // is_attribute is true, the text is meant to appear as an attribute // value, and normalizable whitespace is preserved by replacing it // with character references. - static String EscapeXml(const char* str, bool is_attribute); + static std::string EscapeXml(const std::string& str, bool is_attribute); // Returns the given string with all characters invalid in XML removed. - static string RemoveInvalidXmlCharacters(const string& str); + static std::string RemoveInvalidXmlCharacters(const std::string& str); // Convenience wrapper around EscapeXml when str is an attribute value. - static String EscapeXmlAttribute(const char* str) { + static std::string EscapeXmlAttribute(const std::string& str) { return EscapeXml(str, true); } // Convenience wrapper around EscapeXml when str is not an attribute value. 
- static String EscapeXmlText(const char* str) { return EscapeXml(str, false); } + static std::string EscapeXmlText(const char* str) { + return EscapeXml(str, false); + } + + // Verifies that the given attribute belongs to the given element and + // streams the attribute as XML. + static void OutputXmlAttribute(std::ostream* stream, + const std::string& element_name, + const std::string& name, + const std::string& value); // Streams an XML CDATA section, escaping invalid CDATA sequences as needed. static void OutputXmlCDataSection(::std::ostream* stream, const char* data); @@ -3051,19 +3062,21 @@ class XmlUnitTestResultPrinter : public EmptyTestEventListener { const TestInfo& test_info); // Prints an XML representation of a TestCase object - static void PrintXmlTestCase(FILE* out, const TestCase& test_case); + static void PrintXmlTestCase(::std::ostream* stream, + const TestCase& test_case); // Prints an XML summary of unit_test to output stream out. - static void PrintXmlUnitTest(FILE* out, const UnitTest& unit_test); + static void PrintXmlUnitTest(::std::ostream* stream, + const UnitTest& unit_test); // Produces a string representing the test properties in a result as space // delimited XML attributes based on the property key="value" pairs. - // When the String is not empty, it includes a space at the beginning, + // When the std::string is not empty, it includes a space at the beginning, // to delimit this attribute from prior attributes. - static String TestPropertiesAsXmlAttributes(const TestResult& result); + static std::string TestPropertiesAsXmlAttributes(const TestResult& result); // The output file. - const String output_file_; + const std::string output_file_; GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter); }; @@ -3105,7 +3118,9 @@ void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, fflush(stderr); exit(EXIT_FAILURE); } - PrintXmlUnitTest(xmlout, unit_test); + std::stringstream stream; + PrintXmlUnitTest(&stream, unit_test); + fprintf(xmlout, "%s", StringStreamToString(&stream).c_str()); fclose(xmlout); } @@ -3121,42 +3136,43 @@ void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, // most invalid characters can be retained using character references. // TODO(wan): It might be nice to have a minimally invasive, human-readable // escaping scheme for invalid characters, rather than dropping them. 
-String XmlUnitTestResultPrinter::EscapeXml(const char* str, bool is_attribute) { +std::string XmlUnitTestResultPrinter::EscapeXml( + const std::string& str, bool is_attribute) { Message m; - if (str != NULL) { - for (const char* src = str; *src; ++src) { - switch (*src) { - case '<': - m << "&lt;"; - break; - case '>': - m << "&gt;"; - break; - case '&': - m << "&amp;"; - break; - case '\'': - if (is_attribute) - m << "&apos;"; - else - m << '\''; - break; - case '"': - if (is_attribute) - m << "&quot;"; + for (size_t i = 0; i < str.size(); ++i) { + const char ch = str[i]; + switch (ch) { + case '<': + m << "&lt;"; + break; + case '>': + m << "&gt;"; + break; + case '&': + m << "&amp;"; + break; + case '\'': + if (is_attribute) + m << "&apos;"; + else + m << '\''; + break; + case '"': + if (is_attribute) + m << "&quot;"; + else + m << '"'; + break; + default: + if (IsValidXmlCharacter(ch)) { + if (is_attribute && IsNormalizableWhitespace(ch)) + m << "&#x" << String::FormatByte(static_cast<unsigned char>(ch)) + << ";"; else - m << '"'; - break; - default: - if (IsValidXmlCharacter(*src)) { - if (is_attribute && IsNormalizableWhitespace(*src)) - m << String::Format("&#x%02X;", unsigned(*src)); - else - m << *src; - } - break; - } + m << ch; + } + break; } } @@ -3166,10 +3182,11 @@ String XmlUnitTestResultPrinter::EscapeXml(const char* str, bool is_attribute) { // Returns the given string with all characters invalid in XML removed. // Currently invalid characters are dropped from the string. An // alternative is to replace them with certain characters such as . or ?. -string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(const string& str) { - string output; +std::string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters( + const std::string& str) { + std::string output; output.reserve(str.size()); - for (string::const_iterator it = str.begin(); it != str.end(); ++it) + for (std::string::const_iterator it = str.begin(); it != str.end(); ++it) if (IsValidXmlCharacter(*it)) output.push_back(*it); @@ -3199,6 +3216,32 @@ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) { return ss.str(); } +// Converts the given epoch time in milliseconds to a date string in the ISO +// 8601 format, without the timezone information. +std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms) { + // Using non-reentrant version as localtime_r is not portable. + time_t seconds = static_cast<time_t>(ms / 1000); +#ifdef _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4996) // Temporarily disables warning 4996 + // (function or variable may be unsafe). + const struct tm* const time_struct = localtime(&seconds); // NOLINT +# pragma warning(pop) // Restores the warning state again. +#else + const struct tm* const time_struct = localtime(&seconds); // NOLINT +#endif + if (time_struct == NULL) + return ""; // Invalid ms value + + // YYYY-MM-DDThh:mm:ss + return StreamableToString(time_struct->tm_year + 1900) + "-" + + String::FormatIntWidth2(time_struct->tm_mon + 1) + "-" + + String::FormatIntWidth2(time_struct->tm_mday) + "T" + + String::FormatIntWidth2(time_struct->tm_hour) + ":" + + String::FormatIntWidth2(time_struct->tm_min) + ":" + + String::FormatIntWidth2(time_struct->tm_sec); +} + // Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream, const char* data) { @@ -3219,48 +3262,63 @@ void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream, *stream << "]]>"; } +void XmlUnitTestResultPrinter::OutputXmlAttribute( + std::ostream* stream, + const std::string& element_name, + const std::string& name, + const std::string& value) { + const std::vector<std::string>& allowed_names = + GetReservedAttributesForElement(element_name); + + GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) != + allowed_names.end()) + << "Attribute " << name << " is not allowed for element <" << element_name + << ">."; + + *stream << " " << name << "=\"" << EscapeXmlAttribute(value) << "\""; +} + // Prints an XML representation of a TestInfo object. // TODO(wan): There is also value in printing properties with the plain printer. void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream, const char* test_case_name, const TestInfo& test_info) { - if (test_info.filtered_out ()) - return; - const TestResult& result = *test_info.result(); - *stream << " \n"; - *stream << " "; + const TestResult& result = *test_info.result(); + const std::string kTestcase = "testcase"; + + *stream << "    <testcase"; + OutputXmlAttribute(stream, kTestcase, "name", test_info.name()); + if (test_info.value_param() != NULL) { + OutputXmlAttribute(stream, kTestcase, "value_param", test_info.value_param()); + } + if (test_info.type_param() != NULL) { + OutputXmlAttribute(stream, kTestcase, "type_param", test_info.type_param()); + } + OutputXmlAttribute(stream, kTestcase, "status", test_info.should_run() ? "run" : "notrun"); + OutputXmlAttribute(stream, kTestcase, "time", FormatTimeInMillisAsSeconds(result.elapsed_time())); + OutputXmlAttribute(stream, kTestcase, "classname", test_case_name); + *stream << TestPropertiesAsXmlAttributes(result); + + int failures = 0; + for (int i = 0; i < result.total_part_count(); ++i) { + const TestPartResult& part = result.GetTestPartResult(i); + if (part.failed()) { + if (++failures == 1) { + *stream << ">\n"; + } const string location = internal::FormatCompilerIndependentFileLocation( part.file_name(), part.line_number()); - const string message = location + "\n" + part.message(); - OutputXmlCDataSection(stream, - RemoveInvalidXmlCharacters(message).c_str()); + const string summary = location + "\n" + part.summary(); + *stream << "      <failure message=\"" << EscapeXmlAttribute(summary.c_str()) << "\" type=\"\">"; + const string detail = location + "\n" + part.message(); + OutputXmlCDataSection(stream, RemoveInvalidXmlCharacters(detail).c_str()); *stream << "</failure>\n"; } } @@ -3272,52 +3330,73 @@ void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream, } // Prints an XML representation of a TestCase object -void XmlUnitTestResultPrinter::PrintXmlTestCase(FILE* out, +void XmlUnitTestResultPrinter::PrintXmlTestCase(std::ostream* stream, const TestCase& test_case) { - if (test_case.should_skip_report ()) - return; + const std::string kTestsuite = "testsuite"; + *stream << "  <" << kTestsuite; + OutputXmlAttribute(stream, kTestsuite, "name", test_case.name()); + OutputXmlAttribute(stream, kTestsuite, "tests", + StreamableToString(test_case.reportable_test_count())); + OutputXmlAttribute(stream, kTestsuite, "failures", + StreamableToString(test_case.failed_test_count())); + OutputXmlAttribute( + stream, kTestsuite, "disabled", + StreamableToString(test_case.reportable_disabled_test_count())); + OutputXmlAttribute(stream, kTestsuite, "errors", "0"); + OutputXmlAttribute(stream, kTestsuite, "time", + FormatTimeInMillisAsSeconds(test_case.elapsed_time())); + *stream << TestPropertiesAsXmlAttributes(test_case.ad_hoc_test_result()) + << ">\n"; - fprintf(out, - " \n", - FormatTimeInMillisAsSeconds(test_case.elapsed_time()).c_str()); for (int i = 0; i < test_case.total_test_count(); ++i) { - ::std::stringstream stream; - OutputXmlTestInfo(&stream, test_case.name(), *test_case.GetTestInfo(i)); - fprintf(out, "%s", StringStreamToString(&stream).c_str()); + if (test_case.GetTestInfo(i)->is_reportable()) + OutputXmlTestInfo(stream, test_case.name(), *test_case.GetTestInfo(i)); } - fprintf(out, "  </testsuite>\n"); + *stream << "  </" << kTestsuite << ">\n"; } // Prints an XML summary of unit_test to output stream out.
-void XmlUnitTestResultPrinter::PrintXmlUnitTest(FILE* out, +void XmlUnitTestResultPrinter::PrintXmlUnitTest(std::ostream* stream, const UnitTest& unit_test) { - fprintf(out, "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"); - fprintf(out, - "<testsuites tests=\"%d\" failures=\"%d\" disabled=\"%d\" errors=\"0\" time=\"%s\" ",\n"; + const std::string kTestsuites = "testsuites"; + + *stream << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n"; + *stream << "<" << kTestsuites; + + OutputXmlAttribute(stream, kTestsuites, "tests", + StreamableToString(unit_test.reportable_test_count())); + OutputXmlAttribute(stream, kTestsuites, "failures", + StreamableToString(unit_test.failed_test_count())); + OutputXmlAttribute( + stream, kTestsuites, "disabled", + StreamableToString(unit_test.reportable_disabled_test_count())); + OutputXmlAttribute(stream, kTestsuites, "errors", "0"); + OutputXmlAttribute( + stream, kTestsuites, "timestamp", + FormatEpochTimeInMillisAsIso8601(unit_test.start_timestamp())); + OutputXmlAttribute(stream, kTestsuites, "time", + FormatTimeInMillisAsSeconds(unit_test.elapsed_time())); + if (GTEST_FLAG(shuffle)) { - fprintf(out, "random_seed=\"%d\" ", unit_test.random_seed()); + OutputXmlAttribute(stream, kTestsuites, "random_seed", + StreamableToString(unit_test.random_seed())); } - fprintf(out, "name=\"AllTests\">\n"); - for (int i = 0; i < unit_test.total_test_case_count(); ++i) - PrintXmlTestCase(out, *unit_test.GetTestCase(i)); - fprintf(out, "</testsuites>\n"); + + *stream << TestPropertiesAsXmlAttributes(unit_test.ad_hoc_test_result()); + + OutputXmlAttribute(stream, kTestsuites, "name", "AllTests"); + *stream << ">\n"; + + for (int i = 0; i < unit_test.total_test_case_count(); ++i) { + if (unit_test.GetTestCase(i)->reportable_test_count() > 0) + PrintXmlTestCase(stream, *unit_test.GetTestCase(i)); + } + *stream << "</" << kTestsuites << ">\n"; } // Produces a string representing the test properties in a result as space // delimited XML attributes based on the property key="value" pairs. -String XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes( +std::string XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes( const TestResult& result) { Message attributes; for (int i = 0; i < result.test_property_count(); ++i) { @@ -3332,112 +3411,6 @@ String XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes( #if GTEST_CAN_STREAM_RESULTS_ -// Streams test results to the given port on the given host machine. -class StreamingListener : public EmptyTestEventListener { - public: - // Escapes '=', '&', '%', and '\n' characters in str as "%xx". - static string UrlEncode(const char* str); - - StreamingListener(const string& host, const string& port) - : sockfd_(-1), host_name_(host), port_num_(port) { - MakeConnection(); - Send("gtest_streaming_protocol_version=1.0\n"); - } - - virtual ~StreamingListener() { - if (sockfd_ != -1) - CloseConnection(); - } - - void OnTestProgramStart(const UnitTest& /* unit_test */) { - Send("event=TestProgramStart\n"); - } - - void OnTestProgramEnd(const UnitTest& unit_test) { - // Note that Google Test current only report elapsed time for each - // test iteration, not for the entire test program. - Send(String::Format("event=TestProgramEnd&passed=%d\n", - unit_test.Passed())); - - // Notify the streaming server to stop.
- CloseConnection(); - } - - void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) { - Send(String::Format("event=TestIterationStart&iteration=%d\n", - iteration)); - } - - void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) { - Send(String::Format("event=TestIterationEnd&passed=%d&elapsed_time=%sms\n", - unit_test.Passed(), - StreamableToString(unit_test.elapsed_time()).c_str())); - } - - void OnTestCaseStart(const TestCase& test_case) { - Send(String::Format("event=TestCaseStart&name=%s\n", test_case.name())); - } - - void OnTestCaseEnd(const TestCase& test_case) { - Send(String::Format("event=TestCaseEnd&passed=%d&elapsed_time=%sms\n", - test_case.Passed(), - StreamableToString(test_case.elapsed_time()).c_str())); - } - - void OnTestStart(const TestInfo& test_info) { - Send(String::Format("event=TestStart&name=%s\n", test_info.name())); - } - - void OnTestEnd(const TestInfo& test_info) { - Send(String::Format( - "event=TestEnd&passed=%d&elapsed_time=%sms\n", - (test_info.result())->Passed(), - StreamableToString((test_info.result())->elapsed_time()).c_str())); - } - - void OnTestPartResult(const TestPartResult& test_part_result) { - const char* file_name = test_part_result.file_name(); - if (file_name == NULL) - file_name = ""; - Send(String::Format("event=TestPartResult&file=%s&line=%d&message=", - UrlEncode(file_name).c_str(), - test_part_result.line_number())); - Send(UrlEncode(test_part_result.message()) + "\n"); - } - - private: - // Creates a client socket and connects to the server. - void MakeConnection(); - - // Closes the socket. - void CloseConnection() { - GTEST_CHECK_(sockfd_ != -1) - << "CloseConnection() can be called only when there is a connection."; - - close(sockfd_); - sockfd_ = -1; - } - - // Sends a string to the socket. - void Send(const string& message) { - GTEST_CHECK_(sockfd_ != -1) - << "Send() can be called only when there is a connection."; - - const int len = static_cast<int>(message.length()); - if (write(sockfd_, message.c_str(), len) != len) { - GTEST_LOG_(WARNING) - << "stream_result_to: failed to stream to " - << host_name_ << ":" << port_num_; - } - } - - int sockfd_; // socket file descriptor - const string host_name_; - const string port_num_; - - GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener); -}; // class StreamingListener - // Checks if str contains '=', '&', '%' or '\n' characters. If yes, // replaces them by "%xx" where xx is their hexadecimal value. For // example, replaces "=" with "%3D". This algorithm is O(strlen(str)) @@ -3452,7 +3425,7 @@ string StreamingListener::UrlEncode(const char* str) { case '=': case '&': case '\n': - result.append(String::Format("%%%02x", static_cast<unsigned char>(ch))); + result.append("%" + String::FormatByte(static_cast<unsigned char>(ch))); break; default: result.push_back(ch); @@ -3462,7 +3435,7 @@ string StreamingListener::UrlEncode(const char* str) { return result; } -void StreamingListener::MakeConnection() { +void StreamingListener::SocketWriter::MakeConnection() { GTEST_CHECK_(sockfd_ == -1) << "MakeConnection() can't be called when there is already a connection."; @@ -3510,8 +3483,8 @@ void StreamingListener::MakeConnection() { // Pushes the given source file location and message onto a per-thread // trace stack maintained by Google Test.
-// L < UnitTest::mutex_ -ScopedTrace::ScopedTrace(const char* file, int line, const Message& message) { +ScopedTrace::ScopedTrace(const char* file, int line, const Message& message) + GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) { TraceInfo trace; trace.file = file; trace.line = line; @@ -3521,35 +3494,64 @@ ScopedTrace::ScopedTrace(const char* file, int line, const Message& message) { } // Pops the info pushed by the c'tor. -// L < UnitTest::mutex_ -ScopedTrace::~ScopedTrace() { +ScopedTrace::~ScopedTrace() + GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) { UnitTest::GetInstance()->PopGTestTrace(); } // class OsStackTraceGetter -// Returns the current OS stack trace as a String. Parameters: +// Returns the current OS stack trace as an std::string. Parameters: // // max_depth - the maximum number of stack frames to be included // in the trace. // skip_count - the number of top frames to be skipped; doesn't count // against max_depth. // -// L < mutex_ -// We use "L < mutex_" to denote that the function may acquire mutex_. -String OsStackTraceGetter::CurrentStackTrace(int, int) { - return String(""); +string OsStackTraceGetter::CurrentStackTrace(int /* max_depth */, + int /* skip_count */) + GTEST_LOCK_EXCLUDED_(mutex_) { + return ""; } -// L < mutex_ -void OsStackTraceGetter::UponLeavingGTest() { +void OsStackTraceGetter::UponLeavingGTest() + GTEST_LOCK_EXCLUDED_(mutex_) { } const char* const OsStackTraceGetter::kElidedFramesMarker = "... " GTEST_NAME_ " internal frames ..."; +// A helper class that creates the premature-exit file in its +// constructor and deletes the file in its destructor. +class ScopedPrematureExitFile { + public: + explicit ScopedPrematureExitFile(const char* premature_exit_filepath) + : premature_exit_filepath_(premature_exit_filepath) { + // If a path to the premature-exit file is specified... + if (premature_exit_filepath != NULL && *premature_exit_filepath != '\0') { + // create the file with a single "0" character in it. I/O + // errors are ignored as there's nothing better we can do and we + // don't want to fail the test because of this. + FILE* pfile = posix::FOpen(premature_exit_filepath, "w"); + fwrite("0", 1, 1, pfile); + fclose(pfile); + } + } + + ~ScopedPrematureExitFile() { + if (premature_exit_filepath_ != NULL && *premature_exit_filepath_ != '\0') { + remove(premature_exit_filepath_); + } + } + + private: + const char* const premature_exit_filepath_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedPrematureExitFile); +}; + } // namespace internal // class TestEventListeners @@ -3636,7 +3638,7 @@ void TestEventListeners::SuppressEventForwarding() { // We don't protect this under mutex_ as a user is not supposed to // call this before main() starts, from which point on the return // value will never change. -UnitTest * UnitTest::GetInstance() { +UnitTest* UnitTest::GetInstance() { // When compiled with MSVC 7.1 in optimized mode, destroying the // UnitTest object upon exiting the program messes up the exit code, // causing successful tests to appear failed. We have to use a @@ -3686,17 +3688,33 @@ int UnitTest::successful_test_count() const { // Gets the number of failed tests. int UnitTest::failed_test_count() const { return impl()->failed_test_count(); } +// Gets the number of disabled tests that will be reported in the XML report. +int UnitTest::reportable_disabled_test_count() const { + return impl()->reportable_disabled_test_count(); +} + // Gets the number of disabled tests. 
int UnitTest::disabled_test_count() const { return impl()->disabled_test_count(); } +// Gets the number of tests to be printed in the XML report. +int UnitTest::reportable_test_count() const { + return impl()->reportable_test_count(); +} + // Gets the number of all tests. int UnitTest::total_test_count() const { return impl()->total_test_count(); } // Gets the number of tests that should run. int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); } +// Gets the time of the test program start, in ms from the start of the +// UNIX epoch. +internal::TimeInMillis UnitTest::start_timestamp() const { + return impl()->start_timestamp(); +} + // Gets the elapsed time, in milliseconds. internal::TimeInMillis UnitTest::elapsed_time() const { return impl()->elapsed_time(); @@ -3715,6 +3733,12 @@ const TestCase* UnitTest::GetTestCase(int i) const { return impl()->GetTestCase(i); } +// Returns the TestResult containing information on test failures and +// properties logged outside of individual test cases. +const TestResult& UnitTest::ad_hoc_test_result() const { + return *impl()->ad_hoc_test_result(); +} + // Gets the i-th test case among all the test cases. i can range from 0 to // total_test_case_count() - 1. If i is not in that range, returns NULL. TestCase* UnitTest::GetMutableTestCase(int i) { @@ -3750,12 +3774,12 @@ Environment* UnitTest::AddEnvironment(Environment* env) { // assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call // this to report their results. The user code should use the // assertion macros instead of calling this directly. -// L < mutex_ -void UnitTest::AddTestPartResult(TestPartResult::Type result_type, - const char* file_name, - int line_number, - const internal::String& message, - const internal::String& os_stack_trace) { +void UnitTest::AddTestPartResult( + TestPartResult::Type result_type, + const char* file_name, + int line_number, + const std::string& message, + const std::string& os_stack_trace) GTEST_LOCK_EXCLUDED_(mutex_) { Message msg; msg << message; @@ -3802,7 +3826,7 @@ void UnitTest::AddTestPartResult(TestPartResult::Type result_type, #endif // GTEST_OS_WINDOWS } else if (GTEST_FLAG(throw_on_failure)) { #if GTEST_HAS_EXCEPTIONS - throw GoogleTestFailureException(result); + throw internal::GoogleTestFailureException(result); #else // We cannot call abort() as it generates a pop-up in debug mode // that cannot be suppressed in VC 7.1 or below. @@ -3812,12 +3836,14 @@ void UnitTest::AddTestPartResult(TestPartResult::Type result_type, } } -// Creates and adds a property to the current TestResult. If a property matching -// the supplied value already exists, updates its value instead. -void UnitTest::RecordPropertyForCurrentTest(const char* key, - const char* value) { - const TestProperty test_property(key, value); - impl_->current_test_result()->RecordProperty(test_property); +// Adds a TestProperty to the current TestResult object when invoked from +// inside a test, to current TestCase's ad_hoc_test_result_ when invoked +// from SetUpTestCase or TearDownTestCase, or to the global property set +// when invoked elsewhere. If the result already contains a property with +// the same key, the value will be updated. +void UnitTest::RecordProperty(const std::string& key, + const std::string& value) { + impl_->RecordProperty(TestProperty(key, value)); } // Runs all tests in this UnitTest object and prints the result. 
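The RecordProperty rework above is what gives properties a scope: recorded inside a running test they land on that test's <testcase> element, from SetUpTestCase/TearDownTestCase they land on the owning <testsuite>, and anywhere else on the top-level <testsuites> element. A short sketch of how user code reaches the first two branches (hypothetical test and key names; Test::RecordProperty and --gtest_output=xml are the stock gtest 1.7 API):

#include "gtest/gtest.h"

class PropertyRoutingTest : public testing::Test {
 protected:
  static void SetUpTestCase() {
    // No test is running yet, so this is recorded against the
    // <testsuite> element for PropertyRoutingTest in the XML report.
    RecordProperty("fixture_note", "set up once per suite");
  }
};

TEST_F(PropertyRoutingTest, AttachesToTestcase) {
  // Recorded while a test runs: becomes an attribute of this test's
  // <testcase> element.
  RecordProperty("bug_id", 12345);
}

Running the binary with --gtest_output=xml:report.xml shows each property as an attribute at its respective level.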
@@ -3826,20 +3852,44 @@ void UnitTest::RecordPropertyForCurrentTest(const char* key, // We don't protect this under mutex_, as we only support calling it // from the main thread. int UnitTest::Run() { + const bool in_death_test_child_process = + internal::GTEST_FLAG(internal_run_death_test).length() > 0; + + // Google Test implements this protocol for catching that a test + // program exits before returning control to Google Test: + // + // 1. Upon start, Google Test creates a file whose absolute path + // is specified by the environment variable + // TEST_PREMATURE_EXIT_FILE. + // 2. When Google Test has finished its work, it deletes the file. + // + // This allows a test runner to set TEST_PREMATURE_EXIT_FILE before + // running a Google-Test-based test program and check the existence + // of the file at the end of the test execution to see if it has + // exited prematurely. + + // If we are in the child process of a death test, don't + // create/delete the premature exit file, as doing so is unnecessary + // and will confuse the parent process. Otherwise, create/delete + // the file upon entering/leaving this function. If the program + // somehow exits before this function has a chance to return, the + // premature-exit file will be left undeleted, causing a test runner + // that understands the premature-exit-file protocol to report the + // test as having failed. + const internal::ScopedPrematureExitFile premature_exit_file( + in_death_test_child_process ? + NULL : internal::posix::GetEnv("TEST_PREMATURE_EXIT_FILE")); + // Captures the value of GTEST_FLAG(catch_exceptions). This value will be // used for the duration of the program. impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions)); #if GTEST_HAS_SEH - const bool in_death_test_child_process = - internal::GTEST_FLAG(internal_run_death_test).length() > 0; - // Either the user wants Google Test to catch exceptions thrown by the // tests or this is executing in the context of death test child // process. In either case the user does not want to see pop-up dialogs // about crashes - they are expected. if (impl()->catch_exceptions() || in_death_test_child_process) { - # if !GTEST_OS_WINDOWS_MOBILE // SetErrorMode doesn't exist on CE. SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT | @@ -3870,7 +3920,6 @@ int UnitTest::Run() { 0x0, // Clear the following flags: _WRITE_ABORT_MSG | _CALL_REPORTFAULT); // pop-up window, core dump. # endif - } #endif // GTEST_HAS_SEH @@ -3888,16 +3937,16 @@ const char* UnitTest::original_working_dir() const { // Returns the TestCase object for the test that's currently running, // or NULL if no test is running. -// L < mutex_ -const TestCase* UnitTest::current_test_case() const { +const TestCase* UnitTest::current_test_case() const + GTEST_LOCK_EXCLUDED_(mutex_) { internal::MutexLock lock(&mutex_); return impl_->current_test_case(); } // Returns the TestInfo object for the test that's currently running, // or NULL if no test is running. -// L < mutex_ -const TestInfo* UnitTest::current_test_info() const { +const TestInfo* UnitTest::current_test_info() const + GTEST_LOCK_EXCLUDED_(mutex_) { internal::MutexLock lock(&mutex_); return impl_->current_test_info(); } @@ -3908,9 +3957,9 @@ int UnitTest::random_seed() const { return impl_->random_seed(); } #if GTEST_HAS_PARAM_TEST // Returns ParameterizedTestCaseRegistry object used to keep track of // value-parameterized tests and instantiate and register them. 
-// L < mutex_ internal::ParameterizedTestCaseRegistry& - UnitTest::parameterized_test_registry() { + UnitTest::parameterized_test_registry() + GTEST_LOCK_EXCLUDED_(mutex_) { return impl_->parameterized_test_registry(); } #endif // GTEST_HAS_PARAM_TEST @@ -3927,15 +3976,15 @@ UnitTest::~UnitTest() { // Pushes a trace defined by SCOPED_TRACE() on to the per-thread // Google Test trace stack. -// L < mutex_ -void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) { +void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) + GTEST_LOCK_EXCLUDED_(mutex_) { internal::MutexLock lock(&mutex_); impl_->gtest_trace_stack().push_back(trace); } // Pops a trace from the per-thread Google Test trace stack. -// L < mutex_ -void UnitTest::PopGTestTrace() { +void UnitTest::PopGTestTrace() + GTEST_LOCK_EXCLUDED_(mutex_) { internal::MutexLock lock(&mutex_); impl_->gtest_trace_stack().pop_back(); } @@ -3971,9 +4020,9 @@ UnitTestImpl::UnitTestImpl(UnitTest* parent) post_flag_parse_init_performed_(false), random_seed_(0), // Will be overridden by the flag before first use. random_(0), // Will be reseeded before first use. + start_timestamp_(0), elapsed_time_(0), #if GTEST_HAS_DEATH_TEST - internal_run_death_test_flag_(NULL), death_test_factory_(new DefaultDeathTestFactory), #endif // Will be overridden by the flag before first use. @@ -3991,6 +4040,28 @@ UnitTestImpl::~UnitTestImpl() { delete os_stack_trace_getter_; } +// Adds a TestProperty to the current TestResult object when invoked in a +// context of a test, to current test case's ad_hoc_test_result when invoke +// from SetUpTestCase/TearDownTestCase, or to the global property set +// otherwise. If the result already contains a property with the same key, +// the value will be updated. +void UnitTestImpl::RecordProperty(const TestProperty& test_property) { + std::string xml_element; + TestResult* test_result; // TestResult appropriate for property recording. + + if (current_test_info_ != NULL) { + xml_element = "testcase"; + test_result = &(current_test_info_->result_); + } else if (current_test_case_ != NULL) { + xml_element = "testsuite"; + test_result = &(current_test_case_->ad_hoc_test_result_); + } else { + xml_element = "testsuites"; + test_result = &ad_hoc_test_result_; + } + test_result->RecordProperty(xml_element, test_property); +} + #if GTEST_HAS_DEATH_TEST // Disables event forwarding if the control is currently in a death test // subprocess. Must not be called before InitGoogleTest. @@ -4003,7 +4074,7 @@ void UnitTestImpl::SuppressTestEventsIfInSubprocess() { // Initializes event listeners performing XML output as specified by // UnitTestOptions. Must not be called before InitGoogleTest. void UnitTestImpl::ConfigureXmlOutput() { - const String& output_format = UnitTestOptions::GetOutputFormat(); + const std::string& output_format = UnitTestOptions::GetOutputFormat(); if (output_format == "xml") { listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter( UnitTestOptions::GetAbsolutePathToOutputFile().c_str())); @@ -4015,13 +4086,13 @@ void UnitTestImpl::ConfigureXmlOutput() { } #if GTEST_CAN_STREAM_RESULTS_ -// Initializes event listeners for streaming test results in String form. +// Initializes event listeners for streaming test results in string form. // Must not be called before InitGoogleTest. 
void UnitTestImpl::ConfigureStreamingOutput() { - const string& target = GTEST_FLAG(stream_result_to); + const std::string& target = GTEST_FLAG(stream_result_to); if (!target.empty()) { const size_t pos = target.find(':'); - if (pos != string::npos) { + if (pos != std::string::npos) { listeners()->Append(new StreamingListener(target.substr(0, pos), target.substr(pos+1))); } else { @@ -4075,7 +4146,7 @@ void UnitTestImpl::PostFlagParsingInit() { class TestCaseNameIs { public: // Constructor. - explicit TestCaseNameIs(const String& name) + explicit TestCaseNameIs(const std::string& name) : name_(name) {} // Returns true iff the name of test_case matches name_. @@ -4084,7 +4155,7 @@ class TestCaseNameIs { } private: - String name_; + std::string name_; }; // Finds and returns a TestCase with the given name. If one doesn't @@ -4116,7 +4187,7 @@ TestCase* UnitTestImpl::GetTestCase(const char* test_case_name, new TestCase(test_case_name, type_param, set_up_tc, tear_down_tc); // Is this a death test case? - if (internal::UnitTestOptions::MatchesFilter(String(test_case_name), + if (internal::UnitTestOptions::MatchesFilter(test_case_name, kDeathTestCaseFilter)) { // Yes. Inserts the test case after the last death test case // defined so far. This only works when the test cases haven't @@ -4202,6 +4273,7 @@ bool UnitTestImpl::RunAllTests() { TestEventListener* repeater = listeners()->repeater(); + start_timestamp_ = GetTimeInMillis(); repeater->OnTestProgramStart(*parent_); // How many times to repeat the tests? We don't want to repeat them @@ -4394,14 +4466,12 @@ int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) { int num_selected_tests = 0; for (size_t i = 0; i < test_cases_.size(); i++) { TestCase* const test_case = test_cases_[i]; - const String &test_case_name = test_case->name(); + const std::string &test_case_name = test_case->name(); test_case->set_should_run(false); - bool any_matched_filter = false; - for (size_t j = 0; j < test_case->test_info_list().size(); j++) { TestInfo* const test_info = test_case->test_info_list()[j]; - const String test_name(test_info->name()); + const std::string test_name(test_info->name()); // A test is disabled if test case name or test name matches // kDisableTestFilter. const bool is_disabled = @@ -4415,7 +4485,6 @@ int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) { internal::UnitTestOptions::FilterMatchesTest(test_case_name, test_name); test_info->matches_filter_ = matches_filter; - any_matched_filter |= matches_filter; const bool is_runnable = (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) && @@ -4432,14 +4501,37 @@ int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) { test_info->should_run_ = is_selected; test_case->set_should_run(test_case->should_run() || is_selected); } - - test_case->set_should_skip_report(!any_matched_filter); } return num_selected_tests; } +// Prints the given C-string on a single line by replacing all '\n' +// characters with string "\\n". If the output takes more than +// max_length characters, only prints the first max_length characters +// and "...". +static void PrintOnOneLine(const char* str, int max_length) { + if (str != NULL) { + for (int i = 0; *str != '\0'; ++str) { + if (i >= max_length) { + printf("..."); + break; + } + if (*str == '\n') { + printf("\\n"); + i += 2; + } else { + printf("%c", *str); + ++i; + } + } + } +} + // Prints the names of the tests matching the user-specified filter flag. 
void UnitTestImpl::ListTestsMatchingFilter() { + // Print at most this many characters for each type/value parameter. + const int kMaxParamLength = 250; + for (size_t i = 0; i < test_cases_.size(); i++) { const TestCase* const test_case = test_cases_[i]; bool printed_test_case_name = false; @@ -4450,9 +4542,23 @@ void UnitTestImpl::ListTestsMatchingFilter() { if (test_info->matches_filter_) { if (!printed_test_case_name) { printed_test_case_name = true; - printf("%s.\n", test_case->name()); + printf("%s.", test_case->name()); + if (test_case->type_param() != NULL) { + printf(" # %s = ", kTypeParamLabel); + // We print the type parameter on a single line to make + // the output easy to parse by a program. + PrintOnOneLine(test_case->type_param(), kMaxParamLength); + } + printf("\n"); + } + printf(" %s", test_info->name()); + if (test_info->value_param() != NULL) { + printf(" # %s = ", kValueParamLabel); + // We print the value parameter on a single line to make the + // output easy to parse by a program. + PrintOnOneLine(test_info->value_param(), kMaxParamLength); } - printf(" %s\n", test_info->name()); + printf("\n"); } } } @@ -4516,7 +4622,7 @@ void UnitTestImpl::UnshuffleTests() { } } -// Returns the current OS stack trace as a String. +// Returns the current OS stack trace as an std::string. // // The maximum number of stack frames to be included is specified by // the gtest_stack_trace_depth flag. The skip_count parameter @@ -4526,8 +4632,8 @@ void UnitTestImpl::UnshuffleTests() { // For example, if Foo() calls Bar(), which in turn calls // GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in // the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. -String GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/, - int skip_count) { +std::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/, + int skip_count) { // We pass skip_count + 1 to skip this wrapper function in addition // to what the user really wants to skip. return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1); @@ -4575,7 +4681,7 @@ const char* ParseFlagValue(const char* str, if (str == NULL || flag == NULL) return NULL; // The flag must start with "--" followed by GTEST_FLAG_PREFIX_. - const String flag_str = String::Format("--%s%s", GTEST_FLAG_PREFIX_, flag); + const std::string flag_str = std::string("--") + GTEST_FLAG_PREFIX_ + flag; const size_t flag_len = flag_str.length(); if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL; @@ -4640,7 +4746,7 @@ bool ParseInt32Flag(const char* str, const char* flag, Int32* value) { // // On success, stores the value of the flag in *value, and returns // true. On failure, returns false without changing *value. -bool ParseStringFlag(const char* str, const char* flag, String* value) { +bool ParseStringFlag(const char* str, const char* flag, std::string* value) { // Gets the value of the flag as a string. 
const char* const value_str = ParseFlagValue(str, flag, false); @@ -4692,7 +4798,7 @@ static void PrintColorEncoded(const char* str) { return; } - ColoredPrintf(color, "%s", String(str, p - str).c_str()); + ColoredPrintf(color, "%s", std::string(str, p).c_str()); const char ch = p[1]; str = p + 2; @@ -4782,7 +4888,7 @@ static const char kColorEncodedHelpMessage[] = template <typename CharType> void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) { for (int i = 1; i < *argc; i++) { - const String arg_string = StreamableToString(argv[i]); + const std::string arg_string = StreamableToString(argv[i]); const char* const arg = arg_string.c_str(); using internal::ParseBoolFlag; diff --git a/external/gtest/src/gtest_main.cc b/external/gtest/src/gtest_main.cc new file mode 100755 index 0000000000..f302822552 --- /dev/null +++ b/external/gtest/src/gtest_main.cc @@ -0,0 +1,38 @@ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +#include <stdio.h> + +#include "gtest/gtest.h" + +GTEST_API_ int main(int argc, char **argv) { + printf("Running main() from gtest_main.cc\n"); + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/gtest/test/gtest-death-test_ex_test.cc b/external/gtest/test/gtest-death-test_ex_test.cc new file mode 100755 index 0000000000..b50a13d5e2 --- /dev/null +++ b/external/gtest/test/gtest-death-test_ex_test.cc @@ -0,0 +1,93 @@ +// Copyright 2010, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc.
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: vladl@google.com (Vlad Losev) +// +// Tests that verify interaction of exceptions and death tests. + +#include "gtest/gtest-death-test.h" +#include "gtest/gtest.h" + +#if GTEST_HAS_DEATH_TEST + +# if GTEST_HAS_SEH +# include <windows.h> // For RaiseException(). +# endif + +# include "gtest/gtest-spi.h" + +# if GTEST_HAS_EXCEPTIONS + +# include <exception> // For std::exception. + +// Tests that death tests report thrown exceptions as failures and that the +// exceptions do not escape death test macros. +TEST(CxxExceptionDeathTest, ExceptionIsFailure) { + try { + EXPECT_NONFATAL_FAILURE(EXPECT_DEATH(throw 1, ""), "threw an exception"); + } catch (...) { // NOLINT + FAIL() << "An exception escaped a death test macro invocation " + << "with catch_exceptions " + << (testing::GTEST_FLAG(catch_exceptions) ? "enabled" : "disabled"); + } +} + +class TestException : public std::exception { + public: + virtual const char* what() const throw() { return "exceptional message"; } +}; + +TEST(CxxExceptionDeathTest, PrintsMessageForStdExceptions) { + // Verifies that the exception message is quoted in the failure text. + EXPECT_NONFATAL_FAILURE(EXPECT_DEATH(throw TestException(), ""), + "exceptional message"); + // Verifies that the location is mentioned in the failure text. + EXPECT_NONFATAL_FAILURE(EXPECT_DEATH(throw TestException(), ""), + "gtest-death-test_ex_test.cc"); +} +# endif // GTEST_HAS_EXCEPTIONS + +# if GTEST_HAS_SEH +// Tests that enabling interception of SEH exceptions with the +// catch_exceptions flag does not interfere with SEH exceptions being +// treated as death by death tests. +TEST(SehExceptionDeasTest, CatchExceptionsDoesNotInterfere) { + EXPECT_DEATH(RaiseException(42, 0x0, 0, NULL), "") + << "with catch_exceptions " + << (testing::GTEST_FLAG(catch_exceptions) ? "enabled" : "disabled"); +} +# endif + +#endif // GTEST_HAS_DEATH_TEST + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + testing::GTEST_FLAG(catch_exceptions) = GTEST_ENABLE_CATCH_EXCEPTIONS_ != 0; + return RUN_ALL_TESTS(); +} diff --git a/external/gtest/test/gtest-death-test_test.cc b/external/gtest/test/gtest-death-test_test.cc new file mode 100755 index 0000000000..c2d26df993 --- /dev/null +++ b/external/gtest/test/gtest-death-test_test.cc @@ -0,0 +1,1367 @@ +// Copyright 2005, Google Inc. +// All rights reserved.
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Tests for death tests. + +#include "gtest/gtest-death-test.h" +#include "gtest/gtest.h" +#include "gtest/internal/gtest-filepath.h" + +using testing::internal::AlwaysFalse; +using testing::internal::AlwaysTrue; + +#if GTEST_HAS_DEATH_TEST + +# if GTEST_OS_WINDOWS +# include <direct.h> // For chdir(). +# else +# include <unistd.h> +# include <sys/wait.h> // For waitpid. +# endif // GTEST_OS_WINDOWS + +# include <limits.h> +# include <signal.h> +# include <stdio.h> + +# if GTEST_OS_LINUX +# include <sys/time.h> +# endif // GTEST_OS_LINUX + +# include "gtest/gtest-spi.h" + +// Indicates that this translation unit is part of Google Test's +// implementation. It must come before gtest-internal-inl.h is +// included, or there will be a compiler error. This trick is to +// prevent a user from accidentally including gtest-internal-inl.h in +// his code. +# define GTEST_IMPLEMENTATION_ 1 +# include "src/gtest-internal-inl.h" +# undef GTEST_IMPLEMENTATION_ + +namespace posix = ::testing::internal::posix; + +using testing::Message; +using testing::internal::DeathTest; +using testing::internal::DeathTestFactory; +using testing::internal::FilePath; +using testing::internal::GetLastErrnoDescription; +using testing::internal::GetUnitTestImpl; +using testing::internal::InDeathTestChild; +using testing::internal::ParseNaturalNumber; + +namespace testing { +namespace internal { + +// A helper class whose objects replace the death test factory for a +// single UnitTest object during their lifetimes. +class ReplaceDeathTestFactory { + public: + explicit ReplaceDeathTestFactory(DeathTestFactory* new_factory) + : unit_test_impl_(GetUnitTestImpl()) { + old_factory_ = unit_test_impl_->death_test_factory_.release(); + unit_test_impl_->death_test_factory_.reset(new_factory); + } + + ~ReplaceDeathTestFactory() { + unit_test_impl_->death_test_factory_.release(); + unit_test_impl_->death_test_factory_.reset(old_factory_); + } + private: + // Prevents copying ReplaceDeathTestFactory objects.
+ ReplaceDeathTestFactory(const ReplaceDeathTestFactory&); + void operator=(const ReplaceDeathTestFactory&); + + UnitTestImpl* unit_test_impl_; + DeathTestFactory* old_factory_; +}; + +} // namespace internal +} // namespace testing + +void DieWithMessage(const ::std::string& message) { + fprintf(stderr, "%s", message.c_str()); + fflush(stderr); // Make sure the text is printed before the process exits. + + // We call _exit() instead of exit(), as the former is a direct + // system call and thus safer in the presence of threads. exit() + // will invoke user-defined exit-hooks, which may do dangerous + // things that conflict with death tests. + // + // Some compilers can recognize that _exit() never returns and issue the + // 'unreachable code' warning for code following this function, unless + // fooled by a fake condition. + if (AlwaysTrue()) + _exit(1); +} + +void DieInside(const ::std::string& function) { + DieWithMessage("death inside " + function + "()."); +} + +// Tests that death tests work. + +class TestForDeathTest : public testing::Test { + protected: + TestForDeathTest() : original_dir_(FilePath::GetCurrentDir()) {} + + virtual ~TestForDeathTest() { + posix::ChDir(original_dir_.c_str()); + } + + // A static member function that's expected to die. + static void StaticMemberFunction() { DieInside("StaticMemberFunction"); } + + // A method of the test fixture that may die. + void MemberFunction() { + if (should_die_) + DieInside("MemberFunction"); + } + + // True iff MemberFunction() should die. + bool should_die_; + const FilePath original_dir_; +}; + +// A class with a member function that may die. +class MayDie { + public: + explicit MayDie(bool should_die) : should_die_(should_die) {} + + // A member function that may die. + void MemberFunction() const { + if (should_die_) + DieInside("MayDie::MemberFunction"); + } + + private: + // True iff MemberFunction() should die. + bool should_die_; +}; + +// A global function that's expected to die. +void GlobalFunction() { DieInside("GlobalFunction"); } + +// A non-void function that's expected to die. +int NonVoidFunction() { + DieInside("NonVoidFunction"); + return 1; +} + +// A unary function that may die. +void DieIf(bool should_die) { + if (should_die) + DieInside("DieIf"); +} + +// A binary function that may die. +bool DieIfLessThan(int x, int y) { + if (x < y) { + DieInside("DieIfLessThan"); + } + return true; +} + +// Tests that ASSERT_DEATH can be used outside a TEST, TEST_F, or test fixture. +void DeathTestSubroutine() { + EXPECT_DEATH(GlobalFunction(), "death.*GlobalFunction"); + ASSERT_DEATH(GlobalFunction(), "death.*GlobalFunction"); +} + +// Death in dbg, not opt. +int DieInDebugElse12(int* sideeffect) { + if (sideeffect) *sideeffect = 12; + +# ifndef NDEBUG + + DieInside("DieInDebugElse12"); + +# endif // NDEBUG + + return 12; +} + +# if GTEST_OS_WINDOWS + +// Tests the ExitedWithCode predicate. +TEST(ExitStatusPredicateTest, ExitedWithCode) { + // On Windows, the process's exit code is the same as its exit status, + // so the predicate just compares the its input with its parameter. + EXPECT_TRUE(testing::ExitedWithCode(0)(0)); + EXPECT_TRUE(testing::ExitedWithCode(1)(1)); + EXPECT_TRUE(testing::ExitedWithCode(42)(42)); + EXPECT_FALSE(testing::ExitedWithCode(0)(1)); + EXPECT_FALSE(testing::ExitedWithCode(1)(0)); +} + +# else + +// Returns the exit status of a process that calls _exit(2) with a +// given exit code. This is a helper function for the +// ExitStatusPredicateTest test suite. 
+static int NormalExitStatus(int exit_code) { + pid_t child_pid = fork(); + if (child_pid == 0) { + _exit(exit_code); + } + int status; + waitpid(child_pid, &status, 0); + return status; +} + +// Returns the exit status of a process that raises a given signal. +// If the signal does not cause the process to die, then it returns +// instead the exit status of a process that exits normally with exit +// code 1. This is a helper function for the ExitStatusPredicateTest +// test suite. +static int KilledExitStatus(int signum) { + pid_t child_pid = fork(); + if (child_pid == 0) { + raise(signum); + _exit(1); + } + int status; + waitpid(child_pid, &status, 0); + return status; +} + +// Tests the ExitedWithCode predicate. +TEST(ExitStatusPredicateTest, ExitedWithCode) { + const int status0 = NormalExitStatus(0); + const int status1 = NormalExitStatus(1); + const int status42 = NormalExitStatus(42); + const testing::ExitedWithCode pred0(0); + const testing::ExitedWithCode pred1(1); + const testing::ExitedWithCode pred42(42); + EXPECT_PRED1(pred0, status0); + EXPECT_PRED1(pred1, status1); + EXPECT_PRED1(pred42, status42); + EXPECT_FALSE(pred0(status1)); + EXPECT_FALSE(pred42(status0)); + EXPECT_FALSE(pred1(status42)); +} + +// Tests the KilledBySignal predicate. +TEST(ExitStatusPredicateTest, KilledBySignal) { + const int status_segv = KilledExitStatus(SIGSEGV); + const int status_kill = KilledExitStatus(SIGKILL); + const testing::KilledBySignal pred_segv(SIGSEGV); + const testing::KilledBySignal pred_kill(SIGKILL); + EXPECT_PRED1(pred_segv, status_segv); + EXPECT_PRED1(pred_kill, status_kill); + EXPECT_FALSE(pred_segv(status_kill)); + EXPECT_FALSE(pred_kill(status_segv)); +} + +# endif // GTEST_OS_WINDOWS + +// Tests that the death test macros expand to code which may or may not +// be followed by operator<<, and that in either case the complete text +// comprises only a single C++ statement. +TEST_F(TestForDeathTest, SingleStatement) { + if (AlwaysFalse()) + // This would fail if executed; this is a compilation test only + ASSERT_DEATH(return, ""); + + if (AlwaysTrue()) + EXPECT_DEATH(_exit(1), ""); + else + // This empty "else" branch is meant to ensure that EXPECT_DEATH + // doesn't expand into an "if" statement without an "else" + ; + + if (AlwaysFalse()) + ASSERT_DEATH(return, "") << "did not die"; + + if (AlwaysFalse()) + ; + else + EXPECT_DEATH(_exit(1), "") << 1 << 2 << 3; +} + +void DieWithEmbeddedNul() { + fprintf(stderr, "Hello%cmy null world.\n", '\0'); + fflush(stderr); + _exit(1); +} + +# if GTEST_USES_PCRE +// Tests that EXPECT_DEATH and ASSERT_DEATH work when the error +// message has a NUL character in it. +TEST_F(TestForDeathTest, EmbeddedNulInMessage) { + // TODO(wan@google.com): doesn't support matching strings + // with embedded NUL characters - find a way to workaround it. + EXPECT_DEATH(DieWithEmbeddedNul(), "my null world"); + ASSERT_DEATH(DieWithEmbeddedNul(), "my null world"); +} +# endif // GTEST_USES_PCRE + +// Tests that death test macros expand to code which interacts well with switch +// statements. +TEST_F(TestForDeathTest, SwitchStatement) { +// Microsoft compiler usually complains about switch statements without +// case labels. We suppress that warning for this test. 
+# ifdef _MSC_VER +# pragma warning(push) +# pragma warning(disable: 4065) +# endif // _MSC_VER + + switch (0) + default: + ASSERT_DEATH(_exit(1), "") << "exit in default switch handler"; + + switch (0) + case 0: + EXPECT_DEATH(_exit(1), "") << "exit in switch case"; + +# ifdef _MSC_VER +# pragma warning(pop) +# endif // _MSC_VER +} + +// Tests that a static member function can be used in a "fast" style +// death test. +TEST_F(TestForDeathTest, StaticMemberFunctionFastStyle) { + testing::GTEST_FLAG(death_test_style) = "fast"; + ASSERT_DEATH(StaticMemberFunction(), "death.*StaticMember"); +} + +// Tests that a method of the test fixture can be used in a "fast" +// style death test. +TEST_F(TestForDeathTest, MemberFunctionFastStyle) { + testing::GTEST_FLAG(death_test_style) = "fast"; + should_die_ = true; + EXPECT_DEATH(MemberFunction(), "inside.*MemberFunction"); +} + +void ChangeToRootDir() { posix::ChDir(GTEST_PATH_SEP_); } + +// Tests that death tests work even if the current directory has been +// changed. +TEST_F(TestForDeathTest, FastDeathTestInChangedDir) { + testing::GTEST_FLAG(death_test_style) = "fast"; + + ChangeToRootDir(); + EXPECT_EXIT(_exit(1), testing::ExitedWithCode(1), ""); + + ChangeToRootDir(); + ASSERT_DEATH(_exit(1), ""); +} + +# if GTEST_OS_LINUX +void SigprofAction(int, siginfo_t*, void*) { /* no op */ } + +// Sets SIGPROF action and ITIMER_PROF timer (interval: 1ms). +void SetSigprofActionAndTimer() { + struct itimerval timer; + timer.it_interval.tv_sec = 0; + timer.it_interval.tv_usec = 1; + timer.it_value = timer.it_interval; + ASSERT_EQ(0, setitimer(ITIMER_PROF, &timer, NULL)); + struct sigaction signal_action; + memset(&signal_action, 0, sizeof(signal_action)); + sigemptyset(&signal_action.sa_mask); + signal_action.sa_sigaction = SigprofAction; + signal_action.sa_flags = SA_RESTART | SA_SIGINFO; + ASSERT_EQ(0, sigaction(SIGPROF, &signal_action, NULL)); +} + +// Disables ITIMER_PROF timer and ignores SIGPROF signal. +void DisableSigprofActionAndTimer(struct sigaction* old_signal_action) { + struct itimerval timer; + timer.it_interval.tv_sec = 0; + timer.it_interval.tv_usec = 0; + timer.it_value = timer.it_interval; + ASSERT_EQ(0, setitimer(ITIMER_PROF, &timer, NULL)); + struct sigaction signal_action; + memset(&signal_action, 0, sizeof(signal_action)); + sigemptyset(&signal_action.sa_mask); + signal_action.sa_handler = SIG_IGN; + ASSERT_EQ(0, sigaction(SIGPROF, &signal_action, old_signal_action)); +} + +// Tests that death tests work when SIGPROF handler and timer are set. 
+TEST_F(TestForDeathTest, FastSigprofActionSet) { + testing::GTEST_FLAG(death_test_style) = "fast"; + SetSigprofActionAndTimer(); + EXPECT_DEATH(_exit(1), ""); + struct sigaction old_signal_action; + DisableSigprofActionAndTimer(&old_signal_action); + EXPECT_TRUE(old_signal_action.sa_sigaction == SigprofAction); +} + +TEST_F(TestForDeathTest, ThreadSafeSigprofActionSet) { + testing::GTEST_FLAG(death_test_style) = "threadsafe"; + SetSigprofActionAndTimer(); + EXPECT_DEATH(_exit(1), ""); + struct sigaction old_signal_action; + DisableSigprofActionAndTimer(&old_signal_action); + EXPECT_TRUE(old_signal_action.sa_sigaction == SigprofAction); +} +# endif // GTEST_OS_LINUX + +// Repeats a representative sample of death tests in the "threadsafe" style: + +TEST_F(TestForDeathTest, StaticMemberFunctionThreadsafeStyle) { + testing::GTEST_FLAG(death_test_style) = "threadsafe"; + ASSERT_DEATH(StaticMemberFunction(), "death.*StaticMember"); +} + +TEST_F(TestForDeathTest, MemberFunctionThreadsafeStyle) { + testing::GTEST_FLAG(death_test_style) = "threadsafe"; + should_die_ = true; + EXPECT_DEATH(MemberFunction(), "inside.*MemberFunction"); +} + +TEST_F(TestForDeathTest, ThreadsafeDeathTestInLoop) { + testing::GTEST_FLAG(death_test_style) = "threadsafe"; + + for (int i = 0; i < 3; ++i) + EXPECT_EXIT(_exit(i), testing::ExitedWithCode(i), "") << ": i = " << i; +} + +TEST_F(TestForDeathTest, ThreadsafeDeathTestInChangedDir) { + testing::GTEST_FLAG(death_test_style) = "threadsafe"; + + ChangeToRootDir(); + EXPECT_EXIT(_exit(1), testing::ExitedWithCode(1), ""); + + ChangeToRootDir(); + ASSERT_DEATH(_exit(1), ""); +} + +TEST_F(TestForDeathTest, MixedStyles) { + testing::GTEST_FLAG(death_test_style) = "threadsafe"; + EXPECT_DEATH(_exit(1), ""); + testing::GTEST_FLAG(death_test_style) = "fast"; + EXPECT_DEATH(_exit(1), ""); +} + +# if GTEST_HAS_CLONE && GTEST_HAS_PTHREAD + +namespace { + +bool pthread_flag; + +void SetPthreadFlag() { + pthread_flag = true; +} + +} // namespace + +TEST_F(TestForDeathTest, DoesNotExecuteAtforkHooks) { + if (!testing::GTEST_FLAG(death_test_use_fork)) { + testing::GTEST_FLAG(death_test_style) = "threadsafe"; + pthread_flag = false; + ASSERT_EQ(0, pthread_atfork(&SetPthreadFlag, NULL, NULL)); + ASSERT_DEATH(_exit(1), ""); + ASSERT_FALSE(pthread_flag); + } +} + +# endif // GTEST_HAS_CLONE && GTEST_HAS_PTHREAD + +// Tests that a method of another class can be used in a death test. +TEST_F(TestForDeathTest, MethodOfAnotherClass) { + const MayDie x(true); + ASSERT_DEATH(x.MemberFunction(), "MayDie\\:\\:MemberFunction"); +} + +// Tests that a global function can be used in a death test. +TEST_F(TestForDeathTest, GlobalFunction) { + EXPECT_DEATH(GlobalFunction(), "GlobalFunction"); +} + +// Tests that any value convertible to an RE works as a second +// argument to EXPECT_DEATH. +TEST_F(TestForDeathTest, AcceptsAnythingConvertibleToRE) { + static const char regex_c_str[] = "GlobalFunction"; + EXPECT_DEATH(GlobalFunction(), regex_c_str); + + const testing::internal::RE regex(regex_c_str); + EXPECT_DEATH(GlobalFunction(), regex); + +# if GTEST_HAS_GLOBAL_STRING + + const string regex_str(regex_c_str); + EXPECT_DEATH(GlobalFunction(), regex_str); + +# endif // GTEST_HAS_GLOBAL_STRING + + const ::std::string regex_std_str(regex_c_str); + EXPECT_DEATH(GlobalFunction(), regex_std_str); +} + +// Tests that a non-void function can be used in a death test. 
+TEST_F(TestForDeathTest, NonVoidFunction) {
+  ASSERT_DEATH(NonVoidFunction(), "NonVoidFunction");
+}
+
+// Tests that functions that take parameter(s) can be used in a death test.
+TEST_F(TestForDeathTest, FunctionWithParameter) {
+  EXPECT_DEATH(DieIf(true), "DieIf\\(\\)");
+  EXPECT_DEATH(DieIfLessThan(2, 3), "DieIfLessThan");
+}
+
+// Tests that ASSERT_DEATH can be used outside a TEST, TEST_F, or test fixture.
+TEST_F(TestForDeathTest, OutsideFixture) {
+  DeathTestSubroutine();
+}
+
+// Tests that death tests can be done inside a loop.
+TEST_F(TestForDeathTest, InsideLoop) {
+  for (int i = 0; i < 5; i++) {
+    EXPECT_DEATH(DieIfLessThan(-1, i), "DieIfLessThan") << "where i == " << i;
+  }
+}
+
+// Tests that a compound statement can be used in a death test.
+TEST_F(TestForDeathTest, CompoundStatement) {
+  EXPECT_DEATH({  // NOLINT
+    const int x = 2;
+    const int y = x + 1;
+    DieIfLessThan(x, y);
+  },
+  "DieIfLessThan");
+}
+
+// Tests that code that doesn't die causes a death test to fail.
+TEST_F(TestForDeathTest, DoesNotDie) {
+  EXPECT_NONFATAL_FAILURE(EXPECT_DEATH(DieIf(false), "DieIf"),
+                          "failed to die");
+}
+
+// Tests that a death test fails when the error message isn't expected.
+TEST_F(TestForDeathTest, ErrorMessageMismatch) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_DEATH(DieIf(true), "DieIfLessThan") << "End of death test message.";
+  }, "died but not with expected error");
+}
+
+// On exit, *aborted will be true iff the EXPECT_DEATH() statement
+// aborted the function.
+void ExpectDeathTestHelper(bool* aborted) {
+  *aborted = true;
+  EXPECT_DEATH(DieIf(false), "DieIf");  // This assertion should fail.
+  *aborted = false;
+}
+
+// Tests that EXPECT_DEATH doesn't abort the test on failure.
+TEST_F(TestForDeathTest, EXPECT_DEATH) {
+  bool aborted = true;
+  EXPECT_NONFATAL_FAILURE(ExpectDeathTestHelper(&aborted),
+                          "failed to die");
+  EXPECT_FALSE(aborted);
+}
+
+// Tests that ASSERT_DEATH does abort the test on failure.
+TEST_F(TestForDeathTest, ASSERT_DEATH) {
+  static bool aborted;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    aborted = true;
+    ASSERT_DEATH(DieIf(false), "DieIf");  // This assertion should fail.
+    aborted = false;
+  }, "failed to die");
+  EXPECT_TRUE(aborted);
+}
+
+// Tests that EXPECT_DEATH evaluates the arguments exactly once.
+TEST_F(TestForDeathTest, SingleEvaluation) {
+  int x = 3;
+  EXPECT_DEATH(DieIf((++x) == 4), "DieIf");
+
+  const char* regex = "DieIf";
+  const char* regex_save = regex;
+  EXPECT_DEATH(DieIfLessThan(3, 4), regex++);
+  EXPECT_EQ(regex_save + 1, regex);
+}
+
+// Tests that run-away death tests are reported as failures.
+TEST_F(TestForDeathTest, RunawayIsFailure) {
+  EXPECT_NONFATAL_FAILURE(EXPECT_DEATH(static_cast<void>(0), "Foo"),
+                          "failed to die.");
+}
+
+// Tests that death tests report executing 'return' in the statement as
+// failure.
+TEST_F(TestForDeathTest, ReturnIsFailure) {
+  EXPECT_FATAL_FAILURE(ASSERT_DEATH(return, "Bar"),
+                       "illegal return in test statement.");
+}
+
+// Tests that EXPECT_DEBUG_DEATH works as expected, that is, you can stream a
+// message to it, and in debug mode it:
+// 1. Asserts on death.
+// 2. Has no side effect.
+//
+// And in opt mode, it:
+// 1. Has side effects but does not assert.
+TEST_F(TestForDeathTest, TestExpectDebugDeath) {
+  int sideeffect = 0;
+
+  EXPECT_DEBUG_DEATH(DieInDebugElse12(&sideeffect), "death.*DieInDebugElse12")
+      << "Must accept a streamed message";
+
+# ifdef NDEBUG
+
+  // Checks that the assignment occurs in opt mode (sideeffect).
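+  // (DieInDebugElse12() dies in debug mode but returns 12 in opt mode,
+  // which is why the side effect is only observable here.)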
+  EXPECT_EQ(12, sideeffect);
+
+# else
+
+  // Checks that the assignment does not occur in dbg mode (no sideeffect).
+  EXPECT_EQ(0, sideeffect);
+
+# endif
+}
+
+// Tests that ASSERT_DEBUG_DEATH works as expected, that is, you can stream a
+// message to it, and in debug mode it:
+// 1. Asserts on death.
+// 2. Has no side effect.
+//
+// And in opt mode, it:
+// 1. Has side effects but does not assert.
+TEST_F(TestForDeathTest, TestAssertDebugDeath) {
+  int sideeffect = 0;
+
+  ASSERT_DEBUG_DEATH(DieInDebugElse12(&sideeffect), "death.*DieInDebugElse12")
+      << "Must accept a streamed message";
+
+# ifdef NDEBUG
+
+  // Checks that the assignment occurs in opt mode (sideeffect).
+  EXPECT_EQ(12, sideeffect);
+
+# else
+
+  // Checks that the assignment does not occur in dbg mode (no sideeffect).
+  EXPECT_EQ(0, sideeffect);
+
+# endif
+}
+
+# ifndef NDEBUG
+
+void ExpectDebugDeathHelper(bool* aborted) {
+  *aborted = true;
+  EXPECT_DEBUG_DEATH(return, "") << "This is expected to fail.";
+  *aborted = false;
+}
+
+# if GTEST_OS_WINDOWS
+TEST(PopUpDeathTest, DoesNotShowPopUpOnAbort) {
+  printf("This test should be considered failing if it shows "
+         "any pop-up dialogs.\n");
+  fflush(stdout);
+
+  EXPECT_DEATH({
+    testing::GTEST_FLAG(catch_exceptions) = false;
+    abort();
+  }, "");
+}
+# endif  // GTEST_OS_WINDOWS
+
+// Tests that EXPECT_DEBUG_DEATH in debug mode does not abort
+// the function.
+TEST_F(TestForDeathTest, ExpectDebugDeathDoesNotAbort) {
+  bool aborted = true;
+  EXPECT_NONFATAL_FAILURE(ExpectDebugDeathHelper(&aborted), "");
+  EXPECT_FALSE(aborted);
+}
+
+void AssertDebugDeathHelper(bool* aborted) {
+  *aborted = true;
+  ASSERT_DEBUG_DEATH(return, "") << "This is expected to fail.";
+  *aborted = false;
+}
+
+// Tests that ASSERT_DEBUG_DEATH in debug mode aborts the function on
+// failure.
+TEST_F(TestForDeathTest, AssertDebugDeathAborts) {
+  static bool aborted;
+  aborted = false;
+  EXPECT_FATAL_FAILURE(AssertDebugDeathHelper(&aborted), "");
+  EXPECT_TRUE(aborted);
+}
+
+# endif  // NDEBUG
+
+// Tests the *_EXIT family of macros, using a variety of predicates.
+static void TestExitMacros() {
+  EXPECT_EXIT(_exit(1), testing::ExitedWithCode(1), "");
+  ASSERT_EXIT(_exit(42), testing::ExitedWithCode(42), "");
+
+# if GTEST_OS_WINDOWS
+
+  // Of all signals' effects on the process exit code, only those of SIGABRT
+  // are documented on Windows.
+  // See http://msdn.microsoft.com/en-us/library/dwwzkt4c(VS.71).aspx.
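+  // SIGABRT terminates a Windows process with exit code 3, which is what
+  // ExitedWithCode(3) relies on below.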
+ EXPECT_EXIT(raise(SIGABRT), testing::ExitedWithCode(3), "") << "b_ar"; + +# else + + EXPECT_EXIT(raise(SIGKILL), testing::KilledBySignal(SIGKILL), "") << "foo"; + ASSERT_EXIT(raise(SIGUSR2), testing::KilledBySignal(SIGUSR2), "") << "bar"; + + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_EXIT(_exit(0), testing::KilledBySignal(SIGSEGV), "") + << "This failure is expected, too."; + }, "This failure is expected, too."); + +# endif // GTEST_OS_WINDOWS + + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_EXIT(raise(SIGSEGV), testing::ExitedWithCode(0), "") + << "This failure is expected."; + }, "This failure is expected."); +} + +TEST_F(TestForDeathTest, ExitMacros) { + TestExitMacros(); +} + +TEST_F(TestForDeathTest, ExitMacrosUsingFork) { + testing::GTEST_FLAG(death_test_use_fork) = true; + TestExitMacros(); +} + +TEST_F(TestForDeathTest, InvalidStyle) { + testing::GTEST_FLAG(death_test_style) = "rococo"; + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_DEATH(_exit(0), "") << "This failure is expected."; + }, "This failure is expected."); +} + +TEST_F(TestForDeathTest, DeathTestFailedOutput) { + testing::GTEST_FLAG(death_test_style) = "fast"; + EXPECT_NONFATAL_FAILURE( + EXPECT_DEATH(DieWithMessage("death\n"), + "expected message"), + "Actual msg:\n" + "[ DEATH ] death\n"); +} + +TEST_F(TestForDeathTest, DeathTestUnexpectedReturnOutput) { + testing::GTEST_FLAG(death_test_style) = "fast"; + EXPECT_NONFATAL_FAILURE( + EXPECT_DEATH({ + fprintf(stderr, "returning\n"); + fflush(stderr); + return; + }, ""), + " Result: illegal return in test statement.\n" + " Error msg:\n" + "[ DEATH ] returning\n"); +} + +TEST_F(TestForDeathTest, DeathTestBadExitCodeOutput) { + testing::GTEST_FLAG(death_test_style) = "fast"; + EXPECT_NONFATAL_FAILURE( + EXPECT_EXIT(DieWithMessage("exiting with rc 1\n"), + testing::ExitedWithCode(3), + "expected message"), + " Result: died but not with expected exit code:\n" + " Exited with exit status 1\n" + "Actual msg:\n" + "[ DEATH ] exiting with rc 1\n"); +} + +TEST_F(TestForDeathTest, DeathTestMultiLineMatchFail) { + testing::GTEST_FLAG(death_test_style) = "fast"; + EXPECT_NONFATAL_FAILURE( + EXPECT_DEATH(DieWithMessage("line 1\nline 2\nline 3\n"), + "line 1\nxyz\nline 3\n"), + "Actual msg:\n" + "[ DEATH ] line 1\n" + "[ DEATH ] line 2\n" + "[ DEATH ] line 3\n"); +} + +TEST_F(TestForDeathTest, DeathTestMultiLineMatchPass) { + testing::GTEST_FLAG(death_test_style) = "fast"; + EXPECT_DEATH(DieWithMessage("line 1\nline 2\nline 3\n"), + "line 1\nline 2\nline 3\n"); +} + +// A DeathTestFactory that returns MockDeathTests. +class MockDeathTestFactory : public DeathTestFactory { + public: + MockDeathTestFactory(); + virtual bool Create(const char* statement, + const ::testing::internal::RE* regex, + const char* file, int line, DeathTest** test); + + // Sets the parameters for subsequent calls to Create. + void SetParameters(bool create, DeathTest::TestRole role, + int status, bool passed); + + // Accessors. + int AssumeRoleCalls() const { return assume_role_calls_; } + int WaitCalls() const { return wait_calls_; } + int PassedCalls() const { return passed_args_.size(); } + bool PassedArgument(int n) const { return passed_args_[n]; } + int AbortCalls() const { return abort_args_.size(); } + DeathTest::AbortReason AbortArgument(int n) const { + return abort_args_[n]; + } + bool TestDeleted() const { return test_deleted_; } + + private: + friend class MockDeathTest; + // If true, Create will return a MockDeathTest; otherwise it returns + // NULL. 
+  bool create_;
+  // The value a MockDeathTest will return from its AssumeRole method.
+  DeathTest::TestRole role_;
+  // The value a MockDeathTest will return from its Wait method.
+  int status_;
+  // The value a MockDeathTest will return from its Passed method.
+  bool passed_;
+
+  // Number of times AssumeRole was called.
+  int assume_role_calls_;
+  // Number of times Wait was called.
+  int wait_calls_;
+  // The arguments to the calls to Passed since the last call to
+  // SetParameters.
+  std::vector<bool> passed_args_;
+  // The arguments to the calls to Abort since the last call to
+  // SetParameters.
+  std::vector<DeathTest::AbortReason> abort_args_;
+  // True if the last MockDeathTest returned by Create has been
+  // deleted.
+  bool test_deleted_;
+};
+
+
+// A DeathTest implementation useful in testing.  It returns values set
+// at its creation from its various inherited DeathTest methods, and
+// reports calls to those methods to its parent MockDeathTestFactory
+// object.
+class MockDeathTest : public DeathTest {
+ public:
+  MockDeathTest(MockDeathTestFactory *parent,
+                TestRole role, int status, bool passed) :
+      parent_(parent), role_(role), status_(status), passed_(passed) {
+  }
+  virtual ~MockDeathTest() {
+    parent_->test_deleted_ = true;
+  }
+  virtual TestRole AssumeRole() {
+    ++parent_->assume_role_calls_;
+    return role_;
+  }
+  virtual int Wait() {
+    ++parent_->wait_calls_;
+    return status_;
+  }
+  virtual bool Passed(bool exit_status_ok) {
+    parent_->passed_args_.push_back(exit_status_ok);
+    return passed_;
+  }
+  virtual void Abort(AbortReason reason) {
+    parent_->abort_args_.push_back(reason);
+  }
+
+ private:
+  MockDeathTestFactory* const parent_;
+  const TestRole role_;
+  const int status_;
+  const bool passed_;
+};
+
+
+// MockDeathTestFactory constructor.
+MockDeathTestFactory::MockDeathTestFactory()
+    : create_(true),
+      role_(DeathTest::OVERSEE_TEST),
+      status_(0),
+      passed_(true),
+      assume_role_calls_(0),
+      wait_calls_(0),
+      passed_args_(),
+      abort_args_() {
+}
+
+
+// Sets the parameters for subsequent calls to Create.
+void MockDeathTestFactory::SetParameters(bool create,
+                                         DeathTest::TestRole role,
+                                         int status, bool passed) {
+  create_ = create;
+  role_ = role;
+  status_ = status;
+  passed_ = passed;
+
+  assume_role_calls_ = 0;
+  wait_calls_ = 0;
+  passed_args_.clear();
+  abort_args_.clear();
+}
+
+
+// Sets test to NULL (if create_ is false) or to the address of a new
+// MockDeathTest object with parameters taken from the last call
+// to SetParameters (if create_ is true).  Always returns true.
+bool MockDeathTestFactory::Create(const char* /*statement*/,
+                                  const ::testing::internal::RE* /*regex*/,
+                                  const char* /*file*/,
+                                  int /*line*/,
+                                  DeathTest** test) {
+  test_deleted_ = false;
+  if (create_) {
+    *test = new MockDeathTest(this, role_, status_, passed_);
+  } else {
+    *test = NULL;
+  }
+  return true;
+}
+
+// A test fixture for testing the logic of the GTEST_DEATH_TEST_ macro.
+// It installs a MockDeathTestFactory that is used for the duration
+// of the test case.
+class MacroLogicDeathTest : public testing::Test {
+ protected:
+  static testing::internal::ReplaceDeathTestFactory* replacer_;
+  static MockDeathTestFactory* factory_;
+
+  static void SetUpTestCase() {
+    factory_ = new MockDeathTestFactory;
+    replacer_ = new testing::internal::ReplaceDeathTestFactory(factory_);
+  }
+
+  static void TearDownTestCase() {
+    delete replacer_;
+    replacer_ = NULL;
+    delete factory_;
+    factory_ = NULL;
+  }
+
+  // Runs a death test that breaks the rules by returning.
Such a death + // test cannot be run directly from a test routine that uses a + // MockDeathTest, or the remainder of the routine will not be executed. + static void RunReturningDeathTest(bool* flag) { + ASSERT_DEATH({ // NOLINT + *flag = true; + return; + }, ""); + } +}; + +testing::internal::ReplaceDeathTestFactory* MacroLogicDeathTest::replacer_ + = NULL; +MockDeathTestFactory* MacroLogicDeathTest::factory_ = NULL; + + +// Test that nothing happens when the factory doesn't return a DeathTest: +TEST_F(MacroLogicDeathTest, NothingHappens) { + bool flag = false; + factory_->SetParameters(false, DeathTest::OVERSEE_TEST, 0, true); + EXPECT_DEATH(flag = true, ""); + EXPECT_FALSE(flag); + EXPECT_EQ(0, factory_->AssumeRoleCalls()); + EXPECT_EQ(0, factory_->WaitCalls()); + EXPECT_EQ(0, factory_->PassedCalls()); + EXPECT_EQ(0, factory_->AbortCalls()); + EXPECT_FALSE(factory_->TestDeleted()); +} + +// Test that the parent process doesn't run the death test code, +// and that the Passed method returns false when the (simulated) +// child process exits with status 0: +TEST_F(MacroLogicDeathTest, ChildExitsSuccessfully) { + bool flag = false; + factory_->SetParameters(true, DeathTest::OVERSEE_TEST, 0, true); + EXPECT_DEATH(flag = true, ""); + EXPECT_FALSE(flag); + EXPECT_EQ(1, factory_->AssumeRoleCalls()); + EXPECT_EQ(1, factory_->WaitCalls()); + ASSERT_EQ(1, factory_->PassedCalls()); + EXPECT_FALSE(factory_->PassedArgument(0)); + EXPECT_EQ(0, factory_->AbortCalls()); + EXPECT_TRUE(factory_->TestDeleted()); +} + +// Tests that the Passed method was given the argument "true" when +// the (simulated) child process exits with status 1: +TEST_F(MacroLogicDeathTest, ChildExitsUnsuccessfully) { + bool flag = false; + factory_->SetParameters(true, DeathTest::OVERSEE_TEST, 1, true); + EXPECT_DEATH(flag = true, ""); + EXPECT_FALSE(flag); + EXPECT_EQ(1, factory_->AssumeRoleCalls()); + EXPECT_EQ(1, factory_->WaitCalls()); + ASSERT_EQ(1, factory_->PassedCalls()); + EXPECT_TRUE(factory_->PassedArgument(0)); + EXPECT_EQ(0, factory_->AbortCalls()); + EXPECT_TRUE(factory_->TestDeleted()); +} + +// Tests that the (simulated) child process executes the death test +// code, and is aborted with the correct AbortReason if it +// executes a return statement. +TEST_F(MacroLogicDeathTest, ChildPerformsReturn) { + bool flag = false; + factory_->SetParameters(true, DeathTest::EXECUTE_TEST, 0, true); + RunReturningDeathTest(&flag); + EXPECT_TRUE(flag); + EXPECT_EQ(1, factory_->AssumeRoleCalls()); + EXPECT_EQ(0, factory_->WaitCalls()); + EXPECT_EQ(0, factory_->PassedCalls()); + EXPECT_EQ(1, factory_->AbortCalls()); + EXPECT_EQ(DeathTest::TEST_ENCOUNTERED_RETURN_STATEMENT, + factory_->AbortArgument(0)); + EXPECT_TRUE(factory_->TestDeleted()); +} + +// Tests that the (simulated) child process is aborted with the +// correct AbortReason if it does not die. +TEST_F(MacroLogicDeathTest, ChildDoesNotDie) { + bool flag = false; + factory_->SetParameters(true, DeathTest::EXECUTE_TEST, 0, true); + EXPECT_DEATH(flag = true, ""); + EXPECT_TRUE(flag); + EXPECT_EQ(1, factory_->AssumeRoleCalls()); + EXPECT_EQ(0, factory_->WaitCalls()); + EXPECT_EQ(0, factory_->PassedCalls()); + // This time there are two calls to Abort: one since the test didn't + // die, and another from the ReturnSentinel when it's destroyed. The + // sentinel normally isn't destroyed if a test doesn't die, since + // _exit(2) is called in that case by ForkingDeathTest, but not by + // our MockDeathTest. 
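+  // The assertions below therefore check both the number of Abort() calls
+  // and the order of the recorded AbortReason arguments.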
+  ASSERT_EQ(2, factory_->AbortCalls());
+  EXPECT_EQ(DeathTest::TEST_DID_NOT_DIE,
+            factory_->AbortArgument(0));
+  EXPECT_EQ(DeathTest::TEST_ENCOUNTERED_RETURN_STATEMENT,
+            factory_->AbortArgument(1));
+  EXPECT_TRUE(factory_->TestDeleted());
+}
+
+// Tests that a successful death test does not register a successful
+// test part.
+TEST(SuccessRegistrationDeathTest, NoSuccessPart) {
+  EXPECT_DEATH(_exit(1), "");
+  EXPECT_EQ(0, GetUnitTestImpl()->current_test_result()->total_part_count());
+}
+
+TEST(StreamingAssertionsDeathTest, DeathTest) {
+  EXPECT_DEATH(_exit(1), "") << "unexpected failure";
+  ASSERT_DEATH(_exit(1), "") << "unexpected failure";
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_DEATH(_exit(0), "") << "expected failure";
+  }, "expected failure");
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_DEATH(_exit(0), "") << "expected failure";
+  }, "expected failure");
+}
+
+// Tests that GetLastErrnoDescription returns an empty string when the
+// last error is 0 and non-empty string when it is non-zero.
+TEST(GetLastErrnoDescription, GetLastErrnoDescriptionWorks) {
+  errno = ENOENT;
+  EXPECT_STRNE("", GetLastErrnoDescription().c_str());
+  errno = 0;
+  EXPECT_STREQ("", GetLastErrnoDescription().c_str());
+}
+
+# if GTEST_OS_WINDOWS
+TEST(AutoHandleTest, AutoHandleWorks) {
+  HANDLE handle = ::CreateEvent(NULL, FALSE, FALSE, NULL);
+  ASSERT_NE(INVALID_HANDLE_VALUE, handle);
+
+  // Tests that the AutoHandle is correctly initialized with a handle.
+  testing::internal::AutoHandle auto_handle(handle);
+  EXPECT_EQ(handle, auto_handle.Get());
+
+  // Tests that Reset assigns INVALID_HANDLE_VALUE.
+  // Note that this cannot verify whether the original handle is closed.
+  auto_handle.Reset();
+  EXPECT_EQ(INVALID_HANDLE_VALUE, auto_handle.Get());
+
+  // Tests that Reset assigns the new handle.
+  // Note that this cannot verify whether the original handle is closed.
+  handle = ::CreateEvent(NULL, FALSE, FALSE, NULL);
+  ASSERT_NE(INVALID_HANDLE_VALUE, handle);
+  auto_handle.Reset(handle);
+  EXPECT_EQ(handle, auto_handle.Get());
+
+  // Tests that AutoHandle contains INVALID_HANDLE_VALUE by default.
+  testing::internal::AutoHandle auto_handle2;
+  EXPECT_EQ(INVALID_HANDLE_VALUE, auto_handle2.Get());
+}
+# endif  // GTEST_OS_WINDOWS
+
+# if GTEST_OS_WINDOWS
+typedef unsigned __int64 BiggestParsable;
+typedef signed __int64 BiggestSignedParsable;
+# else
+typedef unsigned long long BiggestParsable;
+typedef signed long long BiggestSignedParsable;
+# endif  // GTEST_OS_WINDOWS
+
+// We cannot use std::numeric_limits<T>::max() as it clashes with the
+// max() macro defined by <windows.h>.
+const BiggestParsable kBiggestParsableMax = ULLONG_MAX;
+const BiggestSignedParsable kBiggestSignedParsableMax = LLONG_MAX;
+
+TEST(ParseNaturalNumberTest, RejectsInvalidFormat) {
+  BiggestParsable result = 0;
+
+  // Rejects non-numbers.
+  EXPECT_FALSE(ParseNaturalNumber("non-number string", &result));
+
+  // Rejects numbers with whitespace prefix.
+  EXPECT_FALSE(ParseNaturalNumber(" 123", &result));
+
+  // Rejects negative numbers.
+  EXPECT_FALSE(ParseNaturalNumber("-123", &result));
+
+  // Rejects numbers starting with a plus sign.
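+  // (A plain strtoul() would accept a leading '+' and leading whitespace,
+  // so these cases check that ParseNaturalNumber is stricter than the
+  // underlying C conversion routine.)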
+ EXPECT_FALSE(ParseNaturalNumber("+123", &result)); + errno = 0; +} + +TEST(ParseNaturalNumberTest, RejectsOverflownNumbers) { + BiggestParsable result = 0; + + EXPECT_FALSE(ParseNaturalNumber("99999999999999999999999", &result)); + + signed char char_result = 0; + EXPECT_FALSE(ParseNaturalNumber("200", &char_result)); + errno = 0; +} + +TEST(ParseNaturalNumberTest, AcceptsValidNumbers) { + BiggestParsable result = 0; + + result = 0; + ASSERT_TRUE(ParseNaturalNumber("123", &result)); + EXPECT_EQ(123U, result); + + // Check 0 as an edge case. + result = 1; + ASSERT_TRUE(ParseNaturalNumber("0", &result)); + EXPECT_EQ(0U, result); + + result = 1; + ASSERT_TRUE(ParseNaturalNumber("00000", &result)); + EXPECT_EQ(0U, result); +} + +TEST(ParseNaturalNumberTest, AcceptsTypeLimits) { + Message msg; + msg << kBiggestParsableMax; + + BiggestParsable result = 0; + EXPECT_TRUE(ParseNaturalNumber(msg.GetString(), &result)); + EXPECT_EQ(kBiggestParsableMax, result); + + Message msg2; + msg2 << kBiggestSignedParsableMax; + + BiggestSignedParsable signed_result = 0; + EXPECT_TRUE(ParseNaturalNumber(msg2.GetString(), &signed_result)); + EXPECT_EQ(kBiggestSignedParsableMax, signed_result); + + Message msg3; + msg3 << INT_MAX; + + int int_result = 0; + EXPECT_TRUE(ParseNaturalNumber(msg3.GetString(), &int_result)); + EXPECT_EQ(INT_MAX, int_result); + + Message msg4; + msg4 << UINT_MAX; + + unsigned int uint_result = 0; + EXPECT_TRUE(ParseNaturalNumber(msg4.GetString(), &uint_result)); + EXPECT_EQ(UINT_MAX, uint_result); +} + +TEST(ParseNaturalNumberTest, WorksForShorterIntegers) { + short short_result = 0; + ASSERT_TRUE(ParseNaturalNumber("123", &short_result)); + EXPECT_EQ(123, short_result); + + signed char char_result = 0; + ASSERT_TRUE(ParseNaturalNumber("123", &char_result)); + EXPECT_EQ(123, char_result); +} + +# if GTEST_OS_WINDOWS +TEST(EnvironmentTest, HandleFitsIntoSizeT) { + // TODO(vladl@google.com): Remove this test after this condition is verified + // in a static assertion in gtest-death-test.cc in the function + // GetStatusFileDescriptor. + ASSERT_TRUE(sizeof(HANDLE) <= sizeof(size_t)); +} +# endif // GTEST_OS_WINDOWS + +// Tests that EXPECT_DEATH_IF_SUPPORTED/ASSERT_DEATH_IF_SUPPORTED trigger +// failures when death tests are available on the system. +TEST(ConditionalDeathMacrosDeathTest, ExpectsDeathWhenDeathTestsAvailable) { + EXPECT_DEATH_IF_SUPPORTED(DieInside("CondDeathTestExpectMacro"), + "death inside CondDeathTestExpectMacro"); + ASSERT_DEATH_IF_SUPPORTED(DieInside("CondDeathTestAssertMacro"), + "death inside CondDeathTestAssertMacro"); + + // Empty statement will not crash, which must trigger a failure. + EXPECT_NONFATAL_FAILURE(EXPECT_DEATH_IF_SUPPORTED(;, ""), ""); + EXPECT_FATAL_FAILURE(ASSERT_DEATH_IF_SUPPORTED(;, ""), ""); +} + +#else + +using testing::internal::CaptureStderr; +using testing::internal::GetCapturedStderr; + +// Tests that EXPECT_DEATH_IF_SUPPORTED/ASSERT_DEATH_IF_SUPPORTED are still +// defined but do not trigger failures when death tests are not available on +// the system. +TEST(ConditionalDeathMacrosTest, WarnsWhenDeathTestsNotAvailable) { + // Empty statement will not crash, but that should not trigger a failure + // when death tests are not supported. 
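+  // Instead, the macro is expected to print a warning to stderr; the test
+  // captures stderr and checks the warning text below.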
+  CaptureStderr();
+  EXPECT_DEATH_IF_SUPPORTED(;, "");
+  std::string output = GetCapturedStderr();
+  ASSERT_TRUE(NULL != strstr(output.c_str(),
+                             "Death tests are not supported on this platform"));
+  ASSERT_TRUE(NULL != strstr(output.c_str(), ";"));
+
+  // The streamed message should not be printed as there is no test failure.
+  CaptureStderr();
+  EXPECT_DEATH_IF_SUPPORTED(;, "") << "streamed message";
+  output = GetCapturedStderr();
+  ASSERT_TRUE(NULL == strstr(output.c_str(), "streamed message"));
+
+  CaptureStderr();
+  ASSERT_DEATH_IF_SUPPORTED(;, "");  // NOLINT
+  output = GetCapturedStderr();
+  ASSERT_TRUE(NULL != strstr(output.c_str(),
+                             "Death tests are not supported on this platform"));
+  ASSERT_TRUE(NULL != strstr(output.c_str(), ";"));
+
+  CaptureStderr();
+  ASSERT_DEATH_IF_SUPPORTED(;, "") << "streamed message";  // NOLINT
+  output = GetCapturedStderr();
+  ASSERT_TRUE(NULL == strstr(output.c_str(), "streamed message"));
+}
+
+void FuncWithAssert(int* n) {
+  ASSERT_DEATH_IF_SUPPORTED(return;, "");
+  (*n)++;
+}
+
+// Tests that ASSERT_DEATH_IF_SUPPORTED does not return from the current
+// function (as ASSERT_DEATH does) if death tests are not supported.
+TEST(ConditionalDeathMacrosTest, AssertDeathDoesNotReturnIfUnsupported) {
+  int n = 0;
+  FuncWithAssert(&n);
+  EXPECT_EQ(1, n);
+}
+
+TEST(InDeathTestChildDeathTest, ReportsDeathTestCorrectlyInFastStyle) {
+  testing::GTEST_FLAG(death_test_style) = "fast";
+  EXPECT_FALSE(InDeathTestChild());
+  EXPECT_DEATH({
+    fprintf(stderr, InDeathTestChild() ? "Inside" : "Outside");
+    fflush(stderr);
+    _exit(1);
+  }, "Inside");
+}
+
+TEST(InDeathTestChildDeathTest, ReportsDeathTestCorrectlyInThreadSafeStyle) {
+  testing::GTEST_FLAG(death_test_style) = "threadsafe";
+  EXPECT_FALSE(InDeathTestChild());
+  EXPECT_DEATH({
+    fprintf(stderr, InDeathTestChild() ? "Inside" : "Outside");
+    fflush(stderr);
+    _exit(1);
+  }, "Inside");
+}
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+// Tests that the death test macros expand to code which may or may not
+// be followed by operator<<, and that in either case the complete text
+// comprises only a single C++ statement.
+//
+// The syntax should work whether death tests are available or not.
+TEST(ConditionalDeathMacrosSyntaxDeathTest, SingleStatement) {
+  if (AlwaysFalse())
+    // This would fail if executed; this is a compilation test only
+    ASSERT_DEATH_IF_SUPPORTED(return, "");
+
+  if (AlwaysTrue())
+    EXPECT_DEATH_IF_SUPPORTED(_exit(1), "");
+  else
+    // This empty "else" branch is meant to ensure that EXPECT_DEATH
+    // doesn't expand into an "if" statement without an "else"
+    ;  // NOLINT
+
+  if (AlwaysFalse())
+    ASSERT_DEATH_IF_SUPPORTED(return, "") << "did not die";
+
+  if (AlwaysFalse())
+    ;  // NOLINT
+  else
+    EXPECT_DEATH_IF_SUPPORTED(_exit(1), "") << 1 << 2 << 3;
+}
+
+// Tests that conditional death test macros expand to code which interacts
+// well with switch statements.
+TEST(ConditionalDeathMacrosSyntaxDeathTest, SwitchStatement) {
+// Microsoft compiler usually complains about switch statements without
+// case labels. We suppress that warning for this test.
+#ifdef _MSC_VER
+# pragma warning(push)
+# pragma warning(disable: 4065)
+#endif  // _MSC_VER
+
+  switch (0)
+    default:
+      ASSERT_DEATH_IF_SUPPORTED(_exit(1), "")
+          << "exit in default switch handler";
+
+  switch (0)
+    case 0:
+      EXPECT_DEATH_IF_SUPPORTED(_exit(1), "") << "exit in switch case";
+
+#ifdef _MSC_VER
+# pragma warning(pop)
+#endif  // _MSC_VER
+}
+
+// Tests that a test case whose name ends with "DeathTest" works fine
+// on Windows.
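+// ("NotADeathTest" still carries the "DeathTest" suffix, so Google Test
+// schedules it ahead of ordinary tests even though it never spawns a
+// child process.)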
+TEST(NotADeathTest, Test) {
+  SUCCEED();
+}
diff --git a/external/gtest/test/gtest-filepath_test.cc b/external/gtest/test/gtest-filepath_test.cc
new file mode 100755
index 0000000000..ae9f55a0c9
--- /dev/null
+++ b/external/gtest/test/gtest-filepath_test.cc
@@ -0,0 +1,680 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: keith.ray@gmail.com (Keith Ray)
+//
+// Google Test filepath utilities
+//
+// This file tests classes and functions used internally by
+// Google Test.  They are subject to change without notice.
+//
+// This file is #included from gtest_unittest.cc, to avoid changing
+// build or make-files for some existing Google Test clients. Do not
+// #include this file anywhere else!
+
+#include "gtest/internal/gtest-filepath.h"
+#include "gtest/gtest.h"
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#include "src/gtest-internal-inl.h"
+#undef GTEST_IMPLEMENTATION_
+
+#if GTEST_OS_WINDOWS_MOBILE
+# include <windows.h>  // NOLINT
+#elif GTEST_OS_WINDOWS
+# include <direct.h>  // NOLINT
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+namespace testing {
+namespace internal {
+namespace {
+
+#if GTEST_OS_WINDOWS_MOBILE
+// TODO(wan@google.com): Move these to the POSIX adapter section in
+// gtest-port.h.
+
+// Windows CE doesn't have the remove C function.
+int remove(const char* path) {
+  LPCWSTR wpath = String::AnsiToUtf16(path);
+  int ret = DeleteFile(wpath) ? 0 : -1;
+  delete [] wpath;
+  return ret;
+}
+// Windows CE doesn't have the _rmdir C function.
+int _rmdir(const char* path) {
+  FilePath filepath(path);
+  LPCWSTR wpath = String::AnsiToUtf16(
+      filepath.RemoveTrailingPathSeparator().c_str());
+  int ret = RemoveDirectory(wpath) ?
0 : -1; + delete [] wpath; + return ret; +} + +#else + +TEST(GetCurrentDirTest, ReturnsCurrentDir) { + const FilePath original_dir = FilePath::GetCurrentDir(); + EXPECT_FALSE(original_dir.IsEmpty()); + + posix::ChDir(GTEST_PATH_SEP_); + const FilePath cwd = FilePath::GetCurrentDir(); + posix::ChDir(original_dir.c_str()); + +# if GTEST_OS_WINDOWS + + // Skips the ":". + const char* const cwd_without_drive = strchr(cwd.c_str(), ':'); + ASSERT_TRUE(cwd_without_drive != NULL); + EXPECT_STREQ(GTEST_PATH_SEP_, cwd_without_drive + 1); + +# else + + EXPECT_EQ(GTEST_PATH_SEP_, cwd.string()); + +# endif +} + +#endif // GTEST_OS_WINDOWS_MOBILE + +TEST(IsEmptyTest, ReturnsTrueForEmptyPath) { + EXPECT_TRUE(FilePath("").IsEmpty()); +} + +TEST(IsEmptyTest, ReturnsFalseForNonEmptyPath) { + EXPECT_FALSE(FilePath("a").IsEmpty()); + EXPECT_FALSE(FilePath(".").IsEmpty()); + EXPECT_FALSE(FilePath("a/b").IsEmpty()); + EXPECT_FALSE(FilePath("a\\b\\").IsEmpty()); +} + +// RemoveDirectoryName "" -> "" +TEST(RemoveDirectoryNameTest, WhenEmptyName) { + EXPECT_EQ("", FilePath("").RemoveDirectoryName().string()); +} + +// RemoveDirectoryName "afile" -> "afile" +TEST(RemoveDirectoryNameTest, ButNoDirectory) { + EXPECT_EQ("afile", + FilePath("afile").RemoveDirectoryName().string()); +} + +// RemoveDirectoryName "/afile" -> "afile" +TEST(RemoveDirectoryNameTest, RootFileShouldGiveFileName) { + EXPECT_EQ("afile", + FilePath(GTEST_PATH_SEP_ "afile").RemoveDirectoryName().string()); +} + +// RemoveDirectoryName "adir/" -> "" +TEST(RemoveDirectoryNameTest, WhereThereIsNoFileName) { + EXPECT_EQ("", + FilePath("adir" GTEST_PATH_SEP_).RemoveDirectoryName().string()); +} + +// RemoveDirectoryName "adir/afile" -> "afile" +TEST(RemoveDirectoryNameTest, ShouldGiveFileName) { + EXPECT_EQ("afile", + FilePath("adir" GTEST_PATH_SEP_ "afile").RemoveDirectoryName().string()); +} + +// RemoveDirectoryName "adir/subdir/afile" -> "afile" +TEST(RemoveDirectoryNameTest, ShouldAlsoGiveFileName) { + EXPECT_EQ("afile", + FilePath("adir" GTEST_PATH_SEP_ "subdir" GTEST_PATH_SEP_ "afile") + .RemoveDirectoryName().string()); +} + +#if GTEST_HAS_ALT_PATH_SEP_ + +// Tests that RemoveDirectoryName() works with the alternate separator +// on Windows. + +// RemoveDirectoryName("/afile") -> "afile" +TEST(RemoveDirectoryNameTest, RootFileShouldGiveFileNameForAlternateSeparator) { + EXPECT_EQ("afile", FilePath("/afile").RemoveDirectoryName().string()); +} + +// RemoveDirectoryName("adir/") -> "" +TEST(RemoveDirectoryNameTest, WhereThereIsNoFileNameForAlternateSeparator) { + EXPECT_EQ("", FilePath("adir/").RemoveDirectoryName().string()); +} + +// RemoveDirectoryName("adir/afile") -> "afile" +TEST(RemoveDirectoryNameTest, ShouldGiveFileNameForAlternateSeparator) { + EXPECT_EQ("afile", FilePath("adir/afile").RemoveDirectoryName().string()); +} + +// RemoveDirectoryName("adir/subdir/afile") -> "afile" +TEST(RemoveDirectoryNameTest, ShouldAlsoGiveFileNameForAlternateSeparator) { + EXPECT_EQ("afile", + FilePath("adir/subdir/afile").RemoveDirectoryName().string()); +} + +#endif + +// RemoveFileName "" -> "./" +TEST(RemoveFileNameTest, EmptyName) { +#if GTEST_OS_WINDOWS_MOBILE + // On Windows CE, we use the root as the current directory. + EXPECT_EQ(GTEST_PATH_SEP_, FilePath("").RemoveFileName().string()); +#else + EXPECT_EQ("." 
GTEST_PATH_SEP_, FilePath("").RemoveFileName().string()); +#endif +} + +// RemoveFileName "adir/" -> "adir/" +TEST(RemoveFileNameTest, ButNoFile) { + EXPECT_EQ("adir" GTEST_PATH_SEP_, + FilePath("adir" GTEST_PATH_SEP_).RemoveFileName().string()); +} + +// RemoveFileName "adir/afile" -> "adir/" +TEST(RemoveFileNameTest, GivesDirName) { + EXPECT_EQ("adir" GTEST_PATH_SEP_, + FilePath("adir" GTEST_PATH_SEP_ "afile").RemoveFileName().string()); +} + +// RemoveFileName "adir/subdir/afile" -> "adir/subdir/" +TEST(RemoveFileNameTest, GivesDirAndSubDirName) { + EXPECT_EQ("adir" GTEST_PATH_SEP_ "subdir" GTEST_PATH_SEP_, + FilePath("adir" GTEST_PATH_SEP_ "subdir" GTEST_PATH_SEP_ "afile") + .RemoveFileName().string()); +} + +// RemoveFileName "/afile" -> "/" +TEST(RemoveFileNameTest, GivesRootDir) { + EXPECT_EQ(GTEST_PATH_SEP_, + FilePath(GTEST_PATH_SEP_ "afile").RemoveFileName().string()); +} + +#if GTEST_HAS_ALT_PATH_SEP_ + +// Tests that RemoveFileName() works with the alternate separator on +// Windows. + +// RemoveFileName("adir/") -> "adir/" +TEST(RemoveFileNameTest, ButNoFileForAlternateSeparator) { + EXPECT_EQ("adir" GTEST_PATH_SEP_, + FilePath("adir/").RemoveFileName().string()); +} + +// RemoveFileName("adir/afile") -> "adir/" +TEST(RemoveFileNameTest, GivesDirNameForAlternateSeparator) { + EXPECT_EQ("adir" GTEST_PATH_SEP_, + FilePath("adir/afile").RemoveFileName().string()); +} + +// RemoveFileName("adir/subdir/afile") -> "adir/subdir/" +TEST(RemoveFileNameTest, GivesDirAndSubDirNameForAlternateSeparator) { + EXPECT_EQ("adir" GTEST_PATH_SEP_ "subdir" GTEST_PATH_SEP_, + FilePath("adir/subdir/afile").RemoveFileName().string()); +} + +// RemoveFileName("/afile") -> "\" +TEST(RemoveFileNameTest, GivesRootDirForAlternateSeparator) { + EXPECT_EQ(GTEST_PATH_SEP_, FilePath("/afile").RemoveFileName().string()); +} + +#endif + +TEST(MakeFileNameTest, GenerateWhenNumberIsZero) { + FilePath actual = FilePath::MakeFileName(FilePath("foo"), FilePath("bar"), + 0, "xml"); + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar.xml", actual.string()); +} + +TEST(MakeFileNameTest, GenerateFileNameNumberGtZero) { + FilePath actual = FilePath::MakeFileName(FilePath("foo"), FilePath("bar"), + 12, "xml"); + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar_12.xml", actual.string()); +} + +TEST(MakeFileNameTest, GenerateFileNameWithSlashNumberIsZero) { + FilePath actual = FilePath::MakeFileName(FilePath("foo" GTEST_PATH_SEP_), + FilePath("bar"), 0, "xml"); + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar.xml", actual.string()); +} + +TEST(MakeFileNameTest, GenerateFileNameWithSlashNumberGtZero) { + FilePath actual = FilePath::MakeFileName(FilePath("foo" GTEST_PATH_SEP_), + FilePath("bar"), 12, "xml"); + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar_12.xml", actual.string()); +} + +TEST(MakeFileNameTest, GenerateWhenNumberIsZeroAndDirIsEmpty) { + FilePath actual = FilePath::MakeFileName(FilePath(""), FilePath("bar"), + 0, "xml"); + EXPECT_EQ("bar.xml", actual.string()); +} + +TEST(MakeFileNameTest, GenerateWhenNumberIsNotZeroAndDirIsEmpty) { + FilePath actual = FilePath::MakeFileName(FilePath(""), FilePath("bar"), + 14, "xml"); + EXPECT_EQ("bar_14.xml", actual.string()); +} + +TEST(ConcatPathsTest, WorksWhenDirDoesNotEndWithPathSep) { + FilePath actual = FilePath::ConcatPaths(FilePath("foo"), + FilePath("bar.xml")); + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar.xml", actual.string()); +} + +TEST(ConcatPathsTest, WorksWhenPath1EndsWithPathSep) { + FilePath actual = FilePath::ConcatPaths(FilePath("foo" GTEST_PATH_SEP_), + FilePath("bar.xml")); + EXPECT_EQ("foo" 
GTEST_PATH_SEP_ "bar.xml", actual.string()); +} + +TEST(ConcatPathsTest, Path1BeingEmpty) { + FilePath actual = FilePath::ConcatPaths(FilePath(""), + FilePath("bar.xml")); + EXPECT_EQ("bar.xml", actual.string()); +} + +TEST(ConcatPathsTest, Path2BeingEmpty) { + FilePath actual = FilePath::ConcatPaths(FilePath("foo"), FilePath("")); + EXPECT_EQ("foo" GTEST_PATH_SEP_, actual.string()); +} + +TEST(ConcatPathsTest, BothPathBeingEmpty) { + FilePath actual = FilePath::ConcatPaths(FilePath(""), + FilePath("")); + EXPECT_EQ("", actual.string()); +} + +TEST(ConcatPathsTest, Path1ContainsPathSep) { + FilePath actual = FilePath::ConcatPaths(FilePath("foo" GTEST_PATH_SEP_ "bar"), + FilePath("foobar.xml")); + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar" GTEST_PATH_SEP_ "foobar.xml", + actual.string()); +} + +TEST(ConcatPathsTest, Path2ContainsPathSep) { + FilePath actual = FilePath::ConcatPaths( + FilePath("foo" GTEST_PATH_SEP_), + FilePath("bar" GTEST_PATH_SEP_ "bar.xml")); + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar" GTEST_PATH_SEP_ "bar.xml", + actual.string()); +} + +TEST(ConcatPathsTest, Path2EndsWithPathSep) { + FilePath actual = FilePath::ConcatPaths(FilePath("foo"), + FilePath("bar" GTEST_PATH_SEP_)); + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar" GTEST_PATH_SEP_, actual.string()); +} + +// RemoveTrailingPathSeparator "" -> "" +TEST(RemoveTrailingPathSeparatorTest, EmptyString) { + EXPECT_EQ("", FilePath("").RemoveTrailingPathSeparator().string()); +} + +// RemoveTrailingPathSeparator "foo" -> "foo" +TEST(RemoveTrailingPathSeparatorTest, FileNoSlashString) { + EXPECT_EQ("foo", FilePath("foo").RemoveTrailingPathSeparator().string()); +} + +// RemoveTrailingPathSeparator "foo/" -> "foo" +TEST(RemoveTrailingPathSeparatorTest, ShouldRemoveTrailingSeparator) { + EXPECT_EQ("foo", + FilePath("foo" GTEST_PATH_SEP_).RemoveTrailingPathSeparator().string()); +#if GTEST_HAS_ALT_PATH_SEP_ + EXPECT_EQ("foo", FilePath("foo/").RemoveTrailingPathSeparator().string()); +#endif +} + +// RemoveTrailingPathSeparator "foo/bar/" -> "foo/bar/" +TEST(RemoveTrailingPathSeparatorTest, ShouldRemoveLastSeparator) { + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar", + FilePath("foo" GTEST_PATH_SEP_ "bar" GTEST_PATH_SEP_) + .RemoveTrailingPathSeparator().string()); +} + +// RemoveTrailingPathSeparator "foo/bar" -> "foo/bar" +TEST(RemoveTrailingPathSeparatorTest, ShouldReturnUnmodified) { + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar", + FilePath("foo" GTEST_PATH_SEP_ "bar") + .RemoveTrailingPathSeparator().string()); +} + +TEST(DirectoryTest, RootDirectoryExists) { +#if GTEST_OS_WINDOWS // We are on Windows. + char current_drive[_MAX_PATH]; // NOLINT + current_drive[0] = static_cast(_getdrive() + 'A' - 1); + current_drive[1] = ':'; + current_drive[2] = '\\'; + current_drive[3] = '\0'; + EXPECT_TRUE(FilePath(current_drive).DirectoryExists()); +#else + EXPECT_TRUE(FilePath("/").DirectoryExists()); +#endif // GTEST_OS_WINDOWS +} + +#if GTEST_OS_WINDOWS +TEST(DirectoryTest, RootOfWrongDriveDoesNotExists) { + const int saved_drive_ = _getdrive(); + // Find a drive that doesn't exist. Start with 'Z' to avoid common ones. + for (char drive = 'Z'; drive >= 'A'; drive--) + if (_chdrive(drive - 'A' + 1) == -1) { + char non_drive[_MAX_PATH]; // NOLINT + non_drive[0] = drive; + non_drive[1] = ':'; + non_drive[2] = '\\'; + non_drive[3] = '\0'; + EXPECT_FALSE(FilePath(non_drive).DirectoryExists()); + break; + } + _chdrive(saved_drive_); +} +#endif // GTEST_OS_WINDOWS + +#if !GTEST_OS_WINDOWS_MOBILE +// Windows CE _does_ consider an empty directory to exist. 
+TEST(DirectoryTest, EmptyPathDirectoryDoesNotExist) { + EXPECT_FALSE(FilePath("").DirectoryExists()); +} +#endif // !GTEST_OS_WINDOWS_MOBILE + +TEST(DirectoryTest, CurrentDirectoryExists) { +#if GTEST_OS_WINDOWS // We are on Windows. +# ifndef _WIN32_CE // Windows CE doesn't have a current directory. + + EXPECT_TRUE(FilePath(".").DirectoryExists()); + EXPECT_TRUE(FilePath(".\\").DirectoryExists()); + +# endif // _WIN32_CE +#else + EXPECT_TRUE(FilePath(".").DirectoryExists()); + EXPECT_TRUE(FilePath("./").DirectoryExists()); +#endif // GTEST_OS_WINDOWS +} + +// "foo/bar" == foo//bar" == "foo///bar" +TEST(NormalizeTest, MultipleConsecutiveSepaparatorsInMidstring) { + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar", + FilePath("foo" GTEST_PATH_SEP_ "bar").string()); + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar", + FilePath("foo" GTEST_PATH_SEP_ GTEST_PATH_SEP_ "bar").string()); + EXPECT_EQ("foo" GTEST_PATH_SEP_ "bar", + FilePath("foo" GTEST_PATH_SEP_ GTEST_PATH_SEP_ + GTEST_PATH_SEP_ "bar").string()); +} + +// "/bar" == //bar" == "///bar" +TEST(NormalizeTest, MultipleConsecutiveSepaparatorsAtStringStart) { + EXPECT_EQ(GTEST_PATH_SEP_ "bar", + FilePath(GTEST_PATH_SEP_ "bar").string()); + EXPECT_EQ(GTEST_PATH_SEP_ "bar", + FilePath(GTEST_PATH_SEP_ GTEST_PATH_SEP_ "bar").string()); + EXPECT_EQ(GTEST_PATH_SEP_ "bar", + FilePath(GTEST_PATH_SEP_ GTEST_PATH_SEP_ GTEST_PATH_SEP_ "bar").string()); +} + +// "foo/" == foo//" == "foo///" +TEST(NormalizeTest, MultipleConsecutiveSepaparatorsAtStringEnd) { + EXPECT_EQ("foo" GTEST_PATH_SEP_, + FilePath("foo" GTEST_PATH_SEP_).string()); + EXPECT_EQ("foo" GTEST_PATH_SEP_, + FilePath("foo" GTEST_PATH_SEP_ GTEST_PATH_SEP_).string()); + EXPECT_EQ("foo" GTEST_PATH_SEP_, + FilePath("foo" GTEST_PATH_SEP_ GTEST_PATH_SEP_ GTEST_PATH_SEP_).string()); +} + +#if GTEST_HAS_ALT_PATH_SEP_ + +// Tests that separators at the end of the string are normalized +// regardless of their combination (e.g. "foo\" =="foo/\" == +// "foo\\/"). +TEST(NormalizeTest, MixAlternateSeparatorAtStringEnd) { + EXPECT_EQ("foo" GTEST_PATH_SEP_, + FilePath("foo/").string()); + EXPECT_EQ("foo" GTEST_PATH_SEP_, + FilePath("foo" GTEST_PATH_SEP_ "/").string()); + EXPECT_EQ("foo" GTEST_PATH_SEP_, + FilePath("foo//" GTEST_PATH_SEP_).string()); +} + +#endif + +TEST(AssignmentOperatorTest, DefaultAssignedToNonDefault) { + FilePath default_path; + FilePath non_default_path("path"); + non_default_path = default_path; + EXPECT_EQ("", non_default_path.string()); + EXPECT_EQ("", default_path.string()); // RHS var is unchanged. +} + +TEST(AssignmentOperatorTest, NonDefaultAssignedToDefault) { + FilePath non_default_path("path"); + FilePath default_path; + default_path = non_default_path; + EXPECT_EQ("path", default_path.string()); + EXPECT_EQ("path", non_default_path.string()); // RHS var is unchanged. 
+} + +TEST(AssignmentOperatorTest, ConstAssignedToNonConst) { + const FilePath const_default_path("const_path"); + FilePath non_default_path("path"); + non_default_path = const_default_path; + EXPECT_EQ("const_path", non_default_path.string()); +} + +class DirectoryCreationTest : public Test { + protected: + virtual void SetUp() { + testdata_path_.Set(FilePath( + TempDir() + GetCurrentExecutableName().string() + + "_directory_creation" GTEST_PATH_SEP_ "test" GTEST_PATH_SEP_)); + testdata_file_.Set(testdata_path_.RemoveTrailingPathSeparator()); + + unique_file0_.Set(FilePath::MakeFileName(testdata_path_, FilePath("unique"), + 0, "txt")); + unique_file1_.Set(FilePath::MakeFileName(testdata_path_, FilePath("unique"), + 1, "txt")); + + remove(testdata_file_.c_str()); + remove(unique_file0_.c_str()); + remove(unique_file1_.c_str()); + posix::RmDir(testdata_path_.c_str()); + } + + virtual void TearDown() { + remove(testdata_file_.c_str()); + remove(unique_file0_.c_str()); + remove(unique_file1_.c_str()); + posix::RmDir(testdata_path_.c_str()); + } + + std::string TempDir() const { +#if GTEST_OS_WINDOWS_MOBILE + return "\\temp\\"; +#elif GTEST_OS_WINDOWS + const char* temp_dir = posix::GetEnv("TEMP"); + if (temp_dir == NULL || temp_dir[0] == '\0') + return "\\temp\\"; + else if (temp_dir[strlen(temp_dir) - 1] == '\\') + return temp_dir; + else + return std::string(temp_dir) + "\\"; +#elif GTEST_OS_LINUX_ANDROID + return "/sdcard/"; +#else + return "/tmp/"; +#endif // GTEST_OS_WINDOWS_MOBILE + } + + void CreateTextFile(const char* filename) { + FILE* f = posix::FOpen(filename, "w"); + fprintf(f, "text\n"); + fclose(f); + } + + // Strings representing a directory and a file, with identical paths + // except for the trailing separator character that distinquishes + // a directory named 'test' from a file named 'test'. Example names: + FilePath testdata_path_; // "/tmp/directory_creation/test/" + FilePath testdata_file_; // "/tmp/directory_creation/test" + FilePath unique_file0_; // "/tmp/directory_creation/test/unique.txt" + FilePath unique_file1_; // "/tmp/directory_creation/test/unique_1.txt" +}; + +TEST_F(DirectoryCreationTest, CreateDirectoriesRecursively) { + EXPECT_FALSE(testdata_path_.DirectoryExists()) << testdata_path_.string(); + EXPECT_TRUE(testdata_path_.CreateDirectoriesRecursively()); + EXPECT_TRUE(testdata_path_.DirectoryExists()); +} + +TEST_F(DirectoryCreationTest, CreateDirectoriesForAlreadyExistingPath) { + EXPECT_FALSE(testdata_path_.DirectoryExists()) << testdata_path_.string(); + EXPECT_TRUE(testdata_path_.CreateDirectoriesRecursively()); + // Call 'create' again... should still succeed. 
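+  // (CreateDirectoriesRecursively() treats an already-existing directory
+  // as success, so the second call is expected to return true as well.)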
+ EXPECT_TRUE(testdata_path_.CreateDirectoriesRecursively()); +} + +TEST_F(DirectoryCreationTest, CreateDirectoriesAndUniqueFilename) { + FilePath file_path(FilePath::GenerateUniqueFileName(testdata_path_, + FilePath("unique"), "txt")); + EXPECT_EQ(unique_file0_.string(), file_path.string()); + EXPECT_FALSE(file_path.FileOrDirectoryExists()); // file not there + + testdata_path_.CreateDirectoriesRecursively(); + EXPECT_FALSE(file_path.FileOrDirectoryExists()); // file still not there + CreateTextFile(file_path.c_str()); + EXPECT_TRUE(file_path.FileOrDirectoryExists()); + + FilePath file_path2(FilePath::GenerateUniqueFileName(testdata_path_, + FilePath("unique"), "txt")); + EXPECT_EQ(unique_file1_.string(), file_path2.string()); + EXPECT_FALSE(file_path2.FileOrDirectoryExists()); // file not there + CreateTextFile(file_path2.c_str()); + EXPECT_TRUE(file_path2.FileOrDirectoryExists()); +} + +TEST_F(DirectoryCreationTest, CreateDirectoriesFail) { + // force a failure by putting a file where we will try to create a directory. + CreateTextFile(testdata_file_.c_str()); + EXPECT_TRUE(testdata_file_.FileOrDirectoryExists()); + EXPECT_FALSE(testdata_file_.DirectoryExists()); + EXPECT_FALSE(testdata_file_.CreateDirectoriesRecursively()); +} + +TEST(NoDirectoryCreationTest, CreateNoDirectoriesForDefaultXmlFile) { + const FilePath test_detail_xml("test_detail.xml"); + EXPECT_FALSE(test_detail_xml.CreateDirectoriesRecursively()); +} + +TEST(FilePathTest, DefaultConstructor) { + FilePath fp; + EXPECT_EQ("", fp.string()); +} + +TEST(FilePathTest, CharAndCopyConstructors) { + const FilePath fp("spicy"); + EXPECT_EQ("spicy", fp.string()); + + const FilePath fp_copy(fp); + EXPECT_EQ("spicy", fp_copy.string()); +} + +TEST(FilePathTest, StringConstructor) { + const FilePath fp(std::string("cider")); + EXPECT_EQ("cider", fp.string()); +} + +TEST(FilePathTest, Set) { + const FilePath apple("apple"); + FilePath mac("mac"); + mac.Set(apple); // Implement Set() since overloading operator= is forbidden. 
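+  // Set() copies the value; the source FilePath must remain unchanged,
+  // which the second assertion below verifies.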
+ EXPECT_EQ("apple", mac.string()); + EXPECT_EQ("apple", apple.string()); +} + +TEST(FilePathTest, ToString) { + const FilePath file("drink"); + EXPECT_EQ("drink", file.string()); +} + +TEST(FilePathTest, RemoveExtension) { + EXPECT_EQ("app", FilePath("app.cc").RemoveExtension("cc").string()); + EXPECT_EQ("app", FilePath("app.exe").RemoveExtension("exe").string()); + EXPECT_EQ("APP", FilePath("APP.EXE").RemoveExtension("exe").string()); +} + +TEST(FilePathTest, RemoveExtensionWhenThereIsNoExtension) { + EXPECT_EQ("app", FilePath("app").RemoveExtension("exe").string()); +} + +TEST(FilePathTest, IsDirectory) { + EXPECT_FALSE(FilePath("cola").IsDirectory()); + EXPECT_TRUE(FilePath("koala" GTEST_PATH_SEP_).IsDirectory()); +#if GTEST_HAS_ALT_PATH_SEP_ + EXPECT_TRUE(FilePath("koala/").IsDirectory()); +#endif +} + +TEST(FilePathTest, IsAbsolutePath) { + EXPECT_FALSE(FilePath("is" GTEST_PATH_SEP_ "relative").IsAbsolutePath()); + EXPECT_FALSE(FilePath("").IsAbsolutePath()); +#if GTEST_OS_WINDOWS + EXPECT_TRUE(FilePath("c:\\" GTEST_PATH_SEP_ "is_not" + GTEST_PATH_SEP_ "relative").IsAbsolutePath()); + EXPECT_FALSE(FilePath("c:foo" GTEST_PATH_SEP_ "bar").IsAbsolutePath()); + EXPECT_TRUE(FilePath("c:/" GTEST_PATH_SEP_ "is_not" + GTEST_PATH_SEP_ "relative").IsAbsolutePath()); +#else + EXPECT_TRUE(FilePath(GTEST_PATH_SEP_ "is_not" GTEST_PATH_SEP_ "relative") + .IsAbsolutePath()); +#endif // GTEST_OS_WINDOWS +} + +TEST(FilePathTest, IsRootDirectory) { +#if GTEST_OS_WINDOWS + EXPECT_TRUE(FilePath("a:\\").IsRootDirectory()); + EXPECT_TRUE(FilePath("Z:/").IsRootDirectory()); + EXPECT_TRUE(FilePath("e://").IsRootDirectory()); + EXPECT_FALSE(FilePath("").IsRootDirectory()); + EXPECT_FALSE(FilePath("b:").IsRootDirectory()); + EXPECT_FALSE(FilePath("b:a").IsRootDirectory()); + EXPECT_FALSE(FilePath("8:/").IsRootDirectory()); + EXPECT_FALSE(FilePath("c|/").IsRootDirectory()); +#else + EXPECT_TRUE(FilePath("/").IsRootDirectory()); + EXPECT_TRUE(FilePath("//").IsRootDirectory()); + EXPECT_FALSE(FilePath("").IsRootDirectory()); + EXPECT_FALSE(FilePath("\\").IsRootDirectory()); + EXPECT_FALSE(FilePath("/x").IsRootDirectory()); +#endif +} + +} // namespace +} // namespace internal +} // namespace testing diff --git a/external/gtest/test/gtest-linked_ptr_test.cc b/external/gtest/test/gtest-linked_ptr_test.cc new file mode 100755 index 0000000000..6fcf5124a8 --- /dev/null +++ b/external/gtest/test/gtest-linked_ptr_test.cc @@ -0,0 +1,154 @@ +// Copyright 2003, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
+// IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Dan Egnor (egnor@google.com)
+// Ported to Windows: Vadim Berman (vadimb@google.com)
+
+#include "gtest/internal/gtest-linked_ptr.h"
+
+#include <stdlib.h>
+#include "gtest/gtest.h"
+
+namespace {
+
+using testing::Message;
+using testing::internal::linked_ptr;
+
+int num;
+Message* history = NULL;
+
+// Class which tracks allocation/deallocation
+class A {
+ public:
+  A(): mynum(num++) { *history << "A" << mynum << " ctor\n"; }
+  virtual ~A() { *history << "A" << mynum << " dtor\n"; }
+  virtual void Use() { *history << "A" << mynum << " use\n"; }
+ protected:
+  int mynum;
+};
+
+// Subclass
+class B : public A {
+ public:
+  B() { *history << "B" << mynum << " ctor\n"; }
+  ~B() { *history << "B" << mynum << " dtor\n"; }
+  virtual void Use() { *history << "B" << mynum << " use\n"; }
+};
+
+class LinkedPtrTest : public testing::Test {
+ public:
+  LinkedPtrTest() {
+    num = 0;
+    history = new Message;
+  }
+
+  virtual ~LinkedPtrTest() {
+    delete history;
+    history = NULL;
+  }
+};
+
+TEST_F(LinkedPtrTest, GeneralTest) {
+  {
+    linked_ptr<A> a0, a1, a2;
+    // Use explicit function call notation here to suppress self-assign warning.
+    a0.operator=(a0);
+    a1 = a2;
+    ASSERT_EQ(a0.get(), static_cast<A*>(NULL));
+    ASSERT_EQ(a1.get(), static_cast<A*>(NULL));
+    ASSERT_EQ(a2.get(), static_cast<A*>(NULL));
+    ASSERT_TRUE(a0 == NULL);
+    ASSERT_TRUE(a1 == NULL);
+    ASSERT_TRUE(a2 == NULL);
+
+    {
+      linked_ptr<A> a3(new A);
+      a0 = a3;
+      ASSERT_TRUE(a0 == a3);
+      ASSERT_TRUE(a0 != NULL);
+      ASSERT_TRUE(a0.get() == a3);
+      ASSERT_TRUE(a0 == a3.get());
+      linked_ptr<A> a4(a0);
+      a1 = a4;
+      linked_ptr<A> a5(new A);
+      ASSERT_TRUE(a5.get() != a3);
+      ASSERT_TRUE(a5 != a3.get());
+      a2 = a5;
+      linked_ptr<B> b0(new B);
+      linked_ptr<A> a6(b0);
+      ASSERT_TRUE(b0 == a6);
+      ASSERT_TRUE(a6 == b0);
+      ASSERT_TRUE(b0 != NULL);
+      a5 = b0;
+      a5 = b0;
+      a3->Use();
+      a4->Use();
+      a5->Use();
+      a6->Use();
+      b0->Use();
+      (*b0).Use();
+      b0.get()->Use();
+    }
+
+    a0->Use();
+    a1->Use();
+    a2->Use();
+
+    a1 = a2;
+    a2.reset(new A);
+    a0.reset();
+
+    linked_ptr<A> a7;
+  }
+
+  ASSERT_STREQ(
+    "A0 ctor\n"
+    "A1 ctor\n"
+    "A2 ctor\n"
+    "B2 ctor\n"
+    "A0 use\n"
+    "A0 use\n"
+    "B2 use\n"
+    "B2 use\n"
+    "B2 use\n"
+    "B2 use\n"
+    "B2 use\n"
+    "B2 dtor\n"
+    "A2 dtor\n"
+    "A0 use\n"
+    "A0 use\n"
+    "A1 use\n"
+    "A3 ctor\n"
+    "A0 dtor\n"
+    "A3 dtor\n"
+    "A1 dtor\n",
+    history->GetString().c_str());
+}
+
+}  // Unnamed namespace
diff --git a/external/gtest/test/gtest-listener_test.cc b/external/gtest/test/gtest-listener_test.cc
new file mode 100755
index 0000000000..99662cff33
--- /dev/null
+++ b/external/gtest/test/gtest-listener_test.cc
@@ -0,0 +1,310 @@
+// Copyright 2009 Google Inc. All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This file verifies Google Test event listeners receive events at the
+// right times.
+
+#include "gtest/gtest.h"
+#include <vector>
+
+using ::testing::AddGlobalTestEnvironment;
+using ::testing::Environment;
+using ::testing::InitGoogleTest;
+using ::testing::Test;
+using ::testing::TestCase;
+using ::testing::TestEventListener;
+using ::testing::TestInfo;
+using ::testing::TestPartResult;
+using ::testing::UnitTest;
+
+// Used by tests to register their events.
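+// Every listener callback and test hook appends a string here; main()
+// compares the accumulated sequence against expected_events at the end.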
+std::vector<std::string>* g_events = NULL;
+
+namespace testing {
+namespace internal {
+
+class EventRecordingListener : public TestEventListener {
+ public:
+  explicit EventRecordingListener(const char* name) : name_(name) {}
+
+ protected:
+  virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {
+    g_events->push_back(GetFullMethodName("OnTestProgramStart"));
+  }
+
+  virtual void OnTestIterationStart(const UnitTest& /*unit_test*/,
+                                    int iteration) {
+    Message message;
+    message << GetFullMethodName("OnTestIterationStart")
+            << "(" << iteration << ")";
+    g_events->push_back(message.GetString());
+  }
+
+  virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {
+    g_events->push_back(GetFullMethodName("OnEnvironmentsSetUpStart"));
+  }
+
+  virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {
+    g_events->push_back(GetFullMethodName("OnEnvironmentsSetUpEnd"));
+  }
+
+  virtual void OnTestCaseStart(const TestCase& /*test_case*/) {
+    g_events->push_back(GetFullMethodName("OnTestCaseStart"));
+  }
+
+  virtual void OnTestStart(const TestInfo& /*test_info*/) {
+    g_events->push_back(GetFullMethodName("OnTestStart"));
+  }
+
+  virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {
+    g_events->push_back(GetFullMethodName("OnTestPartResult"));
+  }
+
+  virtual void OnTestEnd(const TestInfo& /*test_info*/) {
+    g_events->push_back(GetFullMethodName("OnTestEnd"));
+  }
+
+  virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {
+    g_events->push_back(GetFullMethodName("OnTestCaseEnd"));
+  }
+
+  virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {
+    g_events->push_back(GetFullMethodName("OnEnvironmentsTearDownStart"));
+  }
+
+  virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {
+    g_events->push_back(GetFullMethodName("OnEnvironmentsTearDownEnd"));
+  }
+
+  virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/,
+                                  int iteration) {
+    Message message;
+    message << GetFullMethodName("OnTestIterationEnd")
+            << "(" << iteration << ")";
+    g_events->push_back(message.GetString());
+  }
+
+  virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {
+    g_events->push_back(GetFullMethodName("OnTestProgramEnd"));
+  }
+
+ private:
+  std::string GetFullMethodName(const char* name) {
+    return name_ + "." + name;
+  }
+
+  std::string name_;
+};
+
+class EnvironmentInvocationCatcher : public Environment {
+ protected:
+  virtual void SetUp() {
+    g_events->push_back("Environment::SetUp");
+  }
+
+  virtual void TearDown() {
+    g_events->push_back("Environment::TearDown");
+  }
+};
+
+class ListenerTest : public Test {
+ protected:
+  static void SetUpTestCase() {
+    g_events->push_back("ListenerTest::SetUpTestCase");
+  }
+
+  static void TearDownTestCase() {
+    g_events->push_back("ListenerTest::TearDownTestCase");
+  }
+
+  virtual void SetUp() {
+    g_events->push_back("ListenerTest::SetUp");
+  }
+
+  virtual void TearDown() {
+    g_events->push_back("ListenerTest::TearDown");
+  }
+};
+
+TEST_F(ListenerTest, DoesFoo) {
+  // Test execution order within a test case is not guaranteed so we are not
+  // recording the test name.
+  g_events->push_back("ListenerTest::* Test Body");
+  SUCCEED();  // Triggers OnTestPartResult.
+}
+
+TEST_F(ListenerTest, DoesBar) {
+  g_events->push_back("ListenerTest::* Test Body");
+  SUCCEED();  // Triggers OnTestPartResult.
+} + +} // namespace internal + +} // namespace testing + +using ::testing::internal::EnvironmentInvocationCatcher; +using ::testing::internal::EventRecordingListener; + +void VerifyResults(const std::vector& data, + const char* const* expected_data, + int expected_data_size) { + const int actual_size = data.size(); + // If the following assertion fails, a new entry will be appended to + // data. Hence we save data.size() first. + EXPECT_EQ(expected_data_size, actual_size); + + // Compares the common prefix. + const int shorter_size = expected_data_size <= actual_size ? + expected_data_size : actual_size; + int i = 0; + for (; i < shorter_size; ++i) { + ASSERT_STREQ(expected_data[i], data[i].c_str()) + << "at position " << i; + } + + // Prints extra elements in the actual data. + for (; i < actual_size; ++i) { + printf(" Actual event #%d: %s\n", i, data[i].c_str()); + } +} + +int main(int argc, char **argv) { + std::vector events; + g_events = &events; + InitGoogleTest(&argc, argv); + + UnitTest::GetInstance()->listeners().Append( + new EventRecordingListener("1st")); + UnitTest::GetInstance()->listeners().Append( + new EventRecordingListener("2nd")); + + AddGlobalTestEnvironment(new EnvironmentInvocationCatcher); + + GTEST_CHECK_(events.size() == 0) + << "AddGlobalTestEnvironment should not generate any events itself."; + + ::testing::GTEST_FLAG(repeat) = 2; + int ret_val = RUN_ALL_TESTS(); + + const char* const expected_events[] = { + "1st.OnTestProgramStart", + "2nd.OnTestProgramStart", + "1st.OnTestIterationStart(0)", + "2nd.OnTestIterationStart(0)", + "1st.OnEnvironmentsSetUpStart", + "2nd.OnEnvironmentsSetUpStart", + "Environment::SetUp", + "2nd.OnEnvironmentsSetUpEnd", + "1st.OnEnvironmentsSetUpEnd", + "1st.OnTestCaseStart", + "2nd.OnTestCaseStart", + "ListenerTest::SetUpTestCase", + "1st.OnTestStart", + "2nd.OnTestStart", + "ListenerTest::SetUp", + "ListenerTest::* Test Body", + "1st.OnTestPartResult", + "2nd.OnTestPartResult", + "ListenerTest::TearDown", + "2nd.OnTestEnd", + "1st.OnTestEnd", + "1st.OnTestStart", + "2nd.OnTestStart", + "ListenerTest::SetUp", + "ListenerTest::* Test Body", + "1st.OnTestPartResult", + "2nd.OnTestPartResult", + "ListenerTest::TearDown", + "2nd.OnTestEnd", + "1st.OnTestEnd", + "ListenerTest::TearDownTestCase", + "2nd.OnTestCaseEnd", + "1st.OnTestCaseEnd", + "1st.OnEnvironmentsTearDownStart", + "2nd.OnEnvironmentsTearDownStart", + "Environment::TearDown", + "2nd.OnEnvironmentsTearDownEnd", + "1st.OnEnvironmentsTearDownEnd", + "2nd.OnTestIterationEnd(0)", + "1st.OnTestIterationEnd(0)", + "1st.OnTestIterationStart(1)", + "2nd.OnTestIterationStart(1)", + "1st.OnEnvironmentsSetUpStart", + "2nd.OnEnvironmentsSetUpStart", + "Environment::SetUp", + "2nd.OnEnvironmentsSetUpEnd", + "1st.OnEnvironmentsSetUpEnd", + "1st.OnTestCaseStart", + "2nd.OnTestCaseStart", + "ListenerTest::SetUpTestCase", + "1st.OnTestStart", + "2nd.OnTestStart", + "ListenerTest::SetUp", + "ListenerTest::* Test Body", + "1st.OnTestPartResult", + "2nd.OnTestPartResult", + "ListenerTest::TearDown", + "2nd.OnTestEnd", + "1st.OnTestEnd", + "1st.OnTestStart", + "2nd.OnTestStart", + "ListenerTest::SetUp", + "ListenerTest::* Test Body", + "1st.OnTestPartResult", + "2nd.OnTestPartResult", + "ListenerTest::TearDown", + "2nd.OnTestEnd", + "1st.OnTestEnd", + "ListenerTest::TearDownTestCase", + "2nd.OnTestCaseEnd", + "1st.OnTestCaseEnd", + "1st.OnEnvironmentsTearDownStart", + "2nd.OnEnvironmentsTearDownStart", + "Environment::TearDown", + "2nd.OnEnvironmentsTearDownEnd", + 
"1st.OnEnvironmentsTearDownEnd", + "2nd.OnTestIterationEnd(1)", + "1st.OnTestIterationEnd(1)", + "2nd.OnTestProgramEnd", + "1st.OnTestProgramEnd" + }; + VerifyResults(events, + expected_events, + sizeof(expected_events)/sizeof(expected_events[0])); + + // We need to check manually for ad hoc test failures that happen after + // RUN_ALL_TESTS finishes. + if (UnitTest::GetInstance()->Failed()) + ret_val = 1; + + return ret_val; +} diff --git a/external/gtest/test/gtest-message_test.cc b/external/gtest/test/gtest-message_test.cc new file mode 100755 index 0000000000..175238ef4e --- /dev/null +++ b/external/gtest/test/gtest-message_test.cc @@ -0,0 +1,159 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Tests for the Message class. + +#include "gtest/gtest-message.h" + +#include "gtest/gtest.h" + +namespace { + +using ::testing::Message; + +// Tests the testing::Message class + +// Tests the default constructor. +TEST(MessageTest, DefaultConstructor) { + const Message msg; + EXPECT_EQ("", msg.GetString()); +} + +// Tests the copy constructor. +TEST(MessageTest, CopyConstructor) { + const Message msg1("Hello"); + const Message msg2(msg1); + EXPECT_EQ("Hello", msg2.GetString()); +} + +// Tests constructing a Message from a C-string. +TEST(MessageTest, ConstructsFromCString) { + Message msg("Hello"); + EXPECT_EQ("Hello", msg.GetString()); +} + +// Tests streaming a float. +TEST(MessageTest, StreamsFloat) { + const std::string s = (Message() << 1.23456F << " " << 2.34567F).GetString(); + // Both numbers should be printed with enough precision. + EXPECT_PRED_FORMAT2(testing::IsSubstring, "1.234560", s.c_str()); + EXPECT_PRED_FORMAT2(testing::IsSubstring, " 2.345669", s.c_str()); +} + +// Tests streaming a double. +TEST(MessageTest, StreamsDouble) { + const std::string s = (Message() << 1260570880.4555497 << " " + << 1260572265.1954534).GetString(); + // Both numbers should be printed with enough precision. 
+ EXPECT_PRED_FORMAT2(testing::IsSubstring, "1260570880.45", s.c_str()); + EXPECT_PRED_FORMAT2(testing::IsSubstring, " 1260572265.19", s.c_str()); +} + +// Tests streaming a non-char pointer. +TEST(MessageTest, StreamsPointer) { + int n = 0; + int* p = &n; + EXPECT_NE("(null)", (Message() << p).GetString()); +} + +// Tests streaming a NULL non-char pointer. +TEST(MessageTest, StreamsNullPointer) { + int* p = NULL; + EXPECT_EQ("(null)", (Message() << p).GetString()); +} + +// Tests streaming a C string. +TEST(MessageTest, StreamsCString) { + EXPECT_EQ("Foo", (Message() << "Foo").GetString()); +} + +// Tests streaming a NULL C string. +TEST(MessageTest, StreamsNullCString) { + char* p = NULL; + EXPECT_EQ("(null)", (Message() << p).GetString()); +} + +// Tests streaming std::string. +TEST(MessageTest, StreamsString) { + const ::std::string str("Hello"); + EXPECT_EQ("Hello", (Message() << str).GetString()); +} + +// Tests that we can output strings containing embedded NULs. +TEST(MessageTest, StreamsStringWithEmbeddedNUL) { + const char char_array_with_nul[] = + "Here's a NUL\0 and some more string"; + const ::std::string string_with_nul(char_array_with_nul, + sizeof(char_array_with_nul) - 1); + EXPECT_EQ("Here's a NUL\\0 and some more string", + (Message() << string_with_nul).GetString()); +} + +// Tests streaming a NUL char. +TEST(MessageTest, StreamsNULChar) { + EXPECT_EQ("\\0", (Message() << '\0').GetString()); +} + +// Tests streaming int. +TEST(MessageTest, StreamsInt) { + EXPECT_EQ("123", (Message() << 123).GetString()); +} + +// Tests that basic IO manipulators (endl, ends, and flush) can be +// streamed to Message. +TEST(MessageTest, StreamsBasicIoManip) { + EXPECT_EQ("Line 1.\nA NUL char \\0 in line 2.", + (Message() << "Line 1." << std::endl + << "A NUL char " << std::ends << std::flush + << " in line 2.").GetString()); +} + +// Tests Message::GetString() +TEST(MessageTest, GetString) { + Message msg; + msg << 1 << " lamb"; + EXPECT_EQ("1 lamb", msg.GetString()); +} + +// Tests streaming a Message object to an ostream. +TEST(MessageTest, StreamsToOStream) { + Message msg("Hello"); + ::std::stringstream ss; + ss << msg; + EXPECT_EQ("Hello", testing::internal::StringStreamToString(&ss)); +} + +// Tests that a Message object doesn't take up too much stack space. +TEST(MessageTest, DoesNotTakeUpMuchStackSpace) { + EXPECT_LE(sizeof(Message), 16U); +} + +} // namespace diff --git a/external/gtest/test/gtest-options_test.cc b/external/gtest/test/gtest-options_test.cc new file mode 100755 index 0000000000..5586dc3b1b --- /dev/null +++ b/external/gtest/test/gtest-options_test.cc @@ -0,0 +1,215 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: keith.ray@gmail.com (Keith Ray)
+//
+// Google Test UnitTestOptions tests
+//
+// This file tests classes and functions used internally by
+// Google Test. They are subject to change without notice.
+//
+// This file is #included from gtest.cc, to avoid changing build or
+// make-files on Windows and other platforms. Do not #include this file
+// anywhere else!
+
+#include "gtest/gtest.h"
+
+#if GTEST_OS_WINDOWS_MOBILE
+# include <windows.h>
+#elif GTEST_OS_WINDOWS
+# include <direct.h>
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#include "src/gtest-internal-inl.h"
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+namespace internal {
+namespace {
+
+// Turns the given relative path into an absolute path.
+FilePath GetAbsolutePathOf(const FilePath& relative_path) {
+  return FilePath::ConcatPaths(FilePath::GetCurrentDir(), relative_path);
+}
+
+// Testing UnitTestOptions::GetOutputFormat/GetOutputFile.
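(Aside: the output-flag forms exercised below can also be set programmatically instead of via --gtest_output; a minimal sketch assuming stock Google Test, where my_report.xml is an arbitrary example name.)

#include "gtest/gtest.h"

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  // Equivalent to passing --gtest_output=xml:my_report.xml. Plain "xml"
  // defaults to test_detail.xml, and a value ending in a path separator
  // names a directory, yielding <executable name>.xml inside it.
  ::testing::GTEST_FLAG(output) = "xml:my_report.xml";
  return RUN_ALL_TESTS();
}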
+ +TEST(XmlOutputTest, GetOutputFormatDefault) { + GTEST_FLAG(output) = ""; + EXPECT_STREQ("", UnitTestOptions::GetOutputFormat().c_str()); +} + +TEST(XmlOutputTest, GetOutputFormat) { + GTEST_FLAG(output) = "xml:filename"; + EXPECT_STREQ("xml", UnitTestOptions::GetOutputFormat().c_str()); +} + +TEST(XmlOutputTest, GetOutputFileDefault) { + GTEST_FLAG(output) = ""; + EXPECT_EQ(GetAbsolutePathOf(FilePath("test_detail.xml")).string(), + UnitTestOptions::GetAbsolutePathToOutputFile()); +} + +TEST(XmlOutputTest, GetOutputFileSingleFile) { + GTEST_FLAG(output) = "xml:filename.abc"; + EXPECT_EQ(GetAbsolutePathOf(FilePath("filename.abc")).string(), + UnitTestOptions::GetAbsolutePathToOutputFile()); +} + +TEST(XmlOutputTest, GetOutputFileFromDirectoryPath) { + GTEST_FLAG(output) = "xml:path" GTEST_PATH_SEP_; + const std::string expected_output_file = + GetAbsolutePathOf( + FilePath(std::string("path") + GTEST_PATH_SEP_ + + GetCurrentExecutableName().string() + ".xml")).string(); + const std::string& output_file = + UnitTestOptions::GetAbsolutePathToOutputFile(); +#if GTEST_OS_WINDOWS + EXPECT_STRCASEEQ(expected_output_file.c_str(), output_file.c_str()); +#else + EXPECT_EQ(expected_output_file, output_file.c_str()); +#endif +} + +TEST(OutputFileHelpersTest, GetCurrentExecutableName) { + const std::string exe_str = GetCurrentExecutableName().string(); +#if GTEST_OS_WINDOWS + const bool success = + _strcmpi("gtest-options_test", exe_str.c_str()) == 0 || + _strcmpi("gtest-options-ex_test", exe_str.c_str()) == 0 || + _strcmpi("gtest_all_test", exe_str.c_str()) == 0 || + _strcmpi("gtest_dll_test", exe_str.c_str()) == 0; +#else + // TODO(wan@google.com): remove the hard-coded "lt-" prefix when + // Chandler Carruth's libtool replacement is ready. + const bool success = + exe_str == "gtest-options_test" || + exe_str == "gtest_all_test" || + exe_str == "lt-gtest_all_test" || + exe_str == "gtest_dll_test"; +#endif // GTEST_OS_WINDOWS + if (!success) + FAIL() << "GetCurrentExecutableName() returns " << exe_str; +} + +class XmlOutputChangeDirTest : public Test { + protected: + virtual void SetUp() { + original_working_dir_ = FilePath::GetCurrentDir(); + posix::ChDir(".."); + // This will make the test fail if run from the root directory. 
+ EXPECT_NE(original_working_dir_.string(), + FilePath::GetCurrentDir().string()); + } + + virtual void TearDown() { + posix::ChDir(original_working_dir_.string().c_str()); + } + + FilePath original_working_dir_; +}; + +TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithDefault) { + GTEST_FLAG(output) = ""; + EXPECT_EQ(FilePath::ConcatPaths(original_working_dir_, + FilePath("test_detail.xml")).string(), + UnitTestOptions::GetAbsolutePathToOutputFile()); +} + +TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithDefaultXML) { + GTEST_FLAG(output) = "xml"; + EXPECT_EQ(FilePath::ConcatPaths(original_working_dir_, + FilePath("test_detail.xml")).string(), + UnitTestOptions::GetAbsolutePathToOutputFile()); +} + +TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithRelativeFile) { + GTEST_FLAG(output) = "xml:filename.abc"; + EXPECT_EQ(FilePath::ConcatPaths(original_working_dir_, + FilePath("filename.abc")).string(), + UnitTestOptions::GetAbsolutePathToOutputFile()); +} + +TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithRelativePath) { + GTEST_FLAG(output) = "xml:path" GTEST_PATH_SEP_; + const std::string expected_output_file = + FilePath::ConcatPaths( + original_working_dir_, + FilePath(std::string("path") + GTEST_PATH_SEP_ + + GetCurrentExecutableName().string() + ".xml")).string(); + const std::string& output_file = + UnitTestOptions::GetAbsolutePathToOutputFile(); +#if GTEST_OS_WINDOWS + EXPECT_STRCASEEQ(expected_output_file.c_str(), output_file.c_str()); +#else + EXPECT_EQ(expected_output_file, output_file.c_str()); +#endif +} + +TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithAbsoluteFile) { +#if GTEST_OS_WINDOWS + GTEST_FLAG(output) = "xml:c:\\tmp\\filename.abc"; + EXPECT_EQ(FilePath("c:\\tmp\\filename.abc").string(), + UnitTestOptions::GetAbsolutePathToOutputFile()); +#else + GTEST_FLAG(output) ="xml:/tmp/filename.abc"; + EXPECT_EQ(FilePath("/tmp/filename.abc").string(), + UnitTestOptions::GetAbsolutePathToOutputFile()); +#endif +} + +TEST_F(XmlOutputChangeDirTest, PreserveOriginalWorkingDirWithAbsolutePath) { +#if GTEST_OS_WINDOWS + const std::string path = "c:\\tmp\\"; +#else + const std::string path = "/tmp/"; +#endif + + GTEST_FLAG(output) = "xml:" + path; + const std::string expected_output_file = + path + GetCurrentExecutableName().string() + ".xml"; + const std::string& output_file = + UnitTestOptions::GetAbsolutePathToOutputFile(); + +#if GTEST_OS_WINDOWS + EXPECT_STRCASEEQ(expected_output_file.c_str(), output_file.c_str()); +#else + EXPECT_EQ(expected_output_file, output_file.c_str()); +#endif +} + +} // namespace +} // namespace internal +} // namespace testing diff --git a/external/gtest/test/gtest-param-test2_test.cc b/external/gtest/test/gtest-param-test2_test.cc new file mode 100755 index 0000000000..4a782fe708 --- /dev/null +++ b/external/gtest/test/gtest-param-test2_test.cc @@ -0,0 +1,65 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+//
+// Tests for Google Test itself. This verifies that the basic constructs of
+// Google Test work.
+
+#include "gtest/gtest.h"
+
+#include "test/gtest-param-test_test.h"
+
+#if GTEST_HAS_PARAM_TEST
+
+using ::testing::Values;
+using ::testing::internal::ParamGenerator;
+
+// Tests that generators defined in a different translation unit
+// are functional. The test using extern_gen is defined
+// in gtest-param-test_test.cc.
+ParamGenerator<int> extern_gen = Values(33);
+
+// Tests that a parameterized test case can be defined in one translation unit
+// and instantiated in another. The test is defined in gtest-param-test_test.cc
+// and ExternalInstantiationTest fixture class is defined in
+// gtest-param-test_test.h.
+INSTANTIATE_TEST_CASE_P(MultiplesOf33,
+                        ExternalInstantiationTest,
+                        Values(33, 66));
+
+// Tests that a parameterized test case can be instantiated
+// in multiple translation units. Another instantiation is defined
+// in gtest-param-test_test.cc and InstantiationInMultipleTranslaionUnitsTest
+// fixture is defined in gtest-param-test_test.h
+INSTANTIATE_TEST_CASE_P(Sequence2,
+                        InstantiationInMultipleTranslaionUnitsTest,
+                        Values(42*3, 42*4, 42*5));
+
+#endif  // GTEST_HAS_PARAM_TEST
diff --git a/external/gtest/test/gtest-param-test_test.cc b/external/gtest/test/gtest-param-test_test.cc
new file mode 100755
index 0000000000..f60cb8a558
--- /dev/null
+++ b/external/gtest/test/gtest-param-test_test.cc
@@ -0,0 +1,904 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: vladl@google.com (Vlad Losev) +// +// Tests for Google Test itself. This file verifies that the parameter +// generators objects produce correct parameter sequences and that +// Google Test runtime instantiates correct tests from those sequences. + +#include "gtest/gtest.h" + +#if GTEST_HAS_PARAM_TEST + +# include +# include +# include +# include +# include +# include + +// To include gtest-internal-inl.h. +# define GTEST_IMPLEMENTATION_ 1 +# include "src/gtest-internal-inl.h" // for UnitTestOptions +# undef GTEST_IMPLEMENTATION_ + +# include "test/gtest-param-test_test.h" + +using ::std::vector; +using ::std::sort; + +using ::testing::AddGlobalTestEnvironment; +using ::testing::Bool; +using ::testing::Message; +using ::testing::Range; +using ::testing::TestWithParam; +using ::testing::Values; +using ::testing::ValuesIn; + +# if GTEST_HAS_COMBINE +using ::testing::Combine; +using ::std::tr1::get; +using ::std::tr1::make_tuple; +using ::std::tr1::tuple; +# endif // GTEST_HAS_COMBINE + +using ::testing::internal::ParamGenerator; +using ::testing::internal::UnitTestOptions; + +// Prints a value to a string. +// +// TODO(wan@google.com): remove PrintValue() when we move matchers and +// EXPECT_THAT() from Google Mock to Google Test. At that time, we +// can write EXPECT_THAT(x, Eq(y)) to compare two tuples x and y, as +// EXPECT_THAT() and the matchers know how to print tuples. +template +::std::string PrintValue(const T& value) { + ::std::stringstream stream; + stream << value; + return stream.str(); +} + +# if GTEST_HAS_COMBINE + +// These overloads allow printing tuples in our tests. We cannot +// define an operator<< for tuples, as that definition needs to be in +// the std namespace in order to be picked up by Google Test via +// Argument-Dependent Lookup, yet defining anything in the std +// namespace in non-STL code is undefined behavior. + +template +::std::string PrintValue(const tuple& value) { + ::std::stringstream stream; + stream << "(" << get<0>(value) << ", " << get<1>(value) << ")"; + return stream.str(); +} + +template +::std::string PrintValue(const tuple& value) { + ::std::stringstream stream; + stream << "(" << get<0>(value) << ", " << get<1>(value) + << ", "<< get<2>(value) << ")"; + return stream.str(); +} + +template +::std::string PrintValue( + const tuple& value) { + ::std::stringstream stream; + stream << "(" << get<0>(value) << ", " << get<1>(value) + << ", "<< get<2>(value) << ", " << get<3>(value) + << ", "<< get<4>(value) << ", " << get<5>(value) + << ", "<< get<6>(value) << ", " << get<7>(value) + << ", "<< get<8>(value) << ", " << get<9>(value) << ")"; + return stream.str(); +} + +# endif // GTEST_HAS_COMBINE + +// Verifies that a sequence generated by the generator and accessed +// via the iterator object matches the expected one using Google Test +// assertions. 
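(Before the generator-verification helpers below, it helps to keep the client-facing pattern these generators serve in mind; a minimal sketch using the public API of this gtest vintage, with illustrative names.)

#include "gtest/gtest.h"

class EvenTest : public ::testing::TestWithParam<int> {};

// One test runs per generated value; GetParam() yields the current one.
TEST_P(EvenTest, IsEven) {
  EXPECT_EQ(0, GetParam() % 2);
}

// Generates Evens/EvenTest.IsEven/0, /1, and /2.
INSTANTIATE_TEST_CASE_P(Evens, EvenTest, ::testing::Values(2, 4, 6));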
+template +void VerifyGenerator(const ParamGenerator& generator, + const T (&expected_values)[N]) { + typename ParamGenerator::iterator it = generator.begin(); + for (size_t i = 0; i < N; ++i) { + ASSERT_FALSE(it == generator.end()) + << "At element " << i << " when accessing via an iterator " + << "created with the copy constructor.\n"; + // We cannot use EXPECT_EQ() here as the values may be tuples, + // which don't support <<. + EXPECT_TRUE(expected_values[i] == *it) + << "where i is " << i + << ", expected_values[i] is " << PrintValue(expected_values[i]) + << ", *it is " << PrintValue(*it) + << ", and 'it' is an iterator created with the copy constructor.\n"; + it++; + } + EXPECT_TRUE(it == generator.end()) + << "At the presumed end of sequence when accessing via an iterator " + << "created with the copy constructor.\n"; + + // Test the iterator assignment. The following lines verify that + // the sequence accessed via an iterator initialized via the + // assignment operator (as opposed to a copy constructor) matches + // just the same. + it = generator.begin(); + for (size_t i = 0; i < N; ++i) { + ASSERT_FALSE(it == generator.end()) + << "At element " << i << " when accessing via an iterator " + << "created with the assignment operator.\n"; + EXPECT_TRUE(expected_values[i] == *it) + << "where i is " << i + << ", expected_values[i] is " << PrintValue(expected_values[i]) + << ", *it is " << PrintValue(*it) + << ", and 'it' is an iterator created with the copy constructor.\n"; + it++; + } + EXPECT_TRUE(it == generator.end()) + << "At the presumed end of sequence when accessing via an iterator " + << "created with the assignment operator.\n"; +} + +template +void VerifyGeneratorIsEmpty(const ParamGenerator& generator) { + typename ParamGenerator::iterator it = generator.begin(); + EXPECT_TRUE(it == generator.end()); + + it = generator.begin(); + EXPECT_TRUE(it == generator.end()); +} + +// Generator tests. They test that each of the provided generator functions +// generates an expected sequence of values. The general test pattern +// instantiates a generator using one of the generator functions, +// checks the sequence produced by the generator using its iterator API, +// and then resets the iterator back to the beginning of the sequence +// and checks the sequence again. + +// Tests that iterators produced by generator functions conform to the +// ForwardIterator concept. +TEST(IteratorTest, ParamIteratorConformsToForwardIteratorConcept) { + const ParamGenerator gen = Range(0, 10); + ParamGenerator::iterator it = gen.begin(); + + // Verifies that iterator initialization works as expected. + ParamGenerator::iterator it2 = it; + EXPECT_TRUE(*it == *it2) << "Initialized iterators must point to the " + << "element same as its source points to"; + + // Verifies that iterator assignment works as expected. + it++; + EXPECT_FALSE(*it == *it2); + it2 = it; + EXPECT_TRUE(*it == *it2) << "Assigned iterators must point to the " + << "element same as its source points to"; + + // Verifies that prefix operator++() returns *this. + EXPECT_EQ(&it, &(++it)) << "Result of the prefix operator++ must be " + << "refer to the original object"; + + // Verifies that the result of the postfix operator++ points to the value + // pointed to by the original iterator. + int original_value = *it; // Have to compute it outside of macro call to be + // unaffected by the parameter evaluation order. 
+ EXPECT_EQ(original_value, *(it++)); + + // Verifies that prefix and postfix operator++() advance an iterator + // all the same. + it2 = it; + it++; + ++it2; + EXPECT_TRUE(*it == *it2); +} + +// Tests that Range() generates the expected sequence. +TEST(RangeTest, IntRangeWithDefaultStep) { + const ParamGenerator gen = Range(0, 3); + const int expected_values[] = {0, 1, 2}; + VerifyGenerator(gen, expected_values); +} + +// Edge case. Tests that Range() generates the single element sequence +// as expected when provided with range limits that are equal. +TEST(RangeTest, IntRangeSingleValue) { + const ParamGenerator gen = Range(0, 1); + const int expected_values[] = {0}; + VerifyGenerator(gen, expected_values); +} + +// Edge case. Tests that Range() with generates empty sequence when +// supplied with an empty range. +TEST(RangeTest, IntRangeEmpty) { + const ParamGenerator gen = Range(0, 0); + VerifyGeneratorIsEmpty(gen); +} + +// Tests that Range() with custom step (greater then one) generates +// the expected sequence. +TEST(RangeTest, IntRangeWithCustomStep) { + const ParamGenerator gen = Range(0, 9, 3); + const int expected_values[] = {0, 3, 6}; + VerifyGenerator(gen, expected_values); +} + +// Tests that Range() with custom step (greater then one) generates +// the expected sequence when the last element does not fall on the +// upper range limit. Sequences generated by Range() must not have +// elements beyond the range limits. +TEST(RangeTest, IntRangeWithCustomStepOverUpperBound) { + const ParamGenerator gen = Range(0, 4, 3); + const int expected_values[] = {0, 3}; + VerifyGenerator(gen, expected_values); +} + +// Verifies that Range works with user-defined types that define +// copy constructor, operator=(), operator+(), and operator<(). +class DogAdder { + public: + explicit DogAdder(const char* a_value) : value_(a_value) {} + DogAdder(const DogAdder& other) : value_(other.value_.c_str()) {} + + DogAdder operator=(const DogAdder& other) { + if (this != &other) + value_ = other.value_; + return *this; + } + DogAdder operator+(const DogAdder& other) const { + Message msg; + msg << value_.c_str() << other.value_.c_str(); + return DogAdder(msg.GetString().c_str()); + } + bool operator<(const DogAdder& other) const { + return value_ < other.value_; + } + const std::string& value() const { return value_; } + + private: + std::string value_; +}; + +TEST(RangeTest, WorksWithACustomType) { + const ParamGenerator gen = + Range(DogAdder("cat"), DogAdder("catdogdog"), DogAdder("dog")); + ParamGenerator::iterator it = gen.begin(); + + ASSERT_FALSE(it == gen.end()); + EXPECT_STREQ("cat", it->value().c_str()); + + ASSERT_FALSE(++it == gen.end()); + EXPECT_STREQ("catdog", it->value().c_str()); + + EXPECT_TRUE(++it == gen.end()); +} + +class IntWrapper { + public: + explicit IntWrapper(int a_value) : value_(a_value) {} + IntWrapper(const IntWrapper& other) : value_(other.value_) {} + + IntWrapper operator=(const IntWrapper& other) { + value_ = other.value_; + return *this; + } + // operator+() adds a different type. 
+ IntWrapper operator+(int other) const { return IntWrapper(value_ + other); } + bool operator<(const IntWrapper& other) const { + return value_ < other.value_; + } + int value() const { return value_; } + + private: + int value_; +}; + +TEST(RangeTest, WorksWithACustomTypeWithDifferentIncrementType) { + const ParamGenerator gen = Range(IntWrapper(0), IntWrapper(2)); + ParamGenerator::iterator it = gen.begin(); + + ASSERT_FALSE(it == gen.end()); + EXPECT_EQ(0, it->value()); + + ASSERT_FALSE(++it == gen.end()); + EXPECT_EQ(1, it->value()); + + EXPECT_TRUE(++it == gen.end()); +} + +// Tests that ValuesIn() with an array parameter generates +// the expected sequence. +TEST(ValuesInTest, ValuesInArray) { + int array[] = {3, 5, 8}; + const ParamGenerator gen = ValuesIn(array); + VerifyGenerator(gen, array); +} + +// Tests that ValuesIn() with a const array parameter generates +// the expected sequence. +TEST(ValuesInTest, ValuesInConstArray) { + const int array[] = {3, 5, 8}; + const ParamGenerator gen = ValuesIn(array); + VerifyGenerator(gen, array); +} + +// Edge case. Tests that ValuesIn() with an array parameter containing a +// single element generates the single element sequence. +TEST(ValuesInTest, ValuesInSingleElementArray) { + int array[] = {42}; + const ParamGenerator gen = ValuesIn(array); + VerifyGenerator(gen, array); +} + +// Tests that ValuesIn() generates the expected sequence for an STL +// container (vector). +TEST(ValuesInTest, ValuesInVector) { + typedef ::std::vector ContainerType; + ContainerType values; + values.push_back(3); + values.push_back(5); + values.push_back(8); + const ParamGenerator gen = ValuesIn(values); + + const int expected_values[] = {3, 5, 8}; + VerifyGenerator(gen, expected_values); +} + +// Tests that ValuesIn() generates the expected sequence. +TEST(ValuesInTest, ValuesInIteratorRange) { + typedef ::std::vector ContainerType; + ContainerType values; + values.push_back(3); + values.push_back(5); + values.push_back(8); + const ParamGenerator gen = ValuesIn(values.begin(), values.end()); + + const int expected_values[] = {3, 5, 8}; + VerifyGenerator(gen, expected_values); +} + +// Edge case. Tests that ValuesIn() provided with an iterator range specifying a +// single value generates a single-element sequence. +TEST(ValuesInTest, ValuesInSingleElementIteratorRange) { + typedef ::std::vector ContainerType; + ContainerType values; + values.push_back(42); + const ParamGenerator gen = ValuesIn(values.begin(), values.end()); + + const int expected_values[] = {42}; + VerifyGenerator(gen, expected_values); +} + +// Edge case. Tests that ValuesIn() provided with an empty iterator range +// generates an empty sequence. +TEST(ValuesInTest, ValuesInEmptyIteratorRange) { + typedef ::std::vector ContainerType; + ContainerType values; + const ParamGenerator gen = ValuesIn(values.begin(), values.end()); + + VerifyGeneratorIsEmpty(gen); +} + +// Tests that the Values() generates the expected sequence. +TEST(ValuesTest, ValuesWorks) { + const ParamGenerator gen = Values(3, 5, 8); + + const int expected_values[] = {3, 5, 8}; + VerifyGenerator(gen, expected_values); +} + +// Tests that Values() generates the expected sequences from elements of +// different types convertible to ParamGenerator's parameter type. 
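(Summarizing the ValuesIn() variants just exercised, typical client usage looks like this; a sketch with illustrative names.)

#include "gtest/gtest.h"

class PrimeTest : public ::testing::TestWithParam<int> {};

TEST_P(PrimeTest, ExceedsOne) {
  EXPECT_GT(GetParam(), 1);
}

// ValuesIn() also accepts STL-style containers and (begin, end)
// iterator ranges, as the tests above verify.
const int kPrimes[] = {2, 3, 5, 7};
INSTANTIATE_TEST_CASE_P(FromArray, PrimeTest, ::testing::ValuesIn(kPrimes));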
+TEST(ValuesTest, ValuesWorksForValuesOfCompatibleTypes) { + const ParamGenerator gen = Values(3, 5.0f, 8.0); + + const double expected_values[] = {3.0, 5.0, 8.0}; + VerifyGenerator(gen, expected_values); +} + +TEST(ValuesTest, ValuesWorksForMaxLengthList) { + const ParamGenerator gen = Values( + 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, + 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, + 210, 220, 230, 240, 250, 260, 270, 280, 290, 300, + 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, + 410, 420, 430, 440, 450, 460, 470, 480, 490, 500); + + const int expected_values[] = { + 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, + 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, + 210, 220, 230, 240, 250, 260, 270, 280, 290, 300, + 310, 320, 330, 340, 350, 360, 370, 380, 390, 400, + 410, 420, 430, 440, 450, 460, 470, 480, 490, 500}; + VerifyGenerator(gen, expected_values); +} + +// Edge case test. Tests that single-parameter Values() generates the sequence +// with the single value. +TEST(ValuesTest, ValuesWithSingleParameter) { + const ParamGenerator gen = Values(42); + + const int expected_values[] = {42}; + VerifyGenerator(gen, expected_values); +} + +// Tests that Bool() generates sequence (false, true). +TEST(BoolTest, BoolWorks) { + const ParamGenerator gen = Bool(); + + const bool expected_values[] = {false, true}; + VerifyGenerator(gen, expected_values); +} + +# if GTEST_HAS_COMBINE + +// Tests that Combine() with two parameters generates the expected sequence. +TEST(CombineTest, CombineWithTwoParameters) { + const char* foo = "foo"; + const char* bar = "bar"; + const ParamGenerator > gen = + Combine(Values(foo, bar), Values(3, 4)); + + tuple expected_values[] = { + make_tuple(foo, 3), make_tuple(foo, 4), + make_tuple(bar, 3), make_tuple(bar, 4)}; + VerifyGenerator(gen, expected_values); +} + +// Tests that Combine() with three parameters generates the expected sequence. +TEST(CombineTest, CombineWithThreeParameters) { + const ParamGenerator > gen = Combine(Values(0, 1), + Values(3, 4), + Values(5, 6)); + tuple expected_values[] = { + make_tuple(0, 3, 5), make_tuple(0, 3, 6), + make_tuple(0, 4, 5), make_tuple(0, 4, 6), + make_tuple(1, 3, 5), make_tuple(1, 3, 6), + make_tuple(1, 4, 5), make_tuple(1, 4, 6)}; + VerifyGenerator(gen, expected_values); +} + +// Tests that the Combine() with the first parameter generating a single value +// sequence generates a sequence with the number of elements equal to the +// number of elements in the sequence generated by the second parameter. +TEST(CombineTest, CombineWithFirstParameterSingleValue) { + const ParamGenerator > gen = Combine(Values(42), + Values(0, 1)); + + tuple expected_values[] = {make_tuple(42, 0), make_tuple(42, 1)}; + VerifyGenerator(gen, expected_values); +} + +// Tests that the Combine() with the second parameter generating a single value +// sequence generates a sequence with the number of elements equal to the +// number of elements in the sequence generated by the first parameter. +TEST(CombineTest, CombineWithSecondParameterSingleValue) { + const ParamGenerator > gen = Combine(Values(0, 1), + Values(42)); + + tuple expected_values[] = {make_tuple(0, 42), make_tuple(1, 42)}; + VerifyGenerator(gen, expected_values); +} + +// Tests that when the first parameter produces an empty sequence, +// Combine() produces an empty sequence, too. 
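(The Combine() cases above correspond to this client-side pattern; a sketch for the tr1-tuple era of Google Test, with illustrative fixture names.)

#include "gtest/gtest.h"

#if GTEST_HAS_COMBINE
using ::std::tr1::get;
using ::std::tr1::tuple;

// Cartesian product: 2 widths x 2 flags = 4 generated tests.
class RenderTest
    : public ::testing::TestWithParam<tuple<int, bool> > {};

TEST_P(RenderTest, AcceptsAnyCombination) {
  const int width = get<0>(GetParam());
  const bool antialias = get<1>(GetParam());
  EXPECT_GT(width, 0);
  (void) antialias;
}

INSTANTIATE_TEST_CASE_P(
    Sizes, RenderTest,
    ::testing::Combine(::testing::Values(640, 1024), ::testing::Bool()));
#endif  // GTEST_HAS_COMBINE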
+TEST(CombineTest, CombineWithFirstParameterEmptyRange) { + const ParamGenerator > gen = Combine(Range(0, 0), + Values(0, 1)); + VerifyGeneratorIsEmpty(gen); +} + +// Tests that when the second parameter produces an empty sequence, +// Combine() produces an empty sequence, too. +TEST(CombineTest, CombineWithSecondParameterEmptyRange) { + const ParamGenerator > gen = Combine(Values(0, 1), + Range(1, 1)); + VerifyGeneratorIsEmpty(gen); +} + +// Edge case. Tests that combine works with the maximum number +// of parameters supported by Google Test (currently 10). +TEST(CombineTest, CombineWithMaxNumberOfParameters) { + const char* foo = "foo"; + const char* bar = "bar"; + const ParamGenerator > gen = Combine(Values(foo, bar), + Values(1), Values(2), + Values(3), Values(4), + Values(5), Values(6), + Values(7), Values(8), + Values(9)); + + tuple + expected_values[] = {make_tuple(foo, 1, 2, 3, 4, 5, 6, 7, 8, 9), + make_tuple(bar, 1, 2, 3, 4, 5, 6, 7, 8, 9)}; + VerifyGenerator(gen, expected_values); +} + +# endif // GTEST_HAS_COMBINE + +// Tests that an generator produces correct sequence after being +// assigned from another generator. +TEST(ParamGeneratorTest, AssignmentWorks) { + ParamGenerator gen = Values(1, 2); + const ParamGenerator gen2 = Values(3, 4); + gen = gen2; + + const int expected_values[] = {3, 4}; + VerifyGenerator(gen, expected_values); +} + +// This test verifies that the tests are expanded and run as specified: +// one test per element from the sequence produced by the generator +// specified in INSTANTIATE_TEST_CASE_P. It also verifies that the test's +// fixture constructor, SetUp(), and TearDown() have run and have been +// supplied with the correct parameters. + +// The use of environment object allows detection of the case where no test +// case functionality is run at all. In this case TestCaseTearDown will not +// be able to detect missing tests, naturally. +template +class TestGenerationEnvironment : public ::testing::Environment { + public: + static TestGenerationEnvironment* Instance() { + static TestGenerationEnvironment* instance = new TestGenerationEnvironment; + return instance; + } + + void FixtureConstructorExecuted() { fixture_constructor_count_++; } + void SetUpExecuted() { set_up_count_++; } + void TearDownExecuted() { tear_down_count_++; } + void TestBodyExecuted() { test_body_count_++; } + + virtual void TearDown() { + // If all MultipleTestGenerationTest tests have been de-selected + // by the filter flag, the following checks make no sense. 
+ bool perform_check = false; + + for (int i = 0; i < kExpectedCalls; ++i) { + Message msg; + msg << "TestsExpandedAndRun/" << i; + if (UnitTestOptions::FilterMatchesTest( + "TestExpansionModule/MultipleTestGenerationTest", + msg.GetString().c_str())) { + perform_check = true; + } + } + if (perform_check) { + EXPECT_EQ(kExpectedCalls, fixture_constructor_count_) + << "Fixture constructor of ParamTestGenerationTest test case " + << "has not been run as expected."; + EXPECT_EQ(kExpectedCalls, set_up_count_) + << "Fixture SetUp method of ParamTestGenerationTest test case " + << "has not been run as expected."; + EXPECT_EQ(kExpectedCalls, tear_down_count_) + << "Fixture TearDown method of ParamTestGenerationTest test case " + << "has not been run as expected."; + EXPECT_EQ(kExpectedCalls, test_body_count_) + << "Test in ParamTestGenerationTest test case " + << "has not been run as expected."; + } + } + + private: + TestGenerationEnvironment() : fixture_constructor_count_(0), set_up_count_(0), + tear_down_count_(0), test_body_count_(0) {} + + int fixture_constructor_count_; + int set_up_count_; + int tear_down_count_; + int test_body_count_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestGenerationEnvironment); +}; + +const int test_generation_params[] = {36, 42, 72}; + +class TestGenerationTest : public TestWithParam { + public: + enum { + PARAMETER_COUNT = + sizeof(test_generation_params)/sizeof(test_generation_params[0]) + }; + + typedef TestGenerationEnvironment Environment; + + TestGenerationTest() { + Environment::Instance()->FixtureConstructorExecuted(); + current_parameter_ = GetParam(); + } + virtual void SetUp() { + Environment::Instance()->SetUpExecuted(); + EXPECT_EQ(current_parameter_, GetParam()); + } + virtual void TearDown() { + Environment::Instance()->TearDownExecuted(); + EXPECT_EQ(current_parameter_, GetParam()); + } + + static void SetUpTestCase() { + bool all_tests_in_test_case_selected = true; + + for (int i = 0; i < PARAMETER_COUNT; ++i) { + Message test_name; + test_name << "TestsExpandedAndRun/" << i; + if ( !UnitTestOptions::FilterMatchesTest( + "TestExpansionModule/MultipleTestGenerationTest", + test_name.GetString())) { + all_tests_in_test_case_selected = false; + } + } + EXPECT_TRUE(all_tests_in_test_case_selected) + << "When running the TestGenerationTest test case all of its tests\n" + << "must be selected by the filter flag for the test case to pass.\n" + << "If not all of them are enabled, we can't reliably conclude\n" + << "that the correct number of tests have been generated."; + + collected_parameters_.clear(); + } + + static void TearDownTestCase() { + vector expected_values(test_generation_params, + test_generation_params + PARAMETER_COUNT); + // Test execution order is not guaranteed by Google Test, + // so the order of values in collected_parameters_ can be + // different and we have to sort to compare. 
+ sort(expected_values.begin(), expected_values.end()); + sort(collected_parameters_.begin(), collected_parameters_.end()); + + EXPECT_TRUE(collected_parameters_ == expected_values); + } + + protected: + int current_parameter_; + static vector collected_parameters_; + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestGenerationTest); +}; +vector TestGenerationTest::collected_parameters_; + +TEST_P(TestGenerationTest, TestsExpandedAndRun) { + Environment::Instance()->TestBodyExecuted(); + EXPECT_EQ(current_parameter_, GetParam()); + collected_parameters_.push_back(GetParam()); +} +INSTANTIATE_TEST_CASE_P(TestExpansionModule, TestGenerationTest, + ValuesIn(test_generation_params)); + +// This test verifies that the element sequence (third parameter of +// INSTANTIATE_TEST_CASE_P) is evaluated in InitGoogleTest() and neither at +// the call site of INSTANTIATE_TEST_CASE_P nor in RUN_ALL_TESTS(). For +// that, we declare param_value_ to be a static member of +// GeneratorEvaluationTest and initialize it to 0. We set it to 1 in +// main(), just before invocation of InitGoogleTest(). After calling +// InitGoogleTest(), we set the value to 2. If the sequence is evaluated +// before or after InitGoogleTest, INSTANTIATE_TEST_CASE_P will create a +// test with parameter other than 1, and the test body will fail the +// assertion. +class GeneratorEvaluationTest : public TestWithParam { + public: + static int param_value() { return param_value_; } + static void set_param_value(int param_value) { param_value_ = param_value; } + + private: + static int param_value_; +}; +int GeneratorEvaluationTest::param_value_ = 0; + +TEST_P(GeneratorEvaluationTest, GeneratorsEvaluatedInMain) { + EXPECT_EQ(1, GetParam()); +} +INSTANTIATE_TEST_CASE_P(GenEvalModule, + GeneratorEvaluationTest, + Values(GeneratorEvaluationTest::param_value())); + +// Tests that generators defined in a different translation unit are +// functional. Generator extern_gen is defined in gtest-param-test_test2.cc. +extern ParamGenerator extern_gen; +class ExternalGeneratorTest : public TestWithParam {}; +TEST_P(ExternalGeneratorTest, ExternalGenerator) { + // Sequence produced by extern_gen contains only a single value + // which we verify here. + EXPECT_EQ(GetParam(), 33); +} +INSTANTIATE_TEST_CASE_P(ExternalGeneratorModule, + ExternalGeneratorTest, + extern_gen); + +// Tests that a parameterized test case can be defined in one translation +// unit and instantiated in another. This test will be instantiated in +// gtest-param-test_test2.cc. ExternalInstantiationTest fixture class is +// defined in gtest-param-test_test.h. +TEST_P(ExternalInstantiationTest, IsMultipleOf33) { + EXPECT_EQ(0, GetParam() % 33); +} + +// Tests that a parameterized test case can be instantiated with multiple +// generators. +class MultipleInstantiationTest : public TestWithParam {}; +TEST_P(MultipleInstantiationTest, AllowsMultipleInstances) { +} +INSTANTIATE_TEST_CASE_P(Sequence1, MultipleInstantiationTest, Values(1, 2)); +INSTANTIATE_TEST_CASE_P(Sequence2, MultipleInstantiationTest, Range(3, 5)); + +// Tests that a parameterized test case can be instantiated +// in multiple translation units. This test will be instantiated +// here and in gtest-param-test_test2.cc. +// InstantiationInMultipleTranslationUnitsTest fixture class +// is defined in gtest-param-test_test.h. 
+TEST_P(InstantiationInMultipleTranslaionUnitsTest, IsMultipleOf42) { + EXPECT_EQ(0, GetParam() % 42); +} +INSTANTIATE_TEST_CASE_P(Sequence1, + InstantiationInMultipleTranslaionUnitsTest, + Values(42, 42*2)); + +// Tests that each iteration of parameterized test runs in a separate test +// object. +class SeparateInstanceTest : public TestWithParam { + public: + SeparateInstanceTest() : count_(0) {} + + static void TearDownTestCase() { + EXPECT_GE(global_count_, 2) + << "If some (but not all) SeparateInstanceTest tests have been " + << "filtered out this test will fail. Make sure that all " + << "GeneratorEvaluationTest are selected or de-selected together " + << "by the test filter."; + } + + protected: + int count_; + static int global_count_; +}; +int SeparateInstanceTest::global_count_ = 0; + +TEST_P(SeparateInstanceTest, TestsRunInSeparateInstances) { + EXPECT_EQ(0, count_++); + global_count_++; +} +INSTANTIATE_TEST_CASE_P(FourElemSequence, SeparateInstanceTest, Range(1, 4)); + +// Tests that all instantiations of a test have named appropriately. Test +// defined with TEST_P(TestCaseName, TestName) and instantiated with +// INSTANTIATE_TEST_CASE_P(SequenceName, TestCaseName, generator) must be named +// SequenceName/TestCaseName.TestName/i, where i is the 0-based index of the +// sequence element used to instantiate the test. +class NamingTest : public TestWithParam {}; + +TEST_P(NamingTest, TestsReportCorrectNamesAndParameters) { + const ::testing::TestInfo* const test_info = + ::testing::UnitTest::GetInstance()->current_test_info(); + + EXPECT_STREQ("ZeroToFiveSequence/NamingTest", test_info->test_case_name()); + + Message index_stream; + index_stream << "TestsReportCorrectNamesAndParameters/" << GetParam(); + EXPECT_STREQ(index_stream.GetString().c_str(), test_info->name()); + + EXPECT_EQ(::testing::PrintToString(GetParam()), test_info->value_param()); +} + +INSTANTIATE_TEST_CASE_P(ZeroToFiveSequence, NamingTest, Range(0, 5)); + +// Class that cannot be streamed into an ostream. It needs to be copyable +// (and, in case of MSVC, also assignable) in order to be a test parameter +// type. Its default copy constructor and assignment operator do exactly +// what we need. +class Unstreamable { + public: + explicit Unstreamable(int value) : value_(value) {} + + private: + int value_; +}; + +class CommentTest : public TestWithParam {}; + +TEST_P(CommentTest, TestsCorrectlyReportUnstreamableParams) { + const ::testing::TestInfo* const test_info = + ::testing::UnitTest::GetInstance()->current_test_info(); + + EXPECT_EQ(::testing::PrintToString(GetParam()), test_info->value_param()); +} + +INSTANTIATE_TEST_CASE_P(InstantiationWithComments, + CommentTest, + Values(Unstreamable(1))); + +// Verify that we can create a hierarchy of test fixtures, where the base +// class fixture is not parameterized and the derived class is. In this case +// ParameterizedDerivedTest inherits from NonParameterizedBaseTest. We +// perform simple tests on both. 
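(The fixture hierarchy verified below is the standard WithParamInterface mix-in; a minimal sketch with illustrative names.)

#include "gtest/gtest.h"

class BaseFixture : public ::testing::Test {
 protected:
  virtual void SetUp() { shared_ = 17; }
  int shared_;
};

// Parameterization is mixed in without touching the base fixture.
class DerivedParamTest : public BaseFixture,
                         public ::testing::WithParamInterface<int> {};

TEST_P(DerivedParamTest, SeesBaseAndParam) {
  EXPECT_EQ(17, shared_);
  EXPECT_GE(GetParam(), 0);
}

INSTANTIATE_TEST_CASE_P(Small, DerivedParamTest, ::testing::Range(0, 3));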
+class NonParameterizedBaseTest : public ::testing::Test { + public: + NonParameterizedBaseTest() : n_(17) { } + protected: + int n_; +}; + +class ParameterizedDerivedTest : public NonParameterizedBaseTest, + public ::testing::WithParamInterface { + protected: + ParameterizedDerivedTest() : count_(0) { } + int count_; + static int global_count_; +}; + +int ParameterizedDerivedTest::global_count_ = 0; + +TEST_F(NonParameterizedBaseTest, FixtureIsInitialized) { + EXPECT_EQ(17, n_); +} + +TEST_P(ParameterizedDerivedTest, SeesSequence) { + EXPECT_EQ(17, n_); + EXPECT_EQ(0, count_++); + EXPECT_EQ(GetParam(), global_count_++); +} + +class ParameterizedDeathTest : public ::testing::TestWithParam { }; + +TEST_F(ParameterizedDeathTest, GetParamDiesFromTestF) { + EXPECT_DEATH_IF_SUPPORTED(GetParam(), + ".* value-parameterized test .*"); +} + +INSTANTIATE_TEST_CASE_P(RangeZeroToFive, ParameterizedDerivedTest, Range(0, 5)); + +#endif // GTEST_HAS_PARAM_TEST + +TEST(CompileTest, CombineIsDefinedOnlyWhenGtestHasParamTestIsDefined) { +#if GTEST_HAS_COMBINE && !GTEST_HAS_PARAM_TEST + FAIL() << "GTEST_HAS_COMBINE is defined while GTEST_HAS_PARAM_TEST is not\n" +#endif +} + +int main(int argc, char **argv) { +#if GTEST_HAS_PARAM_TEST + // Used in TestGenerationTest test case. + AddGlobalTestEnvironment(TestGenerationTest::Environment::Instance()); + // Used in GeneratorEvaluationTest test case. Tests that the updated value + // will be picked up for instantiating tests in GeneratorEvaluationTest. + GeneratorEvaluationTest::set_param_value(1); +#endif // GTEST_HAS_PARAM_TEST + + ::testing::InitGoogleTest(&argc, argv); + +#if GTEST_HAS_PARAM_TEST + // Used in GeneratorEvaluationTest test case. Tests that value updated + // here will NOT be used for instantiating tests in + // GeneratorEvaluationTest. + GeneratorEvaluationTest::set_param_value(2); +#endif // GTEST_HAS_PARAM_TEST + + return RUN_ALL_TESTS(); +} diff --git a/external/gtest/test/gtest-param-test_test.h b/external/gtest/test/gtest-param-test_test.h new file mode 100755 index 0000000000..26ea122b10 --- /dev/null +++ b/external/gtest/test/gtest-param-test_test.h @@ -0,0 +1,57 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: vladl@google.com (Vlad Losev)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file provides classes and functions used internally
+// for testing Google Test itself.
+
+#ifndef GTEST_TEST_GTEST_PARAM_TEST_TEST_H_
+#define GTEST_TEST_GTEST_PARAM_TEST_TEST_H_
+
+#include "gtest/gtest.h"
+
+#if GTEST_HAS_PARAM_TEST
+
+// Test fixture for testing definition and instantiation of a test
+// in separate translation units.
+class ExternalInstantiationTest : public ::testing::TestWithParam<int> {
+};
+
+// Test fixture for testing instantiation of a test in multiple
+// translation units.
+class InstantiationInMultipleTranslaionUnitsTest
+    : public ::testing::TestWithParam<int> {
+};
+
+#endif  // GTEST_HAS_PARAM_TEST
+
+#endif  // GTEST_TEST_GTEST_PARAM_TEST_TEST_H_
diff --git a/external/gtest/test/gtest-port_test.cc b/external/gtest/test/gtest-port_test.cc
new file mode 100755
index 0000000000..43f1f20105
--- /dev/null
+++ b/external/gtest/test/gtest-port_test.cc
@@ -0,0 +1,1253 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: vladl@google.com (Vlad Losev), wan@google.com (Zhanyong Wan)
+//
+// This file tests the internal cross-platform support utilities.
+
+#include "gtest/internal/gtest-port.h"
+
+#include <stdio.h>
+
+#if GTEST_OS_MAC
+# include <time.h>
+#endif  // GTEST_OS_MAC
+
+#include <list>
+#include <utility>  // For std::pair and std::make_pair.
+#include <vector>
+
+#include "gtest/gtest.h"
+#include "gtest/gtest-spi.h"
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#include "src/gtest-internal-inl.h"
+#undef GTEST_IMPLEMENTATION_
+
+using std::make_pair;
+using std::pair;
+
+namespace testing {
+namespace internal {
+
+TEST(IsXDigitTest, WorksForNarrowAscii) {
+  EXPECT_TRUE(IsXDigit('0'));
+  EXPECT_TRUE(IsXDigit('9'));
+  EXPECT_TRUE(IsXDigit('A'));
+  EXPECT_TRUE(IsXDigit('F'));
+  EXPECT_TRUE(IsXDigit('a'));
+  EXPECT_TRUE(IsXDigit('f'));
+
+  EXPECT_FALSE(IsXDigit('-'));
+  EXPECT_FALSE(IsXDigit('g'));
+  EXPECT_FALSE(IsXDigit('G'));
+}
+
+TEST(IsXDigitTest, ReturnsFalseForNarrowNonAscii) {
+  EXPECT_FALSE(IsXDigit(static_cast<char>(0x80)));
+  EXPECT_FALSE(IsXDigit(static_cast<char>('0' | 0x80)));
+}
+
+TEST(IsXDigitTest, WorksForWideAscii) {
+  EXPECT_TRUE(IsXDigit(L'0'));
+  EXPECT_TRUE(IsXDigit(L'9'));
+  EXPECT_TRUE(IsXDigit(L'A'));
+  EXPECT_TRUE(IsXDigit(L'F'));
+  EXPECT_TRUE(IsXDigit(L'a'));
+  EXPECT_TRUE(IsXDigit(L'f'));
+
+  EXPECT_FALSE(IsXDigit(L'-'));
+  EXPECT_FALSE(IsXDigit(L'g'));
+  EXPECT_FALSE(IsXDigit(L'G'));
+}
+
+TEST(IsXDigitTest, ReturnsFalseForWideNonAscii) {
+  EXPECT_FALSE(IsXDigit(static_cast<wchar_t>(0x80)));
+  EXPECT_FALSE(IsXDigit(static_cast<wchar_t>(L'0' | 0x80)));
+  EXPECT_FALSE(IsXDigit(static_cast<wchar_t>(L'0' | 0x100)));
+}
+
+class Base {
+ public:
+  // Copy constructor and assignment operator do exactly what we need, so we
+  // use them.
+  Base() : member_(0) {}
+  explicit Base(int n) : member_(n) {}
+  virtual ~Base() {}
+  int member() { return member_; }
+
+ private:
+  int member_;
+};
+
+class Derived : public Base {
+ public:
+  explicit Derived(int n) : Base(n) {}
+};
+
+TEST(ImplicitCastTest, ConvertsPointers) {
+  Derived derived(0);
+  EXPECT_TRUE(&derived == ::testing::internal::ImplicitCast_<Base*>(&derived));
+}
+
+TEST(ImplicitCastTest, CanUseInheritance) {
+  Derived derived(1);
+  Base base = ::testing::internal::ImplicitCast_<Base>(derived);
+  EXPECT_EQ(derived.member(), base.member());
+}
+
+class Castable {
+ public:
+  explicit Castable(bool* converted) : converted_(converted) {}
+  operator Base() {
+    *converted_ = true;
+    return Base();
+  }
+
+ private:
+  bool* converted_;
+};
+
+TEST(ImplicitCastTest, CanUseNonConstCastOperator) {
+  bool converted = false;
+  Castable castable(&converted);
+  Base base = ::testing::internal::ImplicitCast_<Base>(castable);
+  EXPECT_TRUE(converted);
+}
+
+class ConstCastable {
+ public:
+  explicit ConstCastable(bool* converted) : converted_(converted) {}
+  operator Base() const {
+    *converted_ = true;
+    return Base();
+  }
+
+ private:
+  bool* converted_;
+};
+
+TEST(ImplicitCastTest, CanUseConstCastOperatorOnConstValues) {
+  bool converted = false;
+  const ConstCastable const_castable(&converted);
+  Base base = ::testing::internal::ImplicitCast_<Base>(const_castable);
+  EXPECT_TRUE(converted);
+}
+
+class ConstAndNonConstCastable {
+ public:
+  ConstAndNonConstCastable(bool* converted, bool* const_converted)
+      : converted_(converted), const_converted_(const_converted) {}
+  operator Base() {
+    *converted_ = true;
+    return Base();
+  }
+  operator Base() const {
+    *const_converted_ = true;
+    return Base();
+  }
+
+ private:
+  bool* converted_;
+  bool* const_converted_;
+};
+
+TEST(ImplicitCastTest, CanSelectBetweenConstAndNonConstCasrAppropriately) {
+  bool converted = false;
+  bool const_converted = false;
+  ConstAndNonConstCastable castable(&converted, &const_converted);
+  Base base = ::testing::internal::ImplicitCast_<Base>(castable);
+  EXPECT_TRUE(converted);
+  EXPECT_FALSE(const_converted);
+
+  converted = false;
+  const_converted = false;
+  const ConstAndNonConstCastable const_castable(&converted, &const_converted);
+  base = ::testing::internal::ImplicitCast_<Base>(const_castable);
+  EXPECT_FALSE(converted);
+  EXPECT_TRUE(const_converted);
+}
+
+class To {
+ public:
+  To(bool* converted) { *converted = true; }  // NOLINT
+};
+
+TEST(ImplicitCastTest, CanUseImplicitConstructor) {
+  bool converted = false;
+  To to = ::testing::internal::ImplicitCast_<To>(&converted);
+  (void)to;
+  EXPECT_TRUE(converted);
+}
+
+TEST(IteratorTraitsTest, WorksForSTLContainerIterators) {
+  StaticAssertTypeEq<const char*,
+      IteratorTraits< ::std::vector<const char*>::const_iterator>::value_type>();
+  StaticAssertTypeEq<int,
+      IteratorTraits< ::std::list<int>::iterator>::value_type>();
+}
+
+TEST(IteratorTraitsTest, WorksForPointerToNonConst) {
+  StaticAssertTypeEq<char, IteratorTraits<char*>::value_type>();
+  StaticAssertTypeEq<const void*, IteratorTraits<const void**>::value_type>();
+}
+
+TEST(IteratorTraitsTest, WorksForPointerToConst) {
+  StaticAssertTypeEq<char, IteratorTraits<const char*>::value_type>();
+  StaticAssertTypeEq<const void*,
+      IteratorTraits<const void* const*>::value_type>();
+}
+
+// Tests that the element_type typedef is available in scoped_ptr and refers
+// to the parameter type.
+TEST(ScopedPtrTest, DefinesElementType) {
+  StaticAssertTypeEq<int, ::testing::internal::scoped_ptr<int>::element_type>();
+}
+
+// TODO(vladl@google.com): Implement THE REST of scoped_ptr tests.
+
+TEST(GtestCheckSyntaxTest, BehavesLikeASingleStatement) {
+  if (AlwaysFalse())
+    GTEST_CHECK_(false) << "This should never be executed; "
+                           "It's a compilation test only.";
+
+  if (AlwaysTrue())
+    GTEST_CHECK_(true);
+  else
+    ;  // NOLINT
+
+  if (AlwaysFalse())
+    ;  // NOLINT
+  else
+    GTEST_CHECK_(true) << "";
+}
+
+TEST(GtestCheckSyntaxTest, WorksWithSwitch) {
+  switch (0) {
+    case 1:
+      break;
+    default:
+      GTEST_CHECK_(true);
+  }
+
+  switch (0)
+    case 0:
+      GTEST_CHECK_(true) << "Check failed in switch case";
+}
+
+// Verifies behavior of FormatFileLocation.
+TEST(FormatFileLocationTest, FormatsFileLocation) {
+  EXPECT_PRED_FORMAT2(IsSubstring, "foo.cc", FormatFileLocation("foo.cc", 42));
+  EXPECT_PRED_FORMAT2(IsSubstring, "42", FormatFileLocation("foo.cc", 42));
+}
+
+TEST(FormatFileLocationTest, FormatsUnknownFile) {
+  EXPECT_PRED_FORMAT2(
+      IsSubstring, "unknown file", FormatFileLocation(NULL, 42));
+  EXPECT_PRED_FORMAT2(IsSubstring, "42", FormatFileLocation(NULL, 42));
+}
+
+TEST(FormatFileLocationTest, FormatsUknownLine) {
+  EXPECT_EQ("foo.cc:", FormatFileLocation("foo.cc", -1));
+}
+
+TEST(FormatFileLocationTest, FormatsUknownFileAndLine) {
+  EXPECT_EQ("unknown file:", FormatFileLocation(NULL, -1));
+}
+
+// Verifies behavior of FormatCompilerIndependentFileLocation.
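+// Unlike FormatFileLocation(), whose output format is compiler-dependent
+// (e.g. "foo.cc(42):" under MSVC), this variant always uses the
+// "file:line" form, so the tests below can compare complete strings
+// rather than substrings.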
+TEST(FormatCompilerIndependentFileLocationTest, FormatsFileLocation) {
+  EXPECT_EQ("foo.cc:42", FormatCompilerIndependentFileLocation("foo.cc", 42));
+}
+
+TEST(FormatCompilerIndependentFileLocationTest, FormatsUknownFile) {
+  EXPECT_EQ("unknown file:42",
+            FormatCompilerIndependentFileLocation(NULL, 42));
+}
+
+TEST(FormatCompilerIndependentFileLocationTest, FormatsUknownLine) {
+  EXPECT_EQ("foo.cc", FormatCompilerIndependentFileLocation("foo.cc", -1));
+}
+
+TEST(FormatCompilerIndependentFileLocationTest, FormatsUknownFileAndLine) {
+  EXPECT_EQ("unknown file", FormatCompilerIndependentFileLocation(NULL, -1));
+}
+
+#if GTEST_OS_MAC || GTEST_OS_QNX
+void* ThreadFunc(void* data) {
+  pthread_mutex_t* mutex = static_cast<pthread_mutex_t*>(data);
+  pthread_mutex_lock(mutex);
+  pthread_mutex_unlock(mutex);
+  return NULL;
+}
+
+TEST(GetThreadCountTest, ReturnsCorrectValue) {
+  EXPECT_EQ(1U, GetThreadCount());
+  pthread_mutex_t mutex;
+  pthread_attr_t attr;
+  pthread_t thread_id;
+
+  // TODO(vladl@google.com): turn mutex into internal::Mutex for automatic
+  // destruction.
+  pthread_mutex_init(&mutex, NULL);
+  pthread_mutex_lock(&mutex);
+  ASSERT_EQ(0, pthread_attr_init(&attr));
+  ASSERT_EQ(0, pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE));
+
+  const int status = pthread_create(&thread_id, &attr, &ThreadFunc, &mutex);
+  ASSERT_EQ(0, pthread_attr_destroy(&attr));
+  ASSERT_EQ(0, status);
+  EXPECT_EQ(2U, GetThreadCount());
+  pthread_mutex_unlock(&mutex);
+
+  void* dummy;
+  ASSERT_EQ(0, pthread_join(thread_id, &dummy));
+
+# if GTEST_OS_MAC
+
+  // MacOS X may not immediately report the updated thread count after
+  // joining a thread, causing flakiness in this test. To counter that, we
+  // wait for up to .5 seconds for the OS to report the correct value.
+  for (int i = 0; i < 5; ++i) {
+    if (GetThreadCount() == 1)
+      break;
+
+    SleepMilliseconds(100);
+  }
+
+# endif  // GTEST_OS_MAC
+
+  EXPECT_EQ(1U, GetThreadCount());
+  pthread_mutex_destroy(&mutex);
+}
+#else
+TEST(GetThreadCountTest, ReturnsZeroWhenUnableToCountThreads) {
+  EXPECT_EQ(0U, GetThreadCount());
+}
+#endif  // GTEST_OS_MAC || GTEST_OS_QNX
+
+TEST(GtestCheckDeathTest, DiesWithCorrectOutputOnFailure) {
+  const bool a_false_condition = false;
+  const char regex[] =
+#ifdef _MSC_VER
+     "gtest-port_test\\.cc\\(\\d+\\):"
+#elif GTEST_USES_POSIX_RE
+     "gtest-port_test\\.cc:[0-9]+"
+#else
+     "gtest-port_test\\.cc:\\d+"
+#endif  // _MSC_VER
+     ".*a_false_condition.*Extra info.*";
+
+  EXPECT_DEATH_IF_SUPPORTED(GTEST_CHECK_(a_false_condition) << "Extra info",
+                            regex);
+}
+
+#if GTEST_HAS_DEATH_TEST
+
+TEST(GtestCheckDeathTest, LivesSilentlyOnSuccess) {
+  EXPECT_EXIT({
+      GTEST_CHECK_(true) << "Extra info";
+      ::std::cerr << "Success\n";
+      exit(0); },
+      ::testing::ExitedWithCode(0), "Success");
+}
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+// Verifies that Google Test chooses the regular expression engine
+// appropriate to the platform. The test will produce compiler errors in
+// case of failure. For simplicity, we only cover the most important
+// platforms here.
+TEST(RegexEngineSelectionTest, SelectsCorrectRegexEngine) {
+#if GTEST_HAS_POSIX_RE
+
+  EXPECT_TRUE(GTEST_USES_POSIX_RE);
+
+#else
+
+  EXPECT_TRUE(GTEST_USES_SIMPLE_RE);
+
+#endif
+}
+
+#if GTEST_USES_POSIX_RE
+
+# if GTEST_HAS_TYPED_TEST
+
+template <typename Str>
+class RETest : public ::testing::Test {};
+
+// Defines StringTypes as the list of all string types that class RE
+// supports.
+typedef testing::Types< + ::std::string, +# if GTEST_HAS_GLOBAL_STRING + ::string, +# endif // GTEST_HAS_GLOBAL_STRING + const char*> StringTypes; + +TYPED_TEST_CASE(RETest, StringTypes); + +// Tests RE's implicit constructors. +TYPED_TEST(RETest, ImplicitConstructorWorks) { + const RE empty(TypeParam("")); + EXPECT_STREQ("", empty.pattern()); + + const RE simple(TypeParam("hello")); + EXPECT_STREQ("hello", simple.pattern()); + + const RE normal(TypeParam(".*(\\w+)")); + EXPECT_STREQ(".*(\\w+)", normal.pattern()); +} + +// Tests that RE's constructors reject invalid regular expressions. +TYPED_TEST(RETest, RejectsInvalidRegex) { + EXPECT_NONFATAL_FAILURE({ + const RE invalid(TypeParam("?")); + }, "\"?\" is not a valid POSIX Extended regular expression."); +} + +// Tests RE::FullMatch(). +TYPED_TEST(RETest, FullMatchWorks) { + const RE empty(TypeParam("")); + EXPECT_TRUE(RE::FullMatch(TypeParam(""), empty)); + EXPECT_FALSE(RE::FullMatch(TypeParam("a"), empty)); + + const RE re(TypeParam("a.*z")); + EXPECT_TRUE(RE::FullMatch(TypeParam("az"), re)); + EXPECT_TRUE(RE::FullMatch(TypeParam("axyz"), re)); + EXPECT_FALSE(RE::FullMatch(TypeParam("baz"), re)); + EXPECT_FALSE(RE::FullMatch(TypeParam("azy"), re)); +} + +// Tests RE::PartialMatch(). +TYPED_TEST(RETest, PartialMatchWorks) { + const RE empty(TypeParam("")); + EXPECT_TRUE(RE::PartialMatch(TypeParam(""), empty)); + EXPECT_TRUE(RE::PartialMatch(TypeParam("a"), empty)); + + const RE re(TypeParam("a.*z")); + EXPECT_TRUE(RE::PartialMatch(TypeParam("az"), re)); + EXPECT_TRUE(RE::PartialMatch(TypeParam("axyz"), re)); + EXPECT_TRUE(RE::PartialMatch(TypeParam("baz"), re)); + EXPECT_TRUE(RE::PartialMatch(TypeParam("azy"), re)); + EXPECT_FALSE(RE::PartialMatch(TypeParam("zza"), re)); +} + +# endif // GTEST_HAS_TYPED_TEST + +#elif GTEST_USES_SIMPLE_RE + +TEST(IsInSetTest, NulCharIsNotInAnySet) { + EXPECT_FALSE(IsInSet('\0', "")); + EXPECT_FALSE(IsInSet('\0', "\0")); + EXPECT_FALSE(IsInSet('\0', "a")); +} + +TEST(IsInSetTest, WorksForNonNulChars) { + EXPECT_FALSE(IsInSet('a', "Ab")); + EXPECT_FALSE(IsInSet('c', "")); + + EXPECT_TRUE(IsInSet('b', "bcd")); + EXPECT_TRUE(IsInSet('b', "ab")); +} + +TEST(IsAsciiDigitTest, IsFalseForNonDigit) { + EXPECT_FALSE(IsAsciiDigit('\0')); + EXPECT_FALSE(IsAsciiDigit(' ')); + EXPECT_FALSE(IsAsciiDigit('+')); + EXPECT_FALSE(IsAsciiDigit('-')); + EXPECT_FALSE(IsAsciiDigit('.')); + EXPECT_FALSE(IsAsciiDigit('a')); +} + +TEST(IsAsciiDigitTest, IsTrueForDigit) { + EXPECT_TRUE(IsAsciiDigit('0')); + EXPECT_TRUE(IsAsciiDigit('1')); + EXPECT_TRUE(IsAsciiDigit('5')); + EXPECT_TRUE(IsAsciiDigit('9')); +} + +TEST(IsAsciiPunctTest, IsFalseForNonPunct) { + EXPECT_FALSE(IsAsciiPunct('\0')); + EXPECT_FALSE(IsAsciiPunct(' ')); + EXPECT_FALSE(IsAsciiPunct('\n')); + EXPECT_FALSE(IsAsciiPunct('a')); + EXPECT_FALSE(IsAsciiPunct('0')); +} + +TEST(IsAsciiPunctTest, IsTrueForPunct) { + for (const char* p = "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~"; *p; p++) { + EXPECT_PRED1(IsAsciiPunct, *p); + } +} + +TEST(IsRepeatTest, IsFalseForNonRepeatChar) { + EXPECT_FALSE(IsRepeat('\0')); + EXPECT_FALSE(IsRepeat(' ')); + EXPECT_FALSE(IsRepeat('a')); + EXPECT_FALSE(IsRepeat('1')); + EXPECT_FALSE(IsRepeat('-')); +} + +TEST(IsRepeatTest, IsTrueForRepeatChar) { + EXPECT_TRUE(IsRepeat('?')); + EXPECT_TRUE(IsRepeat('*')); + EXPECT_TRUE(IsRepeat('+')); +} + +TEST(IsAsciiWhiteSpaceTest, IsFalseForNonWhiteSpace) { + EXPECT_FALSE(IsAsciiWhiteSpace('\0')); + EXPECT_FALSE(IsAsciiWhiteSpace('a')); + EXPECT_FALSE(IsAsciiWhiteSpace('1')); + 
EXPECT_FALSE(IsAsciiWhiteSpace('+')); + EXPECT_FALSE(IsAsciiWhiteSpace('_')); +} + +TEST(IsAsciiWhiteSpaceTest, IsTrueForWhiteSpace) { + EXPECT_TRUE(IsAsciiWhiteSpace(' ')); + EXPECT_TRUE(IsAsciiWhiteSpace('\n')); + EXPECT_TRUE(IsAsciiWhiteSpace('\r')); + EXPECT_TRUE(IsAsciiWhiteSpace('\t')); + EXPECT_TRUE(IsAsciiWhiteSpace('\v')); + EXPECT_TRUE(IsAsciiWhiteSpace('\f')); +} + +TEST(IsAsciiWordCharTest, IsFalseForNonWordChar) { + EXPECT_FALSE(IsAsciiWordChar('\0')); + EXPECT_FALSE(IsAsciiWordChar('+')); + EXPECT_FALSE(IsAsciiWordChar('.')); + EXPECT_FALSE(IsAsciiWordChar(' ')); + EXPECT_FALSE(IsAsciiWordChar('\n')); +} + +TEST(IsAsciiWordCharTest, IsTrueForLetter) { + EXPECT_TRUE(IsAsciiWordChar('a')); + EXPECT_TRUE(IsAsciiWordChar('b')); + EXPECT_TRUE(IsAsciiWordChar('A')); + EXPECT_TRUE(IsAsciiWordChar('Z')); +} + +TEST(IsAsciiWordCharTest, IsTrueForDigit) { + EXPECT_TRUE(IsAsciiWordChar('0')); + EXPECT_TRUE(IsAsciiWordChar('1')); + EXPECT_TRUE(IsAsciiWordChar('7')); + EXPECT_TRUE(IsAsciiWordChar('9')); +} + +TEST(IsAsciiWordCharTest, IsTrueForUnderscore) { + EXPECT_TRUE(IsAsciiWordChar('_')); +} + +TEST(IsValidEscapeTest, IsFalseForNonPrintable) { + EXPECT_FALSE(IsValidEscape('\0')); + EXPECT_FALSE(IsValidEscape('\007')); +} + +TEST(IsValidEscapeTest, IsFalseForDigit) { + EXPECT_FALSE(IsValidEscape('0')); + EXPECT_FALSE(IsValidEscape('9')); +} + +TEST(IsValidEscapeTest, IsFalseForWhiteSpace) { + EXPECT_FALSE(IsValidEscape(' ')); + EXPECT_FALSE(IsValidEscape('\n')); +} + +TEST(IsValidEscapeTest, IsFalseForSomeLetter) { + EXPECT_FALSE(IsValidEscape('a')); + EXPECT_FALSE(IsValidEscape('Z')); +} + +TEST(IsValidEscapeTest, IsTrueForPunct) { + EXPECT_TRUE(IsValidEscape('.')); + EXPECT_TRUE(IsValidEscape('-')); + EXPECT_TRUE(IsValidEscape('^')); + EXPECT_TRUE(IsValidEscape('$')); + EXPECT_TRUE(IsValidEscape('(')); + EXPECT_TRUE(IsValidEscape(']')); + EXPECT_TRUE(IsValidEscape('{')); + EXPECT_TRUE(IsValidEscape('|')); +} + +TEST(IsValidEscapeTest, IsTrueForSomeLetter) { + EXPECT_TRUE(IsValidEscape('d')); + EXPECT_TRUE(IsValidEscape('D')); + EXPECT_TRUE(IsValidEscape('s')); + EXPECT_TRUE(IsValidEscape('S')); + EXPECT_TRUE(IsValidEscape('w')); + EXPECT_TRUE(IsValidEscape('W')); +} + +TEST(AtomMatchesCharTest, EscapedPunct) { + EXPECT_FALSE(AtomMatchesChar(true, '\\', '\0')); + EXPECT_FALSE(AtomMatchesChar(true, '\\', ' ')); + EXPECT_FALSE(AtomMatchesChar(true, '_', '.')); + EXPECT_FALSE(AtomMatchesChar(true, '.', 'a')); + + EXPECT_TRUE(AtomMatchesChar(true, '\\', '\\')); + EXPECT_TRUE(AtomMatchesChar(true, '_', '_')); + EXPECT_TRUE(AtomMatchesChar(true, '+', '+')); + EXPECT_TRUE(AtomMatchesChar(true, '.', '.')); +} + +TEST(AtomMatchesCharTest, Escaped_d) { + EXPECT_FALSE(AtomMatchesChar(true, 'd', '\0')); + EXPECT_FALSE(AtomMatchesChar(true, 'd', 'a')); + EXPECT_FALSE(AtomMatchesChar(true, 'd', '.')); + + EXPECT_TRUE(AtomMatchesChar(true, 'd', '0')); + EXPECT_TRUE(AtomMatchesChar(true, 'd', '9')); +} + +TEST(AtomMatchesCharTest, Escaped_D) { + EXPECT_FALSE(AtomMatchesChar(true, 'D', '0')); + EXPECT_FALSE(AtomMatchesChar(true, 'D', '9')); + + EXPECT_TRUE(AtomMatchesChar(true, 'D', '\0')); + EXPECT_TRUE(AtomMatchesChar(true, 'D', 'a')); + EXPECT_TRUE(AtomMatchesChar(true, 'D', '-')); +} + +TEST(AtomMatchesCharTest, Escaped_s) { + EXPECT_FALSE(AtomMatchesChar(true, 's', '\0')); + EXPECT_FALSE(AtomMatchesChar(true, 's', 'a')); + EXPECT_FALSE(AtomMatchesChar(true, 's', '.')); + EXPECT_FALSE(AtomMatchesChar(true, 's', '9')); + + EXPECT_TRUE(AtomMatchesChar(true, 's', ' ')); + 
EXPECT_TRUE(AtomMatchesChar(true, 's', '\n')); + EXPECT_TRUE(AtomMatchesChar(true, 's', '\t')); +} + +TEST(AtomMatchesCharTest, Escaped_S) { + EXPECT_FALSE(AtomMatchesChar(true, 'S', ' ')); + EXPECT_FALSE(AtomMatchesChar(true, 'S', '\r')); + + EXPECT_TRUE(AtomMatchesChar(true, 'S', '\0')); + EXPECT_TRUE(AtomMatchesChar(true, 'S', 'a')); + EXPECT_TRUE(AtomMatchesChar(true, 'S', '9')); +} + +TEST(AtomMatchesCharTest, Escaped_w) { + EXPECT_FALSE(AtomMatchesChar(true, 'w', '\0')); + EXPECT_FALSE(AtomMatchesChar(true, 'w', '+')); + EXPECT_FALSE(AtomMatchesChar(true, 'w', ' ')); + EXPECT_FALSE(AtomMatchesChar(true, 'w', '\n')); + + EXPECT_TRUE(AtomMatchesChar(true, 'w', '0')); + EXPECT_TRUE(AtomMatchesChar(true, 'w', 'b')); + EXPECT_TRUE(AtomMatchesChar(true, 'w', 'C')); + EXPECT_TRUE(AtomMatchesChar(true, 'w', '_')); +} + +TEST(AtomMatchesCharTest, Escaped_W) { + EXPECT_FALSE(AtomMatchesChar(true, 'W', 'A')); + EXPECT_FALSE(AtomMatchesChar(true, 'W', 'b')); + EXPECT_FALSE(AtomMatchesChar(true, 'W', '9')); + EXPECT_FALSE(AtomMatchesChar(true, 'W', '_')); + + EXPECT_TRUE(AtomMatchesChar(true, 'W', '\0')); + EXPECT_TRUE(AtomMatchesChar(true, 'W', '*')); + EXPECT_TRUE(AtomMatchesChar(true, 'W', '\n')); +} + +TEST(AtomMatchesCharTest, EscapedWhiteSpace) { + EXPECT_FALSE(AtomMatchesChar(true, 'f', '\0')); + EXPECT_FALSE(AtomMatchesChar(true, 'f', '\n')); + EXPECT_FALSE(AtomMatchesChar(true, 'n', '\0')); + EXPECT_FALSE(AtomMatchesChar(true, 'n', '\r')); + EXPECT_FALSE(AtomMatchesChar(true, 'r', '\0')); + EXPECT_FALSE(AtomMatchesChar(true, 'r', 'a')); + EXPECT_FALSE(AtomMatchesChar(true, 't', '\0')); + EXPECT_FALSE(AtomMatchesChar(true, 't', 't')); + EXPECT_FALSE(AtomMatchesChar(true, 'v', '\0')); + EXPECT_FALSE(AtomMatchesChar(true, 'v', '\f')); + + EXPECT_TRUE(AtomMatchesChar(true, 'f', '\f')); + EXPECT_TRUE(AtomMatchesChar(true, 'n', '\n')); + EXPECT_TRUE(AtomMatchesChar(true, 'r', '\r')); + EXPECT_TRUE(AtomMatchesChar(true, 't', '\t')); + EXPECT_TRUE(AtomMatchesChar(true, 'v', '\v')); +} + +TEST(AtomMatchesCharTest, UnescapedDot) { + EXPECT_FALSE(AtomMatchesChar(false, '.', '\n')); + + EXPECT_TRUE(AtomMatchesChar(false, '.', '\0')); + EXPECT_TRUE(AtomMatchesChar(false, '.', '.')); + EXPECT_TRUE(AtomMatchesChar(false, '.', 'a')); + EXPECT_TRUE(AtomMatchesChar(false, '.', ' ')); +} + +TEST(AtomMatchesCharTest, UnescapedChar) { + EXPECT_FALSE(AtomMatchesChar(false, 'a', '\0')); + EXPECT_FALSE(AtomMatchesChar(false, 'a', 'b')); + EXPECT_FALSE(AtomMatchesChar(false, '$', 'a')); + + EXPECT_TRUE(AtomMatchesChar(false, '$', '$')); + EXPECT_TRUE(AtomMatchesChar(false, '5', '5')); + EXPECT_TRUE(AtomMatchesChar(false, 'Z', 'Z')); +} + +TEST(ValidateRegexTest, GeneratesFailureAndReturnsFalseForInvalid) { + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex(NULL)), + "NULL is not a valid simple regular expression"); + EXPECT_NONFATAL_FAILURE( + ASSERT_FALSE(ValidateRegex("a\\")), + "Syntax error at index 1 in simple regular expression \"a\\\": "); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("a\\")), + "'\\' cannot appear at the end"); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("\\n\\")), + "'\\' cannot appear at the end"); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("\\s\\hb")), + "invalid escape sequence \"\\h\""); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("^^")), + "'^' can only appear at the beginning"); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex(".*^b")), + "'^' can only appear at the beginning"); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("$$")), + 
"'$' can only appear at the end"); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("^$a")), + "'$' can only appear at the end"); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("a(b")), + "'(' is unsupported"); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("ab)")), + "')' is unsupported"); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("[ab")), + "'[' is unsupported"); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("a{2")), + "'{' is unsupported"); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("?")), + "'?' can only follow a repeatable token"); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("^*")), + "'*' can only follow a repeatable token"); + EXPECT_NONFATAL_FAILURE(ASSERT_FALSE(ValidateRegex("5*+")), + "'+' can only follow a repeatable token"); +} + +TEST(ValidateRegexTest, ReturnsTrueForValid) { + EXPECT_TRUE(ValidateRegex("")); + EXPECT_TRUE(ValidateRegex("a")); + EXPECT_TRUE(ValidateRegex(".*")); + EXPECT_TRUE(ValidateRegex("^a_+")); + EXPECT_TRUE(ValidateRegex("^a\\t\\&?")); + EXPECT_TRUE(ValidateRegex("09*$")); + EXPECT_TRUE(ValidateRegex("^Z$")); + EXPECT_TRUE(ValidateRegex("a\\^Z\\$\\(\\)\\|\\[\\]\\{\\}")); +} + +TEST(MatchRepetitionAndRegexAtHeadTest, WorksForZeroOrOne) { + EXPECT_FALSE(MatchRepetitionAndRegexAtHead(false, 'a', '?', "a", "ba")); + // Repeating more than once. + EXPECT_FALSE(MatchRepetitionAndRegexAtHead(false, 'a', '?', "b", "aab")); + + // Repeating zero times. + EXPECT_TRUE(MatchRepetitionAndRegexAtHead(false, 'a', '?', "b", "ba")); + // Repeating once. + EXPECT_TRUE(MatchRepetitionAndRegexAtHead(false, 'a', '?', "b", "ab")); + EXPECT_TRUE(MatchRepetitionAndRegexAtHead(false, '#', '?', ".", "##")); +} + +TEST(MatchRepetitionAndRegexAtHeadTest, WorksForZeroOrMany) { + EXPECT_FALSE(MatchRepetitionAndRegexAtHead(false, '.', '*', "a$", "baab")); + + // Repeating zero times. + EXPECT_TRUE(MatchRepetitionAndRegexAtHead(false, '.', '*', "b", "bc")); + // Repeating once. + EXPECT_TRUE(MatchRepetitionAndRegexAtHead(false, '.', '*', "b", "abc")); + // Repeating more than once. + EXPECT_TRUE(MatchRepetitionAndRegexAtHead(true, 'w', '*', "-", "ab_1-g")); +} + +TEST(MatchRepetitionAndRegexAtHeadTest, WorksForOneOrMany) { + EXPECT_FALSE(MatchRepetitionAndRegexAtHead(false, '.', '+', "a$", "baab")); + // Repeating zero times. + EXPECT_FALSE(MatchRepetitionAndRegexAtHead(false, '.', '+', "b", "bc")); + + // Repeating once. + EXPECT_TRUE(MatchRepetitionAndRegexAtHead(false, '.', '+', "b", "abc")); + // Repeating more than once. 
+ EXPECT_TRUE(MatchRepetitionAndRegexAtHead(true, 'w', '+', "-", "ab_1-g")); +} + +TEST(MatchRegexAtHeadTest, ReturnsTrueForEmptyRegex) { + EXPECT_TRUE(MatchRegexAtHead("", "")); + EXPECT_TRUE(MatchRegexAtHead("", "ab")); +} + +TEST(MatchRegexAtHeadTest, WorksWhenDollarIsInRegex) { + EXPECT_FALSE(MatchRegexAtHead("$", "a")); + + EXPECT_TRUE(MatchRegexAtHead("$", "")); + EXPECT_TRUE(MatchRegexAtHead("a$", "a")); +} + +TEST(MatchRegexAtHeadTest, WorksWhenRegexStartsWithEscapeSequence) { + EXPECT_FALSE(MatchRegexAtHead("\\w", "+")); + EXPECT_FALSE(MatchRegexAtHead("\\W", "ab")); + + EXPECT_TRUE(MatchRegexAtHead("\\sa", "\nab")); + EXPECT_TRUE(MatchRegexAtHead("\\d", "1a")); +} + +TEST(MatchRegexAtHeadTest, WorksWhenRegexStartsWithRepetition) { + EXPECT_FALSE(MatchRegexAtHead(".+a", "abc")); + EXPECT_FALSE(MatchRegexAtHead("a?b", "aab")); + + EXPECT_TRUE(MatchRegexAtHead(".*a", "bc12-ab")); + EXPECT_TRUE(MatchRegexAtHead("a?b", "b")); + EXPECT_TRUE(MatchRegexAtHead("a?b", "ab")); +} + +TEST(MatchRegexAtHeadTest, + WorksWhenRegexStartsWithRepetionOfEscapeSequence) { + EXPECT_FALSE(MatchRegexAtHead("\\.+a", "abc")); + EXPECT_FALSE(MatchRegexAtHead("\\s?b", " b")); + + EXPECT_TRUE(MatchRegexAtHead("\\(*a", "((((ab")); + EXPECT_TRUE(MatchRegexAtHead("\\^?b", "^b")); + EXPECT_TRUE(MatchRegexAtHead("\\\\?b", "b")); + EXPECT_TRUE(MatchRegexAtHead("\\\\?b", "\\b")); +} + +TEST(MatchRegexAtHeadTest, MatchesSequentially) { + EXPECT_FALSE(MatchRegexAtHead("ab.*c", "acabc")); + + EXPECT_TRUE(MatchRegexAtHead("ab.*c", "ab-fsc")); +} + +TEST(MatchRegexAnywhereTest, ReturnsFalseWhenStringIsNull) { + EXPECT_FALSE(MatchRegexAnywhere("", NULL)); +} + +TEST(MatchRegexAnywhereTest, WorksWhenRegexStartsWithCaret) { + EXPECT_FALSE(MatchRegexAnywhere("^a", "ba")); + EXPECT_FALSE(MatchRegexAnywhere("^$", "a")); + + EXPECT_TRUE(MatchRegexAnywhere("^a", "ab")); + EXPECT_TRUE(MatchRegexAnywhere("^", "ab")); + EXPECT_TRUE(MatchRegexAnywhere("^$", "")); +} + +TEST(MatchRegexAnywhereTest, ReturnsFalseWhenNoMatch) { + EXPECT_FALSE(MatchRegexAnywhere("a", "bcde123")); + EXPECT_FALSE(MatchRegexAnywhere("a.+a", "--aa88888888")); +} + +TEST(MatchRegexAnywhereTest, ReturnsTrueWhenMatchingPrefix) { + EXPECT_TRUE(MatchRegexAnywhere("\\w+", "ab1_ - 5")); + EXPECT_TRUE(MatchRegexAnywhere(".*=", "=")); + EXPECT_TRUE(MatchRegexAnywhere("x.*ab?.*bc", "xaaabc")); +} + +TEST(MatchRegexAnywhereTest, ReturnsTrueWhenMatchingNonPrefix) { + EXPECT_TRUE(MatchRegexAnywhere("\\w+", "$$$ ab1_ - 5")); + EXPECT_TRUE(MatchRegexAnywhere("\\.+=", "= ...=")); +} + +// Tests RE's implicit constructors. +TEST(RETest, ImplicitConstructorWorks) { + const RE empty(""); + EXPECT_STREQ("", empty.pattern()); + + const RE simple("hello"); + EXPECT_STREQ("hello", simple.pattern()); +} + +// Tests that RE's constructors reject invalid regular expressions. +TEST(RETest, RejectsInvalidRegex) { + EXPECT_NONFATAL_FAILURE({ + const RE normal(NULL); + }, "NULL is not a valid simple regular expression"); + + EXPECT_NONFATAL_FAILURE({ + const RE normal(".*(\\w+"); + }, "'(' is unsupported"); + + EXPECT_NONFATAL_FAILURE({ + const RE invalid("^?"); + }, "'?' can only follow a repeatable token"); +} + +// Tests RE::FullMatch(). 
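+// (FullMatch() succeeds only if the pattern matches the entire string;
+// PartialMatch(), tested next, also accepts a match of any substring.)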
+TEST(RETest, FullMatchWorks) {
+  const RE empty("");
+  EXPECT_TRUE(RE::FullMatch("", empty));
+  EXPECT_FALSE(RE::FullMatch("a", empty));
+
+  const RE re1("a");
+  EXPECT_TRUE(RE::FullMatch("a", re1));
+
+  const RE re("a.*z");
+  EXPECT_TRUE(RE::FullMatch("az", re));
+  EXPECT_TRUE(RE::FullMatch("axyz", re));
+  EXPECT_FALSE(RE::FullMatch("baz", re));
+  EXPECT_FALSE(RE::FullMatch("azy", re));
+}
+
+// Tests RE::PartialMatch().
+TEST(RETest, PartialMatchWorks) {
+  const RE empty("");
+  EXPECT_TRUE(RE::PartialMatch("", empty));
+  EXPECT_TRUE(RE::PartialMatch("a", empty));
+
+  const RE re("a.*z");
+  EXPECT_TRUE(RE::PartialMatch("az", re));
+  EXPECT_TRUE(RE::PartialMatch("axyz", re));
+  EXPECT_TRUE(RE::PartialMatch("baz", re));
+  EXPECT_TRUE(RE::PartialMatch("azy", re));
+  EXPECT_FALSE(RE::PartialMatch("zza", re));
+}
+
+#endif  // GTEST_USES_POSIX_RE
+
+#if !GTEST_OS_WINDOWS_MOBILE
+
+TEST(CaptureTest, CapturesStdout) {
+  CaptureStdout();
+  fprintf(stdout, "abc");
+  EXPECT_STREQ("abc", GetCapturedStdout().c_str());
+
+  CaptureStdout();
+  fprintf(stdout, "def%cghi", '\0');
+  EXPECT_EQ(::std::string("def\0ghi", 7), ::std::string(GetCapturedStdout()));
+}
+
+TEST(CaptureTest, CapturesStderr) {
+  CaptureStderr();
+  fprintf(stderr, "jkl");
+  EXPECT_STREQ("jkl", GetCapturedStderr().c_str());
+
+  CaptureStderr();
+  fprintf(stderr, "jkl%cmno", '\0');
+  EXPECT_EQ(::std::string("jkl\0mno", 7), ::std::string(GetCapturedStderr()));
+}
+
+// Tests that stdout and stderr capture don't interfere with each other.
+TEST(CaptureTest, CapturesStdoutAndStderr) {
+  CaptureStdout();
+  CaptureStderr();
+  fprintf(stdout, "pqr");
+  fprintf(stderr, "stu");
+  EXPECT_STREQ("pqr", GetCapturedStdout().c_str());
+  EXPECT_STREQ("stu", GetCapturedStderr().c_str());
+}
+
+TEST(CaptureDeathTest, CannotReenterStdoutCapture) {
+  CaptureStdout();
+  EXPECT_DEATH_IF_SUPPORTED(CaptureStdout(),
+                            "Only one stdout capturer can exist at a time");
+  GetCapturedStdout();
+
+  // We cannot test stderr capturing using death tests as they use it
+  // themselves.
+}
+
+#endif  // !GTEST_OS_WINDOWS_MOBILE
+
+TEST(ThreadLocalTest, DefaultConstructorInitializesToDefaultValues) {
+  ThreadLocal<int> t1;
+  EXPECT_EQ(0, t1.get());
+
+  ThreadLocal<void*> t2;
+  EXPECT_TRUE(t2.get() == NULL);
+}
+
+TEST(ThreadLocalTest, SingleParamConstructorInitializesToParam) {
+  ThreadLocal<int> t1(123);
+  EXPECT_EQ(123, t1.get());
+
+  int i = 0;
+  ThreadLocal<int*> t2(&i);
+  EXPECT_EQ(&i, t2.get());
+}
+
+class NoDefaultContructor {
+ public:
+  explicit NoDefaultContructor(const char*) {}
+  NoDefaultContructor(const NoDefaultContructor&) {}
+};
+
+TEST(ThreadLocalTest, ValueDefaultContructorIsNotRequiredForParamVersion) {
+  ThreadLocal<NoDefaultContructor> bar(NoDefaultContructor("foo"));
+  bar.pointer();
+}
+
+TEST(ThreadLocalTest, GetAndPointerReturnSameValue) {
+  ThreadLocal<std::string> thread_local_string;
+
+  EXPECT_EQ(thread_local_string.pointer(), &(thread_local_string.get()));
+
+  // Verifies the condition still holds after calling set.
+  thread_local_string.set("foo");
+  EXPECT_EQ(thread_local_string.pointer(), &(thread_local_string.get()));
+}
+
+TEST(ThreadLocalTest, PointerAndConstPointerReturnSameValue) {
+  ThreadLocal<std::string> thread_local_string;
+  const ThreadLocal<std::string>& const_thread_local_string =
+      thread_local_string;
+
+  EXPECT_EQ(thread_local_string.pointer(), const_thread_local_string.pointer());
+
+  thread_local_string.set("foo");
+  EXPECT_EQ(thread_local_string.pointer(), const_thread_local_string.pointer());
+}
+
+#if GTEST_IS_THREADSAFE
+
+void AddTwo(int* param) { *param += 2; }
+
+TEST(ThreadWithParamTest, ConstructorExecutesThreadFunc) {
+  int i = 40;
+  ThreadWithParam<int*> thread(&AddTwo, &i, NULL);
+  thread.Join();
+  EXPECT_EQ(42, i);
+}
+
+TEST(MutexDeathTest, AssertHeldShouldAssertWhenNotLocked) {
+  // AssertHeld() is flaky only in the presence of multiple threads accessing
+  // the lock. In this case, the test is robust.
+  EXPECT_DEATH_IF_SUPPORTED({
+    Mutex m;
+    { MutexLock lock(&m); }
+    m.AssertHeld();
+  },
+  "thread .*hold");
+}
+
+TEST(MutexTest, AssertHeldShouldNotAssertWhenLocked) {
+  Mutex m;
+  MutexLock lock(&m);
+  m.AssertHeld();
+}
+
+class AtomicCounterWithMutex {
+ public:
+  explicit AtomicCounterWithMutex(Mutex* mutex) :
+      value_(0), mutex_(mutex), random_(42) {}
+
+  void Increment() {
+    MutexLock lock(mutex_);
+    int temp = value_;
+    {
+      // Locking a mutex puts up a memory barrier, preventing reads and
+      // writes to value_ from being rearranged when observed from other
+      // threads.
+      //
+      // We cannot use Mutex and MutexLock here or rely on their memory
+      // barrier functionality as we are testing them here.
+      pthread_mutex_t memory_barrier_mutex;
+      GTEST_CHECK_POSIX_SUCCESS_(
+          pthread_mutex_init(&memory_barrier_mutex, NULL));
+      GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&memory_barrier_mutex));
+
+      SleepMilliseconds(random_.Generate(30));
+
+      GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&memory_barrier_mutex));
+      GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&memory_barrier_mutex));
+    }
+    value_ = temp + 1;
+  }
+  int value() const { return value_; }
+
+ private:
+  volatile int value_;
+  Mutex* const mutex_;  // Protects value_.
+  Random random_;
+};
+
+void CountingThreadFunc(pair<AtomicCounterWithMutex*, int> param) {
+  for (int i = 0; i < param.second; ++i)
+    param.first->Increment();
+}
+
+// Tests that the mutex only lets one thread at a time lock it.
+TEST(MutexTest, OnlyOneThreadCanLockAtATime) {
+  Mutex mutex;
+  AtomicCounterWithMutex locked_counter(&mutex);
+
+  typedef ThreadWithParam<pair<AtomicCounterWithMutex*, int> > ThreadType;
+  const int kCycleCount = 20;
+  const int kThreadCount = 7;
+  scoped_ptr<ThreadType> counting_threads[kThreadCount];
+  Notification threads_can_start;
+  // Creates and runs kThreadCount threads that increment locked_counter
+  // kCycleCount times each.
+  for (int i = 0; i < kThreadCount; ++i) {
+    counting_threads[i].reset(new ThreadType(&CountingThreadFunc,
+                                             make_pair(&locked_counter,
+                                                       kCycleCount),
+                                             &threads_can_start));
+  }
+  threads_can_start.Notify();
+  for (int i = 0; i < kThreadCount; ++i)
+    counting_threads[i]->Join();
+
+  // If the mutex lets more than one thread increment the counter at a
+  // time, they are likely to encounter a race condition and have some
+  // increments overwritten, resulting in a lower than expected counter
+  // value.
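+  // Here all kThreadCount * kCycleCount = 7 * 20 = 140 increments must
+  // survive for the check below to pass.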
+  EXPECT_EQ(kCycleCount * kThreadCount, locked_counter.value());
+}
+
+template <typename T>
+void RunFromThread(void (func)(T), T param) {
+  ThreadWithParam<T> thread(func, param, NULL);
+  thread.Join();
+}
+
+void RetrieveThreadLocalValue(
+    pair<ThreadLocal<std::string>*, std::string*> param) {
+  *param.second = param.first->get();
+}
+
+TEST(ThreadLocalTest, ParameterizedConstructorSetsDefault) {
+  ThreadLocal<std::string> thread_local_string("foo");
+  EXPECT_STREQ("foo", thread_local_string.get().c_str());
+
+  thread_local_string.set("bar");
+  EXPECT_STREQ("bar", thread_local_string.get().c_str());
+
+  std::string result;
+  RunFromThread(&RetrieveThreadLocalValue,
+                make_pair(&thread_local_string, &result));
+  EXPECT_STREQ("foo", result.c_str());
+}
+
+// DestructorTracker keeps track of whether its instances have been
+// destroyed.
+static std::vector<bool> g_destroyed;
+
+class DestructorTracker {
+ public:
+  DestructorTracker() : index_(GetNewIndex()) {}
+  DestructorTracker(const DestructorTracker& /* rhs */)
+      : index_(GetNewIndex()) {}
+  ~DestructorTracker() {
+    // We never access g_destroyed concurrently, so we don't need to
+    // protect the write operation under a mutex.
+    g_destroyed[index_] = true;
+  }
+
+ private:
+  static int GetNewIndex() {
+    g_destroyed.push_back(false);
+    return g_destroyed.size() - 1;
+  }
+  const int index_;
+};
+
+typedef ThreadLocal<DestructorTracker>* ThreadParam;
+
+void CallThreadLocalGet(ThreadParam thread_local_param) {
+  thread_local_param->get();
+}
+
+// Tests that when a ThreadLocal object dies in a thread, it destroys
+// the managed object for that thread.
+TEST(ThreadLocalTest, DestroysManagedObjectForOwnThreadWhenDying) {
+  g_destroyed.clear();
+
+  {
+    // The next line default constructs a DestructorTracker object as
+    // the default value of objects managed by thread_local_tracker.
+    ThreadLocal<DestructorTracker> thread_local_tracker;
+    ASSERT_EQ(1U, g_destroyed.size());
+    ASSERT_FALSE(g_destroyed[0]);
+
+    // This creates another DestructorTracker object for the main thread.
+    thread_local_tracker.get();
+    ASSERT_EQ(2U, g_destroyed.size());
+    ASSERT_FALSE(g_destroyed[0]);
+    ASSERT_FALSE(g_destroyed[1]);
+  }
+
+  // Now thread_local_tracker has died.  It should have destroyed both the
+  // default value shared by all threads and the value for the main
+  // thread.
+  ASSERT_EQ(2U, g_destroyed.size());
+  EXPECT_TRUE(g_destroyed[0]);
+  EXPECT_TRUE(g_destroyed[1]);
+
+  g_destroyed.clear();
+}
+
+// Tests that when a thread exits, the thread-local object for that
+// thread is destroyed.
+TEST(ThreadLocalTest, DestroysManagedObjectAtThreadExit) {
+  g_destroyed.clear();
+
+  {
+    // The next line default constructs a DestructorTracker object as
+    // the default value of objects managed by thread_local_tracker.
+    ThreadLocal<DestructorTracker> thread_local_tracker;
+    ASSERT_EQ(1U, g_destroyed.size());
+    ASSERT_FALSE(g_destroyed[0]);
+
+    // This creates another DestructorTracker object in the new thread.
+    ThreadWithParam<ThreadParam> thread(
+        &CallThreadLocalGet, &thread_local_tracker, NULL);
+    thread.Join();
+
+    // Now the new thread has exited.  The per-thread object for it
+    // should have been destroyed.
+    ASSERT_EQ(2U, g_destroyed.size());
+    ASSERT_FALSE(g_destroyed[0]);
+    ASSERT_TRUE(g_destroyed[1]);
+  }
+
+  // Now thread_local_tracker has died.  The default value should have been
+  // destroyed too.
+  ASSERT_EQ(2U, g_destroyed.size());
+  EXPECT_TRUE(g_destroyed[0]);
+  EXPECT_TRUE(g_destroyed[1]);
+
+  g_destroyed.clear();
+}
+
+TEST(ThreadLocalTest, ThreadLocalMutationsAffectOnlyCurrentThread) {
+  ThreadLocal<std::string> thread_local_string;
+  thread_local_string.set("Foo");
+  EXPECT_STREQ("Foo", thread_local_string.get().c_str());
+
+  std::string result;
+  RunFromThread(&RetrieveThreadLocalValue,
+                make_pair(&thread_local_string, &result));
+  EXPECT_TRUE(result.empty());
+}
+
+#endif  // GTEST_IS_THREADSAFE
+
+}  // namespace internal
+}  // namespace testing
diff --git a/external/gtest/test/gtest-printers_test.cc b/external/gtest/test/gtest-printers_test.cc
new file mode 100755
index 0000000000..c2beca7d8d
--- /dev/null
+++ b/external/gtest/test/gtest-printers_test.cc
@@ -0,0 +1,1566 @@
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Test - The Google C++ Testing Framework
+//
+// This file tests the universal value printer.
+
+#include "gtest/gtest-printers.h"
+
+#include <ctype.h>
+#include <limits.h>
+#include <string.h>
+#include <algorithm>
+#include <deque>
+#include <list>
+#include <map>
+#include <set>
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "gtest/gtest.h"
+
+// hash_map and hash_set are available under Visual C++.
+#if _MSC_VER
+# define GTEST_HAS_HASH_MAP_ 1  // Indicates that hash_map is available.
+# include <hash_map>            // NOLINT
+# define GTEST_HAS_HASH_SET_ 1  // Indicates that hash_set is available.
+# include <hash_set>            // NOLINT
+#endif  // GTEST_OS_WINDOWS
+
+// Some user-defined types for testing the universal value printer.
+
+// An anonymous enum type.
+enum AnonymousEnum {
+  kAE1 = -1,
+  kAE2 = 1
+};
+
+// An enum without a user-defined printer.
+enum EnumWithoutPrinter {
+  kEWP1 = -2,
+  kEWP2 = 42
+};
+
+// An enum with a << operator.
+enum EnumWithStreaming {
+  kEWS1 = 10
+};
+
+std::ostream& operator<<(std::ostream& os, EnumWithStreaming e) {
+  return os << (e == kEWS1 ? "kEWS1" : "invalid");
+}
+
+// An enum with a PrintTo() function.
+enum EnumWithPrintTo {
+  kEWPT1 = 1
+};
+
+void PrintTo(EnumWithPrintTo e, std::ostream* os) {
+  *os << (e == kEWPT1 ? "kEWPT1" : "invalid");
+}
+
+// A class implicitly convertible to BiggestInt.
+class BiggestIntConvertible {
+ public:
+  operator ::testing::internal::BiggestInt() const { return 42; }
+};
+
+// A user-defined unprintable class template in the global namespace.
+template <typename T>
+class UnprintableTemplateInGlobal {
+ public:
+  UnprintableTemplateInGlobal() : value_() {}
+ private:
+  T value_;
+};
+
+// A user-defined streamable type in the global namespace.
+class StreamableInGlobal {
+ public:
+  virtual ~StreamableInGlobal() {}
+};
+
+inline void operator<<(::std::ostream& os, const StreamableInGlobal& /* x */) {
+  os << "StreamableInGlobal";
+}
+
+void operator<<(::std::ostream& os, const StreamableInGlobal* /* x */) {
+  os << "StreamableInGlobal*";
+}
+
+namespace foo {
+
+// A user-defined unprintable type in a user namespace.
+class UnprintableInFoo {
+ public:
+  UnprintableInFoo() : z_(0) { memcpy(xy_, "\xEF\x12\x0\x0\x34\xAB\x0\x0", 8); }
+ private:
+  char xy_[8];
+  double z_;
+};
+
+// A user-defined printable type in a user-chosen namespace.
+struct PrintableViaPrintTo {
+  PrintableViaPrintTo() : value() {}
+  int value;
+};
+
+void PrintTo(const PrintableViaPrintTo& x, ::std::ostream* os) {
+  *os << "PrintableViaPrintTo: " << x.value;
+}
+
+// A type with a user-defined << for printing its pointer.
+struct PointerPrintable {
+};
+
+::std::ostream& operator<<(::std::ostream& os,
+                           const PointerPrintable* /* x */) {
+  return os << "PointerPrintable*";
+}
+
+// A user-defined printable class template in a user-chosen namespace.
+template <typename T>
+class PrintableViaPrintToTemplate {
+ public:
+  explicit PrintableViaPrintToTemplate(const T& a_value) : value_(a_value) {}
+
+  const T& value() const { return value_; }
+ private:
+  T value_;
+};
+
+template <typename T>
+void PrintTo(const PrintableViaPrintToTemplate<T>& x, ::std::ostream* os) {
+  *os << "PrintableViaPrintToTemplate: " << x.value();
+}
+
+// A user-defined streamable class template in a user namespace.
+template <typename T>
+class StreamableTemplateInFoo {
+ public:
+  StreamableTemplateInFoo() : value_() {}
+
+  const T& value() const { return value_; }
+ private:
+  T value_;
+};
+
+template <typename T>
+inline ::std::ostream& operator<<(::std::ostream& os,
+                                  const StreamableTemplateInFoo<T>& x) {
+  return os << "StreamableTemplateInFoo: " << x.value();
+}
+
+}  // namespace foo
+
+namespace testing {
+namespace gtest_printers_test {
+
+using ::std::deque;
+using ::std::list;
+using ::std::make_pair;
+using ::std::map;
+using ::std::multimap;
+using ::std::multiset;
+using ::std::pair;
+using ::std::set;
+using ::std::vector;
+using ::testing::PrintToString;
+using ::testing::internal::FormatForComparisonFailureMessage;
+using ::testing::internal::ImplicitCast_;
+using ::testing::internal::NativeArray;
+using ::testing::internal::RE;
+using ::testing::internal::Strings;
+using ::testing::internal::UniversalPrint;
+using ::testing::internal::UniversalPrinter;
+using ::testing::internal::UniversalTersePrint;
+using ::testing::internal::UniversalTersePrintTupleFieldsToStrings;
+using ::testing::internal::kReference;
+using ::testing::internal::string;
+
+#if GTEST_HAS_TR1_TUPLE
+using ::std::tr1::make_tuple;
+using ::std::tr1::tuple;
+#endif
+
+// The hash_* classes are not part of the C++ standard.  STLport
+// defines them in namespace std.  MSVC defines them in ::stdext.  GCC
+// defines them in ::.
+#ifdef _STLP_HASH_MAP  // We got <hash_map> from STLport.
+using ::std::hash_map;
+using ::std::hash_set;
+using ::std::hash_multimap;
+using ::std::hash_multiset;
+#elif _MSC_VER
+using ::stdext::hash_map;
+using ::stdext::hash_set;
+using ::stdext::hash_multimap;
+using ::stdext::hash_multiset;
+#endif
+
+// Prints a value to a string using the universal value printer.  This
+// is a helper for testing UniversalPrinter<T>::Print() for various types.
+template <typename T>
+string Print(const T& value) {
+  ::std::stringstream ss;
+  UniversalPrinter<T>::Print(value, &ss);
+  return ss.str();
+}
+
+// Prints a value passed by reference to a string, using the universal
+// value printer.  This is a helper for testing
+// UniversalPrinter<T&>::Print() for various types.
+template <typename T>
+string PrintByRef(const T& value) {
+  ::std::stringstream ss;
+  UniversalPrinter<T&>::Print(value, &ss);
+  return ss.str();
+}
+
+// Tests printing various enum types.
+
+TEST(PrintEnumTest, AnonymousEnum) {
+  EXPECT_EQ("-1", Print(kAE1));
+  EXPECT_EQ("1", Print(kAE2));
+}
+
+TEST(PrintEnumTest, EnumWithoutPrinter) {
+  EXPECT_EQ("-2", Print(kEWP1));
+  EXPECT_EQ("42", Print(kEWP2));
+}
+
+TEST(PrintEnumTest, EnumWithStreaming) {
+  EXPECT_EQ("kEWS1", Print(kEWS1));
+  EXPECT_EQ("invalid", Print(static_cast<EnumWithStreaming>(0)));
+}
+
+TEST(PrintEnumTest, EnumWithPrintTo) {
+  EXPECT_EQ("kEWPT1", Print(kEWPT1));
+  EXPECT_EQ("invalid", Print(static_cast<EnumWithPrintTo>(0)));
+}
+
+// Tests printing a class implicitly convertible to BiggestInt.
+
+TEST(PrintClassTest, BiggestIntConvertible) {
+  EXPECT_EQ("42", Print(BiggestIntConvertible()));
+}
+
+// Tests printing various char types.
+
+// char.
+TEST(PrintCharTest, PlainChar) {
+  EXPECT_EQ("'\\0'", Print('\0'));
+  EXPECT_EQ("'\\'' (39, 0x27)", Print('\''));
+  EXPECT_EQ("'\"' (34, 0x22)", Print('"'));
+  EXPECT_EQ("'?' (63, 0x3F)", Print('?'));
+  EXPECT_EQ("'\\\\' (92, 0x5C)", Print('\\'));
+  EXPECT_EQ("'\\a' (7)", Print('\a'));
+  EXPECT_EQ("'\\b' (8)", Print('\b'));
+  EXPECT_EQ("'\\f' (12, 0xC)", Print('\f'));
+  EXPECT_EQ("'\\n' (10, 0xA)", Print('\n'));
+  EXPECT_EQ("'\\r' (13, 0xD)", Print('\r'));
+  EXPECT_EQ("'\\t' (9)", Print('\t'));
+  EXPECT_EQ("'\\v' (11, 0xB)", Print('\v'));
+  EXPECT_EQ("'\\x7F' (127)", Print('\x7F'));
+  EXPECT_EQ("'\\xFF' (255)", Print('\xFF'));
+  EXPECT_EQ("' ' (32, 0x20)", Print(' '));
+  EXPECT_EQ("'a' (97, 0x61)", Print('a'));
+}
+
+// signed char.
+TEST(PrintCharTest, SignedChar) {
+  EXPECT_EQ("'\\0'", Print(static_cast<signed char>('\0')));
+  EXPECT_EQ("'\\xCE' (-50)",
+            Print(static_cast<signed char>(-50)));
+}
+
+// unsigned char.
+TEST(PrintCharTest, UnsignedChar) {
+  EXPECT_EQ("'\\0'", Print(static_cast<unsigned char>('\0')));
+  EXPECT_EQ("'b' (98, 0x62)",
+            Print(static_cast<unsigned char>('b')));
+}
+
+// Tests printing other simple, built-in types.
+
+// bool.
+TEST(PrintBuiltInTypeTest, Bool) {
+  EXPECT_EQ("false", Print(false));
+  EXPECT_EQ("true", Print(true));
+}
+
+// wchar_t.
+TEST(PrintBuiltInTypeTest, Wchar_t) {
+  EXPECT_EQ("L'\\0'", Print(L'\0'));
+  EXPECT_EQ("L'\\'' (39, 0x27)", Print(L'\''));
+  EXPECT_EQ("L'\"' (34, 0x22)", Print(L'"'));
+  EXPECT_EQ("L'?' (63, 0x3F)", Print(L'?'));
+  EXPECT_EQ("L'\\\\' (92, 0x5C)", Print(L'\\'));
+  EXPECT_EQ("L'\\a' (7)", Print(L'\a'));
+  EXPECT_EQ("L'\\b' (8)", Print(L'\b'));
+  EXPECT_EQ("L'\\f' (12, 0xC)", Print(L'\f'));
+  EXPECT_EQ("L'\\n' (10, 0xA)", Print(L'\n'));
+  EXPECT_EQ("L'\\r' (13, 0xD)", Print(L'\r'));
+  EXPECT_EQ("L'\\t' (9)", Print(L'\t'));
+  EXPECT_EQ("L'\\v' (11, 0xB)", Print(L'\v'));
+  EXPECT_EQ("L'\\x7F' (127)", Print(L'\x7F'));
+  EXPECT_EQ("L'\\xFF' (255)", Print(L'\xFF'));
+  EXPECT_EQ("L' ' (32, 0x20)", Print(L' '));
+  EXPECT_EQ("L'a' (97, 0x61)", Print(L'a'));
+  EXPECT_EQ("L'\\x576' (1398)", Print(static_cast<wchar_t>(0x576)));
+  EXPECT_EQ("L'\\xC74D' (51021)", Print(static_cast<wchar_t>(0xC74D)));
+}
+
+// Test that Int64 provides more storage than wchar_t.
+TEST(PrintTypeSizeTest, Wchar_t) {
+  EXPECT_LT(sizeof(wchar_t), sizeof(testing::internal::Int64));
+}
+
+// Various integer types.
+TEST(PrintBuiltInTypeTest, Integer) {
+  EXPECT_EQ("'\\xFF' (255)", Print(static_cast<unsigned char>(255)));  // uint8
+  EXPECT_EQ("'\\x80' (-128)", Print(static_cast<signed char>(-128)));  // int8
+  EXPECT_EQ("65535", Print(USHRT_MAX));  // uint16
+  EXPECT_EQ("-32768", Print(SHRT_MIN));  // int16
+  EXPECT_EQ("4294967295", Print(UINT_MAX));  // uint32
+  EXPECT_EQ("-2147483648", Print(INT_MIN));  // int32
+  EXPECT_EQ("18446744073709551615",
+            Print(static_cast<testing::internal::UInt64>(-1)));  // uint64
+  EXPECT_EQ("-9223372036854775808",
+            Print(static_cast<testing::internal::Int64>(1) << 63));  // int64
+}
+
+// Size types.
+TEST(PrintBuiltInTypeTest, Size_t) {
+  EXPECT_EQ("1", Print(sizeof('a')));  // size_t.
+#if !GTEST_OS_WINDOWS
+  // Windows has no ssize_t type.
+  EXPECT_EQ("-2", Print(static_cast<ssize_t>(-2)));  // ssize_t.
+#endif  // !GTEST_OS_WINDOWS
+}
+
+// Floating-points.
+TEST(PrintBuiltInTypeTest, FloatingPoints) {
+  EXPECT_EQ("1.5", Print(1.5f));   // float
+  EXPECT_EQ("-2.5", Print(-2.5));  // double
+}
+
+// Since ::std::stringstream::operator<<(const void *) formats the pointer
+// output differently with different compilers, we have to create the expected
+// output first and use it as our expectation.
+static string PrintPointer(const void *p) {
+  ::std::stringstream expected_result_stream;
+  expected_result_stream << p;
+  return expected_result_stream.str();
+}
+
+// Tests printing C strings.
+
+// const char*.
+TEST(PrintCStringTest, Const) {
+  const char* p = "World";
+  EXPECT_EQ(PrintPointer(p) + " pointing to \"World\"", Print(p));
+}
+
+// char*.
+TEST(PrintCStringTest, NonConst) {
+  char p[] = "Hi";
+  EXPECT_EQ(PrintPointer(p) + " pointing to \"Hi\"",
+            Print(static_cast<char*>(p)));
+}
+
+// NULL C string.
+TEST(PrintCStringTest, Null) {
+  const char* p = NULL;
+  EXPECT_EQ("NULL", Print(p));
+}
+
+// Tests that C strings are escaped properly.
+TEST(PrintCStringTest, EscapesProperly) {
+  const char* p = "'\"?\\\a\b\f\n\r\t\v\x7F\xFF a";
+  EXPECT_EQ(PrintPointer(p) + " pointing to \"'\\\"?\\\\\\a\\b\\f"
+            "\\n\\r\\t\\v\\x7F\\xFF a\"",
+            Print(p));
+}
+
+
+
+// MSVC compiler can be configured to define wchar_t as a typedef
+// of unsigned short.  Defining an overload for const wchar_t* in that case
+// would cause pointers to unsigned shorts be printed as wide strings,
+// possibly accessing more memory than intended and causing invalid
+// memory accesses.  MSVC defines _NATIVE_WCHAR_T_DEFINED symbol when
+// wchar_t is implemented as a native type.
+#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
+
+// const wchar_t*.
+TEST(PrintWideCStringTest, Const) {
+  const wchar_t* p = L"World";
+  EXPECT_EQ(PrintPointer(p) + " pointing to L\"World\"", Print(p));
+}
+
+// wchar_t*.
+TEST(PrintWideCStringTest, NonConst) {
+  wchar_t p[] = L"Hi";
+  EXPECT_EQ(PrintPointer(p) + " pointing to L\"Hi\"",
+            Print(static_cast<wchar_t*>(p)));
+}
+
+// NULL wide C string.
+TEST(PrintWideCStringTest, Null) {
+  const wchar_t* p = NULL;
+  EXPECT_EQ("NULL", Print(p));
+}
+
+// Tests that wide C strings are escaped properly.
+TEST(PrintWideCStringTest, EscapesProperly) {
+  const wchar_t s[] = {'\'', '"', '?', '\\', '\a', '\b', '\f', '\n', '\r',
+                       '\t', '\v', 0xD3, 0x576, 0x8D3, 0xC74D, ' ', 'a', '\0'};
+  EXPECT_EQ(PrintPointer(s) + " pointing to L\"'\\\"?\\\\\\a\\b\\f"
+            "\\n\\r\\t\\v\\xD3\\x576\\x8D3\\xC74D a\"",
+            Print(static_cast<const wchar_t*>(s)));
+}
+#endif  // native wchar_t
+
+// Tests printing pointers to other char types.
+
+// signed char*.
+TEST(PrintCharPointerTest, SignedChar) {
+  signed char* p = reinterpret_cast<signed char*>(0x1234);
+  EXPECT_EQ(PrintPointer(p), Print(p));
+  p = NULL;
+  EXPECT_EQ("NULL", Print(p));
+}
+
+// const signed char*.
+TEST(PrintCharPointerTest, ConstSignedChar) {
+  signed char* p = reinterpret_cast<signed char*>(0x1234);
+  EXPECT_EQ(PrintPointer(p), Print(p));
+  p = NULL;
+  EXPECT_EQ("NULL", Print(p));
+}
+
+// unsigned char*.
+TEST(PrintCharPointerTest, UnsignedChar) {
+  unsigned char* p = reinterpret_cast<unsigned char*>(0x1234);
+  EXPECT_EQ(PrintPointer(p), Print(p));
+  p = NULL;
+  EXPECT_EQ("NULL", Print(p));
+}
+
+// const unsigned char*.
+TEST(PrintCharPointerTest, ConstUnsignedChar) {
+  const unsigned char* p = reinterpret_cast<const unsigned char*>(0x1234);
+  EXPECT_EQ(PrintPointer(p), Print(p));
+  p = NULL;
+  EXPECT_EQ("NULL", Print(p));
+}
+
+// Tests printing pointers to simple, built-in types.
+
+// bool*.
+TEST(PrintPointerToBuiltInTypeTest, Bool) {
+  bool* p = reinterpret_cast<bool*>(0xABCD);
+  EXPECT_EQ(PrintPointer(p), Print(p));
+  p = NULL;
+  EXPECT_EQ("NULL", Print(p));
+}
+
+// void*.
+TEST(PrintPointerToBuiltInTypeTest, Void) {
+  void* p = reinterpret_cast<void*>(0xABCD);
+  EXPECT_EQ(PrintPointer(p), Print(p));
+  p = NULL;
+  EXPECT_EQ("NULL", Print(p));
+}
+
+// const void*.
+TEST(PrintPointerToBuiltInTypeTest, ConstVoid) {
+  const void* p = reinterpret_cast<const void*>(0xABCD);
+  EXPECT_EQ(PrintPointer(p), Print(p));
+  p = NULL;
+  EXPECT_EQ("NULL", Print(p));
+}
+
+// Tests printing pointers to pointers.
+TEST(PrintPointerToPointerTest, IntPointerPointer) {
+  int** p = reinterpret_cast<int**>(0xABCD);
+  EXPECT_EQ(PrintPointer(p), Print(p));
+  p = NULL;
+  EXPECT_EQ("NULL", Print(p));
+}
+
+// Tests printing (non-member) function pointers.
+
+void MyFunction(int /* n */) {}
+
+TEST(PrintPointerTest, NonMemberFunctionPointer) {
+  // We cannot directly cast &MyFunction to const void* because the
+  // standard disallows casting between pointers to functions and
+  // pointers to objects, and some compilers (e.g. GCC 3.4) enforce
+  // this limitation.
+  EXPECT_EQ(
+      PrintPointer(reinterpret_cast<const void*>(
+          reinterpret_cast<internal::BiggestInt>(&MyFunction))),
+      Print(&MyFunction));
+  int (*p)(bool) = NULL;  // NOLINT
+  EXPECT_EQ("NULL", Print(p));
+}
+
+// An assertion predicate determining whether one string is a prefix of
+// another.
+template <typename StringType>
+AssertionResult HasPrefix(const StringType& str, const StringType& prefix) {
+  if (str.find(prefix, 0) == 0)
+    return AssertionSuccess();
+
+  const bool is_wide_string = sizeof(prefix[0]) > 1;
+  const char* const begin_string_quote = is_wide_string ? "L\"" : "\"";
+  return AssertionFailure()
+      << begin_string_quote << prefix << "\" is not a prefix of "
+      << begin_string_quote << str << "\"\n";
+}
+
+// Tests printing member variable pointers.  Although they are called
+// pointers, they don't point to a location in the address space.
+// Their representation is implementation-defined.  Thus they will be
+// printed as raw bytes.
+
+struct Foo {
+ public:
+  virtual ~Foo() {}
+  int MyMethod(char x) { return x + 1; }
+  virtual char MyVirtualMethod(int /* n */) { return 'a'; }
+
+  int value;
+};
+
+TEST(PrintPointerTest, MemberVariablePointer) {
+  EXPECT_TRUE(HasPrefix(Print(&Foo::value),
+                        Print(sizeof(&Foo::value)) + "-byte object "));
+  int (Foo::*p) = NULL;  // NOLINT
+  EXPECT_TRUE(HasPrefix(Print(p),
+                        Print(sizeof(p)) + "-byte object "));
+}
+
+// Tests printing member function pointers.  Although they are called
+// pointers, they don't point to a location in the address space.
+// Their representation is implementation-defined.  Thus they will be
+// printed as raw bytes.
+TEST(PrintPointerTest, MemberFunctionPointer) {
+  EXPECT_TRUE(HasPrefix(Print(&Foo::MyMethod),
+                        Print(sizeof(&Foo::MyMethod)) + "-byte object "));
+  EXPECT_TRUE(
+      HasPrefix(Print(&Foo::MyVirtualMethod),
+                Print(sizeof((&Foo::MyVirtualMethod))) + "-byte object "));
+  int (Foo::*p)(char) = NULL;  // NOLINT
+  EXPECT_TRUE(HasPrefix(Print(p),
+                        Print(sizeof(p)) + "-byte object "));
+}
+
+// Tests printing C arrays.
+
+// The difference between this and Print() is that it ensures that the
+// argument is a reference to an array.
+template <typename T, size_t N>
+string PrintArrayHelper(T (&a)[N]) {
+  return Print(a);
+}
+
+// One-dimensional array.
+TEST(PrintArrayTest, OneDimensionalArray) {
+  int a[5] = { 1, 2, 3, 4, 5 };
+  EXPECT_EQ("{ 1, 2, 3, 4, 5 }", PrintArrayHelper(a));
+}
+
+// Two-dimensional array.
+TEST(PrintArrayTest, TwoDimensionalArray) {
+  int a[2][5] = {
+    { 1, 2, 3, 4, 5 },
+    { 6, 7, 8, 9, 0 }
+  };
+  EXPECT_EQ("{ { 1, 2, 3, 4, 5 }, { 6, 7, 8, 9, 0 } }", PrintArrayHelper(a));
+}
+
+// Array of const elements.
+TEST(PrintArrayTest, ConstArray) {
+  const bool a[1] = { false };
+  EXPECT_EQ("{ false }", PrintArrayHelper(a));
+}
+
+// char array without terminating NUL.
+TEST(PrintArrayTest, CharArrayWithNoTerminatingNul) {
+  // Array a contains '\0' in the middle and doesn't end with '\0'.
+  char a[] = { 'H', '\0', 'i' };
+  EXPECT_EQ("\"H\\0i\" (no terminating NUL)", PrintArrayHelper(a));
+}
+
+// const char array with terminating NUL.
+TEST(PrintArrayTest, ConstCharArrayWithTerminatingNul) {
+  const char a[] = "\0Hi";
+  EXPECT_EQ("\"\\0Hi\"", PrintArrayHelper(a));
+}
+
+// const wchar_t array without terminating NUL.
+TEST(PrintArrayTest, WCharArrayWithNoTerminatingNul) {
+  // Array a contains '\0' in the middle and doesn't end with '\0'.
+  const wchar_t a[] = { L'H', L'\0', L'i' };
+  EXPECT_EQ("L\"H\\0i\" (no terminating NUL)", PrintArrayHelper(a));
+}
+
+// wchar_t array with terminating NUL.
+TEST(PrintArrayTest, WConstCharArrayWithTerminatingNul) {
+  const wchar_t a[] = L"\0Hi";
+  EXPECT_EQ("L\"\\0Hi\"", PrintArrayHelper(a));
+}
+
+// Array of objects.
+TEST(PrintArrayTest, ObjectArray) {
+  string a[3] = { "Hi", "Hello", "Ni hao" };
+  EXPECT_EQ("{ \"Hi\", \"Hello\", \"Ni hao\" }", PrintArrayHelper(a));
+}
+
+// Array with many elements.
+TEST(PrintArrayTest, BigArray) {
+  int a[100] = { 1, 2, 3 };
+  EXPECT_EQ("{ 1, 2, 3, 0, 0, 0, 0, 0, ..., 0, 0, 0, 0, 0, 0, 0, 0 }",
+            PrintArrayHelper(a));
+}
+
+// Tests printing ::string and ::std::string.
+
+#if GTEST_HAS_GLOBAL_STRING
+// ::string.
+TEST(PrintStringTest, StringInGlobalNamespace) {
+  const char s[] = "'\"?\\\a\b\f\n\0\r\t\v\x7F\xFF a";
+  const ::string str(s, sizeof(s));
+  EXPECT_EQ("\"'\\\"?\\\\\\a\\b\\f\\n\\0\\r\\t\\v\\x7F\\xFF a\\0\"",
+            Print(str));
+}
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+// ::std::string.
+TEST(PrintStringTest, StringInStdNamespace) {
+  const char s[] = "'\"?\\\a\b\f\n\0\r\t\v\x7F\xFF a";
+  const ::std::string str(s, sizeof(s));
+  EXPECT_EQ("\"'\\\"?\\\\\\a\\b\\f\\n\\0\\r\\t\\v\\x7F\\xFF a\\0\"",
+            Print(str));
+}
+
+TEST(PrintStringTest, StringAmbiguousHex) {
+  // "\x6BANANA" is ambiguous, it can be interpreted as starting with either of:
+  // '\x6', '\x6B', or '\x6BA'.
+
+  // a hex escaping sequence followed by a decimal digit
+  EXPECT_EQ("\"0\\x12\" \"3\"", Print(::std::string("0\x12" "3")));
+  // a hex escaping sequence followed by a hex digit (lower-case)
+  EXPECT_EQ("\"mm\\x6\" \"bananas\"", Print(::std::string("mm\x6" "bananas")));
+  // a hex escaping sequence followed by a hex digit (upper-case)
+  EXPECT_EQ("\"NOM\\x6\" \"BANANA\"", Print(::std::string("NOM\x6" "BANANA")));
+  // a hex escaping sequence followed by a non-xdigit
+  EXPECT_EQ("\"!\\x5-!\"", Print(::std::string("!\x5-!")));
+}
+
+// Tests printing ::wstring and ::std::wstring.
+
+#if GTEST_HAS_GLOBAL_WSTRING
+// ::wstring.
+TEST(PrintWideStringTest, StringInGlobalNamespace) {
+  const wchar_t s[] = L"'\"?\\\a\b\f\n\0\r\t\v\xD3\x576\x8D3\xC74D a";
+  const ::wstring str(s, sizeof(s)/sizeof(wchar_t));
+  EXPECT_EQ("L\"'\\\"?\\\\\\a\\b\\f\\n\\0\\r\\t\\v"
+            "\\xD3\\x576\\x8D3\\xC74D a\\0\"",
+            Print(str));
+}
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+#if GTEST_HAS_STD_WSTRING
+// ::std::wstring.
+TEST(PrintWideStringTest, StringInStdNamespace) {
+  const wchar_t s[] = L"'\"?\\\a\b\f\n\0\r\t\v\xD3\x576\x8D3\xC74D a";
+  const ::std::wstring str(s, sizeof(s)/sizeof(wchar_t));
+  EXPECT_EQ("L\"'\\\"?\\\\\\a\\b\\f\\n\\0\\r\\t\\v"
+            "\\xD3\\x576\\x8D3\\xC74D a\\0\"",
+            Print(str));
+}
+
+TEST(PrintWideStringTest, StringAmbiguousHex) {
+  // same for wide strings.
+  EXPECT_EQ("L\"0\\x12\" L\"3\"", Print(::std::wstring(L"0\x12" L"3")));
+  EXPECT_EQ("L\"mm\\x6\" L\"bananas\"",
+            Print(::std::wstring(L"mm\x6" L"bananas")));
+  EXPECT_EQ("L\"NOM\\x6\" L\"BANANA\"",
+            Print(::std::wstring(L"NOM\x6" L"BANANA")));
+  EXPECT_EQ("L\"!\\x5-!\"", Print(::std::wstring(L"!\x5-!")));
+}
+#endif  // GTEST_HAS_STD_WSTRING
+
+// Tests printing types that support generic streaming (i.e. streaming
+// to std::basic_ostream<Char, CharTraits> for any valid Char and
+// CharTraits types).
+
+// Tests printing a non-template type that supports generic streaming.
+
+class AllowsGenericStreaming {};
+
+template <typename Char, typename CharTraits>
+std::basic_ostream<Char, CharTraits>& operator<<(
+    std::basic_ostream<Char, CharTraits>& os,
+    const AllowsGenericStreaming& /* a */) {
+  return os << "AllowsGenericStreaming";
+}
+
+TEST(PrintTypeWithGenericStreamingTest, NonTemplateType) {
+  AllowsGenericStreaming a;
+  EXPECT_EQ("AllowsGenericStreaming", Print(a));
+}
+
+// Tests printing a template type that supports generic streaming.
+
+template <typename T>
+class AllowsGenericStreamingTemplate {};
+
+template <typename Char, typename CharTraits, typename T>
+std::basic_ostream<Char, CharTraits>& operator<<(
+    std::basic_ostream<Char, CharTraits>& os,
+    const AllowsGenericStreamingTemplate<T>& /* a */) {
+  return os << "AllowsGenericStreamingTemplate";
+}
+
+TEST(PrintTypeWithGenericStreamingTest, TemplateType) {
+  AllowsGenericStreamingTemplate<int> a;
+  EXPECT_EQ("AllowsGenericStreamingTemplate", Print(a));
+}
+
+// Tests printing a type that supports generic streaming and can be
+// implicitly converted to another printable type.
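+// The universal printer should prefer the streaming operator over the
+// implicit conversion to bool, as the assertion below verifies.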
+
+template <typename T>
+class AllowsGenericStreamingAndImplicitConversionTemplate {
+ public:
+  operator bool() const { return false; }
+};
+
+template <typename Char, typename CharTraits, typename T>
+std::basic_ostream<Char, CharTraits>& operator<<(
+    std::basic_ostream<Char, CharTraits>& os,
+    const AllowsGenericStreamingAndImplicitConversionTemplate<T>& /* a */) {
+  return os << "AllowsGenericStreamingAndImplicitConversionTemplate";
+}
+
+TEST(PrintTypeWithGenericStreamingTest, TypeImplicitlyConvertible) {
+  AllowsGenericStreamingAndImplicitConversionTemplate<int> a;
+  EXPECT_EQ("AllowsGenericStreamingAndImplicitConversionTemplate", Print(a));
+}
+
+#if GTEST_HAS_STRING_PIECE_
+
+// Tests printing StringPiece.
+
+TEST(PrintStringPieceTest, SimpleStringPiece) {
+  const StringPiece sp = "Hello";
+  EXPECT_EQ("\"Hello\"", Print(sp));
+}
+
+TEST(PrintStringPieceTest, UnprintableCharacters) {
+  const char str[] = "NUL (\0) and \r\t";
+  const StringPiece sp(str, sizeof(str) - 1);
+  EXPECT_EQ("\"NUL (\\0) and \\r\\t\"", Print(sp));
+}
+
+#endif  // GTEST_HAS_STRING_PIECE_
+
+// Tests printing STL containers.
+
+TEST(PrintStlContainerTest, EmptyDeque) {
+  deque<int> empty;
+  EXPECT_EQ("{}", Print(empty));
+}
+
+TEST(PrintStlContainerTest, NonEmptyDeque) {
+  deque<int> non_empty;
+  non_empty.push_back(1);
+  non_empty.push_back(3);
+  EXPECT_EQ("{ 1, 3 }", Print(non_empty));
+}
+
+#if GTEST_HAS_HASH_MAP_
+
+TEST(PrintStlContainerTest, OneElementHashMap) {
+  hash_map<int, char> map1;
+  map1[1] = 'a';
+  EXPECT_EQ("{ (1, 'a' (97, 0x61)) }", Print(map1));
+}
+
+TEST(PrintStlContainerTest, HashMultiMap) {
+  hash_multimap<int, bool> map1;
+  map1.insert(make_pair(5, true));
+  map1.insert(make_pair(5, false));
+
+  // Elements of hash_multimap can be printed in any order.
+  const string result = Print(map1);
+  EXPECT_TRUE(result == "{ (5, true), (5, false) }" ||
+              result == "{ (5, false), (5, true) }")
+                  << " where Print(map1) returns \"" << result << "\".";
+}
+
+#endif  // GTEST_HAS_HASH_MAP_
+
+#if GTEST_HAS_HASH_SET_
+
+TEST(PrintStlContainerTest, HashSet) {
+  hash_set<string> set1;
+  set1.insert("hello");
+  EXPECT_EQ("{ \"hello\" }", Print(set1));
+}
+
+TEST(PrintStlContainerTest, HashMultiSet) {
+  const int kSize = 5;
+  int a[kSize] = { 1, 1, 2, 5, 1 };
+  hash_multiset<int> set1(a, a + kSize);
+
+  // Elements of hash_multiset can be printed in any order.
+  const string result = Print(set1);
+  const string expected_pattern = "{ d, d, d, d, d }";  // d means a digit.
+
+  // Verifies the result matches the expected pattern; also extracts
+  // the numbers in the result.
+  ASSERT_EQ(expected_pattern.length(), result.length());
+  std::vector<int> numbers;
+  for (size_t i = 0; i != result.length(); i++) {
+    if (expected_pattern[i] == 'd') {
+      ASSERT_NE(isdigit(static_cast<unsigned char>(result[i])), 0);
+      numbers.push_back(result[i] - '0');
+    } else {
+      EXPECT_EQ(expected_pattern[i], result[i]) << " where result is "
+                                                << result;
+    }
+  }
+
+  // Makes sure the result contains the right numbers.
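+  // (Since hash_multiset iteration order is unspecified, sorting both
+  // sequences reduces the check to std::equal on two sorted ranges.)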
+ std::sort(numbers.begin(), numbers.end()); + std::sort(a, a + kSize); + EXPECT_TRUE(std::equal(a, a + kSize, numbers.begin())); +} + +#endif // GTEST_HAS_HASH_SET_ + +TEST(PrintStlContainerTest, List) { + const string a[] = { + "hello", + "world" + }; + const list strings(a, a + 2); + EXPECT_EQ("{ \"hello\", \"world\" }", Print(strings)); +} + +TEST(PrintStlContainerTest, Map) { + map map1; + map1[1] = true; + map1[5] = false; + map1[3] = true; + EXPECT_EQ("{ (1, true), (3, true), (5, false) }", Print(map1)); +} + +TEST(PrintStlContainerTest, MultiMap) { + multimap map1; + // The make_pair template function would deduce the type as + // pair here, and since the key part in a multimap has to + // be constant, without a templated ctor in the pair class (as in + // libCstd on Solaris), make_pair call would fail to compile as no + // implicit conversion is found. Thus explicit typename is used + // here instead. + map1.insert(pair(true, 0)); + map1.insert(pair(true, 1)); + map1.insert(pair(false, 2)); + EXPECT_EQ("{ (false, 2), (true, 0), (true, 1) }", Print(map1)); +} + +TEST(PrintStlContainerTest, Set) { + const unsigned int a[] = { 3, 0, 5 }; + set set1(a, a + 3); + EXPECT_EQ("{ 0, 3, 5 }", Print(set1)); +} + +TEST(PrintStlContainerTest, MultiSet) { + const int a[] = { 1, 1, 2, 5, 1 }; + multiset set1(a, a + 5); + EXPECT_EQ("{ 1, 1, 1, 2, 5 }", Print(set1)); +} + +TEST(PrintStlContainerTest, Pair) { + pair p(true, 5); + EXPECT_EQ("(true, 5)", Print(p)); +} + +TEST(PrintStlContainerTest, Vector) { + vector v; + v.push_back(1); + v.push_back(2); + EXPECT_EQ("{ 1, 2 }", Print(v)); +} + +TEST(PrintStlContainerTest, LongSequence) { + const int a[100] = { 1, 2, 3 }; + const vector v(a, a + 100); + EXPECT_EQ("{ 1, 2, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, " + "0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, ... }", Print(v)); +} + +TEST(PrintStlContainerTest, NestedContainer) { + const int a1[] = { 1, 2 }; + const int a2[] = { 3, 4, 5 }; + const list l1(a1, a1 + 2); + const list l2(a2, a2 + 3); + + vector > v; + v.push_back(l1); + v.push_back(l2); + EXPECT_EQ("{ { 1, 2 }, { 3, 4, 5 } }", Print(v)); +} + +TEST(PrintStlContainerTest, OneDimensionalNativeArray) { + const int a[3] = { 1, 2, 3 }; + NativeArray b(a, 3, kReference); + EXPECT_EQ("{ 1, 2, 3 }", Print(b)); +} + +TEST(PrintStlContainerTest, TwoDimensionalNativeArray) { + const int a[2][3] = { { 1, 2, 3 }, { 4, 5, 6 } }; + NativeArray b(a, 2, kReference); + EXPECT_EQ("{ { 1, 2, 3 }, { 4, 5, 6 } }", Print(b)); +} + +// Tests that a class named iterator isn't treated as a container. + +struct iterator { + char x; +}; + +TEST(PrintStlContainerTest, Iterator) { + iterator it = {}; + EXPECT_EQ("1-byte object <00>", Print(it)); +} + +// Tests that a class named const_iterator isn't treated as a container. + +struct const_iterator { + char x; +}; + +TEST(PrintStlContainerTest, ConstIterator) { + const_iterator it = {}; + EXPECT_EQ("1-byte object <00>", Print(it)); +} + +#if GTEST_HAS_TR1_TUPLE +// Tests printing tuples. + +// Tuples of various arities. 
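+// (Illustrative aside, assuming the TR1 tuple printer used here: a tuple
+// prints as a parenthesized, comma-separated list of its fields, each
+// field formatted by the same universal printer, e.g.
+//
+//   EXPECT_EQ("(5, true)", Print(make_tuple(5, true)));
+//
+// Later Google Test releases print ::std::tuple the same way.)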
+TEST(PrintTupleTest, VariousSizes) {
+  tuple<> t0;
+  EXPECT_EQ("()", Print(t0));
+
+  tuple<int> t1(5);
+  EXPECT_EQ("(5)", Print(t1));
+
+  tuple<char, bool> t2('a', true);
+  EXPECT_EQ("('a' (97, 0x61), true)", Print(t2));
+
+  tuple<bool, int, int> t3(false, 2, 3);
+  EXPECT_EQ("(false, 2, 3)", Print(t3));
+
+  tuple<bool, int, int, int> t4(false, 2, 3, 4);
+  EXPECT_EQ("(false, 2, 3, 4)", Print(t4));
+
+  tuple<bool, int, int, int, bool> t5(false, 2, 3, 4, true);
+  EXPECT_EQ("(false, 2, 3, 4, true)", Print(t5));
+
+  tuple<bool, int, int, int, bool, int> t6(false, 2, 3, 4, true, 6);
+  EXPECT_EQ("(false, 2, 3, 4, true, 6)", Print(t6));
+
+  tuple<bool, int, int, int, bool, int, int> t7(false, 2, 3, 4, true, 6, 7);
+  EXPECT_EQ("(false, 2, 3, 4, true, 6, 7)", Print(t7));
+
+  tuple<bool, int, int, int, bool, int, int, bool> t8(
+      false, 2, 3, 4, true, 6, 7, true);
+  EXPECT_EQ("(false, 2, 3, 4, true, 6, 7, true)", Print(t8));
+
+  tuple<bool, int, int, int, bool, int, int, bool, int> t9(
+      false, 2, 3, 4, true, 6, 7, true, 9);
+  EXPECT_EQ("(false, 2, 3, 4, true, 6, 7, true, 9)", Print(t9));
+
+  const char* const str = "8";
+  // VC++ 2010's implementation of tuple of C++0x is deficient, requiring
+  // an explicit type cast of NULL to be used.
+  tuple<bool, char, short, testing::internal::Int32,  // NOLINT
+        testing::internal::Int64, float, double, const char*, void*, string>
+      t10(false, 'a', 3, 4, 5, 1.5F, -2.5, str,
+          ImplicitCast_<void*>(NULL), "10");
+  EXPECT_EQ("(false, 'a' (97, 0x61), 3, 4, 5, 1.5, -2.5, " + PrintPointer(str) +
+            " pointing to \"8\", NULL, \"10\")",
+            Print(t10));
+}
+
+// Nested tuples.
+TEST(PrintTupleTest, NestedTuple) {
+  tuple<tuple<int, bool>, char> nested(make_tuple(5, true), 'a');
+  EXPECT_EQ("((5, true), 'a' (97, 0x61))", Print(nested));
+}
+
+#endif  // GTEST_HAS_TR1_TUPLE
+
+// Tests printing user-defined unprintable types.
+
+// Unprintable types in the global namespace.
+TEST(PrintUnprintableTypeTest, InGlobalNamespace) {
+  EXPECT_EQ("1-byte object <00>",
+            Print(UnprintableTemplateInGlobal<char>()));
+}
+
+// Unprintable types in a user namespace.
+TEST(PrintUnprintableTypeTest, InUserNamespace) {
+  EXPECT_EQ("16-byte object <EF-12 00-00 34-AB 00-00 00-00 00-00 00-00 00-00>",
+            Print(::foo::UnprintableInFoo()));
+}
+
+// Unprintable types that are too big to be printed completely.
+
+struct Big {
+  Big() { memset(array, 0, sizeof(array)); }
+  char array[257];
+};
+
+TEST(PrintUnprintableTypeTest, BigObject) {
+  EXPECT_EQ("257-byte object <00-00 00-00 00-00 00-00 00-00 00-00 "
+            "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 "
+            "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 "
+            "00-00 00-00 00-00 00-00 00-00 00-00 ... 00-00 00-00 00-00 "
+            "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 "
+            "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 "
+            "00-00 00-00 00-00 00-00 00-00 00-00 00-00 00-00 00>",
+            Print(Big()));
+}
+
+// Tests printing user-defined streamable types.
+
+// Streamable types in the global namespace.
+TEST(PrintStreamableTypeTest, InGlobalNamespace) {
+  StreamableInGlobal x;
+  EXPECT_EQ("StreamableInGlobal", Print(x));
+  EXPECT_EQ("StreamableInGlobal*", Print(&x));
+}
+
+// Printable template types in a user namespace.
+TEST(PrintStreamableTypeTest, TemplateTypeInUserNamespace) {
+  EXPECT_EQ("StreamableTemplateInFoo: 0",
+            Print(::foo::StreamableTemplateInFoo<int>()));
+}
+
+// Tests printing user-defined types that have a PrintTo() function.
+TEST(PrintPrintableTypeTest, InUserNamespace) {
+  EXPECT_EQ("PrintableViaPrintTo: 0",
+            Print(::foo::PrintableViaPrintTo()));
+}
+
+// Tests printing a pointer to a user-defined type that has a <<
+// operator for its pointer.
+TEST(PrintPrintableTypeTest, PointerInUserNamespace) {
+  ::foo::PointerPrintable x;
+  EXPECT_EQ("PointerPrintable*", Print(&x));
+}
+
+// Tests printing user-defined class templates that have a PrintTo() function.
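+// (A minimal sketch of the PrintTo() customization point exercised here,
+// with illustrative names: declaring the overload in the type's own
+// namespace lets Google Test find it via argument-dependent lookup.
+//
+//   namespace myns {
+//   struct Point { int x, y; };
+//   void PrintTo(const Point& p, ::std::ostream* os) {
+//     *os << "(" << p.x << ", " << p.y << ")";
+//   }
+//   }  // namespace myns
+//
+// Assertions involving Point then show "(x, y)" instead of a byte dump.)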
+TEST(PrintPrintableTypeTest, TemplateInUserNamespace) { + EXPECT_EQ("PrintableViaPrintToTemplate: 5", + Print(::foo::PrintableViaPrintToTemplate(5))); +} + +#if GTEST_HAS_PROTOBUF_ + +// Tests printing a protocol message. +TEST(PrintProtocolMessageTest, PrintsShortDebugString) { + testing::internal::TestMessage msg; + msg.set_member("yes"); + EXPECT_EQ("", Print(msg)); +} + +// Tests printing a short proto2 message. +TEST(PrintProto2MessageTest, PrintsShortDebugStringWhenItIsShort) { + testing::internal::FooMessage msg; + msg.set_int_field(2); + msg.set_string_field("hello"); + EXPECT_PRED2(RE::FullMatch, Print(msg), + ""); +} + +// Tests printing a long proto2 message. +TEST(PrintProto2MessageTest, PrintsDebugStringWhenItIsLong) { + testing::internal::FooMessage msg; + msg.set_int_field(2); + msg.set_string_field("hello"); + msg.add_names("peter"); + msg.add_names("paul"); + msg.add_names("mary"); + EXPECT_PRED2(RE::FullMatch, Print(msg), + "<\n" + "int_field:\\s*2\n" + "string_field:\\s*\"hello\"\n" + "names:\\s*\"peter\"\n" + "names:\\s*\"paul\"\n" + "names:\\s*\"mary\"\n" + ">"); +} + +#endif // GTEST_HAS_PROTOBUF_ + +// Tests that the universal printer prints both the address and the +// value of a reference. +TEST(PrintReferenceTest, PrintsAddressAndValue) { + int n = 5; + EXPECT_EQ("@" + PrintPointer(&n) + " 5", PrintByRef(n)); + + int a[2][3] = { + { 0, 1, 2 }, + { 3, 4, 5 } + }; + EXPECT_EQ("@" + PrintPointer(a) + " { { 0, 1, 2 }, { 3, 4, 5 } }", + PrintByRef(a)); + + const ::foo::UnprintableInFoo x; + EXPECT_EQ("@" + PrintPointer(&x) + " 16-byte object " + "", + PrintByRef(x)); +} + +// Tests that the universal printer prints a function pointer passed by +// reference. +TEST(PrintReferenceTest, HandlesFunctionPointer) { + void (*fp)(int n) = &MyFunction; + const string fp_pointer_string = + PrintPointer(reinterpret_cast(&fp)); + // We cannot directly cast &MyFunction to const void* because the + // standard disallows casting between pointers to functions and + // pointers to objects, and some compilers (e.g. GCC 3.4) enforce + // this limitation. + const string fp_string = PrintPointer(reinterpret_cast( + reinterpret_cast(fp))); + EXPECT_EQ("@" + fp_pointer_string + " " + fp_string, + PrintByRef(fp)); +} + +// Tests that the universal printer prints a member function pointer +// passed by reference. +TEST(PrintReferenceTest, HandlesMemberFunctionPointer) { + int (Foo::*p)(char ch) = &Foo::MyMethod; + EXPECT_TRUE(HasPrefix( + PrintByRef(p), + "@" + PrintPointer(reinterpret_cast(&p)) + " " + + Print(sizeof(p)) + "-byte object ")); + + char (Foo::*p2)(int n) = &Foo::MyVirtualMethod; + EXPECT_TRUE(HasPrefix( + PrintByRef(p2), + "@" + PrintPointer(reinterpret_cast(&p2)) + " " + + Print(sizeof(p2)) + "-byte object ")); +} + +// Tests that the universal printer prints a member variable pointer +// passed by reference. +TEST(PrintReferenceTest, HandlesMemberVariablePointer) { + int (Foo::*p) = &Foo::value; // NOLINT + EXPECT_TRUE(HasPrefix( + PrintByRef(p), + "@" + PrintPointer(&p) + " " + Print(sizeof(p)) + "-byte object ")); +} + +// Tests that FormatForComparisonFailureMessage(), which is used to print +// an operand in a comparison assertion (e.g. ASSERT_EQ) when the assertion +// fails, formats the operand in the desired way. 
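+// (Summary aside, not from the original file: when both operands of
+// EXPECT_EQ are char pointers the assertion compares addresses, so the
+// failure message shows pointers; only when one operand is a string
+// object does the char pointer get printed as an escaped C string.
+//
+//   const char* p = "hi";
+//   EXPECT_EQ(p, p);                    // compared (and printed) by address
+//   EXPECT_EQ(::std::string("hi"), p);  // compared (and printed) by content
+// )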
+ +// scalar +TEST(FormatForComparisonFailureMessageTest, WorksForScalar) { + EXPECT_STREQ("123", + FormatForComparisonFailureMessage(123, 124).c_str()); +} + +// non-char pointer +TEST(FormatForComparisonFailureMessageTest, WorksForNonCharPointer) { + int n = 0; + EXPECT_EQ(PrintPointer(&n), + FormatForComparisonFailureMessage(&n, &n).c_str()); +} + +// non-char array +TEST(FormatForComparisonFailureMessageTest, FormatsNonCharArrayAsPointer) { + // In expression 'array == x', 'array' is compared by pointer. + // Therefore we want to print an array operand as a pointer. + int n[] = { 1, 2, 3 }; + EXPECT_EQ(PrintPointer(n), + FormatForComparisonFailureMessage(n, n).c_str()); +} + +// Tests formatting a char pointer when it's compared with another pointer. +// In this case we want to print it as a raw pointer, as the comparision is by +// pointer. + +// char pointer vs pointer +TEST(FormatForComparisonFailureMessageTest, WorksForCharPointerVsPointer) { + // In expression 'p == x', where 'p' and 'x' are (const or not) char + // pointers, the operands are compared by pointer. Therefore we + // want to print 'p' as a pointer instead of a C string (we don't + // even know if it's supposed to point to a valid C string). + + // const char* + const char* s = "hello"; + EXPECT_EQ(PrintPointer(s), + FormatForComparisonFailureMessage(s, s).c_str()); + + // char* + char ch = 'a'; + EXPECT_EQ(PrintPointer(&ch), + FormatForComparisonFailureMessage(&ch, &ch).c_str()); +} + +// wchar_t pointer vs pointer +TEST(FormatForComparisonFailureMessageTest, WorksForWCharPointerVsPointer) { + // In expression 'p == x', where 'p' and 'x' are (const or not) char + // pointers, the operands are compared by pointer. Therefore we + // want to print 'p' as a pointer instead of a wide C string (we don't + // even know if it's supposed to point to a valid wide C string). + + // const wchar_t* + const wchar_t* s = L"hello"; + EXPECT_EQ(PrintPointer(s), + FormatForComparisonFailureMessage(s, s).c_str()); + + // wchar_t* + wchar_t ch = L'a'; + EXPECT_EQ(PrintPointer(&ch), + FormatForComparisonFailureMessage(&ch, &ch).c_str()); +} + +// Tests formatting a char pointer when it's compared to a string object. +// In this case we want to print the char pointer as a C string. + +#if GTEST_HAS_GLOBAL_STRING +// char pointer vs ::string +TEST(FormatForComparisonFailureMessageTest, WorksForCharPointerVsString) { + const char* s = "hello \"world"; + EXPECT_STREQ("\"hello \\\"world\"", // The string content should be escaped. + FormatForComparisonFailureMessage(s, ::string()).c_str()); + + // char* + char str[] = "hi\1"; + char* p = str; + EXPECT_STREQ("\"hi\\x1\"", // The string content should be escaped. + FormatForComparisonFailureMessage(p, ::string()).c_str()); +} +#endif + +// char pointer vs std::string +TEST(FormatForComparisonFailureMessageTest, WorksForCharPointerVsStdString) { + const char* s = "hello \"world"; + EXPECT_STREQ("\"hello \\\"world\"", // The string content should be escaped. + FormatForComparisonFailureMessage(s, ::std::string()).c_str()); + + // char* + char str[] = "hi\1"; + char* p = str; + EXPECT_STREQ("\"hi\\x1\"", // The string content should be escaped. + FormatForComparisonFailureMessage(p, ::std::string()).c_str()); +} + +#if GTEST_HAS_GLOBAL_WSTRING +// wchar_t pointer vs ::wstring +TEST(FormatForComparisonFailureMessageTest, WorksForWCharPointerVsWString) { + const wchar_t* s = L"hi \"world"; + EXPECT_STREQ("L\"hi \\\"world\"", // The string content should be escaped. 
+ FormatForComparisonFailureMessage(s, ::wstring()).c_str()); + + // wchar_t* + wchar_t str[] = L"hi\1"; + wchar_t* p = str; + EXPECT_STREQ("L\"hi\\x1\"", // The string content should be escaped. + FormatForComparisonFailureMessage(p, ::wstring()).c_str()); +} +#endif + +#if GTEST_HAS_STD_WSTRING +// wchar_t pointer vs std::wstring +TEST(FormatForComparisonFailureMessageTest, WorksForWCharPointerVsStdWString) { + const wchar_t* s = L"hi \"world"; + EXPECT_STREQ("L\"hi \\\"world\"", // The string content should be escaped. + FormatForComparisonFailureMessage(s, ::std::wstring()).c_str()); + + // wchar_t* + wchar_t str[] = L"hi\1"; + wchar_t* p = str; + EXPECT_STREQ("L\"hi\\x1\"", // The string content should be escaped. + FormatForComparisonFailureMessage(p, ::std::wstring()).c_str()); +} +#endif + +// Tests formatting a char array when it's compared with a pointer or array. +// In this case we want to print the array as a row pointer, as the comparison +// is by pointer. + +// char array vs pointer +TEST(FormatForComparisonFailureMessageTest, WorksForCharArrayVsPointer) { + char str[] = "hi \"world\""; + char* p = NULL; + EXPECT_EQ(PrintPointer(str), + FormatForComparisonFailureMessage(str, p).c_str()); +} + +// char array vs char array +TEST(FormatForComparisonFailureMessageTest, WorksForCharArrayVsCharArray) { + const char str[] = "hi \"world\""; + EXPECT_EQ(PrintPointer(str), + FormatForComparisonFailureMessage(str, str).c_str()); +} + +// wchar_t array vs pointer +TEST(FormatForComparisonFailureMessageTest, WorksForWCharArrayVsPointer) { + wchar_t str[] = L"hi \"world\""; + wchar_t* p = NULL; + EXPECT_EQ(PrintPointer(str), + FormatForComparisonFailureMessage(str, p).c_str()); +} + +// wchar_t array vs wchar_t array +TEST(FormatForComparisonFailureMessageTest, WorksForWCharArrayVsWCharArray) { + const wchar_t str[] = L"hi \"world\""; + EXPECT_EQ(PrintPointer(str), + FormatForComparisonFailureMessage(str, str).c_str()); +} + +// Tests formatting a char array when it's compared with a string object. +// In this case we want to print the array as a C string. + +#if GTEST_HAS_GLOBAL_STRING +// char array vs string +TEST(FormatForComparisonFailureMessageTest, WorksForCharArrayVsString) { + const char str[] = "hi \"w\0rld\""; + EXPECT_STREQ("\"hi \\\"w\"", // The content should be escaped. + // Embedded NUL terminates the string. + FormatForComparisonFailureMessage(str, ::string()).c_str()); +} +#endif + +// char array vs std::string +TEST(FormatForComparisonFailureMessageTest, WorksForCharArrayVsStdString) { + const char str[] = "hi \"world\""; + EXPECT_STREQ("\"hi \\\"world\\\"\"", // The content should be escaped. + FormatForComparisonFailureMessage(str, ::std::string()).c_str()); +} + +#if GTEST_HAS_GLOBAL_WSTRING +// wchar_t array vs wstring +TEST(FormatForComparisonFailureMessageTest, WorksForWCharArrayVsWString) { + const wchar_t str[] = L"hi \"world\""; + EXPECT_STREQ("L\"hi \\\"world\\\"\"", // The content should be escaped. + FormatForComparisonFailureMessage(str, ::wstring()).c_str()); +} +#endif + +#if GTEST_HAS_STD_WSTRING +// wchar_t array vs std::wstring +TEST(FormatForComparisonFailureMessageTest, WorksForWCharArrayVsStdWString) { + const wchar_t str[] = L"hi \"w\0rld\""; + EXPECT_STREQ( + "L\"hi \\\"w\"", // The content should be escaped. + // Embedded NUL terminates the string. + FormatForComparisonFailureMessage(str, ::std::wstring()).c_str()); +} +#endif + +// Useful for testing PrintToString(). 
We cannot use EXPECT_EQ() +// there as its implementation uses PrintToString(). The caller must +// ensure that 'value' has no side effect. +#define EXPECT_PRINT_TO_STRING_(value, expected_string) \ + EXPECT_TRUE(PrintToString(value) == (expected_string)) \ + << " where " #value " prints as " << (PrintToString(value)) + +TEST(PrintToStringTest, WorksForScalar) { + EXPECT_PRINT_TO_STRING_(123, "123"); +} + +TEST(PrintToStringTest, WorksForPointerToConstChar) { + const char* p = "hello"; + EXPECT_PRINT_TO_STRING_(p, "\"hello\""); +} + +TEST(PrintToStringTest, WorksForPointerToNonConstChar) { + char s[] = "hello"; + char* p = s; + EXPECT_PRINT_TO_STRING_(p, "\"hello\""); +} + +TEST(PrintToStringTest, EscapesForPointerToConstChar) { + const char* p = "hello\n"; + EXPECT_PRINT_TO_STRING_(p, "\"hello\\n\""); +} + +TEST(PrintToStringTest, EscapesForPointerToNonConstChar) { + char s[] = "hello\1"; + char* p = s; + EXPECT_PRINT_TO_STRING_(p, "\"hello\\x1\""); +} + +TEST(PrintToStringTest, WorksForArray) { + int n[3] = { 1, 2, 3 }; + EXPECT_PRINT_TO_STRING_(n, "{ 1, 2, 3 }"); +} + +TEST(PrintToStringTest, WorksForCharArray) { + char s[] = "hello"; + EXPECT_PRINT_TO_STRING_(s, "\"hello\""); +} + +TEST(PrintToStringTest, WorksForCharArrayWithEmbeddedNul) { + const char str_with_nul[] = "hello\0 world"; + EXPECT_PRINT_TO_STRING_(str_with_nul, "\"hello\\0 world\""); + + char mutable_str_with_nul[] = "hello\0 world"; + EXPECT_PRINT_TO_STRING_(mutable_str_with_nul, "\"hello\\0 world\""); +} + +#undef EXPECT_PRINT_TO_STRING_ + +TEST(UniversalTersePrintTest, WorksForNonReference) { + ::std::stringstream ss; + UniversalTersePrint(123, &ss); + EXPECT_EQ("123", ss.str()); +} + +TEST(UniversalTersePrintTest, WorksForReference) { + const int& n = 123; + ::std::stringstream ss; + UniversalTersePrint(n, &ss); + EXPECT_EQ("123", ss.str()); +} + +TEST(UniversalTersePrintTest, WorksForCString) { + const char* s1 = "abc"; + ::std::stringstream ss1; + UniversalTersePrint(s1, &ss1); + EXPECT_EQ("\"abc\"", ss1.str()); + + char* s2 = const_cast(s1); + ::std::stringstream ss2; + UniversalTersePrint(s2, &ss2); + EXPECT_EQ("\"abc\"", ss2.str()); + + const char* s3 = NULL; + ::std::stringstream ss3; + UniversalTersePrint(s3, &ss3); + EXPECT_EQ("NULL", ss3.str()); +} + +TEST(UniversalPrintTest, WorksForNonReference) { + ::std::stringstream ss; + UniversalPrint(123, &ss); + EXPECT_EQ("123", ss.str()); +} + +TEST(UniversalPrintTest, WorksForReference) { + const int& n = 123; + ::std::stringstream ss; + UniversalPrint(n, &ss); + EXPECT_EQ("123", ss.str()); +} + +TEST(UniversalPrintTest, WorksForCString) { + const char* s1 = "abc"; + ::std::stringstream ss1; + UniversalPrint(s1, &ss1); + EXPECT_EQ(PrintPointer(s1) + " pointing to \"abc\"", string(ss1.str())); + + char* s2 = const_cast(s1); + ::std::stringstream ss2; + UniversalPrint(s2, &ss2); + EXPECT_EQ(PrintPointer(s2) + " pointing to \"abc\"", string(ss2.str())); + + const char* s3 = NULL; + ::std::stringstream ss3; + UniversalPrint(s3, &ss3); + EXPECT_EQ("NULL", ss3.str()); +} + +TEST(UniversalPrintTest, WorksForCharArray) { + const char str[] = "\"Line\0 1\"\nLine 2"; + ::std::stringstream ss1; + UniversalPrint(str, &ss1); + EXPECT_EQ("\"\\\"Line\\0 1\\\"\\nLine 2\"", ss1.str()); + + const char mutable_str[] = "\"Line\0 1\"\nLine 2"; + ::std::stringstream ss2; + UniversalPrint(mutable_str, &ss2); + EXPECT_EQ("\"\\\"Line\\0 1\\\"\\nLine 2\"", ss2.str()); +} + +#if GTEST_HAS_TR1_TUPLE + +TEST(UniversalTersePrintTupleFieldsToStringsTest, PrintsEmptyTuple) { + Strings result = 
UniversalTersePrintTupleFieldsToStrings(make_tuple()); + EXPECT_EQ(0u, result.size()); +} + +TEST(UniversalTersePrintTupleFieldsToStringsTest, PrintsOneTuple) { + Strings result = UniversalTersePrintTupleFieldsToStrings(make_tuple(1)); + ASSERT_EQ(1u, result.size()); + EXPECT_EQ("1", result[0]); +} + +TEST(UniversalTersePrintTupleFieldsToStringsTest, PrintsTwoTuple) { + Strings result = UniversalTersePrintTupleFieldsToStrings(make_tuple(1, 'a')); + ASSERT_EQ(2u, result.size()); + EXPECT_EQ("1", result[0]); + EXPECT_EQ("'a' (97, 0x61)", result[1]); +} + +TEST(UniversalTersePrintTupleFieldsToStringsTest, PrintsTersely) { + const int n = 1; + Strings result = UniversalTersePrintTupleFieldsToStrings( + tuple(n, "a")); + ASSERT_EQ(2u, result.size()); + EXPECT_EQ("1", result[0]); + EXPECT_EQ("\"a\"", result[1]); +} + +#endif // GTEST_HAS_TR1_TUPLE + +} // namespace gtest_printers_test +} // namespace testing diff --git a/external/gtest/test/gtest-test-part_test.cc b/external/gtest/test/gtest-test-part_test.cc new file mode 100755 index 0000000000..ca8ba933ae --- /dev/null +++ b/external/gtest/test/gtest-test-part_test.cc @@ -0,0 +1,208 @@ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// + +#include "gtest/gtest-test-part.h" + +#include "gtest/gtest.h" + +using testing::Message; +using testing::Test; +using testing::TestPartResult; +using testing::TestPartResultArray; + +namespace { + +// Tests the TestPartResult class. + +// The test fixture for testing TestPartResult. 
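+// (Aside, assuming standard Google Test semantics: kNonFatalFailure is
+// what a failing EXPECT_* records and lets the test continue;
+// kFatalFailure is what a failing ASSERT_* records and aborts the current
+// function; kSuccess records a passing part, e.g.
+//
+//   TestPartResult r(TestPartResult::kNonFatalFailure,
+//                    "file.cc", 42, "message");  // shape of an EXPECT_* failure
+// )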
+class TestPartResultTest : public Test { + protected: + TestPartResultTest() + : r1_(TestPartResult::kSuccess, "foo/bar.cc", 10, "Success!"), + r2_(TestPartResult::kNonFatalFailure, "foo/bar.cc", -1, "Failure!"), + r3_(TestPartResult::kFatalFailure, NULL, -1, "Failure!") {} + + TestPartResult r1_, r2_, r3_; +}; + + +TEST_F(TestPartResultTest, ConstructorWorks) { + Message message; + message << "something is terribly wrong"; + message << static_cast(testing::internal::kStackTraceMarker); + message << "some unimportant stack trace"; + + const TestPartResult result(TestPartResult::kNonFatalFailure, + "some_file.cc", + 42, + message.GetString().c_str()); + + EXPECT_EQ(TestPartResult::kNonFatalFailure, result.type()); + EXPECT_STREQ("some_file.cc", result.file_name()); + EXPECT_EQ(42, result.line_number()); + EXPECT_STREQ(message.GetString().c_str(), result.message()); + EXPECT_STREQ("something is terribly wrong", result.summary()); +} + +TEST_F(TestPartResultTest, ResultAccessorsWork) { + const TestPartResult success(TestPartResult::kSuccess, + "file.cc", + 42, + "message"); + EXPECT_TRUE(success.passed()); + EXPECT_FALSE(success.failed()); + EXPECT_FALSE(success.nonfatally_failed()); + EXPECT_FALSE(success.fatally_failed()); + + const TestPartResult nonfatal_failure(TestPartResult::kNonFatalFailure, + "file.cc", + 42, + "message"); + EXPECT_FALSE(nonfatal_failure.passed()); + EXPECT_TRUE(nonfatal_failure.failed()); + EXPECT_TRUE(nonfatal_failure.nonfatally_failed()); + EXPECT_FALSE(nonfatal_failure.fatally_failed()); + + const TestPartResult fatal_failure(TestPartResult::kFatalFailure, + "file.cc", + 42, + "message"); + EXPECT_FALSE(fatal_failure.passed()); + EXPECT_TRUE(fatal_failure.failed()); + EXPECT_FALSE(fatal_failure.nonfatally_failed()); + EXPECT_TRUE(fatal_failure.fatally_failed()); +} + +// Tests TestPartResult::type(). +TEST_F(TestPartResultTest, type) { + EXPECT_EQ(TestPartResult::kSuccess, r1_.type()); + EXPECT_EQ(TestPartResult::kNonFatalFailure, r2_.type()); + EXPECT_EQ(TestPartResult::kFatalFailure, r3_.type()); +} + +// Tests TestPartResult::file_name(). +TEST_F(TestPartResultTest, file_name) { + EXPECT_STREQ("foo/bar.cc", r1_.file_name()); + EXPECT_STREQ(NULL, r3_.file_name()); +} + +// Tests TestPartResult::line_number(). +TEST_F(TestPartResultTest, line_number) { + EXPECT_EQ(10, r1_.line_number()); + EXPECT_EQ(-1, r2_.line_number()); +} + +// Tests TestPartResult::message(). +TEST_F(TestPartResultTest, message) { + EXPECT_STREQ("Success!", r1_.message()); +} + +// Tests TestPartResult::passed(). +TEST_F(TestPartResultTest, Passed) { + EXPECT_TRUE(r1_.passed()); + EXPECT_FALSE(r2_.passed()); + EXPECT_FALSE(r3_.passed()); +} + +// Tests TestPartResult::failed(). +TEST_F(TestPartResultTest, Failed) { + EXPECT_FALSE(r1_.failed()); + EXPECT_TRUE(r2_.failed()); + EXPECT_TRUE(r3_.failed()); +} + +// Tests TestPartResult::fatally_failed(). +TEST_F(TestPartResultTest, FatallyFailed) { + EXPECT_FALSE(r1_.fatally_failed()); + EXPECT_FALSE(r2_.fatally_failed()); + EXPECT_TRUE(r3_.fatally_failed()); +} + +// Tests TestPartResult::nonfatally_failed(). +TEST_F(TestPartResultTest, NonfatallyFailed) { + EXPECT_FALSE(r1_.nonfatally_failed()); + EXPECT_TRUE(r2_.nonfatally_failed()); + EXPECT_FALSE(r3_.nonfatally_failed()); +} + +// Tests the TestPartResultArray class. 
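+// (Aside: outside these unit tests a TestPartResultArray is typically
+// filled by intercepting failures; a sketch assuming the public reporter
+// API from gtest-spi.h:
+//
+//   TestPartResultArray results;
+//   {
+//     testing::ScopedFakeTestPartResultReporter reporter(
+//         testing::ScopedFakeTestPartResultReporter::
+//             INTERCEPT_ONLY_CURRENT_THREAD,
+//         &results);
+//     ADD_FAILURE() << "captured here instead of being reported";
+//   }
+//   // results.size() == 1; results.GetTestPartResult(0) is the failure.
+// )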
+ +class TestPartResultArrayTest : public Test { + protected: + TestPartResultArrayTest() + : r1_(TestPartResult::kNonFatalFailure, "foo/bar.cc", -1, "Failure 1"), + r2_(TestPartResult::kFatalFailure, "foo/bar.cc", -1, "Failure 2") {} + + const TestPartResult r1_, r2_; +}; + +// Tests that TestPartResultArray initially has size 0. +TEST_F(TestPartResultArrayTest, InitialSizeIsZero) { + TestPartResultArray results; + EXPECT_EQ(0, results.size()); +} + +// Tests that TestPartResultArray contains the given TestPartResult +// after one Append() operation. +TEST_F(TestPartResultArrayTest, ContainsGivenResultAfterAppend) { + TestPartResultArray results; + results.Append(r1_); + EXPECT_EQ(1, results.size()); + EXPECT_STREQ("Failure 1", results.GetTestPartResult(0).message()); +} + +// Tests that TestPartResultArray contains the given TestPartResults +// after two Append() operations. +TEST_F(TestPartResultArrayTest, ContainsGivenResultsAfterTwoAppends) { + TestPartResultArray results; + results.Append(r1_); + results.Append(r2_); + EXPECT_EQ(2, results.size()); + EXPECT_STREQ("Failure 1", results.GetTestPartResult(0).message()); + EXPECT_STREQ("Failure 2", results.GetTestPartResult(1).message()); +} + +typedef TestPartResultArrayTest TestPartResultArrayDeathTest; + +// Tests that the program dies when GetTestPartResult() is called with +// an invalid index. +TEST_F(TestPartResultArrayDeathTest, DiesWhenIndexIsOutOfBound) { + TestPartResultArray results; + results.Append(r1_); + + EXPECT_DEATH_IF_SUPPORTED(results.GetTestPartResult(-1), ""); + EXPECT_DEATH_IF_SUPPORTED(results.GetTestPartResult(1), ""); +} + +// TODO(mheule@google.com): Add a test for the class HasNewFatalFailureHelper. + +} // namespace diff --git a/external/gtest/test/gtest-tuple_test.cc b/external/gtest/test/gtest-tuple_test.cc new file mode 100755 index 0000000000..bfaa3e0ac4 --- /dev/null +++ b/external/gtest/test/gtest-tuple_test.cc @@ -0,0 +1,320 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#include "gtest/internal/gtest-tuple.h"
+#include <utility>
+#include "gtest/gtest.h"
+
+namespace {
+
+using ::std::tr1::get;
+using ::std::tr1::make_tuple;
+using ::std::tr1::tuple;
+using ::std::tr1::tuple_element;
+using ::std::tr1::tuple_size;
+using ::testing::StaticAssertTypeEq;
+
+// Tests that tuple_element<K, tuple<T0, T1, ..., TN> >::type returns TK.
+TEST(tuple_element_Test, ReturnsElementType) {
+  StaticAssertTypeEq<int, tuple_element<0, tuple<int, char> >::type>();
+  StaticAssertTypeEq<int&, tuple_element<1, tuple<double, int&> >::type>();
+  StaticAssertTypeEq<bool, tuple_element<2, tuple<double, char, bool> >::type>();
+}
+
+// Tests that tuple_size<T>::value gives the number of fields in tuple
+// type T.
+TEST(tuple_size_Test, ReturnsNumberOfFields) {
+  EXPECT_EQ(0, +tuple_size<tuple<> >::value);
+  EXPECT_EQ(1, +tuple_size<tuple<void*> >::value);
+  EXPECT_EQ(1, +tuple_size<tuple<char> >::value);
+  EXPECT_EQ(1, +(tuple_size<tuple<tuple<int, double> > >::value));
+  EXPECT_EQ(2, +(tuple_size<tuple<int&, const char> >::value));
+  EXPECT_EQ(3, +(tuple_size<tuple<char*, void, const bool&> >::value));
+}
+
+// Tests comparing a tuple with itself.
+TEST(ComparisonTest, ComparesWithSelf) {
+  const tuple<int, char, bool> a(5, 'a', false);
+
+  EXPECT_TRUE(a == a);
+  EXPECT_FALSE(a != a);
+}
+
+// Tests comparing two tuples with the same value.
+TEST(ComparisonTest, ComparesEqualTuples) {
+  const tuple<int, bool> a(5, true), b(5, true);
+
+  EXPECT_TRUE(a == b);
+  EXPECT_FALSE(a != b);
+}
+
+// Tests comparing two different tuples that have no reference fields.
+TEST(ComparisonTest, ComparesUnequalTuplesWithoutReferenceFields) {
+  typedef tuple<int, char> FooTuple;
+
+  const FooTuple a(0, 'x');
+  const FooTuple b(1, 'a');
+
+  EXPECT_TRUE(a != b);
+  EXPECT_FALSE(a == b);
+
+  const FooTuple c(1, 'b');
+
+  EXPECT_TRUE(b != c);
+  EXPECT_FALSE(b == c);
+}
+
+// Tests comparing two different tuples that have reference fields.
+TEST(ComparisonTest, ComparesUnequalTuplesWithReferenceFields) {
+  typedef tuple<const int&, const char&> FooTuple;
+
+  int i = 5;
+  const char ch = 'a';
+  const FooTuple a(i, ch);
+
+  int j = 6;
+  const FooTuple b(j, ch);
+
+  EXPECT_TRUE(a != b);
+  EXPECT_FALSE(a == b);
+
+  j = 5;
+  const char ch2 = 'b';
+  const FooTuple c(j, ch2);
+
+  EXPECT_TRUE(b != c);
+  EXPECT_FALSE(b == c);
+}
+
+// Tests that a tuple field with a reference type is an alias of the
+// variable it's supposed to reference.
+TEST(ReferenceFieldTest, IsAliasOfReferencedVariable) {
+  int n = 0;
+  tuple<bool, int&> t(true, n);
+
+  n = 1;
+  EXPECT_EQ(n, get<1>(t))
+      << "Changing an underlying variable should update the reference field.";
+
+  // Makes sure that the implementation doesn't do anything funny with
+  // the & operator for the return type of get<>().
+  EXPECT_EQ(&n, &(get<1>(t)))
+      << "The address of a reference field should equal the address of "
+      << "the underlying variable.";
+
+  get<1>(t) = 2;
+  EXPECT_EQ(2, n)
+      << "Changing a reference field should update the underlying variable.";
+}
+
+// Tests that tuple's default constructor default initializes each field.
+// This test needs to compile without generating warnings.
+TEST(TupleConstructorTest, DefaultConstructorDefaultInitializesEachField) {
+  // The TR1 report requires that tuple's default constructor default
+  // initializes each field, even if it's a primitive type. If the
+  // implementation forgets to do this, this test will catch it by
+  // generating warnings about using uninitialized variables (assuming
+  // a decent compiler).
+ + tuple<> empty; + + tuple a1, b1; + b1 = a1; + EXPECT_EQ(0, get<0>(b1)); + + tuple a2, b2; + b2 = a2; + EXPECT_EQ(0, get<0>(b2)); + EXPECT_EQ(0.0, get<1>(b2)); + + tuple a3, b3; + b3 = a3; + EXPECT_EQ(0.0, get<0>(b3)); + EXPECT_EQ('\0', get<1>(b3)); + EXPECT_TRUE(get<2>(b3) == NULL); + + tuple a10, b10; + b10 = a10; + EXPECT_EQ(0, get<0>(b10)); + EXPECT_EQ(0, get<1>(b10)); + EXPECT_EQ(0, get<2>(b10)); + EXPECT_EQ(0, get<3>(b10)); + EXPECT_EQ(0, get<4>(b10)); + EXPECT_EQ(0, get<5>(b10)); + EXPECT_EQ(0, get<6>(b10)); + EXPECT_EQ(0, get<7>(b10)); + EXPECT_EQ(0, get<8>(b10)); + EXPECT_EQ(0, get<9>(b10)); +} + +// Tests constructing a tuple from its fields. +TEST(TupleConstructorTest, ConstructsFromFields) { + int n = 1; + // Reference field. + tuple a(n); + EXPECT_EQ(&n, &(get<0>(a))); + + // Non-reference fields. + tuple b(5, 'a'); + EXPECT_EQ(5, get<0>(b)); + EXPECT_EQ('a', get<1>(b)); + + // Const reference field. + const int m = 2; + tuple c(true, m); + EXPECT_TRUE(get<0>(c)); + EXPECT_EQ(&m, &(get<1>(c))); +} + +// Tests tuple's copy constructor. +TEST(TupleConstructorTest, CopyConstructor) { + tuple a(0.0, true); + tuple b(a); + + EXPECT_DOUBLE_EQ(0.0, get<0>(b)); + EXPECT_TRUE(get<1>(b)); +} + +// Tests constructing a tuple from another tuple that has a compatible +// but different type. +TEST(TupleConstructorTest, ConstructsFromDifferentTupleType) { + tuple a(0, 1, 'a'); + tuple b(a); + + EXPECT_DOUBLE_EQ(0.0, get<0>(b)); + EXPECT_EQ(1, get<1>(b)); + EXPECT_EQ('a', get<2>(b)); +} + +// Tests constructing a 2-tuple from an std::pair. +TEST(TupleConstructorTest, ConstructsFromPair) { + ::std::pair a(1, 'a'); + tuple b(a); + tuple c(a); +} + +// Tests assigning a tuple to another tuple with the same type. +TEST(TupleAssignmentTest, AssignsToSameTupleType) { + const tuple a(5, 7L); + tuple b; + b = a; + EXPECT_EQ(5, get<0>(b)); + EXPECT_EQ(7L, get<1>(b)); +} + +// Tests assigning a tuple to another tuple with a different but +// compatible type. +TEST(TupleAssignmentTest, AssignsToDifferentTupleType) { + const tuple a(1, 7L, true); + tuple b; + b = a; + EXPECT_EQ(1L, get<0>(b)); + EXPECT_EQ(7, get<1>(b)); + EXPECT_TRUE(get<2>(b)); +} + +// Tests assigning an std::pair to a 2-tuple. +TEST(TupleAssignmentTest, AssignsFromPair) { + const ::std::pair a(5, true); + tuple b; + b = a; + EXPECT_EQ(5, get<0>(b)); + EXPECT_TRUE(get<1>(b)); + + tuple c; + c = a; + EXPECT_EQ(5L, get<0>(c)); + EXPECT_TRUE(get<1>(c)); +} + +// A fixture for testing big tuples. +class BigTupleTest : public testing::Test { + protected: + typedef tuple BigTuple; + + BigTupleTest() : + a_(1, 0, 0, 0, 0, 0, 0, 0, 0, 2), + b_(1, 0, 0, 0, 0, 0, 0, 0, 0, 3) {} + + BigTuple a_, b_; +}; + +// Tests constructing big tuples. +TEST_F(BigTupleTest, Construction) { + BigTuple a; + BigTuple b(b_); +} + +// Tests that get(t) returns the N-th (0-based) field of tuple t. +TEST_F(BigTupleTest, get) { + EXPECT_EQ(1, get<0>(a_)); + EXPECT_EQ(2, get<9>(a_)); + + // Tests that get() works on a const tuple too. + const BigTuple a(a_); + EXPECT_EQ(1, get<0>(a)); + EXPECT_EQ(2, get<9>(a)); +} + +// Tests comparing big tuples. 
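+// (Aside: tuple equality is field-by-field, so a_ and b_ below compare
+// unequal because they differ only in their last field:
+//
+//   make_tuple(1, 2) == make_tuple(1, 2)  // true
+//   make_tuple(1, 2) == make_tuple(1, 3)  // false
+// )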
+TEST_F(BigTupleTest, Comparisons) { + EXPECT_TRUE(a_ == a_); + EXPECT_FALSE(a_ != a_); + + EXPECT_TRUE(a_ != b_); + EXPECT_FALSE(a_ == b_); +} + +TEST(MakeTupleTest, WorksForScalarTypes) { + tuple a; + a = make_tuple(true, 5); + EXPECT_TRUE(get<0>(a)); + EXPECT_EQ(5, get<1>(a)); + + tuple b; + b = make_tuple('a', 'b', 5); + EXPECT_EQ('a', get<0>(b)); + EXPECT_EQ('b', get<1>(b)); + EXPECT_EQ(5, get<2>(b)); +} + +TEST(MakeTupleTest, WorksForPointers) { + int a[] = { 1, 2, 3, 4 }; + const char* const str = "hi"; + int* const p = a; + + tuple t; + t = make_tuple(str, p); + EXPECT_EQ(str, get<0>(t)); + EXPECT_EQ(p, get<1>(t)); +} + +} // namespace diff --git a/external/gtest/test/gtest-typed-test2_test.cc b/external/gtest/test/gtest-typed-test2_test.cc new file mode 100755 index 0000000000..c284700b02 --- /dev/null +++ b/external/gtest/test/gtest-typed-test2_test.cc @@ -0,0 +1,45 @@ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +#include + +#include "test/gtest-typed-test_test.h" +#include "gtest/gtest.h" + +#if GTEST_HAS_TYPED_TEST_P + +// Tests that the same type-parameterized test case can be +// instantiated in different translation units linked together. +// (ContainerTest is also instantiated in gtest-typed-test_test.cc.) +INSTANTIATE_TYPED_TEST_CASE_P(Vector, ContainerTest, + testing::Types >); + +#endif // GTEST_HAS_TYPED_TEST_P diff --git a/external/gtest/test/gtest-typed-test_test.cc b/external/gtest/test/gtest-typed-test_test.cc new file mode 100755 index 0000000000..dd4ba43bc2 --- /dev/null +++ b/external/gtest/test/gtest-typed-test_test.cc @@ -0,0 +1,360 @@ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +#include +#include + +#include "test/gtest-typed-test_test.h" +#include "gtest/gtest.h" + +using testing::Test; + +// Used for testing that SetUpTestCase()/TearDownTestCase(), fixture +// ctor/dtor, and SetUp()/TearDown() work correctly in typed tests and +// type-parameterized test. +template +class CommonTest : public Test { + // For some technical reason, SetUpTestCase() and TearDownTestCase() + // must be public. + public: + static void SetUpTestCase() { + shared_ = new T(5); + } + + static void TearDownTestCase() { + delete shared_; + shared_ = NULL; + } + + // This 'protected:' is optional. There's no harm in making all + // members of this fixture class template public. + protected: + // We used to use std::list here, but switched to std::vector since + // MSVC's doesn't compile cleanly with /W4. + typedef std::vector Vector; + typedef std::set IntSet; + + CommonTest() : value_(1) {} + + virtual ~CommonTest() { EXPECT_EQ(3, value_); } + + virtual void SetUp() { + EXPECT_EQ(1, value_); + value_++; + } + + virtual void TearDown() { + EXPECT_EQ(2, value_); + value_++; + } + + T value_; + static T* shared_; +}; + +template +T* CommonTest::shared_ = NULL; + +// This #ifdef block tests typed tests. +#if GTEST_HAS_TYPED_TEST + +using testing::Types; + +// Tests that SetUpTestCase()/TearDownTestCase(), fixture ctor/dtor, +// and SetUp()/TearDown() work correctly in typed tests + +typedef Types TwoTypes; +TYPED_TEST_CASE(CommonTest, TwoTypes); + +TYPED_TEST(CommonTest, ValuesAreCorrect) { + // Static members of the fixture class template can be visited via + // the TestFixture:: prefix. + EXPECT_EQ(5, *TestFixture::shared_); + + // Typedefs in the fixture class template can be visited via the + // "typename TestFixture::" prefix. + typename TestFixture::Vector empty; + EXPECT_EQ(0U, empty.size()); + + typename TestFixture::IntSet empty2; + EXPECT_EQ(0U, empty2.size()); + + // Non-static members of the fixture class must be visited via + // 'this', as required by C++ for class templates. + EXPECT_EQ(2, this->value_); +} + +// The second test makes sure shared_ is not deleted after the first +// test. +TYPED_TEST(CommonTest, ValuesAreStillCorrect) { + // Static members of the fixture class template can also be visited + // via 'this'. 
+ ASSERT_TRUE(this->shared_ != NULL); + EXPECT_EQ(5, *this->shared_); + + // TypeParam can be used to refer to the type parameter. + EXPECT_EQ(static_cast(2), this->value_); +} + +// Tests that multiple TYPED_TEST_CASE's can be defined in the same +// translation unit. + +template +class TypedTest1 : public Test { +}; + +// Verifies that the second argument of TYPED_TEST_CASE can be a +// single type. +TYPED_TEST_CASE(TypedTest1, int); +TYPED_TEST(TypedTest1, A) {} + +template +class TypedTest2 : public Test { +}; + +// Verifies that the second argument of TYPED_TEST_CASE can be a +// Types<...> type list. +TYPED_TEST_CASE(TypedTest2, Types); + +// This also verifies that tests from different typed test cases can +// share the same name. +TYPED_TEST(TypedTest2, A) {} + +// Tests that a typed test case can be defined in a namespace. + +namespace library1 { + +template +class NumericTest : public Test { +}; + +typedef Types NumericTypes; +TYPED_TEST_CASE(NumericTest, NumericTypes); + +TYPED_TEST(NumericTest, DefaultIsZero) { + EXPECT_EQ(0, TypeParam()); +} + +} // namespace library1 + +#endif // GTEST_HAS_TYPED_TEST + +// This #ifdef block tests type-parameterized tests. +#if GTEST_HAS_TYPED_TEST_P + +using testing::Types; +using testing::internal::TypedTestCasePState; + +// Tests TypedTestCasePState. + +class TypedTestCasePStateTest : public Test { + protected: + virtual void SetUp() { + state_.AddTestName("foo.cc", 0, "FooTest", "A"); + state_.AddTestName("foo.cc", 0, "FooTest", "B"); + state_.AddTestName("foo.cc", 0, "FooTest", "C"); + } + + TypedTestCasePState state_; +}; + +TEST_F(TypedTestCasePStateTest, SucceedsForMatchingList) { + const char* tests = "A, B, C"; + EXPECT_EQ(tests, + state_.VerifyRegisteredTestNames("foo.cc", 1, tests)); +} + +// Makes sure that the order of the tests and spaces around the names +// don't matter. +TEST_F(TypedTestCasePStateTest, IgnoresOrderAndSpaces) { + const char* tests = "A,C, B"; + EXPECT_EQ(tests, + state_.VerifyRegisteredTestNames("foo.cc", 1, tests)); +} + +typedef TypedTestCasePStateTest TypedTestCasePStateDeathTest; + +TEST_F(TypedTestCasePStateDeathTest, DetectsDuplicates) { + EXPECT_DEATH_IF_SUPPORTED( + state_.VerifyRegisteredTestNames("foo.cc", 1, "A, B, A, C"), + "foo\\.cc.1.?: Test A is listed more than once\\."); +} + +TEST_F(TypedTestCasePStateDeathTest, DetectsExtraTest) { + EXPECT_DEATH_IF_SUPPORTED( + state_.VerifyRegisteredTestNames("foo.cc", 1, "A, B, C, D"), + "foo\\.cc.1.?: No test named D can be found in this test case\\."); +} + +TEST_F(TypedTestCasePStateDeathTest, DetectsMissedTest) { + EXPECT_DEATH_IF_SUPPORTED( + state_.VerifyRegisteredTestNames("foo.cc", 1, "A, C"), + "foo\\.cc.1.?: You forgot to list test B\\."); +} + +// Tests that defining a test for a parameterized test case generates +// a run-time error if the test case has been registered. +TEST_F(TypedTestCasePStateDeathTest, DetectsTestAfterRegistration) { + state_.VerifyRegisteredTestNames("foo.cc", 1, "A, B, C"); + EXPECT_DEATH_IF_SUPPORTED( + state_.AddTestName("foo.cc", 2, "FooTest", "D"), + "foo\\.cc.2.?: Test D must be defined before REGISTER_TYPED_TEST_CASE_P" + "\\(FooTest, \\.\\.\\.\\)\\."); +} + +// Tests that SetUpTestCase()/TearDownTestCase(), fixture ctor/dtor, +// and SetUp()/TearDown() work correctly in type-parameterized tests. 
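+// (The type-parameterized workflow used below, as a compact sketch with
+// illustrative names: declare the case, define its tests, register them,
+// then instantiate with concrete types.
+//
+//   template <typename T> class MyFixture : public Test {};
+//   TYPED_TEST_CASE_P(MyFixture);
+//   TYPED_TEST_P(MyFixture, DoesSomething) { /* may use TypeParam */ }
+//   REGISTER_TYPED_TEST_CASE_P(MyFixture, DoesSomething);
+//   INSTANTIATE_TYPED_TEST_CASE_P(My, MyFixture, Types<char, int>);
+// )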
+ +template +class DerivedTest : public CommonTest { +}; + +TYPED_TEST_CASE_P(DerivedTest); + +TYPED_TEST_P(DerivedTest, ValuesAreCorrect) { + // Static members of the fixture class template can be visited via + // the TestFixture:: prefix. + EXPECT_EQ(5, *TestFixture::shared_); + + // Non-static members of the fixture class must be visited via + // 'this', as required by C++ for class templates. + EXPECT_EQ(2, this->value_); +} + +// The second test makes sure shared_ is not deleted after the first +// test. +TYPED_TEST_P(DerivedTest, ValuesAreStillCorrect) { + // Static members of the fixture class template can also be visited + // via 'this'. + ASSERT_TRUE(this->shared_ != NULL); + EXPECT_EQ(5, *this->shared_); + EXPECT_EQ(2, this->value_); +} + +REGISTER_TYPED_TEST_CASE_P(DerivedTest, + ValuesAreCorrect, ValuesAreStillCorrect); + +typedef Types MyTwoTypes; +INSTANTIATE_TYPED_TEST_CASE_P(My, DerivedTest, MyTwoTypes); + +// Tests that multiple TYPED_TEST_CASE_P's can be defined in the same +// translation unit. + +template +class TypedTestP1 : public Test { +}; + +TYPED_TEST_CASE_P(TypedTestP1); + +// For testing that the code between TYPED_TEST_CASE_P() and +// TYPED_TEST_P() is not enclosed in a namespace. +typedef int IntAfterTypedTestCaseP; + +TYPED_TEST_P(TypedTestP1, A) {} +TYPED_TEST_P(TypedTestP1, B) {} + +// For testing that the code between TYPED_TEST_P() and +// REGISTER_TYPED_TEST_CASE_P() is not enclosed in a namespace. +typedef int IntBeforeRegisterTypedTestCaseP; + +REGISTER_TYPED_TEST_CASE_P(TypedTestP1, A, B); + +template +class TypedTestP2 : public Test { +}; + +TYPED_TEST_CASE_P(TypedTestP2); + +// This also verifies that tests from different type-parameterized +// test cases can share the same name. +TYPED_TEST_P(TypedTestP2, A) {} + +REGISTER_TYPED_TEST_CASE_P(TypedTestP2, A); + +// Verifies that the code between TYPED_TEST_CASE_P() and +// REGISTER_TYPED_TEST_CASE_P() is not enclosed in a namespace. +IntAfterTypedTestCaseP after = 0; +IntBeforeRegisterTypedTestCaseP before = 0; + +// Verifies that the last argument of INSTANTIATE_TYPED_TEST_CASE_P() +// can be either a single type or a Types<...> type list. +INSTANTIATE_TYPED_TEST_CASE_P(Int, TypedTestP1, int); +INSTANTIATE_TYPED_TEST_CASE_P(Int, TypedTestP2, Types); + +// Tests that the same type-parameterized test case can be +// instantiated more than once in the same translation unit. +INSTANTIATE_TYPED_TEST_CASE_P(Double, TypedTestP2, Types); + +// Tests that the same type-parameterized test case can be +// instantiated in different translation units linked together. +// (ContainerTest is also instantiated in gtest-typed-test_test.cc.) +typedef Types, std::set > MyContainers; +INSTANTIATE_TYPED_TEST_CASE_P(My, ContainerTest, MyContainers); + +// Tests that a type-parameterized test case can be defined and +// instantiated in a namespace. + +namespace library2 { + +template +class NumericTest : public Test { +}; + +TYPED_TEST_CASE_P(NumericTest); + +TYPED_TEST_P(NumericTest, DefaultIsZero) { + EXPECT_EQ(0, TypeParam()); +} + +TYPED_TEST_P(NumericTest, ZeroIsLessThanOne) { + EXPECT_LT(TypeParam(0), TypeParam(1)); +} + +REGISTER_TYPED_TEST_CASE_P(NumericTest, + DefaultIsZero, ZeroIsLessThanOne); +typedef Types NumericTypes; +INSTANTIATE_TYPED_TEST_CASE_P(My, NumericTest, NumericTypes); + +} // namespace library2 + +#endif // GTEST_HAS_TYPED_TEST_P + +#if !defined(GTEST_HAS_TYPED_TEST) && !defined(GTEST_HAS_TYPED_TEST_P) + +// Google Test may not support type-parameterized tests with some +// compilers. 
If we use conditional compilation to compile out all +// code referring to the gtest_main library, MSVC linker will not link +// that library at all and consequently complain about missing entry +// point defined in that library (fatal error LNK1561: entry point +// must be defined). This dummy test keeps gtest_main linked in. +TEST(DummyTest, TypedTestsAreNotSupportedOnThisPlatform) {} + +#endif // #if !defined(GTEST_HAS_TYPED_TEST) && !defined(GTEST_HAS_TYPED_TEST_P) diff --git a/external/gtest/test/gtest-typed-test_test.h b/external/gtest/test/gtest-typed-test_test.h new file mode 100755 index 0000000000..41d75704cf --- /dev/null +++ b/external/gtest/test/gtest-typed-test_test.h @@ -0,0 +1,66 @@ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +#ifndef GTEST_TEST_GTEST_TYPED_TEST_TEST_H_ +#define GTEST_TEST_GTEST_TYPED_TEST_TEST_H_ + +#include "gtest/gtest.h" + +#if GTEST_HAS_TYPED_TEST_P + +using testing::Test; + +// For testing that the same type-parameterized test case can be +// instantiated in different translation units linked together. +// ContainerTest will be instantiated in both gtest-typed-test_test.cc +// and gtest-typed-test2_test.cc. + +template +class ContainerTest : public Test { +}; + +TYPED_TEST_CASE_P(ContainerTest); + +TYPED_TEST_P(ContainerTest, CanBeDefaultConstructed) { + TypeParam container; +} + +TYPED_TEST_P(ContainerTest, InitialSizeIsZero) { + TypeParam container; + EXPECT_EQ(0U, container.size()); +} + +REGISTER_TYPED_TEST_CASE_P(ContainerTest, + CanBeDefaultConstructed, InitialSizeIsZero); + +#endif // GTEST_HAS_TYPED_TEST_P + +#endif // GTEST_TEST_GTEST_TYPED_TEST_TEST_H_ diff --git a/external/gtest/test/gtest-unittest-api_test.cc b/external/gtest/test/gtest-unittest-api_test.cc new file mode 100755 index 0000000000..07083e51b5 --- /dev/null +++ b/external/gtest/test/gtest-unittest-api_test.cc @@ -0,0 +1,341 @@ +// Copyright 2009 Google Inc. All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: vladl@google.com (Vlad Losev) +// +// The Google C++ Testing Framework (Google Test) +// +// This file contains tests verifying correctness of data provided via +// UnitTest's public methods. + +#include "gtest/gtest.h" + +#include // For strcmp. +#include + +using ::testing::InitGoogleTest; + +namespace testing { +namespace internal { + +template +struct LessByName { + bool operator()(const T* a, const T* b) { + return strcmp(a->name(), b->name()) < 0; + } +}; + +class UnitTestHelper { + public: + // Returns the array of pointers to all test cases sorted by the test case + // name. The caller is responsible for deleting the array. + static TestCase const** const GetSortedTestCases() { + UnitTest& unit_test = *UnitTest::GetInstance(); + TestCase const** const test_cases = + new const TestCase*[unit_test.total_test_case_count()]; + + for (int i = 0; i < unit_test.total_test_case_count(); ++i) + test_cases[i] = unit_test.GetTestCase(i); + + std::sort(test_cases, + test_cases + unit_test.total_test_case_count(), + LessByName()); + return test_cases; + } + + // Returns the test case by its name. The caller doesn't own the returned + // pointer. + static const TestCase* FindTestCase(const char* name) { + UnitTest& unit_test = *UnitTest::GetInstance(); + for (int i = 0; i < unit_test.total_test_case_count(); ++i) { + const TestCase* test_case = unit_test.GetTestCase(i); + if (0 == strcmp(test_case->name(), name)) + return test_case; + } + return NULL; + } + + // Returns the array of pointers to all tests in a particular test case + // sorted by the test name. The caller is responsible for deleting the + // array. 
+ static TestInfo const** const GetSortedTests(const TestCase* test_case) { + TestInfo const** const tests = + new const TestInfo*[test_case->total_test_count()]; + + for (int i = 0; i < test_case->total_test_count(); ++i) + tests[i] = test_case->GetTestInfo(i); + + std::sort(tests, tests + test_case->total_test_count(), + LessByName<TestInfo>()); + return tests; + } +}; + +#if GTEST_HAS_TYPED_TEST +template <typename T> class TestCaseWithCommentTest : public Test {}; +TYPED_TEST_CASE(TestCaseWithCommentTest, Types<int>); +TYPED_TEST(TestCaseWithCommentTest, Dummy) {} + +const int kTypedTestCases = 1; +const int kTypedTests = 1; +#else +const int kTypedTestCases = 0; +const int kTypedTests = 0; +#endif // GTEST_HAS_TYPED_TEST + +// We can only test the accessors that do not change value while tests run. +// Since tests can be run in any order, the values of the accessors that track +// test execution (such as failed_test_count) cannot be predicted. +TEST(ApiTest, UnitTestImmutableAccessorsWork) { + UnitTest* unit_test = UnitTest::GetInstance(); + + ASSERT_EQ(2 + kTypedTestCases, unit_test->total_test_case_count()); + EXPECT_EQ(1 + kTypedTestCases, unit_test->test_case_to_run_count()); + EXPECT_EQ(2, unit_test->disabled_test_count()); + EXPECT_EQ(5 + kTypedTests, unit_test->total_test_count()); + EXPECT_EQ(3 + kTypedTests, unit_test->test_to_run_count()); + + const TestCase** const test_cases = UnitTestHelper::GetSortedTestCases(); + + EXPECT_STREQ("ApiTest", test_cases[0]->name()); + EXPECT_STREQ("DISABLED_Test", test_cases[1]->name()); +#if GTEST_HAS_TYPED_TEST + EXPECT_STREQ("TestCaseWithCommentTest/0", test_cases[2]->name()); +#endif // GTEST_HAS_TYPED_TEST + + delete[] test_cases; + + // The following lines initiate actions to verify certain methods in + // FinalSuccessChecker::TearDown. + + // Records a test property to verify TestResult::GetTestProperty().
+ RecordProperty("key", "value"); +} + +AssertionResult IsNull(const char* str) { + if (str != NULL) { + return testing::AssertionFailure() << "argument is " << str; + } + return AssertionSuccess(); +} + +TEST(ApiTest, TestCaseImmutableAccessorsWork) { + const TestCase* test_case = UnitTestHelper::FindTestCase("ApiTest"); + ASSERT_TRUE(test_case != NULL); + + EXPECT_STREQ("ApiTest", test_case->name()); + EXPECT_TRUE(IsNull(test_case->type_param())); + EXPECT_TRUE(test_case->should_run()); + EXPECT_EQ(1, test_case->disabled_test_count()); + EXPECT_EQ(3, test_case->test_to_run_count()); + ASSERT_EQ(4, test_case->total_test_count()); + + const TestInfo** tests = UnitTestHelper::GetSortedTests(test_case); + + EXPECT_STREQ("DISABLED_Dummy1", tests[0]->name()); + EXPECT_STREQ("ApiTest", tests[0]->test_case_name()); + EXPECT_TRUE(IsNull(tests[0]->value_param())); + EXPECT_TRUE(IsNull(tests[0]->type_param())); + EXPECT_FALSE(tests[0]->should_run()); + + EXPECT_STREQ("TestCaseDisabledAccessorsWork", tests[1]->name()); + EXPECT_STREQ("ApiTest", tests[1]->test_case_name()); + EXPECT_TRUE(IsNull(tests[1]->value_param())); + EXPECT_TRUE(IsNull(tests[1]->type_param())); + EXPECT_TRUE(tests[1]->should_run()); + + EXPECT_STREQ("TestCaseImmutableAccessorsWork", tests[2]->name()); + EXPECT_STREQ("ApiTest", tests[2]->test_case_name()); + EXPECT_TRUE(IsNull(tests[2]->value_param())); + EXPECT_TRUE(IsNull(tests[2]->type_param())); + EXPECT_TRUE(tests[2]->should_run()); + + EXPECT_STREQ("UnitTestImmutableAccessorsWork", tests[3]->name()); + EXPECT_STREQ("ApiTest", tests[3]->test_case_name()); + EXPECT_TRUE(IsNull(tests[3]->value_param())); + EXPECT_TRUE(IsNull(tests[3]->type_param())); + EXPECT_TRUE(tests[3]->should_run()); + + delete[] tests; + tests = NULL; + +#if GTEST_HAS_TYPED_TEST + test_case = UnitTestHelper::FindTestCase("TestCaseWithCommentTest/0"); + ASSERT_TRUE(test_case != NULL); + + EXPECT_STREQ("TestCaseWithCommentTest/0", test_case->name()); + EXPECT_STREQ(GetTypeName<int>().c_str(), test_case->type_param()); + EXPECT_TRUE(test_case->should_run()); + EXPECT_EQ(0, test_case->disabled_test_count()); + EXPECT_EQ(1, test_case->test_to_run_count()); + ASSERT_EQ(1, test_case->total_test_count()); + + tests = UnitTestHelper::GetSortedTests(test_case); + + EXPECT_STREQ("Dummy", tests[0]->name()); + EXPECT_STREQ("TestCaseWithCommentTest/0", tests[0]->test_case_name()); + EXPECT_TRUE(IsNull(tests[0]->value_param())); + EXPECT_STREQ(GetTypeName<int>().c_str(), tests[0]->type_param()); + EXPECT_TRUE(tests[0]->should_run()); + + delete[] tests; +#endif // GTEST_HAS_TYPED_TEST +} + +TEST(ApiTest, TestCaseDisabledAccessorsWork) { + const TestCase* test_case = UnitTestHelper::FindTestCase("DISABLED_Test"); + ASSERT_TRUE(test_case != NULL); + + EXPECT_STREQ("DISABLED_Test", test_case->name()); + EXPECT_TRUE(IsNull(test_case->type_param())); + EXPECT_FALSE(test_case->should_run()); + EXPECT_EQ(1, test_case->disabled_test_count()); + EXPECT_EQ(0, test_case->test_to_run_count()); + ASSERT_EQ(1, test_case->total_test_count()); + + const TestInfo* const test_info = test_case->GetTestInfo(0); + EXPECT_STREQ("Dummy2", test_info->name()); + EXPECT_STREQ("DISABLED_Test", test_info->test_case_name()); + EXPECT_TRUE(IsNull(test_info->value_param())); + EXPECT_TRUE(IsNull(test_info->type_param())); + EXPECT_FALSE(test_info->should_run()); +}
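The IsNull() helper above follows the testing::AssertionResult pattern: a predicate that, when it fails, carries its own failure message. A short sketch of how it reads at a call site (the string literals are illustrative only):

    EXPECT_TRUE(IsNull(NULL));    // Passes quietly.
    EXPECT_TRUE(IsNull("oops"));  // Fails and prints: argument is oops

This is why the accessor tests can assert EXPECT_TRUE(IsNull(...)) on type_param() and value_param() and still produce a readable diagnostic when a parameter is unexpectedly set.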
+// These two tests are here to provide support for testing +// test_case_to_run_count, disabled_test_count, and test_to_run_count. +TEST(ApiTest, DISABLED_Dummy1) {} +TEST(DISABLED_Test, Dummy2) {} + +class FinalSuccessChecker : public Environment { + protected: + virtual void TearDown() { + UnitTest* unit_test = UnitTest::GetInstance(); + + EXPECT_EQ(1 + kTypedTestCases, unit_test->successful_test_case_count()); + EXPECT_EQ(3 + kTypedTests, unit_test->successful_test_count()); + EXPECT_EQ(0, unit_test->failed_test_case_count()); + EXPECT_EQ(0, unit_test->failed_test_count()); + EXPECT_TRUE(unit_test->Passed()); + EXPECT_FALSE(unit_test->Failed()); + ASSERT_EQ(2 + kTypedTestCases, unit_test->total_test_case_count()); + + const TestCase** const test_cases = UnitTestHelper::GetSortedTestCases(); + + EXPECT_STREQ("ApiTest", test_cases[0]->name()); + EXPECT_TRUE(IsNull(test_cases[0]->type_param())); + EXPECT_TRUE(test_cases[0]->should_run()); + EXPECT_EQ(1, test_cases[0]->disabled_test_count()); + ASSERT_EQ(4, test_cases[0]->total_test_count()); + EXPECT_EQ(3, test_cases[0]->successful_test_count()); + EXPECT_EQ(0, test_cases[0]->failed_test_count()); + EXPECT_TRUE(test_cases[0]->Passed()); + EXPECT_FALSE(test_cases[0]->Failed()); + + EXPECT_STREQ("DISABLED_Test", test_cases[1]->name()); + EXPECT_TRUE(IsNull(test_cases[1]->type_param())); + EXPECT_FALSE(test_cases[1]->should_run()); + EXPECT_EQ(1, test_cases[1]->disabled_test_count()); + ASSERT_EQ(1, test_cases[1]->total_test_count()); + EXPECT_EQ(0, test_cases[1]->successful_test_count()); + EXPECT_EQ(0, test_cases[1]->failed_test_count()); + +#if GTEST_HAS_TYPED_TEST + EXPECT_STREQ("TestCaseWithCommentTest/0", test_cases[2]->name()); + EXPECT_STREQ(GetTypeName<int>().c_str(), test_cases[2]->type_param()); + EXPECT_TRUE(test_cases[2]->should_run()); + EXPECT_EQ(0, test_cases[2]->disabled_test_count()); + ASSERT_EQ(1, test_cases[2]->total_test_count()); + EXPECT_EQ(1, test_cases[2]->successful_test_count()); + EXPECT_EQ(0, test_cases[2]->failed_test_count()); + EXPECT_TRUE(test_cases[2]->Passed()); + EXPECT_FALSE(test_cases[2]->Failed()); +#endif // GTEST_HAS_TYPED_TEST + + const TestCase* test_case = UnitTestHelper::FindTestCase("ApiTest"); + const TestInfo** tests = UnitTestHelper::GetSortedTests(test_case); + EXPECT_STREQ("DISABLED_Dummy1", tests[0]->name()); + EXPECT_STREQ("ApiTest", tests[0]->test_case_name()); + EXPECT_FALSE(tests[0]->should_run()); + + EXPECT_STREQ("TestCaseDisabledAccessorsWork", tests[1]->name()); + EXPECT_STREQ("ApiTest", tests[1]->test_case_name()); + EXPECT_TRUE(IsNull(tests[1]->value_param())); + EXPECT_TRUE(IsNull(tests[1]->type_param())); + EXPECT_TRUE(tests[1]->should_run()); + EXPECT_TRUE(tests[1]->result()->Passed()); + EXPECT_EQ(0, tests[1]->result()->test_property_count()); + + EXPECT_STREQ("TestCaseImmutableAccessorsWork", tests[2]->name()); + EXPECT_STREQ("ApiTest", tests[2]->test_case_name()); + EXPECT_TRUE(IsNull(tests[2]->value_param())); + EXPECT_TRUE(IsNull(tests[2]->type_param())); + EXPECT_TRUE(tests[2]->should_run()); + EXPECT_TRUE(tests[2]->result()->Passed()); + EXPECT_EQ(0, tests[2]->result()->test_property_count()); + + EXPECT_STREQ("UnitTestImmutableAccessorsWork", tests[3]->name()); + EXPECT_STREQ("ApiTest", tests[3]->test_case_name()); + EXPECT_TRUE(IsNull(tests[3]->value_param())); + EXPECT_TRUE(IsNull(tests[3]->type_param())); + EXPECT_TRUE(tests[3]->should_run()); + EXPECT_TRUE(tests[3]->result()->Passed()); + EXPECT_EQ(1, tests[3]->result()->test_property_count()); + const TestProperty& property = tests[3]->result()->GetTestProperty(0); + EXPECT_STREQ("key", property.key()); + EXPECT_STREQ("value", property.value()); +
+ delete[] tests; + +#if GTEST_HAS_TYPED_TEST + test_case = UnitTestHelper::FindTestCase("TestCaseWithCommentTest/0"); + tests = UnitTestHelper::GetSortedTests(test_case); + + EXPECT_STREQ("Dummy", tests[0]->name()); + EXPECT_STREQ("TestCaseWithCommentTest/0", tests[0]->test_case_name()); + EXPECT_TRUE(IsNull(tests[0]->value_param())); + EXPECT_STREQ(GetTypeName<int>().c_str(), tests[0]->type_param()); + EXPECT_TRUE(tests[0]->should_run()); + EXPECT_TRUE(tests[0]->result()->Passed()); + EXPECT_EQ(0, tests[0]->result()->test_property_count()); + + delete[] tests; +#endif // GTEST_HAS_TYPED_TEST + delete[] test_cases; + } +}; + +} // namespace internal +} // namespace testing + +int main(int argc, char **argv) { + InitGoogleTest(&argc, argv); + + AddGlobalTestEnvironment(new testing::internal::FinalSuccessChecker()); + + return RUN_ALL_TESTS(); +} diff --git a/external/gtest/test/gtest_all_test.cc b/external/gtest/test/gtest_all_test.cc new file mode 100755 index 0000000000..955aa62828 --- /dev/null +++ b/external/gtest/test/gtest_all_test.cc @@ -0,0 +1,47 @@ +// Copyright 2009, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Tests for Google C++ Testing Framework (Google Test) +// +// Sometimes it's desirable to build most of Google Test's own tests +// by compiling a single file. This file serves this purpose.
+#include "test/gtest-filepath_test.cc" +#include "test/gtest-linked_ptr_test.cc" +#include "test/gtest-message_test.cc" +#include "test/gtest-options_test.cc" +#include "test/gtest-port_test.cc" +#include "test/gtest_pred_impl_unittest.cc" +#include "test/gtest_prod_test.cc" +#include "test/gtest-test-part_test.cc" +#include "test/gtest-typed-test_test.cc" +#include "test/gtest-typed-test2_test.cc" +#include "test/gtest_unittest.cc" +#include "test/production.cc" diff --git a/external/gtest/test/gtest_break_on_failure_unittest.py b/external/gtest/test/gtest_break_on_failure_unittest.py new file mode 100755 index 0000000000..78f3e0f53b --- /dev/null +++ b/external/gtest/test/gtest_break_on_failure_unittest.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python +# +# Copyright 2006, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit test for Google Test's break-on-failure mode. + +A user can ask Google Test to seg-fault when an assertion fails, using +either the GTEST_BREAK_ON_FAILURE environment variable or the +--gtest_break_on_failure flag. This script tests such functionality +by invoking gtest_break_on_failure_unittest_ (a program written with +Google Test) with different environments and command line flags. +""" + +__author__ = 'wan@google.com (Zhanyong Wan)' + +import gtest_test_utils +import os +import sys + + +# Constants. + +IS_WINDOWS = os.name == 'nt' + +# The environment variable for enabling/disabling the break-on-failure mode. +BREAK_ON_FAILURE_ENV_VAR = 'GTEST_BREAK_ON_FAILURE' + +# The command line flag for enabling/disabling the break-on-failure mode. +BREAK_ON_FAILURE_FLAG = 'gtest_break_on_failure' + +# The environment variable for enabling/disabling the throw-on-failure mode. +THROW_ON_FAILURE_ENV_VAR = 'GTEST_THROW_ON_FAILURE' + +# The environment variable for enabling/disabling the catch-exceptions mode. +CATCH_EXCEPTIONS_ENV_VAR = 'GTEST_CATCH_EXCEPTIONS' + +# Path to the gtest_break_on_failure_unittest_ program. 
+EXE_PATH = gtest_test_utils.GetTestExecutablePath( + 'gtest_break_on_failure_unittest_') + + +environ = gtest_test_utils.environ +SetEnvVar = gtest_test_utils.SetEnvVar + +# Tests in this file run a Google-Test-based test program and expect it +# to terminate prematurely. Therefore they are incompatible with +# the premature-exit-file protocol by design. Unset the +# premature-exit filepath to prevent Google Test from creating +# the file. +SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None) + + +def Run(command): + """Runs a command; returns 1 if it was killed by a signal, or 0 otherwise.""" + + p = gtest_test_utils.Subprocess(command, env=environ) + if p.terminated_by_signal: + return 1 + else: + return 0 + + +# The tests. + + +class GTestBreakOnFailureUnitTest(gtest_test_utils.TestCase): + """Tests using the GTEST_BREAK_ON_FAILURE environment variable or + the --gtest_break_on_failure flag to turn assertion failures into + segmentation faults. + """ + + def RunAndVerify(self, env_var_value, flag_value, expect_seg_fault): + """Runs gtest_break_on_failure_unittest_ and verifies that it does + (or does not) have a seg-fault. + + Args: + env_var_value: value of the GTEST_BREAK_ON_FAILURE environment + variable; None if the variable should be unset. + flag_value: value of the --gtest_break_on_failure flag; + None if the flag should not be present. + expect_seg_fault: 1 if the program is expected to generate a seg-fault; + 0 otherwise. + """ + + SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, env_var_value) + + if env_var_value is None: + env_var_value_msg = ' is not set' + else: + env_var_value_msg = '=' + env_var_value + + if flag_value is None: + flag = '' + elif flag_value == '0': + flag = '--%s=0' % BREAK_ON_FAILURE_FLAG + else: + flag = '--%s' % BREAK_ON_FAILURE_FLAG + + command = [EXE_PATH] + if flag: + command.append(flag) + + if expect_seg_fault: + should_or_not = 'should' + else: + should_or_not = 'should not' + + has_seg_fault = Run(command) + + SetEnvVar(BREAK_ON_FAILURE_ENV_VAR, None) + + msg = ('when %s%s, an assertion failure in "%s" %s cause a seg-fault.' 
% + (BREAK_ON_FAILURE_ENV_VAR, env_var_value_msg, ' '.join(command), + should_or_not)) + self.assert_(has_seg_fault == expect_seg_fault, msg) + + def testDefaultBehavior(self): + """Tests the behavior of the default mode.""" + + self.RunAndVerify(env_var_value=None, + flag_value=None, + expect_seg_fault=0) + + def testEnvVar(self): + """Tests using the GTEST_BREAK_ON_FAILURE environment variable.""" + + self.RunAndVerify(env_var_value='0', + flag_value=None, + expect_seg_fault=0) + self.RunAndVerify(env_var_value='1', + flag_value=None, + expect_seg_fault=1) + + def testFlag(self): + """Tests using the --gtest_break_on_failure flag.""" + + self.RunAndVerify(env_var_value=None, + flag_value='0', + expect_seg_fault=0) + self.RunAndVerify(env_var_value=None, + flag_value='1', + expect_seg_fault=1) + + def testFlagOverridesEnvVar(self): + """Tests that the flag overrides the environment variable.""" + + self.RunAndVerify(env_var_value='0', + flag_value='0', + expect_seg_fault=0) + self.RunAndVerify(env_var_value='0', + flag_value='1', + expect_seg_fault=1) + self.RunAndVerify(env_var_value='1', + flag_value='0', + expect_seg_fault=0) + self.RunAndVerify(env_var_value='1', + flag_value='1', + expect_seg_fault=1) + + def testBreakOnFailureOverridesThrowOnFailure(self): + """Tests that gtest_break_on_failure overrides gtest_throw_on_failure.""" + + SetEnvVar(THROW_ON_FAILURE_ENV_VAR, '1') + try: + self.RunAndVerify(env_var_value=None, + flag_value='1', + expect_seg_fault=1) + finally: + SetEnvVar(THROW_ON_FAILURE_ENV_VAR, None) + + if IS_WINDOWS: + def testCatchExceptionsDoesNotInterfere(self): + """Tests that gtest_catch_exceptions doesn't interfere.""" + + SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, '1') + try: + self.RunAndVerify(env_var_value='1', + flag_value='1', + expect_seg_fault=1) + finally: + SetEnvVar(CATCH_EXCEPTIONS_ENV_VAR, None) + + +if __name__ == '__main__': + gtest_test_utils.Main() diff --git a/external/gtest/test/gtest_break_on_failure_unittest_.cc b/external/gtest/test/gtest_break_on_failure_unittest_.cc new file mode 100755 index 0000000000..dd07478c07 --- /dev/null +++ b/external/gtest/test/gtest_break_on_failure_unittest_.cc @@ -0,0 +1,88 @@ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Unit test for Google Test's break-on-failure mode. +// +// A user can ask Google Test to seg-fault when an assertion fails, using +// either the GTEST_BREAK_ON_FAILURE environment variable or the +// --gtest_break_on_failure flag. This file is used for testing such +// functionality. +// +// This program will be invoked from a Python unit test. It is +// expected to fail. Don't run it directly. + +#include "gtest/gtest.h" + +#if GTEST_OS_WINDOWS +# include <windows.h> +# include <stdlib.h> +#endif + +namespace { + +// A test that's expected to fail. +TEST(Foo, Bar) { + EXPECT_EQ(2, 3); +} + +#if GTEST_HAS_SEH && !GTEST_OS_WINDOWS_MOBILE +// On Windows Mobile global exception handlers are not supported. +LONG WINAPI ExitWithExceptionCode( + struct _EXCEPTION_POINTERS* exception_pointers) { + exit(exception_pointers->ExceptionRecord->ExceptionCode); +} +#endif + +} // namespace + +int main(int argc, char **argv) { +#if GTEST_OS_WINDOWS + // Suppresses display of the Windows error dialog upon encountering + // a general protection fault (segment violation). + SetErrorMode(SEM_NOGPFAULTERRORBOX | SEM_FAILCRITICALERRORS); + +# if GTEST_HAS_SEH && !GTEST_OS_WINDOWS_MOBILE + + // The default unhandled exception filter does not always exit + // with the exception code as exit code - for example it exits with + // 0 for EXCEPTION_ACCESS_VIOLATION and 1 for EXCEPTION_BREAKPOINT + // if the application is compiled in debug mode. Thus we use our own + // filter which always exits with the exception code for unhandled + // exceptions. + SetUnhandledExceptionFilter(ExitWithExceptionCode); + +# endif +#endif + + testing::InitGoogleTest(&argc, argv); + + return RUN_ALL_TESTS(); +} diff --git a/external/gtest/test/gtest_catch_exceptions_test.py b/external/gtest/test/gtest_catch_exceptions_test.py new file mode 100755 index 0000000000..e6fc22fd1f --- /dev/null +++ b/external/gtest/test/gtest_catch_exceptions_test.py @@ -0,0 +1,237 @@ +#!/usr/bin/env python +# +# Copyright 2010 Google Inc. All Rights Reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Tests Google Test's exception catching behavior. + +This script invokes gtest_catch_exceptions_test_ and +gtest_catch_exceptions_ex_test_ (programs written with +Google Test) and verifies their output. +""" + +__author__ = 'vladl@google.com (Vlad Losev)' + +import os + +import gtest_test_utils + +# Constants. +FLAG_PREFIX = '--gtest_' +LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests' +NO_CATCH_EXCEPTIONS_FLAG = FLAG_PREFIX + 'catch_exceptions=0' +FILTER_FLAG = FLAG_PREFIX + 'filter' + +# Path to the gtest_catch_exceptions_ex_test_ binary, compiled with +# exceptions enabled. +EX_EXE_PATH = gtest_test_utils.GetTestExecutablePath( + 'gtest_catch_exceptions_ex_test_') + +# Path to the gtest_catch_exceptions_test_ binary, compiled with +# exceptions disabled. +EXE_PATH = gtest_test_utils.GetTestExecutablePath( + 'gtest_catch_exceptions_no_ex_test_') + +environ = gtest_test_utils.environ +SetEnvVar = gtest_test_utils.SetEnvVar + +# Tests in this file run a Google-Test-based test program and expect it +# to terminate prematurely. Therefore they are incompatible with +# the premature-exit-file protocol by design. Unset the +# premature-exit filepath to prevent Google Test from creating +# the file. +SetEnvVar(gtest_test_utils.PREMATURE_EXIT_FILE_ENV_VAR, None) + +TEST_LIST = gtest_test_utils.Subprocess( + [EXE_PATH, LIST_TESTS_FLAG], env=environ).output + +SUPPORTS_SEH_EXCEPTIONS = 'ThrowsSehException' in TEST_LIST + +if SUPPORTS_SEH_EXCEPTIONS: + BINARY_OUTPUT = gtest_test_utils.Subprocess([EXE_PATH], env=environ).output + +EX_BINARY_OUTPUT = gtest_test_utils.Subprocess( + [EX_EXE_PATH], env=environ).output + + +# The tests. +if SUPPORTS_SEH_EXCEPTIONS: + # pylint:disable-msg=C6302 + class CatchSehExceptionsTest(gtest_test_utils.TestCase): + """Tests exception-catching behavior.""" + + + def TestSehExceptions(self, test_output): + self.assert_('SEH exception with code 0x2a thrown ' + 'in the test fixture\'s constructor' + in test_output) + self.assert_('SEH exception with code 0x2a thrown ' + 'in the test fixture\'s destructor' + in test_output) + self.assert_('SEH exception with code 0x2a thrown in SetUpTestCase()' + in test_output) + self.assert_('SEH exception with code 0x2a thrown in TearDownTestCase()' + in test_output) + self.assert_('SEH exception with code 0x2a thrown in SetUp()' + in test_output) + self.assert_('SEH exception with code 0x2a thrown in TearDown()' + in test_output) + self.assert_('SEH exception with code 0x2a thrown in the test body' + in test_output) + + def testCatchesSehExceptionsWithCxxExceptionsEnabled(self): + self.TestSehExceptions(EX_BINARY_OUTPUT) + + def testCatchesSehExceptionsWithCxxExceptionsDisabled(self): + self.TestSehExceptions(BINARY_OUTPUT) + + +class CatchCxxExceptionsTest(gtest_test_utils.TestCase): + """Tests C++ exception-catching behavior. 
+ + Tests in this test case verify that: + * C++ exceptions are caught and logged as C++ (not SEH) exceptions + * Exceptions thrown affect the remainder of the test work flow in the + expected manner. + """ + + def testCatchesCxxExceptionsInFixtureConstructor(self): + self.assert_('C++ exception with description ' + '"Standard C++ exception" thrown ' + 'in the test fixture\'s constructor' + in EX_BINARY_OUTPUT) + self.assert_('unexpected' not in EX_BINARY_OUTPUT, + 'This failure belongs in this test only if ' + '"CxxExceptionInConstructorTest" (no quotes) ' + 'appears on the same line as words "called unexpectedly"') + + if ('CxxExceptionInDestructorTest.ThrowsExceptionInDestructor' in + EX_BINARY_OUTPUT): + + def testCatchesCxxExceptionsInFixtureDestructor(self): + self.assert_('C++ exception with description ' + '"Standard C++ exception" thrown ' + 'in the test fixture\'s destructor' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInDestructorTest::TearDownTestCase() ' + 'called as expected.' + in EX_BINARY_OUTPUT) + + def testCatchesCxxExceptionsInSetUpTestCase(self): + self.assert_('C++ exception with description "Standard C++ exception"' + ' thrown in SetUpTestCase()' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInConstructorTest::TearDownTestCase() ' + 'called as expected.' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInSetUpTestCaseTest constructor ' + 'called as expected.' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInSetUpTestCaseTest destructor ' + 'called as expected.' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInSetUpTestCaseTest::SetUp() ' + 'called as expected.' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInSetUpTestCaseTest::TearDown() ' + 'called as expected.' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInSetUpTestCaseTest test body ' + 'called as expected.' + in EX_BINARY_OUTPUT) + + def testCatchesCxxExceptionsInTearDownTestCase(self): + self.assert_('C++ exception with description "Standard C++ exception"' + ' thrown in TearDownTestCase()' + in EX_BINARY_OUTPUT) + + def testCatchesCxxExceptionsInSetUp(self): + self.assert_('C++ exception with description "Standard C++ exception"' + ' thrown in SetUp()' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInSetUpTest::TearDownTestCase() ' + 'called as expected.' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInSetUpTest destructor ' + 'called as expected.' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInSetUpTest::TearDown() ' + 'called as expected.' + in EX_BINARY_OUTPUT) + self.assert_('unexpected' not in EX_BINARY_OUTPUT, + 'This failure belongs in this test only if ' + '"CxxExceptionInSetUpTest" (no quotes) ' + 'appears on the same line as words "called unexpectedly"') + + def testCatchesCxxExceptionsInTearDown(self): + self.assert_('C++ exception with description "Standard C++ exception"' + ' thrown in TearDown()' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInTearDownTest::TearDownTestCase() ' + 'called as expected.' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInTearDownTest destructor ' + 'called as expected.' + in EX_BINARY_OUTPUT) + + def testCatchesCxxExceptionsInTestBody(self): + self.assert_('C++ exception with description "Standard C++ exception"' + ' thrown in the test body' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInTestBodyTest::TearDownTestCase() ' + 'called as expected.' + in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInTestBodyTest destructor ' + 'called as expected.'
+ in EX_BINARY_OUTPUT) + self.assert_('CxxExceptionInTestBodyTest::TearDown() ' + 'called as expected.' + in EX_BINARY_OUTPUT) + + def testCatchesNonStdCxxExceptions(self): + self.assert_('Unknown C++ exception thrown in the test body' + in EX_BINARY_OUTPUT) + + def testUnhandledCxxExceptionsAbortTheProgram(self): + # Filters out SEH exception tests on Windows. Unhandled SEH exceptions + # cause tests to show pop-up windows there. + FILTER_OUT_SEH_TESTS_FLAG = FILTER_FLAG + '=-*Seh*' + # By default, Google Test doesn't catch the exceptions. + uncaught_exceptions_ex_binary_output = gtest_test_utils.Subprocess( + [EX_EXE_PATH, + NO_CATCH_EXCEPTIONS_FLAG, + FILTER_OUT_SEH_TESTS_FLAG], + env=environ).output + + self.assert_('Unhandled C++ exception terminating the program' + in uncaught_exceptions_ex_binary_output) + self.assert_('unexpected' not in uncaught_exceptions_ex_binary_output) + + +if __name__ == '__main__': + gtest_test_utils.Main() diff --git a/external/gtest/test/gtest_catch_exceptions_test_.cc b/external/gtest/test/gtest_catch_exceptions_test_.cc new file mode 100755 index 0000000000..d0fc82c998 --- /dev/null +++ b/external/gtest/test/gtest_catch_exceptions_test_.cc @@ -0,0 +1,311 @@ +// Copyright 2010, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: vladl@google.com (Vlad Losev) +// +// Tests for Google Test itself. Tests in this file throw C++ or SEH +// exceptions, and the output is verified by gtest_catch_exceptions_test.py. + +#include "gtest/gtest.h" + +#include <stdio.h> // NOLINT +#include <stdlib.h> // For exit(). + +#if GTEST_HAS_SEH +# include <windows.h> +#endif + +#if GTEST_HAS_EXCEPTIONS +# include <exception> // For set_terminate().
+# include <stdexcept> +#endif + +using testing::Test; + +#if GTEST_HAS_SEH + +class SehExceptionInConstructorTest : public Test { + public: + SehExceptionInConstructorTest() { RaiseException(42, 0, 0, NULL); } +}; + +TEST_F(SehExceptionInConstructorTest, ThrowsExceptionInConstructor) {} + +class SehExceptionInDestructorTest : public Test { + public: + ~SehExceptionInDestructorTest() { RaiseException(42, 0, 0, NULL); } +}; + +TEST_F(SehExceptionInDestructorTest, ThrowsExceptionInDestructor) {} + +class SehExceptionInSetUpTestCaseTest : public Test { + public: + static void SetUpTestCase() { RaiseException(42, 0, 0, NULL); } +}; + +TEST_F(SehExceptionInSetUpTestCaseTest, ThrowsExceptionInSetUpTestCase) {} + +class SehExceptionInTearDownTestCaseTest : public Test { + public: + static void TearDownTestCase() { RaiseException(42, 0, 0, NULL); } +}; + +TEST_F(SehExceptionInTearDownTestCaseTest, ThrowsExceptionInTearDownTestCase) {} + +class SehExceptionInSetUpTest : public Test { + protected: + virtual void SetUp() { RaiseException(42, 0, 0, NULL); } +}; + +TEST_F(SehExceptionInSetUpTest, ThrowsExceptionInSetUp) {} + +class SehExceptionInTearDownTest : public Test { + protected: + virtual void TearDown() { RaiseException(42, 0, 0, NULL); } +}; + +TEST_F(SehExceptionInTearDownTest, ThrowsExceptionInTearDown) {} + +TEST(SehExceptionTest, ThrowsSehException) { + RaiseException(42, 0, 0, NULL); +} + +#endif // GTEST_HAS_SEH + +#if GTEST_HAS_EXCEPTIONS + +class CxxExceptionInConstructorTest : public Test { + public: + CxxExceptionInConstructorTest() { + // Without this macro VC++ complains about unreachable code at the end of + // the constructor. + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_( + throw std::runtime_error("Standard C++ exception")); + } + + static void TearDownTestCase() { + printf("%s", + "CxxExceptionInConstructorTest::TearDownTestCase() " + "called as expected.\n"); + } + + protected: + ~CxxExceptionInConstructorTest() { + ADD_FAILURE() << "CxxExceptionInConstructorTest destructor " + << "called unexpectedly."; + } + + virtual void SetUp() { + ADD_FAILURE() << "CxxExceptionInConstructorTest::SetUp() " + << "called unexpectedly."; + } + + virtual void TearDown() { + ADD_FAILURE() << "CxxExceptionInConstructorTest::TearDown() " + << "called unexpectedly."; + } +}; + +TEST_F(CxxExceptionInConstructorTest, ThrowsExceptionInConstructor) { + ADD_FAILURE() << "CxxExceptionInConstructorTest test body " + << "called unexpectedly."; +} + +// Exceptions in destructors are not supported in C++11.
+#if !defined(__GXX_EXPERIMENTAL_CXX0X__) && __cplusplus < 201103L +class CxxExceptionInDestructorTest : public Test { + public: + static void TearDownTestCase() { + printf("%s", + "CxxExceptionInDestructorTest::TearDownTestCase() " + "called as expected.\n"); + } + + protected: + ~CxxExceptionInDestructorTest() { + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_( + throw std::runtime_error("Standard C++ exception")); + } +}; + +TEST_F(CxxExceptionInDestructorTest, ThrowsExceptionInDestructor) {} +#endif // C++11 mode + +class CxxExceptionInSetUpTestCaseTest : public Test { + public: + CxxExceptionInSetUpTestCaseTest() { + printf("%s", + "CxxExceptionInSetUpTestCaseTest constructor " + "called as expected.\n"); + } + + static void SetUpTestCase() { + throw std::runtime_error("Standard C++ exception"); + } + + static void TearDownTestCase() { + printf("%s", + "CxxExceptionInSetUpTestCaseTest::TearDownTestCase() " + "called as expected.\n"); + } + + protected: + ~CxxExceptionInSetUpTestCaseTest() { + printf("%s", + "CxxExceptionInSetUpTestCaseTest destructor " + "called as expected.\n"); + } + + virtual void SetUp() { + printf("%s", + "CxxExceptionInSetUpTestCaseTest::SetUp() " + "called as expected.\n"); + } + + virtual void TearDown() { + printf("%s", + "CxxExceptionInSetUpTestCaseTest::TearDown() " + "called as expected.\n"); + } +}; + +TEST_F(CxxExceptionInSetUpTestCaseTest, ThrowsExceptionInSetUpTestCase) { + printf("%s", + "CxxExceptionInSetUpTestCaseTest test body " + "called as expected.\n"); +} + +class CxxExceptionInTearDownTestCaseTest : public Test { + public: + static void TearDownTestCase() { + throw std::runtime_error("Standard C++ exception"); + } +}; + +TEST_F(CxxExceptionInTearDownTestCaseTest, ThrowsExceptionInTearDownTestCase) {} + +class CxxExceptionInSetUpTest : public Test { + public: + static void TearDownTestCase() { + printf("%s", + "CxxExceptionInSetUpTest::TearDownTestCase() " + "called as expected.\n"); + } + + protected: + ~CxxExceptionInSetUpTest() { + printf("%s", + "CxxExceptionInSetUpTest destructor " + "called as expected.\n"); + } + + virtual void SetUp() { throw std::runtime_error("Standard C++ exception"); } + + virtual void TearDown() { + printf("%s", + "CxxExceptionInSetUpTest::TearDown() " + "called as expected.\n"); + } +}; + +TEST_F(CxxExceptionInSetUpTest, ThrowsExceptionInSetUp) { + ADD_FAILURE() << "CxxExceptionInSetUpTest test body " + << "called unexpectedly."; +} + +class CxxExceptionInTearDownTest : public Test { + public: + static void TearDownTestCase() { + printf("%s", + "CxxExceptionInTearDownTest::TearDownTestCase() " + "called as expected.\n"); + } + + protected: + ~CxxExceptionInTearDownTest() { + printf("%s", + "CxxExceptionInTearDownTest destructor " + "called as expected.\n"); + } + + virtual void TearDown() { + throw std::runtime_error("Standard C++ exception"); + } +}; + +TEST_F(CxxExceptionInTearDownTest, ThrowsExceptionInTearDown) {} + +class CxxExceptionInTestBodyTest : public Test { + public: + static void TearDownTestCase() { + printf("%s", + "CxxExceptionInTestBodyTest::TearDownTestCase() " + "called as expected.\n"); + } + + protected: + ~CxxExceptionInTestBodyTest() { + printf("%s", + "CxxExceptionInTestBodyTest destructor " + "called as expected.\n"); + } + + virtual void TearDown() { + printf("%s", + "CxxExceptionInTestBodyTest::TearDown() " + "called as expected.\n"); + } +}; + +TEST_F(CxxExceptionInTestBodyTest, ThrowsStdCxxException) { + throw std::runtime_error("Standard C++ exception"); +} + +TEST(CxxExceptionTest, 
ThrowsNonStdCxxException) { + throw "C-string"; +} + +// This terminate handler aborts the program using exit() rather than abort(). +// This avoids showing pop-ups on Windows systems and core dumps on Unix-like +// ones. +void TerminateHandler() { + fprintf(stderr, "%s\n", "Unhandled C++ exception terminating the program."); + fflush(NULL); + exit(3); +} + +#endif // GTEST_HAS_EXCEPTIONS + +int main(int argc, char** argv) { +#if GTEST_HAS_EXCEPTIONS + std::set_terminate(&TerminateHandler); +#endif + testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/gtest/test/gtest_color_test.py b/external/gtest/test/gtest_color_test.py new file mode 100755 index 0000000000..d02a53ed85 --- /dev/null +++ b/external/gtest/test/gtest_color_test.py @@ -0,0 +1,130 @@ +#!/usr/bin/env python +# +# Copyright 2008, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +"""Verifies that Google Test correctly determines whether to use colors.""" + +__author__ = 'wan@google.com (Zhanyong Wan)' + +import os +import gtest_test_utils + + +IS_WINDOWS = os.name = 'nt' + +COLOR_ENV_VAR = 'GTEST_COLOR' +COLOR_FLAG = 'gtest_color' +COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_color_test_') + + +def SetEnvVar(env_var, value): + """Sets the env variable to 'value'; unsets it when 'value' is None.""" + + if value is not None: + os.environ[env_var] = value + elif env_var in os.environ: + del os.environ[env_var] + + +def UsesColor(term, color_env_var, color_flag): + """Runs gtest_color_test_ and returns its exit code.""" + + SetEnvVar('TERM', term) + SetEnvVar(COLOR_ENV_VAR, color_env_var) + + if color_flag is None: + args = [] + else: + args = ['--%s=%s' % (COLOR_FLAG, color_flag)] + p = gtest_test_utils.Subprocess([COMMAND] + args) + return not p.exited or p.exit_code + + +class GTestColorTest(gtest_test_utils.TestCase): + def testNoEnvVarNoFlag(self): + """Tests the case when there's neither GTEST_COLOR nor --gtest_color.""" + + if not IS_WINDOWS: + self.assert_(not UsesColor('dumb', None, None)) + self.assert_(not UsesColor('emacs', None, None)) + self.assert_(not UsesColor('xterm-mono', None, None)) + self.assert_(not UsesColor('unknown', None, None)) + self.assert_(not UsesColor(None, None, None)) + self.assert_(UsesColor('linux', None, None)) + self.assert_(UsesColor('cygwin', None, None)) + self.assert_(UsesColor('xterm', None, None)) + self.assert_(UsesColor('xterm-color', None, None)) + self.assert_(UsesColor('xterm-256color', None, None)) + + def testFlagOnly(self): + """Tests the case when there's --gtest_color but not GTEST_COLOR.""" + + self.assert_(not UsesColor('dumb', None, 'no')) + self.assert_(not UsesColor('xterm-color', None, 'no')) + if not IS_WINDOWS: + self.assert_(not UsesColor('emacs', None, 'auto')) + self.assert_(UsesColor('xterm', None, 'auto')) + self.assert_(UsesColor('dumb', None, 'yes')) + self.assert_(UsesColor('xterm', None, 'yes')) + + def testEnvVarOnly(self): + """Tests the case when there's GTEST_COLOR but not --gtest_color.""" + + self.assert_(not UsesColor('dumb', 'no', None)) + self.assert_(not UsesColor('xterm-color', 'no', None)) + if not IS_WINDOWS: + self.assert_(not UsesColor('dumb', 'auto', None)) + self.assert_(UsesColor('xterm-color', 'auto', None)) + self.assert_(UsesColor('dumb', 'yes', None)) + self.assert_(UsesColor('xterm-color', 'yes', None)) + + def testEnvVarAndFlag(self): + """Tests the case when there are both GTEST_COLOR and --gtest_color.""" + + self.assert_(not UsesColor('xterm-color', 'no', 'no')) + self.assert_(UsesColor('dumb', 'no', 'yes')) + self.assert_(UsesColor('xterm-color', 'no', 'auto')) + + def testAliasesOfYesAndNo(self): + """Tests using aliases in specifying --gtest_color.""" + + self.assert_(UsesColor('dumb', None, 'true')) + self.assert_(UsesColor('dumb', None, 'YES')) + self.assert_(UsesColor('dumb', None, 'T')) + self.assert_(UsesColor('dumb', None, '1')) + + self.assert_(not UsesColor('xterm', None, 'f')) + self.assert_(not UsesColor('xterm', None, 'false')) + self.assert_(not UsesColor('xterm', None, '0')) + self.assert_(not UsesColor('xterm', None, 'unknown')) + + +if __name__ == '__main__': + gtest_test_utils.Main() diff --git a/external/gtest/test/gtest_color_test_.cc b/external/gtest/test/gtest_color_test_.cc new file mode 100755 index 0000000000..f61ebb89b8 --- /dev/null +++ b/external/gtest/test/gtest_color_test_.cc @@ -0,0 +1,71 @@ +// Copyright 2008, Google Inc. 
diff --git a/external/gtest/test/gtest_color_test_.cc b/external/gtest/test/gtest_color_test_.cc new file mode 100755 index 0000000000..f61ebb89b8 --- /dev/null +++ b/external/gtest/test/gtest_color_test_.cc @@ -0,0 +1,71 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// A helper program for testing how Google Test determines whether to use +// colors in the output. It prints "YES" and returns 1 if Google Test +// decides to use colors, and prints "NO" and returns 0 otherwise. + +#include <stdio.h> + +#include "gtest/gtest.h" + +// Indicates that this translation unit is part of Google Test's +// implementation. It must come before gtest-internal-inl.h is +// included, or there will be a compiler error. This trick is to +// prevent a user from accidentally including gtest-internal-inl.h in +// his code. +#define GTEST_IMPLEMENTATION_ 1 +#include "src/gtest-internal-inl.h" +#undef GTEST_IMPLEMENTATION_ + +using testing::internal::ShouldUseColor; + +// The purpose of this is to ensure that the UnitTest singleton is +// created before main() is entered, and thus that ShouldUseColor() +// works the same way as in a real Google-Test-based test. We don't +// actually run the TEST itself. +TEST(GTestColorTest, Dummy) { +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + + if (ShouldUseColor(true)) { + // Google Test decides to use colors in the output (assuming it + // goes to a TTY). + printf("YES\n"); + return 1; + } else { + // Google Test decides not to use colors in the output. + printf("NO\n"); + return 0; + } +} diff --git a/external/gtest/test/gtest_env_var_test.py b/external/gtest/test/gtest_env_var_test.py new file mode 100755 index 0000000000..ac24337fa1 --- /dev/null +++ b/external/gtest/test/gtest_env_var_test.py @@ -0,0 +1,103 @@ +#!/usr/bin/env python +# +# Copyright 2008, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Verifies that Google Test correctly parses environment variables.""" + +__author__ = 'wan@google.com (Zhanyong Wan)' + +import os +import gtest_test_utils + + +IS_WINDOWS = os.name == 'nt' +IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' + +COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_env_var_test_') + +environ = os.environ.copy() + + +def AssertEq(expected, actual): + if expected != actual: + print 'Expected: %s' % (expected,) + print ' Actual: %s' % (actual,) + raise AssertionError + + +def SetEnvVar(env_var, value): + """Sets the env variable to 'value'; unsets it when 'value' is None.""" + + if value is not None: + environ[env_var] = value + elif env_var in environ: + del environ[env_var] + + +def GetFlag(flag): + """Runs gtest_env_var_test_ and returns its output.""" + + args = [COMMAND] + if flag is not None: + args += [flag] + return gtest_test_utils.Subprocess(args, env=environ).output + + +def TestFlag(flag, test_val, default_val): + """Verifies that the given flag is affected by the corresponding env var.""" + + env_var = 'GTEST_' + flag.upper() + SetEnvVar(env_var, test_val) + AssertEq(test_val, GetFlag(flag)) + SetEnvVar(env_var, None) + AssertEq(default_val, GetFlag(flag)) + + +class GTestEnvVarTest(gtest_test_utils.TestCase): + def testEnvVarAffectsFlag(self): + """Tests that environment variable should affect the corresponding flag.""" + + TestFlag('break_on_failure', '1', '0') + TestFlag('color', 'yes', 'auto') + TestFlag('filter', 'FooTest.Bar', '*') + TestFlag('output', 'xml:tmp/foo.xml', '') + TestFlag('print_time', '0', '1') + TestFlag('repeat', '999', '1') + TestFlag('throw_on_failure', '1', '0') + TestFlag('death_test_style', 'threadsafe', 'fast') + TestFlag('catch_exceptions', '0', '1') + + if IS_LINUX: + TestFlag('death_test_use_fork', '1', '0') + TestFlag('stack_trace_depth', '0', '100') + + +if __name__ == '__main__': + gtest_test_utils.Main() diff --git a/external/gtest/test/gtest_env_var_test_.cc b/external/gtest/test/gtest_env_var_test_.cc new file mode 100755 index 0000000000..539afc9683 --- /dev/null +++ b/external/gtest/test/gtest_env_var_test_.cc @@ -0,0 +1,126 @@ +// Copyright 2008, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// A helper program for testing that Google Test parses the environment +// variables correctly. + +#include "gtest/gtest.h" + +#include <iostream> + +#define GTEST_IMPLEMENTATION_ 1 +#include "src/gtest-internal-inl.h" +#undef GTEST_IMPLEMENTATION_ + +using ::std::cout; + +namespace testing { + +// The purpose of this is to make the test more realistic by ensuring +// that the UnitTest singleton is created before main() is entered. +// We don't actually run the TEST itself. +TEST(GTestEnvVarTest, Dummy) { +} + +void PrintFlag(const char* flag) { + if (strcmp(flag, "break_on_failure") == 0) { + cout << GTEST_FLAG(break_on_failure); + return; + } + + if (strcmp(flag, "catch_exceptions") == 0) { + cout << GTEST_FLAG(catch_exceptions); + return; + } + + if (strcmp(flag, "color") == 0) { + cout << GTEST_FLAG(color); + return; + } + + if (strcmp(flag, "death_test_style") == 0) { + cout << GTEST_FLAG(death_test_style); + return; + } + + if (strcmp(flag, "death_test_use_fork") == 0) { + cout << GTEST_FLAG(death_test_use_fork); + return; + } + + if (strcmp(flag, "filter") == 0) { + cout << GTEST_FLAG(filter); + return; + } + + if (strcmp(flag, "output") == 0) { + cout << GTEST_FLAG(output); + return; + } + + if (strcmp(flag, "print_time") == 0) { + cout << GTEST_FLAG(print_time); + return; + } + + if (strcmp(flag, "repeat") == 0) { + cout << GTEST_FLAG(repeat); + return; + } + + if (strcmp(flag, "stack_trace_depth") == 0) { + cout << GTEST_FLAG(stack_trace_depth); + return; + } + + if (strcmp(flag, "throw_on_failure") == 0) { + cout << GTEST_FLAG(throw_on_failure); + return; + } + + cout << "Invalid flag name " << flag + << ".
Valid names are break_on_failure, color, filter, etc.\n"; + exit(1); +} + +} // namespace testing + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + + if (argc != 2) { + cout << "Usage: gtest_env_var_test_ NAME_OF_FLAG\n"; + return 1; + } + + testing::PrintFlag(argv[1]); + return 0; +} diff --git a/external/gtest/test/gtest_environment_test.cc b/external/gtest/test/gtest_environment_test.cc new file mode 100755 index 0000000000..3cff19e70e --- /dev/null +++ b/external/gtest/test/gtest_environment_test.cc @@ -0,0 +1,192 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Tests using global test environments. + +#include <stdlib.h> +#include <stdio.h> +#include "gtest/gtest.h" + +#define GTEST_IMPLEMENTATION_ 1 // Required for the next #include. +#include "src/gtest-internal-inl.h" +#undef GTEST_IMPLEMENTATION_ + +namespace testing { +GTEST_DECLARE_string_(filter); +} + +namespace { + +enum FailureType { + NO_FAILURE, NON_FATAL_FAILURE, FATAL_FAILURE +}; + +// For testing using global test environments. +class MyEnvironment : public testing::Environment { + public: + MyEnvironment() { Reset(); } + + // Depending on the value of failure_in_set_up_, SetUp() will + // generate a non-fatal failure, generate a fatal failure, or + // succeed. + virtual void SetUp() { + set_up_was_run_ = true; + + switch (failure_in_set_up_) { + case NON_FATAL_FAILURE: + ADD_FAILURE() << "Expected non-fatal failure in global set-up."; + break; + case FATAL_FAILURE: + FAIL() << "Expected fatal failure in global set-up."; + break; + default: + break; + } + } + + // Generates a non-fatal failure. + virtual void TearDown() { + tear_down_was_run_ = true; + ADD_FAILURE() << "Expected non-fatal failure in global tear-down."; + } + + // Resets the state of the environment s.t. it can be reused.
+ void Reset() { + failure_in_set_up_ = NO_FAILURE; + set_up_was_run_ = false; + tear_down_was_run_ = false; + } + + // We call this function to set the type of failure SetUp() should + // generate. + void set_failure_in_set_up(FailureType type) { + failure_in_set_up_ = type; + } + + // Was SetUp() run? + bool set_up_was_run() const { return set_up_was_run_; } + + // Was TearDown() run? + bool tear_down_was_run() const { return tear_down_was_run_; } + + private: + FailureType failure_in_set_up_; + bool set_up_was_run_; + bool tear_down_was_run_; +}; + +// Was the TEST run? +bool test_was_run; + +// The sole purpose of this TEST is to enable us to check whether it +// was run. +TEST(FooTest, Bar) { + test_was_run = true; +} + +// Prints the message and aborts the program if condition is false. +void Check(bool condition, const char* msg) { + if (!condition) { + printf("FAILED: %s\n", msg); + testing::internal::posix::Abort(); + } +} + +// Runs the tests. Return true iff successful. +// +// The 'failure' parameter specifies the type of failure that should +// be generated by the global set-up. +int RunAllTests(MyEnvironment* env, FailureType failure) { + env->Reset(); + env->set_failure_in_set_up(failure); + test_was_run = false; + testing::internal::GetUnitTestImpl()->ClearAdHocTestResult(); + return RUN_ALL_TESTS(); +} + +} // namespace + +int main(int argc, char **argv) { + testing::InitGoogleTest(&argc, argv); + + // Registers a global test environment, and verifies that the + // registration function returns its argument. + MyEnvironment* const env = new MyEnvironment; + Check(testing::AddGlobalTestEnvironment(env) == env, + "AddGlobalTestEnvironment() should return its argument."); + + // Verifies that RUN_ALL_TESTS() runs the tests when the global + // set-up is successful. + Check(RunAllTests(env, NO_FAILURE) != 0, + "RUN_ALL_TESTS() should return non-zero, as the global tear-down " + "should generate a failure."); + Check(test_was_run, + "The tests should run, as the global set-up should generate no " + "failure"); + Check(env->tear_down_was_run(), + "The global tear-down should run, as the global set-up was run."); + + // Verifies that RUN_ALL_TESTS() runs the tests when the global + // set-up generates no fatal failure. + Check(RunAllTests(env, NON_FATAL_FAILURE) != 0, + "RUN_ALL_TESTS() should return non-zero, as both the global set-up " + "and the global tear-down should generate a non-fatal failure."); + Check(test_was_run, + "The tests should run, as the global set-up should generate no " + "fatal failure."); + Check(env->tear_down_was_run(), + "The global tear-down should run, as the global set-up was run."); + + // Verifies that RUN_ALL_TESTS() runs no test when the global set-up + // generates a fatal failure. + Check(RunAllTests(env, FATAL_FAILURE) != 0, + "RUN_ALL_TESTS() should return non-zero, as the global set-up " + "should generate a fatal failure."); + Check(!test_was_run, + "The tests should not run, as the global set-up should generate " + "a fatal failure."); + Check(env->tear_down_was_run(), + "The global tear-down should run, as the global set-up was run."); + + // Verifies that RUN_ALL_TESTS() doesn't do global set-up or + // tear-down when there is no test to run. 
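  // Setting the flag programmatically below has the same effect as passing
  // it on the command line. A sketch of the equivalent invocation
  // (hypothetical binary name):
  //
  //   ./gtest_environment_test_ --gtest_filter=-*
  //
  // "-*" is a negative pattern matching every test, so none are selected.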
+ testing::GTEST_FLAG(filter) = "-*"; + Check(RunAllTests(env, NO_FAILURE) == 0, + "RUN_ALL_TESTS() should return zero, as there is no test to run."); + Check(!env->set_up_was_run(), + "The global set-up should not run, as there is no test to run."); + Check(!env->tear_down_was_run(), + "The global tear-down should not run, " + "as the global set-up was not run."); + + printf("PASS\n"); + return 0; +} diff --git a/external/gtest/test/gtest_filter_unittest.py b/external/gtest/test/gtest_filter_unittest.py new file mode 100755 index 0000000000..0d1a770058 --- /dev/null +++ b/external/gtest/test/gtest_filter_unittest.py @@ -0,0 +1,633 @@ +#!/usr/bin/env python +# +# Copyright 2005 Google Inc. All Rights Reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit test for Google Test test filters. + +A user can specify which test(s) in a Google Test program to run via either +the GTEST_FILTER environment variable or the --gtest_filter flag. +This script tests such functionality by invoking +gtest_filter_unittest_ (a program written with Google Test) with different +environments and command line flags. + +Note that test sharding may also influence which tests are filtered. Therefore, +we test that here also. +""" + +__author__ = 'wan@google.com (Zhanyong Wan)' + +import os +import re +import sets +import sys + +import gtest_test_utils + +# Constants. + +# Checks if this platform can pass empty environment variables to child +# processes. We set an env variable to an empty string and invoke a python +# script in a subprocess to print whether the variable is STILL in +# os.environ. We then use 'eval' to parse the child's output so that an +# exception is thrown if the input is anything other than 'True' nor 'False'. +os.environ['EMPTY_VAR'] = '' +child = gtest_test_utils.Subprocess( + [sys.executable, '-c', 'import os; print \'EMPTY_VAR\' in os.environ']) +CAN_PASS_EMPTY_ENV = eval(child.output) + + +# Check if this platform can unset environment variables in child processes. 
+# We set an env variable to a non-empty string, unset it, and invoke +# a python script in a subprocess to print whether the variable +# is NO LONGER in os.environ. +# We use 'eval' to parse the child's output so that an exception +# is thrown if the input is neither 'True' nor 'False'. +os.environ['UNSET_VAR'] = 'X' +del os.environ['UNSET_VAR'] +child = gtest_test_utils.Subprocess( + [sys.executable, '-c', 'import os; print \'UNSET_VAR\' not in os.environ']) +CAN_UNSET_ENV = eval(child.output) + + +# Checks if we should test with an empty filter. This doesn't +# make sense on platforms that cannot pass empty env variables (Win32) +# and on platforms that cannot unset variables (since we cannot tell +# the difference between "" and NULL -- Borland and Solaris < 5.10) +CAN_TEST_EMPTY_FILTER = (CAN_PASS_EMPTY_ENV and CAN_UNSET_ENV) + + +# The environment variable for specifying the test filters. +FILTER_ENV_VAR = 'GTEST_FILTER' + +# The environment variables for test sharding. +TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' +SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' +SHARD_STATUS_FILE_ENV_VAR = 'GTEST_SHARD_STATUS_FILE' + +# The command line flag for specifying the test filters. +FILTER_FLAG = 'gtest_filter' + +# The command line flag for including disabled tests. +ALSO_RUN_DISABED_TESTS_FLAG = 'gtest_also_run_disabled_tests' + +# Command to run the gtest_filter_unittest_ program. +COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_filter_unittest_') + +# Regex for determining whether parameterized tests are enabled in the binary. +PARAM_TEST_REGEX = re.compile(r'/ParamTest') + +# Regex for parsing test case names from Google Test's output. +TEST_CASE_REGEX = re.compile(r'^\[\-+\] \d+ tests? from (\w+(/\w+)?)') + +# Regex for parsing test names from Google Test's output. +TEST_REGEX = re.compile(r'^\[\s*RUN\s*\].*\.(\w+(/\w+)?)') + +# The command line flag to tell Google Test to output the list of tests it +# will run. +LIST_TESTS_FLAG = '--gtest_list_tests' + +# Indicates whether Google Test supports death tests. +SUPPORTS_DEATH_TESTS = 'HasDeathTest' in gtest_test_utils.Subprocess( + [COMMAND, LIST_TESTS_FLAG]).output + +# Full names of all tests in gtest_filter_unittests_. +PARAM_TESTS = [ + 'SeqP/ParamTest.TestX/0', + 'SeqP/ParamTest.TestX/1', + 'SeqP/ParamTest.TestY/0', + 'SeqP/ParamTest.TestY/1', + 'SeqQ/ParamTest.TestX/0', + 'SeqQ/ParamTest.TestX/1', + 'SeqQ/ParamTest.TestY/0', + 'SeqQ/ParamTest.TestY/1', + ] + +DISABLED_TESTS = [ + 'BarTest.DISABLED_TestFour', + 'BarTest.DISABLED_TestFive', + 'BazTest.DISABLED_TestC', + 'DISABLED_FoobarTest.Test1', + 'DISABLED_FoobarTest.DISABLED_Test2', + 'DISABLED_FoobarbazTest.TestA', + ] + +if SUPPORTS_DEATH_TESTS: + DEATH_TESTS = [ + 'HasDeathTest.Test1', + 'HasDeathTest.Test2', + ] +else: + DEATH_TESTS = [] + +# All the non-disabled tests. +ACTIVE_TESTS = [ + 'FooTest.Abc', + 'FooTest.Xyz', + + 'BarTest.TestOne', + 'BarTest.TestTwo', + 'BarTest.TestThree', + + 'BazTest.TestOne', + 'BazTest.TestA', + 'BazTest.TestB', + ] + DEATH_TESTS + PARAM_TESTS + +param_tests_present = None + +# Utilities. 
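# A recap of the filter syntax these tests exercise: a filter is a
# ':'-separated list of wildcard patterns, and any patterns after an optional
# '-' are negative. A sketch, assuming the built binary is invoked directly:
#
#   GTEST_FILTER='Foo*.*:Bar*.*-BarTest.TestOne' ./gtest_filter_unittest_
#
# runs the FooTest and BarTest tests except BarTest.TestOne.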
+ +environ = os.environ.copy() + + +def SetEnvVar(env_var, value): + """Sets the env variable to 'value'; unsets it when 'value' is None.""" + + if value is not None: + environ[env_var] = value + elif env_var in environ: + del environ[env_var] + + +def RunAndReturnOutput(args = None): + """Runs the test program and returns its output.""" + + return gtest_test_utils.Subprocess([COMMAND] + (args or []), + env=environ).output + + +def RunAndExtractTestList(args = None): + """Runs the test program and returns its exit code and a list of tests run.""" + + p = gtest_test_utils.Subprocess([COMMAND] + (args or []), env=environ) + tests_run = [] + test_case = '' + test = '' + for line in p.output.split('\n'): + match = TEST_CASE_REGEX.match(line) + if match is not None: + test_case = match.group(1) + else: + match = TEST_REGEX.match(line) + if match is not None: + test = match.group(1) + tests_run.append(test_case + '.' + test) + return (tests_run, p.exit_code) + + +def InvokeWithModifiedEnv(extra_env, function, *args, **kwargs): + """Runs the given function and arguments in a modified environment.""" + try: + original_env = environ.copy() + environ.update(extra_env) + return function(*args, **kwargs) + finally: + environ.clear() + environ.update(original_env) + + +def RunWithSharding(total_shards, shard_index, command): + """Runs a test program shard and returns exit code and a list of tests run.""" + + extra_env = {SHARD_INDEX_ENV_VAR: str(shard_index), + TOTAL_SHARDS_ENV_VAR: str(total_shards)} + return InvokeWithModifiedEnv(extra_env, RunAndExtractTestList, command) + +# The unit test. + + +class GTestFilterUnitTest(gtest_test_utils.TestCase): + """Tests the env variable or the command line flag to filter tests.""" + + # Utilities. + + def AssertSetEqual(self, lhs, rhs): + """Asserts that two sets are equal.""" + + for elem in lhs: + self.assert_(elem in rhs, '%s in %s' % (elem, rhs)) + + for elem in rhs: + self.assert_(elem in lhs, '%s in %s' % (elem, lhs)) + + def AssertPartitionIsValid(self, set_var, list_of_sets): + """Asserts that list_of_sets is a valid partition of set_var.""" + + full_partition = [] + for slice_var in list_of_sets: + full_partition.extend(slice_var) + self.assertEqual(len(set_var), len(full_partition)) + self.assertEqual(sets.Set(set_var), sets.Set(full_partition)) + + def AdjustForParameterizedTests(self, tests_to_run): + """Adjust tests_to_run in case value parameterized tests are disabled.""" + + global param_tests_present + if not param_tests_present: + return list(sets.Set(tests_to_run) - sets.Set(PARAM_TESTS)) + else: + return tests_to_run + + def RunAndVerify(self, gtest_filter, tests_to_run): + """Checks that the binary runs correct set of tests for a given filter.""" + + tests_to_run = self.AdjustForParameterizedTests(tests_to_run) + + # First, tests using the environment variable. + + # Windows removes empty variables from the environment when passing it + # to a new process. This means it is impossible to pass an empty filter + # into a process using the environment variable. However, we can still + # test the case when the variable is not supplied (i.e., gtest_filter is + # None). + # pylint: disable-msg=C6403 + if CAN_TEST_EMPTY_FILTER or gtest_filter != '': + SetEnvVar(FILTER_ENV_VAR, gtest_filter) + tests_run = RunAndExtractTestList()[0] + SetEnvVar(FILTER_ENV_VAR, None) + self.AssertSetEqual(tests_run, tests_to_run) + # pylint: enable-msg=C6403 + + # Next, tests using the command line flag. 
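    # (When both are given, --gtest_filter takes precedence over GTEST_FILTER;
    # testFlagOverridesEnvVar below checks exactly that.)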
+ + if gtest_filter is None: + args = [] + else: + args = ['--%s=%s' % (FILTER_FLAG, gtest_filter)] + + tests_run = RunAndExtractTestList(args)[0] + self.AssertSetEqual(tests_run, tests_to_run) + + def RunAndVerifyWithSharding(self, gtest_filter, total_shards, tests_to_run, + args=None, check_exit_0=False): + """Checks that binary runs correct tests for the given filter and shard. + + Runs all shards of gtest_filter_unittest_ with the given filter, and + verifies that the right set of tests were run. The union of tests run + on each shard should be identical to tests_to_run, without duplicates. + + Args: + gtest_filter: A filter to apply to the tests. + total_shards: A total number of shards to split test run into. + tests_to_run: A set of tests expected to run. + args : Arguments to pass to the to the test binary. + check_exit_0: When set to a true value, make sure that all shards + return 0. + """ + + tests_to_run = self.AdjustForParameterizedTests(tests_to_run) + + # Windows removes empty variables from the environment when passing it + # to a new process. This means it is impossible to pass an empty filter + # into a process using the environment variable. However, we can still + # test the case when the variable is not supplied (i.e., gtest_filter is + # None). + # pylint: disable-msg=C6403 + if CAN_TEST_EMPTY_FILTER or gtest_filter != '': + SetEnvVar(FILTER_ENV_VAR, gtest_filter) + partition = [] + for i in range(0, total_shards): + (tests_run, exit_code) = RunWithSharding(total_shards, i, args) + if check_exit_0: + self.assertEqual(0, exit_code) + partition.append(tests_run) + + self.AssertPartitionIsValid(tests_to_run, partition) + SetEnvVar(FILTER_ENV_VAR, None) + # pylint: enable-msg=C6403 + + def RunAndVerifyAllowingDisabled(self, gtest_filter, tests_to_run): + """Checks that the binary runs correct set of tests for the given filter. + + Runs gtest_filter_unittest_ with the given filter, and enables + disabled tests. Verifies that the right set of tests were run. + + Args: + gtest_filter: A filter to apply to the tests. + tests_to_run: A set of tests expected to run. + """ + + tests_to_run = self.AdjustForParameterizedTests(tests_to_run) + + # Construct the command line. + args = ['--%s' % ALSO_RUN_DISABED_TESTS_FLAG] + if gtest_filter is not None: + args.append('--%s=%s' % (FILTER_FLAG, gtest_filter)) + + tests_run = RunAndExtractTestList(args)[0] + self.AssertSetEqual(tests_run, tests_to_run) + + def setUp(self): + """Sets up test case. + + Determines whether value-parameterized tests are enabled in the binary and + sets the flags accordingly. 
+ """ + + global param_tests_present + if param_tests_present is None: + param_tests_present = PARAM_TEST_REGEX.search( + RunAndReturnOutput()) is not None + + def testDefaultBehavior(self): + """Tests the behavior of not specifying the filter.""" + + self.RunAndVerify(None, ACTIVE_TESTS) + + def testDefaultBehaviorWithShards(self): + """Tests the behavior without the filter, with sharding enabled.""" + + self.RunAndVerifyWithSharding(None, 1, ACTIVE_TESTS) + self.RunAndVerifyWithSharding(None, 2, ACTIVE_TESTS) + self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) - 1, ACTIVE_TESTS) + self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS), ACTIVE_TESTS) + self.RunAndVerifyWithSharding(None, len(ACTIVE_TESTS) + 1, ACTIVE_TESTS) + + def testEmptyFilter(self): + """Tests an empty filter.""" + + self.RunAndVerify('', []) + self.RunAndVerifyWithSharding('', 1, []) + self.RunAndVerifyWithSharding('', 2, []) + + def testBadFilter(self): + """Tests a filter that matches nothing.""" + + self.RunAndVerify('BadFilter', []) + self.RunAndVerifyAllowingDisabled('BadFilter', []) + + def testFullName(self): + """Tests filtering by full name.""" + + self.RunAndVerify('FooTest.Xyz', ['FooTest.Xyz']) + self.RunAndVerifyAllowingDisabled('FooTest.Xyz', ['FooTest.Xyz']) + self.RunAndVerifyWithSharding('FooTest.Xyz', 5, ['FooTest.Xyz']) + + def testUniversalFilters(self): + """Tests filters that match everything.""" + + self.RunAndVerify('*', ACTIVE_TESTS) + self.RunAndVerify('*.*', ACTIVE_TESTS) + self.RunAndVerifyWithSharding('*.*', len(ACTIVE_TESTS) - 3, ACTIVE_TESTS) + self.RunAndVerifyAllowingDisabled('*', ACTIVE_TESTS + DISABLED_TESTS) + self.RunAndVerifyAllowingDisabled('*.*', ACTIVE_TESTS + DISABLED_TESTS) + + def testFilterByTestCase(self): + """Tests filtering by test case name.""" + + self.RunAndVerify('FooTest.*', ['FooTest.Abc', 'FooTest.Xyz']) + + BAZ_TESTS = ['BazTest.TestOne', 'BazTest.TestA', 'BazTest.TestB'] + self.RunAndVerify('BazTest.*', BAZ_TESTS) + self.RunAndVerifyAllowingDisabled('BazTest.*', + BAZ_TESTS + ['BazTest.DISABLED_TestC']) + + def testFilterByTest(self): + """Tests filtering by test name.""" + + self.RunAndVerify('*.TestOne', ['BarTest.TestOne', 'BazTest.TestOne']) + + def testFilterDisabledTests(self): + """Select only the disabled tests to run.""" + + self.RunAndVerify('DISABLED_FoobarTest.Test1', []) + self.RunAndVerifyAllowingDisabled('DISABLED_FoobarTest.Test1', + ['DISABLED_FoobarTest.Test1']) + + self.RunAndVerify('*DISABLED_*', []) + self.RunAndVerifyAllowingDisabled('*DISABLED_*', DISABLED_TESTS) + + self.RunAndVerify('*.DISABLED_*', []) + self.RunAndVerifyAllowingDisabled('*.DISABLED_*', [ + 'BarTest.DISABLED_TestFour', + 'BarTest.DISABLED_TestFive', + 'BazTest.DISABLED_TestC', + 'DISABLED_FoobarTest.DISABLED_Test2', + ]) + + self.RunAndVerify('DISABLED_*', []) + self.RunAndVerifyAllowingDisabled('DISABLED_*', [ + 'DISABLED_FoobarTest.Test1', + 'DISABLED_FoobarTest.DISABLED_Test2', + 'DISABLED_FoobarbazTest.TestA', + ]) + + def testWildcardInTestCaseName(self): + """Tests using wildcard in the test case name.""" + + self.RunAndVerify('*a*.*', [ + 'BarTest.TestOne', + 'BarTest.TestTwo', + 'BarTest.TestThree', + + 'BazTest.TestOne', + 'BazTest.TestA', + 'BazTest.TestB', ] + DEATH_TESTS + PARAM_TESTS) + + def testWildcardInTestName(self): + """Tests using wildcard in the test name.""" + + self.RunAndVerify('*.*A*', ['FooTest.Abc', 'BazTest.TestA']) + + def testFilterWithoutDot(self): + """Tests a filter that has no '.' 
in it.""" + + self.RunAndVerify('*z*', [ + 'FooTest.Xyz', + + 'BazTest.TestOne', + 'BazTest.TestA', + 'BazTest.TestB', + ]) + + def testTwoPatterns(self): + """Tests filters that consist of two patterns.""" + + self.RunAndVerify('Foo*.*:*A*', [ + 'FooTest.Abc', + 'FooTest.Xyz', + + 'BazTest.TestA', + ]) + + # An empty pattern + a non-empty one + self.RunAndVerify(':*A*', ['FooTest.Abc', 'BazTest.TestA']) + + def testThreePatterns(self): + """Tests filters that consist of three patterns.""" + + self.RunAndVerify('*oo*:*A*:*One', [ + 'FooTest.Abc', + 'FooTest.Xyz', + + 'BarTest.TestOne', + + 'BazTest.TestOne', + 'BazTest.TestA', + ]) + + # The 2nd pattern is empty. + self.RunAndVerify('*oo*::*One', [ + 'FooTest.Abc', + 'FooTest.Xyz', + + 'BarTest.TestOne', + + 'BazTest.TestOne', + ]) + + # The last 2 patterns are empty. + self.RunAndVerify('*oo*::', [ + 'FooTest.Abc', + 'FooTest.Xyz', + ]) + + def testNegativeFilters(self): + self.RunAndVerify('*-BazTest.TestOne', [ + 'FooTest.Abc', + 'FooTest.Xyz', + + 'BarTest.TestOne', + 'BarTest.TestTwo', + 'BarTest.TestThree', + + 'BazTest.TestA', + 'BazTest.TestB', + ] + DEATH_TESTS + PARAM_TESTS) + + self.RunAndVerify('*-FooTest.Abc:BazTest.*', [ + 'FooTest.Xyz', + + 'BarTest.TestOne', + 'BarTest.TestTwo', + 'BarTest.TestThree', + ] + DEATH_TESTS + PARAM_TESTS) + + self.RunAndVerify('BarTest.*-BarTest.TestOne', [ + 'BarTest.TestTwo', + 'BarTest.TestThree', + ]) + + # Tests without leading '*'. + self.RunAndVerify('-FooTest.Abc:FooTest.Xyz:BazTest.*', [ + 'BarTest.TestOne', + 'BarTest.TestTwo', + 'BarTest.TestThree', + ] + DEATH_TESTS + PARAM_TESTS) + + # Value parameterized tests. + self.RunAndVerify('*/*', PARAM_TESTS) + + # Value parameterized tests filtering by the sequence name. + self.RunAndVerify('SeqP/*', [ + 'SeqP/ParamTest.TestX/0', + 'SeqP/ParamTest.TestX/1', + 'SeqP/ParamTest.TestY/0', + 'SeqP/ParamTest.TestY/1', + ]) + + # Value parameterized tests filtering by the test name. + self.RunAndVerify('*/0', [ + 'SeqP/ParamTest.TestX/0', + 'SeqP/ParamTest.TestY/0', + 'SeqQ/ParamTest.TestX/0', + 'SeqQ/ParamTest.TestY/0', + ]) + + def testFlagOverridesEnvVar(self): + """Tests that the filter flag overrides the filtering env. variable.""" + + SetEnvVar(FILTER_ENV_VAR, 'Foo*') + args = ['--%s=%s' % (FILTER_FLAG, '*One')] + tests_run = RunAndExtractTestList(args)[0] + SetEnvVar(FILTER_ENV_VAR, None) + + self.AssertSetEqual(tests_run, ['BarTest.TestOne', 'BazTest.TestOne']) + + def testShardStatusFileIsCreated(self): + """Tests that the shard file is created if specified in the environment.""" + + shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), + 'shard_status_file') + self.assert_(not os.path.exists(shard_status_file)) + + extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} + try: + InvokeWithModifiedEnv(extra_env, RunAndReturnOutput) + finally: + self.assert_(os.path.exists(shard_status_file)) + os.remove(shard_status_file) + + def testShardStatusFileIsCreatedWithListTests(self): + """Tests that the shard file is created with the "list_tests" flag.""" + + shard_status_file = os.path.join(gtest_test_utils.GetTempDir(), + 'shard_status_file2') + self.assert_(not os.path.exists(shard_status_file)) + + extra_env = {SHARD_STATUS_FILE_ENV_VAR: shard_status_file} + try: + output = InvokeWithModifiedEnv(extra_env, + RunAndReturnOutput, + [LIST_TESTS_FLAG]) + finally: + # This assertion ensures that Google Test enumerated the tests as + # opposed to running them. 
+ self.assert_('[==========]' not in output, + 'Unexpected output during test enumeration.\n' + 'Please ensure that LIST_TESTS_FLAG is assigned the\n' + 'correct flag value for listing Google Test tests.') + + self.assert_(os.path.exists(shard_status_file)) + os.remove(shard_status_file) + + if SUPPORTS_DEATH_TESTS: + def testShardingWorksWithDeathTests(self): + """Tests integration with death tests and sharding.""" + + gtest_filter = 'HasDeathTest.*:SeqP/*' + expected_tests = [ + 'HasDeathTest.Test1', + 'HasDeathTest.Test2', + + 'SeqP/ParamTest.TestX/0', + 'SeqP/ParamTest.TestX/1', + 'SeqP/ParamTest.TestY/0', + 'SeqP/ParamTest.TestY/1', + ] + + for flag in ['--gtest_death_test_style=threadsafe', + '--gtest_death_test_style=fast']: + self.RunAndVerifyWithSharding(gtest_filter, 3, expected_tests, + check_exit_0=True, args=[flag]) + self.RunAndVerifyWithSharding(gtest_filter, 5, expected_tests, + check_exit_0=True, args=[flag]) + +if __name__ == '__main__': + gtest_test_utils.Main() diff --git a/external/gtest/test/gtest_filter_unittest_.cc b/external/gtest/test/gtest_filter_unittest_.cc new file mode 100755 index 0000000000..77deffc38f --- /dev/null +++ b/external/gtest/test/gtest_filter_unittest_.cc @@ -0,0 +1,140 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Unit test for Google Test test filters. +// +// A user can specify which test(s) in a Google Test program to run via +// either the GTEST_FILTER environment variable or the --gtest_filter +// flag. This is used for testing such functionality. +// +// The program will be invoked from a Python unit test. Don't run it +// directly. + +#include "gtest/gtest.h" + +namespace { + +// Test case FooTest. + +class FooTest : public testing::Test { +}; + +TEST_F(FooTest, Abc) { +} + +TEST_F(FooTest, Xyz) { + FAIL() << "Expected failure."; +} + +// Test case BarTest. 
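// (The DISABLED_ prefix below marks tests that are compiled and registered
// but skipped by default; they run only when --gtest_also_run_disabled_tests
// is passed, which is what the RunAndVerifyAllowingDisabled() checks in the
// Python driver rely on.)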
+ +TEST(BarTest, TestOne) { +} + +TEST(BarTest, TestTwo) { +} + +TEST(BarTest, TestThree) { +} + +TEST(BarTest, DISABLED_TestFour) { + FAIL() << "Expected failure."; +} + +TEST(BarTest, DISABLED_TestFive) { + FAIL() << "Expected failure."; +} + +// Test case BazTest. + +TEST(BazTest, TestOne) { + FAIL() << "Expected failure."; +} + +TEST(BazTest, TestA) { +} + +TEST(BazTest, TestB) { +} + +TEST(BazTest, DISABLED_TestC) { + FAIL() << "Expected failure."; +} + +// Test case HasDeathTest + +TEST(HasDeathTest, Test1) { + EXPECT_DEATH_IF_SUPPORTED(exit(1), ".*"); +} + +// We need at least two death tests to make sure that all the death tests +// aren't on the first shard. +TEST(HasDeathTest, Test2) { + EXPECT_DEATH_IF_SUPPORTED(exit(1), ".*"); +} + +// Test case FoobarTest + +TEST(DISABLED_FoobarTest, Test1) { + FAIL() << "Expected failure."; +} + +TEST(DISABLED_FoobarTest, DISABLED_Test2) { + FAIL() << "Expected failure."; +} + +// Test case FoobarbazTest + +TEST(DISABLED_FoobarbazTest, TestA) { + FAIL() << "Expected failure."; +} + +#if GTEST_HAS_PARAM_TEST +class ParamTest : public testing::TestWithParam<int> { +}; + +TEST_P(ParamTest, TestX) { +} + +TEST_P(ParamTest, TestY) { +} + +INSTANTIATE_TEST_CASE_P(SeqP, ParamTest, testing::Values(1, 2)); +INSTANTIATE_TEST_CASE_P(SeqQ, ParamTest, testing::Values(5, 6)); +#endif // GTEST_HAS_PARAM_TEST + +} // namespace + +int main(int argc, char **argv) { + ::testing::InitGoogleTest(&argc, argv); + + return RUN_ALL_TESTS(); +} diff --git a/external/gtest/test/gtest_help_test.py b/external/gtest/test/gtest_help_test.py new file mode 100755 index 0000000000..093c838d9e --- /dev/null +++ b/external/gtest/test/gtest_help_test.py @@ -0,0 +1,172 @@ +#!/usr/bin/env python +# +# Copyright 2009, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Tests the --help flag of Google C++ Testing Framework. + +SYNOPSIS + gtest_help_test.py --build_dir=BUILD/DIR + # where BUILD/DIR contains the built gtest_help_test_ file. 
+ gtest_help_test.py +""" + +__author__ = 'wan@google.com (Zhanyong Wan)' + +import os +import re +import gtest_test_utils + + +IS_LINUX = os.name == 'posix' and os.uname()[0] == 'Linux' +IS_WINDOWS = os.name == 'nt' + +PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_help_test_') +FLAG_PREFIX = '--gtest_' +DEATH_TEST_STYLE_FLAG = FLAG_PREFIX + 'death_test_style' +STREAM_RESULT_TO_FLAG = FLAG_PREFIX + 'stream_result_to' +UNKNOWN_FLAG = FLAG_PREFIX + 'unknown_flag_for_testing' +LIST_TESTS_FLAG = FLAG_PREFIX + 'list_tests' +INCORRECT_FLAG_VARIANTS = [re.sub('^--', '-', LIST_TESTS_FLAG), + re.sub('^--', '/', LIST_TESTS_FLAG), + re.sub('_', '-', LIST_TESTS_FLAG)] +INTERNAL_FLAG_FOR_TESTING = FLAG_PREFIX + 'internal_flag_for_testing' + +SUPPORTS_DEATH_TESTS = "DeathTest" in gtest_test_utils.Subprocess( + [PROGRAM_PATH, LIST_TESTS_FLAG]).output + +# The help message must match this regex. +HELP_REGEX = re.compile( + FLAG_PREFIX + r'list_tests.*' + + FLAG_PREFIX + r'filter=.*' + + FLAG_PREFIX + r'also_run_disabled_tests.*' + + FLAG_PREFIX + r'repeat=.*' + + FLAG_PREFIX + r'shuffle.*' + + FLAG_PREFIX + r'random_seed=.*' + + FLAG_PREFIX + r'color=.*' + + FLAG_PREFIX + r'print_time.*' + + FLAG_PREFIX + r'output=.*' + + FLAG_PREFIX + r'break_on_failure.*' + + FLAG_PREFIX + r'throw_on_failure.*' + + FLAG_PREFIX + r'catch_exceptions=0.*', + re.DOTALL) + + +def RunWithFlag(flag): + """Runs gtest_help_test_ with the given flag. + + Returns: + the exit code and the text output as a tuple. + Args: + flag: the command-line flag to pass to gtest_help_test_, or None. + """ + + if flag is None: + command = [PROGRAM_PATH] + else: + command = [PROGRAM_PATH, flag] + child = gtest_test_utils.Subprocess(command) + return child.exit_code, child.output + + +class GTestHelpTest(gtest_test_utils.TestCase): + """Tests the --help flag and its equivalent forms.""" + + def TestHelpFlag(self, flag): + """Verifies correct behavior when help flag is specified. + + The right message must be printed and the tests must + skipped when the given flag is specified. + + Args: + flag: A flag to pass to the binary or None. + """ + + exit_code, output = RunWithFlag(flag) + self.assertEquals(0, exit_code) + self.assert_(HELP_REGEX.search(output), output) + + if IS_LINUX: + self.assert_(STREAM_RESULT_TO_FLAG in output, output) + else: + self.assert_(STREAM_RESULT_TO_FLAG not in output, output) + + if SUPPORTS_DEATH_TESTS and not IS_WINDOWS: + self.assert_(DEATH_TEST_STYLE_FLAG in output, output) + else: + self.assert_(DEATH_TEST_STYLE_FLAG not in output, output) + + def TestNonHelpFlag(self, flag): + """Verifies correct behavior when no help flag is specified. + + Verifies that when no help flag is specified, the tests are run + and the help message is not printed. + + Args: + flag: A flag to pass to the binary or None. 
+ """ + + exit_code, output = RunWithFlag(flag) + self.assert_(exit_code != 0) + self.assert_(not HELP_REGEX.search(output), output) + + def testPrintsHelpWithFullFlag(self): + self.TestHelpFlag('--help') + + def testPrintsHelpWithShortFlag(self): + self.TestHelpFlag('-h') + + def testPrintsHelpWithQuestionFlag(self): + self.TestHelpFlag('-?') + + def testPrintsHelpWithWindowsStyleQuestionFlag(self): + self.TestHelpFlag('/?') + + def testPrintsHelpWithUnrecognizedGoogleTestFlag(self): + self.TestHelpFlag(UNKNOWN_FLAG) + + def testPrintsHelpWithIncorrectFlagStyle(self): + for incorrect_flag in INCORRECT_FLAG_VARIANTS: + self.TestHelpFlag(incorrect_flag) + + def testRunsTestsWithoutHelpFlag(self): + """Verifies that when no help flag is specified, the tests are run + and the help message is not printed.""" + + self.TestNonHelpFlag(None) + + def testRunsTestsWithGtestInternalFlag(self): + """Verifies that the tests are run and no help message is printed when + a flag starting with Google Test prefix and 'internal_' is supplied.""" + + self.TestNonHelpFlag(INTERNAL_FLAG_FOR_TESTING) + + +if __name__ == '__main__': + gtest_test_utils.Main() diff --git a/external/gtest/test/gtest_help_test_.cc b/external/gtest/test/gtest_help_test_.cc new file mode 100755 index 0000000000..31f78c2441 --- /dev/null +++ b/external/gtest/test/gtest_help_test_.cc @@ -0,0 +1,46 @@ +// Copyright 2009, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// This program is meant to be run by gtest_help_test.py. Do not run +// it directly. + +#include "gtest/gtest.h" + +// When a help flag is specified, this program should skip the tests +// and exit with 0; otherwise the following test will be executed, +// causing this program to exit with a non-zero code. 
+TEST(HelpFlagTest, ShouldNotBeRun) { + ASSERT_TRUE(false) << "Tests shouldn't be run when --help is specified."; +} + +#if GTEST_HAS_DEATH_TEST +TEST(DeathTest, UsedByPythonScriptToDetectSupportForDeathTestsInThisBinary) {} +#endif diff --git a/external/gtest/test/gtest_list_tests_unittest.py b/external/gtest/test/gtest_list_tests_unittest.py new file mode 100755 index 0000000000..925b09d9cb --- /dev/null +++ b/external/gtest/test/gtest_list_tests_unittest.py @@ -0,0 +1,207 @@ +#!/usr/bin/env python +# +# Copyright 2006, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit test for Google Test's --gtest_list_tests flag. + +A user can ask Google Test to list all tests by specifying the +--gtest_list_tests flag. This script tests such functionality +by invoking gtest_list_tests_unittest_ (a program written with +Google Test) the command line flags. +""" + +__author__ = 'phanna@google.com (Patrick Hanna)' + +import gtest_test_utils +import re + + +# Constants. + +# The command line flag for enabling/disabling listing all tests. +LIST_TESTS_FLAG = 'gtest_list_tests' + +# Path to the gtest_list_tests_unittest_ program. +EXE_PATH = gtest_test_utils.GetTestExecutablePath('gtest_list_tests_unittest_') + +# The expected output when running gtest_list_tests_unittest_ with +# --gtest_list_tests +EXPECTED_OUTPUT_NO_FILTER_RE = re.compile(r"""FooDeathTest\. + Test1 +Foo\. + Bar1 + Bar2 + DISABLED_Bar3 +Abc\. + Xyz + Def +FooBar\. + Baz +FooTest\. + Test1 + DISABLED_Test2 + Test3 +TypedTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\. + TestA + TestB +TypedTest/1\. # TypeParam = int\s*\* + TestA + TestB +TypedTest/2\. # TypeParam = .*MyArray + TestA + TestB +My/TypeParamTest/0\. # TypeParam = (VeryLo{245}|class VeryLo{239})\.\.\. + TestA + TestB +My/TypeParamTest/1\. # TypeParam = int\s*\* + TestA + TestB +My/TypeParamTest/2\. # TypeParam = .*MyArray + TestA + TestB +MyInstantiation/ValueParamTest\. + TestA/0 # GetParam\(\) = one line + TestA/1 # GetParam\(\) = two\\nlines + TestA/2 # GetParam\(\) = a very\\nlo{241}\.\.\. 
+ TestB/0 # GetParam\(\) = one line + TestB/1 # GetParam\(\) = two\\nlines + TestB/2 # GetParam\(\) = a very\\nlo{241}\.\.\. +""") + +# The expected output when running gtest_list_tests_unittest_ with +# --gtest_list_tests and --gtest_filter=Foo*. +EXPECTED_OUTPUT_FILTER_FOO_RE = re.compile(r"""FooDeathTest\. + Test1 +Foo\. + Bar1 + Bar2 + DISABLED_Bar3 +FooBar\. + Baz +FooTest\. + Test1 + DISABLED_Test2 + Test3 +""") + +# Utilities. + + +def Run(args): + """Runs gtest_list_tests_unittest_ and returns the list of tests printed.""" + + return gtest_test_utils.Subprocess([EXE_PATH] + args, + capture_stderr=False).output + + +# The unit test. + +class GTestListTestsUnitTest(gtest_test_utils.TestCase): + """Tests using the --gtest_list_tests flag to list all tests.""" + + def RunAndVerify(self, flag_value, expected_output_re, other_flag): + """Runs gtest_list_tests_unittest_ and verifies that it prints + the correct tests. + + Args: + flag_value: value of the --gtest_list_tests flag; + None if the flag should not be present. + expected_output_re: regular expression that matches the expected + output after running command; + other_flag: a different flag to be passed to command + along with gtest_list_tests; + None if the flag should not be present. + """ + + if flag_value is None: + flag = '' + flag_expression = 'not set' + elif flag_value == '0': + flag = '--%s=0' % LIST_TESTS_FLAG + flag_expression = '0' + else: + flag = '--%s' % LIST_TESTS_FLAG + flag_expression = '1' + + args = [flag] + + if other_flag is not None: + args += [other_flag] + + output = Run(args) + + if expected_output_re: + self.assert_( + expected_output_re.match(output), + ('when %s is %s, the output of "%s" is "%s",\n' + 'which does not match regex "%s"' % + (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output, + expected_output_re.pattern))) + else: + self.assert_( + not EXPECTED_OUTPUT_NO_FILTER_RE.match(output), + ('when %s is %s, the output of "%s" is "%s"'% + (LIST_TESTS_FLAG, flag_expression, ' '.join(args), output))) + + def testDefaultBehavior(self): + """Tests the behavior of the default mode.""" + + self.RunAndVerify(flag_value=None, + expected_output_re=None, + other_flag=None) + + def testFlag(self): + """Tests using the --gtest_list_tests flag.""" + + self.RunAndVerify(flag_value='0', + expected_output_re=None, + other_flag=None) + self.RunAndVerify(flag_value='1', + expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE, + other_flag=None) + + def testOverrideNonFilterFlags(self): + """Tests that --gtest_list_tests overrides the non-filter flags.""" + + self.RunAndVerify(flag_value='1', + expected_output_re=EXPECTED_OUTPUT_NO_FILTER_RE, + other_flag='--gtest_break_on_failure') + + def testWithFilterFlags(self): + """Tests that --gtest_list_tests takes into account the + --gtest_filter flag.""" + + self.RunAndVerify(flag_value='1', + expected_output_re=EXPECTED_OUTPUT_FILTER_FOO_RE, + other_flag='--gtest_filter=Foo*') + + +if __name__ == '__main__': + gtest_test_utils.Main() diff --git a/external/gtest/test/gtest_list_tests_unittest_.cc b/external/gtest/test/gtest_list_tests_unittest_.cc new file mode 100755 index 0000000000..907c176ba9 --- /dev/null +++ b/external/gtest/test/gtest_list_tests_unittest_.cc @@ -0,0 +1,157 @@ +// Copyright 2006, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: phanna@google.com (Patrick Hanna) + +// Unit test for Google Test's --gtest_list_tests flag. +// +// A user can ask Google Test to list all tests that will run +// so that when using a filter, a user will know what +// tests to look for. The tests will not be run after listing. +// +// This program will be invoked from a Python unit test. +// Don't run it directly. + +#include "gtest/gtest.h" + +// Several different test cases and tests that will be listed. +TEST(Foo, Bar1) { +} + +TEST(Foo, Bar2) { +} + +TEST(Foo, DISABLED_Bar3) { +} + +TEST(Abc, Xyz) { +} + +TEST(Abc, Def) { +} + +TEST(FooBar, Baz) { +} + +class FooTest : public testing::Test { +}; + +TEST_F(FooTest, Test1) { +} + +TEST_F(FooTest, DISABLED_Test2) { +} + +TEST_F(FooTest, Test3) { +} + +TEST(FooDeathTest, Test1) { +} + +// A group of value-parameterized tests. + +class MyType { + public: + explicit MyType(const std::string& a_value) : value_(a_value) {} + + const std::string& value() const { return value_; } + + private: + std::string value_; +}; + +// Teaches Google Test how to print a MyType. +void PrintTo(const MyType& x, std::ostream* os) { + *os << x.value(); +} + +class ValueParamTest : public testing::TestWithParam { +}; + +TEST_P(ValueParamTest, TestA) { +} + +TEST_P(ValueParamTest, TestB) { +} + +INSTANTIATE_TEST_CASE_P( + MyInstantiation, ValueParamTest, + testing::Values(MyType("one line"), + MyType("two\nlines"), + MyType("a very\nloooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooong line"))); // NOLINT + +// A group of typed tests. + +// A deliberately long type name for testing the line-truncating +// behavior when printing a type parameter. 
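// Under --gtest_list_tests each typed-test case is printed with a
// "# TypeParam = ..." annotation, and an over-long type name is truncated
// with a trailing "..." (the shape EXPECTED_OUTPUT_NO_FILTER_RE in the
// Python driver matches), e.g.:
//
//   TypedTest/0.  # TypeParam = VeryLooo...
//     TestA
//     TestB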
+class VeryLoooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooooogName { // NOLINT +}; + +template +class TypedTest : public testing::Test { +}; + +template +class MyArray { +}; + +typedef testing::Types > MyTypes; + +TYPED_TEST_CASE(TypedTest, MyTypes); + +TYPED_TEST(TypedTest, TestA) { +} + +TYPED_TEST(TypedTest, TestB) { +} + +// A group of type-parameterized tests. + +template +class TypeParamTest : public testing::Test { +}; + +TYPED_TEST_CASE_P(TypeParamTest); + +TYPED_TEST_P(TypeParamTest, TestA) { +} + +TYPED_TEST_P(TypeParamTest, TestB) { +} + +REGISTER_TYPED_TEST_CASE_P(TypeParamTest, TestA, TestB); + +INSTANTIATE_TYPED_TEST_CASE_P(My, TypeParamTest, MyTypes); + +int main(int argc, char **argv) { + ::testing::InitGoogleTest(&argc, argv); + + return RUN_ALL_TESTS(); +} diff --git a/external/gtest/test/gtest_main_unittest.cc b/external/gtest/test/gtest_main_unittest.cc new file mode 100755 index 0000000000..ecd9bb876f --- /dev/null +++ b/external/gtest/test/gtest_main_unittest.cc @@ -0,0 +1,45 @@ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +#include "gtest/gtest.h" + +// Tests that we don't have to define main() when we link to +// gtest_main instead of gtest. + +namespace { + +TEST(GTestMainTest, ShouldSucceed) { +} + +} // namespace + +// We are using the main() function defined in src/gtest_main.cc, so +// we don't define it here. diff --git a/external/gtest/test/gtest_no_test_unittest.cc b/external/gtest/test/gtest_no_test_unittest.cc new file mode 100755 index 0000000000..292599af8d --- /dev/null +++ b/external/gtest/test/gtest_no_test_unittest.cc @@ -0,0 +1,56 @@ +// Copyright 2006, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Tests that a Google Test program that has no test defined can run +// successfully. +// +// Author: wan@google.com (Zhanyong Wan) + +#include "gtest/gtest.h" + +int main(int argc, char **argv) { + testing::InitGoogleTest(&argc, argv); + + // An ad-hoc assertion outside of all tests. + // + // This serves three purposes: + // + // 1. It verifies that an ad-hoc assertion can be executed even if + // no test is defined. + // 2. It verifies that a failed ad-hoc assertion causes the test + // program to fail. + // 3. We had a bug where the XML output won't be generated if an + // assertion is executed before RUN_ALL_TESTS() is called, even + // though --gtest_output=xml is specified. This makes sure the + // bug is fixed and doesn't regress. + EXPECT_EQ(1, 2); + + // The above EXPECT_EQ() should cause RUN_ALL_TESTS() to return non-zero. + return RUN_ALL_TESTS() ? 0 : 1; +} diff --git a/external/gtest/test/gtest_output_test.py b/external/gtest/test/gtest_output_test.py new file mode 100755 index 0000000000..f409e2a78b --- /dev/null +++ b/external/gtest/test/gtest_output_test.py @@ -0,0 +1,335 @@ +#!/usr/bin/env python +# +# Copyright 2008, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. 
+# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Tests the text output of Google C++ Testing Framework. + +SYNOPSIS + gtest_output_test.py --build_dir=BUILD/DIR --gengolden + # where BUILD/DIR contains the built gtest_output_test_ file. + gtest_output_test.py --gengolden + gtest_output_test.py +""" + +__author__ = 'wan@google.com (Zhanyong Wan)' + +import os +import re +import sys +import gtest_test_utils + + +# The flag for generating the golden file +GENGOLDEN_FLAG = '--gengolden' +CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS' + +IS_WINDOWS = os.name == 'nt' + +# TODO(vladl@google.com): remove the _lin suffix. +GOLDEN_NAME = 'gtest_output_test_golden_lin.txt' + +PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_') + +# At least one command we exercise must not have the +# --gtest_internal_skip_environment_and_ad_hoc_tests flag. +COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests']) +COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes']) +COMMAND_WITH_TIME = ({}, [PROGRAM_PATH, + '--gtest_print_time', + '--gtest_internal_skip_environment_and_ad_hoc_tests', + '--gtest_filter=FatalFailureTest.*:LoggingTest.*']) +COMMAND_WITH_DISABLED = ( + {}, [PROGRAM_PATH, + '--gtest_also_run_disabled_tests', + '--gtest_internal_skip_environment_and_ad_hoc_tests', + '--gtest_filter=*DISABLED_*']) +COMMAND_WITH_SHARDING = ( + {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'}, + [PROGRAM_PATH, + '--gtest_internal_skip_environment_and_ad_hoc_tests', + '--gtest_filter=PassingTest.*']) + +GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME) + + +def ToUnixLineEnding(s): + """Changes all Windows/Mac line endings in s to UNIX line endings.""" + + return s.replace('\r\n', '\n').replace('\r', '\n') + + +def RemoveLocations(test_output): + """Removes all file location info from a Google Test program's output. + + Args: + test_output: the output of a Google Test program. + + Returns: + output with all file location info (in the form of + 'DIRECTORY/FILE_NAME:LINE_NUMBER: 'or + 'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by + 'FILE_NAME:#: '. + """ + + return re.sub(r'.*[/\\](.+)(\:\d+|\(\d+\))\: ', r'\1:#: ', test_output) + + +def RemoveStackTraceDetails(output): + """Removes all stack traces from a Google Test program's output.""" + + # *? means "find the shortest string that matches". + return re.sub(r'Stack trace:(.|\n)*?\n\n', + 'Stack trace: (omitted)\n\n', output) + + +def RemoveStackTraces(output): + """Removes all traces of stack traces from a Google Test program's output.""" + + # *? means "find the shortest string that matches". 
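  # For example, given the normalized text (made-up trace line)
  #   "foo.cc:#: Failure\nStack trace:\n  0xabc123 Foo()\n\nNext test"
  # everything from "Stack trace:" through the first blank line is dropped,
  # leaving "foo.cc:#: Failure\nNext test".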
+ return re.sub(r'Stack trace:(.|\n)*?\n\n', '', output) + + +def RemoveTime(output): + """Removes all time information from a Google Test program's output.""" + + return re.sub(r'\(\d+ ms', '(? ms', output) + + +def RemoveTypeInfoDetails(test_output): + """Removes compiler-specific type info from Google Test program's output. + + Args: + test_output: the output of a Google Test program. + + Returns: + output with type information normalized to canonical form. + """ + + # some compilers output the name of type 'unsigned int' as 'unsigned' + return re.sub(r'unsigned int', 'unsigned', test_output) + + +def NormalizeToCurrentPlatform(test_output): + """Normalizes platform specific output details for easier comparison.""" + + if IS_WINDOWS: + # Removes the color information that is not present on Windows. + test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output) + # Changes failure message headers into the Windows format. + test_output = re.sub(r': Failure\n', r': error: ', test_output) + # Changes file(line_number) to file:line_number. + test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output) + + return test_output + + +def RemoveTestCounts(output): + """Removes test counts from a Google Test program's output.""" + + output = re.sub(r'\d+ tests?, listed below', + '? tests, listed below', output) + output = re.sub(r'\d+ FAILED TESTS', + '? FAILED TESTS', output) + output = re.sub(r'\d+ tests? from \d+ test cases?', + '? tests from ? test cases', output) + output = re.sub(r'\d+ tests? from ([a-zA-Z_])', + r'? tests from \1', output) + return re.sub(r'\d+ tests?\.', '? tests.', output) + + +def RemoveMatchingTests(test_output, pattern): + """Removes output of specified tests from a Google Test program's output. + + This function strips not only the beginning and the end of a test but also + all output in between. + + Args: + test_output: A string containing the test output. + pattern: A regex string that matches names of test cases or + tests to remove. + + Returns: + Contents of test_output with tests whose names match pattern removed. + """ + + test_output = re.sub( + r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n' % ( + pattern, pattern), + '', + test_output) + return re.sub(r'.*%s.*\n' % pattern, '', test_output) + + +def NormalizeOutput(output): + """Normalizes output (the output of gtest_output_test_.exe).""" + + output = ToUnixLineEnding(output) + output = RemoveLocations(output) + output = RemoveStackTraceDetails(output) + output = RemoveTime(output) + return output + + +def GetShellCommandOutput(env_cmd): + """Runs a command in a sub-process, and returns its output in a string. + + Args: + env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra + environment variables to set, and element 1 is a string with + the command and any flags. + + Returns: + A string with the command's combined standard and diagnostic output. + """ + + # Spawns cmd in a sub-process, and gets its standard I/O file objects. + # Set and save the environment properly. + environ = os.environ.copy() + environ.update(env_cmd[0]) + p = gtest_test_utils.Subprocess(env_cmd[1], env=environ) + + return p.output + + +def GetCommandOutput(env_cmd): + """Runs a command and returns its output with all file location + info stripped off. + + Args: + env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra + environment variables to set, and element 1 is a string with + the command and any flags. + """ + + # Disables exception pop-ups on Windows. 
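  # (GTEST_CATCH_EXCEPTIONS=1 makes the test program report a crash as a
  # test failure instead of letting the OS raise an interactive error dialog,
  # which would hang an unattended run.)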
+ environ, cmdline = env_cmd + environ = dict(environ) # Ensures we are modifying a copy. + environ[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1' + return NormalizeOutput(GetShellCommandOutput((environ, cmdline))) + + +def GetOutputOfAllCommands(): + """Returns concatenated output from several representative commands.""" + + return (GetCommandOutput(COMMAND_WITH_COLOR) + + GetCommandOutput(COMMAND_WITH_TIME) + + GetCommandOutput(COMMAND_WITH_DISABLED) + + GetCommandOutput(COMMAND_WITH_SHARDING)) + + +test_list = GetShellCommandOutput(COMMAND_LIST_TESTS) +SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list +SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list +SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list +SUPPORTS_STACK_TRACES = False + +CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and + SUPPORTS_TYPED_TESTS and + SUPPORTS_THREADS) + + +class GTestOutputTest(gtest_test_utils.TestCase): + def RemoveUnsupportedTests(self, test_output): + if not SUPPORTS_DEATH_TESTS: + test_output = RemoveMatchingTests(test_output, 'DeathTest') + if not SUPPORTS_TYPED_TESTS: + test_output = RemoveMatchingTests(test_output, 'TypedTest') + test_output = RemoveMatchingTests(test_output, 'TypedDeathTest') + test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest') + if not SUPPORTS_THREADS: + test_output = RemoveMatchingTests(test_output, + 'ExpectFailureWithThreadsTest') + test_output = RemoveMatchingTests(test_output, + 'ScopedFakeTestPartResultReporterTest') + test_output = RemoveMatchingTests(test_output, + 'WorksConcurrently') + if not SUPPORTS_STACK_TRACES: + test_output = RemoveStackTraces(test_output) + + return test_output + + def testOutput(self): + output = GetOutputOfAllCommands() + + golden_file = open(GOLDEN_PATH, 'rb') + # A mis-configured source control system can cause \r appear in EOL + # sequences when we read the golden file irrespective of an operating + # system used. Therefore, we need to strip those \r's from newlines + # unconditionally. + golden = ToUnixLineEnding(golden_file.read()) + golden_file.close() + + # We want the test to pass regardless of certain features being + # supported or not. + + # We still have to remove type name specifics in all cases. + normalized_actual = RemoveTypeInfoDetails(output) + normalized_golden = RemoveTypeInfoDetails(golden) + + if CAN_GENERATE_GOLDEN_FILE: + self.assertEqual(normalized_golden, normalized_actual) + else: + normalized_actual = NormalizeToCurrentPlatform( + RemoveTestCounts(normalized_actual)) + normalized_golden = NormalizeToCurrentPlatform( + RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden))) + + # This code is very handy when debugging golden file differences: + if os.getenv('DEBUG_GTEST_OUTPUT_TEST'): + open(os.path.join( + gtest_test_utils.GetSourceDir(), + '_gtest_output_test_normalized_actual.txt'), 'wb').write( + normalized_actual) + open(os.path.join( + gtest_test_utils.GetSourceDir(), + '_gtest_output_test_normalized_golden.txt'), 'wb').write( + normalized_golden) + + self.assertEqual(normalized_golden, normalized_actual) + + +if __name__ == '__main__': + if sys.argv[1:] == [GENGOLDEN_FLAG]: + if CAN_GENERATE_GOLDEN_FILE: + output = GetOutputOfAllCommands() + golden_file = open(GOLDEN_PATH, 'wb') + golden_file.write(output) + golden_file.close() + else: + message = ( + """Unable to write a golden file when compiled in an environment +that does not support all the required features (death tests, typed tests, +and multiple threads). 
Please generate the golden file using a binary built
+with those features enabled.""")

+      sys.stderr.write(message)
+      sys.exit(1)
+  else:
+    gtest_test_utils.Main()
diff --git a/external/gtest/test/gtest_output_test_.cc b/external/gtest/test/gtest_output_test_.cc
new file mode 100755
index 0000000000..07ab633d4d
--- /dev/null
+++ b/external/gtest/test/gtest_output_test_.cc
@@ -0,0 +1,1034 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// The purpose of this file is to generate Google Test output under
+// various conditions.  The output will then be verified by
+// gtest_output_test.py to ensure that Google Test generates the
+// desired messages.  Therefore, most tests in this file are MEANT TO
+// FAIL.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#include "gtest/gtest-spi.h"
+#include "gtest/gtest.h"
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#include "src/gtest-internal-inl.h"
+#undef GTEST_IMPLEMENTATION_
+
+#include <stdlib.h>
+
+#if GTEST_IS_THREADSAFE
+using testing::ScopedFakeTestPartResultReporter;
+using testing::TestPartResultArray;
+
+using testing::internal::Notification;
+using testing::internal::ThreadWithParam;
+#endif
+
+namespace posix = ::testing::internal::posix;
+using testing::internal::scoped_ptr;
+
+// Tests catching fatal failures.
+
+// A subroutine used by the following test.
+void TestEq1(int x) {
+  ASSERT_EQ(1, x);
+}
+
+// This function calls a test subroutine, catches the fatal failure it
+// generates, and then returns early.
+void TryTestSubroutine() {
+  // Calls a subroutine that yields a fatal failure.
+  TestEq1(2);
+
+  // Catches the fatal failure and aborts the test.
+  //
+  // The testing::Test:: prefix is necessary when calling
+  // HasFatalFailure() outside of a TEST, TEST_F, or test fixture.
+  if (testing::Test::HasFatalFailure()) return;
+
+  // If we get here, something is wrong.
+  FAIL() << "This should never be reached.";
+}
+
+TEST(PassingTest, PassingTest1) {
+}
+
+TEST(PassingTest, PassingTest2) {
+}
+
+// Tests that parameters of failing parameterized tests are printed in the
+// failing test summary.
+class FailingParamTest : public testing::TestWithParam<int> {};
+
+TEST_P(FailingParamTest, Fails) {
+  EXPECT_EQ(1, GetParam());
+}
+
+// This generates a test which will fail. Google Test is expected to print
+// its parameter when it outputs the list of all failed tests.
+INSTANTIATE_TEST_CASE_P(PrintingFailingParams,
+                        FailingParamTest,
+                        testing::Values(2));
+
+static const char kGoldenString[] = "\"Line\0 1\"\nLine 2";
+
+TEST(NonfatalFailureTest, EscapesStringOperands) {
+  std::string actual = "actual \"string\"";
+  EXPECT_EQ(kGoldenString, actual);
+
+  const char* golden = kGoldenString;
+  EXPECT_EQ(golden, actual);
+}
+
+// Tests catching a fatal failure in a subroutine.
+TEST(FatalFailureTest, FatalFailureInSubroutine) {
+  printf("(expecting a failure that x should be 1)\n");
+
+  TryTestSubroutine();
+}
+
+// Tests catching a fatal failure in a nested subroutine.
+TEST(FatalFailureTest, FatalFailureInNestedSubroutine) {
+  printf("(expecting a failure that x should be 1)\n");
+
+  // Calls a subroutine that yields a fatal failure.
+  TryTestSubroutine();
+
+  // Catches the fatal failure and aborts the test.
+  //
+  // When calling HasFatalFailure() inside a TEST, TEST_F, or test
+  // fixture, the testing::Test:: prefix is not needed.
+  if (HasFatalFailure()) return;
+
+  // If we get here, something is wrong.
+  FAIL() << "This should never be reached.";
+}
+
+// Tests HasFatalFailure() after a failed EXPECT check.
+TEST(FatalFailureTest, NonfatalFailureInSubroutine) {
+  printf("(expecting a failure on false)\n");
+  EXPECT_TRUE(false);  // Generates a nonfatal failure
+  ASSERT_FALSE(HasFatalFailure());  // This should succeed.
+}
+
+// Tests interleaving user logging and Google Test assertions.
+TEST(LoggingTest, InterleavingLoggingAndAssertions) {
+  static const int a[4] = {
+    3, 9, 2, 6
+  };
+
+  printf("(expecting 2 failures on (3) >= (a[i]))\n");
+  for (int i = 0; i < static_cast<int>(sizeof(a)/sizeof(*a)); i++) {
+    printf("i == %d\n", i);
+    EXPECT_GE(3, a[i]);
+  }
+}
+
+// Tests the SCOPED_TRACE macro.
+
+// A helper function for testing SCOPED_TRACE.
+void SubWithoutTrace(int n) {
+  EXPECT_EQ(1, n);
+  ASSERT_EQ(2, n);
+}
+
+// Another helper function for testing SCOPED_TRACE.
+void SubWithTrace(int n) {
+  SCOPED_TRACE(testing::Message() << "n = " << n);
+
+  SubWithoutTrace(n);
+}
+
+// Tests that SCOPED_TRACE() obeys lexical scopes.
+TEST(SCOPED_TRACETest, ObeysScopes) {
+  printf("(expected to fail)\n");
+
+  // There should be no trace before SCOPED_TRACE() is invoked.
+  ADD_FAILURE() << "This failure is expected, and shouldn't have a trace.";
+
+  {
+    SCOPED_TRACE("Expected trace");
+    // After SCOPED_TRACE(), a failure in the current scope should contain
+    // the trace.
+    ADD_FAILURE() << "This failure is expected, and should have a trace.";
+  }
+
+  // Once the control leaves the scope of the SCOPED_TRACE(), there
+  // should be no trace again.
+  ADD_FAILURE() << "This failure is expected, and shouldn't have a trace.";
+}
+
+// Tests that SCOPED_TRACE works inside a loop.
+TEST(SCOPED_TRACETest, WorksInLoop) {
+  printf("(expected to fail)\n");
+
+  for (int i = 1; i <= 2; i++) {
+    SCOPED_TRACE(testing::Message() << "i = " << i);
+
+    SubWithoutTrace(i);
+  }
+}
+
+// Tests that SCOPED_TRACE works in a subroutine.
+TEST(SCOPED_TRACETest, WorksInSubroutine) {
+  printf("(expected to fail)\n");
+
+  SubWithTrace(1);
+  SubWithTrace(2);
+}
+
+// Tests that SCOPED_TRACE can be nested.
+TEST(SCOPED_TRACETest, CanBeNested) {
+  printf("(expected to fail)\n");
+
+  SCOPED_TRACE("");  // A trace without a message.
+
+  SubWithTrace(2);
+}
+
+// Tests that multiple SCOPED_TRACEs can be used in the same scope.
+TEST(SCOPED_TRACETest, CanBeRepeated) {
+  printf("(expected to fail)\n");
+
+  SCOPED_TRACE("A");
+  ADD_FAILURE()
+      << "This failure is expected, and should contain trace point A.";
+
+  SCOPED_TRACE("B");
+  ADD_FAILURE()
+      << "This failure is expected, and should contain trace point A and B.";
+
+  {
+    SCOPED_TRACE("C");
+    ADD_FAILURE() << "This failure is expected, and should "
+                  << "contain trace point A, B, and C.";
+  }
+
+  SCOPED_TRACE("D");
+  ADD_FAILURE() << "This failure is expected, and should "
+                << "contain trace point A, B, and D.";
+}
+
+#if GTEST_IS_THREADSAFE
+// Tests that SCOPED_TRACE()s can be used concurrently from multiple
+// threads.  Namely, an assertion should be affected by
+// SCOPED_TRACE()s in its own thread only.
+
+// Here's the sequence of actions that happen in the test:
+//
+//   Thread A (main)                | Thread B (spawned)
+//   ===============================|================================
+//   spawns thread B                |
+//   -------------------------------+--------------------------------
+//   waits for n1                   | SCOPED_TRACE("Trace B");
+//                                  | generates failure #1
+//                                  | notifies n1
+//   -------------------------------+--------------------------------
+//   SCOPED_TRACE("Trace A");       | waits for n2
+//   generates failure #2           |
+//   notifies n2                    |
+//   -------------------------------|--------------------------------
+//   waits for n3                   | generates failure #3
+//                                  | trace B dies
+//                                  | generates failure #4
+//                                  | notifies n3
+//   -------------------------------|--------------------------------
+//   generates failure #5           | finishes
+//   trace A dies                   |
+//   generates failure #6           |
+//   -------------------------------|--------------------------------
+//   waits for thread B to finish   |
+
+struct CheckPoints {
+  Notification n1;
+  Notification n2;
+  Notification n3;
+};
+
+static void ThreadWithScopedTrace(CheckPoints* check_points) {
+  {
+    SCOPED_TRACE("Trace B");
+    ADD_FAILURE()
+        << "Expected failure #1 (in thread B, only trace B alive).";
+    check_points->n1.Notify();
+    check_points->n2.WaitForNotification();
+
+    ADD_FAILURE()
+        << "Expected failure #3 (in thread B, trace A & B both alive).";
+  }  // Trace B dies here.
+  ADD_FAILURE()
+      << "Expected failure #4 (in thread B, only trace A alive).";
+  check_points->n3.Notify();
+}
+
+TEST(SCOPED_TRACETest, WorksConcurrently) {
+  printf("(expecting 6 failures)\n");
+
+  CheckPoints check_points;
+  ThreadWithParam<CheckPoints*> thread(&ThreadWithScopedTrace,
+                                       &check_points,
+                                       NULL);
+  check_points.n1.WaitForNotification();
+
+  {
+    SCOPED_TRACE("Trace A");
+    ADD_FAILURE()
+        << "Expected failure #2 (in thread A, trace A & B both alive).";
+    check_points.n2.Notify();
+    check_points.n3.WaitForNotification();
+
+    ADD_FAILURE()
+        << "Expected failure #5 (in thread A, only trace A alive).";
+  }  // Trace A dies here.
+ ADD_FAILURE() + << "Expected failure #6 (in thread A, no trace alive)."; + thread.Join(); +} +#endif // GTEST_IS_THREADSAFE + +TEST(DisabledTestsWarningTest, + DISABLED_AlsoRunDisabledTestsFlagSuppressesWarning) { + // This test body is intentionally empty. Its sole purpose is for + // verifying that the --gtest_also_run_disabled_tests flag + // suppresses the "YOU HAVE 12 DISABLED TESTS" warning at the end of + // the test output. +} + +// Tests using assertions outside of TEST and TEST_F. +// +// This function creates two failures intentionally. +void AdHocTest() { + printf("The non-test part of the code is expected to have 2 failures.\n\n"); + EXPECT_TRUE(false); + EXPECT_EQ(2, 3); +} + +// Runs all TESTs, all TEST_Fs, and the ad hoc test. +int RunAllTests() { + AdHocTest(); + return RUN_ALL_TESTS(); +} + +// Tests non-fatal failures in the fixture constructor. +class NonFatalFailureInFixtureConstructorTest : public testing::Test { + protected: + NonFatalFailureInFixtureConstructorTest() { + printf("(expecting 5 failures)\n"); + ADD_FAILURE() << "Expected failure #1, in the test fixture c'tor."; + } + + ~NonFatalFailureInFixtureConstructorTest() { + ADD_FAILURE() << "Expected failure #5, in the test fixture d'tor."; + } + + virtual void SetUp() { + ADD_FAILURE() << "Expected failure #2, in SetUp()."; + } + + virtual void TearDown() { + ADD_FAILURE() << "Expected failure #4, in TearDown."; + } +}; + +TEST_F(NonFatalFailureInFixtureConstructorTest, FailureInConstructor) { + ADD_FAILURE() << "Expected failure #3, in the test body."; +} + +// Tests fatal failures in the fixture constructor. +class FatalFailureInFixtureConstructorTest : public testing::Test { + protected: + FatalFailureInFixtureConstructorTest() { + printf("(expecting 2 failures)\n"); + Init(); + } + + ~FatalFailureInFixtureConstructorTest() { + ADD_FAILURE() << "Expected failure #2, in the test fixture d'tor."; + } + + virtual void SetUp() { + ADD_FAILURE() << "UNEXPECTED failure in SetUp(). " + << "We should never get here, as the test fixture c'tor " + << "had a fatal failure."; + } + + virtual void TearDown() { + ADD_FAILURE() << "UNEXPECTED failure in TearDown(). " + << "We should never get here, as the test fixture c'tor " + << "had a fatal failure."; + } + + private: + void Init() { + FAIL() << "Expected failure #1, in the test fixture c'tor."; + } +}; + +TEST_F(FatalFailureInFixtureConstructorTest, FailureInConstructor) { + ADD_FAILURE() << "UNEXPECTED failure in the test body. " + << "We should never get here, as the test fixture c'tor " + << "had a fatal failure."; +} + +// Tests non-fatal failures in SetUp(). +class NonFatalFailureInSetUpTest : public testing::Test { + protected: + virtual ~NonFatalFailureInSetUpTest() { + Deinit(); + } + + virtual void SetUp() { + printf("(expecting 4 failures)\n"); + ADD_FAILURE() << "Expected failure #1, in SetUp()."; + } + + virtual void TearDown() { + FAIL() << "Expected failure #3, in TearDown()."; + } + private: + void Deinit() { + FAIL() << "Expected failure #4, in the test fixture d'tor."; + } +}; + +TEST_F(NonFatalFailureInSetUpTest, FailureInSetUp) { + FAIL() << "Expected failure #2, in the test function."; +} + +// Tests fatal failures in SetUp(). 
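+// Note the ordering the expected-failure numbers below encode: a fatal
+// failure in SetUp() skips the test body, but TearDown() and the fixture
+// destructor still run, which is why the body must NOT add a failure.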
+class FatalFailureInSetUpTest : public testing::Test {
+ protected:
+  virtual ~FatalFailureInSetUpTest() {
+    Deinit();
+  }
+
+  virtual void SetUp() {
+    printf("(expecting 3 failures)\n");
+    FAIL() << "Expected failure #1, in SetUp().";
+  }
+
+  virtual void TearDown() {
+    FAIL() << "Expected failure #2, in TearDown().";
+  }
+ private:
+  void Deinit() {
+    FAIL() << "Expected failure #3, in the test fixture d'tor.";
+  }
+};
+
+TEST_F(FatalFailureInSetUpTest, FailureInSetUp) {
+  FAIL() << "UNEXPECTED failure in the test function.  "
+         << "We should never get here, as SetUp() failed.";
+}
+
+TEST(AddFailureAtTest, MessageContainsSpecifiedFileAndLineNumber) {
+  ADD_FAILURE_AT("foo.cc", 42) << "Expected failure in foo.cc";
+}
+
+#if GTEST_IS_THREADSAFE
+
+// A unary function that may die.
+void DieIf(bool should_die) {
+  GTEST_CHECK_(!should_die) << " - death inside DieIf().";
+}
+
+// Tests running death tests in a multi-threaded context.
+
+// Used for coordination between the main and the spawn thread.
+struct SpawnThreadNotifications {
+  SpawnThreadNotifications() {}
+
+  Notification spawn_thread_started;
+  Notification spawn_thread_ok_to_terminate;
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(SpawnThreadNotifications);
+};
+
+// The function to be executed in the thread spawn by the
+// MultipleThreads test (below).
+static void ThreadRoutine(SpawnThreadNotifications* notifications) {
+  // Signals the main thread that this thread has started.
+  notifications->spawn_thread_started.Notify();
+
+  // Waits for permission to finish from the main thread.
+  notifications->spawn_thread_ok_to_terminate.WaitForNotification();
+}
+
+// This is a death-test test, but it's not named with a DeathTest
+// suffix.  It starts threads which might interfere with later
+// death tests, so it must run after all other death tests.
+class DeathTestAndMultiThreadsTest : public testing::Test {
+ protected:
+  // Starts a thread and waits for it to begin.
+  virtual void SetUp() {
+    thread_.reset(new ThreadWithParam<SpawnThreadNotifications*>(
+        &ThreadRoutine, &notifications_, NULL));
+    notifications_.spawn_thread_started.WaitForNotification();
+  }
+  // Tells the thread to finish, and reaps it.
+  // Depending on the version of the thread library in use,
+  // a manager thread might still be left running that will interfere
+  // with later death tests.  This is unfortunate, but this class
+  // cleans up after itself as best it can.
+  virtual void TearDown() {
+    notifications_.spawn_thread_ok_to_terminate.Notify();
+  }
+
+ private:
+  SpawnThreadNotifications notifications_;
+  scoped_ptr<ThreadWithParam<SpawnThreadNotifications*> > thread_;
+};
+
+#endif  // GTEST_IS_THREADSAFE
+
+// The MixedUpTestCaseTest test case verifies that Google Test will fail a
+// test if it uses a different fixture class than what other tests in
+// the same test case use.  It deliberately contains two fixture
+// classes with the same name but defined in different namespaces.
+
+// The MixedUpTestCaseWithSameTestNameTest test case verifies that
+// when the user defines two tests with the same test case name AND
+// same test name (but in different namespaces), the second test will
+// fail.
+ +namespace foo { + +class MixedUpTestCaseTest : public testing::Test { +}; + +TEST_F(MixedUpTestCaseTest, FirstTestFromNamespaceFoo) {} +TEST_F(MixedUpTestCaseTest, SecondTestFromNamespaceFoo) {} + +class MixedUpTestCaseWithSameTestNameTest : public testing::Test { +}; + +TEST_F(MixedUpTestCaseWithSameTestNameTest, + TheSecondTestWithThisNameShouldFail) {} + +} // namespace foo + +namespace bar { + +class MixedUpTestCaseTest : public testing::Test { +}; + +// The following two tests are expected to fail. We rely on the +// golden file to check that Google Test generates the right error message. +TEST_F(MixedUpTestCaseTest, ThisShouldFail) {} +TEST_F(MixedUpTestCaseTest, ThisShouldFailToo) {} + +class MixedUpTestCaseWithSameTestNameTest : public testing::Test { +}; + +// Expected to fail. We rely on the golden file to check that Google Test +// generates the right error message. +TEST_F(MixedUpTestCaseWithSameTestNameTest, + TheSecondTestWithThisNameShouldFail) {} + +} // namespace bar + +// The following two test cases verify that Google Test catches the user +// error of mixing TEST and TEST_F in the same test case. The first +// test case checks the scenario where TEST_F appears before TEST, and +// the second one checks where TEST appears before TEST_F. + +class TEST_F_before_TEST_in_same_test_case : public testing::Test { +}; + +TEST_F(TEST_F_before_TEST_in_same_test_case, DefinedUsingTEST_F) {} + +// Expected to fail. We rely on the golden file to check that Google Test +// generates the right error message. +TEST(TEST_F_before_TEST_in_same_test_case, DefinedUsingTESTAndShouldFail) {} + +class TEST_before_TEST_F_in_same_test_case : public testing::Test { +}; + +TEST(TEST_before_TEST_F_in_same_test_case, DefinedUsingTEST) {} + +// Expected to fail. We rely on the golden file to check that Google Test +// generates the right error message. +TEST_F(TEST_before_TEST_F_in_same_test_case, DefinedUsingTEST_FAndShouldFail) { +} + +// Used for testing EXPECT_NONFATAL_FAILURE() and EXPECT_FATAL_FAILURE(). +int global_integer = 0; + +// Tests that EXPECT_NONFATAL_FAILURE() can reference global variables. +TEST(ExpectNonfatalFailureTest, CanReferenceGlobalVariables) { + global_integer = 0; + EXPECT_NONFATAL_FAILURE({ + EXPECT_EQ(1, global_integer) << "Expected non-fatal failure."; + }, "Expected non-fatal failure."); +} + +// Tests that EXPECT_NONFATAL_FAILURE() can reference local variables +// (static or not). +TEST(ExpectNonfatalFailureTest, CanReferenceLocalVariables) { + int m = 0; + static int n; + n = 1; + EXPECT_NONFATAL_FAILURE({ + EXPECT_EQ(m, n) << "Expected non-fatal failure."; + }, "Expected non-fatal failure."); +} + +// Tests that EXPECT_NONFATAL_FAILURE() succeeds when there is exactly +// one non-fatal failure and no fatal failure. +TEST(ExpectNonfatalFailureTest, SucceedsWhenThereIsOneNonfatalFailure) { + EXPECT_NONFATAL_FAILURE({ + ADD_FAILURE() << "Expected non-fatal failure."; + }, "Expected non-fatal failure."); +} + +// Tests that EXPECT_NONFATAL_FAILURE() fails when there is no +// non-fatal failure. +TEST(ExpectNonfatalFailureTest, FailsWhenThereIsNoNonfatalFailure) { + printf("(expecting a failure)\n"); + EXPECT_NONFATAL_FAILURE({ + }, ""); +} + +// Tests that EXPECT_NONFATAL_FAILURE() fails when there are two +// non-fatal failures. 
+TEST(ExpectNonfatalFailureTest, FailsWhenThereAreTwoNonfatalFailures) { + printf("(expecting a failure)\n"); + EXPECT_NONFATAL_FAILURE({ + ADD_FAILURE() << "Expected non-fatal failure 1."; + ADD_FAILURE() << "Expected non-fatal failure 2."; + }, ""); +} + +// Tests that EXPECT_NONFATAL_FAILURE() fails when there is one fatal +// failure. +TEST(ExpectNonfatalFailureTest, FailsWhenThereIsOneFatalFailure) { + printf("(expecting a failure)\n"); + EXPECT_NONFATAL_FAILURE({ + FAIL() << "Expected fatal failure."; + }, ""); +} + +// Tests that EXPECT_NONFATAL_FAILURE() fails when the statement being +// tested returns. +TEST(ExpectNonfatalFailureTest, FailsWhenStatementReturns) { + printf("(expecting a failure)\n"); + EXPECT_NONFATAL_FAILURE({ + return; + }, ""); +} + +#if GTEST_HAS_EXCEPTIONS + +// Tests that EXPECT_NONFATAL_FAILURE() fails when the statement being +// tested throws. +TEST(ExpectNonfatalFailureTest, FailsWhenStatementThrows) { + printf("(expecting a failure)\n"); + try { + EXPECT_NONFATAL_FAILURE({ + throw 0; + }, ""); + } catch(int) { // NOLINT + } +} + +#endif // GTEST_HAS_EXCEPTIONS + +// Tests that EXPECT_FATAL_FAILURE() can reference global variables. +TEST(ExpectFatalFailureTest, CanReferenceGlobalVariables) { + global_integer = 0; + EXPECT_FATAL_FAILURE({ + ASSERT_EQ(1, global_integer) << "Expected fatal failure."; + }, "Expected fatal failure."); +} + +// Tests that EXPECT_FATAL_FAILURE() can reference local static +// variables. +TEST(ExpectFatalFailureTest, CanReferenceLocalStaticVariables) { + static int n; + n = 1; + EXPECT_FATAL_FAILURE({ + ASSERT_EQ(0, n) << "Expected fatal failure."; + }, "Expected fatal failure."); +} + +// Tests that EXPECT_FATAL_FAILURE() succeeds when there is exactly +// one fatal failure and no non-fatal failure. +TEST(ExpectFatalFailureTest, SucceedsWhenThereIsOneFatalFailure) { + EXPECT_FATAL_FAILURE({ + FAIL() << "Expected fatal failure."; + }, "Expected fatal failure."); +} + +// Tests that EXPECT_FATAL_FAILURE() fails when there is no fatal +// failure. +TEST(ExpectFatalFailureTest, FailsWhenThereIsNoFatalFailure) { + printf("(expecting a failure)\n"); + EXPECT_FATAL_FAILURE({ + }, ""); +} + +// A helper for generating a fatal failure. +void FatalFailure() { + FAIL() << "Expected fatal failure."; +} + +// Tests that EXPECT_FATAL_FAILURE() fails when there are two +// fatal failures. +TEST(ExpectFatalFailureTest, FailsWhenThereAreTwoFatalFailures) { + printf("(expecting a failure)\n"); + EXPECT_FATAL_FAILURE({ + FatalFailure(); + FatalFailure(); + }, ""); +} + +// Tests that EXPECT_FATAL_FAILURE() fails when there is one non-fatal +// failure. +TEST(ExpectFatalFailureTest, FailsWhenThereIsOneNonfatalFailure) { + printf("(expecting a failure)\n"); + EXPECT_FATAL_FAILURE({ + ADD_FAILURE() << "Expected non-fatal failure."; + }, ""); +} + +// Tests that EXPECT_FATAL_FAILURE() fails when the statement being +// tested returns. +TEST(ExpectFatalFailureTest, FailsWhenStatementReturns) { + printf("(expecting a failure)\n"); + EXPECT_FATAL_FAILURE({ + return; + }, ""); +} + +#if GTEST_HAS_EXCEPTIONS + +// Tests that EXPECT_FATAL_FAILURE() fails when the statement being +// tested throws. +TEST(ExpectFatalFailureTest, FailsWhenStatementThrows) { + printf("(expecting a failure)\n"); + try { + EXPECT_FATAL_FAILURE({ + throw 0; + }, ""); + } catch(int) { // NOLINT + } +} + +#endif // GTEST_HAS_EXCEPTIONS + +// This #ifdef block tests the output of typed tests. 
+#if GTEST_HAS_TYPED_TEST
+
+template <typename T>
+class TypedTest : public testing::Test {
+};
+
+TYPED_TEST_CASE(TypedTest, testing::Types<int>);
+
+TYPED_TEST(TypedTest, Success) {
+  EXPECT_EQ(0, TypeParam());
+}
+
+TYPED_TEST(TypedTest, Failure) {
+  EXPECT_EQ(1, TypeParam()) << "Expected failure";
+}
+
+#endif  // GTEST_HAS_TYPED_TEST
+
+// This #ifdef block tests the output of type-parameterized tests.
+#if GTEST_HAS_TYPED_TEST_P
+
+template <typename T>
+class TypedTestP : public testing::Test {
+};
+
+TYPED_TEST_CASE_P(TypedTestP);
+
+TYPED_TEST_P(TypedTestP, Success) {
+  EXPECT_EQ(0U, TypeParam());
+}
+
+TYPED_TEST_P(TypedTestP, Failure) {
+  EXPECT_EQ(1U, TypeParam()) << "Expected failure";
+}
+
+REGISTER_TYPED_TEST_CASE_P(TypedTestP, Success, Failure);
+
+typedef testing::Types<unsigned char, unsigned int> UnsignedTypes;
+INSTANTIATE_TYPED_TEST_CASE_P(Unsigned, TypedTestP, UnsignedTypes);
+
+#endif  // GTEST_HAS_TYPED_TEST_P
+
+#if GTEST_HAS_DEATH_TEST
+
+// We rely on the golden file to verify that tests whose test case
+// name ends with DeathTest are run first.
+
+TEST(ADeathTest, ShouldRunFirst) {
+}
+
+# if GTEST_HAS_TYPED_TEST
+
+// We rely on the golden file to verify that typed tests whose test
+// case name ends with DeathTest are run first.
+
+template <typename T>
+class ATypedDeathTest : public testing::Test {
+};
+
+typedef testing::Types<int, double> NumericTypes;
+TYPED_TEST_CASE(ATypedDeathTest, NumericTypes);
+
+TYPED_TEST(ATypedDeathTest, ShouldRunFirst) {
+}
+
+# endif  // GTEST_HAS_TYPED_TEST
+
+# if GTEST_HAS_TYPED_TEST_P
+
+
+// We rely on the golden file to verify that type-parameterized tests
+// whose test case name ends with DeathTest are run first.
+
+template <typename T>
+class ATypeParamDeathTest : public testing::Test {
+};
+
+TYPED_TEST_CASE_P(ATypeParamDeathTest);
+
+TYPED_TEST_P(ATypeParamDeathTest, ShouldRunFirst) {
+}
+
+REGISTER_TYPED_TEST_CASE_P(ATypeParamDeathTest, ShouldRunFirst);
+
+INSTANTIATE_TYPED_TEST_CASE_P(My, ATypeParamDeathTest, NumericTypes);
+
+# endif  // GTEST_HAS_TYPED_TEST_P
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+// Tests various failure conditions of
+// EXPECT_{,NON}FATAL_FAILURE{,_ON_ALL_THREADS}.
+class ExpectFailureTest : public testing::Test {
+ public:  // Must be public and not protected due to a bug in g++ 3.4.2.
+  enum FailureMode {
+    FATAL_FAILURE,
+    NONFATAL_FAILURE
+  };
+  static void AddFailure(FailureMode failure) {
+    if (failure == FATAL_FAILURE) {
+      FAIL() << "Expected fatal failure.";
+    } else {
+      ADD_FAILURE() << "Expected non-fatal failure.";
+    }
+  }
+};
+
+TEST_F(ExpectFailureTest, ExpectFatalFailure) {
+  // Expected fatal failure, but succeeds.
+  printf("(expecting 1 failure)\n");
+  EXPECT_FATAL_FAILURE(SUCCEED(), "Expected fatal failure.");
+  // Expected fatal failure, but got a non-fatal failure.
+  printf("(expecting 1 failure)\n");
+  EXPECT_FATAL_FAILURE(AddFailure(NONFATAL_FAILURE), "Expected non-fatal "
+                       "failure.");
+  // Wrong message.
+  printf("(expecting 1 failure)\n");
+  EXPECT_FATAL_FAILURE(AddFailure(FATAL_FAILURE), "Some other fatal failure "
+                       "expected.");
+}
+
+TEST_F(ExpectFailureTest, ExpectNonFatalFailure) {
+  // Expected non-fatal failure, but succeeds.
+  printf("(expecting 1 failure)\n");
+  EXPECT_NONFATAL_FAILURE(SUCCEED(), "Expected non-fatal failure.");
+  // Expected non-fatal failure, but got a fatal failure.
+  printf("(expecting 1 failure)\n");
+  EXPECT_NONFATAL_FAILURE(AddFailure(FATAL_FAILURE), "Expected fatal failure.");
+  // Wrong message.
+  printf("(expecting 1 failure)\n");
+  EXPECT_NONFATAL_FAILURE(AddFailure(NONFATAL_FAILURE), "Some other non-fatal "
+                          "failure.");
+}
+
+#if GTEST_IS_THREADSAFE
+
+class ExpectFailureWithThreadsTest : public ExpectFailureTest {
+ protected:
+  static void AddFailureInOtherThread(FailureMode failure) {
+    ThreadWithParam<FailureMode> thread(&AddFailure, failure, NULL);
+    thread.Join();
+  }
+};
+
+TEST_F(ExpectFailureWithThreadsTest, ExpectFatalFailure) {
+  // We only intercept the current thread.
+  printf("(expecting 2 failures)\n");
+  EXPECT_FATAL_FAILURE(AddFailureInOtherThread(FATAL_FAILURE),
+                       "Expected fatal failure.");
+}
+
+TEST_F(ExpectFailureWithThreadsTest, ExpectNonFatalFailure) {
+  // We only intercept the current thread.
+  printf("(expecting 2 failures)\n");
+  EXPECT_NONFATAL_FAILURE(AddFailureInOtherThread(NONFATAL_FAILURE),
+                          "Expected non-fatal failure.");
+}
+
+typedef ExpectFailureWithThreadsTest ScopedFakeTestPartResultReporterTest;
+
+// Tests that the ScopedFakeTestPartResultReporter only catches failures from
+// the current thread if it is instantiated with INTERCEPT_ONLY_CURRENT_THREAD.
+TEST_F(ScopedFakeTestPartResultReporterTest, InterceptOnlyCurrentThread) {
+  printf("(expecting 2 failures)\n");
+  TestPartResultArray results;
+  {
+    ScopedFakeTestPartResultReporter reporter(
+        ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD,
+        &results);
+    AddFailureInOtherThread(FATAL_FAILURE);
+    AddFailureInOtherThread(NONFATAL_FAILURE);
+  }
+  // The two failures should not have been intercepted.
+  EXPECT_EQ(0, results.size()) << "This shouldn't fail.";
+}
+
+#endif  // GTEST_IS_THREADSAFE
+
+TEST_F(ExpectFailureTest, ExpectFatalFailureOnAllThreads) {
+  // Expected fatal failure, but succeeds.
+  printf("(expecting 1 failure)\n");
+  EXPECT_FATAL_FAILURE_ON_ALL_THREADS(SUCCEED(), "Expected fatal failure.");
+  // Expected fatal failure, but got a non-fatal failure.
+  printf("(expecting 1 failure)\n");
+  EXPECT_FATAL_FAILURE_ON_ALL_THREADS(AddFailure(NONFATAL_FAILURE),
+                                      "Expected non-fatal failure.");
+  // Wrong message.
+  printf("(expecting 1 failure)\n");
+  EXPECT_FATAL_FAILURE_ON_ALL_THREADS(AddFailure(FATAL_FAILURE),
+                                      "Some other fatal failure expected.");
+}
+
+TEST_F(ExpectFailureTest, ExpectNonFatalFailureOnAllThreads) {
+  // Expected non-fatal failure, but succeeds.
+  printf("(expecting 1 failure)\n");
+  EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(SUCCEED(), "Expected non-fatal "
+                                         "failure.");
+  // Expected non-fatal failure, but got a fatal failure.
+  printf("(expecting 1 failure)\n");
+  EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(AddFailure(FATAL_FAILURE),
+                                         "Expected fatal failure.");
+  // Wrong message.
+  printf("(expecting 1 failure)\n");
+  EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(AddFailure(NONFATAL_FAILURE),
+                                         "Some other non-fatal failure.");
+}
+
+
+// Two test environments for testing testing::AddGlobalTestEnvironment().
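+// As the golden file later verifies, environments are set up in the order
+// they are registered with AddGlobalTestEnvironment() and torn down in
+// reverse order (see main() below).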
+ +class FooEnvironment : public testing::Environment { + public: + virtual void SetUp() { + printf("%s", "FooEnvironment::SetUp() called.\n"); + } + + virtual void TearDown() { + printf("%s", "FooEnvironment::TearDown() called.\n"); + FAIL() << "Expected fatal failure."; + } +}; + +class BarEnvironment : public testing::Environment { + public: + virtual void SetUp() { + printf("%s", "BarEnvironment::SetUp() called.\n"); + } + + virtual void TearDown() { + printf("%s", "BarEnvironment::TearDown() called.\n"); + ADD_FAILURE() << "Expected non-fatal failure."; + } +}; + +bool GTEST_FLAG(internal_skip_environment_and_ad_hoc_tests) = false; + +// The main function. +// +// The idea is to use Google Test to run all the tests we have defined (some +// of them are intended to fail), and then compare the test results +// with the "golden" file. +int main(int argc, char **argv) { + testing::GTEST_FLAG(print_time) = false; + + // We just run the tests, knowing some of them are intended to fail. + // We will use a separate Python script to compare the output of + // this program with the golden file. + + // It's hard to test InitGoogleTest() directly, as it has many + // global side effects. The following line serves as a sanity test + // for it. + testing::InitGoogleTest(&argc, argv); + if (argc >= 2 && + (std::string(argv[1]) == + "--gtest_internal_skip_environment_and_ad_hoc_tests")) + GTEST_FLAG(internal_skip_environment_and_ad_hoc_tests) = true; + +#if GTEST_HAS_DEATH_TEST + if (testing::internal::GTEST_FLAG(internal_run_death_test) != "") { + // Skip the usual output capturing if we're running as the child + // process of an threadsafe-style death test. +# if GTEST_OS_WINDOWS + posix::FReopen("nul:", "w", stdout); +# else + posix::FReopen("/dev/null", "w", stdout); +# endif // GTEST_OS_WINDOWS + return RUN_ALL_TESTS(); + } +#endif // GTEST_HAS_DEATH_TEST + + if (GTEST_FLAG(internal_skip_environment_and_ad_hoc_tests)) + return RUN_ALL_TESTS(); + + // Registers two global test environments. + // The golden file verifies that they are set up in the order they + // are registered, and torn down in the reverse order. + testing::AddGlobalTestEnvironment(new FooEnvironment); + testing::AddGlobalTestEnvironment(new BarEnvironment); + + return RunAllTests(); +} diff --git a/external/gtest/test/gtest_output_test_golden_lin.txt b/external/gtest/test/gtest_output_test_golden_lin.txt new file mode 100755 index 0000000000..960eedce24 --- /dev/null +++ b/external/gtest/test/gtest_output_test_golden_lin.txt @@ -0,0 +1,720 @@ +The non-test part of the code is expected to have 2 failures. + +gtest_output_test_.cc:#: Failure +Value of: false + Actual: false +Expected: true +gtest_output_test_.cc:#: Failure +Value of: 3 +Expected: 2 +[==========] Running 63 tests from 28 test cases. +[----------] Global test environment set-up. +FooEnvironment::SetUp() called. +BarEnvironment::SetUp() called. 
+[----------] 1 test from ADeathTest +[ RUN ] ADeathTest.ShouldRunFirst +[ OK ] ADeathTest.ShouldRunFirst +[----------] 1 test from ATypedDeathTest/0, where TypeParam = int +[ RUN ] ATypedDeathTest/0.ShouldRunFirst +[ OK ] ATypedDeathTest/0.ShouldRunFirst +[----------] 1 test from ATypedDeathTest/1, where TypeParam = double +[ RUN ] ATypedDeathTest/1.ShouldRunFirst +[ OK ] ATypedDeathTest/1.ShouldRunFirst +[----------] 1 test from My/ATypeParamDeathTest/0, where TypeParam = int +[ RUN ] My/ATypeParamDeathTest/0.ShouldRunFirst +[ OK ] My/ATypeParamDeathTest/0.ShouldRunFirst +[----------] 1 test from My/ATypeParamDeathTest/1, where TypeParam = double +[ RUN ] My/ATypeParamDeathTest/1.ShouldRunFirst +[ OK ] My/ATypeParamDeathTest/1.ShouldRunFirst +[----------] 2 tests from PassingTest +[ RUN ] PassingTest.PassingTest1 +[ OK ] PassingTest.PassingTest1 +[ RUN ] PassingTest.PassingTest2 +[ OK ] PassingTest.PassingTest2 +[----------] 1 test from NonfatalFailureTest +[ RUN ] NonfatalFailureTest.EscapesStringOperands +gtest_output_test_.cc:#: Failure +Value of: actual + Actual: "actual \"string\"" +Expected: kGoldenString +Which is: "\"Line" +gtest_output_test_.cc:#: Failure +Value of: actual + Actual: "actual \"string\"" +Expected: golden +Which is: "\"Line" +[ FAILED ] NonfatalFailureTest.EscapesStringOperands +[----------] 3 tests from FatalFailureTest +[ RUN ] FatalFailureTest.FatalFailureInSubroutine +(expecting a failure that x should be 1) +gtest_output_test_.cc:#: Failure +Value of: x + Actual: 2 +Expected: 1 +[ FAILED ] FatalFailureTest.FatalFailureInSubroutine +[ RUN ] FatalFailureTest.FatalFailureInNestedSubroutine +(expecting a failure that x should be 1) +gtest_output_test_.cc:#: Failure +Value of: x + Actual: 2 +Expected: 1 +[ FAILED ] FatalFailureTest.FatalFailureInNestedSubroutine +[ RUN ] FatalFailureTest.NonfatalFailureInSubroutine +(expecting a failure on false) +gtest_output_test_.cc:#: Failure +Value of: false + Actual: false +Expected: true +[ FAILED ] FatalFailureTest.NonfatalFailureInSubroutine +[----------] 1 test from LoggingTest +[ RUN ] LoggingTest.InterleavingLoggingAndAssertions +(expecting 2 failures on (3) >= (a[i])) +i == 0 +i == 1 +gtest_output_test_.cc:#: Failure +Expected: (3) >= (a[i]), actual: 3 vs 9 +i == 2 +i == 3 +gtest_output_test_.cc:#: Failure +Expected: (3) >= (a[i]), actual: 3 vs 6 +[ FAILED ] LoggingTest.InterleavingLoggingAndAssertions +[----------] 6 tests from SCOPED_TRACETest +[ RUN ] SCOPED_TRACETest.ObeysScopes +(expected to fail) +gtest_output_test_.cc:#: Failure +Failed +This failure is expected, and shouldn't have a trace. +gtest_output_test_.cc:#: Failure +Failed +This failure is expected, and should have a trace. +Google Test trace: +gtest_output_test_.cc:#: Expected trace +gtest_output_test_.cc:#: Failure +Failed +This failure is expected, and shouldn't have a trace. 
+[ FAILED ] SCOPED_TRACETest.ObeysScopes +[ RUN ] SCOPED_TRACETest.WorksInLoop +(expected to fail) +gtest_output_test_.cc:#: Failure +Value of: n + Actual: 1 +Expected: 2 +Google Test trace: +gtest_output_test_.cc:#: i = 1 +gtest_output_test_.cc:#: Failure +Value of: n + Actual: 2 +Expected: 1 +Google Test trace: +gtest_output_test_.cc:#: i = 2 +[ FAILED ] SCOPED_TRACETest.WorksInLoop +[ RUN ] SCOPED_TRACETest.WorksInSubroutine +(expected to fail) +gtest_output_test_.cc:#: Failure +Value of: n + Actual: 1 +Expected: 2 +Google Test trace: +gtest_output_test_.cc:#: n = 1 +gtest_output_test_.cc:#: Failure +Value of: n + Actual: 2 +Expected: 1 +Google Test trace: +gtest_output_test_.cc:#: n = 2 +[ FAILED ] SCOPED_TRACETest.WorksInSubroutine +[ RUN ] SCOPED_TRACETest.CanBeNested +(expected to fail) +gtest_output_test_.cc:#: Failure +Value of: n + Actual: 2 +Expected: 1 +Google Test trace: +gtest_output_test_.cc:#: n = 2 +gtest_output_test_.cc:#: +[ FAILED ] SCOPED_TRACETest.CanBeNested +[ RUN ] SCOPED_TRACETest.CanBeRepeated +(expected to fail) +gtest_output_test_.cc:#: Failure +Failed +This failure is expected, and should contain trace point A. +Google Test trace: +gtest_output_test_.cc:#: A +gtest_output_test_.cc:#: Failure +Failed +This failure is expected, and should contain trace point A and B. +Google Test trace: +gtest_output_test_.cc:#: B +gtest_output_test_.cc:#: A +gtest_output_test_.cc:#: Failure +Failed +This failure is expected, and should contain trace point A, B, and C. +Google Test trace: +gtest_output_test_.cc:#: C +gtest_output_test_.cc:#: B +gtest_output_test_.cc:#: A +gtest_output_test_.cc:#: Failure +Failed +This failure is expected, and should contain trace point A, B, and D. +Google Test trace: +gtest_output_test_.cc:#: D +gtest_output_test_.cc:#: B +gtest_output_test_.cc:#: A +[ FAILED ] SCOPED_TRACETest.CanBeRepeated +[ RUN ] SCOPED_TRACETest.WorksConcurrently +(expecting 6 failures) +gtest_output_test_.cc:#: Failure +Failed +Expected failure #1 (in thread B, only trace B alive). +Google Test trace: +gtest_output_test_.cc:#: Trace B +gtest_output_test_.cc:#: Failure +Failed +Expected failure #2 (in thread A, trace A & B both alive). +Google Test trace: +gtest_output_test_.cc:#: Trace A +gtest_output_test_.cc:#: Failure +Failed +Expected failure #3 (in thread B, trace A & B both alive). +Google Test trace: +gtest_output_test_.cc:#: Trace B +gtest_output_test_.cc:#: Failure +Failed +Expected failure #4 (in thread B, only trace A alive). +gtest_output_test_.cc:#: Failure +Failed +Expected failure #5 (in thread A, only trace A alive). +Google Test trace: +gtest_output_test_.cc:#: Trace A +gtest_output_test_.cc:#: Failure +Failed +Expected failure #6 (in thread A, no trace alive). +[ FAILED ] SCOPED_TRACETest.WorksConcurrently +[----------] 1 test from NonFatalFailureInFixtureConstructorTest +[ RUN ] NonFatalFailureInFixtureConstructorTest.FailureInConstructor +(expecting 5 failures) +gtest_output_test_.cc:#: Failure +Failed +Expected failure #1, in the test fixture c'tor. +gtest_output_test_.cc:#: Failure +Failed +Expected failure #2, in SetUp(). +gtest_output_test_.cc:#: Failure +Failed +Expected failure #3, in the test body. +gtest_output_test_.cc:#: Failure +Failed +Expected failure #4, in TearDown. +gtest_output_test_.cc:#: Failure +Failed +Expected failure #5, in the test fixture d'tor. 
+[ FAILED ] NonFatalFailureInFixtureConstructorTest.FailureInConstructor +[----------] 1 test from FatalFailureInFixtureConstructorTest +[ RUN ] FatalFailureInFixtureConstructorTest.FailureInConstructor +(expecting 2 failures) +gtest_output_test_.cc:#: Failure +Failed +Expected failure #1, in the test fixture c'tor. +gtest_output_test_.cc:#: Failure +Failed +Expected failure #2, in the test fixture d'tor. +[ FAILED ] FatalFailureInFixtureConstructorTest.FailureInConstructor +[----------] 1 test from NonFatalFailureInSetUpTest +[ RUN ] NonFatalFailureInSetUpTest.FailureInSetUp +(expecting 4 failures) +gtest_output_test_.cc:#: Failure +Failed +Expected failure #1, in SetUp(). +gtest_output_test_.cc:#: Failure +Failed +Expected failure #2, in the test function. +gtest_output_test_.cc:#: Failure +Failed +Expected failure #3, in TearDown(). +gtest_output_test_.cc:#: Failure +Failed +Expected failure #4, in the test fixture d'tor. +[ FAILED ] NonFatalFailureInSetUpTest.FailureInSetUp +[----------] 1 test from FatalFailureInSetUpTest +[ RUN ] FatalFailureInSetUpTest.FailureInSetUp +(expecting 3 failures) +gtest_output_test_.cc:#: Failure +Failed +Expected failure #1, in SetUp(). +gtest_output_test_.cc:#: Failure +Failed +Expected failure #2, in TearDown(). +gtest_output_test_.cc:#: Failure +Failed +Expected failure #3, in the test fixture d'tor. +[ FAILED ] FatalFailureInSetUpTest.FailureInSetUp +[----------] 1 test from AddFailureAtTest +[ RUN ] AddFailureAtTest.MessageContainsSpecifiedFileAndLineNumber +foo.cc:42: Failure +Failed +Expected failure in foo.cc +[ FAILED ] AddFailureAtTest.MessageContainsSpecifiedFileAndLineNumber +[----------] 4 tests from MixedUpTestCaseTest +[ RUN ] MixedUpTestCaseTest.FirstTestFromNamespaceFoo +[ OK ] MixedUpTestCaseTest.FirstTestFromNamespaceFoo +[ RUN ] MixedUpTestCaseTest.SecondTestFromNamespaceFoo +[ OK ] MixedUpTestCaseTest.SecondTestFromNamespaceFoo +[ RUN ] MixedUpTestCaseTest.ThisShouldFail +gtest.cc:#: Failure +Failed +All tests in the same test case must use the same test fixture +class. However, in test case MixedUpTestCaseTest, +you defined test FirstTestFromNamespaceFoo and test ThisShouldFail +using two different test fixture classes. This can happen if +the two classes are from different namespaces or translation +units and have the same name. You should probably rename one +of the classes to put the tests into different test cases. +[ FAILED ] MixedUpTestCaseTest.ThisShouldFail +[ RUN ] MixedUpTestCaseTest.ThisShouldFailToo +gtest.cc:#: Failure +Failed +All tests in the same test case must use the same test fixture +class. However, in test case MixedUpTestCaseTest, +you defined test FirstTestFromNamespaceFoo and test ThisShouldFailToo +using two different test fixture classes. This can happen if +the two classes are from different namespaces or translation +units and have the same name. You should probably rename one +of the classes to put the tests into different test cases. +[ FAILED ] MixedUpTestCaseTest.ThisShouldFailToo +[----------] 2 tests from MixedUpTestCaseWithSameTestNameTest +[ RUN ] MixedUpTestCaseWithSameTestNameTest.TheSecondTestWithThisNameShouldFail +[ OK ] MixedUpTestCaseWithSameTestNameTest.TheSecondTestWithThisNameShouldFail +[ RUN ] MixedUpTestCaseWithSameTestNameTest.TheSecondTestWithThisNameShouldFail +gtest.cc:#: Failure +Failed +All tests in the same test case must use the same test fixture +class. 
However, in test case MixedUpTestCaseWithSameTestNameTest, +you defined test TheSecondTestWithThisNameShouldFail and test TheSecondTestWithThisNameShouldFail +using two different test fixture classes. This can happen if +the two classes are from different namespaces or translation +units and have the same name. You should probably rename one +of the classes to put the tests into different test cases. +[ FAILED ] MixedUpTestCaseWithSameTestNameTest.TheSecondTestWithThisNameShouldFail +[----------] 2 tests from TEST_F_before_TEST_in_same_test_case +[ RUN ] TEST_F_before_TEST_in_same_test_case.DefinedUsingTEST_F +[ OK ] TEST_F_before_TEST_in_same_test_case.DefinedUsingTEST_F +[ RUN ] TEST_F_before_TEST_in_same_test_case.DefinedUsingTESTAndShouldFail +gtest.cc:#: Failure +Failed +All tests in the same test case must use the same test fixture +class, so mixing TEST_F and TEST in the same test case is +illegal. In test case TEST_F_before_TEST_in_same_test_case, +test DefinedUsingTEST_F is defined using TEST_F but +test DefinedUsingTESTAndShouldFail is defined using TEST. You probably +want to change the TEST to TEST_F or move it to another test +case. +[ FAILED ] TEST_F_before_TEST_in_same_test_case.DefinedUsingTESTAndShouldFail +[----------] 2 tests from TEST_before_TEST_F_in_same_test_case +[ RUN ] TEST_before_TEST_F_in_same_test_case.DefinedUsingTEST +[ OK ] TEST_before_TEST_F_in_same_test_case.DefinedUsingTEST +[ RUN ] TEST_before_TEST_F_in_same_test_case.DefinedUsingTEST_FAndShouldFail +gtest.cc:#: Failure +Failed +All tests in the same test case must use the same test fixture +class, so mixing TEST_F and TEST in the same test case is +illegal. In test case TEST_before_TEST_F_in_same_test_case, +test DefinedUsingTEST_FAndShouldFail is defined using TEST_F but +test DefinedUsingTEST is defined using TEST. You probably +want to change the TEST to TEST_F or move it to another test +case. +[ FAILED ] TEST_before_TEST_F_in_same_test_case.DefinedUsingTEST_FAndShouldFail +[----------] 8 tests from ExpectNonfatalFailureTest +[ RUN ] ExpectNonfatalFailureTest.CanReferenceGlobalVariables +[ OK ] ExpectNonfatalFailureTest.CanReferenceGlobalVariables +[ RUN ] ExpectNonfatalFailureTest.CanReferenceLocalVariables +[ OK ] ExpectNonfatalFailureTest.CanReferenceLocalVariables +[ RUN ] ExpectNonfatalFailureTest.SucceedsWhenThereIsOneNonfatalFailure +[ OK ] ExpectNonfatalFailureTest.SucceedsWhenThereIsOneNonfatalFailure +[ RUN ] ExpectNonfatalFailureTest.FailsWhenThereIsNoNonfatalFailure +(expecting a failure) +gtest.cc:#: Failure +Expected: 1 non-fatal failure + Actual: 0 failures +[ FAILED ] ExpectNonfatalFailureTest.FailsWhenThereIsNoNonfatalFailure +[ RUN ] ExpectNonfatalFailureTest.FailsWhenThereAreTwoNonfatalFailures +(expecting a failure) +gtest.cc:#: Failure +Expected: 1 non-fatal failure + Actual: 2 failures +gtest_output_test_.cc:#: Non-fatal failure: +Failed +Expected non-fatal failure 1. + +gtest_output_test_.cc:#: Non-fatal failure: +Failed +Expected non-fatal failure 2. + +[ FAILED ] ExpectNonfatalFailureTest.FailsWhenThereAreTwoNonfatalFailures +[ RUN ] ExpectNonfatalFailureTest.FailsWhenThereIsOneFatalFailure +(expecting a failure) +gtest.cc:#: Failure +Expected: 1 non-fatal failure + Actual: +gtest_output_test_.cc:#: Fatal failure: +Failed +Expected fatal failure. 
+ +[ FAILED ] ExpectNonfatalFailureTest.FailsWhenThereIsOneFatalFailure +[ RUN ] ExpectNonfatalFailureTest.FailsWhenStatementReturns +(expecting a failure) +gtest.cc:#: Failure +Expected: 1 non-fatal failure + Actual: 0 failures +[ FAILED ] ExpectNonfatalFailureTest.FailsWhenStatementReturns +[ RUN ] ExpectNonfatalFailureTest.FailsWhenStatementThrows +(expecting a failure) +gtest.cc:#: Failure +Expected: 1 non-fatal failure + Actual: 0 failures +[ FAILED ] ExpectNonfatalFailureTest.FailsWhenStatementThrows +[----------] 8 tests from ExpectFatalFailureTest +[ RUN ] ExpectFatalFailureTest.CanReferenceGlobalVariables +[ OK ] ExpectFatalFailureTest.CanReferenceGlobalVariables +[ RUN ] ExpectFatalFailureTest.CanReferenceLocalStaticVariables +[ OK ] ExpectFatalFailureTest.CanReferenceLocalStaticVariables +[ RUN ] ExpectFatalFailureTest.SucceedsWhenThereIsOneFatalFailure +[ OK ] ExpectFatalFailureTest.SucceedsWhenThereIsOneFatalFailure +[ RUN ] ExpectFatalFailureTest.FailsWhenThereIsNoFatalFailure +(expecting a failure) +gtest.cc:#: Failure +Expected: 1 fatal failure + Actual: 0 failures +[ FAILED ] ExpectFatalFailureTest.FailsWhenThereIsNoFatalFailure +[ RUN ] ExpectFatalFailureTest.FailsWhenThereAreTwoFatalFailures +(expecting a failure) +gtest.cc:#: Failure +Expected: 1 fatal failure + Actual: 2 failures +gtest_output_test_.cc:#: Fatal failure: +Failed +Expected fatal failure. + +gtest_output_test_.cc:#: Fatal failure: +Failed +Expected fatal failure. + +[ FAILED ] ExpectFatalFailureTest.FailsWhenThereAreTwoFatalFailures +[ RUN ] ExpectFatalFailureTest.FailsWhenThereIsOneNonfatalFailure +(expecting a failure) +gtest.cc:#: Failure +Expected: 1 fatal failure + Actual: +gtest_output_test_.cc:#: Non-fatal failure: +Failed +Expected non-fatal failure. + +[ FAILED ] ExpectFatalFailureTest.FailsWhenThereIsOneNonfatalFailure +[ RUN ] ExpectFatalFailureTest.FailsWhenStatementReturns +(expecting a failure) +gtest.cc:#: Failure +Expected: 1 fatal failure + Actual: 0 failures +[ FAILED ] ExpectFatalFailureTest.FailsWhenStatementReturns +[ RUN ] ExpectFatalFailureTest.FailsWhenStatementThrows +(expecting a failure) +gtest.cc:#: Failure +Expected: 1 fatal failure + Actual: 0 failures +[ FAILED ] ExpectFatalFailureTest.FailsWhenStatementThrows +[----------] 2 tests from TypedTest/0, where TypeParam = int +[ RUN ] TypedTest/0.Success +[ OK ] TypedTest/0.Success +[ RUN ] TypedTest/0.Failure +gtest_output_test_.cc:#: Failure +Value of: TypeParam() + Actual: 0 +Expected: 1 +Expected failure +[ FAILED ] TypedTest/0.Failure, where TypeParam = int +[----------] 2 tests from Unsigned/TypedTestP/0, where TypeParam = unsigned char +[ RUN ] Unsigned/TypedTestP/0.Success +[ OK ] Unsigned/TypedTestP/0.Success +[ RUN ] Unsigned/TypedTestP/0.Failure +gtest_output_test_.cc:#: Failure +Value of: TypeParam() + Actual: '\0' +Expected: 1U +Which is: 1 +Expected failure +[ FAILED ] Unsigned/TypedTestP/0.Failure, where TypeParam = unsigned char +[----------] 2 tests from Unsigned/TypedTestP/1, where TypeParam = unsigned int +[ RUN ] Unsigned/TypedTestP/1.Success +[ OK ] Unsigned/TypedTestP/1.Success +[ RUN ] Unsigned/TypedTestP/1.Failure +gtest_output_test_.cc:#: Failure +Value of: TypeParam() + Actual: 0 +Expected: 1U +Which is: 1 +Expected failure +[ FAILED ] Unsigned/TypedTestP/1.Failure, where TypeParam = unsigned int +[----------] 4 tests from ExpectFailureTest +[ RUN ] ExpectFailureTest.ExpectFatalFailure +(expecting 1 failure) +gtest.cc:#: Failure +Expected: 1 fatal failure + Actual: +gtest_output_test_.cc:#: Success: 
+Succeeded + +(expecting 1 failure) +gtest.cc:#: Failure +Expected: 1 fatal failure + Actual: +gtest_output_test_.cc:#: Non-fatal failure: +Failed +Expected non-fatal failure. + +(expecting 1 failure) +gtest.cc:#: Failure +Expected: 1 fatal failure containing "Some other fatal failure expected." + Actual: +gtest_output_test_.cc:#: Fatal failure: +Failed +Expected fatal failure. + +[ FAILED ] ExpectFailureTest.ExpectFatalFailure +[ RUN ] ExpectFailureTest.ExpectNonFatalFailure +(expecting 1 failure) +gtest.cc:#: Failure +Expected: 1 non-fatal failure + Actual: +gtest_output_test_.cc:#: Success: +Succeeded + +(expecting 1 failure) +gtest.cc:#: Failure +Expected: 1 non-fatal failure + Actual: +gtest_output_test_.cc:#: Fatal failure: +Failed +Expected fatal failure. + +(expecting 1 failure) +gtest.cc:#: Failure +Expected: 1 non-fatal failure containing "Some other non-fatal failure." + Actual: +gtest_output_test_.cc:#: Non-fatal failure: +Failed +Expected non-fatal failure. + +[ FAILED ] ExpectFailureTest.ExpectNonFatalFailure +[ RUN ] ExpectFailureTest.ExpectFatalFailureOnAllThreads +(expecting 1 failure) +gtest.cc:#: Failure +Expected: 1 fatal failure + Actual: +gtest_output_test_.cc:#: Success: +Succeeded + +(expecting 1 failure) +gtest.cc:#: Failure +Expected: 1 fatal failure + Actual: +gtest_output_test_.cc:#: Non-fatal failure: +Failed +Expected non-fatal failure. + +(expecting 1 failure) +gtest.cc:#: Failure +Expected: 1 fatal failure containing "Some other fatal failure expected." + Actual: +gtest_output_test_.cc:#: Fatal failure: +Failed +Expected fatal failure. + +[ FAILED ] ExpectFailureTest.ExpectFatalFailureOnAllThreads +[ RUN ] ExpectFailureTest.ExpectNonFatalFailureOnAllThreads +(expecting 1 failure) +gtest.cc:#: Failure +Expected: 1 non-fatal failure + Actual: +gtest_output_test_.cc:#: Success: +Succeeded + +(expecting 1 failure) +gtest.cc:#: Failure +Expected: 1 non-fatal failure + Actual: +gtest_output_test_.cc:#: Fatal failure: +Failed +Expected fatal failure. + +(expecting 1 failure) +gtest.cc:#: Failure +Expected: 1 non-fatal failure containing "Some other non-fatal failure." + Actual: +gtest_output_test_.cc:#: Non-fatal failure: +Failed +Expected non-fatal failure. + +[ FAILED ] ExpectFailureTest.ExpectNonFatalFailureOnAllThreads +[----------] 2 tests from ExpectFailureWithThreadsTest +[ RUN ] ExpectFailureWithThreadsTest.ExpectFatalFailure +(expecting 2 failures) +gtest_output_test_.cc:#: Failure +Failed +Expected fatal failure. +gtest.cc:#: Failure +Expected: 1 fatal failure + Actual: 0 failures +[ FAILED ] ExpectFailureWithThreadsTest.ExpectFatalFailure +[ RUN ] ExpectFailureWithThreadsTest.ExpectNonFatalFailure +(expecting 2 failures) +gtest_output_test_.cc:#: Failure +Failed +Expected non-fatal failure. +gtest.cc:#: Failure +Expected: 1 non-fatal failure + Actual: 0 failures +[ FAILED ] ExpectFailureWithThreadsTest.ExpectNonFatalFailure +[----------] 1 test from ScopedFakeTestPartResultReporterTest +[ RUN ] ScopedFakeTestPartResultReporterTest.InterceptOnlyCurrentThread +(expecting 2 failures) +gtest_output_test_.cc:#: Failure +Failed +Expected fatal failure. +gtest_output_test_.cc:#: Failure +Failed +Expected non-fatal failure. 
+[ FAILED ] ScopedFakeTestPartResultReporterTest.InterceptOnlyCurrentThread +[----------] 1 test from PrintingFailingParams/FailingParamTest +[ RUN ] PrintingFailingParams/FailingParamTest.Fails/0 +gtest_output_test_.cc:#: Failure +Value of: GetParam() + Actual: 2 +Expected: 1 +[ FAILED ] PrintingFailingParams/FailingParamTest.Fails/0, where GetParam() = 2 +[----------] Global test environment tear-down +BarEnvironment::TearDown() called. +gtest_output_test_.cc:#: Failure +Failed +Expected non-fatal failure. +FooEnvironment::TearDown() called. +gtest_output_test_.cc:#: Failure +Failed +Expected fatal failure. +[==========] 63 tests from 28 test cases ran. +[ PASSED ] 21 tests. +[ FAILED ] 42 tests, listed below: +[ FAILED ] NonfatalFailureTest.EscapesStringOperands +[ FAILED ] FatalFailureTest.FatalFailureInSubroutine +[ FAILED ] FatalFailureTest.FatalFailureInNestedSubroutine +[ FAILED ] FatalFailureTest.NonfatalFailureInSubroutine +[ FAILED ] LoggingTest.InterleavingLoggingAndAssertions +[ FAILED ] SCOPED_TRACETest.ObeysScopes +[ FAILED ] SCOPED_TRACETest.WorksInLoop +[ FAILED ] SCOPED_TRACETest.WorksInSubroutine +[ FAILED ] SCOPED_TRACETest.CanBeNested +[ FAILED ] SCOPED_TRACETest.CanBeRepeated +[ FAILED ] SCOPED_TRACETest.WorksConcurrently +[ FAILED ] NonFatalFailureInFixtureConstructorTest.FailureInConstructor +[ FAILED ] FatalFailureInFixtureConstructorTest.FailureInConstructor +[ FAILED ] NonFatalFailureInSetUpTest.FailureInSetUp +[ FAILED ] FatalFailureInSetUpTest.FailureInSetUp +[ FAILED ] AddFailureAtTest.MessageContainsSpecifiedFileAndLineNumber +[ FAILED ] MixedUpTestCaseTest.ThisShouldFail +[ FAILED ] MixedUpTestCaseTest.ThisShouldFailToo +[ FAILED ] MixedUpTestCaseWithSameTestNameTest.TheSecondTestWithThisNameShouldFail +[ FAILED ] TEST_F_before_TEST_in_same_test_case.DefinedUsingTESTAndShouldFail +[ FAILED ] TEST_before_TEST_F_in_same_test_case.DefinedUsingTEST_FAndShouldFail +[ FAILED ] ExpectNonfatalFailureTest.FailsWhenThereIsNoNonfatalFailure +[ FAILED ] ExpectNonfatalFailureTest.FailsWhenThereAreTwoNonfatalFailures +[ FAILED ] ExpectNonfatalFailureTest.FailsWhenThereIsOneFatalFailure +[ FAILED ] ExpectNonfatalFailureTest.FailsWhenStatementReturns +[ FAILED ] ExpectNonfatalFailureTest.FailsWhenStatementThrows +[ FAILED ] ExpectFatalFailureTest.FailsWhenThereIsNoFatalFailure +[ FAILED ] ExpectFatalFailureTest.FailsWhenThereAreTwoFatalFailures +[ FAILED ] ExpectFatalFailureTest.FailsWhenThereIsOneNonfatalFailure +[ FAILED ] ExpectFatalFailureTest.FailsWhenStatementReturns +[ FAILED ] ExpectFatalFailureTest.FailsWhenStatementThrows +[ FAILED ] TypedTest/0.Failure, where TypeParam = int +[ FAILED ] Unsigned/TypedTestP/0.Failure, where TypeParam = unsigned char +[ FAILED ] Unsigned/TypedTestP/1.Failure, where TypeParam = unsigned int +[ FAILED ] ExpectFailureTest.ExpectFatalFailure +[ FAILED ] ExpectFailureTest.ExpectNonFatalFailure +[ FAILED ] ExpectFailureTest.ExpectFatalFailureOnAllThreads +[ FAILED ] ExpectFailureTest.ExpectNonFatalFailureOnAllThreads +[ FAILED ] ExpectFailureWithThreadsTest.ExpectFatalFailure +[ FAILED ] ExpectFailureWithThreadsTest.ExpectNonFatalFailure +[ FAILED ] ScopedFakeTestPartResultReporterTest.InterceptOnlyCurrentThread +[ FAILED ] PrintingFailingParams/FailingParamTest.Fails/0, where GetParam() = 2 + +42 FAILED TESTS + YOU HAVE 1 DISABLED TEST + +Note: Google Test filter = FatalFailureTest.*:LoggingTest.* +[==========] Running 4 tests from 2 test cases. +[----------] Global test environment set-up. 
+[----------] 3 tests from FatalFailureTest +[ RUN ] FatalFailureTest.FatalFailureInSubroutine +(expecting a failure that x should be 1) +gtest_output_test_.cc:#: Failure +Value of: x + Actual: 2 +Expected: 1 +[ FAILED ] FatalFailureTest.FatalFailureInSubroutine (? ms) +[ RUN ] FatalFailureTest.FatalFailureInNestedSubroutine +(expecting a failure that x should be 1) +gtest_output_test_.cc:#: Failure +Value of: x + Actual: 2 +Expected: 1 +[ FAILED ] FatalFailureTest.FatalFailureInNestedSubroutine (? ms) +[ RUN ] FatalFailureTest.NonfatalFailureInSubroutine +(expecting a failure on false) +gtest_output_test_.cc:#: Failure +Value of: false + Actual: false +Expected: true +[ FAILED ] FatalFailureTest.NonfatalFailureInSubroutine (? ms) +[----------] 3 tests from FatalFailureTest (? ms total) + +[----------] 1 test from LoggingTest +[ RUN ] LoggingTest.InterleavingLoggingAndAssertions +(expecting 2 failures on (3) >= (a[i])) +i == 0 +i == 1 +gtest_output_test_.cc:#: Failure +Expected: (3) >= (a[i]), actual: 3 vs 9 +i == 2 +i == 3 +gtest_output_test_.cc:#: Failure +Expected: (3) >= (a[i]), actual: 3 vs 6 +[ FAILED ] LoggingTest.InterleavingLoggingAndAssertions (? ms) +[----------] 1 test from LoggingTest (? ms total) + +[----------] Global test environment tear-down +[==========] 4 tests from 2 test cases ran. (? ms total) +[ PASSED ] 0 tests. +[ FAILED ] 4 tests, listed below: +[ FAILED ] FatalFailureTest.FatalFailureInSubroutine +[ FAILED ] FatalFailureTest.FatalFailureInNestedSubroutine +[ FAILED ] FatalFailureTest.NonfatalFailureInSubroutine +[ FAILED ] LoggingTest.InterleavingLoggingAndAssertions + + 4 FAILED TESTS +Note: Google Test filter = *DISABLED_* +[==========] Running 1 test from 1 test case. +[----------] Global test environment set-up. +[----------] 1 test from DisabledTestsWarningTest +[ RUN ] DisabledTestsWarningTest.DISABLED_AlsoRunDisabledTestsFlagSuppressesWarning +[ OK ] DisabledTestsWarningTest.DISABLED_AlsoRunDisabledTestsFlagSuppressesWarning +[----------] Global test environment tear-down +[==========] 1 test from 1 test case ran. +[ PASSED ] 1 test. +Note: Google Test filter = PassingTest.* +Note: This is test shard 2 of 2. +[==========] Running 1 test from 1 test case. +[----------] Global test environment set-up. +[----------] 1 test from PassingTest +[ RUN ] PassingTest.PassingTest2 +[ OK ] PassingTest.PassingTest2 +[----------] Global test environment tear-down +[==========] 1 test from 1 test case ran. +[ PASSED ] 1 test. diff --git a/external/gtest/test/gtest_pred_impl_unittest.cc b/external/gtest/test/gtest_pred_impl_unittest.cc new file mode 100755 index 0000000000..a84eff860a --- /dev/null +++ b/external/gtest/test/gtest_pred_impl_unittest.cc @@ -0,0 +1,2427 @@ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// This file is AUTOMATICALLY GENERATED on 10/31/2011 by command
+// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND!
+
+// Regression test for gtest_pred_impl.h
+//
+// This file is generated by a script and quite long. If you intend to
+// learn how Google Test works by reading its unit tests, read
+// gtest_unittest.cc instead.
+//
+// This is intended as a regression test for the Google Test predicate
+// assertions. We compile it as part of the gtest_unittest target
+// only to keep the implementation tidy and compact, as it is quite
+// involved to set up the stage for testing Google Test using Google
+// Test itself.
+//
+// Currently, gtest_unittest takes ~11 seconds to run in the testing
+// daemon. In the future, if it grows too large and needs much more
+// time to finish, we should consider separating this file into a
+// stand-alone regression test.
+
+#include <iostream>
+
+#include "gtest/gtest.h"
+#include "gtest/gtest-spi.h"
+
+// A user-defined data type.
+struct Bool {
+  explicit Bool(int val) : value(val != 0) {}
+
+  bool operator>(int n) const { return value > Bool(n).value; }
+
+  Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
+
+  bool operator==(const Bool& rhs) const { return value == rhs.value; }
+
+  bool value;
+};
+
+// Enables Bool to be used in assertions.
+std::ostream& operator<<(std::ostream& os, const Bool& x) {
+  return os << (x.value ? "true" : "false");
+}
+
+// Sample functions/functors for testing unary predicate assertions.
+
+// A unary predicate function.
+template <typename T1>
+bool PredFunction1(T1 v1) {
+  return v1 > 0;
+}
+
+// The following two functions are needed to circumvent a bug in
+// gcc 2.95.3, which sometimes has problems with the above template
+// function.
+bool PredFunction1Int(int v1) {
+  return v1 > 0;
+}
+bool PredFunction1Bool(Bool v1) {
+  return v1 > 0;
+}
+
+// A unary predicate functor.
+struct PredFunctor1 {
+  template <typename T1>
+  bool operator()(const T1& v1) {
+    return v1 > 0;
+  }
+};
+
+// A unary predicate-formatter function.
+template <typename T1>
+testing::AssertionResult PredFormatFunction1(const char* e1,
+                                             const T1& v1) {
+  if (PredFunction1(v1))
+    return testing::AssertionSuccess();
+
+  return testing::AssertionFailure()
+      << e1
+      << " is expected to be positive, but evaluates to "
+      << v1 << ".";
+}
+
+// A unary predicate-formatter functor.
+struct PredFormatFunctor1 {
+  template <typename T1>
+  testing::AssertionResult operator()(const char* e1,
+                                      const T1& v1) const {
+    return PredFormatFunction1(e1, v1);
+  }
+};
+
+// Tests for {EXPECT|ASSERT}_PRED_FORMAT1.
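For context on the API these generated tests exercise: EXPECT_PRED1(pred, v1) asserts that pred(v1) is true and, on failure, prints each argument's source text and value, while EXPECT_PRED_FORMAT1(formatter, v1) hands the argument's source text and value to a user-supplied predicate-formatter returning testing::AssertionResult, which owns the failure message. A minimal usage sketch, separate from the patch itself (IsPositive and IsPositiveFormat are hypothetical names for illustration):

#include "gtest/gtest.h"

// Hypothetical plain predicate: returns true when the check passes.
bool IsPositive(int n) { return n > 0; }

// Hypothetical predicate-formatter: receives the argument's source text
// ("x" below) plus its value, and builds the failure message itself.
testing::AssertionResult IsPositiveFormat(const char* expr, int n) {
  if (n > 0)
    return testing::AssertionSuccess();
  return testing::AssertionFailure()
      << expr << " = " << n << ", which is not positive.";
}

TEST(PredAssertionDemo, TwoFlavors) {
  int x = 5;
  EXPECT_PRED1(IsPositive, x);               // message is auto-generated
  EXPECT_PRED_FORMAT1(IsPositiveFormat, x);  // message comes from the formatter
}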
+
+class Predicate1Test : public testing::Test {
+ protected:
+  virtual void SetUp() {
+    expected_to_finish_ = true;
+    finished_ = false;
+    n1_ = 0;
+  }
+
+  virtual void TearDown() {
+    // Verifies that each of the predicate's arguments was evaluated
+    // exactly once.
+    EXPECT_EQ(1, n1_) <<
+        "The predicate assertion didn't evaluate argument 2 "
+        "exactly once.";
+
+    // Verifies that the control flow in the test function is expected.
+    if (expected_to_finish_ && !finished_) {
+      FAIL() << "The predicate assertion unexpectedly aborted the test.";
+    } else if (!expected_to_finish_ && finished_) {
+      FAIL() << "The failed predicate assertion didn't abort the test "
+                "as expected.";
+    }
+  }
+
+  // true iff the test function is expected to run to finish.
+  static bool expected_to_finish_;
+
+  // true iff the test function did run to finish.
+  static bool finished_;
+
+  static int n1_;
+};
+
+bool Predicate1Test::expected_to_finish_;
+bool Predicate1Test::finished_;
+int Predicate1Test::n1_;
+
+typedef Predicate1Test EXPECT_PRED_FORMAT1Test;
+typedef Predicate1Test ASSERT_PRED_FORMAT1Test;
+typedef Predicate1Test EXPECT_PRED1Test;
+typedef Predicate1Test ASSERT_PRED1Test;
+
+// Tests a successful EXPECT_PRED1 where the
+// predicate-formatter is a function on a built-in type (int).
+TEST_F(EXPECT_PRED1Test, FunctionOnBuiltInTypeSuccess) {
+  EXPECT_PRED1(PredFunction1Int,
+               ++n1_);
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED1 where the
+// predicate-formatter is a function on a user-defined type (Bool).
+TEST_F(EXPECT_PRED1Test, FunctionOnUserTypeSuccess) {
+  EXPECT_PRED1(PredFunction1Bool,
+               Bool(++n1_));
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED1 where the
+// predicate-formatter is a functor on a built-in type (int).
+TEST_F(EXPECT_PRED1Test, FunctorOnBuiltInTypeSuccess) {
+  EXPECT_PRED1(PredFunctor1(),
+               ++n1_);
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED1 where the
+// predicate-formatter is a functor on a user-defined type (Bool).
+TEST_F(EXPECT_PRED1Test, FunctorOnUserTypeSuccess) {
+  EXPECT_PRED1(PredFunctor1(),
+               Bool(++n1_));
+  finished_ = true;
+}
+
+// Tests a failed EXPECT_PRED1 where the
+// predicate-formatter is a function on a built-in type (int).
+TEST_F(EXPECT_PRED1Test, FunctionOnBuiltInTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED1(PredFunction1Int,
+                 n1_++);
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed EXPECT_PRED1 where the
+// predicate-formatter is a function on a user-defined type (Bool).
+TEST_F(EXPECT_PRED1Test, FunctionOnUserTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED1(PredFunction1Bool,
+                 Bool(n1_++));
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed EXPECT_PRED1 where the
+// predicate-formatter is a functor on a built-in type (int).
+TEST_F(EXPECT_PRED1Test, FunctorOnBuiltInTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED1(PredFunctor1(),
+                 n1_++);
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed EXPECT_PRED1 where the
+// predicate-formatter is a functor on a user-defined type (Bool).
+TEST_F(EXPECT_PRED1Test, FunctorOnUserTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED1(PredFunctor1(),
+                 Bool(n1_++));
+    finished_ = true;
+  }, "");
+}
+
+// Tests a successful ASSERT_PRED1 where the
+// predicate-formatter is a function on a built-in type (int).
+TEST_F(ASSERT_PRED1Test, FunctionOnBuiltInTypeSuccess) { + ASSERT_PRED1(PredFunction1Int, + ++n1_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED1 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED1Test, FunctionOnUserTypeSuccess) { + ASSERT_PRED1(PredFunction1Bool, + Bool(++n1_)); + finished_ = true; +} + +// Tests a successful ASSERT_PRED1 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED1Test, FunctorOnBuiltInTypeSuccess) { + ASSERT_PRED1(PredFunctor1(), + ++n1_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED1 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED1Test, FunctorOnUserTypeSuccess) { + ASSERT_PRED1(PredFunctor1(), + Bool(++n1_)); + finished_ = true; +} + +// Tests a failed ASSERT_PRED1 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED1Test, FunctionOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED1(PredFunction1Int, + n1_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED1 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED1Test, FunctionOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED1(PredFunction1Bool, + Bool(n1_++)); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED1 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED1Test, FunctorOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED1(PredFunctor1(), + n1_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED1 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED1Test, FunctorOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED1(PredFunctor1(), + Bool(n1_++)); + finished_ = true; + }, ""); +} + +// Tests a successful EXPECT_PRED_FORMAT1 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT1Test, FunctionOnBuiltInTypeSuccess) { + EXPECT_PRED_FORMAT1(PredFormatFunction1, + ++n1_); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT1 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT1Test, FunctionOnUserTypeSuccess) { + EXPECT_PRED_FORMAT1(PredFormatFunction1, + Bool(++n1_)); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT1 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT1Test, FunctorOnBuiltInTypeSuccess) { + EXPECT_PRED_FORMAT1(PredFormatFunctor1(), + ++n1_); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT1 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT1Test, FunctorOnUserTypeSuccess) { + EXPECT_PRED_FORMAT1(PredFormatFunctor1(), + Bool(++n1_)); + finished_ = true; +} + +// Tests a failed EXPECT_PRED_FORMAT1 where the +// predicate-formatter is a function on a built-in type (int). 
+TEST_F(EXPECT_PRED_FORMAT1Test, FunctionOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT1(PredFormatFunction1, + n1_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT1 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT1Test, FunctionOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT1(PredFormatFunction1, + Bool(n1_++)); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT1 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT1Test, FunctorOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT1(PredFormatFunctor1(), + n1_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT1 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT1Test, FunctorOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT1(PredFormatFunctor1(), + Bool(n1_++)); + finished_ = true; + }, ""); +} + +// Tests a successful ASSERT_PRED_FORMAT1 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT1Test, FunctionOnBuiltInTypeSuccess) { + ASSERT_PRED_FORMAT1(PredFormatFunction1, + ++n1_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT1 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED_FORMAT1Test, FunctionOnUserTypeSuccess) { + ASSERT_PRED_FORMAT1(PredFormatFunction1, + Bool(++n1_)); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT1 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnBuiltInTypeSuccess) { + ASSERT_PRED_FORMAT1(PredFormatFunctor1(), + ++n1_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT1 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnUserTypeSuccess) { + ASSERT_PRED_FORMAT1(PredFormatFunctor1(), + Bool(++n1_)); + finished_ = true; +} + +// Tests a failed ASSERT_PRED_FORMAT1 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT1Test, FunctionOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED_FORMAT1(PredFormatFunction1, + n1_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED_FORMAT1 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED_FORMAT1Test, FunctionOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED_FORMAT1(PredFormatFunction1, + Bool(n1_++)); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED_FORMAT1 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED_FORMAT1(PredFormatFunctor1(), + n1_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED_FORMAT1 where the +// predicate-formatter is a functor on a user-defined type (Bool). 
+TEST_F(ASSERT_PRED_FORMAT1Test, FunctorOnUserTypeFailure) {
+  expected_to_finish_ = false;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT1(PredFormatFunctor1(),
+                        Bool(n1_++));
+    finished_ = true;
+  }, "");
+}
+// Sample functions/functors for testing binary predicate assertions.
+
+// A binary predicate function.
+template <typename T1, typename T2>
+bool PredFunction2(T1 v1, T2 v2) {
+  return v1 + v2 > 0;
+}
+
+// The following two functions are needed to circumvent a bug in
+// gcc 2.95.3, which sometimes has problems with the above template
+// function.
+bool PredFunction2Int(int v1, int v2) {
+  return v1 + v2 > 0;
+}
+bool PredFunction2Bool(Bool v1, Bool v2) {
+  return v1 + v2 > 0;
+}
+
+// A binary predicate functor.
+struct PredFunctor2 {
+  template <typename T1, typename T2>
+  bool operator()(const T1& v1,
+                  const T2& v2) {
+    return v1 + v2 > 0;
+  }
+};
+
+// A binary predicate-formatter function.
+template <typename T1, typename T2>
+testing::AssertionResult PredFormatFunction2(const char* e1,
+                                             const char* e2,
+                                             const T1& v1,
+                                             const T2& v2) {
+  if (PredFunction2(v1, v2))
+    return testing::AssertionSuccess();
+
+  return testing::AssertionFailure()
+      << e1 << " + " << e2
+      << " is expected to be positive, but evaluates to "
+      << v1 + v2 << ".";
+}
+
+// A binary predicate-formatter functor.
+struct PredFormatFunctor2 {
+  template <typename T1, typename T2>
+  testing::AssertionResult operator()(const char* e1,
+                                      const char* e2,
+                                      const T1& v1,
+                                      const T2& v2) const {
+    return PredFormatFunction2(e1, e2, v1, v2);
+  }
+};
+
+// Tests for {EXPECT|ASSERT}_PRED_FORMAT2.
+
+class Predicate2Test : public testing::Test {
+ protected:
+  virtual void SetUp() {
+    expected_to_finish_ = true;
+    finished_ = false;
+    n1_ = n2_ = 0;
+  }
+
+  virtual void TearDown() {
+    // Verifies that each of the predicate's arguments was evaluated
+    // exactly once.
+    EXPECT_EQ(1, n1_) <<
+        "The predicate assertion didn't evaluate argument 2 "
+        "exactly once.";
+    EXPECT_EQ(1, n2_) <<
+        "The predicate assertion didn't evaluate argument 3 "
+        "exactly once.";
+
+    // Verifies that the control flow in the test function is expected.
+    if (expected_to_finish_ && !finished_) {
+      FAIL() << "The predicate assertion unexpectedly aborted the test.";
+    } else if (!expected_to_finish_ && finished_) {
+      FAIL() << "The failed predicate assertion didn't abort the test "
+                "as expected.";
+    }
+  }
+
+  // true iff the test function is expected to run to finish.
+  static bool expected_to_finish_;
+
+  // true iff the test function did run to finish.
+  static bool finished_;
+
+  static int n1_;
+  static int n2_;
+};
+
+bool Predicate2Test::expected_to_finish_;
+bool Predicate2Test::finished_;
+int Predicate2Test::n1_;
+int Predicate2Test::n2_;
+
+typedef Predicate2Test EXPECT_PRED_FORMAT2Test;
+typedef Predicate2Test ASSERT_PRED_FORMAT2Test;
+typedef Predicate2Test EXPECT_PRED2Test;
+typedef Predicate2Test ASSERT_PRED2Test;
+
+// Tests a successful EXPECT_PRED2 where the
+// predicate-formatter is a function on a built-in type (int).
+TEST_F(EXPECT_PRED2Test, FunctionOnBuiltInTypeSuccess) {
+  EXPECT_PRED2(PredFunction2Int,
+               ++n1_,
+               ++n2_);
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED2 where the
+// predicate-formatter is a function on a user-defined type (Bool).
+TEST_F(EXPECT_PRED2Test, FunctionOnUserTypeSuccess) {
+  EXPECT_PRED2(PredFunction2Bool,
+               Bool(++n1_),
+               Bool(++n2_));
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED2 where the
+// predicate-formatter is a functor on a built-in type (int).
+TEST_F(EXPECT_PRED2Test, FunctorOnBuiltInTypeSuccess) { + EXPECT_PRED2(PredFunctor2(), + ++n1_, + ++n2_); + finished_ = true; +} + +// Tests a successful EXPECT_PRED2 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(EXPECT_PRED2Test, FunctorOnUserTypeSuccess) { + EXPECT_PRED2(PredFunctor2(), + Bool(++n1_), + Bool(++n2_)); + finished_ = true; +} + +// Tests a failed EXPECT_PRED2 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(EXPECT_PRED2Test, FunctionOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED2(PredFunction2Int, + n1_++, + n2_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED2 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(EXPECT_PRED2Test, FunctionOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED2(PredFunction2Bool, + Bool(n1_++), + Bool(n2_++)); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED2 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(EXPECT_PRED2Test, FunctorOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED2(PredFunctor2(), + n1_++, + n2_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED2 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(EXPECT_PRED2Test, FunctorOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED2(PredFunctor2(), + Bool(n1_++), + Bool(n2_++)); + finished_ = true; + }, ""); +} + +// Tests a successful ASSERT_PRED2 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED2Test, FunctionOnBuiltInTypeSuccess) { + ASSERT_PRED2(PredFunction2Int, + ++n1_, + ++n2_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED2 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED2Test, FunctionOnUserTypeSuccess) { + ASSERT_PRED2(PredFunction2Bool, + Bool(++n1_), + Bool(++n2_)); + finished_ = true; +} + +// Tests a successful ASSERT_PRED2 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED2Test, FunctorOnBuiltInTypeSuccess) { + ASSERT_PRED2(PredFunctor2(), + ++n1_, + ++n2_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED2 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED2Test, FunctorOnUserTypeSuccess) { + ASSERT_PRED2(PredFunctor2(), + Bool(++n1_), + Bool(++n2_)); + finished_ = true; +} + +// Tests a failed ASSERT_PRED2 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED2Test, FunctionOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED2(PredFunction2Int, + n1_++, + n2_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED2 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED2Test, FunctionOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED2(PredFunction2Bool, + Bool(n1_++), + Bool(n2_++)); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED2 where the +// predicate-formatter is a functor on a built-in type (int). 
+TEST_F(ASSERT_PRED2Test, FunctorOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED2(PredFunctor2(), + n1_++, + n2_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED2 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED2Test, FunctorOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED2(PredFunctor2(), + Bool(n1_++), + Bool(n2_++)); + finished_ = true; + }, ""); +} + +// Tests a successful EXPECT_PRED_FORMAT2 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT2Test, FunctionOnBuiltInTypeSuccess) { + EXPECT_PRED_FORMAT2(PredFormatFunction2, + ++n1_, + ++n2_); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT2 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT2Test, FunctionOnUserTypeSuccess) { + EXPECT_PRED_FORMAT2(PredFormatFunction2, + Bool(++n1_), + Bool(++n2_)); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT2 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT2Test, FunctorOnBuiltInTypeSuccess) { + EXPECT_PRED_FORMAT2(PredFormatFunctor2(), + ++n1_, + ++n2_); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT2 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT2Test, FunctorOnUserTypeSuccess) { + EXPECT_PRED_FORMAT2(PredFormatFunctor2(), + Bool(++n1_), + Bool(++n2_)); + finished_ = true; +} + +// Tests a failed EXPECT_PRED_FORMAT2 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT2Test, FunctionOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT2(PredFormatFunction2, + n1_++, + n2_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT2 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT2Test, FunctionOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT2(PredFormatFunction2, + Bool(n1_++), + Bool(n2_++)); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT2 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT2Test, FunctorOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT2(PredFormatFunctor2(), + n1_++, + n2_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT2 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT2Test, FunctorOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT2(PredFormatFunctor2(), + Bool(n1_++), + Bool(n2_++)); + finished_ = true; + }, ""); +} + +// Tests a successful ASSERT_PRED_FORMAT2 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT2Test, FunctionOnBuiltInTypeSuccess) { + ASSERT_PRED_FORMAT2(PredFormatFunction2, + ++n1_, + ++n2_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT2 where the +// predicate-formatter is a function on a user-defined type (Bool). 
+TEST_F(ASSERT_PRED_FORMAT2Test, FunctionOnUserTypeSuccess) {
+  ASSERT_PRED_FORMAT2(PredFormatFunction2,
+                      Bool(++n1_),
+                      Bool(++n2_));
+  finished_ = true;
+}
+
+// Tests a successful ASSERT_PRED_FORMAT2 where the
+// predicate-formatter is a functor on a built-in type (int).
+TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnBuiltInTypeSuccess) {
+  ASSERT_PRED_FORMAT2(PredFormatFunctor2(),
+                      ++n1_,
+                      ++n2_);
+  finished_ = true;
+}
+
+// Tests a successful ASSERT_PRED_FORMAT2 where the
+// predicate-formatter is a functor on a user-defined type (Bool).
+TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnUserTypeSuccess) {
+  ASSERT_PRED_FORMAT2(PredFormatFunctor2(),
+                      Bool(++n1_),
+                      Bool(++n2_));
+  finished_ = true;
+}
+
+// Tests a failed ASSERT_PRED_FORMAT2 where the
+// predicate-formatter is a function on a built-in type (int).
+TEST_F(ASSERT_PRED_FORMAT2Test, FunctionOnBuiltInTypeFailure) {
+  expected_to_finish_ = false;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT2(PredFormatFunction2,
+                        n1_++,
+                        n2_++);
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed ASSERT_PRED_FORMAT2 where the
+// predicate-formatter is a function on a user-defined type (Bool).
+TEST_F(ASSERT_PRED_FORMAT2Test, FunctionOnUserTypeFailure) {
+  expected_to_finish_ = false;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT2(PredFormatFunction2,
+                        Bool(n1_++),
+                        Bool(n2_++));
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed ASSERT_PRED_FORMAT2 where the
+// predicate-formatter is a functor on a built-in type (int).
+TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnBuiltInTypeFailure) {
+  expected_to_finish_ = false;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT2(PredFormatFunctor2(),
+                        n1_++,
+                        n2_++);
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed ASSERT_PRED_FORMAT2 where the
+// predicate-formatter is a functor on a user-defined type (Bool).
+TEST_F(ASSERT_PRED_FORMAT2Test, FunctorOnUserTypeFailure) {
+  expected_to_finish_ = false;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT2(PredFormatFunctor2(),
+                        Bool(n1_++),
+                        Bool(n2_++));
+    finished_ = true;
+  }, "");
+}
+// Sample functions/functors for testing ternary predicate assertions.
+
+// A ternary predicate function.
+template <typename T1, typename T2, typename T3>
+bool PredFunction3(T1 v1, T2 v2, T3 v3) {
+  return v1 + v2 + v3 > 0;
+}
+
+// The following two functions are needed to circumvent a bug in
+// gcc 2.95.3, which sometimes has problems with the above template
+// function.
+bool PredFunction3Int(int v1, int v2, int v3) {
+  return v1 + v2 + v3 > 0;
+}
+bool PredFunction3Bool(Bool v1, Bool v2, Bool v3) {
+  return v1 + v2 + v3 > 0;
+}
+
+// A ternary predicate functor.
+struct PredFunctor3 {
+  template <typename T1, typename T2, typename T3>
+  bool operator()(const T1& v1,
+                  const T2& v2,
+                  const T3& v3) {
+    return v1 + v2 + v3 > 0;
+  }
+};
+
+// A ternary predicate-formatter function.
+template <typename T1, typename T2, typename T3>
+testing::AssertionResult PredFormatFunction3(const char* e1,
+                                             const char* e2,
+                                             const char* e3,
+                                             const T1& v1,
+                                             const T2& v2,
+                                             const T3& v3) {
+  if (PredFunction3(v1, v2, v3))
+    return testing::AssertionSuccess();
+
+  return testing::AssertionFailure()
+      << e1 << " + " << e2 << " + " << e3
+      << " is expected to be positive, but evaluates to "
+      << v1 + v2 + v3 << ".";
+}
+
+// A ternary predicate-formatter functor.
+struct PredFormatFunctor3 {
+  template <typename T1, typename T2, typename T3>
+  testing::AssertionResult operator()(const char* e1,
+                                      const char* e2,
+                                      const char* e3,
+                                      const T1& v1,
+                                      const T2& v2,
+                                      const T3& v3) const {
+    return PredFormatFunction3(e1, e2, e3, v1, v2, v3);
+  }
+};
+
+// Tests for {EXPECT|ASSERT}_PRED_FORMAT3.
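The Predicate*Test fixtures in this file all verify the same two contracts: each macro argument is evaluated exactly once (the tests pass ++n/n++ expressions and TearDown checks the counters), and EXPECT_PRED* continues the test on failure while ASSERT_PRED* aborts the test function (intercepted here via EXPECT_NONFATAL_FAILURE/EXPECT_FATAL_FAILURE from gtest-spi.h). A minimal sketch of the single-evaluation property, standalone and not part of the patch (IsPositive is a hypothetical predicate):

#include "gtest/gtest.h"

// Hypothetical predicate used only for this illustration.
bool IsPositive(int n) { return n > 0; }

TEST(SingleEvaluationDemo, ArgumentEvaluatedExactlyOnce) {
  int n = 0;
  // The macro both tests its argument and, on failure, would print it,
  // yet ++n must execute exactly once.
  EXPECT_PRED1(IsPositive, ++n);
  EXPECT_EQ(1, n);  // n is 1, not 2, afterwards
}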
+
+class Predicate3Test : public testing::Test {
+ protected:
+  virtual void SetUp() {
+    expected_to_finish_ = true;
+    finished_ = false;
+    n1_ = n2_ = n3_ = 0;
+  }
+
+  virtual void TearDown() {
+    // Verifies that each of the predicate's arguments was evaluated
+    // exactly once.
+    EXPECT_EQ(1, n1_) <<
+        "The predicate assertion didn't evaluate argument 2 "
+        "exactly once.";
+    EXPECT_EQ(1, n2_) <<
+        "The predicate assertion didn't evaluate argument 3 "
+        "exactly once.";
+    EXPECT_EQ(1, n3_) <<
+        "The predicate assertion didn't evaluate argument 4 "
+        "exactly once.";
+
+    // Verifies that the control flow in the test function is expected.
+    if (expected_to_finish_ && !finished_) {
+      FAIL() << "The predicate assertion unexpectedly aborted the test.";
+    } else if (!expected_to_finish_ && finished_) {
+      FAIL() << "The failed predicate assertion didn't abort the test "
+                "as expected.";
+    }
+  }
+
+  // true iff the test function is expected to run to finish.
+  static bool expected_to_finish_;
+
+  // true iff the test function did run to finish.
+  static bool finished_;
+
+  static int n1_;
+  static int n2_;
+  static int n3_;
+};
+
+bool Predicate3Test::expected_to_finish_;
+bool Predicate3Test::finished_;
+int Predicate3Test::n1_;
+int Predicate3Test::n2_;
+int Predicate3Test::n3_;
+
+typedef Predicate3Test EXPECT_PRED_FORMAT3Test;
+typedef Predicate3Test ASSERT_PRED_FORMAT3Test;
+typedef Predicate3Test EXPECT_PRED3Test;
+typedef Predicate3Test ASSERT_PRED3Test;
+
+// Tests a successful EXPECT_PRED3 where the
+// predicate-formatter is a function on a built-in type (int).
+TEST_F(EXPECT_PRED3Test, FunctionOnBuiltInTypeSuccess) {
+  EXPECT_PRED3(PredFunction3Int,
+               ++n1_,
+               ++n2_,
+               ++n3_);
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED3 where the
+// predicate-formatter is a function on a user-defined type (Bool).
+TEST_F(EXPECT_PRED3Test, FunctionOnUserTypeSuccess) {
+  EXPECT_PRED3(PredFunction3Bool,
+               Bool(++n1_),
+               Bool(++n2_),
+               Bool(++n3_));
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED3 where the
+// predicate-formatter is a functor on a built-in type (int).
+TEST_F(EXPECT_PRED3Test, FunctorOnBuiltInTypeSuccess) {
+  EXPECT_PRED3(PredFunctor3(),
+               ++n1_,
+               ++n2_,
+               ++n3_);
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED3 where the
+// predicate-formatter is a functor on a user-defined type (Bool).
+TEST_F(EXPECT_PRED3Test, FunctorOnUserTypeSuccess) {
+  EXPECT_PRED3(PredFunctor3(),
+               Bool(++n1_),
+               Bool(++n2_),
+               Bool(++n3_));
+  finished_ = true;
+}
+
+// Tests a failed EXPECT_PRED3 where the
+// predicate-formatter is a function on a built-in type (int).
+TEST_F(EXPECT_PRED3Test, FunctionOnBuiltInTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED3(PredFunction3Int,
+                 n1_++,
+                 n2_++,
+                 n3_++);
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed EXPECT_PRED3 where the
+// predicate-formatter is a function on a user-defined type (Bool).
+TEST_F(EXPECT_PRED3Test, FunctionOnUserTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED3(PredFunction3Bool,
+                 Bool(n1_++),
+                 Bool(n2_++),
+                 Bool(n3_++));
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed EXPECT_PRED3 where the
+// predicate-formatter is a functor on a built-in type (int).
+TEST_F(EXPECT_PRED3Test, FunctorOnBuiltInTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED3(PredFunctor3(),
+                 n1_++,
+                 n2_++,
+                 n3_++);
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed EXPECT_PRED3 where the
+// predicate-formatter is a functor on a user-defined type (Bool).
+TEST_F(EXPECT_PRED3Test, FunctorOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED3(PredFunctor3(), + Bool(n1_++), + Bool(n2_++), + Bool(n3_++)); + finished_ = true; + }, ""); +} + +// Tests a successful ASSERT_PRED3 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED3Test, FunctionOnBuiltInTypeSuccess) { + ASSERT_PRED3(PredFunction3Int, + ++n1_, + ++n2_, + ++n3_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED3 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED3Test, FunctionOnUserTypeSuccess) { + ASSERT_PRED3(PredFunction3Bool, + Bool(++n1_), + Bool(++n2_), + Bool(++n3_)); + finished_ = true; +} + +// Tests a successful ASSERT_PRED3 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED3Test, FunctorOnBuiltInTypeSuccess) { + ASSERT_PRED3(PredFunctor3(), + ++n1_, + ++n2_, + ++n3_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED3 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED3Test, FunctorOnUserTypeSuccess) { + ASSERT_PRED3(PredFunctor3(), + Bool(++n1_), + Bool(++n2_), + Bool(++n3_)); + finished_ = true; +} + +// Tests a failed ASSERT_PRED3 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED3Test, FunctionOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED3(PredFunction3Int, + n1_++, + n2_++, + n3_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED3 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED3Test, FunctionOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED3(PredFunction3Bool, + Bool(n1_++), + Bool(n2_++), + Bool(n3_++)); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED3 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED3Test, FunctorOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED3(PredFunctor3(), + n1_++, + n2_++, + n3_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED3 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED3Test, FunctorOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED3(PredFunctor3(), + Bool(n1_++), + Bool(n2_++), + Bool(n3_++)); + finished_ = true; + }, ""); +} + +// Tests a successful EXPECT_PRED_FORMAT3 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnBuiltInTypeSuccess) { + EXPECT_PRED_FORMAT3(PredFormatFunction3, + ++n1_, + ++n2_, + ++n3_); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT3 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnUserTypeSuccess) { + EXPECT_PRED_FORMAT3(PredFormatFunction3, + Bool(++n1_), + Bool(++n2_), + Bool(++n3_)); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT3 where the +// predicate-formatter is a functor on a built-in type (int). 
+TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnBuiltInTypeSuccess) { + EXPECT_PRED_FORMAT3(PredFormatFunctor3(), + ++n1_, + ++n2_, + ++n3_); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT3 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnUserTypeSuccess) { + EXPECT_PRED_FORMAT3(PredFormatFunctor3(), + Bool(++n1_), + Bool(++n2_), + Bool(++n3_)); + finished_ = true; +} + +// Tests a failed EXPECT_PRED_FORMAT3 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT3(PredFormatFunction3, + n1_++, + n2_++, + n3_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT3 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT3Test, FunctionOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT3(PredFormatFunction3, + Bool(n1_++), + Bool(n2_++), + Bool(n3_++)); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT3 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT3(PredFormatFunctor3(), + n1_++, + n2_++, + n3_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT3 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT3Test, FunctorOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT3(PredFormatFunctor3(), + Bool(n1_++), + Bool(n2_++), + Bool(n3_++)); + finished_ = true; + }, ""); +} + +// Tests a successful ASSERT_PRED_FORMAT3 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnBuiltInTypeSuccess) { + ASSERT_PRED_FORMAT3(PredFormatFunction3, + ++n1_, + ++n2_, + ++n3_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT3 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnUserTypeSuccess) { + ASSERT_PRED_FORMAT3(PredFormatFunction3, + Bool(++n1_), + Bool(++n2_), + Bool(++n3_)); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT3 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnBuiltInTypeSuccess) { + ASSERT_PRED_FORMAT3(PredFormatFunctor3(), + ++n1_, + ++n2_, + ++n3_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT3 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnUserTypeSuccess) { + ASSERT_PRED_FORMAT3(PredFormatFunctor3(), + Bool(++n1_), + Bool(++n2_), + Bool(++n3_)); + finished_ = true; +} + +// Tests a failed ASSERT_PRED_FORMAT3 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED_FORMAT3(PredFormatFunction3, + n1_++, + n2_++, + n3_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED_FORMAT3 where the +// predicate-formatter is a function on a user-defined type (Bool). 
+TEST_F(ASSERT_PRED_FORMAT3Test, FunctionOnUserTypeFailure) {
+  expected_to_finish_ = false;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT3(PredFormatFunction3,
+                        Bool(n1_++),
+                        Bool(n2_++),
+                        Bool(n3_++));
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed ASSERT_PRED_FORMAT3 where the
+// predicate-formatter is a functor on a built-in type (int).
+TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnBuiltInTypeFailure) {
+  expected_to_finish_ = false;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT3(PredFormatFunctor3(),
+                        n1_++,
+                        n2_++,
+                        n3_++);
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed ASSERT_PRED_FORMAT3 where the
+// predicate-formatter is a functor on a user-defined type (Bool).
+TEST_F(ASSERT_PRED_FORMAT3Test, FunctorOnUserTypeFailure) {
+  expected_to_finish_ = false;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT3(PredFormatFunctor3(),
+                        Bool(n1_++),
+                        Bool(n2_++),
+                        Bool(n3_++));
+    finished_ = true;
+  }, "");
+}
+// Sample functions/functors for testing 4-ary predicate assertions.
+
+// A 4-ary predicate function.
+template <typename T1, typename T2, typename T3, typename T4>
+bool PredFunction4(T1 v1, T2 v2, T3 v3, T4 v4) {
+  return v1 + v2 + v3 + v4 > 0;
+}
+
+// The following two functions are needed to circumvent a bug in
+// gcc 2.95.3, which sometimes has problems with the above template
+// function.
+bool PredFunction4Int(int v1, int v2, int v3, int v4) {
+  return v1 + v2 + v3 + v4 > 0;
+}
+bool PredFunction4Bool(Bool v1, Bool v2, Bool v3, Bool v4) {
+  return v1 + v2 + v3 + v4 > 0;
+}
+
+// A 4-ary predicate functor.
+struct PredFunctor4 {
+  template <typename T1, typename T2, typename T3, typename T4>
+  bool operator()(const T1& v1,
+                  const T2& v2,
+                  const T3& v3,
+                  const T4& v4) {
+    return v1 + v2 + v3 + v4 > 0;
+  }
+};
+
+// A 4-ary predicate-formatter function.
+template <typename T1, typename T2, typename T3, typename T4>
+testing::AssertionResult PredFormatFunction4(const char* e1,
+                                             const char* e2,
+                                             const char* e3,
+                                             const char* e4,
+                                             const T1& v1,
+                                             const T2& v2,
+                                             const T3& v3,
+                                             const T4& v4) {
+  if (PredFunction4(v1, v2, v3, v4))
+    return testing::AssertionSuccess();
+
+  return testing::AssertionFailure()
+      << e1 << " + " << e2 << " + " << e3 << " + " << e4
+      << " is expected to be positive, but evaluates to "
+      << v1 + v2 + v3 + v4 << ".";
+}
+
+// A 4-ary predicate-formatter functor.
+struct PredFormatFunctor4 {
+  template <typename T1, typename T2, typename T3, typename T4>
+  testing::AssertionResult operator()(const char* e1,
+                                      const char* e2,
+                                      const char* e3,
+                                      const char* e4,
+                                      const T1& v1,
+                                      const T2& v2,
+                                      const T3& v3,
+                                      const T4& v4) const {
+    return PredFormatFunction4(e1, e2, e3, e4, v1, v2, v3, v4);
+  }
+};
+
+// Tests for {EXPECT|ASSERT}_PRED_FORMAT4.
+
+class Predicate4Test : public testing::Test {
+ protected:
+  virtual void SetUp() {
+    expected_to_finish_ = true;
+    finished_ = false;
+    n1_ = n2_ = n3_ = n4_ = 0;
+  }
+
+  virtual void TearDown() {
+    // Verifies that each of the predicate's arguments was evaluated
+    // exactly once.
+    EXPECT_EQ(1, n1_) <<
+        "The predicate assertion didn't evaluate argument 2 "
+        "exactly once.";
+    EXPECT_EQ(1, n2_) <<
+        "The predicate assertion didn't evaluate argument 3 "
+        "exactly once.";
+    EXPECT_EQ(1, n3_) <<
+        "The predicate assertion didn't evaluate argument 4 "
+        "exactly once.";
+    EXPECT_EQ(1, n4_) <<
+        "The predicate assertion didn't evaluate argument 5 "
+        "exactly once.";
+
+    // Verifies that the control flow in the test function is expected.
+    if (expected_to_finish_ && !finished_) {
+      FAIL() << "The predicate assertion unexpectedly aborted the test.";
+    } else if (!expected_to_finish_ && finished_) {
+      FAIL() << "The failed predicate assertion didn't abort the test "
+                "as expected.";
+    }
+  }
+
+  // true iff the test function is expected to run to finish.
+  static bool expected_to_finish_;
+
+  // true iff the test function did run to finish.
+  static bool finished_;
+
+  static int n1_;
+  static int n2_;
+  static int n3_;
+  static int n4_;
+};
+
+bool Predicate4Test::expected_to_finish_;
+bool Predicate4Test::finished_;
+int Predicate4Test::n1_;
+int Predicate4Test::n2_;
+int Predicate4Test::n3_;
+int Predicate4Test::n4_;
+
+typedef Predicate4Test EXPECT_PRED_FORMAT4Test;
+typedef Predicate4Test ASSERT_PRED_FORMAT4Test;
+typedef Predicate4Test EXPECT_PRED4Test;
+typedef Predicate4Test ASSERT_PRED4Test;
+
+// Tests a successful EXPECT_PRED4 where the
+// predicate-formatter is a function on a built-in type (int).
+TEST_F(EXPECT_PRED4Test, FunctionOnBuiltInTypeSuccess) {
+  EXPECT_PRED4(PredFunction4Int,
+               ++n1_,
+               ++n2_,
+               ++n3_,
+               ++n4_);
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED4 where the
+// predicate-formatter is a function on a user-defined type (Bool).
+TEST_F(EXPECT_PRED4Test, FunctionOnUserTypeSuccess) {
+  EXPECT_PRED4(PredFunction4Bool,
+               Bool(++n1_),
+               Bool(++n2_),
+               Bool(++n3_),
+               Bool(++n4_));
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED4 where the
+// predicate-formatter is a functor on a built-in type (int).
+TEST_F(EXPECT_PRED4Test, FunctorOnBuiltInTypeSuccess) {
+  EXPECT_PRED4(PredFunctor4(),
+               ++n1_,
+               ++n2_,
+               ++n3_,
+               ++n4_);
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED4 where the
+// predicate-formatter is a functor on a user-defined type (Bool).
+TEST_F(EXPECT_PRED4Test, FunctorOnUserTypeSuccess) {
+  EXPECT_PRED4(PredFunctor4(),
+               Bool(++n1_),
+               Bool(++n2_),
+               Bool(++n3_),
+               Bool(++n4_));
+  finished_ = true;
+}
+
+// Tests a failed EXPECT_PRED4 where the
+// predicate-formatter is a function on a built-in type (int).
+TEST_F(EXPECT_PRED4Test, FunctionOnBuiltInTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED4(PredFunction4Int,
+                 n1_++,
+                 n2_++,
+                 n3_++,
+                 n4_++);
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed EXPECT_PRED4 where the
+// predicate-formatter is a function on a user-defined type (Bool).
+TEST_F(EXPECT_PRED4Test, FunctionOnUserTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED4(PredFunction4Bool,
+                 Bool(n1_++),
+                 Bool(n2_++),
+                 Bool(n3_++),
+                 Bool(n4_++));
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed EXPECT_PRED4 where the
+// predicate-formatter is a functor on a built-in type (int).
+TEST_F(EXPECT_PRED4Test, FunctorOnBuiltInTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED4(PredFunctor4(),
+                 n1_++,
+                 n2_++,
+                 n3_++,
+                 n4_++);
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed EXPECT_PRED4 where the
+// predicate-formatter is a functor on a user-defined type (Bool).
+TEST_F(EXPECT_PRED4Test, FunctorOnUserTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED4(PredFunctor4(),
+                 Bool(n1_++),
+                 Bool(n2_++),
+                 Bool(n3_++),
+                 Bool(n4_++));
+    finished_ = true;
+  }, "");
+}
+
+// Tests a successful ASSERT_PRED4 where the
+// predicate-formatter is a function on a built-in type (int).
+TEST_F(ASSERT_PRED4Test, FunctionOnBuiltInTypeSuccess) { + ASSERT_PRED4(PredFunction4Int, + ++n1_, + ++n2_, + ++n3_, + ++n4_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED4 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED4Test, FunctionOnUserTypeSuccess) { + ASSERT_PRED4(PredFunction4Bool, + Bool(++n1_), + Bool(++n2_), + Bool(++n3_), + Bool(++n4_)); + finished_ = true; +} + +// Tests a successful ASSERT_PRED4 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED4Test, FunctorOnBuiltInTypeSuccess) { + ASSERT_PRED4(PredFunctor4(), + ++n1_, + ++n2_, + ++n3_, + ++n4_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED4 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED4Test, FunctorOnUserTypeSuccess) { + ASSERT_PRED4(PredFunctor4(), + Bool(++n1_), + Bool(++n2_), + Bool(++n3_), + Bool(++n4_)); + finished_ = true; +} + +// Tests a failed ASSERT_PRED4 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED4Test, FunctionOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED4(PredFunction4Int, + n1_++, + n2_++, + n3_++, + n4_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED4 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED4Test, FunctionOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED4(PredFunction4Bool, + Bool(n1_++), + Bool(n2_++), + Bool(n3_++), + Bool(n4_++)); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED4 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED4Test, FunctorOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED4(PredFunctor4(), + n1_++, + n2_++, + n3_++, + n4_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED4 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED4Test, FunctorOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED4(PredFunctor4(), + Bool(n1_++), + Bool(n2_++), + Bool(n3_++), + Bool(n4_++)); + finished_ = true; + }, ""); +} + +// Tests a successful EXPECT_PRED_FORMAT4 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT4Test, FunctionOnBuiltInTypeSuccess) { + EXPECT_PRED_FORMAT4(PredFormatFunction4, + ++n1_, + ++n2_, + ++n3_, + ++n4_); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT4 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT4Test, FunctionOnUserTypeSuccess) { + EXPECT_PRED_FORMAT4(PredFormatFunction4, + Bool(++n1_), + Bool(++n2_), + Bool(++n3_), + Bool(++n4_)); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT4 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT4Test, FunctorOnBuiltInTypeSuccess) { + EXPECT_PRED_FORMAT4(PredFormatFunctor4(), + ++n1_, + ++n2_, + ++n3_, + ++n4_); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT4 where the +// predicate-formatter is a functor on a user-defined type (Bool). 
+TEST_F(EXPECT_PRED_FORMAT4Test, FunctorOnUserTypeSuccess) { + EXPECT_PRED_FORMAT4(PredFormatFunctor4(), + Bool(++n1_), + Bool(++n2_), + Bool(++n3_), + Bool(++n4_)); + finished_ = true; +} + +// Tests a failed EXPECT_PRED_FORMAT4 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT4Test, FunctionOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT4(PredFormatFunction4, + n1_++, + n2_++, + n3_++, + n4_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT4 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT4Test, FunctionOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT4(PredFormatFunction4, + Bool(n1_++), + Bool(n2_++), + Bool(n3_++), + Bool(n4_++)); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT4 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT4Test, FunctorOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT4(PredFormatFunctor4(), + n1_++, + n2_++, + n3_++, + n4_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT4 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT4Test, FunctorOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT4(PredFormatFunctor4(), + Bool(n1_++), + Bool(n2_++), + Bool(n3_++), + Bool(n4_++)); + finished_ = true; + }, ""); +} + +// Tests a successful ASSERT_PRED_FORMAT4 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT4Test, FunctionOnBuiltInTypeSuccess) { + ASSERT_PRED_FORMAT4(PredFormatFunction4, + ++n1_, + ++n2_, + ++n3_, + ++n4_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT4 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED_FORMAT4Test, FunctionOnUserTypeSuccess) { + ASSERT_PRED_FORMAT4(PredFormatFunction4, + Bool(++n1_), + Bool(++n2_), + Bool(++n3_), + Bool(++n4_)); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT4 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnBuiltInTypeSuccess) { + ASSERT_PRED_FORMAT4(PredFormatFunctor4(), + ++n1_, + ++n2_, + ++n3_, + ++n4_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT4 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnUserTypeSuccess) { + ASSERT_PRED_FORMAT4(PredFormatFunctor4(), + Bool(++n1_), + Bool(++n2_), + Bool(++n3_), + Bool(++n4_)); + finished_ = true; +} + +// Tests a failed ASSERT_PRED_FORMAT4 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT4Test, FunctionOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED_FORMAT4(PredFormatFunction4, + n1_++, + n2_++, + n3_++, + n4_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED_FORMAT4 where the +// predicate-formatter is a function on a user-defined type (Bool). 
+TEST_F(ASSERT_PRED_FORMAT4Test, FunctionOnUserTypeFailure) {
+  expected_to_finish_ = false;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT4(PredFormatFunction4,
+                        Bool(n1_++),
+                        Bool(n2_++),
+                        Bool(n3_++),
+                        Bool(n4_++));
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed ASSERT_PRED_FORMAT4 where the
+// predicate-formatter is a functor on a built-in type (int).
+TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnBuiltInTypeFailure) {
+  expected_to_finish_ = false;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT4(PredFormatFunctor4(),
+                        n1_++,
+                        n2_++,
+                        n3_++,
+                        n4_++);
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed ASSERT_PRED_FORMAT4 where the
+// predicate-formatter is a functor on a user-defined type (Bool).
+TEST_F(ASSERT_PRED_FORMAT4Test, FunctorOnUserTypeFailure) {
+  expected_to_finish_ = false;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT4(PredFormatFunctor4(),
+                        Bool(n1_++),
+                        Bool(n2_++),
+                        Bool(n3_++),
+                        Bool(n4_++));
+    finished_ = true;
+  }, "");
+}
+// Sample functions/functors for testing 5-ary predicate assertions.
+
+// A 5-ary predicate function.
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+bool PredFunction5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) {
+  return v1 + v2 + v3 + v4 + v5 > 0;
+}
+
+// The following two functions are needed to circumvent a bug in
+// gcc 2.95.3, which sometimes has problems with the above template
+// function.
+bool PredFunction5Int(int v1, int v2, int v3, int v4, int v5) {
+  return v1 + v2 + v3 + v4 + v5 > 0;
+}
+bool PredFunction5Bool(Bool v1, Bool v2, Bool v3, Bool v4, Bool v5) {
+  return v1 + v2 + v3 + v4 + v5 > 0;
+}
+
+// A 5-ary predicate functor.
+struct PredFunctor5 {
+  template <typename T1, typename T2, typename T3, typename T4, typename T5>
+  bool operator()(const T1& v1,
+                  const T2& v2,
+                  const T3& v3,
+                  const T4& v4,
+                  const T5& v5) {
+    return v1 + v2 + v3 + v4 + v5 > 0;
+  }
+};
+
+// A 5-ary predicate-formatter function.
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+testing::AssertionResult PredFormatFunction5(const char* e1,
+                                             const char* e2,
+                                             const char* e3,
+                                             const char* e4,
+                                             const char* e5,
+                                             const T1& v1,
+                                             const T2& v2,
+                                             const T3& v3,
+                                             const T4& v4,
+                                             const T5& v5) {
+  if (PredFunction5(v1, v2, v3, v4, v5))
+    return testing::AssertionSuccess();
+
+  return testing::AssertionFailure()
+      << e1 << " + " << e2 << " + " << e3 << " + " << e4 << " + " << e5
+      << " is expected to be positive, but evaluates to "
+      << v1 + v2 + v3 + v4 + v5 << ".";
+}
+
+// A 5-ary predicate-formatter functor.
+struct PredFormatFunctor5 {
+  template <typename T1, typename T2, typename T3, typename T4, typename T5>
+  testing::AssertionResult operator()(const char* e1,
+                                      const char* e2,
+                                      const char* e3,
+                                      const char* e4,
+                                      const char* e5,
+                                      const T1& v1,
+                                      const T2& v2,
+                                      const T3& v3,
+                                      const T4& v4,
+                                      const T5& v5) const {
+    return PredFormatFunction5(e1, e2, e3, e4, e5, v1, v2, v3, v4, v5);
+  }
+};
+
+// Tests for {EXPECT|ASSERT}_PRED_FORMAT5.
+
+class Predicate5Test : public testing::Test {
+ protected:
+  virtual void SetUp() {
+    expected_to_finish_ = true;
+    finished_ = false;
+    n1_ = n2_ = n3_ = n4_ = n5_ = 0;
+  }
+
+  virtual void TearDown() {
+    // Verifies that each of the predicate's arguments was evaluated
+    // exactly once.
+    EXPECT_EQ(1, n1_) <<
+        "The predicate assertion didn't evaluate argument 2 "
+        "exactly once.";
+    EXPECT_EQ(1, n2_) <<
+        "The predicate assertion didn't evaluate argument 3 "
+        "exactly once.";
+    EXPECT_EQ(1, n3_) <<
+        "The predicate assertion didn't evaluate argument 4 "
+        "exactly once.";
+    EXPECT_EQ(1, n4_) <<
+        "The predicate assertion didn't evaluate argument 5 "
+        "exactly once.";
+    EXPECT_EQ(1, n5_) <<
+        "The predicate assertion didn't evaluate argument 6 "
+        "exactly once.";
+
+    // Verifies that the control flow in the test function is expected.
+    if (expected_to_finish_ && !finished_) {
+      FAIL() << "The predicate assertion unexpectedly aborted the test.";
+    } else if (!expected_to_finish_ && finished_) {
+      FAIL() << "The failed predicate assertion didn't abort the test "
+                "as expected.";
+    }
+  }
+
+  // true iff the test function is expected to run to finish.
+  static bool expected_to_finish_;
+
+  // true iff the test function did run to finish.
+  static bool finished_;
+
+  static int n1_;
+  static int n2_;
+  static int n3_;
+  static int n4_;
+  static int n5_;
+};
+
+bool Predicate5Test::expected_to_finish_;
+bool Predicate5Test::finished_;
+int Predicate5Test::n1_;
+int Predicate5Test::n2_;
+int Predicate5Test::n3_;
+int Predicate5Test::n4_;
+int Predicate5Test::n5_;
+
+typedef Predicate5Test EXPECT_PRED_FORMAT5Test;
+typedef Predicate5Test ASSERT_PRED_FORMAT5Test;
+typedef Predicate5Test EXPECT_PRED5Test;
+typedef Predicate5Test ASSERT_PRED5Test;
+
+// Tests a successful EXPECT_PRED5 where the
+// predicate-formatter is a function on a built-in type (int).
+TEST_F(EXPECT_PRED5Test, FunctionOnBuiltInTypeSuccess) {
+  EXPECT_PRED5(PredFunction5Int,
+               ++n1_,
+               ++n2_,
+               ++n3_,
+               ++n4_,
+               ++n5_);
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED5 where the
+// predicate-formatter is a function on a user-defined type (Bool).
+TEST_F(EXPECT_PRED5Test, FunctionOnUserTypeSuccess) {
+  EXPECT_PRED5(PredFunction5Bool,
+               Bool(++n1_),
+               Bool(++n2_),
+               Bool(++n3_),
+               Bool(++n4_),
+               Bool(++n5_));
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED5 where the
+// predicate-formatter is a functor on a built-in type (int).
+TEST_F(EXPECT_PRED5Test, FunctorOnBuiltInTypeSuccess) {
+  EXPECT_PRED5(PredFunctor5(),
+               ++n1_,
+               ++n2_,
+               ++n3_,
+               ++n4_,
+               ++n5_);
+  finished_ = true;
+}
+
+// Tests a successful EXPECT_PRED5 where the
+// predicate-formatter is a functor on a user-defined type (Bool).
+TEST_F(EXPECT_PRED5Test, FunctorOnUserTypeSuccess) {
+  EXPECT_PRED5(PredFunctor5(),
+               Bool(++n1_),
+               Bool(++n2_),
+               Bool(++n3_),
+               Bool(++n4_),
+               Bool(++n5_));
+  finished_ = true;
+}
+
+// Tests a failed EXPECT_PRED5 where the
+// predicate-formatter is a function on a built-in type (int).
+TEST_F(EXPECT_PRED5Test, FunctionOnBuiltInTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({ // NOLINT
+    EXPECT_PRED5(PredFunction5Int,
+                 n1_++,
+                 n2_++,
+                 n3_++,
+                 n4_++,
+                 n5_++);
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed EXPECT_PRED5 where the
+// predicate-formatter is a function on a user-defined type (Bool).
+TEST_F(EXPECT_PRED5Test, FunctionOnUserTypeFailure) {
+  EXPECT_NONFATAL_FAILURE({ // NOLINT
+    EXPECT_PRED5(PredFunction5Bool,
+                 Bool(n1_++),
+                 Bool(n2_++),
+                 Bool(n3_++),
+                 Bool(n4_++),
+                 Bool(n5_++));
+    finished_ = true;
+  }, "");
+}
+
+// Tests a failed EXPECT_PRED5 where the
+// predicate-formatter is a functor on a built-in type (int).
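+// (Note the convention used throughout: success tests pre-increment, so
+// the predicate sees five 1s and 1+1+1+1+1 > 0 holds; failure tests
+// post-increment, so the predicate sees five 0s and the sum 0 is not
+// positive. Either way each counter ends at 1 for the TearDown() checks.)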
+TEST_F(EXPECT_PRED5Test, FunctorOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED5(PredFunctor5(), + n1_++, + n2_++, + n3_++, + n4_++, + n5_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED5 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(EXPECT_PRED5Test, FunctorOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED5(PredFunctor5(), + Bool(n1_++), + Bool(n2_++), + Bool(n3_++), + Bool(n4_++), + Bool(n5_++)); + finished_ = true; + }, ""); +} + +// Tests a successful ASSERT_PRED5 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED5Test, FunctionOnBuiltInTypeSuccess) { + ASSERT_PRED5(PredFunction5Int, + ++n1_, + ++n2_, + ++n3_, + ++n4_, + ++n5_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED5 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED5Test, FunctionOnUserTypeSuccess) { + ASSERT_PRED5(PredFunction5Bool, + Bool(++n1_), + Bool(++n2_), + Bool(++n3_), + Bool(++n4_), + Bool(++n5_)); + finished_ = true; +} + +// Tests a successful ASSERT_PRED5 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED5Test, FunctorOnBuiltInTypeSuccess) { + ASSERT_PRED5(PredFunctor5(), + ++n1_, + ++n2_, + ++n3_, + ++n4_, + ++n5_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED5 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED5Test, FunctorOnUserTypeSuccess) { + ASSERT_PRED5(PredFunctor5(), + Bool(++n1_), + Bool(++n2_), + Bool(++n3_), + Bool(++n4_), + Bool(++n5_)); + finished_ = true; +} + +// Tests a failed ASSERT_PRED5 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED5Test, FunctionOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED5(PredFunction5Int, + n1_++, + n2_++, + n3_++, + n4_++, + n5_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED5 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED5Test, FunctionOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED5(PredFunction5Bool, + Bool(n1_++), + Bool(n2_++), + Bool(n3_++), + Bool(n4_++), + Bool(n5_++)); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED5 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED5Test, FunctorOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED5(PredFunctor5(), + n1_++, + n2_++, + n3_++, + n4_++, + n5_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED5 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED5Test, FunctorOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED5(PredFunctor5(), + Bool(n1_++), + Bool(n2_++), + Bool(n3_++), + Bool(n4_++), + Bool(n5_++)); + finished_ = true; + }, ""); +} + +// Tests a successful EXPECT_PRED_FORMAT5 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT5Test, FunctionOnBuiltInTypeSuccess) { + EXPECT_PRED_FORMAT5(PredFormatFunction5, + ++n1_, + ++n2_, + ++n3_, + ++n4_, + ++n5_); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT5 where the +// predicate-formatter is a function on a user-defined type (Bool). 
+TEST_F(EXPECT_PRED_FORMAT5Test, FunctionOnUserTypeSuccess) { + EXPECT_PRED_FORMAT5(PredFormatFunction5, + Bool(++n1_), + Bool(++n2_), + Bool(++n3_), + Bool(++n4_), + Bool(++n5_)); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT5 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT5Test, FunctorOnBuiltInTypeSuccess) { + EXPECT_PRED_FORMAT5(PredFormatFunctor5(), + ++n1_, + ++n2_, + ++n3_, + ++n4_, + ++n5_); + finished_ = true; +} + +// Tests a successful EXPECT_PRED_FORMAT5 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT5Test, FunctorOnUserTypeSuccess) { + EXPECT_PRED_FORMAT5(PredFormatFunctor5(), + Bool(++n1_), + Bool(++n2_), + Bool(++n3_), + Bool(++n4_), + Bool(++n5_)); + finished_ = true; +} + +// Tests a failed EXPECT_PRED_FORMAT5 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT5Test, FunctionOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT5(PredFormatFunction5, + n1_++, + n2_++, + n3_++, + n4_++, + n5_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT5 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT5Test, FunctionOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT5(PredFormatFunction5, + Bool(n1_++), + Bool(n2_++), + Bool(n3_++), + Bool(n4_++), + Bool(n5_++)); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT5 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(EXPECT_PRED_FORMAT5Test, FunctorOnBuiltInTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT5(PredFormatFunctor5(), + n1_++, + n2_++, + n3_++, + n4_++, + n5_++); + finished_ = true; + }, ""); +} + +// Tests a failed EXPECT_PRED_FORMAT5 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(EXPECT_PRED_FORMAT5Test, FunctorOnUserTypeFailure) { + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT5(PredFormatFunctor5(), + Bool(n1_++), + Bool(n2_++), + Bool(n3_++), + Bool(n4_++), + Bool(n5_++)); + finished_ = true; + }, ""); +} + +// Tests a successful ASSERT_PRED_FORMAT5 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT5Test, FunctionOnBuiltInTypeSuccess) { + ASSERT_PRED_FORMAT5(PredFormatFunction5, + ++n1_, + ++n2_, + ++n3_, + ++n4_, + ++n5_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT5 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED_FORMAT5Test, FunctionOnUserTypeSuccess) { + ASSERT_PRED_FORMAT5(PredFormatFunction5, + Bool(++n1_), + Bool(++n2_), + Bool(++n3_), + Bool(++n4_), + Bool(++n5_)); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT5 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnBuiltInTypeSuccess) { + ASSERT_PRED_FORMAT5(PredFormatFunctor5(), + ++n1_, + ++n2_, + ++n3_, + ++n4_, + ++n5_); + finished_ = true; +} + +// Tests a successful ASSERT_PRED_FORMAT5 where the +// predicate-formatter is a functor on a user-defined type (Bool). 
+TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnUserTypeSuccess) { + ASSERT_PRED_FORMAT5(PredFormatFunctor5(), + Bool(++n1_), + Bool(++n2_), + Bool(++n3_), + Bool(++n4_), + Bool(++n5_)); + finished_ = true; +} + +// Tests a failed ASSERT_PRED_FORMAT5 where the +// predicate-formatter is a function on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT5Test, FunctionOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED_FORMAT5(PredFormatFunction5, + n1_++, + n2_++, + n3_++, + n4_++, + n5_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED_FORMAT5 where the +// predicate-formatter is a function on a user-defined type (Bool). +TEST_F(ASSERT_PRED_FORMAT5Test, FunctionOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED_FORMAT5(PredFormatFunction5, + Bool(n1_++), + Bool(n2_++), + Bool(n3_++), + Bool(n4_++), + Bool(n5_++)); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED_FORMAT5 where the +// predicate-formatter is a functor on a built-in type (int). +TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnBuiltInTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED_FORMAT5(PredFormatFunctor5(), + n1_++, + n2_++, + n3_++, + n4_++, + n5_++); + finished_ = true; + }, ""); +} + +// Tests a failed ASSERT_PRED_FORMAT5 where the +// predicate-formatter is a functor on a user-defined type (Bool). +TEST_F(ASSERT_PRED_FORMAT5Test, FunctorOnUserTypeFailure) { + expected_to_finish_ = false; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED_FORMAT5(PredFormatFunctor5(), + Bool(n1_++), + Bool(n2_++), + Bool(n3_++), + Bool(n4_++), + Bool(n5_++)); + finished_ = true; + }, ""); +} diff --git a/external/gtest/test/gtest_premature_exit_test.cc b/external/gtest/test/gtest_premature_exit_test.cc new file mode 100755 index 0000000000..f6b6be9aeb --- /dev/null +++ b/external/gtest/test/gtest_premature_exit_test.cc @@ -0,0 +1,141 @@ +// Copyright 2013, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Tests that Google Test manipulates the premature-exit-detection
+// file correctly.
+
+#include <stdio.h>
+
+#include "gtest/gtest.h"
+
+using ::testing::InitGoogleTest;
+using ::testing::Test;
+using ::testing::internal::posix::GetEnv;
+using ::testing::internal::posix::Stat;
+using ::testing::internal::posix::StatStruct;
+
+namespace {
+
+// Is the TEST_PREMATURE_EXIT_FILE environment variable expected to be
+// set?
+const bool kTestPrematureExitFileEnvVarShouldBeSet = false;
+
+class PrematureExitTest : public Test {
+ public:
+  // Returns true iff the given file exists.
+  static bool FileExists(const char* filepath) {
+    StatStruct stat;
+    return Stat(filepath, &stat) == 0;
+  }
+
+ protected:
+  PrematureExitTest() {
+    premature_exit_file_path_ = GetEnv("TEST_PREMATURE_EXIT_FILE");
+
+    // Normalize NULL to "" for ease of handling.
+    if (premature_exit_file_path_ == NULL) {
+      premature_exit_file_path_ = "";
+    }
+  }
+
+  // Returns true iff the premature-exit file exists.
+  bool PrematureExitFileExists() const {
+    return FileExists(premature_exit_file_path_);
+  }
+
+  const char* premature_exit_file_path_;
+};
+
+typedef PrematureExitTest PrematureExitDeathTest;
+
+// Tests that:
+//   - the premature-exit file exists during the execution of a
+//     death test (EXPECT_DEATH*), and
+//   - a death test doesn't interfere with the main test process's
+//     handling of the premature-exit file.
+TEST_F(PrematureExitDeathTest, FileExistsDuringExecutionOfDeathTest) {
+  if (*premature_exit_file_path_ == '\0') {
+    return;
+  }
+
+  EXPECT_DEATH_IF_SUPPORTED({
+      // If the file exists, crash the process such that the main test
+      // process will catch the (expected) crash and report a success;
+      // otherwise don't crash, which will cause the main test process
+      // to report that the death test has failed.
+      if (PrematureExitFileExists()) {
+        exit(1);
+      }
+    }, "");
+}
+
+// Tests that TEST_PREMATURE_EXIT_FILE is set where it's expected to
+// be set.
+TEST_F(PrematureExitTest, TestPrematureExitFileEnvVarIsSet) {
+  if (kTestPrematureExitFileEnvVarShouldBeSet) {
+    const char* const filepath = GetEnv("TEST_PREMATURE_EXIT_FILE");
+    ASSERT_TRUE(filepath != NULL);
+    ASSERT_NE(*filepath, '\0');
+  }
+}
+
+// Tests that the premature-exit file exists during the execution of a
+// normal (non-death) test.
+TEST_F(PrematureExitTest, PrematureExitFileExistsDuringTestExecution) {
+  if (*premature_exit_file_path_ == '\0') {
+    return;
+  }
+
+  EXPECT_TRUE(PrematureExitFileExists())
+      << " file " << premature_exit_file_path_
+      << " should exist during test execution, but doesn't.";
+}
+
+} // namespace
+
+int main(int argc, char **argv) {
+  InitGoogleTest(&argc, argv);
+  const int exit_code = RUN_ALL_TESTS();
+
+  // Test that the premature-exit file is deleted upon return from
+  // RUN_ALL_TESTS().
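+  // The protocol under test: when the TEST_PREMATURE_EXIT_FILE environment
+  // variable is set, Google Test creates the named file before running any
+  // test and deletes it when RUN_ALL_TESTS() returns. A test harness can
+  // therefore treat a file that still exists afterwards as evidence that
+  // the binary exited prematurely (e.g. crashed or called exit()).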
+ const char* const filepath = GetEnv("TEST_PREMATURE_EXIT_FILE"); + if (filepath != NULL && *filepath != '\0') { + if (PrematureExitTest::FileExists(filepath)) { + printf( + "File %s shouldn't exist after the test program finishes, but does.", + filepath); + return 1; + } + } + + return exit_code; +} diff --git a/external/gtest/test/gtest_prod_test.cc b/external/gtest/test/gtest_prod_test.cc new file mode 100755 index 0000000000..060abce187 --- /dev/null +++ b/external/gtest/test/gtest_prod_test.cc @@ -0,0 +1,57 @@ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Unit test for include/gtest/gtest_prod.h. + +#include "gtest/gtest.h" +#include "test/production.h" + +// Tests that private members can be accessed from a TEST declared as +// a friend of the class. +TEST(PrivateCodeTest, CanAccessPrivateMembers) { + PrivateCode a; + EXPECT_EQ(0, a.x_); + + a.set_x(1); + EXPECT_EQ(1, a.x_); +} + +typedef testing::Test PrivateCodeFixtureTest; + +// Tests that private members can be accessed from a TEST_F declared +// as a friend of the class. +TEST_F(PrivateCodeFixtureTest, CanAccessPrivateMembers) { + PrivateCode a; + EXPECT_EQ(0, a.x_); + + a.set_x(2); + EXPECT_EQ(2, a.x_); +} diff --git a/external/gtest/test/gtest_repeat_test.cc b/external/gtest/test/gtest_repeat_test.cc new file mode 100755 index 0000000000..481012adc2 --- /dev/null +++ b/external/gtest/test/gtest_repeat_test.cc @@ -0,0 +1,253 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Tests the --gtest_repeat=number flag.
+
+#include <stdlib.h>
+#include <iostream>
+
+#include "gtest/gtest.h"
+
+// Indicates that this translation unit is part of Google Test's
+// implementation. It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error. This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+#include "src/gtest-internal-inl.h"
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+GTEST_DECLARE_string_(death_test_style);
+GTEST_DECLARE_string_(filter);
+GTEST_DECLARE_int32_(repeat);
+
+} // namespace testing
+
+using testing::GTEST_FLAG(death_test_style);
+using testing::GTEST_FLAG(filter);
+using testing::GTEST_FLAG(repeat);
+
+namespace {
+
+// We need this when we are testing Google Test itself and therefore
+// cannot use Google Test assertions.
+#define GTEST_CHECK_INT_EQ_(expected, actual) \
+  do {\
+    const int expected_val = (expected);\
+    const int actual_val = (actual);\
+    if (::testing::internal::IsTrue(expected_val != actual_val)) {\
+      ::std::cout << "Value of: " #actual "\n"\
+                  << "  Actual: " << actual_val << "\n"\
+                  << "Expected: " #expected "\n"\
+                  << "Which is: " << expected_val << "\n";\
+      ::testing::internal::posix::Abort();\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+
+// Used for verifying that global environment set-up and tear-down are
+// inside the gtest_repeat loop.
+
+int g_environment_set_up_count = 0;
+int g_environment_tear_down_count = 0;
+
+class MyEnvironment : public testing::Environment {
+ public:
+  MyEnvironment() {}
+  virtual void SetUp() { g_environment_set_up_count++; }
+  virtual void TearDown() { g_environment_tear_down_count++; }
+};
+
+// A test that should fail.
+
+int g_should_fail_count = 0;
+
+TEST(FooTest, ShouldFail) {
+  g_should_fail_count++;
+  EXPECT_EQ(0, 1) << "Expected failure.";
+}
+
+// A test that should pass.
+
+int g_should_pass_count = 0;
+
+TEST(FooTest, ShouldPass) {
+  g_should_pass_count++;
+}
+
+// A test that contains a thread-safe death test and a fast death
+// test. It should pass.
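+// ("fast" forks and runs the death-test statement directly in the child,
+// while "threadsafe" re-executes the test binary so the statement runs in
+// a freshly started, single-threaded process. Exercising both styles here
+// checks that each one counts correctly under --gtest_repeat.)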
+
+int g_death_test_count = 0;
+
+TEST(BarDeathTest, ThreadSafeAndFast) {
+  g_death_test_count++;
+
+  GTEST_FLAG(death_test_style) = "threadsafe";
+  EXPECT_DEATH_IF_SUPPORTED(::testing::internal::posix::Abort(), "");
+
+  GTEST_FLAG(death_test_style) = "fast";
+  EXPECT_DEATH_IF_SUPPORTED(::testing::internal::posix::Abort(), "");
+}
+
+#if GTEST_HAS_PARAM_TEST
+int g_param_test_count = 0;
+
+const int kNumberOfParamTests = 10;
+
+class MyParamTest : public testing::TestWithParam<int> {};
+
+TEST_P(MyParamTest, ShouldPass) {
+  // TODO(vladl@google.com): Make parameter value checking robust
+  // WRT order of tests.
+  GTEST_CHECK_INT_EQ_(g_param_test_count % kNumberOfParamTests, GetParam());
+  g_param_test_count++;
+}
+INSTANTIATE_TEST_CASE_P(MyParamSequence,
+                        MyParamTest,
+                        testing::Range(0, kNumberOfParamTests));
+#endif // GTEST_HAS_PARAM_TEST
+
+// Resets the count for each test.
+void ResetCounts() {
+  g_environment_set_up_count = 0;
+  g_environment_tear_down_count = 0;
+  g_should_fail_count = 0;
+  g_should_pass_count = 0;
+  g_death_test_count = 0;
+#if GTEST_HAS_PARAM_TEST
+  g_param_test_count = 0;
+#endif // GTEST_HAS_PARAM_TEST
+}
+
+// Checks that the count for each test is expected.
+void CheckCounts(int expected) {
+  GTEST_CHECK_INT_EQ_(expected, g_environment_set_up_count);
+  GTEST_CHECK_INT_EQ_(expected, g_environment_tear_down_count);
+  GTEST_CHECK_INT_EQ_(expected, g_should_fail_count);
+  GTEST_CHECK_INT_EQ_(expected, g_should_pass_count);
+  GTEST_CHECK_INT_EQ_(expected, g_death_test_count);
+#if GTEST_HAS_PARAM_TEST
+  GTEST_CHECK_INT_EQ_(expected * kNumberOfParamTests, g_param_test_count);
+#endif // GTEST_HAS_PARAM_TEST
+}
+
+// Tests the behavior of Google Test when --gtest_repeat is not specified.
+void TestRepeatUnspecified() {
+  ResetCounts();
+  GTEST_CHECK_INT_EQ_(1, RUN_ALL_TESTS());
+  CheckCounts(1);
+}
+
+// Tests the behavior of Google Test when --gtest_repeat has the given value.
+void TestRepeat(int repeat) {
+  GTEST_FLAG(repeat) = repeat;
+
+  ResetCounts();
+  GTEST_CHECK_INT_EQ_(repeat > 0 ? 1 : 0, RUN_ALL_TESTS());
+  CheckCounts(repeat);
+}
+
+// Tests using --gtest_repeat when --gtest_filter specifies an empty
+// set of tests.
+void TestRepeatWithEmptyFilter(int repeat) {
+  GTEST_FLAG(repeat) = repeat;
+  GTEST_FLAG(filter) = "None";
+
+  ResetCounts();
+  GTEST_CHECK_INT_EQ_(0, RUN_ALL_TESTS());
+  CheckCounts(0);
+}
+
+// Tests using --gtest_repeat when --gtest_filter specifies a set of
+// successful tests.
+void TestRepeatWithFilterForSuccessfulTests(int repeat) {
+  GTEST_FLAG(repeat) = repeat;
+  GTEST_FLAG(filter) = "*-*ShouldFail";
+
+  ResetCounts();
+  GTEST_CHECK_INT_EQ_(0, RUN_ALL_TESTS());
+  GTEST_CHECK_INT_EQ_(repeat, g_environment_set_up_count);
+  GTEST_CHECK_INT_EQ_(repeat, g_environment_tear_down_count);
+  GTEST_CHECK_INT_EQ_(0, g_should_fail_count);
+  GTEST_CHECK_INT_EQ_(repeat, g_should_pass_count);
+  GTEST_CHECK_INT_EQ_(repeat, g_death_test_count);
+#if GTEST_HAS_PARAM_TEST
+  GTEST_CHECK_INT_EQ_(repeat * kNumberOfParamTests, g_param_test_count);
+#endif // GTEST_HAS_PARAM_TEST
+}
+
+// Tests using --gtest_repeat when --gtest_filter specifies a set of
+// failed tests.
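+// (The filter "*ShouldFail" selects only FooTest.ShouldFail, so every
+// repetition fails and RUN_ALL_TESTS() is expected to return 1, while the
+// passing, death, and parameterized tests never run at all.)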
+void TestRepeatWithFilterForFailedTests(int repeat) {
+  GTEST_FLAG(repeat) = repeat;
+  GTEST_FLAG(filter) = "*ShouldFail";
+
+  ResetCounts();
+  GTEST_CHECK_INT_EQ_(1, RUN_ALL_TESTS());
+  GTEST_CHECK_INT_EQ_(repeat, g_environment_set_up_count);
+  GTEST_CHECK_INT_EQ_(repeat, g_environment_tear_down_count);
+  GTEST_CHECK_INT_EQ_(repeat, g_should_fail_count);
+  GTEST_CHECK_INT_EQ_(0, g_should_pass_count);
+  GTEST_CHECK_INT_EQ_(0, g_death_test_count);
+#if GTEST_HAS_PARAM_TEST
+  GTEST_CHECK_INT_EQ_(0, g_param_test_count);
+#endif // GTEST_HAS_PARAM_TEST
+}
+
+} // namespace
+
+int main(int argc, char **argv) {
+  testing::InitGoogleTest(&argc, argv);
+  testing::AddGlobalTestEnvironment(new MyEnvironment);
+
+  TestRepeatUnspecified();
+  TestRepeat(0);
+  TestRepeat(1);
+  TestRepeat(5);
+
+  TestRepeatWithEmptyFilter(2);
+  TestRepeatWithEmptyFilter(3);
+
+  TestRepeatWithFilterForSuccessfulTests(3);
+
+  TestRepeatWithFilterForFailedTests(4);
+
+  // It would be nice to verify that the tests indeed loop forever
+  // when GTEST_FLAG(repeat) is negative, but this test will be quite
+  // complicated to write. Since this flag is for interactive
+  // debugging only and doesn't affect the normal test result, such a
+  // test would be overkill.
+
+  printf("PASS\n");
+  return 0;
+}
diff --git a/external/gtest/test/gtest_shuffle_test.py b/external/gtest/test/gtest_shuffle_test.py
new file mode 100755
index 0000000000..30d0303d19
--- /dev/null
+++ b/external/gtest/test/gtest_shuffle_test.py
@@ -0,0 +1,325 @@
+#!/usr/bin/env python
+#
+# Copyright 2009 Google Inc. All Rights Reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+"""Verifies that test shuffling works."""
+
+__author__ = 'wan@google.com (Zhanyong Wan)'
+
+import os
+import gtest_test_utils
+
+# Command to run the gtest_shuffle_test_ program.
+COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_shuffle_test_')
+
+# The environment variables for test sharding.
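+# (Sharding splits a test binary's tests across N partitions: with
+# GTEST_TOTAL_SHARDS=N and GTEST_SHARD_INDEX=i, the binary runs only the
+# i-th partition. The sharded lists below are collected with N=3, i=1.)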
+TOTAL_SHARDS_ENV_VAR = 'GTEST_TOTAL_SHARDS' +SHARD_INDEX_ENV_VAR = 'GTEST_SHARD_INDEX' + +TEST_FILTER = 'A*.A:A*.B:C*' + +ALL_TESTS = [] +ACTIVE_TESTS = [] +FILTERED_TESTS = [] +SHARDED_TESTS = [] + +SHUFFLED_ALL_TESTS = [] +SHUFFLED_ACTIVE_TESTS = [] +SHUFFLED_FILTERED_TESTS = [] +SHUFFLED_SHARDED_TESTS = [] + + +def AlsoRunDisabledTestsFlag(): + return '--gtest_also_run_disabled_tests' + + +def FilterFlag(test_filter): + return '--gtest_filter=%s' % (test_filter,) + + +def RepeatFlag(n): + return '--gtest_repeat=%s' % (n,) + + +def ShuffleFlag(): + return '--gtest_shuffle' + + +def RandomSeedFlag(n): + return '--gtest_random_seed=%s' % (n,) + + +def RunAndReturnOutput(extra_env, args): + """Runs the test program and returns its output.""" + + environ_copy = os.environ.copy() + environ_copy.update(extra_env) + + return gtest_test_utils.Subprocess([COMMAND] + args, env=environ_copy).output + + +def GetTestsForAllIterations(extra_env, args): + """Runs the test program and returns a list of test lists. + + Args: + extra_env: a map from environment variables to their values + args: command line flags to pass to gtest_shuffle_test_ + + Returns: + A list where the i-th element is the list of tests run in the i-th + test iteration. + """ + + test_iterations = [] + for line in RunAndReturnOutput(extra_env, args).split('\n'): + if line.startswith('----'): + tests = [] + test_iterations.append(tests) + elif line.strip(): + tests.append(line.strip()) # 'TestCaseName.TestName' + + return test_iterations + + +def GetTestCases(tests): + """Returns a list of test cases in the given full test names. + + Args: + tests: a list of full test names + + Returns: + A list of test cases from 'tests', in their original order. + Consecutive duplicates are removed. + """ + + test_cases = [] + for test in tests: + test_case = test.split('.')[0] + if not test_case in test_cases: + test_cases.append(test_case) + + return test_cases + + +def CalculateTestLists(): + """Calculates the list of tests run under different flags.""" + + if not ALL_TESTS: + ALL_TESTS.extend( + GetTestsForAllIterations({}, [AlsoRunDisabledTestsFlag()])[0]) + + if not ACTIVE_TESTS: + ACTIVE_TESTS.extend(GetTestsForAllIterations({}, [])[0]) + + if not FILTERED_TESTS: + FILTERED_TESTS.extend( + GetTestsForAllIterations({}, [FilterFlag(TEST_FILTER)])[0]) + + if not SHARDED_TESTS: + SHARDED_TESTS.extend( + GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', + SHARD_INDEX_ENV_VAR: '1'}, + [])[0]) + + if not SHUFFLED_ALL_TESTS: + SHUFFLED_ALL_TESTS.extend(GetTestsForAllIterations( + {}, [AlsoRunDisabledTestsFlag(), ShuffleFlag(), RandomSeedFlag(1)])[0]) + + if not SHUFFLED_ACTIVE_TESTS: + SHUFFLED_ACTIVE_TESTS.extend(GetTestsForAllIterations( + {}, [ShuffleFlag(), RandomSeedFlag(1)])[0]) + + if not SHUFFLED_FILTERED_TESTS: + SHUFFLED_FILTERED_TESTS.extend(GetTestsForAllIterations( + {}, [ShuffleFlag(), RandomSeedFlag(1), FilterFlag(TEST_FILTER)])[0]) + + if not SHUFFLED_SHARDED_TESTS: + SHUFFLED_SHARDED_TESTS.extend( + GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', + SHARD_INDEX_ENV_VAR: '1'}, + [ShuffleFlag(), RandomSeedFlag(1)])[0]) + + +class GTestShuffleUnitTest(gtest_test_utils.TestCase): + """Tests test shuffling.""" + + def setUp(self): + CalculateTestLists() + + def testShufflePreservesNumberOfTests(self): + self.assertEqual(len(ALL_TESTS), len(SHUFFLED_ALL_TESTS)) + self.assertEqual(len(ACTIVE_TESTS), len(SHUFFLED_ACTIVE_TESTS)) + self.assertEqual(len(FILTERED_TESTS), len(SHUFFLED_FILTERED_TESTS)) + 
self.assertEqual(len(SHARDED_TESTS), len(SHUFFLED_SHARDED_TESTS)) + + def testShuffleChangesTestOrder(self): + self.assert_(SHUFFLED_ALL_TESTS != ALL_TESTS, SHUFFLED_ALL_TESTS) + self.assert_(SHUFFLED_ACTIVE_TESTS != ACTIVE_TESTS, SHUFFLED_ACTIVE_TESTS) + self.assert_(SHUFFLED_FILTERED_TESTS != FILTERED_TESTS, + SHUFFLED_FILTERED_TESTS) + self.assert_(SHUFFLED_SHARDED_TESTS != SHARDED_TESTS, + SHUFFLED_SHARDED_TESTS) + + def testShuffleChangesTestCaseOrder(self): + self.assert_(GetTestCases(SHUFFLED_ALL_TESTS) != GetTestCases(ALL_TESTS), + GetTestCases(SHUFFLED_ALL_TESTS)) + self.assert_( + GetTestCases(SHUFFLED_ACTIVE_TESTS) != GetTestCases(ACTIVE_TESTS), + GetTestCases(SHUFFLED_ACTIVE_TESTS)) + self.assert_( + GetTestCases(SHUFFLED_FILTERED_TESTS) != GetTestCases(FILTERED_TESTS), + GetTestCases(SHUFFLED_FILTERED_TESTS)) + self.assert_( + GetTestCases(SHUFFLED_SHARDED_TESTS) != GetTestCases(SHARDED_TESTS), + GetTestCases(SHUFFLED_SHARDED_TESTS)) + + def testShuffleDoesNotRepeatTest(self): + for test in SHUFFLED_ALL_TESTS: + self.assertEqual(1, SHUFFLED_ALL_TESTS.count(test), + '%s appears more than once' % (test,)) + for test in SHUFFLED_ACTIVE_TESTS: + self.assertEqual(1, SHUFFLED_ACTIVE_TESTS.count(test), + '%s appears more than once' % (test,)) + for test in SHUFFLED_FILTERED_TESTS: + self.assertEqual(1, SHUFFLED_FILTERED_TESTS.count(test), + '%s appears more than once' % (test,)) + for test in SHUFFLED_SHARDED_TESTS: + self.assertEqual(1, SHUFFLED_SHARDED_TESTS.count(test), + '%s appears more than once' % (test,)) + + def testShuffleDoesNotCreateNewTest(self): + for test in SHUFFLED_ALL_TESTS: + self.assert_(test in ALL_TESTS, '%s is an invalid test' % (test,)) + for test in SHUFFLED_ACTIVE_TESTS: + self.assert_(test in ACTIVE_TESTS, '%s is an invalid test' % (test,)) + for test in SHUFFLED_FILTERED_TESTS: + self.assert_(test in FILTERED_TESTS, '%s is an invalid test' % (test,)) + for test in SHUFFLED_SHARDED_TESTS: + self.assert_(test in SHARDED_TESTS, '%s is an invalid test' % (test,)) + + def testShuffleIncludesAllTests(self): + for test in ALL_TESTS: + self.assert_(test in SHUFFLED_ALL_TESTS, '%s is missing' % (test,)) + for test in ACTIVE_TESTS: + self.assert_(test in SHUFFLED_ACTIVE_TESTS, '%s is missing' % (test,)) + for test in FILTERED_TESTS: + self.assert_(test in SHUFFLED_FILTERED_TESTS, '%s is missing' % (test,)) + for test in SHARDED_TESTS: + self.assert_(test in SHUFFLED_SHARDED_TESTS, '%s is missing' % (test,)) + + def testShuffleLeavesDeathTestsAtFront(self): + non_death_test_found = False + for test in SHUFFLED_ACTIVE_TESTS: + if 'DeathTest.' in test: + self.assert_(not non_death_test_found, + '%s appears after a non-death test' % (test,)) + else: + non_death_test_found = True + + def _VerifyTestCasesDoNotInterleave(self, tests): + test_cases = [] + for test in tests: + [test_case, _] = test.split('.') + if test_cases and test_cases[-1] != test_case: + test_cases.append(test_case) + self.assertEqual(1, test_cases.count(test_case), + 'Test case %s is not grouped together in %s' % + (test_case, tests)) + + def testShuffleDoesNotInterleaveTestCases(self): + self._VerifyTestCasesDoNotInterleave(SHUFFLED_ALL_TESTS) + self._VerifyTestCasesDoNotInterleave(SHUFFLED_ACTIVE_TESTS) + self._VerifyTestCasesDoNotInterleave(SHUFFLED_FILTERED_TESTS) + self._VerifyTestCasesDoNotInterleave(SHUFFLED_SHARDED_TESTS) + + def testShuffleRestoresOrderAfterEachIteration(self): + # Get the test lists in all 3 iterations, using random seed 1, 2, + # and 3 respectively. 
Google Test picks a different seed in each + # iteration, and this test depends on the current implementation + # picking successive numbers. This dependency is not ideal, but + # makes the test much easier to write. + [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = ( + GetTestsForAllIterations( + {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)])) + + # Make sure running the tests with random seed 1 gets the same + # order as in iteration 1 above. + [tests_with_seed1] = GetTestsForAllIterations( + {}, [ShuffleFlag(), RandomSeedFlag(1)]) + self.assertEqual(tests_in_iteration1, tests_with_seed1) + + # Make sure running the tests with random seed 2 gets the same + # order as in iteration 2 above. Success means that Google Test + # correctly restores the test order before re-shuffling at the + # beginning of iteration 2. + [tests_with_seed2] = GetTestsForAllIterations( + {}, [ShuffleFlag(), RandomSeedFlag(2)]) + self.assertEqual(tests_in_iteration2, tests_with_seed2) + + # Make sure running the tests with random seed 3 gets the same + # order as in iteration 3 above. Success means that Google Test + # correctly restores the test order before re-shuffling at the + # beginning of iteration 3. + [tests_with_seed3] = GetTestsForAllIterations( + {}, [ShuffleFlag(), RandomSeedFlag(3)]) + self.assertEqual(tests_in_iteration3, tests_with_seed3) + + def testShuffleGeneratesNewOrderInEachIteration(self): + [tests_in_iteration1, tests_in_iteration2, tests_in_iteration3] = ( + GetTestsForAllIterations( + {}, [ShuffleFlag(), RandomSeedFlag(1), RepeatFlag(3)])) + + self.assert_(tests_in_iteration1 != tests_in_iteration2, + tests_in_iteration1) + self.assert_(tests_in_iteration1 != tests_in_iteration3, + tests_in_iteration1) + self.assert_(tests_in_iteration2 != tests_in_iteration3, + tests_in_iteration2) + + def testShuffleShardedTestsPreservesPartition(self): + # If we run M tests on N shards, the same M tests should be run in + # total, regardless of the random seeds used by the shards. + [tests1] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', + SHARD_INDEX_ENV_VAR: '0'}, + [ShuffleFlag(), RandomSeedFlag(1)]) + [tests2] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', + SHARD_INDEX_ENV_VAR: '1'}, + [ShuffleFlag(), RandomSeedFlag(20)]) + [tests3] = GetTestsForAllIterations({TOTAL_SHARDS_ENV_VAR: '3', + SHARD_INDEX_ENV_VAR: '2'}, + [ShuffleFlag(), RandomSeedFlag(25)]) + sorted_sharded_tests = tests1 + tests2 + tests3 + sorted_sharded_tests.sort() + sorted_active_tests = [] + sorted_active_tests.extend(ACTIVE_TESTS) + sorted_active_tests.sort() + self.assertEqual(sorted_active_tests, sorted_sharded_tests) + +if __name__ == '__main__': + gtest_test_utils.Main() diff --git a/external/gtest/test/gtest_shuffle_test_.cc b/external/gtest/test/gtest_shuffle_test_.cc new file mode 100755 index 0000000000..6fb441bd4d --- /dev/null +++ b/external/gtest/test/gtest_shuffle_test_.cc @@ -0,0 +1,103 @@ +// Copyright 2009, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Verifies that test shuffling works. + +#include "gtest/gtest.h" + +namespace { + +using ::testing::EmptyTestEventListener; +using ::testing::InitGoogleTest; +using ::testing::Message; +using ::testing::Test; +using ::testing::TestEventListeners; +using ::testing::TestInfo; +using ::testing::UnitTest; +using ::testing::internal::scoped_ptr; + +// The test methods are empty, as the sole purpose of this program is +// to print the test names before/after shuffling. + +class A : public Test {}; +TEST_F(A, A) {} +TEST_F(A, B) {} + +TEST(ADeathTest, A) {} +TEST(ADeathTest, B) {} +TEST(ADeathTest, C) {} + +TEST(B, A) {} +TEST(B, B) {} +TEST(B, C) {} +TEST(B, DISABLED_D) {} +TEST(B, DISABLED_E) {} + +TEST(BDeathTest, A) {} +TEST(BDeathTest, B) {} + +TEST(C, A) {} +TEST(C, B) {} +TEST(C, C) {} +TEST(C, DISABLED_D) {} + +TEST(CDeathTest, A) {} + +TEST(DISABLED_D, A) {} +TEST(DISABLED_D, DISABLED_B) {} + +// This printer prints the full test names only, starting each test +// iteration with a "----" marker. +class TestNamePrinter : public EmptyTestEventListener { + public: + virtual void OnTestIterationStart(const UnitTest& /* unit_test */, + int /* iteration */) { + printf("----\n"); + } + + virtual void OnTestStart(const TestInfo& test_info) { + printf("%s.%s\n", test_info.test_case_name(), test_info.name()); + } +}; + +} // namespace + +int main(int argc, char **argv) { + InitGoogleTest(&argc, argv); + + // Replaces the default printer with TestNamePrinter, which prints + // the test name only. + TestEventListeners& listeners = UnitTest::GetInstance()->listeners(); + delete listeners.Release(listeners.default_result_printer()); + listeners.Append(new TestNamePrinter); + + return RUN_ALL_TESTS(); +} diff --git a/external/gtest/test/gtest_sole_header_test.cc b/external/gtest/test/gtest_sole_header_test.cc new file mode 100755 index 0000000000..ccd091a281 --- /dev/null +++ b/external/gtest/test/gtest_sole_header_test.cc @@ -0,0 +1,57 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// +// This test verifies that it's possible to use Google Test by including +// the gtest.h header file alone. + +#include "gtest/gtest.h" + +namespace { + +void Subroutine() { + EXPECT_EQ(42, 42); +} + +TEST(NoFatalFailureTest, ExpectNoFatalFailure) { + EXPECT_NO_FATAL_FAILURE(;); + EXPECT_NO_FATAL_FAILURE(SUCCEED()); + EXPECT_NO_FATAL_FAILURE(Subroutine()); + EXPECT_NO_FATAL_FAILURE({ SUCCEED(); }); +} + +TEST(NoFatalFailureTest, AssertNoFatalFailure) { + ASSERT_NO_FATAL_FAILURE(;); + ASSERT_NO_FATAL_FAILURE(SUCCEED()); + ASSERT_NO_FATAL_FAILURE(Subroutine()); + ASSERT_NO_FATAL_FAILURE({ SUCCEED(); }); +} + +} // namespace diff --git a/external/gtest/test/gtest_stress_test.cc b/external/gtest/test/gtest_stress_test.cc new file mode 100755 index 0000000000..e7daa430df --- /dev/null +++ b/external/gtest/test/gtest_stress_test.cc @@ -0,0 +1,256 @@ +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Tests that SCOPED_TRACE() and various Google Test assertions can be
+// used in a large number of threads concurrently.
+
+#include "gtest/gtest.h"
+
+#include <iostream>
+#include <vector>
+
+// We must define this macro in order to #include
+// gtest-internal-inl.h. This is how Google Test prevents a user from
+// accidentally depending on its internal implementation.
+#define GTEST_IMPLEMENTATION_ 1
+#include "src/gtest-internal-inl.h"
+#undef GTEST_IMPLEMENTATION_
+
+#if GTEST_IS_THREADSAFE
+
+namespace testing {
+namespace {
+
+using internal::Notification;
+using internal::TestPropertyKeyIs;
+using internal::ThreadWithParam;
+using internal::scoped_ptr;
+
+// In order to run tests in this file, for platforms where Google Test is
+// thread safe, implement ThreadWithParam. See the description of its API
+// in gtest-port.h, where it is defined for already supported platforms.
+
+// How many threads to create?
+const int kThreadCount = 50;
+
+std::string IdToKey(int id, const char* suffix) {
+  Message key;
+  key << "key_" << id << "_" << suffix;
+  return key.GetString();
+}
+
+std::string IdToString(int id) {
+  Message id_message;
+  id_message << id;
+  return id_message.GetString();
+}
+
+void ExpectKeyAndValueWereRecordedForId(
+    const std::vector<TestProperty>& properties,
+    int id, const char* suffix) {
+  TestPropertyKeyIs matches_key(IdToKey(id, suffix).c_str());
+  const std::vector<TestProperty>::const_iterator property =
+      std::find_if(properties.begin(), properties.end(), matches_key);
+  ASSERT_TRUE(property != properties.end())
+      << "expecting " << suffix << " value for id " << id;
+  EXPECT_STREQ(IdToString(id).c_str(), property->value());
+}
+
+// Calls a large number of Google Test assertions; exactly one assertion
+// per loop iteration will fail.
+void ManyAsserts(int id) {
+  GTEST_LOG_(INFO) << "Thread #" << id << " running...";
+
+  SCOPED_TRACE(Message() << "Thread #" << id);
+
+  for (int i = 0; i < kThreadCount; i++) {
+    SCOPED_TRACE(Message() << "Iteration #" << i);
+
+    // A bunch of assertions that should succeed.
+    EXPECT_TRUE(true);
+    ASSERT_FALSE(false) << "This shouldn't fail.";
+    EXPECT_STREQ("a", "a");
+    ASSERT_LE(5, 6);
+    EXPECT_EQ(i, i) << "This shouldn't fail.";
+
+    // RecordProperty() should interact safely with other threads as well.
+    // The shared_key forces property updates.
+    Test::RecordProperty(IdToKey(id, "string").c_str(), IdToString(id).c_str());
+    Test::RecordProperty(IdToKey(id, "int").c_str(), id);
+    Test::RecordProperty("shared_key", IdToString(id).c_str());
+
+    // This assertion should fail kThreadCount times per thread. It
+    // is for testing whether Google Test can handle failed assertions in a
+    // multi-threaded context.
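+    // With kThreadCount == 50, the EXPECT_LT below fails on every one of
+    // the 50 iterations in each of the 50 threads, producing the
+    // kThreadCount*kThreadCount == 2500 failures that
+    // CheckTestFailureCount() verifies at the end of the test.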
+    EXPECT_LT(i, 0) << "This should always fail.";
+  }
+}
+
+void CheckTestFailureCount(int expected_failures) {
+  const TestInfo* const info = UnitTest::GetInstance()->current_test_info();
+  const TestResult* const result = info->result();
+  GTEST_CHECK_(expected_failures == result->total_part_count())
+      << "Logged " << result->total_part_count() << " failures vs. "
+      << expected_failures << " expected";
+}
+
+// Tests using SCOPED_TRACE() and Google Test assertions in many threads
+// concurrently.
+TEST(StressTest, CanUseScopedTraceAndAssertionsInManyThreads) {
+  {
+    scoped_ptr<ThreadWithParam<int> > threads[kThreadCount];
+    Notification threads_can_start;
+    for (int i = 0; i != kThreadCount; i++)
+      threads[i].reset(new ThreadWithParam<int>(&ManyAsserts,
+                                                i,
+                                                &threads_can_start));
+
+    threads_can_start.Notify();
+
+    // Blocks until all the threads are done.
+    for (int i = 0; i != kThreadCount; i++)
+      threads[i]->Join();
+  }
+
+  // Ensures that kThreadCount*kThreadCount failures have been reported.
+  const TestInfo* const info = UnitTest::GetInstance()->current_test_info();
+  const TestResult* const result = info->result();
+
+  std::vector<TestProperty> properties;
+  // We have no access to the TestResult's list of properties but we can
+  // copy them one by one.
+  for (int i = 0; i < result->test_property_count(); ++i)
+    properties.push_back(result->GetTestProperty(i));
+
+  EXPECT_EQ(kThreadCount * 2 + 1, result->test_property_count())
+      << "String and int values recorded on each thread, "
+      << "as well as one shared_key";
+  for (int i = 0; i < kThreadCount; ++i) {
+    ExpectKeyAndValueWereRecordedForId(properties, i, "string");
+    ExpectKeyAndValueWereRecordedForId(properties, i, "int");
+  }
+  CheckTestFailureCount(kThreadCount*kThreadCount);
+}
+
+void FailingThread(bool is_fatal) {
+  if (is_fatal)
+    FAIL() << "Fatal failure in some other thread. "
+           << "(This failure is expected.)";
+  else
+    ADD_FAILURE() << "Non-fatal failure in some other thread. "
+                  << "(This failure is expected.)";
+}
+
+void GenerateFatalFailureInAnotherThread(bool is_fatal) {
+  ThreadWithParam<bool> thread(&FailingThread, is_fatal, NULL);
+  thread.Join();
+}
+
+TEST(NoFatalFailureTest, ExpectNoFatalFailureIgnoresFailuresInOtherThreads) {
+  EXPECT_NO_FATAL_FAILURE(GenerateFatalFailureInAnotherThread(true));
+  // We should only have one failure (the one from
+  // GenerateFatalFailureInAnotherThread()), since the EXPECT_NO_FATAL_FAILURE
+  // should succeed.
+  CheckTestFailureCount(1);
+}
+
+void AssertNoFatalFailureIgnoresFailuresInOtherThreads() {
+  ASSERT_NO_FATAL_FAILURE(GenerateFatalFailureInAnotherThread(true));
+}
+TEST(NoFatalFailureTest, AssertNoFatalFailureIgnoresFailuresInOtherThreads) {
+  // Using a subroutine, to make sure that the test continues.
+  AssertNoFatalFailureIgnoresFailuresInOtherThreads();
+  // We should only have one failure (the one from
+  // GenerateFatalFailureInAnotherThread()), since the EXPECT_NO_FATAL_FAILURE
+  // should succeed.
+  CheckTestFailureCount(1);
+}
+
+TEST(FatalFailureTest, ExpectFatalFailureIgnoresFailuresInOtherThreads) {
+  // This statement should fail, since the current thread doesn't generate a
+  // fatal failure, only another one does.
+  EXPECT_FATAL_FAILURE(GenerateFatalFailureInAnotherThread(true), "expected");
+  CheckTestFailureCount(2);
+}
+
+TEST(FatalFailureOnAllThreadsTest, ExpectFatalFailureOnAllThreads) {
+  // This statement should succeed, because failures in all threads are
+  // considered.
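+  // (Unlike plain EXPECT_FATAL_FAILURE, which only intercepts failures
+  // raised on the current thread, the _ON_ALL_THREADS variant also catches
+  // the failure produced inside FailingThread() on the helper thread.)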
+  EXPECT_FATAL_FAILURE_ON_ALL_THREADS(
+      GenerateFatalFailureInAnotherThread(true), "expected");
+  CheckTestFailureCount(0);
+  // We need to add a failure, because main() checks that there are failures.
+  // But when only this test is run, we shouldn't have any failures.
+  ADD_FAILURE() << "This is an expected non-fatal failure.";
+}
+
+TEST(NonFatalFailureTest, ExpectNonFatalFailureIgnoresFailuresInOtherThreads) {
+  // This statement should fail, since the current thread doesn't generate a
+  // non-fatal failure, only another one does.
+  EXPECT_NONFATAL_FAILURE(GenerateFatalFailureInAnotherThread(false),
+                          "expected");
+  CheckTestFailureCount(2);
+}
+
+TEST(NonFatalFailureOnAllThreadsTest, ExpectNonFatalFailureOnAllThreads) {
+  // This statement should succeed, because failures in all threads are
+  // considered.
+  EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(
+      GenerateFatalFailureInAnotherThread(false), "expected");
+  CheckTestFailureCount(0);
+  // We need to add a failure, because main() checks that there are failures.
+  // But when only this test is run, we shouldn't have any failures.
+  ADD_FAILURE() << "This is an expected non-fatal failure.";
+}
+
+} // namespace
+} // namespace testing
+
+int main(int argc, char **argv) {
+  testing::InitGoogleTest(&argc, argv);
+
+  const int result = RUN_ALL_TESTS(); // Expected to fail.
+  GTEST_CHECK_(result == 1) << "RUN_ALL_TESTS() did not fail as expected";
+
+  printf("\nPASS\n");
+  return 0;
+}
+
+#else
+TEST(StressTest,
+     DISABLED_ThreadSafetyTestsAreSkippedWhenGoogleTestIsNotThreadSafe) {
+}
+
+int main(int argc, char **argv) {
+  testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
+#endif // GTEST_IS_THREADSAFE
diff --git a/external/gtest/test/gtest_test_utils.py b/external/gtest/test/gtest_test_utils.py
new file mode 100755
index 0000000000..28884bdc17
--- /dev/null
+++ b/external/gtest/test/gtest_test_utils.py
@@ -0,0 +1,320 @@
+#!/usr/bin/env python
+#
+# Copyright 2006, Google Inc.
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are
+# met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following disclaimer
+# in the documentation and/or other materials provided with the
+# distribution.
+# * Neither the name of Google Inc. nor the names of its
+# contributors may be used to endorse or promote products derived from
+# this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ +"""Unit test utilities for Google C++ Testing Framework.""" + +__author__ = 'wan@google.com (Zhanyong Wan)' + +import atexit +import os +import shutil +import sys +import tempfile +import unittest +_test_module = unittest + +# Suppresses the 'Import not at the top of the file' lint complaint. +# pylint: disable-msg=C6204 +try: + import subprocess + _SUBPROCESS_MODULE_AVAILABLE = True +except: + import popen2 + _SUBPROCESS_MODULE_AVAILABLE = False +# pylint: enable-msg=C6204 + +GTEST_OUTPUT_VAR_NAME = 'GTEST_OUTPUT' + +IS_WINDOWS = os.name == 'nt' +IS_CYGWIN = os.name == 'posix' and 'CYGWIN' in os.uname()[0] + +# The environment variable for specifying the path to the premature-exit file. +PREMATURE_EXIT_FILE_ENV_VAR = 'TEST_PREMATURE_EXIT_FILE' + +environ = os.environ.copy() + + +def SetEnvVar(env_var, value): + """Sets/unsets an environment variable to a given value.""" + + if value is not None: + environ[env_var] = value + elif env_var in environ: + del environ[env_var] + + +# Here we expose a class from a particular module, depending on the +# environment. The comment suppresses the 'Invalid variable name' lint +# complaint. +TestCase = _test_module.TestCase # pylint: disable-msg=C6409 + +# Initially maps a flag to its default value. After +# _ParseAndStripGTestFlags() is called, maps a flag to its actual value. +_flag_map = {'source_dir': os.path.dirname(sys.argv[0]), + 'build_dir': os.path.dirname(sys.argv[0])} +_gtest_flags_are_parsed = False + + +def _ParseAndStripGTestFlags(argv): + """Parses and strips Google Test flags from argv. This is idempotent.""" + + # Suppresses the lint complaint about a global variable since we need it + # here to maintain module-wide state. + global _gtest_flags_are_parsed # pylint: disable-msg=W0603 + if _gtest_flags_are_parsed: + return + + _gtest_flags_are_parsed = True + for flag in _flag_map: + # The environment variable overrides the default value. + if flag.upper() in os.environ: + _flag_map[flag] = os.environ[flag.upper()] + + # The command line flag overrides the environment variable. + i = 1 # Skips the program name. + while i < len(argv): + prefix = '--' + flag + '=' + if argv[i].startswith(prefix): + _flag_map[flag] = argv[i][len(prefix):] + del argv[i] + break + else: + # We don't increment i in case we just found a --gtest_* flag + # and removed it from argv. + i += 1 + + +def GetFlag(flag): + """Returns the value of the given flag.""" + + # In case GetFlag() is called before Main(), we always call + # _ParseAndStripGTestFlags() here to make sure the --gtest_* flags + # are parsed. + _ParseAndStripGTestFlags(sys.argv) + + return _flag_map[flag] + + +def GetSourceDir(): + """Returns the absolute path of the directory where the .py files are.""" + + return os.path.abspath(GetFlag('source_dir')) + + +def GetBuildDir(): + """Returns the absolute path of the directory where the test binaries are.""" + + return os.path.abspath(GetFlag('build_dir')) + + +_temp_dir = None + +def _RemoveTempDir(): + if _temp_dir: + shutil.rmtree(_temp_dir, ignore_errors=True) + +atexit.register(_RemoveTempDir) + + +def GetTempDir(): + """Returns a directory for temporary files.""" + + global _temp_dir + if not _temp_dir: + _temp_dir = tempfile.mkdtemp() + return _temp_dir + + +def GetTestExecutablePath(executable_name, build_dir=None): + """Returns the absolute path of the test binary given its name. + + The function will print a message and abort the program if the resulting file + doesn't exist. 
+ + Args: + executable_name: name of the test binary that the test script runs. + build_dir: directory where to look for executables, by default + the result of GetBuildDir(). + + Returns: + The absolute path of the test binary. + """ + + path = os.path.abspath(os.path.join(build_dir or GetBuildDir(), + executable_name)) + if (IS_WINDOWS or IS_CYGWIN) and not path.endswith('.exe'): + path += '.exe' + + if not os.path.exists(path): + message = ( + 'Unable to find the test binary. Please make sure to provide path\n' + 'to the binary via the --build_dir flag or the BUILD_DIR\n' + 'environment variable.') + print >> sys.stderr, message + sys.exit(1) + + return path + + +def GetExitStatus(exit_code): + """Returns the argument to exit(), or -1 if exit() wasn't called. + + Args: + exit_code: the result value of os.system(command). + """ + + if os.name == 'nt': + # On Windows, os.WEXITSTATUS() doesn't work and os.system() returns + # the argument to exit() directly. + return exit_code + else: + # On Unix, os.WEXITSTATUS() must be used to extract the exit status + # from the result of os.system(). + if os.WIFEXITED(exit_code): + return os.WEXITSTATUS(exit_code) + else: + return -1 + + +class Subprocess: + def __init__(self, command, working_dir=None, capture_stderr=True, env=None): + """Changes into a specified directory, if provided, and executes a command. + + Restores the old directory afterwards. + + Args: + command: The command to run, in the form of sys.argv. + working_dir: The directory to change into. + capture_stderr: Determines whether to capture stderr in the output member + or to discard it. + env: Dictionary with environment to pass to the subprocess. + + Returns: + An object that represents the outcome of the executed process. It has the + following attributes: + terminated_by_signal True iff the child process has been terminated + by a signal. + signal Signal that terminated the child process. + exited True iff the child process exited normally. + exit_code The code with which the child process exited. + output Child process's stdout and stderr output + combined in a string. + """ + + # The subprocess module is the preferable way of running programs + # since it is available and behaves consistently on all platforms, + # including Windows. But it is only available starting in python 2.4. + # In earlier python versions, we revert to the popen2 module, which is + # available in python 2.0 and later but doesn't provide required + # functionality (Popen4) under Windows. This allows us to support Mac + # OS X 10.4 Tiger, which has python 2.3 installed. + if _SUBPROCESS_MODULE_AVAILABLE: + if capture_stderr: + stderr = subprocess.STDOUT + else: + stderr = subprocess.PIPE + + p = subprocess.Popen(command, + stdout=subprocess.PIPE, stderr=stderr, + cwd=working_dir, universal_newlines=True, env=env) + # communicate returns a tuple with the file object for the child's + # output. + self.output = p.communicate()[0] + self._return_code = p.returncode + else: + old_dir = os.getcwd() + + def _ReplaceEnvDict(dest, src): + # Changes made by os.environ.clear are not inheritable by child + # processes until Python 2.6. To produce inheritable changes we have + # to delete environment items with the del statement. + for key in dest.keys(): + del dest[key] + dest.update(src) + + # When 'env' is not None, backup the environment variables and replace + # them with the passed 'env'.
When 'env' is None, we simply use the + # current 'os.environ' for compatibility with the subprocess.Popen + # semantics used above. + if env is not None: + old_environ = os.environ.copy() + _ReplaceEnvDict(os.environ, env) + + try: + if working_dir is not None: + os.chdir(working_dir) + if capture_stderr: + p = popen2.Popen4(command) + else: + p = popen2.Popen3(command) + p.tochild.close() + self.output = p.fromchild.read() + ret_code = p.wait() + finally: + os.chdir(old_dir) + + # Restore the old environment variables + # if they were replaced. + if env is not None: + _ReplaceEnvDict(os.environ, old_environ) + + # Converts ret_code to match the semantics of + # subprocess.Popen.returncode. + if os.WIFSIGNALED(ret_code): + self._return_code = -os.WTERMSIG(ret_code) + else: # os.WIFEXITED(ret_code) should return True here. + self._return_code = os.WEXITSTATUS(ret_code) + + if self._return_code < 0: + self.terminated_by_signal = True + self.exited = False + self.signal = -self._return_code + else: + self.terminated_by_signal = False + self.exited = True + self.exit_code = self._return_code + + +def Main(): + """Runs the unit test.""" + + # We must call _ParseAndStripGTestFlags() before calling + # unittest.main(). Otherwise the latter will be confused by the + # --gtest_* flags. + _ParseAndStripGTestFlags(sys.argv) + # The tested binaries should not be writing XML output files unless the + # script explicitly instructs them to. + # TODO(vladl@google.com): Move this into Subprocess when we implement + # passing environment into it as a parameter. + if GTEST_OUTPUT_VAR_NAME in os.environ: + del os.environ[GTEST_OUTPUT_VAR_NAME] + + _test_module.main() diff --git a/external/gtest/test/gtest_throw_on_failure_ex_test.cc b/external/gtest/test/gtest_throw_on_failure_ex_test.cc new file mode 100755 index 0000000000..8d46c76f16 --- /dev/null +++ b/external/gtest/test/gtest_throw_on_failure_ex_test.cc @@ -0,0 +1,92 @@ +// Copyright 2009, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
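The popen2 fallback above hand-normalizes the child's wait() status to the subprocess.Popen.returncode convention: the plain exit code when the child exited normally, or the negated signal number when it was killed. The same decoding, sketched from the C side of the POSIX semantics the Python code relies on (illustrative only, not code from this patch):

#include <sys/wait.h>

// Decode a status value as returned by wait()/waitpid().
int DecodeStatus(int status) {
  if (WIFSIGNALED(status))
    return -WTERMSIG(status);  // terminated by a signal: negative returncode
  return WEXITSTATUS(status);  // normal termination: the exit code
}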
+// +// Author: wan@google.com (Zhanyong Wan) + +// Tests Google Test's throw-on-failure mode with exceptions enabled. + +#include "gtest/gtest.h" + +#include <stdlib.h> +#include <stdio.h> +#include <string.h> +#include <stdexcept> + +// Prints the given failure message and exits the program with +// non-zero. We use this instead of a Google Test assertion to +// indicate a failure, as the latter is being tested and cannot be +// relied on. +void Fail(const char* msg) { + printf("FAILURE: %s\n", msg); + fflush(stdout); + exit(1); +} + +// Tests that an assertion failure throws a subclass of +// std::runtime_error. +void TestFailureThrowsRuntimeError() { + testing::GTEST_FLAG(throw_on_failure) = true; + + // A successful assertion shouldn't throw. + try { + EXPECT_EQ(3, 3); + } catch(...) { + Fail("A successful assertion wrongfully threw."); + } + + // A failed assertion should throw a subclass of std::runtime_error. + try { + EXPECT_EQ(2, 3) << "Expected failure"; + } catch(const std::runtime_error& e) { + if (strstr(e.what(), "Expected failure") != NULL) + return; + + printf("%s", + "A failed assertion did throw an exception of the right type, " + "but the message is incorrect. Instead of containing \"Expected " + "failure\", it is:\n"); + Fail(e.what()); + } catch(...) { + Fail("A failed assertion threw the wrong type of exception."); + } + Fail("A failed assertion should've thrown but didn't."); +} + +int main(int argc, char** argv) { + testing::InitGoogleTest(&argc, argv); + + // We want to ensure that people can use Google Test assertions in + // other testing frameworks, as long as they initialize Google Test + // properly and set the throw-on-failure mode. Therefore, we don't + // use Google Test's constructs for defining and running tests + // (e.g. TEST and RUN_ALL_TESTS) here. + + TestFailureThrowsRuntimeError(); + return 0; +} diff --git a/external/gtest/test/gtest_throw_on_failure_test.py b/external/gtest/test/gtest_throw_on_failure_test.py new file mode 100755 index 0000000000..5678ffeaf6 --- /dev/null +++ b/external/gtest/test/gtest_throw_on_failure_test.py @@ -0,0 +1,171 @@ +#!/usr/bin/env python +# +# Copyright 2009, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Tests Google Test's throw-on-failure mode with exceptions disabled. + +This script invokes gtest_throw_on_failure_test_ (a program written with +Google Test) with different environments and command line flags. +""" + +__author__ = 'wan@google.com (Zhanyong Wan)' + +import os +import gtest_test_utils + + +# Constants. + +# The command line flag for enabling/disabling the throw-on-failure mode. +THROW_ON_FAILURE = 'gtest_throw_on_failure' + +# Path to the gtest_throw_on_failure_test_ program, compiled with +# exceptions disabled. +EXE_PATH = gtest_test_utils.GetTestExecutablePath( + 'gtest_throw_on_failure_test_') + + +# Utilities. + + +def SetEnvVar(env_var, value): + """Sets an environment variable to a given value; unsets it when the + given value is None. + """ + + env_var = env_var.upper() + if value is not None: + os.environ[env_var] = value + elif env_var in os.environ: + del os.environ[env_var] + + +def Run(command): + """Runs a command; returns True/False if its exit code is/isn't 0.""" + + print 'Running "%s". . .' % ' '.join(command) + p = gtest_test_utils.Subprocess(command) + return p.exited and p.exit_code == 0 + + +# The tests. TODO(wan@google.com): refactor the class to share common +# logic with code in gtest_break_on_failure_unittest.py. +class ThrowOnFailureTest(gtest_test_utils.TestCase): + """Tests the throw-on-failure mode.""" + + def RunAndVerify(self, env_var_value, flag_value, should_fail): + """Runs gtest_throw_on_failure_test_ and verifies that it does + (or does not) exit with a non-zero code. + + Args: + env_var_value: value of the GTEST_THROW_ON_FAILURE environment + variable; None if the variable should be unset. + flag_value: value of the --gtest_throw_on_failure flag; + None if the flag should not be present. + should_fail: True iff the program is expected to fail. + """ + + SetEnvVar(THROW_ON_FAILURE, env_var_value) + + if env_var_value is None: + env_var_value_msg = ' is not set' + else: + env_var_value_msg = '=' + env_var_value + + if flag_value is None: + flag = '' + elif flag_value == '0': + flag = '--%s=0' % THROW_ON_FAILURE + else: + flag = '--%s' % THROW_ON_FAILURE + + command = [EXE_PATH] + if flag: + command.append(flag) + + if should_fail: + should_or_not = 'should' + else: + should_or_not = 'should not' + + failed = not Run(command) + + SetEnvVar(THROW_ON_FAILURE, None) + + msg = ('when %s%s, an assertion failure in "%s" %s cause a non-zero ' + 'exit code.'
% + (THROW_ON_FAILURE, env_var_value_msg, ' '.join(command), + should_or_not)) + self.assert_(failed == should_fail, msg) + + def testDefaultBehavior(self): + """Tests the behavior of the default mode.""" + + self.RunAndVerify(env_var_value=None, flag_value=None, should_fail=False) + + def testThrowOnFailureEnvVar(self): + """Tests using the GTEST_THROW_ON_FAILURE environment variable.""" + + self.RunAndVerify(env_var_value='0', + flag_value=None, + should_fail=False) + self.RunAndVerify(env_var_value='1', + flag_value=None, + should_fail=True) + + def testThrowOnFailureFlag(self): + """Tests using the --gtest_throw_on_failure flag.""" + + self.RunAndVerify(env_var_value=None, + flag_value='0', + should_fail=False) + self.RunAndVerify(env_var_value=None, + flag_value='1', + should_fail=True) + + def testThrowOnFailureFlagOverridesEnvVar(self): + """Tests that --gtest_throw_on_failure overrides GTEST_THROW_ON_FAILURE.""" + + self.RunAndVerify(env_var_value='0', + flag_value='0', + should_fail=False) + self.RunAndVerify(env_var_value='0', + flag_value='1', + should_fail=True) + self.RunAndVerify(env_var_value='1', + flag_value='0', + should_fail=False) + self.RunAndVerify(env_var_value='1', + flag_value='1', + should_fail=True) + + +if __name__ == '__main__': + gtest_test_utils.Main() diff --git a/external/gtest/test/gtest_throw_on_failure_test_.cc b/external/gtest/test/gtest_throw_on_failure_test_.cc new file mode 100755 index 0000000000..2b88fe3d9b --- /dev/null +++ b/external/gtest/test/gtest_throw_on_failure_test_.cc @@ -0,0 +1,72 @@ +// Copyright 2009, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Tests Google Test's throw-on-failure mode with exceptions disabled. +// +// This program must be compiled with exceptions disabled. It will be +// invoked by gtest_throw_on_failure_test.py, and is expected to exit +// with non-zero in the throw-on-failure mode or 0 otherwise. + +#include "gtest/gtest.h" + +#include <stdio.h> // for fflush, fprintf, NULL, etc.
+#include <stdlib.h> // for exit +#include <exception> // for set_terminate + +// This terminate handler aborts the program using exit() rather than abort(). +// This avoids showing pop-ups on Windows systems and core dumps on Unix-like +// ones. +void TerminateHandler() { + fprintf(stderr, "%s\n", "Unhandled C++ exception terminating the program."); + fflush(NULL); + exit(1); +} + +int main(int argc, char** argv) { +#if GTEST_HAS_EXCEPTIONS + std::set_terminate(&TerminateHandler); +#endif + testing::InitGoogleTest(&argc, argv); + + // We want to ensure that people can use Google Test assertions in + // other testing frameworks, as long as they initialize Google Test + // properly and set the throw-on-failure mode. Therefore, we don't + // use Google Test's constructs for defining and running tests + // (e.g. TEST and RUN_ALL_TESTS) here. + + // In the throw-on-failure mode with exceptions disabled, this + // assertion will cause the program to exit with a non-zero code. + EXPECT_EQ(2, 3); + + // When not in the throw-on-failure mode, the control will reach + // here. + return 0; +} diff --git a/external/gtest/test/gtest_uninitialized_test.py b/external/gtest/test/gtest_uninitialized_test.py new file mode 100755 index 0000000000..6ae57eeeda --- /dev/null +++ b/external/gtest/test/gtest_uninitialized_test.py @@ -0,0 +1,70 @@ +#!/usr/bin/env python +# +# Copyright 2008, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Verifies that Google Test warns the user when not initialized properly.""" + +__author__ = 'wan@google.com (Zhanyong Wan)' + +import gtest_test_utils + + +COMMAND = gtest_test_utils.GetTestExecutablePath('gtest_uninitialized_test_') + + +def Assert(condition): + if not condition: + raise AssertionError + + +def AssertEq(expected, actual): + if expected != actual: + print 'Expected: %s' % (expected,) + print ' Actual: %s' % (actual,) + raise AssertionError + + +def TestExitCodeAndOutput(command): + """Runs the given command and verifies its exit code and output.""" + + # Verifies that 'command' exits with code 1.
+ p = gtest_test_utils.Subprocess(command) + Assert(p.exited) + AssertEq(1, p.exit_code) + Assert('InitGoogleTest' in p.output) + + +class GTestUninitializedTest(gtest_test_utils.TestCase): + def testExitCodeAndOutput(self): + TestExitCodeAndOutput(COMMAND) + + +if __name__ == '__main__': + gtest_test_utils.Main() diff --git a/external/gtest/test/gtest_uninitialized_test_.cc b/external/gtest/test/gtest_uninitialized_test_.cc new file mode 100755 index 0000000000..44316987fb --- /dev/null +++ b/external/gtest/test/gtest_uninitialized_test_.cc @@ -0,0 +1,43 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +#include "gtest/gtest.h" + +TEST(DummyTest, Dummy) { + // This test doesn't verify anything. We just need it to create a + // realistic stage for testing the behavior of Google Test when + // RUN_ALL_TESTS() is called without testing::InitGoogleTest() being + // called first. +} + +int main() { + return RUN_ALL_TESTS(); +} diff --git a/external/gtest/test/gtest_unittest.cc b/external/gtest/test/gtest_unittest.cc new file mode 100755 index 0000000000..0cab07d156 --- /dev/null +++ b/external/gtest/test/gtest_unittest.cc @@ -0,0 +1,7415 @@ +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// Tests for Google Test itself. This verifies that the basic constructs of +// Google Test work. + +#include "gtest/gtest.h" + +// Verifies that the command line flag variables can be accessed +// in code once <gtest/gtest.h> has been #included. +// Do not move it after other #includes. +TEST(CommandLineFlagsTest, CanBeAccessedInCodeOnceGTestHIsIncluded) { + bool dummy = testing::GTEST_FLAG(also_run_disabled_tests) + || testing::GTEST_FLAG(break_on_failure) + || testing::GTEST_FLAG(catch_exceptions) + || testing::GTEST_FLAG(color) != "unknown" + || testing::GTEST_FLAG(filter) != "unknown" + || testing::GTEST_FLAG(list_tests) + || testing::GTEST_FLAG(output) != "unknown" + || testing::GTEST_FLAG(print_time) + || testing::GTEST_FLAG(random_seed) + || testing::GTEST_FLAG(repeat) > 0 + || testing::GTEST_FLAG(show_internal_stack_frames) + || testing::GTEST_FLAG(shuffle) + || testing::GTEST_FLAG(stack_trace_depth) > 0 + || testing::GTEST_FLAG(stream_result_to) != "unknown" + || testing::GTEST_FLAG(throw_on_failure); + EXPECT_TRUE(dummy || !dummy); // Suppresses warning that dummy is unused. +} + +#include <limits.h> // For INT_MAX. +#include <stdlib.h> +#include <string.h> +#include <time.h> + +#include <map> +#include <vector> +#include <ostream> + +#include "gtest/gtest-spi.h" + +// Indicates that this translation unit is part of Google Test's +// implementation. It must come before gtest-internal-inl.h is +// included, or there will be a compiler error. This trick is to +// prevent a user from accidentally including gtest-internal-inl.h in +// his code. +#define GTEST_IMPLEMENTATION_ 1 +#include "src/gtest-internal-inl.h" +#undef GTEST_IMPLEMENTATION_ + +namespace testing { +namespace internal { + +#if GTEST_CAN_STREAM_RESULTS_ + +class StreamingListenerTest : public Test { + public: + class FakeSocketWriter : public StreamingListener::AbstractSocketWriter { + public: + // Sends a string to the socket. + virtual void Send(const string& message) { output_ += message; } + + string output_; + }; + + StreamingListenerTest() + : fake_sock_writer_(new FakeSocketWriter), + streamer_(fake_sock_writer_), + test_info_obj_("FooTest", "Bar", NULL, NULL, 0, NULL) {} + + protected: + string* output() { return &(fake_sock_writer_->output_); } + + FakeSocketWriter* const fake_sock_writer_; + StreamingListener streamer_; + UnitTest unit_test_; + TestInfo test_info_obj_; // The name test_info_ was taken by testing::Test.
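// How the streaming listener encodes its output, in miniature: each event
// is written as one "key=value&key=value\n" record, with '%', '=', '&' and
// newlines in message payloads percent-encoded, so "failed=\n&%" comes out
// as "failed%3D%0A%26%25" -- exactly what the OnTestPartResult test below
// asserts.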
+}; + +TEST_F(StreamingListenerTest, OnTestProgramEnd) { + *output() = ""; + streamer_.OnTestProgramEnd(unit_test_); + EXPECT_EQ("event=TestProgramEnd&passed=1\n", *output()); +} + +TEST_F(StreamingListenerTest, OnTestIterationEnd) { + *output() = ""; + streamer_.OnTestIterationEnd(unit_test_, 42); + EXPECT_EQ("event=TestIterationEnd&passed=1&elapsed_time=0ms\n", *output()); +} + +TEST_F(StreamingListenerTest, OnTestCaseStart) { + *output() = ""; + streamer_.OnTestCaseStart(TestCase("FooTest", "Bar", NULL, NULL)); + EXPECT_EQ("event=TestCaseStart&name=FooTest\n", *output()); +} + +TEST_F(StreamingListenerTest, OnTestCaseEnd) { + *output() = ""; + streamer_.OnTestCaseEnd(TestCase("FooTest", "Bar", NULL, NULL)); + EXPECT_EQ("event=TestCaseEnd&passed=1&elapsed_time=0ms\n", *output()); +} + +TEST_F(StreamingListenerTest, OnTestStart) { + *output() = ""; + streamer_.OnTestStart(test_info_obj_); + EXPECT_EQ("event=TestStart&name=Bar\n", *output()); +} + +TEST_F(StreamingListenerTest, OnTestEnd) { + *output() = ""; + streamer_.OnTestEnd(test_info_obj_); + EXPECT_EQ("event=TestEnd&passed=1&elapsed_time=0ms\n", *output()); +} + +TEST_F(StreamingListenerTest, OnTestPartResult) { + *output() = ""; + streamer_.OnTestPartResult(TestPartResult( + TestPartResult::kFatalFailure, "foo.cc", 42, "failed=\n&%")); + + // Meta characters in the failure message should be properly escaped. + EXPECT_EQ( + "event=TestPartResult&file=foo.cc&line=42&message=failed%3D%0A%26%25\n", + *output()); +} + +#endif // GTEST_CAN_STREAM_RESULTS_ + +// Provides access to otherwise private parts of the TestEventListeners class +// that are needed to test it. +class TestEventListenersAccessor { + public: + static TestEventListener* GetRepeater(TestEventListeners* listeners) { + return listeners->repeater(); + } + + static void SetDefaultResultPrinter(TestEventListeners* listeners, + TestEventListener* listener) { + listeners->SetDefaultResultPrinter(listener); + } + static void SetDefaultXmlGenerator(TestEventListeners* listeners, + TestEventListener* listener) { + listeners->SetDefaultXmlGenerator(listener); + } + + static bool EventForwardingEnabled(const TestEventListeners& listeners) { + return listeners.EventForwardingEnabled(); + } + + static void SuppressEventForwarding(TestEventListeners* listeners) { + listeners->SuppressEventForwarding(); + } +}; + +class UnitTestRecordPropertyTestHelper : public Test { + protected: + UnitTestRecordPropertyTestHelper() {} + + // Forwards to UnitTest::RecordProperty() to bypass access controls. 
+ void UnitTestRecordProperty(const char* key, const std::string& value) { + unit_test_.RecordProperty(key, value); + } + + UnitTest unit_test_; +}; + +} // namespace internal +} // namespace testing + +using testing::AssertionFailure; +using testing::AssertionResult; +using testing::AssertionSuccess; +using testing::DoubleLE; +using testing::EmptyTestEventListener; +using testing::Environment; +using testing::FloatLE; +using testing::GTEST_FLAG(also_run_disabled_tests); +using testing::GTEST_FLAG(break_on_failure); +using testing::GTEST_FLAG(catch_exceptions); +using testing::GTEST_FLAG(color); +using testing::GTEST_FLAG(death_test_use_fork); +using testing::GTEST_FLAG(filter); +using testing::GTEST_FLAG(list_tests); +using testing::GTEST_FLAG(output); +using testing::GTEST_FLAG(print_time); +using testing::GTEST_FLAG(random_seed); +using testing::GTEST_FLAG(repeat); +using testing::GTEST_FLAG(show_internal_stack_frames); +using testing::GTEST_FLAG(shuffle); +using testing::GTEST_FLAG(stack_trace_depth); +using testing::GTEST_FLAG(stream_result_to); +using testing::GTEST_FLAG(throw_on_failure); +using testing::IsNotSubstring; +using testing::IsSubstring; +using testing::Message; +using testing::ScopedFakeTestPartResultReporter; +using testing::StaticAssertTypeEq; +using testing::Test; +using testing::TestCase; +using testing::TestEventListeners; +using testing::TestInfo; +using testing::TestPartResult; +using testing::TestPartResultArray; +using testing::TestProperty; +using testing::TestResult; +using testing::TimeInMillis; +using testing::UnitTest; +using testing::kMaxStackTraceDepth; +using testing::internal::AddReference; +using testing::internal::AlwaysFalse; +using testing::internal::AlwaysTrue; +using testing::internal::AppendUserMessage; +using testing::internal::ArrayAwareFind; +using testing::internal::ArrayEq; +using testing::internal::CodePointToUtf8; +using testing::internal::CompileAssertTypesEqual; +using testing::internal::CopyArray; +using testing::internal::CountIf; +using testing::internal::EqFailure; +using testing::internal::FloatingPoint; +using testing::internal::ForEach; +using testing::internal::FormatEpochTimeInMillisAsIso8601; +using testing::internal::FormatTimeInMillisAsSeconds; +using testing::internal::GTestFlagSaver; +using testing::internal::GetCurrentOsStackTraceExceptTop; +using testing::internal::GetElementOr; +using testing::internal::GetNextRandomSeed; +using testing::internal::GetRandomSeedFromFlag; +using testing::internal::GetTestTypeId; +using testing::internal::GetTimeInMillis; +using testing::internal::GetTypeId; +using testing::internal::GetUnitTestImpl; +using testing::internal::ImplicitlyConvertible; +using testing::internal::Int32; +using testing::internal::Int32FromEnvOrDie; +using testing::internal::IsAProtocolMessage; +using testing::internal::IsContainer; +using testing::internal::IsContainerTest; +using testing::internal::IsNotContainer; +using testing::internal::NativeArray; +using testing::internal::ParseInt32Flag; +using testing::internal::RemoveConst; +using testing::internal::RemoveReference; +using testing::internal::ShouldRunTestOnShard; +using testing::internal::ShouldShard; +using testing::internal::ShouldUseColor; +using testing::internal::Shuffle; +using testing::internal::ShuffleRange; +using testing::internal::SkipPrefix; +using testing::internal::StreamableToString; +using testing::internal::String; +using testing::internal::TestEventListenersAccessor; +using testing::internal::TestResultAccessor; +using 
testing::internal::UInt32; +using testing::internal::WideStringToUtf8; +using testing::internal::kCopy; +using testing::internal::kMaxRandomSeed; +using testing::internal::kReference; +using testing::internal::kTestTypeIdInGoogleTest; +using testing::internal::scoped_ptr; + +#if GTEST_HAS_STREAM_REDIRECTION +using testing::internal::CaptureStdout; +using testing::internal::GetCapturedStdout; +#endif + +#if GTEST_IS_THREADSAFE +using testing::internal::ThreadWithParam; +#endif + +class TestingVector : public std::vector<int> { +}; + +::std::ostream& operator<<(::std::ostream& os, + const TestingVector& vector) { + os << "{ "; + for (size_t i = 0; i < vector.size(); i++) { + os << vector[i] << " "; + } + os << "}"; + return os; +} + +// This line tests that we can define tests in an unnamed namespace. +namespace { + +TEST(GetRandomSeedFromFlagTest, HandlesZero) { + const int seed = GetRandomSeedFromFlag(0); + EXPECT_LE(1, seed); + EXPECT_LE(seed, static_cast<int>(kMaxRandomSeed)); +} + +TEST(GetRandomSeedFromFlagTest, PreservesValidSeed) { + EXPECT_EQ(1, GetRandomSeedFromFlag(1)); + EXPECT_EQ(2, GetRandomSeedFromFlag(2)); + EXPECT_EQ(kMaxRandomSeed - 1, GetRandomSeedFromFlag(kMaxRandomSeed - 1)); + EXPECT_EQ(static_cast<int>(kMaxRandomSeed), + GetRandomSeedFromFlag(kMaxRandomSeed)); +} + +TEST(GetRandomSeedFromFlagTest, NormalizesInvalidSeed) { + const int seed1 = GetRandomSeedFromFlag(-1); + EXPECT_LE(1, seed1); + EXPECT_LE(seed1, static_cast<int>(kMaxRandomSeed)); + + const int seed2 = GetRandomSeedFromFlag(kMaxRandomSeed + 1); + EXPECT_LE(1, seed2); + EXPECT_LE(seed2, static_cast<int>(kMaxRandomSeed)); +} + +TEST(GetNextRandomSeedTest, WorksForValidInput) { + EXPECT_EQ(2, GetNextRandomSeed(1)); + EXPECT_EQ(3, GetNextRandomSeed(2)); + EXPECT_EQ(static_cast<int>(kMaxRandomSeed), + GetNextRandomSeed(kMaxRandomSeed - 1)); + EXPECT_EQ(1, GetNextRandomSeed(kMaxRandomSeed)); + + // We deliberately don't test GetNextRandomSeed() with invalid + // inputs, as that requires death tests, which are expensive. This + // is fine as GetNextRandomSeed() is internal and has a + // straightforward definition. +} + +static void ClearCurrentTestPartResults() { + TestResultAccessor::ClearTestPartResults( + GetUnitTestImpl()->current_test_result()); +} + +// Tests GetTypeId. + +TEST(GetTypeIdTest, ReturnsSameValueForSameType) { + EXPECT_EQ(GetTypeId<int>(), GetTypeId<int>()); + EXPECT_EQ(GetTypeId<Test>(), GetTypeId<Test>()); +} + +class SubClassOfTest : public Test {}; +class AnotherSubClassOfTest : public Test {}; + +TEST(GetTypeIdTest, ReturnsDifferentValuesForDifferentTypes) { + EXPECT_NE(GetTypeId<int>(), GetTypeId<const int>()); + EXPECT_NE(GetTypeId<int>(), GetTypeId<char>()); + EXPECT_NE(GetTypeId<int>(), GetTestTypeId()); + EXPECT_NE(GetTypeId<SubClassOfTest>(), GetTestTypeId()); + EXPECT_NE(GetTypeId<AnotherSubClassOfTest>(), GetTestTypeId()); + EXPECT_NE(GetTypeId<AnotherSubClassOfTest>(), GetTypeId<SubClassOfTest>()); +} + +// Verifies that GetTestTypeId() returns the same value, no matter it +// is called from inside Google Test or outside of it. +TEST(GetTestTypeIdTest, ReturnsTheSameValueInsideOrOutsideOfGoogleTest) { + EXPECT_EQ(kTestTypeIdInGoogleTest, GetTestTypeId()); +} + +// Tests FormatTimeInMillisAsSeconds().
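The contract the following tests pin down, in miniature: millisecond counts are rendered as seconds with at most three decimals and trailing zeros trimmed. A sketch under that reading (the real FormatTimeInMillisAsSeconds is internal to gtest; this helper is illustrative only):

#include <cstdio>
#include <cstring>
#include <string>

std::string FormatMsAsSecondsSketch(long long ms) {
  char buf[32];
  std::snprintf(buf, sizeof(buf), "%.3f", ms / 1000.0);
  char* end = buf + std::strlen(buf);
  while (end[-1] == '0') --end;  // trim trailing zeros: "0.200" -> "0.2"
  if (end[-1] == '.') --end;     // and a bare decimal point: "3." -> "3"
  return std::string(buf, end);
}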
+ +TEST(FormatTimeInMillisAsSecondsTest, FormatsZero) { + EXPECT_EQ("0", FormatTimeInMillisAsSeconds(0)); +} + +TEST(FormatTimeInMillisAsSecondsTest, FormatsPositiveNumber) { + EXPECT_EQ("0.003", FormatTimeInMillisAsSeconds(3)); + EXPECT_EQ("0.01", FormatTimeInMillisAsSeconds(10)); + EXPECT_EQ("0.2", FormatTimeInMillisAsSeconds(200)); + EXPECT_EQ("1.2", FormatTimeInMillisAsSeconds(1200)); + EXPECT_EQ("3", FormatTimeInMillisAsSeconds(3000)); +} + +TEST(FormatTimeInMillisAsSecondsTest, FormatsNegativeNumber) { + EXPECT_EQ("-0.003", FormatTimeInMillisAsSeconds(-3)); + EXPECT_EQ("-0.01", FormatTimeInMillisAsSeconds(-10)); + EXPECT_EQ("-0.2", FormatTimeInMillisAsSeconds(-200)); + EXPECT_EQ("-1.2", FormatTimeInMillisAsSeconds(-1200)); + EXPECT_EQ("-3", FormatTimeInMillisAsSeconds(-3000)); +} + +// Tests FormatEpochTimeInMillisAsIso8601(). The correctness of conversion +// for particular dates below was verified in Python using +// datetime.datetime.utcfromtimestamp(<timestamp>/1000). + +// FormatEpochTimeInMillisAsIso8601 depends on the current timezone, so we +// have to set up a particular timezone to obtain predictable results. +class FormatEpochTimeInMillisAsIso8601Test : public Test { + public: + // On Cygwin, GCC doesn't allow unqualified integer literals to exceed + // 32 bits, even when 64-bit integer types are available. We have to + // force the constants to have a 64-bit type here. + static const TimeInMillis kMillisPerSec = 1000; + + private: + virtual void SetUp() { + saved_tz_ = NULL; +#if _MSC_VER +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4996) // Temporarily disables warning 4996 + // (function or variable may be unsafe + // for getenv, function is deprecated for + // strdup). + if (getenv("TZ")) + saved_tz_ = strdup(getenv("TZ")); +# pragma warning(pop) // Restores the warning state again. +#else + if (getenv("TZ")) + saved_tz_ = strdup(getenv("TZ")); +#endif + + // Set up the time zone for FormatEpochTimeInMillisAsIso8601 to use. We + // cannot use the local time zone because the function's output depends + // on the time zone. + SetTimeZone("UTC+00"); + } + + virtual void TearDown() { + SetTimeZone(saved_tz_); + free(const_cast<char*>(saved_tz_)); + saved_tz_ = NULL; + } + + static void SetTimeZone(const char* time_zone) { + // tzset() distinguishes between the TZ variable being present and empty + // and not being present, so we have to consider the case of time_zone + // being NULL. +#if _MSC_VER + // ...Unless it's MSVC, whose standard library's _putenv doesn't + // distinguish between an empty and a missing variable. + const std::string env_var = + std::string("TZ=") + (time_zone ? time_zone : ""); + _putenv(env_var.c_str()); +# pragma warning(push) // Saves the current warning state. +# pragma warning(disable:4996) // Temporarily disables warning 4996 + // (function is deprecated). + tzset(); +# pragma warning(pop) // Restores the warning state again.
+#else + if (time_zone) { + setenv(("TZ"), time_zone, 1); + } else { + unsetenv("TZ"); + } + tzset(); +#endif + } + + const char* saved_tz_; +}; + +const TimeInMillis FormatEpochTimeInMillisAsIso8601Test::kMillisPerSec; + +TEST_F(FormatEpochTimeInMillisAsIso8601Test, PrintsTwoDigitSegments) { + EXPECT_EQ("2011-10-31T18:52:42", + FormatEpochTimeInMillisAsIso8601(1320087162 * kMillisPerSec)); +} + +TEST_F(FormatEpochTimeInMillisAsIso8601Test, MillisecondsDoNotAffectResult) { + EXPECT_EQ( + "2011-10-31T18:52:42", + FormatEpochTimeInMillisAsIso8601(1320087162 * kMillisPerSec + 234)); +} + +TEST_F(FormatEpochTimeInMillisAsIso8601Test, PrintsLeadingZeroes) { + EXPECT_EQ("2011-09-03T05:07:02", + FormatEpochTimeInMillisAsIso8601(1315026422 * kMillisPerSec)); +} + +TEST_F(FormatEpochTimeInMillisAsIso8601Test, Prints24HourTime) { + EXPECT_EQ("2011-09-28T17:08:22", + FormatEpochTimeInMillisAsIso8601(1317229702 * kMillisPerSec)); +} + +TEST_F(FormatEpochTimeInMillisAsIso8601Test, PrintsEpochStart) { + EXPECT_EQ("1970-01-01T00:00:00", FormatEpochTimeInMillisAsIso8601(0)); +} + +#if GTEST_CAN_COMPARE_NULL + +# ifdef __BORLANDC__ +// Silences warnings: "Condition is always true", "Unreachable code" +# pragma option push -w-ccc -w-rch +# endif + +// Tests that GTEST_IS_NULL_LITERAL_(x) is true when x is a null +// pointer literal. +TEST(NullLiteralTest, IsTrueForNullLiterals) { + EXPECT_TRUE(GTEST_IS_NULL_LITERAL_(NULL)); + EXPECT_TRUE(GTEST_IS_NULL_LITERAL_(0)); + EXPECT_TRUE(GTEST_IS_NULL_LITERAL_(0U)); + EXPECT_TRUE(GTEST_IS_NULL_LITERAL_(0L)); +} + +// Tests that GTEST_IS_NULL_LITERAL_(x) is false when x is not a null +// pointer literal. +TEST(NullLiteralTest, IsFalseForNonNullLiterals) { + EXPECT_FALSE(GTEST_IS_NULL_LITERAL_(1)); + EXPECT_FALSE(GTEST_IS_NULL_LITERAL_(0.0)); + EXPECT_FALSE(GTEST_IS_NULL_LITERAL_('a')); + EXPECT_FALSE(GTEST_IS_NULL_LITERAL_(static_cast<void*>(NULL))); +} + +# ifdef __BORLANDC__ +// Restores warnings after previous "#pragma option push" suppressed them. +# pragma option pop +# endif + +#endif // GTEST_CAN_COMPARE_NULL +// +// Tests CodePointToUtf8(). + +// Tests that the NUL character L'\0' is encoded correctly. +TEST(CodePointToUtf8Test, CanEncodeNul) { + EXPECT_EQ("", CodePointToUtf8(L'\0')); +} + +// Tests that ASCII characters are encoded correctly. +TEST(CodePointToUtf8Test, CanEncodeAscii) { + EXPECT_EQ("a", CodePointToUtf8(L'a')); + EXPECT_EQ("Z", CodePointToUtf8(L'Z')); + EXPECT_EQ("&", CodePointToUtf8(L'&')); + EXPECT_EQ("\x7F", CodePointToUtf8(L'\x7F')); +} + +// Tests that Unicode code-points that have 8 to 11 bits are encoded +// as 110xxxxx 10xxxxxx. +TEST(CodePointToUtf8Test, CanEncode8To11Bits) { + // 000 1101 0011 => 110-00011 10-010011 + EXPECT_EQ("\xC3\x93", CodePointToUtf8(L'\xD3')); + + // 101 0111 0110 => 110-10101 10-110110 + // Some compilers (e.g., GCC on MinGW) cannot handle non-ASCII codepoints + // in wide strings and wide chars. In order to accommodate them, we have to + // introduce such character constants as integers. + EXPECT_EQ("\xD5\xB6", + CodePointToUtf8(static_cast<wchar_t>(0x576))); +} + +// Tests that Unicode code-points that have 12 to 16 bits are encoded +// as 1110xxxx 10xxxxxx 10xxxxxx.
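Worked by hand, the first three-byte case the next test checks: code point 0x8D3 is 0000 1000 1101 0011 in binary, and the 1110xxxx 10xxxxxx 10xxxxxx pattern splits it into 0xE0, 0xA3, 0x93. The same arithmetic as a free-standing check (illustrative only, not gtest code):

#include <cstdio>

int main() {
  const unsigned cp = 0x8D3;
  const unsigned char b0 = 0xE0 | (cp >> 12);          // 1110xxxx
  const unsigned char b1 = 0x80 | ((cp >> 6) & 0x3F);  // 10xxxxxx
  const unsigned char b2 = 0x80 | (cp & 0x3F);         // 10xxxxxx
  std::printf("%02X %02X %02X\n", b0, b1, b2);         // prints "E0 A3 93"
  return 0;
}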
+TEST(CodePointToUtf8Test, CanEncode12To16Bits) { + // 0000 1000 1101 0011 => 1110-0000 10-100011 10-010011 + EXPECT_EQ("\xE0\xA3\x93", + CodePointToUtf8(static_cast<wchar_t>(0x8D3))); + + // 1100 0111 0100 1101 => 1110-1100 10-011101 10-001101 + EXPECT_EQ("\xEC\x9D\x8D", + CodePointToUtf8(static_cast<wchar_t>(0xC74D))); +} + +#if !GTEST_WIDE_STRING_USES_UTF16_ +// Tests in this group require a wchar_t to hold > 16 bits, and thus +// are skipped on Windows, Cygwin, and Symbian, where a wchar_t is +// 16-bit wide. This code may not compile on those systems. + +// Tests that Unicode code-points that have 17 to 21 bits are encoded +// as 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx. +TEST(CodePointToUtf8Test, CanEncode17To21Bits) { + // 0 0001 0000 1000 1101 0011 => 11110-000 10-010000 10-100011 10-010011 + EXPECT_EQ("\xF0\x90\xA3\x93", CodePointToUtf8(L'\x108D3')); + + // 0 0001 0000 0100 0000 0000 => 11110-000 10-010000 10-010000 10-000000 + EXPECT_EQ("\xF0\x90\x90\x80", CodePointToUtf8(L'\x10400')); + + // 1 0000 1000 0110 0011 0100 => 11110-100 10-001000 10-011000 10-110100 + EXPECT_EQ("\xF4\x88\x98\xB4", CodePointToUtf8(L'\x108634')); +} + +// Tests that encoding an invalid code-point generates the expected result. +TEST(CodePointToUtf8Test, CanEncodeInvalidCodePoint) { + EXPECT_EQ("(Invalid Unicode 0x1234ABCD)", CodePointToUtf8(L'\x1234ABCD')); +} + +#endif // !GTEST_WIDE_STRING_USES_UTF16_ + +// Tests WideStringToUtf8(). + +// Tests that the NUL character L'\0' is encoded correctly. +TEST(WideStringToUtf8Test, CanEncodeNul) { + EXPECT_STREQ("", WideStringToUtf8(L"", 0).c_str()); + EXPECT_STREQ("", WideStringToUtf8(L"", -1).c_str()); +} + +// Tests that ASCII strings are encoded correctly. +TEST(WideStringToUtf8Test, CanEncodeAscii) { + EXPECT_STREQ("a", WideStringToUtf8(L"a", 1).c_str()); + EXPECT_STREQ("ab", WideStringToUtf8(L"ab", 2).c_str()); + EXPECT_STREQ("a", WideStringToUtf8(L"a", -1).c_str()); + EXPECT_STREQ("ab", WideStringToUtf8(L"ab", -1).c_str()); +} + +// Tests that Unicode code-points that have 8 to 11 bits are encoded +// as 110xxxxx 10xxxxxx. +TEST(WideStringToUtf8Test, CanEncode8To11Bits) { + // 000 1101 0011 => 110-00011 10-010011 + EXPECT_STREQ("\xC3\x93", WideStringToUtf8(L"\xD3", 1).c_str()); + EXPECT_STREQ("\xC3\x93", WideStringToUtf8(L"\xD3", -1).c_str()); + + // 101 0111 0110 => 110-10101 10-110110 + const wchar_t s[] = { 0x576, '\0' }; + EXPECT_STREQ("\xD5\xB6", WideStringToUtf8(s, 1).c_str()); + EXPECT_STREQ("\xD5\xB6", WideStringToUtf8(s, -1).c_str()); +} + +// Tests that Unicode code-points that have 12 to 16 bits are encoded +// as 1110xxxx 10xxxxxx 10xxxxxx. +TEST(WideStringToUtf8Test, CanEncode12To16Bits) { + // 0000 1000 1101 0011 => 1110-0000 10-100011 10-010011 + const wchar_t s1[] = { 0x8D3, '\0' }; + EXPECT_STREQ("\xE0\xA3\x93", WideStringToUtf8(s1, 1).c_str()); + EXPECT_STREQ("\xE0\xA3\x93", WideStringToUtf8(s1, -1).c_str()); + + // 1100 0111 0100 1101 => 1110-1100 10-011101 10-001101 + const wchar_t s2[] = { 0xC74D, '\0' }; + EXPECT_STREQ("\xEC\x9D\x8D", WideStringToUtf8(s2, 1).c_str()); + EXPECT_STREQ("\xEC\x9D\x8D", WideStringToUtf8(s2, -1).c_str()); +} + +// Tests that the conversion stops when the function encounters \0 character. +TEST(WideStringToUtf8Test, StopsOnNulCharacter) { + EXPECT_STREQ("ABC", WideStringToUtf8(L"ABC\0XYZ", 100).c_str()); +} + +// Tests that the conversion stops when the function reaches the limit +// specified by the 'length' parameter.
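// The two truncation rules at play here, on concrete calls: conversion
// stops at an embedded L'\0' (WideStringToUtf8(L"ABC\0XYZ", 100) yields
// "ABC") and, independently, after 'length' wide characters
// (WideStringToUtf8(L"ABCDEF", 3) also yields "ABC"), as the next test
// verifies.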
+TEST(WideStringToUtf8Test, StopsWhenLengthLimitReached) { + EXPECT_STREQ("ABC", WideStringToUtf8(L"ABCDEF", 3).c_str()); +} + +#if !GTEST_WIDE_STRING_USES_UTF16_ +// Tests that Unicode code-points that have 17 to 21 bits are encoded +// as 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx. This code may not compile +// on the systems using UTF-16 encoding. +TEST(WideStringToUtf8Test, CanEncode17To21Bits) { + // 0 0001 0000 1000 1101 0011 => 11110-000 10-010000 10-100011 10-010011 + EXPECT_STREQ("\xF0\x90\xA3\x93", WideStringToUtf8(L"\x108D3", 1).c_str()); + EXPECT_STREQ("\xF0\x90\xA3\x93", WideStringToUtf8(L"\x108D3", -1).c_str()); + + // 1 0000 1000 0110 0011 0100 => 11110-100 10-001000 10-011000 10-110100 + EXPECT_STREQ("\xF4\x88\x98\xB4", WideStringToUtf8(L"\x108634", 1).c_str()); + EXPECT_STREQ("\xF4\x88\x98\xB4", WideStringToUtf8(L"\x108634", -1).c_str()); +} + +// Tests that encoding an invalid code-point generates the expected result. +TEST(WideStringToUtf8Test, CanEncodeInvalidCodePoint) { + EXPECT_STREQ("(Invalid Unicode 0xABCDFF)", + WideStringToUtf8(L"\xABCDFF", -1).c_str()); +} +#else // !GTEST_WIDE_STRING_USES_UTF16_ +// Tests that surrogate pairs are encoded correctly on the systems using +// UTF-16 encoding in the wide strings. +TEST(WideStringToUtf8Test, CanEncodeValidUtf16SurrogatePairs) { + const wchar_t s[] = { 0xD801, 0xDC00, '\0' }; + EXPECT_STREQ("\xF0\x90\x90\x80", WideStringToUtf8(s, -1).c_str()); +} + +// Tests that encoding an invalid UTF-16 surrogate pair +// generates the expected result. +TEST(WideStringToUtf8Test, CanEncodeInvalidUtf16SurrogatePair) { + // Leading surrogate is at the end of the string. + const wchar_t s1[] = { 0xD800, '\0' }; + EXPECT_STREQ("\xED\xA0\x80", WideStringToUtf8(s1, -1).c_str()); + // Leading surrogate is not followed by the trailing surrogate. + const wchar_t s2[] = { 0xD800, 'M', '\0' }; + EXPECT_STREQ("\xED\xA0\x80M", WideStringToUtf8(s2, -1).c_str()); + // Trailing surrogate appears without a leading surrogate. + const wchar_t s3[] = { 0xDC00, 'P', 'Q', 'R', '\0' }; + EXPECT_STREQ("\xED\xB0\x80PQR", WideStringToUtf8(s3, -1).c_str()); +} +#endif // !GTEST_WIDE_STRING_USES_UTF16_ + +// Tests that codepoint concatenation works correctly. +#if !GTEST_WIDE_STRING_USES_UTF16_ +TEST(WideStringToUtf8Test, ConcatenatesCodepointsCorrectly) { + const wchar_t s[] = { 0x108634, 0xC74D, '\n', 0x576, 0x8D3, 0x108634, '\0'}; + EXPECT_STREQ( + "\xF4\x88\x98\xB4" + "\xEC\x9D\x8D" + "\n" + "\xD5\xB6" + "\xE0\xA3\x93" + "\xF4\x88\x98\xB4", + WideStringToUtf8(s, -1).c_str()); +} +#else +TEST(WideStringToUtf8Test, ConcatenatesCodepointsCorrectly) { + const wchar_t s[] = { 0xC74D, '\n', 0x576, 0x8D3, '\0'}; + EXPECT_STREQ( + "\xEC\x9D\x8D" "\n" "\xD5\xB6" "\xE0\xA3\x93", + WideStringToUtf8(s, -1).c_str()); +} +#endif // !GTEST_WIDE_STRING_USES_UTF16_ + +// Tests the Random class.
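For orientation before the tests: testing::internal::Random behaves like a small linear congruential generator with a 31-bit state; kMaxRange = 2^31 matches the death-test messages below. A sketch under that assumption (the constants here are the classic LCG parameters and are illustrative, not lifted from gtest):

#include <stdint.h>

class LcgSketch {
 public:
  explicit LcgSketch(uint32_t seed) : state_(seed % kMaxRange) {}
  // Returns a value in [0, range); range must be in (0, kMaxRange].
  uint32_t Generate(uint32_t range) {
    state_ = (1103515245u * state_ + 12345u) % kMaxRange;
    return state_ % range;
  }
 private:
  static const uint32_t kMaxRange = 1u << 31;
  uint32_t state_;
};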
+ +TEST(RandomDeathTest, GeneratesCrashesOnInvalidRange) { + testing::internal::Random random(42); + EXPECT_DEATH_IF_SUPPORTED( + random.Generate(0), + "Cannot generate a number in the range \\[0, 0\\)"); + EXPECT_DEATH_IF_SUPPORTED( + random.Generate(testing::internal::Random::kMaxRange + 1), + "Generation of a number in \\[0, 2147483649\\) was requested, " + "but this can only generate numbers in \\[0, 2147483648\\)"); +} + +TEST(RandomTest, GeneratesNumbersWithinRange) { + const UInt32 kRange = 10000; + testing::internal::Random random(12345); + for (int i = 0; i < 10; i++) { + EXPECT_LT(random.Generate(kRange), kRange) << " for iteration " << i; + } + + testing::internal::Random random2(testing::internal::Random::kMaxRange); + for (int i = 0; i < 10; i++) { + EXPECT_LT(random2.Generate(kRange), kRange) << " for iteration " << i; + } +} + +TEST(RandomTest, RepeatsWhenReseeded) { + const int kSeed = 123; + const int kArraySize = 10; + const UInt32 kRange = 10000; + UInt32 values[kArraySize]; + + testing::internal::Random random(kSeed); + for (int i = 0; i < kArraySize; i++) { + values[i] = random.Generate(kRange); + } + + random.Reseed(kSeed); + for (int i = 0; i < kArraySize; i++) { + EXPECT_EQ(values[i], random.Generate(kRange)) << " for iteration " << i; + } +} + +// Tests STL container utilities. + +// Tests CountIf(). + +static bool IsPositive(int n) { return n > 0; } + +TEST(ContainerUtilityTest, CountIf) { + std::vector<int> v; + EXPECT_EQ(0, CountIf(v, IsPositive)); // Works for an empty container. + + v.push_back(-1); + v.push_back(0); + EXPECT_EQ(0, CountIf(v, IsPositive)); // Works when no value satisfies. + + v.push_back(2); + v.push_back(-10); + v.push_back(10); + EXPECT_EQ(2, CountIf(v, IsPositive)); +} + +// Tests ForEach(). + +static int g_sum = 0; +static void Accumulate(int n) { g_sum += n; } + +TEST(ContainerUtilityTest, ForEach) { + std::vector<int> v; + g_sum = 0; + ForEach(v, Accumulate); + EXPECT_EQ(0, g_sum); // Works for an empty container. + + g_sum = 0; + v.push_back(1); + ForEach(v, Accumulate); + EXPECT_EQ(1, g_sum); // Works for a container with one element. + + g_sum = 0; + v.push_back(20); + v.push_back(300); + ForEach(v, Accumulate); + EXPECT_EQ(321, g_sum); +} + +// Tests GetElementOr().
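GetElementOr is bounds-checked lookup with a default. A sketch matching the expectations that follow (the real helper lives in gtest's internals; this version is illustrative):

#include <vector>

template <typename E>
E GetElementOrSketch(const std::vector<E>& v, int i, E default_value) {
  // Out-of-range indices (including negative ones) yield the default.
  return (i < 0 || i >= static_cast<int>(v.size())) ? default_value : v[i];
}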
+TEST(ContainerUtilityTest, GetElementOr) { + std::vector<char> a; + EXPECT_EQ('x', GetElementOr(a, 0, 'x')); + + a.push_back('a'); + a.push_back('b'); + EXPECT_EQ('a', GetElementOr(a, 0, 'x')); + EXPECT_EQ('b', GetElementOr(a, 1, 'x')); + EXPECT_EQ('x', GetElementOr(a, -2, 'x')); + EXPECT_EQ('x', GetElementOr(a, 2, 'x')); +} + +TEST(ContainerUtilityDeathTest, ShuffleRange) { + std::vector<int> a; + a.push_back(0); + a.push_back(1); + a.push_back(2); + testing::internal::Random random(1); + + EXPECT_DEATH_IF_SUPPORTED( + ShuffleRange(&random, -1, 1, &a), + "Invalid shuffle range start -1: must be in range \\[0, 3\\]"); + EXPECT_DEATH_IF_SUPPORTED( + ShuffleRange(&random, 4, 4, &a), + "Invalid shuffle range start 4: must be in range \\[0, 3\\]"); + EXPECT_DEATH_IF_SUPPORTED( + ShuffleRange(&random, 3, 2, &a), + "Invalid shuffle range finish 2: must be in range \\[3, 3\\]"); + EXPECT_DEATH_IF_SUPPORTED( + ShuffleRange(&random, 3, 4, &a), + "Invalid shuffle range finish 4: must be in range \\[3, 3\\]"); +} + +class VectorShuffleTest : public Test { + protected: + static const int kVectorSize = 20; + + VectorShuffleTest() : random_(1) { + for (int i = 0; i < kVectorSize; i++) { + vector_.push_back(i); + } + } + + static bool VectorIsCorrupt(const TestingVector& vector) { + if (kVectorSize != static_cast<int>(vector.size())) { + return true; + } + + bool found_in_vector[kVectorSize] = { false }; + for (size_t i = 0; i < vector.size(); i++) { + const int e = vector[i]; + if (e < 0 || e >= kVectorSize || found_in_vector[e]) { + return true; + } + found_in_vector[e] = true; + } + + // Vector size is correct, elements' range is correct, no + // duplicate elements. Therefore no corruption has occurred. + return false; + } + + static bool VectorIsNotCorrupt(const TestingVector& vector) { + return !VectorIsCorrupt(vector); + } + + static bool RangeIsShuffled(const TestingVector& vector, int begin, int end) { + for (int i = begin; i < end; i++) { + if (i != vector[i]) { + return true; + } + } + return false; + } + + static bool RangeIsUnshuffled( + const TestingVector& vector, int begin, int end) { + return !RangeIsShuffled(vector, begin, end); + } + + static bool VectorIsShuffled(const TestingVector& vector) { + return RangeIsShuffled(vector, 0, static_cast<int>(vector.size())); + } + + static bool VectorIsUnshuffled(const TestingVector& vector) { + return !VectorIsShuffled(vector); + } + + testing::internal::Random random_; + TestingVector vector_; +}; // class VectorShuffleTest + +const int VectorShuffleTest::kVectorSize; + +TEST_F(VectorShuffleTest, HandlesEmptyRange) { + // Tests an empty range at the beginning... + ShuffleRange(&random_, 0, 0, &vector_); + ASSERT_PRED1(VectorIsNotCorrupt, vector_); + ASSERT_PRED1(VectorIsUnshuffled, vector_); + + // ...in the middle... + ShuffleRange(&random_, kVectorSize/2, kVectorSize/2, &vector_); + ASSERT_PRED1(VectorIsNotCorrupt, vector_); + ASSERT_PRED1(VectorIsUnshuffled, vector_); + + // ...at the end... + ShuffleRange(&random_, kVectorSize - 1, kVectorSize - 1, &vector_); + ASSERT_PRED1(VectorIsNotCorrupt, vector_); + ASSERT_PRED1(VectorIsUnshuffled, vector_); + + // ...and past the end. + ShuffleRange(&random_, kVectorSize, kVectorSize, &vector_); + ASSERT_PRED1(VectorIsNotCorrupt, vector_); + ASSERT_PRED1(VectorIsUnshuffled, vector_); +} + +TEST_F(VectorShuffleTest, HandlesRangeOfSizeOne) { + // Tests a size one range at the beginning...
+ ShuffleRange(&random_, 0, 1, &vector_); + ASSERT_PRED1(VectorIsNotCorrupt, vector_); + ASSERT_PRED1(VectorIsUnshuffled, vector_); + + // ...in the middle... + ShuffleRange(&random_, kVectorSize/2, kVectorSize/2 + 1, &vector_); + ASSERT_PRED1(VectorIsNotCorrupt, vector_); + ASSERT_PRED1(VectorIsUnshuffled, vector_); + + // ...and at the end. + ShuffleRange(&random_, kVectorSize - 1, kVectorSize, &vector_); + ASSERT_PRED1(VectorIsNotCorrupt, vector_); + ASSERT_PRED1(VectorIsUnshuffled, vector_); +} + +// Because we use our own random number generator and a fixed seed, +// we can guarantee that the following "random" tests will succeed. + +TEST_F(VectorShuffleTest, ShufflesEntireVector) { + Shuffle(&random_, &vector_); + ASSERT_PRED1(VectorIsNotCorrupt, vector_); + EXPECT_FALSE(VectorIsUnshuffled(vector_)) << vector_; + + // Tests the first and last elements in particular to ensure that + // there are no off-by-one problems in our shuffle algorithm. + EXPECT_NE(0, vector_[0]); + EXPECT_NE(kVectorSize - 1, vector_[kVectorSize - 1]); +} + +TEST_F(VectorShuffleTest, ShufflesStartOfVector) { + const int kRangeSize = kVectorSize/2; + + ShuffleRange(&random_, 0, kRangeSize, &vector_); + + ASSERT_PRED1(VectorIsNotCorrupt, vector_); + EXPECT_PRED3(RangeIsShuffled, vector_, 0, kRangeSize); + EXPECT_PRED3(RangeIsUnshuffled, vector_, kRangeSize, kVectorSize); +} + +TEST_F(VectorShuffleTest, ShufflesEndOfVector) { + const int kRangeSize = kVectorSize / 2; + ShuffleRange(&random_, kRangeSize, kVectorSize, &vector_); + + ASSERT_PRED1(VectorIsNotCorrupt, vector_); + EXPECT_PRED3(RangeIsUnshuffled, vector_, 0, kRangeSize); + EXPECT_PRED3(RangeIsShuffled, vector_, kRangeSize, kVectorSize); +} + +TEST_F(VectorShuffleTest, ShufflesMiddleOfVector) { + int kRangeSize = kVectorSize/3; + ShuffleRange(&random_, kRangeSize, 2*kRangeSize, &vector_); + + ASSERT_PRED1(VectorIsNotCorrupt, vector_); + EXPECT_PRED3(RangeIsUnshuffled, vector_, 0, kRangeSize); + EXPECT_PRED3(RangeIsShuffled, vector_, kRangeSize, 2*kRangeSize); + EXPECT_PRED3(RangeIsUnshuffled, vector_, 2*kRangeSize, kVectorSize); +} + +TEST_F(VectorShuffleTest, ShufflesRepeatably) { + TestingVector vector2; + for (int i = 0; i < kVectorSize; i++) { + vector2.push_back(i); + } + + random_.Reseed(1234); + Shuffle(&random_, &vector_); + random_.Reseed(1234); + Shuffle(&random_, &vector2); + + ASSERT_PRED1(VectorIsNotCorrupt, vector_); + ASSERT_PRED1(VectorIsNotCorrupt, vector2); + + for (int i = 0; i < kVectorSize; i++) { + EXPECT_EQ(vector_[i], vector2[i]) << " where i is " << i; + } +} + +// Tests the size of the AssertHelper class. + +TEST(AssertHelperTest, AssertHelperIsSmall) { + // To avoid breaking clients that use lots of assertions in one + // function, we cannot grow the size of AssertHelper. + EXPECT_LE(sizeof(testing::internal::AssertHelper), sizeof(void*)); +} + +// Tests String::EndsWithCaseInsensitive(). +TEST(StringTest, EndsWithCaseInsensitive) { + EXPECT_TRUE(String::EndsWithCaseInsensitive("foobar", "BAR")); + EXPECT_TRUE(String::EndsWithCaseInsensitive("foobaR", "bar")); + EXPECT_TRUE(String::EndsWithCaseInsensitive("foobar", "")); + EXPECT_TRUE(String::EndsWithCaseInsensitive("", "")); + + EXPECT_FALSE(String::EndsWithCaseInsensitive("Foobar", "foo")); + EXPECT_FALSE(String::EndsWithCaseInsensitive("foobar", "Foo")); + EXPECT_FALSE(String::EndsWithCaseInsensitive("", "foo")); +} + +// C++Builder's preprocessor is buggy; it fails to expand macros that +// appear in macro parameters after wide char literals. 
Provide an alias +// for NULL as a workaround. +static const wchar_t* const kNull = NULL; + +// Tests String::CaseInsensitiveWideCStringEquals +TEST(StringTest, CaseInsensitiveWideCStringEquals) { + EXPECT_TRUE(String::CaseInsensitiveWideCStringEquals(NULL, NULL)); + EXPECT_FALSE(String::CaseInsensitiveWideCStringEquals(kNull, L"")); + EXPECT_FALSE(String::CaseInsensitiveWideCStringEquals(L"", kNull)); + EXPECT_FALSE(String::CaseInsensitiveWideCStringEquals(kNull, L"foobar")); + EXPECT_FALSE(String::CaseInsensitiveWideCStringEquals(L"foobar", kNull)); + EXPECT_TRUE(String::CaseInsensitiveWideCStringEquals(L"foobar", L"foobar")); + EXPECT_TRUE(String::CaseInsensitiveWideCStringEquals(L"foobar", L"FOOBAR")); + EXPECT_TRUE(String::CaseInsensitiveWideCStringEquals(L"FOOBAR", L"foobar")); +} + +#if GTEST_OS_WINDOWS + +// Tests String::ShowWideCString(). +TEST(StringTest, ShowWideCString) { + EXPECT_STREQ("(null)", + String::ShowWideCString(NULL).c_str()); + EXPECT_STREQ("", String::ShowWideCString(L"").c_str()); + EXPECT_STREQ("foo", String::ShowWideCString(L"foo").c_str()); +} + +# if GTEST_OS_WINDOWS_MOBILE +TEST(StringTest, AnsiAndUtf16Null) { + EXPECT_EQ(NULL, String::AnsiToUtf16(NULL)); + EXPECT_EQ(NULL, String::Utf16ToAnsi(NULL)); +} + +TEST(StringTest, AnsiAndUtf16ConvertBasic) { + const char* ansi = String::Utf16ToAnsi(L"str"); + EXPECT_STREQ("str", ansi); + delete [] ansi; + const WCHAR* utf16 = String::AnsiToUtf16("str"); + EXPECT_EQ(0, wcsncmp(L"str", utf16, 3)); + delete [] utf16; +} + +TEST(StringTest, AnsiAndUtf16ConvertPathChars) { + const char* ansi = String::Utf16ToAnsi(L".:\\ \"*?"); + EXPECT_STREQ(".:\\ \"*?", ansi); + delete [] ansi; + const WCHAR* utf16 = String::AnsiToUtf16(".:\\ \"*?"); + EXPECT_EQ(0, wcsncmp(L".:\\ \"*?", utf16, 3)); + delete [] utf16; +} +# endif // GTEST_OS_WINDOWS_MOBILE + +#endif // GTEST_OS_WINDOWS + +// Tests TestProperty construction. +TEST(TestPropertyTest, StringValue) { + TestProperty property("key", "1"); + EXPECT_STREQ("key", property.key()); + EXPECT_STREQ("1", property.value()); +} + +// Tests TestProperty replacing a value. +TEST(TestPropertyTest, ReplaceStringValue) { + TestProperty property("key", "1"); + EXPECT_STREQ("1", property.value()); + property.SetValue("2"); + EXPECT_STREQ("2", property.value()); +} + +// AddFatalFailure() and AddNonfatalFailure() must be stand-alone +// functions (i.e. their definitions cannot be inlined at the call +// sites), or C++Builder won't compile the code. +static void AddFatalFailure() { + FAIL() << "Expected fatal failure."; +} + +static void AddNonfatalFailure() { + ADD_FAILURE() << "Expected non-fatal failure."; +} + +class ScopedFakeTestPartResultReporterTest : public Test { + public: // Must be public and not protected due to a bug in g++ 3.4.2. + enum FailureMode { + FATAL_FAILURE, + NONFATAL_FAILURE + }; + static void AddFailure(FailureMode failure) { + if (failure == FATAL_FAILURE) { + AddFatalFailure(); + } else { + AddNonfatalFailure(); + } + } +}; + +// Tests that ScopedFakeTestPartResultReporter intercepts test +// failures. 
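+// The interception pattern, in brief: while a ScopedFakeTestPartResultReporter
+// is alive, test part results (fatal and non-fatal alike) are routed into the
+// TestPartResultArray it was given instead of failing the enclosing test;
+// when it goes out of scope, normal reporting resumes and the array can be
+// inspected.  A minimal sketch of the usage:
+//
+//   TestPartResultArray results;
+//   {
+//     ScopedFakeTestPartResultReporter reporter(
+//         ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD,
+//         &results);
+//     ADD_FAILURE() << "captured, not reported";
+//   }  // the destructor restores the real reporter
+//   // results.size() is 1 here.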
+TEST_F(ScopedFakeTestPartResultReporterTest, InterceptsTestFailures) {
+  TestPartResultArray results;
+  {
+    ScopedFakeTestPartResultReporter reporter(
+        ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD,
+        &results);
+    AddFailure(NONFATAL_FAILURE);
+    AddFailure(FATAL_FAILURE);
+  }
+
+  EXPECT_EQ(2, results.size());
+  EXPECT_TRUE(results.GetTestPartResult(0).nonfatally_failed());
+  EXPECT_TRUE(results.GetTestPartResult(1).fatally_failed());
+}
+
+TEST_F(ScopedFakeTestPartResultReporterTest, DeprecatedConstructor) {
+  TestPartResultArray results;
+  {
+    // Tests that the deprecated constructor still works.
+    ScopedFakeTestPartResultReporter reporter(&results);
+    AddFailure(NONFATAL_FAILURE);
+  }
+  EXPECT_EQ(1, results.size());
+}
+
+#if GTEST_IS_THREADSAFE
+
+class ScopedFakeTestPartResultReporterWithThreadsTest
+    : public ScopedFakeTestPartResultReporterTest {
+ protected:
+  static void AddFailureInOtherThread(FailureMode failure) {
+    ThreadWithParam<FailureMode> thread(&AddFailure, failure, NULL);
+    thread.Join();
+  }
+};
+
+TEST_F(ScopedFakeTestPartResultReporterWithThreadsTest,
+       InterceptsTestFailuresInAllThreads) {
+  TestPartResultArray results;
+  {
+    ScopedFakeTestPartResultReporter reporter(
+        ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, &results);
+    AddFailure(NONFATAL_FAILURE);
+    AddFailure(FATAL_FAILURE);
+    AddFailureInOtherThread(NONFATAL_FAILURE);
+    AddFailureInOtherThread(FATAL_FAILURE);
+  }
+
+  EXPECT_EQ(4, results.size());
+  EXPECT_TRUE(results.GetTestPartResult(0).nonfatally_failed());
+  EXPECT_TRUE(results.GetTestPartResult(1).fatally_failed());
+  EXPECT_TRUE(results.GetTestPartResult(2).nonfatally_failed());
+  EXPECT_TRUE(results.GetTestPartResult(3).fatally_failed());
+}
+
+#endif  // GTEST_IS_THREADSAFE
+
+// Tests EXPECT_FATAL_FAILURE{,ON_ALL_THREADS}.  Makes sure that they
+// work even if the failure is generated in a called function rather than
+// the current context.
+
+typedef ScopedFakeTestPartResultReporterTest ExpectFatalFailureTest;
+
+TEST_F(ExpectFatalFailureTest, CatchesFatalFailure) {
+  EXPECT_FATAL_FAILURE(AddFatalFailure(), "Expected fatal failure.");
+}
+
+#if GTEST_HAS_GLOBAL_STRING
+TEST_F(ExpectFatalFailureTest, AcceptsStringObject) {
+  EXPECT_FATAL_FAILURE(AddFatalFailure(), ::string("Expected fatal failure."));
+}
+#endif
+
+TEST_F(ExpectFatalFailureTest, AcceptsStdStringObject) {
+  EXPECT_FATAL_FAILURE(AddFatalFailure(),
+                       ::std::string("Expected fatal failure."));
+}
+
+TEST_F(ExpectFatalFailureTest, CatchesFatalFailureOnAllThreads) {
+  // We have another test below to verify that the macro catches fatal
+  // failures generated on another thread.
+  EXPECT_FATAL_FAILURE_ON_ALL_THREADS(AddFatalFailure(),
+                                      "Expected fatal failure.");
+}
+
+#ifdef __BORLANDC__
+// Silences warnings: "Condition is always true"
+# pragma option push -w-ccc
+#endif
+
+// Tests that EXPECT_FATAL_FAILURE() can be used in a non-void
+// function even when the statement in it contains ASSERT_*.
+
+int NonVoidFunction() {
+  EXPECT_FATAL_FAILURE(ASSERT_TRUE(false), "");
+  EXPECT_FATAL_FAILURE_ON_ALL_THREADS(FAIL(), "");
+  return 0;
+}
+
+TEST_F(ExpectFatalFailureTest, CanBeUsedInNonVoidFunction) {
+  NonVoidFunction();
+}
+
+// Tests that EXPECT_FATAL_FAILURE(statement, ...) doesn't abort the
+// current function even though 'statement' generates a fatal failure.
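+// Why this works (a sketch of the mechanism, not a specification): the macro
+// expands the statement into its own function scope, roughly
+//
+//   class GTestExpectFatalFailureHelper {
+//    public:
+//     static void Execute() { ASSERT_TRUE(false); }  // 'return' exits here
+//   };
+//
+// so the early return triggered by the fatal assertion only leaves that
+// helper, while the function that invoked EXPECT_FATAL_FAILURE keeps running
+// and can still set *aborted = false below.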
+ +void DoesNotAbortHelper(bool* aborted) { + EXPECT_FATAL_FAILURE(ASSERT_TRUE(false), ""); + EXPECT_FATAL_FAILURE_ON_ALL_THREADS(FAIL(), ""); + + *aborted = false; +} + +#ifdef __BORLANDC__ +// Restores warnings after previous "#pragma option push" suppressed them. +# pragma option pop +#endif + +TEST_F(ExpectFatalFailureTest, DoesNotAbort) { + bool aborted = true; + DoesNotAbortHelper(&aborted); + EXPECT_FALSE(aborted); +} + +// Tests that the EXPECT_FATAL_FAILURE{,_ON_ALL_THREADS} accepts a +// statement that contains a macro which expands to code containing an +// unprotected comma. + +static int global_var = 0; +#define GTEST_USE_UNPROTECTED_COMMA_ global_var++, global_var++ + +TEST_F(ExpectFatalFailureTest, AcceptsMacroThatExpandsToUnprotectedComma) { +#ifndef __BORLANDC__ + // ICE's in C++Builder. + EXPECT_FATAL_FAILURE({ + GTEST_USE_UNPROTECTED_COMMA_; + AddFatalFailure(); + }, ""); +#endif + + EXPECT_FATAL_FAILURE_ON_ALL_THREADS({ + GTEST_USE_UNPROTECTED_COMMA_; + AddFatalFailure(); + }, ""); +} + +// Tests EXPECT_NONFATAL_FAILURE{,ON_ALL_THREADS}. + +typedef ScopedFakeTestPartResultReporterTest ExpectNonfatalFailureTest; + +TEST_F(ExpectNonfatalFailureTest, CatchesNonfatalFailure) { + EXPECT_NONFATAL_FAILURE(AddNonfatalFailure(), + "Expected non-fatal failure."); +} + +#if GTEST_HAS_GLOBAL_STRING +TEST_F(ExpectNonfatalFailureTest, AcceptsStringObject) { + EXPECT_NONFATAL_FAILURE(AddNonfatalFailure(), + ::string("Expected non-fatal failure.")); +} +#endif + +TEST_F(ExpectNonfatalFailureTest, AcceptsStdStringObject) { + EXPECT_NONFATAL_FAILURE(AddNonfatalFailure(), + ::std::string("Expected non-fatal failure.")); +} + +TEST_F(ExpectNonfatalFailureTest, CatchesNonfatalFailureOnAllThreads) { + // We have another test below to verify that the macro catches + // non-fatal failures generated on another thread. + EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(AddNonfatalFailure(), + "Expected non-fatal failure."); +} + +// Tests that the EXPECT_NONFATAL_FAILURE{,_ON_ALL_THREADS} accepts a +// statement that contains a macro which expands to code containing an +// unprotected comma. +TEST_F(ExpectNonfatalFailureTest, AcceptsMacroThatExpandsToUnprotectedComma) { + EXPECT_NONFATAL_FAILURE({ + GTEST_USE_UNPROTECTED_COMMA_; + AddNonfatalFailure(); + }, ""); + + EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS({ + GTEST_USE_UNPROTECTED_COMMA_; + AddNonfatalFailure(); + }, ""); +} + +#if GTEST_IS_THREADSAFE + +typedef ScopedFakeTestPartResultReporterWithThreadsTest + ExpectFailureWithThreadsTest; + +TEST_F(ExpectFailureWithThreadsTest, ExpectFatalFailureOnAllThreads) { + EXPECT_FATAL_FAILURE_ON_ALL_THREADS(AddFailureInOtherThread(FATAL_FAILURE), + "Expected fatal failure."); +} + +TEST_F(ExpectFailureWithThreadsTest, ExpectNonFatalFailureOnAllThreads) { + EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS( + AddFailureInOtherThread(NONFATAL_FAILURE), "Expected non-fatal failure."); +} + +#endif // GTEST_IS_THREADSAFE + +// Tests the TestProperty class. + +TEST(TestPropertyTest, ConstructorWorks) { + const TestProperty property("key", "value"); + EXPECT_STREQ("key", property.key()); + EXPECT_STREQ("value", property.value()); +} + +TEST(TestPropertyTest, SetValue) { + TestProperty property("key", "value_1"); + EXPECT_STREQ("key", property.key()); + property.SetValue("value_2"); + EXPECT_STREQ("key", property.key()); + EXPECT_STREQ("value_2", property.value()); +} + +// Tests the TestResult class + +// The test fixture for testing TestResult. 
+class TestResultTest : public Test {
+ protected:
+  typedef std::vector<TestPartResult> TPRVector;
+
+  // We make use of 2 TestPartResult objects,
+  TestPartResult * pr1, * pr2;
+
+  // ... and 3 TestResult objects.
+  TestResult * r0, * r1, * r2;
+
+  virtual void SetUp() {
+    // pr1 is for success.
+    pr1 = new TestPartResult(TestPartResult::kSuccess,
+                             "foo/bar.cc",
+                             10,
+                             "Success!");
+
+    // pr2 is for fatal failure.
+    pr2 = new TestPartResult(TestPartResult::kFatalFailure,
+                             "foo/bar.cc",
+                             -1,  // This line number means "unknown"
+                             "Failure!");
+
+    // Creates the TestResult objects.
+    r0 = new TestResult();
+    r1 = new TestResult();
+    r2 = new TestResult();
+
+    // In order to test TestResult, we need to modify its internal
+    // state, in particular the TestPartResult vector it holds.
+    // test_part_results() returns a const reference to this vector.
+    // We cast it to a non-const object s.t. it can be modified (yes,
+    // this is a hack).
+    TPRVector* results1 = const_cast<TPRVector*>(
+        &TestResultAccessor::test_part_results(*r1));
+    TPRVector* results2 = const_cast<TPRVector*>(
+        &TestResultAccessor::test_part_results(*r2));
+
+    // r0 is an empty TestResult.
+
+    // r1 contains a single SUCCESS TestPartResult.
+    results1->push_back(*pr1);
+
+    // r2 contains a SUCCESS, and a FAILURE.
+    results2->push_back(*pr1);
+    results2->push_back(*pr2);
+  }
+
+  virtual void TearDown() {
+    delete pr1;
+    delete pr2;
+
+    delete r0;
+    delete r1;
+    delete r2;
+  }
+
+  // Helper that compares two TestPartResults.
+  static void CompareTestPartResult(const TestPartResult& expected,
+                                    const TestPartResult& actual) {
+    EXPECT_EQ(expected.type(), actual.type());
+    EXPECT_STREQ(expected.file_name(), actual.file_name());
+    EXPECT_EQ(expected.line_number(), actual.line_number());
+    EXPECT_STREQ(expected.summary(), actual.summary());
+    EXPECT_STREQ(expected.message(), actual.message());
+    EXPECT_EQ(expected.passed(), actual.passed());
+    EXPECT_EQ(expected.failed(), actual.failed());
+    EXPECT_EQ(expected.nonfatally_failed(), actual.nonfatally_failed());
+    EXPECT_EQ(expected.fatally_failed(), actual.fatally_failed());
+  }
+};
+
+// Tests TestResult::total_part_count().
+TEST_F(TestResultTest, total_part_count) {
+  ASSERT_EQ(0, r0->total_part_count());
+  ASSERT_EQ(1, r1->total_part_count());
+  ASSERT_EQ(2, r2->total_part_count());
+}
+
+// Tests TestResult::Passed().
+TEST_F(TestResultTest, Passed) {
+  ASSERT_TRUE(r0->Passed());
+  ASSERT_TRUE(r1->Passed());
+  ASSERT_FALSE(r2->Passed());
+}
+
+// Tests TestResult::Failed().
+TEST_F(TestResultTest, Failed) {
+  ASSERT_FALSE(r0->Failed());
+  ASSERT_FALSE(r1->Failed());
+  ASSERT_TRUE(r2->Failed());
+}
+
+// Tests TestResult::GetTestPartResult().
+
+typedef TestResultTest TestResultDeathTest;
+
+TEST_F(TestResultDeathTest, GetTestPartResult) {
+  CompareTestPartResult(*pr1, r2->GetTestPartResult(0));
+  CompareTestPartResult(*pr2, r2->GetTestPartResult(1));
+  EXPECT_DEATH_IF_SUPPORTED(r2->GetTestPartResult(2), "");
+  EXPECT_DEATH_IF_SUPPORTED(r2->GetTestPartResult(-1), "");
+}
+
+// Tests TestResult has no properties when none are added.
+TEST(TestResultPropertyTest, NoPropertiesFoundWhenNoneAreAdded) {
+  TestResult test_result;
+  ASSERT_EQ(0, test_result.test_property_count());
+}
+
+// Tests TestResult has the expected property when added.
+TEST(TestResultPropertyTest, OnePropertyFoundWhenAdded) { + TestResult test_result; + TestProperty property("key_1", "1"); + TestResultAccessor::RecordProperty(&test_result, "testcase", property); + ASSERT_EQ(1, test_result.test_property_count()); + const TestProperty& actual_property = test_result.GetTestProperty(0); + EXPECT_STREQ("key_1", actual_property.key()); + EXPECT_STREQ("1", actual_property.value()); +} + +// Tests TestResult has multiple properties when added. +TEST(TestResultPropertyTest, MultiplePropertiesFoundWhenAdded) { + TestResult test_result; + TestProperty property_1("key_1", "1"); + TestProperty property_2("key_2", "2"); + TestResultAccessor::RecordProperty(&test_result, "testcase", property_1); + TestResultAccessor::RecordProperty(&test_result, "testcase", property_2); + ASSERT_EQ(2, test_result.test_property_count()); + const TestProperty& actual_property_1 = test_result.GetTestProperty(0); + EXPECT_STREQ("key_1", actual_property_1.key()); + EXPECT_STREQ("1", actual_property_1.value()); + + const TestProperty& actual_property_2 = test_result.GetTestProperty(1); + EXPECT_STREQ("key_2", actual_property_2.key()); + EXPECT_STREQ("2", actual_property_2.value()); +} + +// Tests TestResult::RecordProperty() overrides values for duplicate keys. +TEST(TestResultPropertyTest, OverridesValuesForDuplicateKeys) { + TestResult test_result; + TestProperty property_1_1("key_1", "1"); + TestProperty property_2_1("key_2", "2"); + TestProperty property_1_2("key_1", "12"); + TestProperty property_2_2("key_2", "22"); + TestResultAccessor::RecordProperty(&test_result, "testcase", property_1_1); + TestResultAccessor::RecordProperty(&test_result, "testcase", property_2_1); + TestResultAccessor::RecordProperty(&test_result, "testcase", property_1_2); + TestResultAccessor::RecordProperty(&test_result, "testcase", property_2_2); + + ASSERT_EQ(2, test_result.test_property_count()); + const TestProperty& actual_property_1 = test_result.GetTestProperty(0); + EXPECT_STREQ("key_1", actual_property_1.key()); + EXPECT_STREQ("12", actual_property_1.value()); + + const TestProperty& actual_property_2 = test_result.GetTestProperty(1); + EXPECT_STREQ("key_2", actual_property_2.key()); + EXPECT_STREQ("22", actual_property_2.value()); +} + +// Tests TestResult::GetTestProperty(). +TEST(TestResultPropertyTest, GetTestProperty) { + TestResult test_result; + TestProperty property_1("key_1", "1"); + TestProperty property_2("key_2", "2"); + TestProperty property_3("key_3", "3"); + TestResultAccessor::RecordProperty(&test_result, "testcase", property_1); + TestResultAccessor::RecordProperty(&test_result, "testcase", property_2); + TestResultAccessor::RecordProperty(&test_result, "testcase", property_3); + + const TestProperty& fetched_property_1 = test_result.GetTestProperty(0); + const TestProperty& fetched_property_2 = test_result.GetTestProperty(1); + const TestProperty& fetched_property_3 = test_result.GetTestProperty(2); + + EXPECT_STREQ("key_1", fetched_property_1.key()); + EXPECT_STREQ("1", fetched_property_1.value()); + + EXPECT_STREQ("key_2", fetched_property_2.key()); + EXPECT_STREQ("2", fetched_property_2.value()); + + EXPECT_STREQ("key_3", fetched_property_3.key()); + EXPECT_STREQ("3", fetched_property_3.value()); + + EXPECT_DEATH_IF_SUPPORTED(test_result.GetTestProperty(3), ""); + EXPECT_DEATH_IF_SUPPORTED(test_result.GetTestProperty(-1), ""); +} + +// Tests that GTestFlagSaver works on Windows and Mac. 
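+// The class under test follows the usual save/restore RAII shape: its
+// constructor snapshots every GTEST_FLAG(...) value and its destructor writes
+// them back.  A minimal sketch of the intended usage, assuming only that
+// constructor/destructor pairing:
+//
+//   {
+//     GTestFlagSaver saver;        // snapshot taken here
+//     GTEST_FLAG(repeat) = 100;    // scribble on the flags freely
+//   }                              // destruction restores repeat to 1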
+ +class GTestFlagSaverTest : public Test { + protected: + // Saves the Google Test flags such that we can restore them later, and + // then sets them to their default values. This will be called + // before the first test in this test case is run. + static void SetUpTestCase() { + saver_ = new GTestFlagSaver; + + GTEST_FLAG(also_run_disabled_tests) = false; + GTEST_FLAG(break_on_failure) = false; + GTEST_FLAG(catch_exceptions) = false; + GTEST_FLAG(death_test_use_fork) = false; + GTEST_FLAG(color) = "auto"; + GTEST_FLAG(filter) = ""; + GTEST_FLAG(list_tests) = false; + GTEST_FLAG(output) = ""; + GTEST_FLAG(print_time) = true; + GTEST_FLAG(random_seed) = 0; + GTEST_FLAG(repeat) = 1; + GTEST_FLAG(shuffle) = false; + GTEST_FLAG(stack_trace_depth) = kMaxStackTraceDepth; + GTEST_FLAG(stream_result_to) = ""; + GTEST_FLAG(throw_on_failure) = false; + } + + // Restores the Google Test flags that the tests have modified. This will + // be called after the last test in this test case is run. + static void TearDownTestCase() { + delete saver_; + saver_ = NULL; + } + + // Verifies that the Google Test flags have their default values, and then + // modifies each of them. + void VerifyAndModifyFlags() { + EXPECT_FALSE(GTEST_FLAG(also_run_disabled_tests)); + EXPECT_FALSE(GTEST_FLAG(break_on_failure)); + EXPECT_FALSE(GTEST_FLAG(catch_exceptions)); + EXPECT_STREQ("auto", GTEST_FLAG(color).c_str()); + EXPECT_FALSE(GTEST_FLAG(death_test_use_fork)); + EXPECT_STREQ("", GTEST_FLAG(filter).c_str()); + EXPECT_FALSE(GTEST_FLAG(list_tests)); + EXPECT_STREQ("", GTEST_FLAG(output).c_str()); + EXPECT_TRUE(GTEST_FLAG(print_time)); + EXPECT_EQ(0, GTEST_FLAG(random_seed)); + EXPECT_EQ(1, GTEST_FLAG(repeat)); + EXPECT_FALSE(GTEST_FLAG(shuffle)); + EXPECT_EQ(kMaxStackTraceDepth, GTEST_FLAG(stack_trace_depth)); + EXPECT_STREQ("", GTEST_FLAG(stream_result_to).c_str()); + EXPECT_FALSE(GTEST_FLAG(throw_on_failure)); + + GTEST_FLAG(also_run_disabled_tests) = true; + GTEST_FLAG(break_on_failure) = true; + GTEST_FLAG(catch_exceptions) = true; + GTEST_FLAG(color) = "no"; + GTEST_FLAG(death_test_use_fork) = true; + GTEST_FLAG(filter) = "abc"; + GTEST_FLAG(list_tests) = true; + GTEST_FLAG(output) = "xml:foo.xml"; + GTEST_FLAG(print_time) = false; + GTEST_FLAG(random_seed) = 1; + GTEST_FLAG(repeat) = 100; + GTEST_FLAG(shuffle) = true; + GTEST_FLAG(stack_trace_depth) = 1; + GTEST_FLAG(stream_result_to) = "localhost:1234"; + GTEST_FLAG(throw_on_failure) = true; + } + + private: + // For saving Google Test flags during this test case. + static GTestFlagSaver* saver_; +}; + +GTestFlagSaver* GTestFlagSaverTest::saver_ = NULL; + +// Google Test doesn't guarantee the order of tests. The following two +// tests are designed to work regardless of their order. + +// Modifies the Google Test flags in the test body. +TEST_F(GTestFlagSaverTest, ModifyGTestFlags) { + VerifyAndModifyFlags(); +} + +// Verifies that the Google Test flags in the body of the previous test were +// restored to their original values. +TEST_F(GTestFlagSaverTest, VerifyGTestFlags) { + VerifyAndModifyFlags(); +} + +// Sets an environment variable with the given name to the given +// value. If the value argument is "", unsets the environment +// variable. The caller must ensure that both arguments are not NULL. +static void SetEnv(const char* name, const char* value) { +#if GTEST_OS_WINDOWS_MOBILE + // Environment variables are not supported on Windows CE. 
+  return;
+#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9)
+  // C++Builder's putenv only stores a pointer to its parameter; we have to
+  // ensure that the string remains valid as long as it might be needed.
+  // We use an std::map to do so.
+  static std::map<std::string, std::string*> added_env;
+
+  // Because putenv stores a pointer to the string buffer, we can't delete the
+  // previous string (if present) until after it's replaced.
+  std::string *prev_env = NULL;
+  if (added_env.find(name) != added_env.end()) {
+    prev_env = added_env[name];
+  }
+  added_env[name] = new std::string(
+      (Message() << name << "=" << value).GetString());
+
+  // The standard signature of putenv accepts a 'char*' argument.  Other
+  // implementations, like C++Builder's, accept a 'const char*'.
+  // We cast away the 'const' since that would work for both variants.
+  putenv(const_cast<char*>(added_env[name]->c_str()));
+  delete prev_env;
+#elif GTEST_OS_WINDOWS  // If we are on Windows proper.
+  _putenv((Message() << name << "=" << value).GetString().c_str());
+#else
+  if (*value == '\0') {
+    unsetenv(name);
+  } else {
+    setenv(name, value, 1);
+  }
+#endif  // GTEST_OS_WINDOWS_MOBILE
+}
+
+#if !GTEST_OS_WINDOWS_MOBILE
+// Environment variables are not supported on Windows CE.
+
+using testing::internal::Int32FromGTestEnv;
+
+// Tests Int32FromGTestEnv().
+
+// Tests that Int32FromGTestEnv() returns the default value when the
+// environment variable is not set.
+TEST(Int32FromGTestEnvTest, ReturnsDefaultWhenVariableIsNotSet) {
+  SetEnv(GTEST_FLAG_PREFIX_UPPER_ "TEMP", "");
+  EXPECT_EQ(10, Int32FromGTestEnv("temp", 10));
+}
+
+// Tests that Int32FromGTestEnv() returns the default value when the
+// environment variable overflows as an Int32.
+TEST(Int32FromGTestEnvTest, ReturnsDefaultWhenValueOverflows) {
+  printf("(expecting 2 warnings)\n");
+
+  SetEnv(GTEST_FLAG_PREFIX_UPPER_ "TEMP", "12345678987654321");
+  EXPECT_EQ(20, Int32FromGTestEnv("temp", 20));
+
+  SetEnv(GTEST_FLAG_PREFIX_UPPER_ "TEMP", "-12345678987654321");
+  EXPECT_EQ(30, Int32FromGTestEnv("temp", 30));
+}
+
+// Tests that Int32FromGTestEnv() returns the default value when the
+// environment variable does not represent a valid decimal integer.
+TEST(Int32FromGTestEnvTest, ReturnsDefaultWhenValueIsInvalid) {
+  printf("(expecting 2 warnings)\n");
+
+  SetEnv(GTEST_FLAG_PREFIX_UPPER_ "TEMP", "A1");
+  EXPECT_EQ(40, Int32FromGTestEnv("temp", 40));
+
+  SetEnv(GTEST_FLAG_PREFIX_UPPER_ "TEMP", "12X");
+  EXPECT_EQ(50, Int32FromGTestEnv("temp", 50));
+}
+
+// Tests that Int32FromGTestEnv() parses and returns the value of the
+// environment variable when it represents a valid decimal integer in
+// the range of an Int32.
+TEST(Int32FromGTestEnvTest, ParsesAndReturnsValidValue) {
+  SetEnv(GTEST_FLAG_PREFIX_UPPER_ "TEMP", "123");
+  EXPECT_EQ(123, Int32FromGTestEnv("temp", 0));
+
+  SetEnv(GTEST_FLAG_PREFIX_UPPER_ "TEMP", "-321");
+  EXPECT_EQ(-321, Int32FromGTestEnv("temp", 0));
+}
+#endif  // !GTEST_OS_WINDOWS_MOBILE
+
+// Tests ParseInt32Flag().
+
+// Tests that ParseInt32Flag() returns false and doesn't change the
+// output value when the flag has the wrong format.
+TEST(ParseInt32FlagTest, ReturnsFalseForInvalidFlag) {
+  Int32 value = 123;
+  EXPECT_FALSE(ParseInt32Flag("--a=100", "b", &value));
+  EXPECT_EQ(123, value);
+
+  EXPECT_FALSE(ParseInt32Flag("a=100", "a", &value));
+  EXPECT_EQ(123, value);
+}
+
+// Tests that ParseInt32Flag() returns false and doesn't change the
+// output value when the flag overflows as an Int32.
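+// For reference (expected behavior, not the parser itself): a value parses
+// successfully only if the whole string is a decimal integer that fits in
+// Int32, i.e. lies in [-2147483648, 2147483647].  "12345678987654321"
+// overflows and "12X" has trailing junk, so both fall back to the default,
+// each after printing a warning; hence the "(expecting 2 warnings)" notes in
+// these tests.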
+TEST(ParseInt32FlagTest, ReturnsDefaultWhenValueOverflows) {
+  printf("(expecting 2 warnings)\n");
+
+  Int32 value = 123;
+  EXPECT_FALSE(ParseInt32Flag("--abc=12345678987654321", "abc", &value));
+  EXPECT_EQ(123, value);
+
+  EXPECT_FALSE(ParseInt32Flag("--abc=-12345678987654321", "abc", &value));
+  EXPECT_EQ(123, value);
+}
+
+// Tests that ParseInt32Flag() returns false and doesn't change the
+// output value when the flag does not represent a valid decimal
+// integer.
+TEST(ParseInt32FlagTest, ReturnsDefaultWhenValueIsInvalid) {
+  printf("(expecting 2 warnings)\n");
+
+  Int32 value = 123;
+  EXPECT_FALSE(ParseInt32Flag("--abc=A1", "abc", &value));
+  EXPECT_EQ(123, value);
+
+  EXPECT_FALSE(ParseInt32Flag("--abc=12X", "abc", &value));
+  EXPECT_EQ(123, value);
+}
+
+// Tests that ParseInt32Flag() parses the value of the flag and
+// returns true when the flag represents a valid decimal integer in
+// the range of an Int32.
+TEST(ParseInt32FlagTest, ParsesAndReturnsValidValue) {
+  Int32 value = 123;
+  EXPECT_TRUE(ParseInt32Flag("--" GTEST_FLAG_PREFIX_ "abc=456", "abc", &value));
+  EXPECT_EQ(456, value);
+
+  EXPECT_TRUE(ParseInt32Flag("--" GTEST_FLAG_PREFIX_ "abc=-789",
+                             "abc", &value));
+  EXPECT_EQ(-789, value);
+}
+
+// Tests that Int32FromEnvOrDie() parses the value of the var or
+// returns the correct default.
+// Environment variables are not supported on Windows CE.
+#if !GTEST_OS_WINDOWS_MOBILE
+TEST(Int32FromEnvOrDieTest, ParsesAndReturnsValidValue) {
+  EXPECT_EQ(333, Int32FromEnvOrDie(GTEST_FLAG_PREFIX_UPPER_ "UnsetVar", 333));
+  SetEnv(GTEST_FLAG_PREFIX_UPPER_ "UnsetVar", "123");
+  EXPECT_EQ(123, Int32FromEnvOrDie(GTEST_FLAG_PREFIX_UPPER_ "UnsetVar", 333));
+  SetEnv(GTEST_FLAG_PREFIX_UPPER_ "UnsetVar", "-123");
+  EXPECT_EQ(-123, Int32FromEnvOrDie(GTEST_FLAG_PREFIX_UPPER_ "UnsetVar", 333));
+}
+#endif  // !GTEST_OS_WINDOWS_MOBILE
+
+// Tests that Int32FromEnvOrDie() aborts with an error message
+// if the variable is not an Int32.
+TEST(Int32FromEnvOrDieDeathTest, AbortsOnFailure) {
+  SetEnv(GTEST_FLAG_PREFIX_UPPER_ "VAR", "xxx");
+  EXPECT_DEATH_IF_SUPPORTED(
+      Int32FromEnvOrDie(GTEST_FLAG_PREFIX_UPPER_ "VAR", 123),
+      ".*");
+}
+
+// Tests that Int32FromEnvOrDie() aborts with an error message
+// if the variable cannot be represented by an Int32.
+TEST(Int32FromEnvOrDieDeathTest, AbortsOnInt32Overflow) {
+  SetEnv(GTEST_FLAG_PREFIX_UPPER_ "VAR", "1234567891234567891234");
+  EXPECT_DEATH_IF_SUPPORTED(
+      Int32FromEnvOrDie(GTEST_FLAG_PREFIX_UPPER_ "VAR", 123),
+      ".*");
+}
+
+// Tests that ShouldRunTestOnShard() selects all tests
+// where there is 1 shard.
+TEST(ShouldRunTestOnShardTest, IsPartitionWhenThereIsOneShard) {
+  EXPECT_TRUE(ShouldRunTestOnShard(1, 0, 0));
+  EXPECT_TRUE(ShouldRunTestOnShard(1, 0, 1));
+  EXPECT_TRUE(ShouldRunTestOnShard(1, 0, 2));
+  EXPECT_TRUE(ShouldRunTestOnShard(1, 0, 3));
+  EXPECT_TRUE(ShouldRunTestOnShard(1, 0, 4));
+}
+
+class ShouldShardTest : public testing::Test {
+ protected:
+  virtual void SetUp() {
+    index_var_ = GTEST_FLAG_PREFIX_UPPER_ "INDEX";
+    total_var_ = GTEST_FLAG_PREFIX_UPPER_ "TOTAL";
+  }
+
+  virtual void TearDown() {
+    SetEnv(index_var_, "");
+    SetEnv(total_var_, "");
+  }
+
+  const char* index_var_;
+  const char* total_var_;
+};
+
+// Tests that sharding is disabled if neither of the environment variables
+// is set.
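+// More generally, the protocol these tests pin down: ShouldShard() consults
+// the two environment variables named above (the total shard count and this
+// process's shard index) and returns true only for a valid total > 1 with an
+// index in [0, total).  Test selection then behaves like a round-robin
+// partition; a sketch, assuming that implementation choice:
+//
+//   bool ShouldRunTestOnShardSketch(int total_shards, int shard_index,
+//                                   int test_id) {
+//     return (test_id % total_shards) == shard_index;
+//   }
+//
+// The tests further down verify the partition and balance properties without
+// assuming this formula.  The specific ShouldShard() cases follow.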
+TEST_F(ShouldShardTest, ReturnsFalseWhenNeitherEnvVarIsSet) { + SetEnv(index_var_, ""); + SetEnv(total_var_, ""); + + EXPECT_FALSE(ShouldShard(total_var_, index_var_, false)); + EXPECT_FALSE(ShouldShard(total_var_, index_var_, true)); +} + +// Tests that sharding is not enabled if total_shards == 1. +TEST_F(ShouldShardTest, ReturnsFalseWhenTotalShardIsOne) { + SetEnv(index_var_, "0"); + SetEnv(total_var_, "1"); + EXPECT_FALSE(ShouldShard(total_var_, index_var_, false)); + EXPECT_FALSE(ShouldShard(total_var_, index_var_, true)); +} + +// Tests that sharding is enabled if total_shards > 1 and +// we are not in a death test subprocess. +// Environment variables are not supported on Windows CE. +#if !GTEST_OS_WINDOWS_MOBILE +TEST_F(ShouldShardTest, WorksWhenShardEnvVarsAreValid) { + SetEnv(index_var_, "4"); + SetEnv(total_var_, "22"); + EXPECT_TRUE(ShouldShard(total_var_, index_var_, false)); + EXPECT_FALSE(ShouldShard(total_var_, index_var_, true)); + + SetEnv(index_var_, "8"); + SetEnv(total_var_, "9"); + EXPECT_TRUE(ShouldShard(total_var_, index_var_, false)); + EXPECT_FALSE(ShouldShard(total_var_, index_var_, true)); + + SetEnv(index_var_, "0"); + SetEnv(total_var_, "9"); + EXPECT_TRUE(ShouldShard(total_var_, index_var_, false)); + EXPECT_FALSE(ShouldShard(total_var_, index_var_, true)); +} +#endif // !GTEST_OS_WINDOWS_MOBILE + +// Tests that we exit in error if the sharding values are not valid. + +typedef ShouldShardTest ShouldShardDeathTest; + +TEST_F(ShouldShardDeathTest, AbortsWhenShardingEnvVarsAreInvalid) { + SetEnv(index_var_, "4"); + SetEnv(total_var_, "4"); + EXPECT_DEATH_IF_SUPPORTED(ShouldShard(total_var_, index_var_, false), ".*"); + + SetEnv(index_var_, "4"); + SetEnv(total_var_, "-2"); + EXPECT_DEATH_IF_SUPPORTED(ShouldShard(total_var_, index_var_, false), ".*"); + + SetEnv(index_var_, "5"); + SetEnv(total_var_, ""); + EXPECT_DEATH_IF_SUPPORTED(ShouldShard(total_var_, index_var_, false), ".*"); + + SetEnv(index_var_, ""); + SetEnv(total_var_, "5"); + EXPECT_DEATH_IF_SUPPORTED(ShouldShard(total_var_, index_var_, false), ".*"); +} + +// Tests that ShouldRunTestOnShard is a partition when 5 +// shards are used. +TEST(ShouldRunTestOnShardTest, IsPartitionWhenThereAreFiveShards) { + // Choose an arbitrary number of tests and shards. + const int num_tests = 17; + const int num_shards = 5; + + // Check partitioning: each test should be on exactly 1 shard. + for (int test_id = 0; test_id < num_tests; test_id++) { + int prev_selected_shard_index = -1; + for (int shard_index = 0; shard_index < num_shards; shard_index++) { + if (ShouldRunTestOnShard(num_shards, shard_index, test_id)) { + if (prev_selected_shard_index < 0) { + prev_selected_shard_index = shard_index; + } else { + ADD_FAILURE() << "Shard " << prev_selected_shard_index << " and " + << shard_index << " are both selected to run test " << test_id; + } + } + } + } + + // Check balance: This is not required by the sharding protocol, but is a + // desirable property for performance. + for (int shard_index = 0; shard_index < num_shards; shard_index++) { + int num_tests_on_shard = 0; + for (int test_id = 0; test_id < num_tests; test_id++) { + num_tests_on_shard += + ShouldRunTestOnShard(num_shards, shard_index, test_id); + } + EXPECT_GE(num_tests_on_shard, num_tests / num_shards); + } +} + +// For the same reason we are not explicitly testing everything in the +// Test class, there are no separate tests for the following classes +// (except for some trivial cases): +// +// TestCase, UnitTest, UnitTestResultPrinter. 
+//
+// Similarly, there are no separate tests for the following macros:
+//
+//   TEST, TEST_F, RUN_ALL_TESTS
+
+TEST(UnitTestTest, CanGetOriginalWorkingDir) {
+  ASSERT_TRUE(UnitTest::GetInstance()->original_working_dir() != NULL);
+  EXPECT_STRNE(UnitTest::GetInstance()->original_working_dir(), "");
+}
+
+TEST(UnitTestTest, ReturnsPlausibleTimestamp) {
+  EXPECT_LT(0, UnitTest::GetInstance()->start_timestamp());
+  EXPECT_LE(UnitTest::GetInstance()->start_timestamp(), GetTimeInMillis());
+}
+
+// When a property using a reserved key is supplied to this function, it
+// tests that a non-fatal failure is added, a fatal failure is not added,
+// and that the property is not recorded.
+void ExpectNonFatalFailureRecordingPropertyWithReservedKey(
+    const TestResult& test_result, const char* key) {
+  EXPECT_NONFATAL_FAILURE(Test::RecordProperty(key, "1"), "Reserved key");
+  ASSERT_EQ(0, test_result.test_property_count())
+      << "Property for key '" << key << "' recorded unexpectedly.";
+}
+
+void ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest(
+    const char* key) {
+  const TestInfo* test_info = UnitTest::GetInstance()->current_test_info();
+  ASSERT_TRUE(test_info != NULL);
+  ExpectNonFatalFailureRecordingPropertyWithReservedKey(*test_info->result(),
+                                                        key);
+}
+
+void ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTestCase(
+    const char* key) {
+  const TestCase* test_case = UnitTest::GetInstance()->current_test_case();
+  ASSERT_TRUE(test_case != NULL);
+  ExpectNonFatalFailureRecordingPropertyWithReservedKey(
+      test_case->ad_hoc_test_result(), key);
+}
+
+void ExpectNonFatalFailureRecordingPropertyWithReservedKeyOutsideOfTestCase(
+    const char* key) {
+  ExpectNonFatalFailureRecordingPropertyWithReservedKey(
+      UnitTest::GetInstance()->ad_hoc_test_result(), key);
+}
+
+// Tests that the property recording functions in UnitTest work correctly
+// outside of tests.  Creating a separate instance of UnitTest ensures it
+// is in a state similar to the UnitTest singleton's state between tests.
+class UnitTestRecordPropertyTest :
+    public testing::internal::UnitTestRecordPropertyTestHelper {
+ public:
+  static void SetUpTestCase() {
+    ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTestCase(
+        "disabled");
+    ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTestCase(
+        "errors");
+    ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTestCase(
+        "failures");
+    ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTestCase(
+        "name");
+    ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTestCase(
+        "tests");
+    ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTestCase(
+        "time");
+
+    Test::RecordProperty("test_case_key_1", "1");
+    const TestCase* test_case = UnitTest::GetInstance()->current_test_case();
+    ASSERT_TRUE(test_case != NULL);
+
+    ASSERT_EQ(1, test_case->ad_hoc_test_result().test_property_count());
+    EXPECT_STREQ("test_case_key_1",
+                 test_case->ad_hoc_test_result().GetTestProperty(0).key());
+    EXPECT_STREQ("1",
+                 test_case->ad_hoc_test_result().GetTestProperty(0).value());
+  }
+};
+
+// Tests TestResult has the expected property when added.
+TEST_F(UnitTestRecordPropertyTest, OnePropertyFoundWhenAdded) { + UnitTestRecordProperty("key_1", "1"); + + ASSERT_EQ(1, unit_test_.ad_hoc_test_result().test_property_count()); + + EXPECT_STREQ("key_1", + unit_test_.ad_hoc_test_result().GetTestProperty(0).key()); + EXPECT_STREQ("1", + unit_test_.ad_hoc_test_result().GetTestProperty(0).value()); +} + +// Tests TestResult has multiple properties when added. +TEST_F(UnitTestRecordPropertyTest, MultiplePropertiesFoundWhenAdded) { + UnitTestRecordProperty("key_1", "1"); + UnitTestRecordProperty("key_2", "2"); + + ASSERT_EQ(2, unit_test_.ad_hoc_test_result().test_property_count()); + + EXPECT_STREQ("key_1", + unit_test_.ad_hoc_test_result().GetTestProperty(0).key()); + EXPECT_STREQ("1", unit_test_.ad_hoc_test_result().GetTestProperty(0).value()); + + EXPECT_STREQ("key_2", + unit_test_.ad_hoc_test_result().GetTestProperty(1).key()); + EXPECT_STREQ("2", unit_test_.ad_hoc_test_result().GetTestProperty(1).value()); +} + +// Tests TestResult::RecordProperty() overrides values for duplicate keys. +TEST_F(UnitTestRecordPropertyTest, OverridesValuesForDuplicateKeys) { + UnitTestRecordProperty("key_1", "1"); + UnitTestRecordProperty("key_2", "2"); + UnitTestRecordProperty("key_1", "12"); + UnitTestRecordProperty("key_2", "22"); + + ASSERT_EQ(2, unit_test_.ad_hoc_test_result().test_property_count()); + + EXPECT_STREQ("key_1", + unit_test_.ad_hoc_test_result().GetTestProperty(0).key()); + EXPECT_STREQ("12", + unit_test_.ad_hoc_test_result().GetTestProperty(0).value()); + + EXPECT_STREQ("key_2", + unit_test_.ad_hoc_test_result().GetTestProperty(1).key()); + EXPECT_STREQ("22", + unit_test_.ad_hoc_test_result().GetTestProperty(1).value()); +} + +TEST_F(UnitTestRecordPropertyTest, + AddFailureInsideTestsWhenUsingTestCaseReservedKeys) { + ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( + "name"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( + "value_param"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( + "type_param"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( + "status"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( + "time"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyForCurrentTest( + "classname"); +} + +TEST_F(UnitTestRecordPropertyTest, + AddRecordWithReservedKeysGeneratesCorrectPropertyList) { + EXPECT_NONFATAL_FAILURE( + Test::RecordProperty("name", "1"), + "'classname', 'name', 'status', 'time', 'type_param', and 'value_param'" + " are reserved"); +} + +class UnitTestRecordPropertyTestEnvironment : public Environment { + public: + virtual void TearDown() { + ExpectNonFatalFailureRecordingPropertyWithReservedKeyOutsideOfTestCase( + "tests"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyOutsideOfTestCase( + "failures"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyOutsideOfTestCase( + "disabled"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyOutsideOfTestCase( + "errors"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyOutsideOfTestCase( + "name"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyOutsideOfTestCase( + "timestamp"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyOutsideOfTestCase( + "time"); + ExpectNonFatalFailureRecordingPropertyWithReservedKeyOutsideOfTestCase( + "random_seed"); + } +}; + +// This will test property recording outside of any test or test case. 
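+// (AddGlobalTestEnvironment takes ownership of the pointer it is given; the
+// environment's SetUp() runs before the first test and its TearDown() after
+// the last one, which is what lets the TearDown() above exercise property
+// recording when no test or test case is current.)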
+static Environment* record_property_env = + AddGlobalTestEnvironment(new UnitTestRecordPropertyTestEnvironment); + +// This group of tests is for predicate assertions (ASSERT_PRED*, etc) +// of various arities. They do not attempt to be exhaustive. Rather, +// view them as smoke tests that can be easily reviewed and verified. +// A more complete set of tests for predicate assertions can be found +// in gtest_pred_impl_unittest.cc. + +// First, some predicates and predicate-formatters needed by the tests. + +// Returns true iff the argument is an even number. +bool IsEven(int n) { + return (n % 2) == 0; +} + +// A functor that returns true iff the argument is an even number. +struct IsEvenFunctor { + bool operator()(int n) { return IsEven(n); } +}; + +// A predicate-formatter function that asserts the argument is an even +// number. +AssertionResult AssertIsEven(const char* expr, int n) { + if (IsEven(n)) { + return AssertionSuccess(); + } + + Message msg; + msg << expr << " evaluates to " << n << ", which is not even."; + return AssertionFailure(msg); +} + +// A predicate function that returns AssertionResult for use in +// EXPECT/ASSERT_TRUE/FALSE. +AssertionResult ResultIsEven(int n) { + if (IsEven(n)) + return AssertionSuccess() << n << " is even"; + else + return AssertionFailure() << n << " is odd"; +} + +// A predicate function that returns AssertionResult but gives no +// explanation why it succeeds. Needed for testing that +// EXPECT/ASSERT_FALSE handles such functions correctly. +AssertionResult ResultIsEvenNoExplanation(int n) { + if (IsEven(n)) + return AssertionSuccess(); + else + return AssertionFailure() << n << " is odd"; +} + +// A predicate-formatter functor that asserts the argument is an even +// number. +struct AssertIsEvenFunctor { + AssertionResult operator()(const char* expr, int n) { + return AssertIsEven(expr, n); + } +}; + +// Returns true iff the sum of the arguments is an even number. +bool SumIsEven2(int n1, int n2) { + return IsEven(n1 + n2); +} + +// A functor that returns true iff the sum of the arguments is an even +// number. +struct SumIsEven3Functor { + bool operator()(int n1, int n2, int n3) { + return IsEven(n1 + n2 + n3); + } +}; + +// A predicate-formatter function that asserts the sum of the +// arguments is an even number. +AssertionResult AssertSumIsEven4( + const char* e1, const char* e2, const char* e3, const char* e4, + int n1, int n2, int n3, int n4) { + const int sum = n1 + n2 + n3 + n4; + if (IsEven(sum)) { + return AssertionSuccess(); + } + + Message msg; + msg << e1 << " + " << e2 << " + " << e3 << " + " << e4 + << " (" << n1 << " + " << n2 << " + " << n3 << " + " << n4 + << ") evaluates to " << sum << ", which is not even."; + return AssertionFailure(msg); +} + +// A predicate-formatter functor that asserts the sum of the arguments +// is an even number. +struct AssertSumIsEven5Functor { + AssertionResult operator()( + const char* e1, const char* e2, const char* e3, const char* e4, + const char* e5, int n1, int n2, int n3, int n4, int n5) { + const int sum = n1 + n2 + n3 + n4 + n5; + if (IsEven(sum)) { + return AssertionSuccess(); + } + + Message msg; + msg << e1 << " + " << e2 << " + " << e3 << " + " << e4 << " + " << e5 + << " (" + << n1 << " + " << n2 << " + " << n3 << " + " << n4 << " + " << n5 + << ") evaluates to " << sum << ", which is not even."; + return AssertionFailure(msg); + } +}; + + +// Tests unary predicate assertions. + +// Tests unary predicate assertions that don't use a custom formatter. 
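+// For orientation: an n-ary predicate used with {EXPECT,ASSERT}_PREDn simply
+// returns bool (or something convertible to AssertionResult), while a
+// predicate-formatter used with {EXPECT,ASSERT}_PRED_FORMATn also receives
+// the argument expressions as strings, e.g. for n == 1:
+//
+//   AssertionResult Formatter(const char* expr, T value);
+//
+// which lets failure messages echo the source text, as AssertIsEven above
+// does.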
+TEST(Pred1Test, WithoutFormat) {
+  // Success cases.
+  EXPECT_PRED1(IsEvenFunctor(), 2) << "This failure is UNEXPECTED!";
+  ASSERT_PRED1(IsEven, 4);
+
+  // Failure cases.
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED1(IsEven, 5) << "This failure is expected.";
+  }, "This failure is expected.");
+  EXPECT_FATAL_FAILURE(ASSERT_PRED1(IsEvenFunctor(), 5),
+                       "evaluates to false");
+}
+
+// Tests unary predicate assertions that use a custom formatter.
+TEST(Pred1Test, WithFormat) {
+  // Success cases.
+  EXPECT_PRED_FORMAT1(AssertIsEven, 2);
+  ASSERT_PRED_FORMAT1(AssertIsEvenFunctor(), 4)
+      << "This failure is UNEXPECTED!";
+
+  // Failure cases.
+  const int n = 5;
+  EXPECT_NONFATAL_FAILURE(EXPECT_PRED_FORMAT1(AssertIsEvenFunctor(), n),
+                          "n evaluates to 5, which is not even.");
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT1(AssertIsEven, 5) << "This failure is expected.";
+  }, "This failure is expected.");
+}
+
+// Tests that unary predicate assertions evaluate their arguments
+// exactly once.
+TEST(Pred1Test, SingleEvaluationOnFailure) {
+  // A success case.
+  static int n = 0;
+  EXPECT_PRED1(IsEven, n++);
+  EXPECT_EQ(1, n) << "The argument is not evaluated exactly once.";
+
+  // A failure case.
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT1(AssertIsEvenFunctor(), n++)
+        << "This failure is expected.";
+  }, "This failure is expected.");
+  EXPECT_EQ(2, n) << "The argument is not evaluated exactly once.";
+}
+
+
+// Tests predicate assertions whose arity is >= 2.
+
+// Tests predicate assertions that don't use a custom formatter.
+TEST(PredTest, WithoutFormat) {
+  // Success cases.
+  ASSERT_PRED2(SumIsEven2, 2, 4) << "This failure is UNEXPECTED!";
+  EXPECT_PRED3(SumIsEven3Functor(), 4, 6, 8);
+
+  // Failure cases.
+  const int n1 = 1;
+  const int n2 = 2;
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED2(SumIsEven2, n1, n2) << "This failure is expected.";
+  }, "This failure is expected.");
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED3(SumIsEven3Functor(), 1, 2, 4);
+  }, "evaluates to false");
+}
+
+// Tests predicate assertions that use a custom formatter.
+TEST(PredTest, WithFormat) {
+  // Success cases.
+  ASSERT_PRED_FORMAT4(AssertSumIsEven4, 4, 6, 8, 10) <<
+      "This failure is UNEXPECTED!";
+  EXPECT_PRED_FORMAT5(AssertSumIsEven5Functor(), 2, 4, 6, 8, 10);
+
+  // Failure cases.
+  const int n1 = 1;
+  const int n2 = 2;
+  const int n3 = 4;
+  const int n4 = 6;
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED_FORMAT4(AssertSumIsEven4, n1, n2, n3, n4);
+  }, "evaluates to 13, which is not even.");
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT5(AssertSumIsEven5Functor(), 1, 2, 4, 6, 8)
+        << "This failure is expected.";
+  }, "This failure is expected.");
+}
+
+// Tests that predicate assertions evaluate their arguments
+// exactly once.
+TEST(PredTest, SingleEvaluationOnFailure) {
+  // A success case.
+  int n1 = 0;
+  int n2 = 0;
+  EXPECT_PRED2(SumIsEven2, n1++, n2++);
+  EXPECT_EQ(1, n1) << "Argument 1 is not evaluated exactly once.";
+  EXPECT_EQ(1, n2) << "Argument 2 is not evaluated exactly once.";
+
+  // Another success case.
+  n1 = n2 = 0;
+  int n3 = 0;
+  int n4 = 0;
+  int n5 = 0;
+  ASSERT_PRED_FORMAT5(AssertSumIsEven5Functor(),
+                      n1++, n2++, n3++, n4++, n5++)
+      << "This failure is UNEXPECTED!";
+  EXPECT_EQ(1, n1) << "Argument 1 is not evaluated exactly once.";
+  EXPECT_EQ(1, n2) << "Argument 2 is not evaluated exactly once.";
+  EXPECT_EQ(1, n3) << "Argument 3 is not evaluated exactly once.";
+  EXPECT_EQ(1, n4) << "Argument 4 is not evaluated exactly once.";
+  EXPECT_EQ(1, n5) << "Argument 5 is not evaluated exactly once.";
+
+  // A failure case.
+  n1 = n2 = n3 = 0;
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED3(SumIsEven3Functor(), ++n1, n2++, n3++)
+        << "This failure is expected.";
+  }, "This failure is expected.");
+  EXPECT_EQ(1, n1) << "Argument 1 is not evaluated exactly once.";
+  EXPECT_EQ(1, n2) << "Argument 2 is not evaluated exactly once.";
+  EXPECT_EQ(1, n3) << "Argument 3 is not evaluated exactly once.";
+
+  // Another failure case.
+  n1 = n2 = n3 = n4 = 0;
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED_FORMAT4(AssertSumIsEven4, ++n1, n2++, n3++, n4++);
+  }, "evaluates to 1, which is not even.");
+  EXPECT_EQ(1, n1) << "Argument 1 is not evaluated exactly once.";
+  EXPECT_EQ(1, n2) << "Argument 2 is not evaluated exactly once.";
+  EXPECT_EQ(1, n3) << "Argument 3 is not evaluated exactly once.";
+  EXPECT_EQ(1, n4) << "Argument 4 is not evaluated exactly once.";
+}
+
+
+// Some helper functions for testing using overloaded/template
+// functions with ASSERT_PREDn and EXPECT_PREDn.
+
+bool IsPositive(double x) {
+  return x > 0;
+}
+
+template <typename T>
+bool IsNegative(T x) {
+  return x < 0;
+}
+
+template <typename T1, typename T2>
+bool GreaterThan(T1 x1, T2 x2) {
+  return x1 > x2;
+}
+
+// Tests that overloaded functions can be used in *_PRED* as long as
+// their types are explicitly specified.
+TEST(PredicateAssertionTest, AcceptsOverloadedFunction) {
+  // C++Builder requires C-style casts rather than static_cast.
+  EXPECT_PRED1((bool (*)(int))(IsPositive), 5);  // NOLINT
+  ASSERT_PRED1((bool (*)(double))(IsPositive), 6.0);  // NOLINT
+}
+
+// Tests that template functions can be used in *_PRED* as long as
+// their types are explicitly specified.
+TEST(PredicateAssertionTest, AcceptsTemplateFunction) {
+  EXPECT_PRED1(IsNegative<int>, -5);
+  // Makes sure that we can handle templates with more than one
+  // parameter.
+  ASSERT_PRED2((GreaterThan<int, int>), 5, 0);
+}
+
+
+// Some helper functions for testing using overloaded/template
+// functions with ASSERT_PRED_FORMATn and EXPECT_PRED_FORMATn.
+
+AssertionResult IsPositiveFormat(const char* /* expr */, int n) {
+  return n > 0 ? AssertionSuccess() :
+      AssertionFailure(Message() << "Failure");
+}
+
+AssertionResult IsPositiveFormat(const char* /* expr */, double x) {
+  return x > 0 ? AssertionSuccess() :
+      AssertionFailure(Message() << "Failure");
+}
+
+template <typename T>
+AssertionResult IsNegativeFormat(const char* /* expr */, T x) {
+  return x < 0 ? AssertionSuccess() :
+      AssertionFailure(Message() << "Failure");
+}
+
+template <typename T1, typename T2>
+AssertionResult EqualsFormat(const char* /* expr1 */, const char* /* expr2 */,
+                             const T1& x1, const T2& x2) {
+  return x1 == x2 ? AssertionSuccess() :
+      AssertionFailure(Message() << "Failure");
+}
+
+// Tests that overloaded functions can be used in *_PRED_FORMAT*
+// without explicitly specifying their types.
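+// Why no casts are needed here, unlike in the *_PRED* tests above: the
+// *_PRED_FORMAT* macros forward the formatter's name directly into a call
+// expression with the actual arguments, so ordinary overload resolution and
+// template argument deduction pick the right IsPositiveFormat or EqualsFormat
+// at the call site.  The *_PRED* macros instead take the predicate as a
+// value, where an overloaded or uninstantiated function name is ambiguous
+// without an explicit cast or <...>.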
+TEST(PredicateFormatAssertionTest, AcceptsOverloadedFunction) {
+  EXPECT_PRED_FORMAT1(IsPositiveFormat, 5);
+  ASSERT_PRED_FORMAT1(IsPositiveFormat, 6.0);
+}
+
+// Tests that template functions can be used in *_PRED_FORMAT* without
+// explicitly specifying their types.
+TEST(PredicateFormatAssertionTest, AcceptsTemplateFunction) {
+  EXPECT_PRED_FORMAT1(IsNegativeFormat, -5);
+  ASSERT_PRED_FORMAT2(EqualsFormat, 3, 3);
+}
+
+
+// Tests string assertions.
+
+// Tests ASSERT_STREQ with non-NULL arguments.
+TEST(StringAssertionTest, ASSERT_STREQ) {
+  const char * const p1 = "good";
+  ASSERT_STREQ(p1, p1);
+
+  // Let p2 have the same content as p1, but be at a different address.
+  const char p2[] = "good";
+  ASSERT_STREQ(p1, p2);
+
+  EXPECT_FATAL_FAILURE(ASSERT_STREQ("bad", "good"),
+                       "Expected: \"bad\"");
+}
+
+// Tests ASSERT_STREQ with NULL arguments.
+TEST(StringAssertionTest, ASSERT_STREQ_Null) {
+  ASSERT_STREQ(static_cast<const char *>(NULL), NULL);
+  EXPECT_FATAL_FAILURE(ASSERT_STREQ(NULL, "non-null"),
+                       "non-null");
+}
+
+// Tests ASSERT_STREQ with NULL arguments.
+TEST(StringAssertionTest, ASSERT_STREQ_Null2) {
+  EXPECT_FATAL_FAILURE(ASSERT_STREQ("non-null", NULL),
+                       "non-null");
+}
+
+// Tests ASSERT_STRNE.
+TEST(StringAssertionTest, ASSERT_STRNE) {
+  ASSERT_STRNE("hi", "Hi");
+  ASSERT_STRNE("Hi", NULL);
+  ASSERT_STRNE(NULL, "Hi");
+  ASSERT_STRNE("", NULL);
+  ASSERT_STRNE(NULL, "");
+  ASSERT_STRNE("", "Hi");
+  ASSERT_STRNE("Hi", "");
+  EXPECT_FATAL_FAILURE(ASSERT_STRNE("Hi", "Hi"),
+                       "\"Hi\" vs \"Hi\"");
+}
+
+// Tests ASSERT_STRCASEEQ.
+TEST(StringAssertionTest, ASSERT_STRCASEEQ) {
+  ASSERT_STRCASEEQ("hi", "Hi");
+  ASSERT_STRCASEEQ(static_cast<const char *>(NULL), NULL);
+
+  ASSERT_STRCASEEQ("", "");
+  EXPECT_FATAL_FAILURE(ASSERT_STRCASEEQ("Hi", "hi2"),
+                       "(ignoring case)");
+}
+
+// Tests ASSERT_STRCASENE.
+TEST(StringAssertionTest, ASSERT_STRCASENE) {
+  ASSERT_STRCASENE("hi1", "Hi2");
+  ASSERT_STRCASENE("Hi", NULL);
+  ASSERT_STRCASENE(NULL, "Hi");
+  ASSERT_STRCASENE("", NULL);
+  ASSERT_STRCASENE(NULL, "");
+  ASSERT_STRCASENE("", "Hi");
+  ASSERT_STRCASENE("Hi", "");
+  EXPECT_FATAL_FAILURE(ASSERT_STRCASENE("Hi", "hi"),
+                       "(ignoring case)");
+}
+
+// Tests *_STREQ on wide strings.
+TEST(StringAssertionTest, STREQ_Wide) {
+  // NULL strings.
+  ASSERT_STREQ(static_cast<const wchar_t *>(NULL), NULL);
+
+  // Empty strings.
+  ASSERT_STREQ(L"", L"");
+
+  // Non-null vs NULL.
+  EXPECT_NONFATAL_FAILURE(EXPECT_STREQ(L"non-null", NULL),
+                          "non-null");
+
+  // Equal strings.
+  EXPECT_STREQ(L"Hi", L"Hi");
+
+  // Unequal strings.
+  EXPECT_NONFATAL_FAILURE(EXPECT_STREQ(L"abc", L"Abc"),
+                          "Abc");
+
+  // Strings containing wide characters.
+  EXPECT_NONFATAL_FAILURE(EXPECT_STREQ(L"abc\x8119", L"abc\x8120"),
+                          "abc");
+
+  // The streaming variation.
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_STREQ(L"abc\x8119", L"abc\x8121") << "Expected failure";
+  }, "Expected failure");
+}
+
+// Tests *_STRNE on wide strings.
+TEST(StringAssertionTest, STRNE_Wide) {
+  // NULL strings.
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_STRNE(static_cast<const wchar_t *>(NULL), NULL);
+  }, "");
+
+  // Empty strings.
+  EXPECT_NONFATAL_FAILURE(EXPECT_STRNE(L"", L""),
+                          "L\"\"");
+
+  // Non-null vs NULL.
+  ASSERT_STRNE(L"non-null", NULL);
+
+  // Equal strings.
+  EXPECT_NONFATAL_FAILURE(EXPECT_STRNE(L"Hi", L"Hi"),
+                          "L\"Hi\"");
+
+  // Unequal strings.
+  EXPECT_STRNE(L"abc", L"Abc");
+
+  // Strings containing wide characters.
+  EXPECT_NONFATAL_FAILURE(EXPECT_STRNE(L"abc\x8119", L"abc\x8119"),
+                          "abc");
+
+  // The streaming variation.
+  ASSERT_STRNE(L"abc\x8119", L"abc\x8120") << "This shouldn't happen";
+}
+
+// Tests for ::testing::IsSubstring().
+
+// Tests that IsSubstring() returns the correct result when the input
+// argument type is const char*.
+TEST(IsSubstringTest, ReturnsCorrectResultForCString) {
+  EXPECT_FALSE(IsSubstring("", "", NULL, "a"));
+  EXPECT_FALSE(IsSubstring("", "", "b", NULL));
+  EXPECT_FALSE(IsSubstring("", "", "needle", "haystack"));
+
+  EXPECT_TRUE(IsSubstring("", "", static_cast<const char *>(NULL), NULL));
+  EXPECT_TRUE(IsSubstring("", "", "needle", "two needles"));
+}
+
+// Tests that IsSubstring() returns the correct result when the input
+// argument type is const wchar_t*.
+TEST(IsSubstringTest, ReturnsCorrectResultForWideCString) {
+  EXPECT_FALSE(IsSubstring("", "", kNull, L"a"));
+  EXPECT_FALSE(IsSubstring("", "", L"b", kNull));
+  EXPECT_FALSE(IsSubstring("", "", L"needle", L"haystack"));
+
+  EXPECT_TRUE(IsSubstring("", "", static_cast<const wchar_t *>(NULL), NULL));
+  EXPECT_TRUE(IsSubstring("", "", L"needle", L"two needles"));
+}
+
+// Tests that IsSubstring() generates the correct message when the input
+// argument type is const char*.
+TEST(IsSubstringTest, GeneratesCorrectMessageForCString) {
+  EXPECT_STREQ("Value of: needle_expr\n"
+               "  Actual: \"needle\"\n"
+               "Expected: a substring of haystack_expr\n"
+               "Which is: \"haystack\"",
+               IsSubstring("needle_expr", "haystack_expr",
+                           "needle", "haystack").failure_message());
+}
+
+// Tests that IsSubstring returns the correct result when the input
+// argument type is ::std::string.
+TEST(IsSubstringTest, ReturnsCorrectResultsForStdString) {
+  EXPECT_TRUE(IsSubstring("", "", std::string("hello"), "ahellob"));
+  EXPECT_FALSE(IsSubstring("", "", "hello", std::string("world")));
+}
+
+#if GTEST_HAS_STD_WSTRING
+// Tests that IsSubstring returns the correct result when the input
+// argument type is ::std::wstring.
+TEST(IsSubstringTest, ReturnsCorrectResultForStdWstring) {
+  EXPECT_TRUE(IsSubstring("", "", ::std::wstring(L"needle"), L"two needles"));
+  EXPECT_FALSE(IsSubstring("", "", L"needle", ::std::wstring(L"haystack")));
+}
+
+// Tests that IsSubstring() generates the correct message when the input
+// argument type is ::std::wstring.
+TEST(IsSubstringTest, GeneratesCorrectMessageForWstring) {
+  EXPECT_STREQ("Value of: needle_expr\n"
+               "  Actual: L\"needle\"\n"
+               "Expected: a substring of haystack_expr\n"
+               "Which is: L\"haystack\"",
+               IsSubstring(
+                   "needle_expr", "haystack_expr",
+                   ::std::wstring(L"needle"), L"haystack").failure_message());
+}
+
+#endif  // GTEST_HAS_STD_WSTRING
+
+// Tests for ::testing::IsNotSubstring().
+
+// Tests that IsNotSubstring() returns the correct result when the input
+// argument type is const char*.
+TEST(IsNotSubstringTest, ReturnsCorrectResultForCString) {
+  EXPECT_TRUE(IsNotSubstring("", "", "needle", "haystack"));
+  EXPECT_FALSE(IsNotSubstring("", "", "needle", "two needles"));
+}
+
+// Tests that IsNotSubstring() returns the correct result when the input
+// argument type is const wchar_t*.
+TEST(IsNotSubstringTest, ReturnsCorrectResultForWideCString) {
+  EXPECT_TRUE(IsNotSubstring("", "", L"needle", L"haystack"));
+  EXPECT_FALSE(IsNotSubstring("", "", L"needle", L"two needles"));
+}
+
+// Tests that IsNotSubstring() generates the correct message when the input
+// argument type is const wchar_t*.
+TEST(IsNotSubstringTest, GeneratesCorrectMessageForWideCString) {
+  EXPECT_STREQ("Value of: needle_expr\n"
+               "  Actual: L\"needle\"\n"
+               "Expected: not a substring of haystack_expr\n"
+               "Which is: L\"two needles\"",
+               IsNotSubstring(
+                   "needle_expr", "haystack_expr",
+                   L"needle", L"two needles").failure_message());
+}
+
+// Tests that IsNotSubstring returns the correct result when the input
+// argument type is ::std::string.
+TEST(IsNotSubstringTest, ReturnsCorrectResultsForStdString) {
+  EXPECT_FALSE(IsNotSubstring("", "", std::string("hello"), "ahellob"));
+  EXPECT_TRUE(IsNotSubstring("", "", "hello", std::string("world")));
+}
+
+// Tests that IsNotSubstring() generates the correct message when the input
+// argument type is ::std::string.
+TEST(IsNotSubstringTest, GeneratesCorrectMessageForStdString) {
+  EXPECT_STREQ("Value of: needle_expr\n"
+               "  Actual: \"needle\"\n"
+               "Expected: not a substring of haystack_expr\n"
+               "Which is: \"two needles\"",
+               IsNotSubstring(
+                   "needle_expr", "haystack_expr",
+                   ::std::string("needle"), "two needles").failure_message());
+}
+
+#if GTEST_HAS_STD_WSTRING
+
+// Tests that IsNotSubstring returns the correct result when the input
+// argument type is ::std::wstring.
+TEST(IsNotSubstringTest, ReturnsCorrectResultForStdWstring) {
+  EXPECT_FALSE(
+      IsNotSubstring("", "", ::std::wstring(L"needle"), L"two needles"));
+  EXPECT_TRUE(IsNotSubstring("", "", L"needle", ::std::wstring(L"haystack")));
+}
+
+#endif  // GTEST_HAS_STD_WSTRING
+
+// Tests floating-point assertions.
+
+template <typename RawType>
+class FloatingPointTest : public Test {
+ protected:
+  // Pre-calculated numbers to be used by the tests.
+  struct TestValues {
+    RawType close_to_positive_zero;
+    RawType close_to_negative_zero;
+    RawType further_from_negative_zero;
+
+    RawType close_to_one;
+    RawType further_from_one;
+
+    RawType infinity;
+    RawType close_to_infinity;
+    RawType further_from_infinity;
+
+    RawType nan1;
+    RawType nan2;
+  };
+
+  typedef typename testing::internal::FloatingPoint<RawType> Floating;
+  typedef typename Floating::Bits Bits;
+
+  virtual void SetUp() {
+    const size_t max_ulps = Floating::kMaxUlps;
+
+    // The bits that represent 0.0.
+    const Bits zero_bits = Floating(0).bits();
+
+    // Makes some numbers close to 0.0.
+    values_.close_to_positive_zero = Floating::ReinterpretBits(
+        zero_bits + max_ulps/2);
+    values_.close_to_negative_zero = -Floating::ReinterpretBits(
+        zero_bits + max_ulps - max_ulps/2);
+    values_.further_from_negative_zero = -Floating::ReinterpretBits(
+        zero_bits + max_ulps + 1 - max_ulps/2);
+
+    // The bits that represent 1.0.
+    const Bits one_bits = Floating(1).bits();
+
+    // Makes some numbers close to 1.0.
+    values_.close_to_one = Floating::ReinterpretBits(one_bits + max_ulps);
+    values_.further_from_one = Floating::ReinterpretBits(
+        one_bits + max_ulps + 1);
+
+    // +infinity.
+    values_.infinity = Floating::Infinity();
+
+    // The bits that represent +infinity.
+    const Bits infinity_bits = Floating(values_.infinity).bits();
+
+    // Makes some numbers close to infinity.
+    values_.close_to_infinity = Floating::ReinterpretBits(
+        infinity_bits - max_ulps);
+    values_.further_from_infinity = Floating::ReinterpretBits(
+        infinity_bits - max_ulps - 1);
+
+    // Makes some NAN's.  Sets the most significant bit of the fraction so that
+    // our NaN's are quiet; trying to process a signaling NaN would raise an
+    // exception if our environment enables floating point exceptions.
+    values_.nan1 = Floating::ReinterpretBits(Floating::kExponentBitMask
+        | (static_cast<Bits>(1) << (Floating::kFractionBitCount - 1)) | 1);
+    values_.nan2 = Floating::ReinterpretBits(Floating::kExponentBitMask
+        | (static_cast<Bits>(1) << (Floating::kFractionBitCount - 1)) | 200);
+  }
+
+  void TestSize() {
+    EXPECT_EQ(sizeof(RawType), sizeof(Bits));
+  }
+
+  static TestValues values_;
+};
+
+template <typename RawType>
+typename FloatingPointTest<RawType>::TestValues
+    FloatingPointTest<RawType>::values_;
+
+// Instantiates FloatingPointTest for testing *_FLOAT_EQ.
+typedef FloatingPointTest<float> FloatTest;
+
+// Tests that the size of Float::Bits matches the size of float.
+TEST_F(FloatTest, Size) {
+  TestSize();
+}
+
+// Tests comparing with +0 and -0.
+TEST_F(FloatTest, Zeros) {
+  EXPECT_FLOAT_EQ(0.0, -0.0);
+  EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(-0.0, 1.0),
+                          "1.0");
+  EXPECT_FATAL_FAILURE(ASSERT_FLOAT_EQ(0.0, 1.5),
+                       "1.5");
+}
+
+// Tests comparing numbers close to 0.
+//
+// This ensures that *_FLOAT_EQ handles the sign correctly and no
+// overflow occurs when comparing numbers whose absolute value is very
+// small.
+TEST_F(FloatTest, AlmostZeros) {
+  // In C++Builder, names within local classes (such as used by
+  // EXPECT_FATAL_FAILURE) cannot be resolved against static members of the
+  // scoping class.  Use a static local alias as a workaround.
+  // We use the assignment syntax since some compilers, like Sun Studio,
+  // don't allow initializing references using construction syntax
+  // (parentheses).
+  static const FloatTest::TestValues& v = this->values_;
+
+  EXPECT_FLOAT_EQ(0.0, v.close_to_positive_zero);
+  EXPECT_FLOAT_EQ(-0.0, v.close_to_negative_zero);
+  EXPECT_FLOAT_EQ(v.close_to_positive_zero, v.close_to_negative_zero);
+
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_FLOAT_EQ(v.close_to_positive_zero,
+                    v.further_from_negative_zero);
+  }, "v.further_from_negative_zero");
+}
+
+// Tests comparing numbers close to each other.
+TEST_F(FloatTest, SmallDiff) {
+  EXPECT_FLOAT_EQ(1.0, values_.close_to_one);
+  EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(1.0, values_.further_from_one),
+                          "values_.further_from_one");
+}
+
+// Tests comparing numbers far apart.
+TEST_F(FloatTest, LargeDiff) {
+  EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(2.5, 3.0),
+                          "3.0");
+}
+
+// Tests comparing with infinity.
+//
+// This ensures that no overflow occurs when comparing numbers whose
+// absolute value is very large.
+TEST_F(FloatTest, Infinity) {
+  EXPECT_FLOAT_EQ(values_.infinity, values_.close_to_infinity);
+  EXPECT_FLOAT_EQ(-values_.infinity, -values_.close_to_infinity);
+#if !GTEST_OS_SYMBIAN
+  // Nokia's STLport crashes if we try to output infinity or NaN.
+  EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(values_.infinity, -values_.infinity),
+                          "-values_.infinity");
+
+  // This is interesting as the representations of infinity and nan1
+  // are only 1 ULP apart.
+  EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(values_.infinity, values_.nan1),
+                          "values_.nan1");
+#endif  // !GTEST_OS_SYMBIAN
+}
+
+// Tests that comparing with NAN always returns false.
+TEST_F(FloatTest, NaN) {
+#if !GTEST_OS_SYMBIAN
+// Nokia's STLport crashes if we try to output infinity or NaN.
+
+  // In C++Builder, names within local classes (such as used by
+  // EXPECT_FATAL_FAILURE) cannot be resolved against static members of the
+  // scoping class.  Use a static local alias as a workaround.
+  // We use the assignment syntax since some compilers, like Sun Studio,
+  // don't allow initializing references using construction syntax
+  // (parentheses).
+  static const FloatTest::TestValues& v = this->values_;
+
+  EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(v.nan1, v.nan1),
+                          "v.nan1");
+  EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(v.nan1, v.nan2),
+                          "v.nan2");
+  EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(1.0, v.nan1),
+                          "v.nan1");
+
+  EXPECT_FATAL_FAILURE(ASSERT_FLOAT_EQ(v.nan1, v.infinity),
+                       "v.infinity");
+#endif  // !GTEST_OS_SYMBIAN
+}
+
+// Tests that *_FLOAT_EQ are reflexive.
+TEST_F(FloatTest, Reflexive) {
+  EXPECT_FLOAT_EQ(0.0, 0.0);
+  EXPECT_FLOAT_EQ(1.0, 1.0);
+  ASSERT_FLOAT_EQ(values_.infinity, values_.infinity);
+}
+
+// Tests that *_FLOAT_EQ are commutative.
+TEST_F(FloatTest, Commutative) {
+  // We already tested EXPECT_FLOAT_EQ(1.0, values_.close_to_one).
+  EXPECT_FLOAT_EQ(values_.close_to_one, 1.0);
+
+  // We already tested EXPECT_FLOAT_EQ(1.0, values_.further_from_one).
+  EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(values_.further_from_one, 1.0),
+                          "1.0");
+}
+
+// Tests EXPECT_NEAR.
+TEST_F(FloatTest, EXPECT_NEAR) {
+  EXPECT_NEAR(-1.0f, -1.1f, 0.2f);
+  EXPECT_NEAR(2.0f, 3.0f, 1.0f);
+  EXPECT_NONFATAL_FAILURE(EXPECT_NEAR(1.0f,1.5f, 0.25f),  // NOLINT
+                          "The difference between 1.0f and 1.5f is 0.5, "
+                          "which exceeds 0.25f");
+  // To work around a bug in gcc 2.95.0, there is intentionally no
+  // space after the first comma in the previous line.
+}
+
+// Tests ASSERT_NEAR.
+TEST_F(FloatTest, ASSERT_NEAR) {
+  ASSERT_NEAR(-1.0f, -1.1f, 0.2f);
+  ASSERT_NEAR(2.0f, 3.0f, 1.0f);
+  EXPECT_FATAL_FAILURE(ASSERT_NEAR(1.0f,1.5f, 0.25f),  // NOLINT
+                       "The difference between 1.0f and 1.5f is 0.5, "
+                       "which exceeds 0.25f");
+  // To work around a bug in gcc 2.95.0, there is intentionally no
+  // space after the first comma in the previous line.
+}
+
+// Tests the cases where FloatLE() should succeed.
+TEST_F(FloatTest, FloatLESucceeds) {
+  EXPECT_PRED_FORMAT2(FloatLE, 1.0f, 2.0f);  // When val1 < val2,
+  ASSERT_PRED_FORMAT2(FloatLE, 1.0f, 1.0f);  // val1 == val2,
+
+  // or when val1 is greater than, but almost equals to, val2.
+  EXPECT_PRED_FORMAT2(FloatLE, values_.close_to_positive_zero, 0.0f);
+}
+
+// Tests the cases where FloatLE() should fail.
+TEST_F(FloatTest, FloatLEFails) {
+  // When val1 is greater than val2 by a large margin,
+  EXPECT_NONFATAL_FAILURE(EXPECT_PRED_FORMAT2(FloatLE, 2.0f, 1.0f),
+                          "(2.0f) <= (1.0f)");
+
+  // or by a small yet non-negligible margin,
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED_FORMAT2(FloatLE, values_.further_from_one, 1.0f);
+  }, "(values_.further_from_one) <= (1.0f)");
+
+#if !GTEST_OS_SYMBIAN && !defined(__BORLANDC__)
+  // Nokia's STLport crashes if we try to output infinity or NaN.
+  // C++Builder gives bad results for ordered comparisons involving NaNs
+  // due to compiler bugs.
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED_FORMAT2(FloatLE, values_.nan1, values_.infinity);
+  }, "(values_.nan1) <= (values_.infinity)");
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_PRED_FORMAT2(FloatLE, -values_.infinity, values_.nan1);
+  }, "(-values_.infinity) <= (values_.nan1)");
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_PRED_FORMAT2(FloatLE, values_.nan1, values_.nan1);
+  }, "(values_.nan1) <= (values_.nan1)");
+#endif  // !GTEST_OS_SYMBIAN && !defined(__BORLANDC__)
+}
+
+// Instantiates FloatingPointTest for testing *_DOUBLE_EQ.
+typedef FloatingPointTest<double> DoubleTest;
+
+// Tests that the size of Double::Bits matches the size of double.
+TEST_F(DoubleTest, Size) {
+  TestSize();
+}
+
+// Tests comparing with +0 and -0.
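+// (Added note, not in the upstream source: both zeros compare equal under
+// the ULP-based comparison because the sign-and-magnitude bit patterns are
+// first mapped to a biased unsigned representation, and +0.0 and -0.0 map
+// to the same biased value.)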
+TEST_F(DoubleTest, Zeros) {
+  EXPECT_DOUBLE_EQ(0.0, -0.0);
+  EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(-0.0, 1.0),
+                          "1.0");
+  EXPECT_FATAL_FAILURE(ASSERT_DOUBLE_EQ(0.0, 1.0),
+                       "1.0");
+}
+
+// Tests comparing numbers close to 0.
+//
+// This ensures that *_DOUBLE_EQ handles the sign correctly and no
+// overflow occurs when comparing numbers whose absolute value is very
+// small.
+TEST_F(DoubleTest, AlmostZeros) {
+  // In C++Builder, names within local classes (such as used by
+  // EXPECT_FATAL_FAILURE) cannot be resolved against static members of the
+  // scoping class.  Use a static local alias as a workaround.
+  // We use the assignment syntax since some compilers, like Sun Studio,
+  // don't allow initializing references using construction syntax
+  // (parentheses).
+  static const DoubleTest::TestValues& v = this->values_;
+
+  EXPECT_DOUBLE_EQ(0.0, v.close_to_positive_zero);
+  EXPECT_DOUBLE_EQ(-0.0, v.close_to_negative_zero);
+  EXPECT_DOUBLE_EQ(v.close_to_positive_zero, v.close_to_negative_zero);
+
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_DOUBLE_EQ(v.close_to_positive_zero,
+                     v.further_from_negative_zero);
+  }, "v.further_from_negative_zero");
+}
+
+// Tests comparing numbers close to each other.
+TEST_F(DoubleTest, SmallDiff) {
+  EXPECT_DOUBLE_EQ(1.0, values_.close_to_one);
+  EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(1.0, values_.further_from_one),
+                          "values_.further_from_one");
+}
+
+// Tests comparing numbers far apart.
+TEST_F(DoubleTest, LargeDiff) {
+  EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(2.0, 3.0),
+                          "3.0");
+}
+
+// Tests comparing with infinity.
+//
+// This ensures that no overflow occurs when comparing numbers whose
+// absolute value is very large.
+TEST_F(DoubleTest, Infinity) {
+  EXPECT_DOUBLE_EQ(values_.infinity, values_.close_to_infinity);
+  EXPECT_DOUBLE_EQ(-values_.infinity, -values_.close_to_infinity);
+#if !GTEST_OS_SYMBIAN
+  // Nokia's STLport crashes if we try to output infinity or NaN.
+  EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(values_.infinity, -values_.infinity),
+                          "-values_.infinity");
+
+  // This is interesting as the representations of infinity and nan1
+  // are only 1 ULP apart.
+  EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(values_.infinity, values_.nan1),
+                          "values_.nan1");
+#endif  // !GTEST_OS_SYMBIAN
+}
+
+// Tests that comparing with NAN always returns false.
+TEST_F(DoubleTest, NaN) {
+#if !GTEST_OS_SYMBIAN
+  // In C++Builder, names within local classes (such as used by
+  // EXPECT_FATAL_FAILURE) cannot be resolved against static members of the
+  // scoping class.  Use a static local alias as a workaround.
+  // We use the assignment syntax since some compilers, like Sun Studio,
+  // don't allow initializing references using construction syntax
+  // (parentheses).
+  static const DoubleTest::TestValues& v = this->values_;
+
+  // Nokia's STLport crashes if we try to output infinity or NaN.
+  EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(v.nan1, v.nan1),
+                          "v.nan1");
+  EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(v.nan1, v.nan2), "v.nan2");
+  EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(1.0, v.nan1), "v.nan1");
+  EXPECT_FATAL_FAILURE(ASSERT_DOUBLE_EQ(v.nan1, v.infinity),
+                       "v.infinity");
+#endif  // !GTEST_OS_SYMBIAN
+}
+
+// Tests that *_DOUBLE_EQ are reflexive.
+TEST_F(DoubleTest, Reflexive) {
+  EXPECT_DOUBLE_EQ(0.0, 0.0);
+  EXPECT_DOUBLE_EQ(1.0, 1.0);
+#if !GTEST_OS_SYMBIAN
+  // Nokia's STLport crashes if we try to output infinity or NaN.
+ ASSERT_DOUBLE_EQ(values_.infinity, values_.infinity); +#endif // !GTEST_OS_SYMBIAN +} + +// Tests that *_DOUBLE_EQ are commutative. +TEST_F(DoubleTest, Commutative) { + // We already tested EXPECT_DOUBLE_EQ(1.0, values_.close_to_one). + EXPECT_DOUBLE_EQ(values_.close_to_one, 1.0); + + // We already tested EXPECT_DOUBLE_EQ(1.0, values_.further_from_one). + EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(values_.further_from_one, 1.0), + "1.0"); +} + +// Tests EXPECT_NEAR. +TEST_F(DoubleTest, EXPECT_NEAR) { + EXPECT_NEAR(-1.0, -1.1, 0.2); + EXPECT_NEAR(2.0, 3.0, 1.0); + EXPECT_NONFATAL_FAILURE(EXPECT_NEAR(1.0, 1.5, 0.25), // NOLINT + "The difference between 1.0 and 1.5 is 0.5, " + "which exceeds 0.25"); + // To work around a bug in gcc 2.95.0, there is intentionally no + // space after the first comma in the previous statement. +} + +// Tests ASSERT_NEAR. +TEST_F(DoubleTest, ASSERT_NEAR) { + ASSERT_NEAR(-1.0, -1.1, 0.2); + ASSERT_NEAR(2.0, 3.0, 1.0); + EXPECT_FATAL_FAILURE(ASSERT_NEAR(1.0, 1.5, 0.25), // NOLINT + "The difference between 1.0 and 1.5 is 0.5, " + "which exceeds 0.25"); + // To work around a bug in gcc 2.95.0, there is intentionally no + // space after the first comma in the previous statement. +} + +// Tests the cases where DoubleLE() should succeed. +TEST_F(DoubleTest, DoubleLESucceeds) { + EXPECT_PRED_FORMAT2(DoubleLE, 1.0, 2.0); // When val1 < val2, + ASSERT_PRED_FORMAT2(DoubleLE, 1.0, 1.0); // val1 == val2, + + // or when val1 is greater than, but almost equals to, val2. + EXPECT_PRED_FORMAT2(DoubleLE, values_.close_to_positive_zero, 0.0); +} + +// Tests the cases where DoubleLE() should fail. +TEST_F(DoubleTest, DoubleLEFails) { + // When val1 is greater than val2 by a large margin, + EXPECT_NONFATAL_FAILURE(EXPECT_PRED_FORMAT2(DoubleLE, 2.0, 1.0), + "(2.0) <= (1.0)"); + + // or by a small yet non-negligible margin, + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT2(DoubleLE, values_.further_from_one, 1.0); + }, "(values_.further_from_one) <= (1.0)"); + +#if !GTEST_OS_SYMBIAN && !defined(__BORLANDC__) + // Nokia's STLport crashes if we try to output infinity or NaN. + // C++Builder gives bad results for ordered comparisons involving NaNs + // due to compiler bugs. + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT2(DoubleLE, values_.nan1, values_.infinity); + }, "(values_.nan1) <= (values_.infinity)"); + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_PRED_FORMAT2(DoubleLE, -values_.infinity, values_.nan1); + }, " (-values_.infinity) <= (values_.nan1)"); + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_PRED_FORMAT2(DoubleLE, values_.nan1, values_.nan1); + }, "(values_.nan1) <= (values_.nan1)"); +#endif // !GTEST_OS_SYMBIAN && !defined(__BORLANDC__) +} + + +// Verifies that a test or test case whose name starts with DISABLED_ is +// not run. + +// A test whose name starts with DISABLED_. +// Should not run. +TEST(DisabledTest, DISABLED_TestShouldNotRun) { + FAIL() << "Unexpected failure: Disabled test should not be run."; +} + +// A test whose name does not start with DISABLED_. +// Should run. +TEST(DisabledTest, NotDISABLED_TestShouldRun) { + EXPECT_EQ(1, 1); +} + +// A test case whose name starts with DISABLED_. +// Should not run. +TEST(DISABLED_TestCase, TestShouldNotRun) { + FAIL() << "Unexpected failure: Test in disabled test case should not be run."; +} + +// A test case and test whose names start with DISABLED_. +// Should not run. 
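+// (Added note, not in the upstream source: disabled tests are still
+// compiled and registered; they can be forced to run with the
+// --gtest_also_run_disabled_tests flag, so these tests assume that flag
+// is not set.)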
+TEST(DISABLED_TestCase, DISABLED_TestShouldNotRun) {
+  FAIL() << "Unexpected failure: Test in disabled test case should not be run.";
+}
+
+// Check that when all tests in a test case are disabled, SetupTestCase() and
+// TearDownTestCase() are not called.
+class DisabledTestsTest : public Test {
+ protected:
+  static void SetUpTestCase() {
+    FAIL() << "Unexpected failure: All tests disabled in test case. "
+              "SetupTestCase() should not be called.";
+  }
+
+  static void TearDownTestCase() {
+    FAIL() << "Unexpected failure: All tests disabled in test case. "
+              "TearDownTestCase() should not be called.";
+  }
+};
+
+TEST_F(DisabledTestsTest, DISABLED_TestShouldNotRun_1) {
+  FAIL() << "Unexpected failure: Disabled test should not be run.";
+}
+
+TEST_F(DisabledTestsTest, DISABLED_TestShouldNotRun_2) {
+  FAIL() << "Unexpected failure: Disabled test should not be run.";
+}
+
+// Tests that disabled typed tests aren't run.
+
+#if GTEST_HAS_TYPED_TEST
+
+template <typename T>
+class TypedTest : public Test {
+};
+
+typedef testing::Types<int, double> NumericTypes;
+TYPED_TEST_CASE(TypedTest, NumericTypes);
+
+TYPED_TEST(TypedTest, DISABLED_ShouldNotRun) {
+  FAIL() << "Unexpected failure: Disabled typed test should not run.";
+}
+
+template <typename T>
+class DISABLED_TypedTest : public Test {
+};
+
+TYPED_TEST_CASE(DISABLED_TypedTest, NumericTypes);
+
+TYPED_TEST(DISABLED_TypedTest, ShouldNotRun) {
+  FAIL() << "Unexpected failure: Disabled typed test should not run.";
+}
+
+#endif  // GTEST_HAS_TYPED_TEST
+
+// Tests that disabled type-parameterized tests aren't run.
+
+#if GTEST_HAS_TYPED_TEST_P
+
+template <typename T>
+class TypedTestP : public Test {
+};
+
+TYPED_TEST_CASE_P(TypedTestP);
+
+TYPED_TEST_P(TypedTestP, DISABLED_ShouldNotRun) {
+  FAIL() << "Unexpected failure: "
+         << "Disabled type-parameterized test should not run.";
+}
+
+REGISTER_TYPED_TEST_CASE_P(TypedTestP, DISABLED_ShouldNotRun);
+
+INSTANTIATE_TYPED_TEST_CASE_P(My, TypedTestP, NumericTypes);
+
+template <typename T>
+class DISABLED_TypedTestP : public Test {
+};
+
+TYPED_TEST_CASE_P(DISABLED_TypedTestP);
+
+TYPED_TEST_P(DISABLED_TypedTestP, ShouldNotRun) {
+  FAIL() << "Unexpected failure: "
+         << "Disabled type-parameterized test should not run.";
+}
+
+REGISTER_TYPED_TEST_CASE_P(DISABLED_TypedTestP, ShouldNotRun);
+
+INSTANTIATE_TYPED_TEST_CASE_P(My, DISABLED_TypedTestP, NumericTypes);
+
+#endif  // GTEST_HAS_TYPED_TEST_P
+
+// Tests that assertion macros evaluate their arguments exactly once.
+
+class SingleEvaluationTest : public Test {
+ public:  // Must be public and not protected due to a bug in g++ 3.4.2.
+  // This helper function is needed by the FailedASSERT_STREQ test
+  // below.  It's public to work around C++Builder's bug with scoping local
+  // classes.
+  static void CompareAndIncrementCharPtrs() {
+    ASSERT_STREQ(p1_++, p2_++);
+  }
+
+  // This helper function is needed by the FailedASSERT_NE test below.  It's
+  // public to work around C++Builder's bug with scoping local classes.
+ static void CompareAndIncrementInts() { + ASSERT_NE(a_++, b_++); + } + + protected: + SingleEvaluationTest() { + p1_ = s1_; + p2_ = s2_; + a_ = 0; + b_ = 0; + } + + static const char* const s1_; + static const char* const s2_; + static const char* p1_; + static const char* p2_; + + static int a_; + static int b_; +}; + +const char* const SingleEvaluationTest::s1_ = "01234"; +const char* const SingleEvaluationTest::s2_ = "abcde"; +const char* SingleEvaluationTest::p1_; +const char* SingleEvaluationTest::p2_; +int SingleEvaluationTest::a_; +int SingleEvaluationTest::b_; + +// Tests that when ASSERT_STREQ fails, it evaluates its arguments +// exactly once. +TEST_F(SingleEvaluationTest, FailedASSERT_STREQ) { + EXPECT_FATAL_FAILURE(SingleEvaluationTest::CompareAndIncrementCharPtrs(), + "p2_++"); + EXPECT_EQ(s1_ + 1, p1_); + EXPECT_EQ(s2_ + 1, p2_); +} + +// Tests that string assertion arguments are evaluated exactly once. +TEST_F(SingleEvaluationTest, ASSERT_STR) { + // successful EXPECT_STRNE + EXPECT_STRNE(p1_++, p2_++); + EXPECT_EQ(s1_ + 1, p1_); + EXPECT_EQ(s2_ + 1, p2_); + + // failed EXPECT_STRCASEEQ + EXPECT_NONFATAL_FAILURE(EXPECT_STRCASEEQ(p1_++, p2_++), + "ignoring case"); + EXPECT_EQ(s1_ + 2, p1_); + EXPECT_EQ(s2_ + 2, p2_); +} + +// Tests that when ASSERT_NE fails, it evaluates its arguments exactly +// once. +TEST_F(SingleEvaluationTest, FailedASSERT_NE) { + EXPECT_FATAL_FAILURE(SingleEvaluationTest::CompareAndIncrementInts(), + "(a_++) != (b_++)"); + EXPECT_EQ(1, a_); + EXPECT_EQ(1, b_); +} + +// Tests that assertion arguments are evaluated exactly once. +TEST_F(SingleEvaluationTest, OtherCases) { + // successful EXPECT_TRUE + EXPECT_TRUE(0 == a_++); // NOLINT + EXPECT_EQ(1, a_); + + // failed EXPECT_TRUE + EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(-1 == a_++), "-1 == a_++"); + EXPECT_EQ(2, a_); + + // successful EXPECT_GT + EXPECT_GT(a_++, b_++); + EXPECT_EQ(3, a_); + EXPECT_EQ(1, b_); + + // failed EXPECT_LT + EXPECT_NONFATAL_FAILURE(EXPECT_LT(a_++, b_++), "(a_++) < (b_++)"); + EXPECT_EQ(4, a_); + EXPECT_EQ(2, b_); + + // successful ASSERT_TRUE + ASSERT_TRUE(0 < a_++); // NOLINT + EXPECT_EQ(5, a_); + + // successful ASSERT_GT + ASSERT_GT(a_++, b_++); + EXPECT_EQ(6, a_); + EXPECT_EQ(3, b_); +} + +#if GTEST_HAS_EXCEPTIONS + +void ThrowAnInteger() { + throw 1; +} + +// Tests that assertion arguments are evaluated exactly once. +TEST_F(SingleEvaluationTest, ExceptionTests) { + // successful EXPECT_THROW + EXPECT_THROW({ // NOLINT + a_++; + ThrowAnInteger(); + }, int); + EXPECT_EQ(1, a_); + + // failed EXPECT_THROW, throws different + EXPECT_NONFATAL_FAILURE(EXPECT_THROW({ // NOLINT + a_++; + ThrowAnInteger(); + }, bool), "throws a different type"); + EXPECT_EQ(2, a_); + + // failed EXPECT_THROW, throws nothing + EXPECT_NONFATAL_FAILURE(EXPECT_THROW(a_++, bool), "throws nothing"); + EXPECT_EQ(3, a_); + + // successful EXPECT_NO_THROW + EXPECT_NO_THROW(a_++); + EXPECT_EQ(4, a_); + + // failed EXPECT_NO_THROW + EXPECT_NONFATAL_FAILURE(EXPECT_NO_THROW({ // NOLINT + a_++; + ThrowAnInteger(); + }), "it throws"); + EXPECT_EQ(5, a_); + + // successful EXPECT_ANY_THROW + EXPECT_ANY_THROW({ // NOLINT + a_++; + ThrowAnInteger(); + }); + EXPECT_EQ(6, a_); + + // failed EXPECT_ANY_THROW + EXPECT_NONFATAL_FAILURE(EXPECT_ANY_THROW(a_++), "it doesn't"); + EXPECT_EQ(7, a_); +} + +#endif // GTEST_HAS_EXCEPTIONS + +// Tests {ASSERT|EXPECT}_NO_FATAL_FAILURE. 
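+//
+// (Added note, not in the upstream source: ASSERT_NO_FATAL_FAILURE(stmt)
+// raises a *fatal* failure, aborting the current function, if stmt
+// generates any fatal failure in the current thread, while
+// EXPECT_NO_FATAL_FAILURE(stmt) reports the same condition as a non-fatal
+// failure and lets execution continue.)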
+class NoFatalFailureTest : public Test {
+ protected:
+  void Succeeds() {}
+  void FailsNonFatal() {
+    ADD_FAILURE() << "some non-fatal failure";
+  }
+  void Fails() {
+    FAIL() << "some fatal failure";
+  }
+
+  void DoAssertNoFatalFailureOnFails() {
+    ASSERT_NO_FATAL_FAILURE(Fails());
+    ADD_FAILURE() << "should not reach here.";
+  }
+
+  void DoExpectNoFatalFailureOnFails() {
+    EXPECT_NO_FATAL_FAILURE(Fails());
+    ADD_FAILURE() << "other failure";
+  }
+};
+
+TEST_F(NoFatalFailureTest, NoFailure) {
+  EXPECT_NO_FATAL_FAILURE(Succeeds());
+  ASSERT_NO_FATAL_FAILURE(Succeeds());
+}
+
+TEST_F(NoFatalFailureTest, NonFatalIsNoFailure) {
+  EXPECT_NONFATAL_FAILURE(
+      EXPECT_NO_FATAL_FAILURE(FailsNonFatal()),
+      "some non-fatal failure");
+  EXPECT_NONFATAL_FAILURE(
+      ASSERT_NO_FATAL_FAILURE(FailsNonFatal()),
+      "some non-fatal failure");
+}
+
+TEST_F(NoFatalFailureTest, AssertNoFatalFailureOnFatalFailure) {
+  TestPartResultArray gtest_failures;
+  {
+    ScopedFakeTestPartResultReporter gtest_reporter(&gtest_failures);
+    DoAssertNoFatalFailureOnFails();
+  }
+  ASSERT_EQ(2, gtest_failures.size());
+  EXPECT_EQ(TestPartResult::kFatalFailure,
+            gtest_failures.GetTestPartResult(0).type());
+  EXPECT_EQ(TestPartResult::kFatalFailure,
+            gtest_failures.GetTestPartResult(1).type());
+  EXPECT_PRED_FORMAT2(testing::IsSubstring, "some fatal failure",
+                      gtest_failures.GetTestPartResult(0).message());
+  EXPECT_PRED_FORMAT2(testing::IsSubstring, "it does",
+                      gtest_failures.GetTestPartResult(1).message());
+}
+
+TEST_F(NoFatalFailureTest, ExpectNoFatalFailureOnFatalFailure) {
+  TestPartResultArray gtest_failures;
+  {
+    ScopedFakeTestPartResultReporter gtest_reporter(&gtest_failures);
+    DoExpectNoFatalFailureOnFails();
+  }
+  ASSERT_EQ(3, gtest_failures.size());
+  EXPECT_EQ(TestPartResult::kFatalFailure,
+            gtest_failures.GetTestPartResult(0).type());
+  EXPECT_EQ(TestPartResult::kNonFatalFailure,
+            gtest_failures.GetTestPartResult(1).type());
+  EXPECT_EQ(TestPartResult::kNonFatalFailure,
+            gtest_failures.GetTestPartResult(2).type());
+  EXPECT_PRED_FORMAT2(testing::IsSubstring, "some fatal failure",
+                      gtest_failures.GetTestPartResult(0).message());
+  EXPECT_PRED_FORMAT2(testing::IsSubstring, "it does",
+                      gtest_failures.GetTestPartResult(1).message());
+  EXPECT_PRED_FORMAT2(testing::IsSubstring, "other failure",
+                      gtest_failures.GetTestPartResult(2).message());
+}
+
+TEST_F(NoFatalFailureTest, MessageIsStreamable) {
+  TestPartResultArray gtest_failures;
+  {
+    ScopedFakeTestPartResultReporter gtest_reporter(&gtest_failures);
+    EXPECT_NO_FATAL_FAILURE(FAIL() << "foo") << "my message";
+  }
+  ASSERT_EQ(2, gtest_failures.size());
+  EXPECT_EQ(TestPartResult::kNonFatalFailure,
+            gtest_failures.GetTestPartResult(0).type());
+  EXPECT_EQ(TestPartResult::kNonFatalFailure,
+            gtest_failures.GetTestPartResult(1).type());
+  EXPECT_PRED_FORMAT2(testing::IsSubstring, "foo",
+                      gtest_failures.GetTestPartResult(0).message());
+  EXPECT_PRED_FORMAT2(testing::IsSubstring, "my message",
+                      gtest_failures.GetTestPartResult(1).message());
+}
+
+// Tests non-string assertions.
+
+// Tests EqFailure(), used for implementing *EQ* assertions.
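+// (Added note, not in the upstream source: EqFailure() drops the
+// "Actual:" or "Which is:" line when the corresponding expression string
+// is identical to the value it evaluates to, as msg2-msg4 below
+// demonstrate.)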
+TEST(AssertionTest, EqFailure) {
+  const std::string foo_val("5"), bar_val("6");
+  const std::string msg1(
+      EqFailure("foo", "bar", foo_val, bar_val, false)
+      .failure_message());
+  EXPECT_STREQ(
+      "Value of: bar\n"
+      "  Actual: 6\n"
+      "Expected: foo\n"
+      "Which is: 5",
+      msg1.c_str());
+
+  const std::string msg2(
+      EqFailure("foo", "6", foo_val, bar_val, false)
+      .failure_message());
+  EXPECT_STREQ(
+      "Value of: 6\n"
+      "Expected: foo\n"
+      "Which is: 5",
+      msg2.c_str());
+
+  const std::string msg3(
+      EqFailure("5", "bar", foo_val, bar_val, false)
+      .failure_message());
+  EXPECT_STREQ(
+      "Value of: bar\n"
+      "  Actual: 6\n"
+      "Expected: 5",
+      msg3.c_str());
+
+  const std::string msg4(
+      EqFailure("5", "6", foo_val, bar_val, false).failure_message());
+  EXPECT_STREQ(
+      "Value of: 6\n"
+      "Expected: 5",
+      msg4.c_str());
+
+  const std::string msg5(
+      EqFailure("foo", "bar",
+                std::string("\"x\""), std::string("\"y\""),
+                true).failure_message());
+  EXPECT_STREQ(
+      "Value of: bar\n"
+      "  Actual: \"y\"\n"
+      "Expected: foo (ignoring case)\n"
+      "Which is: \"x\"",
+      msg5.c_str());
+}
+
+// Tests AppendUserMessage(), used for implementing the *EQ* macros.
+TEST(AssertionTest, AppendUserMessage) {
+  const std::string foo("foo");
+
+  Message msg;
+  EXPECT_STREQ("foo",
+               AppendUserMessage(foo, msg).c_str());
+
+  msg << "bar";
+  EXPECT_STREQ("foo\nbar",
+               AppendUserMessage(foo, msg).c_str());
+}
+
+#ifdef __BORLANDC__
+// Silences warnings: "Condition is always true", "Unreachable code"
+# pragma option push -w-ccc -w-rch
+#endif
+
+// Tests ASSERT_TRUE.
+TEST(AssertionTest, ASSERT_TRUE) {
+  ASSERT_TRUE(2 > 1);  // NOLINT
+  EXPECT_FATAL_FAILURE(ASSERT_TRUE(2 < 1),
+                       "2 < 1");
+}
+
+// Tests ASSERT_TRUE(predicate) for predicates returning AssertionResult.
+TEST(AssertionTest, AssertTrueWithAssertionResult) {
+  ASSERT_TRUE(ResultIsEven(2));
+#ifndef __BORLANDC__
+  // ICE's in C++Builder.
+  EXPECT_FATAL_FAILURE(ASSERT_TRUE(ResultIsEven(3)),
+                       "Value of: ResultIsEven(3)\n"
+                       "  Actual: false (3 is odd)\n"
+                       "Expected: true");
+#endif
+  ASSERT_TRUE(ResultIsEvenNoExplanation(2));
+  EXPECT_FATAL_FAILURE(ASSERT_TRUE(ResultIsEvenNoExplanation(3)),
+                       "Value of: ResultIsEvenNoExplanation(3)\n"
+                       "  Actual: false (3 is odd)\n"
+                       "Expected: true");
+}
+
+// Tests ASSERT_FALSE.
+TEST(AssertionTest, ASSERT_FALSE) {
+  ASSERT_FALSE(2 < 1);  // NOLINT
+  EXPECT_FATAL_FAILURE(ASSERT_FALSE(2 > 1),
+                       "Value of: 2 > 1\n"
+                       "  Actual: true\n"
+                       "Expected: false");
+}
+
+// Tests ASSERT_FALSE(predicate) for predicates returning AssertionResult.
+TEST(AssertionTest, AssertFalseWithAssertionResult) {
+  ASSERT_FALSE(ResultIsEven(3));
+#ifndef __BORLANDC__
+  // ICE's in C++Builder.
+  EXPECT_FATAL_FAILURE(ASSERT_FALSE(ResultIsEven(2)),
+                       "Value of: ResultIsEven(2)\n"
+                       "  Actual: true (2 is even)\n"
+                       "Expected: false");
+#endif
+  ASSERT_FALSE(ResultIsEvenNoExplanation(3));
+  EXPECT_FATAL_FAILURE(ASSERT_FALSE(ResultIsEvenNoExplanation(2)),
+                       "Value of: ResultIsEvenNoExplanation(2)\n"
+                       "  Actual: true\n"
+                       "Expected: false");
+}
+
+#ifdef __BORLANDC__
+// Restores warnings after previous "#pragma option push" suppressed them
+# pragma option pop
+#endif
+
+// Tests using ASSERT_EQ on double values.  The purpose is to make
+// sure that the specialization we did for integer and anonymous enums
+// isn't used for double arguments.
+TEST(ExpectTest, ASSERT_EQ_Double) {
+  // A success.
+  ASSERT_EQ(5.6, 5.6);
+
+  // A failure.
+  EXPECT_FATAL_FAILURE(ASSERT_EQ(5.1, 5.2),
+                       "5.1");
+}
+
+// Tests ASSERT_EQ.
+TEST(AssertionTest, ASSERT_EQ) {
+  ASSERT_EQ(5, 2 + 3);
+  EXPECT_FATAL_FAILURE(ASSERT_EQ(5, 2*3),
+                       "Value of: 2*3\n"
+                       "  Actual: 6\n"
+                       "Expected: 5");
+}
+
+// Tests ASSERT_EQ(NULL, pointer).
+#if GTEST_CAN_COMPARE_NULL
+TEST(AssertionTest, ASSERT_EQ_NULL) {
+  // A success.
+  const char* p = NULL;
+  // Some older GCC versions may issue a spurious warning in this or the next
+  // assertion statement. This warning should not be suppressed with
+  // static_cast since the test verifies the ability to use bare NULL as the
+  // expected parameter to the macro.
+  ASSERT_EQ(NULL, p);
+
+  // A failure.
+  static int n = 0;
+  EXPECT_FATAL_FAILURE(ASSERT_EQ(NULL, &n),
+                       "Value of: &n\n");
+}
+#endif  // GTEST_CAN_COMPARE_NULL
+
+// Tests ASSERT_EQ(0, non_pointer).  Since the literal 0 can be
+// treated as a null pointer by the compiler, we need to make sure
+// that ASSERT_EQ(0, non_pointer) isn't interpreted by Google Test as
+// ASSERT_EQ(static_cast<void*>(NULL), non_pointer).
+TEST(ExpectTest, ASSERT_EQ_0) {
+  int n = 0;
+
+  // A success.
+  ASSERT_EQ(0, n);
+
+  // A failure.
+  EXPECT_FATAL_FAILURE(ASSERT_EQ(0, 5.6),
+                       "Expected: 0");
+}
+
+// Tests ASSERT_NE.
+TEST(AssertionTest, ASSERT_NE) {
+  ASSERT_NE(6, 7);
+  EXPECT_FATAL_FAILURE(ASSERT_NE('a', 'a'),
+                       "Expected: ('a') != ('a'), "
+                       "actual: 'a' (97, 0x61) vs 'a' (97, 0x61)");
+}
+
+// Tests ASSERT_LE.
+TEST(AssertionTest, ASSERT_LE) {
+  ASSERT_LE(2, 3);
+  ASSERT_LE(2, 2);
+  EXPECT_FATAL_FAILURE(ASSERT_LE(2, 0),
+                       "Expected: (2) <= (0), actual: 2 vs 0");
+}
+
+// Tests ASSERT_LT.
+TEST(AssertionTest, ASSERT_LT) {
+  ASSERT_LT(2, 3);
+  EXPECT_FATAL_FAILURE(ASSERT_LT(2, 2),
+                       "Expected: (2) < (2), actual: 2 vs 2");
+}
+
+// Tests ASSERT_GE.
+TEST(AssertionTest, ASSERT_GE) {
+  ASSERT_GE(2, 1);
+  ASSERT_GE(2, 2);
+  EXPECT_FATAL_FAILURE(ASSERT_GE(2, 3),
+                       "Expected: (2) >= (3), actual: 2 vs 3");
+}
+
+// Tests ASSERT_GT.
+TEST(AssertionTest, ASSERT_GT) {
+  ASSERT_GT(2, 1);
+  EXPECT_FATAL_FAILURE(ASSERT_GT(2, 2),
+                       "Expected: (2) > (2), actual: 2 vs 2");
+}
+
+#if GTEST_HAS_EXCEPTIONS
+
+void ThrowNothing() {}
+
+// Tests ASSERT_THROW.
+TEST(AssertionTest, ASSERT_THROW) {
+  ASSERT_THROW(ThrowAnInteger(), int);
+
+# ifndef __BORLANDC__
+
+  // ICE's in C++Builder 2007 and 2009.
+  EXPECT_FATAL_FAILURE(
+      ASSERT_THROW(ThrowAnInteger(), bool),
+      "Expected: ThrowAnInteger() throws an exception of type bool.\n"
+      "  Actual: it throws a different type.");
+# endif
+
+  EXPECT_FATAL_FAILURE(
+      ASSERT_THROW(ThrowNothing(), bool),
+      "Expected: ThrowNothing() throws an exception of type bool.\n"
+      "  Actual: it throws nothing.");
+}
+
+// Tests ASSERT_NO_THROW.
+TEST(AssertionTest, ASSERT_NO_THROW) {
+  ASSERT_NO_THROW(ThrowNothing());
+  EXPECT_FATAL_FAILURE(ASSERT_NO_THROW(ThrowAnInteger()),
+                       "Expected: ThrowAnInteger() doesn't throw an exception."
+                       "\n  Actual: it throws.");
+}
+
+// Tests ASSERT_ANY_THROW.
+TEST(AssertionTest, ASSERT_ANY_THROW) {
+  ASSERT_ANY_THROW(ThrowAnInteger());
+  EXPECT_FATAL_FAILURE(
+      ASSERT_ANY_THROW(ThrowNothing()),
+      "Expected: ThrowNothing() throws an exception.\n"
+      "  Actual: it doesn't.");
+}
+
+#endif  // GTEST_HAS_EXCEPTIONS
+
+// Makes sure we deal with the precedence of <<.  This test should
+// compile.
+TEST(AssertionTest, AssertPrecedence) {
+  ASSERT_EQ(1 < 2, true);
+  bool false_value = false;
+  ASSERT_EQ(true && false_value, false);
+}
+
+// A subroutine used by the following test.
+void TestEq1(int x) {
+  ASSERT_EQ(1, x);
+}
+
+// Tests calling a test subroutine that's not part of a fixture.
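+// (Added note, not in the upstream source: EXPECT_FATAL_FAILURE() has to
+// be used with care, since the statement it wraps runs inside a helper
+// class: it cannot reference local non-static variables, and the fatal
+// failure must occur in the current thread.)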
+TEST(AssertionTest, NonFixtureSubroutine) { + EXPECT_FATAL_FAILURE(TestEq1(2), + "Value of: x"); +} + +// An uncopyable class. +class Uncopyable { + public: + explicit Uncopyable(int a_value) : value_(a_value) {} + + int value() const { return value_; } + bool operator==(const Uncopyable& rhs) const { + return value() == rhs.value(); + } + private: + // This constructor deliberately has no implementation, as we don't + // want this class to be copyable. + Uncopyable(const Uncopyable&); // NOLINT + + int value_; +}; + +::std::ostream& operator<<(::std::ostream& os, const Uncopyable& value) { + return os << value.value(); +} + + +bool IsPositiveUncopyable(const Uncopyable& x) { + return x.value() > 0; +} + +// A subroutine used by the following test. +void TestAssertNonPositive() { + Uncopyable y(-1); + ASSERT_PRED1(IsPositiveUncopyable, y); +} +// A subroutine used by the following test. +void TestAssertEqualsUncopyable() { + Uncopyable x(5); + Uncopyable y(-1); + ASSERT_EQ(x, y); +} + +// Tests that uncopyable objects can be used in assertions. +TEST(AssertionTest, AssertWorksWithUncopyableObject) { + Uncopyable x(5); + ASSERT_PRED1(IsPositiveUncopyable, x); + ASSERT_EQ(x, x); + EXPECT_FATAL_FAILURE(TestAssertNonPositive(), + "IsPositiveUncopyable(y) evaluates to false, where\ny evaluates to -1"); + EXPECT_FATAL_FAILURE(TestAssertEqualsUncopyable(), + "Value of: y\n Actual: -1\nExpected: x\nWhich is: 5"); +} + +// Tests that uncopyable objects can be used in expects. +TEST(AssertionTest, ExpectWorksWithUncopyableObject) { + Uncopyable x(5); + EXPECT_PRED1(IsPositiveUncopyable, x); + Uncopyable y(-1); + EXPECT_NONFATAL_FAILURE(EXPECT_PRED1(IsPositiveUncopyable, y), + "IsPositiveUncopyable(y) evaluates to false, where\ny evaluates to -1"); + EXPECT_EQ(x, x); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(x, y), + "Value of: y\n Actual: -1\nExpected: x\nWhich is: 5"); +} + +enum NamedEnum { + kE1 = 0, + kE2 = 1 +}; + +TEST(AssertionTest, NamedEnum) { + EXPECT_EQ(kE1, kE1); + EXPECT_LT(kE1, kE2); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(kE1, kE2), "Which is: 0"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(kE1, kE2), "Actual: 1"); +} + +// The version of gcc used in XCode 2.2 has a bug and doesn't allow +// anonymous enums in assertions. Therefore the following test is not +// done on Mac. +// Sun Studio and HP aCC also reject this code. +#if !GTEST_OS_MAC && !defined(__SUNPRO_CC) && !defined(__HP_aCC) + +// Tests using assertions with anonymous enums. +enum { + kCaseA = -1, + +# if GTEST_OS_LINUX + + // We want to test the case where the size of the anonymous enum is + // larger than sizeof(int), to make sure our implementation of the + // assertions doesn't truncate the enums. However, MSVC + // (incorrectly) doesn't allow an enum value to exceed the range of + // an int, so this has to be conditionally compiled. + // + // On Linux, kCaseB and kCaseA have the same value when truncated to + // int size. We want to test whether this will confuse the + // assertions. 
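+  // (Added note, not in the upstream source: kMaxBiggestInt is the largest
+  // value of testing::internal::BiggestInt, the widest integer type Google
+  // Test prints; truncated to a 32-bit int it equals -1, i.e. kCaseA.)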
+  kCaseB = testing::internal::kMaxBiggestInt,
+
+# else
+
+  kCaseB = INT_MAX,
+
+# endif  // GTEST_OS_LINUX
+
+  kCaseC = 42
+};
+
+TEST(AssertionTest, AnonymousEnum) {
+# if GTEST_OS_LINUX
+
+  EXPECT_EQ(static_cast<int>(kCaseA), static_cast<int>(kCaseB));
+
+# endif  // GTEST_OS_LINUX
+
+  EXPECT_EQ(kCaseA, kCaseA);
+  EXPECT_NE(kCaseA, kCaseB);
+  EXPECT_LT(kCaseA, kCaseB);
+  EXPECT_LE(kCaseA, kCaseB);
+  EXPECT_GT(kCaseB, kCaseA);
+  EXPECT_GE(kCaseA, kCaseA);
+  EXPECT_NONFATAL_FAILURE(EXPECT_GE(kCaseA, kCaseB),
+                          "(kCaseA) >= (kCaseB)");
+  EXPECT_NONFATAL_FAILURE(EXPECT_GE(kCaseA, kCaseC),
+                          "-1 vs 42");
+
+  ASSERT_EQ(kCaseA, kCaseA);
+  ASSERT_NE(kCaseA, kCaseB);
+  ASSERT_LT(kCaseA, kCaseB);
+  ASSERT_LE(kCaseA, kCaseB);
+  ASSERT_GT(kCaseB, kCaseA);
+  ASSERT_GE(kCaseA, kCaseA);
+
+# ifndef __BORLANDC__
+
+  // ICE's in C++Builder.
+  EXPECT_FATAL_FAILURE(ASSERT_EQ(kCaseA, kCaseB),
+                       "Value of: kCaseB");
+  EXPECT_FATAL_FAILURE(ASSERT_EQ(kCaseA, kCaseC),
+                       "Actual: 42");
+# endif
+
+  EXPECT_FATAL_FAILURE(ASSERT_EQ(kCaseA, kCaseC),
+                       "Which is: -1");
+}
+
+#endif  // !GTEST_OS_MAC && !defined(__SUNPRO_CC)
+
+#if GTEST_OS_WINDOWS
+
+static HRESULT UnexpectedHRESULTFailure() {
+  return E_UNEXPECTED;
+}
+
+static HRESULT OkHRESULTSuccess() {
+  return S_OK;
+}
+
+static HRESULT FalseHRESULTSuccess() {
+  return S_FALSE;
+}
+
+// HRESULT assertion tests test both zero and non-zero
+// success codes as well as failure message for each.
+//
+// Windows CE doesn't support message texts.
+TEST(HRESULTAssertionTest, EXPECT_HRESULT_SUCCEEDED) {
+  EXPECT_HRESULT_SUCCEEDED(S_OK);
+  EXPECT_HRESULT_SUCCEEDED(S_FALSE);
+
+  EXPECT_NONFATAL_FAILURE(EXPECT_HRESULT_SUCCEEDED(UnexpectedHRESULTFailure()),
+                          "Expected: (UnexpectedHRESULTFailure()) succeeds.\n"
+                          "  Actual: 0x8000FFFF");
+}
+
+TEST(HRESULTAssertionTest, ASSERT_HRESULT_SUCCEEDED) {
+  ASSERT_HRESULT_SUCCEEDED(S_OK);
+  ASSERT_HRESULT_SUCCEEDED(S_FALSE);
+
+  EXPECT_FATAL_FAILURE(ASSERT_HRESULT_SUCCEEDED(UnexpectedHRESULTFailure()),
+                       "Expected: (UnexpectedHRESULTFailure()) succeeds.\n"
+                       "  Actual: 0x8000FFFF");
+}
+
+TEST(HRESULTAssertionTest, EXPECT_HRESULT_FAILED) {
+  EXPECT_HRESULT_FAILED(E_UNEXPECTED);
+
+  EXPECT_NONFATAL_FAILURE(EXPECT_HRESULT_FAILED(OkHRESULTSuccess()),
+                          "Expected: (OkHRESULTSuccess()) fails.\n"
+                          "  Actual: 0x0");
+  EXPECT_NONFATAL_FAILURE(EXPECT_HRESULT_FAILED(FalseHRESULTSuccess()),
+                          "Expected: (FalseHRESULTSuccess()) fails.\n"
+                          "  Actual: 0x1");
+}
+
+TEST(HRESULTAssertionTest, ASSERT_HRESULT_FAILED) {
+  ASSERT_HRESULT_FAILED(E_UNEXPECTED);
+
+# ifndef __BORLANDC__
+
+  // ICE's in C++Builder 2007 and 2009.
+  EXPECT_FATAL_FAILURE(ASSERT_HRESULT_FAILED(OkHRESULTSuccess()),
+                       "Expected: (OkHRESULTSuccess()) fails.\n"
+                       "  Actual: 0x0");
+# endif
+
+  EXPECT_FATAL_FAILURE(ASSERT_HRESULT_FAILED(FalseHRESULTSuccess()),
+                       "Expected: (FalseHRESULTSuccess()) fails.\n"
+                       "  Actual: 0x1");
+}
+
+// Tests that streaming to the HRESULT macros works.
+TEST(HRESULTAssertionTest, Streaming) {
+  EXPECT_HRESULT_SUCCEEDED(S_OK) << "unexpected failure";
+  ASSERT_HRESULT_SUCCEEDED(S_OK) << "unexpected failure";
+  EXPECT_HRESULT_FAILED(E_UNEXPECTED) << "unexpected failure";
+  ASSERT_HRESULT_FAILED(E_UNEXPECTED) << "unexpected failure";
+
+  EXPECT_NONFATAL_FAILURE(
+      EXPECT_HRESULT_SUCCEEDED(E_UNEXPECTED) << "expected failure",
+      "expected failure");
+
+# ifndef __BORLANDC__
+
+  // ICE's in C++Builder 2007 and 2009.
+ EXPECT_FATAL_FAILURE( + ASSERT_HRESULT_SUCCEEDED(E_UNEXPECTED) << "expected failure", + "expected failure"); +# endif + + EXPECT_NONFATAL_FAILURE( + EXPECT_HRESULT_FAILED(S_OK) << "expected failure", + "expected failure"); + + EXPECT_FATAL_FAILURE( + ASSERT_HRESULT_FAILED(S_OK) << "expected failure", + "expected failure"); +} + +#endif // GTEST_OS_WINDOWS + +#ifdef __BORLANDC__ +// Silences warnings: "Condition is always true", "Unreachable code" +# pragma option push -w-ccc -w-rch +#endif + +// Tests that the assertion macros behave like single statements. +TEST(AssertionSyntaxTest, BasicAssertionsBehavesLikeSingleStatement) { + if (AlwaysFalse()) + ASSERT_TRUE(false) << "This should never be executed; " + "It's a compilation test only."; + + if (AlwaysTrue()) + EXPECT_FALSE(false); + else + ; // NOLINT + + if (AlwaysFalse()) + ASSERT_LT(1, 3); + + if (AlwaysFalse()) + ; // NOLINT + else + EXPECT_GT(3, 2) << ""; +} + +#if GTEST_HAS_EXCEPTIONS +// Tests that the compiler will not complain about unreachable code in the +// EXPECT_THROW/EXPECT_ANY_THROW/EXPECT_NO_THROW macros. +TEST(ExpectThrowTest, DoesNotGenerateUnreachableCodeWarning) { + int n = 0; + + EXPECT_THROW(throw 1, int); + EXPECT_NONFATAL_FAILURE(EXPECT_THROW(n++, int), ""); + EXPECT_NONFATAL_FAILURE(EXPECT_THROW(throw 1, const char*), ""); + EXPECT_NO_THROW(n++); + EXPECT_NONFATAL_FAILURE(EXPECT_NO_THROW(throw 1), ""); + EXPECT_ANY_THROW(throw 1); + EXPECT_NONFATAL_FAILURE(EXPECT_ANY_THROW(n++), ""); +} + +TEST(AssertionSyntaxTest, ExceptionAssertionsBehavesLikeSingleStatement) { + if (AlwaysFalse()) + EXPECT_THROW(ThrowNothing(), bool); + + if (AlwaysTrue()) + EXPECT_THROW(ThrowAnInteger(), int); + else + ; // NOLINT + + if (AlwaysFalse()) + EXPECT_NO_THROW(ThrowAnInteger()); + + if (AlwaysTrue()) + EXPECT_NO_THROW(ThrowNothing()); + else + ; // NOLINT + + if (AlwaysFalse()) + EXPECT_ANY_THROW(ThrowNothing()); + + if (AlwaysTrue()) + EXPECT_ANY_THROW(ThrowAnInteger()); + else + ; // NOLINT +} +#endif // GTEST_HAS_EXCEPTIONS + +TEST(AssertionSyntaxTest, NoFatalFailureAssertionsBehavesLikeSingleStatement) { + if (AlwaysFalse()) + EXPECT_NO_FATAL_FAILURE(FAIL()) << "This should never be executed. " + << "It's a compilation test only."; + else + ; // NOLINT + + if (AlwaysFalse()) + ASSERT_NO_FATAL_FAILURE(FAIL()) << ""; + else + ; // NOLINT + + if (AlwaysTrue()) + EXPECT_NO_FATAL_FAILURE(SUCCEED()); + else + ; // NOLINT + + if (AlwaysFalse()) + ; // NOLINT + else + ASSERT_NO_FATAL_FAILURE(SUCCEED()); +} + +// Tests that the assertion macros work well with switch statements. +TEST(AssertionSyntaxTest, WorksWithSwitch) { + switch (0) { + case 1: + break; + default: + ASSERT_TRUE(true); + } + + switch (0) + case 0: + EXPECT_FALSE(false) << "EXPECT_FALSE failed in switch case"; + + // Binary assertions are implemented using a different code path + // than the Boolean assertions. Hence we test them separately. + switch (0) { + case 1: + default: + ASSERT_EQ(1, 1) << "ASSERT_EQ failed in default switch handler"; + } + + switch (0) + case 0: + EXPECT_NE(1, 2); +} + +#if GTEST_HAS_EXCEPTIONS + +void ThrowAString() { + throw "std::string"; +} + +// Test that the exception assertion macros compile and work with const +// type qualifier. +TEST(AssertionSyntaxTest, WorksWithConst) { + ASSERT_THROW(ThrowAString(), const char*); + + EXPECT_THROW(ThrowAString(), const char*); +} + +#endif // GTEST_HAS_EXCEPTIONS + +} // namespace + +namespace testing { + +// Tests that Google Test tracks SUCCEED*. 
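+// (Added note, not in the upstream source: SUCCEED() records an explicit
+// success as a test part result, so it shows up in total_part_count()
+// below; successful EXPECT_*/ASSERT_* assertions, by contrast, leave no
+// test part result behind.)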
+TEST(SuccessfulAssertionTest, SUCCEED) { + SUCCEED(); + SUCCEED() << "OK"; + EXPECT_EQ(2, GetUnitTestImpl()->current_test_result()->total_part_count()); +} + +// Tests that Google Test doesn't track successful EXPECT_*. +TEST(SuccessfulAssertionTest, EXPECT) { + EXPECT_TRUE(true); + EXPECT_EQ(0, GetUnitTestImpl()->current_test_result()->total_part_count()); +} + +// Tests that Google Test doesn't track successful EXPECT_STR*. +TEST(SuccessfulAssertionTest, EXPECT_STR) { + EXPECT_STREQ("", ""); + EXPECT_EQ(0, GetUnitTestImpl()->current_test_result()->total_part_count()); +} + +// Tests that Google Test doesn't track successful ASSERT_*. +TEST(SuccessfulAssertionTest, ASSERT) { + ASSERT_TRUE(true); + EXPECT_EQ(0, GetUnitTestImpl()->current_test_result()->total_part_count()); +} + +// Tests that Google Test doesn't track successful ASSERT_STR*. +TEST(SuccessfulAssertionTest, ASSERT_STR) { + ASSERT_STREQ("", ""); + EXPECT_EQ(0, GetUnitTestImpl()->current_test_result()->total_part_count()); +} + +} // namespace testing + +namespace { + +// Tests the message streaming variation of assertions. + +TEST(AssertionWithMessageTest, EXPECT) { + EXPECT_EQ(1, 1) << "This should succeed."; + EXPECT_NONFATAL_FAILURE(EXPECT_NE(1, 1) << "Expected failure #1.", + "Expected failure #1"); + EXPECT_LE(1, 2) << "This should succeed."; + EXPECT_NONFATAL_FAILURE(EXPECT_LT(1, 0) << "Expected failure #2.", + "Expected failure #2."); + EXPECT_GE(1, 0) << "This should succeed."; + EXPECT_NONFATAL_FAILURE(EXPECT_GT(1, 2) << "Expected failure #3.", + "Expected failure #3."); + + EXPECT_STREQ("1", "1") << "This should succeed."; + EXPECT_NONFATAL_FAILURE(EXPECT_STRNE("1", "1") << "Expected failure #4.", + "Expected failure #4."); + EXPECT_STRCASEEQ("a", "A") << "This should succeed."; + EXPECT_NONFATAL_FAILURE(EXPECT_STRCASENE("a", "A") << "Expected failure #5.", + "Expected failure #5."); + + EXPECT_FLOAT_EQ(1, 1) << "This should succeed."; + EXPECT_NONFATAL_FAILURE(EXPECT_DOUBLE_EQ(1, 1.2) << "Expected failure #6.", + "Expected failure #6."); + EXPECT_NEAR(1, 1.1, 0.2) << "This should succeed."; +} + +TEST(AssertionWithMessageTest, ASSERT) { + ASSERT_EQ(1, 1) << "This should succeed."; + ASSERT_NE(1, 2) << "This should succeed."; + ASSERT_LE(1, 2) << "This should succeed."; + ASSERT_LT(1, 2) << "This should succeed."; + ASSERT_GE(1, 0) << "This should succeed."; + EXPECT_FATAL_FAILURE(ASSERT_GT(1, 2) << "Expected failure.", + "Expected failure."); +} + +TEST(AssertionWithMessageTest, ASSERT_STR) { + ASSERT_STREQ("1", "1") << "This should succeed."; + ASSERT_STRNE("1", "2") << "This should succeed."; + ASSERT_STRCASEEQ("a", "A") << "This should succeed."; + EXPECT_FATAL_FAILURE(ASSERT_STRCASENE("a", "A") << "Expected failure.", + "Expected failure."); +} + +TEST(AssertionWithMessageTest, ASSERT_FLOATING) { + ASSERT_FLOAT_EQ(1, 1) << "This should succeed."; + ASSERT_DOUBLE_EQ(1, 1) << "This should succeed."; + EXPECT_FATAL_FAILURE(ASSERT_NEAR(1,1.2, 0.1) << "Expect failure.", // NOLINT + "Expect failure."); + // To work around a bug in gcc 2.95.0, there is intentionally no + // space after the first comma in the previous statement. +} + +// Tests using ASSERT_FALSE with a streamed message. +TEST(AssertionWithMessageTest, ASSERT_FALSE) { + ASSERT_FALSE(false) << "This shouldn't fail."; + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_FALSE(true) << "Expected failure: " << 2 << " > " << 1 + << " evaluates to " << true; + }, "Expected failure"); +} + +// Tests using FAIL with a streamed message. 
+TEST(AssertionWithMessageTest, FAIL) {
+  EXPECT_FATAL_FAILURE(FAIL() << 0,
+                       "0");
+}
+
+// Tests using SUCCEED with a streamed message.
+TEST(AssertionWithMessageTest, SUCCEED) {
+  SUCCEED() << "Success == " << 1;
+}
+
+// Tests using ASSERT_TRUE with a streamed message.
+TEST(AssertionWithMessageTest, ASSERT_TRUE) {
+  ASSERT_TRUE(true) << "This should succeed.";
+  ASSERT_TRUE(true) << true;
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_TRUE(false) << static_cast<const char*>(NULL)
+                       << static_cast<char*>(NULL);
+  }, "(null)(null)");
+}
+
+#if GTEST_OS_WINDOWS
+// Tests using wide strings in assertion messages.
+TEST(AssertionWithMessageTest, WideStringMessage) {
+  EXPECT_NONFATAL_FAILURE({  // NOLINT
+    EXPECT_TRUE(false) << L"This failure is expected.\x8119";
+  }, "This failure is expected.");
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    ASSERT_EQ(1, 2) << "This failure is "
+                    << L"expected too.\x8120";
+  }, "This failure is expected too.");
+}
+#endif  // GTEST_OS_WINDOWS
+
+// Tests EXPECT_TRUE.
+TEST(ExpectTest, EXPECT_TRUE) {
+  EXPECT_TRUE(true) << "Intentional success";
+  EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(false) << "Intentional failure #1.",
+                          "Intentional failure #1.");
+  EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(false) << "Intentional failure #2.",
+                          "Intentional failure #2.");
+  EXPECT_TRUE(2 > 1);  // NOLINT
+  EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(2 < 1),
+                          "Value of: 2 < 1\n"
+                          "  Actual: false\n"
+                          "Expected: true");
+  EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(2 > 3),
+                          "2 > 3");
+}
+
+// Tests EXPECT_TRUE(predicate) for predicates returning AssertionResult.
+TEST(ExpectTest, ExpectTrueWithAssertionResult) {
+  EXPECT_TRUE(ResultIsEven(2));
+  EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(ResultIsEven(3)),
+                          "Value of: ResultIsEven(3)\n"
+                          "  Actual: false (3 is odd)\n"
+                          "Expected: true");
+  EXPECT_TRUE(ResultIsEvenNoExplanation(2));
+  EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(ResultIsEvenNoExplanation(3)),
+                          "Value of: ResultIsEvenNoExplanation(3)\n"
+                          "  Actual: false (3 is odd)\n"
+                          "Expected: true");
+}
+
+// Tests EXPECT_FALSE with a streamed message.
+TEST(ExpectTest, EXPECT_FALSE) {
+  EXPECT_FALSE(2 < 1);  // NOLINT
+  EXPECT_FALSE(false) << "Intentional success";
+  EXPECT_NONFATAL_FAILURE(EXPECT_FALSE(true) << "Intentional failure #1.",
+                          "Intentional failure #1.");
+  EXPECT_NONFATAL_FAILURE(EXPECT_FALSE(true) << "Intentional failure #2.",
+                          "Intentional failure #2.");
+  EXPECT_NONFATAL_FAILURE(EXPECT_FALSE(2 > 1),
+                          "Value of: 2 > 1\n"
+                          "  Actual: true\n"
+                          "Expected: false");
+  EXPECT_NONFATAL_FAILURE(EXPECT_FALSE(2 < 3),
+                          "2 < 3");
+}
+
+// Tests EXPECT_FALSE(predicate) for predicates returning AssertionResult.
+TEST(ExpectTest, ExpectFalseWithAssertionResult) {
+  EXPECT_FALSE(ResultIsEven(3));
+  EXPECT_NONFATAL_FAILURE(EXPECT_FALSE(ResultIsEven(2)),
+                          "Value of: ResultIsEven(2)\n"
+                          "  Actual: true (2 is even)\n"
+                          "Expected: false");
+  EXPECT_FALSE(ResultIsEvenNoExplanation(3));
+  EXPECT_NONFATAL_FAILURE(EXPECT_FALSE(ResultIsEvenNoExplanation(2)),
+                          "Value of: ResultIsEvenNoExplanation(2)\n"
+                          "  Actual: true\n"
+                          "Expected: false");
+}
+
+#ifdef __BORLANDC__
+// Restores warnings after previous "#pragma option push" suppressed them
+# pragma option pop
+#endif
+
+// Tests EXPECT_EQ.
+TEST(ExpectTest, EXPECT_EQ) {
+  EXPECT_EQ(5, 2 + 3);
+  EXPECT_NONFATAL_FAILURE(EXPECT_EQ(5, 2*3),
+                          "Value of: 2*3\n"
+                          "  Actual: 6\n"
+                          "Expected: 5");
+  EXPECT_NONFATAL_FAILURE(EXPECT_EQ(5, 2 - 3),
+                          "2 - 3");
+}
+
+// Tests using EXPECT_EQ on double values.  The purpose is to make
+// sure that the specialization we did for integer and anonymous enums
+// isn't used for double arguments.
+TEST(ExpectTest, EXPECT_EQ_Double) {
+  // A success.
+  EXPECT_EQ(5.6, 5.6);
+
+  // A failure.
+  EXPECT_NONFATAL_FAILURE(EXPECT_EQ(5.1, 5.2),
+                          "5.1");
+}
+
+#if GTEST_CAN_COMPARE_NULL
+// Tests EXPECT_EQ(NULL, pointer).
+TEST(ExpectTest, EXPECT_EQ_NULL) {
+  // A success.
+  const char* p = NULL;
+  // Some older GCC versions may issue a spurious warning in this or the next
+  // assertion statement. This warning should not be suppressed with
+  // static_cast since the test verifies the ability to use bare NULL as the
+  // expected parameter to the macro.
+  EXPECT_EQ(NULL, p);
+
+  // A failure.
+  int n = 0;
+  EXPECT_NONFATAL_FAILURE(EXPECT_EQ(NULL, &n),
+                          "Value of: &n\n");
+}
+#endif  // GTEST_CAN_COMPARE_NULL
+
+// Tests EXPECT_EQ(0, non_pointer).  Since the literal 0 can be
+// treated as a null pointer by the compiler, we need to make sure
+// that EXPECT_EQ(0, non_pointer) isn't interpreted by Google Test as
+// EXPECT_EQ(static_cast<void*>(NULL), non_pointer).
+TEST(ExpectTest, EXPECT_EQ_0) {
+  int n = 0;
+
+  // A success.
+  EXPECT_EQ(0, n);
+
+  // A failure.
+  EXPECT_NONFATAL_FAILURE(EXPECT_EQ(0, 5.6),
+                          "Expected: 0");
+}
+
+// Tests EXPECT_NE.
+TEST(ExpectTest, EXPECT_NE) {
+  EXPECT_NE(6, 7);
+
+  EXPECT_NONFATAL_FAILURE(EXPECT_NE('a', 'a'),
+                          "Expected: ('a') != ('a'), "
+                          "actual: 'a' (97, 0x61) vs 'a' (97, 0x61)");
+  EXPECT_NONFATAL_FAILURE(EXPECT_NE(2, 2),
+                          "2");
+  char* const p0 = NULL;
+  EXPECT_NONFATAL_FAILURE(EXPECT_NE(p0, p0),
+                          "p0");
+  // Only way to get the Nokia compiler to compile the cast
+  // is to have a separate void* variable first. Putting
+  // the two casts on the same line doesn't work, neither does
+  // a direct C-style to char*.
+  void* pv1 = (void*)0x1234;  // NOLINT
+  char* const p1 = reinterpret_cast<char*>(pv1);
+  EXPECT_NONFATAL_FAILURE(EXPECT_NE(p1, p1),
+                          "p1");
+}
+
+// Tests EXPECT_LE.
+TEST(ExpectTest, EXPECT_LE) {
+  EXPECT_LE(2, 3);
+  EXPECT_LE(2, 2);
+  EXPECT_NONFATAL_FAILURE(EXPECT_LE(2, 0),
+                          "Expected: (2) <= (0), actual: 2 vs 0");
+  EXPECT_NONFATAL_FAILURE(EXPECT_LE(1.1, 0.9),
+                          "(1.1) <= (0.9)");
+}
+
+// Tests EXPECT_LT.
+TEST(ExpectTest, EXPECT_LT) {
+  EXPECT_LT(2, 3);
+  EXPECT_NONFATAL_FAILURE(EXPECT_LT(2, 2),
+                          "Expected: (2) < (2), actual: 2 vs 2");
+  EXPECT_NONFATAL_FAILURE(EXPECT_LT(2, 1),
+                          "(2) < (1)");
+}
+
+// Tests EXPECT_GE.
+TEST(ExpectTest, EXPECT_GE) {
+  EXPECT_GE(2, 1);
+  EXPECT_GE(2, 2);
+  EXPECT_NONFATAL_FAILURE(EXPECT_GE(2, 3),
+                          "Expected: (2) >= (3), actual: 2 vs 3");
+  EXPECT_NONFATAL_FAILURE(EXPECT_GE(0.9, 1.1),
+                          "(0.9) >= (1.1)");
+}
+
+// Tests EXPECT_GT.
+TEST(ExpectTest, EXPECT_GT) {
+  EXPECT_GT(2, 1);
+  EXPECT_NONFATAL_FAILURE(EXPECT_GT(2, 2),
+                          "Expected: (2) > (2), actual: 2 vs 2");
+  EXPECT_NONFATAL_FAILURE(EXPECT_GT(2, 3),
+                          "(2) > (3)");
+}
+
+#if GTEST_HAS_EXCEPTIONS
+
+// Tests EXPECT_THROW.
+TEST(ExpectTest, EXPECT_THROW) {
+  EXPECT_THROW(ThrowAnInteger(), int);
+  EXPECT_NONFATAL_FAILURE(EXPECT_THROW(ThrowAnInteger(), bool),
+                          "Expected: ThrowAnInteger() throws an exception of "
+                          "type bool.\n  Actual: it throws a different type.");
+  EXPECT_NONFATAL_FAILURE(
+      EXPECT_THROW(ThrowNothing(), bool),
+      "Expected: ThrowNothing() throws an exception of type bool.\n"
+      "  Actual: it throws nothing.");
+}
+
+// Tests EXPECT_NO_THROW.
+TEST(ExpectTest, EXPECT_NO_THROW) {
+  EXPECT_NO_THROW(ThrowNothing());
+  EXPECT_NONFATAL_FAILURE(EXPECT_NO_THROW(ThrowAnInteger()),
+                          "Expected: ThrowAnInteger() doesn't throw an "
+                          "exception.\n  Actual: it throws.");
+}
+
+// Tests EXPECT_ANY_THROW.
+TEST(ExpectTest, EXPECT_ANY_THROW) {
+  EXPECT_ANY_THROW(ThrowAnInteger());
+  EXPECT_NONFATAL_FAILURE(
+      EXPECT_ANY_THROW(ThrowNothing()),
+      "Expected: ThrowNothing() throws an exception.\n"
+      "  Actual: it doesn't.");
+}
+
+#endif  // GTEST_HAS_EXCEPTIONS
+
+// Make sure we deal with the precedence of <<.
+TEST(ExpectTest, ExpectPrecedence) {
+  EXPECT_EQ(1 < 2, true);
+  EXPECT_NONFATAL_FAILURE(EXPECT_EQ(true, true && false),
+                          "Value of: true && false");
+}
+
+
+// Tests the StreamableToString() function.
+
+// Tests using StreamableToString() on a scalar.
+TEST(StreamableToStringTest, Scalar) {
+  EXPECT_STREQ("5", StreamableToString(5).c_str());
+}
+
+// Tests using StreamableToString() on a non-char pointer.
+TEST(StreamableToStringTest, Pointer) {
+  int n = 0;
+  int* p = &n;
+  EXPECT_STRNE("(null)", StreamableToString(p).c_str());
+}
+
+// Tests using StreamableToString() on a NULL non-char pointer.
+TEST(StreamableToStringTest, NullPointer) {
+  int* p = NULL;
+  EXPECT_STREQ("(null)", StreamableToString(p).c_str());
+}
+
+// Tests using StreamableToString() on a C string.
+TEST(StreamableToStringTest, CString) {
+  EXPECT_STREQ("Foo", StreamableToString("Foo").c_str());
+}
+
+// Tests using StreamableToString() on a NULL C string.
+TEST(StreamableToStringTest, NullCString) {
+  char* p = NULL;
+  EXPECT_STREQ("(null)", StreamableToString(p).c_str());
+}
+
+// Tests using streamable values as assertion messages.
+
+// Tests using std::string as an assertion message.
+TEST(StreamableTest, string) {
+  static const std::string str(
+      "This failure message is a std::string, and is expected.");
+  EXPECT_FATAL_FAILURE(FAIL() << str,
+                       str.c_str());
+}
+
+// Tests that we can output strings containing embedded NULs.
+// Limited to Linux because we can only do this with std::string's.
+TEST(StreamableTest, stringWithEmbeddedNUL) {
+  static const char char_array_with_nul[] =
+      "Here's a NUL\0 and some more string";
+  static const std::string string_with_nul(char_array_with_nul,
+                                           sizeof(char_array_with_nul)
+                                           - 1);  // drops the trailing NUL
+  EXPECT_FATAL_FAILURE(FAIL() << string_with_nul,
+                       "Here's a NUL\\0 and some more string");
+}
+
+// Tests that we can output a NUL char.
+TEST(StreamableTest, NULChar) {
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    FAIL() << "A NUL" << '\0' << " and some more string";
+  }, "A NUL\\0 and some more string");
+}
+
+// Tests using int as an assertion message.
+TEST(StreamableTest, int) {
+  EXPECT_FATAL_FAILURE(FAIL() << 900913,
+                       "900913");
+}
+
+// Tests using NULL char pointer as an assertion message.
+//
+// In MSVC, streaming a NULL char * causes access violation.  Google Test
+// implemented a workaround (substituting "(null)" for NULL).  This
+// tests whether the workaround works.
+TEST(StreamableTest, NullCharPtr) {
+  EXPECT_FATAL_FAILURE(FAIL() << static_cast<const char*>(NULL),
+                       "(null)");
+}
+
+// Tests that basic IO manipulators (endl, ends, and flush) can be
+// streamed to testing::Message.
+TEST(StreamableTest, BasicIoManip) {
+  EXPECT_FATAL_FAILURE({  // NOLINT
+    FAIL() << "Line 1." << std::endl
+           << "A NUL char " << std::ends << std::flush << " in line 2.";
+  }, "Line 1.\nA NUL char \\0 in line 2.");
+}
+
+// Tests the macros that haven't been covered so far.
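+//
+// (Added note, not in the upstream source: ADD_FAILURE() produces a
+// non-fatal failure, so execution continues past it, while FAIL()
+// produces a fatal one and returns from the current function; the helper
+// below relies on that difference.)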
+ +void AddFailureHelper(bool* aborted) { + *aborted = true; + ADD_FAILURE() << "Intentional failure."; + *aborted = false; +} + +// Tests ADD_FAILURE. +TEST(MacroTest, ADD_FAILURE) { + bool aborted = true; + EXPECT_NONFATAL_FAILURE(AddFailureHelper(&aborted), + "Intentional failure."); + EXPECT_FALSE(aborted); +} + +// Tests ADD_FAILURE_AT. +TEST(MacroTest, ADD_FAILURE_AT) { + // Verifies that ADD_FAILURE_AT does generate a nonfatal failure and + // the failure message contains the user-streamed part. + EXPECT_NONFATAL_FAILURE(ADD_FAILURE_AT("foo.cc", 42) << "Wrong!", "Wrong!"); + + // Verifies that the user-streamed part is optional. + EXPECT_NONFATAL_FAILURE(ADD_FAILURE_AT("foo.cc", 42), "Failed"); + + // Unfortunately, we cannot verify that the failure message contains + // the right file path and line number the same way, as + // EXPECT_NONFATAL_FAILURE() doesn't get to see the file path and + // line number. Instead, we do that in gtest_output_test_.cc. +} + +// Tests FAIL. +TEST(MacroTest, FAIL) { + EXPECT_FATAL_FAILURE(FAIL(), + "Failed"); + EXPECT_FATAL_FAILURE(FAIL() << "Intentional failure.", + "Intentional failure."); +} + +// Tests SUCCEED +TEST(MacroTest, SUCCEED) { + SUCCEED(); + SUCCEED() << "Explicit success."; +} + +// Tests for EXPECT_EQ() and ASSERT_EQ(). +// +// These tests fail *intentionally*, s.t. the failure messages can be +// generated and tested. +// +// We have different tests for different argument types. + +// Tests using bool values in {EXPECT|ASSERT}_EQ. +TEST(EqAssertionTest, Bool) { + EXPECT_EQ(true, true); + EXPECT_FATAL_FAILURE({ + bool false_value = false; + ASSERT_EQ(false_value, true); + }, "Value of: true"); +} + +// Tests using int values in {EXPECT|ASSERT}_EQ. +TEST(EqAssertionTest, Int) { + ASSERT_EQ(32, 32); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(32, 33), + "33"); +} + +// Tests using time_t values in {EXPECT|ASSERT}_EQ. +TEST(EqAssertionTest, Time_T) { + EXPECT_EQ(static_cast(0), + static_cast(0)); + EXPECT_FATAL_FAILURE(ASSERT_EQ(static_cast(0), + static_cast(1234)), + "1234"); +} + +// Tests using char values in {EXPECT|ASSERT}_EQ. +TEST(EqAssertionTest, Char) { + ASSERT_EQ('z', 'z'); + const char ch = 'b'; + EXPECT_NONFATAL_FAILURE(EXPECT_EQ('\0', ch), + "ch"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ('a', ch), + "ch"); +} + +// Tests using wchar_t values in {EXPECT|ASSERT}_EQ. +TEST(EqAssertionTest, WideChar) { + EXPECT_EQ(L'b', L'b'); + + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(L'\0', L'x'), + "Value of: L'x'\n" + " Actual: L'x' (120, 0x78)\n" + "Expected: L'\0'\n" + "Which is: L'\0' (0, 0x0)"); + + static wchar_t wchar; + wchar = L'b'; + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(L'a', wchar), + "wchar"); + wchar = 0x8119; + EXPECT_FATAL_FAILURE(ASSERT_EQ(static_cast(0x8120), wchar), + "Value of: wchar"); +} + +// Tests using ::std::string values in {EXPECT|ASSERT}_EQ. +TEST(EqAssertionTest, StdString) { + // Compares a const char* to an std::string that has identical + // content. + ASSERT_EQ("Test", ::std::string("Test")); + + // Compares two identical std::strings. + static const ::std::string str1("A * in the middle"); + static const ::std::string str2(str1); + EXPECT_EQ(str1, str2); + + // Compares a const char* to an std::string that has different + // content + EXPECT_NONFATAL_FAILURE(EXPECT_EQ("Test", ::std::string("test")), + "\"test\""); + + // Compares an std::string to a char* that has different content. 
+ char* const p1 = const_cast("foo"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(::std::string("bar"), p1), + "p1"); + + // Compares two std::strings that have different contents, one of + // which having a NUL character in the middle. This should fail. + static ::std::string str3(str1); + str3.at(2) = '\0'; + EXPECT_FATAL_FAILURE(ASSERT_EQ(str1, str3), + "Value of: str3\n" + " Actual: \"A \\0 in the middle\""); +} + +#if GTEST_HAS_STD_WSTRING + +// Tests using ::std::wstring values in {EXPECT|ASSERT}_EQ. +TEST(EqAssertionTest, StdWideString) { + // Compares two identical std::wstrings. + const ::std::wstring wstr1(L"A * in the middle"); + const ::std::wstring wstr2(wstr1); + ASSERT_EQ(wstr1, wstr2); + + // Compares an std::wstring to a const wchar_t* that has identical + // content. + const wchar_t kTestX8119[] = { 'T', 'e', 's', 't', 0x8119, '\0' }; + EXPECT_EQ(::std::wstring(kTestX8119), kTestX8119); + + // Compares an std::wstring to a const wchar_t* that has different + // content. + const wchar_t kTestX8120[] = { 'T', 'e', 's', 't', 0x8120, '\0' }; + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_EQ(::std::wstring(kTestX8119), kTestX8120); + }, "kTestX8120"); + + // Compares two std::wstrings that have different contents, one of + // which having a NUL character in the middle. + ::std::wstring wstr3(wstr1); + wstr3.at(2) = L'\0'; + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(wstr1, wstr3), + "wstr3"); + + // Compares a wchar_t* to an std::wstring that has different + // content. + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_EQ(const_cast(L"foo"), ::std::wstring(L"bar")); + }, ""); +} + +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_GLOBAL_STRING +// Tests using ::string values in {EXPECT|ASSERT}_EQ. +TEST(EqAssertionTest, GlobalString) { + // Compares a const char* to a ::string that has identical content. + EXPECT_EQ("Test", ::string("Test")); + + // Compares two identical ::strings. + const ::string str1("A * in the middle"); + const ::string str2(str1); + ASSERT_EQ(str1, str2); + + // Compares a ::string to a const char* that has different content. + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(::string("Test"), "test"), + "test"); + + // Compares two ::strings that have different contents, one of which + // having a NUL character in the middle. + ::string str3(str1); + str3.at(2) = '\0'; + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(str1, str3), + "str3"); + + // Compares a ::string to a char* that has different content. + EXPECT_FATAL_FAILURE({ // NOLINT + ASSERT_EQ(::string("bar"), const_cast("foo")); + }, ""); +} + +#endif // GTEST_HAS_GLOBAL_STRING + +#if GTEST_HAS_GLOBAL_WSTRING + +// Tests using ::wstring values in {EXPECT|ASSERT}_EQ. +TEST(EqAssertionTest, GlobalWideString) { + // Compares two identical ::wstrings. + static const ::wstring wstr1(L"A * in the middle"); + static const ::wstring wstr2(wstr1); + EXPECT_EQ(wstr1, wstr2); + + // Compares a const wchar_t* to a ::wstring that has identical content. + const wchar_t kTestX8119[] = { 'T', 'e', 's', 't', 0x8119, '\0' }; + ASSERT_EQ(kTestX8119, ::wstring(kTestX8119)); + + // Compares a const wchar_t* to a ::wstring that has different + // content. + const wchar_t kTestX8120[] = { 'T', 'e', 's', 't', 0x8120, '\0' }; + EXPECT_NONFATAL_FAILURE({ // NOLINT + EXPECT_EQ(kTestX8120, ::wstring(kTestX8119)); + }, "Test\\x8119"); + + // Compares a wchar_t* to a ::wstring that has different content. 
+ wchar_t* const p1 = const_cast(L"foo"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p1, ::wstring(L"bar")), + "bar"); + + // Compares two ::wstrings that have different contents, one of which + // having a NUL character in the middle. + static ::wstring wstr3; + wstr3 = wstr1; + wstr3.at(2) = L'\0'; + EXPECT_FATAL_FAILURE(ASSERT_EQ(wstr1, wstr3), + "wstr3"); +} + +#endif // GTEST_HAS_GLOBAL_WSTRING + +// Tests using char pointers in {EXPECT|ASSERT}_EQ. +TEST(EqAssertionTest, CharPointer) { + char* const p0 = NULL; + // Only way to get the Nokia compiler to compile the cast + // is to have a separate void* variable first. Putting + // the two casts on the same line doesn't work, neither does + // a direct C-style to char*. + void* pv1 = (void*)0x1234; // NOLINT + void* pv2 = (void*)0xABC0; // NOLINT + char* const p1 = reinterpret_cast(pv1); + char* const p2 = reinterpret_cast(pv2); + ASSERT_EQ(p1, p1); + + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p0, p2), + "Value of: p2"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p1, p2), + "p2"); + EXPECT_FATAL_FAILURE(ASSERT_EQ(reinterpret_cast(0x1234), + reinterpret_cast(0xABC0)), + "ABC0"); +} + +// Tests using wchar_t pointers in {EXPECT|ASSERT}_EQ. +TEST(EqAssertionTest, WideCharPointer) { + wchar_t* const p0 = NULL; + // Only way to get the Nokia compiler to compile the cast + // is to have a separate void* variable first. Putting + // the two casts on the same line doesn't work, neither does + // a direct C-style to char*. + void* pv1 = (void*)0x1234; // NOLINT + void* pv2 = (void*)0xABC0; // NOLINT + wchar_t* const p1 = reinterpret_cast(pv1); + wchar_t* const p2 = reinterpret_cast(pv2); + EXPECT_EQ(p0, p0); + + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p0, p2), + "Value of: p2"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p1, p2), + "p2"); + void* pv3 = (void*)0x1234; // NOLINT + void* pv4 = (void*)0xABC0; // NOLINT + const wchar_t* p3 = reinterpret_cast(pv3); + const wchar_t* p4 = reinterpret_cast(pv4); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(p3, p4), + "p4"); +} + +// Tests using other types of pointers in {EXPECT|ASSERT}_EQ. +TEST(EqAssertionTest, OtherPointer) { + ASSERT_EQ(static_cast(NULL), + static_cast(NULL)); + EXPECT_FATAL_FAILURE(ASSERT_EQ(static_cast(NULL), + reinterpret_cast(0x1234)), + "0x1234"); +} + +// A class that supports binary comparison operators but not streaming. +class UnprintableChar { + public: + explicit UnprintableChar(char ch) : char_(ch) {} + + bool operator==(const UnprintableChar& rhs) const { + return char_ == rhs.char_; + } + bool operator!=(const UnprintableChar& rhs) const { + return char_ != rhs.char_; + } + bool operator<(const UnprintableChar& rhs) const { + return char_ < rhs.char_; + } + bool operator<=(const UnprintableChar& rhs) const { + return char_ <= rhs.char_; + } + bool operator>(const UnprintableChar& rhs) const { + return char_ > rhs.char_; + } + bool operator>=(const UnprintableChar& rhs) const { + return char_ >= rhs.char_; + } + + private: + char char_; +}; + +// Tests that ASSERT_EQ() and friends don't require the arguments to +// be printable. 
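When a comparison on a type like UnprintableChar fails, gtest falls back to a raw byte dump such as "1-byte object <78>". Supplying an operator<< in the type's own namespace (found via argument-dependent lookup) yields readable failure output instead; a sketch with a hypothetical Point type:

#include <ostream>
#include "gtest/gtest.h"

namespace myns {
struct Point {
  int x, y;
  bool operator==(const Point& rhs) const { return x == rhs.x && y == rhs.y; }
};
// Found by argument-dependent lookup; used by gtest when printing failures.
std::ostream& operator<<(std::ostream& os, const Point& p) {
  return os << "(" << p.x << ", " << p.y << ")";
}
}  // namespace myns

TEST(PointTest, PrintsNicely) {
  myns::Point a = {1, 2}, b = {1, 2};
  EXPECT_EQ(a, b);  // on failure prints "(1, 2)" rather than a byte dump
}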
+TEST(ComparisonAssertionTest, AcceptsUnprintableArgs) { + const UnprintableChar x('x'), y('y'); + ASSERT_EQ(x, x); + EXPECT_NE(x, y); + ASSERT_LT(x, y); + EXPECT_LE(x, y); + ASSERT_GT(y, x); + EXPECT_GE(x, x); + + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(x, y), "1-byte object <78>"); + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(x, y), "1-byte object <79>"); + EXPECT_NONFATAL_FAILURE(EXPECT_LT(y, y), "1-byte object <79>"); + EXPECT_NONFATAL_FAILURE(EXPECT_GT(x, y), "1-byte object <78>"); + EXPECT_NONFATAL_FAILURE(EXPECT_GT(x, y), "1-byte object <79>"); + + // Code tested by EXPECT_FATAL_FAILURE cannot reference local + // variables, so we have to write UnprintableChar('x') instead of x. +#ifndef __BORLANDC__ + // ICE's in C++Builder. + EXPECT_FATAL_FAILURE(ASSERT_NE(UnprintableChar('x'), UnprintableChar('x')), + "1-byte object <78>"); + EXPECT_FATAL_FAILURE(ASSERT_LE(UnprintableChar('y'), UnprintableChar('x')), + "1-byte object <78>"); +#endif + EXPECT_FATAL_FAILURE(ASSERT_LE(UnprintableChar('y'), UnprintableChar('x')), + "1-byte object <79>"); + EXPECT_FATAL_FAILURE(ASSERT_GE(UnprintableChar('x'), UnprintableChar('y')), + "1-byte object <78>"); + EXPECT_FATAL_FAILURE(ASSERT_GE(UnprintableChar('x'), UnprintableChar('y')), + "1-byte object <79>"); +} + +// Tests the FRIEND_TEST macro. + +// This class has a private member we want to test. We will test it +// both in a TEST and in a TEST_F. +class Foo { + public: + Foo() {} + + private: + int Bar() const { return 1; } + + // Declares the friend tests that can access the private member + // Bar(). + FRIEND_TEST(FRIEND_TEST_Test, TEST); + FRIEND_TEST(FRIEND_TEST_Test2, TEST_F); +}; + +// Tests that the FRIEND_TEST declaration allows a TEST to access a +// class's private members. This should compile. +TEST(FRIEND_TEST_Test, TEST) { + ASSERT_EQ(1, Foo().Bar()); +} + +// The fixture needed to test using FRIEND_TEST with TEST_F. +class FRIEND_TEST_Test2 : public Test { + protected: + Foo foo; +}; + +// Tests that the FRIEND_TEST declaration allows a TEST_F to access a +// class's private members. This should compile. +TEST_F(FRIEND_TEST_Test2, TEST_F) { + ASSERT_EQ(1, foo.Bar()); +} + +// Tests the life cycle of Test objects. + +// The test fixture for testing the life cycle of Test objects. +// +// This class counts the number of live test objects that uses this +// fixture. +class TestLifeCycleTest : public Test { + protected: + // Constructor. Increments the number of test objects that uses + // this fixture. + TestLifeCycleTest() { count_++; } + + // Destructor. Decrements the number of test objects that uses this + // fixture. + ~TestLifeCycleTest() { count_--; } + + // Returns the number of live test objects that uses this fixture. + int count() const { return count_; } + + private: + static int count_; +}; + +int TestLifeCycleTest::count_ = 0; + +// Tests the life cycle of test objects. +TEST_F(TestLifeCycleTest, Test1) { + // There should be only one test object in this test case that's + // currently alive. + ASSERT_EQ(1, count()); +} + +// Tests the life cycle of test objects. +TEST_F(TestLifeCycleTest, Test2) { + // After Test1 is done and Test2 is started, there should still be + // only one live test object, as the object for Test1 should've been + // deleted. + ASSERT_EQ(1, count()); +} + +} // namespace + +// Tests that the copy constructor works when it is NOT optimized away by +// the compiler. 
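FRIEND_TEST, exercised above, is the supported way to let a specific test read private members; the class names the test up front, and class and test must live in the same namespace. A sketch with a hypothetical Stack class:

#include "gtest/gtest.h"
#include "gtest/gtest_prod.h"  // defines FRIEND_TEST for production code

class Stack {
 public:
  void Push(int v) { data_[size_++] = v; }  // no bounds check; sketch only
 private:
  int data_[16];
  int size_ = 0;
  FRIEND_TEST(StackTest, TracksSizePrivately);  // grants this test access
};

TEST(StackTest, TracksSizePrivately) {
  Stack s;
  s.Push(5);
  EXPECT_EQ(1, s.size_);  // legal only because of the FRIEND_TEST above
}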
+TEST(AssertionResultTest, CopyConstructorWorksWhenNotOptimied) { + // Checks that the copy constructor doesn't try to dereference NULL pointers + // in the source object. + AssertionResult r1 = AssertionSuccess(); + AssertionResult r2 = r1; + // The following line is added to prevent the compiler from optimizing + // away the constructor call. + r1 << "abc"; + + AssertionResult r3 = r1; + EXPECT_EQ(static_cast(r3), static_cast(r1)); + EXPECT_STREQ("abc", r1.message()); +} + +// Tests that AssertionSuccess and AssertionFailure construct +// AssertionResult objects as expected. +TEST(AssertionResultTest, ConstructionWorks) { + AssertionResult r1 = AssertionSuccess(); + EXPECT_TRUE(r1); + EXPECT_STREQ("", r1.message()); + + AssertionResult r2 = AssertionSuccess() << "abc"; + EXPECT_TRUE(r2); + EXPECT_STREQ("abc", r2.message()); + + AssertionResult r3 = AssertionFailure(); + EXPECT_FALSE(r3); + EXPECT_STREQ("", r3.message()); + + AssertionResult r4 = AssertionFailure() << "def"; + EXPECT_FALSE(r4); + EXPECT_STREQ("def", r4.message()); + + AssertionResult r5 = AssertionFailure(Message() << "ghi"); + EXPECT_FALSE(r5); + EXPECT_STREQ("ghi", r5.message()); +} + +// Tests that the negation flips the predicate result but keeps the message. +TEST(AssertionResultTest, NegationWorks) { + AssertionResult r1 = AssertionSuccess() << "abc"; + EXPECT_FALSE(!r1); + EXPECT_STREQ("abc", (!r1).message()); + + AssertionResult r2 = AssertionFailure() << "def"; + EXPECT_TRUE(!r2); + EXPECT_STREQ("def", (!r2).message()); +} + +TEST(AssertionResultTest, StreamingWorks) { + AssertionResult r = AssertionSuccess(); + r << "abc" << 'd' << 0 << true; + EXPECT_STREQ("abcd0true", r.message()); +} + +TEST(AssertionResultTest, CanStreamOstreamManipulators) { + AssertionResult r = AssertionSuccess(); + r << "Data" << std::endl << std::flush << std::ends << "Will be visible"; + EXPECT_STREQ("Data\n\\0Will be visible", r.message()); +} + +// Tests streaming a user type whose definition and operator << are +// both in the global namespace. +class Base { + public: + explicit Base(int an_x) : x_(an_x) {} + int x() const { return x_; } + private: + int x_; +}; +std::ostream& operator<<(std::ostream& os, + const Base& val) { + return os << val.x(); +} +std::ostream& operator<<(std::ostream& os, + const Base* pointer) { + return os << "(" << pointer->x() << ")"; +} + +TEST(MessageTest, CanStreamUserTypeInGlobalNameSpace) { + Message msg; + Base a(1); + + msg << a << &a; // Uses ::operator<<. + EXPECT_STREQ("1(1)", msg.GetString().c_str()); +} + +// Tests streaming a user type whose definition and operator<< are +// both in an unnamed namespace. +namespace { +class MyTypeInUnnamedNameSpace : public Base { + public: + explicit MyTypeInUnnamedNameSpace(int an_x): Base(an_x) {} +}; +std::ostream& operator<<(std::ostream& os, + const MyTypeInUnnamedNameSpace& val) { + return os << val.x(); +} +std::ostream& operator<<(std::ostream& os, + const MyTypeInUnnamedNameSpace* pointer) { + return os << "(" << pointer->x() << ")"; +} +} // namespace + +TEST(MessageTest, CanStreamUserTypeInUnnamedNameSpace) { + Message msg; + MyTypeInUnnamedNameSpace a(1); + + msg << a << &a; // Uses ::operator<<. + EXPECT_STREQ("1(1)", msg.GetString().c_str()); +} + +// Tests streaming a user type whose definition and operator<< are +// both in a user namespace. 
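Beyond these framework-internal checks, AssertionSuccess() and AssertionFailure() are the public building blocks for self-describing predicates used with EXPECT_TRUE; a sketch:

#include "gtest/gtest.h"

// A predicate whose failure message explains itself.
::testing::AssertionResult IsEven(int n) {
  if (n % 2 == 0) return ::testing::AssertionSuccess();
  return ::testing::AssertionFailure() << n << " is odd";
}

TEST(IsEvenTest, SelfDescribingFailure) {
  EXPECT_TRUE(IsEven(4));
  // EXPECT_TRUE(IsEven(5)) would report "Value of: IsEven(5) ... 5 is odd".
}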
+namespace namespace1 { +class MyTypeInNameSpace1 : public Base { + public: + explicit MyTypeInNameSpace1(int an_x): Base(an_x) {} +}; +std::ostream& operator<<(std::ostream& os, + const MyTypeInNameSpace1& val) { + return os << val.x(); +} +std::ostream& operator<<(std::ostream& os, + const MyTypeInNameSpace1* pointer) { + return os << "(" << pointer->x() << ")"; +} +} // namespace namespace1 + +TEST(MessageTest, CanStreamUserTypeInUserNameSpace) { + Message msg; + namespace1::MyTypeInNameSpace1 a(1); + + msg << a << &a; // Uses namespace1::operator<<. + EXPECT_STREQ("1(1)", msg.GetString().c_str()); +} + +// Tests streaming a user type whose definition is in a user namespace +// but whose operator<< is in the global namespace. +namespace namespace2 { +class MyTypeInNameSpace2 : public ::Base { + public: + explicit MyTypeInNameSpace2(int an_x): Base(an_x) {} +}; +} // namespace namespace2 +std::ostream& operator<<(std::ostream& os, + const namespace2::MyTypeInNameSpace2& val) { + return os << val.x(); +} +std::ostream& operator<<(std::ostream& os, + const namespace2::MyTypeInNameSpace2* pointer) { + return os << "(" << pointer->x() << ")"; +} + +TEST(MessageTest, CanStreamUserTypeInUserNameSpaceWithStreamOperatorInGlobal) { + Message msg; + namespace2::MyTypeInNameSpace2 a(1); + + msg << a << &a; // Uses ::operator<<. + EXPECT_STREQ("1(1)", msg.GetString().c_str()); +} + +// Tests streaming NULL pointers to testing::Message. +TEST(MessageTest, NullPointers) { + Message msg; + char* const p1 = NULL; + unsigned char* const p2 = NULL; + int* p3 = NULL; + double* p4 = NULL; + bool* p5 = NULL; + Message* p6 = NULL; + + msg << p1 << p2 << p3 << p4 << p5 << p6; + ASSERT_STREQ("(null)(null)(null)(null)(null)(null)", + msg.GetString().c_str()); +} + +// Tests streaming wide strings to testing::Message. +TEST(MessageTest, WideStrings) { + // Streams a NULL of type const wchar_t*. + const wchar_t* const_wstr = NULL; + EXPECT_STREQ("(null)", + (Message() << const_wstr).GetString().c_str()); + + // Streams a NULL of type wchar_t*. + wchar_t* wstr = NULL; + EXPECT_STREQ("(null)", + (Message() << wstr).GetString().c_str()); + + // Streams a non-NULL of type const wchar_t*. + const_wstr = L"abc\x8119"; + EXPECT_STREQ("abc\xe8\x84\x99", + (Message() << const_wstr).GetString().c_str()); + + // Streams a non-NULL of type wchar_t*. + wstr = const_cast(const_wstr); + EXPECT_STREQ("abc\xe8\x84\x99", + (Message() << wstr).GetString().c_str()); +} + + +// This line tests that we can define tests in the testing namespace. +namespace testing { + +// Tests the TestInfo class. + +class TestInfoTest : public Test { + protected: + static const TestInfo* GetTestInfo(const char* test_name) { + const TestCase* const test_case = GetUnitTestImpl()-> + GetTestCase("TestInfoTest", "", NULL, NULL); + + for (int i = 0; i < test_case->total_test_count(); ++i) { + const TestInfo* const test_info = test_case->GetTestInfo(i); + if (strcmp(test_name, test_info->name()) == 0) + return test_info; + } + return NULL; + } + + static const TestResult* GetTestResult( + const TestInfo* test_info) { + return test_info->result(); + } +}; + +// Tests TestInfo::test_case_name() and TestInfo::name(). +TEST_F(TestInfoTest, Names) { + const TestInfo* const test_info = GetTestInfo("Names"); + + ASSERT_STREQ("TestInfoTest", test_info->test_case_name()); + ASSERT_STREQ("Names", test_info->name()); +} + +// Tests TestInfo::result(). 
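current_test_info(), which the next fixture inspects, is also useful from client code, for instance to name per-test artifacts after the running test. A sketch; CurrentTestFullName is a hypothetical helper:

#include <string>
#include "gtest/gtest.h"

// Builds a name like "MyCase.MyTest" for the currently running test.
std::string CurrentTestFullName() {
  const ::testing::TestInfo* const info =
      ::testing::UnitTest::GetInstance()->current_test_info();
  return info == NULL
      ? "<none>"  // no test is running
      : std::string(info->test_case_name()) + "." + info->name();
}

TEST(CurrentTestInfoDemo, KnowsItsOwnName) {
  EXPECT_EQ("CurrentTestInfoDemo.KnowsItsOwnName", CurrentTestFullName());
}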
+TEST_F(TestInfoTest, result) { + const TestInfo* const test_info = GetTestInfo("result"); + + // Initially, there is no TestPartResult for this test. + ASSERT_EQ(0, GetTestResult(test_info)->total_part_count()); + + // After the previous assertion, there is still none. + ASSERT_EQ(0, GetTestResult(test_info)->total_part_count()); +} + +// Tests setting up and tearing down a test case. + +class SetUpTestCaseTest : public Test { + protected: + // This will be called once before the first test in this test case + // is run. + static void SetUpTestCase() { + printf("Setting up the test case . . .\n"); + + // Initializes some shared resource. In this simple example, we + // just create a C string. More complex stuff can be done if + // desired. + shared_resource_ = "123"; + + // Increments the number of test cases that have been set up. + counter_++; + + // SetUpTestCase() should be called only once. + EXPECT_EQ(1, counter_); + } + + // This will be called once after the last test in this test case is + // run. + static void TearDownTestCase() { + printf("Tearing down the test case . . .\n"); + + // Decrements the number of test cases that have been set up. + counter_--; + + // TearDownTestCase() should be called only once. + EXPECT_EQ(0, counter_); + + // Cleans up the shared resource. + shared_resource_ = NULL; + } + + // This will be called before each test in this test case. + virtual void SetUp() { + // SetUpTestCase() should be called only once, so counter_ should + // always be 1. + EXPECT_EQ(1, counter_); + } + + // Number of test cases that have been set up. + static int counter_; + + // Some resource to be shared by all tests in this test case. + static const char* shared_resource_; +}; + +int SetUpTestCaseTest::counter_ = 0; +const char* SetUpTestCaseTest::shared_resource_ = NULL; + +// A test that uses the shared resource. +TEST_F(SetUpTestCaseTest, Test1) { + EXPECT_STRNE(NULL, shared_resource_); +} + +// Another test that uses the shared resource. +TEST_F(SetUpTestCaseTest, Test2) { + EXPECT_STREQ("123", shared_resource_); +} + +// The InitGoogleTestTest test case tests testing::InitGoogleTest(). + +// The Flags struct stores a copy of all Google Test flags. +struct Flags { + // Constructs a Flags struct where each flag has its default value. + Flags() : also_run_disabled_tests(false), + break_on_failure(false), + catch_exceptions(false), + death_test_use_fork(false), + filter(""), + list_tests(false), + output(""), + print_time(true), + random_seed(0), + repeat(1), + shuffle(false), + stack_trace_depth(kMaxStackTraceDepth), + stream_result_to(""), + throw_on_failure(false) {} + + // Factory methods. + + // Creates a Flags struct where the gtest_also_run_disabled_tests flag has + // the given value. + static Flags AlsoRunDisabledTests(bool also_run_disabled_tests) { + Flags flags; + flags.also_run_disabled_tests = also_run_disabled_tests; + return flags; + } + + // Creates a Flags struct where the gtest_break_on_failure flag has + // the given value. + static Flags BreakOnFailure(bool break_on_failure) { + Flags flags; + flags.break_on_failure = break_on_failure; + return flags; + } + + // Creates a Flags struct where the gtest_catch_exceptions flag has + // the given value. + static Flags CatchExceptions(bool catch_exceptions) { + Flags flags; + flags.catch_exceptions = catch_exceptions; + return flags; + } + + // Creates a Flags struct where the gtest_death_test_use_fork flag has + // the given value. 
+ static Flags DeathTestUseFork(bool death_test_use_fork) { + Flags flags; + flags.death_test_use_fork = death_test_use_fork; + return flags; + } + + // Creates a Flags struct where the gtest_filter flag has the given + // value. + static Flags Filter(const char* filter) { + Flags flags; + flags.filter = filter; + return flags; + } + + // Creates a Flags struct where the gtest_list_tests flag has the + // given value. + static Flags ListTests(bool list_tests) { + Flags flags; + flags.list_tests = list_tests; + return flags; + } + + // Creates a Flags struct where the gtest_output flag has the given + // value. + static Flags Output(const char* output) { + Flags flags; + flags.output = output; + return flags; + } + + // Creates a Flags struct where the gtest_print_time flag has the given + // value. + static Flags PrintTime(bool print_time) { + Flags flags; + flags.print_time = print_time; + return flags; + } + + // Creates a Flags struct where the gtest_random_seed flag has + // the given value. + static Flags RandomSeed(Int32 random_seed) { + Flags flags; + flags.random_seed = random_seed; + return flags; + } + + // Creates a Flags struct where the gtest_repeat flag has the given + // value. + static Flags Repeat(Int32 repeat) { + Flags flags; + flags.repeat = repeat; + return flags; + } + + // Creates a Flags struct where the gtest_shuffle flag has + // the given value. + static Flags Shuffle(bool shuffle) { + Flags flags; + flags.shuffle = shuffle; + return flags; + } + + // Creates a Flags struct where the GTEST_FLAG(stack_trace_depth) flag has + // the given value. + static Flags StackTraceDepth(Int32 stack_trace_depth) { + Flags flags; + flags.stack_trace_depth = stack_trace_depth; + return flags; + } + + // Creates a Flags struct where the GTEST_FLAG(stream_result_to) flag has + // the given value. + static Flags StreamResultTo(const char* stream_result_to) { + Flags flags; + flags.stream_result_to = stream_result_to; + return flags; + } + + // Creates a Flags struct where the gtest_throw_on_failure flag has + // the given value. + static Flags ThrowOnFailure(bool throw_on_failure) { + Flags flags; + flags.throw_on_failure = throw_on_failure; + return flags; + } + + // These fields store the flag values. + bool also_run_disabled_tests; + bool break_on_failure; + bool catch_exceptions; + bool death_test_use_fork; + const char* filter; + bool list_tests; + const char* output; + bool print_time; + Int32 random_seed; + Int32 repeat; + bool shuffle; + Int32 stack_trace_depth; + const char* stream_result_to; + bool throw_on_failure; +}; + +// Fixture for testing InitGoogleTest(). +class InitGoogleTestTest : public Test { + protected: + // Clears the flags before each test. + virtual void SetUp() { + GTEST_FLAG(also_run_disabled_tests) = false; + GTEST_FLAG(break_on_failure) = false; + GTEST_FLAG(catch_exceptions) = false; + GTEST_FLAG(death_test_use_fork) = false; + GTEST_FLAG(filter) = ""; + GTEST_FLAG(list_tests) = false; + GTEST_FLAG(output) = ""; + GTEST_FLAG(print_time) = true; + GTEST_FLAG(random_seed) = 0; + GTEST_FLAG(repeat) = 1; + GTEST_FLAG(shuffle) = false; + GTEST_FLAG(stack_trace_depth) = kMaxStackTraceDepth; + GTEST_FLAG(stream_result_to) = ""; + GTEST_FLAG(throw_on_failure) = false; + } + + // Asserts that two narrow or wide string arrays are equal. 
+ template + static void AssertStringArrayEq(size_t size1, CharType** array1, + size_t size2, CharType** array2) { + ASSERT_EQ(size1, size2) << " Array sizes different."; + + for (size_t i = 0; i != size1; i++) { + ASSERT_STREQ(array1[i], array2[i]) << " where i == " << i; + } + } + + // Verifies that the flag values match the expected values. + static void CheckFlags(const Flags& expected) { + EXPECT_EQ(expected.also_run_disabled_tests, + GTEST_FLAG(also_run_disabled_tests)); + EXPECT_EQ(expected.break_on_failure, GTEST_FLAG(break_on_failure)); + EXPECT_EQ(expected.catch_exceptions, GTEST_FLAG(catch_exceptions)); + EXPECT_EQ(expected.death_test_use_fork, GTEST_FLAG(death_test_use_fork)); + EXPECT_STREQ(expected.filter, GTEST_FLAG(filter).c_str()); + EXPECT_EQ(expected.list_tests, GTEST_FLAG(list_tests)); + EXPECT_STREQ(expected.output, GTEST_FLAG(output).c_str()); + EXPECT_EQ(expected.print_time, GTEST_FLAG(print_time)); + EXPECT_EQ(expected.random_seed, GTEST_FLAG(random_seed)); + EXPECT_EQ(expected.repeat, GTEST_FLAG(repeat)); + EXPECT_EQ(expected.shuffle, GTEST_FLAG(shuffle)); + EXPECT_EQ(expected.stack_trace_depth, GTEST_FLAG(stack_trace_depth)); + EXPECT_STREQ(expected.stream_result_to, + GTEST_FLAG(stream_result_to).c_str()); + EXPECT_EQ(expected.throw_on_failure, GTEST_FLAG(throw_on_failure)); + } + + // Parses a command line (specified by argc1 and argv1), then + // verifies that the flag values are expected and that the + // recognized flags are removed from the command line. + template + static void TestParsingFlags(int argc1, const CharType** argv1, + int argc2, const CharType** argv2, + const Flags& expected, bool should_print_help) { + const bool saved_help_flag = ::testing::internal::g_help_flag; + ::testing::internal::g_help_flag = false; + +#if GTEST_HAS_STREAM_REDIRECTION + CaptureStdout(); +#endif + + // Parses the command line. + internal::ParseGoogleTestFlagsOnly(&argc1, const_cast(argv1)); + +#if GTEST_HAS_STREAM_REDIRECTION + const std::string captured_stdout = GetCapturedStdout(); +#endif + + // Verifies the flag values. + CheckFlags(expected); + + // Verifies that the recognized flags are removed from the command + // line. + AssertStringArrayEq(argc1 + 1, argv1, argc2 + 1, argv2); + + // ParseGoogleTestFlagsOnly should neither set g_help_flag nor print the + // help message for the flags it recognizes. + EXPECT_EQ(should_print_help, ::testing::internal::g_help_flag); + +#if GTEST_HAS_STREAM_REDIRECTION + const char* const expected_help_fragment = + "This program contains tests written using"; + if (should_print_help) { + EXPECT_PRED_FORMAT2(IsSubstring, expected_help_fragment, captured_stdout); + } else { + EXPECT_PRED_FORMAT2(IsNotSubstring, + expected_help_fragment, captured_stdout); + } +#endif // GTEST_HAS_STREAM_REDIRECTION + + ::testing::internal::g_help_flag = saved_help_flag; + } + + // This macro wraps TestParsingFlags s.t. the user doesn't need + // to specify the array sizes. + +#define GTEST_TEST_PARSING_FLAGS_(argv1, argv2, expected, should_print_help) \ + TestParsingFlags(sizeof(argv1)/sizeof(*argv1) - 1, argv1, \ + sizeof(argv2)/sizeof(*argv2) - 1, argv2, \ + expected, should_print_help) +}; + +// Tests parsing an empty command line. +TEST_F(InitGoogleTestTest, Empty) { + const char* argv[] = { + NULL + }; + + const char* argv2[] = { + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags(), false); +} + +// Tests parsing a command line that has no flag. 
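The TestParsingFlags harness above drives ParseGoogleTestFlagsOnly directly; an ordinary test binary gets the same parsing through InitGoogleTest(), which consumes the recognized --gtest_* flags and leaves everything else in argv. A sketch of the canonical main:

#include "gtest/gtest.h"

int main(int argc, char** argv) {
  // Removes recognized --gtest_* flags from argc/argv and sets the
  // corresponding GTEST_FLAG() variables.
  ::testing::InitGoogleTest(&argc, argv);
  // Any remaining arguments are left for the application to interpret.
  return RUN_ALL_TESTS();
}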
+TEST_F(InitGoogleTestTest, NoFlag) { + const char* argv[] = { + "foo.exe", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags(), false); +} + +// Tests parsing a bad --gtest_filter flag. +TEST_F(InitGoogleTestTest, FilterBad) { + const char* argv[] = { + "foo.exe", + "--gtest_filter", + NULL + }; + + const char* argv2[] = { + "foo.exe", + "--gtest_filter", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter(""), true); +} + +// Tests parsing an empty --gtest_filter flag. +TEST_F(InitGoogleTestTest, FilterEmpty) { + const char* argv[] = { + "foo.exe", + "--gtest_filter=", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter(""), false); +} + +// Tests parsing a non-empty --gtest_filter flag. +TEST_F(InitGoogleTestTest, FilterNonEmpty) { + const char* argv[] = { + "foo.exe", + "--gtest_filter=abc", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter("abc"), false); +} + +// Tests parsing --gtest_break_on_failure. +TEST_F(InitGoogleTestTest, BreakOnFailureWithoutValue) { + const char* argv[] = { + "foo.exe", + "--gtest_break_on_failure", + NULL +}; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(true), false); +} + +// Tests parsing --gtest_break_on_failure=0. +TEST_F(InitGoogleTestTest, BreakOnFailureFalse_0) { + const char* argv[] = { + "foo.exe", + "--gtest_break_on_failure=0", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(false), false); +} + +// Tests parsing --gtest_break_on_failure=f. +TEST_F(InitGoogleTestTest, BreakOnFailureFalse_f) { + const char* argv[] = { + "foo.exe", + "--gtest_break_on_failure=f", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(false), false); +} + +// Tests parsing --gtest_break_on_failure=F. +TEST_F(InitGoogleTestTest, BreakOnFailureFalse_F) { + const char* argv[] = { + "foo.exe", + "--gtest_break_on_failure=F", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(false), false); +} + +// Tests parsing a --gtest_break_on_failure flag that has a "true" +// definition. +TEST_F(InitGoogleTestTest, BreakOnFailureTrue) { + const char* argv[] = { + "foo.exe", + "--gtest_break_on_failure=1", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::BreakOnFailure(true), false); +} + +// Tests parsing --gtest_catch_exceptions. +TEST_F(InitGoogleTestTest, CatchExceptions) { + const char* argv[] = { + "foo.exe", + "--gtest_catch_exceptions", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::CatchExceptions(true), false); +} + +// Tests parsing --gtest_death_test_use_fork. +TEST_F(InitGoogleTestTest, DeathTestUseFork) { + const char* argv[] = { + "foo.exe", + "--gtest_death_test_use_fork", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::DeathTestUseFork(true), false); +} + +// Tests having the same flag twice with different values. The +// expected behavior is that the one coming last takes precedence. 
+TEST_F(InitGoogleTestTest, DuplicatedFlags) { + const char* argv[] = { + "foo.exe", + "--gtest_filter=a", + "--gtest_filter=b", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Filter("b"), false); +} + +// Tests having an unrecognized flag on the command line. +TEST_F(InitGoogleTestTest, UnrecognizedFlag) { + const char* argv[] = { + "foo.exe", + "--gtest_break_on_failure", + "bar", // Unrecognized by Google Test. + "--gtest_filter=b", + NULL + }; + + const char* argv2[] = { + "foo.exe", + "bar", + NULL + }; + + Flags flags; + flags.break_on_failure = true; + flags.filter = "b"; + GTEST_TEST_PARSING_FLAGS_(argv, argv2, flags, false); +} + +// Tests having a --gtest_list_tests flag +TEST_F(InitGoogleTestTest, ListTestsFlag) { + const char* argv[] = { + "foo.exe", + "--gtest_list_tests", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(true), false); +} + +// Tests having a --gtest_list_tests flag with a "true" value +TEST_F(InitGoogleTestTest, ListTestsTrue) { + const char* argv[] = { + "foo.exe", + "--gtest_list_tests=1", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(true), false); +} + +// Tests having a --gtest_list_tests flag with a "false" value +TEST_F(InitGoogleTestTest, ListTestsFalse) { + const char* argv[] = { + "foo.exe", + "--gtest_list_tests=0", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(false), false); +} + +// Tests parsing --gtest_list_tests=f. +TEST_F(InitGoogleTestTest, ListTestsFalse_f) { + const char* argv[] = { + "foo.exe", + "--gtest_list_tests=f", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(false), false); +} + +// Tests parsing --gtest_list_tests=F. +TEST_F(InitGoogleTestTest, ListTestsFalse_F) { + const char* argv[] = { + "foo.exe", + "--gtest_list_tests=F", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ListTests(false), false); +} + +// Tests parsing --gtest_output (invalid). 
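The UnrecognizedFlag case above is what makes mixed command lines work: gtest strips only the flags it owns, so an application can parse the leftovers afterwards. A sketch (the printf reporting is illustrative):

#include <cstdio>
#include "gtest/gtest.h"

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);  // strips --gtest_* flags only
  for (int i = 1; i < argc; i++) {
    // e.g. "bar" from "foo.exe --gtest_filter=b bar" survives, as tested
    // above, and can be handled by the application here.
    printf("app flag: %s\n", argv[i]);
  }
  return RUN_ALL_TESTS();
}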
+TEST_F(InitGoogleTestTest, OutputEmpty) { + const char* argv[] = { + "foo.exe", + "--gtest_output", + NULL + }; + + const char* argv2[] = { + "foo.exe", + "--gtest_output", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags(), true); +} + +// Tests parsing --gtest_output=xml +TEST_F(InitGoogleTestTest, OutputXml) { + const char* argv[] = { + "foo.exe", + "--gtest_output=xml", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Output("xml"), false); +} + +// Tests parsing --gtest_output=xml:file +TEST_F(InitGoogleTestTest, OutputXmlFile) { + const char* argv[] = { + "foo.exe", + "--gtest_output=xml:file", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Output("xml:file"), false); +} + +// Tests parsing --gtest_output=xml:directory/path/ +TEST_F(InitGoogleTestTest, OutputXmlDirectory) { + const char* argv[] = { + "foo.exe", + "--gtest_output=xml:directory/path/", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, + Flags::Output("xml:directory/path/"), false); +} + +// Tests having a --gtest_print_time flag +TEST_F(InitGoogleTestTest, PrintTimeFlag) { + const char* argv[] = { + "foo.exe", + "--gtest_print_time", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(true), false); +} + +// Tests having a --gtest_print_time flag with a "true" value +TEST_F(InitGoogleTestTest, PrintTimeTrue) { + const char* argv[] = { + "foo.exe", + "--gtest_print_time=1", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(true), false); +} + +// Tests having a --gtest_print_time flag with a "false" value +TEST_F(InitGoogleTestTest, PrintTimeFalse) { + const char* argv[] = { + "foo.exe", + "--gtest_print_time=0", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(false), false); +} + +// Tests parsing --gtest_print_time=f. +TEST_F(InitGoogleTestTest, PrintTimeFalse_f) { + const char* argv[] = { + "foo.exe", + "--gtest_print_time=f", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(false), false); +} + +// Tests parsing --gtest_print_time=F. 
+TEST_F(InitGoogleTestTest, PrintTimeFalse_F) { + const char* argv[] = { + "foo.exe", + "--gtest_print_time=F", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::PrintTime(false), false); +} + +// Tests parsing --gtest_random_seed=number +TEST_F(InitGoogleTestTest, RandomSeed) { + const char* argv[] = { + "foo.exe", + "--gtest_random_seed=1000", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::RandomSeed(1000), false); +} + +// Tests parsing --gtest_repeat=number +TEST_F(InitGoogleTestTest, Repeat) { + const char* argv[] = { + "foo.exe", + "--gtest_repeat=1000", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Repeat(1000), false); +} + +// Tests having a --gtest_also_run_disabled_tests flag +TEST_F(InitGoogleTestTest, AlsoRunDisabledTestsFlag) { + const char* argv[] = { + "foo.exe", + "--gtest_also_run_disabled_tests", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, + Flags::AlsoRunDisabledTests(true), false); +} + +// Tests having a --gtest_also_run_disabled_tests flag with a "true" value +TEST_F(InitGoogleTestTest, AlsoRunDisabledTestsTrue) { + const char* argv[] = { + "foo.exe", + "--gtest_also_run_disabled_tests=1", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, + Flags::AlsoRunDisabledTests(true), false); +} + +// Tests having a --gtest_also_run_disabled_tests flag with a "false" value +TEST_F(InitGoogleTestTest, AlsoRunDisabledTestsFalse) { + const char* argv[] = { + "foo.exe", + "--gtest_also_run_disabled_tests=0", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, + Flags::AlsoRunDisabledTests(false), false); +} + +// Tests parsing --gtest_shuffle. +TEST_F(InitGoogleTestTest, ShuffleWithoutValue) { + const char* argv[] = { + "foo.exe", + "--gtest_shuffle", + NULL +}; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Shuffle(true), false); +} + +// Tests parsing --gtest_shuffle=0. +TEST_F(InitGoogleTestTest, ShuffleFalse_0) { + const char* argv[] = { + "foo.exe", + "--gtest_shuffle=0", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Shuffle(false), false); +} + +// Tests parsing a --gtest_shuffle flag that has a "true" +// definition. +TEST_F(InitGoogleTestTest, ShuffleTrue) { + const char* argv[] = { + "foo.exe", + "--gtest_shuffle=1", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::Shuffle(true), false); +} + +// Tests parsing --gtest_stack_trace_depth=number. +TEST_F(InitGoogleTestTest, StackTraceDepth) { + const char* argv[] = { + "foo.exe", + "--gtest_stack_trace_depth=5", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::StackTraceDepth(5), false); +} + +TEST_F(InitGoogleTestTest, StreamResultTo) { + const char* argv[] = { + "foo.exe", + "--gtest_stream_result_to=localhost:1234", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_( + argv, argv2, Flags::StreamResultTo("localhost:1234"), false); +} + +// Tests parsing --gtest_throw_on_failure. 
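The shuffle, random_seed, and repeat flags parsed above combine well when hunting order-dependent or flaky tests; a shuffled run logs its seed, which can then be pinned from code as below (the particular values are examples):

#include "gtest/gtest.h"

int main(int argc, char** argv) {
  ::testing::InitGoogleTest(&argc, argv);
  ::testing::GTEST_FLAG(shuffle) = true;      // randomize test order
  ::testing::GTEST_FLAG(random_seed) = 1000;  // reproduce a reported run
  ::testing::GTEST_FLAG(repeat) = 100;        // loop to flush out flakiness
  return RUN_ALL_TESTS();
}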
+TEST_F(InitGoogleTestTest, ThrowOnFailureWithoutValue) { + const char* argv[] = { + "foo.exe", + "--gtest_throw_on_failure", + NULL +}; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ThrowOnFailure(true), false); +} + +// Tests parsing --gtest_throw_on_failure=0. +TEST_F(InitGoogleTestTest, ThrowOnFailureFalse_0) { + const char* argv[] = { + "foo.exe", + "--gtest_throw_on_failure=0", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ThrowOnFailure(false), false); +} + +// Tests parsing a --gtest_throw_on_failure flag that has a "true" +// definition. +TEST_F(InitGoogleTestTest, ThrowOnFailureTrue) { + const char* argv[] = { + "foo.exe", + "--gtest_throw_on_failure=1", + NULL + }; + + const char* argv2[] = { + "foo.exe", + NULL + }; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, Flags::ThrowOnFailure(true), false); +} + +#if GTEST_OS_WINDOWS +// Tests parsing wide strings. +TEST_F(InitGoogleTestTest, WideStrings) { + const wchar_t* argv[] = { + L"foo.exe", + L"--gtest_filter=Foo*", + L"--gtest_list_tests=1", + L"--gtest_break_on_failure", + L"--non_gtest_flag", + NULL + }; + + const wchar_t* argv2[] = { + L"foo.exe", + L"--non_gtest_flag", + NULL + }; + + Flags expected_flags; + expected_flags.break_on_failure = true; + expected_flags.filter = "Foo*"; + expected_flags.list_tests = true; + + GTEST_TEST_PARSING_FLAGS_(argv, argv2, expected_flags, false); +} +#endif // GTEST_OS_WINDOWS + +// Tests current_test_info() in UnitTest. +class CurrentTestInfoTest : public Test { + protected: + // Tests that current_test_info() returns NULL before the first test in + // the test case is run. + static void SetUpTestCase() { + // There should be no tests running at this point. + const TestInfo* test_info = + UnitTest::GetInstance()->current_test_info(); + EXPECT_TRUE(test_info == NULL) + << "There should be no tests running at this point."; + } + + // Tests that current_test_info() returns NULL after the last test in + // the test case has run. + static void TearDownTestCase() { + const TestInfo* test_info = + UnitTest::GetInstance()->current_test_info(); + EXPECT_TRUE(test_info == NULL) + << "There should be no tests running at this point."; + } +}; + +// Tests that current_test_info() returns TestInfo for currently running +// test by checking the expected test name against the actual one. +TEST_F(CurrentTestInfoTest, WorksForFirstTestInATestCase) { + const TestInfo* test_info = + UnitTest::GetInstance()->current_test_info(); + ASSERT_TRUE(NULL != test_info) + << "There is a test running so we should have a valid TestInfo."; + EXPECT_STREQ("CurrentTestInfoTest", test_info->test_case_name()) + << "Expected the name of the currently running test case."; + EXPECT_STREQ("WorksForFirstTestInATestCase", test_info->name()) + << "Expected the name of the currently running test."; +} + +// Tests that current_test_info() returns TestInfo for currently running +// test by checking the expected test name against the actual one. We +// use this test to see that the TestInfo object actually changed from +// the previous invocation. 
+TEST_F(CurrentTestInfoTest, WorksForSecondTestInATestCase) { + const TestInfo* test_info = + UnitTest::GetInstance()->current_test_info(); + ASSERT_TRUE(NULL != test_info) + << "There is a test running so we should have a valid TestInfo."; + EXPECT_STREQ("CurrentTestInfoTest", test_info->test_case_name()) + << "Expected the name of the currently running test case."; + EXPECT_STREQ("WorksForSecondTestInATestCase", test_info->name()) + << "Expected the name of the currently running test."; +} + +} // namespace testing + +// These two lines test that we can define tests in a namespace that +// has the name "testing" and is nested in another namespace. +namespace my_namespace { +namespace testing { + +// Makes sure that TEST knows to use ::testing::Test instead of +// ::my_namespace::testing::Test. +class Test {}; + +// Makes sure that an assertion knows to use ::testing::Message instead of +// ::my_namespace::testing::Message. +class Message {}; + +// Makes sure that an assertion knows to use +// ::testing::AssertionResult instead of +// ::my_namespace::testing::AssertionResult. +class AssertionResult {}; + +// Tests that an assertion that should succeed works as expected. +TEST(NestedTestingNamespaceTest, Success) { + EXPECT_EQ(1, 1) << "This shouldn't fail."; +} + +// Tests that an assertion that should fail works as expected. +TEST(NestedTestingNamespaceTest, Failure) { + EXPECT_FATAL_FAILURE(FAIL() << "This failure is expected.", + "This failure is expected."); +} + +} // namespace testing +} // namespace my_namespace + +// Tests that one can call superclass SetUp and TearDown methods-- +// that is, that they are not private. +// No tests are based on this fixture; the test "passes" if it compiles +// successfully. +class ProtectedFixtureMethodsTest : public Test { + protected: + virtual void SetUp() { + Test::SetUp(); + } + virtual void TearDown() { + Test::TearDown(); + } +}; + +// StreamingAssertionsTest tests the streaming versions of a representative +// sample of assertions. 
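ProtectedFixtureMethodsTest only checks that the base SetUp/TearDown are callable; in practice a fixture overrides them to give every TEST_F a fresh, initialized state. A sketch:

#include <vector>
#include "gtest/gtest.h"

class QueueTest : public ::testing::Test {
 protected:
  virtual void SetUp() { q_.push_back(1); }  // runs before each TEST_F
  virtual void TearDown() { q_.clear(); }    // runs after each TEST_F
  std::vector<int> q_;
};

TEST_F(QueueTest, StartsSeeded) { EXPECT_EQ(1U, q_.size()); }
TEST_F(QueueTest, GetsAFreshFixture) { EXPECT_EQ(1U, q_.size()); }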
+TEST(StreamingAssertionsTest, Unconditional) { + SUCCEED() << "expected success"; + EXPECT_NONFATAL_FAILURE(ADD_FAILURE() << "expected failure", + "expected failure"); + EXPECT_FATAL_FAILURE(FAIL() << "expected failure", + "expected failure"); +} + +#ifdef __BORLANDC__ +// Silences warnings: "Condition is always true", "Unreachable code" +# pragma option push -w-ccc -w-rch +#endif + +TEST(StreamingAssertionsTest, Truth) { + EXPECT_TRUE(true) << "unexpected failure"; + ASSERT_TRUE(true) << "unexpected failure"; + EXPECT_NONFATAL_FAILURE(EXPECT_TRUE(false) << "expected failure", + "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_TRUE(false) << "expected failure", + "expected failure"); +} + +TEST(StreamingAssertionsTest, Truth2) { + EXPECT_FALSE(false) << "unexpected failure"; + ASSERT_FALSE(false) << "unexpected failure"; + EXPECT_NONFATAL_FAILURE(EXPECT_FALSE(true) << "expected failure", + "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_FALSE(true) << "expected failure", + "expected failure"); +} + +#ifdef __BORLANDC__ +// Restores warnings after previous "#pragma option push" supressed them +# pragma option pop +#endif + +TEST(StreamingAssertionsTest, IntegerEquals) { + EXPECT_EQ(1, 1) << "unexpected failure"; + ASSERT_EQ(1, 1) << "unexpected failure"; + EXPECT_NONFATAL_FAILURE(EXPECT_EQ(1, 2) << "expected failure", + "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_EQ(1, 2) << "expected failure", + "expected failure"); +} + +TEST(StreamingAssertionsTest, IntegerLessThan) { + EXPECT_LT(1, 2) << "unexpected failure"; + ASSERT_LT(1, 2) << "unexpected failure"; + EXPECT_NONFATAL_FAILURE(EXPECT_LT(2, 1) << "expected failure", + "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_LT(2, 1) << "expected failure", + "expected failure"); +} + +TEST(StreamingAssertionsTest, StringsEqual) { + EXPECT_STREQ("foo", "foo") << "unexpected failure"; + ASSERT_STREQ("foo", "foo") << "unexpected failure"; + EXPECT_NONFATAL_FAILURE(EXPECT_STREQ("foo", "bar") << "expected failure", + "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_STREQ("foo", "bar") << "expected failure", + "expected failure"); +} + +TEST(StreamingAssertionsTest, StringsNotEqual) { + EXPECT_STRNE("foo", "bar") << "unexpected failure"; + ASSERT_STRNE("foo", "bar") << "unexpected failure"; + EXPECT_NONFATAL_FAILURE(EXPECT_STRNE("foo", "foo") << "expected failure", + "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_STRNE("foo", "foo") << "expected failure", + "expected failure"); +} + +TEST(StreamingAssertionsTest, StringsEqualIgnoringCase) { + EXPECT_STRCASEEQ("foo", "FOO") << "unexpected failure"; + ASSERT_STRCASEEQ("foo", "FOO") << "unexpected failure"; + EXPECT_NONFATAL_FAILURE(EXPECT_STRCASEEQ("foo", "bar") << "expected failure", + "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_STRCASEEQ("foo", "bar") << "expected failure", + "expected failure"); +} + +TEST(StreamingAssertionsTest, StringNotEqualIgnoringCase) { + EXPECT_STRCASENE("foo", "bar") << "unexpected failure"; + ASSERT_STRCASENE("foo", "bar") << "unexpected failure"; + EXPECT_NONFATAL_FAILURE(EXPECT_STRCASENE("foo", "FOO") << "expected failure", + "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_STRCASENE("bar", "BAR") << "expected failure", + "expected failure"); +} + +TEST(StreamingAssertionsTest, FloatingPointEquals) { + EXPECT_FLOAT_EQ(1.0, 1.0) << "unexpected failure"; + ASSERT_FLOAT_EQ(1.0, 1.0) << "unexpected failure"; + EXPECT_NONFATAL_FAILURE(EXPECT_FLOAT_EQ(0.0, 1.0) << "expected failure", + "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_FLOAT_EQ(0.0, 
1.0) << "expected failure", + "expected failure"); +} + +#if GTEST_HAS_EXCEPTIONS + +TEST(StreamingAssertionsTest, Throw) { + EXPECT_THROW(ThrowAnInteger(), int) << "unexpected failure"; + ASSERT_THROW(ThrowAnInteger(), int) << "unexpected failure"; + EXPECT_NONFATAL_FAILURE(EXPECT_THROW(ThrowAnInteger(), bool) << + "expected failure", "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_THROW(ThrowAnInteger(), bool) << + "expected failure", "expected failure"); +} + +TEST(StreamingAssertionsTest, NoThrow) { + EXPECT_NO_THROW(ThrowNothing()) << "unexpected failure"; + ASSERT_NO_THROW(ThrowNothing()) << "unexpected failure"; + EXPECT_NONFATAL_FAILURE(EXPECT_NO_THROW(ThrowAnInteger()) << + "expected failure", "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_NO_THROW(ThrowAnInteger()) << + "expected failure", "expected failure"); +} + +TEST(StreamingAssertionsTest, AnyThrow) { + EXPECT_ANY_THROW(ThrowAnInteger()) << "unexpected failure"; + ASSERT_ANY_THROW(ThrowAnInteger()) << "unexpected failure"; + EXPECT_NONFATAL_FAILURE(EXPECT_ANY_THROW(ThrowNothing()) << + "expected failure", "expected failure"); + EXPECT_FATAL_FAILURE(ASSERT_ANY_THROW(ThrowNothing()) << + "expected failure", "expected failure"); +} + +#endif // GTEST_HAS_EXCEPTIONS + +// Tests that Google Test correctly decides whether to use colors in the output. + +TEST(ColoredOutputTest, UsesColorsWhenGTestColorFlagIsYes) { + GTEST_FLAG(color) = "yes"; + + SetEnv("TERM", "xterm"); // TERM supports colors. + EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. + EXPECT_TRUE(ShouldUseColor(false)); // Stdout is not a TTY. + + SetEnv("TERM", "dumb"); // TERM doesn't support colors. + EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. + EXPECT_TRUE(ShouldUseColor(false)); // Stdout is not a TTY. +} + +TEST(ColoredOutputTest, UsesColorsWhenGTestColorFlagIsAliasOfYes) { + SetEnv("TERM", "dumb"); // TERM doesn't support colors. + + GTEST_FLAG(color) = "True"; + EXPECT_TRUE(ShouldUseColor(false)); // Stdout is not a TTY. + + GTEST_FLAG(color) = "t"; + EXPECT_TRUE(ShouldUseColor(false)); // Stdout is not a TTY. + + GTEST_FLAG(color) = "1"; + EXPECT_TRUE(ShouldUseColor(false)); // Stdout is not a TTY. +} + +TEST(ColoredOutputTest, UsesNoColorWhenGTestColorFlagIsNo) { + GTEST_FLAG(color) = "no"; + + SetEnv("TERM", "xterm"); // TERM supports colors. + EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. + EXPECT_FALSE(ShouldUseColor(false)); // Stdout is not a TTY. + + SetEnv("TERM", "dumb"); // TERM doesn't support colors. + EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. + EXPECT_FALSE(ShouldUseColor(false)); // Stdout is not a TTY. +} + +TEST(ColoredOutputTest, UsesNoColorWhenGTestColorFlagIsInvalid) { + SetEnv("TERM", "xterm"); // TERM supports colors. + + GTEST_FLAG(color) = "F"; + EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. + + GTEST_FLAG(color) = "0"; + EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. + + GTEST_FLAG(color) = "unknown"; + EXPECT_FALSE(ShouldUseColor(true)); // Stdout is a TTY. +} + +TEST(ColoredOutputTest, UsesColorsWhenStdoutIsTty) { + GTEST_FLAG(color) = "auto"; + + SetEnv("TERM", "xterm"); // TERM supports colors. + EXPECT_FALSE(ShouldUseColor(false)); // Stdout is not a TTY. + EXPECT_TRUE(ShouldUseColor(true)); // Stdout is a TTY. +} + +TEST(ColoredOutputTest, UsesColorsWhenTermSupportsColors) { + GTEST_FLAG(color) = "auto"; + +#if GTEST_OS_WINDOWS + // On Windows, we ignore the TERM variable as it's usually not set. 
+
+  SetEnv("TERM", "dumb");
+  EXPECT_TRUE(ShouldUseColor(true));  // Stdout is a TTY.
+
+  SetEnv("TERM", "");
+  EXPECT_TRUE(ShouldUseColor(true));  // Stdout is a TTY.
+
+  SetEnv("TERM", "xterm");
+  EXPECT_TRUE(ShouldUseColor(true));  // Stdout is a TTY.
+#else
+  // On non-Windows platforms, we rely on TERM to determine if the
+  // terminal supports colors.
+
+  SetEnv("TERM", "dumb");  // TERM doesn't support colors.
+  EXPECT_FALSE(ShouldUseColor(true));  // Stdout is a TTY.
+
+  SetEnv("TERM", "emacs");  // TERM doesn't support colors.
+  EXPECT_FALSE(ShouldUseColor(true));  // Stdout is a TTY.
+
+  SetEnv("TERM", "vt100");  // TERM doesn't support colors.
+  EXPECT_FALSE(ShouldUseColor(true));  // Stdout is a TTY.
+
+  SetEnv("TERM", "xterm-mono");  // TERM doesn't support colors.
+  EXPECT_FALSE(ShouldUseColor(true));  // Stdout is a TTY.
+
+  SetEnv("TERM", "xterm");  // TERM supports colors.
+  EXPECT_TRUE(ShouldUseColor(true));  // Stdout is a TTY.
+
+  SetEnv("TERM", "xterm-color");  // TERM supports colors.
+  EXPECT_TRUE(ShouldUseColor(true));  // Stdout is a TTY.
+
+  SetEnv("TERM", "xterm-256color");  // TERM supports colors.
+  EXPECT_TRUE(ShouldUseColor(true));  // Stdout is a TTY.
+
+  SetEnv("TERM", "screen");  // TERM supports colors.
+  EXPECT_TRUE(ShouldUseColor(true));  // Stdout is a TTY.
+
+  SetEnv("TERM", "screen-256color");  // TERM supports colors.
+  EXPECT_TRUE(ShouldUseColor(true));  // Stdout is a TTY.
+
+  SetEnv("TERM", "linux");  // TERM supports colors.
+  EXPECT_TRUE(ShouldUseColor(true));  // Stdout is a TTY.
+
+  SetEnv("TERM", "cygwin");  // TERM supports colors.
+  EXPECT_TRUE(ShouldUseColor(true));  // Stdout is a TTY.
+#endif  // GTEST_OS_WINDOWS
+}
+
+// Verifies that StaticAssertTypeEq works in a namespace scope.
+
+static bool dummy1 GTEST_ATTRIBUTE_UNUSED_ = StaticAssertTypeEq<bool, bool>();
+static bool dummy2 GTEST_ATTRIBUTE_UNUSED_ =
+    StaticAssertTypeEq<const int, const int>();
+
+// Verifies that StaticAssertTypeEq works in a class.
+
+template <typename T>
+class StaticAssertTypeEqTestHelper {
+ public:
+  StaticAssertTypeEqTestHelper() { StaticAssertTypeEq<bool, T>(); }
+};
+
+TEST(StaticAssertTypeEqTest, WorksInClass) {
+  StaticAssertTypeEqTestHelper<bool>();
+}
+
+// Verifies that StaticAssertTypeEq works inside a function.
+
+typedef int IntAlias;
+
+TEST(StaticAssertTypeEqTest, CompilesForEqualTypes) {
+  StaticAssertTypeEq<int, IntAlias>();
+  StaticAssertTypeEq<int*, IntAlias*>();
+}
+
+TEST(GetCurrentOsStackTraceExceptTopTest, ReturnsTheStackTrace) {
+  testing::UnitTest* const unit_test = testing::UnitTest::GetInstance();
+
+  // We don't have a stack walker in Google Test yet.
+ EXPECT_STREQ("", GetCurrentOsStackTraceExceptTop(unit_test, 0).c_str()); + EXPECT_STREQ("", GetCurrentOsStackTraceExceptTop(unit_test, 1).c_str()); +} + +TEST(HasNonfatalFailureTest, ReturnsFalseWhenThereIsNoFailure) { + EXPECT_FALSE(HasNonfatalFailure()); +} + +static void FailFatally() { FAIL(); } + +TEST(HasNonfatalFailureTest, ReturnsFalseWhenThereIsOnlyFatalFailure) { + FailFatally(); + const bool has_nonfatal_failure = HasNonfatalFailure(); + ClearCurrentTestPartResults(); + EXPECT_FALSE(has_nonfatal_failure); +} + +TEST(HasNonfatalFailureTest, ReturnsTrueWhenThereIsNonfatalFailure) { + ADD_FAILURE(); + const bool has_nonfatal_failure = HasNonfatalFailure(); + ClearCurrentTestPartResults(); + EXPECT_TRUE(has_nonfatal_failure); +} + +TEST(HasNonfatalFailureTest, ReturnsTrueWhenThereAreFatalAndNonfatalFailures) { + FailFatally(); + ADD_FAILURE(); + const bool has_nonfatal_failure = HasNonfatalFailure(); + ClearCurrentTestPartResults(); + EXPECT_TRUE(has_nonfatal_failure); +} + +// A wrapper for calling HasNonfatalFailure outside of a test body. +static bool HasNonfatalFailureHelper() { + return testing::Test::HasNonfatalFailure(); +} + +TEST(HasNonfatalFailureTest, WorksOutsideOfTestBody) { + EXPECT_FALSE(HasNonfatalFailureHelper()); +} + +TEST(HasNonfatalFailureTest, WorksOutsideOfTestBody2) { + ADD_FAILURE(); + const bool has_nonfatal_failure = HasNonfatalFailureHelper(); + ClearCurrentTestPartResults(); + EXPECT_TRUE(has_nonfatal_failure); +} + +TEST(HasFailureTest, ReturnsFalseWhenThereIsNoFailure) { + EXPECT_FALSE(HasFailure()); +} + +TEST(HasFailureTest, ReturnsTrueWhenThereIsFatalFailure) { + FailFatally(); + const bool has_failure = HasFailure(); + ClearCurrentTestPartResults(); + EXPECT_TRUE(has_failure); +} + +TEST(HasFailureTest, ReturnsTrueWhenThereIsNonfatalFailure) { + ADD_FAILURE(); + const bool has_failure = HasFailure(); + ClearCurrentTestPartResults(); + EXPECT_TRUE(has_failure); +} + +TEST(HasFailureTest, ReturnsTrueWhenThereAreFatalAndNonfatalFailures) { + FailFatally(); + ADD_FAILURE(); + const bool has_failure = HasFailure(); + ClearCurrentTestPartResults(); + EXPECT_TRUE(has_failure); +} + +// A wrapper for calling HasFailure outside of a test body. +static bool HasFailureHelper() { return testing::Test::HasFailure(); } + +TEST(HasFailureTest, WorksOutsideOfTestBody) { + EXPECT_FALSE(HasFailureHelper()); +} + +TEST(HasFailureTest, WorksOutsideOfTestBody2) { + ADD_FAILURE(); + const bool has_failure = HasFailureHelper(); + ClearCurrentTestPartResults(); + EXPECT_TRUE(has_failure); +} + +class TestListener : public EmptyTestEventListener { + public: + TestListener() : on_start_counter_(NULL), is_destroyed_(NULL) {} + TestListener(int* on_start_counter, bool* is_destroyed) + : on_start_counter_(on_start_counter), + is_destroyed_(is_destroyed) {} + + virtual ~TestListener() { + if (is_destroyed_) + *is_destroyed_ = true; + } + + protected: + virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) { + if (on_start_counter_ != NULL) + (*on_start_counter_)++; + } + + private: + int* on_start_counter_; + bool* is_destroyed_; +}; + +// Tests the constructor. +TEST(TestEventListenersTest, ConstructionWorks) { + TestEventListeners listeners; + + EXPECT_TRUE(TestEventListenersAccessor::GetRepeater(&listeners) != NULL); + EXPECT_TRUE(listeners.default_result_printer() == NULL); + EXPECT_TRUE(listeners.default_xml_generator() == NULL); +} + +// Tests that the TestEventListeners destructor deletes all the listeners it +// owns. 
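One client-side consequence of the Has*Failure accessors verified above: a fatal assertion inside a helper function only aborts the helper, so the calling test must check for the failure itself. A sketch; OpenOrFail is hypothetical:

#include "gtest/gtest.h"

void OpenOrFail(bool ok) {              // hypothetical helper
  ASSERT_TRUE(ok) << "could not open";  // fatal, but only exits the helper
}

TEST(HelperTest, CallerMustCheck) {
  OpenOrFail(true);
  if (HasFatalFailure()) return;  // without this, the test would continue
  SUCCEED() << "resource opened";
}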
+TEST(TestEventListenersTest, DestructionWorks) { + bool default_result_printer_is_destroyed = false; + bool default_xml_printer_is_destroyed = false; + bool extra_listener_is_destroyed = false; + TestListener* default_result_printer = new TestListener( + NULL, &default_result_printer_is_destroyed); + TestListener* default_xml_printer = new TestListener( + NULL, &default_xml_printer_is_destroyed); + TestListener* extra_listener = new TestListener( + NULL, &extra_listener_is_destroyed); + + { + TestEventListeners listeners; + TestEventListenersAccessor::SetDefaultResultPrinter(&listeners, + default_result_printer); + TestEventListenersAccessor::SetDefaultXmlGenerator(&listeners, + default_xml_printer); + listeners.Append(extra_listener); + } + EXPECT_TRUE(default_result_printer_is_destroyed); + EXPECT_TRUE(default_xml_printer_is_destroyed); + EXPECT_TRUE(extra_listener_is_destroyed); +} + +// Tests that a listener Append'ed to a TestEventListeners list starts +// receiving events. +TEST(TestEventListenersTest, Append) { + int on_start_counter = 0; + bool is_destroyed = false; + TestListener* listener = new TestListener(&on_start_counter, &is_destroyed); + { + TestEventListeners listeners; + listeners.Append(listener); + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( + *UnitTest::GetInstance()); + EXPECT_EQ(1, on_start_counter); + } + EXPECT_TRUE(is_destroyed); +} + +// Tests that listeners receive events in the order they were appended to +// the list, except for *End requests, which must be received in the reverse +// order. +class SequenceTestingListener : public EmptyTestEventListener { + public: + SequenceTestingListener(std::vector* vector, const char* id) + : vector_(vector), id_(id) {} + + protected: + virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) { + vector_->push_back(GetEventDescription("OnTestProgramStart")); + } + + virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) { + vector_->push_back(GetEventDescription("OnTestProgramEnd")); + } + + virtual void OnTestIterationStart(const UnitTest& /*unit_test*/, + int /*iteration*/) { + vector_->push_back(GetEventDescription("OnTestIterationStart")); + } + + virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/, + int /*iteration*/) { + vector_->push_back(GetEventDescription("OnTestIterationEnd")); + } + + private: + std::string GetEventDescription(const char* method) { + Message message; + message << id_ << "." 
<< method; + return message.GetString(); + } + + std::vector* vector_; + const char* const id_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(SequenceTestingListener); +}; + +TEST(EventListenerTest, AppendKeepsOrder) { + std::vector vec; + TestEventListeners listeners; + listeners.Append(new SequenceTestingListener(&vec, "1st")); + listeners.Append(new SequenceTestingListener(&vec, "2nd")); + listeners.Append(new SequenceTestingListener(&vec, "3rd")); + + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( + *UnitTest::GetInstance()); + ASSERT_EQ(3U, vec.size()); + EXPECT_STREQ("1st.OnTestProgramStart", vec[0].c_str()); + EXPECT_STREQ("2nd.OnTestProgramStart", vec[1].c_str()); + EXPECT_STREQ("3rd.OnTestProgramStart", vec[2].c_str()); + + vec.clear(); + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramEnd( + *UnitTest::GetInstance()); + ASSERT_EQ(3U, vec.size()); + EXPECT_STREQ("3rd.OnTestProgramEnd", vec[0].c_str()); + EXPECT_STREQ("2nd.OnTestProgramEnd", vec[1].c_str()); + EXPECT_STREQ("1st.OnTestProgramEnd", vec[2].c_str()); + + vec.clear(); + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestIterationStart( + *UnitTest::GetInstance(), 0); + ASSERT_EQ(3U, vec.size()); + EXPECT_STREQ("1st.OnTestIterationStart", vec[0].c_str()); + EXPECT_STREQ("2nd.OnTestIterationStart", vec[1].c_str()); + EXPECT_STREQ("3rd.OnTestIterationStart", vec[2].c_str()); + + vec.clear(); + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestIterationEnd( + *UnitTest::GetInstance(), 0); + ASSERT_EQ(3U, vec.size()); + EXPECT_STREQ("3rd.OnTestIterationEnd", vec[0].c_str()); + EXPECT_STREQ("2nd.OnTestIterationEnd", vec[1].c_str()); + EXPECT_STREQ("1st.OnTestIterationEnd", vec[2].c_str()); +} + +// Tests that a listener removed from a TestEventListeners list stops receiving +// events and is not deleted when the list is destroyed. +TEST(TestEventListenersTest, Release) { + int on_start_counter = 0; + bool is_destroyed = false; + // Although Append passes the ownership of this object to the list, + // the following calls release it, and we need to delete it before the + // test ends. + TestListener* listener = new TestListener(&on_start_counter, &is_destroyed); + { + TestEventListeners listeners; + listeners.Append(listener); + EXPECT_EQ(listener, listeners.Release(listener)); + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( + *UnitTest::GetInstance()); + EXPECT_TRUE(listeners.Release(listener) == NULL); + } + EXPECT_EQ(0, on_start_counter); + EXPECT_FALSE(is_destroyed); + delete listener; +} + +// Tests that no events are forwarded when event forwarding is disabled. +TEST(EventListenerTest, SuppressEventForwarding) { + int on_start_counter = 0; + TestListener* listener = new TestListener(&on_start_counter, NULL); + + TestEventListeners listeners; + listeners.Append(listener); + ASSERT_TRUE(TestEventListenersAccessor::EventForwardingEnabled(listeners)); + TestEventListenersAccessor::SuppressEventForwarding(&listeners); + ASSERT_FALSE(TestEventListenersAccessor::EventForwardingEnabled(listeners)); + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( + *UnitTest::GetInstance()); + EXPECT_EQ(0, on_start_counter); +} + +// Tests that events generated by Google Test are not forwarded in +// death test subprocesses. 
+TEST(EventListenerDeathTest, EventsNotForwardedInDeathTestSubprecesses) { + EXPECT_DEATH_IF_SUPPORTED({ + GTEST_CHECK_(TestEventListenersAccessor::EventForwardingEnabled( + *GetUnitTestImpl()->listeners())) << "expected failure";}, + "expected failure"); +} + +// Tests that a listener installed via SetDefaultResultPrinter() starts +// receiving events and is returned via default_result_printer() and that +// the previous default_result_printer is removed from the list and deleted. +TEST(EventListenerTest, default_result_printer) { + int on_start_counter = 0; + bool is_destroyed = false; + TestListener* listener = new TestListener(&on_start_counter, &is_destroyed); + + TestEventListeners listeners; + TestEventListenersAccessor::SetDefaultResultPrinter(&listeners, listener); + + EXPECT_EQ(listener, listeners.default_result_printer()); + + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( + *UnitTest::GetInstance()); + + EXPECT_EQ(1, on_start_counter); + + // Replacing default_result_printer with something else should remove it + // from the list and destroy it. + TestEventListenersAccessor::SetDefaultResultPrinter(&listeners, NULL); + + EXPECT_TRUE(listeners.default_result_printer() == NULL); + EXPECT_TRUE(is_destroyed); + + // After broadcasting an event the counter is still the same, indicating + // the listener is not in the list anymore. + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( + *UnitTest::GetInstance()); + EXPECT_EQ(1, on_start_counter); +} + +// Tests that the default_result_printer listener stops receiving events +// when removed via Release and that is not owned by the list anymore. +TEST(EventListenerTest, RemovingDefaultResultPrinterWorks) { + int on_start_counter = 0; + bool is_destroyed = false; + // Although Append passes the ownership of this object to the list, + // the following calls release it, and we need to delete it before the + // test ends. + TestListener* listener = new TestListener(&on_start_counter, &is_destroyed); + { + TestEventListeners listeners; + TestEventListenersAccessor::SetDefaultResultPrinter(&listeners, listener); + + EXPECT_EQ(listener, listeners.Release(listener)); + EXPECT_TRUE(listeners.default_result_printer() == NULL); + EXPECT_FALSE(is_destroyed); + + // Broadcasting events now should not affect default_result_printer. + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( + *UnitTest::GetInstance()); + EXPECT_EQ(0, on_start_counter); + } + // Destroying the list should not affect the listener now, too. + EXPECT_FALSE(is_destroyed); + delete listener; +} + +// Tests that a listener installed via SetDefaultXmlGenerator() starts +// receiving events and is returned via default_xml_generator() and that +// the previous default_xml_generator is removed from the list and deleted. +TEST(EventListenerTest, default_xml_generator) { + int on_start_counter = 0; + bool is_destroyed = false; + TestListener* listener = new TestListener(&on_start_counter, &is_destroyed); + + TestEventListeners listeners; + TestEventListenersAccessor::SetDefaultXmlGenerator(&listeners, listener); + + EXPECT_EQ(listener, listeners.default_xml_generator()); + + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( + *UnitTest::GetInstance()); + + EXPECT_EQ(1, on_start_counter); + + // Replacing default_xml_generator with something else should remove it + // from the list and destroy it. 
+ TestEventListenersAccessor::SetDefaultXmlGenerator(&listeners, NULL); + + EXPECT_TRUE(listeners.default_xml_generator() == NULL); + EXPECT_TRUE(is_destroyed); + + // After broadcasting an event the counter is still the same, indicating + // the listener is not in the list anymore. + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( + *UnitTest::GetInstance()); + EXPECT_EQ(1, on_start_counter); +} + +// Tests that the default_xml_generator listener stops receiving events +// when removed via Release and that is not owned by the list anymore. +TEST(EventListenerTest, RemovingDefaultXmlGeneratorWorks) { + int on_start_counter = 0; + bool is_destroyed = false; + // Although Append passes the ownership of this object to the list, + // the following calls release it, and we need to delete it before the + // test ends. + TestListener* listener = new TestListener(&on_start_counter, &is_destroyed); + { + TestEventListeners listeners; + TestEventListenersAccessor::SetDefaultXmlGenerator(&listeners, listener); + + EXPECT_EQ(listener, listeners.Release(listener)); + EXPECT_TRUE(listeners.default_xml_generator() == NULL); + EXPECT_FALSE(is_destroyed); + + // Broadcasting events now should not affect default_xml_generator. + TestEventListenersAccessor::GetRepeater(&listeners)->OnTestProgramStart( + *UnitTest::GetInstance()); + EXPECT_EQ(0, on_start_counter); + } + // Destroying the list should not affect the listener now, too. + EXPECT_FALSE(is_destroyed); + delete listener; +} + +// Sanity tests to ensure that the alternative, verbose spellings of +// some of the macros work. We don't test them thoroughly as that +// would be quite involved. Since their implementations are +// straightforward, and they are rarely used, we'll just rely on the +// users to tell us when they are broken. +GTEST_TEST(AlternativeNameTest, Works) { // GTEST_TEST is the same as TEST. + GTEST_SUCCEED() << "OK"; // GTEST_SUCCEED is the same as SUCCEED. + + // GTEST_FAIL is the same as FAIL. + EXPECT_FATAL_FAILURE(GTEST_FAIL() << "An expected failure", + "An expected failure"); + + // GTEST_ASSERT_XY is the same as ASSERT_XY. + + GTEST_ASSERT_EQ(0, 0); + EXPECT_FATAL_FAILURE(GTEST_ASSERT_EQ(0, 1) << "An expected failure", + "An expected failure"); + EXPECT_FATAL_FAILURE(GTEST_ASSERT_EQ(1, 0) << "An expected failure", + "An expected failure"); + + GTEST_ASSERT_NE(0, 1); + GTEST_ASSERT_NE(1, 0); + EXPECT_FATAL_FAILURE(GTEST_ASSERT_NE(0, 0) << "An expected failure", + "An expected failure"); + + GTEST_ASSERT_LE(0, 0); + GTEST_ASSERT_LE(0, 1); + EXPECT_FATAL_FAILURE(GTEST_ASSERT_LE(1, 0) << "An expected failure", + "An expected failure"); + + GTEST_ASSERT_LT(0, 1); + EXPECT_FATAL_FAILURE(GTEST_ASSERT_LT(0, 0) << "An expected failure", + "An expected failure"); + EXPECT_FATAL_FAILURE(GTEST_ASSERT_LT(1, 0) << "An expected failure", + "An expected failure"); + + GTEST_ASSERT_GE(0, 0); + GTEST_ASSERT_GE(1, 0); + EXPECT_FATAL_FAILURE(GTEST_ASSERT_GE(0, 1) << "An expected failure", + "An expected failure"); + + GTEST_ASSERT_GT(1, 0); + EXPECT_FATAL_FAILURE(GTEST_ASSERT_GT(0, 1) << "An expected failure", + "An expected failure"); + EXPECT_FATAL_FAILURE(GTEST_ASSERT_GT(1, 1) << "An expected failure", + "An expected failure"); +} + +// Tests for internal utilities necessary for implementation of the universal +// printing. +// TODO(vladl@google.com): Find a better home for them. 
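For context on the trait tests that follow: pre-C++11 traits of this kind are typically built on the sizeof-based overload-resolution idiom. A minimal self-contained sketch of that idiom, offered only as the general pattern rather than gtest's exact internals (IsConvertibleSketch is an illustrative name):

    #include <cstdio>

    // Yes/No types with distinguishable sizes let sizeof() report which
    // Helper overload would be chosen, without ever calling it.
    template <typename From, typename To>
    class IsConvertibleSketch {
     private:
      typedef char Yes;
      struct No { char pad[2]; };

      static Yes Helper(To);   // viable only if From converts to To
      static No Helper(...);   // fallback for everything else
      static From MakeFrom();  // declared only; used in unevaluated context

     public:
      static const bool value = sizeof(Helper(MakeFrom())) == sizeof(Yes);
    };

    int main() {
      std::printf("%d %d\n",
                  int(IsConvertibleSketch<int, double>::value),    // prints 1
                  int(IsConvertibleSketch<double*, int*>::value)); // prints 0
      return 0;
    }

gtest's real ImplicitlyConvertible adds guards for compiler quirks, so treat this purely as the underlying technique being tested.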
+ +class ConversionHelperBase {}; +class ConversionHelperDerived : public ConversionHelperBase {}; + +// Tests that IsAProtocolMessage::value is a compile-time constant. +TEST(IsAProtocolMessageTest, ValueIsCompileTimeConstant) { + GTEST_COMPILE_ASSERT_(IsAProtocolMessage::value, + const_true); + GTEST_COMPILE_ASSERT_(!IsAProtocolMessage::value, const_false); +} + +// Tests that IsAProtocolMessage::value is true when T is +// proto2::Message or a sub-class of it. +TEST(IsAProtocolMessageTest, ValueIsTrueWhenTypeIsAProtocolMessage) { + EXPECT_TRUE(IsAProtocolMessage< ::proto2::Message>::value); + EXPECT_TRUE(IsAProtocolMessage::value); +} + +// Tests that IsAProtocolMessage::value is false when T is neither +// ProtocolMessage nor a sub-class of it. +TEST(IsAProtocolMessageTest, ValueIsFalseWhenTypeIsNotAProtocolMessage) { + EXPECT_FALSE(IsAProtocolMessage::value); + EXPECT_FALSE(IsAProtocolMessage::value); +} + +// Tests that CompileAssertTypesEqual compiles when the type arguments are +// equal. +TEST(CompileAssertTypesEqual, CompilesWhenTypesAreEqual) { + CompileAssertTypesEqual(); + CompileAssertTypesEqual(); +} + +// Tests that RemoveReference does not affect non-reference types. +TEST(RemoveReferenceTest, DoesNotAffectNonReferenceType) { + CompileAssertTypesEqual::type>(); + CompileAssertTypesEqual::type>(); +} + +// Tests that RemoveReference removes reference from reference types. +TEST(RemoveReferenceTest, RemovesReference) { + CompileAssertTypesEqual::type>(); + CompileAssertTypesEqual::type>(); +} + +// Tests GTEST_REMOVE_REFERENCE_. + +template +void TestGTestRemoveReference() { + CompileAssertTypesEqual(); +} + +TEST(RemoveReferenceTest, MacroVersion) { + TestGTestRemoveReference(); + TestGTestRemoveReference(); +} + + +// Tests that RemoveConst does not affect non-const types. +TEST(RemoveConstTest, DoesNotAffectNonConstType) { + CompileAssertTypesEqual::type>(); + CompileAssertTypesEqual::type>(); +} + +// Tests that RemoveConst removes const from const types. +TEST(RemoveConstTest, RemovesConst) { + CompileAssertTypesEqual::type>(); + CompileAssertTypesEqual::type>(); + CompileAssertTypesEqual::type>(); +} + +// Tests GTEST_REMOVE_CONST_. + +template +void TestGTestRemoveConst() { + CompileAssertTypesEqual(); +} + +TEST(RemoveConstTest, MacroVersion) { + TestGTestRemoveConst(); + TestGTestRemoveConst(); + TestGTestRemoveConst(); +} + +// Tests GTEST_REMOVE_REFERENCE_AND_CONST_. + +template +void TestGTestRemoveReferenceAndConst() { + CompileAssertTypesEqual(); +} + +TEST(RemoveReferenceToConstTest, Works) { + TestGTestRemoveReferenceAndConst(); + TestGTestRemoveReferenceAndConst(); + TestGTestRemoveReferenceAndConst(); + TestGTestRemoveReferenceAndConst(); + TestGTestRemoveReferenceAndConst(); +} + +// Tests that AddReference does not affect reference types. +TEST(AddReferenceTest, DoesNotAffectReferenceType) { + CompileAssertTypesEqual::type>(); + CompileAssertTypesEqual::type>(); +} + +// Tests that AddReference adds reference to non-reference types. +TEST(AddReferenceTest, AddsReference) { + CompileAssertTypesEqual::type>(); + CompileAssertTypesEqual::type>(); +} + +// Tests GTEST_ADD_REFERENCE_. + +template +void TestGTestAddReference() { + CompileAssertTypesEqual(); +} + +TEST(AddReferenceTest, MacroVersion) { + TestGTestAddReference(); + TestGTestAddReference(); +} + +// Tests GTEST_REFERENCE_TO_CONST_. 
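Editorial note: the RemoveReference/RemoveConst/AddReference machinery above predates C++11; since this tree builds with -std=c++11 (per the top-level CMake flags), the same checks can be expressed with the standard <type_traits>. A brief sketch for comparison, not part of the vendored source:

    #include <type_traits>

    // Standard-library equivalents of the internal traits tested above.
    static_assert(std::is_same<int, std::remove_reference<int&>::type>::value,
                  "RemoveReference equivalent");
    static_assert(std::is_same<char, std::remove_const<const char>::type>::value,
                  "RemoveConst equivalent");
    static_assert(std::is_same<const int&,
                               std::add_lvalue_reference<const int>::type>::value,
                  "AddReference equivalent");

The GTEST_REFERENCE_TO_CONST_ test that the preceding comment introduces follows next.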
+ +template +void TestGTestReferenceToConst() { + CompileAssertTypesEqual(); +} + +TEST(GTestReferenceToConstTest, Works) { + TestGTestReferenceToConst(); + TestGTestReferenceToConst(); + TestGTestReferenceToConst(); + TestGTestReferenceToConst(); +} + +// Tests that ImplicitlyConvertible::value is a compile-time constant. +TEST(ImplicitlyConvertibleTest, ValueIsCompileTimeConstant) { + GTEST_COMPILE_ASSERT_((ImplicitlyConvertible::value), const_true); + GTEST_COMPILE_ASSERT_((!ImplicitlyConvertible::value), + const_false); +} + +// Tests that ImplicitlyConvertible::value is true when T1 can +// be implicitly converted to T2. +TEST(ImplicitlyConvertibleTest, ValueIsTrueWhenConvertible) { + EXPECT_TRUE((ImplicitlyConvertible::value)); + EXPECT_TRUE((ImplicitlyConvertible::value)); + EXPECT_TRUE((ImplicitlyConvertible::value)); + EXPECT_TRUE((ImplicitlyConvertible::value)); + EXPECT_TRUE((ImplicitlyConvertible::value)); + EXPECT_TRUE((ImplicitlyConvertible::value)); +} + +// Tests that ImplicitlyConvertible::value is false when T1 +// cannot be implicitly converted to T2. +TEST(ImplicitlyConvertibleTest, ValueIsFalseWhenNotConvertible) { + EXPECT_FALSE((ImplicitlyConvertible::value)); + EXPECT_FALSE((ImplicitlyConvertible::value)); + EXPECT_FALSE((ImplicitlyConvertible::value)); + EXPECT_FALSE((ImplicitlyConvertible::value)); +} + +// Tests IsContainerTest. + +class NonContainer {}; + +TEST(IsContainerTestTest, WorksForNonContainer) { + EXPECT_EQ(sizeof(IsNotContainer), sizeof(IsContainerTest(0))); + EXPECT_EQ(sizeof(IsNotContainer), sizeof(IsContainerTest(0))); + EXPECT_EQ(sizeof(IsNotContainer), sizeof(IsContainerTest(0))); +} + +TEST(IsContainerTestTest, WorksForContainer) { + EXPECT_EQ(sizeof(IsContainer), + sizeof(IsContainerTest >(0))); + EXPECT_EQ(sizeof(IsContainer), + sizeof(IsContainerTest >(0))); +} + +// Tests ArrayEq(). + +TEST(ArrayEqTest, WorksForDegeneratedArrays) { + EXPECT_TRUE(ArrayEq(5, 5L)); + EXPECT_FALSE(ArrayEq('a', 0)); +} + +TEST(ArrayEqTest, WorksForOneDimensionalArrays) { + // Note that a and b are distinct but compatible types. + const int a[] = { 0, 1 }; + long b[] = { 0, 1 }; + EXPECT_TRUE(ArrayEq(a, b)); + EXPECT_TRUE(ArrayEq(a, 2, b)); + + b[0] = 2; + EXPECT_FALSE(ArrayEq(a, b)); + EXPECT_FALSE(ArrayEq(a, 1, b)); +} + +TEST(ArrayEqTest, WorksForTwoDimensionalArrays) { + const char a[][3] = { "hi", "lo" }; + const char b[][3] = { "hi", "lo" }; + const char c[][3] = { "hi", "li" }; + + EXPECT_TRUE(ArrayEq(a, b)); + EXPECT_TRUE(ArrayEq(a, 2, b)); + + EXPECT_FALSE(ArrayEq(a, c)); + EXPECT_FALSE(ArrayEq(a, 2, c)); +} + +// Tests ArrayAwareFind(). + +TEST(ArrayAwareFindTest, WorksForOneDimensionalArray) { + const char a[] = "hello"; + EXPECT_EQ(a + 4, ArrayAwareFind(a, a + 5, 'o')); + EXPECT_EQ(a + 5, ArrayAwareFind(a, a + 5, 'x')); +} + +TEST(ArrayAwareFindTest, WorksForTwoDimensionalArray) { + int a[][2] = { { 0, 1 }, { 2, 3 }, { 4, 5 } }; + const int b[2] = { 2, 3 }; + EXPECT_EQ(a + 1, ArrayAwareFind(a, a + 3, b)); + + const int c[2] = { 6, 7 }; + EXPECT_EQ(a + 3, ArrayAwareFind(a, a + 3, c)); +} + +// Tests CopyArray(). + +TEST(CopyArrayTest, WorksForDegeneratedArrays) { + int n = 0; + CopyArray('a', &n); + EXPECT_EQ('a', n); +} + +TEST(CopyArrayTest, WorksForOneDimensionalArrays) { + const char a[3] = "hi"; + int b[3]; +#ifndef __BORLANDC__ // C++Builder cannot compile some array size deductions. 
+ CopyArray(a, &b); + EXPECT_TRUE(ArrayEq(a, b)); +#endif + + int c[3]; + CopyArray(a, 3, c); + EXPECT_TRUE(ArrayEq(a, c)); +} + +TEST(CopyArrayTest, WorksForTwoDimensionalArrays) { + const int a[2][3] = { { 0, 1, 2 }, { 3, 4, 5 } }; + int b[2][3]; +#ifndef __BORLANDC__ // C++Builder cannot compile some array size deductions. + CopyArray(a, &b); + EXPECT_TRUE(ArrayEq(a, b)); +#endif + + int c[2][3]; + CopyArray(a, 2, c); + EXPECT_TRUE(ArrayEq(a, c)); +} + +// Tests NativeArray. + +TEST(NativeArrayTest, ConstructorFromArrayWorks) { + const int a[3] = { 0, 1, 2 }; + NativeArray na(a, 3, kReference); + EXPECT_EQ(3U, na.size()); + EXPECT_EQ(a, na.begin()); +} + +TEST(NativeArrayTest, CreatesAndDeletesCopyOfArrayWhenAskedTo) { + typedef int Array[2]; + Array* a = new Array[1]; + (*a)[0] = 0; + (*a)[1] = 1; + NativeArray na(*a, 2, kCopy); + EXPECT_NE(*a, na.begin()); + delete[] a; + EXPECT_EQ(0, na.begin()[0]); + EXPECT_EQ(1, na.begin()[1]); + + // We rely on the heap checker to verify that na deletes the copy of + // array. +} + +TEST(NativeArrayTest, TypeMembersAreCorrect) { + StaticAssertTypeEq::value_type>(); + StaticAssertTypeEq::value_type>(); + + StaticAssertTypeEq::const_iterator>(); + StaticAssertTypeEq::const_iterator>(); +} + +TEST(NativeArrayTest, MethodsWork) { + const int a[3] = { 0, 1, 2 }; + NativeArray na(a, 3, kCopy); + ASSERT_EQ(3U, na.size()); + EXPECT_EQ(3, na.end() - na.begin()); + + NativeArray::const_iterator it = na.begin(); + EXPECT_EQ(0, *it); + ++it; + EXPECT_EQ(1, *it); + it++; + EXPECT_EQ(2, *it); + ++it; + EXPECT_EQ(na.end(), it); + + EXPECT_TRUE(na == na); + + NativeArray na2(a, 3, kReference); + EXPECT_TRUE(na == na2); + + const int b1[3] = { 0, 1, 1 }; + const int b2[4] = { 0, 1, 2, 3 }; + EXPECT_FALSE(na == NativeArray(b1, 3, kReference)); + EXPECT_FALSE(na == NativeArray(b2, 4, kCopy)); +} + +TEST(NativeArrayTest, WorksForTwoDimensionalArray) { + const char a[2][3] = { "hi", "lo" }; + NativeArray na(a, 2, kReference); + ASSERT_EQ(2U, na.size()); + EXPECT_EQ(a, na.begin()); +} + +// Tests SkipPrefix(). + +TEST(SkipPrefixTest, SkipsWhenPrefixMatches) { + const char* const str = "hello"; + + const char* p = str; + EXPECT_TRUE(SkipPrefix("", &p)); + EXPECT_EQ(str, p); + + p = str; + EXPECT_TRUE(SkipPrefix("hell", &p)); + EXPECT_EQ(str + 4, p); +} + +TEST(SkipPrefixTest, DoesNotSkipWhenPrefixDoesNotMatch) { + const char* const str = "world"; + + const char* p = str; + EXPECT_FALSE(SkipPrefix("W", &p)); + EXPECT_EQ(str, p); + + p = str; + EXPECT_FALSE(SkipPrefix("world!", &p)); + EXPECT_EQ(str, p); +} diff --git a/external/gtest/test/gtest_xml_outfile1_test_.cc b/external/gtest/test/gtest_xml_outfile1_test_.cc new file mode 100755 index 0000000000..531ced49d4 --- /dev/null +++ b/external/gtest/test/gtest_xml_outfile1_test_.cc @@ -0,0 +1,49 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: keith.ray@gmail.com (Keith Ray) +// +// gtest_xml_outfile1_test_ writes some xml via TestProperty used by +// gtest_xml_outfiles_test.py + +#include "gtest/gtest.h" + +class PropertyOne : public testing::Test { + protected: + virtual void SetUp() { + RecordProperty("SetUpProp", 1); + } + virtual void TearDown() { + RecordProperty("TearDownProp", 1); + } +}; + +TEST_F(PropertyOne, TestSomeProperties) { + RecordProperty("TestSomeProperty", 1); +} diff --git a/external/gtest/test/gtest_xml_outfile2_test_.cc b/external/gtest/test/gtest_xml_outfile2_test_.cc new file mode 100755 index 0000000000..7b400b2760 --- /dev/null +++ b/external/gtest/test/gtest_xml_outfile2_test_.cc @@ -0,0 +1,49 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+// +// Author: keith.ray@gmail.com (Keith Ray) +// +// gtest_xml_outfile2_test_ writes some xml via TestProperty used by +// gtest_xml_outfiles_test.py + +#include "gtest/gtest.h" + +class PropertyTwo : public testing::Test { + protected: + virtual void SetUp() { + RecordProperty("SetUpProp", 2); + } + virtual void TearDown() { + RecordProperty("TearDownProp", 2); + } +}; + +TEST_F(PropertyTwo, TestSomeProperties) { + RecordProperty("TestSomeProperty", 2); +} diff --git a/external/gtest/test/gtest_xml_outfiles_test.py b/external/gtest/test/gtest_xml_outfiles_test.py new file mode 100755 index 0000000000..524e437e6c --- /dev/null +++ b/external/gtest/test/gtest_xml_outfiles_test.py @@ -0,0 +1,132 @@ +#!/usr/bin/env python +# +# Copyright 2008, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit test for the gtest_xml_output module.""" + +__author__ = "keith.ray@gmail.com (Keith Ray)" + +import os +from xml.dom import minidom, Node + +import gtest_test_utils +import gtest_xml_test_utils + + +GTEST_OUTPUT_SUBDIR = "xml_outfiles" +GTEST_OUTPUT_1_TEST = "gtest_xml_outfile1_test_" +GTEST_OUTPUT_2_TEST = "gtest_xml_outfile2_test_" + +EXPECTED_XML_1 = """ + + + + + +""" + +EXPECTED_XML_2 = """ + + + + + +""" + + +class GTestXMLOutFilesTest(gtest_xml_test_utils.GTestXMLTestCase): + """Unit test for Google Test's XML output functionality.""" + + def setUp(self): + # We want the trailing '/' that the last "" provides in os.path.join, for + # telling Google Test to create an output directory instead of a single file + # for xml output. 
+ self.output_dir_ = os.path.join(gtest_test_utils.GetTempDir(), + GTEST_OUTPUT_SUBDIR, "") + self.DeleteFilesAndDir() + + def tearDown(self): + self.DeleteFilesAndDir() + + def DeleteFilesAndDir(self): + try: + os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_1_TEST + ".xml")) + except os.error: + pass + try: + os.remove(os.path.join(self.output_dir_, GTEST_OUTPUT_2_TEST + ".xml")) + except os.error: + pass + try: + os.rmdir(self.output_dir_) + except os.error: + pass + + def testOutfile1(self): + self._TestOutFile(GTEST_OUTPUT_1_TEST, EXPECTED_XML_1) + + def testOutfile2(self): + self._TestOutFile(GTEST_OUTPUT_2_TEST, EXPECTED_XML_2) + + def _TestOutFile(self, test_name, expected_xml): + gtest_prog_path = gtest_test_utils.GetTestExecutablePath(test_name) + command = [gtest_prog_path, "--gtest_output=xml:%s" % self.output_dir_] + p = gtest_test_utils.Subprocess(command, + working_dir=gtest_test_utils.GetTempDir()) + self.assert_(p.exited) + self.assertEquals(0, p.exit_code) + + # TODO(wan@google.com): libtool causes the built test binary to be + # named lt-gtest_xml_outfiles_test_ instead of + # gtest_xml_outfiles_test_. To account for this possibillity, we + # allow both names in the following code. We should remove this + # hack when Chandler Carruth's libtool replacement tool is ready. + output_file_name1 = test_name + ".xml" + output_file1 = os.path.join(self.output_dir_, output_file_name1) + output_file_name2 = 'lt-' + output_file_name1 + output_file2 = os.path.join(self.output_dir_, output_file_name2) + self.assert_(os.path.isfile(output_file1) or os.path.isfile(output_file2), + output_file1) + + expected = minidom.parseString(expected_xml) + if os.path.isfile(output_file1): + actual = minidom.parse(output_file1) + else: + actual = minidom.parse(output_file2) + self.NormalizeXml(actual.documentElement) + self.AssertEquivalentNodes(expected.documentElement, + actual.documentElement) + expected.unlink() + actual.unlink() + + +if __name__ == "__main__": + os.environ["GTEST_STACK_TRACE_DEPTH"] = "0" + gtest_test_utils.Main() diff --git a/external/gtest/test/gtest_xml_output_unittest.py b/external/gtest/test/gtest_xml_output_unittest.py new file mode 100755 index 0000000000..f605d4ee2a --- /dev/null +++ b/external/gtest/test/gtest_xml_output_unittest.py @@ -0,0 +1,307 @@ +#!/usr/bin/env python +# +# Copyright 2006, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit test for the gtest_xml_output module""" + +__author__ = 'eefacm@gmail.com (Sean Mcafee)' + +import datetime +import errno +import os +import re +import sys +from xml.dom import minidom, Node + +import gtest_test_utils +import gtest_xml_test_utils + + +GTEST_FILTER_FLAG = '--gtest_filter' +GTEST_LIST_TESTS_FLAG = '--gtest_list_tests' +GTEST_OUTPUT_FLAG = "--gtest_output" +GTEST_DEFAULT_OUTPUT_FILE = "test_detail.xml" +GTEST_PROGRAM_NAME = "gtest_xml_output_unittest_" + +SUPPORTS_STACK_TRACES = False + +if SUPPORTS_STACK_TRACES: + STACK_TRACE_TEMPLATE = '\nStack trace:\n*' +else: + STACK_TRACE_TEMPLATE = '' + +EXPECTED_NON_EMPTY_XML = """ + + + + + + + + + + + + + + + + + + + + ]]>%(stack)s]]> + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +""" % {'stack': STACK_TRACE_TEMPLATE} + +EXPECTED_FILTERED_TEST_XML = """ + + + + +""" + +EXPECTED_EMPTY_XML = """ + +""" + +GTEST_PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath(GTEST_PROGRAM_NAME) + +SUPPORTS_TYPED_TESTS = 'TypedTest' in gtest_test_utils.Subprocess( + [GTEST_PROGRAM_PATH, GTEST_LIST_TESTS_FLAG], capture_stderr=False).output + + +class GTestXMLOutputUnitTest(gtest_xml_test_utils.GTestXMLTestCase): + """ + Unit test for Google Test's XML output functionality. + """ + + # This test currently breaks on platforms that do not support typed and + # type-parameterized tests, so we don't run it under them. + if SUPPORTS_TYPED_TESTS: + def testNonEmptyXmlOutput(self): + """ + Runs a test program that generates a non-empty XML output, and + tests that the XML output is expected. + """ + self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_NON_EMPTY_XML, 1) + + def testEmptyXmlOutput(self): + """Verifies XML output for a Google Test binary without actual tests. + + Runs a test program that generates an empty XML output, and + tests that the XML output is expected. + """ + + self._TestXmlOutput('gtest_no_test_unittest', EXPECTED_EMPTY_XML, 0) + + def testTimestampValue(self): + """Checks whether the timestamp attribute in the XML output is valid. + + Runs a test program that generates an empty XML output, and checks if + the timestamp attribute in the testsuites tag is valid. + """ + actual = self._GetXmlOutput('gtest_no_test_unittest', [], 0) + date_time_str = actual.documentElement.getAttributeNode('timestamp').value + # datetime.strptime() is only available in Python 2.5+ so we have to + # parse the expected datetime manually. 
+    match = re.match(r'(\d+)-(\d\d)-(\d\d)T(\d\d):(\d\d):(\d\d)', date_time_str)
+    self.assertTrue(
+        match,
+        'XML datetime string %s has incorrect format' % date_time_str)
+    date_time_from_xml = datetime.datetime(
+        year=int(match.group(1)), month=int(match.group(2)),
+        day=int(match.group(3)), hour=int(match.group(4)),
+        minute=int(match.group(5)), second=int(match.group(6)))
+
+    time_delta = abs(datetime.datetime.now() - date_time_from_xml)
+    # timestamp value should be near the current local time
+    self.assertTrue(time_delta < datetime.timedelta(seconds=600),
+                    'time_delta is %s' % time_delta)
+    actual.unlink()
+
+  def testDefaultOutputFile(self):
+    """
+    Confirms that Google Test produces an XML output file with the expected
+    default name if no name is explicitly specified.
+    """
+    output_file = os.path.join(gtest_test_utils.GetTempDir(),
+                               GTEST_DEFAULT_OUTPUT_FILE)
+    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(
+        'gtest_no_test_unittest')
+    try:
+      os.remove(output_file)
+    except OSError, e:
+      if e.errno != errno.ENOENT:
+        raise
+
+    p = gtest_test_utils.Subprocess(
+        [gtest_prog_path, '%s=xml' % GTEST_OUTPUT_FLAG],
+        working_dir=gtest_test_utils.GetTempDir())
+    self.assert_(p.exited)
+    self.assertEquals(0, p.exit_code)
+    self.assert_(os.path.isfile(output_file))
+
+  def testSuppressedXmlOutput(self):
+    """
+    Tests that no XML file is generated if the default XML listener is
+    shut down before RUN_ALL_TESTS is invoked.
+    """
+
+    xml_path = os.path.join(gtest_test_utils.GetTempDir(),
+                            GTEST_PROGRAM_NAME + 'out.xml')
+    if os.path.isfile(xml_path):
+      os.remove(xml_path)
+
+    command = [GTEST_PROGRAM_PATH,
+               '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path),
+               '--shut_down_xml']
+    p = gtest_test_utils.Subprocess(command)
+    if p.terminated_by_signal:
+      # p.signal is available only if p.terminated_by_signal is True.
+      self.assertFalse(
+          p.terminated_by_signal,
+          '%s was killed by signal %d' % (GTEST_PROGRAM_NAME, p.signal))
+    else:
+      self.assert_(p.exited)
+      self.assertEquals(1, p.exit_code,
+                        "'%s' exited with code %s, which doesn't match "
+                        'the expected exit code %s.'
+                        % (command, p.exit_code, 1))
+
+    self.assert_(not os.path.isfile(xml_path))
+
+  def testFilteredTestXmlOutput(self):
+    """Verifies XML output when a filter is applied.
+
+    Runs a test program that executes only some tests and verifies that
+    non-selected tests do not show up in the XML output.
+    """
+
+    self._TestXmlOutput(GTEST_PROGRAM_NAME, EXPECTED_FILTERED_TEST_XML, 0,
+                        extra_args=['%s=SuccessfulTest.*' % GTEST_FILTER_FLAG])
+
+  def _GetXmlOutput(self, gtest_prog_name, extra_args, expected_exit_code):
+    """
+    Returns the xml output generated by running the program gtest_prog_name.
+    Furthermore, the program's exit code must be expected_exit_code.
+    """
+    xml_path = os.path.join(gtest_test_utils.GetTempDir(),
+                            gtest_prog_name + 'out.xml')
+    gtest_prog_path = gtest_test_utils.GetTestExecutablePath(gtest_prog_name)
+
+    command = ([gtest_prog_path, '%s=xml:%s' % (GTEST_OUTPUT_FLAG, xml_path)] +
+               extra_args)
+    p = gtest_test_utils.Subprocess(command)
+    if p.terminated_by_signal:
+      self.assert_(False,
+                   '%s was killed by signal %d' % (gtest_prog_name, p.signal))
+    else:
+      self.assert_(p.exited)
+      self.assertEquals(expected_exit_code, p.exit_code,
+                        "'%s' exited with code %s, which doesn't match "
+                        'the expected exit code %s.'
+                        % (command, p.exit_code, expected_exit_code))
+    actual = minidom.parse(xml_path)
+    return actual
+
+  def _TestXmlOutput(self, gtest_prog_name, expected_xml,
+                     expected_exit_code, extra_args=None):
+    """
+    Asserts that the XML document generated by running the program
+    gtest_prog_name matches expected_xml, a string containing another
+    XML document.  Furthermore, the program's exit code must be
+    expected_exit_code.
+    """
+
+    actual = self._GetXmlOutput(gtest_prog_name, extra_args or [],
+                                expected_exit_code)
+    expected = minidom.parseString(expected_xml)
+    self.NormalizeXml(actual.documentElement)
+    self.AssertEquivalentNodes(expected.documentElement,
+                               actual.documentElement)
+    expected.unlink()
+    actual.unlink()
+
+
+if __name__ == '__main__':
+  os.environ['GTEST_STACK_TRACE_DEPTH'] = '1'
+  gtest_test_utils.Main()
diff --git a/external/gtest/test/gtest_xml_output_unittest_.cc b/external/gtest/test/gtest_xml_output_unittest_.cc
new file mode 100755
index 0000000000..48b8771b52
--- /dev/null
+++ b/external/gtest/test/gtest_xml_output_unittest_.cc
@@ -0,0 +1,181 @@
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Author: eefacm@gmail.com (Sean Mcafee)
+
+// Unit test for Google Test XML output.
+//
+// A user can specify XML output in a Google Test program to run via
+// either the GTEST_OUTPUT environment variable or the --gtest_output
+// flag.  This is used for testing such functionality.
+//
+// This program will be invoked from a Python unit test.  Don't run it
+// directly.
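Since the header comment above names the two ways XML output is switched on, here is a minimal sketch of the programmatic equivalent, assuming only the documented GTEST_FLAG macro (report.xml is an arbitrary example path, not one used by this patch):

    #include "gtest/gtest.h"

    int main(int argc, char** argv) {
      // Same effect as --gtest_output=xml:report.xml or the GTEST_OUTPUT
      // environment variable; set before InitGoogleTest() so that real
      // command-line flags and the environment still take precedence.
      ::testing::GTEST_FLAG(output) = "xml:report.xml";
      ::testing::InitGoogleTest(&argc, argv);
      return RUN_ALL_TESTS();
    }

The test binary that follows is instead driven externally, via the flag, by the Python scripts above.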
+ +#include "gtest/gtest.h" + +using ::testing::InitGoogleTest; +using ::testing::TestEventListeners; +using ::testing::TestWithParam; +using ::testing::UnitTest; +using ::testing::Test; +using ::testing::Values; + +class SuccessfulTest : public Test { +}; + +TEST_F(SuccessfulTest, Succeeds) { + SUCCEED() << "This is a success."; + ASSERT_EQ(1, 1); +} + +class FailedTest : public Test { +}; + +TEST_F(FailedTest, Fails) { + ASSERT_EQ(1, 2); +} + +class DisabledTest : public Test { +}; + +TEST_F(DisabledTest, DISABLED_test_not_run) { + FAIL() << "Unexpected failure: Disabled test should not be run"; +} + +TEST(MixedResultTest, Succeeds) { + EXPECT_EQ(1, 1); + ASSERT_EQ(1, 1); +} + +TEST(MixedResultTest, Fails) { + EXPECT_EQ(1, 2); + ASSERT_EQ(2, 3); +} + +TEST(MixedResultTest, DISABLED_test) { + FAIL() << "Unexpected failure: Disabled test should not be run"; +} + +TEST(XmlQuotingTest, OutputsCData) { + FAIL() << "XML output: " + ""; +} + +// Helps to test that invalid characters produced by test code do not make +// it into the XML file. +TEST(InvalidCharactersTest, InvalidCharactersInMessage) { + FAIL() << "Invalid characters in brackets [\x1\x2]"; +} + +class PropertyRecordingTest : public Test { + public: + static void SetUpTestCase() { RecordProperty("SetUpTestCase", "yes"); } + static void TearDownTestCase() { RecordProperty("TearDownTestCase", "aye"); } +}; + +TEST_F(PropertyRecordingTest, OneProperty) { + RecordProperty("key_1", "1"); +} + +TEST_F(PropertyRecordingTest, IntValuedProperty) { + RecordProperty("key_int", 1); +} + +TEST_F(PropertyRecordingTest, ThreeProperties) { + RecordProperty("key_1", "1"); + RecordProperty("key_2", "2"); + RecordProperty("key_3", "3"); +} + +TEST_F(PropertyRecordingTest, TwoValuesForOneKeyUsesLastValue) { + RecordProperty("key_1", "1"); + RecordProperty("key_1", "2"); +} + +TEST(NoFixtureTest, RecordProperty) { + RecordProperty("key", "1"); +} + +void ExternalUtilityThatCallsRecordProperty(const std::string& key, int value) { + testing::Test::RecordProperty(key, value); +} + +void ExternalUtilityThatCallsRecordProperty(const std::string& key, + const std::string& value) { + testing::Test::RecordProperty(key, value); +} + +TEST(NoFixtureTest, ExternalUtilityThatCallsRecordIntValuedProperty) { + ExternalUtilityThatCallsRecordProperty("key_for_utility_int", 1); +} + +TEST(NoFixtureTest, ExternalUtilityThatCallsRecordStringValuedProperty) { + ExternalUtilityThatCallsRecordProperty("key_for_utility_string", "1"); +} + +// Verifies that the test parameter value is output in the 'value_param' +// XML attribute for value-parameterized tests. +class ValueParamTest : public TestWithParam {}; +TEST_P(ValueParamTest, HasValueParamAttribute) {} +TEST_P(ValueParamTest, AnotherTestThatHasValueParamAttribute) {} +INSTANTIATE_TEST_CASE_P(Single, ValueParamTest, Values(33, 42)); + +#if GTEST_HAS_TYPED_TEST +// Verifies that the type parameter name is output in the 'type_param' +// XML attribute for typed tests. +template class TypedTest : public Test {}; +typedef testing::Types TypedTestTypes; +TYPED_TEST_CASE(TypedTest, TypedTestTypes); +TYPED_TEST(TypedTest, HasTypeParamAttribute) {} +#endif + +#if GTEST_HAS_TYPED_TEST_P +// Verifies that the type parameter name is output in the 'type_param' +// XML attribute for type-parameterized tests. 
+template class TypeParameterizedTestCase : public Test {}; +TYPED_TEST_CASE_P(TypeParameterizedTestCase); +TYPED_TEST_P(TypeParameterizedTestCase, HasTypeParamAttribute) {} +REGISTER_TYPED_TEST_CASE_P(TypeParameterizedTestCase, HasTypeParamAttribute); +typedef testing::Types TypeParameterizedTestCaseTypes; +INSTANTIATE_TYPED_TEST_CASE_P(Single, + TypeParameterizedTestCase, + TypeParameterizedTestCaseTypes); +#endif + +int main(int argc, char** argv) { + InitGoogleTest(&argc, argv); + + if (argc > 1 && strcmp(argv[1], "--shut_down_xml") == 0) { + TestEventListeners& listeners = UnitTest::GetInstance()->listeners(); + delete listeners.Release(listeners.default_xml_generator()); + } + testing::Test::RecordProperty("ad_hoc_property", "42"); + return RUN_ALL_TESTS(); +} diff --git a/external/gtest/test/gtest_xml_test_utils.py b/external/gtest/test/gtest_xml_test_utils.py new file mode 100755 index 0000000000..3d0c3b2c2d --- /dev/null +++ b/external/gtest/test/gtest_xml_test_utils.py @@ -0,0 +1,194 @@ +#!/usr/bin/env python +# +# Copyright 2006, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Unit test utilities for gtest_xml_output""" + +__author__ = 'eefacm@gmail.com (Sean Mcafee)' + +import re +from xml.dom import minidom, Node + +import gtest_test_utils + + +GTEST_OUTPUT_FLAG = '--gtest_output' +GTEST_DEFAULT_OUTPUT_FILE = 'test_detail.xml' + +class GTestXMLTestCase(gtest_test_utils.TestCase): + """ + Base class for tests of Google Test's XML output functionality. + """ + + + def AssertEquivalentNodes(self, expected_node, actual_node): + """ + Asserts that actual_node (a DOM node object) is equivalent to + expected_node (another DOM node object), in that either both of + them are CDATA nodes and have the same value, or both are DOM + elements and actual_node meets all of the following conditions: + + * It has the same tag name as expected_node. + * It has the same set of attributes as expected_node, each with + the same value as the corresponding attribute of expected_node. 
+      Exceptions are any attribute named "time", which needs only be
+      convertible to a floating-point number and any attribute named
+      "type_param" which only has to be non-empty.
+    * It has an equivalent set of child nodes (including elements and
+      CDATA sections) as expected_node.  Note that we ignore the
+      order of the children as they are not guaranteed to be in any
+      particular order.
+    """
+
+    if expected_node.nodeType == Node.CDATA_SECTION_NODE:
+      self.assertEquals(Node.CDATA_SECTION_NODE, actual_node.nodeType)
+      self.assertEquals(expected_node.nodeValue, actual_node.nodeValue)
+      return
+
+    self.assertEquals(Node.ELEMENT_NODE, actual_node.nodeType)
+    self.assertEquals(Node.ELEMENT_NODE, expected_node.nodeType)
+    self.assertEquals(expected_node.tagName, actual_node.tagName)
+
+    expected_attributes = expected_node.attributes
+    actual_attributes = actual_node.attributes
+    self.assertEquals(
+        expected_attributes.length, actual_attributes.length,
+        'attribute numbers differ in element %s:\nExpected: %r\nActual: %r' % (
+            actual_node.tagName, expected_attributes.keys(),
+            actual_attributes.keys()))
+    for i in range(expected_attributes.length):
+      expected_attr = expected_attributes.item(i)
+      actual_attr = actual_attributes.get(expected_attr.name)
+      self.assert_(
+          actual_attr is not None,
+          'expected attribute %s not found in element %s' %
+          (expected_attr.name, actual_node.tagName))
+      self.assertEquals(
+          expected_attr.value, actual_attr.value,
+          ' values of attribute %s in element %s differ: %s vs %s' %
+          (expected_attr.name, actual_node.tagName,
+           expected_attr.value, actual_attr.value))
+
+    expected_children = self._GetChildren(expected_node)
+    actual_children = self._GetChildren(actual_node)
+    self.assertEquals(
+        len(expected_children), len(actual_children),
+        'number of child elements differ in element ' + actual_node.tagName)
+    for child_id, child in expected_children.iteritems():
+      self.assert_(child_id in actual_children,
+                   '<%s> is not in <%s> (in element %s)' %
+                   (child_id, actual_children, actual_node.tagName))
+      self.AssertEquivalentNodes(child, actual_children[child_id])
+
+  identifying_attribute = {
+      'testsuites': 'name',
+      'testsuite': 'name',
+      'testcase': 'name',
+      'failure': 'message',
+      }
+
+  def _GetChildren(self, element):
+    """
+    Fetches all of the child nodes of element, a DOM Element object.
+    Returns them as the values of a dictionary keyed by the IDs of the
+    children.  For <testsuites>, <testsuite> and <testcase> elements, the ID
+    is the value of their "name" attribute; for <failure> elements, it is
+    the value of the "message" attribute; CDATA sections and non-whitespace
+    text nodes are concatenated into a single CDATA section with ID
+    "detail".  An exception is raised if any element other than the above
+    four is encountered, if two child elements with the same identifying
+    attributes are encountered, or if any other type of node is encountered.
+    """
+
+    children = {}
+    for child in element.childNodes:
+      if child.nodeType == Node.ELEMENT_NODE:
+        self.assert_(child.tagName in self.identifying_attribute,
+                     'Encountered unknown element <%s>' % child.tagName)
+        childID = child.getAttribute(self.identifying_attribute[child.tagName])
+        self.assert_(childID not in children)
+        children[childID] = child
+      elif child.nodeType in [Node.TEXT_NODE, Node.CDATA_SECTION_NODE]:
+        if 'detail' not in children:
+          if (child.nodeType == Node.CDATA_SECTION_NODE or
+              not child.nodeValue.isspace()):
+            children['detail'] = child.ownerDocument.createCDATASection(
+                child.nodeValue)
+        else:
+          children['detail'].nodeValue += child.nodeValue
+      else:
+        self.fail('Encountered unexpected node type %d' % child.nodeType)
+    return children
+
+  def NormalizeXml(self, element):
+    """
+    Normalizes Google Test's XML output to eliminate references to transient
+    information that may change from run to run.
+
+    * The "time" attribute of <testsuites>, <testsuite> and <testcase>
+      elements is replaced with a single asterisk, if it contains
+      only digit characters.
+    * The "timestamp" attribute of <testsuites> elements is replaced with a
+      single asterisk, if it contains a valid ISO8601 datetime value.
+    * The "type_param" attribute of <testcase> elements is replaced with a
+      single asterisk (if it is non-empty) as it is the type name returned
+      by the compiler and is platform dependent.
+    * The line info reported in the first line of the "message"
+      attribute and CDATA section of <failure> elements is replaced with the
+      file's basename and a single asterisk for the line number.
+    * The directory names in file paths are removed.
+    * The stack traces are removed.
+    """
+
+    if element.tagName == 'testsuites':
+      timestamp = element.getAttributeNode('timestamp')
+      timestamp.value = re.sub(r'^\d{4}-\d\d-\d\dT\d\d:\d\d:\d\d$',
+                               '*', timestamp.value)
+    if element.tagName in ('testsuites', 'testsuite', 'testcase'):
+      time = element.getAttributeNode('time')
+      time.value = re.sub(r'^\d+(\.\d+)?$', '*', time.value)
+      type_param = element.getAttributeNode('type_param')
+      if type_param and type_param.value:
+        type_param.value = '*'
+    elif element.tagName == 'failure':
+      source_line_pat = r'^.*[/\\](.*:)\d+\n'
+      # Replaces the source line information with a normalized form.
+      message = element.getAttributeNode('message')
+      message.value = re.sub(source_line_pat, '\\1*\n', message.value)
+      for child in element.childNodes:
+        if child.nodeType == Node.CDATA_SECTION_NODE:
+          # Replaces the source line information with a normalized form.
+          cdata = re.sub(source_line_pat, '\\1*\n', child.nodeValue)
+          # Removes the actual stack trace.
+          child.nodeValue = re.sub(r'\nStack trace:\n(.|\n)*',
+                                   '', cdata)
+    for child in element.childNodes:
+      if child.nodeType == Node.ELEMENT_NODE:
+        self.NormalizeXml(child)
diff --git a/external/gtest/test/production.cc b/external/gtest/test/production.cc
new file mode 100755
index 0000000000..8b8a40b44e
--- /dev/null
+++ b/external/gtest/test/production.cc
@@ -0,0 +1,36 @@
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// This is part of the unit test for include/gtest/gtest_prod.h. + +#include "production.h" + +PrivateCode::PrivateCode() : x_(0) {} diff --git a/external/gtest/test/production.h b/external/gtest/test/production.h new file mode 100755 index 0000000000..98fd5e476c --- /dev/null +++ b/external/gtest/test/production.h @@ -0,0 +1,55 @@ +// Copyright 2006, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// This is part of the unit test for include/gtest/gtest_prod.h. + +#ifndef GTEST_TEST_PRODUCTION_H_ +#define GTEST_TEST_PRODUCTION_H_ + +#include "gtest/gtest_prod.h" + +class PrivateCode { + public: + // Declares a friend test that does not use a fixture. 
+ FRIEND_TEST(PrivateCodeTest, CanAccessPrivateMembers); + + // Declares a friend test that uses a fixture. + FRIEND_TEST(PrivateCodeFixtureTest, CanAccessPrivateMembers); + + PrivateCode(); + + int x() const { return x_; } + private: + void set_x(int an_x) { x_ = an_x; } + int x_; +}; + +#endif // GTEST_TEST_PRODUCTION_H_ diff --git a/external/gtest/xcode/Config/DebugProject.xcconfig b/external/gtest/xcode/Config/DebugProject.xcconfig new file mode 100755 index 0000000000..3d68157d5d --- /dev/null +++ b/external/gtest/xcode/Config/DebugProject.xcconfig @@ -0,0 +1,30 @@ +// +// DebugProject.xcconfig +// +// These are Debug Configuration project settings for the gtest framework and +// examples. It is set in the "Based On:" dropdown in the "Project" info +// dialog. +// This file is based on the Xcode Configuration files in: +// http://code.google.com/p/google-toolbox-for-mac/ +// + +#include "General.xcconfig" + +// No optimization +GCC_OPTIMIZATION_LEVEL = 0 + +// Deployment postprocessing is what triggers Xcode to strip, turn it off +DEPLOYMENT_POSTPROCESSING = NO + +// Dead code stripping off +DEAD_CODE_STRIPPING = NO + +// Debug symbols should be on obviously +GCC_GENERATE_DEBUGGING_SYMBOLS = YES + +// Define the DEBUG macro in all debug builds +OTHER_CFLAGS = $(OTHER_CFLAGS) -DDEBUG=1 + +// These are turned off to avoid STL incompatibilities with client code +// // Turns on special C++ STL checks to "encourage" good STL use +// GCC_PREPROCESSOR_DEFINITIONS = $(GCC_PREPROCESSOR_DEFINITIONS) _GLIBCXX_DEBUG_PEDANTIC _GLIBCXX_DEBUG _GLIBCPP_CONCEPT_CHECKS diff --git a/external/gtest/xcode/Config/FrameworkTarget.xcconfig b/external/gtest/xcode/Config/FrameworkTarget.xcconfig new file mode 100755 index 0000000000..357b1c8fbf --- /dev/null +++ b/external/gtest/xcode/Config/FrameworkTarget.xcconfig @@ -0,0 +1,17 @@ +// +// FrameworkTarget.xcconfig +// +// These are Framework target settings for the gtest framework and examples. It +// is set in the "Based On:" dropdown in the "Target" info dialog. +// This file is based on the Xcode Configuration files in: +// http://code.google.com/p/google-toolbox-for-mac/ +// + +// Dynamic libs need to be position independent +GCC_DYNAMIC_NO_PIC = NO + +// Dynamic libs should not have their external symbols stripped. +STRIP_STYLE = non-global + +// Let the user install by specifying the $DSTROOT with xcodebuild +SKIP_INSTALL = NO diff --git a/external/gtest/xcode/Config/General.xcconfig b/external/gtest/xcode/Config/General.xcconfig new file mode 100755 index 0000000000..f23e322272 --- /dev/null +++ b/external/gtest/xcode/Config/General.xcconfig @@ -0,0 +1,41 @@ +// +// General.xcconfig +// +// These are General configuration settings for the gtest framework and +// examples. +// This file is based on the Xcode Configuration files in: +// http://code.google.com/p/google-toolbox-for-mac/ +// + +// Build for PPC and Intel, 32- and 64-bit +ARCHS = i386 x86_64 ppc ppc64 + +// Zerolink prevents link warnings so turn it off +ZERO_LINK = NO + +// Prebinding considered unhelpful in 10.3 and later +PREBINDING = NO + +// Strictest warning policy +WARNING_CFLAGS = -Wall -Werror -Wendif-labels -Wnewline-eof -Wno-sign-compare -Wshadow + +// Work around Xcode bugs by using external strip. 
See: +// http://lists.apple.com/archives/Xcode-users/2006/Feb/msg00050.html +SEPARATE_STRIP = YES + +// Force C99 dialect +GCC_C_LANGUAGE_STANDARD = c99 + +// not sure why apple defaults this on, but it's pretty risky +ALWAYS_SEARCH_USER_PATHS = NO + +// Turn on position dependent code for most cases (overridden where appropriate) +GCC_DYNAMIC_NO_PIC = YES + +// Default SDK and minimum OS version is 10.4 +SDKROOT = $(DEVELOPER_SDK_DIR)/MacOSX10.4u.sdk +MACOSX_DEPLOYMENT_TARGET = 10.4 +GCC_VERSION = 4.0 + +// VERSIONING BUILD SETTINGS (used in Info.plist) +GTEST_VERSIONINFO_ABOUT = © 2008 Google Inc. diff --git a/external/gtest/xcode/Config/ReleaseProject.xcconfig b/external/gtest/xcode/Config/ReleaseProject.xcconfig new file mode 100755 index 0000000000..5349f0a04a --- /dev/null +++ b/external/gtest/xcode/Config/ReleaseProject.xcconfig @@ -0,0 +1,32 @@ +// +// ReleaseProject.xcconfig +// +// These are Release Configuration project settings for the gtest framework +// and examples. It is set in the "Based On:" dropdown in the "Project" info +// dialog. +// This file is based on the Xcode Configuration files in: +// http://code.google.com/p/google-toolbox-for-mac/ +// + +#include "General.xcconfig" + +// subconfig/Release.xcconfig + +// Optimize for space and size (Apple recommendation) +GCC_OPTIMIZATION_LEVEL = s + +// Deployment postprocessing is what triggers Xcode to strip +DEPLOYMENT_POSTPROCESSING = YES + +// No symbols +GCC_GENERATE_DEBUGGING_SYMBOLS = NO + +// Dead code strip does not affect ObjC code but can help for C +DEAD_CODE_STRIPPING = YES + +// NDEBUG is used by things like assert.h, so define it for general compat. +// ASSERT going away in release tends to create unused vars. +OTHER_CFLAGS = $(OTHER_CFLAGS) -DNDEBUG=1 -Wno-unused-variable + +// When we strip we want to strip all symbols in release, but save externals. +STRIP_STYLE = all diff --git a/external/gtest/xcode/Config/StaticLibraryTarget.xcconfig b/external/gtest/xcode/Config/StaticLibraryTarget.xcconfig new file mode 100755 index 0000000000..3922fa51d5 --- /dev/null +++ b/external/gtest/xcode/Config/StaticLibraryTarget.xcconfig @@ -0,0 +1,18 @@ +// +// StaticLibraryTarget.xcconfig +// +// These are static library target settings for libgtest.a. It +// is set in the "Based On:" dropdown in the "Target" info dialog. +// This file is based on the Xcode Configuration files in: +// http://code.google.com/p/google-toolbox-for-mac/ +// + +// Static libs can be included in bundles so make them position independent +GCC_DYNAMIC_NO_PIC = NO + +// Static libs should not have their internal globals or external symbols +// stripped. +STRIP_STYLE = debugging + +// Let the user install by specifying the $DSTROOT with xcodebuild +SKIP_INSTALL = NO diff --git a/external/gtest/xcode/Config/TestTarget.xcconfig b/external/gtest/xcode/Config/TestTarget.xcconfig new file mode 100755 index 0000000000..e6652ba859 --- /dev/null +++ b/external/gtest/xcode/Config/TestTarget.xcconfig @@ -0,0 +1,8 @@ +// +// TestTarget.xcconfig +// +// These are Test target settings for the gtest framework and examples. It +// is set in the "Based On:" dropdown in the "Target" info dialog.
+ +PRODUCT_NAME = $(TARGET_NAME) +HEADER_SEARCH_PATHS = ../include diff --git a/external/gtest/xcode/Resources/Info.plist b/external/gtest/xcode/Resources/Info.plist new file mode 100755 index 0000000000..9dd28ea148 --- /dev/null +++ b/external/gtest/xcode/Resources/Info.plist @@ -0,0 +1,30 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>CFBundleDevelopmentRegion</key> + <string>English</string> + <key>CFBundleExecutable</key> + <string>${EXECUTABLE_NAME}</string> + <key>CFBundleIconFile</key> + <string></string> + <key>CFBundleIdentifier</key> + <string>com.google.${PRODUCT_NAME}</string> + <key>CFBundleInfoDictionaryVersion</key> + <string>6.0</string> + <key>CFBundlePackageType</key> + <string>FMWK</string> + <key>CFBundleSignature</key> + <string>????</string> + <key>CFBundleVersion</key> + <string>GTEST_VERSIONINFO_LONG</string> + <key>CFBundleShortVersionString</key> + <string>GTEST_VERSIONINFO_SHORT</string> + <key>CFBundleGetInfoString</key> + <string>${PRODUCT_NAME} GTEST_VERSIONINFO_LONG, ${GTEST_VERSIONINFO_ABOUT}</string> + <key>NSHumanReadableCopyright</key> + <string>${GTEST_VERSIONINFO_ABOUT}</string> + <key>CSResourcesFileMapped</key> + <true/> +</dict> +</plist> diff --git a/external/gtest/xcode/Samples/FrameworkSample/Info.plist b/external/gtest/xcode/Samples/FrameworkSample/Info.plist new file mode 100755 index 0000000000..f3852edea2 --- /dev/null +++ b/external/gtest/xcode/Samples/FrameworkSample/Info.plist @@ -0,0 +1,28 @@ +<?xml version="1.0" encoding="UTF-8"?> +<!DOCTYPE plist PUBLIC "-//Apple//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd"> +<plist version="1.0"> +<dict> + <key>CFBundleDevelopmentRegion</key> + <string>English</string> + <key>CFBundleExecutable</key> + <string>${EXECUTABLE_NAME}</string> + <key>CFBundleIconFile</key> + <string></string> + <key>CFBundleIdentifier</key> + <string>com.google.gtest.${PRODUCT_NAME:identifier}</string> + <key>CFBundleInfoDictionaryVersion</key> + <string>6.0</string> + <key>CFBundleName</key> + <string>${PRODUCT_NAME}</string> + <key>CFBundlePackageType</key> + <string>FMWK</string> + <key>CFBundleShortVersionString</key> + <string>1.0</string> + <key>CFBundleSignature</key> + <string>????</string> + <key>CFBundleVersion</key> + <string>1.0</string> + <key>CSResourcesFileMapped</key> + <true/> +</dict> +</plist> diff --git a/external/gtest/xcode/Samples/FrameworkSample/WidgetFramework.xcodeproj/project.pbxproj b/external/gtest/xcode/Samples/FrameworkSample/WidgetFramework.xcodeproj/project.pbxproj new file mode 100755 index 0000000000..497617eb68 --- /dev/null +++ b/external/gtest/xcode/Samples/FrameworkSample/WidgetFramework.xcodeproj/project.pbxproj @@ -0,0 +1,457 @@ +// !$*UTF8*$!
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 42; + objects = { + +/* Begin PBXAggregateTarget section */ + 4024D162113D7D2400C7059E /* Test */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 4024D169113D7D4600C7059E /* Build configuration list for PBXAggregateTarget "Test" */; + buildPhases = ( + 4024D161113D7D2400C7059E /* ShellScript */, + ); + dependencies = ( + 4024D166113D7D3100C7059E /* PBXTargetDependency */, + ); + name = Test; + productName = TestAndBuild; + }; + 4024D1E9113D83FF00C7059E /* TestAndBuild */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 4024D1F0113D842B00C7059E /* Build configuration list for PBXAggregateTarget "TestAndBuild" */; + buildPhases = ( + ); + dependencies = ( + 4024D1ED113D840900C7059E /* PBXTargetDependency */, + 4024D1EF113D840D00C7059E /* PBXTargetDependency */, + ); + name = TestAndBuild; + productName = TestAndBuild; + }; +/* End PBXAggregateTarget section */ + +/* Begin PBXBuildFile section */ + 3B7EB1250E5AEE3500C7F239 /* widget.cc in Sources */ = {isa = PBXBuildFile; fileRef = 3B7EB1230E5AEE3500C7F239 /* widget.cc */; }; + 3B7EB1260E5AEE3500C7F239 /* widget.h in Headers */ = {isa = PBXBuildFile; fileRef = 3B7EB1240E5AEE3500C7F239 /* widget.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 3B7EB1280E5AEE4600C7F239 /* widget_test.cc in Sources */ = {isa = PBXBuildFile; fileRef = 3B7EB1270E5AEE4600C7F239 /* widget_test.cc */; }; + 3B7EB1480E5AF3B400C7F239 /* Widget.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 8D07F2C80486CC7A007CD1D0 /* Widget.framework */; }; + 4024D188113D7D7800C7059E /* libgtest.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 4024D185113D7D5500C7059E /* libgtest.a */; }; + 4024D189113D7D7A00C7059E /* libgtest_main.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 4024D183113D7D5500C7059E /* libgtest_main.a */; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 3B07BDF00E3F3FAE00647869 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 8D07F2BC0486CC7A007CD1D0; + remoteInfo = gTestExample; + }; + 4024D165113D7D3100C7059E /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 3B07BDE90E3F3F9E00647869; + remoteInfo = WidgetFrameworkTest; + }; + 4024D1EC113D840900C7059E /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 8D07F2BC0486CC7A007CD1D0; + remoteInfo = WidgetFramework; + }; + 4024D1EE113D840D00C7059E /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4024D162113D7D2400C7059E; + remoteInfo = Test; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXFileReference section */ + 3B07BDEA0E3F3F9E00647869 /* WidgetFrameworkTest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = WidgetFrameworkTest; sourceTree = BUILT_PRODUCTS_DIR; }; + 3B7EB1230E5AEE3500C7F239 /* widget.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = widget.cc; sourceTree = ""; }; + 3B7EB1240E5AEE3500C7F239 /* widget.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = 
sourcecode.c.h; path = widget.h; sourceTree = ""; }; + 3B7EB1270E5AEE4600C7F239 /* widget_test.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = widget_test.cc; sourceTree = ""; }; + 4024D183113D7D5500C7059E /* libgtest_main.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libgtest_main.a; path = /usr/local/lib/libgtest_main.a; sourceTree = ""; }; + 4024D185113D7D5500C7059E /* libgtest.a */ = {isa = PBXFileReference; lastKnownFileType = archive.ar; name = libgtest.a; path = /usr/local/lib/libgtest.a; sourceTree = ""; }; + 4024D1E2113D838200C7059E /* runtests.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = runtests.sh; sourceTree = ""; }; + 8D07F2C70486CC7A007CD1D0 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist; path = Info.plist; sourceTree = ""; }; + 8D07F2C80486CC7A007CD1D0 /* Widget.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = Widget.framework; sourceTree = BUILT_PRODUCTS_DIR; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 3B07BDE80E3F3F9E00647869 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 4024D189113D7D7A00C7059E /* libgtest_main.a in Frameworks */, + 4024D188113D7D7800C7059E /* libgtest.a in Frameworks */, + 3B7EB1480E5AF3B400C7F239 /* Widget.framework in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 8D07F2C30486CC7A007CD1D0 /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 034768DDFF38A45A11DB9C8B /* Products */ = { + isa = PBXGroup; + children = ( + 8D07F2C80486CC7A007CD1D0 /* Widget.framework */, + 3B07BDEA0E3F3F9E00647869 /* WidgetFrameworkTest */, + ); + name = Products; + sourceTree = ""; + }; + 0867D691FE84028FC02AAC07 /* gTestExample */ = { + isa = PBXGroup; + children = ( + 4024D1E1113D836C00C7059E /* Scripts */, + 08FB77ACFE841707C02AAC07 /* Source */, + 089C1665FE841158C02AAC07 /* Resources */, + 3B07BE350E4094E400647869 /* Test */, + 0867D69AFE84028FC02AAC07 /* External Frameworks and Libraries */, + 034768DDFF38A45A11DB9C8B /* Products */, + ); + name = gTestExample; + sourceTree = ""; + }; + 0867D69AFE84028FC02AAC07 /* External Frameworks and Libraries */ = { + isa = PBXGroup; + children = ( + 4024D183113D7D5500C7059E /* libgtest_main.a */, + 4024D185113D7D5500C7059E /* libgtest.a */, + ); + name = "External Frameworks and Libraries"; + sourceTree = ""; + }; + 089C1665FE841158C02AAC07 /* Resources */ = { + isa = PBXGroup; + children = ( + 8D07F2C70486CC7A007CD1D0 /* Info.plist */, + ); + name = Resources; + sourceTree = ""; + }; + 08FB77ACFE841707C02AAC07 /* Source */ = { + isa = PBXGroup; + children = ( + 3B7EB1230E5AEE3500C7F239 /* widget.cc */, + 3B7EB1240E5AEE3500C7F239 /* widget.h */, + ); + name = Source; + sourceTree = ""; + }; + 3B07BE350E4094E400647869 /* Test */ = { + isa = PBXGroup; + children = ( + 3B7EB1270E5AEE4600C7F239 /* widget_test.cc */, + ); + name = Test; + sourceTree = ""; + }; + 4024D1E1113D836C00C7059E /* Scripts */ = { + isa = PBXGroup; + children = ( + 4024D1E2113D838200C7059E /* runtests.sh */, + ); + name = Scripts; + sourceTree = ""; + }; +/* End PBXGroup section */ + +/* Begin PBXHeadersBuildPhase section */ + 
8D07F2BD0486CC7A007CD1D0 /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + 3B7EB1260E5AEE3500C7F239 /* widget.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXHeadersBuildPhase section */ + +/* Begin PBXNativeTarget section */ + 3B07BDE90E3F3F9E00647869 /* WidgetFrameworkTest */ = { + isa = PBXNativeTarget; + buildConfigurationList = 3B07BDF40E3F3FB600647869 /* Build configuration list for PBXNativeTarget "WidgetFrameworkTest" */; + buildPhases = ( + 3B07BDE70E3F3F9E00647869 /* Sources */, + 3B07BDE80E3F3F9E00647869 /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + 3B07BDF10E3F3FAE00647869 /* PBXTargetDependency */, + ); + name = WidgetFrameworkTest; + productName = gTestExampleTest; + productReference = 3B07BDEA0E3F3F9E00647869 /* WidgetFrameworkTest */; + productType = "com.apple.product-type.tool"; + }; + 8D07F2BC0486CC7A007CD1D0 /* WidgetFramework */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4FADC24208B4156D00ABE55E /* Build configuration list for PBXNativeTarget "WidgetFramework" */; + buildPhases = ( + 8D07F2C10486CC7A007CD1D0 /* Sources */, + 8D07F2C30486CC7A007CD1D0 /* Frameworks */, + 8D07F2BD0486CC7A007CD1D0 /* Headers */, + 8D07F2BF0486CC7A007CD1D0 /* Resources */, + 8D07F2C50486CC7A007CD1D0 /* Rez */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = WidgetFramework; + productInstallPath = "$(HOME)/Library/Frameworks"; + productName = gTestExample; + productReference = 8D07F2C80486CC7A007CD1D0 /* Widget.framework */; + productType = "com.apple.product-type.framework"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 0867D690FE84028FC02AAC07 /* Project object */ = { + isa = PBXProject; + buildConfigurationList = 4FADC24608B4156D00ABE55E /* Build configuration list for PBXProject "WidgetFramework" */; + compatibilityVersion = "Xcode 2.4"; + hasScannedForEncodings = 1; + mainGroup = 0867D691FE84028FC02AAC07 /* gTestExample */; + productRefGroup = 034768DDFF38A45A11DB9C8B /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 8D07F2BC0486CC7A007CD1D0 /* WidgetFramework */, + 3B07BDE90E3F3F9E00647869 /* WidgetFrameworkTest */, + 4024D162113D7D2400C7059E /* Test */, + 4024D1E9113D83FF00C7059E /* TestAndBuild */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + 8D07F2BF0486CC7A007CD1D0 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXRezBuildPhase section */ + 8D07F2C50486CC7A007CD1D0 /* Rez */ = { + isa = PBXRezBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXRezBuildPhase section */ + +/* Begin PBXShellScriptBuildPhase section */ + 4024D161113D7D2400C7059E /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "/bin/bash $SRCROOT/runtests.sh $BUILT_PRODUCTS_DIR/WidgetFrameworkTest\n"; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 3B07BDE70E3F3F9E00647869 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3B7EB1280E5AEE4600C7F239 /* widget_test.cc in Sources */, + ); + 
runOnlyForDeploymentPostprocessing = 0; + }; + 8D07F2C10486CC7A007CD1D0 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 3B7EB1250E5AEE3500C7F239 /* widget.cc in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + 3B07BDF10E3F3FAE00647869 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 8D07F2BC0486CC7A007CD1D0 /* WidgetFramework */; + targetProxy = 3B07BDF00E3F3FAE00647869 /* PBXContainerItemProxy */; + }; + 4024D166113D7D3100C7059E /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 3B07BDE90E3F3F9E00647869 /* WidgetFrameworkTest */; + targetProxy = 4024D165113D7D3100C7059E /* PBXContainerItemProxy */; + }; + 4024D1ED113D840900C7059E /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 8D07F2BC0486CC7A007CD1D0 /* WidgetFramework */; + targetProxy = 4024D1EC113D840900C7059E /* PBXContainerItemProxy */; + }; + 4024D1EF113D840D00C7059E /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4024D162113D7D2400C7059E /* Test */; + targetProxy = 4024D1EE113D840D00C7059E /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin XCBuildConfiguration section */ + 3B07BDEC0E3F3F9F00647869 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = WidgetFrameworkTest; + }; + name = Debug; + }; + 3B07BDED0E3F3F9F00647869 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = WidgetFrameworkTest; + }; + name = Release; + }; + 4024D163113D7D2400C7059E /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = TestAndBuild; + }; + name = Debug; + }; + 4024D164113D7D2400C7059E /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = TestAndBuild; + }; + name = Release; + }; + 4024D1EA113D83FF00C7059E /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = TestAndBuild; + }; + name = Debug; + }; + 4024D1EB113D83FF00C7059E /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + PRODUCT_NAME = TestAndBuild; + }; + name = Release; + }; + 4FADC24308B4156D00ABE55E /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + FRAMEWORK_VERSION = A; + INFOPLIST_FILE = Info.plist; + INSTALL_PATH = "@loader_path/../Frameworks"; + PRODUCT_NAME = Widget; + }; + name = Debug; + }; + 4FADC24408B4156D00ABE55E /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + FRAMEWORK_VERSION = A; + INFOPLIST_FILE = Info.plist; + INSTALL_PATH = "@loader_path/../Frameworks"; + PRODUCT_NAME = Widget; + }; + name = Release; + }; + 4FADC24708B4156D00ABE55E /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_VERSION = 4.0; + SDKROOT = /Developer/SDKs/MacOSX10.4u.sdk; + }; + name = Debug; + }; + 4FADC24808B4156D00ABE55E /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_VERSION = 4.0; + SDKROOT = /Developer/SDKs/MacOSX10.4u.sdk; + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 3B07BDF40E3F3FB600647869 /* Build configuration list for PBXNativeTarget "WidgetFrameworkTest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3B07BDEC0E3F3F9F00647869 /* Debug */, + 3B07BDED0E3F3F9F00647869 /* 
Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4024D169113D7D4600C7059E /* Build configuration list for PBXAggregateTarget "Test" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4024D163113D7D2400C7059E /* Debug */, + 4024D164113D7D2400C7059E /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4024D1F0113D842B00C7059E /* Build configuration list for PBXAggregateTarget "TestAndBuild" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4024D1EA113D83FF00C7059E /* Debug */, + 4024D1EB113D83FF00C7059E /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4FADC24208B4156D00ABE55E /* Build configuration list for PBXNativeTarget "WidgetFramework" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4FADC24308B4156D00ABE55E /* Debug */, + 4FADC24408B4156D00ABE55E /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4FADC24608B4156D00ABE55E /* Build configuration list for PBXProject "WidgetFramework" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4FADC24708B4156D00ABE55E /* Debug */, + 4FADC24808B4156D00ABE55E /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 0867D690FE84028FC02AAC07 /* Project object */; +} diff --git a/external/gtest/xcode/Samples/FrameworkSample/runtests.sh b/external/gtest/xcode/Samples/FrameworkSample/runtests.sh new file mode 100755 index 0000000000..4a0d413e52 --- /dev/null +++ b/external/gtest/xcode/Samples/FrameworkSample/runtests.sh @@ -0,0 +1,62 @@ +#!/bin/bash +# +# Copyright 2008, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Executes the samples and tests for the Google Test Framework. + +# Help the dynamic linker find the path to the libraries. 
+export DYLD_FRAMEWORK_PATH=$BUILT_PRODUCTS_DIR +export DYLD_LIBRARY_PATH=$BUILT_PRODUCTS_DIR + +# The test executables are passed in as the script arguments. +test_executables=$@ + +# Now execute each one in turn keeping track of how many succeeded and failed. +succeeded=0 +failed=0 +failed_list=() +for test in ${test_executables[*]}; do + "$test" + result=$? + if [ $result -eq 0 ]; then + succeeded=$(( $succeeded + 1 )) + else + failed=$(( failed + 1 )) + failed_list="$failed_list $test" + fi +done + +# Report the successes and failures to the console. +echo "Tests complete with $succeeded successes and $failed failures." +if [ $failed -ne 0 ]; then + echo "The following tests failed:" + echo $failed_list +fi +exit $failed diff --git a/external/gtest/xcode/Samples/FrameworkSample/widget.cc b/external/gtest/xcode/Samples/FrameworkSample/widget.cc new file mode 100755 index 0000000000..bfc4e7fcfd --- /dev/null +++ b/external/gtest/xcode/Samples/FrameworkSample/widget.cc @@ -0,0 +1,63 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: preston.a.jackson@gmail.com (Preston Jackson) +// +// Google Test - FrameworkSample +// widget.cc +// + +// Widget is a very simple class used for demonstrating the use of gtest. + +#include "widget.h" + +Widget::Widget(int number, const std::string& name) + : number_(number), + name_(name) {} + +Widget::~Widget() {} + +float Widget::GetFloatValue() const { + return number_; +} + +int Widget::GetIntValue() const { + return static_cast<int>(number_); +} + +std::string Widget::GetStringValue() const { + return name_; +} + +void Widget::GetCharPtrValue(char* buffer, size_t max_size) const { + // Copy the char* representation of name_ into buffer, up to max_size.
+ strncpy(buffer, name_.c_str(), max_size-1); + buffer[max_size-1] = '\0'; + return; +} diff --git a/external/gtest/xcode/Samples/FrameworkSample/widget.h b/external/gtest/xcode/Samples/FrameworkSample/widget.h new file mode 100755 index 0000000000..0c55cdc8cf --- /dev/null +++ b/external/gtest/xcode/Samples/FrameworkSample/widget.h @@ -0,0 +1,59 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: preston.a.jackson@gmail.com (Preston Jackson) +// +// Google Test - FrameworkSample +// widget.h +// + +// Widget is a very simple class used for demonstrating the use of gtest. It +// simply stores two values, a string and an integer, which are returned via +// public accessors in multiple forms. + +#import <string> + +class Widget { + public: + Widget(int number, const std::string& name); + ~Widget(); + + // Public accessors to number data + float GetFloatValue() const; + int GetIntValue() const; + + // Public accessors to the string data + std::string GetStringValue() const; + void GetCharPtrValue(char* buffer, size_t max_size) const; + + private: + // Data members + float number_; + std::string name_; +}; diff --git a/external/gtest/xcode/Samples/FrameworkSample/widget_test.cc b/external/gtest/xcode/Samples/FrameworkSample/widget_test.cc new file mode 100755 index 0000000000..8725994218 --- /dev/null +++ b/external/gtest/xcode/Samples/FrameworkSample/widget_test.cc @@ -0,0 +1,68 @@ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc.
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: preston.a.jackson@gmail.com (Preston Jackson) +// +// Google Test - FrameworkSample +// widget_test.cc +// + +// This is a simple test file for the Widget class in the Widget.framework + +#include <string> +#include "gtest/gtest.h" + +#include <Widget/widget.h> + +// This test verifies that the constructor sets the internal state of the +// Widget class correctly. +TEST(WidgetInitializerTest, TestConstructor) { + Widget widget(1.0f, "name"); + EXPECT_FLOAT_EQ(1.0f, widget.GetFloatValue()); + EXPECT_EQ(std::string("name"), widget.GetStringValue()); +} + +// This test verifies the conversion of the float and string values to int and +// char*, respectively. +TEST(WidgetInitializerTest, TestConversion) { + Widget widget(1.0f, "name"); + EXPECT_EQ(1, widget.GetIntValue()); + + size_t max_size = 128; + char buffer[max_size]; + widget.GetCharPtrValue(buffer, max_size); + EXPECT_STREQ("name", buffer); +} + +// Use the Google Test main that is linked into the framework. It does something +// like this: +// int main(int argc, char** argv) { +// testing::InitGoogleTest(&argc, argv); +// return RUN_ALL_TESTS(); +// } diff --git a/external/gtest/xcode/Scripts/runtests.sh b/external/gtest/xcode/Scripts/runtests.sh new file mode 100755 index 0000000000..3fc229f1d4 --- /dev/null +++ b/external/gtest/xcode/Scripts/runtests.sh @@ -0,0 +1,65 @@ +#!/bin/bash +# +# Copyright 2008, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +# Executes the samples and tests for the Google Test Framework. + +# Help the dynamic linker find the path to the libraries. +export DYLD_FRAMEWORK_PATH=$BUILT_PRODUCTS_DIR +export DYLD_LIBRARY_PATH=$BUILT_PRODUCTS_DIR + +# The test executables to run. +test_executables=("$BUILT_PRODUCTS_DIR/gtest_unittest-framework" + "$BUILT_PRODUCTS_DIR/gtest_unittest" + "$BUILT_PRODUCTS_DIR/sample1_unittest-framework" + "$BUILT_PRODUCTS_DIR/sample1_unittest-static") + +# Now execute each one in turn keeping track of how many succeeded and failed. +succeeded=0 +failed=0 +failed_list=() +for test in ${test_executables[*]}; do + "$test" + result=$? + if [ $result -eq 0 ]; then + succeeded=$(( $succeeded + 1 )) + else + failed=$(( failed + 1 )) + failed_list="$failed_list $test" + fi +done + +# Report the successes and failures to the console. +echo "Tests complete with $succeeded successes and $failed failures." +if [ $failed -ne 0 ]; then + echo "The following tests failed:" + echo $failed_list +fi +exit $failed diff --git a/external/gtest/xcode/Scripts/versiongenerate.py b/external/gtest/xcode/Scripts/versiongenerate.py new file mode 100755 index 0000000000..81de8c96ac --- /dev/null +++ b/external/gtest/xcode/Scripts/versiongenerate.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python +# +# Copyright 2008, Google Inc. +# All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""A script to prepare version information for use in the gtest Info.plist file.
+ + This script extracts the version information from the configure.ac file and + uses it to generate a header file containing the same information. The + #defines in this header file will be included during the generation of + the Info.plist of the framework, giving the correct value to the version + shown in the Finder. + + This script makes the following assumptions (these are faults of the script, + not problems with Autoconf): + 1. The AC_INIT macro will be contained within the first 1024 characters + of configure.ac + 2. The version string will be 3 integers separated by periods and will be + surrounded by square brackets, "[" and "]" (e.g. [1.0.1]). The first + segment represents the major version, the second represents the minor + version and the third represents the fix version. + 3. No ")" character exists between the opening "(" and closing ")" of + AC_INIT, including in comments and character strings. +""" + +import sys +import re + +# Read the command line arguments (the input and output directories) +if (len(sys.argv) < 3): + print "Usage: versiongenerate.py input_dir output_dir" + sys.exit(1) +else: + input_dir = sys.argv[1] + output_dir = sys.argv[2] + +# Read the first 1024 characters of the configure.ac file +config_file = open("%s/configure.ac" % input_dir, 'r') +buffer_size = 1024 +opening_string = config_file.read(buffer_size) +config_file.close() + +# Extract the version string from the AC_INIT macro +# The following version_expression means: +# Extract three integers separated by periods and surrounded by square +# brackets (e.g. "[1.0.1]") between "AC_INIT(" and ")". Do not be greedy +# (*? is the non-greedy flag) since that would pull in everything between +# the first "(" and the last ")" in the file. +version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)", + re.DOTALL) +version_values = version_expression.search(opening_string) +major_version = version_values.group(1) +minor_version = version_values.group(2) +fix_version = version_values.group(3) + +# Write the version information to a header file to be included in the +# Info.plist file. +file_data = """// +// DO NOT MODIFY THIS FILE (but you can delete it) +// +// This file is autogenerated by the versiongenerate.py script. This script +// is executed in a "Run Script" build phase when creating gtest.framework. This +// header file is not used during compilation of C-source. Rather, it simply +// defines some version strings for substitution in the Info.plist. Because of +// this, we are not restricted to C-syntax nor are we using include guards. +// + +#define GTEST_VERSIONINFO_SHORT %s.%s +#define GTEST_VERSIONINFO_LONG %s.%s.%s + +""" % (major_version, minor_version, major_version, minor_version, fix_version) +version_file = open("%s/Version.h" % output_dir, 'w') +version_file.write(file_data) +version_file.close() diff --git a/external/gtest/xcode/gtest.xcodeproj/project.pbxproj b/external/gtest/xcode/gtest.xcodeproj/project.pbxproj new file mode 100755 index 0000000000..0452a63d0c --- /dev/null +++ b/external/gtest/xcode/gtest.xcodeproj/project.pbxproj @@ -0,0 +1,1135 @@ +// !$*UTF8*$!
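One more aside, on the versiongenerate.py script above: the AC_INIT extraction is just a non-greedy regular expression, and it can be exercised on its own. A minimal sketch; the configure.ac fragment is illustrative sample input, while the pattern is the one the script uses:

import re

# Illustrative opening of a configure.ac file.
opening_string = ('AC_INIT([Google C++ Testing Framework],\n'
                  '        [1.6.0],\n'
                  '        [googletestframework@googlegroups.com])')

# Non-greedy (.*?) keeps the match inside a single AC_INIT(...) invocation.
version_expression = re.compile(r"AC_INIT\(.*?\[(\d+)\.(\d+)\.(\d+)\].*?\)",
                                re.DOTALL)
match = version_expression.search(opening_string)
print(match.groups())  # ('1', '6', '0'): major, minor and fix versions

A greedy .* would instead run on toward the last ')' in the 1024-byte buffer, which is why the script's docstring requires that no stray ')' appear inside the AC_INIT invocation.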
+{ + archiveVersion = 1; + classes = { + }; + objectVersion = 46; + objects = { + +/* Begin PBXAggregateTarget section */ + 3B238F5F0E828B5400846E11 /* Check */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 3B238FA30E828BB600846E11 /* Build configuration list for PBXAggregateTarget "Check" */; + buildPhases = ( + 3B238F5E0E828B5400846E11 /* ShellScript */, + ); + dependencies = ( + 40899F9D0FFA740F000B29AE /* PBXTargetDependency */, + 40C849F7101A43440083642A /* PBXTargetDependency */, + 4089A0980FFAD34A000B29AE /* PBXTargetDependency */, + 40C849F9101A43490083642A /* PBXTargetDependency */, + ); + name = Check; + productName = Check; + }; + 40C44ADC0E3798F4008FCC51 /* Version Info */ = { + isa = PBXAggregateTarget; + buildConfigurationList = 40C44AE40E379905008FCC51 /* Build configuration list for PBXAggregateTarget "Version Info" */; + buildPhases = ( + 40C44ADB0E3798F4008FCC51 /* Generate Version.h */, + ); + comments = "The generation of Version.h must be performed in its own target. Since the Info.plist is preprocessed before any of the other build phases in gtest, the Version.h file would not be ready if included as a build phase of that target."; + dependencies = ( + ); + name = "Version Info"; + productName = Version.h; + }; +/* End PBXAggregateTarget section */ + +/* Begin PBXBuildFile section */ + 224A12A30E9EADCC00BD17FD /* gtest-test-part.h in Headers */ = {isa = PBXBuildFile; fileRef = 224A12A20E9EADCC00BD17FD /* gtest-test-part.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 3BF6F2A00E79B5AD000F2EEE /* gtest-type-util.h in Copy Headers Internal */ = {isa = PBXBuildFile; fileRef = 3BF6F29F0E79B5AD000F2EEE /* gtest-type-util.h */; }; + 3BF6F2A50E79B616000F2EEE /* gtest-typed-test.h in Headers */ = {isa = PBXBuildFile; fileRef = 3BF6F2A40E79B616000F2EEE /* gtest-typed-test.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 404884380E2F799B00CF7658 /* gtest-death-test.h in Headers */ = {isa = PBXBuildFile; fileRef = 404883DB0E2F799B00CF7658 /* gtest-death-test.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 404884390E2F799B00CF7658 /* gtest-message.h in Headers */ = {isa = PBXBuildFile; fileRef = 404883DC0E2F799B00CF7658 /* gtest-message.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 4048843A0E2F799B00CF7658 /* gtest-spi.h in Headers */ = {isa = PBXBuildFile; fileRef = 404883DD0E2F799B00CF7658 /* gtest-spi.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 4048843B0E2F799B00CF7658 /* gtest.h in Headers */ = {isa = PBXBuildFile; fileRef = 404883DE0E2F799B00CF7658 /* gtest.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 4048843C0E2F799B00CF7658 /* gtest_pred_impl.h in Headers */ = {isa = PBXBuildFile; fileRef = 404883DF0E2F799B00CF7658 /* gtest_pred_impl.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 4048843D0E2F799B00CF7658 /* gtest_prod.h in Headers */ = {isa = PBXBuildFile; fileRef = 404883E00E2F799B00CF7658 /* gtest_prod.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 404884500E2F799B00CF7658 /* README in Resources */ = {isa = PBXBuildFile; fileRef = 404883F60E2F799B00CF7658 /* README */; }; + 404884A00E2F7BE600CF7658 /* gtest-death-test-internal.h in Copy Headers Internal */ = {isa = PBXBuildFile; fileRef = 404883E20E2F799B00CF7658 /* gtest-death-test-internal.h */; }; + 404884A10E2F7BE600CF7658 /* gtest-filepath.h in Copy Headers Internal */ = {isa = PBXBuildFile; fileRef = 404883E30E2F799B00CF7658 /* gtest-filepath.h */; }; + 404884A20E2F7BE600CF7658 /* gtest-internal.h in Copy Headers Internal */ = {isa = PBXBuildFile; fileRef = 
404883E40E2F799B00CF7658 /* gtest-internal.h */; }; + 404884A30E2F7BE600CF7658 /* gtest-port.h in Copy Headers Internal */ = {isa = PBXBuildFile; fileRef = 404883E50E2F799B00CF7658 /* gtest-port.h */; }; + 404884A40E2F7BE600CF7658 /* gtest-string.h in Copy Headers Internal */ = {isa = PBXBuildFile; fileRef = 404883E60E2F799B00CF7658 /* gtest-string.h */; }; + 404884AC0E2F7CD900CF7658 /* CHANGES in Resources */ = {isa = PBXBuildFile; fileRef = 404884A90E2F7CD900CF7658 /* CHANGES */; }; + 404884AD0E2F7CD900CF7658 /* CONTRIBUTORS in Resources */ = {isa = PBXBuildFile; fileRef = 404884AA0E2F7CD900CF7658 /* CONTRIBUTORS */; }; + 404884AE0E2F7CD900CF7658 /* LICENSE in Resources */ = {isa = PBXBuildFile; fileRef = 404884AB0E2F7CD900CF7658 /* LICENSE */; }; + 40899F3A0FFA70D4000B29AE /* gtest-all.cc in Sources */ = {isa = PBXBuildFile; fileRef = 224A12A10E9EADA700BD17FD /* gtest-all.cc */; }; + 40899F500FFA7281000B29AE /* gtest-tuple.h in Copy Headers Internal */ = {isa = PBXBuildFile; fileRef = 40899F4D0FFA7271000B29AE /* gtest-tuple.h */; }; + 40899F530FFA72A0000B29AE /* gtest_unittest.cc in Sources */ = {isa = PBXBuildFile; fileRef = 3B238C120E7FE13C00846E11 /* gtest_unittest.cc */; }; + 4089A0440FFAD1BE000B29AE /* sample1.cc in Sources */ = {isa = PBXBuildFile; fileRef = 4089A02C0FFACF7F000B29AE /* sample1.cc */; }; + 4089A0460FFAD1BE000B29AE /* sample1_unittest.cc in Sources */ = {isa = PBXBuildFile; fileRef = 4089A02E0FFACF7F000B29AE /* sample1_unittest.cc */; }; + 40C848FF101A21150083642A /* gtest-all.cc in Sources */ = {isa = PBXBuildFile; fileRef = 224A12A10E9EADA700BD17FD /* gtest-all.cc */; }; + 40C84915101A21DF0083642A /* gtest_main.cc in Sources */ = {isa = PBXBuildFile; fileRef = 4048840D0E2F799B00CF7658 /* gtest_main.cc */; }; + 40C84916101A235B0083642A /* libgtest_main.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 40C8490B101A217E0083642A /* libgtest_main.a */; }; + 40C84921101A23AD0083642A /* libgtest_main.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 40C8490B101A217E0083642A /* libgtest_main.a */; }; + 40C84978101A36540083642A /* libgtest_main.a in Resources */ = {isa = PBXBuildFile; fileRef = 40C8490B101A217E0083642A /* libgtest_main.a */; }; + 40C84980101A36850083642A /* gtest_unittest.cc in Sources */ = {isa = PBXBuildFile; fileRef = 3B238C120E7FE13C00846E11 /* gtest_unittest.cc */; }; + 40C84982101A36850083642A /* libgtest.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 40C848FA101A209C0083642A /* libgtest.a */; }; + 40C84983101A36850083642A /* libgtest_main.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 40C8490B101A217E0083642A /* libgtest_main.a */; }; + 40C8498F101A36A60083642A /* sample1.cc in Sources */ = {isa = PBXBuildFile; fileRef = 4089A02C0FFACF7F000B29AE /* sample1.cc */; }; + 40C84990101A36A60083642A /* sample1_unittest.cc in Sources */ = {isa = PBXBuildFile; fileRef = 4089A02E0FFACF7F000B29AE /* sample1_unittest.cc */; }; + 40C84992101A36A60083642A /* libgtest.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 40C848FA101A209C0083642A /* libgtest.a */; }; + 40C84993101A36A60083642A /* libgtest_main.a in Frameworks */ = {isa = PBXBuildFile; fileRef = 40C8490B101A217E0083642A /* libgtest_main.a */; }; + 40C849A2101A37050083642A /* gtest.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 4539C8FF0EC27F6400A70F4C /* gtest.framework */; }; + 40C849A4101A37150083642A /* gtest.framework in Frameworks */ = {isa = PBXBuildFile; fileRef = 4539C8FF0EC27F6400A70F4C /* gtest.framework */; }; + 4539C9340EC280AE00A70F4C /* gtest-param-test.h in 
Headers */ = {isa = PBXBuildFile; fileRef = 4539C9330EC280AE00A70F4C /* gtest-param-test.h */; settings = {ATTRIBUTES = (Public, ); }; }; + 4539C9380EC280E200A70F4C /* gtest-linked_ptr.h in Copy Headers Internal */ = {isa = PBXBuildFile; fileRef = 4539C9350EC280E200A70F4C /* gtest-linked_ptr.h */; }; + 4539C9390EC280E200A70F4C /* gtest-param-util-generated.h in Copy Headers Internal */ = {isa = PBXBuildFile; fileRef = 4539C9360EC280E200A70F4C /* gtest-param-util-generated.h */; }; + 4539C93A0EC280E200A70F4C /* gtest-param-util.h in Copy Headers Internal */ = {isa = PBXBuildFile; fileRef = 4539C9370EC280E200A70F4C /* gtest-param-util.h */; }; + 4567C8181264FF71007740BE /* gtest-printers.h in Headers */ = {isa = PBXBuildFile; fileRef = 4567C8171264FF71007740BE /* gtest-printers.h */; settings = {ATTRIBUTES = (Public, ); }; }; +/* End PBXBuildFile section */ + +/* Begin PBXContainerItemProxy section */ + 40899F9C0FFA740F000B29AE /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 40899F420FFA7184000B29AE; + remoteInfo = gtest_unittest; + }; + 4089A0970FFAD34A000B29AE /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 4089A0120FFACEFC000B29AE; + remoteInfo = sample1_unittest; + }; + 408BEC0F1046CFE900DEF522 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 40C848F9101A209C0083642A; + remoteInfo = "gtest-static"; + }; + 40C44AE50E379922008FCC51 /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 40C44ADC0E3798F4008FCC51; + remoteInfo = Version.h; + }; + 40C8497C101A36850083642A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 40C848F9101A209C0083642A; + remoteInfo = "gtest-static"; + }; + 40C8497E101A36850083642A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 40C8490A101A217E0083642A; + remoteInfo = "gtest_main-static"; + }; + 40C8498B101A36A60083642A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 40C848F9101A209C0083642A; + remoteInfo = "gtest-static"; + }; + 40C8498D101A36A60083642A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 40C8490A101A217E0083642A; + remoteInfo = "gtest_main-static"; + }; + 40C8499B101A36DC0083642A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 40C8490A101A217E0083642A; + remoteInfo = "gtest_main-static"; + }; + 40C8499D101A36E50083642A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 8D07F2BC0486CC7A007CD1D0; + remoteInfo = "gtest-framework"; + }; + 
40C8499F101A36F10083642A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 8D07F2BC0486CC7A007CD1D0; + remoteInfo = "gtest-framework"; + }; + 40C849F6101A43440083642A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 40C8497A101A36850083642A; + remoteInfo = "gtest_unittest-static"; + }; + 40C849F8101A43490083642A /* PBXContainerItemProxy */ = { + isa = PBXContainerItemProxy; + containerPortal = 0867D690FE84028FC02AAC07 /* Project object */; + proxyType = 1; + remoteGlobalIDString = 40C84989101A36A60083642A; + remoteInfo = "sample1_unittest-static"; + }; +/* End PBXContainerItemProxy section */ + +/* Begin PBXCopyFilesBuildPhase section */ + 404884A50E2F7C0400CF7658 /* Copy Headers Internal */ = { + isa = PBXCopyFilesBuildPhase; + buildActionMask = 2147483647; + dstPath = Headers/internal; + dstSubfolderSpec = 6; + files = ( + 404884A00E2F7BE600CF7658 /* gtest-death-test-internal.h in Copy Headers Internal */, + 404884A10E2F7BE600CF7658 /* gtest-filepath.h in Copy Headers Internal */, + 404884A20E2F7BE600CF7658 /* gtest-internal.h in Copy Headers Internal */, + 4539C9380EC280E200A70F4C /* gtest-linked_ptr.h in Copy Headers Internal */, + 4539C9390EC280E200A70F4C /* gtest-param-util-generated.h in Copy Headers Internal */, + 4539C93A0EC280E200A70F4C /* gtest-param-util.h in Copy Headers Internal */, + 404884A30E2F7BE600CF7658 /* gtest-port.h in Copy Headers Internal */, + 404884A40E2F7BE600CF7658 /* gtest-string.h in Copy Headers Internal */, + 40899F500FFA7281000B29AE /* gtest-tuple.h in Copy Headers Internal */, + 3BF6F2A00E79B5AD000F2EEE /* gtest-type-util.h in Copy Headers Internal */, + ); + name = "Copy Headers Internal"; + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXCopyFilesBuildPhase section */ + +/* Begin PBXFileReference section */ + 224A12A10E9EADA700BD17FD /* gtest-all.cc */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.cpp.cpp; path = "gtest-all.cc"; sourceTree = "<group>"; }; + 224A12A20E9EADCC00BD17FD /* gtest-test-part.h */ = {isa = PBXFileReference; fileEncoding = 30; lastKnownFileType = sourcecode.c.h; path = "gtest-test-part.h"; sourceTree = "<group>"; }; + 3B238C120E7FE13C00846E11 /* gtest_unittest.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = gtest_unittest.cc; sourceTree = "<group>"; }; + 3B87D2100E96B92E000D1852 /* runtests.sh */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.sh; path = runtests.sh; sourceTree = "<group>"; }; + 3BF6F29F0E79B5AD000F2EEE /* gtest-type-util.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-type-util.h"; sourceTree = "<group>"; }; + 3BF6F2A40E79B616000F2EEE /* gtest-typed-test.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-typed-test.h"; sourceTree = "<group>"; }; + 403EE37C0E377822004BD1E2 /* versiongenerate.py */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.script.python; path = versiongenerate.py; sourceTree = "<group>"; }; + 404883DB0E2F799B00CF7658 /* gtest-death-test.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-death-test.h"; sourceTree = "<group>"; }; + 404883DC0E2F799B00CF7658 /* gtest-message.h */ = {isa = PBXFileReference; fileEncoding = 4;
lastKnownFileType = sourcecode.c.h; path = "gtest-message.h"; sourceTree = "<group>"; }; + 404883DD0E2F799B00CF7658 /* gtest-spi.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-spi.h"; sourceTree = "<group>"; }; + 404883DE0E2F799B00CF7658 /* gtest.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = gtest.h; sourceTree = "<group>"; }; + 404883DF0E2F799B00CF7658 /* gtest_pred_impl.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = gtest_pred_impl.h; sourceTree = "<group>"; }; + 404883E00E2F799B00CF7658 /* gtest_prod.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = gtest_prod.h; sourceTree = "<group>"; }; + 404883E20E2F799B00CF7658 /* gtest-death-test-internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-death-test-internal.h"; sourceTree = "<group>"; }; + 404883E30E2F799B00CF7658 /* gtest-filepath.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-filepath.h"; sourceTree = "<group>"; }; + 404883E40E2F799B00CF7658 /* gtest-internal.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-internal.h"; sourceTree = "<group>"; }; + 404883E50E2F799B00CF7658 /* gtest-port.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-port.h"; sourceTree = "<group>"; }; + 404883E60E2F799B00CF7658 /* gtest-string.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-string.h"; sourceTree = "<group>"; }; + 404883F60E2F799B00CF7658 /* README */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = README; path = ../README; sourceTree = SOURCE_ROOT; }; + 4048840D0E2F799B00CF7658 /* gtest_main.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = gtest_main.cc; sourceTree = "<group>"; }; + 404884A90E2F7CD900CF7658 /* CHANGES */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = CHANGES; path = ../CHANGES; sourceTree = SOURCE_ROOT; }; + 404884AA0E2F7CD900CF7658 /* CONTRIBUTORS */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = CONTRIBUTORS; path = ../CONTRIBUTORS; sourceTree = SOURCE_ROOT; }; + 404884AB0E2F7CD900CF7658 /* LICENSE */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text; name = LICENSE; path = ../LICENSE; sourceTree = SOURCE_ROOT; }; + 40899F430FFA7184000B29AE /* gtest_unittest-framework */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "gtest_unittest-framework"; sourceTree = BUILT_PRODUCTS_DIR; }; + 40899F4D0FFA7271000B29AE /* gtest-tuple.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-tuple.h"; sourceTree = "<group>"; }; + 40899FB30FFA7567000B29AE /* StaticLibraryTarget.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = StaticLibraryTarget.xcconfig; sourceTree = "<group>"; }; + 4089A0130FFACEFC000B29AE /* sample1_unittest-framework */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "sample1_unittest-framework"; sourceTree = BUILT_PRODUCTS_DIR; }; + 4089A02C0FFACF7F000B29AE /* sample1.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = sample1.cc; sourceTree = "<group>"; }; +
4089A02D0FFACF7F000B29AE /* sample1.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = sample1.h; sourceTree = "<group>"; }; + 4089A02E0FFACF7F000B29AE /* sample1_unittest.cc */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = sample1_unittest.cc; sourceTree = "<group>"; }; + 40C848FA101A209C0083642A /* libgtest.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libgtest.a; sourceTree = BUILT_PRODUCTS_DIR; }; + 40C8490B101A217E0083642A /* libgtest_main.a */ = {isa = PBXFileReference; explicitFileType = archive.ar; includeInIndex = 0; path = libgtest_main.a; sourceTree = BUILT_PRODUCTS_DIR; }; + 40C84987101A36850083642A /* gtest_unittest */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = gtest_unittest; sourceTree = BUILT_PRODUCTS_DIR; }; + 40C84997101A36A60083642A /* sample1_unittest-static */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = "sample1_unittest-static"; sourceTree = BUILT_PRODUCTS_DIR; }; + 40D4CDF10E30E07400294801 /* DebugProject.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = DebugProject.xcconfig; sourceTree = "<group>"; }; + 40D4CDF20E30E07400294801 /* FrameworkTarget.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = FrameworkTarget.xcconfig; sourceTree = "<group>"; }; + 40D4CDF30E30E07400294801 /* General.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = General.xcconfig; sourceTree = "<group>"; }; + 40D4CDF40E30E07400294801 /* ReleaseProject.xcconfig */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.xcconfig; path = ReleaseProject.xcconfig; sourceTree = "<group>"; }; + 40D4CF510E30F5E200294801 /* Info.plist */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = text.plist.xml; path = Info.plist; sourceTree = "<group>"; }; + 4539C8FF0EC27F6400A70F4C /* gtest.framework */ = {isa = PBXFileReference; explicitFileType = wrapper.framework; includeInIndex = 0; path = gtest.framework; sourceTree = BUILT_PRODUCTS_DIR; }; + 4539C9330EC280AE00A70F4C /* gtest-param-test.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-param-test.h"; sourceTree = "<group>"; }; + 4539C9350EC280E200A70F4C /* gtest-linked_ptr.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-linked_ptr.h"; sourceTree = "<group>"; }; + 4539C9360EC280E200A70F4C /* gtest-param-util-generated.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-param-util-generated.h"; sourceTree = "<group>"; }; + 4539C9370EC280E200A70F4C /* gtest-param-util.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-param-util.h"; sourceTree = "<group>"; }; + 4567C8171264FF71007740BE /* gtest-printers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = "gtest-printers.h"; sourceTree = "<group>"; }; +/* End PBXFileReference section */ + +/* Begin PBXFrameworksBuildPhase section */ + 40899F410FFA7184000B29AE /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 40C849A4101A37150083642A /* gtest.framework in Frameworks */, + 40C84916101A235B0083642A /* libgtest_main.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +
4089A0110FFACEFC000B29AE /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 40C849A2101A37050083642A /* gtest.framework in Frameworks */, + 40C84921101A23AD0083642A /* libgtest_main.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 40C84981101A36850083642A /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 40C84982101A36850083642A /* libgtest.a in Frameworks */, + 40C84983101A36850083642A /* libgtest_main.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 40C84991101A36A60083642A /* Frameworks */ = { + isa = PBXFrameworksBuildPhase; + buildActionMask = 2147483647; + files = ( + 40C84992101A36A60083642A /* libgtest.a in Frameworks */, + 40C84993101A36A60083642A /* libgtest_main.a in Frameworks */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXFrameworksBuildPhase section */ + +/* Begin PBXGroup section */ + 034768DDFF38A45A11DB9C8B /* Products */ = { + isa = PBXGroup; + children = ( + 4539C8FF0EC27F6400A70F4C /* gtest.framework */, + 40C848FA101A209C0083642A /* libgtest.a */, + 40C8490B101A217E0083642A /* libgtest_main.a */, + 40899F430FFA7184000B29AE /* gtest_unittest-framework */, + 40C84987101A36850083642A /* gtest_unittest */, + 4089A0130FFACEFC000B29AE /* sample1_unittest-framework */, + 40C84997101A36A60083642A /* sample1_unittest-static */, + ); + name = Products; + sourceTree = "<group>"; + }; + 0867D691FE84028FC02AAC07 /* gtest */ = { + isa = PBXGroup; + children = ( + 40D4CDF00E30E07400294801 /* Config */, + 08FB77ACFE841707C02AAC07 /* Source */, + 40D4CF4E0E30F5E200294801 /* Resources */, + 403EE37B0E377822004BD1E2 /* Scripts */, + 034768DDFF38A45A11DB9C8B /* Products */, + ); + name = gtest; + sourceTree = "<group>"; + }; + 08FB77ACFE841707C02AAC07 /* Source */ = { + isa = PBXGroup; + children = ( + 404884A90E2F7CD900CF7658 /* CHANGES */, + 404884AA0E2F7CD900CF7658 /* CONTRIBUTORS */, + 404884AB0E2F7CD900CF7658 /* LICENSE */, + 404883F60E2F799B00CF7658 /* README */, + 404883D90E2F799B00CF7658 /* include */, + 4089A02F0FFACF84000B29AE /* samples */, + 404884070E2F799B00CF7658 /* src */, + 3B238BF00E7FE13B00846E11 /* test */, + ); + name = Source; + sourceTree = "<group>"; + }; + 3B238BF00E7FE13B00846E11 /* test */ = { + isa = PBXGroup; + children = ( + 3B238C120E7FE13C00846E11 /* gtest_unittest.cc */, + ); + name = test; + path = ../test; + sourceTree = SOURCE_ROOT; + }; + 403EE37B0E377822004BD1E2 /* Scripts */ = { + isa = PBXGroup; + children = ( + 403EE37C0E377822004BD1E2 /* versiongenerate.py */, + 3B87D2100E96B92E000D1852 /* runtests.sh */, + ); + path = Scripts; + sourceTree = "<group>"; + }; + 404883D90E2F799B00CF7658 /* include */ = { + isa = PBXGroup; + children = ( + 404883DA0E2F799B00CF7658 /* gtest */, + ); + name = include; + path = ../include; + sourceTree = SOURCE_ROOT; + }; + 404883DA0E2F799B00CF7658 /* gtest */ = { + isa = PBXGroup; + children = ( + 404883E10E2F799B00CF7658 /* internal */, + 224A12A20E9EADCC00BD17FD /* gtest-test-part.h */, + 404883DB0E2F799B00CF7658 /* gtest-death-test.h */, + 404883DC0E2F799B00CF7658 /* gtest-message.h */, + 4539C9330EC280AE00A70F4C /* gtest-param-test.h */, + 4567C8171264FF71007740BE /* gtest-printers.h */, + 404883DD0E2F799B00CF7658 /* gtest-spi.h */, + 404883DE0E2F799B00CF7658 /* gtest.h */, + 404883DF0E2F799B00CF7658 /* gtest_pred_impl.h */, + 404883E00E2F799B00CF7658 /* gtest_prod.h */, + 3BF6F2A40E79B616000F2EEE /* gtest-typed-test.h */, + ); + path = gtest; + sourceTree = "<group>"; + }; +
404883E10E2F799B00CF7658 /* internal */ = { + isa = PBXGroup; + children = ( + 404883E20E2F799B00CF7658 /* gtest-death-test-internal.h */, + 404883E30E2F799B00CF7658 /* gtest-filepath.h */, + 404883E40E2F799B00CF7658 /* gtest-internal.h */, + 4539C9350EC280E200A70F4C /* gtest-linked_ptr.h */, + 4539C9360EC280E200A70F4C /* gtest-param-util-generated.h */, + 4539C9370EC280E200A70F4C /* gtest-param-util.h */, + 404883E50E2F799B00CF7658 /* gtest-port.h */, + 404883E60E2F799B00CF7658 /* gtest-string.h */, + 40899F4D0FFA7271000B29AE /* gtest-tuple.h */, + 3BF6F29F0E79B5AD000F2EEE /* gtest-type-util.h */, + ); + path = internal; + sourceTree = "<group>"; + }; + 404884070E2F799B00CF7658 /* src */ = { + isa = PBXGroup; + children = ( + 224A12A10E9EADA700BD17FD /* gtest-all.cc */, + 4048840D0E2F799B00CF7658 /* gtest_main.cc */, + ); + name = src; + path = ../src; + sourceTree = SOURCE_ROOT; + }; + 4089A02F0FFACF84000B29AE /* samples */ = { + isa = PBXGroup; + children = ( + 4089A02C0FFACF7F000B29AE /* sample1.cc */, + 4089A02D0FFACF7F000B29AE /* sample1.h */, + 4089A02E0FFACF7F000B29AE /* sample1_unittest.cc */, + ); + name = samples; + path = ../samples; + sourceTree = SOURCE_ROOT; + }; + 40D4CDF00E30E07400294801 /* Config */ = { + isa = PBXGroup; + children = ( + 40D4CDF10E30E07400294801 /* DebugProject.xcconfig */, + 40D4CDF20E30E07400294801 /* FrameworkTarget.xcconfig */, + 40D4CDF30E30E07400294801 /* General.xcconfig */, + 40D4CDF40E30E07400294801 /* ReleaseProject.xcconfig */, + 40899FB30FFA7567000B29AE /* StaticLibraryTarget.xcconfig */, + ); + path = Config; + sourceTree = "<group>"; + }; + 40D4CF4E0E30F5E200294801 /* Resources */ = { + isa = PBXGroup; + children = ( + 40D4CF510E30F5E200294801 /* Info.plist */, + ); + path = Resources; + sourceTree = "<group>"; + }; +/* End PBXGroup section */ + +/* Begin PBXHeadersBuildPhase section */ + 8D07F2BD0486CC7A007CD1D0 /* Headers */ = { + isa = PBXHeadersBuildPhase; + buildActionMask = 2147483647; + files = ( + 404884380E2F799B00CF7658 /* gtest-death-test.h in Headers */, + 404884390E2F799B00CF7658 /* gtest-message.h in Headers */, + 4539C9340EC280AE00A70F4C /* gtest-param-test.h in Headers */, + 4567C8181264FF71007740BE /* gtest-printers.h in Headers */, + 3BF6F2A50E79B616000F2EEE /* gtest-typed-test.h in Headers */, + 4048843A0E2F799B00CF7658 /* gtest-spi.h in Headers */, + 4048843B0E2F799B00CF7658 /* gtest.h in Headers */, + 4048843C0E2F799B00CF7658 /* gtest_pred_impl.h in Headers */, + 4048843D0E2F799B00CF7658 /* gtest_prod.h in Headers */, + 224A12A30E9EADCC00BD17FD /* gtest-test-part.h in Headers */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXHeadersBuildPhase section */ + +/* Begin PBXNativeTarget section */ + 40899F420FFA7184000B29AE /* gtest_unittest-framework */ = { + isa = PBXNativeTarget; + buildConfigurationList = 40899F4A0FFA71BC000B29AE /* Build configuration list for PBXNativeTarget "gtest_unittest-framework" */; + buildPhases = ( + 40899F400FFA7184000B29AE /* Sources */, + 40899F410FFA7184000B29AE /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + 40C849A0101A36F10083642A /* PBXTargetDependency */, + ); + name = "gtest_unittest-framework"; + productName = gtest_unittest; + productReference = 40899F430FFA7184000B29AE /* gtest_unittest-framework */; + productType = "com.apple.product-type.tool"; + }; + 4089A0120FFACEFC000B29AE /* sample1_unittest-framework */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4089A0240FFACF01000B29AE /* Build configuration list for PBXNativeTarget "sample1_unittest-framework" */;
+ buildPhases = ( + 4089A0100FFACEFC000B29AE /* Sources */, + 4089A0110FFACEFC000B29AE /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + 40C8499E101A36E50083642A /* PBXTargetDependency */, + ); + name = "sample1_unittest-framework"; + productName = sample1_unittest; + productReference = 4089A0130FFACEFC000B29AE /* sample1_unittest-framework */; + productType = "com.apple.product-type.tool"; + }; + 40C848F9101A209C0083642A /* gtest-static */ = { + isa = PBXNativeTarget; + buildConfigurationList = 40C84902101A212E0083642A /* Build configuration list for PBXNativeTarget "gtest-static" */; + buildPhases = ( + 40C848F7101A209C0083642A /* Sources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "gtest-static"; + productName = "gtest-static"; + productReference = 40C848FA101A209C0083642A /* libgtest.a */; + productType = "com.apple.product-type.library.static"; + }; + 40C8490A101A217E0083642A /* gtest_main-static */ = { + isa = PBXNativeTarget; + buildConfigurationList = 40C84912101A21D20083642A /* Build configuration list for PBXNativeTarget "gtest_main-static" */; + buildPhases = ( + 40C84908101A217E0083642A /* Sources */, + ); + buildRules = ( + ); + dependencies = ( + ); + name = "gtest_main-static"; + productName = "gtest_main-static"; + productReference = 40C8490B101A217E0083642A /* libgtest_main.a */; + productType = "com.apple.product-type.library.static"; + }; + 40C8497A101A36850083642A /* gtest_unittest-static */ = { + isa = PBXNativeTarget; + buildConfigurationList = 40C84984101A36850083642A /* Build configuration list for PBXNativeTarget "gtest_unittest-static" */; + buildPhases = ( + 40C8497F101A36850083642A /* Sources */, + 40C84981101A36850083642A /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + 40C8497B101A36850083642A /* PBXTargetDependency */, + 40C8497D101A36850083642A /* PBXTargetDependency */, + ); + name = "gtest_unittest-static"; + productName = gtest_unittest; + productReference = 40C84987101A36850083642A /* gtest_unittest */; + productType = "com.apple.product-type.tool"; + }; + 40C84989101A36A60083642A /* sample1_unittest-static */ = { + isa = PBXNativeTarget; + buildConfigurationList = 40C84994101A36A60083642A /* Build configuration list for PBXNativeTarget "sample1_unittest-static" */; + buildPhases = ( + 40C8498E101A36A60083642A /* Sources */, + 40C84991101A36A60083642A /* Frameworks */, + ); + buildRules = ( + ); + dependencies = ( + 40C8498A101A36A60083642A /* PBXTargetDependency */, + 40C8498C101A36A60083642A /* PBXTargetDependency */, + ); + name = "sample1_unittest-static"; + productName = sample1_unittest; + productReference = 40C84997101A36A60083642A /* sample1_unittest-static */; + productType = "com.apple.product-type.tool"; + }; + 8D07F2BC0486CC7A007CD1D0 /* gtest-framework */ = { + isa = PBXNativeTarget; + buildConfigurationList = 4FADC24208B4156D00ABE55E /* Build configuration list for PBXNativeTarget "gtest-framework" */; + buildPhases = ( + 8D07F2C10486CC7A007CD1D0 /* Sources */, + 8D07F2BD0486CC7A007CD1D0 /* Headers */, + 404884A50E2F7C0400CF7658 /* Copy Headers Internal */, + 8D07F2BF0486CC7A007CD1D0 /* Resources */, + ); + buildRules = ( + ); + dependencies = ( + 40C44AE60E379922008FCC51 /* PBXTargetDependency */, + 408BEC101046CFE900DEF522 /* PBXTargetDependency */, + 40C8499C101A36DC0083642A /* PBXTargetDependency */, + ); + name = "gtest-framework"; + productInstallPath = "$(HOME)/Library/Frameworks"; + productName = gtest; + productReference = 4539C8FF0EC27F6400A70F4C /* gtest.framework */; + 
productType = "com.apple.product-type.framework"; + }; +/* End PBXNativeTarget section */ + +/* Begin PBXProject section */ + 0867D690FE84028FC02AAC07 /* Project object */ = { + isa = PBXProject; + attributes = { + LastUpgradeCheck = 0460; + }; + buildConfigurationList = 4FADC24608B4156D00ABE55E /* Build configuration list for PBXProject "gtest" */; + compatibilityVersion = "Xcode 3.2"; + developmentRegion = English; + hasScannedForEncodings = 1; + knownRegions = ( + English, + Japanese, + French, + German, + en, + ); + mainGroup = 0867D691FE84028FC02AAC07 /* gtest */; + productRefGroup = 034768DDFF38A45A11DB9C8B /* Products */; + projectDirPath = ""; + projectRoot = ""; + targets = ( + 8D07F2BC0486CC7A007CD1D0 /* gtest-framework */, + 40C848F9101A209C0083642A /* gtest-static */, + 40C8490A101A217E0083642A /* gtest_main-static */, + 40899F420FFA7184000B29AE /* gtest_unittest-framework */, + 40C8497A101A36850083642A /* gtest_unittest-static */, + 4089A0120FFACEFC000B29AE /* sample1_unittest-framework */, + 40C84989101A36A60083642A /* sample1_unittest-static */, + 3B238F5F0E828B5400846E11 /* Check */, + 40C44ADC0E3798F4008FCC51 /* Version Info */, + ); + }; +/* End PBXProject section */ + +/* Begin PBXResourcesBuildPhase section */ + 8D07F2BF0486CC7A007CD1D0 /* Resources */ = { + isa = PBXResourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 404884500E2F799B00CF7658 /* README in Resources */, + 404884AC0E2F7CD900CF7658 /* CHANGES in Resources */, + 404884AD0E2F7CD900CF7658 /* CONTRIBUTORS in Resources */, + 404884AE0E2F7CD900CF7658 /* LICENSE in Resources */, + 40C84978101A36540083642A /* libgtest_main.a in Resources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXResourcesBuildPhase section */ + +/* Begin PBXShellScriptBuildPhase section */ + 3B238F5E0E828B5400846E11 /* ShellScript */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + ); + outputPaths = ( + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "# Remember, this \"Run Script\" build phase will be executed from $SRCROOT\n/bin/bash Scripts/runtests.sh"; + }; + 40C44ADB0E3798F4008FCC51 /* Generate Version.h */ = { + isa = PBXShellScriptBuildPhase; + buildActionMask = 2147483647; + files = ( + ); + inputPaths = ( + "$(SRCROOT)/Scripts/versiongenerate.py", + "$(SRCROOT)/../configure.ac", + ); + name = "Generate Version.h"; + outputPaths = ( + "$(PROJECT_TEMP_DIR)/Version.h", + ); + runOnlyForDeploymentPostprocessing = 0; + shellPath = /bin/sh; + shellScript = "# Remember, this \"Run Script\" build phase will be executed from $SRCROOT\n/usr/bin/python Scripts/versiongenerate.py ../ $PROJECT_TEMP_DIR"; + }; +/* End PBXShellScriptBuildPhase section */ + +/* Begin PBXSourcesBuildPhase section */ + 40899F400FFA7184000B29AE /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 40899F530FFA72A0000B29AE /* gtest_unittest.cc in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 4089A0100FFACEFC000B29AE /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 4089A0440FFAD1BE000B29AE /* sample1.cc in Sources */, + 4089A0460FFAD1BE000B29AE /* sample1_unittest.cc in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 40C848F7101A209C0083642A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 40C848FF101A21150083642A /* gtest-all.cc in Sources */, + ); + 
runOnlyForDeploymentPostprocessing = 0; + }; + 40C84908101A217E0083642A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 40C84915101A21DF0083642A /* gtest_main.cc in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 40C8497F101A36850083642A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 40C84980101A36850083642A /* gtest_unittest.cc in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 40C8498E101A36A60083642A /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 40C8498F101A36A60083642A /* sample1.cc in Sources */, + 40C84990101A36A60083642A /* sample1_unittest.cc in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; + 8D07F2C10486CC7A007CD1D0 /* Sources */ = { + isa = PBXSourcesBuildPhase; + buildActionMask = 2147483647; + files = ( + 40899F3A0FFA70D4000B29AE /* gtest-all.cc in Sources */, + ); + runOnlyForDeploymentPostprocessing = 0; + }; +/* End PBXSourcesBuildPhase section */ + +/* Begin PBXTargetDependency section */ + 40899F9D0FFA740F000B29AE /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 40899F420FFA7184000B29AE /* gtest_unittest-framework */; + targetProxy = 40899F9C0FFA740F000B29AE /* PBXContainerItemProxy */; + }; + 4089A0980FFAD34A000B29AE /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 4089A0120FFACEFC000B29AE /* sample1_unittest-framework */; + targetProxy = 4089A0970FFAD34A000B29AE /* PBXContainerItemProxy */; + }; + 408BEC101046CFE900DEF522 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 40C848F9101A209C0083642A /* gtest-static */; + targetProxy = 408BEC0F1046CFE900DEF522 /* PBXContainerItemProxy */; + }; + 40C44AE60E379922008FCC51 /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 40C44ADC0E3798F4008FCC51 /* Version Info */; + targetProxy = 40C44AE50E379922008FCC51 /* PBXContainerItemProxy */; + }; + 40C8497B101A36850083642A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 40C848F9101A209C0083642A /* gtest-static */; + targetProxy = 40C8497C101A36850083642A /* PBXContainerItemProxy */; + }; + 40C8497D101A36850083642A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 40C8490A101A217E0083642A /* gtest_main-static */; + targetProxy = 40C8497E101A36850083642A /* PBXContainerItemProxy */; + }; + 40C8498A101A36A60083642A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 40C848F9101A209C0083642A /* gtest-static */; + targetProxy = 40C8498B101A36A60083642A /* PBXContainerItemProxy */; + }; + 40C8498C101A36A60083642A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 40C8490A101A217E0083642A /* gtest_main-static */; + targetProxy = 40C8498D101A36A60083642A /* PBXContainerItemProxy */; + }; + 40C8499C101A36DC0083642A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 40C8490A101A217E0083642A /* gtest_main-static */; + targetProxy = 40C8499B101A36DC0083642A /* PBXContainerItemProxy */; + }; + 40C8499E101A36E50083642A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 8D07F2BC0486CC7A007CD1D0 /* gtest-framework */; + targetProxy = 40C8499D101A36E50083642A /* PBXContainerItemProxy */; + }; + 40C849A0101A36F10083642A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 8D07F2BC0486CC7A007CD1D0 /* gtest-framework */; + targetProxy = 40C8499F101A36F10083642A /* PBXContainerItemProxy */; + }; + 
40C849F7101A43440083642A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 40C8497A101A36850083642A /* gtest_unittest-static */; + targetProxy = 40C849F6101A43440083642A /* PBXContainerItemProxy */; + }; + 40C849F9101A43490083642A /* PBXTargetDependency */ = { + isa = PBXTargetDependency; + target = 40C84989101A36A60083642A /* sample1_unittest-static */; + targetProxy = 40C849F8101A43490083642A /* PBXContainerItemProxy */; + }; +/* End PBXTargetDependency section */ + +/* Begin XCBuildConfiguration section */ + 3B238F600E828B5400846E11 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = NO; + GCC_DYNAMIC_NO_PIC = NO; + GCC_OPTIMIZATION_LEVEL = 0; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + PRODUCT_NAME = Check; + SDKROOT = macosx; + }; + name = Debug; + }; + 3B238F610E828B5400846E11 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + COMBINE_HIDPI_IMAGES = YES; + COPY_PHASE_STRIP = YES; + DEBUG_INFORMATION_FORMAT = "dwarf-with-dsym"; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + PRODUCT_NAME = Check; + SDKROOT = macosx; + ZERO_LINK = NO; + }; + name = Release; + }; + 40899F450FFA7185000B29AE /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + HEADER_SEARCH_PATHS = ../; + PRODUCT_NAME = "gtest_unittest-framework"; + SDKROOT = macosx; + }; + name = Debug; + }; + 40899F460FFA7185000B29AE /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + HEADER_SEARCH_PATHS = ../; + PRODUCT_NAME = "gtest_unittest-framework"; + SDKROOT = macosx; + }; + name = Release; + }; + 4089A0150FFACEFD000B29AE /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + PRODUCT_NAME = "sample1_unittest-framework"; + SDKROOT = macosx; + }; + name = Debug; + }; + 4089A0160FFACEFD000B29AE /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + PRODUCT_NAME = "sample1_unittest-framework"; + SDKROOT = macosx; + }; + name = Release; + }; + 40C44ADF0E3798F4008FCC51 /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + COMBINE_HIDPI_IMAGES = YES; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + MACOSX_DEPLOYMENT_TARGET = 10.7; + PRODUCT_NAME = gtest; + SDKROOT = macosx; + TARGET_NAME = gtest; + }; + name = Debug; + }; + 40C44AE00E3798F4008FCC51 /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + COMBINE_HIDPI_IMAGES = YES; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + MACOSX_DEPLOYMENT_TARGET = 10.7; + PRODUCT_NAME = gtest; + SDKROOT = macosx; + TARGET_NAME = gtest; + }; + name = Release; + }; + 40C848FB101A209D0083642A /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 40899FB30FFA7567000B29AE /* StaticLibraryTarget.xcconfig */; + buildSettings = { + COMBINE_HIDPI_IMAGES = YES; + GCC_INLINES_ARE_PRIVATE_EXTERN = YES; + GCC_SYMBOLS_PRIVATE_EXTERN = YES; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + HEADER_SEARCH_PATHS = ( + ../, + ../include/, + ); + PRODUCT_NAME = gtest; + SDKROOT = macosx; + }; + name = Debug; + }; + 40C848FC101A209D0083642A /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 40899FB30FFA7567000B29AE /* StaticLibraryTarget.xcconfig */; + buildSettings = { + COMBINE_HIDPI_IMAGES = YES; + GCC_INLINES_ARE_PRIVATE_EXTERN = YES; + 
GCC_SYMBOLS_PRIVATE_EXTERN = YES; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + HEADER_SEARCH_PATHS = ( + ../, + ../include/, + ); + PRODUCT_NAME = gtest; + SDKROOT = macosx; + }; + name = Release; + }; + 40C8490E101A217F0083642A /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 40899FB30FFA7567000B29AE /* StaticLibraryTarget.xcconfig */; + buildSettings = { + COMBINE_HIDPI_IMAGES = YES; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + HEADER_SEARCH_PATHS = ( + ../, + ../include/, + ); + PRODUCT_NAME = gtest_main; + SDKROOT = macosx; + }; + name = Debug; + }; + 40C8490F101A217F0083642A /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 40899FB30FFA7567000B29AE /* StaticLibraryTarget.xcconfig */; + buildSettings = { + COMBINE_HIDPI_IMAGES = YES; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + HEADER_SEARCH_PATHS = ( + ../, + ../include/, + ); + PRODUCT_NAME = gtest_main; + SDKROOT = macosx; + }; + name = Release; + }; + 40C84985101A36850083642A /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + HEADER_SEARCH_PATHS = ../; + PRODUCT_NAME = gtest_unittest; + SDKROOT = macosx; + }; + name = Debug; + }; + 40C84986101A36850083642A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + HEADER_SEARCH_PATHS = ../; + PRODUCT_NAME = gtest_unittest; + SDKROOT = macosx; + }; + name = Release; + }; + 40C84995101A36A60083642A /* Debug */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + PRODUCT_NAME = "sample1_unittest-static"; + SDKROOT = macosx; + }; + name = Debug; + }; + 40C84996101A36A60083642A /* Release */ = { + isa = XCBuildConfiguration; + buildSettings = { + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + PRODUCT_NAME = "sample1_unittest-static"; + SDKROOT = macosx; + }; + name = Release; + }; + 4FADC24308B4156D00ABE55E /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 40D4CDF20E30E07400294801 /* FrameworkTarget.xcconfig */; + buildSettings = { + COMBINE_HIDPI_IMAGES = YES; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + HEADER_SEARCH_PATHS = ( + ../, + ../include/, + ); + INFOPLIST_FILE = Resources/Info.plist; + INFOPLIST_PREFIX_HEADER = "$(PROJECT_TEMP_DIR)/Version.h"; + INFOPLIST_PREPROCESS = YES; + PRODUCT_NAME = gtest; + SDKROOT = macosx; + VERSIONING_SYSTEM = "apple-generic"; + }; + name = Debug; + }; + 4FADC24408B4156D00ABE55E /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 40D4CDF20E30E07400294801 /* FrameworkTarget.xcconfig */; + buildSettings = { + COMBINE_HIDPI_IMAGES = YES; + DYLIB_COMPATIBILITY_VERSION = 1; + DYLIB_CURRENT_VERSION = 1; + GCC_VERSION = com.apple.compilers.llvm.clang.1_0; + HEADER_SEARCH_PATHS = ( + ../, + ../include/, + ); + INFOPLIST_FILE = Resources/Info.plist; + INFOPLIST_PREFIX_HEADER = "$(PROJECT_TEMP_DIR)/Version.h"; + INFOPLIST_PREPROCESS = YES; + PRODUCT_NAME = gtest; + SDKROOT = macosx; + VERSIONING_SYSTEM = "apple-generic"; + }; + name = Release; + }; + 4FADC24708B4156D00ABE55E /* Debug */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 40D4CDF10E30E07400294801 /* DebugProject.xcconfig */; + buildSettings = { + }; + name = Debug; + }; + 4FADC24808B4156D00ABE55E /* Release */ = { + isa = XCBuildConfiguration; + baseConfigurationReference = 
40D4CDF40E30E07400294801 /* ReleaseProject.xcconfig */; + buildSettings = { + }; + name = Release; + }; +/* End XCBuildConfiguration section */ + +/* Begin XCConfigurationList section */ + 3B238FA30E828BB600846E11 /* Build configuration list for PBXAggregateTarget "Check" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 3B238F600E828B5400846E11 /* Debug */, + 3B238F610E828B5400846E11 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 40899F4A0FFA71BC000B29AE /* Build configuration list for PBXNativeTarget "gtest_unittest-framework" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 40899F450FFA7185000B29AE /* Debug */, + 40899F460FFA7185000B29AE /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4089A0240FFACF01000B29AE /* Build configuration list for PBXNativeTarget "sample1_unittest-framework" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4089A0150FFACEFD000B29AE /* Debug */, + 4089A0160FFACEFD000B29AE /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 40C44AE40E379905008FCC51 /* Build configuration list for PBXAggregateTarget "Version Info" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 40C44ADF0E3798F4008FCC51 /* Debug */, + 40C44AE00E3798F4008FCC51 /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 40C84902101A212E0083642A /* Build configuration list for PBXNativeTarget "gtest-static" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 40C848FB101A209D0083642A /* Debug */, + 40C848FC101A209D0083642A /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 40C84912101A21D20083642A /* Build configuration list for PBXNativeTarget "gtest_main-static" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 40C8490E101A217F0083642A /* Debug */, + 40C8490F101A217F0083642A /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 40C84984101A36850083642A /* Build configuration list for PBXNativeTarget "gtest_unittest-static" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 40C84985101A36850083642A /* Debug */, + 40C84986101A36850083642A /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 40C84994101A36A60083642A /* Build configuration list for PBXNativeTarget "sample1_unittest-static" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 40C84995101A36A60083642A /* Debug */, + 40C84996101A36A60083642A /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4FADC24208B4156D00ABE55E /* Build configuration list for PBXNativeTarget "gtest-framework" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4FADC24308B4156D00ABE55E /* Debug */, + 4FADC24408B4156D00ABE55E /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; + 4FADC24608B4156D00ABE55E /* Build configuration list for PBXProject "gtest" */ = { + isa = XCConfigurationList; + buildConfigurations = ( + 4FADC24708B4156D00ABE55E /* Debug */, + 4FADC24808B4156D00ABE55E /* Release */, + ); + defaultConfigurationIsVisible = 0; + defaultConfigurationName = Release; + }; +/* End XCConfigurationList section */ + }; + rootObject = 0867D690FE84028FC02AAC07 /* Project object */; +} diff --git 
a/external/rocksdb/.arcconfig b/external/rocksdb/.arcconfig new file mode 100755 index 0000000000..f06f314723 --- /dev/null +++ b/external/rocksdb/.arcconfig @@ -0,0 +1,17 @@ +{ + "project_id" : "rocksdb", + "conduit_uri" : "https://reviews.facebook.net/", + "copyright_holder" : "Facebook", + "load" : [ + "arcanist_util" + ], + "lint.engine" : "FacebookFbcodeLintEngine", + "lint.engine.single.linter" : "FbcodeCppLinter", + "unit.engine" : "FacebookFbcodeUnitTestEngine", + "arcanist_configuration" : "FacebookArcanistConfiguration", + "base" : "git:HEAD^, hg:.^", + "git.default-relative-commit" : "HEAD^", + "git:arc.feature.start.default" : "origin/master", + "arc.feature.start.default" : "master", + "history.immutable" : false +} diff --git a/external/rocksdb/.clang-format b/external/rocksdb/.clang-format new file mode 100755 index 0000000000..7c279811ac --- /dev/null +++ b/external/rocksdb/.clang-format @@ -0,0 +1,5 @@ +# Complete list of style options can be found at: +# http://clang.llvm.org/docs/ClangFormatStyleOptions.html +--- +BasedOnStyle: Google +... diff --git a/external/rocksdb/.gitignore b/external/rocksdb/.gitignore new file mode 100755 index 0000000000..0a297b4029 --- /dev/null +++ b/external/rocksdb/.gitignore @@ -0,0 +1,70 @@ +TARGETS +make_config.mk + +*.a +*.arc +*.d +*.dylib* +*.gcda +*.gcno +*.o +*.so +*.so.* +*_test +*_bench +*_stress +*.out +*.class +*.jar +*.*jnilib* +*.d-e +*.o-* +*.swp +*~ +*.vcxproj +*.vcxproj.filters +*.sln +*.cmake +CMakeCache.txt +CMakeFiles/ +build/ + +ldb +manifest_dump +sst_dump +util/build_version.cc +build_tools/VALGRIND_LOGS/ +coverage/COVERAGE_REPORT +.gdbhistory +package/ +.phutil_module_cache +unity.a +tags +rocksdb_dump +rocksdb_undump +db_test2 + +java/out +java/target +java/test-libs +java/*.log +java/include/org_rocksdb_*.h + +.idea/ +*.iml + +rocksdb.cc +rocksdb.h +unity.cc +java/crossbuild/.vagrant +.vagrant/ +java/**.asc +java/javadoc + +scan_build_report/ +t +LOG + +db_logs/ +tp2/ +fbcode/ diff --git a/external/rocksdb/.travis.yml b/external/rocksdb/.travis.yml new file mode 100755 index 0000000000..85eb6bfa06 --- /dev/null +++ b/external/rocksdb/.travis.yml @@ -0,0 +1,47 @@ +sudo: false +language: cpp +os: + - linux + - osx +compiler: + - clang + +addons: + apt: + sources: ['ubuntu-toolchain-r-test', 'llvm-toolchain-precise-3.6'] + packages: ['clang-3.6' , 'zlib1g-dev', 'libbz2-dev', 'libsnappy-dev', 'curl'] +env: + # Run all tests before db_block_cache_test (db_test, db_test2) + - JOB_NAME=unittests ROCKSDBTESTS_END=db_block_cache_test + # Run all tests starting from db_block_cache_test (db_block_cache_test, db_iter_test, ...) + - JOB_NAME=unittests ROCKSDBTESTS_START=db_block_cache_test + # Run java tests + - JOB_NAME=java_test + # Build ROCKSDB_LITE + - JOB_NAME=lite_build + +install: + # Build gflags + # TODO(noetzli): Remove when gflags available through Travis + - pushd /tmp/ && curl -L https://github.com/gflags/gflags/archive/v2.1.2.tar.gz -o gflags.tar.gz && tar xfz gflags.tar.gz && cd gflags-2.1.2 && cmake . 
&& make && popd + +before_script: + # Add gflags to include/library paths + # TODO(noetzli): Remove when gflags available through Travis + - export LD_LIBRARY_PATH="$LD_LIBRARY_PATH:/tmp/gflags-2.1.2/lib" + - export LIBRARY_PATH="$LIBRARY_PATH:/tmp/gflags-2.1.2/lib" + - export CPLUS_INCLUDE_PATH="$CPLUS_INCLUDE_PATH:/tmp/gflags-2.1.2/include" + - if [[ "${TRAVIS_OS_NAME}" == 'linux' ]]; then CXX=clang++-3.6; fi + - if [[ "${TRAVIS_OS_NAME}" == 'osx' ]]; then brew install gflags snappy; fi + # Limit the maximum number of open file descriptors to 2000 + - ulimit -n 2000 || true + +script: + - if [[ "${JOB_NAME}" == 'unittests' ]]; then OPT=-DTRAVIS V=1 make -j4 check_some; fi + - if [[ "${JOB_NAME}" == 'java_test' ]]; then OPT=-DTRAVIS V=1 make clean jclean rocksdbjava jtest; fi + - if [[ "${JOB_NAME}" == 'lite_build' ]]; then OPT="-DTRAVIS -DROCKSDB_LITE" V=1 make -j4 static_lib; fi +notifications: + email: + - leveldb@fb.com + webhooks: + - https://buildtimetrend.herokuapp.com/travis diff --git a/external/rocksdb/AUTHORS b/external/rocksdb/AUTHORS new file mode 100755 index 0000000000..e644f5530f --- /dev/null +++ b/external/rocksdb/AUTHORS @@ -0,0 +1,11 @@ +Facebook Inc. +Facebook Engineering Team + +Google Inc. +# Initial version authors: +Jeffrey Dean +Sanjay Ghemawat + +# Partial list of contributors: +Kevin Regan +Johan Bilien diff --git a/external/rocksdb/CMakeLists.txt b/external/rocksdb/CMakeLists.txt new file mode 100755 index 0000000000..26e5a6f770 --- /dev/null +++ b/external/rocksdb/CMakeLists.txt @@ -0,0 +1,294 @@ +# This cmake build is for Windows 64-bit only. +# +# Prerequisites: +# You must have Visual Studio 2013 Update 4 installed. Start the Developer Command Prompt window that is a part of Visual Studio installation. +# Run the build commands from within the Developer Command Prompt window to have paths to the compiler and runtime libraries set. +# You must have git.exe in your %PATH% environment variable. +# +# Building RocksDB for Windows is as easy as 1-2-3-4-5: +# +# 1. Update paths to third-party libraries in the thirdparty.inc file +# 2. Create a new directory for build artifacts +# mkdir build +# cd build +# 3. Run cmake to generate project files for Windows; add more options to enable required third-party libraries. +# See thirdparty.inc for more information. +# sample command: cmake -G "Visual Studio 12 Win64" -DGFLAGS=1 -DSNAPPY=1 -DJEMALLOC=1 -DJNI=1 .. +# or for Visual Studio 2015: cmake -G "Visual Studio 14 Win64" -DGFLAGS=1 -DSNAPPY=1 -DJEMALLOC=1 -DJNI=1 .. +# 4. Then build the project in debug mode (you may want to add the /m[:<n>] flag to run msbuild in parallel threads +# or simply /m to use all available cores) +# msbuild rocksdb.sln +# +# The rocksdb.sln build excludes test-only code in Release. If you build ALL_BUILD then everything +# will be attempted, but test-only code does not build in Release mode. +# +# 5.
And release mode (/m[:<n>] is also supported) +# msbuild rocksdb.sln /p:Configuration=Release +# + +cmake_minimum_required(VERSION 2.6) +project(rocksdb) + +execute_process(COMMAND powershell -Command "Get-Date -format MM_dd_yyyy" OUTPUT_VARIABLE DATE) +execute_process(COMMAND powershell -Command "Get-Date -format HH:mm:ss" OUTPUT_VARIABLE TIME) +string(REGEX REPLACE "(..)_(..)_..(..).*" "\\1/\\2/\\3" DATE "${DATE}") +string(REGEX REPLACE "(..):(.....).*" " \\1:\\2" TIME "${TIME}") +set(GIT_DATE_TIME ${DATE}${TIME}) + +string(REGEX REPLACE "\n" "" GIT_DATE_TIME ${GIT_DATE_TIME}) +string(REGEX REPLACE "\r" "" GIT_DATE_TIME ${GIT_DATE_TIME}) + +find_package(Git) + +if (GIT_FOUND AND EXISTS "${CMAKE_CURRENT_SOURCE_DIR}/.git") + execute_process(COMMAND $ENV{COMSPEC} /C ${GIT_EXECUTABLE} -C ${CMAKE_CURRENT_SOURCE_DIR} rev-parse HEAD OUTPUT_VARIABLE GIT_SHA) +else() + set(GIT_SHA 0) +endif() + +string(REGEX REPLACE "[^0-9a-f]+" "" GIT_SHA "${GIT_SHA}") + +set(BUILD_VERSION_CC ${CMAKE_CURRENT_SOURCE_DIR}/util/build_version.cc) + +add_custom_command(OUTPUT ${BUILD_VERSION_CC} + COMMAND echo "#include \"build_version.h\"" > ${BUILD_VERSION_CC} + COMMAND echo "const char* rocksdb_build_git_sha = \"rocksdb_build_git_sha:${GIT_SHA}\";" >> ${BUILD_VERSION_CC} + COMMAND echo "const char* rocksdb_build_git_datetime = \"rocksdb_build_git_datetime:${GIT_DATE_TIME}\";" >> ${BUILD_VERSION_CC} + COMMAND echo const char* rocksdb_build_compile_date = __DATE__\; >> ${BUILD_VERSION_CC} +) + +add_custom_target(GenerateBuildVersion DEPENDS ${BUILD_VERSION_CC}) + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /Zi /nologo /EHsc /GS /Gd /GR /GF /fp:precise /Zc:wchar_t /Zc:forScope /errorReport:queue") + +set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /FC /d2Zi+ /W3 /WX /wd4127 /wd4800 /wd4996 /wd4351 /wd4244") + + +# Used to run CI builds and tests so we can run faster +set(OPTIMIZE_DEBUG_DEFAULT 0) # Debug build is unoptimized by default; use -DOPTDBG=1 to optimize + +if(DEFINED OPTDBG) + set(OPTIMIZE_DEBUG ${OPTDBG}) +else() + set(OPTIMIZE_DEBUG ${OPTIMIZE_DEBUG_DEFAULT}) +endif() + +if((${OPTIMIZE_DEBUG} EQUAL 1)) + message(STATUS "Debug optimization is enabled") + set(CMAKE_CXX_FLAGS_DEBUG "/Oxt") +else() + set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_FLAGS_DEBUG} /Od /RTC1 /Gm") +endif() + +set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE} /Oxt /Zp8 /Gm- /Gy") + +set(CMAKE_SHARED_LINKER_FLAGS "${CMAKE_SHARED_LINKER_FLAGS} /DEBUG") +set(CMAKE_EXE_LINKER_FLAGS "${CMAKE_EXE_LINKER_FLAGS} /DEBUG") + +set(STATIC ${MSVC} CACHE BOOL "Link libraries statically") +if(STATIC) + foreach(VAR CMAKE_CXX_FLAGS_DEBUG CMAKE_CXX_FLAGS_RELWITHDEBINFO CMAKE_CXX_FLAGS_RELEASE) + string(REPLACE "/MD" "/MT" ${VAR} "${${VAR}}") + endforeach() +endif() + +add_definitions(-DWIN32 -DOS_WIN -D_MBCS -DWIN64 -DNOMINMAX) + +include_directories(${PROJECT_SOURCE_DIR}) +include_directories(${PROJECT_SOURCE_DIR}/include) + +set(ROCKSDB_LIBS rocksdblib${ARTIFACT_SUFFIX}) + +set(LIBS ${ROCKSDB_LIBS} ${THIRDPARTY_LIBS} ${SYSTEM_LIBS}) + +# Main library source code +set(SOURCES + db/auto_roll_logger.cc + db/builder.cc + db/c.cc + db/column_family.cc + db/compacted_db_impl.cc + db/compaction.cc + db/compaction_iterator.cc + db/compaction_job.cc + db/compaction_picker.cc + db/convenience.cc + db/dbformat.cc + db/db_filesnapshot.cc + db/db_impl.cc + db/db_impl_debug.cc + db/db_impl_experimental.cc + db/db_impl_add_file.cc + db/db_impl_readonly.cc + db/db_info_dumper.cc + db/db_iter.cc + db/event_helpers.cc + db/experimental.cc + db/filename.cc + db/file_indexer.cc +
db/flush_job.cc + db/flush_scheduler.cc + db/forward_iterator.cc + db/internal_stats.cc + db/log_reader.cc + db/log_writer.cc + db/managed_iterator.cc + db/memtable.cc + db/memtable_allocator.cc + db/memtable_list.cc + db/merge_helper.cc + db/merge_operator.cc + db/repair.cc + db/snapshot_impl.cc + db/table_cache.cc + db/table_properties_collector.cc + db/transaction_log_impl.cc + db/version_builder.cc + db/version_edit.cc + db/version_set.cc + db/wal_manager.cc + db/write_batch.cc + db/write_batch_base.cc + db/write_controller.cc + db/write_thread.cc + db/xfunc_test_points.cc + memtable/hash_cuckoo_rep.cc + memtable/hash_linklist_rep.cc + memtable/hash_skiplist_rep.cc + memtable/skiplistrep.cc + memtable/vectorrep.cc + port/stack_trace.cc + port/win/io_win.cc + port/win/env_win.cc + port/win/env_default.cc + port/win/port_win.cc + port/win/win_logger.cc + port/win/xpress_win.cc + table/adaptive_table_factory.cc + table/block.cc + table/block_based_filter_block.cc + table/block_based_table_builder.cc + table/block_based_table_factory.cc + table/block_based_table_reader.cc + table/block_builder.cc + table/block_prefix_index.cc + table/bloom_block.cc + table/cuckoo_table_builder.cc + table/cuckoo_table_factory.cc + table/cuckoo_table_reader.cc + table/flush_block_policy.cc + table/format.cc + table/full_filter_block.cc + table/get_context.cc + table/iterator.cc + table/merger.cc + table/sst_file_writer.cc + table/meta_blocks.cc + table/plain_table_builder.cc + table/plain_table_factory.cc + table/plain_table_index.cc + table/plain_table_key_coding.cc + table/plain_table_reader.cc + table/persistent_cache_helper.cc + table/table_properties.cc + table/two_level_iterator.cc + tools/sst_dump_tool.cc + tools/db_bench_tool.cc + tools/dump/db_dump_tool.cc + util/arena.cc + util/bloom.cc + util/build_version.cc + util/coding.cc + util/compaction_job_stats_impl.cc + util/comparator.cc + util/concurrent_arena.cc + util/crc32c.cc + util/delete_scheduler.cc + util/dynamic_bloom.cc + util/env.cc + util/env_chroot.cc + util/env_hdfs.cc + util/event_logger.cc + util/file_util.cc + util/file_reader_writer.cc + util/sst_file_manager_impl.cc + util/filter_policy.cc + util/hash.cc + util/histogram.cc + util/histogram_windowing.cc + util/instrumented_mutex.cc + util/iostats_context.cc + util/lru_cache.cc + tools/ldb_cmd.cc + tools/ldb_tool.cc + util/logging.cc + util/log_buffer.cc + util/memenv.cc + util/murmurhash.cc + util/mutable_cf_options.cc + util/options.cc + util/options_helper.cc + util/options_parser.cc + util/options_sanity_check.cc + util/perf_context.cc + util/perf_level.cc + util/random.cc + util/rate_limiter.cc + util/sharded_cache.cc + util/slice.cc + util/statistics.cc + util/status.cc + util/status_message.cc + util/string_util.cc + util/sync_point.cc + util/testharness.cc + util/testutil.cc + util/thread_local.cc + util/threadpool.cc + util/thread_status_impl.cc + util/thread_status_updater.cc + util/thread_status_util.cc + util/thread_status_util_debug.cc + util/transaction_test_util.cc + util/xfunc.cc + util/xxhash.cc + utilities/backupable/backupable_db.cc + utilities/checkpoint/checkpoint.cc + utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc + utilities/document/document_db.cc + utilities/document/json_document.cc + utilities/document/json_document_builder.cc + utilities/env_mirror.cc + utilities/env_registry.cc + utilities/flashcache/flashcache.cc + utilities/geodb/geodb_impl.cc + utilities/leveldb_options/leveldb_options.cc + utilities/memory/memory_util.cc + 
utilities/merge_operators/string_append/stringappend.cc + utilities/merge_operators/string_append/stringappend2.cc + utilities/merge_operators/put.cc + utilities/merge_operators/max.cc + utilities/merge_operators/uint64add.cc + utilities/option_change_migration/option_change_migration.cc + utilities/options/options_util.cc + utilities/persistent_cache/persistent_cache_tier.cc + utilities/persistent_cache/volatile_tier_impl.cc + utilities/redis/redis_lists.cc + utilities/simulator_cache/sim_cache.cc + utilities/spatialdb/spatial_db.cc + utilities/table_properties_collectors/compact_on_deletion_collector.cc + utilities/transactions/optimistic_transaction_impl.cc + utilities/transactions/optimistic_transaction_db_impl.cc + utilities/transactions/transaction_base.cc + utilities/transactions/transaction_impl.cc + utilities/transactions/transaction_db_impl.cc + utilities/transactions/transaction_db_mutex_impl.cc + utilities/transactions/transaction_lock_mgr.cc + utilities/transactions/transaction_util.cc + utilities/ttl/db_ttl_impl.cc + utilities/write_batch_with_index/write_batch_with_index.cc + utilities/write_batch_with_index/write_batch_with_index_internal.cc +) + +add_library(rocksdblib${ARTIFACT_SUFFIX} ${SOURCES}) +set_target_properties(rocksdblib${ARTIFACT_SUFFIX} PROPERTIES COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/rocksdblib${ARTIFACT_SUFFIX}.pdb") diff --git a/external/rocksdb/CONTRIBUTING.md b/external/rocksdb/CONTRIBUTING.md new file mode 100755 index 0000000000..d6467fe07b --- /dev/null +++ b/external/rocksdb/CONTRIBUTING.md @@ -0,0 +1,19 @@ +# Contributing to RocksDB + +## Contributor License Agreement ("CLA") + +In order to accept your pull request, we need you to submit a CLA. You +only need to do this once, so if you've done this for another Facebook +open source project, you're good to go. If you are submitting a pull +request for the first time, just let us know that you have completed +the CLA and we can cross-check with your GitHub username. + +Complete your CLA here: <https://code.facebook.com/cla> + +If you prefer to sign a paper copy, we can send you a PDF. Send us an +e-mail or create a new GitHub issue to request the CLA in PDF format. + +## License + +By contributing to RocksDB, you agree that your contributions will be +licensed under the [BSD License](LICENSE). diff --git a/external/rocksdb/DEFAULT_OPTIONS_HISTORY.md b/external/rocksdb/DEFAULT_OPTIONS_HISTORY.md new file mode 100755 index 0000000000..e4ad3a7285 --- /dev/null +++ b/external/rocksdb/DEFAULT_OPTIONS_HISTORY.md @@ -0,0 +1,14 @@ +# RocksDB default options change log +## 4.8.0 (5/2/2016) +* options.max_open_files changes from 5000 to -1. It improves performance, but users need to set the file descriptor limit large enough and watch memory usage for indexes and bloom filters. +* options.base_background_compactions changes from max_background_compactions to 1. When users set a higher max_background_compactions but write throughput is not high, writes are less spiky to disks. +* options.wal_recovery_mode changes from kTolerateCorruptedTailRecords to kPointInTimeRecovery. Avoids some false positives when the file system or hardware reorders writes of file data and metadata. + +## 4.7.0 (4/8/2016) +* options.write_buffer_size changes from 4MB to 64MB. +* options.target_file_size_base changes from 2MB to 64MB. +* options.max_bytes_for_level_base changes from 10MB to 256MB. +* options.soft_pending_compaction_bytes_limit changes from 0 (disabled) to 64GB. +* options.hard_pending_compaction_bytes_limit changes from 0 (disabled) to 256GB.
+* table_cache_numshardbits changes from 4 to 6. +* max_file_opening_threads changes from 1 to 16. diff --git a/external/rocksdb/DUMP_FORMAT.md b/external/rocksdb/DUMP_FORMAT.md new file mode 100755 index 0000000000..009dabad52 --- /dev/null +++ b/external/rocksdb/DUMP_FORMAT.md @@ -0,0 +1,16 @@ +## RocksDB dump format + +The version 1 RocksDB dump format is fairly simple (a minimal reader sketch appears at the end of this excerpt): + +1) The dump starts with the 8-byte magic identifier "ROCKDUMP" + +2) The magic is followed by an 8-byte big-endian version, which is 0x00000001. + +3) Next are arbitrarily sized chunks of bytes, each prefixed with a 4-byte little-endian number indicating how large the chunk is. + +4) The first chunk is special: it is a JSON string describing the creation of this dump. It contains the following keys: +* database-path: The path of the database this dump was created from. +* hostname: The hostname of the machine where the dump was created. +* creation-time: Unix seconds since epoch when this dump was created. + +5) Following the info chunk, the remaining chunks are paired up into key/value pairs. diff --git a/external/rocksdb/HISTORY.md b/external/rocksdb/HISTORY.md new file mode 100755 index 0000000000..1121bf7852 --- /dev/null +++ b/external/rocksdb/HISTORY.md @@ -0,0 +1,425 @@ +# Rocksdb Change Log +## 4.11.2 (9/15/2016) +### Bug fixes +* Segfault when failing to open an SST file for read-ahead iterators. +* WAL without data for all CFs is not deleted after recovery. + +## 4.11.1 (8/30/2016) +### Bug Fixes +* Mitigate a regression that could cause a deadlock during recovery when options.max_successive_merges is hit. +* Fix a data race related to the hash index in block-based tables when putting indexes in the block cache. + +## 4.11.0 (8/1/2016) +### Public API Change +* options.memtable_prefix_bloom_huge_page_tlb_size => memtable_huge_page_size. When it is set, RocksDB will try to allocate memory from huge pages for the memtable too, rather than just the memtable bloom filter. + +### New Features +* A tool to migrate a DB after an options change. See include/rocksdb/utilities/option_change_migration.h. +* Add ReadOptions.background_purge_on_iterator_cleanup. If true, we avoid file deletion when destroying iterators. + +## 4.10.0 (7/5/2016) +### Public API Change +* options.memtable_prefix_bloom_bits changes to options.memtable_prefix_bloom_bits_ratio and deprecate options.memtable_prefix_bloom_probes +* enum type CompressionType and PerfLevel changes from char to unsigned char. Values of all PerfLevel entries shift by one. +* Deprecate options.filter_deletes. + +### New Features +* Add avoid_flush_during_recovery option. +* Add a read option background_purge_on_iterator_cleanup to avoid deleting files in the foreground when destroying iterators. Instead, a job is scheduled in the high-priority queue and executed in a separate background thread. +* RepairDB support for column families. RepairDB now associates data with non-default column families using information embedded in the SST/WAL files (4.7 or later). For data written by 4.6 or earlier, RepairDB associates it with the default column family. +* Add options.write_buffer_manager which allows users to control total memtable sizes across multiple DB instances. + +## 4.9.0 (6/9/2016) +### Public API changes +* Add bottommost_compression option. This option can be used to set a specific compression algorithm for the bottommost level (the last level containing files in the DB).
+* Introduce CompactionJobInfo::compression. This field states the compression algorithm used to generate the output files of the compaction. +* Deprecate BlockBasedTableOptions.hash_index_allow_collision=false +* Deprecate options builder (GetOptions()). + +### New Features +* Introduce NewSimCache() in rocksdb/utilities/sim_cache.h. This function creates a block cache that can give simulation results (mainly hit rate) of simulating block behavior with a configurable cache size. + +## 4.8.0 (5/2/2016) +### Public API Change +* Allow a preset compression dictionary for improved compression of block-based tables. This is supported for zlib, zstd, and lz4. The compression dictionary's size is configurable via CompressionOptions::max_dict_bytes. +* Delete deprecated classes for creating backups (BackupableDB) and restoring from backups (RestoreBackupableDB). Now, BackupEngine should be used for creating backups, and BackupEngineReadOnly should be used for restorations. For more details, see https://github.com/facebook/rocksdb/wiki/How-to-backup-RocksDB%3F +* Expose an estimate of per-level compression ratio via DB property: "rocksdb.compression-ratio-at-levelN". +* Added EventListener::OnTableFileCreationStarted. EventListener::OnTableFileCreated will also be called in the failure case. Users can check creation status via TableFileCreationInfo::status. + +### New Features +* Add ReadOptions::readahead_size. If non-zero, NewIterator will create a new table reader which performs reads of the given size. + +## 4.7.0 (4/8/2016) +### Public API Change +* Rename option compaction_measure_io_stats to report_bg_io_stats and include flushes too. +* Change some default options. Now default options will optimize for server workloads. Also enable slowdown and full stop triggers for pending compaction bytes. These changes may cause sub-optimal performance or a significant increase in resource usage. To avoid these risks, users can open an existing RocksDB with options extracted from RocksDB option files. See https://github.com/facebook/rocksdb/wiki/RocksDB-Options-File for how to use RocksDB option files. Or you can call Options.OldDefaults() to recover old defaults. DEFAULT_OPTIONS_HISTORY.md will track the change history of default options. + +## 4.6.0 (3/10/2016) +### Public API Changes +* Change default of BlockBasedTableOptions.format_version to 2. This means a DB created with defaults by 4.6 or later cannot be opened by RocksDB version 3.9 or earlier. +* Added strict_capacity_limit option to NewLRUCache. If the flag is set to true, inserting into the cache will fail if not enough capacity can be freed. The signature of Cache::Insert() is updated accordingly. +* Tickers [NUMBER_DB_NEXT, NUMBER_DB_PREV, NUMBER_DB_NEXT_FOUND, NUMBER_DB_PREV_FOUND, ITER_BYTES_READ] are not updated immediately. They are updated when the Iterator is deleted. +* Add a monotonically increasing counter (DB property "rocksdb.current-super-version-number") that increments upon any change to the LSM tree. + +### New Features +* Add CompactionPri::kMinOverlappingRatio, a compaction picking mode friendly to write amplification. +* Deprecate Iterator::IsKeyPinned() and replace it with Iterator::GetProperty() with prop_name="rocksdb.iterator.is.key.pinned" + +## 4.5.0 (2/5/2016) +### Public API Changes +* Add a new perf context level between kEnableCount and kEnableTime. Level 2 now does not include timers for mutexes. +* Statistics of mutex operation durations will not be measured by default. If you want them enabled, you need to set Statistics::stats_level_ to kAll.
+* DBOptions::delete_scheduler and NewDeleteScheduler() are removed; please use DBOptions::sst_file_manager and NewSstFileManager() instead + +### New Features +* ldb tool now supports operations to non-default column families. +* Add kPersistedTier to ReadTier. This option allows Get and MultiGet to read only the persisted data and skip mem-tables if writes were done with disableWAL = true. +* Add DBOptions::sst_file_manager. Use NewSstFileManager() in include/rocksdb/sst_file_manager.h to create an SstFileManager that can be used to track the total size of SST files and control the SST file deletion rate. + +## 4.4.0 (1/14/2016) +### Public API Changes +* Change names in CompactionPri and add a new one. +* Deprecate options.soft_rate_limit and add options.soft_pending_compaction_bytes_limit. +* If options.max_write_buffer_number > 3, writes will be slowed down when writing to the last write buffer to delay a full stop. +* Introduce CompactionJobInfo::compaction_reason; this field includes the reason that triggered the compaction. +* After slowdown is triggered, if estimated pending compaction bytes keep increasing, slow down more. +* Increase default options.delayed_write_rate to 2MB/s. +* Added a new parameter --path to the ldb tool. --path accepts the name of either MANIFEST, SST or a WAL file. Either --db or --path can be used when calling ldb. + +## 4.3.0 (12/8/2015) +### New Features +* CompactionFilter has a new member function called IgnoreSnapshots which allows CompactionFilter to be called even if there are snapshots later than the key. +* RocksDB will now persist options under the same directory as the RocksDB database on successful DB::Open, CreateColumnFamily, DropColumnFamily, and SetOptions. +* Introduce LoadLatestOptions() in rocksdb/utilities/options_util.h. This function can construct the latest DBOptions / ColumnFamilyOptions used by the specified RocksDB instance. +* Introduce CheckOptionsCompatibility() in rocksdb/utilities/options_util.h. This function checks whether the input set of options is able to open the specified DB successfully. + +### Public API Changes +* When options.db_write_buffer_size triggers, only the column family with the largest column family size will be flushed, not all the column families. + +## 4.2.0 (11/9/2015) +### New Features +* Introduce CreateLoggerFromOptions(); this function creates a Logger for the provided DBOptions. +* Add GetAggregatedIntProperty(), which returns the sum of the GetIntProperty of all the column families. +* Add MemoryUtil in rocksdb/utilities/memory.h. It currently offers a way to get the memory usage by type from a list of RocksDB instances. + +### Public API Changes +* CompactionFilter::Context includes information of Column Family ID +* The need-compaction hint given by TablePropertiesCollector::NeedCompact() will be persistent and recoverable after DB recovery. This introduces a breaking format change. If you use this experimental feature, including NewCompactOnDeletionCollectorFactory() in the new version, you may not be able to directly downgrade the DB back to version 4.0 or lower. +* TablePropertiesCollectorFactory::CreateTablePropertiesCollector() now takes an option Context, containing the information of column family ID for the file being written. +* Remove DefaultCompactionFilterFactory. + + +## 4.1.0 (10/8/2015) +### New Features +* Added a single delete operation as a more efficient way to delete keys that have not been overwritten.
+## 4.1.0 (10/8/2015)
+### New Features
+* Added single delete operation as a more efficient way to delete keys that have not been overwritten.
+* Added experimental AddFile() to the DB interface that allows users to add files created by SstFileWriter into an empty database, see include/rocksdb/sst_file_writer.h and DB::AddFile() for more info.
+* Added support for opening SST files with .ldb suffix, which enables opening LevelDB databases.
+* CompactionFilter now supports filtering of merge operands and merge results.
+
+### Public API Changes
+* Added SingleDelete() to the DB interface.
+* Added AddFile() to the DB interface.
+* Added SstFileWriter class.
+* CompactionFilter has a new method FilterMergeOperand() that RocksDB applies to every merge operand during compaction to decide whether to filter the operand.
+* We removed CompactionFilterV2 interfaces from include/rocksdb/compaction_filter.h. The functionality was deprecated already in version 3.13.
+
+## 4.0.0 (9/9/2015)
+### New Features
+* Added support for transactions. See include/rocksdb/utilities/transaction.h for more info.
+* DB::GetProperty() now accepts "rocksdb.aggregated-table-properties" and "rocksdb.aggregated-table-properties-at-levelN", in which case it returns aggregated table properties of the target column family, or the aggregated table properties of the specified level N if the "at-level" version is used.
+* Add compression option kZSTDNotFinalCompression for people to experiment with ZSTD although its format is not finalized.
+* We removed the need for the LATEST_BACKUP file in BackupEngine. We still keep writing it when we create new backups (because of backward compatibility), but we don't read it anymore.
+
+### Public API Changes
+* Removed class Env::RandomRWFile and Env::NewRandomRWFile().
+* Renamed DBOptions.num_subcompactions to DBOptions.max_subcompactions to make the name better match the actual functionality of the option.
+* Added Equal() method to the Comparator interface that can optionally be overridden in cases where equality comparisons can be done more efficiently than three-way comparisons.
+* The previous 'experimental' OptimisticTransaction class has been replaced by the Transaction class.
+
+## 3.13.0 (8/6/2015)
+### New Features
+* RollbackToSavePoint() in WriteBatch/WriteBatchWithIndex.
+* Add NewCompactOnDeletionCollectorFactory() in utilities/table_properties_collectors, which allows rocksdb to mark an SST file as need-compaction when it observes at least D deletion entries in any N consecutive entries in that SST file. Note that this feature depends on an experimental NeedCompact() API --- the result of this API will not persist after DB restart.
+* Add DBOptions::delete_scheduler. Use NewDeleteScheduler() in include/rocksdb/delete_scheduler.h to create a DeleteScheduler that can be shared among multiple RocksDB instances to control the file deletion rate of SST files that exist in the first db_path.
+
+### Public API Changes
+* Deprecated WriteOptions::timeout_hint_us. We no longer support write timeout. If you really need this option, talk to us and we might consider returning it.
+* Deprecated the purge_redundant_kvs_while_flush option.
+* Removed BackupEngine::NewBackupEngine() and NewReadOnlyBackupEngine() that were deprecated in RocksDB 3.8. Please use BackupEngine::Open() instead.
+* Deprecated Compaction Filter V2. We are not aware of any existing use-cases. If you use this filter, your compile will break with RocksDB 3.13. Please let us know if you use it and we'll put it back in RocksDB 3.14.
+* Env::FileExists now returns a Status instead of a boolean.
+* Add statistics::getHistogramString() to print a detailed distribution of a histogram metric.
+* Add DBOptions::skip_stats_update_on_db_open. When it is on, DB::Open() will run faster as it skips the random reads required for loading necessary stats from SST files to optimize compaction.
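+
+Tying together the 4.1.0 AddFile()/SstFileWriter additions above, here is a minimal sketch of bulk-building an SST file outside the DB; the constructor and method signatures are reconstructed from memory of the 4.1-era include/rocksdb/sst_file_writer.h, so treat them as an approximation:
+
+```cpp
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/options.h"
+#include "rocksdb/sst_file_writer.h"
+
+rocksdb::Status BuildAndAdd(rocksdb::DB* db) {
+  rocksdb::Options options;
+  rocksdb::SstFileWriter writer(rocksdb::EnvOptions(), options,
+                                options.comparator);
+  rocksdb::Status s = writer.Open("/tmp/bulk.sst");
+  // Keys must be added in sorted order.
+  if (s.ok()) s = writer.Add("key1", "value1");
+  if (s.ok()) s = writer.Add("key2", "value2");
+  if (s.ok()) s = writer.Finish();
+  // Hand the finished file to an (empty) database.
+  if (s.ok()) s = db->AddFile("/tmp/bulk.sst");
+  return s;
+}
+```
+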
+## 3.12.0 (7/2/2015)
+### New Features
+* Added experimental support for optimistic transactions. See include/rocksdb/utilities/optimistic_transaction.h for more info.
+* Added a new way to report QPS from db_bench (check out --report_file and --report_interval_seconds).
+* Added a cache for individual rows. See DBOptions::row_cache for more info.
+* Several new features on EventListener (see include/rocksdb/listener.h):
+  - OnCompactionCompleted() now returns per-compaction job statistics, defined in include/rocksdb/compaction_job_stats.h.
+  - Added OnTableFileCreated() and OnTableFileDeleted().
+* Add compaction_options_universal.enable_trivial_move; when set to true, trivial moves are allowed while performing universal compaction. Trivial moves happen only when all the input files are non-overlapping.
+
+### Public API changes
+* EventListener::OnFlushCompleted() now passes FlushJobInfo instead of a list of parameters.
+* DB::GetDbIdentity() is now a const function. If this function is overridden in your application, be sure to also make GetDbIdentity() const to avoid a compile error.
+* Move listeners from ColumnFamilyOptions to DBOptions.
+* Add the max_write_buffer_number_to_maintain option.
+* DB::CompactRange()'s parameter reduce_level is changed to change_level, to allow users to move levels to lower levels if allowed. It can be used to migrate a DB from options.level_compaction_dynamic_level_bytes=false to options.level_compaction_dynamic_level_bytes=true.
+* Change the default value of options.compaction_filter_factory and options.compaction_filter_factory_v2 to nullptr instead of DefaultCompactionFilterFactory and DefaultCompactionFilterFactoryV2.
+* If CancelAllBackgroundWork is called without doing a flush after doing loads with WAL disabled, the changes which haven't been flushed before the call to CancelAllBackgroundWork will be lost.
+* WBWIIterator::Entry() now returns WriteEntry instead of `const WriteEntry&`.
+* options.hard_rate_limit is deprecated.
+* When options.soft_rate_limit or options.level0_slowdown_writes_trigger is triggered, the way to slow down writes is changed: the write rate to the DB is limited to options.delayed_write_rate.
+* DB::GetApproximateSizes() adds a parameter to allow the estimation to include data in the mem table, with the default being not to include it. It is only supported in the skip list mem table for now.
+* DB::CompactRange() now accepts CompactRangeOptions instead of multiple parameters. CompactRangeOptions is defined in include/rocksdb/options.h.
+* CompactRange() will now skip bottommost level compaction for level-based compaction if there is no compaction filter; bottommost_level_compaction is introduced in CompactRangeOptions to control when it's possible to skip bottommost level compaction. This means that if you want the compaction to produce a single file you need to set bottommost_level_compaction to BottommostLevelCompaction::kForce.
+* Add Cache.GetPinnedUsage() to get the size of memory occupied by entries that are in use by the system.
+* DB::Open() will fail if the compression specified in Options is not linked with the binary. If you see this failure, recompile RocksDB with compression libraries present on your system. Also, previously our default compression was snappy. This behavior is now changed: the default compression is snappy only if it's available on the system. If it isn't, we change the default to kNoCompression.
+* We changed how we account for memory used in block cache. Previously, we only counted the sum of block sizes currently present in block cache. Now, we count the actual memory usage of the blocks. For example, a block of size 4.5KB will use 8KB memory with jemalloc. This might decrease your memory usage and possibly decrease performance. Increase block cache size if you see this happening after an upgrade.
+* Add BackupEngineImpl.options_.max_background_operations to specify the maximum number of operations that may be performed in parallel. Add support for parallelized backup and restore.
+* Add DB::SyncWAL() that does a WAL sync without blocking writers.
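+
+Since the 3.12.0 EventListener changes above (listeners moved to DBOptions, OnFlushCompleted() taking a FlushJobInfo) are easy to get wrong when upgrading, here is a minimal sketch; the exact FlushJobInfo fields are from memory of the 3.12-era include/rocksdb/listener.h, so check them against your header:
+
+```cpp
+#include <iostream>
+#include <memory>
+
+#include "rocksdb/db.h"
+#include "rocksdb/listener.h"
+#include "rocksdb/options.h"
+
+// Logs every finished flush via the new single-struct callback.
+class FlushLogger : public rocksdb::EventListener {
+ public:
+  void OnFlushCompleted(rocksdb::DB* /*db*/,
+                        const rocksdb::FlushJobInfo& info) override {
+    std::cout << "flushed: " << info.file_path << std::endl;
+  }
+};
+
+void Configure(rocksdb::Options* options) {
+  // listeners now lives on DBOptions (and therefore on Options).
+  options->listeners.push_back(std::make_shared<FlushLogger>());
+}
+```
+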
+## 3.11.0 (5/19/2015)
+### New Features
+* Added a new API Cache::SetCapacity(size_t capacity) to dynamically change the maximum configured capacity of the cache. If the new capacity is less than the existing cache usage, the implementation will try to lower the usage by evicting the necessary number of elements following a strict LRU policy.
+* Added an experimental API for handling flashcache devices (blacklists background threads from caching their reads) -- NewFlashcacheAwareEnv.
+* If universal compaction is used and options.num_levels > 1, compacted files are stored in non-L0 levels, with smaller files, based on options.target_file_size_base. The limitation on DB size when using universal compaction is greatly mitigated by using more levels. You can set num_levels = 1 to make universal compaction behave as before. If you set num_levels > 1 and want to roll back to a previous version, you need to compact all files to a big file in level 0 (by setting target_file_size_base to be large and calling CompactRange(<cf_handle>, nullptr, nullptr, true, 0)) and reopen the DB with the same version to rewrite the manifest; then you can open it using previous releases.
+* More information about rocksdb background threads is available in Env::GetThreadList(), including the number of bytes read / written by a compaction job, mem-table size and current number of bytes written by a flush job, and many more. Check include/rocksdb/thread_status.h for more details.
+
+### Public API changes
+* TablePropertiesCollector::AddUserKey() is added to replace TablePropertiesCollector::Add(). AddUserKey() exposes the key type, sequence number, and file size so far to users.
+* DBOptions::bytes_per_sync used to apply to both WAL and table files. As of 3.11 it applies only to table files. If you want to use this option to sync WAL in the background, please use wal_bytes_per_sync.
+
+## 3.10.0 (3/24/2015)
+### New Features
+* GetThreadStatus() is now able to report detailed thread status, including:
+  - Thread Operation including flush and compaction.
+  - The stage of the current thread operation.
+  - The elapsed time in micros since the current thread operation started.
+  More information can be found in include/rocksdb/thread_status.h. In addition, when running db_bench with --thread_status_per_interval, db_bench will also report thread status periodically.
+* Changed the LRU caching algorithm so that referenced blocks (by iterators) are never evicted. This change made the parameter removeScanCountLimit obsolete. Because of that, NewLRUCache doesn't take three arguments anymore. The table_cache_remove_scan_limit option is also removed.
+* By default we now optimize the compilation for the compilation platform (using -march=native). If you want to build a portable binary, use 'PORTABLE=1' before the make command.
+* We now allow level-compaction to place files in different paths by
+  specifying them in db_paths along with the target_size.
+  Lower numbered levels will be placed earlier in the db_paths and higher
+  numbered levels will be placed later in the db_paths vector.
+* Potentially big performance improvements if you're using RocksDB with lots of column families (100-1000).
+* Added the BlockBasedTableOptions.format_version option, which allows users to specify which version of the block-based table format they want. As a general guideline, newer versions have more features, but might not be readable by older versions of RocksDB.
+* Added a new block-based table format (version 2), which you can enable by setting BlockBasedTableOptions.format_version = 2. This format changes how we encode size information in compressed blocks and should help with memory allocations if you're using Zlib or BZip2 compressions.
+* MemEnv (env that stores data in memory) is now available in the default library build. You can create it by calling NewMemEnv().
+* Add SliceTransform.SameResultWhenAppended() to help users determine whether it is safe to apply prefix bloom/hash.
+* Block-based table now makes use of the prefix bloom filter if it is a full filter.
+* Block-based table remembers whether a whole-key or prefix-based bloom filter is supported in SST files, and does a sanity check when reading the file against the user's configuration.
+* Fixed a bug in ReadOnlyBackupEngine that deleted corrupted backups in some cases, even though the engine was ReadOnly.
+* options.level_compaction_dynamic_level_bytes, a feature to allow RocksDB to pick a dynamic base of bytes for levels. With this feature turned on, we will automatically adjust max bytes for each level. The goal of this feature is to have a lower bound on size amplification. For more details, see comments in options.h.
+* Added an abstract base class WriteBatchBase for write batches.
+* Fixed a bug where we started deleting files of a dropped column family even if there were still live references to it.
+
+### Public API changes
+* Deprecated the skip_log_error_on_recovery and table_cache_remove_scan_count_limit options.
+* The Logger method logv with a log level parameter is now virtual.
+
+### RocksJava
+* Added compression per level API.
+* MemEnv is now available in RocksJava via the RocksMemEnv class.
+* lz4 compression is now included in the rocksjava static library when running `make rocksdbjavastatic`.
+* Overflowing a size_t when setting rocksdb options now throws an IllegalArgumentException, which removes the necessity for a developer to catch these Exceptions explicitly.
+
+## 3.9.0 (12/8/2014)
+
+### New Features
+* Add rocksdb::GetThreadList(), which in the future will return the current status of all
+  rocksdb-related threads. We will have more code instrumentation in the following RocksDB
+  releases.
+* Change the convert function in rocksdb/utilities/convenience.h to return Status instead of boolean.
+  Also add support for nested options in the convert function.
+
+### Public API changes
+* New API to create a checkpoint added. Given a directory name, creates a new
+  database which is an image of the existing database.
+* New API LinkFile added to Env. If you implement your own Env class, an
+  implementation of the API LinkFile will have to be provided.
+* MemTableRep takes MemTableAllocator instead of Arena.
+
+### Improvements
+* The RocksDBLite library now becomes smaller and will be compiled with the -fno-exceptions flag.
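+
+The 3.9.0 checkpoint API above is easiest to see in code. A minimal sketch follows; it uses the Checkpoint class as it appears in later releases of include/rocksdb/utilities/checkpoint.h, so the exact 3.9-era spelling may differ:
+
+```cpp
+#include "rocksdb/db.h"
+#include "rocksdb/utilities/checkpoint.h"
+
+rocksdb::Status TakeCheckpoint(rocksdb::DB* db) {
+  rocksdb::Checkpoint* checkpoint = nullptr;
+  rocksdb::Status s = rocksdb::Checkpoint::Create(db, &checkpoint);
+  if (s.ok()) {
+    // Creates a directory that is an openable image of the live DB;
+    // this is also where the new Env::LinkFile API comes into play,
+    // since unchanged SST files can be hard-linked rather than copied.
+    s = checkpoint->CreateCheckpoint("/path/to/checkpoint_dir");
+  }
+  delete checkpoint;
+  return s;
+}
+```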
+
+## 3.8.0 (11/14/2014)
+
+### Public API changes
+* BackupEngine::NewBackupEngine() was deprecated; please use BackupEngine::Open() from now on.
+* BackupableDB/RestoreBackupableDB have new GarbageCollect() methods, which will clean up files from corrupt and obsolete backups.
+* BackupableDB/RestoreBackupableDB have new GetCorruptedBackups() methods which list corrupt backups.
+
+### Cleanup
+* A bunch of code cleanup; some extra warnings turned on (-Wshadow, -Wshorten-64-to-32, -Wnon-virtual-dtor).
+
+### New features
+* CompactFiles and EventListener, although they are still in an experimental state.
+* Full ColumnFamily support in RocksJava.
+
+## 3.7.0 (11/6/2014)
+### Public API changes
+* Introduce SetOptions() API to allow adjusting a subset of options dynamically online.
+* Introduce 4 new convenient functions for converting Options from string: GetColumnFamilyOptionsFromMap(), GetColumnFamilyOptionsFromString(), GetDBOptionsFromMap(), GetDBOptionsFromString().
+* Remove WriteBatchWithIndex.Delete() overloads using SliceParts.
+* When opening a DB, if options.max_background_compactions is larger than the existing low pri pool of options.env, it will enlarge it. Similarly, if options.max_background_flushes is larger than the existing high pri pool of options.env, it will enlarge it.
+
+## 3.6.0 (10/7/2014)
+### Disk format changes
+* If you're using RocksDB on ARM platforms and you're using the default bloom filter, there is a disk format change you need to be aware of. There are three steps you need to do when you convert to the new release: 1. turn off filter policy, 2. compact the whole database, 3. turn on filter policy.
+
+### Behavior changes
+* We have refactored our system of stalling writes. Any stall-related statistics' meanings are changed. Instead of per-write stall counts, we now count stalls per-epoch, where epochs are periods between flushes and compactions. You'll find more information in our Tuning Perf Guide once we release RocksDB 3.6.
+* When disableDataSync=true, we no longer sync the MANIFEST file.
+* Add identity_as_first_hash property to CuckooTable. SST files need to be rebuilt to be opened by the reader properly.
+
+### Public API changes
+* Change target_file_size_base type to uint64_t from int.
+* Remove allow_thread_local. This feature has proved to be stable, so we are turning it always-on.
+
+## 3.5.0 (9/3/2014)
+### New Features
+* Add include/utilities/write_batch_with_index.h, providing a utility class to query data out of a WriteBatch when building it.
+* Move BlockBasedTable-related options to BlockBasedTableOptions from Options. Change the corresponding JNI interface. Options affected include:
+  no_block_cache, block_cache, block_cache_compressed, block_size, block_size_deviation, block_restart_interval, filter_policy, whole_key_filtering. filter_policy is changed to shared_ptr from a raw pointer.
+* Remove deprecated options: disable_seek_compaction and db_stats_log_interval.
+* OptimizeForPointLookup() takes one parameter for block cache size. It now builds a hash index, bloom filter, and block cache.
+
+### Public API changes
+* The Prefix Extractor used with V2 compaction filters is now passed the user key to SliceTransform::Transform instead of the unparsed RocksDB key.
+
+## 3.4.0 (8/18/2014)
+### New Features
+* Support multiple DB paths in universal-style compactions.
+* Add the feature of storing the plain table index and bloom filter in the SST file.
+* CompactRange() will never output compacted files to level 0. This used to be the case when all the compaction input files were at level 0.
+* Added iterate_upper_bound to define the extent up to which the forward iterator will return entries. This will prevent iterating over delete markers and overwritten entries for edge cases where you want to break out of the iteration anyway. This may improve performance in case there are a large number of delete markers or overwritten entries (see the sketch below).
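+
+A minimal sketch of iterate_upper_bound, assuming `db` is an open rocksdb::DB*; note that the bound Slice must outlive the iterator:
+
+```cpp
+#include <memory>
+
+#include "rocksdb/db.h"
+#include "rocksdb/options.h"
+
+void ScanUserRange(rocksdb::DB* db) {
+  // Iteration stops before "user3"; tombstones past the bound are not touched.
+  rocksdb::Slice upper_bound("user3");
+  rocksdb::ReadOptions read_options;
+  read_options.iterate_upper_bound = &upper_bound;
+  std::unique_ptr<rocksdb::Iterator> it(db->NewIterator(read_options));
+  for (it->Seek("user1"); it->Valid(); it->Next()) {
+    // Keys in ["user1", "user3") only.
+  }
+}
+```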
+
+### Public API changes
+* DBOptions.db_paths is now a vector of DBPath structures, each of which indicates both path and target size.
+* NewPlainTableFactory, instead of a bunch of parameters, now accepts PlainTableOptions, which is defined in include/rocksdb/table.h.
+* Moved include/utilities/*.h to include/rocksdb/utilities/*.h.
+* Statistics APIs now take uint32_t as the type instead of Tickers. Also made the two access functions getTickerCount and histogramData const.
+* Add DB property rocksdb.estimate-num-keys, the estimated number of live keys in the DB.
+* Add DB::GetIntProperty(), which returns integer DB properties as uint64_t.
+* The Prefix Extractor used with V2 compaction filters is now passed the user key to SliceTransform::Transform instead of the unparsed RocksDB key.
+
+## 3.3.0 (7/10/2014)
+### New Features
+* Added JSON API prototype.
+* HashLinklist reduces performance outliers caused by skewed buckets by switching data in the bucket from a linked list to a skip list. Added the parameter threshold_use_skiplist in NewHashLinkListRepFactory().
+* RocksDB is now able to reclaim storage space more effectively during the compaction process. This is done by compensating the size of each deletion entry by the 2X average value size, which makes deletion entries trigger compaction more easily.
+* Add a TimeOut API to write. WriteOptions now have a variable called timeout_hint_us. With timeout_hint_us set to non-zero, any write associated with this timeout_hint_us may be aborted when it runs longer than the specified timeout_hint_us, and it is guaranteed that any write that completes earlier than the specified timeout will not be aborted due to the timeout condition.
+* Add a rate_limiter option, which controls the total throughput of flush and compaction (see the sketch below). The throughput is specified in bytes/sec. Flush always has precedence over compaction when available bandwidth is constrained.
+
+### Public API changes
+* Removed NewTotalOrderPlainTableFactory because it is not used and was implemented semantically incorrectly.
+
+## 3.2.0 (06/20/2014)
+
+### Public API changes
+* We removed seek compaction as a concept from RocksDB because:
+1) It makes more sense for spinning disk workloads, while RocksDB is primarily designed for flash and memory,
+2) It added some complexity to the important code-paths,
+3) None of our internal customers were really using it.
+Because of that, Options::disable_seek_compaction is now obsolete. It is still a parameter in Options, so it does not break the build, but it does not have any effect. We plan to completely remove it at some point, so we ask users to please remove this option from your code base.
+* Add two parameters to NewHashLinkListRepFactory() for logging when there are too many entries in a hash bucket when flushing.
+* Added the new option BlockBasedTableOptions::hash_index_allow_collision. When enabled, the prefix hash index for block-based tables will not store the prefix and will allow hash collisions, reducing memory consumption.
+
+### New Features
+* PlainTable now supports a new key encoding: for keys of the same prefix, the prefix is only written once. It can be enabled through the encoding_type parameter of NewPlainTableFactory().
+* Add AdaptiveTableFactory, which is used to convert from a DB of PlainTable to BlockBasedTable, or vice versa. It can be created using NewAdaptiveTableFactory().
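+
+The rate_limiter sketch promised above for 3.3.0; NewGenericRateLimiter() is the factory from later releases' include/rocksdb/rate_limiter.h, so the 3.3-era spelling of this option may differ:
+
+```cpp
+#include "rocksdb/options.h"
+#include "rocksdb/rate_limiter.h"
+
+void CapBackgroundIo(rocksdb::Options* options) {
+  // Limit combined flush + compaction throughput to 10 MB/s.
+  options->rate_limiter.reset(
+      rocksdb::NewGenericRateLimiter(10 * 1024 * 1024 /* bytes per sec */));
+}
+```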
+
+### Performance Improvements
+* Tailing Iterator re-implemented with ForwardIterator + Cascading Search Hint, yielding a ~20% throughput improvement.
+
+## 3.1.0 (05/21/2014)
+
+### Public API changes
+* Replaced ColumnFamilyOptions::table_properties_collectors with ColumnFamilyOptions::table_properties_collector_factories.
+
+### New Features
+* The hash index for block-based tables will be materialized and reconstructed more efficiently. Previously the hash index was constructed by scanning the whole table during every table open.
+* FIFO compaction style.
+
+## 3.0.0 (05/05/2014)
+
+### Public API changes
+* Added _LEVEL to all InfoLogLevel enums.
+* Deprecated ReadOptions.prefix and ReadOptions.prefix_seek. Seek() defaults to prefix-based seek when Options.prefix_extractor is supplied. More detail is documented in https://github.com/facebook/rocksdb/wiki/Prefix-Seek-API-Changes
+* MemTableRepFactory::CreateMemTableRep() takes an info logger as an extra parameter.
+
+### New Features
+* Column family support.
+* Added an option to use different checksum functions in BlockBasedTableOptions.
+* Added ApplyToAllCacheEntries() function to Cache.
+
+## 2.8.0 (04/04/2014)
+
+* Removed arena.h from public header files.
+* By default, checksums are verified on every read from the database.
+* Changed the default value of several options, including: paranoid_checks=true, max_open_files=5000, level0_slowdown_writes_trigger=20, level0_stop_writes_trigger=24, disable_seek_compaction=true, max_background_flushes=1 and allow_mmap_writes=false.
+* Added is_manual_compaction to CompactionFilter::Context.
+* Added "virtual void WaitForJoin()" in class Env. The default operation is a no-op.
+* Removed the BackupEngine::DeleteBackupsNewerThan() function.
+* Added a new option -- verify_checksums_in_compaction.
+* Changed Options.prefix_extractor from a raw pointer to a shared_ptr (takes ownership; see the sketch at the end of this section).
+  Changed the HashSkipListRepFactory and HashLinkListRepFactory constructors to not take a SliceTransform object (they use Options.prefix_extractor implicitly).
+* Added Env::GetThreadPoolQueueLen(), which returns the waiting queue length of thread pools.
+* Added a command "checkconsistency" in the ldb tool, which checks
+  if the file system state matches the DB state (file existence and file sizes).
+* Separated options related to block-based tables into a new struct, BlockBasedTableOptions.
+* WriteBatch has a new function Count() to return the number of entries in the batch, and Data() now returns a reference instead of a copy.
+* Add more counters to perf context.
+* Supports several more DB properties: compaction-pending, background-errors and cur-size-active-mem-table.
+
+### New Features
+* If we find one truncated record at the end of the MANIFEST or WAL files,
+  we will ignore it. We assume that writers of these records were interrupted
+  and that we can safely ignore it.
+* A new SST format "PlainTable" is added, which is optimized for memory-only workloads. It can be created through NewPlainTableFactory() or NewTotalOrderPlainTableFactory().
+* A new mem table implementation, hash linked list, optimized for the case where there are only a few keys for each prefix. It can be created through NewHashLinkListRepFactory().
+* The merge operator supports a new function PartialMergeMulti() to allow users to do partial merges against multiple operands.
+* The compaction filter now has a V2 interface. It buffers the kv-pairs sharing the same key prefix, processes them in batches, and returns the batched results back to the DB. The new interface uses a new structure, CompactionFilterContext, for the same purpose as CompactionFilter::Context in V1.
+* Geo-spatial support for locations and radial-search.
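+
+The sketch promised above for the 2.8.0 prefix_extractor ownership change; NewFixedPrefixTransform() is the stock fixed-length transform from include/rocksdb/slice_transform.h:
+
+```cpp
+#include "rocksdb/options.h"
+#include "rocksdb/slice_transform.h"
+
+void UsePrefixSeek(rocksdb::Options* options) {
+  // prefix_extractor is a shared_ptr as of 2.8.0, so the Options object
+  // now owns the transform; no manual delete needed.
+  options->prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(8));
+}
+```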
+
+## 2.7.0 (01/28/2014)
+
+### Public API changes
+
+* Renamed `StackableDB::GetRawDB()` to `StackableDB::GetBaseDB()`.
+* Changed `WriteBatch::Data()` to `const std::string& Data() const`.
+* Renamed class `TableStats` to `TableProperties`.
+* Deleted class `PrefixHashRepFactory`. Please use `NewHashSkipListRepFactory()` instead.
+* Supported multi-threaded `EnableFileDeletions()` and `DisableFileDeletions()`.
+* Added `DB::GetOptions()`.
+* Added `DB::GetDbIdentity()`.
+
+### New Features
+
+* Added [BackupableDB](https://github.com/facebook/rocksdb/wiki/How-to-backup-RocksDB%3F)
+* Implemented [TailingIterator](https://github.com/facebook/rocksdb/wiki/Tailing-Iterator), a special type of iterator that
+  doesn't create a snapshot (can be used to read newly inserted data)
+  and is optimized for doing sequential reads.
+* Added a property block for tables, which allows (1) a table to store
+  its metadata and (2) end users to collect and store properties they
+  are interested in.
+* Enabled caching index and filter blocks in the block cache (turned off by default).
+* Supported error reporting when doing manual compaction.
+* Supported additional Linux platform flavors and Mac OS.
+* Put with `SliceParts` - Variant of `Put()` that gathers output like `writev(2)`.
+* Bug fixes and code refactoring for compatibility with the upcoming Column
+  Family feature.
+
+### Performance Improvements
+
+* Huge benchmark performance improvements by multiple efforts. For example, increase in readonly QPS from about 530k in the 2.6 release to 1.1 million in 2.7 [1]
+* Sped up the way RocksDB deletes obsolete files - no longer listing the whole directory under a lock -- decrease in p99 latency.
+* Use a raw pointer instead of a shared pointer for statistics: [5b825d](https://github.com/facebook/rocksdb/commit/5b825d6964e26ec3b4bb6faa708ebb1787f1d7bd) -- huge increase in performance -- shared pointers are slow.
+* Optimized locking for `Get()` -- [1fdb3f](https://github.com/facebook/rocksdb/commit/1fdb3f7dc60e96394e3e5b69a46ede5d67fb976c) -- 1.5x QPS increase for some workloads.
+* Cache speedup - [e8d40c3](https://github.com/facebook/rocksdb/commit/e8d40c31b3cca0c3e1ae9abe9b9003b1288026a9)
+* Implemented autovector, which allocates the first N elements on the stack. Most vectors in RocksDB are small. Also, we never want to allocate heap objects while holding a mutex. -- [c01676e4](https://github.com/facebook/rocksdb/commit/c01676e46d3be08c3c140361ef1f5884f47d3b3c)
+* Lots of effort to move malloc, memcpy and IO outside of locks.
diff --git a/external/rocksdb/INSTALL.md b/external/rocksdb/INSTALL.md
new file mode 100755
index 0000000000..3669bf1cf1
--- /dev/null
+++ b/external/rocksdb/INSTALL.md
@@ -0,0 +1,91 @@
+## Compilation
+
+**Important**: If you plan to run RocksDB in production, don't compile using the default
+`make` or `make all`. That will compile RocksDB in debug mode, which is much slower
+than release mode.
+
+RocksDB's library should be able to compile without any dependency installed,
+although we recommend installing some compression libraries (see below).
+We do depend on newer gcc/clang with C++11 support.
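+
+Once the library is built, a minimal program is the quickest way to confirm that headers and linking work; this is an illustrative sketch (the path /tmp/rocksdb_check and the compile command are examples, not part of the build system, and extra `-l` flags may be needed depending on which compression libraries were linked in):
+
+```cpp
+// build_check.cc -- e.g.: g++ -std=c++11 build_check.cc -lrocksdb -lpthread
+#include <cassert>
+#include <string>
+
+#include "rocksdb/db.h"
+
+int main() {
+  rocksdb::DB* db = nullptr;
+  rocksdb::Options options;
+  options.create_if_missing = true;
+  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/rocksdb_check", &db);
+  assert(s.ok());
+  s = db->Put(rocksdb::WriteOptions(), "key", "value");
+  assert(s.ok());
+  std::string value;
+  s = db->Get(rocksdb::ReadOptions(), "key", &value);
+  assert(s.ok() && value == "value");
+  delete db;
+  return 0;
+}
+```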
+
+There are a few options when compiling RocksDB:
+
+* [recommended] `make static_lib` will compile librocksdb.a, the RocksDB static library. It compiles the static library in release mode.
+
+* `make shared_lib` will compile librocksdb.so, the RocksDB shared library. It compiles the shared library in release mode.
+
+* `make check` will compile and run all the unit tests. `make check` will compile RocksDB in debug mode.
+
+* `make all` will compile our static library, and all our tools and unit tests. Our tools
+depend on gflags. You will need to have gflags installed to run `make all`. This will compile RocksDB in debug mode. Don't
+use binaries compiled by `make all` in production.
+
+* By default the binary we produce is optimized for the platform you're compiling on
+(-march=native or the equivalent). If you want to build a portable binary, add 'PORTABLE=1' before
+your make commands, like this: `PORTABLE=1 make static_lib`
+
+## Dependencies
+
+* You can link RocksDB with the following compression libraries:
+  - [zlib](http://www.zlib.net/) - a library for data compression.
+  - [bzip2](http://www.bzip.org/) - a library for data compression.
+  - [snappy](https://code.google.com/p/snappy/) - a library for fast
+    data compression.
+
+* All our tools depend on:
+  - [gflags](https://gflags.github.io/gflags/) - a library that handles
+    command-line flag processing. You can compile the rocksdb library even
+    if you don't have gflags installed.
+
+## Supported platforms
+
+* **Linux - Ubuntu**
+    * Upgrade your gcc to at least version 4.7 to get C++11 support.
+    * Install gflags. First, try: `sudo apt-get install libgflags-dev`
+      If this doesn't work and you're using Ubuntu, here's a nice tutorial:
+      (http://askubuntu.com/questions/312173/installing-gflags-12-04)
+    * Install snappy. This is usually as easy as:
+      `sudo apt-get install libsnappy-dev`.
+    * Install zlib. Try: `sudo apt-get install zlib1g-dev`.
+    * Install bzip2: `sudo apt-get install libbz2-dev`.
+* **Linux - CentOS**
+    * Upgrade your gcc to at least version 4.7 to get C++11 support:
+      `yum install gcc47-c++`
+    * Install gflags:
+
+          wget https://gflags.googlecode.com/files/gflags-2.0-no-svn-files.tar.gz
+          tar -xzvf gflags-2.0-no-svn-files.tar.gz
+          cd gflags-2.0
+          ./configure && make && sudo make install
+
+    * Install snappy:
+
+          wget https://snappy.googlecode.com/files/snappy-1.1.1.tar.gz
+          tar -xzvf snappy-1.1.1.tar.gz
+          cd snappy-1.1.1
+          ./configure && make && sudo make install
+
+    * Install zlib:
+
+          sudo yum install zlib
+          sudo yum install zlib-devel
+
+    * Install bzip2:
+
+          sudo yum install bzip2
+          sudo yum install bzip2-devel
+
+* **OS X**:
+    * Install the latest C++ compiler that supports C++11:
+        * Update XCode: run `xcode-select --install` (or install it from the XCode App's settings).
+        * Install via [homebrew](http://brew.sh/).
+            * If you're a first-time developer on macOS, you still need to run: `xcode-select --install` in your command line.
+            * Run `brew tap homebrew/versions; brew install gcc47 --use-llvm` to install gcc 4.7 (or higher).
+            * Run `brew install rocksdb`.
+
+* **iOS**:
+    * Run: `TARGET_OS=IOS make static_lib`. When building a project that uses the rocksdb iOS library, make sure to define two important pre-processing macros: `ROCKSDB_LITE` and `IOS_CROSS_COMPILE`.
+
+* **Windows**:
+    * For building with MS Visual Studio 13 you will need Update 4 installed.
+ * Read and follow the instructions at CMakeLists.txt diff --git a/external/rocksdb/LANGUAGE-BINDINGS.md b/external/rocksdb/LANGUAGE-BINDINGS.md new file mode 100755 index 0000000000..e9aaf2f16a --- /dev/null +++ b/external/rocksdb/LANGUAGE-BINDINGS.md @@ -0,0 +1,12 @@ +This is the list of all known third-party language bindings for RocksDB. If something is missing, please open a pull request to add it. + +* Java - https://github.com/facebook/rocksdb/tree/master/java +* Python - http://pyrocksdb.readthedocs.org/en/latest/ +* Perl - https://metacpan.org/pod/RocksDB +* Node.js - https://npmjs.org/package/rocksdb +* Go - https://github.com/tecbot/gorocksdb +* Ruby - http://rubygems.org/gems/rocksdb-ruby +* Haskell - https://hackage.haskell.org/package/rocksdb-haskell +* PHP - https://github.com/Photonios/rocksdb-php +* C# - https://github.com/warrenfalk/rocksdb-sharp +* Rust - https://github.com/spacejam/rust-rocksdb diff --git a/external/rocksdb/LICENSE b/external/rocksdb/LICENSE new file mode 100755 index 0000000000..46f685e968 --- /dev/null +++ b/external/rocksdb/LICENSE @@ -0,0 +1,35 @@ +BSD License + +For rocksdb software + +Copyright (c) 2011-present, Facebook, Inc. +All rights reserved. +--------------------------------------------------------------------- + +Copyright (c) 2011 The LevelDB Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/external/rocksdb/Makefile b/external/rocksdb/Makefile new file mode 100755 index 0000000000..857c521019 --- /dev/null +++ b/external/rocksdb/Makefile @@ -0,0 +1,1477 @@ +# Copyright (c) 2011 The LevelDB Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. See the AUTHORS file for names of contributors. + +# Inherit some settings from environment variables, if available + +#----------------------------------------------- + +CLEAN_FILES = # deliberately empty, so we can append below. 
+CFLAGS += ${EXTRA_CFLAGS}
+CXXFLAGS += ${EXTRA_CXXFLAGS}
+LDFLAGS += $(EXTRA_LDFLAGS)
+MACHINE ?= $(shell uname -m)
+ARFLAGS = rs
+
+# Transform parallel LOG output into something more readable.
+perl_command = perl -n \
+  -e '@a=split("\t",$$_,-1); $$t=$$a[8];' \
+  -e '$$t =~ /.*if\s\[\[\s"(.*?\.[\w\/]+)/ and $$t=$$1;' \
+  -e '$$t =~ s,^\./,,;' \
+  -e '$$t =~ s, >.*,,; chomp $$t;' \
+  -e '$$t =~ /.*--gtest_filter=(.*?\.[\w\/]+)/ and $$t=$$1;' \
+  -e 'printf "%7.3f %s %s\n", $$a[3], $$a[6] == 0 ? "PASS" : "FAIL", $$t'
+quoted_perl_command = $(subst ','\'',$(perl_command))
+
+# DEBUG_LEVEL can have three values:
+# * DEBUG_LEVEL=2; this is the ultimate debug mode. It will compile rocksdb
+# without any optimizations. To compile with level 2, issue `make dbg`
+# * DEBUG_LEVEL=1; debug level 1 enables all assertions and debug code, but
+# compiles rocksdb with -O2 optimizations. This is the default debug level.
+# `make all` or `make <binary_target>` compile RocksDB with debug level 1.
+# We use this debug level when developing RocksDB.
+# * DEBUG_LEVEL=0; this is the debug level we use for release. If you're
+# running rocksdb in production you most definitely want to compile RocksDB
+# with debug level 0. To compile with level 0, run `make shared_lib`,
+# `make install-shared`, `make static_lib`, `make install-static` or
+# `make install`
+
+# Set the default DEBUG_LEVEL to 1
+DEBUG_LEVEL?=1
+
+ifeq ($(MAKECMDGOALS),dbg)
+  DEBUG_LEVEL=2
+endif
+
+ifeq ($(MAKECMDGOALS),clean)
+  DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),release)
+  DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),shared_lib)
+  DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),install-shared)
+  DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),static_lib)
+  DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),install-static)
+  DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),install)
+  DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),rocksdbjavastatic)
+  DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),rocksdbjavastaticrelease)
+  DEBUG_LEVEL=0
+endif
+
+ifeq ($(MAKECMDGOALS),rocksdbjavastaticpublish)
+  DEBUG_LEVEL=0
+endif
+
+# compile with -O2 if debug level is not 2
+ifneq ($(DEBUG_LEVEL), 2)
+OPT += -O2 -fno-omit-frame-pointer
+# Skip for archs that don't support -momit-leaf-frame-pointer
+ifeq (,$(shell $(CXX) -fsyntax-only -momit-leaf-frame-pointer -xc /dev/null 2>&1))
+OPT += -momit-leaf-frame-pointer
+endif
+endif
+
+# if we're compiling for release, compile without debug code (-DNDEBUG) and
+# don't treat warnings as errors
+ifeq ($(DEBUG_LEVEL),0)
+OPT += -DNDEBUG
+DISABLE_WARNING_AS_ERROR=1
+else
+$(warning Warning: Compiling in debug mode. Don't use the resulting binary in production)
+endif
+
+#-----------------------------------------------
+include src.mk
+
+AM_DEFAULT_VERBOSITY = 0
+
+AM_V_GEN = $(am__v_GEN_$(V))
+am__v_GEN_ = $(am__v_GEN_$(AM_DEFAULT_VERBOSITY))
+am__v_GEN_0 = @echo "  GEN     " $@;
+am__v_GEN_1 =
+AM_V_at = $(am__v_at_$(V))
+am__v_at_ = $(am__v_at_$(AM_DEFAULT_VERBOSITY))
+am__v_at_0 = @
+am__v_at_1 =
+
+AM_V_CC = $(am__v_CC_$(V))
+am__v_CC_ = $(am__v_CC_$(AM_DEFAULT_VERBOSITY))
+am__v_CC_0 = @echo "  CC      " $@;
+am__v_CC_1 =
+CCLD = $(CC)
+LINK = $(CCLD) $(AM_CFLAGS) $(CFLAGS) $(AM_LDFLAGS) $(LDFLAGS) -o $@
+AM_V_CCLD = $(am__v_CCLD_$(V))
+am__v_CCLD_ = $(am__v_CCLD_$(AM_DEFAULT_VERBOSITY))
+am__v_CCLD_0 = @echo "  CCLD    " $@;
+am__v_CCLD_1 =
+AM_V_AR = $(am__v_AR_$(V))
+am__v_AR_ = $(am__v_AR_$(AM_DEFAULT_VERBOSITY))
+am__v_AR_0 = @echo "  AR      " $@;
+am__v_AR_1 =
+
+ifdef ROCKSDB_USE_LIBRADOS
+LIB_SOURCES += utilities/env_librados.cc
+LDFLAGS += -lrados
+endif
+
+AM_LINK = $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) $(COVERAGEFLAGS)
+# detect what platform we're building on
+dummy := $(shell (export ROCKSDB_ROOT="$(CURDIR)"; "$(CURDIR)/build_tools/build_detect_platform" "$(CURDIR)/make_config.mk"))
+# this file is generated by the previous line to set build flags and sources
+include make_config.mk
+CLEAN_FILES += make_config.mk
+
+missing_make_config_paths := $(shell \
+  grep "\/\S*" -o $(CURDIR)/make_config.mk | \
+  while read path; \
+    do [ -e $$path ] || echo $$path; \
+  done | sort | uniq)
+
+$(foreach path, $(missing_make_config_paths), \
+  $(warning Warning: $(path) doesn't exist))
+
+ifneq ($(PLATFORM), IOS)
+CFLAGS += -g
+CXXFLAGS += -g
+else
+# no debug info for IOS, that will make our library big
+OPT += -DNDEBUG
+endif
+
+ifeq ($(PLATFORM), OS_SOLARIS)
+  PLATFORM_CXXFLAGS += -D _GLIBCXX_USE_C99
+endif
+ifneq ($(filter -DROCKSDB_LITE,$(OPT)),)
+  # found
+  CFLAGS += -fno-exceptions
+  CXXFLAGS += -fno-exceptions
+endif
+
+# ASAN doesn't work well with jemalloc. If we're compiling with ASAN, we should use regular malloc.
+ifdef COMPILE_WITH_ASAN
+  DISABLE_JEMALLOC=1
+  EXEC_LDFLAGS += -fsanitize=address
+  PLATFORM_CCFLAGS += -fsanitize=address
+  PLATFORM_CXXFLAGS += -fsanitize=address
+endif
+
+# TSAN doesn't work well with jemalloc. If we're compiling with TSAN, we should use regular malloc.
+ifdef COMPILE_WITH_TSAN
+  DISABLE_JEMALLOC=1
+  EXEC_LDFLAGS += -fsanitize=thread -pie
+  PLATFORM_CCFLAGS += -fsanitize=thread -fPIC -DROCKSDB_TSAN_RUN
+  PLATFORM_CXXFLAGS += -fsanitize=thread -fPIC -DROCKSDB_TSAN_RUN
+  # Turn off -pg when enabling TSAN testing, because that induces
+  # a link failure. TODO: find the root cause
+  PROFILING_FLAGS =
+endif
+
+# USAN doesn't work well with jemalloc. If we're compiling with USAN, we should use regular malloc.
+ifdef COMPILE_WITH_UBSAN
+  DISABLE_JEMALLOC=1
+  EXEC_LDFLAGS += -fsanitize=undefined
+  PLATFORM_CCFLAGS += -fsanitize=undefined
+  PLATFORM_CXXFLAGS += -fsanitize=undefined
+endif
+
+ifndef DISABLE_JEMALLOC
+  ifdef JEMALLOC
+    PLATFORM_CXXFLAGS += "-DROCKSDB_JEMALLOC"
+    PLATFORM_CCFLAGS += "-DROCKSDB_JEMALLOC"
+  endif
+  EXEC_LDFLAGS := $(JEMALLOC_LIB) $(EXEC_LDFLAGS)
+  PLATFORM_CXXFLAGS += $(JEMALLOC_INCLUDE)
+  PLATFORM_CCFLAGS += $(JEMALLOC_INCLUDE)
+endif
+
+export GTEST_THROW_ON_FAILURE=1 GTEST_HAS_EXCEPTIONS=1
+GTEST_DIR = ./third-party/gtest-1.7.0/fused-src
+PLATFORM_CCFLAGS += -isystem $(GTEST_DIR)
+PLATFORM_CXXFLAGS += -isystem $(GTEST_DIR)
+
+# This (the first rule) must depend on "all".
+default: all + +WARNING_FLAGS = -W -Wextra -Wall -Wsign-compare -Wshadow \ + -Wno-unused-parameter + +ifndef DISABLE_WARNING_AS_ERROR + WARNING_FLAGS += -Werror +endif + +CFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CCFLAGS) $(OPT) +CXXFLAGS += $(WARNING_FLAGS) -I. -I./include $(PLATFORM_CXXFLAGS) $(OPT) -Woverloaded-virtual -Wnon-virtual-dtor -Wno-missing-field-initializers + +LDFLAGS += $(PLATFORM_LDFLAGS) + +date := $(shell date +%F) +ifdef FORCE_GIT_SHA + git_sha := $(FORCE_GIT_SHA) +else + git_sha := $(shell git rev-parse HEAD 2>/dev/null) +endif +gen_build_version = \ + printf '%s\n' \ + '\#include "build_version.h"' \ + 'const char* rocksdb_build_git_sha = \ + "rocksdb_build_git_sha:$(git_sha)";' \ + 'const char* rocksdb_build_git_date = \ + "rocksdb_build_git_date:$(date)";' \ + 'const char* rocksdb_build_compile_date = __DATE__;' + +# Record the version of the source that we are compiling. +# We keep a record of the git revision in this file. It is then built +# as a regular source file as part of the compilation process. +# One can run "strings executable_filename | grep _build_" to find +# the version of the source that we used to build the executable file. +CLEAN_FILES += util/build_version.cc: +FORCE: +util/build_version.cc: FORCE + $(AM_V_GEN)rm -f $@-t + $(AM_V_at)$(gen_build_version) > $@-t + $(AM_V_at)if test -f $@; then \ + cmp -s $@-t $@ && rm -f $@-t || mv -f $@-t $@; \ + else mv -f $@-t $@; fi + +LIBOBJECTS = $(LIB_SOURCES:.cc=.o) +LIBOBJECTS += $(TOOL_LIB_SOURCES:.cc=.o) +MOCKOBJECTS = $(MOCK_LIB_SOURCES:.cc=.o) + +GTEST = $(GTEST_DIR)/gtest/gtest-all.o +TESTUTIL = ./util/testutil.o +TESTHARNESS = ./util/testharness.o $(TESTUTIL) $(MOCKOBJECTS) $(GTEST) +VALGRIND_ERROR = 2 +VALGRIND_VER := $(join $(VALGRIND_VER),valgrind) + +VALGRIND_OPTS = --error-exitcode=$(VALGRIND_ERROR) --leak-check=full + +BENCHTOOLOBJECTS = $(BENCH_LIB_SOURCES:.cc=.o) $(LIBOBJECTS) $(TESTUTIL) + +TESTS = \ + db_test \ + db_test2 \ + db_block_cache_test \ + db_bloom_filter_test \ + db_iter_test \ + db_log_iter_test \ + db_compaction_filter_test \ + db_compaction_test \ + db_dynamic_level_test \ + db_flush_test \ + db_inplace_update_test \ + db_iterator_test \ + db_options_test \ + db_sst_test \ + db_tailing_iter_test \ + db_universal_compaction_test \ + db_wal_test \ + db_io_failure_test \ + db_properties_test \ + db_table_properties_test \ + autovector_test \ + column_family_test \ + table_properties_collector_test \ + arena_test \ + auto_roll_logger_test \ + block_test \ + bloom_test \ + dynamic_bloom_test \ + c_test \ + cache_test \ + checkpoint_test \ + coding_test \ + corruption_test \ + crc32c_test \ + slice_transform_test \ + dbformat_test \ + env_basic_test \ + env_test \ + fault_injection_test \ + filelock_test \ + filename_test \ + file_reader_writer_test \ + block_based_filter_block_test \ + full_filter_block_test \ + hash_table_test \ + histogram_test \ + inlineskiplist_test \ + log_test \ + manual_compaction_test \ + mock_env_test \ + memtable_list_test \ + merge_helper_test \ + memory_test \ + merge_test \ + merger_test \ + util_merge_operators_test \ + options_file_test \ + redis_test \ + reduce_levels_test \ + plain_table_db_test \ + comparator_db_test \ + prefix_test \ + skiplist_test \ + stringappend_test \ + ttl_test \ + backupable_db_test \ + document_db_test \ + json_document_test \ + sim_cache_test \ + spatial_db_test \ + version_edit_test \ + version_set_test \ + compaction_picker_test \ + version_builder_test \ + file_indexer_test \ + write_batch_test \ + 
	write_batch_with_index_test \
	write_controller_test\
	deletefile_test \
	table_test \
	thread_local_test \
	geodb_test \
	rate_limiter_test \
	delete_scheduler_test \
	options_test \
	options_settable_test \
	options_util_test \
	event_logger_test \
	cuckoo_table_builder_test \
	cuckoo_table_reader_test \
	cuckoo_table_db_test \
	flush_job_test \
	wal_manager_test \
	listener_test \
	compaction_iterator_test \
	compaction_job_test \
	thread_list_test \
	sst_dump_test \
	compact_files_test \
	perf_context_test \
	optimistic_transaction_test \
	write_callback_test \
	heap_test \
	compact_on_deletion_collector_test \
	compaction_job_stats_test \
	option_change_migration_test \
	transaction_test \
	ldb_cmd_test \
	iostats_context_test \
	persistent_cache_test \
	statistics_test \
+
+PARALLEL_TEST = \
	backupable_db_test \
	compact_on_deletion_collector_test \
	db_compaction_filter_test \
	db_compaction_test \
	db_sst_test \
	db_test \
	db_universal_compaction_test \
	fault_injection_test \
	inlineskiplist_test \
	manual_compaction_test \
	table_test
+
+SUBSET := $(TESTS)
+ifdef ROCKSDBTESTS_START
+        SUBSET := $(shell echo $(SUBSET) | sed 's/^.*$(ROCKSDBTESTS_START)/$(ROCKSDBTESTS_START)/')
+endif
+
+ifdef ROCKSDBTESTS_END
+        SUBSET := $(shell echo $(SUBSET) | sed 's/$(ROCKSDBTESTS_END).*//')
+endif
+
+TOOLS = \
	sst_dump \
	db_sanity_test \
	db_stress \
	write_stress \
	ldb \
	db_repl_stress \
	rocksdb_dump \
	rocksdb_undump
+
+TEST_LIBS = \
	librocksdb_env_basic_test.a
+
+# TODO: add back forward_iterator_bench, after making it build in all environments.
+BENCHMARKS = db_bench table_reader_bench cache_bench memtablerep_bench
+
+# if user didn't config LIBNAME, set the default
+ifeq ($(LIBNAME),)
+# we should only run rocksdb in production with DEBUG_LEVEL 0
+ifeq ($(DEBUG_LEVEL),0)
+        LIBNAME=librocksdb
+else
+        LIBNAME=librocksdb_debug
+endif
+endif
+LIBRARY = ${LIBNAME}.a
+TOOLS_LIBRARY = ${LIBNAME}_tools.a
+
+ROCKSDB_MAJOR = $(shell egrep "ROCKSDB_MAJOR.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)
+ROCKSDB_MINOR = $(shell egrep "ROCKSDB_MINOR.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)
+ROCKSDB_PATCH = $(shell egrep "ROCKSDB_PATCH.[0-9]" include/rocksdb/version.h | cut -d ' ' -f 3)
+
+default: all
+
+#-----------------------------------------------
+# Create platform independent shared libraries.
+#----------------------------------------------- +ifneq ($(PLATFORM_SHARED_EXT),) + +ifneq ($(PLATFORM_SHARED_VERSIONED),true) +SHARED1 = ${LIBNAME}.$(PLATFORM_SHARED_EXT) +SHARED2 = $(SHARED1) +SHARED3 = $(SHARED1) +SHARED4 = $(SHARED1) +SHARED = $(SHARED1) +else +SHARED_MAJOR = $(ROCKSDB_MAJOR) +SHARED_MINOR = $(ROCKSDB_MINOR) +SHARED_PATCH = $(ROCKSDB_PATCH) +SHARED1 = ${LIBNAME}.$(PLATFORM_SHARED_EXT) +ifeq ($(PLATFORM), OS_MACOSX) +SHARED_OSX = $(LIBNAME).$(SHARED_MAJOR) +SHARED2 = $(SHARED_OSX).$(PLATFORM_SHARED_EXT) +SHARED3 = $(SHARED_OSX).$(SHARED_MINOR).$(PLATFORM_SHARED_EXT) +SHARED4 = $(SHARED_OSX).$(SHARED_MINOR).$(SHARED_PATCH).$(PLATFORM_SHARED_EXT) +else +SHARED2 = $(SHARED1).$(SHARED_MAJOR) +SHARED3 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR) +SHARED4 = $(SHARED1).$(SHARED_MAJOR).$(SHARED_MINOR).$(SHARED_PATCH) +endif +SHARED = $(SHARED1) $(SHARED2) $(SHARED3) $(SHARED4) +$(SHARED1): $(SHARED4) + ln -fs $(SHARED4) $(SHARED1) +$(SHARED2): $(SHARED4) + ln -fs $(SHARED4) $(SHARED2) +$(SHARED3): $(SHARED4) + ln -fs $(SHARED4) $(SHARED3) +endif + +$(SHARED4): + $(CXX) $(PLATFORM_SHARED_LDFLAGS)$(SHARED3) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) $(LIB_SOURCES) $(TOOL_LIB_SOURCES) \ + $(LDFLAGS) -o $@ + +endif # PLATFORM_SHARED_EXT + +.PHONY: blackbox_crash_test check clean coverage crash_test ldb_tests package \ + release tags valgrind_check whitebox_crash_test format static_lib shared_lib all \ + dbg rocksdbjavastatic rocksdbjava install install-static install-shared uninstall \ + analyze tools tools_lib + + +all: $(LIBRARY) tools + +static_lib: $(LIBRARY) + +shared_lib: $(SHARED) + +tools: $(TOOLS) + +tools_lib: $(TOOLS_LIBRARY) + +test_libs: $(TEST_LIBS) + +dbg: $(LIBRARY) $(BENCHMARKS) tools $(TESTS) + +# creates static library and programs +release: + $(MAKE) clean + DEBUG_LEVEL=0 $(MAKE) static_lib tools db_bench + +coverage: + $(MAKE) clean + COVERAGEFLAGS="-fprofile-arcs -ftest-coverage" LDFLAGS+="-lgcov" $(MAKE) J=1 all check + cd coverage && ./coverage_test.sh + # Delete intermediate files + find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \; + +ifneq (,$(filter check parallel_check,$(MAKECMDGOALS)),) +# Use /dev/shm if it has the sticky bit set (otherwise, /tmp), +# and create a randomly-named rocksdb.XXXX directory therein. +# We'll use that directory in the "make check" rules. +ifeq ($(TMPD),) +TMPD := $(shell f=/dev/shm; test -k $$f || f=/tmp; \ + perl -le 'use File::Temp "tempdir";' \ + -e 'print tempdir("'$$f'/rocksdb.XXXX", CLEANUP => 0)') +endif +endif + +# Run all tests in parallel, accumulating per-test logs in t/log-*. +# +# Each t/run-* file is a tiny generated bourne shell script that invokes one of +# sub-tests. Why use a file for this? Because that makes the invocation of +# parallel below simpler, which in turn makes the parsing of parallel's +# LOG simpler (the latter is for live monitoring as parallel +# tests run). +# +# Test names are extracted by running tests with --gtest_list_tests. +# This filter removes the "#"-introduced comments, and expands to +# fully-qualified names by changing input like this: +# +# DBTest. +# Empty +# WriteEmptyBatch +# MultiThreaded/MultiThreadedDBTest. 
+# MultiThreaded/0 # GetParam() = 0 +# MultiThreaded/1 # GetParam() = 1 +# +# into this: +# +# DBTest.Empty +# DBTest.WriteEmptyBatch +# MultiThreaded/MultiThreadedDBTest.MultiThreaded/0 +# MultiThreaded/MultiThreadedDBTest.MultiThreaded/1 +# + +parallel_tests = $(patsubst %,parallel_%,$(PARALLEL_TEST)) +.PHONY: gen_parallel_tests $(parallel_tests) +$(parallel_tests): $(PARALLEL_TEST) + $(AM_V_at)TEST_BINARY=$(patsubst parallel_%,%,$@); \ + TEST_NAMES=` \ + ./$$TEST_BINARY --gtest_list_tests \ + | perl -n \ + -e 's/ *\#.*//;' \ + -e '/^(\s*)(\S+)/; !$$1 and do {$$p=$$2; break};' \ + -e 'print qq! $$p$$2!'`; \ + for TEST_NAME in $$TEST_NAMES; do \ + TEST_SCRIPT=t/run-$$TEST_BINARY-$${TEST_NAME//\//-}; \ + echo " GEN " $$TEST_SCRIPT; \ + printf '%s\n' \ + '#!/bin/sh' \ + "d=\$(TMPD)$$TEST_SCRIPT" \ + 'mkdir -p $$d' \ + "TEST_TMPDIR=\$$d ./$$TEST_BINARY --gtest_filter=$$TEST_NAME" \ + > $$TEST_SCRIPT; \ + chmod a=rx $$TEST_SCRIPT; \ + done + +gen_parallel_tests: + $(AM_V_at)mkdir -p t + $(AM_V_at)rm -f t/run-* + $(MAKE) $(parallel_tests) + +# Reorder input lines (which are one per test) so that the +# longest-running tests appear first in the output. +# Do this by prefixing each selected name with its duration, +# sort the resulting names, and remove the leading numbers. +# FIXME: the "100" we prepend is a fake time, for now. +# FIXME: squirrel away timings from each run and use them +# (when present) on subsequent runs to order these tests. +# +# Without this reordering, these two tests would happen to start only +# after almost all other tests had completed, thus adding 100 seconds +# to the duration of parallel "make check". That's the difference +# between 4 minutes (old) and 2m20s (new). +# +# 152.120 PASS t/DBTest.FileCreationRandomFailure +# 107.816 PASS t/DBTest.EncodeDecompressedBlockSizeTest +# +slow_test_regexp = \ + ^t/run-table_test-HarnessTest.Randomized$$|^t/run-db_test-.*(?:FileCreationRandomFailure|EncodeDecompressedBlockSizeTest)$$ +prioritize_long_running_tests = \ + perl -pe 's,($(slow_test_regexp)),100 $$1,' \ + | sort -k1,1gr \ + | sed 's/^[.0-9]* //' + +# "make check" uses +# Run with "make J=1 check" to disable parallelism in "make check". +# Run with "make J=200% check" to run two parallel jobs per core. +# The default is to run one job per core (J=100%). +# See "man parallel" for its "-j ..." option. +J ?= 100% + +# Use this regexp to select the subset of tests whose names match. +tests-regexp = . 
+
+t_run = $(wildcard t/run-*)
+.PHONY: check_0
+check_0:
+	$(AM_V_GEN)export TEST_TMPDIR=$(TMPD); \
+	printf '%s\n' '' \
+	  'To monitor subtest <duration,pass/fail,name>,' \
+	  '  run "make watch-log" in a separate window' ''; \
+	test -t 1 && eta=--eta || eta=; \
+	{ \
+	  printf './%s\n' $(filter-out $(PARALLEL_TEST),$(TESTS)); \
+	  printf '%s\n' $(t_run); \
+	} \
+	  | $(prioritize_long_running_tests) \
+	  | grep -E '$(tests-regexp)' \
+	  | build_tools/gnu_parallel -j$(J) --joblog=LOG $$eta --gnu '{} >& t/log-{/}'
+
+.PHONY: valgrind_check_0
+valgrind_check_0:
+	$(AM_V_GEN)export TEST_TMPDIR=$(TMPD); \
+	printf '%s\n' '' \
+	  'To monitor subtest <duration,pass/fail,name>,' \
+	  '  run "make watch-log" in a separate window' ''; \
+	test -t 1 && eta=--eta || eta=; \
+	{ \
+	  printf './%s\n' $(filter-out $(PARALLEL_TEST) %skiplist_test options_settable_test, $(TESTS)); \
+	  printf '%s\n' $(t_run); \
+	} \
+	  | $(prioritize_long_running_tests) \
+	  | grep -E '$(tests-regexp)' \
+	  | build_tools/gnu_parallel -j$(J) --joblog=LOG $$eta --gnu \
+	  'if [[ "{}" == "./"* ]] ; then $(DRIVER) {} >& t/valgrind_log-{/}; ' \
+	  'else {} >& t/valgrind_log-{/}; fi'
+
+CLEAN_FILES += t LOG $(TMPD)
+
+# When running parallel "make check", you can monitor its progress
+# from another window.
+# Run "make watch-log" to show the duration,PASS/FAIL,name of parallel
+# tests as they are being run. We sort them so that longer-running ones
+# appear at the top of the list and any failing tests remain at the top
+# regardless of their duration. As with any use of "watch", hit ^C to
+# interrupt.
+watch-log:
+	watch --interval=0 'sort -k7,7nr -k4,4gr LOG|$(quoted_perl_command)'
+
+# If J != 1 and GNU parallel is installed, run the tests in parallel,
+# via the check_0 rule above. Otherwise, run them sequentially.
+check: all
+	$(MAKE) gen_parallel_tests
+	$(AM_V_GEN)if test "$(J)" != 1 \
+	    && (build_tools/gnu_parallel --gnu --help 2>/dev/null) | \
+	        grep -q 'GNU Parallel'; \
+	then \
+	    $(MAKE) T="$$t" TMPD=$(TMPD) check_0; \
+	else \
+	    for t in $(TESTS); do \
+	      echo "===== Running $$t"; ./$$t || exit 1; done; \
+	fi
+	rm -rf $(TMPD)
+ifeq ($(filter -DROCKSDB_LITE,$(OPT)),)
+	python tools/ldb_test.py
+	sh tools/rocksdb_dump_test.sh
+endif
+
+check_some: $(SUBSET) ldb_tests
+	for t in $(SUBSET); do echo "===== Running $$t"; ./$$t || exit 1; done
+
+.PHONY: ldb_tests
+ldb_tests: ldb
+	python tools/ldb_test.py
+
+crash_test: whitebox_crash_test blackbox_crash_test
+
+blackbox_crash_test: db_stress
+	python -u tools/db_crashtest.py --simple blackbox
+	python -u tools/db_crashtest.py blackbox
+
+ifeq ($(CRASH_TEST_KILL_ODD),)
+  CRASH_TEST_KILL_ODD=888887
+endif
+
+whitebox_crash_test: db_stress
+	python -u tools/db_crashtest.py --simple whitebox --random_kill_odd \
+	  $(CRASH_TEST_KILL_ODD)
+	python -u tools/db_crashtest.py whitebox --random_kill_odd \
+	  $(CRASH_TEST_KILL_ODD)
+
+asan_check:
+	$(MAKE) clean
+	COMPILE_WITH_ASAN=1 $(MAKE) check -j32
+	$(MAKE) clean
+
+asan_crash_test:
+	$(MAKE) clean
+	COMPILE_WITH_ASAN=1 $(MAKE) crash_test
+	$(MAKE) clean
+
+ubsan_check:
+	$(MAKE) clean
+	COMPILE_WITH_UBSAN=1 $(MAKE) check -j32
+	$(MAKE) clean
+
+ubsan_crash_test:
+	$(MAKE) clean
+	COMPILE_WITH_UBSAN=1 $(MAKE) crash_test
+	$(MAKE) clean
+
+valgrind_check: $(TESTS)
+	$(MAKE) gen_parallel_tests
+	$(AM_V_GEN)if test "$(J)" != 1 \
+	    && (build_tools/gnu_parallel --gnu --help 2>/dev/null) | \
+	        grep -q 'GNU Parallel'; \
+	then \
+	    $(MAKE) TMPD=$(TMPD) \
+	    DRIVER="$(VALGRIND_VER) $(VALGRIND_OPTS)" valgrind_check_0; \
+	else \
+	    for t in $(filter-out %skiplist_test options_settable_test,$(TESTS));
do \ + $(VALGRIND_VER) $(VALGRIND_OPTS) ./$$t; \ + ret_code=$$?; \ + if [ $$ret_code -ne 0 ]; then \ + exit $$ret_code; \ + fi; \ + done; \ + fi + + +ifneq ($(PAR_TEST),) +parloop: + ret_bad=0; \ + for t in $(PAR_TEST); do \ + echo "===== Running $$t in parallel $(NUM_PAR)";\ + if [ $(db_test) -eq 1 ]; then \ + seq $(J) | v="$$t" build_tools/gnu_parallel --gnu 's=$(TMPD)/rdb-{}; export TEST_TMPDIR=$$s;' \ + 'timeout 2m ./db_test --gtest_filter=$$v >> $$s/log-{} 2>1'; \ + else\ + seq $(J) | v="./$$t" build_tools/gnu_parallel --gnu 's=$(TMPD)/rdb-{};' \ + 'export TEST_TMPDIR=$$s; timeout 10m $$v >> $$s/log-{} 2>1'; \ + fi; \ + ret_code=$$?; \ + if [ $$ret_code -ne 0 ]; then \ + ret_bad=$$ret_code; \ + echo $$t exited with $$ret_code; \ + fi; \ + done; \ + exit $$ret_bad; +endif + +test_names = \ + ./db_test --gtest_list_tests \ + | perl -n \ + -e 's/ *\#.*//;' \ + -e '/^(\s*)(\S+)/; !$$1 and do {$$p=$$2; break};' \ + -e 'print qq! $$p$$2!' + +parallel_check: $(TESTS) + $(AM_V_GEN)if test "$(J)" > 1 \ + && (build_tools/gnu_parallel --gnu --help 2>/dev/null) | \ + grep -q 'GNU Parallel'; \ + then \ + echo Running in parallel $(J); \ + else \ + echo "Need to have GNU Parallel and J > 1"; exit 1; \ + fi; \ + ret_bad=0; \ + echo $(J);\ + echo Test Dir: $(TMPD); \ + seq $(J) | build_tools/gnu_parallel --gnu 's=$(TMPD)/rdb-{}; rm -rf $$s; mkdir $$s'; \ + $(MAKE) PAR_TEST="$(shell $(test_names))" TMPD=$(TMPD) \ + J=$(J) db_test=1 parloop; \ + $(MAKE) PAR_TEST="$(filter-out db_test, $(TESTS))" \ + TMPD=$(TMPD) J=$(J) db_test=0 parloop; + +analyze: clean + $(CLANG_SCAN_BUILD) --use-analyzer=$(CLANG_ANALYZER) \ + --use-c++=$(CXX) --use-cc=$(CC) --status-bugs \ + -o $(CURDIR)/scan_build_report \ + $(MAKE) dbg + +CLEAN_FILES += unity.cc +unity.cc: Makefile + rm -f $@ $@-t + for source_file in $(LIB_SOURCES); do \ + echo "#include \"$$source_file\"" >> $@-t; \ + done + chmod a=r $@-t + mv $@-t $@ + +unity.a: unity.o + $(AM_V_AR)rm -f $@ + $(AM_V_at)$(AR) $(ARFLAGS) $@ unity.o + +# try compiling db_test with unity +unity_test: db/db_test.o db/db_test_util.o $(TESTHARNESS) unity.a + $(AM_LINK) + ./unity_test + +rocksdb.h rocksdb.cc: build_tools/amalgamate.py Makefile $(LIB_SOURCES) unity.cc + build_tools/amalgamate.py -I. -i./include unity.cc -x include/rocksdb/c.h -H rocksdb.h -o rocksdb.cc + +clean: + rm -f $(BENCHMARKS) $(TOOLS) $(TESTS) $(LIBRARY) $(SHARED) + rm -rf $(CLEAN_FILES) ios-x86 ios-arm scan_build_report + find . -name "*.[oda]" -exec rm -f {} \; + find . -type f -regex ".*\.\(\(gcda\)\|\(gcno\)\)" -exec rm {} \; + rm -rf bzip2* snappy* zlib* lz4* + +tags: + ctags * -R + cscope -b `find . -name '*.cc'` `find . -name '*.h'` `find . 
-name '*.c'` + +format: + build_tools/format-diff.sh + +package: + bash build_tools/make_package.sh $(SHARED_MAJOR).$(SHARED_MINOR) + +# --------------------------------------------------------------------------- +# Unit tests and tools +# --------------------------------------------------------------------------- +$(LIBRARY): $(LIBOBJECTS) + $(AM_V_AR)rm -f $@ + $(AM_V_at)$(AR) $(ARFLAGS) $@ $(LIBOBJECTS) + +$(TOOLS_LIBRARY): $(BENCH_LIB_SOURCES:.cc=.o) $(TOOL_LIB_SOURCES:.cc=.o) $(LIB_SOURCES:.cc=.o) $(TESTUTIL) + $(AM_V_AR)rm -f $@ + $(AM_V_at)$(AR) $(ARFLAGS) $@ $^ + +librocksdb_env_basic_test.a: util/env_basic_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_V_AR)rm -f $@ + $(AM_V_at)$(AR) $(ARFLAGS) $@ $^ + +db_bench: tools/db_bench.o $(BENCHTOOLOBJECTS) + $(AM_LINK) + +cache_bench: util/cache_bench.o $(LIBOBJECTS) $(TESTUTIL) + $(AM_LINK) + +memtablerep_bench: db/memtablerep_bench.o $(LIBOBJECTS) $(TESTUTIL) + $(AM_LINK) + +db_stress: tools/db_stress.o $(LIBOBJECTS) $(TESTUTIL) + $(AM_LINK) + +write_stress: tools/write_stress.o $(LIBOBJECTS) $(TESTUTIL) + $(AM_LINK) + +db_sanity_test: tools/db_sanity_test.o $(LIBOBJECTS) $(TESTUTIL) + $(AM_LINK) + +db_repl_stress: tools/db_repl_stress.o $(LIBOBJECTS) $(TESTUTIL) + $(AM_LINK) + +arena_test: util/arena_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +autovector_test: util/autovector_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +column_family_test: db/column_family_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +table_properties_collector_test: db/table_properties_collector_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +bloom_test: util/bloom_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +dynamic_bloom_test: util/dynamic_bloom_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +c_test: db/c_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +cache_test: util/cache_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +coding_test: util/coding_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +option_change_migration_test: utilities/option_change_migration/option_change_migration_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +stringappend_test: utilities/merge_operators/string_append/stringappend_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +redis_test: utilities/redis/redis_lists_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +hash_table_test: utilities/persistent_cache/hash_table_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +histogram_test: util/histogram_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +thread_local_test: util/thread_local_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +corruption_test: db/corruption_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +crc32c_test: util/crc32c_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +slice_transform_test: util/slice_transform_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_test: db/db_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_test2: db/db_test2.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_block_cache_test: db/db_block_cache_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_bloom_filter_test: db/db_bloom_filter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_log_iter_test: db/db_log_iter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_compaction_filter_test: db/db_compaction_filter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + 
+db_compaction_test: db/db_compaction_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_dynamic_level_test: db/db_dynamic_level_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_flush_test: db/db_flush_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_inplace_update_test: db/db_inplace_update_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_iterator_test: db/db_iterator_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_options_test: db/db_options_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_sst_test: db/db_sst_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_tailing_iter_test: db/db_tailing_iter_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_iter_test: db/db_iter_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_universal_compaction_test: db/db_universal_compaction_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_wal_test: db/db_wal_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_io_failure_test: db/db_io_failure_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_properties_test: db/db_properties_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_table_properties_test: db/db_table_properties_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +log_write_bench: util/log_write_bench.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) $(PROFILING_FLAGS) + +plain_table_db_test: db/plain_table_db_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +comparator_db_test: db/comparator_db_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +table_reader_bench: table/table_reader_bench.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) $(PROFILING_FLAGS) + +perf_context_test: db/perf_context_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) + +prefix_test: db/prefix_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) + +backupable_db_test: utilities/backupable/backupable_db_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +checkpoint_test: utilities/checkpoint/checkpoint_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +document_db_test: utilities/document/document_db_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +json_document_test: utilities/document/json_document_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +sim_cache_test: utilities/simulator_cache/sim_cache_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +spatial_db_test: utilities/spatialdb/spatial_db_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +env_mirror_test: utilities/env_mirror_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +ifdef ROCKSDB_USE_LIBRADOS +env_librados_test: utilities/env_librados_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) $(COVERAGEFLAGS) +endif + +env_registry_test: utilities/env_registry_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +ttl_test: utilities/ttl/ttl_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +write_batch_with_index_test: utilities/write_batch_with_index/write_batch_with_index_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +flush_job_test: db/flush_job_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +compaction_iterator_test: db/compaction_iterator_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +compaction_job_test: 
db/compaction_job_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +compaction_job_stats_test: db/compaction_job_stats_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +compact_on_deletion_collector_test: utilities/table_properties_collectors/compact_on_deletion_collector_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +wal_manager_test: db/wal_manager_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +dbformat_test: db/dbformat_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +env_basic_test: util/env_basic_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +env_test: util/env_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +fault_injection_test: db/fault_injection_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +rate_limiter_test: util/rate_limiter_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +delete_scheduler_test: util/delete_scheduler_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +filename_test: db/filename_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +file_reader_writer_test: util/file_reader_writer_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +block_based_filter_block_test: table/block_based_filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +full_filter_block_test: table/full_filter_block_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +log_test: db/log_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +table_test: table/table_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +block_test: table/block_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +inlineskiplist_test: db/inlineskiplist_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +skiplist_test: db/skiplist_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +version_edit_test: db/version_edit_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +version_set_test: db/version_set_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +compaction_picker_test: db/compaction_picker_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +version_builder_test: db/version_builder_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +file_indexer_test: db/file_indexer_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +reduce_levels_test: tools/reduce_levels_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +write_batch_test: db/write_batch_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +write_controller_test: db/write_controller_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +merge_helper_test: db/merge_helper_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +memory_test: utilities/memory/memory_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +merge_test: db/merge_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +merger_test: table/merger_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +util_merge_operators_test: utilities/util_merge_operators_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +options_file_test: db/options_file_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +deletefile_test: db/deletefile_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +geodb_test: utilities/geodb/geodb_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +rocksdb_dump: tools/dump/rocksdb_dump.o $(LIBOBJECTS) + $(AM_LINK) + +rocksdb_undump: tools/dump/rocksdb_undump.o $(LIBOBJECTS) + $(AM_LINK) + +cuckoo_table_builder_test: table/cuckoo_table_builder_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +cuckoo_table_reader_test: table/cuckoo_table_reader_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +cuckoo_table_db_test: db/cuckoo_table_db_test.o $(LIBOBJECTS) 
$(TESTHARNESS) + $(AM_LINK) + +listener_test: db/listener_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +thread_list_test: util/thread_list_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +compact_files_test: db/compact_files_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +options_test: util/options_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +options_settable_test: util/options_settable_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +options_util_test: utilities/options/options_util_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +db_bench_tool_test: tools/db_bench_tool_test.o $(BENCHTOOLOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +event_logger_test: util/event_logger_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +sst_dump_test: tools/sst_dump_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +optimistic_transaction_test: utilities/transactions/optimistic_transaction_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +mock_env_test : util/mock_env_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +manual_compaction_test: db/manual_compaction_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +filelock_test: util/filelock_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +auto_roll_logger_test: db/auto_roll_logger_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +memtable_list_test: db/memtable_list_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +write_callback_test: db/write_callback_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +heap_test: util/heap_test.o $(GTEST) + $(AM_LINK) + +transaction_test: utilities/transactions/transaction_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +sst_dump: tools/sst_dump.o $(LIBOBJECTS) + $(AM_LINK) + +repair_test: db/repair_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +ldb_cmd_test: tools/ldb_cmd_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +ldb: tools/ldb.o $(LIBOBJECTS) + $(AM_LINK) + +iostats_context_test: util/iostats_context_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_V_CCLD)$(CXX) $^ $(EXEC_LDFLAGS) -o $@ $(LDFLAGS) + +persistent_cache_test: utilities/persistent_cache/persistent_cache_test.o db/db_test_util.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +statistics_test: util/statistics_test.o $(LIBOBJECTS) $(TESTHARNESS) + $(AM_LINK) + +#------------------------------------------------- +# make install related stuff +INSTALL_PATH ?= /usr/local + +uninstall: + rm -rf $(INSTALL_PATH)/include/rocksdb \ + $(INSTALL_PATH)/lib/$(LIBRARY) \ + $(INSTALL_PATH)/lib/$(SHARED4) \ + $(INSTALL_PATH)/lib/$(SHARED3) \ + $(INSTALL_PATH)/lib/$(SHARED2) \ + $(INSTALL_PATH)/lib/$(SHARED1) + +install-headers: + install -d $(INSTALL_PATH)/lib + for header_dir in `find "include/rocksdb" -type d`; do \ + install -d $(INSTALL_PATH)/$$header_dir; \ + done + for header in `find "include/rocksdb" -type f -name *.h`; do \ + install -C -m 644 $$header $(INSTALL_PATH)/$$header; \ + done + +install-static: install-headers $(LIBRARY) + install -C -m 755 $(LIBRARY) $(INSTALL_PATH)/lib + +install-shared: install-headers $(SHARED4) + install -C -m 755 $(SHARED4) $(INSTALL_PATH)/lib && \ + ln -fs $(SHARED4) $(INSTALL_PATH)/lib/$(SHARED3) && \ + ln -fs $(SHARED4) $(INSTALL_PATH)/lib/$(SHARED2) && \ + ln -fs $(SHARED4) $(INSTALL_PATH)/lib/$(SHARED1) + +# install static by default + install shared if it exists +install: install-static + [ -e $(SHARED4) ] && $(MAKE) install-shared || : + +#------------------------------------------------- + + +# 
--------------------------------------------------------------------------- +# Jni stuff +# --------------------------------------------------------------------------- + +JAVA_INCLUDE = -I$(JAVA_HOME)/include/ -I$(JAVA_HOME)/include/linux +ifeq ($(PLATFORM), OS_SOLARIS) + ARCH := $(shell isainfo -b) +else + ARCH := $(shell getconf LONG_BIT) +endif +ROCKSDBJNILIB = librocksdbjni-linux$(ARCH).so +ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux$(ARCH).jar +ROCKSDB_JAR_ALL = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH).jar +ROCKSDB_JAVADOCS_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-javadoc.jar +ROCKSDB_SOURCES_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-sources.jar + +ifeq ($(PLATFORM), OS_MACOSX) + ROCKSDBJNILIB = librocksdbjni-osx.jnilib + ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-osx.jar +ifneq ("$(wildcard $(JAVA_HOME)/include/darwin)","") + JAVA_INCLUDE = -I$(JAVA_HOME)/include -I $(JAVA_HOME)/include/darwin +else + JAVA_INCLUDE = -I/System/Library/Frameworks/JavaVM.framework/Headers/ +endif +endif +ifeq ($(PLATFORM), OS_SOLARIS) + ROCKSDBJNILIB = librocksdbjni-solaris$(ARCH).so + ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-solaris$(ARCH).jar + JAVA_INCLUDE = -I$(JAVA_HOME)/include/ -I$(JAVA_HOME)/include/solaris +endif + +libz.a: + -rm -rf zlib-1.2.8 + curl -O http://zlib.net/zlib-1.2.8.tar.gz + tar xvzf zlib-1.2.8.tar.gz + cd zlib-1.2.8 && CFLAGS='-fPIC' ./configure --static && make + cp zlib-1.2.8/libz.a . + +libbz2.a: + -rm -rf bzip2-1.0.6 + curl -O http://www.bzip.org/1.0.6/bzip2-1.0.6.tar.gz + tar xvzf bzip2-1.0.6.tar.gz + cd bzip2-1.0.6 && make CFLAGS='-fPIC -O2 -g -D_FILE_OFFSET_BITS=64' + cp bzip2-1.0.6/libbz2.a . + +libsnappy.a: + -rm -rf snappy-1.1.1 + curl -O https://snappy.googlecode.com/files/snappy-1.1.1.tar.gz + tar xvzf snappy-1.1.1.tar.gz + cd snappy-1.1.1 && ./configure --with-pic --enable-static + cd snappy-1.1.1 && make + cp snappy-1.1.1/.libs/libsnappy.a . + +liblz4.a: + -rm -rf lz4-r127 + curl -O https://codeload.github.com/Cyan4973/lz4/tar.gz/r127 + mv r127 lz4-r127.tar.gz + tar xvzf lz4-r127.tar.gz + cd lz4-r127/lib && make CFLAGS='-fPIC' all + cp lz4-r127/lib/liblz4.a . + +# A version of each $(LIBOBJECTS) compiled with -fPIC and a fixed set of static compression libraries +java_static_libobjects = $(patsubst %,jls/%,$(LIBOBJECTS)) +CLEAN_FILES += jls + +JAVA_STATIC_FLAGS = -DZLIB -DBZIP2 -DSNAPPY -DLZ4 +JAVA_STATIC_INCLUDES = -I./zlib-1.2.8 -I./bzip2-1.0.6 -I./snappy-1.1.1 -I./lz4-r127/lib + +$(java_static_libobjects): jls/%.o: %.cc libz.a libbz2.a libsnappy.a liblz4.a + $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) $(JAVA_STATIC_FLAGS) $(JAVA_STATIC_INCLUDES) -fPIC -c $< -o $@ $(COVERAGEFLAGS) + +rocksdbjavastatic: $(java_static_libobjects) + cd java;$(MAKE) javalib; + rm -f ./java/target/$(ROCKSDBJNILIB) + $(CXX) $(CXXFLAGS) -I./java/. 
$(JAVA_INCLUDE) -shared -fPIC \ + -o ./java/target/$(ROCKSDBJNILIB) $(JNI_NATIVE_SOURCES) \ + $(java_static_libobjects) $(COVERAGEFLAGS) \ + libz.a libbz2.a libsnappy.a liblz4.a $(JAVA_STATIC_LDFLAGS) + cd java/target;strip -S -x $(ROCKSDBJNILIB) + cd java;jar -cf target/$(ROCKSDB_JAR) HISTORY*.md + cd java/target;jar -uf $(ROCKSDB_JAR) $(ROCKSDBJNILIB) + cd java/target/classes;jar -uf ../$(ROCKSDB_JAR) org/rocksdb/*.class org/rocksdb/util/*.class + cd java/target/apidocs;jar -cf ../$(ROCKSDB_JAVADOCS_JAR) * + cd java/src/main/java;jar -cf ../../../target/$(ROCKSDB_SOURCES_JAR) org + +rocksdbjavastaticrelease: rocksdbjavastatic + cd java/crossbuild && vagrant destroy -f && vagrant up linux32 && vagrant halt linux32 && vagrant up linux64 && vagrant halt linux64 + cd java;jar -cf target/$(ROCKSDB_JAR_ALL) HISTORY*.md + cd java/target;jar -uf $(ROCKSDB_JAR_ALL) librocksdbjni-*.so librocksdbjni-*.jnilib + cd java/target/classes;jar -uf ../$(ROCKSDB_JAR_ALL) org/rocksdb/*.class org/rocksdb/util/*.class + +rocksdbjavastaticpublish: rocksdbjavastaticrelease + mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-javadoc.jar -Dclassifier=javadoc + mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-sources.jar -Dclassifier=sources + mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux64.jar -Dclassifier=linux64 + mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux32.jar -Dclassifier=linux32 + mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-osx.jar -Dclassifier=osx + mvn gpg:sign-and-deploy-file -Durl=https://oss.sonatype.org/service/local/staging/deploy/maven2/ -DrepositoryId=sonatype-nexus-staging -DpomFile=java/rocksjni.pom -Dfile=java/target/rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH).jar + +# A version of each $(LIBOBJECTS) compiled with -fPIC +java_libobjects = $(patsubst %,jl/%,$(LIBOBJECTS)) +CLEAN_FILES += jl + +$(java_libobjects): jl/%.o: %.cc + $(AM_V_CC)mkdir -p $(@D) && $(CXX) $(CXXFLAGS) -fPIC -c $< -o $@ $(COVERAGEFLAGS) + +rocksdbjava: $(java_libobjects) + $(AM_V_GEN)cd java;$(MAKE) javalib; + $(AM_V_at)rm -f ./java/target/$(ROCKSDBJNILIB) + $(AM_V_at)$(CXX) $(CXXFLAGS) -I./java/. 
$(JAVA_INCLUDE) -shared -fPIC -o ./java/target/$(ROCKSDBJNILIB) $(JNI_NATIVE_SOURCES) $(java_libobjects) $(JAVA_LDFLAGS) $(COVERAGEFLAGS) + $(AM_V_at)cd java;jar -cf target/$(ROCKSDB_JAR) HISTORY*.md + $(AM_V_at)cd java/target;jar -uf $(ROCKSDB_JAR) $(ROCKSDBJNILIB) + $(AM_V_at)cd java/target/classes;jar -uf ../$(ROCKSDB_JAR) org/rocksdb/*.class org/rocksdb/util/*.class + +jclean: + cd java;$(MAKE) clean; + +jtest_compile: rocksdbjava + cd java;$(MAKE) java_test + +jtest_run: + cd java;$(MAKE) run_test + +jtest: rocksdbjava + cd java;$(MAKE) sample;$(MAKE) test; + +jdb_bench: + cd java;$(MAKE) db_bench; + +commit_prereq: build_tools/rocksdb-lego-determinator \ + build_tools/precommit_checker.py + J=$(J) build_tools/precommit_checker.py unit unit_481 clang_unit release release_481 clang_release tsan asan ubsan lite unit_non_shm + $(MAKE) clean && $(MAKE) jclean && $(MAKE) rocksdbjava; + +xfunc: + for xftest in $(XFUNC_TESTS); do \ + echo "===== Running xftest $$xftest"; \ + make check ROCKSDB_XFUNC_TEST="$$xftest" tests-regexp="DBTest" ;\ + done + + +# --------------------------------------------------------------------------- +# Platform-specific compilation +# --------------------------------------------------------------------------- + +ifeq ($(PLATFORM), IOS) +# For iOS, create universal object files to be used on both the simulator and +# a device. +PLATFORMSROOT=/Applications/Xcode.app/Contents/Developer/Platforms +SIMULATORROOT=$(PLATFORMSROOT)/iPhoneSimulator.platform/Developer +DEVICEROOT=$(PLATFORMSROOT)/iPhoneOS.platform/Developer +IOSVERSION=$(shell defaults read $(PLATFORMSROOT)/iPhoneOS.platform/version CFBundleShortVersionString) + +.cc.o: + mkdir -p ios-x86/$(dir $@) + $(CXX) $(CXXFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@ + mkdir -p ios-arm/$(dir $@) + xcrun -sdk iphoneos $(CXX) $(CXXFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk -arch armv6 -arch armv7 -arch armv7s -arch arm64 -c $< -o ios-arm/$@ + lipo ios-x86/$@ ios-arm/$@ -create -output $@ + +.c.o: + mkdir -p ios-x86/$(dir $@) + $(CC) $(CFLAGS) -isysroot $(SIMULATORROOT)/SDKs/iPhoneSimulator$(IOSVERSION).sdk -arch i686 -arch x86_64 -c $< -o ios-x86/$@ + mkdir -p ios-arm/$(dir $@) + xcrun -sdk iphoneos $(CC) $(CFLAGS) -isysroot $(DEVICEROOT)/SDKs/iPhoneOS$(IOSVERSION).sdk -arch armv6 -arch armv7 -arch armv7s -arch arm64 -c $< -o ios-arm/$@ + lipo ios-x86/$@ ios-arm/$@ -create -output $@ + +else +.cc.o: + $(AM_V_CC)$(CXX) $(CXXFLAGS) -c $< -o $@ $(COVERAGEFLAGS) + +.c.o: + $(AM_V_CC)$(CC) $(CFLAGS) -c $< -o $@ +endif + +# --------------------------------------------------------------------------- +# Source files dependencies detection +# --------------------------------------------------------------------------- + +all_sources = $(LIB_SOURCES) $(MAIN_SOURCES) $(MOCK_LIB_SOURCES) $(TOOL_LIB_SOURCES) $(BENCH_LIB_SOURCES) $(TEST_LIB_SOURCES) +DEPFILES = $(all_sources:.cc=.d) + +# Add proper dependency support so changing a .h file forces a .cc file to +# rebuild. + +# The .d file indicates .cc file's dependencies on .h files. We generate such +# dependency by g++'s -MM option, whose output is a make dependency rule. +$(DEPFILES): %.d: %.cc + @$(CXX) $(CXXFLAGS) $(PLATFORM_SHARED_CFLAGS) \ + -MM -MT'$@' -MT'$(<:.cc=.o)' "$<" -o '$@' + +depend: $(DEPFILES) + +# if the make goal is either "clean" or "format", we shouldn't +# try to import the *.d files. 
+# TODO(kailiu) The unfamiliarity of Make's conditions leads to the ugly +# working solution. +ifneq ($(MAKECMDGOALS),clean) +ifneq ($(MAKECMDGOALS),format) +ifneq ($(MAKECMDGOALS),jclean) +ifneq ($(MAKECMDGOALS),jtest) +ifneq ($(MAKECMDGOALS),package) +ifneq ($(MAKECMDGOALS),analyze) +-include $(DEPFILES) +endif +endif +endif +endif +endif +endif diff --git a/external/rocksdb/PATENTS b/external/rocksdb/PATENTS new file mode 100755 index 0000000000..65332e3a4e --- /dev/null +++ b/external/rocksdb/PATENTS @@ -0,0 +1,33 @@ +Additional Grant of Patent Rights Version 2 + +"Software" means the RocksDB software distributed by Facebook, Inc. + +Facebook, Inc. ("Facebook") hereby grants to each recipient of the Software +("you") a perpetual, worldwide, royalty-free, non-exclusive, irrevocable +(subject to the termination provision below) license under any Necessary +Claims, to make, have made, use, sell, offer to sell, import, and otherwise +transfer the Software. For avoidance of doubt, no license is granted under +Facebook’s rights in any patent claims that are infringed by (i) modifications +to the Software made by you or any third party or (ii) the Software in +combination with any software or other technology. + +The license granted hereunder will terminate, automatically and without notice, +if you (or any of your subsidiaries, corporate affiliates or agents) initiate +directly or indirectly, or take a direct financial interest in, any Patent +Assertion: (i) against Facebook or any of its subsidiaries or corporate +affiliates, (ii) against any party if such Patent Assertion arises in whole or +in part from any software, technology, product or service of Facebook or any of +its subsidiaries or corporate affiliates, or (iii) against any party relating +to the Software. Notwithstanding the foregoing, if Facebook or any of its +subsidiaries or corporate affiliates files a lawsuit alleging patent +infringement against you in the first instance, and you respond by filing a +patent infringement counterclaim in that lawsuit against that party that is +unrelated to the Software, the license granted hereunder will not terminate +under section (i) of this paragraph due to such counterclaim. + +A "Necessary Claim" is a claim of a patent owned by Facebook that is +necessarily infringed by the Software standing alone. + +A "Patent Assertion" is any lawsuit or other action alleging direct, indirect, +or contributory infringement or inducement to infringe any patent, including a +cross-claim or counterclaim. diff --git a/external/rocksdb/README.md b/external/rocksdb/README.md new file mode 100755 index 0000000000..550c352b88 --- /dev/null +++ b/external/rocksdb/README.md @@ -0,0 +1,27 @@ +## RocksDB: A Persistent Key-Value Store for Flash and RAM Storage + +[![Build Status](https://travis-ci.org/facebook/rocksdb.svg?branch=master)](https://travis-ci.org/facebook/rocksdb) +[![Build status](https://ci.appveyor.com/api/projects/status/fbgfu0so3afcno78/branch/master?svg=true)](https://ci.appveyor.com/project/Facebook/rocksdb/branch/master) + + +RocksDB is developed and maintained by Facebook Database Engineering Team. +It is built on earlier work on LevelDB by Sanjay Ghemawat (sanjay@google.com) +and Jeff Dean (jeff@google.com) + +This code is a library that forms the core building block for a fast +key value server, especially suited for storing data on flash drives. 
+It has a Log-Structured-Merge-Database (LSM) design with flexible tradeoffs
+between Write-Amplification-Factor (WAF), Read-Amplification-Factor (RAF)
+and Space-Amplification-Factor (SAF). It has multi-threaded compactions,
+making it especially suitable for storing multiple terabytes of data in a
+single database.
+
+Start with example usage here: https://github.com/facebook/rocksdb/tree/master/examples
+
+See the [github wiki](https://github.com/facebook/rocksdb/wiki) for more explanation.
+
+The public interface is in `include/`. Callers should not include or
+rely on the details of any other header files in this package. Those
+internal APIs may be changed without warning.
+
+Design discussions are conducted in https://www.facebook.com/groups/rocksdb.dev/
diff --git a/external/rocksdb/ROCKSDB_LITE.md b/external/rocksdb/ROCKSDB_LITE.md
new file mode 100755
index 0000000000..41cfbecc2c
--- /dev/null
+++ b/external/rocksdb/ROCKSDB_LITE.md
@@ -0,0 +1,21 @@
+# RocksDBLite
+
+RocksDBLite is a project focused on mobile use cases, which don't need many of the fancy things we've built for server workloads and which are very sensitive to binary size. For that reason, we added a compile flag ROCKSDB_LITE that comments out a lot of the nonessential code and keeps the binary lean.
+
+Some examples of the features disabled by ROCKSDB_LITE:
+* No compiled-in support for the LDB tool
+* No backupable DB
+* No support for replication (which we provide in form of TransactionalIterator)
+* No advanced monitoring tools
+* No special-purpose memtables that are highly optimized for specific use cases
+* No Transactions
+
+When adding a new big feature to RocksDB, please add a ROCKSDB_LITE compile guard if:
+* Nobody from mobile really needs your feature,
+* Your feature is adding a lot of weight to the binary.
+
+Don't add a ROCKSDB_LITE compile guard if:
+* It would introduce a lot of code complexity. Compile guards make code harder to read. It's a trade-off.
+* Your feature is not adding a lot of weight.
+
+If unsure, ask. :)
diff --git a/external/rocksdb/USERS.md b/external/rocksdb/USERS.md
new file mode 100755
index 0000000000..890f3399b4
--- /dev/null
+++ b/external/rocksdb/USERS.md
@@ -0,0 +1,73 @@
+This document lists users of RocksDB and their use cases. If you are using RocksDB, please open a pull request and add yourself to the list.
+
+## Facebook
+At Facebook, we use RocksDB as the storage engine in multiple data management services and as a backend for many different stateful services, including:
+
+1. MyRocks -- https://github.com/MySQLOnRocksDB/mysql-5.6
+2. MongoRocks -- https://github.com/mongodb-partners/mongo-rocks
+3. ZippyDB -- Facebook's distributed key-value store with Paxos-style replication, built on top of RocksDB.[*] https://www.youtube.com/watch?v=DfiN7pG0D0k
+4. Laser -- Laser is a high query throughput, low (millisecond) latency, key-value storage service built on top of RocksDB.[*]
+5. Dragon -- a distributed graph query engine. https://code.facebook.com/posts/1737605303120405/dragon-a-distributed-graph-query-engine/
+6. Stylus -- a low-level stream processing framework written in C++.[*]
+
+[*] https://research.facebook.com/publications/realtime-data-processing-at-facebook/
+
+## LinkedIn
+Two different use cases at LinkedIn are using RocksDB as a storage engine:
+
+1. LinkedIn's follow feed for storing users' activities. Check out the blog post: https://engineering.linkedin.com/blog/2016/03/followfeed--linkedin-s-feed-made-faster-and-smarter
+2.
Apache Samza, an open source framework for stream processing
+
+Learn more about those use cases in a Tech Talk by Ankit Gupta and Naveen Somasundaram: http://www.youtube.com/watch?v=plqVp_OnSzg
+
+## Yahoo
+Yahoo is using RocksDB as a storage engine for their biggest distributed data store, Sherpa. Learn more about it here: http://yahooeng.tumblr.com/post/120730204806/sherpa-scales-new-heights
+
+## CockroachDB
+CockroachDB is an open-source, geo-replicated transactional database (still in development). They are using RocksDB as their storage engine. Check out their github: https://github.com/cockroachdb/cockroach
+
+## DNAnexus
+DNAnexus is using RocksDB to speed up processing of genomics data.
+You can learn more from this great blog post by Mike Lin: http://devblog.dnanexus.com/faster-bam-sorting-with-samtools-and-rocksdb/
+
+## Iron.io
+Iron.io is using RocksDB as a storage engine for their distributed queueing system.
+Learn more from the Tech Talk by Reed Allman: http://www.youtube.com/watch?v=HTjt6oj-RL4
+
+## Tango Me
+Tango is using RocksDB as graph storage to store all users' connection data and other social activity data.
+
+## Turn
+Turn is using RocksDB as a storage layer for their key/value store, serving at peak 2.4MM QPS out of different datacenters.
+Check out our RocksDB Protobuf merge operator at: https://github.com/vladb38/rocksdb_protobuf
+
+## Santander UK/Cloudera Professional Services
+Check out their blog post: http://blog.cloudera.com/blog/2015/08/inside-santanders-near-real-time-data-ingest-architecture/
+
+## Airbnb
+Airbnb is using RocksDB as a storage engine for their personalized search service. You can learn more about it here: https://www.youtube.com/watch?v=ASQ6XMtogMs
+
+## Pinterest
+Pinterest's Object Retrieval System uses RocksDB for storage: https://www.youtube.com/watch?v=MtFEVEs_2Vo
+
+## Smyte
+[Smyte](https://www.smyte.com/) uses RocksDB as the storage layer for their core key-value storage, high-performance counters and time-windowed HyperLogLog services.
+
+## Rakuten Marketing
+[Rakuten Marketing](https://marketing.rakuten.com/) uses RocksDB as the disk cache layer for the real-time bidding service in their Performance DSP.
+
+## VWO, Wingify
+[VWO's](https://vwo.com/) Smart Code checker and URL helper uses RocksDB to store all the URLs where VWO's Smart Code is installed.
+
+## quasardb
+[quasardb](https://www.quasardb.net) is a high-performance, distributed, transactional key-value database that integrates well with in-memory analytics engines such as Apache Spark.
+quasardb uses a heavily tuned RocksDB as its persistence layer.
+
+## Netflix
+[Netflix](http://techblog.netflix.com/2016/05/application-data-caching-using-ssds.html) uses RocksDB on spinning disks to cache application data.
+
+## TiKV
+[TiKV](https://github.com/pingcap/tikv) is a geo-replicated, high-performance, distributed, transactional key-value database. TiKV is powered by Rust and Raft. TiKV uses RocksDB as its persistence layer.
+
+## Apache Flink
+[Apache Flink](https://flink.apache.org/news/2016/03/08/release-1.0.0.html) uses RocksDB to store state locally on a machine.
diff --git a/external/rocksdb/Vagrantfile b/external/rocksdb/Vagrantfile
new file mode 100755
index 0000000000..c517182e29
--- /dev/null
+++ b/external/rocksdb/Vagrantfile
@@ -0,0 +1,33 @@
+Vagrant.configure("2") do |config|
+
+ config.vm.provider "virtualbox" do |v|
+ v.memory = 4096
+ v.cpus = 2
+ end
+
+ config.vm.define "ubuntu14" do |box|
+ box.vm.box = "ubuntu/trusty64"
+ end
+
+ config.vm.define "centos65" do |box|
+ box.vm.box = "chef/centos-6.5"
+ end
+
+ config.vm.define "FreeBSD10" do |box|
+ box.vm.guest = :freebsd
+ box.vm.box = "robin/freebsd-10"
+ # FreeBSD does not support 'mount_virtualbox_shared_folder', use NFS
+ box.vm.synced_folder ".", "/vagrant", :nfs => true, id: "vagrant-root"
+ box.vm.network "private_network", ip: "10.0.1.10"
+
+ # build everything after creating VM, skip using --no-provision
+ box.vm.provision "shell", inline: <<-SCRIPT
+ pkg install -y gmake clang35
+ export CXX=/usr/local/bin/clang++35
+ cd /vagrant
+ gmake clean
+ gmake all OPT=-g
+ SCRIPT
+ end
+
+end
diff --git a/external/rocksdb/WINDOWS_PORT.md b/external/rocksdb/WINDOWS_PORT.md
new file mode 100755
index 0000000000..a0fe1fe11f
--- /dev/null
+++ b/external/rocksdb/WINDOWS_PORT.md
@@ -0,0 +1,228 @@
+# Microsoft Contribution Notes
+
+## Contributors
+* Alexander Zinoviev https://github.com/zinoale
+* Dmitri Smirnov https://github.com/yuslepukhin
+* Praveen Rao https://github.com/PraveenSinghRao
+* Sherlock Huang https://github.com/SherlockNoMad
+
+## Introduction
+RocksDB is a well-proven open source key-value persistent store, optimized for fast storage. It scales with the number of CPUs and with storage IOPS to support IO-bound, in-memory and write-once workloads and, most importantly, it is flexible enough to allow for innovation.
+
+As the Microsoft Bing team, we have been continuously pushing hard to improve the scalability and efficiency of our platform and, ultimately, the satisfaction of Bing's end users. We would like to embrace open source, RocksDB in this case: to use, enhance and customize it for our needs, and also to contribute back to the RocksDB community. Herein, we are pleased to offer this RocksDB port for the Windows platform.
+
+These notes describe some decisions and changes we had to make with regard to porting RocksDB to Windows. We hope this will help both reviewers and users of the Windows port.
+We are open to comments and improvements.
+
+## OS specifics
+All of the porting, testing and benchmarking was done on Windows Server 2012 R2 Datacenter 64-bit, but to the best of our knowledge no API we used during porting is unsupported on other Windows releases after Vista.
+
+## Porting goals
+We strove to achieve the following goals:
+* make use of the existing porting interface of RocksDB
+* make minimal modifications within platform-independent code.
+* make all unit tests pass both in debug and release builds.
+ * Note: the recent introduction of SyncPoint seems to disable running db_test in Release.
+* keep performance on par with published benchmarks, accounting for HW differences
+* keep the port code in line with the master branch, with no forking
+
+## Build system
+We have chosen CMake as a widely accepted build system to build the Windows port. It is very fast and convenient.
+
+At the same time it generates Visual Studio projects that are usable both from the command line and from the IDE.
+
+The top-level CMakeLists.txt file contains a description of all targets and build rules.
It also provides brief instructions on how to build the software for Windows. One more build-related file is thirdparty.inc, which also resides at the top level. That file must be edited to point to the actual location of the third-party libraries.
+We think that it would be beneficial to merge the existing make-based build system and the new cmake-based build system into a single one to use on all platforms.
+
+All building and testing was done for 64-bit. We have not conducted any testing for 32-bit, and early reports indicate that it will not run on 32-bit.
+
+## C++ and STL notes
+We had to make a minimal set of changes within the portable files that account either for OS differences or for the shortcomings of C++11 support in the current version of the MS compiler. Most or all of them are expected to be fixed in the upcoming compiler releases.
+
+We plan to use this port for our business purposes here at Bing, which provided the business justification for it. This also means that we are not, at present, free to choose the compiler version at will.
+
+* Certain headers that are not present and not necessary on Windows were simply guarded with `#ifndef OS_WIN` in a few places (`unistd.h`)
+* All posix-specific headers were replaced with port/port.h, which worked well
+* Replaced `dirent.h` with `port/dirent.h` (very few places), with the implementation of the relevant interfaces within the `rocksdb::port` namespace
+* Replaced `sys/time.h` with `port/sys_time.h` (a few places) and implemented equivalents within `rocksdb::port`
+* The `printf %z` specification is not supported on Windows. To imitate existing standards we came up with a string macro, `ROCKSDB_PRIszt`, which expands to `%z` on posix systems and to `%Iu` on Windows.
+* In-class member initializers were moved into constructors in some cases
+* `constexpr` is not supported. We had to replace `std::numeric_limits<>::max/min()` with the corresponding C macros for constants. Sometimes we had to make class members `static const` and place a definition within a .cc file.
+* `constexpr` for functions was replaced with a template specialization (one place)
+* Union members that have non-trivial constructors were replaced with `char[]` in one place, along with bug fixes (spatial experimental feature)
+* Zero-sized arrays are deemed a non-standard extension, so we converted them to arrays of size one, which should work well for the purposes of these classes.
+* `std::chrono` lacks nanoseconds support (fixed in the upcoming release of the STL) and we had to use `QueryPerformanceCounter()` within env_win.cc
+* Function-local statics initialization is still not thread-safe. We used `std::call_once` to mitigate this within WinEnv.
+
+## Windows Environments notes
+We endeavored to make it functionally on par with posix_env. This means we replicated the functionality of the thread pool and other things as precisely as possible, including:
+* Replicate posix logic using `std::thread` primitives.
+* Implement all posix_env disk access functionality.
+* Set `use_os_buffer=false` to disable OS disk buffering for WinWritableFile and WinRandomAccessFile.
+* Replace `pread/pwrite` with `WriteFile/ReadFile` with an `OVERLAPPED` structure.
+* Use `SetFileInformationByHandle` to compensate for the absence of `fallocate`.
+
+### In detail
+Even though Windows provides its own efficient thread-pool implementation, we chose to replicate posix logic using `std::thread` primitives. This allows anyone to quickly detect any changes within the posix source code and replicate them within the Windows env. This has proven to work very well.
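+To make this concrete, here is a minimal, self-contained sketch of the kind of `std::thread`-based pool this approach implies. It is only an illustration of the pattern, not the actual WinEnv code; the class name and structure are ours:
+
+```cpp
+#include <condition_variable>
+#include <cstddef>
+#include <functional>
+#include <mutex>
+#include <queue>
+#include <thread>
+#include <vector>
+
+// Illustrative only: a fixed-size pool that runs queued work items,
+// replicating posix-style pool logic with portable std:: primitives.
+class SimpleThreadPool {
+ public:
+  explicit SimpleThreadPool(std::size_t num_threads) {
+    for (std::size_t i = 0; i < num_threads; ++i) {
+      threads_.emplace_back([this] { WorkerLoop(); });
+    }
+  }
+  ~SimpleThreadPool() {
+    {
+      std::lock_guard<std::mutex> lock(mu_);
+      shutting_down_ = true;
+    }
+    cv_.notify_all();
+    for (auto& t : threads_) {
+      t.join();
+    }
+  }
+  void Schedule(std::function<void()> work) {
+    {
+      std::lock_guard<std::mutex> lock(mu_);
+      queue_.push(std::move(work));
+    }
+    cv_.notify_one();
+  }
+
+ private:
+  void WorkerLoop() {
+    for (;;) {
+      std::function<void()> work;
+      {
+        std::unique_lock<std::mutex> lock(mu_);
+        cv_.wait(lock, [this] { return shutting_down_ || !queue_.empty(); });
+        if (shutting_down_ && queue_.empty()) {
+          return;  // Drain the queue before exiting.
+        }
+        work = std::move(queue_.front());
+        queue_.pop();
+      }
+      work();  // Run the work item outside the lock.
+    }
+  }
+
+  std::mutex mu_;
+  std::condition_variable cv_;
+  std::queue<std::function<void()>> queue_;
+  bool shutting_down_ = false;
+  std::vector<std::thread> threads_;
+};
+```
+
+Because everything above is standard C++11 it compiles unchanged on both posix and Windows, which is precisely what makes it easy to keep the two environments in sync.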
At the same time, anyone who wishes to replace the built-in thread pool can do so using RocksDB's stackable environments.
+
+For disk access we implemented all of the functionality present within the posix_env, which includes memory-mapped files, random access, rate-limiter support, etc.
+The `use_os_buffer` flag on Posix platforms currently denotes disabling read-ahead logic via the `fadvise` mechanism. Windows does not have an `fadvise` system call. What is more, it implements the disk cache in a way that differs greatly from Linux. It’s not an uncommon practice on Windows to perform un-buffered disk access to gain control of memory consumption. We think that in our use case this may also be a good configuration option at the expense of disk throughput. To compensate, one may increase the configured in-memory cache size instead. Thus we have chosen `use_os_buffer=false` to disable OS disk buffering for `WinWritableFile` and `WinRandomAccessFile`. The OS imposes restrictions on the alignment of the disk offsets, the buffers used and the amount of data that is read/written when accessing files in un-buffered mode. When the option is true, the classes behave in a standard way. This allows writes and reads to be performed in cases where un-buffered access does not make sense, such as for the WAL and the MANIFEST.
+
+We have replaced `pread/pwrite` with `WriteFile/ReadFile` with an `OVERLAPPED` structure so we can atomically seek to the position of the disk operation but still perform the operation synchronously. Thus we are able to emulate the functionality of `pread/pwrite` reasonably well. The only difference is that the file pointer is not returned to its original position, but that hardly matters given the random nature of access.
+
+We used `SetFileInformationByHandle` both to truncate files after writing a full final page to disk and to pre-allocate disk space for faster I/O, thus compensating for the absence of `fallocate`, although some differences remain. For example, the pre-allocated space is not filled with zeros as on Linux; however, on a positive note, the end-of-file position is also not modified after pre-allocation.
+
+RocksDB renames, copies and deletes files at will, even though they may be opened with another handle at the same time. We had to relax the restrictions and allow nearly all the concurrent access permissions possible.
+
+## Thread-Local Storage
+Thread-local storage plays a significant role in RocksDB performance. Rather than creating a separate implementation, we chose to create inline wrappers that forward `pthread_getspecific`/`pthread_setspecific` calls to the Windows `Tls` interfaces within the `rocksdb::port` namespace. This leaves the existing meat of the logic intact and unchanged, and just as maintainable.
+
+To mitigate the lack of thread-local storage cleanup on thread exit, we added a limited amount of Windows-specific code within the same thread_local.cc file that injects a cleanup callback into a `"__tls"` structure within the `".CRT$XLB"` data segment. This approach guarantees that the callback is invoked regardless of whether RocksDB is used within an executable, a standalone DLL or within another DLL.
+
+## Jemalloc usage
+
+When RocksDB is used with Jemalloc, the latter needs to be initialized before any of the C++ globals or statics. To accomplish that we injected an initialization routine into `".CRT$XCT"` that is automatically invoked by the runtime before initializing static objects. The jemalloc un-initialization routine is queued to `atexit()`.
+
+The jemalloc-redirecting global `new`/`delete` operators are picked up by the linker provided certain conditions are met.
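+As a schematic example only (not the port's actual definitions), operators of the following shape are what the linker has to pick up; the `je_`-prefixed symbol names assume a jemalloc build configured with that prefix:
+
+```cpp
+#include <cstddef>
+#include <new>
+
+#include <jemalloc/jemalloc.h>  // assumed je_-prefixed jemalloc build
+
+// Illustrative only: route the global allocator through jemalloc.
+void* operator new(std::size_t size) {
+  void* p = je_malloc(size);
+  if (p == nullptr) {
+    throw std::bad_alloc();
+  }
+  return p;
+}
+
+void operator delete(void* p) noexcept {
+  if (p != nullptr) {
+    je_free(p);
+  }
+}
+```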
See the build section in these notes for those conditions.
+
+## Stack Trace and Unhandled Exception Handler
+
+We decided not to implement these two features because the hosting program, as a rule, already has them.
+We experienced no inconvenience debugging issues in the debugger or analyzing process dumps when needed, and thus we did not
+see this as a priority.
+
+## Performance results
+### Setup
+All of the benchmarks are run on the same set of machines. Here are the details of the test setup:
+* 2 Intel(R) Xeon(R) E5 2450 0 @ 2.10 GHz (total 16 cores)
+* 2 XK0480GDQPH SSD devices, total 894GB free disk
+* Machine has 128 GB of RAM
+* Operating System: Windows Server 2012 R2 Datacenter
+* 100 Million keys; each key is of size 10 bytes, each value is of size 800 bytes
+* total database size is ~76GB
+* The performance result is based on RocksDB 3.11.
+* The parameters used, unless specified, were exactly the same as published in the GitHub Wiki page.
+
+### RocksDB on flash storage
+
+#### Test 1. Bulk Load of keys in Random Order
+
+Version 3.11
+
+* Total Run Time: 17.6 min
+* Fillrandom: 5.480 micros/op 182465 ops/sec; 142.0 MB/s
+* Compact: 486056544.000 micros/op 0 ops/sec
+
+Version 3.10
+
+* Total Run Time: 16.2 min
+* Fillrandom: 5.018 micros/op 199269 ops/sec; 155.1 MB/s
+* Compact: 441313173.000 micros/op 0 ops/sec;
+
+
+#### Test 2. Bulk Load of keys in Sequential Order
+
+Version 3.11
+
+* Fillseq: 4.944 micros/op 202k ops/sec; 157.4 MB/s
+
+Version 3.10
+
+* Fillseq: 4.105 micros/op 243.6k ops/sec; 189.6 MB/s
+
+
+#### Test 3. Random Write
+
+Version 3.11
+
+* Unbuffered I/O enabled
+* Overwrite: 52.661 micros/op 18.9k ops/sec; 14.8 MB/s
+
+Version 3.10
+
+* Unbuffered I/O enabled
+* Overwrite: 52.661 micros/op 18.9k ops/sec;
+
+
+#### Test 4. Random Read
+
+Version 3.11
+
+* Unbuffered I/O enabled
+* Readrandom: 15.716 micros/op 63.6k ops/sec; 49.5 MB/s
+
+Version 3.10
+
+* Unbuffered I/O enabled
+* Readrandom: 15.548 micros/op 64.3k ops/sec;
+
+
+#### Test 5. Multi-threaded read and single-threaded write
+
+Version 3.11
+
+* Unbuffered I/O enabled
+* Readwhilewriting: 25.128 micros/op 39.7k ops/sec;
+
+Version 3.10
+
+* Unbuffered I/O enabled
+* Readwhilewriting: 24.854 micros/op 40.2k ops/sec;
+
+
+### RocksDB In Memory
+
+#### Test 1. Point Lookup
+
+Version 3.11
+
+*80K writes/sec*
+* Write Rate Achieved: 40.5k write/sec;
+* Readwhilewriting: 0.314 micros/op 3187455 ops/sec; 364.8 MB/s (715454999 of 715454999 found)
+
+Version 3.10
+
+* Write Rate Achieved: 50.6k write/sec
+* Readwhilewriting: 0.316 micros/op 3162028 ops/sec; (719576999 of 719576999 found)
+
+
+*10K writes/sec*
+
+Version 3.11
+
+* Write Rate Achieved: 5.8k/s write/sec
+* Readwhilewriting: 0.246 micros/op 4062669 ops/sec; 464.9 MB/s (915481999 of 915481999 found)
+
+Version 3.10
+
+* Write Rate Achieved: 5.8k/s write/sec
+* Readwhilewriting: 0.244 micros/op 4106253 ops/sec; (927986999 of 927986999 found)
+
+
+#### Test 2.
Prefix Range Query + +Version 3.11 + +80K writes/sec +* Write Rate Achieved: 46.3k/s write/sec +* Readwhilewriting: 0.362 micros/op 2765052 ops/sec; 316.4 MB/s (611549999 of 611549999 found) + +Version 3.10 + +* Write Rate Achieved: 45.8k/s write/sec +* Readwhilewriting: 0.317 micros/op 3154941 ops/sec; (708158999 of 708158999 found) + +Version 3.11 + +10K writes/sec +* Write Rate Achieved: 5.78k write/sec +* Readwhilewriting: 0.269 micros/op 3716692 ops/sec; 425.3 MB/s (837401999 of 837401999 found) + +Version 3.10 + +* Write Rate Achieved: 5.7k write/sec +* Readwhilewriting: 0.261 micros/op 3830152 ops/sec; (863482999 of 863482999 found) + + +We think that there is still big room to improve the performance, which will be an ongoing effort for us. + diff --git a/external/rocksdb/appveyor.yml b/external/rocksdb/appveyor.yml new file mode 100755 index 0000000000..aeb8b948d5 --- /dev/null +++ b/external/rocksdb/appveyor.yml @@ -0,0 +1,16 @@ +version: 1.0.{build} +before_build: +- md %APPVEYOR_BUILD_FOLDER%\build +- cd %APPVEYOR_BUILD_FOLDER%\build +- cmake -G "Visual Studio 12 Win64" -DOPTDBG=1 -DXPRESS=1 .. +- cd .. +build: + project: build\rocksdb.sln + parallel: true + verbosity: minimal +test: +test_script: +- ps: build_tools\run_ci_db_test.ps1 -EnableRerun -Run db_test2 -Concurrency 8 +- ps: build_tools\run_ci_db_test.ps1 -EnableRerun -Run db_test -Exclude DBTest.GroupCommitTest -Concurrency 10 +- ps: build_tools\run_ci_db_test.ps1 -Run env_test -Concurrency 1 + diff --git a/external/rocksdb/arcanist_util/__phutil_library_init__.php b/external/rocksdb/arcanist_util/__phutil_library_init__.php new file mode 100755 index 0000000000..bc732cad60 --- /dev/null +++ b/external/rocksdb/arcanist_util/__phutil_library_init__.php @@ -0,0 +1,3 @@ + 2, + 'class' => + array( + 'ArcanistCpplintLinter' => 'cpp_linter/ArcanistCpplintLinter.php', + 'BaseDirectoryScopedFormatLinter' => 'cpp_linter/BaseDirectoryScopedFormatLinter.php', + 'FacebookArcanistConfiguration' => 'config/FacebookArcanistConfiguration.php', + 'FacebookFbcodeLintEngine' => 'lint_engine/FacebookFbcodeLintEngine.php', + 'FacebookFbcodeUnitTestEngine' => 'unit_engine/FacebookFbcodeUnitTestEngine.php', + 'FacebookHowtoevenLintEngine' => 'lint_engine/FacebookHowtoevenLintEngine.php', + 'FacebookHowtoevenLinter' => 'cpp_linter/FacebookHowtoevenLinter.php', + 'FbcodeClangFormatLinter' => 'cpp_linter/FbcodeClangFormatLinter.php', + 'FbcodeCppLinter' => 'cpp_linter/FbcodeCppLinter.php', + ), + 'function' => + array( + ), + 'xmap' => + array( + 'ArcanistCpplintLinter' => 'ArcanistLinter', + 'BaseDirectoryScopedFormatLinter' => 'ArcanistLinter', + 'FacebookArcanistConfiguration' => 'ArcanistConfiguration', + 'FacebookFbcodeLintEngine' => 'ArcanistLintEngine', + 'FacebookFbcodeUnitTestEngine' => 'ArcanistBaseUnitTestEngine', + 'FacebookHowtoevenLintEngine' => 'ArcanistLintEngine', + 'FacebookHowtoevenLinter' => 'ArcanistLinter', + 'FbcodeClangFormatLinter' => 'BaseDirectoryScopedFormatLinter', + 'FbcodeCppLinter' => 'ArcanistLinter', + ), + )); +} else { + phutil_register_library_map(array( + '__library_version__' => 2, + 'class' => + array( + 'ArcanistCpplintLinter' => 'cpp_linter/ArcanistCpplintLinter.php', + 'BaseDirectoryScopedFormatLinter' => 'cpp_linter/BaseDirectoryScopedFormatLinter.php', + 'FacebookArcanistConfiguration' => 'config/FacebookOldArcanistConfiguration.php', + 'FacebookFbcodeLintEngine' => 'lint_engine/FacebookFbcodeLintEngine.php', + 'FacebookFbcodeUnitTestEngine' => 'unit_engine/FacebookFbcodeUnitTestEngine.php', + 
'FacebookHowtoevenLintEngine' => 'lint_engine/FacebookHowtoevenLintEngine.php', + 'FacebookHowtoevenLinter' => 'cpp_linter/FacebookHowtoevenLinter.php', + 'FbcodeClangFormatLinter' => 'cpp_linter/FbcodeClangFormatLinter.php', + 'FbcodeCppLinter' => 'cpp_linter/FbcodeCppLinter.php', + ), + 'function' => + array( + ), + 'xmap' => + array( + 'ArcanistCpplintLinter' => 'ArcanistLinter', + 'BaseDirectoryScopedFormatLinter' => 'ArcanistLinter', + 'FacebookArcanistConfiguration' => 'ArcanistConfiguration', + 'FacebookFbcodeLintEngine' => 'ArcanistLintEngine', + 'FacebookFbcodeUnitTestEngine' => 'ArcanistBaseUnitTestEngine', + 'FacebookHowtoevenLintEngine' => 'ArcanistLintEngine', + 'FacebookHowtoevenLinter' => 'ArcanistLinter', + 'FbcodeClangFormatLinter' => 'BaseDirectoryScopedFormatLinter', + 'FbcodeCppLinter' => 'ArcanistLinter', + ), + )); +} diff --git a/external/rocksdb/arcanist_util/config/FacebookArcanistConfiguration.php b/external/rocksdb/arcanist_util/config/FacebookArcanistConfiguration.php new file mode 100755 index 0000000000..d82f9a8734 --- /dev/null +++ b/external/rocksdb/arcanist_util/config/FacebookArcanistConfiguration.php @@ -0,0 +1,36 @@ + 0); + + if ($command == DIFF_COMMAND && !$workflow->isRawDiffSource()) { + $diffID = $workflow->getDiffId(); + + // When submitting a diff this code path gets executed multiple times in + // a row. We only care about the case when ID for the diff is provided + // because that's what we need to apply the diff and trigger the tests. + if (strlen($diffID) > 0) { + assert(is_numeric($diffID)); + startTestsInSandcastle(true /* $applyDiff */, $workflow, $diffID); + } + } + } +} diff --git a/external/rocksdb/arcanist_util/config/FacebookOldArcanistConfiguration.php b/external/rocksdb/arcanist_util/config/FacebookOldArcanistConfiguration.php new file mode 100755 index 0000000000..aaab040dd1 --- /dev/null +++ b/external/rocksdb/arcanist_util/config/FacebookOldArcanistConfiguration.php @@ -0,0 +1,36 @@ + 0); + + if ($command == DIFF_COMMAND && !$workflow->isRawDiffSource()) { + $diffID = $workflow->getDiffId(); + + // When submitting a diff this code path gets executed multiple times in + // a row. We only care about the case when ID for the diff is provided + // because that's what we need to apply the diff and trigger the tests. + if (strlen($diffID) > 0) { + assert(is_numeric($diffID)); + startTestsInSandcastle(true /* $applyDiff */, $workflow, $diffID); + } + } + } +} diff --git a/external/rocksdb/arcanist_util/config/RocksDBCommonHelper.php b/external/rocksdb/arcanist_util/config/RocksDBCommonHelper.php new file mode 100755 index 0000000000..b3adf240cd --- /dev/null +++ b/external/rocksdb/arcanist_util/config/RocksDBCommonHelper.php @@ -0,0 +1,328 @@ + 0); + assert(is_numeric($diffID)); + assert(strlen($url) > 0); + + $cmd = 'echo \'{"diff_id": "' . $diffID . '", ' + . '"name":"click here for sandcastle tests for D' . $diffID . '", ' + . '"link":"' . $url . '"}\' | ' + . 'http_proxy=fwdproxy.any.facebook.com:8080 ' + . 'https_proxy=fwdproxy.any.facebook.com:8080 arc call-conduit ' + . 'differential.updateunitresults'; + shell_exec($cmd); +} + +function buildUpdateTestStatusCmd($diffID, $test, $status) { + assert(strlen($diffID) > 0); + assert(is_numeric($diffID)); + assert(strlen($test) > 0); + assert(strlen($status) > 0); + + $cmd = 'echo \'{"diff_id": "' . $diffID . '", ' + . '"name":"' . $test . '", ' + . '"result":"' . $status . '"}\' | ' + . 'http_proxy=fwdproxy.any.facebook.com:8080 ' + . 
'https_proxy=fwdproxy.any.facebook.com:8080 arc call-conduit '
+ . 'differential.updateunitresults';
+ return $cmd;
+}
+
+function updateTestStatus($diffID, $test) {
+ assert(strlen($diffID) > 0);
+ assert(is_numeric($diffID));
+ assert(strlen($test) > 0);
+
+ shell_exec(buildUpdateTestStatusCmd($diffID, $test, "waiting"));
+}
+
+function getSteps($applyDiff, $diffID, $username, $test) {
+ assert(strlen($username) > 0);
+ assert(strlen($test) > 0);
+
+ if ($applyDiff) {
+ assert(strlen($diffID) > 0);
+ assert(is_numeric($diffID));
+
+ $arcrc_content = exec("cat ~/.arcrc | gzip -f | base64 -w0");
+ assert(strlen($arcrc_content) > 0);
+
+ // Sandcastle machines don't have arc set up. We copy the user certificate
+ // and authenticate using that in Sandcastle.
+ $setup = array(
+ "name" => "Setup arcrc",
+ "shell" => "echo " . $arcrc_content . " | base64 --decode"
+ . " | gzip -d > ~/.arcrc",
+ "user" => "root"
+ );
+
+ // arc demands certain permissions on its config.
+ // also fix the sticky bit issue in sandcastle
+ $fix_permission = array(
+ "name" => "Fix environment",
+ "shell" => "chmod 600 ~/.arcrc && chmod +t /dev/shm",
+ "user" => "root"
+ );
+
+ // Construct the steps in the order of execution.
+ $steps[] = $setup;
+ $steps[] = $fix_permission;
+ }
+
+ // fbcode is a sub-repo. We cannot patch until we add it to the ignore list;
+ // otherwise Git thinks it is an uncommitted change.
+ $fix_git_ignore = array(
+ "name" => "Fix git ignore",
+ "shell" => "echo fbcode >> .git/info/exclude",
+ "user" => "root"
+ );
+
+ $steps[] = $fix_git_ignore;
+
+ // This will be the command used to execute a particular type of test.
+ $cmd = "";
+
+ if ($applyDiff) {
+ // Patch the code (keep your fingers crossed).
+ $patch = array(
+ "name" => "Patch " . $diffID,
+ "shell" => "HTTPS_PROXY=fwdproxy:8080 arc --arcrc-file ~/.arcrc "
+ . "patch --nocommit --diff " . $diffID,
+ "user" => "root"
+ );
+
+ $steps[] = $patch;
+
+ updateTestStatus($diffID, $test);
+ $cmd = buildUpdateTestStatusCmd($diffID, $test, "running") . "; ";
+ }
+
+ // Run the actual command.
+ $cmd = $cmd . "J=$(nproc) ./build_tools/precommit_checker.py " . $test
+ . "; exit_code=$?; ";
+
+ if ($applyDiff) {
+ $cmd = $cmd . "([[ \$exit_code -eq 0 ]] &&"
+ . buildUpdateTestStatusCmd($diffID, $test, "pass") . ")"
+ . "||" . buildUpdateTestStatusCmd($diffID, $test, "fail")
+ . "; ";
+ }
+
+ $cmd = $cmd . " cat /tmp/precommit-check.log"
+ . "; for f in `ls t/log-*`; do echo \$f; cat \$f; done;"
+ . "[[ \$exit_code -eq 0 ]]";
+ assert(strlen($cmd) > 0);
+
+ $run_test = array(
+ "name" => "Run " . $test,
+ "shell" => $cmd,
+ "user" => "root",
+ "parser" => "python build_tools/error_filter.py " . $test,
+ );
+
+ $steps[] = $run_test;
+
+ if ($applyDiff) {
+ // Clean up the user arc config we are using.
+ $cleanup = array(
+ "name" => "Arc cleanup",
+ "shell" => "rm -f ~/.arcrc",
+ "user" => "root"
+ );
+
+ $steps[] = $cleanup;
+ }
+
+ assert(count($steps) > 0);
+ return $steps;
+}
+
+function getSandcastleConfig() {
+ $sandcastle_config = array();
+
+ // This is the case when we're executed from a continuous run. Fetch the values
+ // from the environment.
+ if (getenv(ENV_POST_RECEIVE_HOOK)) {
+ $sandcastle_config[0] = getenv(ENV_HTTPS_APP_VALUE);
+ $sandcastle_config[1] = getenv(ENV_HTTPS_TOKEN_VALUE);
+ } else {
+ // This is a typical `[p]arc diff` case. Fetch the values from the specific
+ // configuration files.
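+ // Each token file is expected to hold a single app:token pair; the
+ // explode() below splits it into the two-element array we validate.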
+ assert(file_exists(PRIMARY_TOKEN_FILE) || + file_exists(SECONDARY_TOKEN_FILE)); + + // Try the primary location first, followed by a secondary. + if (file_exists(PRIMARY_TOKEN_FILE)) { + $cmd = 'cat ' . PRIMARY_TOKEN_FILE; + } else { + $cmd = 'cat ' . SECONDARY_TOKEN_FILE; + } + + assert(strlen($cmd) > 0); + $sandcastle_config = explode(':', rtrim(shell_exec($cmd))); + } + + // In this case be very explicit about the implications. + if (count($sandcastle_config) != 2) { + echo "Sandcastle configuration files don't contain valid information " . + "or the necessary environment variables aren't defined. Unable " . + "to validate the code changes."; + exit(1); + } + + assert(strlen($sandcastle_config[0]) > 0); + assert(strlen($sandcastle_config[1]) > 0); + assert(count($sandcastle_config) > 0); + + return $sandcastle_config; +} + +// This function can be called either from `[p]arc diff` command or during +// the Git post-receive hook. +function startTestsInSandcastle($applyDiff, $workflow, $diffID) { + // Default options don't terminate on failure, but that's what we want. In + // the current case we use assertions intentionally as "terminate on failure + // invariants". + assert_options(ASSERT_BAIL, true); + + // In case of a diff we'll send notifications to the author. Otherwise it'll + // go to the entire team because failures indicate that build quality has + // regressed. + $username = $applyDiff ? exec("whoami") : CONT_RUN_ALIAS; + assert(strlen($username) > 0); + + if ($applyDiff) { + assert($workflow); + assert(strlen($diffID) > 0); + assert(is_numeric($diffID)); + } + + if (strcmp(getenv("ROCKSDB_CHECK_ALL"), 1) == 0) { + // Extract all tests from the CI definition. + $output = file_get_contents("build_tools/rocksdb-lego-determinator"); + assert(strlen($output) > 0); + + preg_match_all('/[ ]{2}([a-zA-Z0-9_]+)[\)]{1}/', $output, $matches); + $tests = $matches[1]; + assert(count($tests) > 0); + } else { + // Manual list of tests we want to run in Sandcastle. + $tests = array( + "unit", "unit_non_shm", "unit_481", "clang_unit", "tsan", "asan", + "lite_test", "valgrind", "release", "release_481", "clang_release" + ); + } + + $send_email_template = array( + 'type' => 'email', + 'triggers' => array('fail'), + 'emails' => array($username . '@fb.com'), + ); + + // Construct a job definition for each test and add it to the master plan. + foreach ($tests as $test) { + $stepName = "RocksDB diff " . $diffID . " test " . $test; + + if (!$applyDiff) { + $stepName = "RocksDB continuous integration test " . $test; + } + + $arg[] = array( + "name" => $stepName, + "report" => array($send_email_template), + "steps" => getSteps($applyDiff, $diffID, $username, $test) + ); + } + + // We cannot submit the parallel execution master plan to Sandcastle and + // need to supply the job plan as a determinator. So we construct a small job + // that will spit out the master job plan which Sandcastle will parse and + // execute. Why compress the job definitions? Otherwise we run over the max + // string size. + $cmd = "echo " . base64_encode(json_encode($arg)) + . " | gzip -f | base64 -w0"; + assert(strlen($cmd) > 0); + + $arg_encoded = shell_exec($cmd); + assert(strlen($arg_encoded) > 0); + + $runName = "Run diff " . $diffID . " for user " . $username; + + if (!$applyDiff) { + $runName = "RocksDB continuous integration build and test run"; + } + + $command = array( + "name" => $runName, + "steps" => array() + ); + + $command["steps"][] = array( + "name" => "Generate determinator", + "shell" => "echo " . $arg_encoded .
" | base64 --decode | gzip -d" + . " | base64 --decode", + "determinator" => true, + "user" => "root" + ); + + // Submit to Sandcastle. + $url = 'https://interngraph.intern.facebook.com/sandcastle/generate?' + .'command=SandcastleUniversalCommand' + .'&vcs=rocksdb-git&revision=origin%2Fmaster&type=lego' + .'&user=' . $username . '&alias=rocksdb-precommit' + .'&command-args=' . urlencode(json_encode($command)); + + // Fetch the configuration necessary to submit a successful HTTPS request. + $sandcastle_config = getSandcastleConfig(); + + $app = $sandcastle_config[0]; + $token = $sandcastle_config[1]; + + $cmd = 'https_proxy= HTTPS_PROXY= curl -s -k -F app=' . $app . ' ' + . '-F token=' . $token . ' "' . $url . '"'; + + $output = shell_exec($cmd); + assert(strlen($output) > 0); + + // Extract Sandcastle URL from the response. + preg_match('/url": "(.+)"/', $output, $sandcastle_url); + + assert(count($sandcastle_url) > 0, "Unable to submit Sandcastle request."); + assert(strlen($sandcastle_url[1]) > 0, "Unable to extract Sandcastle URL."); + + if ($applyDiff) { + echo "\nSandcastle URL: " . $sandcastle_url[1] . "\n"; + // Ask Phabricator to display it on the diff UI. + postURL($diffID, $sandcastle_url[1]); + } else { + echo "Continuous integration started Sandcastle tests. You can look at "; + echo "the progress at:\n" . $sandcastle_url[1] . "\n"; + } +} + +// Continuous run cript will set the environment variable and based on that +// we'll trigger the execution of tests in Sandcastle. In that case we don't +// need to apply any diffs and there's no associated workflow either. +if (getenv(ENV_POST_RECEIVE_HOOK)) { + startTestsInSandcastle( + false /* $applyDiff */, + NULL /* $workflow */, + NULL /* $diffID */); +} diff --git a/external/rocksdb/arcanist_util/cpp_linter/ArcanistCpplintLinter.php b/external/rocksdb/arcanist_util/cpp_linter/ArcanistCpplintLinter.php new file mode 100755 index 0000000000..b9c4137555 --- /dev/null +++ b/external/rocksdb/arcanist_util/cpp_linter/ArcanistCpplintLinter.php @@ -0,0 +1,88 @@ +linterDir(), $bin); + if (!$err) { + return $this->linterDir().'/'.$bin; + } + + // Look for globally installed cpplint.py + list($err) = exec_manual('which %s', $bin); + if ($err) { + throw new ArcanistUsageException( + "cpplint.py does not appear to be installed on this system. Install ". + "it (e.g., with 'wget \"http://google-styleguide.googlecode.com/". + "svn/trunk/cpplint/cpplint.py\"') ". + "in your .arcconfig to point to the directory where it resides. ". 
+ "Also don't forget to chmod a+x cpplint.py!"); + } + + return $bin; + } + + public function lintPath($path) { + $bin = $this->getLintPath(); + $path = $this->rocksdbDir().'/'.$path; + + $f = new ExecFuture("%C $path", $bin); + + list($err, $stdout, $stderr) = $f->resolve(); + + if ($err === 2) { + throw new Exception("cpplint failed to run correctly:\n".$stderr); + } + + $lines = explode("\n", $stderr); + $messages = array(); + foreach ($lines as $line) { + $line = trim($line); + $matches = null; + $regex = '/^[^:]+:(\d+):\s*(.*)\s*\[(.*)\] \[(\d+)\]$/'; + if (!preg_match($regex, $line, $matches)) { + continue; + } + foreach ($matches as $key => $match) { + $matches[$key] = trim($match); + } + $message = new ArcanistLintMessage(); + $message->setPath($path); + $message->setLine($matches[1]); + $message->setCode($matches[3]); + $message->setName($matches[3]); + $message->setDescription($matches[2]); + $message->setSeverity(ArcanistLintSeverity::SEVERITY_WARNING); + $this->addLintMessage($message); + } + } + + // The path of this linter + private function linterDir() { + return dirname(__FILE__); + } + + // TODO(kaili) a quick and dirty way to figure out rocksdb's root dir. + private function rocksdbDir() { + return $this->linterDir()."/../.."; + } +} diff --git a/external/rocksdb/arcanist_util/cpp_linter/BaseDirectoryScopedFormatLinter.php b/external/rocksdb/arcanist_util/cpp_linter/BaseDirectoryScopedFormatLinter.php new file mode 100755 index 0000000000..4a7b307dc8 --- /dev/null +++ b/external/rocksdb/arcanist_util/cpp_linter/BaseDirectoryScopedFormatLinter.php @@ -0,0 +1,74 @@ +getPathsToLint() as $p) { + // check if $path starts with $p + if (strncmp($path, $p, strlen($p)) === 0) { + return true; + } + } + return false; + } + + // API to tell this linter which lines were changed + final public function setPathChangedLines($path, $changed) { + $this->changedLines[$path] = $changed; + } + + final public function willLintPaths(array $paths) { + $futures = array(); + foreach ($paths as $path) { + if (!$this->shouldLintPath($path)) { + continue; + } + + $changed = $this->changedLines[$path]; + if (!isset($changed)) { + // do not run linter if there are no changes + continue; + } + + $futures[$path] = $this->getFormatFuture($path, $changed); + } + + foreach (id(new FutureIterator($futures))->limit(8) as $p => $f) { + $this->rawLintOutput[$p] = $f->resolvex(); + } + } + + abstract protected function getFormatFuture($path, array $changed); + abstract protected function getLintMessage($diff); + + final public function lintPath($path) { + if (!isset($this->rawLintOutput[$path])) { + return; + } + + list($new_content) = $this->rawLintOutput[$path]; + $old_content = $this->getData($path); + + if ($new_content != $old_content) { + $diff = ArcanistDiffUtils::renderDifferences($old_content, $new_content); + $this->raiseLintAtOffset( + 0, + self::LINT_FORMATTING, + $this->getLintMessage($diff), + $old_content, + $new_content); + } + } + +} diff --git a/external/rocksdb/arcanist_util/cpp_linter/FacebookHowtoevenLinter.php b/external/rocksdb/arcanist_util/cpp_linter/FacebookHowtoevenLinter.php new file mode 100755 index 0000000000..6edb114b66 --- /dev/null +++ b/external/rocksdb/arcanist_util/cpp_linter/FacebookHowtoevenLinter.php @@ -0,0 +1,223 @@ +localExecx("rm -rf _build/_lint"); + + // Build compilation database. + $lintable_paths = $this->getLintablePaths($paths); + $interesting_paths = $this->getInterestingPaths($lintable_paths); + + if (!$lintable_paths) { + return; + } + + // Run lint. 
+ try { + $this->localExecx( + "%C %C -p _build/dev/ %Ls", + $this->getBinaryPath(), + $this->getFilteredIssues(), + $lintable_paths); + } catch (CommandException $exception) { + PhutilConsole::getConsole()->writeErr($exception->getMessage()); + } + + // Load results. + $result = id( + new SQLite3( + $this->getProjectRoot().'/_build/_lint/lint.db', + SQLITE3_OPEN_READONLY)) + ->query("SELECT * FROM raised_issues"); + + while ($issue = $result->fetchArray(SQLITE3_ASSOC)) { + // Skip issues not part of the linted file. + if (in_array($issue['file'], $interesting_paths)) { + $this->addLintMessage(id(new ArcanistLintMessage()) + ->setPath($issue['file']) + ->setLine($issue['line']) + ->setChar($issue['column']) + ->setCode('Howtoeven') + ->setSeverity($this->getSeverity($issue['severity'])) + ->setName('Hte-'.$issue['name']) + ->setDescription( + sprintf( + "%s\n\n%s", + ($issue['message']) ? $issue['message'] : $issue['description'], + $issue['explanation'])) + ->setOriginalText(idx($issue, 'original', '')) + ->setReplacementText(idx($issue, 'replacement', ''))); + } + } + } + + public function lintPath($path) { + } + + /** + * Get the paths that we know how to lint. + * + * The strategy is to first look whether there's an existing compilation + * database and use that if it's exhaustive. We generate our own only if + * necessary. + */ + private function getLintablePaths($paths) { + // Replace headers with existing sources. + for ($i = 0; $i < count($paths); $i++) { + if (preg_match("/\.h$/", $paths[$i])) { + $header = preg_replace("/\.h$/", ".cpp", $paths[$i]); + if (file_exists($header)) { + $paths[$i] = $header; + } + } + } + + // Check if database exists and is exhaustive. + $available_paths = $this->getAvailablePaths(); + $lintable_paths = array_intersect($paths, $available_paths); + if ($paths === $lintable_paths) { + return $lintable_paths; + } + + // Generate our own database. + $targets = $this->getTargetsFor($paths); + if (!$targets) { + PhutilConsole::getConsole()->writeErr( + "No build targets found for %s\n", + implode(', ', $paths)); + return array(); + } + + $this->localExecx("./tools/build/bin/fbconfig.par -r %Ls", $targets); + $this->localExecx("./tools/build/bin/fbmake.par gen_cdb"); + + $available_paths = $this->getAvailablePaths(); + $lintable_paths = array_intersect($paths, $available_paths); + if ($paths != $lintable_paths) { + PhutilConsole::getConsole()->writeErr( + "Can't lint %s\n", + implode(', ', array_diff($paths, $available_paths))); + } + + // Return what we know how to lint. + return $lintable_paths; + } + + /** + * Get the available paths in the current compilation database. + */ + private function getAvailablePaths() { + $database_path = $this->getProjectRoot() + .'/_build/dev/compile_commands.json'; + if (!file_exists($database_path)) { + return array(); + } + + $entries = json_decode(file_get_contents($database_path), true); + $paths = array(); + foreach ($entries as $entry) { + $paths[] = $entry['file']; + } + return $paths; + } + + /** + * Search for the targets directories for the given files. + */ + private static function getTargetsFor($paths) { + $targets = array(); + foreach ($paths as $path) { + while (($path = dirname($path)) !== '.') { + if (in_array('TARGETS', scandir($path))) { + $contents = file_get_contents($path.'/TARGETS'); + if (strpos($contents, 'cpp_binary') !== false) { + $targets[] = $path; + break; + } + } + } + } + return array_unique($targets); + } + + /** + * The paths that we actually want to report on. 
+ */ + private function getInterestingPaths($paths) { + $headers = array(); + foreach ($paths as $path) { + $headers[] = preg_replace("/\.cpp$/", ".h", $path); + } + return array_merge($paths, $headers); + } + + /** + * The path where the binary is located. Will return the current dewey binary + * unless the `HOWTOEVEN_BUILD` environment variable is set. + */ + private function getBinaryPath() { + $path = sprintf( + "/mnt/dewey/fbcode/.commits/%s/builds/howtoeven/client", + self::VERSION); + + $build = getenv('HOWTOEVEN_BUILD'); + if ($build) { + $path = sprintf( + "./_build/%s/tools/howtoeven/client", + $build); + if (!file_exists($path)) { + PhutilConsole::getConsole()->writeErr(">> %s does not exist\n", $path); + exit(1); + } + } + + return $path; + } + + /** + * Execute the command in the root directory. + */ + private function localExecx($command /* , ... */) { + $arguments = func_get_args(); + return newv('ExecFuture', $arguments) + ->setCWD($this->getProjectRoot()) + ->resolvex(); + } + + /** + * The root of the project. + */ + private function getProjectRoot() { + return $this->getEngine()->getWorkingCopy()->getProjectRoot(); + } + + private function getFilteredIssues() { + $issues = getenv('HOWTOEVEN_ISSUES'); + return ($issues) ? csprintf('-issues %s', $issues) : ''; + } + +} diff --git a/external/rocksdb/arcanist_util/cpp_linter/FbcodeClangFormatLinter.php b/external/rocksdb/arcanist_util/cpp_linter/FbcodeClangFormatLinter.php new file mode 100755 index 0000000000..a94a0bed15 --- /dev/null +++ b/external/rocksdb/arcanist_util/cpp_linter/FbcodeClangFormatLinter.php @@ -0,0 +1,58 @@ + ArcanistLintSeverity::SEVERITY_ADVICE, + ); + } + + public function getLintNameMap() { + return array( + self::LINT_FORMATTING => pht('Changes are not clang-formatted'), + ); + } + + protected function getFormatFuture($path, array $changed) { + $args = ""; + foreach ($changed as $key => $value) { + $args .= " --lines=$key:$key"; + } + + $binary = self::CLANG_FORMAT_BINARY; + if (!file_exists($binary)) { + // trust the $PATH + $binary = "clang-format"; + } + + return new ExecFuture( + "%s %s $args", + $binary, + $this->getEngine()->getFilePathOnDisk($path)); + } + + protected function getLintMessage($diff) { + $link_to_clang_format = + "[[ http://fburl.com/clang-format | clang-format ]]"; + return <<getEngine()->getFilePathOnDisk($p); + $lpath_file = file($lpath); + if (preg_match('/\.(c)$/', $lpath) || + preg_match('/-\*-.*Mode: C[; ].*-\*-/', $lpath_file[0]) || + preg_match('/vim(:.*)*:\s*(set\s+)?filetype=c\s*:/', $lpath_file[0]) + ) { + $futures[$p] = new ExecFuture("%s %s %s 2>&1", + self::FLINT, self::C_FLAG, + $this->getEngine()->getFilePathOnDisk($p)); + } else { + $futures[$p] = new ExecFuture("%s %s 2>&1", + self::FLINT, $this->getEngine()->getFilePathOnDisk($p)); + } + } + + foreach (Futures($futures)->limit(8) as $p => $f) { + $this->rawLintOutput[$p] = $f->resolvex(); + } + + return; + } + + public function getLinterName() { + return "FBCPP"; + } + + public function lintPath($path) { + $this->runCppLint($path); + } + + private function runCppLint($path) { + $msgs = $this->getCppLintOutput($path); + foreach ($msgs as $m) { + $this->raiseLintAtLine($m['line'], 0, $m['severity'], $m['msg']); + } + } + + private function adviseOnEachPattern( + $path, + $regex, + $message, + $lint_type = self::LINT_ADVICE, + $match_idx = 0) { + $file_data = $this->getData($path); + $matches = array(); + if (!preg_match_all($regex, $file_data, $matches, PREG_OFFSET_CAPTURE)) { + return; + } + + foreach 
($matches[$match_idx] as $match) { + list($match_str, $offset) = $match; + $this->raiseLintAtOffset($offset, $lint_type, $message, $match_str); + } + } + + public function getLintSeverityMap() { + return array( + self::LINT_WARNING => ArcanistLintSeverity::SEVERITY_WARNING, + self::LINT_ADVICE => ArcanistLintSeverity::SEVERITY_ADVICE, + self::LINT_ERROR => ArcanistLintSeverity::SEVERITY_ERROR + ); + } + + public function getLintNameMap() { + return array( + self::LINT_ADVICE => "CppLint Advice", + self::LINT_WARNING => "CppLint Warning", + self::LINT_ERROR => "CppLint Error" + ); + } + + private function getCppLintOutput($path) { + if (!array_key_exists($path, $this->rawLintOutput)) { + return array(); + } + list($output) = $this->rawLintOutput[$path]; + + $msgs = array(); + $current = null; + $matches = array(); + foreach (explode("\n", $output) as $line) { + if (preg_match('/.*?:(\d+):(.*)/', $line, $matches)) { + if ($current) { + $msgs[] = $current; + } + $line = $matches[1]; + $text = $matches[2]; + if (preg_match('/.*Warning.*/', $text)) { + $sev = self::LINT_WARNING; + } else if (preg_match('/.*Advice.*/', $text)) { + $sev = self::LINT_ADVICE; + } else { + $sev = self::LINT_ERROR; + } + $current = array('line' => $line, + 'msg' => $text, + 'severity' => $sev); + } else if ($current) { + $current['msg'] .= ' ' . $line; + } + } + if ($current) { + $msgs[] = $current; + } + + return $msgs; + } +} diff --git a/external/rocksdb/arcanist_util/cpp_linter/cpplint.py b/external/rocksdb/arcanist_util/cpp_linter/cpplint.py new file mode 100755 index 0000000000..3d0c45a6dd --- /dev/null +++ b/external/rocksdb/arcanist_util/cpp_linter/cpplint.py @@ -0,0 +1,4767 @@ +#!/usr/bin/python +# Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +# This source code is licensed under the BSD-style license found in the +# LICENSE file in the root directory of this source tree. An additional grant +# of patent rights can be found in the PATENTS file in the same directory. +# Copyright (c) 2011 The LevelDB Authors. All rights reserved. +# Use of this source code is governed by a BSD-style license that can be +# found in the LICENSE file. See the AUTHORS file for names of contributors. +# +# Copyright (c) 2009 Google Inc. All rights reserved. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are +# met: +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# * Redistributions in binary form must reproduce the above +# copyright notice, this list of conditions and the following disclaimer +# in the documentation and/or other materials provided with the +# distribution. +# * Neither the name of Google Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +# A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +"""Does google-lint on c++ files. + +The goal of this script is to identify places in the code that *may* +be in non-compliance with google style. It does not attempt to fix +up these problems -- the point is to educate. It does also not +attempt to find all problems, or to ensure that everything it does +find is legitimately a problem. + +In particular, we can get very confused by /* and // inside strings! +We do a small hack, which is to ignore //'s with "'s after them on the +same line, but it is far from perfect (in either direction). +""" + +import codecs +import copy +import getopt +import math # for log +import os +import re +import sre_compile +import string +import sys +import unicodedata + + +_USAGE = """ +Syntax: cpplint.py [--verbose=#] [--output=vs7] [--filter=-x,+y,...] + [--counting=total|toplevel|detailed] [--root=subdir] + [--linelength=digits] + [file] ... + + The style guidelines this tries to follow are those in + http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml + + Every problem is given a confidence score from 1-5, with 5 meaning we are + certain of the problem, and 1 meaning it could be a legitimate construct. + This will miss some errors, and is not a substitute for a code review. + + To suppress false-positive errors of a certain category, add a + 'NOLINT(category)' comment to the line. NOLINT or NOLINT(*) + suppresses errors of all categories on that line. + + The files passed in will be linted; at least one file must be provided. + Default linted extensions are .cc, .cpp, .cu, .cuh and .h. Change the + extensions with the --extensions flag. + + Flags: + + output=vs7 + By default, the output is formatted to ease emacs parsing. Visual Studio + compatible output (vs7) may also be used. Other formats are unsupported. + + verbose=# + Specify a number 0-5 to restrict errors to certain verbosity levels. + + filter=-x,+y,... + Specify a comma-separated list of category-filters to apply: only + error messages whose category names pass the filters will be printed. + (Category names are printed with the message and look like + "[whitespace/indent]".) Filters are evaluated left to right. + "-FOO" and "FOO" means "do not print categories that start with FOO". + "+FOO" means "do print categories that start with FOO". + + Examples: --filter=-whitespace,+whitespace/braces + --filter=whitespace,runtime/printf,+runtime/printf_format + --filter=-,+build/include_what_you_use + + To see a list of all the categories used in cpplint, pass no arg: + --filter= + + counting=total|toplevel|detailed + The total number of errors found is always printed. If + 'toplevel' is provided, then the count of errors in each of + the top-level categories like 'build' and 'whitespace' will + also be printed. If 'detailed' is provided, then a count + is provided for each category like 'build/class'. + + root=subdir + The root directory used for deriving header guard CPP variable. 
+ By default, the header guard CPP variable is calculated as the relative + path to the directory that contains .git, .hg, or .svn. When this flag + is specified, the relative path is calculated from the specified + directory. If the specified directory does not exist, this flag is + ignored. + + Examples: + Assuming that src/.git exists, the header guard CPP variables for + src/chrome/browser/ui/browser.h are: + + No flag => CHROME_BROWSER_UI_BROWSER_H_ + --root=chrome => BROWSER_UI_BROWSER_H_ + --root=chrome/browser => UI_BROWSER_H_ + + linelength=digits + This is the allowed line length for the project. The default value is + 80 characters. + + Examples: + --linelength=120 + + extensions=extension,extension,... + The allowed file extensions that cpplint will check + + Examples: + --extensions=hpp,cpp +""" + +# We categorize each error message we print. Here are the categories. +# We want an explicit list so we can list them all in cpplint --filter=. +# If you add a new error message with a new category, add it to the list +# here! cpplint_unittest.py should tell you if you forget to do this. +_ERROR_CATEGORIES = [ + 'build/class', + 'build/deprecated', + 'build/endif_comment', + 'build/explicit_make_pair', + 'build/forward_decl', + 'build/header_guard', + 'build/include', + 'build/include_alpha', + 'build/include_order', + 'build/include_what_you_use', + 'build/namespaces', + 'build/printf_format', + 'build/storage_class', + 'legal/copyright', + 'readability/alt_tokens', + 'readability/braces', + 'readability/casting', + 'readability/check', + 'readability/constructors', + 'readability/fn_size', + 'readability/function', + 'readability/multiline_comment', + 'readability/multiline_string', + 'readability/namespace', + 'readability/nolint', + 'readability/nul', + 'readability/streams', + 'readability/todo', + 'readability/utf8', + 'runtime/arrays', + 'runtime/casting', + 'runtime/explicit', + 'runtime/int', + 'runtime/init', + 'runtime/invalid_increment', + 'runtime/member_string_references', + 'runtime/memset', + 'runtime/operator', + 'runtime/printf', + 'runtime/printf_format', + 'runtime/references', + 'runtime/string', + 'runtime/threadsafe_fn', + 'runtime/vlog', + 'whitespace/blank_line', + 'whitespace/braces', + 'whitespace/comma', + 'whitespace/comments', + 'whitespace/empty_conditional_body', + 'whitespace/empty_loop_body', + 'whitespace/end_of_line', + 'whitespace/ending_newline', + 'whitespace/forcolon', + 'whitespace/indent', + 'whitespace/line_length', + 'whitespace/newline', + 'whitespace/operators', + 'whitespace/parens', + 'whitespace/semicolon', + 'whitespace/tab', + 'whitespace/todo' + ] + +# The default state of the category filter. This is overridden by the --filter= +# flag. By default all errors are on, so only add categories here that should be +# off by default (i.e., categories that must be enabled by the --filter= flags). +# All entries here should start with a '-' or '+', as in the --filter= flag. +_DEFAULT_FILTERS = [] + +# We used to check for high-bit characters, but after much discussion we +# decided those were OK, as long as they were in UTF-8 and didn't represent +# hard-coded international strings, which belong in a separate i18n file.
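As a concrete illustration of the filter semantics described in the usage text above (filters are evaluated left to right, with '-' suppressing and '+' re-enabling category prefixes), here is a minimal standalone Python sketch; the function name category_passes_filters is illustrative and not part of cpplint, whose own stateful implementation appears in _ShouldPrintError further below:

def category_passes_filters(category, filters):
    # Evaluate filters left to right; the last matching prefix wins.
    is_filtered = False
    for one_filter in filters:
        if one_filter.startswith('-') and category.startswith(one_filter[1:]):
            is_filtered = True
        elif one_filter.startswith('+') and category.startswith(one_filter[1:]):
            is_filtered = False
    return not is_filtered

# Example mirroring "--filter=-whitespace,+whitespace/braces":
assert not category_passes_filters('whitespace/indent',
                                   ['-whitespace', '+whitespace/braces'])
assert category_passes_filters('whitespace/braces',
                               ['-whitespace', '+whitespace/braces'])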
+ + +# C++ headers +_CPP_HEADERS = frozenset([ + # Legacy + 'algobase.h', + 'algo.h', + 'alloc.h', + 'builtinbuf.h', + 'bvector.h', + 'complex.h', + 'defalloc.h', + 'deque.h', + 'editbuf.h', + 'fstream.h', + 'function.h', + 'hash_map', + 'hash_map.h', + 'hash_set', + 'hash_set.h', + 'hashtable.h', + 'heap.h', + 'indstream.h', + 'iomanip.h', + 'iostream.h', + 'istream.h', + 'iterator.h', + 'list.h', + 'map.h', + 'multimap.h', + 'multiset.h', + 'ostream.h', + 'pair.h', + 'parsestream.h', + 'pfstream.h', + 'procbuf.h', + 'pthread_alloc', + 'pthread_alloc.h', + 'rope', + 'rope.h', + 'ropeimpl.h', + 'set.h', + 'slist', + 'slist.h', + 'stack.h', + 'stdiostream.h', + 'stl_alloc.h', + 'stl_relops.h', + 'streambuf.h', + 'stream.h', + 'strfile.h', + 'strstream.h', + 'tempbuf.h', + 'tree.h', + 'type_traits.h', + 'vector.h', + # 17.6.1.2 C++ library headers + 'algorithm', + 'array', + 'atomic', + 'bitset', + 'chrono', + 'codecvt', + 'complex', + 'condition_variable', + 'deque', + 'exception', + 'forward_list', + 'fstream', + 'functional', + 'future', + 'initializer_list', + 'iomanip', + 'ios', + 'iosfwd', + 'iostream', + 'istream', + 'iterator', + 'limits', + 'list', + 'locale', + 'map', + 'memory', + 'mutex', + 'new', + 'numeric', + 'ostream', + 'queue', + 'random', + 'ratio', + 'regex', + 'set', + 'sstream', + 'stack', + 'stdexcept', + 'streambuf', + 'string', + 'strstream', + 'system_error', + 'thread', + 'tuple', + 'typeindex', + 'typeinfo', + 'type_traits', + 'unordered_map', + 'unordered_set', + 'utility', + 'valarray', + 'vector', + # 17.6.1.2 C++ headers for C library facilities + 'cassert', + 'ccomplex', + 'cctype', + 'cerrno', + 'cfenv', + 'cfloat', + 'cinttypes', + 'ciso646', + 'climits', + 'clocale', + 'cmath', + 'csetjmp', + 'csignal', + 'cstdalign', + 'cstdarg', + 'cstdbool', + 'cstddef', + 'cstdint', + 'cstdio', + 'cstdlib', + 'cstring', + 'ctgmath', + 'ctime', + 'cuchar', + 'cwchar', + 'cwctype', + ]) + +# Assertion macros. These are defined in base/logging.h and +# testing/base/gunit.h. Note that the _M versions need to come first +# for substring matching to work. +_CHECK_MACROS = [ + 'DCHECK', 'CHECK', + 'EXPECT_TRUE_M', 'EXPECT_TRUE', + 'ASSERT_TRUE_M', 'ASSERT_TRUE', + 'EXPECT_FALSE_M', 'EXPECT_FALSE', + 'ASSERT_FALSE_M', 'ASSERT_FALSE', + ] + +# Replacement macros for CHECK/DCHECK/EXPECT_TRUE/EXPECT_FALSE +_CHECK_REPLACEMENT = dict([(m, {}) for m in _CHECK_MACROS]) + +for op, replacement in [('==', 'EQ'), ('!=', 'NE'), + ('>=', 'GE'), ('>', 'GT'), + ('<=', 'LE'), ('<', 'LT')]: + _CHECK_REPLACEMENT['DCHECK'][op] = 'DCHECK_%s' % replacement + _CHECK_REPLACEMENT['CHECK'][op] = 'CHECK_%s' % replacement + _CHECK_REPLACEMENT['EXPECT_TRUE'][op] = 'EXPECT_%s' % replacement + _CHECK_REPLACEMENT['ASSERT_TRUE'][op] = 'ASSERT_%s' % replacement + _CHECK_REPLACEMENT['EXPECT_TRUE_M'][op] = 'EXPECT_%s_M' % replacement + _CHECK_REPLACEMENT['ASSERT_TRUE_M'][op] = 'ASSERT_%s_M' % replacement + +for op, inv_replacement in [('==', 'NE'), ('!=', 'EQ'), + ('>=', 'LT'), ('>', 'LE'), + ('<=', 'GT'), ('<', 'GE')]: + _CHECK_REPLACEMENT['EXPECT_FALSE'][op] = 'EXPECT_%s' % inv_replacement + _CHECK_REPLACEMENT['ASSERT_FALSE'][op] = 'ASSERT_%s' % inv_replacement + _CHECK_REPLACEMENT['EXPECT_FALSE_M'][op] = 'EXPECT_%s_M' % inv_replacement + _CHECK_REPLACEMENT['ASSERT_FALSE_M'][op] = 'ASSERT_%s_M' % inv_replacement + +# Alternative tokens and their replacements. For full list, see section 2.5 +# Alternative tokens [lex.digraph] in the C++ standard. 
+# +# Digraphs (such as '%:') are not included here since it's a mess to +# match those on a word boundary. +_ALT_TOKEN_REPLACEMENT = { + 'and': '&&', + 'bitor': '|', + 'or': '||', + 'xor': '^', + 'compl': '~', + 'bitand': '&', + 'and_eq': '&=', + 'or_eq': '|=', + 'xor_eq': '^=', + 'not': '!', + 'not_eq': '!=' + } + +# Compile regular expression that matches all the above keywords. The "[ =()]" +# bit is meant to avoid matching these keywords outside of boolean expressions. +# +# False positives include C-style multi-line comments and multi-line strings +# but those have always been troublesome for cpplint. +_ALT_TOKEN_REPLACEMENT_PATTERN = re.compile( + r'[ =()](' + ('|'.join(_ALT_TOKEN_REPLACEMENT.keys())) + r')(?=[ (]|$)') + + +# These constants define types of headers for use with +# _IncludeState.CheckNextIncludeOrder(). +_C_SYS_HEADER = 1 +_CPP_SYS_HEADER = 2 +_LIKELY_MY_HEADER = 3 +_POSSIBLE_MY_HEADER = 4 +_OTHER_HEADER = 5 + +# These constants define the current inline assembly state +_NO_ASM = 0 # Outside of inline assembly block +_INSIDE_ASM = 1 # Inside inline assembly block +_END_ASM = 2 # Last line of inline assembly block +_BLOCK_ASM = 3 # The whole block is an inline assembly block + +# Match start of assembly blocks +_MATCH_ASM = re.compile(r'^\s*(?:asm|_asm|__asm|__asm__)' + r'(?:\s+(volatile|__volatile__))?' + r'\s*[{(]') + + +_regexp_compile_cache = {} + +# Finds occurrences of NOLINT or NOLINT(...). +_RE_SUPPRESSION = re.compile(r'\bNOLINT\b(\([^)]*\))?') + +# {str, set(int)}: a map from error categories to sets of linenumbers +# on which those errors are expected and should be suppressed. +_error_suppressions = {} + +# The root directory used for deriving header guard CPP variable. +# This is set by --root flag. +_root = None + +# The allowed line length of files. +# This is set by --linelength flag. +_line_length = 80 + +# The allowed extensions for file names +# This is set by --extensions flag. +_valid_extensions = set(['cc', 'h', 'cpp', 'cu', 'cuh']) + +def ParseNolintSuppressions(filename, raw_line, linenum, error): + """Updates the global list of error-suppressions. + + Parses any NOLINT comments on the current line, updating the global + error_suppressions store. Reports an error if the NOLINT comment + was malformed. + + Args: + filename: str, the name of the input file. + raw_line: str, the line of input text, with comments. + linenum: int, the number of the current line. + error: function, an error handler. + """ + # FIXME(adonovan): "NOLINT(" is misparsed as NOLINT(*). + matched = _RE_SUPPRESSION.search(raw_line) + if matched: + category = matched.group(1) + if category in (None, '(*)'): # => "suppress all" + _error_suppressions.setdefault(None, set()).add(linenum) + else: + if category.startswith('(') and category.endswith(')'): + category = category[1:-1] + if category in _ERROR_CATEGORIES: + _error_suppressions.setdefault(category, set()).add(linenum) + else: + error(filename, linenum, 'readability/nolint', 5, + 'Unknown NOLINT error category: %s' % category) + + +def ResetNolintSuppressions(): + "Resets the set of NOLINT suppressions to empty." + _error_suppressions.clear() + + +def IsErrorSuppressedByNolint(category, linenum): + """Returns true if the specified error category is suppressed on this line. + + Consults the global error_suppressions map populated by + ParseNolintSuppressions/ResetNolintSuppressions. + + Args: + category: str, the category of the error. + linenum: int, the current line number. 
+ Returns: + bool, True iff the error should be suppressed due to a NOLINT comment. + """ + return (linenum in _error_suppressions.get(category, set()) or + linenum in _error_suppressions.get(None, set())) + +def Match(pattern, s): + """Matches the string with the pattern, caching the compiled regexp.""" + # The regexp compilation caching is inlined in both Match and Search for + # performance reasons; factoring it out into a separate function turns out + # to be noticeably expensive. + if pattern not in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].match(s) + + +def ReplaceAll(pattern, rep, s): + """Replaces instances of pattern in a string with a replacement. + + The compiled regex is kept in a cache shared by Match and Search. + + Args: + pattern: regex pattern + rep: replacement text + s: search string + + Returns: + string with replacements made (or original string if no replacements) + """ + if pattern not in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].sub(rep, s) + + +def Search(pattern, s): + """Searches the string for the pattern, caching the compiled regexp.""" + if pattern not in _regexp_compile_cache: + _regexp_compile_cache[pattern] = sre_compile.compile(pattern) + return _regexp_compile_cache[pattern].search(s) + + +class _IncludeState(dict): + """Tracks line numbers for includes, and the order in which includes appear. + + As a dict, an _IncludeState object serves as a mapping between include + filename and line number on which that file was included. + + Call CheckNextIncludeOrder() once for each header in the file, passing + in the type constants defined above. Calls in an illegal order will + raise an _IncludeError with an appropriate error message. + + """ + # self._section will move monotonically through this set. If it ever + # needs to move backwards, CheckNextIncludeOrder will raise an error. + _INITIAL_SECTION = 0 + _MY_H_SECTION = 1 + _C_SECTION = 2 + _CPP_SECTION = 3 + _OTHER_H_SECTION = 4 + + _TYPE_NAMES = { + _C_SYS_HEADER: 'C system header', + _CPP_SYS_HEADER: 'C++ system header', + _LIKELY_MY_HEADER: 'header this file implements', + _POSSIBLE_MY_HEADER: 'header this file may implement', + _OTHER_HEADER: 'other header', + } + _SECTION_NAMES = { + _INITIAL_SECTION: "... nothing. (This can't be an error.)", + _MY_H_SECTION: 'a header this file implements', + _C_SECTION: 'C system header', + _CPP_SECTION: 'C++ system header', + _OTHER_H_SECTION: 'other header', + } + + def __init__(self): + dict.__init__(self) + self.ResetSection() + + def ResetSection(self): + # The name of the current section. + self._section = self._INITIAL_SECTION + # The path of last found header. + self._last_header = '' + + def SetLastHeader(self, header_path): + self._last_header = header_path + + def CanonicalizeAlphabeticalOrder(self, header_path): + """Returns a path canonicalized for alphabetical comparison. + + - replaces "-" with "_" so they both cmp the same. + - removes '-inl' since we don't require them to be after the main header. + - lowercase everything, just in case. + + Args: + header_path: Path to be canonicalized. + + Returns: + Canonicalized path. + """ + return header_path.replace('-inl.h', '.h').replace('-', '_').lower() + + def IsInAlphabeticalOrder(self, clean_lines, linenum, header_path): + """Check if a header is in alphabetical order with the previous header. 
+ + Args: + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + header_path: Canonicalized header to be checked. + + Returns: + Returns true if the header is in alphabetical order. + """ + # If previous section is different from current section, _last_header will + # be reset to empty string, so it's always less than current header. + # + # If previous line was a blank line, assume that the headers are + # intentionally sorted the way they are. + if (self._last_header > header_path and + not Match(r'^\s*$', clean_lines.elided[linenum - 1])): + return False + return True + + def CheckNextIncludeOrder(self, header_type): + """Returns a non-empty error message if the next header is out of order. + + This function also updates the internal state to be ready to check + the next include. + + Args: + header_type: One of the _XXX_HEADER constants defined above. + + Returns: + The empty string if the header is in the right order, or an + error message describing what's wrong. + + """ + error_message = ('Found %s after %s' % + (self._TYPE_NAMES[header_type], + self._SECTION_NAMES[self._section])) + + last_section = self._section + + if header_type == _C_SYS_HEADER: + if self._section <= self._C_SECTION: + self._section = self._C_SECTION + else: + self._last_header = '' + return error_message + elif header_type == _CPP_SYS_HEADER: + if self._section <= self._CPP_SECTION: + self._section = self._CPP_SECTION + else: + self._last_header = '' + return error_message + elif header_type == _LIKELY_MY_HEADER: + if self._section <= self._MY_H_SECTION: + self._section = self._MY_H_SECTION + else: + self._section = self._OTHER_H_SECTION + elif header_type == _POSSIBLE_MY_HEADER: + if self._section <= self._MY_H_SECTION: + self._section = self._MY_H_SECTION + else: + # This will always be the fallback because we're not sure + # enough that the header is associated with this file. + self._section = self._OTHER_H_SECTION + else: + assert header_type == _OTHER_HEADER + self._section = self._OTHER_H_SECTION + + if last_section != self._section: + self._last_header = '' + + return '' + + +class _CppLintState(object): + """Maintains module-wide state.""" + + def __init__(self): + self.verbose_level = 1 # global setting. + self.error_count = 0 # global count of reported errors + # filters to apply when emitting error messages + self.filters = _DEFAULT_FILTERS[:] + self.counting = 'total' # In what way are we counting errors? + self.errors_by_category = {} # string to int dict storing error counts + + # output format: + # "emacs" - format that emacs can parse (default) + # "vs7" - format that Microsoft Visual Studio 7 can parse + self.output_format = 'emacs' + + def SetOutputFormat(self, output_format): + """Sets the output format for errors.""" + self.output_format = output_format + + def SetVerboseLevel(self, level): + """Sets the module's verbosity, and returns the previous setting.""" + last_verbose_level = self.verbose_level + self.verbose_level = level + return last_verbose_level + + def SetCountingStyle(self, counting_style): + """Sets the module's counting options.""" + self.counting = counting_style + + def SetFilters(self, filters): + """Sets the error-message filters. + + These filters are applied when deciding whether to emit a given + error message. + + Args: + filters: A string of comma-separated filters (eg "+whitespace/indent"). + Each filter should start with + or -; else we die.
+ + Raises: + ValueError: The comma-separated filters did not all start with '+' or '-'. + E.g. "-,+whitespace,-whitespace/indent,whitespace/badfilter" + """ + # Default filters always have less priority than the flag ones. + self.filters = _DEFAULT_FILTERS[:] + for filt in filters.split(','): + clean_filt = filt.strip() + if clean_filt: + self.filters.append(clean_filt) + for filt in self.filters: + if not (filt.startswith('+') or filt.startswith('-')): + raise ValueError('Every filter in --filters must start with + or -' + ' (%s does not)' % filt) + + def ResetErrorCounts(self): + """Sets the module's error statistic back to zero.""" + self.error_count = 0 + self.errors_by_category = {} + + def IncrementErrorCount(self, category): + """Bumps the module's error statistic.""" + self.error_count += 1 + if self.counting in ('toplevel', 'detailed'): + if self.counting != 'detailed': + category = category.split('/')[0] + if category not in self.errors_by_category: + self.errors_by_category[category] = 0 + self.errors_by_category[category] += 1 + + def PrintErrorCounts(self): + """Print a summary of errors by category, and the total.""" + for category, count in self.errors_by_category.iteritems(): + sys.stderr.write('Category \'%s\' errors found: %d\n' % + (category, count)) + sys.stderr.write('Total errors found: %d\n' % self.error_count) + +_cpplint_state = _CppLintState() + + +def _OutputFormat(): + """Gets the module's output format.""" + return _cpplint_state.output_format + + +def _SetOutputFormat(output_format): + """Sets the module's output format.""" + _cpplint_state.SetOutputFormat(output_format) + + +def _VerboseLevel(): + """Returns the module's verbosity setting.""" + return _cpplint_state.verbose_level + + +def _SetVerboseLevel(level): + """Sets the module's verbosity, and returns the previous setting.""" + return _cpplint_state.SetVerboseLevel(level) + + +def _SetCountingStyle(level): + """Sets the module's counting options.""" + _cpplint_state.SetCountingStyle(level) + + +def _Filters(): + """Returns the module's list of output filters, as a list.""" + return _cpplint_state.filters + + +def _SetFilters(filters): + """Sets the module's error-message filters. + + These filters are applied when deciding whether to emit a given + error message. + + Args: + filters: A string of comma-separated filters (eg "whitespace/indent"). + Each filter should start with + or -; else we die. + """ + _cpplint_state.SetFilters(filters) + + +class _FunctionState(object): + """Tracks current function name and the number of lines in its body.""" + + _NORMAL_TRIGGER = 250 # for --v=0, 500 for --v=1, etc. + _TEST_TRIGGER = 400 # about 50% more than _NORMAL_TRIGGER. + + def __init__(self): + self.in_a_function = False + self.lines_in_function = 0 + self.current_function = '' + + def Begin(self, function_name): + """Start analyzing function body. + + Args: + function_name: The name of the function being tracked. + """ + self.in_a_function = True + self.lines_in_function = 0 + self.current_function = function_name + + def Count(self): + """Count line in current function body.""" + if self.in_a_function: + self.lines_in_function += 1 + + def Check(self, error, filename, linenum): + """Report if too many lines in function body. + + Args: + error: The function to call with any errors found. + filename: The name of the current file. + linenum: The number of the line to check. 
+ """ + if Match(r'T(EST|est)', self.current_function): + base_trigger = self._TEST_TRIGGER + else: + base_trigger = self._NORMAL_TRIGGER + trigger = base_trigger * 2**_VerboseLevel() + + if self.lines_in_function > trigger: + error_level = int(math.log(self.lines_in_function / base_trigger, 2)) + # 50 => 0, 100 => 1, 200 => 2, 400 => 3, 800 => 4, 1600 => 5, ... + if error_level > 5: + error_level = 5 + error(filename, linenum, 'readability/fn_size', error_level, + 'Small and focused functions are preferred:' + ' %s has %d non-comment lines' + ' (error triggered by exceeding %d lines).' % ( + self.current_function, self.lines_in_function, trigger)) + + def End(self): + """Stop analyzing function body.""" + self.in_a_function = False + + +class _IncludeError(Exception): + """Indicates a problem with the include order in a file.""" + pass + + +class FileInfo: + """Provides utility functions for filenames. + + FileInfo provides easy access to the components of a file's path + relative to the project root. + """ + + def __init__(self, filename): + self._filename = filename + + def FullName(self): + """Make Windows paths like Unix.""" + return os.path.abspath(self._filename).replace('\\', '/') + + def RepositoryName(self): + """FullName after removing the local path to the repository. + + If we have a real absolute path name here we can try to do something smart: + detecting the root of the checkout and truncating /path/to/checkout from + the name so that we get header guards that don't include things like + "C:\Documents and Settings\..." or "/home/username/..." in them and thus + people on different computers who have checked the source out to different + locations won't see bogus errors. + """ + fullname = self.FullName() + + if os.path.exists(fullname): + project_dir = os.path.dirname(fullname) + + if os.path.exists(os.path.join(project_dir, ".svn")): + # If there's a .svn file in the current directory, we recursively look + # up the directory tree for the top of the SVN checkout + root_dir = project_dir + one_up_dir = os.path.dirname(root_dir) + while os.path.exists(os.path.join(one_up_dir, ".svn")): + root_dir = os.path.dirname(root_dir) + one_up_dir = os.path.dirname(one_up_dir) + + prefix = os.path.commonprefix([root_dir, project_dir]) + return fullname[len(prefix) + 1:] + + # Not SVN <= 1.6? Try to find a git, hg, or svn top level directory by + # searching up from the current path. + root_dir = os.path.dirname(fullname) + while (root_dir != os.path.dirname(root_dir) and + not os.path.exists(os.path.join(root_dir, ".git")) and + not os.path.exists(os.path.join(root_dir, ".hg")) and + not os.path.exists(os.path.join(root_dir, ".svn"))): + root_dir = os.path.dirname(root_dir) + + if (os.path.exists(os.path.join(root_dir, ".git")) or + os.path.exists(os.path.join(root_dir, ".hg")) or + os.path.exists(os.path.join(root_dir, ".svn"))): + prefix = os.path.commonprefix([root_dir, project_dir]) + return fullname[len(prefix) + 1:] + + # Don't know what to do; header guard warnings may be wrong... + return fullname + + def Split(self): + """Splits the file into the directory, basename, and extension. + + For 'chrome/browser/browser.cc', Split() would + return ('chrome/browser', 'browser', '.cc') + + Returns: + A tuple of (directory, basename, extension). 
+ """ + + googlename = self.RepositoryName() + project, rest = os.path.split(googlename) + return (project,) + os.path.splitext(rest) + + def BaseName(self): + """File base name - text after the final slash, before the final period.""" + return self.Split()[1] + + def Extension(self): + """File extension - text following the final period.""" + return self.Split()[2] + + def NoExtension(self): + """File has no source file extension.""" + return '/'.join(self.Split()[0:2]) + + def IsSource(self): + """File has a source file extension.""" + return self.Extension()[1:] in ('c', 'cc', 'cpp', 'cxx') + + +def _ShouldPrintError(category, confidence, linenum): + """If confidence >= verbose, category passes filter and is not suppressed.""" + + # There are three ways we might decide not to print an error message: + # a "NOLINT(category)" comment appears in the source, + # the verbosity level isn't high enough, or the filters filter it out. + if IsErrorSuppressedByNolint(category, linenum): + return False + if confidence < _cpplint_state.verbose_level: + return False + + is_filtered = False + for one_filter in _Filters(): + if one_filter.startswith('-'): + if category.startswith(one_filter[1:]): + is_filtered = True + elif one_filter.startswith('+'): + if category.startswith(one_filter[1:]): + is_filtered = False + else: + assert False # should have been checked for in SetFilter. + if is_filtered: + return False + + return True + + +def Error(filename, linenum, category, confidence, message): + """Logs the fact we've found a lint error. + + We log where the error was found, and also our confidence in the error, + that is, how certain we are this is a legitimate style regression, and + not a misidentification or a use that's sometimes justified. + + False positives can be suppressed by the use of + "cpplint(category)" comments on the offending line. These are + parsed into _error_suppressions. + + Args: + filename: The name of the file containing the error. + linenum: The number of the line containing the error. + category: A string used to describe the "category" this bug + falls under: "whitespace", say, or "runtime". Categories + may have a hierarchy separated by slashes: "whitespace/indent". + confidence: A number from 1-5 representing a confidence score for + the error, with 5 meaning that we are certain of the problem, + and 1 meaning that it could be a legitimate construct. + message: The error message. + """ + if _ShouldPrintError(category, confidence, linenum): + _cpplint_state.IncrementErrorCount(category) + if _cpplint_state.output_format == 'vs7': + sys.stderr.write('%s(%s): %s [%s] [%d]\n' % ( + filename, linenum, message, category, confidence)) + elif _cpplint_state.output_format == 'eclipse': + sys.stderr.write('%s:%s: warning: %s [%s] [%d]\n' % ( + filename, linenum, message, category, confidence)) + else: + sys.stderr.write('%s:%s: %s [%s] [%d]\n' % ( + filename, linenum, message, category, confidence)) + + +# Matches standard C++ escape sequences per 2.13.2.3 of the C++ standard. +_RE_PATTERN_CLEANSE_LINE_ESCAPES = re.compile( + r'\\([abfnrtv?"\\\']|\d+|x[0-9a-fA-F]+)') +# Matches strings. Escape codes should already be removed by ESCAPES. +_RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES = re.compile(r'"[^"]*"') +# Matches characters. Escape codes should already be removed by ESCAPES. +_RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES = re.compile(r"'.'") +# Matches multi-line C++ comments. 
+# This RE is a little bit more complicated than one might expect, because we +# have to be careful about how spaces are removed, so we can handle comments +# inside statements better. +# The current rule is: We only clear spaces from both sides when we're at the +# end of the line. Otherwise, we try to remove spaces from the right side; +# if this doesn't work we try the left side, but only if there's a non-word +# character on the right. +_RE_PATTERN_CLEANSE_LINE_C_COMMENTS = re.compile( + r"""(\s*/\*.*\*/\s*$| + /\*.*\*/\s+| + \s+/\*.*\*/(?=\W)| + /\*.*\*/)""", re.VERBOSE) + + +def IsCppString(line): + """Does the line end such that the next symbol is inside a string constant? + + This function considers neither single-line nor multi-line comments. + + Args: + line: a partial line of code, from position 0 to n. + + Returns: + True, if the next character appended to 'line' would be inside a + string constant. + """ + + line = line.replace(r'\\', 'XX') # after this, \\" does not match to \" + return ((line.count('"') - line.count(r'\"') - line.count("'\"'")) & 1) == 1 + + +def CleanseRawStrings(raw_lines): + """Removes C++11 raw strings from lines. + + Before: + static const char kData[] = R"( + multi-line string + )"; + + After: + static const char kData[] = "" + (replaced by blank line) + ""; + + Args: + raw_lines: list of raw lines. + + Returns: + list of lines with C++11 raw strings replaced by empty strings. + """ + + delimiter = None + lines_without_raw_strings = [] + for line in raw_lines: + if delimiter: + # Inside a raw string, look for the end + end = line.find(delimiter) + if end >= 0: + # Found the end of the string, match leading space for this + # line and resume copying the original lines, and also insert + # a "" on the last line. + leading_space = Match(r'^(\s*)\S', line) + line = leading_space.group(1) + '""' + line[end + len(delimiter):] + delimiter = None + else: + # Haven't found the end yet, append a blank line. + line = '' + + else: + # Look for beginning of a raw string. + # See 2.14.15 [lex.string] for syntax. + matched = Match(r'^(.*)\b(?:R|u8R|uR|UR|LR)"([^\s\\()]*)\((.*)$', line) + if matched: + delimiter = ')' + matched.group(2) + '"' + + end = matched.group(3).find(delimiter) + if end >= 0: + # Raw string ended on same line + line = (matched.group(1) + '""' + + matched.group(3)[end + len(delimiter):]) + delimiter = None + else: + # Start of a multi-line raw string + line = matched.group(1) + '""' + + lines_without_raw_strings.append(line) + + # TODO(unknown): if delimiter is not None here, we might want to + # emit a warning for unterminated string. + return lines_without_raw_strings + + +def FindNextMultiLineCommentStart(lines, lineix): + """Find the beginning marker for a multiline comment.""" + while lineix < len(lines): + if lines[lineix].strip().startswith('/*'): + # Only return this marker if the comment goes beyond this line + if lines[lineix].strip().find('*/', 2) < 0: + return lineix + lineix += 1 + return len(lines) + + +def FindNextMultiLineCommentEnd(lines, lineix): + """We are inside a comment, find the end marker.""" + while lineix < len(lines): + if lines[lineix].strip().endswith('*/'): + return lineix + lineix += 1 + return len(lines) + + +def RemoveMultiLineCommentsFromRange(lines, begin, end): + """Clears a range of lines for multi-line comments.""" + # Having // dummy comments makes the lines non-empty, so we will not get + # unnecessary blank line warnings later in the code.
+ for i in range(begin, end): + lines[i] = '// dummy' + + +def RemoveMultiLineComments(filename, lines, error): + """Removes multiline (c-style) comments from lines.""" + lineix = 0 + while lineix < len(lines): + lineix_begin = FindNextMultiLineCommentStart(lines, lineix) + if lineix_begin >= len(lines): + return + lineix_end = FindNextMultiLineCommentEnd(lines, lineix_begin) + if lineix_end >= len(lines): + error(filename, lineix_begin + 1, 'readability/multiline_comment', 5, + 'Could not find end of multi-line comment') + return + RemoveMultiLineCommentsFromRange(lines, lineix_begin, lineix_end + 1) + lineix = lineix_end + 1 + + +def CleanseComments(line): + """Removes //-comments and single-line C-style /* */ comments. + + Args: + line: A line of C++ source. + + Returns: + The line with single-line comments removed. + """ + commentpos = line.find('//') + if commentpos != -1 and not IsCppString(line[:commentpos]): + line = line[:commentpos].rstrip() + # get rid of /* ... */ + return _RE_PATTERN_CLEANSE_LINE_C_COMMENTS.sub('', line) + + +class CleansedLines(object): + """Holds 3 copies of all lines with different preprocessing applied to them. + + 1) elided member contains lines without strings and comments, + 2) lines member contains lines without comments, and + 3) raw_lines member contains all the lines without processing. + All these three members are lists of the same length. + """ + + def __init__(self, lines): + self.elided = [] + self.lines = [] + self.raw_lines = lines + self.num_lines = len(lines) + self.lines_without_raw_strings = CleanseRawStrings(lines) + for linenum in range(len(self.lines_without_raw_strings)): + self.lines.append(CleanseComments( + self.lines_without_raw_strings[linenum])) + elided = self._CollapseStrings(self.lines_without_raw_strings[linenum]) + self.elided.append(CleanseComments(elided)) + + def NumLines(self): + """Returns the number of lines represented.""" + return self.num_lines + + @staticmethod + def _CollapseStrings(elided): + """Collapses strings and chars on a line to simple "" or '' blocks. + + We nix strings first so we're not fooled by text like '"http://"' + + Args: + elided: The line being processed. + + Returns: + The line with collapsed strings. + """ + if not _RE_PATTERN_INCLUDE.match(elided): + # Remove escaped characters first to make quote/single quote collapsing + # basic. Things that look like escaped characters shouldn't occur + # outside of strings and chars. + elided = _RE_PATTERN_CLEANSE_LINE_ESCAPES.sub('', elided) + elided = _RE_PATTERN_CLEANSE_LINE_SINGLE_QUOTES.sub("''", elided) + elided = _RE_PATTERN_CLEANSE_LINE_DOUBLE_QUOTES.sub('""', elided) + return elided + + +def FindEndOfExpressionInLine(line, startpos, depth, startchar, endchar): + """Find the position just after the matching endchar. + + Args: + line: a CleansedLines line. + startpos: start searching at this position. + depth: nesting level at startpos. + startchar: expression opening character. + endchar: expression closing character. + + Returns: + On finding matching endchar: (index just after matching endchar, 0) + Otherwise: (-1, new depth at end of this line) + """ + for i in xrange(startpos, len(line)): + if line[i] == startchar: + depth += 1 + elif line[i] == endchar: + depth -= 1 + if depth == 0: + return (i + 1, 0) + return (-1, depth) + + +def CloseExpression(clean_lines, linenum, pos): + """If input points to ( or { or [ or <, finds the position that closes it.
+
+ If lines[linenum][pos] points to a '(' or '{' or '[' or '<', finds the
+ linenum/pos that correspond to the closing of the expression.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ pos: A position on the line.
+
+ Returns:
+ A tuple (line, linenum, pos) pointer *past* the closing brace, or
+ (line, len(lines), -1) if we never find a close. Note we ignore
+ strings and comments when matching; and the line we return is the
+ 'cleansed' line at linenum.
+ """
+
+ line = clean_lines.elided[linenum]
+ startchar = line[pos]
+ if startchar not in '({[<':
+ return (line, clean_lines.NumLines(), -1)
+ if startchar == '(': endchar = ')'
+ if startchar == '[': endchar = ']'
+ if startchar == '{': endchar = '}'
+ if startchar == '<': endchar = '>'
+
+ # Check first line
+ (end_pos, num_open) = FindEndOfExpressionInLine(
+ line, pos, 0, startchar, endchar)
+ if end_pos > -1:
+ return (line, linenum, end_pos)
+
+ # Continue scanning forward
+ while linenum < clean_lines.NumLines() - 1:
+ linenum += 1
+ line = clean_lines.elided[linenum]
+ (end_pos, num_open) = FindEndOfExpressionInLine(
+ line, 0, num_open, startchar, endchar)
+ if end_pos > -1:
+ return (line, linenum, end_pos)
+
+ # Did not find endchar before end of file, give up
+ return (line, clean_lines.NumLines(), -1)
+
+
+def FindStartOfExpressionInLine(line, endpos, depth, startchar, endchar):
+ """Find position at the matching startchar.
+
+ This is almost the reverse of FindEndOfExpressionInLine, but note
+ that the input position and returned position differ by 1.
+
+ Args:
+ line: a CleansedLines line.
+ endpos: start searching at this position.
+ depth: nesting level at endpos.
+ startchar: expression opening character.
+ endchar: expression closing character.
+
+ Returns:
+ On finding matching startchar: (index at matching startchar, 0)
+ Otherwise: (-1, new depth at beginning of this line)
+ """
+ for i in xrange(endpos, -1, -1):
+ if line[i] == endchar:
+ depth += 1
+ elif line[i] == startchar:
+ depth -= 1
+ if depth == 0:
+ return (i, 0)
+ return (-1, depth)
+
+
+def ReverseCloseExpression(clean_lines, linenum, pos):
+ """If input points to ) or } or ] or >, finds the position that opens it.
+
+ If lines[linenum][pos] points to a ')' or '}' or ']' or '>', finds the
+ linenum/pos that correspond to the opening of the expression.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ pos: A position on the line.
+
+ Returns:
+ A tuple (line, linenum, pos) pointer *at* the opening brace, or
+ (line, 0, -1) if we never find the matching opening brace. Note
+ we ignore strings and comments when matching; and the line we
+ return is the 'cleansed' line at linenum.
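+
+ For example (illustrative): for the elided line 'foo(bar)' with pos
+ pointing at the closing parenthesis (pos == 7), this returns pos == 3,
+ the position of the matching '('.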
+ """ + line = clean_lines.elided[linenum] + endchar = line[pos] + if endchar not in ')}]>': + return (line, 0, -1) + if endchar == ')': startchar = '(' + if endchar == ']': startchar = '[' + if endchar == '}': startchar = '{' + if endchar == '>': startchar = '<' + + # Check last line + (start_pos, num_open) = FindStartOfExpressionInLine( + line, pos, 0, startchar, endchar) + if start_pos > -1: + return (line, linenum, start_pos) + + # Continue scanning backward + while linenum > 0: + linenum -= 1 + line = clean_lines.elided[linenum] + (start_pos, num_open) = FindStartOfExpressionInLine( + line, len(line) - 1, num_open, startchar, endchar) + if start_pos > -1: + return (line, linenum, start_pos) + + # Did not find startchar before beginning of file, give up + return (line, 0, -1) + + +def CheckForCopyright(filename, lines, error): + """Logs an error if no Copyright message appears at the top of the file.""" + + # We'll say it should occur by line 10. Don't forget there's a + # dummy line at the front. + for line in xrange(1, min(len(lines), 11)): + if re.search(r'Copyright', lines[line], re.I): break + else: # means no copyright line was found + error(filename, 0, 'legal/copyright', 5, + 'No copyright message found. ' + 'You should have a line: "Copyright [year] "') + + +def GetHeaderGuardCPPVariable(filename): + """Returns the CPP variable that should be used as a header guard. + + Args: + filename: The name of a C++ header file. + + Returns: + The CPP variable that should be used as a header guard in the + named file. + + """ + + # Restores original filename in case that cpplint is invoked from Emacs's + # flymake. + filename = re.sub(r'_flymake\.h$', '.h', filename) + filename = re.sub(r'/\.flymake/([^/]*)$', r'/\1', filename) + + fileinfo = FileInfo(filename) + file_path_from_root = fileinfo.RepositoryName() + if _root: + file_path_from_root = re.sub('^' + _root + os.sep, '', file_path_from_root) + return re.sub(r'[-./\s]', '_', file_path_from_root).upper() + '_' + + +def CheckForHeaderGuard(filename, lines, error): + """Checks that the file contains a header guard. + + Logs an error if no #ifndef header guard is present. For other + headers, checks that the full pathname is used. + + Args: + filename: The name of the C++ header file. + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + + cppvar = GetHeaderGuardCPPVariable(filename) + + ifndef = None + ifndef_linenum = 0 + define = None + endif = None + endif_linenum = 0 + for linenum, line in enumerate(lines): + # Already been well guarded, no need for further checking. + if line.strip() == "#pragma once": + return + linesplit = line.split() + if len(linesplit) >= 2: + # find the first occurrence of #ifndef and #define, save arg + if not ifndef and linesplit[0] == '#ifndef': + # set ifndef to the header guard presented on the #ifndef line. 
+ ifndef = linesplit[1] + ifndef_linenum = linenum + if not define and linesplit[0] == '#define': + define = linesplit[1] + # find the last occurrence of #endif, save entire line + if line.startswith('#endif'): + endif = line + endif_linenum = linenum + + if not ifndef: + error(filename, 0, 'build/header_guard', 5, + 'No #ifndef header guard found, suggested CPP variable is: %s' % + cppvar) + return + + if not define: + error(filename, 0, 'build/header_guard', 5, + 'No #define header guard found, suggested CPP variable is: %s' % + cppvar) + return + + # The guard should be PATH_FILE_H_, but we also allow PATH_FILE_H__ + # for backward compatibility. + if ifndef != cppvar: + error_level = 0 + if ifndef != cppvar + '_': + error_level = 5 + + ParseNolintSuppressions(filename, lines[ifndef_linenum], ifndef_linenum, + error) + error(filename, ifndef_linenum, 'build/header_guard', error_level, + '#ifndef header guard has wrong style, please use: %s' % cppvar) + + if define != ifndef: + error(filename, 0, 'build/header_guard', 5, + '#ifndef and #define don\'t match, suggested CPP variable is: %s' % + cppvar) + return + + if endif != ('#endif // %s' % cppvar): + error_level = 0 + if endif != ('#endif // %s' % (cppvar + '_')): + error_level = 5 + + ParseNolintSuppressions(filename, lines[endif_linenum], endif_linenum, + error) + error(filename, endif_linenum, 'build/header_guard', error_level, + '#endif line should be "#endif // %s"' % cppvar) + + +def CheckForBadCharacters(filename, lines, error): + """Logs an error for each line containing bad characters. + + Two kinds of bad characters: + + 1. Unicode replacement characters: These indicate that either the file + contained invalid UTF-8 (likely) or Unicode replacement characters (which + it shouldn't). Note that it's possible for this to throw off line + numbering if the invalid UTF-8 occurred adjacent to a newline. + + 2. NUL bytes. These are problematic for some tools. + + Args: + filename: The name of the current file. + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + for linenum, line in enumerate(lines): + if u'\ufffd' in line: + error(filename, linenum, 'readability/utf8', 5, + 'Line contains invalid UTF-8 (or Unicode replacement character).') + if '\0' in line: + error(filename, linenum, 'readability/nul', 5, 'Line contains NUL byte.') + + +def CheckForNewlineAtEOF(filename, lines, error): + """Logs an error if there is no newline char at the end of the file. + + Args: + filename: The name of the current file. + lines: An array of strings, each representing a line of the file. + error: The function to call with any errors found. + """ + + # The array lines() was created by adding two newlines to the + # original file (go figure), then splitting on \n. + # To verify that the file ends in \n, we just have to make sure the + # last-but-two element of lines() exists and is empty. + if len(lines) < 3 or lines[-2]: + error(filename, len(lines) - 2, 'whitespace/ending_newline', 5, + 'Could not find a newline character at the end of the file.') + + +def CheckForMultilineCommentsAndStrings(filename, clean_lines, linenum, error): + """Logs an error if we see /* ... */ or "..." that extend past one line. + + /* ... */ comments are legit inside macros, for one line. + Otherwise, we prefer // comments, so it's ok to warn about the + other. 
+ Likewise, it's ok for strings to extend across multiple
+ lines, as long as a line continuation character (backslash)
+ terminates each line. Although not currently prohibited by the C++
+ style guide, it's ugly and unnecessary. We don't do well with either
+ in this lint program, so we warn about both.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Remove all \\ (escaped backslashes) from the line. They are OK, and the
+ # second (escaped) slash may trigger later \" detection erroneously.
+ line = line.replace('\\\\', '')
+
+ if line.count('/*') > line.count('*/'):
+ error(filename, linenum, 'readability/multiline_comment', 5,
+ 'Complex multi-line /*...*/-style comment found. '
+ 'Lint may give bogus warnings. '
+ 'Consider replacing these with //-style comments, '
+ 'with #if 0...#endif, '
+ 'or with more clearly structured multi-line comments.')
+
+ if (line.count('"') - line.count('\\"')) % 2:
+ error(filename, linenum, 'readability/multiline_string', 5,
+ 'Multi-line string ("...") found. This lint script doesn\'t '
+ 'do well with such strings, and may give bogus warnings. '
+ 'Use C++11 raw strings or concatenation instead.')
+
+
+threading_list = (
+ ('asctime(', 'asctime_r('),
+ ('ctime(', 'ctime_r('),
+ ('getgrgid(', 'getgrgid_r('),
+ ('getgrnam(', 'getgrnam_r('),
+ ('getlogin(', 'getlogin_r('),
+ ('getpwnam(', 'getpwnam_r('),
+ ('getpwuid(', 'getpwuid_r('),
+ ('gmtime(', 'gmtime_r('),
+ ('localtime(', 'localtime_r('),
+ ('rand(', 'rand_r('),
+ ('strtok(', 'strtok_r('),
+ ('ttyname(', 'ttyname_r('),
+ )
+
+
+def CheckPosixThreading(filename, clean_lines, linenum, error):
+ """Checks for calls to thread-unsafe functions.
+
+ Much code has originally been written without consideration of
+ multi-threading. Also, engineers relying on old experience may have
+ learned POSIX before its threading extensions were added. These
+ tests guide engineers to use thread-safe functions (when using
+ POSIX directly).
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+ for single_thread_function, multithread_safe_function in threading_list:
+ ix = line.find(single_thread_function)
+ # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
+ if ix >= 0 and (ix == 0 or (not line[ix - 1].isalnum() and
+ line[ix - 1] not in ('_', '.', '>'))):
+ error(filename, linenum, 'runtime/threadsafe_fn', 2,
+ 'Consider using ' + multithread_safe_function +
+ '...) instead of ' + single_thread_function +
+ '...) for improved thread safety.')
+
+
+def CheckVlogArguments(filename, clean_lines, linenum, error):
+ """Checks that VLOG() is only used for defining a logging level.
+
+ For example, VLOG(2) is correct. VLOG(INFO), VLOG(WARNING), VLOG(ERROR), and
+ VLOG(FATAL) are not.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """ + line = clean_lines.elided[linenum] + if Search(r'\bVLOG\((INFO|ERROR|WARNING|DFATAL|FATAL)\)', line): + error(filename, linenum, 'runtime/vlog', 5, + 'VLOG() should be used with numeric verbosity level. ' + 'Use LOG() if you want symbolic severity levels.') + + +# Matches invalid increment: *count++, which moves pointer instead of +# incrementing a value. +_RE_PATTERN_INVALID_INCREMENT = re.compile( + r'^\s*\*\w+(\+\+|--);') + + +def CheckInvalidIncrement(filename, clean_lines, linenum, error): + """Checks for invalid increment *count++. + + For example following function: + void increment_counter(int* count) { + *count++; + } + is invalid, because it effectively does count++, moving pointer, and should + be replaced with ++*count, (*count)++ or *count += 1. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + if _RE_PATTERN_INVALID_INCREMENT.match(line): + error(filename, linenum, 'runtime/invalid_increment', 5, + 'Changing pointer instead of value (or unused value of operator*).') + + +class _BlockInfo(object): + """Stores information about a generic block of code.""" + + def __init__(self, seen_open_brace): + self.seen_open_brace = seen_open_brace + self.open_parentheses = 0 + self.inline_asm = _NO_ASM + + def CheckBegin(self, filename, clean_lines, linenum, error): + """Run checks that applies to text up to the opening brace. + + This is mostly for checking the text after the class identifier + and the "{", usually where the base class is specified. For other + blocks, there isn't much to check, so we always pass. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + pass + + def CheckEnd(self, filename, clean_lines, linenum, error): + """Run checks that applies to text after the closing brace. + + This is mostly used for checking end of namespace comments. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + pass + + +class _ClassInfo(_BlockInfo): + """Stores information about a class.""" + + def __init__(self, name, class_or_struct, clean_lines, linenum): + _BlockInfo.__init__(self, False) + self.name = name + self.starting_linenum = linenum + self.is_derived = False + if class_or_struct == 'struct': + self.access = 'public' + self.is_struct = True + else: + self.access = 'private' + self.is_struct = False + + # Remember initial indentation level for this class. Using raw_lines here + # instead of elided to account for leading comments. + initial_indent = Match(r'^( *)\S', clean_lines.raw_lines[linenum]) + if initial_indent: + self.class_indent = len(initial_indent.group(1)) + else: + self.class_indent = 0 + + # Try to find the end of the class. This will be confused by things like: + # class A { + # } *x = { ... + # + # But it's still good enough for CheckSectionSpacing. 
+ self.last_line = 0
+ depth = 0
+ for i in range(linenum, clean_lines.NumLines()):
+ line = clean_lines.elided[i]
+ depth += line.count('{') - line.count('}')
+ if not depth:
+ self.last_line = i
+ break
+
+ def CheckBegin(self, filename, clean_lines, linenum, error):
+ # Look for a bare ':'
+ if Search('(^|[^:]):($|[^:])', clean_lines.elided[linenum]):
+ self.is_derived = True
+
+ def CheckEnd(self, filename, clean_lines, linenum, error):
+ # Check that closing brace is aligned with beginning of the class.
+ # Only do this if the closing brace is indented only by whitespace.
+ # This means we will not check single-line class definitions.
+ indent = Match(r'^( *)\}', clean_lines.elided[linenum])
+ if indent and len(indent.group(1)) != self.class_indent:
+ if self.is_struct:
+ parent = 'struct ' + self.name
+ else:
+ parent = 'class ' + self.name
+ error(filename, linenum, 'whitespace/indent', 3,
+ 'Closing brace should be aligned with beginning of %s' % parent)
+
+
+class _NamespaceInfo(_BlockInfo):
+ """Stores information about a namespace."""
+
+ def __init__(self, name, linenum):
+ _BlockInfo.__init__(self, False)
+ self.name = name or ''
+ self.starting_linenum = linenum
+
+ def CheckEnd(self, filename, clean_lines, linenum, error):
+ """Check end of namespace comments."""
+ line = clean_lines.raw_lines[linenum]
+
+ # Check how many lines are enclosed in this namespace. Don't issue
+ # a warning for missing namespace comments if there aren't enough
+ # lines. However, do apply checks if there is already an end of
+ # namespace comment and it's incorrect.
+ #
+ # TODO(unknown): We always want to check end of namespace comments
+ # if a namespace is large, but sometimes we also want to apply the
+ # check if a short namespace contained nontrivial things (something
+ # other than forward declarations). There is currently no logic on
+ # deciding what these nontrivial things are, so this check is
+ # triggered by namespace size only, which works most of the time.
+ if (linenum - self.starting_linenum < 10
+ and not Match(r'};*\s*(//|/\*).*\bnamespace\b', line)):
+ return
+
+ # Look for matching comment at end of namespace.
+ #
+ # Note that we accept C style "/* */" comments for terminating
+ # namespaces, so that code that terminates namespaces inside
+ # preprocessor macros can be cpplint clean.
+ #
+ # We also accept stuff like "// end of namespace <name>." with the
+ # period at the end.
+ #
+ # Besides these, we don't accept anything else, otherwise we might
+ # get false negatives when existing comment is a substring of the
+ # expected namespace.
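+ # Illustrative examples of accepted terminators for 'namespace foo':
+ # } // namespace foo
+ # } /* namespace foo */
+ # } // end of namespace foo.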
+ if self.name: + # Named namespace + if not Match((r'};*\s*(//|/\*).*\bnamespace\s+' + re.escape(self.name) + + r'[\*/\.\\\s]*$'), + line): + error(filename, linenum, 'readability/namespace', 5, + 'Namespace should be terminated with "// namespace %s"' % + self.name) + else: + # Anonymous namespace + if not Match(r'};*\s*(//|/\*).*\bnamespace[\*/\.\\\s]*$', line): + error(filename, linenum, 'readability/namespace', 5, + 'Namespace should be terminated with "// namespace"') + + +class _PreprocessorInfo(object): + """Stores checkpoints of nesting stacks when #if/#else is seen.""" + + def __init__(self, stack_before_if): + # The entire nesting stack before #if + self.stack_before_if = stack_before_if + + # The entire nesting stack up to #else + self.stack_before_else = [] + + # Whether we have already seen #else or #elif + self.seen_else = False + + +class _NestingState(object): + """Holds states related to parsing braces.""" + + def __init__(self): + # Stack for tracking all braces. An object is pushed whenever we + # see a "{", and popped when we see a "}". Only 3 types of + # objects are possible: + # - _ClassInfo: a class or struct. + # - _NamespaceInfo: a namespace. + # - _BlockInfo: some other type of block. + self.stack = [] + + # Stack of _PreprocessorInfo objects. + self.pp_stack = [] + + def SeenOpenBrace(self): + """Check if we have seen the opening brace for the innermost block. + + Returns: + True if we have seen the opening brace, False if the innermost + block is still expecting an opening brace. + """ + return (not self.stack) or self.stack[-1].seen_open_brace + + def InNamespaceBody(self): + """Check if we are currently one level inside a namespace body. + + Returns: + True if top of the stack is a namespace block, False otherwise. + """ + return self.stack and isinstance(self.stack[-1], _NamespaceInfo) + + def UpdatePreprocessor(self, line): + """Update preprocessor stack. + + We need to handle preprocessors due to classes like this: + #ifdef SWIG + struct ResultDetailsPageElementExtensionPoint { + #else + struct ResultDetailsPageElementExtensionPoint : public Extension { + #endif + + We make the following assumptions (good enough for most files): + - Preprocessor condition evaluates to true from #if up to first + #else/#elif/#endif. + + - Preprocessor condition evaluates to false from #else/#elif up + to #endif. We still perform lint checks on these lines, but + these do not affect nesting stack. + + Args: + line: current line to check. + """ + if Match(r'^\s*#\s*(if|ifdef|ifndef)\b', line): + # Beginning of #if block, save the nesting stack here. The saved + # stack will allow us to restore the parsing state in the #else case. + self.pp_stack.append(_PreprocessorInfo(copy.deepcopy(self.stack))) + elif Match(r'^\s*#\s*(else|elif)\b', line): + # Beginning of #else block + if self.pp_stack: + if not self.pp_stack[-1].seen_else: + # This is the first #else or #elif block. Remember the + # whole nesting stack up to this point. This is what we + # keep after the #endif. + self.pp_stack[-1].seen_else = True + self.pp_stack[-1].stack_before_else = copy.deepcopy(self.stack) + + # Restore the stack to how it was before the #if + self.stack = copy.deepcopy(self.pp_stack[-1].stack_before_if) + else: + # TODO(unknown): unexpected #else, issue warning? + pass + elif Match(r'^\s*#\s*endif\b', line): + # End of #if or #else blocks. 
+ if self.pp_stack:
+ # If we saw an #else, we will need to restore the nesting
+ # stack to its former state before the #else, otherwise we
+ # will just continue from where we left off.
+ if self.pp_stack[-1].seen_else:
+ # Here we can just use a shallow copy since we are the last
+ # reference to it.
+ self.stack = self.pp_stack[-1].stack_before_else
+ # Drop the corresponding #if
+ self.pp_stack.pop()
+ else:
+ # TODO(unknown): unexpected #endif, issue warning?
+ pass
+
+ def Update(self, filename, clean_lines, linenum, error):
+ """Update nesting state with current line.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ line = clean_lines.elided[linenum]
+
+ # Update pp_stack first
+ self.UpdatePreprocessor(line)
+
+ # Count parentheses. This is to avoid adding struct arguments to
+ # the nesting stack.
+ if self.stack:
+ inner_block = self.stack[-1]
+ depth_change = line.count('(') - line.count(')')
+ inner_block.open_parentheses += depth_change
+
+ # Also check if we are starting or ending an inline assembly block.
+ if inner_block.inline_asm in (_NO_ASM, _END_ASM):
+ if (depth_change != 0 and
+ inner_block.open_parentheses == 1 and
+ _MATCH_ASM.match(line)):
+ # Enter assembly block
+ inner_block.inline_asm = _INSIDE_ASM
+ else:
+ # Not entering assembly block. If previous line was _END_ASM,
+ # we will now shift to _NO_ASM state.
+ inner_block.inline_asm = _NO_ASM
+ elif (inner_block.inline_asm == _INSIDE_ASM and
+ inner_block.open_parentheses == 0):
+ # Exit assembly block
+ inner_block.inline_asm = _END_ASM
+
+ # Consume namespace declaration at the beginning of the line. Do
+ # this in a loop so that we catch same line declarations like this:
+ # namespace proto2 { namespace bridge { class MessageSet; } }
+ while True:
+ # Match start of namespace. The "\b\s*" below catches namespace
+ # declarations even if it isn't followed by whitespace; this is
+ # so that we don't confuse our namespace checker. The
+ # missing spaces will be flagged by CheckSpacing.
+ namespace_decl_match = Match(r'^\s*namespace\b\s*([:\w]+)?(.*)$', line)
+ if not namespace_decl_match:
+ break
+
+ new_namespace = _NamespaceInfo(namespace_decl_match.group(1), linenum)
+ self.stack.append(new_namespace)
+
+ line = namespace_decl_match.group(2)
+ if line.find('{') != -1:
+ new_namespace.seen_open_brace = True
+ line = line[line.find('{') + 1:]
+
+ # Look for a class declaration in whatever is left of the line
+ # after parsing namespaces. The regexp accounts for decorated classes
+ # such as in:
+ # class LOCKABLE API Object {
+ # };
+ #
+ # Templates with class arguments may confuse the parser, for example:
+ # template <class T, class Comparator = less<T>,
+ # class Vector = vector<T> >
+ # class HeapQueue {
+ #
+ # Because this parser has no nesting state about templates, by the
+ # time it saw "class Comparator", it may think that it's a new class.
+ # Nested templates have a similar problem:
+ # template <
+ # typename ExportedType,
+ # typename TupleType,
+ # template <typename, typename> class ImplTemplate>
+ #
+ # To avoid these cases, we ignore classes that are followed by '=' or '>'
+ class_decl_match = Match(
+ r'\s*(template\s*<[\w\s<>,:]*>\s*)?'
+ r'(class|struct)\s+([A-Z_]+\s+)*(\w+(?:::\w+)*)'
+ r'(([^=>]|<[^<>]*>|<[^<>]*<[^<>]*>\s*>)*)$', line)
+ if (class_decl_match and
+ (not self.stack or self.stack[-1].open_parentheses == 0)):
+ self.stack.append(_ClassInfo(
+ class_decl_match.group(4), class_decl_match.group(2),
+ clean_lines, linenum))
+ line = class_decl_match.group(5)
+
+ # If we have not yet seen the opening brace for the innermost block,
+ # run checks here.
+ if not self.SeenOpenBrace():
+ self.stack[-1].CheckBegin(filename, clean_lines, linenum, error)
+
+ # Update access control if we are inside a class/struct
+ if self.stack and isinstance(self.stack[-1], _ClassInfo):
+ classinfo = self.stack[-1]
+ access_match = Match(
+ r'^(.*)\b(public|private|protected|signals)(\s+(?:slots\s*)?)?'
+ r':(?:[^:]|$)',
+ line)
+ if access_match:
+ classinfo.access = access_match.group(2)
+
+ # Check that access keywords are indented +1 space. Skip this
+ # check if the keywords are not preceded by whitespace.
+ indent = access_match.group(1)
+ if (len(indent) != classinfo.class_indent + 1 and
+ Match(r'^\s*$', indent)):
+ if classinfo.is_struct:
+ parent = 'struct ' + classinfo.name
+ else:
+ parent = 'class ' + classinfo.name
+ slots = ''
+ if access_match.group(3):
+ slots = access_match.group(3)
+ error(filename, linenum, 'whitespace/indent', 3,
+ '%s%s: should be indented +1 space inside %s' % (
+ access_match.group(2), slots, parent))
+
+ # Consume braces or semicolons from what's left of the line
+ while True:
+ # Match first brace, semicolon, or closed parenthesis.
+ matched = Match(r'^[^{;)}]*([{;)}])(.*)$', line)
+ if not matched:
+ break
+
+ token = matched.group(1)
+ if token == '{':
+ # If namespace or class hasn't seen an opening brace yet, mark
+ # namespace/class head as complete. Push a new block onto the
+ # stack otherwise.
+ if not self.SeenOpenBrace():
+ self.stack[-1].seen_open_brace = True
+ else:
+ self.stack.append(_BlockInfo(True))
+ if _MATCH_ASM.match(line):
+ self.stack[-1].inline_asm = _BLOCK_ASM
+ elif token == ';' or token == ')':
+ # If we haven't seen an opening brace yet, but we already saw
+ # a semicolon, this is probably a forward declaration. Pop
+ # the stack for these.
+ #
+ # Similarly, if we haven't seen an opening brace yet, but we
+ # already saw a closing parenthesis, then these are probably
+ # function arguments with extra "class" or "struct" keywords.
+ # Also pop the stack for these.
+ if not self.SeenOpenBrace():
+ self.stack.pop()
+ else: # token == '}'
+ # Perform end of block checks and pop the stack.
+ if self.stack:
+ self.stack[-1].CheckEnd(filename, clean_lines, linenum, error)
+ self.stack.pop()
+ line = matched.group(2)
+
+ def InnermostClass(self):
+ """Get class info on the top of the stack.
+
+ Returns:
+ A _ClassInfo object if we are inside a class, or None otherwise.
+ """
+ for i in range(len(self.stack), 0, -1):
+ classinfo = self.stack[i - 1]
+ if isinstance(classinfo, _ClassInfo):
+ return classinfo
+ return None
+
+ def CheckCompletedBlocks(self, filename, error):
+ """Checks that all classes and namespaces have been completely parsed.
+
+ Call this when all lines in a file have been processed.
+ Args:
+ filename: The name of the current file.
+ error: The function to call with any errors found.
+ """
+ # Note: This test can result in false positives if #ifdef constructs
+ # get in the way of brace matching. See the testBuildClass test in
+ # cpplint_unittest.py for an example of this.
+ for obj in self.stack:
+ if isinstance(obj, _ClassInfo):
+ error(filename, obj.starting_linenum, 'build/class', 5,
+ 'Failed to find complete declaration of class %s' %
+ obj.name)
+ elif isinstance(obj, _NamespaceInfo):
+ error(filename, obj.starting_linenum, 'build/namespaces', 5,
+ 'Failed to find complete declaration of namespace %s' %
+ obj.name)
+
+
+def CheckForNonStandardConstructs(filename, clean_lines, linenum,
+ nesting_state, error):
+ r"""Logs an error if we see certain non-ANSI constructs ignored by gcc-2.
+
+ Complain about several constructs which gcc-2 accepts, but which are
+ not standard C++. Warning about these in lint is one way to ease the
+ transition to new compilers.
+ - put storage class first (e.g. "static const" instead of "const static").
+ - "%lld" instead of "%qd" in printf-type functions.
+ - "%1$d" is non-standard in printf-type functions.
+ - "\%" is an undefined character escape sequence.
+ - text after #endif is not allowed.
+ - invalid inner-style forward declaration.
+ - >? and <? operators, and their >?= and <?= cousins.
+
+ Additionally, check for constructor/destructor style violations and reference
+ members, as it is very convenient to do so while checking for
+ gcc-2 compliance.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: A callable to which errors are reported, which takes 4 arguments:
+ filename, line number, error level, and message
+ """
+
+ # Remove comments from the line, but leave in strings for now.
+ line = clean_lines.lines[linenum]
+
+ if Search(r'printf\s*\(.*".*%[-+ ]?\d*q', line):
+ error(filename, linenum, 'runtime/printf_format', 3,
+ '%q in format strings is deprecated. Use %ll instead.')
+
+ if Search(r'printf\s*\(.*".*%\d+\$', line):
+ error(filename, linenum, 'runtime/printf_format', 2,
+ '%N$ formats are unconventional. Try rewriting to avoid them.')
+
+ # Remove escaped backslashes before looking for undefined escape sequences.
+ line = line.replace('\\\\', '')
+
+ if Search(r'("|\').*\\(%|\[|\(|{)', line):
+ error(filename, linenum, 'build/printf_format', 3,
+ '%, [, (, and { are undefined character escapes. Unescape them.')
+
+ # For the rest, work with both comments and strings removed.
+ line = clean_lines.elided[linenum]
+
+ if Search(r'\b(const|volatile|void|char|short|int|long'
+ r'|float|double|signed|unsigned'
+ r'|schar|u?int8|u?int16|u?int32|u?int64)'
+ r'\s+(register|static|extern|typedef)\b',
+ line):
+ error(filename, linenum, 'build/storage_class', 5,
+ 'Storage class (static, extern, typedef, etc) should be first.')
+
+ if Match(r'\s*#\s*endif\s*[^/\s]+', line):
+ error(filename, linenum, 'build/endif_comment', 5,
+ 'Uncommented text after #endif is non-standard. Use a comment.')
+
+ if Match(r'\s*class\s+(\w+\s*::\s*)+\w+\s*;', line):
+ error(filename, linenum, 'build/forward_decl', 5,
+ 'Inner-style forward declarations are invalid. Remove this line.')
+
+ if Search(r'(\w+|[+-]?\d+(\.\d*)?)\s*(<|>)\?=?\s*(\w+|[+-]?\d+)(\.\d*)?',
+ line):
+ error(filename, linenum, 'build/deprecated', 3,
+ '>? and <? (max and min) operators are non-standard and deprecated.')
+
+ if Search(r'^\s*const\s*string\s*&\s*\w+\s*;', line):
+ # TODO(unknown): Could it be expanded safely to arbitrary references,
+ # without triggering too many false positives? The first
+ # attempt triggered 5 warnings for mostly benign code in the regtest, hence
+ # the restriction.
+ # Here's the original regexp, for the reference:
+ # type_name = r'\w+((\s*::\s*\w+)|(\s*<\s*\w+?\s*>))?'
+ # r'\s*const\s*' + type_name + '\s*&\s*\w+\s*;'
+ error(filename, linenum, 'runtime/member_string_references', 2,
+ 'const string& members are dangerous. It is much better to use '
+ 'alternatives, such as pointers or simple constants.')
+
+ # Everything else in this function operates on class declarations.
+ # Return early if the top of the nesting stack is not a class, or if
+ # the class head is not completed yet.
+ classinfo = nesting_state.InnermostClass()
+ if not classinfo or not classinfo.seen_open_brace:
+ return
+
+ # The class may have been declared with namespace or classname qualifiers.
+ # The constructor and destructor will not have those qualifiers.
+ base_classname = classinfo.name.split('::')[-1]
+
+ # Look for single-argument constructors that aren't marked explicit.
+ # Technically a valid construct, but against style.
+ args = Match(r'\s+(?:inline\s+)?%s\s*\(([^,()]+)\)'
+ % re.escape(base_classname),
+ line)
+ if (args and
+ args.group(1) != 'void' and
+ not Match(r'(const\s+)?%s(\s+const)?\s*(?:<\w+>\s*)?&'
+ % re.escape(base_classname), args.group(1).strip())):
+ error(filename, linenum, 'runtime/explicit', 5,
+ 'Single-argument constructors should be marked explicit.')
+
+
+def CheckSpacingForFunctionCall(filename, line, linenum, error):
+ """Checks for the correctness of various spacing around function calls.
+
+ Args:
+ filename: The name of the current file.
+ line: The text of the line to check.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+
+ # Since function calls often occur inside if/for/while/switch
+ # expressions - which have their own, more liberal conventions - we
+ # first see if we should be looking inside such an expression for a
+ # function call, to which we can apply more strict standards.
+ fncall = line # if there's no control flow construct, look at whole line
+ for pattern in (r'\bif\s*\((.*)\)\s*{',
+ r'\bfor\s*\((.*)\)\s*{',
+ r'\bwhile\s*\((.*)\)\s*[{;]',
+ r'\bswitch\s*\((.*)\)\s*{'):
+ match = Search(pattern, line)
+ if match:
+ fncall = match.group(1) # look inside the parens for function calls
+ break
+
+ # Except in if/for/while/switch, there should never be space
+ # immediately inside parens (eg "f( 3, 4 )"). We make an exception
+ # for nested parens ( (a+b) + c ). Likewise, there should never be
+ # a space before a ( when it's a function argument. I assume it's a
+ # function argument when the char before the whitespace is legal in
+ # a function name (alnum + _) and we're not starting a macro. Also ignore
+ # pointers and references to arrays and functions coz they're too tricky:
+ # we use a very simple way to recognize these:
+ # " (something)(maybe-something)" or
+ # " (something)(maybe-something," or
+ # " (something)[something]"
+ # Note that we assume the contents of [] to be short enough that
+ # they'll never need to wrap.
+ if ( # Ignore control structures.
+ not Search(r'\b(if|for|while|switch|return|new|delete|catch|sizeof)\b',
+ fncall) and
+ # Ignore pointers/references to functions.
+ not Search(r' \([^)]+\)\([^)]*(\)|,$)', fncall) and
+ # Ignore pointers/references to arrays.
+ not Search(r' \([^)]+\)\[[^\]]+\]', fncall)):
+ if Search(r'\w\s*\(\s(?!\s*\\$)', fncall): # a ( used for a fn call
+ error(filename, linenum, 'whitespace/parens', 4,
+ 'Extra space after ( in function call')
+ elif Search(r'\(\s+(?!(\s*\\)|\()', fncall):
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Extra space after (')
+ if (Search(r'\w\s+\(', fncall) and
+ not Search(r'#\s*define|typedef', fncall) and
+ not Search(r'\w\s+\((\w+::)*\*\w+\)\(', fncall)):
+ error(filename, linenum, 'whitespace/parens', 4,
+ 'Extra space before ( in function call')
+ # If the ) is followed only by a newline or a { + newline, assume it's
+ # part of a control statement (if/while/etc), and don't complain
+ if Search(r'[^)]\s+\)\s*[^{\s]', fncall):
+ # If the closing parenthesis is preceded by only whitespaces,
+ # try to give a more descriptive error message.
+ if Search(r'^\s+\)', fncall):
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Closing ) should be moved to the previous line')
+ else:
+ error(filename, linenum, 'whitespace/parens', 2,
+ 'Extra space before )')
+
+
+def IsBlankLine(line):
+ """Returns true if the given line is blank.
+
+ We consider a line to be blank if the line is empty or consists of
+ only white spaces.
+
+ Args:
+ line: A line of a string.
+
+ Returns:
+ True, if the given line is blank.
+ """
+ return not line or line.isspace()
+
+
+def CheckForFunctionLengths(filename, clean_lines, linenum,
+ function_state, error):
+ """Reports long function bodies.
+
+ For an overview why this is done, see:
+ http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Write_Short_Functions
+
+ Uses a simplistic algorithm assuming other style guidelines
+ (especially spacing) are followed.
+ Only checks unindented functions, so class members are unchecked.
+ Trivial bodies are unchecked, so constructors with huge initializer lists
+ may be missed.
+ Blank/comment lines are not counted so as to avoid encouraging the removal
+ of vertical space and comments just to get through a lint check.
+ NOLINT *on the last line of a function* disables this check.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ function_state: Current function name and lines in body so far.
+ error: The function to call with any errors found.
+ """
+ lines = clean_lines.lines
+ line = lines[linenum]
+ raw = clean_lines.raw_lines
+ raw_line = raw[linenum]
+ joined_line = ''
+
+ starting_func = False
+ regexp = r'(\w(\w|::|\*|\&|\s)*)\(' # decls * & space::name( ...
+ match_result = Match(regexp, line)
+ if match_result:
+ # If the name is all caps and underscores, figure it's a macro and
+ # ignore it, unless it's TEST or TEST_F.
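+ # e.g. 'MY_MACRO(' is skipped here, while 'TEST(' and 'Foo::Bar(' start
+ # function-length tracking (illustrative names, not from the source).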
+ function_name = match_result.group(1).split()[-1] + if function_name == 'TEST' or function_name == 'TEST_F' or ( + not Match(r'[A-Z_]+$', function_name)): + starting_func = True + + if starting_func: + body_found = False + for start_linenum in xrange(linenum, clean_lines.NumLines()): + start_line = lines[start_linenum] + joined_line += ' ' + start_line.lstrip() + if Search(r'(;|})', start_line): # Declarations and trivial functions + body_found = True + break # ... ignore + elif Search(r'{', start_line): + body_found = True + function = Search(r'((\w|:)*)\(', line).group(1) + if Match(r'TEST', function): # Handle TEST... macros + parameter_regexp = Search(r'(\(.*\))', joined_line) + if parameter_regexp: # Ignore bad syntax + function += parameter_regexp.group(1) + else: + function += '()' + function_state.Begin(function) + break + if not body_found: + # No body for the function (or evidence of a non-function) was found. + error(filename, linenum, 'readability/fn_size', 5, + 'Lint failed to find start of function body.') + elif Match(r'^\}\s*$', line): # function end + function_state.Check(error, filename, linenum) + function_state.End() + elif not Match(r'^\s*$', line): + function_state.Count() # Count non-blank/non-comment lines. + + +_RE_PATTERN_TODO = re.compile(r'^//(\s*)TODO(\(.+?\))?:?(\s|$)?') + + +def CheckComment(comment, filename, linenum, error): + """Checks for common mistakes in TODO comments. + + Args: + comment: The text of the comment from the line in question. + filename: The name of the current file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + match = _RE_PATTERN_TODO.match(comment) + if match: + # One whitespace is correct; zero whitespace is handled elsewhere. + leading_whitespace = match.group(1) + if len(leading_whitespace) > 1: + error(filename, linenum, 'whitespace/todo', 2, + 'Too many spaces before TODO') + + username = match.group(2) + if not username: + error(filename, linenum, 'readability/todo', 2, + 'Missing username in TODO; it should look like ' + '"// TODO(my_username): Stuff."') + + middle_whitespace = match.group(3) + # Comparisons made explicit for correctness -- pylint: disable=g-explicit-bool-comparison + if middle_whitespace != ' ' and middle_whitespace != '': + error(filename, linenum, 'whitespace/todo', 2, + 'TODO(my_username) should be followed by a space') + +def CheckAccess(filename, clean_lines, linenum, nesting_state, error): + """Checks for improper use of DISALLOW* macros. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + nesting_state: A _NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] # get rid of comments and strings + + matched = Match((r'\s*(DISALLOW_COPY_AND_ASSIGN|' + r'DISALLOW_EVIL_CONSTRUCTORS|' + r'DISALLOW_IMPLICIT_CONSTRUCTORS)'), line) + if not matched: + return + if nesting_state.stack and isinstance(nesting_state.stack[-1], _ClassInfo): + if nesting_state.stack[-1].access != 'private': + error(filename, linenum, 'readability/constructors', 3, + '%s must be in the private: section' % matched.group(1)) + + else: + # Found DISALLOW* macro outside a class declaration, or perhaps it + # was used inside a function when it should have been part of the + # class declaration. 
+ # We could issue a warning here, but it
+ # probably resulted in a compiler error already.
+ pass
+
+
+def FindNextMatchingAngleBracket(clean_lines, linenum, init_suffix):
+ """Find the corresponding > to close a template.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: Current line number.
+ init_suffix: Remainder of the current line after the initial <.
+
+ Returns:
+ True if a matching bracket exists.
+ """
+ line = init_suffix
+ nesting_stack = ['<']
+ while True:
+ # Find the next operator that can tell us whether < is used as an
+ # opening bracket or as a less-than operator. We only want to
+ # warn on the latter case.
+ #
+ # We could also check all other operators and terminate the search
+ # early, e.g. if we got something like this "a<b+c", the "<" is
+ # most likely a less-than operator.
+ #
+ # False positives include C-style multi-line comments and multi-line
+ # strings, but those have always been troublesome for cpplint.
+ match = Search(r'^[^<>(),;\[\]]*([<>(),;\[\]])(.*)$', line)
+ if match:
+ # Found an operator, update nesting stack
+ operator = match.group(1)
+ line = match.group(2)
+
+ if nesting_stack[-1] == '<':
+ # Expecting closing angle bracket
+ if operator in ('<', '(', '['):
+ nesting_stack.append(operator)
+ elif operator == '>':
+ nesting_stack.pop()
+ if not nesting_stack:
+ # Found matching angle bracket
+ return True
+ elif operator == ',':
+ # Got a comma after a bracket, this is most likely a template
+ # argument. We have not seen a closing angle bracket yet, but
+ # it's probably a few lines later if we look for it, so just
+ # return early here.
+ return True
+ else:
+ # Got some other operator.
+ return False
+
+ else:
+ # Expecting closing parenthesis or closing bracket
+ if operator in ('<', '(', '['):
+ nesting_stack.append(operator)
+ elif operator in (')', ']'):
+ # We don't bother checking for matching () or []. If we got
+ # something like (] or [), it would have been a syntax error.
+ nesting_stack.pop()
+
+ else:
+ # Scan the next line
+ linenum += 1
+ if linenum >= len(clean_lines.elided):
+ break
+ line = clean_lines.elided[linenum]
+
+ # Exhausted all remaining lines and still no matching angle bracket.
+ # Most likely the input was incomplete, otherwise we should have
+ # seen a semicolon and returned early.
+ return True
+
+
+def FindPreviousMatchingAngleBracket(clean_lines, linenum, init_prefix):
+ """Find the corresponding < that started a template.
+
+ Args:
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: Current line number.
+ init_prefix: Part of the current line before the initial >.
+
+ Returns:
+ True if a matching bracket exists.
+ """
+ line = init_prefix
+ nesting_stack = ['>']
+ while True:
+ # Find the previous operator
+ match = Search(r'^(.*)([<>(),;\[\]])[^<>(),;\[\]]*$', line)
+ if match:
+ # Found an operator, update nesting stack
+ operator = match.group(2)
+ line = match.group(1)
+
+ if nesting_stack[-1] == '>':
+ # Expecting opening angle bracket
+ if operator in ('>', ')', ']'):
+ nesting_stack.append(operator)
+ elif operator == '<':
+ nesting_stack.pop()
+ if not nesting_stack:
+ # Found matching angle bracket
+ return True
+ elif operator == ',':
+ # Got a comma before a bracket, this is most likely a
+ # template argument. The opening angle bracket is probably
+ # there if we look for it, so just return early here.
+ return True
+ else:
+ # Got some other operator.
+ return False
+
+ else:
+ # Expecting opening parenthesis or opening bracket
+ if operator in ('>', ')', ']'):
+ nesting_stack.append(operator)
+ elif operator in ('(', '['):
+ nesting_stack.pop()
+
+ else:
+ # Scan the previous line
+ linenum -= 1
+ if linenum < 0:
+ break
+ line = clean_lines.elided[linenum]
+
+ # Exhausted all earlier lines and still no matching angle bracket.
+ return False
+
+
+def CheckSpacing(filename, clean_lines, linenum, nesting_state, error):
+ """Checks for the correctness of various spacing issues in the code.
+
+ Things we check for: spaces around operators, spaces after
+ if/for/while/switch, no spaces around parens in function calls, two
+ spaces between code and comment, don't start a block with a blank
+ line, don't end a function with a blank line, don't add a blank line
+ after public/protected/private, don't have too many blank lines in a row.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: The function to call with any errors found.
+ """
+
+ # Don't use "elided" lines here, otherwise we can't check commented lines.
+ # Don't want to use "raw" either, because we don't want to check inside C++11
+ # raw strings.
+ raw = clean_lines.lines_without_raw_strings
+ line = raw[linenum]
+
+ # Before nixing comments, check if the line is blank for no good
+ # reason. This includes the first line after a block is opened, and
+ # blank lines at the end of a function (ie, right before a line like '}').
+ #
+ # Skip all the blank line checks if we are immediately inside a
+ # namespace body. In other words, don't issue blank line warnings
+ # for this block:
+ # namespace {
+ #
+ # }
+ #
+ # A warning about missing end of namespace comments will be issued instead.
+ if IsBlankLine(line) and not nesting_state.InNamespaceBody():
+ elided = clean_lines.elided
+ prev_line = elided[linenum - 1]
+ prevbrace = prev_line.rfind('{')
+ # TODO(unknown): Don't complain if line before blank line, and line after,
+ # both start with alnums and are indented the same amount.
+ # This ignores whitespace at the start of a namespace block
+ # because those are not usually indented.
+ if prevbrace != -1 and prev_line[prevbrace:].find('}') == -1:
+ # OK, we have a blank line at the start of a code block. Before we
+ # complain, we check if it is an exception to the rule: The previous
+ # non-empty line has the parameters of a function header that are indented
+ # 4 spaces (because they did not fit in an 80 column line when placed on
+ # the same line as the function name). We also check for the case where
+ # the previous line is indented 6 spaces, which may happen when the
+ # initializers of a constructor do not fit into an 80 column line.
+ exception = False
+ if Match(r' {6}\w', prev_line): # Initializer list?
+ # We are looking for the opening column of initializer list, which
+ # should be indented 4 spaces to cause 6 space indentation afterwards.
+ search_position = linenum-2
+ while (search_position >= 0
+ and Match(r' {6}\w', elided[search_position])):
+ search_position -= 1
+ exception = (search_position >= 0
+ and elided[search_position][:5] == ' :')
+ else:
+ # Search for the function arguments or an initializer list. We use a
+ # simple heuristic here: If the line is indented 4 spaces, and we have a
+ # closing paren, without the opening paren, followed by an opening brace
+ # or colon (for initializer lists) we assume that it is the last line of
+ # a function header. If we have a colon indented 4 spaces, it is an
+ # initializer list.
+ exception = (Match(r' {4}\w[^\(]*\)\s*(const\s*)?(\{\s*$|:)',
+ prev_line)
+ or Match(r' {4}:', prev_line))
+
+ if not exception:
+ error(filename, linenum, 'whitespace/blank_line', 2,
+ 'Redundant blank line at the start of a code block '
+ 'should be deleted.')
+ # Ignore blank lines at the end of a block in a long if-else
+ # chain, like this:
+ # if (condition1) {
+ # // Something followed by a blank line
+ #
+ # } else if (condition2) {
+ # // Something else
+ # }
+ if linenum + 1 < clean_lines.NumLines():
+ next_line = raw[linenum + 1]
+ if (next_line
+ and Match(r'\s*}', next_line)
+ and next_line.find('} else ') == -1):
+ error(filename, linenum, 'whitespace/blank_line', 3,
+ 'Redundant blank line at the end of a code block '
+ 'should be deleted.')
+
+ matched = Match(r'\s*(public|protected|private):', prev_line)
+ if matched:
+ error(filename, linenum, 'whitespace/blank_line', 3,
+ 'Do not leave a blank line after "%s:"' % matched.group(1))
+
+ # Next, we complain if there's a comment too near the text
+ commentpos = line.find('//')
+ if commentpos != -1:
+ # Check if the // may be in quotes. If so, ignore it
+ # Comparisons made explicit for clarity -- pylint: disable=g-explicit-bool-comparison
+ if (line.count('"', 0, commentpos) -
+ line.count('\\"', 0, commentpos)) % 2 == 0: # not in quotes
+ # Allow one space for new scopes, two spaces otherwise:
+ if (not Match(r'^\s*{ //', line) and
+ ((commentpos >= 1 and
+ line[commentpos-1] not in string.whitespace) or
+ (commentpos >= 2 and
+ line[commentpos-2] not in string.whitespace))):
+ error(filename, linenum, 'whitespace/comments', 2,
+ 'At least two spaces is best between code and comments')
+ # There should always be a space between the // and the comment
+ commentend = commentpos + 2
+ if commentend < len(line) and not line[commentend] == ' ':
+ # but some lines are exceptions -- e.g. if they're big
+ # comment delimiters like:
+ # //----------------------------------------------------------
+ # or are an empty C++ style Doxygen comment, like:
+ # ///
+ # or C++ style Doxygen comments placed after the variable:
+ # ///< Header comment
+ # //!< Header comment
+ # or they begin with multiple slashes followed by a space:
+ # //////// Header comment
+ match = (Search(r'[=/-]{4,}\s*$', line[commentend:]) or
+ Search(r'^/$', line[commentend:]) or
+ Search(r'^!< ', line[commentend:]) or
+ Search(r'^/< ', line[commentend:]) or
+ Search(r'^/+ ', line[commentend:]))
+ if not match:
+ error(filename, linenum, 'whitespace/comments', 4,
+ 'Should have a space between // and comment')
+ CheckComment(line[commentpos:], filename, linenum, error)
+
+ line = clean_lines.elided[linenum] # get rid of comments and strings
+
+ # Don't try to do spacing checks for operator methods
+ line = re.sub(r'operator(==|!=|<|<<|<=|>=|>>|>)\(', 'operator\(', line)
+
+ # We allow no-spaces around = within an if: "if ( (a=Foo()) == 0 )".
+ # Otherwise not. Note we only check for non-spaces on *both* sides;
+ # sometimes people put non-spaces on one side when aligning ='s among
+ # many lines (not that this is behavior that I approve of...)
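+ # e.g. 'a=b' and 'x.y=z' are flagged below, while 'if (a=Foo())' is not
+ # (illustrative; the if/while exemption is matched on the same line).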
+ if Search(r'[\w.]=[\w.]', line) and not Search(r'\b(if|while) ', line):
+ error(filename, linenum, 'whitespace/operators', 4,
+ 'Missing spaces around =')
+
+ # It's ok not to have spaces around binary operators like + - * /, but if
+ # there's too little whitespace, we get concerned. It's hard to tell,
+ # though, so we punt on this one for now. TODO.
+
+ # You should always have whitespace around binary operators.
+ #
+ # Check <= and >= first to avoid false positives with < and >, then
+ # check non-include lines for spacing around < and >.
+ match = Search(r'[^<>=!\s](==|!=|<=|>=)[^<>=!\s]', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around %s' % match.group(1))
+ # We allow no-spaces around << when used like this: 10<<20, but
+ # not otherwise (particularly, not when used as streams)
+ # Also ignore using ns::operator<<;
+ match = Search(r'(operator|\S)(?:L|UL|ULL|l|ul|ull)?<<(\S)', line)
+ if (match and
+ not (match.group(1).isdigit() and match.group(2).isdigit()) and
+ not (match.group(1) == 'operator' and match.group(2) == ';')):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around <<')
+ elif not Match(r'#.*include', line):
+ # Avoid false positives on ->
+ reduced_line = line.replace('->', '')
+
+ # Look for < that is not surrounded by spaces. This is only
+ # triggered if both sides are missing spaces, even though
+ # technically we should flag if at least one side is missing a
+ # space. This is done to avoid some false positives with shifts.
+ match = Search(r'[^\s<]<([^\s=<].*)', reduced_line)
+ if (match and
+ not FindNextMatchingAngleBracket(clean_lines, linenum, match.group(1))):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around <')
+
+ # Look for > that is not surrounded by spaces. Similar to the
+ # above, we only trigger if both sides are missing spaces to avoid
+ # false positives with shifts.
+ match = Search(r'^(.*[^\s>])>[^\s=>]', reduced_line)
+ if (match and
+ not FindPreviousMatchingAngleBracket(clean_lines, linenum,
+ match.group(1))):
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around >')
+
+ # We allow no-spaces around >> for almost anything. This is because
+ # C++11 allows ">>" to close nested templates, which accounts for
+ # most cases when ">>" is not followed by a space.
+ #
+ # We still warn on ">>" followed by alpha character, because that is
+ # likely due to ">>" being used for right shifts, e.g.:
+ # value >> alpha
+ #
+ # When ">>" is used to close templates, the alphanumeric letter that
+ # follows would be part of an identifier, and there should still be
+ # a space separating the template type and the identifier.
+ # type<type<type>> alpha
+ match = Search(r'>>[a-zA-Z_]', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 3,
+ 'Missing spaces around >>')
+
+ # There shouldn't be space around unary operators
+ match = Search(r'(!\s|~\s|[\s]--[\s;]|[\s]\+\+[\s;])', line)
+ if match:
+ error(filename, linenum, 'whitespace/operators', 4,
+ 'Extra space for operator %s' % match.group(1))
+
+ # A pet peeve of mine: no spaces after an if, while, switch, or for
+ match = Search(r' (if\(|for\(|while\(|switch\()', line)
+ if match:
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Missing space before ( in %s' % match.group(1))
+
+ # For if/for/while/switch, the left and right parens should be
+ # consistent about how many spaces are inside the parens, and
+ # there should either be zero or one spaces inside the parens.
+ # We don't want: "if ( foo)" or "if ( foo )".
+ # Exception: "for ( ; foo; bar)" and "for (foo; bar; )" are allowed.
+ match = Search(r'\b(if|for|while|switch)\s*'
+ r'\(([ ]*)(.).*[^ ]+([ ]*)\)\s*{\s*$',
+ line)
+ if match:
+ if len(match.group(2)) != len(match.group(4)):
+ if not (match.group(3) == ';' and
+ len(match.group(2)) == 1 + len(match.group(4)) or
+ not match.group(2) and Search(r'\bfor\s*\(.*; \)', line)):
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Mismatching spaces inside () in %s' % match.group(1))
+ if len(match.group(2)) not in [0, 1]:
+ error(filename, linenum, 'whitespace/parens', 5,
+ 'Should have zero or one spaces inside ( and ) in %s' %
+ match.group(1))
+
+ # You should always have a space after a comma (either as fn arg or operator)
+ #
+ # This does not apply when the non-space character following the
+ # comma is another comma, since the only time when that happens is
+ # for empty macro arguments.
+ #
+ # We run this check in two passes: first pass on elided lines to
+ # verify that lines contain missing whitespaces, second pass on raw
+ # lines to confirm that those missing whitespaces are not due to
+ # elided comments.
+ if Search(r',[^,\s]', line) and Search(r',[^,\s]', raw[linenum]):
+ error(filename, linenum, 'whitespace/comma', 3,
+ 'Missing space after ,')
+
+ # You should always have a space after a semicolon
+ # except for few corner cases
+ # TODO(unknown): clarify if 'if (1) { return 1;}' requires one more
+ # space after ;
+ if Search(r';[^\s};\\)/]', line):
+ error(filename, linenum, 'whitespace/semicolon', 3,
+ 'Missing space after ;')
+
+ # Next we will look for issues with function calls.
+ CheckSpacingForFunctionCall(filename, line, linenum, error)
+
+ # Except after an opening paren, or after another opening brace (in case of
+ # an initializer list, for instance), you should have spaces before your
+ # braces. And since you should never have braces at the beginning of a line,
+ # this is an easy test.
+ match = Match(r'^(.*[^ ({]){', line)
+ if match:
+ # Try a bit harder to check for brace initialization. This
+ # happens in one of the following forms:
+ # Constructor() : initializer_list_{} { ... }
+ # Constructor{}.MemberFunction()
+ # Type variable{};
+ # FunctionCall(type{}, ...);
+ # LastArgument(..., type{});
+ # LOG(INFO) << type{} << " ...";
+ # map_of_type[{...}] = ...;
+ #
+ # We check for the character following the closing brace, and
+ # silence the warning if it's one of those listed above, i.e.
+ # "{.;,)<]".
+ #
+ # To account for nested initializer list, we allow any number of
+ # closing braces up to "{;,)<". We can't simply silence the
+ # warning on first sight of closing brace, because that would
+ # cause false negatives for things that are not initializer lists.
+ # Silence this: But not this:
+ # Outer{ if (...) {
+ # Inner{...} if (...){ // Missing space before {
+ # }; }
+ #
+ # There is a false negative with this approach if people inserted
+ # spurious semicolons, e.g. "if (cond){};", but we will catch the
+ # spurious semicolon with a separate check.
+ (endline, endlinenum, endpos) = CloseExpression(
+ clean_lines, linenum, len(match.group(1)))
+ trailing_text = ''
+ if endpos > -1:
+ trailing_text = endline[endpos:]
+ for offset in xrange(endlinenum + 1,
+ min(endlinenum + 3, clean_lines.NumLines() - 1)):
+ trailing_text += clean_lines.elided[offset]
+ if not Match(r'^[\s}]*[{.;,)<\]]', trailing_text):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space before {')
+
+ # Make sure '} else {' has spaces.
+ if Search(r'}else', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Missing space before else')
+
+ # You shouldn't have spaces before your brackets, except maybe after
+ # 'delete []' or 'new char * []'.
+ if Search(r'\w\s+\[', line) and not Search(r'delete\s+\[', line):
+ error(filename, linenum, 'whitespace/braces', 5,
+ 'Extra space before [')
+
+ # You shouldn't have a space before a semicolon at the end of the line.
+ # There's a special case for "for" since the style guide allows space before
+ # the semicolon there.
+ if Search(r':\s*;\s*$', line):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Semicolon defining empty statement. Use {} instead.')
+ elif Search(r'^\s*;\s*$', line):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Line contains only semicolon. If this should be an empty statement, '
+ 'use {} instead.')
+ elif (Search(r'\s+;\s*$', line) and
+ not Search(r'\bfor\b', line)):
+ error(filename, linenum, 'whitespace/semicolon', 5,
+ 'Extra space before last semicolon. If this should be an empty '
+ 'statement, use {} instead.')
+
+ # In range-based for, we want spaces before and after the colon, but
+ # not around "::" tokens that might appear.
+ if (Search('for *\(.*[^:]:[^: ]', line) or
+ Search('for *\(.*[^: ]:[^:]', line)):
+ error(filename, linenum, 'whitespace/forcolon', 2,
+ 'Missing space around colon in range-based for loop')
+
+
+def CheckSectionSpacing(filename, clean_lines, class_info, linenum, error):
+ """Checks for additional blank line issues related to sections.
+
+ Currently the only thing checked here is blank line before protected/private.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ class_info: A _ClassInfo object.
+ linenum: The number of the line to check.
+ error: The function to call with any errors found.
+ """
+ # Skip checks if the class is small, where small means 25 lines or less.
+ # 25 lines seems like a good cutoff since that's the usual height of
+ # terminals, and any class that can't fit in one screen can't really
+ # be considered "small".
+ #
+ # Also skip checks if we are on the first line. This accounts for
+ # classes that look like
+ # class Foo { public: ... };
+ #
+ # If we didn't find the end of the class, last_line would be zero,
+ # and the check will be skipped by the first condition.
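+ # e.g. (illustrative) a class body spanning 40 lines is checked, while a
+ # one-line 'class Foo { public: ... };' is skipped.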
+ if (class_info.last_line - class_info.starting_linenum <= 24 or + linenum <= class_info.starting_linenum): + return + + matched = Match(r'\s*(public|protected|private):', clean_lines.lines[linenum]) + if matched: + # Issue warning if the line before public/protected/private was + # not a blank line, but don't do this if the previous line contains + # "class" or "struct". This can happen two ways: + # - We are at the beginning of the class. + # - We are forward-declaring an inner class that is semantically + # private, but needed to be public for implementation reasons. + # Also ignores cases where the previous line ends with a backslash as can be + # common when defining classes in C macros. + prev_line = clean_lines.lines[linenum - 1] + if (not IsBlankLine(prev_line) and + not Search(r'\b(class|struct)\b', prev_line) and + not Search(r'\\$', prev_line)): + # Try a bit harder to find the beginning of the class. This is to + # account for multi-line base-specifier lists, e.g.: + # class Derived + # : public Base { + end_class_head = class_info.starting_linenum + for i in range(class_info.starting_linenum, linenum): + if Search(r'\{\s*$', clean_lines.lines[i]): + end_class_head = i + break + if end_class_head < linenum - 1: + error(filename, linenum, 'whitespace/blank_line', 3, + '"%s:" should be preceded by a blank line' % matched.group(1)) + + +def GetPreviousNonBlankLine(clean_lines, linenum): + """Return the most recent non-blank line and its line number. + + Args: + clean_lines: A CleansedLines instance containing the file contents. + linenum: The number of the line to check. + + Returns: + A tuple with two elements. The first element is the contents of the last + non-blank line before the current line, or the empty string if this is the + first non-blank line. The second is the line number of that line, or -1 + if this is the first non-blank line. + """ + + prevlinenum = linenum - 1 + while prevlinenum >= 0: + prevline = clean_lines.elided[prevlinenum] + if not IsBlankLine(prevline): # if not a blank line... + return (prevline, prevlinenum) + prevlinenum -= 1 + return ('', -1) + + +def CheckBraces(filename, clean_lines, linenum, error): + """Looks for misplaced braces (e.g. at the end of line). + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + + line = clean_lines.elided[linenum] # get rid of comments and strings + + if Match(r'\s*{\s*$', line): + # We allow an open brace to start a line in the case where someone is using + # braces in a block to explicitly create a new scope, which is commonly used + # to control the lifetime of stack-allocated variables. Braces are also + # used for brace initializers inside function calls. We don't detect this + # perfectly: we just don't complain if the last non-whitespace character on + # the previous non-blank line is ',', ';', ':', '(', '{', or '}', or if the + # previous line starts a preprocessor block. + prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] + if (not Search(r'[,;:}{(]\s*$', prevline) and + not Match(r'\s*#', prevline)): + error(filename, linenum, 'whitespace/braces', 4, + '{ should almost always be at the end of the previous line') + + # An else clause should be on the same line as the preceding closing brace. 
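+ # For example (illustrative), the check below flags:
+ # }
+ # else {
+ # but accepts:
+ # } else {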
+ if Match(r'\s*else\s*', line): + prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] + if Match(r'\s*}\s*$', prevline): + error(filename, linenum, 'whitespace/newline', 4, + 'An else should appear on the same line as the preceding }') + + # If braces come on one side of an else, they should be on both. + # However, we have to worry about "else if" that spans multiple lines! + if Search(r'}\s*else[^{]*$', line) or Match(r'[^}]*else\s*{', line): + if Search(r'}\s*else if([^{]*)$', line): # could be multi-line if + # find the ( after the if + pos = line.find('else if') + pos = line.find('(', pos) + if pos > 0: + (endline, _, endpos) = CloseExpression(clean_lines, linenum, pos) + if endline[endpos:].find('{') == -1: # must be brace after if + error(filename, linenum, 'readability/braces', 5, + 'If an else has a brace on one side, it should have it on both') + else: # common case: else not followed by a multi-line if + error(filename, linenum, 'readability/braces', 5, + 'If an else has a brace on one side, it should have it on both') + + # Likewise, an else should never have the else clause on the same line + if Search(r'\belse [^\s{]', line) and not Search(r'\belse if\b', line): + error(filename, linenum, 'whitespace/newline', 4, + 'Else clause should never be on same line as else (use 2 lines)') + + # In the same way, a do/while should never be on one line + if Match(r'\s*do [^\s{]', line): + error(filename, linenum, 'whitespace/newline', 4, + 'do/while clauses should not be on a single line') + + # Block bodies should not be followed by a semicolon. Due to C++11 + # brace initialization, there are more places where semicolons are + # required than not, so we use a whitelist approach to check these + # rather than a blacklist. These are the places where "};" should + # be replaced by just "}": + # 1. Some flavor of block following closing parenthesis: + # for (;;) {}; + # while (...) {}; + # switch (...) {}; + # Function(...) {}; + # if (...) {}; + # if (...) else if (...) {}; + # + # 2. else block: + # if (...) else {}; + # + # 3. const member function: + # Function(...) const {}; + # + # 4. Block following some statement: + # x = 42; + # {}; + # + # 5. Block at the beginning of a function: + # Function(...) { + # {}; + # } + # + # Note that naively checking for the preceding "{" will also match + # braces inside multi-dimensional arrays, but this is fine since + # that expression will not contain semicolons. + # + # 6. Block following another block: + # while (true) {} + # {}; + # + # 7. End of namespaces: + # namespace {}; + # + # These semicolons seems far more common than other kinds of + # redundant semicolons, possibly due to people converting classes + # to namespaces. For now we do not warn for this case. + # + # Try matching case 1 first. + match = Match(r'^(.*\)\s*)\{', line) + if match: + # Matched closing parenthesis (case 1). Check the token before the + # matching opening parenthesis, and don't warn if it looks like a + # macro. 
This avoids these false positives: + # - macro that defines a base class + # - multi-line macro that defines a base class + # - macro that defines the whole class-head + # + # But we still issue warnings for macros that we know are safe to + # warn, specifically: + # - TEST, TEST_F, TEST_P, MATCHER, MATCHER_P + # - TYPED_TEST + # - INTERFACE_DEF + # - EXCLUSIVE_LOCKS_REQUIRED, SHARED_LOCKS_REQUIRED, LOCKS_EXCLUDED: + # + # We implement a whitelist of safe macros instead of a blacklist of + # unsafe macros, even though the latter appears less frequently in + # google code and would have been easier to implement. This is because + # the downside for getting the whitelist wrong means some extra + # semicolons, while the downside for getting the blacklist wrong + # would result in compile errors. + # + # In addition to macros, we also don't want to warn on compound + # literals. + closing_brace_pos = match.group(1).rfind(')') + opening_parenthesis = ReverseCloseExpression( + clean_lines, linenum, closing_brace_pos) + if opening_parenthesis[2] > -1: + line_prefix = opening_parenthesis[0][0:opening_parenthesis[2]] + macro = Search(r'\b([A-Z_]+)\s*$', line_prefix) + if ((macro and + macro.group(1) not in ( + 'TEST', 'TEST_F', 'MATCHER', 'MATCHER_P', 'TYPED_TEST', + 'EXCLUSIVE_LOCKS_REQUIRED', 'SHARED_LOCKS_REQUIRED', + 'LOCKS_EXCLUDED', 'INTERFACE_DEF')) or + Search(r'\s+=\s*$', line_prefix)): + match = None + # Whitelist lambda function definition which also requires a ";" after + # closing brace + if match: + if Match(r'^.*\[.*\]\s*(.*\)\s*)\{', line): + match = None + + else: + # Try matching cases 2-3. + match = Match(r'^(.*(?:else|\)\s*const)\s*)\{', line) + if not match: + # Try matching cases 4-6. These are always matched on separate lines. + # + # Note that we can't simply concatenate the previous line to the + # current line and do a single match, otherwise we may output + # duplicate warnings for the blank line case: + # if (cond) { + # // blank line + # } + prevline = GetPreviousNonBlankLine(clean_lines, linenum)[0] + if prevline and Search(r'[;{}]\s*$', prevline): + match = Match(r'^(\s*)\{', line) + + # Check matching closing brace + if match: + (endline, endlinenum, endpos) = CloseExpression( + clean_lines, linenum, len(match.group(1))) + if endpos > -1 and Match(r'^\s*;', endline[endpos:]): + # Current {} pair is eligible for semicolon check, and we have found + # the redundant semicolon, output warning here. + # + # Note: because we are scanning forward for opening braces, and + # outputting warnings for the matching closing brace, if there are + # nested blocks with trailing semicolons, we will get the error + # messages in reversed order. + error(filename, endlinenum, 'readability/braces', 4, + "You don't need a ; after a }") + + +def CheckEmptyBlockBody(filename, clean_lines, linenum, error): + """Look for empty loop/conditional body with only a single semicolon. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + + # Search for loop keywords at the beginning of the line. Because only + # whitespaces are allowed before the keywords, this will also ignore most + # do-while-loops, since those lines should start with closing brace. + # + # We also check "if" blocks here, since an empty conditional block + # is likely an error. 
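+ # Illustrative example (hypothetical name): "while (CleanupDone());" is
+ # flagged by the logic below because a ';' directly follows the closing
+ # paren of the condition, while "while (CleanupDone()) {}" and
+ # "while (CleanupDone()) continue;" are not.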
+ line = clean_lines.elided[linenum] + matched = Match(r'\s*(for|while|if)\s*\(', line) + if matched: + # Find the end of the conditional expression + (end_line, end_linenum, end_pos) = CloseExpression( + clean_lines, linenum, line.find('(')) + + # Output warning if what follows the condition expression is a semicolon. + # No warning for all other cases, including whitespace or newline, since we + # have a separate check for semicolons preceded by whitespace. + if end_pos >= 0 and Match(r';', end_line[end_pos:]): + if matched.group(1) == 'if': + error(filename, end_linenum, 'whitespace/empty_conditional_body', 5, + 'Empty conditional bodies should use {}') + else: + error(filename, end_linenum, 'whitespace/empty_loop_body', 5, + 'Empty loop bodies should use {} or continue') + + +def CheckCheck(filename, clean_lines, linenum, error): + """Checks the use of CHECK and EXPECT macros. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + + # Decide the set of replacement macros that should be suggested + lines = clean_lines.elided + check_macro = None + start_pos = -1 + for macro in _CHECK_MACROS: + i = lines[linenum].find(macro) + if i >= 0: + check_macro = macro + + # Find opening parenthesis. Do a regular expression match here + # to make sure that we are matching the expected CHECK macro, as + # opposed to some other macro that happens to contain the CHECK + # substring. + matched = Match(r'^(.*\b' + check_macro + r'\s*)\(', lines[linenum]) + if not matched: + continue + start_pos = len(matched.group(1)) + break + if not check_macro or start_pos < 0: + # Don't waste time here if line doesn't contain 'CHECK' or 'EXPECT' + return + + # Find end of the boolean expression by matching parentheses + (last_line, end_line, end_pos) = CloseExpression( + clean_lines, linenum, start_pos) + if end_pos < 0: + return + if linenum == end_line: + expression = lines[linenum][start_pos + 1:end_pos - 1] + else: + expression = lines[linenum][start_pos + 1:] + for i in xrange(linenum + 1, end_line): + expression += lines[i] + expression += last_line[0:end_pos - 1] + + # Parse expression so that we can take parentheses into account. + # This avoids false positives for inputs like "CHECK((a < 4) == b)", + # which is not replaceable by CHECK_LE. + lhs = '' + rhs = '' + operator = None + while expression: + matched = Match(r'^\s*(<<|<<=|>>|>>=|->\*|->|&&|\|\||' + r'==|!=|>=|>|<=|<|\()(.*)$', expression) + if matched: + token = matched.group(1) + if token == '(': + # Parenthesized operand + expression = matched.group(2) + (end, _) = FindEndOfExpressionInLine(expression, 0, 1, '(', ')') + if end < 0: + return # Unmatched parenthesis + lhs += '(' + expression[0:end] + expression = expression[end:] + elif token in ('&&', '||'): + # Logical and/or operators. This means the expression + # contains more than one term, for example: + # CHECK(42 < a && a < b); + # + # These are not replaceable with CHECK_LE, so bail out early. + return + elif token in ('<<', '<<=', '>>', '>>=', '->*', '->'): + # Non-relational operator + lhs += token + expression = matched.group(2) + else: + # Relational operator + operator = token + rhs = matched.group(2) + break + else: + # Unparenthesized operand. Instead of appending to lhs one character + # at a time, we do another regular expression match to consume several + # characters at once if possible. 
Trivial benchmark shows that this + # is more efficient when the operands are longer than a single + # character, which is generally the case. + matched = Match(r'^([^-=!<>()&|]+)(.*)$', expression) + if not matched: + matched = Match(r'^(\s*\S)(.*)$', expression) + if not matched: + break + lhs += matched.group(1) + expression = matched.group(2) + + # Only apply checks if we got all parts of the boolean expression + if not (lhs and operator and rhs): + return + + # Check that rhs do not contain logical operators. We already know + # that lhs is fine since the loop above parses out && and ||. + if rhs.find('&&') > -1 or rhs.find('||') > -1: + return + + # At least one of the operands must be a constant literal. This is + # to avoid suggesting replacements for unprintable things like + # CHECK(variable != iterator) + # + # The following pattern matches decimal, hex integers, strings, and + # characters (in that order). + lhs = lhs.strip() + rhs = rhs.strip() + match_constant = r'^([-+]?(\d+|0[xX][0-9a-fA-F]+)[lLuU]{0,3}|".*"|\'.*\')$' + if Match(match_constant, lhs) or Match(match_constant, rhs): + # Note: since we know both lhs and rhs, we can provide a more + # descriptive error message like: + # Consider using CHECK_EQ(x, 42) instead of CHECK(x == 42) + # Instead of: + # Consider using CHECK_EQ instead of CHECK(a == b) + # + # We are still keeping the less descriptive message because if lhs + # or rhs gets long, the error message might become unreadable. + error(filename, linenum, 'readability/check', 2, + 'Consider using %s instead of %s(a %s b)' % ( + _CHECK_REPLACEMENT[check_macro][operator], + check_macro, operator)) + + +def CheckAltTokens(filename, clean_lines, linenum, error): + """Check alternative keywords being used in boolean expressions. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + error: The function to call with any errors found. + """ + line = clean_lines.elided[linenum] + + # Avoid preprocessor lines + if Match(r'^\s*#', line): + return + + # Last ditch effort to avoid multi-line comments. This will not help + # if the comment started before the current line or ended after the + # current line, but it catches most of the false positives. At least, + # it provides a way to workaround this warning for people who use + # multi-line comments in preprocessor macros. + # + # TODO(unknown): remove this once cpplint has better support for + # multi-line comments. + if line.find('/*') >= 0 or line.find('*/') >= 0: + return + + for match in _ALT_TOKEN_REPLACEMENT_PATTERN.finditer(line): + error(filename, linenum, 'readability/alt_tokens', 2, + 'Use operator %s instead of %s' % ( + _ALT_TOKEN_REPLACEMENT[match.group(1)], match.group(1))) + + +def GetLineWidth(line): + """Determines the width of the line in column positions. + + Args: + line: A string, which may be a Unicode string. + + Returns: + The width of the line in column positions, accounting for Unicode + combining characters and wide characters. + """ + if isinstance(line, unicode): + width = 0 + for uc in unicodedata.normalize('NFC', line): + if unicodedata.east_asian_width(uc) in ('W', 'F'): + width += 2 + elif not unicodedata.combining(uc): + width += 1 + return width + else: + return len(line) + + +def CheckStyle(filename, clean_lines, linenum, file_extension, nesting_state, + error): + """Checks rules from the 'C++ style rules' section of cppguide.html. 
+ + Most of these rules are hard to test (naming, comment style), but we + do what we can. In particular we check for 2-space indents, line lengths, + tab usage, spaces inside code, etc. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + file_extension: The extension (without the dot) of the filename. + nesting_state: A _NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: The function to call with any errors found. + """ + + # Don't use "elided" lines here, otherwise we can't check commented lines. + # Don't want to use "raw" either, because we don't want to check inside C++11 + # raw strings, + raw_lines = clean_lines.lines_without_raw_strings + line = raw_lines[linenum] + + if line.find('\t') != -1: + error(filename, linenum, 'whitespace/tab', 1, + 'Tab found; better to use spaces') + + # One or three blank spaces at the beginning of the line is weird; it's + # hard to reconcile that with 2-space indents. + # NOTE: here are the conditions rob pike used for his tests. Mine aren't + # as sophisticated, but it may be worth becoming so: RLENGTH==initial_spaces + # if(RLENGTH > 20) complain = 0; + # if(match($0, " +(error|private|public|protected):")) complain = 0; + # if(match(prev, "&& *$")) complain = 0; + # if(match(prev, "\\|\\| *$")) complain = 0; + # if(match(prev, "[\",=><] *$")) complain = 0; + # if(match($0, " <<")) complain = 0; + # if(match(prev, " +for \\(")) complain = 0; + # if(prevodd && match(prevprev, " +for \\(")) complain = 0; + initial_spaces = 0 + cleansed_line = clean_lines.elided[linenum] + while initial_spaces < len(line) and line[initial_spaces] == ' ': + initial_spaces += 1 + if line and line[-1].isspace(): + error(filename, linenum, 'whitespace/end_of_line', 4, + 'Line ends in whitespace. Consider deleting these extra spaces.') + # There are certain situations we allow one space, notably for section labels + elif ((initial_spaces == 1 or initial_spaces == 3) and + not Match(r'\s*\w+\s*:\s*$', cleansed_line)): + error(filename, linenum, 'whitespace/indent', 3, + 'Weird number of spaces at line-start. ' + 'Are you using a 2-space indent?') + + # Check if the line is a header guard. + is_header_guard = False + if file_extension == 'h': + cppvar = GetHeaderGuardCPPVariable(filename) + if (line.startswith('#ifndef %s' % cppvar) or + line.startswith('#define %s' % cppvar) or + line.startswith('#endif // %s' % cppvar)): + is_header_guard = True + # #include lines and header guards can be long, since there's no clean way to + # split them. + # + # URLs can be long too. It's possible to split these, but it makes them + # harder to cut&paste. + # + # The "$Id:...$" comment may also get very long without it being the + # developers fault. + if (not line.startswith('#include') and not is_header_guard and + not Match(r'^\s*//.*http(s?)://\S*$', line) and + not Match(r'^// \$Id:.*#[0-9]+ \$$', line)): + line_width = GetLineWidth(line) + extended_length = int((_line_length * 1.25)) + if line_width > extended_length: + error(filename, linenum, 'whitespace/line_length', 4, + 'Lines should very rarely be longer than %i characters' % + extended_length) + elif line_width > _line_length: + error(filename, linenum, 'whitespace/line_length', 2, + 'Lines should be <= %i characters long' % _line_length) + + if (cleansed_line.count(';') > 1 and + # for loops are allowed two ;'s (and may run over two lines). 
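+ # e.g. (illustrative) "for (i = 0; i < 10; ++i) {" stays silent here,
+ # while "a = 1; b = 2;" on a single line is reported below.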
+ cleansed_line.find('for') == -1 and + (GetPreviousNonBlankLine(clean_lines, linenum)[0].find('for') == -1 or + GetPreviousNonBlankLine(clean_lines, linenum)[0].find(';') != -1) and + # It's ok to have many commands in a switch case that fits in 1 line + not ((cleansed_line.find('case ') != -1 or + cleansed_line.find('default:') != -1) and + cleansed_line.find('break;') != -1)): + error(filename, linenum, 'whitespace/newline', 0, + 'More than one command on the same line') + + # Some more style checks + CheckBraces(filename, clean_lines, linenum, error) + CheckEmptyBlockBody(filename, clean_lines, linenum, error) + CheckAccess(filename, clean_lines, linenum, nesting_state, error) + CheckSpacing(filename, clean_lines, linenum, nesting_state, error) + CheckCheck(filename, clean_lines, linenum, error) + CheckAltTokens(filename, clean_lines, linenum, error) + classinfo = nesting_state.InnermostClass() + if classinfo: + CheckSectionSpacing(filename, clean_lines, classinfo, linenum, error) + + +_RE_PATTERN_INCLUDE_NEW_STYLE = re.compile(r'#include +"[^/]+\.h"') +_RE_PATTERN_INCLUDE = re.compile(r'^\s*#\s*include\s*([<"])([^>"]*)[>"].*$') +# Matches the first component of a filename delimited by -s and _s. That is: +# _RE_FIRST_COMPONENT.match('foo').group(0) == 'foo' +# _RE_FIRST_COMPONENT.match('foo.cc').group(0) == 'foo' +# _RE_FIRST_COMPONENT.match('foo-bar_baz.cc').group(0) == 'foo' +# _RE_FIRST_COMPONENT.match('foo_bar-baz.cc').group(0) == 'foo' +_RE_FIRST_COMPONENT = re.compile(r'^[^-_.]+') + + +def _DropCommonSuffixes(filename): + """Drops common suffixes like _test.cc or -inl.h from filename. + + For example: + >>> _DropCommonSuffixes('foo/foo-inl.h') + 'foo/foo' + >>> _DropCommonSuffixes('foo/bar/foo.cc') + 'foo/bar/foo' + >>> _DropCommonSuffixes('foo/foo_internal.h') + 'foo/foo' + >>> _DropCommonSuffixes('foo/foo_unusualinternal.h') + 'foo/foo_unusualinternal' + + Args: + filename: The input filename. + + Returns: + The filename with the common suffix removed. + """ + for suffix in ('test.cc', 'regtest.cc', 'unittest.cc', + 'inl.h', 'impl.h', 'internal.h'): + if (filename.endswith(suffix) and len(filename) > len(suffix) and + filename[-len(suffix) - 1] in ('-', '_')): + return filename[:-len(suffix) - 1] + return os.path.splitext(filename)[0] + + +def _IsTestFilename(filename): + """Determines if the given filename has a suffix that identifies it as a test. + + Args: + filename: The input filename. + + Returns: + True if 'filename' looks like a test, False otherwise. + """ + if (filename.endswith('_test.cc') or + filename.endswith('_unittest.cc') or + filename.endswith('_regtest.cc')): + return True + else: + return False + + +def _ClassifyInclude(fileinfo, include, is_system): + """Figures out what kind of header 'include' is. + + Args: + fileinfo: The current file cpplint is running over. A FileInfo instance. + include: The path to a #included file. + is_system: True if the #include used <> rather than "". + + Returns: + One of the _XXX_HEADER constants. + + For example: + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'stdio.h', True) + _C_SYS_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'string', True) + _CPP_SYS_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/foo.h', False) + _LIKELY_MY_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo_unknown_extension.cc'), + ... 
'bar/foo_other_ext.h', False) + _POSSIBLE_MY_HEADER + >>> _ClassifyInclude(FileInfo('foo/foo.cc'), 'foo/bar.h', False) + _OTHER_HEADER + """ + # This is a list of all standard c++ header files, except + # those already checked for above. + is_cpp_h = include in _CPP_HEADERS + + if is_system: + if is_cpp_h: + return _CPP_SYS_HEADER + else: + return _C_SYS_HEADER + + # If the target file and the include we're checking share a + # basename when we drop common extensions, and the include + # lives in . , then it's likely to be owned by the target file. + target_dir, target_base = ( + os.path.split(_DropCommonSuffixes(fileinfo.RepositoryName()))) + include_dir, include_base = os.path.split(_DropCommonSuffixes(include)) + if target_base == include_base and ( + include_dir == target_dir or + include_dir == os.path.normpath(target_dir + '/../public')): + return _LIKELY_MY_HEADER + + # If the target and include share some initial basename + # component, it's possible the target is implementing the + # include, so it's allowed to be first, but we'll never + # complain if it's not there. + target_first_component = _RE_FIRST_COMPONENT.match(target_base) + include_first_component = _RE_FIRST_COMPONENT.match(include_base) + if (target_first_component and include_first_component and + target_first_component.group(0) == + include_first_component.group(0)): + return _POSSIBLE_MY_HEADER + + return _OTHER_HEADER + + + +def CheckIncludeLine(filename, clean_lines, linenum, include_state, error): + """Check rules that are applicable to #include lines. + + Strings on #include lines are NOT removed from elided line, to make + certain tasks easier. However, to prevent false positives, checks + applicable to #include lines in CheckLanguage must be put here. + + Args: + filename: The name of the current file. + clean_lines: A CleansedLines instance containing the file. + linenum: The number of the line to check. + include_state: An _IncludeState instance in which the headers are inserted. + error: The function to call with any errors found. + """ + fileinfo = FileInfo(filename) + + line = clean_lines.lines[linenum] + + # "include" should use the new style "foo/bar.h" instead of just "bar.h" + if _RE_PATTERN_INCLUDE_NEW_STYLE.search(line): + error(filename, linenum, 'build/include', 4, + 'Include the directory when naming .h files') + + # we shouldn't include a file more than once. actually, there are a + # handful of instances where doing so is okay, but in general it's + # not. + match = _RE_PATTERN_INCLUDE.search(line) + if match: + include = match.group(2) + is_system = (match.group(1) == '<') + if include in include_state: + error(filename, linenum, 'build/include', 4, + '"%s" already included at %s:%s' % + (include, filename, include_state[include])) + else: + include_state[include] = linenum + + # We want to ensure that headers appear in the right order: + # 1) for foo.cc, foo.h (preferred location) + # 2) c system files + # 3) cpp system files + # 4) for foo.cc, foo.h (deprecated location) + # 5) other google headers + # + # We classify each include statement as one of those 5 types + # using a number of techniques. The include_state object keeps + # track of the highest type seen, and complains if we see a + # lower type after that. + error_message = include_state.CheckNextIncludeOrder( + _ClassifyInclude(fileinfo, include, is_system)) + if error_message: + error(filename, linenum, 'build/include_order', 4, + '%s. Should be: %s.h, c system, c++ system, other.' 
%
+ (error_message, fileinfo.BaseName()))
+ canonical_include = include_state.CanonicalizeAlphabeticalOrder(include)
+ if not include_state.IsInAlphabeticalOrder(
+ clean_lines, linenum, canonical_include):
+ error(filename, linenum, 'build/include_alpha', 4,
+ 'Include "%s" not in alphabetical order' % include)
+ include_state.SetLastHeader(canonical_include)
+
+ # Look for any of the stream classes that are part of standard C++.
+ match = _RE_PATTERN_INCLUDE.match(line)
+ if match:
+ include = match.group(2)
+ if Match(r'(f|ind|io|i|o|parse|pf|stdio|str|)?stream$', include):
+ # Many unit tests use cout, so we exempt them.
+ if not _IsTestFilename(filename):
+ error(filename, linenum, 'readability/streams', 3,
+ 'Streams are highly discouraged.')
+
+
+def _GetTextInside(text, start_pattern):
+ r"""Retrieves all the text between matching open and close parentheses.
+
+ Given a string of lines and a regular expression string, retrieve all the text
+ following the expression and between opening punctuation symbols like
+ (, [, or {, and the matching close-punctuation symbol. This properly handles
+ nested occurrences of the punctuations, so for text like
+ printf(a(), b(c()));
+ a call to _GetTextInside(text, r'printf\(') will return 'a(), b(c())'.
+ start_pattern must match a string having an open punctuation symbol at the end.
+
+ Args:
+ text: The lines to extract text from. Its comments and strings must be elided.
+ It can be a single line or span multiple lines.
+ start_pattern: The regexp string indicating where to start extracting
+ the text.
+ Returns:
+ The extracted text.
+ None if either the opening string or ending punctuation could not be found.
+ """
+ # TODO(sugawarayu): Audit cpplint.py to see what places could be profitably
+ # rewritten to use _GetTextInside (and use inferior regexp matching today).
+
+ # Map each opening punctuation to its matching close-punctuation.
+ matching_punctuation = {'(': ')', '{': '}', '[': ']'}
+ closing_punctuation = set(matching_punctuation.itervalues())
+
+ # Find the position to start extracting text.
+ match = re.search(start_pattern, text, re.M)
+ if not match: # start_pattern not found in text.
+ return None
+ start_position = match.end(0)
+
+ assert start_position > 0, (
+ 'start_pattern must end with an opening punctuation.')
+ assert text[start_position - 1] in matching_punctuation, (
+ 'start_pattern must end with an opening punctuation.')
+ # Stack of closing punctuations we expect to have in text after position.
+ punctuation_stack = [matching_punctuation[text[start_position - 1]]]
+ position = start_position
+ while punctuation_stack and position < len(text):
+ if text[position] == punctuation_stack[-1]:
+ punctuation_stack.pop()
+ elif text[position] in closing_punctuation:
+ # A closing punctuation without matching opening punctuations.
+ return None
+ elif text[position] in matching_punctuation:
+ punctuation_stack.append(matching_punctuation[text[position]])
+ position += 1
+ if punctuation_stack:
+ # Opening punctuations left without matching close-punctuations.
+ return None
+ # punctuations match.
+ return text[start_position:position - 1]
+
+
+# Patterns for matching call-by-reference parameters.
+#
+# Supports nested templates up to 2 levels deep using this messy pattern:
+# < (?: < (?: < [^<>]*
+# >
+# | [^<>] )*
+# >
+# | [^<>] )*
+# >
+_RE_PATTERN_IDENT = r'[_a-zA-Z]\w*' # =~ [[:alpha:]][[:alnum:]]*
+_RE_PATTERN_TYPE = (
+ r'(?:const\s+)?(?:typename\s+|class\s+|struct\s+|union\s+|enum\s+)?'
+ r'(?:\w|'
+ r'\s*<(?:<(?:<[^<>]*>|[^<>])*>|[^<>])*>|'
+ r'::)+')
+# A call-by-reference parameter ends with '& identifier'.
+_RE_PATTERN_REF_PARAM = re.compile(
+ r'(' + _RE_PATTERN_TYPE + r'(?:\s*(?:\bconst\b|[*]))*\s*'
+ r'&\s*' + _RE_PATTERN_IDENT + r')\s*(?:=[^,()]+)?[,)]')
+# A call-by-const-reference parameter either ends with 'const& identifier'
+# or looks like 'const type& identifier' when 'type' is atomic.
+_RE_PATTERN_CONST_REF_PARAM = (
+ r'(?:.*\s*\bconst\s*&\s*' + _RE_PATTERN_IDENT +
+ r'|const\s+' + _RE_PATTERN_TYPE + r'\s*&\s*' + _RE_PATTERN_IDENT + r')')
+
+
+def CheckLanguage(filename, clean_lines, linenum, file_extension,
+ include_state, nesting_state, error):
+ """Checks rules from the 'C++ language rules' section of cppguide.html.
+
+ Some of these rules are hard to test (function overloading, using
+ uint32 inappropriately), but we do the best we can.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ file_extension: The extension (without the dot) of the filename.
+ include_state: An _IncludeState instance in which the headers are inserted.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: The function to call with any errors found.
+ """
+ # If the line is empty or consists entirely of a comment, no need to
+ # check it.
+ line = clean_lines.elided[linenum]
+ if not line:
+ return
+
+ match = _RE_PATTERN_INCLUDE.search(line)
+ if match:
+ CheckIncludeLine(filename, clean_lines, linenum, include_state, error)
+ return
+
+ # Reset include state across preprocessor directives. This is meant
+ # to silence warnings for conditional includes.
+ if Match(r'^\s*#\s*(?:ifdef|elif|else|endif)\b', line):
+ include_state.ResetSection()
+
+ # Make Windows paths like Unix.
+ fullname = os.path.abspath(filename).replace('\\', '/')
+
+ # TODO(unknown): figure out if they're using default arguments in fn proto.
+
+ # Check to see if they're using a conversion function cast.
+ # I just try to capture the most common basic types, though there are more.
+ # Parameterless conversion functions, such as bool(), are allowed as they are
+ # probably a member operator declaration or default constructor.
+ match = Search(
+ r'(\bnew\s+)?\b' # Grab 'new' operator, if it's there
+ r'(int|float|double|bool|char|int32|uint32|int64|uint64)'
+ r'(\([^)].*)', line)
+ if match:
+ matched_new = match.group(1)
+ matched_type = match.group(2)
+ matched_funcptr = match.group(3)
+
+ # gMock methods are defined using some variant of MOCK_METHODx(name, type)
+ # where type may be float(), int(string), etc. Without context they are
+ # virtually indistinguishable from int(x) casts. Likewise, gMock's
+ # MockCallback takes a template parameter of the form return_type(arg_type),
+ # which looks much like the cast we're trying to detect.
+ #
+ # std::function<> wrapper has a similar problem.
+ #
+ # Return types for function pointers also look like casts if they
+ # don't have an extra space.
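+ #
+ # Illustrative example (hypothetical name): "int(*fn)(bool)" is a
+ # function pointer declaration rather than a cast, which is why
+ # matched_funcptr is re-examined below before reporting.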
+ if (matched_new is None and # If new operator, then this isn't a cast + not (Match(r'^\s*MOCK_(CONST_)?METHOD\d+(_T)?\(', line) or + Search(r'\bMockCallback<.*>', line) or + Search(r'\bstd::function<.*>', line)) and + not (matched_funcptr and + Match(r'\((?:[^() ]+::\s*\*\s*)?[^() ]+\)\s*\(', + matched_funcptr))): + # Try a bit harder to catch gmock lines: the only place where + # something looks like an old-style cast is where we declare the + # return type of the mocked method, and the only time when we + # are missing context is if MOCK_METHOD was split across + # multiple lines. The missing MOCK_METHOD is usually one or two + # lines back, so scan back one or two lines. + # + # It's not possible for gmock macros to appear in the first 2 + # lines, since the class head + section name takes up 2 lines. + if (linenum < 2 or + not (Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\((?:\S+,)?\s*$', + clean_lines.elided[linenum - 1]) or + Match(r'^\s*MOCK_(?:CONST_)?METHOD\d+(?:_T)?\(\s*$', + clean_lines.elided[linenum - 2]))): + error(filename, linenum, 'readability/casting', 4, + 'Using deprecated casting style. ' + 'Use static_cast<%s>(...) instead' % + matched_type) + + CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], + 'static_cast', + r'\((int|float|double|bool|char|u?int(16|32|64))\)', error) + + # This doesn't catch all cases. Consider (const char * const)"hello". + # + # (char *) "foo" should always be a const_cast (reinterpret_cast won't + # compile). + if CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], + 'const_cast', r'\((char\s?\*+\s?)\)\s*"', error): + pass + else: + # Check pointer casts for other than string constants + CheckCStyleCast(filename, linenum, line, clean_lines.raw_lines[linenum], + 'reinterpret_cast', r'\((\w+\s?\*+\s?)\)', error) + + # In addition, we look for people taking the address of a cast. This + # is dangerous -- casts can assign to temporaries, so the pointer doesn't + # point where you think. + match = Search( + r'(?:&\(([^)]+)\)[\w(])|' + r'(?:&(static|dynamic|down|reinterpret)_cast\b)', line) + if match and match.group(1) != '*': + error(filename, linenum, 'runtime/casting', 4, + ('Are you taking an address of a cast? ' + 'This is dangerous: could be a temp var. ' + 'Take the address before doing the cast, rather than after')) + + # Create an extended_line, which is the concatenation of the current and + # next lines, for more effective checking of code that may span more than one + # line. + if linenum + 1 < clean_lines.NumLines(): + extended_line = line + clean_lines.elided[linenum + 1] + else: + extended_line = line + + # Check for people declaring static/global STL strings at the top level. + # This is dangerous because the C++ language does not guarantee that + # globals with constructors are initialized before the first access. + match = Match( + r'((?:|static +)(?:|const +))string +([a-zA-Z0-9_:]+)\b(.*)', + line) + # Make sure it's not a function. + # Function template specialization looks like: "string foo(...". + # Class template definitions look like: "string Foo::Method(...". + # + # Also ignore things that look like operators. These are matched separately + # because operator names cross non-word boundaries. If we change the pattern + # above, we would decrease the accuracy of matching identifiers. 
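+ # Illustrative example (hypothetical name): a file-scope
+ # "static const string kName = ...;" is reported below, with
+ # "static const char kName[] = ...;" as the suggested replacement.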
+ if (match and + not Search(r'\boperator\W', line) and + not Match(r'\s*(<.*>)?(::[a-zA-Z0-9_]+)?\s*\(([^"]|$)', match.group(3))): + error(filename, linenum, 'runtime/string', 4, + 'For a static/global string constant, use a C style string instead: ' + '"%schar %s[]".' % + (match.group(1), match.group(2))) + + if Search(r'\b([A-Za-z0-9_]*_)\(\1\)', line): + error(filename, linenum, 'runtime/init', 4, + 'You seem to be initializing a member variable with itself.') + + if file_extension == 'h': + # TODO(unknown): check that 1-arg constructors are explicit. + # How to tell it's a constructor? + # (handled in CheckForNonStandardConstructs for now) + # TODO(unknown): check that classes have DISALLOW_EVIL_CONSTRUCTORS + # (level 1 error) + pass + + # Check if people are using the verboten C basic types. The only exception + # we regularly allow is "unsigned short port" for port. + if Search(r'\bshort port\b', line): + if not Search(r'\bunsigned short port\b', line): + error(filename, linenum, 'runtime/int', 4, + 'Use "unsigned short" for ports, not "short"') + else: + match = Search(r'\b(short|long(?! +double)|long long)\b', line) + if match: + error(filename, linenum, 'runtime/int', 4, + 'Use int16/int64/etc, rather than the C type %s' % match.group(1)) + + # When snprintf is used, the second argument shouldn't be a literal. + match = Search(r'snprintf\s*\(([^,]*),\s*([0-9]*)\s*,', line) + if match and match.group(2) != '0': + # If 2nd arg is zero, snprintf is used to calculate size. + error(filename, linenum, 'runtime/printf', 3, + 'If you can, use sizeof(%s) instead of %s as the 2nd arg ' + 'to snprintf.' % (match.group(1), match.group(2))) + + # Check if some verboten C functions are being used. + if Search(r'\bsprintf\b', line): + error(filename, linenum, 'runtime/printf', 5, + 'Never use sprintf. Use snprintf instead.') + match = Search(r'\b(strcpy|strcat)\b', line) + if match: + error(filename, linenum, 'runtime/printf', 4, + 'Almost always, snprintf is better than %s' % match.group(1)) + + # Check if some verboten operator overloading is going on + # TODO(unknown): catch out-of-line unary operator&: + # class X {}; + # int operator&(const X& x) { return 42; } // unary operator& + # The trick is it's hard to tell apart from binary operator&: + # class Y { int operator&(const Y& x) { return 23; } }; // binary operator& + if Search(r'\boperator\s*&\s*\(\s*\)', line): + error(filename, linenum, 'runtime/operator', 4, + 'Unary operator& is dangerous. Do not use it.') + + # Check for suspicious usage of "if" like + # } if (a == b) { + if Search(r'\}\s*if\s*\(', line): + error(filename, linenum, 'readability/braces', 4, + 'Did you mean "else if"? If not, start a new line for "if".') + + # Check for potential format string bugs like printf(foo). + # We constrain the pattern not to pick things like DocidForPrintf(foo). + # Not perfect but it can catch printf(foo.c_str()) and printf(foo->c_str()) + # TODO(sugawarayu): Catch the following case. Need to change the calling + # convention of the whole function to process multiple line to handle it. + # printf( + # boy_this_is_a_really_long_variable_that_cannot_fit_on_the_prev_line); + printf_args = _GetTextInside(line, r'(?i)\b(string)?printf\s*\(') + if printf_args: + match = Match(r'([\w.\->()]+)$', printf_args) + if match and match.group(1) != '__VA_ARGS__': + function_name = re.search(r'\b((?:string)?printf)\s*\(', + line, re.I).group(1) + error(filename, linenum, 'runtime/printf', 4, + 'Potential format string bug. Do %s("%%s", %s) instead.' 
+ % (function_name, match.group(1))) + + # Check for potential memset bugs like memset(buf, sizeof(buf), 0). + match = Search(r'memset\s*\(([^,]*),\s*([^,]*),\s*0\s*\)', line) + if match and not Match(r"^''|-?[0-9]+|0x[0-9A-Fa-f]$", match.group(2)): + error(filename, linenum, 'runtime/memset', 4, + 'Did you mean "memset(%s, 0, %s)"?' + % (match.group(1), match.group(2))) + + if Search(r'\busing namespace\b', line): + error(filename, linenum, 'build/namespaces', 5, + 'Do not use namespace using-directives. ' + 'Use using-declarations instead.') + + # Detect variable-length arrays. + match = Match(r'\s*(.+::)?(\w+) [a-z]\w*\[(.+)];', line) + if (match and match.group(2) != 'return' and match.group(2) != 'delete' and + match.group(3).find(']') == -1): + # Split the size using space and arithmetic operators as delimiters. + # If any of the resulting tokens are not compile time constants then + # report the error. + tokens = re.split(r'\s|\+|\-|\*|\/|<<|>>]', match.group(3)) + is_const = True + skip_next = False + for tok in tokens: + if skip_next: + skip_next = False + continue + + if Search(r'sizeof\(.+\)', tok): continue + if Search(r'arraysize\(\w+\)', tok): continue + + tok = tok.lstrip('(') + tok = tok.rstrip(')') + if not tok: continue + if Match(r'\d+', tok): continue + if Match(r'0[xX][0-9a-fA-F]+', tok): continue + if Match(r'k[A-Z0-9]\w*', tok): continue + if Match(r'(.+::)?k[A-Z0-9]\w*', tok): continue + if Match(r'(.+::)?[A-Z][A-Z0-9_]*', tok): continue + # A catch all for tricky sizeof cases, including 'sizeof expression', + # 'sizeof(*type)', 'sizeof(const type)', 'sizeof(struct StructName)' + # requires skipping the next token because we split on ' ' and '*'. + if tok.startswith('sizeof'): + skip_next = True + continue + is_const = False + break + if not is_const: + error(filename, linenum, 'runtime/arrays', 1, + 'Do not use variable-length arrays. Use an appropriately named ' + "('k' followed by CamelCase) compile-time constant for the size.") + + # If DISALLOW_EVIL_CONSTRUCTORS, DISALLOW_COPY_AND_ASSIGN, or + # DISALLOW_IMPLICIT_CONSTRUCTORS is present, then it should be the last thing + # in the class declaration. + match = Match( + (r'\s*' + r'(DISALLOW_(EVIL_CONSTRUCTORS|COPY_AND_ASSIGN|IMPLICIT_CONSTRUCTORS))' + r'\(.*\);$'), + line) + if match and linenum + 1 < clean_lines.NumLines(): + next_line = clean_lines.elided[linenum + 1] + # We allow some, but not all, declarations of variables to be present + # in the statement that defines the class. The [\w\*,\s]* fragment of + # the regular expression below allows users to declare instances of + # the class or pointers to instances, but not less common types such + # as function pointers or arrays. It's a tradeoff between allowing + # reasonable code and avoiding trying to parse more C++ using regexps. + if not Search(r'^\s*}[\w\*,\s]*;', next_line): + error(filename, linenum, 'readability/constructors', 3, + match.group(1) + ' should be the last thing in the class') + + # Check for use of unnamed namespaces in header files. Registration + # macros are typically OK, so we allow use of "namespace {" on lines + # that end with backslashes. + if (file_extension == 'h' + and Search(r'\bnamespace\s*{', line) + and line[-1] != '\\'): + error(filename, linenum, 'build/namespaces', 4, + 'Do not use unnamed namespaces in header files. 
See '
+ 'http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml#Namespaces'
+ ' for more information.')
+
+def CheckForNonConstReference(filename, clean_lines, linenum,
+ nesting_state, error):
+ """Check for non-const references.
+
+ Separate from CheckLanguage since it scans backwards from current
+ line, instead of scanning forward.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ linenum: The number of the line to check.
+ nesting_state: A _NestingState instance which maintains information about
+ the current stack of nested blocks being parsed.
+ error: The function to call with any errors found.
+ """
+ # Do nothing if there is no '&' on current line.
+ line = clean_lines.elided[linenum]
+ if '&' not in line:
+ return
+
+ # Long type names may be broken across multiple lines, usually in one
+ # of these forms:
+ # LongType
+ # ::LongTypeContinued &identifier
+ # LongType::
+ # LongTypeContinued &identifier
+ # LongType<
+ # ...>::LongTypeContinued &identifier
+ #
+ # If we detected a type split across two lines, join the previous
+ # line to current line so that we can match const references
+ # accordingly.
+ #
+ # Note that this only scans back one line, since scanning back an
+ # arbitrary number of lines would be expensive. If you have a type
+ # that spans more than 2 lines, please use a typedef.
+ if linenum > 1:
+ previous = None
+ if Match(r'\s*::(?:[\w<>]|::)+\s*&\s*\S', line):
+ # previous_line\n + ::current_line
+ previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+[\w<>])\s*$',
+ clean_lines.elided[linenum - 1])
+ elif Match(r'\s*[a-zA-Z_]([\w<>]|::)+\s*&\s*\S', line):
+ # previous_line::\n + current_line
+ previous = Search(r'\b((?:const\s*)?(?:[\w<>]|::)+::)\s*$',
+ clean_lines.elided[linenum - 1])
+ if previous:
+ line = previous.group(1) + line.lstrip()
+ else:
+ # Check for templated parameter that is split across multiple lines
+ endpos = line.rfind('>')
+ if endpos > -1:
+ (_, startline, startpos) = ReverseCloseExpression(
+ clean_lines, linenum, endpos)
+ if startpos > -1 and startline < linenum:
+ # Found the matching < on an earlier line, collect all
+ # pieces up to current line.
+ line = ''
+ for i in xrange(startline, linenum + 1):
+ line += clean_lines.elided[i].strip()
+
+ # Check for non-const references in function parameters. A single '&' may
+ # be found in the following places:
+ # inside expression: binary & for bitwise AND
+ # inside expression: unary & for taking the address of something
+ # inside declarators: reference parameter
+ # We will exclude the first two cases by checking that we are not inside a
+ # function body, including one that was just introduced by a trailing '{'.
+ # TODO(unknown): Doesn't account for preprocessor directives.
+ # TODO(unknown): Doesn't account for 'catch(Exception& e)' [rare].
+ check_params = False
+ if not nesting_state.stack:
+ check_params = True # top level
+ elif (isinstance(nesting_state.stack[-1], _ClassInfo) or
+ isinstance(nesting_state.stack[-1], _NamespaceInfo)):
+ check_params = True # within class or namespace
+ elif Match(r'.*{\s*$', line):
+ if (len(nesting_state.stack) == 1 or
+ isinstance(nesting_state.stack[-2], _ClassInfo) or
+ isinstance(nesting_state.stack[-2], _NamespaceInfo)):
+ check_params = True # just opened global/class/namespace block
+ # We allow non-const references in a few standard places, like functions
+ # called "swap()" or iostream operators like "<<" or ">>". Do not check
+ # those function parameters.
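+ # Illustrative example (hypothetical names): "void swap(Foo& a, Foo& b)"
+ # matches the whitelist below, while "void Process(Foo& input)" would be
+ # reported as a possible non-const reference.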
+ # + # We also accept & in static_assert, which looks like a function but + # it's actually a declaration expression. + whitelisted_functions = (r'(?:[sS]wap(?:<\w:+>)?|' + r'operator\s*[<>][<>]|' + r'static_assert|COMPILE_ASSERT' + r')\s*\(') + if Search(whitelisted_functions, line): + check_params = False + elif not Search(r'\S+\([^)]*$', line): + # Don't see a whitelisted function on this line. Actually we + # didn't see any function name on this line, so this is likely a + # multi-line parameter list. Try a bit harder to catch this case. + for i in xrange(2): + if (linenum > i and + Search(whitelisted_functions, clean_lines.elided[linenum - i - 1])): + check_params = False + break + + if check_params: + decls = ReplaceAll(r'{[^}]*}', ' ', line) # exclude function body + for parameter in re.findall(_RE_PATTERN_REF_PARAM, decls): + if not Match(_RE_PATTERN_CONST_REF_PARAM, parameter): + error(filename, linenum, 'runtime/references', 2, + 'Is this a non-const reference? ' + 'If so, make const or use a pointer: ' + + ReplaceAll(' *<', '<', parameter)) + + +def CheckCStyleCast(filename, linenum, line, raw_line, cast_type, pattern, + error): + """Checks for a C-style cast by looking for the pattern. + + Args: + filename: The name of the current file. + linenum: The number of the line to check. + line: The line of code to check. + raw_line: The raw line of code to check, with comments. + cast_type: The string for the C++ cast to recommend. This is either + reinterpret_cast, static_cast, or const_cast, depending. + pattern: The regular expression used to find C-style casts. + error: The function to call with any errors found. + + Returns: + True if an error was emitted. + False otherwise. + """ + match = Search(pattern, line) + if not match: + return False + + # Exclude lines with sizeof, since sizeof looks like a cast. + sizeof_match = Match(r'.*sizeof\s*$', line[0:match.start(1) - 1]) + if sizeof_match: + return False + + # operator++(int) and operator--(int) + if (line[0:match.start(1) - 1].endswith(' operator++') or + line[0:match.start(1) - 1].endswith(' operator--')): + return False + + # A single unnamed argument for a function tends to look like old + # style cast. If we see those, don't issue warnings for deprecated + # casts, instead issue warnings for unnamed arguments where + # appropriate. + # + # These are things that we want warnings for, since the style guide + # explicitly require all parameters to be named: + # Function(int); + # Function(int) { + # ConstMember(int) const; + # ConstMember(int) const { + # ExceptionMember(int) throw (...); + # ExceptionMember(int) throw (...) { + # PureVirtual(int) = 0; + # + # These are functions of some sort, where the compiler would be fine + # if they had named parameters, but people often omit those + # identifiers to reduce clutter: + # (FunctionPointer)(int); + # (FunctionPointer)(int) = value; + # Function((function_pointer_arg)(int)) + # ; + # <(FunctionPointerTemplateArgument)(int)>; + remainder = line[match.end(0):] + if Match(r'^\s*(?:;|const\b|throw\b|=|>|\{|\))', remainder): + # Looks like an unnamed parameter. + + # Don't warn on any kind of template arguments. + if Match(r'^\s*>', remainder): + return False + + # Don't warn on assignments to function pointers, but keep warnings for + # unnamed parameters to pure virtual functions. Note that this pattern + # will also pass on assignments of "0" to function pointers, but the + # preferred values for those would be "nullptr" or "NULL". 
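+ # Illustrative example (hypothetical names): "virtual void OnEvent(int) = 0;"
+ # keeps the unnamed-parameter warning (the value after '=' is '0'), while
+ # "void (*handler)(int) = &OnEvent;" is skipped by the test below.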
+ matched_zero = Match(r'^\s=\s*(\S+)\s*;', remainder)
+ if matched_zero and matched_zero.group(1) != '0':
+ return False
+
+ # Don't warn on function pointer declarations. For this we need
+ # to check what came before the "(type)" string.
+ if Match(r'.*\)\s*$', line[0:match.start(0)]):
+ return False
+
+ # Don't warn if the parameter is named with block comments, e.g.:
+ # Function(int /*unused_param*/);
+ if '/*' in raw_line:
+ return False
+
+ # Passed all filters, issue warning here.
+ error(filename, linenum, 'readability/function', 3,
+ 'All parameters should be named in a function')
+ return True
+
+ # At this point, all that should be left is actual casts.
+ error(filename, linenum, 'readability/casting', 4,
+ 'Using C-style cast. Use %s<%s>(...) instead' %
+ (cast_type, match.group(1)))
+
+ return True
+
+
+_HEADERS_CONTAINING_TEMPLATES = (
+ ('<deque>', ('deque',)),
+ ('<functional>', ('unary_function', 'binary_function',
+ 'plus', 'minus', 'multiplies', 'divides', 'modulus',
+ 'negate',
+ 'equal_to', 'not_equal_to', 'greater', 'less',
+ 'greater_equal', 'less_equal',
+ 'logical_and', 'logical_or', 'logical_not',
+ 'unary_negate', 'not1', 'binary_negate', 'not2',
+ 'bind1st', 'bind2nd',
+ 'pointer_to_unary_function',
+ 'pointer_to_binary_function',
+ 'ptr_fun',
+ 'mem_fun_t', 'mem_fun', 'mem_fun1_t', 'mem_fun1_ref_t',
+ 'mem_fun_ref_t',
+ 'const_mem_fun_t', 'const_mem_fun1_t',
+ 'const_mem_fun_ref_t', 'const_mem_fun1_ref_t',
+ 'mem_fun_ref',
+ )),
+ ('<limits>', ('numeric_limits',)),
+ ('<list>', ('list',)),
+ ('<map>', ('map', 'multimap',)),
+ ('<memory>', ('allocator',)),
+ ('<queue>', ('queue', 'priority_queue',)),
+ ('<set>', ('set', 'multiset',)),
+ ('<stack>', ('stack',)),
+ ('<string>', ('char_traits', 'basic_string',)),
+ ('<utility>', ('pair',)),
+ ('<vector>', ('vector',)),
+
+ # gcc extensions.
+ # Note: std::hash is their hash, ::hash is our hash
+ ('<hash_map>', ('hash_map', 'hash_multimap',)),
+ ('<hash_set>', ('hash_set', 'hash_multiset',)),
+ ('<slist>', ('slist',)),
+ )
+
+_RE_PATTERN_STRING = re.compile(r'\bstring\b')
+
+_re_pattern_algorithm_header = []
+for _template in ('copy', 'max', 'min', 'min_element', 'sort', 'swap',
+ 'transform'):
+ # Match max<type>(..., ...), max(..., ...), but not foo->max, foo.max or
+ # type::max().
+ _re_pattern_algorithm_header.append(
+ (re.compile(r'[^>.]\b' + _template + r'(<.*?>)?\([^\)]'),
+ _template,
+ '<algorithm>'))
+
+_re_pattern_templates = []
+for _header, _templates in _HEADERS_CONTAINING_TEMPLATES:
+ for _template in _templates:
+ _re_pattern_templates.append(
+ (re.compile(r'(\<|\b)' + _template + r'\s*\<'),
+ _template + '<>',
+ _header))
+
+
+def FilesBelongToSameModule(filename_cc, filename_h):
+ """Check if these two filenames belong to the same module.
+
+ The concept of a 'module' here is as follows:
+ foo.h, foo-inl.h, foo.cc, foo_test.cc and foo_unittest.cc belong to the
+ same 'module' if they are in the same directory.
+ some/path/public/xyzzy and some/path/internal/xyzzy are also considered
+ to belong to the same module here.
+
+ If the filename_cc contains a longer path than the filename_h, for example,
+ '/absolute/path/to/base/sysinfo.cc', and this file would include
+ 'base/sysinfo.h', this function also produces the prefix needed to open the
+ header. This is used by the caller of this function to more robustly open the
+ header file. We don't have access to the real include paths in this context,
+ so we need this guesswork here.
+
+ Known bugs: tools/base/bar.cc and base/bar.h belong to the same module
+ according to this implementation. Because of this, this function gives
+ some false positives.
This should be sufficiently rare in practice.
+
+ Args:
+ filename_cc: the path for the .cc file
+ filename_h: the path for the header file
+
+ Returns:
+ Tuple with a bool and a string:
+ bool: True if filename_cc and filename_h belong to the same module.
+ string: the additional prefix needed to open the header file.
+ """
+
+ if not filename_cc.endswith('.cc'):
+ return (False, '')
+ filename_cc = filename_cc[:-len('.cc')]
+ if filename_cc.endswith('_unittest'):
+ filename_cc = filename_cc[:-len('_unittest')]
+ elif filename_cc.endswith('_test'):
+ filename_cc = filename_cc[:-len('_test')]
+ filename_cc = filename_cc.replace('/public/', '/')
+ filename_cc = filename_cc.replace('/internal/', '/')
+
+ if not filename_h.endswith('.h'):
+ return (False, '')
+ filename_h = filename_h[:-len('.h')]
+ if filename_h.endswith('-inl'):
+ filename_h = filename_h[:-len('-inl')]
+ filename_h = filename_h.replace('/public/', '/')
+ filename_h = filename_h.replace('/internal/', '/')
+
+ files_belong_to_same_module = filename_cc.endswith(filename_h)
+ common_path = ''
+ if files_belong_to_same_module:
+ common_path = filename_cc[:-len(filename_h)]
+ return files_belong_to_same_module, common_path
+
+
+def UpdateIncludeState(filename, include_state, io=codecs):
+ """Fill up the include_state with new includes found from the file.
+
+ Args:
+ filename: the name of the header to read.
+ include_state: an _IncludeState instance in which the headers are inserted.
+ io: The io factory to use to read the file. Provided for testability.
+
+ Returns:
+ True if a header was successfully added. False otherwise.
+ """
+ headerfile = None
+ try:
+ headerfile = io.open(filename, 'r', 'utf8', 'replace')
+ except IOError:
+ return False
+ linenum = 0
+ for line in headerfile:
+ linenum += 1
+ clean_line = CleanseComments(line)
+ match = _RE_PATTERN_INCLUDE.search(clean_line)
+ if match:
+ include = match.group(2)
+ # The value formatting is cute, but not really used right now.
+ # What matters here is that the key is in include_state.
+ include_state.setdefault(include, '%s:%d' % (filename, linenum))
+ return True
+
+
+def CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error,
+ io=codecs):
+ """Reports missing STL includes.
+
+ This function will output warnings to make sure you are including the headers
+ necessary for the STL containers and functions that you use. We only give one
+ reason to include a header. For example, if you use both equal_to<> and
+ less<> in a .h file, only one (the latter in the file) of these will be
+ reported as a reason to include the <functional>.
+
+ Args:
+ filename: The name of the current file.
+ clean_lines: A CleansedLines instance containing the file.
+ include_state: An _IncludeState instance.
+ error: The function to call with any errors found.
+ io: The IO factory to use to read the header file. Provided for unittest
+ injection.
+ """
+ required = {} # A map of header name to linenumber and the template entity.
+ # Example of required: { '<functional>': (1219, 'less<>') }
+
+ for linenum in xrange(clean_lines.NumLines()):
+ line = clean_lines.elided[linenum]
+ if not line or line[0] == '#':
+ continue
+
+ # String is special -- it is a non-templatized type in STL.
+ matched = _RE_PATTERN_STRING.search(line)
+ if matched:
+ # Don't warn about strings in non-STL namespaces:
+ # (We check only the first match per line; good enough.)
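+ # Illustrative example (hypothetical names): "std::string name;" and
+ # "string name;" both add <string> to the required map, while
+ # "util::string name;" is skipped because its prefix ends with '::'
+ # but not 'std::'.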
+      prefix = line[:matched.start()]
+      if prefix.endswith('std::') or not prefix.endswith('::'):
+        required['<string>'] = (linenum, 'string')
+
+    for pattern, template, header in _re_pattern_algorithm_header:
+      if pattern.search(line):
+        required[header] = (linenum, template)
+
+    # The following function is just a speed up, no semantics are changed.
+    if not '<' in line:  # Reduces the cpu time usage by skipping lines.
+      continue
+
+    for pattern, template, header in _re_pattern_templates:
+      if pattern.search(line):
+        required[header] = (linenum, template)
+
+  # The policy is that if you #include something in foo.h you don't need to
+  # include it again in foo.cc. Here, we will look at possible includes.
+  # Let's copy the include_state so it is only messed up within this function.
+  include_state = include_state.copy()
+
+  # Did we find the header for this file (if any) and successfully load it?
+  header_found = False
+
+  # Use the absolute path so that matching works properly.
+  abs_filename = FileInfo(filename).FullName()
+
+  # For Emacs's flymake.
+  # If cpplint is invoked from Emacs's flymake, a temporary file is generated
+  # by flymake and that file name might end with '_flymake.cc'. In that case,
+  # restore original file name here so that the corresponding header file can
+  # be found.
+  # e.g. If the file name is 'foo_flymake.cc', we should search for 'foo.h'
+  # instead of 'foo_flymake.h'
+  abs_filename = re.sub(r'_flymake\.cc$', '.cc', abs_filename)
+
+  # include_state is modified during iteration, so we iterate over a copy of
+  # the keys.
+  header_keys = include_state.keys()
+  for header in header_keys:
+    (same_module, common_path) = FilesBelongToSameModule(abs_filename, header)
+    fullpath = common_path + header
+    if same_module and UpdateIncludeState(fullpath, include_state, io):
+      header_found = True
+
+  # If we can't find the header file for a .cc, assume it's because we don't
+  # know where to look. In that case we'll give up as we're not sure they
+  # didn't include it in the .h file.
+  # TODO(unknown): Do a better job of finding .h files so we are confident
+  # that not having the .h file means there isn't one.
+  if filename.endswith('.cc') and not header_found:
+    return
+
+  # All the lines have been processed, report the errors found.
+  for required_header_unstripped in required:
+    template = required[required_header_unstripped][1]
+    if required_header_unstripped.strip('<>"') not in include_state:
+      error(filename, required[required_header_unstripped][0],
+            'build/include_what_you_use', 4,
+            'Add #include ' + required_header_unstripped + ' for ' + template)
+
+
+_RE_PATTERN_EXPLICIT_MAKEPAIR = re.compile(r'\bmake_pair\s*<')
+
+
+def CheckMakePairUsesDeduction(filename, clean_lines, linenum, error):
+  """Check that make_pair's template arguments are deduced.
+
+  G++ 4.6 in C++0x mode fails badly if make_pair's template arguments are
+  specified explicitly, and such use isn't intended in any case.
+
+  Args:
+    filename: The name of the current file.
+    clean_lines: A CleansedLines instance containing the file.
+    linenum: The number of the line to check.
+    error: The function to call with any errors found.
+ """ + line = clean_lines.elided[linenum] + match = _RE_PATTERN_EXPLICIT_MAKEPAIR.search(line) + if match: + error(filename, linenum, 'build/explicit_make_pair', + 4, # 4 = high confidence + 'For C++11-compatibility, omit template arguments from make_pair' + ' OR use pair directly OR if appropriate, construct a pair directly') + + +def ProcessLine(filename, file_extension, clean_lines, line, + include_state, function_state, nesting_state, error, + extra_check_functions=[]): + """Processes a single line in the file. + + Args: + filename: Filename of the file that is being processed. + file_extension: The extension (dot not included) of the file. + clean_lines: An array of strings, each representing a line of the file, + with comments stripped. + line: Number of line being processed. + include_state: An _IncludeState instance in which the headers are inserted. + function_state: A _FunctionState instance which counts function lines, etc. + nesting_state: A _NestingState instance which maintains information about + the current stack of nested blocks being parsed. + error: A callable to which errors are reported, which takes 4 arguments: + filename, line number, error level, and message + extra_check_functions: An array of additional check functions that will be + run on each source line. Each function takes 4 + arguments: filename, clean_lines, line, error + """ + raw_lines = clean_lines.raw_lines + ParseNolintSuppressions(filename, raw_lines[line], line, error) + nesting_state.Update(filename, clean_lines, line, error) + if nesting_state.stack and nesting_state.stack[-1].inline_asm != _NO_ASM: + return + CheckForFunctionLengths(filename, clean_lines, line, function_state, error) + CheckForMultilineCommentsAndStrings(filename, clean_lines, line, error) + CheckStyle(filename, clean_lines, line, file_extension, nesting_state, error) + CheckLanguage(filename, clean_lines, line, file_extension, include_state, + nesting_state, error) + CheckForNonConstReference(filename, clean_lines, line, nesting_state, error) + CheckForNonStandardConstructs(filename, clean_lines, line, + nesting_state, error) + CheckVlogArguments(filename, clean_lines, line, error) + CheckPosixThreading(filename, clean_lines, line, error) + CheckInvalidIncrement(filename, clean_lines, line, error) + CheckMakePairUsesDeduction(filename, clean_lines, line, error) + for check_fn in extra_check_functions: + check_fn(filename, clean_lines, line, error) + +def ProcessFileData(filename, file_extension, lines, error, + extra_check_functions=[]): + """Performs lint checks and reports any errors to the given error function. + + Args: + filename: Filename of the file that is being processed. + file_extension: The extension (dot not included) of the file. + lines: An array of strings, each representing a line of the file, with the + last element being empty if the file is terminated with a newline. + error: A callable to which errors are reported, which takes 4 arguments: + filename, line number, error level, and message + extra_check_functions: An array of additional check functions that will be + run on each source line. 
Each function takes 4 + arguments: filename, clean_lines, line, error + """ + lines = (['// marker so line numbers and indices both start at 1'] + lines + + ['// marker so line numbers end in a known way']) + + include_state = _IncludeState() + function_state = _FunctionState() + nesting_state = _NestingState() + + ResetNolintSuppressions() + + CheckForCopyright(filename, lines, error) + + if file_extension == 'h': + CheckForHeaderGuard(filename, lines, error) + + RemoveMultiLineComments(filename, lines, error) + clean_lines = CleansedLines(lines) + for line in xrange(clean_lines.NumLines()): + ProcessLine(filename, file_extension, clean_lines, line, + include_state, function_state, nesting_state, error, + extra_check_functions) + nesting_state.CheckCompletedBlocks(filename, error) + + CheckForIncludeWhatYouUse(filename, clean_lines, include_state, error) + + # We check here rather than inside ProcessLine so that we see raw + # lines rather than "cleaned" lines. + CheckForBadCharacters(filename, lines, error) + + CheckForNewlineAtEOF(filename, lines, error) + +def ProcessFile(filename, vlevel, extra_check_functions=[]): + """Does google-lint on a single file. + + Args: + filename: The name of the file to parse. + + vlevel: The level of errors to report. Every error of confidence + >= verbose_level will be reported. 0 is a good default. + + extra_check_functions: An array of additional check functions that will be + run on each source line. Each function takes 4 + arguments: filename, clean_lines, line, error + """ + + _SetVerboseLevel(vlevel) + + try: + # Support the UNIX convention of using "-" for stdin. Note that + # we are not opening the file with universal newline support + # (which codecs doesn't support anyway), so the resulting lines do + # contain trailing '\r' characters if we are reading a file that + # has CRLF endings. + # If after the split a trailing '\r' is present, it is removed + # below. If it is not expected to be present (i.e. os.linesep != + # '\r\n' as in Windows), a warning is issued below if this file + # is processed. + + if filename == '-': + lines = codecs.StreamReaderWriter(sys.stdin, + codecs.getreader('utf8'), + codecs.getwriter('utf8'), + 'replace').read().split('\n') + else: + lines = codecs.open(filename, 'r', 'utf8', 'replace').read().split('\n') + + carriage_return_found = False + # Remove trailing '\r'. + for linenum in range(len(lines)): + if lines[linenum].endswith('\r'): + lines[linenum] = lines[linenum].rstrip('\r') + carriage_return_found = True + + except IOError: + sys.stderr.write( + "Skipping input '%s': Can't open for reading\n" % filename) + return + + # Note, if no dot is found, this will give the entire filename as the ext. + file_extension = filename[filename.rfind('.') + 1:] + + # When reading from stdin, the extension is unknown, so no cpplint tests + # should rely on the extension. + if filename != '-' and file_extension not in _valid_extensions: + sys.stderr.write('Ignoring %s; not a valid file name ' + '(%s)\n' % (filename, ', '.join(_valid_extensions))) + else: + ProcessFileData(filename, file_extension, lines, Error, + extra_check_functions) + if carriage_return_found and os.linesep != '\r\n': + # Use 0 for linenum since outputting only one error for potentially + # several lines. 
+      Error(filename, 0, 'whitespace/newline', 1,
+            'One or more unexpected \\r (^M) found;'
+            ' better to use only a \\n')
+
+  sys.stderr.write('Done processing %s\n' % filename)
+
+
+def PrintUsage(message):
+  """Prints a brief usage string and exits, optionally with an error message.
+
+  Args:
+    message: The optional error message.
+  """
+  sys.stderr.write(_USAGE)
+  if message:
+    sys.exit('\nFATAL ERROR: ' + message)
+  else:
+    sys.exit(1)
+
+
+def PrintCategories():
+  """Prints a list of all the error-categories used by error messages.
+
+  These are the categories used to filter messages via --filter.
+  """
+  sys.stderr.write(''.join('  %s\n' % cat for cat in _ERROR_CATEGORIES))
+  sys.exit(0)
+
+
+def ParseArguments(args):
+  """Parses the command line arguments.
+
+  This may set the output format and verbosity level as side-effects.
+
+  Args:
+    args: The command line arguments.
+
+  Returns:
+    The list of filenames to lint.
+  """
+  try:
+    (opts, filenames) = getopt.getopt(args, '', ['help', 'output=', 'verbose=',
+                                                 'counting=',
+                                                 'filter=',
+                                                 'root=',
+                                                 'linelength=',
+                                                 'extensions='])
+  except getopt.GetoptError:
+    PrintUsage('Invalid arguments.')
+
+  verbosity = _VerboseLevel()
+  output_format = _OutputFormat()
+  filters = ''
+  counting_style = ''
+
+  for (opt, val) in opts:
+    if opt == '--help':
+      PrintUsage(None)
+    elif opt == '--output':
+      if val not in ('emacs', 'vs7', 'eclipse'):
+        PrintUsage('The only allowed output formats are emacs, vs7 and eclipse.')
+      output_format = val
+    elif opt == '--verbose':
+      verbosity = int(val)
+    elif opt == '--filter':
+      filters = val
+      if not filters:
+        PrintCategories()
+    elif opt == '--counting':
+      if val not in ('total', 'toplevel', 'detailed'):
+        PrintUsage('Valid counting options are total, toplevel, and detailed')
+      counting_style = val
+    elif opt == '--root':
+      global _root
+      _root = val
+    elif opt == '--linelength':
+      global _line_length
+      try:
+        _line_length = int(val)
+      except ValueError:
+        PrintUsage('Line length must be digits.')
+    elif opt == '--extensions':
+      global _valid_extensions
+      try:
+        _valid_extensions = set(val.split(','))
+      except ValueError:
+        PrintUsage('Extensions must be comma separated list.')
+
+  if not filenames:
+    PrintUsage('No files were specified.')
+
+  _SetOutputFormat(output_format)
+  _SetVerboseLevel(verbosity)
+  _SetFilters(filters)
+  _SetCountingStyle(counting_style)
+
+  return filenames
+
+
+def main():
+  filenames = ParseArguments(sys.argv[1:])
+
+  # Change stderr to write with replacement characters so we don't die
+  # if we try to print something containing non-ASCII characters.
+  sys.stderr = codecs.StreamReaderWriter(sys.stderr,
+                                         codecs.getreader('utf8'),
+                                         codecs.getwriter('utf8'),
+                                         'replace')
+
+  _cpplint_state.ResetErrorCounts()
+  for filename in filenames:
+    ProcessFile(filename, _cpplint_state.verbose_level)
+  _cpplint_state.PrintErrorCounts()
+
+  sys.exit(_cpplint_state.error_count > 0)
+
+
+if __name__ == '__main__':
+  main()
diff --git a/external/rocksdb/arcanist_util/lint_engine/FacebookFbcodeLintEngine.php b/external/rocksdb/arcanist_util/lint_engine/FacebookFbcodeLintEngine.php
new file mode 100755
index 0000000000..88b0748f7d
--- /dev/null
+++ b/external/rocksdb/arcanist_util/lint_engine/FacebookFbcodeLintEngine.php
@@ -0,0 +1,138 @@
+<?php
+// Copyright 2004-present Facebook.  All Rights Reserved.
+
+class FacebookFbcodeLintEngine extends ArcanistLintEngine {
+
+  public function buildLinters() {
+    $linters = array();
+    $paths = $this->getPaths();
+
+    // Remove all deleted files, which are not checked by the
+    // following linters.
+ foreach ($paths as $key => $path) { + if (!Filesystem::pathExists($this->getFilePathOnDisk($path))) { + unset($paths[$key]); + } + } + + $generated_linter = new ArcanistGeneratedLinter(); + $linters[] = $generated_linter; + + $nolint_linter = new ArcanistNoLintLinter(); + $linters[] = $nolint_linter; + + $text_linter = new ArcanistTextLinter(); + $text_linter->setCustomSeverityMap(array( + ArcanistTextLinter::LINT_LINE_WRAP + => ArcanistLintSeverity::SEVERITY_ADVICE, + )); + $linters[] = $text_linter; + + $java_text_linter = new ArcanistTextLinter(); + $java_text_linter->setMaxLineLength(100); + $java_text_linter->setCustomSeverityMap(array( + ArcanistTextLinter::LINT_LINE_WRAP + => ArcanistLintSeverity::SEVERITY_ADVICE, + )); + $linters[] = $java_text_linter; + + $python_linter = new ArcanistPEP8Linter(); + $linters[] = $python_linter; + + $cpp_linters = array(); + $cpp_linters[] = $linters[] = new ArcanistCpplintLinter(); + $cpp_linters[] = $linters[] = new FbcodeCppLinter(); + + $clang_format_linter = new FbcodeClangFormatLinter(); + $linters[] = $clang_format_linter; + + $spelling_linter = new ArcanistSpellingLinter(); + $linters[] = $spelling_linter; + + foreach ($paths as $path) { + $is_text = false; + + $text_extensions = ( + '/\.('. + 'cpp|cxx|c|cc|h|hpp|hxx|tcc|'. + 'py|rb|hs|pl|pm|tw|'. + 'php|phpt|css|js|'. + 'java|'. + 'thrift|'. + 'lua|'. + 'siv|'. + 'txt'. + ')$/' + ); + if (preg_match($text_extensions, $path)) { + $is_text = true; + } + if ($is_text) { + $nolint_linter->addPath($path); + + $generated_linter->addPath($path); + $generated_linter->addData($path, $this->loadData($path)); + + if (preg_match('/\.java$/', $path)) { + $java_text_linter->addPath($path); + $java_text_linter->addData($path, $this->loadData($path)); + } else { + $text_linter->addPath($path); + $text_linter->addData($path, $this->loadData($path)); + } + + $spelling_linter->addPath($path); + $spelling_linter->addData($path, $this->loadData($path)); + } + if (preg_match('/\.(cpp|c|cc|cxx|h|hh|hpp|hxx|tcc)$/', $path) + && !preg_match('/third-party/', $path)) { + foreach ($cpp_linters as &$linter) { + $linter->addPath($path); + $linter->addData($path, $this->loadData($path)); + } + + $clang_format_linter->addPath($path); + $clang_format_linter->addData($path, $this->loadData($path)); + $clang_format_linter->setPathChangedLines( + $path, $this->getPathChangedLines($path)); + } + + // Match *.py and contbuild config files + if (preg_match('/(\.(py|tw|smcprops)|^contbuild\/configs\/[^\/]*)$/', + $path)) { + $space_count = 4; + $real_path = $this->getFilePathOnDisk($path); + $dir = dirname($real_path); + do { + if (file_exists($dir.'/.python2space')) { + $space_count = 2; + break; + } + $dir = dirname($dir); + } while ($dir != '/' && $dir != '.'); + + $cur_path_linter = $python_linter; + $cur_path_linter->addPath($path); + $cur_path_linter->addData($path, $this->loadData($path)); + + if (preg_match('/\.tw$/', $path)) { + $cur_path_linter->setCustomSeverityMap(array( + 'E251' => ArcanistLintSeverity::SEVERITY_DISABLED, + )); + } + } + } + + $name_linter = new ArcanistFilenameLinter(); + $linters[] = $name_linter; + foreach ($paths as $path) { + $name_linter->addPath($path); + } + + return $linters; + } + +} diff --git a/external/rocksdb/arcanist_util/lint_engine/FacebookHowtoevenLintEngine.php b/external/rocksdb/arcanist_util/lint_engine/FacebookHowtoevenLintEngine.php new file mode 100755 index 0000000000..2e0148141a --- /dev/null +++ b/external/rocksdb/arcanist_util/lint_engine/FacebookHowtoevenLintEngine.php 
@@ -0,0 +1,27 @@
+<?php
+// Copyright 2015-present Facebook.  All Rights Reserved.
+
+class FacebookHowtoevenLintEngine extends ArcanistLintEngine {
+
+  public function buildLinters() {
+    $paths = array();
+
+    foreach ($this->getPaths() as $path) {
+      // Don't try to lint deleted files or changed directories.
+      if (!Filesystem::pathExists($path) || is_dir($path)) {
+        continue;
+      }
+
+      if (preg_match('/\.(cpp|c|cc|cxx|h|hh|hpp|hxx|tcc)$/', $path)) {
+        $paths[] = $path;
+      }
+    }
+
+    $howtoeven = new FacebookHowtoevenLinter();
+    $howtoeven->setPaths($paths);
+    return array($howtoeven);
+  }
+}
diff --git a/external/rocksdb/arcanist_util/unit_engine/FacebookFbcodeUnitTestEngine.php b/external/rocksdb/arcanist_util/unit_engine/FacebookFbcodeUnitTestEngine.php
new file mode 100755
index 0000000000..985bd68fc2
--- /dev/null
+++ b/external/rocksdb/arcanist_util/unit_engine/FacebookFbcodeUnitTestEngine.php
@@ -0,0 +1,17 @@
+<?php
+// Copyright 2004-present Facebook.  All Rights Reserved.
+
+class FacebookFbcodeUnitTestEngine extends ArcanistBaseUnitTestEngine {
+
+  public function run() {
+    // This engine runs no real tests; return a single passing
+    // placeholder result.
+    $result = new ArcanistUnitTestResult();
+    $result->setName("dummy_placeholder_entry");
+    $result->setResult(ArcanistUnitTestResult::RESULT_PASS);
+    return array($result);
+  }
+}
diff --git a/external/rocksdb/build_tools/amalgamate.py b/external/rocksdb/build_tools/amalgamate.py
new file mode 100755
index 0000000000..548b1e8cec
--- /dev/null
+++ b/external/rocksdb/build_tools/amalgamate.py
@@ -0,0 +1,110 @@
+#!/usr/bin/python
+
+# amalgamate.py creates an amalgamation from a unity build.
+# It can be run with either Python 2 or 3.
+# An amalgamation consists of a header that includes the contents of all public
+# headers and a source file that includes the contents of all source files and
+# private headers.
+#
+# This script works by starting with the unity build file and recursively
+# expanding #include directives. If the #include is found in a public include
+# directory, that header is expanded into the amalgamation header.
+#
+# A particular header is only expanded once, so this script will
+# break if there are multiple inclusions of the same header that are expected
+# to expand differently. Similarly, this type of code causes issues:
+#
+# #ifdef FOO
+#   #include "bar.h"
+#   // code here
+# #else
+#   #include "bar.h" // oops, doesn't get expanded
+#   // different code here
+# #endif
+#
+# The solution is to move the include out of the #ifdef.
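+#
+# A minimal usage sketch (file names here are hypothetical, not taken from
+# the Makefile): given a unity build file unity.cc, something like
+#
+#   python amalgamate.py -I . -i include unity.cc -o rocksdb.cc -H rocksdb.h
+#
+# would write the combined source to rocksdb.cc and the combined public
+# header to rocksdb.h; -x excludes a given header from expansion. The flags
+# match the argparse definitions in main() below.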
+ +from __future__ import print_function + +import argparse +from os import path +import re +import sys + +include_re = re.compile('^[ \t]*#include[ \t]+"(.*)"[ \t]*$') +included = set() +excluded = set() + +def find_header(name, abs_path, include_paths): + samedir = path.join(path.dirname(abs_path), name) + if path.exists(samedir): + return samedir + for include_path in include_paths: + include_path = path.join(include_path, name) + if path.exists(include_path): + return include_path + return None + +def expand_include(include_path, f, abs_path, source_out, header_out, include_paths, public_include_paths): + if include_path in included: + return False + + included.add(include_path) + with open(include_path) as f: + print('#line 1 "{}"'.format(include_path), file=source_out) + process_file(f, include_path, source_out, header_out, include_paths, public_include_paths) + return True + +def process_file(f, abs_path, source_out, header_out, include_paths, public_include_paths): + for (line, text) in enumerate(f): + m = include_re.match(text) + if m: + filename = m.groups()[0] + # first check private headers + include_path = find_header(filename, abs_path, include_paths) + if include_path: + if include_path in excluded: + source_out.write(text) + expanded = False + else: + expanded = expand_include(include_path, f, abs_path, source_out, header_out, include_paths, public_include_paths) + else: + # now try public headers + include_path = find_header(filename, abs_path, public_include_paths) + if include_path: + # found public header + expanded = False + if include_path in excluded: + source_out.write(text) + else: + expand_include(include_path, f, abs_path, header_out, None, public_include_paths, []) + else: + sys.exit("unable to find {}, included in {} on line {}".format(filename, abs_path, line)) + + if expanded: + print('#line {} "{}"'.format(line+1, abs_path), file=source_out) + elif text != "#pragma once\n": + source_out.write(text) + +def main(): + parser = argparse.ArgumentParser(description="Transform a unity build into an amalgamation") + parser.add_argument("source", help="source file") + parser.add_argument("-I", action="append", dest="include_paths", help="include paths for private headers") + parser.add_argument("-i", action="append", dest="public_include_paths", help="include paths for public headers") + parser.add_argument("-x", action="append", dest="excluded", help="excluded header files") + parser.add_argument("-o", dest="source_out", help="output C++ file", required=True) + parser.add_argument("-H", dest="header_out", help="output C++ header file", required=True) + args = parser.parse_args() + + include_paths = list(map(path.abspath, args.include_paths or [])) + public_include_paths = list(map(path.abspath, args.public_include_paths or [])) + excluded.update(map(path.abspath, args.excluded or [])) + filename = args.source + abs_path = path.abspath(filename) + with open(filename) as f, open(args.source_out, 'w') as source_out, open(args.header_out, 'w') as header_out: + print('#line 1 "{}"'.format(filename), file=source_out) + print('#include "{}"'.format(header_out.name), file=source_out) + process_file(f, abs_path, source_out, header_out, include_paths, public_include_paths) + +if __name__ == "__main__": + main() diff --git a/external/rocksdb/build_tools/build_detect_platform b/external/rocksdb/build_tools/build_detect_platform new file mode 100755 index 0000000000..afff3794f0 --- /dev/null +++ b/external/rocksdb/build_tools/build_detect_platform @@ -0,0 +1,458 @@ +#!/bin/sh +# 
+# Detects OS we're compiling on and outputs a file specified by the first
+# argument, which in turn gets read while processing Makefile.
+#
+# The output will set the following variables:
+#   CC                          C Compiler path
+#   CXX                         C++ Compiler path
+#   PLATFORM_LDFLAGS            Linker flags
+#   JAVA_LDFLAGS                Linker flags for RocksDBJava
+#   JAVA_STATIC_LDFLAGS         Linker flags for RocksDBJava static build
+#   PLATFORM_SHARED_EXT         Extension for shared libraries
+#   PLATFORM_SHARED_LDFLAGS     Flags for building shared library
+#   PLATFORM_SHARED_CFLAGS      Flags for compiling objects for shared library
+#   PLATFORM_CCFLAGS            C compiler flags
+#   PLATFORM_CXXFLAGS           C++ compiler flags.  Will contain:
+#   PLATFORM_SHARED_VERSIONED   Set to 'true' if platform supports versioned
+#                               shared libraries, empty otherwise.
+#
+# The PLATFORM_CCFLAGS and PLATFORM_CXXFLAGS might include the following:
+#
+#   -DLEVELDB_PLATFORM_POSIX if cstdatomic is present
+#   -DLEVELDB_PLATFORM_NOATOMIC if it is not
+#   -DSNAPPY if the Snappy library is present
+#   -DLZ4 if the LZ4 library is present
+#   -DZSTD if the ZSTD library is present
+#   -DNUMA if the NUMA library is present
+#
+# Using gflags in rocksdb:
+# Our project depends on gflags, which requires users to take some extra steps
+# before they can compile the whole repository:
+#   1. Install gflags. You may download it from here:
+#      https://code.google.com/p/gflags/
+#   2. Once installed, add the include path/lib path for gflags to CPATH and
+#      LIBRARY_PATH respectively. If installed with default mode, the
+#      lib and include path will be /usr/local/lib and /usr/local/include
+# Mac users can do this by having brew installed and running
+# brew install gflags
+
+OUTPUT=$1
+if test -z "$OUTPUT"; then
+  echo "usage: $0 <output-filename>" >&2
+  exit 1
+fi
+
+# we depend on C++11
+PLATFORM_CXXFLAGS="-std=c++11"
+# we currently depend on POSIX platform
+COMMON_FLAGS="-DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX"
+
+# Default to fbcode gcc on internal fb machines
+if [ -z "$ROCKSDB_NO_FBCODE" -a -d /mnt/gvfs/third-party ]; then
+  FBCODE_BUILD="true"
+  # If we're compiling with TSAN we need pic build
+  PIC_BUILD=$COMPILE_WITH_TSAN
+  if [ -z "$ROCKSDB_FBCODE_BUILD_WITH_481" ]; then
+    source "$PWD/build_tools/fbcode_config.sh"
+  else
+    # we need this to build with MySQL. Don't use for other purposes.
+ source "$PWD/build_tools/fbcode_config4.8.1.sh" + fi +fi + +# Delete existing output, if it exists +rm -f "$OUTPUT" +touch "$OUTPUT" + +if test -z "$CC"; then + CC=cc +fi + +if test -z "$CXX"; then + CXX=g++ +fi + +# Detect OS +if test -z "$TARGET_OS"; then + TARGET_OS=`uname -s` +fi + +if test -z "$TARGET_ARCHITECTURE"; then + TARGET_ARCHITECTURE=`uname -m` +fi + +if test -z "$CLANG_SCAN_BUILD"; then + CLANG_SCAN_BUILD=scan-build +fi + +if test -z "$CLANG_ANALYZER"; then + CLANG_ANALYZER=$(which clang++ 2> /dev/null) +fi + +COMMON_FLAGS="$COMMON_FLAGS ${CFLAGS}" +CROSS_COMPILE= +PLATFORM_CCFLAGS= +PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS" +PLATFORM_SHARED_EXT="so" +PLATFORM_SHARED_LDFLAGS="-Wl,--no-as-needed -shared -Wl,-soname -Wl," +PLATFORM_SHARED_CFLAGS="-fPIC" +PLATFORM_SHARED_VERSIONED=true + +# generic port files (working on all platform by #ifdef) go directly in /port +GENERIC_PORT_FILES=`cd "$ROCKSDB_ROOT"; find port -name '*.cc' | tr "\n" " "` + +# On GCC, we pick libc's memcmp over GCC's memcmp via -fno-builtin-memcmp +case "$TARGET_OS" in + Darwin) + PLATFORM=OS_MACOSX + COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX" + PLATFORM_SHARED_EXT=dylib + PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name " + # PORT_FILES=port/darwin/darwin_specific.cc + ;; + IOS) + PLATFORM=IOS + COMMON_FLAGS="$COMMON_FLAGS -DOS_MACOSX -DIOS_CROSS_COMPILE -DROCKSDB_LITE" + PLATFORM_SHARED_EXT=dylib + PLATFORM_SHARED_LDFLAGS="-dynamiclib -install_name " + CROSS_COMPILE=true + PLATFORM_SHARED_VERSIONED= + ;; + Linux) + PLATFORM=OS_LINUX + COMMON_FLAGS="$COMMON_FLAGS -DOS_LINUX" + if [ -z "$USE_CLANG" ]; then + COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp" + fi + PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt" + # PORT_FILES=port/linux/linux_specific.cc + ;; + SunOS) + PLATFORM=OS_SOLARIS + COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_SOLARIS" + PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt" + # PORT_FILES=port/sunos/sunos_specific.cc + ;; + FreeBSD) + PLATFORM=OS_FREEBSD + COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_FREEBSD" + PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread" + # PORT_FILES=port/freebsd/freebsd_specific.cc + ;; + NetBSD) + PLATFORM=OS_NETBSD + COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_NETBSD" + PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lgcc_s" + # PORT_FILES=port/netbsd/netbsd_specific.cc + ;; + OpenBSD) + PLATFORM=OS_OPENBSD + COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_OPENBSD" + PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -pthread" + # PORT_FILES=port/openbsd/openbsd_specific.cc + ;; + DragonFly) + PLATFORM=OS_DRAGONFLYBSD + COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_DRAGONFLYBSD" + PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread" + # PORT_FILES=port/dragonfly/dragonfly_specific.cc + ;; + Cygwin) + PLATFORM=CYGWIN + PLATFORM_SHARED_CFLAGS="" + PLATFORM_CXXFLAGS="-std=gnu++11" + COMMON_FLAGS="$COMMON_FLAGS -DCYGWIN" + if [ -z "$USE_CLANG" ]; then + COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp" + fi + PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lpthread -lrt" + # PORT_FILES=port/linux/linux_specific.cc + ;; + OS_ANDROID_CROSSCOMPILE) + PLATFORM=OS_ANDROID + COMMON_FLAGS="$COMMON_FLAGS -fno-builtin-memcmp -D_REENTRANT -DOS_ANDROID -DLEVELDB_PLATFORM_POSIX" + PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS " # All pthread features are in the Android C library + # PORT_FILES=port/android/android.cc + CROSS_COMPILE=true + ;; + *) + echo "Unknown platform!" 
>&2
+    exit 1
+esac
+
+PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS ${CXXFLAGS}"
+JAVA_LDFLAGS="$PLATFORM_LDFLAGS"
+JAVA_STATIC_LDFLAGS="$PLATFORM_LDFLAGS"
+
+if [ "$CROSS_COMPILE" = "true" -o "$FBCODE_BUILD" = "true" ]; then
+  # Cross-compiling; do not try any compilation tests.
+  # Also don't need any compilation tests if compiling on fbcode
+  true
+else
+  if ! test $ROCKSDB_DISABLE_FALLOCATE; then
+    # Test whether fallocate is available
+    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+      #include <fcntl.h>
+      #include <linux/falloc.h>
+      int main() {
+        int fd = open("/dev/null", 0);
+        fallocate(fd, FALLOC_FL_KEEP_SIZE | FALLOC_FL_PUNCH_HOLE, 0, 1024);
+      }
+EOF
+    if [ "$?" = 0 ]; then
+      COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_FALLOCATE_PRESENT"
+    fi
+  fi
+
+  # Test whether Snappy library is installed
+  # http://code.google.com/p/snappy/
+  $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+    #include <snappy.h>
+    int main() {}
+EOF
+  if [ "$?" = 0 ]; then
+    COMMON_FLAGS="$COMMON_FLAGS -DSNAPPY"
+    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lsnappy"
+    JAVA_LDFLAGS="$JAVA_LDFLAGS -lsnappy"
+  fi
+
+  # Test whether gflags library is installed
+  # http://gflags.github.io/gflags/
+  # check if the namespace is gflags
+  $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
+    #include <gflags/gflags.h>
+    using namespace gflags;
+    int main() {}
+EOF
+  if [ "$?" = 0 ]; then
+    COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=gflags"
+    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
+  else
+    # check if namespace is google
+    $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null << EOF
+      #include <gflags/gflags.h>
+      using namespace google;
+      int main() {}
+EOF
+    if [ "$?" = 0 ]; then
+      COMMON_FLAGS="$COMMON_FLAGS -DGFLAGS=google"
+      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lgflags"
+    fi
+  fi
+
+  # Test whether zlib library is installed
+  $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+    #include <zlib.h>
+    int main() {}
+EOF
+  if [ "$?" = 0 ]; then
+    COMMON_FLAGS="$COMMON_FLAGS -DZLIB"
+    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lz"
+    JAVA_LDFLAGS="$JAVA_LDFLAGS -lz"
+  fi
+
+  # Test whether bzip library is installed
+  $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+    #include <bzlib.h>
+    int main() {}
+EOF
+  if [ "$?" = 0 ]; then
+    COMMON_FLAGS="$COMMON_FLAGS -DBZIP2"
+    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lbz2"
+    JAVA_LDFLAGS="$JAVA_LDFLAGS -lbz2"
+  fi
+
+  # Test whether lz4 library is installed
+  $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+    #include <lz4.h>
+    #include <lz4hc.h>
+    int main() {}
+EOF
+  if [ "$?" = 0 ]; then
+    COMMON_FLAGS="$COMMON_FLAGS -DLZ4"
+    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -llz4"
+    JAVA_LDFLAGS="$JAVA_LDFLAGS -llz4"
+  fi
+
+  # Test whether zstd library is installed
+  $CXX $CFLAGS $COMMON_FLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+    #include <zstd.h>
+    int main() {}
+EOF
+  if [ "$?" = 0 ]; then
+    COMMON_FLAGS="$COMMON_FLAGS -DZSTD"
+    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lzstd"
+    JAVA_LDFLAGS="$JAVA_LDFLAGS -lzstd"
+  fi
+
+  # Test whether numa is available
+  $CXX $CFLAGS -x c++ - -o /dev/null -lnuma 2>/dev/null <<EOF
+    #include <numa.h>
+    #include <numaif.h>
+    int main() {}
+EOF
+  if [ "$?" = 0 ]; then
+    COMMON_FLAGS="$COMMON_FLAGS -DNUMA"
+    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lnuma"
+    JAVA_LDFLAGS="$JAVA_LDFLAGS -lnuma"
+  fi
+
+  # Test whether jemalloc is available
+  if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null -ljemalloc \
+    2>/dev/null; then
+    PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -ljemalloc"
+    JAVA_LDFLAGS="$JAVA_LDFLAGS -ljemalloc"
+    JEMALLOC=1
+  else
+    # jemalloc is not available. Let's try tcmalloc
+    if echo 'int main() {}' | $CXX $CFLAGS -x c++ - -o /dev/null \
+      -ltcmalloc 2>/dev/null; then
+      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -ltcmalloc"
+      JAVA_LDFLAGS="$JAVA_LDFLAGS -ltcmalloc"
+    fi
+  fi
+
+  # Test whether malloc_usable_size is available
+  $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+    #include <malloc.h>
+    int main() {
+      size_t res = malloc_usable_size(0);
+      return 0;
+    }
+EOF
+  if [ "$?" = 0 ]; then
+    COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_MALLOC_USABLE_SIZE"
+  fi
+
+  # Test whether PTHREAD_MUTEX_ADAPTIVE_NP mutex type is available
+  $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+    #include <pthread.h>
+    int main() {
+      int x = PTHREAD_MUTEX_ADAPTIVE_NP;
+      return 0;
+    }
+EOF
+  if [ "$?" = 0 ]; then
+    COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_PTHREAD_ADAPTIVE_MUTEX"
+  fi
+
+  # Test whether backtrace is available
+  $CXX $CFLAGS -x c++ - -o /dev/null 2>/dev/null <<EOF
+    #include <execinfo.h>
+    int main() {
+      void* frames[1];
+      backtrace_symbols(frames, backtrace(frames, 1));
+      return 0;
+    }
+EOF
+  if [ "$?" = 0 ]; then
+    COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_BACKTRACE"
+  else
+    # Test whether execinfo library is installed
+    $CXX $CFLAGS -lexecinfo -x c++ - -o /dev/null 2>/dev/null <<EOF
+      #include <execinfo.h>
+      int main() {
+        void* frames[1];
+        backtrace_symbols(frames, backtrace(frames, 1));
+      }
+EOF
+    if [ "$?" = 0 ]; then
+      COMMON_FLAGS="$COMMON_FLAGS -DROCKSDB_BACKTRACE"
+      PLATFORM_LDFLAGS="$PLATFORM_LDFLAGS -lexecinfo"
+      JAVA_LDFLAGS="$JAVA_LDFLAGS -lexecinfo"
+    fi
+  fi
+
+  # Test if -pg is supported
+  $CXX $CFLAGS -pg -x c++ - -o /dev/null 2>/dev/null <<EOF
+    int main() {
+      return 0;
+    }
+EOF
+  if [ "$?" = 0 ]; then
+    PROFILING_FLAGS=-pg
+  fi
+fi
+
+PLATFORM_CCFLAGS="$PLATFORM_CCFLAGS $COMMON_FLAGS"
+PLATFORM_CXXFLAGS="$PLATFORM_CXXFLAGS $COMMON_FLAGS"
+
+echo "CC=$CC" >> "$OUTPUT"
+echo "CXX=$CXX" >> "$OUTPUT"
+echo "PLATFORM=$PLATFORM" >> "$OUTPUT"
+echo "PLATFORM_LDFLAGS=$PLATFORM_LDFLAGS" >> "$OUTPUT"
+echo "JAVA_LDFLAGS=$JAVA_LDFLAGS" >> "$OUTPUT"
+echo "JAVA_STATIC_LDFLAGS=$JAVA_STATIC_LDFLAGS" >> "$OUTPUT"
+echo "VALGRIND_VER=$VALGRIND_VER" >> "$OUTPUT"
+echo "PLATFORM_CCFLAGS=$PLATFORM_CCFLAGS" >> "$OUTPUT"
+echo "PLATFORM_CXXFLAGS=$PLATFORM_CXXFLAGS" >> "$OUTPUT"
+echo "PLATFORM_SHARED_CFLAGS=$PLATFORM_SHARED_CFLAGS" >> "$OUTPUT"
+echo "PLATFORM_SHARED_EXT=$PLATFORM_SHARED_EXT" >> "$OUTPUT"
+echo "PLATFORM_SHARED_LDFLAGS=$PLATFORM_SHARED_LDFLAGS" >> "$OUTPUT"
+echo "PLATFORM_SHARED_VERSIONED=$PLATFORM_SHARED_VERSIONED" >> "$OUTPUT"
+echo "EXEC_LDFLAGS=$EXEC_LDFLAGS" >> "$OUTPUT"
+echo "JEMALLOC_INCLUDE=$JEMALLOC_INCLUDE" >> "$OUTPUT"
+echo "JEMALLOC_LIB=$JEMALLOC_LIB" >> "$OUTPUT"
+echo "ROCKSDB_MAJOR=$ROCKSDB_MAJOR" >> "$OUTPUT"
+echo "ROCKSDB_MINOR=$ROCKSDB_MINOR" >> "$OUTPUT"
+echo "ROCKSDB_PATCH=$ROCKSDB_PATCH" >> "$OUTPUT"
+echo "CLANG_SCAN_BUILD=$CLANG_SCAN_BUILD" >> "$OUTPUT"
+echo "CLANG_ANALYZER=$CLANG_ANALYZER" >> "$OUTPUT"
+echo "PROFILING_FLAGS=$PROFILING_FLAGS" >> "$OUTPUT"
+if test -n "$JEMALLOC"; then
+  echo "JEMALLOC=1" >> "$OUTPUT"
+fi
diff --git a/external/rocksdb/build_tools/dockerbuild.sh b/external/rocksdb/build_tools/dockerbuild.sh
new file mode 100755
index 0000000000..2685380bf1
--- /dev/null
+++ b/external/rocksdb/build_tools/dockerbuild.sh
@@ -0,0 +1,2 @@
+#!/bin/bash
+docker run -v $PWD:/rocks -w /rocks buildpack-deps make
diff --git a/external/rocksdb/build_tools/fb_compile_mongo.sh b/external/rocksdb/build_tools/fb_compile_mongo.sh
new file mode 100755
index 0000000000..c087f81611
--- /dev/null
+++ b/external/rocksdb/build_tools/fb_compile_mongo.sh
@@ -0,0 +1,55 @@
+#!/bin/sh
+
+# fail early
+set -e
+
+if test -z $ROCKSDB_PATH; then
+  ROCKSDB_PATH=~/rocksdb
+fi
+source $ROCKSDB_PATH/build_tools/fbcode_config4.8.1.sh
+
+EXTRA_LDFLAGS=""
+
+if test -z $ALLOC; then
+  # default
+  ALLOC=tcmalloc
+elif [[ $ALLOC == "jemalloc"
]]; then + ALLOC=system + EXTRA_LDFLAGS+=" -Wl,--whole-archive $JEMALLOC_LIB -Wl,--no-whole-archive" +fi + +# we need to force mongo to use static library, not shared +STATIC_LIB_DEP_DIR='build/static_library_dependencies' +test -d $STATIC_LIB_DEP_DIR || mkdir $STATIC_LIB_DEP_DIR +test -h $STATIC_LIB_DEP_DIR/`basename $SNAPPY_LIBS` || ln -s $SNAPPY_LIBS $STATIC_LIB_DEP_DIR +test -h $STATIC_LIB_DEP_DIR/`basename $LZ4_LIBS` || ln -s $LZ4_LIBS $STATIC_LIB_DEP_DIR + +EXTRA_LDFLAGS+=" -L $STATIC_LIB_DEP_DIR" + +set -x + +EXTRA_CMD="" +if ! test -e version.json; then + # this is Mongo 3.0 + EXTRA_CMD="--rocksdb \ + --variant-dir=linux2/norm + --cxx=${CXX} \ + --cc=${CC} \ + --use-system-zlib" # add this line back to normal code path + # when https://jira.mongodb.org/browse/SERVER-19123 is resolved +fi + +scons \ + LINKFLAGS="$EXTRA_LDFLAGS $EXEC_LDFLAGS $PLATFORM_LDFLAGS" \ + CCFLAGS="$CXXFLAGS -L $STATIC_LIB_DEP_DIR" \ + LIBS="lz4 gcc stdc++" \ + LIBPATH="$ROCKSDB_PATH" \ + CPPPATH="$ROCKSDB_PATH/include" \ + -j32 \ + --allocator=$ALLOC \ + --nostrip \ + --opt=on \ + --disable-minimum-compiler-version-enforcement \ + --use-system-snappy \ + --disable-warnings-as-errors \ + $EXTRA_CMD $* diff --git a/external/rocksdb/build_tools/fbcode_config.sh b/external/rocksdb/build_tools/fbcode_config.sh new file mode 100755 index 0000000000..1c2416c913 --- /dev/null +++ b/external/rocksdb/build_tools/fbcode_config.sh @@ -0,0 +1,136 @@ +#!/bin/sh +# +# Set environment variables so that we can compile rocksdb using +# fbcode settings. It uses the latest g++ and clang compilers and also +# uses jemalloc +# Environment variables that change the behavior of this script: +# PIC_BUILD -- if true, it will only take pic versions of libraries from fbcode. libraries that don't have pic variant will not be included + + +BASEDIR=`dirname $BASH_SOURCE` +source "$BASEDIR/dependencies.sh" + +CFLAGS="" + +# libgcc +LIBGCC_INCLUDE="$LIBGCC_BASE/include" +LIBGCC_LIBS=" -L $LIBGCC_BASE/lib" + +# glibc +GLIBC_INCLUDE="$GLIBC_BASE/include" +GLIBC_LIBS=" -L $GLIBC_BASE/lib" + +# snappy +SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include/" +if test -z $PIC_BUILD; then + SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a" +else + SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy_pic.a" +fi +CFLAGS+=" -DSNAPPY" + +if test -z $PIC_BUILD; then + # location of zlib headers and libraries + ZLIB_INCLUDE=" -I $ZLIB_BASE/include/" + ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a" + CFLAGS+=" -DZLIB" + + # location of bzip headers and libraries + BZIP_INCLUDE=" -I $BZIP2_BASE/include/" + BZIP_LIBS=" $BZIP2_BASE/lib/libbz2.a" + CFLAGS+=" -DBZIP2" + + LZ4_INCLUDE=" -I $LZ4_BASE/include/" + LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a" + CFLAGS+=" -DLZ4" + + ZSTD_INCLUDE=" -I $ZSTD_BASE/include/" + ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a" + CFLAGS+=" -DZSTD" +fi + +# location of gflags headers and libraries +GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/" +if test -z $PIC_BUILD; then + GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags.a" +else + GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags_pic.a" +fi +CFLAGS+=" -DGFLAGS=google" + +# location of jemalloc +JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include/" +JEMALLOC_LIB=" $JEMALLOC_BASE/lib/libjemalloc.a" + +if test -z $PIC_BUILD; then + # location of numa + NUMA_INCLUDE=" -I $NUMA_BASE/include/" + NUMA_LIB=" $NUMA_BASE/lib/libnuma.a" + CFLAGS+=" -DNUMA" + + # location of libunwind + LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind.a" +fi + +# use Intel SSE support for checksum calculations +export USE_SSE=1 + +BINUTILS="$BINUTILS_BASE/bin" +AR="$BINUTILS/ar" + 
+DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE" + +STDLIBS="-L $GCC_BASE/lib64" + +CLANG_BIN="$CLANG_BASE/bin" +CLANG_LIB="$CLANG_BASE/lib" +CLANG_SRC="$CLANG_BASE/../../src" + +CLANG_ANALYZER="$CLANG_BIN/clang++" +CLANG_SCAN_BUILD="$CLANG_SRC/llvm/tools/clang/tools/scan-build/bin/scan-build" + +if [ -z "$USE_CLANG" ]; then + # gcc + CC="$GCC_BASE/bin/gcc" + CXX="$GCC_BASE/bin/g++" + + CFLAGS+=" -B$BINUTILS/gold" + CFLAGS+=" -isystem $GLIBC_INCLUDE" + CFLAGS+=" -isystem $LIBGCC_INCLUDE" + JEMALLOC=1 +else + # clang + CLANG_INCLUDE="$CLANG_LIB/clang/stable/include" + CC="$CLANG_BIN/clang" + CXX="$CLANG_BIN/clang++" + + KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include" + + CFLAGS+=" -B$BINUTILS/gold -nostdinc -nostdlib" + CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/4.9.x " + CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/4.9.x/x86_64-facebook-linux " + CFLAGS+=" -isystem $GLIBC_INCLUDE" + CFLAGS+=" -isystem $LIBGCC_INCLUDE" + CFLAGS+=" -isystem $CLANG_INCLUDE" + CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE/linux " + CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE " + CFLAGS+=" -Wno-expansion-to-defined " + CXXFLAGS="-nostdinc++" +fi + +CFLAGS+=" $DEPS_INCLUDE" +CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE" +CXXFLAGS+=" $CFLAGS" + +EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB" +EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/gcc-4.9-glibc-2.20/lib/ld.so" +EXEC_LDFLAGS+=" $LIBUNWIND" +EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/gcc-4.9-glibc-2.20/lib" + +PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++" + +EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS" + +VALGRIND_VER="$VALGRIND_BASE/bin/" + +export CC CXX AR CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE CLANG_ANALYZER CLANG_SCAN_BUILD diff --git a/external/rocksdb/build_tools/fbcode_config4.8.1.sh b/external/rocksdb/build_tools/fbcode_config4.8.1.sh new file mode 100755 index 0000000000..d338bf6b65 --- /dev/null +++ b/external/rocksdb/build_tools/fbcode_config4.8.1.sh @@ -0,0 +1,107 @@ +#!/bin/sh +# +# Set environment variables so that we can compile rocksdb using +# fbcode settings. 
It uses the latest g++ compiler and also +# uses jemalloc + +BASEDIR=`dirname $BASH_SOURCE` +source "$BASEDIR/dependencies_4.8.1.sh" + +# location of libgcc +LIBGCC_INCLUDE="$LIBGCC_BASE/include" +LIBGCC_LIBS=" -L $LIBGCC_BASE/lib" + +# location of glibc +GLIBC_INCLUDE="$GLIBC_BASE/include" +GLIBC_LIBS=" -L $GLIBC_BASE/lib" + +# location of snappy headers and libraries +SNAPPY_INCLUDE=" -I $SNAPPY_BASE/include" +SNAPPY_LIBS=" $SNAPPY_BASE/lib/libsnappy.a" + +# location of zlib headers and libraries +ZLIB_INCLUDE=" -I $ZLIB_BASE/include" +ZLIB_LIBS=" $ZLIB_BASE/lib/libz.a" + +# location of bzip headers and libraries +BZIP2_INCLUDE=" -I $BZIP2_BASE/include/" +BZIP2_LIBS=" $BZIP2_BASE/lib/libbz2.a" + +LZ4_INCLUDE=" -I $LZ4_BASE/include" +LZ4_LIBS=" $LZ4_BASE/lib/liblz4.a" + +ZSTD_INCLUDE=" -I $ZSTD_BASE/include" +ZSTD_LIBS=" $ZSTD_BASE/lib/libzstd.a" + +# location of gflags headers and libraries +GFLAGS_INCLUDE=" -I $GFLAGS_BASE/include/" +GFLAGS_LIBS=" $GFLAGS_BASE/lib/libgflags.a" + +# location of jemalloc +JEMALLOC_INCLUDE=" -I $JEMALLOC_BASE/include" +JEMALLOC_LIB="$JEMALLOC_BASE/lib/libjemalloc.a" + +# location of numa +NUMA_INCLUDE=" -I $NUMA_BASE/include/" +NUMA_LIB=" $NUMA_BASE/lib/libnuma.a" + +# location of libunwind +LIBUNWIND="$LIBUNWIND_BASE/lib/libunwind.a" + +# use Intel SSE support for checksum calculations +export USE_SSE=1 + +BINUTILS="$BINUTILS_BASE/bin" +AR="$BINUTILS/ar" + +DEPS_INCLUDE="$SNAPPY_INCLUDE $ZLIB_INCLUDE $BZIP2_INCLUDE $LZ4_INCLUDE $ZSTD_INCLUDE $GFLAGS_INCLUDE $NUMA_INCLUDE" + +STDLIBS="-L $GCC_BASE/lib64" + +if [ -z "$USE_CLANG" ]; then + # gcc + CC="$GCC_BASE/bin/gcc" + CXX="$GCC_BASE/bin/g++" + + CFLAGS="-B$BINUTILS/gold -m64 -mtune=generic" + CFLAGS+=" -isystem $GLIBC_INCLUDE" + CFLAGS+=" -isystem $LIBGCC_INCLUDE" + JEMALLOC=1 +else + # clang + CLANG_BIN="$CLANG_BASE/bin" + CLANG_LIB="$CLANG_BASE/lib" + CLANG_INCLUDE="$CLANG_LIB/clang/*/include" + CC="$CLANG_BIN/clang" + CXX="$CLANG_BIN/clang++" + + KERNEL_HEADERS_INCLUDE="$KERNEL_HEADERS_BASE/include/" + + CFLAGS="-B$BINUTILS/gold -nostdinc -nostdlib" + CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/4.8.1 " + CFLAGS+=" -isystem $LIBGCC_BASE/include/c++/4.8.1/x86_64-facebook-linux " + CFLAGS+=" -isystem $GLIBC_INCLUDE" + CFLAGS+=" -isystem $LIBGCC_INCLUDE" + CFLAGS+=" -isystem $CLANG_INCLUDE" + CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE/linux " + CFLAGS+=" -isystem $KERNEL_HEADERS_INCLUDE " + CXXFLAGS="-nostdinc++" +fi + +CFLAGS+=" $DEPS_INCLUDE" +CFLAGS+=" -DROCKSDB_PLATFORM_POSIX -DROCKSDB_LIB_IO_POSIX -DROCKSDB_FALLOCATE_PRESENT -DROCKSDB_MALLOC_USABLE_SIZE" +CFLAGS+=" -DSNAPPY -DGFLAGS=google -DZLIB -DBZIP2 -DLZ4 -DZSTD -DNUMA" +CXXFLAGS+=" $CFLAGS" + +EXEC_LDFLAGS=" $SNAPPY_LIBS $ZLIB_LIBS $BZIP2_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS $NUMA_LIB" +EXEC_LDFLAGS+=" -Wl,--dynamic-linker,/usr/local/fbcode/gcc-4.8.1-glibc-2.17/lib/ld.so" +EXEC_LDFLAGS+=" $LIBUNWIND" +EXEC_LDFLAGS+=" -Wl,-rpath=/usr/local/fbcode/gcc-4.8.1-glibc-2.17/lib" + +PLATFORM_LDFLAGS="$LIBGCC_LIBS $GLIBC_LIBS $STDLIBS -lgcc -lstdc++" + +EXEC_LDFLAGS_SHARED="$SNAPPY_LIBS $ZLIB_LIBS $BZIP2_LIBS $LZ4_LIBS $ZSTD_LIBS $GFLAGS_LIBS" + +VALGRIND_VER="$VALGRIND_BASE/bin/" + +export CC CXX AR CFLAGS CXXFLAGS EXEC_LDFLAGS EXEC_LDFLAGS_SHARED VALGRIND_VER JEMALLOC_LIB JEMALLOC_INCLUDE diff --git a/external/rocksdb/build_tools/format-diff.sh b/external/rocksdb/build_tools/format-diff.sh new file mode 100755 index 0000000000..5b2efdd1a9 --- /dev/null +++ b/external/rocksdb/build_tools/format-diff.sh @@ -0,0 +1,113 @@ +#!/bin/bash +# If 
clang_format_diff.py command is not specified, we assume it can be
+# run directly without any path.
+if [ -z $CLANG_FORMAT_DIFF ]
+then
+CLANG_FORMAT_DIFF="clang-format-diff.py"
+fi
+
+# Check clang-format-diff.py
+if ! which $CLANG_FORMAT_DIFF &> /dev/null
+then
+  echo "You don't have clang-format-diff.py available on this machine!"
+  echo "You can download it by running: "
+  echo "    curl http://goo.gl/iUW1u2"
+  exit 128
+fi
+
+# Check argparse, a library that clang-format-diff.py requires.
+python 2>/dev/null << EOF
+import argparse
+EOF
+
+if [ "$?" != 0 ]
+then
+  echo "To run clang-format-diff.py, we'll need the library 'argparse' to be"
+  echo "installed. You can try one of the following ways to install it:"
+  echo "  1. Manually download argparse: https://pypi.python.org/pypi/argparse"
+  echo "  2. easy_install argparse (if you have easy_install)"
+  echo "  3. pip install argparse (if you have pip)"
+  exit 129
+fi
+
+# TODO(kailiu) the following work is not complete, since we still need to
+# figure out how to add the files modified by the pre-commit hook to git's
+# commit index.
+#
+# Check if this script has already been added to pre-commit hook.
+# Will suggest user to add this script to pre-commit hook if their pre-commit
+# is empty.
+# PRE_COMMIT_SCRIPT_PATH="`git rev-parse --show-toplevel`/.git/hooks/pre-commit"
+# if ! ls $PRE_COMMIT_SCRIPT_PATH &> /dev/null
+# then
+#   echo "Would you like to add this script to pre-commit hook, which will do "
+#   echo -n "the format check for all the affected lines before you check in (y/n):"
+#   read add_to_hook
+#   if [ "$add_to_hook" == "y" ]
+#   then
+#     ln -s `git rev-parse --show-toplevel`/build_tools/format-diff.sh $PRE_COMMIT_SCRIPT_PATH
+#   fi
+# fi
+set -e
+
+uncommitted_code=`git diff HEAD`
+
+# If there are no uncommitted changes, we assume the user is doing a
+# post-commit format check, in which case we check the modified lines from
+# the latest commit. Otherwise, we check the format of the uncommitted code
+# only.
+if [ -z "$uncommitted_code" ]
+then
+  # Check the format of the last commit
+  diffs=$(git diff -U0 HEAD^ | $CLANG_FORMAT_DIFF -p 1)
+else
+  # Check the format of the uncommitted lines.
+  diffs=$(git diff -U0 HEAD | $CLANG_FORMAT_DIFF -p 1)
+fi
+
+if [ -z "$diffs" ]
+then
+  echo "Nothing needs to be reformatted!"
+  exit 0
+fi
+
+# Highlight the insertions/deletions from clang-format-diff.py's output
+COLOR_END="\033[0m"
+COLOR_RED="\033[0;31m"
+COLOR_GREEN="\033[0;32m"
+
+echo -e "Detected lines that don't follow the format rules:\r"
+# Add color to the diff: lines added will be green; lines removed will be red.
+echo "$diffs" |
+  sed -e "s/\(^-.*$\)/`echo -e \"$COLOR_RED\1$COLOR_END\"`/" |
+  sed -e "s/\(^+.*$\)/`echo -e \"$COLOR_GREEN\1$COLOR_END\"`/"
+
+if [[ "$OPT" == *"-DTRAVIS"* ]]
+then
+  exit 1
+fi
+
+echo -e "Would you like to fix the format automatically (y/n): \c"
+
+# Make sure that under any mode we can read user input.
+exec < /dev/tty
+read to_fix
+
+if [ "$to_fix" != "y" ]
+then
+  exit 1
+fi
+
+# Do in-place format adjustment.
+git diff -U0 HEAD^ | $CLANG_FORMAT_DIFF -i -p 1
+echo "Files reformatted!"
+
+# Amend the last commit if the user did a post-commit format check
+if [ -z "$uncommitted_code" ]; then
+  echo -e "Would you like to amend the changes to the last commit (`git log HEAD --oneline | head -1`)?
(y/n): \c"
+  read to_amend
+
+  if [ "$to_amend" == "y" ]
+  then
+    git commit -a --amend --reuse-message HEAD
+    echo "Amended to last commit"
+  fi
+fi
diff --git a/external/rocksdb/build_tools/make_new_version.sh b/external/rocksdb/build_tools/make_new_version.sh
new file mode 100755
index 0000000000..76a8473557
--- /dev/null
+++ b/external/rocksdb/build_tools/make_new_version.sh
@@ -0,0 +1,46 @@
+#!/bin/bash
+# Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+# This source code is licensed under the BSD-style license found in the
+# LICENSE file in the root directory of this source tree. An additional grant
+# of patent rights can be found in the PATENTS file in the same directory.
+
+set -e
+if [ -z "$GIT" ]
+then
+  GIT="git"
+fi
+
+# Print out the colored progress info so that it can be brainlessly
+# distinguished by users.
+function title() {
+  echo -e "\033[1;32m$*\033[0m"
+}
+
+usage="Create new RocksDB version and prepare it for the release process\n"
+usage+="USAGE: ./make_new_version.sh <version>"
+
+# -- Pre-check
+if [[ $# < 1 ]]; then
+  echo -e $usage
+  exit 1
+fi
+
+ROCKSDB_VERSION=$1
+
+GIT_BRANCH=`git rev-parse --abbrev-ref HEAD`
+echo $GIT_BRANCH
+
+if [ $GIT_BRANCH != "master" ]; then
+  echo "Error: Current branch is '$GIT_BRANCH'. Please switch to the master branch."
+  exit 1
+fi
+
+title "Adding new tag for this release ..."
+BRANCH="$ROCKSDB_VERSION.fb"
+$GIT checkout -b $BRANCH
+
+# Setting up the proxy for remote repo access
+title "Pushing new branch to remote repo ..."
+git push origin --set-upstream $BRANCH
+
+title "Branch $BRANCH is pushed to github;"
diff --git a/external/rocksdb/build_tools/make_package.sh b/external/rocksdb/build_tools/make_package.sh
new file mode 100755
index 0000000000..2ca28023de
--- /dev/null
+++ b/external/rocksdb/build_tools/make_package.sh
@@ -0,0 +1,116 @@
+#!/usr/bin/env bash
+
+set -e
+
+function log() {
+  echo "[+] $1"
+}
+
+function fatal() {
+  echo "[!] $1"
+  exit 1
+}
+
+function platform() {
+  local __resultvar=$1
+  if [[ -f "/etc/yum.conf" ]]; then
+    eval $__resultvar="centos"
+  elif [[ -f "/etc/dpkg/dpkg.cfg" ]]; then
+    eval $__resultvar="ubuntu"
+  else
+    fatal "Unknown operating system"
+  fi
+}
+platform OS
+
+function package() {
+  if [[ $OS = "ubuntu" ]]; then
+    if dpkg --get-selections | grep --quiet $1; then
+      log "$1 is already installed. skipping."
+    else
+      apt-get install $@ -y
+    fi
+  elif [[ $OS = "centos" ]]; then
+    if rpm -qa | grep --quiet $1; then
+      log "$1 is already installed. skipping."
+    else
+      yum install $@ -y
+    fi
+  fi
+}
+
+function detect_fpm_output() {
+  if [[ $OS = "ubuntu" ]]; then
+    export FPM_OUTPUT=deb
+  elif [[ $OS = "centos" ]]; then
+    export FPM_OUTPUT=rpm
+  fi
+}
+detect_fpm_output
+
+function gem_install() {
+  if gem list | grep --quiet $1; then
+    log "$1 is already installed. skipping."
+  else
+    gem install $@
+  fi
+}
+
+function main() {
+  if [[ $# -ne 1 ]]; then
+    fatal "Usage: $0 <rocksdb_version>"
+  else
+    log "using rocksdb version: $1"
+  fi
+
+  if [[ -d /vagrant ]]; then
+    if [[ $OS = "ubuntu" ]]; then
+      package g++-4.7
+      export CXX=g++-4.7
+
+      # the deb would depend on libgflags2, but the static lib is the only thing
+      # installed by make install
+      package libgflags-dev
+
+      package ruby-all-dev
+    elif [[ $OS = "centos" ]]; then
+      pushd /etc/yum.repos.d
+      if [[ !
-f /etc/yum.repos.d/devtools-1.1.repo ]]; then + wget http://people.centos.org/tru/devtools-1.1/devtools-1.1.repo + fi + package devtoolset-1.1-gcc --enablerepo=testing-1.1-devtools-6 + package devtoolset-1.1-gcc-c++ --enablerepo=testing-1.1-devtools-6 + export CC=/opt/centos/devtoolset-1.1/root/usr/bin/gcc + export CPP=/opt/centos/devtoolset-1.1/root/usr/bin/cpp + export CXX=/opt/centos/devtoolset-1.1/root/usr/bin/c++ + export PATH=$PATH:/opt/centos/devtoolset-1.1/root/usr/bin + popd + if ! rpm -qa | grep --quiet gflags; then + rpm -i https://github.com/schuhschuh/gflags/releases/download/v2.1.0/gflags-devel-2.1.0-1.amd64.rpm + fi + + package ruby + package ruby-devel + package rubygems + package rpm-build + fi + fi + gem_install fpm + + make static_lib + make install INSTALL_PATH=package + fpm \ + -s dir \ + -t $FPM_OUTPUT \ + -n rocksdb \ + -v $1 \ + --prefix /usr \ + --url http://rocksdb.org/ \ + -m rocksdb@fb.com \ + --license BSD \ + --vendor Facebook \ + --description "RocksDB is an embeddable persistent key-value store for fast storage." \ + package +} + +main $@ diff --git a/external/rocksdb/build_tools/regression_build_test.sh b/external/rocksdb/build_tools/regression_build_test.sh new file mode 100755 index 0000000000..8ac1ceecec --- /dev/null +++ b/external/rocksdb/build_tools/regression_build_test.sh @@ -0,0 +1,428 @@ +#!/bin/bash + +set -e + +NUM=10000000 + +if [ $# -eq 1 ];then + DATA_DIR=$1 +elif [ $# -eq 2 ];then + DATA_DIR=$1 + STAT_FILE=$2 +fi + +# On the production build servers, set data and stat +# files/directories not in /tmp or else the tempdir cleaning +# scripts will make you very unhappy. +DATA_DIR=${DATA_DIR:-$(mktemp -t -d rocksdb_XXXX)} +STAT_FILE=${STAT_FILE:-$(mktemp -t -u rocksdb_test_stats_XXXX)} + +function cleanup { + rm -rf $DATA_DIR + rm -f $STAT_FILE.fillseq + rm -f $STAT_FILE.readrandom + rm -f $STAT_FILE.overwrite + rm -f $STAT_FILE.memtablefillreadrandom +} + +trap cleanup EXIT + +if [ -z $GIT_BRANCH ]; then + git_br=`git rev-parse --abbrev-ref HEAD` +else + git_br=$(basename $GIT_BRANCH) +fi + +if [ $git_br == "master" ]; then + git_br="" +else + git_br="."$git_br +fi + +make release + +# measure fillseq + fill up the DB for overwrite benchmark +./db_bench \ + --benchmarks=fillseq \ + --db=$DATA_DIR \ + --use_existing_db=0 \ + --bloom_bits=10 \ + --num=$NUM \ + --writes=$NUM \ + --cache_size=6442450944 \ + --cache_numshardbits=6 \ + --table_cache_numshardbits=4 \ + --open_files=55000 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 > ${STAT_FILE}.fillseq + +# measure overwrite performance +./db_bench \ + --benchmarks=overwrite \ + --db=$DATA_DIR \ + --use_existing_db=1 \ + --bloom_bits=10 \ + --num=$NUM \ + --writes=$((NUM / 10)) \ + --cache_size=6442450944 \ + --cache_numshardbits=6 \ + --table_cache_numshardbits=4 \ + --open_files=55000 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 \ + --threads=8 > ${STAT_FILE}.overwrite + +# fill up the db for readrandom benchmark (1GB total size) +./db_bench \ + --benchmarks=fillseq \ + --db=$DATA_DIR \ + --use_existing_db=0 \ + --bloom_bits=10 \ + --num=$NUM \ + --writes=$NUM \ + --cache_size=6442450944 \ + --cache_numshardbits=6 \ + --table_cache_numshardbits=4 \ + --open_files=55000 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 \ + --threads=1 > /dev/null + +# measure readrandom with 6GB block cache +./db_bench \ + --benchmarks=readrandom \ + --db=$DATA_DIR \ + 
--use_existing_db=1 \ + --bloom_bits=10 \ + --num=$NUM \ + --reads=$((NUM / 5)) \ + --cache_size=6442450944 \ + --cache_numshardbits=6 \ + --table_cache_numshardbits=4 \ + --open_files=55000 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 \ + --threads=16 > ${STAT_FILE}.readrandom + +# measure readrandom with 6GB block cache and tailing iterator +./db_bench \ + --benchmarks=readrandom \ + --db=$DATA_DIR \ + --use_existing_db=1 \ + --bloom_bits=10 \ + --num=$NUM \ + --reads=$((NUM / 5)) \ + --cache_size=6442450944 \ + --cache_numshardbits=6 \ + --table_cache_numshardbits=4 \ + --open_files=55000 \ + --use_tailing_iterator=1 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 \ + --threads=16 > ${STAT_FILE}.readrandomtailing + +# measure readrandom with 100MB block cache +./db_bench \ + --benchmarks=readrandom \ + --db=$DATA_DIR \ + --use_existing_db=1 \ + --bloom_bits=10 \ + --num=$NUM \ + --reads=$((NUM / 5)) \ + --cache_size=104857600 \ + --cache_numshardbits=6 \ + --table_cache_numshardbits=4 \ + --open_files=55000 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 \ + --threads=16 > ${STAT_FILE}.readrandomsmallblockcache + +# measure readrandom with 8k data in memtable +./db_bench \ + --benchmarks=overwrite,readrandom \ + --db=$DATA_DIR \ + --use_existing_db=1 \ + --bloom_bits=10 \ + --num=$NUM \ + --reads=$((NUM / 5)) \ + --writes=512 \ + --cache_size=6442450944 \ + --cache_numshardbits=6 \ + --table_cache_numshardbits=4 \ + --write_buffer_size=1000000000 \ + --open_files=55000 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 \ + --threads=16 > ${STAT_FILE}.readrandom_mem_sst + + +# fill up the db for readrandom benchmark with filluniquerandom (1GB total size) +./db_bench \ + --benchmarks=filluniquerandom \ + --db=$DATA_DIR \ + --use_existing_db=0 \ + --bloom_bits=10 \ + --num=$((NUM / 4)) \ + --writes=$((NUM / 4)) \ + --cache_size=6442450944 \ + --cache_numshardbits=6 \ + --table_cache_numshardbits=4 \ + --open_files=55000 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 \ + --threads=1 > /dev/null + +# dummy test just to compact the data +./db_bench \ + --benchmarks=readrandom \ + --db=$DATA_DIR \ + --use_existing_db=1 \ + --bloom_bits=10 \ + --num=$((NUM / 1000)) \ + --reads=$((NUM / 1000)) \ + --cache_size=6442450944 \ + --cache_numshardbits=6 \ + --table_cache_numshardbits=4 \ + --open_files=55000 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 \ + --threads=16 > /dev/null + +# measure readrandom after load with filluniquerandom with 6GB block cache +./db_bench \ + --benchmarks=readrandom \ + --db=$DATA_DIR \ + --use_existing_db=1 \ + --bloom_bits=10 \ + --num=$((NUM / 4)) \ + --reads=$((NUM / 4)) \ + --cache_size=6442450944 \ + --cache_numshardbits=6 \ + --table_cache_numshardbits=4 \ + --open_files=55000 \ + --disable_auto_compactions=1 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 \ + --threads=16 > ${STAT_FILE}.readrandom_filluniquerandom + +# measure readwhilewriting after load with filluniquerandom with 6GB block cache +./db_bench \ + --benchmarks=readwhilewriting \ + --db=$DATA_DIR \ + --use_existing_db=1 \ + --bloom_bits=10 \ + --num=$((NUM / 4)) \ + --reads=$((NUM / 4)) \ + --benchmark_write_rate_limit=$(( 110 * 1024 )) \ + 
--write_buffer_size=100000000 \ + --cache_size=6442450944 \ + --cache_numshardbits=6 \ + --table_cache_numshardbits=4 \ + --open_files=55000 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 \ + --threads=16 > ${STAT_FILE}.readwhilewriting + +# measure memtable performance -- none of the data gets flushed to disk +./db_bench \ + --benchmarks=fillrandom,readrandom, \ + --db=$DATA_DIR \ + --use_existing_db=0 \ + --num=$((NUM / 10)) \ + --reads=$NUM \ + --cache_size=6442450944 \ + --cache_numshardbits=6 \ + --table_cache_numshardbits=4 \ + --write_buffer_size=1000000000 \ + --open_files=55000 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 \ + --value_size=10 \ + --threads=16 > ${STAT_FILE}.memtablefillreadrandom + +common_in_mem_args="--db=/dev/shm/rocksdb \ + --num_levels=6 \ + --key_size=20 \ + --prefix_size=12 \ + --keys_per_prefix=10 \ + --value_size=100 \ + --compression_type=none \ + --compression_ratio=1 \ + --hard_rate_limit=2 \ + --write_buffer_size=134217728 \ + --max_write_buffer_number=4 \ + --level0_file_num_compaction_trigger=8 \ + --level0_slowdown_writes_trigger=16 \ + --level0_stop_writes_trigger=24 \ + --target_file_size_base=134217728 \ + --max_bytes_for_level_base=1073741824 \ + --disable_wal=0 \ + --wal_dir=/dev/shm/rocksdb \ + --sync=0 \ + --disable_data_sync=1 \ + --verify_checksum=1 \ + --delete_obsolete_files_period_micros=314572800 \ + --max_grandparent_overlap_factor=10 \ + --use_plain_table=1 \ + --open_files=-1 \ + --mmap_read=1 \ + --mmap_write=0 \ + --memtablerep=prefix_hash \ + --bloom_bits=10 \ + --bloom_locality=1 \ + --perf_level=0" + +# prepare an in-memory DB with 50M keys, total DB size is ~6G +./db_bench \ + $common_in_mem_args \ + --statistics=0 \ + --max_background_compactions=16 \ + --max_background_flushes=16 \ + --benchmarks=filluniquerandom \ + --use_existing_db=0 \ + --num=52428800 \ + --threads=1 > /dev/null + +# Readwhilewriting +./db_bench \ + $common_in_mem_args \ + --statistics=1 \ + --max_background_compactions=4 \ + --max_background_flushes=0 \ + --benchmarks=readwhilewriting\ + --use_existing_db=1 \ + --duration=600 \ + --threads=32 \ + --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.readwhilewriting_in_ram + +# Seekrandomwhilewriting +./db_bench \ + $common_in_mem_args \ + --statistics=1 \ + --max_background_compactions=4 \ + --max_background_flushes=0 \ + --benchmarks=seekrandomwhilewriting \ + --use_existing_db=1 \ + --use_tailing_iterator=1 \ + --duration=600 \ + --threads=32 \ + --benchmark_write_rate_limit=9502720 > ${STAT_FILE}.seekwhilewriting_in_ram + +# measure fillseq with a bunch of column families +./db_bench \ + --benchmarks=fillseq \ + --num_column_families=500 \ + --write_buffer_size=1048576 \ + --db=$DATA_DIR \ + --use_existing_db=0 \ + --num=$NUM \ + --writes=$NUM \ + --open_files=55000 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 > ${STAT_FILE}.fillseq_lots_column_families + +# measure overwrite performance with a bunch of column families +./db_bench \ + --benchmarks=overwrite \ + --num_column_families=500 \ + --write_buffer_size=1048576 \ + --db=$DATA_DIR \ + --use_existing_db=1 \ + --num=$NUM \ + --writes=$((NUM / 10)) \ + --open_files=55000 \ + --statistics=1 \ + --histogram=1 \ + --disable_data_sync=1 \ + --disable_wal=1 \ + --sync=0 \ + --threads=8 > ${STAT_FILE}.overwrite_lots_column_families + +# send data to ods +function send_to_ods { + key="$1" + value="$2" + + if [ -z
$JENKINS_HOME ]; then + # running on devbox, just print out the values + echo $1 $2 + return + fi + + if [ -z "$value" ];then + echo >&2 "ERROR: Key $key doesn't have a value." + return + fi + curl -s "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build$git_br&key=$key&value=$value" \ + --connect-timeout 60 +} + +function send_benchmark_to_ods { + bench="$1" + bench_key="$2" + file="$3" + + QPS=$(grep $bench $file | awk '{print $5}') + P50_MICROS=$(grep $bench $file -A 6 | grep "Percentiles" | awk '{print $3}' ) + P75_MICROS=$(grep $bench $file -A 6 | grep "Percentiles" | awk '{print $5}' ) + P99_MICROS=$(grep $bench $file -A 6 | grep "Percentiles" | awk '{print $7}' ) + + send_to_ods rocksdb.build.$bench_key.qps $QPS + send_to_ods rocksdb.build.$bench_key.p50_micros $P50_MICROS + send_to_ods rocksdb.build.$bench_key.p75_micros $P75_MICROS + send_to_ods rocksdb.build.$bench_key.p99_micros $P99_MICROS +} + +send_benchmark_to_ods overwrite overwrite $STAT_FILE.overwrite +send_benchmark_to_ods fillseq fillseq $STAT_FILE.fillseq +send_benchmark_to_ods readrandom readrandom $STAT_FILE.readrandom +send_benchmark_to_ods readrandom readrandom_tailing $STAT_FILE.readrandomtailing +send_benchmark_to_ods readrandom readrandom_smallblockcache $STAT_FILE.readrandomsmallblockcache +send_benchmark_to_ods readrandom readrandom_memtable_sst $STAT_FILE.readrandom_mem_sst +send_benchmark_to_ods readrandom readrandom_fillunique_random $STAT_FILE.readrandom_filluniquerandom +send_benchmark_to_ods fillrandom memtablefillrandom $STAT_FILE.memtablefillreadrandom +send_benchmark_to_ods readrandom memtablereadrandom $STAT_FILE.memtablefillreadrandom +send_benchmark_to_ods readwhilewriting readwhilewriting $STAT_FILE.readwhilewriting +send_benchmark_to_ods readwhilewriting readwhilewriting_in_ram ${STAT_FILE}.readwhilewriting_in_ram +send_benchmark_to_ods seekrandomwhilewriting seekwhilewriting_in_ram ${STAT_FILE}.seekwhilewriting_in_ram +send_benchmark_to_ods fillseq fillseq_lots_column_families ${STAT_FILE}.fillseq_lots_column_families +send_benchmark_to_ods overwrite overwrite_lots_column_families ${STAT_FILE}.overwrite_lots_column_families diff --git a/external/rocksdb/build_tools/rocksdb-lego-determinator b/external/rocksdb/build_tools/rocksdb-lego-determinator new file mode 100755 index 0000000000..c3f32820c3 --- /dev/null +++ b/external/rocksdb/build_tools/rocksdb-lego-determinator @@ -0,0 +1,868 @@ +#!/bin/bash +# This script is executed by Sandcastle +# to determine next steps to run + +# Usage: +# EMAIL= ONCALL= TRIGGER= SUBSCRIBER= rocks_ci.py +# +# Input Value +# ------------------------------------------------------------------------- +# EMAIL Email address to report on trigger conditions +# ONCALL Email address to raise a task on failure +# TRIGGER Trigger conditions for email. Valid values are fail, warn, all +# SUBSCRIBER Email address to add as subscriber for task +# + +# +# Report configuration +# +REPORT_EMAIL= +if [ ! -z $EMAIL ]; then + if [ -z $TRIGGER ]; then + TRIGGER="fail" + fi + + REPORT_EMAIL=" + { + 'type':'email', + 'triggers': [ '$TRIGGER' ], + 'emails':['$EMAIL'] + }," +fi + +CREATE_TASK= +if [ ! -z $ONCALL ]; then + CREATE_TASK=" + { + 'type':'task', + 'triggers':[ 'fail' ], + 'priority':0, + 'subscribers':[ '$SUBSCRIBER' ], + 'tags':[ 'rocksdb', 'ci' ], + }," +fi + +# For now, create the tasks using only the dedicated task creation tool. +CREATE_TASK= + +REPORT= +if [[ ! -z $REPORT_EMAIL || !
-z $CREATE_TASK ]]; then + REPORT="'report': [ + $REPORT_EMAIL + $CREATE_TASK + ]" +fi + +# +# Helper variables +# +CLEANUP_ENV=" +{ + 'name':'Cleanup environment', + 'shell':'rm -rf /dev/shm/rocksdb && mkdir /dev/shm/rocksdb && chmod +t /dev/shm && make clean', + 'user':'root' +}" + +# We will eventually set the RATIO to 1, but we want to do this +# in steps. RATIO=$(nproc) will make it work as J=1 +if [ -z $RATIO ]; then + RATIO=$(nproc) +fi + +if [ -z $PARALLEL_J ]; then + PARALLEL_J="J=$(expr $(nproc) / ${RATIO})" +fi + +if [ -z $PARALLEL_j ]; then + PARALLEL_j="-j$(expr $(nproc) / ${RATIO})" +fi + +PARALLELISM="$PARALLEL_J $PARALLEL_j" + +DEBUG="OPT=-g" +SHM="TEST_TMPDIR=/dev/shm/rocksdb" +GCC_481="ROCKSDB_FBCODE_BUILD_WITH_481=1" +ASAN="COMPILE_WITH_ASAN=1" +CLANG="USE_CLANG=1" +LITE="OPT=\"-DROCKSDB_LITE -g\"" +TSAN="COMPILE_WITH_TSAN=1" +UBSAN="COMPILE_WITH_UBSAN=1" +DISABLE_JEMALLOC="DISABLE_JEMALLOC=1" +HTTP_PROXY="https_proxy=http://fwdproxy.29.prn1:8080 http_proxy=http://fwdproxy.29.prn1:8080 ftp_proxy=http://fwdproxy.29.prn1:8080" +SETUP_JAVA_ENV="export $HTTP_PROXY; export JAVA_HOME=/usr/local/jdk-7u10-64/; export PATH=\$PATH:\$JAVA_HOME/bin" +PARSER="'parser':'python build_tools/error_filter.py $1'" + +CONTRUN_NAME="ROCKSDB_CONTRUN_NAME" + +# This code is getting called under various scenarios. What we care about is to +# understand when it's called from nightly contruns because in that case we'll +# create tasks for any failures. To follow the existing pattern, we'll check +# the value of $ONCALL. If it's a diff then just call `false` to make sure +# that errors will be properly propagated to the caller. +if [ ! -z $ONCALL ]; then + TASK_CREATION_TOOL="/usr/local/bin/mysql_mtr_filter --rocksdb" +else + TASK_CREATION_TOOL="false" +fi + +ARTIFACTS=" 'artifacts': [ + { + 'name':'database', + 'paths':[ '/dev/shm/rocksdb' ], + } +]" + +# +# A mechanism to disable tests temporarily +# +DISABLE_COMMANDS="[ + { + 'name':'Disable test', + 'oncall':'$ONCALL', + 'steps': [ + { + 'name':'Job disabled. Please contact test owner', + 'shell':'exit 1', + 'user':'root' + }, + ], + } +]" + +# +# RocksDB unit test in parallel +# Currently we always have noise in our parallel runs.
This job is to help +# manage the noise +# +PARALLEL_UNIT_TEST_COMMANDS="[ + { + 'name':'Rocksdb Parallel Unit Test', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build and test RocksDB debug version', + 'shell':'$DEBUG make -j$(nproc) all && $SHM make check > /dev/null 2>&1 || $CONTRUN_NAME=punit_check $TASK_CREATION_TOOL || cat t/log-*', + 'user':'root', + $PARSER + }, + $CLEANUP_ENV, + { + 'name':'Build and test RocksDB debug version under gcc-4.8.1', + 'shell':'$GCC_481 $DEBUG make -j$(nproc) all && $SHM make check > /dev/null 2>&1 || $CONTRUN_NAME=punit_check_gcc481 $TASK_CREATION_TOOL || cat t/log-*', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB unit test +# +UNIT_TEST_COMMANDS="[ + { + 'name':'Rocksdb Unit Test', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build and test RocksDB debug version', + 'shell':'$SHM $DEBUG make $PARALLELISM check || $CONTRUN_NAME=check $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB unit test not under /dev/shm +# +UNIT_TEST_NON_SHM_COMMANDS="[ + { + 'name':'Rocksdb Unit Test', + 'oncall':'$ONCALL', + 'timeout': 86400, + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build and test RocksDB debug version', + 'timeout': 86400, + 'shell':'$DEBUG make $PARALLELISM check || $CONTRUN_NAME=non_shm_check $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB release build and unit tests +# +RELEASE_BUILD_COMMANDS="[ + { + 'name':'Rocksdb Release Build', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build RocksDB release', + 'shell':'make $PARALLEL_j release || $CONTRUN_NAME=release $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB unit test on gcc-4.8.1 +# +UNIT_TEST_COMMANDS_481="[ + { + 'name':'Rocksdb Unit Test on GCC 4.8.1', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build and test RocksDB debug version', + 'shell':'$SHM $GCC_481 $DEBUG make $PARALLELISM check || $CONTRUN_NAME=unit_gcc_481_check $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB release build and unit tests +# +RELEASE_BUILD_COMMANDS_481="[ + { + 'name':'Rocksdb Release on GCC 4.8.1', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build RocksDB release on GCC 4.8.1', + 'shell':'$GCC_481 make $PARALLEL_j release || $CONTRUN_NAME=release_gcc481 $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB unit test with CLANG +# +CLANG_UNIT_TEST_COMMANDS="[ + { + 'name':'Rocksdb Unit Test', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build and test RocksDB debug', + 'shell':'$CLANG $SHM $DEBUG make $PARALLELISM check || $CONTRUN_NAME=clang_check $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB release build with CLANG +# +CLANG_RELEASE_BUILD_COMMANDS="[ + { + 'name':'Rocksdb CLANG Release Build', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build RocksDB release', + 'shell':'$CLANG make $PARALLEL_j release|| $CONTRUN_NAME=clang_release $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB analyze +# +CLANG_ANALYZE_COMMANDS="[ + { + 'name':'Rocksdb analyze', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'RocksDB build and analyze', + 'shell':'$CLANG $SHM $DEBUG make $PARALLEL_j analyze || $CONTRUN_NAME=clang_analyze $TASK_CREATION_TOOL', + 
'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB code coverage +# +CODE_COV_COMMANDS="[ + { + 'name':'Rocksdb Unit Test Code Coverage', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build, test and collect code coverage info', + 'shell':'$SHM $DEBUG make $PARALLELISM coverage || $CONTRUN_NAME=coverage $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB unity +# +UNITY_COMMANDS="[ + { + 'name':'Rocksdb Unity', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build, test unity test', + 'shell':'$SHM $DEBUG V=1 make J=1 unity_test || $CONTRUN_NAME=unity_test $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# Build RocksDB lite +# +LITE_BUILD_COMMANDS="[ + { + 'name':'Rocksdb Lite build', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build RocksDB debug version', + 'shell':'$LITE make J=1 static_lib || $CONTRUN_NAME=lite_static_lib $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB lite tests +# +LITE_UNIT_TEST_COMMANDS="[ + { + 'name':'Rocksdb Lite Unit Test', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build RocksDB debug version', + 'shell':'$SHM $LITE make J=1 check || $CONTRUN_NAME=lite_check $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB stress/crash test +# +STRESS_CRASH_TEST_COMMANDS="[ + { + 'name':'Rocksdb Stress/Crash Test', + 'oncall':'$ONCALL', + 'timeout': 86400, + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build and run RocksDB debug stress tests', + 'shell':'$SHM $DEBUG make J=1 db_stress || $CONTRUN_NAME=db_stress $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + { + 'name':'Build and run RocksDB debug crash tests', + 'timeout': 86400, + 'shell':'$SHM $DEBUG make J=1 crash_test || $CONTRUN_NAME=crash_test $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + } + ], + $ARTIFACTS, + $REPORT + } +]" + +# RocksDB write stress test. +# We run on disk device on purpose (i.e. 
no $SHM) +# because we want to add some randomness to fsync commands +WRITE_STRESS_COMMANDS="[ + { + 'name':'Rocksdb Write Stress Test', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build and run RocksDB write stress tests', + 'shell':'make write_stress && python tools/write_stress_runner.py --runtime_sec=3600 --db=/tmp/rocksdb_write_stress || $CONTRUN_NAME=write_stress $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + } + ], + 'artifacts': [{'name': 'database', 'paths': ['/tmp/rocksdb_write_stress']}], + $REPORT + } +]" + + +# +# RocksDB test under address sanitizer +# +ASAN_TEST_COMMANDS="[ + { + 'name':'Rocksdb Unit Test under ASAN', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Test RocksDB debug under ASAN', +'shell':'set -o pipefail && ($SHM $ASAN $DEBUG make $PARALLELISM asan_check || $CONTRUN_NAME=asan_check $TASK_CREATION_TOOL) |& /usr/facebook/ops/scripts/asan_symbolize.py -d', + 'user':'root', + $PARSER + } + ], + $REPORT + } +]" + +# +# RocksDB crash testing under address sanitizer +# +ASAN_CRASH_TEST_COMMANDS="[ + { + 'name':'Rocksdb crash test under ASAN', + 'oncall':'$ONCALL', + 'timeout': 86400, + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build and run RocksDB debug asan_crash_test', + 'timeout': 86400, + 'shell':'$SHM $DEBUG make J=1 asan_crash_test || $CONTRUN_NAME=asan_crash_test $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB test under undefined behavior sanitizer +# +UBSAN_TEST_COMMANDS="[ + { + 'name':'Rocksdb Unit Test under UBSAN', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Test RocksDB debug under UBSAN', + 'shell':'set -o pipefail && $SHM $UBSAN $DEBUG make $PARALLELISM ubsan_check || $CONTRUN_NAME=ubsan_check $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + } + ], + $REPORT + } +]" + +# +# RocksDB crash testing under undefined behavior sanitizer +# +UBSAN_CRASH_TEST_COMMANDS="[ + { + 'name':'Rocksdb crash test under UBSAN', + 'oncall':'$ONCALL', + 'timeout': 86400, + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build and run RocksDB debug ubsan_crash_test', + 'timeout': 86400, + 'shell':'$SHM $DEBUG make J=1 ubsan_crash_test || $CONTRUN_NAME=ubsan_crash_test $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB unit test under valgrind +# +VALGRIND_TEST_COMMANDS="[ + { + 'name':'Rocksdb Unit Test under valgrind', + 'oncall':'$ONCALL', + 'timeout': 86400, + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Run RocksDB debug unit tests', + 'timeout': 86400, + 'shell':'$DISABLE_JEMALLOC $SHM $DEBUG make $PARALLELISM valgrind_check || $CONTRUN_NAME=valgrind_check $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB test under TSAN +# +TSAN_UNIT_TEST_COMMANDS="[ + { + 'name':'Rocksdb Unit Test under TSAN', + 'oncall':'$ONCALL', + 'timeout': 86400, + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Run RocksDB debug unit test', + 'timeout': 86400, + 'shell':'set -o pipefail && $SHM $DEBUG $TSAN make $PARALLELISM check || $CONTRUN_NAME=tsan_check $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB crash test under TSAN +# +TSAN_CRASH_TEST_COMMANDS="[ + { + 'name':'Rocksdb Crash Test under TSAN', + 'oncall':'$ONCALL', + 'timeout': 86400, + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Compile and run', + 'timeout': 86400, + 'shell':'set -o pipefail && $SHM $DEBUG $TSAN CRASH_TEST_KILL_ODD=1887 make J=1 crash_test || $CONTRUN_NAME=tsan_crash_test $TASK_CREATION_TOOL', +
'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB format compatible +# + +run_format_compatible() +{ + export TEST_TMPDIR=/dev/shm/rocksdb + rm -rf /dev/shm/rocksdb + mkdir /dev/shm/rocksdb + + echo ' + if [ -e "build_tools/build_detect_platform" ] + then + sed "s/tcmalloc/nothingnothingnothing/g" build_tools/build_detect_platform > $TEST_TMPDIR/temp_build_file + rm -rf build_tools/build_detect_platform + cp $TEST_TMPDIR/temp_build_file build_tools/build_detect_platform + chmod +x build_tools/build_detect_platform + fi + + if [ -e "build_detect_platform" ] + then + sed "s/tcmalloc/nothingnothingnothing/g" build_detect_platform > $TEST_TMPDIR/temp_build_file + rm -rf build_detect_platform + cp $TEST_TMPDIR/temp_build_file build_detect_platform + chmod +x build_detect_platform + fi + + make ldb -j32 + + if [ -e "build_detect_platform" ] + then + git checkout -- build_detect_platform + fi + + if [ -e "build_tools/build_detect_platform" ] + then + git checkout -- build_tools/build_detect_platform + fi + ' > temp_build_ldb.sh + + sed "s/make ldb -j32/source temp_build_ldb.sh/g" tools/check_format_compatible.sh > tools/temp_check_format_compatible.sh + chmod +x tools/temp_check_format_compatible.sh + tools/temp_check_format_compatible.sh +} + +FORMAT_COMPATIBLE_COMMANDS="[ + { + 'name':'Rocksdb Format Compatible tests', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Run RocksDB debug unit test', + 'shell':'build_tools/rocksdb-lego-determinator run_format_compatible || $CONTRUN_NAME=run_format_compatible $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB no compression +# +run_no_compression() +{ + export TEST_TMPDIR=/dev/shm/rocksdb + rm -rf /dev/shm/rocksdb + mkdir /dev/shm/rocksdb + make clean + cat build_tools/fbcode_config.sh | grep -iv dzlib | grep -iv dlz4 | grep -iv dsnappy | grep -iv dbzip2 > .tmp.fbcode_config.sh + mv .tmp.fbcode_config.sh build_tools/fbcode_config.sh + cat Makefile | grep -v tools/ldb_test.py > .tmp.Makefile + mv .tmp.Makefile Makefile + make $DEBUG J=1 check +} + +NO_COMPRESSION_COMMANDS="[ + { + 'name':'Rocksdb No Compression tests', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Run RocksDB debug unit test', + 'shell':'build_tools/rocksdb-lego-determinator run_no_compression || $CONTRUN_NAME=run_no_compression $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB regression +# +run_regression() +{ + time -v bash -vx ./build_tools/regression_build_test.sh $(mktemp -d $WORKSPACE/leveldb.XXXX) $(mktemp leveldb_test_stats.XXXX) + + # ======= report size to ODS ======== + + # parameters: $1 -- key, $2 -- value + function send_size_to_ods { + curl -s "https://www.intern.facebook.com/intern/agent/ods_set.php?entity=rocksdb_build&key=rocksdb.build_size.$1&value=$2" \ + --connect-timeout 60 + } + + # === normal build === + make clean + make -j$(nproc) static_lib + send_size_to_ods static_lib $(stat --printf="%s" librocksdb.a) + strip librocksdb.a + send_size_to_ods static_lib_stripped $(stat --printf="%s" librocksdb.a) + + make -j$(nproc) shared_lib + send_size_to_ods shared_lib $(stat --printf="%s" `readlink -f librocksdb.so`) + strip `readlink -f librocksdb.so` + send_size_to_ods shared_lib_stripped $(stat --printf="%s" `readlink -f librocksdb.so`) + + # === lite build === + make clean + OPT=-DROCKSDB_LITE make -j$(nproc) static_lib + send_size_to_ods static_lib_lite $(stat --printf="%s" librocksdb.a) + strip librocksdb.a + 
send_size_to_ods static_lib_lite_stripped $(stat --printf="%s" librocksdb.a) + + OPT=-DROCKSDB_LITE make -j$(nproc) shared_lib + send_size_to_ods shared_lib_lite $(stat --printf="%s" `readlink -f librocksdb.so`) + strip `readlink -f librocksdb.so` + send_size_to_ods shared_lib_lite_stripped $(stat --printf="%s" `readlink -f librocksdb.so`) +} + +REGRESSION_COMMANDS="[ + { + 'name':'Rocksdb regression commands', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Make and run script', + 'shell':'build_tools/rocksdb-lego-determinator run_regression || $CONTRUN_NAME=run_regression $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + +# +# RocksDB Java build +# +JAVA_BUILD_TEST_COMMANDS="[ + { + 'name':'Rocksdb Java Build', + 'oncall':'$ONCALL', + 'steps': [ + $CLEANUP_ENV, + { + 'name':'Build RocksDB for Java', + 'shell':'$SETUP_JAVA_ENV; $SHM make rocksdbjava || $CONTRUN_NAME=rocksdbjava $TASK_CREATION_TOOL', + 'user':'root', + $PARSER + }, + ], + $REPORT + } +]" + + +case $1 in + punit) + echo $PARALLEL_UNIT_TEST_COMMANDS + ;; + unit) + echo $UNIT_TEST_COMMANDS + ;; + unit_non_shm) + echo $UNIT_TEST_NON_SHM_COMMANDS + ;; + release) + echo $RELEASE_BUILD_COMMANDS + ;; + unit_481) + echo $UNIT_TEST_COMMANDS_481 + ;; + release_481) + echo $RELEASE_BUILD_COMMANDS_481 + ;; + clang_unit) + echo $CLANG_UNIT_TEST_COMMANDS + ;; + clang_release) + echo $CLANG_RELEASE_BUILD_COMMANDS + ;; + clang_analyze) + echo $CLANG_ANALYZE_COMMANDS + ;; + code_cov) + echo $CODE_COV_COMMANDS + ;; + unity) + echo $UNITY_COMMANDS + ;; + lite) + echo $LITE_BUILD_COMMANDS + ;; + lite_test) + echo $LITE_UNIT_TEST_COMMANDS + ;; + stress_crash) + echo $STRESS_CRASH_TEST_COMMANDS + ;; + write_stress) + echo $WRITE_STRESS_COMMANDS + ;; + asan) + echo $ASAN_TEST_COMMANDS + ;; + asan_crash) + echo $ASAN_CRASH_TEST_COMMANDS + ;; + ubsan) + echo $UBSAN_TEST_COMMANDS + ;; + ubsan_crash) + echo $UBSAN_CRASH_TEST_COMMANDS + ;; + valgrind) + echo $VALGRIND_TEST_COMMANDS + ;; + tsan) + echo $TSAN_UNIT_TEST_COMMANDS + ;; + tsan_crash) + echo $TSAN_CRASH_TEST_COMMANDS + ;; + format_compatible) + echo $FORMAT_COMPATIBLE_COMMANDS + ;; + run_format_compatible) + run_format_compatible + ;; + no_compression) + echo $NO_COMPRESSION_COMMANDS + ;; + run_no_compression) + run_no_compression + ;; + regression) + echo $REGRESSION_COMMANDS + ;; + run_regression) + run_regression + ;; + java_build) + echo $JAVA_BUILD_TEST_COMMANDS + ;; + *) + echo "Invalid determinator command" + ;; +esac diff --git a/external/rocksdb/build_tools/run_ci_db_test.ps1 b/external/rocksdb/build_tools/run_ci_db_test.ps1 new file mode 100755 index 0000000000..94d81cc24c --- /dev/null +++ b/external/rocksdb/build_tools/run_ci_db_test.ps1 @@ -0,0 +1,330 @@ +# This script enables you to run RocksDB tests by running +# all the tests in parallel, utilizing all the cores +# For db_test the script first lists and parses the tests +# and then fires them up in parallel using async PS Job functionality +# Run the script from the enlistment +Param( + [switch]$EnableJE = $false, # Use je executable + [switch]$EnableRerun = $false, # Rerun failed tests sequentially at the end + [string]$WorkFolder = "", # Direct tests to use that folder + [int]$Limit = -1, # -1 means run all otherwise limit for testing purposes + [string]$Exclude = "", # Expect a comma separated list, no spaces + [string]$Run = "db_test", # Run db_test|db_test2|tests|testname1,testname2... + # Number of async tasks that would run concurrently. Recommend a number below 64.
+ # However, CPU utilization really depends on the storage media. Recommend a RAM-based disk. + # a value of 1 will run everything serially + [int]$Concurrency = 16 +) + +# Folders and commands must be full paths to run, assuming +# the current folder is at the root of the git enlistment +Get-Date + +# If running under Appveyor, use its build folder as the root +[string]$Appveyor = $Env:APPVEYOR_BUILD_FOLDER +if($Appveyor -ne "") { + $RootFolder = $Appveyor +} else { + $RootFolder = $PSScriptRoot -replace '\\build_tools', '' +} + +$LogFolder = -Join($RootFolder, "\db_logs\") +$BinariesFolder = -Join($RootFolder, "\build\Debug\") + +if($WorkFolder -eq "") { + + # If TEST_TMPDIR is set use it + [string]$var = $Env:TEST_TMPDIR + if($var -eq "") { + $WorkFolder = -Join($RootFolder, "\db_tests\") + $Env:TEST_TMPDIR = $WorkFolder + } else { + $WorkFolder = $var + } +} else { +# Override from a command line + $Env:TEST_TMPDIR = $WorkFolder +} + +Write-Output "Root: $RootFolder, WorkFolder: $WorkFolder" + +# Use JEMALLOC executables +if($Run -ceq "db_test" -or + $Run -ceq "db_test2" ) { + + $file_name = $Run + + if($EnableJE) { + $file_name += "_je" + } + + $file_name += ".exe" + + $db_test = -Join ($BinariesFolder, $file_name) + + Write-Output "Binaries: $BinariesFolder db_test: $db_test" +} + + +# Exclusions that we do not want to run +$ExcludeTests = New-Object System.Collections.Generic.HashSet[string] + + +if($Exclude -ne "") { + Write-Host "Exclude: $Exclude" + $l = $Exclude -split ' ' + ForEach($t in $l) { $ExcludeTests.Add($t) | Out-Null } +} + +# Create test directories in the current folder +md -Path $WorkFolder -ErrorAction Ignore | Out-Null +md -Path $LogFolder -ErrorAction Ignore | Out-Null + +# Extract the names of its tests by running db_test with --gtest_list_tests. +# This filter removes the "#"-introduced comments, and expands to +# fully-qualified names by changing input like this: +# +# DBTest. +# Empty +# WriteEmptyBatch +# MultiThreaded/MultiThreadedDBTest. +# MultiThreaded/0 # GetParam() = 0 +# MultiThreaded/1 # GetParam() = 1 +# +# into this: +# +# DBTest.Empty +# DBTest.WriteEmptyBatch +# MultiThreaded/MultiThreadedDBTest.MultiThreaded/0 +# MultiThreaded/MultiThreadedDBTest.MultiThreaded/1 +# Output into the parameter in the form TestName -> Log File Name +function Normalize-DbTests($HashTable) { + + $Tests = @() +# Run db_test to get a list of tests and store it into the $Tests array + &$db_test --gtest_list_tests | tee -Variable Tests | Out-Null + + # Current group + $Group="" + + ForEach( $l in $Tests) { + # Trailing dot is a test group + if( $l -match "\.$") { + $Group = $l + } else { + # Otherwise it is a test name, remove leading space + $test = $l -replace '^\s+','' + # remove trailing comment if any and create a log name + $test = $test -replace '\s+\#.*','' + $test = "$Group$test" + + if($ExcludeTests.Contains($test)) { + continue + } + + $test_log = $test -replace '[\./]','_' + $test_log += ".log" + $log_path = -join ($LogFolder, $test_log) + + # Add to a hashtable + $HashTable.Add($test, $log_path); + } + } +} + +# The function removes trailing .exe suffix if any, +# creates a name for the log file +function MakeAndAdd([string]$token, $HashTable) { + $test_name = $token -replace '.exe$', '' + $log_name = -join ($test_name, ".log") + $log_path = -join ($LogFolder, $log_name) + if(!$ExcludeTests.Contains($test_name)) { + $HashTable.Add($test_name, $log_path) + } else { + Write-Warning "Test $test_name is excluded" + } +} + +# The function scans build\Debug folder to discover +# Test executables.
It then populates a table with +# Test executable name -> Log file +function Discover-TestBinaries([string]$Pattern, $HashTable) { + + $Exclusions = @("db_test*", "db_sanity_test*") + + $p = -join ($BinariesFolder, $pattern) + + Write-Host "Path: $p" + + dir -Path $p -Exclude $Exclusions | ForEach-Object { + MakeAndAdd -token ($_.Name) -HashTable $HashTable + } +} + +$TestsToRun = [ordered]@{} + +if($Run -ceq "db_test" -or + $Run -ceq "db_test2") { + Normalize-DbTests -HashTable $TestsToRun +} elseif($Run -ceq "tests") { + if($EnableJE) { + $pattern = "*_test_je.exe" + } else { + $pattern = "*_test.exe" + } + Discover-TestBinaries -Pattern $pattern -HashTable $TestsToRun +} else { + + $test_list = $Run -split ' ' + + ForEach($t in $test_list) { + MakeAndAdd -token $t -HashTable $TestsToRun + } +} + +$NumTestsToStart = $TestsToRun.Count +if($Limit -ge 0 -and $NumTestsToStart -gt $Limit) { + $NumTestsToStart = $Limit +} + +Write-Host "Attempting to start: $NumTestsToStart tests" + +# Invoke a test with a filter and redirect all output +$InvokeTestCase = { + param($exe, $test, $log); + &$exe --gtest_filter=$test > $log 2>&1 +} + +# Invoke all tests and redirect output +$InvokeTestAsync = { + param($exe, $log) + &$exe > $log 2>&1 +} + +# Hash that contains tests to rerun if any failed +# Those tests will be rerun sequentially +$Rerun = [ordered]@{} +# Test limiting factor here +$count = 0 +# Overall status +[bool]$success = $true; + +function RunJobs($TestToLog, [int]$ConcurrencyVal, [bool]$AddForRerun) +{ + # Array to wait for any of the running jobs + $jobs = @() + # Hash JobToLog + $JobToLog = @{} + + # Wait for all to finish and get the results + while(($JobToLog.Count -gt 0) -or + ($TestToLog.Count -gt 0)) { + + # Make sure we have maximum concurrent jobs running if anything + # and the $Limit either not set or allows to proceed + while(($JobToLog.Count -lt $ConcurrencyVal) -and + (($TestToLog.Count -gt 0) -and + (($Limit -lt 0) -or ($count -lt $Limit)))) { + + + # We only need the first key + foreach($key in $TestToLog.keys) { + $k = $key + break + } + + Write-Host "Starting $k" + $log_path = ($TestToLog.$k) + + if($Run -ceq "db_test" -or + $Run -ceq "db_test2") { + $job = Start-Job -Name $k -ScriptBlock $InvokeTestCase -ArgumentList @($db_test,$k,$log_path) + } else { + [string]$Exe = -Join ($BinariesFolder, $k) + $job = Start-Job -Name $k -ScriptBlock $InvokeTestAsync -ArgumentList @($exe,$log_path) + } + + $JobToLog.Add($job, $log_path) + $TestToLog.Remove($k) + + ++$count + } + + if($JobToLog.Count -lt 1) { + break + } + + $jobs = @() + foreach($k in $JobToLog.Keys) { $jobs += $k } + + $completed = Wait-Job -Job $jobs -Any + $log = $JobToLog[$completed] + $JobToLog.Remove($completed) + + $message = -join @($completed.Name, " State: ", ($completed.State)) + + $log_content = @(Get-Content $log) + + if($completed.State -ne "Completed") { + $success = $false + Write-Warning $message + $log_content | Write-Warning + } else { + # Scan the log. 
If we find PASSED and no occurrence of FAILED + # then it is a success + [bool]$pass_found = $false + ForEach($l in $log_content) { + + if(($l -match "^\[\s+FAILED") -or + ($l -match "Assertion failed:")) { + $pass_found = $false + break + } + + if(($l -match "^\[\s+PASSED") -or + ($l -match " : PASSED$") -or + ($l -match "^PASS$") -or # Special c_test case + ($l -match "Passed all tests!") ) { + $pass_found = $true + } + } + + if(!$pass_found) { + $success = $false; + Write-Warning $message + $log_content | Write-Warning + if($AddForRerun) { + $Rerun.Add($completed.Name, $log) + } + } else { + Write-Host $message + } + } + + # Remove cached job info from the system + # Should be no output + Receive-Job -Job $completed | Out-Null + } +} + +RunJobs -TestToLog $TestsToRun -ConcurrencyVal $Concurrency -AddForRerun $EnableRerun + +if($Rerun.Count -gt 0) { + Write-Host "Rerunning " ($Rerun.Count) " tests sequentially" + $success = $true + $count = 0 + RunJobs -TestToLog $Rerun -ConcurrencyVal 1 -AddForRerun $false +} + +Get-Date + + +if(!$success) { +# This does not succeed in killing off jobs quickly +# So we simply exit +# Remove-Job -Job $jobs -Force +# indicate failure using this exit code + exit 12345 + } + + diff --git a/external/rocksdb/build_tools/version.sh b/external/rocksdb/build_tools/version.sh new file mode 100755 index 0000000000..c5a8595fb7 --- /dev/null +++ b/external/rocksdb/build_tools/version.sh @@ -0,0 +1,14 @@ +#!/bin/sh +if [ "$#" = "0" ]; then + echo "Usage: $0 major|minor|patch" + exit 1 +fi +if [ "$1" = "major" ]; then + cat include/rocksdb/version.h | grep MAJOR | head -n1 | awk '{print $3}' +fi +if [ "$1" = "minor" ]; then + cat include/rocksdb/version.h | grep MINOR | head -n1 | awk '{print $3}' +fi +if [ "$1" = "patch" ]; then + cat include/rocksdb/version.h | grep PATCH | head -n1 | awk '{print $3}' +fi diff --git a/external/rocksdb/coverage/coverage_test.sh b/external/rocksdb/coverage/coverage_test.sh new file mode 100755 index 0000000000..4d8052c9e4 --- /dev/null +++ b/external/rocksdb/coverage/coverage_test.sh @@ -0,0 +1,78 @@ +#!/bin/bash + +# Exit on error. +set -e + +if [ -n "$USE_CLANG" ]; then + echo "Error: Coverage test is supported only for gcc." + exit 1 +fi + +ROOT=".." +# Fetch the right version of gcov +if [ -d /mnt/gvfs/third-party -a -z "$CXX" ]; then + source $ROOT/build_tools/fbcode_config.sh + GCOV=$GCC_BASE/bin/gcov +else + GCOV=$(which gcov) +fi + +COVERAGE_DIR="$PWD/COVERAGE_REPORT" +mkdir -p $COVERAGE_DIR + +# Find all gcno files to generate the coverage report + +GCNO_FILES=`find $ROOT -name "*.gcno"` +$GCOV --preserve-paths --relative-only --no-output $GCNO_FILES 2>/dev/null | + # Parse the raw gcov report into a more human-readable form. + python $ROOT/coverage/parse_gcov_output.py | + # Write the output to both stdout and report file. + tee $COVERAGE_DIR/coverage_report_all.txt && +echo -e "Generated coverage report for all files: $COVERAGE_DIR/coverage_report_all.txt\n" + +# TODO: we also need to get the files of the latest commits. +# Get the most recently committed files.
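+# (git show --pretty="format:" --name-only HEAD prints just the names of the +# files touched by HEAD, one per line; paste -s -d, then joins them into a +# comma separated list such as "db/foo.cc,util/bar.h" (file names purely +# illustrative), which is the format parse_gcov_output.py expects for its +# --interested-files option.)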
+LATEST_FILES=` + git show --pretty="format:" --name-only HEAD | + grep -v "^$" | + paste -s -d,` +RECENT_REPORT=$COVERAGE_DIR/coverage_report_recent.txt + +echo -e "Recently updated files: $LATEST_FILES\n" > $RECENT_REPORT +$GCOV --preserve-paths --relative-only --no-output $GCNO_FILES 2>/dev/null | + python $ROOT/coverage/parse_gcov_output.py --interested-files $LATEST_FILES | + tee -a $RECENT_REPORT && +echo -e "Generated coverage report for recently updated files: $RECENT_REPORT\n" + +# Unless otherwise specified, we'll not generate the html report by default +if [ -z "$HTML" ]; then + exit 0 +fi + +# Generate the html report. If we cannot find lcov on this machine, we'll simply +# skip this step. +echo "Generating the html coverage report..." + +LCOV=$(which lcov || true 2>/dev/null) +if [ -z $LCOV ] +then + echo "Skip: Cannot find lcov to generate the html report." + exit 0 +fi + +LCOV_VERSION=$(lcov -v | grep 1.1 || true) +if [ $LCOV_VERSION ] +then + echo "Not supported lcov version. Expect lcov 1.1." + exit 0 +fi + +(cd $ROOT; lcov --no-external \ + --capture \ + --directory $PWD \ + --gcov-tool $GCOV \ + --output-file $COVERAGE_DIR/coverage.info) + +genhtml $COVERAGE_DIR/coverage.info -o $COVERAGE_DIR + +echo "HTML Coverage report is generated in $COVERAGE_DIR" diff --git a/external/rocksdb/coverage/parse_gcov_output.py b/external/rocksdb/coverage/parse_gcov_output.py new file mode 100755 index 0000000000..72e8b07230 --- /dev/null +++ b/external/rocksdb/coverage/parse_gcov_output.py @@ -0,0 +1,118 @@ +import optparse +import re +import sys + +from optparse import OptionParser + +# The gcov report follows a fixed pattern: each file produces two lines of +# report, from which we can extract the file name, total lines and coverage +# percentage, e.g.: +# File 'db/db_impl.cc' +# Lines executed:85.00% of 2000 +def parse_gcov_report(gcov_input): + per_file_coverage = {} + total_coverage = None + current_file = None + + for line in gcov_input: + line = line.strip() + + # --First line of the coverage report (with file name in it)? + match_obj = re.match("^File '(.*)'$", line) + if match_obj: + # fetch the file name from the first line of the report. + current_file = match_obj.group(1) + continue + + # -- Second line of the file report (with coverage percentage) + match_obj = re.match("^Lines executed:(.*)% of (.*)", line) + + if match_obj: + coverage = float(match_obj.group(1)) + lines = int(match_obj.group(2)) + + if current_file is not None: + per_file_coverage[current_file] = (coverage, lines) + current_file = None + else: + # If current_file is not set, we have reached the last line of + # the report, which contains the summarized coverage percentage. + total_coverage = (coverage, lines) + continue + + # If the line doesn't match any of the above patterns, we can simply + # ignore it: it is either an empty line or a line for which gcov + # found no executable code. + current_file = None + + return per_file_coverage, total_coverage + +def get_option_parser(): + usage = "Parse the gcov output and generate a more human-readable code " +\ + "coverage report." + parser = OptionParser(usage) + + parser.add_option( + "--interested-files", "-i", + dest="filenames", + help="Comma separated file names. If specified, we will display " + + "the coverage report only for interested source files. " + + "Otherwise we will display the coverage report for all " + + "source files." + ) + return parser + +def display_file_coverage(per_file_coverage, total_coverage): + # To print out auto-adjustable column, we need to know the longest + # length of file names.
+ max_file_name_length = max( + len(fname) for fname in per_file_coverage.keys() + ) + + # -- Print header + # size of separator is determined by 3 column sizes: + # file name, coverage percentage and lines. + header_template = \ + "%" + str(max_file_name_length) + "s\t%s\t%s" + separator = "-" * (max_file_name_length + 10 + 20) + print header_template % ("Filename", "Coverage", "Lines") + print separator + + # -- Print body + # template for printing coverage report for each file. + record_template = "%" + str(max_file_name_length) + "s\t%5.2f%%\t%10d" + + for fname, coverage_info in per_file_coverage.items(): + coverage, lines = coverage_info + print record_template % (fname, coverage, lines) + + # -- Print footer + if total_coverage: + print separator + print record_template % ("Total", total_coverage[0], total_coverage[1]) + +def report_coverage(): + parser = get_option_parser() + (options, args) = parser.parse_args() + + interested_files = set() + if options.filenames is not None: + interested_files = set(f.strip() for f in options.filenames.split(',')) + + # To make things simple, right now we only read the gcov report from stdin + per_file_coverage, total_coverage = parse_gcov_report(sys.stdin) + + # Check if we need to display coverage info for interested files. + if len(interested_files): + per_file_coverage = dict( + (fname, per_file_coverage[fname]) for fname in interested_files + if fname in per_file_coverage + ) + # If we're only interested in a few files, it makes no sense to report + # the total_coverage + total_coverage = None + + if not len(per_file_coverage): + print >> sys.stderr, "Cannot find coverage info for the given files." + return + display_file_coverage(per_file_coverage, total_coverage) + +if __name__ == "__main__": + report_coverage() diff --git a/external/rocksdb/db/auto_roll_logger.cc b/external/rocksdb/db/auto_roll_logger.cc new file mode 100755 index 0000000000..0fb2a1d2a4 --- /dev/null +++ b/external/rocksdb/db/auto_roll_logger.cc @@ -0,0 +1,174 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +#include "db/auto_roll_logger.h" +#include "util/mutexlock.h" + +namespace rocksdb { + +// -- AutoRollLogger +Status AutoRollLogger::ResetLogger() { + TEST_SYNC_POINT("AutoRollLogger::ResetLogger:BeforeNewLogger"); + status_ = env_->NewLogger(log_fname_, &logger_); + TEST_SYNC_POINT("AutoRollLogger::ResetLogger:AfterNewLogger"); + + if (!status_.ok()) { + return status_; + } + + if (logger_->GetLogFileSize() == Logger::kDoNotSupportGetLogFileSize) { + status_ = Status::NotSupported( + "The underlying logger doesn't support GetLogFileSize()"); + } + if (status_.ok()) { + cached_now = static_cast<uint64_t>(env_->NowMicros() * 1e-6); + ctime_ = cached_now; + cached_now_access_count = 0; + } + + return status_; +} + +void AutoRollLogger::RollLogFile() { + // This function is called when the log is rotating. Two rotations + // can happen quickly (NowMicros returns the same value). To not overwrite + // the previous log file we increment by one microsecond and try again.
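+ // (For instance, if the name derived from the current timestamp already + // exists, the next candidate uses timestamp + 1, and so on until an + // unused name is found.)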
+ uint64_t now = env_->NowMicros(); + std::string old_fname; + do { + old_fname = OldInfoLogFileName( + dbname_, now, db_absolute_path_, db_log_dir_); + now++; + } while (env_->FileExists(old_fname).ok()); + env_->RenameFile(log_fname_, old_fname); +} + +std::string AutoRollLogger::ValistToString(const char* format, + va_list args) const { + // Any log messages longer than 1024 will get truncated. + // The user is responsible for chopping longer messages into multiple log lines. + static const int MAXBUFFERSIZE = 1024; + char buffer[MAXBUFFERSIZE]; + + int count = vsnprintf(buffer, MAXBUFFERSIZE, format, args); + (void) count; + assert(count >= 0); + + return buffer; +} + +void AutoRollLogger::LogInternal(const char* format, ...) { + mutex_.AssertHeld(); + va_list args; + va_start(args, format); + logger_->Logv(format, args); + va_end(args); +} + +void AutoRollLogger::Logv(const char* format, va_list ap) { + assert(GetStatus().ok()); + + std::shared_ptr<Logger> logger; + { + MutexLock l(&mutex_); + if ((kLogFileTimeToRoll > 0 && LogExpired()) || + (kMaxLogFileSize > 0 && logger_->GetLogFileSize() >= kMaxLogFileSize)) { + RollLogFile(); + Status s = ResetLogger(); + if (!s.ok()) { + // can't really log the error if creating a new LOG file failed + return; + } + + WriteHeaderInfo(); + } + + // pin down the current logger_ instance before releasing the mutex. + logger = logger_; + } + + // Another thread could have put a new Logger instance into logger_ by now. + // However, since logger is still hanging on to the previous instance + // (reference count is not zero), we don't have to worry about it being + // deleted while we are accessing it. + // Note that logv itself is not mutex protected to allow maximum concurrency, + // as thread safety should have been handled by the underlying logger. + logger->Logv(format, ap); +} + +void AutoRollLogger::WriteHeaderInfo() { + mutex_.AssertHeld(); + for (auto& header : headers_) { + LogInternal("%s", header.c_str()); + } +} + +void AutoRollLogger::LogHeader(const char* format, va_list args) { + // header messages are to be retained in memory.
Since we cannot make any + // assumptions about the data contained in va_list, we will retain them as + // strings + va_list tmp; + va_copy(tmp, args); + std::string data = ValistToString(format, tmp); + va_end(tmp); + + MutexLock l(&mutex_); + headers_.push_back(data); + + // Log the original message to the current log + logger_->Logv(format, args); +} + +bool AutoRollLogger::LogExpired() { + if (cached_now_access_count >= call_NowMicros_every_N_records_) { + cached_now = static_cast<uint64_t>(env_->NowMicros() * 1e-6); + cached_now_access_count = 0; + } + + ++cached_now_access_count; + return cached_now >= ctime_ + kLogFileTimeToRoll; +} + +Status CreateLoggerFromOptions(const std::string& dbname, + const DBOptions& options, + std::shared_ptr<Logger>* logger) { + if (options.info_log) { + *logger = options.info_log; + return Status::OK(); + } + + Env* env = options.env; + std::string db_absolute_path; + env->GetAbsolutePath(dbname, &db_absolute_path); + std::string fname = + InfoLogFileName(dbname, db_absolute_path, options.db_log_dir); + + env->CreateDirIfMissing(dbname); // In case it does not exist + // Currently we only support roll by time-to-roll and log size + if (options.log_file_time_to_roll > 0 || options.max_log_file_size > 0) { + AutoRollLogger* result = new AutoRollLogger( + env, dbname, options.db_log_dir, options.max_log_file_size, + options.log_file_time_to_roll, options.info_log_level); + Status s = result->GetStatus(); + if (!s.ok()) { + delete result; + } else { + logger->reset(result); + } + return s; + } else { + // Open a log file in the same directory as the db + env->RenameFile( + fname, OldInfoLogFileName(dbname, env->NowMicros(), db_absolute_path, + options.db_log_dir)); + auto s = env->NewLogger(fname, logger); + if (logger->get() != nullptr) { + (*logger)->SetInfoLogLevel(options.info_log_level); + } + return s; + } +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/auto_roll_logger.h b/external/rocksdb/db/auto_roll_logger.h new file mode 100755 index 0000000000..a43a98a8f1 --- /dev/null +++ b/external/rocksdb/db/auto_roll_logger.h @@ -0,0 +1,129 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Logger implementation that can be shared by all environments +// where enough posix functionality is available.
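+// +// A minimal usage sketch (illustrative only; the path and limits below are +// made up, see auto_roll_logger_test.cc for real usage): construct the logger +// with a size limit and a time limit, then log through the usual Log() calls; +// it rolls to a fresh LOG file whenever either limit is hit. +// +// rocksdb::Env* env = rocksdb::Env::Default(); +// rocksdb::AutoRollLogger logger(env, "/tmp/testdb" /* dbname */, +// "" /* db_log_dir */, +// 1024 * 1024 /* roll after 1MB */, +// 3600 /* roll hourly */); +// Log(&logger, "%s", "hello");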
+ +#pragma once +#include <list> +#include <string> + +#include "db/filename.h" +#include "port/port.h" +#include "port/util_logger.h" +#include "util/sync_point.h" +#include "util/mutexlock.h" + +namespace rocksdb { + +// Rolls the log file by size and/or time +class AutoRollLogger : public Logger { + public: + AutoRollLogger(Env* env, const std::string& dbname, + const std::string& db_log_dir, size_t log_max_size, + size_t log_file_time_to_roll, + const InfoLogLevel log_level = InfoLogLevel::INFO_LEVEL) + : Logger(log_level), + dbname_(dbname), + db_log_dir_(db_log_dir), + env_(env), + status_(Status::OK()), + kMaxLogFileSize(log_max_size), + kLogFileTimeToRoll(log_file_time_to_roll), + cached_now(static_cast<uint64_t>(env_->NowMicros() * 1e-6)), + ctime_(cached_now), + cached_now_access_count(0), + call_NowMicros_every_N_records_(100), + mutex_() { + env->GetAbsolutePath(dbname, &db_absolute_path_); + log_fname_ = InfoLogFileName(dbname_, db_absolute_path_, db_log_dir_); + RollLogFile(); + ResetLogger(); + } + + using Logger::Logv; + void Logv(const char* format, va_list ap) override; + + // Write a header entry to the log. All header information will be written + // again every time the log rolls over. + virtual void LogHeader(const char* format, va_list ap) override; + + // check if the logger has encountered any problem. + Status GetStatus() { + return status_; + } + + size_t GetLogFileSize() const override { + std::shared_ptr<Logger> logger; + { + MutexLock l(&mutex_); + // pin down the current logger_ instance before releasing the mutex. + logger = logger_; + } + return logger->GetLogFileSize(); + } + + void Flush() override { + std::shared_ptr<Logger> logger; + { + MutexLock l(&mutex_); + // pin down the current logger_ instance before releasing the mutex. + logger = logger_; + } + TEST_SYNC_POINT("AutoRollLogger::Flush:PinnedLogger"); + if (logger) { + logger->Flush(); + } + } + + virtual ~AutoRollLogger() { + } + + void SetCallNowMicrosEveryNRecords(uint64_t call_NowMicros_every_N_records) { + call_NowMicros_every_N_records_ = call_NowMicros_every_N_records; + } + + // Expose the log file path for testing purpose + std::string TEST_log_fname() const { + return log_fname_; + } + + private: + bool LogExpired(); + Status ResetLogger(); + void RollLogFile(); + // Log message to logger without rolling + void LogInternal(const char* format, ...); + // Serialize the va_list to a string + std::string ValistToString(const char* format, va_list args) const; + // Write the logs marked as headers to the new log file + void WriteHeaderInfo(); + + std::string log_fname_; // Current active info log's file name.
+ std::string dbname_; + std::string db_log_dir_; + std::string db_absolute_path_; + Env* env_; + std::shared_ptr<Logger> logger_; + // current status of the logger + Status status_; + const size_t kMaxLogFileSize; + const size_t kLogFileTimeToRoll; + // header information + std::list<std::string> headers_; + // to avoid frequent env->NowMicros() calls, we cache the current time + uint64_t cached_now; + uint64_t ctime_; + uint64_t cached_now_access_count; + uint64_t call_NowMicros_every_N_records_; + mutable port::Mutex mutex_; +}; + +// Facade to create a logger automatically +Status CreateLoggerFromOptions(const std::string& dbname, + const DBOptions& options, + std::shared_ptr<Logger>* logger); + +} // namespace rocksdb diff --git a/external/rocksdb/db/auto_roll_logger_test.cc b/external/rocksdb/db/auto_roll_logger_test.cc new file mode 100755 index 0000000000..6a0c954613 --- /dev/null +++ b/external/rocksdb/db/auto_roll_logger_test.cc @@ -0,0 +1,471 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +#include <string> +#include <thread> +#include <vector> +#include <fstream> +#include <sstream> +#include <iostream> +#include <iterator> +#include <algorithm> +#include "db/auto_roll_logger.h" +#include "port/port.h" +#include "util/sync_point.h" +#include "util/testharness.h" +#include "rocksdb/db.h" +#include <sys/stat.h> +#include <errno.h> + +namespace rocksdb { + +class AutoRollLoggerTest : public testing::Test { + public: + static void InitTestDb() { +#ifdef OS_WIN + // Replace all slashes in the path so windows CompSpec does not + // become confused + std::string testDir(kTestDir); + std::replace_if(testDir.begin(), testDir.end(), + [](char ch) { return ch == '/'; }, '\\'); + std::string deleteCmd = "if exist " + testDir + " rd /s /q " + testDir; +#else + std::string deleteCmd = "rm -rf " + kTestDir; +#endif + ASSERT_TRUE(system(deleteCmd.c_str()) == 0); + Env::Default()->CreateDir(kTestDir); + } + + void RollLogFileBySizeTest(AutoRollLogger* logger, size_t log_max_size, + const std::string& log_message); + uint64_t RollLogFileByTimeTest(AutoRollLogger* logger, size_t time, + const std::string& log_message); + + static const std::string kSampleMessage; + static const std::string kTestDir; + static const std::string kLogFile; + static Env* env; +}; + +const std::string AutoRollLoggerTest::kSampleMessage( + "this is the message to be written to the log file!!"); +const std::string AutoRollLoggerTest::kTestDir(test::TmpDir() + "/db_log_test"); +const std::string AutoRollLoggerTest::kLogFile(test::TmpDir() + + "/db_log_test/LOG"); +Env* AutoRollLoggerTest::env = Env::Default(); + +// In this test we only want to Log some simple log message with +// no format. LogMessage() provides such a simple interface and +// avoids the [format-security] warning which occurs when you +// call Log(logger, log_message) directly.
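+// (A call such as Log(logger, msg), where msg is a runtime variable, makes +// the format string non-literal, which is exactly what -Wformat-security +// flags; passing a literal "%s" format instead avoids the warning.)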
+namespace { +void LogMessage(Logger* logger, const char* message) { + Log(logger, "%s", message); +} + +void LogMessage(const InfoLogLevel log_level, Logger* logger, + const char* message) { + Log(log_level, logger, "%s", message); +} +} // namespace + +namespace { +void GetFileCreateTime(const std::string& fname, uint64_t* file_ctime) { + struct stat s; + if (stat(fname.c_str(), &s) != 0) { + *file_ctime = (uint64_t)0; + return; + } + *file_ctime = static_cast<uint64_t>(s.st_ctime); +} +} // namespace + +void AutoRollLoggerTest::RollLogFileBySizeTest(AutoRollLogger* logger, + size_t log_max_size, + const std::string& log_message) { + logger->SetInfoLogLevel(InfoLogLevel::INFO_LEVEL); + // measure the size of each message, which is supposed + // to be equal or greater than log_message.size() + LogMessage(logger, log_message.c_str()); + size_t message_size = logger->GetLogFileSize(); + size_t current_log_size = message_size; + + // Test the cases when the log file will not be rolled. + while (current_log_size + message_size < log_max_size) { + LogMessage(logger, log_message.c_str()); + current_log_size += message_size; + ASSERT_EQ(current_log_size, logger->GetLogFileSize()); + } + + // Now the log file will be rolled + LogMessage(logger, log_message.c_str()); + // Since rotation is checked before actual logging, we need to + // trigger the rotation by logging another message. + LogMessage(logger, log_message.c_str()); + + ASSERT_TRUE(message_size == logger->GetLogFileSize()); +} + +uint64_t AutoRollLoggerTest::RollLogFileByTimeTest( + AutoRollLogger* logger, size_t time, const std::string& log_message) { + uint64_t expected_create_time; + uint64_t actual_create_time; + uint64_t total_log_size; + EXPECT_OK(env->GetFileSize(kLogFile, &total_log_size)); + GetFileCreateTime(kLogFile, &expected_create_time); + logger->SetCallNowMicrosEveryNRecords(0); + + // -- Write to the log several times, which is supposed + // to be finished before time. + for (int i = 0; i < 10; ++i) { + LogMessage(logger, log_message.c_str()); + EXPECT_OK(logger->GetStatus()); + // Make sure we always write to the same log file (by + // checking the create time); + GetFileCreateTime(kLogFile, &actual_create_time); + + // Also make sure the log size is increasing. + EXPECT_EQ(expected_create_time, actual_create_time); + EXPECT_GT(logger->GetLogFileSize(), total_log_size); + total_log_size = logger->GetLogFileSize(); + } + + // -- Make the log file expire +#ifdef OS_WIN + Sleep(static_cast<unsigned int>(time) * 1000); +#else + sleep(static_cast<unsigned int>(time)); +#endif + LogMessage(logger, log_message.c_str()); + + // At this time, the new log file should be created. + GetFileCreateTime(kLogFile, &actual_create_time); + EXPECT_GT(actual_create_time, expected_create_time); + EXPECT_LT(logger->GetLogFileSize(), total_log_size); + expected_create_time = actual_create_time; + + return expected_create_time; +} + +TEST_F(AutoRollLoggerTest, RollLogFileBySize) { + InitTestDb(); + size_t log_max_size = 1024 * 5; + + AutoRollLogger logger(Env::Default(), kTestDir, "", log_max_size, 0); + + RollLogFileBySizeTest(&logger, log_max_size, + kSampleMessage + ":RollLogFileBySize"); +} + +TEST_F(AutoRollLoggerTest, RollLogFileByTime) { + size_t time = 2; + size_t log_size = 1024 * 5; + + InitTestDb(); + // -- Test the existence of file during the server restart.
+ ASSERT_EQ(Status::NotFound(), env->FileExists(kLogFile)); + AutoRollLogger logger(Env::Default(), kTestDir, "", log_size, time); + ASSERT_OK(env->FileExists(kLogFile)); + + RollLogFileByTimeTest(&logger, time, kSampleMessage + ":RollLogFileByTime"); +} + +TEST_F(AutoRollLoggerTest, OpenLogFilesMultipleTimesWithOptionLog_max_size) { + // If only the 'log_max_size' option is specified, then every time + // when rocksdb is restarted, a new empty log file will be created. + InitTestDb(); + // WORKAROUND: + // avoid compiler's complaint of "comparison between signed + // and unsigned integer expressions" because literal 0 is + // treated as "signed". + size_t kZero = 0; + size_t log_size = 1024; + + AutoRollLogger* logger = new AutoRollLogger( + Env::Default(), kTestDir, "", log_size, 0); + + LogMessage(logger, kSampleMessage.c_str()); + ASSERT_GT(logger->GetLogFileSize(), kZero); + delete logger; + + // reopens the log file and an empty log file will be created. + logger = new AutoRollLogger( + Env::Default(), kTestDir, "", log_size, 0); + ASSERT_EQ(logger->GetLogFileSize(), kZero); + delete logger; +} + +TEST_F(AutoRollLoggerTest, CompositeRollByTimeAndSizeLogger) { + size_t time = 2, log_max_size = 1024 * 5; + + InitTestDb(); + + AutoRollLogger logger(Env::Default(), kTestDir, "", log_max_size, time); + + // Test the ability to roll by size + RollLogFileBySizeTest( + &logger, log_max_size, + kSampleMessage + ":CompositeRollByTimeAndSizeLogger"); + + // Test the ability to roll by Time + RollLogFileByTimeTest( &logger, time, + kSampleMessage + ":CompositeRollByTimeAndSizeLogger"); +} + +#ifndef OS_WIN +// TODO: does not build for Windows because of PosixLogger use below. Need to +// port +TEST_F(AutoRollLoggerTest, CreateLoggerFromOptions) { + DBOptions options; + shared_ptr<Logger> logger; + + // Normal logger + ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + ASSERT_TRUE(dynamic_cast<PosixLogger*>(logger.get())); + + // Only roll by size + InitTestDb(); + options.max_log_file_size = 1024; + ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + AutoRollLogger* auto_roll_logger = + dynamic_cast<AutoRollLogger*>(logger.get()); + ASSERT_TRUE(auto_roll_logger); + RollLogFileBySizeTest( + auto_roll_logger, options.max_log_file_size, + kSampleMessage + ":CreateLoggerFromOptions - size"); + + // Only roll by Time + InitTestDb(); + options.max_log_file_size = 0; + options.log_file_time_to_roll = 2; + ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + auto_roll_logger = + dynamic_cast<AutoRollLogger*>(logger.get()); + RollLogFileByTimeTest( + auto_roll_logger, options.log_file_time_to_roll, + kSampleMessage + ":CreateLoggerFromOptions - time"); + + // roll by both Time and size + InitTestDb(); + options.max_log_file_size = 1024 * 5; + options.log_file_time_to_roll = 2; + ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + auto_roll_logger = + dynamic_cast<AutoRollLogger*>(logger.get()); + RollLogFileBySizeTest( + auto_roll_logger, options.max_log_file_size, + kSampleMessage + ":CreateLoggerFromOptions - both"); + RollLogFileByTimeTest( + auto_roll_logger, options.log_file_time_to_roll, + kSampleMessage + ":CreateLoggerFromOptions - both"); +} + +TEST_F(AutoRollLoggerTest, LogFlushWhileRolling) { + DBOptions options; + shared_ptr<Logger> logger; + + InitTestDb(); + options.max_log_file_size = 1024 * 5; + ASSERT_OK(CreateLoggerFromOptions(kTestDir, options, &logger)); + AutoRollLogger* auto_roll_logger = + dynamic_cast<AutoRollLogger*>(logger.get()); + ASSERT_TRUE(auto_roll_logger); + std::thread flush_thread; + + // Notes: + // (1) Need to
+  // (1) Need to pin the old logger before beginning the roll, as rolling
+  //     grabs the mutex, which would prevent us from accessing the old
+  //     logger. This also marks flush_thread with
+  //     AutoRollLogger::Flush:PinnedLogger.
+  // (2) Need to reset the logger during PosixLogger::Flush() to exercise a
+  //     race condition case, which is executing the flush with the pinned
+  //     (old) logger after the auto-roll logger has cut over to a new logger.
+  // (3) PosixLogger::Flush() happens in both threads, but its SyncPoints are
+  //     only enabled in flush_thread (the one pinning the old logger).
+  rocksdb::SyncPoint::GetInstance()->LoadDependencyAndMarkers(
+      {{"AutoRollLogger::Flush:PinnedLogger",
+        "AutoRollLoggerTest::LogFlushWhileRolling:PreRollAndPostThreadInit"},
+       {"PosixLogger::Flush:Begin1",
+        "AutoRollLogger::ResetLogger:BeforeNewLogger"},
+       {"AutoRollLogger::ResetLogger:AfterNewLogger",
+        "PosixLogger::Flush:Begin2"}},
+      {{"AutoRollLogger::Flush:PinnedLogger", "PosixLogger::Flush:Begin1"},
+       {"AutoRollLogger::Flush:PinnedLogger", "PosixLogger::Flush:Begin2"}});
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  flush_thread = std::thread([&]() { auto_roll_logger->Flush(); });
+  TEST_SYNC_POINT(
+      "AutoRollLoggerTest::LogFlushWhileRolling:PreRollAndPostThreadInit");
+  RollLogFileBySizeTest(auto_roll_logger, options.max_log_file_size,
+                        kSampleMessage + ":LogFlushWhileRolling");
+  flush_thread.join();
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+#endif  // !OS_WIN
+
+TEST_F(AutoRollLoggerTest, InfoLogLevel) {
+  InitTestDb();
+
+  size_t log_size = 8192;
+  size_t log_lines = 0;
+  // An extra scope to force the AutoRollLogger to flush the log file when it
+  // goes out of scope.
+  {
+    AutoRollLogger logger(Env::Default(), kTestDir, "", log_size, 0);
+    for (int log_level = InfoLogLevel::HEADER_LEVEL;
+         log_level >= InfoLogLevel::DEBUG_LEVEL; log_level--) {
+      logger.SetInfoLogLevel((InfoLogLevel)log_level);
+      for (int log_type = InfoLogLevel::DEBUG_LEVEL;
+           log_type <= InfoLogLevel::HEADER_LEVEL; log_type++) {
+        // Log messages with a log level smaller than log_level will not be
+        // logged.
+        LogMessage((InfoLogLevel)log_type, &logger, kSampleMessage.c_str());
+      }
+      log_lines += InfoLogLevel::HEADER_LEVEL - log_level + 1;
+    }
+    for (int log_level = InfoLogLevel::HEADER_LEVEL;
+         log_level >= InfoLogLevel::DEBUG_LEVEL; log_level--) {
+      logger.SetInfoLogLevel((InfoLogLevel)log_level);
+
+      // Again, messages with a level smaller than log_level will not be
+      // logged.
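+      // A worked example of the bookkeeping (assuming the usual enum order
+      // DEBUG < INFO < WARN < ERROR < FATAL < HEADER): with log_level ==
+      // WARN_LEVEL, only the Warn(), Error(), Fatal() and HEADER-level Log()
+      // calls below pass the filter, i.e. HEADER_LEVEL - WARN_LEVEL + 1 = 4
+      // lines, which is exactly what log_lines accumulates per iteration.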
+      Log(InfoLogLevel::HEADER_LEVEL, &logger, "%s", kSampleMessage.c_str());
+      Debug(&logger, "%s", kSampleMessage.c_str());
+      Info(&logger, "%s", kSampleMessage.c_str());
+      Warn(&logger, "%s", kSampleMessage.c_str());
+      Error(&logger, "%s", kSampleMessage.c_str());
+      Fatal(&logger, "%s", kSampleMessage.c_str());
+      log_lines += InfoLogLevel::HEADER_LEVEL - log_level + 1;
+    }
+  }
+  std::ifstream inFile(AutoRollLoggerTest::kLogFile.c_str());
+  size_t lines = std::count(std::istreambuf_iterator<char>(inFile),
+                            std::istreambuf_iterator<char>(), '\n');
+  ASSERT_EQ(log_lines, lines);
+  inFile.close();
+}
+
+// Test the logger Header() function for rolled-over logs.
+// We expect the new log files created on rollover to carry the specified
+// headers.
+static std::vector<std::string> GetOldFileNames(const std::string& path) {
+  std::vector<std::string> ret;
+
+  const std::string dirname = path.substr(/*start=*/0, path.find_last_of("/"));
+  const std::string fname = path.substr(path.find_last_of("/") + 1);
+
+  std::vector<std::string> children;
+  Env::Default()->GetChildren(dirname, &children);
+
+  // We know that the old log files are named [path] followed by a suffix.
+  // Return all entries that match the pattern.
+  for (auto& child : children) {
+    if (fname != child && child.find(fname) == 0) {
+      ret.push_back(dirname + "/" + child);
+    }
+  }
+
+  return ret;
+}
+
+// Return the number of lines where a given pattern was found in the file.
+static size_t GetLinesCount(const std::string& fname,
+                            const std::string& pattern) {
+  std::stringstream ssbuf;
+  std::string line;
+  size_t count = 0;
+
+  std::ifstream inFile(fname.c_str());
+  ssbuf << inFile.rdbuf();
+
+  while (getline(ssbuf, line)) {
+    if (line.find(pattern) != std::string::npos) {
+      count++;
+    }
+  }
+
+  return count;
+}
+
+TEST_F(AutoRollLoggerTest, LogHeaderTest) {
+  static const size_t MAX_HEADERS = 10;
+  static const size_t LOG_MAX_SIZE = 1024 * 5;
+  static const std::string HEADER_STR = "Log header line";
+
+  // test_num == 0 -> standard call to Header()
+  // test_num == 1 -> call to Log() with InfoLogLevel::HEADER_LEVEL
+  for (int test_num = 0; test_num < 2; test_num++) {
+
+    InitTestDb();
+
+    AutoRollLogger logger(Env::Default(), kTestDir, /*db_log_dir=*/ "",
+                          LOG_MAX_SIZE, /*log_file_time_to_roll=*/ 0);
+
+    if (test_num == 0) {
+      // Log some headers explicitly using Header()
+      for (size_t i = 0; i < MAX_HEADERS; i++) {
+        Header(&logger, "%s %d", HEADER_STR.c_str(), static_cast<int>(i));
+      }
+    } else if (test_num == 1) {
+      // HEADER_LEVEL should make this behave like calling Header()
+      for (size_t i = 0; i < MAX_HEADERS; i++) {
+        Log(InfoLogLevel::HEADER_LEVEL, &logger, "%s %d",
+            HEADER_STR.c_str(), static_cast<int>(i));
+      }
+    }
+
+    const std::string newfname = logger.TEST_log_fname();
+
+    // Log enough data to cause a rollover.
+    int i = 0;
+    for (size_t iter = 0; iter < 2; iter++) {
+      while (logger.GetLogFileSize() < LOG_MAX_SIZE) {
+        Info(&logger, (kSampleMessage + ":LogHeaderTest line %d").c_str(), i);
+        ++i;
+      }
+
+      Info(&logger, "Rollover");
+    }
+
+    // Flush the log for the latest file
+    LogFlush(&logger);
+
+    const auto oldfiles = GetOldFileNames(newfname);
+
+    ASSERT_EQ(oldfiles.size(), (size_t) 2);
+
+    for (auto& oldfname : oldfiles) {
+      // Verify that the files rolled over.
+      ASSERT_NE(oldfname, newfname);
+      // Verify that the old log contains all the header logs.
+      ASSERT_EQ(GetLinesCount(oldfname, HEADER_STR), MAX_HEADERS);
+    }
+  }
+}
+
+TEST_F(AutoRollLoggerTest, LogFileExistence) {
+  rocksdb::DB* db;
+  rocksdb::Options options;
+#ifdef OS_WIN
+  // Replace all slashes in the path so that the Windows CompSpec does not
+  // become confused.
+  std::string testDir(kTestDir);
+  std::replace_if(testDir.begin(), testDir.end(),
+                  [](char ch) { return ch == '/'; }, '\\');
+  std::string deleteCmd = "if exist " + testDir + " rd /s /q " + testDir;
+#else
+  std::string deleteCmd = "rm -rf " + kTestDir;
+#endif
+  ASSERT_EQ(system(deleteCmd.c_str()), 0);
+  options.max_log_file_size = 100 * 1024 * 1024;
+  options.create_if_missing = true;
+  ASSERT_OK(rocksdb::DB::Open(options, kTestDir, &db));
+  ASSERT_OK(env->FileExists(kLogFile));
+  delete db;
+}
+
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/external/rocksdb/db/builder.cc b/external/rocksdb/db/builder.cc
new file mode 100755
index 0000000000..ab255e493d
--- /dev/null
+++ b/external/rocksdb/db/builder.cc
@@ -0,0 +1,203 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/builder.h"
+
+#include <algorithm>
+#include <deque>
+#include <vector>
+
+#include "db/compaction_iterator.h"
+#include "db/dbformat.h"
+#include "db/event_helpers.h"
+#include "db/filename.h"
+#include "db/internal_stats.h"
+#include "db/merge_helper.h"
+#include "db/table_cache.h"
+#include "db/version_edit.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/table.h"
+#include "table/block_based_table_builder.h"
+#include "table/internal_iterator.h"
+#include "util/file_reader_writer.h"
+#include "util/iostats_context_imp.h"
+#include "util/stop_watch.h"
+#include "util/thread_status_util.h"
+
+namespace rocksdb {
+
+class TableFactory;
+
+TableBuilder* NewTableBuilder(
+    const ImmutableCFOptions& ioptions,
+    const InternalKeyComparator& internal_comparator,
+    const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
+        int_tbl_prop_collector_factories,
+    uint32_t column_family_id, const std::string& column_family_name,
+    WritableFileWriter* file, const CompressionType compression_type,
+    const CompressionOptions& compression_opts,
+    const std::string* compression_dict, const bool skip_filters) {
+  assert((column_family_id ==
+          TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) ==
+         column_family_name.empty());
+  return ioptions.table_factory->NewTableBuilder(
+      TableBuilderOptions(ioptions, internal_comparator,
+                          int_tbl_prop_collector_factories, compression_type,
+                          compression_opts, compression_dict, skip_filters,
+                          column_family_name),
+      column_family_id, file);
+}
+
+Status BuildTable(
+    const std::string& dbname, Env* env, const ImmutableCFOptions& ioptions,
+    const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options,
+    TableCache* table_cache, InternalIterator* iter, FileMetaData* meta,
+    const InternalKeyComparator& internal_comparator,
+    const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
+        int_tbl_prop_collector_factories,
+    uint32_t column_family_id, const std::string& column_family_name,
+    std::vector<SequenceNumber> snapshots,
+    SequenceNumber earliest_write_conflict_snapshot,
+    const CompressionType compression,
+    const CompressionOptions& compression_opts, bool paranoid_file_checks,
+    InternalStats* internal_stats,
+    TableFileCreationReason reason,
+    EventLogger* event_logger, int job_id, const Env::IOPriority io_priority,
+    TableProperties* table_properties, int level) {
+  assert((column_family_id ==
+          TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) ==
+         column_family_name.empty());
+  // Report flush IOStats once for every kReportFlushIOStatsEvery bytes
+  // written.
+  const size_t kReportFlushIOStatsEvery = 1048576;
+  Status s;
+  meta->fd.file_size = 0;
+  iter->SeekToFirst();
+
+  std::string fname = TableFileName(ioptions.db_paths, meta->fd.GetNumber(),
+                                    meta->fd.GetPathId());
+#ifndef ROCKSDB_LITE
+  EventHelpers::NotifyTableFileCreationStarted(
+      ioptions.listeners, dbname, column_family_name, fname, job_id, reason);
+#endif  // !ROCKSDB_LITE
+  TableProperties tp;
+
+  if (iter->Valid()) {
+    TableBuilder* builder;
+    unique_ptr<WritableFileWriter> file_writer;
+    {
+      unique_ptr<WritableFile> file;
+      s = NewWritableFile(env, fname, &file, env_options);
+      if (!s.ok()) {
+        EventHelpers::LogAndNotifyTableFileCreationFinished(
+            event_logger, ioptions.listeners, dbname, column_family_name,
+            fname, job_id, meta->fd, tp, reason, s);
+        return s;
+      }
+      file->SetIOPriority(io_priority);
+
+      file_writer.reset(new WritableFileWriter(std::move(file), env_options));
+
+      builder = NewTableBuilder(
+          ioptions, internal_comparator, int_tbl_prop_collector_factories,
+          column_family_id, column_family_name, file_writer.get(), compression,
+          compression_opts);
+    }
+
+    MergeHelper merge(env, internal_comparator.user_comparator(),
+                      ioptions.merge_operator, nullptr, ioptions.info_log,
+                      mutable_cf_options.min_partial_merge_operands,
+                      true /* internal key corruption is not ok */,
+                      snapshots.empty() ? 0 : snapshots.back());
+
+    CompactionIterator c_iter(iter, internal_comparator.user_comparator(),
+                              &merge, kMaxSequenceNumber, &snapshots,
+                              earliest_write_conflict_snapshot, env,
+                              true /* internal key corruption is not ok */);
+    c_iter.SeekToFirst();
+    for (; c_iter.Valid(); c_iter.Next()) {
+      const Slice& key = c_iter.key();
+      const Slice& value = c_iter.value();
+      builder->Add(key, value);
+      meta->UpdateBoundaries(key, c_iter.ikey().sequence);
+
+      // TODO(noetzli): Update stats after flush, too.
+      if (io_priority == Env::IO_HIGH &&
+          IOSTATS(bytes_written) >= kReportFlushIOStatsEvery) {
+        ThreadStatusUtil::SetThreadOperationProperty(
+            ThreadStatus::FLUSH_BYTES_WRITTEN, IOSTATS(bytes_written));
+      }
+    }
+
+    // Finish and check for builder errors
+    bool empty = builder->NumEntries() == 0;
+    s = c_iter.status();
+    if (!s.ok() || empty) {
+      builder->Abandon();
+    } else {
+      s = builder->Finish();
+    }
+
+    if (s.ok() && !empty) {
+      uint64_t file_size = builder->FileSize();
+      meta->fd.file_size = file_size;
+      meta->marked_for_compaction = builder->NeedCompact();
+      assert(meta->fd.GetFileSize() > 0);
+      tp = builder->GetTableProperties();
+      if (table_properties) {
+        *table_properties = tp;
+      }
+    }
+    delete builder;
+
+    // Finish and check for file errors
+    if (s.ok() && !empty && !ioptions.disable_data_sync) {
+      StopWatch sw(env, ioptions.statistics, TABLE_SYNC_MICROS);
+      file_writer->Sync(ioptions.use_fsync);
+    }
+    if (s.ok() && !empty) {
+      s = file_writer->Close();
+    }
+
+    if (s.ok() && !empty) {
+      // Verify that the table is usable
+      std::unique_ptr<InternalIterator> it(table_cache->NewIterator(
+          ReadOptions(), env_options, internal_comparator, meta->fd, nullptr,
+          (internal_stats == nullptr)
+              ? nullptr
+              : internal_stats->GetFileReadHist(0),
+          false /* for_compaction */, nullptr /* arena */,
+          false /* skip_filter */, level));
+      s = it->status();
+      if (s.ok() && paranoid_file_checks) {
+        for (it->SeekToFirst(); it->Valid(); it->Next()) {
+        }
+        s = it->status();
+      }
+    }
+  }
+
+  // Check for input iterator errors
+  if (!iter->status().ok()) {
+    s = iter->status();
+  }
+
+  if (!s.ok() || meta->fd.GetFileSize() == 0) {
+    env->DeleteFile(fname);
+  }
+
+  // Output to event logger and fire events.
+  EventHelpers::LogAndNotifyTableFileCreationFinished(
+      event_logger, ioptions.listeners, dbname, column_family_name, fname,
+      job_id, meta->fd, tp, reason, s);
+
+  return s;
+}
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/builder.h b/external/rocksdb/db/builder.h
new file mode 100755
index 0000000000..62aa717c66
--- /dev/null
+++ b/external/rocksdb/db/builder.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#pragma once
+#include <string>
+#include <utility>
+#include <vector>
+#include "db/table_properties_collector.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/env.h"
+#include "rocksdb/immutable_options.h"
+#include "rocksdb/listener.h"
+#include "rocksdb/options.h"
+#include "rocksdb/status.h"
+#include "rocksdb/table_properties.h"
+#include "rocksdb/types.h"
+#include "util/event_logger.h"
+#include "util/mutable_cf_options.h"
+
+namespace rocksdb {
+
+struct Options;
+struct FileMetaData;
+
+class Env;
+struct EnvOptions;
+class Iterator;
+class TableCache;
+class VersionEdit;
+class TableBuilder;
+class WritableFileWriter;
+class InternalStats;
+class InternalIterator;
+
+// @param column_family_name Name of the column family that is also identified
+//    by column_family_id, or empty string if unknown. It must outlive the
+//    TableBuilder returned by this function.
+// @param compression_dict Data for presetting the compression library's
+//    dictionary, or nullptr.
+TableBuilder* NewTableBuilder(
+    const ImmutableCFOptions& options,
+    const InternalKeyComparator& internal_comparator,
+    const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
+        int_tbl_prop_collector_factories,
+    uint32_t column_family_id, const std::string& column_family_name,
+    WritableFileWriter* file, const CompressionType compression_type,
+    const CompressionOptions& compression_opts,
+    const std::string* compression_dict = nullptr,
+    const bool skip_filters = false);
+
+// Build a Table file from the contents of *iter. The generated file
+// will be named according to the number specified in meta. On success, the
+// rest of *meta will be filled with metadata about the generated table.
+// If no data is present in *iter, meta->file_size will be set to
+// zero, and no Table file will be produced.
+//
+// @param column_family_name Name of the column family that is also identified
+//    by column_family_id, or empty string if unknown.
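+//
+// A minimal call sketch (an editorial illustration, not from the original
+// sources; names such as mem_iter and the other surrounding variables are
+// hypothetical, and the defaulted trailing parameters are omitted):
+//
+//   FileMetaData meta;
+//   Status s = BuildTable(
+//       dbname, env, ioptions, mutable_cf_options, env_options, table_cache,
+//       mem_iter, &meta, internal_comparator,
+//       int_tbl_prop_collector_factories, column_family_id,
+//       column_family_name, snapshots, earliest_write_conflict_snapshot,
+//       kNoCompression, CompressionOptions(),
+//       false /* paranoid_file_checks */, internal_stats,
+//       TableFileCreationReason::kFlush);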
+extern Status BuildTable(
+    const std::string& dbname, Env* env, const ImmutableCFOptions& options,
+    const MutableCFOptions& mutable_cf_options, const EnvOptions& env_options,
+    TableCache* table_cache, InternalIterator* iter, FileMetaData* meta,
+    const InternalKeyComparator& internal_comparator,
+    const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
+        int_tbl_prop_collector_factories,
+    uint32_t column_family_id, const std::string& column_family_name,
+    std::vector<SequenceNumber> snapshots,
+    SequenceNumber earliest_write_conflict_snapshot,
+    const CompressionType compression,
+    const CompressionOptions& compression_opts, bool paranoid_file_checks,
+    InternalStats* internal_stats, TableFileCreationReason reason,
+    EventLogger* event_logger = nullptr, int job_id = 0,
+    const Env::IOPriority io_priority = Env::IO_HIGH,
+    TableProperties* table_properties = nullptr, int level = -1);
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/c.cc b/external/rocksdb/db/c.cc
new file mode 100755
index 0000000000..05802aa21d
--- /dev/null
+++ b/external/rocksdb/db/c.cc
@@ -0,0 +1,2422 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef ROCKSDB_LITE
+
+#include "rocksdb/c.h"
+
+#include <stdlib.h>
+#include "port/port.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/compaction_filter.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/convenience.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/merge_operator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/status.h"
+#include "rocksdb/write_batch.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/universal_compaction.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/table.h"
+#include "rocksdb/utilities/backupable_db.h"
+#include "utilities/merge_operators.h"
+
+using rocksdb::Cache;
+using rocksdb::ColumnFamilyDescriptor;
+using rocksdb::ColumnFamilyHandle;
+using rocksdb::ColumnFamilyOptions;
+using rocksdb::CompactionFilter;
+using rocksdb::CompactionFilterFactory;
+using rocksdb::CompactionFilterContext;
+using rocksdb::CompactionOptionsFIFO;
+using rocksdb::Comparator;
+using rocksdb::CompressionType;
+using rocksdb::DB;
+using rocksdb::DBOptions;
+using rocksdb::Env;
+using rocksdb::InfoLogLevel;
+using rocksdb::FileLock;
+using rocksdb::FilterPolicy;
+using rocksdb::FlushOptions;
+using rocksdb::Iterator;
+using rocksdb::Logger;
+using rocksdb::MergeOperator;
+using rocksdb::MergeOperators;
+using rocksdb::NewBloomFilterPolicy;
+using rocksdb::NewLRUCache;
+using rocksdb::Options;
+using rocksdb::BlockBasedTableOptions;
+using rocksdb::CuckooTableOptions;
+using rocksdb::RandomAccessFile;
+using rocksdb::Range;
+using rocksdb::ReadOptions;
+using rocksdb::SequentialFile;
+using rocksdb::Slice;
+using rocksdb::SliceParts;
+using rocksdb::SliceTransform;
+using rocksdb::Snapshot;
+using rocksdb::Status;
+using rocksdb::WritableFile;
+using rocksdb::WriteBatch;
+using rocksdb::WriteOptions;
+using rocksdb::LiveFileMetaData;
+using rocksdb::BackupEngine;
+using
rocksdb::BackupableDBOptions; +using rocksdb::BackupInfo; +using rocksdb::RestoreOptions; +using rocksdb::CompactRangeOptions; + +using std::shared_ptr; + +extern "C" { + +struct rocksdb_t { DB* rep; }; +struct rocksdb_backup_engine_t { BackupEngine* rep; }; +struct rocksdb_backup_engine_info_t { std::vector rep; }; +struct rocksdb_restore_options_t { RestoreOptions rep; }; +struct rocksdb_iterator_t { Iterator* rep; }; +struct rocksdb_writebatch_t { WriteBatch rep; }; +struct rocksdb_snapshot_t { const Snapshot* rep; }; +struct rocksdb_flushoptions_t { FlushOptions rep; }; +struct rocksdb_fifo_compaction_options_t { CompactionOptionsFIFO rep; }; +struct rocksdb_readoptions_t { + ReadOptions rep; + Slice upper_bound; // stack variable to set pointer to in ReadOptions +}; +struct rocksdb_writeoptions_t { WriteOptions rep; }; +struct rocksdb_options_t { Options rep; }; +struct rocksdb_block_based_table_options_t { BlockBasedTableOptions rep; }; +struct rocksdb_cuckoo_table_options_t { CuckooTableOptions rep; }; +struct rocksdb_seqfile_t { SequentialFile* rep; }; +struct rocksdb_randomfile_t { RandomAccessFile* rep; }; +struct rocksdb_writablefile_t { WritableFile* rep; }; +struct rocksdb_filelock_t { FileLock* rep; }; +struct rocksdb_logger_t { shared_ptr rep; }; +struct rocksdb_cache_t { shared_ptr rep; }; +struct rocksdb_livefiles_t { std::vector rep; }; +struct rocksdb_column_family_handle_t { ColumnFamilyHandle* rep; }; + +struct rocksdb_compactionfiltercontext_t { + CompactionFilter::Context rep; +}; + +struct rocksdb_compactionfilter_t : public CompactionFilter { + void* state_; + void (*destructor_)(void*); + unsigned char (*filter_)( + void*, + int level, + const char* key, size_t key_length, + const char* existing_value, size_t value_length, + char** new_value, size_t *new_value_length, + unsigned char* value_changed); + const char* (*name_)(void*); + + virtual ~rocksdb_compactionfilter_t() { + (*destructor_)(state_); + } + + virtual bool Filter(int level, const Slice& key, const Slice& existing_value, + std::string* new_value, + bool* value_changed) const override { + char* c_new_value = nullptr; + size_t new_value_length = 0; + unsigned char c_value_changed = 0; + unsigned char result = (*filter_)( + state_, + level, + key.data(), key.size(), + existing_value.data(), existing_value.size(), + &c_new_value, &new_value_length, &c_value_changed); + if (c_value_changed) { + new_value->assign(c_new_value, new_value_length); + *value_changed = true; + } + return result; + } + + virtual const char* Name() const override { return (*name_)(state_); } +}; + +struct rocksdb_compactionfilterfactory_t : public CompactionFilterFactory { + void* state_; + void (*destructor_)(void*); + rocksdb_compactionfilter_t* (*create_compaction_filter_)( + void*, rocksdb_compactionfiltercontext_t* context); + const char* (*name_)(void*); + + virtual ~rocksdb_compactionfilterfactory_t() { (*destructor_)(state_); } + + virtual std::unique_ptr CreateCompactionFilter( + const CompactionFilter::Context& context) override { + rocksdb_compactionfiltercontext_t ccontext; + ccontext.rep = context; + CompactionFilter* cf = (*create_compaction_filter_)(state_, &ccontext); + return std::unique_ptr(cf); + } + + virtual const char* Name() const override { return (*name_)(state_); } +}; + +struct rocksdb_comparator_t : public Comparator { + void* state_; + void (*destructor_)(void*); + int (*compare_)( + void*, + const char* a, size_t alen, + const char* b, size_t blen); + const char* (*name_)(void*); + + virtual 
~rocksdb_comparator_t() { + (*destructor_)(state_); + } + + virtual int Compare(const Slice& a, const Slice& b) const override { + return (*compare_)(state_, a.data(), a.size(), b.data(), b.size()); + } + + virtual const char* Name() const override { return (*name_)(state_); } + + // No-ops since the C binding does not support key shortening methods. + virtual void FindShortestSeparator(std::string*, + const Slice&) const override {} + virtual void FindShortSuccessor(std::string* key) const override {} +}; + +struct rocksdb_filterpolicy_t : public FilterPolicy { + void* state_; + void (*destructor_)(void*); + const char* (*name_)(void*); + char* (*create_)( + void*, + const char* const* key_array, const size_t* key_length_array, + int num_keys, + size_t* filter_length); + unsigned char (*key_match_)( + void*, + const char* key, size_t length, + const char* filter, size_t filter_length); + void (*delete_filter_)( + void*, + const char* filter, size_t filter_length); + + virtual ~rocksdb_filterpolicy_t() { + (*destructor_)(state_); + } + + virtual const char* Name() const override { return (*name_)(state_); } + + virtual void CreateFilter(const Slice* keys, int n, + std::string* dst) const override { + std::vector key_pointers(n); + std::vector key_sizes(n); + for (int i = 0; i < n; i++) { + key_pointers[i] = keys[i].data(); + key_sizes[i] = keys[i].size(); + } + size_t len; + char* filter = (*create_)(state_, &key_pointers[0], &key_sizes[0], n, &len); + dst->append(filter, len); + + if (delete_filter_ != nullptr) { + (*delete_filter_)(state_, filter, len); + } else { + free(filter); + } + } + + virtual bool KeyMayMatch(const Slice& key, + const Slice& filter) const override { + return (*key_match_)(state_, key.data(), key.size(), + filter.data(), filter.size()); + } +}; + +struct rocksdb_mergeoperator_t : public MergeOperator { + void* state_; + void (*destructor_)(void*); + const char* (*name_)(void*); + char* (*full_merge_)( + void*, + const char* key, size_t key_length, + const char* existing_value, size_t existing_value_length, + const char* const* operands_list, const size_t* operands_list_length, + int num_operands, + unsigned char* success, size_t* new_value_length); + char* (*partial_merge_)(void*, const char* key, size_t key_length, + const char* const* operands_list, + const size_t* operands_list_length, int num_operands, + unsigned char* success, size_t* new_value_length); + void (*delete_value_)( + void*, + const char* value, size_t value_length); + + virtual ~rocksdb_mergeoperator_t() { + (*destructor_)(state_); + } + + virtual const char* Name() const override { return (*name_)(state_); } + + virtual bool FullMergeV2(const MergeOperationInput& merge_in, + MergeOperationOutput* merge_out) const override { + size_t n = merge_in.operand_list.size(); + std::vector operand_pointers(n); + std::vector operand_sizes(n); + for (size_t i = 0; i < n; i++) { + Slice operand(merge_in.operand_list[i]); + operand_pointers[i] = operand.data(); + operand_sizes[i] = operand.size(); + } + + const char* existing_value_data = nullptr; + size_t existing_value_len = 0; + if (merge_in.existing_value != nullptr) { + existing_value_data = merge_in.existing_value->data(); + existing_value_len = merge_in.existing_value->size(); + } + + unsigned char success; + size_t new_value_len; + char* tmp_new_value = (*full_merge_)( + state_, merge_in.key.data(), merge_in.key.size(), existing_value_data, + existing_value_len, &operand_pointers[0], &operand_sizes[0], + static_cast(n), &success, &new_value_len); + 
merge_out->new_value.assign(tmp_new_value, new_value_len); + + if (delete_value_ != nullptr) { + (*delete_value_)(state_, tmp_new_value, new_value_len); + } else { + free(tmp_new_value); + } + + return success; + } + + virtual bool PartialMergeMulti(const Slice& key, + const std::deque& operand_list, + std::string* new_value, + Logger* logger) const override { + size_t operand_count = operand_list.size(); + std::vector operand_pointers(operand_count); + std::vector operand_sizes(operand_count); + for (size_t i = 0; i < operand_count; ++i) { + Slice operand(operand_list[i]); + operand_pointers[i] = operand.data(); + operand_sizes[i] = operand.size(); + } + + unsigned char success; + size_t new_value_len; + char* tmp_new_value = (*partial_merge_)( + state_, key.data(), key.size(), &operand_pointers[0], &operand_sizes[0], + static_cast(operand_count), &success, &new_value_len); + new_value->assign(tmp_new_value, new_value_len); + + if (delete_value_ != nullptr) { + (*delete_value_)(state_, tmp_new_value, new_value_len); + } else { + free(tmp_new_value); + } + + return success; + } +}; + +struct rocksdb_env_t { + Env* rep; + bool is_default; +}; + +struct rocksdb_slicetransform_t : public SliceTransform { + void* state_; + void (*destructor_)(void*); + const char* (*name_)(void*); + char* (*transform_)( + void*, + const char* key, size_t length, + size_t* dst_length); + unsigned char (*in_domain_)( + void*, + const char* key, size_t length); + unsigned char (*in_range_)( + void*, + const char* key, size_t length); + + virtual ~rocksdb_slicetransform_t() { + (*destructor_)(state_); + } + + virtual const char* Name() const override { return (*name_)(state_); } + + virtual Slice Transform(const Slice& src) const override { + size_t len; + char* dst = (*transform_)(state_, src.data(), src.size(), &len); + return Slice(dst, len); + } + + virtual bool InDomain(const Slice& src) const override { + return (*in_domain_)(state_, src.data(), src.size()); + } + + virtual bool InRange(const Slice& src) const override { + return (*in_range_)(state_, src.data(), src.size()); + } +}; + +struct rocksdb_universal_compaction_options_t { + rocksdb::CompactionOptionsUniversal *rep; +}; + +static bool SaveError(char** errptr, const Status& s) { + assert(errptr != nullptr); + if (s.ok()) { + return false; + } else if (*errptr == nullptr) { + *errptr = strdup(s.ToString().c_str()); + } else { + // TODO(sanjay): Merge with existing error? 
+ // This is a bug if *errptr is not created by malloc() + free(*errptr); + *errptr = strdup(s.ToString().c_str()); + } + return true; +} + +static char* CopyString(const std::string& str) { + char* result = reinterpret_cast(malloc(sizeof(char) * str.size())); + memcpy(result, str.data(), sizeof(char) * str.size()); + return result; +} + +rocksdb_t* rocksdb_open( + const rocksdb_options_t* options, + const char* name, + char** errptr) { + DB* db; + if (SaveError(errptr, DB::Open(options->rep, std::string(name), &db))) { + return nullptr; + } + rocksdb_t* result = new rocksdb_t; + result->rep = db; + return result; +} + +rocksdb_t* rocksdb_open_for_read_only( + const rocksdb_options_t* options, + const char* name, + unsigned char error_if_log_file_exist, + char** errptr) { + DB* db; + if (SaveError(errptr, DB::OpenForReadOnly(options->rep, std::string(name), &db, error_if_log_file_exist))) { + return nullptr; + } + rocksdb_t* result = new rocksdb_t; + result->rep = db; + return result; +} + +rocksdb_backup_engine_t* rocksdb_backup_engine_open( + const rocksdb_options_t* options, const char* path, char** errptr) { + BackupEngine* be; + if (SaveError(errptr, BackupEngine::Open(options->rep.env, + BackupableDBOptions(path), &be))) { + return nullptr; + } + rocksdb_backup_engine_t* result = new rocksdb_backup_engine_t; + result->rep = be; + return result; +} + +void rocksdb_backup_engine_create_new_backup(rocksdb_backup_engine_t* be, + rocksdb_t* db, char** errptr) { + SaveError(errptr, be->rep->CreateNewBackup(db->rep)); +} + +void rocksdb_backup_engine_purge_old_backups(rocksdb_backup_engine_t* be, + uint32_t num_backups_to_keep, + char** errptr) { + SaveError(errptr, be->rep->PurgeOldBackups(num_backups_to_keep)); +} + +rocksdb_restore_options_t* rocksdb_restore_options_create() { + return new rocksdb_restore_options_t; +} + +void rocksdb_restore_options_destroy(rocksdb_restore_options_t* opt) { + delete opt; +} + +void rocksdb_restore_options_set_keep_log_files(rocksdb_restore_options_t* opt, + int v) { + opt->rep.keep_log_files = v; +} + +void rocksdb_backup_engine_restore_db_from_latest_backup( + rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, + const rocksdb_restore_options_t* restore_options, char** errptr) { + SaveError(errptr, be->rep->RestoreDBFromLatestBackup(std::string(db_dir), + std::string(wal_dir), + restore_options->rep)); +} + +const rocksdb_backup_engine_info_t* rocksdb_backup_engine_get_backup_info( + rocksdb_backup_engine_t* be) { + rocksdb_backup_engine_info_t* result = new rocksdb_backup_engine_info_t; + be->rep->GetBackupInfo(&result->rep); + return result; +} + +int rocksdb_backup_engine_info_count(const rocksdb_backup_engine_info_t* info) { + return static_cast(info->rep.size()); +} + +int64_t rocksdb_backup_engine_info_timestamp( + const rocksdb_backup_engine_info_t* info, int index) { + return info->rep[index].timestamp; +} + +uint32_t rocksdb_backup_engine_info_backup_id( + const rocksdb_backup_engine_info_t* info, int index) { + return info->rep[index].backup_id; +} + +uint64_t rocksdb_backup_engine_info_size( + const rocksdb_backup_engine_info_t* info, int index) { + return info->rep[index].size; +} + +uint32_t rocksdb_backup_engine_info_number_files( + const rocksdb_backup_engine_info_t* info, int index) { + return info->rep[index].number_files; +} + +void rocksdb_backup_engine_info_destroy( + const rocksdb_backup_engine_info_t* info) { + delete info; +} + +void rocksdb_backup_engine_close(rocksdb_backup_engine_t* be) { + delete be->rep; + 
delete be; +} + +void rocksdb_close(rocksdb_t* db) { + delete db->rep; + delete db; +} + +void rocksdb_options_set_uint64add_merge_operator(rocksdb_options_t* opt) { + opt->rep.merge_operator = rocksdb::MergeOperators::CreateUInt64AddOperator(); +} + +rocksdb_t* rocksdb_open_column_families( + const rocksdb_options_t* db_options, + const char* name, + int num_column_families, + const char** column_family_names, + const rocksdb_options_t** column_family_options, + rocksdb_column_family_handle_t** column_family_handles, + char** errptr) { + std::vector column_families; + for (int i = 0; i < num_column_families; i++) { + column_families.push_back(ColumnFamilyDescriptor( + std::string(column_family_names[i]), + ColumnFamilyOptions(column_family_options[i]->rep))); + } + + DB* db; + std::vector handles; + if (SaveError(errptr, DB::Open(DBOptions(db_options->rep), + std::string(name), column_families, &handles, &db))) { + return nullptr; + } + + for (size_t i = 0; i < handles.size(); i++) { + rocksdb_column_family_handle_t* c_handle = new rocksdb_column_family_handle_t; + c_handle->rep = handles[i]; + column_family_handles[i] = c_handle; + } + rocksdb_t* result = new rocksdb_t; + result->rep = db; + return result; +} + +rocksdb_t* rocksdb_open_for_read_only_column_families( + const rocksdb_options_t* db_options, + const char* name, + int num_column_families, + const char** column_family_names, + const rocksdb_options_t** column_family_options, + rocksdb_column_family_handle_t** column_family_handles, + unsigned char error_if_log_file_exist, + char** errptr) { + std::vector column_families; + for (int i = 0; i < num_column_families; i++) { + column_families.push_back(ColumnFamilyDescriptor( + std::string(column_family_names[i]), + ColumnFamilyOptions(column_family_options[i]->rep))); + } + + DB* db; + std::vector handles; + if (SaveError(errptr, DB::OpenForReadOnly(DBOptions(db_options->rep), + std::string(name), column_families, &handles, &db, error_if_log_file_exist))) { + return nullptr; + } + + for (size_t i = 0; i < handles.size(); i++) { + rocksdb_column_family_handle_t* c_handle = new rocksdb_column_family_handle_t; + c_handle->rep = handles[i]; + column_family_handles[i] = c_handle; + } + rocksdb_t* result = new rocksdb_t; + result->rep = db; + return result; +} + +char** rocksdb_list_column_families( + const rocksdb_options_t* options, + const char* name, + size_t* lencfs, + char** errptr) { + std::vector fams; + SaveError(errptr, + DB::ListColumnFamilies(DBOptions(options->rep), + std::string(name), &fams)); + + *lencfs = fams.size(); + char** column_families = static_cast(malloc(sizeof(char*) * fams.size())); + for (size_t i = 0; i < fams.size(); i++) { + column_families[i] = strdup(fams[i].c_str()); + } + return column_families; +} + +void rocksdb_list_column_families_destroy(char** list, size_t len) { + for (size_t i = 0; i < len; ++i) { + free(list[i]); + } + free(list); +} + +rocksdb_column_family_handle_t* rocksdb_create_column_family( + rocksdb_t* db, + const rocksdb_options_t* column_family_options, + const char* column_family_name, + char** errptr) { + rocksdb_column_family_handle_t* handle = new rocksdb_column_family_handle_t; + SaveError(errptr, + db->rep->CreateColumnFamily(ColumnFamilyOptions(column_family_options->rep), + std::string(column_family_name), &(handle->rep))); + return handle; +} + +void rocksdb_drop_column_family( + rocksdb_t* db, + rocksdb_column_family_handle_t* handle, + char** errptr) { + SaveError(errptr, db->rep->DropColumnFamily(handle->rep)); +} + 
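+// A brief usage sketch (an editorial illustration, not part of the original
+// file): every fallible entry point in this C API takes a final char**
+// errptr. Callers pass the address of a char* initialized to nullptr; on
+// failure, SaveError() fills it with a malloc()'d message that the caller
+// must free():
+//
+//   char* err = nullptr;
+//   rocksdb_put(db, woptions, "key", 3, "value", 5, &err);
+//   if (err != nullptr) {
+//     fprintf(stderr, "put failed: %s\n", err);
+//     free(err);
+//   }
+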
+void rocksdb_column_family_handle_destroy(rocksdb_column_family_handle_t* handle) { + delete handle->rep; + delete handle; +} + +void rocksdb_put( + rocksdb_t* db, + const rocksdb_writeoptions_t* options, + const char* key, size_t keylen, + const char* val, size_t vallen, + char** errptr) { + SaveError(errptr, + db->rep->Put(options->rep, Slice(key, keylen), Slice(val, vallen))); +} + +void rocksdb_put_cf( + rocksdb_t* db, + const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t keylen, + const char* val, size_t vallen, + char** errptr) { + SaveError(errptr, + db->rep->Put(options->rep, column_family->rep, + Slice(key, keylen), Slice(val, vallen))); +} + +void rocksdb_delete( + rocksdb_t* db, + const rocksdb_writeoptions_t* options, + const char* key, size_t keylen, + char** errptr) { + SaveError(errptr, db->rep->Delete(options->rep, Slice(key, keylen))); +} + +void rocksdb_delete_cf( + rocksdb_t* db, + const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t keylen, + char** errptr) { + SaveError(errptr, db->rep->Delete(options->rep, column_family->rep, + Slice(key, keylen))); +} + +void rocksdb_merge( + rocksdb_t* db, + const rocksdb_writeoptions_t* options, + const char* key, size_t keylen, + const char* val, size_t vallen, + char** errptr) { + SaveError(errptr, + db->rep->Merge(options->rep, Slice(key, keylen), Slice(val, vallen))); +} + +void rocksdb_merge_cf( + rocksdb_t* db, + const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t keylen, + const char* val, size_t vallen, + char** errptr) { + SaveError(errptr, + db->rep->Merge(options->rep, column_family->rep, + Slice(key, keylen), Slice(val, vallen))); +} + +void rocksdb_write( + rocksdb_t* db, + const rocksdb_writeoptions_t* options, + rocksdb_writebatch_t* batch, + char** errptr) { + SaveError(errptr, db->rep->Write(options->rep, &batch->rep)); +} + +char* rocksdb_get( + rocksdb_t* db, + const rocksdb_readoptions_t* options, + const char* key, size_t keylen, + size_t* vallen, + char** errptr) { + char* result = nullptr; + std::string tmp; + Status s = db->rep->Get(options->rep, Slice(key, keylen), &tmp); + if (s.ok()) { + *vallen = tmp.size(); + result = CopyString(tmp); + } else { + *vallen = 0; + if (!s.IsNotFound()) { + SaveError(errptr, s); + } + } + return result; +} + +char* rocksdb_get_cf( + rocksdb_t* db, + const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t keylen, + size_t* vallen, + char** errptr) { + char* result = nullptr; + std::string tmp; + Status s = db->rep->Get(options->rep, column_family->rep, + Slice(key, keylen), &tmp); + if (s.ok()) { + *vallen = tmp.size(); + result = CopyString(tmp); + } else { + *vallen = 0; + if (!s.IsNotFound()) { + SaveError(errptr, s); + } + } + return result; +} + +void rocksdb_multi_get( + rocksdb_t* db, + const rocksdb_readoptions_t* options, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, + char** values_list, size_t* values_list_sizes, + char** errs) { + std::vector keys(num_keys); + for (size_t i = 0; i < num_keys; i++) { + keys[i] = Slice(keys_list[i], keys_list_sizes[i]); + } + std::vector values(num_keys); + std::vector statuses = db->rep->MultiGet(options->rep, keys, &values); + for (size_t i = 0; i < num_keys; i++) { + if (statuses[i].ok()) { + values_list[i] = CopyString(values[i]); + values_list_sizes[i] 
= values[i].size(); + errs[i] = nullptr; + } else { + values_list[i] = nullptr; + values_list_sizes[i] = 0; + if (!statuses[i].IsNotFound()) { + errs[i] = strdup(statuses[i].ToString().c_str()); + } else { + errs[i] = nullptr; + } + } + } +} + +void rocksdb_multi_get_cf( + rocksdb_t* db, + const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, + char** values_list, size_t* values_list_sizes, + char** errs) { + std::vector keys(num_keys); + std::vector cfs(num_keys); + for (size_t i = 0; i < num_keys; i++) { + keys[i] = Slice(keys_list[i], keys_list_sizes[i]); + cfs[i] = column_families[i]->rep; + } + std::vector values(num_keys); + std::vector statuses = db->rep->MultiGet(options->rep, cfs, keys, &values); + for (size_t i = 0; i < num_keys; i++) { + if (statuses[i].ok()) { + values_list[i] = CopyString(values[i]); + values_list_sizes[i] = values[i].size(); + errs[i] = nullptr; + } else { + values_list[i] = nullptr; + values_list_sizes[i] = 0; + if (!statuses[i].IsNotFound()) { + errs[i] = strdup(statuses[i].ToString().c_str()); + } else { + errs[i] = nullptr; + } + } + } +} + +rocksdb_iterator_t* rocksdb_create_iterator( + rocksdb_t* db, + const rocksdb_readoptions_t* options) { + rocksdb_iterator_t* result = new rocksdb_iterator_t; + result->rep = db->rep->NewIterator(options->rep); + return result; +} + +rocksdb_iterator_t* rocksdb_create_iterator_cf( + rocksdb_t* db, + const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family) { + rocksdb_iterator_t* result = new rocksdb_iterator_t; + result->rep = db->rep->NewIterator(options->rep, column_family->rep); + return result; +} + +void rocksdb_create_iterators( + rocksdb_t *db, + rocksdb_readoptions_t* opts, + rocksdb_column_family_handle_t** column_families, + rocksdb_iterator_t** iterators, + size_t size, + char** errptr) { + std::vector column_families_vec; + for (size_t i = 0; i < size; i++) { + column_families_vec.push_back(column_families[i]->rep); + } + + std::vector res; + Status status = db->rep->NewIterators(opts->rep, column_families_vec, &res); + assert(res.size() == size); + if (SaveError(errptr, status)) { + return; + } + + for (size_t i = 0; i < size; i++) { + iterators[i] = new rocksdb_iterator_t; + iterators[i]->rep = res[i]; + } +} + +const rocksdb_snapshot_t* rocksdb_create_snapshot( + rocksdb_t* db) { + rocksdb_snapshot_t* result = new rocksdb_snapshot_t; + result->rep = db->rep->GetSnapshot(); + return result; +} + +void rocksdb_release_snapshot( + rocksdb_t* db, + const rocksdb_snapshot_t* snapshot) { + db->rep->ReleaseSnapshot(snapshot->rep); + delete snapshot; +} + +char* rocksdb_property_value( + rocksdb_t* db, + const char* propname) { + std::string tmp; + if (db->rep->GetProperty(Slice(propname), &tmp)) { + // We use strdup() since we expect human readable output. + return strdup(tmp.c_str()); + } else { + return nullptr; + } +} + +char* rocksdb_property_value_cf( + rocksdb_t* db, + rocksdb_column_family_handle_t* column_family, + const char* propname) { + std::string tmp; + if (db->rep->GetProperty(column_family->rep, Slice(propname), &tmp)) { + // We use strdup() since we expect human readable output. 
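+    // (Unlike CopyString(), which copies raw bytes without a terminating NUL
+    //  and is paired with an explicit length out-parameter for binary values,
+    //  property strings are NUL-terminated, so strdup() is safe here.)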
+ return strdup(tmp.c_str()); + } else { + return nullptr; + } +} + +void rocksdb_approximate_sizes( + rocksdb_t* db, + int num_ranges, + const char* const* range_start_key, const size_t* range_start_key_len, + const char* const* range_limit_key, const size_t* range_limit_key_len, + uint64_t* sizes) { + Range* ranges = new Range[num_ranges]; + for (int i = 0; i < num_ranges; i++) { + ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]); + ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]); + } + db->rep->GetApproximateSizes(ranges, num_ranges, sizes); + delete[] ranges; +} + +void rocksdb_approximate_sizes_cf( + rocksdb_t* db, + rocksdb_column_family_handle_t* column_family, + int num_ranges, + const char* const* range_start_key, const size_t* range_start_key_len, + const char* const* range_limit_key, const size_t* range_limit_key_len, + uint64_t* sizes) { + Range* ranges = new Range[num_ranges]; + for (int i = 0; i < num_ranges; i++) { + ranges[i].start = Slice(range_start_key[i], range_start_key_len[i]); + ranges[i].limit = Slice(range_limit_key[i], range_limit_key_len[i]); + } + db->rep->GetApproximateSizes(column_family->rep, ranges, num_ranges, sizes); + delete[] ranges; +} + +void rocksdb_delete_file( + rocksdb_t* db, + const char* name) { + db->rep->DeleteFile(name); +} + +const rocksdb_livefiles_t* rocksdb_livefiles( + rocksdb_t* db) { + rocksdb_livefiles_t* result = new rocksdb_livefiles_t; + db->rep->GetLiveFilesMetaData(&result->rep); + return result; +} + +void rocksdb_compact_range( + rocksdb_t* db, + const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len) { + Slice a, b; + db->rep->CompactRange( + CompactRangeOptions(), + // Pass nullptr Slice if corresponding "const char*" is nullptr + (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr), + (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr)); +} + +void rocksdb_compact_range_cf( + rocksdb_t* db, + rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, + const char* limit_key, size_t limit_key_len) { + Slice a, b; + db->rep->CompactRange( + CompactRangeOptions(), column_family->rep, + // Pass nullptr Slice if corresponding "const char*" is nullptr + (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr), + (limit_key ? 
(b = Slice(limit_key, limit_key_len), &b) : nullptr)); +} + +void rocksdb_flush( + rocksdb_t* db, + const rocksdb_flushoptions_t* options, + char** errptr) { + SaveError(errptr, db->rep->Flush(options->rep)); +} + +void rocksdb_disable_file_deletions( + rocksdb_t* db, + char** errptr) { + SaveError(errptr, db->rep->DisableFileDeletions()); +} + +void rocksdb_enable_file_deletions( + rocksdb_t* db, + unsigned char force, + char** errptr) { + SaveError(errptr, db->rep->EnableFileDeletions(force)); +} + +void rocksdb_destroy_db( + const rocksdb_options_t* options, + const char* name, + char** errptr) { + SaveError(errptr, DestroyDB(name, options->rep)); +} + +void rocksdb_repair_db( + const rocksdb_options_t* options, + const char* name, + char** errptr) { + SaveError(errptr, RepairDB(name, options->rep)); +} + +void rocksdb_iter_destroy(rocksdb_iterator_t* iter) { + delete iter->rep; + delete iter; +} + +unsigned char rocksdb_iter_valid(const rocksdb_iterator_t* iter) { + return iter->rep->Valid(); +} + +void rocksdb_iter_seek_to_first(rocksdb_iterator_t* iter) { + iter->rep->SeekToFirst(); +} + +void rocksdb_iter_seek_to_last(rocksdb_iterator_t* iter) { + iter->rep->SeekToLast(); +} + +void rocksdb_iter_seek(rocksdb_iterator_t* iter, const char* k, size_t klen) { + iter->rep->Seek(Slice(k, klen)); +} + +void rocksdb_iter_next(rocksdb_iterator_t* iter) { + iter->rep->Next(); +} + +void rocksdb_iter_prev(rocksdb_iterator_t* iter) { + iter->rep->Prev(); +} + +const char* rocksdb_iter_key(const rocksdb_iterator_t* iter, size_t* klen) { + Slice s = iter->rep->key(); + *klen = s.size(); + return s.data(); +} + +const char* rocksdb_iter_value(const rocksdb_iterator_t* iter, size_t* vlen) { + Slice s = iter->rep->value(); + *vlen = s.size(); + return s.data(); +} + +void rocksdb_iter_get_error(const rocksdb_iterator_t* iter, char** errptr) { + SaveError(errptr, iter->rep->status()); +} + +rocksdb_writebatch_t* rocksdb_writebatch_create() { + return new rocksdb_writebatch_t; +} + +rocksdb_writebatch_t* rocksdb_writebatch_create_from(const char* rep, + size_t size) { + rocksdb_writebatch_t* b = new rocksdb_writebatch_t; + b->rep = WriteBatch(std::string(rep, size)); + return b; +} + +void rocksdb_writebatch_destroy(rocksdb_writebatch_t* b) { + delete b; +} + +void rocksdb_writebatch_clear(rocksdb_writebatch_t* b) { + b->rep.Clear(); +} + +int rocksdb_writebatch_count(rocksdb_writebatch_t* b) { + return b->rep.Count(); +} + +void rocksdb_writebatch_put( + rocksdb_writebatch_t* b, + const char* key, size_t klen, + const char* val, size_t vlen) { + b->rep.Put(Slice(key, klen), Slice(val, vlen)); +} + +void rocksdb_writebatch_put_cf( + rocksdb_writebatch_t* b, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, + const char* val, size_t vlen) { + b->rep.Put(column_family->rep, Slice(key, klen), Slice(val, vlen)); +} + +void rocksdb_writebatch_putv( + rocksdb_writebatch_t* b, + int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes) { + std::vector key_slices(num_keys); + for (int i = 0; i < num_keys; i++) { + key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]); + } + std::vector value_slices(num_values); + for (int i = 0; i < num_values; i++) { + value_slices[i] = Slice(values_list[i], values_list_sizes[i]); + } + b->rep.Put(SliceParts(key_slices.data(), num_keys), + SliceParts(value_slices.data(), num_values)); +} + +void rocksdb_writebatch_putv_cf( + 
rocksdb_writebatch_t* b, + rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes) { + std::vector key_slices(num_keys); + for (int i = 0; i < num_keys; i++) { + key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]); + } + std::vector value_slices(num_values); + for (int i = 0; i < num_values; i++) { + value_slices[i] = Slice(values_list[i], values_list_sizes[i]); + } + b->rep.Put(column_family->rep, SliceParts(key_slices.data(), num_keys), + SliceParts(value_slices.data(), num_values)); +} + +void rocksdb_writebatch_merge( + rocksdb_writebatch_t* b, + const char* key, size_t klen, + const char* val, size_t vlen) { + b->rep.Merge(Slice(key, klen), Slice(val, vlen)); +} + +void rocksdb_writebatch_merge_cf( + rocksdb_writebatch_t* b, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, + const char* val, size_t vlen) { + b->rep.Merge(column_family->rep, Slice(key, klen), Slice(val, vlen)); +} + +void rocksdb_writebatch_mergev( + rocksdb_writebatch_t* b, + int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes) { + std::vector key_slices(num_keys); + for (int i = 0; i < num_keys; i++) { + key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]); + } + std::vector value_slices(num_values); + for (int i = 0; i < num_values; i++) { + value_slices[i] = Slice(values_list[i], values_list_sizes[i]); + } + b->rep.Merge(SliceParts(key_slices.data(), num_keys), + SliceParts(value_slices.data(), num_values)); +} + +void rocksdb_writebatch_mergev_cf( + rocksdb_writebatch_t* b, + rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes) { + std::vector key_slices(num_keys); + for (int i = 0; i < num_keys; i++) { + key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]); + } + std::vector value_slices(num_values); + for (int i = 0; i < num_values; i++) { + value_slices[i] = Slice(values_list[i], values_list_sizes[i]); + } + b->rep.Merge(column_family->rep, SliceParts(key_slices.data(), num_keys), + SliceParts(value_slices.data(), num_values)); +} + +void rocksdb_writebatch_delete( + rocksdb_writebatch_t* b, + const char* key, size_t klen) { + b->rep.Delete(Slice(key, klen)); +} + +void rocksdb_writebatch_delete_cf( + rocksdb_writebatch_t* b, + rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen) { + b->rep.Delete(column_family->rep, Slice(key, klen)); +} + +void rocksdb_writebatch_deletev( + rocksdb_writebatch_t* b, + int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes) { + std::vector key_slices(num_keys); + for (int i = 0; i < num_keys; i++) { + key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]); + } + b->rep.Delete(SliceParts(key_slices.data(), num_keys)); +} + +void rocksdb_writebatch_deletev_cf( + rocksdb_writebatch_t* b, + rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes) { + std::vector key_slices(num_keys); + for (int i = 0; i < num_keys; i++) { + key_slices[i] = Slice(keys_list[i], keys_list_sizes[i]); + } + b->rep.Delete(column_family->rep, SliceParts(key_slices.data(), num_keys)); +} + +void rocksdb_writebatch_put_log_data( + 
rocksdb_writebatch_t* b, + const char* blob, size_t len) { + b->rep.PutLogData(Slice(blob, len)); +} + +void rocksdb_writebatch_iterate( + rocksdb_writebatch_t* b, + void* state, + void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), + void (*deleted)(void*, const char* k, size_t klen)) { + class H : public WriteBatch::Handler { + public: + void* state_; + void (*put_)(void*, const char* k, size_t klen, const char* v, size_t vlen); + void (*deleted_)(void*, const char* k, size_t klen); + virtual void Put(const Slice& key, const Slice& value) override { + (*put_)(state_, key.data(), key.size(), value.data(), value.size()); + } + virtual void Delete(const Slice& key) override { + (*deleted_)(state_, key.data(), key.size()); + } + }; + H handler; + handler.state_ = state; + handler.put_ = put; + handler.deleted_ = deleted; + b->rep.Iterate(&handler); +} + +const char* rocksdb_writebatch_data(rocksdb_writebatch_t* b, size_t* size) { + *size = b->rep.GetDataSize(); + return b->rep.Data().c_str(); +} + +rocksdb_block_based_table_options_t* +rocksdb_block_based_options_create() { + return new rocksdb_block_based_table_options_t; +} + +void rocksdb_block_based_options_destroy( + rocksdb_block_based_table_options_t* options) { + delete options; +} + +void rocksdb_block_based_options_set_block_size( + rocksdb_block_based_table_options_t* options, size_t block_size) { + options->rep.block_size = block_size; +} + +void rocksdb_block_based_options_set_block_size_deviation( + rocksdb_block_based_table_options_t* options, int block_size_deviation) { + options->rep.block_size_deviation = block_size_deviation; +} + +void rocksdb_block_based_options_set_block_restart_interval( + rocksdb_block_based_table_options_t* options, int block_restart_interval) { + options->rep.block_restart_interval = block_restart_interval; +} + +void rocksdb_block_based_options_set_filter_policy( + rocksdb_block_based_table_options_t* options, + rocksdb_filterpolicy_t* filter_policy) { + options->rep.filter_policy.reset(filter_policy); +} + +void rocksdb_block_based_options_set_no_block_cache( + rocksdb_block_based_table_options_t* options, + unsigned char no_block_cache) { + options->rep.no_block_cache = no_block_cache; +} + +void rocksdb_block_based_options_set_block_cache( + rocksdb_block_based_table_options_t* options, + rocksdb_cache_t* block_cache) { + if (block_cache) { + options->rep.block_cache = block_cache->rep; + } +} + +void rocksdb_block_based_options_set_block_cache_compressed( + rocksdb_block_based_table_options_t* options, + rocksdb_cache_t* block_cache_compressed) { + if (block_cache_compressed) { + options->rep.block_cache_compressed = block_cache_compressed->rep; + } +} + +void rocksdb_block_based_options_set_whole_key_filtering( + rocksdb_block_based_table_options_t* options, unsigned char v) { + options->rep.whole_key_filtering = v; +} + +void rocksdb_block_based_options_set_format_version( + rocksdb_block_based_table_options_t* options, int v) { + options->rep.format_version = v; +} + +void rocksdb_block_based_options_set_index_type( + rocksdb_block_based_table_options_t* options, int v) { + options->rep.index_type = static_cast(v); +} + +void rocksdb_block_based_options_set_hash_index_allow_collision( + rocksdb_block_based_table_options_t* options, unsigned char v) { + options->rep.hash_index_allow_collision = v; +} + +void rocksdb_block_based_options_set_cache_index_and_filter_blocks( + rocksdb_block_based_table_options_t* options, unsigned char v) { + 
options->rep.cache_index_and_filter_blocks = v; +} + +void rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache( + rocksdb_block_based_table_options_t* options, unsigned char v) { + options->rep.pin_l0_filter_and_index_blocks_in_cache = v; +} + +void rocksdb_block_based_options_set_skip_table_builder_flush( + rocksdb_block_based_table_options_t* options, unsigned char v) { + options->rep.skip_table_builder_flush = v; +} + +void rocksdb_options_set_block_based_table_factory( + rocksdb_options_t *opt, + rocksdb_block_based_table_options_t* table_options) { + if (table_options) { + opt->rep.table_factory.reset( + rocksdb::NewBlockBasedTableFactory(table_options->rep)); + } +} + + +rocksdb_cuckoo_table_options_t* +rocksdb_cuckoo_options_create() { + return new rocksdb_cuckoo_table_options_t; +} + +void rocksdb_cuckoo_options_destroy( + rocksdb_cuckoo_table_options_t* options) { + delete options; +} + +void rocksdb_cuckoo_options_set_hash_ratio( + rocksdb_cuckoo_table_options_t* options, double v) { + options->rep.hash_table_ratio = v; +} + +void rocksdb_cuckoo_options_set_max_search_depth( + rocksdb_cuckoo_table_options_t* options, uint32_t v) { + options->rep.max_search_depth = v; +} + +void rocksdb_cuckoo_options_set_cuckoo_block_size( + rocksdb_cuckoo_table_options_t* options, uint32_t v) { + options->rep.cuckoo_block_size = v; +} + +void rocksdb_cuckoo_options_set_identity_as_first_hash( + rocksdb_cuckoo_table_options_t* options, unsigned char v) { + options->rep.identity_as_first_hash = v; +} + +void rocksdb_cuckoo_options_set_use_module_hash( + rocksdb_cuckoo_table_options_t* options, unsigned char v) { + options->rep.use_module_hash = v; +} + +void rocksdb_options_set_cuckoo_table_factory( + rocksdb_options_t *opt, + rocksdb_cuckoo_table_options_t* table_options) { + if (table_options) { + opt->rep.table_factory.reset( + rocksdb::NewCuckooTableFactory(table_options->rep)); + } +} + + +rocksdb_options_t* rocksdb_options_create() { + return new rocksdb_options_t; +} + +void rocksdb_options_destroy(rocksdb_options_t* options) { + delete options; +} + +void rocksdb_options_increase_parallelism( + rocksdb_options_t* opt, int total_threads) { + opt->rep.IncreaseParallelism(total_threads); +} + +void rocksdb_options_optimize_for_point_lookup( + rocksdb_options_t* opt, uint64_t block_cache_size_mb) { + opt->rep.OptimizeForPointLookup(block_cache_size_mb); +} + +void rocksdb_options_optimize_level_style_compaction( + rocksdb_options_t* opt, uint64_t memtable_memory_budget) { + opt->rep.OptimizeLevelStyleCompaction(memtable_memory_budget); +} + +void rocksdb_options_optimize_universal_style_compaction( + rocksdb_options_t* opt, uint64_t memtable_memory_budget) { + opt->rep.OptimizeUniversalStyleCompaction(memtable_memory_budget); +} + +void rocksdb_options_set_compaction_filter( + rocksdb_options_t* opt, + rocksdb_compactionfilter_t* filter) { + opt->rep.compaction_filter = filter; +} + +void rocksdb_options_set_compaction_filter_factory( + rocksdb_options_t* opt, rocksdb_compactionfilterfactory_t* factory) { + opt->rep.compaction_filter_factory = + std::shared_ptr(factory); +} + +void rocksdb_options_compaction_readahead_size( + rocksdb_options_t* opt, size_t s) { + opt->rep.compaction_readahead_size = s; +} + +void rocksdb_options_set_comparator( + rocksdb_options_t* opt, + rocksdb_comparator_t* cmp) { + opt->rep.comparator = cmp; +} + +void rocksdb_options_set_merge_operator( + rocksdb_options_t* opt, + rocksdb_mergeoperator_t* merge_operator) { + opt->rep.merge_operator = 
std::shared_ptr<MergeOperator>(merge_operator); +} + + +void rocksdb_options_set_create_if_missing( + rocksdb_options_t* opt, unsigned char v) { + opt->rep.create_if_missing = v; +} + +void rocksdb_options_set_create_missing_column_families( + rocksdb_options_t* opt, unsigned char v) { + opt->rep.create_missing_column_families = v; +} + +void rocksdb_options_set_error_if_exists( + rocksdb_options_t* opt, unsigned char v) { + opt->rep.error_if_exists = v; +} + +void rocksdb_options_set_paranoid_checks( + rocksdb_options_t* opt, unsigned char v) { + opt->rep.paranoid_checks = v; +} + +void rocksdb_options_set_env(rocksdb_options_t* opt, rocksdb_env_t* env) { + opt->rep.env = (env ? env->rep : nullptr); +} + +void rocksdb_options_set_info_log(rocksdb_options_t* opt, rocksdb_logger_t* l) { + if (l) { + opt->rep.info_log = l->rep; + } +} + +void rocksdb_options_set_info_log_level( + rocksdb_options_t* opt, int v) { + opt->rep.info_log_level = static_cast<InfoLogLevel>(v); +} + +void rocksdb_options_set_db_write_buffer_size(rocksdb_options_t* opt, + size_t s) { + opt->rep.db_write_buffer_size = s; +} + +void rocksdb_options_set_write_buffer_size(rocksdb_options_t* opt, size_t s) { + opt->rep.write_buffer_size = s; +} + +void rocksdb_options_set_max_open_files(rocksdb_options_t* opt, int n) { + opt->rep.max_open_files = n; +} + +void rocksdb_options_set_max_total_wal_size(rocksdb_options_t* opt, uint64_t n) { + opt->rep.max_total_wal_size = n; +} + +void rocksdb_options_set_target_file_size_base( + rocksdb_options_t* opt, uint64_t n) { + opt->rep.target_file_size_base = n; +} + +void rocksdb_options_set_target_file_size_multiplier( + rocksdb_options_t* opt, int n) { + opt->rep.target_file_size_multiplier = n; +} + +void rocksdb_options_set_max_bytes_for_level_base( + rocksdb_options_t* opt, uint64_t n) { + opt->rep.max_bytes_for_level_base = n; +} + +void rocksdb_options_set_max_bytes_for_level_multiplier( + rocksdb_options_t* opt, int n) { + opt->rep.max_bytes_for_level_multiplier = n; +} + +void rocksdb_options_set_expanded_compaction_factor( + rocksdb_options_t* opt, int n) { + opt->rep.expanded_compaction_factor = n; +} + +void rocksdb_options_set_max_grandparent_overlap_factor( + rocksdb_options_t* opt, int n) { + opt->rep.max_grandparent_overlap_factor = n; +} + +void rocksdb_options_set_max_bytes_for_level_multiplier_additional( + rocksdb_options_t* opt, int* level_values, size_t num_levels) { + opt->rep.max_bytes_for_level_multiplier_additional.resize(num_levels); + for (size_t i = 0; i < num_levels; ++i) { + opt->rep.max_bytes_for_level_multiplier_additional[i] = level_values[i]; + } +} + +void rocksdb_options_enable_statistics(rocksdb_options_t* opt) { + opt->rep.statistics = rocksdb::CreateDBStatistics(); +} + +void rocksdb_options_set_num_levels(rocksdb_options_t* opt, int n) { + opt->rep.num_levels = n; +} + +void rocksdb_options_set_level0_file_num_compaction_trigger( + rocksdb_options_t* opt, int n) { + opt->rep.level0_file_num_compaction_trigger = n; +} + +void rocksdb_options_set_level0_slowdown_writes_trigger( + rocksdb_options_t* opt, int n) { + opt->rep.level0_slowdown_writes_trigger = n; +} + +void rocksdb_options_set_level0_stop_writes_trigger( + rocksdb_options_t* opt, int n) { + opt->rep.level0_stop_writes_trigger = n; +} + +void rocksdb_options_set_max_mem_compaction_level(rocksdb_options_t* opt, + int n) {} + +void rocksdb_options_set_compression(rocksdb_options_t* opt, int t) { + opt->rep.compression = static_cast<CompressionType>(t); +} + +void rocksdb_options_set_compression_per_level(rocksdb_options_t* opt, +
int* level_values, + size_t num_levels) { + opt->rep.compression_per_level.resize(num_levels); + for (size_t i = 0; i < num_levels; ++i) { + opt->rep.compression_per_level[i] = + static_cast<CompressionType>(level_values[i]); + } +} + +void rocksdb_options_set_compression_options(rocksdb_options_t* opt, int w_bits, + int level, int strategy, + int max_dict_bytes) { + opt->rep.compression_opts.window_bits = w_bits; + opt->rep.compression_opts.level = level; + opt->rep.compression_opts.strategy = strategy; + opt->rep.compression_opts.max_dict_bytes = max_dict_bytes; +} + +void rocksdb_options_set_prefix_extractor( + rocksdb_options_t* opt, rocksdb_slicetransform_t* prefix_extractor) { + opt->rep.prefix_extractor.reset(prefix_extractor); +} + +void rocksdb_options_set_disable_data_sync( + rocksdb_options_t* opt, int disable_data_sync) { + opt->rep.disableDataSync = disable_data_sync; +} + +void rocksdb_options_set_use_fsync( + rocksdb_options_t* opt, int use_fsync) { + opt->rep.use_fsync = use_fsync; +} + +void rocksdb_options_set_db_log_dir( + rocksdb_options_t* opt, const char* db_log_dir) { + opt->rep.db_log_dir = db_log_dir; +} + +void rocksdb_options_set_wal_dir( + rocksdb_options_t* opt, const char* v) { + opt->rep.wal_dir = v; +} + +void rocksdb_options_set_WAL_ttl_seconds(rocksdb_options_t* opt, uint64_t ttl) { + opt->rep.WAL_ttl_seconds = ttl; +} + +void rocksdb_options_set_WAL_size_limit_MB( + rocksdb_options_t* opt, uint64_t limit) { + opt->rep.WAL_size_limit_MB = limit; +} + +void rocksdb_options_set_manifest_preallocation_size( + rocksdb_options_t* opt, size_t v) { + opt->rep.manifest_preallocation_size = v; +} + +// noop +void rocksdb_options_set_purge_redundant_kvs_while_flush(rocksdb_options_t* opt, + unsigned char v) {} + +void rocksdb_options_set_allow_os_buffer(rocksdb_options_t* opt, + unsigned char v) { + opt->rep.allow_os_buffer = v; +} + +void rocksdb_options_set_allow_mmap_reads( + rocksdb_options_t* opt, unsigned char v) { + opt->rep.allow_mmap_reads = v; +} + +void rocksdb_options_set_allow_mmap_writes( + rocksdb_options_t* opt, unsigned char v) { + opt->rep.allow_mmap_writes = v; +} + +void rocksdb_options_set_is_fd_close_on_exec( + rocksdb_options_t* opt, unsigned char v) { + opt->rep.is_fd_close_on_exec = v; +} + +void rocksdb_options_set_skip_log_error_on_recovery( + rocksdb_options_t* opt, unsigned char v) { + opt->rep.skip_log_error_on_recovery = v; +} + +void rocksdb_options_set_stats_dump_period_sec( + rocksdb_options_t* opt, unsigned int v) { + opt->rep.stats_dump_period_sec = v; +} + +void rocksdb_options_set_advise_random_on_open( + rocksdb_options_t* opt, unsigned char v) { + opt->rep.advise_random_on_open = v; +} + +void rocksdb_options_set_access_hint_on_compaction_start( + rocksdb_options_t* opt, int v) { + switch(v) { + case 0: + opt->rep.access_hint_on_compaction_start = rocksdb::Options::NONE; + break; + case 1: + opt->rep.access_hint_on_compaction_start = rocksdb::Options::NORMAL; + break; + case 2: + opt->rep.access_hint_on_compaction_start = rocksdb::Options::SEQUENTIAL; + break; + case 3: + opt->rep.access_hint_on_compaction_start = rocksdb::Options::WILLNEED; + break; + } +} + +void rocksdb_options_set_use_adaptive_mutex( + rocksdb_options_t* opt, unsigned char v) { + opt->rep.use_adaptive_mutex = v; +} + +void rocksdb_options_set_bytes_per_sync( + rocksdb_options_t* opt, uint64_t v) { + opt->rep.bytes_per_sync = v; +} + +void rocksdb_options_set_verify_checksums_in_compaction( + rocksdb_options_t* opt, unsigned char v) { +
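// When enabled, compactions re-verify block checksums as they read their + // input files, so corruption fails the compaction instead of being + // rewritten into new output files. +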
opt->rep.verify_checksums_in_compaction = v; +} + +void rocksdb_options_set_max_sequential_skip_in_iterations( + rocksdb_options_t* opt, uint64_t v) { + opt->rep.max_sequential_skip_in_iterations = v; +} + +void rocksdb_options_set_max_write_buffer_number(rocksdb_options_t* opt, int n) { + opt->rep.max_write_buffer_number = n; +} + +void rocksdb_options_set_min_write_buffer_number_to_merge(rocksdb_options_t* opt, int n) { + opt->rep.min_write_buffer_number_to_merge = n; +} + +void rocksdb_options_set_max_write_buffer_number_to_maintain( + rocksdb_options_t* opt, int n) { + opt->rep.max_write_buffer_number_to_maintain = n; +} + +void rocksdb_options_set_max_background_compactions(rocksdb_options_t* opt, int n) { + opt->rep.max_background_compactions = n; +} + +void rocksdb_options_set_max_background_flushes(rocksdb_options_t* opt, int n) { + opt->rep.max_background_flushes = n; +} + +void rocksdb_options_set_max_log_file_size(rocksdb_options_t* opt, size_t v) { + opt->rep.max_log_file_size = v; +} + +void rocksdb_options_set_log_file_time_to_roll(rocksdb_options_t* opt, size_t v) { + opt->rep.log_file_time_to_roll = v; +} + +void rocksdb_options_set_keep_log_file_num(rocksdb_options_t* opt, size_t v) { + opt->rep.keep_log_file_num = v; +} + +void rocksdb_options_set_recycle_log_file_num(rocksdb_options_t* opt, + size_t v) { + opt->rep.recycle_log_file_num = v; +} + +void rocksdb_options_set_soft_rate_limit(rocksdb_options_t* opt, double v) { + opt->rep.soft_rate_limit = v; +} + +void rocksdb_options_set_hard_rate_limit(rocksdb_options_t* opt, double v) { + opt->rep.hard_rate_limit = v; +} + +void rocksdb_options_set_rate_limit_delay_max_milliseconds( + rocksdb_options_t* opt, unsigned int v) { + opt->rep.rate_limit_delay_max_milliseconds = v; +} + +void rocksdb_options_set_max_manifest_file_size( + rocksdb_options_t* opt, size_t v) { + opt->rep.max_manifest_file_size = v; +} + +void rocksdb_options_set_table_cache_numshardbits( + rocksdb_options_t* opt, int v) { + opt->rep.table_cache_numshardbits = v; +} + +void rocksdb_options_set_table_cache_remove_scan_count_limit( + rocksdb_options_t* opt, int v) { + // this option is deprecated +} + +void rocksdb_options_set_arena_block_size( + rocksdb_options_t* opt, size_t v) { + opt->rep.arena_block_size = v; +} + +void rocksdb_options_set_disable_auto_compactions(rocksdb_options_t* opt, int disable) { + opt->rep.disable_auto_compactions = disable; +} + +void rocksdb_options_set_delete_obsolete_files_period_micros( + rocksdb_options_t* opt, uint64_t v) { + opt->rep.delete_obsolete_files_period_micros = v; +} + +void rocksdb_options_set_source_compaction_factor( + rocksdb_options_t* opt, int n) { + opt->rep.expanded_compaction_factor = n; +} + +void rocksdb_options_prepare_for_bulk_load(rocksdb_options_t* opt) { + opt->rep.PrepareForBulkLoad(); +} + +void rocksdb_options_set_memtable_vector_rep(rocksdb_options_t *opt) { + static rocksdb::VectorRepFactory* factory = 0; + if (!factory) { + factory = new rocksdb::VectorRepFactory; + } + opt->rep.memtable_factory.reset(factory); +} + +void rocksdb_options_set_memtable_prefix_bloom_size_ratio( + rocksdb_options_t* opt, double v) { + opt->rep.memtable_prefix_bloom_size_ratio = v; +} + +void rocksdb_options_set_memtable_huge_page_size(rocksdb_options_t* opt, + size_t v) { + opt->rep.memtable_huge_page_size = v; +} + +void rocksdb_options_set_hash_skip_list_rep( + rocksdb_options_t *opt, size_t bucket_count, + int32_t skiplist_height, int32_t skiplist_branching_factor) { + static 
rocksdb::MemTableRepFactory* factory = 0; + if (!factory) { + factory = rocksdb::NewHashSkipListRepFactory( + bucket_count, skiplist_height, skiplist_branching_factor); + } + opt->rep.memtable_factory.reset(factory); +} + +void rocksdb_options_set_hash_link_list_rep( + rocksdb_options_t *opt, size_t bucket_count) { + static rocksdb::MemTableRepFactory* factory = 0; + if (!factory) { + factory = rocksdb::NewHashLinkListRepFactory(bucket_count); + } + opt->rep.memtable_factory.reset(factory); +} + +void rocksdb_options_set_plain_table_factory( + rocksdb_options_t *opt, uint32_t user_key_len, int bloom_bits_per_key, + double hash_table_ratio, size_t index_sparseness) { + static rocksdb::TableFactory* factory = 0; + if (!factory) { + rocksdb::PlainTableOptions options; + options.user_key_len = user_key_len; + options.bloom_bits_per_key = bloom_bits_per_key; + options.hash_table_ratio = hash_table_ratio; + options.index_sparseness = index_sparseness; + + factory = rocksdb::NewPlainTableFactory(options); + } + opt->rep.table_factory.reset(factory); +} + +void rocksdb_options_set_max_successive_merges( + rocksdb_options_t* opt, size_t v) { + opt->rep.max_successive_merges = v; +} + +void rocksdb_options_set_min_partial_merge_operands( + rocksdb_options_t* opt, uint32_t v) { + opt->rep.min_partial_merge_operands = v; +} + +void rocksdb_options_set_bloom_locality( + rocksdb_options_t* opt, uint32_t v) { + opt->rep.bloom_locality = v; +} + +void rocksdb_options_set_inplace_update_support( + rocksdb_options_t* opt, unsigned char v) { + opt->rep.inplace_update_support = v; +} + +void rocksdb_options_set_inplace_update_num_locks( + rocksdb_options_t* opt, size_t v) { + opt->rep.inplace_update_num_locks = v; +} + +void rocksdb_options_set_report_bg_io_stats( + rocksdb_options_t* opt, int v) { + opt->rep.report_bg_io_stats = v; +} + +void rocksdb_options_set_compaction_style(rocksdb_options_t *opt, int style) { + opt->rep.compaction_style = static_cast<rocksdb::CompactionStyle>(style); +} + +void rocksdb_options_set_universal_compaction_options(rocksdb_options_t *opt, rocksdb_universal_compaction_options_t *uco) { + opt->rep.compaction_options_universal = *(uco->rep); +} + +void rocksdb_options_set_fifo_compaction_options( + rocksdb_options_t* opt, + rocksdb_fifo_compaction_options_t* fifo) { + opt->rep.compaction_options_fifo = fifo->rep; +} + +char *rocksdb_options_statistics_get_string(rocksdb_options_t *opt) { + rocksdb::Statistics *statistics = opt->rep.statistics.get(); + if (statistics) { + return strdup(statistics->ToString().c_str()); + } + return nullptr; +} + +/* +TODO: +DB::OpenForReadOnly +DB::KeyMayExist +DB::GetOptions +DB::GetSortedWalFiles +DB::GetLatestSequenceNumber +DB::GetUpdatesSince +DB::GetDbIdentity +DB::RunManualCompaction +custom cache +table_properties_collectors +*/ + +rocksdb_compactionfilter_t* rocksdb_compactionfilter_create( + void* state, + void (*destructor)(void*), + unsigned char (*filter)( + void*, + int level, + const char* key, size_t key_length, + const char* existing_value, size_t value_length, + char** new_value, size_t *new_value_length, + unsigned char* value_changed), + const char* (*name)(void*)) { + rocksdb_compactionfilter_t* result = new rocksdb_compactionfilter_t; + result->state_ = state; + result->destructor_ = destructor; + result->filter_ = filter; + result->name_ = name; + return result; +} + +void rocksdb_compactionfilter_destroy(rocksdb_compactionfilter_t* filter) { + delete filter; +} + +unsigned char rocksdb_compactionfiltercontext_is_full_compaction( +
rocksdb_compactionfiltercontext_t* context) { + return context->rep.is_full_compaction; +} + +unsigned char rocksdb_compactionfiltercontext_is_manual_compaction( + rocksdb_compactionfiltercontext_t* context) { + return context->rep.is_manual_compaction; +} + +rocksdb_compactionfilterfactory_t* rocksdb_compactionfilterfactory_create( + void* state, void (*destructor)(void*), + rocksdb_compactionfilter_t* (*create_compaction_filter)( + void*, rocksdb_compactionfiltercontext_t* context), + const char* (*name)(void*)) { + rocksdb_compactionfilterfactory_t* result = + new rocksdb_compactionfilterfactory_t; + result->state_ = state; + result->destructor_ = destructor; + result->create_compaction_filter_ = create_compaction_filter; + result->name_ = name; + return result; +} + +void rocksdb_compactionfilterfactory_destroy( + rocksdb_compactionfilterfactory_t* factory) { + delete factory; +} + +rocksdb_comparator_t* rocksdb_comparator_create( + void* state, + void (*destructor)(void*), + int (*compare)( + void*, + const char* a, size_t alen, + const char* b, size_t blen), + const char* (*name)(void*)) { + rocksdb_comparator_t* result = new rocksdb_comparator_t; + result->state_ = state; + result->destructor_ = destructor; + result->compare_ = compare; + result->name_ = name; + return result; +} + +void rocksdb_comparator_destroy(rocksdb_comparator_t* cmp) { + delete cmp; +} + +rocksdb_filterpolicy_t* rocksdb_filterpolicy_create( + void* state, + void (*destructor)(void*), + char* (*create_filter)( + void*, + const char* const* key_array, const size_t* key_length_array, + int num_keys, + size_t* filter_length), + unsigned char (*key_may_match)( + void*, + const char* key, size_t length, + const char* filter, size_t filter_length), + void (*delete_filter)( + void*, + const char* filter, size_t filter_length), + const char* (*name)(void*)) { + rocksdb_filterpolicy_t* result = new rocksdb_filterpolicy_t; + result->state_ = state; + result->destructor_ = destructor; + result->create_ = create_filter; + result->key_match_ = key_may_match; + result->delete_filter_ = delete_filter; + result->name_ = name; + return result; +} + +void rocksdb_filterpolicy_destroy(rocksdb_filterpolicy_t* filter) { + delete filter; +} + +rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_bloom_format(int bits_per_key, bool original_format) { + // Make a rocksdb_filterpolicy_t, but override all of its methods so + // they delegate to a NewBloomFilterPolicy() instead of user + // supplied C functions. 
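+ // For reference, typical usage from the C side looks like the bloom-filter + // run in db/c_test.c below (the variable names here are illustrative only): + // + // rocksdb_filterpolicy_t* policy = rocksdb_filterpolicy_create_bloom(10); + // rocksdb_block_based_options_set_filter_policy(table_options, policy); + // rocksdb_options_set_block_based_table_factory(options, table_options); + // + // rocksdb_block_based_options_set_filter_policy() hands ownership to the + // table options via filter_policy.reset(), so the policy must not be + // destroyed separately.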
+ struct Wrapper : public rocksdb_filterpolicy_t { + const FilterPolicy* rep_; + ~Wrapper() { delete rep_; } + const char* Name() const override { return rep_->Name(); } + void CreateFilter(const Slice* keys, int n, + std::string* dst) const override { + return rep_->CreateFilter(keys, n, dst); + } + bool KeyMayMatch(const Slice& key, const Slice& filter) const override { + return rep_->KeyMayMatch(key, filter); + } + static void DoNothing(void*) { } + }; + Wrapper* wrapper = new Wrapper; + wrapper->rep_ = NewBloomFilterPolicy(bits_per_key, original_format); + wrapper->state_ = nullptr; + wrapper->delete_filter_ = nullptr; + wrapper->destructor_ = &Wrapper::DoNothing; + return wrapper; +} + +rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_bloom_full(int bits_per_key) { + return rocksdb_filterpolicy_create_bloom_format(bits_per_key, false); +} + +rocksdb_filterpolicy_t* rocksdb_filterpolicy_create_bloom(int bits_per_key) { + return rocksdb_filterpolicy_create_bloom_format(bits_per_key, true); +} + +rocksdb_mergeoperator_t* rocksdb_mergeoperator_create( + void* state, void (*destructor)(void*), + char* (*full_merge)(void*, const char* key, size_t key_length, + const char* existing_value, + size_t existing_value_length, + const char* const* operands_list, + const size_t* operands_list_length, int num_operands, + unsigned char* success, size_t* new_value_length), + char* (*partial_merge)(void*, const char* key, size_t key_length, + const char* const* operands_list, + const size_t* operands_list_length, int num_operands, + unsigned char* success, size_t* new_value_length), + void (*delete_value)(void*, const char* value, size_t value_length), + const char* (*name)(void*)) { + rocksdb_mergeoperator_t* result = new rocksdb_mergeoperator_t; + result->state_ = state; + result->destructor_ = destructor; + result->full_merge_ = full_merge; + result->partial_merge_ = partial_merge; + result->delete_value_ = delete_value; + result->name_ = name; + return result; +} + +void rocksdb_mergeoperator_destroy(rocksdb_mergeoperator_t* merge_operator) { + delete merge_operator; +} + +rocksdb_readoptions_t* rocksdb_readoptions_create() { + return new rocksdb_readoptions_t; +} + +void rocksdb_readoptions_destroy(rocksdb_readoptions_t* opt) { + delete opt; +} + +void rocksdb_readoptions_set_verify_checksums( + rocksdb_readoptions_t* opt, + unsigned char v) { + opt->rep.verify_checksums = v; +} + +void rocksdb_readoptions_set_fill_cache( + rocksdb_readoptions_t* opt, unsigned char v) { + opt->rep.fill_cache = v; +} + +void rocksdb_readoptions_set_snapshot( + rocksdb_readoptions_t* opt, + const rocksdb_snapshot_t* snap) { + opt->rep.snapshot = (snap ? 
snap->rep : nullptr); +} + +void rocksdb_readoptions_set_iterate_upper_bound( + rocksdb_readoptions_t* opt, + const char* key, size_t keylen) { + if (key == nullptr) { + opt->upper_bound = Slice(); + opt->rep.iterate_upper_bound = nullptr; + + } else { + opt->upper_bound = Slice(key, keylen); + opt->rep.iterate_upper_bound = &opt->upper_bound; + } +} + +void rocksdb_readoptions_set_read_tier( + rocksdb_readoptions_t* opt, int v) { + opt->rep.read_tier = static_cast<rocksdb::ReadTier>(v); +} + +void rocksdb_readoptions_set_tailing( + rocksdb_readoptions_t* opt, unsigned char v) { + opt->rep.tailing = v; +} + +void rocksdb_readoptions_set_readahead_size( + rocksdb_readoptions_t* opt, size_t v) { + opt->rep.readahead_size = v; +} + +rocksdb_writeoptions_t* rocksdb_writeoptions_create() { + return new rocksdb_writeoptions_t; +} + +void rocksdb_writeoptions_destroy(rocksdb_writeoptions_t* opt) { + delete opt; +} + +void rocksdb_writeoptions_set_sync( + rocksdb_writeoptions_t* opt, unsigned char v) { + opt->rep.sync = v; +} + +void rocksdb_writeoptions_disable_WAL(rocksdb_writeoptions_t* opt, int disable) { + opt->rep.disableWAL = disable; +} + + +rocksdb_flushoptions_t* rocksdb_flushoptions_create() { + return new rocksdb_flushoptions_t; +} + +void rocksdb_flushoptions_destroy(rocksdb_flushoptions_t* opt) { + delete opt; +} + +void rocksdb_flushoptions_set_wait( + rocksdb_flushoptions_t* opt, unsigned char v) { + opt->rep.wait = v; +} + +rocksdb_cache_t* rocksdb_cache_create_lru(size_t capacity) { + rocksdb_cache_t* c = new rocksdb_cache_t; + c->rep = NewLRUCache(capacity); + return c; +} + +void rocksdb_cache_destroy(rocksdb_cache_t* cache) { + delete cache; +} + +void rocksdb_cache_set_capacity(rocksdb_cache_t* cache, size_t capacity) { + cache->rep->SetCapacity(capacity); +} + +rocksdb_env_t* rocksdb_create_default_env() { + rocksdb_env_t* result = new rocksdb_env_t; + result->rep = Env::Default(); + result->is_default = true; + return result; +} + +rocksdb_env_t* rocksdb_create_mem_env() { + rocksdb_env_t* result = new rocksdb_env_t; + result->rep = rocksdb::NewMemEnv(Env::Default()); + result->is_default = false; + return result; +} + +void rocksdb_env_set_background_threads(rocksdb_env_t* env, int n) { + env->rep->SetBackgroundThreads(n); +} + +void rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n) { + env->rep->SetBackgroundThreads(n, Env::HIGH); +} + +void rocksdb_env_join_all_threads(rocksdb_env_t* env) { + env->rep->WaitForJoin(); +} + +void rocksdb_env_destroy(rocksdb_env_t* env) { + if (!env->is_default) delete env->rep; + delete env; +} + +rocksdb_slicetransform_t* rocksdb_slicetransform_create( + void* state, + void (*destructor)(void*), + char* (*transform)( + void*, + const char* key, size_t length, + size_t* dst_length), + unsigned char (*in_domain)( + void*, + const char* key, size_t length), + unsigned char (*in_range)( + void*, + const char* key, size_t length), + const char* (*name)(void*)) { + rocksdb_slicetransform_t* result = new rocksdb_slicetransform_t; + result->state_ = state; + result->destructor_ = destructor; + result->transform_ = transform; + result->in_domain_ = in_domain; + result->in_range_ = in_range; + result->name_ = name; + return result; +} + +void rocksdb_slicetransform_destroy(rocksdb_slicetransform_t* st) { + delete st; +} + +rocksdb_slicetransform_t* rocksdb_slicetransform_create_fixed_prefix(size_t prefixLen) { + struct Wrapper : public rocksdb_slicetransform_t { + const SliceTransform* rep_; + ~Wrapper() { delete rep_; } + const char* Name()
const override { return rep_->Name(); } + Slice Transform(const Slice& src) const override { + return rep_->Transform(src); + } + bool InDomain(const Slice& src) const override { + return rep_->InDomain(src); + } + bool InRange(const Slice& src) const override { return rep_->InRange(src); } + static void DoNothing(void*) { } + }; + Wrapper* wrapper = new Wrapper; + wrapper->rep_ = rocksdb::NewFixedPrefixTransform(prefixLen); + wrapper->state_ = nullptr; + wrapper->destructor_ = &Wrapper::DoNothing; + return wrapper; +} + +rocksdb_slicetransform_t* rocksdb_slicetransform_create_noop() { + struct Wrapper : public rocksdb_slicetransform_t { + const SliceTransform* rep_; + ~Wrapper() { delete rep_; } + const char* Name() const override { return rep_->Name(); } + Slice Transform(const Slice& src) const override { + return rep_->Transform(src); + } + bool InDomain(const Slice& src) const override { + return rep_->InDomain(src); + } + bool InRange(const Slice& src) const override { return rep_->InRange(src); } + static void DoNothing(void*) { } + }; + Wrapper* wrapper = new Wrapper; + wrapper->rep_ = rocksdb::NewNoopTransform(); + wrapper->state_ = nullptr; + wrapper->destructor_ = &Wrapper::DoNothing; + return wrapper; +} + +rocksdb_universal_compaction_options_t* rocksdb_universal_compaction_options_create() { + rocksdb_universal_compaction_options_t* result = new rocksdb_universal_compaction_options_t; + result->rep = new rocksdb::CompactionOptionsUniversal; + return result; +} + +void rocksdb_universal_compaction_options_set_size_ratio( + rocksdb_universal_compaction_options_t* uco, int ratio) { + uco->rep->size_ratio = ratio; +} + +void rocksdb_universal_compaction_options_set_min_merge_width( + rocksdb_universal_compaction_options_t* uco, int w) { + uco->rep->min_merge_width = w; +} + +void rocksdb_universal_compaction_options_set_max_merge_width( + rocksdb_universal_compaction_options_t* uco, int w) { + uco->rep->max_merge_width = w; +} + +void rocksdb_universal_compaction_options_set_max_size_amplification_percent( + rocksdb_universal_compaction_options_t* uco, int p) { + uco->rep->max_size_amplification_percent = p; +} + +void rocksdb_universal_compaction_options_set_compression_size_percent( + rocksdb_universal_compaction_options_t* uco, int p) { + uco->rep->compression_size_percent = p; +} + +void rocksdb_universal_compaction_options_set_stop_style( + rocksdb_universal_compaction_options_t* uco, int style) { + uco->rep->stop_style = static_cast<rocksdb::CompactionStopStyle>(style); +} + +void rocksdb_universal_compaction_options_destroy( + rocksdb_universal_compaction_options_t* uco) { + delete uco->rep; + delete uco; +} + +rocksdb_fifo_compaction_options_t* rocksdb_fifo_compaction_options_create() { + rocksdb_fifo_compaction_options_t* result = new rocksdb_fifo_compaction_options_t; + result->rep = CompactionOptionsFIFO(); + return result; +} + +void rocksdb_fifo_compaction_options_set_max_table_files_size( + rocksdb_fifo_compaction_options_t* fifo_opts, uint64_t size) { + fifo_opts->rep.max_table_files_size = size; +} + +void rocksdb_fifo_compaction_options_destroy( + rocksdb_fifo_compaction_options_t* fifo_opts) { + delete fifo_opts; +} + +void rocksdb_options_set_min_level_to_compress(rocksdb_options_t* opt, int level) { + if (level >= 0) { + assert(level <= opt->rep.num_levels); + opt->rep.compression_per_level.resize(opt->rep.num_levels); + for (int i = 0; i < level; i++) { + opt->rep.compression_per_level[i] = rocksdb::kNoCompression; + } + for (int i = level; i < opt->rep.num_levels; i++) { +
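// levels at or above 'level' inherit the compression type configured above +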
opt->rep.compression_per_level[i] = opt->rep.compression; + } + } +} + +int rocksdb_livefiles_count( + const rocksdb_livefiles_t* lf) { + return static_cast<int>(lf->rep.size()); +} + +const char* rocksdb_livefiles_name( + const rocksdb_livefiles_t* lf, + int index) { + return lf->rep[index].name.c_str(); +} + +int rocksdb_livefiles_level( + const rocksdb_livefiles_t* lf, + int index) { + return lf->rep[index].level; +} + +size_t rocksdb_livefiles_size( + const rocksdb_livefiles_t* lf, + int index) { + return lf->rep[index].size; +} + +const char* rocksdb_livefiles_smallestkey( + const rocksdb_livefiles_t* lf, + int index, + size_t* size) { + *size = lf->rep[index].smallestkey.size(); + return lf->rep[index].smallestkey.data(); +} + +const char* rocksdb_livefiles_largestkey( + const rocksdb_livefiles_t* lf, + int index, + size_t* size) { + *size = lf->rep[index].largestkey.size(); + return lf->rep[index].largestkey.data(); +} + +extern void rocksdb_livefiles_destroy( + const rocksdb_livefiles_t* lf) { + delete lf; +} + +void rocksdb_get_options_from_string(const rocksdb_options_t* base_options, + const char* opts_str, + rocksdb_options_t* new_options, + char** errptr) { + SaveError(errptr, + GetOptionsFromString(base_options->rep, std::string(opts_str), + &new_options->rep)); +} + +void rocksdb_delete_file_in_range(rocksdb_t* db, const char* start_key, + size_t start_key_len, const char* limit_key, + size_t limit_key_len, char** errptr) { + Slice a, b; + SaveError( + errptr, + DeleteFilesInRange( + db->rep, db->rep->DefaultColumnFamily(), + (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr), + (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr))); +} + +void rocksdb_delete_file_in_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* limit_key, + size_t limit_key_len, char** errptr) { + Slice a, b; + SaveError( + errptr, + DeleteFilesInRange( + db->rep, column_family->rep, + (start_key ? (a = Slice(start_key, start_key_len), &a) : nullptr), + (limit_key ? (b = Slice(limit_key, limit_key_len), &b) : nullptr))); +} + +void rocksdb_free(void* ptr) { free(ptr); } + +} // end extern "C" + +#endif // !ROCKSDB_LITE diff --git a/external/rocksdb/db/c_test.c b/external/rocksdb/db/c_test.c new file mode 100755 index 0000000000..7236e01db0 --- /dev/null +++ b/external/rocksdb/db/c_test.c @@ -0,0 +1,968 @@ +/* Copyright (c) 2011 The LevelDB Authors. All rights reserved. + Use of this source code is governed by a BSD-style license that can be + found in the LICENSE file. See the AUTHORS file for names of contributors.
*/ + +#ifndef ROCKSDB_LITE // Lite does not support C API + +#include "rocksdb/c.h" + +#include <stddef.h> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> +#include <sys/types.h> +#ifndef OS_WIN +# include <unistd.h> +#endif +#include <inttypes.h> + +// Cannot use port/port.h macros as this is a C file +#ifdef OS_WIN + +#include <Windows.h> + +# define snprintf _snprintf + +// Ok for uniqueness +int geteuid() { + + int result = 0; + + result = ((int)GetCurrentProcessId() << 16); + result |= (int)GetCurrentThreadId(); + + return result; +} + +#endif + +const char* phase = ""; +static char dbname[200]; +static char dbbackupname[200]; + +static void StartPhase(const char* name) { + fprintf(stderr, "=== Test %s\n", name); + phase = name; +} + +static const char* GetTempDir(void) { + const char* ret = getenv("TEST_TMPDIR"); + if (ret == NULL || ret[0] == '\0') + ret = "/tmp"; + return ret; +} + +#define CheckNoError(err) \ + if ((err) != NULL) { \ + fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, (err)); \ + abort(); \ + } + +#define CheckCondition(cond) \ + if (!(cond)) { \ + fprintf(stderr, "%s:%d: %s: %s\n", __FILE__, __LINE__, phase, #cond); \ + abort(); \ + } + +static void CheckEqual(const char* expected, const char* v, size_t n) { + if (expected == NULL && v == NULL) { + // ok + } else if (expected != NULL && v != NULL && n == strlen(expected) && + memcmp(expected, v, n) == 0) { + // ok + return; + } else { + fprintf(stderr, "%s: expected '%s', got '%s'\n", + phase, + (expected ? expected : "(null)"), + (v ? v : "(null)")); + abort(); + } +} + +static void Free(char** ptr) { + if (*ptr) { + free(*ptr); + *ptr = NULL; + } +} + +static void CheckGet( + rocksdb_t* db, + const rocksdb_readoptions_t* options, + const char* key, + const char* expected) { + char* err = NULL; + size_t val_len; + char* val; + val = rocksdb_get(db, options, key, strlen(key), &val_len, &err); + CheckNoError(err); + CheckEqual(expected, val, val_len); + Free(&val); +} + +static void CheckGetCF( + rocksdb_t* db, + const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* handle, + const char* key, + const char* expected) { + char* err = NULL; + size_t val_len; + char* val; + val = rocksdb_get_cf(db, options, handle, key, strlen(key), &val_len, &err); + CheckNoError(err); + CheckEqual(expected, val, val_len); + Free(&val); +} + + +static void CheckIter(rocksdb_iterator_t* iter, + const char* key, const char* val) { + size_t len; + const char* str; + str = rocksdb_iter_key(iter, &len); + CheckEqual(key, str, len); + str = rocksdb_iter_value(iter, &len); + CheckEqual(val, str, len); +} + +// Callback from rocksdb_writebatch_iterate() +static void CheckPut(void* ptr, + const char* k, size_t klen, + const char* v, size_t vlen) { + int* state = (int*) ptr; + CheckCondition(*state < 2); + switch (*state) { + case 0: + CheckEqual("bar", k, klen); + CheckEqual("b", v, vlen); + break; + case 1: + CheckEqual("box", k, klen); + CheckEqual("c", v, vlen); + break; + } + (*state)++; +} + +// Callback from rocksdb_writebatch_iterate() +static void CheckDel(void* ptr, const char* k, size_t klen) { + int* state = (int*) ptr; + CheckCondition(*state == 2); + CheckEqual("bar", k, klen); + (*state)++; +} + +static void CmpDestroy(void* arg) { } + +static int CmpCompare(void* arg, const char* a, size_t alen, + const char* b, size_t blen) { + size_t n = (alen < blen) ?
alen : blen; + int r = memcmp(a, b, n); + if (r == 0) { + if (alen < blen) r = -1; + else if (alen > blen) r = +1; + } + return r; +} + +static const char* CmpName(void* arg) { + return "foo"; +} + +// Custom filter policy +static unsigned char fake_filter_result = 1; +static void FilterDestroy(void* arg) { } +static const char* FilterName(void* arg) { + return "TestFilter"; +} +static char* FilterCreate( + void* arg, + const char* const* key_array, const size_t* key_length_array, + int num_keys, + size_t* filter_length) { + *filter_length = 4; + char* result = malloc(4); + memcpy(result, "fake", 4); + return result; +} +static unsigned char FilterKeyMatch( + void* arg, + const char* key, size_t length, + const char* filter, size_t filter_length) { + CheckCondition(filter_length == 4); + CheckCondition(memcmp(filter, "fake", 4) == 0); + return fake_filter_result; +} + +// Custom compaction filter +static void CFilterDestroy(void* arg) {} +static const char* CFilterName(void* arg) { return "foo"; } +static unsigned char CFilterFilter(void* arg, int level, const char* key, + size_t key_length, + const char* existing_value, + size_t value_length, char** new_value, + size_t* new_value_length, + unsigned char* value_changed) { + if (key_length == 3) { + if (memcmp(key, "bar", key_length) == 0) { + return 1; + } else if (memcmp(key, "baz", key_length) == 0) { + *value_changed = 1; + *new_value = "newbazvalue"; + *new_value_length = 11; + return 0; + } + } + return 0; +} + +static void CFilterFactoryDestroy(void* arg) {} +static const char* CFilterFactoryName(void* arg) { return "foo"; } +static rocksdb_compactionfilter_t* CFilterCreate( + void* arg, rocksdb_compactionfiltercontext_t* context) { + return rocksdb_compactionfilter_create(NULL, CFilterDestroy, CFilterFilter, + CFilterName); +} + +static rocksdb_t* CheckCompaction(rocksdb_t* db, rocksdb_options_t* options, + rocksdb_readoptions_t* roptions, + rocksdb_writeoptions_t* woptions) { + char* err = NULL; + db = rocksdb_open(options, dbname, &err); + CheckNoError(err); + rocksdb_put(db, woptions, "foo", 3, "foovalue", 8, &err); + CheckNoError(err); + CheckGet(db, roptions, "foo", "foovalue"); + rocksdb_put(db, woptions, "bar", 3, "barvalue", 8, &err); + CheckNoError(err); + CheckGet(db, roptions, "bar", "barvalue"); + rocksdb_put(db, woptions, "baz", 3, "bazvalue", 8, &err); + CheckNoError(err); + CheckGet(db, roptions, "baz", "bazvalue"); + + // Force compaction + rocksdb_compact_range(db, NULL, 0, NULL, 0); + // should have filtered bar, but not foo + CheckGet(db, roptions, "foo", "foovalue"); + CheckGet(db, roptions, "bar", NULL); + CheckGet(db, roptions, "baz", "newbazvalue"); + return db; +} + +// Custom merge operator +static void MergeOperatorDestroy(void* arg) { } +static const char* MergeOperatorName(void* arg) { + return "TestMergeOperator"; +} +static char* MergeOperatorFullMerge( + void* arg, + const char* key, size_t key_length, + const char* existing_value, size_t existing_value_length, + const char* const* operands_list, const size_t* operands_list_length, + int num_operands, + unsigned char* success, size_t* new_value_length) { + *new_value_length = 4; + *success = 1; + char* result = malloc(4); + memcpy(result, "fake", 4); + return result; +} +static char* MergeOperatorPartialMerge( + void* arg, + const char* key, size_t key_length, + const char* const* operands_list, const size_t* operands_list_length, + int num_operands, + unsigned char* success, size_t* new_value_length) { + *new_value_length = 4; + *success = 1; + char* 
result = malloc(4); + memcpy(result, "fake", 4); + return result; +} + +int main(int argc, char** argv) { + rocksdb_t* db; + rocksdb_comparator_t* cmp; + rocksdb_cache_t* cache; + rocksdb_env_t* env; + rocksdb_options_t* options; + rocksdb_block_based_table_options_t* table_options; + rocksdb_readoptions_t* roptions; + rocksdb_writeoptions_t* woptions; + char* err = NULL; + int run = -1; + + snprintf(dbname, sizeof(dbname), + "%s/rocksdb_c_test-%d", + GetTempDir(), + ((int) geteuid())); + + snprintf(dbbackupname, sizeof(dbbackupname), + "%s/rocksdb_c_test-%d-backup", + GetTempDir(), + ((int) geteuid())); + + StartPhase("create_objects"); + cmp = rocksdb_comparator_create(NULL, CmpDestroy, CmpCompare, CmpName); + env = rocksdb_create_default_env(); + cache = rocksdb_cache_create_lru(100000); + + options = rocksdb_options_create(); + rocksdb_options_set_comparator(options, cmp); + rocksdb_options_set_error_if_exists(options, 1); + rocksdb_options_set_env(options, env); + rocksdb_options_set_info_log(options, NULL); + rocksdb_options_set_write_buffer_size(options, 100000); + rocksdb_options_set_paranoid_checks(options, 1); + rocksdb_options_set_max_open_files(options, 10); + table_options = rocksdb_block_based_options_create(); + rocksdb_block_based_options_set_block_cache(table_options, cache); + rocksdb_options_set_block_based_table_factory(options, table_options); + + rocksdb_options_set_compression(options, rocksdb_no_compression); + rocksdb_options_set_compression_options(options, -14, -1, 0, 0); + int compression_levels[] = {rocksdb_no_compression, rocksdb_no_compression, + rocksdb_no_compression, rocksdb_no_compression}; + rocksdb_options_set_compression_per_level(options, compression_levels, 4); + + roptions = rocksdb_readoptions_create(); + rocksdb_readoptions_set_verify_checksums(roptions, 1); + rocksdb_readoptions_set_fill_cache(roptions, 0); + + woptions = rocksdb_writeoptions_create(); + rocksdb_writeoptions_set_sync(woptions, 1); + + StartPhase("destroy"); + rocksdb_destroy_db(options, dbname, &err); + Free(&err); + + StartPhase("open_error"); + rocksdb_open(options, dbname, &err); + CheckCondition(err != NULL); + Free(&err); + + StartPhase("open"); + rocksdb_options_set_create_if_missing(options, 1); + db = rocksdb_open(options, dbname, &err); + CheckNoError(err); + CheckGet(db, roptions, "foo", NULL); + + StartPhase("put"); + rocksdb_put(db, woptions, "foo", 3, "hello", 5, &err); + CheckNoError(err); + CheckGet(db, roptions, "foo", "hello"); + + StartPhase("backup_and_restore"); + { + rocksdb_destroy_db(options, dbbackupname, &err); + CheckNoError(err); + + rocksdb_backup_engine_t *be = rocksdb_backup_engine_open(options, dbbackupname, &err); + CheckNoError(err); + + rocksdb_backup_engine_create_new_backup(be, db, &err); + CheckNoError(err); + + // need a change to trigger a new backup + rocksdb_delete(db, woptions, "does-not-exist", 14, &err); + CheckNoError(err); + + rocksdb_backup_engine_create_new_backup(be, db, &err); + CheckNoError(err); + + const rocksdb_backup_engine_info_t* bei = rocksdb_backup_engine_get_backup_info(be); + CheckCondition(rocksdb_backup_engine_info_count(bei) > 1); + rocksdb_backup_engine_info_destroy(bei); + + rocksdb_backup_engine_purge_old_backups(be, 1, &err); + CheckNoError(err); + + bei = rocksdb_backup_engine_get_backup_info(be); + CheckCondition(rocksdb_backup_engine_info_count(bei) == 1); + rocksdb_backup_engine_info_destroy(bei); + + rocksdb_delete(db, woptions, "foo", 3, &err); + CheckNoError(err); + + rocksdb_close(db); + + 
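// Wipe the primary database so the restore below demonstrably rebuilds it + // from the backup engine rather than from leftover files. +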
rocksdb_destroy_db(options, dbname, &err); + CheckNoError(err); + + rocksdb_restore_options_t *restore_options = rocksdb_restore_options_create(); + rocksdb_restore_options_set_keep_log_files(restore_options, 0); + rocksdb_backup_engine_restore_db_from_latest_backup(be, dbname, dbname, restore_options, &err); + CheckNoError(err); + rocksdb_restore_options_destroy(restore_options); + + rocksdb_options_set_error_if_exists(options, 0); + db = rocksdb_open(options, dbname, &err); + CheckNoError(err); + rocksdb_options_set_error_if_exists(options, 1); + + CheckGet(db, roptions, "foo", "hello"); + + rocksdb_backup_engine_close(be); + } + + StartPhase("compactall"); + rocksdb_compact_range(db, NULL, 0, NULL, 0); + CheckGet(db, roptions, "foo", "hello"); + + StartPhase("compactrange"); + rocksdb_compact_range(db, "a", 1, "z", 1); + CheckGet(db, roptions, "foo", "hello"); + + StartPhase("writebatch"); + { + rocksdb_writebatch_t* wb = rocksdb_writebatch_create(); + rocksdb_writebatch_put(wb, "foo", 3, "a", 1); + rocksdb_writebatch_clear(wb); + rocksdb_writebatch_put(wb, "bar", 3, "b", 1); + rocksdb_writebatch_put(wb, "box", 3, "c", 1); + rocksdb_writebatch_delete(wb, "bar", 3); + rocksdb_write(db, woptions, wb, &err); + CheckNoError(err); + CheckGet(db, roptions, "foo", "hello"); + CheckGet(db, roptions, "bar", NULL); + CheckGet(db, roptions, "box", "c"); + int pos = 0; + rocksdb_writebatch_iterate(wb, &pos, CheckPut, CheckDel); + CheckCondition(pos == 3); + rocksdb_writebatch_destroy(wb); + } + + StartPhase("writebatch_vectors"); + { + rocksdb_writebatch_t* wb = rocksdb_writebatch_create(); + const char* k_list[2] = { "z", "ap" }; + const size_t k_sizes[2] = { 1, 2 }; + const char* v_list[3] = { "x", "y", "z" }; + const size_t v_sizes[3] = { 1, 1, 1 }; + rocksdb_writebatch_putv(wb, 2, k_list, k_sizes, 3, v_list, v_sizes); + rocksdb_write(db, woptions, wb, &err); + CheckNoError(err); + CheckGet(db, roptions, "zap", "xyz"); + rocksdb_writebatch_delete(wb, "zap", 3); + rocksdb_write(db, woptions, wb, &err); + CheckNoError(err); + CheckGet(db, roptions, "zap", NULL); + rocksdb_writebatch_destroy(wb); + } + + StartPhase("writebatch_rep"); + { + rocksdb_writebatch_t* wb1 = rocksdb_writebatch_create(); + rocksdb_writebatch_put(wb1, "baz", 3, "d", 1); + rocksdb_writebatch_put(wb1, "quux", 4, "e", 1); + rocksdb_writebatch_delete(wb1, "quux", 4); + size_t repsize1 = 0; + const char* rep = rocksdb_writebatch_data(wb1, &repsize1); + rocksdb_writebatch_t* wb2 = rocksdb_writebatch_create_from(rep, repsize1); + CheckCondition(rocksdb_writebatch_count(wb1) == + rocksdb_writebatch_count(wb2)); + size_t repsize2 = 0; + CheckCondition( + memcmp(rep, rocksdb_writebatch_data(wb2, &repsize2), repsize1) == 0); + rocksdb_writebatch_destroy(wb1); + rocksdb_writebatch_destroy(wb2); + } + + StartPhase("iter"); + { + rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions); + CheckCondition(!rocksdb_iter_valid(iter)); + rocksdb_iter_seek_to_first(iter); + CheckCondition(rocksdb_iter_valid(iter)); + CheckIter(iter, "box", "c"); + rocksdb_iter_next(iter); + CheckIter(iter, "foo", "hello"); + rocksdb_iter_prev(iter); + CheckIter(iter, "box", "c"); + rocksdb_iter_prev(iter); + CheckCondition(!rocksdb_iter_valid(iter)); + rocksdb_iter_seek_to_last(iter); + CheckIter(iter, "foo", "hello"); + rocksdb_iter_seek(iter, "b", 1); + CheckIter(iter, "box", "c"); + rocksdb_iter_get_error(iter, &err); + CheckNoError(err); + rocksdb_iter_destroy(iter); + } + + StartPhase("multiget"); + { + const char* keys[3] = { "box", "foo", 
"notfound" }; + const size_t keys_sizes[3] = { 3, 3, 8 }; + char* vals[3]; + size_t vals_sizes[3]; + char* errs[3]; + rocksdb_multi_get(db, roptions, 3, keys, keys_sizes, vals, vals_sizes, errs); + + int i; + for (i = 0; i < 3; i++) { + CheckEqual(NULL, errs[i], 0); + switch (i) { + case 0: + CheckEqual("c", vals[i], vals_sizes[i]); + break; + case 1: + CheckEqual("hello", vals[i], vals_sizes[i]); + break; + case 2: + CheckEqual(NULL, vals[i], vals_sizes[i]); + break; + } + Free(&vals[i]); + } + } + + StartPhase("approximate_sizes"); + { + int i; + int n = 20000; + char keybuf[100]; + char valbuf[100]; + uint64_t sizes[2]; + const char* start[2] = { "a", "k00000000000000010000" }; + size_t start_len[2] = { 1, 21 }; + const char* limit[2] = { "k00000000000000010000", "z" }; + size_t limit_len[2] = { 21, 1 }; + rocksdb_writeoptions_set_sync(woptions, 0); + for (i = 0; i < n; i++) { + snprintf(keybuf, sizeof(keybuf), "k%020d", i); + snprintf(valbuf, sizeof(valbuf), "v%020d", i); + rocksdb_put(db, woptions, keybuf, strlen(keybuf), valbuf, strlen(valbuf), + &err); + CheckNoError(err); + } + rocksdb_approximate_sizes(db, 2, start, start_len, limit, limit_len, sizes); + CheckCondition(sizes[0] > 0); + CheckCondition(sizes[1] > 0); + } + + StartPhase("property"); + { + char* prop = rocksdb_property_value(db, "nosuchprop"); + CheckCondition(prop == NULL); + prop = rocksdb_property_value(db, "rocksdb.stats"); + CheckCondition(prop != NULL); + Free(&prop); + } + + StartPhase("snapshot"); + { + const rocksdb_snapshot_t* snap; + snap = rocksdb_create_snapshot(db); + rocksdb_delete(db, woptions, "foo", 3, &err); + CheckNoError(err); + rocksdb_readoptions_set_snapshot(roptions, snap); + CheckGet(db, roptions, "foo", "hello"); + rocksdb_readoptions_set_snapshot(roptions, NULL); + CheckGet(db, roptions, "foo", NULL); + rocksdb_release_snapshot(db, snap); + } + + StartPhase("repair"); + { + // If we do not compact here, then the lazy deletion of + // files (https://reviews.facebook.net/D6123) would leave + // around deleted files and the repair process will find + // those files and put them back into the database. 
+ rocksdb_compact_range(db, NULL, 0, NULL, 0); + rocksdb_close(db); + rocksdb_options_set_create_if_missing(options, 0); + rocksdb_options_set_error_if_exists(options, 0); + rocksdb_repair_db(options, dbname, &err); + CheckNoError(err); + db = rocksdb_open(options, dbname, &err); + CheckNoError(err); + CheckGet(db, roptions, "foo", NULL); + CheckGet(db, roptions, "bar", NULL); + CheckGet(db, roptions, "box", "c"); + rocksdb_options_set_create_if_missing(options, 1); + rocksdb_options_set_error_if_exists(options, 1); + } + + StartPhase("filter"); + for (run = 0; run < 2; run++) { + // First run uses custom filter, second run uses bloom filter + CheckNoError(err); + rocksdb_filterpolicy_t* policy; + if (run == 0) { + policy = rocksdb_filterpolicy_create( + NULL, FilterDestroy, FilterCreate, FilterKeyMatch, NULL, FilterName); + } else { + policy = rocksdb_filterpolicy_create_bloom(10); + } + + rocksdb_block_based_options_set_filter_policy(table_options, policy); + + // Create new database + rocksdb_close(db); + rocksdb_destroy_db(options, dbname, &err); + rocksdb_options_set_block_based_table_factory(options, table_options); + db = rocksdb_open(options, dbname, &err); + CheckNoError(err); + rocksdb_put(db, woptions, "foo", 3, "foovalue", 8, &err); + CheckNoError(err); + rocksdb_put(db, woptions, "bar", 3, "barvalue", 8, &err); + CheckNoError(err); + rocksdb_compact_range(db, NULL, 0, NULL, 0); + + fake_filter_result = 1; + CheckGet(db, roptions, "foo", "foovalue"); + CheckGet(db, roptions, "bar", "barvalue"); + if (run == 0) { + // Must not find value when custom filter returns false + fake_filter_result = 0; + CheckGet(db, roptions, "foo", NULL); + CheckGet(db, roptions, "bar", NULL); + fake_filter_result = 1; + + CheckGet(db, roptions, "foo", "foovalue"); + CheckGet(db, roptions, "bar", "barvalue"); + } + // Reset the policy + rocksdb_block_based_options_set_filter_policy(table_options, NULL); + rocksdb_options_set_block_based_table_factory(options, table_options); + } + + StartPhase("compaction_filter"); + { + rocksdb_options_t* options_with_filter = rocksdb_options_create(); + rocksdb_options_set_create_if_missing(options_with_filter, 1); + rocksdb_compactionfilter_t* cfilter; + cfilter = rocksdb_compactionfilter_create(NULL, CFilterDestroy, + CFilterFilter, CFilterName); + // Create new database + rocksdb_close(db); + rocksdb_destroy_db(options_with_filter, dbname, &err); + rocksdb_options_set_compaction_filter(options_with_filter, cfilter); + db = CheckCompaction(db, options_with_filter, roptions, woptions); + + rocksdb_options_set_compaction_filter(options_with_filter, NULL); + rocksdb_compactionfilter_destroy(cfilter); + rocksdb_options_destroy(options_with_filter); + } + + StartPhase("compaction_filter_factory"); + { + rocksdb_options_t* options_with_filter_factory = rocksdb_options_create(); + rocksdb_options_set_create_if_missing(options_with_filter_factory, 1); + rocksdb_compactionfilterfactory_t* factory; + factory = rocksdb_compactionfilterfactory_create( + NULL, CFilterFactoryDestroy, CFilterCreate, CFilterFactoryName); + // Create new database + rocksdb_close(db); + rocksdb_destroy_db(options_with_filter_factory, dbname, &err); + rocksdb_options_set_compaction_filter_factory(options_with_filter_factory, + factory); + db = CheckCompaction(db, options_with_filter_factory, roptions, woptions); + + rocksdb_options_set_compaction_filter_factory( + options_with_filter_factory, NULL); + rocksdb_options_destroy(options_with_filter_factory); + } + + StartPhase("merge_operator"); + {
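+ // Both merges below are expected to read back as "fake": the + // TestMergeOperator above ignores its inputs and always reports "fake" + // as the merge result.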
+ rocksdb_mergeoperator_t* merge_operator; + merge_operator = rocksdb_mergeoperator_create( + NULL, MergeOperatorDestroy, MergeOperatorFullMerge, + MergeOperatorPartialMerge, NULL, MergeOperatorName); + // Create new database + rocksdb_close(db); + rocksdb_destroy_db(options, dbname, &err); + rocksdb_options_set_merge_operator(options, merge_operator); + db = rocksdb_open(options, dbname, &err); + CheckNoError(err); + rocksdb_put(db, woptions, "foo", 3, "foovalue", 8, &err); + CheckNoError(err); + CheckGet(db, roptions, "foo", "foovalue"); + rocksdb_merge(db, woptions, "foo", 3, "barvalue", 8, &err); + CheckNoError(err); + CheckGet(db, roptions, "foo", "fake"); + + // Merge of a non-existing value + rocksdb_merge(db, woptions, "bar", 3, "barvalue", 8, &err); + CheckNoError(err); + CheckGet(db, roptions, "bar", "fake"); + + } + + StartPhase("columnfamilies"); + { + rocksdb_close(db); + rocksdb_destroy_db(options, dbname, &err); + CheckNoError(err) + + rocksdb_options_t* db_options = rocksdb_options_create(); + rocksdb_options_set_create_if_missing(db_options, 1); + db = rocksdb_open(db_options, dbname, &err); + CheckNoError(err) + rocksdb_column_family_handle_t* cfh; + cfh = rocksdb_create_column_family(db, db_options, "cf1", &err); + rocksdb_column_family_handle_destroy(cfh); + CheckNoError(err); + rocksdb_close(db); + + size_t cflen; + char** column_fams = rocksdb_list_column_families(db_options, dbname, &cflen, &err); + CheckNoError(err); + CheckEqual("default", column_fams[0], 7); + CheckEqual("cf1", column_fams[1], 3); + CheckCondition(cflen == 2); + rocksdb_list_column_families_destroy(column_fams, cflen); + + rocksdb_options_t* cf_options = rocksdb_options_create(); + + const char* cf_names[2] = {"default", "cf1"}; + const rocksdb_options_t* cf_opts[2] = {cf_options, cf_options}; + rocksdb_column_family_handle_t* handles[2]; + db = rocksdb_open_column_families(db_options, dbname, 2, cf_names, cf_opts, handles, &err); + CheckNoError(err); + + rocksdb_put_cf(db, woptions, handles[1], "foo", 3, "hello", 5, &err); + CheckNoError(err); + + CheckGetCF(db, roptions, handles[1], "foo", "hello"); + + rocksdb_delete_cf(db, woptions, handles[1], "foo", 3, &err); + CheckNoError(err); + + CheckGetCF(db, roptions, handles[1], "foo", NULL); + + rocksdb_writebatch_t* wb = rocksdb_writebatch_create(); + rocksdb_writebatch_put_cf(wb, handles[1], "baz", 3, "a", 1); + rocksdb_writebatch_clear(wb); + rocksdb_writebatch_put_cf(wb, handles[1], "bar", 3, "b", 1); + rocksdb_writebatch_put_cf(wb, handles[1], "box", 3, "c", 1); + rocksdb_writebatch_delete_cf(wb, handles[1], "bar", 3); + rocksdb_write(db, woptions, wb, &err); + CheckNoError(err); + CheckGetCF(db, roptions, handles[1], "baz", NULL); + CheckGetCF(db, roptions, handles[1], "bar", NULL); + CheckGetCF(db, roptions, handles[1], "box", "c"); + rocksdb_writebatch_destroy(wb); + + const char* keys[3] = { "box", "box", "barfooxx" }; + const rocksdb_column_family_handle_t* get_handles[3] = { handles[0], handles[1], handles[1] }; + const size_t keys_sizes[3] = { 3, 3, 8 }; + char* vals[3]; + size_t vals_sizes[3]; + char* errs[3]; + rocksdb_multi_get_cf(db, roptions, get_handles, 3, keys, keys_sizes, vals, vals_sizes, errs); + + int i; + for (i = 0; i < 3; i++) { + CheckEqual(NULL, errs[i], 0); + switch (i) { + case 0: + CheckEqual(NULL, vals[i], vals_sizes[i]); // wrong cf + break; + case 1: + CheckEqual("c", vals[i], vals_sizes[i]); // bingo + break; + case 2: + CheckEqual(NULL, vals[i], vals_sizes[i]); // normal not found + break; + } + Free(&vals[i]); + 
} + + rocksdb_iterator_t* iter = rocksdb_create_iterator_cf(db, roptions, handles[1]); + CheckCondition(!rocksdb_iter_valid(iter)); + rocksdb_iter_seek_to_first(iter); + CheckCondition(rocksdb_iter_valid(iter)); + + for (i = 0; rocksdb_iter_valid(iter) != 0; rocksdb_iter_next(iter)) { + i++; + } + CheckCondition(i == 1); + rocksdb_iter_get_error(iter, &err); + CheckNoError(err); + rocksdb_iter_destroy(iter); + + rocksdb_column_family_handle_t* iters_cf_handles[2] = { handles[0], handles[1] }; + rocksdb_iterator_t* iters_handles[2]; + rocksdb_create_iterators(db, roptions, iters_cf_handles, iters_handles, 2, &err); + CheckNoError(err); + + iter = iters_handles[0]; + CheckCondition(!rocksdb_iter_valid(iter)); + rocksdb_iter_seek_to_first(iter); + CheckCondition(!rocksdb_iter_valid(iter)); + rocksdb_iter_destroy(iter); + + iter = iters_handles[1]; + CheckCondition(!rocksdb_iter_valid(iter)); + rocksdb_iter_seek_to_first(iter); + CheckCondition(rocksdb_iter_valid(iter)); + + for (i = 0; rocksdb_iter_valid(iter) != 0; rocksdb_iter_next(iter)) { + i++; + } + CheckCondition(i == 1); + rocksdb_iter_get_error(iter, &err); + CheckNoError(err); + rocksdb_iter_destroy(iter); + + rocksdb_drop_column_family(db, handles[1], &err); + CheckNoError(err); + for (i = 0; i < 2; i++) { + rocksdb_column_family_handle_destroy(handles[i]); + } + rocksdb_close(db); + rocksdb_destroy_db(options, dbname, &err); + rocksdb_options_destroy(db_options); + rocksdb_options_destroy(cf_options); + } + + StartPhase("prefix"); + { + // Create new database + rocksdb_options_set_allow_mmap_reads(options, 1); + rocksdb_options_set_prefix_extractor(options, rocksdb_slicetransform_create_fixed_prefix(3)); + rocksdb_options_set_hash_skip_list_rep(options, 5000, 4, 4); + rocksdb_options_set_plain_table_factory(options, 4, 10, 0.75, 16); + + db = rocksdb_open(options, dbname, &err); + CheckNoError(err); + + rocksdb_put(db, woptions, "foo1", 4, "foo", 3, &err); + CheckNoError(err); + rocksdb_put(db, woptions, "foo2", 4, "foo", 3, &err); + CheckNoError(err); + rocksdb_put(db, woptions, "foo3", 4, "foo", 3, &err); + CheckNoError(err); + rocksdb_put(db, woptions, "bar1", 4, "bar", 3, &err); + CheckNoError(err); + rocksdb_put(db, woptions, "bar2", 4, "bar", 3, &err); + CheckNoError(err); + rocksdb_put(db, woptions, "bar3", 4, "bar", 3, &err); + CheckNoError(err); + + rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions); + CheckCondition(!rocksdb_iter_valid(iter)); + + rocksdb_iter_seek(iter, "bar", 3); + rocksdb_iter_get_error(iter, &err); + CheckNoError(err); + CheckCondition(rocksdb_iter_valid(iter)); + + CheckIter(iter, "bar1", "bar"); + rocksdb_iter_next(iter); + CheckIter(iter, "bar2", "bar"); + rocksdb_iter_next(iter); + CheckIter(iter, "bar3", "bar"); + rocksdb_iter_get_error(iter, &err); + CheckNoError(err); + rocksdb_iter_destroy(iter); + + rocksdb_close(db); + rocksdb_destroy_db(options, dbname, &err); + } + + StartPhase("cuckoo_options"); + { + rocksdb_cuckoo_table_options_t* cuckoo_options; + cuckoo_options = rocksdb_cuckoo_options_create(); + rocksdb_cuckoo_options_set_hash_ratio(cuckoo_options, 0.5); + rocksdb_cuckoo_options_set_max_search_depth(cuckoo_options, 200); + rocksdb_cuckoo_options_set_cuckoo_block_size(cuckoo_options, 10); + rocksdb_cuckoo_options_set_identity_as_first_hash(cuckoo_options, 1); + rocksdb_cuckoo_options_set_use_module_hash(cuckoo_options, 0); + rocksdb_options_set_cuckoo_table_factory(options, cuckoo_options); + + db = rocksdb_open(options, dbname, &err); + CheckNoError(err); + + 
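// rocksdb_options_set_cuckoo_table_factory() copies the option values into + // the factory, so the options handle can be destroyed right after use. +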
rocksdb_cuckoo_options_destroy(cuckoo_options); + } + + StartPhase("iterate_upper_bound"); + { + // Create new empty database + rocksdb_close(db); + rocksdb_destroy_db(options, dbname, &err); + CheckNoError(err); + + rocksdb_options_set_prefix_extractor(options, NULL); + db = rocksdb_open(options, dbname, &err); + CheckNoError(err); + + rocksdb_put(db, woptions, "a", 1, "0", 1, &err); CheckNoError(err); + rocksdb_put(db, woptions, "foo", 3, "bar", 3, &err); CheckNoError(err); + rocksdb_put(db, woptions, "foo1", 4, "bar1", 4, &err); CheckNoError(err); + rocksdb_put(db, woptions, "g1", 2, "0", 1, &err); CheckNoError(err); + + // testing basic case with no iterate_upper_bound and no prefix_extractor + { + rocksdb_readoptions_set_iterate_upper_bound(roptions, NULL, 0); + rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions); + + rocksdb_iter_seek(iter, "foo", 3); + CheckCondition(rocksdb_iter_valid(iter)); + CheckIter(iter, "foo", "bar"); + + rocksdb_iter_next(iter); + CheckCondition(rocksdb_iter_valid(iter)); + CheckIter(iter, "foo1", "bar1"); + + rocksdb_iter_next(iter); + CheckCondition(rocksdb_iter_valid(iter)); + CheckIter(iter, "g1", "0"); + + rocksdb_iter_destroy(iter); + } + + // testing iterate_upper_bound and forward iterator + // to make sure it stops at bound + { + // iterate_upper_bound points beyond the last expected entry + rocksdb_readoptions_set_iterate_upper_bound(roptions, "foo2", 4); + + rocksdb_iterator_t* iter = rocksdb_create_iterator(db, roptions); + + rocksdb_iter_seek(iter, "foo", 3); + CheckCondition(rocksdb_iter_valid(iter)); + CheckIter(iter, "foo", "bar"); + + rocksdb_iter_next(iter); + CheckCondition(rocksdb_iter_valid(iter)); + CheckIter(iter, "foo1", "bar1"); + + rocksdb_iter_next(iter); + // should stop here... + CheckCondition(!rocksdb_iter_valid(iter)); + + rocksdb_iter_destroy(iter); + } + } + + StartPhase("cleanup"); + rocksdb_close(db); + rocksdb_options_destroy(options); + rocksdb_block_based_options_destroy(table_options); + rocksdb_readoptions_destroy(roptions); + rocksdb_writeoptions_destroy(woptions); + rocksdb_cache_destroy(cache); + rocksdb_comparator_destroy(cmp); + rocksdb_env_destroy(env); + + fprintf(stderr, "PASS\n"); + return 0; +} + +#else +#include <stdio.h> + +int main() { + fprintf(stderr, "SKIPPED\n"); + return 0; +} + +#endif // !ROCKSDB_LITE diff --git a/external/rocksdb/db/column_family.cc b/external/rocksdb/db/column_family.cc new file mode 100755 index 0000000000..dcc94f9692 --- /dev/null +++ b/external/rocksdb/db/column_family.cc @@ -0,0 +1,1020 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors.
+ +#include "db/column_family.h" + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include +#include +#include +#include +#include + +#include "db/compaction_picker.h" +#include "db/db_impl.h" +#include "db/internal_stats.h" +#include "db/job_context.h" +#include "db/table_properties_collector.h" +#include "db/version_set.h" +#include "db/write_controller.h" +#include "memtable/hash_skiplist_rep.h" +#include "util/autovector.h" +#include "util/compression.h" +#include "util/options_helper.h" +#include "util/thread_status_util.h" +#include "util/xfunc.h" + +namespace rocksdb { + +ColumnFamilyHandleImpl::ColumnFamilyHandleImpl( + ColumnFamilyData* column_family_data, DBImpl* db, InstrumentedMutex* mutex) + : cfd_(column_family_data), db_(db), mutex_(mutex) { + if (cfd_ != nullptr) { + cfd_->Ref(); + } +} + +ColumnFamilyHandleImpl::~ColumnFamilyHandleImpl() { + if (cfd_ != nullptr) { + // Job id == 0 means that this is not our background process, but rather + // user thread + JobContext job_context(0); + mutex_->Lock(); + if (cfd_->Unref()) { + delete cfd_; + } + db_->FindObsoleteFiles(&job_context, false, true); + mutex_->Unlock(); + if (job_context.HaveSomethingToDelete()) { + db_->PurgeObsoleteFiles(job_context); + } + job_context.Clean(); + } +} + +uint32_t ColumnFamilyHandleImpl::GetID() const { return cfd()->GetID(); } + +const std::string& ColumnFamilyHandleImpl::GetName() const { + return cfd()->GetName(); +} + +Status ColumnFamilyHandleImpl::GetDescriptor(ColumnFamilyDescriptor* desc) { +#ifndef ROCKSDB_LITE + // accessing mutable cf-options requires db mutex. + InstrumentedMutexLock l(mutex_); + *desc = ColumnFamilyDescriptor( + cfd()->GetName(), + BuildColumnFamilyOptions(*cfd()->options(), + *cfd()->GetLatestMutableCFOptions())); + return Status::OK(); +#else + return Status::NotSupported(); +#endif // !ROCKSDB_LITE +} + +const Comparator* ColumnFamilyHandleImpl::user_comparator() const { + return cfd()->user_comparator(); +} + +void GetIntTblPropCollectorFactory( + const ColumnFamilyOptions& cf_options, + std::vector>* + int_tbl_prop_collector_factories) { + auto& collector_factories = cf_options.table_properties_collector_factories; + for (size_t i = 0; i < cf_options.table_properties_collector_factories.size(); + ++i) { + assert(collector_factories[i]); + int_tbl_prop_collector_factories->emplace_back( + new UserKeyTablePropertiesCollectorFactory(collector_factories[i])); + } + // Add collector to collect internal key statistics + int_tbl_prop_collector_factories->emplace_back( + new InternalKeyPropertiesCollectorFactory); +} + +Status CheckCompressionSupported(const ColumnFamilyOptions& cf_options) { + if (!cf_options.compression_per_level.empty()) { + for (size_t level = 0; level < cf_options.compression_per_level.size(); + ++level) { + if (!CompressionTypeSupported(cf_options.compression_per_level[level])) { + return Status::InvalidArgument( + "Compression type " + + CompressionTypeToString(cf_options.compression_per_level[level]) + + " is not linked with the binary."); + } + } + } else { + if (!CompressionTypeSupported(cf_options.compression)) { + return Status::InvalidArgument( + "Compression type " + + CompressionTypeToString(cf_options.compression) + + " is not linked with the binary."); + } + } + return Status::OK(); +} + +Status CheckConcurrentWritesSupported(const ColumnFamilyOptions& cf_options) { + if (cf_options.inplace_update_support) { + return Status::InvalidArgument( + "In-place memtable updates (inplace_update_support) is not compatible 
" + "with concurrent writes (allow_concurrent_memtable_write)"); + } + if (!cf_options.memtable_factory->IsInsertConcurrentlySupported()) { + return Status::InvalidArgument( + "Memtable doesn't concurrent writes (allow_concurrent_memtable_write)"); + } + return Status::OK(); +} + +ColumnFamilyOptions SanitizeOptions(const DBOptions& db_options, + const InternalKeyComparator* icmp, + const ColumnFamilyOptions& src) { + ColumnFamilyOptions result = src; + result.comparator = icmp; + size_t clamp_max = std::conditional< + sizeof(size_t) == 4, std::integral_constant, + std::integral_constant>::type::value; + ClipToRange(&result.write_buffer_size, ((size_t)64) << 10, clamp_max); + // if user sets arena_block_size, we trust user to use this value. Otherwise, + // calculate a proper value from writer_buffer_size; + if (result.arena_block_size <= 0) { + result.arena_block_size = result.write_buffer_size / 8; + + // Align up to 4k + const size_t align = 4 * 1024; + result.arena_block_size = + ((result.arena_block_size + align - 1) / align) * align; + } + result.min_write_buffer_number_to_merge = + std::min(result.min_write_buffer_number_to_merge, + result.max_write_buffer_number - 1); + if (result.num_levels < 1) { + result.num_levels = 1; + } + if (result.compaction_style == kCompactionStyleLevel && + result.num_levels < 2) { + result.num_levels = 2; + } + if (result.max_write_buffer_number < 2) { + result.max_write_buffer_number = 2; + } + if (result.max_write_buffer_number_to_maintain < 0) { + result.max_write_buffer_number_to_maintain = result.max_write_buffer_number; + } + // bloom filter size shouldn't exceed 1/4 of memtable size. + if (result.memtable_prefix_bloom_size_ratio > 0.25) { + result.memtable_prefix_bloom_size_ratio = 0.25; + } else if (result.memtable_prefix_bloom_size_ratio < 0) { + result.memtable_prefix_bloom_size_ratio = 0; + } + XFUNC_TEST("memtablelist_history", "transaction_xftest_SanitizeOptions", + xf_transaction_set_memtable_history1, + xf_transaction_set_memtable_history, + &result.max_write_buffer_number_to_maintain); + XFUNC_TEST("memtablelist_history_clear", "transaction_xftest_SanitizeOptions", + xf_transaction_clear_memtable_history1, + xf_transaction_clear_memtable_history, + &result.max_write_buffer_number_to_maintain); + + if (!result.prefix_extractor) { + assert(result.memtable_factory); + Slice name = result.memtable_factory->Name(); + if (name.compare("HashSkipListRepFactory") == 0 || + name.compare("HashLinkListRepFactory") == 0) { + result.memtable_factory = std::make_shared(); + } + } + + if (result.compaction_style == kCompactionStyleFIFO) { + result.num_levels = 1; + // since we delete level0 files in FIFO compaction when there are too many + // of them, these options don't really mean anything + result.level0_file_num_compaction_trigger = std::numeric_limits::max(); + result.level0_slowdown_writes_trigger = std::numeric_limits::max(); + result.level0_stop_writes_trigger = std::numeric_limits::max(); + } + + if (result.level0_file_num_compaction_trigger == 0) { + Warn(db_options.info_log.get(), + "level0_file_num_compaction_trigger cannot be 0"); + result.level0_file_num_compaction_trigger = 1; + } + + if (result.level0_stop_writes_trigger < + result.level0_slowdown_writes_trigger || + result.level0_slowdown_writes_trigger < + result.level0_file_num_compaction_trigger) { + Warn(db_options.info_log.get(), + "This condition must be satisfied: " + "level0_stop_writes_trigger(%d) >= " + "level0_slowdown_writes_trigger(%d) >= " + 
"level0_file_num_compaction_trigger(%d)", + result.level0_stop_writes_trigger, + result.level0_slowdown_writes_trigger, + result.level0_file_num_compaction_trigger); + if (result.level0_slowdown_writes_trigger < + result.level0_file_num_compaction_trigger) { + result.level0_slowdown_writes_trigger = + result.level0_file_num_compaction_trigger; + } + if (result.level0_stop_writes_trigger < + result.level0_slowdown_writes_trigger) { + result.level0_stop_writes_trigger = result.level0_slowdown_writes_trigger; + } + Warn(db_options.info_log.get(), + "Adjust the value to " + "level0_stop_writes_trigger(%d)" + "level0_slowdown_writes_trigger(%d)" + "level0_file_num_compaction_trigger(%d)", + result.level0_stop_writes_trigger, + result.level0_slowdown_writes_trigger, + result.level0_file_num_compaction_trigger); + } + + if (result.soft_pending_compaction_bytes_limit == 0) { + result.soft_pending_compaction_bytes_limit = + result.hard_pending_compaction_bytes_limit; + } else if (result.hard_pending_compaction_bytes_limit > 0 && + result.soft_pending_compaction_bytes_limit > + result.hard_pending_compaction_bytes_limit) { + result.soft_pending_compaction_bytes_limit = + result.hard_pending_compaction_bytes_limit; + } + + if (result.level_compaction_dynamic_level_bytes) { + if (result.compaction_style != kCompactionStyleLevel || + db_options.db_paths.size() > 1U) { + // 1. level_compaction_dynamic_level_bytes only makes sense for + // level-based compaction. + // 2. we don't yet know how to make both of this feature and multiple + // DB path work. + result.level_compaction_dynamic_level_bytes = false; + } + } + + return result; +} + +int SuperVersion::dummy = 0; +void* const SuperVersion::kSVInUse = &SuperVersion::dummy; +void* const SuperVersion::kSVObsolete = nullptr; + +SuperVersion::~SuperVersion() { + for (auto td : to_delete) { + delete td; + } +} + +SuperVersion* SuperVersion::Ref() { + refs.fetch_add(1, std::memory_order_relaxed); + return this; +} + +bool SuperVersion::Unref() { + // fetch_sub returns the previous value of ref + uint32_t previous_refs = refs.fetch_sub(1); + assert(previous_refs > 0); + return previous_refs == 1; +} + +void SuperVersion::Cleanup() { + assert(refs.load(std::memory_order_relaxed) == 0); + imm->Unref(&to_delete); + MemTable* m = mem->Unref(); + if (m != nullptr) { + auto* memory_usage = current->cfd()->imm()->current_memory_usage(); + assert(*memory_usage >= m->ApproximateMemoryUsage()); + *memory_usage -= m->ApproximateMemoryUsage(); + to_delete.push_back(m); + } + current->Unref(); +} + +void SuperVersion::Init(MemTable* new_mem, MemTableListVersion* new_imm, + Version* new_current) { + mem = new_mem; + imm = new_imm; + current = new_current; + mem->Ref(); + imm->Ref(); + current->Ref(); + refs.store(1, std::memory_order_relaxed); +} + +namespace { +void SuperVersionUnrefHandle(void* ptr) { + // UnrefHandle is called when a thread exists or a ThreadLocalPtr gets + // destroyed. When former happens, the thread shouldn't see kSVInUse. + // When latter happens, we are in ~ColumnFamilyData(), no get should happen as + // well. 
+ SuperVersion* sv = static_cast(ptr); + if (sv->Unref()) { + sv->db_mutex->Lock(); + sv->Cleanup(); + sv->db_mutex->Unlock(); + delete sv; + } +} +} // anonymous namespace + +ColumnFamilyData::ColumnFamilyData( + uint32_t id, const std::string& name, Version* _dummy_versions, + Cache* _table_cache, WriteBufferManager* write_buffer_manager, + const ColumnFamilyOptions& cf_options, const DBOptions* db_options, + const EnvOptions& env_options, ColumnFamilySet* column_family_set) + : id_(id), + name_(name), + dummy_versions_(_dummy_versions), + current_(nullptr), + refs_(0), + dropped_(false), + internal_comparator_(cf_options.comparator), + options_(*db_options, + SanitizeOptions(*db_options, &internal_comparator_, cf_options)), + ioptions_(options_), + mutable_cf_options_(options_, ioptions_), + write_buffer_manager_(write_buffer_manager), + mem_(nullptr), + imm_(options_.min_write_buffer_number_to_merge, + options_.max_write_buffer_number_to_maintain), + super_version_(nullptr), + super_version_number_(0), + local_sv_(new ThreadLocalPtr(&SuperVersionUnrefHandle)), + next_(nullptr), + prev_(nullptr), + log_number_(0), + column_family_set_(column_family_set), + pending_flush_(false), + pending_compaction_(false), + prev_compaction_needed_bytes_(0) { + Ref(); + + // Convert user defined table properties collector factories to internal ones. + GetIntTblPropCollectorFactory(options_, &int_tbl_prop_collector_factories_); + + // if _dummy_versions is nullptr, then this is a dummy column family. + if (_dummy_versions != nullptr) { + internal_stats_.reset( + new InternalStats(ioptions_.num_levels, db_options->env, this)); + table_cache_.reset(new TableCache(ioptions_, env_options, _table_cache)); + if (ioptions_.compaction_style == kCompactionStyleLevel) { + compaction_picker_.reset( + new LevelCompactionPicker(ioptions_, &internal_comparator_)); +#ifndef ROCKSDB_LITE + } else if (ioptions_.compaction_style == kCompactionStyleUniversal) { + compaction_picker_.reset( + new UniversalCompactionPicker(ioptions_, &internal_comparator_)); + } else if (ioptions_.compaction_style == kCompactionStyleFIFO) { + compaction_picker_.reset( + new FIFOCompactionPicker(ioptions_, &internal_comparator_)); + } else if (ioptions_.compaction_style == kCompactionStyleNone) { + compaction_picker_.reset(new NullCompactionPicker( + ioptions_, &internal_comparator_)); + Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log, + "Column family %s does not use any background compaction. " + "Compactions can only be done via CompactFiles\n", + GetName().c_str()); +#endif // !ROCKSDB_LITE + } else { + Log(InfoLogLevel::ERROR_LEVEL, ioptions_.info_log, + "Unable to recognize the specified compaction style %d. 
" + "Column family %s will use kCompactionStyleLevel.\n", + ioptions_.compaction_style, GetName().c_str()); + compaction_picker_.reset( + new LevelCompactionPicker(ioptions_, &internal_comparator_)); + } + + if (column_family_set_->NumberOfColumnFamilies() < 10) { + Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log, + "--------------- Options for column family [%s]:\n", name.c_str()); + options_.DumpCFOptions(ioptions_.info_log); + } else { + Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log, + "\t(skipping printing options)\n"); + } + } + + RecalculateWriteStallConditions(mutable_cf_options_); +} + +// DB mutex held +ColumnFamilyData::~ColumnFamilyData() { + assert(refs_.load(std::memory_order_relaxed) == 0); + // remove from linked list + auto prev = prev_; + auto next = next_; + prev->next_ = next; + next->prev_ = prev; + + if (!dropped_ && column_family_set_ != nullptr) { + // If it's dropped, it's already removed from column family set + // If column_family_set_ == nullptr, this is dummy CFD and not in + // ColumnFamilySet + column_family_set_->RemoveColumnFamily(this); + } + + if (current_ != nullptr) { + current_->Unref(); + } + + // It would be wrong if this ColumnFamilyData is in flush_queue_ or + // compaction_queue_ and we destroyed it + assert(!pending_flush_); + assert(!pending_compaction_); + + if (super_version_ != nullptr) { + // Release SuperVersion reference kept in ThreadLocalPtr. + // This must be done outside of mutex_ since unref handler can lock mutex. + super_version_->db_mutex->Unlock(); + local_sv_.reset(); + super_version_->db_mutex->Lock(); + + bool is_last_reference __attribute__((unused)); + is_last_reference = super_version_->Unref(); + assert(is_last_reference); + super_version_->Cleanup(); + delete super_version_; + super_version_ = nullptr; + } + + if (dummy_versions_ != nullptr) { + // List must be empty + assert(dummy_versions_->TEST_Next() == dummy_versions_); + bool deleted __attribute__((unused)) = dummy_versions_->Unref(); + assert(deleted); + } + + if (mem_ != nullptr) { + delete mem_->Unref(); + } + autovector to_delete; + imm_.current()->Unref(&to_delete); + for (MemTable* m : to_delete) { + delete m; + } +} + +void ColumnFamilyData::SetDropped() { + // can't drop default CF + assert(id_ != 0); + dropped_ = true; + write_controller_token_.reset(); + + // remove from column_family_set + column_family_set_->RemoveColumnFamily(this); +} + +const double kSlowdownRatio = 1.2; + +namespace { +std::unique_ptr SetupDelay( + uint64_t max_write_rate, WriteController* write_controller, + uint64_t compaction_needed_bytes, uint64_t prev_compaction_neeed_bytes, + bool auto_comapctions_disabled) { + const uint64_t kMinWriteRate = 1024u; // Minimum write rate 1KB/s. + + uint64_t write_rate = write_controller->delayed_write_rate(); + + if (auto_comapctions_disabled) { + // When auto compaction is disabled, always use the value user gave. + write_rate = max_write_rate; + } else if (write_controller->NeedsDelay() && max_write_rate > kMinWriteRate) { + // If user gives rate less than kMinWriteRate, don't adjust it. + // + // If already delayed, need to adjust based on previous compaction debt. + // When there are two or more column families require delay, we always + // increase or reduce write rate based on information for one single + // column family. It is likely to be OK but we can improve if there is a + // problem. 
+ // Ignore compaction_needed_bytes = 0 case because compaction_needed_bytes + // is only available in level-based compaction + // + // If the compaction debt stays the same as previously, we also further slow + // down. It usually means a mem table is full. It's mainly for the case + // where both of flush and compaction are much slower than the speed we + // insert to mem tables, so we need to actively slow down before we get + // feedback signal from compaction and flushes to avoid the full stop + // because of hitting the max write buffer number. + if (prev_compaction_neeed_bytes > 0 && + prev_compaction_neeed_bytes <= compaction_needed_bytes) { + write_rate = static_cast(static_cast(write_rate) / + kSlowdownRatio); + if (write_rate < kMinWriteRate) { + write_rate = kMinWriteRate; + } + } else if (prev_compaction_neeed_bytes > compaction_needed_bytes) { + // We are speeding up by ratio of kSlowdownRatio when we have paid + // compaction debt. But we'll never speed up to faster than the write rate + // given by users. + write_rate = static_cast(static_cast(write_rate) * + kSlowdownRatio); + if (write_rate > max_write_rate) { + write_rate = max_write_rate; + } + } + } + return write_controller->GetDelayToken(write_rate); +} + +int GetL0ThresholdSpeedupCompaction(int level0_file_num_compaction_trigger, + int level0_slowdown_writes_trigger) { + // SanitizeOptions() ensures it. + assert(level0_file_num_compaction_trigger <= level0_slowdown_writes_trigger); + + // 1/4 of the way between L0 compaction trigger threshold and slowdown + // condition. + // Or twice as compaction trigger, if it is smaller. + return std::min(level0_file_num_compaction_trigger * 2, + level0_file_num_compaction_trigger + + (level0_slowdown_writes_trigger - + level0_file_num_compaction_trigger) / + 4); +} +} // namespace + +void ColumnFamilyData::RecalculateWriteStallConditions( + const MutableCFOptions& mutable_cf_options) { + if (current_ != nullptr) { + auto* vstorage = current_->storage_info(); + auto write_controller = column_family_set_->write_controller_; + uint64_t compaction_needed_bytes = + vstorage->estimated_compaction_needed_bytes(); + + if (imm()->NumNotFlushed() >= mutable_cf_options.max_write_buffer_number) { + write_controller_token_ = write_controller->GetStopToken(); + internal_stats_->AddCFStats(InternalStats::MEMTABLE_COMPACTION, 1); + Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log, + "[%s] Stopping writes because we have %d immutable memtables " + "(waiting for flush), max_write_buffer_number is set to %d", + name_.c_str(), imm()->NumNotFlushed(), + mutable_cf_options.max_write_buffer_number); + } else if (!mutable_cf_options.disable_auto_compactions && + vstorage->l0_delay_trigger_count() >= + mutable_cf_options.level0_stop_writes_trigger) { + write_controller_token_ = write_controller->GetStopToken(); + internal_stats_->AddCFStats(InternalStats::LEVEL0_NUM_FILES_TOTAL, 1); + if (compaction_picker_->IsLevel0CompactionInProgress()) { + internal_stats_->AddCFStats( + InternalStats::LEVEL0_NUM_FILES_WITH_COMPACTION, 1); + } + Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log, + "[%s] Stopping writes because we have %d level-0 files", + name_.c_str(), vstorage->l0_delay_trigger_count()); + } else if (!mutable_cf_options.disable_auto_compactions && + mutable_cf_options.hard_pending_compaction_bytes_limit > 0 && + compaction_needed_bytes >= + mutable_cf_options.hard_pending_compaction_bytes_limit) { + write_controller_token_ = write_controller->GetStopToken(); + internal_stats_->AddCFStats( + 
InternalStats::HARD_PENDING_COMPACTION_BYTES_LIMIT, 1); + Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log, + "[%s] Stopping writes because of estimated pending compaction " + "bytes %" PRIu64, + name_.c_str(), compaction_needed_bytes); + } else if (mutable_cf_options.max_write_buffer_number > 3 && + imm()->NumNotFlushed() >= + mutable_cf_options.max_write_buffer_number - 1) { + write_controller_token_ = + SetupDelay(ioptions_.delayed_write_rate, write_controller, + compaction_needed_bytes, prev_compaction_needed_bytes_, + mutable_cf_options.disable_auto_compactions); + internal_stats_->AddCFStats(InternalStats::MEMTABLE_SLOWDOWN, 1); + Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log, + "[%s] Stalling writes because we have %d immutable memtables " + "(waiting for flush), max_write_buffer_number is set to %d " + "rate %" PRIu64, + name_.c_str(), imm()->NumNotFlushed(), + mutable_cf_options.max_write_buffer_number, + write_controller->delayed_write_rate()); + } else if (!mutable_cf_options.disable_auto_compactions && + mutable_cf_options.level0_slowdown_writes_trigger >= 0 && + vstorage->l0_delay_trigger_count() >= + mutable_cf_options.level0_slowdown_writes_trigger) { + write_controller_token_ = + SetupDelay(ioptions_.delayed_write_rate, write_controller, + compaction_needed_bytes, prev_compaction_needed_bytes_, + mutable_cf_options.disable_auto_compactions); + internal_stats_->AddCFStats(InternalStats::LEVEL0_SLOWDOWN_TOTAL, 1); + if (compaction_picker_->IsLevel0CompactionInProgress()) { + internal_stats_->AddCFStats( + InternalStats::LEVEL0_SLOWDOWN_WITH_COMPACTION, 1); + } + Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log, + "[%s] Stalling writes because we have %d level-0 files " + "rate %" PRIu64, + name_.c_str(), vstorage->l0_delay_trigger_count(), + write_controller->delayed_write_rate()); + } else if (!mutable_cf_options.disable_auto_compactions && + mutable_cf_options.soft_pending_compaction_bytes_limit > 0 && + vstorage->estimated_compaction_needed_bytes() >= + mutable_cf_options.soft_pending_compaction_bytes_limit) { + write_controller_token_ = + SetupDelay(ioptions_.delayed_write_rate, write_controller, + compaction_needed_bytes, prev_compaction_needed_bytes_, + mutable_cf_options.disable_auto_compactions); + internal_stats_->AddCFStats( + InternalStats::SOFT_PENDING_COMPACTION_BYTES_LIMIT, 1); + Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log, + "[%s] Stalling writes because of estimated pending compaction " + "bytes %" PRIu64 " rate %" PRIu64, + name_.c_str(), vstorage->estimated_compaction_needed_bytes(), + write_controller->delayed_write_rate()); + } else if (vstorage->l0_delay_trigger_count() >= + GetL0ThresholdSpeedupCompaction( + mutable_cf_options.level0_file_num_compaction_trigger, + mutable_cf_options.level0_slowdown_writes_trigger)) { + write_controller_token_ = write_controller->GetCompactionPressureToken(); + Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log, + "[%s] Increasing compaction threads because we have %d level-0 " + "files ", + name_.c_str(), vstorage->l0_delay_trigger_count()); + } else if (vstorage->estimated_compaction_needed_bytes() >= + mutable_cf_options.soft_pending_compaction_bytes_limit / 4) { + // Increase compaction threads if bytes needed for compaction exceeds + // 1/4 of threshold for slowing down. + // If soft pending compaction byte limit is not set, always speed up + // compaction. 
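+      // Editor's summary of the ladder above, from most to least severe
+      // (added commentary):
+      //   stop:     memtable count at max, L0 stop trigger, or the hard
+      //             pending-compaction-bytes limit;
+      //   delay:    memtable count within one of max, L0 slowdown trigger,
+      //             or the soft pending-compaction-bytes limit;
+      //   pressure: request extra compaction threads before stalling.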
+ write_controller_token_ = write_controller->GetCompactionPressureToken(); + if (mutable_cf_options.soft_pending_compaction_bytes_limit > 0) { + Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log, + "[%s] Increasing compaction threads because of estimated pending " + "compaction " + "bytes %" PRIu64, + name_.c_str(), vstorage->estimated_compaction_needed_bytes()); + } + } else { + write_controller_token_.reset(); + } + prev_compaction_needed_bytes_ = compaction_needed_bytes; + } +} + +const EnvOptions* ColumnFamilyData::soptions() const { + return &(column_family_set_->env_options_); +} + +void ColumnFamilyData::SetCurrent(Version* current_version) { + current_ = current_version; +} + +uint64_t ColumnFamilyData::GetNumLiveVersions() const { + return VersionSet::GetNumLiveVersions(dummy_versions_); +} + +uint64_t ColumnFamilyData::GetTotalSstFilesSize() const { + return VersionSet::GetTotalSstFilesSize(dummy_versions_); +} + +MemTable* ColumnFamilyData::ConstructNewMemtable( + const MutableCFOptions& mutable_cf_options, SequenceNumber earliest_seq) { + assert(current_ != nullptr); + return new MemTable(internal_comparator_, ioptions_, mutable_cf_options, + write_buffer_manager_, earliest_seq); +} + +void ColumnFamilyData::CreateNewMemtable( + const MutableCFOptions& mutable_cf_options, SequenceNumber earliest_seq) { + if (mem_ != nullptr) { + delete mem_->Unref(); + } + SetMemtable(ConstructNewMemtable(mutable_cf_options, earliest_seq)); + mem_->Ref(); +} + +bool ColumnFamilyData::NeedsCompaction() const { + return compaction_picker_->NeedsCompaction(current_->storage_info()); +} + +Compaction* ColumnFamilyData::PickCompaction( + const MutableCFOptions& mutable_options, LogBuffer* log_buffer) { + auto* result = compaction_picker_->PickCompaction( + GetName(), mutable_options, current_->storage_info(), log_buffer); + if (result != nullptr) { + result->SetInputVersion(current_); + } + return result; +} + +const int ColumnFamilyData::kCompactAllLevels = -1; +const int ColumnFamilyData::kCompactToBaseLevel = -2; + +Compaction* ColumnFamilyData::CompactRange( + const MutableCFOptions& mutable_cf_options, int input_level, + int output_level, uint32_t output_path_id, const InternalKey* begin, + const InternalKey* end, InternalKey** compaction_end, bool* conflict) { + auto* result = compaction_picker_->CompactRange( + GetName(), mutable_cf_options, current_->storage_info(), input_level, + output_level, output_path_id, begin, end, compaction_end, conflict); + if (result != nullptr) { + result->SetInputVersion(current_); + } + return result; +} + +SuperVersion* ColumnFamilyData::GetReferencedSuperVersion( + InstrumentedMutex* db_mutex) { + SuperVersion* sv = nullptr; + sv = GetThreadLocalSuperVersion(db_mutex); + sv->Ref(); + if (!ReturnThreadLocalSuperVersion(sv)) { + sv->Unref(); + } + return sv; +} + +SuperVersion* ColumnFamilyData::GetThreadLocalSuperVersion( + InstrumentedMutex* db_mutex) { + SuperVersion* sv = nullptr; + // The SuperVersion is cached in thread local storage to avoid acquiring + // mutex when SuperVersion does not change since the last use. When a new + // SuperVersion is installed, the compaction or flush thread cleans up + // cached SuperVersion in all existing thread local storage. To avoid + // acquiring mutex for this operation, we use atomic Swap() on the thread + // local pointer to guarantee exclusive access. If the thread local pointer + // is being used while a new SuperVersion is installed, the cached + // SuperVersion can become stale. 
In that case, the background thread would + // have swapped in kSVObsolete. We re-check the value at when returning + // SuperVersion back to thread local, with an atomic compare and swap. + // The superversion will need to be released if detected to be stale. + void* ptr = local_sv_->Swap(SuperVersion::kSVInUse); + // Invariant: + // (1) Scrape (always) installs kSVObsolete in ThreadLocal storage + // (2) the Swap above (always) installs kSVInUse, ThreadLocal storage + // should only keep kSVInUse before ReturnThreadLocalSuperVersion call + // (if no Scrape happens). + assert(ptr != SuperVersion::kSVInUse); + sv = static_cast(ptr); + if (sv == SuperVersion::kSVObsolete || + sv->version_number != super_version_number_.load()) { + RecordTick(ioptions_.statistics, NUMBER_SUPERVERSION_ACQUIRES); + SuperVersion* sv_to_delete = nullptr; + + if (sv && sv->Unref()) { + RecordTick(ioptions_.statistics, NUMBER_SUPERVERSION_CLEANUPS); + db_mutex->Lock(); + // NOTE: underlying resources held by superversion (sst files) might + // not be released until the next background job. + sv->Cleanup(); + sv_to_delete = sv; + } else { + db_mutex->Lock(); + } + sv = super_version_->Ref(); + db_mutex->Unlock(); + + delete sv_to_delete; + } + assert(sv != nullptr); + return sv; +} + +bool ColumnFamilyData::ReturnThreadLocalSuperVersion(SuperVersion* sv) { + assert(sv != nullptr); + // Put the SuperVersion back + void* expected = SuperVersion::kSVInUse; + if (local_sv_->CompareAndSwap(static_cast(sv), expected)) { + // When we see kSVInUse in the ThreadLocal, we are sure ThreadLocal + // storage has not been altered and no Scrape has happened. The + // SuperVersion is still current. + return true; + } else { + // ThreadLocal scrape happened in the process of this GetImpl call (after + // thread local Swap() at the beginning and before CompareAndSwap()). + // This means the SuperVersion it holds is obsolete. 
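+    // Editor's note: this assumes ThreadLocalPtr::CompareAndSwap has
+    // compare_exchange semantics, writing the observed value back into
+    // `expected` on failure -- which is what the assert below checks.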
+ assert(expected == SuperVersion::kSVObsolete); + } + return false; +} + +SuperVersion* ColumnFamilyData::InstallSuperVersion( + SuperVersion* new_superversion, InstrumentedMutex* db_mutex) { + db_mutex->AssertHeld(); + return InstallSuperVersion(new_superversion, db_mutex, mutable_cf_options_); +} + +SuperVersion* ColumnFamilyData::InstallSuperVersion( + SuperVersion* new_superversion, InstrumentedMutex* db_mutex, + const MutableCFOptions& mutable_cf_options) { + new_superversion->db_mutex = db_mutex; + new_superversion->mutable_cf_options = mutable_cf_options; + new_superversion->Init(mem_, imm_.current(), current_); + SuperVersion* old_superversion = super_version_; + super_version_ = new_superversion; + ++super_version_number_; + super_version_->version_number = super_version_number_; + // Reset SuperVersions cached in thread local storage + ResetThreadLocalSuperVersions(); + + RecalculateWriteStallConditions(mutable_cf_options); + + if (old_superversion != nullptr && old_superversion->Unref()) { + old_superversion->Cleanup(); + return old_superversion; // will let caller delete outside of mutex + } + return nullptr; +} + +void ColumnFamilyData::ResetThreadLocalSuperVersions() { + autovector sv_ptrs; + local_sv_->Scrape(&sv_ptrs, SuperVersion::kSVObsolete); + for (auto ptr : sv_ptrs) { + assert(ptr); + if (ptr == SuperVersion::kSVInUse) { + continue; + } + auto sv = static_cast(ptr); + if (sv->Unref()) { + sv->Cleanup(); + delete sv; + } + } +} + +#ifndef ROCKSDB_LITE +Status ColumnFamilyData::SetOptions( + const std::unordered_map& options_map) { + MutableCFOptions new_mutable_cf_options; + Status s = GetMutableOptionsFromStrings(mutable_cf_options_, options_map, + &new_mutable_cf_options); + if (s.ok()) { + mutable_cf_options_ = new_mutable_cf_options; + mutable_cf_options_.RefreshDerivedOptions(ioptions_); + } + return s; +} +#endif // ROCKSDB_LITE + +ColumnFamilySet::ColumnFamilySet(const std::string& dbname, + const DBOptions* db_options, + const EnvOptions& env_options, + Cache* table_cache, + WriteBufferManager* write_buffer_manager, + WriteController* write_controller) + : max_column_family_(0), + dummy_cfd_(new ColumnFamilyData(0, "", nullptr, nullptr, nullptr, + ColumnFamilyOptions(), db_options, + env_options, nullptr)), + default_cfd_cache_(nullptr), + db_name_(dbname), + db_options_(db_options), + env_options_(env_options), + table_cache_(table_cache), + write_buffer_manager_(write_buffer_manager), + write_controller_(write_controller) { + // initialize linked list + dummy_cfd_->prev_ = dummy_cfd_; + dummy_cfd_->next_ = dummy_cfd_; +} + +ColumnFamilySet::~ColumnFamilySet() { + while (column_family_data_.size() > 0) { + // cfd destructor will delete itself from column_family_data_ + auto cfd = column_family_data_.begin()->second; + cfd->Unref(); + delete cfd; + } + dummy_cfd_->Unref(); + delete dummy_cfd_; +} + +ColumnFamilyData* ColumnFamilySet::GetDefault() const { + assert(default_cfd_cache_ != nullptr); + return default_cfd_cache_; +} + +ColumnFamilyData* ColumnFamilySet::GetColumnFamily(uint32_t id) const { + auto cfd_iter = column_family_data_.find(id); + if (cfd_iter != column_family_data_.end()) { + return cfd_iter->second; + } else { + return nullptr; + } +} + +ColumnFamilyData* ColumnFamilySet::GetColumnFamily(const std::string& name) + const { + auto cfd_iter = column_families_.find(name); + if (cfd_iter != column_families_.end()) { + auto cfd = GetColumnFamily(cfd_iter->second); + assert(cfd != nullptr); + return cfd; + } else { + return nullptr; + } +} + 
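+// Editor's sketch (hypothetical caller, not upstream code): the intended
+// usage of the thread-local SuperVersion machinery above mirrors
+// GetReferencedSuperVersion():
+//
+//   SuperVersion* sv = cfd->GetThreadLocalSuperVersion(db_mutex);
+//   // ... read through sv->mem, sv->imm and sv->current ...
+//   if (!cfd->ReturnThreadLocalSuperVersion(sv) && sv->Unref()) {
+//     // stale copy: Cleanup() and delete under the DB mutex, as
+//     // SuperVersionUnrefHandle does
+//   }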
+uint32_t ColumnFamilySet::GetNextColumnFamilyID() { + return ++max_column_family_; +} + +uint32_t ColumnFamilySet::GetMaxColumnFamily() { return max_column_family_; } + +void ColumnFamilySet::UpdateMaxColumnFamily(uint32_t new_max_column_family) { + max_column_family_ = std::max(new_max_column_family, max_column_family_); +} + +size_t ColumnFamilySet::NumberOfColumnFamilies() const { + return column_families_.size(); +} + +// under a DB mutex AND write thread +ColumnFamilyData* ColumnFamilySet::CreateColumnFamily( + const std::string& name, uint32_t id, Version* dummy_versions, + const ColumnFamilyOptions& options) { + assert(column_families_.find(name) == column_families_.end()); + ColumnFamilyData* new_cfd = new ColumnFamilyData( + id, name, dummy_versions, table_cache_, write_buffer_manager_, options, + db_options_, env_options_, this); + column_families_.insert({name, id}); + column_family_data_.insert({id, new_cfd}); + max_column_family_ = std::max(max_column_family_, id); + // add to linked list + new_cfd->next_ = dummy_cfd_; + auto prev = dummy_cfd_->prev_; + new_cfd->prev_ = prev; + prev->next_ = new_cfd; + dummy_cfd_->prev_ = new_cfd; + if (id == 0) { + default_cfd_cache_ = new_cfd; + } + return new_cfd; +} + +// REQUIRES: DB mutex held +void ColumnFamilySet::FreeDeadColumnFamilies() { + autovector to_delete; + for (auto cfd = dummy_cfd_->next_; cfd != dummy_cfd_; cfd = cfd->next_) { + if (cfd->refs_.load(std::memory_order_relaxed) == 0) { + to_delete.push_back(cfd); + } + } + for (auto cfd : to_delete) { + // this is very rare, so it's not a problem that we do it under a mutex + delete cfd; + } +} + +// under a DB mutex AND from a write thread +void ColumnFamilySet::RemoveColumnFamily(ColumnFamilyData* cfd) { + auto cfd_iter = column_family_data_.find(cfd->GetID()); + assert(cfd_iter != column_family_data_.end()); + column_family_data_.erase(cfd_iter); + column_families_.erase(cfd->GetName()); +} + +// under a DB mutex OR from a write thread +bool ColumnFamilyMemTablesImpl::Seek(uint32_t column_family_id) { + if (column_family_id == 0) { + // optimization for common case + current_ = column_family_set_->GetDefault(); + } else { + current_ = column_family_set_->GetColumnFamily(column_family_id); + } + handle_.SetCFD(current_); + return current_ != nullptr; +} + +uint64_t ColumnFamilyMemTablesImpl::GetLogNumber() const { + assert(current_ != nullptr); + return current_->GetLogNumber(); +} + +MemTable* ColumnFamilyMemTablesImpl::GetMemTable() const { + assert(current_ != nullptr); + return current_->mem(); +} + +ColumnFamilyHandle* ColumnFamilyMemTablesImpl::GetColumnFamilyHandle() { + assert(current_ != nullptr); + return &handle_; +} + +uint32_t GetColumnFamilyID(ColumnFamilyHandle* column_family) { + uint32_t column_family_id = 0; + if (column_family != nullptr) { + auto cfh = reinterpret_cast(column_family); + column_family_id = cfh->GetID(); + } + return column_family_id; +} + +const Comparator* GetColumnFamilyUserComparator( + ColumnFamilyHandle* column_family) { + if (column_family != nullptr) { + auto cfh = reinterpret_cast(column_family); + return cfh->user_comparator(); + } + return nullptr; +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/column_family.h b/external/rocksdb/db/column_family.h new file mode 100755 index 0000000000..43f249b257 --- /dev/null +++ b/external/rocksdb/db/column_family.h @@ -0,0 +1,551 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once + +#include +#include +#include +#include + +#include "db/memtable_list.h" +#include "db/write_batch_internal.h" +#include "db/write_controller.h" +#include "db/table_cache.h" +#include "db/table_properties_collector.h" +#include "rocksdb/compaction_job_stats.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/options.h" +#include "util/mutable_cf_options.h" +#include "util/thread_local.h" + +namespace rocksdb { + +class Version; +class VersionSet; +class MemTable; +class MemTableListVersion; +class CompactionPicker; +class Compaction; +class InternalKey; +class InternalStats; +class ColumnFamilyData; +class DBImpl; +class LogBuffer; +class InstrumentedMutex; +class InstrumentedMutexLock; + +extern const double kSlowdownRatio; + +// ColumnFamilyHandleImpl is the class that clients use to access different +// column families. It has non-trivial destructor, which gets called when client +// is done using the column family +class ColumnFamilyHandleImpl : public ColumnFamilyHandle { + public: + // create while holding the mutex + ColumnFamilyHandleImpl( + ColumnFamilyData* cfd, DBImpl* db, InstrumentedMutex* mutex); + // destroy without mutex + virtual ~ColumnFamilyHandleImpl(); + virtual ColumnFamilyData* cfd() const { return cfd_; } + virtual const Comparator* user_comparator() const; + + virtual uint32_t GetID() const override; + virtual const std::string& GetName() const override; + virtual Status GetDescriptor(ColumnFamilyDescriptor* desc) override; + + private: + ColumnFamilyData* cfd_; + DBImpl* db_; + InstrumentedMutex* mutex_; +}; + +// Does not ref-count ColumnFamilyData +// We use this dummy ColumnFamilyHandleImpl because sometimes MemTableInserter +// calls DBImpl methods. When this happens, MemTableInserter need access to +// ColumnFamilyHandle (same as the client would need). In that case, we feed +// MemTableInserter dummy ColumnFamilyHandle and enable it to call DBImpl +// methods +class ColumnFamilyHandleInternal : public ColumnFamilyHandleImpl { + public: + ColumnFamilyHandleInternal() + : ColumnFamilyHandleImpl(nullptr, nullptr, nullptr) {} + + void SetCFD(ColumnFamilyData* _cfd) { internal_cfd_ = _cfd; } + virtual ColumnFamilyData* cfd() const override { return internal_cfd_; } + + private: + ColumnFamilyData* internal_cfd_; +}; + +// holds references to memtable, all immutable memtables and version +struct SuperVersion { + // Accessing members of this class is not thread-safe and requires external + // synchronization (ie db mutex held or on write thread). + MemTable* mem; + MemTableListVersion* imm; + Version* current; + MutableCFOptions mutable_cf_options; + // Version number of the current SuperVersion + uint64_t version_number; + + InstrumentedMutex* db_mutex; + + // should be called outside the mutex + SuperVersion() = default; + ~SuperVersion(); + SuperVersion* Ref(); + // If Unref() returns true, Cleanup() should be called with mutex held + // before deleting this SuperVersion. + bool Unref(); + + // call these two methods with db mutex held + // Cleanup unrefs mem, imm and current. 
Also, it stores all memtables + // that needs to be deleted in to_delete vector. Unrefing those + // objects needs to be done in the mutex + void Cleanup(); + void Init(MemTable* new_mem, MemTableListVersion* new_imm, + Version* new_current); + + // The value of dummy is not actually used. kSVInUse takes its address as a + // mark in the thread local storage to indicate the SuperVersion is in use + // by thread. This way, the value of kSVInUse is guaranteed to have no + // conflict with SuperVersion object address and portable on different + // platform. + static int dummy; + static void* const kSVInUse; + static void* const kSVObsolete; + + private: + std::atomic refs; + // We need to_delete because during Cleanup(), imm->Unref() returns + // all memtables that we need to free through this vector. We then + // delete all those memtables outside of mutex, during destruction + autovector to_delete; +}; + +extern Status CheckCompressionSupported(const ColumnFamilyOptions& cf_options); + +extern Status CheckConcurrentWritesSupported( + const ColumnFamilyOptions& cf_options); + +extern ColumnFamilyOptions SanitizeOptions(const DBOptions& db_options, + const InternalKeyComparator* icmp, + const ColumnFamilyOptions& src); +// Wrap user defined table proproties collector factories `from cf_options` +// into internal ones in int_tbl_prop_collector_factories. Add a system internal +// one too. +extern void GetIntTblPropCollectorFactory( + const ColumnFamilyOptions& cf_options, + std::vector>* + int_tbl_prop_collector_factories); + +class ColumnFamilySet; + +// This class keeps all the data that a column family needs. +// Most methods require DB mutex held, unless otherwise noted +class ColumnFamilyData { + public: + ~ColumnFamilyData(); + + // thread-safe + uint32_t GetID() const { return id_; } + // thread-safe + const std::string& GetName() const { return name_; } + + // Ref() can only be called from a context where the caller can guarantee + // that ColumnFamilyData is alive (while holding a non-zero ref already, + // holding a DB mutex, or as the leader in a write batch group). + void Ref() { refs_.fetch_add(1, std::memory_order_relaxed); } + + // Unref decreases the reference count, but does not handle deletion + // when the count goes to 0. If this method returns true then the + // caller should delete the instance immediately, or later, by calling + // FreeDeadColumnFamilies(). Unref() can only be called while holding + // a DB mutex, or during single-threaded recovery. + bool Unref() { + int old_refs = refs_.fetch_sub(1, std::memory_order_relaxed); + assert(old_refs > 0); + return old_refs == 1; + } + + // SetDropped() can only be called under following conditions: + // 1) Holding a DB mutex, + // 2) from single-threaded write thread, AND + // 3) from single-threaded VersionSet::LogAndApply() + // After dropping column family no other operation on that column family + // will be executed. All the files and memory will be, however, kept around + // until client drops the column family handle. That way, client can still + // access data from dropped column family. + // Column family can be dropped and still alive. In that state: + // *) Compaction and flush is not executed on the dropped column family. + // *) Client can continue reading from column family. 
Writes will fail unless
+  //    WriteOptions::ignore_missing_column_families is true
+  // When the dropped column family is unreferenced, then we:
+  // *) Remove column family from the linked list maintained by ColumnFamilySet
+  // *) delete all memory associated with that column family
+  // *) delete all the files associated with that column family
+  void SetDropped();
+  bool IsDropped() const { return dropped_; }
+
+  // thread-safe
+  int NumberLevels() const { return ioptions_.num_levels; }
+
+  void SetLogNumber(uint64_t log_number) { log_number_ = log_number; }
+  uint64_t GetLogNumber() const { return log_number_; }
+
+  // !!! To be deprecated! Please don't use this function anymore!
+  const Options* options() const { return &options_; }
+
+  // thread-safe
+  const EnvOptions* soptions() const;
+  const ImmutableCFOptions* ioptions() const { return &ioptions_; }
+  // REQUIRES: DB mutex held
+  // This returns the MutableCFOptions used by the current SuperVersion
+  // You should use this API to reference MutableCFOptions most of the time.
+  const MutableCFOptions* GetCurrentMutableCFOptions() const {
+    return &(super_version_->mutable_cf_options);
+  }
+  // REQUIRES: DB mutex held
+  // This returns the latest MutableCFOptions, which may not be in effect yet.
+  const MutableCFOptions* GetLatestMutableCFOptions() const {
+    return &mutable_cf_options_;
+  }
+#ifndef ROCKSDB_LITE
+  // REQUIRES: DB mutex held
+  Status SetOptions(
+      const std::unordered_map<std::string, std::string>& options_map);
+#endif  // ROCKSDB_LITE
+
+  InternalStats* internal_stats() { return internal_stats_.get(); }
+
+  MemTableList* imm() { return &imm_; }
+  MemTable* mem() { return mem_; }
+  Version* current() { return current_; }
+  Version* dummy_versions() { return dummy_versions_; }
+  void SetCurrent(Version* _current);
+  uint64_t GetNumLiveVersions() const;    // REQUIRES: DB mutex held
+  uint64_t GetTotalSstFilesSize() const;  // REQUIRES: DB mutex held
+  void SetMemtable(MemTable* new_mem) { mem_ = new_mem; }
+
+  // See Memtable constructor for explanation of earliest_seq param.
+  MemTable* ConstructNewMemtable(const MutableCFOptions& mutable_cf_options,
+                                 SequenceNumber earliest_seq);
+  void CreateNewMemtable(const MutableCFOptions& mutable_cf_options,
+                         SequenceNumber earliest_seq);
+
+  TableCache* table_cache() const { return table_cache_.get(); }
+
+  // See documentation in compaction_picker.h
+  // REQUIRES: DB mutex held
+  bool NeedsCompaction() const;
+  // REQUIRES: DB mutex held
+  Compaction* PickCompaction(const MutableCFOptions& mutable_options,
+                             LogBuffer* log_buffer);
+  // A flag to tell a manual compaction to compact all levels together
+  // instead of a specific level.
+  static const int kCompactAllLevels;
+  // A flag to tell a manual compaction that its output is the base level.
+ static const int kCompactToBaseLevel; + // REQUIRES: DB mutex held + Compaction* CompactRange(const MutableCFOptions& mutable_cf_options, + int input_level, int output_level, + uint32_t output_path_id, const InternalKey* begin, + const InternalKey* end, InternalKey** compaction_end, + bool* manual_conflict); + + CompactionPicker* compaction_picker() { return compaction_picker_.get(); } + // thread-safe + const Comparator* user_comparator() const { + return internal_comparator_.user_comparator(); + } + // thread-safe + const InternalKeyComparator& internal_comparator() const { + return internal_comparator_; + } + + const std::vector>* + int_tbl_prop_collector_factories() const { + return &int_tbl_prop_collector_factories_; + } + + SuperVersion* GetSuperVersion() { return super_version_; } + // thread-safe + // Return a already referenced SuperVersion to be used safely. + SuperVersion* GetReferencedSuperVersion(InstrumentedMutex* db_mutex); + // thread-safe + // Get SuperVersion stored in thread local storage. If it does not exist, + // get a reference from a current SuperVersion. + SuperVersion* GetThreadLocalSuperVersion(InstrumentedMutex* db_mutex); + // Try to return SuperVersion back to thread local storage. Retrun true on + // success and false on failure. It fails when the thread local storage + // contains anything other than SuperVersion::kSVInUse flag. + bool ReturnThreadLocalSuperVersion(SuperVersion* sv); + // thread-safe + uint64_t GetSuperVersionNumber() const { + return super_version_number_.load(); + } + // will return a pointer to SuperVersion* if previous SuperVersion + // if its reference count is zero and needs deletion or nullptr if not + // As argument takes a pointer to allocated SuperVersion to enable + // the clients to allocate SuperVersion outside of mutex. + // IMPORTANT: Only call this from DBImpl::InstallSuperVersion() + SuperVersion* InstallSuperVersion(SuperVersion* new_superversion, + InstrumentedMutex* db_mutex, + const MutableCFOptions& mutable_cf_options); + SuperVersion* InstallSuperVersion(SuperVersion* new_superversion, + InstrumentedMutex* db_mutex); + + void ResetThreadLocalSuperVersions(); + + // Protected by DB mutex + void set_pending_flush(bool value) { pending_flush_ = value; } + void set_pending_compaction(bool value) { pending_compaction_ = value; } + bool pending_flush() { return pending_flush_; } + bool pending_compaction() { return pending_compaction_; } + + // Recalculate some small conditions, which are changed only during + // compaction, adding new memtable and/or + // recalculation of compaction score. These values are used in + // DBImpl::MakeRoomForWrite function to decide, if it need to make + // a write stall + void RecalculateWriteStallConditions( + const MutableCFOptions& mutable_cf_options); + + private: + friend class ColumnFamilySet; + ColumnFamilyData(uint32_t id, const std::string& name, + Version* dummy_versions, Cache* table_cache, + WriteBufferManager* write_buffer_manager, + const ColumnFamilyOptions& options, + const DBOptions* db_options, const EnvOptions& env_options, + ColumnFamilySet* column_family_set); + + uint32_t id_; + const std::string name_; + Version* dummy_versions_; // Head of circular doubly-linked list of versions. 
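+  // Editor's note: the list is circular with dummy_versions_ as the
+  // sentinel; the most recently installed Version sits at
+  // dummy_versions_->prev_, which is cached as current_ just below.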
+ Version* current_; // == dummy_versions->prev_ + + std::atomic refs_; // outstanding references to ColumnFamilyData + bool dropped_; // true if client dropped it + + const InternalKeyComparator internal_comparator_; + std::vector> + int_tbl_prop_collector_factories_; + + const Options options_; + const ImmutableCFOptions ioptions_; + MutableCFOptions mutable_cf_options_; + + std::unique_ptr table_cache_; + + std::unique_ptr internal_stats_; + + WriteBufferManager* write_buffer_manager_; + + MemTable* mem_; + MemTableList imm_; + SuperVersion* super_version_; + + // An ordinal representing the current SuperVersion. Updated by + // InstallSuperVersion(), i.e. incremented every time super_version_ + // changes. + std::atomic super_version_number_; + + // Thread's local copy of SuperVersion pointer + // This needs to be destructed before mutex_ + std::unique_ptr local_sv_; + + // pointers for a circular linked list. we use it to support iterations over + // all column families that are alive (note: dropped column families can also + // be alive as long as client holds a reference) + ColumnFamilyData* next_; + ColumnFamilyData* prev_; + + // This is the earliest log file number that contains data from this + // Column Family. All earlier log files must be ignored and not + // recovered from + uint64_t log_number_; + + // An object that keeps all the compaction stats + // and picks the next compaction + std::unique_ptr compaction_picker_; + + ColumnFamilySet* column_family_set_; + + std::unique_ptr write_controller_token_; + + // If true --> this ColumnFamily is currently present in DBImpl::flush_queue_ + bool pending_flush_; + + // If true --> this ColumnFamily is currently present in + // DBImpl::compaction_queue_ + bool pending_compaction_; + + uint64_t prev_compaction_needed_bytes_; +}; + +// ColumnFamilySet has interesting thread-safety requirements +// * CreateColumnFamily() or RemoveColumnFamily() -- need to be protected by DB +// mutex AND executed in the write thread. +// CreateColumnFamily() should ONLY be called from VersionSet::LogAndApply() AND +// single-threaded write thread. It is also called during Recovery and in +// DumpManifest(). +// RemoveColumnFamily() is only called from SetDropped(). DB mutex needs to be +// held and it needs to be executed from the write thread. SetDropped() also +// guarantees that it will be called only from single-threaded LogAndApply(), +// but this condition is not that important. +// * Iteration -- hold DB mutex, but you can release it in the body of +// iteration. If you release DB mutex in body, reference the column +// family before the mutex and unreference after you unlock, since the column +// family might get dropped when the DB mutex is released +// * GetDefault() -- thread safe +// * GetColumnFamily() -- either inside of DB mutex or from a write thread +// * GetNextColumnFamilyID(), GetMaxColumnFamily(), UpdateMaxColumnFamily(), +// NumberOfColumnFamilies -- inside of DB mutex +class ColumnFamilySet { + public: + // ColumnFamilySet supports iteration + class iterator { + public: + explicit iterator(ColumnFamilyData* cfd) + : current_(cfd) {} + iterator& operator++() { + // dropped column families might still be included in this iteration + // (we're only removing them when client drops the last reference to the + // column family). 
+ // dummy is never dead, so this will never be infinite + do { + current_ = current_->next_; + } while (current_->refs_.load(std::memory_order_relaxed) == 0); + return *this; + } + bool operator!=(const iterator& other) { + return this->current_ != other.current_; + } + ColumnFamilyData* operator*() { return current_; } + + private: + ColumnFamilyData* current_; + }; + + ColumnFamilySet(const std::string& dbname, const DBOptions* db_options, + const EnvOptions& env_options, Cache* table_cache, + WriteBufferManager* write_buffer_manager, + WriteController* write_controller); + ~ColumnFamilySet(); + + ColumnFamilyData* GetDefault() const; + // GetColumnFamily() calls return nullptr if column family is not found + ColumnFamilyData* GetColumnFamily(uint32_t id) const; + ColumnFamilyData* GetColumnFamily(const std::string& name) const; + // this call will return the next available column family ID. it guarantees + // that there is no column family with id greater than or equal to the + // returned value in the current running instance or anytime in RocksDB + // instance history. + uint32_t GetNextColumnFamilyID(); + uint32_t GetMaxColumnFamily(); + void UpdateMaxColumnFamily(uint32_t new_max_column_family); + size_t NumberOfColumnFamilies() const; + + ColumnFamilyData* CreateColumnFamily(const std::string& name, uint32_t id, + Version* dummy_version, + const ColumnFamilyOptions& options); + + iterator begin() { return iterator(dummy_cfd_->next_); } + iterator end() { return iterator(dummy_cfd_); } + + // REQUIRES: DB mutex held + // Don't call while iterating over ColumnFamilySet + void FreeDeadColumnFamilies(); + + Cache* get_table_cache() { return table_cache_; } + + private: + friend class ColumnFamilyData; + // helper function that gets called from cfd destructor + // REQUIRES: DB mutex held + void RemoveColumnFamily(ColumnFamilyData* cfd); + + // column_families_ and column_family_data_ need to be protected: + // * when mutating both conditions have to be satisfied: + // 1. DB mutex locked + // 2. thread currently in single-threaded write thread + // * when reading, at least one condition needs to be satisfied: + // 1. DB mutex locked + // 2. accessed from a single-threaded write thread + std::unordered_map column_families_; + std::unordered_map column_family_data_; + + uint32_t max_column_family_; + ColumnFamilyData* dummy_cfd_; + // We don't hold the refcount here, since default column family always exists + // We are also not responsible for cleaning up default_cfd_cache_. This is + // just a cache that makes common case (accessing default column family) + // faster + ColumnFamilyData* default_cfd_cache_; + + const std::string db_name_; + const DBOptions* const db_options_; + const EnvOptions env_options_; + Cache* table_cache_; + WriteBufferManager* write_buffer_manager_; + WriteController* write_controller_; +}; + +// We use ColumnFamilyMemTablesImpl to provide WriteBatch a way to access +// memtables of different column families (specified by ID in the write batch) +class ColumnFamilyMemTablesImpl : public ColumnFamilyMemTables { + public: + explicit ColumnFamilyMemTablesImpl(ColumnFamilySet* column_family_set) + : column_family_set_(column_family_set), current_(nullptr) {} + + // Constructs a ColumnFamilyMemTablesImpl equivalent to one constructed + // with the arguments used to construct *orig. 
+ explicit ColumnFamilyMemTablesImpl(ColumnFamilyMemTablesImpl* orig) + : column_family_set_(orig->column_family_set_), current_(nullptr) {} + + // sets current_ to ColumnFamilyData with column_family_id + // returns false if column family doesn't exist + // REQUIRES: use this function of DBImpl::column_family_memtables_ should be + // under a DB mutex OR from a write thread + bool Seek(uint32_t column_family_id) override; + + // Returns log number of the selected column family + // REQUIRES: under a DB mutex OR from a write thread + uint64_t GetLogNumber() const override; + + // REQUIRES: Seek() called first + // REQUIRES: use this function of DBImpl::column_family_memtables_ should be + // under a DB mutex OR from a write thread + virtual MemTable* GetMemTable() const override; + + // Returns column family handle for the selected column family + // REQUIRES: use this function of DBImpl::column_family_memtables_ should be + // under a DB mutex OR from a write thread + virtual ColumnFamilyHandle* GetColumnFamilyHandle() override; + + // Cannot be called while another thread is calling Seek(). + // REQUIRES: use this function of DBImpl::column_family_memtables_ should be + // under a DB mutex OR from a write thread + virtual ColumnFamilyData* current() override { return current_; } + + private: + ColumnFamilySet* column_family_set_; + ColumnFamilyData* current_; + ColumnFamilyHandleInternal handle_; +}; + +extern uint32_t GetColumnFamilyID(ColumnFamilyHandle* column_family); + +extern const Comparator* GetColumnFamilyUserComparator( + ColumnFamilyHandle* column_family); + +} // namespace rocksdb diff --git a/external/rocksdb/db/column_family_test.cc b/external/rocksdb/db/column_family_test.cc new file mode 100755 index 0000000000..99bfbfb1ab --- /dev/null +++ b/external/rocksdb/db/column_family_test.cc @@ -0,0 +1,3130 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
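+// Editor's note (added for orientation): the harness below wraps the Env in
+// an EnvCounter to count NewWritableFile calls, and drives DB::Open with
+// explicit ColumnFamilyDescriptor lists via TryOpen()/OpenReadOnly().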
diff --git a/external/rocksdb/db/column_family_test.cc b/external/rocksdb/db/column_family_test.cc
new file mode 100755
index 0000000000..99bfbfb1ab
--- /dev/null
+++ b/external/rocksdb/db/column_family_test.cc
@@ -0,0 +1,3130 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <algorithm>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include "db/db_impl.h"
+#include "db/db_test_util.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/iterator.h"
+#include "util/coding.h"
+#include "util/fault_injection_test_env.h"
+#include "util/options_parser.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+#include "utilities/merge_operators.h"
+
+namespace rocksdb {
+
+static const int kValueSize = 1000;
+
+namespace {
+std::string RandomString(Random* rnd, int len) {
+  std::string r;
+  test::RandomString(rnd, len, &r);
+  return r;
+}
+}  // anonymous namespace
+
+// counts how many operations were performed
+class EnvCounter : public EnvWrapper {
+ public:
+  explicit EnvCounter(Env* base)
+      : EnvWrapper(base), num_new_writable_file_(0) {}
+  int GetNumberOfNewWritableFileCalls() {
+    return num_new_writable_file_;
+  }
+  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
+                         const EnvOptions& soptions) override {
+    ++num_new_writable_file_;
+    return EnvWrapper::NewWritableFile(f, r, soptions);
+  }
+
+ private:
+  int num_new_writable_file_;
+};
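EnvCounter is a classic EnvWrapper decorator: it forwards every Env call to the wrapped instance and increments a counter for the one operation it instruments. A hedged usage sketch (the DB-opening boilerplate is elided; only wiring already shown in this file is used):

    // Route all file I/O of a DB through the counter, then inspect it.
    EnvCounter* counting_env = new EnvCounter(Env::Default());
    DBOptions opts;
    opts.create_if_missing = true;
    opts.env = counting_env;  // same wiring as the test fixture constructor
    // ... open a DB with `opts`, write some data, flush ...
    int created = counting_env->GetNumberOfNewWritableFileCalls();
    // `created` now counts the WAL/SST/manifest files the DB has created.

The same pattern extends to any other Env entry point: override, bump a counter, forward to EnvWrapper.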
+class ColumnFamilyTest : public testing::Test {
+ public:
+  ColumnFamilyTest() : rnd_(139) {
+    env_ = new EnvCounter(Env::Default());
+    dbname_ = test::TmpDir() + "/column_family_test";
+    db_options_.create_if_missing = true;
+    db_options_.fail_if_options_file_error = true;
+    db_options_.env = env_;
+    DestroyDB(dbname_, Options(db_options_, column_family_options_));
+  }
+
+  ~ColumnFamilyTest() {
+    Close();
+    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+    Destroy();
+    delete env_;
+  }
+
+  // Return the value to associate with the specified key
+  Slice Value(int k, std::string* storage) {
+    if (k == 0) {
+      // Ugh. Random seed of 0 used to produce no entropy. This code
+      // preserves the implementation that was in place when all of the
+      // magic values in this file were picked.
+      *storage = std::string(kValueSize, ' ');
+      return Slice(*storage);
+    } else {
+      Random r(k);
+      return test::RandomString(&r, kValueSize, storage);
+    }
+  }
+
+  void Build(int base, int n, int flush_every = 0) {
+    std::string key_space, value_space;
+    WriteBatch batch;
+
+    for (int i = 0; i < n; i++) {
+      if (flush_every != 0 && i != 0 && i % flush_every == 0) {
+        DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
+        dbi->TEST_FlushMemTable();
+      }
+
+      int keyi = base + i;
+      Slice key(DBTestBase::Key(keyi));
+
+      batch.Clear();
+      batch.Put(handles_[0], key, Value(keyi, &value_space));
+      batch.Put(handles_[1], key, Value(keyi, &value_space));
+      batch.Put(handles_[2], key, Value(keyi, &value_space));
+      ASSERT_OK(db_->Write(WriteOptions(), &batch));
+    }
+  }
+
+  void CheckMissed() {
+    uint64_t next_expected = 0;
+    uint64_t missed = 0;
+    int bad_keys = 0;
+    int bad_values = 0;
+    int correct = 0;
+    std::string value_space;
+    for (int cf = 0; cf < 3; cf++) {
+      next_expected = 0;
+      Iterator* iter = db_->NewIterator(ReadOptions(false, true), handles_[cf]);
+      for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+        uint64_t key;
+        Slice in(iter->key());
+        in.remove_prefix(3);
+        if (!ConsumeDecimalNumber(&in, &key) || !in.empty() ||
+            key < next_expected) {
+          bad_keys++;
+          continue;
+        }
+        missed += (key - next_expected);
+        next_expected = key + 1;
+        if (iter->value() != Value(static_cast<int>(key), &value_space)) {
+          bad_values++;
+        } else {
+          correct++;
+        }
+      }
+      delete iter;
+    }
+
+    ASSERT_EQ(0, bad_keys);
+    ASSERT_EQ(0, bad_values);
+    ASSERT_EQ(0, missed);
+    (void)correct;
+  }
+
+  void Close() {
+    for (auto h : handles_) {
+      if (h) {
+        db_->DestroyColumnFamilyHandle(h);
+      }
+    }
+    handles_.clear();
+    names_.clear();
+    delete db_;
+    db_ = nullptr;
+  }
+
+  Status TryOpen(std::vector<std::string> cf,
+                 std::vector<ColumnFamilyOptions> options = {}) {
+    std::vector<ColumnFamilyDescriptor> column_families;
+    names_.clear();
+    for (size_t i = 0; i < cf.size(); ++i) {
+      column_families.push_back(ColumnFamilyDescriptor(
+          cf[i], options.size() == 0 ? column_family_options_ : options[i]));
+      names_.push_back(cf[i]);
+    }
+    return DB::Open(db_options_, dbname_, column_families, &handles_, &db_);
+  }
+
+  Status OpenReadOnly(std::vector<std::string> cf,
+                      std::vector<ColumnFamilyOptions> options = {}) {
+    std::vector<ColumnFamilyDescriptor> column_families;
+    names_.clear();
+    for (size_t i = 0; i < cf.size(); ++i) {
+      column_families.push_back(ColumnFamilyDescriptor(
+          cf[i], options.size() == 0 ? column_family_options_ : options[i]));
+      names_.push_back(cf[i]);
+    }
+    return DB::OpenForReadOnly(db_options_, dbname_, column_families, &handles_,
+                               &db_);
+  }
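TryOpen above wraps RocksDB's multi-column-family open: every column family present in the DB must be listed (the options vector lines up with the names by index), and DB::Open hands back one handle per descriptor. A minimal free-standing sketch of the same call, with an illustrative path and CF name:

    #include <vector>
    #include "rocksdb/db.h"

    rocksdb::Status OpenWithTwoFamilies(
        rocksdb::DB** db, std::vector<rocksdb::ColumnFamilyHandle*>* handles) {
      rocksdb::DBOptions db_opts;
      db_opts.create_if_missing = true;
      std::vector<rocksdb::ColumnFamilyDescriptor> cfs;
      cfs.emplace_back(rocksdb::kDefaultColumnFamilyName,
                       rocksdb::ColumnFamilyOptions());
      cfs.emplace_back("one", rocksdb::ColumnFamilyOptions());  // must exist
      return rocksdb::DB::Open(db_opts, "/tmp/cf_demo", cfs, handles, db);
    }

Omitting an existing column family from the list makes the open fail, which is exactly what the AddDrop test below relies on when it expects IsInvalidArgument().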
+#ifndef ROCKSDB_LITE  // ReadOnlyDB is not supported
+  void AssertOpenReadOnly(std::vector<std::string> cf,
+                          std::vector<ColumnFamilyOptions> options = {}) {
+    ASSERT_OK(OpenReadOnly(cf, options));
+  }
+#endif  // !ROCKSDB_LITE
+
+  void Open(std::vector<std::string> cf,
+            std::vector<ColumnFamilyOptions> options = {}) {
+    ASSERT_OK(TryOpen(cf, options));
+  }
+
+  void Open() {
+    Open({"default"});
+  }
+
+  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
+
+  int GetProperty(int cf, std::string property) {
+    std::string value;
+    EXPECT_TRUE(dbfull()->GetProperty(handles_[cf], property, &value));
+#ifndef CYGWIN
+    return std::stoi(value);
+#else
+    return std::strtol(value.c_str(), 0 /* off */, 10 /* base */);
+#endif
+  }
+
+  void Destroy() {
+    Close();
+    ASSERT_OK(DestroyDB(dbname_, Options(db_options_, column_family_options_)));
+  }
+
+  void CreateColumnFamilies(
+      const std::vector<std::string>& cfs,
+      const std::vector<ColumnFamilyOptions> options = {}) {
+    int cfi = static_cast<int>(handles_.size());
+    handles_.resize(cfi + cfs.size());
+    names_.resize(cfi + cfs.size());
+    for (size_t i = 0; i < cfs.size(); ++i) {
+      const auto& current_cf_opt =
+          options.size() == 0 ? column_family_options_ : options[i];
+      ASSERT_OK(
+          db_->CreateColumnFamily(current_cf_opt, cfs[i], &handles_[cfi]));
+      names_[cfi] = cfs[i];
+
+#ifndef ROCKSDB_LITE  // RocksDBLite does not support GetDescriptor
+      // Verify the CF options of the returned CF handle.
+      ColumnFamilyDescriptor desc;
+      ASSERT_OK(handles_[cfi]->GetDescriptor(&desc));
+      RocksDBOptionsParser::VerifyCFOptions(desc.options, current_cf_opt);
+#endif  // !ROCKSDB_LITE
+      cfi++;
+    }
+  }
+
+  void Reopen(const std::vector<ColumnFamilyOptions> options = {}) {
+    std::vector<std::string> names;
+    for (auto name : names_) {
+      if (name != "") {
+        names.push_back(name);
+      }
+    }
+    Close();
+    assert(options.size() == 0 || names.size() == options.size());
+    Open(names, options);
+  }
+
+  void CreateColumnFamiliesAndReopen(const std::vector<std::string>& cfs) {
+    CreateColumnFamilies(cfs);
+    Reopen();
+  }
+
+  void DropColumnFamilies(const std::vector<int>& cfs) {
+    for (auto cf : cfs) {
+      ASSERT_OK(db_->DropColumnFamily(handles_[cf]));
+      db_->DestroyColumnFamilyHandle(handles_[cf]);
+      handles_[cf] = nullptr;
+      names_[cf] = "";
+    }
+  }
+
+  void PutRandomData(int cf, int num, int key_value_size, bool save = false) {
+    for (int i = 0; i < num; ++i) {
+      // 10 bytes for key, rest is value
+      if (!save) {
+        ASSERT_OK(Put(cf, test::RandomKey(&rnd_, 11),
+                      RandomString(&rnd_, key_value_size - 10)));
+      } else {
+        std::string key = test::RandomKey(&rnd_, 11);
+        keys_.insert(key);
+        ASSERT_OK(Put(cf, key, RandomString(&rnd_, key_value_size - 10)));
+      }
+    }
+  }
+
+  void WaitForFlush(int cf) {
+#ifndef ROCKSDB_LITE  // TEST functions are not supported in lite
+    ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable(handles_[cf]));
+#endif  // !ROCKSDB_LITE
+  }
+
+  void WaitForCompaction() {
+#ifndef ROCKSDB_LITE  // TEST functions are not supported in lite
+    ASSERT_OK(dbfull()->TEST_WaitForCompact());
+#endif  // !ROCKSDB_LITE
+  }
+
+  uint64_t MaxTotalInMemoryState() {
+#ifndef ROCKSDB_LITE
+    return dbfull()->TEST_MaxTotalInMemoryState();
+#else
+    return 0;
+#endif  // !ROCKSDB_LITE
+  }
+
+  void AssertMaxTotalInMemoryState(uint64_t value) {
+    ASSERT_EQ(value, MaxTotalInMemoryState());
+  }
+
+  Status Put(int cf, const std::string& key, const std::string& value) {
+    return db_->Put(WriteOptions(), handles_[cf], Slice(key), Slice(value));
+  }
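GetProperty above turns RocksDB's string-valued, per-column-family properties into integers for assertions. A short sketch of the underlying API call (the property name is one this file already queries):

    #include <string>
    #include "rocksdb/db.h"

    // Returns the number of immutable memtables for `cf`, or -1 on failure.
    int NumImmutableMemTables(rocksdb::DB* db,
                              rocksdb::ColumnFamilyHandle* cf) {
      std::string value;
      if (!db->GetProperty(cf, "rocksdb.num-immutable-mem-table", &value)) {
        return -1;  // unknown property, or unavailable in this build
      }
      return std::stoi(value);  // properties arrive as decimal strings
    }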
+  Status Merge(int cf, const std::string& key, const std::string& value) {
+    return db_->Merge(WriteOptions(), handles_[cf], Slice(key), Slice(value));
+  }
+  Status Flush(int cf) {
+    return db_->Flush(FlushOptions(), handles_[cf]);
+  }
+
+  std::string Get(int cf, const std::string& key) {
+    ReadOptions options;
+    options.verify_checksums = true;
+    std::string result;
+    Status s = db_->Get(options, handles_[cf], Slice(key), &result);
+    if (s.IsNotFound()) {
+      result = "NOT_FOUND";
+    } else if (!s.ok()) {
+      result = s.ToString();
+    }
+    return result;
+  }
+
+  void CompactAll(int cf) {
+    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), handles_[cf], nullptr,
+                                nullptr));
+  }
+
+  void Compact(int cf, const Slice& start, const Slice& limit) {
+    ASSERT_OK(
+        db_->CompactRange(CompactRangeOptions(), handles_[cf], &start, &limit));
+  }
+
+  int NumTableFilesAtLevel(int level, int cf) {
+    return GetProperty(cf, "rocksdb.num-files-at-level" + ToString(level));
+  }
+
+#ifndef ROCKSDB_LITE
+  // Return spread of files per level
+  std::string FilesPerLevel(int cf) {
+    std::string result;
+    int last_non_zero_offset = 0;
+    for (int level = 0; level < dbfull()->NumberLevels(handles_[cf]); level++) {
+      int f = NumTableFilesAtLevel(level, cf);
+      char buf[100];
+      snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
+      result += buf;
+      if (f > 0) {
+        last_non_zero_offset = static_cast<int>(result.size());
+      }
+    }
+    result.resize(last_non_zero_offset);
+    return result;
+  }
+#endif
+
+  void AssertFilesPerLevel(const std::string& value, int cf) {
+#ifndef ROCKSDB_LITE
+    ASSERT_EQ(value, FilesPerLevel(cf));
+#endif
+  }
+
+#ifndef ROCKSDB_LITE  // GetLiveFilesMetaData is not supported
+  int CountLiveFiles() {
+    std::vector<LiveFileMetaData> metadata;
+    db_->GetLiveFilesMetaData(&metadata);
+    return static_cast<int>(metadata.size());
+  }
+#endif  // !ROCKSDB_LITE
+
+  void AssertCountLiveFiles(int expected_value) {
+#ifndef ROCKSDB_LITE
+    ASSERT_EQ(expected_value, CountLiveFiles());
+#endif
+  }
+
+  // Do n memtable flushes, each of which produces an sstable
+  // covering the range [small,large].
+  void MakeTables(int cf, int n, const std::string& small,
+                  const std::string& large) {
+    for (int i = 0; i < n; i++) {
+      ASSERT_OK(Put(cf, small, "begin"));
+      ASSERT_OK(Put(cf, large, "end"));
+      ASSERT_OK(db_->Flush(FlushOptions(), handles_[cf]));
+    }
+  }
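FilesPerLevel above flattens the LSM shape into a comma-separated file count per level, trimmed after the last non-zero level; the compaction tests below are written almost entirely in terms of these strings. A few worked readings of the format, plus the assertion helper as it is used later in this file:

    // "1"    -> one SST on L0 and nothing below (universal compaction
    //           merged everything into a single run)
    // "0,1"  -> L0 empty, one SST on L1 (a level-style compaction finished)
    // "3"    -> three L0 files piled up, i.e. flushes without compaction
    AssertFilesPerLevel("0,1", 2);  // usage as in the tests below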
+#ifndef ROCKSDB_LITE  // GetSortedWalFiles is not supported
+  int CountLiveLogFiles() {
+    int micros_wait_for_log_deletion = 20000;
+    env_->SleepForMicroseconds(micros_wait_for_log_deletion);
+    int ret = 0;
+    VectorLogPtr wal_files;
+    Status s;
+    // GetSortedWalFiles is a flaky function -- it gets all the wal_dir
+    // children files and then later checks for their existence. If some of
+    // the log files don't exist anymore, it reports an error. It does all of
+    // this without the DB mutex held, so if a background process deletes a
+    // log file while the function is executing, it returns an error. We
+    // retry the function 10 times to avoid the error failing the test.
+    for (int retries = 0; retries < 10; ++retries) {
+      wal_files.clear();
+      s = db_->GetSortedWalFiles(wal_files);
+      if (s.ok()) {
+        break;
+      }
+    }
+    EXPECT_OK(s);
+    for (const auto& wal : wal_files) {
+      if (wal->Type() == kAliveLogFile) {
+        ++ret;
+      }
+    }
+    return ret;
+  }
+#endif  // !ROCKSDB_LITE
+
+  void AssertCountLiveLogFiles(int value) {
+#ifndef ROCKSDB_LITE  // GetSortedWalFiles is not supported
+    ASSERT_EQ(value, CountLiveLogFiles());
+#endif  // !ROCKSDB_LITE
+  }
+
+  void AssertNumberOfImmutableMemtables(std::vector<int> num_per_cf) {
+    assert(num_per_cf.size() == handles_.size());
+
+#ifndef ROCKSDB_LITE  // GetProperty is not supported in lite
+    for (size_t i = 0; i < num_per_cf.size(); ++i) {
+      ASSERT_EQ(num_per_cf[i], GetProperty(static_cast<int>(i),
+                                           "rocksdb.num-immutable-mem-table"));
+    }
+#endif  // !ROCKSDB_LITE
+  }
+
+  void CopyFile(const std::string& source, const std::string& destination,
+                uint64_t size = 0) {
+    const EnvOptions soptions;
+    unique_ptr<SequentialFile> srcfile;
+    ASSERT_OK(env_->NewSequentialFile(source, &srcfile, soptions));
+    unique_ptr<WritableFile> destfile;
+    ASSERT_OK(env_->NewWritableFile(destination, &destfile, soptions));
+
+    if (size == 0) {
+      // default argument means copy everything
+      ASSERT_OK(env_->GetFileSize(source, &size));
+    }
+
+    char buffer[4096];
+    Slice slice;
+    while (size > 0) {
+      uint64_t one = std::min(uint64_t(sizeof(buffer)), size);
+      ASSERT_OK(srcfile->Read(one, &slice, buffer));
+      ASSERT_OK(destfile->Append(slice));
+      size -= slice.size();
+    }
+    ASSERT_OK(destfile->Close());
+  }
+
+  std::vector<ColumnFamilyHandle*> handles_;
+  std::vector<std::string> names_;
+  std::set<std::string> keys_;
+  ColumnFamilyOptions column_family_options_;
+  DBOptions db_options_;
+  std::string dbname_;
+  DB* db_ = nullptr;
+  EnvCounter* env_;
+  Random rnd_;
+};
+
+TEST_F(ColumnFamilyTest, DontReuseColumnFamilyID) {
+  for (int iter = 0; iter < 3; ++iter) {
+    Open();
+    CreateColumnFamilies({"one", "two", "three"});
+    for (size_t i = 0; i < handles_.size(); ++i) {
+      auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(handles_[i]);
+      ASSERT_EQ(i, cfh->GetID());
+    }
+    if (iter == 1) {
+      Reopen();
+    }
+    DropColumnFamilies({3});
+    Reopen();
+    if (iter == 2) {
+      // this tests whether max_column_family is correctly persisted with
+      // WriteSnapshot()
+      Reopen();
+    }
+    CreateColumnFamilies({"three2"});
+    // ID 3 that was used for the dropped column family "three" should not be
+    // reused
+    auto cfh3 = reinterpret_cast<ColumnFamilyHandleImpl*>(handles_[3]);
+    ASSERT_EQ(4U, cfh3->GetID());
+    Close();
+    Destroy();
+  }
+}
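The DontReuseColumnFamilyID test above pins down the guarantee documented on GetNextColumnFamilyID() in the header: IDs only grow, across both drops and reopens. A hedged sketch of checking that property through the public handle API (the function name is illustrative):

    #include <cassert>
    #include "rocksdb/db.h"

    // After CF id 3 ("three") was dropped, a newly created CF must get id 4.
    void ExpectMonotonicIds(rocksdb::DB* db) {
      rocksdb::ColumnFamilyHandle* cfh = nullptr;
      rocksdb::Status s = db->CreateColumnFamily(
          rocksdb::ColumnFamilyOptions(), "three2", &cfh);
      assert(s.ok());
      assert(cfh->GetID() == 4u);  // id 3 is never handed out again
      db->DestroyColumnFamilyHandle(cfh);
    }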
+class FlushEmptyCFTestWithParam : public ColumnFamilyTest,
+                                  public testing::WithParamInterface<bool> {
+ public:
+  FlushEmptyCFTestWithParam() { allow_2pc_ = GetParam(); }
+
+  // Required if inheriting from testing::WithParamInterface<>
+  static void SetUpTestCase() {}
+  static void TearDownTestCase() {}
+
+  bool allow_2pc_;
+};
+
+TEST_P(FlushEmptyCFTestWithParam, FlushEmptyCFTest) {
+  std::unique_ptr<FaultInjectionTestEnv> fault_env(
+      new FaultInjectionTestEnv(env_));
+  db_options_.env = fault_env.get();
+  db_options_.allow_2pc = allow_2pc_;
+  Open();
+  CreateColumnFamilies({"one", "two"});
+  // Generate log file A.
+  ASSERT_OK(Put(1, "foo", "v1"));  // seqID 1
+
+  Reopen();
+  // Log file A is not dropped after reopening because the default column
+  // family's min log number is 0.
+  // It flushes to SST file X.
+  ASSERT_OK(Put(1, "foo", "v1"));  // seqID 2
+  ASSERT_OK(Put(1, "bar", "v2"));  // seqID 3
+  // The current log file is file B now. While flushing, a new log file C is
+  // created and set to current. Both CFs' min log number is set to file C in
+  // memory, so after flushing file B is deleted. At the same time, the min
+  // log number of the default CF is not written to the manifest. Log file A
+  // still remains.
+  // Flushed to SST file Y.
+  Flush(1);
+  Flush(0);
+  ASSERT_OK(Put(1, "bar", "v3"));  // seqID 4
+  ASSERT_OK(Put(1, "foo", "v4"));  // seqID 5
+
+  // Preserve file system state up to here to simulate a crash condition.
+  fault_env->SetFilesystemActive(false);
+  std::vector<std::string> names;
+  for (auto name : names_) {
+    if (name != "") {
+      names.push_back(name);
+    }
+  }
+
+  Close();
+  fault_env->ResetState();
+
+  // Before opening, there are four files:
+  //   Log file A contains seqID 1
+  //   Log file C contains seqID 4, 5
+  //   SST file X contains seqID 1
+  //   SST file Y contains seqID 2, 3
+  // Min log number:
+  //   default CF: 0
+  //   CF one, two: C
+  // When opening the DB, all the seqIDs should be preserved.
+  Open(names, {});
+  ASSERT_EQ("v4", Get(1, "foo"));
+  ASSERT_EQ("v3", Get(1, "bar"));
+  Close();
+
+  db_options_.env = env_;
+}
+
+TEST_P(FlushEmptyCFTestWithParam, FlushEmptyCFTest2) {
+  std::unique_ptr<FaultInjectionTestEnv> fault_env(
+      new FaultInjectionTestEnv(env_));
+  db_options_.env = fault_env.get();
+  db_options_.allow_2pc = allow_2pc_;
+  Open();
+  CreateColumnFamilies({"one", "two"});
+  // Generate log file A.
+  ASSERT_OK(Put(1, "foo", "v1"));  // seqID 1
+
+  Reopen();
+  // Log file A is not dropped after reopening because the default column
+  // family's min log number is 0.
+  // It flushes to SST file X.
+  ASSERT_OK(Put(1, "foo", "v1"));  // seqID 2
+  ASSERT_OK(Put(1, "bar", "v2"));  // seqID 3
+  // The current log file is file B now. While flushing, a new log file C is
+  // created and set to current. Both CFs' min log number is set to file C, so
+  // after flushing file B is deleted. Log file A still remains.
+  // Flushed to SST file Y.
+  Flush(1);
+  ASSERT_OK(Put(0, "bar", "v2"));  // seqID 4
+  ASSERT_OK(Put(2, "bar", "v2"));  // seqID 5
+  ASSERT_OK(Put(1, "bar", "v3"));  // seqID 6
+  // Flushing all column families. This forces all CFs' min log to current.
+  // This is written to the manifest file. Log file C is cleared.
+  Flush(0);
+  Flush(1);
+  Flush(2);
+  // Write to log file D.
+  ASSERT_OK(Put(1, "bar", "v4"));  // seqID 7
+  ASSERT_OK(Put(1, "bar", "v5"));  // seqID 8
+  // Preserve file system state up to here to simulate a crash condition.
+  fault_env->SetFilesystemActive(false);
+  std::vector<std::string> names;
+  for (auto name : names_) {
+    if (name != "") {
+      names.push_back(name);
+    }
+  }
+
+  Close();
+  fault_env->ResetState();
+  // Before opening, there are two log files:
+  //   Log file A contains seqID 1
+  //   Log file D contains seqID 7, 8
+  // Min log number:
+  //   default CF: D
+  //   CF one, two: D
+  // When opening the DB, log file D should be replayed using the seqIDs
+  // specified in the file.
+ Open(names, {}); + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_EQ("v5", Get(1, "bar")); + Close(); + + db_options_.env = env_; +} + +INSTANTIATE_TEST_CASE_P(FlushEmptyCFTestWithParam, FlushEmptyCFTestWithParam, + ::testing::Bool()); + +TEST_F(ColumnFamilyTest, AddDrop) { + Open(); + CreateColumnFamilies({"one", "two", "three"}); + ASSERT_EQ("NOT_FOUND", Get(1, "fodor")); + ASSERT_EQ("NOT_FOUND", Get(2, "fodor")); + DropColumnFamilies({2}); + ASSERT_EQ("NOT_FOUND", Get(1, "fodor")); + CreateColumnFamilies({"four"}); + ASSERT_EQ("NOT_FOUND", Get(3, "fodor")); + ASSERT_OK(Put(1, "fodor", "mirko")); + ASSERT_EQ("mirko", Get(1, "fodor")); + ASSERT_EQ("NOT_FOUND", Get(3, "fodor")); + Close(); + ASSERT_TRUE(TryOpen({"default"}).IsInvalidArgument()); + Open({"default", "one", "three", "four"}); + DropColumnFamilies({1}); + Reopen(); + Close(); + + std::vector families; + ASSERT_OK(DB::ListColumnFamilies(db_options_, dbname_, &families)); + std::sort(families.begin(), families.end()); + ASSERT_TRUE(families == + std::vector({"default", "four", "three"})); +} + +TEST_F(ColumnFamilyTest, DropTest) { + // first iteration - dont reopen DB before dropping + // second iteration - reopen DB before dropping + for (int iter = 0; iter < 2; ++iter) { + Open({"default"}); + CreateColumnFamiliesAndReopen({"pikachu"}); + for (int i = 0; i < 100; ++i) { + ASSERT_OK(Put(1, ToString(i), "bar" + ToString(i))); + } + ASSERT_OK(Flush(1)); + + if (iter == 1) { + Reopen(); + } + ASSERT_EQ("bar1", Get(1, "1")); + + AssertCountLiveFiles(1); + DropColumnFamilies({1}); + // make sure that all files are deleted when we drop the column family + AssertCountLiveFiles(0); + Destroy(); + } +} + +TEST_F(ColumnFamilyTest, WriteBatchFailure) { + Open(); + CreateColumnFamiliesAndReopen({"one", "two"}); + WriteBatch batch; + batch.Put(handles_[0], Slice("existing"), Slice("column-family")); + batch.Put(handles_[1], Slice("non-existing"), Slice("column-family")); + ASSERT_OK(db_->Write(WriteOptions(), &batch)); + DropColumnFamilies({1}); + WriteOptions woptions_ignore_missing_cf; + woptions_ignore_missing_cf.ignore_missing_column_families = true; + batch.Put(handles_[0], Slice("still here"), Slice("column-family")); + ASSERT_OK(db_->Write(woptions_ignore_missing_cf, &batch)); + ASSERT_EQ("column-family", Get(0, "still here")); + Status s = db_->Write(WriteOptions(), &batch); + ASSERT_TRUE(s.IsInvalidArgument()); + Close(); +} + +TEST_F(ColumnFamilyTest, ReadWrite) { + Open(); + CreateColumnFamiliesAndReopen({"one", "two"}); + ASSERT_OK(Put(0, "foo", "v1")); + ASSERT_OK(Put(0, "bar", "v2")); + ASSERT_OK(Put(1, "mirko", "v3")); + ASSERT_OK(Put(0, "foo", "v2")); + ASSERT_OK(Put(2, "fodor", "v5")); + + for (int iter = 0; iter <= 3; ++iter) { + ASSERT_EQ("v2", Get(0, "foo")); + ASSERT_EQ("v2", Get(0, "bar")); + ASSERT_EQ("v3", Get(1, "mirko")); + ASSERT_EQ("v5", Get(2, "fodor")); + ASSERT_EQ("NOT_FOUND", Get(0, "fodor")); + ASSERT_EQ("NOT_FOUND", Get(1, "fodor")); + ASSERT_EQ("NOT_FOUND", Get(2, "foo")); + if (iter <= 1) { + Reopen(); + } + } + Close(); +} + +TEST_F(ColumnFamilyTest, IgnoreRecoveredLog) { + std::string backup_logs = dbname_ + "/backup_logs"; + + // delete old files in backup_logs directory + ASSERT_OK(env_->CreateDirIfMissing(dbname_)); + ASSERT_OK(env_->CreateDirIfMissing(backup_logs)); + std::vector old_files; + env_->GetChildren(backup_logs, &old_files); + for (auto& file : old_files) { + if (file != "." 
&& file != "..") { + env_->DeleteFile(backup_logs + "/" + file); + } + } + + column_family_options_.merge_operator = + MergeOperators::CreateUInt64AddOperator(); + db_options_.wal_dir = dbname_ + "/logs"; + Destroy(); + Open(); + CreateColumnFamilies({"cf1", "cf2"}); + + // fill up the DB + std::string one, two, three; + PutFixed64(&one, 1); + PutFixed64(&two, 2); + PutFixed64(&three, 3); + ASSERT_OK(Merge(0, "foo", one)); + ASSERT_OK(Merge(1, "mirko", one)); + ASSERT_OK(Merge(0, "foo", one)); + ASSERT_OK(Merge(2, "bla", one)); + ASSERT_OK(Merge(2, "fodor", one)); + ASSERT_OK(Merge(0, "bar", one)); + ASSERT_OK(Merge(2, "bla", one)); + ASSERT_OK(Merge(1, "mirko", two)); + ASSERT_OK(Merge(1, "franjo", one)); + + // copy the logs to backup + std::vector logs; + env_->GetChildren(db_options_.wal_dir, &logs); + for (auto& log : logs) { + if (log != ".." && log != ".") { + CopyFile(db_options_.wal_dir + "/" + log, backup_logs + "/" + log); + } + } + + // recover the DB + Close(); + + // 1. check consistency + // 2. copy the logs from backup back to WAL dir. if the recovery happens + // again on the same log files, this should lead to incorrect results + // due to applying merge operator twice + // 3. check consistency + for (int iter = 0; iter < 2; ++iter) { + // assert consistency + Open({"default", "cf1", "cf2"}); + ASSERT_EQ(two, Get(0, "foo")); + ASSERT_EQ(one, Get(0, "bar")); + ASSERT_EQ(three, Get(1, "mirko")); + ASSERT_EQ(one, Get(1, "franjo")); + ASSERT_EQ(one, Get(2, "fodor")); + ASSERT_EQ(two, Get(2, "bla")); + Close(); + + if (iter == 0) { + // copy the logs from backup back to wal dir + for (auto& log : logs) { + if (log != ".." && log != ".") { + CopyFile(backup_logs + "/" + log, db_options_.wal_dir + "/" + log); + } + } + } + } +} + +TEST_F(ColumnFamilyTest, FlushTest) { + Open(); + CreateColumnFamiliesAndReopen({"one", "two"}); + ASSERT_OK(Put(0, "foo", "v1")); + ASSERT_OK(Put(0, "bar", "v2")); + ASSERT_OK(Put(1, "mirko", "v3")); + ASSERT_OK(Put(0, "foo", "v2")); + ASSERT_OK(Put(2, "fodor", "v5")); + + for (int j = 0; j < 2; j++) { + ReadOptions ro; + std::vector iterators; + // Hold super version. + if (j == 0) { + ASSERT_OK(db_->NewIterators(ro, handles_, &iterators)); + } + + for (int i = 0; i < 3; ++i) { + uint64_t max_total_in_memory_state = + MaxTotalInMemoryState(); + Flush(i); + AssertMaxTotalInMemoryState(max_total_in_memory_state); + } + ASSERT_OK(Put(1, "foofoo", "bar")); + ASSERT_OK(Put(0, "foofoo", "bar")); + + for (auto* it : iterators) { + delete it; + } + } + Reopen(); + + for (int iter = 0; iter <= 2; ++iter) { + ASSERT_EQ("v2", Get(0, "foo")); + ASSERT_EQ("v2", Get(0, "bar")); + ASSERT_EQ("v3", Get(1, "mirko")); + ASSERT_EQ("v5", Get(2, "fodor")); + ASSERT_EQ("NOT_FOUND", Get(0, "fodor")); + ASSERT_EQ("NOT_FOUND", Get(1, "fodor")); + ASSERT_EQ("NOT_FOUND", Get(2, "foo")); + if (iter <= 1) { + Reopen(); + } + } + Close(); +} + +// Makes sure that obsolete log files get deleted +TEST_F(ColumnFamilyTest, LogDeletionTest) { + db_options_.max_total_wal_size = std::numeric_limits::max(); + column_family_options_.arena_block_size = 4 * 1024; + column_family_options_.write_buffer_size = 100000; // 100KB + Open(); + CreateColumnFamilies({"one", "two", "three", "four"}); + // Each bracket is one log file. 
if number is in (), it means + // we don't need it anymore (it's been flushed) + // [] + AssertCountLiveLogFiles(0); + PutRandomData(0, 1, 100); + // [0] + PutRandomData(1, 1, 100); + // [0, 1] + PutRandomData(1, 1000, 100); + WaitForFlush(1); + // [0, (1)] [1] + AssertCountLiveLogFiles(2); + PutRandomData(0, 1, 100); + // [0, (1)] [0, 1] + AssertCountLiveLogFiles(2); + PutRandomData(2, 1, 100); + // [0, (1)] [0, 1, 2] + PutRandomData(2, 1000, 100); + WaitForFlush(2); + // [0, (1)] [0, 1, (2)] [2] + AssertCountLiveLogFiles(3); + PutRandomData(2, 1000, 100); + WaitForFlush(2); + // [0, (1)] [0, 1, (2)] [(2)] [2] + AssertCountLiveLogFiles(4); + PutRandomData(3, 1, 100); + // [0, (1)] [0, 1, (2)] [(2)] [2, 3] + PutRandomData(1, 1, 100); + // [0, (1)] [0, 1, (2)] [(2)] [1, 2, 3] + AssertCountLiveLogFiles(4); + PutRandomData(1, 1000, 100); + WaitForFlush(1); + // [0, (1)] [0, (1), (2)] [(2)] [(1), 2, 3] [1] + AssertCountLiveLogFiles(5); + PutRandomData(0, 1000, 100); + WaitForFlush(0); + // [(0), (1)] [(0), (1), (2)] [(2)] [(1), 2, 3] [1, (0)] [0] + // delete obsolete logs --> + // [(1), 2, 3] [1, (0)] [0] + AssertCountLiveLogFiles(3); + PutRandomData(0, 1000, 100); + WaitForFlush(0); + // [(1), 2, 3] [1, (0)], [(0)] [0] + AssertCountLiveLogFiles(4); + PutRandomData(1, 1000, 100); + WaitForFlush(1); + // [(1), 2, 3] [(1), (0)] [(0)] [0, (1)] [1] + AssertCountLiveLogFiles(5); + PutRandomData(2, 1000, 100); + WaitForFlush(2); + // [(1), (2), 3] [(1), (0)] [(0)] [0, (1)] [1, (2)], [2] + AssertCountLiveLogFiles(6); + PutRandomData(3, 1000, 100); + WaitForFlush(3); + // [(1), (2), (3)] [(1), (0)] [(0)] [0, (1)] [1, (2)], [2, (3)] [3] + // delete obsolete logs --> + // [0, (1)] [1, (2)], [2, (3)] [3] + AssertCountLiveLogFiles(4); + Close(); +} + +TEST_F(ColumnFamilyTest, CrashAfterFlush) { + std::unique_ptr fault_env( + new FaultInjectionTestEnv(env_)); + db_options_.env = fault_env.get(); + Open(); + CreateColumnFamilies({"one"}); + + WriteBatch batch; + batch.Put(handles_[0], Slice("foo"), Slice("bar")); + batch.Put(handles_[1], Slice("foo"), Slice("bar")); + ASSERT_OK(db_->Write(WriteOptions(), &batch)); + Flush(0); + fault_env->SetFilesystemActive(false); + + std::vector names; + for (auto name : names_) { + if (name != "") { + names.push_back(name); + } + } + Close(); + fault_env->DropUnsyncedFileData(); + fault_env->ResetState(); + Open(names, {}); + + // Write batch should be atomic. + ASSERT_EQ(Get(0, "foo"), Get(1, "foo")); + + Close(); + db_options_.env = env_; +} + +// Makes sure that obsolete log files get deleted +TEST_F(ColumnFamilyTest, DifferentWriteBufferSizes) { + // disable flushing stale column families + db_options_.max_total_wal_size = std::numeric_limits::max(); + Open(); + CreateColumnFamilies({"one", "two", "three"}); + ColumnFamilyOptions default_cf, one, two, three; + // setup options. 
all column families have max_write_buffer_number setup to 10 + // "default" -> 100KB memtable, start flushing immediatelly + // "one" -> 200KB memtable, start flushing with two immutable memtables + // "two" -> 1MB memtable, start flushing with three immutable memtables + // "three" -> 90KB memtable, start flushing with four immutable memtables + default_cf.write_buffer_size = 100000; + default_cf.arena_block_size = 4 * 4096; + default_cf.max_write_buffer_number = 10; + default_cf.min_write_buffer_number_to_merge = 1; + default_cf.max_write_buffer_number_to_maintain = 0; + one.write_buffer_size = 200000; + one.arena_block_size = 4 * 4096; + one.max_write_buffer_number = 10; + one.min_write_buffer_number_to_merge = 2; + one.max_write_buffer_number_to_maintain = 1; + two.write_buffer_size = 1000000; + two.arena_block_size = 4 * 4096; + two.max_write_buffer_number = 10; + two.min_write_buffer_number_to_merge = 3; + two.max_write_buffer_number_to_maintain = 2; + three.write_buffer_size = 4096 * 22; + three.arena_block_size = 4096; + three.max_write_buffer_number = 10; + three.min_write_buffer_number_to_merge = 4; + three.max_write_buffer_number_to_maintain = -1; + + Reopen({default_cf, one, two, three}); + + int micros_wait_for_flush = 10000; + PutRandomData(0, 100, 1000); + WaitForFlush(0); + AssertNumberOfImmutableMemtables({0, 0, 0, 0}); + AssertCountLiveLogFiles(1); + PutRandomData(1, 200, 1000); + env_->SleepForMicroseconds(micros_wait_for_flush); + AssertNumberOfImmutableMemtables({0, 1, 0, 0}); + AssertCountLiveLogFiles(2); + PutRandomData(2, 1000, 1000); + env_->SleepForMicroseconds(micros_wait_for_flush); + AssertNumberOfImmutableMemtables({0, 1, 1, 0}); + AssertCountLiveLogFiles(3); + PutRandomData(2, 1000, 1000); + env_->SleepForMicroseconds(micros_wait_for_flush); + AssertNumberOfImmutableMemtables({0, 1, 2, 0}); + AssertCountLiveLogFiles(4); + PutRandomData(3, 93, 990); + env_->SleepForMicroseconds(micros_wait_for_flush); + AssertNumberOfImmutableMemtables({0, 1, 2, 1}); + AssertCountLiveLogFiles(5); + PutRandomData(3, 88, 990); + env_->SleepForMicroseconds(micros_wait_for_flush); + AssertNumberOfImmutableMemtables({0, 1, 2, 2}); + AssertCountLiveLogFiles(6); + PutRandomData(3, 88, 990); + env_->SleepForMicroseconds(micros_wait_for_flush); + AssertNumberOfImmutableMemtables({0, 1, 2, 3}); + AssertCountLiveLogFiles(7); + PutRandomData(0, 100, 1000); + WaitForFlush(0); + AssertNumberOfImmutableMemtables({0, 1, 2, 3}); + AssertCountLiveLogFiles(8); + PutRandomData(2, 100, 10000); + WaitForFlush(2); + AssertNumberOfImmutableMemtables({0, 1, 0, 3}); + AssertCountLiveLogFiles(9); + PutRandomData(3, 88, 990); + WaitForFlush(3); + AssertNumberOfImmutableMemtables({0, 1, 0, 0}); + AssertCountLiveLogFiles(10); + PutRandomData(3, 88, 990); + env_->SleepForMicroseconds(micros_wait_for_flush); + AssertNumberOfImmutableMemtables({0, 1, 0, 1}); + AssertCountLiveLogFiles(11); + PutRandomData(1, 200, 1000); + WaitForFlush(1); + AssertNumberOfImmutableMemtables({0, 0, 0, 1}); + AssertCountLiveLogFiles(5); + PutRandomData(3, 88 * 3, 990); + WaitForFlush(3); + PutRandomData(3, 88 * 4, 990); + WaitForFlush(3); + AssertNumberOfImmutableMemtables({0, 0, 0, 0}); + AssertCountLiveLogFiles(12); + PutRandomData(0, 100, 1000); + WaitForFlush(0); + AssertNumberOfImmutableMemtables({0, 0, 0, 0}); + AssertCountLiveLogFiles(12); + PutRandomData(2, 3 * 1000, 1000); + WaitForFlush(2); + AssertNumberOfImmutableMemtables({0, 0, 0, 0}); + AssertCountLiveLogFiles(12); + PutRandomData(1, 2*200, 1000); + 
WaitForFlush(1); + AssertNumberOfImmutableMemtables({0, 0, 0, 0}); + AssertCountLiveLogFiles(7); + Close(); +} + +#ifndef ROCKSDB_LITE // Cuckoo is not supported in lite +TEST_F(ColumnFamilyTest, MemtableNotSupportSnapshot) { + Open(); + auto* s1 = dbfull()->GetSnapshot(); + ASSERT_TRUE(s1 != nullptr); + dbfull()->ReleaseSnapshot(s1); + + // Add a column family that doesn't support snapshot + ColumnFamilyOptions first; + first.memtable_factory.reset(NewHashCuckooRepFactory(1024 * 1024)); + CreateColumnFamilies({"first"}, {first}); + auto* s2 = dbfull()->GetSnapshot(); + ASSERT_TRUE(s2 == nullptr); + + // Add a column family that supports snapshot. Snapshot stays not supported. + ColumnFamilyOptions second; + CreateColumnFamilies({"second"}, {second}); + auto* s3 = dbfull()->GetSnapshot(); + ASSERT_TRUE(s3 == nullptr); + Close(); +} +#endif // !ROCKSDB_LITE + +TEST_F(ColumnFamilyTest, DifferentMergeOperators) { + Open(); + CreateColumnFamilies({"first", "second"}); + ColumnFamilyOptions default_cf, first, second; + first.merge_operator = MergeOperators::CreateUInt64AddOperator(); + second.merge_operator = MergeOperators::CreateStringAppendOperator(); + Reopen({default_cf, first, second}); + + std::string one, two, three; + PutFixed64(&one, 1); + PutFixed64(&two, 2); + PutFixed64(&three, 3); + + ASSERT_OK(Put(0, "foo", two)); + ASSERT_OK(Put(0, "foo", one)); + ASSERT_TRUE(Merge(0, "foo", two).IsNotSupported()); + ASSERT_EQ(Get(0, "foo"), one); + + ASSERT_OK(Put(1, "foo", two)); + ASSERT_OK(Put(1, "foo", one)); + ASSERT_OK(Merge(1, "foo", two)); + ASSERT_EQ(Get(1, "foo"), three); + + ASSERT_OK(Put(2, "foo", two)); + ASSERT_OK(Put(2, "foo", one)); + ASSERT_OK(Merge(2, "foo", two)); + ASSERT_EQ(Get(2, "foo"), one + "," + two); + Close(); +} + +TEST_F(ColumnFamilyTest, DifferentCompactionStyles) { + Open(); + CreateColumnFamilies({"one", "two"}); + ColumnFamilyOptions default_cf, one, two; + db_options_.max_open_files = 20; // only 10 files in file cache + db_options_.disableDataSync = true; + + default_cf.compaction_style = kCompactionStyleLevel; + default_cf.num_levels = 3; + default_cf.write_buffer_size = 64 << 10; // 64KB + default_cf.target_file_size_base = 30 << 10; + default_cf.source_compaction_factor = 100; + BlockBasedTableOptions table_options; + table_options.no_block_cache = true; + default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + one.compaction_style = kCompactionStyleUniversal; + + one.num_levels = 1; + // trigger compaction if there are >= 4 files + one.level0_file_num_compaction_trigger = 4; + one.write_buffer_size = 120000; + + two.compaction_style = kCompactionStyleLevel; + two.num_levels = 4; + two.level0_file_num_compaction_trigger = 3; + two.write_buffer_size = 100000; + + Reopen({default_cf, one, two}); + + // SETUP column family "one" -- universal style + for (int i = 0; i < one.level0_file_num_compaction_trigger - 1; ++i) { + PutRandomData(1, 10, 12000); + PutRandomData(1, 1, 10); + WaitForFlush(1); + AssertFilesPerLevel(ToString(i + 1), 1); + } + + // SETUP column family "two" -- level style with 4 levels + for (int i = 0; i < two.level0_file_num_compaction_trigger - 1; ++i) { + PutRandomData(2, 10, 12000); + PutRandomData(2, 1, 10); + WaitForFlush(2); + AssertFilesPerLevel(ToString(i + 1), 2); + } + + // TRIGGER compaction "one" + PutRandomData(1, 10, 12000); + PutRandomData(1, 1, 10); + + // TRIGGER compaction "two" + PutRandomData(2, 10, 12000); + PutRandomData(2, 1, 10); + + // WAIT for compactions + WaitForCompaction(); + + // VERIFY 
compaction "one" + AssertFilesPerLevel("1", 1); + + // VERIFY compaction "two" + AssertFilesPerLevel("0,1", 2); + CompactAll(2); + AssertFilesPerLevel("0,1", 2); + + Close(); +} + +#ifndef ROCKSDB_LITE +// Sync points not supported in RocksDB Lite + +TEST_F(ColumnFamilyTest, MultipleManualCompactions) { + Open(); + CreateColumnFamilies({"one", "two"}); + ColumnFamilyOptions default_cf, one, two; + db_options_.max_open_files = 20; // only 10 files in file cache + db_options_.disableDataSync = true; + db_options_.max_background_compactions = 3; + + default_cf.compaction_style = kCompactionStyleLevel; + default_cf.num_levels = 3; + default_cf.write_buffer_size = 64 << 10; // 64KB + default_cf.target_file_size_base = 30 << 10; + default_cf.source_compaction_factor = 100; + BlockBasedTableOptions table_options; + table_options.no_block_cache = true; + default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + one.compaction_style = kCompactionStyleUniversal; + + one.num_levels = 1; + // trigger compaction if there are >= 4 files + one.level0_file_num_compaction_trigger = 4; + one.write_buffer_size = 120000; + + two.compaction_style = kCompactionStyleLevel; + two.num_levels = 4; + two.level0_file_num_compaction_trigger = 3; + two.write_buffer_size = 100000; + + Reopen({default_cf, one, two}); + + // SETUP column family "one" -- universal style + for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) { + PutRandomData(1, 10, 12000, true); + PutRandomData(1, 1, 10, true); + WaitForFlush(1); + AssertFilesPerLevel(ToString(i + 1), 1); + } + bool cf_1_1 = true; + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"ColumnFamilyTest::MultiManual:4", "ColumnFamilyTest::MultiManual:1"}, + {"ColumnFamilyTest::MultiManual:2", "ColumnFamilyTest::MultiManual:5"}, + {"ColumnFamilyTest::MultiManual:2", "ColumnFamilyTest::MultiManual:3"}}); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { + if (cf_1_1) { + TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:4"); + cf_1_1 = false; + TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:3"); + } + }); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + std::vector threads; + threads.emplace_back([&] { + CompactRangeOptions compact_options; + compact_options.exclusive_manual_compaction = false; + ASSERT_OK( + db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)); + }); + + // SETUP column family "two" -- level style with 4 levels + for (int i = 0; i < two.level0_file_num_compaction_trigger - 2; ++i) { + PutRandomData(2, 10, 12000); + PutRandomData(2, 1, 10); + WaitForFlush(2); + AssertFilesPerLevel(ToString(i + 1), 2); + } + threads.emplace_back([&] { + TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:1"); + CompactRangeOptions compact_options; + compact_options.exclusive_manual_compaction = false; + ASSERT_OK( + db_->CompactRange(compact_options, handles_[2], nullptr, nullptr)); + TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:2"); + }); + + TEST_SYNC_POINT("ColumnFamilyTest::MultiManual:5"); + for (auto& t : threads) { + t.join(); + } + + // VERIFY compaction "one" + AssertFilesPerLevel("1", 1); + + // VERIFY compaction "two" + AssertFilesPerLevel("0,1", 2); + CompactAll(2); + AssertFilesPerLevel("0,1", 2); + // Compare against saved keys + std::set::iterator key_iter = keys_.begin(); + while (key_iter != keys_.end()) { + ASSERT_NE("NOT_FOUND", Get(1, *key_iter)); + key_iter++; + } + Close(); +} + +TEST_F(ColumnFamilyTest, 
AutomaticAndManualCompactions) { + Open(); + CreateColumnFamilies({"one", "two"}); + ColumnFamilyOptions default_cf, one, two; + db_options_.max_open_files = 20; // only 10 files in file cache + db_options_.disableDataSync = true; + db_options_.max_background_compactions = 3; + db_options_.base_background_compactions = 3; + + default_cf.compaction_style = kCompactionStyleLevel; + default_cf.num_levels = 3; + default_cf.write_buffer_size = 64 << 10; // 64KB + default_cf.target_file_size_base = 30 << 10; + default_cf.source_compaction_factor = 100; + BlockBasedTableOptions table_options; + table_options.no_block_cache = true; + default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + one.compaction_style = kCompactionStyleUniversal; + + one.num_levels = 1; + // trigger compaction if there are >= 4 files + one.level0_file_num_compaction_trigger = 4; + one.write_buffer_size = 120000; + + two.compaction_style = kCompactionStyleLevel; + two.num_levels = 4; + two.level0_file_num_compaction_trigger = 3; + two.write_buffer_size = 100000; + + Reopen({default_cf, one, two}); + + bool cf_1_1 = true; + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"ColumnFamilyTest::AutoManual:4", "ColumnFamilyTest::AutoManual:1"}, + {"ColumnFamilyTest::AutoManual:2", "ColumnFamilyTest::AutoManual:5"}, + {"ColumnFamilyTest::AutoManual:2", "ColumnFamilyTest::AutoManual:3"}}); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { + if (cf_1_1) { + cf_1_1 = false; + TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:4"); + TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:3"); + } + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + // SETUP column family "one" -- universal style + for (int i = 0; i < one.level0_file_num_compaction_trigger; ++i) { + PutRandomData(1, 10, 12000, true); + PutRandomData(1, 1, 10, true); + WaitForFlush(1); + AssertFilesPerLevel(ToString(i + 1), 1); + } + + TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:1"); + + // SETUP column family "two" -- level style with 4 levels + for (int i = 0; i < two.level0_file_num_compaction_trigger - 2; ++i) { + PutRandomData(2, 10, 12000); + PutRandomData(2, 1, 10); + WaitForFlush(2); + AssertFilesPerLevel(ToString(i + 1), 2); + } + std::thread threads([&] { + CompactRangeOptions compact_options; + compact_options.exclusive_manual_compaction = false; + ASSERT_OK( + db_->CompactRange(compact_options, handles_[2], nullptr, nullptr)); + TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:2"); + }); + + TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:5"); + threads.join(); + + // WAIT for compactions + WaitForCompaction(); + + // VERIFY compaction "one" + AssertFilesPerLevel("1", 1); + + // VERIFY compaction "two" + AssertFilesPerLevel("0,1", 2); + CompactAll(2); + AssertFilesPerLevel("0,1", 2); + // Compare against saved keys + std::set::iterator key_iter = keys_.begin(); + while (key_iter != keys_.end()) { + ASSERT_NE("NOT_FOUND", Get(1, *key_iter)); + key_iter++; + } + Close(); +} + +TEST_F(ColumnFamilyTest, ManualAndAutomaticCompactions) { + Open(); + CreateColumnFamilies({"one", "two"}); + ColumnFamilyOptions default_cf, one, two; + db_options_.max_open_files = 20; // only 10 files in file cache + db_options_.disableDataSync = true; + db_options_.max_background_compactions = 3; + db_options_.base_background_compactions = 3; + + default_cf.compaction_style = kCompactionStyleLevel; + default_cf.num_levels = 3; + default_cf.write_buffer_size = 64 << 10; // 64KB + 
default_cf.target_file_size_base = 30 << 10; + default_cf.source_compaction_factor = 100; + BlockBasedTableOptions table_options; + table_options.no_block_cache = true; + default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + one.compaction_style = kCompactionStyleUniversal; + + one.num_levels = 1; + // trigger compaction if there are >= 4 files + one.level0_file_num_compaction_trigger = 4; + one.write_buffer_size = 120000; + + two.compaction_style = kCompactionStyleLevel; + two.num_levels = 4; + two.level0_file_num_compaction_trigger = 3; + two.write_buffer_size = 100000; + + Reopen({default_cf, one, two}); + + // SETUP column family "one" -- universal style + for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) { + PutRandomData(1, 10, 12000, true); + PutRandomData(1, 1, 10, true); + WaitForFlush(1); + AssertFilesPerLevel(ToString(i + 1), 1); + } + bool cf_1_1 = true; + bool cf_1_2 = true; + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"ColumnFamilyTest::ManualAuto:4", "ColumnFamilyTest::ManualAuto:1"}, + {"ColumnFamilyTest::ManualAuto:5", "ColumnFamilyTest::ManualAuto:2"}, + {"ColumnFamilyTest::ManualAuto:2", "ColumnFamilyTest::ManualAuto:3"}}); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { + if (cf_1_1) { + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4"); + cf_1_1 = false; + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:3"); + } else if (cf_1_2) { + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:2"); + cf_1_2 = false; + } + }); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + std::thread threads([&] { + CompactRangeOptions compact_options; + compact_options.exclusive_manual_compaction = false; + ASSERT_OK( + db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)); + }); + + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:1"); + + // SETUP column family "two" -- level style with 4 levels + for (int i = 0; i < two.level0_file_num_compaction_trigger; ++i) { + PutRandomData(2, 10, 12000); + PutRandomData(2, 1, 10); + WaitForFlush(2); + AssertFilesPerLevel(ToString(i + 1), 2); + } + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:5"); + threads.join(); + + // WAIT for compactions + WaitForCompaction(); + + // VERIFY compaction "one" + AssertFilesPerLevel("1", 1); + + // VERIFY compaction "two" + AssertFilesPerLevel("0,1", 2); + CompactAll(2); + AssertFilesPerLevel("0,1", 2); + // Compare against saved keys + std::set::iterator key_iter = keys_.begin(); + while (key_iter != keys_.end()) { + ASSERT_NE("NOT_FOUND", Get(1, *key_iter)); + key_iter++; + } + Close(); +} + +TEST_F(ColumnFamilyTest, SameCFManualManualCompactions) { + Open(); + CreateColumnFamilies({"one"}); + ColumnFamilyOptions default_cf, one; + db_options_.max_open_files = 20; // only 10 files in file cache + db_options_.disableDataSync = true; + db_options_.max_background_compactions = 3; + db_options_.base_background_compactions = 3; + + default_cf.compaction_style = kCompactionStyleLevel; + default_cf.num_levels = 3; + default_cf.write_buffer_size = 64 << 10; // 64KB + default_cf.target_file_size_base = 30 << 10; + default_cf.source_compaction_factor = 100; + BlockBasedTableOptions table_options; + table_options.no_block_cache = true; + default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + one.compaction_style = kCompactionStyleUniversal; + + one.num_levels = 1; + // trigger compaction if there are >= 4 files + one.level0_file_num_compaction_trigger = 4; + 
one.write_buffer_size = 120000; + + Reopen({default_cf, one}); + + // SETUP column family "one" -- universal style + for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) { + PutRandomData(1, 10, 12000, true); + PutRandomData(1, 1, 10, true); + WaitForFlush(1); + AssertFilesPerLevel(ToString(i + 1), 1); + } + bool cf_1_1 = true; + bool cf_1_2 = true; + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"ColumnFamilyTest::ManualManual:4", "ColumnFamilyTest::ManualManual:2"}, + {"ColumnFamilyTest::ManualManual:4", "ColumnFamilyTest::ManualManual:5"}, + {"ColumnFamilyTest::ManualManual:1", "ColumnFamilyTest::ManualManual:2"}, + {"ColumnFamilyTest::ManualManual:1", + "ColumnFamilyTest::ManualManual:3"}}); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { + if (cf_1_1) { + TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:4"); + cf_1_1 = false; + TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:3"); + } else if (cf_1_2) { + TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:2"); + cf_1_2 = false; + } + }); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + std::thread threads([&] { + CompactRangeOptions compact_options; + compact_options.exclusive_manual_compaction = true; + ASSERT_OK( + db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)); + }); + + TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:5"); + + WaitForFlush(1); + + // Add more L0 files and force another manual compaction + for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) { + PutRandomData(1, 10, 12000, true); + PutRandomData(1, 1, 10, true); + WaitForFlush(1); + AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i), + 1); + } + + std::thread threads1([&] { + CompactRangeOptions compact_options; + compact_options.exclusive_manual_compaction = false; + ASSERT_OK( + db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)); + }); + + TEST_SYNC_POINT("ColumnFamilyTest::ManualManual:1"); + + threads.join(); + threads1.join(); + WaitForCompaction(); + // VERIFY compaction "one" + ASSERT_LE(NumTableFilesAtLevel(0, 1), 2); + + // Compare against saved keys + std::set::iterator key_iter = keys_.begin(); + while (key_iter != keys_.end()) { + ASSERT_NE("NOT_FOUND", Get(1, *key_iter)); + key_iter++; + } + Close(); +} + +TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactions) { + Open(); + CreateColumnFamilies({"one"}); + ColumnFamilyOptions default_cf, one; + db_options_.max_open_files = 20; // only 10 files in file cache + db_options_.disableDataSync = true; + db_options_.max_background_compactions = 3; + db_options_.base_background_compactions = 3; + + default_cf.compaction_style = kCompactionStyleLevel; + default_cf.num_levels = 3; + default_cf.write_buffer_size = 64 << 10; // 64KB + default_cf.target_file_size_base = 30 << 10; + default_cf.source_compaction_factor = 100; + BlockBasedTableOptions table_options; + table_options.no_block_cache = true; + default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + one.compaction_style = kCompactionStyleUniversal; + + one.num_levels = 1; + // trigger compaction if there are >= 4 files + one.level0_file_num_compaction_trigger = 4; + one.write_buffer_size = 120000; + + Reopen({default_cf, one}); + + // SETUP column family "one" -- universal style + for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) { + PutRandomData(1, 10, 12000, true); + PutRandomData(1, 1, 10, true); + WaitForFlush(1); + 
AssertFilesPerLevel(ToString(i + 1), 1); + } + bool cf_1_1 = true; + bool cf_1_2 = true; + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"ColumnFamilyTest::ManualAuto:4", "ColumnFamilyTest::ManualAuto:2"}, + {"ColumnFamilyTest::ManualAuto:4", "ColumnFamilyTest::ManualAuto:5"}, + {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:2"}, + {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:3"}}); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { + if (cf_1_1) { + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4"); + cf_1_1 = false; + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:3"); + } else if (cf_1_2) { + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:2"); + cf_1_2 = false; + } + }); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + std::thread threads([&] { + CompactRangeOptions compact_options; + compact_options.exclusive_manual_compaction = false; + ASSERT_OK( + db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)); + }); + + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:5"); + + WaitForFlush(1); + + // Add more L0 files and force automatic compaction + for (int i = 0; i < one.level0_file_num_compaction_trigger; ++i) { + PutRandomData(1, 10, 12000, true); + PutRandomData(1, 1, 10, true); + WaitForFlush(1); + AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i), + 1); + } + + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:1"); + + threads.join(); + WaitForCompaction(); + // VERIFY compaction "one" + ASSERT_LE(NumTableFilesAtLevel(0, 1), 2); + + // Compare against saved keys + std::set::iterator key_iter = keys_.begin(); + while (key_iter != keys_.end()) { + ASSERT_NE("NOT_FOUND", Get(1, *key_iter)); + key_iter++; + } + Close(); +} + +TEST_F(ColumnFamilyTest, SameCFManualAutomaticCompactionsLevel) { + Open(); + CreateColumnFamilies({"one"}); + ColumnFamilyOptions default_cf, one; + db_options_.max_open_files = 20; // only 10 files in file cache + db_options_.disableDataSync = true; + db_options_.max_background_compactions = 3; + db_options_.base_background_compactions = 3; + + default_cf.compaction_style = kCompactionStyleLevel; + default_cf.num_levels = 3; + default_cf.write_buffer_size = 64 << 10; // 64KB + default_cf.target_file_size_base = 30 << 10; + default_cf.source_compaction_factor = 100; + BlockBasedTableOptions table_options; + table_options.no_block_cache = true; + default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + one.compaction_style = kCompactionStyleLevel; + + one.num_levels = 1; + // trigger compaction if there are >= 4 files + one.level0_file_num_compaction_trigger = 4; + one.write_buffer_size = 120000; + + Reopen({default_cf, one}); + + // SETUP column family "one" -- level style + for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) { + PutRandomData(1, 10, 12000, true); + PutRandomData(1, 1, 10, true); + WaitForFlush(1); + AssertFilesPerLevel(ToString(i + 1), 1); + } + bool cf_1_1 = true; + bool cf_1_2 = true; + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"ColumnFamilyTest::ManualAuto:4", "ColumnFamilyTest::ManualAuto:2"}, + {"ColumnFamilyTest::ManualAuto:4", "ColumnFamilyTest::ManualAuto:5"}, + {"ColumnFamilyTest::ManualAuto:3", "ColumnFamilyTest::ManualAuto:2"}, + {"LevelCompactionPicker::PickCompactionBySize:0", + "ColumnFamilyTest::ManualAuto:3"}, + {"ColumnFamilyTest::ManualAuto:1", "ColumnFamilyTest::ManualAuto:3"}}); + 
rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { + if (cf_1_1) { + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:4"); + cf_1_1 = false; + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:3"); + } else if (cf_1_2) { + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:2"); + cf_1_2 = false; + } + }); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + std::thread threads([&] { + CompactRangeOptions compact_options; + compact_options.exclusive_manual_compaction = false; + ASSERT_OK( + db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)); + }); + + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:5"); + + // Add more L0 files and force automatic compaction + for (int i = 0; i < one.level0_file_num_compaction_trigger; ++i) { + PutRandomData(1, 10, 12000, true); + PutRandomData(1, 1, 10, true); + WaitForFlush(1); + AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i), + 1); + } + + TEST_SYNC_POINT("ColumnFamilyTest::ManualAuto:1"); + + threads.join(); + WaitForCompaction(); + // VERIFY compaction "one" + AssertFilesPerLevel("0,1", 1); + + // Compare against saved keys + std::set::iterator key_iter = keys_.begin(); + while (key_iter != keys_.end()) { + ASSERT_NE("NOT_FOUND", Get(1, *key_iter)); + key_iter++; + } + Close(); +} + +// This test checks for automatic getting a conflict if there is a +// manual which has not yet been scheduled. +// The manual compaction waits in NotScheduled +// We generate more files and then trigger an automatic compaction +// This will wait because there is an unscheduled manual compaction. +// Once the conflict is hit, the manual compaction starts and ends +// Then another automatic will start and end. +TEST_F(ColumnFamilyTest, SameCFManualAutomaticConflict) { + Open(); + CreateColumnFamilies({"one"}); + ColumnFamilyOptions default_cf, one; + db_options_.max_open_files = 20; // only 10 files in file cache + db_options_.disableDataSync = true; + db_options_.max_background_compactions = 3; + db_options_.base_background_compactions = 3; + + default_cf.compaction_style = kCompactionStyleLevel; + default_cf.num_levels = 3; + default_cf.write_buffer_size = 64 << 10; // 64KB + default_cf.target_file_size_base = 30 << 10; + default_cf.source_compaction_factor = 100; + BlockBasedTableOptions table_options; + table_options.no_block_cache = true; + default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + one.compaction_style = kCompactionStyleUniversal; + + one.num_levels = 1; + // trigger compaction if there are >= 4 files + one.level0_file_num_compaction_trigger = 4; + one.write_buffer_size = 120000; + + Reopen({default_cf, one}); + + // SETUP column family "one" -- universal style + for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) { + PutRandomData(1, 10, 12000, true); + PutRandomData(1, 1, 10, true); + WaitForFlush(1); + AssertFilesPerLevel(ToString(i + 1), 1); + } + bool cf_1_1 = true; + bool cf_1_2 = true; + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"DBImpl::BackgroundCompaction()::Conflict", + "ColumnFamilyTest::ManualAutoCon:7"}, + {"ColumnFamilyTest::ManualAutoCon:9", + "ColumnFamilyTest::ManualAutoCon:8"}, + {"ColumnFamilyTest::ManualAutoCon:2", + "ColumnFamilyTest::ManualAutoCon:6"}, + {"ColumnFamilyTest::ManualAutoCon:4", + "ColumnFamilyTest::ManualAutoCon:5"}, + {"ColumnFamilyTest::ManualAutoCon:1", + "ColumnFamilyTest::ManualAutoCon:2"}, + {"ColumnFamilyTest::ManualAutoCon:1", + 
"ColumnFamilyTest::ManualAutoCon:3"}}); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { + if (cf_1_1) { + TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:4"); + cf_1_1 = false; + TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:3"); + } else if (cf_1_2) { + cf_1_2 = false; + TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:2"); + } + }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::RunManualCompaction:NotScheduled", [&](void* arg) { + InstrumentedMutex* mutex = static_cast(arg); + mutex->Unlock(); + TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:9"); + TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:7"); + mutex->Lock(); + }); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + std::thread threads([&] { + CompactRangeOptions compact_options; + compact_options.exclusive_manual_compaction = false; + ASSERT_OK( + db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)); + TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:6"); + }); + + TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:8"); + WaitForFlush(1); + + // Add more L0 files and force automatic compaction + for (int i = 0; i < one.level0_file_num_compaction_trigger; ++i) { + PutRandomData(1, 10, 12000, true); + PutRandomData(1, 1, 10, true); + WaitForFlush(1); + AssertFilesPerLevel(ToString(one.level0_file_num_compaction_trigger + i), + 1); + } + + TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:5"); + // Add more L0 files and force automatic compaction + for (int i = 0; i < one.level0_file_num_compaction_trigger; ++i) { + PutRandomData(1, 10, 12000, true); + PutRandomData(1, 1, 10, true); + WaitForFlush(1); + } + TEST_SYNC_POINT("ColumnFamilyTest::ManualAutoCon:1"); + + threads.join(); + WaitForCompaction(); + // VERIFY compaction "one" + ASSERT_LE(NumTableFilesAtLevel(0, 1), 3); + + // Compare against saved keys + std::set::iterator key_iter = keys_.begin(); + while (key_iter != keys_.end()) { + ASSERT_NE("NOT_FOUND", Get(1, *key_iter)); + key_iter++; + } + + Close(); +} + +// In this test, we generate enough files to trigger automatic compactions. +// The automatic compaction waits in NonTrivial:AfterRun +// We generate more files and then trigger an automatic compaction +// This will wait because the automatic compaction has files it needs. +// Once the conflict is hit, the automatic compaction starts and ends +// Then the manual will run and end. 
+TEST_F(ColumnFamilyTest, SameCFAutomaticManualCompactions) {
+  Open();
+  CreateColumnFamilies({"one"});
+  ColumnFamilyOptions default_cf, one;
+  db_options_.max_open_files = 20;  // only 10 files in file cache
+  db_options_.disableDataSync = true;
+  db_options_.max_background_compactions = 3;
+  db_options_.base_background_compactions = 3;
+
+  default_cf.compaction_style = kCompactionStyleLevel;
+  default_cf.num_levels = 3;
+  default_cf.write_buffer_size = 64 << 10;  // 64KB
+  default_cf.target_file_size_base = 30 << 10;
+  default_cf.source_compaction_factor = 100;
+  BlockBasedTableOptions table_options;
+  table_options.no_block_cache = true;
+  default_cf.table_factory.reset(NewBlockBasedTableFactory(table_options));
+
+  one.compaction_style = kCompactionStyleUniversal;
+
+  one.num_levels = 1;
+  // trigger compaction if there are >= 4 files
+  one.level0_file_num_compaction_trigger = 4;
+  one.write_buffer_size = 120000;
+
+  Reopen({default_cf, one});
+
+  bool cf_1_1 = true;
+  bool cf_1_2 = true;
+  rocksdb::SyncPoint::GetInstance()->LoadDependency(
+      {{"ColumnFamilyTest::AutoManual:4", "ColumnFamilyTest::AutoManual:2"},
+       {"ColumnFamilyTest::AutoManual:4", "ColumnFamilyTest::AutoManual:5"},
+       {"CompactionPicker::CompactRange:Conflict",
+        "ColumnFamilyTest::AutoManual:3"}});
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) {
+        if (cf_1_1) {
+          TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:4");
+          cf_1_1 = false;
+          TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:3");
+        } else if (cf_1_2) {
+          TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:2");
+          cf_1_2 = false;
+        }
+      });
+
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  // SETUP column family "one" -- universal style
+  for (int i = 0; i < one.level0_file_num_compaction_trigger; ++i) {
+    PutRandomData(1, 10, 12000, true);
+    PutRandomData(1, 1, 10, true);
+    WaitForFlush(1);
+    AssertFilesPerLevel(ToString(i + 1), 1);
+  }
+
+  TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:5");
+
+  // Add more L0 files and force automatic compaction
+  for (int i = 0; i < one.level0_file_num_compaction_trigger - 2; ++i) {
+    PutRandomData(1, 10, 12000, true);
+    PutRandomData(1, 1, 10, true);
+    WaitForFlush(1);
+  }
+
+  CompactRangeOptions compact_options;
+  compact_options.exclusive_manual_compaction = false;
+  ASSERT_OK(db_->CompactRange(compact_options, handles_[1], nullptr, nullptr));
+
+  TEST_SYNC_POINT("ColumnFamilyTest::AutoManual:1");
+
+  WaitForCompaction();
+  // VERIFY compaction "one"
+  AssertFilesPerLevel("1", 1);
+  // Compare against saved keys
+  std::set<std::string>::iterator key_iter = keys_.begin();
+  while (key_iter != keys_.end()) {
+    ASSERT_NE("NOT_FOUND", Get(1, *key_iter));
+    key_iter++;
+  }
+
+  Close();
+}
+#endif  // !ROCKSDB_LITE
+
+#ifndef ROCKSDB_LITE  // Tailing iterator not supported
+namespace {
+std::string IterStatus(Iterator* iter) {
+  std::string result;
+  if (iter->Valid()) {
+    result = iter->key().ToString() + "->" + iter->value().ToString();
+  } else {
+    result = "(invalid)";
+  }
+  return result;
+}
+}  // anonymous namespace
+
+TEST_F(ColumnFamilyTest, NewIteratorsTest) {
+  // iter == 0 -- no tailing
+  // iter == 1 -- tailing
+  for (int iter = 0; iter < 2; ++iter) {
+    Open();
+    CreateColumnFamiliesAndReopen({"one", "two"});
+    ASSERT_OK(Put(0, "a", "b"));
+    ASSERT_OK(Put(1, "b", "a"));
+    ASSERT_OK(Put(2, "c", "m"));
+    ASSERT_OK(Put(2, "v", "t"));
+    std::vector<Iterator*> iterators;
+    ReadOptions options;
+    options.tailing = (iter == 1);
+    ASSERT_OK(db_->NewIterators(options,
+                                handles_, &iterators));
+
+    for (auto it : iterators) {
+      it->SeekToFirst();
+    }
+    ASSERT_EQ(IterStatus(iterators[0]), "a->b");
+    ASSERT_EQ(IterStatus(iterators[1]), "b->a");
+    ASSERT_EQ(IterStatus(iterators[2]), "c->m");
+
+    ASSERT_OK(Put(1, "x", "x"));
+
+    for (auto it : iterators) {
+      it->Next();
+    }
+
+    ASSERT_EQ(IterStatus(iterators[0]), "(invalid)");
+    if (iter == 0) {
+      // no tailing
+      ASSERT_EQ(IterStatus(iterators[1]), "(invalid)");
+    } else {
+      // tailing
+      ASSERT_EQ(IterStatus(iterators[1]), "x->x");
+    }
+    ASSERT_EQ(IterStatus(iterators[2]), "v->t");
+
+    for (auto it : iterators) {
+      delete it;
+    }
+    Destroy();
+  }
+}
+#endif  // !ROCKSDB_LITE
+
+#ifndef ROCKSDB_LITE  // ReadOnlyDB is not supported
+TEST_F(ColumnFamilyTest, ReadOnlyDBTest) {
+  Open();
+  CreateColumnFamiliesAndReopen({"one", "two", "three", "four"});
+  ASSERT_OK(Put(0, "a", "b"));
+  ASSERT_OK(Put(1, "foo", "bla"));
+  ASSERT_OK(Put(2, "foo", "blabla"));
+  ASSERT_OK(Put(3, "foo", "blablabla"));
+  ASSERT_OK(Put(4, "foo", "blablablabla"));
+
+  DropColumnFamilies({2});
+  Close();
+  // open only a subset of column families
+  AssertOpenReadOnly({"default", "one", "four"});
+  ASSERT_EQ("NOT_FOUND", Get(0, "foo"));
+  ASSERT_EQ("bla", Get(1, "foo"));
+  ASSERT_EQ("blablablabla", Get(2, "foo"));
+
+  // test NewIterators
+  {
+    std::vector<Iterator*> iterators;
+    ASSERT_OK(db_->NewIterators(ReadOptions(), handles_, &iterators));
+    for (auto it : iterators) {
+      it->SeekToFirst();
+    }
+    ASSERT_EQ(IterStatus(iterators[0]), "a->b");
+    ASSERT_EQ(IterStatus(iterators[1]), "foo->bla");
+    ASSERT_EQ(IterStatus(iterators[2]), "foo->blablablabla");
+    for (auto it : iterators) {
+      it->Next();
+    }
+    ASSERT_EQ(IterStatus(iterators[0]), "(invalid)");
+    ASSERT_EQ(IterStatus(iterators[1]), "(invalid)");
+    ASSERT_EQ(IterStatus(iterators[2]), "(invalid)");
+
+    for (auto it : iterators) {
+      delete it;
+    }
+  }
+
+  Close();
+  // can't open dropped column family
+  Status s = OpenReadOnly({"default", "one", "two"});
+  ASSERT_TRUE(!s.ok());
+
+  // can't open without specifying the default column family
+  s = OpenReadOnly({"one", "four"});
+  ASSERT_TRUE(!s.ok());
+}
+#endif  // !ROCKSDB_LITE
+
+TEST_F(ColumnFamilyTest, DontRollEmptyLogs) {
+  Open();
+  CreateColumnFamiliesAndReopen({"one", "two", "three", "four"});
+
+  for (size_t i = 0; i < handles_.size(); ++i) {
+    PutRandomData(static_cast<int>(i), 10, 100);
+  }
+  int num_writable_file_start = env_->GetNumberOfNewWritableFileCalls();
+  // this will trigger the flushes
+  for (int i = 0; i <= 4; ++i) {
+    ASSERT_OK(Flush(i));
+  }
+
+  for (int i = 0; i < 4; ++i) {
+    WaitForFlush(i);
+  }
+  int total_new_writable_files =
+      env_->GetNumberOfNewWritableFileCalls() - num_writable_file_start;
+  ASSERT_EQ(static_cast<size_t>(total_new_writable_files),
+            handles_.size() + 1);
+  Close();
+}
+
+TEST_F(ColumnFamilyTest, FlushStaleColumnFamilies) {
+  Open();
+  CreateColumnFamilies({"one", "two"});
+  ColumnFamilyOptions default_cf, one, two;
+  default_cf.write_buffer_size = 100000;  // small write buffer size
+  default_cf.arena_block_size = 4096;
+  default_cf.disable_auto_compactions = true;
+  one.disable_auto_compactions = true;
+  two.disable_auto_compactions = true;
+  db_options_.max_total_wal_size = 210000;
+
+  Reopen({default_cf, one, two});
+
+  PutRandomData(2, 1, 10);  // 10 bytes
+  for (int i = 0; i < 2; ++i) {
+    PutRandomData(0, 100, 1000);  // flush
+    WaitForFlush(0);
+
+    AssertCountLiveFiles(i + 1);
+  }
+  // third flush:
+  // now CF [two] should be detected as stale and flushed;
+  // column family [one] should not be flushed since it's empty
+  PutRandomData(0, 100, 1000);  // flush
+  WaitForFlush(0);
+  WaitForFlush(2);
+  // 3 files for the default column family, 1 file for column family [two],
+  // zero files for column family [one], because it's empty
+  AssertCountLiveFiles(4);
+
+  Flush(0);
+  ASSERT_EQ(0, dbfull()->TEST_total_log_size());
+  Close();
+}
+
+TEST_F(ColumnFamilyTest, CreateMissingColumnFamilies) {
+  Status s = TryOpen({"one", "two"});
+  ASSERT_TRUE(!s.ok());
+  db_options_.create_missing_column_families = true;
+  s = TryOpen({"default", "one", "two"});
+  ASSERT_TRUE(s.ok());
+  Close();
+}
+
+TEST_F(ColumnFamilyTest, SanitizeOptions) {
+  DBOptions db_options;
+  for (int s = kCompactionStyleLevel; s <= kCompactionStyleUniversal; ++s) {
+    for (int l = 0; l <= 2; l++) {
+      for (int i = 1; i <= 3; i++) {
+        for (int j = 1; j <= 3; j++) {
+          for (int k = 1; k <= 3; k++) {
+            ColumnFamilyOptions original;
+            original.compaction_style = static_cast<CompactionStyle>(s);
+            original.num_levels = l;
+            original.level0_stop_writes_trigger = i;
+            original.level0_slowdown_writes_trigger = j;
+            original.level0_file_num_compaction_trigger = k;
+            original.write_buffer_size =
+                l * 4 * 1024 * 1024 + i * 1024 * 1024 + j * 1024 + k;
+
+            ColumnFamilyOptions result =
+                SanitizeOptions(db_options, nullptr, original);
+            ASSERT_TRUE(result.level0_stop_writes_trigger >=
+                        result.level0_slowdown_writes_trigger);
+            ASSERT_TRUE(result.level0_slowdown_writes_trigger >=
+                        result.level0_file_num_compaction_trigger);
+            ASSERT_TRUE(result.level0_file_num_compaction_trigger ==
+                        original.level0_file_num_compaction_trigger);
+            if (s == kCompactionStyleLevel) {
+              ASSERT_GE(result.num_levels, 2);
+            } else {
+              ASSERT_GE(result.num_levels, 1);
+              if (original.num_levels >= 1) {
+                ASSERT_EQ(result.num_levels, original.num_levels);
+              }
+            }
+
+            // Make sure SanitizeOptions sets arena_block_size to 1/8 of
+            // the write_buffer_size, rounded up to a multiple of 4k.
+            size_t expected_arena_block_size =
+                l * 4 * 1024 * 1024 / 8 + i * 1024 * 1024 / 8;
+            if (j + k != 0) {
+              // not a multiple of 4k, round up 4k
+              expected_arena_block_size += 4 * 1024;
+            }
+            ASSERT_EQ(expected_arena_block_size, result.arena_block_size);
+          }
+        }
+      }
+    }
+  }
+}
+
+TEST_F(ColumnFamilyTest, ReadDroppedColumnFamily) {
+  // iter 0 -- drop CF, don't reopen
+  // iter 1 -- delete CF, reopen
+  for (int iter = 0; iter < 2; ++iter) {
+    db_options_.create_missing_column_families = true;
+    db_options_.max_open_files = 20;
+    // delete obsolete files always
+    db_options_.delete_obsolete_files_period_micros = 0;
+    Open({"default", "one", "two"});
+    ColumnFamilyOptions options;
+    options.level0_file_num_compaction_trigger = 100;
+    options.level0_slowdown_writes_trigger = 200;
+    options.level0_stop_writes_trigger = 200;
+    options.write_buffer_size = 100000;  // small write buffer size
+    Reopen({options, options, options});
+
+    // 1MB should create ~10 files for each CF
+    int kKeysNum = 10000;
+    PutRandomData(0, kKeysNum, 100);
+    PutRandomData(1, kKeysNum, 100);
+    PutRandomData(2, kKeysNum, 100);
+
+    {
+      std::unique_ptr<Iterator> iterator(
+          db_->NewIterator(ReadOptions(), handles_[2]));
+      iterator->SeekToFirst();
+
+      if (iter == 0) {
+        // Drop CF two
+        ASSERT_OK(db_->DropColumnFamily(handles_[2]));
+      } else {
+        // delete CF two
+        db_->DestroyColumnFamilyHandle(handles_[2]);
+        handles_[2] = nullptr;
+      }
+      // Make sure the iterator created above can still be used.
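+      // (Dropping a column family only marks it dropped in the MANIFEST;
+      // the data stays readable through any existing handle or iterator,
+      // and the files are deleted only after the last reference to them is
+      // released. That contract is what the loop below relies on.)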
+      int count = 0;
+      for (; iterator->Valid(); iterator->Next()) {
+        ASSERT_OK(iterator->status());
+        ++count;
+      }
+      ASSERT_OK(iterator->status());
+      ASSERT_EQ(count, kKeysNum);
+    }
+
+    // Add a bunch more data to the other CFs
+    PutRandomData(0, kKeysNum, 100);
+    PutRandomData(1, kKeysNum, 100);
+
+    if (iter == 1) {
+      Reopen();
+    }
+
+    // Since we didn't delete the CF handle, RocksDB's contract guarantees
+    // that we're still able to read the dropped CF
+    for (int i = 0; i < 3; ++i) {
+      std::unique_ptr<Iterator> iterator(
+          db_->NewIterator(ReadOptions(), handles_[i]));
+      int count = 0;
+      for (iterator->SeekToFirst(); iterator->Valid(); iterator->Next()) {
+        ASSERT_OK(iterator->status());
+        ++count;
+      }
+      ASSERT_OK(iterator->status());
+      ASSERT_EQ(count, kKeysNum * ((i == 2) ? 1 : 2));
+    }
+
+    Close();
+    Destroy();
+  }
+}
+
+TEST_F(ColumnFamilyTest, FlushAndDropRaceCondition) {
+  db_options_.create_missing_column_families = true;
+  Open({"default", "one"});
+  ColumnFamilyOptions options;
+  options.level0_file_num_compaction_trigger = 100;
+  options.level0_slowdown_writes_trigger = 200;
+  options.level0_stop_writes_trigger = 200;
+  options.max_write_buffer_number = 20;
+  options.write_buffer_size = 100000;  // small write buffer size
+  Reopen({options, options});
+
+  rocksdb::SyncPoint::GetInstance()->LoadDependency(
+      {{"VersionSet::LogAndApply::ColumnFamilyDrop:0",
+        "FlushJob::WriteLevel0Table"},
+       {"VersionSet::LogAndApply::ColumnFamilyDrop:1",
+        "FlushJob::InstallResults"},
+       {"FlushJob::InstallResults",
+        "VersionSet::LogAndApply::ColumnFamilyDrop:2"}});
+
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+  test::SleepingBackgroundTask sleeping_task;
+
+  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task,
+                 Env::Priority::HIGH);
+
+  // 1MB should create ~10 files for each CF
+  int kKeysNum = 10000;
+  PutRandomData(1, kKeysNum, 100);
+
+  std::vector<std::thread> threads;
+  threads.emplace_back([&] { ASSERT_OK(db_->DropColumnFamily(handles_[1])); });
+
+  sleeping_task.WakeUp();
+  sleeping_task.WaitUntilDone();
+  sleeping_task.Reset();
+  // now we sleep again;
+  // this is just so we're certain that the flush job finished
+  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task,
+                 Env::Priority::HIGH);
+  sleeping_task.WakeUp();
+  sleeping_task.WaitUntilDone();
+
+  {
+    // Since we didn't delete the CF handle, RocksDB's contract guarantees
+    // that we're still able to read the dropped CF
+    std::unique_ptr<Iterator> iterator(
+        db_->NewIterator(ReadOptions(), handles_[1]));
+    int count = 0;
+    for (iterator->SeekToFirst(); iterator->Valid(); iterator->Next()) {
+      ASSERT_OK(iterator->status());
+      ++count;
+    }
+    ASSERT_OK(iterator->status());
+    ASSERT_EQ(count, kKeysNum);
+  }
+  for (auto& t : threads) {
+    t.join();
+  }
+
+  Close();
+  Destroy();
+}
+
+#ifndef ROCKSDB_LITE
+// skipped as persisting options is not supported in ROCKSDB_LITE
+namespace {
+std::atomic<int> test_stage(0);
+const int kMainThreadStartPersistingOptionsFile = 1;
+const int kChildThreadFinishDroppingColumnFamily = 2;
+const int kChildThreadWaitingMainThreadPersistOptions = 3;
+void DropSingleColumnFamily(ColumnFamilyTest* cf_test, int cf_id,
+                            std::vector<Comparator*>* comparators) {
+  while (test_stage < kMainThreadStartPersistingOptionsFile) {
+    Env::Default()->SleepForMicroseconds(100);
+  }
+  cf_test->DropColumnFamilies({cf_id});
+  if ((*comparators)[cf_id]) {
+    delete (*comparators)[cf_id];
+    (*comparators)[cf_id] = nullptr;
+  }
+  test_stage = kChildThreadFinishDroppingColumnFamily;
+}
+}  // namespace
+
+TEST_F(ColumnFamilyTest, CreateAndDropRace) {
+  const int kCfCount = 5;
+  std::vector<ColumnFamilyOptions> cf_opts;
+  std::vector<Comparator*> comparators;
+  for (int i = 0; i < kCfCount; ++i) {
+    cf_opts.emplace_back();
+    comparators.push_back(new test::SimpleSuffixReverseComparator());
+    cf_opts.back().comparator = comparators.back();
+  }
+  db_options_.create_if_missing = true;
+  db_options_.create_missing_column_families = true;
+
+  auto main_thread_id = std::this_thread::get_id();
+
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "PersistRocksDBOptions:start", [&](void* arg) {
+        auto current_thread_id = std::this_thread::get_id();
+        // If it's the main thread hitting this sync point, then it
+        // will be blocked until some other thread updates the test_stage.
+        if (main_thread_id == current_thread_id) {
+          test_stage = kMainThreadStartPersistingOptionsFile;
+          while (test_stage < kChildThreadFinishDroppingColumnFamily) {
+            Env::Default()->SleepForMicroseconds(100);
+          }
+        }
+      });
+
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "WriteThread::EnterUnbatched:Wait", [&](void* arg) {
+        // This means a thread doing DropColumnFamily() is waiting for
+        // another thread to finish persisting options.
+        // In that case, we update the test_stage to unblock the main thread.
+        test_stage = kChildThreadWaitingMainThreadPersistOptions;
+
+        // Note that based on the test setting, this must not be the
+        // main thread.
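+        // (The handshake is a small three-stage atomic state machine,
+        // roughly:
+        //
+        //   std::atomic<int> stage(0);
+        //   // main thread:  stage = 1; while (stage < 2) { /* spin */ }
+        //   // child thread: while (stage < 1) { /* spin */ } ... stage = 2;
+        //
+        // which is what the kMainThread*/kChildThread* constants above
+        // encode.)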
+ ASSERT_NE(main_thread_id, std::this_thread::get_id()); + }); + + // Create a database with four column families + Open({"default", "one", "two", "three"}, + {cf_opts[0], cf_opts[1], cf_opts[2], cf_opts[3]}); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + // Start a thread that will drop the first column family + // and its comparator + std::thread drop_cf_thread(DropSingleColumnFamily, this, 1, &comparators); + + DropColumnFamilies({2}); + + drop_cf_thread.join(); + Close(); + Destroy(); + for (auto* comparator : comparators) { + if (comparator) { + delete comparator; + } + } +} +#endif // !ROCKSDB_LITE + +TEST_F(ColumnFamilyTest, WriteStallSingleColumnFamily) { + const uint64_t kBaseRate = 810000u; + db_options_.delayed_write_rate = kBaseRate; + db_options_.base_background_compactions = 2; + db_options_.max_background_compactions = 6; + + Open({"default"}); + ColumnFamilyData* cfd = + static_cast(db_->DefaultColumnFamily())->cfd(); + + VersionStorageInfo* vstorage = cfd->current()->storage_info(); + + MutableCFOptions mutable_cf_options( + Options(db_options_, column_family_options_), + ImmutableCFOptions(Options(db_options_, column_family_options_))); + + mutable_cf_options.level0_slowdown_writes_trigger = 20; + mutable_cf_options.level0_stop_writes_trigger = 10000; + mutable_cf_options.soft_pending_compaction_bytes_limit = 200; + mutable_cf_options.hard_pending_compaction_bytes_limit = 2000; + mutable_cf_options.disable_auto_compactions = false; + + vstorage->TEST_set_estimated_compaction_needed_bytes(50); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(201); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate, dbfull()->TEST_write_controler().delayed_write_rate()); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(400); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(500); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2 / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(450); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(205); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate, dbfull()->TEST_write_controler().delayed_write_rate()); + + 
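+  // (The pattern asserted above and below: whenever the stall condition
+  // worsens across a recalculation, the write controller divides the
+  // delayed write rate by 1.2, and multiplies it back by 1.2 when the
+  // condition eases, never exceeding the configured base rate. A rough
+  // sketch of that rule, not the actual implementation:
+  //
+  //   uint64_t rate = kBaseRate;
+  //   rate = static_cast<uint64_t>(rate / 1.2);  // condition worsened
+  //   rate = std::min<uint64_t>(kBaseRate,
+  //                             static_cast<uint64_t>(rate * 1.2));  // eased
+  // )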
vstorage->TEST_set_estimated_compaction_needed_bytes(202); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate, dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(201); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate, dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(198); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(399); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate, dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(599); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(2001); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(3001); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(390); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(100); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + + vstorage->set_l0_delay_trigger_count(100); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage->set_l0_delay_trigger_count(101); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2 / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->set_l0_delay_trigger_count(0); + vstorage->TEST_set_estimated_compaction_needed_bytes(300); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + 
ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2 / 1.2 / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->set_l0_delay_trigger_count(101); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2 / 1.2 / 1.2 / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(200); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2 / 1.2 / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->set_l0_delay_trigger_count(0); + vstorage->TEST_set_estimated_compaction_needed_bytes(0); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + + mutable_cf_options.disable_auto_compactions = true; + dbfull()->TEST_write_controler().set_delayed_write_rate(kBaseRate); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + + vstorage->set_l0_delay_trigger_count(50); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate, dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->set_l0_delay_trigger_count(60); + vstorage->TEST_set_estimated_compaction_needed_bytes(300); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate, dbfull()->TEST_write_controler().delayed_write_rate()); + + mutable_cf_options.disable_auto_compactions = false; + vstorage->set_l0_delay_trigger_count(70); + vstorage->TEST_set_estimated_compaction_needed_bytes(500); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate, dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->set_l0_delay_trigger_count(71); + vstorage->TEST_set_estimated_compaction_needed_bytes(501); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); +} + +TEST_F(ColumnFamilyTest, CompactionSpeedupSingleColumnFamily) { + db_options_.base_background_compactions = 2; + db_options_.max_background_compactions = 6; + Open({"default"}); + ColumnFamilyData* cfd = + static_cast(db_->DefaultColumnFamily())->cfd(); + + VersionStorageInfo* vstorage = cfd->current()->storage_info(); + + MutableCFOptions mutable_cf_options( + Options(db_options_, column_family_options_), + ImmutableCFOptions(Options(db_options_, column_family_options_))); + + // Speed up threshold = min(4 * 2, 4 + (36 - 4)/4) = 8 + 
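+  // (That is, with trigger = level0_file_num_compaction_trigger and
+  // slowdown = level0_slowdown_writes_trigger, extra background compactions
+  // are allowed once the L0 file count reaches roughly
+  //
+  //   std::min(trigger * 2, trigger + (slowdown - trigger) / 4)
+  //
+  // and once pending compaction bytes reach soft_limit / 4. This is a
+  // reading of the test's expectations: the asserts below flip
+  // BGCompactionsAllowed() from the base value 2 to the maximum 6 at
+  // exactly those points.)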
mutable_cf_options.level0_file_num_compaction_trigger = 4; + mutable_cf_options.level0_slowdown_writes_trigger = 36; + mutable_cf_options.level0_stop_writes_trigger = 50; + // Speedup threshold = 200 / 4 = 50 + mutable_cf_options.soft_pending_compaction_bytes_limit = 200; + mutable_cf_options.hard_pending_compaction_bytes_limit = 2000; + + vstorage->TEST_set_estimated_compaction_needed_bytes(40); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(2, dbfull()->BGCompactionsAllowed()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(50); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(300); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(45); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(2, dbfull()->BGCompactionsAllowed()); + + vstorage->set_l0_delay_trigger_count(7); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(2, dbfull()->BGCompactionsAllowed()); + + vstorage->set_l0_delay_trigger_count(9); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage->set_l0_delay_trigger_count(6); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(2, dbfull()->BGCompactionsAllowed()); + + // Speed up threshold = min(4 * 2, 4 + (12 - 4)/4) = 6 + mutable_cf_options.level0_file_num_compaction_trigger = 4; + mutable_cf_options.level0_slowdown_writes_trigger = 16; + mutable_cf_options.level0_stop_writes_trigger = 30; + + vstorage->set_l0_delay_trigger_count(5); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(2, dbfull()->BGCompactionsAllowed()); + + vstorage->set_l0_delay_trigger_count(7); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage->set_l0_delay_trigger_count(3); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(2, dbfull()->BGCompactionsAllowed()); +} + +TEST_F(ColumnFamilyTest, WriteStallTwoColumnFamilies) { + const uint64_t kBaseRate = 810000u; + db_options_.delayed_write_rate = kBaseRate; + Open(); + CreateColumnFamilies({"one"}); + ColumnFamilyData* cfd = + static_cast(db_->DefaultColumnFamily())->cfd(); + VersionStorageInfo* vstorage = cfd->current()->storage_info(); + + ColumnFamilyData* cfd1 = + static_cast(handles_[1])->cfd(); + VersionStorageInfo* vstorage1 = cfd1->current()->storage_info(); + + MutableCFOptions mutable_cf_options( + Options(db_options_, column_family_options_), + ImmutableCFOptions(Options(db_options_, column_family_options_))); + mutable_cf_options.level0_slowdown_writes_trigger = 20; + mutable_cf_options.level0_stop_writes_trigger = 10000; + mutable_cf_options.soft_pending_compaction_bytes_limit = 200; + mutable_cf_options.hard_pending_compaction_bytes_limit = 2000; + + MutableCFOptions mutable_cf_options1 = mutable_cf_options; + mutable_cf_options1.soft_pending_compaction_bytes_limit = 500; + + vstorage->TEST_set_estimated_compaction_needed_bytes(50); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + + vstorage1->TEST_set_estimated_compaction_needed_bytes(201); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + 
ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + + vstorage1->TEST_set_estimated_compaction_needed_bytes(600); + cfd1->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate, dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(70); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate, dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage1->TEST_set_estimated_compaction_needed_bytes(800); + cfd1->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(300); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2 / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage1->TEST_set_estimated_compaction_needed_bytes(700); + cfd1->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(500); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2 / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); + + vstorage1->TEST_set_estimated_compaction_needed_bytes(600); + cfd1->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_TRUE(!dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + ASSERT_EQ(kBaseRate / 1.2, + dbfull()->TEST_write_controler().delayed_write_rate()); +} + +TEST_F(ColumnFamilyTest, CompactionSpeedupTwoColumnFamilies) { + db_options_.base_background_compactions = 2; + db_options_.max_background_compactions = 6; + column_family_options_.soft_pending_compaction_bytes_limit = 200; + column_family_options_.hard_pending_compaction_bytes_limit = 2000; + Open(); + CreateColumnFamilies({"one"}); + ColumnFamilyData* cfd = + static_cast(db_->DefaultColumnFamily())->cfd(); + VersionStorageInfo* vstorage = cfd->current()->storage_info(); + + ColumnFamilyData* cfd1 = + static_cast(handles_[1])->cfd(); + VersionStorageInfo* vstorage1 = cfd1->current()->storage_info(); + + MutableCFOptions mutable_cf_options( + Options(db_options_, column_family_options_), + ImmutableCFOptions(Options(db_options_, column_family_options_))); + // Speed up threshold = min(4 * 2, 4 + (36 - 4)/4) = 8 + mutable_cf_options.level0_file_num_compaction_trigger = 4; + mutable_cf_options.level0_slowdown_writes_trigger = 36; + mutable_cf_options.level0_stop_writes_trigger = 30; + // Speedup threshold = 200 / 4 = 50 + 
mutable_cf_options.soft_pending_compaction_bytes_limit = 200; + mutable_cf_options.hard_pending_compaction_bytes_limit = 2000; + + MutableCFOptions mutable_cf_options1 = mutable_cf_options; + mutable_cf_options1.level0_slowdown_writes_trigger = 16; + + vstorage->TEST_set_estimated_compaction_needed_bytes(40); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(2, dbfull()->BGCompactionsAllowed()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(60); + cfd1->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(2, dbfull()->BGCompactionsAllowed()); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage1->TEST_set_estimated_compaction_needed_bytes(30); + cfd1->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage1->TEST_set_estimated_compaction_needed_bytes(70); + cfd1->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage->TEST_set_estimated_compaction_needed_bytes(20); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage1->TEST_set_estimated_compaction_needed_bytes(3); + cfd1->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(2, dbfull()->BGCompactionsAllowed()); + + vstorage->set_l0_delay_trigger_count(9); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage1->set_l0_delay_trigger_count(2); + cfd1->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(6, dbfull()->BGCompactionsAllowed()); + + vstorage->set_l0_delay_trigger_count(0); + cfd->RecalculateWriteStallConditions(mutable_cf_options); + ASSERT_EQ(2, dbfull()->BGCompactionsAllowed()); +} + +#ifndef ROCKSDB_LITE +TEST_F(ColumnFamilyTest, FlushCloseWALFiles) { + SpecialEnv env(Env::Default()); + db_options_.env = &env; + db_options_.max_background_flushes = 1; + column_family_options_.memtable_factory.reset(new SpecialSkipListFactory(2)); + Open(); + CreateColumnFamilies({"one"}); + ASSERT_OK(Put(1, "fodor", "mirko")); + ASSERT_OK(Put(0, "fodor", "mirko")); + ASSERT_OK(Put(1, "fodor", "mirko")); + + // Block flush jobs from running + test::SleepingBackgroundTask sleeping_task; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task, + Env::Priority::HIGH); + + WriteOptions wo; + wo.sync = true; + ASSERT_OK(db_->Put(wo, handles_[1], "fodor", "mirko")); + + ASSERT_EQ(2, env.num_open_wal_file_.load()); + + sleeping_task.WakeUp(); + sleeping_task.WaitUntilDone(); + WaitForFlush(1); + ASSERT_EQ(1, env.num_open_wal_file_.load()); + + Reopen(); + ASSERT_EQ("mirko", Get(0, "fodor")); + ASSERT_EQ("mirko", Get(1, "fodor")); + db_options_.env = env_; + Close(); +} +#endif // !ROCKSDB_LITE + +TEST_F(ColumnFamilyTest, IteratorCloseWALFile1) { + SpecialEnv env(Env::Default()); + db_options_.env = &env; + db_options_.max_background_flushes = 1; + column_family_options_.memtable_factory.reset(new SpecialSkipListFactory(2)); + Open(); + CreateColumnFamilies({"one"}); + ASSERT_OK(Put(1, "fodor", "mirko")); + // Create an iterator holding the current super version. + Iterator* it = db_->NewIterator(ReadOptions(), handles_[1]); + // A flush will make `it` hold the last reference of its super version. 
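+  // (A SuperVersion bundles the mutable memtable, the immutable memtables
+  // and the current Version of SST files. Iterators pin the SuperVersion
+  // they were created under; once the flush installs a new one, this
+  // iterator holds the only remaining reference, so releasing the iterator
+  // is what finally lets the obsolete WAL file be closed -- the property
+  // the num_open_wal_file_ assertions below are checking.)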
+ Flush(1); + + ASSERT_OK(Put(1, "fodor", "mirko")); + ASSERT_OK(Put(0, "fodor", "mirko")); + ASSERT_OK(Put(1, "fodor", "mirko")); + + // Flush jobs will close previous WAL files after finishing. By + // block flush jobs from running, we trigger a condition where + // the iterator destructor should close the WAL files. + test::SleepingBackgroundTask sleeping_task; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task, + Env::Priority::HIGH); + + WriteOptions wo; + wo.sync = true; + ASSERT_OK(db_->Put(wo, handles_[1], "fodor", "mirko")); + + ASSERT_EQ(2, env.num_open_wal_file_.load()); + // Deleting the iterator will clear its super version, triggering + // closing all files + delete it; + ASSERT_EQ(1, env.num_open_wal_file_.load()); + + sleeping_task.WakeUp(); + sleeping_task.WaitUntilDone(); + WaitForFlush(1); + + Reopen(); + ASSERT_EQ("mirko", Get(0, "fodor")); + ASSERT_EQ("mirko", Get(1, "fodor")); + db_options_.env = env_; + Close(); +} + +TEST_F(ColumnFamilyTest, IteratorCloseWALFile2) { + SpecialEnv env(Env::Default()); + // Allow both of flush and purge job to schedule. + env.SetBackgroundThreads(2, Env::HIGH); + db_options_.env = &env; + db_options_.max_background_flushes = 1; + column_family_options_.memtable_factory.reset(new SpecialSkipListFactory(2)); + Open(); + CreateColumnFamilies({"one"}); + ASSERT_OK(Put(1, "fodor", "mirko")); + // Create an iterator holding the current super version. + ReadOptions ro; + ro.background_purge_on_iterator_cleanup = true; + Iterator* it = db_->NewIterator(ro, handles_[1]); + // A flush will make `it` hold the last reference of its super version. + Flush(1); + + ASSERT_OK(Put(1, "fodor", "mirko")); + ASSERT_OK(Put(0, "fodor", "mirko")); + ASSERT_OK(Put(1, "fodor", "mirko")); + + rocksdb::SyncPoint::GetInstance()->LoadDependency({ + {"ColumnFamilyTest::IteratorCloseWALFile2:0", + "DBImpl::BGWorkPurge:start"}, + {"ColumnFamilyTest::IteratorCloseWALFile2:2", + "DBImpl::BackgroundCallFlush:start"}, + {"DBImpl::BGWorkPurge:end", "ColumnFamilyTest::IteratorCloseWALFile2:1"}, + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + WriteOptions wo; + wo.sync = true; + ASSERT_OK(db_->Put(wo, handles_[1], "fodor", "mirko")); + + ASSERT_EQ(2, env.num_open_wal_file_.load()); + // Deleting the iterator will clear its super version, triggering + // closing all files + delete it; + ASSERT_EQ(2, env.num_open_wal_file_.load()); + + TEST_SYNC_POINT("ColumnFamilyTest::IteratorCloseWALFile2:0"); + TEST_SYNC_POINT("ColumnFamilyTest::IteratorCloseWALFile2:1"); + ASSERT_EQ(1, env.num_open_wal_file_.load()); + TEST_SYNC_POINT("ColumnFamilyTest::IteratorCloseWALFile2:2"); + WaitForFlush(1); + ASSERT_EQ(1, env.num_open_wal_file_.load()); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + + Reopen(); + ASSERT_EQ("mirko", Get(0, "fodor")); + ASSERT_EQ("mirko", Get(1, "fodor")); + db_options_.env = env_; + Close(); +} + +#ifndef ROCKSDB_LITE // TEST functions are not supported in lite +TEST_F(ColumnFamilyTest, ForwardIteratorCloseWALFile) { + SpecialEnv env(Env::Default()); + // Allow both of flush and purge job to schedule. 
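+  // (With background_purge_on_iterator_cleanup set on the ReadOptions
+  // below, releasing an iterator's super version does not happen on the
+  // caller's thread; the cleanup is handed to a background purge job on
+  // the HIGH-priority pool instead, which is why a second HIGH thread is
+  // needed and why the WAL file only closes after BGWorkPurge runs.)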
+ env.SetBackgroundThreads(2, Env::HIGH); + db_options_.env = &env; + db_options_.max_background_flushes = 1; + column_family_options_.memtable_factory.reset(new SpecialSkipListFactory(3)); + column_family_options_.level0_file_num_compaction_trigger = 2; + Open(); + CreateColumnFamilies({"one"}); + ASSERT_OK(Put(1, "fodor", "mirko")); + ASSERT_OK(Put(1, "fodar2", "mirko")); + Flush(1); + + // Create an iterator holding the current super version, as well as + // the SST file just flushed. + ReadOptions ro; + ro.tailing = true; + ro.background_purge_on_iterator_cleanup = true; + Iterator* it = db_->NewIterator(ro, handles_[1]); + // A flush will make `it` hold the last reference of its super version. + + ASSERT_OK(Put(1, "fodor", "mirko")); + ASSERT_OK(Put(1, "fodar2", "mirko")); + Flush(1); + + WaitForCompaction(); + + ASSERT_OK(Put(1, "fodor", "mirko")); + ASSERT_OK(Put(1, "fodor", "mirko")); + ASSERT_OK(Put(0, "fodor", "mirko")); + ASSERT_OK(Put(1, "fodor", "mirko")); + + rocksdb::SyncPoint::GetInstance()->LoadDependency({ + {"ColumnFamilyTest::IteratorCloseWALFile2:0", + "DBImpl::BGWorkPurge:start"}, + {"ColumnFamilyTest::IteratorCloseWALFile2:2", + "DBImpl::BackgroundCallFlush:start"}, + {"DBImpl::BGWorkPurge:end", "ColumnFamilyTest::IteratorCloseWALFile2:1"}, + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + WriteOptions wo; + wo.sync = true; + ASSERT_OK(db_->Put(wo, handles_[1], "fodor", "mirko")); + + env.delete_count_.store(0); + ASSERT_EQ(2, env.num_open_wal_file_.load()); + // Deleting the iterator will clear its super version, triggering + // closing all files + it->Seek(""); + ASSERT_EQ(2, env.num_open_wal_file_.load()); + ASSERT_EQ(0, env.delete_count_.load()); + + TEST_SYNC_POINT("ColumnFamilyTest::IteratorCloseWALFile2:0"); + TEST_SYNC_POINT("ColumnFamilyTest::IteratorCloseWALFile2:1"); + ASSERT_EQ(1, env.num_open_wal_file_.load()); + ASSERT_EQ(1, env.delete_count_.load()); + TEST_SYNC_POINT("ColumnFamilyTest::IteratorCloseWALFile2:2"); + WaitForFlush(1); + ASSERT_EQ(1, env.num_open_wal_file_.load()); + ASSERT_EQ(1, env.delete_count_.load()); + + delete it; + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + + Reopen(); + ASSERT_EQ("mirko", Get(0, "fodor")); + ASSERT_EQ("mirko", Get(1, "fodor")); + db_options_.env = env_; + Close(); +} +#endif // !ROCKSDB_LITE + +// Disable on windows because SyncWAL requires env->IsSyncThreadSafe() +// to return true which is not so in unbuffered mode. +#ifndef OS_WIN +TEST_F(ColumnFamilyTest, LogSyncConflictFlush) { + Open(); + CreateColumnFamiliesAndReopen({"one", "two"}); + + Put(0, "", ""); + Put(1, "foo", "bar"); + + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"DBImpl::SyncWAL:BeforeMarkLogsSynced:1", + "ColumnFamilyTest::LogSyncConflictFlush:1"}, + {"ColumnFamilyTest::LogSyncConflictFlush:2", + "DBImpl::SyncWAL:BeforeMarkLogsSynced:2"}}); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + std::thread thread([&] { db_->SyncWAL(); }); + + TEST_SYNC_POINT("ColumnFamilyTest::LogSyncConflictFlush:1"); + Flush(1); + Put(1, "foo", "bar"); + Flush(1); + + TEST_SYNC_POINT("ColumnFamilyTest::LogSyncConflictFlush:2"); + + thread.join(); + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + Close(); +} +#endif + +// this test is placed here, because the infrastructure for Column Family +// test is being used to ensure a roll of wal files. 
+// Basic idea is to test that WAL truncation is being detected and not
+// ignored
+TEST_F(ColumnFamilyTest, DISABLED_LogTruncationTest) {
+  Open();
+  CreateColumnFamiliesAndReopen({"one", "two"});
+
+  Build(0, 100);
+
+  // Flush the 0th column family to force a roll of the wal log
+  Flush(0);
+
+  // Add some more entries
+  Build(100, 100);
+
+  std::vector<std::string> filenames;
+  ASSERT_OK(env_->GetChildren(dbname_, &filenames));
+
+  // collect wal files
+  std::vector<std::string> logfs;
+  for (size_t i = 0; i < filenames.size(); i++) {
+    uint64_t number;
+    FileType type;
+    if (!(ParseFileName(filenames[i], &number, &type))) continue;
+
+    if (type != kLogFile) continue;
+
+    logfs.push_back(filenames[i]);
+  }
+
+  std::sort(logfs.begin(), logfs.end());
+  ASSERT_GE(logfs.size(), 2);
+
+  // Take the last but one file, and truncate it
+  std::string fpath = dbname_ + "/" + logfs[logfs.size() - 2];
+  std::vector<std::string> names_save = names_;
+
+  uint64_t fsize;
+  ASSERT_OK(env_->GetFileSize(fpath, &fsize));
+  ASSERT_GT(fsize, 0);
+
+  Close();
+
+  std::string backup_logs = dbname_ + "/backup_logs";
+  std::string t_fpath = backup_logs + "/" + logfs[logfs.size() - 2];
+
+  ASSERT_OK(env_->CreateDirIfMissing(backup_logs));
+  // Not sure how easy it is to make this data driven.
+  // Need to read back the WAL file and truncate the last 10 entries.
+  CopyFile(fpath, t_fpath, fsize - 9180);
+
+  ASSERT_OK(env_->DeleteFile(fpath));
+  ASSERT_OK(env_->RenameFile(t_fpath, fpath));
+
+  db_options_.wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery;
+
+  OpenReadOnly(names_save);
+
+  CheckMissed();
+
+  Close();
+
+  Open(names_save);
+
+  CheckMissed();
+
+  Close();
+
+  // cleanup
+  env_->DeleteDir(backup_logs);
+}
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/external/rocksdb/db/compact_files_test.cc b/external/rocksdb/db/compact_files_test.cc
new file mode 100755
index 0000000000..794defb115
--- /dev/null
+++ b/external/rocksdb/db/compact_files_test.cc
@@ -0,0 +1,227 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef ROCKSDB_LITE
+
+#include <mutex>
+#include <string>
+#include <thread>
+#include <vector>
+
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+#include "util/testharness.h"
+
+namespace rocksdb {
+
+class CompactFilesTest : public testing::Test {
+ public:
+  CompactFilesTest() {
+    env_ = Env::Default();
+    db_name_ = test::TmpDir(env_) + "/compact_files_test";
+  }
+
+  std::string db_name_;
+  Env* env_;
+};
+
+// A class which remembers the name of each flushed file.
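+// (This is the standard EventListener pattern: implement the callbacks you
+// care about and register the listener before opening the DB, e.g.
+//
+//   Options options;
+//   options.listeners.emplace_back(std::make_shared<FlushedFileCollector>());
+//
+// RocksDB then invokes OnFlushCompleted() from its background flush thread,
+// which is why the collector guards its state with a mutex.)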
+class FlushedFileCollector : public EventListener {
+ public:
+  FlushedFileCollector() {}
+  ~FlushedFileCollector() {}
+
+  virtual void OnFlushCompleted(DB* db, const FlushJobInfo& info) override {
+    std::lock_guard<std::mutex> lock(mutex_);
+    flushed_files_.push_back(info.file_path);
+  }
+
+  std::vector<std::string> GetFlushedFiles() {
+    std::lock_guard<std::mutex> lock(mutex_);
+    std::vector<std::string> result;
+    for (auto fname : flushed_files_) {
+      result.push_back(fname);
+    }
+    return result;
+  }
+
+ private:
+  std::vector<std::string> flushed_files_;
+  std::mutex mutex_;
+};
+
+TEST_F(CompactFilesTest, L0ConflictsFiles) {
+  Options options;
+  // to trigger compaction more easily
+  const int kWriteBufferSize = 10000;
+  const int kLevel0Trigger = 2;
+  options.create_if_missing = true;
+  options.compaction_style = kCompactionStyleLevel;
+  // Small slowdown and stop trigger for experimental purposes.
+  options.level0_slowdown_writes_trigger = 20;
+  options.level0_stop_writes_trigger = 20;
+  options.write_buffer_size = kWriteBufferSize;
+  options.level0_file_num_compaction_trigger = kLevel0Trigger;
+  options.compression = kNoCompression;
+
+  DB* db = nullptr;
+  DestroyDB(db_name_, options);
+  Status s = DB::Open(options, db_name_, &db);
+  assert(s.ok());
+  assert(db);
+
+  rocksdb::SyncPoint::GetInstance()->LoadDependency({
+      {"CompactFilesImpl:0", "BackgroundCallCompaction:0"},
+      {"BackgroundCallCompaction:1", "CompactFilesImpl:1"},
+  });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  // create a couple of files
+  // Background compaction starts and waits in BackgroundCallCompaction:0
+  for (int i = 0; i < kLevel0Trigger * 4; ++i) {
+    db->Put(WriteOptions(), ToString(i), "");
+    db->Put(WriteOptions(), ToString(100 - i), "");
+    db->Flush(FlushOptions());
+  }
+
+  rocksdb::ColumnFamilyMetaData meta;
+  db->GetColumnFamilyMetaData(&meta);
+  std::string file1;
+  for (auto& file : meta.levels[0].files) {
+    ASSERT_EQ(0, meta.levels[0].level);
+    if (file1 == "") {
+      file1 = file.db_path + "/" + file.name;
+    } else {
+      std::string file2 = file.db_path + "/" + file.name;
+      // Another thread starts a compact files and creates an L0 compaction.
+      // The background compaction then notices that there is an L0 compaction
+      // already in progress and doesn't do an L0 compaction.
+      // Once the background compaction finishes, the compact files finishes.
+      ASSERT_OK(
+          db->CompactFiles(rocksdb::CompactionOptions(), {file1, file2}, 0));
+      break;
+    }
+  }
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+  delete db;
+}
+
+TEST_F(CompactFilesTest, ObsoleteFiles) {
+  Options options;
+  // to trigger compaction more easily
+  const int kWriteBufferSize = 10000;
+  options.create_if_missing = true;
+  // Disable RocksDB background compaction.
+  options.compaction_style = kCompactionStyleNone;
+  // Small slowdown and stop trigger for experimental purposes.
+ options.level0_slowdown_writes_trigger = 20; + options.level0_stop_writes_trigger = 20; + options.write_buffer_size = kWriteBufferSize; + options.max_write_buffer_number = 2; + options.compression = kNoCompression; + + // Add listener + FlushedFileCollector* collector = new FlushedFileCollector(); + options.listeners.emplace_back(collector); + + DB* db = nullptr; + DestroyDB(db_name_, options); + Status s = DB::Open(options, db_name_, &db); + assert(s.ok()); + assert(db); + + // create couple files + for (int i = 1000; i < 2000; ++i) { + db->Put(WriteOptions(), ToString(i), + std::string(kWriteBufferSize / 10, 'a' + (i % 26))); + } + + auto l0_files = collector->GetFlushedFiles(); + ASSERT_OK(db->CompactFiles(CompactionOptions(), l0_files, 1)); + + // verify all compaction input files are deleted + for (auto fname : l0_files) { + ASSERT_EQ(Status::NotFound(), env_->FileExists(fname)); + } + delete db; +} + +TEST_F(CompactFilesTest, CapturingPendingFiles) { + Options options; + options.create_if_missing = true; + // Disable RocksDB background compaction. + options.compaction_style = kCompactionStyleNone; + // Always do full scans for obsolete files (needed to reproduce the issue). + options.delete_obsolete_files_period_micros = 0; + + // Add listener. + FlushedFileCollector* collector = new FlushedFileCollector(); + options.listeners.emplace_back(collector); + + DB* db = nullptr; + DestroyDB(db_name_, options); + Status s = DB::Open(options, db_name_, &db); + assert(s.ok()); + assert(db); + + // Create 5 files. + for (int i = 0; i < 5; ++i) { + db->Put(WriteOptions(), "key" + ToString(i), "value"); + db->Flush(FlushOptions()); + } + + auto l0_files = collector->GetFlushedFiles(); + EXPECT_EQ(5, l0_files.size()); + + rocksdb::SyncPoint::GetInstance()->LoadDependency({ + {"CompactFilesImpl:2", "CompactFilesTest.CapturingPendingFiles:0"}, + {"CompactFilesTest.CapturingPendingFiles:1", "CompactFilesImpl:3"}, + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + // Start compacting files. + std::thread compaction_thread( + [&] { EXPECT_OK(db->CompactFiles(CompactionOptions(), l0_files, 1)); }); + + // In the meantime flush another file. + TEST_SYNC_POINT("CompactFilesTest.CapturingPendingFiles:0"); + db->Put(WriteOptions(), "key5", "value"); + db->Flush(FlushOptions()); + TEST_SYNC_POINT("CompactFilesTest.CapturingPendingFiles:1"); + + compaction_thread.join(); + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + + delete db; + + // Make sure we can reopen the DB. + s = DB::Open(options, db_name_, &db); + ASSERT_TRUE(s.ok()); + assert(db); + delete db; +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + +#else +#include + +int main(int argc, char** argv) { + fprintf(stderr, + "SKIPPED as DBImpl::CompactFiles is not supported in ROCKSDB_LITE\n"); + return 0; +} + +#endif // !ROCKSDB_LITE diff --git a/external/rocksdb/db/compacted_db_impl.cc b/external/rocksdb/db/compacted_db_impl.cc new file mode 100755 index 0000000000..e72a4bd595 --- /dev/null +++ b/external/rocksdb/db/compacted_db_impl.cc @@ -0,0 +1,163 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
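+
+// CompactedDBImpl is a read-only DB specialization for databases whose data
+// already sits entirely in one sorted run (a single L0 file, or only the
+// bottommost level). Get() can then skip the memtable and version walk and
+// binary-search the file list directly (see FindFile below), at the price
+// of rejecting every mutating operation.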
+#ifndef ROCKSDB_LITE
+#include "db/compacted_db_impl.h"
+#include "db/db_impl.h"
+#include "db/version_set.h"
+#include "table/get_context.h"
+
+namespace rocksdb {
+
+extern void MarkKeyMayExist(void* arg);
+extern bool SaveValue(void* arg, const ParsedInternalKey& parsed_key,
+                      const Slice& v, bool hit_and_return);
+
+CompactedDBImpl::CompactedDBImpl(
+    const DBOptions& options, const std::string& dbname)
+    : DBImpl(options, dbname) {
+}
+
+CompactedDBImpl::~CompactedDBImpl() {
+}
+
+size_t CompactedDBImpl::FindFile(const Slice& key) {
+  size_t left = 0;
+  size_t right = files_.num_files - 1;
+  while (left < right) {
+    size_t mid = (left + right) >> 1;
+    const FdWithKeyRange& f = files_.files[mid];
+    if (user_comparator_->Compare(ExtractUserKey(f.largest_key), key) < 0) {
+      // Key at "mid.largest" is < "target". Therefore all
+      // files at or before "mid" are uninteresting.
+      left = mid + 1;
+    } else {
+      // Key at "mid.largest" is >= "target". Therefore all files
+      // after "mid" are uninteresting.
+      right = mid;
+    }
+  }
+  return right;
+}
+
+Status CompactedDBImpl::Get(const ReadOptions& options, ColumnFamilyHandle*,
+                            const Slice& key, std::string* value) {
+  GetContext get_context(user_comparator_, nullptr, nullptr, nullptr,
+                         GetContext::kNotFound, key, value, nullptr, nullptr,
+                         nullptr);
+  LookupKey lkey(key, kMaxSequenceNumber);
+  files_.files[FindFile(key)].fd.table_reader->Get(
+      options, lkey.internal_key(), &get_context);
+  if (get_context.State() == GetContext::kFound) {
+    return Status::OK();
+  }
+  return Status::NotFound();
+}
+
+std::vector<Status> CompactedDBImpl::MultiGet(
+    const ReadOptions& options, const std::vector<ColumnFamilyHandle*>&,
+    const std::vector<Slice>& keys, std::vector<std::string>* values) {
+  autovector<TableReader*, 16> reader_list;
+  for (const auto& key : keys) {
+    const FdWithKeyRange& f = files_.files[FindFile(key)];
+    if (user_comparator_->Compare(key, ExtractUserKey(f.smallest_key)) < 0) {
+      reader_list.push_back(nullptr);
+    } else {
+      LookupKey lkey(key, kMaxSequenceNumber);
+      f.fd.table_reader->Prepare(lkey.internal_key());
+      reader_list.push_back(f.fd.table_reader);
+    }
+  }
+  std::vector<Status> statuses(keys.size(), Status::NotFound());
+  values->resize(keys.size());
+  int idx = 0;
+  for (auto* r : reader_list) {
+    if (r != nullptr) {
+      GetContext get_context(user_comparator_, nullptr, nullptr, nullptr,
+                             GetContext::kNotFound, keys[idx], &(*values)[idx],
+                             nullptr, nullptr, nullptr);
+      LookupKey lkey(keys[idx], kMaxSequenceNumber);
+      r->Get(options, lkey.internal_key(), &get_context);
+      if (get_context.State() == GetContext::kFound) {
+        statuses[idx] = Status::OK();
+      }
+    }
+    ++idx;
+  }
+  return statuses;
+}
+
+Status CompactedDBImpl::Init(const Options& options) {
+  mutex_.Lock();
+  ColumnFamilyDescriptor cf(kDefaultColumnFamilyName,
+                            ColumnFamilyOptions(options));
+  Status s = Recover({cf}, true /* read only */, false, true);
+  if (s.ok()) {
+    cfd_ = reinterpret_cast<ColumnFamilyHandleImpl*>(
+        DefaultColumnFamily())->cfd();
+    delete cfd_->InstallSuperVersion(new SuperVersion(), &mutex_);
+  }
+  mutex_.Unlock();
+  if (!s.ok()) {
+    return s;
+  }
+  NewThreadStatusCfInfo(cfd_);
+  version_ = cfd_->GetSuperVersion()->current;
+  user_comparator_ = cfd_->user_comparator();
+  auto* vstorage = version_->storage_info();
+  if (vstorage->num_non_empty_levels() == 0) {
+    return Status::NotSupported("no file exists");
+  }
+  const LevelFilesBrief& l0 = vstorage->LevelFilesBrief(0);
+  // L0 should not have more than one file
+  if (l0.num_files > 1) {
+    return Status::NotSupported("L0 contains more than 1 file");
+  }
+  if (l0.num_files == 1) {
+    if (vstorage->num_non_empty_levels() > 1) {
+      return Status::NotSupported("Both L0 and other levels contain files");
+    }
+    files_ = l0;
+    return Status::OK();
+  }
+
+  for (int i = 1; i < vstorage->num_non_empty_levels() - 1; ++i) {
+    if (vstorage->LevelFilesBrief(i).num_files > 0) {
+      return Status::NotSupported("Other levels also contain files");
+    }
+  }
+
+  int level = vstorage->num_non_empty_levels() - 1;
+  if (vstorage->LevelFilesBrief(level).num_files > 0) {
+    files_ = vstorage->LevelFilesBrief(level);
+    return Status::OK();
+  }
+  return Status::NotSupported("no file exists");
+}
+
+Status CompactedDBImpl::Open(const Options& options,
+                             const std::string& dbname, DB** dbptr) {
+  *dbptr = nullptr;
+
+  if (options.max_open_files != -1) {
+    return Status::InvalidArgument("require max_open_files = -1");
+  }
+  if (options.merge_operator.get() != nullptr) {
+    return Status::InvalidArgument("merge operator is not supported");
+  }
+  DBOptions db_options(options);
+  std::unique_ptr<CompactedDBImpl> db(new CompactedDBImpl(db_options, dbname));
+  Status s = db->Init(options);
+  if (s.ok()) {
+    Log(INFO_LEVEL, db->db_options_.info_log,
+        "Opened the db in fully compacted mode");
+    LogFlush(db->db_options_.info_log);
+    *dbptr = db.release();
+  }
+  return s;
+}
+
+}  // namespace rocksdb
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/db/compacted_db_impl.h b/external/rocksdb/db/compacted_db_impl.h
new file mode 100755
index 0000000000..9c42010a67
--- /dev/null
+++ b/external/rocksdb/db/compacted_db_impl.h
@@ -0,0 +1,95 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+ +#pragma once +#ifndef ROCKSDB_LITE +#include "db/db_impl.h" +#include <vector> +#include <string> + +namespace rocksdb { + +class CompactedDBImpl : public DBImpl { + public: + CompactedDBImpl(const DBOptions& options, const std::string& dbname); + virtual ~CompactedDBImpl(); + + static Status Open(const Options& options, const std::string& dbname, + DB** dbptr); + + // Implementations of the DB interface + using DB::Get; + virtual Status Get(const ReadOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + std::string* value) override; + using DB::MultiGet; + virtual std::vector<Status> MultiGet( + const ReadOptions& options, + const std::vector<ColumnFamilyHandle*>&, + const std::vector<Slice>& keys, std::vector<std::string>* values) + override; + + using DBImpl::Put; + virtual Status Put(const WriteOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) override { + return Status::NotSupported("Not supported in compacted db mode."); + } + using DBImpl::Merge; + virtual Status Merge(const WriteOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) override { + return Status::NotSupported("Not supported in compacted db mode."); + } + using DBImpl::Delete; + virtual Status Delete(const WriteOptions& options, + ColumnFamilyHandle* column_family, + const Slice& key) override { + return Status::NotSupported("Not supported in compacted db mode."); + } + virtual Status Write(const WriteOptions& options, + WriteBatch* updates) override { + return Status::NotSupported("Not supported in compacted db mode."); + } + using DBImpl::CompactRange; + virtual Status CompactRange(const CompactRangeOptions& options, + ColumnFamilyHandle* column_family, + const Slice* begin, const Slice* end) override { + return Status::NotSupported("Not supported in compacted db mode."); + } + + virtual Status DisableFileDeletions() override { + return Status::NotSupported("Not supported in compacted db mode."); + } + virtual Status EnableFileDeletions(bool force) override { + return Status::NotSupported("Not supported in compacted db mode."); + } + virtual Status GetLiveFiles(std::vector<std::string>&, + uint64_t* manifest_file_size, + bool flush_memtable = true) override { + return Status::NotSupported("Not supported in compacted db mode."); + } + using DBImpl::Flush; + virtual Status Flush(const FlushOptions& options, + ColumnFamilyHandle* column_family) override { + return Status::NotSupported("Not supported in compacted db mode."); + } + + private: + friend class DB; + inline size_t FindFile(const Slice& key); + Status Init(const Options& options); + + ColumnFamilyData* cfd_; + Version* version_; + const Comparator* user_comparator_; + LevelFilesBrief files_; + + // No copying allowed + CompactedDBImpl(const CompactedDBImpl&); + void operator=(const CompactedDBImpl&); +}; +} +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/db/compaction.cc b/external/rocksdb/db/compaction.cc new file mode 100755 index 0000000000..cb96676c00 --- /dev/null +++ b/external/rocksdb/db/compaction.cc @@ -0,0 +1,442 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file.
See the AUTHORS file for names of contributors. + +#include "db/compaction.h" + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include <inttypes.h> +#include <vector> + +#include "db/column_family.h" +#include "rocksdb/compaction_filter.h" +#include "util/logging.h" +#include "util/sync_point.h" + +namespace rocksdb { + +uint64_t TotalFileSize(const std::vector<FileMetaData*>& files) { + uint64_t sum = 0; + for (size_t i = 0; i < files.size() && files[i]; i++) { + sum += files[i]->fd.GetFileSize(); + } + return sum; +} + +void Compaction::SetInputVersion(Version* _input_version) { + input_version_ = _input_version; + cfd_ = input_version_->cfd(); + + cfd_->Ref(); + input_version_->Ref(); + edit_.SetColumnFamily(cfd_->GetID()); +} + +void Compaction::GetBoundaryKeys( + VersionStorageInfo* vstorage, + const std::vector<CompactionInputFiles>& inputs, Slice* smallest_user_key, + Slice* largest_user_key) { + bool initialized = false; + const Comparator* ucmp = vstorage->InternalComparator()->user_comparator(); + for (size_t i = 0; i < inputs.size(); ++i) { + if (inputs[i].files.empty()) { + continue; + } + if (inputs[i].level == 0) { + // we need to consider all files on level 0 + for (const auto* f : inputs[i].files) { + const Slice& start_user_key = f->smallest.user_key(); + if (!initialized || + ucmp->Compare(start_user_key, *smallest_user_key) < 0) { + *smallest_user_key = start_user_key; + } + const Slice& end_user_key = f->largest.user_key(); + if (!initialized || + ucmp->Compare(end_user_key, *largest_user_key) > 0) { + *largest_user_key = end_user_key; + } + initialized = true; + } + } else { + // we only need to consider the first and last file + const Slice& start_user_key = inputs[i].files[0]->smallest.user_key(); + if (!initialized || + ucmp->Compare(start_user_key, *smallest_user_key) < 0) { + *smallest_user_key = start_user_key; + } + const Slice& end_user_key = inputs[i].files.back()->largest.user_key(); + if (!initialized || ucmp->Compare(end_user_key, *largest_user_key) > 0) { + *largest_user_key = end_user_key; + } + initialized = true; + } + } +} + +// helper function to determine if compaction is creating files at the +// bottommost level +bool Compaction::IsBottommostLevel( + int output_level, VersionStorageInfo* vstorage, + const std::vector<CompactionInputFiles>& inputs) { + if (inputs[0].level == 0 && + inputs[0].files.back() != vstorage->LevelFiles(0).back()) { + return false; + } + + Slice smallest_key, largest_key; + GetBoundaryKeys(vstorage, inputs, &smallest_key, &largest_key); + + // Checks whether there are files living beyond the output_level. + // If lower levels have files, it checks for overlap between files + // in the compaction process and those files. + // Bottommost-level optimizations can be made if there are no files in + // lower levels or if there is no overlap with the files in + // the lower levels. + for (int i = output_level + 1; i < vstorage->num_levels(); i++) { + // It is not the bottommost level if there are files in higher + // levels when the output level is 0 or if there are files in + // higher levels which overlap with files to be compacted.
+ // output_level == 0 means that we want it to be considered + // as the bottommost level only if the last file on the level + // is a part of the files to be compacted - this is verified by + // the first if condition in this function + if (vstorage->NumLevelFiles(i) > 0 && + (output_level == 0 || + vstorage->OverlapInLevel(i, &smallest_key, &largest_key))) { + return false; + } + } + return true; +} + +// test function to validate the functionality of IsBottommostLevel() +// function -- determines if compaction with inputs and storage is bottommost +bool Compaction::TEST_IsBottommostLevel( + int output_level, VersionStorageInfo* vstorage, + const std::vector<CompactionInputFiles>& inputs) { + return IsBottommostLevel(output_level, vstorage, inputs); +} + +bool Compaction::IsFullCompaction( + VersionStorageInfo* vstorage, + const std::vector<CompactionInputFiles>& inputs) { + size_t num_files_in_compaction = 0; + size_t total_num_files = 0; + for (int l = 0; l < vstorage->num_levels(); l++) { + total_num_files += vstorage->NumLevelFiles(l); + } + for (size_t i = 0; i < inputs.size(); i++) { + num_files_in_compaction += inputs[i].size(); + } + return num_files_in_compaction == total_num_files; +} + +Compaction::Compaction(VersionStorageInfo* vstorage, + const MutableCFOptions& _mutable_cf_options, + std::vector<CompactionInputFiles> _inputs, + int _output_level, uint64_t _target_file_size, + uint64_t _max_grandparent_overlap_bytes, + uint32_t _output_path_id, CompressionType _compression, + std::vector<FileMetaData*> _grandparents, + bool _manual_compaction, double _score, + bool _deletion_compaction, + CompactionReason _compaction_reason) + : start_level_(_inputs[0].level), + output_level_(_output_level), + max_output_file_size_(_target_file_size), + max_grandparent_overlap_bytes_(_max_grandparent_overlap_bytes), + mutable_cf_options_(_mutable_cf_options), + input_version_(nullptr), + number_levels_(vstorage->num_levels()), + cfd_(nullptr), + output_path_id_(_output_path_id), + output_compression_(_compression), + deletion_compaction_(_deletion_compaction), + inputs_(std::move(_inputs)), + grandparents_(std::move(_grandparents)), + score_(_score), + bottommost_level_(IsBottommostLevel(output_level_, vstorage, inputs_)), + is_full_compaction_(IsFullCompaction(vstorage, inputs_)), + is_manual_compaction_(_manual_compaction), + compaction_reason_(_compaction_reason) { + MarkFilesBeingCompacted(true); + if (is_manual_compaction_) { + compaction_reason_ = CompactionReason::kManualCompaction; + } + +#ifndef NDEBUG + for (size_t i = 1; i < inputs_.size(); ++i) { + assert(inputs_[i].level > inputs_[i - 1].level); + } +#endif + + // setup input_levels_ + { + input_levels_.resize(num_input_levels()); + for (size_t which = 0; which < num_input_levels(); which++) { + DoGenerateLevelFilesBrief(&input_levels_[which], inputs_[which].files, + &arena_); + } + } + + GetBoundaryKeys(vstorage, inputs_, &smallest_user_key_, &largest_user_key_); +} + +Compaction::~Compaction() { + if (input_version_ != nullptr) { + input_version_->Unref(); + } + if (cfd_ != nullptr) { + if (cfd_->Unref()) { + delete cfd_; + } + } +} + +bool Compaction::InputCompressionMatchesOutput() const { + VersionStorageInfo* vstorage = input_version_->storage_info(); + int base_level = vstorage->base_level(); + bool matches = + (GetCompressionType(*cfd_->ioptions(), vstorage, mutable_cf_options_, + start_level_, base_level) == output_compression_); + if (matches) { + TEST_SYNC_POINT("Compaction::InputCompressionMatchesOutput:Matches"); + return true; + } +
TEST_SYNC_POINT("Compaction::InputCompressionMatchesOutput:DidntMatch"); + return matches; +} + +bool Compaction::IsTrivialMove() const { + // Avoid a move if there is lots of overlapping grandparent data. + // Otherwise, the move could create a parent file that will require + // a very expensive merge later on. + // If start_level_== output_level_, the purpose is to force compaction + // filter to be applied to that level, and thus cannot be a trivial move. + + // Check if start level have files with overlapping ranges + if (start_level_ == 0 && + input_version_->storage_info()->level0_non_overlapping() == false) { + // We cannot move files from L0 to L1 if the files are overlapping + return false; + } + + if (is_manual_compaction_ && + (cfd_->ioptions()->compaction_filter != nullptr || + cfd_->ioptions()->compaction_filter_factory != nullptr)) { + // This is a manual compaction and we have a compaction filter that should + // be executed, we cannot do a trivial move + return false; + } + + // Used in universal compaction, where trivial move can be done if the + // input files are non overlapping + if ((cfd_->ioptions()->compaction_options_universal.allow_trivial_move) && + (output_level_ != 0)) { + return is_trivial_move_; + } + + return (start_level_ != output_level_ && num_input_levels() == 1 && + input(0, 0)->fd.GetPathId() == output_path_id() && + InputCompressionMatchesOutput() && + TotalFileSize(grandparents_) <= max_grandparent_overlap_bytes_); +} + +void Compaction::AddInputDeletions(VersionEdit* out_edit) { + for (size_t which = 0; which < num_input_levels(); which++) { + for (size_t i = 0; i < inputs_[which].size(); i++) { + out_edit->DeleteFile(level(which), inputs_[which][i]->fd.GetNumber()); + } + } +} + +bool Compaction::KeyNotExistsBeyondOutputLevel( + const Slice& user_key, std::vector* level_ptrs) const { + assert(input_version_ != nullptr); + assert(level_ptrs != nullptr); + assert(level_ptrs->size() == static_cast(number_levels_)); + assert(cfd_->ioptions()->compaction_style != kCompactionStyleFIFO); + if (cfd_->ioptions()->compaction_style == kCompactionStyleUniversal) { + return bottommost_level_; + } + // Maybe use binary search to find right entry instead of linear search? + const Comparator* user_cmp = cfd_->user_comparator(); + for (int lvl = output_level_ + 1; lvl < number_levels_; lvl++) { + const std::vector& files = + input_version_->storage_info()->LevelFiles(lvl); + for (; level_ptrs->at(lvl) < files.size(); level_ptrs->at(lvl)++) { + auto* f = files[level_ptrs->at(lvl)]; + if (user_cmp->Compare(user_key, f->largest.user_key()) <= 0) { + // We've advanced far enough + if (user_cmp->Compare(user_key, f->smallest.user_key()) >= 0) { + // Key falls in this file's range, so definitely + // exists beyond output level + return false; + } + break; + } + } + } + return true; +} + +// Mark (or clear) each file that is being compacted +void Compaction::MarkFilesBeingCompacted(bool mark_as_compacted) { + for (size_t i = 0; i < num_input_levels(); i++) { + for (size_t j = 0; j < inputs_[i].size(); j++) { + assert(mark_as_compacted ? 
!inputs_[i][j]->being_compacted + : inputs_[i][j]->being_compacted); + inputs_[i][j]->being_compacted = mark_as_compacted; + } + } +} + +// Sample output: +// If compacting 3 L0 files, 2 L3 files and 1 L4 file, and outputting to L5, +// print: "3@0 + 2@3 + 1@4 files to L5" +const char* Compaction::InputLevelSummary( + InputLevelSummaryBuffer* scratch) const { + int len = 0; + bool is_first = true; + for (auto& input_level : inputs_) { + if (input_level.empty()) { + continue; + } + if (!is_first) { + len += + snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, " + "); + } else { + is_first = false; + } + len += snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, + "%" ROCKSDB_PRIszt "@%d", input_level.size(), + input_level.level); + } + snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, + " files to L%d", output_level()); + + return scratch->buffer; +} + +uint64_t Compaction::CalculateTotalInputSize() const { + uint64_t size = 0; + for (auto& input_level : inputs_) { + for (auto f : input_level.files) { + size += f->fd.GetFileSize(); + } + } + return size; +} + +void Compaction::ReleaseCompactionFiles(Status status) { + MarkFilesBeingCompacted(false); + cfd_->compaction_picker()->ReleaseCompactionFiles(this, status); +} + +void Compaction::ResetNextCompactionIndex() { + assert(input_version_ != nullptr); + input_version_->storage_info()->ResetNextCompactionIndex(start_level_); +} + +namespace { +int InputSummary(const std::vector<FileMetaData*>& files, char* output, + int len) { + *output = '\0'; + int write = 0; + for (size_t i = 0; i < files.size(); i++) { + int sz = len - write; + int ret; + char sztxt[16]; + AppendHumanBytes(files.at(i)->fd.GetFileSize(), sztxt, 16); + ret = snprintf(output + write, sz, "%" PRIu64 "(%s) ", + files.at(i)->fd.GetNumber(), sztxt); + if (ret < 0 || ret >= sz) break; + write += ret; + } + // if files.size() is non-zero, overwrite the last space + return write - !!files.size(); +} +} // namespace + +void Compaction::Summary(char* output, int len) { + int write = + snprintf(output, len, "Base version %" PRIu64 " Base level %d, inputs: [", + input_version_->GetVersionNumber(), start_level_); + if (write < 0 || write >= len) { + return; + } + + for (size_t level_iter = 0; level_iter < num_input_levels(); ++level_iter) { + if (level_iter > 0) { + write += snprintf(output + write, len - write, "], ["); + if (write < 0 || write >= len) { + return; + } + } + write += + InputSummary(inputs_[level_iter].files, output + write, len - write); + if (write < 0 || write >= len) { + return; + } + } + + snprintf(output + write, len - write, "]"); +} + +uint64_t Compaction::OutputFilePreallocationSize() const { + uint64_t preallocation_size = 0; + + if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel || + output_level() > 0) { + preallocation_size = max_output_file_size_; + } else { + // output_level() == 0 + assert(num_input_levels() > 0); + for (const auto& f : inputs_[0].files) { + preallocation_size += f->fd.GetFileSize(); + } + } + // Over-estimate slightly so we don't end up just barely crossing + // the threshold + return preallocation_size + (preallocation_size / 10); +} + +std::unique_ptr<CompactionFilter> Compaction::CreateCompactionFilter() const { + if (!cfd_->ioptions()->compaction_filter_factory) { + return nullptr; + } + + CompactionFilter::Context context; + context.is_full_compaction = is_full_compaction_; + context.is_manual_compaction = is_manual_compaction_; + context.column_family_id = cfd_->GetID(); + return
cfd_->ioptions()->compaction_filter_factory->CreateCompactionFilter( + context); +} + +bool Compaction::IsOutputLevelEmpty() const { + return inputs_.back().level != output_level_ || inputs_.back().empty(); +} + +bool Compaction::ShouldFormSubcompactions() const { + if (mutable_cf_options_.max_subcompactions <= 1 || cfd_ == nullptr) { + return false; + } + if (cfd_->ioptions()->compaction_style == kCompactionStyleLevel) { + return start_level_ == 0 && !IsOutputLevelEmpty(); + } else if (cfd_->ioptions()->compaction_style == kCompactionStyleUniversal) { + return number_levels_ > 1 && output_level_ > 0; + } else { + return false; + } +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/compaction.h b/external/rocksdb/db/compaction.h new file mode 100755 index 0000000000..a596f313a5 --- /dev/null +++ b/external/rocksdb/db/compaction.h @@ -0,0 +1,312 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#include "util/arena.h" +#include "util/autovector.h" +#include "util/mutable_cf_options.h" +#include "db/version_set.h" + +namespace rocksdb { + +// The structure that manages compaction input files associated +// with the same physical level. +struct CompactionInputFiles { + int level; + std::vector<FileMetaData*> files; + inline bool empty() const { return files.empty(); } + inline size_t size() const { return files.size(); } + inline void clear() { files.clear(); } + inline FileMetaData* operator[](size_t i) const { return files[i]; } +}; + +class Version; +class ColumnFamilyData; +class VersionStorageInfo; +class CompactionFilter; + +// A Compaction encapsulates information about a compaction. +class Compaction { + public: + Compaction(VersionStorageInfo* input_version, + const MutableCFOptions& mutable_cf_options, + std::vector<CompactionInputFiles> inputs, int output_level, + uint64_t target_file_size, uint64_t max_grandparent_overlap_bytes, + uint32_t output_path_id, CompressionType compression, + std::vector<FileMetaData*> grandparents, + bool manual_compaction = false, double score = -1, + bool deletion_compaction = false, + CompactionReason compaction_reason = CompactionReason::kUnknown); + + // No copying allowed + Compaction(const Compaction&) = delete; + void operator=(const Compaction&) = delete; + + ~Compaction(); + + // Returns the level associated with the specified compaction input level. + // If compaction_input_level is not specified, then input_level is set to 0. + int level(size_t compaction_input_level = 0) const { + return inputs_[compaction_input_level].level; + } + + int start_level() const { return start_level_; } + + // Outputs will go to this level + int output_level() const { return output_level_; } + + // Returns the number of input levels in this compaction. + size_t num_input_levels() const { return inputs_.size(); } + + // Return the object that holds the edits to the descriptor done + // by this compaction. + VersionEdit* edit() { return &edit_; } + + // Returns the number of input files associated with the specified + // compaction input level.
+ // The function will return 0 when "compaction_input_level" < 0 + // or "compaction_input_level" >= "num_input_levels()". + size_t num_input_files(size_t compaction_input_level) const { + if (compaction_input_level < inputs_.size()) { + return inputs_[compaction_input_level].size(); + } + return 0; + } + + // Returns input version of the compaction + Version* input_version() const { return input_version_; } + + // Returns the ColumnFamilyData associated with the compaction. + ColumnFamilyData* column_family_data() const { return cfd_; } + + // Returns the file meta data of the 'i'th input file at the + // specified compaction input level. + // REQUIREMENT: "compaction_input_level" must be >= 0 and + // < "input_levels()" + FileMetaData* input(size_t compaction_input_level, size_t i) const { + assert(compaction_input_level < inputs_.size()); + return inputs_[compaction_input_level][i]; + } + + // Returns the list of file meta data of the specified compaction + // input level. + // REQUIREMENT: "compaction_input_level" must be >= 0 and + // < "input_levels()" + const std::vector<FileMetaData*>* inputs(size_t compaction_input_level) { + assert(compaction_input_level < inputs_.size()); + return &inputs_[compaction_input_level].files; + } + + // Returns the LevelFilesBrief of the specified compaction input level. + const LevelFilesBrief* input_levels(size_t compaction_input_level) const { + return &input_levels_[compaction_input_level]; + } + + // Maximum size of files to build during this compaction. + uint64_t max_output_file_size() const { return max_output_file_size_; } + + // What compression for output + CompressionType output_compression() const { return output_compression_; } + + // Whether the output file needs to be written to a second DB path. + uint32_t output_path_id() const { return output_path_id_; } + + // Is this a trivial compaction that can be implemented by just + // moving a single input file to the next level (no merging or splitting) + bool IsTrivialMove() const; + + // If true, then the compaction can be done by simply deleting input files. + bool deletion_compaction() const { return deletion_compaction_; } + + // Add all inputs to this compaction as delete operations to *edit. + void AddInputDeletions(VersionEdit* edit); + + // Returns true if the available information we have guarantees that + // the input "user_key" does not exist in any level beyond "output_level()". + bool KeyNotExistsBeyondOutputLevel(const Slice& user_key, + std::vector<size_t>* level_ptrs) const; + + // Clear all files to indicate that they are not being compacted + // Delete this compaction from the list of running compactions. + // + // Requirement: DB mutex held + void ReleaseCompactionFiles(Status status); + + // Returns the summary of the compaction in "output" with maximum "len" + // in bytes. The caller is responsible for the memory management of + // "output". + void Summary(char* output, int len); + + // Return the score that was used to pick this compaction run. + double score() const { return score_; } + + // Is this compaction creating a file in the bottom most level? + bool bottommost_level() const { return bottommost_level_; } + + // Does this compaction include all sst files? + bool is_full_compaction() const { return is_full_compaction_; } + + // Was this compaction triggered manually by the client? + bool is_manual_compaction() const { return is_manual_compaction_; } + + // Used when allow_trivial_move option is set in + // Universal compaction.
If all the input files are + // non overlapping, then is_trivial_move_ variable + // will be set true, else false + void set_is_trivial_move(bool trivial_move) { + is_trivial_move_ = trivial_move; + } + + // Used when allow_trivial_move option is set in + // Universal compaction. Returns true, if the input files + // are non-overlapping and can be trivially moved. + bool is_trivial_move() const { return is_trivial_move_; } + + // How many total levels are there? + int number_levels() const { return number_levels_; } + + // Return the MutableCFOptions that should be used throughout the compaction + // procedure + const MutableCFOptions* mutable_cf_options() const { + return &mutable_cf_options_; + } + + // Returns the size in bytes that the output file should be preallocated to. + // In level compaction, that is max_file_size_. In universal compaction, that + // is the sum of all input file sizes. + uint64_t OutputFilePreallocationSize() const; + + void SetInputVersion(Version* input_version); + + struct InputLevelSummaryBuffer { + char buffer[128]; + }; + + const char* InputLevelSummary(InputLevelSummaryBuffer* scratch) const; + + uint64_t CalculateTotalInputSize() const; + + // In case of compaction error, reset the nextIndex that is used + // to pick up the next file to be compacted from files_by_size_ + void ResetNextCompactionIndex(); + + // Create a CompactionFilter from compaction_filter_factory + std::unique_ptr<CompactionFilter> CreateCompactionFilter() const; + + // Is the input level corresponding to output_level_ empty? + bool IsOutputLevelEmpty() const; + + // Should this compaction be broken up into smaller ones run in parallel? + bool ShouldFormSubcompactions() const; + + // test function to validate the functionality of IsBottommostLevel() + // function -- determines if compaction with inputs and storage is bottommost + static bool TEST_IsBottommostLevel( + int output_level, VersionStorageInfo* vstorage, + const std::vector<CompactionInputFiles>& inputs); + + TablePropertiesCollection GetOutputTableProperties() const { + return output_table_properties_; + } + + void SetOutputTableProperties(TablePropertiesCollection tp) { + output_table_properties_ = std::move(tp); + } + + Slice GetSmallestUserKey() const { return smallest_user_key_; } + + Slice GetLargestUserKey() const { return largest_user_key_; } + + CompactionReason compaction_reason() { return compaction_reason_; } + + const std::vector<FileMetaData*>& grandparents() const { + return grandparents_; + } + + uint64_t max_grandparent_overlap_bytes() const { + return max_grandparent_overlap_bytes_; + } + + private: + // mark (or clear) all files that are being compacted + void MarkFilesBeingCompacted(bool mark_as_compacted); + + // get the smallest and largest key present in files to be compacted + static void GetBoundaryKeys(VersionStorageInfo* vstorage, + const std::vector<CompactionInputFiles>& inputs, + Slice* smallest_key, Slice* largest_key); + + // helper function to determine if compaction with inputs and storage is + // bottommost + static bool IsBottommostLevel( + int output_level, VersionStorageInfo* vstorage, + const std::vector<CompactionInputFiles>& inputs); + + static bool IsFullCompaction(VersionStorageInfo* vstorage, + const std::vector<CompactionInputFiles>& inputs); + + const int start_level_; // the lowest level to be compacted + const int output_level_; // levels to which output files are stored + uint64_t max_output_file_size_; + uint64_t max_grandparent_overlap_bytes_; + MutableCFOptions mutable_cf_options_; + Version* input_version_; + VersionEdit edit_; + const int number_levels_; + ColumnFamilyData* cfd_; + Arena
arena_; // Arena used to allocate space for file_levels_ + + const uint32_t output_path_id_; + CompressionType output_compression_; + // If true, then the compaction can be done by simply deleting input files. + const bool deletion_compaction_; + + // Compaction input files organized by level. Constant after construction + const std::vector<CompactionInputFiles> inputs_; + + // A copy of inputs_, organized more closely in memory + autovector<LevelFilesBrief, 2> input_levels_; + + // State used to check for number of overlapping grandparent files + // (grandparent == "output_level_ + 1") + std::vector<FileMetaData*> grandparents_; + const double score_; // score that was used to pick this compaction. + + // Is this compaction creating a file in the bottom most level? + const bool bottommost_level_; + // Does this compaction include all sst files? + const bool is_full_compaction_; + + // Is this compaction requested by the client? + const bool is_manual_compaction_; + + // True if we can do trivial move in Universal multi level + // compaction + bool is_trivial_move_; + + // Does input compression match the output compression? + bool InputCompressionMatchesOutput() const; + + // table properties of output files + TablePropertiesCollection output_table_properties_; + + // smallest user keys in compaction + Slice smallest_user_key_; + + // largest user keys in compaction + Slice largest_user_key_; + + // Reason for compaction + CompactionReason compaction_reason_; +}; + +// Utility function +extern uint64_t TotalFileSize(const std::vector<FileMetaData*>& files); + +} // namespace rocksdb diff --git a/external/rocksdb/db/compaction_iterator.cc b/external/rocksdb/db/compaction_iterator.cc new file mode 100755 index 0000000000..aa160bee78 --- /dev/null +++ b/external/rocksdb/db/compaction_iterator.cc @@ -0,0 +1,450 @@ +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "db/compaction_iterator.h" +#include "table/internal_iterator.h" + +namespace rocksdb { + +CompactionIterator::CompactionIterator( + InternalIterator* input, const Comparator* cmp, MergeHelper* merge_helper, + SequenceNumber last_sequence, std::vector<SequenceNumber>* snapshots, + SequenceNumber earliest_write_conflict_snapshot, Env* env, + bool expect_valid_internal_key, const Compaction* compaction, + const CompactionFilter* compaction_filter, LogBuffer* log_buffer) + : input_(input), + cmp_(cmp), + merge_helper_(merge_helper), + snapshots_(snapshots), + earliest_write_conflict_snapshot_(earliest_write_conflict_snapshot), + env_(env), + expect_valid_internal_key_(expect_valid_internal_key), + compaction_(compaction), + compaction_filter_(compaction_filter), + log_buffer_(log_buffer), + merge_out_iter_(merge_helper_) { + assert(compaction_filter_ == nullptr || compaction_ != nullptr); + bottommost_level_ = + compaction_ == nullptr ?
false : compaction_->bottommost_level(); + if (compaction_ != nullptr) { + level_ptrs_ = std::vector<size_t>(compaction_->number_levels(), 0); + } + + if (snapshots_->size() == 0) { + // optimize for fast path if there are no snapshots + visible_at_tip_ = last_sequence; + earliest_snapshot_ = visible_at_tip_; + latest_snapshot_ = 0; + } else { + visible_at_tip_ = 0; + earliest_snapshot_ = snapshots_->at(0); + latest_snapshot_ = snapshots_->back(); + } + if (compaction_filter_ != nullptr && compaction_filter_->IgnoreSnapshots()) { + ignore_snapshots_ = true; + } else { + ignore_snapshots_ = false; + } + input_->SetPinnedItersMgr(&pinned_iters_mgr_); +} + +CompactionIterator::~CompactionIterator() { + // input_ iterator lifetime is longer than pinned_iters_mgr_ lifetime + input_->SetPinnedItersMgr(nullptr); +} + +void CompactionIterator::ResetRecordCounts() { + iter_stats_.num_record_drop_user = 0; + iter_stats_.num_record_drop_hidden = 0; + iter_stats_.num_record_drop_obsolete = 0; +} + +void CompactionIterator::SeekToFirst() { + NextFromInput(); + PrepareOutput(); +} + +void CompactionIterator::Next() { + // If there is a merge output, return it before continuing to process the + // input. + if (merge_out_iter_.Valid()) { + merge_out_iter_.Next(); + + // Check if we returned all records of the merge output. + if (merge_out_iter_.Valid()) { + key_ = merge_out_iter_.key(); + value_ = merge_out_iter_.value(); + bool valid_key __attribute__((__unused__)) = + ParseInternalKey(key_, &ikey_); + // MergeUntil stops when it encounters a corrupt key and does not + // include them in the result, so we expect the keys here to be valid. + assert(valid_key); + // Keep current_key_ in sync. + current_key_.UpdateInternalKey(ikey_.sequence, ikey_.type); + key_ = current_key_.GetKey(); + ikey_.user_key = current_key_.GetUserKey(); + valid_ = true; + } else { + // We consumed all pinned merge operands, release pinned iterators + pinned_iters_mgr_.ReleasePinnedIterators(); + // MergeHelper moves the iterator to the first record after the merged + // records, so even though we reached the end of the merge output, we do + // not want to advance the iterator. + NextFromInput(); + } + } else { + // Only advance the input iterator if there is no merge output and the + // iterator is not already at the next record. + if (!at_next_) { + input_->Next(); + } + NextFromInput(); + } + + if (valid_) { + // Record that we've outputted a record for the current key. + has_outputted_key_ = true; + } + + PrepareOutput(); +} + +void CompactionIterator::NextFromInput() { + at_next_ = false; + valid_ = false; + + while (!valid_ && input_->Valid()) { + key_ = input_->key(); + value_ = input_->value(); + iter_stats_.num_input_records++; + + if (!ParseInternalKey(key_, &ikey_)) { + // If `expect_valid_internal_key_` is false, return the corrupted key + // and let the caller decide what to do with it. + // TODO(noetzli): We should have a more elegant solution for this.
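      // Editorial worked example (assumes the internal-key layout from
      // dbformat.h, which this patch does not restate): an internal key is
      // the user key followed by 8 bytes holding (sequence << 8) | type as a
      // little-endian fixed64. For user key "foo", sequence 5 and kTypeValue
      // (0x1) the encoded bytes are:
      //   'f' 'o' 'o' 0x01 0x05 0x00 0x00 0x00 0x00 0x00 0x00
      // ParseInternalKey() fails whenever fewer than 8 trailing bytes are
      // present, which is the corruption case handled below.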
+ if (expect_valid_internal_key_) { + assert(!"Corrupted internal key not expected."); + status_ = Status::Corruption("Corrupted internal key not expected."); + break; + } + key_ = current_key_.SetKey(key_); + has_current_user_key_ = false; + current_user_key_sequence_ = kMaxSequenceNumber; + current_user_key_snapshot_ = 0; + iter_stats_.num_input_corrupt_records++; + valid_ = true; + break; + } + + // Update input statistics + if (ikey_.type == kTypeDeletion || ikey_.type == kTypeSingleDeletion) { + iter_stats_.num_input_deletion_records++; + } + iter_stats_.total_input_raw_key_bytes += key_.size(); + iter_stats_.total_input_raw_value_bytes += value_.size(); + + // Check whether the user key changed. After this if statement current_key_ + // is a copy of the current input key (maybe converted to a delete by the + // compaction filter). ikey_.user_key is pointing to the copy. + if (!has_current_user_key_ || + !cmp_->Equal(ikey_.user_key, current_user_key_)) { + // First occurrence of this user key + key_ = current_key_.SetKey(key_, &ikey_); + current_user_key_ = ikey_.user_key; + has_current_user_key_ = true; + has_outputted_key_ = false; + current_user_key_sequence_ = kMaxSequenceNumber; + current_user_key_snapshot_ = 0; + + // apply the compaction filter to the first occurrence of the user key + if (compaction_filter_ != nullptr && ikey_.type == kTypeValue && + (visible_at_tip_ || ikey_.sequence > latest_snapshot_ || + ignore_snapshots_)) { + // If the user has specified a compaction filter and the sequence + // number is greater than any external snapshot, then invoke the + // filter. If the return value of the compaction filter is true, + // replace the entry with a deletion marker. + bool value_changed = false; + bool to_delete = false; + compaction_filter_value_.clear(); + { + StopWatchNano timer(env_, true); + to_delete = compaction_filter_->Filter( + compaction_->level(), ikey_.user_key, value_, + &compaction_filter_value_, &value_changed); + iter_stats_.total_filter_time += + env_ != nullptr ? timer.ElapsedNanos() : 0; + } + if (to_delete) { + // convert the current key to a delete + ikey_.type = kTypeDeletion; + current_key_.UpdateInternalKey(ikey_.sequence, kTypeDeletion); + // no value associated with delete + value_.clear(); + iter_stats_.num_record_drop_user++; + } else if (value_changed) { + value_ = compaction_filter_value_; + } + } + } else { + // Update the current key to reflect the new sequence number/type without + // copying the user key. + // TODO(rven): Compaction filter does not process keys in this path + // Need to have the compaction filter process multiple versions + // if we have versions on both sides of a snapshot + current_key_.UpdateInternalKey(ikey_.sequence, ikey_.type); + key_ = current_key_.GetKey(); + ikey_.user_key = current_key_.GetUserKey(); + } + + // If there are no snapshots, then this kv affects visibility at tip. + // Otherwise, search through all existing snapshots to find the earliest + // snapshot that is affected by this kv. + SequenceNumber last_sequence __attribute__((__unused__)) = + current_user_key_sequence_; + current_user_key_sequence_ = ikey_.sequence; + SequenceNumber last_snapshot = current_user_key_snapshot_; + SequenceNumber prev_snapshot = 0; // 0 means no previous snapshot + current_user_key_snapshot_ = + visible_at_tip_ ?
visible_at_tip_ : findEarliestVisibleSnapshot( + ikey_.sequence, &prev_snapshot); + + if (clear_and_output_next_key_) { + // In the previous iteration we encountered a single delete that we could + // not compact out. We will keep this Put, but can drop its data. + // (See Optimization 3, below.) + assert(ikey_.type == kTypeValue); + assert(current_user_key_snapshot_ == last_snapshot); + + value_.clear(); + valid_ = true; + clear_and_output_next_key_ = false; + } else if (ikey_.type == kTypeSingleDeletion) { + // We can compact out a SingleDelete if: + // 1) We encounter the corresponding PUT -OR- we know that this key + // doesn't appear past this output level + // =AND= + // 2) We've already returned a record in this snapshot -OR- + // there is no earlier earliest_write_conflict_snapshot. + // + // Rule 1 is needed for SingleDelete correctness. Rule 2 is needed to + // allow Transactions to do write-conflict checking (if we compacted away + // all keys, then we wouldn't know that a write happened in this + // snapshot). If there is no earlier snapshot, then we know that there + // are no active transactions that need to know about any writes. + // + // Optimization 3: + // If we encounter a SingleDelete followed by a PUT and Rule 2 is NOT + // true, then we must output a SingleDelete. In this case, we will decide + // to also output the PUT. While we are compacting less by outputting the + // PUT now, hopefully this will lead to better compaction in the future + // when Rule 2 is later true (i.e., we are hoping we can later compact out + // both the SingleDelete and the Put, while we couldn't if we only + // outputted the SingleDelete now). + // In this case, we can save space by removing the PUT's value as it will + // never be read. + // + // Deletes and Merges are not supported on the same key that has a + // SingleDelete as it is not possible to correctly do any partial + // compaction of such a combination of operations. The result of mixing + // those operations for a given key is documented as being undefined. So + // we can choose how to handle such combinations of operations. We will + // try to compact out as much as we can in these cases. + + // The easiest way to process a SingleDelete during iteration is to peek + // ahead at the next key. + ParsedInternalKey next_ikey; + input_->Next(); + + // Check whether the next key exists, is not corrupt, and is the same key + // as the single delete. + if (input_->Valid() && ParseInternalKey(input_->key(), &next_ikey) && + cmp_->Equal(ikey_.user_key, next_ikey.user_key)) { + // Check whether the next key belongs to the same snapshot as the + // SingleDelete. + if (prev_snapshot == 0 || next_ikey.sequence > prev_snapshot) { + if (next_ikey.type == kTypeSingleDeletion) { + // We encountered two SingleDeletes in a row. This could be due to + // unexpected user input. + // Skip the first SingleDelete and let the next iteration decide how + // to handle the second SingleDelete + + // First SingleDelete has been skipped since we already called + // input_->Next(). + ++iter_stats_.num_record_drop_obsolete; + } else if ((ikey_.sequence <= earliest_write_conflict_snapshot_) || + has_outputted_key_) { + // Found a matching value, we can drop the single delete and the + // value. It is safe to drop both records since we've already + // outputted a key in this snapshot, or there is no earlier + // snapshot (Rule 2 above). + + // Note: it doesn't matter whether the second key is a Put or if it + // is an unexpected Merge or Delete.
We will compact it out + either way. + ++iter_stats_.num_record_drop_hidden; + ++iter_stats_.num_record_drop_obsolete; + // Already called input_->Next() once. Call it a second time to + // skip past the second key. + input_->Next(); + } else { + // Found a matching value, but we cannot drop both keys since + // there is an earlier snapshot and we need to leave behind a record + // to know that a write happened in this snapshot (Rule 2 above). + // Clear the value and output the SingleDelete. (The value will be + // outputted on the next iteration.) + ++iter_stats_.num_record_drop_hidden; + + // Setting valid_ to true will output the current SingleDelete + valid_ = true; + + // Set up the Put to be outputted in the next iteration. + // (Optimization 3). + clear_and_output_next_key_ = true; + } + } else { + // We hit the next snapshot without hitting a put, so the iterator + // returns the single delete. + valid_ = true; + } + } else { + // We are at the end of the input, could not parse the next key, or hit + // the next key. The iterator returns the single delete if the key + // possibly exists beyond the current output level. We set + // has_current_user_key to false so that if the iterator is at the next + // key, we do not compare it again against the previous key at the next + // iteration. If the next key is corrupt, we return before the + // comparison, so the value of has_current_user_key does not matter. + has_current_user_key_ = false; + if (compaction_ != nullptr && ikey_.sequence <= earliest_snapshot_ && + compaction_->KeyNotExistsBeyondOutputLevel(ikey_.user_key, + &level_ptrs_)) { + // Key doesn't exist outside of this range. + // Can compact out this SingleDelete. + ++iter_stats_.num_record_drop_obsolete; + } else { + // Output SingleDelete + valid_ = true; + } + } + + if (valid_) { + at_next_ = true; + } + } else if (last_snapshot == current_user_key_snapshot_) { + // If the earliest snapshot in which this key is visible + // is the same as the visibility of a previous instance of the + // same key, then this kv is not visible in any snapshot. + // Hidden by a newer entry for the same user key + // TODO: why not > ? + // + // Note: Dropping this key will not affect TransactionDB write-conflict + // checking since there has already been a record returned for this key + // in this snapshot. + assert(last_sequence >= current_user_key_sequence_); + ++iter_stats_.num_record_drop_hidden; // (A) + input_->Next(); + } else if (compaction_ != nullptr && ikey_.type == kTypeDeletion && + ikey_.sequence <= earliest_snapshot_ && + compaction_->KeyNotExistsBeyondOutputLevel(ikey_.user_key, + &level_ptrs_)) { + // TODO(noetzli): This is the only place where we use compaction_ + // (besides the constructor). We should probably get rid of this + // dependency and find a way to do similar filtering during flushes. + // + // For this user key: + // (1) there is no data in higher levels + // (2) data in lower levels will have larger sequence numbers + // (3) data in layers that are being compacted here and have + // smaller sequence numbers will be dropped in the next + // few iterations of this loop (by rule (A) above). + // Therefore this deletion marker is obsolete and can be dropped. + // + // Note: Dropping this Delete will not affect TransactionDB + // write-conflict checking since it is earlier than any snapshot.
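        // Editorial aside on the SingleDelete contract that the branch above
        // relies on, as a sketch (assumes an already-open rocksdb::DB* db;
        // not part of the patch):
        //   db->Put(rocksdb::WriteOptions(), "k", "v");
        //   db->SingleDelete(rocksdb::WriteOptions(), "k");
        // Exactly one Put may precede a SingleDelete, with no other Delete or
        // Merge on "k" in between; compaction can then drop both entries
        // outright, whereas the regular Delete handled here must survive
        // until it provably shadows nothing in lower levels.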
+ ++iter_stats_.num_record_drop_obsolete; + input_->Next(); + } else if (ikey_.type == kTypeMerge) { + if (!merge_helper_->HasOperator()) { + LogToBuffer(log_buffer_, "Options::merge_operator is null."); + status_ = Status::InvalidArgument( + "merge_operator is not properly initialized."); + return; + } + + pinned_iters_mgr_.StartPinning(); + // We know the merge type entry is not hidden, otherwise we would + // have hit (A) + // We encapsulate the merge related state machine in a different + // object to minimize change to the existing flow. + merge_helper_->MergeUntil(input_, prev_snapshot, bottommost_level_); + merge_out_iter_.SeekToFirst(); + + if (merge_out_iter_.Valid()) { + // NOTE: key, value, and ikey_ refer to old entries. + // These will be correctly set below. + key_ = merge_out_iter_.key(); + value_ = merge_out_iter_.value(); + bool valid_key __attribute__((__unused__)) = + ParseInternalKey(key_, &ikey_); + // MergeUntil stops when it encounters a corrupt key and does not + // include them in the result, so we expect the keys here to be valid. + assert(valid_key); + // Keep current_key_ in sync. + current_key_.UpdateInternalKey(ikey_.sequence, ikey_.type); + key_ = current_key_.GetKey(); + ikey_.user_key = current_key_.GetUserKey(); + valid_ = true; + } else { + // all merge operands were filtered out. Reset the user key, since the + // batch consumed by the merge operator should not shadow any keys + // coming after the merges + has_current_user_key_ = false; + pinned_iters_mgr_.ReleasePinnedIterators(); + } + } else { + valid_ = true; + } + } +} + +void CompactionIterator::PrepareOutput() { + // Zeroing out the sequence number leads to better compression. + // If this is the bottommost level (no files in lower levels) + // and the earliest snapshot is larger than this seqno + // and the userkey differs from the last userkey in compaction + // then we can squash the seqno to zero. + + // This is safe for TransactionDB write-conflict checking since transactions + // only care about sequence number larger than any active snapshots. + if (bottommost_level_ && valid_ && ikey_.sequence < earliest_snapshot_ && + ikey_.type != kTypeMerge && + !cmp_->Equal(compaction_->GetLargestUserKey(), ikey_.user_key)) { + assert(ikey_.type != kTypeDeletion && ikey_.type != kTypeSingleDeletion); + ikey_.sequence = 0; + current_key_.UpdateInternalKey(0, ikey_.type); + } +} + +inline SequenceNumber CompactionIterator::findEarliestVisibleSnapshot( + SequenceNumber in, SequenceNumber* prev_snapshot) { + assert(snapshots_->size()); + SequenceNumber prev __attribute__((__unused__)) = kMaxSequenceNumber; + for (const auto cur : *snapshots_) { + assert(prev == kMaxSequenceNumber || prev <= cur); + if (cur >= in) { + *prev_snapshot = prev == kMaxSequenceNumber ? 0 : prev; + return cur; + } + prev = cur; + assert(prev < kMaxSequenceNumber); + } + *prev_snapshot = prev; + return kMaxSequenceNumber; +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/compaction_iterator.h b/external/rocksdb/db/compaction_iterator.h new file mode 100755 index 0000000000..01f677b147 --- /dev/null +++ b/external/rocksdb/db/compaction_iterator.h @@ -0,0 +1,155 @@ +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree.
An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#pragma once + +#include <algorithm> +#include <deque> +#include <string> +#include <vector> + +#include "db/compaction.h" +#include "db/merge_helper.h" +#include "db/pinned_iterators_manager.h" +#include "rocksdb/compaction_filter.h" +#include "util/log_buffer.h" + +namespace rocksdb { + +struct CompactionIteratorStats { + // Compaction statistics + int64_t num_record_drop_user = 0; + int64_t num_record_drop_hidden = 0; + int64_t num_record_drop_obsolete = 0; + uint64_t total_filter_time = 0; + + // Input statistics + // TODO(noetzli): The stats are incomplete. They are lacking everything + // consumed by MergeHelper. + uint64_t num_input_records = 0; + uint64_t num_input_deletion_records = 0; + uint64_t num_input_corrupt_records = 0; + uint64_t total_input_raw_key_bytes = 0; + uint64_t total_input_raw_value_bytes = 0; +}; + +class CompactionIterator { + public: + CompactionIterator(InternalIterator* input, const Comparator* cmp, + MergeHelper* merge_helper, SequenceNumber last_sequence, + std::vector<SequenceNumber>* snapshots, + SequenceNumber earliest_write_conflict_snapshot, Env* env, + bool expect_valid_internal_key, + const Compaction* compaction = nullptr, + const CompactionFilter* compaction_filter = nullptr, + LogBuffer* log_buffer = nullptr); + + ~CompactionIterator(); + + void ResetRecordCounts(); + + // Seek to the beginning of the compaction iterator output. + // + // REQUIRED: Call only once. + void SeekToFirst(); + + // Produces the next record in the compaction. + // + // REQUIRED: SeekToFirst() has been called. + void Next(); + + // Getters + const Slice& key() const { return key_; } + const Slice& value() const { return value_; } + const Status& status() const { return status_; } + const ParsedInternalKey& ikey() const { return ikey_; } + bool Valid() const { return valid_; } + const Slice& user_key() const { return current_user_key_; } + const CompactionIteratorStats& iter_stats() const { return iter_stats_; } + + private: + // Processes the input stream to find the next output + void NextFromInput(); + + // Do last preparations before presenting the output to the callee. At this + // point this only zeroes out the sequence number if possible for better + // compression. + void PrepareOutput(); + + // Given a sequence number, return the sequence number of the + // earliest snapshot that this sequence number is visible in. + // The snapshots themselves are arranged in ascending order of + // sequence numbers. + // Employ a sequential search because the total number of + // snapshots is typically small. + inline SequenceNumber findEarliestVisibleSnapshot( + SequenceNumber in, SequenceNumber* prev_snapshot); + + InternalIterator* input_; + const Comparator* cmp_; + MergeHelper* merge_helper_; + const std::vector<SequenceNumber>* snapshots_; + const SequenceNumber earliest_write_conflict_snapshot_; + Env* env_; + bool expect_valid_internal_key_; + const Compaction* compaction_; + const CompactionFilter* compaction_filter_; + LogBuffer* log_buffer_; + bool bottommost_level_; + bool valid_ = false; + SequenceNumber visible_at_tip_; + SequenceNumber earliest_snapshot_; + SequenceNumber latest_snapshot_; + bool ignore_snapshots_; + + // State + // + // Points to a copy of the current compaction iterator output (current_key_) + // if valid_. + Slice key_; + // Points to the value in the underlying iterator that corresponds to the + // current output.
+ Slice value_; + // The status is OK unless compaction iterator encounters a merge operand + // while not having a merge operator defined. + Status status_; + // Stores the user key, sequence number and type of the current compaction + // iterator output (or current key in the underlying iterator during + // NextFromInput()). + ParsedInternalKey ikey_; + // Stores whether ikey_.user_key is valid. If set to false, the user key is + // not compared against the current key in the underlying iterator. + bool has_current_user_key_ = false; + bool at_next_ = false; // If true, the underlying iterator is already at the + // next record, so Next() must not advance it again. + // Holds a copy of the current compaction iterator output (or current key in + // the underlying iterator during NextFromInput()). + IterKey current_key_; + Slice current_user_key_; + SequenceNumber current_user_key_sequence_; + SequenceNumber current_user_key_snapshot_; + + // True if the iterator has already returned a record for the current key. + bool has_outputted_key_ = false; + + // If true, clear the value of the next key and output it without applying + // any compaction rules. This is used for outputting a put after a single + // delete. + bool clear_and_output_next_key_ = false; + + MergeOutputIterator merge_out_iter_; + // PinnedIteratorsManager used to pin input_ Iterator blocks while reading + // merge operands and then releasing them after consuming them. + PinnedIteratorsManager pinned_iters_mgr_; + std::string compaction_filter_value_; + // "level_ptrs" holds indices that remember which file of an associated + // level we were last checking during the last call to compaction-> + // KeyNotExistsBeyondOutputLevel(). This allows future calls to the function + // to pick off where it left off since each subcompaction's key range is + // increasing so a later call to the function must be looking for a key that + // is in or beyond the last file checked during the previous call + std::vector<size_t> level_ptrs_; + CompactionIteratorStats iter_stats_; +}; +} // namespace rocksdb diff --git a/external/rocksdb/db/compaction_iterator_test.cc b/external/rocksdb/db/compaction_iterator_test.cc new file mode 100755 index 0000000000..4cbccca55e --- /dev/null +++ b/external/rocksdb/db/compaction_iterator_test.cc @@ -0,0 +1,71 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "db/compaction_iterator.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { + +class CompactionIteratorTest : public testing::Test { + public: + CompactionIteratorTest() : cmp_(BytewiseComparator()), snapshots_({}) {} + + void InitIterator(const std::vector<std::string>& ks, + const std::vector<std::string>& vs, + SequenceNumber last_sequence) { + merge_helper_.reset(new MergeHelper(Env::Default(), cmp_, nullptr, nullptr, + nullptr, 0U, false, 0)); + iter_.reset(new test::VectorIterator(ks, vs)); + iter_->SeekToFirst(); + c_iter_.reset(new CompactionIterator( + iter_.get(), cmp_, merge_helper_.get(), last_sequence, &snapshots_, + kMaxSequenceNumber, Env::Default(), false)); + } + + const Comparator* cmp_; + std::vector<SequenceNumber> snapshots_; + std::unique_ptr<MergeHelper> merge_helper_; + std::unique_ptr<test::VectorIterator> iter_; + std::unique_ptr<CompactionIterator> c_iter_; +}; + +// It is possible that the output of the compaction iterator is empty even if +// the input is not.
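// (Editorial note on the harness: test::KeyStr("a", 5, kTypeSingleDeletion)
// encodes the internal key for user key "a" at sequence 5, so the two-entry
// input below is a SingleDelete at sequence 5 covering a Put at sequence 3.
// With no snapshots registered, NextFromInput() drops both records; that is
// precisely the empty-output case described above.)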
+TEST_F(CompactionIteratorTest, EmptyResult) { + InitIterator({test::KeyStr("a", 5, kTypeSingleDeletion), + test::KeyStr("a", 3, kTypeValue)}, + {"", "val"}, 5); + c_iter_->SeekToFirst(); + ASSERT_FALSE(c_iter_->Valid()); +} + +// If there is a corruption after a single deletion, the corrupted key should +// be preserved. +TEST_F(CompactionIteratorTest, CorruptionAfterSingleDeletion) { + InitIterator({test::KeyStr("a", 5, kTypeSingleDeletion), + test::KeyStr("a", 3, kTypeValue, true), + test::KeyStr("b", 10, kTypeValue)}, + {"", "val", "val2"}, 10); + c_iter_->SeekToFirst(); + ASSERT_TRUE(c_iter_->Valid()); + ASSERT_EQ(test::KeyStr("a", 5, kTypeSingleDeletion), + c_iter_->key().ToString()); + c_iter_->Next(); + ASSERT_TRUE(c_iter_->Valid()); + ASSERT_EQ(test::KeyStr("a", 3, kTypeValue, true), c_iter_->key().ToString()); + c_iter_->Next(); + ASSERT_TRUE(c_iter_->Valid()); + ASSERT_EQ(test::KeyStr("b", 10, kTypeValue), c_iter_->key().ToString()); + c_iter_->Next(); + ASSERT_FALSE(c_iter_->Valid()); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/compaction_job.cc b/external/rocksdb/db/compaction_job.cc new file mode 100755 index 0000000000..d86167d5fd --- /dev/null +++ b/external/rocksdb/db/compaction_job.cc @@ -0,0 +1,1286 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/compaction_job.h" + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include <inttypes.h> +#include <algorithm> +#include <functional> +#include <list> +#include <memory> +#include <random> +#include <set> +#include <thread> +#include <utility> +#include <vector> + +#include "db/builder.h" +#include "db/db_iter.h" +#include "db/dbformat.h" +#include "db/event_helpers.h" +#include "db/filename.h" +#include "db/log_reader.h" +#include "db/log_writer.h" +#include "db/memtable.h" +#include "db/memtable_list.h" +#include "db/merge_context.h" +#include "db/merge_helper.h" +#include "db/version_set.h" +#include "port/likely.h" +#include "port/port.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/statistics.h" +#include "rocksdb/status.h" +#include "rocksdb/table.h" +#include "table/block.h" +#include "table/block_based_table_factory.h" +#include "table/merger.h" +#include "table/table_builder.h" +#include "util/coding.h" +#include "util/file_reader_writer.h" +#include "util/iostats_context_imp.h" +#include "util/log_buffer.h" +#include "util/logging.h" +#include "util/sst_file_manager_impl.h" +#include "util/mutexlock.h" +#include "util/perf_context_imp.h" +#include "util/stop_watch.h" +#include "util/string_util.h" +#include "util/sync_point.h" +#include "util/thread_status_util.h" + +namespace rocksdb { + +// Maintains state for each sub-compaction +struct CompactionJob::SubcompactionState { + const Compaction* compaction; + std::unique_ptr<CompactionIterator> c_iter; + + // The boundaries of the key-range this compaction is interested in. No two + // subcompactions may have overlapping key-ranges.
+ // 'start' is inclusive, 'end' is exclusive, and nullptr means unbounded + Slice *start, *end; + + // The return status of this subcompaction + Status status; + + // Files produced by this subcompaction + struct Output { + FileMetaData meta; + bool finished; + std::shared_ptr<const TableProperties> table_properties; + }; + + // State kept for output being generated + std::vector<Output> outputs; + std::unique_ptr<WritableFileWriter> outfile; + std::unique_ptr<TableBuilder> builder; + Output* current_output() { + if (outputs.empty()) { + // This subcompaction's output could be empty if compaction was aborted + // before this subcompaction had a chance to generate any output files. + // When subcompactions are executed sequentially this is more likely, + // and the later subcompactions are particularly likely to be empty. + // Once they are run in parallel, however, it should be much rarer. + return nullptr; + } else { + return &outputs.back(); + } + } + + // State during the subcompaction + uint64_t total_bytes; + uint64_t num_input_records; + uint64_t num_output_records; + CompactionJobStats compaction_job_stats; + uint64_t approx_size; + // An index used to speed up ShouldStopBefore(). + size_t grandparent_index = 0; + // The number of bytes overlapping between the current output and + // grandparent files used in ShouldStopBefore(). + uint64_t overlapped_bytes = 0; + // A flag indicating whether the key has been seen in ShouldStopBefore() + bool seen_key = false; + std::string compression_dict; + + SubcompactionState(Compaction* c, Slice* _start, Slice* _end, + uint64_t size = 0) + : compaction(c), + start(_start), + end(_end), + outfile(nullptr), + builder(nullptr), + total_bytes(0), + num_input_records(0), + num_output_records(0), + approx_size(size), + grandparent_index(0), + overlapped_bytes(0), + seen_key(false), + compression_dict() { + assert(compaction != nullptr); + } + + SubcompactionState(SubcompactionState&& o) { *this = std::move(o); } + + SubcompactionState& operator=(SubcompactionState&& o) { + compaction = std::move(o.compaction); + start = std::move(o.start); + end = std::move(o.end); + status = std::move(o.status); + outputs = std::move(o.outputs); + outfile = std::move(o.outfile); + builder = std::move(o.builder); + total_bytes = std::move(o.total_bytes); + num_input_records = std::move(o.num_input_records); + num_output_records = std::move(o.num_output_records); + compaction_job_stats = std::move(o.compaction_job_stats); + approx_size = std::move(o.approx_size); + grandparent_index = std::move(o.grandparent_index); + overlapped_bytes = std::move(o.overlapped_bytes); + seen_key = std::move(o.seen_key); + compression_dict = std::move(o.compression_dict); + return *this; + } + + // Because member unique_ptrs do not have these. + SubcompactionState(const SubcompactionState&) = delete; + + SubcompactionState& operator=(const SubcompactionState&) = delete; + + // Returns true iff we should stop building the current output + // before processing "internal_key". + bool ShouldStopBefore(const Slice& internal_key) { + const InternalKeyComparator* icmp = + &compaction->column_family_data()->internal_comparator(); + const std::vector<FileMetaData*>& grandparents = compaction->grandparents(); + + // Scan to find the earliest grandparent file that contains the key.
+ while (grandparent_index < grandparents.size() && + icmp->Compare(internal_key, + grandparents[grandparent_index]->largest.Encode()) > + 0) { + if (seen_key) { + overlapped_bytes += grandparents[grandparent_index]->fd.GetFileSize(); + } + assert(grandparent_index + 1 >= grandparents.size() || + icmp->Compare( + grandparents[grandparent_index]->largest.Encode(), + grandparents[grandparent_index + 1]->smallest.Encode()) <= 0); + grandparent_index++; + } + seen_key = true; + + if (overlapped_bytes > compaction->max_grandparent_overlap_bytes()) { + // Too much overlap for current output; start new output + overlapped_bytes = 0; + return true; + } + + return false; + } +}; + +// Maintains state for the entire compaction +struct CompactionJob::CompactionState { + Compaction* const compaction; + + // REQUIRED: subcompaction states are stored in order of increasing + // key-range + std::vector sub_compact_states; + Status status; + + uint64_t total_bytes; + uint64_t num_input_records; + uint64_t num_output_records; + + explicit CompactionState(Compaction* c) + : compaction(c), + total_bytes(0), + num_input_records(0), + num_output_records(0) {} + + size_t NumOutputFiles() { + size_t total = 0; + for (auto& s : sub_compact_states) { + total += s.outputs.size(); + } + return total; + } + + Slice SmallestUserKey() { + for (const auto& sub_compact_state : sub_compact_states) { + if (!sub_compact_state.outputs.empty() && + sub_compact_state.outputs[0].finished) { + return sub_compact_state.outputs[0].meta.smallest.user_key(); + } + } + // If there is no finished output, return an empty slice. + return Slice(nullptr, 0); + } + + Slice LargestUserKey() { + for (auto it = sub_compact_states.rbegin(); it < sub_compact_states.rend(); + ++it) { + if (!it->outputs.empty() && it->current_output()->finished) { + assert(it->current_output() != nullptr); + return it->current_output()->meta.largest.user_key(); + } + } + // If there is no finished output, return an empty slice. 
+ return Slice(nullptr, 0); + } +}; + +void CompactionJob::AggregateStatistics() { + for (SubcompactionState& sc : compact_->sub_compact_states) { + compact_->total_bytes += sc.total_bytes; + compact_->num_input_records += sc.num_input_records; + compact_->num_output_records += sc.num_output_records; + } + if (compaction_job_stats_) { + for (SubcompactionState& sc : compact_->sub_compact_states) { + compaction_job_stats_->Add(sc.compaction_job_stats); + } + } +} + +CompactionJob::CompactionJob( + int job_id, Compaction* compaction, const DBOptions& db_options, + const EnvOptions& env_options, VersionSet* versions, + std::atomic<bool>* shutting_down, LogBuffer* log_buffer, + Directory* db_directory, Directory* output_directory, Statistics* stats, + InstrumentedMutex* db_mutex, Status* db_bg_error, + std::vector<SequenceNumber> existing_snapshots, + SequenceNumber earliest_write_conflict_snapshot, + std::shared_ptr<Cache> table_cache, EventLogger* event_logger, + bool paranoid_file_checks, bool measure_io_stats, const std::string& dbname, + CompactionJobStats* compaction_job_stats) + : job_id_(job_id), + compact_(new CompactionState(compaction)), + compaction_job_stats_(compaction_job_stats), + compaction_stats_(1), + dbname_(dbname), + db_options_(db_options), + env_options_(env_options), + env_(db_options.env), + versions_(versions), + shutting_down_(shutting_down), + log_buffer_(log_buffer), + db_directory_(db_directory), + output_directory_(output_directory), + stats_(stats), + db_mutex_(db_mutex), + db_bg_error_(db_bg_error), + existing_snapshots_(std::move(existing_snapshots)), + earliest_write_conflict_snapshot_(earliest_write_conflict_snapshot), + table_cache_(std::move(table_cache)), + event_logger_(event_logger), + paranoid_file_checks_(paranoid_file_checks), + measure_io_stats_(measure_io_stats) { + assert(log_buffer_ != nullptr); + const auto* cfd = compact_->compaction->column_family_data(); + ThreadStatusUtil::SetColumnFamily(cfd, cfd->ioptions()->env, + cfd->options()->enable_thread_tracking); + ThreadStatusUtil::SetThreadOperation(ThreadStatus::OP_COMPACTION); + ReportStartedCompaction(compaction); +} + +CompactionJob::~CompactionJob() { + assert(compact_ == nullptr); + ThreadStatusUtil::ResetThreadStatus(); +} + +void CompactionJob::ReportStartedCompaction( + Compaction* compaction) { + const auto* cfd = compact_->compaction->column_family_data(); + ThreadStatusUtil::SetColumnFamily(cfd, cfd->ioptions()->env, + cfd->options()->enable_thread_tracking); + + ThreadStatusUtil::SetThreadOperationProperty( + ThreadStatus::COMPACTION_JOB_ID, + job_id_); + + ThreadStatusUtil::SetThreadOperationProperty( + ThreadStatus::COMPACTION_INPUT_OUTPUT_LEVEL, + (static_cast<uint64_t>(compact_->compaction->start_level()) << 32) + + compact_->compaction->output_level()); + + // In the current design, a CompactionJob is always created + // for non-trivial compaction.
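The COMPACTION_INPUT_OUTPUT_LEVEL property set above packs both levels into a single 64-bit value: the start level in the high 32 bits and the output level in the low 32 bits. A minimal standalone sketch of that encoding (the helper names here are hypothetical, mine rather than RocksDB's):

// Pack/unpack demo for the (start level, output level) thread-status
// property shown above.
#include <cassert>
#include <cstdint>

uint64_t PackLevels(int start_level, int output_level) {
  return (static_cast<uint64_t>(start_level) << 32) +
         static_cast<uint64_t>(output_level);
}

int StartLevel(uint64_t packed) { return static_cast<int>(packed >> 32); }

int OutputLevel(uint64_t packed) {
  return static_cast<int>(packed & 0xffffffffull);
}

int main() {
  uint64_t p = PackLevels(1, 2);
  assert(StartLevel(p) == 1 && OutputLevel(p) == 2);
  return 0;
}

The assertion that follows records the invariant stated in the comment above: a trivial move only gets a CompactionJob when the compaction is manual.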
+ assert(compaction->IsTrivialMove() == false || + compaction->is_manual_compaction() == true); + + ThreadStatusUtil::SetThreadOperationProperty( + ThreadStatus::COMPACTION_PROP_FLAGS, + compaction->is_manual_compaction() + + (compaction->deletion_compaction() << 1)); + + ThreadStatusUtil::SetThreadOperationProperty( + ThreadStatus::COMPACTION_TOTAL_INPUT_BYTES, + compaction->CalculateTotalInputSize()); + + IOSTATS_RESET(bytes_written); + IOSTATS_RESET(bytes_read); + ThreadStatusUtil::SetThreadOperationProperty( + ThreadStatus::COMPACTION_BYTES_WRITTEN, 0); + ThreadStatusUtil::SetThreadOperationProperty( + ThreadStatus::COMPACTION_BYTES_READ, 0); + + // Set the thread operation after operation properties + // to ensure GetThreadList() can always show them all together. + ThreadStatusUtil::SetThreadOperation( + ThreadStatus::OP_COMPACTION); + + if (compaction_job_stats_) { + compaction_job_stats_->is_manual_compaction = + compaction->is_manual_compaction(); + } +} + +void CompactionJob::Prepare() { + AutoThreadOperationStageUpdater stage_updater( + ThreadStatus::STAGE_COMPACTION_PREPARE); + + // Generate file_levels_ for compaction before making the Iterator + auto* c = compact_->compaction; + assert(c->column_family_data() != nullptr); + assert(c->column_family_data()->current()->storage_info() + ->NumLevelFiles(compact_->compaction->level()) > 0); + + // Is this compaction producing files at the bottommost level? + bottommost_level_ = c->bottommost_level(); + + if (c->ShouldFormSubcompactions()) { + const uint64_t start_micros = env_->NowMicros(); + GenSubcompactionBoundaries(); + MeasureTime(stats_, SUBCOMPACTION_SETUP_TIME, + env_->NowMicros() - start_micros); + + assert(sizes_.size() == boundaries_.size() + 1); + + for (size_t i = 0; i <= boundaries_.size(); i++) { + Slice* start = i == 0 ? nullptr : &boundaries_[i - 1]; + Slice* end = i == boundaries_.size() ? nullptr : &boundaries_[i]; + compact_->sub_compact_states.emplace_back(c, start, end, sizes_[i]); + } + MeasureTime(stats_, NUM_SUBCOMPACTIONS_SCHEDULED, + compact_->sub_compact_states.size()); + } else { + compact_->sub_compact_states.emplace_back(c, nullptr, nullptr); + } +} + +struct RangeWithSize { + Range range; + uint64_t size; + + RangeWithSize(const Slice& a, const Slice& b, uint64_t s = 0) + : range(a, b), size(s) {} +}; + +// Generates a histogram representing potential divisions of key ranges from +// the input. It adds the starting and/or ending keys of certain input files +// to the working set and then finds the approximate size of data in between +// each consecutive pair of slices. Then it divides these ranges into +// consecutive groups such that each group has a similar size.
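GenSubcompactionBoundaries(), defined next, implements this over real file metadata. As a standalone illustration of just the final grouping step, the toy program below (the range sizes and the subcompaction count are hypothetical inputs of my own) greedily accumulates ranges until each group reaches the mean share:

// Toy version of the greedy grouping performed at the end of
// GenSubcompactionBoundaries(): accumulate ranges until the running total
// reaches the mean, then emit a boundary and start a new group.
#include <cstdint>
#include <iostream>
#include <vector>

int main() {
  std::vector<uint64_t> range_sizes = {10, 40, 25, 25};  // hypothetical bytes
  uint64_t sum = 0;
  for (uint64_t s : range_sizes) sum += s;

  uint64_t subcompactions = 2;  // assume the planner chose two
  double mean = static_cast<double>(sum) / subcompactions;

  uint64_t acc = 0;
  // Mirror the real loop: never emit a boundary for the last range, and
  // stop emitting once only one subcompaction is left to schedule.
  for (size_t i = 0; i + 1 < range_sizes.size(); ++i) {
    acc += range_sizes[i];
    if (subcompactions > 1 && acc >= mean) {
      std::cout << "boundary after range " << i << " (" << acc << " bytes)\n";
      --subcompactions;
      acc = 0;
    }
  }
  // The remainder plus the last range forms the final subcompaction.
  std::cout << "last group: " << (acc + range_sizes.back()) << " bytes\n";
  return 0;
}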
+void CompactionJob::GenSubcompactionBoundaries() { + auto* c = compact_->compaction; + auto* cfd = c->column_family_data(); + const Comparator* cfd_comparator = cfd->user_comparator(); + std::vector bounds; + int start_lvl = c->start_level(); + int out_lvl = c->output_level(); + + // Add the starting and/or ending key of certain input files as a potential + // boundary + for (size_t lvl_idx = 0; lvl_idx < c->num_input_levels(); lvl_idx++) { + int lvl = c->level(lvl_idx); + if (lvl >= start_lvl && lvl <= out_lvl) { + const LevelFilesBrief* flevel = c->input_levels(lvl_idx); + size_t num_files = flevel->num_files; + + if (num_files == 0) { + continue; + } + + if (lvl == 0) { + // For level 0 add the starting and ending key of each file since the + // files may have greatly differing key ranges (not range-partitioned) + for (size_t i = 0; i < num_files; i++) { + bounds.emplace_back(flevel->files[i].smallest_key); + bounds.emplace_back(flevel->files[i].largest_key); + } + } else { + // For all other levels add the smallest/largest key in the level to + // encompass the range covered by that level + bounds.emplace_back(flevel->files[0].smallest_key); + bounds.emplace_back(flevel->files[num_files - 1].largest_key); + if (lvl == out_lvl) { + // For the last level include the starting keys of all files since + // the last level is the largest and probably has the widest key + // range. Since it's range partitioned, the ending key of one file + // and the starting key of the next are very close (or identical). + for (size_t i = 1; i < num_files; i++) { + bounds.emplace_back(flevel->files[i].smallest_key); + } + } + } + } + } + + std::sort(bounds.begin(), bounds.end(), + [cfd_comparator] (const Slice& a, const Slice& b) -> bool { + return cfd_comparator->Compare(ExtractUserKey(a), ExtractUserKey(b)) < 0; + }); + // Remove duplicated entries from bounds + bounds.erase(std::unique(bounds.begin(), bounds.end(), + [cfd_comparator] (const Slice& a, const Slice& b) -> bool { + return cfd_comparator->Compare(ExtractUserKey(a), ExtractUserKey(b)) == 0; + }), bounds.end()); + + // Combine consecutive pairs of boundaries into ranges with an approximate + // size of data covered by keys in that range + uint64_t sum = 0; + std::vector ranges; + auto* v = cfd->current(); + for (auto it = bounds.begin();;) { + const Slice a = *it; + it++; + + if (it == bounds.end()) { + break; + } + + const Slice b = *it; + uint64_t size = versions_->ApproximateSize(v, a, b, start_lvl, out_lvl + 1); + ranges.emplace_back(a, b, size); + sum += size; + } + + // Group the ranges into subcompactions + const double min_file_fill_percent = 4.0 / 5; + uint64_t max_output_files = static_cast( + std::ceil(sum / min_file_fill_percent / + c->mutable_cf_options()->MaxFileSizeForLevel(out_lvl))); + uint64_t subcompactions = + std::min({static_cast(ranges.size()), + static_cast(db_options_.max_subcompactions), + max_output_files}); + + double mean = sum * 1.0 / subcompactions; + + if (subcompactions > 1) { + // Greedily add ranges to the subcompaction until the sum of the ranges' + // sizes becomes >= the expected mean size of a subcompaction + sum = 0; + for (size_t i = 0; i < ranges.size() - 1; i++) { + sum += ranges[i].size; + if (subcompactions == 1) { + // If there's only one left to schedule then it goes to the end so no + // need to put an end boundary + continue; + } + if (sum >= mean) { + boundaries_.emplace_back(ExtractUserKey(ranges[i].range.limit)); + sizes_.emplace_back(sum); + subcompactions--; + sum = 0; + } + } + 
sizes_.emplace_back(sum + ranges.back().size); + } else { + // Only one range so its size is the total sum of sizes computed above + sizes_.emplace_back(sum); + } +} + +Status CompactionJob::Run() { + AutoThreadOperationStageUpdater stage_updater( + ThreadStatus::STAGE_COMPACTION_RUN); + TEST_SYNC_POINT("CompactionJob::Run():Start"); + log_buffer_->FlushBufferToLog(); + LogCompaction(); + + const size_t num_threads = compact_->sub_compact_states.size(); + assert(num_threads > 0); + const uint64_t start_micros = env_->NowMicros(); + + // Launch a thread for each of subcompactions 1...num_threads-1 + std::vector thread_pool; + thread_pool.reserve(num_threads - 1); + for (size_t i = 1; i < compact_->sub_compact_states.size(); i++) { + thread_pool.emplace_back(&CompactionJob::ProcessKeyValueCompaction, this, + &compact_->sub_compact_states[i]); + } + + // Always schedule the first subcompaction (whether or not there are also + // others) in the current thread to be efficient with resources + ProcessKeyValueCompaction(&compact_->sub_compact_states[0]); + + // Wait for all other threads (if there are any) to finish execution + for (auto& thread : thread_pool) { + thread.join(); + } + + if (output_directory_ && !db_options_.disableDataSync) { + output_directory_->Fsync(); + } + + compaction_stats_.micros = env_->NowMicros() - start_micros; + MeasureTime(stats_, COMPACTION_TIME, compaction_stats_.micros); + + // Check if any thread encountered an error during execution + Status status; + for (const auto& state : compact_->sub_compact_states) { + if (!state.status.ok()) { + status = state.status; + break; + } + } + + TablePropertiesCollection tp; + for (const auto& state : compact_->sub_compact_states) { + for (const auto& output : state.outputs) { + auto fn = TableFileName(db_options_.db_paths, output.meta.fd.GetNumber(), + output.meta.fd.GetPathId()); + tp[fn] = output.table_properties; + } + } + compact_->compaction->SetOutputTableProperties(std::move(tp)); + + // Finish up all book-keeping to unify the subcompaction results + AggregateStatistics(); + UpdateCompactionStats(); + RecordCompactionIOStats(); + LogFlush(db_options_.info_log); + TEST_SYNC_POINT("CompactionJob::Run():End"); + + compact_->status = status; + return status; +} + +Status CompactionJob::Install(const MutableCFOptions& mutable_cf_options) { + AutoThreadOperationStageUpdater stage_updater( + ThreadStatus::STAGE_COMPACTION_INSTALL); + db_mutex_->AssertHeld(); + Status status = compact_->status; + ColumnFamilyData* cfd = compact_->compaction->column_family_data(); + cfd->internal_stats()->AddCompactionStats( + compact_->compaction->output_level(), compaction_stats_); + + if (status.ok()) { + status = InstallCompactionResults(mutable_cf_options); + } + VersionStorageInfo::LevelSummaryStorage tmp; + auto vstorage = cfd->current()->storage_info(); + const auto& stats = compaction_stats_; + LogToBuffer( + log_buffer_, + "[%s] compacted to: %s, MB/sec: %.1f rd, %.1f wr, level %d, " + "files in(%d, %d) out(%d) " + "MB in(%.1f, %.1f) out(%.1f), read-write-amplify(%.1f) " + "write-amplify(%.1f) %s, records in: %d, records dropped: %d\n", + cfd->GetName().c_str(), vstorage->LevelSummary(&tmp), + (stats.bytes_read_non_output_levels + stats.bytes_read_output_level) / + static_cast(stats.micros), + stats.bytes_written / static_cast(stats.micros), + compact_->compaction->output_level(), + stats.num_input_files_in_non_output_levels, + stats.num_input_files_in_output_level, + stats.num_output_files, + stats.bytes_read_non_output_levels / 
1048576.0, + stats.bytes_read_output_level / 1048576.0, + stats.bytes_written / 1048576.0, + (stats.bytes_written + stats.bytes_read_output_level + + stats.bytes_read_non_output_levels) / + static_cast(stats.bytes_read_non_output_levels), + stats.bytes_written / + static_cast(stats.bytes_read_non_output_levels), + status.ToString().c_str(), stats.num_input_records, + stats.num_dropped_records); + + UpdateCompactionJobStats(stats); + + auto stream = event_logger_->LogToBuffer(log_buffer_); + stream << "job" << job_id_ + << "event" << "compaction_finished" + << "compaction_time_micros" << compaction_stats_.micros + << "output_level" << compact_->compaction->output_level() + << "num_output_files" << compact_->NumOutputFiles() + << "total_output_size" << compact_->total_bytes + << "num_input_records" << compact_->num_input_records + << "num_output_records" << compact_->num_output_records + << "num_subcompactions" << compact_->sub_compact_states.size(); + + if (measure_io_stats_ && compaction_job_stats_ != nullptr) { + stream << "file_write_nanos" << compaction_job_stats_->file_write_nanos; + stream << "file_range_sync_nanos" + << compaction_job_stats_->file_range_sync_nanos; + stream << "file_fsync_nanos" << compaction_job_stats_->file_fsync_nanos; + stream << "file_prepare_write_nanos" + << compaction_job_stats_->file_prepare_write_nanos; + } + + stream << "lsm_state"; + stream.StartArray(); + for (int level = 0; level < vstorage->num_levels(); ++level) { + stream << vstorage->NumLevelFiles(level); + } + stream.EndArray(); + + CleanupCompaction(); + return status; +} + +void CompactionJob::ProcessKeyValueCompaction(SubcompactionState* sub_compact) { + assert(sub_compact != nullptr); + std::unique_ptr input( + versions_->MakeInputIterator(sub_compact->compaction)); + + AutoThreadOperationStageUpdater stage_updater( + ThreadStatus::STAGE_COMPACTION_PROCESS_KV); + + // I/O measurement variables + PerfLevel prev_perf_level = PerfLevel::kEnableTime; + const uint64_t kRecordStatsEvery = 1000; + uint64_t prev_write_nanos = 0; + uint64_t prev_fsync_nanos = 0; + uint64_t prev_range_sync_nanos = 0; + uint64_t prev_prepare_write_nanos = 0; + if (measure_io_stats_) { + prev_perf_level = GetPerfLevel(); + SetPerfLevel(PerfLevel::kEnableTime); + prev_write_nanos = IOSTATS(write_nanos); + prev_fsync_nanos = IOSTATS(fsync_nanos); + prev_range_sync_nanos = IOSTATS(range_sync_nanos); + prev_prepare_write_nanos = IOSTATS(prepare_write_nanos); + } + + ColumnFamilyData* cfd = sub_compact->compaction->column_family_data(); + const MutableCFOptions* mutable_cf_options = + sub_compact->compaction->mutable_cf_options(); + + // To build compression dictionary, we sample the first output file, assuming + // it'll reach the maximum length, and then use the dictionary for compressing + // subsequent output files. The dictionary may be less than max_dict_bytes if + // the first output file's length is less than the maximum. 
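The block that opens the function body below implements this sampling. As a standalone illustration (the byte budget, the file size, and std::mt19937_64 are stand-ins of my own for RocksDB's Random64), drawing up to max_dict_bytes/64 random 64-byte-aligned sample offsets looks like:

// Sketch of drawing sample offsets for the compression dictionary, as
// described above: up to max_dict_bytes/64 samples of 64 bytes each, at
// uniformly random 64-byte-aligned offsets within the expected file size.
#include <cstdint>
#include <iostream>
#include <random>
#include <set>

int main() {
  const int kSampleLenShift = 6;                // 2^6 = 64-byte samples
  const size_t max_dict_bytes = 16 * 1024;      // hypothetical budget
  const size_t out_file_len = 2 * 1024 * 1024;  // expected output file size

  const size_t max_samples = max_dict_bytes >> kSampleLenShift;
  const size_t num_slots = out_file_len >> kSampleLenShift;

  std::mt19937_64 rng(42);  // stand-in for RocksDB's Random64
  std::uniform_int_distribution<size_t> dist(0, num_slots - 1);

  std::set<size_t> sample_begin_offsets;  // de-duplicated, sorted offsets
  for (size_t i = 0; i < max_samples; ++i) {
    sample_begin_offsets.insert(dist(rng) << kSampleLenShift);
  }
  std::cout << sample_begin_offsets.size() << " distinct sample offsets\n";
  return 0;
}

Because the offsets go into a std::set, duplicate draws collapse, which is one way the dictionary can end up smaller than max_dict_bytes.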
+ const int kSampleLenShift = 6; // 2^6 = 64-byte samples + std::set<size_t> sample_begin_offsets; + if (bottommost_level_ && + cfd->ioptions()->compression_opts.max_dict_bytes > 0) { + const size_t kMaxSamples = + cfd->ioptions()->compression_opts.max_dict_bytes >> kSampleLenShift; + const size_t kOutFileLen = mutable_cf_options->MaxFileSizeForLevel( + compact_->compaction->output_level()); + if (kOutFileLen != port::kMaxSizet) { + const size_t kOutFileNumSamples = kOutFileLen >> kSampleLenShift; + Random64 generator{versions_->NewFileNumber()}; + for (size_t i = 0; i < kMaxSamples; ++i) { + sample_begin_offsets.insert(generator.Uniform(kOutFileNumSamples) + << kSampleLenShift); + } + } + } + + auto compaction_filter = cfd->ioptions()->compaction_filter; + std::unique_ptr<CompactionFilter> compaction_filter_from_factory = nullptr; + if (compaction_filter == nullptr) { + compaction_filter_from_factory = + sub_compact->compaction->CreateCompactionFilter(); + compaction_filter = compaction_filter_from_factory.get(); + } + MergeHelper merge( + env_, cfd->user_comparator(), cfd->ioptions()->merge_operator, + compaction_filter, db_options_.info_log.get(), + mutable_cf_options->min_partial_merge_operands, + false /* internal key corruption is expected */, + existing_snapshots_.empty() ? 0 : existing_snapshots_.back(), + compact_->compaction->level(), db_options_.statistics.get()); + + TEST_SYNC_POINT("CompactionJob::Run():Inprogress"); + + Slice* start = sub_compact->start; + Slice* end = sub_compact->end; + if (start != nullptr) { + IterKey start_iter; + start_iter.SetInternalKey(*start, kMaxSequenceNumber, kValueTypeForSeek); + input->Seek(start_iter.GetKey()); + } else { + input->SeekToFirst(); + } + + Status status; + sub_compact->c_iter.reset(new CompactionIterator( + input.get(), cfd->user_comparator(), &merge, versions_->LastSequence(), + &existing_snapshots_, earliest_write_conflict_snapshot_, env_, false, + sub_compact->compaction, compaction_filter)); + auto c_iter = sub_compact->c_iter.get(); + c_iter->SeekToFirst(); + const auto& c_iter_stats = c_iter->iter_stats(); + auto sample_begin_offset_iter = sample_begin_offsets.cbegin(); + // data_begin_offset and compression_dict are only valid while generating + // dictionary from the first output file. + size_t data_begin_offset = 0; + std::string compression_dict; + compression_dict.reserve(cfd->ioptions()->compression_opts.max_dict_bytes); + + // TODO(noetzli): check whether we could check !shutting_down_->... only + // occasionally (see diff D42687) + while (status.ok() && !shutting_down_->load(std::memory_order_acquire) && + !cfd->IsDropped() && c_iter->Valid()) { + // Invariant: c_iter.status() is guaranteed to be OK if c_iter->Valid() + // returns true.
+ const Slice& key = c_iter->key(); + const Slice& value = c_iter->value(); + + // If an end key (exclusive) is specified, check if the current key is + // >= than it and exit if it is because the iterator is out of its range + if (end != nullptr && + cfd->user_comparator()->Compare(c_iter->user_key(), *end) >= 0) { + break; + } else if (sub_compact->ShouldStopBefore(key) && + sub_compact->builder != nullptr) { + status = FinishCompactionOutputFile(input->status(), sub_compact); + if (!status.ok()) { + break; + } + } + + if (c_iter_stats.num_input_records % kRecordStatsEvery == + kRecordStatsEvery - 1) { + RecordDroppedKeys(c_iter_stats, &sub_compact->compaction_job_stats); + c_iter->ResetRecordCounts(); + RecordCompactionIOStats(); + } + + // Open output file if necessary + if (sub_compact->builder == nullptr) { + status = OpenCompactionOutputFile(sub_compact); + if (!status.ok()) { + break; + } + } + assert(sub_compact->builder != nullptr); + assert(sub_compact->current_output() != nullptr); + sub_compact->builder->Add(key, value); + sub_compact->current_output()->meta.UpdateBoundaries( + key, c_iter->ikey().sequence); + sub_compact->num_output_records++; + + if (sub_compact->outputs.size() == 1) { // first output file + // Check if this key/value overlaps any sample intervals; if so, appends + // overlapping portions to the dictionary. + for (const auto& data_elmt : {key, value}) { + size_t data_end_offset = data_begin_offset + data_elmt.size(); + while (sample_begin_offset_iter != sample_begin_offsets.cend() && + *sample_begin_offset_iter < data_end_offset) { + size_t sample_end_offset = + *sample_begin_offset_iter + (1 << kSampleLenShift); + // Invariant: Because we advance sample iterator while processing the + // data_elmt containing the sample's last byte, the current sample + // cannot end before the current data_elmt. + assert(data_begin_offset < sample_end_offset); + + size_t data_elmt_copy_offset, data_elmt_copy_len; + if (*sample_begin_offset_iter <= data_begin_offset) { + // The sample starts before data_elmt starts, so take bytes starting + // at the beginning of data_elmt. + data_elmt_copy_offset = 0; + } else { + // data_elmt starts before the sample starts, so take bytes starting + // at the below offset into data_elmt. + data_elmt_copy_offset = + *sample_begin_offset_iter - data_begin_offset; + } + if (sample_end_offset <= data_end_offset) { + // The sample ends before data_elmt ends, so take as many bytes as + // needed. + data_elmt_copy_len = + sample_end_offset - (data_begin_offset + data_elmt_copy_offset); + } else { + // data_elmt ends before the sample ends, so take all remaining + // bytes in data_elmt. + data_elmt_copy_len = + data_end_offset - (data_begin_offset + data_elmt_copy_offset); + } + compression_dict.append(&data_elmt.data()[data_elmt_copy_offset], + data_elmt_copy_len); + if (sample_end_offset > data_end_offset) { + // Didn't finish sample. Try to finish it with the next data_elmt. + break; + } + // Next sample may require bytes from same data_elmt. + sample_begin_offset_iter++; + } + data_begin_offset = data_end_offset; + } + } + + // Close output file if it is big enough + // TODO(aekmekji): determine if file should be closed earlier than this + // during subcompactions (i.e. 
if output size, estimated by input size, is + // going to be 1.2MB and max_output_file_size = 1MB, prefer to have 0.6MB + // and 0.6MB instead of 1MB and 0.2MB) + if (sub_compact->builder->FileSize() >= + sub_compact->compaction->max_output_file_size()) { + status = FinishCompactionOutputFile(input->status(), sub_compact); + if (sub_compact->outputs.size() == 1) { + // Use dictionary from first output file for compression of subsequent + // files. + sub_compact->compression_dict = std::move(compression_dict); + } + } + c_iter->Next(); + } + + sub_compact->num_input_records = c_iter_stats.num_input_records; + sub_compact->compaction_job_stats.num_input_deletion_records = + c_iter_stats.num_input_deletion_records; + sub_compact->compaction_job_stats.num_corrupt_keys = + c_iter_stats.num_input_corrupt_records; + sub_compact->compaction_job_stats.total_input_raw_key_bytes += + c_iter_stats.total_input_raw_key_bytes; + sub_compact->compaction_job_stats.total_input_raw_value_bytes += + c_iter_stats.total_input_raw_value_bytes; + + RecordTick(stats_, FILTER_OPERATION_TOTAL_TIME, + c_iter_stats.total_filter_time); + RecordDroppedKeys(c_iter_stats, &sub_compact->compaction_job_stats); + RecordCompactionIOStats(); + + if (status.ok() && + (shutting_down_->load(std::memory_order_acquire) || cfd->IsDropped())) { + status = Status::ShutdownInProgress( + "Database shutdown or Column family drop during compaction"); + } + if (status.ok() && sub_compact->builder != nullptr) { + status = FinishCompactionOutputFile(input->status(), sub_compact); + } + if (status.ok()) { + status = input->status(); + } + + if (measure_io_stats_) { + sub_compact->compaction_job_stats.file_write_nanos += + IOSTATS(write_nanos) - prev_write_nanos; + sub_compact->compaction_job_stats.file_fsync_nanos += + IOSTATS(fsync_nanos) - prev_fsync_nanos; + sub_compact->compaction_job_stats.file_range_sync_nanos += + IOSTATS(range_sync_nanos) - prev_range_sync_nanos; + sub_compact->compaction_job_stats.file_prepare_write_nanos += + IOSTATS(prepare_write_nanos) - prev_prepare_write_nanos; + if (prev_perf_level != PerfLevel::kEnableTime) { + SetPerfLevel(prev_perf_level); + } + } + + sub_compact->c_iter.reset(); + input.reset(); + sub_compact->status = status; +} + +void CompactionJob::RecordDroppedKeys( + const CompactionIteratorStats& c_iter_stats, + CompactionJobStats* compaction_job_stats) { + if (c_iter_stats.num_record_drop_user > 0) { + RecordTick(stats_, COMPACTION_KEY_DROP_USER, + c_iter_stats.num_record_drop_user); + } + if (c_iter_stats.num_record_drop_hidden > 0) { + RecordTick(stats_, COMPACTION_KEY_DROP_NEWER_ENTRY, + c_iter_stats.num_record_drop_hidden); + if (compaction_job_stats) { + compaction_job_stats->num_records_replaced += + c_iter_stats.num_record_drop_hidden; + } + } + if (c_iter_stats.num_record_drop_obsolete > 0) { + RecordTick(stats_, COMPACTION_KEY_DROP_OBSOLETE, + c_iter_stats.num_record_drop_obsolete); + if (compaction_job_stats) { + compaction_job_stats->num_expired_deletion_records += + c_iter_stats.num_record_drop_obsolete; + } + } +} + +Status CompactionJob::FinishCompactionOutputFile( + const Status& input_status, SubcompactionState* sub_compact) { + AutoThreadOperationStageUpdater stage_updater( + ThreadStatus::STAGE_COMPACTION_SYNC_FILE); + assert(sub_compact != nullptr); + assert(sub_compact->outfile); + assert(sub_compact->builder != nullptr); + assert(sub_compact->current_output() != nullptr); + + uint64_t output_number = sub_compact->current_output()->meta.fd.GetNumber(); + assert(output_number != 
0); + + TableProperties table_properties; + // Check for iterator errors + Status s = input_status; + auto meta = &sub_compact->current_output()->meta; + const uint64_t current_entries = sub_compact->builder->NumEntries(); + meta->marked_for_compaction = sub_compact->builder->NeedCompact(); + if (s.ok()) { + s = sub_compact->builder->Finish(); + } else { + sub_compact->builder->Abandon(); + } + const uint64_t current_bytes = sub_compact->builder->FileSize(); + meta->fd.file_size = current_bytes; + sub_compact->current_output()->finished = true; + sub_compact->total_bytes += current_bytes; + + // Finish and check for file errors + if (s.ok() && !db_options_.disableDataSync) { + StopWatch sw(env_, stats_, COMPACTION_OUTFILE_SYNC_MICROS); + s = sub_compact->outfile->Sync(db_options_.use_fsync); + } + if (s.ok()) { + s = sub_compact->outfile->Close(); + } + sub_compact->outfile.reset(); + + ColumnFamilyData* cfd = sub_compact->compaction->column_family_data(); + TableProperties tp; + if (s.ok() && current_entries > 0) { + // Verify that the table is usable + InternalIterator* iter = cfd->table_cache()->NewIterator( + ReadOptions(), env_options_, cfd->internal_comparator(), meta->fd, + nullptr, cfd->internal_stats()->GetFileReadHist( + compact_->compaction->output_level()), + false); + s = iter->status(); + + if (s.ok() && paranoid_file_checks_) { + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {} + s = iter->status(); + } + + delete iter; + + // Output to event logger and fire events. + if (s.ok()) { + tp = sub_compact->builder->GetTableProperties(); + sub_compact->current_output()->table_properties = + std::make_shared(tp); + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "[%s] [JOB %d] Generated table #%" PRIu64 ": %" PRIu64 + " keys, %" PRIu64 " bytes%s", + cfd->GetName().c_str(), job_id_, output_number, current_entries, + current_bytes, + meta->marked_for_compaction ? " (need compaction)" : ""); + } + } + std::string fname = TableFileName(db_options_.db_paths, meta->fd.GetNumber(), + meta->fd.GetPathId()); + EventHelpers::LogAndNotifyTableFileCreationFinished( + event_logger_, cfd->ioptions()->listeners, dbname_, cfd->GetName(), fname, + job_id_, meta->fd, tp, TableFileCreationReason::kCompaction, s); + + // Report new file to SstFileManagerImpl + auto sfm = + static_cast(db_options_.sst_file_manager.get()); + if (sfm && meta->fd.GetPathId() == 0) { + auto fn = TableFileName(cfd->ioptions()->db_paths, meta->fd.GetNumber(), + meta->fd.GetPathId()); + sfm->OnAddFile(fn); + if (sfm->IsMaxAllowedSpaceReached()) { + InstrumentedMutexLock l(db_mutex_); + if (db_bg_error_->ok()) { + s = Status::IOError("Max allowed space was reached"); + *db_bg_error_ = s; + TEST_SYNC_POINT( + "CompactionJob::FinishCompactionOutputFile:MaxAllowedSpaceReached"); + } + } + } + + sub_compact->builder.reset(); + return s; +} + +Status CompactionJob::InstallCompactionResults( + const MutableCFOptions& mutable_cf_options) { + db_mutex_->AssertHeld(); + + auto* compaction = compact_->compaction; + // paranoia: verify that the files that we started with + // still exist in the current version and in the same original level. + // This ensures that a concurrent compaction did not erroneously + // pick the same files to compact_. 
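VerifyCompactionFileConsistency, called just below, is in spirit a set-membership check over (level, file number) pairs: every input file the compaction started from must still be live at its original level, otherwise the results must not be installed. A simplified standalone sketch (the types and names are mine, not the VersionSet API):

// Simplified illustration of the consistency check performed below: every
// (level, file number) the compaction started from must still be present
// in the current version.
#include <cstdint>
#include <iostream>
#include <set>
#include <utility>
#include <vector>

using LevelFile = std::pair<int, uint64_t>;  // (level, file number)

bool VerifyInputsStillLive(const std::vector<LevelFile>& inputs,
                           const std::set<LevelFile>& live_files) {
  for (const LevelFile& lf : inputs) {
    if (live_files.count(lf) == 0) return false;  // file vanished or moved
  }
  return true;
}

int main() {
  std::set<LevelFile> live = {{1, 7}, {1, 9}, {2, 12}};
  std::cout << VerifyInputsStillLive({{1, 7}, {2, 12}}, live) << "\n";  // 1
  std::cout << VerifyInputsStillLive({{1, 8}}, live) << "\n";          // 0
  return 0;
}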
+ if (!versions_->VerifyCompactionFileConsistency(compaction)) { + Compaction::InputLevelSummaryBuffer inputs_summary; + + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "[%s] [JOB %d] Compaction %s aborted", + compaction->column_family_data()->GetName().c_str(), job_id_, + compaction->InputLevelSummary(&inputs_summary)); + return Status::Corruption("Compaction input files inconsistent"); + } + + { + Compaction::InputLevelSummaryBuffer inputs_summary; + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "[%s] [JOB %d] Compacted %s => %" PRIu64 " bytes", + compaction->column_family_data()->GetName().c_str(), job_id_, + compaction->InputLevelSummary(&inputs_summary), compact_->total_bytes); + } + + // Add compaction outputs + compaction->AddInputDeletions(compact_->compaction->edit()); + + for (const auto& sub_compact : compact_->sub_compact_states) { + for (const auto& out : sub_compact.outputs) { + compaction->edit()->AddFile(compaction->output_level(), out.meta); + } + } + return versions_->LogAndApply(compaction->column_family_data(), + mutable_cf_options, compaction->edit(), + db_mutex_, db_directory_); +} + +void CompactionJob::RecordCompactionIOStats() { + RecordTick(stats_, COMPACT_READ_BYTES, IOSTATS(bytes_read)); + ThreadStatusUtil::IncreaseThreadOperationProperty( + ThreadStatus::COMPACTION_BYTES_READ, IOSTATS(bytes_read)); + IOSTATS_RESET(bytes_read); + RecordTick(stats_, COMPACT_WRITE_BYTES, IOSTATS(bytes_written)); + ThreadStatusUtil::IncreaseThreadOperationProperty( + ThreadStatus::COMPACTION_BYTES_WRITTEN, IOSTATS(bytes_written)); + IOSTATS_RESET(bytes_written); +} + +Status CompactionJob::OpenCompactionOutputFile( + SubcompactionState* sub_compact) { + assert(sub_compact != nullptr); + assert(sub_compact->builder == nullptr); + // no need to lock because VersionSet::next_file_number_ is atomic + uint64_t file_number = versions_->NewFileNumber(); + std::string fname = TableFileName(db_options_.db_paths, file_number, + sub_compact->compaction->output_path_id()); + // Fire events. 
+ ColumnFamilyData* cfd = sub_compact->compaction->column_family_data(); +#ifndef ROCKSDB_LITE + EventHelpers::NotifyTableFileCreationStarted( + cfd->ioptions()->listeners, dbname_, cfd->GetName(), fname, job_id_, + TableFileCreationReason::kCompaction); +#endif // !ROCKSDB_LITE + // Make the output file + unique_ptr writable_file; + Status s = NewWritableFile(env_, fname, &writable_file, env_options_); + if (!s.ok()) { + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "[%s] [JOB %d] OpenCompactionOutputFiles for table #%" PRIu64 + " fails at NewWritableFile with status %s", + sub_compact->compaction->column_family_data()->GetName().c_str(), + job_id_, file_number, s.ToString().c_str()); + LogFlush(db_options_.info_log); + EventHelpers::LogAndNotifyTableFileCreationFinished( + event_logger_, cfd->ioptions()->listeners, dbname_, cfd->GetName(), + fname, job_id_, FileDescriptor(), TableProperties(), + TableFileCreationReason::kCompaction, s); + return s; + } + + SubcompactionState::Output out; + out.meta.fd = + FileDescriptor(file_number, sub_compact->compaction->output_path_id(), 0); + out.finished = false; + + sub_compact->outputs.push_back(out); + writable_file->SetIOPriority(Env::IO_LOW); + writable_file->SetPreallocationBlockSize(static_cast( + sub_compact->compaction->OutputFilePreallocationSize())); + sub_compact->outfile.reset( + new WritableFileWriter(std::move(writable_file), env_options_)); + + // If the Column family flag is to only optimize filters for hits, + // we can skip creating filters if this is the bottommost_level where + // data is going to be found + bool skip_filters = + cfd->ioptions()->optimize_filters_for_hits && bottommost_level_; + sub_compact->builder.reset(NewTableBuilder( + *cfd->ioptions(), cfd->internal_comparator(), + cfd->int_tbl_prop_collector_factories(), cfd->GetID(), cfd->GetName(), + sub_compact->outfile.get(), sub_compact->compaction->output_compression(), + cfd->ioptions()->compression_opts, &sub_compact->compression_dict, + skip_filters)); + LogFlush(db_options_.info_log); + return s; +} + +void CompactionJob::CleanupCompaction() { + for (SubcompactionState& sub_compact : compact_->sub_compact_states) { + const auto& sub_status = sub_compact.status; + + if (sub_compact.builder != nullptr) { + // May happen if we get a shutdown call in the middle of compaction + sub_compact.builder->Abandon(); + sub_compact.builder.reset(); + } else { + assert(!sub_status.ok() || sub_compact.outfile == nullptr); + } + for (const auto& out : sub_compact.outputs) { + // If this file was inserted into the table cache then remove + // them here because this compaction was not committed. + if (!sub_status.ok()) { + TableCache::Evict(table_cache_.get(), out.meta.fd.GetNumber()); + } + } + } + delete compact_; + compact_ = nullptr; +} + +#ifndef ROCKSDB_LITE +namespace { +void CopyPrefix( + const Slice& src, size_t prefix_length, std::string* dst) { + assert(prefix_length > 0); + size_t length = src.size() > prefix_length ? 
prefix_length : src.size(); + dst->assign(src.data(), length); +} +} // namespace + +#endif // !ROCKSDB_LITE + +void CompactionJob::UpdateCompactionStats() { + Compaction* compaction = compact_->compaction; + compaction_stats_.num_input_files_in_non_output_levels = 0; + compaction_stats_.num_input_files_in_output_level = 0; + for (int input_level = 0; + input_level < static_cast(compaction->num_input_levels()); + ++input_level) { + if (compaction->start_level() + input_level + != compaction->output_level()) { + UpdateCompactionInputStatsHelper( + &compaction_stats_.num_input_files_in_non_output_levels, + &compaction_stats_.bytes_read_non_output_levels, + input_level); + } else { + UpdateCompactionInputStatsHelper( + &compaction_stats_.num_input_files_in_output_level, + &compaction_stats_.bytes_read_output_level, + input_level); + } + } + + for (const auto& sub_compact : compact_->sub_compact_states) { + size_t num_output_files = sub_compact.outputs.size(); + if (sub_compact.builder != nullptr) { + // An error occurred so ignore the last output. + assert(num_output_files > 0); + --num_output_files; + } + compaction_stats_.num_output_files += static_cast(num_output_files); + + for (const auto& out : sub_compact.outputs) { + compaction_stats_.bytes_written += out.meta.fd.file_size; + } + if (sub_compact.num_input_records > sub_compact.num_output_records) { + compaction_stats_.num_dropped_records += + sub_compact.num_input_records - sub_compact.num_output_records; + } + } +} + +void CompactionJob::UpdateCompactionInputStatsHelper( + int* num_files, uint64_t* bytes_read, int input_level) { + const Compaction* compaction = compact_->compaction; + auto num_input_files = compaction->num_input_files(input_level); + *num_files += static_cast(num_input_files); + + for (size_t i = 0; i < num_input_files; ++i) { + const auto* file_meta = compaction->input(input_level, i); + *bytes_read += file_meta->fd.GetFileSize(); + compaction_stats_.num_input_records += + static_cast(file_meta->num_entries); + } +} + +void CompactionJob::UpdateCompactionJobStats( + const InternalStats::CompactionStats& stats) const { +#ifndef ROCKSDB_LITE + if (compaction_job_stats_) { + compaction_job_stats_->elapsed_micros = stats.micros; + + // input information + compaction_job_stats_->total_input_bytes = + stats.bytes_read_non_output_levels + + stats.bytes_read_output_level; + compaction_job_stats_->num_input_records = + compact_->num_input_records; + compaction_job_stats_->num_input_files = + stats.num_input_files_in_non_output_levels + + stats.num_input_files_in_output_level; + compaction_job_stats_->num_input_files_at_output_level = + stats.num_input_files_in_output_level; + + // output information + compaction_job_stats_->total_output_bytes = stats.bytes_written; + compaction_job_stats_->num_output_records = + compact_->num_output_records; + compaction_job_stats_->num_output_files = stats.num_output_files; + + if (compact_->NumOutputFiles() > 0U) { + CopyPrefix( + compact_->SmallestUserKey(), + CompactionJobStats::kMaxPrefixLength, + &compaction_job_stats_->smallest_output_key_prefix); + CopyPrefix( + compact_->LargestUserKey(), + CompactionJobStats::kMaxPrefixLength, + &compaction_job_stats_->largest_output_key_prefix); + } + } +#endif // !ROCKSDB_LITE +} + +void CompactionJob::LogCompaction() { + Compaction* compaction = compact_->compaction; + ColumnFamilyData* cfd = compaction->column_family_data(); + + // Let's check if anything will get logged. 
Don't prepare all the info if + // we're not logging + if (db_options_.info_log_level <= InfoLogLevel::INFO_LEVEL) { + Compaction::InputLevelSummaryBuffer inputs_summary; + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "[%s] [JOB %d] Compacting %s, score %.2f", cfd->GetName().c_str(), + job_id_, compaction->InputLevelSummary(&inputs_summary), + compaction->score()); + char scratch[2345]; + compaction->Summary(scratch, sizeof(scratch)); + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "[%s] Compaction start summary: %s\n", cfd->GetName().c_str(), scratch); + // build event logger report + auto stream = event_logger_->Log(); + stream << "job" << job_id_ << "event" + << "compaction_started"; + for (size_t i = 0; i < compaction->num_input_levels(); ++i) { + stream << ("files_L" + ToString(compaction->level(i))); + stream.StartArray(); + for (auto f : *compaction->inputs(i)) { + stream << f->fd.GetNumber(); + } + stream.EndArray(); + } + stream << "score" << compaction->score() << "input_data_size" + << compaction->CalculateTotalInputSize(); + } +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/compaction_job.h b/external/rocksdb/db/compaction_job.h new file mode 100755 index 0000000000..b2a592ea9d --- /dev/null +++ b/external/rocksdb/db/compaction_job.h @@ -0,0 +1,159 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+#pragma once + +#include <atomic> +#include <deque> +#include <functional> +#include <limits> +#include <set> +#include <string> +#include <utility> +#include <vector> + +#include "db/column_family.h" +#include "db/compaction_iterator.h" +#include "db/dbformat.h" +#include "db/flush_scheduler.h" +#include "db/internal_stats.h" +#include "db/job_context.h" +#include "db/log_writer.h" +#include "db/memtable_list.h" +#include "db/version_edit.h" +#include "db/write_controller.h" +#include "db/write_thread.h" +#include "port/port.h" +#include "rocksdb/compaction_filter.h" +#include "rocksdb/compaction_job_stats.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/transaction_log.h" +#include "table/scoped_arena_iterator.h" +#include "util/autovector.h" +#include "util/event_logger.h" +#include "util/stop_watch.h" +#include "util/thread_local.h" + +namespace rocksdb { + +class MemTable; +class TableCache; +class Version; +class VersionEdit; +class VersionSet; +class Arena; + +class CompactionJob { + public: + CompactionJob(int job_id, Compaction* compaction, const DBOptions& db_options, + const EnvOptions& env_options, VersionSet* versions, + std::atomic<bool>* shutting_down, LogBuffer* log_buffer, + Directory* db_directory, Directory* output_directory, + Statistics* stats, InstrumentedMutex* db_mutex, + Status* db_bg_error, + std::vector<SequenceNumber> existing_snapshots, + SequenceNumber earliest_write_conflict_snapshot, + std::shared_ptr<Cache> table_cache, EventLogger* event_logger, + bool paranoid_file_checks, bool measure_io_stats, + const std::string& dbname, + CompactionJobStats* compaction_job_stats); + + ~CompactionJob(); + + // no copy/move + CompactionJob(CompactionJob&& job) = delete; + CompactionJob(const CompactionJob& job) = delete; + CompactionJob& operator=(const CompactionJob& job) = delete; + + // REQUIRED: mutex held + void Prepare(); + // REQUIRED: mutex not held + Status Run(); + + // REQUIRED: mutex held + Status Install(const MutableCFOptions& mutable_cf_options); + + private: + struct SubcompactionState; + + void AggregateStatistics(); + void GenSubcompactionBoundaries(); + + // Update the thread status for starting a compaction. + void ReportStartedCompaction(Compaction* compaction); + void AllocateCompactionOutputFileNumbers(); + // Call the compaction filter, then iterate through the input and compact + // the kv-pairs. + void ProcessKeyValueCompaction(SubcompactionState* sub_compact); + + Status FinishCompactionOutputFile(const Status& input_status, + SubcompactionState* sub_compact); + Status InstallCompactionResults(const MutableCFOptions& mutable_cf_options); + void RecordCompactionIOStats(); + Status OpenCompactionOutputFile(SubcompactionState* sub_compact); + void CleanupCompaction(); + void UpdateCompactionJobStats( + const InternalStats::CompactionStats& stats) const; + void RecordDroppedKeys(const CompactionIteratorStats& c_iter_stats, + CompactionJobStats* compaction_job_stats = nullptr); + + void UpdateCompactionStats(); + void UpdateCompactionInputStatsHelper( + int* num_files, uint64_t* bytes_read, int input_level); + + void LogCompaction(); + + int job_id_; + + // CompactionJob state + struct CompactionState; + CompactionState* compact_; + CompactionJobStats* compaction_job_stats_; + InternalStats::CompactionStats compaction_stats_; + + // DBImpl state + const std::string& dbname_; + const DBOptions& db_options_; + const EnvOptions& env_options_; + + Env* env_; + VersionSet* versions_; + std::atomic<bool>* shutting_down_; + LogBuffer* log_buffer_; + Directory* db_directory_; + Directory* output_directory_; + Statistics* stats_; + InstrumentedMutex* db_mutex_; + Status* db_bg_error_; + // If there were two snapshots with seq numbers s1 and + // s2 and s1 < s2, and if we find two instances of a key k1 that lies + // entirely within s1 and s2, then the earlier version of k1 can be safely + // deleted because that version is not visible in any snapshot. + std::vector<SequenceNumber> existing_snapshots_; + + // This is the earliest snapshot that could be used for write-conflict + // checking by a transaction. For any user-key newer than this snapshot, we + // should make sure not to remove evidence that a write occurred. + SequenceNumber earliest_write_conflict_snapshot_; + + std::shared_ptr<Cache> table_cache_; + + EventLogger* event_logger_; + + bool bottommost_level_; + bool paranoid_file_checks_; + bool measure_io_stats_; + // Stores the Slices that designate the boundaries for each subcompaction + std::vector<Slice> boundaries_; + // Stores the approx size of keys covered in the range of each subcompaction + std::vector<uint64_t> sizes_; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/db/compaction_job_stats_test.cc b/external/rocksdb/db/compaction_job_stats_test.cc new file mode 100755 index 0000000000..a05b0ba64a --- /dev/null +++ b/external/rocksdb/db/compaction_job_stats_test.cc @@ -0,0 +1,1049 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors.
+ +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include <inttypes.h> +#include <algorithm> +#include <iostream> +#include <mutex> +#include <queue> +#include <set> +#include <thread> +#include <unordered_set> +#include <utility> + +#include "db/db_impl.h" +#include "db/dbformat.h" +#include "db/filename.h" +#include "db/job_context.h" +#include "db/version_set.h" +#include "db/write_batch_internal.h" +#include "memtable/hash_linklist_rep.h" +#include "port/stack_trace.h" +#include "rocksdb/cache.h" +#include "rocksdb/compaction_filter.h" +#include "rocksdb/convenience.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/experimental.h" +#include "rocksdb/filter_policy.h" +#include "rocksdb/options.h" +#include "rocksdb/perf_context.h" +#include "rocksdb/slice.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/table.h" +#include "rocksdb/table_properties.h" +#include "rocksdb/thread_status.h" +#include "rocksdb/utilities/checkpoint.h" +#include "rocksdb/utilities/write_batch_with_index.h" +#include "table/block_based_table_factory.h" +#include "table/mock_table.h" +#include "table/plain_table_factory.h" +#include "table/scoped_arena_iterator.h" +#include "util/compression.h" +#include "util/hash.h" +#include "util/logging.h" +#include "util/mock_env.h" +#include "util/mutexlock.h" +#include "util/rate_limiter.h" +#include "util/statistics.h" +#include "util/string_util.h" +#include "util/sync_point.h" +#include "util/testharness.h" +#include "util/testutil.h" +#include "util/thread_status_util.h" +#include "util/xfunc.h" +#include "utilities/merge_operators.h" + +#if !defined(IOS_CROSS_COMPILE) +#ifndef ROCKSDB_LITE +namespace rocksdb { + +static std::string RandomString(Random* rnd, int len, double ratio) { + std::string r; + test::CompressibleString(rnd, ratio, len, &r); + return r; +} + +std::string Key(uint64_t key, int length) { + const int kBufSize = 1000; + char buf[kBufSize]; + if (length > kBufSize) { + length = kBufSize; + } + snprintf(buf, kBufSize, "%0*" PRIu64, length, key); + return std::string(buf); +} + +class CompactionJobStatsTest : public testing::Test, + public testing::WithParamInterface<uint32_t> { + public: + std::string dbname_; + std::string alternative_wal_dir_; + Env* env_; + DB* db_; + std::vector<ColumnFamilyHandle*> handles_; + uint32_t max_subcompactions_; + + Options last_options_; + + CompactionJobStatsTest() : env_(Env::Default()) { + env_->SetBackgroundThreads(1, Env::LOW); + env_->SetBackgroundThreads(1, Env::HIGH); + dbname_ = test::TmpDir(env_) + "/compaction_job_stats_test"; + alternative_wal_dir_ = dbname_ + "/wal"; + Options options; + options.create_if_missing = true; + max_subcompactions_ = GetParam(); + options.max_subcompactions = max_subcompactions_; + auto delete_options = options; + delete_options.wal_dir = alternative_wal_dir_; + EXPECT_OK(DestroyDB(dbname_, delete_options)); + // Destroy the DB again in case the alternative WAL dir was not used.
+ EXPECT_OK(DestroyDB(dbname_, options)); + db_ = nullptr; + Reopen(options); + } + + ~CompactionJobStatsTest() { + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + rocksdb::SyncPoint::GetInstance()->LoadDependency({}); + rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks(); + Close(); + Options options; + options.db_paths.emplace_back(dbname_, 0); + options.db_paths.emplace_back(dbname_ + "_2", 0); + options.db_paths.emplace_back(dbname_ + "_3", 0); + options.db_paths.emplace_back(dbname_ + "_4", 0); + EXPECT_OK(DestroyDB(dbname_, options)); + } + + // Required if inheriting from testing::WithParamInterface<> + static void SetUpTestCase() {} + static void TearDownTestCase() {} + + DBImpl* dbfull() { + return reinterpret_cast(db_); + } + + void CreateColumnFamilies(const std::vector& cfs, + const Options& options) { + ColumnFamilyOptions cf_opts(options); + size_t cfi = handles_.size(); + handles_.resize(cfi + cfs.size()); + for (auto cf : cfs) { + ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++])); + } + } + + void CreateAndReopenWithCF(const std::vector& cfs, + const Options& options) { + CreateColumnFamilies(cfs, options); + std::vector cfs_plus_default = cfs; + cfs_plus_default.insert(cfs_plus_default.begin(), kDefaultColumnFamilyName); + ReopenWithColumnFamilies(cfs_plus_default, options); + } + + void ReopenWithColumnFamilies(const std::vector& cfs, + const std::vector& options) { + ASSERT_OK(TryReopenWithColumnFamilies(cfs, options)); + } + + void ReopenWithColumnFamilies(const std::vector& cfs, + const Options& options) { + ASSERT_OK(TryReopenWithColumnFamilies(cfs, options)); + } + + Status TryReopenWithColumnFamilies( + const std::vector& cfs, + const std::vector& options) { + Close(); + EXPECT_EQ(cfs.size(), options.size()); + std::vector column_families; + for (size_t i = 0; i < cfs.size(); ++i) { + column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i])); + } + DBOptions db_opts = DBOptions(options[0]); + return DB::Open(db_opts, dbname_, column_families, &handles_, &db_); + } + + Status TryReopenWithColumnFamilies(const std::vector& cfs, + const Options& options) { + Close(); + std::vector v_opts(cfs.size(), options); + return TryReopenWithColumnFamilies(cfs, v_opts); + } + + void Reopen(const Options& options) { + ASSERT_OK(TryReopen(options)); + } + + void Close() { + for (auto h : handles_) { + delete h; + } + handles_.clear(); + delete db_; + db_ = nullptr; + } + + void DestroyAndReopen(const Options& options) { + // Destroy using last options + Destroy(last_options_); + ASSERT_OK(TryReopen(options)); + } + + void Destroy(const Options& options) { + Close(); + ASSERT_OK(DestroyDB(dbname_, options)); + } + + Status ReadOnlyReopen(const Options& options) { + return DB::OpenForReadOnly(options, dbname_, &db_); + } + + Status TryReopen(const Options& options) { + Close(); + last_options_ = options; + return DB::Open(options, dbname_, &db_); + } + + Status Flush(int cf = 0) { + if (cf == 0) { + return db_->Flush(FlushOptions()); + } else { + return db_->Flush(FlushOptions(), handles_[cf]); + } + } + + Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions()) { + return db_->Put(wo, k, v); + } + + Status Put(int cf, const Slice& k, const Slice& v, + WriteOptions wo = WriteOptions()) { + return db_->Put(wo, handles_[cf], k, v); + } + + Status Delete(const std::string& k) { + return db_->Delete(WriteOptions(), k); + } + + Status Delete(int cf, const std::string& k) { + return db_->Delete(WriteOptions(), handles_[cf], k); + 
} + + std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) { + ReadOptions options; + options.verify_checksums = true; + options.snapshot = snapshot; + std::string result; + Status s = db_->Get(options, k, &result); + if (s.IsNotFound()) { + result = "NOT_FOUND"; + } else if (!s.ok()) { + result = s.ToString(); + } + return result; + } + + std::string Get(int cf, const std::string& k, + const Snapshot* snapshot = nullptr) { + ReadOptions options; + options.verify_checksums = true; + options.snapshot = snapshot; + std::string result; + Status s = db_->Get(options, handles_[cf], k, &result); + if (s.IsNotFound()) { + result = "NOT_FOUND"; + } else if (!s.ok()) { + result = s.ToString(); + } + return result; + } + + int NumTableFilesAtLevel(int level, int cf = 0) { + std::string property; + if (cf == 0) { + // default cfd + EXPECT_TRUE(db_->GetProperty( + "rocksdb.num-files-at-level" + NumberToString(level), &property)); + } else { + EXPECT_TRUE(db_->GetProperty( + handles_[cf], "rocksdb.num-files-at-level" + NumberToString(level), + &property)); + } + return atoi(property.c_str()); + } + + // Return spread of files per level + std::string FilesPerLevel(int cf = 0) { + int num_levels = + (cf == 0) ? db_->NumberLevels() : db_->NumberLevels(handles_[1]); + std::string result; + size_t last_non_zero_offset = 0; + for (int level = 0; level < num_levels; level++) { + int f = NumTableFilesAtLevel(level, cf); + char buf[100]; + snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f); + result += buf; + if (f > 0) { + last_non_zero_offset = result.size(); + } + } + result.resize(last_non_zero_offset); + return result; + } + + uint64_t Size(const Slice& start, const Slice& limit, int cf = 0) { + Range r(start, limit); + uint64_t size; + if (cf == 0) { + db_->GetApproximateSizes(&r, 1, &size); + } else { + db_->GetApproximateSizes(handles_[1], &r, 1, &size); + } + return size; + } + + void Compact(int cf, const Slice& start, const Slice& limit, + uint32_t target_path_id) { + CompactRangeOptions compact_options; + compact_options.target_path_id = target_path_id; + ASSERT_OK(db_->CompactRange(compact_options, handles_[cf], &start, &limit)); + } + + void Compact(int cf, const Slice& start, const Slice& limit) { + ASSERT_OK( + db_->CompactRange(CompactRangeOptions(), handles_[cf], &start, &limit)); + } + + void Compact(const Slice& start, const Slice& limit) { + ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &start, &limit)); + } + + void TEST_Compact(int level, int cf, const Slice& start, const Slice& limit) { + ASSERT_OK(dbfull()->TEST_CompactRange(level, &start, &limit, handles_[cf], + true /* disallow trivial move */)); + } + + // Do n memtable compactions, each of which produces an sstable + // covering the range [small,large]. 
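As context for the helpers above: FilesPerLevel() renders the LSM shape as comma-separated per-level file counts, trimmed after the last non-zero level, so "1,4" means one L0 file and four L1 files. A standalone sketch of just that formatting (FormatFilesPerLevel is a hypothetical name of mine):

// Sketch of the FilesPerLevel() formatting above: join per-level file
// counts with commas and drop trailing all-zero levels.
#include <cstdio>
#include <iostream>
#include <string>
#include <vector>

std::string FormatFilesPerLevel(const std::vector<int>& counts) {
  std::string result;
  size_t last_non_zero_offset = 0;
  for (size_t level = 0; level < counts.size(); ++level) {
    char buf[32];
    snprintf(buf, sizeof(buf), "%s%d", level ? "," : "", counts[level]);
    result += buf;
    if (counts[level] > 0) last_non_zero_offset = result.size();
  }
  result.resize(last_non_zero_offset);
  return result;
}

int main() {
  std::cout << FormatFilesPerLevel({1, 4, 0, 0}) << "\n";  // prints "1,4"
  return 0;
}

MakeTables, whose body follows, then produces n such files by writing the two boundary keys and flushing n times.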
+ void MakeTables(int n, const std::string& small, const std::string& large,
+ int cf = 0) {
+ for (int i = 0; i < n; i++) {
+ ASSERT_OK(Put(cf, small, "begin"));
+ ASSERT_OK(Put(cf, large, "end"));
+ ASSERT_OK(Flush(cf));
+ }
+ }
+
+ static void SetDeletionCompactionStats(
+ CompactionJobStats *stats, uint64_t input_deletions,
+ uint64_t expired_deletions, uint64_t records_replaced) {
+ stats->num_input_deletion_records = input_deletions;
+ stats->num_expired_deletion_records = expired_deletions;
+ stats->num_records_replaced = records_replaced;
+ }
+
+ void MakeTableWithKeyValues(
+ Random* rnd, uint64_t smallest, uint64_t largest,
+ int key_size, int value_size, uint64_t interval,
+ double ratio, int cf = 0) {
+ for (auto key = smallest; key < largest; key += interval) {
+ ASSERT_OK(Put(cf, Slice(Key(key, key_size)),
+ Slice(RandomString(rnd, value_size, ratio))));
+ }
+ ASSERT_OK(Flush(cf));
+ }
+
+ // This function behaves with the implicit understanding that two
+ // rounds of keys are inserted into the database, as per the behavior
+ // of the DeletionStatsTest.
+ void SelectivelyDeleteKeys(uint64_t smallest, uint64_t largest,
+ uint64_t interval, int deletion_interval, int key_size,
+ uint64_t cutoff_key_num, CompactionJobStats* stats, int cf = 0) {
+
+ // interval needs to be >= 2 so that deletion entries can be inserted
+ // that are intended not to result in an actual key deletion, by using
+ // an offset of 1 from another existing key
+ ASSERT_GE(interval, 2);
+
+ uint64_t ctr = 1;
+ uint32_t deletions_made = 0;
+ uint32_t num_deleted = 0;
+ uint32_t num_expired = 0;
+ for (auto key = smallest; key <= largest; key += interval, ctr++) {
+ if (ctr % deletion_interval == 0) {
+ ASSERT_OK(Delete(cf, Key(key, key_size)));
+ deletions_made++;
+ num_deleted++;
+
+ if (key > cutoff_key_num) {
+ num_expired++;
+ }
+ }
+ }
+
+ // Insert some deletions for keys that don't exist, both inside and
+ // outside the key range
+ ASSERT_OK(Delete(cf, Key(smallest+1, key_size)));
+ deletions_made++;
+
+ ASSERT_OK(Delete(cf, Key(smallest-1, key_size)));
+ deletions_made++;
+ num_expired++;
+
+ ASSERT_OK(Delete(cf, Key(smallest-9, key_size)));
+ deletions_made++;
+ num_expired++;
+
+ ASSERT_OK(Flush(cf));
+ SetDeletionCompactionStats(stats, deletions_made, num_expired,
+ num_deleted);
+ }
+};
+
+// An EventListener which helps verify the compaction results in
+// test CompactionJobStatsTest.
+class CompactionJobStatsChecker : public EventListener {
+ public:
+ CompactionJobStatsChecker()
+ : compression_enabled_(false), verify_next_comp_io_stats_(false) {}
+
+ size_t NumberOfUnverifiedStats() { return expected_stats_.size(); }
+
+ void set_verify_next_comp_io_stats(bool v) { verify_next_comp_io_stats_ = v; }
+
+ // Once a compaction completes, this function will verify the returned
+ // CompactionJobStats against the oldest expected stats added earlier
+ // to "expected_stats_" which has not yet been used for verification.
+ virtual void OnCompactionCompleted(DB *db, const CompactionJobInfo& ci) {
+ if (verify_next_comp_io_stats_) {
+ ASSERT_GT(ci.stats.file_write_nanos, 0);
+ ASSERT_GT(ci.stats.file_range_sync_nanos, 0);
+ ASSERT_GT(ci.stats.file_fsync_nanos, 0);
+ ASSERT_GT(ci.stats.file_prepare_write_nanos, 0);
+ verify_next_comp_io_stats_ = false;
+ }
+
+ std::lock_guard<std::mutex> lock(mutex_);
+ if (expected_stats_.size()) {
+ Verify(ci.stats, expected_stats_.front());
+ expected_stats_.pop();
+ }
+ }
+
+ // A helper function which verifies whether two CompactionJobStats
+ // match. The verification of all compaction stats is done by
+ // ASSERT_EQ, except for the total input / output bytes, which are
+ // checked with ASSERT_GE and ASSERT_LE under a reasonable bias:
+ // 10% in the uncompressed case and 20% when compression is used.
+ virtual void Verify(const CompactionJobStats& current_stats,
+ const CompactionJobStats& stats) {
+ // time
+ ASSERT_GT(current_stats.elapsed_micros, 0U);
+
+ ASSERT_EQ(current_stats.num_input_records,
+ stats.num_input_records);
+ ASSERT_EQ(current_stats.num_input_files,
+ stats.num_input_files);
+ ASSERT_EQ(current_stats.num_input_files_at_output_level,
+ stats.num_input_files_at_output_level);
+
+ ASSERT_EQ(current_stats.num_output_records,
+ stats.num_output_records);
+ ASSERT_EQ(current_stats.num_output_files,
+ stats.num_output_files);
+
+ ASSERT_EQ(current_stats.is_manual_compaction,
+ stats.is_manual_compaction);
+
+ // file size
+ double kFileSizeBias = compression_enabled_ ? 0.20 : 0.10;
+ ASSERT_GE(current_stats.total_input_bytes * (1.00 + kFileSizeBias),
+ stats.total_input_bytes);
+ ASSERT_LE(current_stats.total_input_bytes,
+ stats.total_input_bytes * (1.00 + kFileSizeBias));
+ ASSERT_GE(current_stats.total_output_bytes * (1.00 + kFileSizeBias),
+ stats.total_output_bytes);
+ ASSERT_LE(current_stats.total_output_bytes,
+ stats.total_output_bytes * (1.00 + kFileSizeBias));
+ ASSERT_EQ(current_stats.total_input_raw_key_bytes,
+ stats.total_input_raw_key_bytes);
+ ASSERT_EQ(current_stats.total_input_raw_value_bytes,
+ stats.total_input_raw_value_bytes);
+
+ ASSERT_EQ(current_stats.num_records_replaced,
+ stats.num_records_replaced);
+
+ ASSERT_EQ(current_stats.num_corrupt_keys,
+ stats.num_corrupt_keys);
+
+ ASSERT_EQ(
+ std::string(current_stats.smallest_output_key_prefix),
+ std::string(stats.smallest_output_key_prefix));
+ ASSERT_EQ(
+ std::string(current_stats.largest_output_key_prefix),
+ std::string(stats.largest_output_key_prefix));
+ }
+
+ // Add expected compaction stats, which will be used to
+ // verify the CompactionJobStats returned by the OnCompactionCompleted()
+ // callback.
+ void AddExpectedStats(const CompactionJobStats& stats) {
+ std::lock_guard<std::mutex> lock(mutex_);
+ expected_stats_.push(stats);
+ }
+
+ void EnableCompression(bool flag) {
+ compression_enabled_ = flag;
+ }
+
+ bool verify_next_comp_io_stats() const { return verify_next_comp_io_stats_; }
+
+ private:
+ std::mutex mutex_;
+ std::queue<CompactionJobStats> expected_stats_;
+ bool compression_enabled_;
+ bool verify_next_comp_io_stats_;
+};
+
+// An EventListener which helps verify the compaction statistics in
+// the test DeletionStatsTest.
+class CompactionJobDeletionStatsChecker : public CompactionJobStatsChecker {
+ public:
+ // Verifies whether two CompactionJobStats match.
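+ // Unlike the base checker, only the deletion-related counters and the
+ // corrupt-key count are compared, and with exact equality rather than
+ // the biased bounds used for byte totals above.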
+ void Verify(const CompactionJobStats& current_stats,
+ const CompactionJobStats& stats) {
+ ASSERT_EQ(
+ current_stats.num_input_deletion_records,
+ stats.num_input_deletion_records);
+ ASSERT_EQ(
+ current_stats.num_expired_deletion_records,
+ stats.num_expired_deletion_records);
+ ASSERT_EQ(
+ current_stats.num_records_replaced,
+ stats.num_records_replaced);
+
+ ASSERT_EQ(current_stats.num_corrupt_keys,
+ stats.num_corrupt_keys);
+ }
+};
+
+namespace {
+
+uint64_t EstimatedFileSize(
+ uint64_t num_records, size_t key_size, size_t value_size,
+ double compression_ratio = 1.0,
+ size_t block_size = 4096,
+ int bloom_bits_per_key = 10) {
+ const size_t kPerKeyOverhead = 8;
+ const size_t kFooterSize = 512;
+
+ uint64_t data_size =
+ static_cast<uint64_t>(
+ num_records * (key_size + value_size * compression_ratio +
+ kPerKeyOverhead));
+
+ return data_size + kFooterSize
+ + num_records * bloom_bits_per_key / 8 // filter block
+ + data_size * (key_size + 8) / block_size; // index block
+}
+
+namespace {
+
+void CopyPrefix(
+ const Slice& src, size_t prefix_length, std::string* dst) {
+ assert(prefix_length > 0);
+ size_t length = src.size() > prefix_length ? prefix_length : src.size();
+ dst->assign(src.data(), length);
+}
+
+} // namespace
+
+CompactionJobStats NewManualCompactionJobStats(
+ const std::string& smallest_key, const std::string& largest_key,
+ size_t num_input_files, size_t num_input_files_at_output_level,
+ uint64_t num_input_records, size_t key_size, size_t value_size,
+ size_t num_output_files, uint64_t num_output_records,
+ double compression_ratio, uint64_t num_records_replaced,
+ bool is_manual = true) {
+ CompactionJobStats stats;
+ stats.Reset();
+
+ stats.num_input_records = num_input_records;
+ stats.num_input_files = num_input_files;
+ stats.num_input_files_at_output_level = num_input_files_at_output_level;
+
+ stats.num_output_records = num_output_records;
+ stats.num_output_files = num_output_files;
+
+ stats.total_input_bytes =
+ EstimatedFileSize(
+ num_input_records / num_input_files,
+ key_size, value_size, compression_ratio) * num_input_files;
+ stats.total_output_bytes =
+ EstimatedFileSize(
+ num_output_records / num_output_files,
+ key_size, value_size, compression_ratio) * num_output_files;
+ stats.total_input_raw_key_bytes =
+ num_input_records * (key_size + 8);
+ stats.total_input_raw_value_bytes =
+ num_input_records * value_size;
+
+ stats.is_manual_compaction = is_manual;
+
+ stats.num_records_replaced = num_records_replaced;
+
+ CopyPrefix(smallest_key,
+ CompactionJobStats::kMaxPrefixLength,
+ &stats.smallest_output_key_prefix);
+ CopyPrefix(largest_key,
+ CompactionJobStats::kMaxPrefixLength,
+ &stats.largest_output_key_prefix);
+
+ return stats;
+}
+
+CompressionType GetAnyCompression() {
+ if (Snappy_Supported()) {
+ return kSnappyCompression;
+ } else if (Zlib_Supported()) {
+ return kZlibCompression;
+ } else if (BZip2_Supported()) {
+ return kBZip2Compression;
+ } else if (LZ4_Supported()) {
+ return kLZ4Compression;
+ } else if (XPRESS_Supported()) {
+ return kXpressCompression;
+ }
+
+ return kNoCompression;
+}
+
+} // namespace
+
+TEST_P(CompactionJobStatsTest, CompactionJobStatsTest) {
+ Random rnd(301);
+ const int kBufSize = 100;
+ char buf[kBufSize];
+ uint64_t key_base = 100000000l;
+ // Note: key_base must be a multiple of num_keys_per_L0_file
+ int num_keys_per_L0_file = 100;
+ const int kTestScale = 8;
+ const int kKeySize = 10;
+ const int kValueSize = 1000;
+ const double kCompressionRatio = 0.5;
+ double compression_ratio = 1.0;
+
uint64_t key_interval = key_base / num_keys_per_L0_file; + + // Whenever a compaction completes, this listener will try to + // verify whether the returned CompactionJobStats matches + // what we expect. The expected CompactionJobStats is added + // via AddExpectedStats(). + auto* stats_checker = new CompactionJobStatsChecker(); + Options options; + options.listeners.emplace_back(stats_checker); + options.create_if_missing = true; + options.max_background_flushes = 0; + // just enough setting to hold off auto-compaction. + options.level0_file_num_compaction_trigger = kTestScale + 1; + options.num_levels = 3; + options.compression = kNoCompression; + options.max_subcompactions = max_subcompactions_; + options.bytes_per_sync = 512 * 1024; + + options.report_bg_io_stats = true; + for (int test = 0; test < 2; ++test) { + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + + // 1st Phase: generate "num_L0_files" L0 files. + int num_L0_files = 0; + for (uint64_t start_key = key_base; + start_key <= key_base * kTestScale; + start_key += key_base) { + MakeTableWithKeyValues( + &rnd, start_key, start_key + key_base - 1, + kKeySize, kValueSize, key_interval, + compression_ratio, 1); + snprintf(buf, kBufSize, "%d", ++num_L0_files); + ASSERT_EQ(std::string(buf), FilesPerLevel(1)); + } + ASSERT_EQ(ToString(num_L0_files), FilesPerLevel(1)); + + // 2nd Phase: perform L0 -> L1 compaction. + int L0_compaction_count = 6; + int count = 1; + std::string smallest_key; + std::string largest_key; + for (uint64_t start_key = key_base; + start_key <= key_base * L0_compaction_count; + start_key += key_base, count++) { + smallest_key = Key(start_key, 10); + largest_key = Key(start_key + key_base - key_interval, 10); + stats_checker->AddExpectedStats( + NewManualCompactionJobStats( + smallest_key, largest_key, + 1, 0, num_keys_per_L0_file, + kKeySize, kValueSize, + 1, num_keys_per_L0_file, + compression_ratio, 0)); + ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U); + TEST_Compact(0, 1, smallest_key, largest_key); + snprintf(buf, kBufSize, "%d,%d", num_L0_files - count, count); + ASSERT_EQ(std::string(buf), FilesPerLevel(1)); + } + + // compact two files into one in the last L0 -> L1 compaction + int num_remaining_L0 = num_L0_files - L0_compaction_count; + smallest_key = Key(key_base * (L0_compaction_count + 1), 10); + largest_key = Key(key_base * (kTestScale + 1) - key_interval, 10); + stats_checker->AddExpectedStats( + NewManualCompactionJobStats( + smallest_key, largest_key, + num_remaining_L0, + 0, num_keys_per_L0_file * num_remaining_L0, + kKeySize, kValueSize, + 1, num_keys_per_L0_file * num_remaining_L0, + compression_ratio, 0)); + ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U); + TEST_Compact(0, 1, smallest_key, largest_key); + + int num_L1_files = num_L0_files - num_remaining_L0 + 1; + num_L0_files = 0; + snprintf(buf, kBufSize, "%d,%d", num_L0_files, num_L1_files); + ASSERT_EQ(std::string(buf), FilesPerLevel(1)); + + // 3rd Phase: generate sparse L0 files (wider key-range, same num of keys) + int sparseness = 2; + for (uint64_t start_key = key_base; + start_key <= key_base * kTestScale; + start_key += key_base * sparseness) { + MakeTableWithKeyValues( + &rnd, start_key, start_key + key_base * sparseness - 1, + kKeySize, kValueSize, + key_base * sparseness / num_keys_per_L0_file, + compression_ratio, 1); + snprintf(buf, kBufSize, "%d,%d", ++num_L0_files, num_L1_files); + ASSERT_EQ(std::string(buf), FilesPerLevel(1)); + } + + // 4th Phase: perform L0 -> L1 compaction 
again, expect higher write amp
+ // When subcompactions are enabled, the number of output files increases
+ // by 1 because multiple threads are consuming the input and generating
+ // output files without coordinating to see if the output could fit into
+ // a smaller number of files like it does when it runs sequentially
+ int num_output_files = options.max_subcompactions > 1 ? 2 : 1;
+ for (uint64_t start_key = key_base;
+ num_L0_files > 1;
+ start_key += key_base * sparseness) {
+ smallest_key = Key(start_key, 10);
+ largest_key =
+ Key(start_key + key_base * sparseness - key_interval, 10);
+ stats_checker->AddExpectedStats(
+ NewManualCompactionJobStats(
+ smallest_key, largest_key,
+ 3, 2, num_keys_per_L0_file * 3,
+ kKeySize, kValueSize,
+ num_output_files,
+ num_keys_per_L0_file * 2, // 1/3 of the data will be updated.
+ compression_ratio,
+ num_keys_per_L0_file));
+ ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
+ Compact(1, smallest_key, largest_key);
+ if (options.max_subcompactions == 1) {
+ --num_L1_files;
+ }
+ snprintf(buf, kBufSize, "%d,%d", --num_L0_files, num_L1_files);
+ ASSERT_EQ(std::string(buf), FilesPerLevel(1));
+ }
+
+ // 5th Phase: Do a full compaction, which involves two sub-compactions.
+ // Here we expect to have 1 L0 file and 4 L1 files.
+ // In the first sub-compaction, we expect an L0 compaction.
+ smallest_key = Key(key_base, 10);
+ largest_key = Key(key_base * (kTestScale + 1) - key_interval, 10);
+ stats_checker->AddExpectedStats(
+ NewManualCompactionJobStats(
+ Key(key_base * (kTestScale + 1 - sparseness), 10), largest_key,
+ 2, 1, num_keys_per_L0_file * 3,
+ kKeySize, kValueSize,
+ 1, num_keys_per_L0_file * 2,
+ compression_ratio,
+ num_keys_per_L0_file));
+ ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 1U);
+ Compact(1, smallest_key, largest_key);
+
+ num_L1_files = options.max_subcompactions > 1 ? 7 : 4;
+ char L1_buf[4];
+ snprintf(L1_buf, sizeof(L1_buf), "0,%d", num_L1_files);
+ std::string L1_files(L1_buf);
+ ASSERT_EQ(L1_files, FilesPerLevel(1));
+ options.compression = GetAnyCompression();
+ if (options.compression == kNoCompression) {
+ break;
+ }
+ stats_checker->EnableCompression(true);
+ compression_ratio = kCompressionRatio;
+
+ for (int i = 0; i < 5; i++) {
+ ASSERT_OK(Put(1, Slice(Key(key_base + i, 10)),
+ Slice(RandomString(&rnd, 512 * 1024, 1))));
+ }
+
+ ASSERT_OK(Flush(1));
+ reinterpret_cast<DBImpl*>(db_)->TEST_WaitForCompact();
+
+ stats_checker->set_verify_next_comp_io_stats(true);
+ std::atomic<bool> first_prepare_write(true);
+ rocksdb::SyncPoint::GetInstance()->SetCallBack(
+ "WritableFileWriter::Append:BeforePrepareWrite", [&](void* arg) {
+ if (first_prepare_write.load()) {
+ options.env->SleepForMicroseconds(3);
+ first_prepare_write.store(false);
+ }
+ });
+
+ std::atomic<bool> first_flush(true);
+ rocksdb::SyncPoint::GetInstance()->SetCallBack(
+ "WritableFileWriter::Flush:BeforeAppend", [&](void* arg) {
+ if (first_flush.load()) {
+ options.env->SleepForMicroseconds(3);
+ first_flush.store(false);
+ }
+ });
+
+ std::atomic<bool> first_sync(true);
+ rocksdb::SyncPoint::GetInstance()->SetCallBack(
+ "WritableFileWriter::SyncInternal:0", [&](void* arg) {
+ if (first_sync.load()) {
+ options.env->SleepForMicroseconds(3);
+ first_sync.store(false);
+ }
+ });
+
+ std::atomic<bool> first_range_sync(true);
+ rocksdb::SyncPoint::GetInstance()->SetCallBack(
+ "WritableFileWriter::RangeSync:0", [&](void* arg) {
+ if (first_range_sync.load()) {
+ options.env->SleepForMicroseconds(3);
+ first_range_sync.store(false);
+ }
+ });
+ rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+ Compact(1, smallest_key, largest_key);
+
+ ASSERT_TRUE(!stats_checker->verify_next_comp_io_stats());
+ ASSERT_TRUE(!first_prepare_write.load());
+ ASSERT_TRUE(!first_flush.load());
+ ASSERT_TRUE(!first_sync.load());
+ ASSERT_TRUE(!first_range_sync.load());
+ rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+ }
+ ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 0U);
+}
+
+TEST_P(CompactionJobStatsTest, DeletionStatsTest) {
+ Random rnd(301);
+ uint64_t key_base = 100000l;
+ // Note: key_base must be a multiple of num_keys_per_L0_file
+ int num_keys_per_L0_file = 20;
+ const int kTestScale = 8; // make sure this is even
+ const int kKeySize = 10;
+ const int kValueSize = 100;
+ double compression_ratio = 1.0;
+ uint64_t key_interval = key_base / num_keys_per_L0_file;
+ uint64_t largest_key_num = key_base * (kTestScale + 1) - key_interval;
+ uint64_t cutoff_key_num = key_base * (kTestScale / 2 + 1) - key_interval;
+ const std::string smallest_key = Key(key_base - 10, kKeySize);
+ const std::string largest_key = Key(largest_key_num + 10, kKeySize);
+
+ // Whenever a compaction completes, this listener will try to
+ // verify whether the returned CompactionJobStats matches
+ // what we expect.
+ auto* stats_checker = new CompactionJobDeletionStatsChecker();
+ Options options;
+ options.listeners.emplace_back(stats_checker);
+ options.create_if_missing = true;
+ options.max_background_flushes = 0;
+ options.level0_file_num_compaction_trigger = kTestScale + 1;
+ options.num_levels = 3;
+ options.compression = kNoCompression;
+ options.max_bytes_for_level_multiplier = 2;
+ options.max_subcompactions = max_subcompactions_;
+
+ DestroyAndReopen(options);
+ CreateAndReopenWithCF({"pikachu"}, options);
+
+ // Stage 1: Generate several L0 files and then send them to L2 by
+ // using CompactRangeOptions and CompactRange(). These files will
+ // have a strict subset of the keys from the full key-range.
+ for (uint64_t start_key = key_base;
+ start_key <= key_base * kTestScale / 2;
+ start_key += key_base) {
+ MakeTableWithKeyValues(
+ &rnd, start_key, start_key + key_base - 1,
+ kKeySize, kValueSize, key_interval,
+ compression_ratio, 1);
+ }
+
+ CompactRangeOptions cr_options;
+ cr_options.change_level = true;
+ cr_options.target_level = 2;
+ db_->CompactRange(cr_options, handles_[1], nullptr, nullptr);
+ ASSERT_GT(NumTableFilesAtLevel(2, 1), 0);
+
+ // Stage 2: Generate files including keys from the entire key range
+ for (uint64_t start_key = key_base;
+ start_key <= key_base * kTestScale;
+ start_key += key_base) {
+ MakeTableWithKeyValues(
+ &rnd, start_key, start_key + key_base - 1,
+ kKeySize, kValueSize, key_interval,
+ compression_ratio, 1);
+ }
+
+ // Send these L0 files to L1
+ TEST_Compact(0, 1, smallest_key, largest_key);
+ ASSERT_GT(NumTableFilesAtLevel(1, 1), 0);
+
+ // Add a new record and flush so now there is an L0 file
+ // with a value too (not just deletions from the next step)
+ ASSERT_OK(Put(1, Key(key_base - 6, kKeySize), "test"));
+ ASSERT_OK(Flush(1));
+
+ // Stage 3: Generate L0 files with some deletions so now
+ // there are files with the same key range in L0, L1, and L2
+ int deletion_interval = 3;
+ CompactionJobStats first_compaction_stats;
+ SelectivelyDeleteKeys(key_base, largest_key_num,
+ key_interval, deletion_interval, kKeySize, cutoff_key_num,
+ &first_compaction_stats, 1);
+
+ stats_checker->AddExpectedStats(first_compaction_stats);
+
+ // Stage 4: Trigger compaction and verify the stats
+ TEST_Compact(0, 1, smallest_key, largest_key);
+}
+
+namespace {
+int GetUniversalCompactionInputUnits(uint32_t num_flushes) {
+ uint32_t compaction_input_units;
+ for (compaction_input_units = 1;
+ num_flushes >= compaction_input_units;
+ compaction_input_units *= 2) {
+ if ((num_flushes & compaction_input_units) != 0) {
+ return compaction_input_units > 1 ? compaction_input_units : 0;
+ }
+ }
+ return 0;
+}
+} // namespace
+
+TEST_P(CompactionJobStatsTest, UniversalCompactionTest) {
+ Random rnd(301);
+ uint64_t key_base = 100000000l;
+ // Note: key_base must be a multiple of num_keys_per_table
+ int num_keys_per_table = 100;
+ const uint32_t kTestScale = 8;
+ const int kKeySize = 10;
+ const int kValueSize = 900;
+ double compression_ratio = 1.0;
+ uint64_t key_interval = key_base / num_keys_per_table;
+
+ auto* stats_checker = new CompactionJobStatsChecker();
+ Options options;
+ options.listeners.emplace_back(stats_checker);
+ options.create_if_missing = true;
+ options.num_levels = 3;
+ options.compression = kNoCompression;
+ options.level0_file_num_compaction_trigger = 2;
+ options.target_file_size_base = num_keys_per_table * 1000;
+ options.compaction_style = kCompactionStyleUniversal;
+ options.compaction_options_universal.size_ratio = 1;
+ options.compaction_options_universal.max_size_amplification_percent = 1000;
+ options.max_subcompactions = max_subcompactions_;
+
+ DestroyAndReopen(options);
+ CreateAndReopenWithCF({"pikachu"}, options);
+
+ // Generates the expected CompactionJobStats for each compaction
+ for (uint32_t num_flushes = 2; num_flushes <= kTestScale; num_flushes++) {
+ // Here we treat one newly flushed file as a unit.
+ //
+ // For example, if a newly flushed file is 100k, and a compaction has
+ // 4 input units, then this compaction inputs 400k.
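+ //
+ // GetUniversalCompactionInputUnits() returns the lowest set bit of
+ // num_flushes (or 0 when num_flushes is odd, since a single file is
+ // not compacted). Over num_flushes = 2..8 this yields 2, 0, 4, 0, 2,
+ // 0, 8, so exactly four compactions are expected, which is what the
+ // ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 4U) below checks.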
+ uint32_t num_input_units = GetUniversalCompactionInputUnits(num_flushes);
+ if (num_input_units == 0) {
+ continue;
+ }
+ // The following statement determines the expected smallest key
+ // based on whether it is a full compaction. A full compaction only
+ // happens when the number of flushes equals the number of compaction
+ // input runs.
+ uint64_t smallest_key =
+ (num_flushes == num_input_units) ?
+ key_base : key_base * (num_flushes - 1);
+
+ stats_checker->AddExpectedStats(
+ NewManualCompactionJobStats(
+ Key(smallest_key, 10),
+ Key(smallest_key + key_base * num_input_units - key_interval, 10),
+ num_input_units,
+ num_input_units > 2 ? num_input_units / 2 : 0,
+ num_keys_per_table * num_input_units,
+ kKeySize, kValueSize,
+ num_input_units,
+ num_keys_per_table * num_input_units,
+ 1.0, 0, false));
+ }
+ ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 4U);
+
+ for (uint64_t start_key = key_base;
+ start_key <= key_base * kTestScale;
+ start_key += key_base) {
+ MakeTableWithKeyValues(
+ &rnd, start_key, start_key + key_base - 1,
+ kKeySize, kValueSize, key_interval,
+ compression_ratio, 1);
+ reinterpret_cast<DBImpl*>(db_)->TEST_WaitForCompact();
+ }
+ ASSERT_EQ(stats_checker->NumberOfUnverifiedStats(), 0U);
+}
+
+INSTANTIATE_TEST_CASE_P(CompactionJobStatsTest, CompactionJobStatsTest,
+ ::testing::Values(1, 4));
+} // namespace rocksdb
+
+int main(int argc, char** argv) {
+ rocksdb::port::InstallStackTraceHandler();
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
+
+#else
+#include <stdio.h>
+
+int main(int argc, char** argv) {
+ fprintf(stderr, "SKIPPED, not supported in ROCKSDB_LITE\n");
+ return 0;
+}
+
+#endif // !ROCKSDB_LITE
+
+#else
+
+int main(int argc, char** argv) { return 0; }
+#endif // !defined(IOS_CROSS_COMPILE)
diff --git a/external/rocksdb/db/compaction_job_test.cc b/external/rocksdb/db/compaction_job_test.cc
new file mode 100755
index 0000000000..e218bd32a0
--- /dev/null
+++ b/external/rocksdb/db/compaction_job_test.cc
@@ -0,0 +1,929 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef ROCKSDB_LITE
+
+#include <algorithm>
+#include <map>
+#include <string>
+#include <tuple>
+
+#include "db/column_family.h"
+#include "db/compaction_job.h"
+#include "db/version_set.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/db.h"
+#include "rocksdb/options.h"
+#include "rocksdb/write_buffer_manager.h"
+#include "table/mock_table.h"
+#include "util/file_reader_writer.h"
+#include "util/string_util.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+#include "utilities/merge_operators.h"
+
+namespace rocksdb {
+
+namespace {
+
+void VerifyInitializationOfCompactionJobStats(
+ const CompactionJobStats& compaction_job_stats) {
+#if !defined(IOS_CROSS_COMPILE)
+ ASSERT_EQ(compaction_job_stats.elapsed_micros, 0U);
+
+ ASSERT_EQ(compaction_job_stats.num_input_records, 0U);
+ ASSERT_EQ(compaction_job_stats.num_input_files, 0U);
+ ASSERT_EQ(compaction_job_stats.num_input_files_at_output_level, 0U);
+
+ ASSERT_EQ(compaction_job_stats.num_output_records, 0U);
+ ASSERT_EQ(compaction_job_stats.num_output_files, 0U);
+
+ ASSERT_EQ(compaction_job_stats.is_manual_compaction, true);
+
+ ASSERT_EQ(compaction_job_stats.total_input_bytes, 0U);
+ ASSERT_EQ(compaction_job_stats.total_output_bytes, 0U);
+
+ ASSERT_EQ(compaction_job_stats.total_input_raw_key_bytes, 0U);
+ ASSERT_EQ(compaction_job_stats.total_input_raw_value_bytes, 0U);
+
+ ASSERT_EQ(compaction_job_stats.smallest_output_key_prefix[0], 0);
+ ASSERT_EQ(compaction_job_stats.largest_output_key_prefix[0], 0);
+
+ ASSERT_EQ(compaction_job_stats.num_records_replaced, 0U);
+
+ ASSERT_EQ(compaction_job_stats.num_input_deletion_records, 0U);
+ ASSERT_EQ(compaction_job_stats.num_expired_deletion_records, 0U);
+
+ ASSERT_EQ(compaction_job_stats.num_corrupt_keys, 0U);
+#endif // !defined(IOS_CROSS_COMPILE)
+}
+
+} // namespace
+
+// TODO(icanadi) Make it simpler once we mock out VersionSet
+class CompactionJobTest : public testing::Test {
+ public:
+ CompactionJobTest()
+ : env_(Env::Default()),
+ dbname_(test::TmpDir() + "/compaction_job_test"),
+ mutable_cf_options_(Options(), ImmutableCFOptions(Options())),
+ table_cache_(NewLRUCache(50000, 16)),
+ write_buffer_manager_(db_options_.db_write_buffer_size),
+ versions_(new VersionSet(dbname_, &db_options_, env_options_,
+ table_cache_.get(), &write_buffer_manager_,
+ &write_controller_)),
+ shutting_down_(false),
+ mock_table_factory_(new mock::MockTableFactory()) {
+ EXPECT_OK(env_->CreateDirIfMissing(dbname_));
+ db_options_.db_paths.emplace_back(dbname_,
+ std::numeric_limits<uint64_t>::max());
+ }
+
+ std::string GenerateFileName(uint64_t file_number) {
+ FileMetaData meta;
+ std::vector<DbPath> db_paths;
+ db_paths.emplace_back(dbname_, std::numeric_limits<uint64_t>::max());
+ meta.fd = FileDescriptor(file_number, 0, 0);
+ return TableFileName(db_paths, meta.fd.GetNumber(), meta.fd.GetPathId());
+ }
+
+ std::string KeyStr(const std::string& user_key, const SequenceNumber seq_num,
+ const ValueType t) {
+ return InternalKey(user_key, seq_num, t).Encode().ToString();
+ }
+
+ void AddMockFile(const stl_wrappers::KVMap& contents, int level = 0) {
+ assert(contents.size() > 0);
+
+ bool first_key = true;
+ std::string smallest, largest;
+ InternalKey smallest_key, largest_key;
+ SequenceNumber smallest_seqno = kMaxSequenceNumber;
+ SequenceNumber largest_seqno = 0;
+ for (auto kv : contents) {
+ ParsedInternalKey key;
+ std::string skey;
+ std::string value;
+ std::tie(skey, value) = kv;
+ ParseInternalKey(skey, &key);
+
+ smallest_seqno = std::min(smallest_seqno, key.sequence);
+ largest_seqno = std::max(largest_seqno, key.sequence);
+
+ if (first_key ||
+ cfd_->user_comparator()->Compare(key.user_key, smallest) < 0) {
+ smallest.assign(key.user_key.data(), key.user_key.size());
+ smallest_key.DecodeFrom(skey);
+ }
+ if (first_key ||
+ cfd_->user_comparator()->Compare(key.user_key, largest) > 0) {
+ largest.assign(key.user_key.data(), key.user_key.size());
+ largest_key.DecodeFrom(skey);
+ }
+
+ first_key = false;
+ }
+
+ uint64_t file_number = versions_->NewFileNumber();
+ EXPECT_OK(mock_table_factory_->CreateMockTable(
+ env_, GenerateFileName(file_number), std::move(contents)));
+
+ VersionEdit edit;
+ edit.AddFile(level, file_number, 0, 10, smallest_key, largest_key,
+ smallest_seqno, largest_seqno, false);
+
+ mutex_.Lock();
+ versions_->LogAndApply(versions_->GetColumnFamilySet()->GetDefault(),
+ mutable_cf_options_, &edit, &mutex_);
+ mutex_.Unlock();
+ }
+
+ void SetLastSequence(const SequenceNumber sequence_number) {
+ versions_->SetLastSequence(sequence_number + 1);
+ }
+
+ // Returns the expected result after compaction
+ stl_wrappers::KVMap CreateTwoFiles(bool gen_corrupted_keys) {
+ auto expected_results = mock::MakeMockFile();
+ const int kKeysPerFile = 10000;
+ const int kCorruptKeysPerFile = 200;
+ const int kMatchingKeys = kKeysPerFile / 2;
+ SequenceNumber sequence_number = 0;
+
+ auto corrupt_id = [&](int id) {
+ return gen_corrupted_keys && id > 0 && id <= kCorruptKeysPerFile;
+ };
+
+ for (int i = 0; i < 2; ++i) {
+ auto contents = mock::MakeMockFile();
+ for (int k = 0; k < kKeysPerFile; ++k) {
+ auto key = ToString(i * kMatchingKeys + k);
+ auto value = ToString(i * kKeysPerFile + k);
+ InternalKey internal_key(key, ++sequence_number, kTypeValue);
+
+ // This is how the key will look once it's written in the bottommost
+ // file
+ InternalKey bottommost_internal_key(
+ key, (key == "9999") ? sequence_number : 0, kTypeValue);
+
+ if (corrupt_id(k)) {
+ test::CorruptKeyType(&internal_key);
+ test::CorruptKeyType(&bottommost_internal_key);
+ }
+ contents.insert({ internal_key.Encode().ToString(), value });
+ if (i == 1 || k < kMatchingKeys || corrupt_id(k - kMatchingKeys)) {
+ expected_results.insert(
+ { bottommost_internal_key.Encode().ToString(), value });
+ }
+ }
+
+ AddMockFile(contents);
+ }
+
+ SetLastSequence(sequence_number);
+
+ return expected_results;
+ }
+
+ void NewDB() {
+ VersionEdit new_db;
+ new_db.SetLogNumber(0);
+ new_db.SetNextFile(2);
+ new_db.SetLastSequence(0);
+
+ const std::string manifest = DescriptorFileName(dbname_, 1);
+ unique_ptr<WritableFile> file;
+ Status s = env_->NewWritableFile(
+ manifest, &file, env_->OptimizeForManifestWrite(env_options_));
+ ASSERT_OK(s);
+ unique_ptr<WritableFileWriter> file_writer(
+ new WritableFileWriter(std::move(file), env_options_));
+ {
+ log::Writer log(std::move(file_writer), 0, false);
+ std::string record;
+ new_db.EncodeTo(&record);
+ s = log.AddRecord(record);
+ }
+ ASSERT_OK(s);
+ // Make "CURRENT" file that points to the new manifest file.
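+ // (CURRENT simply records the name of the live MANIFEST so that
+ // recovery knows which descriptor file to read.)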
+ s = SetCurrentFile(env_, dbname_, 1, nullptr);
+
+ std::vector<ColumnFamilyDescriptor> column_families;
+ cf_options_.table_factory = mock_table_factory_;
+ cf_options_.merge_operator = merge_op_;
+ cf_options_.compaction_filter = compaction_filter_.get();
+ column_families.emplace_back(kDefaultColumnFamilyName, cf_options_);
+
+ EXPECT_OK(versions_->Recover(column_families, false));
+ cfd_ = versions_->GetColumnFamilySet()->GetDefault();
+ }
+
+ void RunCompaction(
+ const std::vector<std::vector<FileMetaData*>>& input_files,
+ const stl_wrappers::KVMap& expected_results,
+ const std::vector<SequenceNumber>& snapshots = {},
+ SequenceNumber earliest_write_conflict_snapshot = kMaxSequenceNumber) {
+ auto cfd = versions_->GetColumnFamilySet()->GetDefault();
+
+ size_t num_input_files = 0;
+ std::vector<CompactionInputFiles> compaction_input_files;
+ for (size_t level = 0; level < input_files.size(); level++) {
+ auto level_files = input_files[level];
+ CompactionInputFiles compaction_level;
+ compaction_level.level = static_cast<int>(level);
+ compaction_level.files.insert(compaction_level.files.end(),
+ level_files.begin(), level_files.end());
+ compaction_input_files.push_back(compaction_level);
+ num_input_files += level_files.size();
+ }
+
+ Compaction compaction(cfd->current()->storage_info(),
+ *cfd->GetLatestMutableCFOptions(),
+ compaction_input_files, 1, 1024 * 1024, 10, 0,
+ kNoCompression, {}, true);
+ compaction.SetInputVersion(cfd->current());
+
+ LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, db_options_.info_log.get());
+ mutex_.Lock();
+ EventLogger event_logger(db_options_.info_log.get());
+ CompactionJob compaction_job(
+ 0, &compaction, db_options_, env_options_, versions_.get(),
+ &shutting_down_, &log_buffer, nullptr, nullptr, nullptr, &mutex_,
+ &bg_error_, snapshots, earliest_write_conflict_snapshot, table_cache_,
+ &event_logger, false, false, dbname_, &compaction_job_stats_);
+
+ VerifyInitializationOfCompactionJobStats(compaction_job_stats_);
+
+ compaction_job.Prepare();
+ mutex_.Unlock();
+ Status s;
+ s = compaction_job.Run();
+ ASSERT_OK(s);
+ mutex_.Lock();
+ ASSERT_OK(compaction_job.Install(*cfd->GetLatestMutableCFOptions()));
+ mutex_.Unlock();
+
+ if (expected_results.size() == 0) {
+ ASSERT_GE(compaction_job_stats_.elapsed_micros, 0U);
+ ASSERT_EQ(compaction_job_stats_.num_input_files, num_input_files);
+ ASSERT_EQ(compaction_job_stats_.num_output_files, 0U);
+ } else {
+ ASSERT_GE(compaction_job_stats_.elapsed_micros, 0U);
+ ASSERT_EQ(compaction_job_stats_.num_input_files, num_input_files);
+ ASSERT_EQ(compaction_job_stats_.num_output_files, 1U);
+ mock_table_factory_->AssertLatestFile(expected_results);
+ }
+ }
+
+ Env* env_;
+ std::string dbname_;
+ EnvOptions env_options_;
+ MutableCFOptions mutable_cf_options_;
+ std::shared_ptr<Cache> table_cache_;
+ WriteController write_controller_;
+ DBOptions db_options_;
+ ColumnFamilyOptions cf_options_;
+ WriteBufferManager write_buffer_manager_;
+ std::unique_ptr<VersionSet> versions_;
+ InstrumentedMutex mutex_;
+ std::atomic<bool> shutting_down_;
+ std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
+ CompactionJobStats compaction_job_stats_;
+ ColumnFamilyData* cfd_;
+ std::unique_ptr<CompactionFilter> compaction_filter_;
+ std::shared_ptr<MergeOperator> merge_op_;
+ Status bg_error_;
+};
+
+TEST_F(CompactionJobTest, Simple) {
+ NewDB();
+
+ auto expected_results = CreateTwoFiles(false);
+ auto cfd = versions_->GetColumnFamilySet()->GetDefault();
+ auto files = cfd->current()->storage_info()->LevelFiles(0);
+ ASSERT_EQ(2U, files.size());
+ RunCompaction({ files }, expected_results);
+}
+
+TEST_F(CompactionJobTest, SimpleCorrupted) {
+ NewDB();
+
+ auto
expected_results = CreateTwoFiles(true); + auto cfd = versions_->GetColumnFamilySet()->GetDefault(); + auto files = cfd->current()->storage_info()->LevelFiles(0); + RunCompaction({files}, expected_results); + ASSERT_EQ(compaction_job_stats_.num_corrupt_keys, 400U); +} + +TEST_F(CompactionJobTest, SimpleDeletion) { + NewDB(); + + auto file1 = mock::MakeMockFile({{KeyStr("c", 4U, kTypeDeletion), ""}, + {KeyStr("c", 3U, kTypeValue), "val"}}); + AddMockFile(file1); + + auto file2 = mock::MakeMockFile({{KeyStr("b", 2U, kTypeValue), "val"}, + {KeyStr("b", 1U, kTypeValue), "val"}}); + AddMockFile(file2); + + auto expected_results = + mock::MakeMockFile({{KeyStr("b", 0U, kTypeValue), "val"}}); + + SetLastSequence(4U); + auto files = cfd_->current()->storage_info()->LevelFiles(0); + RunCompaction({files}, expected_results); +} + +TEST_F(CompactionJobTest, SimpleOverwrite) { + NewDB(); + + auto file1 = mock::MakeMockFile({ + {KeyStr("a", 3U, kTypeValue), "val2"}, + {KeyStr("b", 4U, kTypeValue), "val3"}, + }); + AddMockFile(file1); + + auto file2 = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"}, + {KeyStr("b", 2U, kTypeValue), "val"}}); + AddMockFile(file2); + + auto expected_results = + mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "val2"}, + {KeyStr("b", 4U, kTypeValue), "val3"}}); + + SetLastSequence(4U); + auto files = cfd_->current()->storage_info()->LevelFiles(0); + RunCompaction({files}, expected_results); +} + +TEST_F(CompactionJobTest, SimpleNonLastLevel) { + NewDB(); + + auto file1 = mock::MakeMockFile({ + {KeyStr("a", 5U, kTypeValue), "val2"}, + {KeyStr("b", 6U, kTypeValue), "val3"}, + }); + AddMockFile(file1); + + auto file2 = mock::MakeMockFile({{KeyStr("a", 3U, kTypeValue), "val"}, + {KeyStr("b", 4U, kTypeValue), "val"}}); + AddMockFile(file2, 1); + + auto file3 = mock::MakeMockFile({{KeyStr("a", 1U, kTypeValue), "val"}, + {KeyStr("b", 2U, kTypeValue), "val"}}); + AddMockFile(file3, 2); + + // Because level 1 is not the last level, the sequence numbers of a and b + // cannot be set to 0 + auto expected_results = + mock::MakeMockFile({{KeyStr("a", 5U, kTypeValue), "val2"}, + {KeyStr("b", 6U, kTypeValue), "val3"}}); + + SetLastSequence(6U); + auto lvl0_files = cfd_->current()->storage_info()->LevelFiles(0); + auto lvl1_files = cfd_->current()->storage_info()->LevelFiles(1); + RunCompaction({lvl0_files, lvl1_files}, expected_results); +} + +TEST_F(CompactionJobTest, SimpleMerge) { + merge_op_ = MergeOperators::CreateStringAppendOperator(); + NewDB(); + + auto file1 = mock::MakeMockFile({ + {KeyStr("a", 5U, kTypeMerge), "5"}, + {KeyStr("a", 4U, kTypeMerge), "4"}, + {KeyStr("a", 3U, kTypeValue), "3"}, + }); + AddMockFile(file1); + + auto file2 = mock::MakeMockFile( + {{KeyStr("b", 2U, kTypeMerge), "2"}, {KeyStr("b", 1U, kTypeValue), "1"}}); + AddMockFile(file2); + + auto expected_results = + mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "3,4,5"}, + {KeyStr("b", 2U, kTypeValue), "1,2"}}); + + SetLastSequence(5U); + auto files = cfd_->current()->storage_info()->LevelFiles(0); + RunCompaction({files}, expected_results); +} + +TEST_F(CompactionJobTest, NonAssocMerge) { + merge_op_ = MergeOperators::CreateStringAppendTESTOperator(); + NewDB(); + + auto file1 = mock::MakeMockFile({ + {KeyStr("a", 5U, kTypeMerge), "5"}, + {KeyStr("a", 4U, kTypeMerge), "4"}, + {KeyStr("a", 3U, kTypeMerge), "3"}, + }); + AddMockFile(file1); + + auto file2 = mock::MakeMockFile( + {{KeyStr("b", 2U, kTypeMerge), "2"}, {KeyStr("b", 1U, kTypeMerge), "1"}}); + AddMockFile(file2); + + auto expected_results 
= + mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), "3,4,5"}, + {KeyStr("b", 2U, kTypeMerge), "2"}, + {KeyStr("b", 1U, kTypeMerge), "1"}}); + + SetLastSequence(5U); + auto files = cfd_->current()->storage_info()->LevelFiles(0); + RunCompaction({files}, expected_results); +} + +// Filters merge operands with value 10. +TEST_F(CompactionJobTest, MergeOperandFilter) { + merge_op_ = MergeOperators::CreateUInt64AddOperator(); + compaction_filter_.reset(new test::FilterNumber(10U)); + NewDB(); + + auto file1 = mock::MakeMockFile( + {{KeyStr("a", 5U, kTypeMerge), test::EncodeInt(5U)}, + {KeyStr("a", 4U, kTypeMerge), test::EncodeInt(10U)}, // Filtered + {KeyStr("a", 3U, kTypeMerge), test::EncodeInt(3U)}}); + AddMockFile(file1); + + auto file2 = mock::MakeMockFile({ + {KeyStr("b", 2U, kTypeMerge), test::EncodeInt(2U)}, + {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)} // Filtered + }); + AddMockFile(file2); + + auto expected_results = + mock::MakeMockFile({{KeyStr("a", 0U, kTypeValue), test::EncodeInt(8U)}, + {KeyStr("b", 2U, kTypeMerge), test::EncodeInt(2U)}}); + + SetLastSequence(5U); + auto files = cfd_->current()->storage_info()->LevelFiles(0); + RunCompaction({files}, expected_results); +} + +TEST_F(CompactionJobTest, FilterSomeMergeOperands) { + merge_op_ = MergeOperators::CreateUInt64AddOperator(); + compaction_filter_.reset(new test::FilterNumber(10U)); + NewDB(); + + auto file1 = mock::MakeMockFile( + {{KeyStr("a", 5U, kTypeMerge), test::EncodeInt(5U)}, + {KeyStr("a", 4U, kTypeMerge), test::EncodeInt(10U)}, // Filtered + {KeyStr("a", 3U, kTypeValue), test::EncodeInt(5U)}, + {KeyStr("d", 8U, kTypeMerge), test::EncodeInt(10U)}}); + AddMockFile(file1); + + auto file2 = + mock::MakeMockFile({{KeyStr("b", 2U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("c", 2U, kTypeMerge), test::EncodeInt(3U)}, + {KeyStr("c", 1U, kTypeValue), test::EncodeInt(7U)}, + {KeyStr("d", 1U, kTypeValue), test::EncodeInt(6U)}}); + AddMockFile(file2); + + auto file3 = + mock::MakeMockFile({{KeyStr("a", 1U, kTypeMerge), test::EncodeInt(3U)}}); + AddMockFile(file3, 2); + + auto expected_results = mock::MakeMockFile({ + {KeyStr("a", 5U, kTypeValue), test::EncodeInt(10U)}, + {KeyStr("c", 2U, kTypeValue), test::EncodeInt(10U)}, + {KeyStr("d", 1U, kTypeValue), test::EncodeInt(6U)} + // b does not appear because the operands are filtered + }); + + SetLastSequence(5U); + auto files = cfd_->current()->storage_info()->LevelFiles(0); + RunCompaction({files}, expected_results); +} + +// Test where all operands/merge results are filtered out. 
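+// Every operand in the three mock files below encodes the value 10, which
+// FilterNumber(10U) drops, so the compaction is expected to produce an
+// empty output file set (the empty KVMap passed to RunCompaction).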
+TEST_F(CompactionJobTest, FilterAllMergeOperands) { + merge_op_ = MergeOperators::CreateUInt64AddOperator(); + compaction_filter_.reset(new test::FilterNumber(10U)); + NewDB(); + + auto file1 = + mock::MakeMockFile({{KeyStr("a", 11U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("a", 10U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("a", 9U, kTypeMerge), test::EncodeInt(10U)}}); + AddMockFile(file1); + + auto file2 = + mock::MakeMockFile({{KeyStr("b", 8U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("b", 7U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("b", 6U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("b", 5U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("b", 4U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("b", 3U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("b", 2U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("c", 2U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("c", 1U, kTypeMerge), test::EncodeInt(10U)}}); + AddMockFile(file2); + + auto file3 = + mock::MakeMockFile({{KeyStr("a", 2U, kTypeMerge), test::EncodeInt(10U)}, + {KeyStr("b", 1U, kTypeMerge), test::EncodeInt(10U)}}); + AddMockFile(file3, 2); + + SetLastSequence(11U); + auto files = cfd_->current()->storage_info()->LevelFiles(0); + + stl_wrappers::KVMap empty_map; + RunCompaction({files}, empty_map); +} + +TEST_F(CompactionJobTest, SimpleSingleDelete) { + NewDB(); + + auto file1 = mock::MakeMockFile({ + {KeyStr("a", 5U, kTypeDeletion), ""}, + {KeyStr("b", 6U, kTypeSingleDeletion), ""}, + }); + AddMockFile(file1); + + auto file2 = mock::MakeMockFile({{KeyStr("a", 3U, kTypeValue), "val"}, + {KeyStr("b", 4U, kTypeValue), "val"}}); + AddMockFile(file2); + + auto file3 = mock::MakeMockFile({ + {KeyStr("a", 1U, kTypeValue), "val"}, + }); + AddMockFile(file3, 2); + + auto expected_results = + mock::MakeMockFile({{KeyStr("a", 5U, kTypeDeletion), ""}}); + + SetLastSequence(6U); + auto files = cfd_->current()->storage_info()->LevelFiles(0); + RunCompaction({files}, expected_results); +} + +TEST_F(CompactionJobTest, SingleDeleteSnapshots) { + NewDB(); + + auto file1 = mock::MakeMockFile({ + {KeyStr("A", 12U, kTypeSingleDeletion), ""}, + {KeyStr("a", 12U, kTypeSingleDeletion), ""}, + {KeyStr("b", 21U, kTypeSingleDeletion), ""}, + {KeyStr("c", 22U, kTypeSingleDeletion), ""}, + {KeyStr("d", 9U, kTypeSingleDeletion), ""}, + {KeyStr("f", 21U, kTypeSingleDeletion), ""}, + {KeyStr("j", 11U, kTypeSingleDeletion), ""}, + {KeyStr("j", 9U, kTypeSingleDeletion), ""}, + {KeyStr("k", 12U, kTypeSingleDeletion), ""}, + {KeyStr("k", 11U, kTypeSingleDeletion), ""}, + {KeyStr("l", 3U, kTypeSingleDeletion), ""}, + {KeyStr("l", 2U, kTypeSingleDeletion), ""}, + }); + AddMockFile(file1); + + auto file2 = mock::MakeMockFile({ + {KeyStr("0", 2U, kTypeSingleDeletion), ""}, + {KeyStr("a", 11U, kTypeValue), "val1"}, + {KeyStr("b", 11U, kTypeValue), "val2"}, + {KeyStr("c", 21U, kTypeValue), "val3"}, + {KeyStr("d", 8U, kTypeValue), "val4"}, + {KeyStr("e", 2U, kTypeSingleDeletion), ""}, + {KeyStr("f", 1U, kTypeValue), "val1"}, + {KeyStr("g", 11U, kTypeSingleDeletion), ""}, + {KeyStr("h", 2U, kTypeSingleDeletion), ""}, + {KeyStr("m", 12U, kTypeValue), "val1"}, + {KeyStr("m", 11U, kTypeSingleDeletion), ""}, + {KeyStr("m", 8U, kTypeValue), "val2"}, + }); + AddMockFile(file2); + + auto file3 = mock::MakeMockFile({ + {KeyStr("A", 1U, kTypeValue), "val"}, + {KeyStr("e", 1U, kTypeValue), "val"}, + }); + AddMockFile(file3, 2); + + auto expected_results = mock::MakeMockFile({ + {KeyStr("A", 12U, kTypeSingleDeletion), ""}, + {KeyStr("a", 12U, kTypeSingleDeletion), 
""}, + {KeyStr("a", 11U, kTypeValue), ""}, + {KeyStr("b", 21U, kTypeSingleDeletion), ""}, + {KeyStr("b", 11U, kTypeValue), "val2"}, + {KeyStr("c", 22U, kTypeSingleDeletion), ""}, + {KeyStr("c", 21U, kTypeValue), ""}, + {KeyStr("e", 2U, kTypeSingleDeletion), ""}, + {KeyStr("f", 21U, kTypeSingleDeletion), ""}, + {KeyStr("f", 1U, kTypeValue), "val1"}, + {KeyStr("g", 11U, kTypeSingleDeletion), ""}, + {KeyStr("j", 11U, kTypeSingleDeletion), ""}, + {KeyStr("k", 11U, kTypeSingleDeletion), ""}, + {KeyStr("m", 12U, kTypeValue), "val1"}, + {KeyStr("m", 11U, kTypeSingleDeletion), ""}, + {KeyStr("m", 8U, kTypeValue), "val2"}, + }); + + SetLastSequence(22U); + auto files = cfd_->current()->storage_info()->LevelFiles(0); + RunCompaction({files}, expected_results, {10U, 20U}, 10U); +} + +TEST_F(CompactionJobTest, EarliestWriteConflictSnapshot) { + NewDB(); + + // Test multiple snapshots where the earliest snapshot is not a + // write-conflic-snapshot. + + auto file1 = mock::MakeMockFile({ + {KeyStr("A", 24U, kTypeSingleDeletion), ""}, + {KeyStr("A", 23U, kTypeValue), "val"}, + {KeyStr("B", 24U, kTypeSingleDeletion), ""}, + {KeyStr("B", 23U, kTypeValue), "val"}, + {KeyStr("D", 24U, kTypeSingleDeletion), ""}, + {KeyStr("G", 32U, kTypeSingleDeletion), ""}, + {KeyStr("G", 31U, kTypeValue), "val"}, + {KeyStr("G", 24U, kTypeSingleDeletion), ""}, + {KeyStr("G", 23U, kTypeValue), "val2"}, + {KeyStr("H", 31U, kTypeValue), "val"}, + {KeyStr("H", 24U, kTypeSingleDeletion), ""}, + {KeyStr("H", 23U, kTypeValue), "val"}, + {KeyStr("I", 35U, kTypeSingleDeletion), ""}, + {KeyStr("I", 34U, kTypeValue), "val2"}, + {KeyStr("I", 33U, kTypeSingleDeletion), ""}, + {KeyStr("I", 32U, kTypeValue), "val3"}, + {KeyStr("I", 31U, kTypeSingleDeletion), ""}, + {KeyStr("J", 34U, kTypeValue), "val"}, + {KeyStr("J", 33U, kTypeSingleDeletion), ""}, + {KeyStr("J", 25U, kTypeValue), "val2"}, + {KeyStr("J", 24U, kTypeSingleDeletion), ""}, + }); + AddMockFile(file1); + + auto file2 = mock::MakeMockFile({ + {KeyStr("A", 14U, kTypeSingleDeletion), ""}, + {KeyStr("A", 13U, kTypeValue), "val2"}, + {KeyStr("C", 14U, kTypeSingleDeletion), ""}, + {KeyStr("C", 13U, kTypeValue), "val"}, + {KeyStr("E", 12U, kTypeSingleDeletion), ""}, + {KeyStr("F", 4U, kTypeSingleDeletion), ""}, + {KeyStr("F", 3U, kTypeValue), "val"}, + {KeyStr("G", 14U, kTypeSingleDeletion), ""}, + {KeyStr("G", 13U, kTypeValue), "val3"}, + {KeyStr("H", 14U, kTypeSingleDeletion), ""}, + {KeyStr("H", 13U, kTypeValue), "val2"}, + {KeyStr("I", 13U, kTypeValue), "val4"}, + {KeyStr("I", 12U, kTypeSingleDeletion), ""}, + {KeyStr("I", 11U, kTypeValue), "val5"}, + {KeyStr("J", 15U, kTypeValue), "val3"}, + {KeyStr("J", 14U, kTypeSingleDeletion), ""}, + }); + AddMockFile(file2); + + auto expected_results = mock::MakeMockFile({ + {KeyStr("A", 24U, kTypeSingleDeletion), ""}, + {KeyStr("A", 23U, kTypeValue), ""}, + {KeyStr("B", 24U, kTypeSingleDeletion), ""}, + {KeyStr("B", 23U, kTypeValue), ""}, + {KeyStr("D", 24U, kTypeSingleDeletion), ""}, + {KeyStr("E", 12U, kTypeSingleDeletion), ""}, + {KeyStr("G", 32U, kTypeSingleDeletion), ""}, + {KeyStr("G", 31U, kTypeValue), ""}, + {KeyStr("H", 31U, kTypeValue), "val"}, + {KeyStr("I", 35U, kTypeSingleDeletion), ""}, + {KeyStr("I", 34U, kTypeValue), ""}, + {KeyStr("I", 31U, kTypeSingleDeletion), ""}, + {KeyStr("I", 13U, kTypeValue), "val4"}, + {KeyStr("J", 34U, kTypeValue), "val"}, + {KeyStr("J", 33U, kTypeSingleDeletion), ""}, + {KeyStr("J", 25U, kTypeValue), "val2"}, + {KeyStr("J", 24U, kTypeSingleDeletion), ""}, + {KeyStr("J", 15U, kTypeValue), "val3"}, 
+ {KeyStr("J", 14U, kTypeSingleDeletion), ""}, + }); + + SetLastSequence(24U); + auto files = cfd_->current()->storage_info()->LevelFiles(0); + RunCompaction({files}, expected_results, {10U, 20U, 30U}, 20U); +} + +TEST_F(CompactionJobTest, SingleDeleteZeroSeq) { + NewDB(); + + auto file1 = mock::MakeMockFile({ + {KeyStr("A", 10U, kTypeSingleDeletion), ""}, + {KeyStr("dummy", 5U, kTypeValue), "val2"}, + }); + AddMockFile(file1); + + auto file2 = mock::MakeMockFile({ + {KeyStr("A", 0U, kTypeValue), "val"}, + }); + AddMockFile(file2); + + auto expected_results = mock::MakeMockFile({ + {KeyStr("dummy", 5U, kTypeValue), "val2"}, + }); + + SetLastSequence(22U); + auto files = cfd_->current()->storage_info()->LevelFiles(0); + RunCompaction({files}, expected_results, {}); +} + +TEST_F(CompactionJobTest, MultiSingleDelete) { + // Tests three scenarios involving multiple single delete/put pairs: + // + // A: Put Snapshot SDel Put SDel -> Put Snapshot SDel + // B: Snapshot Put SDel Put SDel Snapshot -> Snapshot SDel Snapshot + // C: SDel Put SDel Snapshot Put -> Snapshot Put + // D: (Put) SDel Snapshot Put SDel -> (Put) SDel Snapshot SDel + // E: Put SDel Snapshot Put SDel -> Snapshot SDel + // F: Put SDel Put Sdel Snapshot -> removed + // G: Snapshot SDel Put SDel Put -> Snapshot Put SDel + // H: (Put) Put SDel Put Sdel Snapshot -> Removed + // I: (Put) Snapshot Put SDel Put SDel -> SDel + // J: Put Put SDel Put SDel SDel Snapshot Put Put SDel SDel Put + // -> Snapshot Put + // K: SDel SDel Put SDel Put Put Snapshot SDel Put SDel SDel Put SDel + // -> Snapshot Put Snapshot SDel + // L: SDel Put Del Put SDel Snapshot Del Put Del SDel Put SDel + // -> Snapshot SDel + // M: (Put) SDel Put Del Put SDel Snapshot Put Del SDel Put SDel Del + // -> SDel Snapshot Del + NewDB(); + + auto file1 = mock::MakeMockFile({ + {KeyStr("A", 14U, kTypeSingleDeletion), ""}, + {KeyStr("A", 13U, kTypeValue), "val5"}, + {KeyStr("A", 12U, kTypeSingleDeletion), ""}, + {KeyStr("B", 14U, kTypeSingleDeletion), ""}, + {KeyStr("B", 13U, kTypeValue), "val2"}, + {KeyStr("C", 14U, kTypeValue), "val3"}, + {KeyStr("D", 12U, kTypeSingleDeletion), ""}, + {KeyStr("D", 11U, kTypeValue), "val4"}, + {KeyStr("G", 15U, kTypeValue), "val"}, + {KeyStr("G", 14U, kTypeSingleDeletion), ""}, + {KeyStr("G", 13U, kTypeValue), "val"}, + {KeyStr("I", 14U, kTypeSingleDeletion), ""}, + {KeyStr("I", 13U, kTypeValue), "val"}, + {KeyStr("J", 15U, kTypeValue), "val"}, + {KeyStr("J", 14U, kTypeSingleDeletion), ""}, + {KeyStr("J", 13U, kTypeSingleDeletion), ""}, + {KeyStr("J", 12U, kTypeValue), "val"}, + {KeyStr("J", 11U, kTypeValue), "val"}, + {KeyStr("K", 16U, kTypeSingleDeletion), ""}, + {KeyStr("K", 15U, kTypeValue), "val1"}, + {KeyStr("K", 14U, kTypeSingleDeletion), ""}, + {KeyStr("K", 13U, kTypeSingleDeletion), ""}, + {KeyStr("K", 12U, kTypeValue), "val2"}, + {KeyStr("K", 11U, kTypeSingleDeletion), ""}, + {KeyStr("L", 16U, kTypeSingleDeletion), ""}, + {KeyStr("L", 15U, kTypeValue), "val"}, + {KeyStr("L", 14U, kTypeSingleDeletion), ""}, + {KeyStr("L", 13U, kTypeDeletion), ""}, + {KeyStr("L", 12U, kTypeValue), "val"}, + {KeyStr("L", 11U, kTypeDeletion), ""}, + {KeyStr("M", 16U, kTypeDeletion), ""}, + {KeyStr("M", 15U, kTypeSingleDeletion), ""}, + {KeyStr("M", 14U, kTypeValue), "val"}, + {KeyStr("M", 13U, kTypeSingleDeletion), ""}, + {KeyStr("M", 12U, kTypeDeletion), ""}, + {KeyStr("M", 11U, kTypeValue), "val"}, + }); + AddMockFile(file1); + + auto file2 = mock::MakeMockFile({ + {KeyStr("A", 10U, kTypeValue), "val"}, + {KeyStr("B", 12U, kTypeSingleDeletion), 
""}, + {KeyStr("B", 11U, kTypeValue), "val2"}, + {KeyStr("C", 10U, kTypeSingleDeletion), ""}, + {KeyStr("C", 9U, kTypeValue), "val6"}, + {KeyStr("C", 8U, kTypeSingleDeletion), ""}, + {KeyStr("D", 10U, kTypeSingleDeletion), ""}, + {KeyStr("E", 12U, kTypeSingleDeletion), ""}, + {KeyStr("E", 11U, kTypeValue), "val"}, + {KeyStr("E", 5U, kTypeSingleDeletion), ""}, + {KeyStr("E", 4U, kTypeValue), "val"}, + {KeyStr("F", 6U, kTypeSingleDeletion), ""}, + {KeyStr("F", 5U, kTypeValue), "val"}, + {KeyStr("F", 4U, kTypeSingleDeletion), ""}, + {KeyStr("F", 3U, kTypeValue), "val"}, + {KeyStr("G", 12U, kTypeSingleDeletion), ""}, + {KeyStr("H", 6U, kTypeSingleDeletion), ""}, + {KeyStr("H", 5U, kTypeValue), "val"}, + {KeyStr("H", 4U, kTypeSingleDeletion), ""}, + {KeyStr("H", 3U, kTypeValue), "val"}, + {KeyStr("I", 12U, kTypeSingleDeletion), ""}, + {KeyStr("I", 11U, kTypeValue), "val"}, + {KeyStr("J", 6U, kTypeSingleDeletion), ""}, + {KeyStr("J", 5U, kTypeSingleDeletion), ""}, + {KeyStr("J", 4U, kTypeValue), "val"}, + {KeyStr("J", 3U, kTypeSingleDeletion), ""}, + {KeyStr("J", 2U, kTypeValue), "val"}, + {KeyStr("K", 8U, kTypeValue), "val3"}, + {KeyStr("K", 7U, kTypeValue), "val4"}, + {KeyStr("K", 6U, kTypeSingleDeletion), ""}, + {KeyStr("K", 5U, kTypeValue), "val5"}, + {KeyStr("K", 2U, kTypeSingleDeletion), ""}, + {KeyStr("K", 1U, kTypeSingleDeletion), ""}, + {KeyStr("L", 5U, kTypeSingleDeletion), ""}, + {KeyStr("L", 4U, kTypeValue), "val"}, + {KeyStr("L", 3U, kTypeDeletion), ""}, + {KeyStr("L", 2U, kTypeValue), "val"}, + {KeyStr("L", 1U, kTypeSingleDeletion), ""}, + {KeyStr("M", 10U, kTypeSingleDeletion), ""}, + {KeyStr("M", 7U, kTypeValue), "val"}, + {KeyStr("M", 5U, kTypeDeletion), ""}, + {KeyStr("M", 4U, kTypeValue), "val"}, + {KeyStr("M", 3U, kTypeSingleDeletion), ""}, + }); + AddMockFile(file2); + + auto file3 = mock::MakeMockFile({ + {KeyStr("D", 1U, kTypeValue), "val"}, + {KeyStr("H", 1U, kTypeValue), "val"}, + {KeyStr("I", 2U, kTypeValue), "val"}, + }); + AddMockFile(file3, 2); + + auto file4 = mock::MakeMockFile({ + {KeyStr("M", 1U, kTypeValue), "val"}, + }); + AddMockFile(file4, 2); + + auto expected_results = + mock::MakeMockFile({{KeyStr("A", 14U, kTypeSingleDeletion), ""}, + {KeyStr("A", 13U, kTypeValue), ""}, + {KeyStr("A", 12U, kTypeSingleDeletion), ""}, + {KeyStr("A", 10U, kTypeValue), "val"}, + {KeyStr("B", 14U, kTypeSingleDeletion), ""}, + {KeyStr("B", 13U, kTypeValue), ""}, + {KeyStr("C", 14U, kTypeValue), "val3"}, + {KeyStr("D", 12U, kTypeSingleDeletion), ""}, + {KeyStr("D", 11U, kTypeValue), ""}, + {KeyStr("D", 10U, kTypeSingleDeletion), ""}, + {KeyStr("E", 12U, kTypeSingleDeletion), ""}, + {KeyStr("E", 11U, kTypeValue), ""}, + {KeyStr("G", 15U, kTypeValue), "val"}, + {KeyStr("G", 12U, kTypeSingleDeletion), ""}, + {KeyStr("I", 14U, kTypeSingleDeletion), ""}, + {KeyStr("I", 13U, kTypeValue), ""}, + {KeyStr("J", 15U, kTypeValue), "val"}, + {KeyStr("K", 16U, kTypeSingleDeletion), ""}, + {KeyStr("K", 15U, kTypeValue), ""}, + {KeyStr("K", 11U, kTypeSingleDeletion), ""}, + {KeyStr("K", 8U, kTypeValue), "val3"}, + {KeyStr("L", 16U, kTypeSingleDeletion), ""}, + {KeyStr("L", 15U, kTypeValue), ""}, + {KeyStr("M", 16U, kTypeDeletion), ""}, + {KeyStr("M", 3U, kTypeSingleDeletion), ""}}); + + SetLastSequence(22U); + auto files = cfd_->current()->storage_info()->LevelFiles(0); + RunCompaction({files}, expected_results, {10U}, 10U); +} + +// This test documents the behavior where a corrupt key follows a deletion or a +// single deletion and the (single) deletion gets removed while the corrupt key +// 
gets written out. TODO(noetzli): We probably want a better way to treat
+// corrupt keys.
+TEST_F(CompactionJobTest, CorruptionAfterDeletion) {
+ NewDB();
+
+ auto file1 =
+ mock::MakeMockFile({{test::KeyStr("A", 6U, kTypeValue), "val3"},
+ {test::KeyStr("a", 5U, kTypeDeletion), ""},
+ {test::KeyStr("a", 4U, kTypeValue, true), "val"}});
+ AddMockFile(file1);
+
+ auto file2 =
+ mock::MakeMockFile({{test::KeyStr("b", 3U, kTypeSingleDeletion), ""},
+ {test::KeyStr("b", 2U, kTypeValue, true), "val"},
+ {test::KeyStr("c", 1U, kTypeValue), "val2"}});
+ AddMockFile(file2);
+
+ auto expected_results =
+ mock::MakeMockFile({{test::KeyStr("A", 0U, kTypeValue), "val3"},
+ {test::KeyStr("a", 0U, kTypeValue, true), "val"},
+ {test::KeyStr("b", 0U, kTypeValue, true), "val"},
+ {test::KeyStr("c", 1U, kTypeValue), "val2"}});
+
+ SetLastSequence(6U);
+ auto files = cfd_->current()->storage_info()->LevelFiles(0);
+ RunCompaction({files}, expected_results);
+}
+
+} // namespace rocksdb
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ return RUN_ALL_TESTS();
+}
+
+#else
+#include <stdio.h>
+
+int main(int argc, char** argv) {
+ fprintf(stderr,
+ "SKIPPED as CompactionJobStats is not supported in ROCKSDB_LITE\n");
+ return 0;
+}
+
+#endif // ROCKSDB_LITE
diff --git a/external/rocksdb/db/compaction_picker.cc b/external/rocksdb/db/compaction_picker.cc
new file mode 100755
index 0000000000..3d3e3e6682
--- /dev/null
+++ b/external/rocksdb/db/compaction_picker.cc
@@ -0,0 +1,1846 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/compaction_picker.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+#include <limits>
+#include <queue>
+#include <string>
+#include <utility>
+
+#include "db/column_family.h"
+#include "db/filename.h"
+#include "util/log_buffer.h"
+#include "util/random.h"
+#include "util/statistics.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+namespace {
+uint64_t TotalCompensatedFileSize(const std::vector<FileMetaData*>& files) {
+ uint64_t sum = 0;
+ for (size_t i = 0; i < files.size() && files[i]; i++) {
+ sum += files[i]->compensated_file_size;
+ }
+ return sum;
+}
+
+// Universal compaction is not supported in ROCKSDB_LITE
+#ifndef ROCKSDB_LITE
+
+// Used in universal compaction when trivial move is enabled.
+// This structure is used for the construction of min heap
+// that contains the file meta data, the level of the file
+// and the index of the file in that level
+
+struct InputFileInfo {
+ InputFileInfo() : f(nullptr) {}
+
+ FileMetaData* f;
+ size_t level;
+ size_t index;
+};
+
+// Used in universal compaction when trivial move is enabled.
+// This comparator is used for the construction of min heap
+// based on the smallest key of the file.
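+// (std::priority_queue is a max-heap with respect to its comparator, so
+// returning "greater than" below inverts the order and makes the top of
+// the heap the file with the smallest user key.)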
+struct UserKeyComparator {
+  explicit UserKeyComparator(const Comparator* ucmp) { ucmp_ = ucmp; }
+
+  bool operator()(InputFileInfo i1, InputFileInfo i2) const {
+    return (ucmp_->Compare(i1.f->smallest.user_key(),
+                           i2.f->smallest.user_key()) > 0);
+  }
+
+ private:
+  const Comparator* ucmp_;
+};
+
+typedef std::priority_queue<InputFileInfo, std::vector<InputFileInfo>,
+                            UserKeyComparator>
+    SmallestKeyHeap;
+
+// This function creates the heap that is used to find if the files are
+// overlapping during universal compaction when the allow_trivial_move
+// is set.
+SmallestKeyHeap create_level_heap(Compaction* c, const Comparator* ucmp) {
+  SmallestKeyHeap smallest_key_priority_q =
+      SmallestKeyHeap(UserKeyComparator(ucmp));
+
+  InputFileInfo input_file;
+
+  for (size_t l = 0; l < c->num_input_levels(); l++) {
+    if (c->num_input_files(l) != 0) {
+      if (l == 0 && c->start_level() == 0) {
+        for (size_t i = 0; i < c->num_input_files(0); i++) {
+          input_file.f = c->input(0, i);
+          input_file.level = 0;
+          input_file.index = i;
+          smallest_key_priority_q.push(std::move(input_file));
+        }
+      } else {
+        input_file.f = c->input(l, 0);
+        input_file.level = l;
+        input_file.index = 0;
+        smallest_key_priority_q.push(std::move(input_file));
+      }
+    }
+  }
+  return smallest_key_priority_q;
+}
+#endif  // !ROCKSDB_LITE
+}  // anonymous namespace
+
+// Determine compression type, based on user options, level of the output
+// file and whether compression is disabled.
+// If enable_compression is false, then compression is always disabled no
+// matter what the values of the other two parameters are.
+// Otherwise, the compression type is determined based on options and level.
+CompressionType GetCompressionType(const ImmutableCFOptions& ioptions,
+                                   const VersionStorageInfo* vstorage,
+                                   const MutableCFOptions& mutable_cf_options,
+                                   int level, int base_level,
+                                   const bool enable_compression) {
+  if (!enable_compression) {
+    // disable compression
+    return kNoCompression;
+  }
+
+  // If bottommost_compression is set and we are compacting to the
+  // bottommost level then we should use it.
+  if (ioptions.bottommost_compression != kDisableCompressionOption &&
+      level > base_level && level >= (vstorage->num_non_empty_levels() - 1)) {
+    return ioptions.bottommost_compression;
+  }
+  // If the user has specified a different compression level for each level,
+  // then pick the compression for that level.
+  if (!ioptions.compression_per_level.empty()) {
+    assert(level == 0 || level >= base_level);
+    int idx = (level == 0) ? 0 : level - base_level + 1;
+
+    const int n = static_cast<int>(ioptions.compression_per_level.size()) - 1;
+    // It is possible for level_ to be -1; in that case, we use level
+    // 0's compression. This occurs mostly in backwards compatibility
+    // situations when the builder doesn't know what level the file
+    // belongs to. Likewise, if level is beyond the end of the
+    // specified compression levels, use the last value.
+    return ioptions.compression_per_level[std::max(0, std::min(idx, n))];
+  } else {
+    return mutable_cf_options.compression;
+  }
+}
+
+CompactionPicker::CompactionPicker(const ImmutableCFOptions& ioptions,
+                                   const InternalKeyComparator* icmp)
+    : ioptions_(ioptions), icmp_(icmp) {}
+
+CompactionPicker::~CompactionPicker() {}
+
+// Delete this compaction from the list of running compactions.
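+// Editor's illustration (not part of the original diff): a sketch of the
+// expected call pattern, assuming the caller holds the DB mutex as the
+// header requires; RunTheCompaction() is a hypothetical stand-in for the
+// caller's compaction driver:
+//
+//   Compaction* c = picker->PickCompaction(cf_name, mopts, vstorage, log);
+//   Status s = RunTheCompaction(c);        // hypothetical helper
+//   picker->ReleaseCompactionFiles(c, s);  // release whether s is ok or not;
+//                                          // on failure the next-compaction
+//                                          // index is reset for a retry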
+void CompactionPicker::ReleaseCompactionFiles(Compaction* c, Status status) { + if (c->start_level() == 0 || + ioptions_.compaction_style == kCompactionStyleUniversal) { + level0_compactions_in_progress_.erase(c); + } + if (!status.ok()) { + c->ResetNextCompactionIndex(); + } +} + +void CompactionPicker::GetRange(const CompactionInputFiles& inputs, + InternalKey* smallest, InternalKey* largest) { + const int level = inputs.level; + assert(!inputs.empty()); + smallest->Clear(); + largest->Clear(); + + if (level == 0) { + for (size_t i = 0; i < inputs.size(); i++) { + FileMetaData* f = inputs[i]; + if (i == 0) { + *smallest = f->smallest; + *largest = f->largest; + } else { + if (icmp_->Compare(f->smallest, *smallest) < 0) { + *smallest = f->smallest; + } + if (icmp_->Compare(f->largest, *largest) > 0) { + *largest = f->largest; + } + } + } + } else { + *smallest = inputs[0]->smallest; + *largest = inputs[inputs.size() - 1]->largest; + } +} + +void CompactionPicker::GetRange(const CompactionInputFiles& inputs1, + const CompactionInputFiles& inputs2, + InternalKey* smallest, InternalKey* largest) { + assert(!inputs1.empty() || !inputs2.empty()); + if (inputs1.empty()) { + GetRange(inputs2, smallest, largest); + } else if (inputs2.empty()) { + GetRange(inputs1, smallest, largest); + } else { + InternalKey smallest1, smallest2, largest1, largest2; + GetRange(inputs1, &smallest1, &largest1); + GetRange(inputs2, &smallest2, &largest2); + *smallest = + icmp_->Compare(smallest1, smallest2) < 0 ? smallest1 : smallest2; + *largest = icmp_->Compare(largest1, largest2) < 0 ? largest2 : largest1; + } +} + +bool CompactionPicker::ExpandWhileOverlapping(const std::string& cf_name, + VersionStorageInfo* vstorage, + CompactionInputFiles* inputs) { + // This isn't good compaction + assert(!inputs->empty()); + + const int level = inputs->level; + // GetOverlappingInputs will always do the right thing for level-0. + // So we don't need to do any expansion if level == 0. + if (level == 0) { + return true; + } + + InternalKey smallest, largest; + + // Keep expanding inputs until we are sure that there is a "clean cut" + // boundary between the files in input and the surrounding files. + // This will ensure that no parts of a key are lost during compaction. + int hint_index = -1; + size_t old_size; + do { + old_size = inputs->size(); + GetRange(*inputs, &smallest, &largest); + inputs->clear(); + vstorage->GetOverlappingInputs(level, &smallest, &largest, &inputs->files, + hint_index, &hint_index); + } while (inputs->size() > old_size); + + // we started off with inputs non-empty and the previous loop only grew + // inputs. thus, inputs should be non-empty here + assert(!inputs->empty()); + + // If, after the expansion, there are files that are already under + // compaction, then we must drop/cancel this compaction. 
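+// Editor's illustration (not part of the original diff): why the expansion
+// loop above may run more than once. Suppose level-1 holds f1 = [a .. e@7],
+// f2 = [e@3 .. g] and f3 = [g@2 .. k] (the user keys "e" and "g" each span
+// two files). Starting from {f1}, the first pass pulls in f2 via the shared
+// user key "e"; the grown range [a .. g] then pulls in f3 on the second
+// pass, and the loop stops once the size no longer grows (a "clean cut").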
+  if (FilesInCompaction(inputs->files)) {
+    Log(InfoLogLevel::WARN_LEVEL, ioptions_.info_log,
+        "[%s] ExpandWhileOverlapping() failure because some of the necessary"
+        " compaction input files are currently being compacted.",
+        cf_name.c_str());
+    return false;
+  }
+  return true;
+}
+
+// Returns true if any one of specified files are being compacted
+bool CompactionPicker::FilesInCompaction(
+    const std::vector<FileMetaData*>& files) {
+  for (size_t i = 0; i < files.size(); i++) {
+    if (files[i]->being_compacted) {
+      return true;
+    }
+  }
+  return false;
+}
+
+Compaction* CompactionPicker::FormCompaction(
+    const CompactionOptions& compact_options,
+    const std::vector<CompactionInputFiles>& input_files, int output_level,
+    VersionStorageInfo* vstorage, const MutableCFOptions& mutable_cf_options,
+    uint32_t output_path_id) {
+  uint64_t max_grandparent_overlap_bytes =
+      output_level + 1 < vstorage->num_levels()
+          ? mutable_cf_options.MaxGrandParentOverlapBytes(output_level + 1)
+          : std::numeric_limits<uint64_t>::max();
+  assert(input_files.size());
+
+  // TODO(rven ): we might be able to run concurrent level 0 compaction
+  // if the key ranges of the two compactions do not overlap, but for now
+  // we do not allow it.
+  if ((input_files[0].level == 0) && !level0_compactions_in_progress_.empty()) {
+    return nullptr;
+  }
+  auto c = new Compaction(
+      vstorage, mutable_cf_options, input_files, output_level,
+      compact_options.output_file_size_limit, max_grandparent_overlap_bytes,
+      output_path_id, compact_options.compression, /* grandparents */ {}, true);
+
+  // If it's level 0 compaction, make sure we don't execute any other level 0
+  // compactions in parallel
+  if ((c != nullptr) && (input_files[0].level == 0)) {
+    level0_compactions_in_progress_.insert(c);
+  }
+  return c;
+}
+
+Status CompactionPicker::GetCompactionInputsFromFileNumbers(
+    std::vector<CompactionInputFiles>* input_files,
+    std::unordered_set<uint64_t>* input_set, const VersionStorageInfo* vstorage,
+    const CompactionOptions& compact_options) const {
+  if (input_set->size() == 0U) {
+    return Status::InvalidArgument(
+        "Compaction must include at least one file.");
+  }
+  assert(input_files);
+
+  std::vector<CompactionInputFiles> matched_input_files;
+  matched_input_files.resize(vstorage->num_levels());
+  int first_non_empty_level = -1;
+  int last_non_empty_level = -1;
+  // TODO(yhchiang): use a lazy-initialized mapping from
+  // file_number to FileMetaData in Version.
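+// Editor's illustration (not part of the original diff): the TODO above
+// asks for a lazily built index instead of the per-level scan below; a
+// minimal sketch of what such a mapping could look like:
+//
+//   std::unordered_map<uint64_t, std::pair<int, FileMetaData*>> file_index;
+//   for (int level = 0; level < vstorage->num_levels(); ++level) {
+//     for (auto* f : vstorage->LevelFiles(level)) {
+//       file_index.emplace(f->fd.GetNumber(), std::make_pair(level, f));
+//     }
+//   }
+//   // each file-number lookup is then O(1) instead of a scan of all levels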
+ for (int level = 0; level < vstorage->num_levels(); ++level) { + for (auto file : vstorage->LevelFiles(level)) { + auto iter = input_set->find(file->fd.GetNumber()); + if (iter != input_set->end()) { + matched_input_files[level].files.push_back(file); + input_set->erase(iter); + last_non_empty_level = level; + if (first_non_empty_level == -1) { + first_non_empty_level = level; + } + } + } + } + + if (!input_set->empty()) { + std::string message( + "Cannot find matched SST files for the following file numbers:"); + for (auto fn : *input_set) { + message += " "; + message += ToString(fn); + } + return Status::InvalidArgument(message); + } + + for (int level = first_non_empty_level; level <= last_non_empty_level; + ++level) { + matched_input_files[level].level = level; + input_files->emplace_back(std::move(matched_input_files[level])); + } + + return Status::OK(); +} + +// Returns true if any one of the parent files are being compacted +bool CompactionPicker::RangeInCompaction(VersionStorageInfo* vstorage, + const InternalKey* smallest, + const InternalKey* largest, int level, + int* level_index) { + std::vector inputs; + assert(level < NumberLevels()); + + vstorage->GetOverlappingInputs(level, smallest, largest, &inputs, + *level_index, level_index); + return FilesInCompaction(inputs); +} + +// Populates the set of inputs of all other levels that overlap with the +// start level. +// Now we assume all levels except start level and output level are empty. +// Will also attempt to expand "start level" if that doesn't expand +// "output level" or cause "level" to include a file for compaction that has an +// overlapping user-key with another file. +// REQUIRES: input_level and output_level are different +// REQUIRES: inputs->empty() == false +// Returns false if files on parent level are currently in compaction, which +// means that we can't compact them +bool CompactionPicker::SetupOtherInputs( + const std::string& cf_name, const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, CompactionInputFiles* inputs, + CompactionInputFiles* output_level_inputs, int* parent_index, + int base_index) { + assert(!inputs->empty()); + assert(output_level_inputs->empty()); + const int input_level = inputs->level; + const int output_level = output_level_inputs->level; + assert(input_level != output_level); + + // For now, we only support merging two levels, start level and output level. + // We need to assert other levels are empty. + for (int l = input_level + 1; l < output_level; l++) { + assert(vstorage->NumLevelFiles(l) == 0); + } + + InternalKey smallest, largest; + + // Get the range one last time. + GetRange(*inputs, &smallest, &largest); + + // Populate the set of next-level files (inputs_GetOutputLevelInputs()) to + // include in compaction + vstorage->GetOverlappingInputs(output_level, &smallest, &largest, + &output_level_inputs->files, *parent_index, + parent_index); + if (!output_level_inputs->empty()) { + ExpandWhileOverlapping(cf_name, vstorage, output_level_inputs); + } + + if (FilesInCompaction(output_level_inputs->files)) { + return false; + } + + // See if we can further grow the number of inputs in "level" without + // changing the number of "level+1" files we pick up. We also choose NOT + // to expand if this would cause "level" to include some entries for some + // user key, while excluding other entries for the same user key. This + // can happen when one user key spans multiple files. 
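+// Editor's illustration (not part of the original diff): a worked example of
+// the expansion check below, with assumed sizes. Say the picked start-level
+// files total inputs0_size = 5 MB, the output-level files total
+// inputs1_size = 20 MB, and growing the start level would give
+// expanded0_size = 9 MB. With ExpandedCompactionByteSizeLimit() = 25 MB,
+// 20 + 9 = 29 MB >= 25 MB, so the expansion is rejected and the original
+// inputs are kept; with a 32 MB limit (and the same output-level file set
+// after expansion) the larger start set would be used instead.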
+ if (!output_level_inputs->empty()) { + CompactionInputFiles expanded0; + expanded0.level = input_level; + // Get entire range covered by compaction + InternalKey all_start, all_limit; + GetRange(*inputs, *output_level_inputs, &all_start, &all_limit); + + vstorage->GetOverlappingInputs(input_level, &all_start, &all_limit, + &expanded0.files, base_index, nullptr); + const uint64_t inputs0_size = TotalCompensatedFileSize(inputs->files); + const uint64_t inputs1_size = + TotalCompensatedFileSize(output_level_inputs->files); + const uint64_t expanded0_size = TotalCompensatedFileSize(expanded0.files); + uint64_t limit = + mutable_cf_options.ExpandedCompactionByteSizeLimit(input_level); + if (expanded0.size() > inputs->size() && + inputs1_size + expanded0_size < limit && + !FilesInCompaction(expanded0.files) && + !vstorage->HasOverlappingUserKey(&expanded0.files, input_level)) { + InternalKey new_start, new_limit; + GetRange(expanded0, &new_start, &new_limit); + std::vector expanded1; + vstorage->GetOverlappingInputs(output_level, &new_start, &new_limit, + &expanded1, *parent_index, parent_index); + if (expanded1.size() == output_level_inputs->size() && + !FilesInCompaction(expanded1)) { + Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log, + "[%s] Expanding@%d %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt "(%" PRIu64 + "+%" PRIu64 " bytes) to %" ROCKSDB_PRIszt "+%" ROCKSDB_PRIszt + " (%" PRIu64 "+%" PRIu64 "bytes)\n", + cf_name.c_str(), input_level, inputs->size(), + output_level_inputs->size(), inputs0_size, inputs1_size, + expanded0.size(), expanded1.size(), expanded0_size, inputs1_size); + smallest = new_start; + largest = new_limit; + inputs->files = expanded0.files; + output_level_inputs->files = expanded1; + } + } + } + + return true; +} + +void CompactionPicker::GetGrandparents( + VersionStorageInfo* vstorage, const CompactionInputFiles& inputs, + const CompactionInputFiles& output_level_inputs, + std::vector* grandparents) { + InternalKey start, limit; + GetRange(inputs, output_level_inputs, &start, &limit); + // Compute the set of grandparent files that overlap this compaction + // (parent == level+1; grandparent == level+2) + if (output_level_inputs.level + 1 < NumberLevels()) { + vstorage->GetOverlappingInputs(output_level_inputs.level + 1, &start, + &limit, grandparents); + } +} + +Compaction* CompactionPicker::CompactRange( + const std::string& cf_name, const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, int input_level, int output_level, + uint32_t output_path_id, const InternalKey* begin, const InternalKey* end, + InternalKey** compaction_end, bool* manual_conflict) { + // CompactionPickerFIFO has its own implementation of compact range + assert(ioptions_.compaction_style != kCompactionStyleFIFO); + + if (input_level == ColumnFamilyData::kCompactAllLevels) { + assert(ioptions_.compaction_style == kCompactionStyleUniversal); + + // Universal compaction with more than one level always compacts all the + // files together to the last level. 
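+// Editor's illustration (not part of the original diff): with, say,
+// num_levels() = 5, three files in L0 and data in L4, kCompactAllLevels
+// gathers every non-empty level starting from the first one found and
+// writes a single sorted run to level 4 (num_levels() - 1).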
+ assert(vstorage->num_levels() > 1); + // DBImpl::CompactRange() set output level to be the last level + assert(output_level == vstorage->num_levels() - 1); + // DBImpl::RunManualCompaction will make full range for universal compaction + assert(begin == nullptr); + assert(end == nullptr); + *compaction_end = nullptr; + + int start_level = 0; + for (; start_level < vstorage->num_levels() && + vstorage->NumLevelFiles(start_level) == 0; + start_level++) { + } + if (start_level == vstorage->num_levels()) { + return nullptr; + } + + if ((start_level == 0) && (!level0_compactions_in_progress_.empty())) { + *manual_conflict = true; + // Only one level 0 compaction allowed + return nullptr; + } + + std::vector inputs(vstorage->num_levels() - + start_level); + for (int level = start_level; level < vstorage->num_levels(); level++) { + inputs[level - start_level].level = level; + auto& files = inputs[level - start_level].files; + for (FileMetaData* f : vstorage->LevelFiles(level)) { + files.push_back(f); + } + if (FilesInCompaction(files)) { + *manual_conflict = true; + return nullptr; + } + } + Compaction* c = new Compaction( + vstorage, mutable_cf_options, std::move(inputs), output_level, + mutable_cf_options.MaxFileSizeForLevel(output_level), + /* max_grandparent_overlap_bytes */ LLONG_MAX, output_path_id, + GetCompressionType(ioptions_, vstorage, mutable_cf_options, + output_level, 1), + /* grandparents */ {}, /* is manual */ true); + if (start_level == 0) { + level0_compactions_in_progress_.insert(c); + } + return c; + } + + CompactionInputFiles inputs; + inputs.level = input_level; + bool covering_the_whole_range = true; + + // All files are 'overlapping' in universal style compaction. + // We have to compact the entire range in one shot. + if (ioptions_.compaction_style == kCompactionStyleUniversal) { + begin = nullptr; + end = nullptr; + } + + vstorage->GetOverlappingInputs(input_level, begin, end, &inputs.files); + if (inputs.empty()) { + return nullptr; + } + + if ((input_level == 0) && (!level0_compactions_in_progress_.empty())) { + // Only one level 0 compaction allowed + TEST_SYNC_POINT("CompactionPicker::CompactRange:Conflict"); + *manual_conflict = true; + return nullptr; + } + + // Avoid compacting too much in one shot in case the range is large. + // But we cannot do this for level-0 since level-0 files can overlap + // and we must not pick one file and drop another older file if the + // two files overlap. 
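+// Editor's illustration (not part of the original diff): a worked example of
+// the cap below, with assumed options. With
+// MaxFileSizeForLevel(input_level) = 64 MB and source_compaction_factor = 1,
+// input files are accumulated until their compensated sizes reach 64 MB;
+// *compaction_end is then set to the smallest key of the first excluded
+// file, so the caller can resume the manual compaction from that key.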
+ if (input_level > 0) { + const uint64_t limit = mutable_cf_options.MaxFileSizeForLevel(input_level) * + mutable_cf_options.source_compaction_factor; + uint64_t total = 0; + for (size_t i = 0; i + 1 < inputs.size(); ++i) { + uint64_t s = inputs[i]->compensated_file_size; + total += s; + if (total >= limit) { + **compaction_end = inputs[i + 1]->smallest; + covering_the_whole_range = false; + inputs.files.resize(i + 1); + break; + } + } + } + assert(output_path_id < static_cast(ioptions_.db_paths.size())); + + if (ExpandWhileOverlapping(cf_name, vstorage, &inputs) == false) { + // manual compaction is now multi-threaded, so it can + // happen that ExpandWhileOverlapping fails + // we handle it higher in RunManualCompaction + *manual_conflict = true; + return nullptr; + } + + if (covering_the_whole_range) { + *compaction_end = nullptr; + } + + CompactionInputFiles output_level_inputs; + if (output_level == ColumnFamilyData::kCompactToBaseLevel) { + assert(input_level == 0); + output_level = vstorage->base_level(); + assert(output_level > 0); + } + output_level_inputs.level = output_level; + if (input_level != output_level) { + int parent_index = -1; + if (!SetupOtherInputs(cf_name, mutable_cf_options, vstorage, &inputs, + &output_level_inputs, &parent_index, -1)) { + // manual compaction is now multi-threaded, so it can + // happen that SetupOtherInputs fails + // we handle it higher in RunManualCompaction + *manual_conflict = true; + return nullptr; + } + } + + std::vector compaction_inputs({inputs}); + if (!output_level_inputs.empty()) { + compaction_inputs.push_back(output_level_inputs); + } + for (size_t i = 0; i < compaction_inputs.size(); i++) { + if (FilesInCompaction(compaction_inputs[i].files)) { + *manual_conflict = true; + return nullptr; + } + } + + std::vector grandparents; + GetGrandparents(vstorage, inputs, output_level_inputs, &grandparents); + Compaction* compaction = new Compaction( + vstorage, mutable_cf_options, std::move(compaction_inputs), output_level, + mutable_cf_options.MaxFileSizeForLevel(output_level), + mutable_cf_options.MaxGrandParentOverlapBytes(input_level), + output_path_id, + GetCompressionType(ioptions_, vstorage, mutable_cf_options, output_level, + vstorage->base_level()), + std::move(grandparents), /* is manual compaction */ true); + + TEST_SYNC_POINT_CALLBACK("CompactionPicker::CompactRange:Return", compaction); + if (input_level == 0) { + level0_compactions_in_progress_.insert(compaction); + } + + // Creating a compaction influences the compaction score because the score + // takes running compactions into account (by skipping files that are already + // being compacted). Since we just changed compaction score, we recalculate it + // here + vstorage->ComputeCompactionScore(mutable_cf_options); + + return compaction; +} + +#ifndef ROCKSDB_LITE +namespace { +// Test whether two files have overlapping key-ranges. 
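+// Editor's illustration (not part of the original diff): two sample cases
+// for the predicate below, using single-letter user keys. For a = [c, f]
+// and b = [d, h]: a starts before b, but a.largestkey "f" >= b.smallestkey
+// "d", so the ranges overlap. For a = [a, b] and b = [d, h], every branch
+// fails and the function returns false.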
+bool HaveOverlappingKeyRanges(const Comparator* c, const SstFileMetaData& a,
+                              const SstFileMetaData& b) {
+  if (c->Compare(a.smallestkey, b.smallestkey) >= 0) {
+    if (c->Compare(a.smallestkey, b.largestkey) <= 0) {
+      // b.smallestkey <= a.smallestkey <= b.largestkey
+      return true;
+    }
+  } else if (c->Compare(a.largestkey, b.smallestkey) >= 0) {
+    // a.smallestkey < b.smallestkey <= a.largestkey
+    return true;
+  }
+  if (c->Compare(a.largestkey, b.largestkey) <= 0) {
+    if (c->Compare(a.largestkey, b.smallestkey) >= 0) {
+      // b.smallestkey <= a.largestkey <= b.largestkey
+      return true;
+    }
+  } else if (c->Compare(a.smallestkey, b.largestkey) <= 0) {
+    // a.smallestkey <= b.largestkey < a.largestkey
+    return true;
+  }
+  return false;
+}
+}  // namespace
+
+Status CompactionPicker::SanitizeCompactionInputFilesForAllLevels(
+    std::unordered_set<uint64_t>* input_files,
+    const ColumnFamilyMetaData& cf_meta, const int output_level) const {
+  auto& levels = cf_meta.levels;
+  auto comparator = icmp_->user_comparator();
+
+  // TODO(yhchiang): If there are any input files of L1 or up and there is
+  // at least one L0 file, all L0 files older than that L0 file need to be
+  // included. Otherwise, the compaction is invalid.
+
+  // TODO(yhchiang): add is_adjustable to CompactionOptions
+
+  // the smallest and largest key of the current compaction input
+  std::string smallestkey;
+  std::string largestkey;
+  // a flag for initializing smallest and largest key
+  bool is_first = false;
+  const int kNotFound = -1;
+
+  // For each level, it does the following things:
+  // 1. Find the first and the last compaction input files
+  //    in the current level.
+  // 2. Include all files between the first and the last
+  //    compaction input files.
+  // 3. Update the compaction key-range.
+  // 4. For all remaining levels, include files that have
+  //    overlapping key-range with the compaction key-range.
+  for (int l = 0; l <= output_level; ++l) {
+    auto& current_files = levels[l].files;
+    int first_included = static_cast<int>(current_files.size());
+    int last_included = kNotFound;
+
+    // identify the first and the last compaction input files
+    // in the current level.
+    for (size_t f = 0; f < current_files.size(); ++f) {
+      if (input_files->find(TableFileNameToNumber(current_files[f].name)) !=
+          input_files->end()) {
+        first_included = std::min(first_included, static_cast<int>(f));
+        last_included = std::max(last_included, static_cast<int>(f));
+        if (is_first == false) {
+          smallestkey = current_files[f].smallestkey;
+          largestkey = current_files[f].largestkey;
+          is_first = true;
+        }
+      }
+    }
+    if (last_included == kNotFound) {
+      continue;
+    }
+
+    if (l != 0) {
+      // expand the compaction input of the current level if it
+      // has overlapping key-range with other non-compaction input
+      // files in the same level.
+      while (first_included > 0) {
+        if (comparator->Compare(current_files[first_included - 1].largestkey,
+                                current_files[first_included].smallestkey) <
+            0) {
+          break;
+        }
+        first_included--;
+      }
+
+      while (last_included < static_cast<int>(current_files.size()) - 1) {
+        if (comparator->Compare(current_files[last_included + 1].smallestkey,
+                                current_files[last_included].largestkey) > 0) {
+          break;
+        }
+        last_included++;
+      }
+    }
+
+    // include all files between the first and the last compaction input files.
+    for (int f = first_included; f <= last_included; ++f) {
+      if (current_files[f].being_compacted) {
+        return Status::Aborted("Necessary compaction input file " +
+                               current_files[f].name +
+                               " is currently being compacted.");
+      }
+      input_files->insert(TableFileNameToNumber(current_files[f].name));
+    }
+
+    // update smallest and largest key
+    if (l == 0) {
+      for (int f = first_included; f <= last_included; ++f) {
+        if (comparator->Compare(smallestkey, current_files[f].smallestkey) >
+            0) {
+          smallestkey = current_files[f].smallestkey;
+        }
+        if (comparator->Compare(largestkey, current_files[f].largestkey) < 0) {
+          largestkey = current_files[f].largestkey;
+        }
+      }
+    } else {
+      if (comparator->Compare(smallestkey,
+                              current_files[first_included].smallestkey) > 0) {
+        smallestkey = current_files[first_included].smallestkey;
+      }
+      if (comparator->Compare(largestkey,
+                              current_files[last_included].largestkey) < 0) {
+        largestkey = current_files[last_included].largestkey;
+      }
+    }
+
+    SstFileMetaData aggregated_file_meta;
+    aggregated_file_meta.smallestkey = smallestkey;
+    aggregated_file_meta.largestkey = largestkey;
+
+    // For all lower levels, include all overlapping files.
+    // We need to add overlapping files from the current level too because
+    // even if there are no input_files in level l, we would still need to add
+    // files which overlap with the range containing the input_files in levels
+    // 0 to l. Level 0 doesn't need to be handled this way because files are
+    // sorted by time and not by key.
+    for (int m = std::max(l, 1); m <= output_level; ++m) {
+      for (auto& next_lv_file : levels[m].files) {
+        if (HaveOverlappingKeyRanges(comparator, aggregated_file_meta,
+                                     next_lv_file)) {
+          if (next_lv_file.being_compacted) {
+            return Status::Aborted(
+                "File " + next_lv_file.name +
+                " that has an overlapping key range with one of the "
+                "compaction input files is currently being compacted.");
+          }
+          input_files->insert(TableFileNameToNumber(next_lv_file.name));
+        }
+      }
+    }
+  }
+  return Status::OK();
+}
+
+Status CompactionPicker::SanitizeCompactionInputFiles(
+    std::unordered_set<uint64_t>* input_files,
+    const ColumnFamilyMetaData& cf_meta, const int output_level) const {
+  assert(static_cast<int>(cf_meta.levels.size()) - 1 ==
+         cf_meta.levels[cf_meta.levels.size() - 1].level);
+  if (output_level >= static_cast<int>(cf_meta.levels.size())) {
+    return Status::InvalidArgument(
+        "Output level for column family " + cf_meta.name +
+        " must be between [0, " +
+        ToString(cf_meta.levels[cf_meta.levels.size() - 1].level) + "].");
+  }
+
+  if (output_level > MaxOutputLevel()) {
+    return Status::InvalidArgument(
+        "Exceed the maximum output level defined by "
+        "the current compaction algorithm --- " +
+        ToString(MaxOutputLevel()));
+  }
+
+  if (output_level < 0) {
+    return Status::InvalidArgument("Output level cannot be negative.");
+  }
+
+  if (input_files->size() == 0) {
+    return Status::InvalidArgument(
+        "A compaction must contain at least one file.");
+  }
+
+  Status s = SanitizeCompactionInputFilesForAllLevels(input_files, cf_meta,
+                                                      output_level);
+
+  if (!s.ok()) {
+    return s;
+  }
+
+  // for all input files, check whether the file number matches
+  // any currently-existing files.
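+// Editor's note (not part of the original diff): the numbers in input_files
+// are the numeric part of the SST file name, so TableFileNameToNumber() and
+// MakeTableFileName() below are (roughly) inverses of each other, e.g.
+// "000123.sst" corresponds to file number 123.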
+  for (auto file_num : *input_files) {
+    bool found = false;
+    for (auto level_meta : cf_meta.levels) {
+      for (auto file_meta : level_meta.files) {
+        if (file_num == TableFileNameToNumber(file_meta.name)) {
+          if (file_meta.being_compacted) {
+            return Status::Aborted("Specified compaction input file " +
+                                   MakeTableFileName("", file_num) +
+                                   " is already being compacted.");
+          }
+          found = true;
+          break;
+        }
+      }
+      if (found) {
+        break;
+      }
+    }
+    if (!found) {
+      return Status::InvalidArgument(
+          "Specified compaction input file " + MakeTableFileName("", file_num) +
+          " does not exist in column family " + cf_meta.name + ".");
+    }
+  }
+
+  return Status::OK();
+}
+#endif  // !ROCKSDB_LITE
+
+bool LevelCompactionPicker::NeedsCompaction(
+    const VersionStorageInfo* vstorage) const {
+  if (!vstorage->FilesMarkedForCompaction().empty()) {
+    return true;
+  }
+  for (int i = 0; i <= vstorage->MaxInputLevel(); i++) {
+    if (vstorage->CompactionScore(i) >= 1) {
+      return true;
+    }
+  }
+  return false;
+}
+
+void LevelCompactionPicker::PickFilesMarkedForCompactionExperimental(
+    const std::string& cf_name, VersionStorageInfo* vstorage,
+    CompactionInputFiles* inputs, int* level, int* output_level) {
+  if (vstorage->FilesMarkedForCompaction().empty()) {
+    return;
+  }
+
+  auto continuation = [&](std::pair<int, FileMetaData*> level_file) {
+    // If it's being compacted it has nothing to do here.
+    // If this assert() fails that means that some function marked some
+    // files as being_compacted, but didn't call ComputeCompactionScore()
+    assert(!level_file.second->being_compacted);
+    *level = level_file.first;
+    *output_level = (*level == 0) ? vstorage->base_level() : *level + 1;
+
+    if (*level == 0 && !level0_compactions_in_progress_.empty()) {
+      return false;
+    }
+
+    inputs->files = {level_file.second};
+    inputs->level = *level;
+    return ExpandWhileOverlapping(cf_name, vstorage, inputs);
+  };
+
+  // take a chance on a random file first
+  Random64 rnd(/* seed */ reinterpret_cast<uint64_t>(vstorage));
+  size_t random_file_index = static_cast<size_t>(rnd.Uniform(
+      static_cast<uint64_t>(vstorage->FilesMarkedForCompaction().size())));
+
+  if (continuation(vstorage->FilesMarkedForCompaction()[random_file_index])) {
+    // found the compaction!
+    return;
+  }
+
+  for (auto& level_file : vstorage->FilesMarkedForCompaction()) {
+    if (continuation(level_file)) {
+      // found the compaction!
+      return;
+    }
+  }
+  inputs->files.clear();
+}
+
+Compaction* LevelCompactionPicker::PickCompaction(
+    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
+    VersionStorageInfo* vstorage, LogBuffer* log_buffer) {
+  int level = -1;
+  int output_level = -1;
+  int parent_index = -1;
+  int base_index = -1;
+  CompactionInputFiles inputs;
+  double score = 0;
+  CompactionReason compaction_reason = CompactionReason::kUnknown;
+
+  // Find the compactions by size on all levels.
+  bool skipped_l0 = false;
+  for (int i = 0; i < NumberLevels() - 1; i++) {
+    score = vstorage->CompactionScore(i);
+    level = vstorage->CompactionScoreLevel(i);
+    assert(i == 0 || score <= vstorage->CompactionScore(i - 1));
+    if (score >= 1) {
+      if (skipped_l0 && level == vstorage->base_level()) {
+        // If L0->base_level compaction is pending, don't schedule further
+        // compaction from base level. Otherwise L0->base_level compaction
+        // may starve.
+        continue;
+      }
+      output_level = (level == 0) ?
vstorage->base_level() : level + 1; + if (PickCompactionBySize(vstorage, level, output_level, &inputs, + &parent_index, &base_index) && + ExpandWhileOverlapping(cf_name, vstorage, &inputs)) { + // found the compaction! + if (level == 0) { + // L0 score = `num L0 files` / `level0_file_num_compaction_trigger` + compaction_reason = CompactionReason::kLevelL0FilesNum; + } else { + // L1+ score = `Level files size` / `MaxBytesForLevel` + compaction_reason = CompactionReason::kLevelMaxLevelSize; + } + break; + } else { + // didn't find the compaction, clear the inputs + inputs.clear(); + if (level == 0) { + skipped_l0 = true; + } + } + } + } + + bool is_manual = false; + // if we didn't find a compaction, check if there are any files marked for + // compaction + if (inputs.empty()) { + is_manual = true; + parent_index = base_index = -1; + PickFilesMarkedForCompactionExperimental(cf_name, vstorage, &inputs, &level, + &output_level); + if (!inputs.empty()) { + compaction_reason = CompactionReason::kFilesMarkedForCompaction; + } + } + if (inputs.empty()) { + return nullptr; + } + assert(level >= 0 && output_level >= 0); + + // Two level 0 compaction won't run at the same time, so don't need to worry + // about files on level 0 being compacted. + if (level == 0) { + assert(level0_compactions_in_progress_.empty()); + InternalKey smallest, largest; + GetRange(inputs, &smallest, &largest); + // Note that the next call will discard the file we placed in + // c->inputs_[0] earlier and replace it with an overlapping set + // which will include the picked file. + inputs.files.clear(); + vstorage->GetOverlappingInputs(0, &smallest, &largest, &inputs.files); + + // If we include more L0 files in the same compaction run it can + // cause the 'smallest' and 'largest' key to get extended to a + // larger range. So, re-invoke GetRange to get the new key range + GetRange(inputs, &smallest, &largest); + if (RangeInCompaction(vstorage, &smallest, &largest, output_level, + &parent_index)) { + return nullptr; + } + assert(!inputs.files.empty()); + } + + // Setup input files from output level + CompactionInputFiles output_level_inputs; + output_level_inputs.level = output_level; + if (!SetupOtherInputs(cf_name, mutable_cf_options, vstorage, &inputs, + &output_level_inputs, &parent_index, base_index)) { + return nullptr; + } + + std::vector compaction_inputs({inputs}); + if (!output_level_inputs.empty()) { + compaction_inputs.push_back(output_level_inputs); + } + + std::vector grandparents; + GetGrandparents(vstorage, inputs, output_level_inputs, &grandparents); + auto c = new Compaction( + vstorage, mutable_cf_options, std::move(compaction_inputs), output_level, + mutable_cf_options.MaxFileSizeForLevel(output_level), + mutable_cf_options.MaxGrandParentOverlapBytes(level), + GetPathId(ioptions_, mutable_cf_options, output_level), + GetCompressionType(ioptions_, vstorage, mutable_cf_options, output_level, + vstorage->base_level()), + std::move(grandparents), is_manual, score, + false /* deletion_compaction */, compaction_reason); + + // If it's level 0 compaction, make sure we don't execute any other level 0 + // compactions in parallel + if (level == 0) { + level0_compactions_in_progress_.insert(c); + } + + // Creating a compaction influences the compaction score because the score + // takes running compactions into account (by skipping files that are already + // being compacted). 
Since we just changed compaction score, we recalculate it + // here + vstorage->ComputeCompactionScore(mutable_cf_options); + + TEST_SYNC_POINT_CALLBACK("LevelCompactionPicker::PickCompaction:Return", c); + + return c; +} + +/* + * Find the optimal path to place a file + * Given a level, finds the path where levels up to it will fit in levels + * up to and including this path + */ +uint32_t LevelCompactionPicker::GetPathId( + const ImmutableCFOptions& ioptions, + const MutableCFOptions& mutable_cf_options, int level) { + uint32_t p = 0; + assert(!ioptions.db_paths.empty()); + + // size remaining in the most recent path + uint64_t current_path_size = ioptions.db_paths[0].target_size; + + uint64_t level_size; + int cur_level = 0; + + level_size = mutable_cf_options.max_bytes_for_level_base; + + // Last path is the fallback + while (p < ioptions.db_paths.size() - 1) { + if (level_size <= current_path_size) { + if (cur_level == level) { + // Does desired level fit in this path? + return p; + } else { + current_path_size -= level_size; + level_size *= mutable_cf_options.max_bytes_for_level_multiplier; + cur_level++; + continue; + } + } + p++; + current_path_size = ioptions.db_paths[p].target_size; + } + return p; +} + +bool LevelCompactionPicker::PickCompactionBySize(VersionStorageInfo* vstorage, + int level, int output_level, + CompactionInputFiles* inputs, + int* parent_index, + int* base_index) { + // level 0 files are overlapping. So we cannot pick more + // than one concurrent compactions at this level. This + // could be made better by looking at key-ranges that are + // being compacted at level 0. + if (level == 0 && !level0_compactions_in_progress_.empty()) { + TEST_SYNC_POINT("LevelCompactionPicker::PickCompactionBySize:0"); + return false; + } + + inputs->clear(); + + assert(level >= 0); + + // Pick the largest file in this level that is not already + // being compacted + const std::vector& file_size = vstorage->FilesByCompactionPri(level); + const std::vector& level_files = vstorage->LevelFiles(level); + + // record the first file that is not yet compacted + int nextIndex = -1; + + for (unsigned int i = vstorage->NextCompactionIndex(level); + i < file_size.size(); i++) { + int index = file_size[i]; + auto* f = level_files[index]; + + // do not pick a file to compact if it is being compacted + // from n-1 level. + if (f->being_compacted) { + continue; + } + + // remember the startIndex for the next call to PickCompaction + if (nextIndex == -1) { + nextIndex = i; + } + + // Do not pick this file if its parents at level+1 are being compacted. 
+ // Maybe we can avoid redoing this work in SetupOtherInputs + *parent_index = -1; + if (RangeInCompaction(vstorage, &f->smallest, &f->largest, output_level, + parent_index)) { + continue; + } + inputs->files.push_back(f); + inputs->level = level; + *base_index = index; + break; + } + + // store where to start the iteration in the next call to PickCompaction + vstorage->SetNextCompactionIndex(level, nextIndex); + + return inputs->size() > 0; +} + +#ifndef ROCKSDB_LITE +bool UniversalCompactionPicker::NeedsCompaction( + const VersionStorageInfo* vstorage) const { + const int kLevel0 = 0; + return vstorage->CompactionScore(kLevel0) >= 1; +} + +void UniversalCompactionPicker::SortedRun::Dump(char* out_buf, + size_t out_buf_size, + bool print_path) const { + if (level == 0) { + assert(file != nullptr); + if (file->fd.GetPathId() == 0 || !print_path) { + snprintf(out_buf, out_buf_size, "file %" PRIu64, file->fd.GetNumber()); + } else { + snprintf(out_buf, out_buf_size, "file %" PRIu64 + "(path " + "%" PRIu32 ")", + file->fd.GetNumber(), file->fd.GetPathId()); + } + } else { + snprintf(out_buf, out_buf_size, "level %d", level); + } +} + +void UniversalCompactionPicker::SortedRun::DumpSizeInfo( + char* out_buf, size_t out_buf_size, size_t sorted_run_count) const { + if (level == 0) { + assert(file != nullptr); + snprintf(out_buf, out_buf_size, + "file %" PRIu64 "[%" ROCKSDB_PRIszt + "] " + "with size %" PRIu64 " (compensated size %" PRIu64 ")", + file->fd.GetNumber(), sorted_run_count, file->fd.GetFileSize(), + file->compensated_file_size); + } else { + snprintf(out_buf, out_buf_size, + "level %d[%" ROCKSDB_PRIszt + "] " + "with size %" PRIu64 " (compensated size %" PRIu64 ")", + level, sorted_run_count, size, compensated_file_size); + } +} + +std::vector +UniversalCompactionPicker::CalculateSortedRuns( + const VersionStorageInfo& vstorage, const ImmutableCFOptions& ioptions) { + std::vector ret; + for (FileMetaData* f : vstorage.LevelFiles(0)) { + ret.emplace_back(0, f, f->fd.GetFileSize(), f->compensated_file_size, + f->being_compacted); + } + for (int level = 1; level < vstorage.num_levels(); level++) { + uint64_t total_compensated_size = 0U; + uint64_t total_size = 0U; + bool being_compacted = false; + bool is_first = true; + for (FileMetaData* f : vstorage.LevelFiles(level)) { + total_compensated_size += f->compensated_file_size; + total_size += f->fd.GetFileSize(); + if (ioptions.compaction_options_universal.allow_trivial_move == true) { + if (f->being_compacted) { + being_compacted = f->being_compacted; + } + } else { + // Compaction always includes all files for a non-zero level, so for a + // non-zero level, all the files should share the same being_compacted + // value. + // This assumption is only valid when + // ioptions.compaction_options_universal.allow_trivial_move is false + assert(is_first || f->being_compacted == being_compacted); + } + if (is_first) { + being_compacted = f->being_compacted; + is_first = false; + } + } + if (total_compensated_size > 0) { + ret.emplace_back(level, nullptr, total_size, total_compensated_size, + being_compacted); + } + } + return ret; +} + +#ifndef NDEBUG +namespace { +// smallest_seqno and largest_seqno are set iff. `files` is not empty. 
+void GetSmallestLargestSeqno(const std::vector& files, + SequenceNumber* smallest_seqno, + SequenceNumber* largest_seqno) { + bool is_first = true; + for (FileMetaData* f : files) { + assert(f->smallest_seqno <= f->largest_seqno); + if (is_first) { + is_first = false; + *smallest_seqno = f->smallest_seqno; + *largest_seqno = f->largest_seqno; + } else { + if (f->smallest_seqno < *smallest_seqno) { + *smallest_seqno = f->smallest_seqno; + } + if (f->largest_seqno > *largest_seqno) { + *largest_seqno = f->largest_seqno; + } + } + } +} +} // namespace +#endif + +// Algorithm that checks to see if there are any overlapping +// files in the input +bool CompactionPicker::IsInputNonOverlapping(Compaction* c) { + auto comparator = icmp_->user_comparator(); + int first_iter = 1; + + InputFileInfo prev, curr, next; + + SmallestKeyHeap smallest_key_priority_q = + create_level_heap(c, icmp_->user_comparator()); + + while (!smallest_key_priority_q.empty()) { + curr = smallest_key_priority_q.top(); + smallest_key_priority_q.pop(); + + if (first_iter) { + prev = curr; + first_iter = 0; + } else { + if (comparator->Compare(prev.f->largest.user_key(), + curr.f->smallest.user_key()) >= 0) { + // found overlapping files, return false + return false; + } + assert(comparator->Compare(curr.f->largest.user_key(), + prev.f->largest.user_key()) > 0); + prev = curr; + } + + next.f = nullptr; + + if (curr.level != 0 && curr.index < c->num_input_files(curr.level) - 1) { + next.f = c->input(curr.level, curr.index + 1); + next.level = curr.level; + next.index = curr.index + 1; + } + + if (next.f) { + smallest_key_priority_q.push(std::move(next)); + } + } + return true; +} + +// Universal style of compaction. Pick files that are contiguous in +// time-range to compact. +// +Compaction* UniversalCompactionPicker::PickCompaction( + const std::string& cf_name, const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, LogBuffer* log_buffer) { + const int kLevel0 = 0; + double score = vstorage->CompactionScore(kLevel0); + std::vector sorted_runs = + CalculateSortedRuns(*vstorage, ioptions_); + + if (sorted_runs.size() == 0 || + sorted_runs.size() < + (unsigned int)mutable_cf_options.level0_file_num_compaction_trigger) { + LogToBuffer(log_buffer, "[%s] Universal: nothing to do\n", cf_name.c_str()); + return nullptr; + } + VersionStorageInfo::LevelSummaryStorage tmp; + LogToBuffer(log_buffer, 3072, + "[%s] Universal: sorted runs files(%" ROCKSDB_PRIszt "): %s\n", + cf_name.c_str(), sorted_runs.size(), + vstorage->LevelSummary(&tmp)); + + // Check for size amplification first. + Compaction* c; + if ((c = PickCompactionUniversalSizeAmp(cf_name, mutable_cf_options, vstorage, + score, sorted_runs, log_buffer)) != + nullptr) { + LogToBuffer(log_buffer, "[%s] Universal: compacting for size amp\n", + cf_name.c_str()); + } else { + // Size amplification is within limits. Try reducing read + // amplification while maintaining file size ratios. + unsigned int ratio = ioptions_.compaction_options_universal.size_ratio; + + if ((c = PickCompactionUniversalReadAmp( + cf_name, mutable_cf_options, vstorage, score, ratio, UINT_MAX, + sorted_runs, log_buffer)) != nullptr) { + LogToBuffer(log_buffer, "[%s] Universal: compacting for size ratio\n", + cf_name.c_str()); + } else { + // Size amplification and file size ratios are within configured limits. 
+ // If max read amplification is exceeding configured limits, then force + // compaction without looking at filesize ratios and try to reduce + // the number of files to fewer than level0_file_num_compaction_trigger. + // This is guaranteed by NeedsCompaction() + assert(sorted_runs.size() >= + static_cast( + mutable_cf_options.level0_file_num_compaction_trigger)); + unsigned int num_files = + static_cast(sorted_runs.size()) - + mutable_cf_options.level0_file_num_compaction_trigger; + if ((c = PickCompactionUniversalReadAmp( + cf_name, mutable_cf_options, vstorage, score, UINT_MAX, + num_files, sorted_runs, log_buffer)) != nullptr) { + LogToBuffer(log_buffer, + "[%s] Universal: compacting for file num -- %u\n", + cf_name.c_str(), num_files); + } + } + } + if (c == nullptr) { + return nullptr; + } + + if (ioptions_.compaction_options_universal.allow_trivial_move == true) { + c->set_is_trivial_move(IsInputNonOverlapping(c)); + } + +// validate that all the chosen files of L0 are non overlapping in time +#ifndef NDEBUG + SequenceNumber prev_smallest_seqno = 0U; + bool is_first = true; + + size_t level_index = 0U; + if (c->start_level() == 0) { + for (auto f : *c->inputs(0)) { + assert(f->smallest_seqno <= f->largest_seqno); + if (is_first) { + is_first = false; + } else { + assert(prev_smallest_seqno > f->largest_seqno); + } + prev_smallest_seqno = f->smallest_seqno; + } + level_index = 1U; + } + for (; level_index < c->num_input_levels(); level_index++) { + if (c->num_input_files(level_index) != 0) { + SequenceNumber smallest_seqno = 0U; + SequenceNumber largest_seqno = 0U; + GetSmallestLargestSeqno(*(c->inputs(level_index)), &smallest_seqno, + &largest_seqno); + if (is_first) { + is_first = false; + } else if (prev_smallest_seqno > 0) { + // A level is considered as the bottommost level if there are + // no files in higher levels or if files in higher levels do + // not overlap with the files being compacted. Sequence numbers + // of files in bottommost level can be set to 0 to help + // compression. As a result, the following assert may not hold + // if the prev_smallest_seqno is 0. + assert(prev_smallest_seqno > largest_seqno); + } + prev_smallest_seqno = smallest_seqno; + } + } +#endif + // update statistics + MeasureTime(ioptions_.statistics, NUM_FILES_IN_SINGLE_COMPACTION, + c->inputs(0)->size()); + + level0_compactions_in_progress_.insert(c); + + return c; +} + +uint32_t UniversalCompactionPicker::GetPathId( + const ImmutableCFOptions& ioptions, uint64_t file_size) { + // Two conditions need to be satisfied: + // (1) the target path needs to be able to hold the file's size + // (2) Total size left in this and previous paths need to be not + // smaller than expected future file size before this new file is + // compacted, which is estimated based on size_ratio. + // For example, if now we are compacting files of size (1, 1, 2, 4, 8), + // we will make sure the target file, probably with size of 16, will be + // placed in a path so that eventually when new files are generated and + // compacted to (1, 1, 2, 4, 8, 16), all those files can be stored in or + // before the path we chose. + // + // TODO(sdong): now the case of multiple column families is not + // considered in this algorithm. So the target size can be violated in + // that case. We need to improve it. 
+ uint64_t accumulated_size = 0; + uint64_t future_size = + file_size * (100 - ioptions.compaction_options_universal.size_ratio) / + 100; + uint32_t p = 0; + assert(!ioptions.db_paths.empty()); + for (; p < ioptions.db_paths.size() - 1; p++) { + uint64_t target_size = ioptions.db_paths[p].target_size; + if (target_size > file_size && + accumulated_size + (target_size - file_size) > future_size) { + return p; + } + accumulated_size += target_size; + } + return p; +} + +// +// Consider compaction files based on their size differences with +// the next file in time order. +// +Compaction* UniversalCompactionPicker::PickCompactionUniversalReadAmp( + const std::string& cf_name, const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, double score, unsigned int ratio, + unsigned int max_number_of_files_to_compact, + const std::vector& sorted_runs, LogBuffer* log_buffer) { + unsigned int min_merge_width = + ioptions_.compaction_options_universal.min_merge_width; + unsigned int max_merge_width = + ioptions_.compaction_options_universal.max_merge_width; + + const SortedRun* sr = nullptr; + bool done = false; + size_t start_index = 0; + unsigned int candidate_count = 0; + + unsigned int max_files_to_compact = + std::min(max_merge_width, max_number_of_files_to_compact); + min_merge_width = std::max(min_merge_width, 2U); + + // Caller checks the size before executing this function. This invariant is + // important because otherwise we may have a possible integer underflow when + // dealing with unsigned types. + assert(sorted_runs.size() > 0); + + // Considers a candidate file only if it is smaller than the + // total size accumulated so far. + for (size_t loop = 0; loop < sorted_runs.size(); loop++) { + candidate_count = 0; + + // Skip files that are already being compacted + for (sr = nullptr; loop < sorted_runs.size(); loop++) { + sr = &sorted_runs[loop]; + + if (!sr->being_compacted) { + candidate_count = 1; + break; + } + char file_num_buf[kFormatFileNumberBufSize]; + sr->Dump(file_num_buf, sizeof(file_num_buf)); + LogToBuffer(log_buffer, + "[%s] Universal: %s" + "[%d] being compacted, skipping", + cf_name.c_str(), file_num_buf, loop); + + sr = nullptr; + } + + // This file is not being compacted. Consider it as the + // first candidate to be compacted. + uint64_t candidate_size = sr != nullptr ? sr->compensated_file_size : 0; + if (sr != nullptr) { + char file_num_buf[kFormatFileNumberBufSize]; + sr->Dump(file_num_buf, sizeof(file_num_buf), true); + LogToBuffer(log_buffer, "[%s] Universal: Possible candidate %s[%d].", + cf_name.c_str(), file_num_buf, loop); + } + + // Check if the succeeding files need compaction. + for (size_t i = loop + 1; + candidate_count < max_files_to_compact && i < sorted_runs.size(); + i++) { + const SortedRun* succeeding_sr = &sorted_runs[i]; + if (succeeding_sr->being_compacted) { + break; + } + // Pick files if the total/last candidate file size (increased by the + // specified ratio) is still larger than the next candidate file. + // candidate_size is the total size of files picked so far with the + // default kCompactionStopStyleTotalSize; with + // kCompactionStopStyleSimilarSize, it's simply the size of the last + // picked file. 
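+// Editor's illustration (not part of the original diff): a worked example of
+// the ratio test below. With ratio = 1 (one percent) and candidate_size =
+// 100, sz = 100 * 101 / 100 = 101: a succeeding run of size <= 101 keeps
+// the candidate window growing, while one of size 102 or more breaks it.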
+ double sz = candidate_size * (100.0 + ratio) / 100.0; + if (sz < static_cast(succeeding_sr->size)) { + break; + } + if (ioptions_.compaction_options_universal.stop_style == + kCompactionStopStyleSimilarSize) { + // Similar-size stopping rule: also check the last picked file isn't + // far larger than the next candidate file. + sz = (succeeding_sr->size * (100.0 + ratio)) / 100.0; + if (sz < static_cast(candidate_size)) { + // If the small file we've encountered begins a run of similar-size + // files, we'll pick them up on a future iteration of the outer + // loop. If it's some lonely straggler, it'll eventually get picked + // by the last-resort read amp strategy which disregards size ratios. + break; + } + candidate_size = succeeding_sr->compensated_file_size; + } else { // default kCompactionStopStyleTotalSize + candidate_size += succeeding_sr->compensated_file_size; + } + candidate_count++; + } + + // Found a series of consecutive files that need compaction. + if (candidate_count >= (unsigned int)min_merge_width) { + start_index = loop; + done = true; + break; + } else { + for (size_t i = loop; + i < loop + candidate_count && i < sorted_runs.size(); i++) { + const SortedRun* skipping_sr = &sorted_runs[i]; + char file_num_buf[256]; + skipping_sr->DumpSizeInfo(file_num_buf, sizeof(file_num_buf), loop); + LogToBuffer(log_buffer, "[%s] Universal: Skipping %s", cf_name.c_str(), + file_num_buf); + } + } + } + if (!done || candidate_count <= 1) { + return nullptr; + } + size_t first_index_after = start_index + candidate_count; + // Compression is enabled if files compacted earlier already reached + // size ratio of compression. + bool enable_compression = true; + int ratio_to_compress = + ioptions_.compaction_options_universal.compression_size_percent; + if (ratio_to_compress >= 0) { + uint64_t total_size = 0; + for (auto& sorted_run : sorted_runs) { + total_size += sorted_run.compensated_file_size; + } + + uint64_t older_file_size = 0; + for (size_t i = sorted_runs.size() - 1; i >= first_index_after; i--) { + older_file_size += sorted_runs[i].size; + if (older_file_size * 100L >= total_size * (long)ratio_to_compress) { + enable_compression = false; + break; + } + } + } + + uint64_t estimated_total_size = 0; + for (unsigned int i = 0; i < first_index_after; i++) { + estimated_total_size += sorted_runs[i].size; + } + uint32_t path_id = GetPathId(ioptions_, estimated_total_size); + int start_level = sorted_runs[start_index].level; + int output_level; + if (first_index_after == sorted_runs.size()) { + output_level = vstorage->num_levels() - 1; + } else if (sorted_runs[first_index_after].level == 0) { + output_level = 0; + } else { + output_level = sorted_runs[first_index_after].level - 1; + } + + std::vector inputs(vstorage->num_levels()); + for (size_t i = 0; i < inputs.size(); ++i) { + inputs[i].level = start_level + static_cast(i); + } + for (size_t i = start_index; i < first_index_after; i++) { + auto& picking_sr = sorted_runs[i]; + if (picking_sr.level == 0) { + FileMetaData* picking_file = picking_sr.file; + inputs[0].files.push_back(picking_file); + } else { + auto& files = inputs[picking_sr.level - start_level].files; + for (auto* f : vstorage->LevelFiles(picking_sr.level)) { + files.push_back(f); + } + } + char file_num_buf[256]; + picking_sr.DumpSizeInfo(file_num_buf, sizeof(file_num_buf), i); + LogToBuffer(log_buffer, "[%s] Universal: Picking %s", cf_name.c_str(), + file_num_buf); + } + + CompactionReason compaction_reason; + if (max_number_of_files_to_compact == UINT_MAX) { + 
+    compaction_reason = CompactionReason::kUniversalSortedRunNum;
+  } else {
+    compaction_reason = CompactionReason::kUniversalSizeRatio;
+  }
+  return new Compaction(
+      vstorage, mutable_cf_options, std::move(inputs), output_level,
+      mutable_cf_options.MaxFileSizeForLevel(output_level), LLONG_MAX, path_id,
+      GetCompressionType(ioptions_, vstorage, mutable_cf_options, start_level,
+                         1, enable_compression),
+      /* grandparents */ {}, /* is manual */ false, score,
+      false /* deletion_compaction */, compaction_reason);
+}
+
+// Look at overall size amplification. If size amplification
+// exceeds the configured value, then do a compaction
+// of the candidate files all the way up to the earliest
+// base file (overrides configured values of file-size ratios,
+// min_merge_width and max_merge_width).
+//
+Compaction* UniversalCompactionPicker::PickCompactionUniversalSizeAmp(
+    const std::string& cf_name, const MutableCFOptions& mutable_cf_options,
+    VersionStorageInfo* vstorage, double score,
+    const std::vector<SortedRun>& sorted_runs, LogBuffer* log_buffer) {
+  // percentage flexibility while reducing size amplification
+  uint64_t ratio =
+      ioptions_.compaction_options_universal.max_size_amplification_percent;
+
+  unsigned int candidate_count = 0;
+  uint64_t candidate_size = 0;
+  size_t start_index = 0;
+  const SortedRun* sr = nullptr;
+
+  // Skip files that are already being compacted
+  for (size_t loop = 0; loop < sorted_runs.size() - 1; loop++) {
+    sr = &sorted_runs[loop];
+    if (!sr->being_compacted) {
+      start_index = loop;  // Consider this as the first candidate.
+      break;
+    }
+    char file_num_buf[kFormatFileNumberBufSize];
+    sr->Dump(file_num_buf, sizeof(file_num_buf), true);
+    LogToBuffer(log_buffer, "[%s] Universal: skipping %s[%d] compacted %s",
+                cf_name.c_str(), file_num_buf, loop,
+                " cannot be a candidate to reduce size amp.\n");
+    sr = nullptr;
+  }
+
+  if (sr == nullptr) {
+    return nullptr;  // no candidate files
+  }
+  {
+    char file_num_buf[kFormatFileNumberBufSize];
+    sr->Dump(file_num_buf, sizeof(file_num_buf), true);
+    LogToBuffer(log_buffer,
+                "[%s] Universal: First candidate %s[%" ROCKSDB_PRIszt "] %s",
+                cf_name.c_str(), file_num_buf, start_index,
+                " to reduce size amp.\n");
+  }
+
+  // keep adding up all the remaining files
+  for (size_t loop = start_index; loop < sorted_runs.size() - 1; loop++) {
+    sr = &sorted_runs[loop];
+    if (sr->being_compacted) {
+      char file_num_buf[kFormatFileNumberBufSize];
+      sr->Dump(file_num_buf, sizeof(file_num_buf), true);
+      LogToBuffer(
+          log_buffer, "[%s] Universal: Possible candidate %s[%d] %s",
+          cf_name.c_str(), file_num_buf, start_index,
+          " is already being compacted. No size amp reduction possible.\n");
+      return nullptr;
+    }
+    candidate_size += sr->compensated_file_size;
+    candidate_count++;
+  }
+  if (candidate_count == 0) {
+    return nullptr;
+  }
+
+  // size of earliest file
+  uint64_t earliest_file_size = sorted_runs.back().size;
+
+  // size amplification = percentage of additional size
+  if (candidate_size * 100 < ratio * earliest_file_size) {
+    LogToBuffer(
+        log_buffer,
+        "[%s] Universal: size amp not needed. newer-files-total-size %" PRIu64
+        " earliest-file-size %" PRIu64,
+        cf_name.c_str(), candidate_size, earliest_file_size);
+    return nullptr;
+  } else {
+    LogToBuffer(
+        log_buffer,
+        "[%s] Universal: size amp needed. 
newer-files-total-size %" PRIu64 + " earliest-file-size %" PRIu64, + cf_name.c_str(), candidate_size, earliest_file_size); + } + assert(start_index < sorted_runs.size() - 1); + + // Estimate total file size + uint64_t estimated_total_size = 0; + for (size_t loop = start_index; loop < sorted_runs.size(); loop++) { + estimated_total_size += sorted_runs[loop].size; + } + uint32_t path_id = GetPathId(ioptions_, estimated_total_size); + int start_level = sorted_runs[start_index].level; + + std::vector inputs(vstorage->num_levels()); + for (size_t i = 0; i < inputs.size(); ++i) { + inputs[i].level = start_level + static_cast(i); + } + // We always compact all the files, so always compress. + for (size_t loop = start_index; loop < sorted_runs.size(); loop++) { + auto& picking_sr = sorted_runs[loop]; + if (picking_sr.level == 0) { + FileMetaData* f = picking_sr.file; + inputs[0].files.push_back(f); + } else { + auto& files = inputs[picking_sr.level - start_level].files; + for (auto* f : vstorage->LevelFiles(picking_sr.level)) { + files.push_back(f); + } + } + char file_num_buf[256]; + picking_sr.DumpSizeInfo(file_num_buf, sizeof(file_num_buf), loop); + LogToBuffer(log_buffer, "[%s] Universal: size amp picking %s", + cf_name.c_str(), file_num_buf); + } + + return new Compaction( + vstorage, mutable_cf_options, std::move(inputs), + vstorage->num_levels() - 1, + mutable_cf_options.MaxFileSizeForLevel(vstorage->num_levels() - 1), + /* max_grandparent_overlap_bytes */ LLONG_MAX, path_id, + GetCompressionType(ioptions_, vstorage, mutable_cf_options, + vstorage->num_levels() - 1, 1), + /* grandparents */ {}, /* is manual */ false, score, + false /* deletion_compaction */, + CompactionReason::kUniversalSizeAmplification); +} + +bool FIFOCompactionPicker::NeedsCompaction( + const VersionStorageInfo* vstorage) const { + const int kLevel0 = 0; + return vstorage->CompactionScore(kLevel0) >= 1; +} + +Compaction* FIFOCompactionPicker::PickCompaction( + const std::string& cf_name, const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, LogBuffer* log_buffer) { + assert(vstorage->num_levels() == 1); + const int kLevel0 = 0; + const std::vector& level_files = vstorage->LevelFiles(kLevel0); + uint64_t total_size = 0; + for (const auto& file : level_files) { + total_size += file->fd.file_size; + } + + if (total_size <= ioptions_.compaction_options_fifo.max_table_files_size || + level_files.size() == 0) { + // total size not exceeded + LogToBuffer(log_buffer, + "[%s] FIFO compaction: nothing to do. Total size %" PRIu64 + ", max size %" PRIu64 "\n", + cf_name.c_str(), total_size, + ioptions_.compaction_options_fifo.max_table_files_size); + return nullptr; + } + + if (!level0_compactions_in_progress_.empty()) { + LogToBuffer(log_buffer, + "[%s] FIFO compaction: Already executing compaction. 
bool FIFOCompactionPicker::NeedsCompaction( + const VersionStorageInfo* vstorage) const { + const int kLevel0 = 0; + return vstorage->CompactionScore(kLevel0) >= 1; +} + +Compaction* FIFOCompactionPicker::PickCompaction( + const std::string& cf_name, const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, LogBuffer* log_buffer) { + assert(vstorage->num_levels() == 1); + const int kLevel0 = 0; + const std::vector<FileMetaData*>& level_files = vstorage->LevelFiles(kLevel0); + uint64_t total_size = 0; + for (const auto& file : level_files) { + total_size += file->fd.file_size; + } + + if (total_size <= ioptions_.compaction_options_fifo.max_table_files_size || + level_files.size() == 0) { + // total size not exceeded + LogToBuffer(log_buffer, + "[%s] FIFO compaction: nothing to do. Total size %" PRIu64 + ", max size %" PRIu64 "\n", + cf_name.c_str(), total_size, + ioptions_.compaction_options_fifo.max_table_files_size); + return nullptr; + } + + if (!level0_compactions_in_progress_.empty()) { + LogToBuffer(log_buffer, + "[%s] FIFO compaction: Already executing compaction. No need " + "to run parallel compactions since compactions are very fast", + cf_name.c_str()); + return nullptr; + } + + std::vector<CompactionInputFiles> inputs; + inputs.emplace_back(); + inputs[0].level = 0; + // delete old files (FIFO) + for (auto ritr = level_files.rbegin(); ritr != level_files.rend(); ++ritr) { + auto f = *ritr; + total_size -= f->compensated_file_size; + inputs[0].files.push_back(f); + char tmp_fsize[16]; + AppendHumanBytes(f->fd.GetFileSize(), tmp_fsize, sizeof(tmp_fsize)); + LogToBuffer(log_buffer, "[%s] FIFO compaction: picking file %" PRIu64 + " with size %s for deletion", + cf_name.c_str(), f->fd.GetNumber(), tmp_fsize); + if (total_size <= ioptions_.compaction_options_fifo.max_table_files_size) { + break; + } + } + Compaction* c = new Compaction( + vstorage, mutable_cf_options, std::move(inputs), 0, 0, 0, 0, + kNoCompression, {}, /* is manual */ false, vstorage->CompactionScore(0), + /* is deletion compaction */ true, CompactionReason::kFIFOMaxSize); + level0_compactions_in_progress_.insert(c); + return c; +} + +Compaction* FIFOCompactionPicker::CompactRange( + const std::string& cf_name, const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, int input_level, int output_level, + uint32_t output_path_id, const InternalKey* begin, const InternalKey* end, + InternalKey** compaction_end, bool* manual_conflict) { + assert(input_level == 0); + assert(output_level == 0); + *compaction_end = nullptr; + LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, ioptions_.info_log); + Compaction* c = + PickCompaction(cf_name, mutable_cf_options, vstorage, &log_buffer); + log_buffer.FlushBufferToLog(); + return c; +} + +#endif // !ROCKSDB_LITE + +} // namespace rocksdb
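FIFO compaction never rewrites data: once the level-0 total exceeds max_table_files_size it simply deletes the oldest tables until the level fits again. A small self-contained sketch of that loop, with made-up sizes (the four 100 MB files and the 250 MB cap are illustrative, not values from this patch):

#include <cstdint>
#include <cstdio>
#include <deque>

int main() {
  // Newest file first, oldest last, mirroring the rbegin() walk above.
  std::deque<uint64_t> file_sizes_mb = {100, 100, 100, 100};
  uint64_t total_mb = 400;
  const uint64_t max_table_files_size_mb = 250;

  // Drop files from the old end until the level is back under the cap.
  while (total_mb > max_table_files_size_mb && !file_sizes_mb.empty()) {
    total_mb -= file_sizes_mb.back();
    file_sizes_mb.pop_back();
    std::printf("deleted oldest file, total now %llu MB\n",
                static_cast<unsigned long long>(total_mb));
  }
  return 0;
}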
diff --git a/external/rocksdb/db/compaction_picker.h b/external/rocksdb/db/compaction_picker.h new file mode 100755 index 0000000000..fca7319598 --- /dev/null +++ b/external/rocksdb/db/compaction_picker.h @@ -0,0 +1,352 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once + +#include <memory> +#include <set> +#include <string> +#include <unordered_set> +#include <vector> + +#include "db/compaction.h" +#include "db/version_set.h" +#include "rocksdb/env.h" +#include "rocksdb/options.h" +#include "rocksdb/status.h" +#include "util/mutable_cf_options.h" + + +namespace rocksdb { + +class LogBuffer; +class Compaction; +class VersionStorageInfo; +struct CompactionInputFiles; + +class CompactionPicker { + public: + CompactionPicker(const ImmutableCFOptions& ioptions, + const InternalKeyComparator* icmp); + virtual ~CompactionPicker(); + + // Pick level and inputs for a new compaction. + // Returns nullptr if there is no compaction to be done. + // Otherwise returns a pointer to a heap-allocated object that + // describes the compaction. Caller should delete the result. + virtual Compaction* PickCompaction(const std::string& cf_name, + const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, + LogBuffer* log_buffer) = 0; + + // Return a compaction object for compacting the range [begin,end] in + // the specified level. Returns nullptr if there is nothing in that + // level that overlaps the specified range. Caller should delete + // the result. + // + // The returned Compaction might not include the whole requested range. + // In that case, compaction_end will be set to the next key that needs + // compacting. In case the compaction will compact the whole range, + // compaction_end will be set to nullptr. + // Client is responsible for compaction_end storage -- when called, + // *compaction_end should point to valid InternalKey! + virtual Compaction* CompactRange( + const std::string& cf_name, const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, int input_level, int output_level, + uint32_t output_path_id, const InternalKey* begin, const InternalKey* end, + InternalKey** compaction_end, bool* manual_conflict); + + // The maximum allowed output level. Default value is NumberLevels() - 1. + virtual int MaxOutputLevel() const { return NumberLevels() - 1; } + + virtual bool NeedsCompaction(const VersionStorageInfo* vstorage) const = 0; + +// Sanitize the input set of compaction input files. +// When the input parameters do not describe a valid compaction, the +// function will try to fix the input_files by adding necessary +// files. If it's not possible to convert an invalid input_files +// into a valid one by adding more files, the function will return a +// non-ok status with specific reason. +#ifndef ROCKSDB_LITE + Status SanitizeCompactionInputFiles(std::unordered_set<uint64_t>* input_files, + const ColumnFamilyMetaData& cf_meta, + const int output_level) const; +#endif // ROCKSDB_LITE + + // Free up the files that participated in a compaction + // + // Requirement: DB mutex held + void ReleaseCompactionFiles(Compaction* c, Status status); + + // Returns true if any one of the specified files are being compacted + bool FilesInCompaction(const std::vector<FileMetaData*>& files); + + // Takes a list of CompactionInputFiles and returns a (manual) Compaction + // object. + Compaction* FormCompaction( + const CompactionOptions& compact_options, + const std::vector<CompactionInputFiles>& input_files, int output_level, + VersionStorageInfo* vstorage, const MutableCFOptions& mutable_cf_options, + uint32_t output_path_id); + + // Converts a set of compaction input file numbers into + // a list of CompactionInputFiles. + Status GetCompactionInputsFromFileNumbers( + std::vector<CompactionInputFiles>* input_files, + std::unordered_set<uint64_t>* input_set, + const VersionStorageInfo* vstorage, + const CompactionOptions& compact_options) const; + + // Used in universal compaction when the enabled_trivial_move + // option is set. Checks whether there are any overlapping files + // in the input. Returns true if the input files are non + // overlapping. + bool IsInputNonOverlapping(Compaction* c); + + // Is there currently a compaction involving level 0 taking place + bool IsLevel0CompactionInProgress() const { + return !level0_compactions_in_progress_.empty(); + } + + protected: + int NumberLevels() const { return ioptions_.num_levels; } + + // Stores the minimal range that covers all entries in inputs in + // *smallest, *largest. + // REQUIRES: inputs is not empty + void GetRange(const CompactionInputFiles& inputs, InternalKey* smallest, + InternalKey* largest); + + // Stores the minimal range that covers all entries in inputs1 and inputs2 + // in *smallest, *largest. + // REQUIRES: inputs is not empty + void GetRange(const CompactionInputFiles& inputs1, + const CompactionInputFiles& inputs2, InternalKey* smallest, + InternalKey* largest); + + // Add more files to the inputs on "level" to make sure that + // no newer version of a key is compacted to "level+1" while leaving an older + // version in a "level". Otherwise, any Get() will search "level" first, + // and will likely return an old/stale value for the key, since it always + // searches in increasing order of level to find the value. This could + // also scramble the order of merge operands. This function should be + // called any time a new Compaction is created, and its inputs_[0] are + // populated. + // + // Will return false if it is impossible to apply this compaction. + bool ExpandWhileOverlapping(const std::string& cf_name, + VersionStorageInfo* vstorage, + CompactionInputFiles* inputs); + + // Returns true if any one of the parent files are being compacted + bool RangeInCompaction(VersionStorageInfo* vstorage, + const InternalKey* smallest, + const InternalKey* largest, int level, int* index); + + bool SetupOtherInputs(const std::string& cf_name, + const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, + CompactionInputFiles* inputs, + CompactionInputFiles* output_level_inputs, + int* parent_index, int base_index); + + void GetGrandparents(VersionStorageInfo* vstorage, + const CompactionInputFiles& inputs, + const CompactionInputFiles& output_level_inputs, + std::vector<FileMetaData*>* grandparents); + + const ImmutableCFOptions& ioptions_; + +// A helper function to SanitizeCompactionInputFiles() that +// sanitizes "input_files" by adding necessary files. +#ifndef ROCKSDB_LITE + virtual Status SanitizeCompactionInputFilesForAllLevels( + std::unordered_set<uint64_t>* input_files, + const ColumnFamilyMetaData& cf_meta, const int output_level) const; +#endif // ROCKSDB_LITE + + // Keeps track of all compactions that are running on Level0. + // It is protected by DB mutex + std::set<Compaction*> level0_compactions_in_progress_; + + const InternalKeyComparator* const icmp_; +}; + +class LevelCompactionPicker : public CompactionPicker { + public: + LevelCompactionPicker(const ImmutableCFOptions& ioptions, + const InternalKeyComparator* icmp) + : CompactionPicker(ioptions, icmp) {} + virtual Compaction* PickCompaction(const std::string& cf_name, + const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, + LogBuffer* log_buffer) override; + + virtual bool NeedsCompaction( + const VersionStorageInfo* vstorage) const override; + + // Pick a path ID to place a newly generated file, with its level + static uint32_t GetPathId(const ImmutableCFOptions& ioptions, + const MutableCFOptions& mutable_cf_options, + int level); + + private: + // For the specified level, pick a file that we want to compact. + // Returns false if there is no file to compact. + // If it returns true, inputs->files.size() will be exactly one. + // If level is 0 and there is already a compaction on that level, this + // function will return false. + bool PickCompactionBySize(VersionStorageInfo* vstorage, int level, + int output_level, CompactionInputFiles* inputs, + int* parent_index, int* base_index); + + // If there is any file marked for compaction, put it into inputs. + // This is still experimental. 
It will return meaningful results only if + // clients call experimental feature SuggestCompactRange() + void PickFilesMarkedForCompactionExperimental(const std::string& cf_name, + VersionStorageInfo* vstorage, + CompactionInputFiles* inputs, + int* level, int* output_level); +}; + +#ifndef ROCKSDB_LITE +class UniversalCompactionPicker : public CompactionPicker { + public: + UniversalCompactionPicker(const ImmutableCFOptions& ioptions, + const InternalKeyComparator* icmp) + : CompactionPicker(ioptions, icmp) {} + virtual Compaction* PickCompaction(const std::string& cf_name, + const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, + LogBuffer* log_buffer) override; + + virtual int MaxOutputLevel() const override { return NumberLevels() - 1; } + + virtual bool NeedsCompaction( + const VersionStorageInfo* vstorage) const override; + + private: + struct SortedRun { + SortedRun(int _level, FileMetaData* _file, uint64_t _size, + uint64_t _compensated_file_size, bool _being_compacted) + : level(_level), + file(_file), + size(_size), + compensated_file_size(_compensated_file_size), + being_compacted(_being_compacted) { + assert(compensated_file_size > 0); + assert(level != 0 || file != nullptr); + } + + void Dump(char* out_buf, size_t out_buf_size, + bool print_path = false) const; + + // sorted_run_count is added into the string to print + void DumpSizeInfo(char* out_buf, size_t out_buf_size, + size_t sorted_run_count) const; + + int level; + // `file` Will be null for level > 0. For level = 0, the sorted run is + // for this file. + FileMetaData* file; + // For level > 0, `size` and `compensated_file_size` are sum of sizes all + // files in the level. `being_compacted` should be the same for all files + // in a non-zero level. Use the value here. + uint64_t size; + uint64_t compensated_file_size; + bool being_compacted; + }; + + // Pick Universal compaction to limit read amplification + Compaction* PickCompactionUniversalReadAmp( + const std::string& cf_name, const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, double score, unsigned int ratio, + unsigned int num_files, const std::vector& sorted_runs, + LogBuffer* log_buffer); + + // Pick Universal compaction to limit space amplification. + Compaction* PickCompactionUniversalSizeAmp( + const std::string& cf_name, const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, double score, + const std::vector& sorted_runs, LogBuffer* log_buffer); + + static std::vector CalculateSortedRuns( + const VersionStorageInfo& vstorage, const ImmutableCFOptions& ioptions); + + // Pick a path ID to place a newly generated file, with its estimated file + // size. + static uint32_t GetPathId(const ImmutableCFOptions& ioptions, + uint64_t file_size); +}; + +class FIFOCompactionPicker : public CompactionPicker { + public: + FIFOCompactionPicker(const ImmutableCFOptions& ioptions, + const InternalKeyComparator* icmp) + : CompactionPicker(ioptions, icmp) {} + + virtual Compaction* PickCompaction(const std::string& cf_name, + const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* version, + LogBuffer* log_buffer) override; + + virtual Compaction* CompactRange( + const std::string& cf_name, const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, int input_level, int output_level, + uint32_t output_path_id, const InternalKey* begin, const InternalKey* end, + InternalKey** compaction_end, bool* manual_conflict) override; + + // The maximum allowed output level. Always returns 0. 
+ virtual int MaxOutputLevel() const override { return 0; } + + virtual bool NeedsCompaction( + const VersionStorageInfo* vstorage) const override; +}; + +class NullCompactionPicker : public CompactionPicker { + public: + NullCompactionPicker(const ImmutableCFOptions& ioptions, + const InternalKeyComparator* icmp) + : CompactionPicker(ioptions, icmp) {} + virtual ~NullCompactionPicker() {} + + // Always return "nullptr" + Compaction* PickCompaction(const std::string& cf_name, + const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, + LogBuffer* log_buffer) override { + return nullptr; + } + + // Always return "nullptr" + Compaction* CompactRange(const std::string& cf_name, + const MutableCFOptions& mutable_cf_options, + VersionStorageInfo* vstorage, int input_level, + int output_level, uint32_t output_path_id, + const InternalKey* begin, const InternalKey* end, + InternalKey** compaction_end, + bool* manual_conflict) override { + return nullptr; + } + + // Always returns false. + virtual bool NeedsCompaction( + const VersionStorageInfo* vstorage) const override { + return false; + } +}; +#endif // !ROCKSDB_LITE + +CompressionType GetCompressionType(const ImmutableCFOptions& ioptions, + const VersionStorageInfo* vstorage, + const MutableCFOptions& mutable_cf_options, + int level, int base_level, + const bool enable_compression = true); + +} // namespace rocksdb diff --git a/external/rocksdb/db/compaction_picker_test.cc b/external/rocksdb/db/compaction_picker_test.cc new file mode 100755 index 0000000000..2d3265421a --- /dev/null +++ b/external/rocksdb/db/compaction_picker_test.cc @@ -0,0 +1,1069 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "db/compaction.h" +#include "db/compaction_picker.h" +#include +#include +#include + +#include "util/logging.h" +#include "util/string_util.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { + +class CountingLogger : public Logger { + public: + using Logger::Logv; + virtual void Logv(const char* format, va_list ap) override { log_count++; } + size_t log_count; +}; + +class CompactionPickerTest : public testing::Test { + public: + const Comparator* ucmp_; + InternalKeyComparator icmp_; + Options options_; + ImmutableCFOptions ioptions_; + MutableCFOptions mutable_cf_options_; + LevelCompactionPicker level_compaction_picker; + std::string cf_name_; + CountingLogger logger_; + LogBuffer log_buffer_; + uint32_t file_num_; + CompactionOptionsFIFO fifo_options_; + std::unique_ptr vstorage_; + std::vector> files_; + // does not own FileMetaData + std::unordered_map> file_map_; + // input files to compaction process. 
+ std::vector input_files_; + int compaction_level_start_; + + CompactionPickerTest() + : ucmp_(BytewiseComparator()), + icmp_(ucmp_), + ioptions_(options_), + mutable_cf_options_(options_, ioptions_), + level_compaction_picker(ioptions_, &icmp_), + cf_name_("dummy"), + log_buffer_(InfoLogLevel::INFO_LEVEL, &logger_), + file_num_(1), + vstorage_(nullptr) { + fifo_options_.max_table_files_size = 1; + mutable_cf_options_.RefreshDerivedOptions(ioptions_); + ioptions_.db_paths.emplace_back("dummy", + std::numeric_limits::max()); + } + + ~CompactionPickerTest() { + } + + void NewVersionStorage(int num_levels, CompactionStyle style) { + DeleteVersionStorage(); + options_.num_levels = num_levels; + vstorage_.reset(new VersionStorageInfo( + &icmp_, ucmp_, options_.num_levels, style, nullptr)); + vstorage_->CalculateBaseBytes(ioptions_, mutable_cf_options_); + } + + void DeleteVersionStorage() { + vstorage_.reset(); + files_.clear(); + file_map_.clear(); + input_files_.clear(); + } + + void Add(int level, uint32_t file_number, const char* smallest, + const char* largest, uint64_t file_size = 1, uint32_t path_id = 0, + SequenceNumber smallest_seq = 100, + SequenceNumber largest_seq = 100) { + assert(level < vstorage_->num_levels()); + FileMetaData* f = new FileMetaData; + f->fd = FileDescriptor(file_number, path_id, file_size); + f->smallest = InternalKey(smallest, smallest_seq, kTypeValue); + f->largest = InternalKey(largest, largest_seq, kTypeValue); + f->smallest_seqno = smallest_seq; + f->largest_seqno = largest_seq; + f->compensated_file_size = file_size; + f->refs = 0; + vstorage_->AddFile(level, f); + files_.emplace_back(f); + file_map_.insert({file_number, {f, level}}); + } + + void SetCompactionInputFilesLevels(int level_count, int start_level) { + input_files_.resize(level_count); + for (int i = 0; i < level_count; ++i) { + input_files_[i].level = start_level + i; + } + compaction_level_start_ = start_level; + } + + void AddToCompactionFiles(uint32_t file_number) { + auto iter = file_map_.find(file_number); + assert(iter != file_map_.end()); + int level = iter->second.second; + assert(level < vstorage_->num_levels()); + input_files_[level - compaction_level_start_].files.emplace_back( + iter->second.first); + } + + void UpdateVersionStorageInfo() { + vstorage_->CalculateBaseBytes(ioptions_, mutable_cf_options_); + vstorage_->UpdateFilesByCompactionPri(mutable_cf_options_); + vstorage_->UpdateNumNonEmptyLevels(); + vstorage_->GenerateFileIndexer(); + vstorage_->GenerateLevelFilesBrief(); + vstorage_->ComputeCompactionScore(mutable_cf_options_); + vstorage_->GenerateLevel0NonOverlapping(); + vstorage_->SetFinalized(); + } +}; + +TEST_F(CompactionPickerTest, Empty) { + NewVersionStorage(6, kCompactionStyleLevel); + UpdateVersionStorageInfo(); + std::unique_ptr compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() == nullptr); +} + +TEST_F(CompactionPickerTest, Single) { + NewVersionStorage(6, kCompactionStyleLevel); + mutable_cf_options_.level0_file_num_compaction_trigger = 2; + Add(0, 1U, "p", "q"); + UpdateVersionStorageInfo(); + + std::unique_ptr compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() == nullptr); +} + +TEST_F(CompactionPickerTest, Level0Trigger) { + NewVersionStorage(6, kCompactionStyleLevel); + mutable_cf_options_.level0_file_num_compaction_trigger = 2; + Add(0, 1U, "150", "200"); + 
Add(0, 2U, "200", "250"); + + UpdateVersionStorageInfo(); + + std::unique_ptr compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(2U, compaction->num_input_files(0)); + ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber()); +} + +TEST_F(CompactionPickerTest, Level1Trigger) { + NewVersionStorage(6, kCompactionStyleLevel); + Add(1, 66U, "150", "200", 1000000000U); + UpdateVersionStorageInfo(); + + std::unique_ptr compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(1U, compaction->num_input_files(0)); + ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber()); +} + +TEST_F(CompactionPickerTest, Level1Trigger2) { + NewVersionStorage(6, kCompactionStyleLevel); + Add(1, 66U, "150", "200", 1000000001U); + Add(1, 88U, "201", "300", 1000000000U); + Add(2, 6U, "150", "179", 1000000000U); + Add(2, 7U, "180", "220", 1000000000U); + Add(2, 8U, "221", "300", 1000000000U); + UpdateVersionStorageInfo(); + + std::unique_ptr compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(1U, compaction->num_input_files(0)); + ASSERT_EQ(2U, compaction->num_input_files(1)); + ASSERT_EQ(66U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber()); + ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber()); +} + +TEST_F(CompactionPickerTest, LevelMaxScore) { + NewVersionStorage(6, kCompactionStyleLevel); + mutable_cf_options_.target_file_size_base = 10000000; + mutable_cf_options_.target_file_size_multiplier = 10; + mutable_cf_options_.max_bytes_for_level_base = 10 * 1024 * 1024; + Add(0, 1U, "150", "200", 1000000000U); + // Level 1 score 1.2 + Add(1, 66U, "150", "200", 6000000U); + Add(1, 88U, "201", "300", 6000000U); + // Level 2 score 1.8. File 7 is the largest. Should be picked + Add(2, 6U, "150", "179", 60000000U); + Add(2, 7U, "180", "220", 60000001U); + Add(2, 8U, "221", "300", 60000000U); + // Level 3 score slightly larger than 1 + Add(3, 26U, "150", "170", 260000000U); + Add(3, 27U, "171", "179", 260000000U); + Add(3, 28U, "191", "220", 260000000U); + Add(3, 29U, "221", "300", 260000000U); + UpdateVersionStorageInfo(); + + std::unique_ptr compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(1U, compaction->num_input_files(0)); + ASSERT_EQ(7U, compaction->input(0, 0)->fd.GetNumber()); +} + +TEST_F(CompactionPickerTest, NeedsCompactionLevel) { + const int kLevels = 6; + const int kFileCount = 20; + + for (int level = 0; level < kLevels - 1; ++level) { + NewVersionStorage(kLevels, kCompactionStyleLevel); + uint64_t file_size = vstorage_->MaxBytesForLevel(level) * 2 / kFileCount; + for (int file_count = 1; file_count <= kFileCount; ++file_count) { + // start a brand new version in each test. 
+ NewVersionStorage(kLevels, kCompactionStyleLevel); + for (int i = 0; i < file_count; ++i) { + Add(level, i, ToString((i + 100) * 1000).c_str(), + ToString((i + 100) * 1000 + 999).c_str(), + file_size, 0, i * 100, i * 100 + 99); + } + UpdateVersionStorageInfo(); + ASSERT_EQ(vstorage_->CompactionScoreLevel(0), level); + ASSERT_EQ(level_compaction_picker.NeedsCompaction(vstorage_.get()), + vstorage_->CompactionScore(0) >= 1); + // release the version storage + DeleteVersionStorage(); + } + } +} + +TEST_F(CompactionPickerTest, Level0TriggerDynamic) { + int num_levels = ioptions_.num_levels; + ioptions_.level_compaction_dynamic_level_bytes = true; + mutable_cf_options_.level0_file_num_compaction_trigger = 2; + mutable_cf_options_.max_bytes_for_level_base = 200; + mutable_cf_options_.max_bytes_for_level_multiplier = 10; + NewVersionStorage(num_levels, kCompactionStyleLevel); + Add(0, 1U, "150", "200"); + Add(0, 2U, "200", "250"); + + UpdateVersionStorageInfo(); + + std::unique_ptr compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(2U, compaction->num_input_files(0)); + ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber()); + ASSERT_EQ(1, static_cast(compaction->num_input_levels())); + ASSERT_EQ(num_levels - 1, compaction->output_level()); +} + +TEST_F(CompactionPickerTest, Level0TriggerDynamic2) { + int num_levels = ioptions_.num_levels; + ioptions_.level_compaction_dynamic_level_bytes = true; + mutable_cf_options_.level0_file_num_compaction_trigger = 2; + mutable_cf_options_.max_bytes_for_level_base = 200; + mutable_cf_options_.max_bytes_for_level_multiplier = 10; + NewVersionStorage(num_levels, kCompactionStyleLevel); + Add(0, 1U, "150", "200"); + Add(0, 2U, "200", "250"); + Add(num_levels - 1, 3U, "200", "250", 300U); + + UpdateVersionStorageInfo(); + ASSERT_EQ(vstorage_->base_level(), num_levels - 2); + + std::unique_ptr compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(2U, compaction->num_input_files(0)); + ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber()); + ASSERT_EQ(1, static_cast(compaction->num_input_levels())); + ASSERT_EQ(num_levels - 2, compaction->output_level()); +} + +TEST_F(CompactionPickerTest, Level0TriggerDynamic3) { + int num_levels = ioptions_.num_levels; + ioptions_.level_compaction_dynamic_level_bytes = true; + mutable_cf_options_.level0_file_num_compaction_trigger = 2; + mutable_cf_options_.max_bytes_for_level_base = 200; + mutable_cf_options_.max_bytes_for_level_multiplier = 10; + NewVersionStorage(num_levels, kCompactionStyleLevel); + Add(0, 1U, "150", "200"); + Add(0, 2U, "200", "250"); + Add(num_levels - 1, 3U, "200", "250", 300U); + Add(num_levels - 1, 4U, "300", "350", 3000U); + + UpdateVersionStorageInfo(); + ASSERT_EQ(vstorage_->base_level(), num_levels - 3); + + std::unique_ptr compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(2U, compaction->num_input_files(0)); + ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber()); + ASSERT_EQ(1, static_cast(compaction->num_input_levels())); + ASSERT_EQ(num_levels - 3, 
compaction->output_level()); +} + +TEST_F(CompactionPickerTest, Level0TriggerDynamic4) { + int num_levels = ioptions_.num_levels; + ioptions_.level_compaction_dynamic_level_bytes = true; + mutable_cf_options_.level0_file_num_compaction_trigger = 2; + mutable_cf_options_.max_bytes_for_level_base = 200; + mutable_cf_options_.max_bytes_for_level_multiplier = 10; + + NewVersionStorage(num_levels, kCompactionStyleLevel); + Add(0, 1U, "150", "200"); + Add(0, 2U, "200", "250"); + Add(num_levels - 1, 3U, "200", "250", 300U); + Add(num_levels - 1, 4U, "300", "350", 3000U); + Add(num_levels - 3, 5U, "150", "180", 3U); + Add(num_levels - 3, 6U, "181", "300", 3U); + Add(num_levels - 3, 7U, "400", "450", 3U); + + UpdateVersionStorageInfo(); + ASSERT_EQ(vstorage_->base_level(), num_levels - 3); + + std::unique_ptr compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(2U, compaction->num_input_files(0)); + ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber()); + ASSERT_EQ(2U, compaction->num_input_files(1)); + ASSERT_EQ(num_levels - 3, compaction->level(1)); + ASSERT_EQ(5U, compaction->input(1, 0)->fd.GetNumber()); + ASSERT_EQ(6U, compaction->input(1, 1)->fd.GetNumber()); + ASSERT_EQ(2, static_cast(compaction->num_input_levels())); + ASSERT_EQ(num_levels - 3, compaction->output_level()); +} + +TEST_F(CompactionPickerTest, LevelTriggerDynamic4) { + int num_levels = ioptions_.num_levels; + ioptions_.level_compaction_dynamic_level_bytes = true; + mutable_cf_options_.level0_file_num_compaction_trigger = 2; + mutable_cf_options_.max_bytes_for_level_base = 200; + mutable_cf_options_.max_bytes_for_level_multiplier = 10; + mutable_cf_options_.compaction_pri = kMinOverlappingRatio; + NewVersionStorage(num_levels, kCompactionStyleLevel); + Add(0, 1U, "150", "200"); + Add(num_levels - 1, 3U, "200", "250", 300U); + Add(num_levels - 1, 4U, "300", "350", 3000U); + Add(num_levels - 1, 4U, "400", "450", 3U); + Add(num_levels - 2, 5U, "150", "180", 300U); + Add(num_levels - 2, 6U, "181", "350", 500U); + Add(num_levels - 2, 7U, "400", "450", 200U); + + UpdateVersionStorageInfo(); + + std::unique_ptr compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(1U, compaction->num_input_files(0)); + ASSERT_EQ(5U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(0, compaction->num_input_files(1)); + ASSERT_EQ(1U, compaction->num_input_levels()); + ASSERT_EQ(num_levels - 1, compaction->output_level()); +} + +// Universal and FIFO Compactions are not supported in ROCKSDB_LITE +#ifndef ROCKSDB_LITE +TEST_F(CompactionPickerTest, NeedsCompactionUniversal) { + NewVersionStorage(1, kCompactionStyleUniversal); + UniversalCompactionPicker universal_compaction_picker( + ioptions_, &icmp_); + // must return false when there's no files. + ASSERT_EQ(universal_compaction_picker.NeedsCompaction(vstorage_.get()), + false); + UpdateVersionStorageInfo(); + + // verify the trigger given different number of L0 files. 
+ for (int i = 1; + i <= mutable_cf_options_.level0_file_num_compaction_trigger * 2; ++i) { + NewVersionStorage(1, kCompactionStyleUniversal); + Add(0, i, ToString((i + 100) * 1000).c_str(), + ToString((i + 100) * 1000 + 999).c_str(), 1000000, 0, i * 100, + i * 100 + 99); + UpdateVersionStorageInfo(); + ASSERT_EQ(level_compaction_picker.NeedsCompaction(vstorage_.get()), + vstorage_->CompactionScore(0) >= 1); + } +} +// Tests if the files can be trivially moved in multi level +// universal compaction when allow_trivial_move option is set +// In this test as the input files overlaps, they cannot +// be trivially moved. + +TEST_F(CompactionPickerTest, CannotTrivialMoveUniversal) { + const uint64_t kFileSize = 100000; + + ioptions_.compaction_options_universal.allow_trivial_move = true; + NewVersionStorage(1, kCompactionStyleUniversal); + UniversalCompactionPicker universal_compaction_picker(ioptions_, &icmp_); + // must return false when there's no files. + ASSERT_EQ(universal_compaction_picker.NeedsCompaction(vstorage_.get()), + false); + + NewVersionStorage(3, kCompactionStyleUniversal); + + Add(0, 1U, "150", "200", kFileSize, 0, 500, 550); + Add(0, 2U, "201", "250", kFileSize, 0, 401, 450); + Add(0, 4U, "260", "300", kFileSize, 0, 260, 300); + Add(1, 5U, "100", "151", kFileSize, 0, 200, 251); + Add(1, 3U, "301", "350", kFileSize, 0, 101, 150); + Add(2, 6U, "120", "200", kFileSize, 0, 20, 100); + + UpdateVersionStorageInfo(); + + std::unique_ptr compaction( + universal_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + + ASSERT_TRUE(!compaction->is_trivial_move()); +} +// Tests if the files can be trivially moved in multi level +// universal compaction when allow_trivial_move option is set +// In this test as the input files doesn't overlaps, they should +// be trivially moved. +TEST_F(CompactionPickerTest, AllowsTrivialMoveUniversal) { + const uint64_t kFileSize = 100000; + + ioptions_.compaction_options_universal.allow_trivial_move = true; + UniversalCompactionPicker universal_compaction_picker(ioptions_, &icmp_); + + NewVersionStorage(3, kCompactionStyleUniversal); + + Add(0, 1U, "150", "200", kFileSize, 0, 500, 550); + Add(0, 2U, "201", "250", kFileSize, 0, 401, 450); + Add(0, 4U, "260", "300", kFileSize, 0, 260, 300); + Add(1, 5U, "010", "080", kFileSize, 0, 200, 251); + Add(2, 3U, "301", "350", kFileSize, 0, 101, 150); + + UpdateVersionStorageInfo(); + + std::unique_ptr compaction( + universal_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + + ASSERT_TRUE(compaction->is_trivial_move()); +} + +TEST_F(CompactionPickerTest, NeedsCompactionFIFO) { + NewVersionStorage(1, kCompactionStyleFIFO); + const int kFileCount = + mutable_cf_options_.level0_file_num_compaction_trigger * 3; + const uint64_t kFileSize = 100000; + const uint64_t kMaxSize = kFileSize * kFileCount / 2; + + fifo_options_.max_table_files_size = kMaxSize; + ioptions_.compaction_options_fifo = fifo_options_; + FIFOCompactionPicker fifo_compaction_picker(ioptions_, &icmp_); + UpdateVersionStorageInfo(); + // must return false when there's no files. + ASSERT_EQ(fifo_compaction_picker.NeedsCompaction(vstorage_.get()), false); + + // verify whether compaction is needed based on the current + // size of L0 files. 
+ uint64_t current_size = 0; + for (int i = 1; i <= kFileCount; ++i) { + NewVersionStorage(1, kCompactionStyleFIFO); + Add(0, i, ToString((i + 100) * 1000).c_str(), + ToString((i + 100) * 1000 + 999).c_str(), + kFileSize, 0, i * 100, i * 100 + 99); + current_size += kFileSize; + UpdateVersionStorageInfo(); + ASSERT_EQ(level_compaction_picker.NeedsCompaction(vstorage_.get()), + vstorage_->CompactionScore(0) >= 1); + } +} +#endif // ROCKSDB_LITE + +TEST_F(CompactionPickerTest, CompactionPriMinOverlapping1) { + NewVersionStorage(6, kCompactionStyleLevel); + mutable_cf_options_.target_file_size_base = 10000000; + mutable_cf_options_.target_file_size_multiplier = 10; + mutable_cf_options_.max_bytes_for_level_base = 10 * 1024 * 1024; + mutable_cf_options_.compaction_pri = kMinOverlappingRatio; + + Add(2, 6U, "150", "179", 50000000U); + Add(2, 7U, "180", "220", 50000000U); + Add(2, 8U, "321", "400", 50000000U); // File not overlapping + Add(2, 9U, "721", "800", 50000000U); + + Add(3, 26U, "150", "170", 260000000U); + Add(3, 27U, "171", "179", 260000000U); + Add(3, 28U, "191", "220", 260000000U); + Add(3, 29U, "221", "300", 260000000U); + Add(3, 30U, "750", "900", 260000000U); + UpdateVersionStorageInfo(); + + std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(1U, compaction->num_input_files(0)); + // Pick file 8 because it overlaps with 0 files on level 3. + ASSERT_EQ(8U, compaction->input(0, 0)->fd.GetNumber()); +} + +TEST_F(CompactionPickerTest, CompactionPriMinOverlapping2) { + NewVersionStorage(6, kCompactionStyleLevel); + mutable_cf_options_.target_file_size_base = 10000000; + mutable_cf_options_.target_file_size_multiplier = 10; + mutable_cf_options_.max_bytes_for_level_base = 10 * 1024 * 1024; + mutable_cf_options_.compaction_pri = kMinOverlappingRatio; + + Add(2, 6U, "150", "175", + 60000000U); // Overlaps with file 26, 27, total size 521M + Add(2, 7U, "176", "200", 60000000U); // Overlaps with file 27, 28, total size + // 520M, the smallest overlapping + Add(2, 8U, "201", "300", + 60000000U); // Overlaps with file 28, 29, total size 521M + + Add(3, 26U, "100", "110", 261000000U); + Add(3, 26U, "150", "170", 261000000U); + Add(3, 27U, "171", "179", 260000000U); + Add(3, 28U, "191", "220", 260000000U); + Add(3, 29U, "221", "300", 261000000U); + Add(3, 30U, "321", "400", 261000000U); + UpdateVersionStorageInfo(); + + std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(1U, compaction->num_input_files(0)); + // Picking file 7 because its overlapping ratio is the smallest. + ASSERT_EQ(7U, compaction->input(0, 0)->fd.GetNumber()); +} + +TEST_F(CompactionPickerTest, CompactionPriMinOverlapping3) { + NewVersionStorage(6, kCompactionStyleLevel); + mutable_cf_options_.max_bytes_for_level_base = 10000000; + mutable_cf_options_.max_bytes_for_level_multiplier = 10; + mutable_cf_options_.compaction_pri = kMinOverlappingRatio; + + // Files 7 and 8 each overlap just one file on the next level, but file 8 is + // larger, so its overlapping ratio is smaller and it will be picked. + Add(2, 6U, "150", "167", 60000000U); // Overlaps with file 26, 27 + Add(2, 7U, "168", "169", 60000000U); // Overlaps with file 27 + Add(2, 8U, "201", "300", 61000000U); // Overlaps with file 28, but the file + // itself is larger. Should be picked. 
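+ // With these sizes the min-overlapping-ratio arithmetic comes out roughly + // as: file 6 overlaps files 26 and 27 (520MB) at 60MB, ratio ~8.7; file 7 + // overlaps file 27 (260MB) at 60MB, ratio ~4.3; file 8 overlaps file 28 + // (260MB) at 61MB, ratio ~4.26 -- the smallest ratio, hence the pick below.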
+ + Add(3, 26U, "160", "165", 260000000U); + Add(3, 27U, "166", "170", 260000000U); + Add(3, 28U, "180", "400", 260000000U); + Add(3, 29U, "401", "500", 260000000U); + UpdateVersionStorageInfo(); + + std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(1U, compaction->num_input_files(0)); + // Picking file 8 because its overlapping ratio is the smallest. + ASSERT_EQ(8U, compaction->input(0, 0)->fd.GetNumber()); +} + +// This test exhibits the bug where we don't properly reset parent_index in +// PickCompaction() +TEST_F(CompactionPickerTest, ParentIndexResetBug) { + int num_levels = ioptions_.num_levels; + mutable_cf_options_.level0_file_num_compaction_trigger = 2; + mutable_cf_options_.max_bytes_for_level_base = 200; + NewVersionStorage(num_levels, kCompactionStyleLevel); + Add(0, 1U, "150", "200"); // <- marked for compaction + Add(1, 3U, "400", "500", 600); // <- this one needs compacting + Add(2, 4U, "150", "200"); + Add(2, 5U, "201", "210"); + Add(2, 6U, "300", "310"); + Add(2, 7U, "400", "500"); // <- being compacted + + vstorage_->LevelFiles(2)[3]->being_compacted = true; + vstorage_->LevelFiles(0)[0]->marked_for_compaction = true; + + UpdateVersionStorageInfo(); + + std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); +} + +// This test checks ExpandWhileOverlapping() by having overlapping user key +// ranges (with different sequence numbers) in the input files. +TEST_F(CompactionPickerTest, OverlappingUserKeys) { + NewVersionStorage(6, kCompactionStyleLevel); + mutable_cf_options_.compaction_pri = kByCompensatedSize; + + Add(1, 1U, "100", "150", 1U); + // Overlapping user keys + Add(1, 2U, "200", "400", 1U); + Add(1, 3U, "400", "500", 1000000000U, 0, 0); + Add(2, 4U, "600", "700", 1U); + UpdateVersionStorageInfo(); + + std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(1U, compaction->num_input_levels()); + ASSERT_EQ(2U, compaction->num_input_files(0)); + ASSERT_EQ(2U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(3U, compaction->input(0, 1)->fd.GetNumber()); +} + +TEST_F(CompactionPickerTest, OverlappingUserKeys2) { + NewVersionStorage(6, kCompactionStyleLevel); + // Overlapping user keys on same level and output level + Add(1, 1U, "200", "400", 1000000000U); + Add(1, 2U, "400", "500", 1U, 0, 0); + Add(2, 3U, "000", "100", 1U); + Add(2, 4U, "100", "600", 1U, 0, 0); + Add(2, 5U, "600", "700", 1U, 0, 0); + UpdateVersionStorageInfo(); + + std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(2U, compaction->num_input_levels()); + ASSERT_EQ(2U, compaction->num_input_files(0)); + ASSERT_EQ(3U, compaction->num_input_files(1)); + ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber()); + ASSERT_EQ(3U, compaction->input(1, 0)->fd.GetNumber()); + ASSERT_EQ(4U, compaction->input(1, 1)->fd.GetNumber()); + ASSERT_EQ(5U, compaction->input(1, 2)->fd.GetNumber()); +} + +TEST_F(CompactionPickerTest, OverlappingUserKeys3) { + NewVersionStorage(6, kCompactionStyleLevel); + // Chain of overlapping user key ranges (forces ExpandWhileOverlapping() to + // 
expand multiple times) + Add(1, 1U, "100", "150", 1U); + Add(1, 2U, "150", "200", 1U, 0, 0); + Add(1, 3U, "200", "250", 1000000000U, 0, 0); + Add(1, 4U, "250", "300", 1U, 0, 0); + Add(1, 5U, "300", "350", 1U, 0, 0); + // Output level overlaps with the beginning and the end of the chain + Add(2, 6U, "050", "100", 1U); + Add(2, 7U, "350", "400", 1U); + UpdateVersionStorageInfo(); + + std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(2U, compaction->num_input_levels()); + ASSERT_EQ(5U, compaction->num_input_files(0)); + ASSERT_EQ(2U, compaction->num_input_files(1)); + ASSERT_EQ(1U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(2U, compaction->input(0, 1)->fd.GetNumber()); + ASSERT_EQ(3U, compaction->input(0, 2)->fd.GetNumber()); + ASSERT_EQ(4U, compaction->input(0, 3)->fd.GetNumber()); + ASSERT_EQ(5U, compaction->input(0, 4)->fd.GetNumber()); + ASSERT_EQ(6U, compaction->input(1, 0)->fd.GetNumber()); + ASSERT_EQ(7U, compaction->input(1, 1)->fd.GetNumber()); +} + +TEST_F(CompactionPickerTest, OverlappingUserKeys4) { + NewVersionStorage(6, kCompactionStyleLevel); + mutable_cf_options_.max_bytes_for_level_base = 1000000; + + Add(1, 1U, "100", "150", 1U); + Add(1, 2U, "150", "199", 1U, 0, 0); + Add(1, 3U, "200", "250", 1100000U, 0, 0); + Add(1, 4U, "251", "300", 1U, 0, 0); + Add(1, 5U, "300", "350", 1U, 0, 0); + + Add(2, 6U, "100", "115", 1U); + Add(2, 7U, "125", "325", 1U); + Add(2, 8U, "350", "400", 1U); + UpdateVersionStorageInfo(); + + std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); + ASSERT_EQ(2U, compaction->num_input_levels()); + ASSERT_EQ(1U, compaction->num_input_files(0)); + ASSERT_EQ(1U, compaction->num_input_files(1)); + ASSERT_EQ(3U, compaction->input(0, 0)->fd.GetNumber()); + ASSERT_EQ(7U, compaction->input(1, 0)->fd.GetNumber()); +} + +TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri1) { + NewVersionStorage(6, kCompactionStyleLevel); + mutable_cf_options_.level0_file_num_compaction_trigger = 2; + mutable_cf_options_.max_bytes_for_level_base = 900000000U; + + // 6 L0 files, score 3. + Add(0, 1U, "000", "400", 1U); + Add(0, 2U, "001", "400", 1U, 0, 0); + Add(0, 3U, "001", "400", 1000000000U, 0, 0); + Add(0, 31U, "001", "400", 1000000000U, 0, 0); + Add(0, 32U, "001", "400", 1000000000U, 0, 0); + Add(0, 33U, "001", "400", 1000000000U, 0, 0); + + // L1 total size 2GB, score 2.2. If one file is being compacted, score 1.1. + Add(1, 4U, "050", "300", 1000000000U, 0, 0); + file_map_[4u].first->being_compacted = true; + Add(1, 5U, "301", "350", 1000000000U, 0, 0); + + // Output level overlaps with the beginning and the end of the chain + Add(2, 6U, "050", "100", 1U); + Add(2, 7U, "300", "400", 1U); + + // No compaction should be scheduled if L0 has higher priority than L1, + // but L0->L1 compaction is blocked by a file in L1 being compacted. + UpdateVersionStorageInfo(); + ASSERT_EQ(0, vstorage_->CompactionScoreLevel(0)); + ASSERT_EQ(1, vstorage_->CompactionScoreLevel(1)); + std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() == nullptr); +} + +TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri2) { + NewVersionStorage(6, kCompactionStyleLevel); + mutable_cf_options_.level0_file_num_compaction_trigger = 2; + mutable_cf_options_.max_bytes_for_level_base = 900000000U; + + // 6 L0 files, score 3. + Add(0, 1U, "000", "400", 1U); + Add(0, 2U, "001", "400", 1U, 0, 0); + Add(0, 3U, "001", "400", 1000000000U, 0, 0); + Add(0, 31U, "001", "400", 1000000000U, 0, 0); + Add(0, 32U, "001", "400", 1000000000U, 0, 0); + Add(0, 33U, "001", "400", 1000000000U, 0, 0); + + // L1 total size 2GB, score 2.2. If one file is being compacted, score 1.1. + Add(1, 4U, "050", "300", 1000000000U, 0, 0); + Add(1, 5U, "301", "350", 1000000000U, 0, 0); + + // Output level overlaps with the beginning and the end of the chain + Add(2, 6U, "050", "100", 1U); + Add(2, 7U, "300", "400", 1U); + + // If no file in L1 is being compacted, L0->L1 compaction will be scheduled. + UpdateVersionStorageInfo(); // being_compacted flag is cleared here. + ASSERT_EQ(0, vstorage_->CompactionScoreLevel(0)); + ASSERT_EQ(1, vstorage_->CompactionScoreLevel(1)); + std::unique_ptr<Compaction> compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); +} + +TEST_F(CompactionPickerTest, NotScheduleL1IfL0WithHigherPri3) { + NewVersionStorage(6, kCompactionStyleLevel); + mutable_cf_options_.level0_file_num_compaction_trigger = 2; + mutable_cf_options_.max_bytes_for_level_base = 900000000U; + + // 6 L0 files, score 3. + Add(0, 1U, "000", "400", 1U); + Add(0, 2U, "001", "400", 1U, 0, 0); + Add(0, 3U, "001", "400", 1000000000U, 0, 0); + Add(0, 31U, "001", "400", 1000000000U, 0, 0); + Add(0, 32U, "001", "400", 1000000000U, 0, 0); + Add(0, 33U, "001", "400", 1000000000U, 0, 0); + + // L1 score more than 6. + Add(1, 4U, "050", "300", 1000000000U, 0, 0); + file_map_[4u].first->being_compacted = true; + Add(1, 5U, "301", "350", 1000000000U, 0, 0); + Add(1, 51U, "351", "400", 6000000000U, 0, 0); + + // Output level overlaps with the beginning and the end of the chain + Add(2, 6U, "050", "100", 1U); + Add(2, 7U, "300", "400", 1U); + + // If score in L1 is larger than L0, L1 compaction goes through even though + // there is a pending L0 compaction. 
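+ // Rough arithmetic behind this expectation: L0 holds 6 files against a + // trigger of 2, a score of about 3, while L1 holds ~8GB against a 900MB + // base target; even with the 1GB file already under compaction excluded, + // ~7GB remains, a score near 8, so L1 outranks L0 and is scheduled anyway.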
+ UpdateVersionStorageInfo(); + ASSERT_EQ(1, vstorage_->CompactionScoreLevel(0)); + ASSERT_EQ(0, vstorage_->CompactionScoreLevel(1)); + std::unique_ptr compaction(level_compaction_picker.PickCompaction( + cf_name_, mutable_cf_options_, vstorage_.get(), &log_buffer_)); + ASSERT_TRUE(compaction.get() != nullptr); +} + +TEST_F(CompactionPickerTest, EstimateCompactionBytesNeeded1) { + int num_levels = ioptions_.num_levels; + ioptions_.level_compaction_dynamic_level_bytes = false; + mutable_cf_options_.level0_file_num_compaction_trigger = 3; + mutable_cf_options_.max_bytes_for_level_base = 1000; + mutable_cf_options_.max_bytes_for_level_multiplier = 10; + NewVersionStorage(num_levels, kCompactionStyleLevel); + Add(0, 1U, "150", "200", 200); + Add(0, 2U, "150", "200", 200); + Add(0, 3U, "150", "200", 200); + // Level 1 is over target by 200 + Add(1, 4U, "400", "500", 600); + Add(1, 5U, "600", "700", 600); + // Level 2 is less than target 10000 even added size of level 1 + // Size ratio of L2/L1 is 9600 / 1200 = 8 + Add(2, 6U, "150", "200", 2500); + Add(2, 7U, "201", "210", 2000); + Add(2, 8U, "300", "310", 2600); + Add(2, 9U, "400", "500", 2500); + // Level 3 exceeds target 100,000 of 1000 + Add(3, 10U, "400", "500", 101000); + // Level 4 exceeds target 1,000,000 by 900 after adding size from level 3 + // Size ratio L4/L3 is 9.9 + // After merge from L3, L4 size is 1000900 + Add(4, 11U, "400", "500", 999900); + Add(5, 11U, "400", "500", 8007200); + + UpdateVersionStorageInfo(); + + ASSERT_EQ(200u * 9u + 10900u + 900u * 9, + vstorage_->estimated_compaction_needed_bytes()); +} + +TEST_F(CompactionPickerTest, EstimateCompactionBytesNeeded2) { + int num_levels = ioptions_.num_levels; + ioptions_.level_compaction_dynamic_level_bytes = false; + mutable_cf_options_.level0_file_num_compaction_trigger = 3; + mutable_cf_options_.max_bytes_for_level_base = 1000; + mutable_cf_options_.max_bytes_for_level_multiplier = 10; + NewVersionStorage(num_levels, kCompactionStyleLevel); + Add(0, 1U, "150", "200", 200); + Add(0, 2U, "150", "200", 200); + Add(0, 4U, "150", "200", 200); + Add(0, 5U, "150", "200", 200); + Add(0, 6U, "150", "200", 200); + // Level 1 size will be 1400 after merging with L0 + Add(1, 7U, "400", "500", 200); + Add(1, 8U, "600", "700", 200); + // Level 2 is less than target 10000 even added size of level 1 + Add(2, 9U, "150", "200", 9100); + // Level 3 over the target, but since level 4 is empty, we assume it will be + // a trivial move. 
+ Add(3, 10U, "400", "500", 101000); + + UpdateVersionStorageInfo(); + + // estimated L1->L2 merge: 400 * (9100.0 / 1400.0 + 1.0) + ASSERT_EQ(1400u + 3000u, vstorage_->estimated_compaction_needed_bytes()); +} + +TEST_F(CompactionPickerTest, EstimateCompactionBytesNeeded3) { + int num_levels = ioptions_.num_levels; + ioptions_.level_compaction_dynamic_level_bytes = false; + mutable_cf_options_.level0_file_num_compaction_trigger = 3; + mutable_cf_options_.max_bytes_for_level_base = 1000; + mutable_cf_options_.max_bytes_for_level_multiplier = 10; + NewVersionStorage(num_levels, kCompactionStyleLevel); + Add(0, 1U, "150", "200", 2000); + Add(0, 2U, "150", "200", 2000); + Add(0, 4U, "150", "200", 2000); + Add(0, 5U, "150", "200", 2000); + Add(0, 6U, "150", "200", 1000); + // Level 1 size will be 10000 after merging with L0 + Add(1, 7U, "400", "500", 500); + Add(1, 8U, "600", "700", 500); + + Add(2, 9U, "150", "200", 10000); + + UpdateVersionStorageInfo(); + + ASSERT_EQ(10000u + 18000u, vstorage_->estimated_compaction_needed_bytes()); +} + +TEST_F(CompactionPickerTest, EstimateCompactionBytesNeededDynamicLevel) { + int num_levels = ioptions_.num_levels; + ioptions_.level_compaction_dynamic_level_bytes = true; + mutable_cf_options_.level0_file_num_compaction_trigger = 3; + mutable_cf_options_.max_bytes_for_level_base = 1000; + mutable_cf_options_.max_bytes_for_level_multiplier = 10; + NewVersionStorage(num_levels, kCompactionStyleLevel); + + // Set last level size 50000 + // num_levels - 1 target 5000 + // num_levels - 2 is base level with target 500 + Add(num_levels - 1, 10U, "400", "500", 50000); + + Add(0, 1U, "150", "200", 200); + Add(0, 2U, "150", "200", 200); + Add(0, 4U, "150", "200", 200); + Add(0, 5U, "150", "200", 200); + Add(0, 6U, "150", "200", 200); + // num_levels - 3 is over target by 100 + 1000 + Add(num_levels - 3, 7U, "400", "500", 300); + Add(num_levels - 3, 8U, "600", "700", 300); + // num_levels - 2 is over target by 1100 + 200 + Add(num_levels - 2, 9U, "150", "200", 5200); + + UpdateVersionStorageInfo(); + + // Merging to the second last level: (5200 / 1600 + 1) * 1100 + // Merging to the last level: (50000 / 6300 + 1) * 1300 + ASSERT_EQ(1600u + 4675u + 11617u, + vstorage_->estimated_compaction_needed_bytes()); +} + +TEST_F(CompactionPickerTest, IsBottommostLevelTest) { + // case 1: Higher levels are empty + NewVersionStorage(6, kCompactionStyleLevel); + Add(0, 1U, "a", "m"); + Add(0, 2U, "c", "z"); + Add(1, 3U, "d", "e"); + Add(1, 4U, "l", "p"); + 
Add(2, 5U, "g", "i"); + Add(2, 6U, "x", "z"); + Add(3, 7U, "e", "g"); + Add(3, 8U, "h", "k"); + Add(4, 9U, "a", "b"); + Add(5, 10U, "c", "cc"); + UpdateVersionStorageInfo(); + SetCompactionInputFilesLevels(2, 1); + AddToCompactionFiles(3U); + AddToCompactionFiles(5U); + result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_); + ASSERT_FALSE(result); + + // case 3.2: Higher levels (level 5) have overlap + DeleteVersionStorage(); + NewVersionStorage(6, kCompactionStyleLevel); + Add(0, 1U, "a", "m"); + Add(0, 2U, "c", "z"); + Add(1, 3U, "d", "e"); + Add(1, 4U, "l", "p"); + Add(2, 5U, "g", "i"); + Add(2, 6U, "x", "z"); + Add(3, 7U, "j", "k"); + Add(3, 8U, "l", "m"); + Add(4, 9U, "a", "b"); + Add(5, 10U, "c", "cc"); + Add(5, 11U, "h", "k"); + Add(5, 12U, "y", "yy"); + Add(5, 13U, "z", "zz"); + UpdateVersionStorageInfo(); + SetCompactionInputFilesLevels(2, 1); + AddToCompactionFiles(3U); + AddToCompactionFiles(5U); + result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_); + ASSERT_FALSE(result); + + // case 3.3: Higher levels (level 5) have overlap, but it's only overlapping + // one key ("d") + NewVersionStorage(6, kCompactionStyleLevel); + Add(0, 1U, "a", "m"); + Add(0, 2U, "c", "z"); + Add(1, 3U, "d", "e"); + Add(1, 4U, "l", "p"); + Add(2, 5U, "g", "i"); + Add(2, 6U, "x", "z"); + Add(3, 7U, "j", "k"); + Add(3, 8U, "l", "m"); + Add(4, 9U, "a", "b"); + Add(5, 10U, "c", "cc"); + Add(5, 11U, "ccc", "d"); + Add(5, 12U, "y", "yy"); + Add(5, 13U, "z", "zz"); + UpdateVersionStorageInfo(); + SetCompactionInputFilesLevels(2, 1); + AddToCompactionFiles(3U); + AddToCompactionFiles(5U); + result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_); + ASSERT_FALSE(result); + + // Level 0 files overlap + NewVersionStorage(6, kCompactionStyleLevel); + Add(0, 1U, "s", "t"); + Add(0, 2U, "a", "m"); + Add(0, 3U, "b", "z"); + Add(0, 4U, "e", "f"); + Add(5, 10U, "y", "z"); + UpdateVersionStorageInfo(); + SetCompactionInputFilesLevels(1, 0); + AddToCompactionFiles(1U); + AddToCompactionFiles(2U); + AddToCompactionFiles(3U); + AddToCompactionFiles(4U); + result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_); + ASSERT_FALSE(result); + + // Level 0 files don't overlap + NewVersionStorage(6, kCompactionStyleLevel); + Add(0, 1U, "s", "t"); + Add(0, 2U, "a", "m"); + Add(0, 3U, "b", "k"); + Add(0, 4U, "e", "f"); + Add(5, 10U, "y", "z"); + UpdateVersionStorageInfo(); + SetCompactionInputFilesLevels(1, 0); + AddToCompactionFiles(1U); + AddToCompactionFiles(2U); + AddToCompactionFiles(3U); + AddToCompactionFiles(4U); + result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_); + ASSERT_TRUE(result); + + // Level 1 files overlap + NewVersionStorage(6, kCompactionStyleLevel); + Add(0, 1U, "s", "t"); + Add(0, 2U, "a", "m"); + Add(0, 3U, "b", "k"); + Add(0, 4U, "e", "f"); + Add(1, 5U, "a", "m"); + Add(1, 6U, "n", "o"); + Add(1, 7U, "w", "y"); + Add(5, 10U, "y", "z"); + UpdateVersionStorageInfo(); + SetCompactionInputFilesLevels(2, 0); + AddToCompactionFiles(1U); + AddToCompactionFiles(2U); + AddToCompactionFiles(3U); + AddToCompactionFiles(4U); + AddToCompactionFiles(5U); + AddToCompactionFiles(6U); + AddToCompactionFiles(7U); + result = Compaction::TEST_IsBottommostLevel(2, vstorage_.get(), input_files_); + ASSERT_FALSE(result); + + DeleteVersionStorage(); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git 
a/external/rocksdb/db/comparator_db_test.cc b/external/rocksdb/db/comparator_db_test.cc new file mode 100755 index 0000000000..e4e84107ef --- /dev/null +++ b/external/rocksdb/db/comparator_db_test.cc @@ -0,0 +1,439 @@ +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#include +#include + +#include "memtable/stl_wrappers.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "util/hash.h" +#include "util/kv_map.h" +#include "util/string_util.h" +#include "util/testharness.h" +#include "util/testutil.h" +#include "utilities/merge_operators.h" + +using std::unique_ptr; + +namespace rocksdb { +namespace { + +static const Comparator* comparator; + +class KVIter : public Iterator { + public: + explicit KVIter(const stl_wrappers::KVMap* map) + : map_(map), iter_(map_->end()) {} + virtual bool Valid() const override { return iter_ != map_->end(); } + virtual void SeekToFirst() override { iter_ = map_->begin(); } + virtual void SeekToLast() override { + if (map_->empty()) { + iter_ = map_->end(); + } else { + iter_ = map_->find(map_->rbegin()->first); + } + } + virtual void Seek(const Slice& k) override { + iter_ = map_->lower_bound(k.ToString()); + } + virtual void Next() override { ++iter_; } + virtual void Prev() override { + if (iter_ == map_->begin()) { + iter_ = map_->end(); + return; + } + --iter_; + } + + virtual Slice key() const override { return iter_->first; } + virtual Slice value() const override { return iter_->second; } + virtual Status status() const override { return Status::OK(); } + + private: + const stl_wrappers::KVMap* const map_; + stl_wrappers::KVMap::const_iterator iter_; +}; + +void AssertItersEqual(Iterator* iter1, Iterator* iter2) { + ASSERT_EQ(iter1->Valid(), iter2->Valid()); + if (iter1->Valid()) { + ASSERT_EQ(iter1->key().ToString(), iter2->key().ToString()); + ASSERT_EQ(iter1->value().ToString(), iter2->value().ToString()); + } +} + +// Measuring operations on DB (expect to be empty). 
+// source_strings are candidate keys +void DoRandomIteraratorTest(DB* db, std::vector<std::string> source_strings, + Random* rnd, int num_writes, int num_iter_ops, + int num_trigger_flush) { + stl_wrappers::KVMap map((stl_wrappers::LessOfComparator(comparator))); + + for (int i = 0; i < num_writes; i++) { + if (num_trigger_flush > 0 && i != 0 && i % num_trigger_flush == 0) { + db->Flush(FlushOptions()); + } + + int type = rnd->Uniform(2); + int index = rnd->Uniform(static_cast<int>(source_strings.size())); + auto& key = source_strings[index]; + switch (type) { + case 0: + // put + map[key] = key; + ASSERT_OK(db->Put(WriteOptions(), key, key)); + break; + case 1: + // delete + if (map.find(key) != map.end()) { + map.erase(key); + } + ASSERT_OK(db->Delete(WriteOptions(), key)); + break; + default: + assert(false); + } + } + + std::unique_ptr<Iterator> iter(db->NewIterator(ReadOptions())); + std::unique_ptr<Iterator> result_iter(new KVIter(&map)); + + bool is_valid = false; + for (int i = 0; i < num_iter_ops; i++) { + // Random walk and make sure iter and result_iter return the + // same key and value + int type = rnd->Uniform(6); + ASSERT_OK(iter->status()); + switch (type) { + case 0: + // Seek to First + iter->SeekToFirst(); + result_iter->SeekToFirst(); + break; + case 1: + // Seek to last + iter->SeekToLast(); + result_iter->SeekToLast(); + break; + case 2: { + // Seek to random key + auto key_idx = rnd->Uniform(static_cast<int>(source_strings.size())); + auto key = source_strings[key_idx]; + iter->Seek(key); + result_iter->Seek(key); + break; + } + case 3: + // Next + if (is_valid) { + iter->Next(); + result_iter->Next(); + } else { + continue; + } + break; + case 4: + // Prev + if (is_valid) { + iter->Prev(); + result_iter->Prev(); + } else { + continue; + } + break; + default: { + assert(type == 5); + auto key_idx = rnd->Uniform(static_cast<int>(source_strings.size())); + auto key = source_strings[key_idx]; + std::string result; + auto status = db->Get(ReadOptions(), key, &result); + if (map.find(key) == map.end()) { + ASSERT_TRUE(status.IsNotFound()); + } else { + ASSERT_EQ(map[key], result); + } + break; + } + } + AssertItersEqual(iter.get(), result_iter.get()); + is_valid = iter->Valid(); + } +} + +class DoubleComparator : public Comparator { + public: + DoubleComparator() {} + + virtual const char* Name() const override { return "DoubleComparator"; } + + virtual int Compare(const Slice& a, const Slice& b) const override { +#ifndef CYGWIN + double da = std::stod(a.ToString()); + double db = std::stod(b.ToString()); +#else + double da = std::strtod(a.ToString().c_str(), 0 /* endptr */); + double db = std::strtod(b.ToString().c_str(), 0 /* endptr */); +#endif + if (da == db) { + return a.compare(b); + } else if (da > db) { + return 1; + } else { + return -1; + } + } + virtual void FindShortestSeparator(std::string* start, + const Slice& limit) const override {} + + virtual void FindShortSuccessor(std::string* key) const override {} +}; + +class HashComparator : public Comparator { + public: + HashComparator() {} + + virtual const char* Name() const override { return "HashComparator"; } + + virtual int Compare(const Slice& a, const Slice& b) const override { + uint32_t ha = Hash(a.data(), a.size(), 66); + uint32_t hb = Hash(b.data(), b.size(), 66); + if (ha == hb) { + return a.compare(b); + } else if (ha > hb) { + return 1; + } else { + return -1; + } + } + virtual void FindShortestSeparator(std::string* start, + const Slice& limit) const override {} + + virtual void FindShortSuccessor(std::string* key) const override {} +}; + 
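Both comparators above order keys by a derived quantity (a parsed double, a 32-bit hash) and fall back to a.compare(b) on ties, so no two distinct keys ever compare equal. That is the core of the Comparator contract these tests exercise: a strict total order whose Name() must stay stable for the lifetime of a DB. A minimal sketch of a conforming comparator, assuming only the public rocksdb::Comparator interface (the LengthFirstComparator name and its ordering rule are illustrative, not part of RocksDB):

#include "rocksdb/comparator.h"
#include "rocksdb/slice.h"

namespace {

// Orders keys by length first, then bytewise; the bytewise tie-break keeps
// the order total, so distinct keys never compare equal.
class LengthFirstComparator : public rocksdb::Comparator {
 public:
  // The name is persisted with the DB; changing it (or the ordering rule)
  // for an existing DB makes reopening it unsafe.
  const char* Name() const override { return "LengthFirstComparator"; }

  int Compare(const rocksdb::Slice& a,
              const rocksdb::Slice& b) const override {
    if (a.size() != b.size()) {
      return a.size() < b.size() ? -1 : 1;
    }
    return a.compare(b);  // tie-break: plain bytewise comparison
  }

  // No-op implementations are legal: these hooks are only optimizations
  // that let the table builder shorten index keys.
  void FindShortestSeparator(std::string* /*start*/,
                             const rocksdb::Slice& /*limit*/) const override {}
  void FindShortSuccessor(std::string* /*key*/) const override {}
};

}  // namespace

Such a comparator would be plugged in through Options::comparator before DB::Open, which is what SetOwnedComparator does in the fixture below.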
+class TwoStrComparator : public Comparator { + public: + TwoStrComparator() {} + + virtual const char* Name() const override { return "TwoStrComparator"; } + + virtual int Compare(const Slice& a, const Slice& b) const override { + assert(a.size() >= 2); + assert(b.size() >= 2); + size_t size_a1 = static_cast<size_t>(a[0]); + size_t size_b1 = static_cast<size_t>(b[0]); + size_t size_a2 = static_cast<size_t>(a[1]); + size_t size_b2 = static_cast<size_t>(b[1]); + assert(size_a1 + size_a2 + 2 == a.size()); + assert(size_b1 + size_b2 + 2 == b.size()); + + Slice a1 = Slice(a.data() + 2, size_a1); + Slice b1 = Slice(b.data() + 2, size_b1); + Slice a2 = Slice(a.data() + 2 + size_a1, size_a2); + Slice b2 = Slice(b.data() + 2 + size_b1, size_b2); + + if (a1 != b1) { + return a1.compare(b1); + } + return a2.compare(b2); + } + virtual void FindShortestSeparator(std::string* start, + const Slice& limit) const override {} + + virtual void FindShortSuccessor(std::string* key) const override {} +}; +} // namespace + +class ComparatorDBTest : public testing::Test { + private: + std::string dbname_; + Env* env_; + DB* db_; + Options last_options_; + std::unique_ptr<const Comparator> comparator_guard; + + public: + ComparatorDBTest() : env_(Env::Default()), db_(nullptr) { + comparator = BytewiseComparator(); + dbname_ = test::TmpDir() + "/comparator_db_test"; + EXPECT_OK(DestroyDB(dbname_, last_options_)); + } + + ~ComparatorDBTest() { + delete db_; + EXPECT_OK(DestroyDB(dbname_, last_options_)); + comparator = BytewiseComparator(); + } + + DB* GetDB() { return db_; } + + void SetOwnedComparator(const Comparator* cmp) { + comparator_guard.reset(cmp); + comparator = cmp; + last_options_.comparator = cmp; + } + + // Return the current option configuration. + Options* GetOptions() { return &last_options_; } + + void DestroyAndReopen() { + // Destroy using last options + Destroy(); + ASSERT_OK(TryReopen()); + } + + void Destroy() { + delete db_; + db_ = nullptr; + ASSERT_OK(DestroyDB(dbname_, last_options_)); + } + + Status TryReopen() { + delete db_; + db_ = nullptr; + last_options_.create_if_missing = true; + + return DB::Open(last_options_, dbname_, &db_); + } +}; + +TEST_F(ComparatorDBTest, Bytewise) { + for (int rand_seed = 301; rand_seed < 306; rand_seed++) { + DestroyAndReopen(); + Random rnd(rand_seed); + DoRandomIteraratorTest(GetDB(), + {"a", "b", "c", "d", "e", "f", "g", "h", "i"}, &rnd, + 8, 100, 3); + } +} + +TEST_F(ComparatorDBTest, SimpleSuffixReverseComparator) { + SetOwnedComparator(new test::SimpleSuffixReverseComparator()); + + for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) { + Options* opt = GetOptions(); + opt->comparator = comparator; + DestroyAndReopen(); + Random rnd(rnd_seed); + + std::vector<std::string> source_strings; + std::vector<std::string> source_prefixes; + // Randomly generate 5 prefixes + for (int i = 0; i < 5; i++) { + source_prefixes.push_back(test::RandomHumanReadableString(&rnd, 8)); + } + for (int j = 0; j < 20; j++) { + int prefix_index = rnd.Uniform(static_cast<int>(source_prefixes.size())); + std::string key = source_prefixes[prefix_index] + + test::RandomHumanReadableString(&rnd, rnd.Uniform(8)); + source_strings.push_back(key); + } + + DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 30, 600, 66); + } +} + +TEST_F(ComparatorDBTest, Uint64Comparator) { + SetOwnedComparator(test::Uint64Comparator()); + + for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) { + Options* opt = GetOptions(); + opt->comparator = comparator; + DestroyAndReopen(); + Random rnd(rnd_seed); + Random64 rnd64(rnd_seed); + + std::vector<std::string> source_strings; + // 
Randomly generate source keys + for (int i = 0; i < 100; i++) { + uint64_t r = rnd64.Next(); + std::string str; + str.resize(8); + memcpy(&str[0], static_cast<void*>(&r), 8); + source_strings.push_back(str); + } + + DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 200, 1000, 66); + } +} + +TEST_F(ComparatorDBTest, DoubleComparator) { + SetOwnedComparator(new DoubleComparator()); + + for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) { + Options* opt = GetOptions(); + opt->comparator = comparator; + DestroyAndReopen(); + Random rnd(rnd_seed); + + std::vector<std::string> source_strings; + // Randomly generate source keys + for (int i = 0; i < 100; i++) { + uint32_t r = rnd.Next(); + uint32_t divide_order = rnd.Uniform(8); + double to_divide = 1.0; + for (uint32_t j = 0; j < divide_order; j++) { + to_divide *= 10.0; + } + source_strings.push_back(ToString(r / to_divide)); + } + + DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 200, 1000, 66); + } +} + +TEST_F(ComparatorDBTest, HashComparator) { + SetOwnedComparator(new HashComparator()); + + for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) { + Options* opt = GetOptions(); + opt->comparator = comparator; + DestroyAndReopen(); + Random rnd(rnd_seed); + + std::vector<std::string> source_strings; + // Randomly generate source keys + for (int i = 0; i < 100; i++) { + source_strings.push_back(test::RandomKey(&rnd, 8)); + } + + DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 200, 1000, 66); + } +} + +TEST_F(ComparatorDBTest, TwoStrComparator) { + SetOwnedComparator(new TwoStrComparator()); + + for (int rnd_seed = 301; rnd_seed < 316; rnd_seed++) { + Options* opt = GetOptions(); + opt->comparator = comparator; + DestroyAndReopen(); + Random rnd(rnd_seed); + + std::vector<std::string> source_strings; + // Randomly generate source keys + for (int i = 0; i < 100; i++) { + std::string str; + uint32_t size1 = rnd.Uniform(8); + uint32_t size2 = rnd.Uniform(8); + str.append(1, static_cast<char>(size1)); + str.append(1, static_cast<char>(size2)); + str.append(test::RandomKey(&rnd, size1)); + str.append(test::RandomKey(&rnd, size2)); + source_strings.push_back(str); + } + + DoRandomIteraratorTest(GetDB(), source_strings, &rnd, 200, 1000, 66); + } +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/convenience.cc b/external/rocksdb/db/convenience.cc new file mode 100755 index 0000000000..b1042c74d5 --- /dev/null +++ b/external/rocksdb/db/convenience.cc @@ -0,0 +1,30 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2012 Facebook. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
+ +#ifndef ROCKSDB_LITE + +#include "rocksdb/convenience.h" + +#include "db/db_impl.h" + +namespace rocksdb { + +void CancelAllBackgroundWork(DB* db, bool wait) { + (dynamic_cast<DBImpl*>(db))->CancelAllBackgroundWork(wait); +} + +Status DeleteFilesInRange(DB* db, ColumnFamilyHandle* column_family, + const Slice* begin, const Slice* end) { + return (dynamic_cast<DBImpl*>(db)) + ->DeleteFilesInRange(column_family, begin, end); +} + +} // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/db/corruption_test.cc b/external/rocksdb/db/corruption_test.cc new file mode 100755 index 0000000000..e7d82407ae --- /dev/null +++ b/external/rocksdb/db/corruption_test.cc @@ -0,0 +1,504 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef ROCKSDB_LITE + +#include "rocksdb/db.h" + +#include <errno.h> +#include <fcntl.h> +#include <inttypes.h> +#include <sys/stat.h> +#include <sys/types.h> +#include "rocksdb/cache.h" +#include "rocksdb/env.h" +#include "rocksdb/table.h" +#include "rocksdb/write_batch.h" +#include "db/db_impl.h" +#include "db/filename.h" +#include "db/log_format.h" +#include "db/version_set.h" +#include "util/logging.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { + +static const int kValueSize = 1000; + +class CorruptionTest : public testing::Test { + public: + test::ErrorEnv env_; + std::string dbname_; + shared_ptr<Cache> tiny_cache_; + Options options_; + DB* db_; + + CorruptionTest() { + tiny_cache_ = NewLRUCache(100); + options_.wal_recovery_mode = WALRecoveryMode::kTolerateCorruptedTailRecords; + options_.env = &env_; + dbname_ = test::TmpDir() + "/corruption_test"; + DestroyDB(dbname_, options_); + + db_ = nullptr; + options_.create_if_missing = true; + BlockBasedTableOptions table_options; + table_options.block_size_deviation = 0; // make unit test pass for now + options_.table_factory.reset(NewBlockBasedTableFactory(table_options)); + Reopen(); + options_.create_if_missing = false; + } + + ~CorruptionTest() { + delete db_; + DestroyDB(dbname_, Options()); + } + + void CloseDb() { + delete db_; + db_ = nullptr; + } + + Status TryReopen(Options* options = nullptr) { + delete db_; + db_ = nullptr; + Options opt = (options ? 
*options : options_); + opt.env = &env_; + opt.arena_block_size = 4096; + BlockBasedTableOptions table_options; + table_options.block_cache = tiny_cache_; + table_options.block_size_deviation = 0; + opt.table_factory.reset(NewBlockBasedTableFactory(table_options)); + return DB::Open(opt, dbname_, &db_); + } + + void Reopen(Options* options = nullptr) { + ASSERT_OK(TryReopen(options)); + } + + void RepairDB() { + delete db_; + db_ = nullptr; + ASSERT_OK(::rocksdb::RepairDB(dbname_, options_)); + } + + void Build(int n, int flush_every = 0) { + std::string key_space, value_space; + WriteBatch batch; + for (int i = 0; i < n; i++) { + if (flush_every != 0 && i != 0 && i % flush_every == 0) { + DBImpl* dbi = reinterpret_cast(db_); + dbi->TEST_FlushMemTable(); + } + //if ((i % 100) == 0) fprintf(stderr, "@ %d of %d\n", i, n); + Slice key = Key(i, &key_space); + batch.Clear(); + batch.Put(key, Value(i, &value_space)); + ASSERT_OK(db_->Write(WriteOptions(), &batch)); + } + } + + void Check(int min_expected, int max_expected) { + uint64_t next_expected = 0; + uint64_t missed = 0; + int bad_keys = 0; + int bad_values = 0; + int correct = 0; + std::string value_space; + // Do not verify checksums. If we verify checksums then the + // db itself will raise errors because data is corrupted. + // Instead, we want the reads to be successful and this test + // will detect whether the appropriate corruptions have + // occurred. + Iterator* iter = db_->NewIterator(ReadOptions(false, true)); + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + uint64_t key; + Slice in(iter->key()); + if (!ConsumeDecimalNumber(&in, &key) || + !in.empty() || + key < next_expected) { + bad_keys++; + continue; + } + missed += (key - next_expected); + next_expected = key + 1; + if (iter->value() != Value(static_cast(key), &value_space)) { + bad_values++; + } else { + correct++; + } + } + delete iter; + + fprintf(stderr, + "expected=%d..%d; got=%d; bad_keys=%d; bad_values=%d; missed=%llu\n", + min_expected, max_expected, correct, bad_keys, bad_values, + static_cast(missed)); + ASSERT_LE(min_expected, correct); + ASSERT_GE(max_expected, correct); + } + + void CorruptFile(const std::string& fname, int offset, int bytes_to_corrupt) { + struct stat sbuf; + if (stat(fname.c_str(), &sbuf) != 0) { + const char* msg = strerror(errno); + ASSERT_TRUE(false) << fname << ": " << msg; + } + + if (offset < 0) { + // Relative to end of file; make it absolute + if (-offset > sbuf.st_size) { + offset = 0; + } else { + offset = static_cast(sbuf.st_size + offset); + } + } + if (offset > sbuf.st_size) { + offset = static_cast(sbuf.st_size); + } + if (offset + bytes_to_corrupt > sbuf.st_size) { + bytes_to_corrupt = static_cast(sbuf.st_size - offset); + } + + // Do it + std::string contents; + Status s = ReadFileToString(Env::Default(), fname, &contents); + ASSERT_TRUE(s.ok()) << s.ToString(); + for (int i = 0; i < bytes_to_corrupt; i++) { + contents[i + offset] ^= 0x80; + } + s = WriteStringToFile(Env::Default(), contents, fname); + ASSERT_TRUE(s.ok()) << s.ToString(); + } + + void Corrupt(FileType filetype, int offset, int bytes_to_corrupt) { + // Pick file to corrupt + std::vector filenames; + ASSERT_OK(env_.GetChildren(dbname_, &filenames)); + uint64_t number; + FileType type; + std::string fname; + int picked_number = -1; + for (size_t i = 0; i < filenames.size(); i++) { + if (ParseFileName(filenames[i], &number, &type) && + type == filetype && + static_cast(number) > picked_number) { // Pick latest file + fname = dbname_ + "/" + 
filenames[i]; + picked_number = static_cast(number); + } + } + ASSERT_TRUE(!fname.empty()) << filetype; + + CorruptFile(fname, offset, bytes_to_corrupt); + } + + // corrupts exactly one file at level `level`. if no file found at level, + // asserts + void CorruptTableFileAtLevel(int level, int offset, int bytes_to_corrupt) { + std::vector metadata; + db_->GetLiveFilesMetaData(&metadata); + for (const auto& m : metadata) { + if (m.level == level) { + CorruptFile(dbname_ + "/" + m.name, offset, bytes_to_corrupt); + return; + } + } + ASSERT_TRUE(false) << "no file found at level"; + } + + + int Property(const std::string& name) { + std::string property; + int result; + if (db_->GetProperty(name, &property) && + sscanf(property.c_str(), "%d", &result) == 1) { + return result; + } else { + return -1; + } + } + + // Return the ith key + Slice Key(int i, std::string* storage) { + char buf[100]; + snprintf(buf, sizeof(buf), "%016d", i); + storage->assign(buf, strlen(buf)); + return Slice(*storage); + } + + // Return the value to associate with the specified key + Slice Value(int k, std::string* storage) { + if (k == 0) { + // Ugh. Random seed of 0 used to produce no entropy. This code + // preserves the implementation that was in place when all of the + // magic values in this file were picked. + *storage = std::string(kValueSize, ' '); + return Slice(*storage); + } else { + Random r(k); + return test::RandomString(&r, kValueSize, storage); + } + } +}; + +TEST_F(CorruptionTest, Recovery) { + Build(100); + Check(100, 100); +#ifdef OS_WIN + // On Wndows OS Disk cache does not behave properly + // We do not call FlushBuffers on every Flush. If we do not close + // the log file prior to the corruption we end up with the first + // block not corrupted but only the second. However, under the debugger + // things work just fine but never pass when running normally + // For that reason people may want to run with unbuffered I/O. That option + // is not available for WAL though. + CloseDb(); +#endif + Corrupt(kLogFile, 19, 1); // WriteBatch tag for first record + Corrupt(kLogFile, log::kBlockSize + 1000, 1); // Somewhere in second block + ASSERT_TRUE(!TryReopen().ok()); + options_.paranoid_checks = false; + Reopen(&options_); + + // The 64 records in the first two log blocks are completely lost. 
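The expected count follows from the WAL geometry rather than from a magic number: log::kBlockSize is 32 KB in this codebase, and each record here is a 16-byte key plus a 1000-byte value plus write-batch and log framing, i.e. roughly 1 KB per record. A rough, assumption-laden check rather than an exact derivation:

  records_per_log_block ~ 32768 / ~1024 ~ 32
  records_lost          ~ 2 blocks * 32 = 64
  records_surviving     = 100 - 64      = 36   // hence Check(36, 36) below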
+ Check(36, 36); +} + +TEST_F(CorruptionTest, RecoverWriteError) { + env_.writable_file_error_ = true; + Status s = TryReopen(); + ASSERT_TRUE(!s.ok()); +} + +TEST_F(CorruptionTest, NewFileErrorDuringWrite) { + // Do enough writing to force minor compaction + env_.writable_file_error_ = true; + const int num = + static_cast(3 + (Options().write_buffer_size / kValueSize)); + std::string value_storage; + Status s; + bool failed = false; + for (int i = 0; i < num; i++) { + WriteBatch batch; + batch.Put("a", Value(100, &value_storage)); + s = db_->Write(WriteOptions(), &batch); + if (!s.ok()) { + failed = true; + } + ASSERT_TRUE(!failed || !s.ok()); + } + ASSERT_TRUE(!s.ok()); + ASSERT_GE(env_.num_writable_file_errors_, 1); + env_.writable_file_error_ = false; + Reopen(); +} + +TEST_F(CorruptionTest, TableFile) { + Build(100); + DBImpl* dbi = reinterpret_cast(db_); + dbi->TEST_FlushMemTable(); + dbi->TEST_CompactRange(0, nullptr, nullptr); + dbi->TEST_CompactRange(1, nullptr, nullptr); + + Corrupt(kTableFile, 100, 1); + Check(99, 99); +} + +TEST_F(CorruptionTest, TableFileIndexData) { + Options options; + // very big, we'll trigger flushes manually + options.write_buffer_size = 100 * 1024 * 1024; + Reopen(&options); + // build 2 tables, flush at 5000 + Build(10000, 5000); + DBImpl* dbi = reinterpret_cast(db_); + dbi->TEST_FlushMemTable(); + + // corrupt an index block of an entire file + Corrupt(kTableFile, -2000, 500); + Reopen(); + // one full file should be readable, since only one was corrupted + // the other file should be fully non-readable, since index was corrupted + Check(5000, 5000); +} + +TEST_F(CorruptionTest, MissingDescriptor) { + Build(1000); + RepairDB(); + Reopen(); + Check(1000, 1000); +} + +TEST_F(CorruptionTest, SequenceNumberRecovery) { + ASSERT_OK(db_->Put(WriteOptions(), "foo", "v1")); + ASSERT_OK(db_->Put(WriteOptions(), "foo", "v2")); + ASSERT_OK(db_->Put(WriteOptions(), "foo", "v3")); + ASSERT_OK(db_->Put(WriteOptions(), "foo", "v4")); + ASSERT_OK(db_->Put(WriteOptions(), "foo", "v5")); + RepairDB(); + Reopen(); + std::string v; + ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); + ASSERT_EQ("v5", v); + // Write something. If sequence number was not recovered properly, + // it will be hidden by an earlier write. 
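Concretely: if the five Puts received sequence numbers 1 through 5 and RepairDB failed to restore the last sequence number, the next Put would be assigned a stale, smaller sequence number, and reads, which return the value with the highest visible sequence number, would keep answering "v5". A sketch of the invariant being tested (the sequence numbers are illustrative; the test never inspects them directly):

  // healthy repair:  last_seq restored to 5, Put("foo","v6") gets seq 6,
  //                  so Get("foo") -> "v6"   (6 > 5)
  // broken repair :  last_seq reset to 0, Put("foo","v6") gets seq 1,
  //                  so Get("foo") -> "v5"   (5 > 1, the new write is hidden)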
+ ASSERT_OK(db_->Put(WriteOptions(), "foo", "v6")); + ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); + ASSERT_EQ("v6", v); + Reopen(); + ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); + ASSERT_EQ("v6", v); +} + +TEST_F(CorruptionTest, CorruptedDescriptor) { + ASSERT_OK(db_->Put(WriteOptions(), "foo", "hello")); + DBImpl* dbi = reinterpret_cast(db_); + dbi->TEST_FlushMemTable(); + dbi->TEST_CompactRange(0, nullptr, nullptr); + + Corrupt(kDescriptorFile, 0, 1000); + Status s = TryReopen(); + ASSERT_TRUE(!s.ok()); + + RepairDB(); + Reopen(); + std::string v; + ASSERT_OK(db_->Get(ReadOptions(), "foo", &v)); + ASSERT_EQ("hello", v); +} + +TEST_F(CorruptionTest, CompactionInputError) { + Options options; + Reopen(&options); + Build(10); + DBImpl* dbi = reinterpret_cast(db_); + dbi->TEST_FlushMemTable(); + dbi->TEST_CompactRange(0, nullptr, nullptr); + dbi->TEST_CompactRange(1, nullptr, nullptr); + ASSERT_EQ(1, Property("rocksdb.num-files-at-level2")); + + Corrupt(kTableFile, 100, 1); + Check(9, 9); + + // Force compactions by writing lots of values + Build(10000); + Check(10000, 10000); +} + +TEST_F(CorruptionTest, CompactionInputErrorParanoid) { + Options options; + options.paranoid_checks = true; + options.write_buffer_size = 131072; + options.max_write_buffer_number = 2; + Reopen(&options); + DBImpl* dbi = reinterpret_cast(db_); + + // Fill levels >= 1 + for (int level = 1; level < dbi->NumberLevels(); level++) { + dbi->Put(WriteOptions(), "", "begin"); + dbi->Put(WriteOptions(), "~", "end"); + dbi->TEST_FlushMemTable(); + for (int comp_level = 0; comp_level < dbi->NumberLevels() - level; + ++comp_level) { + dbi->TEST_CompactRange(comp_level, nullptr, nullptr); + } + } + + Reopen(&options); + + dbi = reinterpret_cast(db_); + Build(10); + dbi->TEST_FlushMemTable(); + dbi->TEST_WaitForCompact(); + ASSERT_EQ(1, Property("rocksdb.num-files-at-level0")); + + CorruptTableFileAtLevel(0, 100, 1); + Check(9, 9); + + // Write must eventually fail because of corrupted table + Status s; + std::string tmp1, tmp2; + bool failed = false; + for (int i = 0; i < 10000; i++) { + s = db_->Put(WriteOptions(), Key(i, &tmp1), Value(i, &tmp2)); + if (!s.ok()) { + failed = true; + } + // if one write failed, every subsequent write must fail, too + ASSERT_TRUE(!failed || !s.ok()) << "write did not fail in a corrupted db"; + } + ASSERT_TRUE(!s.ok()) << "write did not fail in corrupted paranoid db"; +} + +TEST_F(CorruptionTest, UnrelatedKeys) { + Build(10); + DBImpl* dbi = reinterpret_cast(db_); + dbi->TEST_FlushMemTable(); + Corrupt(kTableFile, 100, 1); + + std::string tmp1, tmp2; + ASSERT_OK(db_->Put(WriteOptions(), Key(1000, &tmp1), Value(1000, &tmp2))); + std::string v; + ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v)); + ASSERT_EQ(Value(1000, &tmp2).ToString(), v); + dbi->TEST_FlushMemTable(); + ASSERT_OK(db_->Get(ReadOptions(), Key(1000, &tmp1), &v)); + ASSERT_EQ(Value(1000, &tmp2).ToString(), v); +} + +TEST_F(CorruptionTest, FileSystemStateCorrupted) { + for (int iter = 0; iter < 2; ++iter) { + Options options; + options.paranoid_checks = true; + options.create_if_missing = true; + Reopen(&options); + Build(10); + ASSERT_OK(db_->Flush(FlushOptions())); + DBImpl* dbi = reinterpret_cast(db_); + std::vector metadata; + dbi->GetLiveFilesMetaData(&metadata); + ASSERT_GT(metadata.size(), size_t(0)); + std::string filename = dbname_ + metadata[0].name; + + delete db_; + db_ = nullptr; + + if (iter == 0) { // corrupt file size + unique_ptr file; + env_.NewWritableFile(filename, &file, EnvOptions()); + 
file->Append(Slice("corrupted sst")); + file.reset(); + } else { // delete the file + env_.DeleteFile(filename); + } + + Status x = TryReopen(&options); + ASSERT_TRUE(x.IsCorruption()); + DestroyDB(dbname_, options_); + Reopen(&options); + } +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + +#else +#include + +int main(int argc, char** argv) { + fprintf(stderr, "SKIPPED as RepairDB() is not supported in ROCKSDB_LITE\n"); + return 0; +} + +#endif // !ROCKSDB_LITE diff --git a/external/rocksdb/db/cuckoo_table_db_test.cc b/external/rocksdb/db/cuckoo_table_db_test.cc new file mode 100755 index 0000000000..f48b5b436c --- /dev/null +++ b/external/rocksdb/db/cuckoo_table_db_test.cc @@ -0,0 +1,333 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef ROCKSDB_LITE + +#include "db/db_impl.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "table/meta_blocks.h" +#include "table/cuckoo_table_factory.h" +#include "table/cuckoo_table_reader.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { + +class CuckooTableDBTest : public testing::Test { + private: + std::string dbname_; + Env* env_; + DB* db_; + + public: + CuckooTableDBTest() : env_(Env::Default()) { + dbname_ = test::TmpDir() + "/cuckoo_table_db_test"; + EXPECT_OK(DestroyDB(dbname_, Options())); + db_ = nullptr; + Reopen(); + } + + ~CuckooTableDBTest() { + delete db_; + EXPECT_OK(DestroyDB(dbname_, Options())); + } + + Options CurrentOptions() { + Options options; + options.table_factory.reset(NewCuckooTableFactory()); + options.memtable_factory.reset(NewHashLinkListRepFactory(4, 0, 3, true)); + options.allow_mmap_reads = true; + options.create_if_missing = true; + return options; + } + + DBImpl* dbfull() { + return reinterpret_cast(db_); + } + + // The following util methods are copied from plain_table_db_test. + void Reopen(Options* options = nullptr) { + delete db_; + db_ = nullptr; + Options opts; + if (options != nullptr) { + opts = *options; + } else { + opts = CurrentOptions(); + opts.create_if_missing = true; + } + ASSERT_OK(DB::Open(opts, dbname_, &db_)); + } + + Status Put(const Slice& k, const Slice& v) { + return db_->Put(WriteOptions(), k, v); + } + + Status Delete(const std::string& k) { + return db_->Delete(WriteOptions(), k); + } + + std::string Get(const std::string& k) { + ReadOptions options; + std::string result; + Status s = db_->Get(options, k, &result); + if (s.IsNotFound()) { + result = "NOT_FOUND"; + } else if (!s.ok()) { + result = s.ToString(); + } + return result; + } + + int NumTableFilesAtLevel(int level) { + std::string property; + EXPECT_TRUE(db_->GetProperty( + "rocksdb.num-files-at-level" + NumberToString(level), &property)); + return atoi(property.c_str()); + } + + // Return spread of files per level + std::string FilesPerLevel() { + std::string result; + size_t last_non_zero_offset = 0; + for (int level = 0; level < db_->NumberLevels(); level++) { + int f = NumTableFilesAtLevel(level); + char buf[100]; + snprintf(buf, sizeof(buf), "%s%d", (level ? 
"," : ""), f); + result += buf; + if (f > 0) { + last_non_zero_offset = result.size(); + } + } + result.resize(last_non_zero_offset); + return result; + } +}; + +TEST_F(CuckooTableDBTest, Flush) { + // Try with empty DB first. + ASSERT_TRUE(dbfull() != nullptr); + ASSERT_EQ("NOT_FOUND", Get("key2")); + + // Add some values to db. + Options options = CurrentOptions(); + Reopen(&options); + + ASSERT_OK(Put("key1", "v1")); + ASSERT_OK(Put("key2", "v2")); + ASSERT_OK(Put("key3", "v3")); + dbfull()->TEST_FlushMemTable(); + + TablePropertiesCollection ptc; + reinterpret_cast(dbfull())->GetPropertiesOfAllTables(&ptc); + ASSERT_EQ(1U, ptc.size()); + ASSERT_EQ(3U, ptc.begin()->second->num_entries); + ASSERT_EQ("1", FilesPerLevel()); + + ASSERT_EQ("v1", Get("key1")); + ASSERT_EQ("v2", Get("key2")); + ASSERT_EQ("v3", Get("key3")); + ASSERT_EQ("NOT_FOUND", Get("key4")); + + // Now add more keys and flush. + ASSERT_OK(Put("key4", "v4")); + ASSERT_OK(Put("key5", "v5")); + ASSERT_OK(Put("key6", "v6")); + dbfull()->TEST_FlushMemTable(); + + reinterpret_cast(dbfull())->GetPropertiesOfAllTables(&ptc); + ASSERT_EQ(2U, ptc.size()); + auto row = ptc.begin(); + ASSERT_EQ(3U, row->second->num_entries); + ASSERT_EQ(3U, (++row)->second->num_entries); + ASSERT_EQ("2", FilesPerLevel()); + ASSERT_EQ("v1", Get("key1")); + ASSERT_EQ("v2", Get("key2")); + ASSERT_EQ("v3", Get("key3")); + ASSERT_EQ("v4", Get("key4")); + ASSERT_EQ("v5", Get("key5")); + ASSERT_EQ("v6", Get("key6")); + + ASSERT_OK(Delete("key6")); + ASSERT_OK(Delete("key5")); + ASSERT_OK(Delete("key4")); + dbfull()->TEST_FlushMemTable(); + reinterpret_cast(dbfull())->GetPropertiesOfAllTables(&ptc); + ASSERT_EQ(3U, ptc.size()); + row = ptc.begin(); + ASSERT_EQ(3U, row->second->num_entries); + ASSERT_EQ(3U, (++row)->second->num_entries); + ASSERT_EQ(3U, (++row)->second->num_entries); + ASSERT_EQ("3", FilesPerLevel()); + ASSERT_EQ("v1", Get("key1")); + ASSERT_EQ("v2", Get("key2")); + ASSERT_EQ("v3", Get("key3")); + ASSERT_EQ("NOT_FOUND", Get("key4")); + ASSERT_EQ("NOT_FOUND", Get("key5")); + ASSERT_EQ("NOT_FOUND", Get("key6")); +} + +TEST_F(CuckooTableDBTest, FlushWithDuplicateKeys) { + Options options = CurrentOptions(); + Reopen(&options); + ASSERT_OK(Put("key1", "v1")); + ASSERT_OK(Put("key2", "v2")); + ASSERT_OK(Put("key1", "v3")); // Duplicate + dbfull()->TEST_FlushMemTable(); + + TablePropertiesCollection ptc; + reinterpret_cast(dbfull())->GetPropertiesOfAllTables(&ptc); + ASSERT_EQ(1U, ptc.size()); + ASSERT_EQ(2U, ptc.begin()->second->num_entries); + ASSERT_EQ("1", FilesPerLevel()); + ASSERT_EQ("v3", Get("key1")); + ASSERT_EQ("v2", Get("key2")); +} + +namespace { +static std::string Key(int i) { + char buf[100]; + snprintf(buf, sizeof(buf), "key_______%06d", i); + return std::string(buf); +} +static std::string Uint64Key(uint64_t i) { + std::string str; + str.resize(8); + memcpy(&str[0], static_cast(&i), 8); + return str; +} +} // namespace. + +TEST_F(CuckooTableDBTest, Uint64Comparator) { + Options options = CurrentOptions(); + options.comparator = test::Uint64Comparator(); + Reopen(&options); + + ASSERT_OK(Put(Uint64Key(1), "v1")); + ASSERT_OK(Put(Uint64Key(2), "v2")); + ASSERT_OK(Put(Uint64Key(3), "v3")); + dbfull()->TEST_FlushMemTable(); + + ASSERT_EQ("v1", Get(Uint64Key(1))); + ASSERT_EQ("v2", Get(Uint64Key(2))); + ASSERT_EQ("v3", Get(Uint64Key(3))); + ASSERT_EQ("NOT_FOUND", Get(Uint64Key(4))); + + // Add more keys. + ASSERT_OK(Delete(Uint64Key(2))); // Delete. + dbfull()->TEST_FlushMemTable(); + ASSERT_OK(Put(Uint64Key(3), "v0")); // Update. 
+ ASSERT_OK(Put(Uint64Key(4), "v4")); + dbfull()->TEST_FlushMemTable(); + ASSERT_EQ("v1", Get(Uint64Key(1))); + ASSERT_EQ("NOT_FOUND", Get(Uint64Key(2))); + ASSERT_EQ("v0", Get(Uint64Key(3))); + ASSERT_EQ("v4", Get(Uint64Key(4))); +} + +TEST_F(CuckooTableDBTest, CompactionIntoMultipleFiles) { + // Create a big L0 file and check it compacts into multiple files in L1. + Options options = CurrentOptions(); + options.write_buffer_size = 270 << 10; + // Two SST files should be created, each containing 14 keys. + // Number of buckets will be 16. Total size ~156 KB. + options.target_file_size_base = 160 << 10; + Reopen(&options); + + // Write 28 values, each 10016 B ~ 10KB + for (int idx = 0; idx < 28; ++idx) { + ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + idx))); + } + dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_EQ("1", FilesPerLevel()); + + dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr, + true /* disallow trivial move */); + ASSERT_EQ("0,2", FilesPerLevel()); + for (int idx = 0; idx < 28; ++idx) { + ASSERT_EQ(std::string(10000, 'a' + idx), Get(Key(idx))); + } +} + +TEST_F(CuckooTableDBTest, SameKeyInsertedInTwoDifferentFilesAndCompacted) { + // Insert same key twice so that they go to different SST files. Then wait for + // compaction and check if the latest value is stored and old value removed. + Options options = CurrentOptions(); + options.write_buffer_size = 100 << 10; // 100KB + options.level0_file_num_compaction_trigger = 2; + Reopen(&options); + + // Write 11 values, each 10016 B + for (int idx = 0; idx < 11; ++idx) { + ASSERT_OK(Put(Key(idx), std::string(10000, 'a'))); + } + dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_EQ("1", FilesPerLevel()); + + // Generate one more file in level-0, and should trigger level-0 compaction + for (int idx = 0; idx < 11; ++idx) { + ASSERT_OK(Put(Key(idx), std::string(10000, 'a' + idx))); + } + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_CompactRange(0, nullptr, nullptr); + + ASSERT_EQ("0,1", FilesPerLevel()); + for (int idx = 0; idx < 11; ++idx) { + ASSERT_EQ(std::string(10000, 'a' + idx), Get(Key(idx))); + } +} + +TEST_F(CuckooTableDBTest, AdaptiveTable) { + Options options = CurrentOptions(); + + // Write some keys using cuckoo table. + options.table_factory.reset(NewCuckooTableFactory()); + Reopen(&options); + + ASSERT_OK(Put("key1", "v1")); + ASSERT_OK(Put("key2", "v2")); + ASSERT_OK(Put("key3", "v3")); + dbfull()->TEST_FlushMemTable(); + + // Write some keys using plain table. + options.create_if_missing = false; + options.table_factory.reset(NewPlainTableFactory()); + Reopen(&options); + ASSERT_OK(Put("key4", "v4")); + ASSERT_OK(Put("key1", "v5")); + dbfull()->TEST_FlushMemTable(); + + // Write some keys using block based table. 
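NewAdaptiveTableFactory writes new SST files with the factory it is handed and dispatches reads by each existing file's footer magic number, which is how this test can layer block-based files on top of the cuckoo and plain-table files created above. A hedged sketch of the wiring (the factory also accepts optional per-format read factories, left defaulted here):

  std::shared_ptr<rocksdb::TableFactory> bbt(
      rocksdb::NewBlockBasedTableFactory());
  options.table_factory.reset(rocksdb::NewAdaptiveTableFactory(bbt));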
+ std::shared_ptr block_based_factory( + NewBlockBasedTableFactory()); + options.table_factory.reset(NewAdaptiveTableFactory(block_based_factory)); + Reopen(&options); + ASSERT_OK(Put("key5", "v6")); + ASSERT_OK(Put("key2", "v7")); + dbfull()->TEST_FlushMemTable(); + + ASSERT_EQ("v5", Get("key1")); + ASSERT_EQ("v7", Get("key2")); + ASSERT_EQ("v3", Get("key3")); + ASSERT_EQ("v4", Get("key4")); + ASSERT_EQ("v6", Get("key5")); +} +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + +#else +#include + +int main(int argc, char** argv) { + fprintf(stderr, "SKIPPED as Cuckoo table is not supported in ROCKSDB_LITE\n"); + return 0; +} + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/db/db_block_cache_test.cc b/external/rocksdb/db/db_block_cache_test.cc new file mode 100755 index 0000000000..96d5be980d --- /dev/null +++ b/external/rocksdb/db/db_block_cache_test.cc @@ -0,0 +1,496 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +#include +#include "db/db_test_util.h" +#include "port/stack_trace.h" + +namespace rocksdb { + +class DBBlockCacheTest : public DBTestBase { + private: + size_t miss_count_ = 0; + size_t hit_count_ = 0; + size_t insert_count_ = 0; + size_t failure_count_ = 0; + size_t compressed_miss_count_ = 0; + size_t compressed_hit_count_ = 0; + size_t compressed_insert_count_ = 0; + size_t compressed_failure_count_ = 0; + + public: + const size_t kNumBlocks = 10; + const size_t kValueSize = 100; + + DBBlockCacheTest() : DBTestBase("/db_block_cache_test") {} + + BlockBasedTableOptions GetTableOptions() { + BlockBasedTableOptions table_options; + // Set a small enough block size so that each key-value get its own block. 
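block_size is a soft target in bytes (4 KB by default), and the table builder cuts a data block once it reaches that size, so a target of 1 puts every key/value pair in its own block; each Seek in the tests below then faults in exactly one block, which is what makes the cache hit/miss counters advance in steps of one. The same knob in isolation, as a sketch:

  rocksdb::BlockBasedTableOptions to;
  to.block_size = 1;  // bytes; forces one key/value pair per data block
  options.table_factory.reset(rocksdb::NewBlockBasedTableFactory(to));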
+ table_options.block_size = 1; + return table_options; + } + + Options GetOptions(const BlockBasedTableOptions& table_options) { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.avoid_flush_during_recovery = false; + // options.compression = kNoCompression; + options.statistics = rocksdb::CreateDBStatistics(); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + return options; + } + + void InitTable(const Options& options) { + std::string value(kValueSize, 'a'); + for (size_t i = 0; i < kNumBlocks; i++) { + ASSERT_OK(Put(ToString(i), value.c_str())); + } + } + + void RecordCacheCounters(const Options& options) { + miss_count_ = TestGetTickerCount(options, BLOCK_CACHE_MISS); + hit_count_ = TestGetTickerCount(options, BLOCK_CACHE_HIT); + insert_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD); + failure_count_ = TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES); + compressed_miss_count_ = + TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS); + compressed_hit_count_ = + TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT); + compressed_insert_count_ = + TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD); + compressed_failure_count_ = + TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD_FAILURES); + } + + void CheckCacheCounters(const Options& options, size_t expected_misses, + size_t expected_hits, size_t expected_inserts, + size_t expected_failures) { + size_t new_miss_count = TestGetTickerCount(options, BLOCK_CACHE_MISS); + size_t new_hit_count = TestGetTickerCount(options, BLOCK_CACHE_HIT); + size_t new_insert_count = TestGetTickerCount(options, BLOCK_CACHE_ADD); + size_t new_failure_count = + TestGetTickerCount(options, BLOCK_CACHE_ADD_FAILURES); + ASSERT_EQ(miss_count_ + expected_misses, new_miss_count); + ASSERT_EQ(hit_count_ + expected_hits, new_hit_count); + ASSERT_EQ(insert_count_ + expected_inserts, new_insert_count); + ASSERT_EQ(failure_count_ + expected_failures, new_failure_count); + miss_count_ = new_miss_count; + hit_count_ = new_hit_count; + insert_count_ = new_insert_count; + failure_count_ = new_failure_count; + } + + void CheckCompressedCacheCounters(const Options& options, + size_t expected_misses, + size_t expected_hits, + size_t expected_inserts, + size_t expected_failures) { + size_t new_miss_count = + TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS); + size_t new_hit_count = + TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT); + size_t new_insert_count = + TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD); + size_t new_failure_count = + TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_ADD_FAILURES); + ASSERT_EQ(compressed_miss_count_ + expected_misses, new_miss_count); + ASSERT_EQ(compressed_hit_count_ + expected_hits, new_hit_count); + ASSERT_EQ(compressed_insert_count_ + expected_inserts, new_insert_count); + ASSERT_EQ(compressed_failure_count_ + expected_failures, new_failure_count); + compressed_miss_count_ = new_miss_count; + compressed_hit_count_ = new_hit_count; + compressed_insert_count_ = new_insert_count; + compressed_failure_count_ = new_failure_count; + } +}; + +TEST_F(DBBlockCacheTest, TestWithoutCompressedBlockCache) { + ReadOptions read_options; + auto table_options = GetTableOptions(); + auto options = GetOptions(table_options); + InitTable(options); + + std::shared_ptr cache = NewLRUCache(0, 0, false); + table_options.block_cache = cache; + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + Reopen(options); + 
RecordCacheCounters(options); + + std::vector> iterators(kNumBlocks - 1); + Iterator* iter = nullptr; + + // Load blocks into cache. + for (size_t i = 0; i < kNumBlocks - 1; i++) { + iter = db_->NewIterator(read_options); + iter->Seek(ToString(i)); + ASSERT_OK(iter->status()); + CheckCacheCounters(options, 1, 0, 1, 0); + iterators[i].reset(iter); + } + size_t usage = cache->GetUsage(); + ASSERT_LT(0, usage); + cache->SetCapacity(usage); + ASSERT_EQ(usage, cache->GetPinnedUsage()); + + // Test with strict capacity limit. + cache->SetStrictCapacityLimit(true); + iter = db_->NewIterator(read_options); + iter->Seek(ToString(kNumBlocks - 1)); + ASSERT_TRUE(iter->status().IsIncomplete()); + CheckCacheCounters(options, 1, 0, 0, 1); + delete iter; + iter = nullptr; + + // Release interators and access cache again. + for (size_t i = 0; i < kNumBlocks - 1; i++) { + iterators[i].reset(); + CheckCacheCounters(options, 0, 0, 0, 0); + } + ASSERT_EQ(0, cache->GetPinnedUsage()); + for (size_t i = 0; i < kNumBlocks - 1; i++) { + iter = db_->NewIterator(read_options); + iter->Seek(ToString(i)); + ASSERT_OK(iter->status()); + CheckCacheCounters(options, 0, 1, 0, 0); + iterators[i].reset(iter); + } +} + +#ifdef SNAPPY +TEST_F(DBBlockCacheTest, TestWithCompressedBlockCache) { + ReadOptions read_options; + auto table_options = GetTableOptions(); + auto options = GetOptions(table_options); + options.compression = CompressionType::kSnappyCompression; + InitTable(options); + + std::shared_ptr cache = NewLRUCache(0, 0, false); + std::shared_ptr compressed_cache = NewLRUCache(0, 0, false); + table_options.block_cache = cache; + table_options.block_cache_compressed = compressed_cache; + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + Reopen(options); + RecordCacheCounters(options); + + std::vector> iterators(kNumBlocks - 1); + Iterator* iter = nullptr; + + // Load blocks into cache. + for (size_t i = 0; i < kNumBlocks - 1; i++) { + iter = db_->NewIterator(read_options); + iter->Seek(ToString(i)); + ASSERT_OK(iter->status()); + CheckCacheCounters(options, 1, 0, 1, 0); + CheckCompressedCacheCounters(options, 1, 0, 1, 0); + iterators[i].reset(iter); + } + size_t usage = cache->GetUsage(); + ASSERT_LT(0, usage); + ASSERT_EQ(usage, cache->GetPinnedUsage()); + size_t compressed_usage = compressed_cache->GetUsage(); + ASSERT_LT(0, compressed_usage); + // Compressed block cache cannot be pinned. + ASSERT_EQ(0, compressed_cache->GetPinnedUsage()); + + // Set strict capacity limit flag. Now block will only load into compressed + // block cache. + cache->SetCapacity(usage); + cache->SetStrictCapacityLimit(true); + ASSERT_EQ(usage, cache->GetPinnedUsage()); + // compressed_cache->SetCapacity(compressed_usage); + compressed_cache->SetCapacity(0); + // compressed_cache->SetStrictCapacityLimit(true); + iter = db_->NewIterator(read_options); + iter->Seek(ToString(kNumBlocks - 1)); + ASSERT_TRUE(iter->status().IsIncomplete()); + CheckCacheCounters(options, 1, 0, 0, 1); + CheckCompressedCacheCounters(options, 1, 0, 1, 0); + delete iter; + iter = nullptr; + + // Clear strict capacity limit flag. This time we shall hit compressed block + // cache. 
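SetStrictCapacityLimit switches the LRU cache between two overflow policies: with the flag on, an insert that cannot fit within capacity fails outright, which the reads above observe as an Incomplete status, and with it off the insert is admitted even if usage temporarily overshoots capacity. The three arguments of the NewLRUCache(0, 0, false) calls earlier are, assuming the signature in this vintage of the library, capacity in bytes, the number of shard bits, and this same strict flag:

  // capacity 0 plus a later SetCapacity(usage) pins the cache at exactly the
  // working-set size; 0 shard bits keeps a single shard so the usage
  // accounting in the assertions stays exact.
  std::shared_ptr<rocksdb::Cache> cache =
      rocksdb::NewLRUCache(/*capacity=*/0, /*num_shard_bits=*/0,
                           /*strict_capacity_limit=*/false);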
+ cache->SetStrictCapacityLimit(false); + iter = db_->NewIterator(read_options); + iter->Seek(ToString(kNumBlocks - 1)); + ASSERT_OK(iter->status()); + CheckCacheCounters(options, 1, 0, 1, 0); + CheckCompressedCacheCounters(options, 0, 1, 0, 0); + delete iter; + iter = nullptr; +} +#endif // SNAPPY + +#ifndef ROCKSDB_LITE + +// Make sure that when options.block_cache is set, after a new table is +// created its index/filter blocks are added to block cache. +TEST_F(DBBlockCacheTest, IndexAndFilterBlocksOfNewTableAddedToCache) { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.statistics = rocksdb::CreateDBStatistics(); + BlockBasedTableOptions table_options; + table_options.cache_index_and_filter_blocks = true; + table_options.filter_policy.reset(NewBloomFilterPolicy(20)); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + CreateAndReopenWithCF({"pikachu"}, options); + + ASSERT_OK(Put(1, "key", "val")); + // Create a new table. + ASSERT_OK(Flush(1)); + + // index/filter blocks added to block cache right after table creation. + ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS)); + ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS)); + ASSERT_EQ(2, /* only index/filter were added */ + TestGetTickerCount(options, BLOCK_CACHE_ADD)); + ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS)); + uint64_t int_num; + ASSERT_TRUE( + dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num)); + ASSERT_EQ(int_num, 0U); + + // Make sure filter block is in cache. + std::string value; + ReadOptions ropt; + db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value); + + // Miss count should remain the same. + ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS)); + ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); + + db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value); + ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS)); + ASSERT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); + + // Make sure index block is in cache. 
+ auto index_block_hit = TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT); + value = Get(1, "key"); + ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS)); + ASSERT_EQ(index_block_hit + 1, + TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT)); + + value = Get(1, "key"); + ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS)); + ASSERT_EQ(index_block_hit + 2, + TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT)); +} + +TEST_F(DBBlockCacheTest, IndexAndFilterBlocksStats) { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.statistics = rocksdb::CreateDBStatistics(); + BlockBasedTableOptions table_options; + table_options.cache_index_and_filter_blocks = true; + // 200 bytes are enough to hold the first two blocks + std::shared_ptr cache = NewLRUCache(200, 0, false); + table_options.block_cache = cache; + table_options.filter_policy.reset(NewBloomFilterPolicy(20)); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + CreateAndReopenWithCF({"pikachu"}, options); + + ASSERT_OK(Put(1, "key", "val")); + // Create a new table + ASSERT_OK(Flush(1)); + size_t index_bytes_insert = + TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_INSERT); + size_t filter_bytes_insert = + TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_INSERT); + ASSERT_GT(index_bytes_insert, 0); + ASSERT_GT(filter_bytes_insert, 0); + ASSERT_EQ(cache->GetUsage(), index_bytes_insert + filter_bytes_insert); + // set the cache capacity to the current usage + cache->SetCapacity(index_bytes_insert + filter_bytes_insert); + ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_EVICT), 0); + ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_EVICT), 0); + ASSERT_OK(Put(1, "key2", "val")); + // Create a new table + ASSERT_OK(Flush(1)); + // cache evicted old index and block entries + ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_INSERT), + index_bytes_insert); + ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_INSERT), + filter_bytes_insert); + ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_INDEX_BYTES_EVICT), + index_bytes_insert); + ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_FILTER_BYTES_EVICT), + filter_bytes_insert); +} + +TEST_F(DBBlockCacheTest, ParanoidFileChecks) { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.statistics = rocksdb::CreateDBStatistics(); + options.level0_file_num_compaction_trigger = 2; + options.paranoid_file_checks = true; + BlockBasedTableOptions table_options; + table_options.cache_index_and_filter_blocks = false; + table_options.filter_policy.reset(NewBloomFilterPolicy(20)); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + CreateAndReopenWithCF({"pikachu"}, options); + + ASSERT_OK(Put(1, "1_key", "val")); + ASSERT_OK(Put(1, "9_key", "val")); + // Create a new table. + ASSERT_OK(Flush(1)); + ASSERT_EQ(1, /* read and cache data block */ + TestGetTickerCount(options, BLOCK_CACHE_ADD)); + + ASSERT_OK(Put(1, "1_key2", "val2")); + ASSERT_OK(Put(1, "9_key2", "val2")); + // Create a new SST file. This will further trigger a compaction + // and generate another file. + ASSERT_OK(Flush(1)); + dbfull()->TEST_WaitForCompact(); + ASSERT_EQ(3, /* Totally 3 files created up to now */ + TestGetTickerCount(options, BLOCK_CACHE_ADD)); + + // After disabling options.paranoid_file_checks. NO further block + // is added after generating a new file. 
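paranoid_file_checks makes the DB read back every table file it has just written, and that verification pass is what inserted the blocks counted above. It is also one of the dynamically mutable options, so the test flips it at runtime through DB::SetOptions, which takes string name/value pairs per column family:

  // the same call outside the test harness; db and cf are assumed to be an
  // open DB and one of its column family handles
  rocksdb::Status s = db->SetOptions(cf, {{"paranoid_file_checks", "false"}});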
+ ASSERT_OK( + dbfull()->SetOptions(handles_[1], {{"paranoid_file_checks", "false"}})); + + ASSERT_OK(Put(1, "1_key3", "val3")); + ASSERT_OK(Put(1, "9_key3", "val3")); + ASSERT_OK(Flush(1)); + ASSERT_OK(Put(1, "1_key4", "val4")); + ASSERT_OK(Put(1, "9_key4", "val4")); + ASSERT_OK(Flush(1)); + dbfull()->TEST_WaitForCompact(); + ASSERT_EQ(3, /* Totally 3 files created up to now */ + TestGetTickerCount(options, BLOCK_CACHE_ADD)); +} + +TEST_F(DBBlockCacheTest, CompressedCache) { + if (!Snappy_Supported()) { + return; + } + int num_iter = 80; + + // Run this test three iterations. + // Iteration 1: only a uncompressed block cache + // Iteration 2: only a compressed block cache + // Iteration 3: both block cache and compressed cache + // Iteration 4: both block cache and compressed cache, but DB is not + // compressed + for (int iter = 0; iter < 4; iter++) { + Options options = CurrentOptions(); + options.write_buffer_size = 64 * 1024; // small write buffer + options.statistics = rocksdb::CreateDBStatistics(); + + BlockBasedTableOptions table_options; + switch (iter) { + case 0: + // only uncompressed block cache + table_options.block_cache = NewLRUCache(8 * 1024); + table_options.block_cache_compressed = nullptr; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + break; + case 1: + // no block cache, only compressed cache + table_options.no_block_cache = true; + table_options.block_cache = nullptr; + table_options.block_cache_compressed = NewLRUCache(8 * 1024); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + break; + case 2: + // both compressed and uncompressed block cache + table_options.block_cache = NewLRUCache(1024); + table_options.block_cache_compressed = NewLRUCache(8 * 1024); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + break; + case 3: + // both block cache and compressed cache, but DB is not compressed + // also, make block cache sizes bigger, to trigger block cache hits + table_options.block_cache = NewLRUCache(1024 * 1024); + table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + options.compression = kNoCompression; + break; + default: + ASSERT_TRUE(false); + } + CreateAndReopenWithCF({"pikachu"}, options); + // default column family doesn't have block cache + Options no_block_cache_opts; + no_block_cache_opts.statistics = options.statistics; + no_block_cache_opts = CurrentOptions(no_block_cache_opts); + BlockBasedTableOptions table_options_no_bc; + table_options_no_bc.no_block_cache = true; + no_block_cache_opts.table_factory.reset( + NewBlockBasedTableFactory(table_options_no_bc)); + ReopenWithColumnFamilies( + {"default", "pikachu"}, + std::vector({no_block_cache_opts, options})); + + Random rnd(301); + + // Write 8MB (80 values, each 100K) + ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); + std::vector values; + std::string str; + for (int i = 0; i < num_iter; i++) { + if (i % 4 == 0) { // high compression ratio + str = RandomString(&rnd, 1000); + } + values.push_back(str); + ASSERT_OK(Put(1, Key(i), values[i])); + } + + // flush all data from memtable so that reads are from block cache + ASSERT_OK(Flush(1)); + + for (int i = 0; i < num_iter; i++) { + ASSERT_EQ(Get(1, Key(i)), values[i]); + } + + // check that we triggered the appropriate code paths in the cache + switch (iter) { + case 0: + // only uncompressed block cache + ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0); + 
ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0); + break; + case 1: + // no block cache, only compressed cache + ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0); + ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0); + break; + case 2: + // both compressed and uncompressed block cache + ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0); + ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0); + break; + case 3: + // both compressed and uncompressed block cache + ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_MISS), 0); + ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_HIT), 0); + ASSERT_GT(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_MISS), 0); + // compressed doesn't have any hits since blocks are not compressed on + // storage + ASSERT_EQ(TestGetTickerCount(options, BLOCK_CACHE_COMPRESSED_HIT), 0); + break; + default: + ASSERT_TRUE(false); + } + + options.create_if_missing = true; + DestroyAndReopen(options); + } +} + +#endif // ROCKSDB_LITE + +} // namespace rocksdb + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/db_bloom_filter_test.cc b/external/rocksdb/db/db_bloom_filter_test.cc new file mode 100755 index 0000000000..07f2409ab3 --- /dev/null +++ b/external/rocksdb/db/db_bloom_filter_test.cc @@ -0,0 +1,1028 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/db_test_util.h" +#include "port/stack_trace.h" +#include "rocksdb/perf_context.h" + +namespace rocksdb { + +// DB tests related to bloom filter. + +class DBBloomFilterTest : public DBTestBase { + public: + DBBloomFilterTest() : DBTestBase("/db_bloom_filter_test") {} +}; + +class DBBloomFilterTestWithParam : public DBTestBase, + public testing::WithParamInterface { + protected: + bool use_block_based_filter_; + + public: + DBBloomFilterTestWithParam() : DBTestBase("/db_bloom_filter_tests") {} + + ~DBBloomFilterTestWithParam() {} + + void SetUp() override { use_block_based_filter_ = GetParam(); } +}; + +// KeyMayExist can lead to a few false positives, but not false negatives. 
+// To make test deterministic, use a much larger number of bits per key-20 than +// bits in the key, so that false positives are eliminated +TEST_P(DBBloomFilterTestWithParam, KeyMayExist) { + do { + ReadOptions ropts; + std::string value; + anon::OptionsOverride options_override; + options_override.filter_policy.reset( + NewBloomFilterPolicy(20, use_block_based_filter_)); + Options options = CurrentOptions(options_override); + options.statistics = rocksdb::CreateDBStatistics(); + CreateAndReopenWithCF({"pikachu"}, options); + + ASSERT_TRUE(!db_->KeyMayExist(ropts, handles_[1], "a", &value)); + + ASSERT_OK(Put(1, "a", "b")); + bool value_found = false; + ASSERT_TRUE( + db_->KeyMayExist(ropts, handles_[1], "a", &value, &value_found)); + ASSERT_TRUE(value_found); + ASSERT_EQ("b", value); + + ASSERT_OK(Flush(1)); + value.clear(); + + uint64_t numopen = TestGetTickerCount(options, NO_FILE_OPENS); + uint64_t cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD); + ASSERT_TRUE( + db_->KeyMayExist(ropts, handles_[1], "a", &value, &value_found)); + ASSERT_TRUE(!value_found); + // assert that no new files were opened and no new blocks were + // read into block cache. + ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS)); + ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD)); + + ASSERT_OK(Delete(1, "a")); + + numopen = TestGetTickerCount(options, NO_FILE_OPENS); + cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD); + ASSERT_TRUE(!db_->KeyMayExist(ropts, handles_[1], "a", &value)); + ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS)); + ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD)); + + ASSERT_OK(Flush(1)); + dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1], + true /* disallow trivial move */); + + numopen = TestGetTickerCount(options, NO_FILE_OPENS); + cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD); + ASSERT_TRUE(!db_->KeyMayExist(ropts, handles_[1], "a", &value)); + ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS)); + ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD)); + + ASSERT_OK(Delete(1, "c")); + + numopen = TestGetTickerCount(options, NO_FILE_OPENS); + cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD); + ASSERT_TRUE(!db_->KeyMayExist(ropts, handles_[1], "c", &value)); + ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS)); + ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD)); + + // KeyMayExist function only checks data in block caches, which is not used + // by plain table format. 
+ } while ( + ChangeOptions(kSkipPlainTable | kSkipHashIndex | kSkipFIFOCompaction)); +} + +TEST_F(DBBloomFilterTest, GetFilterByPrefixBloom) { + Options options = last_options_; + options.prefix_extractor.reset(NewFixedPrefixTransform(8)); + options.statistics = rocksdb::CreateDBStatistics(); + BlockBasedTableOptions bbto; + bbto.filter_policy.reset(NewBloomFilterPolicy(10, false)); + bbto.whole_key_filtering = false; + options.table_factory.reset(NewBlockBasedTableFactory(bbto)); + DestroyAndReopen(options); + + WriteOptions wo; + ReadOptions ro; + FlushOptions fo; + fo.wait = true; + std::string value; + + ASSERT_OK(dbfull()->Put(wo, "barbarbar", "foo")); + ASSERT_OK(dbfull()->Put(wo, "barbarbar2", "foo2")); + ASSERT_OK(dbfull()->Put(wo, "foofoofoo", "bar")); + + dbfull()->Flush(fo); + + ASSERT_EQ("foo", Get("barbarbar")); + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); + ASSERT_EQ("foo2", Get("barbarbar2")); + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); + ASSERT_EQ("NOT_FOUND", Get("barbarbar3")); + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); + + ASSERT_EQ("NOT_FOUND", Get("barfoofoo")); + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1); + + ASSERT_EQ("NOT_FOUND", Get("foobarbar")); + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 2); + + ro.total_order_seek = true; + ASSERT_TRUE(db_->Get(ro, "foobarbar", &value).IsNotFound()); + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 2); +} + +TEST_F(DBBloomFilterTest, WholeKeyFilterProp) { + Options options = last_options_; + options.prefix_extractor.reset(NewFixedPrefixTransform(3)); + options.statistics = rocksdb::CreateDBStatistics(); + + BlockBasedTableOptions bbto; + bbto.filter_policy.reset(NewBloomFilterPolicy(10, false)); + bbto.whole_key_filtering = false; + options.table_factory.reset(NewBlockBasedTableFactory(bbto)); + DestroyAndReopen(options); + + WriteOptions wo; + ReadOptions ro; + FlushOptions fo; + fo.wait = true; + std::string value; + + ASSERT_OK(dbfull()->Put(wo, "foobar", "foo")); + // Needs insert some keys to make sure files are not filtered out by key + // ranges. + ASSERT_OK(dbfull()->Put(wo, "aaa", "")); + ASSERT_OK(dbfull()->Put(wo, "zzz", "")); + dbfull()->Flush(fo); + + Reopen(options); + ASSERT_EQ("NOT_FOUND", Get("foo")); + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); + ASSERT_EQ("NOT_FOUND", Get("bar")); + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1); + ASSERT_EQ("foo", Get("foobar")); + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1); + + // Reopen with whole key filtering enabled and prefix extractor + // NULL. Bloom filter should be off for both of whole key and + // prefix bloom. + bbto.whole_key_filtering = true; + options.table_factory.reset(NewBlockBasedTableFactory(bbto)); + options.prefix_extractor.reset(); + Reopen(options); + + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1); + ASSERT_EQ("NOT_FOUND", Get("foo")); + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1); + ASSERT_EQ("NOT_FOUND", Get("bar")); + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1); + ASSERT_EQ("foo", Get("foobar")); + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1); + // Write DB with only full key filtering. + ASSERT_OK(dbfull()->Put(wo, "foobar", "foo")); + // Needs insert some keys to make sure files are not filtered out by key + // ranges. 
+  ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
+  ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
+  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+
+  // Reopen with whole key filtering off and the prefix extractor enabled.
+  // Still no bloom filter should be used.
+  options.prefix_extractor.reset(NewFixedPrefixTransform(3));
+  bbto.whole_key_filtering = false;
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+  Reopen(options);
+
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
+  ASSERT_EQ("NOT_FOUND", Get("foo"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
+  ASSERT_EQ("NOT_FOUND", Get("bar"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
+  ASSERT_EQ("foo", Get("foobar"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
+
+  // Try to create a DB with mixed files:
+  ASSERT_OK(dbfull()->Put(wo, "foobar", "foo"));
+  // Need to insert some keys to make sure files are not filtered out by key
+  // ranges.
+  ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
+  ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
+  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+
+  options.prefix_extractor.reset();
+  bbto.whole_key_filtering = true;
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+  Reopen(options);
+
+  // Try to create a DB with mixed files.
+  ASSERT_OK(dbfull()->Put(wo, "barfoo", "bar"));
+  // In this case we need to insert some keys to make sure files are
+  // not filtered out by key ranges.
+  ASSERT_OK(dbfull()->Put(wo, "aaa", ""));
+  ASSERT_OK(dbfull()->Put(wo, "zzz", ""));
+  Flush();
+
+  // Now we have two files:
+  // File 1: An older file with prefix bloom.
+  // File 2: A newer file with whole bloom filter.
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 1);
+  ASSERT_EQ("NOT_FOUND", Get("foo"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 2);
+  ASSERT_EQ("NOT_FOUND", Get("bar"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 3);
+  ASSERT_EQ("foo", Get("foobar"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 4);
+  ASSERT_EQ("bar", Get("barfoo"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 4);
+
+  // Reopen with the same setting: only whole key is used
+  Reopen(options);
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 4);
+  ASSERT_EQ("NOT_FOUND", Get("foo"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 5);
+  ASSERT_EQ("NOT_FOUND", Get("bar"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 6);
+  ASSERT_EQ("foo", Get("foobar"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 7);
+  ASSERT_EQ("bar", Get("barfoo"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 7);
+
+  // Restart with both filters allowed.
+  options.prefix_extractor.reset(NewFixedPrefixTransform(3));
+  bbto.whole_key_filtering = true;
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+  Reopen(options);
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 7);
+  // File 1 will have it filtered out.
+  // File 2 will not, as the prefix `foo` exists in the file.
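+  // (Editorial note: Get("bar") below advances BLOOM_FILTER_USEFUL by two,
+  // since both files can reject it: file 1 via the prefix bloom and file 2
+  // via the whole-key bloom.)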
+  ASSERT_EQ("NOT_FOUND", Get("foo"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 8);
+  ASSERT_EQ("NOT_FOUND", Get("bar"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 10);
+  ASSERT_EQ("foo", Get("foobar"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 11);
+  ASSERT_EQ("bar", Get("barfoo"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 11);
+
+  // Restart with only the prefix bloom filter allowed.
+  options.prefix_extractor.reset(NewFixedPrefixTransform(3));
+  bbto.whole_key_filtering = false;
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+  Reopen(options);
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 11);
+  ASSERT_EQ("NOT_FOUND", Get("foo"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 11);
+  ASSERT_EQ("NOT_FOUND", Get("bar"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 12);
+  ASSERT_EQ("foo", Get("foobar"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 12);
+  ASSERT_EQ("bar", Get("barfoo"));
+  ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 12);
+}
+
+TEST_P(DBBloomFilterTestWithParam, BloomFilter) {
+  do {
+    Options options = CurrentOptions();
+    env_->count_random_reads_ = true;
+    options.env = env_;
+    // ChangeCompactOptions() only changes compaction style, which does not
+    // trigger reset of table_factory
+    BlockBasedTableOptions table_options;
+    table_options.no_block_cache = true;
+    table_options.filter_policy.reset(
+        NewBloomFilterPolicy(10, use_block_based_filter_));
+    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+
+    CreateAndReopenWithCF({"pikachu"}, options);
+
+    // Populate multiple layers
+    const int N = 10000;
+    for (int i = 0; i < N; i++) {
+      ASSERT_OK(Put(1, Key(i), Key(i)));
+    }
+    Compact(1, "a", "z");
+    for (int i = 0; i < N; i += 100) {
+      ASSERT_OK(Put(1, Key(i), Key(i)));
+    }
+    Flush(1);
+
+    // Prevent auto compactions triggered by seeks
+    env_->delay_sstable_sync_.store(true, std::memory_order_release);
+
+    // Lookup present keys. Should rarely read from small sstable.
+    env_->random_read_counter_.Reset();
+    for (int i = 0; i < N; i++) {
+      ASSERT_EQ(Key(i), Get(1, Key(i)));
+    }
+    int reads = env_->random_read_counter_.Read();
+    fprintf(stderr, "%d present => %d reads\n", N, reads);
+    ASSERT_GE(reads, N);
+    ASSERT_LE(reads, N + 2 * N / 100);
+
+    // Lookup missing keys. Should rarely read from either sstable.
+ env_->random_read_counter_.Reset(); + for (int i = 0; i < N; i++) { + ASSERT_EQ("NOT_FOUND", Get(1, Key(i) + ".missing")); + } + reads = env_->random_read_counter_.Read(); + fprintf(stderr, "%d missing => %d reads\n", N, reads); + ASSERT_LE(reads, 3 * N / 100); + + env_->delay_sstable_sync_.store(false, std::memory_order_release); + Close(); + } while (ChangeCompactOptions()); +} + +INSTANTIATE_TEST_CASE_P(DBBloomFilterTestWithParam, DBBloomFilterTestWithParam, + ::testing::Bool()); + +TEST_F(DBBloomFilterTest, BloomFilterRate) { + while (ChangeFilterOptions()) { + Options options = CurrentOptions(); + options.statistics = rocksdb::CreateDBStatistics(); + CreateAndReopenWithCF({"pikachu"}, options); + + const int maxKey = 10000; + for (int i = 0; i < maxKey; i++) { + ASSERT_OK(Put(1, Key(i), Key(i))); + } + // Add a large key to make the file contain wide range + ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555))); + Flush(1); + + // Check if they can be found + for (int i = 0; i < maxKey; i++) { + ASSERT_EQ(Key(i), Get(1, Key(i))); + } + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); + + // Check if filter is useful + for (int i = 0; i < maxKey; i++) { + ASSERT_EQ("NOT_FOUND", Get(1, Key(i + 33333))); + } + ASSERT_GE(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), maxKey * 0.98); + } +} + +TEST_F(DBBloomFilterTest, BloomFilterCompatibility) { + Options options = CurrentOptions(); + options.statistics = rocksdb::CreateDBStatistics(); + BlockBasedTableOptions table_options; + table_options.filter_policy.reset(NewBloomFilterPolicy(10, true)); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + // Create with block based filter + CreateAndReopenWithCF({"pikachu"}, options); + + const int maxKey = 10000; + for (int i = 0; i < maxKey; i++) { + ASSERT_OK(Put(1, Key(i), Key(i))); + } + ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555))); + Flush(1); + + // Check db with full filter + table_options.filter_policy.reset(NewBloomFilterPolicy(10, false)); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + ReopenWithColumnFamilies({"default", "pikachu"}, options); + + // Check if they can be found + for (int i = 0; i < maxKey; i++) { + ASSERT_EQ(Key(i), Get(1, Key(i))); + } + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); +} + +TEST_F(DBBloomFilterTest, BloomFilterReverseCompatibility) { + Options options = CurrentOptions(); + options.statistics = rocksdb::CreateDBStatistics(); + BlockBasedTableOptions table_options; + table_options.filter_policy.reset(NewBloomFilterPolicy(10, false)); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + // Create with full filter + CreateAndReopenWithCF({"pikachu"}, options); + + const int maxKey = 10000; + for (int i = 0; i < maxKey; i++) { + ASSERT_OK(Put(1, Key(i), Key(i))); + } + ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555))); + Flush(1); + + // Check db with block_based filter + table_options.filter_policy.reset(NewBloomFilterPolicy(10, true)); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + ReopenWithColumnFamilies({"default", "pikachu"}, options); + + // Check if they can be found + for (int i = 0; i < maxKey; i++) { + ASSERT_EQ(Key(i), Get(1, Key(i))); + } + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); +} + +namespace { +// A wrapped bloom over default FilterPolicy +class WrappedBloom : public FilterPolicy { + public: + explicit WrappedBloom(int bits_per_key) + : 
filter_(NewBloomFilterPolicy(bits_per_key)), counter_(0) {} + + ~WrappedBloom() { delete filter_; } + + const char* Name() const override { return "WrappedRocksDbFilterPolicy"; } + + void CreateFilter(const rocksdb::Slice* keys, int n, + std::string* dst) const override { + std::unique_ptr user_keys(new rocksdb::Slice[n]); + for (int i = 0; i < n; ++i) { + user_keys[i] = convertKey(keys[i]); + } + return filter_->CreateFilter(user_keys.get(), n, dst); + } + + bool KeyMayMatch(const rocksdb::Slice& key, + const rocksdb::Slice& filter) const override { + counter_++; + return filter_->KeyMayMatch(convertKey(key), filter); + } + + uint32_t GetCounter() { return counter_; } + + private: + const FilterPolicy* filter_; + mutable uint32_t counter_; + + rocksdb::Slice convertKey(const rocksdb::Slice& key) const { return key; } +}; +} // namespace + +TEST_F(DBBloomFilterTest, BloomFilterWrapper) { + Options options = CurrentOptions(); + options.statistics = rocksdb::CreateDBStatistics(); + + BlockBasedTableOptions table_options; + WrappedBloom* policy = new WrappedBloom(10); + table_options.filter_policy.reset(policy); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + CreateAndReopenWithCF({"pikachu"}, options); + + const int maxKey = 10000; + for (int i = 0; i < maxKey; i++) { + ASSERT_OK(Put(1, Key(i), Key(i))); + } + // Add a large key to make the file contain wide range + ASSERT_OK(Put(1, Key(maxKey + 55555), Key(maxKey + 55555))); + ASSERT_EQ(0U, policy->GetCounter()); + Flush(1); + + // Check if they can be found + for (int i = 0; i < maxKey; i++) { + ASSERT_EQ(Key(i), Get(1, Key(i))); + } + ASSERT_EQ(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0); + ASSERT_EQ(1U * maxKey, policy->GetCounter()); + + // Check if filter is useful + for (int i = 0; i < maxKey; i++) { + ASSERT_EQ("NOT_FOUND", Get(1, Key(i + 33333))); + } + ASSERT_GE(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), maxKey * 0.98); + ASSERT_EQ(2U * maxKey, policy->GetCounter()); +} + +class SliceTransformLimitedDomain : public SliceTransform { + const char* Name() const override { return "SliceTransformLimitedDomain"; } + + Slice Transform(const Slice& src) const override { + return Slice(src.data(), 5); + } + + bool InDomain(const Slice& src) const override { + // prefix will be x???? + return src.size() >= 5 && src[0] == 'x'; + } + + bool InRange(const Slice& dst) const override { + // prefix will be x???? 
+    return dst.size() == 5 && dst[0] == 'x';
+  }
+};
+
+TEST_F(DBBloomFilterTest, PrefixExtractorFullFilter) {
+  BlockBasedTableOptions bbto;
+  // Full Filter Block
+  bbto.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, false));
+  bbto.whole_key_filtering = false;
+
+  Options options = CurrentOptions();
+  options.prefix_extractor = std::make_shared<SliceTransformLimitedDomain>();
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+
+  DestroyAndReopen(options);
+
+  ASSERT_OK(Put("x1111_AAAA", "val1"));
+  ASSERT_OK(Put("x1112_AAAA", "val2"));
+  ASSERT_OK(Put("x1113_AAAA", "val3"));
+  ASSERT_OK(Put("x1114_AAAA", "val4"));
+  // Not in domain, won't be added to filter
+  ASSERT_OK(Put("zzzzz_AAAA", "val5"));
+
+  ASSERT_OK(Flush());
+
+  ASSERT_EQ(Get("x1111_AAAA"), "val1");
+  ASSERT_EQ(Get("x1112_AAAA"), "val2");
+  ASSERT_EQ(Get("x1113_AAAA"), "val3");
+  ASSERT_EQ(Get("x1114_AAAA"), "val4");
+  // Was not added to filter but rocksdb will try to read it from the filter
+  ASSERT_EQ(Get("zzzzz_AAAA"), "val5");
+}
+
+TEST_F(DBBloomFilterTest, PrefixExtractorBlockFilter) {
+  BlockBasedTableOptions bbto;
+  // Block Filter Block
+  bbto.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10, true));
+
+  Options options = CurrentOptions();
+  options.prefix_extractor = std::make_shared<SliceTransformLimitedDomain>();
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+
+  DestroyAndReopen(options);
+
+  ASSERT_OK(Put("x1113_AAAA", "val3"));
+  ASSERT_OK(Put("x1114_AAAA", "val4"));
+  // Not in domain, won't be added to filter
+  ASSERT_OK(Put("zzzzz_AAAA", "val1"));
+  ASSERT_OK(Put("zzzzz_AAAB", "val2"));
+  ASSERT_OK(Put("zzzzz_AAAC", "val3"));
+  ASSERT_OK(Put("zzzzz_AAAD", "val4"));
+
+  ASSERT_OK(Flush());
+
+  std::vector<std::string> iter_res;
+  auto iter = db_->NewIterator(ReadOptions());
+  // Seek to a key that was not in Domain
+  for (iter->Seek("zzzzz_AAAA"); iter->Valid(); iter->Next()) {
+    iter_res.emplace_back(iter->value().ToString());
+  }
+
+  std::vector<std::string> expected_res = {"val1", "val2", "val3", "val4"};
+  ASSERT_EQ(iter_res, expected_res);
+  delete iter;
+}
+
+#ifndef ROCKSDB_LITE
+class BloomStatsTestWithParam
+    : public DBBloomFilterTest,
+      public testing::WithParamInterface<std::tuple<bool, bool>> {
+ public:
+  BloomStatsTestWithParam() {
+    use_block_table_ = std::get<0>(GetParam());
+    use_block_based_builder_ = std::get<1>(GetParam());
+
+    options_.create_if_missing = true;
+    options_.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(4));
+    options_.memtable_prefix_bloom_size_ratio =
+        8.0 * 1024.0 / static_cast<double>(options_.write_buffer_size);
+    if (use_block_table_) {
+      BlockBasedTableOptions table_options;
+      table_options.hash_index_allow_collision = false;
+      table_options.filter_policy.reset(
+          NewBloomFilterPolicy(10, use_block_based_builder_));
+      options_.table_factory.reset(NewBlockBasedTableFactory(table_options));
+    } else {
+      PlainTableOptions table_options;
+      options_.table_factory.reset(NewPlainTableFactory(table_options));
+    }
+
+    perf_context.Reset();
+    DestroyAndReopen(options_);
+  }
+
+  ~BloomStatsTestWithParam() {
+    perf_context.Reset();
+    Destroy(options_);
+  }
+
+  // Required if inheriting from testing::WithParamInterface<>
+  static void SetUpTestCase() {}
+  static void TearDownTestCase() {}
+
+  bool use_block_table_;
+  bool use_block_based_builder_;
+  Options options_;
+};
+
+// 1 Insert 2 K-V pairs into DB
+// 2 Call Get() for both keys - expect memtable bloom hit stat to be 2
+// 3 Call Get() for nonexistent key - expect memtable bloom miss stat to be 1
+// 4 Call Flush() to create SST
+// 5 Call Get() for both keys - expect SST bloom hit
stat to be 2 +// 6 Call Get() for nonexisting key - expect SST bloom miss stat to be 1 +// Test both: block and plain SST +TEST_P(BloomStatsTestWithParam, BloomStatsTest) { + std::string key1("AAAA"); + std::string key2("RXDB"); // not in DB + std::string key3("ZBRA"); + std::string value1("Value1"); + std::string value3("Value3"); + + ASSERT_OK(Put(key1, value1, WriteOptions())); + ASSERT_OK(Put(key3, value3, WriteOptions())); + + // check memtable bloom stats + ASSERT_EQ(value1, Get(key1)); + ASSERT_EQ(1, perf_context.bloom_memtable_hit_count); + ASSERT_EQ(value3, Get(key3)); + ASSERT_EQ(2, perf_context.bloom_memtable_hit_count); + ASSERT_EQ(0, perf_context.bloom_memtable_miss_count); + + ASSERT_EQ("NOT_FOUND", Get(key2)); + ASSERT_EQ(1, perf_context.bloom_memtable_miss_count); + ASSERT_EQ(2, perf_context.bloom_memtable_hit_count); + + // sanity checks + ASSERT_EQ(0, perf_context.bloom_sst_hit_count); + ASSERT_EQ(0, perf_context.bloom_sst_miss_count); + + Flush(); + + // sanity checks + ASSERT_EQ(0, perf_context.bloom_sst_hit_count); + ASSERT_EQ(0, perf_context.bloom_sst_miss_count); + + // check SST bloom stats + ASSERT_EQ(value1, Get(key1)); + ASSERT_EQ(1, perf_context.bloom_sst_hit_count); + ASSERT_EQ(value3, Get(key3)); + ASSERT_EQ(2, perf_context.bloom_sst_hit_count); + + ASSERT_EQ("NOT_FOUND", Get(key2)); + ASSERT_EQ(1, perf_context.bloom_sst_miss_count); +} + +// Same scenario as in BloomStatsTest but using an iterator +TEST_P(BloomStatsTestWithParam, BloomStatsTestWithIter) { + std::string key1("AAAA"); + std::string key2("RXDB"); // not in DB + std::string key3("ZBRA"); + std::string value1("Value1"); + std::string value3("Value3"); + + ASSERT_OK(Put(key1, value1, WriteOptions())); + ASSERT_OK(Put(key3, value3, WriteOptions())); + + unique_ptr iter(dbfull()->NewIterator(ReadOptions())); + + // check memtable bloom stats + iter->Seek(key1); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(value1, iter->value().ToString()); + ASSERT_EQ(1, perf_context.bloom_memtable_hit_count); + ASSERT_EQ(0, perf_context.bloom_memtable_miss_count); + + iter->Seek(key3); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(value3, iter->value().ToString()); + ASSERT_EQ(2, perf_context.bloom_memtable_hit_count); + ASSERT_EQ(0, perf_context.bloom_memtable_miss_count); + + iter->Seek(key2); + ASSERT_OK(iter->status()); + ASSERT_TRUE(!iter->Valid()); + ASSERT_EQ(1, perf_context.bloom_memtable_miss_count); + ASSERT_EQ(2, perf_context.bloom_memtable_hit_count); + + Flush(); + + iter.reset(dbfull()->NewIterator(ReadOptions())); + + // Check SST bloom stats + iter->Seek(key1); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(value1, iter->value().ToString()); + ASSERT_EQ(1, perf_context.bloom_sst_hit_count); + + iter->Seek(key3); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(value3, iter->value().ToString()); + ASSERT_EQ(2, perf_context.bloom_sst_hit_count); + + iter->Seek(key2); + ASSERT_OK(iter->status()); + ASSERT_TRUE(!iter->Valid()); + ASSERT_EQ(1, perf_context.bloom_sst_miss_count); + ASSERT_EQ(2, perf_context.bloom_sst_hit_count); +} + +INSTANTIATE_TEST_CASE_P(BloomStatsTestWithParam, BloomStatsTestWithParam, + ::testing::Values(std::make_tuple(true, true), + std::make_tuple(true, false), + std::make_tuple(false, false))); + +namespace { +void PrefixScanInit(DBBloomFilterTest* dbtest) { + char buf[100]; + std::string keystr; + const int small_range_sstfiles = 5; + const int big_range_sstfiles = 5; + + // Generate 
11 sst files with the following prefix ranges. + // GROUP 0: [0,10] (level 1) + // GROUP 1: [1,2], [2,3], [3,4], [4,5], [5, 6] (level 0) + // GROUP 2: [0,6], [0,7], [0,8], [0,9], [0,10] (level 0) + // + // A seek with the previous API would do 11 random I/Os (to all the + // files). With the new API and a prefix filter enabled, we should + // only do 2 random I/O, to the 2 files containing the key. + + // GROUP 0 + snprintf(buf, sizeof(buf), "%02d______:start", 0); + keystr = std::string(buf); + ASSERT_OK(dbtest->Put(keystr, keystr)); + snprintf(buf, sizeof(buf), "%02d______:end", 10); + keystr = std::string(buf); + ASSERT_OK(dbtest->Put(keystr, keystr)); + dbtest->Flush(); + dbtest->dbfull()->CompactRange(CompactRangeOptions(), nullptr, + nullptr); // move to level 1 + + // GROUP 1 + for (int i = 1; i <= small_range_sstfiles; i++) { + snprintf(buf, sizeof(buf), "%02d______:start", i); + keystr = std::string(buf); + ASSERT_OK(dbtest->Put(keystr, keystr)); + snprintf(buf, sizeof(buf), "%02d______:end", i + 1); + keystr = std::string(buf); + ASSERT_OK(dbtest->Put(keystr, keystr)); + dbtest->Flush(); + } + + // GROUP 2 + for (int i = 1; i <= big_range_sstfiles; i++) { + snprintf(buf, sizeof(buf), "%02d______:start", 0); + keystr = std::string(buf); + ASSERT_OK(dbtest->Put(keystr, keystr)); + snprintf(buf, sizeof(buf), "%02d______:end", small_range_sstfiles + i + 1); + keystr = std::string(buf); + ASSERT_OK(dbtest->Put(keystr, keystr)); + dbtest->Flush(); + } +} +} // namespace + +TEST_F(DBBloomFilterTest, PrefixScan) { + XFUNC_TEST("", "dbtest_prefix", prefix_skip1, XFuncPoint::SetSkip, + kSkipNoPrefix); + while (ChangeFilterOptions()) { + int count; + Slice prefix; + Slice key; + char buf[100]; + Iterator* iter; + snprintf(buf, sizeof(buf), "03______:"); + prefix = Slice(buf, 8); + key = Slice(buf, 9); + ASSERT_EQ(key.difference_offset(prefix), 8); + ASSERT_EQ(prefix.difference_offset(key), 8); + // db configs + env_->count_random_reads_ = true; + Options options = CurrentOptions(); + options.env = env_; + options.prefix_extractor.reset(NewFixedPrefixTransform(8)); + options.disable_auto_compactions = true; + options.max_background_compactions = 2; + options.create_if_missing = true; + options.memtable_factory.reset(NewHashSkipListRepFactory(16)); + + BlockBasedTableOptions table_options; + table_options.no_block_cache = true; + table_options.filter_policy.reset(NewBloomFilterPolicy(10)); + table_options.whole_key_filtering = false; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + // 11 RAND I/Os + DestroyAndReopen(options); + PrefixScanInit(this); + count = 0; + env_->random_read_counter_.Reset(); + iter = db_->NewIterator(ReadOptions()); + for (iter->Seek(prefix); iter->Valid(); iter->Next()) { + if (!iter->key().starts_with(prefix)) { + break; + } + count++; + } + ASSERT_OK(iter->status()); + delete iter; + ASSERT_EQ(count, 2); + ASSERT_EQ(env_->random_read_counter_.Read(), 2); + Close(); + } // end of while + XFUNC_TEST("", "dbtest_prefix", prefix_skip1, XFuncPoint::SetSkip, 0); +} + +TEST_F(DBBloomFilterTest, OptimizeFiltersForHits) { + Options options = CurrentOptions(); + options.write_buffer_size = 64 * 1024; + options.arena_block_size = 4 * 1024; + options.target_file_size_base = 64 * 1024; + options.level0_file_num_compaction_trigger = 2; + options.level0_slowdown_writes_trigger = 2; + options.level0_stop_writes_trigger = 4; + options.max_bytes_for_level_base = 256 * 1024; + options.max_write_buffer_number = 2; + options.max_background_compactions = 8; + 
options.max_background_flushes = 8;
+  options.compression = kNoCompression;
+  options.compaction_style = kCompactionStyleLevel;
+  options.level_compaction_dynamic_level_bytes = true;
+  BlockBasedTableOptions bbto;
+  bbto.cache_index_and_filter_blocks = true;
+  bbto.filter_policy.reset(NewBloomFilterPolicy(10, true));
+  bbto.whole_key_filtering = true;
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+  options.optimize_filters_for_hits = true;
+  options.statistics = rocksdb::CreateDBStatistics();
+  CreateAndReopenWithCF({"mypikachu"}, options);
+
+  int numkeys = 200000;
+
+  // Generate randomly shuffled keys, so the updates are almost
+  // random.
+  std::vector<int> keys;
+  keys.reserve(numkeys);
+  for (int i = 0; i < numkeys; i += 2) {
+    keys.push_back(i);
+  }
+  std::random_shuffle(std::begin(keys), std::end(keys));
+
+  int num_inserted = 0;
+  for (int key : keys) {
+    ASSERT_OK(Put(1, Key(key), "val"));
+    if (++num_inserted % 1000 == 0) {
+      dbfull()->TEST_WaitForFlushMemTable();
+      dbfull()->TEST_WaitForCompact();
+    }
+  }
+  ASSERT_OK(Put(1, Key(0), "val"));
+  ASSERT_OK(Put(1, Key(numkeys), "val"));
+  ASSERT_OK(Flush(1));
+  dbfull()->TEST_WaitForCompact();
+
+  if (NumTableFilesAtLevel(0, 1) == 0) {
+    // No Level 0 file. Create one.
+    ASSERT_OK(Put(1, Key(0), "val"));
+    ASSERT_OK(Put(1, Key(numkeys), "val"));
+    ASSERT_OK(Flush(1));
+    dbfull()->TEST_WaitForCompact();
+  }
+
+  for (int i = 1; i < numkeys; i += 2) {
+    ASSERT_EQ(Get(1, Key(i)), "NOT_FOUND");
+  }
+
+  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L0));
+  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L1));
+  ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L2_AND_UP));
+
+  // Now we have three sorted runs, L0, L5 and L6, with most files in L6
+  // having no bloom filter. Most keys will be checked against bloom filters
+  // twice.
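+  // (Editorial arithmetic sketch: the loop above probes numkeys/2 = 100000
+  // absent keys, and each probe can be rejected by the bloom filters of up
+  // to two filtered sorted runs, i.e. roughly two useful filter hits per
+  // key; the 65000 * 2 .. 120000 * 2 bounds below leave slack for keys
+  // served by unfiltered L6 files.)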
+ ASSERT_GT(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 65000 * 2); + ASSERT_LT(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 120000 * 2); + + for (int i = 0; i < numkeys; i += 2) { + ASSERT_EQ(Get(1, Key(i)), "val"); + } + + // Part 2 (read path): rewrite last level with blooms, then verify they get + // cached only if !optimize_filters_for_hits + options.disable_auto_compactions = true; + options.num_levels = 9; + options.optimize_filters_for_hits = false; + options.statistics = CreateDBStatistics(); + bbto.block_cache.reset(); + options.table_factory.reset(NewBlockBasedTableFactory(bbto)); + + ReopenWithColumnFamilies({"default", "mypikachu"}, options); + MoveFilesToLevel(7 /* level */, 1 /* column family index */); + + std::string value = Get(1, Key(0)); + uint64_t prev_cache_filter_hits = + TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT); + value = Get(1, Key(0)); + ASSERT_EQ(prev_cache_filter_hits + 1, + TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); + + // Now that we know the filter blocks exist in the last level files, see if + // filter caching is skipped for this optimization + options.optimize_filters_for_hits = true; + options.statistics = CreateDBStatistics(); + bbto.block_cache.reset(); + options.table_factory.reset(NewBlockBasedTableFactory(bbto)); + + ReopenWithColumnFamilies({"default", "mypikachu"}, options); + + value = Get(1, Key(0)); + ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS)); + ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); + ASSERT_EQ(2 /* index and data block */, + TestGetTickerCount(options, BLOCK_CACHE_ADD)); + + // Check filter block ignored for files preloaded during DB::Open() + options.max_open_files = -1; + options.statistics = CreateDBStatistics(); + bbto.block_cache.reset(); + options.table_factory.reset(NewBlockBasedTableFactory(bbto)); + + ReopenWithColumnFamilies({"default", "mypikachu"}, options); + + uint64_t prev_cache_filter_misses = + TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS); + prev_cache_filter_hits = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT); + Get(1, Key(0)); + ASSERT_EQ(prev_cache_filter_misses, + TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS)); + ASSERT_EQ(prev_cache_filter_hits, + TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); + + // Check filter block ignored for file trivially-moved to bottom level + bbto.block_cache.reset(); + options.max_open_files = 100; // setting > -1 makes it not preload all files + options.statistics = CreateDBStatistics(); + options.table_factory.reset(NewBlockBasedTableFactory(bbto)); + + ReopenWithColumnFamilies({"default", "mypikachu"}, options); + + ASSERT_OK(Put(1, Key(numkeys + 1), "val")); + ASSERT_OK(Flush(1)); + + int32_t trivial_move = 0; + int32_t non_trivial_move = 0; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:TrivialMove", + [&](void* arg) { trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial", + [&](void* arg) { non_trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + CompactRangeOptions compact_options; + compact_options.bottommost_level_compaction = + BottommostLevelCompaction::kSkip; + compact_options.change_level = true; + compact_options.target_level = 7; + db_->CompactRange(compact_options, handles_[1], nullptr, nullptr); + + ASSERT_EQ(trivial_move, 1); + ASSERT_EQ(non_trivial_move, 0); + + prev_cache_filter_hits = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT); + 
prev_cache_filter_misses = + TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS); + value = Get(1, Key(numkeys + 1)); + ASSERT_EQ(prev_cache_filter_hits, + TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); + ASSERT_EQ(prev_cache_filter_misses, + TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS)); + + // Check filter block not cached for iterator + bbto.block_cache.reset(); + options.statistics = CreateDBStatistics(); + options.table_factory.reset(NewBlockBasedTableFactory(bbto)); + + ReopenWithColumnFamilies({"default", "mypikachu"}, options); + + std::unique_ptr iter(db_->NewIterator(ReadOptions(), handles_[1])); + iter->SeekToFirst(); + ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS)); + ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); + ASSERT_EQ(2 /* index and data block */, + TestGetTickerCount(options, BLOCK_CACHE_ADD)); +} + +#endif // ROCKSDB_LITE + +} // namespace rocksdb + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/db_compaction_filter_test.cc b/external/rocksdb/db/db_compaction_filter_test.cc new file mode 100755 index 0000000000..ff6945cf83 --- /dev/null +++ b/external/rocksdb/db/db_compaction_filter_test.cc @@ -0,0 +1,721 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/db_test_util.h" +#include "port/stack_trace.h" + +namespace rocksdb { + +static int cfilter_count = 0; + +// This is a static filter used for filtering +// kvs during the compaction process. 
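+// (Editorial note: broadly, CompactionFilter::Filter() is invoked for each
+// live value entry a compaction processes; returning true drops the entry,
+// while setting *new_value and *value_changed rewrites it, as the filters
+// defined below demonstrate.)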
+static std::string NEW_VALUE = "NewValue"; + +class DBTestCompactionFilter : public DBTestBase { + public: + DBTestCompactionFilter() : DBTestBase("/db_compaction_filter_test") {} +}; + +class KeepFilter : public CompactionFilter { + public: + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, bool* value_changed) const + override { + cfilter_count++; + return false; + } + + virtual const char* Name() const override { return "KeepFilter"; } +}; + +class DeleteFilter : public CompactionFilter { + public: + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, bool* value_changed) const + override { + cfilter_count++; + return true; + } + + virtual const char* Name() const override { return "DeleteFilter"; } +}; + +class DeleteISFilter : public CompactionFilter { + public: + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, + bool* value_changed) const override { + cfilter_count++; + int i = std::stoi(key.ToString()); + if (i > 5 && i <= 105) { + return true; + } + return false; + } + + virtual bool IgnoreSnapshots() const override { return true; } + + virtual const char* Name() const override { return "DeleteFilter"; } +}; + +class DelayFilter : public CompactionFilter { + public: + explicit DelayFilter(DBTestBase* d) : db_test(d) {} + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, + bool* value_changed) const override { + db_test->env_->addon_time_.fetch_add(1000); + return true; + } + + virtual const char* Name() const override { return "DelayFilter"; } + + private: + DBTestBase* db_test; +}; + +class ConditionalFilter : public CompactionFilter { + public: + explicit ConditionalFilter(const std::string* filtered_value) + : filtered_value_(filtered_value) {} + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, + bool* value_changed) const override { + return value.ToString() == *filtered_value_; + } + + virtual const char* Name() const override { return "ConditionalFilter"; } + + private: + const std::string* filtered_value_; +}; + +class ChangeFilter : public CompactionFilter { + public: + explicit ChangeFilter() {} + + virtual bool Filter(int level, const Slice& key, const Slice& value, + std::string* new_value, bool* value_changed) const + override { + assert(new_value != nullptr); + *new_value = NEW_VALUE; + *value_changed = true; + return false; + } + + virtual const char* Name() const override { return "ChangeFilter"; } +}; + +class KeepFilterFactory : public CompactionFilterFactory { + public: + explicit KeepFilterFactory(bool check_context = false, + bool check_context_cf_id = false) + : check_context_(check_context), + check_context_cf_id_(check_context_cf_id), + compaction_filter_created_(false) {} + + virtual std::unique_ptr CreateCompactionFilter( + const CompactionFilter::Context& context) override { + if (check_context_) { + EXPECT_EQ(expect_full_compaction_.load(), context.is_full_compaction); + EXPECT_EQ(expect_manual_compaction_.load(), context.is_manual_compaction); + } + if (check_context_cf_id_) { + EXPECT_EQ(expect_cf_id_.load(), context.column_family_id); + } + compaction_filter_created_ = true; + return std::unique_ptr(new KeepFilter()); + } + + bool compaction_filter_created() const { return compaction_filter_created_; } + + virtual const char* Name() const override { return "KeepFilterFactory"; } + bool check_context_; + bool 
check_context_cf_id_;
+  std::atomic_bool expect_full_compaction_;
+  std::atomic_bool expect_manual_compaction_;
+  std::atomic<uint32_t> expect_cf_id_;
+  bool compaction_filter_created_;
+};
+
+class DeleteFilterFactory : public CompactionFilterFactory {
+ public:
+  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+      const CompactionFilter::Context& context) override {
+    if (context.is_manual_compaction) {
+      return std::unique_ptr<CompactionFilter>(new DeleteFilter());
+    } else {
+      return std::unique_ptr<CompactionFilter>(nullptr);
+    }
+  }
+
+  virtual const char* Name() const override { return "DeleteFilterFactory"; }
+};
+
+// Delete Filter Factory which ignores snapshots
+class DeleteISFilterFactory : public CompactionFilterFactory {
+ public:
+  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+      const CompactionFilter::Context& context) override {
+    if (context.is_manual_compaction) {
+      return std::unique_ptr<CompactionFilter>(new DeleteISFilter());
+    } else {
+      return std::unique_ptr<CompactionFilter>(nullptr);
+    }
+  }
+
+  virtual const char* Name() const override { return "DeleteFilterFactory"; }
+};
+
+class DelayFilterFactory : public CompactionFilterFactory {
+ public:
+  explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
+  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+      const CompactionFilter::Context& context) override {
+    return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
+  }
+
+  virtual const char* Name() const override { return "DelayFilterFactory"; }
+
+ private:
+  DBTestBase* db_test;
+};
+
+class ConditionalFilterFactory : public CompactionFilterFactory {
+ public:
+  explicit ConditionalFilterFactory(const Slice& filtered_value)
+      : filtered_value_(filtered_value.ToString()) {}
+
+  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+      const CompactionFilter::Context& context) override {
+    return std::unique_ptr<CompactionFilter>(
+        new ConditionalFilter(&filtered_value_));
+  }
+
+  virtual const char* Name() const override {
+    return "ConditionalFilterFactory";
+  }
+
+ private:
+  std::string filtered_value_;
+};
+
+class ChangeFilterFactory : public CompactionFilterFactory {
+ public:
+  explicit ChangeFilterFactory() {}
+
+  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+      const CompactionFilter::Context& context) override {
+    return std::unique_ptr<CompactionFilter>(new ChangeFilter());
+  }
+
+  virtual const char* Name() const override { return "ChangeFilterFactory"; }
+};
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBTestCompactionFilter, CompactionFilter) {
+  Options options = CurrentOptions();
+  options.max_open_files = -1;
+  options.num_levels = 3;
+  options.compaction_filter_factory = std::make_shared<KeepFilterFactory>();
+  options = CurrentOptions(options);
+  CreateAndReopenWithCF({"pikachu"}, options);
+
+  // Write 100K keys; these are written to a few files in L0.
+  const std::string value(10, 'x');
+  for (int i = 0; i < 100000; i++) {
+    char key[100];
+    snprintf(key, sizeof(key), "B%010d", i);
+    Put(1, key, value);
+  }
+  ASSERT_OK(Flush(1));
+
+  // Push all files to the highest level L2. Verify that
+  // the compaction at each level invokes the filter for
+  // all the keys in that level.
+  cfilter_count = 0;
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
+  ASSERT_EQ(cfilter_count, 100000);
+  cfilter_count = 0;
+  dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+  ASSERT_EQ(cfilter_count, 100000);
+
+  ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
+  ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0);
+  ASSERT_NE(NumTableFilesAtLevel(2, 1), 0);
+  cfilter_count = 0;
+
+  // All the files are in the lowest level.
+  // Verify that all but the 100001st record
+  // have sequence number zero.
The 100001st record + // is at the tip of this snapshot and cannot + // be zeroed out. + int count = 0; + int total = 0; + Arena arena; + { + ScopedArenaIterator iter( + dbfull()->NewInternalIterator(&arena, handles_[1])); + iter->SeekToFirst(); + ASSERT_OK(iter->status()); + while (iter->Valid()) { + ParsedInternalKey ikey(Slice(), 0, kTypeValue); + ikey.sequence = -1; + ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true); + total++; + if (ikey.sequence != 0) { + count++; + } + iter->Next(); + } + } + ASSERT_EQ(total, 100000); + ASSERT_EQ(count, 1); + + // overwrite all the 100K keys once again. + for (int i = 0; i < 100000; i++) { + char key[100]; + snprintf(key, sizeof(key), "B%010d", i); + ASSERT_OK(Put(1, key, value)); + } + ASSERT_OK(Flush(1)); + + // push all files to the highest level L2. This + // means that all keys should pass at least once + // via the compaction filter + cfilter_count = 0; + dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); + ASSERT_EQ(cfilter_count, 100000); + cfilter_count = 0; + dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); + ASSERT_EQ(cfilter_count, 100000); + ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); + ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0); + ASSERT_NE(NumTableFilesAtLevel(2, 1), 0); + + // create a new database with the compaction + // filter in such a way that it deletes all keys + options.compaction_filter_factory = std::make_shared(); + options.create_if_missing = true; + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + + // write all the keys once again. + for (int i = 0; i < 100000; i++) { + char key[100]; + snprintf(key, sizeof(key), "B%010d", i); + ASSERT_OK(Put(1, key, value)); + } + ASSERT_OK(Flush(1)); + ASSERT_NE(NumTableFilesAtLevel(0, 1), 0); + ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0); + ASSERT_EQ(NumTableFilesAtLevel(2, 1), 0); + + // Push all files to the highest level L2. This + // triggers the compaction filter to delete all keys, + // verify that at the end of the compaction process, + // nothing is left. + cfilter_count = 0; + dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); + ASSERT_EQ(cfilter_count, 100000); + cfilter_count = 0; + dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); + ASSERT_EQ(cfilter_count, 0); + ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); + ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0); + + { + // Scan the entire database to ensure that nothing is left + std::unique_ptr iter( + db_->NewIterator(ReadOptions(), handles_[1])); + iter->SeekToFirst(); + count = 0; + while (iter->Valid()) { + count++; + iter->Next(); + } + ASSERT_EQ(count, 0); + } + + // The sequence number of the remaining record + // is not zeroed out even though it is at the + // level Lmax because this record is at the tip + count = 0; + { + ScopedArenaIterator iter( + dbfull()->NewInternalIterator(&arena, handles_[1])); + iter->SeekToFirst(); + ASSERT_OK(iter->status()); + while (iter->Valid()) { + ParsedInternalKey ikey(Slice(), 0, kTypeValue); + ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true); + ASSERT_NE(ikey.sequence, (unsigned)0); + count++; + iter->Next(); + } + ASSERT_EQ(count, 0); + } +} + +// Tests the edge case where compaction does not produce any output -- all +// entries are deleted. The compaction should create bunch of 'DeleteFile' +// entries in VersionEdit, but none of the 'AddFile's. 
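+// (Editorial sketch, not upstream code: any filter whose Filter() always
+// returns true exercises this path, e.g.
+//
+//   class DropAll : public CompactionFilter {
+//     bool Filter(int, const Slice&, const Slice&, std::string*,
+//                 bool*) const override { return true; }
+//     const char* Name() const override { return "DropAll"; }
+//   };
+//
+// which is exactly what the DeleteFilter used by this test implements.)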
+TEST_F(DBTestCompactionFilter, CompactionFilterDeletesAll) {
+  Options options = CurrentOptions();
+  options.compaction_filter_factory = std::make_shared<DeleteFilterFactory>();
+  options.disable_auto_compactions = true;
+  options.create_if_missing = true;
+  DestroyAndReopen(options);
+
+  // put some data
+  for (int table = 0; table < 4; ++table) {
+    for (int i = 0; i < 10 + table; ++i) {
+      Put(ToString(table * 100 + i), "val");
+    }
+    Flush();
+  }
+
+  // this will produce an empty file (the delete compaction filter drops
+  // every record)
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+  ASSERT_EQ(0U, CountLiveFiles());
+
+  Reopen(options);
+
+  Iterator* itr = db_->NewIterator(ReadOptions());
+  itr->SeekToFirst();
+  // empty db
+  ASSERT_TRUE(!itr->Valid());
+
+  delete itr;
+}
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBTestCompactionFilter, CompactionFilterWithValueChange) {
+  do {
+    Options options = CurrentOptions();
+    options.num_levels = 3;
+    options.compaction_filter_factory =
+        std::make_shared<ChangeFilterFactory>();
+    CreateAndReopenWithCF({"pikachu"}, options);
+
+    // Write 100K+1 keys; these are written to a few files
+    // in L0. We do this so that the current snapshot points
+    // to the 100001st key. The compaction filter is not invoked
+    // on keys that are visible via a snapshot because we
+    // cannot delete them anyway.
+    const std::string value(10, 'x');
+    for (int i = 0; i < 100001; i++) {
+      char key[100];
+      snprintf(key, sizeof(key), "B%010d", i);
+      Put(1, key, value);
+    }
+
+    // push all files to lower levels
+    ASSERT_OK(Flush(1));
+    if (option_config_ != kUniversalCompactionMultiLevel &&
+        option_config_ != kUniversalSubcompactions) {
+      dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
+      dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+    } else {
+      dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                             nullptr);
+    }
+
+    // re-write all data again
+    for (int i = 0; i < 100001; i++) {
+      char key[100];
+      snprintf(key, sizeof(key), "B%010d", i);
+      Put(1, key, value);
+    }
+
+    // push all files to lower levels. This should
+    // invoke the compaction filter for all 100000 keys.
+    ASSERT_OK(Flush(1));
+    if (option_config_ != kUniversalCompactionMultiLevel &&
+        option_config_ != kUniversalSubcompactions) {
+      dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
+      dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]);
+    } else {
+      dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                             nullptr);
+    }
+
+    // verify that all keys now have the new value that
+    // was set by the compaction process.
+    for (int i = 0; i < 100001; i++) {
+      char key[100];
+      snprintf(key, sizeof(key), "B%010d", i);
+      std::string newvalue = Get(1, key);
+      ASSERT_EQ(newvalue.compare(NEW_VALUE), 0);
+    }
+  } while (ChangeCompactOptions());
+}
+
+TEST_F(DBTestCompactionFilter, CompactionFilterWithMergeOperator) {
+  std::string one, two, three, four;
+  PutFixed64(&one, 1);
+  PutFixed64(&two, 2);
+  PutFixed64(&three, 3);
+  PutFixed64(&four, 4);
+
+  Options options = CurrentOptions();
+  options.create_if_missing = true;
+  options.merge_operator = MergeOperators::CreateUInt64AddOperator();
+  options.num_levels = 3;
+  // Filter out keys whose value is 2.
+  options.compaction_filter_factory =
+      std::make_shared<ConditionalFilterFactory>(two);
+  DestroyAndReopen(options);
+
+  // In the same compaction, a value type needs to be deleted based on the
+  // compaction filter, and there is a merge type for the key. The compaction
+  // filter result is ignored.
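+  // (Editorial walkthrough: "foo" gets Put(2) followed by Merge(+1) under
+  // the UInt64Add operator; because the base value feeds a pending merge,
+  // dropping it would change the merge result, so the filter's decision is
+  // ignored and Get("foo") keeps returning 2 + 1 = 3.)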
+  ASSERT_OK(db_->Put(WriteOptions(), "foo", two));
+  ASSERT_OK(Flush());
+  ASSERT_OK(db_->Merge(WriteOptions(), "foo", one));
+  ASSERT_OK(Flush());
+  std::string newvalue = Get("foo");
+  ASSERT_EQ(newvalue, three);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  newvalue = Get("foo");
+  ASSERT_EQ(newvalue, three);
+
+  // The value entry can be deleted based on the compaction filter, leaving
+  // only merge entries.
+  ASSERT_OK(db_->Put(WriteOptions(), "bar", two));
+  ASSERT_OK(Flush());
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  newvalue = Get("bar");
+  ASSERT_EQ("NOT_FOUND", newvalue);
+  ASSERT_OK(db_->Merge(WriteOptions(), "bar", two));
+  ASSERT_OK(Flush());
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  newvalue = Get("bar");
+  ASSERT_EQ(newvalue, two);
+
+  // Compaction filter never applies to merge keys.
+  ASSERT_OK(db_->Put(WriteOptions(), "foobar", one));
+  ASSERT_OK(Flush());
+  ASSERT_OK(db_->Merge(WriteOptions(), "foobar", two));
+  ASSERT_OK(Flush());
+  newvalue = Get("foobar");
+  ASSERT_EQ(newvalue, three);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  newvalue = Get("foobar");
+  ASSERT_EQ(newvalue, three);
+
+  // In the same compaction, both value-type and merge-type entries need to be
+  // deleted based on the compaction filter, and there is a merge type for the
+  // key. For both entries, the compaction filter results are ignored.
+  ASSERT_OK(db_->Put(WriteOptions(), "barfoo", two));
+  ASSERT_OK(Flush());
+  ASSERT_OK(db_->Merge(WriteOptions(), "barfoo", two));
+  ASSERT_OK(Flush());
+  newvalue = Get("barfoo");
+  ASSERT_EQ(newvalue, four);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  newvalue = Get("barfoo");
+  ASSERT_EQ(newvalue, four);
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBTestCompactionFilter, CompactionFilterContextManual) {
+  KeepFilterFactory* filter = new KeepFilterFactory(true, true);
+
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.compaction_filter_factory.reset(filter);
+  options.compression = kNoCompression;
+  options.level0_file_num_compaction_trigger = 8;
+  Reopen(options);
+  int num_keys_per_file = 400;
+  for (int j = 0; j < 3; j++) {
+    // Write several keys.
+    const std::string value(10, 'x');
+    for (int i = 0; i < num_keys_per_file; i++) {
+      char key[100];
+      snprintf(key, sizeof(key), "B%08d%02d", i, j);
+      Put(key, value);
+    }
+    dbfull()->TEST_FlushMemTable();
+    // Make sure next file is much smaller so automatic compaction will not
+    // be triggered.
+    num_keys_per_file /= 2;
+  }
+  dbfull()->TEST_WaitForCompact();
+
+  // Force a manual compaction
+  cfilter_count = 0;
+  filter->expect_manual_compaction_.store(true);
+  filter->expect_full_compaction_.store(true);
+  filter->expect_cf_id_.store(0);
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_EQ(cfilter_count, 700);
+  ASSERT_EQ(NumSortedRuns(0), 1);
+  ASSERT_TRUE(filter->compaction_filter_created());
+
+  // Verify total number of keys is correct after manual compaction.
+  {
+    int count = 0;
+    int total = 0;
+    Arena arena;
+    ScopedArenaIterator iter(dbfull()->NewInternalIterator(&arena));
+    iter->SeekToFirst();
+    ASSERT_OK(iter->status());
+    while (iter->Valid()) {
+      ParsedInternalKey ikey(Slice(), 0, kTypeValue);
+      ikey.sequence = -1;
+      ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
+      total++;
+      if (ikey.sequence != 0) {
+        count++;
+      }
+      iter->Next();
+    }
+    ASSERT_EQ(total, 700);
+    ASSERT_EQ(count, 2);
+  }
+}
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBTestCompactionFilter, CompactionFilterContextCfId) {
+  KeepFilterFactory* filter = new KeepFilterFactory(false, true);
+  filter->expect_cf_id_.store(1);
+
+  Options options = CurrentOptions();
+  options.compaction_filter_factory.reset(filter);
+  options.compression = kNoCompression;
+  options.level0_file_num_compaction_trigger = 2;
+  CreateAndReopenWithCF({"pikachu"}, options);
+
+  int num_keys_per_file = 400;
+  for (int j = 0; j < 3; j++) {
+    // Write several keys.
+    const std::string value(10, 'x');
+    for (int i = 0; i < num_keys_per_file; i++) {
+      char key[100];
+      snprintf(key, sizeof(key), "B%08d%02d", i, j);
+      Put(1, key, value);
+    }
+    Flush(1);
+    // Make sure next file is much smaller so automatic compaction will not
+    // be triggered.
+    num_keys_per_file /= 2;
+  }
+  dbfull()->TEST_WaitForCompact();
+
+  ASSERT_TRUE(filter->compaction_filter_created());
+}
+
+#ifndef ROCKSDB_LITE
+// Compaction filters should only be applied to records that are newer than
+// the latest snapshot. This test inserts records and applies a delete filter.
+TEST_F(DBTestCompactionFilter, CompactionFilterSnapshot) {
+  Options options = CurrentOptions();
+  options.compaction_filter_factory = std::make_shared<DeleteFilterFactory>();
+  options.disable_auto_compactions = true;
+  options.create_if_missing = true;
+  DestroyAndReopen(options);
+
+  // Put some data.
+  const Snapshot* snapshot = nullptr;
+  for (int table = 0; table < 4; ++table) {
+    for (int i = 0; i < 10; ++i) {
+      Put(ToString(table * 100 + i), "val");
+    }
+    Flush();
+
+    if (table == 0) {
+      snapshot = db_->GetSnapshot();
+    }
+  }
+  assert(snapshot != nullptr);
+
+  cfilter_count = 0;
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+  // The filter should see (and delete) the 30 records that are not
+  // protected by the snapshot.
+  ASSERT_EQ(30U, cfilter_count);
+
+  // Release the snapshot and compact again -> now all records should be
+  // removed.
+  db_->ReleaseSnapshot(snapshot);
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+  ASSERT_EQ(0U, CountLiveFiles());
+}
+
+// Compaction filters should only be applied to records that are newer than
+// the latest snapshot. However, if the compaction filter asks to ignore
+// snapshots, records newer than the snapshot will also be processed.
+TEST_F(DBTestCompactionFilter, CompactionFilterIgnoreSnapshot) {
+  std::string five = ToString(5);
+  Options options = CurrentOptions();
+  options.compaction_filter_factory =
+      std::make_shared<DeleteISFilterFactory>();
+  options.disable_auto_compactions = true;
+  options.create_if_missing = true;
+  DestroyAndReopen(options);
+
+  // Put some data.
+  const Snapshot* snapshot = nullptr;
+  for (int table = 0; table < 4; ++table) {
+    for (int i = 0; i < 10; ++i) {
+      Put(ToString(table * 100 + i), "val");
+    }
+    Flush();
+
+    if (table == 0) {
+      snapshot = db_->GetSnapshot();
+    }
+  }
+  assert(snapshot != nullptr);
+
+  cfilter_count = 0;
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+  // The filter should be invoked on all 40 records, snapshot or not.
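+  // (Editorial note: of those 40 records, only the ten whose integer keys
+  // fall in (5, 105], i.e. 6-9 and 100-105, match DeleteISFilter and are
+  // actually dropped.)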
+ ASSERT_EQ(40U, cfilter_count); + + { + // Scan the entire database as of the snapshot to ensure + // that nothing is left + ReadOptions read_options; + read_options.snapshot = snapshot; + std::unique_ptr iter(db_->NewIterator(read_options)); + iter->SeekToFirst(); + int count = 0; + while (iter->Valid()) { + count++; + iter->Next(); + } + ASSERT_EQ(count, 6); + read_options.snapshot = 0; + std::unique_ptr iter1(db_->NewIterator(read_options)); + iter1->SeekToFirst(); + count = 0; + while (iter1->Valid()) { + count++; + iter1->Next(); + } + // We have deleted 10 keys from 40 using the compaction filter + // Keys 6-9 before the snapshot and 100-105 after the snapshot + ASSERT_EQ(count, 30); + } + + // Release the snapshot and compact again -> now all records should be + // removed. + db_->ReleaseSnapshot(snapshot); +} +#endif // ROCKSDB_LITE + +} // namespace rocksdb + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/db_compaction_test.cc b/external/rocksdb/db/db_compaction_test.cc new file mode 100755 index 0000000000..6f7d78c061 --- /dev/null +++ b/external/rocksdb/db/db_compaction_test.cc @@ -0,0 +1,2557 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/db_test_util.h" +#include "port/stack_trace.h" +#include "rocksdb/experimental.h" +#include "rocksdb/utilities/convenience.h" +#include "util/sync_point.h" +namespace rocksdb { + +// SYNC_POINT is not supported in released Windows mode. 
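+// (Editorial note: the tests in this file observe internal compaction
+// decisions through rocksdb::SyncPoint callbacks, which is presumably why
+// the whole body is also compiled out of ROCKSDB_LITE builds.)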
+#if !defined(ROCKSDB_LITE)
+
+class DBCompactionTest : public DBTestBase {
+ public:
+  DBCompactionTest() : DBTestBase("/db_compaction_test") {}
+};
+
+class DBCompactionTestWithParam
+    : public DBTestBase,
+      public testing::WithParamInterface<std::tuple<uint32_t, bool>> {
+ public:
+  DBCompactionTestWithParam() : DBTestBase("/db_compaction_test") {
+    max_subcompactions_ = std::get<0>(GetParam());
+    exclusive_manual_compaction_ = std::get<1>(GetParam());
+  }
+
+  // Required if inheriting from testing::WithParamInterface<>
+  static void SetUpTestCase() {}
+  static void TearDownTestCase() {}
+
+  uint32_t max_subcompactions_;
+  bool exclusive_manual_compaction_;
+};
+
+namespace {
+
+class FlushedFileCollector : public EventListener {
+ public:
+  FlushedFileCollector() {}
+  ~FlushedFileCollector() {}
+
+  virtual void OnFlushCompleted(DB* db, const FlushJobInfo& info) override {
+    std::lock_guard<std::mutex> lock(mutex_);
+    flushed_files_.push_back(info.file_path);
+  }
+
+  std::vector<std::string> GetFlushedFiles() {
+    std::lock_guard<std::mutex> lock(mutex_);
+    std::vector<std::string> result;
+    for (auto fname : flushed_files_) {
+      result.push_back(fname);
+    }
+    return result;
+  }
+
+  void ClearFlushedFiles() { flushed_files_.clear(); }
+
+ private:
+  std::vector<std::string> flushed_files_;
+  std::mutex mutex_;
+};
+
+static const int kCDTValueSize = 1000;
+static const int kCDTKeysPerBuffer = 4;
+static const int kCDTNumLevels = 8;
+Options DeletionTriggerOptions(Options options) {
+  options.compression = kNoCompression;
+  options.write_buffer_size = kCDTKeysPerBuffer * (kCDTValueSize + 24);
+  options.min_write_buffer_number_to_merge = 1;
+  options.max_write_buffer_number_to_maintain = 0;
+  options.num_levels = kCDTNumLevels;
+  options.level0_file_num_compaction_trigger = 1;
+  options.target_file_size_base = options.write_buffer_size * 2;
+  options.target_file_size_multiplier = 2;
+  options.max_bytes_for_level_base =
+      options.target_file_size_base * options.target_file_size_multiplier;
+  options.max_bytes_for_level_multiplier = 2;
+  options.disable_auto_compactions = false;
+  return options;
+}
+
+bool HaveOverlappingKeyRanges(
+    const Comparator* c,
+    const SstFileMetaData& a, const SstFileMetaData& b) {
+  if (c->Compare(a.smallestkey, b.smallestkey) >= 0) {
+    if (c->Compare(a.smallestkey, b.largestkey) <= 0) {
+      // b.smallestkey <= a.smallestkey <= b.largestkey
+      return true;
+    }
+  } else if (c->Compare(a.largestkey, b.smallestkey) >= 0) {
+    // a.smallestkey < b.smallestkey <= a.largestkey
+    return true;
+  }
+  if (c->Compare(a.largestkey, b.largestkey) <= 0) {
+    if (c->Compare(a.largestkey, b.smallestkey) >= 0) {
+      // b.smallestkey <= a.largestkey <= b.largestkey
+      return true;
+    }
+  } else if (c->Compare(a.smallestkey, b.largestkey) <= 0) {
+    // a.smallestkey <= b.largestkey < a.largestkey
+    return true;
+  }
+  return false;
+}
+
+// Identifies all files between level "min_level" and "max_level"
+// which have overlapping key ranges with "input_file_meta".
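+// (Editorial note: overlap is computed transitively; a file that overlaps
+// any file already collected is added as well, so the result is the closure
+// of files that a compaction seeded at "input_file_meta" could touch.)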
+void GetOverlappingFileNumbersForLevelCompaction( + const ColumnFamilyMetaData& cf_meta, + const Comparator* comparator, + int min_level, int max_level, + const SstFileMetaData* input_file_meta, + std::set<std::string>* overlapping_file_names) { + std::set<const SstFileMetaData*> overlapping_files; + overlapping_files.insert(input_file_meta); + for (int m = min_level; m <= max_level; ++m) { + for (auto& file : cf_meta.levels[m].files) { + for (auto* included_file : overlapping_files) { + if (HaveOverlappingKeyRanges( + comparator, *included_file, file)) { + overlapping_files.insert(&file); + overlapping_file_names->insert(file.name); + break; + } + } + } + } +} + +void VerifyCompactionResult( + const ColumnFamilyMetaData& cf_meta, + const std::set<std::string>& overlapping_file_numbers) { +#ifndef NDEBUG + for (auto& level : cf_meta.levels) { + for (auto& file : level.files) { + assert(overlapping_file_numbers.find(file.name) == + overlapping_file_numbers.end()); + } + } +#endif +} + +const SstFileMetaData* PickFileRandomly( + const ColumnFamilyMetaData& cf_meta, + Random* rand, + int* level = nullptr) { + auto file_id = rand->Uniform(static_cast<int>( + cf_meta.file_count)) + 1; + for (auto& level_meta : cf_meta.levels) { + if (file_id <= level_meta.files.size()) { + if (level != nullptr) { + *level = level_meta.level; + } + auto result = rand->Uniform(file_id); + return &(level_meta.files[result]); + } + file_id -= static_cast<uint32_t>(level_meta.files.size()); + } + assert(false); + return nullptr; +} +} // anonymous namespace + +// All the TEST_P tests run once with sub_compactions disabled (i.e. +// options.max_subcompactions = 1) and once with it enabled +TEST_P(DBCompactionTestWithParam, CompactionDeletionTrigger) { + for (int tid = 0; tid < 3; ++tid) { + uint64_t db_size[2]; + Options options = DeletionTriggerOptions(CurrentOptions()); + options.max_subcompactions = max_subcompactions_; + + if (tid == 1) { + // the following only disables stats update in DB::Open() + // and should not affect the result of this test. + options.skip_stats_update_on_db_open = true; + } else if (tid == 2) { + // third pass with universal compaction + options.compaction_style = kCompactionStyleUniversal; + options.num_levels = 1; + } + + DestroyAndReopen(options); + Random rnd(301); + + const int kTestSize = kCDTKeysPerBuffer * 1024; + std::vector<std::string> values; + for (int k = 0; k < kTestSize; ++k) { + values.push_back(RandomString(&rnd, kCDTValueSize)); + ASSERT_OK(Put(Key(k), values[k])); + } + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + db_size[0] = Size(Key(0), Key(kTestSize - 1)); + + for (int k = 0; k < kTestSize; ++k) { + ASSERT_OK(Delete(Key(k))); + } + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + db_size[1] = Size(Key(0), Key(kTestSize - 1)); + + // must have much smaller db size. + ASSERT_GT(db_size[0] / 3, db_size[1]); + } +} + +TEST_F(DBCompactionTest, SkipStatsUpdateTest) { + // This test verifies that UpdateAccumulatedStats is not run + // when options.skip_stats_update_on_db_open = true. + // The test will need to be updated if the internal behavior changes.
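+ // The check works indirectly: opening table files is the observable side + // effect of the stats update, so the test counts random file opens via + // env_->random_file_open_counter_ instead of calling any internal API.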
+ + Options options = DeletionTriggerOptions(CurrentOptions()); + options.env = env_; + DestroyAndReopen(options); + Random rnd(301); + + const int kTestSize = kCDTKeysPerBuffer * 512; + std::vector<std::string> values; + for (int k = 0; k < kTestSize; ++k) { + values.push_back(RandomString(&rnd, kCDTValueSize)); + ASSERT_OK(Put(Key(k), values[k])); + } + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + + // Reopen the DB with stats-update disabled + options.skip_stats_update_on_db_open = true; + env_->random_file_open_counter_.store(0); + Reopen(options); + + // As stats-update is disabled, we expect a very low number of + // random file opens. + // Note that this number must be changed accordingly if we change + // the number of files needed to be opened in the DB::Open process. + const int kMaxFileOpenCount = 10; + ASSERT_LT(env_->random_file_open_counter_.load(), kMaxFileOpenCount); + + // Repeat the reopen process, but this time we enable + // stats-update. + options.skip_stats_update_on_db_open = false; + env_->random_file_open_counter_.store(0); + Reopen(options); + + // Since we do a normal stats update on db-open, there + // will be more random file opens. + ASSERT_GT(env_->random_file_open_counter_.load(), kMaxFileOpenCount); +} + +TEST_F(DBCompactionTest, TestTableReaderForCompaction) { + Options options = CurrentOptions(); + options.env = env_; + options.new_table_reader_for_compaction_inputs = true; + options.max_open_files = 100; + options.level0_file_num_compaction_trigger = 3; + DestroyAndReopen(options); + Random rnd(301); + + int num_table_cache_lookup = 0; + int num_new_table_reader = 0; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "TableCache::FindTable:0", [&](void* arg) { + assert(arg != nullptr); + bool no_io = *(reinterpret_cast<bool*>(arg)); + if (!no_io) { + // filter out cases for table properties queries. + num_table_cache_lookup++; + } + }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "TableCache::GetTableReader:0", + [&](void* arg) { num_new_table_reader++; }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + for (int k = 0; k < options.level0_file_num_compaction_trigger; ++k) { + ASSERT_OK(Put(Key(k), Key(k))); + ASSERT_OK(Put(Key(10 - k), "bar")); + if (k < options.level0_file_num_compaction_trigger - 1) { + num_table_cache_lookup = 0; + Flush(); + dbfull()->TEST_WaitForCompact(); + // preloading iterator issues one table cache lookup and creates + // a new table reader. + ASSERT_EQ(num_table_cache_lookup, 1); + ASSERT_EQ(num_new_table_reader, 1); + + num_table_cache_lookup = 0; + num_new_table_reader = 0; + ASSERT_EQ(Key(k), Get(Key(k))); + // lookup iterator from table cache and no need to create a new one. + ASSERT_EQ(num_table_cache_lookup, 1); + ASSERT_EQ(num_new_table_reader, 0); + } + } + + num_table_cache_lookup = 0; + num_new_table_reader = 0; + Flush(); + dbfull()->TEST_WaitForCompact(); + // Preloading iterator issues one table cache lookup and creates + // a new table reader. One file is created for flush and one for compaction. + // Compaction inputs make no table cache look-up. + ASSERT_EQ(num_table_cache_lookup, 2); + // Create new iterator for: + // (1) 1 for verifying flush results + // (2) 3 for compaction input files + // (3) 1 for verifying compaction results.
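+ // (1 + 3 + 1 = 5 new table readers expected in total below.)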
+ ASSERT_EQ(num_new_table_reader, 5); + + num_table_cache_lookup = 0; + num_new_table_reader = 0; + ASSERT_EQ(Key(1), Get(Key(1))); + ASSERT_EQ(num_table_cache_lookup, 1); + ASSERT_EQ(num_new_table_reader, 0); + + num_table_cache_lookup = 0; + num_new_table_reader = 0; + CompactRangeOptions cro; + cro.change_level = true; + cro.target_level = 2; + cro.bottommost_level_compaction = BottommostLevelCompaction::kForce; + db_->CompactRange(cro, nullptr, nullptr); + // Only verifying compaction outputs issues one table cache lookup. + ASSERT_EQ(num_table_cache_lookup, 1); + // One for compaction input, one for verifying compaction results. + ASSERT_EQ(num_new_table_reader, 2); + + num_table_cache_lookup = 0; + num_new_table_reader = 0; + ASSERT_EQ(Key(1), Get(Key(1))); + ASSERT_EQ(num_table_cache_lookup, 1); + ASSERT_EQ(num_new_table_reader, 0); + + rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks(); +} + +TEST_P(DBCompactionTestWithParam, CompactionDeletionTriggerReopen) { + for (int tid = 0; tid < 2; ++tid) { + uint64_t db_size[3]; + Options options = DeletionTriggerOptions(CurrentOptions()); + options.max_subcompactions = max_subcompactions_; + + if (tid == 1) { + // second pass with universal compaction + options.compaction_style = kCompactionStyleUniversal; + options.num_levels = 1; + } + + DestroyAndReopen(options); + Random rnd(301); + + // round 1 --- insert key/value pairs. + const int kTestSize = kCDTKeysPerBuffer * 512; + std::vector<std::string> values; + for (int k = 0; k < kTestSize; ++k) { + values.push_back(RandomString(&rnd, kCDTValueSize)); + ASSERT_OK(Put(Key(k), values[k])); + } + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + db_size[0] = Size(Key(0), Key(kTestSize - 1)); + Close(); + + // round 2 --- disable auto-compactions and issue deletions. + options.create_if_missing = false; + options.disable_auto_compactions = true; + Reopen(options); + + for (int k = 0; k < kTestSize; ++k) { + ASSERT_OK(Delete(Key(k))); + } + db_size[1] = Size(Key(0), Key(kTestSize - 1)); + Close(); + // as auto_compaction is off, we shouldn't see much reduction + // in db size. + ASSERT_LT(db_size[0] / 3, db_size[1]); + + // round 3 --- reopen db with auto_compaction on and see if + // deletion compensation still works. + options.disable_auto_compactions = false; + Reopen(options); + // insert a relatively small amount of data to trigger auto compaction. + for (int k = 0; k < kTestSize / 10; ++k) { + ASSERT_OK(Put(Key(k), values[k])); + } + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + db_size[2] = Size(Key(0), Key(kTestSize - 1)); + // this time we're expecting a significant drop in size. + ASSERT_GT(db_size[0] / 3, db_size[2]); + } +} + +TEST_F(DBCompactionTest, DisableStatsUpdateReopen) { + uint64_t db_size[3]; + for (int test = 0; test < 2; ++test) { + Options options = DeletionTriggerOptions(CurrentOptions()); + options.skip_stats_update_on_db_open = (test == 0); + + env_->random_read_counter_.Reset(); + DestroyAndReopen(options); + Random rnd(301); + + // round 1 --- insert key/value pairs. + const int kTestSize = kCDTKeysPerBuffer * 512; + std::vector<std::string> values; + for (int k = 0; k < kTestSize; ++k) { + values.push_back(RandomString(&rnd, kCDTValueSize)); + ASSERT_OK(Put(Key(k), values[k])); + } + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + db_size[0] = Size(Key(0), Key(kTestSize - 1)); + Close(); + + // round 2 --- disable auto-compactions and issue deletions.
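+ // (With auto-compaction off the deletes only add tombstones, so the size + // measured below should stay close to the round-1 size.)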
+ options.create_if_missing = false; + options.disable_auto_compactions = true; + + env_->random_read_counter_.Reset(); + Reopen(options); + + for (int k = 0; k < kTestSize; ++k) { + ASSERT_OK(Delete(Key(k))); + } + db_size[1] = Size(Key(0), Key(kTestSize - 1)); + Close(); + // as auto_compaction is off, we shouldn't see much reduction + // in db size. + ASSERT_LT(db_size[0] / 3, db_size[1]); + + // round 3 --- reopen db with auto_compaction on and see if + // deletion compensation still works. + options.disable_auto_compactions = false; + Reopen(options); + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + db_size[2] = Size(Key(0), Key(kTestSize - 1)); + + if (options.skip_stats_update_on_db_open) { + // If the stats update on DB::Open is disabled, we don't expect + // the deletion entries to take effect. + ASSERT_LT(db_size[0] / 3, db_size[2]); + } else { + // Otherwise, we should see a significant drop in db size. + ASSERT_GT(db_size[0] / 3, db_size[2]); + } + } +} + + +TEST_P(DBCompactionTestWithParam, CompactionTrigger) { + const int kNumKeysPerFile = 100; + + Options options = CurrentOptions(); + options.write_buffer_size = 110 << 10; // 110KB + options.arena_block_size = 4 << 10; + options.num_levels = 3; + options.level0_file_num_compaction_trigger = 3; + options.max_subcompactions = max_subcompactions_; + options.memtable_factory.reset(new SpecialSkipListFactory(kNumKeysPerFile)); + CreateAndReopenWithCF({"pikachu"}, options); + + Random rnd(301); + + for (int num = 0; num < options.level0_file_num_compaction_trigger - 1; + num++) { + std::vector<std::string> values; + // Write 100KB (100 values, each 1K) + for (int i = 0; i < kNumKeysPerFile; i++) { + values.push_back(RandomString(&rnd, 990)); + ASSERT_OK(Put(1, Key(i), values[i])); + } + // put extra key to trigger flush + ASSERT_OK(Put(1, "", "")); + dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + ASSERT_EQ(NumTableFilesAtLevel(0, 1), num + 1); + } + + // generate one more file in level-0, which should trigger level-0 compaction + std::vector<std::string> values; + for (int i = 0; i < kNumKeysPerFile; i++) { + values.push_back(RandomString(&rnd, 990)); + ASSERT_OK(Put(1, Key(i), values[i])); + } + // put extra key to trigger flush + ASSERT_OK(Put(1, "", "")); + dbfull()->TEST_WaitForCompact(); + + ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); + ASSERT_EQ(NumTableFilesAtLevel(1, 1), 1); +} + +TEST_F(DBCompactionTest, BGCompactionsAllowed) { + // Create several column families. Trigger compactions in all of them + // and check that the number of scheduled compactions stays below the limit. + const int kNumKeysPerFile = 100; + + Options options = CurrentOptions(); + options.write_buffer_size = 110 << 10; // 110KB + options.arena_block_size = 4 << 10; + options.num_levels = 3; + // Should speed up compaction when there are 4 files. + options.level0_file_num_compaction_trigger = 2; + options.level0_slowdown_writes_trigger = 20; + options.soft_pending_compaction_bytes_limit = 1 << 30; // Infinitely large + options.base_background_compactions = 1; + options.max_background_compactions = 3; + options.memtable_factory.reset(new SpecialSkipListFactory(kNumKeysPerFile)); + + // Block all threads in thread pool.
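+ // Each SleepingBackgroundTask occupies one LOW-priority thread until its + // WakeUp() is called, so scheduled compactions can only pile up in the + // queue, which is what GetThreadPoolQueueLen() measures below.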
+ const size_t kTotalTasks = 4; + env_->SetBackgroundThreads(4, Env::LOW); + test::SleepingBackgroundTask sleeping_tasks[kTotalTasks]; + for (size_t i = 0; i < kTotalTasks; i++) { + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, + &sleeping_tasks[i], Env::Priority::LOW); + sleeping_tasks[i].WaitUntilSleeping(); + } + + CreateAndReopenWithCF({"one", "two", "three"}, options); + + Random rnd(301); + for (int cf = 0; cf < 4; cf++) { + for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) { + for (int i = 0; i < kNumKeysPerFile; i++) { + ASSERT_OK(Put(cf, Key(i), "")); + } + // put extra key to trigger flush + ASSERT_OK(Put(cf, "", "")); + dbfull()->TEST_WaitForFlushMemTable(handles_[cf]); + ASSERT_EQ(NumTableFilesAtLevel(0, cf), num + 1); + } + } + + // Now all column families qualify compaction but only one should be + // scheduled, because no column family hits speed up condition. + ASSERT_EQ(1, env_->GetThreadPoolQueueLen(Env::Priority::LOW)); + + // Create two more files for one column family, which triggers speed up + // condition, three compactions will be scheduled. + for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) { + for (int i = 0; i < kNumKeysPerFile; i++) { + ASSERT_OK(Put(2, Key(i), "")); + } + // put extra key to trigger flush + ASSERT_OK(Put(2, "", "")); + dbfull()->TEST_WaitForFlushMemTable(handles_[2]); + ASSERT_EQ(options.level0_file_num_compaction_trigger + num + 1, + NumTableFilesAtLevel(0, 2)); + } + ASSERT_EQ(3, env_->GetThreadPoolQueueLen(Env::Priority::LOW)); + + // Unblock all threads to unblock all compactions. + for (size_t i = 0; i < kTotalTasks; i++) { + sleeping_tasks[i].WakeUp(); + sleeping_tasks[i].WaitUntilDone(); + } + dbfull()->TEST_WaitForCompact(); + + // Verify number of compactions allowed will come back to 1. + + for (size_t i = 0; i < kTotalTasks; i++) { + sleeping_tasks[i].Reset(); + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, + &sleeping_tasks[i], Env::Priority::LOW); + sleeping_tasks[i].WaitUntilSleeping(); + } + for (int cf = 0; cf < 4; cf++) { + for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) { + for (int i = 0; i < kNumKeysPerFile; i++) { + ASSERT_OK(Put(cf, Key(i), "")); + } + // put extra key to trigger flush + ASSERT_OK(Put(cf, "", "")); + dbfull()->TEST_WaitForFlushMemTable(handles_[cf]); + ASSERT_EQ(NumTableFilesAtLevel(0, cf), num + 1); + } + } + + // Now all column families qualify compaction but only one should be + // scheduled, because no column family hits speed up condition. 
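+ // (base_background_compactions = 1, so a single job stays queued; without + // the speed-up condition the limit never rises toward + // max_background_compactions.)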
+ ASSERT_EQ(1, env_->GetThreadPoolQueueLen(Env::Priority::LOW)); + + for (size_t i = 0; i < kTotalTasks; i++) { + sleeping_tasks[i].WakeUp(); + sleeping_tasks[i].WaitUntilDone(); + } +} + +TEST_P(DBCompactionTestWithParam, CompactionsGenerateMultipleFiles) { + Options options = CurrentOptions(); + options.write_buffer_size = 100000000; // Large write buffer + options.max_subcompactions = max_subcompactions_; + CreateAndReopenWithCF({"pikachu"}, options); + + Random rnd(301); + + // Write 8MB (80 values, each 100K) + ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); + std::vector<std::string> values; + for (int i = 0; i < 80; i++) { + values.push_back(RandomString(&rnd, 100000)); + ASSERT_OK(Put(1, Key(i), values[i])); + } + + // Reopening moves updates to level-0 + ReopenWithColumnFamilies({"default", "pikachu"}, options); + dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1], + true /* disallow trivial move */); + + ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); + ASSERT_GT(NumTableFilesAtLevel(1, 1), 1); + for (int i = 0; i < 80; i++) { + ASSERT_EQ(Get(1, Key(i)), values[i]); + } +} + +TEST_F(DBCompactionTest, MinorCompactionsHappen) { + do { + Options options = CurrentOptions(); + options.write_buffer_size = 10000; + CreateAndReopenWithCF({"pikachu"}, options); + + const int N = 500; + + int starting_num_tables = TotalTableFiles(1); + for (int i = 0; i < N; i++) { + ASSERT_OK(Put(1, Key(i), Key(i) + std::string(1000, 'v'))); + } + int ending_num_tables = TotalTableFiles(1); + ASSERT_GT(ending_num_tables, starting_num_tables); + + for (int i = 0; i < N; i++) { + ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(1, Key(i))); + } + + ReopenWithColumnFamilies({"default", "pikachu"}, options); + + for (int i = 0; i < N; i++) { + ASSERT_EQ(Key(i) + std::string(1000, 'v'), Get(1, Key(i))); + } + } while (ChangeCompactOptions()); +} + +TEST_F(DBCompactionTest, UserKeyCrossFile1) { + Options options = CurrentOptions(); + options.compaction_style = kCompactionStyleLevel; + options.level0_file_num_compaction_trigger = 3; + + DestroyAndReopen(options); + + // create first file and flush to l0 + Put("4", "A"); + Put("3", "A"); + Flush(); + dbfull()->TEST_WaitForFlushMemTable(); + + Put("2", "A"); + Delete("3"); + Flush(); + dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_EQ("NOT_FOUND", Get("3")); + + // move both files down to l1 + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_EQ("NOT_FOUND", Get("3")); + + for (int i = 0; i < 3; i++) { + Put("2", "B"); + Flush(); + dbfull()->TEST_WaitForFlushMemTable(); + } + dbfull()->TEST_WaitForCompact(); + + ASSERT_EQ("NOT_FOUND", Get("3")); +} + +TEST_F(DBCompactionTest, UserKeyCrossFile2) { + Options options = CurrentOptions(); + options.compaction_style = kCompactionStyleLevel; + options.level0_file_num_compaction_trigger = 3; + + DestroyAndReopen(options); + + // create first file and flush to l0 + Put("4", "A"); + Put("3", "A"); + Flush(); + dbfull()->TEST_WaitForFlushMemTable(); + + Put("2", "A"); + SingleDelete("3"); + Flush(); + dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_EQ("NOT_FOUND", Get("3")); + + // move both files down to l1 + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_EQ("NOT_FOUND", Get("3")); + + for (int i = 0; i < 3; i++) { + Put("2", "B"); + Flush(); + dbfull()->TEST_WaitForFlushMemTable(); + } + dbfull()->TEST_WaitForCompact(); + + ASSERT_EQ("NOT_FOUND", Get("3")); +} + +TEST_F(DBCompactionTest, ZeroSeqIdCompaction) { + Options options = CurrentOptions(); + options.compaction_style =
kCompactionStyleLevel; + options.level0_file_num_compaction_trigger = 3; + + FlushedFileCollector* collector = new FlushedFileCollector(); + options.listeners.emplace_back(collector); + + // compaction options + CompactionOptions compact_opt; + compact_opt.compression = kNoCompression; + compact_opt.output_file_size_limit = 4096; + const size_t key_len = + static_cast<size_t>(compact_opt.output_file_size_limit) / 5; + + DestroyAndReopen(options); + + std::vector<const Snapshot*> snaps; + + // create first file and flush to l0 + for (auto& key : {"1", "2", "3", "3", "3", "3"}) { + Put(key, std::string(key_len, 'A')); + snaps.push_back(dbfull()->GetSnapshot()); + } + Flush(); + dbfull()->TEST_WaitForFlushMemTable(); + + // create second file and flush to l0 + for (auto& key : {"3", "4", "5", "6", "7", "8"}) { + Put(key, std::string(key_len, 'A')); + snaps.push_back(dbfull()->GetSnapshot()); + } + Flush(); + dbfull()->TEST_WaitForFlushMemTable(); + + // move both files down to l1 + dbfull()->CompactFiles(compact_opt, collector->GetFlushedFiles(), 1); + + // release snapshots so that the first instance of key(3) can have seqId=0 + for (auto snap : snaps) { + dbfull()->ReleaseSnapshot(snap); + } + + // create 3 files in l0 to trigger compaction + for (int i = 0; i < options.level0_file_num_compaction_trigger; i++) { + Put("2", std::string(1, 'A')); + Flush(); + dbfull()->TEST_WaitForFlushMemTable(); + } + + dbfull()->TEST_WaitForCompact(); + ASSERT_OK(Put("", "")); +} + +// Check that writes done during a memtable compaction are recovered +// if the database is shut down during the memtable compaction. +TEST_F(DBCompactionTest, RecoverDuringMemtableCompaction) { + do { + Options options = CurrentOptions(); + options.env = env_; + CreateAndReopenWithCF({"pikachu"}, options); + + // Trigger a long memtable compaction and reopen the database during it + ASSERT_OK(Put(1, "foo", "v1")); // Goes to 1st log file + ASSERT_OK(Put(1, "big1", std::string(10000000, 'x'))); // Fills memtable + ASSERT_OK(Put(1, "big2", std::string(1000, 'y'))); // Triggers compaction + ASSERT_OK(Put(1, "bar", "v2")); // Goes to new log file + + ReopenWithColumnFamilies({"default", "pikachu"}, options); + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_EQ("v2", Get(1, "bar")); + ASSERT_EQ(std::string(10000000, 'x'), Get(1, "big1")); + ASSERT_EQ(std::string(1000, 'y'), Get(1, "big2")); + } while (ChangeOptions()); +} + +TEST_P(DBCompactionTestWithParam, TrivialMoveOneFile) { + int32_t trivial_move = 0; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:TrivialMove", + [&](void* arg) { trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Options options = CurrentOptions(); + options.write_buffer_size = 100000000; + options.max_subcompactions = max_subcompactions_; + DestroyAndReopen(options); + + int32_t num_keys = 80; + int32_t value_size = 100 * 1024; // 100 KB + + Random rnd(301); + std::vector<std::string> values; + for (int i = 0; i < num_keys; i++) { + values.push_back(RandomString(&rnd, value_size)); + ASSERT_OK(Put(Key(i), values[i])); + } + + // Reopening moves updates to L0 + Reopen(options); + ASSERT_EQ(NumTableFilesAtLevel(0, 0), 1); // 1 file in L0 + ASSERT_EQ(NumTableFilesAtLevel(1, 0), 0); // 0 files in L1 + + std::vector<LiveFileMetaData> metadata; + db_->GetLiveFilesMetaData(&metadata); + ASSERT_EQ(metadata.size(), 1U); + LiveFileMetaData level0_file = metadata[0]; // L0 file meta + + CompactRangeOptions cro; + cro.exclusive_manual_compaction = exclusive_manual_compaction_; + + // Compaction will initiate a trivial move
from L0 to L1 + dbfull()->CompactRange(cro, nullptr, nullptr); + + // File moved from L0 to L1 + ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0); // 0 files in L0 + ASSERT_EQ(NumTableFilesAtLevel(1, 0), 1); // 1 file in L1 + + metadata.clear(); + db_->GetLiveFilesMetaData(&metadata); + ASSERT_EQ(metadata.size(), 1U); + ASSERT_EQ(metadata[0].name /* level1_file.name */, level0_file.name); + ASSERT_EQ(metadata[0].size /* level1_file.size */, level0_file.size); + + for (int i = 0; i < num_keys; i++) { + ASSERT_EQ(Get(Key(i)), values[i]); + } + + ASSERT_EQ(trivial_move, 1); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + +TEST_P(DBCompactionTestWithParam, TrivialMoveNonOverlappingFiles) { + int32_t trivial_move = 0; + int32_t non_trivial_move = 0; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:TrivialMove", + [&](void* arg) { trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial", + [&](void* arg) { non_trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Options options = CurrentOptions(); + options.disable_auto_compactions = true; + options.write_buffer_size = 10 * 1024 * 1024; + options.max_subcompactions = max_subcompactions_; + + DestroyAndReopen(options); + // non-overlapping ranges + std::vector<std::pair<int32_t, int32_t>> ranges = { + {100, 199}, + {300, 399}, + {0, 99}, + {200, 299}, + {600, 699}, + {400, 499}, + {500, 550}, + {551, 599}, + }; + int32_t value_size = 10 * 1024; // 10 KB + + Random rnd(301); + std::map<int32_t, std::string> values; + for (size_t i = 0; i < ranges.size(); i++) { + for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) { + values[j] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(j), values[j])); + } + ASSERT_OK(Flush()); + } + + int32_t level0_files = NumTableFilesAtLevel(0, 0); + ASSERT_EQ(level0_files, ranges.size()); // Multiple files in L0 + ASSERT_EQ(NumTableFilesAtLevel(1, 0), 0); // No files in L1 + + CompactRangeOptions cro; + cro.exclusive_manual_compaction = exclusive_manual_compaction_; + + // Since data is non-overlapping we expect compaction to initiate + // a trivial move + db_->CompactRange(cro, nullptr, nullptr); + // We expect that all the files were trivially moved from L0 to L1 + ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0); + ASSERT_EQ(NumTableFilesAtLevel(1, 0) /* level1_files */, level0_files); + + for (size_t i = 0; i < ranges.size(); i++) { + for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) { + ASSERT_EQ(Get(Key(j)), values[j]); + } + } + + ASSERT_EQ(trivial_move, 1); + ASSERT_EQ(non_trivial_move, 0); + + trivial_move = 0; + non_trivial_move = 0; + values.clear(); + DestroyAndReopen(options); + // Same ranges as above but overlapping + ranges = { + {100, 199}, + {300, 399}, + {0, 99}, + {200, 299}, + {600, 699}, + {400, 499}, + {500, 560}, // this range overlaps with the next one + {551, 599}, + }; + for (size_t i = 0; i < ranges.size(); i++) { + for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) { + values[j] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(j), values[j])); + } + ASSERT_OK(Flush()); + } + + db_->CompactRange(cro, nullptr, nullptr); + + for (size_t i = 0; i < ranges.size(); i++) { + for (int32_t j = ranges[i].first; j <= ranges[i].second; j++) { + ASSERT_EQ(Get(Key(j)), values[j]); + } + } + ASSERT_EQ(trivial_move, 0); + ASSERT_EQ(non_trivial_move, 1); + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + +TEST_P(DBCompactionTestWithParam, TrivialMoveTargetLevel) { + int32_t trivial_move
= 0; + int32_t non_trivial_move = 0; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:TrivialMove", + [&](void* arg) { trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial", + [&](void* arg) { non_trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Options options = CurrentOptions(); + options.disable_auto_compactions = true; + options.write_buffer_size = 10 * 1024 * 1024; + options.num_levels = 7; + options.max_subcompactions = max_subcompactions_; + + DestroyAndReopen(options); + int32_t value_size = 10 * 1024; // 10 KB + + // Add 2 non-overlapping files + Random rnd(301); + std::map<int32_t, std::string> values; + + // file 1 [0 => 300] + for (int32_t i = 0; i <= 300; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // file 2 [600 => 700] + for (int32_t i = 600; i <= 700; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // 2 files in L0 + ASSERT_EQ("2", FilesPerLevel(0)); + CompactRangeOptions compact_options; + compact_options.change_level = true; + compact_options.target_level = 6; + compact_options.exclusive_manual_compaction = exclusive_manual_compaction_; + ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr)); + // 2 files in L6 + ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel(0)); + + ASSERT_EQ(trivial_move, 1); + ASSERT_EQ(non_trivial_move, 0); + + for (int32_t i = 0; i <= 300; i++) { + ASSERT_EQ(Get(Key(i)), values[i]); + } + for (int32_t i = 600; i <= 700; i++) { + ASSERT_EQ(Get(Key(i)), values[i]); + } +} + +TEST_P(DBCompactionTestWithParam, ManualCompactionPartial) { + int32_t trivial_move = 0; + int32_t non_trivial_move = 0; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:TrivialMove", + [&](void* arg) { trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial", + [&](void* arg) { non_trivial_move++; }); + bool first = true; + // Purpose of dependencies: + // 4 -> 1: ensure the order of two non-trivial compactions + // 5 -> 2 and 5 -> 3: ensure we do a check before two non-trivial compactions + // are installed + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"DBCompaction::ManualPartial:4", "DBCompaction::ManualPartial:1"}, + {"DBCompaction::ManualPartial:5", "DBCompaction::ManualPartial:2"}, + {"DBCompaction::ManualPartial:5", "DBCompaction::ManualPartial:3"}}); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { + if (first) { + first = false; + TEST_SYNC_POINT("DBCompaction::ManualPartial:4"); + TEST_SYNC_POINT("DBCompaction::ManualPartial:3"); + } else { // second non-trivial compaction + TEST_SYNC_POINT("DBCompaction::ManualPartial:2"); + } + }); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Options options = CurrentOptions(); + options.write_buffer_size = 10 * 1024 * 1024; + options.num_levels = 7; + options.max_subcompactions = max_subcompactions_; + options.level0_file_num_compaction_trigger = 3; + options.max_background_compactions = 3; + options.target_file_size_base = 1 << 23; // 8 MB + + DestroyAndReopen(options); + int32_t value_size = 10 * 1024; // 10 KB + + // Add 2 non-overlapping files + Random rnd(301); + std::map<int32_t, std::string> values; + + // file 1 [0 => 100] + for (int32_t i = 0; i < 100; i++) { + values[i] = RandomString(&rnd, value_size); +
ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // file 2 [100 => 300] + for (int32_t i = 100; i < 300; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // 2 files in L0 + ASSERT_EQ("2", FilesPerLevel(0)); + CompactRangeOptions compact_options; + compact_options.change_level = true; + compact_options.target_level = 6; + compact_options.exclusive_manual_compaction = exclusive_manual_compaction_; + // Trivial move the two non-overlapping files to level 6 + ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr)); + // 2 files in L6 + ASSERT_EQ("0,0,0,0,0,0,2", FilesPerLevel(0)); + + ASSERT_EQ(trivial_move, 1); + ASSERT_EQ(non_trivial_move, 0); + + // file 3 [ 0 => 200] + for (int32_t i = 0; i < 200; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // 1 files in L0 + ASSERT_EQ("1,0,0,0,0,0,2", FilesPerLevel(0)); + ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr, false)); + ASSERT_OK(dbfull()->TEST_CompactRange(1, nullptr, nullptr, nullptr, false)); + ASSERT_OK(dbfull()->TEST_CompactRange(2, nullptr, nullptr, nullptr, false)); + ASSERT_OK(dbfull()->TEST_CompactRange(3, nullptr, nullptr, nullptr, false)); + ASSERT_OK(dbfull()->TEST_CompactRange(4, nullptr, nullptr, nullptr, false)); + // 2 files in L6, 1 file in L5 + ASSERT_EQ("0,0,0,0,0,1,2", FilesPerLevel(0)); + + ASSERT_EQ(trivial_move, 6); + ASSERT_EQ(non_trivial_move, 0); + + std::thread threads([&] { + compact_options.change_level = false; + compact_options.exclusive_manual_compaction = false; + std::string begin_string = Key(0); + std::string end_string = Key(199); + Slice begin(begin_string); + Slice end(end_string); + // First non-trivial compaction is triggered + ASSERT_OK(db_->CompactRange(compact_options, &begin, &end)); + }); + + TEST_SYNC_POINT("DBCompaction::ManualPartial:1"); + // file 4 [300 => 400) + for (int32_t i = 300; i <= 400; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // file 5 [400 => 500) + for (int32_t i = 400; i <= 500; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // file 6 [500 => 600) + for (int32_t i = 500; i <= 600; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + // Second non-trivial compaction is triggered + ASSERT_OK(Flush()); + + // Before two non-trivial compactions are installed, there are 3 files in L0 + ASSERT_EQ("3,0,0,0,0,1,2", FilesPerLevel(0)); + TEST_SYNC_POINT("DBCompaction::ManualPartial:5"); + + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + // After two non-trivial compactions are installed, there is 1 file in L6, and + // 1 file in L1 + ASSERT_EQ("0,1,0,0,0,0,1", FilesPerLevel(0)); + threads.join(); + + for (int32_t i = 0; i < 600; i++) { + ASSERT_EQ(Get(Key(i)), values[i]); + } +} + +TEST_F(DBCompactionTest, ManualPartialFill) { + int32_t trivial_move = 0; + int32_t non_trivial_move = 0; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:TrivialMove", + [&](void* arg) { trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial", + [&](void* arg) { non_trivial_move++; }); + bool first = true; + bool second = true; + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"DBCompaction::PartialFill:4", 
"DBCompaction::PartialFill:1"}, + {"DBCompaction::PartialFill:2", "DBCompaction::PartialFill:3"}}); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun", [&](void* arg) { + if (first) { + TEST_SYNC_POINT("DBCompaction::PartialFill:4"); + first = false; + TEST_SYNC_POINT("DBCompaction::PartialFill:3"); + } else if (second) { + } + }); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Options options = CurrentOptions(); + options.write_buffer_size = 10 * 1024 * 1024; + options.max_bytes_for_level_multiplier = 2; + options.num_levels = 4; + options.level0_file_num_compaction_trigger = 3; + options.max_background_compactions = 3; + + DestroyAndReopen(options); + int32_t value_size = 10 * 1024; // 10 KB + + // Add 2 non-overlapping files + Random rnd(301); + std::map values; + + // file 1 [0 => 100] + for (int32_t i = 0; i < 100; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // file 2 [100 => 300] + for (int32_t i = 100; i < 300; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // 2 files in L0 + ASSERT_EQ("2", FilesPerLevel(0)); + CompactRangeOptions compact_options; + compact_options.change_level = true; + compact_options.target_level = 2; + ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr)); + // 2 files in L2 + ASSERT_EQ("0,0,2", FilesPerLevel(0)); + + ASSERT_EQ(trivial_move, 1); + ASSERT_EQ(non_trivial_move, 0); + + // file 3 [ 0 => 200] + for (int32_t i = 0; i < 200; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // 2 files in L2, 1 in L0 + ASSERT_EQ("1,0,2", FilesPerLevel(0)); + ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr, false)); + // 2 files in L2, 1 in L1 + ASSERT_EQ("0,1,2", FilesPerLevel(0)); + + ASSERT_EQ(trivial_move, 2); + ASSERT_EQ(non_trivial_move, 0); + + std::thread threads([&] { + compact_options.change_level = false; + compact_options.exclusive_manual_compaction = false; + std::string begin_string = Key(0); + std::string end_string = Key(199); + Slice begin(begin_string); + Slice end(end_string); + ASSERT_OK(db_->CompactRange(compact_options, &begin, &end)); + }); + + TEST_SYNC_POINT("DBCompaction::PartialFill:1"); + // Many files 4 [300 => 4300) + for (int32_t i = 0; i <= 5; i++) { + for (int32_t j = 300; j < 4300; j++) { + if (j == 2300) { + ASSERT_OK(Flush()); + dbfull()->TEST_WaitForFlushMemTable(); + } + values[j] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(j), values[j])); + } + } + + // Verify level sizes + uint64_t target_size = 4 * options.max_bytes_for_level_base; + for (int32_t i = 1; i < options.num_levels; i++) { + ASSERT_LE(SizeAtLevel(i), target_size); + target_size *= options.max_bytes_for_level_multiplier; + } + + TEST_SYNC_POINT("DBCompaction::PartialFill:2"); + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + threads.join(); + + for (int32_t i = 0; i < 4300; i++) { + ASSERT_EQ(Get(Key(i)), values[i]); + } +} + +TEST_F(DBCompactionTest, DeleteFileRange) { + Options options = CurrentOptions(); + options.write_buffer_size = 10 * 1024 * 1024; + options.max_bytes_for_level_multiplier = 2; + options.num_levels = 4; + options.level0_file_num_compaction_trigger = 3; + options.max_background_compactions = 3; + + DestroyAndReopen(options); + int32_t value_size = 10 * 1024; // 10 KB + + // Add 2 non-overlapping files 
+ Random rnd(301); + std::map<int32_t, std::string> values; + + // file 1 [0 => 100] + for (int32_t i = 0; i < 100; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // file 2 [100 => 300] + for (int32_t i = 100; i < 300; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // 2 files in L0 + ASSERT_EQ("2", FilesPerLevel(0)); + CompactRangeOptions compact_options; + compact_options.change_level = true; + compact_options.target_level = 2; + ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr)); + // 2 files in L2 + ASSERT_EQ("0,0,2", FilesPerLevel(0)); + + // file 3 [ 0 => 200] + for (int32_t i = 0; i < 200; i++) { + values[i] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + // Many files 4 [300 => 4300) + for (int32_t i = 0; i <= 5; i++) { + for (int32_t j = 300; j < 4300; j++) { + if (j == 2300) { + ASSERT_OK(Flush()); + dbfull()->TEST_WaitForFlushMemTable(); + } + values[j] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(j), values[j])); + } + } + ASSERT_OK(Flush()); + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + + // Verify level sizes + uint64_t target_size = 4 * options.max_bytes_for_level_base; + for (int32_t i = 1; i < options.num_levels; i++) { + ASSERT_LE(SizeAtLevel(i), target_size); + target_size *= options.max_bytes_for_level_multiplier; + } + + size_t old_num_files = CountFiles(); + std::string begin_string = Key(1000); + std::string end_string = Key(2000); + Slice begin(begin_string); + Slice end(end_string); + ASSERT_OK(DeleteFilesInRange(db_, db_->DefaultColumnFamily(), &begin, &end)); + + int32_t deleted_count = 0; + for (int32_t i = 0; i < 4300; i++) { + if (i < 1000 || i > 2000) { + ASSERT_EQ(Get(Key(i)), values[i]); + } else { + ReadOptions roptions; + std::string result; + Status s = db_->Get(roptions, Key(i), &result); + ASSERT_TRUE(s.IsNotFound() || s.ok()); + if (s.IsNotFound()) { + deleted_count++; + } + } + } + ASSERT_GT(deleted_count, 0); + begin_string = Key(5000); + end_string = Key(6000); + Slice begin1(begin_string); + Slice end1(end_string); + // Try deleting files in a range which contains no keys + ASSERT_OK( + DeleteFilesInRange(db_, db_->DefaultColumnFamily(), &begin1, &end1)); + + // Push data from level 0 to level 1 to force all data to be deleted + // Note that we don't delete level 0 files + compact_options.change_level = true; + compact_options.target_level = 1; + ASSERT_OK(dbfull()->TEST_CompactRange(0, nullptr, nullptr)); + + ASSERT_OK( + DeleteFilesInRange(db_, db_->DefaultColumnFamily(), nullptr, nullptr)); + + int32_t deleted_count2 = 0; + for (int32_t i = 0; i < 4300; i++) { + ReadOptions roptions; + std::string result; + Status s = db_->Get(roptions, Key(i), &result); + ASSERT_TRUE(s.IsNotFound()); + deleted_count2++; + } + ASSERT_GT(deleted_count2, deleted_count); + size_t new_num_files = CountFiles(); + ASSERT_GT(old_num_files, new_num_files); +} + +TEST_P(DBCompactionTestWithParam, TrivialMoveToLastLevelWithFiles) { + int32_t trivial_move = 0; + int32_t non_trivial_move = 0; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:TrivialMove", + [&](void* arg) { trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial", + [&](void* arg) { non_trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Options options =
CurrentOptions(); + options.write_buffer_size = 100000000; + options.max_subcompactions = max_subcompactions_; + DestroyAndReopen(options); + + int32_t value_size = 10 * 1024; // 10 KB + + Random rnd(301); + std::vector<std::string> values; + // File with keys [ 0 => 99 ] + for (int i = 0; i < 100; i++) { + values.push_back(RandomString(&rnd, value_size)); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + ASSERT_EQ("1", FilesPerLevel(0)); + // Compaction will do L0=>L1 (trivial move) then move L1 files to L3 + CompactRangeOptions compact_options; + compact_options.change_level = true; + compact_options.target_level = 3; + compact_options.exclusive_manual_compaction = exclusive_manual_compaction_; + ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr)); + ASSERT_EQ("0,0,0,1", FilesPerLevel(0)); + ASSERT_EQ(trivial_move, 1); + ASSERT_EQ(non_trivial_move, 0); + + // File with keys [ 100 => 199 ] + for (int i = 100; i < 200; i++) { + values.push_back(RandomString(&rnd, value_size)); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + ASSERT_EQ("1,0,0,1", FilesPerLevel(0)); + CompactRangeOptions cro; + cro.exclusive_manual_compaction = exclusive_manual_compaction_; + // Compaction will do L0=>L1 L1=>L2 L2=>L3 (3 trivial moves) + ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr)); + ASSERT_EQ("0,0,0,2", FilesPerLevel(0)); + ASSERT_EQ(trivial_move, 4); + ASSERT_EQ(non_trivial_move, 0); + + for (int i = 0; i < 200; i++) { + ASSERT_EQ(Get(Key(i)), values[i]); + } + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + +TEST_P(DBCompactionTestWithParam, LevelCompactionThirdPath) { + Options options = CurrentOptions(); + options.db_paths.emplace_back(dbname_, 500 * 1024); + options.db_paths.emplace_back(dbname_ + "_2", 4 * 1024 * 1024); + options.db_paths.emplace_back(dbname_ + "_3", 1024 * 1024 * 1024); + options.memtable_factory.reset( + new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1)); + options.compaction_style = kCompactionStyleLevel; + options.write_buffer_size = 110 << 10; // 110KB + options.arena_block_size = 4 << 10; + options.level0_file_num_compaction_trigger = 2; + options.num_levels = 4; + options.max_bytes_for_level_base = 400 * 1024; + options.max_subcompactions = max_subcompactions_; + // options = CurrentOptions(options); + + std::vector<std::string> filenames; + env_->GetChildren(options.db_paths[1].path, &filenames); + // Delete archival files. + for (size_t i = 0; i < filenames.size(); ++i) { + env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]); + } + env_->DeleteDir(options.db_paths[1].path); + Reopen(options); + + Random rnd(301); + int key_idx = 0; + + // First three 110KB files are not going to second path.
+ // After that, (100K, 200K) + for (int num = 0; num < 3; num++) { + GenerateNewFile(&rnd, &key_idx); + } + + // Another 110KB triggers a compaction to 400K file to fill up first path + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ(3, GetSstFileCount(options.db_paths[1].path)); + + // (1, 4) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4", FilesPerLevel(0)); + ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + // (1, 4, 1) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,1", FilesPerLevel(0)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + // (1, 4, 2) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,2", FilesPerLevel(0)); + ASSERT_EQ(2, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + // (1, 4, 3) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,3", FilesPerLevel(0)); + ASSERT_EQ(3, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + // (1, 4, 4) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,4", FilesPerLevel(0)); + ASSERT_EQ(4, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + // (1, 4, 5) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,5", FilesPerLevel(0)); + ASSERT_EQ(5, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + // (1, 4, 6) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,6", FilesPerLevel(0)); + ASSERT_EQ(6, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + // (1, 4, 7) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,7", FilesPerLevel(0)); + ASSERT_EQ(7, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + // (1, 4, 8) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,8", FilesPerLevel(0)); + ASSERT_EQ(8, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(4, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + for (int i = 0; i < key_idx; i++) { + auto v = Get(Key(i)); + ASSERT_NE(v, "NOT_FOUND"); + ASSERT_TRUE(v.size() == 1 || v.size() == 990); + } + + Reopen(options); + + for (int i = 0; i < key_idx; i++) { + auto v = Get(Key(i)); + ASSERT_NE(v, "NOT_FOUND"); + ASSERT_TRUE(v.size() == 1 || v.size() == 990); + } + + Destroy(options); +} + +TEST_P(DBCompactionTestWithParam, LevelCompactionPathUse) { + Options options = CurrentOptions(); + options.db_paths.emplace_back(dbname_, 500 * 1024); + options.db_paths.emplace_back(dbname_ + "_2", 4 * 1024 * 1024); + options.db_paths.emplace_back(dbname_ + "_3", 1024 * 1024 * 1024); + options.memtable_factory.reset( + new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1)); + options.compaction_style = kCompactionStyleLevel; + options.write_buffer_size = 110 << 10; // 110KB + options.arena_block_size = 4 << 10; + options.level0_file_num_compaction_trigger = 2; + options.num_levels = 4; + options.max_bytes_for_level_base = 400 * 1024; + options.max_subcompactions = max_subcompactions_; + // options = 
CurrentOptions(options); + + std::vector<std::string> filenames; + env_->GetChildren(options.db_paths[1].path, &filenames); + // Delete archival files. + for (size_t i = 0; i < filenames.size(); ++i) { + env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]); + } + env_->DeleteDir(options.db_paths[1].path); + Reopen(options); + + Random rnd(301); + int key_idx = 0; + + // Always gets compacted into 1 Level1 file, + // 0/1 Level 0 file + for (int num = 0; num < 3; num++) { + key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + } + + key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path)); + + key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,1", FilesPerLevel(0)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("0,1", FilesPerLevel(0)); + ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(0, GetSstFileCount(dbname_)); + + key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,1", FilesPerLevel(0)); + ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("0,1", FilesPerLevel(0)); + ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(0, GetSstFileCount(dbname_)); + + key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,1", FilesPerLevel(0)); + ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("0,1", FilesPerLevel(0)); + ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(0, GetSstFileCount(dbname_)); + + key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,1", FilesPerLevel(0)); + ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("0,1", FilesPerLevel(0)); + ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(0, GetSstFileCount(dbname_)); + + key_idx = 0; + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,1", FilesPerLevel(0)); + ASSERT_EQ(0, GetSstFileCount(options.db_paths[2].path)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(dbname_)); + + for (int i = 0; i < key_idx; i++) { + auto v = Get(Key(i)); + ASSERT_NE(v, "NOT_FOUND"); + ASSERT_TRUE(v.size() == 1 || v.size() == 990); + } + + Reopen(options); + + for (int i = 0; i < key_idx; i++) { + auto v = Get(Key(i)); + ASSERT_NE(v, "NOT_FOUND"); + ASSERT_TRUE(v.size() == 1 || v.size() == 990); + } + + Destroy(options); +} + +TEST_P(DBCompactionTestWithParam, ConvertCompactionStyle) { + Random rnd(301); + int max_key_level_insert = 200; + int max_key_universal_insert = 600; + + // Stage 1: generate a db with level compaction + Options options = CurrentOptions(); + options.write_buffer_size = 110 << 10; // 110KB + options.arena_block_size = 4 << 10; + options.num_levels = 4; +
options.level0_file_num_compaction_trigger = 3; + options.max_bytes_for_level_base = 500 << 10; // 500KB + options.max_bytes_for_level_multiplier = 1; + options.target_file_size_base = 200 << 10; // 200KB + options.target_file_size_multiplier = 1; + options.max_subcompactions = max_subcompactions_; + CreateAndReopenWithCF({"pikachu"}, options); + + for (int i = 0; i <= max_key_level_insert; i++) { + // each value is 10K + ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000))); + } + ASSERT_OK(Flush(1)); + dbfull()->TEST_WaitForCompact(); + + ASSERT_GT(TotalTableFiles(1, 4), 1); + int non_level0_num_files = 0; + for (int i = 1; i < options.num_levels; i++) { + non_level0_num_files += NumTableFilesAtLevel(i, 1); + } + ASSERT_GT(non_level0_num_files, 0); + + // Stage 2: reopen with universal compaction - should fail + options = CurrentOptions(); + options.compaction_style = kCompactionStyleUniversal; + options.num_levels = 1; + options = CurrentOptions(options); + Status s = TryReopenWithColumnFamilies({"default", "pikachu"}, options); + ASSERT_TRUE(s.IsInvalidArgument()); + + // Stage 3: compact into a single file and move the file to level 0 + options = CurrentOptions(); + options.disable_auto_compactions = true; + options.target_file_size_base = INT_MAX; + options.target_file_size_multiplier = 1; + options.max_bytes_for_level_base = INT_MAX; + options.max_bytes_for_level_multiplier = 1; + options.num_levels = 4; + options = CurrentOptions(options); + ReopenWithColumnFamilies({"default", "pikachu"}, options); + + CompactRangeOptions compact_options; + compact_options.change_level = true; + compact_options.target_level = 0; + compact_options.bottommost_level_compaction = + BottommostLevelCompaction::kForce; + compact_options.exclusive_manual_compaction = exclusive_manual_compaction_; + dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr); + + // Only 1 file in L0 + ASSERT_EQ("1", FilesPerLevel(1)); + + // Stage 4: re-open in universal compaction style and do some db operations + options = CurrentOptions(); + options.compaction_style = kCompactionStyleUniversal; + options.num_levels = 4; + options.write_buffer_size = 110 << 10; // 110KB + options.arena_block_size = 4 << 10; + options.level0_file_num_compaction_trigger = 3; + options = CurrentOptions(options); + ReopenWithColumnFamilies({"default", "pikachu"}, options); + + options.num_levels = 1; + ReopenWithColumnFamilies({"default", "pikachu"}, options); + + for (int i = max_key_level_insert / 2; i <= max_key_universal_insert; i++) { + ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000))); + } + dbfull()->Flush(FlushOptions()); + ASSERT_OK(Flush(1)); + dbfull()->TEST_WaitForCompact(); + + for (int i = 1; i < options.num_levels; i++) { + ASSERT_EQ(NumTableFilesAtLevel(i, 1), 0); + } + + // verify keys inserted in both level compaction style and universal + // compaction style + std::string keys_in_db; + Iterator* iter = dbfull()->NewIterator(ReadOptions(), handles_[1]); + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + keys_in_db.append(iter->key().ToString()); + keys_in_db.push_back(','); + } + delete iter; + + std::string expected_keys; + for (int i = 0; i <= max_key_universal_insert; i++) { + expected_keys.append(Key(i)); + expected_keys.push_back(','); + } + + ASSERT_EQ(keys_in_db, expected_keys); +} + +TEST_F(DBCompactionTest, L0_CompactionBug_Issue44_a) { + do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "b", "v")); + ReopenWithColumnFamilies({"default", "pikachu"}, 
CurrentOptions()); + ASSERT_OK(Delete(1, "b")); + ASSERT_OK(Delete(1, "a")); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + ASSERT_OK(Delete(1, "a")); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "a", "v")); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + ASSERT_EQ("(a->v)", Contents(1)); + env_->SleepForMicroseconds(1000000); // Wait for compaction to finish + ASSERT_EQ("(a->v)", Contents(1)); + } while (ChangeCompactOptions()); +} + +TEST_F(DBCompactionTest, L0_CompactionBug_Issue44_b) { + do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + Put(1, "", ""); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + Delete(1, "e"); + Put(1, "", ""); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + Put(1, "c", "cv"); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + Put(1, "", ""); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + Put(1, "", ""); + env_->SleepForMicroseconds(1000000); // Wait for compaction to finish + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + Put(1, "d", "dv"); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + Put(1, "", ""); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + Delete(1, "d"); + Delete(1, "b"); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + ASSERT_EQ("(->)(c->cv)", Contents(1)); + env_->SleepForMicroseconds(1000000); // Wait for compaction to finish + ASSERT_EQ("(->)(c->cv)", Contents(1)); + } while (ChangeCompactOptions()); +} + +TEST_P(DBCompactionTestWithParam, ManualCompaction) { + Options options = CurrentOptions(); + options.max_subcompactions = max_subcompactions_; + options.statistics = rocksdb::CreateDBStatistics(); + CreateAndReopenWithCF({"pikachu"}, options); + + // iter - 0 with 7 levels + // iter - 1 with 3 levels + for (int iter = 0; iter < 2; ++iter) { + MakeTables(3, "p", "q", 1); + ASSERT_EQ("1,1,1", FilesPerLevel(1)); + + // Compaction range falls before files + Compact(1, "", "c"); + ASSERT_EQ("1,1,1", FilesPerLevel(1)); + + // Compaction range falls after files + Compact(1, "r", "z"); + ASSERT_EQ("1,1,1", FilesPerLevel(1)); + + // Compaction range overlaps files + Compact(1, "p1", "p9"); + ASSERT_EQ("0,0,1", FilesPerLevel(1)); + + // Populate a different range + MakeTables(3, "c", "e", 1); + ASSERT_EQ("1,1,2", FilesPerLevel(1)); + + // Compact just the new range + Compact(1, "b", "f"); + ASSERT_EQ("0,0,2", FilesPerLevel(1)); + + // Compact all + MakeTables(1, "a", "z", 1); + ASSERT_EQ("1,0,2", FilesPerLevel(1)); + + uint64_t prev_block_cache_add = + options.statistics->getTickerCount(BLOCK_CACHE_ADD); + CompactRangeOptions cro; + cro.exclusive_manual_compaction = exclusive_manual_compaction_; + db_->CompactRange(cro, handles_[1], nullptr, nullptr); + // Verify manual compaction doesn't fill block cache + ASSERT_EQ(prev_block_cache_add, + options.statistics->getTickerCount(BLOCK_CACHE_ADD)); + + ASSERT_EQ("0,0,1", FilesPerLevel(1)); + + if (iter == 0) { + options = CurrentOptions(); + options.max_background_flushes = 0; + options.num_levels = 3; + options.create_if_missing = true; + options.statistics = rocksdb::CreateDBStatistics(); + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + } + } +} + + +TEST_P(DBCompactionTestWithParam, ManualLevelCompactionOutputPathId) { + 
Options options = CurrentOptions(); + options.db_paths.emplace_back(dbname_ + "_2", 2 * 10485760); + options.db_paths.emplace_back(dbname_ + "_3", 100 * 10485760); + options.db_paths.emplace_back(dbname_ + "_4", 120 * 10485760); + options.max_subcompactions = max_subcompactions_; + CreateAndReopenWithCF({"pikachu"}, options); + + // iter - 0 with 7 levels + // iter - 1 with 3 levels + for (int iter = 0; iter < 2; ++iter) { + for (int i = 0; i < 3; ++i) { + ASSERT_OK(Put(1, "p", "begin")); + ASSERT_OK(Put(1, "q", "end")); + ASSERT_OK(Flush(1)); + } + ASSERT_EQ("3", FilesPerLevel(1)); + ASSERT_EQ(3, GetSstFileCount(options.db_paths[0].path)); + ASSERT_EQ(0, GetSstFileCount(dbname_)); + + // Compaction range falls before files + Compact(1, "", "c"); + ASSERT_EQ("3", FilesPerLevel(1)); + + // Compaction range falls after files + Compact(1, "r", "z"); + ASSERT_EQ("3", FilesPerLevel(1)); + + // Compaction range overlaps files + Compact(1, "p1", "p9", 1); + ASSERT_EQ("0,1", FilesPerLevel(1)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(0, GetSstFileCount(options.db_paths[0].path)); + ASSERT_EQ(0, GetSstFileCount(dbname_)); + + // Populate a different range + for (int i = 0; i < 3; ++i) { + ASSERT_OK(Put(1, "c", "begin")); + ASSERT_OK(Put(1, "e", "end")); + ASSERT_OK(Flush(1)); + } + ASSERT_EQ("3,1", FilesPerLevel(1)); + + // Compact just the new range + Compact(1, "b", "f", 1); + ASSERT_EQ("0,2", FilesPerLevel(1)); + ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(0, GetSstFileCount(options.db_paths[0].path)); + ASSERT_EQ(0, GetSstFileCount(dbname_)); + + // Compact all + ASSERT_OK(Put(1, "a", "begin")); + ASSERT_OK(Put(1, "z", "end")); + ASSERT_OK(Flush(1)); + ASSERT_EQ("1,2", FilesPerLevel(1)); + ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[0].path)); + CompactRangeOptions compact_options; + compact_options.target_path_id = 1; + compact_options.exclusive_manual_compaction = exclusive_manual_compaction_; + db_->CompactRange(compact_options, handles_[1], nullptr, nullptr); + + ASSERT_EQ("0,1", FilesPerLevel(1)); + ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path)); + ASSERT_EQ(0, GetSstFileCount(options.db_paths[0].path)); + ASSERT_EQ(0, GetSstFileCount(dbname_)); + + if (iter == 0) { + DestroyAndReopen(options); + options = CurrentOptions(); + options.db_paths.emplace_back(dbname_ + "_2", 2 * 10485760); + options.db_paths.emplace_back(dbname_ + "_3", 100 * 10485760); + options.db_paths.emplace_back(dbname_ + "_4", 120 * 10485760); + options.max_background_flushes = 1; + options.num_levels = 3; + options.create_if_missing = true; + CreateAndReopenWithCF({"pikachu"}, options); + } + } +} + +TEST_F(DBCompactionTest, FilesDeletedAfterCompaction) { + do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "foo", "v2")); + Compact(1, "a", "z"); + const size_t num_files = CountLiveFiles(); + for (int i = 0; i < 10; i++) { + ASSERT_OK(Put(1, "foo", "v2")); + Compact(1, "a", "z"); + } + ASSERT_EQ(CountLiveFiles(), num_files); + } while (ChangeCompactOptions()); +} + +// Check level compaction with compact files +TEST_P(DBCompactionTestWithParam, DISABLED_CompactFilesOnLevelCompaction) { + const int kTestKeySize = 16; + const int kTestValueSize = 984; + const int kEntrySize = kTestKeySize + kTestValueSize; + const int kEntriesPerBuffer = 100; + Options options; + options.create_if_missing = true; + options.write_buffer_size = kEntrySize * kEntriesPerBuffer; +
options.compaction_style = kCompactionStyleLevel; + options.target_file_size_base = options.write_buffer_size; + options.max_bytes_for_level_base = options.target_file_size_base * 2; + options.level0_stop_writes_trigger = 2; + options.max_bytes_for_level_multiplier = 2; + options.compression = kNoCompression; + options.max_subcompactions = max_subcompactions_; + options = CurrentOptions(options); + CreateAndReopenWithCF({"pikachu"}, options); + + Random rnd(301); + for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) { + ASSERT_OK(Put(1, ToString(key), RandomString(&rnd, kTestValueSize))); + } + dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + dbfull()->TEST_WaitForCompact(); + + ColumnFamilyMetaData cf_meta; + dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta); + int output_level = static_cast<int>(cf_meta.levels.size()) - 1; + for (int file_picked = 5; file_picked > 0; --file_picked) { + std::set<std::string> overlapping_file_names; + std::vector<std::string> compaction_input_file_names; + for (int f = 0; f < file_picked; ++f) { + int level = 0; + auto file_meta = PickFileRandomly(cf_meta, &rnd, &level); + compaction_input_file_names.push_back(file_meta->name); + GetOverlappingFileNumbersForLevelCompaction( + cf_meta, options.comparator, level, output_level, + file_meta, &overlapping_file_names); + } + + ASSERT_OK(dbfull()->CompactFiles( + CompactionOptions(), handles_[1], + compaction_input_file_names, + output_level)); + + // Make sure all overlapping files do not exist after compaction + dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta); + VerifyCompactionResult(cf_meta, overlapping_file_names); + } + + // make sure all key-values are still there. + for (int key = 64 * kEntriesPerBuffer; key >= 0; --key) { + ASSERT_NE(Get(1, ToString(key)), "NOT_FOUND"); + } +} + +TEST_P(DBCompactionTestWithParam, PartialCompactionFailure) { + Options options; + const int kKeySize = 16; + const int kKvSize = 1000; + const int kKeysPerBuffer = 100; + const int kNumL1Files = 5; + options.create_if_missing = true; + options.write_buffer_size = kKeysPerBuffer * kKvSize; + options.max_write_buffer_number = 2; + options.target_file_size_base = + options.write_buffer_size * + (options.max_write_buffer_number - 1); + options.level0_file_num_compaction_trigger = kNumL1Files; + options.max_bytes_for_level_base = + options.level0_file_num_compaction_trigger * + options.target_file_size_base; + options.max_bytes_for_level_multiplier = 2; + options.compression = kNoCompression; + options.max_subcompactions = max_subcompactions_; + + env_->SetBackgroundThreads(1, Env::HIGH); + env_->SetBackgroundThreads(1, Env::LOW); + // stop the compaction thread until we simulate the file creation failure. + test::SleepingBackgroundTask sleeping_task_low; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low, + Env::Priority::LOW); + + options.env = env_; + + DestroyAndReopen(options); + + const int kNumInsertedKeys = + options.level0_file_num_compaction_trigger * + (options.max_write_buffer_number - 1) * + kKeysPerBuffer; + + Random rnd(301); + std::vector<std::string> keys; + std::vector<std::string> values; + for (int k = 0; k < kNumInsertedKeys; ++k) { + keys.emplace_back(RandomString(&rnd, kKeySize)); + values.emplace_back(RandomString(&rnd, kKvSize - kKeySize)); + ASSERT_OK(Put(Slice(keys[k]), Slice(values[k]))); + dbfull()->TEST_WaitForFlushMemTable(); + } + + dbfull()->TEST_FlushMemTable(true); + // Make sure the number of L0 files can trigger compaction.
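The assertion that follows depends on a bit of arithmetic over the constants defined at the top of this test; spelled out as a sketch:

    // Each flush holds (max_write_buffer_number - 1) = 1 buffer of
    // kKeysPerBuffer keys, and kNumL1Files = 5 L0 files arm the trigger:
    constexpr int kExpected = 5 /*trigger*/ * (2 - 1) /*buffers*/ * 100;
    static_assert(kExpected == 500, "equals kNumInsertedKeys computed above");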
+ ASSERT_GE(NumTableFilesAtLevel(0), + options.level0_file_num_compaction_trigger); + + auto previous_num_level0_files = NumTableFilesAtLevel(0); + + // Fail the first file creation. + env_->non_writable_count_ = 1; + sleeping_task_low.WakeUp(); + sleeping_task_low.WaitUntilDone(); + + // Expect compaction to fail here as one file will fail its + // creation. + ASSERT_TRUE(!dbfull()->TEST_WaitForCompact().ok()); + + // Verify L0 -> L1 compaction does fail. + ASSERT_EQ(NumTableFilesAtLevel(1), 0); + + // Verify all L0 files are still there. + ASSERT_EQ(NumTableFilesAtLevel(0), previous_num_level0_files); + + // All key-values must exist after compaction fails. + for (int k = 0; k < kNumInsertedKeys; ++k) { + ASSERT_EQ(values[k], Get(keys[k])); + } + + env_->non_writable_count_ = 0; + + // Make sure RocksDB will not get into corrupted state. + Reopen(options); + + // Verify again after reopen. + for (int k = 0; k < kNumInsertedKeys; ++k) { + ASSERT_EQ(values[k], Get(keys[k])); + } +} + +TEST_P(DBCompactionTestWithParam, DeleteMovedFileAfterCompaction) { + // iter 1 -- delete_obsolete_files_period_micros == 0 + for (int iter = 0; iter < 2; ++iter) { + // This test triggers move compaction and verifies that the file is not + // deleted when it's part of move compaction + Options options = CurrentOptions(); + options.env = env_; + if (iter == 1) { + options.delete_obsolete_files_period_micros = 0; + } + options.create_if_missing = true; + options.level0_file_num_compaction_trigger = + 2; // trigger compaction when we have 2 files + OnFileDeletionListener* listener = new OnFileDeletionListener(); + options.listeners.emplace_back(listener); + options.max_subcompactions = max_subcompactions_; + DestroyAndReopen(options); + + Random rnd(301); + // Create two 1MB sst files + for (int i = 0; i < 2; ++i) { + // Create 1MB sst file + for (int j = 0; j < 100; ++j) { + ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024))); + } + ASSERT_OK(Flush()); + } + // this should execute L0->L1 + dbfull()->TEST_WaitForCompact(); + ASSERT_EQ("0,1", FilesPerLevel(0)); + + // block compactions + test::SleepingBackgroundTask sleeping_task; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task, + Env::Priority::LOW); + + options.max_bytes_for_level_base = 1024 * 1024; // 1 MB + Reopen(options); + std::unique_ptr iterator(db_->NewIterator(ReadOptions())); + ASSERT_EQ("0,1", FilesPerLevel(0)); + // let compactions go + sleeping_task.WakeUp(); + sleeping_task.WaitUntilDone(); + + // this should execute L1->L2 (move) + dbfull()->TEST_WaitForCompact(); + + ASSERT_EQ("0,0,1", FilesPerLevel(0)); + + std::vector metadata; + db_->GetLiveFilesMetaData(&metadata); + ASSERT_EQ(metadata.size(), 1U); + auto moved_file_name = metadata[0].name; + + // Create two more 1MB sst files + for (int i = 0; i < 2; ++i) { + // Create 1MB sst file + for (int j = 0; j < 100; ++j) { + ASSERT_OK(Put(Key(i * 50 + j + 100), RandomString(&rnd, 10 * 1024))); + } + ASSERT_OK(Flush()); + } + // this should execute both L0->L1 and L1->L2 (merge with previous file) + dbfull()->TEST_WaitForCompact(); + + ASSERT_EQ("0,0,2", FilesPerLevel(0)); + + // iterator is holding the file + ASSERT_OK(env_->FileExists(dbname_ + moved_file_name)); + + listener->SetExpectedFileName(dbname_ + moved_file_name); + iterator.reset(); + + // this file should have been compacted away + ASSERT_NOK(env_->FileExists(dbname_ + moved_file_name)); + listener->VerifyMatchedCount(1); + } +} + +TEST_P(DBCompactionTestWithParam, CompressLevelCompaction) { + 
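Background for the test that follows: a "trivial move" compaction promotes an input file to the output level with nothing but a manifest edit, and RocksDB only allows it when the file overlaps nothing in the output level and its compression matches the output level's compression. The per-level compression shape the test uses to steer that decision, as a standalone sketch:

    rocksdb::Options opt;
    opt.num_levels = 4;
    opt.compression_per_level = {rocksdb::kNoCompression,     // L0
                                 rocksdb::kNoCompression,     // L1
                                 rocksdb::kZlibCompression};  // L2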
if (!Zlib_Supported()) { + return; + } + Options options = CurrentOptions(); + options.memtable_factory.reset( + new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1)); + options.compaction_style = kCompactionStyleLevel; + options.write_buffer_size = 110 << 10; // 110KB + options.arena_block_size = 4 << 10; + options.level0_file_num_compaction_trigger = 2; + options.num_levels = 4; + options.max_bytes_for_level_base = 400 * 1024; + options.max_subcompactions = max_subcompactions_; + // First two levels have no compression, so that a trivial move between + // them will be allowed. Level 2 has Zlib compression so that a trivial + // move to level 3 will not be allowed + options.compression_per_level = {kNoCompression, kNoCompression, + kZlibCompression}; + int matches = 0, didnt_match = 0, trivial_move = 0, non_trivial = 0; + + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "Compaction::InputCompressionMatchesOutput:Matches", + [&](void* arg) { matches++; }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "Compaction::InputCompressionMatchesOutput:DidntMatch", + [&](void* arg) { didnt_match++; }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial", + [&](void* arg) { non_trivial++; }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:TrivialMove", + [&](void* arg) { trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Reopen(options); + + Random rnd(301); + int key_idx = 0; + + // First three 110KB files are going to level 0 + // After that, (100K, 200K) + for (int num = 0; num < 3; num++) { + GenerateNewFile(&rnd, &key_idx); + } + + // Another 110KB triggers a compaction to 400K file to fill up level 0 + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ(4, GetSstFileCount(dbname_)); + + // (1, 4) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4", FilesPerLevel(0)); + + // (1, 4, 1) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,1", FilesPerLevel(0)); + + // (1, 4, 2) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,2", FilesPerLevel(0)); + + // (1, 4, 3) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,3", FilesPerLevel(0)); + + // (1, 4, 4) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,4", FilesPerLevel(0)); + + // (1, 4, 5) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,5", FilesPerLevel(0)); + + // (1, 4, 6) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,6", FilesPerLevel(0)); + + // (1, 4, 7) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,7", FilesPerLevel(0)); + + // (1, 4, 8) + GenerateNewFile(&rnd, &key_idx); + ASSERT_EQ("1,4,8", FilesPerLevel(0)); + + ASSERT_EQ(matches, 12); + // Currently, the test relies on the number of calls to + // InputCompressionMatchesOutput() per compaction. 
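The counter assertions around this point tie together: the 12 compression matches line up with the 12 trivial moves (a move requires the input's compression to match the output level), while each of the 8 non-trivial compactions appears to consult InputCompressionMatchesOutput() twice in this configuration, which is where the 8 * 2 expectation below comes from:

    // matches     == 12     (one per trivial move, where compression agreed)
    // didnt_match == 8 * 2  (8 non-trivial compactions x 2 checks each)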
+ const int kCallsToInputCompressionMatch = 2; + ASSERT_EQ(didnt_match, 8 * kCallsToInputCompressionMatch); + ASSERT_EQ(trivial_move, 12); + ASSERT_EQ(non_trivial, 8); + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + + for (int i = 0; i < key_idx; i++) { + auto v = Get(Key(i)); + ASSERT_NE(v, "NOT_FOUND"); + ASSERT_TRUE(v.size() == 1 || v.size() == 990); + } + + Reopen(options); + + for (int i = 0; i < key_idx; i++) { + auto v = Get(Key(i)); + ASSERT_NE(v, "NOT_FOUND"); + ASSERT_TRUE(v.size() == 1 || v.size() == 990); + } + + Destroy(options); +} + +TEST_F(DBCompactionTest, SanitizeCompactionOptionsTest) { + Options options = CurrentOptions(); + options.max_background_compactions = 5; + options.soft_pending_compaction_bytes_limit = 0; + options.hard_pending_compaction_bytes_limit = 100; + options.create_if_missing = true; + DestroyAndReopen(options); + ASSERT_EQ(5, db_->GetOptions().base_background_compactions); + ASSERT_EQ(100, db_->GetOptions().soft_pending_compaction_bytes_limit); + + options.base_background_compactions = 4; + options.max_background_compactions = 3; + options.soft_pending_compaction_bytes_limit = 200; + options.hard_pending_compaction_bytes_limit = 150; + DestroyAndReopen(options); + ASSERT_EQ(3, db_->GetOptions().base_background_compactions); + ASSERT_EQ(150, db_->GetOptions().soft_pending_compaction_bytes_limit); +} + +// This tests for a bug that could cause two level0 compactions running +// concurrently +// TODO(aekmekji): Make sure that the reason this fails when run with +// max_subcompactions > 1 is not a correctness issue but just inherent to +// running parallel L0-L1 compactions +TEST_F(DBCompactionTest, SuggestCompactRangeNoTwoLevel0Compactions) { + Options options = CurrentOptions(); + options.compaction_style = kCompactionStyleLevel; + options.write_buffer_size = 110 << 10; + options.arena_block_size = 4 << 10; + options.level0_file_num_compaction_trigger = 4; + options.num_levels = 4; + options.compression = kNoCompression; + options.max_bytes_for_level_base = 450 << 10; + options.target_file_size_base = 98 << 10; + options.max_write_buffer_number = 2; + options.max_background_compactions = 2; + + DestroyAndReopen(options); + + // fill up the DB + Random rnd(301); + for (int num = 0; num < 10; num++) { + GenerateNewRandomFile(&rnd); + } + db_->CompactRange(CompactRangeOptions(), nullptr, nullptr); + + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"CompactionJob::Run():Start", + "DBCompactionTest::SuggestCompactRangeNoTwoLevel0Compactions:1"}, + {"DBCompactionTest::SuggestCompactRangeNoTwoLevel0Compactions:2", + "CompactionJob::Run():End"}}); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + // trigger L0 compaction + for (int num = 0; num < options.level0_file_num_compaction_trigger + 1; + num++) { + GenerateNewRandomFile(&rnd, /* nowait */ true); + ASSERT_OK(Flush()); + } + + TEST_SYNC_POINT( + "DBCompactionTest::SuggestCompactRangeNoTwoLevel0Compactions:1"); + + GenerateNewRandomFile(&rnd, /* nowait */ true); + dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_OK(experimental::SuggestCompactRange(db_, nullptr, nullptr)); + for (int num = 0; num < options.level0_file_num_compaction_trigger + 1; + num++) { + GenerateNewRandomFile(&rnd, /* nowait */ true); + ASSERT_OK(Flush()); + } + + TEST_SYNC_POINT( + "DBCompactionTest::SuggestCompactRangeNoTwoLevel0Compactions:2"); + dbfull()->TEST_WaitForCompact(); +} + + +TEST_P(DBCompactionTestWithParam, ForceBottommostLevelCompaction) { + int32_t trivial_move = 0; + int32_t 
non_trivial_move = 0; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:TrivialMove", + [&](void* arg) { trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial", + [&](void* arg) { non_trivial_move++; }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Options options = CurrentOptions(); + options.write_buffer_size = 100000000; + options.max_subcompactions = max_subcompactions_; + DestroyAndReopen(options); + + int32_t value_size = 10 * 1024; // 10 KB + + Random rnd(301); + std::vector<std::string> values; + // File with keys [ 0 => 99 ] + for (int i = 0; i < 100; i++) { + values.push_back(RandomString(&rnd, value_size)); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + ASSERT_EQ("1", FilesPerLevel(0)); + // Compaction will do L0=>L1 (trivial move) then move L1 files to L3 + CompactRangeOptions compact_options; + compact_options.change_level = true; + compact_options.target_level = 3; + ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr)); + ASSERT_EQ("0,0,0,1", FilesPerLevel(0)); + ASSERT_EQ(trivial_move, 1); + ASSERT_EQ(non_trivial_move, 0); + + // File with keys [ 100 => 199 ] + for (int i = 100; i < 200; i++) { + values.push_back(RandomString(&rnd, value_size)); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + ASSERT_EQ("1,0,0,1", FilesPerLevel(0)); + // Compaction will do L0=>L1 L1=>L2 L2=>L3 (3 trivial moves) + // then compact the bottommost level L3=>L3 (non-trivial move) + compact_options = CompactRangeOptions(); + compact_options.bottommost_level_compaction = + BottommostLevelCompaction::kForce; + ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr)); + ASSERT_EQ("0,0,0,1", FilesPerLevel(0)); + ASSERT_EQ(trivial_move, 4); + ASSERT_EQ(non_trivial_move, 1); + + // File with keys [ 200 => 299 ] + for (int i = 200; i < 300; i++) { + values.push_back(RandomString(&rnd, value_size)); + ASSERT_OK(Put(Key(i), values[i])); + } + ASSERT_OK(Flush()); + + ASSERT_EQ("1,0,0,1", FilesPerLevel(0)); + trivial_move = 0; + non_trivial_move = 0; + compact_options = CompactRangeOptions(); + compact_options.bottommost_level_compaction = + BottommostLevelCompaction::kSkip; + // Compaction will do L0=>L1 L1=>L2 L2=>L3 (3 trivial moves) + // and will skip bottommost level compaction + ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr)); + ASSERT_EQ("0,0,0,2", FilesPerLevel(0)); + ASSERT_EQ(trivial_move, 3); + ASSERT_EQ(non_trivial_move, 0); + + for (int i = 0; i < 300; i++) { + ASSERT_EQ(Get(Key(i)), values[i]); + } + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + +INSTANTIATE_TEST_CASE_P(DBCompactionTestWithParam, DBCompactionTestWithParam, + ::testing::Values(std::make_tuple(1, true), + std::make_tuple(1, false), + std::make_tuple(4, true), + std::make_tuple(4, false))); + +class CompactionPriTest : public DBTestBase, + public testing::WithParamInterface<uint32_t> { + public: + CompactionPriTest() : DBTestBase("/compaction_pri_test") { + compaction_pri_ = GetParam(); + } + + // Required if inheriting from testing::WithParamInterface<> + static void SetUpTestCase() {} + static void TearDownTestCase() {} + + uint32_t compaction_pri_; +}; + +TEST_P(CompactionPriTest, Test) { + Options options = CurrentOptions(); + options.write_buffer_size = 16 * 1024; + options.compaction_pri = static_cast<CompactionPri>(compaction_pri_); + options.hard_pending_compaction_bytes_limit = 256 * 1024; + options.max_bytes_for_level_base = 64 * 1024; +
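The four CompactionPri values this parameterized test is instantiated with pick the next compaction candidate differently: kByCompensatedSize favors files whose size is inflated by deletion entries, kOldestLargestSeqFirst and kOldestSmallestSeqFirst order candidates by sequence number, and kMinOverlappingRatio prefers files that overlap the next level least, which tends to reduce write amplification. Selecting one directly is a single option, e.g.:

    rocksdb::Options opt;
    opt.compaction_pri = rocksdb::CompactionPri::kMinOverlappingRatio;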
options.max_bytes_for_level_multiplier = 4; + options.compression = kNoCompression; + + DestroyAndReopen(options); + + Random rnd(301); + const int kNKeys = 5000; + int keys[kNKeys]; + for (int i = 0; i < kNKeys; i++) { + keys[i] = i; + } + std::random_shuffle(std::begin(keys), std::end(keys)); + + for (int i = 0; i < kNKeys; i++) { + ASSERT_OK(Put(Key(keys[i]), RandomString(&rnd, 102))); + } + + dbfull()->TEST_WaitForCompact(); + for (int i = 0; i < kNKeys; i++) { + ASSERT_NE("NOT_FOUND", Get(Key(i))); + } +} + +INSTANTIATE_TEST_CASE_P( + CompactionPriTest, CompactionPriTest, + ::testing::Values(CompactionPri::kByCompensatedSize, + CompactionPri::kOldestLargestSeqFirst, + CompactionPri::kOldestSmallestSeqFirst, + CompactionPri::kMinOverlappingRatio)); + +#endif // !defined(ROCKSDB_LITE) +} // namespace rocksdb + +int main(int argc, char** argv) { +#if !defined(ROCKSDB_LITE) + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +#else + return 0; +#endif +} diff --git a/external/rocksdb/db/db_dynamic_level_test.cc b/external/rocksdb/db/db_dynamic_level_test.cc new file mode 100755 index 0000000000..6783c58d00 --- /dev/null +++ b/external/rocksdb/db/db_dynamic_level_test.cc @@ -0,0 +1,507 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +// Introduction of SyncPoint effectively disabled building and running this test +// in Release build. +// which is a pity, it is a good test +#if !defined(ROCKSDB_LITE) + +#include "db/db_test_util.h" +#include "port/stack_trace.h" + +namespace rocksdb { +class DBTestDynamicLevel : public DBTestBase { + public: + DBTestDynamicLevel() : DBTestBase("/db_dynamic_level_test") {} +}; + +TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase) { + if (!Snappy_Supported() || !LZ4_Supported()) { + return; + } + // Use InMemoryEnv, or it would be too slow. 
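MockEnv keeps all "file" contents in memory, which is what makes the heavy put/flush loops in this test tolerably fast. It follows the general Env-injection pattern; a sketch, assuming the usual rule that the wrapped Env must outlive any DB that uses it:

    std::unique_ptr<rocksdb::Env> mem_env(
        new rocksdb::MockEnv(rocksdb::Env::Default()));
    rocksdb::Options opt;
    opt.env = mem_env.get();  // all DB I/O now goes through the in-memory Env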
+ unique_ptr env(new MockEnv(env_)); + + const int kNKeys = 1000; + int keys[kNKeys]; + + auto verify_func = [&]() { + for (int i = 0; i < kNKeys; i++) { + ASSERT_NE("NOT_FOUND", Get(Key(i))); + ASSERT_NE("NOT_FOUND", Get(Key(kNKeys * 2 + i))); + if (i < kNKeys / 10) { + ASSERT_EQ("NOT_FOUND", Get(Key(kNKeys + keys[i]))); + } else { + ASSERT_NE("NOT_FOUND", Get(Key(kNKeys + keys[i]))); + } + } + }; + + Random rnd(301); + for (int ordered_insert = 0; ordered_insert <= 1; ordered_insert++) { + for (int i = 0; i < kNKeys; i++) { + keys[i] = i; + } + if (ordered_insert == 0) { + std::random_shuffle(std::begin(keys), std::end(keys)); + } + for (int max_background_compactions = 1; max_background_compactions < 4; + max_background_compactions += 2) { + Options options; + options.env = env.get(); + options.create_if_missing = true; + options.db_write_buffer_size = 2048; + options.write_buffer_size = 2048; + options.max_write_buffer_number = 2; + options.level0_file_num_compaction_trigger = 2; + options.level0_slowdown_writes_trigger = 2; + options.level0_stop_writes_trigger = 2; + options.target_file_size_base = 2048; + options.level_compaction_dynamic_level_bytes = true; + options.max_bytes_for_level_base = 10240; + options.max_bytes_for_level_multiplier = 4; + options.soft_rate_limit = 1.1; + options.max_background_compactions = max_background_compactions; + options.num_levels = 5; + + options.compression_per_level.resize(3); + options.compression_per_level[0] = kNoCompression; + options.compression_per_level[1] = kLZ4Compression; + options.compression_per_level[2] = kSnappyCompression; + + DestroyAndReopen(options); + + for (int i = 0; i < kNKeys; i++) { + int key = keys[i]; + ASSERT_OK(Put(Key(kNKeys + key), RandomString(&rnd, 102))); + ASSERT_OK(Put(Key(key), RandomString(&rnd, 102))); + ASSERT_OK(Put(Key(kNKeys * 2 + key), RandomString(&rnd, 102))); + ASSERT_OK(Delete(Key(kNKeys + keys[i / 10]))); + env_->SleepForMicroseconds(5000); + } + + uint64_t int_prop; + ASSERT_TRUE(db_->GetIntProperty("rocksdb.background-errors", &int_prop)); + ASSERT_EQ(0U, int_prop); + + // Verify DB + for (int j = 0; j < 2; j++) { + verify_func(); + if (j == 0) { + Reopen(options); + } + } + + // Test compact range works + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + // All data should be in the last level. 
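This is the defining property of level_compaction_dynamic_level_bytes: the last level holds the data, each upper level's target size is the next level's target divided by max_bytes_for_level_multiplier, and any level whose target would fall below max_bytes_for_level_base stays empty. The same rule drives the base-level assertions in DynamicLevelMaxBytesBase2 below; roughly, with its max_bytes_for_level_base = 40960 and multiplier = 4:

    // ~28K of data  -> fits one level's budget   -> base level stays L4
    // ~56K of data  -> a second level is needed  -> base level becomes L3
    // ~600K more    -> a third level is needed   -> base level becomes L2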
+ ColumnFamilyMetaData cf_meta; + db_->GetColumnFamilyMetaData(&cf_meta); + ASSERT_EQ(5U, cf_meta.levels.size()); + for (int i = 0; i < 4; i++) { + ASSERT_EQ(0U, cf_meta.levels[i].files.size()); + } + ASSERT_GT(cf_meta.levels[4U].files.size(), 0U); + verify_func(); + + Close(); + } + } + + env_->SetBackgroundThreads(1, Env::LOW); + env_->SetBackgroundThreads(1, Env::HIGH); +} + +// Test specific cases in dynamic max bytes +TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBase2) { + Random rnd(301); + int kMaxKey = 1000000; + + Options options = CurrentOptions(); + options.create_if_missing = true; + options.db_write_buffer_size = 204800; + options.write_buffer_size = 20480; + options.max_write_buffer_number = 2; + options.level0_file_num_compaction_trigger = 2; + options.level0_slowdown_writes_trigger = 9999; + options.level0_stop_writes_trigger = 9999; + options.target_file_size_base = 9102; + options.level_compaction_dynamic_level_bytes = true; + options.max_bytes_for_level_base = 40960; + options.max_bytes_for_level_multiplier = 4; + options.max_background_compactions = 2; + options.num_levels = 5; + options.expanded_compaction_factor = 0; // Force not expanding in compactions + BlockBasedTableOptions table_options; + table_options.block_size = 1024; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + DestroyAndReopen(options); + ASSERT_OK(dbfull()->SetOptions({ + {"disable_auto_compactions", "true"}, + })); + + uint64_t int_prop; + std::string str_prop; + + // Initial base level is the last level + ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); + ASSERT_EQ(4U, int_prop); + + // Put about 28K to L0 + for (int i = 0; i < 70; i++) { + ASSERT_OK(Put(Key(static_cast(rnd.Uniform(kMaxKey))), + RandomString(&rnd, 380))); + } + ASSERT_OK(dbfull()->SetOptions({ + {"disable_auto_compactions", "false"}, + })); + Flush(); + dbfull()->TEST_WaitForCompact(); + ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); + ASSERT_EQ(4U, int_prop); + + // Insert extra about 28K to L0. After they are compacted to L4, base level + // should be changed to L3. + ASSERT_OK(dbfull()->SetOptions({ + {"disable_auto_compactions", "true"}, + })); + for (int i = 0; i < 70; i++) { + ASSERT_OK(Put(Key(static_cast(rnd.Uniform(kMaxKey))), + RandomString(&rnd, 380))); + } + + ASSERT_OK(dbfull()->SetOptions({ + {"disable_auto_compactions", "false"}, + })); + Flush(); + dbfull()->TEST_WaitForCompact(); + ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); + ASSERT_EQ(3U, int_prop); + ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level1", &str_prop)); + ASSERT_EQ("0", str_prop); + ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level2", &str_prop)); + ASSERT_EQ("0", str_prop); + + // Trigger parallel compaction, and the first one would change the base + // level. + // Hold compaction jobs to make sure + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "CompactionJob::Run():Start", + [&](void* arg) { env_->SleepForMicroseconds(100000); }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + ASSERT_OK(dbfull()->SetOptions({ + {"disable_auto_compactions", "true"}, + })); + // Write about 40K more + for (int i = 0; i < 100; i++) { + ASSERT_OK(Put(Key(static_cast(rnd.Uniform(kMaxKey))), + RandomString(&rnd, 380))); + } + ASSERT_OK(dbfull()->SetOptions({ + {"disable_auto_compactions", "false"}, + })); + Flush(); + // Wait for 200 milliseconds before proceeding compactions to make sure two + // parallel ones are executed. 
+ env_->SleepForMicroseconds(200000); + dbfull()->TEST_WaitForCompact(); + ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); + ASSERT_EQ(3U, int_prop); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + + // Trigger a condition that the compaction changes base level and L0->Lbase + // happens at the same time. + // We try to make last levels' targets to be 40K, 160K, 640K, and trigger + // another compaction from 40K->160K. + ASSERT_OK(dbfull()->SetOptions({ + {"disable_auto_compactions", "true"}, + })); + // Write about 600K more + for (int i = 0; i < 1500; i++) { + ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))), + RandomString(&rnd, 380))); + } + ASSERT_OK(dbfull()->SetOptions({ + {"disable_auto_compactions", "false"}, + })); + Flush(); + dbfull()->TEST_WaitForCompact(); + ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); + ASSERT_EQ(2U, int_prop); + + // A manual compaction will trigger the base level to become L2. + // Keep writing data until the base level changes 2->1. There will be an + // L0->L2 compaction going on at the same time. + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks(); + + rocksdb::SyncPoint::GetInstance()->LoadDependency({ + {"CompactionJob::Run():Start", "DynamicLevelMaxBytesBase2:0"}, + {"DynamicLevelMaxBytesBase2:1", "CompactionJob::Run():End"}, + {"DynamicLevelMaxBytesBase2:compact_range_finish", + "FlushJob::WriteLevel0Table"}, + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + std::thread thread([this] { + TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:compact_range_start"); + ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)); + TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:compact_range_finish"); + }); + + TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:0"); + for (int i = 0; i < 2; i++) { + ASSERT_OK(Put(Key(static_cast<int>(rnd.Uniform(kMaxKey))), + RandomString(&rnd, 380))); + } + TEST_SYNC_POINT("DynamicLevelMaxBytesBase2:1"); + + Flush(); + + thread.join(); + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks(); + + ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); + ASSERT_EQ(1U, int_prop); +} + +// Test specific cases in dynamic max bytes +TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesCompactRange) { + Random rnd(301); + int kMaxKey = 1000000; + + Options options = CurrentOptions(); + options.create_if_missing = true; + options.db_write_buffer_size = 2048; + options.write_buffer_size = 2048; + options.max_write_buffer_number = 2; + options.level0_file_num_compaction_trigger = 2; + options.level0_slowdown_writes_trigger = 9999; + options.level0_stop_writes_trigger = 9999; + options.target_file_size_base = 2; + options.level_compaction_dynamic_level_bytes = true; + options.max_bytes_for_level_base = 10240; + options.max_bytes_for_level_multiplier = 4; + options.max_background_compactions = 1; + const int kNumLevels = 5; + options.num_levels = kNumLevels; + options.expanded_compaction_factor = 0; // Force not expanding in compactions + BlockBasedTableOptions table_options; + table_options.block_size = 1024; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + DestroyAndReopen(options); + + // Compact against empty DB + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + + uint64_t int_prop; + std::string str_prop; + + // Initial base level is the last level + ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level",
&int_prop)); + ASSERT_EQ(4U, int_prop); + + // Put about 7K to L0 + for (int i = 0; i < 140; i++) { + ASSERT_OK(Put(Key(static_cast(rnd.Uniform(kMaxKey))), + RandomString(&rnd, 80))); + } + Flush(); + dbfull()->TEST_WaitForCompact(); + if (NumTableFilesAtLevel(0) == 0) { + // Make sure level 0 is not empty + ASSERT_OK(Put(Key(static_cast(rnd.Uniform(kMaxKey))), + RandomString(&rnd, 80))); + Flush(); + } + + ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); + ASSERT_EQ(3U, int_prop); + ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level1", &str_prop)); + ASSERT_EQ("0", str_prop); + ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level2", &str_prop)); + ASSERT_EQ("0", str_prop); + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks(); + + std::set output_levels; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "CompactionPicker::CompactRange:Return", [&](void* arg) { + Compaction* compaction = reinterpret_cast(arg); + output_levels.insert(compaction->output_level()); + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_EQ(output_levels.size(), 2); + ASSERT_TRUE(output_levels.find(3) != output_levels.end()); + ASSERT_TRUE(output_levels.find(4) != output_levels.end()); + ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level0", &str_prop)); + ASSERT_EQ("0", str_prop); + ASSERT_TRUE(db_->GetProperty("rocksdb.num-files-at-level3", &str_prop)); + ASSERT_EQ("0", str_prop); + // Base level is still level 3. + ASSERT_TRUE(db_->GetIntProperty("rocksdb.base-level", &int_prop)); + ASSERT_EQ(3U, int_prop); +} + +TEST_F(DBTestDynamicLevel, DynamicLevelMaxBytesBaseInc) { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.db_write_buffer_size = 2048; + options.write_buffer_size = 2048; + options.max_write_buffer_number = 2; + options.level0_file_num_compaction_trigger = 2; + options.level0_slowdown_writes_trigger = 2; + options.level0_stop_writes_trigger = 2; + options.target_file_size_base = 2048; + options.level_compaction_dynamic_level_bytes = true; + options.max_bytes_for_level_base = 10240; + options.max_bytes_for_level_multiplier = 4; + options.soft_rate_limit = 1.1; + options.max_background_compactions = 2; + options.num_levels = 5; + + DestroyAndReopen(options); + + int non_trivial = 0; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction:NonTrivial", + [&](void* arg) { non_trivial++; }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Random rnd(301); + const int total_keys = 3000; + const int random_part_size = 100; + for (int i = 0; i < total_keys; i++) { + std::string value = RandomString(&rnd, random_part_size); + PutFixed32(&value, static_cast(i)); + ASSERT_OK(Put(Key(i), value)); + } + Flush(); + dbfull()->TEST_WaitForCompact(); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + + ASSERT_EQ(non_trivial, 0); + + for (int i = 0; i < total_keys; i++) { + std::string value = Get(Key(i)); + ASSERT_EQ(DecodeFixed32(value.c_str() + random_part_size), + static_cast(i)); + } + + env_->SetBackgroundThreads(1, Env::LOW); + env_->SetBackgroundThreads(1, Env::HIGH); +} + +TEST_F(DBTestDynamicLevel, MigrateToDynamicLevelMaxBytesBase) { + Random rnd(301); + const int kMaxKey = 2000; + + Options options; + options.create_if_missing = true; + options.db_write_buffer_size = 2048; + options.write_buffer_size = 2048; + options.max_write_buffer_number = 
8; + options.level0_file_num_compaction_trigger = 4; + options.level0_slowdown_writes_trigger = 4; + options.level0_stop_writes_trigger = 8; + options.target_file_size_base = 2048; + options.level_compaction_dynamic_level_bytes = false; + options.max_bytes_for_level_base = 10240; + options.max_bytes_for_level_multiplier = 4; + options.soft_rate_limit = 1.1; + options.num_levels = 8; + + DestroyAndReopen(options); + + auto verify_func = [&](int num_keys, bool if_sleep) { + for (int i = 0; i < num_keys; i++) { + ASSERT_NE("NOT_FOUND", Get(Key(kMaxKey + i))); + if (i < num_keys / 10) { + ASSERT_EQ("NOT_FOUND", Get(Key(i))); + } else { + ASSERT_NE("NOT_FOUND", Get(Key(i))); + } + if (if_sleep && i % 1000 == 0) { + // Without it, valgrind may choose not to give another + // thread a chance to run before finishing the function, + // causing the test to be extremely slow. + env_->SleepForMicroseconds(1); + } + } + }; + + int total_keys = 1000; + for (int i = 0; i < total_keys; i++) { + ASSERT_OK(Put(Key(i), RandomString(&rnd, 102))); + ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102))); + ASSERT_OK(Delete(Key(i / 10))); + } + verify_func(total_keys, false); + dbfull()->TEST_WaitForCompact(); + + options.level_compaction_dynamic_level_bytes = true; + options.disable_auto_compactions = true; + Reopen(options); + verify_func(total_keys, false); + + std::atomic_bool compaction_finished; + compaction_finished = false; + // Issue manual compaction in one thread and still verify DB state + // in main thread. + std::thread t([&]() { + CompactRangeOptions compact_options; + compact_options.change_level = true; + compact_options.target_level = options.num_levels - 1; + dbfull()->CompactRange(compact_options, nullptr, nullptr); + compaction_finished.store(true); + }); + do { + verify_func(total_keys, true); + } while (!compaction_finished.load()); + t.join(); + + ASSERT_OK(dbfull()->SetOptions({ + {"disable_auto_compactions", "false"}, + })); + + int total_keys2 = 2000; + for (int i = total_keys; i < total_keys2; i++) { + ASSERT_OK(Put(Key(i), RandomString(&rnd, 102))); + ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102))); + ASSERT_OK(Delete(Key(i / 10))); + } + + verify_func(total_keys2, false); + dbfull()->TEST_WaitForCompact(); + verify_func(total_keys2, false); + + // Base level is not level 1 + ASSERT_EQ(NumTableFilesAtLevel(1), 0); + ASSERT_EQ(NumTableFilesAtLevel(2), 0); +} +} // namespace rocksdb + +#endif // !defined(ROCKSDB_LITE) + +int main(int argc, char** argv) { +#if !defined(ROCKSDB_LITE) + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +#else + return 0; +#endif +} diff --git a/external/rocksdb/db/db_filesnapshot.cc b/external/rocksdb/db/db_filesnapshot.cc new file mode 100755 index 0000000000..7ed1f1d363 --- /dev/null +++ b/external/rocksdb/db/db_filesnapshot.cc @@ -0,0 +1,154 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2012 Facebook. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
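db_filesnapshot.cc implements the file-snapshot half of the DB interface: DisableFileDeletions(), EnableFileDeletions(), GetLiveFiles() and GetSortedWalFiles(). The usual consumer is a backup loop; a minimal sketch of that pattern against the signatures defined below (error handling elided; CopyTo() is a hypothetical copy helper, not part of the API):

    void NaiveBackup(rocksdb::DB* db, const std::string& db_dir,
                     const std::string& backup_dir) {
      std::vector<std::string> files;  // relative names: *.sst, CURRENT, MANIFEST-*
      uint64_t manifest_size = 0;
      db->DisableFileDeletions();                     // pin every live file
      db->GetLiveFiles(files, &manifest_size, true);  // true = flush memtable first
      for (const auto& name : files) {
        CopyTo(db_dir + name, backup_dir + name);     // hypothetical helper
      }
      db->EnableFileDeletions(false);                 // balance the Disable call
    }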
+ +#ifndef ROCKSDB_LITE + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include +#include +#include +#include +#include "db/db_impl.h" +#include "db/filename.h" +#include "db/job_context.h" +#include "db/version_set.h" +#include "port/port.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "util/file_util.h" +#include "util/mutexlock.h" +#include "util/sync_point.h" +#include "util/testharness.h" + +namespace rocksdb { + +Status DBImpl::DisableFileDeletions() { + InstrumentedMutexLock l(&mutex_); + ++disable_delete_obsolete_files_; + if (disable_delete_obsolete_files_ == 1) { + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "File Deletions Disabled"); + } else { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "File Deletions Disabled, but already disabled. Counter: %d", + disable_delete_obsolete_files_); + } + return Status::OK(); +} + +Status DBImpl::EnableFileDeletions(bool force) { + // Job id == 0 means that this is not our background process, but rather + // user thread + JobContext job_context(0); + bool should_purge_files = false; + { + InstrumentedMutexLock l(&mutex_); + if (force) { + // if force, we need to enable file deletions right away + disable_delete_obsolete_files_ = 0; + } else if (disable_delete_obsolete_files_ > 0) { + --disable_delete_obsolete_files_; + } + if (disable_delete_obsolete_files_ == 0) { + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "File Deletions Enabled"); + should_purge_files = true; + FindObsoleteFiles(&job_context, true); + } else { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "File Deletions Enabled, but not really enabled. Counter: %d", + disable_delete_obsolete_files_); + } + } + if (should_purge_files) { + PurgeObsoleteFiles(job_context); + } + job_context.Clean(); + LogFlush(db_options_.info_log); + return Status::OK(); +} + +int DBImpl::IsFileDeletionsEnabled() const { + return disable_delete_obsolete_files_; +} + +Status DBImpl::GetLiveFiles(std::vector<std::string>& ret, + uint64_t* manifest_file_size, + bool flush_memtable) { + *manifest_file_size = 0; + + mutex_.Lock(); + + if (flush_memtable) { + // flush all dirty data to disk. + Status status; + for (auto cfd : *versions_->GetColumnFamilySet()) { + if (cfd->IsDropped()) { + continue; + } + cfd->Ref(); + mutex_.Unlock(); + status = FlushMemTable(cfd, FlushOptions()); + TEST_SYNC_POINT("DBImpl::GetLiveFiles:1"); + TEST_SYNC_POINT("DBImpl::GetLiveFiles:2"); + mutex_.Lock(); + cfd->Unref(); + if (!status.ok()) { + break; + } + } + versions_->GetColumnFamilySet()->FreeDeadColumnFamilies(); + + if (!status.ok()) { + mutex_.Unlock(); + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "Cannot Flush data %s\n", status.ToString().c_str()); + return status; + } + } + + // Make a set of all of the live *.sst files + std::vector<FileDescriptor> live; + for (auto cfd : *versions_->GetColumnFamilySet()) { + if (cfd->IsDropped()) { + continue; + } + cfd->current()->AddLiveFiles(&live); + } + + ret.clear(); + ret.reserve(live.size() + 3); // *.sst + CURRENT + MANIFEST + OPTIONS + + // create names of the live files.
The names are not absolute + // paths, instead they are relative to dbname_; + for (auto live_file : live) { + ret.push_back(MakeTableFileName("", live_file.GetNumber())); + } + + ret.push_back(CurrentFileName("")); + ret.push_back(DescriptorFileName("", versions_->manifest_file_number())); + ret.push_back(OptionsFileName("", versions_->options_file_number())); + + // find length of manifest file while holding the mutex lock + *manifest_file_size = versions_->manifest_file_size(); + + mutex_.Unlock(); + return Status::OK(); +} + +Status DBImpl::GetSortedWalFiles(VectorLogPtr& files) { + return wal_manager_.GetSortedWalFiles(files); +} + +} + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/db/db_flush_test.cc b/external/rocksdb/db/db_flush_test.cc new file mode 100755 index 0000000000..ab4b1ab4c3 --- /dev/null +++ b/external/rocksdb/db/db_flush_test.cc @@ -0,0 +1,56 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/db_test_util.h" +#include "port/stack_trace.h" +#include "util/sync_point.h" + +namespace rocksdb { + +class DBFlushTest : public DBTestBase { + public: + DBFlushTest() : DBTestBase("/db_flush_test") {} +}; + +// We had issue when two background threads trying to flush at the same time, +// only one of them get committed. The test verifies the issue is fixed. +TEST_F(DBFlushTest, FlushWhileWritingManifest) { + Options options; + options.disable_auto_compactions = true; + options.max_background_flushes = 2; + Reopen(options); + FlushOptions no_wait; + no_wait.wait = false; + + SyncPoint::GetInstance()->LoadDependency( + {{"VersionSet::LogAndApply:WriteManifest", + "DBFlushTest::FlushWhileWritingManifest:1"}, + {"MemTableList::InstallMemtableFlushResults:InProgress", + "VersionSet::LogAndApply:WriteManifestDone"}}); + SyncPoint::GetInstance()->EnableProcessing(); + + ASSERT_OK(Put("foo", "v")); + ASSERT_OK(dbfull()->Flush(no_wait)); + TEST_SYNC_POINT("DBFlushTest::FlushWhileWritingManifest:1"); + ASSERT_OK(Put("bar", "v")); + ASSERT_OK(dbfull()->Flush(no_wait)); + // If the issue is hit we will wait here forever. + dbfull()->TEST_WaitForFlushMemTable(); +#ifndef ROCKSDB_LITE + ASSERT_EQ(2, TotalTableFiles()); +#endif // ROCKSDB_LITE +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/db_impl.cc b/external/rocksdb/db/db_impl.cc new file mode 100755 index 0000000000..f750ef8390 --- /dev/null +++ b/external/rocksdb/db/db_impl.cc @@ -0,0 +1,6220 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
See the AUTHORS file for names of contributors. +#include "db/db_impl.h" + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif +#include +#include +#ifdef OS_SOLARIS +#include +#endif +#ifdef ROCKSDB_JEMALLOC +#include "jemalloc/jemalloc.h" +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "db/auto_roll_logger.h" +#include "db/builder.h" +#include "db/compaction_job.h" +#include "db/db_info_dumper.h" +#include "db/db_iter.h" +#include "db/dbformat.h" +#include "db/event_helpers.h" +#include "db/filename.h" +#include "db/flush_job.h" +#include "db/forward_iterator.h" +#include "db/job_context.h" +#include "db/log_reader.h" +#include "db/log_writer.h" +#include "db/managed_iterator.h" +#include "db/memtable.h" +#include "db/memtable_list.h" +#include "db/merge_context.h" +#include "db/merge_helper.h" +#include "db/table_cache.h" +#include "db/table_properties_collector.h" +#include "db/transaction_log_impl.h" +#include "db/version_set.h" +#include "db/write_batch_internal.h" +#include "db/write_callback.h" +#include "db/xfunc_test_points.h" +#include "memtable/hash_linklist_rep.h" +#include "memtable/hash_skiplist_rep.h" +#include "port/likely.h" +#include "port/port.h" +#include "rocksdb/cache.h" +#include "rocksdb/compaction_filter.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/merge_operator.h" +#include "rocksdb/statistics.h" +#include "rocksdb/status.h" +#include "rocksdb/table.h" +#include "rocksdb/version.h" +#include "rocksdb/wal_filter.h" +#include "rocksdb/write_buffer_manager.h" +#include "table/block.h" +#include "table/block_based_table_factory.h" +#include "table/merger.h" +#include "table/table_builder.h" +#include "table/two_level_iterator.h" +#include "util/autovector.h" +#include "util/build_version.h" +#include "util/coding.h" +#include "util/compression.h" +#include "util/crc32c.h" +#include "util/file_reader_writer.h" +#include "util/file_util.h" +#include "util/iostats_context_imp.h" +#include "util/log_buffer.h" +#include "util/logging.h" +#include "util/mutexlock.h" +#include "util/options_helper.h" +#include "util/options_parser.h" +#include "util/perf_context_imp.h" +#include "util/sst_file_manager_impl.h" +#include "util/stop_watch.h" +#include "util/string_util.h" +#include "util/sync_point.h" +#include "util/thread_status_updater.h" +#include "util/thread_status_util.h" +#include "util/xfunc.h" + +namespace rocksdb { + +const std::string kDefaultColumnFamilyName("default"); + +void DumpRocksDBBuildVersion(Logger * log); + +struct DBImpl::WriteContext { + autovector superversions_to_free_; + autovector memtables_to_free_; + + ~WriteContext() { + for (auto& sv : superversions_to_free_) { + delete sv; + } + for (auto& m : memtables_to_free_) { + delete m; + } + } +}; + +Options SanitizeOptions(const std::string& dbname, + const InternalKeyComparator* icmp, + const Options& src) { + auto db_options = SanitizeOptions(dbname, DBOptions(src)); + auto cf_options = SanitizeOptions(db_options, icmp, ColumnFamilyOptions(src)); + return Options(db_options, cf_options); +} + +DBOptions SanitizeOptions(const std::string& dbname, const DBOptions& src) { + DBOptions result = src; + + // result.max_open_files means an "infinite" open files. 
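Said more plainly: max_open_files == -1 requests an effectively unlimited table cache, and any other value is clamped by the code below. A worked pair of examples against ClipToRange's bounds:

    // requested max_open_files = 5       -> raised to the floor of 20
    // requested max_open_files = 5000000 -> lowered to port::GetMaxOpenFiles(),
    //   or to the 1000000 fallback when the platform reports no limit (-1)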
+ if (result.max_open_files != -1) { + int max_max_open_files = port::GetMaxOpenFiles(); + if (max_max_open_files == -1) { + max_max_open_files = 1000000; + } + ClipToRange(&result.max_open_files, 20, max_max_open_files); + } + + if (result.info_log == nullptr) { + Status s = CreateLoggerFromOptions(dbname, result, &result.info_log); + if (!s.ok()) { + // No place suitable for logging + result.info_log = nullptr; + } + } + if (!result.write_buffer_manager) { + result.write_buffer_manager.reset( + new WriteBufferManager(result.db_write_buffer_size)); + } + if (result.base_background_compactions == -1) { + result.base_background_compactions = result.max_background_compactions; + } + if (result.base_background_compactions > result.max_background_compactions) { + result.base_background_compactions = result.max_background_compactions; + } + result.env->IncBackgroundThreadsIfNeeded(src.max_background_compactions, + Env::Priority::LOW); + result.env->IncBackgroundThreadsIfNeeded(src.max_background_flushes, + Env::Priority::HIGH); + + if (result.rate_limiter.get() != nullptr) { + if (result.bytes_per_sync == 0) { + result.bytes_per_sync = 1024 * 1024; + } + } + + if (result.WAL_ttl_seconds > 0 || result.WAL_size_limit_MB > 0) { + result.recycle_log_file_num = false; + } + + if (result.recycle_log_file_num && + (result.wal_recovery_mode == WALRecoveryMode::kPointInTimeRecovery || + result.wal_recovery_mode == WALRecoveryMode::kAbsoluteConsistency)) { + // kPointInTimeRecovery is indistinguishable from + // kTolerateCorruptedTailRecords in recycle mode since we define + // the "end" of the log as the first corrupt record we encounter. + // kAbsoluteConsistency doesn't make sense because even a clean + // shutdown leaves old junk at the end of the log file. + result.wal_recovery_mode = WALRecoveryMode::kTolerateCorruptedTailRecords; + } + + if (result.wal_dir.empty()) { + // Use dbname as default + result.wal_dir = dbname; + } + if (result.wal_dir.back() == '/') { + result.wal_dir = result.wal_dir.substr(0, result.wal_dir.size() - 1); + } + + if (result.db_paths.size() == 0) { + result.db_paths.emplace_back(dbname, std::numeric_limits::max()); + } + + if (result.compaction_readahead_size > 0) { + result.new_table_reader_for_compaction_inputs = true; + } + + // Force flush on DB open if 2PC is enabled, since with 2PC we have no + // guarantee that consecutive log files have consecutive sequence id, which + // make recovery complicated. + if (result.allow_2pc) { + result.avoid_flush_during_recovery = false; + } + + return result; +} + +namespace { + +Status SanitizeOptionsByTable( + const DBOptions& db_opts, + const std::vector& column_families) { + Status s; + for (auto cf : column_families) { + s = cf.options.table_factory->SanitizeOptions(db_opts, cf.options); + if (!s.ok()) { + return s; + } + } + return Status::OK(); +} + +static Status ValidateOptions( + const DBOptions& db_options, + const std::vector& column_families) { + Status s; + + for (auto& cfd : column_families) { + s = CheckCompressionSupported(cfd.options); + if (s.ok() && db_options.allow_concurrent_memtable_write) { + s = CheckConcurrentWritesSupported(cfd.options); + } + if (!s.ok()) { + return s; + } + if (db_options.db_paths.size() > 1) { + if ((cfd.options.compaction_style != kCompactionStyleUniversal) && + (cfd.options.compaction_style != kCompactionStyleLevel)) { + return Status::NotSupported( + "More than one DB paths are only supported in " + "universal and level compaction styles. 
"); + } + } + } + + if (db_options.db_paths.size() > 4) { + return Status::NotSupported( + "More than four DB paths are not supported yet. "); + } + + if (db_options.allow_mmap_reads && !db_options.allow_os_buffer) { + // Protect against assert in PosixMMapReadableFile constructor + return Status::NotSupported( + "If memory mapped reads (allow_mmap_reads) are enabled " + "then os caching (allow_os_buffer) must also be enabled. "); + } + + return Status::OK(); +} + +CompressionType GetCompressionFlush( + const ImmutableCFOptions& ioptions, + const MutableCFOptions& mutable_cf_options) { + // Compressing memtable flushes might not help unless the sequential load + // optimization is used for leveled compaction. Otherwise the CPU and + // latency overhead is not offset by saving much space. + + bool can_compress; + + if (ioptions.compaction_style == kCompactionStyleUniversal) { + can_compress = + (ioptions.compaction_options_universal.compression_size_percent < 0); + } else { + // For leveled compress when min_level_to_compress == 0. + can_compress = ioptions.compression_per_level.empty() || + ioptions.compression_per_level[0] != kNoCompression; + } + + if (can_compress) { + return mutable_cf_options.compression; + } else { + return kNoCompression; + } +} + +void DumpSupportInfo(Logger* logger) { + Log(InfoLogLevel::INFO_LEVEL, logger, "Compression algorithms supported:"); + Log(InfoLogLevel::INFO_LEVEL, logger, "\tSnappy supported: %d", + Snappy_Supported()); + Log(InfoLogLevel::INFO_LEVEL, logger, "\tZlib supported: %d", + Zlib_Supported()); + Log(InfoLogLevel::INFO_LEVEL, logger, "\tBzip supported: %d", + BZip2_Supported()); + Log(InfoLogLevel::INFO_LEVEL, logger, "\tLZ4 supported: %d", LZ4_Supported()); + Log(InfoLogLevel::INFO_LEVEL, logger, "Fast CRC32 supported: %d", + crc32c::IsFastCrc32Supported()); +} + +} // namespace + +DBImpl::DBImpl(const DBOptions& options, const std::string& dbname) + : env_(options.env), + dbname_(dbname), + db_options_(SanitizeOptions(dbname, options)), + stats_(db_options_.statistics.get()), + db_lock_(nullptr), + mutex_(stats_, env_, DB_MUTEX_WAIT_MICROS, options.use_adaptive_mutex), + shutting_down_(false), + bg_cv_(&mutex_), + logfile_number_(0), + log_dir_synced_(false), + log_empty_(true), + default_cf_handle_(nullptr), + log_sync_cv_(&mutex_), + total_log_size_(0), + max_total_in_memory_state_(0), + is_snapshot_supported_(true), + write_buffer_manager_(db_options_.write_buffer_manager.get()), + write_thread_(options.enable_write_thread_adaptive_yield + ? options.write_thread_max_yield_usec + : 0, + options.write_thread_slow_yield_usec), + write_controller_(options.delayed_write_rate), + last_batch_group_size_(0), + unscheduled_flushes_(0), + unscheduled_compactions_(0), + bg_compaction_scheduled_(0), + num_running_compactions_(0), + bg_flush_scheduled_(0), + num_running_flushes_(0), + bg_purge_scheduled_(0), + disable_delete_obsolete_files_(0), + delete_obsolete_files_next_run_( + options.env->NowMicros() + + db_options_.delete_obsolete_files_period_micros), + last_stats_dump_time_microsec_(0), + next_job_id_(1), + has_unpersisted_data_(false), + env_options_(db_options_), +#ifndef ROCKSDB_LITE + wal_manager_(db_options_, env_options_), +#endif // ROCKSDB_LITE + event_logger_(db_options_.info_log.get()), + bg_work_paused_(0), + bg_compaction_paused_(0), + refitting_level_(false), + opened_successfully_(false) { + env_->GetAbsolutePath(dbname, &db_absolute_path_); + + // Reserve ten files or so for other uses and give the rest to TableCache. 
+ // Give a large number for setting of "infinite" open files. + const int table_cache_size = (db_options_.max_open_files == -1) ? + 4194304 : db_options_.max_open_files - 10; + table_cache_ = + NewLRUCache(table_cache_size, db_options_.table_cache_numshardbits); + + versions_.reset(new VersionSet(dbname_, &db_options_, env_options_, + table_cache_.get(), write_buffer_manager_, + &write_controller_)); + column_family_memtables_.reset( + new ColumnFamilyMemTablesImpl(versions_->GetColumnFamilySet())); + + DumpRocksDBBuildVersion(db_options_.info_log.get()); + DumpDBFileSummary(db_options_, dbname_); + db_options_.Dump(db_options_.info_log.get()); + DumpSupportInfo(db_options_.info_log.get()); +} + +// Will lock the mutex_, will wait for completion if wait is true +void DBImpl::CancelAllBackgroundWork(bool wait) { + InstrumentedMutexLock l(&mutex_); + shutting_down_.store(true, std::memory_order_release); + bg_cv_.SignalAll(); + if (!wait) { + return; + } + // Wait for background work to finish + while (bg_compaction_scheduled_ || bg_flush_scheduled_) { + bg_cv_.Wait(); + } +} + +DBImpl::~DBImpl() { + mutex_.Lock(); + + if (!shutting_down_.load(std::memory_order_acquire) && + has_unpersisted_data_) { + for (auto cfd : *versions_->GetColumnFamilySet()) { + if (!cfd->IsDropped() && !cfd->mem()->IsEmpty()) { + cfd->Ref(); + mutex_.Unlock(); + FlushMemTable(cfd, FlushOptions()); + mutex_.Lock(); + cfd->Unref(); + } + } + versions_->GetColumnFamilySet()->FreeDeadColumnFamilies(); + } + mutex_.Unlock(); + // CancelAllBackgroundWork called with false means we just set the shutdown + // marker. After this we do a variant of the waiting and unschedule work + // (to consider: moving all the waiting into CancelAllBackgroundWork(true)) + CancelAllBackgroundWork(false); + int compactions_unscheduled = env_->UnSchedule(this, Env::Priority::LOW); + int flushes_unscheduled = env_->UnSchedule(this, Env::Priority::HIGH); + mutex_.Lock(); + bg_compaction_scheduled_ -= compactions_unscheduled; + bg_flush_scheduled_ -= flushes_unscheduled; + + // Wait for background work to finish + while (bg_compaction_scheduled_ || bg_flush_scheduled_ || + bg_purge_scheduled_) { + TEST_SYNC_POINT("DBImpl::~DBImpl:WaitJob"); + bg_cv_.Wait(); + } + EraseThreadStatusDbInfo(); + flush_scheduler_.Clear(); + + while (!flush_queue_.empty()) { + auto cfd = PopFirstFromFlushQueue(); + if (cfd->Unref()) { + delete cfd; + } + } + while (!compaction_queue_.empty()) { + auto cfd = PopFirstFromCompactionQueue(); + if (cfd->Unref()) { + delete cfd; + } + } + + if (default_cf_handle_ != nullptr) { + // we need to delete handle outside of lock because it does its own locking + mutex_.Unlock(); + delete default_cf_handle_; + mutex_.Lock(); + } + + // Clean up obsolete files due to SuperVersion release. + // (1) Need to delete to obsolete files before closing because RepairDB() + // scans all existing files in the file system and builds manifest file. + // Keeping obsolete files confuses the repair process. + // (2) Need to check if we Open()/Recover() the DB successfully before + // deleting because if VersionSet recover fails (may be due to corrupted + // manifest file), it is not able to identify live files correctly. As a + // result, all "live" files can get deleted by accident. However, corrupted + // manifest is recoverable by RepairDB(). 
+  if (opened_successfully_) {
+    JobContext job_context(next_job_id_.fetch_add(1));
+    FindObsoleteFiles(&job_context, true);
+
+    mutex_.Unlock();
+    // manifest number starting from 2
+    job_context.manifest_file_number = 1;
+    if (job_context.HaveSomethingToDelete()) {
+      PurgeObsoleteFiles(job_context);
+    }
+    job_context.Clean();
+    mutex_.Lock();
+  }
+
+  for (auto l : logs_to_free_) {
+    delete l;
+  }
+  for (auto& log : logs_) {
+    log.ClearWriter();
+  }
+  logs_.clear();
+
+  // Table cache may have table handles holding blocks from the block cache.
+  // We need to release them before the block cache is destroyed. The block
+  // cache may be destroyed inside versions_.reset(), when column family data
+  // list is destroyed, so leaving handles in table cache after
+  // versions_.reset() may cause issues.
+  // Here we clean all unreferenced handles in table cache.
+  // Now we assume all user queries have finished, so only the version set
+  // itself can possibly hold blocks from the block cache. After releasing
+  // unreferenced handles here, only handles held by the version set are left,
+  // and inside versions_.reset() we will release them. There, we need to make
+  // sure every time a handle is released, we erase it from the cache too. By
+  // doing that, we can guarantee that after versions_.reset(), table cache is
+  // empty so the cache can be safely destroyed.
+  table_cache_->EraseUnRefEntries();
+
+  for (auto& txn_entry : recovered_transactions_) {
+    delete txn_entry.second;
+  }
+
+  // versions need to be destroyed before table_cache since it can hold
+  // references to table_cache.
+  versions_.reset();
+  mutex_.Unlock();
+  if (db_lock_ != nullptr) {
+    env_->UnlockFile(db_lock_);
+  }
+
+  LogFlush(db_options_.info_log);
+}
+
+Status DBImpl::NewDB() {
+  VersionEdit new_db;
+  new_db.SetLogNumber(0);
+  new_db.SetNextFile(2);
+  new_db.SetLastSequence(0);
+
+  Status s;
+
+  Log(InfoLogLevel::INFO_LEVEL,
+      db_options_.info_log, "Creating manifest 1 \n");
+  const std::string manifest = DescriptorFileName(dbname_, 1);
+  {
+    unique_ptr<WritableFile> file;
+    EnvOptions env_options = env_->OptimizeForManifestWrite(env_options_);
+    s = NewWritableFile(env_, manifest, &file, env_options);
+    if (!s.ok()) {
+      return s;
+    }
+    file->SetPreallocationBlockSize(db_options_.manifest_preallocation_size);
+    unique_ptr<WritableFileWriter> file_writer(
+        new WritableFileWriter(std::move(file), env_options));
+    log::Writer log(std::move(file_writer), 0, false);
+    std::string record;
+    new_db.EncodeTo(&record);
+    s = log.AddRecord(record);
+    if (s.ok()) {
+      s = SyncManifest(env_, &db_options_, log.file());
+    }
+  }
+  if (s.ok()) {
+    // Make "CURRENT" file that points to the new manifest file.
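+    // SetCurrentFile() below writes the manifest name to a temp file and
+    // renames it into place; for descriptor number 1 the resulting CURRENT
+    // should contain roughly "MANIFEST-000001\n" (illustration of the
+    // file-naming convention, not behavior added by this change).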
+    s = SetCurrentFile(env_, dbname_, 1, directories_.GetDbDir());
+  } else {
+    env_->DeleteFile(manifest);
+  }
+  return s;
+}
+
+void DBImpl::MaybeIgnoreError(Status* s) const {
+  if (s->ok() || db_options_.paranoid_checks) {
+    // No change needed
+  } else {
+    Log(InfoLogLevel::WARN_LEVEL,
+        db_options_.info_log, "Ignoring error %s", s->ToString().c_str());
+    *s = Status::OK();
+  }
+}
+
+const Status DBImpl::CreateArchivalDirectory() {
+  if (db_options_.WAL_ttl_seconds > 0 || db_options_.WAL_size_limit_MB > 0) {
+    std::string archivalPath = ArchivalDirectory(db_options_.wal_dir);
+    return env_->CreateDirIfMissing(archivalPath);
+  }
+  return Status::OK();
+}
+
+void DBImpl::PrintStatistics() {
+  auto dbstats = db_options_.statistics.get();
+  if (dbstats) {
+    Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+        "STATISTICS:\n %s",
+        dbstats->ToString().c_str());
+  }
+}
+
+#ifndef ROCKSDB_LITE
+#ifdef ROCKSDB_JEMALLOC
+typedef struct {
+  char* cur;
+  char* end;
+} MallocStatus;
+
+static void GetJemallocStatus(void* mstat_arg, const char* status) {
+  MallocStatus* mstat = reinterpret_cast<MallocStatus*>(mstat_arg);
+  size_t status_len = status ? strlen(status) : 0;
+  size_t buf_size = (size_t)(mstat->end - mstat->cur);
+  if (!status_len || status_len > buf_size) {
+    return;
+  }
+
+  snprintf(mstat->cur, buf_size, "%s", status);
+  mstat->cur += status_len;
+}
+#endif  // ROCKSDB_JEMALLOC
+
+static void DumpMallocStats(std::string* stats) {
+#ifdef ROCKSDB_JEMALLOC
+  MallocStatus mstat;
+  const unsigned int kMallocStatusLen = 1000000;
+  std::unique_ptr<char[]> buf{new char[kMallocStatusLen + 1]};
+  mstat.cur = buf.get();
+  mstat.end = buf.get() + kMallocStatusLen;
+  malloc_stats_print(GetJemallocStatus, &mstat, "");
+  stats->append(buf.get());
+#endif  // ROCKSDB_JEMALLOC
+}
+#endif  // !ROCKSDB_LITE
+
+void DBImpl::MaybeDumpStats() {
+  if (db_options_.stats_dump_period_sec == 0) return;
+
+  const uint64_t now_micros = env_->NowMicros();
+
+  if (last_stats_dump_time_microsec_ +
+          db_options_.stats_dump_period_sec * 1000000 <=
+      now_micros) {
+    // Multiple threads could race in here simultaneously.
+    // However, the last one will update last_stats_dump_time_microsec_
+    // atomically. We could see more than one dump during one dump
+    // period in rare cases.
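+    // For example (illustrative value only): with stats_dump_period_sec ==
+    // 600, the comparison above lets a dump through only when at least
+    // 600 * 1000000 microseconds (10 minutes) have passed since the last one.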
+    last_stats_dump_time_microsec_ = now_micros;
+
+#ifndef ROCKSDB_LITE
+    const DBPropertyInfo* cf_property_info =
+        GetPropertyInfo(DB::Properties::kCFStats);
+    assert(cf_property_info != nullptr);
+    const DBPropertyInfo* db_property_info =
+        GetPropertyInfo(DB::Properties::kDBStats);
+    assert(db_property_info != nullptr);
+
+    std::string stats;
+    {
+      InstrumentedMutexLock l(&mutex_);
+      for (auto cfd : *versions_->GetColumnFamilySet()) {
+        cfd->internal_stats()->GetStringProperty(
+            *cf_property_info, DB::Properties::kCFStats, &stats);
+      }
+      default_cf_internal_stats_->GetStringProperty(
+          *db_property_info, DB::Properties::kDBStats, &stats);
+    }
+    if (db_options_.dump_malloc_stats) {
+      DumpMallocStats(&stats);
+    }
+    Log(InfoLogLevel::WARN_LEVEL,
+        db_options_.info_log, "------- DUMPING STATS -------");
+    Log(InfoLogLevel::WARN_LEVEL,
+        db_options_.info_log, "%s", stats.c_str());
+#endif  // !ROCKSDB_LITE
+
+    PrintStatistics();
+  }
+}
+
+uint64_t DBImpl::FindMinPrepLogReferencedByMemTable() {
+  uint64_t min_log = 0;
+
+  // we must look through the memtables for two phase transactions
+  // that have been committed but not yet flushed
+  for (auto loop_cfd : *versions_->GetColumnFamilySet()) {
+    if (loop_cfd->IsDropped()) {
+      continue;
+    }
+
+    auto log = loop_cfd->imm()->GetMinLogContainingPrepSection();
+
+    if (log > 0 && (min_log == 0 || log < min_log)) {
+      min_log = log;
+    }
+
+    log = loop_cfd->mem()->GetMinLogContainingPrepSection();
+
+    if (log > 0 && (min_log == 0 || log < min_log)) {
+      min_log = log;
+    }
+  }
+
+  return min_log;
+}
+
+void DBImpl::MarkLogAsHavingPrepSectionFlushed(uint64_t log) {
+  assert(log != 0);
+  std::lock_guard<std::mutex> lock(prep_heap_mutex_);
+  auto it = prepared_section_completed_.find(log);
+  assert(it != prepared_section_completed_.end());
+  it->second += 1;
+}
+
+void DBImpl::MarkLogAsContainingPrepSection(uint64_t log) {
+  assert(log != 0);
+  std::lock_guard<std::mutex> lock(prep_heap_mutex_);
+  min_log_with_prep_.push(log);
+  auto it = prepared_section_completed_.find(log);
+  if (it == prepared_section_completed_.end()) {
+    prepared_section_completed_[log] = 0;
+  }
+}
+
+uint64_t DBImpl::FindMinLogContainingOutstandingPrep() {
+  std::lock_guard<std::mutex> lock(prep_heap_mutex_);
+  uint64_t min_log = 0;
+
+  // first we look in the prepared heap where we keep
+  // track of transactions that have been prepared (written to WAL)
+  // but not yet committed.
+  while (!min_log_with_prep_.empty()) {
+    min_log = min_log_with_prep_.top();
+
+    auto it = prepared_section_completed_.find(min_log);
+
+    // value was marked as 'deleted' from heap
+    if (it != prepared_section_completed_.end() && it->second > 0) {
+      it->second -= 1;
+      min_log_with_prep_.pop();
+
+      // back to square one...
+      min_log = 0;
+      continue;
+    } else {
+      // found a valid value
+      break;
+    }
+  }
+
+  return min_log;
+}
+
+void DBImpl::ScheduleBgLogWriterClose(JobContext* job_context) {
+  if (!job_context->logs_to_free.empty()) {
+    for (auto l : job_context->logs_to_free) {
+      AddToLogsToFreeQueue(l);
+    }
+    job_context->logs_to_free.clear();
+    SchedulePurge();
+  }
+}
+
+// * Returns the list of live files in 'sst_live'
+// If it's doing full scan:
+// * Returns the list of all files in the filesystem in
+//   'full_scan_candidate_files'.
+// Otherwise, gets obsolete files from VersionSet.
+// no_full_scan = true -- never do the full scan using GetChildren()
+// force = false -- don't force the full scan, except every
+//  db_options_.delete_obsolete_files_period_micros
+// force = true -- force the full scan
+void DBImpl::FindObsoleteFiles(JobContext* job_context, bool force,
+                               bool no_full_scan) {
+  mutex_.AssertHeld();
+
+  // if deletion is disabled, do nothing
+  if (disable_delete_obsolete_files_ > 0) {
+    return;
+  }
+
+  bool doing_the_full_scan = false;
+
+  // logic for figuring out if we're doing the full scan
+  if (no_full_scan) {
+    doing_the_full_scan = false;
+  } else if (force || db_options_.delete_obsolete_files_period_micros == 0) {
+    doing_the_full_scan = true;
+  } else {
+    const uint64_t now_micros = env_->NowMicros();
+    if (delete_obsolete_files_next_run_ < now_micros) {
+      doing_the_full_scan = true;
+      delete_obsolete_files_next_run_ =
+          now_micros + db_options_.delete_obsolete_files_period_micros;
+    }
+  }
+
+  // don't delete files that might be currently written to from compaction
+  // threads
+  // Since job_context->min_pending_output is set, until file scan finishes,
+  // mutex_ cannot be released. Otherwise, we might see no min_pending_output
+  // here but later find newer generated unfinalized files while scanning.
+  if (!pending_outputs_.empty()) {
+    job_context->min_pending_output = *pending_outputs_.begin();
+  } else {
+    // delete all of them
+    job_context->min_pending_output = std::numeric_limits<uint64_t>::max();
+  }
+
+  // Get obsolete files. This function will also update the list of
+  // pending files in VersionSet().
+  versions_->GetObsoleteFiles(&job_context->sst_delete_files,
+                              &job_context->manifest_delete_files,
+                              job_context->min_pending_output);
+
+  // store the current filenum, lognum, etc
+  job_context->manifest_file_number = versions_->manifest_file_number();
+  job_context->pending_manifest_file_number =
+      versions_->pending_manifest_file_number();
+  job_context->log_number = versions_->MinLogNumber();
+
+  if (allow_2pc()) {
+    // if we are using 2PC we must consider logs containing prepared
+    // sections of outstanding transactions.
+    //
+    // We must check min logs with outstanding prep before we check
+    // logs referenced by memtables because a log referenced by the
+    // first data structure could transition to the second under us.
+    //
+    // TODO(horuff): iterating over all column families under db mutex.
+    // should find a more optimal solution
+    auto min_log_in_prep_heap = FindMinLogContainingOutstandingPrep();
+
+    if (min_log_in_prep_heap != 0 &&
+        min_log_in_prep_heap < job_context->log_number) {
+      job_context->log_number = min_log_in_prep_heap;
+    }
+
+    auto min_log_refed_by_mem = FindMinPrepLogReferencedByMemTable();
+
+    if (min_log_refed_by_mem != 0 &&
+        min_log_refed_by_mem < job_context->log_number) {
+      job_context->log_number = min_log_refed_by_mem;
+    }
+  }
+
+  job_context->prev_log_number = versions_->prev_log_number();
+
+  versions_->AddLiveFiles(&job_context->sst_live);
+  if (doing_the_full_scan) {
+    for (size_t path_id = 0; path_id < db_options_.db_paths.size();
+         path_id++) {
+      // set of all files in the directory. We'll exclude files that are still
+      // alive in the subsequent processings.
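+      // Illustration (hypothetical listing): if GetChildren() returns
+      // "000123.sst", the candidate recorded below becomes "/000123.sst"
+      // paired with this path_id -- the one-off "/" prefix the TODO below
+      // refers to.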
+      std::vector<std::string> files;
+      env_->GetChildren(db_options_.db_paths[path_id].path,
+                        &files);  // Ignore errors
+      for (std::string file : files) {
+        // TODO(icanadi) clean up this mess to avoid having one-off "/" prefixes
+        job_context->full_scan_candidate_files.emplace_back(
+            "/" + file, static_cast<uint32_t>(path_id));
+      }
+    }
+
+    // Add log files in wal_dir
+    if (db_options_.wal_dir != dbname_) {
+      std::vector<std::string> log_files;
+      env_->GetChildren(db_options_.wal_dir, &log_files);  // Ignore errors
+      for (std::string log_file : log_files) {
+        job_context->full_scan_candidate_files.emplace_back(log_file, 0);
+      }
+    }
+    // Add info log files in db_log_dir
+    if (!db_options_.db_log_dir.empty() && db_options_.db_log_dir != dbname_) {
+      std::vector<std::string> info_log_files;
+      // Ignore errors
+      env_->GetChildren(db_options_.db_log_dir, &info_log_files);
+      for (std::string log_file : info_log_files) {
+        job_context->full_scan_candidate_files.emplace_back(log_file, 0);
+      }
+    }
+  }
+
+  if (!alive_log_files_.empty()) {
+    uint64_t min_log_number = job_context->log_number;
+    size_t num_alive_log_files = alive_log_files_.size();
+    // find newly obsoleted log files
+    while (alive_log_files_.begin()->number < min_log_number) {
+      auto& earliest = *alive_log_files_.begin();
+      if (db_options_.recycle_log_file_num > log_recycle_files.size()) {
+        Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+            "adding log %" PRIu64 " to recycle list\n", earliest.number);
+        log_recycle_files.push_back(earliest.number);
+      } else {
+        job_context->log_delete_files.push_back(earliest.number);
+      }
+      if (job_context->size_log_to_delete == 0) {
+        job_context->prev_total_log_size = total_log_size_;
+        job_context->num_alive_log_files = num_alive_log_files;
+      }
+      job_context->size_log_to_delete += earliest.size;
+      total_log_size_ -= earliest.size;
+      alive_log_files_.pop_front();
+      // Current log should always stay alive since it can't have
+      // number < MinLogNumber().
+      assert(alive_log_files_.size());
+    }
+    while (!logs_.empty() && logs_.front().number < min_log_number) {
+      auto& log = logs_.front();
+      if (log.getting_synced) {
+        log_sync_cv_.Wait();
+        // logs_ could have changed while we were waiting.
+        continue;
+      }
+      logs_to_free_.push_back(log.ReleaseWriter());
+      logs_.pop_front();
+    }
+    // Current log cannot be obsolete.
+    assert(!logs_.empty());
+  }
+
+  // We're just cleaning up for DB::Write().
+  assert(job_context->logs_to_free.empty());
+  job_context->logs_to_free = logs_to_free_;
+  logs_to_free_.clear();
+}
+
+namespace {
+bool CompareCandidateFile(const JobContext::CandidateFileInfo& first,
+                          const JobContext::CandidateFileInfo& second) {
+  if (first.file_name > second.file_name) {
+    return true;
+  } else if (first.file_name < second.file_name) {
+    return false;
+  } else {
+    return (first.path_id > second.path_id);
+  }
+}
+}  // namespace
+
+// Delete obsolete files and log status and information of file deletion
+void DBImpl::DeleteObsoleteFileImpl(Status file_deletion_status, int job_id,
+                                    const std::string& fname, FileType type,
+                                    uint64_t number, uint32_t path_id) {
+  if (type == kTableFile) {
+    file_deletion_status = DeleteSSTFile(&db_options_, fname, path_id);
+  } else {
+    file_deletion_status = env_->DeleteFile(fname);
+  }
+  if (file_deletion_status.ok()) {
+    Log(InfoLogLevel::DEBUG_LEVEL, db_options_.info_log,
+        "[JOB %d] Delete %s type=%d #%" PRIu64 " -- %s\n", job_id,
+        fname.c_str(), type, number, file_deletion_status.ToString().c_str());
+  } else if (env_->FileExists(fname).IsNotFound()) {
+    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+        "[JOB %d] Tried to delete a non-existing file %s type=%d #%" PRIu64
+        " -- %s\n",
+        job_id, fname.c_str(), type, number,
+        file_deletion_status.ToString().c_str());
+  } else {
+    Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
+        "[JOB %d] Failed to delete %s type=%d #%" PRIu64 " -- %s\n", job_id,
+        fname.c_str(), type, number, file_deletion_status.ToString().c_str());
+  }
+  if (type == kTableFile) {
+    EventHelpers::LogAndNotifyTableFileDeletion(
+        &event_logger_, job_id, number, fname, file_deletion_status, GetName(),
+        db_options_.listeners);
+  }
+}
+
+// Diffs the files listed in filenames; those that do not
+// belong to live files are possibly removed. Also, removes all the
+// files in sst_delete_files and log_delete_files.
+// It is not necessary to hold the mutex when invoking this method.
+void DBImpl::PurgeObsoleteFiles(const JobContext& state, bool schedule_only) {
+  // we'd better have something to delete
+  assert(state.HaveSomethingToDelete());
+
+  // this checks if FindObsoleteFiles() was run before. If not, don't do
+  // PurgeObsoleteFiles(). If FindObsoleteFiles() was run, we need to also
+  // run PurgeObsoleteFiles(), even if disable_delete_obsolete_files_ is true
+  if (state.manifest_file_number == 0) {
+    return;
+  }
+
+  // Now, convert live list to an unordered map, WITHOUT mutex held;
+  // set is slow.
+  std::unordered_map<uint64_t, const FileDescriptor*> sst_live_map;
+  for (const FileDescriptor& fd : state.sst_live) {
+    sst_live_map[fd.GetNumber()] = &fd;
+  }
+
+  auto candidate_files = state.full_scan_candidate_files;
+  candidate_files.reserve(
+      candidate_files.size() + state.sst_delete_files.size() +
+      state.log_delete_files.size() + state.manifest_delete_files.size());
+  // We may ignore the dbname when generating the file names.
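+  // With an empty db name, MakeTableFileName() below yields names like
+  // "/000123.sst", matching the "/"-prefixed full-scan candidates collected
+  // earlier, while LogFileName(...).substr(1) strips that slash for WAL
+  // entries (illustration of the existing convention, not new behavior).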
+  const char* kDumbDbName = "";
+  for (auto file : state.sst_delete_files) {
+    candidate_files.emplace_back(
+        MakeTableFileName(kDumbDbName, file->fd.GetNumber()),
+        file->fd.GetPathId());
+    delete file;
+  }
+
+  for (auto file_num : state.log_delete_files) {
+    if (file_num > 0) {
+      candidate_files.emplace_back(LogFileName(kDumbDbName, file_num).substr(1),
+                                   0);
+    }
+  }
+  for (const auto& filename : state.manifest_delete_files) {
+    candidate_files.emplace_back(filename, 0);
+  }
+
+  // dedup state.candidate_files so we don't try to delete the same
+  // file twice
+  std::sort(candidate_files.begin(), candidate_files.end(),
+            CompareCandidateFile);
+  candidate_files.erase(
+      std::unique(candidate_files.begin(), candidate_files.end()),
+      candidate_files.end());
+
+  if (state.prev_total_log_size > 0) {
+    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+        "[JOB %d] Try to delete WAL files size %" PRIu64
+        ", prev total WAL file size %" PRIu64
+        ", number of live WAL files %" ROCKSDB_PRIszt ".\n",
+        state.job_id, state.size_log_to_delete, state.prev_total_log_size,
+        state.num_alive_log_files);
+  }
+
+  std::vector<std::string> old_info_log_files;
+  InfoLogPrefix info_log_prefix(!db_options_.db_log_dir.empty(), dbname_);
+  for (const auto& candidate_file : candidate_files) {
+    std::string to_delete = candidate_file.file_name;
+    uint32_t path_id = candidate_file.path_id;
+    uint64_t number;
+    FileType type;
+    // Ignore file if we cannot recognize it.
+    if (!ParseFileName(to_delete, &number, info_log_prefix.prefix, &type)) {
+      continue;
+    }
+
+    bool keep = true;
+    switch (type) {
+      case kLogFile:
+        keep = ((number >= state.log_number) ||
+                (number == state.prev_log_number));
+        break;
+      case kDescriptorFile:
+        // Keep my manifest file, and any newer incarnations'
+        // (can happen during manifest roll)
+        keep = (number >= state.manifest_file_number);
+        break;
+      case kTableFile:
+        // If the second condition is not there, this makes
+        // DontDeletePendingOutputs fail
+        keep = (sst_live_map.find(number) != sst_live_map.end()) ||
+               number >= state.min_pending_output;
+        break;
+      case kTempFile:
+        // Any temp files that are currently being written to must
+        // be recorded in pending_outputs_, which is inserted into "live".
+        // Also, SetCurrentFile creates a temp file when writing out new
+        // manifest, which is equal to state.pending_manifest_file_number. We
+        // should not delete that file
+        //
+        // TODO(yhchiang): carefully modify the third condition to safely
+        //                 remove the temp options files.
+        keep = (sst_live_map.find(number) != sst_live_map.end()) ||
+               (number == state.pending_manifest_file_number) ||
+               (to_delete.find(kOptionsFileNamePrefix) != std::string::npos);
+        break;
+      case kInfoLogFile:
+        keep = true;
+        if (number != 0) {
+          old_info_log_files.push_back(to_delete);
+        }
+        break;
+      case kCurrentFile:
+      case kDBLockFile:
+      case kIdentityFile:
+      case kMetaDatabase:
+      case kOptionsFile:
+        keep = true;
+        break;
+    }
+
+    if (keep) {
+      continue;
+    }
+
+    std::string fname;
+    if (type == kTableFile) {
+      // evict from cache
+      TableCache::Evict(table_cache_.get(), number);
+      fname = TableFileName(db_options_.db_paths, number, path_id);
+    } else {
+      fname = ((type == kLogFile) ?
+               db_options_.wal_dir : dbname_) + "/" + to_delete;
+    }
+
+#ifndef ROCKSDB_LITE
+    if (type == kLogFile && (db_options_.WAL_ttl_seconds > 0 ||
+                             db_options_.WAL_size_limit_MB > 0)) {
+      wal_manager_.ArchiveWALFile(fname, number);
+      continue;
+    }
+#endif  // !ROCKSDB_LITE
+
+    Status file_deletion_status;
+    if (schedule_only) {
+      InstrumentedMutexLock guard_lock(&mutex_);
+      SchedulePendingPurge(fname, type, number, path_id, state.job_id);
+    } else {
+      DeleteObsoleteFileImpl(file_deletion_status, state.job_id, fname, type,
+                             number, path_id);
+    }
+  }
+
+  // Delete old info log files.
+  size_t old_info_log_file_count = old_info_log_files.size();
+  if (old_info_log_file_count != 0 &&
+      old_info_log_file_count >= db_options_.keep_log_file_num) {
+    std::sort(old_info_log_files.begin(), old_info_log_files.end());
+    size_t end = old_info_log_file_count - db_options_.keep_log_file_num;
+    for (unsigned int i = 0; i <= end; i++) {
+      std::string& to_delete = old_info_log_files.at(i);
+      std::string full_path_to_delete = (db_options_.db_log_dir.empty() ?
+          dbname_ : db_options_.db_log_dir) + "/" + to_delete;
+      Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+          "[JOB %d] Delete info log file %s\n", state.job_id,
+          full_path_to_delete.c_str());
+      Status s = env_->DeleteFile(full_path_to_delete);
+      if (!s.ok()) {
+        if (env_->FileExists(full_path_to_delete).IsNotFound()) {
+          Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+              "[JOB %d] Tried to delete non-existing info log file %s FAILED "
+              "-- %s\n",
+              state.job_id, to_delete.c_str(), s.ToString().c_str());
+        } else {
+          Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
+              "[JOB %d] Delete info log file %s FAILED -- %s\n", state.job_id,
+              to_delete.c_str(), s.ToString().c_str());
+        }
+      }
+    }
+  }
+#ifndef ROCKSDB_LITE
+  wal_manager_.PurgeObsoleteWALFiles();
+#endif  // ROCKSDB_LITE
+  LogFlush(db_options_.info_log);
+}
+
+void DBImpl::DeleteObsoleteFiles() {
+  mutex_.AssertHeld();
+  JobContext job_context(next_job_id_.fetch_add(1));
+  FindObsoleteFiles(&job_context, true);
+
+  mutex_.Unlock();
+  if (job_context.HaveSomethingToDelete()) {
+    PurgeObsoleteFiles(job_context);
+  }
+  job_context.Clean();
+  mutex_.Lock();
+}
+
+Status DBImpl::Directories::CreateAndNewDirectory(
+    Env* env, const std::string& dirname,
+    std::unique_ptr<Directory>* directory) const {
+  // We call CreateDirIfMissing() as the directory may already exist (if we
+  // are reopening a DB), and when this happens we don't want creating the
+  // directory to cause an error. However, we need to check if creating the
+  // directory fails or else we may get an obscure message about the lock
+  // file not existing. One real-world example of this occurring is if
+  // env->CreateDirIfMissing() doesn't create intermediate directories, e.g.
+  // when dbname_ is "dir/db" but "dir" doesn't exist.
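+  // Concrete case (illustration): opening a DB at "dir/db" while "dir" is
+  // missing would otherwise surface later as a confusing "lock file not
+  // found" error instead of the directory-creation failure reported here.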
+  Status s = env->CreateDirIfMissing(dirname);
+  if (!s.ok()) {
+    return s;
+  }
+  return env->NewDirectory(dirname, directory);
+}
+
+Status DBImpl::Directories::SetDirectories(
+    Env* env, const std::string& dbname, const std::string& wal_dir,
+    const std::vector<DbPath>& data_paths) {
+  Status s = CreateAndNewDirectory(env, dbname, &db_dir_);
+  if (!s.ok()) {
+    return s;
+  }
+  if (!wal_dir.empty() && dbname != wal_dir) {
+    s = CreateAndNewDirectory(env, wal_dir, &wal_dir_);
+    if (!s.ok()) {
+      return s;
+    }
+  }
+
+  data_dirs_.clear();
+  for (auto& p : data_paths) {
+    const std::string db_path = p.path;
+    if (db_path == dbname) {
+      data_dirs_.emplace_back(nullptr);
+    } else {
+      std::unique_ptr<Directory> path_directory;
+      s = CreateAndNewDirectory(env, db_path, &path_directory);
+      if (!s.ok()) {
+        return s;
+      }
+      data_dirs_.emplace_back(path_directory.release());
+    }
+  }
+  assert(data_dirs_.size() == data_paths.size());
+  return Status::OK();
+}
+
+Directory* DBImpl::Directories::GetDataDir(size_t path_id) {
+  assert(path_id < data_dirs_.size());
+  Directory* ret_dir = data_dirs_[path_id].get();
+  if (ret_dir == nullptr) {
+    // Should use db_dir_
+    return db_dir_.get();
+  }
+  return ret_dir;
+}
+
+Status DBImpl::Recover(
+    const std::vector<ColumnFamilyDescriptor>& column_families, bool read_only,
+    bool error_if_log_file_exist, bool error_if_data_exists_in_logs) {
+  mutex_.AssertHeld();
+
+  bool is_new_db = false;
+  assert(db_lock_ == nullptr);
+  if (!read_only) {
+    Status s = directories_.SetDirectories(env_, dbname_, db_options_.wal_dir,
+                                           db_options_.db_paths);
+    if (!s.ok()) {
+      return s;
+    }
+
+    s = env_->LockFile(LockFileName(dbname_), &db_lock_);
+    if (!s.ok()) {
+      return s;
+    }
+
+    s = env_->FileExists(CurrentFileName(dbname_));
+    if (s.IsNotFound()) {
+      if (db_options_.create_if_missing) {
+        s = NewDB();
+        is_new_db = true;
+        if (!s.ok()) {
+          return s;
+        }
+      } else {
+        return Status::InvalidArgument(
+            dbname_, "does not exist (create_if_missing is false)");
+      }
+    } else if (s.ok()) {
+      if (db_options_.error_if_exists) {
+        return Status::InvalidArgument(
+            dbname_, "exists (error_if_exists is true)");
+      }
+    } else {
+      // Unexpected error reading file
+      assert(s.IsIOError());
+      return s;
+    }
+    // Check for the IDENTITY file and create it if not there
+    s = env_->FileExists(IdentityFileName(dbname_));
+    if (s.IsNotFound()) {
+      s = SetIdentityFile(env_, dbname_);
+      if (!s.ok()) {
+        return s;
+      }
+    } else if (!s.ok()) {
+      assert(s.IsIOError());
+      return s;
+    }
+  }
+
+  Status s = versions_->Recover(column_families, read_only);
+  if (db_options_.paranoid_checks && s.ok()) {
+    s = CheckConsistency();
+  }
+  if (s.ok()) {
+    SequenceNumber next_sequence(kMaxSequenceNumber);
+    default_cf_handle_ = new ColumnFamilyHandleImpl(
+        versions_->GetColumnFamilySet()->GetDefault(), this, &mutex_);
+    default_cf_internal_stats_ = default_cf_handle_->cfd()->internal_stats();
+    single_column_family_mode_ =
+        versions_->GetColumnFamilySet()->NumberOfColumnFamilies() == 1;
+
+    // Recover from all newer log files than the ones named in the
+    // descriptor (new log files may have been added by the previous
+    // incarnation without registering them in the descriptor).
+    //
+    // Note that prev_log_number() is no longer used, but we pay
+    // attention to it in case we are recovering a database
+    // produced by an older version of rocksdb.
+    std::vector<std::string> filenames;
+    s = env_->GetChildren(db_options_.wal_dir, &filenames);
+    if (!s.ok()) {
+      return s;
+    }
+
+    std::vector<uint64_t> logs;
+    for (size_t i = 0; i < filenames.size(); i++) {
+      uint64_t number;
+      FileType type;
+      if (ParseFileName(filenames[i], &number, &type) && type == kLogFile) {
+        if (is_new_db) {
+          return Status::Corruption(
+              "While creating a new Db, wal_dir contains "
+              "existing log file: ",
+              filenames[i]);
+        } else {
+          logs.push_back(number);
+        }
+      }
+    }
+
+    if (logs.size() > 0) {
+      if (error_if_log_file_exist) {
+        return Status::Corruption(
+            "The db was opened in readonly mode with error_if_log_file_exist "
+            "flag but a log file already exists");
+      } else if (error_if_data_exists_in_logs) {
+        for (auto& log : logs) {
+          std::string fname = LogFileName(db_options_.wal_dir, log);
+          uint64_t bytes;
+          s = env_->GetFileSize(fname, &bytes);
+          if (s.ok()) {
+            if (bytes > 0) {
+              return Status::Corruption(
+                  "error_if_data_exists_in_logs is set but there is data "
+                  "in log files.");
+            }
+          }
+        }
+      }
+    }
+
+    if (!logs.empty()) {
+      // Recover in the order in which the logs were generated
+      std::sort(logs.begin(), logs.end());
+      s = RecoverLogFiles(logs, &next_sequence, read_only);
+      if (!s.ok()) {
+        // Clear memtables if recovery failed
+        for (auto cfd : *versions_->GetColumnFamilySet()) {
+          cfd->CreateNewMemtable(*cfd->GetLatestMutableCFOptions(),
+                                 kMaxSequenceNumber);
+        }
+      }
+    }
+    SetTickerCount(stats_, SEQUENCE_NUMBER, versions_->LastSequence());
+  }
+
+  // Initial value
+  max_total_in_memory_state_ = 0;
+  for (auto cfd : *versions_->GetColumnFamilySet()) {
+    auto* mutable_cf_options = cfd->GetLatestMutableCFOptions();
+    max_total_in_memory_state_ += mutable_cf_options->write_buffer_size *
+                                  mutable_cf_options->max_write_buffer_number;
+  }
+
+  return s;
+}
+
+// REQUIRES: log_numbers are sorted in ascending order
+Status DBImpl::RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
+                               SequenceNumber* next_sequence, bool read_only) {
+  struct LogReporter : public log::Reader::Reporter {
+    Env* env;
+    Logger* info_log;
+    const char* fname;
+    Status* status;  // nullptr if db_options_.paranoid_checks==false
+    virtual void Corruption(size_t bytes, const Status& s) override {
+      Log(InfoLogLevel::WARN_LEVEL,
+          info_log, "%s%s: dropping %d bytes; %s",
+          (this->status == nullptr ?
"(ignoring error) " : ""), + fname, static_cast(bytes), s.ToString().c_str()); + if (this->status != nullptr && this->status->ok()) { + *this->status = s; + } + } + }; + + mutex_.AssertHeld(); + Status status; + std::unordered_map version_edits; + // no need to refcount because iteration is under mutex + for (auto cfd : *versions_->GetColumnFamilySet()) { + VersionEdit edit; + edit.SetColumnFamily(cfd->GetID()); + version_edits.insert({cfd->GetID(), edit}); + } + int job_id = next_job_id_.fetch_add(1); + { + auto stream = event_logger_.Log(); + stream << "job" << job_id << "event" + << "recovery_started"; + stream << "log_files"; + stream.StartArray(); + for (auto log_number : log_numbers) { + stream << log_number; + } + stream.EndArray(); + } + +#ifndef ROCKSDB_LITE + if (db_options_.wal_filter != nullptr) { + std::map cf_name_id_map; + std::map cf_lognumber_map; + for (auto cfd : *versions_->GetColumnFamilySet()) { + cf_name_id_map.insert( + std::make_pair(cfd->GetName(), cfd->GetID())); + cf_lognumber_map.insert( + std::make_pair(cfd->GetID(), cfd->GetLogNumber())); + } + + db_options_.wal_filter->ColumnFamilyLogNumberMap( + cf_lognumber_map, + cf_name_id_map); + } +#endif + + bool stop_replay_by_wal_filter = false; + bool stop_replay_for_corruption = false; + bool flushed = false; + SequenceNumber recovered_sequence = 0; + for (auto log_number : log_numbers) { + // The previous incarnation may not have written any MANIFEST + // records after allocating this log number. So we manually + // update the file number allocation counter in VersionSet. + versions_->MarkFileNumberUsedDuringRecovery(log_number); + // Open the log file + std::string fname = LogFileName(db_options_.wal_dir, log_number); + + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "Recovering log #%" PRIu64 " mode %d", log_number, + db_options_.wal_recovery_mode); + auto logFileDropped = [this, &fname]() { + uint64_t bytes; + if (env_->GetFileSize(fname, &bytes).ok()) { + auto info_log = db_options_.info_log.get(); + Log(InfoLogLevel::WARN_LEVEL, info_log, "%s: dropping %d bytes", + fname.c_str(), static_cast(bytes)); + } + }; + if (stop_replay_by_wal_filter) { + logFileDropped(); + continue; + } + + unique_ptr file_reader; + { + unique_ptr file; + status = env_->NewSequentialFile(fname, &file, env_options_); + if (!status.ok()) { + MaybeIgnoreError(&status); + if (!status.ok()) { + return status; + } else { + // Fail with one log file, but that's ok. + // Try next one. + continue; + } + } + file_reader.reset(new SequentialFileReader(std::move(file))); + } + + // Create the log reader. + LogReporter reporter; + reporter.env = env_; + reporter.info_log = db_options_.info_log.get(); + reporter.fname = fname.c_str(); + if (!db_options_.paranoid_checks || + db_options_.wal_recovery_mode == + WALRecoveryMode::kSkipAnyCorruptedRecords) { + reporter.status = nullptr; + } else { + reporter.status = &status; + } + // We intentially make log::Reader do checksumming even if + // paranoid_checks==false so that corruptions cause entire commits + // to be skipped instead of propagating bad information (like overly + // large sequence numbers). 
+    log::Reader reader(db_options_.info_log, std::move(file_reader), &reporter,
+                       true /*checksum*/, 0 /*initial_offset*/, log_number);
+
+    // Determine if we should tolerate incomplete records at the tail end of
+    // the log, then read all the records and add them to a memtable.
+    std::string scratch;
+    Slice record;
+    WriteBatch batch;
+
+    while (
+        !stop_replay_by_wal_filter &&
+        reader.ReadRecord(&record, &scratch, db_options_.wal_recovery_mode) &&
+        status.ok()) {
+      if (record.size() < WriteBatchInternal::kHeader) {
+        reporter.Corruption(record.size(),
+                            Status::Corruption("log record too small"));
+        continue;
+      }
+      WriteBatchInternal::SetContents(&batch, record);
+      SequenceNumber sequence = WriteBatchInternal::Sequence(&batch);
+
+      // In point-in-time recovery mode, if sequence ids of the log files are
+      // consecutive, we continue recovery despite corruption. This could
+      // happen when we open and write to a corrupted DB, where the sequence
+      // id will start from the last sequence id we recovered.
+      if (db_options_.wal_recovery_mode ==
+          WALRecoveryMode::kPointInTimeRecovery) {
+        if (sequence == recovered_sequence + 1) {
+          stop_replay_for_corruption = false;
+        }
+        if (stop_replay_for_corruption) {
+          logFileDropped();
+          break;
+        }
+      }
+
+      recovered_sequence = sequence;
+      bool no_prev_seq = true;
+      if (*next_sequence == kMaxSequenceNumber) {
+        *next_sequence = sequence;
+      } else {
+        no_prev_seq = false;
+        WriteBatchInternal::SetSequence(&batch, *next_sequence);
+      }
+
+#ifndef ROCKSDB_LITE
+      if (db_options_.wal_filter != nullptr) {
+        WriteBatch new_batch;
+        bool batch_changed = false;
+
+        WalFilter::WalProcessingOption wal_processing_option =
+            db_options_.wal_filter->LogRecordFound(log_number, fname, batch,
+                                                   &new_batch, &batch_changed);
+
+        switch (wal_processing_option) {
+          case WalFilter::WalProcessingOption::kContinueProcessing:
+            // do nothing, proceed normally
+            break;
+          case WalFilter::WalProcessingOption::kIgnoreCurrentRecord:
+            // skip current record
+            continue;
+          case WalFilter::WalProcessingOption::kStopReplay:
+            // skip current record and stop replay
+            stop_replay_by_wal_filter = true;
+            continue;
+          case WalFilter::WalProcessingOption::kCorruptedRecord: {
+            status = Status::Corruption("Corruption reported by Wal Filter ",
+                                        db_options_.wal_filter->Name());
+            MaybeIgnoreError(&status);
+            if (!status.ok()) {
+              reporter.Corruption(record.size(), status);
+              continue;
+            }
+            break;
+          }
+          default: {
+            assert(false);  // unhandled case
+            status = Status::NotSupported(
+                "Unknown WalProcessingOption returned"
+                " by Wal Filter ",
+                db_options_.wal_filter->Name());
+            MaybeIgnoreError(&status);
+            if (!status.ok()) {
+              return status;
+            } else {
+              // Ignore the error with current record processing.
+              continue;
+            }
+          }
+        }
+
+        if (batch_changed) {
+          // Make sure that the count in the new batch is
+          // within the original count.
+          int new_count = WriteBatchInternal::Count(&new_batch);
+          int original_count = WriteBatchInternal::Count(&batch);
+          if (new_count > original_count) {
+            Log(InfoLogLevel::FATAL_LEVEL, db_options_.info_log,
+                "Recovering log #%" PRIu64
+                " mode %d log filter %s returned "
+                "more records (%d) than original (%d) which is not allowed. "
" + "Aborting recovery.", + log_number, db_options_.wal_recovery_mode, + db_options_.wal_filter->Name(), new_count, original_count); + status = Status::NotSupported( + "More than original # of records " + "returned by Wal Filter ", + db_options_.wal_filter->Name()); + return status; + } + // Set the same sequence number in the new_batch + // as the original batch. + WriteBatchInternal::SetSequence(&new_batch, + WriteBatchInternal::Sequence(&batch)); + batch = new_batch; + } + } +#endif // ROCKSDB_LITE + + // If column family was not found, it might mean that the WAL write + // batch references to the column family that was dropped after the + // insert. We don't want to fail the whole write batch in that case -- + // we just ignore the update. + // That's why we set ignore missing column families to true + bool has_valid_writes = false; + // If we pass DB through and options.max_successive_merges is hit + // during recovery, Get() will be issued which will try to acquire + // DB mutex and cause deadlock, as DB mutex is already held. + // The DB pointer is not needed unless 2PC is used. + // TODO(sdong) fix the allow_2pc case too. + status = WriteBatchInternal::InsertInto( + &batch, column_family_memtables_.get(), &flush_scheduler_, true, + log_number, db_options_.allow_2pc ? this : nullptr, + false /* concurrent_memtable_writes */, next_sequence, + &has_valid_writes); + // If it is the first log file and there is no column family updated + // after replaying the file, this file may be a stale file. We ignore + // sequence IDs from the file. Otherwise, if a newer stale log file that + // has been deleted, the sequenceID may be wrong. + if (no_prev_seq && !has_valid_writes) { + *next_sequence = kMaxSequenceNumber; + } + MaybeIgnoreError(&status); + if (!status.ok()) { + // We are treating this as a failure while reading since we read valid + // blocks that do not form coherent data + reporter.Corruption(record.size(), status); + continue; + } + + if (has_valid_writes && !read_only) { + // we can do this because this is called before client has access to the + // DB and there is only a single thread operating on DB + ColumnFamilyData* cfd; + + while ((cfd = flush_scheduler_.TakeNextColumnFamily()) != nullptr) { + cfd->Unref(); + // If this asserts, it means that InsertInto failed in + // filtering updates to already-flushed column families + assert(cfd->GetLogNumber() <= log_number); + auto iter = version_edits.find(cfd->GetID()); + assert(iter != version_edits.end()); + VersionEdit* edit = &iter->second; + status = WriteLevel0TableForRecovery(job_id, cfd, cfd->mem(), edit); + if (!status.ok()) { + // Reflect errors immediately so that conditions like full + // file-systems cause the DB::Open() to fail. 
+            return status;
+          }
+          flushed = true;
+
+          cfd->CreateNewMemtable(*cfd->GetLatestMutableCFOptions(),
+                                 *next_sequence);
+        }
+      }
+    }
+
+    if (!status.ok()) {
+      if (db_options_.wal_recovery_mode ==
+          WALRecoveryMode::kSkipAnyCorruptedRecords) {
+        // We should ignore all errors unconditionally
+        status = Status::OK();
+      } else if (db_options_.wal_recovery_mode ==
+                 WALRecoveryMode::kPointInTimeRecovery) {
+        // We should ignore the error but not continue replaying
+        status = Status::OK();
+        stop_replay_for_corruption = true;
+        Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+            "Point in time recovered to log #%" PRIu64 " seq #%" PRIu64,
+            log_number, *next_sequence);
+      } else {
+        assert(db_options_.wal_recovery_mode ==
+                   WALRecoveryMode::kTolerateCorruptedTailRecords ||
+               db_options_.wal_recovery_mode ==
+                   WALRecoveryMode::kAbsoluteConsistency);
+        return status;
+      }
+    }
+
+    flush_scheduler_.Clear();
+    auto last_sequence = *next_sequence - 1;
+    if ((*next_sequence != kMaxSequenceNumber) &&
+        (versions_->LastSequence() <= last_sequence)) {
+      versions_->SetLastSequence(last_sequence);
+    }
+  }
+
+  if (!read_only) {
+    // no need to refcount since client still doesn't have access
+    // to the DB and can not drop column families while we iterate
+    auto max_log_number = log_numbers.back();
+    for (auto cfd : *versions_->GetColumnFamilySet()) {
+      auto iter = version_edits.find(cfd->GetID());
+      assert(iter != version_edits.end());
+      VersionEdit* edit = &iter->second;
+
+      if (cfd->GetLogNumber() > max_log_number) {
+        // Column family cfd has already flushed the data
+        // from all logs. Memtable has to be empty because
+        // we filter the updates based on log_number
+        // (in WriteBatch::InsertInto)
+        assert(cfd->mem()->GetFirstSequenceNumber() == 0);
+        assert(edit->NumEntries() == 0);
+        continue;
+      }
+
+      // flush the final memtable (if non-empty)
+      if (cfd->mem()->GetFirstSequenceNumber() != 0) {
+        // If flush happened in the middle of recovery (e.g. due to memtable
+        // being full), we flush at the end. Otherwise we'll need to record
+        // where we were on last flush, which makes the logic complicated.
+        if (flushed || !db_options_.avoid_flush_during_recovery) {
+          status = WriteLevel0TableForRecovery(job_id, cfd, cfd->mem(), edit);
+          if (!status.ok()) {
+            // Recovery failed
+            break;
+          }
+          flushed = true;
+
+          cfd->CreateNewMemtable(*cfd->GetLatestMutableCFOptions(),
+                                 *next_sequence);
+        }
+      }
+
+      // write MANIFEST with update
+      // writing log_number in the manifest means that any log file
+      // with number strictly less than (log_number + 1) is already
+      // recovered and should be ignored on next reincarnation.
+      // Since we already recovered max_log_number, we want all logs
+      // with numbers `<= max_log_number` (includes this one) to be ignored
+      if (flushed || cfd->mem()->GetFirstSequenceNumber() == 0) {
+        edit->SetLogNumber(max_log_number + 1);
+      }
+      // we must mark the next log number as used, even though it's
+      // not actually used.
+      // That is because VersionSet assumes
+      // VersionSet::next_file_number_ always to be strictly greater than any
+      // log number
+      versions_->MarkFileNumberUsedDuringRecovery(max_log_number + 1);
+      status = versions_->LogAndApply(
+          cfd, *cfd->GetLatestMutableCFOptions(), edit, &mutex_);
+      if (!status.ok()) {
+        // Recovery failed
+        break;
+      }
+    }
+  }
+
+  event_logger_.Log() << "job" << job_id << "event"
+                      << "recovery_finished";
+
+  return status;
+}
+
+Status DBImpl::WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
+                                           MemTable* mem, VersionEdit* edit) {
+  mutex_.AssertHeld();
+  const uint64_t start_micros = env_->NowMicros();
+  FileMetaData meta;
+  auto pending_outputs_inserted_elem =
+      CaptureCurrentFileNumberInPendingOutputs();
+  meta.fd = FileDescriptor(versions_->NewFileNumber(), 0, 0);
+  ReadOptions ro;
+  ro.total_order_seek = true;
+  Arena arena;
+  Status s;
+  TableProperties table_properties;
+  {
+    ScopedArenaIterator iter(mem->NewIterator(ro, &arena));
+    Log(InfoLogLevel::DEBUG_LEVEL, db_options_.info_log,
+        "[%s] [WriteLevel0TableForRecovery]"
+        " Level-0 table #%" PRIu64 ": started",
+        cfd->GetName().c_str(), meta.fd.GetNumber());
+
+    // Get the latest mutable cf options while the mutex is still locked
+    const MutableCFOptions mutable_cf_options =
+        *cfd->GetLatestMutableCFOptions();
+    bool paranoid_file_checks =
+        cfd->GetLatestMutableCFOptions()->paranoid_file_checks;
+    {
+      mutex_.Unlock();
+
+      SequenceNumber earliest_write_conflict_snapshot;
+      std::vector<SequenceNumber> snapshot_seqs =
+          snapshots_.GetAll(&earliest_write_conflict_snapshot);
+
+      s = BuildTable(
+          dbname_, env_, *cfd->ioptions(), mutable_cf_options, env_options_,
+          cfd->table_cache(), iter.get(), &meta, cfd->internal_comparator(),
+          cfd->int_tbl_prop_collector_factories(), cfd->GetID(), cfd->GetName(),
+          snapshot_seqs, earliest_write_conflict_snapshot,
+          GetCompressionFlush(*cfd->ioptions(), mutable_cf_options),
+          cfd->ioptions()->compression_opts, paranoid_file_checks,
+          cfd->internal_stats(), TableFileCreationReason::kRecovery,
+          &event_logger_, job_id);
+      LogFlush(db_options_.info_log);
+      Log(InfoLogLevel::DEBUG_LEVEL, db_options_.info_log,
+          "[%s] [WriteLevel0TableForRecovery]"
+          " Level-0 table #%" PRIu64 ": %" PRIu64 " bytes %s",
+          cfd->GetName().c_str(), meta.fd.GetNumber(), meta.fd.GetFileSize(),
+          s.ToString().c_str());
+      mutex_.Lock();
+    }
+  }
+  ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem);
+
+  // Note that if file_size is zero, the file has been deleted and
+  // should not be added to the manifest.
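+  // For instance (illustration): if every entry in the memtable was dropped
+  // while building the table, the file is removed and its recorded size stays
+  // zero, so the check below skips edit->AddFile() and no empty SST reaches
+  // the manifest.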
+  int level = 0;
+  if (s.ok() && meta.fd.GetFileSize() > 0) {
+    edit->AddFile(level, meta.fd.GetNumber(), meta.fd.GetPathId(),
+                  meta.fd.GetFileSize(), meta.smallest, meta.largest,
+                  meta.smallest_seqno, meta.largest_seqno,
+                  meta.marked_for_compaction);
+  }
+
+  InternalStats::CompactionStats stats(1);
+  stats.micros = env_->NowMicros() - start_micros;
+  stats.bytes_written = meta.fd.GetFileSize();
+  stats.num_output_files = 1;
+  cfd->internal_stats()->AddCompactionStats(level, stats);
+  cfd->internal_stats()->AddCFStats(
+      InternalStats::BYTES_FLUSHED, meta.fd.GetFileSize());
+  RecordTick(stats_, COMPACT_WRITE_BYTES, meta.fd.GetFileSize());
+  return s;
+}
+
+Status DBImpl::SyncClosedLogs(JobContext* job_context) {
+  mutex_.AssertHeld();
+  autovector<log::Writer*> logs_to_sync;
+  uint64_t current_log_number = logfile_number_;
+  while (logs_.front().number < current_log_number &&
+         logs_.front().getting_synced) {
+    log_sync_cv_.Wait();
+  }
+  for (auto it = logs_.begin();
+       it != logs_.end() && it->number < current_log_number; ++it) {
+    auto& log = *it;
+    assert(!log.getting_synced);
+    log.getting_synced = true;
+    logs_to_sync.push_back(log.writer);
+  }
+
+  Status s;
+  if (!logs_to_sync.empty()) {
+    mutex_.Unlock();
+
+    for (log::Writer* log : logs_to_sync) {
+      Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+          "[JOB %d] Syncing log #%" PRIu64, job_context->job_id,
+          log->get_log_number());
+      s = log->file()->Sync(db_options_.use_fsync);
+    }
+    if (s.ok()) {
+      s = directories_.GetWalDir()->Fsync();
+    }
+
+    mutex_.Lock();
+
+    // "number <= current_log_number - 1" is equivalent to
+    // "number < current_log_number".
+    MarkLogsSynced(current_log_number - 1, true, s);
+    if (!s.ok()) {
+      bg_error_ = s;
+      return s;
+    }
+  }
+  return s;
+}
+
+Status DBImpl::FlushMemTableToOutputFile(
+    ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options,
+    bool* made_progress, JobContext* job_context, LogBuffer* log_buffer) {
+  mutex_.AssertHeld();
+  assert(cfd->imm()->NumNotFlushed() != 0);
+  assert(cfd->imm()->IsFlushPending());
+
+  SequenceNumber earliest_write_conflict_snapshot;
+  std::vector<SequenceNumber> snapshot_seqs =
+      snapshots_.GetAll(&earliest_write_conflict_snapshot);
+
+  FlushJob flush_job(
+      dbname_, cfd, db_options_, mutable_cf_options, env_options_,
+      versions_.get(), &mutex_, &shutting_down_, snapshot_seqs,
+      earliest_write_conflict_snapshot, job_context, log_buffer,
+      directories_.GetDbDir(), directories_.GetDataDir(0U),
+      GetCompressionFlush(*cfd->ioptions(), mutable_cf_options), stats_,
+      &event_logger_, mutable_cf_options.report_bg_io_stats);
+
+  FileMetaData file_meta;
+
+  flush_job.PickMemTable();
+
+  Status s;
+  if (logfile_number_ > 0 &&
+      versions_->GetColumnFamilySet()->NumberOfColumnFamilies() > 0 &&
+      !db_options_.disableDataSync) {
+    // If there is more than one column family, we need to make sure that
+    // all the log files except the most recent one are synced. Otherwise if
+    // the host crashes after flushing and before WAL is persistent, the
+    // flushed SST may contain data from write batches whose updates to
+    // other column families are missing.
+    // SyncClosedLogs() may unlock and re-lock the db_mutex.
+    s = SyncClosedLogs(job_context);
+  }
+
+  // Within flush_job.Run, rocksdb may call event listener to notify
+  // file creation and deletion.
+  //
+  // Note that flush_job.Run will unlock and lock the db_mutex,
+  // and EventListener callback will be called when the db_mutex
+  // is unlocked by the current thread.
+  if (s.ok()) {
+    s = flush_job.Run(&file_meta);
+  }
+
+  if (s.ok()) {
+    InstallSuperVersionAndScheduleWorkWrapper(cfd, job_context,
+                                              mutable_cf_options);
+    if (made_progress) {
+      *made_progress = true;
+    }
+    VersionStorageInfo::LevelSummaryStorage tmp;
+    LogToBuffer(log_buffer, "[%s] Level summary: %s\n", cfd->GetName().c_str(),
+                cfd->current()->storage_info()->LevelSummary(&tmp));
+  }
+
+  if (!s.ok() && !s.IsShutdownInProgress() && db_options_.paranoid_checks &&
+      bg_error_.ok()) {
+    // if a bad error happened (not ShutdownInProgress) and paranoid_checks is
+    // true, mark DB read-only
+    bg_error_ = s;
+  }
+  if (s.ok()) {
+#ifndef ROCKSDB_LITE
+    // may temporarily unlock and lock the mutex.
+    NotifyOnFlushCompleted(cfd, &file_meta, mutable_cf_options,
+                           job_context->job_id, flush_job.GetTableProperties());
+#endif  // ROCKSDB_LITE
+    auto sfm =
+        static_cast<SstFileManagerImpl*>(db_options_.sst_file_manager.get());
+    if (sfm) {
+      // Notify sst_file_manager that a new file was added
+      std::string file_path = MakeTableFileName(db_options_.db_paths[0].path,
+                                                file_meta.fd.GetNumber());
+      sfm->OnAddFile(file_path);
+      if (sfm->IsMaxAllowedSpaceReached() && bg_error_.ok()) {
+        bg_error_ = Status::IOError("Max allowed space was reached");
+        TEST_SYNC_POINT(
+            "DBImpl::FlushMemTableToOutputFile:MaxAllowedSpaceReached");
+      }
+    }
+  }
+  return s;
+}
+
+void DBImpl::NotifyOnFlushCompleted(ColumnFamilyData* cfd,
+                                    FileMetaData* file_meta,
+                                    const MutableCFOptions& mutable_cf_options,
+                                    int job_id, TableProperties prop) {
+#ifndef ROCKSDB_LITE
+  if (db_options_.listeners.size() == 0U) {
+    return;
+  }
+  mutex_.AssertHeld();
+  if (shutting_down_.load(std::memory_order_acquire)) {
+    return;
+  }
+  bool triggered_writes_slowdown =
+      (cfd->current()->storage_info()->NumLevelFiles(0) >=
+       mutable_cf_options.level0_slowdown_writes_trigger);
+  bool triggered_writes_stop =
+      (cfd->current()->storage_info()->NumLevelFiles(0) >=
+       mutable_cf_options.level0_stop_writes_trigger);
+  // release lock while notifying events
+  mutex_.Unlock();
+  {
+    FlushJobInfo info;
+    info.cf_name = cfd->GetName();
+    // TODO(yhchiang): make db_paths dynamic in case flush does not
+    //                 go to L0 in the future.
+    info.file_path = MakeTableFileName(db_options_.db_paths[0].path,
+                                       file_meta->fd.GetNumber());
+    info.thread_id = env_->GetThreadID();
+    info.job_id = job_id;
+    info.triggered_writes_slowdown = triggered_writes_slowdown;
+    info.triggered_writes_stop = triggered_writes_stop;
+    info.smallest_seqno = file_meta->smallest_seqno;
+    info.largest_seqno = file_meta->largest_seqno;
+    info.table_properties = prop;
+    for (auto listener : db_options_.listeners) {
+      listener->OnFlushCompleted(this, info);
+    }
+  }
+  mutex_.Lock();
+  // no need to signal bg_cv_ as it will be signaled at the end of the
+  // flush process.
+#endif  // ROCKSDB_LITE
+}
+
+Status DBImpl::CompactRange(const CompactRangeOptions& options,
+                            ColumnFamilyHandle* column_family,
+                            const Slice* begin, const Slice* end) {
+  if (options.target_path_id >= db_options_.db_paths.size()) {
+    return Status::InvalidArgument("Invalid target path ID");
+  }
+
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  auto cfd = cfh->cfd();
+  bool exclusive = options.exclusive_manual_compaction;
+
+  Status s = FlushMemTable(cfd, FlushOptions());
+  if (!s.ok()) {
+    LogFlush(db_options_.info_log);
+    return s;
+  }
+
+  int max_level_with_files = 0;
+  {
+    InstrumentedMutexLock l(&mutex_);
+    Version* base = cfd->current();
+    for (int level = 1; level < base->storage_info()->num_non_empty_levels();
+         level++) {
+      if (base->storage_info()->OverlapInLevel(level, begin, end)) {
+        max_level_with_files = level;
+      }
+    }
+  }
+
+  int final_output_level = 0;
+  if (cfd->ioptions()->compaction_style == kCompactionStyleUniversal &&
+      cfd->NumberLevels() > 1) {
+    // Always compact all files together.
+    s = RunManualCompaction(cfd, ColumnFamilyData::kCompactAllLevels,
+                            cfd->NumberLevels() - 1, options.target_path_id,
+                            begin, end, exclusive);
+    final_output_level = cfd->NumberLevels() - 1;
+  } else {
+    for (int level = 0; level <= max_level_with_files; level++) {
+      int output_level;
+      // in case the compaction is universal or if we're compacting the
+      // bottom-most level, the output level will be the same as input one.
+      // level 0 can never be the bottommost level (i.e. if all files are in
+      // level 0, we will compact to level 1)
+      if (cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
+          cfd->ioptions()->compaction_style == kCompactionStyleFIFO) {
+        output_level = level;
+      } else if (level == max_level_with_files && level > 0) {
+        if (options.bottommost_level_compaction ==
+            BottommostLevelCompaction::kSkip) {
+          // Skip bottommost level compaction
+          continue;
+        } else if (options.bottommost_level_compaction ==
+                       BottommostLevelCompaction::kIfHaveCompactionFilter &&
+                   cfd->ioptions()->compaction_filter == nullptr &&
+                   cfd->ioptions()->compaction_filter_factory == nullptr) {
+          // Skip bottommost level compaction since we don't have a compaction
+          // filter
+          continue;
+        }
+        output_level = level;
+      } else {
+        output_level = level + 1;
+        if (cfd->ioptions()->compaction_style == kCompactionStyleLevel &&
+            cfd->ioptions()->level_compaction_dynamic_level_bytes &&
+            level == 0) {
+          output_level = ColumnFamilyData::kCompactToBaseLevel;
+        }
+      }
+      s = RunManualCompaction(cfd, level, output_level, options.target_path_id,
+                              begin, end, exclusive);
+      if (!s.ok()) {
+        break;
+      }
+      if (output_level == ColumnFamilyData::kCompactToBaseLevel) {
+        final_output_level = cfd->NumberLevels() - 1;
+      } else if (output_level > final_output_level) {
+        final_output_level = output_level;
+      }
+      TEST_SYNC_POINT("DBImpl::RunManualCompaction()::1");
+      TEST_SYNC_POINT("DBImpl::RunManualCompaction()::2");
+    }
+  }
+  if (!s.ok()) {
+    LogFlush(db_options_.info_log);
+    return s;
+  }
+
+  if (options.change_level) {
+    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+        "[RefitLevel] waiting for background threads to stop");
+    s = PauseBackgroundWork();
+    if (s.ok()) {
+      s = ReFitLevel(cfd, final_output_level, options.target_level);
+    }
+    ContinueBackgroundWork();
+  }
+  LogFlush(db_options_.info_log);
+
+  {
+    InstrumentedMutexLock l(&mutex_);
+    // an automatic compaction that has been scheduled might have been
+    // preempted by the manual compactions. Need to schedule it back.
+    MaybeScheduleFlushOrCompaction();
+  }
+
+  return s;
+}
+
+Status DBImpl::CompactFiles(
+    const CompactionOptions& compact_options,
+    ColumnFamilyHandle* column_family,
+    const std::vector<std::string>& input_file_names,
+    const int output_level, const int output_path_id) {
+#ifdef ROCKSDB_LITE
+  // not supported in lite version
+  return Status::NotSupported("Not supported in ROCKSDB LITE");
+#else
+  if (column_family == nullptr) {
+    return Status::InvalidArgument("ColumnFamilyHandle must be non-null.");
+  }
+
+  auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
+  assert(cfd);
+
+  Status s;
+  JobContext job_context(0, true);
+  LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL,
+                       db_options_.info_log.get());
+
+  // Perform CompactFiles
+  SuperVersion* sv = GetAndRefSuperVersion(cfd);
+  {
+    InstrumentedMutexLock l(&mutex_);
+
+    s = CompactFilesImpl(compact_options, cfd, sv->current,
+                         input_file_names, output_level,
+                         output_path_id, &job_context, &log_buffer);
+  }
+  ReturnAndCleanupSuperVersion(cfd, sv);
+
+  // Find and delete obsolete files
+  {
+    InstrumentedMutexLock l(&mutex_);
+    // If !s.ok(), this means that Compaction failed. In that case, we want
+    // to delete all obsolete files we might have created and we force
+    // FindObsoleteFiles(). This is because job_context does not
+    // catch all created files if compaction failed.
+    FindObsoleteFiles(&job_context, !s.ok());
+  }  // release the mutex
+
+  // delete unnecessary files if any, this is done outside the mutex
+  if (job_context.HaveSomethingToDelete() || !log_buffer.IsEmpty()) {
+    // Have to flush the info logs before bg_compaction_scheduled_--
+    // because if bg_flush_scheduled_ becomes 0 and the lock is
+    // released, the destructor of DB can kick in and destroy all the
+    // states of DB so info_log might not be available after that point.
+    // It also applies to access other states that DB owns.
+    log_buffer.FlushBufferToLog();
+    if (job_context.HaveSomethingToDelete()) {
+      // no mutex is locked here. No need to Unlock() and Lock() here.
+      PurgeObsoleteFiles(job_context);
+    }
+    job_context.Clean();
+  }
+
+  return s;
+#endif  // ROCKSDB_LITE
+}
+
+#ifndef ROCKSDB_LITE
+Status DBImpl::CompactFilesImpl(
+    const CompactionOptions& compact_options, ColumnFamilyData* cfd,
+    Version* version, const std::vector<std::string>& input_file_names,
+    const int output_level, int output_path_id, JobContext* job_context,
+    LogBuffer* log_buffer) {
+  mutex_.AssertHeld();
+
+  if (shutting_down_.load(std::memory_order_acquire)) {
+    return Status::ShutdownInProgress();
+  }
+
+  std::unordered_set<uint64_t> input_set;
+  for (auto file_name : input_file_names) {
+    input_set.insert(TableFileNameToNumber(file_name));
+  }
+
+  ColumnFamilyMetaData cf_meta;
+  // TODO(yhchiang): can directly use version here if none of the
+  // following function calls is pluggable to external developers.
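+  // Caller-side sketch of the public API implemented above (illustration
+  // only; file names hypothetical):
+  //
+  //   std::vector<std::string> inputs = {"000010.sst", "000011.sst"};
+  //   Status st = db->CompactFiles(CompactionOptions(), cf_handle, inputs,
+  //                                1 /* output_level */);
+  //
+  // Input names are parsed via TableFileNameToNumber() above and validated
+  // by SanitizeCompactionInputFiles() below before any work is scheduled.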
+  version->GetColumnFamilyMetaData(&cf_meta);
+
+  if (output_path_id < 0) {
+    if (db_options_.db_paths.size() == 1U) {
+      output_path_id = 0;
+    } else {
+      return Status::NotSupported(
+          "Automatic output path selection is not "
+          "yet supported in CompactFiles()");
+    }
+  }
+
+  Status s = cfd->compaction_picker()->SanitizeCompactionInputFiles(
+      &input_set, cf_meta, output_level);
+  if (!s.ok()) {
+    return s;
+  }
+
+  std::vector<CompactionInputFiles> input_files;
+  s = cfd->compaction_picker()->GetCompactionInputsFromFileNumbers(
+      &input_files, &input_set, version->storage_info(), compact_options);
+  if (!s.ok()) {
+    return s;
+  }
+
+  for (auto inputs : input_files) {
+    if (cfd->compaction_picker()->FilesInCompaction(inputs.files)) {
+      return Status::Aborted(
+          "Some of the necessary compaction input "
+          "files are already being compacted");
+    }
+  }
+
+  // At this point, CompactFiles will be run.
+  bg_compaction_scheduled_++;
+
+  unique_ptr<Compaction> c;
+  assert(cfd->compaction_picker());
+  c.reset(cfd->compaction_picker()->FormCompaction(
+      compact_options, input_files, output_level, version->storage_info(),
+      *cfd->GetLatestMutableCFOptions(), output_path_id));
+  if (!c) {
+    return Status::Aborted("Another Level 0 compaction is running");
+  }
+  c->SetInputVersion(version);
+  // deletion compaction currently not allowed in CompactFiles.
+  assert(!c->deletion_compaction());
+  running_compactions_.insert(c.get());
+
+  SequenceNumber earliest_write_conflict_snapshot;
+  std::vector<SequenceNumber> snapshot_seqs =
+      snapshots_.GetAll(&earliest_write_conflict_snapshot);
+
+  auto pending_outputs_inserted_elem =
+      CaptureCurrentFileNumberInPendingOutputs();
+
+  assert(is_snapshot_supported_ || snapshots_.empty());
+  CompactionJob compaction_job(
+      job_context->job_id, c.get(), db_options_, env_options_, versions_.get(),
+      &shutting_down_, log_buffer, directories_.GetDbDir(),
+      directories_.GetDataDir(c->output_path_id()), stats_, &mutex_, &bg_error_,
+      snapshot_seqs, earliest_write_conflict_snapshot, table_cache_,
+      &event_logger_, c->mutable_cf_options()->paranoid_file_checks,
+      c->mutable_cf_options()->report_bg_io_stats, dbname_,
+      nullptr);  // Here we pass a nullptr for CompactionJobStats because
+                 // CompactFiles does not trigger OnCompactionCompleted(),
+                 // which is the only place where CompactionJobStats is
+                 // returned. The idea of not triggering OnCompactionCompleted()
+                 // is that CompactFiles runs in the caller thread, so the user
+                 // should always know when it completes. As a result, it makes
+                 // less sense to notify the users something they should already
+                 // know.
+                 //
+                 // In the future, if we would like to add CompactionJobStats
+                 // support for CompactFiles, we should have the CompactFiles API
+                 // pass a pointer of CompactionJobStats as the out-value
+                 // instead of using EventListener.
+
+  // Creating a compaction influences the compaction score because the score
+  // takes running compactions into account (by skipping files that are already
+  // being compacted). Since we just changed compaction score, we recalculate it
+  // here.
+ version->storage_info()->ComputeCompactionScore(*c->mutable_cf_options()); + + compaction_job.Prepare(); + + mutex_.Unlock(); + TEST_SYNC_POINT("CompactFilesImpl:0"); + TEST_SYNC_POINT("CompactFilesImpl:1"); + compaction_job.Run(); + TEST_SYNC_POINT("CompactFilesImpl:2"); + TEST_SYNC_POINT("CompactFilesImpl:3"); + mutex_.Lock(); + + Status status = compaction_job.Install(*c->mutable_cf_options()); + if (status.ok()) { + InstallSuperVersionAndScheduleWorkWrapper( + c->column_family_data(), job_context, *c->mutable_cf_options()); + } + c->ReleaseCompactionFiles(s); + + ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem); + + running_compactions_.erase(c.get()); + + if (status.ok()) { + // Done + } else if (status.IsShutdownInProgress()) { + // Ignore compaction errors found during shutting down + } else { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "[%s] [JOB %d] Compaction error: %s", + c->column_family_data()->GetName().c_str(), job_context->job_id, + status.ToString().c_str()); + if (db_options_.paranoid_checks && bg_error_.ok()) { + bg_error_ = status; + } + } + + c.reset(); + + bg_compaction_scheduled_--; + if (bg_compaction_scheduled_ == 0) { + bg_cv_.SignalAll(); + } + + return status; +} +#endif // ROCKSDB_LITE + +Status DBImpl::PauseBackgroundWork() { + InstrumentedMutexLock guard_lock(&mutex_); + bg_compaction_paused_++; + while (bg_compaction_scheduled_ > 0 || bg_flush_scheduled_ > 0) { + bg_cv_.Wait(); + } + bg_work_paused_++; + return Status::OK(); +} + +Status DBImpl::ContinueBackgroundWork() { + InstrumentedMutexLock guard_lock(&mutex_); + if (bg_work_paused_ == 0) { + return Status::InvalidArgument(); + } + assert(bg_work_paused_ > 0); + assert(bg_compaction_paused_ > 0); + bg_compaction_paused_--; + bg_work_paused_--; + // It's sufficient to check just bg_work_paused_ here since + // bg_work_paused_ is always no greater than bg_compaction_paused_ + if (bg_work_paused_ == 0) { + MaybeScheduleFlushOrCompaction(); + } + return Status::OK(); +} + +void DBImpl::NotifyOnCompactionCompleted( + ColumnFamilyData* cfd, Compaction *c, const Status &st, + const CompactionJobStats& compaction_job_stats, + const int job_id) { +#ifndef ROCKSDB_LITE + if (db_options_.listeners.size() == 0U) { + return; + } + mutex_.AssertHeld(); + if (shutting_down_.load(std::memory_order_acquire)) { + return; + } + // release lock while notifying events + mutex_.Unlock(); + TEST_SYNC_POINT("DBImpl::NotifyOnCompactionCompleted::UnlockMutex"); + { + CompactionJobInfo info; + info.cf_name = cfd->GetName(); + info.status = st; + info.thread_id = env_->GetThreadID(); + info.job_id = job_id; + info.base_input_level = c->start_level(); + info.output_level = c->output_level(); + info.stats = compaction_job_stats; + info.table_properties = c->GetOutputTableProperties(); + info.compaction_reason = c->compaction_reason(); + info.compression = c->output_compression(); + for (size_t i = 0; i < c->num_input_levels(); ++i) { + for (const auto fmd : *c->inputs(i)) { + auto fn = TableFileName(db_options_.db_paths, fmd->fd.GetNumber(), + fmd->fd.GetPathId()); + info.input_files.push_back(fn); + if (info.table_properties.count(fn) == 0) { + std::shared_ptr tp; + auto s = cfd->current()->GetTableProperties(&tp, fmd, &fn); + if (s.ok()) { + info.table_properties[fn] = tp; + } + } + } + } + for (const auto newf : c->edit()->GetNewFiles()) { + info.output_files.push_back( + TableFileName(db_options_.db_paths, + newf.second.fd.GetNumber(), + newf.second.fd.GetPathId())); + } + for (auto listener : 
db_options_.listeners) {
+      listener->OnCompactionCompleted(this, info);
+    }
+  }
+  mutex_.Lock();
+  // no need to signal bg_cv_ as it will be signaled at the end of the
+  // flush process.
+#endif  // ROCKSDB_LITE
+}
+
+Status DBImpl::SetOptions(ColumnFamilyHandle* column_family,
+    const std::unordered_map<std::string, std::string>& options_map) {
+#ifdef ROCKSDB_LITE
+  return Status::NotSupported("Not supported in ROCKSDB LITE");
+#else
+  auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
+  if (options_map.empty()) {
+    Log(InfoLogLevel::WARN_LEVEL,
+        db_options_.info_log, "SetOptions() on column family [%s], empty input",
+        cfd->GetName().c_str());
+    return Status::InvalidArgument("empty input");
+  }
+
+  MutableCFOptions new_options;
+  Status s;
+  Status persist_options_status;
+  {
+    InstrumentedMutexLock l(&mutex_);
+    s = cfd->SetOptions(options_map);
+    if (s.ok()) {
+      new_options = *cfd->GetLatestMutableCFOptions();
+      // Trigger possible flush/compactions. This has to be before we persist
+      // options to file, otherwise there will be a deadlock with writer
+      // thread.
+      auto* old_sv =
+          InstallSuperVersionAndScheduleWork(cfd, nullptr, new_options);
+      delete old_sv;
+
+      // Persist RocksDB options under the single write thread
+      WriteThread::Writer w;
+      write_thread_.EnterUnbatched(&w, &mutex_);
+
+      persist_options_status = WriteOptionsFile();
+
+      write_thread_.ExitUnbatched(&w);
+    }
+  }
+
+  Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+      "SetOptions() on column family [%s], inputs:",
+      cfd->GetName().c_str());
+  for (const auto& o : options_map) {
+    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+        "%s: %s\n", o.first.c_str(), o.second.c_str());
+  }
+  if (s.ok()) {
+    Log(InfoLogLevel::INFO_LEVEL,
+        db_options_.info_log, "[%s] SetOptions succeeded",
+        cfd->GetName().c_str());
+    new_options.Dump(db_options_.info_log.get());
+    if (!persist_options_status.ok()) {
+      if (db_options_.fail_if_options_file_error) {
+        s = Status::IOError(
+            "SetOptions succeeded, but unable to persist options",
+            persist_options_status.ToString());
+      }
+      Warn(db_options_.info_log,
+           "Unable to persist options in SetOptions() -- %s",
+           persist_options_status.ToString().c_str());
+    }
+  } else {
+    Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+        "[%s] SetOptions failed", cfd->GetName().c_str());
+  }
+  LogFlush(db_options_.info_log);
+  return s;
+#endif  // ROCKSDB_LITE
+}
+
+// return the same level if it cannot be moved
+int DBImpl::FindMinimumEmptyLevelFitting(ColumnFamilyData* cfd,
+    const MutableCFOptions& mutable_cf_options, int level) {
+  mutex_.AssertHeld();
+  const auto* vstorage = cfd->current()->storage_info();
+  int minimum_level = level;
+  for (int i = level - 1; i > 0; --i) {
+    // stop if level i is not empty
+    if (vstorage->NumLevelFiles(i) > 0) break;
+    // stop if level i is too small (cannot fit the level files)
+    if (vstorage->MaxBytesForLevel(i) < vstorage->NumLevelBytes(level)) {
+      break;
+    }
+
+    minimum_level = i;
+  }
+  return minimum_level;
+}
+
+// REQUIREMENT: block all background work by calling PauseBackgroundWork()
+// before calling this function
+Status DBImpl::ReFitLevel(ColumnFamilyData* cfd, int level, int target_level) {
+  assert(level < cfd->NumberLevels());
+  if (target_level >= cfd->NumberLevels()) {
+    return Status::InvalidArgument("Target level exceeds number of levels");
+  }
+
+  std::unique_ptr<SuperVersion> superversion_to_free;
+  std::unique_ptr<SuperVersion> new_superversion(new SuperVersion());
+
+  Status status;
+
+  InstrumentedMutexLock guard_lock(&mutex_);
+
+  // only allow one thread refitting
+  if (refitting_level_) {
+    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+        "[ReFitLevel] another thread is refitting");
+    return Status::NotSupported("another thread is refitting");
+  }
+  refitting_level_ = true;
+
+  const MutableCFOptions mutable_cf_options = *cfd->GetLatestMutableCFOptions();
+  // move to a smaller level
+  int to_level = target_level;
+  if (target_level < 0) {
+    to_level = FindMinimumEmptyLevelFitting(cfd, mutable_cf_options, level);
+  }
+
+  auto* vstorage = cfd->current()->storage_info();
+  if (to_level > level) {
+    if (level == 0) {
+      return Status::NotSupported(
+          "Cannot change from level 0 to other levels.");
+    }
+    // Check levels are empty for a trivial move
+    for (int l = level + 1; l <= to_level; l++) {
+      if (vstorage->NumLevelFiles(l) > 0) {
+        return Status::NotSupported(
+            "Levels between source and target are not empty for a move.");
+      }
+    }
+  }
+  if (to_level != level) {
+    Log(InfoLogLevel::DEBUG_LEVEL, db_options_.info_log,
+        "[%s] Before refitting:\n%s", cfd->GetName().c_str(),
+        cfd->current()->DebugString().data());
+
+    VersionEdit edit;
+    edit.SetColumnFamily(cfd->GetID());
+    for (const auto& f : vstorage->LevelFiles(level)) {
+      edit.DeleteFile(level, f->fd.GetNumber());
+      edit.AddFile(to_level, f->fd.GetNumber(), f->fd.GetPathId(),
+                   f->fd.GetFileSize(), f->smallest, f->largest,
+                   f->smallest_seqno, f->largest_seqno,
+                   f->marked_for_compaction);
+    }
+    Log(InfoLogLevel::DEBUG_LEVEL, db_options_.info_log,
+        "[%s] Apply version edit:\n%s", cfd->GetName().c_str(),
+        edit.DebugString().data());
+
+    status = versions_->LogAndApply(cfd, mutable_cf_options, &edit, &mutex_,
+                                    directories_.GetDbDir());
+    superversion_to_free.reset(InstallSuperVersionAndScheduleWork(
+        cfd, new_superversion.release(), mutable_cf_options));
+
+    Log(InfoLogLevel::DEBUG_LEVEL, db_options_.info_log,
+        "[%s] LogAndApply: %s\n", cfd->GetName().c_str(),
+        status.ToString().data());
+
+    if (status.ok()) {
+      Log(InfoLogLevel::DEBUG_LEVEL, db_options_.info_log,
+          "[%s] After refitting:\n%s", cfd->GetName().c_str(),
+          cfd->current()->DebugString().data());
+    }
+  }
+
+  refitting_level_ = false;
+
+  return status;
+}
+
+int DBImpl::NumberLevels(ColumnFamilyHandle* column_family) {
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  return cfh->cfd()->NumberLevels();
+}
+
+int DBImpl::MaxMemCompactionLevel(ColumnFamilyHandle* column_family) {
+  return 0;
+}
+
+int DBImpl::Level0StopWriteTrigger(ColumnFamilyHandle* column_family) {
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  InstrumentedMutexLock l(&mutex_);
+  return cfh->cfd()->GetSuperVersion()->
+      mutable_cf_options.level0_stop_writes_trigger;
+}
+
+Status DBImpl::Flush(const FlushOptions& flush_options,
+                     ColumnFamilyHandle* column_family) {
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  return FlushMemTable(cfh->cfd(), flush_options);
+}
+
+Status DBImpl::SyncWAL() {
+  autovector<log::Writer*, 1> logs_to_sync;
+  bool need_log_dir_sync;
+  uint64_t current_log_number;
+
+  {
+    InstrumentedMutexLock l(&mutex_);
+    assert(!logs_.empty());
+
+    // This SyncWAL() call only cares about logs up to this number.
+    current_log_number = logfile_number_;
+
+    while (logs_.front().number <= current_log_number &&
+           logs_.front().getting_synced) {
+      log_sync_cv_.Wait();
+    }
+    // First check that logs are safe to sync in background.
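+    // (The check loop below runs before any log is marked getting_synced,
+    // so the NotSupported return leaves no log half-marked; the second
+    // loop then marks every log up to current_log_number.)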
+ for (auto it = logs_.begin(); + it != logs_.end() && it->number <= current_log_number; ++it) { + if (!it->writer->file()->writable_file()->IsSyncThreadSafe()) { + return Status::NotSupported( + "SyncWAL() is not supported for this implementation of WAL file", + db_options_.allow_mmap_writes + ? "try setting Options::allow_mmap_writes to false" + : Slice()); + } + } + for (auto it = logs_.begin(); + it != logs_.end() && it->number <= current_log_number; ++it) { + auto& log = *it; + assert(!log.getting_synced); + log.getting_synced = true; + logs_to_sync.push_back(log.writer); + } + + need_log_dir_sync = !log_dir_synced_; + } + + RecordTick(stats_, WAL_FILE_SYNCED); + Status status; + for (log::Writer* log : logs_to_sync) { + status = log->file()->SyncWithoutFlush(db_options_.use_fsync); + if (!status.ok()) { + break; + } + } + if (status.ok() && need_log_dir_sync) { + status = directories_.GetWalDir()->Fsync(); + } + + TEST_SYNC_POINT("DBImpl::SyncWAL:BeforeMarkLogsSynced:1"); + { + InstrumentedMutexLock l(&mutex_); + MarkLogsSynced(current_log_number, need_log_dir_sync, status); + } + TEST_SYNC_POINT("DBImpl::SyncWAL:BeforeMarkLogsSynced:2"); + + return status; +} + +void DBImpl::MarkLogsSynced( + uint64_t up_to, bool synced_dir, const Status& status) { + mutex_.AssertHeld(); + if (synced_dir && + logfile_number_ == up_to && + status.ok()) { + log_dir_synced_ = true; + } + for (auto it = logs_.begin(); it != logs_.end() && it->number <= up_to;) { + auto& log = *it; + assert(log.getting_synced); + if (status.ok() && logs_.size() > 1) { + logs_to_free_.push_back(log.ReleaseWriter()); + it = logs_.erase(it); + } else { + log.getting_synced = false; + ++it; + } + } + assert(logs_.empty() || logs_[0].number > up_to || + (logs_.size() == 1 && !logs_[0].getting_synced)); + log_sync_cv_.SignalAll(); +} + +SequenceNumber DBImpl::GetLatestSequenceNumber() const { + return versions_->LastSequence(); +} + +Status DBImpl::RunManualCompaction(ColumnFamilyData* cfd, int input_level, + int output_level, uint32_t output_path_id, + const Slice* begin, const Slice* end, + bool exclusive, bool disallow_trivial_move) { + assert(input_level == ColumnFamilyData::kCompactAllLevels || + input_level >= 0); + + InternalKey begin_storage, end_storage; + CompactionArg* ca; + + bool scheduled = false; + bool manual_conflict = false; + ManualCompaction manual; + manual.cfd = cfd; + manual.input_level = input_level; + manual.output_level = output_level; + manual.output_path_id = output_path_id; + manual.done = false; + manual.in_progress = false; + manual.incomplete = false; + manual.exclusive = exclusive; + manual.disallow_trivial_move = disallow_trivial_move; + // For universal compaction, we enforce every manual compaction to compact + // all files. + if (begin == nullptr || + cfd->ioptions()->compaction_style == kCompactionStyleUniversal || + cfd->ioptions()->compaction_style == kCompactionStyleFIFO) { + manual.begin = nullptr; + } else { + begin_storage.SetMaxPossibleForUserKey(*begin); + manual.begin = &begin_storage; + } + if (end == nullptr || + cfd->ioptions()->compaction_style == kCompactionStyleUniversal || + cfd->ioptions()->compaction_style == kCompactionStyleFIFO) { + manual.end = nullptr; + } else { + end_storage.SetMinPossibleForUserKey(*end); + manual.end = &end_storage; + } + + InstrumentedMutexLock l(&mutex_); + + // When a manual compaction arrives, temporarily disable scheduling of + // non-manual compactions and wait until the number of scheduled compaction + // jobs drops to zero. 
This is needed to ensure that this manual compaction
+  // can compact any range of keys/files.
+  //
+  // HasPendingManualCompaction() is true when at least one thread is inside
+  // RunManualCompaction(), i.e. during that time no other compaction will
+  // get scheduled (see MaybeScheduleFlushOrCompaction).
+  //
+  // Note that the following loop doesn't stop more than one thread calling
+  // RunManualCompaction() from getting to the second while loop below.
+  // However, only one of them will actually schedule compaction, while
+  // others will wait on a condition variable until it completes.
+
+  AddManualCompaction(&manual);
+  TEST_SYNC_POINT_CALLBACK("DBImpl::RunManualCompaction:NotScheduled", &mutex_);
+  if (exclusive) {
+    while (bg_compaction_scheduled_ > 0) {
+      Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+          "[%s] Manual compaction waiting for all other scheduled background "
+          "compactions to finish",
+          cfd->GetName().c_str());
+      bg_cv_.Wait();
+    }
+  }
+
+  Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+      "[%s] Manual compaction starting",
+      cfd->GetName().c_str());
+
+  // We don't check bg_error_ here, because if we get the error in compaction,
+  // the compaction will set manual.status to bg_error_ and set manual.done to
+  // true.
+  while (!manual.done) {
+    assert(HasPendingManualCompaction());
+    manual_conflict = false;
+    if (ShouldntRunManualCompaction(&manual) || (manual.in_progress == true) ||
+        scheduled ||
+        ((manual.manual_end = &manual.tmp_storage1) &&
+         ((manual.compaction = manual.cfd->CompactRange(
+               *manual.cfd->GetLatestMutableCFOptions(), manual.input_level,
+               manual.output_level, manual.output_path_id, manual.begin,
+               manual.end, &manual.manual_end, &manual_conflict)) ==
+          nullptr) &&
+         manual_conflict)) {
+      // exclusive manual compactions should not see a conflict during
+      // CompactRange
+      assert(!exclusive || !manual_conflict);
+      // Running either this or some other manual compaction
+      bg_cv_.Wait();
+      if (scheduled && manual.incomplete == true) {
+        assert(!manual.in_progress);
+        scheduled = false;
+        manual.incomplete = false;
+      }
+    } else if (!scheduled) {
+      if (manual.compaction == nullptr) {
+        manual.done = true;
+        bg_cv_.SignalAll();
+        continue;
+      }
+      ca = new CompactionArg;
+      ca->db = this;
+      ca->m = &manual;
+      manual.incomplete = false;
+      bg_compaction_scheduled_++;
+      env_->Schedule(&DBImpl::BGWorkCompaction, ca, Env::Priority::LOW, this,
+                     &DBImpl::UnscheduleCallback);
+      scheduled = true;
+    }
+  }
+
+  assert(!manual.in_progress);
+  assert(HasPendingManualCompaction());
+  RemoveManualCompaction(&manual);
+  bg_cv_.SignalAll();
+  return manual.status;
+}
+
+InternalIterator* DBImpl::NewInternalIterator(
+    Arena* arena, ColumnFamilyHandle* column_family) {
+  ColumnFamilyData* cfd;
+  if (column_family == nullptr) {
+    cfd = default_cf_handle_->cfd();
+  } else {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+    cfd = cfh->cfd();
+  }
+
+  mutex_.Lock();
+  SuperVersion* super_version = cfd->GetSuperVersion()->Ref();
+  mutex_.Unlock();
+  ReadOptions roptions;
+  return NewInternalIterator(roptions, cfd, super_version, arena);
+}
+
+Status DBImpl::FlushMemTable(ColumnFamilyData* cfd,
+                             const FlushOptions& flush_options) {
+  Status s;
+  {
+    WriteContext context;
+    InstrumentedMutexLock guard_lock(&mutex_);
+
+    if (cfd->imm()->NumNotFlushed() == 0 && cfd->mem()->IsEmpty()) {
+      // Nothing to flush
+      return Status::OK();
+    }
+
+    WriteThread::Writer w;
+    write_thread_.EnterUnbatched(&w, &mutex_);
+
+    // SwitchMemtable() will release and reacquire mutex
+    // during execution
+    s = SwitchMemtable(cfd, &context);
+    write_thread_.ExitUnbatched(&w);
+
+    cfd->imm()->FlushRequested();
+
+    // schedule flush
+    SchedulePendingFlush(cfd);
+    MaybeScheduleFlushOrCompaction();
+  }
+
+  if (s.ok() && flush_options.wait) {
+    // Wait until the flush completes
+    s = WaitForFlushMemTable(cfd);
+  }
+  return s;
+}
+
+Status DBImpl::WaitForFlushMemTable(ColumnFamilyData* cfd) {
+  Status s;
+  // Wait until the flush completes
+  InstrumentedMutexLock l(&mutex_);
+  while (cfd->imm()->NumNotFlushed() > 0 && bg_error_.ok()) {
+    if (shutting_down_.load(std::memory_order_acquire)) {
+      return Status::ShutdownInProgress();
+    }
+    bg_cv_.Wait();
+  }
+  if (!bg_error_.ok()) {
+    s = bg_error_;
+  }
+  return s;
+}
+
+Status DBImpl::EnableAutoCompaction(
+    const std::vector<ColumnFamilyHandle*>& column_family_handles) {
+  Status s;
+  for (auto cf_ptr : column_family_handles) {
+    Status status =
+        this->SetOptions(cf_ptr, {{"disable_auto_compactions", "false"}});
+    if (!status.ok()) {
+      s = status;
+    }
+  }
+
+  return s;
+}
+
+void DBImpl::MaybeScheduleFlushOrCompaction() {
+  mutex_.AssertHeld();
+  if (!opened_successfully_) {
+    // Compaction may introduce data race to DB open
+    return;
+  }
+  if (bg_work_paused_ > 0) {
+    // we paused the background work
+    return;
+  } else if (shutting_down_.load(std::memory_order_acquire)) {
+    // DB is being deleted; no more background compactions
+    return;
+  }
+
+  while (unscheduled_flushes_ > 0 &&
+         bg_flush_scheduled_ < db_options_.max_background_flushes) {
+    unscheduled_flushes_--;
+    bg_flush_scheduled_++;
+    env_->Schedule(&DBImpl::BGWorkFlush, this, Env::Priority::HIGH, this);
+  }
+
+  auto bg_compactions_allowed = BGCompactionsAllowed();
+
+  // special case -- if max_background_flushes == 0, then schedule flush on a
+  // compaction thread
+  if (db_options_.max_background_flushes == 0) {
+    while (unscheduled_flushes_ > 0 &&
+           bg_flush_scheduled_ + bg_compaction_scheduled_ <
+               bg_compactions_allowed) {
+      unscheduled_flushes_--;
+      bg_flush_scheduled_++;
+      env_->Schedule(&DBImpl::BGWorkFlush, this, Env::Priority::LOW, this);
+    }
+  }
+
+  if (bg_compaction_paused_ > 0) {
+    // we paused the background compaction
+    return;
+  }
+
+  if (HasExclusiveManualCompaction()) {
+    // only manual compactions are allowed to run; don't schedule automatic
+    // compactions
+    return;
+  }
+
+  while (bg_compaction_scheduled_ < bg_compactions_allowed &&
+         unscheduled_compactions_ > 0) {
+    CompactionArg* ca = new CompactionArg;
+    ca->db = this;
+    ca->m = nullptr;
+    bg_compaction_scheduled_++;
+    unscheduled_compactions_--;
+    env_->Schedule(&DBImpl::BGWorkCompaction, ca, Env::Priority::LOW, this,
+                   &DBImpl::UnscheduleCallback);
+  }
+}
+
+void DBImpl::SchedulePurge() {
+  mutex_.AssertHeld();
+  assert(opened_successfully_);
+
+  // Purge operations are put into High priority queue
+  bg_purge_scheduled_++;
+  env_->Schedule(&DBImpl::BGWorkPurge, this, Env::Priority::HIGH, nullptr);
+}
+
+int DBImpl::BGCompactionsAllowed() const {
+  if (write_controller_.NeedSpeedupCompaction()) {
+    return db_options_.max_background_compactions;
+  } else {
+    return db_options_.base_background_compactions;
+  }
+}
+
+void DBImpl::AddToCompactionQueue(ColumnFamilyData* cfd) {
+  assert(!cfd->pending_compaction());
+  cfd->Ref();
+  compaction_queue_.push_back(cfd);
+  cfd->set_pending_compaction(true);
+}
+
+ColumnFamilyData* DBImpl::PopFirstFromCompactionQueue() {
+  assert(!compaction_queue_.empty());
+  auto cfd = *compaction_queue_.begin();
+  compaction_queue_.pop_front();
+  assert(cfd->pending_compaction());
+  cfd->set_pending_compaction(false);
+  return cfd;
+}
+
+void DBImpl::AddToFlushQueue(ColumnFamilyData* cfd) {
+  assert(!cfd->pending_flush());
+  cfd->Ref();
+  flush_queue_.push_back(cfd);
+  cfd->set_pending_flush(true);
+}
+
+ColumnFamilyData* DBImpl::PopFirstFromFlushQueue() {
+  assert(!flush_queue_.empty());
+  auto cfd = *flush_queue_.begin();
+  flush_queue_.pop_front();
+  assert(cfd->pending_flush());
+  cfd->set_pending_flush(false);
+  return cfd;
+}
+
+void DBImpl::SchedulePendingFlush(ColumnFamilyData* cfd) {
+  if (!cfd->pending_flush() && cfd->imm()->IsFlushPending()) {
+    AddToFlushQueue(cfd);
+    ++unscheduled_flushes_;
+  }
+}
+
+void DBImpl::SchedulePendingCompaction(ColumnFamilyData* cfd) {
+  if (!cfd->pending_compaction() && cfd->NeedsCompaction()) {
+    AddToCompactionQueue(cfd);
+    ++unscheduled_compactions_;
+  }
+}
+
+void DBImpl::SchedulePendingPurge(std::string fname, FileType type,
+                                  uint64_t number, uint32_t path_id,
+                                  int job_id) {
+  mutex_.AssertHeld();
+  PurgeFileInfo file_info(fname, type, number, path_id, job_id);
+  purge_queue_.push_back(std::move(file_info));
+}
+
+void DBImpl::BGWorkFlush(void* db) {
+  IOSTATS_SET_THREAD_POOL_ID(Env::Priority::HIGH);
+  TEST_SYNC_POINT("DBImpl::BGWorkFlush");
+  reinterpret_cast<DBImpl*>(db)->BackgroundCallFlush();
+  TEST_SYNC_POINT("DBImpl::BGWorkFlush:done");
+}
+
+void DBImpl::BGWorkCompaction(void* arg) {
+  CompactionArg ca = *(reinterpret_cast<CompactionArg*>(arg));
+  delete reinterpret_cast<CompactionArg*>(arg);
+  IOSTATS_SET_THREAD_POOL_ID(Env::Priority::LOW);
+  TEST_SYNC_POINT("DBImpl::BGWorkCompaction");
+  reinterpret_cast<DBImpl*>(ca.db)->BackgroundCallCompaction(ca.m);
+}
+
+void DBImpl::BGWorkPurge(void* db) {
+  IOSTATS_SET_THREAD_POOL_ID(Env::Priority::HIGH);
+  TEST_SYNC_POINT("DBImpl::BGWorkPurge:start");
+  reinterpret_cast<DBImpl*>(db)->BackgroundCallPurge();
+  TEST_SYNC_POINT("DBImpl::BGWorkPurge:end");
+}
+
+void DBImpl::UnscheduleCallback(void* arg) {
+  CompactionArg ca = *(reinterpret_cast<CompactionArg*>(arg));
+  delete reinterpret_cast<CompactionArg*>(arg);
+  if ((ca.m != nullptr) && (ca.m->compaction != nullptr)) {
+    delete ca.m->compaction;
+  }
+  TEST_SYNC_POINT("DBImpl::UnscheduleCallback");
+}
+
+void DBImpl::BackgroundCallPurge() {
+  mutex_.Lock();
+
+  // We use one single loop to clear both queues so that after exiting the loop
+  // both queues are empty.
This is stricter than what is needed, but can make + // it easier for us to reason the correctness. + while (!purge_queue_.empty() || !logs_to_free_queue_.empty()) { + if (!purge_queue_.empty()) { + auto purge_file = purge_queue_.begin(); + auto fname = purge_file->fname; + auto type = purge_file->type; + auto number = purge_file->number; + auto path_id = purge_file->path_id; + auto job_id = purge_file->job_id; + purge_queue_.pop_front(); + + mutex_.Unlock(); + Status file_deletion_status; + DeleteObsoleteFileImpl(file_deletion_status, job_id, fname, type, number, + path_id); + mutex_.Lock(); + } else { + assert(!logs_to_free_queue_.empty()); + log::Writer* log_writer = *(logs_to_free_queue_.begin()); + logs_to_free_queue_.pop_front(); + mutex_.Unlock(); + delete log_writer; + mutex_.Lock(); + } + } + bg_purge_scheduled_--; + + bg_cv_.SignalAll(); + // IMPORTANT:there should be no code after calling SignalAll. This call may + // signal the DB destructor that it's OK to proceed with destruction. In + // that case, all DB variables will be dealloacated and referencing them + // will cause trouble. + mutex_.Unlock(); +} + +Status DBImpl::BackgroundFlush(bool* made_progress, JobContext* job_context, + LogBuffer* log_buffer) { + mutex_.AssertHeld(); + + Status status = bg_error_; + if (status.ok() && shutting_down_.load(std::memory_order_acquire)) { + status = Status::ShutdownInProgress(); + } + + if (!status.ok()) { + return status; + } + + ColumnFamilyData* cfd = nullptr; + while (!flush_queue_.empty()) { + // This cfd is already referenced + auto first_cfd = PopFirstFromFlushQueue(); + + if (first_cfd->IsDropped() || !first_cfd->imm()->IsFlushPending()) { + // can't flush this CF, try next one + if (first_cfd->Unref()) { + delete first_cfd; + } + continue; + } + + // found a flush! + cfd = first_cfd; + break; + } + + if (cfd != nullptr) { + const MutableCFOptions mutable_cf_options = + *cfd->GetLatestMutableCFOptions(); + LogToBuffer( + log_buffer, + "Calling FlushMemTableToOutputFile with column " + "family [%s], flush slots available %d, compaction slots allowed %d, " + "compaction slots scheduled %d", + cfd->GetName().c_str(), db_options_.max_background_flushes, + bg_flush_scheduled_, BGCompactionsAllowed() - bg_compaction_scheduled_); + status = FlushMemTableToOutputFile(cfd, mutable_cf_options, made_progress, + job_context, log_buffer); + if (cfd->Unref()) { + delete cfd; + } + } + return status; +} + +void DBImpl::BackgroundCallFlush() { + bool made_progress = false; + JobContext job_context(next_job_id_.fetch_add(1), true); + assert(bg_flush_scheduled_); + + TEST_SYNC_POINT("DBImpl::BackgroundCallFlush:start"); + + LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, db_options_.info_log.get()); + { + InstrumentedMutexLock l(&mutex_); + num_running_flushes_++; + + auto pending_outputs_inserted_elem = + CaptureCurrentFileNumberInPendingOutputs(); + + Status s = BackgroundFlush(&made_progress, &job_context, &log_buffer); + if (!s.ok() && !s.IsShutdownInProgress()) { + // Wait a little bit before retrying background flush in + // case this is an environmental problem and we do not want to + // chew up resources for failed flushes for the duration of + // the problem. 
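+      // (The DB mutex is released around the 1-second SleepForMicroseconds
+      // backoff below, so other background work can still make progress
+      // while this thread waits to retry.)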
+ uint64_t error_cnt = + default_cf_internal_stats_->BumpAndGetBackgroundErrorCount(); + bg_cv_.SignalAll(); // In case a waiter can proceed despite the error + mutex_.Unlock(); + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "Waiting after background flush error: %s" + "Accumulated background error counts: %" PRIu64, + s.ToString().c_str(), error_cnt); + log_buffer.FlushBufferToLog(); + LogFlush(db_options_.info_log); + env_->SleepForMicroseconds(1000000); + mutex_.Lock(); + } + + ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem); + + // If flush failed, we want to delete all temporary files that we might have + // created. Thus, we force full scan in FindObsoleteFiles() + FindObsoleteFiles(&job_context, !s.ok() && !s.IsShutdownInProgress()); + // delete unnecessary files if any, this is done outside the mutex + if (job_context.HaveSomethingToDelete() || !log_buffer.IsEmpty()) { + mutex_.Unlock(); + // Have to flush the info logs before bg_flush_scheduled_-- + // because if bg_flush_scheduled_ becomes 0 and the lock is + // released, the deconstructor of DB can kick in and destroy all the + // states of DB so info_log might not be available after that point. + // It also applies to access other states that DB owns. + log_buffer.FlushBufferToLog(); + if (job_context.HaveSomethingToDelete()) { + PurgeObsoleteFiles(job_context); + } + job_context.Clean(); + mutex_.Lock(); + } + + assert(num_running_flushes_ > 0); + num_running_flushes_--; + bg_flush_scheduled_--; + // See if there's more work to be done + MaybeScheduleFlushOrCompaction(); + bg_cv_.SignalAll(); + // IMPORTANT: there should be no code after calling SignalAll. This call may + // signal the DB destructor that it's OK to proceed with destruction. In + // that case, all DB variables will be dealloacated and referencing them + // will cause trouble. + } +} + +void DBImpl::BackgroundCallCompaction(void* arg) { + bool made_progress = false; + ManualCompaction* m = reinterpret_cast(arg); + JobContext job_context(next_job_id_.fetch_add(1), true); + TEST_SYNC_POINT("BackgroundCallCompaction:0"); + MaybeDumpStats(); + LogBuffer log_buffer(InfoLogLevel::INFO_LEVEL, db_options_.info_log.get()); + { + InstrumentedMutexLock l(&mutex_); + num_running_compactions_++; + + auto pending_outputs_inserted_elem = + CaptureCurrentFileNumberInPendingOutputs(); + + assert(bg_compaction_scheduled_); + Status s = + BackgroundCompaction(&made_progress, &job_context, &log_buffer, m); + TEST_SYNC_POINT("BackgroundCallCompaction:1"); + if (!s.ok() && !s.IsShutdownInProgress()) { + // Wait a little bit before retrying background compaction in + // case this is an environmental problem and we do not want to + // chew up resources for failed compactions for the duration of + // the problem. + uint64_t error_cnt = + default_cf_internal_stats_->BumpAndGetBackgroundErrorCount(); + bg_cv_.SignalAll(); // In case a waiter can proceed despite the error + mutex_.Unlock(); + log_buffer.FlushBufferToLog(); + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "Waiting after background compaction error: %s, " + "Accumulated background error counts: %" PRIu64, + s.ToString().c_str(), error_cnt); + LogFlush(db_options_.info_log); + env_->SleepForMicroseconds(1000000); + mutex_.Lock(); + } + + ReleaseFileNumberFromPendingOutputs(pending_outputs_inserted_elem); + + // If compaction failed, we want to delete all temporary files that we might + // have created (they might not be all recorded in job_context in case of a + // failure). 
Thus, we force full scan in FindObsoleteFiles() + FindObsoleteFiles(&job_context, !s.ok() && !s.IsShutdownInProgress()); + + // delete unnecessary files if any, this is done outside the mutex + if (job_context.HaveSomethingToDelete() || !log_buffer.IsEmpty()) { + mutex_.Unlock(); + // Have to flush the info logs before bg_compaction_scheduled_-- + // because if bg_flush_scheduled_ becomes 0 and the lock is + // released, the deconstructor of DB can kick in and destroy all the + // states of DB so info_log might not be available after that point. + // It also applies to access other states that DB owns. + log_buffer.FlushBufferToLog(); + if (job_context.HaveSomethingToDelete()) { + PurgeObsoleteFiles(job_context); + } + job_context.Clean(); + mutex_.Lock(); + } + + assert(num_running_compactions_ > 0); + num_running_compactions_--; + bg_compaction_scheduled_--; + + versions_->GetColumnFamilySet()->FreeDeadColumnFamilies(); + + // See if there's more work to be done + MaybeScheduleFlushOrCompaction(); + if (made_progress || bg_compaction_scheduled_ == 0 || + HasPendingManualCompaction()) { + // signal if + // * made_progress -- need to wakeup DelayWrite + // * bg_compaction_scheduled_ == 0 -- need to wakeup ~DBImpl + // * HasPendingManualCompaction -- need to wakeup RunManualCompaction + // If none of this is true, there is no need to signal since nobody is + // waiting for it + bg_cv_.SignalAll(); + } + // IMPORTANT: there should be no code after calling SignalAll. This call may + // signal the DB destructor that it's OK to proceed with destruction. In + // that case, all DB variables will be dealloacated and referencing them + // will cause trouble. + } +} + +Status DBImpl::BackgroundCompaction(bool* made_progress, + JobContext* job_context, + LogBuffer* log_buffer, void* arg) { + ManualCompaction* manual_compaction = + reinterpret_cast(arg); + *made_progress = false; + mutex_.AssertHeld(); + TEST_SYNC_POINT("DBImpl::BackgroundCompaction:Start"); + + bool is_manual = (manual_compaction != nullptr); + + // (manual_compaction->in_progress == false); + bool trivial_move_disallowed = + is_manual && manual_compaction->disallow_trivial_move; + + CompactionJobStats compaction_job_stats; + Status status = bg_error_; + if (status.ok() && shutting_down_.load(std::memory_order_acquire)) { + status = Status::ShutdownInProgress(); + } + + if (!status.ok()) { + if (is_manual) { + manual_compaction->status = status; + manual_compaction->done = true; + manual_compaction->in_progress = false; + delete manual_compaction->compaction; + manual_compaction = nullptr; + } + return status; + } + + if (is_manual) { + // another thread cannot pick up the same work + manual_compaction->in_progress = true; + } + + unique_ptr c; + // InternalKey manual_end_storage; + // InternalKey* manual_end = &manual_end_storage; + if (is_manual) { + ManualCompaction* m = manual_compaction; + assert(m->in_progress); + c.reset(std::move(m->compaction)); + if (!c) { + m->done = true; + m->manual_end = nullptr; + LogToBuffer(log_buffer, + "[%s] Manual compaction from level-%d from %s .. " + "%s; nothing to do\n", + m->cfd->GetName().c_str(), m->input_level, + (m->begin ? m->begin->DebugString().c_str() : "(begin)"), + (m->end ? m->end->DebugString().c_str() : "(end)")); + } else { + LogToBuffer(log_buffer, + "[%s] Manual compaction from level-%d to level-%d from %s .. " + "%s; will stop at %s\n", + m->cfd->GetName().c_str(), m->input_level, c->output_level(), + (m->begin ? m->begin->DebugString().c_str() : "(begin)"), + (m->end ? 
m->end->DebugString().c_str() : "(end)"), + ((m->done || m->manual_end == nullptr) + ? "(end)" + : m->manual_end->DebugString().c_str())); + } + } else if (!compaction_queue_.empty()) { + // cfd is referenced here + auto cfd = PopFirstFromCompactionQueue(); + // We unreference here because the following code will take a Ref() on + // this cfd if it is going to use it (Compaction class holds a + // reference). + // This will all happen under a mutex so we don't have to be afraid of + // somebody else deleting it. + if (cfd->Unref()) { + delete cfd; + // This was the last reference of the column family, so no need to + // compact. + return Status::OK(); + } + + if (HaveManualCompaction(cfd)) { + // Can't compact right now, but try again later + TEST_SYNC_POINT("DBImpl::BackgroundCompaction()::Conflict"); + return Status::OK(); + } + + // Pick up latest mutable CF Options and use it throughout the + // compaction job + // Compaction makes a copy of the latest MutableCFOptions. It should be used + // throughout the compaction procedure to make sure consistency. It will + // eventually be installed into SuperVersion + auto* mutable_cf_options = cfd->GetLatestMutableCFOptions(); + if (!mutable_cf_options->disable_auto_compactions && !cfd->IsDropped()) { + // NOTE: try to avoid unnecessary copy of MutableCFOptions if + // compaction is not necessary. Need to make sure mutex is held + // until we make a copy in the following code + TEST_SYNC_POINT("DBImpl::BackgroundCompaction():BeforePickCompaction"); + c.reset(cfd->PickCompaction(*mutable_cf_options, log_buffer)); + TEST_SYNC_POINT("DBImpl::BackgroundCompaction():AfterPickCompaction"); + if (c != nullptr) { + // update statistics + MeasureTime(stats_, NUM_FILES_IN_SINGLE_COMPACTION, + c->inputs(0)->size()); + // There are three things that can change compaction score: + // 1) When flush or compaction finish. This case is covered by + // InstallSuperVersionAndScheduleWork + // 2) When MutableCFOptions changes. This case is also covered by + // InstallSuperVersionAndScheduleWork, because this is when the new + // options take effect. + // 3) When we Pick a new compaction, we "remove" those files being + // compacted from the calculation, which then influences compaction + // score. Here we check if we need the new compaction even without the + // files that are currently being compacted. If we need another + // compaction, we might be able to execute it in parallel, so we add it + // to the queue and schedule a new thread. + if (cfd->NeedsCompaction()) { + // Yes, we need more compactions! + AddToCompactionQueue(cfd); + ++unscheduled_compactions_; + MaybeScheduleFlushOrCompaction(); + } + } + } + } + + if (c != nullptr) { + running_compactions_.insert(c.get()); + } + + if (!c) { + // Nothing to do + LogToBuffer(log_buffer, "Compaction nothing to do"); + } else if (c->deletion_compaction()) { + // TODO(icanadi) Do we want to honor snapshots here? i.e. 
not delete old + // file if there is alive snapshot pointing to it + assert(c->num_input_files(1) == 0); + assert(c->level() == 0); + assert(c->column_family_data()->ioptions()->compaction_style == + kCompactionStyleFIFO); + + compaction_job_stats.num_input_files = c->num_input_files(0); + + for (const auto& f : *c->inputs(0)) { + c->edit()->DeleteFile(c->level(), f->fd.GetNumber()); + } + status = versions_->LogAndApply(c->column_family_data(), + *c->mutable_cf_options(), c->edit(), + &mutex_, directories_.GetDbDir()); + InstallSuperVersionAndScheduleWorkWrapper( + c->column_family_data(), job_context, *c->mutable_cf_options()); + LogToBuffer(log_buffer, "[%s] Deleted %d files\n", + c->column_family_data()->GetName().c_str(), + c->num_input_files(0)); + *made_progress = true; + } else if (!trivial_move_disallowed && c->IsTrivialMove()) { + TEST_SYNC_POINT("DBImpl::BackgroundCompaction:TrivialMove"); + // Instrument for event update + // TODO(yhchiang): add op details for showing trivial-move. + ThreadStatusUtil::SetColumnFamily( + c->column_family_data(), c->column_family_data()->ioptions()->env, + c->column_family_data()->options()->enable_thread_tracking); + ThreadStatusUtil::SetThreadOperation(ThreadStatus::OP_COMPACTION); + + compaction_job_stats.num_input_files = c->num_input_files(0); + + // Move files to next level + int32_t moved_files = 0; + int64_t moved_bytes = 0; + for (unsigned int l = 0; l < c->num_input_levels(); l++) { + if (c->level(l) == c->output_level()) { + continue; + } + for (size_t i = 0; i < c->num_input_files(l); i++) { + FileMetaData* f = c->input(l, i); + c->edit()->DeleteFile(c->level(l), f->fd.GetNumber()); + c->edit()->AddFile(c->output_level(), f->fd.GetNumber(), + f->fd.GetPathId(), f->fd.GetFileSize(), f->smallest, + f->largest, f->smallest_seqno, f->largest_seqno, + f->marked_for_compaction); + + LogToBuffer(log_buffer, + "[%s] Moving #%" PRIu64 " to level-%d %" PRIu64 " bytes\n", + c->column_family_data()->GetName().c_str(), + f->fd.GetNumber(), c->output_level(), f->fd.GetFileSize()); + ++moved_files; + moved_bytes += f->fd.GetFileSize(); + } + } + + status = versions_->LogAndApply(c->column_family_data(), + *c->mutable_cf_options(), c->edit(), + &mutex_, directories_.GetDbDir()); + // Use latest MutableCFOptions + InstallSuperVersionAndScheduleWorkWrapper( + c->column_family_data(), job_context, *c->mutable_cf_options()); + + VersionStorageInfo::LevelSummaryStorage tmp; + c->column_family_data()->internal_stats()->IncBytesMoved(c->output_level(), + moved_bytes); + { + event_logger_.LogToBuffer(log_buffer) + << "job" << job_context->job_id << "event" + << "trivial_move" + << "destination_level" << c->output_level() << "files" << moved_files + << "total_files_size" << moved_bytes; + } + LogToBuffer( + log_buffer, + "[%s] Moved #%d files to level-%d %" PRIu64 " bytes %s: %s\n", + c->column_family_data()->GetName().c_str(), moved_files, + c->output_level(), moved_bytes, status.ToString().c_str(), + c->column_family_data()->current()->storage_info()->LevelSummary(&tmp)); + *made_progress = true; + + // Clear Instrument + ThreadStatusUtil::ResetThreadStatus(); + } else { + int output_level __attribute__((unused)) = c->output_level(); + TEST_SYNC_POINT_CALLBACK("DBImpl::BackgroundCompaction:NonTrivial", + &output_level); + + SequenceNumber earliest_write_conflict_snapshot; + std::vector snapshot_seqs = + snapshots_.GetAll(&earliest_write_conflict_snapshot); + + assert(is_snapshot_supported_ || snapshots_.empty()); + CompactionJob compaction_job( + 
job_context->job_id, c.get(), db_options_, env_options_, + versions_.get(), &shutting_down_, log_buffer, directories_.GetDbDir(), + directories_.GetDataDir(c->output_path_id()), stats_, &mutex_, + &bg_error_, snapshot_seqs, earliest_write_conflict_snapshot, + table_cache_, &event_logger_, + c->mutable_cf_options()->paranoid_file_checks, + c->mutable_cf_options()->report_bg_io_stats, dbname_, + &compaction_job_stats); + compaction_job.Prepare(); + + mutex_.Unlock(); + compaction_job.Run(); + TEST_SYNC_POINT("DBImpl::BackgroundCompaction:NonTrivial:AfterRun"); + mutex_.Lock(); + + status = compaction_job.Install(*c->mutable_cf_options()); + if (status.ok()) { + InstallSuperVersionAndScheduleWorkWrapper( + c->column_family_data(), job_context, *c->mutable_cf_options()); + } + *made_progress = true; + } + if (c != nullptr) { + c->ReleaseCompactionFiles(status); + *made_progress = true; + NotifyOnCompactionCompleted( + c->column_family_data(), c.get(), status, + compaction_job_stats, job_context->job_id); + running_compactions_.erase(c.get()); + } + // this will unref its input_version and column_family_data + c.reset(); + + if (status.ok()) { + // Done + } else if (status.IsShutdownInProgress()) { + // Ignore compaction errors found during shutting down + } else { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, "Compaction error: %s", + status.ToString().c_str()); + if (db_options_.paranoid_checks && bg_error_.ok()) { + bg_error_ = status; + } + } + + if (is_manual) { + ManualCompaction* m = manual_compaction; + if (!status.ok()) { + m->status = status; + m->done = true; + } + // For universal compaction: + // Because universal compaction always happens at level 0, so one + // compaction will pick up all overlapped files. No files will be + // filtered out due to size limit and left for a successive compaction. + // So we can safely conclude the current compaction. + // + // Also note that, if we don't stop here, then the current compaction + // writes a new file back to level 0, which will be used in successive + // compaction. Hence the manual compaction will never finish. + // + // Stop the compaction if manual_end points to nullptr -- this means + // that we compacted the whole range. manual_end should always point + // to nullptr in case of universal compaction + if (m->manual_end == nullptr) { + m->done = true; + } + if (!m->done) { + // We only compacted part of the requested range. Update *m + // to the range that is left to be compacted. 
+      // Universal and FIFO compactions should always compact the whole range
+      assert(m->cfd->ioptions()->compaction_style !=
+                 kCompactionStyleUniversal ||
+             m->cfd->ioptions()->num_levels > 1);
+      assert(m->cfd->ioptions()->compaction_style != kCompactionStyleFIFO);
+      m->tmp_storage = *m->manual_end;
+      m->begin = &m->tmp_storage;
+      m->incomplete = true;
+    }
+    m->in_progress = false;  // not being processed anymore
+  }
+  TEST_SYNC_POINT("DBImpl::BackgroundCompaction:Finish");
+  return status;
+}
+
+bool DBImpl::HasPendingManualCompaction() {
+  return (!manual_compaction_dequeue_.empty());
+}
+
+void DBImpl::AddManualCompaction(DBImpl::ManualCompaction* m) {
+  manual_compaction_dequeue_.push_back(m);
+}
+
+void DBImpl::RemoveManualCompaction(DBImpl::ManualCompaction* m) {
+  // Remove from queue
+  std::deque<ManualCompaction*>::iterator it =
+      manual_compaction_dequeue_.begin();
+  while (it != manual_compaction_dequeue_.end()) {
+    if (m == (*it)) {
+      it = manual_compaction_dequeue_.erase(it);
+      return;
+    }
+    it++;
+  }
+  assert(false);
+  return;
+}
+
+bool DBImpl::ShouldntRunManualCompaction(ManualCompaction* m) {
+  if (m->exclusive) {
+    return (bg_compaction_scheduled_ > 0);
+  }
+  std::deque<ManualCompaction*>::iterator it =
+      manual_compaction_dequeue_.begin();
+  bool seen = false;
+  while (it != manual_compaction_dequeue_.end()) {
+    if (m == (*it)) {
+      it++;
+      seen = true;
+      continue;
+    } else if (MCOverlap(m, (*it)) && (!seen && !(*it)->in_progress)) {
+      // The other manual compaction *it conflicts if it overlaps with m,
+      // and (*it) is ahead in the queue and is not yet in progress
+      return true;
+    }
+    it++;
+  }
+  return false;
+}
+
+bool DBImpl::HaveManualCompaction(ColumnFamilyData* cfd) {
+  // Scan the manual compaction queue for conflicts
+  std::deque<ManualCompaction*>::iterator it =
+      manual_compaction_dequeue_.begin();
+  while (it != manual_compaction_dequeue_.end()) {
+    if ((*it)->exclusive) {
+      return true;
+    }
+    if ((cfd == (*it)->cfd) && (!((*it)->in_progress || (*it)->done))) {
+      // A queued manual compaction on this column family blocks automatic
+      // compaction; once it is in progress, automatic compaction is allowed
+      return true;
+    }
+    it++;
+  }
+  return false;
+}
+
+bool DBImpl::HasExclusiveManualCompaction() {
+  // Scan the manual compaction queue for an exclusive compaction
+  std::deque<ManualCompaction*>::iterator it =
+      manual_compaction_dequeue_.begin();
+  while (it != manual_compaction_dequeue_.end()) {
+    if ((*it)->exclusive) {
+      return true;
+    }
+    it++;
+  }
+  return false;
+}
+
+bool DBImpl::MCOverlap(ManualCompaction* m, ManualCompaction* m1) {
+  if ((m->exclusive) || (m1->exclusive)) {
+    return true;
+  }
+  if (m->cfd != m1->cfd) {
+    return false;
+  }
+  return true;
+}
+
+namespace {
+struct IterState {
+  IterState(DBImpl* _db, InstrumentedMutex* _mu, SuperVersion* _super_version,
+            bool _background_purge)
+      : db(_db),
+        mu(_mu),
+        super_version(_super_version),
+        background_purge(_background_purge) {}
+
+  DBImpl* db;
+  InstrumentedMutex* mu;
+  SuperVersion* super_version;
+  bool background_purge;
+};
+
+static void CleanupIteratorState(void* arg1, void* arg2) {
+  IterState* state = reinterpret_cast<IterState*>(arg1);
+
+  if (state->super_version->Unref()) {
+    // Job id == 0 means that this is not our background process, but rather
+    // user thread
+    JobContext job_context(0);
+
+    state->mu->Lock();
+    state->super_version->Cleanup();
+    state->db->FindObsoleteFiles(&job_context, false, true);
+    if (state->background_purge) {
+      state->db->ScheduleBgLogWriterClose(&job_context);
+    }
+    state->mu->Unlock();
+
+    delete state->super_version;
+    if (job_context.HaveSomethingToDelete()) {
+      if (state->background_purge) {
+        // PurgeObsoleteFiles here does not delete
files. Instead, it adds the + // files to be deleted to a job queue, and deletes it in a separate + // background thread. + state->db->PurgeObsoleteFiles(job_context, true /* schedule only */); + state->mu->Lock(); + state->db->SchedulePurge(); + state->mu->Unlock(); + } else { + state->db->PurgeObsoleteFiles(job_context); + } + } + job_context.Clean(); + } + + delete state; +} +} // namespace + +InternalIterator* DBImpl::NewInternalIterator(const ReadOptions& read_options, + ColumnFamilyData* cfd, + SuperVersion* super_version, + Arena* arena) { + InternalIterator* internal_iter; + assert(arena != nullptr); + // Need to create internal iterator from the arena. + MergeIteratorBuilder merge_iter_builder(&cfd->internal_comparator(), arena); + // Collect iterator for mutable mem + merge_iter_builder.AddIterator( + super_version->mem->NewIterator(read_options, arena)); + // Collect all needed child iterators for immutable memtables + super_version->imm->AddIterators(read_options, &merge_iter_builder); + // Collect iterators for files in L0 - Ln + super_version->current->AddIterators(read_options, env_options_, + &merge_iter_builder); + internal_iter = merge_iter_builder.Finish(); + IterState* cleanup = + new IterState(this, &mutex_, super_version, + read_options.background_purge_on_iterator_cleanup); + internal_iter->RegisterCleanup(CleanupIteratorState, cleanup, nullptr); + + return internal_iter; +} + +ColumnFamilyHandle* DBImpl::DefaultColumnFamily() const { + return default_cf_handle_; +} + +Status DBImpl::Get(const ReadOptions& read_options, + ColumnFamilyHandle* column_family, const Slice& key, + std::string* value) { + return GetImpl(read_options, column_family, key, value); +} + +// JobContext gets created and destructed outside of the lock -- +// we +// use this convinently to: +// * malloc one SuperVersion() outside of the lock -- new_superversion +// * delete SuperVersion()s outside of the lock -- superversions_to_free +// +// However, if InstallSuperVersionAndScheduleWork() gets called twice with the +// same job_context, we can't reuse the SuperVersion() that got +// malloced because +// first call already used it. In that rare case, we take a hit and create a +// new SuperVersion() inside of the mutex. We do similar thing +// for superversion_to_free +void DBImpl::InstallSuperVersionAndScheduleWorkWrapper( + ColumnFamilyData* cfd, JobContext* job_context, + const MutableCFOptions& mutable_cf_options) { + mutex_.AssertHeld(); + SuperVersion* old_superversion = InstallSuperVersionAndScheduleWork( + cfd, job_context->new_superversion, mutable_cf_options); + job_context->new_superversion = nullptr; + job_context->superversions_to_free.push_back(old_superversion); +} + +SuperVersion* DBImpl::InstallSuperVersionAndScheduleWork( + ColumnFamilyData* cfd, SuperVersion* new_sv, + const MutableCFOptions& mutable_cf_options) { + mutex_.AssertHeld(); + + // Update max_total_in_memory_state_ + size_t old_memtable_size = 0; + auto* old_sv = cfd->GetSuperVersion(); + if (old_sv) { + old_memtable_size = old_sv->mutable_cf_options.write_buffer_size * + old_sv->mutable_cf_options.max_write_buffer_number; + } + + auto* old = cfd->InstallSuperVersion( + new_sv ? new_sv : new SuperVersion(), &mutex_, mutable_cf_options); + + // Whenever we install new SuperVersion, we might need to issue new flushes or + // compactions. 
+  SchedulePendingFlush(cfd);
+  SchedulePendingCompaction(cfd);
+  MaybeScheduleFlushOrCompaction();
+
+  // Update max_total_in_memory_state_
+  max_total_in_memory_state_ =
+      max_total_in_memory_state_ - old_memtable_size +
+      mutable_cf_options.write_buffer_size *
+          mutable_cf_options.max_write_buffer_number;
+  return old;
+}
+
+Status DBImpl::GetImpl(const ReadOptions& read_options,
+                       ColumnFamilyHandle* column_family, const Slice& key,
+                       std::string* value, bool* value_found) {
+  StopWatch sw(env_, stats_, DB_GET);
+  PERF_TIMER_GUARD(get_snapshot_time);
+
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  auto cfd = cfh->cfd();
+
+  SequenceNumber snapshot;
+  if (read_options.snapshot != nullptr) {
+    snapshot = reinterpret_cast<const SnapshotImpl*>(
+        read_options.snapshot)->number_;
+  } else {
+    snapshot = versions_->LastSequence();
+  }
+  // Acquire SuperVersion
+  SuperVersion* sv = GetAndRefSuperVersion(cfd);
+  // Prepare to store a list of merge operations if merge occurs.
+  MergeContext merge_context;
+
+  Status s;
+  // First look in the memtable, then in the immutable memtable (if any).
+  // s is both in/out. When in, s could either be OK or MergeInProgress.
+  // merge_operands will contain the sequence of merges in the latter case.
+  LookupKey lkey(key, snapshot);
+  PERF_TIMER_STOP(get_snapshot_time);
+
+  bool skip_memtable =
+      (read_options.read_tier == kPersistedTier && has_unpersisted_data_);
+  bool done = false;
+  if (!skip_memtable) {
+    if (sv->mem->Get(lkey, value, &s, &merge_context)) {
+      done = true;
+      RecordTick(stats_, MEMTABLE_HIT);
+    } else if (sv->imm->Get(lkey, value, &s, &merge_context)) {
+      done = true;
+      RecordTick(stats_, MEMTABLE_HIT);
+    }
+  }
+  if (!done) {
+    PERF_TIMER_GUARD(get_from_output_files_time);
+    sv->current->Get(read_options, lkey, value, &s, &merge_context,
+                     value_found);
+    RecordTick(stats_, MEMTABLE_MISS);
+  }
+
+  {
+    PERF_TIMER_GUARD(get_post_process_time);
+
+    ReturnAndCleanupSuperVersion(cfd, sv);
+
+    RecordTick(stats_, NUMBER_KEYS_READ);
+    RecordTick(stats_, BYTES_READ, value->size());
+    MeasureTime(stats_, BYTES_PER_READ, value->size());
+  }
+  return s;
+}
+
+std::vector<Status> DBImpl::MultiGet(
+    const ReadOptions& read_options,
+    const std::vector<ColumnFamilyHandle*>& column_family,
+    const std::vector<Slice>& keys, std::vector<std::string>* values) {
+
+  StopWatch sw(env_, stats_, DB_MULTIGET);
+  PERF_TIMER_GUARD(get_snapshot_time);
+
+  SequenceNumber snapshot;
+
+  struct MultiGetColumnFamilyData {
+    ColumnFamilyData* cfd;
+    SuperVersion* super_version;
+  };
+  std::unordered_map<uint32_t, MultiGetColumnFamilyData*> multiget_cf_data;
+  // fill up and allocate outside of mutex
+  for (auto cf : column_family) {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(cf);
+    auto cfd = cfh->cfd();
+    if (multiget_cf_data.find(cfd->GetID()) == multiget_cf_data.end()) {
+      auto mgcfd = new MultiGetColumnFamilyData();
+      mgcfd->cfd = cfd;
+      multiget_cf_data.insert({cfd->GetID(), mgcfd});
+    }
+  }
+
+  mutex_.Lock();
+  if (read_options.snapshot != nullptr) {
+    snapshot = reinterpret_cast<const SnapshotImpl*>(
+        read_options.snapshot)->number_;
+  } else {
+    snapshot = versions_->LastSequence();
+  }
+  for (auto mgd_iter : multiget_cf_data) {
+    mgd_iter.second->super_version =
+        mgd_iter.second->cfd->GetSuperVersion()->Ref();
+  }
+  mutex_.Unlock();
+
+  // Contain a list of merge operations if merge occurs.
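+  // (A single MergeContext is reused across all keys; it is cleared at the
+  // top of each iteration of the per-key loop below.)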
+  MergeContext merge_context;
+
+  // Note: this always resizes the values array
+  size_t num_keys = keys.size();
+  std::vector<Status> stat_list(num_keys);
+  values->resize(num_keys);
+
+  // Keep track of bytes that we read for statistics-recording later
+  uint64_t bytes_read = 0;
+  PERF_TIMER_STOP(get_snapshot_time);
+
+  // For each of the given keys, apply the entire "get" process as follows:
+  // First look in the memtable, then in the immutable memtable (if any).
+  // s is both in/out. When in, s could either be OK or MergeInProgress.
+  // merge_operands will contain the sequence of merges in the latter case.
+  for (size_t i = 0; i < num_keys; ++i) {
+    merge_context.Clear();
+    Status& s = stat_list[i];
+    std::string* value = &(*values)[i];
+
+    LookupKey lkey(keys[i], snapshot);
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family[i]);
+    auto mgd_iter = multiget_cf_data.find(cfh->cfd()->GetID());
+    assert(mgd_iter != multiget_cf_data.end());
+    auto mgd = mgd_iter->second;
+    auto super_version = mgd->super_version;
+    bool skip_memtable =
+        (read_options.read_tier == kPersistedTier && has_unpersisted_data_);
+    bool done = false;
+    if (!skip_memtable) {
+      if (super_version->mem->Get(lkey, value, &s, &merge_context)) {
+        done = true;
+        // TODO(?): RecordTick(stats_, MEMTABLE_HIT)?
+      } else if (super_version->imm->Get(lkey, value, &s, &merge_context)) {
+        done = true;
+        // TODO(?): RecordTick(stats_, MEMTABLE_HIT)?
+      }
+    }
+    if (!done) {
+      PERF_TIMER_GUARD(get_from_output_files_time);
+      super_version->current->Get(read_options, lkey, value, &s,
+                                  &merge_context);
+      // TODO(?): RecordTick(stats_, MEMTABLE_MISS)?
+    }
+
+    if (s.ok()) {
+      bytes_read += value->size();
+    }
+  }
+
+  // Post processing (decrement reference counts and record statistics)
+  PERF_TIMER_GUARD(get_post_process_time);
+  autovector<SuperVersion*> superversions_to_delete;
+
+  // TODO(icanadi) do we need lock here or just around Cleanup()?
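+  // (Unref() and Cleanup() run under the DB mutex; the SuperVersion objects
+  // themselves are deleted after the mutex is released below.)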
+ mutex_.Lock(); + for (auto mgd_iter : multiget_cf_data) { + auto mgd = mgd_iter.second; + if (mgd->super_version->Unref()) { + mgd->super_version->Cleanup(); + superversions_to_delete.push_back(mgd->super_version); + } + } + mutex_.Unlock(); + + for (auto td : superversions_to_delete) { + delete td; + } + for (auto mgd : multiget_cf_data) { + delete mgd.second; + } + + RecordTick(stats_, NUMBER_MULTIGET_CALLS); + RecordTick(stats_, NUMBER_MULTIGET_KEYS_READ, num_keys); + RecordTick(stats_, NUMBER_MULTIGET_BYTES_READ, bytes_read); + MeasureTime(stats_, BYTES_PER_MULTIGET, bytes_read); + PERF_TIMER_STOP(get_post_process_time); + + return stat_list; +} + +Status DBImpl::CreateColumnFamily(const ColumnFamilyOptions& cf_options, + const std::string& column_family_name, + ColumnFamilyHandle** handle) { + Status s; + Status persist_options_status; + *handle = nullptr; + + s = CheckCompressionSupported(cf_options); + if (s.ok() && db_options_.allow_concurrent_memtable_write) { + s = CheckConcurrentWritesSupported(cf_options); + } + if (!s.ok()) { + return s; + } + + { + InstrumentedMutexLock l(&mutex_); + + if (versions_->GetColumnFamilySet()->GetColumnFamily(column_family_name) != + nullptr) { + return Status::InvalidArgument("Column family already exists"); + } + VersionEdit edit; + edit.AddColumnFamily(column_family_name); + uint32_t new_id = versions_->GetColumnFamilySet()->GetNextColumnFamilyID(); + edit.SetColumnFamily(new_id); + edit.SetLogNumber(logfile_number_); + edit.SetComparatorName(cf_options.comparator->Name()); + + // LogAndApply will both write the creation in MANIFEST and create + // ColumnFamilyData object + Options opt(db_options_, cf_options); + { // write thread + WriteThread::Writer w; + write_thread_.EnterUnbatched(&w, &mutex_); + // LogAndApply will both write the creation in MANIFEST and create + // ColumnFamilyData object + s = versions_->LogAndApply( + nullptr, MutableCFOptions(opt, ImmutableCFOptions(opt)), &edit, + &mutex_, directories_.GetDbDir(), false, &cf_options); + + if (s.ok()) { + // If the column family was created successfully, we then persist + // the updated RocksDB options under the same single write thread + persist_options_status = WriteOptionsFile(); + } + write_thread_.ExitUnbatched(&w); + } + if (s.ok()) { + single_column_family_mode_ = false; + auto* cfd = + versions_->GetColumnFamilySet()->GetColumnFamily(column_family_name); + assert(cfd != nullptr); + delete InstallSuperVersionAndScheduleWork( + cfd, nullptr, *cfd->GetLatestMutableCFOptions()); + + if (!cfd->mem()->IsSnapshotSupported()) { + is_snapshot_supported_ = false; + } + + *handle = new ColumnFamilyHandleImpl(cfd, this, &mutex_); + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "Created column family [%s] (ID %u)", + column_family_name.c_str(), (unsigned)cfd->GetID()); + } else { + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "Creating column family [%s] FAILED -- %s", + column_family_name.c_str(), s.ToString().c_str()); + } + } // InstrumentedMutexLock l(&mutex_) + + // this is outside the mutex + if (s.ok()) { + NewThreadStatusCfInfo( + reinterpret_cast(*handle)->cfd()); + if (!persist_options_status.ok()) { + if (db_options_.fail_if_options_file_error) { + s = Status::IOError( + "ColumnFamily has been created, but unable to persist" + "options in CreateColumnFamily()", + persist_options_status.ToString().c_str()); + } + Warn(db_options_.info_log, + "Unable to persist options in CreateColumnFamily() -- %s", + persist_options_status.ToString().c_str()); + } + } + 
return s; +} + +Status DBImpl::DropColumnFamily(ColumnFamilyHandle* column_family) { + auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family); + auto cfd = cfh->cfd(); + if (cfd->GetID() == 0) { + return Status::InvalidArgument("Can't drop default column family"); + } + + bool cf_support_snapshot = cfd->mem()->IsSnapshotSupported(); + + VersionEdit edit; + edit.DropColumnFamily(); + edit.SetColumnFamily(cfd->GetID()); + + Status s; + Status options_persist_status; + { + InstrumentedMutexLock l(&mutex_); + if (cfd->IsDropped()) { + s = Status::InvalidArgument("Column family already dropped!\n"); + } + if (s.ok()) { + // we drop column family from a single write thread + WriteThread::Writer w; + write_thread_.EnterUnbatched(&w, &mutex_); + s = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(), + &edit, &mutex_); + if (s.ok()) { + // If the column family was dropped successfully, we then persist + // the updated RocksDB options under the same single write thread + options_persist_status = WriteOptionsFile(); + } + write_thread_.ExitUnbatched(&w); + } + + if (!cf_support_snapshot) { + // Dropped Column Family doesn't support snapshot. Need to recalculate + // is_snapshot_supported_. + bool new_is_snapshot_supported = true; + for (auto c : *versions_->GetColumnFamilySet()) { + if (!c->IsDropped() && !c->mem()->IsSnapshotSupported()) { + new_is_snapshot_supported = false; + break; + } + } + is_snapshot_supported_ = new_is_snapshot_supported; + } + } + + if (s.ok()) { + // Note that here we erase the associated cf_info of the to-be-dropped + // cfd before its ref-count goes to zero to avoid having to erase cf_info + // later inside db_mutex. + EraseThreadStatusCfInfo(cfd); + assert(cfd->IsDropped()); + auto* mutable_cf_options = cfd->GetLatestMutableCFOptions(); + max_total_in_memory_state_ -= mutable_cf_options->write_buffer_size * + mutable_cf_options->max_write_buffer_number; + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "Dropped column family with id %u\n", cfd->GetID()); + + if (!options_persist_status.ok()) { + if (db_options_.fail_if_options_file_error) { + s = Status::IOError( + "ColumnFamily has been dropped, but unable to persist " + "options in DropColumnFamily()", + options_persist_status.ToString().c_str()); + } + Warn(db_options_.info_log, + "Unable to persist options in DropColumnFamily() -- %s", + options_persist_status.ToString().c_str()); + } + } else { + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "Dropping column family with id %u FAILED -- %s\n", + cfd->GetID(), s.ToString().c_str()); + } + + return s; +} + +bool DBImpl::KeyMayExist(const ReadOptions& read_options, + ColumnFamilyHandle* column_family, const Slice& key, + std::string* value, bool* value_found) { + if (value_found != nullptr) { + // falsify later if key-may-exist but can't fetch value + *value_found = true; + } + ReadOptions roptions = read_options; + roptions.read_tier = kBlockCacheTier; // read from block cache only + auto s = GetImpl(roptions, column_family, key, value, value_found); + + // If block_cache is enabled and the index block of the table is + // not present in block_cache, the return value will be Status::Incomplete. + // In this case, the key may still exist in the table.
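Because the probe above is clamped to kBlockCacheTier, KeyMayExist can return false positives but never reads SST blocks from disk. A usage sketch:

```cpp
std::string value;
bool value_found = false;
if (db->KeyMayExist(rocksdb::ReadOptions(), cf, key, &value, &value_found)) {
  if (value_found) {
    // value was fetched cheaply from the memtable or block cache
  } else {
    // key *may* exist; a full Get() is needed to confirm
  }
} else {
  // key is definitely not present
}
```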
+ return s.ok() || s.IsIncomplete(); +} + +Iterator* DBImpl::NewIterator(const ReadOptions& read_options, + ColumnFamilyHandle* column_family) { + if (read_options.read_tier == kPersistedTier) { + return NewErrorIterator(Status::NotSupported( + "ReadTier::kPersistedData is not yet supported in iterators.")); + } + auto cfh = reinterpret_cast(column_family); + auto cfd = cfh->cfd(); + + XFUNC_TEST("", "managed_new", managed_new1, xf_manage_new, + reinterpret_cast(this), + const_cast(&read_options), is_snapshot_supported_); + if (read_options.managed) { +#ifdef ROCKSDB_LITE + // not supported in lite version + return NewErrorIterator(Status::InvalidArgument( + "Managed Iterators not supported in RocksDBLite.")); +#else + if ((read_options.tailing) || (read_options.snapshot != nullptr) || + (is_snapshot_supported_)) { + return new ManagedIterator(this, read_options, cfd); + } + // Managed iter not supported + return NewErrorIterator(Status::InvalidArgument( + "Managed Iterators not supported without snapshots.")); +#endif + } else if (read_options.tailing) { +#ifdef ROCKSDB_LITE + // not supported in lite version + return nullptr; +#else + SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_); + auto iter = new ForwardIterator(this, read_options, cfd, sv); + return NewDBIterator( + env_, *cfd->ioptions(), cfd->user_comparator(), iter, + kMaxSequenceNumber, + sv->mutable_cf_options.max_sequential_skip_in_iterations, + sv->version_number, read_options.iterate_upper_bound, + read_options.prefix_same_as_start, read_options.pin_data); +#endif + } else { + SequenceNumber latest_snapshot = versions_->LastSequence(); + SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_); + + auto snapshot = + read_options.snapshot != nullptr + ? reinterpret_cast( + read_options.snapshot)->number_ + : latest_snapshot; + + // Try to generate a DB iterator tree in continuous memory area to be + // cache friendly. Here is an example of result: + // +-------------------------------+ + // | | + // | ArenaWrappedDBIter | + // | + | + // | +---> Inner Iterator ------------+ + // | | | | + // | | +-- -- -- -- -- -- -- --+ | + // | +--- | Arena | | + // | | | | + // | Allocated Memory: | | + // | | +-------------------+ | + // | | | DBIter | <---+ + // | | + | + // | | | +-> iter_ ------------+ + // | | | | | + // | | +-------------------+ | + // | | | MergingIterator | <---+ + // | | + | + // | | | +->child iter1 ------------+ + // | | | | | | + // | | +->child iter2 ----------+ | + // | | | | | | | + // | | | +->child iter3 --------+ | | + // | | | | | | + // | | +-------------------+ | | | + // | | | Iterator1 | <--------+ + // | | +-------------------+ | | + // | | | Iterator2 | <------+ + // | | +-------------------+ | + // | | | Iterator3 | <----+ + // | | +-------------------+ + // | | | + // +-------+-----------------------+ + // + // ArenaWrappedDBIter inlines an arena area where all the iterators in + // the iterator tree are allocated in the order of being accessed when + // querying. + // Laying out the iterators in the order of being accessed makes it more + // likely that any iterator pointer is close to the iterator it points to so + // that they are likely to be in the same cache line and/or page. 
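For callers, all of the arena plumbing described above is invisible; the returned object behaves like any Iterator, while DBIter, MergingIterator and the child iterators share one cache-friendly allocation. The usual scan pattern (sketch):

```cpp
#include <memory>
#include "rocksdb/db.h"

std::unique_ptr<rocksdb::Iterator> it(
    db->NewIterator(rocksdb::ReadOptions()));
for (it->SeekToFirst(); it->Valid(); it->Next()) {
  // it->key(), it->value()
}
// Check it->status() after the scan; iteration errors are reported there.
```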
+ ArenaWrappedDBIter* db_iter = NewArenaWrappedDbIterator( + env_, *cfd->ioptions(), cfd->user_comparator(), snapshot, + sv->mutable_cf_options.max_sequential_skip_in_iterations, + sv->version_number, read_options.iterate_upper_bound, + read_options.prefix_same_as_start, read_options.pin_data); + + InternalIterator* internal_iter = + NewInternalIterator(read_options, cfd, sv, db_iter->GetArena()); + db_iter->SetIterUnderDBIter(internal_iter); + + return db_iter; + } + // To stop compiler from complaining + return nullptr; +} + +Status DBImpl::NewIterators( + const ReadOptions& read_options, + const std::vector<ColumnFamilyHandle*>& column_families, + std::vector<Iterator*>* iterators) { + if (read_options.read_tier == kPersistedTier) { + return Status::NotSupported( + "ReadTier::kPersistedData is not yet supported in iterators."); + } + iterators->clear(); + iterators->reserve(column_families.size()); + XFUNC_TEST("", "managed_new", managed_new1, xf_manage_new, + reinterpret_cast<DBImpl*>(this), + const_cast<ReadOptions*>(&read_options), is_snapshot_supported_); + if (read_options.managed) { +#ifdef ROCKSDB_LITE + return Status::InvalidArgument( + "Managed iterator not supported in RocksDB lite"); +#else + if ((!read_options.tailing) && (read_options.snapshot == nullptr) && + (!is_snapshot_supported_)) { + return Status::InvalidArgument( + "Managed iterator not supported without snapshots"); + } + for (auto cfh : column_families) { + auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd(); + auto iter = new ManagedIterator(this, read_options, cfd); + iterators->push_back(iter); + } +#endif + } else if (read_options.tailing) { +#ifdef ROCKSDB_LITE + return Status::InvalidArgument( + "Tailing iterator not supported in RocksDB lite"); +#else + for (auto cfh : column_families) { + auto cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd(); + SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_); + auto iter = new ForwardIterator(this, read_options, cfd, sv); + iterators->push_back(NewDBIterator( + env_, *cfd->ioptions(), cfd->user_comparator(), iter, + kMaxSequenceNumber, + sv->mutable_cf_options.max_sequential_skip_in_iterations, + sv->version_number, nullptr, false, read_options.pin_data)); + } +#endif + } else { + SequenceNumber latest_snapshot = versions_->LastSequence(); + + for (size_t i = 0; i < column_families.size(); ++i) { + auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>( + column_families[i])->cfd(); + SuperVersion* sv = cfd->GetReferencedSuperVersion(&mutex_); + + auto snapshot = + read_options.snapshot != nullptr + ? reinterpret_cast<const SnapshotImpl*>( + read_options.snapshot)->number_ + : latest_snapshot; + + ArenaWrappedDBIter* db_iter = NewArenaWrappedDbIterator( + env_, *cfd->ioptions(), cfd->user_comparator(), snapshot, + sv->mutable_cf_options.max_sequential_skip_in_iterations, + sv->version_number, nullptr, false, read_options.pin_data); + InternalIterator* internal_iter = + NewInternalIterator(read_options, cfd, sv, db_iter->GetArena()); + db_iter->SetIterUnderDBIter(internal_iter); + iterators->push_back(db_iter); + } + } + + return Status::OK(); +} + +const Snapshot* DBImpl::GetSnapshot() { return GetSnapshotImpl(false); } + +#ifndef ROCKSDB_LITE +const Snapshot* DBImpl::GetSnapshotForWriteConflictBoundary() { + return GetSnapshotImpl(true); +} +#endif // ROCKSDB_LITE + +const Snapshot* DBImpl::GetSnapshotImpl(bool is_write_conflict_boundary) { + int64_t unix_time = 0; + env_->GetCurrentTime(&unix_time); // Ignore error + SnapshotImpl* s = new SnapshotImpl; + + InstrumentedMutexLock l(&mutex_); + // returns null if the underlying memtable does not support snapshot.
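GetSnapshotImpl (continued below) hands out a SnapshotImpl pinned at LastSequence(), or null when some memtable cannot support snapshots, so callers should check the result and release it explicitly. Sketch:

```cpp
const rocksdb::Snapshot* snap = db->GetSnapshot();  // may be nullptr, see above
if (snap != nullptr) {
  rocksdb::ReadOptions ro;
  ro.snapshot = snap;               // all reads see the DB as of `snap`
  std::string value;
  rocksdb::Status s = db->Get(ro, key, &value);
  db->ReleaseSnapshot(snap);        // unpins the sequence number
}
```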
+ if (!is_snapshot_supported_) { + delete s; + return nullptr; + } + return snapshots_.New(s, versions_->LastSequence(), unix_time, + is_write_conflict_boundary); +} + +void DBImpl::ReleaseSnapshot(const Snapshot* s) { + const SnapshotImpl* casted_s = reinterpret_cast(s); + { + InstrumentedMutexLock l(&mutex_); + snapshots_.Delete(casted_s); + } + delete casted_s; +} + +// Convenience methods +Status DBImpl::Put(const WriteOptions& o, ColumnFamilyHandle* column_family, + const Slice& key, const Slice& val) { + return DB::Put(o, column_family, key, val); +} + +Status DBImpl::Merge(const WriteOptions& o, ColumnFamilyHandle* column_family, + const Slice& key, const Slice& val) { + auto cfh = reinterpret_cast(column_family); + if (!cfh->cfd()->ioptions()->merge_operator) { + return Status::NotSupported("Provide a merge_operator when opening DB"); + } else { + return DB::Merge(o, column_family, key, val); + } +} + +Status DBImpl::Delete(const WriteOptions& write_options, + ColumnFamilyHandle* column_family, const Slice& key) { + return DB::Delete(write_options, column_family, key); +} + +Status DBImpl::SingleDelete(const WriteOptions& write_options, + ColumnFamilyHandle* column_family, + const Slice& key) { + return DB::SingleDelete(write_options, column_family, key); +} + +Status DBImpl::Write(const WriteOptions& write_options, WriteBatch* my_batch) { + return WriteImpl(write_options, my_batch, nullptr, nullptr); +} + +#ifndef ROCKSDB_LITE +Status DBImpl::WriteWithCallback(const WriteOptions& write_options, + WriteBatch* my_batch, + WriteCallback* callback) { + return WriteImpl(write_options, my_batch, callback, nullptr); +} +#endif // ROCKSDB_LITE + +Status DBImpl::WriteImpl(const WriteOptions& write_options, + WriteBatch* my_batch, WriteCallback* callback, + uint64_t* log_used, uint64_t log_ref, + bool disable_memtable) { + if (my_batch == nullptr) { + return Status::Corruption("Batch is nullptr!"); + } + if (write_options.timeout_hint_us != 0) { + return Status::InvalidArgument("timeout_hint_us is deprecated"); + } + + Status status; + + bool xfunc_attempted_write = false; + XFUNC_TEST("transaction", "transaction_xftest_write_impl", + xf_transaction_write1, xf_transaction_write, write_options, + db_options_, my_batch, callback, this, &status, + &xfunc_attempted_write); + if (xfunc_attempted_write) { + // Test already did the write + return status; + } + + PERF_TIMER_GUARD(write_pre_and_post_process_time); + WriteThread::Writer w; + w.batch = my_batch; + w.sync = write_options.sync; + w.disableWAL = write_options.disableWAL; + w.disable_memtable = disable_memtable; + w.in_batch_group = false; + w.callback = callback; + w.log_ref = log_ref; + + if (!write_options.disableWAL) { + RecordTick(stats_, WRITE_WITH_WAL); + } + + StopWatch write_sw(env_, db_options_.statistics.get(), DB_WRITE); + + write_thread_.JoinBatchGroup(&w); + if (w.state == WriteThread::STATE_PARALLEL_FOLLOWER) { + // we are a non-leader in a parallel group + PERF_TIMER_GUARD(write_memtable_time); + + if (log_used != nullptr) { + *log_used = w.log_used; + } + + if (w.ShouldWriteToMemtable()) { + ColumnFamilyMemTablesImpl column_family_memtables( + versions_->GetColumnFamilySet()); + WriteBatchInternal::SetSequence(w.batch, w.sequence); + w.status = WriteBatchInternal::InsertInto( + &w, &column_family_memtables, &flush_scheduler_, + write_options.ignore_missing_column_families, 0 /*log_number*/, this, + true /*concurrent_memtable_writes*/); + } + + if (write_thread_.CompleteParallelWorker(&w)) { + // we're responsible for 
early exit + auto last_sequence = w.parallel_group->last_sequence; + SetTickerCount(stats_, SEQUENCE_NUMBER, last_sequence); + versions_->SetLastSequence(last_sequence); + write_thread_.EarlyExitParallelGroup(&w); + } + assert(w.state == WriteThread::STATE_COMPLETED); + // STATE_COMPLETED conditional below handles exit + + status = w.FinalStatus(); + } + if (w.state == WriteThread::STATE_COMPLETED) { + if (log_used != nullptr) { + *log_used = w.log_used; + } + // write is complete and leader has updated sequence + RecordTick(stats_, WRITE_DONE_BY_OTHER); + return w.FinalStatus(); + } + // else we are the leader of the write batch group + assert(w.state == WriteThread::STATE_GROUP_LEADER); + + WriteContext context; + mutex_.Lock(); + + if (!write_options.disableWAL) { + default_cf_internal_stats_->AddDBStats(InternalStats::WRITE_WITH_WAL, 1); + } + + RecordTick(stats_, WRITE_DONE_BY_SELF); + default_cf_internal_stats_->AddDBStats(InternalStats::WRITE_DONE_BY_SELF, 1); + + // Once reaches this point, the current writer "w" will try to do its write + // job. It may also pick up some of the remaining writers in the "writers_" + // when it finds suitable, and finish them in the same write batch. + // This is how a write job could be done by the other writer. + assert(!single_column_family_mode_ || + versions_->GetColumnFamilySet()->NumberOfColumnFamilies() == 1); + + uint64_t max_total_wal_size = (db_options_.max_total_wal_size == 0) + ? 4 * max_total_in_memory_state_ + : db_options_.max_total_wal_size; + if (UNLIKELY(!single_column_family_mode_ && + alive_log_files_.begin()->getting_flushed == false && + total_log_size_ > max_total_wal_size)) { + uint64_t flush_column_family_if_log_file = alive_log_files_.begin()->number; + alive_log_files_.begin()->getting_flushed = true; + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "Flushing all column families with data in WAL number %" PRIu64 + ". Total log size is %" PRIu64 " while max_total_wal_size is %" PRIu64, + flush_column_family_if_log_file, total_log_size_, max_total_wal_size); + // no need to refcount because drop is happening in write thread, so can't + // happen while we're in the write thread + for (auto cfd : *versions_->GetColumnFamilySet()) { + if (cfd->IsDropped()) { + continue; + } + if (cfd->GetLogNumber() <= flush_column_family_if_log_file) { + status = SwitchMemtable(cfd, &context); + if (!status.ok()) { + break; + } + cfd->imm()->FlushRequested(); + SchedulePendingFlush(cfd); + } + } + MaybeScheduleFlushOrCompaction(); + } else if (UNLIKELY(write_buffer_manager_->ShouldFlush())) { + // Before a new memtable is added in SwitchMemtable(), + // write_buffer_manager_->ShouldFlush() will keep returning true. If another + // thread is writing to another DB with the same write buffer, they may also + // be flushed. We may end up with flushing much more DBs than needed. It's + // suboptimal but still correct. + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "Flushing column family with largest mem table size. 
Write buffer is " + "using %" PRIu64 " bytes out of a total of %" PRIu64 ".", + write_buffer_manager_->memory_usage(), + write_buffer_manager_->buffer_size()); + // no need to refcount because drop is happening in write thread, so can't + // happen while we're in the write thread + ColumnFamilyData* largest_cfd = nullptr; + size_t largest_cfd_size = 0; + + for (auto cfd : *versions_->GetColumnFamilySet()) { + if (cfd->IsDropped()) { + continue; + } + if (!cfd->mem()->IsEmpty()) { + // We only consider active mem table, hoping immutable memtable is + // already in the process of flushing. + size_t cfd_size = cfd->mem()->ApproximateMemoryUsage(); + if (largest_cfd == nullptr || cfd_size > largest_cfd_size) { + largest_cfd = cfd; + largest_cfd_size = cfd_size; + } + } + } + if (largest_cfd != nullptr) { + status = SwitchMemtable(largest_cfd, &context); + if (status.ok()) { + largest_cfd->imm()->FlushRequested(); + SchedulePendingFlush(largest_cfd); + MaybeScheduleFlushOrCompaction(); + } + } + } + + if (UNLIKELY(status.ok() && !bg_error_.ok())) { + status = bg_error_; + } + + if (UNLIKELY(status.ok() && !flush_scheduler_.Empty())) { + status = ScheduleFlushes(&context); + } + + if (UNLIKELY(status.ok() && (write_controller_.IsStopped() || + write_controller_.NeedsDelay()))) { + PERF_TIMER_STOP(write_pre_and_post_process_time); + PERF_TIMER_GUARD(write_delay_time); + // We don't know the size of the current batch, so we always use the size + // of the previous one. It might create a fairness issue in that expiration + // might happen for smaller writes while larger writes go through. + // Can optimize it if it is an issue. + status = DelayWrite(last_batch_group_size_); + PERF_TIMER_START(write_pre_and_post_process_time); + } + + uint64_t last_sequence = versions_->LastSequence(); + WriteThread::Writer* last_writer = &w; + autovector<WriteThread::Writer*> write_group; + bool need_log_sync = !write_options.disableWAL && write_options.sync; + bool need_log_dir_sync = need_log_sync && !log_dir_synced_; + + if (status.ok()) { + if (need_log_sync) { + while (logs_.front().getting_synced) { + log_sync_cv_.Wait(); + } + for (auto& log : logs_) { + assert(!log.getting_synced); + log.getting_synced = true; + } + } + + // Add to log and apply to memtable. We can release the lock + // during this phase since &w is currently responsible for logging + // and protects against concurrent loggers and concurrent writes + // into memtables + } + + mutex_.Unlock(); + + // At this point the mutex is unlocked + + bool exit_completed_early = false; + last_batch_group_size_ = + write_thread_.EnterAsBatchGroupLeader(&w, &last_writer, &write_group); + + if (status.ok()) { + // Rules for when we can update the memtable concurrently + // 1. supported by memtable + // 2. Puts are not okay if inplace_update_support + // 3. Deletes or SingleDeletes are not okay if filtering deletes + // (controlled by both batch and memtable setting) + // 4. Merges are not okay + // + // Rules 1..3 are enforced by checking the options + // during startup (CheckConcurrentWritesSupported), so if + // options.allow_concurrent_memtable_write is true then they can be + // assumed to be true. Rule 4 is checked for each batch. We could + // relax rules 2 and 3 if we could prevent write batches from referring + // more than once to a particular key.
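Rules 1 through 3 above are exactly what CheckConcurrentWritesSupported validates at open time; a caller opts in through the options. A configuration sketch (the adaptive-yield knob is a companion option in this version):

```cpp
rocksdb::Options options;
options.create_if_missing = true;
options.allow_concurrent_memtable_write = true;     // rules 1-3 checked at open
options.enable_write_thread_adaptive_yield = true;  // spin briefly before blocking
// Note: batches containing Merge still take the serial path (rule 4),
// and inplace_update_support must remain off.
```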
+ bool parallel = + db_options_.allow_concurrent_memtable_write && write_group.size() > 1; + int total_count = 0; + uint64_t total_byte_size = 0; + for (auto writer : write_group) { + if (writer->CheckCallback(this)) { + if (writer->ShouldWriteToMemtable()) { + total_count += WriteBatchInternal::Count(writer->batch); + parallel = parallel && !writer->batch->HasMerge(); + } + + if (writer->ShouldWriteToWAL()) { + total_byte_size = WriteBatchInternal::AppendedByteSize( + total_byte_size, WriteBatchInternal::ByteSize(writer->batch)); + } + } + } + + const SequenceNumber current_sequence = last_sequence + 1; + last_sequence += total_count; + + // Record statistics + RecordTick(stats_, NUMBER_KEYS_WRITTEN, total_count); + RecordTick(stats_, BYTES_WRITTEN, total_byte_size); + MeasureTime(stats_, BYTES_PER_WRITE, total_byte_size); + PERF_TIMER_STOP(write_pre_and_post_process_time); + + if (write_options.disableWAL) { + has_unpersisted_data_ = true; + } + + uint64_t log_size = 0; + if (!write_options.disableWAL) { + PERF_TIMER_GUARD(write_wal_time); + + WriteBatch* merged_batch = nullptr; + if (write_group.size() == 1 && write_group[0]->ShouldWriteToWAL()) { + merged_batch = write_group[0]->batch; + write_group[0]->log_used = logfile_number_; + } else { + // WAL needs all of the batches flattened into a single batch. + // We could avoid copying here with an iov-like AddRecord + // interface + merged_batch = &tmp_batch_; + for (auto writer : write_group) { + if (writer->ShouldWriteToWAL()) { + WriteBatchInternal::Append(merged_batch, writer->batch); + } + writer->log_used = logfile_number_; + } + } + + if (log_used != nullptr) { + *log_used = logfile_number_; + } + + WriteBatchInternal::SetSequence(merged_batch, current_sequence); + + Slice log_entry = WriteBatchInternal::Contents(merged_batch); + status = logs_.back().writer->AddRecord(log_entry); + total_log_size_ += log_entry.size(); + alive_log_files_.back().AddSize(log_entry.size()); + log_empty_ = false; + log_size = log_entry.size(); + RecordTick(stats_, WAL_FILE_BYTES, log_size); + if (status.ok() && need_log_sync) { + RecordTick(stats_, WAL_FILE_SYNCED); + StopWatch sw(env_, stats_, WAL_FILE_SYNC_MICROS); + // It's safe to access logs_ with unlocked mutex_ here because: + // - we've set getting_synced=true for all logs, + // so other threads won't pop from logs_ while we're here, + // - only writer thread can push to logs_, and we're in + // writer thread, so no one will push to logs_, + // - as long as other threads don't modify it, it's safe to read + // from std::deque from multiple threads concurrently. + for (auto& log : logs_) { + status = log.writer->file()->Sync(db_options_.use_fsync); + if (!status.ok()) { + break; + } + } + if (status.ok() && need_log_dir_sync) { + // We only sync WAL directory the first time WAL syncing is + // requested, so that in case users never turn on WAL sync, + // we can avoid the disk I/O in the write code path. + status = directories_.GetWalDir()->Fsync(); + } + } + + if (merged_batch == &tmp_batch_) { + tmp_batch_.Clear(); + } + } + if (status.ok()) { + PERF_TIMER_GUARD(write_memtable_time); + + { + // Update stats while we are an exclusive group leader, so we know + // that nobody else can be writing to these particular stats. + // We're optimistic, updating the stats before we successfully + // commit. That lets us release our leader status early in + // some cases. 
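From the application's point of view, the flattening above means any number of concurrent Write() calls may be persisted as a single WAL record, and durability is chosen per write. Sketch:

```cpp
rocksdb::WriteBatch batch;
batch.Put("k1", "v1");
batch.Delete("k2");

rocksdb::WriteOptions wo;
wo.sync = true;           // forces the fsync path guarded by need_log_sync
// wo.disableWAL = true;  // alternative: skip the WAL entirely (no crash safety)
rocksdb::Status s = db->Write(wo, &batch);
```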
+ auto stats = default_cf_internal_stats_; + stats->AddDBStats(InternalStats::BYTES_WRITTEN, total_byte_size); + stats->AddDBStats(InternalStats::NUMBER_KEYS_WRITTEN, total_count); + if (!write_options.disableWAL) { + if (write_options.sync) { + stats->AddDBStats(InternalStats::WAL_FILE_SYNCED, 1); + } + stats->AddDBStats(InternalStats::WAL_FILE_BYTES, log_size); + } + uint64_t for_other = write_group.size() - 1; + if (for_other > 0) { + stats->AddDBStats(InternalStats::WRITE_DONE_BY_OTHER, for_other); + if (!write_options.disableWAL) { + stats->AddDBStats(InternalStats::WRITE_WITH_WAL, for_other); + } + } + } + + if (!parallel) { + status = WriteBatchInternal::InsertInto( + write_group, current_sequence, column_family_memtables_.get(), + &flush_scheduler_, write_options.ignore_missing_column_families, + 0 /*log_number*/, this); + + if (status.ok()) { + // There were no write failures. Set leader's status + // in case the write callback returned a non-ok status. + status = w.FinalStatus(); + } + + } else { + WriteThread::ParallelGroup pg; + pg.leader = &w; + pg.last_writer = last_writer; + pg.last_sequence = last_sequence; + pg.early_exit_allowed = !need_log_sync; + pg.running.store(static_cast(write_group.size()), + std::memory_order_relaxed); + write_thread_.LaunchParallelFollowers(&pg, current_sequence); + + if (w.ShouldWriteToMemtable()) { + // do leader write + ColumnFamilyMemTablesImpl column_family_memtables( + versions_->GetColumnFamilySet()); + assert(w.sequence == current_sequence); + WriteBatchInternal::SetSequence(w.batch, w.sequence); + w.status = WriteBatchInternal::InsertInto( + &w, &column_family_memtables, &flush_scheduler_, + write_options.ignore_missing_column_families, 0 /*log_number*/, + this, true /*concurrent_memtable_writes*/); + } + + // CompleteParallelWorker returns true if this thread should + // handle exit, false means somebody else did + exit_completed_early = !write_thread_.CompleteParallelWorker(&w); + status = w.FinalStatus(); + } + + if (!exit_completed_early && w.status.ok()) { + SetTickerCount(stats_, SEQUENCE_NUMBER, last_sequence); + versions_->SetLastSequence(last_sequence); + if (!need_log_sync) { + write_thread_.ExitAsBatchGroupLeader(&w, last_writer, w.status); + exit_completed_early = true; + } + } + + // A non-OK status here indicates that the state implied by the + // WAL has diverged from the in-memory state. This could be + // because of a corrupt write_batch (very bad), or because the + // client specified an invalid column family and didn't specify + // ignore_missing_column_families. + // + // Is setting bg_error_ enough here? This will at least stop + // compaction and fail any further writes. 
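The leader/follower handshake used here, and in the STATE_PARALLEL_FOLLOWER branch earlier, can be condensed to the following shape. This is a simplified sketch of the idea, not the actual WriteThread implementation:

```cpp
#include <atomic>
#include <cstdint>

struct ParallelGroupSketch {
  std::atomic<int> running;  // writers still inserting into memtables
  uint64_t last_sequence;    // published by whichever writer finishes last
};

// Each writer (leader included) inserts its own batch concurrently, then:
inline bool FinishParallelWork(ParallelGroupSketch* pg) {
  // The last finisher returns true and is responsible for making
  // pg->last_sequence visible (the real code calls SetLastSequence()).
  return pg->running.fetch_sub(1, std::memory_order_acq_rel) == 1;
}
```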
+ if (!status.ok() && bg_error_.ok() && !w.CallbackFailed()) { + bg_error_ = status; + } + } + } + PERF_TIMER_START(write_pre_and_post_process_time); + + if (db_options_.paranoid_checks && !status.ok() && !w.CallbackFailed() && + !status.IsBusy()) { + mutex_.Lock(); + if (bg_error_.ok()) { + bg_error_ = status; // stop compaction & fail any further writes + } + mutex_.Unlock(); + } + + if (need_log_sync) { + mutex_.Lock(); + MarkLogsSynced(logfile_number_, need_log_dir_sync, status); + mutex_.Unlock(); + } + + if (!exit_completed_early) { + write_thread_.ExitAsBatchGroupLeader(&w, last_writer, w.status); + } + + return status; +} + +// REQUIRES: mutex_ is held +// REQUIRES: this thread is currently at the front of the writer queue +Status DBImpl::DelayWrite(uint64_t num_bytes) { + uint64_t time_delayed = 0; + bool delayed = false; + { + StopWatch sw(env_, stats_, WRITE_STALL, &time_delayed); + auto delay = write_controller_.GetDelay(env_, num_bytes); + if (delay > 0) { + mutex_.Unlock(); + delayed = true; + TEST_SYNC_POINT("DBImpl::DelayWrite:Sleep"); + // hopefully we don't have to sleep more than 2 billion microseconds + env_->SleepForMicroseconds(static_cast(delay)); + mutex_.Lock(); + } + + while (bg_error_.ok() && write_controller_.IsStopped()) { + delayed = true; + TEST_SYNC_POINT("DBImpl::DelayWrite:Wait"); + bg_cv_.Wait(); + } + } + if (delayed) { + default_cf_internal_stats_->AddDBStats(InternalStats::WRITE_STALL_MICROS, + time_delayed); + RecordTick(stats_, STALL_MICROS, time_delayed); + } + + return bg_error_; +} + +Status DBImpl::ScheduleFlushes(WriteContext* context) { + ColumnFamilyData* cfd; + while ((cfd = flush_scheduler_.TakeNextColumnFamily()) != nullptr) { + auto status = SwitchMemtable(cfd, context); + if (cfd->Unref()) { + delete cfd; + } + if (!status.ok()) { + return status; + } + } + return Status::OK(); +} + +#ifndef ROCKSDB_LITE +void DBImpl::NotifyOnMemTableSealed(ColumnFamilyData* cfd, + const MemTableInfo& mem_table_info) { + if (db_options_.listeners.size() == 0U) { + return; + } + if (shutting_down_.load(std::memory_order_acquire)) { + return; + } + + for (auto listener : db_options_.listeners) { + listener->OnMemTableSealed(mem_table_info); + } +} +#endif // ROCKSDB_LITE + +// REQUIRES: mutex_ is held +// REQUIRES: this thread is currently at the front of the writer queue +Status DBImpl::SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context) { + mutex_.AssertHeld(); + unique_ptr lfile; + log::Writer* new_log = nullptr; + MemTable* new_mem = nullptr; + + // Attempt to switch to a new memtable and trigger flush of old. + // Do this without holding the dbmutex lock. + assert(versions_->prev_log_number() == 0); + bool creating_new_log = !log_empty_; + uint64_t recycle_log_number = 0; + if (creating_new_log && db_options_.recycle_log_file_num && + !log_recycle_files.empty()) { + recycle_log_number = log_recycle_files.front(); + log_recycle_files.pop_front(); + } + uint64_t new_log_number = + creating_new_log ? 
versions_->NewFileNumber() : logfile_number_; + SuperVersion* new_superversion = nullptr; + const MutableCFOptions mutable_cf_options = *cfd->GetLatestMutableCFOptions(); + + // Set current_memtable_info for memtable sealed callback +#ifndef ROCKSDB_LITE + MemTableInfo memtable_info; + memtable_info.cf_name = cfd->GetName(); + memtable_info.first_seqno = cfd->mem()->GetFirstSequenceNumber(); + memtable_info.earliest_seqno = cfd->mem()->GetEarliestSequenceNumber(); + memtable_info.num_entries = cfd->mem()->num_entries(); + memtable_info.num_deletes = cfd->mem()->num_deletes(); +#endif // ROCKSDB_LITE + // Log this later after lock release. It may be outdated, e.g., if background + // flush happens before logging, but that should be ok. + int num_imm_unflushed = cfd->imm()->NumNotFlushed(); + mutex_.Unlock(); + Status s; + { + if (creating_new_log) { + EnvOptions opt_env_opt = + env_->OptimizeForLogWrite(env_options_, db_options_); + if (recycle_log_number) { + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "reusing log %" PRIu64 " from recycle list\n", recycle_log_number); + s = env_->ReuseWritableFile( + LogFileName(db_options_.wal_dir, new_log_number), + LogFileName(db_options_.wal_dir, recycle_log_number), &lfile, + opt_env_opt); + } else { + s = NewWritableFile(env_, + LogFileName(db_options_.wal_dir, new_log_number), + &lfile, opt_env_opt); + } + if (s.ok()) { + // Our final size should be less than write_buffer_size + // (compression, etc) but err on the side of caution. + lfile->SetPreallocationBlockSize( + mutable_cf_options.write_buffer_size / 10 + + mutable_cf_options.write_buffer_size); + unique_ptr<WritableFileWriter> file_writer( + new WritableFileWriter(std::move(lfile), opt_env_opt)); + new_log = new log::Writer(std::move(file_writer), new_log_number, + db_options_.recycle_log_file_num > 0); + } + } + + if (s.ok()) { + SequenceNumber seq = versions_->LastSequence(); + new_mem = cfd->ConstructNewMemtable(mutable_cf_options, seq); + new_superversion = new SuperVersion(); + } + +#ifndef ROCKSDB_LITE + // PLEASE NOTE: We assume that there are no failable operations + // after lock is acquired below since we are already notifying + // client about mem table becoming immutable. + NotifyOnMemTableSealed(cfd, memtable_info); +#endif //ROCKSDB_LITE + } + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "[%s] New memtable created with log file: #%" PRIu64 + ". Immutable memtables: %d.\n", + cfd->GetName().c_str(), new_log_number, num_imm_unflushed); + mutex_.Lock(); + if (!s.ok()) { + // how do we fail if we're not creating new log? + assert(creating_new_log); + assert(!new_mem); + assert(!new_log); + return s; + } + if (creating_new_log) { + logfile_number_ = new_log_number; + assert(new_log != nullptr); + log_empty_ = true; + log_dir_synced_ = false; + logs_.emplace_back(logfile_number_, new_log); + alive_log_files_.push_back(LogFileNumberSize(logfile_number_)); + for (auto loop_cfd : *versions_->GetColumnFamilySet()) { + // all this is just optimization to delete logs that + // are no longer needed -- if CF is empty, that means it + // doesn't need that particular log to stay alive, so we just + // advance the log number.
no need to persist this in the manifest + if (loop_cfd->mem()->GetFirstSequenceNumber() == 0 && + loop_cfd->imm()->NumNotFlushed() == 0) { + loop_cfd->SetLogNumber(logfile_number_); + } + } + } + cfd->mem()->SetNextLogNumber(logfile_number_); + cfd->imm()->Add(cfd->mem(), &context->memtables_to_free_); + new_mem->Ref(); + cfd->SetMemtable(new_mem); + context->superversions_to_free_.push_back(InstallSuperVersionAndScheduleWork( + cfd, new_superversion, mutable_cf_options)); + return s; +} + +#ifndef ROCKSDB_LITE +Status DBImpl::GetPropertiesOfAllTables(ColumnFamilyHandle* column_family, + TablePropertiesCollection* props) { + auto cfh = reinterpret_cast(column_family); + auto cfd = cfh->cfd(); + + // Increment the ref count + mutex_.Lock(); + auto version = cfd->current(); + version->Ref(); + mutex_.Unlock(); + + auto s = version->GetPropertiesOfAllTables(props); + + // Decrement the ref count + mutex_.Lock(); + version->Unref(); + mutex_.Unlock(); + + return s; +} + +Status DBImpl::GetPropertiesOfTablesInRange(ColumnFamilyHandle* column_family, + const Range* range, std::size_t n, + TablePropertiesCollection* props) { + auto cfh = reinterpret_cast(column_family); + auto cfd = cfh->cfd(); + + // Increment the ref count + mutex_.Lock(); + auto version = cfd->current(); + version->Ref(); + mutex_.Unlock(); + + auto s = version->GetPropertiesOfTablesInRange(range, n, props); + + // Decrement the ref count + mutex_.Lock(); + version->Unref(); + mutex_.Unlock(); + + return s; +} + +#endif // ROCKSDB_LITE + +const std::string& DBImpl::GetName() const { + return dbname_; +} + +Env* DBImpl::GetEnv() const { + return env_; +} + +const Options& DBImpl::GetOptions(ColumnFamilyHandle* column_family) const { + auto cfh = reinterpret_cast(column_family); + return *cfh->cfd()->options(); +} + +const DBOptions& DBImpl::GetDBOptions() const { return db_options_; } + +bool DBImpl::GetProperty(ColumnFamilyHandle* column_family, + const Slice& property, std::string* value) { + const DBPropertyInfo* property_info = GetPropertyInfo(property); + value->clear(); + auto cfd = reinterpret_cast(column_family)->cfd(); + if (property_info == nullptr) { + return false; + } else if (property_info->handle_int) { + uint64_t int_value; + bool ret_value = + GetIntPropertyInternal(cfd, *property_info, false, &int_value); + if (ret_value) { + *value = ToString(int_value); + } + return ret_value; + } else if (property_info->handle_string) { + InstrumentedMutexLock l(&mutex_); + return cfd->internal_stats()->GetStringProperty(*property_info, property, + value); + } + // Shouldn't reach here since exactly one of handle_string and handle_int + // should be non-nullptr. 
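GetProperty and GetIntProperty above dispatch on whether the property has an integer or a string handler; aggregated integer properties are summed across all column families under the mutex. A sketch with a few well-known property names:

```cpp
uint64_t num_keys = 0;
db->GetIntProperty("rocksdb.estimate-num-keys", &num_keys);

std::string stats;
db->GetProperty("rocksdb.stats", &stats);  // string handler, takes the mutex

uint64_t mem_total = 0;  // summed over every column family
db->GetAggregatedIntProperty("rocksdb.size-all-mem-tables", &mem_total);
```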
+ assert(false); + return false; +} + +bool DBImpl::GetIntProperty(ColumnFamilyHandle* column_family, + const Slice& property, uint64_t* value) { + const DBPropertyInfo* property_info = GetPropertyInfo(property); + if (property_info == nullptr || property_info->handle_int == nullptr) { + return false; + } + auto cfd = reinterpret_cast(column_family)->cfd(); + return GetIntPropertyInternal(cfd, *property_info, false, value); +} + +bool DBImpl::GetIntPropertyInternal(ColumnFamilyData* cfd, + const DBPropertyInfo& property_info, + bool is_locked, uint64_t* value) { + assert(property_info.handle_int != nullptr); + if (!property_info.need_out_of_mutex) { + if (is_locked) { + mutex_.AssertHeld(); + return cfd->internal_stats()->GetIntProperty(property_info, value, this); + } else { + InstrumentedMutexLock l(&mutex_); + return cfd->internal_stats()->GetIntProperty(property_info, value, this); + } + } else { + SuperVersion* sv = nullptr; + if (!is_locked) { + sv = GetAndRefSuperVersion(cfd); + } else { + sv = cfd->GetSuperVersion(); + } + + bool ret = cfd->internal_stats()->GetIntPropertyOutOfMutex( + property_info, sv->current, value); + + if (!is_locked) { + ReturnAndCleanupSuperVersion(cfd, sv); + } + + return ret; + } +} + +bool DBImpl::GetAggregatedIntProperty(const Slice& property, + uint64_t* aggregated_value) { + const DBPropertyInfo* property_info = GetPropertyInfo(property); + if (property_info == nullptr || property_info->handle_int == nullptr) { + return false; + } + + uint64_t sum = 0; + { + // Needs mutex to protect the list of column families. + InstrumentedMutexLock l(&mutex_); + uint64_t value; + for (auto* cfd : *versions_->GetColumnFamilySet()) { + if (GetIntPropertyInternal(cfd, *property_info, true, &value)) { + sum += value; + } else { + return false; + } + } + } + *aggregated_value = sum; + return true; +} + +SuperVersion* DBImpl::GetAndRefSuperVersion(ColumnFamilyData* cfd) { + // TODO(ljin): consider using GetReferencedSuperVersion() directly + return cfd->GetThreadLocalSuperVersion(&mutex_); +} + +// REQUIRED: this function should only be called on the write thread or if the +// mutex is held. +SuperVersion* DBImpl::GetAndRefSuperVersion(uint32_t column_family_id) { + auto column_family_set = versions_->GetColumnFamilySet(); + auto cfd = column_family_set->GetColumnFamily(column_family_id); + if (!cfd) { + return nullptr; + } + + return GetAndRefSuperVersion(cfd); +} + +// REQUIRED: mutex is NOT held +SuperVersion* DBImpl::GetAndRefSuperVersionUnlocked(uint32_t column_family_id) { + ColumnFamilyData* cfd; + { + InstrumentedMutexLock l(&mutex_); + auto column_family_set = versions_->GetColumnFamilySet(); + cfd = column_family_set->GetColumnFamily(column_family_id); + } + + if (!cfd) { + return nullptr; + } + + return GetAndRefSuperVersion(cfd); +} + +void DBImpl::ReturnAndCleanupSuperVersion(ColumnFamilyData* cfd, + SuperVersion* sv) { + bool unref_sv = !cfd->ReturnThreadLocalSuperVersion(sv); + + if (unref_sv) { + // Release SuperVersion + if (sv->Unref()) { + { + InstrumentedMutexLock l(&mutex_); + sv->Cleanup(); + } + delete sv; + RecordTick(stats_, NUMBER_SUPERVERSION_CLEANUPS); + } + RecordTick(stats_, NUMBER_SUPERVERSION_RELEASES); + } +} + +// REQUIRED: this function should only be called on the write thread. 
+void DBImpl::ReturnAndCleanupSuperVersion(uint32_t column_family_id, + SuperVersion* sv) { + auto column_family_set = versions_->GetColumnFamilySet(); + auto cfd = column_family_set->GetColumnFamily(column_family_id); + + // If SuperVersion is held, and we successfully fetched a cfd using + // GetAndRefSuperVersion(), it must still exist. + assert(cfd != nullptr); + ReturnAndCleanupSuperVersion(cfd, sv); +} + +// REQUIRED: Mutex should NOT be held. +void DBImpl::ReturnAndCleanupSuperVersionUnlocked(uint32_t column_family_id, + SuperVersion* sv) { + ColumnFamilyData* cfd; + { + InstrumentedMutexLock l(&mutex_); + auto column_family_set = versions_->GetColumnFamilySet(); + cfd = column_family_set->GetColumnFamily(column_family_id); + } + + // If SuperVersion is held, and we successfully fetched a cfd using + // GetAndRefSuperVersion(), it must still exist. + assert(cfd != nullptr); + ReturnAndCleanupSuperVersion(cfd, sv); +} + +// REQUIRED: this function should only be called on the write thread or if the +// mutex is held. +ColumnFamilyHandle* DBImpl::GetColumnFamilyHandle(uint32_t column_family_id) { + ColumnFamilyMemTables* cf_memtables = column_family_memtables_.get(); + + if (!cf_memtables->Seek(column_family_id)) { + return nullptr; + } + + return cf_memtables->GetColumnFamilyHandle(); +} + +// REQUIRED: mutex is NOT held. +ColumnFamilyHandle* DBImpl::GetColumnFamilyHandleUnlocked( + uint32_t column_family_id) { + ColumnFamilyMemTables* cf_memtables = column_family_memtables_.get(); + + InstrumentedMutexLock l(&mutex_); + + if (!cf_memtables->Seek(column_family_id)) { + return nullptr; + } + + return cf_memtables->GetColumnFamilyHandle(); +} + +void DBImpl::GetApproximateSizes(ColumnFamilyHandle* column_family, + const Range* range, int n, uint64_t* sizes, + bool include_memtable) { + Version* v; + auto cfh = reinterpret_cast(column_family); + auto cfd = cfh->cfd(); + SuperVersion* sv = GetAndRefSuperVersion(cfd); + v = sv->current; + + for (int i = 0; i < n; i++) { + // Convert user_key into a corresponding internal key. + InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek); + InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek); + sizes[i] = versions_->ApproximateSize(v, k1.Encode(), k2.Encode()); + if (include_memtable) { + sizes[i] += sv->mem->ApproximateSize(k1.Encode(), k2.Encode()); + sizes[i] += sv->imm->ApproximateSize(k1.Encode(), k2.Encode()); + } + } + + ReturnAndCleanupSuperVersion(cfd, sv); +} + +std::list::iterator +DBImpl::CaptureCurrentFileNumberInPendingOutputs() { + // We need to remember the iterator of our insert, because after the + // background job is done, we need to remove that element from + // pending_outputs_. 
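For the GetApproximateSizes API defined above, a caller-side sketch using the column-family overload from this file; include_memtable=true adds the sv->mem / sv->imm estimates on top of the SST sizes:

```cpp
rocksdb::Range r("a", "z");  // user-key range [start, limit)
uint64_t size = 0;
db->GetApproximateSizes(db->DefaultColumnFamily(), &r, 1, &size,
                        /*include_memtable=*/true);
// `size` is an estimate only; it is derived from index metadata,
// not from scanning the data.
```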
+ pending_outputs_.push_back(versions_->current_next_file_number()); + auto pending_outputs_inserted_elem = pending_outputs_.end(); + --pending_outputs_inserted_elem; + return pending_outputs_inserted_elem; +} + +void DBImpl::ReleaseFileNumberFromPendingOutputs( + std::list::iterator v) { + pending_outputs_.erase(v); +} + +#ifndef ROCKSDB_LITE +Status DBImpl::GetUpdatesSince( + SequenceNumber seq, unique_ptr* iter, + const TransactionLogIterator::ReadOptions& read_options) { + + RecordTick(stats_, GET_UPDATES_SINCE_CALLS); + if (seq > versions_->LastSequence()) { + return Status::NotFound("Requested sequence not yet written in the db"); + } + return wal_manager_.GetUpdatesSince(seq, iter, read_options, versions_.get()); +} + +Status DBImpl::DeleteFile(std::string name) { + uint64_t number; + FileType type; + WalFileType log_type; + if (!ParseFileName(name, &number, &type, &log_type) || + (type != kTableFile && type != kLogFile)) { + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "DeleteFile %s failed.\n", name.c_str()); + return Status::InvalidArgument("Invalid file name"); + } + + Status status; + if (type == kLogFile) { + // Only allow deleting archived log files + if (log_type != kArchivedLogFile) { + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "DeleteFile %s failed - not archived log.\n", + name.c_str()); + return Status::NotSupported("Delete only supported for archived logs"); + } + status = env_->DeleteFile(db_options_.wal_dir + "/" + name.c_str()); + if (!status.ok()) { + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "DeleteFile %s failed -- %s.\n", + name.c_str(), status.ToString().c_str()); + } + return status; + } + + int level; + FileMetaData* metadata; + ColumnFamilyData* cfd; + VersionEdit edit; + JobContext job_context(next_job_id_.fetch_add(1), true); + { + InstrumentedMutexLock l(&mutex_); + status = versions_->GetMetadataForFile(number, &level, &metadata, &cfd); + if (!status.ok()) { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "DeleteFile %s failed. File not found\n", name.c_str()); + job_context.Clean(); + return Status::InvalidArgument("File not found"); + } + assert(level < cfd->NumberLevels()); + + // If the file is being compacted no need to delete. + if (metadata->being_compacted) { + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "DeleteFile %s Skipped. File about to be compacted\n", name.c_str()); + job_context.Clean(); + return Status::OK(); + } + + // Only the files in the last level can be deleted externally. + // This is to make sure that any deletion tombstones are not + // lost. Check that the level passed is the last level. + auto* vstoreage = cfd->current()->storage_info(); + for (int i = level + 1; i < cfd->NumberLevels(); i++) { + if (vstoreage->NumLevelFiles(i) != 0) { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "DeleteFile %s FAILED. 
File not in last level\n", name.c_str()); + job_context.Clean(); + return Status::InvalidArgument("File not in last level"); + } + } + // if level == 0, it has to be the oldest file + if (level == 0 && + vstoreage->LevelFiles(0).back()->fd.GetNumber() != number) { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "DeleteFile %s failed ---" + " target file in level 0 must be the oldest.", name.c_str()); + job_context.Clean(); + return Status::InvalidArgument("File in level 0, but not oldest"); + } + edit.SetColumnFamily(cfd->GetID()); + edit.DeleteFile(level, number); + status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(), + &edit, &mutex_, directories_.GetDbDir()); + if (status.ok()) { + InstallSuperVersionAndScheduleWorkWrapper( + cfd, &job_context, *cfd->GetLatestMutableCFOptions()); + } + FindObsoleteFiles(&job_context, false); + } // lock released here + + LogFlush(db_options_.info_log); + // remove files outside the db-lock + if (job_context.HaveSomethingToDelete()) { + // Call PurgeObsoleteFiles() without holding mutex. + PurgeObsoleteFiles(job_context); + } + job_context.Clean(); + return status; +} + +Status DBImpl::DeleteFilesInRange(ColumnFamilyHandle* column_family, + const Slice* begin, const Slice* end) { + Status status; + auto cfh = reinterpret_cast(column_family); + ColumnFamilyData* cfd = cfh->cfd(); + VersionEdit edit; + std::vector deleted_files; + JobContext job_context(next_job_id_.fetch_add(1), true); + { + InstrumentedMutexLock l(&mutex_); + Version* input_version = cfd->current(); + + auto* vstorage = input_version->storage_info(); + for (int i = 1; i < cfd->NumberLevels(); i++) { + if (vstorage->LevelFiles(i).empty() || + !vstorage->OverlapInLevel(i, begin, end)) { + continue; + } + std::vector level_files; + InternalKey begin_storage, end_storage, *begin_key, *end_key; + if (begin == nullptr) { + begin_key = nullptr; + } else { + begin_storage.SetMaxPossibleForUserKey(*begin); + begin_key = &begin_storage; + } + if (end == nullptr) { + end_key = nullptr; + } else { + end_storage.SetMinPossibleForUserKey(*end); + end_key = &end_storage; + } + + vstorage->GetOverlappingInputs(i, begin_key, end_key, &level_files, -1, + nullptr, false); + FileMetaData* level_file; + for (uint32_t j = 0; j < level_files.size(); j++) { + level_file = level_files[j]; + if (((begin == nullptr) || + (cfd->internal_comparator().user_comparator()->Compare( + level_file->smallest.user_key(), *begin) >= 0)) && + ((end == nullptr) || + (cfd->internal_comparator().user_comparator()->Compare( + level_file->largest.user_key(), *end) <= 0))) { + if (level_file->being_compacted) { + continue; + } + edit.SetColumnFamily(cfd->GetID()); + edit.DeleteFile(i, level_file->fd.GetNumber()); + deleted_files.push_back(level_file); + level_file->being_compacted = true; + } + } + } + if (edit.GetDeletedFiles().empty()) { + job_context.Clean(); + return Status::OK(); + } + input_version->Ref(); + status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(), + &edit, &mutex_, directories_.GetDbDir()); + if (status.ok()) { + InstallSuperVersionAndScheduleWorkWrapper( + cfd, &job_context, *cfd->GetLatestMutableCFOptions()); + } + for (auto* deleted_file : deleted_files) { + deleted_file->being_compacted = false; + } + input_version->Unref(); + FindObsoleteFiles(&job_context, false); + } // lock released here + + LogFlush(db_options_.info_log); + // remove files outside the db-lock + if (job_context.HaveSomethingToDelete()) { + // Call PurgeObsoleteFiles() without holding mutex. 
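DeleteFilesInRange above drops only files wholly contained in [begin, end) and skips files being compacted, so keys in partially overlapping boundary files survive. In this era it is exposed to callers as a free function (assumed here to live in rocksdb/convenience.h):

```cpp
#include "rocksdb/convenience.h"  // assumed header for the wrapper

rocksdb::Slice begin("a"), end("m");
rocksdb::Status s = rocksdb::DeleteFilesInRange(
    db, db->DefaultColumnFamily(), &begin, &end);
// DeleteFile(name), by contrast, takes a single file name (e.g. from
// GetLiveFilesMetaData()) and only accepts SST files or archived WAL logs.
```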
+ PurgeObsoleteFiles(job_context); + } + job_context.Clean(); + return status; +} + +void DBImpl::GetLiveFilesMetaData(std::vector* metadata) { + InstrumentedMutexLock l(&mutex_); + versions_->GetLiveFilesMetaData(metadata); +} + +void DBImpl::GetColumnFamilyMetaData( + ColumnFamilyHandle* column_family, + ColumnFamilyMetaData* cf_meta) { + assert(column_family); + auto* cfd = reinterpret_cast(column_family)->cfd(); + auto* sv = GetAndRefSuperVersion(cfd); + sv->current->GetColumnFamilyMetaData(cf_meta); + ReturnAndCleanupSuperVersion(cfd, sv); +} + +#endif // ROCKSDB_LITE + +Status DBImpl::CheckConsistency() { + mutex_.AssertHeld(); + std::vector metadata; + versions_->GetLiveFilesMetaData(&metadata); + + std::string corruption_messages; + for (const auto& md : metadata) { + // md.name has a leading "/". + std::string file_path = md.db_path + md.name; + + uint64_t fsize = 0; + Status s = env_->GetFileSize(file_path, &fsize); + if (!s.ok() && + env_->GetFileSize(Rocks2LevelTableFileName(file_path), &fsize).ok()) { + s = Status::OK(); + } + if (!s.ok()) { + corruption_messages += + "Can't access " + md.name + ": " + s.ToString() + "\n"; + } else if (fsize != md.size) { + corruption_messages += "Sst file size mismatch: " + file_path + + ". Size recorded in manifest " + + ToString(md.size) + ", actual size " + + ToString(fsize) + "\n"; + } + } + if (corruption_messages.size() == 0) { + return Status::OK(); + } else { + return Status::Corruption(corruption_messages); + } +} + +Status DBImpl::GetDbIdentity(std::string& identity) const { + std::string idfilename = IdentityFileName(dbname_); + const EnvOptions soptions; + unique_ptr id_file_reader; + Status s; + { + unique_ptr idfile; + s = env_->NewSequentialFile(idfilename, &idfile, soptions); + if (!s.ok()) { + return s; + } + id_file_reader.reset(new SequentialFileReader(std::move(idfile))); + } + + uint64_t file_size; + s = env_->GetFileSize(idfilename, &file_size); + if (!s.ok()) { + return s; + } + char* buffer = reinterpret_cast(alloca(file_size)); + Slice id; + s = id_file_reader->Read(static_cast(file_size), &id, buffer); + if (!s.ok()) { + return s; + } + identity.assign(id.ToString()); + // If last character is '\n' remove it from identity + if (identity.size() > 0 && identity.back() == '\n') { + identity.pop_back(); + } + return s; +} + +// Default implementations of convenience methods that subclasses of DB +// can call if they wish +Status DB::Put(const WriteOptions& opt, ColumnFamilyHandle* column_family, + const Slice& key, const Slice& value) { + // Pre-allocate size of write batch conservatively. + // 8 bytes are taken by header, 4 bytes for count, 1 byte for type, + // and we allocate 11 extra bytes for key length, as well as value length. 
+ WriteBatch batch(key.size() + value.size() + 24); + batch.Put(column_family, key, value); + return Write(opt, &batch); +} + +Status DB::Delete(const WriteOptions& opt, ColumnFamilyHandle* column_family, + const Slice& key) { + WriteBatch batch; + batch.Delete(column_family, key); + return Write(opt, &batch); +} + +Status DB::SingleDelete(const WriteOptions& opt, + ColumnFamilyHandle* column_family, const Slice& key) { + WriteBatch batch; + batch.SingleDelete(column_family, key); + return Write(opt, &batch); +} + +Status DB::Merge(const WriteOptions& opt, ColumnFamilyHandle* column_family, + const Slice& key, const Slice& value) { + WriteBatch batch; + batch.Merge(column_family, key, value); + return Write(opt, &batch); +} + +// Default implementation -- returns not supported status +Status DB::CreateColumnFamily(const ColumnFamilyOptions& cf_options, + const std::string& column_family_name, + ColumnFamilyHandle** handle) { + return Status::NotSupported(""); +} +Status DB::DropColumnFamily(ColumnFamilyHandle* column_family) { + return Status::NotSupported(""); +} +Status DB::DestroyColumnFamilyHandle(ColumnFamilyHandle* column_family) { + delete column_family; + return Status::OK(); +} + +DB::~DB() { } + +Status DB::Open(const Options& options, const std::string& dbname, DB** dbptr) { + DBOptions db_options(options); + ColumnFamilyOptions cf_options(options); + std::vector column_families; + column_families.push_back( + ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options)); + std::vector handles; + Status s = DB::Open(db_options, dbname, column_families, &handles, dbptr); + if (s.ok()) { + assert(handles.size() == 1); + // i can delete the handle since DBImpl is always holding a reference to + // default column family + delete handles[0]; + } + return s; +} + +Status DB::Open(const DBOptions& db_options, const std::string& dbname, + const std::vector& column_families, + std::vector* handles, DB** dbptr) { + Status s = SanitizeOptionsByTable(db_options, column_families); + if (!s.ok()) { + return s; + } + + s = ValidateOptions(db_options, column_families); + if (!s.ok()) { + return s; + } + + *dbptr = nullptr; + handles->clear(); + + size_t max_write_buffer_size = 0; + for (auto cf : column_families) { + max_write_buffer_size = + std::max(max_write_buffer_size, cf.options.write_buffer_size); + } + + DBImpl* impl = new DBImpl(db_options, dbname); + s = impl->env_->CreateDirIfMissing(impl->db_options_.wal_dir); + if (s.ok()) { + for (auto db_path : impl->db_options_.db_paths) { + s = impl->env_->CreateDirIfMissing(db_path.path); + if (!s.ok()) { + break; + } + } + } + + if (!s.ok()) { + delete impl; + return s; + } + + s = impl->CreateArchivalDirectory(); + if (!s.ok()) { + delete impl; + return s; + } + impl->mutex_.Lock(); + // Handles create_if_missing, error_if_exists + s = impl->Recover(column_families); + if (s.ok()) { + uint64_t new_log_number = impl->versions_->NewFileNumber(); + unique_ptr lfile; + EnvOptions soptions(db_options); + EnvOptions opt_env_options = + impl->db_options_.env->OptimizeForLogWrite(soptions, impl->db_options_); + s = NewWritableFile(impl->db_options_.env, + LogFileName(impl->db_options_.wal_dir, new_log_number), + &lfile, opt_env_options); + if (s.ok()) { + lfile->SetPreallocationBlockSize((max_write_buffer_size / 10) + max_write_buffer_size); + impl->logfile_number_ = new_log_number; + unique_ptr file_writer( + new WritableFileWriter(std::move(lfile), opt_env_options)); + impl->logs_.emplace_back( + new_log_number, + new 
log::Writer(std::move(file_writer), new_log_number, + impl->db_options_.recycle_log_file_num > 0)); + + // set column family handles + for (auto cf : column_families) { + auto cfd = + impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name); + if (cfd != nullptr) { + handles->push_back( + new ColumnFamilyHandleImpl(cfd, impl, &impl->mutex_)); + impl->NewThreadStatusCfInfo(cfd); + } else { + if (db_options.create_missing_column_families) { + // missing column family, create it + ColumnFamilyHandle* handle; + impl->mutex_.Unlock(); + s = impl->CreateColumnFamily(cf.options, cf.name, &handle); + impl->mutex_.Lock(); + if (s.ok()) { + handles->push_back(handle); + } else { + break; + } + } else { + s = Status::InvalidArgument("Column family not found: ", cf.name); + break; + } + } + } + } + if (s.ok()) { + for (auto cfd : *impl->versions_->GetColumnFamilySet()) { + delete impl->InstallSuperVersionAndScheduleWork( + cfd, nullptr, *cfd->GetLatestMutableCFOptions()); + } + impl->alive_log_files_.push_back( + DBImpl::LogFileNumberSize(impl->logfile_number_)); + impl->DeleteObsoleteFiles(); + s = impl->directories_.GetDbDir()->Fsync(); + } + } + + if (s.ok()) { + for (auto cfd : *impl->versions_->GetColumnFamilySet()) { + if (cfd->ioptions()->compaction_style == kCompactionStyleFIFO) { + auto* vstorage = cfd->current()->storage_info(); + for (int i = 1; i < vstorage->num_levels(); ++i) { + int num_files = vstorage->NumLevelFiles(i); + if (num_files > 0) { + s = Status::InvalidArgument( + "Not all files are at level 0. Cannot " + "open with FIFO compaction style."); + break; + } + } + } + if (!cfd->mem()->IsSnapshotSupported()) { + impl->is_snapshot_supported_ = false; + } + if (cfd->ioptions()->merge_operator != nullptr && + !cfd->mem()->IsMergeOperatorSupported()) { + s = Status::InvalidArgument( + "The memtable of column family %s does not support merge operator " + "its options.merge_operator is non-null", cfd->GetName().c_str()); + } + if (!s.ok()) { + break; + } + } + } + TEST_SYNC_POINT("DBImpl::Open:Opened"); + Status persist_options_status; + if (s.ok()) { + // Persist RocksDB Options before scheduling the compaction. + // The WriteOptionsFile() will release and lock the mutex internally. + persist_options_status = impl->WriteOptionsFile(); + + *dbptr = impl; + impl->opened_successfully_ = true; + impl->MaybeScheduleFlushOrCompaction(); + } + impl->mutex_.Unlock(); + + auto sfm = static_cast( + impl->db_options_.sst_file_manager.get()); + if (s.ok() && sfm) { + // Notify SstFileManager about all sst files that already exist in + // db_paths[0] when the DB is opened. 
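The handle set-up loop above is what makes create_missing_column_families work: unknown names are created inside Open() on the same code path as CreateColumnFamily(). A caller-side sketch (the path and the "meta" CF name are hypothetical):

```cpp
rocksdb::DBOptions db_opts;
db_opts.create_if_missing = true;
db_opts.create_missing_column_families = true;

std::vector<rocksdb::ColumnFamilyDescriptor> cfs = {
    {rocksdb::kDefaultColumnFamilyName, rocksdb::ColumnFamilyOptions()},
    {"meta", rocksdb::ColumnFamilyOptions()}};
std::vector<rocksdb::ColumnFamilyHandle*> handles;  // one per descriptor
rocksdb::DB* db = nullptr;
rocksdb::Status s =
    rocksdb::DB::Open(db_opts, "/tmp/testdb", cfs, &handles, &db);
```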
+ auto& db_path = impl->db_options_.db_paths[0]; + std::vector existing_files; + impl->db_options_.env->GetChildren(db_path.path, &existing_files); + for (auto& file_name : existing_files) { + uint64_t file_number; + FileType file_type; + std::string file_path = db_path.path + "/" + file_name; + if (ParseFileName(file_name, &file_number, &file_type) && + file_type == kTableFile) { + sfm->OnAddFile(file_path); + } + } + } + + if (s.ok()) { + Log(InfoLogLevel::INFO_LEVEL, impl->db_options_.info_log, "DB pointer %p", + impl); + LogFlush(impl->db_options_.info_log); + if (!persist_options_status.ok()) { + if (db_options.fail_if_options_file_error) { + s = Status::IOError( + "DB::Open() failed --- Unable to persist Options file", + persist_options_status.ToString()); + } + Warn(impl->db_options_.info_log, + "Unable to persist options in DB::Open() -- %s", + persist_options_status.ToString().c_str()); + } + } + if (!s.ok()) { + for (auto* h : *handles) { + delete h; + } + handles->clear(); + delete impl; + *dbptr = nullptr; + } + return s; +} + +Status DB::ListColumnFamilies(const DBOptions& db_options, + const std::string& name, + std::vector* column_families) { + return VersionSet::ListColumnFamilies(column_families, name, db_options.env); +} + +Snapshot::~Snapshot() { +} + +Status DestroyDB(const std::string& dbname, const Options& options) { + const InternalKeyComparator comparator(options.comparator); + const Options& soptions(SanitizeOptions(dbname, &comparator, options)); + Env* env = soptions.env; + std::vector filenames; + + // Ignore error in case directory does not exist + env->GetChildren(dbname, &filenames); + + FileLock* lock; + const std::string lockname = LockFileName(dbname); + Status result = env->LockFile(lockname, &lock); + if (result.ok()) { + uint64_t number; + FileType type; + InfoLogPrefix info_log_prefix(!options.db_log_dir.empty(), dbname); + for (size_t i = 0; i < filenames.size(); i++) { + if (ParseFileName(filenames[i], &number, info_log_prefix.prefix, &type) && + type != kDBLockFile) { // Lock file will be deleted at end + Status del; + std::string path_to_delete = dbname + "/" + filenames[i]; + if (type == kMetaDatabase) { + del = DestroyDB(path_to_delete, options); + } else if (type == kTableFile) { + del = DeleteSSTFile(&options, path_to_delete, 0); + } else { + del = env->DeleteFile(path_to_delete); + } + if (result.ok() && !del.ok()) { + result = del; + } + } + } + + for (size_t path_id = 0; path_id < options.db_paths.size(); path_id++) { + const auto& db_path = options.db_paths[path_id]; + env->GetChildren(db_path.path, &filenames); + for (size_t i = 0; i < filenames.size(); i++) { + if (ParseFileName(filenames[i], &number, &type) && + type == kTableFile) { // Lock file will be deleted at end + std::string table_path = db_path.path + "/" + filenames[i]; + Status del = DeleteSSTFile(&options, table_path, + static_cast(path_id)); + if (result.ok() && !del.ok()) { + result = del; + } + } + } + } + + std::vector walDirFiles; + std::string archivedir = ArchivalDirectory(dbname); + if (dbname != soptions.wal_dir) { + env->GetChildren(soptions.wal_dir, &walDirFiles); + archivedir = ArchivalDirectory(soptions.wal_dir); + } + + // Delete log files in the WAL dir + for (const auto& file : walDirFiles) { + if (ParseFileName(file, &number, &type) && type == kLogFile) { + Status del = env->DeleteFile(soptions.wal_dir + "/" + file); + if (result.ok() && !del.ok()) { + result = del; + } + } + } + + std::vector archiveFiles; + env->GetChildren(archivedir, &archiveFiles); + // 
+Status DBImpl::WriteOptionsFile() {
+#ifndef ROCKSDB_LITE
+  mutex_.AssertHeld();
+
+  std::vector<std::string> cf_names;
+  std::vector<ColumnFamilyOptions> cf_opts;
+
+  // This part requires mutex to protect the column family options
+  for (auto cfd : *versions_->GetColumnFamilySet()) {
+    if (cfd->IsDropped()) {
+      continue;
+    }
+    cf_names.push_back(cfd->GetName());
+    cf_opts.push_back(BuildColumnFamilyOptions(
+        *cfd->options(), *cfd->GetLatestMutableCFOptions()));
+  }
+
+  // Unlock during expensive operations.  New writes cannot get here
+  // because the single write thread ensures all new writes get queued.
+  mutex_.Unlock();
+
+  std::string file_name =
+      TempOptionsFileName(GetName(), versions_->NewFileNumber());
+  Status s = PersistRocksDBOptions(GetDBOptions(), cf_names, cf_opts, file_name,
+                                   GetEnv());
+
+  if (s.ok()) {
+    s = RenameTempFileToOptionsFile(file_name);
+  }
+  mutex_.Lock();
+  return s;
+#else
+  return Status::OK();
+#endif  // !ROCKSDB_LITE
+}
+
+#ifndef ROCKSDB_LITE
+namespace {
+void DeleteOptionsFilesHelper(const std::map<uint64_t, std::string>& filenames,
+                              const size_t num_files_to_keep,
+                              const std::shared_ptr<Logger>& info_log,
+                              Env* env) {
+  if (filenames.size() <= num_files_to_keep) {
+    return;
+  }
+  for (auto iter = std::next(filenames.begin(), num_files_to_keep);
+       iter != filenames.end(); ++iter) {
+    if (!env->DeleteFile(iter->second).ok()) {
+      Warn(info_log, "Unable to delete options file %s", iter->second.c_str());
+    }
+  }
+}
+}  // namespace
+#endif  // !ROCKSDB_LITE
+
+Status DBImpl::DeleteObsoleteOptionsFiles() {
+#ifndef ROCKSDB_LITE
+  std::vector<std::string> filenames;
+  // use an ordered map to keep the filenames sorted from the newest
+  // to the oldest.
+  std::map<uint64_t, std::string> options_filenames;
+  Status s;
+  s = GetEnv()->GetChildren(GetName(), &filenames);
+  if (!s.ok()) {
+    return s;
+  }
+  for (auto& filename : filenames) {
+    uint64_t file_number;
+    FileType type;
+    if (ParseFileName(filename, &file_number, &type) && type == kOptionsFile) {
+      options_filenames.insert(
+          {std::numeric_limits<uint64_t>::max() - file_number,
+           GetName() + "/" + filename});
+    }
+  }
+
+  // Keeps the latest 2 Options files
+  const size_t kNumOptionsFilesKept = 2;
+  DeleteOptionsFilesHelper(options_filenames, kNumOptionsFilesKept,
+                           db_options_.info_log, GetEnv());
+  return Status::OK();
+#else
+  return Status::OK();
+#endif  // !ROCKSDB_LITE
+}
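+// The OPTIONS files written above can be loaded back with the options_util
+// helpers; a hedged sketch (helper availability depends on the exact RocksDB
+// revision vendored here; the path is illustrative):
+//
+//   #include "rocksdb/utilities/options_util.h"
+//
+//   rocksdb::DBOptions db_opts;
+//   std::vector<rocksdb::ColumnFamilyDescriptor> cf_descs;
+//   rocksdb::Status s = rocksdb::LoadLatestOptions(
+//       "/tmp/db", rocksdb::Env::Default(), &db_opts, &cf_descs);
+//   // On success, db_opts/cf_descs mirror the options the DB last ran with.
+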
+Status DBImpl::RenameTempFileToOptionsFile(const std::string& file_name) {
+#ifndef ROCKSDB_LITE
+  Status s;
+
+  versions_->options_file_number_ = versions_->NewFileNumber();
+  std::string options_file_name =
+      OptionsFileName(GetName(), versions_->options_file_number_);
+  // Retry if the file name happens to conflict with an existing one.
+  s = GetEnv()->RenameFile(file_name, options_file_name);
+
+  DeleteObsoleteOptionsFiles();
+  return s;
+#else
+  return Status::OK();
+#endif  // !ROCKSDB_LITE
+}
+
+#if ROCKSDB_USING_THREAD_STATUS
+
+void DBImpl::NewThreadStatusCfInfo(
+    ColumnFamilyData* cfd) const {
+  if (db_options_.enable_thread_tracking) {
+    ThreadStatusUtil::NewColumnFamilyInfo(this, cfd, cfd->GetName(),
+                                          cfd->ioptions()->env);
+  }
+}
+
+void DBImpl::EraseThreadStatusCfInfo(
+    ColumnFamilyData* cfd) const {
+  if (db_options_.enable_thread_tracking) {
+    ThreadStatusUtil::EraseColumnFamilyInfo(cfd);
+  }
+}
+
+void DBImpl::EraseThreadStatusDbInfo() const {
+  if (db_options_.enable_thread_tracking) {
+    ThreadStatusUtil::EraseDatabaseInfo(this);
+  }
+}
+
+#else
+void DBImpl::NewThreadStatusCfInfo(
+    ColumnFamilyData* cfd) const {
+}
+
+void DBImpl::EraseThreadStatusCfInfo(
+    ColumnFamilyData* cfd) const {
+}
+
+void DBImpl::EraseThreadStatusDbInfo() const {
+}
+#endif  // ROCKSDB_USING_THREAD_STATUS
+
+//
+// A global method that can dump out the build version
+void DumpRocksDBBuildVersion(Logger* log) {
+#if !defined(IOS_CROSS_COMPILE)
+  // if we compile with Xcode, we don't run build_detect_version, so we don't
+  // generate util/build_version.cc
+  Header(log, "RocksDB version: %d.%d.%d\n", ROCKSDB_MAJOR, ROCKSDB_MINOR,
+         ROCKSDB_PATCH);
+  Header(log, "Git sha %s", rocksdb_build_git_sha);
+  Header(log, "Compile date %s", rocksdb_build_compile_date);
+#endif
+}
+
+#ifndef ROCKSDB_LITE
+SequenceNumber DBImpl::GetEarliestMemTableSequenceNumber(SuperVersion* sv,
+                                                         bool include_history) {
+  // Find the earliest sequence number that we know we can rely on reading
+  // from the memtable without needing to check sst files.
+  SequenceNumber earliest_seq =
+      sv->imm->GetEarliestSequenceNumber(include_history);
+  if (earliest_seq == kMaxSequenceNumber) {
+    earliest_seq = sv->mem->GetEarliestSequenceNumber();
+  }
+  assert(sv->mem->GetEarliestSequenceNumber() >= earliest_seq);
+
+  return earliest_seq;
+}
+#endif  // ROCKSDB_LITE
+
+#ifndef ROCKSDB_LITE
+Status DBImpl::GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
+                                       bool cache_only, SequenceNumber* seq,
+                                       bool* found_record_for_key) {
+  Status s;
+  MergeContext merge_context;
+
+  SequenceNumber current_seq = versions_->LastSequence();
+  LookupKey lkey(key, current_seq);
+
+  *seq = kMaxSequenceNumber;
+  *found_record_for_key = false;
+
+  // Check if there is a record for this key in the latest memtable
+  sv->mem->Get(lkey, nullptr, &s, &merge_context, seq);
+
+  if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
+    // unexpected error reading memtable.
+    Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
+        "Unexpected status returned from MemTable::Get: %s\n",
+        s.ToString().c_str());
+
+    return s;
+  }
+
+  if (*seq != kMaxSequenceNumber) {
+    // Found a sequence number, no need to check immutable memtables
+    *found_record_for_key = true;
+    return Status::OK();
+  }
+
+  // Check if there is a record for this key in the immutable memtables
+  sv->imm->Get(lkey, nullptr, &s, &merge_context, seq);
+
+  if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
+    // unexpected error reading memtable.
+    Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
+        "Unexpected status returned from MemTableList::Get: %s\n",
+        s.ToString().c_str());
+
+    return s;
+  }
+
+  if (*seq != kMaxSequenceNumber) {
+    // Found a sequence number, no need to check memtable history
+    *found_record_for_key = true;
+    return Status::OK();
+  }
+
+  // Check if there is a record for this key in the memtable history
+  sv->imm->GetFromHistory(lkey, nullptr, &s, &merge_context, seq);
+
+  if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
+    // unexpected error reading memtable.
+    Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
+        "Unexpected status returned from MemTableList::GetFromHistory: %s\n",
+        s.ToString().c_str());
+
+    return s;
+  }
+
+  if (*seq != kMaxSequenceNumber) {
+    // Found a sequence number, no need to check SST files
+    *found_record_for_key = true;
+    return Status::OK();
+  }
+
+  // TODO(agiardullo): possible optimization: consider checking cached
+  // SST files if cache_only=true?
+  if (!cache_only) {
+    // Check tables
+    ReadOptions read_options;
+
+    sv->current->Get(read_options, lkey, nullptr, &s, &merge_context,
+                     nullptr /* value_found */, found_record_for_key, seq);
+
+    if (!(s.ok() || s.IsNotFound() || s.IsMergeInProgress())) {
+      // unexpected error reading SST files
+      Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
+          "Unexpected status returned from Version::Get: %s\n",
+          s.ToString().c_str());
+
+      return s;
+    }
+  }
+
+  return Status::OK();
+}
+#endif  // ROCKSDB_LITE
+
+}  // namespace rocksdb
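GetLatestSequenceForKey is the primitive that transaction write-conflict checking builds on: a pending write conflicts if any commit newer than the writer's snapshot touched the same key. A self-contained toy model of that check (illustrative only; the `latest_write` map stands in for the memtable/history/SST lookups the method above performs):

#include <cassert>
#include <cstdint>
#include <map>
#include <string>

using SequenceNumber = uint64_t;
static const SequenceNumber kMaxSeq = ~static_cast<SequenceNumber>(0);

// key -> sequence number of the latest committed write (toy model).
std::map<std::string, SequenceNumber> latest_write;

bool HasWriteConflict(const std::string& key, SequenceNumber snapshot_seq) {
  auto it = latest_write.find(key);
  if (it == latest_write.end()) {
    return false;                      // no record for this key at all
  }
  return it->second > snapshot_seq;    // committed after our snapshot => conflict
}

int main() {
  latest_write["a"] = 10;
  assert(!HasWriteConflict("a", 15));  // snapshot taken after the write
  assert(HasWriteConflict("a", 5));    // write landed after the snapshot
  assert(!HasWriteConflict("b", 5));   // key never written
  return 0;
}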
diff --git a/external/rocksdb/db/db_impl.h b/external/rocksdb/db/db_impl.h
new file mode 100755
index 0000000000..3d0eab55cb
--- /dev/null
+++ b/external/rocksdb/db/db_impl.h
@@ -0,0 +1,1086 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#pragma once
+
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+#include
+
+#include "db/column_family.h"
+#include "db/compaction_job.h"
+#include "db/dbformat.h"
+#include "db/flush_job.h"
+#include "db/flush_scheduler.h"
+#include "db/internal_stats.h"
+#include "db/log_writer.h"
+#include "db/snapshot_impl.h"
+#include "db/version_edit.h"
+#include "db/wal_manager.h"
+#include "db/write_controller.h"
+#include "db/write_thread.h"
+#include "memtable_list.h"
+#include "port/port.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/transaction_log.h"
+#include "rocksdb/write_buffer_manager.h"
+#include "table/scoped_arena_iterator.h"
+#include "util/autovector.h"
+#include "util/event_logger.h"
+#include "util/hash.h"
+#include "util/instrumented_mutex.h"
+#include "util/stop_watch.h"
+#include "util/thread_local.h"
+
+namespace rocksdb {
+
+class MemTable;
+class TableCache;
+class Version;
+class VersionEdit;
+class VersionSet;
+class Arena;
+class WriteCallback;
+struct JobContext;
+struct ExternalSstFileInfo;
+struct MemTableInfo;
+
+class DBImpl : public DB {
+ public:
+  DBImpl(const DBOptions& options, const std::string& dbname);
+  virtual ~DBImpl();
+
+  // Implementations of the DB interface
+  using DB::Put;
+  virtual Status Put(const WriteOptions& options,
+                     ColumnFamilyHandle* column_family, const Slice& key,
+                     const Slice& value) override;
+  using DB::Merge;
+  virtual Status Merge(const WriteOptions& options,
+                       ColumnFamilyHandle* column_family, const Slice& key,
+                       const Slice& value) override;
+  using DB::Delete;
+  virtual Status Delete(const WriteOptions& options,
+                        ColumnFamilyHandle* column_family,
+                        const Slice& key) override;
+  using DB::SingleDelete;
+  virtual Status SingleDelete(const WriteOptions& options,
+                              ColumnFamilyHandle* column_family,
+                              const Slice& key) override;
+  using DB::Write;
+  virtual Status Write(const WriteOptions& options,
+                       WriteBatch* updates) override;
+
+  using DB::Get;
+  virtual Status Get(const ReadOptions& options,
+                     ColumnFamilyHandle* column_family, const Slice& key,
+                     std::string* value) override;
+  using DB::MultiGet;
+  virtual std::vector<Status> MultiGet(
+      const ReadOptions& options,
+      const std::vector<ColumnFamilyHandle*>& column_family,
+      const std::vector<Slice>& keys,
+      std::vector<std::string>* values) override;
+
+  virtual Status CreateColumnFamily(const ColumnFamilyOptions& options,
+                                    const std::string& column_family,
+                                    ColumnFamilyHandle** handle) override;
+  virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override;
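+  // The overrides above are the DBImpl side of the public write/read API.
+  // For orientation, a minimal caller-side sketch against the DB interface
+  // they implement (path, keys and values are illustrative):
+  //
+  //   rocksdb::DB* db = nullptr;
+  //   rocksdb::Options opts;
+  //   opts.create_if_missing = true;
+  //   rocksdb::Status s = rocksdb::DB::Open(opts, "/tmp/db", &db);
+  //   s = db->Put(rocksdb::WriteOptions(), "k", "v");
+  //   std::string v;
+  //   s = db->Get(rocksdb::ReadOptions(), "k", &v);   // v == "v"
+  //   rocksdb::WriteBatch batch;   // Write() applies a batch atomically
+  //   batch.Delete("k");
+  //   batch.Put("k2", "v2");
+  //   s = db->Write(rocksdb::WriteOptions(), &batch);
+  //   delete db;
+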
+  // Returns false if key doesn't exist in the database and true if it may.
+  // If value_found is not passed in as null, then return the value if found in
+  // memory. On return, if the value was found, then value_found will be set to
+  // true, otherwise false.
+  using DB::KeyMayExist;
+  virtual bool KeyMayExist(const ReadOptions& options,
+                           ColumnFamilyHandle* column_family, const Slice& key,
+                           std::string* value,
+                           bool* value_found = nullptr) override;
+  using DB::NewIterator;
+  virtual Iterator* NewIterator(const ReadOptions& options,
+                                ColumnFamilyHandle* column_family) override;
+  virtual Status NewIterators(
+      const ReadOptions& options,
+      const std::vector<ColumnFamilyHandle*>& column_families,
+      std::vector<Iterator*>* iterators) override;
+  virtual const Snapshot* GetSnapshot() override;
+  virtual void ReleaseSnapshot(const Snapshot* snapshot) override;
+  using DB::GetProperty;
+  virtual bool GetProperty(ColumnFamilyHandle* column_family,
+                           const Slice& property, std::string* value) override;
+  using DB::GetIntProperty;
+  virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
+                              const Slice& property, uint64_t* value) override;
+  using DB::GetAggregatedIntProperty;
+  virtual bool GetAggregatedIntProperty(const Slice& property,
+                                        uint64_t* aggregated_value) override;
+  using DB::GetApproximateSizes;
+  virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
+                                   const Range* range, int n, uint64_t* sizes,
+                                   bool include_memtable = false) override;
+  using DB::CompactRange;
+  virtual Status CompactRange(const CompactRangeOptions& options,
+                              ColumnFamilyHandle* column_family,
+                              const Slice* begin, const Slice* end) override;
+
+  using DB::CompactFiles;
+  virtual Status CompactFiles(const CompactionOptions& compact_options,
+                              ColumnFamilyHandle* column_family,
+                              const std::vector<std::string>& input_file_names,
+                              const int output_level,
+                              const int output_path_id = -1) override;
+
+  virtual Status PauseBackgroundWork() override;
+  virtual Status ContinueBackgroundWork() override;
+
+  virtual Status EnableAutoCompaction(
+      const std::vector<ColumnFamilyHandle*>& column_family_handles) override;
+
+  using DB::SetOptions;
+  Status SetOptions(
+      ColumnFamilyHandle* column_family,
+      const std::unordered_map<std::string, std::string>& options_map) override;
+
+  using DB::NumberLevels;
+  virtual int NumberLevels(ColumnFamilyHandle* column_family) override;
+  using DB::MaxMemCompactionLevel;
+  virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family) override;
+  using DB::Level0StopWriteTrigger;
+  virtual int Level0StopWriteTrigger(
+      ColumnFamilyHandle* column_family) override;
+  virtual const std::string& GetName() const override;
+  virtual Env* GetEnv() const override;
+  using DB::GetOptions;
+  virtual const Options& GetOptions(
+      ColumnFamilyHandle* column_family) const override;
+  using DB::GetDBOptions;
+  virtual const DBOptions& GetDBOptions() const override;
+  using DB::Flush;
+  virtual Status Flush(const FlushOptions& options,
+                       ColumnFamilyHandle* column_family) override;
+  virtual Status SyncWAL() override;
+
+  virtual SequenceNumber GetLatestSequenceNumber() const override;
+
+#ifndef ROCKSDB_LITE
+  virtual Status DisableFileDeletions() override;
+  virtual Status EnableFileDeletions(bool force) override;
+  virtual int IsFileDeletionsEnabled() const;
+  // All the returned filenames start with "/"
+  virtual Status GetLiveFiles(std::vector<std::string>&,
+                              uint64_t* manifest_file_size,
+                              bool flush_memtable = true) override;
+  virtual Status GetSortedWalFiles(VectorLogPtr& files) override;
+
+  virtual Status GetUpdatesSince(
+      SequenceNumber seq_number, unique_ptr<TransactionLogIterator>* iter,
+      const TransactionLogIterator::ReadOptions&
+          read_options = TransactionLogIterator::ReadOptions()) override;
+  virtual Status DeleteFile(std::string name) override;
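+  // GetUpdatesSince() above is the WAL-tailing hook that replication sits on.
+  // A hedged usage sketch (the start sequence number and ApplyToReplica are
+  // illustrative, not library code):
+  //
+  //   std::unique_ptr<rocksdb::TransactionLogIterator> it;
+  //   rocksdb::Status s = db->GetUpdatesSince(100 /* start seq */, &it);
+  //   for (; s.ok() && it->Valid(); it->Next()) {
+  //     rocksdb::BatchResult res = it->GetBatch();  // sequence + WriteBatch
+  //     ApplyToReplica(res.sequence, res.writeBatchPtr.get());  // user code
+  //   }
+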
+  Status DeleteFilesInRange(ColumnFamilyHandle* column_family,
+                            const Slice* begin, const Slice* end);
+
+  virtual void GetLiveFilesMetaData(
+      std::vector<LiveFileMetaData>* metadata) override;
+
+  // Obtains the meta data of the specified column family of the DB.
+  // Status::NotFound() will be returned if the current DB does not have
+  // any column family that matches the specified name.
+  // TODO(yhchiang): output parameter is placed in the end in this codebase.
+  virtual void GetColumnFamilyMetaData(
+      ColumnFamilyHandle* column_family,
+      ColumnFamilyMetaData* metadata) override;
+
+  // experimental API
+  Status SuggestCompactRange(ColumnFamilyHandle* column_family,
+                             const Slice* begin, const Slice* end);
+
+  Status PromoteL0(ColumnFamilyHandle* column_family, int target_level);
+
+  // Similar to Write() but will call the callback once on the single write
+  // thread to determine whether it is safe to perform the write.
+  virtual Status WriteWithCallback(const WriteOptions& write_options,
+                                   WriteBatch* my_batch,
+                                   WriteCallback* callback);
+
+  // Returns the sequence number that is guaranteed to be smaller than or equal
+  // to the sequence number of any key that could be inserted into the current
+  // memtables. It can then be assumed that any write with a larger (or equal)
+  // sequence number will be present in this memtable or a later memtable.
+  //
+  // If the earliest sequence number could not be determined,
+  // kMaxSequenceNumber will be returned.
+  //
+  // If include_history=true, will also search Memtables in MemTableList
+  // History.
+  SequenceNumber GetEarliestMemTableSequenceNumber(SuperVersion* sv,
+                                                   bool include_history);
+
+  // For a given key, check to see if there are any records for this key
+  // in the memtables, including memtable history.  If cache_only is false,
+  // SST files will also be checked.
+  //
+  // If a key is found, *found_record_for_key will be set to true and
+  // *seq will be set to the stored sequence number for the latest
+  // operation on this key or kMaxSequenceNumber if unknown.
+  // If no key is found, *found_record_for_key will be set to false.
+  //
+  // Note: If cache_only=false, it is possible for *seq to be set to 0 if
+  // the sequence number has been cleared from the record.  If the caller is
+  // holding an active db snapshot, we know the missing sequence must be less
+  // than the snapshot's sequence number (sequence numbers are only cleared
+  // when there are no earlier active snapshots).
+  //
+  // If NotFound is returned and found_record_for_key is set to false, then no
+  // record for this key was found.  If the caller is holding an active db
+  // snapshot, we know that no key could have existed after this snapshot
+  // (since we do not compact keys that have an earlier snapshot).
+  //
+  // Returns OK or NotFound on success,
+  // other status on unexpected error.
+  Status GetLatestSequenceForKey(SuperVersion* sv, const Slice& key,
+                                 bool cache_only, SequenceNumber* seq,
+                                 bool* found_record_for_key);
+
+  using DB::AddFile;
+  virtual Status AddFile(ColumnFamilyHandle* column_family,
+                         const std::vector<ExternalSstFileInfo>& file_info_list,
+                         bool move_file) override;
+  virtual Status AddFile(ColumnFamilyHandle* column_family,
+                         const std::vector<std::string>& file_path_list,
+                         bool move_file) override;
+
+#endif  // ROCKSDB_LITE
+
+  // Similar to GetSnapshot(), but also lets the db know that this snapshot
+  // will be used for transaction write-conflict checking.  The DB can then
+  // make sure not to compact any keys that would prevent a write-conflict from
+  // being detected.
+  const Snapshot* GetSnapshotForWriteConflictBoundary();
+
+  // checks if all live files exist on the file system and that their file
+  // sizes match our in-memory records
+  virtual Status CheckConsistency();
+
+  virtual Status GetDbIdentity(std::string& identity) const override;
+
+  Status RunManualCompaction(ColumnFamilyData* cfd, int input_level,
+                             int output_level, uint32_t output_path_id,
+                             const Slice* begin, const Slice* end,
+                             bool exclusive,
+                             bool disallow_trivial_move = false);
+
+  // Return an internal iterator over the current state of the database.
+  // The keys of this iterator are internal keys (see format.h).
+  // The returned iterator should be deleted when no longer needed.
+  InternalIterator* NewInternalIterator(
+      Arena* arena, ColumnFamilyHandle* column_family = nullptr);
+
+#ifndef NDEBUG
+  // Extra methods (for testing) that are not in the public DB interface
+  // Implemented in db_impl_debug.cc
+
+  // Compact any files in the named level that overlap [*begin, *end]
+  Status TEST_CompactRange(int level, const Slice* begin, const Slice* end,
+                           ColumnFamilyHandle* column_family = nullptr,
+                           bool disallow_trivial_move = false);
+
+  // Force current memtable contents to be flushed.
+  Status TEST_FlushMemTable(bool wait = true,
+                            ColumnFamilyHandle* cfh = nullptr);
+
+  // Wait for memtable compaction
+  Status TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family = nullptr);
+
+  // Wait for any compaction
+  Status TEST_WaitForCompact();
+
+  // Return the maximum overlapping data (in bytes) at next level for any
+  // file at a level >= 1.
+  int64_t TEST_MaxNextLevelOverlappingBytes(ColumnFamilyHandle* column_family =
+                                                nullptr);
+
+  // Return the current manifest file no.
+  uint64_t TEST_Current_Manifest_FileNo();
+
+  // get total level0 file size. Only for testing.
+  uint64_t TEST_GetLevel0TotalSize();
+
+  void TEST_GetFilesMetaData(ColumnFamilyHandle* column_family,
+                             std::vector<std::vector<FileMetaData>>* metadata);
+
+  void TEST_LockMutex();
+
+  void TEST_UnlockMutex();
+
+  // REQUIRES: mutex locked
+  void* TEST_BeginWrite();
+
+  // REQUIRES: mutex locked
+  // pass the pointer that you got from TEST_BeginWrite()
+  void TEST_EndWrite(void* w);
+
+  uint64_t TEST_MaxTotalInMemoryState() const {
+    return max_total_in_memory_state_;
+  }
+
+  size_t TEST_LogsToFreeSize();
+
+  uint64_t TEST_LogfileNumber();
+
+  uint64_t TEST_total_log_size() const { return total_log_size_; }
+
+  // Returns column family name to ImmutableCFOptions map.
+  Status TEST_GetAllImmutableCFOptions(
+      std::unordered_map<std::string, const ImmutableCFOptions*>* iopts_map);
+
+  // Return the latest MutableCFOptions of a column family
+  Status TEST_GetLatestMutableCFOptions(ColumnFamilyHandle* column_family,
+                                        MutableCFOptions* mutable_cf_options);
+
+  Cache* TEST_table_cache() { return table_cache_.get(); }
+
+  WriteController& TEST_write_controler() { return write_controller_; }
+
+  uint64_t TEST_FindMinLogContainingOutstandingPrep();
+  uint64_t TEST_FindMinPrepLogReferencedByMemTable();
+
+#endif  // NDEBUG
+
+  // Return maximum background compactions allowed to be scheduled based on
+  // compaction status.
+  int BGCompactionsAllowed() const;
+
+  // move logs pending closing from job_context to the DB queue and
+  // schedule a purge
+  void ScheduleBgLogWriterClose(JobContext* job_context);
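+  // NewInternalIterator() above yields "internal keys": the user key plus an
+  // 8-byte trailer packing the sequence number and value type (dbformat.h).
+  // A hedged sketch of that encoding, not the library's actual helpers:
+  //
+  //   uint64_t PackSequenceAndType(uint64_t seq, uint8_t type) {
+  //     return (seq << 8) | type;       // seq in high 56 bits, type in low 8
+  //   }
+  //   std::string MakeInternalKey(const std::string& user_key,
+  //                               uint64_t seq, uint8_t type) {
+  //     std::string ikey = user_key;
+  //     uint64_t tag = PackSequenceAndType(seq, type);
+  //     // The trailer is stored as a little-endian fixed64 (EncodeFixed64);
+  //     // the raw append below matches that only on little-endian hosts.
+  //     ikey.append(reinterpret_cast<const char*>(&tag), sizeof(tag));
+  //     return ikey;
+  //   }
+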
+  // Returns the list of live files in 'live' and the list
+  // of all files in the filesystem in 'candidate_files'.
+  // If force == false and the last call was less than
+  // db_options_.delete_obsolete_files_period_micros microseconds ago,
+  // it will not fill up the job_context
+  void FindObsoleteFiles(JobContext* job_context, bool force,
+                         bool no_full_scan = false);
+
+  // Diffs the files listed in filenames and those that do not
+  // belong to live files are possibly removed.  Also, removes all the
+  // files in sst_delete_files and log_delete_files.
+  // It is not necessary to hold the mutex when invoking this method.
+  void PurgeObsoleteFiles(const JobContext& background_context,
+                          bool schedule_only = false);
+
+  void SchedulePurge();
+
+  ColumnFamilyHandle* DefaultColumnFamily() const override;
+
+  const SnapshotList& snapshots() const { return snapshots_; }
+
+  void CancelAllBackgroundWork(bool wait);
+
+  // Find the SuperVersion and reference it.  Based on options, it might return
+  // the thread-local cached one.
+  // Call ReturnAndCleanupSuperVersion() when it is no longer needed.
+  SuperVersion* GetAndRefSuperVersion(ColumnFamilyData* cfd);
+
+  // Similar to the previous function but looks up based on a column family id.
+  // nullptr will be returned if this column family no longer exists.
+  // REQUIRED: this function should only be called on the write thread or if the
+  // mutex is held.
+  SuperVersion* GetAndRefSuperVersion(uint32_t column_family_id);
+
+  // Same as above, but should be called without the mutex held and not on the
+  // write thread.
+  SuperVersion* GetAndRefSuperVersionUnlocked(uint32_t column_family_id);
+
+  // Un-reference the super version and return it to the thread-local cache if
+  // needed.  If it is the last reference of the super version, clean it up
+  // after un-referencing it.
+  void ReturnAndCleanupSuperVersion(ColumnFamilyData* cfd, SuperVersion* sv);
+
+  // Similar to the previous function but looks up based on a column family id.
+  // nullptr will be returned if this column family no longer exists.
+  // REQUIRED: this function should only be called on the write thread.
+  void ReturnAndCleanupSuperVersion(uint32_t column_family_id, SuperVersion* sv);
+
+  // Same as above, but should be called without the mutex held and not on the
+  // write thread.
+  void ReturnAndCleanupSuperVersionUnlocked(uint32_t column_family_id,
+                                            SuperVersion* sv);
+
+  // REQUIRED: this function should only be called on the write thread or if the
+  // mutex is held.  The return value is only valid until the next call to this
+  // function or until the mutex is released.
+  ColumnFamilyHandle* GetColumnFamilyHandle(uint32_t column_family_id);
+
+  // Same as above, but should be called without the mutex held and not on the
+  // write thread.
+  ColumnFamilyHandle* GetColumnFamilyHandleUnlocked(uint32_t column_family_id);
+
+  // Returns the number of currently running flushes.
+  // REQUIREMENT: mutex_ must be held when calling this function.
+  int num_running_flushes() {
+    mutex_.AssertHeld();
+    return num_running_flushes_;
+  }
+
+  // Returns the number of currently running compactions.
+  // REQUIREMENT: mutex_ must be held when calling this function.
+  int num_running_compactions() {
+    mutex_.AssertHeld();
+    return num_running_compactions_;
+  }
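+  // GetAndRefSuperVersion()/ReturnAndCleanupSuperVersion() form a classic
+  // acquire/release pair; a hedged RAII sketch a caller could layer on top
+  // (SuperVersionGuard is illustrative, not part of this codebase):
+  //
+  //   class SuperVersionGuard {
+  //    public:
+  //     SuperVersionGuard(DBImpl* db, ColumnFamilyData* cfd)
+  //         : db_(db), cfd_(cfd), sv_(db->GetAndRefSuperVersion(cfd)) {}
+  //     ~SuperVersionGuard() { db_->ReturnAndCleanupSuperVersion(cfd_, sv_); }
+  //     SuperVersion* get() const { return sv_; }
+  //    private:
+  //     DBImpl* db_;
+  //     ColumnFamilyData* cfd_;
+  //     SuperVersion* sv_;
+  //   };
+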
+  // Hollow transaction shells used for recovery.
+  // These will then be passed to TransactionDB so that
+  // locks can be reacquired before writing can resume.
+  struct RecoveredTransaction {
+    uint64_t log_number_;
+    std::string name_;
+    WriteBatch* batch_;
+    explicit RecoveredTransaction(const uint64_t log, const std::string& name,
+                                  WriteBatch* batch)
+        : log_number_(log), name_(name), batch_(batch) {}
+
+    ~RecoveredTransaction() { delete batch_; }
+  };
+
+  bool allow_2pc() const { return db_options_.allow_2pc; }
+
+  std::unordered_map<std::string, RecoveredTransaction*>
+  recovered_transactions() {
+    return recovered_transactions_;
+  }
+
+  RecoveredTransaction* GetRecoveredTransaction(const std::string& name) {
+    auto it = recovered_transactions_.find(name);
+    if (it == recovered_transactions_.end()) {
+      return nullptr;
+    } else {
+      return it->second;
+    }
+  }
+
+  void InsertRecoveredTransaction(const uint64_t log, const std::string& name,
+                                  WriteBatch* batch) {
+    recovered_transactions_[name] = new RecoveredTransaction(log, name, batch);
+    MarkLogAsContainingPrepSection(log);
+  }
+
+  void DeleteRecoveredTransaction(const std::string& name) {
+    auto it = recovered_transactions_.find(name);
+    assert(it != recovered_transactions_.end());
+    auto* trx = it->second;
+    recovered_transactions_.erase(it);
+    MarkLogAsHavingPrepSectionFlushed(trx->log_number_);
+    delete trx;
+  }
+
+  void DeleteAllRecoveredTransactions() {
+    for (auto it = recovered_transactions_.begin();
+         it != recovered_transactions_.end(); it++) {
+      delete it->second;
+    }
+    recovered_transactions_.clear();
+  }
+
+  void MarkLogAsHavingPrepSectionFlushed(uint64_t log);
+  void MarkLogAsContainingPrepSection(uint64_t log);
+  void AddToLogsToFreeQueue(log::Writer* log_writer) {
+    logs_to_free_queue_.push_back(log_writer);
+  }
+
+  Status NewDB();
+
+ protected:
+  Env* const env_;
+  const std::string dbname_;
+  unique_ptr<VersionSet> versions_;
+  const DBOptions db_options_;
+  Statistics* stats_;
+  std::unordered_map<std::string, RecoveredTransaction*>
+      recovered_transactions_;
+
+  InternalIterator* NewInternalIterator(const ReadOptions&,
+                                        ColumnFamilyData* cfd,
+                                        SuperVersion* super_version,
+                                        Arena* arena);
+
+  // Except in DB::Open(), WriteOptionsFile can only be called when:
+  // 1. WriteThread::Writer::EnterUnbatched() is used.
+  // 2. db_mutex is held
+  Status WriteOptionsFile();
+
+  // The following two functions can only be called when:
+  // 1. WriteThread::Writer::EnterUnbatched() is used.
+  // 2. db_mutex is NOT held
+  Status RenameTempFileToOptionsFile(const std::string& file_name);
+  Status DeleteObsoleteOptionsFiles();
+
+  void NotifyOnFlushCompleted(ColumnFamilyData* cfd, FileMetaData* file_meta,
+                              const MutableCFOptions& mutable_cf_options,
+                              int job_id, TableProperties prop);
+
+  void NotifyOnCompactionCompleted(ColumnFamilyData* cfd,
+                                   Compaction* c, const Status& st,
+                                   const CompactionJobStats& job_stats,
+                                   int job_id);
+  void NotifyOnMemTableSealed(ColumnFamilyData* cfd,
+                              const MemTableInfo& mem_table_info);
+
+  void NewThreadStatusCfInfo(ColumnFamilyData* cfd) const;
+
+  void EraseThreadStatusCfInfo(ColumnFamilyData* cfd) const;
+
+  void EraseThreadStatusDbInfo() const;
+
+  Status WriteImpl(const WriteOptions& options, WriteBatch* updates,
+                   WriteCallback* callback = nullptr,
+                   uint64_t* log_used = nullptr, uint64_t log_ref = 0,
+                   bool disable_memtable = false);
+
+  uint64_t FindMinLogContainingOutstandingPrep();
+  uint64_t FindMinPrepLogReferencedByMemTable();
+
+ private:
+  friend class DB;
+  friend class InternalStats;
+  friend class TransactionImpl;
+#ifndef ROCKSDB_LITE
+  friend class ForwardIterator;
+#endif
+  friend struct SuperVersion;
+  friend class CompactedDBImpl;
+#ifndef NDEBUG
+  friend class XFTransactionWriteHandler;
+#endif
+  struct CompactionState;
+
+  struct WriteContext;
+
+  struct PurgeFileInfo;
+
+  // Recover the descriptor from persistent storage.  May do a significant
+  // amount of work to recover recently logged updates.  Any changes to
+  // be made to the descriptor are added to *edit.
+  Status Recover(const std::vector<ColumnFamilyDescriptor>& column_families,
+                 bool read_only = false, bool error_if_log_file_exist = false,
+                 bool error_if_data_exists_in_logs = false);
+
+  void MaybeIgnoreError(Status* s) const;
+
+  const Status CreateArchivalDirectory();
+
+  // Delete any unneeded files and stale in-memory entries.
+  void DeleteObsoleteFiles();
+  // Delete obsolete files and log status and information of file deletion
+  void DeleteObsoleteFileImpl(Status file_deletion_status, int job_id,
+                              const std::string& fname, FileType type,
+                              uint64_t number, uint32_t path_id);
+
+  // Background process needs to call
+  //     auto x = CaptureCurrentFileNumberInPendingOutputs()
+  //     auto file_num = versions_->NewFileNumber();
+  //     <do something about file_num>
+  //     ReleaseFileNumberFromPendingOutputs(x)
+  // This will protect any file with number `file_num` or greater from being
+  // deleted while <do something about file_num> is running.
+  // -----------
+  // This function will capture the current file number and append it to
+  // pending_outputs_.  This will prevent any background process from deleting
+  // any file created after this point.
+  std::list<uint64_t>::iterator CaptureCurrentFileNumberInPendingOutputs();
+  // This function should be called with the result of
+  // CaptureCurrentFileNumberInPendingOutputs().  It then marks that any file
+  // created between the calls CaptureCurrentFileNumberInPendingOutputs() and
+  // ReleaseFileNumberFromPendingOutputs() can now be deleted (if it's not live
+  // and blocked by any other pending_outputs_ calls)
+  void ReleaseFileNumberFromPendingOutputs(std::list<uint64_t>::iterator v);
+
+  Status SyncClosedLogs(JobContext* job_context);
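+  // A hedged sketch of the capture/release mechanics documented above
+  // (simplified; names are illustrative, not the actual implementation):
+  //
+  //   uint64_t next_file_number_;            // grows monotonically
+  //   std::list<uint64_t> pending_outputs_;  // therefore always sorted
+  //
+  //   std::list<uint64_t>::iterator Capture() {
+  //     pending_outputs_.push_back(next_file_number_);
+  //     return std::prev(pending_outputs_.end());
+  //   }
+  //   void Release(std::list<uint64_t>::iterator v) {
+  //     pending_outputs_.erase(v);
+  //   }
+  //   // Obsolete-file deletion then skips every file number >=
+  //   // pending_outputs_.front(), which protects all in-flight outputs.
+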
+  // Flush the in-memory write buffer to storage.  Switches to a new
+  // log-file/memtable and writes a new descriptor iff successful.
+  Status FlushMemTableToOutputFile(ColumnFamilyData* cfd,
+                                   const MutableCFOptions& mutable_cf_options,
+                                   bool* madeProgress, JobContext* job_context,
+                                   LogBuffer* log_buffer);
+
+  // REQUIRES: log_numbers are sorted in ascending order
+  Status RecoverLogFiles(const std::vector<uint64_t>& log_numbers,
+                         SequenceNumber* max_sequence, bool read_only);
+
+  // The following two methods are used to flush a memtable to
+  // storage.  The first one is used at database RecoveryTime (when the
+  // database is opened) and is heavyweight because it holds the mutex
+  // for the entire period.  The second method WriteLevel0Table supports
+  // concurrent flush memtables to storage.
+  Status WriteLevel0TableForRecovery(int job_id, ColumnFamilyData* cfd,
+                                     MemTable* mem, VersionEdit* edit);
+
+  // num_bytes: for slowdown case, delay time is calculated based on
+  // `num_bytes` going through.
+  Status DelayWrite(uint64_t num_bytes);
+
+  Status ScheduleFlushes(WriteContext* context);
+
+  Status SwitchMemtable(ColumnFamilyData* cfd, WriteContext* context);
+
+  // Force current memtable contents to be flushed.
+  Status FlushMemTable(ColumnFamilyData* cfd, const FlushOptions& options);
+
+  // Wait for memtable flushed
+  Status WaitForFlushMemTable(ColumnFamilyData* cfd);
+
+#ifndef ROCKSDB_LITE
+  // Finds the lowest level in the DB that the ingested file can be added to
+  // REQUIRES: mutex_ held
+  int PickLevelForIngestedFile(ColumnFamilyData* cfd,
+                               const ExternalSstFileInfo& file_info);
+
+  Status CompactFilesImpl(
+      const CompactionOptions& compact_options, ColumnFamilyData* cfd,
+      Version* version, const std::vector<std::string>& input_file_names,
+      const int output_level, int output_path_id, JobContext* job_context,
+      LogBuffer* log_buffer);
+  Status ReadExternalSstFileInfo(ColumnFamilyHandle* column_family,
+                                 const std::string& file_path,
+                                 ExternalSstFileInfo* file_info);
+
+#endif  // ROCKSDB_LITE
+
+  ColumnFamilyData* GetColumnFamilyDataByName(const std::string& cf_name);
+
+  void MaybeScheduleFlushOrCompaction();
+  void SchedulePendingFlush(ColumnFamilyData* cfd);
+  void SchedulePendingCompaction(ColumnFamilyData* cfd);
+  void SchedulePendingPurge(std::string fname, FileType type, uint64_t number,
+                            uint32_t path_id, int job_id);
+  static void BGWorkCompaction(void* arg);
+  static void BGWorkFlush(void* db);
+  static void BGWorkPurge(void* arg);
+  static void UnscheduleCallback(void* arg);
+  void BackgroundCallCompaction(void* arg);
+  void BackgroundCallFlush();
+  void BackgroundCallPurge();
+  Status BackgroundCompaction(bool* madeProgress, JobContext* job_context,
+                              LogBuffer* log_buffer, void* m = 0);
+  Status BackgroundFlush(bool* madeProgress, JobContext* job_context,
+                         LogBuffer* log_buffer);
+
+  void PrintStatistics();
+
+  // dump rocksdb.stats to LOG
+  void MaybeDumpStats();
+
+  // Return the minimum empty level that could hold the total data in the
+  // input level.  Return the input level if such a level could not be found.
+  int FindMinimumEmptyLevelFitting(ColumnFamilyData* cfd,
+                                   const MutableCFOptions& mutable_cf_options,
+                                   int level);
+
+  // Move the files in the input level to the target level.
+  // If target_level < 0, automatically calculate the minimum level that could
+  // hold the data set.
+  Status ReFitLevel(ColumnFamilyData* cfd, int level, int target_level = -1);
+
+  // helper functions for adding and removing from flush & compaction queues
+  void AddToCompactionQueue(ColumnFamilyData* cfd);
+  ColumnFamilyData* PopFirstFromCompactionQueue();
+  void AddToFlushQueue(ColumnFamilyData* cfd);
+  ColumnFamilyData* PopFirstFromFlushQueue();
+
+  // helper function to call after some of the logs_ were synced
+  void MarkLogsSynced(uint64_t up_to, bool synced_dir, const Status& status);
+
+  const Snapshot* GetSnapshotImpl(bool is_write_conflict_boundary);
+
+  // table_cache_ provides its own synchronization
+  std::shared_ptr<Cache> table_cache_;
+
+  // Lock over the persistent DB state.  Non-nullptr iff successfully acquired.
+  FileLock* db_lock_;
+
+  // The mutex for options file related operations.
+  // NOTE: should never acquire options_file_mutex_ and mutex_ at the
+  // same time.
+  InstrumentedMutex options_files_mutex_;
+  // State below is protected by mutex_
+  InstrumentedMutex mutex_;
+
+  std::atomic<bool> shutting_down_;
+  // This condition variable is signaled on these conditions:
+  // * whenever bg_compaction_scheduled_ goes down to 0
+  // * if AnyManualCompaction, whenever a compaction finishes, even if it hasn't
+  //   made any progress
+  // * whenever a compaction made any progress
+  // * whenever bg_flush_scheduled_ or bg_purge_scheduled_ value decreases
+  //   (i.e. whenever a flush is done, even if it didn't make any progress)
+  // * whenever there is an error in background purge, flush or compaction
+  InstrumentedCondVar bg_cv_;
+  uint64_t logfile_number_;
+  std::deque<uint64_t>
+      log_recycle_files;  // a list of log files that we can recycle
+  bool log_dir_synced_;
+  bool log_empty_;
+  ColumnFamilyHandleImpl* default_cf_handle_;
+  InternalStats* default_cf_internal_stats_;
+  unique_ptr<ColumnFamilyMemTablesImpl> column_family_memtables_;
+  struct LogFileNumberSize {
+    explicit LogFileNumberSize(uint64_t _number)
+        : number(_number) {}
+    void AddSize(uint64_t new_size) { size += new_size; }
+    uint64_t number;
+    uint64_t size = 0;
+    bool getting_flushed = false;
+  };
+  struct LogWriterNumber {
+    // pass ownership of _writer
+    LogWriterNumber(uint64_t _number, log::Writer* _writer)
+        : number(_number), writer(_writer) {}
+
+    log::Writer* ReleaseWriter() {
+      auto* w = writer;
+      writer = nullptr;
+      return w;
+    }
+    void ClearWriter() {
+      delete writer;
+      writer = nullptr;
+    }
+
+    uint64_t number;
+    // Visual Studio doesn't support deque's member to be noncopyable because
+    // of a unique_ptr as a member.
+    log::Writer* writer;  // own
+    // true for some prefix of logs_
+    bool getting_synced = false;
+  };
+  std::deque<LogFileNumberSize> alive_log_files_;
+  // Log files that aren't fully synced, and the current log file.
+  // Synchronization:
+  //  - push_back() is done from the write thread with locked mutex_,
+  //  - pop_front() is done from any thread with locked mutex_,
+  //  - back() and items with getting_synced=true are not popped,
+  //  - it follows that the write thread with unlocked mutex_ can safely access
+  //    back() and items with getting_synced=true.
+  std::deque<LogWriterNumber> logs_;
+  // Signaled when getting_synced becomes false for some of the logs_.
+  InstrumentedCondVar log_sync_cv_;
+  uint64_t total_log_size_;
+  // only used for dynamically adjusting max_total_wal_size.  it is a sum of
+  // [write_buffer_size * max_write_buffer_number] over all column families
+  uint64_t max_total_in_memory_state_;
+  // If true, we have only one (default) column family.  We use this to
+  // optimize some code paths.
+  bool single_column_family_mode_;
+  // If this is non-empty, we need to delete these log files in background
+  // threads.  Protected by db mutex.
+  autovector<log::Writer*> logs_to_free_;
+
+  bool is_snapshot_supported_;
+
+  // Class to maintain directories for all database paths other than the main
+  // one.
+  class Directories {
+   public:
+    Status SetDirectories(Env* env, const std::string& dbname,
+                          const std::string& wal_dir,
+                          const std::vector<DbPath>& data_paths);
+
+    Directory* GetDataDir(size_t path_id);
+
+    Directory* GetWalDir() {
+      if (wal_dir_) {
+        return wal_dir_.get();
+      }
+      return db_dir_.get();
+    }
+
+    Directory* GetDbDir() { return db_dir_.get(); }
+
+   private:
+    std::unique_ptr<Directory> db_dir_;
+    std::vector<std::unique_ptr<Directory>> data_dirs_;
+    std::unique_ptr<Directory> wal_dir_;
+
+    Status CreateAndNewDirectory(Env* env, const std::string& dirname,
+                                 std::unique_ptr<Directory>* directory) const;
+  };
+
+  Directories directories_;
+
+  WriteBufferManager* write_buffer_manager_;
+
+  WriteThread write_thread_;
+
+  WriteBatch tmp_batch_;
+
+  WriteController write_controller_;
+
+  // Size of the last batch group.  In slowdown mode, the next write needs to
+  // sleep if it uses up the quota.
+  uint64_t last_batch_group_size_;
+
+  FlushScheduler flush_scheduler_;
+
+  SnapshotList snapshots_;
+
+  // For each background job, pending_outputs_ keeps the current file number at
+  // the time that background job started.
+  // FindObsoleteFiles()/PurgeObsoleteFiles() never deletes any file that has
+  // a number bigger than any of the file numbers in pending_outputs_.  Since
+  // file numbers grow monotonically, this also means that pending_outputs_ is
+  // always sorted.  After a background job is done executing, its file number
+  // is deleted from pending_outputs_, which allows PurgeObsoleteFiles() to
+  // clean it up.
+  // State is protected with db mutex.
+  std::list<uint64_t> pending_outputs_;
+
+  // PurgeFileInfo is a structure to hold information of files to be deleted in
+  // purge_queue_
+  struct PurgeFileInfo {
+    std::string fname;
+    FileType type;
+    uint64_t number;
+    uint32_t path_id;
+    int job_id;
+    PurgeFileInfo(std::string fn, FileType t, uint64_t num, uint32_t pid,
+                  int jid)
+        : fname(fn), type(t), number(num), path_id(pid), job_id(jid) {}
+  };
+
+  // flush_queue_ and compaction_queue_ hold column families that we need to
+  // flush and compact, respectively.
+  // A column family is inserted into flush_queue_ when it satisfies the
+  // condition cfd->imm()->IsFlushPending().
+  // A column family is inserted into compaction_queue_ when it satisfies the
+  // condition cfd->NeedsCompaction().
+  // Column families in this list are all Ref()-erenced
+  // TODO(icanadi) Provide some kind of ReferencedColumnFamily class that will
+  // do RAII on ColumnFamilyData
+  // Column families are in this queue when they need to be flushed or
+  // compacted.  Consumers of these queues are flush and compaction threads.
+  // When a column family is put on this queue, we increase
+  // unscheduled_flushes_ and unscheduled_compactions_.  When these variables
+  // are bigger than zero, that means we need to schedule background threads
+  // for flush and compaction.  Once the background threads are scheduled, we
+  // decrease unscheduled_flushes_ and unscheduled_compactions_.  That way we
+  // keep track of the number of compaction and flush threads we need to
+  // schedule.
+  // This scheduling is done in MaybeScheduleFlushOrCompaction().
+  // invariant(column family present in flush_queue_ <==>
+  //   ColumnFamilyData::pending_flush_ == true)
+  std::deque<ColumnFamilyData*> flush_queue_;
+  // invariant(column family present in compaction_queue_ <==>
+  //   ColumnFamilyData::pending_compaction_ == true)
+  std::deque<ColumnFamilyData*> compaction_queue_;
+
+  // A queue to store filenames of the files to be purged
+  std::deque<PurgeFileInfo> purge_queue_;
+
+  // A queue to store log writers to close
+  std::deque<log::Writer*> logs_to_free_queue_;
+  int unscheduled_flushes_;
+  int unscheduled_compactions_;
+
+  // count how many background compactions are running or have been scheduled
+  int bg_compaction_scheduled_;
+
+  // stores the number of compactions that are currently running
+  int num_running_compactions_;
+
+  // number of background memtable flush jobs, submitted to the HIGH pool
+  int bg_flush_scheduled_;
+
+  // stores the number of flushes that are currently running
+  int num_running_flushes_;
+
+  // number of background obsolete file purge jobs, submitted to the HIGH pool
+  int bg_purge_scheduled_;
+
+  // Information for a manual compaction
+  struct ManualCompaction {
+    ColumnFamilyData* cfd;
+    int input_level;
+    int output_level;
+    uint32_t output_path_id;
+    Status status;
+    bool done;
+    bool in_progress;            // compaction request being processed?
+    bool incomplete;             // only part of requested range compacted
+    bool exclusive;              // current behavior of only one manual
+    bool disallow_trivial_move;  // Force actual compaction to run
+    const InternalKey* begin;    // nullptr means beginning of key range
+    const InternalKey* end;      // nullptr means end of key range
+    InternalKey* manual_end;     // how far we are compacting
+    InternalKey tmp_storage;     // Used to keep track of compaction progress
+    InternalKey tmp_storage1;    // Used to keep track of compaction progress
+    Compaction* compaction;
+  };
+  std::deque<ManualCompaction*> manual_compaction_dequeue_;
+
+  struct CompactionArg {
+    DBImpl* db;
+    ManualCompaction* m;
+  };
+
+  // Have we encountered a background error in paranoid mode?
+  Status bg_error_;
+
+  // shall we disable deletion of obsolete files?
+  // if 0, deletion is enabled.
+  // if non-zero, files will not be getting deleted.
+  // This enables two different threads to call
+  // EnableFileDeletions() and DisableFileDeletions()
+  // without any synchronization.
+  int disable_delete_obsolete_files_;
+
+  // next time when we should run DeleteObsoleteFiles with full scan
+  uint64_t delete_obsolete_files_next_run_;
+
+  // last time stats were dumped to LOG
+  std::atomic<uint64_t> last_stats_dump_time_microsec_;
+
+  // Each flush or compaction gets its own job id.  This counter makes sure
+  // they're unique.
+  std::atomic<int> next_job_id_;
+
+  // A flag indicating whether the current rocksdb database has any
+  // data that is not yet persisted into either WAL or SST file.
+  // Used when disableWAL is true.
+  bool has_unpersisted_data_;
+
+  static const int KEEP_LOG_FILE_NUM = 1000;
+  // MSVC version 1800 still does not have constexpr for ::max()
+  static const uint64_t kNoTimeOut = port::kMaxUint64;
+
+  std::string db_absolute_path_;
+
+  // The options to access storage files
+  const EnvOptions env_options_;
+
+  // A set of compactions that are running right now
+  // REQUIRES: mutex held
+  std::unordered_set<Compaction*> running_compactions_;
+
+#ifndef ROCKSDB_LITE
+  WalManager wal_manager_;
+#endif  // ROCKSDB_LITE
+
+  // Unified interface for logging events
+  EventLogger event_logger_;
+
+  // A value of > 0 temporarily disables scheduling of background work
+  int bg_work_paused_;
+
+  // A value of > 0 temporarily disables scheduling of background compaction
+  int bg_compaction_paused_;
+
+  // Guard against multiple concurrent refitting
+  bool refitting_level_;
+
+  // Indicate DB was opened successfully
+  bool opened_successfully_;
+
+  // minimum log number still containing prepared data.
+  // this is used by FindObsoleteFiles to determine which
+  // flushed logs we must keep around because they still
+  // contain prepared data which has not been flushed or rolled back
+  std::priority_queue<uint64_t, std::vector<uint64_t>, std::greater<uint64_t>>
+      min_log_with_prep_;
+
+  // to be used in conjunction with min_log_with_prep_.
+  // once a transaction with data in log L is committed or rolled back,
+  // rather than removing the value from the heap we add that value
+  // to prepared_section_completed_ which maps LOG -> instance_count,
+  // since a log could contain multiple prepared sections
+  //
+  // when trying to determine the minimum log still active we first
+  // consult min_log_with_prep_.  while that root value maps to
+  // a value > 0 in prepared_section_completed_ we decrement the
+  // instance_count for that log and pop the root value in
+  // min_log_with_prep_.  This will work the same as a min_heap
+  // where we are deleting arbitrary elements and then up-heaping.
+  std::unordered_map<uint64_t, uint64_t> prepared_section_completed_;
+  std::mutex prep_heap_mutex_;
+
+  // No copying allowed
+  DBImpl(const DBImpl&);
+  void operator=(const DBImpl&);
+
+  // Return the earliest snapshot where seqno is visible.
+  // Store the snapshot right before that, if any, in prev_snapshot
+  inline SequenceNumber findEarliestVisibleSnapshot(
+      SequenceNumber in,
+      std::vector<SequenceNumber>& snapshots,
+      SequenceNumber* prev_snapshot);
+
+  // Background threads call this function, which is just a wrapper around
+  // the InstallSuperVersion() function.  Background threads carry
+  // job_context which can have new_superversion already
+  // allocated.
+  void InstallSuperVersionAndScheduleWorkWrapper(
+      ColumnFamilyData* cfd, JobContext* job_context,
+      const MutableCFOptions& mutable_cf_options);
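+  // A hedged sketch of the lazy-deletion scheme the comment above describes
+  // (single-threaded and simplified; inferred from the comment, not copied
+  // from the implementation):
+  //
+  //   uint64_t FindMinLogWithOutstandingPrep() {
+  //     while (!min_log_with_prep_.empty()) {
+  //       uint64_t log = min_log_with_prep_.top();
+  //       auto it = prepared_section_completed_.find(log);
+  //       if (it == prepared_section_completed_.end() || it->second == 0) {
+  //         return log;              // root still has live prepared data
+  //       }
+  //       it->second--;              // consume one completed section
+  //       min_log_with_prep_.pop();  // lazily drop the stale heap entry
+  //     }
+  //     return 0;                    // no logs with outstanding prep sections
+  //   }
+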
+  // All ColumnFamily state changes go through this function.  Here we analyze
+  // the new state and we schedule background work if we detect that the new
+  // state needs flush or compaction.
+  SuperVersion* InstallSuperVersionAndScheduleWork(
+      ColumnFamilyData* cfd, SuperVersion* new_sv,
+      const MutableCFOptions& mutable_cf_options);
+
+#ifndef ROCKSDB_LITE
+  using DB::GetPropertiesOfAllTables;
+  virtual Status GetPropertiesOfAllTables(ColumnFamilyHandle* column_family,
+                                          TablePropertiesCollection* props)
+      override;
+  virtual Status GetPropertiesOfTablesInRange(
+      ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
+      TablePropertiesCollection* props) override;
+
+#endif  // ROCKSDB_LITE
+
+  // Function that Get and KeyMayExist call with no_io true or false
+  // Note: 'value_found' from KeyMayExist propagates here
+  Status GetImpl(const ReadOptions& options, ColumnFamilyHandle* column_family,
+                 const Slice& key, std::string* value,
+                 bool* value_found = nullptr);
+
+  bool GetIntPropertyInternal(ColumnFamilyData* cfd,
+                              const DBPropertyInfo& property_info,
+                              bool is_locked, uint64_t* value);
+
+  bool HasPendingManualCompaction();
+  bool HasExclusiveManualCompaction();
+  void AddManualCompaction(ManualCompaction* m);
+  void RemoveManualCompaction(ManualCompaction* m);
+  bool ShouldntRunManualCompaction(ManualCompaction* m);
+  bool HaveManualCompaction(ColumnFamilyData* cfd);
+  bool MCOverlap(ManualCompaction* m, ManualCompaction* m1);
+};
+
+// Sanitize db options.  The caller should delete result.info_log if
+// it is not equal to src.info_log.
+extern Options SanitizeOptions(const std::string& db,
+                               const InternalKeyComparator* icmp,
+                               const Options& src);
+extern DBOptions SanitizeOptions(const std::string& db, const DBOptions& src);
+
+// Fix user-supplied options to be reasonable
+template <class T, class V>
+static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
+  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
+  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
+}
+
+}  // namespace rocksdb
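ClipToRange is easy to misread: it clamps *ptr into [minvalue, maxvalue], checking the upper bound first, and is used while sanitizing user-supplied options. A standalone illustration (the variable names and bounds are made up for the example):

#include <cassert>
#include <cstddef>

template <class T, class V>
static void ClipToRange(T* ptr, V minvalue, V maxvalue) {
  if (static_cast<V>(*ptr) > maxvalue) *ptr = maxvalue;
  if (static_cast<V>(*ptr) < minvalue) *ptr = minvalue;
}

int main() {
  size_t write_buffer_size = 1;  // too small
  ClipToRange(&write_buffer_size, size_t{64} << 10, size_t{64} << 20);
  assert(write_buffer_size == size_t{64} << 10);  // clamped up to 64 KiB

  int level0_stop = 1000;        // too large
  ClipToRange(&level0_stop, 1, 256);
  assert(level0_stop == 256);    // clamped down to the maximum
  return 0;
}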
diff --git a/external/rocksdb/db/db_impl_add_file.cc b/external/rocksdb/db/db_impl_add_file.cc
new file mode 100755
index 0000000000..3019f33913
--- /dev/null
+++ b/external/rocksdb/db/db_impl_add_file.cc
@@ -0,0 +1,419 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "db/db_impl.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+
+#include "db/builder.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/sst_file_writer.h"
+#include "table/table_builder.h"
+#include "util/file_reader_writer.h"
+#include "util/file_util.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+namespace {
+// RAII Timer
+class StatsTimer {
+ public:
+  explicit StatsTimer(Env* env, uint64_t* counter)
+      : env_(env), counter_(counter), start_micros_(env_->NowMicros()) {}
+  ~StatsTimer() { *counter_ += env_->NowMicros() - start_micros_; }
+
+ private:
+  Env* env_;
+  uint64_t* counter_;
+  uint64_t start_micros_;
+};
+}  // anonymous namespace
+
+#ifndef ROCKSDB_LITE
+
+Status DBImpl::ReadExternalSstFileInfo(ColumnFamilyHandle* column_family,
+                                       const std::string& file_path,
+                                       ExternalSstFileInfo* file_info) {
+  Status status;
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  auto cfd = cfh->cfd();
+
+  file_info->file_path = file_path;
+  status = env_->GetFileSize(file_path, &file_info->file_size);
+  if (!status.ok()) {
+    return status;
+  }
+
+  // Access the file using TableReader to extract
+  // version, number of entries, smallest user key, largest user key
+  std::unique_ptr<RandomAccessFile> sst_file;
+  status = env_->NewRandomAccessFile(file_path, &sst_file, env_options_);
+  if (!status.ok()) {
+    return status;
+  }
+  std::unique_ptr<RandomAccessFileReader> sst_file_reader;
+  sst_file_reader.reset(new RandomAccessFileReader(std::move(sst_file)));
+
+  std::unique_ptr<TableReader> table_reader;
+  status = cfd->ioptions()->table_factory->NewTableReader(
+      TableReaderOptions(*cfd->ioptions(), env_options_,
+                         cfd->internal_comparator()),
+      std::move(sst_file_reader), file_info->file_size, &table_reader);
+  if (!status.ok()) {
+    return status;
+  }
+
+  // Get the external sst file version from table properties
+  const UserCollectedProperties& user_collected_properties =
+      table_reader->GetTableProperties()->user_collected_properties;
+  UserCollectedProperties::const_iterator external_sst_file_version_iter =
+      user_collected_properties.find(ExternalSstFilePropertyNames::kVersion);
+  if (external_sst_file_version_iter == user_collected_properties.end()) {
+    return Status::InvalidArgument("Generated table version not found");
+  }
+
+  file_info->version =
+      DecodeFixed32(external_sst_file_version_iter->second.c_str());
+  if (file_info->version == 1) {
+    // version 1 implies that all sequence numbers in the table equal 0
+    file_info->sequence_number = 0;
+  } else {
+    return Status::InvalidArgument("Generated table version is not supported");
+  }
+  // Get number of entries in table
+  file_info->num_entries = table_reader->GetTableProperties()->num_entries;
+
+  ParsedInternalKey key;
+  std::unique_ptr<InternalIterator> iter(
+      table_reader->NewIterator(ReadOptions()));
+
+  // Get first (smallest) key from file
+  iter->SeekToFirst();
+  if (!ParseInternalKey(iter->key(), &key)) {
+    return Status::Corruption("Generated table have corrupted keys");
+  }
+  if (key.sequence != 0) {
+    return Status::Corruption("Generated table have non zero sequence number");
+  }
+  file_info->smallest_key = key.user_key.ToString();
+
+  // Get last (largest) key from file
+  iter->SeekToLast();
+  if (!ParseInternalKey(iter->key(), &key)) {
+    return Status::Corruption("Generated table have corrupted keys");
+  }
+  if (key.sequence != 0) {
+    return Status::Corruption("Generated table have non zero sequence number");
+  }
+  file_info->largest_key = key.user_key.ToString();
+
+  return Status::OK();
+}
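+// The RAII StatsTimer above adds the elapsed wall-clock microseconds to a
+// counter when it leaves scope; a hedged usage sketch (the counter name and
+// DoExpensiveWork are illustrative placeholders):
+//
+//   uint64_t add_file_micros = 0;
+//   {
+//     StatsTimer t(env_, &add_file_micros);
+//     DoExpensiveWork();  // timed region
+//   }                     // destructor accumulates the elapsed time
+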
+
+Status DBImpl::AddFile(ColumnFamilyHandle* column_family,
+                       const std::vector<std::string>& file_path_list,
+                       bool move_file) {
+  Status status;
+  auto num_files = file_path_list.size();
+  if (num_files == 0) {
+    return Status::InvalidArgument("The list of files is empty");
+  }
+
+  std::vector<ExternalSstFileInfo> file_info_list(num_files);
+  for (size_t i = 0; i < num_files; i++) {
+    status = ReadExternalSstFileInfo(column_family, file_path_list[i],
+                                     &file_info_list[i]);
+    if (!status.ok()) {
+      return status;
+    }
+  }
+  return AddFile(column_family, file_info_list, move_file);
+}
+
+Status DBImpl::AddFile(ColumnFamilyHandle* column_family,
+                       const std::vector<ExternalSstFileInfo>& file_info_list,
+                       bool move_file) {
+  Status status;
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  ColumnFamilyData* cfd = cfh->cfd();
+
+  auto num_files = file_info_list.size();
+  if (num_files == 0) {
+    return Status::InvalidArgument("The list of files is empty");
+  }
+
+  // Verify that the passed files don't have overlapping ranges
+  if (num_files > 1) {
+    std::vector<const ExternalSstFileInfo*> sorted_file_info_list(num_files);
+    for (size_t i = 0; i < num_files; i++) {
+      sorted_file_info_list[i] = &file_info_list[i];
+    }
+
+    auto* vstorage = cfd->current()->storage_info();
+    std::sort(
+        sorted_file_info_list.begin(), sorted_file_info_list.end(),
+        [&vstorage, &file_info_list](const ExternalSstFileInfo* info1,
+                                     const ExternalSstFileInfo* info2) {
+          return vstorage->InternalComparator()->user_comparator()->Compare(
+                     info1->smallest_key, info2->smallest_key) < 0;
+        });
+
+    for (size_t i = 0; i < num_files - 1; i++) {
+      if (sorted_file_info_list[i]->largest_key >=
+          sorted_file_info_list[i + 1]->smallest_key) {
+        return Status::NotSupported("Cannot add overlapping range among files");
+      }
+    }
+  }
+
+  std::vector<uint64_t> micro_list(num_files, 0);
+  std::vector<FileMetaData> meta_list(num_files);
+  for (size_t i = 0; i < num_files; i++) {
+    StatsTimer t(env_, &micro_list[i]);
+    if (file_info_list[i].num_entries == 0) {
+      return Status::InvalidArgument("File contain no entries");
+    }
+    if (file_info_list[i].version != 1) {
+      return Status::InvalidArgument(
+          "Generated table version is not supported");
+    }
+    // version 1 implies that the file has only Put operations with
+    // sequence number = 0
+
+    meta_list[i].smallest =
+        InternalKey(file_info_list[i].smallest_key,
+                    file_info_list[i].sequence_number, ValueType::kTypeValue);
+    meta_list[i].largest =
+        InternalKey(file_info_list[i].largest_key,
+                    file_info_list[i].sequence_number, ValueType::kTypeValue);
+    if (!meta_list[i].smallest.Valid() || !meta_list[i].largest.Valid()) {
+      return Status::Corruption("Generated table have corrupted keys");
+    }
+    meta_list[i].smallest_seqno = file_info_list[i].sequence_number;
+    meta_list[i].largest_seqno = file_info_list[i].sequence_number;
+    if (meta_list[i].smallest_seqno != 0 || meta_list[i].largest_seqno != 0) {
+      return Status::InvalidArgument(
+          "Non zero sequence numbers are not supported");
+    }
+  }
+
+  std::vector<std::list<uint64_t>::iterator>
+      pending_outputs_inserted_elem_list(num_files);
+  // Generate locations for the new tables
+  {
+    InstrumentedMutexLock l(&mutex_);
+    for (size_t i = 0; i < num_files; i++) {
+      StatsTimer t(env_, &micro_list[i]);
+      pending_outputs_inserted_elem_list[i] =
+          CaptureCurrentFileNumberInPendingOutputs();
+      meta_list[i].fd = FileDescriptor(versions_->NewFileNumber(), 0,
+                                       file_info_list[i].file_size);
+    }
+  }
+
+  // Copy/Move external files into DB
+  std::vector<std::string> db_fname_list(num_files);
+  size_t j = 0;
+  for (; j < num_files; j++) {
+    StatsTimer t(env_, &micro_list[j]);
+    db_fname_list[j] =
+        TableFileName(db_options_.db_paths, meta_list[j].fd.GetNumber(),
+                      meta_list[j].fd.GetPathId());
+    if (move_file) {
+      status = env_->LinkFile(file_info_list[j].file_path, db_fname_list[j]);
+      if (status.IsNotSupported()) {
+        // Original file is on a different FS, use copy instead of hard linking
+        status =
+            CopyFile(env_, file_info_list[j].file_path, db_fname_list[j], 0);
+      }
+    } else {
+      status = CopyFile(env_, file_info_list[j].file_path, db_fname_list[j], 0);
+    }
+    TEST_SYNC_POINT("DBImpl::AddFile:FileCopied");
+    if (!status.ok()) {
+      for (size_t i = 0; i < j; i++) {
+        Status s = env_->DeleteFile(db_fname_list[i]);
+        if (!s.ok()) {
+          Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+              "AddFile() clean up for file %s failed : %s",
+              db_fname_list[i].c_str(), s.ToString().c_str());
+        }
+      }
+      return status;
+    }
+  }
+
+  {
+    InstrumentedMutexLock l(&mutex_);
+    const MutableCFOptions mutable_cf_options =
+        *cfd->GetLatestMutableCFOptions();
+
+    WriteThread::Writer w;
+    write_thread_.EnterUnbatched(&w, &mutex_);
+
+    if (!snapshots_.empty()) {
+      // Check that no snapshots are being held
+      status =
+          Status::NotSupported("Cannot add a file while holding snapshots");
+    }
+
+    if (status.ok()) {
+      // Verify that the added file key range doesn't overlap with any keys
+      // in the DB
+      SuperVersion* sv = cfd->GetSuperVersion()->Ref();
+      Arena arena;
+      ReadOptions ro;
+      ro.total_order_seek = true;
+      ScopedArenaIterator iter(NewInternalIterator(ro, cfd, sv, &arena));
+
+      for (size_t i = 0; i < num_files; i++) {
+        StatsTimer t(env_, &micro_list[i]);
+        InternalKey range_start(file_info_list[i].smallest_key,
+                                kMaxSequenceNumber, kTypeValue);
+        iter->Seek(range_start.Encode());
+        status = iter->status();
+
+        if (status.ok() && iter->Valid()) {
+          ParsedInternalKey seek_result;
+          if (ParseInternalKey(iter->key(), &seek_result)) {
+            auto* vstorage = cfd->current()->storage_info();
+            if (vstorage->InternalComparator()->user_comparator()->Compare(
+                    seek_result.user_key, file_info_list[i].largest_key) <= 0) {
+              status = Status::NotSupported("Cannot add overlapping range");
+              break;
+            }
+          } else {
+            status = Status::Corruption("DB have corrupted keys");
+            break;
+          }
+        }
+      }
+    }
+
+    // The level the file will be ingested into
+    std::vector<int> target_level_list(num_files, 0);
+    if (status.ok()) {
+      // Add files to L0
+      VersionEdit edit;
+      edit.SetColumnFamily(cfd->GetID());
+      for (size_t i = 0; i < num_files; i++) {
+        StatsTimer t(env_, &micro_list[i]);
+        // Add file to the lowest possible level
+        target_level_list[i] = PickLevelForIngestedFile(cfd, file_info_list[i]);
+        edit.AddFile(target_level_list[i], meta_list[i].fd.GetNumber(),
+                     meta_list[i].fd.GetPathId(), meta_list[i].fd.GetFileSize(),
+                     meta_list[i].smallest, meta_list[i].largest,
+                     meta_list[i].smallest_seqno, meta_list[i].largest_seqno,
+                     meta_list[i].marked_for_compaction);
+      }
+      status = versions_->LogAndApply(cfd, mutable_cf_options, &edit, &mutex_,
+                                      directories_.GetDbDir());
+    }
+    write_thread_.ExitUnbatched(&w);
+
+    if (status.ok()) {
+      delete InstallSuperVersionAndScheduleWork(cfd, nullptr,
+                                                mutable_cf_options);
+    }
+    for (size_t i = 0; i < num_files; i++) {
+      // Update internal stats
+      InternalStats::CompactionStats stats(1);
+      stats.micros = micro_list[i];
+      stats.bytes_written = meta_list[i].fd.GetFileSize();
+      stats.num_output_files = 1;
+      cfd->internal_stats()->AddCompactionStats(target_level_list[i], stats);
+      cfd->internal_stats()->AddCFStats(InternalStats::BYTES_INGESTED_ADD_FILE,
+                                        meta_list[i].fd.GetFileSize());
+      ReleaseFileNumberFromPendingOutputs(
+          pending_outputs_inserted_elem_list[i]);
+    }
+  }
+
+  if (!status.ok()) {
+    // We failed to add the files to the database
+    for (size_t i = 0; i < num_files; i++) {
+      Status s = env_->DeleteFile(db_fname_list[i]);
+      if (!s.ok()) {
+        Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+            "AddFile() clean up for file %s failed : %s",
+            db_fname_list[i].c_str(), s.ToString().c_str());
+      }
+    }
+  } else if (status.ok() && move_file) {
+    // The files were moved and added successfully, remove original file links
+    for (size_t i = 0; i < num_files; i++) {
+      Status s = env_->DeleteFile(file_info_list[i].file_path);
+      if (!s.ok()) {
+        Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+            "%s was added to DB successfully but failed to remove original "
+            "file link : %s",
+            file_info_list[i].file_path.c_str(), s.ToString().c_str());
+      }
+    }
+  }
+  return status;
+}
+
+// Finds the lowest level in the DB that the ingested file can be added to
+int DBImpl::PickLevelForIngestedFile(ColumnFamilyData* cfd,
+                                     const ExternalSstFileInfo& file_info) {
+  mutex_.AssertHeld();
+
+  int target_level = 0;
+  auto* vstorage = cfd->current()->storage_info();
+  auto* ucmp = vstorage->InternalComparator()->user_comparator();
+
+  Slice file_smallest_user_key(file_info.smallest_key);
+  Slice file_largest_user_key(file_info.largest_key);
+
+  for (int lvl = cfd->NumberLevels() - 1; lvl >= vstorage->base_level();
+       lvl--) {
+    if (vstorage->OverlapInLevel(lvl, &file_smallest_user_key,
+                                 &file_largest_user_key) == false) {
+      // Make sure that the file doesn't overlap with the output of any
+      // compaction running right now
+      Slice compaction_smallest_user_key;
+      Slice compaction_largest_user_key;
+      bool overlap_with_compaction_output = false;
+      for (Compaction* c : running_compactions_) {
+        if (c->column_family_data()->GetID() != cfd->GetID() ||
+            c->output_level() != lvl) {
+          continue;
+        }
+
+        compaction_smallest_user_key = c->GetSmallestUserKey();
+        compaction_largest_user_key = c->GetLargestUserKey();
+
+        if (ucmp->Compare(file_smallest_user_key,
+                          compaction_largest_user_key) <= 0 &&
+            ucmp->Compare(file_largest_user_key,
+                          compaction_smallest_user_key) >= 0) {
+          overlap_with_compaction_output = true;
+          break;
+        }
+      }
+
+      if (overlap_with_compaction_output == false) {
+        // Level lvl is the lowest level that doesn't have any files whose
+        // key range overlaps with our file's key range, and no compaction
+        // is planning to add overlapping files to it.
+        target_level = lvl;
+        break;
+      }
+    }
+  }
+
+  return target_level;
+}
+#endif // ROCKSDB_LITE
+
+} // namespace rocksdb
diff --git a/external/rocksdb/db/db_impl_debug.cc b/external/rocksdb/db/db_impl_debug.cc
new file mode 100755
index 0000000000..37a58a307f
--- /dev/null
+++ b/external/rocksdb/db/db_impl_debug.cc
@@ -0,0 +1,182 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
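+
+// The TEST_ hooks defined below are compiled only when NDEBUG is not set.
+// A minimal sketch of how a test typically drives them (illustrative only,
+// not part of the original patch; it assumes a DBImpl* obtained through the
+// dbfull() helper used by the RocksDB test fixtures):
+//
+//   DBImpl* impl = dbfull();
+//   impl->TEST_FlushMemTable(true /* wait */, nullptr);
+//   impl->TEST_WaitForCompact();
+//   uint64_t l0_bytes = impl->TEST_GetLevel0TotalSize();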
+
+#ifndef NDEBUG
+
+#include "db/db_impl.h"
+#include "util/thread_status_updater.h"
+
+namespace rocksdb {
+
+uint64_t DBImpl::TEST_GetLevel0TotalSize() {
+  InstrumentedMutexLock l(&mutex_);
+  return default_cf_handle_->cfd()->current()->storage_info()->NumLevelBytes(0);
+}
+
+int64_t DBImpl::TEST_MaxNextLevelOverlappingBytes(
+    ColumnFamilyHandle* column_family) {
+  ColumnFamilyData* cfd;
+  if (column_family == nullptr) {
+    cfd = default_cf_handle_->cfd();
+  } else {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+    cfd = cfh->cfd();
+  }
+  InstrumentedMutexLock l(&mutex_);
+  return cfd->current()->storage_info()->MaxNextLevelOverlappingBytes();
+}
+
+void DBImpl::TEST_GetFilesMetaData(
+    ColumnFamilyHandle* column_family,
+    std::vector<std::vector<FileMetaData>>* metadata) {
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  auto cfd = cfh->cfd();
+  InstrumentedMutexLock l(&mutex_);
+  metadata->resize(NumberLevels());
+  for (int level = 0; level < NumberLevels(); level++) {
+    const std::vector<FileMetaData*>& files =
+        cfd->current()->storage_info()->LevelFiles(level);
+
+    (*metadata)[level].clear();
+    for (const auto& f : files) {
+      (*metadata)[level].push_back(*f);
+    }
+  }
+}
+
+uint64_t DBImpl::TEST_Current_Manifest_FileNo() {
+  return versions_->manifest_file_number();
+}
+
+Status DBImpl::TEST_CompactRange(int level, const Slice* begin,
+                                 const Slice* end,
+                                 ColumnFamilyHandle* column_family,
+                                 bool disallow_trivial_move) {
+  ColumnFamilyData* cfd;
+  if (column_family == nullptr) {
+    cfd = default_cf_handle_->cfd();
+  } else {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+    cfd = cfh->cfd();
+  }
+  int output_level =
+      (cfd->ioptions()->compaction_style == kCompactionStyleUniversal ||
+       cfd->ioptions()->compaction_style == kCompactionStyleFIFO)
+          ? level
+          : level + 1;
+  return RunManualCompaction(cfd, level, output_level, 0, begin, end, true,
+                             disallow_trivial_move);
+}
+
+Status DBImpl::TEST_FlushMemTable(bool wait, ColumnFamilyHandle* cfh) {
+  FlushOptions fo;
+  fo.wait = wait;
+  ColumnFamilyData* cfd;
+  if (cfh == nullptr) {
+    cfd = default_cf_handle_->cfd();
+  } else {
+    auto cfhi = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh);
+    cfd = cfhi->cfd();
+  }
+  return FlushMemTable(cfd, fo);
+}
+
+Status DBImpl::TEST_WaitForFlushMemTable(ColumnFamilyHandle* column_family) {
+  ColumnFamilyData* cfd;
+  if (column_family == nullptr) {
+    cfd = default_cf_handle_->cfd();
+  } else {
+    auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+    cfd = cfh->cfd();
+  }
+  return WaitForFlushMemTable(cfd);
+}
+
+Status DBImpl::TEST_WaitForCompact() {
+  // Wait until the compaction completes
+
+  // TODO: a bug here. This function actually does not necessarily
+  // wait for compact. It actually waits for scheduled compaction
+  // OR flush to finish.
+
+  InstrumentedMutexLock l(&mutex_);
+  while ((bg_compaction_scheduled_ || bg_flush_scheduled_) && bg_error_.ok()) {
+    bg_cv_.Wait();
+  }
+  return bg_error_;
+}
+
+void DBImpl::TEST_LockMutex() {
+  mutex_.Lock();
+}
+
+void DBImpl::TEST_UnlockMutex() {
+  mutex_.Unlock();
+}
+
+void* DBImpl::TEST_BeginWrite() {
+  auto w = new WriteThread::Writer();
+  write_thread_.EnterUnbatched(w, &mutex_);
+  return reinterpret_cast<void*>(w);
+}
+
+void DBImpl::TEST_EndWrite(void* w) {
+  auto writer = reinterpret_cast<WriteThread::Writer*>(w);
+  write_thread_.ExitUnbatched(writer);
+  delete writer;
+}
+
+size_t DBImpl::TEST_LogsToFreeSize() {
+  InstrumentedMutexLock l(&mutex_);
+  return logs_to_free_.size();
+}
+
+uint64_t DBImpl::TEST_LogfileNumber() {
+  InstrumentedMutexLock l(&mutex_);
+  return logfile_number_;
+}
+
+Status DBImpl::TEST_GetAllImmutableCFOptions(
+    std::unordered_map<std::string, const ImmutableCFOptions*>* iopts_map) {
+  std::vector<std::string> cf_names;
+  std::vector<const ImmutableCFOptions*> iopts;
+  {
+    InstrumentedMutexLock l(&mutex_);
+    for (auto cfd : *versions_->GetColumnFamilySet()) {
+      cf_names.push_back(cfd->GetName());
+      iopts.push_back(cfd->ioptions());
+    }
+  }
+  iopts_map->clear();
+  for (size_t i = 0; i < cf_names.size(); ++i) {
+    iopts_map->insert({cf_names[i], iopts[i]});
+  }
+
+  return Status::OK();
+}
+
+uint64_t DBImpl::TEST_FindMinLogContainingOutstandingPrep() {
+  return FindMinLogContainingOutstandingPrep();
+}
+
+uint64_t DBImpl::TEST_FindMinPrepLogReferencedByMemTable() {
+  return FindMinPrepLogReferencedByMemTable();
+}
+
+Status DBImpl::TEST_GetLatestMutableCFOptions(
+    ColumnFamilyHandle* column_family, MutableCFOptions* mutable_cf_options) {
+  InstrumentedMutexLock l(&mutex_);
+
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  *mutable_cf_options = *cfh->cfd()->GetLatestMutableCFOptions();
+  return Status::OK();
+}
+
+} // namespace rocksdb
+#endif // NDEBUG
diff --git a/external/rocksdb/db/db_impl_experimental.cc b/external/rocksdb/db/db_impl_experimental.cc
new file mode 100755
index 0000000000..90e034cd0c
--- /dev/null
+++ b/external/rocksdb/db/db_impl_experimental.cc
@@ -0,0 +1,150 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/db_impl.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+#include <vector>
+
+#include "db/column_family.h"
+#include "db/job_context.h"
+#include "db/version_set.h"
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+
+#ifndef ROCKSDB_LITE
+Status DBImpl::SuggestCompactRange(ColumnFamilyHandle* column_family,
+                                   const Slice* begin, const Slice* end) {
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  auto cfd = cfh->cfd();
+  InternalKey start_key, end_key;
+  if (begin != nullptr) {
+    start_key.SetMaxPossibleForUserKey(*begin);
+  }
+  if (end != nullptr) {
+    end_key.SetMinPossibleForUserKey(*end);
+  }
+  {
+    InstrumentedMutexLock l(&mutex_);
+    auto vstorage = cfd->current()->storage_info();
+    for (int level = 0; level < vstorage->num_non_empty_levels() - 1; ++level) {
+      std::vector<FileMetaData*> inputs;
+      vstorage->GetOverlappingInputs(
+          level, begin == nullptr ? nullptr : &start_key,
+          end == nullptr ? nullptr : &end_key, &inputs);
+      for (auto f : inputs) {
+        f->marked_for_compaction = true;
+      }
+    }
+    // Since we have some more files to compact, we should also recompute
+    // compaction score
+    vstorage->ComputeCompactionScore(*cfd->GetLatestMutableCFOptions());
+    SchedulePendingCompaction(cfd);
+    MaybeScheduleFlushOrCompaction();
+  }
+  return Status::OK();
+}
+
+Status DBImpl::PromoteL0(ColumnFamilyHandle* column_family, int target_level) {
+  assert(column_family);
+
+  if (target_level < 1) {
+    Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+        "PromoteL0 FAILED. Invalid target level %d\n", target_level);
+    return Status::InvalidArgument("Invalid target level");
+  }
+
+  Status status;
+  VersionEdit edit;
+  JobContext job_context(next_job_id_.fetch_add(1), true);
+  {
+    InstrumentedMutexLock l(&mutex_);
+    auto* cfd = static_cast<ColumnFamilyHandleImpl*>(column_family)->cfd();
+    const auto* vstorage = cfd->current()->storage_info();
+
+    if (target_level >= vstorage->num_levels()) {
+      Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+          "PromoteL0 FAILED. Target level %d does not exist\n", target_level);
+      job_context.Clean();
+      return Status::InvalidArgument("Target level does not exist");
+    }
+
+    // Sort L0 files by range.
+    const InternalKeyComparator* icmp = &cfd->internal_comparator();
+    auto l0_files = vstorage->LevelFiles(0);
+    std::sort(l0_files.begin(), l0_files.end(),
+              [icmp](FileMetaData* f1, FileMetaData* f2) {
+                return icmp->Compare(f1->largest, f2->largest) < 0;
+              });
+
+    // Check that no L0 file is being compacted and that they have
+    // non-overlapping ranges.
+    for (size_t i = 0; i < l0_files.size(); ++i) {
+      auto f = l0_files[i];
+      if (f->being_compacted) {
+        Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+            "PromoteL0 FAILED. File %" PRIu64 " being compacted\n",
+            f->fd.GetNumber());
+        job_context.Clean();
+        return Status::InvalidArgument("PromoteL0 called during L0 compaction");
+      }
+
+      if (i == 0) continue;
+      auto prev_f = l0_files[i - 1];
+      if (icmp->Compare(prev_f->largest, f->smallest) >= 0) {
+        Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+            "PromoteL0 FAILED. Files %" PRIu64 " and %" PRIu64
+            " have overlapping ranges\n",
+            prev_f->fd.GetNumber(), f->fd.GetNumber());
+        job_context.Clean();
+        return Status::InvalidArgument("L0 has overlapping files");
+      }
+    }
+
+    // Check that all levels up to target_level are empty.
+    for (int level = 1; level <= target_level; ++level) {
+      if (vstorage->NumLevelFiles(level) > 0) {
+        Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+            "PromoteL0 FAILED. Level %d not empty\n", level);
+        job_context.Clean();
+        return Status::InvalidArgument(
+            "All levels up to target_level "
+            "must be empty");
+      }
+    }
+
+    edit.SetColumnFamily(cfd->GetID());
+    for (const auto& f : l0_files) {
+      edit.DeleteFile(0, f->fd.GetNumber());
+      edit.AddFile(target_level, f->fd.GetNumber(), f->fd.GetPathId(),
+                   f->fd.GetFileSize(), f->smallest, f->largest,
+                   f->smallest_seqno, f->largest_seqno,
+                   f->marked_for_compaction);
+    }
+
+    status = versions_->LogAndApply(cfd, *cfd->GetLatestMutableCFOptions(),
+                                    &edit, &mutex_, directories_.GetDbDir());
+    if (status.ok()) {
+      InstallSuperVersionAndScheduleWorkWrapper(
+          cfd, &job_context, *cfd->GetLatestMutableCFOptions());
+    }
+  } // lock released here
+  LogFlush(db_options_.info_log);
+  job_context.Clean();
+
+  return status;
+}
+#endif // ROCKSDB_LITE
+
+} // namespace rocksdb
diff --git a/external/rocksdb/db/db_impl_readonly.cc b/external/rocksdb/db/db_impl_readonly.cc
new file mode 100755
index 0000000000..57c14df149
--- /dev/null
+++ b/external/rocksdb/db/db_impl_readonly.cc
@@ -0,0 +1,189 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+
+#include "db/db_impl_readonly.h"
+
+#include "db/compacted_db_impl.h"
+#include "db/db_impl.h"
+#include "db/merge_context.h"
+#include "db/db_iter.h"
+#include "util/perf_context_imp.h"
+
+namespace rocksdb {
+
+#ifndef ROCKSDB_LITE
+
+DBImplReadOnly::DBImplReadOnly(const DBOptions& db_options,
+                               const std::string& dbname)
+    : DBImpl(db_options, dbname) {
+  Log(INFO_LEVEL, db_options_.info_log, "Opening the db in read only mode");
+  LogFlush(db_options_.info_log);
+}
+
+DBImplReadOnly::~DBImplReadOnly() {
+}
+
+// Implementations of the DB interface
+Status DBImplReadOnly::Get(const ReadOptions& read_options,
+                           ColumnFamilyHandle* column_family, const Slice& key,
+                           std::string* value) {
+  Status s;
+  SequenceNumber snapshot = versions_->LastSequence();
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  auto cfd = cfh->cfd();
+  SuperVersion* super_version = cfd->GetSuperVersion();
+  MergeContext merge_context;
+  LookupKey lkey(key, snapshot);
+  if (super_version->mem->Get(lkey, value, &s, &merge_context)) {
+  } else {
+    PERF_TIMER_GUARD(get_from_output_files_time);
+    super_version->current->Get(read_options, lkey, value, &s, &merge_context);
+  }
+  return s;
+}
+
+Iterator* DBImplReadOnly::NewIterator(const ReadOptions& read_options,
+                                      ColumnFamilyHandle* column_family) {
+  auto cfh = reinterpret_cast<ColumnFamilyHandleImpl*>(column_family);
+  auto cfd = cfh->cfd();
+  SuperVersion* super_version = cfd->GetSuperVersion()->Ref();
+  SequenceNumber latest_snapshot = versions_->LastSequence();
+  auto db_iter = NewArenaWrappedDbIterator(
+      env_, *cfd->ioptions(), cfd->user_comparator(),
+      (read_options.snapshot != nullptr
+           ? reinterpret_cast<const SnapshotImpl*>(read_options.snapshot)
+                 ->number_
+           : latest_snapshot),
+      super_version->mutable_cf_options.max_sequential_skip_in_iterations,
+      super_version->version_number);
+  auto internal_iter = NewInternalIterator(
+      read_options, cfd, super_version, db_iter->GetArena());
+  db_iter->SetIterUnderDBIter(internal_iter);
+  return db_iter;
+}
+
+Status DBImplReadOnly::NewIterators(
+    const ReadOptions& read_options,
+    const std::vector<ColumnFamilyHandle*>& column_families,
+    std::vector<Iterator*>* iterators) {
+  if (iterators == nullptr) {
+    return Status::InvalidArgument("iterators not allowed to be nullptr");
+  }
+  iterators->clear();
+  iterators->reserve(column_families.size());
+  SequenceNumber latest_snapshot = versions_->LastSequence();
+
+  for (auto cfh : column_families) {
+    auto* cfd = reinterpret_cast<ColumnFamilyHandleImpl*>(cfh)->cfd();
+    auto* sv = cfd->GetSuperVersion()->Ref();
+    auto* db_iter = NewArenaWrappedDbIterator(
+        env_, *cfd->ioptions(), cfd->user_comparator(),
+        (read_options.snapshot != nullptr
+             ? reinterpret_cast<const SnapshotImpl*>(read_options.snapshot)
+                   ->number_
+             : latest_snapshot),
+        sv->mutable_cf_options.max_sequential_skip_in_iterations,
+        sv->version_number);
+    auto* internal_iter = NewInternalIterator(
+        read_options, cfd, sv, db_iter->GetArena());
+    db_iter->SetIterUnderDBIter(internal_iter);
+    iterators->push_back(db_iter);
+  }
+
+  return Status::OK();
+}
+
+Status DB::OpenForReadOnly(const Options& options, const std::string& dbname,
+                           DB** dbptr, bool error_if_log_file_exist) {
+  *dbptr = nullptr;
+
+  // Try to first open DB as fully compacted DB
+  Status s;
+  s = CompactedDBImpl::Open(options, dbname, dbptr);
+  if (s.ok()) {
+    return s;
+  }
+
+  DBOptions db_options(options);
+  ColumnFamilyOptions cf_options(options);
+  std::vector<ColumnFamilyDescriptor> column_families;
+  column_families.push_back(
+      ColumnFamilyDescriptor(kDefaultColumnFamilyName, cf_options));
+  std::vector<ColumnFamilyHandle*> handles;
+
+  s = DB::OpenForReadOnly(db_options, dbname, column_families, &handles, dbptr);
+  if (s.ok()) {
+    assert(handles.size() == 1);
+    // We can delete the handle since DBImpl is always holding a
+    // reference to the default column family
+    delete handles[0];
+  }
+  return s;
+}
+
+Status DB::OpenForReadOnly(
+    const DBOptions& db_options, const std::string& dbname,
+    const std::vector<ColumnFamilyDescriptor>& column_families,
+    std::vector<ColumnFamilyHandle*>* handles, DB** dbptr,
+    bool error_if_log_file_exist) {
+  *dbptr = nullptr;
+  handles->clear();
+
+  DBImplReadOnly* impl = new DBImplReadOnly(db_options, dbname);
+  impl->mutex_.Lock();
+  Status s = impl->Recover(column_families, true /* read only */,
+                           error_if_log_file_exist);
+  if (s.ok()) {
+    // set column family handles
+    for (auto cf : column_families) {
+      auto cfd =
+          impl->versions_->GetColumnFamilySet()->GetColumnFamily(cf.name);
+      if (cfd == nullptr) {
+        s = Status::InvalidArgument("Column family not found: ", cf.name);
+        break;
+      }
+      handles->push_back(new ColumnFamilyHandleImpl(cfd, impl, &impl->mutex_));
+    }
+  }
+  if (s.ok()) {
+    for (auto cfd : *impl->versions_->GetColumnFamilySet()) {
+      delete cfd->InstallSuperVersion(new SuperVersion(), &impl->mutex_);
+    }
+  }
+  impl->mutex_.Unlock();
+  if (s.ok()) {
+    *dbptr = impl;
+    for (auto* h : *handles) {
+      impl->NewThreadStatusCfInfo(
+          reinterpret_cast<ColumnFamilyHandleImpl*>(h)->cfd());
+    }
+  } else {
+    for (auto h : *handles) {
+      delete h;
+    }
+    handles->clear();
+    delete impl;
+  }
+  return s;
+}
+
+#else // !ROCKSDB_LITE
+
+Status DB::OpenForReadOnly(const Options& options, const std::string& dbname,
+                           DB** dbptr, bool error_if_log_file_exist) {
+  return Status::NotSupported("Not supported in ROCKSDB_LITE.");
+}
+
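+// Illustrative sketch of the non-LITE read-only open path defined above (not
+// part of the original patch; the database path and key are placeholders):
+//
+//   DB* db = nullptr;
+//   Status s = DB::OpenForReadOnly(Options(), "/path/to/db", &db);
+//   if (s.ok()) {
+//     std::string value;
+//     s = db->Get(ReadOptions(), "some_key", &value);
+//     delete db;
+//   }
+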
+Status DB::OpenForReadOnly(
+    const DBOptions& db_options, const std::string& dbname,
+    const std::vector<ColumnFamilyDescriptor>& column_families,
+    std::vector<ColumnFamilyHandle*>* handles, DB** dbptr,
+    bool error_if_log_file_exist) {
+  return Status::NotSupported("Not supported in ROCKSDB_LITE.");
+}
+#endif // !ROCKSDB_LITE
+
+} // namespace rocksdb
diff --git a/external/rocksdb/db/db_impl_readonly.h b/external/rocksdb/db/db_impl_readonly.h
new file mode 100755
index 0000000000..a410a4e32b
--- /dev/null
+++ b/external/rocksdb/db/db_impl_readonly.h
@@ -0,0 +1,117 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#ifndef ROCKSDB_LITE
+
+#include "db/db_impl.h"
+#include <vector>
+#include <string>
+
+namespace rocksdb {
+
+class DBImplReadOnly : public DBImpl {
+ public:
+  DBImplReadOnly(const DBOptions& options, const std::string& dbname);
+  virtual ~DBImplReadOnly();
+
+  // Implementations of the DB interface
+  using DB::Get;
+  virtual Status Get(const ReadOptions& options,
+                     ColumnFamilyHandle* column_family, const Slice& key,
+                     std::string* value) override;
+
+  // TODO: Implement ReadOnly MultiGet?
+
+  using DBImpl::NewIterator;
+  virtual Iterator* NewIterator(const ReadOptions&,
+                                ColumnFamilyHandle* column_family) override;
+
+  virtual Status NewIterators(
+      const ReadOptions& options,
+      const std::vector<ColumnFamilyHandle*>& column_families,
+      std::vector<Iterator*>* iterators) override;
+
+  using DBImpl::Put;
+  virtual Status Put(const WriteOptions& options,
+                     ColumnFamilyHandle* column_family, const Slice& key,
+                     const Slice& value) override {
+    return Status::NotSupported("Not supported operation in read only mode.");
+  }
+  using DBImpl::Merge;
+  virtual Status Merge(const WriteOptions& options,
+                       ColumnFamilyHandle* column_family, const Slice& key,
+                       const Slice& value) override {
+    return Status::NotSupported("Not supported operation in read only mode.");
+  }
+  using DBImpl::Delete;
+  virtual Status Delete(const WriteOptions& options,
+                        ColumnFamilyHandle* column_family,
+                        const Slice& key) override {
+    return Status::NotSupported("Not supported operation in read only mode.");
+  }
+  using DBImpl::SingleDelete;
+  virtual Status SingleDelete(const WriteOptions& options,
+                              ColumnFamilyHandle* column_family,
+                              const Slice& key) override {
+    return Status::NotSupported("Not supported operation in read only mode.");
+  }
+  virtual Status Write(const WriteOptions& options,
+                       WriteBatch* updates) override {
+    return Status::NotSupported("Not supported operation in read only mode.");
+  }
+  using DBImpl::CompactRange;
+  virtual Status CompactRange(const CompactRangeOptions& options,
+                              ColumnFamilyHandle* column_family,
+                              const Slice* begin, const Slice* end) override {
+    return Status::NotSupported("Not supported operation in read only mode.");
+  }
+
+  using DBImpl::CompactFiles;
+  virtual Status CompactFiles(
+      const CompactionOptions& compact_options,
+      ColumnFamilyHandle* column_family,
+      const std::vector<std::string>& input_file_names,
+      const int output_level, const int output_path_id = -1) override {
+    return Status::NotSupported("Not supported operation in read only mode.");
+  }
+
+#ifndef ROCKSDB_LITE
+  virtual Status DisableFileDeletions() override {
+    return Status::NotSupported("Not supported operation in read only mode.");
+  }
+
+  virtual Status EnableFileDeletions(bool force) override {
Status::NotSupported("Not supported operation in read only mode."); + } + virtual Status GetLiveFiles(std::vector&, + uint64_t* manifest_file_size, + bool flush_memtable = true) override { + return Status::NotSupported("Not supported operation in read only mode."); + } +#endif // ROCKSDB_LITE + + using DBImpl::Flush; + virtual Status Flush(const FlushOptions& options, + ColumnFamilyHandle* column_family) override { + return Status::NotSupported("Not supported operation in read only mode."); + } + + using DBImpl::SyncWAL; + virtual Status SyncWAL() override { + return Status::NotSupported("Not supported operation in read only mode."); + } + + private: + friend class DB; + + // No copying allowed + DBImplReadOnly(const DBImplReadOnly&); + void operator=(const DBImplReadOnly&); +}; +} + +#endif // !ROCKSDB_LITE diff --git a/external/rocksdb/db/db_info_dumper.cc b/external/rocksdb/db/db_info_dumper.cc new file mode 100755 index 0000000000..56cf3e288f --- /dev/null +++ b/external/rocksdb/db/db_info_dumper.cc @@ -0,0 +1,127 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include "db/db_info_dumper.h" + +#include +#include +#include +#include +#include + +#include "db/filename.h" +#include "rocksdb/options.h" +#include "rocksdb/env.h" + +namespace rocksdb { + +void DumpDBFileSummary(const DBOptions& options, const std::string& dbname) { + if (options.info_log == nullptr) { + return; + } + + auto* env = options.env; + uint64_t number = 0; + FileType type = kInfoLogFile; + + std::vector files; + uint64_t file_num = 0; + uint64_t file_size; + std::string file_info, wal_info; + + Header(options.info_log, "DB SUMMARY\n"); + // Get files in dbname dir + if (!env->GetChildren(dbname, &files).ok()) { + Error(options.info_log, + "Error when reading %s dir\n", dbname.c_str()); + } + std::sort(files.begin(), files.end()); + for (std::string file : files) { + if (!ParseFileName(file, &number, &type)) { + continue; + } + switch (type) { + case kCurrentFile: + Header(options.info_log, "CURRENT file: %s\n", file.c_str()); + break; + case kIdentityFile: + Header(options.info_log, "IDENTITY file: %s\n", file.c_str()); + break; + case kDescriptorFile: + env->GetFileSize(dbname + "/" + file, &file_size); + Header(options.info_log, "MANIFEST file: %s size: %" PRIu64 " Bytes\n", + file.c_str(), file_size); + break; + case kLogFile: + env->GetFileSize(dbname + "/" + file, &file_size); + char str[16]; + snprintf(str, sizeof(str), "%" PRIu64, file_size); + wal_info.append(file).append(" size: "). 
+ append(str).append(" ; "); + break; + case kTableFile: + if (++file_num < 10) { + file_info.append(file).append(" "); + } + break; + default: + break; + } + } + + // Get sst files in db_path dir + for (auto& db_path : options.db_paths) { + if (dbname.compare(db_path.path) != 0) { + if (!env->GetChildren(db_path.path, &files).ok()) { + Error(options.info_log, + "Error when reading %s dir\n", + db_path.path.c_str()); + continue; + } + std::sort(files.begin(), files.end()); + for (std::string file : files) { + if (ParseFileName(file, &number, &type)) { + if (type == kTableFile && ++file_num < 10) { + file_info.append(file).append(" "); + } + } + } + } + Header(options.info_log, + "SST files in %s dir, Total Num: %" PRIu64 ", files: %s\n", + db_path.path.c_str(), file_num, file_info.c_str()); + file_num = 0; + file_info.clear(); + } + + // Get wal file in wal_dir + if (dbname.compare(options.wal_dir) != 0) { + if (!env->GetChildren(options.wal_dir, &files).ok()) { + Error(options.info_log, + "Error when reading %s dir\n", + options.wal_dir.c_str()); + return; + } + wal_info.clear(); + for (std::string file : files) { + if (ParseFileName(file, &number, &type)) { + if (type == kLogFile) { + env->GetFileSize(options.wal_dir + "/" + file, &file_size); + char str[16]; + snprintf(str, sizeof(str), "%" PRIu64, file_size); + wal_info.append(file).append(" size: "). + append(str).append(" ; "); + } + } + } + } + Header(options.info_log, "Write Ahead Log file in %s: %s\n", + options.wal_dir.c_str(), wal_info.c_str()); +} +} // namespace rocksdb diff --git a/external/rocksdb/db/db_info_dumper.h b/external/rocksdb/db/db_info_dumper.h new file mode 100755 index 0000000000..470b6224f2 --- /dev/null +++ b/external/rocksdb/db/db_info_dumper.h @@ -0,0 +1,13 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#pragma once + +#include + +#include "rocksdb/options.h" + +namespace rocksdb { +void DumpDBFileSummary(const DBOptions& options, const std::string& dbname); +} // namespace rocksdb diff --git a/external/rocksdb/db/db_inplace_update_test.cc b/external/rocksdb/db/db_inplace_update_test.cc new file mode 100755 index 0000000000..2acc25700d --- /dev/null +++ b/external/rocksdb/db/db_inplace_update_test.cc @@ -0,0 +1,165 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
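+
+// The tests below exercise Options::inplace_update_support. A minimal sketch
+// of enabling it outside this harness (illustrative only; update_cb is a
+// hypothetical callback name):
+//
+//   Options options;
+//   options.create_if_missing = true;
+//   options.inplace_update_support = true;
+//   options.inplace_callback = update_cb;  // may be nullptr for plain
+//                                          // same-or-smaller-size overwrites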
+#include "db/db_test_util.h" +#include "port/stack_trace.h" + +namespace rocksdb { + +class DBTestInPlaceUpdate : public DBTestBase { + public: + DBTestInPlaceUpdate() : DBTestBase("/db_inplace_update_test") {} +}; + +TEST_F(DBTestInPlaceUpdate, InPlaceUpdate) { + do { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.inplace_update_support = true; + options.env = env_; + options.write_buffer_size = 100000; + CreateAndReopenWithCF({"pikachu"}, options); + + // Update key with values of smaller size + int numValues = 10; + for (int i = numValues; i > 0; i--) { + std::string value = DummyString(i, 'a'); + ASSERT_OK(Put(1, "key", value)); + ASSERT_EQ(value, Get(1, "key")); + } + + // Only 1 instance for that key. + validateNumberOfEntries(1, 1); + } while (ChangeCompactOptions()); +} + +TEST_F(DBTestInPlaceUpdate, InPlaceUpdateLargeNewValue) { + do { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.inplace_update_support = true; + options.env = env_; + options.write_buffer_size = 100000; + CreateAndReopenWithCF({"pikachu"}, options); + + // Update key with values of larger size + int numValues = 10; + for (int i = 0; i < numValues; i++) { + std::string value = DummyString(i, 'a'); + ASSERT_OK(Put(1, "key", value)); + ASSERT_EQ(value, Get(1, "key")); + } + + // All 10 updates exist in the internal iterator + validateNumberOfEntries(numValues, 1); + } while (ChangeCompactOptions()); +} + +TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackSmallerSize) { + do { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.inplace_update_support = true; + + options.env = env_; + options.write_buffer_size = 100000; + options.inplace_callback = + rocksdb::DBTestInPlaceUpdate::updateInPlaceSmallerSize; + CreateAndReopenWithCF({"pikachu"}, options); + + // Update key with values of smaller size + int numValues = 10; + ASSERT_OK(Put(1, "key", DummyString(numValues, 'a'))); + ASSERT_EQ(DummyString(numValues, 'c'), Get(1, "key")); + + for (int i = numValues; i > 0; i--) { + ASSERT_OK(Put(1, "key", DummyString(i, 'a'))); + ASSERT_EQ(DummyString(i - 1, 'b'), Get(1, "key")); + } + + // Only 1 instance for that key. + validateNumberOfEntries(1, 1); + } while (ChangeCompactOptions()); +} + +TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackSmallerVarintSize) { + do { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.inplace_update_support = true; + + options.env = env_; + options.write_buffer_size = 100000; + options.inplace_callback = + rocksdb::DBTestInPlaceUpdate::updateInPlaceSmallerVarintSize; + CreateAndReopenWithCF({"pikachu"}, options); + + // Update key with values of smaller varint size + int numValues = 265; + ASSERT_OK(Put(1, "key", DummyString(numValues, 'a'))); + ASSERT_EQ(DummyString(numValues, 'c'), Get(1, "key")); + + for (int i = numValues; i > 0; i--) { + ASSERT_OK(Put(1, "key", DummyString(i, 'a'))); + ASSERT_EQ(DummyString(1, 'b'), Get(1, "key")); + } + + // Only 1 instance for that key. 
+ validateNumberOfEntries(1, 1); + } while (ChangeCompactOptions()); +} + +TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackLargeNewValue) { + do { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.inplace_update_support = true; + + options.env = env_; + options.write_buffer_size = 100000; + options.inplace_callback = + rocksdb::DBTestInPlaceUpdate::updateInPlaceLargerSize; + CreateAndReopenWithCF({"pikachu"}, options); + + // Update key with values of larger size + int numValues = 10; + for (int i = 0; i < numValues; i++) { + ASSERT_OK(Put(1, "key", DummyString(i, 'a'))); + ASSERT_EQ(DummyString(i, 'c'), Get(1, "key")); + } + + // No inplace updates. All updates are puts with new seq number + // All 10 updates exist in the internal iterator + validateNumberOfEntries(numValues, 1); + } while (ChangeCompactOptions()); +} + +TEST_F(DBTestInPlaceUpdate, InPlaceUpdateCallbackNoAction) { + do { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.inplace_update_support = true; + + options.env = env_; + options.write_buffer_size = 100000; + options.inplace_callback = + rocksdb::DBTestInPlaceUpdate::updateInPlaceNoAction; + CreateAndReopenWithCF({"pikachu"}, options); + + // Callback function requests no actions from db + ASSERT_OK(Put(1, "key", DummyString(1, 'a'))); + ASSERT_EQ(Get(1, "key"), "NOT_FOUND"); + } while (ChangeCompactOptions()); +} +} // namespace rocksdb + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/db_io_failure_test.cc b/external/rocksdb/db/db_io_failure_test.cc new file mode 100755 index 0000000000..4d66e1d1a3 --- /dev/null +++ b/external/rocksdb/db/db_io_failure_test.cc @@ -0,0 +1,259 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
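+
+// The fault-injection switches used below (drop_writes_, no_space_, and
+// friends) come from the SpecialEnv test environment in db_test_util.h. The
+// common pattern, sketched (illustrative only):
+//
+//   env_->drop_writes_.store(true, std::memory_order_release);   // start failing writes
+//   // ... issue writes/compactions and assert on the returned Status ...
+//   env_->drop_writes_.store(false, std::memory_order_release);  // stop failing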
+ +#include "db/db_test_util.h" +#include "port/stack_trace.h" + +namespace rocksdb { + +class DBIOFailureTest : public DBTestBase { + public: + DBIOFailureTest() : DBTestBase("/db_io_failure_test") {} +}; + +#ifndef ROCKSDB_LITE +// Check that number of files does not grow when writes are dropped +TEST_F(DBIOFailureTest, DropWrites) { + do { + Options options = CurrentOptions(); + options.env = env_; + options.paranoid_checks = false; + Reopen(options); + + ASSERT_OK(Put("foo", "v1")); + ASSERT_EQ("v1", Get("foo")); + Compact("a", "z"); + const size_t num_files = CountFiles(); + // Force out-of-space errors + env_->drop_writes_.store(true, std::memory_order_release); + env_->sleep_counter_.Reset(); + env_->no_sleep_ = true; + for (int i = 0; i < 5; i++) { + if (option_config_ != kUniversalCompactionMultiLevel && + option_config_ != kUniversalSubcompactions) { + for (int level = 0; level < dbfull()->NumberLevels(); level++) { + if (level > 0 && level == dbfull()->NumberLevels() - 1) { + break; + } + dbfull()->TEST_CompactRange(level, nullptr, nullptr, nullptr, + true /* disallow trivial move */); + } + } else { + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + } + } + + std::string property_value; + ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value)); + ASSERT_EQ("5", property_value); + + env_->drop_writes_.store(false, std::memory_order_release); + ASSERT_LT(CountFiles(), num_files + 3); + + // Check that compaction attempts slept after errors + // TODO @krad: Figure out why ASSERT_EQ 5 keeps failing in certain compiler + // versions + ASSERT_GE(env_->sleep_counter_.Read(), 4); + } while (ChangeCompactOptions()); +} + +// Check background error counter bumped on flush failures. +TEST_F(DBIOFailureTest, DropWritesFlush) { + do { + Options options = CurrentOptions(); + options.env = env_; + options.max_background_flushes = 1; + Reopen(options); + + ASSERT_OK(Put("foo", "v1")); + // Force out-of-space errors + env_->drop_writes_.store(true, std::memory_order_release); + + std::string property_value; + // Background error count is 0 now. 
+    ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value));
+    ASSERT_EQ("0", property_value);
+
+    dbfull()->TEST_FlushMemTable(true);
+
+    ASSERT_TRUE(db_->GetProperty("rocksdb.background-errors", &property_value));
+    ASSERT_EQ("1", property_value);
+
+    env_->drop_writes_.store(false, std::memory_order_release);
+  } while (ChangeCompactOptions());
+}
+#endif // ROCKSDB_LITE
+
+// Check that CompactRange() returns failure if there is not enough space left
+// on device
+TEST_F(DBIOFailureTest, NoSpaceCompactRange) {
+  do {
+    Options options = CurrentOptions();
+    options.env = env_;
+    options.disable_auto_compactions = true;
+    Reopen(options);
+
+    // generate 5 tables
+    for (int i = 0; i < 5; ++i) {
+      ASSERT_OK(Put(Key(i), Key(i) + "v"));
+      ASSERT_OK(Flush());
+    }
+
+    // Force out-of-space errors
+    env_->no_space_.store(true, std::memory_order_release);
+
+    Status s = dbfull()->TEST_CompactRange(0, nullptr, nullptr, nullptr,
+                                           true /* disallow trivial move */);
+    ASSERT_TRUE(s.IsIOError());
+
+    env_->no_space_.store(false, std::memory_order_release);
+  } while (ChangeCompactOptions());
+}
+
+TEST_F(DBIOFailureTest, NonWritableFileSystem) {
+  do {
+    Options options = CurrentOptions();
+    options.write_buffer_size = 4096;
+    options.arena_block_size = 4096;
+    options.env = env_;
+    Reopen(options);
+    ASSERT_OK(Put("foo", "v1"));
+    env_->non_writeable_rate_.store(100);
+    std::string big(100000, 'x');
+    int errors = 0;
+    for (int i = 0; i < 20; i++) {
+      if (!Put("foo", big).ok()) {
+        errors++;
+        env_->SleepForMicroseconds(100000);
+      }
+    }
+    ASSERT_GT(errors, 0);
+    env_->non_writeable_rate_.store(0);
+  } while (ChangeCompactOptions());
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBIOFailureTest, ManifestWriteError) {
+  // Test for the following problem:
+  // (a) Compaction produces file F
+  // (b) Log record containing F is written to MANIFEST file, but Sync() fails
+  // (c) GC deletes F
+  // (d) After reopening DB, reads fail since deleted F is named in log record
+
+  // We iterate twice. In the second iteration, everything is the
+  // same except the log record never makes it to the MANIFEST file.
+  for (int iter = 0; iter < 2; iter++) {
+    std::atomic<bool>* error_type = (iter == 0) ?
&env_->manifest_sync_error_ + : &env_->manifest_write_error_; + + // Insert foo=>bar mapping + Options options = CurrentOptions(); + options.env = env_; + options.create_if_missing = true; + options.error_if_exists = false; + options.paranoid_checks = true; + DestroyAndReopen(options); + ASSERT_OK(Put("foo", "bar")); + ASSERT_EQ("bar", Get("foo")); + + // Memtable compaction (will succeed) + Flush(); + ASSERT_EQ("bar", Get("foo")); + const int last = 2; + MoveFilesToLevel(2); + ASSERT_EQ(NumTableFilesAtLevel(last), 1); // foo=>bar is now in last level + + // Merging compaction (will fail) + error_type->store(true, std::memory_order_release); + dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail + ASSERT_EQ("bar", Get("foo")); + + error_type->store(false, std::memory_order_release); + + // Since paranoid_checks=true, writes should fail + ASSERT_NOK(Put("foo2", "bar2")); + + // Recovery: should not lose data + ASSERT_EQ("bar", Get("foo")); + + // Try again with paranoid_checks=false + Close(); + options.paranoid_checks = false; + Reopen(options); + + // Merging compaction (will fail) + error_type->store(true, std::memory_order_release); + dbfull()->TEST_CompactRange(last, nullptr, nullptr); // Should fail + ASSERT_EQ("bar", Get("foo")); + + // Recovery: should not lose data + error_type->store(false, std::memory_order_release); + Reopen(options); + ASSERT_EQ("bar", Get("foo")); + + // Since paranoid_checks=false, writes should succeed + ASSERT_OK(Put("foo2", "bar2")); + ASSERT_EQ("bar", Get("foo")); + ASSERT_EQ("bar2", Get("foo2")); + } +} +#endif // ROCKSDB_LITE + +TEST_F(DBIOFailureTest, PutFailsParanoid) { + // Test the following: + // (a) A random put fails in paranoid mode (simulate by sync fail) + // (b) All other puts have to fail, even if writes would succeed + // (c) All of that should happen ONLY if paranoid_checks = true + + Options options = CurrentOptions(); + options.env = env_; + options.create_if_missing = true; + options.error_if_exists = false; + options.paranoid_checks = true; + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + Status s; + + ASSERT_OK(Put(1, "foo", "bar")); + ASSERT_OK(Put(1, "foo1", "bar1")); + // simulate error + env_->log_write_error_.store(true, std::memory_order_release); + s = Put(1, "foo2", "bar2"); + ASSERT_TRUE(!s.ok()); + env_->log_write_error_.store(false, std::memory_order_release); + s = Put(1, "foo3", "bar3"); + // the next put should fail, too + ASSERT_TRUE(!s.ok()); + // but we're still able to read + ASSERT_EQ("bar", Get(1, "foo")); + + // do the same thing with paranoid checks off + options.paranoid_checks = false; + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + + ASSERT_OK(Put(1, "foo", "bar")); + ASSERT_OK(Put(1, "foo1", "bar1")); + // simulate error + env_->log_write_error_.store(true, std::memory_order_release); + s = Put(1, "foo2", "bar2"); + ASSERT_TRUE(!s.ok()); + env_->log_write_error_.store(false, std::memory_order_release); + s = Put(1, "foo3", "bar3"); + // the next put should NOT fail + ASSERT_TRUE(s.ok()); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/db_iter.cc b/external/rocksdb/db/db_iter.cc new file mode 100755 index 0000000000..5de3b406b9 --- /dev/null +++ b/external/rocksdb/db/db_iter.cc @@ -0,0 +1,965 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/db_iter.h" +#include +#include +#include +#include + +#include "db/dbformat.h" +#include "db/filename.h" +#include "db/merge_context.h" +#include "db/merge_helper.h" +#include "db/pinned_iterators_manager.h" +#include "port/port.h" +#include "rocksdb/env.h" +#include "rocksdb/iterator.h" +#include "rocksdb/merge_operator.h" +#include "rocksdb/options.h" +#include "table/internal_iterator.h" +#include "util/arena.h" +#include "util/logging.h" +#include "util/mutexlock.h" +#include "util/perf_context_imp.h" +#include "util/string_util.h" + +namespace rocksdb { + +#if 0 +static void DumpInternalIter(Iterator* iter) { + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + ParsedInternalKey k; + if (!ParseInternalKey(iter->key(), &k)) { + fprintf(stderr, "Corrupt '%s'\n", EscapeString(iter->key()).c_str()); + } else { + fprintf(stderr, "@ '%s'\n", k.DebugString().c_str()); + } + } +} +#endif + +// Memtables and sstables that make the DB representation contain +// (userkey,seq,type) => uservalue entries. DBIter +// combines multiple entries for the same userkey found in the DB +// representation into a single entry while accounting for sequence +// numbers, deletion markers, overwrites, etc. +class DBIter: public Iterator { + public: + // The following is grossly complicated. TODO: clean it up + // Which direction is the iterator currently moving? + // (1) When moving forward, the internal iterator is positioned at + // the exact entry that yields this->key(), this->value() + // (2) When moving backwards, the internal iterator is positioned + // just before all entries whose user key == this->key(). + enum Direction { + kForward, + kReverse + }; + + // LocalStatistics contain Statistics counters that will be aggregated per + // each iterator instance and then will be sent to the global statistics when + // the iterator is destroyed. + // + // The purpose of this approach is to avoid perf regression happening + // when multiple threads bump the atomic counters from a DBIter::Next(). 
+ struct LocalStatistics { + explicit LocalStatistics() { ResetCounters(); } + + void ResetCounters() { + next_count_ = 0; + next_found_count_ = 0; + prev_count_ = 0; + prev_found_count_ = 0; + bytes_read_ = 0; + } + + void BumpGlobalStatistics(Statistics* global_statistics) { + RecordTick(global_statistics, NUMBER_DB_NEXT, next_count_); + RecordTick(global_statistics, NUMBER_DB_NEXT_FOUND, next_found_count_); + RecordTick(global_statistics, NUMBER_DB_PREV, prev_count_); + RecordTick(global_statistics, NUMBER_DB_PREV_FOUND, prev_found_count_); + RecordTick(global_statistics, ITER_BYTES_READ, bytes_read_); + ResetCounters(); + } + + // Map to Tickers::NUMBER_DB_NEXT + uint64_t next_count_; + // Map to Tickers::NUMBER_DB_NEXT_FOUND + uint64_t next_found_count_; + // Map to Tickers::NUMBER_DB_PREV + uint64_t prev_count_; + // Map to Tickers::NUMBER_DB_PREV_FOUND + uint64_t prev_found_count_; + // Map to Tickers::ITER_BYTES_READ + uint64_t bytes_read_; + }; + + DBIter(Env* env, const ImmutableCFOptions& ioptions, const Comparator* cmp, + InternalIterator* iter, SequenceNumber s, bool arena_mode, + uint64_t max_sequential_skip_in_iterations, uint64_t version_number, + const Slice* iterate_upper_bound = nullptr, + bool prefix_same_as_start = false, bool pin_data = false) + : arena_mode_(arena_mode), + env_(env), + logger_(ioptions.info_log), + user_comparator_(cmp), + merge_operator_(ioptions.merge_operator), + iter_(iter), + sequence_(s), + direction_(kForward), + valid_(false), + current_entry_is_merged_(false), + statistics_(ioptions.statistics), + version_number_(version_number), + iterate_upper_bound_(iterate_upper_bound), + prefix_same_as_start_(prefix_same_as_start), + pin_thru_lifetime_(pin_data) { + RecordTick(statistics_, NO_ITERATORS); + prefix_extractor_ = ioptions.prefix_extractor; + max_skip_ = max_sequential_skip_in_iterations; + if (pin_thru_lifetime_) { + pinned_iters_mgr_.StartPinning(); + } + if (iter_) { + iter_->SetPinnedItersMgr(&pinned_iters_mgr_); + } + } + virtual ~DBIter() { + // Release pinned data if any + pinned_iters_mgr_.ReleasePinnedIterators(); + RecordTick(statistics_, NO_ITERATORS, -1); + local_stats_.BumpGlobalStatistics(statistics_); + if (!arena_mode_) { + delete iter_; + } else { + iter_->~InternalIterator(); + } + } + virtual void SetIter(InternalIterator* iter) { + assert(iter_ == nullptr); + iter_ = iter; + iter_->SetPinnedItersMgr(&pinned_iters_mgr_); + } + virtual bool Valid() const override { return valid_; } + virtual Slice key() const override { + assert(valid_); + return saved_key_.GetKey(); + } + virtual Slice value() const override { + assert(valid_); + if (current_entry_is_merged_) { + // If pinned_value_ is set then the result of merge operator is one of + // the merge operands and we should return it. + return pinned_value_.data() ? pinned_value_ : saved_value_; + } else if (direction_ == kReverse) { + return pinned_value_; + } else { + return iter_->value(); + } + } + virtual Status status() const override { + if (status_.ok()) { + return iter_->status(); + } else { + return status_; + } + } + + virtual Status GetProperty(std::string prop_name, + std::string* prop) override { + if (prop == nullptr) { + return Status::InvalidArgument("prop is nullptr"); + } + if (prop_name == "rocksdb.iterator.super-version-number") { + // First try to pass the value returned from inner iterator. 
+ if (!iter_->GetProperty(prop_name, prop).ok()) { + *prop = ToString(version_number_); + } + return Status::OK(); + } else if (prop_name == "rocksdb.iterator.is-key-pinned") { + if (valid_) { + *prop = (pin_thru_lifetime_ && saved_key_.IsKeyPinned()) ? "1" : "0"; + } else { + *prop = "Iterator is not valid."; + } + return Status::OK(); + } + return Status::InvalidArgument("Undentified property."); + } + + virtual void Next() override; + virtual void Prev() override; + virtual void Seek(const Slice& target) override; + virtual void SeekToFirst() override; + virtual void SeekToLast() override; + + private: + void ReverseToBackward(); + void PrevInternal(); + void FindParseableKey(ParsedInternalKey* ikey, Direction direction); + bool FindValueForCurrentKey(); + bool FindValueForCurrentKeyUsingSeek(); + void FindPrevUserKey(); + void FindNextUserKey(); + inline void FindNextUserEntry(bool skipping, bool prefix_check); + void FindNextUserEntryInternal(bool skipping, bool prefix_check); + bool ParseKey(ParsedInternalKey* key); + void MergeValuesNewToOld(); + + // Temporarily pin the blocks that we encounter until ReleaseTempPinnedData() + // is called + void TempPinData() { + if (!pin_thru_lifetime_) { + pinned_iters_mgr_.StartPinning(); + } + } + + // Release blocks pinned by TempPinData() + void ReleaseTempPinnedData() { + if (!pin_thru_lifetime_) { + pinned_iters_mgr_.ReleasePinnedIterators(); + } + } + + inline void ClearSavedValue() { + if (saved_value_.capacity() > 1048576) { + std::string empty; + swap(empty, saved_value_); + } else { + saved_value_.clear(); + } + } + + const SliceTransform* prefix_extractor_; + bool arena_mode_; + Env* const env_; + Logger* logger_; + const Comparator* const user_comparator_; + const MergeOperator* const merge_operator_; + InternalIterator* iter_; + SequenceNumber const sequence_; + + Status status_; + IterKey saved_key_; + std::string saved_value_; + Slice pinned_value_; + Direction direction_; + bool valid_; + bool current_entry_is_merged_; + Statistics* statistics_; + uint64_t max_skip_; + uint64_t version_number_; + const Slice* iterate_upper_bound_; + IterKey prefix_start_buf_; + Slice prefix_start_key_; + const bool prefix_same_as_start_; + // Means that we will pin all data blocks we read as long the Iterator + // is not deleted, will be true if ReadOptions::pin_data is true + const bool pin_thru_lifetime_; + // List of operands for merge operator. + MergeContext merge_context_; + LocalStatistics local_stats_; + PinnedIteratorsManager pinned_iters_mgr_; + + // No copying allowed + DBIter(const DBIter&); + void operator=(const DBIter&); +}; + +inline bool DBIter::ParseKey(ParsedInternalKey* ikey) { + if (!ParseInternalKey(iter_->key(), ikey)) { + status_ = Status::Corruption("corrupted internal key in DBIter"); + Log(InfoLogLevel::ERROR_LEVEL, + logger_, "corrupted internal key in DBIter: %s", + iter_->key().ToString(true).c_str()); + return false; + } else { + return true; + } +} + +void DBIter::Next() { + assert(valid_); + + // Release temporarily pinned blocks from last operation + ReleaseTempPinnedData(); + if (direction_ == kReverse) { + FindNextUserKey(); + direction_ = kForward; + if (!iter_->Valid()) { + iter_->SeekToFirst(); + } + } else if (iter_->Valid() && !current_entry_is_merged_) { + // If the current value is not a merge, the iter position is the + // current key, which is already returned. We can safely issue a + // Next() without checking the current key. 
+ // If the current key is a merge, very likely iter already points + // to the next internal position. + iter_->Next(); + PERF_COUNTER_ADD(internal_key_skipped_count, 1); + } + + if (statistics_ != nullptr) { + local_stats_.next_count_++; + } + // Now we point to the next internal position, for both of merge and + // not merge cases. + if (!iter_->Valid()) { + valid_ = false; + return; + } + FindNextUserEntry(true /* skipping the current user key */, prefix_same_as_start_); + if (statistics_ != nullptr && valid_) { + local_stats_.next_found_count_++; + local_stats_.bytes_read_ += (key().size() + value().size()); + } +} + +// PRE: saved_key_ has the current user key if skipping +// POST: saved_key_ should have the next user key if valid_, +// if the current entry is a result of merge +// current_entry_is_merged_ => true +// saved_value_ => the merged value +// +// NOTE: In between, saved_key_ can point to a user key that has +// a delete marker +// +// The prefix_check parameter controls whether we check the iterated +// keys against the prefix of the seeked key. Set to false when +// performing a seek without a key (e.g. SeekToFirst). Set to +// prefix_same_as_start_ for other iterations. +inline void DBIter::FindNextUserEntry(bool skipping, bool prefix_check) { + PERF_TIMER_GUARD(find_next_user_entry_time); + FindNextUserEntryInternal(skipping, prefix_check); +} + +// Actual implementation of DBIter::FindNextUserEntry() +void DBIter::FindNextUserEntryInternal(bool skipping, bool prefix_check) { + // Loop until we hit an acceptable entry to yield + assert(iter_->Valid()); + assert(direction_ == kForward); + current_entry_is_merged_ = false; + uint64_t num_skipped = 0; + do { + ParsedInternalKey ikey; + + if (ParseKey(&ikey)) { + if (iterate_upper_bound_ != nullptr && + user_comparator_->Compare(ikey.user_key, *iterate_upper_bound_) >= 0) { + break; + } + + if (prefix_extractor_ && prefix_check && + prefix_extractor_->Transform(ikey.user_key).compare(prefix_start_key_) != 0) { + break; + } + + if (ikey.sequence <= sequence_) { + if (skipping && + user_comparator_->Compare(ikey.user_key, saved_key_.GetKey()) <= 0) { + num_skipped++; // skip this entry + PERF_COUNTER_ADD(internal_key_skipped_count, 1); + } else { + switch (ikey.type) { + case kTypeDeletion: + case kTypeSingleDeletion: + // Arrange to skip all upcoming entries for this key since + // they are hidden by this deletion. + saved_key_.SetKey( + ikey.user_key, + !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */); + skipping = true; + num_skipped = 0; + PERF_COUNTER_ADD(internal_delete_skipped_count, 1); + break; + case kTypeValue: + valid_ = true; + saved_key_.SetKey( + ikey.user_key, + !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */); + return; + case kTypeMerge: + // By now, we are sure the current ikey is going to yield a value + saved_key_.SetKey( + ikey.user_key, + !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */); + current_entry_is_merged_ = true; + valid_ = true; + MergeValuesNewToOld(); // Go to a different state machine + return; + default: + assert(false); + break; + } + } + } + } + // If we have sequentially iterated via numerous keys and still not + // found the next user-key, then it is better to seek so that we can + // avoid too many key comparisons. We seek to the last occurrence of + // our current key by looking for sequence number 0 and type deletion + // (the smallest type). 
+ if (skipping && num_skipped > max_skip_) { + num_skipped = 0; + std::string last_key; + AppendInternalKey(&last_key, ParsedInternalKey(saved_key_.GetKey(), 0, + kTypeDeletion)); + iter_->Seek(last_key); + RecordTick(statistics_, NUMBER_OF_RESEEKS_IN_ITERATION); + } else { + iter_->Next(); + } + } while (iter_->Valid()); + valid_ = false; +} + +// Merge values of the same user key starting from the current iter_ position +// Scan from the newer entries to older entries. +// PRE: iter_->key() points to the first merge type entry +// saved_key_ stores the user key +// POST: saved_value_ has the merged value for the user key +// iter_ points to the next entry (or invalid) +void DBIter::MergeValuesNewToOld() { + if (!merge_operator_) { + Log(InfoLogLevel::ERROR_LEVEL, + logger_, "Options::merge_operator is null."); + status_ = Status::InvalidArgument("merge_operator_ must be set."); + valid_ = false; + return; + } + + // Temporarily pin the blocks that hold merge operands + TempPinData(); + merge_context_.Clear(); + // Start the merge process by pushing the first operand + merge_context_.PushOperand(iter_->value(), + iter_->IsValuePinned() /* operand_pinned */); + + ParsedInternalKey ikey; + for (iter_->Next(); iter_->Valid(); iter_->Next()) { + if (!ParseKey(&ikey)) { + // skip corrupted key + continue; + } + + if (!user_comparator_->Equal(ikey.user_key, saved_key_.GetKey())) { + // hit the next user key, stop right here + break; + } else if (kTypeDeletion == ikey.type || kTypeSingleDeletion == ikey.type) { + // hit a delete with the same user key, stop right here + // iter_ is positioned after delete + iter_->Next(); + break; + } else if (kTypeValue == ikey.type) { + // hit a put, merge the put value with operands and store the + // final result in saved_value_. We are done! + // ignore corruption if there is any. + const Slice val = iter_->value(); + MergeHelper::TimedFullMerge(merge_operator_, ikey.user_key, &val, + merge_context_.GetOperands(), &saved_value_, + logger_, statistics_, env_, &pinned_value_); + // iter_ is positioned after put + iter_->Next(); + return; + } else if (kTypeMerge == ikey.type) { + // hit a merge, add the value as an operand and run associative merge. + // when complete, add result to operands and continue. + merge_context_.PushOperand(iter_->value(), + iter_->IsValuePinned() /* operand_pinned */); + } else { + assert(false); + } + } + + // we either exhausted all internal keys under this user key, or hit + // a deletion marker. + // feed null as the existing value to the merge operator, such that + // client can differentiate this scenario and do things accordingly. + MergeHelper::TimedFullMerge(merge_operator_, saved_key_.GetKey(), nullptr, + merge_context_.GetOperands(), &saved_value_, + logger_, statistics_, env_, &pinned_value_); +} + +void DBIter::Prev() { + assert(valid_); + ReleaseTempPinnedData(); + if (direction_ == kForward) { + ReverseToBackward(); + } + PrevInternal(); + if (statistics_ != nullptr) { + local_stats_.prev_count_++; + if (valid_) { + local_stats_.prev_found_count_++; + local_stats_.bytes_read_ += (key().size() + value().size()); + } + } + if (valid_ && prefix_extractor_ && prefix_same_as_start_ && + prefix_extractor_->Transform(saved_key_.GetKey()) + .compare(prefix_start_key_) != 0) { + valid_ = false; + } +} + +void DBIter::ReverseToBackward() { + if (current_entry_is_merged_) { + // Not placed in the same key. Need to call Prev() until finding the + // previous key. 
+    if (!iter_->Valid()) {
+      iter_->SeekToLast();
+    }
+    ParsedInternalKey ikey;
+    FindParseableKey(&ikey, kReverse);
+    while (iter_->Valid() &&
+           user_comparator_->Compare(ikey.user_key, saved_key_.GetKey()) > 0) {
+      iter_->Prev();
+      FindParseableKey(&ikey, kReverse);
+    }
+  }
+#ifndef NDEBUG
+  if (iter_->Valid()) {
+    ParsedInternalKey ikey;
+    assert(ParseKey(&ikey));
+    assert(user_comparator_->Compare(ikey.user_key, saved_key_.GetKey()) <= 0);
+  }
+#endif
+
+  FindPrevUserKey();
+  direction_ = kReverse;
+}
+
+void DBIter::PrevInternal() {
+  if (!iter_->Valid()) {
+    valid_ = false;
+    return;
+  }
+
+  ParsedInternalKey ikey;
+
+  while (iter_->Valid()) {
+    saved_key_.SetKey(ExtractUserKey(iter_->key()),
+                      !iter_->IsKeyPinned() || !pin_thru_lifetime_ /* copy */);
+    if (FindValueForCurrentKey()) {
+      valid_ = true;
+      if (!iter_->Valid()) {
+        return;
+      }
+      FindParseableKey(&ikey, kReverse);
+      if (user_comparator_->Equal(ikey.user_key, saved_key_.GetKey())) {
+        FindPrevUserKey();
+      }
+      return;
+    }
+    if (!iter_->Valid()) {
+      break;
+    }
+    FindParseableKey(&ikey, kReverse);
+    if (user_comparator_->Equal(ikey.user_key, saved_key_.GetKey())) {
+      FindPrevUserKey();
+    }
+  }
+  // We haven't found any key - the iterator is not valid
+  assert(!iter_->Valid());
+  valid_ = false;
+}
+
+// This function checks whether the entry with the biggest sequence number
+// <= sequence_ is a kTypeDeletion or kTypeSingleDeletion. If it is not, we
+// save the value in saved_value_.
+bool DBIter::FindValueForCurrentKey() {
+  assert(iter_->Valid());
+  merge_context_.Clear();
+  current_entry_is_merged_ = false;
+  // last entry before merge (could be kTypeDeletion, kTypeSingleDeletion or
+  // kTypeValue)
+  ValueType last_not_merge_type = kTypeDeletion;
+  ValueType last_key_entry_type = kTypeDeletion;
+
+  ParsedInternalKey ikey;
+  FindParseableKey(&ikey, kReverse);
+
+  // Temporarily pin blocks that hold (merge operands / the value)
+  ReleaseTempPinnedData();
+  TempPinData();
+  size_t num_skipped = 0;
+  while (iter_->Valid() && ikey.sequence <= sequence_ &&
+         user_comparator_->Equal(ikey.user_key, saved_key_.GetKey())) {
+    // We have iterated too much: use Seek() to avoid too many key comparisons
+    if (num_skipped >= max_skip_) {
+      return FindValueForCurrentKeyUsingSeek();
+    }
+
+    last_key_entry_type = ikey.type;
+    switch (last_key_entry_type) {
+      case kTypeValue:
+        merge_context_.Clear();
+        assert(iter_->IsValuePinned());
+        pinned_value_ = iter_->value();
+        last_not_merge_type = kTypeValue;
+        break;
+      case kTypeDeletion:
+      case kTypeSingleDeletion:
+        merge_context_.Clear();
+        last_not_merge_type = last_key_entry_type;
+        PERF_COUNTER_ADD(internal_delete_skipped_count, 1);
+        break;
+      case kTypeMerge:
+        assert(merge_operator_ != nullptr);
+        merge_context_.PushOperandBack(
+            iter_->value(), iter_->IsValuePinned() /* operand_pinned */);
+        break;
+      default:
+        assert(false);
+    }
+
+    PERF_COUNTER_ADD(internal_key_skipped_count, 1);
+    assert(user_comparator_->Equal(ikey.user_key, saved_key_.GetKey()));
+    iter_->Prev();
+    ++num_skipped;
+    FindParseableKey(&ikey, kReverse);
+  }
+
+  switch (last_key_entry_type) {
+    case kTypeDeletion:
+    case kTypeSingleDeletion:
+      valid_ = false;
+      return false;
+    case kTypeMerge:
+      current_entry_is_merged_ = true;
+      if (last_not_merge_type == kTypeDeletion) {
+        MergeHelper::TimedFullMerge(merge_operator_, saved_key_.GetKey(),
+                                    nullptr, merge_context_.GetOperands(),
+                                    &saved_value_, logger_, statistics_, env_,
+                                    &pinned_value_);
+      } else {
+        assert(last_not_merge_type == kTypeValue);
+        MergeHelper::TimedFullMerge(merge_operator_, saved_key_.GetKey(),
+                                    &pinned_value_,
+                                    merge_context_.GetOperands(), &saved_value_,
+                                    logger_, statistics_, env_, &pinned_value_);
+      }
+      break;
+    case kTypeValue:
+      // do nothing - we already have the value in saved_value_
+      break;
+    default:
+      assert(false);
+      break;
+  }
+  valid_ = true;
+  return true;
+}
+
+// This function is used in FindValueForCurrentKey.
+// We use the Seek() function instead of Prev() to find the necessary value
+bool DBIter::FindValueForCurrentKeyUsingSeek() {
+  // FindValueForCurrentKey will enable pinning before calling
+  // FindValueForCurrentKeyUsingSeek()
+  assert(pinned_iters_mgr_.PinningEnabled());
+  std::string last_key;
+  AppendInternalKey(&last_key, ParsedInternalKey(saved_key_.GetKey(), sequence_,
+                                                 kValueTypeForSeek));
+  iter_->Seek(last_key);
+  RecordTick(statistics_, NUMBER_OF_RESEEKS_IN_ITERATION);
+
+  // assume there is at least one parseable key for this user key
+  ParsedInternalKey ikey;
+  FindParseableKey(&ikey, kForward);
+
+  if (ikey.type == kTypeValue || ikey.type == kTypeDeletion ||
+      ikey.type == kTypeSingleDeletion) {
+    if (ikey.type == kTypeValue) {
+      assert(iter_->IsValuePinned());
+      pinned_value_ = iter_->value();
+      valid_ = true;
+      return true;
+    }
+    valid_ = false;
+    return false;
+  }
+
+  // kTypeMerge. We need to collect all kTypeMerge values and save them
+  // in operands
+  current_entry_is_merged_ = true;
+  merge_context_.Clear();
+  while (iter_->Valid() &&
+         user_comparator_->Equal(ikey.user_key, saved_key_.GetKey()) &&
+         ikey.type == kTypeMerge) {
+    merge_context_.PushOperand(iter_->value(),
+                               iter_->IsValuePinned() /* operand_pinned */);
+    iter_->Next();
+    FindParseableKey(&ikey, kForward);
+  }
+
+  if (!iter_->Valid() ||
+      !user_comparator_->Equal(ikey.user_key, saved_key_.GetKey()) ||
+      ikey.type == kTypeDeletion || ikey.type == kTypeSingleDeletion) {
+    MergeHelper::TimedFullMerge(merge_operator_, saved_key_.GetKey(), nullptr,
+                                merge_context_.GetOperands(), &saved_value_,
+                                logger_, statistics_, env_, &pinned_value_);
+    // Make iter_ valid and point to saved_key_
+    if (!iter_->Valid() ||
+        !user_comparator_->Equal(ikey.user_key, saved_key_.GetKey())) {
+      iter_->Seek(last_key);
+      RecordTick(statistics_, NUMBER_OF_RESEEKS_IN_ITERATION);
+    }
+    valid_ = true;
+    return true;
+  }
+
+  const Slice& val = iter_->value();
+  MergeHelper::TimedFullMerge(merge_operator_, saved_key_.GetKey(), &val,
+                              merge_context_.GetOperands(), &saved_value_,
+                              logger_, statistics_, env_, &pinned_value_);
+  valid_ = true;
+  return true;
+}
+
+// Used in Next to change direction.
+// Go to the next user key.
+// Don't use Seek(), because the next user key will be very close.
+void DBIter::FindNextUserKey() {
+  if (!iter_->Valid()) {
+    return;
+  }
+  ParsedInternalKey ikey;
+  FindParseableKey(&ikey, kForward);
+  while (iter_->Valid() &&
+         !user_comparator_->Equal(ikey.user_key, saved_key_.GetKey())) {
+    iter_->Next();
+    FindParseableKey(&ikey, kForward);
+  }
+}
+
+// Go to the previous user_key
+void DBIter::FindPrevUserKey() {
+  if (!iter_->Valid()) {
+    return;
+  }
+  size_t num_skipped = 0;
+  ParsedInternalKey ikey;
+  FindParseableKey(&ikey, kReverse);
+  int cmp;
+  while (iter_->Valid() && ((cmp = user_comparator_->Compare(
+                                 ikey.user_key, saved_key_.GetKey())) == 0 ||
+                            (cmp > 0 && ikey.sequence > sequence_))) {
+    if (cmp == 0) {
+      if (num_skipped >= max_skip_) {
+        num_skipped = 0;
+        IterKey last_key;
+        last_key.SetInternalKey(ParsedInternalKey(
+            saved_key_.GetKey(), kMaxSequenceNumber, kValueTypeForSeek));
+        iter_->Seek(last_key.GetKey());
+        RecordTick(statistics_, NUMBER_OF_RESEEKS_IN_ITERATION);
+      } else {
+        ++num_skipped;
+      }
+    }
+    iter_->Prev();
+    FindParseableKey(&ikey, kReverse);
+  }
+}
+
+// Skip all unparseable keys
+void DBIter::FindParseableKey(ParsedInternalKey* ikey, Direction direction) {
+  while (iter_->Valid() && !ParseKey(ikey)) {
+    if (direction == kReverse) {
+      iter_->Prev();
+    } else {
+      iter_->Next();
+    }
+  }
+}
+
+void DBIter::Seek(const Slice& target) {
+  StopWatch sw(env_, statistics_, DB_SEEK);
+  ReleaseTempPinnedData();
+  saved_key_.Clear();
+  // Now saved_key_ is used to store the internal key.
+  saved_key_.SetInternalKey(target, sequence_);
+
+  {
+    PERF_TIMER_GUARD(seek_internal_seek_time);
+    iter_->Seek(saved_key_.GetKey());
+  }
+
+  RecordTick(statistics_, NUMBER_DB_SEEK);
+  if (iter_->Valid()) {
+    if (prefix_extractor_ && prefix_same_as_start_) {
+      prefix_start_key_ = prefix_extractor_->Transform(target);
+    }
+    direction_ = kForward;
+    ClearSavedValue();
+    FindNextUserEntry(false /* not skipping */, prefix_same_as_start_);
+    if (!valid_) {
+      prefix_start_key_.clear();
+    }
+    if (statistics_ != nullptr) {
+      if (valid_) {
+        RecordTick(statistics_, NUMBER_DB_SEEK_FOUND);
+        RecordTick(statistics_, ITER_BYTES_READ, key().size() + value().size());
+      }
+    }
+  } else {
+    valid_ = false;
+  }
+  if (valid_ && prefix_extractor_ && prefix_same_as_start_) {
+    prefix_start_buf_.SetKey(prefix_start_key_);
+    prefix_start_key_ = prefix_start_buf_.GetKey();
+  }
+}
+
+void DBIter::SeekToFirst() {
+  // Don't use iter_::Seek() if we set a prefix extractor
+  // because prefix seek will be used.
+  if (prefix_extractor_ != nullptr) {
+    max_skip_ = std::numeric_limits<uint64_t>::max();
+  }
+  direction_ = kForward;
+  ReleaseTempPinnedData();
+  ClearSavedValue();
+
+  {
+    PERF_TIMER_GUARD(seek_internal_seek_time);
+    iter_->SeekToFirst();
+  }
+
+  RecordTick(statistics_, NUMBER_DB_SEEK);
+  if (iter_->Valid()) {
+    FindNextUserEntry(false /* not skipping */, false /* no prefix check */);
+    if (statistics_ != nullptr) {
+      if (valid_) {
+        RecordTick(statistics_, NUMBER_DB_SEEK_FOUND);
+        RecordTick(statistics_, ITER_BYTES_READ, key().size() + value().size());
+      }
+    }
+  } else {
+    valid_ = false;
+  }
+  if (valid_ && prefix_extractor_ && prefix_same_as_start_) {
+    prefix_start_buf_.SetKey(prefix_extractor_->Transform(saved_key_.GetKey()));
+    prefix_start_key_ = prefix_start_buf_.GetKey();
+  }
+}
+
+void DBIter::SeekToLast() {
+  // Don't use iter_::Seek() if we set a prefix extractor
+  // because prefix seek will be used.
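+  // (Editorial note, an inference rather than upstream text: the
+  // max_skip_-triggered reseeks in FindPrevUserKey() and
+  // FindValueForCurrentKey() issue iter_->Seek() on a full internal key,
+  // which does not mix with prefix seeking, so raising the threshold to the
+  // maximum value effectively disables them here.)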
+  if (prefix_extractor_ != nullptr) {
+    max_skip_ = std::numeric_limits<uint64_t>::max();
+  }
+  direction_ = kReverse;
+  ReleaseTempPinnedData();
+  ClearSavedValue();
+
+  {
+    PERF_TIMER_GUARD(seek_internal_seek_time);
+    iter_->SeekToLast();
+  }
+  // When iterate_upper_bound is set, seek to the last key before
+  // ReadOptions.iterate_upper_bound
+  if (iter_->Valid() && iterate_upper_bound_ != nullptr) {
+    saved_key_.SetKey(*iterate_upper_bound_, false /* copy */);
+    std::string last_key;
+    AppendInternalKey(&last_key,
+                      ParsedInternalKey(saved_key_.GetKey(), kMaxSequenceNumber,
+                                        kValueTypeForSeek));
+
+    iter_->Seek(last_key);
+
+    if (!iter_->Valid()) {
+      iter_->SeekToLast();
+    } else {
+      iter_->Prev();
+      if (!iter_->Valid()) {
+        valid_ = false;
+        return;
+      }
+    }
+  }
+  PrevInternal();
+  if (statistics_ != nullptr) {
+    RecordTick(statistics_, NUMBER_DB_SEEK);
+    if (valid_) {
+      RecordTick(statistics_, NUMBER_DB_SEEK_FOUND);
+      RecordTick(statistics_, ITER_BYTES_READ, key().size() + value().size());
+    }
+  }
+  if (valid_ && prefix_extractor_ && prefix_same_as_start_) {
+    prefix_start_buf_.SetKey(prefix_extractor_->Transform(saved_key_.GetKey()));
+    prefix_start_key_ = prefix_start_buf_.GetKey();
+  }
+}
+
+Iterator* NewDBIterator(Env* env, const ImmutableCFOptions& ioptions,
+                        const Comparator* user_key_comparator,
+                        InternalIterator* internal_iter,
+                        const SequenceNumber& sequence,
+                        uint64_t max_sequential_skip_in_iterations,
+                        uint64_t version_number,
+                        const Slice* iterate_upper_bound,
+                        bool prefix_same_as_start, bool pin_data) {
+  DBIter* db_iter =
+      new DBIter(env, ioptions, user_key_comparator, internal_iter, sequence,
+                 false, max_sequential_skip_in_iterations, version_number,
+                 iterate_upper_bound, prefix_same_as_start, pin_data);
+  return db_iter;
+}
+
+ArenaWrappedDBIter::~ArenaWrappedDBIter() { db_iter_->~DBIter(); }
+
+void ArenaWrappedDBIter::SetDBIter(DBIter* iter) { db_iter_ = iter; }
+
+void ArenaWrappedDBIter::SetIterUnderDBIter(InternalIterator* iter) {
+  static_cast<DBIter*>(db_iter_)->SetIter(iter);
+}
+
+inline bool ArenaWrappedDBIter::Valid() const { return db_iter_->Valid(); }
+inline void ArenaWrappedDBIter::SeekToFirst() { db_iter_->SeekToFirst(); }
+inline void ArenaWrappedDBIter::SeekToLast() { db_iter_->SeekToLast(); }
+inline void ArenaWrappedDBIter::Seek(const Slice& target) {
+  db_iter_->Seek(target);
+}
+inline void ArenaWrappedDBIter::Next() { db_iter_->Next(); }
+inline void ArenaWrappedDBIter::Prev() { db_iter_->Prev(); }
+inline Slice ArenaWrappedDBIter::key() const { return db_iter_->key(); }
+inline Slice ArenaWrappedDBIter::value() const { return db_iter_->value(); }
+inline Status ArenaWrappedDBIter::status() const { return db_iter_->status(); }
+inline Status ArenaWrappedDBIter::GetProperty(std::string prop_name,
+                                              std::string* prop) {
+  return db_iter_->GetProperty(prop_name, prop);
+}
+void ArenaWrappedDBIter::RegisterCleanup(CleanupFunction function, void* arg1,
+                                         void* arg2) {
+  db_iter_->RegisterCleanup(function, arg1, arg2);
+}
+
+ArenaWrappedDBIter* NewArenaWrappedDbIterator(
+    Env* env, const ImmutableCFOptions& ioptions,
+    const Comparator* user_key_comparator, const SequenceNumber& sequence,
+    uint64_t max_sequential_skip_in_iterations, uint64_t version_number,
+    const Slice* iterate_upper_bound, bool prefix_same_as_start,
+    bool pin_data) {
+  ArenaWrappedDBIter* iter = new ArenaWrappedDBIter();
+  Arena* arena = iter->GetArena();
+  auto mem = arena->AllocateAligned(sizeof(DBIter));
+  DBIter* db_iter = new (mem) DBIter(
+      env, ioptions, user_key_comparator, nullptr, sequence,
+      true, max_sequential_skip_in_iterations, version_number,
+      iterate_upper_bound, prefix_same_as_start, pin_data);
+
+  iter->SetDBIter(db_iter);
+
+  return iter;
+}
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/db_iter.h b/external/rocksdb/db/db_iter.h
new file mode 100755
index 0000000000..89db7ad384
--- /dev/null
+++ b/external/rocksdb/db/db_iter.h
@@ -0,0 +1,81 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#include <stdint.h>
+#include <string>
+#include "rocksdb/db.h"
+#include "rocksdb/iterator.h"
+#include "db/dbformat.h"
+#include "util/arena.h"
+#include "util/autovector.h"
+
+namespace rocksdb {
+
+class Arena;
+class DBIter;
+class InternalIterator;
+
+// Return a new iterator that converts internal keys (yielded by
+// "*internal_iter") that were live at the specified "sequence" number
+// into appropriate user keys.
+extern Iterator* NewDBIterator(
+    Env* env, const ImmutableCFOptions& options,
+    const Comparator* user_key_comparator, InternalIterator* internal_iter,
+    const SequenceNumber& sequence, uint64_t max_sequential_skip_in_iterations,
+    uint64_t version_number, const Slice* iterate_upper_bound = nullptr,
+    bool prefix_same_as_start = false, bool pin_data = false);
+
+// A wrapper iterator which wraps the DB iterator and the arena in which the
+// DB iterator is supposed to be allocated. This class is used as an entry
+// point of an iterator hierarchy whose memory can be allocated inline. That
+// way, accessing the iterator tree can be more cache friendly. It is also
+// faster to allocate.
+class ArenaWrappedDBIter : public Iterator {
+ public:
+  virtual ~ArenaWrappedDBIter();
+
+  // Get the arena to be used to allocate memory for DBIter to be wrapped,
+  // as well as child iterators in it.
+  virtual Arena* GetArena() { return &arena_; }
+
+  // Set the DB Iterator to be wrapped
+  virtual void SetDBIter(DBIter* iter);
+
+  // Set the internal iterator wrapped inside the DB Iterator. Usually it is
+  // a merging iterator.
+  virtual void SetIterUnderDBIter(InternalIterator* iter);
+  virtual bool Valid() const override;
+  virtual void SeekToFirst() override;
+  virtual void SeekToLast() override;
+  virtual void Seek(const Slice& target) override;
+  virtual void Next() override;
+  virtual void Prev() override;
+  virtual Slice key() const override;
+  virtual Slice value() const override;
+  virtual Status status() const override;
+
+  void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
+  virtual Status GetProperty(std::string prop_name, std::string* prop) override;
+
+ private:
+  DBIter* db_iter_;
+  Arena arena_;
+};
+
+// Generate the arena wrapped iterator class.
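+// Illustrative usage (a sketch, not part of the upstream header; the helper
+// make_internal_iter is hypothetical): the wrapper and its DBIter share one
+// arena, and the internal iterator is attached afterwards:
+//   ArenaWrappedDBIter* it = NewArenaWrappedDbIterator(
+//       env, ioptions, ucmp, snapshot_seq, max_skip, version_number);
+//   it->SetIterUnderDBIter(make_internal_iter(it->GetArena()));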
+extern ArenaWrappedDBIter* NewArenaWrappedDbIterator(
+    Env* env, const ImmutableCFOptions& options,
+    const Comparator* user_key_comparator, const SequenceNumber& sequence,
+    uint64_t max_sequential_skip_in_iterations, uint64_t version_number,
+    const Slice* iterate_upper_bound = nullptr,
+    bool prefix_same_as_start = false, bool pin_data = false);
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/db_iter_test.cc b/external/rocksdb/db/db_iter_test.cc
new file mode 100755
index 0000000000..30956e35c7
--- /dev/null
+++ b/external/rocksdb/db/db_iter_test.cc
@@ -0,0 +1,2298 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <algorithm>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "db/db_iter.h"
+#include "db/dbformat.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/perf_context.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/statistics.h"
+#include "table/iterator_wrapper.h"
+#include "table/merger.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+#include "util/testharness.h"
+#include "utilities/merge_operators.h"
+
+namespace rocksdb {
+
+static uint64_t TestGetTickerCount(const Options& options,
+                                   Tickers ticker_type) {
+  return options.statistics->getTickerCount(ticker_type);
+}
+
+class TestIterator : public InternalIterator {
+ public:
+  explicit TestIterator(const Comparator* comparator)
+      : initialized_(false),
+        valid_(false),
+        sequence_number_(0),
+        iter_(0),
+        cmp(comparator) {}
+
+  void AddPut(std::string argkey, std::string argvalue) {
+    Add(argkey, kTypeValue, argvalue);
+  }
+
+  void AddDeletion(std::string argkey) {
+    Add(argkey, kTypeDeletion, std::string());
+  }
+
+  void AddSingleDeletion(std::string argkey) {
+    Add(argkey, kTypeSingleDeletion, std::string());
+  }
+
+  void AddMerge(std::string argkey, std::string argvalue) {
+    Add(argkey, kTypeMerge, argvalue);
+  }
+
+  void Add(std::string argkey, ValueType type, std::string argvalue) {
+    Add(argkey, type, argvalue, sequence_number_++);
+  }
+
+  void Add(std::string argkey, ValueType type, std::string argvalue,
+           size_t seq_num, bool update_iter = false) {
+    valid_ = true;
+    ParsedInternalKey internal_key(argkey, seq_num, type);
+    data_.push_back(
+        std::pair<std::string, std::string>(std::string(), argvalue));
+    AppendInternalKey(&data_.back().first, internal_key);
+    if (update_iter && valid_ && cmp.Compare(data_.back().first, key()) < 0) {
+      // insert a key smaller than the current key
+      Finish();
+      // data_[iter_] is no longer the current element of the iterator.
+      // Increment iter_ to reposition it at the right element.
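+      // (Worked example with assumed values: if data_ held ["b", "c"] with
+      // iter_ == 0 pointing at "b", adding "a" re-sorts data_ to
+      // ["a", "b", "c"], so "b" moves to index 1 and iter_ must follow.)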
+      iter_++;
+    }
+  }
+
+  // should be called before operations with the iterator
+  void Finish() {
+    initialized_ = true;
+    std::sort(data_.begin(), data_.end(),
+              [this](std::pair<std::string, std::string> a,
+                     std::pair<std::string, std::string> b) {
+                return (cmp.Compare(a.first, b.first) < 0);
+              });
+  }
+
+  virtual bool Valid() const override {
+    assert(initialized_);
+    return valid_;
+  }
+
+  virtual void SeekToFirst() override {
+    assert(initialized_);
+    valid_ = (data_.size() > 0);
+    iter_ = 0;
+  }
+
+  virtual void SeekToLast() override {
+    assert(initialized_);
+    valid_ = (data_.size() > 0);
+    iter_ = data_.size() - 1;
+  }
+
+  virtual void Seek(const Slice& target) override {
+    assert(initialized_);
+    SeekToFirst();
+    if (!valid_) {
+      return;
+    }
+    while (iter_ < data_.size() &&
+           (cmp.Compare(data_[iter_].first, target) < 0)) {
+      ++iter_;
+    }
+
+    if (iter_ == data_.size()) {
+      valid_ = false;
+    }
+  }
+
+  virtual void Next() override {
+    assert(initialized_);
+    if (data_.empty() || (iter_ == data_.size() - 1)) {
+      valid_ = false;
+    } else {
+      ++iter_;
+    }
+  }
+
+  virtual void Prev() override {
+    assert(initialized_);
+    if (iter_ == 0) {
+      valid_ = false;
+    } else {
+      --iter_;
+    }
+  }
+
+  virtual Slice key() const override {
+    assert(initialized_);
+    return data_[iter_].first;
+  }
+
+  virtual Slice value() const override {
+    assert(initialized_);
+    return data_[iter_].second;
+  }
+
+  virtual Status status() const override {
+    assert(initialized_);
+    return Status::OK();
+  }
+
+  virtual bool IsKeyPinned() const override { return true; }
+  virtual bool IsValuePinned() const override { return true; }
+
+ private:
+  bool initialized_;
+  bool valid_;
+  size_t sequence_number_;
+  size_t iter_;
+
+  InternalKeyComparator cmp;
+  std::vector<std::pair<std::string, std::string>> data_;
+};
+
+class DBIteratorTest : public testing::Test {
+ public:
+  Env* env_;
+
+  DBIteratorTest() : env_(Env::Default()) {}
+};
+
+TEST_F(DBIteratorTest, DBIteratorPrevNext) {
+  Options options;
+
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddDeletion("a");
+    internal_iter->AddDeletion("a");
+    internal_iter->AddDeletion("a");
+    internal_iter->AddDeletion("a");
+    internal_iter->AddPut("a", "val_a");
+
+    internal_iter->AddPut("b", "val_b");
+    internal_iter->Finish();
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        10, options.max_sequential_skip_in_iterations, 0));
+
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "b");
+    ASSERT_EQ(db_iter->value().ToString(), "val_b");
+
+    db_iter->Prev();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "a");
+    ASSERT_EQ(db_iter->value().ToString(), "val_a");
+
+    db_iter->Next();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "b");
+    ASSERT_EQ(db_iter->value().ToString(), "val_b");
+
+    db_iter->Next();
+    ASSERT_TRUE(!db_iter->Valid());
+  }
+  // Test to check the SeekToLast() with iterate_upper_bound not set
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddPut("a", "val_a");
+    internal_iter->AddPut("b", "val_b");
+    internal_iter->AddPut("b", "val_b");
+    internal_iter->AddPut("c", "val_c");
+    internal_iter->Finish();
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        10, options.max_sequential_skip_in_iterations, 0));
+
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "c");
+  }
+
+  // Test to check the SeekToLast() with iterate_upper_bound set
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+
+    internal_iter->AddPut("a", "val_a");
+    internal_iter->AddPut("b", "val_b");
+    internal_iter->AddPut("c", "val_c");
+    internal_iter->AddPut("d", "val_d");
+    internal_iter->AddPut("e", "val_e");
+    internal_iter->AddPut("f", "val_f");
+    internal_iter->Finish();
+
+    Slice prefix("d");
+
+    ReadOptions ro;
+    ro.iterate_upper_bound = &prefix;
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        10, options.max_sequential_skip_in_iterations, 0,
+        ro.iterate_upper_bound));
+
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "c");
+
+    db_iter->Next();
+    ASSERT_TRUE(!db_iter->Valid());
+
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "c");
+  }
+  // Test to check the SeekToLast() iterate_upper_bound set to a key that
+  // is not Put yet
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+
+    internal_iter->AddPut("a", "val_a");
+    internal_iter->AddPut("a", "val_a");
+    internal_iter->AddPut("b", "val_b");
+    internal_iter->AddPut("c", "val_c");
+    internal_iter->AddPut("d", "val_d");
+    internal_iter->Finish();
+
+    Slice prefix("z");
+
+    ReadOptions ro;
+    ro.iterate_upper_bound = &prefix;
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        10, options.max_sequential_skip_in_iterations, 0,
+        ro.iterate_upper_bound));
+
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "d");
+
+    db_iter->Next();
+    ASSERT_TRUE(!db_iter->Valid());
+
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "d");
+
+    db_iter->Prev();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "c");
+  }
+  // Test to check the SeekToLast() with iterate_upper_bound set to the
+  // first key
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddPut("a", "val_a");
+    internal_iter->AddPut("a", "val_a");
+    internal_iter->AddPut("a", "val_a");
+    internal_iter->AddPut("b", "val_b");
+    internal_iter->AddPut("b", "val_b");
+    internal_iter->Finish();
+
+    Slice prefix("a");
+
+    ReadOptions ro;
+    ro.iterate_upper_bound = &prefix;
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        10, options.max_sequential_skip_in_iterations, 0,
+        ro.iterate_upper_bound));
+
+    db_iter->SeekToLast();
+    ASSERT_TRUE(!db_iter->Valid());
+  }
+  // Test case to check SeekToLast with iterate_upper_bound set
+  // (same key put many times - SeekToLast should start with the
+  // maximum sequence id of the upper bound)
+
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddPut("a", "val_a");
+    internal_iter->AddPut("b", "val_b");
+    internal_iter->AddPut("c", "val_c");
+    internal_iter->AddPut("c", "val_c");
+    internal_iter->AddPut("c", "val_c");
+    internal_iter->AddPut("c", "val_c");
+    internal_iter->AddPut("c", "val_c");
+    internal_iter->AddPut("c", "val_c");
+    internal_iter->AddPut("c", "val_c");
+    internal_iter->Finish();
+
+    Slice prefix("c");
+
+    ReadOptions ro;
+    ro.iterate_upper_bound = &prefix;
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        7, options.max_sequential_skip_in_iterations, 0,
+        ro.iterate_upper_bound));
+
+    SetPerfLevel(kEnableCount);
+    ASSERT_TRUE(GetPerfLevel() == kEnableCount);
+
+    perf_context.Reset();
+    db_iter->SeekToLast();
+
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(static_cast<int>(perf_context.internal_key_skipped_count), 1);
+    ASSERT_EQ(db_iter->key().ToString(), "b");
+
+    SetPerfLevel(kDisable);
+  }
+  // Test to check the SeekToLast() with the iterate_upper_bound set
+  // (Checking the value of the key which has sequence ids greater than
+  // and less than the iterator's sequence id)
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+
+    internal_iter->AddPut("a", "val_a1");
+    internal_iter->AddPut("a", "val_a2");
+    internal_iter->AddPut("b", "val_b1");
+    internal_iter->AddPut("c", "val_c1");
+    internal_iter->AddPut("c", "val_c2");
+    internal_iter->AddPut("c", "val_c3");
+    internal_iter->AddPut("b", "val_b2");
+    internal_iter->AddPut("d", "val_d1");
+    internal_iter->Finish();
+
+    Slice prefix("c");
+
+    ReadOptions ro;
+    ro.iterate_upper_bound = &prefix;
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        4, options.max_sequential_skip_in_iterations, 0,
+        ro.iterate_upper_bound));
+
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "b");
+    ASSERT_EQ(db_iter->value().ToString(), "val_b1");
+  }
+
+  // Test to check the SeekToLast() with the iterate_upper_bound set to the
+  // key that is deleted
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddPut("a", "val_a");
+    internal_iter->AddDeletion("a");
+    internal_iter->AddPut("b", "val_b");
+    internal_iter->AddPut("c", "val_c");
+    internal_iter->Finish();
+
+    Slice prefix("a");
+
+    ReadOptions ro;
+    ro.iterate_upper_bound = &prefix;
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        10, options.max_sequential_skip_in_iterations, 0,
+        ro.iterate_upper_bound));
+
+    db_iter->SeekToLast();
+    ASSERT_TRUE(!db_iter->Valid());
+  }
+  // Test to check the SeekToLast() with the iterate_upper_bound set
+  // (Deletion cases)
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddPut("a", "val_a");
+    internal_iter->AddPut("b", "val_b");
+    internal_iter->AddDeletion("b");
+    internal_iter->AddPut("c", "val_c");
+    internal_iter->Finish();
+
+    Slice prefix("c");
+
+    ReadOptions ro;
+    ro.iterate_upper_bound = &prefix;
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        10, options.max_sequential_skip_in_iterations, 0,
+        ro.iterate_upper_bound));
+
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "a");
+
+    db_iter->Next();
+    ASSERT_TRUE(!db_iter->Valid());
+
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "a");
+  }
+  // Test to check the SeekToLast() with iterate_upper_bound set
+  // (Deletion cases - a lot of internal keys after the upper_bound
+  // are deleted)
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddPut("a", "val_a");
+    internal_iter->AddPut("b", "val_b");
+    internal_iter->AddDeletion("c");
+    internal_iter->AddDeletion("d");
+    internal_iter->AddDeletion("e");
+    internal_iter->AddDeletion("f");
+    internal_iter->AddDeletion("g");
+    internal_iter->AddDeletion("h");
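+    // (Note on implicit sequence numbers, inferred from TestIterator::Add
+    // above, which assigns sequence_number_++: "a" takes 0, "b" takes 1, and
+    // the deletions take 2..7, so the DBIter built at sequence 7 below can
+    // see every one of these entries.)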
internal_iter->Finish(); + + Slice prefix("c"); + + ReadOptions ro; + ro.iterate_upper_bound = &prefix; + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 7, options.max_sequential_skip_in_iterations, 0, + ro.iterate_upper_bound)); + + SetPerfLevel(kEnableCount); + ASSERT_TRUE(GetPerfLevel() == kEnableCount); + + perf_context.Reset(); + db_iter->SeekToLast(); + + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(static_cast(perf_context.internal_delete_skipped_count), 0); + ASSERT_EQ(db_iter->key().ToString(), "b"); + + SetPerfLevel(kDisable); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddDeletion("a"); + internal_iter->AddDeletion("a"); + internal_iter->AddDeletion("a"); + internal_iter->AddDeletion("a"); + internal_iter->AddPut("a", "val_a"); + + internal_iter->AddPut("b", "val_b"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 10, options.max_sequential_skip_in_iterations, 0)); + + db_iter->SeekToFirst(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "val_a"); + + db_iter->Next(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), "val_b"); + + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "val_a"); + + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddPut("a", "val_a"); + internal_iter->AddPut("b", "val_b"); + + internal_iter->AddPut("a", "val_a"); + internal_iter->AddPut("b", "val_b"); + + internal_iter->AddPut("a", "val_a"); + internal_iter->AddPut("b", "val_b"); + + internal_iter->AddPut("a", "val_a"); + internal_iter->AddPut("b", "val_b"); + + internal_iter->AddPut("a", "val_a"); + internal_iter->AddPut("b", "val_b"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 2, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), "val_b"); + + db_iter->Next(); + ASSERT_TRUE(!db_iter->Valid()); + + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), "val_b"); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddPut("a", "val_a"); + internal_iter->AddPut("a", "val_a"); + internal_iter->AddPut("a", "val_a"); + internal_iter->AddPut("a", "val_a"); + internal_iter->AddPut("a", "val_a"); + + internal_iter->AddPut("b", "val_b"); + + internal_iter->AddPut("c", "val_c"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 10, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "c"); + ASSERT_EQ(db_iter->value().ToString(), "val_c"); + + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), "val_b"); + + db_iter->Next(); + 
ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "c"); + ASSERT_EQ(db_iter->value().ToString(), "val_c"); + } +} + +TEST_F(DBIteratorTest, DBIteratorEmpty) { + Options options; + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 0, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 0, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToFirst(); + ASSERT_TRUE(!db_iter->Valid()); + } +} + +TEST_F(DBIteratorTest, DBIteratorUseSkipCountSkips) { + Options options; + options.statistics = rocksdb::CreateDBStatistics(); + options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); + + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + for (size_t i = 0; i < 200; ++i) { + internal_iter->AddPut("a", "a"); + internal_iter->AddPut("b", "b"); + internal_iter->AddPut("c", "c"); + } + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, 2, + options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "c"); + ASSERT_EQ(db_iter->value().ToString(), "c"); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 1u); + + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), "b"); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 2u); + + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "a"); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 3u); + + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 3u); +} + +TEST_F(DBIteratorTest, DBIteratorUseSkip) { + Options options; + options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); + { + for (size_t i = 0; i < 200; ++i) { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("b", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + for (size_t k = 0; k < 200; ++k) { + internal_iter->AddPut("c", ToString(k)); + } + internal_iter->Finish(); + + options.statistics = rocksdb::CreateDBStatistics(); + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), + internal_iter, i + 2, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "c"); + ASSERT_EQ(db_iter->value().ToString(), ToString(i)); + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1"); + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_2"); + db_iter->Prev(); + + ASSERT_TRUE(!db_iter->Valid()); + } + } + + { + for (size_t i = 0; i 
< 200; ++i) { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("b", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + for (size_t k = 0; k < 200; ++k) { + internal_iter->AddDeletion("c"); + } + internal_iter->AddPut("c", "200"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), + internal_iter, i + 2, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1"); + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_2"); + db_iter->Prev(); + + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("b", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + for (size_t i = 0; i < 200; ++i) { + internal_iter->AddDeletion("c"); + } + internal_iter->AddPut("c", "200"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), + internal_iter, 202, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "c"); + ASSERT_EQ(db_iter->value().ToString(), "200"); + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1"); + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_2"); + db_iter->Prev(); + + ASSERT_TRUE(!db_iter->Valid()); + } + } + + { + for (size_t i = 0; i < 200; ++i) { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + for (size_t k = 0; k < 200; ++k) { + internal_iter->AddDeletion("c"); + } + internal_iter->AddPut("c", "200"); + internal_iter->Finish(); + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), + internal_iter, i, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(!db_iter->Valid()); + + db_iter->SeekToFirst(); + ASSERT_TRUE(!db_iter->Valid()); + } + + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + for (size_t i = 0; i < 200; ++i) { + internal_iter->AddDeletion("c"); + } + internal_iter->AddPut("c", "200"); + internal_iter->Finish(); + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 200, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "c"); + ASSERT_EQ(db_iter->value().ToString(), "200"); + + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + + db_iter->SeekToFirst(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "c"); + ASSERT_EQ(db_iter->value().ToString(), "200"); + + db_iter->Next(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + for (size_t i = 0; i < 200; ++i) { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("b", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + for (size_t k = 0; k < 200; ++k) { + internal_iter->AddPut("d", ToString(k)); + } + + for (size_t k = 0; k < 200; ++k) { + 
internal_iter->AddPut("c", ToString(k)); + } + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), + internal_iter, i + 2, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "d"); + ASSERT_EQ(db_iter->value().ToString(), ToString(i)); + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1"); + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_2"); + db_iter->Prev(); + + ASSERT_TRUE(!db_iter->Valid()); + } + } + + { + for (size_t i = 0; i < 200; ++i) { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("b", "b"); + internal_iter->AddMerge("a", "a"); + for (size_t k = 0; k < 200; ++k) { + internal_iter->AddMerge("c", ToString(k)); + } + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), + internal_iter, i + 2, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "c"); + std::string merge_result = "0"; + for (size_t j = 1; j <= i; ++j) { + merge_result += "," + ToString(j); + } + ASSERT_EQ(db_iter->value().ToString(), merge_result); + + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), "b"); + + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "a"); + + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + } +} + +TEST_F(DBIteratorTest, DBIterator1) { + Options options; + options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); + + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddPut("a", "0"); + internal_iter->AddPut("b", "0"); + internal_iter->AddDeletion("b"); + internal_iter->AddMerge("a", "1"); + internal_iter->AddMerge("b", "2"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, 1, + options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToFirst(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "0"); + db_iter->Next(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "b"); + db_iter->Next(); + ASSERT_FALSE(db_iter->Valid()); +} + +TEST_F(DBIteratorTest, DBIterator2) { + Options options; + options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); + + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddPut("a", "0"); + internal_iter->AddPut("b", "0"); + internal_iter->AddDeletion("b"); + internal_iter->AddMerge("a", "1"); + internal_iter->AddMerge("b", "2"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, 0, + options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToFirst(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "0"); + db_iter->Next(); + 
ASSERT_TRUE(!db_iter->Valid()); +} + +TEST_F(DBIteratorTest, DBIterator3) { + Options options; + options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); + + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddPut("a", "0"); + internal_iter->AddPut("b", "0"); + internal_iter->AddDeletion("b"); + internal_iter->AddMerge("a", "1"); + internal_iter->AddMerge("b", "2"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, 2, + options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToFirst(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "0"); + db_iter->Next(); + ASSERT_TRUE(!db_iter->Valid()); +} +TEST_F(DBIteratorTest, DBIterator4) { + Options options; + options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); + + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddPut("a", "0"); + internal_iter->AddPut("b", "0"); + internal_iter->AddDeletion("b"); + internal_iter->AddMerge("a", "1"); + internal_iter->AddMerge("b", "2"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, 4, + options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToFirst(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "0,1"); + db_iter->Next(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), "2"); + db_iter->Next(); + ASSERT_TRUE(!db_iter->Valid()); +} + +TEST_F(DBIteratorTest, DBIterator5) { + Options options; + options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddPut("a", "put_1"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 0, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddPut("a", "put_1"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 1, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1,merge_2"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new 
TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddPut("a", "put_1"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 2, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1,merge_2,merge_3"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddPut("a", "put_1"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 3, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "put_1"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddPut("a", "put_1"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 4, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "put_1,merge_4"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddPut("a", "put_1"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 5, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "put_1,merge_4,merge_5"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddPut("a", "put_1"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr 
db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 6, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "put_1,merge_4,merge_5,merge_6"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } +} + +TEST_F(DBIteratorTest, DBIterator6) { + Options options; + options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddDeletion("a"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 0, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddDeletion("a"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 1, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1,merge_2"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddDeletion("a"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 2, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1,merge_2,merge_3"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddDeletion("a"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 3, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* 
internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddDeletion("a"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 4, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_4"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddDeletion("a"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 5, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_4,merge_5"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddMerge("a", "merge_2"); + internal_iter->AddMerge("a", "merge_3"); + internal_iter->AddDeletion("a"); + internal_iter->AddMerge("a", "merge_4"); + internal_iter->AddMerge("a", "merge_5"); + internal_iter->AddMerge("a", "merge_6"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 6, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_4,merge_5,merge_6"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } +} + +TEST_F(DBIteratorTest, DBIterator7) { + Options options; + options.merge_operator = MergeOperators::CreateFromStringId("stringappend"); + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddPut("b", "val"); + internal_iter->AddMerge("b", "merge_2"); + + internal_iter->AddDeletion("b"); + internal_iter->AddMerge("b", "merge_3"); + + internal_iter->AddMerge("c", "merge_4"); + internal_iter->AddMerge("c", "merge_5"); + + internal_iter->AddDeletion("b"); + internal_iter->AddMerge("b", "merge_6"); + internal_iter->AddMerge("b", "merge_7"); + internal_iter->AddMerge("b", "merge_8"); + internal_iter->AddMerge("b", "merge_9"); + internal_iter->AddMerge("b", "merge_10"); + internal_iter->AddMerge("b", "merge_11"); + + internal_iter->AddDeletion("c"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 0, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1"); + 
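+    // At sequence 0 only AddMerge("a", "merge_1") is visible, so the full
+    // merge runs with a nullptr base value and a single operand, and the
+    // result degenerates to "merge_1" (see MergeValuesNewToOld()).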
+    db_iter->Prev();
+    ASSERT_TRUE(!db_iter->Valid());
+  }
+
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddMerge("a", "merge_1");
+    internal_iter->AddPut("b", "val");
+    internal_iter->AddMerge("b", "merge_2");
+
+    internal_iter->AddDeletion("b");
+    internal_iter->AddMerge("b", "merge_3");
+
+    internal_iter->AddMerge("c", "merge_4");
+    internal_iter->AddMerge("c", "merge_5");
+
+    internal_iter->AddDeletion("b");
+    internal_iter->AddMerge("b", "merge_6");
+    internal_iter->AddMerge("b", "merge_7");
+    internal_iter->AddMerge("b", "merge_8");
+    internal_iter->AddMerge("b", "merge_9");
+    internal_iter->AddMerge("b", "merge_10");
+    internal_iter->AddMerge("b", "merge_11");
+
+    internal_iter->AddDeletion("c");
+    internal_iter->Finish();
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        2, options.max_sequential_skip_in_iterations, 0));
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+
+    ASSERT_EQ(db_iter->key().ToString(), "b");
+    ASSERT_EQ(db_iter->value().ToString(), "val,merge_2");
+    db_iter->Prev();
+    ASSERT_TRUE(db_iter->Valid());
+
+    ASSERT_EQ(db_iter->key().ToString(), "a");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
+    db_iter->Prev();
+    ASSERT_TRUE(!db_iter->Valid());
+  }
+
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddMerge("a", "merge_1");
+    internal_iter->AddPut("b", "val");
+    internal_iter->AddMerge("b", "merge_2");
+
+    internal_iter->AddDeletion("b");
+    internal_iter->AddMerge("b", "merge_3");
+
+    internal_iter->AddMerge("c", "merge_4");
+    internal_iter->AddMerge("c", "merge_5");
+
+    internal_iter->AddDeletion("b");
+    internal_iter->AddMerge("b", "merge_6");
+    internal_iter->AddMerge("b", "merge_7");
+    internal_iter->AddMerge("b", "merge_8");
+    internal_iter->AddMerge("b", "merge_9");
+    internal_iter->AddMerge("b", "merge_10");
+    internal_iter->AddMerge("b", "merge_11");
+
+    internal_iter->AddDeletion("c");
+    internal_iter->Finish();
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        4, options.max_sequential_skip_in_iterations, 0));
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+
+    ASSERT_EQ(db_iter->key().ToString(), "b");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_3");
+    db_iter->Prev();
+    ASSERT_TRUE(db_iter->Valid());
+
+    ASSERT_EQ(db_iter->key().ToString(), "a");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
+    db_iter->Prev();
+    ASSERT_TRUE(!db_iter->Valid());
+  }
+
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddMerge("a", "merge_1");
+    internal_iter->AddPut("b", "val");
+    internal_iter->AddMerge("b", "merge_2");
+
+    internal_iter->AddDeletion("b");
+    internal_iter->AddMerge("b", "merge_3");
+
+    internal_iter->AddMerge("c", "merge_4");
+    internal_iter->AddMerge("c", "merge_5");
+
+    internal_iter->AddDeletion("b");
+    internal_iter->AddMerge("b", "merge_6");
+    internal_iter->AddMerge("b", "merge_7");
+    internal_iter->AddMerge("b", "merge_8");
+    internal_iter->AddMerge("b", "merge_9");
+    internal_iter->AddMerge("b", "merge_10");
+    internal_iter->AddMerge("b", "merge_11");
+
+    internal_iter->AddDeletion("c");
+    internal_iter->Finish();
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        5, options.max_sequential_skip_in_iterations, 0));
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+
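+    // Sequence 5 already includes merge_4 on "c", so SeekToLast() lands
+    // on "c".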
+    ASSERT_EQ(db_iter->key().ToString(), "c");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_4");
+    db_iter->Prev();
+
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "b");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_3");
+    db_iter->Prev();
+    ASSERT_TRUE(db_iter->Valid());
+
+    ASSERT_EQ(db_iter->key().ToString(), "a");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
+    db_iter->Prev();
+    ASSERT_TRUE(!db_iter->Valid());
+  }
+
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddMerge("a", "merge_1");
+    internal_iter->AddPut("b", "val");
+    internal_iter->AddMerge("b", "merge_2");
+
+    internal_iter->AddDeletion("b");
+    internal_iter->AddMerge("b", "merge_3");
+
+    internal_iter->AddMerge("c", "merge_4");
+    internal_iter->AddMerge("c", "merge_5");
+
+    internal_iter->AddDeletion("b");
+    internal_iter->AddMerge("b", "merge_6");
+    internal_iter->AddMerge("b", "merge_7");
+    internal_iter->AddMerge("b", "merge_8");
+    internal_iter->AddMerge("b", "merge_9");
+    internal_iter->AddMerge("b", "merge_10");
+    internal_iter->AddMerge("b", "merge_11");
+
+    internal_iter->AddDeletion("c");
+    internal_iter->Finish();
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        6, options.max_sequential_skip_in_iterations, 0));
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+
+    ASSERT_EQ(db_iter->key().ToString(), "c");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_4,merge_5");
+    db_iter->Prev();
+    ASSERT_TRUE(db_iter->Valid());
+
+    ASSERT_EQ(db_iter->key().ToString(), "b");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_3");
+    db_iter->Prev();
+    ASSERT_TRUE(db_iter->Valid());
+
+    ASSERT_EQ(db_iter->key().ToString(), "a");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
+    db_iter->Prev();
+    ASSERT_TRUE(!db_iter->Valid());
+  }
+
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddMerge("a", "merge_1");
+    internal_iter->AddPut("b", "val");
+    internal_iter->AddMerge("b", "merge_2");
+
+    internal_iter->AddDeletion("b");
+    internal_iter->AddMerge("b", "merge_3");
+
+    internal_iter->AddMerge("c", "merge_4");
+    internal_iter->AddMerge("c", "merge_5");
+
+    internal_iter->AddDeletion("b");
+    internal_iter->AddMerge("b", "merge_6");
+    internal_iter->AddMerge("b", "merge_7");
+    internal_iter->AddMerge("b", "merge_8");
+    internal_iter->AddMerge("b", "merge_9");
+    internal_iter->AddMerge("b", "merge_10");
+    internal_iter->AddMerge("b", "merge_11");
+
+    internal_iter->AddDeletion("c");
+    internal_iter->Finish();
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        7, options.max_sequential_skip_in_iterations, 0));
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+
+    ASSERT_EQ(db_iter->key().ToString(), "c");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_4,merge_5");
+    db_iter->Prev();
+    ASSERT_TRUE(db_iter->Valid());
+
+    ASSERT_EQ(db_iter->key().ToString(), "a");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
+    db_iter->Prev();
+    ASSERT_TRUE(!db_iter->Valid());
+  }
+
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddMerge("a", "merge_1");
+    internal_iter->AddPut("b", "val");
+    internal_iter->AddMerge("b", "merge_2");
+
+    internal_iter->AddDeletion("b");
+    internal_iter->AddMerge("b", "merge_3");
+
+    internal_iter->AddMerge("c", "merge_4");
"merge_5"); + + internal_iter->AddDeletion("b"); + internal_iter->AddMerge("b", "merge_6"); + internal_iter->AddMerge("b", "merge_7"); + internal_iter->AddMerge("b", "merge_8"); + internal_iter->AddMerge("b", "merge_9"); + internal_iter->AddMerge("b", "merge_10"); + internal_iter->AddMerge("b", "merge_11"); + + internal_iter->AddDeletion("c"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 9, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "c"); + ASSERT_EQ(db_iter->value().ToString(), "merge_4,merge_5"); + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), "merge_6,merge_7"); + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddPut("b", "val"); + internal_iter->AddMerge("b", "merge_2"); + + internal_iter->AddDeletion("b"); + internal_iter->AddMerge("b", "merge_3"); + + internal_iter->AddMerge("c", "merge_4"); + internal_iter->AddMerge("c", "merge_5"); + + internal_iter->AddDeletion("b"); + internal_iter->AddMerge("b", "merge_6"); + internal_iter->AddMerge("b", "merge_7"); + internal_iter->AddMerge("b", "merge_8"); + internal_iter->AddMerge("b", "merge_9"); + internal_iter->AddMerge("b", "merge_10"); + internal_iter->AddMerge("b", "merge_11"); + + internal_iter->AddDeletion("c"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 13, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "c"); + ASSERT_EQ(db_iter->value().ToString(), "merge_4,merge_5"); + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_TRUE(db_iter->Valid()); + ASSERT_EQ(db_iter->key().ToString(), "b"); + ASSERT_EQ(db_iter->value().ToString(), + "merge_6,merge_7,merge_8,merge_9,merge_10,merge_11"); + db_iter->Prev(); + ASSERT_TRUE(db_iter->Valid()); + + ASSERT_EQ(db_iter->key().ToString(), "a"); + ASSERT_EQ(db_iter->value().ToString(), "merge_1"); + db_iter->Prev(); + ASSERT_TRUE(!db_iter->Valid()); + } + + { + TestIterator* internal_iter = new TestIterator(BytewiseComparator()); + internal_iter->AddMerge("a", "merge_1"); + internal_iter->AddPut("b", "val"); + internal_iter->AddMerge("b", "merge_2"); + + internal_iter->AddDeletion("b"); + internal_iter->AddMerge("b", "merge_3"); + + internal_iter->AddMerge("c", "merge_4"); + internal_iter->AddMerge("c", "merge_5"); + + internal_iter->AddDeletion("b"); + internal_iter->AddMerge("b", "merge_6"); + internal_iter->AddMerge("b", "merge_7"); + internal_iter->AddMerge("b", "merge_8"); + internal_iter->AddMerge("b", "merge_9"); + internal_iter->AddMerge("b", "merge_10"); + internal_iter->AddMerge("b", "merge_11"); + + internal_iter->AddDeletion("c"); + internal_iter->Finish(); + + std::unique_ptr db_iter(NewDBIterator( + env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, + 14, options.max_sequential_skip_in_iterations, 0)); + db_iter->SeekToLast(); + 
+    ASSERT_TRUE(db_iter->Valid());
+
+    ASSERT_EQ(db_iter->key().ToString(), "b");
+    ASSERT_EQ(db_iter->value().ToString(),
+              "merge_6,merge_7,merge_8,merge_9,merge_10,merge_11");
+    db_iter->Prev();
+    ASSERT_TRUE(db_iter->Valid());
+
+    ASSERT_EQ(db_iter->key().ToString(), "a");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_1");
+    db_iter->Prev();
+    ASSERT_TRUE(!db_iter->Valid());
+  }
+}
+
+TEST_F(DBIteratorTest, DBIterator8) {
+  Options options;
+  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
+
+  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+  internal_iter->AddDeletion("a");
+  internal_iter->AddPut("a", "0");
+  internal_iter->AddPut("b", "0");
+  internal_iter->Finish();
+
+  std::unique_ptr<Iterator> db_iter(NewDBIterator(
+      env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+      10, options.max_sequential_skip_in_iterations, 0));
+  db_iter->SeekToLast();
+  ASSERT_TRUE(db_iter->Valid());
+  ASSERT_EQ(db_iter->key().ToString(), "b");
+  ASSERT_EQ(db_iter->value().ToString(), "0");
+
+  db_iter->Prev();
+  ASSERT_TRUE(db_iter->Valid());
+  ASSERT_EQ(db_iter->key().ToString(), "a");
+  ASSERT_EQ(db_iter->value().ToString(), "0");
+}
+
+// TODO(3.13): fix the issue of Seek() then Prev() which might not necessarily
+// return the biggest element smaller than the seek key.
+TEST_F(DBIteratorTest, DBIterator9) {
+  Options options;
+  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
+  {
+    TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+    internal_iter->AddMerge("a", "merge_1");
+    internal_iter->AddMerge("a", "merge_2");
+    internal_iter->AddMerge("b", "merge_3");
+    internal_iter->AddMerge("b", "merge_4");
+    internal_iter->AddMerge("d", "merge_5");
+    internal_iter->AddMerge("d", "merge_6");
+    internal_iter->Finish();
+
+    std::unique_ptr<Iterator> db_iter(NewDBIterator(
+        env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+        10, options.max_sequential_skip_in_iterations, 0));
+
+    db_iter->SeekToLast();
+    ASSERT_TRUE(db_iter->Valid());
+    db_iter->Prev();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "b");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_3,merge_4");
+    db_iter->Next();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "d");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_5,merge_6");
+
+    db_iter->Seek("b");
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "b");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_3,merge_4");
+    db_iter->Prev();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "a");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_1,merge_2");
+
+    db_iter->Seek("c");
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "d");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_5,merge_6");
+    db_iter->Prev();
+    ASSERT_TRUE(db_iter->Valid());
+    ASSERT_EQ(db_iter->key().ToString(), "b");
+    ASSERT_EQ(db_iter->value().ToString(), "merge_3,merge_4");
+  }
+}
+
+// TODO(3.13): fix the issue of Seek() then Prev() which might not necessarily
+// return the biggest element smaller than the seek key.
+TEST_F(DBIteratorTest, DBIterator10) {
+  Options options;
+
+  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+  internal_iter->AddPut("a", "1");
+  internal_iter->AddPut("b", "2");
+  internal_iter->AddPut("c", "3");
+  internal_iter->AddPut("d", "4");
+  internal_iter->Finish();
+
+  std::unique_ptr<Iterator> db_iter(NewDBIterator(
+      env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter,
+      10, options.max_sequential_skip_in_iterations, 0));
+
+  db_iter->Seek("c");
+  ASSERT_TRUE(db_iter->Valid());
+  db_iter->Prev();
+  ASSERT_TRUE(db_iter->Valid());
+  ASSERT_EQ(db_iter->key().ToString(), "b");
+  ASSERT_EQ(db_iter->value().ToString(), "2");
+
+  db_iter->Next();
+  ASSERT_TRUE(db_iter->Valid());
+  ASSERT_EQ(db_iter->key().ToString(), "c");
+  ASSERT_EQ(db_iter->value().ToString(), "3");
+}
+
+TEST_F(DBIteratorTest, SeekToLastOccurrenceSeq0) {
+  Options options;
+  options.merge_operator = nullptr;
+
+  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+  internal_iter->AddPut("a", "1");
+  internal_iter->AddPut("b", "2");
+  internal_iter->Finish();
+
+  std::unique_ptr<Iterator> db_iter(
+      NewDBIterator(env_, ImmutableCFOptions(options), BytewiseComparator(),
+                    internal_iter, 10, 0 /* force seek */, 0));
+  db_iter->SeekToFirst();
+  ASSERT_TRUE(db_iter->Valid());
+  ASSERT_EQ(db_iter->key().ToString(), "a");
+  ASSERT_EQ(db_iter->value().ToString(), "1");
+  db_iter->Next();
+  ASSERT_TRUE(db_iter->Valid());
+  ASSERT_EQ(db_iter->key().ToString(), "b");
+  ASSERT_EQ(db_iter->value().ToString(), "2");
+  db_iter->Next();
+  ASSERT_FALSE(db_iter->Valid());
+}
+
+TEST_F(DBIteratorTest, DBIterator11) {
+  Options options;
+  options.merge_operator = MergeOperators::CreateFromStringId("stringappend");
+
+  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+  internal_iter->AddPut("a", "0");
+  internal_iter->AddPut("b", "0");
+  internal_iter->AddSingleDeletion("b");
+  internal_iter->AddMerge("a", "1");
+  internal_iter->AddMerge("b", "2");
+  internal_iter->Finish();
+
+  std::unique_ptr<Iterator> db_iter(NewDBIterator(
+      env_, ImmutableCFOptions(options), BytewiseComparator(), internal_iter, 1,
+      options.max_sequential_skip_in_iterations, 0));
+  db_iter->SeekToFirst();
+  ASSERT_TRUE(db_iter->Valid());
+  ASSERT_EQ(db_iter->key().ToString(), "a");
+  ASSERT_EQ(db_iter->value().ToString(), "0");
+  db_iter->Next();
+  ASSERT_TRUE(db_iter->Valid());
+  ASSERT_EQ(db_iter->key().ToString(), "b");
+  db_iter->Next();
+  ASSERT_FALSE(db_iter->Valid());
+}
+
+TEST_F(DBIteratorTest, DBIterator12) {
+  Options options;
+  options.merge_operator = nullptr;
+
+  TestIterator* internal_iter = new TestIterator(BytewiseComparator());
+  internal_iter->AddPut("a", "1");
+  internal_iter->AddPut("b", "2");
+  internal_iter->AddPut("c", "3");
+  internal_iter->AddSingleDeletion("b");
+  internal_iter->Finish();
+
+  std::unique_ptr<Iterator> db_iter(
+      NewDBIterator(env_, ImmutableCFOptions(options), BytewiseComparator(),
+                    internal_iter, 10, 0, 0));
+  db_iter->SeekToLast();
+  ASSERT_TRUE(db_iter->Valid());
+  ASSERT_EQ(db_iter->key().ToString(), "c");
+  ASSERT_EQ(db_iter->value().ToString(), "3");
+  db_iter->Prev();
+  ASSERT_TRUE(db_iter->Valid());
+  ASSERT_EQ(db_iter->key().ToString(), "a");
+  ASSERT_EQ(db_iter->value().ToString(), "1");
+  db_iter->Prev();
+  ASSERT_FALSE(db_iter->Valid());
+}
+
+class DBIterWithMergeIterTest : public testing::Test {
+ public:
+  DBIterWithMergeIterTest()
+      : env_(Env::Default()), icomp_(BytewiseComparator()) {
+    options_.merge_operator = nullptr;
+
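+    // Two TestIterators act as two sorted runs (for example, two memtables)
+    // that are combined through a MergingIterator underneath the DBIter.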
+    internal_iter1_ = new TestIterator(BytewiseComparator());
+    internal_iter1_->Add("a", kTypeValue, "1", 3u);
+    internal_iter1_->Add("f", kTypeValue, "2", 5u);
+    internal_iter1_->Add("g", kTypeValue, "3", 7u);
+    internal_iter1_->Finish();
+
+    internal_iter2_ = new TestIterator(BytewiseComparator());
+    internal_iter2_->Add("a", kTypeValue, "4", 6u);
+    internal_iter2_->Add("b", kTypeValue, "5", 1u);
+    internal_iter2_->Add("c", kTypeValue, "6", 2u);
+    internal_iter2_->Add("d", kTypeValue, "7", 3u);
+    internal_iter2_->Finish();
+
+    std::vector<InternalIterator*> child_iters;
+    child_iters.push_back(internal_iter1_);
+    child_iters.push_back(internal_iter2_);
+    InternalIterator* merge_iter =
+        NewMergingIterator(&icomp_, &child_iters[0], 2u);
+
+    db_iter_.reset(NewDBIterator(env_, ImmutableCFOptions(options_),
+                                 BytewiseComparator(), merge_iter,
+                                 8 /* read data earlier than seqId 8 */,
+                                 3 /* max iterators before reseek */, 0));
+  }
+
+  Env* env_;
+  Options options_;
+  TestIterator* internal_iter1_;
+  TestIterator* internal_iter2_;
+  InternalKeyComparator icomp_;
+  Iterator* merge_iter_;
+  std::unique_ptr<Iterator> db_iter_;
+};
+
+TEST_F(DBIterWithMergeIterTest, InnerMergeIterator1) {
+  db_iter_->SeekToFirst();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "a");
+  ASSERT_EQ(db_iter_->value().ToString(), "4");
+  db_iter_->Next();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "b");
+  ASSERT_EQ(db_iter_->value().ToString(), "5");
+  db_iter_->Next();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "c");
+  ASSERT_EQ(db_iter_->value().ToString(), "6");
+  db_iter_->Next();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "d");
+  ASSERT_EQ(db_iter_->value().ToString(), "7");
+  db_iter_->Next();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "f");
+  ASSERT_EQ(db_iter_->value().ToString(), "2");
+  db_iter_->Next();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "g");
+  ASSERT_EQ(db_iter_->value().ToString(), "3");
+  db_iter_->Next();
+  ASSERT_FALSE(db_iter_->Valid());
+}
+
+TEST_F(DBIterWithMergeIterTest, InnerMergeIterator2) {
+  // Test Prev() when one child iterator is at its end.
+  db_iter_->Seek("g");
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "g");
+  ASSERT_EQ(db_iter_->value().ToString(), "3");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "f");
+  ASSERT_EQ(db_iter_->value().ToString(), "2");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "d");
+  ASSERT_EQ(db_iter_->value().ToString(), "7");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "c");
+  ASSERT_EQ(db_iter_->value().ToString(), "6");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "b");
+  ASSERT_EQ(db_iter_->value().ToString(), "5");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "a");
+  ASSERT_EQ(db_iter_->value().ToString(), "4");
+}
+
+TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace1) {
+  // Test Prev() when one child iterator is at its end but more rows
+  // are added.
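+  // The SyncPoint callback below injects the new row at the exact moment
+  // MergeIterator is about to SeekToLast() the exhausted child iterator.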
+  db_iter_->Seek("f");
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "f");
+  ASSERT_EQ(db_iter_->value().ToString(), "2");
+
+  // The test callback inserts a key at the end of the mem table after
+  // MergeIterator::Prev() realized the mem table iterator is at its end
+  // and before a SeekToLast() is called.
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "MergeIterator::Prev:BeforeSeekToLast",
+      [&](void* arg) { internal_iter2_->Add("z", kTypeValue, "7", 12u); });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "d");
+  ASSERT_EQ(db_iter_->value().ToString(), "7");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "c");
+  ASSERT_EQ(db_iter_->value().ToString(), "6");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "b");
+  ASSERT_EQ(db_iter_->value().ToString(), "5");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "a");
+  ASSERT_EQ(db_iter_->value().ToString(), "4");
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace2) {
+  // Test Prev() when one child iterator is at its end but more rows
+  // are added.
+  db_iter_->Seek("f");
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "f");
+  ASSERT_EQ(db_iter_->value().ToString(), "2");
+
+  // The test callback inserts entries that update a key at the end of the
+  // mem table after MergeIterator::Prev() realized the mem table iterator
+  // is at its end and before a SeekToLast() is called.
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "MergeIterator::Prev:BeforeSeekToLast", [&](void* arg) {
+        internal_iter2_->Add("z", kTypeValue, "7", 12u);
+        internal_iter2_->Add("z", kTypeValue, "7", 11u);
+      });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "d");
+  ASSERT_EQ(db_iter_->value().ToString(), "7");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "c");
+  ASSERT_EQ(db_iter_->value().ToString(), "6");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "b");
+  ASSERT_EQ(db_iter_->value().ToString(), "5");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "a");
+  ASSERT_EQ(db_iter_->value().ToString(), "4");
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace3) {
+  // Test Prev() when one child iterator is at its end but more rows
+  // are added and max_skipped is triggered.
+  db_iter_->Seek("f");
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "f");
+  ASSERT_EQ(db_iter_->value().ToString(), "2");
+
+  // The test callback inserts entries that update a key at the end of the
+  // mem table after MergeIterator::Prev() realized the mem table iterator
+  // is at its end and before a SeekToLast() is called.
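+  // Six newer versions of "z" (seq 11..16, above the snapshot at seq 8) are
+  // injected, so skipping them exceeds the reseek threshold of 3.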
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "MergeIterator::Prev:BeforeSeekToLast", [&](void* arg) {
+        internal_iter2_->Add("z", kTypeValue, "7", 16u, true);
+        internal_iter2_->Add("z", kTypeValue, "7", 15u, true);
+        internal_iter2_->Add("z", kTypeValue, "7", 14u, true);
+        internal_iter2_->Add("z", kTypeValue, "7", 13u, true);
+        internal_iter2_->Add("z", kTypeValue, "7", 12u, true);
+        internal_iter2_->Add("z", kTypeValue, "7", 11u, true);
+      });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "d");
+  ASSERT_EQ(db_iter_->value().ToString(), "7");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "c");
+  ASSERT_EQ(db_iter_->value().ToString(), "6");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "b");
+  ASSERT_EQ(db_iter_->value().ToString(), "5");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "a");
+  ASSERT_EQ(db_iter_->value().ToString(), "4");
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace4) {
+  // Test Prev() when one child iterator has more rows inserted
+  // between Seek() and Prev() when changing directions.
+  internal_iter2_->Add("z", kTypeValue, "9", 4u);
+
+  db_iter_->Seek("g");
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "g");
+  ASSERT_EQ(db_iter_->value().ToString(), "3");
+
+  // The test callback inserts entries that update a key before "z" in the
+  // mem table after MergeIterator::Prev() calls the mem table iterator's
+  // Seek() and before calling Prev()
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "MergeIterator::Prev:BeforePrev", [&](void* arg) {
+        IteratorWrapper* it = reinterpret_cast<IteratorWrapper*>(arg);
+        if (it->key().starts_with("z")) {
+          internal_iter2_->Add("x", kTypeValue, "7", 16u, true);
+          internal_iter2_->Add("x", kTypeValue, "7", 15u, true);
+          internal_iter2_->Add("x", kTypeValue, "7", 14u, true);
+          internal_iter2_->Add("x", kTypeValue, "7", 13u, true);
+          internal_iter2_->Add("x", kTypeValue, "7", 12u, true);
+          internal_iter2_->Add("x", kTypeValue, "7", 11u, true);
+        }
+      });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "f");
+  ASSERT_EQ(db_iter_->value().ToString(), "2");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "d");
+  ASSERT_EQ(db_iter_->value().ToString(), "7");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "c");
+  ASSERT_EQ(db_iter_->value().ToString(), "6");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "b");
+  ASSERT_EQ(db_iter_->value().ToString(), "5");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "a");
+  ASSERT_EQ(db_iter_->value().ToString(), "4");
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace5) {
+  internal_iter2_->Add("z", kTypeValue, "9", 4u);
+
+  // Test Prev() when one child iterator has more rows inserted
+  // between Seek() and Prev() when changing directions.
+  db_iter_->Seek("g");
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "g");
+  ASSERT_EQ(db_iter_->value().ToString(), "3");
+
+  // The test callback inserts entries that update a key before "z" in the
+  // mem table after MergeIterator::Prev() calls the mem table iterator's
+  // Seek() and before calling Prev()
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "MergeIterator::Prev:BeforePrev", [&](void* arg) {
+        IteratorWrapper* it = reinterpret_cast<IteratorWrapper*>(arg);
+        if (it->key().starts_with("z")) {
+          internal_iter2_->Add("x", kTypeValue, "7", 16u, true);
+          internal_iter2_->Add("x", kTypeValue, "7", 15u, true);
+        }
+      });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "f");
+  ASSERT_EQ(db_iter_->value().ToString(), "2");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "d");
+  ASSERT_EQ(db_iter_->value().ToString(), "7");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "c");
+  ASSERT_EQ(db_iter_->value().ToString(), "6");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "b");
+  ASSERT_EQ(db_iter_->value().ToString(), "5");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "a");
+  ASSERT_EQ(db_iter_->value().ToString(), "4");
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace6) {
+  internal_iter2_->Add("z", kTypeValue, "9", 4u);
+
+  // Test Prev() when one child iterator has more rows inserted
+  // between Seek() and Prev() when changing directions.
+  db_iter_->Seek("g");
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "g");
+  ASSERT_EQ(db_iter_->value().ToString(), "3");
+
+  // The test callback inserts an entry that updates a key before "z" in the
+  // mem table after MergeIterator::Prev() calls the mem table iterator's
+  // Seek() and before calling Prev()
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "MergeIterator::Prev:BeforePrev", [&](void* arg) {
+        IteratorWrapper* it = reinterpret_cast<IteratorWrapper*>(arg);
+        if (it->key().starts_with("z")) {
+          internal_iter2_->Add("x", kTypeValue, "7", 16u, true);
+        }
+      });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "f");
+  ASSERT_EQ(db_iter_->value().ToString(), "2");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "d");
+  ASSERT_EQ(db_iter_->value().ToString(), "7");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "c");
+  ASSERT_EQ(db_iter_->value().ToString(), "6");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "b");
+  ASSERT_EQ(db_iter_->value().ToString(), "5");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "a");
+  ASSERT_EQ(db_iter_->value().ToString(), "4");
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace7) {
+  internal_iter1_->Add("u", kTypeValue, "10", 4u);
+  internal_iter1_->Add("v", kTypeValue, "11", 4u);
+  internal_iter1_->Add("w", kTypeValue, "12", 4u);
+  internal_iter2_->Add("z", kTypeValue, "9", 4u);
+
+  // Test Prev() when one child iterator has more rows inserted
+  // between Seek() and Prev() when changing directions.
+  db_iter_->Seek("g");
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "g");
+  ASSERT_EQ(db_iter_->value().ToString(), "3");
+
+  // The test callback inserts entries that update a key before "z" in the
+  // mem table after MergeIterator::Prev() calls the mem table iterator's
+  // Seek() and before calling Prev()
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "MergeIterator::Prev:BeforePrev", [&](void* arg) {
+        IteratorWrapper* it = reinterpret_cast<IteratorWrapper*>(arg);
+        if (it->key().starts_with("z")) {
+          internal_iter2_->Add("x", kTypeValue, "7", 16u, true);
+          internal_iter2_->Add("x", kTypeValue, "7", 15u, true);
+          internal_iter2_->Add("x", kTypeValue, "7", 14u, true);
+          internal_iter2_->Add("x", kTypeValue, "7", 13u, true);
+          internal_iter2_->Add("x", kTypeValue, "7", 12u, true);
+          internal_iter2_->Add("x", kTypeValue, "7", 11u, true);
+        }
+      });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "f");
+  ASSERT_EQ(db_iter_->value().ToString(), "2");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "d");
+  ASSERT_EQ(db_iter_->value().ToString(), "7");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "c");
+  ASSERT_EQ(db_iter_->value().ToString(), "6");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "b");
+  ASSERT_EQ(db_iter_->value().ToString(), "5");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "a");
+  ASSERT_EQ(db_iter_->value().ToString(), "4");
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+TEST_F(DBIterWithMergeIterTest, InnerMergeIteratorDataRace8) {
+  // internal_iter1_: a, f, g
+  // internal_iter2_: a, b, c, d, adding (z)
+  internal_iter2_->Add("z", kTypeValue, "9", 4u);
+
+  // Test Prev() when one child iterator has more rows inserted
+  // between Seek() and Prev() when changing directions.
+  db_iter_->Seek("g");
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "g");
+  ASSERT_EQ(db_iter_->value().ToString(), "3");
+
+  // The test callback inserts two keys before "z" in the mem table after
+  // MergeIterator::Prev() calls the mem table iterator's Seek() and
+  // before calling Prev()
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "MergeIterator::Prev:BeforePrev", [&](void* arg) {
+        IteratorWrapper* it = reinterpret_cast<IteratorWrapper*>(arg);
+        if (it->key().starts_with("z")) {
+          internal_iter2_->Add("x", kTypeValue, "7", 16u, true);
+          internal_iter2_->Add("y", kTypeValue, "7", 17u, true);
+        }
+      });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "f");
+  ASSERT_EQ(db_iter_->value().ToString(), "2");
+  db_iter_->Prev();
+  ASSERT_TRUE(db_iter_->Valid());
+  ASSERT_EQ(db_iter_->key().ToString(), "d");
+  ASSERT_EQ(db_iter_->value().ToString(), "7");
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/external/rocksdb/db/db_iterator_test.cc b/external/rocksdb/db/db_iterator_test.cc
new file mode 100755
index 0000000000..a971835c02
--- /dev/null
+++ b/external/rocksdb/db/db_iterator_test.cc
@@ -0,0 +1,1606 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/db_test_util.h"
+#include "port/stack_trace.h"
+#include "rocksdb/iostats_context.h"
+#include "rocksdb/perf_context.h"
+#include "port/port.h"
+
+namespace rocksdb {
+
+class DBIteratorTest : public DBTestBase {
+ public:
+  DBIteratorTest() : DBTestBase("/db_iterator_test") {}
+};
+
+TEST_F(DBIteratorTest, IteratorProperty) {
+  // The test needs to be changed if kPersistedTier is supported in iterator.
+  Options options = CurrentOptions();
+  CreateAndReopenWithCF({"pikachu"}, options);
+  Put(1, "1", "2");
+  ReadOptions ropt;
+  ropt.pin_data = false;
+  {
+    unique_ptr<Iterator> iter(db_->NewIterator(ropt, handles_[1]));
+    iter->SeekToFirst();
+    std::string prop_value;
+    ASSERT_NOK(iter->GetProperty("non_existing.value", &prop_value));
+    ASSERT_OK(iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value));
+    ASSERT_EQ("0", prop_value);
+    iter->Next();
+    ASSERT_OK(iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value));
+    ASSERT_EQ("Iterator is not valid.", prop_value);
+  }
+  Close();
+}
+
+TEST_F(DBIteratorTest, PersistedTierOnIterator) {
+  // The test needs to be changed if kPersistedTier is supported in iterator.
+  Options options = CurrentOptions();
+  CreateAndReopenWithCF({"pikachu"}, options);
+  ReadOptions ropt;
+  ropt.read_tier = kPersistedTier;
+
+  auto* iter = db_->NewIterator(ropt, handles_[1]);
+  ASSERT_TRUE(iter->status().IsNotSupported());
+  delete iter;
+
+  std::vector<Iterator*> iters;
+  ASSERT_TRUE(db_->NewIterators(ropt, {handles_[1]}, &iters).IsNotSupported());
+  Close();
+}
+
+TEST_F(DBIteratorTest, NonBlockingIteration) {
+  do {
+    ReadOptions non_blocking_opts, regular_opts;
+    Options options = CurrentOptions();
+    options.statistics = rocksdb::CreateDBStatistics();
+    non_blocking_opts.read_tier = kBlockCacheTier;
+    CreateAndReopenWithCF({"pikachu"}, options);
+    // write one kv to the database.
+    ASSERT_OK(Put(1, "a", "b"));
+
+    // scan using non-blocking iterator. We should find it because
+    // it is in memtable.
+    Iterator* iter = db_->NewIterator(non_blocking_opts, handles_[1]);
+    int count = 0;
+    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+      ASSERT_OK(iter->status());
+      count++;
+    }
+    ASSERT_EQ(count, 1);
+    delete iter;
+
+    // flush memtable to storage. Now, the key should not be in the
+    // memtable nor in the block cache.
+    ASSERT_OK(Flush(1));
+
+    // verify that a non-blocking iterator does not find any
+    // kvs. Neither does it do any IOs to storage.
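+    // NO_FILE_OPENS and BLOCK_CACHE_ADD are snapshotted before the scan and
+    // must be unchanged afterwards.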
+    uint64_t numopen = TestGetTickerCount(options, NO_FILE_OPENS);
+    uint64_t cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
+    iter = db_->NewIterator(non_blocking_opts, handles_[1]);
+    count = 0;
+    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+      count++;
+    }
+    ASSERT_EQ(count, 0);
+    ASSERT_TRUE(iter->status().IsIncomplete());
+    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
+    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
+    delete iter;
+
+    // read in the specified block via a regular get
+    ASSERT_EQ(Get(1, "a"), "b");
+
+    // verify that we can find it via a non-blocking scan
+    numopen = TestGetTickerCount(options, NO_FILE_OPENS);
+    cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
+    iter = db_->NewIterator(non_blocking_opts, handles_[1]);
+    count = 0;
+    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+      ASSERT_OK(iter->status());
+      count++;
+    }
+    ASSERT_EQ(count, 1);
+    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
+    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
+    delete iter;
+
+    // This test verifies block cache behaviors, which is not used by plain
+    // table format.
+    // Exclude kHashCuckoo as it does not support iteration currently
+  } while (ChangeOptions(kSkipPlainTable | kSkipNoSeekToLast | kSkipHashCuckoo |
+                         kSkipMmapReads));
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBIteratorTest, ManagedNonBlockingIteration) {
+  do {
+    ReadOptions non_blocking_opts, regular_opts;
+    Options options = CurrentOptions();
+    options.statistics = rocksdb::CreateDBStatistics();
+    non_blocking_opts.read_tier = kBlockCacheTier;
+    non_blocking_opts.managed = true;
+    CreateAndReopenWithCF({"pikachu"}, options);
+    // write one kv to the database.
+    ASSERT_OK(Put(1, "a", "b"));
+
+    // scan using non-blocking iterator. We should find it because
+    // it is in memtable.
+    Iterator* iter = db_->NewIterator(non_blocking_opts, handles_[1]);
+    int count = 0;
+    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+      ASSERT_OK(iter->status());
+      count++;
+    }
+    ASSERT_EQ(count, 1);
+    delete iter;
+
+    // flush memtable to storage. Now, the key should not be in the
+    // memtable nor in the block cache.
+    ASSERT_OK(Flush(1));
+
+    // verify that a non-blocking iterator does not find any
+    // kvs. Neither does it do any IOs to storage.
+    int64_t numopen = TestGetTickerCount(options, NO_FILE_OPENS);
+    int64_t cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
+    iter = db_->NewIterator(non_blocking_opts, handles_[1]);
+    count = 0;
+    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+      count++;
+    }
+    ASSERT_EQ(count, 0);
+    ASSERT_TRUE(iter->status().IsIncomplete());
+    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
+    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
+    delete iter;
+
+    // read in the specified block via a regular get
+    ASSERT_EQ(Get(1, "a"), "b");
+
+    // verify that we can find it via a non-blocking scan
+    numopen = TestGetTickerCount(options, NO_FILE_OPENS);
+    cache_added = TestGetTickerCount(options, BLOCK_CACHE_ADD);
+    iter = db_->NewIterator(non_blocking_opts, handles_[1]);
+    count = 0;
+    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+      ASSERT_OK(iter->status());
+      count++;
+    }
+    ASSERT_EQ(count, 1);
+    ASSERT_EQ(numopen, TestGetTickerCount(options, NO_FILE_OPENS));
+    ASSERT_EQ(cache_added, TestGetTickerCount(options, BLOCK_CACHE_ADD));
+    delete iter;
+
+    // This test verifies block cache behaviors, which is not used by plain
+    // table format.
+    // Exclude kHashCuckoo as it does not support iteration currently
+  } while (ChangeOptions(kSkipPlainTable | kSkipNoSeekToLast | kSkipHashCuckoo |
+                         kSkipMmapReads));
+}
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBIteratorTest, IterSeekBeforePrev) {
+  ASSERT_OK(Put("a", "b"));
+  ASSERT_OK(Put("c", "d"));
+  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(Put("0", "f"));
+  ASSERT_OK(Put("1", "h"));
+  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(Put("2", "j"));
+  auto iter = db_->NewIterator(ReadOptions());
+  iter->Seek(Slice("c"));
+  iter->Prev();
+  iter->Seek(Slice("a"));
+  iter->Prev();
+  delete iter;
+}
+
+namespace {
+std::string MakeLongKey(size_t length, char c) {
+  return std::string(length, c);
+}
+}  // namespace
+
+TEST_F(DBIteratorTest, IterLongKeys) {
+  ASSERT_OK(Put(MakeLongKey(20, 0), "0"));
+  ASSERT_OK(Put(MakeLongKey(32, 2), "2"));
+  ASSERT_OK(Put("a", "b"));
+  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(Put(MakeLongKey(50, 1), "1"));
+  ASSERT_OK(Put(MakeLongKey(127, 3), "3"));
+  ASSERT_OK(Put(MakeLongKey(64, 4), "4"));
+  auto iter = db_->NewIterator(ReadOptions());
+
+  iter->Seek(MakeLongKey(20, 0));
+  ASSERT_EQ(IterStatus(iter), MakeLongKey(20, 0) + "->0");
+  iter->Next();
+  ASSERT_EQ(IterStatus(iter), MakeLongKey(50, 1) + "->1");
+  iter->Next();
+  ASSERT_EQ(IterStatus(iter), MakeLongKey(32, 2) + "->2");
+  iter->Next();
+  ASSERT_EQ(IterStatus(iter), MakeLongKey(127, 3) + "->3");
+  iter->Next();
+  ASSERT_EQ(IterStatus(iter), MakeLongKey(64, 4) + "->4");
+  delete iter;
+
+  iter = db_->NewIterator(ReadOptions());
+  iter->Seek(MakeLongKey(50, 1));
+  ASSERT_EQ(IterStatus(iter), MakeLongKey(50, 1) + "->1");
+  iter->Next();
+  ASSERT_EQ(IterStatus(iter), MakeLongKey(32, 2) + "->2");
+  iter->Next();
+  ASSERT_EQ(IterStatus(iter), MakeLongKey(127, 3) + "->3");
+  delete iter;
+}
+
+TEST_F(DBIteratorTest, IterNextWithNewerSeq) {
+  ASSERT_OK(Put("0", "0"));
+  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(Put("a", "b"));
+  ASSERT_OK(Put("c", "d"));
+  ASSERT_OK(Put("d", "e"));
+  auto iter = db_->NewIterator(ReadOptions());
+
+  // Create a key that needs to be skipped for Seq too new
+  for (uint64_t i = 0; i < last_options_.max_sequential_skip_in_iterations + 1;
+       i++) {
+    ASSERT_OK(Put("b", "f"));
+  }
+
+  iter->Seek(Slice("a"));
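+  // The writes to "b" above are newer than the iterator's snapshot, so
+  // Next() has to skip them and goes from "a" straight to "c".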
+  ASSERT_EQ(IterStatus(iter), "a->b");
+  iter->Next();
+  ASSERT_EQ(IterStatus(iter), "c->d");
+  delete iter;
+}
+
+TEST_F(DBIteratorTest, IterPrevWithNewerSeq) {
+  ASSERT_OK(Put("0", "0"));
+  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(Put("a", "b"));
+  ASSERT_OK(Put("c", "d"));
+  ASSERT_OK(Put("d", "e"));
+  auto iter = db_->NewIterator(ReadOptions());
+
+  // Create a key that needs to be skipped for Seq too new
+  for (uint64_t i = 0; i < last_options_.max_sequential_skip_in_iterations + 1;
+       i++) {
+    ASSERT_OK(Put("b", "f"));
+  }
+
+  iter->Seek(Slice("d"));
+  ASSERT_EQ(IterStatus(iter), "d->e");
+  iter->Prev();
+  ASSERT_EQ(IterStatus(iter), "c->d");
+  iter->Prev();
+  ASSERT_EQ(IterStatus(iter), "a->b");
+
+  iter->Prev();
+  delete iter;
+}
+
+TEST_F(DBIteratorTest, IterPrevWithNewerSeq2) {
+  ASSERT_OK(Put("0", "0"));
+  dbfull()->Flush(FlushOptions());
+  ASSERT_OK(Put("a", "b"));
+  ASSERT_OK(Put("c", "d"));
+  ASSERT_OK(Put("d", "e"));
+  auto iter = db_->NewIterator(ReadOptions());
+  iter->Seek(Slice("c"));
+  ASSERT_EQ(IterStatus(iter), "c->d");
+
+  // Create a key that needs to be skipped for Seq too new
+  for (uint64_t i = 0; i < last_options_.max_sequential_skip_in_iterations + 1;
+       i++) {
+    ASSERT_OK(Put("b", "f"));
+  }
+
+  iter->Prev();
+  ASSERT_EQ(IterStatus(iter), "a->b");
+
+  iter->Prev();
+  delete iter;
+}
+
+TEST_F(DBIteratorTest, IterEmpty) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
+
+    iter->SeekToFirst();
+    ASSERT_EQ(IterStatus(iter), "(invalid)");
+
+    iter->SeekToLast();
+    ASSERT_EQ(IterStatus(iter), "(invalid)");
+
+    iter->Seek("foo");
+    ASSERT_EQ(IterStatus(iter), "(invalid)");
+
+    delete iter;
+  } while (ChangeCompactOptions());
+}
+
+TEST_F(DBIteratorTest, IterSingle) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    ASSERT_OK(Put(1, "a", "va"));
+    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
+
+    iter->SeekToFirst();
+    ASSERT_EQ(IterStatus(iter), "a->va");
+    iter->Next();
+    ASSERT_EQ(IterStatus(iter), "(invalid)");
+    iter->SeekToFirst();
+    ASSERT_EQ(IterStatus(iter), "a->va");
+    iter->Prev();
+    ASSERT_EQ(IterStatus(iter), "(invalid)");
+
+    iter->SeekToLast();
+    ASSERT_EQ(IterStatus(iter), "a->va");
+    iter->Next();
+    ASSERT_EQ(IterStatus(iter), "(invalid)");
+    iter->SeekToLast();
+    ASSERT_EQ(IterStatus(iter), "a->va");
+    iter->Prev();
+    ASSERT_EQ(IterStatus(iter), "(invalid)");
+
+    iter->Seek("");
+    ASSERT_EQ(IterStatus(iter), "a->va");
+    iter->Next();
+    ASSERT_EQ(IterStatus(iter), "(invalid)");
+
+    iter->Seek("a");
+    ASSERT_EQ(IterStatus(iter), "a->va");
+    iter->Next();
+    ASSERT_EQ(IterStatus(iter), "(invalid)");
+
+    iter->Seek("b");
+    ASSERT_EQ(IterStatus(iter), "(invalid)");
+
+    delete iter;
+  } while (ChangeCompactOptions());
+}
+
+TEST_F(DBIteratorTest, IterMulti) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    ASSERT_OK(Put(1, "a", "va"));
+    ASSERT_OK(Put(1, "b", "vb"));
+    ASSERT_OK(Put(1, "c", "vc"));
+    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
+
+    iter->SeekToFirst();
+    ASSERT_EQ(IterStatus(iter), "a->va");
+    iter->Next();
+    ASSERT_EQ(IterStatus(iter), "b->vb");
+    iter->Next();
+    ASSERT_EQ(IterStatus(iter), "c->vc");
+    iter->Next();
+    ASSERT_EQ(IterStatus(iter), "(invalid)");
+    iter->SeekToFirst();
+    ASSERT_EQ(IterStatus(iter), "a->va");
+    iter->Prev();
+    ASSERT_EQ(IterStatus(iter), "(invalid)");
+
+    iter->SeekToLast();
+    ASSERT_EQ(IterStatus(iter), "c->vc");
+    iter->Prev();
+    ASSERT_EQ(IterStatus(iter), "b->vb");
"b->vb"); + iter->Prev(); + ASSERT_EQ(IterStatus(iter), "a->va"); + iter->Prev(); + ASSERT_EQ(IterStatus(iter), "(invalid)"); + iter->SeekToLast(); + ASSERT_EQ(IterStatus(iter), "c->vc"); + iter->Next(); + ASSERT_EQ(IterStatus(iter), "(invalid)"); + + iter->Seek(""); + ASSERT_EQ(IterStatus(iter), "a->va"); + iter->Seek("a"); + ASSERT_EQ(IterStatus(iter), "a->va"); + iter->Seek("ax"); + ASSERT_EQ(IterStatus(iter), "b->vb"); + + iter->Seek("b"); + ASSERT_EQ(IterStatus(iter), "b->vb"); + iter->Seek("z"); + ASSERT_EQ(IterStatus(iter), "(invalid)"); + + // Switch from reverse to forward + iter->SeekToLast(); + iter->Prev(); + iter->Prev(); + iter->Next(); + ASSERT_EQ(IterStatus(iter), "b->vb"); + + // Switch from forward to reverse + iter->SeekToFirst(); + iter->Next(); + iter->Next(); + iter->Prev(); + ASSERT_EQ(IterStatus(iter), "b->vb"); + + // Make sure iter stays at snapshot + ASSERT_OK(Put(1, "a", "va2")); + ASSERT_OK(Put(1, "a2", "va3")); + ASSERT_OK(Put(1, "b", "vb2")); + ASSERT_OK(Put(1, "c", "vc2")); + ASSERT_OK(Delete(1, "b")); + iter->SeekToFirst(); + ASSERT_EQ(IterStatus(iter), "a->va"); + iter->Next(); + ASSERT_EQ(IterStatus(iter), "b->vb"); + iter->Next(); + ASSERT_EQ(IterStatus(iter), "c->vc"); + iter->Next(); + ASSERT_EQ(IterStatus(iter), "(invalid)"); + iter->SeekToLast(); + ASSERT_EQ(IterStatus(iter), "c->vc"); + iter->Prev(); + ASSERT_EQ(IterStatus(iter), "b->vb"); + iter->Prev(); + ASSERT_EQ(IterStatus(iter), "a->va"); + iter->Prev(); + ASSERT_EQ(IterStatus(iter), "(invalid)"); + + delete iter; + } while (ChangeCompactOptions()); +} + +// Check that we can skip over a run of user keys +// by using reseek rather than sequential scan +TEST_F(DBIteratorTest, IterReseek) { + anon::OptionsOverride options_override; + options_override.skip_policy = kSkipNoSnapshot; + Options options = CurrentOptions(options_override); + options.max_sequential_skip_in_iterations = 3; + options.create_if_missing = true; + options.statistics = rocksdb::CreateDBStatistics(); + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + + // insert three keys with same userkey and verify that + // reseek is not invoked. For each of these test cases, + // verify that we can find the next key "b". + ASSERT_OK(Put(1, "a", "zero")); + ASSERT_OK(Put(1, "a", "one")); + ASSERT_OK(Put(1, "a", "two")); + ASSERT_OK(Put(1, "b", "bone")); + Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]); + iter->SeekToFirst(); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0); + ASSERT_EQ(IterStatus(iter), "a->two"); + iter->Next(); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0); + ASSERT_EQ(IterStatus(iter), "b->bone"); + delete iter; + + // insert a total of three keys with same userkey and verify + // that reseek is still not invoked. + ASSERT_OK(Put(1, "a", "three")); + iter = db_->NewIterator(ReadOptions(), handles_[1]); + iter->SeekToFirst(); + ASSERT_EQ(IterStatus(iter), "a->three"); + iter->Next(); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0); + ASSERT_EQ(IterStatus(iter), "b->bone"); + delete iter; + + // insert a total of four keys with same userkey and verify + // that reseek is invoked. 
+ ASSERT_OK(Put(1, "a", "four")); + iter = db_->NewIterator(ReadOptions(), handles_[1]); + iter->SeekToFirst(); + ASSERT_EQ(IterStatus(iter), "a->four"); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 0); + iter->Next(); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), 1); + ASSERT_EQ(IterStatus(iter), "b->bone"); + delete iter; + + // Testing reverse iterator + // At this point, we have three versions of "a" and one version of "b". + // The reseek statistics is already at 1. + int num_reseeks = static_cast( + TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION)); + + // Insert another version of b and assert that reseek is not invoked + ASSERT_OK(Put(1, "b", "btwo")); + iter = db_->NewIterator(ReadOptions(), handles_[1]); + iter->SeekToLast(); + ASSERT_EQ(IterStatus(iter), "b->btwo"); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), + num_reseeks); + iter->Prev(); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), + num_reseeks + 1); + ASSERT_EQ(IterStatus(iter), "a->four"); + delete iter; + + // insert two more versions of b. This makes a total of 4 versions + // of b and 4 versions of a. + ASSERT_OK(Put(1, "b", "bthree")); + ASSERT_OK(Put(1, "b", "bfour")); + iter = db_->NewIterator(ReadOptions(), handles_[1]); + iter->SeekToLast(); + ASSERT_EQ(IterStatus(iter), "b->bfour"); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), + num_reseeks + 2); + iter->Prev(); + + // the previous Prev call should have invoked reseek + ASSERT_EQ(TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION), + num_reseeks + 3); + ASSERT_EQ(IterStatus(iter), "a->four"); + delete iter; +} + +TEST_F(DBIteratorTest, IterSmallAndLargeMix) { + do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "a", "va")); + ASSERT_OK(Put(1, "b", std::string(100000, 'b'))); + ASSERT_OK(Put(1, "c", "vc")); + ASSERT_OK(Put(1, "d", std::string(100000, 'd'))); + ASSERT_OK(Put(1, "e", std::string(100000, 'e'))); + + Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]); + + iter->SeekToFirst(); + ASSERT_EQ(IterStatus(iter), "a->va"); + iter->Next(); + ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b')); + iter->Next(); + ASSERT_EQ(IterStatus(iter), "c->vc"); + iter->Next(); + ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd')); + iter->Next(); + ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e')); + iter->Next(); + ASSERT_EQ(IterStatus(iter), "(invalid)"); + + iter->SeekToLast(); + ASSERT_EQ(IterStatus(iter), "e->" + std::string(100000, 'e')); + iter->Prev(); + ASSERT_EQ(IterStatus(iter), "d->" + std::string(100000, 'd')); + iter->Prev(); + ASSERT_EQ(IterStatus(iter), "c->vc"); + iter->Prev(); + ASSERT_EQ(IterStatus(iter), "b->" + std::string(100000, 'b')); + iter->Prev(); + ASSERT_EQ(IterStatus(iter), "a->va"); + iter->Prev(); + ASSERT_EQ(IterStatus(iter), "(invalid)"); + + delete iter; + } while (ChangeCompactOptions()); +} + +TEST_F(DBIteratorTest, IterMultiWithDelete) { + do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "ka", "va")); + ASSERT_OK(Put(1, "kb", "vb")); + ASSERT_OK(Put(1, "kc", "vc")); + ASSERT_OK(Delete(1, "kb")); + ASSERT_EQ("NOT_FOUND", Get(1, "kb")); + + Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]); + iter->Seek("kc"); + ASSERT_EQ(IterStatus(iter), "kc->vc"); + if (!CurrentOptions().merge_operator) { + // TODO: merge operator does not support backward iteration yet + if 
+      if (kPlainTableAllBytesPrefix != option_config_ &&
+          kBlockBasedTableWithWholeKeyHashIndex != option_config_ &&
+          kHashLinkList != option_config_) {
+        iter->Prev();
+        ASSERT_EQ(IterStatus(iter), "ka->va");
+      }
+    }
+    delete iter;
+  } while (ChangeOptions());
+}
+
+TEST_F(DBIteratorTest, IterPrevMaxSkip) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    for (int i = 0; i < 2; i++) {
+      ASSERT_OK(Put(1, "key1", "v1"));
+      ASSERT_OK(Put(1, "key2", "v2"));
+      ASSERT_OK(Put(1, "key3", "v3"));
+      ASSERT_OK(Put(1, "key4", "v4"));
+      ASSERT_OK(Put(1, "key5", "v5"));
+    }
+
+    VerifyIterLast("key5->v5", 1);
+
+    ASSERT_OK(Delete(1, "key5"));
+    VerifyIterLast("key4->v4", 1);
+
+    ASSERT_OK(Delete(1, "key4"));
+    VerifyIterLast("key3->v3", 1);
+
+    ASSERT_OK(Delete(1, "key3"));
+    VerifyIterLast("key2->v2", 1);
+
+    ASSERT_OK(Delete(1, "key2"));
+    VerifyIterLast("key1->v1", 1);
+
+    ASSERT_OK(Delete(1, "key1"));
+    VerifyIterLast("(invalid)", 1);
+  } while (ChangeOptions(kSkipMergePut | kSkipNoSeekToLast));
+}
+
+TEST_F(DBIteratorTest, IterWithSnapshot) {
+  anon::OptionsOverride options_override;
+  options_override.skip_policy = kSkipNoSnapshot;
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override));
+    ASSERT_OK(Put(1, "key1", "val1"));
+    ASSERT_OK(Put(1, "key2", "val2"));
+    ASSERT_OK(Put(1, "key3", "val3"));
+    ASSERT_OK(Put(1, "key4", "val4"));
+    ASSERT_OK(Put(1, "key5", "val5"));
+
+    const Snapshot* snapshot = db_->GetSnapshot();
+    ReadOptions options;
+    options.snapshot = snapshot;
+    Iterator* iter = db_->NewIterator(options, handles_[1]);
+
+    // Put more values after the snapshot
+    ASSERT_OK(Put(1, "key100", "val100"));
+    ASSERT_OK(Put(1, "key101", "val101"));
+
+    iter->Seek("key5");
+    ASSERT_EQ(IterStatus(iter), "key5->val5");
+    if (!CurrentOptions().merge_operator) {
+      // TODO: merge operator does not support backward iteration yet
+      if (kPlainTableAllBytesPrefix != option_config_ &&
+          kBlockBasedTableWithWholeKeyHashIndex != option_config_ &&
+          kHashLinkList != option_config_) {
+        iter->Prev();
+        ASSERT_EQ(IterStatus(iter), "key4->val4");
+        iter->Prev();
+        ASSERT_EQ(IterStatus(iter), "key3->val3");
+
+        iter->Next();
+        ASSERT_EQ(IterStatus(iter), "key4->val4");
+        iter->Next();
+        ASSERT_EQ(IterStatus(iter), "key5->val5");
+      }
+      iter->Next();
+      ASSERT_TRUE(!iter->Valid());
+    }
+    db_->ReleaseSnapshot(snapshot);
+    delete iter;
+    // skip as HashCuckooRep does not support snapshot
+  } while (ChangeOptions(kSkipHashCuckoo));
+}
+
+TEST_F(DBIteratorTest, IteratorPinsRef) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    Put(1, "foo", "hello");
+
+    // Get iterator that will yield the current contents of the DB.
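+    // The iterator holds a reference to the DB state at creation time, so
+    // the overwrites and large writes below must not change what it yields.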
+    Iterator* iter = db_->NewIterator(ReadOptions(), handles_[1]);
+
+    // Write to force compactions
+    Put(1, "foo", "newvalue1");
+    for (int i = 0; i < 100; i++) {
+      // 100K values
+      ASSERT_OK(Put(1, Key(i), Key(i) + std::string(100000, 'v')));
+    }
+    Put(1, "foo", "newvalue2");
+
+    iter->SeekToFirst();
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ("foo", iter->key().ToString());
+    ASSERT_EQ("hello", iter->value().ToString());
+    iter->Next();
+    ASSERT_TRUE(!iter->Valid());
+    delete iter;
+  } while (ChangeCompactOptions());
+}
+
+TEST_F(DBIteratorTest, DBIteratorBoundTest) {
+  Options options = CurrentOptions();
+  options.env = env_;
+  options.create_if_missing = true;
+
+  options.prefix_extractor = nullptr;
+  DestroyAndReopen(options);
+  ASSERT_OK(Put("a", "0"));
+  ASSERT_OK(Put("foo", "bar"));
+  ASSERT_OK(Put("foo1", "bar1"));
+  ASSERT_OK(Put("g1", "0"));
+
+  // testing basic case with no iterate_upper_bound and no prefix_extractor
+  {
+    ReadOptions ro;
+    ro.iterate_upper_bound = nullptr;
+
+    std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
+
+    iter->Seek("foo");
+
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ(iter->key().compare(Slice("foo")), 0);
+
+    iter->Next();
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ(iter->key().compare(Slice("foo1")), 0);
+
+    iter->Next();
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ(iter->key().compare(Slice("g1")), 0);
+  }
+
+  // testing iterate_upper_bound and forward iterator
+  // to make sure it stops at bound
+  {
+    ReadOptions ro;
+    // iterate_upper_bound points beyond the last expected entry
+    Slice prefix("foo2");
+    ro.iterate_upper_bound = &prefix;
+
+    std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
+
+    iter->Seek("foo");
+
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ(iter->key().compare(Slice("foo")), 0);
+
+    iter->Next();
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ(iter->key().compare(Slice("foo1")), 0);
+
+    iter->Next();
+    // should stop here...
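+    // ("foo2" itself is excluded: iterate_upper_bound is an exclusive bound.)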
+    ASSERT_TRUE(!iter->Valid());
+  }
+  // Testing SeekToLast with iterate_upper_bound set
+  {
+    ReadOptions ro;
+
+    Slice prefix("foo");
+    ro.iterate_upper_bound = &prefix;
+
+    std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
+
+    iter->SeekToLast();
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ(iter->key().compare(Slice("a")), 0);
+  }
+
+  // prefix is the first letter of the key
+  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
+
+  DestroyAndReopen(options);
+  ASSERT_OK(Put("a", "0"));
+  ASSERT_OK(Put("foo", "bar"));
+  ASSERT_OK(Put("foo1", "bar1"));
+  ASSERT_OK(Put("g1", "0"));
+
+  // testing with iterate_upper_bound and prefix_extractor
+  // Seek target and iterate_upper_bound are not in the same prefix
+  // This should be an error
+  {
+    ReadOptions ro;
+    Slice upper_bound("g");
+    ro.iterate_upper_bound = &upper_bound;
+
+    std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
+
+    iter->Seek("foo");
+
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ("foo", iter->key().ToString());
+
+    iter->Next();
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ("foo1", iter->key().ToString());
+
+    iter->Next();
+    ASSERT_TRUE(!iter->Valid());
+  }
+
+  // testing that iterate_upper_bound prevents iterating over deleted items
+  // if the bound has already been reached
+  {
+    options.prefix_extractor = nullptr;
+    DestroyAndReopen(options);
+    ASSERT_OK(Put("a", "0"));
+    ASSERT_OK(Put("b", "0"));
+    ASSERT_OK(Put("b1", "0"));
+    ASSERT_OK(Put("c", "0"));
+    ASSERT_OK(Put("d", "0"));
+    ASSERT_OK(Put("e", "0"));
+    ASSERT_OK(Delete("c"));
+    ASSERT_OK(Delete("d"));
+
+    // base case with no bound
+    ReadOptions ro;
+    ro.iterate_upper_bound = nullptr;
+
+    std::unique_ptr<Iterator> iter(db_->NewIterator(ro));
+
+    iter->Seek("b");
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ(iter->key().compare(Slice("b")), 0);
+
+    iter->Next();
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ(iter->key().compare(("b1")), 0);
+
+    perf_context.Reset();
+    iter->Next();
+
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ(static_cast<int>(perf_context.internal_delete_skipped_count), 2);
+
+    // now testing with iterate_upper_bound
+    Slice prefix("c");
+    ro.iterate_upper_bound = &prefix;
+
+    iter.reset(db_->NewIterator(ro));
+
+    perf_context.Reset();
+
+    iter->Seek("b");
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ(iter->key().compare(Slice("b")), 0);
+
+    iter->Next();
+    ASSERT_TRUE(iter->Valid());
+    ASSERT_EQ(iter->key().compare(("b1")), 0);
+
+    iter->Next();
+    // the iteration should stop as soon as the bound key is reached,
+    // even though the key is deleted;
+    // hence internal_delete_skipped_count should be 0
+    ASSERT_TRUE(!iter->Valid());
+    ASSERT_EQ(static_cast<int>(perf_context.internal_delete_skipped_count), 0);
+  }
+}
+
+// TODO(3.13): fix the issue of Seek() + Prev() which might not necessarily
+// return the biggest key which is smaller than the seek key.
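+// A small worked example of the Seek() + Prev() contract the TODO refers to
+// (hypothetical keys, independent of the tests below): with keys {"1", "2",
+// "3"},
+//   it->Seek("2");  // positions the iterator exactly at "2"
+//   it->Prev();     // should land on "1", the largest key smaller than "2"
+// The TODO concerns cases where Prev() can end up before that key.
+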
+TEST_F(DBIteratorTest, PrevAfterMerge) { + Options options; + options.create_if_missing = true; + options.merge_operator = MergeOperators::CreatePutOperator(); + DestroyAndReopen(options); + + // write three entries with different keys using Merge() + WriteOptions wopts; + db_->Merge(wopts, "1", "data1"); + db_->Merge(wopts, "2", "data2"); + db_->Merge(wopts, "3", "data3"); + + std::unique_ptr it(db_->NewIterator(ReadOptions())); + + it->Seek("2"); + ASSERT_TRUE(it->Valid()); + ASSERT_EQ("2", it->key().ToString()); + + it->Prev(); + ASSERT_TRUE(it->Valid()); + ASSERT_EQ("1", it->key().ToString()); +} + +TEST_F(DBIteratorTest, PinnedDataIteratorRandomized) { + enum TestConfig { + NORMAL, + CLOSE_AND_OPEN, + COMPACT_BEFORE_READ, + FLUSH_EVERY_1000, + MAX + }; + + // Generate Random data + Random rnd(301); + + int puts = 100000; + int key_pool = static_cast(puts * 0.7); + int key_size = 100; + int val_size = 1000; + int seeks_percentage = 20; // 20% of keys will be used to test seek() + int delete_percentage = 20; // 20% of keys will be deleted + int merge_percentage = 20; // 20% of keys will be added using Merge() + + for (int run_config = 0; run_config < TestConfig::MAX; run_config++) { + Options options = CurrentOptions(); + BlockBasedTableOptions table_options; + table_options.use_delta_encoding = false; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + options.merge_operator = MergeOperators::CreatePutOperator(); + DestroyAndReopen(options); + + std::vector generated_keys(key_pool); + for (int i = 0; i < key_pool; i++) { + generated_keys[i] = RandomString(&rnd, key_size); + } + + std::map true_data; + std::vector random_keys; + std::vector deleted_keys; + for (int i = 0; i < puts; i++) { + auto& k = generated_keys[rnd.Next() % key_pool]; + auto v = RandomString(&rnd, val_size); + + // Insert data to true_data map and to DB + true_data[k] = v; + if (rnd.OneIn(static_cast(100.0 / merge_percentage))) { + ASSERT_OK(db_->Merge(WriteOptions(), k, v)); + } else { + ASSERT_OK(Put(k, v)); + } + + // Pick random keys to be used to test Seek() + if (rnd.OneIn(static_cast(100.0 / seeks_percentage))) { + random_keys.push_back(k); + } + + // Delete some random keys + if (rnd.OneIn(static_cast(100.0 / delete_percentage))) { + deleted_keys.push_back(k); + true_data.erase(k); + ASSERT_OK(Delete(k)); + } + + if (run_config == TestConfig::FLUSH_EVERY_1000) { + if (i && i % 1000 == 0) { + Flush(); + } + } + } + + if (run_config == TestConfig::CLOSE_AND_OPEN) { + Close(); + Reopen(options); + } else if (run_config == TestConfig::COMPACT_BEFORE_READ) { + db_->CompactRange(CompactRangeOptions(), nullptr, nullptr); + } + + ReadOptions ro; + ro.pin_data = true; + auto iter = db_->NewIterator(ro); + + { + // Test Seek to random keys + printf("Testing seek on %" ROCKSDB_PRIszt " keys\n", random_keys.size()); + std::vector keys_slices; + std::vector true_keys; + for (auto& k : random_keys) { + iter->Seek(k); + if (!iter->Valid()) { + ASSERT_EQ(true_data.lower_bound(k), true_data.end()); + continue; + } + std::string prop_value; + ASSERT_OK( + iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value)); + ASSERT_EQ("1", prop_value); + keys_slices.push_back(iter->key()); + true_keys.push_back(true_data.lower_bound(k)->first); + } + + for (size_t i = 0; i < keys_slices.size(); i++) { + ASSERT_EQ(keys_slices[i].ToString(), true_keys[i]); + } + } + + { + // Test iterating all data forward + printf("Testing iterating forward on all keys\n"); + std::vector all_keys; + for 
(iter->SeekToFirst(); iter->Valid(); iter->Next()) { + std::string prop_value; + ASSERT_OK( + iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value)); + ASSERT_EQ("1", prop_value); + all_keys.push_back(iter->key()); + } + ASSERT_EQ(all_keys.size(), true_data.size()); + + // Verify that all keys slices are valid + auto data_iter = true_data.begin(); + for (size_t i = 0; i < all_keys.size(); i++) { + ASSERT_EQ(all_keys[i].ToString(), data_iter->first); + data_iter++; + } + } + + { + // Test iterating all data backward + printf("Testing iterating backward on all keys\n"); + std::vector all_keys; + for (iter->SeekToLast(); iter->Valid(); iter->Prev()) { + std::string prop_value; + ASSERT_OK( + iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value)); + ASSERT_EQ("1", prop_value); + all_keys.push_back(iter->key()); + } + ASSERT_EQ(all_keys.size(), true_data.size()); + + // Verify that all keys slices are valid (backward) + auto data_iter = true_data.rbegin(); + for (size_t i = 0; i < all_keys.size(); i++) { + ASSERT_EQ(all_keys[i].ToString(), data_iter->first); + data_iter++; + } + } + + delete iter; + } +} + +#ifndef ROCKSDB_LITE +TEST_F(DBIteratorTest, PinnedDataIteratorMultipleFiles) { + Options options = CurrentOptions(); + BlockBasedTableOptions table_options; + table_options.use_delta_encoding = false; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + options.disable_auto_compactions = true; + options.write_buffer_size = 1024 * 1024 * 10; // 10 Mb + DestroyAndReopen(options); + + std::map true_data; + + // Generate 4 sst files in L2 + Random rnd(301); + for (int i = 1; i <= 1000; i++) { + std::string k = Key(i * 3); + std::string v = RandomString(&rnd, 100); + ASSERT_OK(Put(k, v)); + true_data[k] = v; + if (i % 250 == 0) { + ASSERT_OK(Flush()); + } + } + ASSERT_EQ(FilesPerLevel(0), "4"); + ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)); + ASSERT_EQ(FilesPerLevel(0), "0,4"); + + // Generate 4 sst files in L0 + for (int i = 1; i <= 1000; i++) { + std::string k = Key(i * 2); + std::string v = RandomString(&rnd, 100); + ASSERT_OK(Put(k, v)); + true_data[k] = v; + if (i % 250 == 0) { + ASSERT_OK(Flush()); + } + } + ASSERT_EQ(FilesPerLevel(0), "4,4"); + + // Add some keys/values in memtables + for (int i = 1; i <= 1000; i++) { + std::string k = Key(i); + std::string v = RandomString(&rnd, 100); + ASSERT_OK(Put(k, v)); + true_data[k] = v; + } + ASSERT_EQ(FilesPerLevel(0), "4,4"); + + ReadOptions ro; + ro.pin_data = true; + auto iter = db_->NewIterator(ro); + + std::vector> results; + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + std::string prop_value; + ASSERT_OK(iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value)); + ASSERT_EQ("1", prop_value); + results.emplace_back(iter->key(), iter->value().ToString()); + } + + ASSERT_EQ(results.size(), true_data.size()); + auto data_iter = true_data.begin(); + for (size_t i = 0; i < results.size(); i++, data_iter++) { + auto& kv = results[i]; + ASSERT_EQ(kv.first, data_iter->first); + ASSERT_EQ(kv.second, data_iter->second); + } + + delete iter; +} +#endif + +TEST_F(DBIteratorTest, PinnedDataIteratorMergeOperator) { + Options options = CurrentOptions(); + BlockBasedTableOptions table_options; + table_options.use_delta_encoding = false; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + options.merge_operator = MergeOperators::CreateUInt64AddOperator(); + DestroyAndReopen(options); + + std::string numbers[7]; + for (int val = 0; val <= 6; 
val++) { + PutFixed64(numbers + val, val); + } + + // +1 all keys in range [ 0 => 999] + for (int i = 0; i < 1000; i++) { + WriteOptions wo; + ASSERT_OK(db_->Merge(wo, Key(i), numbers[1])); + } + + // +2 all keys divisible by 2 in range [ 0 => 999] + for (int i = 0; i < 1000; i += 2) { + WriteOptions wo; + ASSERT_OK(db_->Merge(wo, Key(i), numbers[2])); + } + + // +3 all keys divisible by 5 in range [ 0 => 999] + for (int i = 0; i < 1000; i += 5) { + WriteOptions wo; + ASSERT_OK(db_->Merge(wo, Key(i), numbers[3])); + } + + ReadOptions ro; + ro.pin_data = true; + auto iter = db_->NewIterator(ro); + + std::vector> results; + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + std::string prop_value; + ASSERT_OK(iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value)); + ASSERT_EQ("1", prop_value); + results.emplace_back(iter->key(), iter->value().ToString()); + } + + ASSERT_EQ(results.size(), 1000); + for (size_t i = 0; i < results.size(); i++) { + auto& kv = results[i]; + ASSERT_EQ(kv.first, Key(static_cast(i))); + int expected_val = 1; + if (i % 2 == 0) { + expected_val += 2; + } + if (i % 5 == 0) { + expected_val += 3; + } + ASSERT_EQ(kv.second, numbers[expected_val]); + } + + delete iter; +} + +TEST_F(DBIteratorTest, PinnedDataIteratorReadAfterUpdate) { + Options options = CurrentOptions(); + BlockBasedTableOptions table_options; + table_options.use_delta_encoding = false; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + options.write_buffer_size = 100000; + DestroyAndReopen(options); + + Random rnd(301); + + std::map true_data; + for (int i = 0; i < 1000; i++) { + std::string k = RandomString(&rnd, 10); + std::string v = RandomString(&rnd, 1000); + ASSERT_OK(Put(k, v)); + true_data[k] = v; + } + + ReadOptions ro; + ro.pin_data = true; + auto iter = db_->NewIterator(ro); + + // Delete 50% of the keys and update the other 50% + for (auto& kv : true_data) { + if (rnd.OneIn(2)) { + ASSERT_OK(Delete(kv.first)); + } else { + std::string new_val = RandomString(&rnd, 1000); + ASSERT_OK(Put(kv.first, new_val)); + } + } + + std::vector> results; + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + std::string prop_value; + ASSERT_OK(iter->GetProperty("rocksdb.iterator.is-key-pinned", &prop_value)); + ASSERT_EQ("1", prop_value); + results.emplace_back(iter->key(), iter->value().ToString()); + } + + auto data_iter = true_data.begin(); + for (size_t i = 0; i < results.size(); i++, data_iter++) { + auto& kv = results[i]; + ASSERT_EQ(kv.first, data_iter->first); + ASSERT_EQ(kv.second, data_iter->second); + } + + delete iter; +} + +TEST_F(DBIteratorTest, IterPrevKeyCrossingBlocks) { + Options options = CurrentOptions(); + BlockBasedTableOptions table_options; + table_options.block_size = 1; // every block will contain one entry + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + options.merge_operator = MergeOperators::CreateStringAppendTESTOperator(); + options.disable_auto_compactions = true; + options.max_sequential_skip_in_iterations = 8; + + DestroyAndReopen(options); + + // Putting such deletes will force DBIter::Prev() to fallback to a Seek + for (int file_num = 0; file_num < 10; file_num++) { + ASSERT_OK(Delete("key4")); + ASSERT_OK(Flush()); + } + + // First File containing 5 blocks of puts + ASSERT_OK(Put("key1", "val1.0")); + ASSERT_OK(Put("key2", "val2.0")); + ASSERT_OK(Put("key3", "val3.0")); + ASSERT_OK(Put("key4", "val4.0")); + ASSERT_OK(Put("key5", "val5.0")); + ASSERT_OK(Flush()); + + // Second file containing 9 
blocks of merge operands
+  ASSERT_OK(db_->Merge(WriteOptions(), "key1", "val1.1"));
+  ASSERT_OK(db_->Merge(WriteOptions(), "key1", "val1.2"));
+
+  ASSERT_OK(db_->Merge(WriteOptions(), "key2", "val2.1"));
+  ASSERT_OK(db_->Merge(WriteOptions(), "key2", "val2.2"));
+  ASSERT_OK(db_->Merge(WriteOptions(), "key2", "val2.3"));
+
+  ASSERT_OK(db_->Merge(WriteOptions(), "key3", "val3.1"));
+  ASSERT_OK(db_->Merge(WriteOptions(), "key3", "val3.2"));
+  ASSERT_OK(db_->Merge(WriteOptions(), "key3", "val3.3"));
+  ASSERT_OK(db_->Merge(WriteOptions(), "key3", "val3.4"));
+  ASSERT_OK(Flush());
+
+  {
+    ReadOptions ro;
+    ro.fill_cache = false;
+    Iterator* iter = db_->NewIterator(ro);
+
+    iter->SeekToLast();
+    ASSERT_EQ(iter->key().ToString(), "key5");
+    ASSERT_EQ(iter->value().ToString(), "val5.0");
+
+    iter->Prev();
+    ASSERT_EQ(iter->key().ToString(), "key4");
+    ASSERT_EQ(iter->value().ToString(), "val4.0");
+
+    iter->Prev();
+    ASSERT_EQ(iter->key().ToString(), "key3");
+    ASSERT_EQ(iter->value().ToString(), "val3.0,val3.1,val3.2,val3.3,val3.4");
+
+    iter->Prev();
+    ASSERT_EQ(iter->key().ToString(), "key2");
+    ASSERT_EQ(iter->value().ToString(), "val2.0,val2.1,val2.2,val2.3");
+
+    iter->Prev();
+    ASSERT_EQ(iter->key().ToString(), "key1");
+    ASSERT_EQ(iter->value().ToString(), "val1.0,val1.1,val1.2");
+
+    delete iter;
+  }
+}
+
+TEST_F(DBIteratorTest, IterPrevKeyCrossingBlocksRandomized) {
+  Options options = CurrentOptions();
+  options.merge_operator = MergeOperators::CreateStringAppendTESTOperator();
+  options.disable_auto_compactions = true;
+  options.level0_slowdown_writes_trigger = (1 << 30);
+  options.level0_stop_writes_trigger = (1 << 30);
+  options.max_sequential_skip_in_iterations = 8;
+  DestroyAndReopen(options);
+
+  const int kNumKeys = 500;
+  // Small number of merge operands to make sure that DBIter::Prev() doesn't
+  // fall back to Seek()
+  const int kNumMergeOperands = 3;
+  // Use value size that will make sure that every block contains 1 key
+  const int kValSize =
+      static_cast<int>(BlockBasedTableOptions().block_size) * 4;
+  // Percentage of keys that won't get merge operations
+  const int kNoMergeOpPercentage = 20;
+  // Percentage of keys that will be deleted
+  const int kDeletePercentage = 10;
+
+  // For half of the key range we will write multiple deletes first to
+  // force DBIter::Prev() to fall back to Seek()
+  for (int file_num = 0; file_num < 10; file_num++) {
+    for (int i = 0; i < kNumKeys; i += 2) {
+      ASSERT_OK(Delete(Key(i)));
+    }
+    ASSERT_OK(Flush());
+  }
+
+  Random rnd(301);
+  std::map<std::string, std::string> true_data;
+  std::string gen_key;
+  std::string gen_val;
+
+  for (int i = 0; i < kNumKeys; i++) {
+    gen_key = Key(i);
+    gen_val = RandomString(&rnd, kValSize);
+
+    ASSERT_OK(Put(gen_key, gen_val));
+    true_data[gen_key] = gen_val;
+  }
+  ASSERT_OK(Flush());
+
+  // Separate values and merge operands in a different file so that we
+  // make sure that we don't merge them while flushing but actually
+  // merge them in the read path
+  for (int i = 0; i < kNumKeys; i++) {
+    if (rnd.OneIn(static_cast<int>(100.0 / kNoMergeOpPercentage))) {
+      // Don't give merge operations for some keys
+      continue;
+    }
+
+    for (int j = 0; j < kNumMergeOperands; j++) {
+      gen_key = Key(i);
+      gen_val = RandomString(&rnd, kValSize);
+
+      ASSERT_OK(db_->Merge(WriteOptions(), gen_key, gen_val));
+      true_data[gen_key] += "," + gen_val;
+    }
+  }
+  ASSERT_OK(Flush());
+
+  for (int i = 0; i < kNumKeys; i++) {
+    if (rnd.OneIn(static_cast<int>(100.0 / kDeletePercentage))) {
+      gen_key = Key(i);
+
+      ASSERT_OK(Delete(gen_key));
+      true_data.erase(gen_key);
+    }
+  }
+
+  ASSERT_OK(Flush());
+
+  {
+    ReadOptions ro;
+    ro.fill_cache = false;
+    Iterator* iter = db_->NewIterator(ro);
+    auto data_iter = true_data.rbegin();
+
+    for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
+      ASSERT_EQ(iter->key().ToString(), data_iter->first);
+      ASSERT_EQ(iter->value().ToString(), data_iter->second);
+      data_iter++;
+    }
+    ASSERT_EQ(data_iter, true_data.rend());
+
+    delete iter;
+  }
+
+  {
+    ReadOptions ro;
+    ro.fill_cache = false;
+    Iterator* iter = db_->NewIterator(ro);
+    auto data_iter = true_data.rbegin();
+
+    int entries_right = 0;
+    std::string seek_key;
+    for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
+      // Verify key/value of current position
+      ASSERT_EQ(iter->key().ToString(), data_iter->first);
+      ASSERT_EQ(iter->value().ToString(), data_iter->second);
+
+      bool restore_position_with_seek = rnd.Uniform(2);
+      if (restore_position_with_seek) {
+        seek_key = iter->key().ToString();
+      }
+
+      // Do some Next() operations, then restore the iterator to its
+      // original position
+      int next_count =
+          entries_right > 0 ? rnd.Uniform(std::min(entries_right, 10)) : 0;
+      for (int i = 0; i < next_count; i++) {
+        iter->Next();
+        data_iter--;
+
+        ASSERT_EQ(iter->key().ToString(), data_iter->first);
+        ASSERT_EQ(iter->value().ToString(), data_iter->second);
+      }
+
+      if (restore_position_with_seek) {
+        // Restore original position using Seek()
+        iter->Seek(seek_key);
+        for (int i = 0; i < next_count; i++) {
+          data_iter++;
+        }
+
+        ASSERT_EQ(iter->key().ToString(), data_iter->first);
+        ASSERT_EQ(iter->value().ToString(), data_iter->second);
+      } else {
+        // Restore original position using Prev()
+        for (int i = 0; i < next_count; i++) {
+          iter->Prev();
+          data_iter++;
+
+          ASSERT_EQ(iter->key().ToString(), data_iter->first);
+          ASSERT_EQ(iter->value().ToString(), data_iter->second);
+        }
+      }
+
+      entries_right++;
+      data_iter++;
+    }
+    ASSERT_EQ(data_iter, true_data.rend());
+
+    delete iter;
+  }
+}
+
+TEST_F(DBIteratorTest, IteratorWithLocalStatistics) {
+  Options options = CurrentOptions();
+  options.statistics = rocksdb::CreateDBStatistics();
+  DestroyAndReopen(options);
+
+  Random rnd(301);
+  for (int i = 0; i < 1000; i++) {
+    // Key 10 bytes / Value 10 bytes
+    ASSERT_OK(Put(RandomString(&rnd, 10), RandomString(&rnd, 10)));
+  }
+
+  std::atomic<uint64_t> total_next(0);
+  std::atomic<uint64_t> total_next_found(0);
+  std::atomic<uint64_t> total_prev(0);
+  std::atomic<uint64_t> total_prev_found(0);
+  std::atomic<uint64_t> total_bytes(0);
+
+  std::vector<std::thread> threads;
+  std::function<void()> reader_func_next = [&]() {
+    Iterator* iter = db_->NewIterator(ReadOptions());
+
+    iter->SeekToFirst();
+    // Seek will bump ITER_BYTES_READ
+    total_bytes += iter->key().size();
+    total_bytes += iter->value().size();
+    while (true) {
+      iter->Next();
+      total_next++;
+
+      if (!iter->Valid()) {
+        break;
+      }
+      total_next_found++;
+      total_bytes += iter->key().size();
+      total_bytes += iter->value().size();
+    }
+
+    delete iter;
+  };
+
+  std::function<void()> reader_func_prev = [&]() {
+    Iterator* iter = db_->NewIterator(ReadOptions());
+
+    iter->SeekToLast();
+    // Seek will bump ITER_BYTES_READ
+    total_bytes += iter->key().size();
+    total_bytes += iter->value().size();
+    while (true) {
+      iter->Prev();
+      total_prev++;
+
+      if (!iter->Valid()) {
+        break;
+      }
+      total_prev_found++;
+      total_bytes += iter->key().size();
+      total_bytes += iter->value().size();
+    }
+
+    delete iter;
+  };
+
+  for (int i = 0; i < 10; i++) {
+    threads.emplace_back(reader_func_next);
+  }
+  for (int i = 0; i < 15; i++) {
+    threads.emplace_back(reader_func_prev);
+  }
+
+  for (auto& t : threads) {
+    t.join();
+  }
+
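+  // Why the equalities below should hold (sketch; options.statistics was
+  // installed above via CreateDBStatistics()): each iterator accumulates its
+  // counters locally and merges them into the shared Statistics object when
+  // it is destroyed, so after every thread joins the global tickers must
+  // match the sums kept by the readers, e.g.
+  //   uint64_t n = options.statistics->getTickerCount(NUMBER_DB_NEXT);
+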
ASSERT_EQ(TestGetTickerCount(options, NUMBER_DB_NEXT), total_next); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_DB_NEXT_FOUND), + total_next_found); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_DB_PREV), total_prev); + ASSERT_EQ(TestGetTickerCount(options, NUMBER_DB_PREV_FOUND), + total_prev_found); + ASSERT_EQ(TestGetTickerCount(options, ITER_BYTES_READ), total_bytes); +} + +TEST_F(DBIteratorTest, ReadAhead) { + Options options; + env_->count_random_reads_ = true; + options.env = env_; + options.disable_auto_compactions = true; + options.write_buffer_size = 4 << 20; + options.statistics = rocksdb::CreateDBStatistics(); + BlockBasedTableOptions table_options; + table_options.block_size = 1024; + table_options.no_block_cache = true; + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + Reopen(options); + + std::string value(1024, 'a'); + for (int i = 0; i < 100; i++) { + Put(Key(i), value); + } + ASSERT_OK(Flush()); + MoveFilesToLevel(2); + + for (int i = 0; i < 100; i++) { + Put(Key(i), value); + } + ASSERT_OK(Flush()); + MoveFilesToLevel(1); + + for (int i = 0; i < 100; i++) { + Put(Key(i), value); + } + ASSERT_OK(Flush()); +#ifndef ROCKSDB_LITE + ASSERT_EQ("1,1,1", FilesPerLevel()); +#endif // !ROCKSDB_LITE + + env_->random_read_bytes_counter_ = 0; + options.statistics->setTickerCount(NO_FILE_OPENS, 0); + ReadOptions read_options; + auto* iter = db_->NewIterator(read_options); + iter->SeekToFirst(); + int64_t num_file_opens = TestGetTickerCount(options, NO_FILE_OPENS); + size_t bytes_read = env_->random_read_bytes_counter_; + delete iter; + + env_->random_read_bytes_counter_ = 0; + options.statistics->setTickerCount(NO_FILE_OPENS, 0); + read_options.readahead_size = 1024 * 10; + iter = db_->NewIterator(read_options); + iter->SeekToFirst(); + int64_t num_file_opens_readahead = TestGetTickerCount(options, NO_FILE_OPENS); + size_t bytes_read_readahead = env_->random_read_bytes_counter_; + delete iter; + ASSERT_EQ(num_file_opens + 3, num_file_opens_readahead); + ASSERT_GT(bytes_read_readahead, bytes_read); + ASSERT_GT(bytes_read_readahead, read_options.readahead_size * 3); + + // Verify correctness. + iter = db_->NewIterator(read_options); + int count = 0; + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + ASSERT_EQ(value, iter->value()); + count++; + } + ASSERT_EQ(100, count); + for (int i = 0; i < 100; i++) { + iter->Seek(Key(i)); + ASSERT_EQ(value, iter->value()); + } + delete iter; +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/db_log_iter_test.cc b/external/rocksdb/db/db_log_iter_test.cc new file mode 100755 index 0000000000..956f601a7a --- /dev/null +++ b/external/rocksdb/db/db_log_iter_test.cc @@ -0,0 +1,290 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +// Introduction of SyncPoint effectively disabled building and running this test +// in Release build. 
+// which is a pity, it is a good test +#if !defined(ROCKSDB_LITE) + +#include "db/db_test_util.h" +#include "port/stack_trace.h" + +namespace rocksdb { + +class DBTestXactLogIterator : public DBTestBase { + public: + DBTestXactLogIterator() : DBTestBase("/db_log_iter_test") {} + + std::unique_ptr OpenTransactionLogIter( + const SequenceNumber seq) { + unique_ptr iter; + Status status = dbfull()->GetUpdatesSince(seq, &iter); + EXPECT_OK(status); + EXPECT_TRUE(iter->Valid()); + return iter; + } +}; + +namespace { +SequenceNumber ReadRecords( + std::unique_ptr& iter, + int& count) { + count = 0; + SequenceNumber lastSequence = 0; + BatchResult res; + while (iter->Valid()) { + res = iter->GetBatch(); + EXPECT_TRUE(res.sequence > lastSequence); + ++count; + lastSequence = res.sequence; + EXPECT_OK(iter->status()); + iter->Next(); + } + return res.sequence; +} + +void ExpectRecords( + const int expected_no_records, + std::unique_ptr& iter) { + int num_records; + ReadRecords(iter, num_records); + ASSERT_EQ(num_records, expected_no_records); +} +} // namespace + +TEST_F(DBTestXactLogIterator, TransactionLogIterator) { + do { + Options options = OptionsForLogIterTest(); + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + Put(0, "key1", DummyString(1024)); + Put(1, "key2", DummyString(1024)); + Put(1, "key2", DummyString(1024)); + ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 3U); + { + auto iter = OpenTransactionLogIter(0); + ExpectRecords(3, iter); + } + ReopenWithColumnFamilies({"default", "pikachu"}, options); + env_->SleepForMicroseconds(2 * 1000 * 1000); + { + Put(0, "key4", DummyString(1024)); + Put(1, "key5", DummyString(1024)); + Put(0, "key6", DummyString(1024)); + } + { + auto iter = OpenTransactionLogIter(0); + ExpectRecords(6, iter); + } + } while (ChangeCompactOptions()); +} + +#ifndef NDEBUG // sync point is not included with DNDEBUG build +TEST_F(DBTestXactLogIterator, TransactionLogIteratorRace) { + static const int LOG_ITERATOR_RACE_TEST_COUNT = 2; + static const char* sync_points[LOG_ITERATOR_RACE_TEST_COUNT][4] = { + {"WalManager::GetSortedWalFiles:1", "WalManager::PurgeObsoleteFiles:1", + "WalManager::PurgeObsoleteFiles:2", "WalManager::GetSortedWalFiles:2"}, + {"WalManager::GetSortedWalsOfType:1", + "WalManager::PurgeObsoleteFiles:1", + "WalManager::PurgeObsoleteFiles:2", + "WalManager::GetSortedWalsOfType:2"}}; + for (int test = 0; test < LOG_ITERATOR_RACE_TEST_COUNT; ++test) { + // Setup sync point dependency to reproduce the race condition of + // a log file moved to archived dir, in the middle of GetSortedWalFiles + rocksdb::SyncPoint::GetInstance()->LoadDependency( + { { sync_points[test][0], sync_points[test][1] }, + { sync_points[test][2], sync_points[test][3] }, + }); + + do { + rocksdb::SyncPoint::GetInstance()->ClearTrace(); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + Options options = OptionsForLogIterTest(); + DestroyAndReopen(options); + Put("key1", DummyString(1024)); + dbfull()->Flush(FlushOptions()); + Put("key2", DummyString(1024)); + dbfull()->Flush(FlushOptions()); + Put("key3", DummyString(1024)); + dbfull()->Flush(FlushOptions()); + Put("key4", DummyString(1024)); + ASSERT_EQ(dbfull()->GetLatestSequenceNumber(), 4U); + + { + auto iter = OpenTransactionLogIter(0); + ExpectRecords(4, iter); + } + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + // trigger async flush, and log move. 
Well, log move will + // wait until the GetSortedWalFiles:1 to reproduce the race + // condition + FlushOptions flush_options; + flush_options.wait = false; + dbfull()->Flush(flush_options); + + // "key5" would be written in a new memtable and log + Put("key5", DummyString(1024)); + { + // this iter would miss "key4" if not fixed + auto iter = OpenTransactionLogIter(0); + ExpectRecords(5, iter); + } + } while (ChangeCompactOptions()); + } +} +#endif + +TEST_F(DBTestXactLogIterator, TransactionLogIteratorStallAtLastRecord) { + do { + Options options = OptionsForLogIterTest(); + DestroyAndReopen(options); + Put("key1", DummyString(1024)); + auto iter = OpenTransactionLogIter(0); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + iter->Next(); + ASSERT_TRUE(!iter->Valid()); + ASSERT_OK(iter->status()); + Put("key2", DummyString(1024)); + iter->Next(); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + } while (ChangeCompactOptions()); +} + +TEST_F(DBTestXactLogIterator, TransactionLogIteratorCheckAfterRestart) { + do { + Options options = OptionsForLogIterTest(); + DestroyAndReopen(options); + Put("key1", DummyString(1024)); + Put("key2", DummyString(1023)); + dbfull()->Flush(FlushOptions()); + Reopen(options); + auto iter = OpenTransactionLogIter(0); + ExpectRecords(2, iter); + } while (ChangeCompactOptions()); +} + +TEST_F(DBTestXactLogIterator, TransactionLogIteratorCorruptedLog) { + do { + Options options = OptionsForLogIterTest(); + DestroyAndReopen(options); + for (int i = 0; i < 1024; i++) { + Put("key"+ToString(i), DummyString(10)); + } + dbfull()->Flush(FlushOptions()); + // Corrupt this log to create a gap + rocksdb::VectorLogPtr wal_files; + ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files)); + const auto logfile_path = dbname_ + "/" + wal_files.front()->PathName(); + if (mem_env_) { + mem_env_->Truncate(logfile_path, wal_files.front()->SizeFileBytes() / 2); + } else { + ASSERT_EQ(0, truncate(logfile_path.c_str(), + wal_files.front()->SizeFileBytes() / 2)); + } + + // Insert a new entry to a new log file + Put("key1025", DummyString(10)); + // Try to read from the beginning. 
Should stop before the gap and read less + // than 1025 entries + auto iter = OpenTransactionLogIter(0); + int count; + SequenceNumber last_sequence_read = ReadRecords(iter, count); + ASSERT_LT(last_sequence_read, 1025U); + // Try to read past the gap, should be able to seek to key1025 + auto iter2 = OpenTransactionLogIter(last_sequence_read + 1); + ExpectRecords(1, iter2); + } while (ChangeCompactOptions()); +} + +TEST_F(DBTestXactLogIterator, TransactionLogIteratorBatchOperations) { + do { + Options options = OptionsForLogIterTest(); + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + WriteBatch batch; + batch.Put(handles_[1], "key1", DummyString(1024)); + batch.Put(handles_[0], "key2", DummyString(1024)); + batch.Put(handles_[1], "key3", DummyString(1024)); + batch.Delete(handles_[0], "key2"); + dbfull()->Write(WriteOptions(), &batch); + Flush(1); + Flush(0); + ReopenWithColumnFamilies({"default", "pikachu"}, options); + Put(1, "key4", DummyString(1024)); + auto iter = OpenTransactionLogIter(3); + ExpectRecords(2, iter); + } while (ChangeCompactOptions()); +} + +TEST_F(DBTestXactLogIterator, TransactionLogIteratorBlobs) { + Options options = OptionsForLogIterTest(); + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + { + WriteBatch batch; + batch.Put(handles_[1], "key1", DummyString(1024)); + batch.Put(handles_[0], "key2", DummyString(1024)); + batch.PutLogData(Slice("blob1")); + batch.Put(handles_[1], "key3", DummyString(1024)); + batch.PutLogData(Slice("blob2")); + batch.Delete(handles_[0], "key2"); + dbfull()->Write(WriteOptions(), &batch); + ReopenWithColumnFamilies({"default", "pikachu"}, options); + } + + auto res = OpenTransactionLogIter(0)->GetBatch(); + struct Handler : public WriteBatch::Handler { + std::string seen; + virtual Status PutCF(uint32_t cf, const Slice& key, + const Slice& value) override { + seen += "Put(" + ToString(cf) + ", " + key.ToString() + ", " + + ToString(value.size()) + ")"; + return Status::OK(); + } + virtual Status MergeCF(uint32_t cf, const Slice& key, + const Slice& value) override { + seen += "Merge(" + ToString(cf) + ", " + key.ToString() + ", " + + ToString(value.size()) + ")"; + return Status::OK(); + } + virtual void LogData(const Slice& blob) override { + seen += "LogData(" + blob.ToString() + ")"; + } + virtual Status DeleteCF(uint32_t cf, const Slice& key) override { + seen += "Delete(" + ToString(cf) + ", " + key.ToString() + ")"; + return Status::OK(); + } + } handler; + res.writeBatchPtr->Iterate(&handler); + ASSERT_EQ( + "Put(1, key1, 1024)" + "Put(0, key2, 1024)" + "LogData(blob1)" + "Put(1, key3, 1024)" + "LogData(blob2)" + "Delete(0, key2)", + handler.seen); +} +} // namespace rocksdb + +#endif // !defined(ROCKSDB_LITE) + +int main(int argc, char** argv) { +#if !defined(ROCKSDB_LITE) + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +#else + return 0; +#endif +} diff --git a/external/rocksdb/db/db_options_test.cc b/external/rocksdb/db/db_options_test.cc new file mode 100755 index 0000000000..0d484d79f3 --- /dev/null +++ b/external/rocksdb/db/db_options_test.cc @@ -0,0 +1,131 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +#include +#include + +#include "db/db_test_util.h" +#include "port/stack_trace.h" +#include "util/sync_point.h" + +namespace rocksdb { + +class DBOptionsTest : public DBTestBase { + public: + DBOptionsTest() : DBTestBase("/db_options_test") {} +}; + +// RocksDB lite don't support dynamic options. +#ifndef ROCKSDB_LITE + +TEST_F(DBOptionsTest, EnableAutoCompactionAndTriggerStall) { + const std::string kValue(1024, 'v'); + for (int method_type = 0; method_type < 2; method_type++) { + for (int option_type = 0; option_type < 4; option_type++) { + Options options; + options.create_if_missing = true; + options.disable_auto_compactions = true; + options.write_buffer_size = 1024 * 1024; + options.compression = CompressionType::kNoCompression; + options.level0_file_num_compaction_trigger = 1; + options.level0_stop_writes_trigger = std::numeric_limits::max(); + options.level0_slowdown_writes_trigger = std::numeric_limits::max(); + options.hard_pending_compaction_bytes_limit = + std::numeric_limits::max(); + options.soft_pending_compaction_bytes_limit = + std::numeric_limits::max(); + + DestroyAndReopen(options); + for (int i = 0; i < 1024 * 2; i++) { + Put(Key(i), kValue); + } + dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_EQ(2, NumTableFilesAtLevel(0)); + uint64_t l0_size = SizeAtLevel(0); + + switch (option_type) { + case 0: + // test with level0_stop_writes_trigger + options.level0_stop_writes_trigger = 2; + options.level0_slowdown_writes_trigger = 2; + break; + case 1: + options.level0_slowdown_writes_trigger = 2; + break; + case 2: + options.hard_pending_compaction_bytes_limit = l0_size; + options.soft_pending_compaction_bytes_limit = l0_size; + break; + case 3: + options.soft_pending_compaction_bytes_limit = l0_size; + break; + } + Reopen(options); + dbfull()->TEST_WaitForCompact(); + ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped()); + ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay()); + + SyncPoint::GetInstance()->LoadDependency( + {{"DBOptionsTest::EnableAutoCompactionAndTriggerStall:1", + "BackgroundCallCompaction:0"}, + {"DBImpl::BackgroundCompaction():BeforePickCompaction", + "DBOptionsTest::EnableAutoCompactionAndTriggerStall:2"}, + {"DBOptionsTest::EnableAutoCompactionAndTriggerStall:3", + "DBImpl::BackgroundCompaction():AfterPickCompaction"}}); + // Block background compaction. + SyncPoint::GetInstance()->EnableProcessing(); + + switch (method_type) { + case 0: + ASSERT_OK( + dbfull()->SetOptions({{"disable_auto_compactions", "false"}})); + break; + case 1: + ASSERT_OK(dbfull()->EnableAutoCompaction( + {dbfull()->DefaultColumnFamily()})); + break; + } + TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:1"); + // Wait for stall condition recalculate. 
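+      // (How the sync points above work, as a sketch: LoadDependency({{A, B}})
+      // makes any thread reaching TEST_SYNC_POINT(B) block until some thread
+      // has passed TEST_SYNC_POINT(A); EnableProcessing() arms the points and
+      // ClearTrace()/DisableProcessing() reset them between rounds. The point
+      // names are plain strings matched exactly.)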
+ TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:2"); + + switch (option_type) { + case 0: + ASSERT_TRUE(dbfull()->TEST_write_controler().IsStopped()); + break; + case 1: + ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + break; + case 2: + ASSERT_TRUE(dbfull()->TEST_write_controler().IsStopped()); + break; + case 3: + ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped()); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + break; + } + TEST_SYNC_POINT("DBOptionsTest::EnableAutoCompactionAndTriggerStall:3"); + + // Background compaction executed. + dbfull()->TEST_WaitForCompact(); + ASSERT_FALSE(dbfull()->TEST_write_controler().IsStopped()); + ASSERT_FALSE(dbfull()->TEST_write_controler().NeedsDelay()); + } + } +} + +#endif // ROCKSDB_LITE + +} // namespace rocksdb + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/db_properties_test.cc b/external/rocksdb/db/db_properties_test.cc new file mode 100755 index 0000000000..d1e0478d41 --- /dev/null +++ b/external/rocksdb/db/db_properties_test.cc @@ -0,0 +1,1278 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
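+
+// Sketch of the perf_context pattern several tests below rely on (assuming
+// the thread-local rocksdb::perf_context declared in rocksdb/perf_context.h):
+// it is a per-thread bag of counters, reset explicitly and read back after
+// the operation of interest:
+//   SetPerfLevel(kEnableTime);
+//   perf_context.Reset();
+//   Get(1, "k1");  // any read; Get() here is the DBTestBase helper
+//   uint64_t hits = perf_context.get_from_memtable_count;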
+ +#include + +#include +#include + +#include "db/db_test_util.h" +#include "port/stack_trace.h" +#include "rocksdb/options.h" +#include "rocksdb/perf_context.h" +#include "rocksdb/perf_level.h" +#include "rocksdb/table.h" +#include "util/random.h" +#include "util/string_util.h" + +namespace rocksdb { + +class DBPropertiesTest : public DBTestBase { + public: + DBPropertiesTest() : DBTestBase("/db_properties_test") {} +}; + +#ifndef ROCKSDB_LITE +TEST_F(DBPropertiesTest, Empty) { + do { + Options options; + options.env = env_; + options.write_buffer_size = 100000; // Small write buffer + options = CurrentOptions(options); + CreateAndReopenWithCF({"pikachu"}, options); + + std::string num; + ASSERT_TRUE(dbfull()->GetProperty( + handles_[1], "rocksdb.num-entries-active-mem-table", &num)); + ASSERT_EQ("0", num); + + ASSERT_OK(Put(1, "foo", "v1")); + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_TRUE(dbfull()->GetProperty( + handles_[1], "rocksdb.num-entries-active-mem-table", &num)); + ASSERT_EQ("1", num); + + // Block sync calls + env_->delay_sstable_sync_.store(true, std::memory_order_release); + Put(1, "k1", std::string(100000, 'x')); // Fill memtable + ASSERT_TRUE(dbfull()->GetProperty( + handles_[1], "rocksdb.num-entries-active-mem-table", &num)); + ASSERT_EQ("2", num); + + Put(1, "k2", std::string(100000, 'y')); // Trigger compaction + ASSERT_TRUE(dbfull()->GetProperty( + handles_[1], "rocksdb.num-entries-active-mem-table", &num)); + ASSERT_EQ("1", num); + + ASSERT_EQ("v1", Get(1, "foo")); + // Release sync calls + env_->delay_sstable_sync_.store(false, std::memory_order_release); + + ASSERT_OK(db_->DisableFileDeletions()); + ASSERT_TRUE( + dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num)); + ASSERT_EQ("1", num); + + ASSERT_OK(db_->DisableFileDeletions()); + ASSERT_TRUE( + dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num)); + ASSERT_EQ("2", num); + + ASSERT_OK(db_->DisableFileDeletions()); + ASSERT_TRUE( + dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num)); + ASSERT_EQ("3", num); + + ASSERT_OK(db_->EnableFileDeletions(false)); + ASSERT_TRUE( + dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num)); + ASSERT_EQ("2", num); + + ASSERT_OK(db_->EnableFileDeletions()); + ASSERT_TRUE( + dbfull()->GetProperty("rocksdb.is-file-deletions-enabled", &num)); + ASSERT_EQ("0", num); + } while (ChangeOptions()); +} + +TEST_F(DBPropertiesTest, CurrentVersionNumber) { + uint64_t v1, v2, v3; + ASSERT_TRUE( + dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v1)); + Put("12345678", ""); + ASSERT_TRUE( + dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v2)); + Flush(); + ASSERT_TRUE( + dbfull()->GetIntProperty("rocksdb.current-super-version-number", &v3)); + + ASSERT_EQ(v1, v2); + ASSERT_GT(v3, v2); +} + +TEST_F(DBPropertiesTest, GetAggregatedIntPropertyTest) { + const int kKeySize = 100; + const int kValueSize = 500; + const int kKeyNum = 100; + + Options options; + options.env = env_; + options.create_if_missing = true; + options.write_buffer_size = (kKeySize + kValueSize) * kKeyNum / 10; + // Make them never flush + options.min_write_buffer_number_to_merge = 1000; + options.max_write_buffer_number = 1000; + options = CurrentOptions(options); + CreateAndReopenWithCF({"one", "two", "three", "four"}, options); + + Random rnd(301); + for (auto* handle : handles_) { + for (int i = 0; i < kKeyNum; ++i) { + db_->Put(WriteOptions(), handle, RandomString(&rnd, kKeySize), + RandomString(&rnd, kValueSize)); + } + } + + uint64_t 
manual_sum = 0; + uint64_t api_sum = 0; + uint64_t value = 0; + for (auto* handle : handles_) { + ASSERT_TRUE( + db_->GetIntProperty(handle, DB::Properties::kSizeAllMemTables, &value)); + manual_sum += value; + } + ASSERT_TRUE(db_->GetAggregatedIntProperty(DB::Properties::kSizeAllMemTables, + &api_sum)); + ASSERT_GT(manual_sum, 0); + ASSERT_EQ(manual_sum, api_sum); + + ASSERT_FALSE(db_->GetAggregatedIntProperty(DB::Properties::kDBStats, &value)); + + uint64_t before_flush_trm; + uint64_t after_flush_trm; + for (auto* handle : handles_) { + ASSERT_TRUE(db_->GetAggregatedIntProperty( + DB::Properties::kEstimateTableReadersMem, &before_flush_trm)); + + // Issue flush and expect larger memory usage of table readers. + db_->Flush(FlushOptions(), handle); + + ASSERT_TRUE(db_->GetAggregatedIntProperty( + DB::Properties::kEstimateTableReadersMem, &after_flush_trm)); + ASSERT_GT(after_flush_trm, before_flush_trm); + } +} + +namespace { +void ResetTableProperties(TableProperties* tp) { + tp->data_size = 0; + tp->index_size = 0; + tp->filter_size = 0; + tp->raw_key_size = 0; + tp->raw_value_size = 0; + tp->num_data_blocks = 0; + tp->num_entries = 0; +} + +void ParseTablePropertiesString(std::string tp_string, TableProperties* tp) { + double dummy_double; + std::replace(tp_string.begin(), tp_string.end(), ';', ' '); + std::replace(tp_string.begin(), tp_string.end(), '=', ' '); + ResetTableProperties(tp); + + sscanf(tp_string.c_str(), + "# data blocks %" SCNu64 " # entries %" SCNu64 " raw key size %" SCNu64 + " raw average key size %lf " + " raw value size %" SCNu64 + " raw average value size %lf " + " data block size %" SCNu64 " index block size %" SCNu64 + " filter block size %" SCNu64, + &tp->num_data_blocks, &tp->num_entries, &tp->raw_key_size, + &dummy_double, &tp->raw_value_size, &dummy_double, &tp->data_size, + &tp->index_size, &tp->filter_size); +} + +void VerifySimilar(uint64_t a, uint64_t b, double bias) { + ASSERT_EQ(a == 0U, b == 0U); + if (a == 0) { + return; + } + double dbl_a = static_cast(a); + double dbl_b = static_cast(b); + if (dbl_a > dbl_b) { + ASSERT_LT(static_cast(dbl_a - dbl_b) / (dbl_a + dbl_b), bias); + } else { + ASSERT_LT(static_cast(dbl_b - dbl_a) / (dbl_a + dbl_b), bias); + } +} + +void VerifyTableProperties(const TableProperties& base_tp, + const TableProperties& new_tp, + double filter_size_bias = 0.1, + double index_size_bias = 0.1, + double data_size_bias = 0.1, + double num_data_blocks_bias = 0.05) { + VerifySimilar(base_tp.data_size, new_tp.data_size, data_size_bias); + VerifySimilar(base_tp.index_size, new_tp.index_size, index_size_bias); + VerifySimilar(base_tp.filter_size, new_tp.filter_size, filter_size_bias); + VerifySimilar(base_tp.num_data_blocks, new_tp.num_data_blocks, + num_data_blocks_bias); + ASSERT_EQ(base_tp.raw_key_size, new_tp.raw_key_size); + ASSERT_EQ(base_tp.raw_value_size, new_tp.raw_value_size); + ASSERT_EQ(base_tp.num_entries, new_tp.num_entries); +} + +void GetExpectedTableProperties(TableProperties* expected_tp, + const int kKeySize, const int kValueSize, + const int kKeysPerTable, const int kTableCount, + const int kBloomBitsPerKey, + const size_t kBlockSize) { + const int kKeyCount = kTableCount * kKeysPerTable; + const int kAvgSuccessorSize = kKeySize / 5; + const int kEncodingSavePerKey = kKeySize / 4; + expected_tp->raw_key_size = kKeyCount * (kKeySize + 8); + expected_tp->raw_value_size = kKeyCount * kValueSize; + expected_tp->num_entries = kKeyCount; + expected_tp->num_data_blocks = + kTableCount * + (kKeysPerTable * (kKeySize - 
kEncodingSavePerKey + kValueSize)) / + kBlockSize; + expected_tp->data_size = + kTableCount * (kKeysPerTable * (kKeySize + 8 + kValueSize)); + expected_tp->index_size = + expected_tp->num_data_blocks * (kAvgSuccessorSize + 8); + expected_tp->filter_size = + kTableCount * (kKeysPerTable * kBloomBitsPerKey / 8); +} +} // anonymous namespace + +TEST_F(DBPropertiesTest, ValidatePropertyInfo) { + for (const auto& ppt_name_and_info : InternalStats::ppt_name_to_info) { + // If C++ gets a std::string_literal, this would be better to check at + // compile-time using static_assert. + ASSERT_TRUE(ppt_name_and_info.first.empty() || + !isdigit(ppt_name_and_info.first.back())); + + ASSERT_TRUE((ppt_name_and_info.second.handle_string == nullptr) != + (ppt_name_and_info.second.handle_int == nullptr)); + } +} + +TEST_F(DBPropertiesTest, ValidateSampleNumber) { + // When "max_open_files" is -1, we read all the files for + // "rocksdb.estimate-num-keys" computation, which is the ground truth. + // Otherwise, we sample 20 newest files to make an estimation. + // Formula: lastest_20_files_active_key_ratio * total_files + Options options = CurrentOptions(); + options.disable_auto_compactions = true; + options.level0_stop_writes_trigger = 1000; + DestroyAndReopen(options); + int key = 0; + for (int files = 20; files >= 10; files -= 10) { + for (int i = 0; i < files; i++) { + int rows = files / 10; + for (int j = 0; j < rows; j++) { + db_->Put(WriteOptions(), std::to_string(++key), "foo"); + } + db_->Flush(FlushOptions()); + } + } + std::string num; + Reopen(options); + ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num)); + ASSERT_EQ("45", num); + options.max_open_files = -1; + Reopen(options); + ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num)); + ASSERT_EQ("50", num); +} + +TEST_F(DBPropertiesTest, AggregatedTableProperties) { + for (int kTableCount = 40; kTableCount <= 100; kTableCount += 30) { + const int kKeysPerTable = 100; + const int kKeySize = 80; + const int kValueSize = 200; + const int kBloomBitsPerKey = 20; + + Options options = CurrentOptions(); + options.level0_file_num_compaction_trigger = 8; + options.compression = kNoCompression; + options.create_if_missing = true; + + BlockBasedTableOptions table_options; + table_options.filter_policy.reset( + NewBloomFilterPolicy(kBloomBitsPerKey, false)); + table_options.block_size = 1024; + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + + DestroyAndReopen(options); + + Random rnd(5632); + for (int table = 1; table <= kTableCount; ++table) { + for (int i = 0; i < kKeysPerTable; ++i) { + db_->Put(WriteOptions(), RandomString(&rnd, kKeySize), + RandomString(&rnd, kValueSize)); + } + db_->Flush(FlushOptions()); + } + std::string property; + db_->GetProperty(DB::Properties::kAggregatedTableProperties, &property); + + TableProperties expected_tp; + GetExpectedTableProperties(&expected_tp, kKeySize, kValueSize, + kKeysPerTable, kTableCount, kBloomBitsPerKey, + table_options.block_size); + + TableProperties output_tp; + ParseTablePropertiesString(property, &output_tp); + + VerifyTableProperties(expected_tp, output_tp); + } +} + +TEST_F(DBPropertiesTest, ReadLatencyHistogramByLevel) { + Options options = CurrentOptions(); + options.write_buffer_size = 110 << 10; + options.level0_file_num_compaction_trigger = 6; + options.num_levels = 4; + options.compression = kNoCompression; + options.max_bytes_for_level_base = 4500 << 10; + options.target_file_size_base = 98 << 10; + options.max_write_buffer_number = 
2;
+  options.statistics = rocksdb::CreateDBStatistics();
+  options.max_open_files = 100;
+
+  BlockBasedTableOptions table_options;
+  table_options.no_block_cache = true;
+
+  DestroyAndReopen(options);
+  int key_index = 0;
+  Random rnd(301);
+  for (int num = 0; num < 8; num++) {
+    Put("foo", "bar");
+    GenerateNewFile(&rnd, &key_index);
+    dbfull()->TEST_WaitForCompact();
+  }
+  dbfull()->TEST_WaitForCompact();
+
+  std::string prop;
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
+
+  // Get() after flushes; see the latency histogram tracked.
+  for (int key = 0; key < key_index; key++) {
+    Get(Key(key));
+  }
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
+  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
+  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
+  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
+
+  // Reopen and issue Get(). See the latency tracked
+  Reopen(options);
+  dbfull()->TEST_WaitForCompact();
+  for (int key = 0; key < key_index; key++) {
+    Get(Key(key));
+  }
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
+  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
+  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
+  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
+
+  // Reopen and issue iterating. See the latency tracked
+  Reopen(options);
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
+  ASSERT_EQ(std::string::npos, prop.find("** Level 0 read latency histogram"));
+  ASSERT_EQ(std::string::npos, prop.find("** Level 1 read latency histogram"));
+  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
+  {
+    unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
+    for (iter->Seek(Key(0)); iter->Valid(); iter->Next()) {
+    }
+  }
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop));
+  ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram"));
+  ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram"));
+  ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram"));
+
+  // options.max_open_files preloads table readers.
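+  // (Clarification, matching the assertions below: max_open_files = -1 keeps
+  // every table reader open for the lifetime of the DB, so the readers are
+  // created eagerly during Reopen() instead of lazily on first access, and
+  // the per-level read latency histograms show up before any Get() is
+  // issued.)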
+ options.max_open_files = -1; + Reopen(options); + ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop)); + ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram")); + ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram")); + ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram")); + for (int key = 0; key < key_index; key++) { + Get(Key(key)); + } + ASSERT_TRUE(dbfull()->GetProperty("rocksdb.dbstats", &prop)); + ASSERT_NE(std::string::npos, prop.find("** Level 0 read latency histogram")); + ASSERT_NE(std::string::npos, prop.find("** Level 1 read latency histogram")); + ASSERT_EQ(std::string::npos, prop.find("** Level 2 read latency histogram")); +} + +TEST_F(DBPropertiesTest, AggregatedTablePropertiesAtLevel) { + const int kTableCount = 100; + const int kKeysPerTable = 10; + const int kKeySize = 50; + const int kValueSize = 400; + const int kMaxLevel = 7; + const int kBloomBitsPerKey = 20; + Random rnd(301); + Options options = CurrentOptions(); + options.level0_file_num_compaction_trigger = 8; + options.compression = kNoCompression; + options.create_if_missing = true; + options.level0_file_num_compaction_trigger = 2; + options.target_file_size_base = 8192; + options.max_bytes_for_level_base = 10000; + options.max_bytes_for_level_multiplier = 2; + // This ensures there no compaction happening when we call GetProperty(). + options.disable_auto_compactions = true; + + BlockBasedTableOptions table_options; + table_options.filter_policy.reset( + NewBloomFilterPolicy(kBloomBitsPerKey, false)); + table_options.block_size = 1024; + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + + DestroyAndReopen(options); + + std::string level_tp_strings[kMaxLevel]; + std::string tp_string; + TableProperties level_tps[kMaxLevel]; + TableProperties tp, sum_tp, expected_tp; + for (int table = 1; table <= kTableCount; ++table) { + for (int i = 0; i < kKeysPerTable; ++i) { + db_->Put(WriteOptions(), RandomString(&rnd, kKeySize), + RandomString(&rnd, kValueSize)); + } + db_->Flush(FlushOptions()); + db_->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ResetTableProperties(&sum_tp); + for (int level = 0; level < kMaxLevel; ++level) { + db_->GetProperty( + DB::Properties::kAggregatedTablePropertiesAtLevel + ToString(level), + &level_tp_strings[level]); + ParseTablePropertiesString(level_tp_strings[level], &level_tps[level]); + sum_tp.data_size += level_tps[level].data_size; + sum_tp.index_size += level_tps[level].index_size; + sum_tp.filter_size += level_tps[level].filter_size; + sum_tp.raw_key_size += level_tps[level].raw_key_size; + sum_tp.raw_value_size += level_tps[level].raw_value_size; + sum_tp.num_data_blocks += level_tps[level].num_data_blocks; + sum_tp.num_entries += level_tps[level].num_entries; + } + db_->GetProperty(DB::Properties::kAggregatedTableProperties, &tp_string); + ParseTablePropertiesString(tp_string, &tp); + ASSERT_EQ(sum_tp.data_size, tp.data_size); + ASSERT_EQ(sum_tp.index_size, tp.index_size); + ASSERT_EQ(sum_tp.filter_size, tp.filter_size); + ASSERT_EQ(sum_tp.raw_key_size, tp.raw_key_size); + ASSERT_EQ(sum_tp.raw_value_size, tp.raw_value_size); + ASSERT_EQ(sum_tp.num_data_blocks, tp.num_data_blocks); + ASSERT_EQ(sum_tp.num_entries, tp.num_entries); + if (table > 3) { + GetExpectedTableProperties(&expected_tp, kKeySize, kValueSize, + kKeysPerTable, table, kBloomBitsPerKey, + table_options.block_size); + // Gives larger bias here as index block size, filter block size, + // and data block 
+      VerifyTableProperties(tp, expected_tp, 0.5, 0.4, 0.4, 0.25);
+    }
+  }
+}
+
+TEST_F(DBPropertiesTest, NumImmutableMemTable) {
+  do {
+    Options options = CurrentOptions();
+    WriteOptions writeOpt = WriteOptions();
+    writeOpt.disableWAL = true;
+    options.max_write_buffer_number = 4;
+    options.min_write_buffer_number_to_merge = 3;
+    options.max_write_buffer_number_to_maintain = 4;
+    options.write_buffer_size = 1000000;
+    CreateAndReopenWithCF({"pikachu"}, options);
+
+    std::string big_value(1000000 * 2, 'x');
+    std::string num;
+    SetPerfLevel(kEnableTime);
+    ASSERT_TRUE(GetPerfLevel() == kEnableTime);
+
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k1", big_value));
+    ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
+                                      "rocksdb.num-immutable-mem-table", &num));
+    ASSERT_EQ(num, "0");
+    ASSERT_TRUE(dbfull()->GetProperty(
+        handles_[1], DB::Properties::kNumImmutableMemTableFlushed, &num));
+    ASSERT_EQ(num, "0");
+    ASSERT_TRUE(dbfull()->GetProperty(
+        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
+    ASSERT_EQ(num, "1");
+    perf_context.Reset();
+    Get(1, "k1");
+    ASSERT_EQ(1, static_cast<int>(perf_context.get_from_memtable_count));
+
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
+    ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
+                                      "rocksdb.num-immutable-mem-table", &num));
+    ASSERT_EQ(num, "1");
+    ASSERT_TRUE(dbfull()->GetProperty(
+        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
+    ASSERT_EQ(num, "1");
+    ASSERT_TRUE(dbfull()->GetProperty(
+        handles_[1], "rocksdb.num-entries-imm-mem-tables", &num));
+    ASSERT_EQ(num, "1");
+
+    perf_context.Reset();
+    Get(1, "k1");
+    ASSERT_EQ(2, static_cast<int>(perf_context.get_from_memtable_count));
+    perf_context.Reset();
+    Get(1, "k2");
+    ASSERT_EQ(1, static_cast<int>(perf_context.get_from_memtable_count));
+
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k3", big_value));
+    ASSERT_TRUE(dbfull()->GetProperty(
+        handles_[1], "rocksdb.cur-size-active-mem-table", &num));
+    ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
+                                      "rocksdb.num-immutable-mem-table", &num));
+    ASSERT_EQ(num, "2");
+    ASSERT_TRUE(dbfull()->GetProperty(
+        handles_[1], "rocksdb.num-entries-active-mem-table", &num));
+    ASSERT_EQ(num, "1");
+    ASSERT_TRUE(dbfull()->GetProperty(
+        handles_[1], "rocksdb.num-entries-imm-mem-tables", &num));
+    ASSERT_EQ(num, "2");
+    perf_context.Reset();
+    Get(1, "k2");
+    ASSERT_EQ(2, static_cast<int>(perf_context.get_from_memtable_count));
+    perf_context.Reset();
+    Get(1, "k3");
+    ASSERT_EQ(1, static_cast<int>(perf_context.get_from_memtable_count));
+    perf_context.Reset();
+    Get(1, "k1");
+    ASSERT_EQ(3, static_cast<int>(perf_context.get_from_memtable_count));
+
+    ASSERT_OK(Flush(1));
+    ASSERT_TRUE(dbfull()->GetProperty(handles_[1],
+                                      "rocksdb.num-immutable-mem-table", &num));
+    ASSERT_EQ(num, "0");
+    ASSERT_TRUE(dbfull()->GetProperty(
+        handles_[1], DB::Properties::kNumImmutableMemTableFlushed, &num));
+    ASSERT_EQ(num, "3");
+    ASSERT_TRUE(dbfull()->GetProperty(
+        handles_[1], "rocksdb.cur-size-active-mem-table", &num));
+    // "192" is the size of the metadata of an empty skiplist; this would
+    // break if we change the default skiplist implementation
+    ASSERT_EQ(num, "192");
+
+    uint64_t int_num;
+    uint64_t base_total_size;
+    ASSERT_TRUE(dbfull()->GetIntProperty(
+        handles_[1], "rocksdb.estimate-num-keys", &base_total_size));
+
+    ASSERT_OK(dbfull()->Delete(writeOpt, handles_[1], "k2"));
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k3", ""));
+    ASSERT_OK(dbfull()->Delete(writeOpt, handles_[1], "k3"));
+    ASSERT_TRUE(dbfull()->GetIntProperty(
+        handles_[1], "rocksdb.num-deletes-active-mem-table", &int_num));
+    ASSERT_EQ(int_num, 2U);
+    ASSERT_TRUE(dbfull()->GetIntProperty(
+        handles_[1], "rocksdb.num-entries-active-mem-table", &int_num));
+    ASSERT_EQ(int_num, 3U);
+
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "k2", big_value));
+    ASSERT_TRUE(dbfull()->GetIntProperty(
+        handles_[1], "rocksdb.num-entries-imm-mem-tables", &int_num));
+    ASSERT_EQ(int_num, 4U);
+    ASSERT_TRUE(dbfull()->GetIntProperty(
+        handles_[1], "rocksdb.num-deletes-imm-mem-tables", &int_num));
+    ASSERT_EQ(int_num, 2U);
+
+    ASSERT_TRUE(dbfull()->GetIntProperty(
+        handles_[1], "rocksdb.estimate-num-keys", &int_num));
+    ASSERT_EQ(int_num, base_total_size + 1);
+
+    SetPerfLevel(kDisable);
+    ASSERT_TRUE(GetPerfLevel() == kDisable);
+  } while (ChangeCompactOptions());
+}
+
+TEST_F(DBPropertiesTest, GetProperty) {
+  // Set the sizes of both background thread pools to 1 and block them.
+  env_->SetBackgroundThreads(1, Env::HIGH);
+  env_->SetBackgroundThreads(1, Env::LOW);
+  test::SleepingBackgroundTask sleeping_task_low;
+  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
+                 Env::Priority::LOW);
+  test::SleepingBackgroundTask sleeping_task_high;
+  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
+                 &sleeping_task_high, Env::Priority::HIGH);
+
+  Options options = CurrentOptions();
+  WriteOptions writeOpt = WriteOptions();
+  writeOpt.disableWAL = true;
+  options.compaction_style = kCompactionStyleUniversal;
+  options.level0_file_num_compaction_trigger = 1;
+  options.compaction_options_universal.size_ratio = 50;
+  options.max_background_compactions = 1;
+  options.max_background_flushes = 1;
+  options.max_write_buffer_number = 10;
+  options.min_write_buffer_number_to_merge = 1;
+  options.max_write_buffer_number_to_maintain = 0;
+  options.write_buffer_size = 1000000;
+  Reopen(options);
+
+  std::string big_value(1000000 * 2, 'x');
+  std::string num;
+  uint64_t int_num;
+  SetPerfLevel(kEnableTime);
+
+  ASSERT_TRUE(
+      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
+  ASSERT_EQ(int_num, 0U);
+  ASSERT_TRUE(
+      dbfull()->GetIntProperty("rocksdb.estimate-live-data-size", &int_num));
+  ASSERT_EQ(int_num, 0U);
+
+  ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
+  ASSERT_EQ(num, "0");
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
+  ASSERT_EQ(num, "0");
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
+  ASSERT_EQ(num, "0");
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
+  ASSERT_EQ(num, "1");
+  perf_context.Reset();
+
+  ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
+  ASSERT_EQ(num, "1");
+  ASSERT_OK(dbfull()->Delete(writeOpt, "k-non-existing"));
+  ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.num-immutable-mem-table", &num));
+  ASSERT_EQ(num, "2");
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
+  ASSERT_EQ(num, "1");
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
+  ASSERT_EQ(num, "0");
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
+  ASSERT_EQ(num, "2");
+  // Verify the same set of properties through GetIntProperty
+  ASSERT_TRUE(
+      dbfull()->GetIntProperty("rocksdb.num-immutable-mem-table", &int_num));
+  ASSERT_EQ(int_num, 2U);
+  ASSERT_TRUE(
+      dbfull()->GetIntProperty("rocksdb.mem-table-flush-pending", &int_num));
+  ASSERT_EQ(int_num, 1U);
+  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.compaction-pending", &int_num));
+  ASSERT_EQ(int_num, 0U);
+  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num));
+  ASSERT_EQ(int_num, 2U);
+
+  ASSERT_TRUE(
+      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
+  ASSERT_EQ(int_num, 0U);
+
+  sleeping_task_high.WakeUp();
+  sleeping_task_high.WaitUntilDone();
+  dbfull()->TEST_WaitForFlushMemTable();
+
+  ASSERT_OK(dbfull()->Put(writeOpt, "k4", big_value));
+  ASSERT_OK(dbfull()->Put(writeOpt, "k5", big_value));
+  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.mem-table-flush-pending", &num));
+  ASSERT_EQ(num, "0");
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.compaction-pending", &num));
+  ASSERT_EQ(num, "1");
+  ASSERT_TRUE(dbfull()->GetProperty("rocksdb.estimate-num-keys", &num));
+  ASSERT_EQ(num, "4");
+
+  ASSERT_TRUE(
+      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
+  ASSERT_GT(int_num, 0U);
+
+  sleeping_task_low.WakeUp();
+  sleeping_task_low.WaitUntilDone();
+
+  // Wait for compaction to be done. This is important because otherwise
+  // RocksDB might schedule a compaction when reopening the database, failing
+  // assertion (A) as a result.
+  dbfull()->TEST_WaitForCompact();
+  options.max_open_files = 10;
+  Reopen(options);
+  // After reopening, no table reader is loaded, so no memory for table readers
+  ASSERT_TRUE(
+      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
+  ASSERT_EQ(int_num, 0U);  // (A)
+  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.estimate-num-keys", &int_num));
+  ASSERT_GT(int_num, 0U);
+
+  // After reading a key, at least one table reader is loaded.
+  Get("k5");
+  ASSERT_TRUE(
+      dbfull()->GetIntProperty("rocksdb.estimate-table-readers-mem", &int_num));
+  ASSERT_GT(int_num, 0U);
+
+  // Test rocksdb.num-live-versions
+  {
+    options.level0_file_num_compaction_trigger = 20;
+    Reopen(options);
+    ASSERT_TRUE(
+        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
+    ASSERT_EQ(int_num, 1U);
+
+    // Use an iterator to hold the current version
+    std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));
+
+    ASSERT_OK(dbfull()->Put(writeOpt, "k6", big_value));
+    Flush();
+    ASSERT_TRUE(
+        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
+    ASSERT_EQ(int_num, 2U);
+
+    // Use an iterator to hold the current version
+    std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
+
+    ASSERT_OK(dbfull()->Put(writeOpt, "k7", big_value));
+    Flush();
+    ASSERT_TRUE(
+        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
+    ASSERT_EQ(int_num, 3U);
+
+    iter2.reset();
+    ASSERT_TRUE(
+        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
+    ASSERT_EQ(int_num, 2U);
+
+    iter1.reset();
+    ASSERT_TRUE(
+        dbfull()->GetIntProperty("rocksdb.num-live-versions", &int_num));
+    ASSERT_EQ(int_num, 1U);
+  }
+}
+
+TEST_F(DBPropertiesTest, ApproximateMemoryUsage) {
+  const int kNumRounds = 10;
+  // TODO(noetzli) kFlushesPerRound does not really correlate with how many
+  // flushes happen.
+  const int kFlushesPerRound = 10;
+  const int kWritesPerFlush = 10;
+  const int kKeySize = 100;
+  const int kValueSize = 1000;
+  Options options;
+  options.write_buffer_size = 1000;  // small write buffer
+  options.min_write_buffer_number_to_merge = 4;
+  options.compression = kNoCompression;
+  options.create_if_missing = true;
+  options = CurrentOptions(options);
+  DestroyAndReopen(options);
+
+  Random rnd(301);
+
+  std::vector<Iterator*> iters;
+
+  uint64_t active_mem;
+  uint64_t unflushed_mem;
+  uint64_t all_mem;
+  uint64_t prev_all_mem;
+
+  // Phase 0. Verify that the initial values of all these properties are the
+  // same, as we have no mem-tables.
+  dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
+  dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
+  dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
+  ASSERT_EQ(all_mem, active_mem);
+  ASSERT_EQ(all_mem, unflushed_mem);
+
+  // Phase 1. Simply issue Put() and expect "cur-size-all-mem-tables" to equal
+  // "size-all-mem-tables"
+  for (int r = 0; r < kNumRounds; ++r) {
+    for (int f = 0; f < kFlushesPerRound; ++f) {
+      for (int w = 0; w < kWritesPerFlush; ++w) {
+        Put(RandomString(&rnd, kKeySize), RandomString(&rnd, kValueSize));
+      }
+    }
+    // Make sure that there is no flush between getting the two properties.
+    dbfull()->TEST_WaitForFlushMemTable();
+    dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
+    dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
+    // In the no-iterator case, these two numbers should be the same.
+    ASSERT_EQ(unflushed_mem, all_mem);
+  }
+  prev_all_mem = all_mem;
+
+  // Phase 2. Keep issuing Put() but also create new iterators. This time we
+  // expect "size-all-mem-tables" > "cur-size-all-mem-tables".
+  for (int r = 0; r < kNumRounds; ++r) {
+    iters.push_back(db_->NewIterator(ReadOptions()));
+    for (int f = 0; f < kFlushesPerRound; ++f) {
+      for (int w = 0; w < kWritesPerFlush; ++w) {
+        Put(RandomString(&rnd, kKeySize), RandomString(&rnd, kValueSize));
+      }
+    }
+    // Force flush to prevent flush from happening between getting the
+    // properties or after getting the properties and before the new round.
+    Flush();
+
+    // In the second round, add iterators.
+    dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
+    dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
+    dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
+    ASSERT_GT(all_mem, active_mem);
+    ASSERT_GT(all_mem, unflushed_mem);
+    ASSERT_GT(all_mem, prev_all_mem);
+    prev_all_mem = all_mem;
+  }
+
+  // Phase 3. Delete iterators and expect "size-all-mem-tables" to shrink
+  // whenever we release an iterator.
+  for (auto* iter : iters) {
+    delete iter;
+    dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
+    // Expect the size to shrink
+    ASSERT_LT(all_mem, prev_all_mem);
+    prev_all_mem = all_mem;
+  }
+
+  // Phase 4. Expect all three counters to be the same.
+  dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
+  dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
+  dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
+  ASSERT_EQ(active_mem, unflushed_mem);
+  ASSERT_EQ(unflushed_mem, all_mem);
+
+  // Phase 5. Reopen, and expect all three counters to be the same again.
+  Reopen(options);
+  dbfull()->GetIntProperty("rocksdb.cur-size-active-mem-table", &active_mem);
+  dbfull()->GetIntProperty("rocksdb.cur-size-all-mem-tables", &unflushed_mem);
+  dbfull()->GetIntProperty("rocksdb.size-all-mem-tables", &all_mem);
+  ASSERT_EQ(active_mem, unflushed_mem);
+  ASSERT_EQ(unflushed_mem, all_mem);
+}
+
+TEST_F(DBPropertiesTest, EstimatePendingCompBytes) {
+  // Set the sizes of both background thread pools to 1 and block them.
+  env_->SetBackgroundThreads(1, Env::HIGH);
+  env_->SetBackgroundThreads(1, Env::LOW);
+  test::SleepingBackgroundTask sleeping_task_low;
+  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
+                 Env::Priority::LOW);
+
+  Options options = CurrentOptions();
+  WriteOptions writeOpt = WriteOptions();
+  writeOpt.disableWAL = true;
+  options.compaction_style = kCompactionStyleLevel;
+  options.level0_file_num_compaction_trigger = 2;
+  options.max_background_compactions = 1;
+  options.max_background_flushes = 1;
+  options.max_write_buffer_number = 10;
+  options.min_write_buffer_number_to_merge = 1;
+  options.max_write_buffer_number_to_maintain = 0;
+  options.write_buffer_size = 1000000;
+  Reopen(options);
+
+  std::string big_value(1000000 * 2, 'x');
+  std::string num;
+  uint64_t int_num;
+
+  ASSERT_OK(dbfull()->Put(writeOpt, "k1", big_value));
+  Flush();
+  ASSERT_TRUE(dbfull()->GetIntProperty(
+      "rocksdb.estimate-pending-compaction-bytes", &int_num));
+  ASSERT_EQ(int_num, 0U);
+
+  ASSERT_OK(dbfull()->Put(writeOpt, "k2", big_value));
+  Flush();
+  ASSERT_TRUE(dbfull()->GetIntProperty(
+      "rocksdb.estimate-pending-compaction-bytes", &int_num));
+  ASSERT_EQ(int_num, 0U);
+
+  ASSERT_OK(dbfull()->Put(writeOpt, "k3", big_value));
+  Flush();
+  ASSERT_TRUE(dbfull()->GetIntProperty(
+      "rocksdb.estimate-pending-compaction-bytes", &int_num));
+  ASSERT_GT(int_num, 0U);
+
+  sleeping_task_low.WakeUp();
+  sleeping_task_low.WaitUntilDone();
+
+  dbfull()->TEST_WaitForCompact();
+  ASSERT_TRUE(dbfull()->GetIntProperty(
+      "rocksdb.estimate-pending-compaction-bytes", &int_num));
+  ASSERT_EQ(int_num, 0U);
+}
+
+TEST_F(DBPropertiesTest, EstimateCompressionRatio) {
+  if (!Snappy_Supported()) {
+    return;
+  }
+  const int kNumL0Files = 3;
+  const int kNumEntriesPerFile = 1000;
+
+  Options options = CurrentOptions();
+  options.compression_per_level = {kNoCompression, kSnappyCompression};
+  options.disable_auto_compactions = true;
+  options.max_background_flushes = 0;
+  options.num_levels = 2;
+  Reopen(options);
+
+  // compression ratio is -1.0 when there are no open files at the level
+  ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
+
+  const std::string kVal(100, 'a');
+  for (int i = 0; i < kNumL0Files; ++i) {
+    for (int j = 0; j < kNumEntriesPerFile; ++j) {
+      // Put common data ("key") at the end to prevent delta encoding from
+      // compressing the key effectively
+      std::string key = ToString(i) + ToString(j) + "key";
+      ASSERT_OK(dbfull()->Put(WriteOptions(), key, kVal));
+    }
+    Flush();
+  }
+
+  // no compression at L0, so the ratio is less than one
+  ASSERT_LT(CompressionRatioAtLevel(0), 1.0);
+  ASSERT_GT(CompressionRatioAtLevel(0), 0.0);
+  ASSERT_EQ(CompressionRatioAtLevel(1), -1.0);
+
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
+
+  ASSERT_EQ(CompressionRatioAtLevel(0), -1.0);
+  // Data at L1 should be highly compressed thanks to Snappy and redundant data
+  // in values (ratio is 12.846 as of 4/19/2016).
+  ASSERT_GT(CompressionRatioAtLevel(1), 10.0);
+}
+
+#endif  // ROCKSDB_LITE
+
+class CountingUserTblPropCollector : public TablePropertiesCollector {
+ public:
+  const char* Name() const override { return "CountingUserTblPropCollector"; }
+
+  Status Finish(UserCollectedProperties* properties) override {
+    std::string encoded;
+    PutVarint32(&encoded, count_);
+    *properties = UserCollectedProperties{
+        {"CountingUserTblPropCollector", message_}, {"Count", encoded},
+    };
+    return Status::OK();
+  }
+
+  Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type,
+                    SequenceNumber seq, uint64_t file_size) override {
+    ++count_;
+    return Status::OK();
+  }
+
+  virtual UserCollectedProperties GetReadableProperties() const override {
+    return UserCollectedProperties{};
+  }
+
+ private:
+  std::string message_ = "Rocksdb";
+  uint32_t count_ = 0;
+};
+
+class CountingUserTblPropCollectorFactory
+    : public TablePropertiesCollectorFactory {
+ public:
+  explicit CountingUserTblPropCollectorFactory(
+      uint32_t expected_column_family_id)
+      : expected_column_family_id_(expected_column_family_id),
+        num_created_(0) {}
+  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
+      TablePropertiesCollectorFactory::Context context) override {
+    EXPECT_EQ(expected_column_family_id_, context.column_family_id);
+    num_created_++;
+    return new CountingUserTblPropCollector();
+  }
+  const char* Name() const override {
+    return "CountingUserTblPropCollectorFactory";
+  }
+  void set_expected_column_family_id(uint32_t v) {
+    expected_column_family_id_ = v;
+  }
+  uint32_t expected_column_family_id_;
+  uint32_t num_created_;
+};
+
+class CountingDeleteTabPropCollector : public TablePropertiesCollector {
+ public:
+  const char* Name() const override { return "CountingDeleteTabPropCollector"; }
+
+  Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type,
+                    SequenceNumber seq, uint64_t file_size) override {
+    if (type == kEntryDelete) {
+      num_deletes_++;
+    }
+    return Status::OK();
+  }
+
+  bool NeedCompact() const override { return num_deletes_ > 10; }
+
+  UserCollectedProperties GetReadableProperties() const override {
+    return UserCollectedProperties{};
+  }
+
+  Status Finish(UserCollectedProperties* properties) override {
+    *properties =
+        UserCollectedProperties{{"num_delete", ToString(num_deletes_)}};
+    return Status::OK();
+  }
+
+ private:
+  uint32_t num_deletes_ = 0;
+};
+
+class CountingDeleteTabPropCollectorFactory
+    : public TablePropertiesCollectorFactory {
+ public:
+  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
+      TablePropertiesCollectorFactory::Context context) override {
+    return new CountingDeleteTabPropCollector();
+  }
+  const char* Name() const override {
+    return "CountingDeleteTabPropCollectorFactory";
+  }
+};
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBPropertiesTest, GetUserDefinedTableProperties) {
+  Options options = CurrentOptions();
+  options.level0_file_num_compaction_trigger = (1 << 30);
+  options.max_background_flushes = 0;
+  options.table_properties_collector_factories.resize(1);
+  std::shared_ptr<CountingUserTblPropCollectorFactory> collector_factory =
+      std::make_shared<CountingUserTblPropCollectorFactory>(0);
+  options.table_properties_collector_factories[0] = collector_factory;
+  Reopen(options);
+  // Create 4 tables
+  for (int table = 0; table < 4; ++table) {
+    for (int i = 0; i < 10 + table; ++i) {
+      db_->Put(WriteOptions(), ToString(table * 100 + i), "val");
+    }
+    db_->Flush(FlushOptions());
+  }
+
+  TablePropertiesCollection props;
+  ASSERT_OK(db_->GetPropertiesOfAllTables(&props));
+  ASSERT_EQ(4U, props.size());
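+  // Each file recorded its key count as a varint32 under "Count": the four
+  // files hold 10, 11, 12 and 13 keys respectively, so the counts decoded
+  // below must sum to 46.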
+  uint32_t sum = 0;
+  for (const auto& item : props) {
+    auto& user_collected = item.second->user_collected_properties;
+    ASSERT_TRUE(user_collected.find("CountingUserTblPropCollector") !=
+                user_collected.end());
+    ASSERT_EQ(user_collected.at("CountingUserTblPropCollector"), "Rocksdb");
+    ASSERT_TRUE(user_collected.find("Count") != user_collected.end());
+    Slice key(user_collected.at("Count"));
+    uint32_t count;
+    ASSERT_TRUE(GetVarint32(&key, &count));
+    sum += count;
+  }
+  ASSERT_EQ(10u + 11u + 12u + 13u, sum);
+
+  ASSERT_GT(collector_factory->num_created_, 0U);
+  collector_factory->num_created_ = 0;
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
+  ASSERT_GT(collector_factory->num_created_, 0U);
+}
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBPropertiesTest, UserDefinedTablePropertiesContext) {
+  Options options = CurrentOptions();
+  options.level0_file_num_compaction_trigger = 3;
+  options.max_background_flushes = 0;
+  options.table_properties_collector_factories.resize(1);
+  std::shared_ptr<CountingUserTblPropCollectorFactory> collector_factory =
+      std::make_shared<CountingUserTblPropCollectorFactory>(1);
+  options.table_properties_collector_factories[0] = collector_factory;
+  CreateAndReopenWithCF({"pikachu"}, options);
+  // Create 2 files
+  for (int table = 0; table < 2; ++table) {
+    for (int i = 0; i < 10 + table; ++i) {
+      Put(1, ToString(table * 100 + i), "val");
+    }
+    Flush(1);
+  }
+  ASSERT_GT(collector_factory->num_created_, 0U);
+
+  collector_factory->num_created_ = 0;
+  // Trigger automatic compactions.
+  for (int table = 0; table < 3; ++table) {
+    for (int i = 0; i < 10 + table; ++i) {
+      Put(1, ToString(table * 100 + i), "val");
+    }
+    Flush(1);
+    dbfull()->TEST_WaitForCompact();
+  }
+  ASSERT_GT(collector_factory->num_created_, 0U);
+
+  collector_factory->num_created_ = 0;
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]);
+  ASSERT_GT(collector_factory->num_created_, 0U);
+
+  // Come back to write to the default column family
+  collector_factory->num_created_ = 0;
+  collector_factory->set_expected_column_family_id(0);  // default CF
+  // Create 2 tables in the default column family
+  for (int table = 0; table < 2; ++table) {
+    for (int i = 0; i < 10 + table; ++i) {
+      Put(ToString(table * 100 + i), "val");
+    }
+    Flush();
+  }
+  ASSERT_GT(collector_factory->num_created_, 0U);
+
+  collector_factory->num_created_ = 0;
+  // Trigger automatic compactions.
+  for (int table = 0; table < 3; ++table) {
+    for (int i = 0; i < 10 + table; ++i) {
+      Put(ToString(table * 100 + i), "val");
+    }
+    Flush();
+    dbfull()->TEST_WaitForCompact();
+  }
+  ASSERT_GT(collector_factory->num_created_, 0U);
+
+  collector_factory->num_created_ = 0;
+  dbfull()->TEST_CompactRange(0, nullptr, nullptr);
+  ASSERT_GT(collector_factory->num_created_, 0U);
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBPropertiesTest, TablePropertiesNeedCompactTest) {
+  Random rnd(301);
+
+  Options options;
+  options.create_if_missing = true;
+  options.write_buffer_size = 4096;
+  options.max_write_buffer_number = 8;
+  options.level0_file_num_compaction_trigger = 2;
+  options.level0_slowdown_writes_trigger = 2;
+  options.level0_stop_writes_trigger = 4;
+  options.target_file_size_base = 2048;
+  options.max_bytes_for_level_base = 10240;
+  options.max_bytes_for_level_multiplier = 4;
+  options.soft_pending_compaction_bytes_limit = 1024 * 1024;
+  options.num_levels = 8;
+
+  std::shared_ptr<TablePropertiesCollectorFactory> collector_factory =
+      std::make_shared<CountingDeleteTabPropCollectorFactory>();
+  options.table_properties_collector_factories.resize(1);
+  options.table_properties_collector_factories[0] = collector_factory;
+
+  DestroyAndReopen(options);
+
+  const int kMaxKey = 1000;
+  for (int i = 0; i < kMaxKey; i++) {
+    ASSERT_OK(Put(Key(i), RandomString(&rnd, 102)));
+    ASSERT_OK(Put(Key(kMaxKey + i), RandomString(&rnd, 102)));
+  }
+  Flush();
+  dbfull()->TEST_WaitForCompact();
+  if (NumTableFilesAtLevel(0) == 1) {
+    // Clear Level 0 so that when we later flush a file with deletions,
+    // we don't trigger an organic compaction.
+    ASSERT_OK(Put(Key(0), ""));
+    ASSERT_OK(Put(Key(kMaxKey * 2), ""));
+    Flush();
+    dbfull()->TEST_WaitForCompact();
+  }
+  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
+
+  {
+    int c = 0;
+    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
+    iter->Seek(Key(kMaxKey - 100));
+    while (iter->Valid() && iter->key().compare(Key(kMaxKey + 100)) < 0) {
+      iter->Next();
+      ++c;
+    }
+    ASSERT_EQ(c, 200);
+  }
+
+  Delete(Key(0));
+  for (int i = kMaxKey - 100; i < kMaxKey + 100; i++) {
+    Delete(Key(i));
+  }
+  Delete(Key(kMaxKey * 2));
+
+  Flush();
+  dbfull()->TEST_WaitForCompact();
+
+  {
+    SetPerfLevel(kEnableCount);
+    perf_context.Reset();
+    int c = 0;
+    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
+    iter->Seek(Key(kMaxKey - 100));
+    while (iter->Valid() && iter->key().compare(Key(kMaxKey + 100)) < 0) {
+      iter->Next();
+    }
+    ASSERT_EQ(c, 0);
+    ASSERT_LT(perf_context.internal_delete_skipped_count, 30u);
+    ASSERT_LT(perf_context.internal_key_skipped_count, 30u);
+    SetPerfLevel(kDisable);
+  }
+}
+
+TEST_F(DBPropertiesTest, NeedCompactHintPersistentTest) {
+  Random rnd(301);
+
+  Options options;
+  options.create_if_missing = true;
+  options.max_write_buffer_number = 8;
+  options.level0_file_num_compaction_trigger = 10;
+  options.level0_slowdown_writes_trigger = 10;
+  options.level0_stop_writes_trigger = 10;
+  options.disable_auto_compactions = true;
+
+  std::shared_ptr<TablePropertiesCollectorFactory> collector_factory =
+      std::make_shared<CountingDeleteTabPropCollectorFactory>();
+  options.table_properties_collector_factories.resize(1);
+  options.table_properties_collector_factories[0] = collector_factory;
+
+  DestroyAndReopen(options);
+
+  const int kMaxKey = 100;
+  for (int i = 0; i < kMaxKey; i++) {
+    ASSERT_OK(Put(Key(i), ""));
+  }
+  Flush();
+  dbfull()->TEST_WaitForFlushMemTable();
+
+  for (int i = 1; i < kMaxKey - 1; i++) {
+    Delete(Key(i));
+  }
+  Flush();
+  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
+
+  // Restart the DB.
+  // Although the number of files didn't reach
+  // options.level0_file_num_compaction_trigger, compaction should
+  // still be triggered because of the need-compaction hint.
+  options.disable_auto_compactions = false;
+  Reopen(options);
+  dbfull()->TEST_WaitForCompact();
+  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
+  {
+    SetPerfLevel(kEnableCount);
+    perf_context.Reset();
+    int c = 0;
+    std::unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
+    for (iter->Seek(Key(0)); iter->Valid(); iter->Next()) {
+      c++;
+    }
+    ASSERT_EQ(c, 2);
+    ASSERT_EQ(perf_context.internal_delete_skipped_count, 0);
+    // We iterate every key twice. Is it a bug?
+    ASSERT_LE(perf_context.internal_key_skipped_count, 2);
+    SetPerfLevel(kDisable);
+  }
+}
+#endif  // ROCKSDB_LITE
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+  rocksdb::port::InstallStackTraceHandler();
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/external/rocksdb/db/db_sst_test.cc b/external/rocksdb/db/db_sst_test.cc
new file mode 100755
index 0000000000..58da7a9cab
--- /dev/null
+++ b/external/rocksdb/db/db_sst_test.cc
@@ -0,0 +1,1733 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/db_test_util.h"
+#include "port/stack_trace.h"
+#include "rocksdb/sst_file_manager.h"
+#include "rocksdb/sst_file_writer.h"
+#include "util/sst_file_manager_impl.h"
+#include "port/port.h"
+
+namespace rocksdb {
+
+class DBSSTTest : public DBTestBase {
+ public:
+  DBSSTTest() : DBTestBase("/db_sst_test") {}
+};
+
+TEST_F(DBSSTTest, DontDeletePendingOutputs) {
+  Options options;
+  options.env = env_;
+  options.create_if_missing = true;
+  DestroyAndReopen(options);
+
+  // Every time we write to a table file, call FOF/POF with a full DB scan.
+  // This makes sure our pending_outputs_ protection works correctly.
+  std::function<void()> purge_obsolete_files_function = [&]() {
+    JobContext job_context(0);
+    dbfull()->TEST_LockMutex();
+    dbfull()->FindObsoleteFiles(&job_context, true /*force*/);
+    dbfull()->TEST_UnlockMutex();
+    dbfull()->PurgeObsoleteFiles(job_context);
+    job_context.Clean();
+  };
+
+  env_->table_write_callback_ = &purge_obsolete_files_function;
+
+  for (int i = 0; i < 2; ++i) {
+    ASSERT_OK(Put("a", "begin"));
+    ASSERT_OK(Put("z", "end"));
+    ASSERT_OK(Flush());
+  }
+
+  // If the pending output guard does not work correctly, PurgeObsoleteFiles()
+  // will delete the file that compaction is trying to create, causing this
+  // error: db/db_test.cc:975: IO error:
+  // /tmp/rocksdbtest-1552237650/db_test/000009.sst: No such file or directory
+  Compact("a", "b");
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBSSTTest, DontDeleteMovedFile) {
+  // This test triggers move compaction and verifies that the file is not
+  // deleted when it's part of a move compaction
+  Options options = CurrentOptions();
+  options.env = env_;
+  options.create_if_missing = true;
+  options.max_bytes_for_level_base = 1024 * 1024;  // 1 MB
+  options.level0_file_num_compaction_trigger =
+      2;  // trigger compaction when we have 2 files
+  DestroyAndReopen(options);
+
+  Random rnd(301);
+  // Create two 1MB sst files
+  for (int i = 0; i < 2; ++i) {
+    // Create 1MB sst file
+    for (int j = 0; j < 100; ++j) {
+      ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024)));
+    }
+    ASSERT_OK(Flush());
+  }
+  // this should execute both L0->L1 and L1->(move)->L2 compactions
+  dbfull()->TEST_WaitForCompact();
+  ASSERT_EQ("0,0,1", FilesPerLevel(0));
+
+  // If the moved file is actually deleted (the move-safeguard in
+  // ~Version::Version() is not there), we get this failure:
+  // Corruption: Can't access /000009.sst
+  Reopen(options);
+}
+
+// This reproduces a bug where we don't delete a file because when it was
+// supposed to be deleted, it was blocked by pending_outputs
+// Consider:
+// 1. current file_number is 13
+// 2. compaction (1) starts, blocks deletion of all files starting with 13
+// (pending outputs)
+// 3. file 13 is created by compaction (2)
+// 4. file 13 is consumed by compaction (3) and file 15 was created. Since file
+// 13 has no references, it is put into VersionSet::obsolete_files_
+// 5. FindObsoleteFiles() gets file 13 from VersionSet::obsolete_files_. File 13
+// is deleted from the obsolete_files_ set.
+// 6. PurgeObsoleteFiles() tries to delete file 13, but this file is blocked by
+// pending outputs since compaction (1) is still running. It is not deleted and
+// it is not present in obsolete_files_ anymore. Therefore, we never delete it.
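+//
+// In short: a file that PurgeObsoleteFiles() skips because it is protected by
+// pending_outputs_ must remain tracked as obsolete, or it leaks on disk. The
+// test below reconstructs this interleaving by blocking a flush while running
+// two compactions.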
+TEST_F(DBSSTTest, DeleteObsoleteFilesPendingOutputs) {
+  Options options = CurrentOptions();
+  options.env = env_;
+  options.write_buffer_size = 2 * 1024 * 1024;     // 2 MB
+  options.max_bytes_for_level_base = 1024 * 1024;  // 1 MB
+  options.level0_file_num_compaction_trigger =
+      2;  // trigger compaction when we have 2 files
+  options.max_background_flushes = 2;
+  options.max_background_compactions = 2;
+
+  OnFileDeletionListener* listener = new OnFileDeletionListener();
+  options.listeners.emplace_back(listener);
+
+  Reopen(options);
+
+  Random rnd(301);
+  // Create two 1MB sst files
+  for (int i = 0; i < 2; ++i) {
+    // Create 1MB sst file
+    for (int j = 0; j < 100; ++j) {
+      ASSERT_OK(Put(Key(i * 50 + j), RandomString(&rnd, 10 * 1024)));
+    }
+    ASSERT_OK(Flush());
+  }
+  // this should execute both L0->L1 and L1->(move)->L2 compactions
+  dbfull()->TEST_WaitForCompact();
+  ASSERT_EQ("0,0,1", FilesPerLevel(0));
+
+  test::SleepingBackgroundTask blocking_thread;
+  port::Mutex mutex_;
+  bool already_blocked(false);
+
+  // block the flush
+  std::function<void()> block_first_time = [&]() {
+    bool blocking = false;
+    {
+      MutexLock l(&mutex_);
+      if (!already_blocked) {
+        blocking = true;
+        already_blocked = true;
+      }
+    }
+    if (blocking) {
+      blocking_thread.DoSleep();
+    }
+  };
+  env_->table_write_callback_ = &block_first_time;
+  // Insert 2.5MB of data, which should trigger a flush because we exceed
+  // write_buffer_size. The flush will be blocked by block_first_time;
+  // pending_file is protecting all the files created after it.
+  for (int j = 0; j < 256; ++j) {
+    ASSERT_OK(Put(Key(j), RandomString(&rnd, 10 * 1024)));
+  }
+  blocking_thread.WaitUntilSleeping();
+
+  ASSERT_OK(dbfull()->TEST_CompactRange(2, nullptr, nullptr));
+
+  ASSERT_EQ("0,0,0,1", FilesPerLevel(0));
+  std::vector<LiveFileMetaData> metadata;
+  db_->GetLiveFilesMetaData(&metadata);
+  ASSERT_EQ(metadata.size(), 1U);
+  auto file_on_L2 = metadata[0].name;
+  listener->SetExpectedFileName(dbname_ + file_on_L2);
+
+  ASSERT_OK(dbfull()->TEST_CompactRange(3, nullptr, nullptr, nullptr,
+                                        true /* disallow trivial move */));
+  ASSERT_EQ("0,0,0,0,1", FilesPerLevel(0));
+
+  // finish the flush!
+  blocking_thread.WakeUp();
+  blocking_thread.WaitUntilDone();
+  dbfull()->TEST_WaitForFlushMemTable();
+  ASSERT_EQ("1,0,0,0,1", FilesPerLevel(0));
+
+  metadata.clear();
+  db_->GetLiveFilesMetaData(&metadata);
+  ASSERT_EQ(metadata.size(), 2U);
+
+  // This file should have been deleted during the last compaction
+  ASSERT_EQ(Status::NotFound(), env_->FileExists(dbname_ + file_on_L2));
+  listener->VerifyMatchedCount(1);
+}
+
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBSSTTest, DBWithSstFileManager) {
+  std::shared_ptr<SstFileManager> sst_file_manager(NewSstFileManager(env_));
+  auto sfm = static_cast<SstFileManagerImpl*>(sst_file_manager.get());
+
+  int files_added = 0;
+  int files_deleted = 0;
+  int files_moved = 0;
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "SstFileManagerImpl::OnAddFile", [&](void* arg) { files_added++; });
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "SstFileManagerImpl::OnDeleteFile", [&](void* arg) { files_deleted++; });
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "SstFileManagerImpl::OnMoveFile", [&](void* arg) { files_moved++; });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  Options options = CurrentOptions();
+  options.sst_file_manager = sst_file_manager;
+  DestroyAndReopen(options);
+
+  Random rnd(301);
+  for (int i = 0; i < 25; i++) {
+    GenerateNewRandomFile(&rnd);
+    ASSERT_OK(Flush());
+    dbfull()->TEST_WaitForFlushMemTable();
+    dbfull()->TEST_WaitForCompact();
+    // Verify that we are tracking all sst files in dbname_
+    ASSERT_EQ(sfm->GetTrackedFiles(), GetAllSSTFiles());
+  }
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+
+  auto files_in_db = GetAllSSTFiles();
+  // Verify that we are tracking all sst files in dbname_
+  ASSERT_EQ(sfm->GetTrackedFiles(), files_in_db);
+  // Verify the total files size
+  uint64_t total_files_size = 0;
+  for (auto& file_to_size : files_in_db) {
+    total_files_size += file_to_size.second;
+  }
+  ASSERT_EQ(sfm->GetTotalSize(), total_files_size);
+  // We flushed at least 25 files
+  ASSERT_GE(files_added, 25);
+  // Compaction must have deleted some files
+  ASSERT_GT(files_deleted, 0);
+  // No files were moved
+  ASSERT_EQ(files_moved, 0);
+
+  Close();
+  Reopen(options);
+  ASSERT_EQ(sfm->GetTrackedFiles(), files_in_db);
+  ASSERT_EQ(sfm->GetTotalSize(), total_files_size);
+
+  // Verify that we track all the files again after the DB is closed and opened
+  Close();
+  sst_file_manager.reset(NewSstFileManager(env_));
+  options.sst_file_manager = sst_file_manager;
+  sfm = static_cast<SstFileManagerImpl*>(sst_file_manager.get());
+
+  Reopen(options);
+  ASSERT_EQ(sfm->GetTrackedFiles(), files_in_db);
+  ASSERT_EQ(sfm->GetTotalSize(), total_files_size);
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBSSTTest, RateLimitedDelete) {
+  Destroy(last_options_);
+  rocksdb::SyncPoint::GetInstance()->LoadDependency({
+      {"DBSSTTest::RateLimitedDelete:1",
+       "DeleteScheduler::BackgroundEmptyTrash"},
+  });
+
+  std::vector<uint64_t> penalties;
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DeleteScheduler::BackgroundEmptyTrash:Wait",
+      [&](void* arg) { penalties.push_back(*(static_cast<uint64_t*>(arg))); });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  Options options = CurrentOptions();
+  options.disable_auto_compactions = true;
+  options.env = env_;
+
+  std::string trash_dir = test::TmpDir(env_) + "/trash";
+  int64_t rate_bytes_per_sec = 1024 * 10;  // 10 KB / sec
+  Status s;
+  options.sst_file_manager.reset(NewSstFileManager(
+      env_, nullptr, trash_dir, rate_bytes_per_sec, false, &s));
+  ASSERT_OK(s);
+  auto sfm = static_cast<SstFileManagerImpl*>(options.sst_file_manager.get());
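+  // With a 10 KB/s delete rate, removing N bytes of trash should incur a wait
+  // of about N * 1000000 / rate_bytes_per_sec microseconds; the penalties
+  // captured by the sync point above are compared against exactly that
+  // formula further below.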
+
+  ASSERT_OK(TryReopen(options));
+  // Create 4 files in L0
+  for (char v = 'a'; v <= 'd'; v++) {
+    ASSERT_OK(Put("Key2", DummyString(1024, v)));
+    ASSERT_OK(Put("Key3", DummyString(1024, v)));
+    ASSERT_OK(Put("Key4", DummyString(1024, v)));
+    ASSERT_OK(Put("Key1", DummyString(1024, v)));
+    ASSERT_OK(Put("Key4", DummyString(1024, v)));
+    ASSERT_OK(Flush());
+  }
+  // We created 4 sst files in L0
+  ASSERT_EQ("4", FilesPerLevel(0));
+
+  std::vector<LiveFileMetaData> metadata;
+  db_->GetLiveFilesMetaData(&metadata);
+
+  // Compaction will move the 4 files in L0 to trash and create 1 L1 file
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+  ASSERT_EQ("0,1", FilesPerLevel(0));
+
+  uint64_t delete_start_time = env_->NowMicros();
+  // Hold BackgroundEmptyTrash
+  TEST_SYNC_POINT("DBSSTTest::RateLimitedDelete:1");
+  sfm->WaitForEmptyTrash();
+  uint64_t time_spent_deleting = env_->NowMicros() - delete_start_time;
+
+  uint64_t total_files_size = 0;
+  uint64_t expected_penalty = 0;
+  ASSERT_EQ(penalties.size(), metadata.size());
+  for (size_t i = 0; i < metadata.size(); i++) {
+    total_files_size += metadata[i].size;
+    expected_penalty = ((total_files_size * 1000000) / rate_bytes_per_sec);
+    ASSERT_EQ(expected_penalty, penalties[i]);
+  }
+  ASSERT_GT(time_spent_deleting, expected_penalty * 0.9);
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+// Create a DB with 2 db_paths, and generate multiple files in the 2
+// db_paths using CompactRangeOptions. Make sure that files that were
+// deleted from the first db_path were deleted using the DeleteScheduler and
+// files in the second path were not.
+TEST_F(DBSSTTest, DeleteSchedulerMultipleDBPaths) {
+  int bg_delete_file = 0;
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DeleteScheduler::DeleteTrashFile:DeleteFile",
+      [&](void* arg) { bg_delete_file++; });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  Options options = CurrentOptions();
+  options.disable_auto_compactions = true;
+  options.db_paths.emplace_back(dbname_, 1024 * 100);
+  options.db_paths.emplace_back(dbname_ + "_2", 1024 * 100);
+  options.env = env_;
+
+  std::string trash_dir = test::TmpDir(env_) + "/trash";
+  int64_t rate_bytes_per_sec = 1024 * 1024;  // 1 MB / sec
+  Status s;
+  options.sst_file_manager.reset(NewSstFileManager(
+      env_, nullptr, trash_dir, rate_bytes_per_sec, false, &s));
+  ASSERT_OK(s);
+  auto sfm = static_cast<SstFileManagerImpl*>(options.sst_file_manager.get());
+
+  DestroyAndReopen(options);
+
+  // Create 4 files in L0
+  for (int i = 0; i < 4; i++) {
+    ASSERT_OK(Put("Key" + ToString(i), DummyString(1024, 'A')));
+    ASSERT_OK(Flush());
+  }
+  // We created 4 sst files in L0
+  ASSERT_EQ("4", FilesPerLevel(0));
+  // Compaction will delete files from L0 in the first db path and generate a
+  // new file in L1 in the second db path
+  CompactRangeOptions compact_options;
+  compact_options.target_path_id = 1;
+  Slice begin("Key0");
+  Slice end("Key3");
+  ASSERT_OK(db_->CompactRange(compact_options, &begin, &end));
+  ASSERT_EQ("0,1", FilesPerLevel(0));
+
+  // Create 4 files in L0
+  for (int i = 4; i < 8; i++) {
+    ASSERT_OK(Put("Key" + ToString(i), DummyString(1024, 'B')));
+    ASSERT_OK(Flush());
+  }
+  ASSERT_EQ("4,1", FilesPerLevel(0));
+
+  // Compaction will delete files from L0 in the first db path and generate a
+  // new file in L1 in the second db path
+  begin = "Key4";
+  end = "Key7";
+  ASSERT_OK(db_->CompactRange(compact_options, &begin, &end));
+  ASSERT_EQ("0,2", FilesPerLevel(0));
+
+  sfm->WaitForEmptyTrash();
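+  // Two compactions moved 4 L0 files each into the trash directory, so 8
+  // files in total must have passed through the rate-limited DeleteScheduler;
+  // the L1 outputs live in the second db_path and are not trash-scheduled.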
+  ASSERT_EQ(bg_delete_file, 8);
+
+  compact_options.bottommost_level_compaction =
+      BottommostLevelCompaction::kForce;
+  ASSERT_OK(db_->CompactRange(compact_options, nullptr, nullptr));
+  ASSERT_EQ("0,1", FilesPerLevel(0));
+
+  sfm->WaitForEmptyTrash();
+  ASSERT_EQ(bg_delete_file, 8);
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+TEST_F(DBSSTTest, DestroyDBWithRateLimitedDelete) {
+  int bg_delete_file = 0;
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DeleteScheduler::DeleteTrashFile:DeleteFile",
+      [&](void* arg) { bg_delete_file++; });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  Options options = CurrentOptions();
+  options.disable_auto_compactions = true;
+  options.env = env_;
+  DestroyAndReopen(options);
+
+  // Create 4 files in L0
+  for (int i = 0; i < 4; i++) {
+    ASSERT_OK(Put("Key" + ToString(i), DummyString(1024, 'A')));
+    ASSERT_OK(Flush());
+  }
+  // We created 4 sst files in L0
+  ASSERT_EQ("4", FilesPerLevel(0));
+
+  // Close the DB and destroy it using the DeleteScheduler
+  Close();
+  std::string trash_dir = test::TmpDir(env_) + "/trash";
+  int64_t rate_bytes_per_sec = 1024 * 1024;  // 1 MB / sec
+  Status s;
+  options.sst_file_manager.reset(NewSstFileManager(
+      env_, nullptr, trash_dir, rate_bytes_per_sec, false, &s));
+  ASSERT_OK(s);
+  ASSERT_OK(DestroyDB(dbname_, options));
+
+  auto sfm = static_cast<SstFileManagerImpl*>(options.sst_file_manager.get());
+  sfm->WaitForEmptyTrash();
+  // We have deleted the 4 sst files in the delete_scheduler
+  ASSERT_EQ(bg_delete_file, 4);
+}
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBSSTTest, DBWithMaxSpaceAllowed) {
+  std::shared_ptr<SstFileManager> sst_file_manager(NewSstFileManager(env_));
+  auto sfm = static_cast<SstFileManagerImpl*>(sst_file_manager.get());
+
+  Options options = CurrentOptions();
+  options.sst_file_manager = sst_file_manager;
+  options.disable_auto_compactions = true;
+  DestroyAndReopen(options);
+
+  Random rnd(301);
+
+  // Generate a file containing 100 keys.
+  for (int i = 0; i < 100; i++) {
+    ASSERT_OK(Put(Key(i), RandomString(&rnd, 50)));
+  }
+  ASSERT_OK(Flush());
+
+  uint64_t first_file_size = 0;
+  auto files_in_db = GetAllSSTFiles(&first_file_size);
+  ASSERT_EQ(sfm->GetTotalSize(), first_file_size);
+
+  // Set the maximum allowed space usage to the current total size
+  sfm->SetMaxAllowedSpaceUsage(first_file_size + 1);
+
+  ASSERT_OK(Put("key1", "val1"));
+  // This flush will cause bg_error_ and will fail
+  ASSERT_NOK(Flush());
+}
+
+TEST_F(DBSSTTest, DBWithMaxSpaceAllowedRandomized) {
+  // This test will set a maximum allowed space for the DB, then it will
+  // keep filling the DB until the limit is reached and bg_error_ is set.
+  // When bg_error_ is set we will verify that the DB size is greater
+  // than the limit.
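+  //
+  // Rough sanity math: each key/value pair below costs ~60 bytes, so a 1 MB
+  // limit should trip after roughly 17k Puts; the ASSERT_LT on
+  // estimated_db_size only guards against the fill loop running forever.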
+
+  std::vector<uint64_t> max_space_limits_mbs = {1, 2, 4, 8, 10};
+
+  bool bg_error_set = false;
+  uint64_t total_sst_files_size = 0;
+
+  int reached_max_space_on_flush = 0;
+  int reached_max_space_on_compaction = 0;
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DBImpl::FlushMemTableToOutputFile:MaxAllowedSpaceReached",
+      [&](void* arg) {
+        bg_error_set = true;
+        GetAllSSTFiles(&total_sst_files_size);
+        reached_max_space_on_flush++;
+      });
+
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "CompactionJob::FinishCompactionOutputFile:MaxAllowedSpaceReached",
+      [&](void* arg) {
+        bg_error_set = true;
+        GetAllSSTFiles(&total_sst_files_size);
+        reached_max_space_on_compaction++;
+      });
+
+  for (auto limit_mb : max_space_limits_mbs) {
+    bg_error_set = false;
+    total_sst_files_size = 0;
+    rocksdb::SyncPoint::GetInstance()->ClearTrace();
+    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+    std::shared_ptr<SstFileManager> sst_file_manager(NewSstFileManager(env_));
+    auto sfm = static_cast<SstFileManagerImpl*>(sst_file_manager.get());
+
+    Options options = CurrentOptions();
+    options.sst_file_manager = sst_file_manager;
+    options.write_buffer_size = 1024 * 512;  // 512 KB
+    DestroyAndReopen(options);
+    Random rnd(301);
+
+    sfm->SetMaxAllowedSpaceUsage(limit_mb * 1024 * 1024);
+
+    int keys_written = 0;
+    uint64_t estimated_db_size = 0;
+    while (true) {
+      auto s = Put(RandomString(&rnd, 10), RandomString(&rnd, 50));
+      if (!s.ok()) {
+        break;
+      }
+      keys_written++;
+      // Check the estimated db size vs the db limit just to make sure we
+      // don't run into an infinite loop
+      estimated_db_size = keys_written * 60;  // ~60 bytes per key
+      ASSERT_LT(estimated_db_size, limit_mb * 1024 * 1024 * 2);
+    }
+    ASSERT_TRUE(bg_error_set);
+    ASSERT_GE(total_sst_files_size, limit_mb * 1024 * 1024);
+    rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+  }
+
+  ASSERT_GT(reached_max_space_on_flush, 0);
+  ASSERT_GT(reached_max_space_on_compaction, 0);
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBSSTTest, OpenDBWithInfiniteMaxOpenFiles) {
+  // Open DB with infinite max open files
+  //  - First iteration uses 1 thread to open files
+  //  - Second iteration uses 5 threads to open files
+  for (int iter = 0; iter < 2; iter++) {
+    Options options;
+    options.create_if_missing = true;
+    options.write_buffer_size = 100000;
+    options.disable_auto_compactions = true;
+    options.max_open_files = -1;
+    if (iter == 0) {
+      options.max_file_opening_threads = 1;
+    } else {
+      options.max_file_opening_threads = 5;
+    }
+    options = CurrentOptions(options);
+    DestroyAndReopen(options);
+
+    // Create 12 files in L0 (then move them to L2)
+    for (int i = 0; i < 12; i++) {
+      std::string k = "L2_" + Key(i);
+      ASSERT_OK(Put(k, k + std::string(1000, 'a')));
+      ASSERT_OK(Flush());
+    }
+    CompactRangeOptions compact_options;
+    compact_options.change_level = true;
+    compact_options.target_level = 2;
+    db_->CompactRange(compact_options, nullptr, nullptr);
+
+    // Create 12 files in L0
+    for (int i = 0; i < 12; i++) {
+      std::string k = "L0_" + Key(i);
+      ASSERT_OK(Put(k, k + std::string(1000, 'a')));
+      ASSERT_OK(Flush());
+    }
+    Close();
+
+    // Reopening the DB will load all existing files
+    Reopen(options);
+    ASSERT_EQ("12,0,12", FilesPerLevel(0));
+    std::vector<std::vector<FileMetaData>> files;
+    dbfull()->TEST_GetFilesMetaData(db_->DefaultColumnFamily(), &files);
+
+    for (const auto& level : files) {
+      for (const auto& file : level) {
+        ASSERT_TRUE(file.table_reader_handle != nullptr);
+      }
+    }
+
+    for (int i = 0; i < 12; i++) {
+      ASSERT_EQ(Get("L0_" + Key(i)), "L0_" + Key(i) + std::string(1000, 'a'));
+      ASSERT_EQ(Get("L2_" + Key(i)), "L2_" + Key(i) + std::string(1000, 'a'));
+    }
+  }
+}
+
+TEST_F(DBSSTTest, GetTotalSstFilesSize) {
+  Options options = CurrentOptions();
+  options.disable_auto_compactions = true;
+  options.compression = kNoCompression;
+  DestroyAndReopen(options);
+  // Generate 5 files in L0
+  for (int i = 0; i < 5; i++) {
+    for (int j = 0; j < 10; j++) {
+      std::string val = "val_file_" + ToString(i);
+      ASSERT_OK(Put(Key(j), val));
+    }
+    Flush();
+  }
+  ASSERT_EQ("5", FilesPerLevel(0));
+
+  std::vector<LiveFileMetaData> live_files_meta;
+  dbfull()->GetLiveFilesMetaData(&live_files_meta);
+  ASSERT_EQ(live_files_meta.size(), 5);
+  uint64_t single_file_size = live_files_meta[0].size;
+
+  uint64_t live_sst_files_size = 0;
+  uint64_t total_sst_files_size = 0;
+  for (const auto& file_meta : live_files_meta) {
+    live_sst_files_size += file_meta.size;
+  }
+
+  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
+                                       &total_sst_files_size));
+  // Live SST files = 5
+  // Total SST files = 5
+  ASSERT_EQ(live_sst_files_size, 5 * single_file_size);
+  ASSERT_EQ(total_sst_files_size, 5 * single_file_size);
+
+  // hold the current version
+  std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));
+
+  // Compact 5 files into 1 file in L0
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+  ASSERT_EQ("0,1", FilesPerLevel(0));
+
+  live_files_meta.clear();
+  dbfull()->GetLiveFilesMetaData(&live_files_meta);
+  ASSERT_EQ(live_files_meta.size(), 1);
+
+  live_sst_files_size = 0;
+  total_sst_files_size = 0;
+  for (const auto& file_meta : live_files_meta) {
+    live_sst_files_size += file_meta.size;
+  }
+  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
+                                       &total_sst_files_size));
+  // Live SST files = 1 (compacted file)
+  // Total SST files = 6 (5 original files + compacted file)
+  ASSERT_EQ(live_sst_files_size, 1 * single_file_size);
+  ASSERT_EQ(total_sst_files_size, 6 * single_file_size);
+
+  // hold the current version
+  std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
+
+  // Delete all keys and compact; this will delete all live files
+  for (int i = 0; i < 10; i++) {
+    ASSERT_OK(Delete(Key(i)));
+  }
+  Flush();
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+  ASSERT_EQ("", FilesPerLevel(0));
+
+  live_files_meta.clear();
+  dbfull()->GetLiveFilesMetaData(&live_files_meta);
+  ASSERT_EQ(live_files_meta.size(), 0);
+
+  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
+                                       &total_sst_files_size));
+  // Live SST files = 0
+  // Total SST files = 6 (5 original files + compacted file)
+  ASSERT_EQ(total_sst_files_size, 6 * single_file_size);
+
+  iter1.reset();
+  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
+                                       &total_sst_files_size));
+  // Live SST files = 0
+  // Total SST files = 1 (compacted file)
+  ASSERT_EQ(total_sst_files_size, 1 * single_file_size);
+
+  iter2.reset();
+  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
+                                       &total_sst_files_size));
+  // Live SST files = 0
+  // Total SST files = 0
+  ASSERT_EQ(total_sst_files_size, 0);
+}
+
+TEST_F(DBSSTTest, GetTotalSstFilesSizeVersionsFilesShared) {
+  Options options = CurrentOptions();
+  options.disable_auto_compactions = true;
+  options.compression = kNoCompression;
+  DestroyAndReopen(options);
+  // Generate 5 files in L0
+  for (int i = 0; i < 5; i++) {
+    ASSERT_OK(Put(Key(i), "val"));
+    Flush();
+  }
+  ASSERT_EQ("5", FilesPerLevel(0));
+
+  std::vector<LiveFileMetaData> live_files_meta;
+  dbfull()->GetLiveFilesMetaData(&live_files_meta);
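+  // Note: "rocksdb.total-sst-files-size" counts every file referenced by any
+  // live version (e.g. one pinned by an iterator), while GetLiveFilesMetaData()
+  // only lists files in the current version; the assertions below rely on
+  // that distinction.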
+  ASSERT_EQ(live_files_meta.size(), 5);
+  uint64_t single_file_size = live_files_meta[0].size;
+
+  uint64_t live_sst_files_size = 0;
+  uint64_t total_sst_files_size = 0;
+  for (const auto& file_meta : live_files_meta) {
+    live_sst_files_size += file_meta.size;
+  }
+
+  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
+                                       &total_sst_files_size));
+
+  // Live SST files = 5
+  // Total SST files = 5
+  ASSERT_EQ(live_sst_files_size, 5 * single_file_size);
+  ASSERT_EQ(total_sst_files_size, 5 * single_file_size);
+
+  // hold the current version
+  std::unique_ptr<Iterator> iter1(dbfull()->NewIterator(ReadOptions()));
+
+  // Compaction will do a trivial move from L0 to L1
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+  ASSERT_EQ("0,5", FilesPerLevel(0));
+
+  live_files_meta.clear();
+  dbfull()->GetLiveFilesMetaData(&live_files_meta);
+  ASSERT_EQ(live_files_meta.size(), 5);
+
+  live_sst_files_size = 0;
+  total_sst_files_size = 0;
+  for (const auto& file_meta : live_files_meta) {
+    live_sst_files_size += file_meta.size;
+  }
+  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
+                                       &total_sst_files_size));
+  // Live SST files = 5
+  // Total SST files = 5 (used in 2 versions)
+  ASSERT_EQ(live_sst_files_size, 5 * single_file_size);
+  ASSERT_EQ(total_sst_files_size, 5 * single_file_size);
+
+  // hold the current version
+  std::unique_ptr<Iterator> iter2(dbfull()->NewIterator(ReadOptions()));
+
+  // Delete all keys and compact; this will delete all live files
+  for (int i = 0; i < 5; i++) {
+    ASSERT_OK(Delete(Key(i)));
+  }
+  Flush();
+  ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+  ASSERT_EQ("", FilesPerLevel(0));
+
+  live_files_meta.clear();
+  dbfull()->GetLiveFilesMetaData(&live_files_meta);
+  ASSERT_EQ(live_files_meta.size(), 0);
+
+  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
+                                       &total_sst_files_size));
+  // Live SST files = 0
+  // Total SST files = 5 (used in 2 versions)
+  ASSERT_EQ(total_sst_files_size, 5 * single_file_size);
+
+  iter1.reset();
+  iter2.reset();
+
+  ASSERT_TRUE(dbfull()->GetIntProperty("rocksdb.total-sst-files-size",
+                                       &total_sst_files_size));
+  // Live SST files = 0
+  // Total SST files = 0
+  ASSERT_EQ(total_sst_files_size, 0);
+}
+
+TEST_F(DBSSTTest, AddExternalSstFile) {
+  do {
+    std::string sst_files_folder = test::TmpDir(env_) + "/sst_files/";
+    env_->CreateDir(sst_files_folder);
+    Options options = CurrentOptions();
+    options.env = env_;
+
+    SstFileWriter sst_file_writer(EnvOptions(), options, options.comparator);
+
+    // file1.sst (0 => 99)
+    std::string file1 = sst_files_folder + "file1.sst";
+    ASSERT_OK(sst_file_writer.Open(file1));
+    for (int k = 0; k < 100; k++) {
+      ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val"));
+    }
+    ExternalSstFileInfo file1_info;
+    Status s = sst_file_writer.Finish(&file1_info);
+    ASSERT_TRUE(s.ok()) << s.ToString();
+    ASSERT_EQ(file1_info.file_path, file1);
+    ASSERT_EQ(file1_info.num_entries, 100);
+    ASSERT_EQ(file1_info.smallest_key, Key(0));
+    ASSERT_EQ(file1_info.largest_key, Key(99));
+    // sst_file_writer already finished, cannot add this value
+    s = sst_file_writer.Add(Key(100), "bad_val");
+    ASSERT_FALSE(s.ok()) << s.ToString();
+
+    // file2.sst (100 => 199)
+    std::string file2 = sst_files_folder + "file2.sst";
+    ASSERT_OK(sst_file_writer.Open(file2));
+    for (int k = 100; k < 200; k++) {
+      ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val"));
+    }
+    // Cannot add this key because it's not after the last added key
+    s = sst_file_writer.Add(Key(99), "bad_val");
+    ASSERT_FALSE(s.ok()) << s.ToString();
+    ExternalSstFileInfo file2_info;
+    s = sst_file_writer.Finish(&file2_info);
+    ASSERT_TRUE(s.ok()) << s.ToString();
+    ASSERT_EQ(file2_info.file_path, file2);
+    ASSERT_EQ(file2_info.num_entries, 100);
+    ASSERT_EQ(file2_info.smallest_key, Key(100));
+    ASSERT_EQ(file2_info.largest_key, Key(199));
+
+    // file3.sst (195 => 299)
+    // This file's values overlap with file2's values
+    std::string file3 = sst_files_folder + "file3.sst";
+    ASSERT_OK(sst_file_writer.Open(file3));
+    for (int k = 195; k < 300; k++) {
+      ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val_overlap"));
+    }
+    ExternalSstFileInfo file3_info;
+    s = sst_file_writer.Finish(&file3_info);
+    ASSERT_TRUE(s.ok()) << s.ToString();
+    ASSERT_EQ(file3_info.file_path, file3);
+    ASSERT_EQ(file3_info.num_entries, 105);
+    ASSERT_EQ(file3_info.smallest_key, Key(195));
+    ASSERT_EQ(file3_info.largest_key, Key(299));
+
+    // file4.sst (30 => 39)
+    // This file's values overlap with file1's values
+    std::string file4 = sst_files_folder + "file4.sst";
+    ASSERT_OK(sst_file_writer.Open(file4));
+    for (int k = 30; k < 40; k++) {
+      ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val_overlap"));
+    }
+    ExternalSstFileInfo file4_info;
+    s = sst_file_writer.Finish(&file4_info);
+    ASSERT_TRUE(s.ok()) << s.ToString();
+    ASSERT_EQ(file4_info.file_path, file4);
+    ASSERT_EQ(file4_info.num_entries, 10);
+    ASSERT_EQ(file4_info.smallest_key, Key(30));
+    ASSERT_EQ(file4_info.largest_key, Key(39));
+
+    // file5.sst (400 => 499)
+    std::string file5 = sst_files_folder + "file5.sst";
+    ASSERT_OK(sst_file_writer.Open(file5));
+    for (int k = 400; k < 500; k++) {
+      ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val"));
+    }
+    ExternalSstFileInfo file5_info;
+    s = sst_file_writer.Finish(&file5_info);
+    ASSERT_TRUE(s.ok()) << s.ToString();
+    ASSERT_EQ(file5_info.file_path, file5);
+    ASSERT_EQ(file5_info.num_entries, 100);
+    ASSERT_EQ(file5_info.smallest_key, Key(400));
+    ASSERT_EQ(file5_info.largest_key, Key(499));
+
+    // Cannot create an empty sst file
+    std::string file_empty = sst_files_folder + "file_empty.sst";
+    ExternalSstFileInfo file_empty_info;
+    s = sst_file_writer.Finish(&file_empty_info);
+    ASSERT_NOK(s);
+
+    DestroyAndReopen(options);
+    // Add file using the file path
+    s = db_->AddFile(std::vector<std::string>(1, file1));
+    ASSERT_TRUE(s.ok()) << s.ToString();
+    ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U);
+    for (int k = 0; k < 100; k++) {
+      ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
+    }
+
+    // Adding a file while holding a snapshot will fail
+    const Snapshot* s1 = db_->GetSnapshot();
+    if (s1 != nullptr) {
+      ASSERT_NOK(db_->AddFile(std::vector<ExternalSstFileInfo>(1, file2_info)));
+      db_->ReleaseSnapshot(s1);
+    }
+    // We can add the file after releasing the snapshot
+    ASSERT_OK(db_->AddFile(std::vector<ExternalSstFileInfo>(1, file2_info)));
+
+    ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U);
+    for (int k = 0; k < 200; k++) {
+      ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
+    }
+
+    // This file has overlapping values with the existing data
+    s = db_->AddFile(std::vector<std::string>(1, file3));
+    ASSERT_FALSE(s.ok()) << s.ToString();
+
+    // This file has overlapping values with the existing data
+    s = db_->AddFile(std::vector<ExternalSstFileInfo>(1, file4_info));
+    ASSERT_FALSE(s.ok()) << s.ToString();
+
+    // Overwrite values of keys divisible by 5
+    for (int k = 0; k < 200; k += 5) {
+      ASSERT_OK(Put(Key(k), Key(k) + "_val_new"));
+    }
+    ASSERT_NE(db_->GetLatestSequenceNumber(), 0U);
+
+    // Key range of file5 (400 => 499) doesn't overlap with any keys in the DB
+    ASSERT_OK(db_->AddFile(std::vector<std::string>(1, file5)));
+
+    // Make sure values are correct before and after flush/compaction
+    for (int i = 0; i < 2; i++) {
+      for (int k = 0; k < 200; k++) {
+        std::string value = Key(k) + "_val";
+        if (k % 5 == 0) {
+          value += "_new";
+        }
+        ASSERT_EQ(Get(Key(k)), value);
+      }
+      for (int k = 400; k < 500; k++) {
+        std::string value = Key(k) + "_val";
+        ASSERT_EQ(Get(Key(k)), value);
+      }
+      ASSERT_OK(Flush());
+      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+    }
+
+    Close();
+    options.disable_auto_compactions = true;
+    Reopen(options);
+
+    // Delete keys in range (400 => 499)
+    for (int k = 400; k < 500; k++) {
+      ASSERT_OK(Delete(Key(k)));
+    }
+    // We deleted range (400 => 499) but cannot add file5 because
+    // of the deletion tombstones
+    ASSERT_NOK(db_->AddFile(std::vector<std::string>(1, file5)));
+
+    // Compacting the DB will remove the tombstones
+    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+
+    // Now we can add the file
+    ASSERT_OK(db_->AddFile(std::vector<std::string>(1, file5)));
+
+    // Verify values of file5 in DB
+    for (int k = 400; k < 500; k++) {
+      std::string value = Key(k) + "_val";
+      ASSERT_EQ(Get(Key(k)), value);
+    }
+  } while (ChangeOptions(kSkipPlainTable | kSkipUniversalCompaction |
+                         kSkipFIFOCompaction));
+}
+
+TEST_F(DBSSTTest, AddExternalSstFileList) {
+  do {
+    std::string sst_files_folder = test::TmpDir(env_) + "/sst_files/";
+    env_->CreateDir(sst_files_folder);
+    Options options = CurrentOptions();
+    options.env = env_;
+
+    SstFileWriter sst_file_writer(EnvOptions(), options, options.comparator);
+
+    // file1.sst (0 => 99)
+    std::string file1 = sst_files_folder + "file1.sst";
+    ASSERT_OK(sst_file_writer.Open(file1));
+    for (int k = 0; k < 100; k++) {
+      ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val"));
+    }
+    ExternalSstFileInfo file1_info;
+    Status s = sst_file_writer.Finish(&file1_info);
+    ASSERT_TRUE(s.ok()) << s.ToString();
+    ASSERT_EQ(file1_info.file_path, file1);
+    ASSERT_EQ(file1_info.num_entries, 100);
+    ASSERT_EQ(file1_info.smallest_key, Key(0));
+    ASSERT_EQ(file1_info.largest_key, Key(99));
+    // sst_file_writer already finished, cannot add this value
+    s = sst_file_writer.Add(Key(100), "bad_val");
+    ASSERT_FALSE(s.ok()) << s.ToString();
+
+    // file2.sst (100 => 199)
+    std::string file2 = sst_files_folder + "file2.sst";
+    ASSERT_OK(sst_file_writer.Open(file2));
+    for (int k = 100; k < 200; k++) {
+      ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val"));
+    }
+    // Cannot add this key because it's not after the last added key
+    s = sst_file_writer.Add(Key(99), "bad_val");
+    ASSERT_FALSE(s.ok()) << s.ToString();
+    ExternalSstFileInfo file2_info;
+    s = sst_file_writer.Finish(&file2_info);
+    ASSERT_TRUE(s.ok()) << s.ToString();
+    ASSERT_EQ(file2_info.file_path, file2);
+    ASSERT_EQ(file2_info.num_entries, 100);
+    ASSERT_EQ(file2_info.smallest_key, Key(100));
+    ASSERT_EQ(file2_info.largest_key, Key(199));
+
+    // file3.sst (195 => 199)
+    // This file's values overlap with file2's values
+    std::string file3 = sst_files_folder + "file3.sst";
+    ASSERT_OK(sst_file_writer.Open(file3));
+    for (int k = 195; k < 200; k++) {
+      ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val_overlap"));
+    }
+    ExternalSstFileInfo file3_info;
+    s = sst_file_writer.Finish(&file3_info);
+    ASSERT_TRUE(s.ok()) << s.ToString();
+    ASSERT_EQ(file3_info.file_path, file3);
+    ASSERT_EQ(file3_info.num_entries, 5);
+    ASSERT_EQ(file3_info.smallest_key, Key(195));
+    ASSERT_EQ(file3_info.largest_key, Key(199));
+
+    // file4.sst (30 => 39)
+    // This file's values overlap with file1's values
+    std::string file4 = sst_files_folder + "file4.sst";
sst_files_folder + "file4.sst";
+    ASSERT_OK(sst_file_writer.Open(file4));
+    for (int k = 30; k < 40; k++) {
+      ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val_overlap"));
+    }
+    ExternalSstFileInfo file4_info;
+    s = sst_file_writer.Finish(&file4_info);
+    ASSERT_TRUE(s.ok()) << s.ToString();
+    ASSERT_EQ(file4_info.file_path, file4);
+    ASSERT_EQ(file4_info.num_entries, 10);
+    ASSERT_EQ(file4_info.smallest_key, Key(30));
+    ASSERT_EQ(file4_info.largest_key, Key(39));
+
+    // file5.sst (200 => 299)
+    std::string file5 = sst_files_folder + "file5.sst";
+    ASSERT_OK(sst_file_writer.Open(file5));
+    for (int k = 200; k < 300; k++) {
+      ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val"));
+    }
+    ExternalSstFileInfo file5_info;
+    s = sst_file_writer.Finish(&file5_info);
+    ASSERT_TRUE(s.ok()) << s.ToString();
+    ASSERT_EQ(file5_info.file_path, file5);
+    ASSERT_EQ(file5_info.num_entries, 100);
+    ASSERT_EQ(file5_info.smallest_key, Key(200));
+    ASSERT_EQ(file5_info.largest_key, Key(299));
+
+    // list 1 has an internal key range conflict
+    std::vector<std::string> file_list0({file1, file2});
+    std::vector<std::string> file_list1({file3, file2, file1});
+    std::vector<std::string> file_list2({file5});
+    std::vector<std::string> file_list3({file3, file4});
+
+    std::vector<ExternalSstFileInfo> info_list0({file1_info, file2_info});
+    std::vector<ExternalSstFileInfo> info_list1(
+        {file3_info, file2_info, file1_info});
+    std::vector<ExternalSstFileInfo> info_list2({file5_info});
+    std::vector<ExternalSstFileInfo> info_list3({file3_info, file4_info});
+
+    DestroyAndReopen(options);
+
+    // The key ranges of the files in this list overlap with each other
+    s = db_->AddFile(file_list1);
+    ASSERT_FALSE(s.ok()) << s.ToString();
+    s = db_->AddFile(info_list1);
+    ASSERT_FALSE(s.ok()) << s.ToString();
+
+    // Add files using file path list
+    s = db_->AddFile(file_list0);
+    ASSERT_TRUE(s.ok()) << s.ToString();
+    ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U);
+    for (int k = 0; k < 200; k++) {
+      ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
+    }
+
+    // Adding a file while holding a snapshot will fail
+    const Snapshot* s1 = db_->GetSnapshot();
+    if (s1 != nullptr) {
+      ASSERT_NOK(db_->AddFile(info_list2));
+      db_->ReleaseSnapshot(s1);
+    }
+    // We can add the file after releasing the snapshot
+    ASSERT_OK(db_->AddFile(info_list2));
+    ASSERT_EQ(db_->GetLatestSequenceNumber(), 0U);
+    for (int k = 0; k < 300; k++) {
+      ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
+    }
+
+    // This file list has overlapping values with the existing data
+    s = db_->AddFile(file_list3);
+    ASSERT_FALSE(s.ok()) << s.ToString();
+    s = db_->AddFile(info_list3);
+    ASSERT_FALSE(s.ok()) << s.ToString();
+
+    // Overwrite values of keys divisible by 5
+    for (int k = 0; k < 200; k += 5) {
+      ASSERT_OK(Put(Key(k), Key(k) + "_val_new"));
+    }
+    ASSERT_NE(db_->GetLatestSequenceNumber(), 0U);
+
+    // Make sure values are correct before and after flush/compaction
+    for (int i = 0; i < 2; i++) {
+      for (int k = 0; k < 200; k++) {
+        std::string value = Key(k) + "_val";
+        if (k % 5 == 0) {
+          value += "_new";
+        }
+        ASSERT_EQ(Get(Key(k)), value);
+      }
+      for (int k = 200; k < 300; k++) {
+        std::string value = Key(k) + "_val";
+        ASSERT_EQ(Get(Key(k)), value);
+      }
+      ASSERT_OK(Flush());
+      ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+    }
+
+    // Delete keys in range (200 => 299)
+    for (int k = 200; k < 300; k++) {
+      ASSERT_OK(Delete(Key(k)));
+    }
+    // We deleted range (200 => 299) but cannot add file5 because
+    // of the range tombstones
+    ASSERT_NOK(db_->AddFile(file_list2));
+
+    // Compacting the DB will remove the tombstones
+    ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr));
+
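+    // (Why compaction helps, a note on general LSM behavior rather than
+    // something this test asserts directly: a full-range CompactRange()
+    // reaches the bottommost level, and with no snapshot held the deletion
+    // tombstones for keys 200 => 299 can be dropped entirely instead of
+    // being rewritten, leaving the key range genuinely empty for the
+    // AddFile() below.)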
+    // Now we can add the file
+    ASSERT_OK(db_->AddFile(file_list2));
+
+    // Verify values of file5 in DB
+    for (int k = 200; k < 300; k++) {
+      std::string value = Key(k) + "_val";
+      ASSERT_EQ(Get(Key(k)), value);
+    }
+  } while (ChangeOptions(kSkipPlainTable | kSkipUniversalCompaction |
+                         kSkipFIFOCompaction));
+}
+
+TEST_F(DBSSTTest, AddExternalSstFileListAtomicity) {
+  do {
+    std::string sst_files_folder = test::TmpDir(env_) + "/sst_files/";
+    env_->CreateDir(sst_files_folder);
+    Options options = CurrentOptions();
+    options.env = env_;
+
+    SstFileWriter sst_file_writer(EnvOptions(), options, options.comparator);
+
+    // files[0].sst (0 => 99)
+    // files[1].sst (100 => 199)
+    // ...
+    // files[8].sst (800 => 899)
+    int n = 9;
+    std::vector<std::string> files(n);
+    std::vector<ExternalSstFileInfo> files_info(n);
+    for (int i = 0; i < n; i++) {
+      files[i] = sst_files_folder + "file" + std::to_string(i) + ".sst";
+      ASSERT_OK(sst_file_writer.Open(files[i]));
+      for (int k = i * 100; k < (i + 1) * 100; k++) {
+        ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val"));
+      }
+      Status s = sst_file_writer.Finish(&files_info[i]);
+      ASSERT_TRUE(s.ok()) << s.ToString();
+      ASSERT_EQ(files_info[i].file_path, files[i]);
+      ASSERT_EQ(files_info[i].num_entries, 100);
+      ASSERT_EQ(files_info[i].smallest_key, Key(i * 100));
+      ASSERT_EQ(files_info[i].largest_key, Key((i + 1) * 100 - 1));
+    }
+    files.push_back(sst_files_folder + "file" + std::to_string(n) + ".sst");
+    auto s = db_->AddFile(files);
+    ASSERT_NOK(s) << s.ToString();
+    for (int k = 0; k < n * 100; k++) {
+      ASSERT_EQ("NOT_FOUND", Get(Key(k)));
+    }
+    s = db_->AddFile(files_info);
+    ASSERT_OK(s);
+    for (int k = 0; k < n * 100; k++) {
+      std::string value = Key(k) + "_val";
+      ASSERT_EQ(Get(Key(k)), value);
+    }
+  } while (ChangeOptions(kSkipPlainTable | kSkipUniversalCompaction |
+                         kSkipFIFOCompaction));
+}
+// This test reproduces a bug that can happen in some cases if the DB starts
+// purging obsolete files while we are adding an external sst file.
+// This situation may result in the file being deleted while it is being
+// added.
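+// The reproduction below is built on the SyncPoint test facility. As a
+// minimal sketch (the callback body here is a placeholder, not the test's
+// actual injected work), the pattern is: hook a named point that DBImpl
+// fires mid-AddFile, inject racing work there, then enable processing:
+//
+//   rocksdb::SyncPoint::GetInstance()->SetCallBack(
+//       "DBImpl::AddFile:FileCopied",
+//       [&](void* /* arg */) { /* racing flushes/compaction go here */ });
+//   rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+//   // ... exercise db_->AddFile(...) ...
+//   rocksdb::SyncPoint::GetInstance()->DisableProcessing();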
+TEST_F(DBSSTTest, AddExternalSstFilePurgeObsoleteFilesBug) { + std::string sst_files_folder = test::TmpDir(env_) + "/sst_files/"; + env_->CreateDir(sst_files_folder); + Options options = CurrentOptions(); + options.env = env_; + SstFileWriter sst_file_writer(EnvOptions(), options, options.comparator); + + // file1.sst (0 => 500) + std::string sst_file_path = sst_files_folder + "file1.sst"; + Status s = sst_file_writer.Open(sst_file_path); + ASSERT_OK(s); + for (int i = 0; i < 500; i++) { + std::string k = Key(i); + s = sst_file_writer.Add(k, k + "_val"); + ASSERT_OK(s); + } + + ExternalSstFileInfo sst_file_info; + s = sst_file_writer.Finish(&sst_file_info); + ASSERT_OK(s); + + options.delete_obsolete_files_period_micros = 0; + options.disable_auto_compactions = true; + DestroyAndReopen(options); + + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::AddFile:FileCopied", [&](void* arg) { + ASSERT_OK(Put("aaa", "bbb")); + ASSERT_OK(Flush()); + ASSERT_OK(Put("aaa", "xxx")); + ASSERT_OK(Flush()); + db_->CompactRange(CompactRangeOptions(), nullptr, nullptr); + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + s = db_->AddFile(std::vector(1, sst_file_path)); + ASSERT_OK(s); + + for (int i = 0; i < 500; i++) { + std::string k = Key(i); + std::string v = k + "_val"; + ASSERT_EQ(Get(k), v); + } + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + +TEST_F(DBSSTTest, AddExternalSstFileNoCopy) { + std::string sst_files_folder = test::TmpDir(env_) + "/sst_files/"; + env_->CreateDir(sst_files_folder); + Options options = CurrentOptions(); + options.env = env_; + const ImmutableCFOptions ioptions(options); + + SstFileWriter sst_file_writer(EnvOptions(), options, options.comparator); + + // file1.sst (0 => 99) + std::string file1 = sst_files_folder + "file1.sst"; + ASSERT_OK(sst_file_writer.Open(file1)); + for (int k = 0; k < 100; k++) { + ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val")); + } + ExternalSstFileInfo file1_info; + Status s = sst_file_writer.Finish(&file1_info); + ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_EQ(file1_info.file_path, file1); + ASSERT_EQ(file1_info.num_entries, 100); + ASSERT_EQ(file1_info.smallest_key, Key(0)); + ASSERT_EQ(file1_info.largest_key, Key(99)); + + // file2.sst (100 => 299) + std::string file2 = sst_files_folder + "file2.sst"; + ASSERT_OK(sst_file_writer.Open(file2)); + for (int k = 100; k < 300; k++) { + ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val")); + } + ExternalSstFileInfo file2_info; + s = sst_file_writer.Finish(&file2_info); + ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_EQ(file2_info.file_path, file2); + ASSERT_EQ(file2_info.num_entries, 200); + ASSERT_EQ(file2_info.smallest_key, Key(100)); + ASSERT_EQ(file2_info.largest_key, Key(299)); + + // file3.sst (110 => 124) .. 
overlap with file2.sst
+  std::string file3 = sst_files_folder + "file3.sst";
+  ASSERT_OK(sst_file_writer.Open(file3));
+  for (int k = 110; k < 125; k++) {
+    ASSERT_OK(sst_file_writer.Add(Key(k), Key(k) + "_val_overlap"));
+  }
+  ExternalSstFileInfo file3_info;
+  s = sst_file_writer.Finish(&file3_info);
+  ASSERT_TRUE(s.ok()) << s.ToString();
+  ASSERT_EQ(file3_info.file_path, file3);
+  ASSERT_EQ(file3_info.num_entries, 15);
+  ASSERT_EQ(file3_info.smallest_key, Key(110));
+  ASSERT_EQ(file3_info.largest_key, Key(124));
+  s = db_->AddFile(std::vector<ExternalSstFileInfo>(1, file1_info),
+                   true /* move file */);
+  ASSERT_TRUE(s.ok()) << s.ToString();
+  ASSERT_EQ(Status::NotFound(), env_->FileExists(file1));
+
+  s = db_->AddFile(std::vector<ExternalSstFileInfo>(1, file2_info),
+                   false /* copy file */);
+  ASSERT_TRUE(s.ok()) << s.ToString();
+  ASSERT_OK(env_->FileExists(file2));
+
+  // This file has overlapping values with the existing data
+  s = db_->AddFile(std::vector<ExternalSstFileInfo>(1, file3_info),
+                   true /* move file */);
+  ASSERT_FALSE(s.ok()) << s.ToString();
+  ASSERT_OK(env_->FileExists(file3));
+
+  for (int k = 0; k < 300; k++) {
+    ASSERT_EQ(Get(Key(k)), Key(k) + "_val");
+  }
+}
+
+TEST_F(DBSSTTest, AddExternalSstFileMultiThreaded) {
+  std::string sst_files_folder = test::TmpDir(env_) + "/sst_files/";
+  // Bulk load 10 files, each containing 1000 keys
+  int num_files = 10;
+  int keys_per_file = 1000;
+
+  // Generate file names
+  std::vector<std::string> file_names;
+  for (int i = 0; i < num_files; i++) {
+    std::string file_name = "file_" + ToString(i) + ".sst";
+    file_names.push_back(sst_files_folder + file_name);
+  }
+
+  do {
+    env_->CreateDir(sst_files_folder);
+    Options options = CurrentOptions();
+
+    std::atomic<int> thread_num(0);
+    std::function<void()> write_file_func = [&]() {
+      int file_idx = thread_num.fetch_add(1);
+      int range_start = file_idx * keys_per_file;
+      int range_end = range_start + keys_per_file;
+
+      SstFileWriter sst_file_writer(EnvOptions(), options, options.comparator);
+
+      ASSERT_OK(sst_file_writer.Open(file_names[file_idx]));
+
+      for (int k = range_start; k < range_end; k++) {
+        ASSERT_OK(sst_file_writer.Add(Key(k), Key(k)));
+      }
+
+      Status s = sst_file_writer.Finish();
+      ASSERT_TRUE(s.ok()) << s.ToString();
+    };
+    // Write num_files files in parallel
+    std::vector<std::thread> sst_writer_threads;
+    for (int i = 0; i < num_files; ++i) {
+      sst_writer_threads.emplace_back(write_file_func);
+    }
+
+    for (auto& t : sst_writer_threads) {
+      t.join();
+    }
+
+    fprintf(stderr, "Wrote %d files (%d keys)\n", num_files,
+            num_files * keys_per_file);
+
+    thread_num.store(0);
+    std::atomic<int> files_added(0);
+    std::function<void()> load_file_func = [&]() {
+      // We intentionally add every file twice, and assert that it was added
+      // only once and that the other add failed
+      int thread_id = thread_num.fetch_add(1);
+      int file_idx = thread_id / 2;
+      // sometimes we use copy, sometimes link ..
the result should be the same + bool move_file = (thread_id % 3 == 0); + + Status s = db_->AddFile(std::vector(1, file_names[file_idx]), + move_file); + if (s.ok()) { + files_added++; + } + }; + // Bulk load num_files files in parallel + std::vector add_file_threads; + DestroyAndReopen(options); + for (int i = 0; i < num_files * 2; ++i) { + add_file_threads.emplace_back(load_file_func); + } + + for (auto& t : add_file_threads) { + t.join(); + } + ASSERT_EQ(files_added.load(), num_files); + fprintf(stderr, "Loaded %d files (%d keys)\n", num_files, + num_files * keys_per_file); + + // Overwrite values of keys divisible by 100 + for (int k = 0; k < num_files * keys_per_file; k += 100) { + std::string key = Key(k); + Status s = Put(key, key + "_new"); + ASSERT_TRUE(s.ok()); + } + + for (int i = 0; i < 2; i++) { + // Make sure the values are correct before and after flush/compaction + for (int k = 0; k < num_files * keys_per_file; ++k) { + std::string key = Key(k); + std::string value = (k % 100 == 0) ? (key + "_new") : key; + ASSERT_EQ(Get(key), value); + } + ASSERT_OK(Flush()); + ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)); + } + + fprintf(stderr, "Verified %d values\n", num_files * keys_per_file); + } while (ChangeOptions(kSkipPlainTable | kSkipUniversalCompaction | + kSkipFIFOCompaction)); +} + +TEST_F(DBSSTTest, AddExternalSstFileOverlappingRanges) { + std::string sst_files_folder = test::TmpDir(env_) + "/sst_files/"; + Random rnd(301); + do { + env_->CreateDir(sst_files_folder); + Options options = CurrentOptions(); + DestroyAndReopen(options); + + SstFileWriter sst_file_writer(EnvOptions(), options, options.comparator); + + printf("Option config = %d\n", option_config_); + std::vector> key_ranges; + for (int i = 0; i < 500; i++) { + int range_start = rnd.Uniform(20000); + int keys_per_range = 10 + rnd.Uniform(41); + + key_ranges.emplace_back(range_start, range_start + keys_per_range); + } + + int memtable_add = 0; + int success_add_file = 0; + int failed_add_file = 0; + std::map true_data; + for (size_t i = 0; i < key_ranges.size(); i++) { + int range_start = key_ranges[i].first; + int range_end = key_ranges[i].second; + + Status s; + std::string range_val = "range_" + ToString(i); + + // For 20% of ranges we use DB::Put, for 80% we use DB::AddFile + if (i && i % 5 == 0) { + // Use DB::Put to insert range (insert into memtable) + range_val += "_put"; + for (int k = range_start; k <= range_end; k++) { + s = Put(Key(k), range_val); + ASSERT_OK(s); + } + memtable_add++; + } else { + // Use DB::AddFile to insert range + range_val += "_add_file"; + + // Generate the file containing the range + std::string file_name = sst_files_folder + env_->GenerateUniqueId(); + ASSERT_OK(sst_file_writer.Open(file_name)); + for (int k = range_start; k <= range_end; k++) { + s = sst_file_writer.Add(Key(k), range_val); + ASSERT_OK(s); + } + ExternalSstFileInfo file_info; + s = sst_file_writer.Finish(&file_info); + ASSERT_OK(s); + + // Insert the generated file + s = db_->AddFile(std::vector(1, file_info)); + + auto it = true_data.lower_bound(Key(range_start)); + if (it != true_data.end() && it->first <= Key(range_end)) { + // This range overlap with data already exist in DB + ASSERT_NOK(s); + failed_add_file++; + } else { + ASSERT_OK(s); + success_add_file++; + } + } + + if (s.ok()) { + // Update true_data map to include the new inserted data + for (int k = range_start; k <= range_end; k++) { + true_data[Key(k)] = range_val; + } + } + + // Flush / Compact the DB + if (i && i % 50 == 
0) { + Flush(); + } + if (i && i % 75 == 0) { + db_->CompactRange(CompactRangeOptions(), nullptr, nullptr); + } + } + + printf( + "Total: %" ROCKSDB_PRIszt " ranges\n" + "AddFile()|Success: %d ranges\n" + "AddFile()|RangeConflict: %d ranges\n" + "Put(): %d ranges\n", + key_ranges.size(), success_add_file, failed_add_file, memtable_add); + + // Verify the correctness of the data + for (const auto& kv : true_data) { + ASSERT_EQ(Get(kv.first), kv.second); + } + printf("keys/values verified\n"); + } while (ChangeOptions(kSkipPlainTable | kSkipUniversalCompaction | + kSkipFIFOCompaction)); +} + +TEST_F(DBSSTTest, AddExternalSstFilePickedLevel) { + std::string sst_files_folder = test::TmpDir(env_) + "/sst_files/"; + env_->CreateDir(sst_files_folder); + Options options = CurrentOptions(); + options.disable_auto_compactions = false; + options.level0_file_num_compaction_trigger = 4; + options.num_levels = 4; + options.env = env_; + DestroyAndReopen(options); + + std::vector> file_to_keys; + + // File 0 will go to last level (L3) + file_to_keys.push_back({1, 10}); + ASSERT_OK(GenerateAndAddExternalFile(options, file_to_keys.back(), + file_to_keys.size() - 1)); + EXPECT_EQ(FilesPerLevel(), "0,0,0,1"); + + // File 1 will go to level L2 (since it overlap with file 0 in L3) + file_to_keys.push_back({2, 9}); + ASSERT_OK(GenerateAndAddExternalFile(options, file_to_keys.back(), + file_to_keys.size() - 1)); + EXPECT_EQ(FilesPerLevel(), "0,0,1,1"); + + rocksdb::SyncPoint::GetInstance()->LoadDependency({ + {"DBSSTTest::AddExternalSstFilePickedLevel:0", + "BackgroundCallCompaction:0"}, + {"DBImpl::BackgroundCompaction:Start", + "DBSSTTest::AddExternalSstFilePickedLevel:1"}, + {"DBSSTTest::AddExternalSstFilePickedLevel:2", + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun"}, + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + // Flush 4 files containing the same keys + for (int i = 0; i < 4; i++) { + ASSERT_OK(Put(Key(3), Key(3) + "put")); + ASSERT_OK(Put(Key(8), Key(8) + "put")); + ASSERT_OK(Flush()); + } + + // Wait for BackgroundCompaction() to be called + TEST_SYNC_POINT("DBSSTTest::AddExternalSstFilePickedLevel:0"); + TEST_SYNC_POINT("DBSSTTest::AddExternalSstFilePickedLevel:1"); + + EXPECT_EQ(FilesPerLevel(), "4,0,1,1"); + + // This file overlaps with file 0 (L3), file 1 (L2) and the + // output of compaction going to L1 + file_to_keys.push_back({4, 7}); + ASSERT_OK(GenerateAndAddExternalFile(options, file_to_keys.back(), + file_to_keys.size() - 1)); + EXPECT_EQ(FilesPerLevel(), "5,0,1,1"); + + // This file does not overlap with any file or with the running compaction + file_to_keys.push_back({9000, 9001}); + ASSERT_OK(GenerateAndAddExternalFile(options, file_to_keys.back(), + file_to_keys.size() - 1)); + EXPECT_EQ(FilesPerLevel(), "5,0,1,2"); + + // Hold compaction from finishing + TEST_SYNC_POINT("DBSSTTest::AddExternalSstFilePickedLevel:2"); + + dbfull()->TEST_WaitForCompact(); + EXPECT_EQ(FilesPerLevel(), "1,1,1,2"); + + for (size_t file_id = 0; file_id < file_to_keys.size(); file_id++) { + for (auto& key_id : file_to_keys[file_id]) { + std::string k = Key(key_id); + std::string v = k + ToString(file_id); + if (key_id == 3 || key_id == 8) { + v = k + "put"; + } + + ASSERT_EQ(Get(k), v); + } + } + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + +TEST_F(DBSSTTest, AddExternalSstFilePickedLevelDynamic) { + std::string sst_files_folder = test::TmpDir(env_) + "/sst_files/"; + env_->CreateDir(sst_files_folder); + Options options = CurrentOptions(); + 
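+// (The level0_file_num_compaction_trigger of 4 set below matches the four
+// flushes performed later, so the background compaction starts exactly when
+// the sync-point dependencies expect it to.)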
options.disable_auto_compactions = false; + options.level0_file_num_compaction_trigger = 4; + options.level_compaction_dynamic_level_bytes = true; + options.num_levels = 4; + options.env = env_; + DestroyAndReopen(options); + + rocksdb::SyncPoint::GetInstance()->LoadDependency({ + {"DBSSTTest::AddExternalSstFilePickedLevelDynamic:0", + "BackgroundCallCompaction:0"}, + {"DBImpl::BackgroundCompaction:Start", + "DBSSTTest::AddExternalSstFilePickedLevelDynamic:1"}, + {"DBSSTTest::AddExternalSstFilePickedLevelDynamic:2", + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun"}, + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + // Flush 4 files containing the same keys + for (int i = 0; i < 4; i++) { + for (int k = 20; k <= 30; k++) { + ASSERT_OK(Put(Key(k), Key(k) + "put")); + } + for (int k = 50; k <= 60; k++) { + ASSERT_OK(Put(Key(k), Key(k) + "put")); + } + ASSERT_OK(Flush()); + } + + // Wait for BackgroundCompaction() to be called + TEST_SYNC_POINT("DBSSTTest::AddExternalSstFilePickedLevelDynamic:0"); + TEST_SYNC_POINT("DBSSTTest::AddExternalSstFilePickedLevelDynamic:1"); + std::vector> file_to_keys; + + // This file overlaps with the output of the compaction (going to L3) + // so the file will be added to L0 since L3 is the base level + file_to_keys.push_back({31, 32, 33, 34}); + ASSERT_OK(GenerateAndAddExternalFile(options, file_to_keys.back(), + file_to_keys.size() - 1)); + EXPECT_EQ(FilesPerLevel(), "5"); + + // This file does not overlap with the current running compactiong + file_to_keys.push_back({9000, 9001}); + ASSERT_OK(GenerateAndAddExternalFile(options, file_to_keys.back(), + file_to_keys.size() - 1)); + EXPECT_EQ(FilesPerLevel(), "5,0,0,1"); + + // Hold compaction from finishing + TEST_SYNC_POINT("DBSSTTest::AddExternalSstFilePickedLevelDynamic:2"); + + // Output of the compaction will go to L3 + dbfull()->TEST_WaitForCompact(); + EXPECT_EQ(FilesPerLevel(), "1,0,0,2"); + + Close(); + options.disable_auto_compactions = true; + Reopen(options); + + file_to_keys.push_back({1, 15, 19}); + ASSERT_OK(GenerateAndAddExternalFile(options, file_to_keys.back(), + file_to_keys.size() - 1)); + ASSERT_EQ(FilesPerLevel(), "1,0,0,3"); + + file_to_keys.push_back({1000, 1001, 1002}); + ASSERT_OK(GenerateAndAddExternalFile(options, file_to_keys.back(), + file_to_keys.size() - 1)); + ASSERT_EQ(FilesPerLevel(), "1,0,0,4"); + + file_to_keys.push_back({500, 600, 700}); + ASSERT_OK(GenerateAndAddExternalFile(options, file_to_keys.back(), + file_to_keys.size() - 1)); + ASSERT_EQ(FilesPerLevel(), "1,0,0,5"); + + // File 5 overlaps with file 2 (L3 / base level) + file_to_keys.push_back({2, 10}); + ASSERT_OK(GenerateAndAddExternalFile(options, file_to_keys.back(), + file_to_keys.size() - 1)); + ASSERT_EQ(FilesPerLevel(), "2,0,0,5"); + + // File 6 overlaps with file 2 (L3 / base level) and file 5 (L0) + file_to_keys.push_back({3, 9}); + ASSERT_OK(GenerateAndAddExternalFile(options, file_to_keys.back(), + file_to_keys.size() - 1)); + ASSERT_EQ(FilesPerLevel(), "3,0,0,5"); + + // Verify data in files + for (size_t file_id = 0; file_id < file_to_keys.size(); file_id++) { + for (auto& key_id : file_to_keys[file_id]) { + std::string k = Key(key_id); + std::string v = k + ToString(file_id); + + ASSERT_EQ(Get(k), v); + } + } + + // Write range [5 => 10] to L0 + for (int i = 5; i <= 10; i++) { + std::string k = Key(i); + std::string v = k + "put"; + ASSERT_OK(Put(k, v)); + } + ASSERT_OK(Flush()); + ASSERT_EQ(FilesPerLevel(), "4,0,0,5"); + + // File 7 overlaps with file 4 (L3) + 
file_to_keys.push_back({650, 651, 652}); + ASSERT_OK(GenerateAndAddExternalFile(options, file_to_keys.back(), + file_to_keys.size() - 1)); + ASSERT_EQ(FilesPerLevel(), "5,0,0,5"); + + for (size_t file_id = 0; file_id < file_to_keys.size(); file_id++) { + for (auto& key_id : file_to_keys[file_id]) { + std::string k = Key(key_id); + std::string v = k + ToString(file_id); + if (key_id >= 5 && key_id <= 10) { + v = k + "put"; + } + + ASSERT_EQ(Get(k), v); + } + } + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + +#endif // ROCKSDB_LITE + +// 1 Create some SST files by inserting K-V pairs into DB +// 2 Close DB and change suffix from ".sst" to ".ldb" for every other SST file +// 3 Open DB and check if all key can be read +TEST_F(DBSSTTest, SSTsWithLdbSuffixHandling) { + Options options = CurrentOptions(); + options.write_buffer_size = 110 << 10; // 110KB + options.num_levels = 4; + DestroyAndReopen(options); + + Random rnd(301); + int key_id = 0; + for (int i = 0; i < 10; ++i) { + GenerateNewFile(&rnd, &key_id, false); + } + Flush(); + Close(); + int const num_files = GetSstFileCount(dbname_); + ASSERT_GT(num_files, 0); + + std::vector filenames; + GetSstFiles(dbname_, &filenames); + int num_ldb_files = 0; + for (size_t i = 0; i < filenames.size(); ++i) { + if (i & 1) { + continue; + } + std::string const rdb_name = dbname_ + "/" + filenames[i]; + std::string const ldb_name = Rocks2LevelTableFileName(rdb_name); + ASSERT_TRUE(env_->RenameFile(rdb_name, ldb_name).ok()); + ++num_ldb_files; + } + ASSERT_GT(num_ldb_files, 0); + ASSERT_EQ(num_files, GetSstFileCount(dbname_)); + + Reopen(options); + for (int k = 0; k < key_id; ++k) { + ASSERT_NE("NOT_FOUND", Get(Key(k))); + } + Destroy(options); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/db_table_properties_test.cc b/external/rocksdb/db/db_table_properties_test.cc new file mode 100755 index 0000000000..de2ace0d2f --- /dev/null +++ b/external/rocksdb/db/db_table_properties_test.cc @@ -0,0 +1,260 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include +#include + +#include "db/db_test_util.h" +#include "port/stack_trace.h" +#include "rocksdb/db.h" +#include "util/testharness.h" +#include "util/testutil.h" + +#ifndef ROCKSDB_LITE + +namespace rocksdb { + +// A helper function that ensures the table properties returned in +// `GetPropertiesOfAllTablesTest` is correct. +// This test assumes entries size is different for each of the tables. 
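+// For example, with four tables holding 10, 11, 12, and 13 entries, the sum
+// is 10 + 11 + 12 + 13 = 46 and the per-table entry counts are pairwise
+// distinct, so checking the total together with the number of unique counts
+// confirms that exactly one properties object came back per table.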
+namespace {
+
+void VerifyTableProperties(DB* db, uint64_t expected_entries_size) {
+  TablePropertiesCollection props;
+  ASSERT_OK(db->GetPropertiesOfAllTables(&props));
+
+  ASSERT_EQ(4U, props.size());
+  std::unordered_set<uint64_t> unique_entries;
+
+  // Indirect test
+  uint64_t sum = 0;
+  for (const auto& item : props) {
+    unique_entries.insert(item.second->num_entries);
+    sum += item.second->num_entries;
+  }
+
+  ASSERT_EQ(props.size(), unique_entries.size());
+  ASSERT_EQ(expected_entries_size, sum);
+}
+}  // namespace
+
+class DBTablePropertiesTest : public DBTestBase {
+ public:
+  DBTablePropertiesTest() : DBTestBase("/db_table_properties_test") {}
+  TablePropertiesCollection TestGetPropertiesOfTablesInRange(
+      std::vector<Range> ranges, std::size_t* num_properties = nullptr,
+      std::size_t* num_files = nullptr);
+};
+
+TEST_F(DBTablePropertiesTest, GetPropertiesOfAllTablesTest) {
+  Options options = CurrentOptions();
+  options.level0_file_num_compaction_trigger = 8;
+  Reopen(options);
+  // Create 4 tables
+  for (int table = 0; table < 4; ++table) {
+    for (int i = 0; i < 10 + table; ++i) {
+      db_->Put(WriteOptions(), ToString(table * 100 + i), "val");
+    }
+    db_->Flush(FlushOptions());
+  }
+
+  // 1. Read table properties directly from file
+  Reopen(options);
+  VerifyTableProperties(db_, 10 + 11 + 12 + 13);
+
+  // 2. Put two tables to table cache and
+  Reopen(options);
+  // fetch key from 1st and 2nd table, which will internally place that table
+  // to the table cache.
+  for (int i = 0; i < 2; ++i) {
+    Get(ToString(i * 100 + 0));
+  }
+
+  VerifyTableProperties(db_, 10 + 11 + 12 + 13);
+
+  // 3. Put all tables to table cache
+  Reopen(options);
+  // fetch key from all tables, which will internally place those tables to
+  // the table cache.
+  for (int i = 0; i < 4; ++i) {
+    Get(ToString(i * 100 + 0));
+  }
+  VerifyTableProperties(db_, 10 + 11 + 12 + 13);
+}
+
+TablePropertiesCollection
+DBTablePropertiesTest::TestGetPropertiesOfTablesInRange(
+    std::vector<Range> ranges, std::size_t* num_properties,
+    std::size_t* num_files) {
+
+  // Since we dereference the zeroth element of the vector, it cannot be
+  // empty; otherwise we would pass an address of random memory
+  EXPECT_GT(ranges.size(), 0U);
+  // run the query
+  TablePropertiesCollection props;
+  EXPECT_OK(db_->GetPropertiesOfTablesInRange(
+      db_->DefaultColumnFamily(), &ranges[0], ranges.size(), &props));
+
+  // Make sure that we've received properties for those files, and only those
+  // files, which fall within the requested ranges
+  std::vector<LiveFileMetaData> vmd;
+  db_->GetLiveFilesMetaData(&vmd);
+  for (auto& md : vmd) {
+    std::string fn = md.db_path + md.name;
+    bool in_range = false;
+    for (auto& r : ranges) {
+      // smallestkey < limit && largestkey >= start
+      if (r.limit.compare(md.smallestkey) >= 0 &&
+          r.start.compare(md.largestkey) <= 0) {
+        in_range = true;
+        EXPECT_GT(props.count(fn), 0);
+      }
+    }
+    if (!in_range) {
+      EXPECT_EQ(props.count(fn), 0);
+    }
+  }
+
+  if (num_properties) {
+    *num_properties = props.size();
+  }
+
+  if (num_files) {
+    *num_files = vmd.size();
+  }
+  return props;
+}
+
+TEST_F(DBTablePropertiesTest, GetPropertiesOfTablesInRange) {
+  // Fixed random seed
+  Random rnd(301);
+
+  Options options;
+  options.create_if_missing = true;
+  options.write_buffer_size = 4096;
+  options.max_write_buffer_number = 3;
+  options.level0_file_num_compaction_trigger = 2;
+  options.level0_slowdown_writes_trigger = 2;
+  options.level0_stop_writes_trigger = 4;
+  options.target_file_size_base = 2048;
+  options.max_bytes_for_level_base = 10240;
+  options.max_bytes_for_level_multiplier = 4;
+  options.hard_pending_compaction_bytes_limit = 16 * 1024;
+  options.num_levels = 8;
+
+  DestroyAndReopen(options);
+
+  // build a decent LSM
+  for (int i = 0; i < 10000; i++) {
+    ASSERT_OK(Put(test::RandomKey(&rnd, 5), RandomString(&rnd, 102)));
+  }
+  Flush();
+  dbfull()->TEST_WaitForCompact();
+  if (NumTableFilesAtLevel(0) == 0) {
+    ASSERT_OK(Put(test::RandomKey(&rnd, 5), RandomString(&rnd, 102)));
+    Flush();
+  }
+
+  db_->PauseBackgroundWork();
+
+  // Ensure that we have at least L0, L1 and L2
+  ASSERT_GT(NumTableFilesAtLevel(0), 0);
+  ASSERT_GT(NumTableFilesAtLevel(1), 0);
+  ASSERT_GT(NumTableFilesAtLevel(2), 0);
+
+  // Query the largest range
+  std::size_t num_properties, num_files;
+  TestGetPropertiesOfTablesInRange(
+      {Range(test::RandomKey(&rnd, 5, test::RandomKeyType::SMALLEST),
+             test::RandomKey(&rnd, 5, test::RandomKeyType::LARGEST))},
+      &num_properties, &num_files);
+  ASSERT_EQ(num_properties, num_files);
+
+  // Query the empty range
+  TestGetPropertiesOfTablesInRange(
+      {Range(test::RandomKey(&rnd, 5, test::RandomKeyType::LARGEST),
+             test::RandomKey(&rnd, 5, test::RandomKeyType::SMALLEST))},
+      &num_properties, &num_files);
+  ASSERT_GT(num_files, 0);
+  ASSERT_EQ(num_properties, 0);
+
+  // Query the middle range
+  TestGetPropertiesOfTablesInRange(
+      {Range(test::RandomKey(&rnd, 5, test::RandomKeyType::MIDDLE),
+             test::RandomKey(&rnd, 5, test::RandomKeyType::LARGEST))},
+      &num_properties, &num_files);
+  ASSERT_GT(num_files, 0);
+  ASSERT_GT(num_files, num_properties);
+  ASSERT_GT(num_properties, 0);
+
+  // Query a bunch of random ranges
+  for (int j = 0; j < 100; j++) {
+    // create a bunch of ranges
+    std::vector<std::string> random_keys;
+    // Random::Uniform can return zero, and TestGetPropertiesOfTablesInRange()
+    // dereferences ranges[0], so n must be greater than zero; it must also be
+    // even because the loop below consumes the keys in pairs.
+    auto n = 2 * (rnd.Uniform(50) + 1);
+
+    for (uint32_t i = 0; i < n; ++i) {
+      random_keys.push_back(test::RandomKey(&rnd, 5));
+    }
+
+    ASSERT_GT(random_keys.size(), 0U);
+    ASSERT_EQ((random_keys.size() % 2), 0U);
+
+    std::vector<Range> ranges;
+    auto it = random_keys.begin();
+    while (it != random_keys.end()) {
+      ranges.push_back(Range(*it, *(it + 1)));
+      it += 2;
+    }
+
+    TestGetPropertiesOfTablesInRange(std::move(ranges));
+  }
+}
+
+TEST_F(DBTablePropertiesTest, GetColumnFamilyNameProperty) {
+  std::string kExtraCfName = "pikachu";
+  CreateAndReopenWithCF({kExtraCfName}, CurrentOptions());
+
+  // Create one table per CF, then verify it was created with the column
+  // family name property.
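+  // (The default column family should report kDefaultColumnFamilyName with
+  // id 0, and the extra one should report "pikachu" with id 1; the loop
+  // below asserts exactly that.)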
+ for (int cf = 0; cf < 2; ++cf) { + Put(cf, "key", "val"); + Flush(cf); + + TablePropertiesCollection fname_to_props; + ASSERT_OK(db_->GetPropertiesOfAllTables(handles_[cf], &fname_to_props)); + ASSERT_EQ(1U, fname_to_props.size()); + + std::string expected_cf_name; + if (cf > 0) { + expected_cf_name = kExtraCfName; + } else { + expected_cf_name = kDefaultColumnFamilyName; + } + ASSERT_EQ(expected_cf_name, + fname_to_props.begin()->second->column_family_name); + ASSERT_EQ(cf, static_cast( + fname_to_props.begin()->second->column_family_id)); + } +} + +} // namespace rocksdb + +#endif // ROCKSDB_LITE + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/db_tailing_iter_test.cc b/external/rocksdb/db/db_tailing_iter_test.cc new file mode 100755 index 0000000000..bfb62926e1 --- /dev/null +++ b/external/rocksdb/db/db_tailing_iter_test.cc @@ -0,0 +1,714 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +// Introduction of SyncPoint effectively disabled building and running this test +// in Release build. +// which is a pity, it is a good test +#if !defined(ROCKSDB_LITE) + +#include "db/db_test_util.h" +#include "db/forward_iterator.h" +#include "port/stack_trace.h" + +namespace rocksdb { + +class DBTestTailingIterator : public DBTestBase { + public: + DBTestTailingIterator() : DBTestBase("/db_tailing_iterator_test") {} +}; + +TEST_F(DBTestTailingIterator, TailingIteratorSingle) { + ReadOptions read_options; + read_options.tailing = true; + + std::unique_ptr iter(db_->NewIterator(read_options)); + iter->SeekToFirst(); + ASSERT_TRUE(!iter->Valid()); + + // add a record and check that iter can see it + ASSERT_OK(db_->Put(WriteOptions(), "mirko", "fodor")); + iter->SeekToFirst(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().ToString(), "mirko"); + + iter->Next(); + ASSERT_TRUE(!iter->Valid()); +} + +TEST_F(DBTestTailingIterator, TailingIteratorKeepAdding) { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ReadOptions read_options; + read_options.tailing = true; + + std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + std::string value(1024, 'a'); + + const int num_records = 10000; + for (int i = 0; i < num_records; ++i) { + char buf[32]; + snprintf(buf, sizeof(buf), "%016d", i); + + Slice key(buf, 16); + ASSERT_OK(Put(1, key, value)); + + iter->Seek(key); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(key), 0); + } +} + +TEST_F(DBTestTailingIterator, TailingIteratorSeekToNext) { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ReadOptions read_options; + read_options.tailing = true; + + std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + std::unique_ptr itern(db_->NewIterator(read_options, handles_[1])); + std::string value(1024, 'a'); + + const int num_records = 1000; + for (int i = 1; i < num_records; ++i) { + char buf1[32]; + char buf2[32]; + snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5); + + Slice key(buf1, 20); + 
ASSERT_OK(Put(1, key, value)); + + if (i % 100 == 99) { + ASSERT_OK(Flush(1)); + } + + snprintf(buf2, sizeof(buf2), "00a0%016d", i * 5 - 2); + Slice target(buf2, 20); + iter->Seek(target); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(key), 0); + if (i == 1) { + itern->SeekToFirst(); + } else { + itern->Next(); + } + ASSERT_TRUE(itern->Valid()); + ASSERT_EQ(itern->key().compare(key), 0); + } + rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks(); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + for (int i = 2 * num_records; i > 0; --i) { + char buf1[32]; + char buf2[32]; + snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5); + + Slice key(buf1, 20); + ASSERT_OK(Put(1, key, value)); + + if (i % 100 == 99) { + ASSERT_OK(Flush(1)); + } + + snprintf(buf2, sizeof(buf2), "00a0%016d", i * 5 - 2); + Slice target(buf2, 20); + iter->Seek(target); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(key), 0); + } +} + +TEST_F(DBTestTailingIterator, TailingIteratorTrimSeekToNext) { + const uint64_t k150KB = 150 * 1024; + Options options; + options.write_buffer_size = k150KB; + options.max_write_buffer_number = 3; + options.min_write_buffer_number_to_merge = 2; + CreateAndReopenWithCF({"pikachu"}, options); + ReadOptions read_options; + read_options.tailing = true; + int num_iters, deleted_iters; + + char bufe[32]; + snprintf(bufe, sizeof(bufe), "00b0%016d", 0); + Slice keyu(bufe, 20); + read_options.iterate_upper_bound = &keyu; + std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + std::unique_ptr itern(db_->NewIterator(read_options, handles_[1])); + std::unique_ptr iterh(db_->NewIterator(read_options, handles_[1])); + std::string value(1024, 'a'); + bool file_iters_deleted = false; + bool file_iters_renewed_null = false; + bool file_iters_renewed_copy = false; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "ForwardIterator::SeekInternal:Return", [&](void* arg) { + ForwardIterator* fiter = reinterpret_cast(arg); + ASSERT_TRUE(!file_iters_deleted || + fiter->TEST_CheckDeletedIters(&deleted_iters, &num_iters)); + }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "ForwardIterator::Next:Return", [&](void* arg) { + ForwardIterator* fiter = reinterpret_cast(arg); + ASSERT_TRUE(!file_iters_deleted || + fiter->TEST_CheckDeletedIters(&deleted_iters, &num_iters)); + }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "ForwardIterator::RenewIterators:Null", + [&](void* arg) { file_iters_renewed_null = true; }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "ForwardIterator::RenewIterators:Copy", + [&](void* arg) { file_iters_renewed_copy = true; }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + const int num_records = 1000; + for (int i = 1; i < num_records; ++i) { + char buf1[32]; + char buf2[32]; + char buf3[32]; + char buf4[32]; + snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5); + snprintf(buf3, sizeof(buf3), "00b0%016d", i * 5); + + Slice key(buf1, 20); + ASSERT_OK(Put(1, key, value)); + Slice keyn(buf3, 20); + ASSERT_OK(Put(1, keyn, value)); + + if (i % 100 == 99) { + ASSERT_OK(Flush(1)); + dbfull()->TEST_WaitForCompact(); + if (i == 299) { + file_iters_deleted = true; + } + snprintf(buf4, sizeof(buf4), "00a0%016d", i * 5 / 2); + Slice target(buf4, 20); + iterh->Seek(target); + ASSERT_TRUE(iter->Valid()); + for (int j = (i + 1) * 5 / 2; j < i * 5; j += 5) { + iterh->Next(); + ASSERT_TRUE(iterh->Valid()); + } + if (i == 299) { + file_iters_deleted = false; + } + } + + file_iters_deleted = true; + snprintf(buf2, 
sizeof(buf2), "00a0%016d", i * 5 - 2); + Slice target(buf2, 20); + iter->Seek(target); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(key), 0); + ASSERT_LE(num_iters, 1); + if (i == 1) { + itern->SeekToFirst(); + } else { + itern->Next(); + } + ASSERT_TRUE(itern->Valid()); + ASSERT_EQ(itern->key().compare(key), 0); + ASSERT_LE(num_iters, 1); + file_iters_deleted = false; + } + ASSERT_TRUE(file_iters_renewed_null); + ASSERT_TRUE(file_iters_renewed_copy); + iter = 0; + itern = 0; + iterh = 0; + BlockBasedTableOptions table_options; + table_options.no_block_cache = true; + table_options.block_cache_compressed = nullptr; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + ReopenWithColumnFamilies({"default", "pikachu"}, options); + read_options.read_tier = kBlockCacheTier; + std::unique_ptr iteri(db_->NewIterator(read_options, handles_[1])); + char buf5[32]; + snprintf(buf5, sizeof(buf5), "00a0%016d", (num_records / 2) * 5 - 2); + Slice target1(buf5, 20); + iteri->Seek(target1); + ASSERT_TRUE(iteri->status().IsIncomplete()); + iteri = 0; + + read_options.read_tier = kReadAllTier; + options.table_factory.reset(NewBlockBasedTableFactory()); + ReopenWithColumnFamilies({"default", "pikachu"}, options); + iter.reset(db_->NewIterator(read_options, handles_[1])); + for (int i = 2 * num_records; i > 0; --i) { + char buf1[32]; + char buf2[32]; + snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5); + + Slice key(buf1, 20); + ASSERT_OK(Put(1, key, value)); + + if (i % 100 == 99) { + ASSERT_OK(Flush(1)); + } + + snprintf(buf2, sizeof(buf2), "00a0%016d", i * 5 - 2); + Slice target(buf2, 20); + iter->Seek(target); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(key), 0); + } +} + +TEST_F(DBTestTailingIterator, TailingIteratorDeletes) { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ReadOptions read_options; + read_options.tailing = true; + + std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + + // write a single record, read it using the iterator, then delete it + ASSERT_OK(Put(1, "0test", "test")); + iter->SeekToFirst(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().ToString(), "0test"); + ASSERT_OK(Delete(1, "0test")); + + // write many more records + const int num_records = 10000; + std::string value(1024, 'A'); + + for (int i = 0; i < num_records; ++i) { + char buf[32]; + snprintf(buf, sizeof(buf), "1%015d", i); + + Slice key(buf, 16); + ASSERT_OK(Put(1, key, value)); + } + + // force a flush to make sure that no records are read from memtable + ASSERT_OK(Flush(1)); + + // skip "0test" + iter->Next(); + + // make sure we can read all new records using the existing iterator + int count = 0; + for (; iter->Valid(); iter->Next(), ++count) ; + + ASSERT_EQ(count, num_records); +} + +TEST_F(DBTestTailingIterator, TailingIteratorPrefixSeek) { + XFUNC_TEST("", "dbtest_prefix", prefix_skip1, XFuncPoint::SetSkip, + kSkipNoPrefix); + ReadOptions read_options; + read_options.tailing = true; + + Options options = CurrentOptions(); + options.env = env_; + options.create_if_missing = true; + options.disable_auto_compactions = true; + options.prefix_extractor.reset(NewFixedPrefixTransform(2)); + options.memtable_factory.reset(NewHashSkipListRepFactory(16)); + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + + std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(Put(1, "0101", "test")); + + ASSERT_OK(Flush(1)); + + ASSERT_OK(Put(1, "0202", "test")); + + // Seek(0102) shouldn't find 
any records since 0202 has a different prefix + iter->Seek("0102"); + ASSERT_TRUE(!iter->Valid()); + + iter->Seek("0202"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().ToString(), "0202"); + + iter->Next(); + ASSERT_TRUE(!iter->Valid()); + XFUNC_TEST("", "dbtest_prefix", prefix_skip1, XFuncPoint::SetSkip, 0); +} + +TEST_F(DBTestTailingIterator, TailingIteratorIncomplete) { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ReadOptions read_options; + read_options.tailing = true; + read_options.read_tier = kBlockCacheTier; + + std::string key("key"); + std::string value("value"); + + ASSERT_OK(db_->Put(WriteOptions(), key, value)); + + std::unique_ptr iter(db_->NewIterator(read_options)); + iter->SeekToFirst(); + // we either see the entry or it's not in cache + ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete()); + + ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)); + iter->SeekToFirst(); + // should still be true after compaction + ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete()); +} + +TEST_F(DBTestTailingIterator, TailingIteratorSeekToSame) { + Options options = CurrentOptions(); + options.compaction_style = kCompactionStyleUniversal; + options.write_buffer_size = 1000; + CreateAndReopenWithCF({"pikachu"}, options); + + ReadOptions read_options; + read_options.tailing = true; + + const int NROWS = 10000; + // Write rows with keys 00000, 00002, 00004 etc. + for (int i = 0; i < NROWS; ++i) { + char buf[100]; + snprintf(buf, sizeof(buf), "%05d", 2*i); + std::string key(buf); + std::string value("value"); + ASSERT_OK(db_->Put(WriteOptions(), key, value)); + } + + std::unique_ptr iter(db_->NewIterator(read_options)); + // Seek to 00001. We expect to find 00002. + std::string start_key = "00001"; + iter->Seek(start_key); + ASSERT_TRUE(iter->Valid()); + + std::string found = iter->key().ToString(); + ASSERT_EQ("00002", found); + + // Now seek to the same key. The iterator should remain in the same + // position. + iter->Seek(found); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(found, iter->key().ToString()); +} + +// Sets iterate_upper_bound and verifies that ForwardIterator doesn't call +// Seek() on immutable iterators when target key is >= prev_key and all +// iterators, including the memtable iterator, are over the upper bound. +TEST_F(DBTestTailingIterator, TailingIteratorUpperBound) { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + + const Slice upper_bound("20", 3); + ReadOptions read_options; + read_options.tailing = true; + read_options.iterate_upper_bound = &upper_bound; + + ASSERT_OK(Put(1, "11", "11")); + ASSERT_OK(Put(1, "12", "12")); + ASSERT_OK(Put(1, "22", "22")); + ASSERT_OK(Flush(1)); // flush all those keys to an immutable SST file + + // Add another key to the memtable. + ASSERT_OK(Put(1, "21", "21")); + + std::unique_ptr it(db_->NewIterator(read_options, handles_[1])); + it->Seek("12"); + ASSERT_TRUE(it->Valid()); + ASSERT_EQ("12", it->key().ToString()); + + it->Next(); + // Not valid since "21" is over the upper bound. + ASSERT_FALSE(it->Valid()); + + // This keeps track of the number of times NeedToSeekImmutable() was true. + int immutable_seeks = 0; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "ForwardIterator::SeekInternal:Immutable", + [&](void* arg) { ++immutable_seeks; }); + + // Seek to 13. This should not require any immutable seeks. 
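+  // ("13" is greater than the previous Seek target "12", and every iterator,
+  // including the memtable iterator, is already past the upper bound "20",
+  // so ForwardIterator can skip re-seeking its immutable iterators; the
+  // callback above would count any seek that did happen.)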
+ rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + it->Seek("13"); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + + ASSERT_FALSE(it->Valid()); + ASSERT_EQ(0, immutable_seeks); +} + +TEST_F(DBTestTailingIterator, ManagedTailingIteratorSingle) { + ReadOptions read_options; + read_options.tailing = true; + read_options.managed = true; + + std::unique_ptr iter(db_->NewIterator(read_options)); + iter->SeekToFirst(); + ASSERT_TRUE(!iter->Valid()); + + // add a record and check that iter can see it + ASSERT_OK(db_->Put(WriteOptions(), "mirko", "fodor")); + iter->SeekToFirst(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().ToString(), "mirko"); + + iter->Next(); + ASSERT_TRUE(!iter->Valid()); +} + +TEST_F(DBTestTailingIterator, ManagedTailingIteratorKeepAdding) { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ReadOptions read_options; + read_options.tailing = true; + read_options.managed = true; + + std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + std::string value(1024, 'a'); + + const int num_records = 10000; + for (int i = 0; i < num_records; ++i) { + char buf[32]; + snprintf(buf, sizeof(buf), "%016d", i); + + Slice key(buf, 16); + ASSERT_OK(Put(1, key, value)); + + iter->Seek(key); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(key), 0); + } +} + +TEST_F(DBTestTailingIterator, ManagedTailingIteratorSeekToNext) { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ReadOptions read_options; + read_options.tailing = true; + read_options.managed = true; + + std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + std::string value(1024, 'a'); + + const int num_records = 1000; + for (int i = 1; i < num_records; ++i) { + char buf1[32]; + char buf2[32]; + snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5); + + Slice key(buf1, 20); + ASSERT_OK(Put(1, key, value)); + + if (i % 100 == 99) { + ASSERT_OK(Flush(1)); + } + + snprintf(buf2, sizeof(buf2), "00a0%016d", i * 5 - 2); + Slice target(buf2, 20); + iter->Seek(target); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(key), 0); + } + for (int i = 2 * num_records; i > 0; --i) { + char buf1[32]; + char buf2[32]; + snprintf(buf1, sizeof(buf1), "00a0%016d", i * 5); + + Slice key(buf1, 20); + ASSERT_OK(Put(1, key, value)); + + if (i % 100 == 99) { + ASSERT_OK(Flush(1)); + } + + snprintf(buf2, sizeof(buf2), "00a0%016d", i * 5 - 2); + Slice target(buf2, 20); + iter->Seek(target); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(key), 0); + } +} + +TEST_F(DBTestTailingIterator, ManagedTailingIteratorDeletes) { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ReadOptions read_options; + read_options.tailing = true; + read_options.managed = true; + + std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + + // write a single record, read it using the iterator, then delete it + ASSERT_OK(Put(1, "0test", "test")); + iter->SeekToFirst(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().ToString(), "0test"); + ASSERT_OK(Delete(1, "0test")); + + // write many more records + const int num_records = 10000; + std::string value(1024, 'A'); + + for (int i = 0; i < num_records; ++i) { + char buf[32]; + snprintf(buf, sizeof(buf), "1%015d", i); + + Slice key(buf, 16); + ASSERT_OK(Put(1, key, value)); + } + + // force a flush to make sure that no records are read from memtable + ASSERT_OK(Flush(1)); + + // skip "0test" + iter->Next(); + + // make sure we can read all new records using the existing iterator + int count = 0; + for (; 
iter->Valid(); iter->Next(), ++count) { + } + + ASSERT_EQ(count, num_records); +} + +TEST_F(DBTestTailingIterator, ManagedTailingIteratorPrefixSeek) { + XFUNC_TEST("", "dbtest_prefix", prefix_skip1, XFuncPoint::SetSkip, + kSkipNoPrefix); + ReadOptions read_options; + read_options.tailing = true; + read_options.managed = true; + + Options options = CurrentOptions(); + options.env = env_; + options.create_if_missing = true; + options.disable_auto_compactions = true; + options.prefix_extractor.reset(NewFixedPrefixTransform(2)); + options.memtable_factory.reset(NewHashSkipListRepFactory(16)); + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + + std::unique_ptr iter(db_->NewIterator(read_options, handles_[1])); + ASSERT_OK(Put(1, "0101", "test")); + + ASSERT_OK(Flush(1)); + + ASSERT_OK(Put(1, "0202", "test")); + + // Seek(0102) shouldn't find any records since 0202 has a different prefix + iter->Seek("0102"); + ASSERT_TRUE(!iter->Valid()); + + iter->Seek("0202"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().ToString(), "0202"); + + iter->Next(); + ASSERT_TRUE(!iter->Valid()); + XFUNC_TEST("", "dbtest_prefix", prefix_skip1, XFuncPoint::SetSkip, 0); +} + +TEST_F(DBTestTailingIterator, ManagedTailingIteratorIncomplete) { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ReadOptions read_options; + read_options.tailing = true; + read_options.managed = true; + read_options.read_tier = kBlockCacheTier; + + std::string key = "key"; + std::string value = "value"; + + ASSERT_OK(db_->Put(WriteOptions(), key, value)); + + std::unique_ptr iter(db_->NewIterator(read_options)); + iter->SeekToFirst(); + // we either see the entry or it's not in cache + ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete()); + + ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)); + iter->SeekToFirst(); + // should still be true after compaction + ASSERT_TRUE(iter->Valid() || iter->status().IsIncomplete()); +} + +TEST_F(DBTestTailingIterator, ManagedTailingIteratorSeekToSame) { + Options options = CurrentOptions(); + options.compaction_style = kCompactionStyleUniversal; + options.write_buffer_size = 1000; + CreateAndReopenWithCF({"pikachu"}, options); + + ReadOptions read_options; + read_options.tailing = true; + read_options.managed = true; + + const int NROWS = 10000; + // Write rows with keys 00000, 00002, 00004 etc. + for (int i = 0; i < NROWS; ++i) { + char buf[100]; + snprintf(buf, sizeof(buf), "%05d", 2 * i); + std::string key(buf); + std::string value("value"); + ASSERT_OK(db_->Put(WriteOptions(), key, value)); + } + + std::unique_ptr iter(db_->NewIterator(read_options)); + // Seek to 00001. We expect to find 00002. + std::string start_key = "00001"; + iter->Seek(start_key); + ASSERT_TRUE(iter->Valid()); + + std::string found = iter->key().ToString(); + ASSERT_EQ("00002", found); + + // Now seek to the same key. The iterator should remain in the same + // position. 
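+  // (Seek() places the iterator at the first key >= target, so seeking to
+  // the key it is already standing on should not move it.)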
+ iter->Seek(found); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(found, iter->key().ToString()); +} + +TEST_F(DBTestTailingIterator, ForwardIteratorVersionProperty) { + Options options = CurrentOptions(); + options.write_buffer_size = 1000; + + ReadOptions read_options; + read_options.tailing = true; + + Put("foo", "bar"); + + uint64_t v1, v2, v3, v4; + { + std::unique_ptr iter(db_->NewIterator(read_options)); + iter->Seek("foo"); + std::string prop_value; + ASSERT_OK(iter->GetProperty("rocksdb.iterator.super-version-number", + &prop_value)); + v1 = static_cast(std::atoi(prop_value.c_str())); + + Put("foo1", "bar1"); + Flush(); + + ASSERT_OK(iter->GetProperty("rocksdb.iterator.super-version-number", + &prop_value)); + v2 = static_cast(std::atoi(prop_value.c_str())); + + iter->Seek("f"); + + ASSERT_OK(iter->GetProperty("rocksdb.iterator.super-version-number", + &prop_value)); + v3 = static_cast(std::atoi(prop_value.c_str())); + + ASSERT_EQ(v1, v2); + ASSERT_GT(v3, v2); + } + + { + std::unique_ptr iter(db_->NewIterator(read_options)); + iter->Seek("foo"); + std::string prop_value; + ASSERT_OK(iter->GetProperty("rocksdb.iterator.super-version-number", + &prop_value)); + v4 = static_cast(std::atoi(prop_value.c_str())); + } + ASSERT_EQ(v3, v4); +} +} // namespace rocksdb + +#endif // !defined(ROCKSDB_LITE) + +int main(int argc, char** argv) { +#if !defined(ROCKSDB_LITE) + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +#else + return 0; +#endif +} diff --git a/external/rocksdb/db/db_test.cc b/external/rocksdb/db/db_test.cc new file mode 100755 index 0000000000..5bcc6a2136 --- /dev/null +++ b/external/rocksdb/db/db_test.cc @@ -0,0 +1,5724 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +// Introduction of SyncPoint effectively disabled building and running this test +// in Release build. 
+// which is a pity, it is a good test +#include +#include +#include +#include +#include +#include +#ifndef OS_WIN +#include +#endif +#ifdef OS_SOLARIS +#include +#endif + +#include "db/db_impl.h" +#include "db/db_test_util.h" +#include "db/dbformat.h" +#include "db/filename.h" +#include "db/job_context.h" +#include "db/version_set.h" +#include "db/write_batch_internal.h" +#include "memtable/hash_linklist_rep.h" +#include "port/stack_trace.h" +#include "rocksdb/cache.h" +#include "rocksdb/compaction_filter.h" +#include "rocksdb/convenience.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/experimental.h" +#include "rocksdb/filter_policy.h" +#include "rocksdb/options.h" +#include "rocksdb/perf_context.h" +#include "rocksdb/slice.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/snapshot.h" +#include "rocksdb/table.h" +#include "rocksdb/table_properties.h" +#include "rocksdb/thread_status.h" +#include "rocksdb/utilities/checkpoint.h" +#include "rocksdb/utilities/optimistic_transaction_db.h" +#include "rocksdb/utilities/write_batch_with_index.h" +#include "table/block_based_table_factory.h" +#include "table/mock_table.h" +#include "table/plain_table_factory.h" +#include "table/scoped_arena_iterator.h" +#include "util/compression.h" +#include "util/file_reader_writer.h" +#include "util/hash.h" +#include "util/logging.h" +#include "util/mock_env.h" +#include "util/mutexlock.h" +#include "util/rate_limiter.h" +#include "util/string_util.h" +#include "util/sync_point.h" +#include "util/testharness.h" +#include "util/testutil.h" +#include "util/thread_status_util.h" +#include "util/xfunc.h" +#include "utilities/merge_operators.h" + +namespace rocksdb { + +class DBTest : public DBTestBase { + public: + DBTest() : DBTestBase("/db_test") {} +}; + +class DBTestWithParam + : public DBTest, + public testing::WithParamInterface> { + public: + DBTestWithParam() { + max_subcompactions_ = std::get<0>(GetParam()); + exclusive_manual_compaction_ = std::get<1>(GetParam()); + } + + // Required if inheriting from testing::WithParamInterface<> + static void SetUpTestCase() {} + static void TearDownTestCase() {} + + uint32_t max_subcompactions_; + bool exclusive_manual_compaction_; +}; + +TEST_F(DBTest, MockEnvTest) { + unique_ptr env{new MockEnv(Env::Default())}; + Options options; + options.create_if_missing = true; + options.env = env.get(); + DB* db; + + const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")}; + const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")}; + + ASSERT_OK(DB::Open(options, "/dir/db", &db)); + for (size_t i = 0; i < 3; ++i) { + ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i])); + } + + for (size_t i = 0; i < 3; ++i) { + std::string res; + ASSERT_OK(db->Get(ReadOptions(), keys[i], &res)); + ASSERT_TRUE(res == vals[i]); + } + + Iterator* iterator = db->NewIterator(ReadOptions()); + iterator->SeekToFirst(); + for (size_t i = 0; i < 3; ++i) { + ASSERT_TRUE(iterator->Valid()); + ASSERT_TRUE(keys[i] == iterator->key()); + ASSERT_TRUE(vals[i] == iterator->value()); + iterator->Next(); + } + ASSERT_TRUE(!iterator->Valid()); + delete iterator; + +// TEST_FlushMemTable() is not supported in ROCKSDB_LITE +#ifndef ROCKSDB_LITE + DBImpl* dbi = reinterpret_cast(db); + ASSERT_OK(dbi->TEST_FlushMemTable()); + + for (size_t i = 0; i < 3; ++i) { + std::string res; + ASSERT_OK(db->Get(ReadOptions(), keys[i], &res)); + ASSERT_TRUE(res == vals[i]); + } +#endif // ROCKSDB_LITE + + delete db; +} + +// NewMemEnv returns nullptr in ROCKSDB_LITE since class 
+// defined.
+#ifndef ROCKSDB_LITE
+TEST_F(DBTest, MemEnvTest) {
+  unique_ptr<Env> env{NewMemEnv(Env::Default())};
+  Options options;
+  options.create_if_missing = true;
+  options.env = env.get();
+  DB* db;
+
+  const Slice keys[] = {Slice("aaa"), Slice("bbb"), Slice("ccc")};
+  const Slice vals[] = {Slice("foo"), Slice("bar"), Slice("baz")};
+
+  ASSERT_OK(DB::Open(options, "/dir/db", &db));
+  for (size_t i = 0; i < 3; ++i) {
+    ASSERT_OK(db->Put(WriteOptions(), keys[i], vals[i]));
+  }
+
+  for (size_t i = 0; i < 3; ++i) {
+    std::string res;
+    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
+    ASSERT_TRUE(res == vals[i]);
+  }
+
+  Iterator* iterator = db->NewIterator(ReadOptions());
+  iterator->SeekToFirst();
+  for (size_t i = 0; i < 3; ++i) {
+    ASSERT_TRUE(iterator->Valid());
+    ASSERT_TRUE(keys[i] == iterator->key());
+    ASSERT_TRUE(vals[i] == iterator->value());
+    iterator->Next();
+  }
+  ASSERT_TRUE(!iterator->Valid());
+  delete iterator;
+
+  DBImpl* dbi = reinterpret_cast<DBImpl*>(db);
+  ASSERT_OK(dbi->TEST_FlushMemTable());
+
+  for (size_t i = 0; i < 3; ++i) {
+    std::string res;
+    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
+    ASSERT_TRUE(res == vals[i]);
+  }
+
+  delete db;
+
+  options.create_if_missing = false;
+  ASSERT_OK(DB::Open(options, "/dir/db", &db));
+  for (size_t i = 0; i < 3; ++i) {
+    std::string res;
+    ASSERT_OK(db->Get(ReadOptions(), keys[i], &res));
+    ASSERT_TRUE(res == vals[i]);
+  }
+  delete db;
+}
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBTest, WriteEmptyBatch) {
+  Options options = CurrentOptions();
+  options.env = env_;
+  options.write_buffer_size = 100000;
+  CreateAndReopenWithCF({"pikachu"}, options);
+
+  ASSERT_OK(Put(1, "foo", "bar"));
+  WriteOptions wo;
+  wo.sync = true;
+  wo.disableWAL = false;
+  WriteBatch empty_batch;
+  ASSERT_OK(dbfull()->Write(wo, &empty_batch));
+
+  // make sure we can re-open it.
+  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
+  ASSERT_EQ("bar", Get(1, "foo"));
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBTest, ReadOnlyDB) {
+  ASSERT_OK(Put("foo", "v1"));
+  ASSERT_OK(Put("bar", "v2"));
+  ASSERT_OK(Put("foo", "v3"));
+  Close();
+
+  auto options = CurrentOptions();
+  assert(options.env = env_);
+  ASSERT_OK(ReadOnlyReopen(options));
+  ASSERT_EQ("v3", Get("foo"));
+  ASSERT_EQ("v2", Get("bar"));
+  Iterator* iter = db_->NewIterator(ReadOptions());
+  int count = 0;
+  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+    ASSERT_OK(iter->status());
+    ++count;
+  }
+  ASSERT_EQ(count, 2);
+  delete iter;
+  Close();
+
+  // Reopen and flush memtable.
+  Reopen(options);
+  Flush();
+  Close();
+  // Now check keys in read only mode.
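+  // (After the flush above, "foo" and "bar" live in an SST file instead of
+  // the memtable, so the read-only instance opened below can serve them.)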
+  ASSERT_OK(ReadOnlyReopen(options));
+  ASSERT_EQ("v3", Get("foo"));
+  ASSERT_EQ("v2", Get("bar"));
+  ASSERT_TRUE(db_->SyncWAL().IsNotSupported());
+}
+
+TEST_F(DBTest, CompactedDB) {
+  const uint64_t kFileSize = 1 << 20;
+  Options options = CurrentOptions();
+  options.disable_auto_compactions = true;
+  options.write_buffer_size = kFileSize;
+  options.target_file_size_base = kFileSize;
+  options.max_bytes_for_level_base = 1 << 30;
+  options.compression = kNoCompression;
+  Reopen(options);
+  // 1 L0 file, use CompactedDB if max_open_files = -1
+  ASSERT_OK(Put("aaa", DummyString(kFileSize / 2, '1')));
+  Flush();
+  Close();
+  ASSERT_OK(ReadOnlyReopen(options));
+  Status s = Put("new", "value");
+  ASSERT_EQ(s.ToString(),
+            "Not implemented: Not supported operation in read only mode.");
+  ASSERT_EQ(DummyString(kFileSize / 2, '1'), Get("aaa"));
+  Close();
+  options.max_open_files = -1;
+  ASSERT_OK(ReadOnlyReopen(options));
+  s = Put("new", "value");
+  ASSERT_EQ(s.ToString(),
+            "Not implemented: Not supported in compacted db mode.");
+  ASSERT_EQ(DummyString(kFileSize / 2, '1'), Get("aaa"));
+  Close();
+  Reopen(options);
+  // Add more L0 files
+  ASSERT_OK(Put("bbb", DummyString(kFileSize / 2, '2')));
+  Flush();
+  ASSERT_OK(Put("aaa", DummyString(kFileSize / 2, 'a')));
+  Flush();
+  ASSERT_OK(Put("bbb", DummyString(kFileSize / 2, 'b')));
+  ASSERT_OK(Put("eee", DummyString(kFileSize / 2, 'e')));
+  Flush();
+  Close();
+
+  ASSERT_OK(ReadOnlyReopen(options));
+  // Fallback to read-only DB
+  s = Put("new", "value");
+  ASSERT_EQ(s.ToString(),
+            "Not implemented: Not supported operation in read only mode.");
+  Close();
+
+  // Full compaction
+  Reopen(options);
+  // Add more keys
+  ASSERT_OK(Put("fff", DummyString(kFileSize / 2, 'f')));
+  ASSERT_OK(Put("hhh", DummyString(kFileSize / 2, 'h')));
+  ASSERT_OK(Put("iii", DummyString(kFileSize / 2, 'i')));
+  ASSERT_OK(Put("jjj", DummyString(kFileSize / 2, 'j')));
+  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_EQ(3, NumTableFilesAtLevel(1));
+  Close();
+
+  // CompactedDB
+  ASSERT_OK(ReadOnlyReopen(options));
+  s = Put("new", "value");
+  ASSERT_EQ(s.ToString(),
+            "Not implemented: Not supported in compacted db mode.");
+  ASSERT_EQ("NOT_FOUND", Get("abc"));
+  ASSERT_EQ(DummyString(kFileSize / 2, 'a'), Get("aaa"));
+  ASSERT_EQ(DummyString(kFileSize / 2, 'b'), Get("bbb"));
+  ASSERT_EQ("NOT_FOUND", Get("ccc"));
+  ASSERT_EQ(DummyString(kFileSize / 2, 'e'), Get("eee"));
+  ASSERT_EQ(DummyString(kFileSize / 2, 'f'), Get("fff"));
+  ASSERT_EQ("NOT_FOUND", Get("ggg"));
+  ASSERT_EQ(DummyString(kFileSize / 2, 'h'), Get("hhh"));
+  ASSERT_EQ(DummyString(kFileSize / 2, 'i'), Get("iii"));
+  ASSERT_EQ(DummyString(kFileSize / 2, 'j'), Get("jjj"));
+  ASSERT_EQ("NOT_FOUND", Get("kkk"));
+
+  // MultiGet
+  std::vector<std::string> values;
+  std::vector<Status> status_list = dbfull()->MultiGet(
+      ReadOptions(),
+      std::vector<Slice>({Slice("aaa"), Slice("ccc"), Slice("eee"),
+                          Slice("ggg"), Slice("iii"), Slice("kkk")}),
+      &values);
+  ASSERT_EQ(status_list.size(), static_cast<uint64_t>(6));
+  ASSERT_EQ(values.size(), static_cast<uint64_t>(6));
+  ASSERT_OK(status_list[0]);
+  ASSERT_EQ(DummyString(kFileSize / 2, 'a'), values[0]);
+  ASSERT_TRUE(status_list[1].IsNotFound());
+  ASSERT_OK(status_list[2]);
+  ASSERT_EQ(DummyString(kFileSize / 2, 'e'), values[2]);
+  ASSERT_TRUE(status_list[3].IsNotFound());
+  ASSERT_OK(status_list[4]);
+  ASSERT_EQ(DummyString(kFileSize / 2, 'i'), values[4]);
+  ASSERT_TRUE(status_list[5].IsNotFound());
+
+  Reopen(options);
+  // Add a key
+  ASSERT_OK(Put("fff", DummyString(kFileSize / 2, 'f')));
+  Close();
+  ASSERT_OK(ReadOnlyReopen(options));
+  s = Put("new", "value");
+  ASSERT_EQ(s.ToString(),
+            "Not implemented: Not supported operation in read only mode.");
+}
+
+TEST_F(DBTest, LevelLimitReopen) {
+  Options options = CurrentOptions();
+  CreateAndReopenWithCF({"pikachu"}, options);
+
+  const std::string value(1024 * 1024, ' ');
+  int i = 0;
+  while (NumTableFilesAtLevel(2, 1) == 0) {
+    ASSERT_OK(Put(1, Key(i++), value));
+  }
+
+  options.num_levels = 1;
+  options.max_bytes_for_level_multiplier_additional.resize(1, 1);
+  Status s = TryReopenWithColumnFamilies({"default", "pikachu"}, options);
+  ASSERT_EQ(s.IsInvalidArgument(), true);
+  ASSERT_EQ(s.ToString(),
+            "Invalid argument: db has more levels than options.num_levels");
+
+  options.num_levels = 10;
+  options.max_bytes_for_level_multiplier_additional.resize(10, 1);
+  ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options));
+}
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBTest, PutDeleteGet) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    ASSERT_OK(Put(1, "foo", "v1"));
+    ASSERT_EQ("v1", Get(1, "foo"));
+    ASSERT_OK(Put(1, "foo", "v2"));
+    ASSERT_EQ("v2", Get(1, "foo"));
+    ASSERT_OK(Delete(1, "foo"));
+    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
+  } while (ChangeOptions());
+}
+
+TEST_F(DBTest, PutSingleDeleteGet) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    ASSERT_OK(Put(1, "foo", "v1"));
+    ASSERT_EQ("v1", Get(1, "foo"));
+    ASSERT_OK(Put(1, "foo2", "v2"));
+    ASSERT_EQ("v2", Get(1, "foo2"));
+    ASSERT_OK(SingleDelete(1, "foo"));
+    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
+    // Skip HashCuckooRep as it does not support single delete. FIFO and
+    // universal compaction do not apply to the test case. Skip MergePut
+    // because single delete does not get removed when it encounters a merge.
+  } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
+                         kSkipUniversalCompaction | kSkipMergePut));
+}
+
+TEST_F(DBTest, ReadFromPersistedTier) {
+  do {
+    Random rnd(301);
+    Options options = CurrentOptions();
+    for (int disableWAL = 0; disableWAL <= 1; ++disableWAL) {
+      CreateAndReopenWithCF({"pikachu"}, options);
+      WriteOptions wopt;
+      wopt.disableWAL = (disableWAL == 1);
+      // 1st round: put but not flush
+      ASSERT_OK(db_->Put(wopt, handles_[1], "foo", "first"));
+      ASSERT_OK(db_->Put(wopt, handles_[1], "bar", "one"));
+      ASSERT_EQ("first", Get(1, "foo"));
+      ASSERT_EQ("one", Get(1, "bar"));
+
+      // Read directly from persisted data.
+      ReadOptions ropt;
+      ropt.read_tier = kPersistedTier;
+      std::string value;
+      if (wopt.disableWAL) {
+        // as data has not yet been flushed, we expect not found.
+        ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).IsNotFound());
+        ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).IsNotFound());
+      } else {
+        ASSERT_OK(db_->Get(ropt, handles_[1], "foo", &value));
+        ASSERT_OK(db_->Get(ropt, handles_[1], "bar", &value));
+      }
+
+      // Multiget
+      std::vector<ColumnFamilyHandle*> multiget_cfs;
+      multiget_cfs.push_back(handles_[1]);
+      multiget_cfs.push_back(handles_[1]);
+      std::vector<Slice> multiget_keys;
+      multiget_keys.push_back("foo");
+      multiget_keys.push_back("bar");
+      std::vector<std::string> multiget_values;
+      auto statuses =
+          db_->MultiGet(ropt, multiget_cfs, multiget_keys, &multiget_values);
+      if (wopt.disableWAL) {
+        ASSERT_TRUE(statuses[0].IsNotFound());
+        ASSERT_TRUE(statuses[1].IsNotFound());
+      } else {
+        ASSERT_OK(statuses[0]);
+        ASSERT_OK(statuses[1]);
+      }
+
+      // 2nd round: flush and put a new value in memtable.
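+      // (kPersistedTier reads skip the memtable, so a value only becomes
+      // visible to them once it has been flushed.)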
+      ASSERT_OK(Flush(1));
+      ASSERT_OK(db_->Put(wopt, handles_[1], "rocksdb", "hello"));
+
+      // once the data has been flushed, we are able to get the
+      // data when kPersistedTier is used.
+      ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).ok());
+      ASSERT_EQ(value, "first");
+      ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).ok());
+      ASSERT_EQ(value, "one");
+      if (wopt.disableWAL) {
+        ASSERT_TRUE(
+            db_->Get(ropt, handles_[1], "rocksdb", &value).IsNotFound());
+      } else {
+        ASSERT_OK(db_->Get(ropt, handles_[1], "rocksdb", &value));
+        ASSERT_EQ(value, "hello");
+      }
+
+      // Expect same result in multiget
+      multiget_cfs.push_back(handles_[1]);
+      multiget_keys.push_back("rocksdb");
+      statuses =
+          db_->MultiGet(ropt, multiget_cfs, multiget_keys, &multiget_values);
+      ASSERT_TRUE(statuses[0].ok());
+      ASSERT_EQ("first", multiget_values[0]);
+      ASSERT_TRUE(statuses[1].ok());
+      ASSERT_EQ("one", multiget_values[1]);
+      if (wopt.disableWAL) {
+        ASSERT_TRUE(statuses[2].IsNotFound());
+      } else {
+        ASSERT_OK(statuses[2]);
+      }
+
+      // 3rd round: delete and flush
+      ASSERT_OK(db_->Delete(wopt, handles_[1], "foo"));
+      Flush(1);
+      ASSERT_OK(db_->Delete(wopt, handles_[1], "bar"));
+
+      ASSERT_TRUE(db_->Get(ropt, handles_[1], "foo", &value).IsNotFound());
+      if (wopt.disableWAL) {
+        // Still expect to find the value, as its delete has not yet been
+        // flushed.
+        ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).ok());
+        ASSERT_EQ(value, "one");
+      } else {
+        ASSERT_TRUE(db_->Get(ropt, handles_[1], "bar", &value).IsNotFound());
+      }
+      ASSERT_TRUE(db_->Get(ropt, handles_[1], "rocksdb", &value).ok());
+      ASSERT_EQ(value, "hello");
+
+      statuses =
+          db_->MultiGet(ropt, multiget_cfs, multiget_keys, &multiget_values);
+      ASSERT_TRUE(statuses[0].IsNotFound());
+      if (wopt.disableWAL) {
+        ASSERT_TRUE(statuses[1].ok());
+        ASSERT_EQ("one", multiget_values[1]);
+      } else {
+        ASSERT_TRUE(statuses[1].IsNotFound());
+      }
+      ASSERT_TRUE(statuses[2].ok());
+      ASSERT_EQ("hello", multiget_values[2]);
+      if (wopt.disableWAL == 0) {
+        DestroyAndReopen(options);
+      }
+    }
+  } while (ChangeOptions(kSkipHashCuckoo));
+}
+
+TEST_F(DBTest, SingleDeleteFlush) {
+  // Test to check whether flushing preserves a single delete hidden
+  // behind a put.
+  do {
+    Random rnd(301);
+
+    Options options = CurrentOptions();
+    options.disable_auto_compactions = true;
+    CreateAndReopenWithCF({"pikachu"}, options);
+
+    // Put values on second level (so that they will not be in the same
+    // compaction as the other operations).
+    Put(1, "foo", "first");
+    Put(1, "bar", "one");
+    ASSERT_OK(Flush(1));
+    MoveFilesToLevel(2, 1);
+
+    // (Single) delete hidden by a put
+    SingleDelete(1, "foo");
+    Put(1, "foo", "second");
+    Delete(1, "bar");
+    Put(1, "bar", "two");
+    ASSERT_OK(Flush(1));
+
+    SingleDelete(1, "foo");
+    Delete(1, "bar");
+    ASSERT_OK(Flush(1));
+
+    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                           nullptr);
+
+    ASSERT_EQ("NOT_FOUND", Get(1, "bar"));
+    ASSERT_EQ("NOT_FOUND", Get(1, "foo"));
+    // Skip HashCuckooRep as it does not support single delete. FIFO and
+    // universal compaction do not apply to the test case. Skip MergePut
+    // because merges cannot be combined with single deletions.
+  } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
+                         kSkipUniversalCompaction | kSkipMergePut));
+}
+
+TEST_F(DBTest, SingleDeletePutFlush) {
+  // Single deletes that encounter the matching put in a flush should get
+  // removed.
+  do {
+    Random rnd(301);
+
+    Options options = CurrentOptions();
+    options.disable_auto_compactions = true;
+    CreateAndReopenWithCF({"pikachu"}, options);
+
+    Put(1, "foo", Slice());
+    Put(1, "a", Slice());
+    SingleDelete(1, "a");
+    ASSERT_OK(Flush(1));
+
+    ASSERT_EQ("[ ]", AllEntriesFor("a", 1));
+    // Skip HashCuckooRep as it does not support single delete. FIFO and
+    // universal compaction do not apply to the test case. Skip MergePut
+    // because merges cannot be combined with single deletions.
+  } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
+                         kSkipUniversalCompaction | kSkipMergePut));
+}
+
+TEST_F(DBTest, EmptyFlush) {
+  // It is possible to produce empty flushes when using single deletes. Tests
+  // whether empty flushes cause issues.
+  do {
+    Random rnd(301);
+
+    Options options = CurrentOptions();
+    options.disable_auto_compactions = true;
+    CreateAndReopenWithCF({"pikachu"}, options);
+
+    Put(1, "a", Slice());
+    SingleDelete(1, "a");
+    ASSERT_OK(Flush(1));
+
+    ASSERT_EQ("[ ]", AllEntriesFor("a", 1));
+    // Skip HashCuckooRep as it does not support single delete. FIFO and
+    // universal compaction do not apply to the test case. Skip MergePut
+    // because merges cannot be combined with single deletions.
+  } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction |
+                         kSkipUniversalCompaction | kSkipMergePut));
+}
+
+// Disabled because not all platforms can run it. It requires more than 9GB
+// of memory, with a single allocation of more than 3GB.
+TEST_F(DBTest, DISABLED_VeryLargeValue) {
+  const size_t kValueSize = 3221225472u;  // 3GB value
+  const size_t kKeySize = 8388608u;       // 8MB key
+  std::string raw(kValueSize, 'v');
+  std::string key1(kKeySize, 'c');
+  std::string key2(kKeySize, 'd');
+
+  Options options = CurrentOptions();
+  options.env = env_;
+  options.write_buffer_size = 100000;  // Small write buffer
+  options.paranoid_checks = true;
+  DestroyAndReopen(options);
+
+  ASSERT_OK(Put("boo", "v1"));
+  ASSERT_OK(Put("foo", "v1"));
+  ASSERT_OK(Put(key1, raw));
+  raw[0] = 'w';
+  ASSERT_OK(Put(key2, raw));
+  dbfull()->TEST_WaitForFlushMemTable();
+
+  ASSERT_EQ(1, NumTableFilesAtLevel(0));
+
+  std::string value;
+  Status s = db_->Get(ReadOptions(), key1, &value);
+  ASSERT_OK(s);
+  ASSERT_EQ(kValueSize, value.size());
+  ASSERT_EQ('v', value[0]);
+
+  s = db_->Get(ReadOptions(), key2, &value);
+  ASSERT_OK(s);
+  ASSERT_EQ(kValueSize, value.size());
+  ASSERT_EQ('w', value[0]);
+
+  // Compact all files.
+  Flush();
+  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+
+  // Check DB is not in read-only state.
+ ASSERT_OK(Put("boo", "v1")); + + s = db_->Get(ReadOptions(), key1, &value); + ASSERT_OK(s); + ASSERT_EQ(kValueSize, value.size()); + ASSERT_EQ('v', value[0]); + + s = db_->Get(ReadOptions(), key2, &value); + ASSERT_OK(s); + ASSERT_EQ(kValueSize, value.size()); + ASSERT_EQ('w', value[0]); +} + +TEST_F(DBTest, GetFromImmutableLayer) { + do { + Options options = CurrentOptions(); + options.env = env_; + CreateAndReopenWithCF({"pikachu"}, options); + + ASSERT_OK(Put(1, "foo", "v1")); + ASSERT_EQ("v1", Get(1, "foo")); + + // Block sync calls + env_->delay_sstable_sync_.store(true, std::memory_order_release); + Put(1, "k1", std::string(100000, 'x')); // Fill memtable + Put(1, "k2", std::string(100000, 'y')); // Trigger flush + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_EQ("NOT_FOUND", Get(0, "foo")); + // Release sync calls + env_->delay_sstable_sync_.store(false, std::memory_order_release); + } while (ChangeOptions()); +} + +TEST_F(DBTest, GetFromVersions) { + do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "foo", "v1")); + ASSERT_OK(Flush(1)); + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_EQ("NOT_FOUND", Get(0, "foo")); + } while (ChangeOptions()); +} + +#ifndef ROCKSDB_LITE +TEST_F(DBTest, GetSnapshot) { + anon::OptionsOverride options_override; + options_override.skip_policy = kSkipNoSnapshot; + do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override)); + // Try with both a short key and a long key + for (int i = 0; i < 2; i++) { + std::string key = (i == 0) ? std::string("foo") : std::string(200, 'x'); + ASSERT_OK(Put(1, key, "v1")); + const Snapshot* s1 = db_->GetSnapshot(); + if (option_config_ == kHashCuckoo) { + // Unsupported case. + ASSERT_TRUE(s1 == nullptr); + break; + } + ASSERT_OK(Put(1, key, "v2")); + ASSERT_EQ("v2", Get(1, key)); + ASSERT_EQ("v1", Get(1, key, s1)); + ASSERT_OK(Flush(1)); + ASSERT_EQ("v2", Get(1, key)); + ASSERT_EQ("v1", Get(1, key, s1)); + db_->ReleaseSnapshot(s1); + } + } while (ChangeOptions()); +} +#endif // ROCKSDB_LITE + +TEST_F(DBTest, GetLevel0Ordering) { + do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + // Check that we process level-0 files in correct order. The code + // below generates two level-0 files where the earlier one comes + // before the later one in the level-0 file list since the earlier + // one has a smaller "smallest" key. + ASSERT_OK(Put(1, "bar", "b")); + ASSERT_OK(Put(1, "foo", "v1")); + ASSERT_OK(Flush(1)); + ASSERT_OK(Put(1, "foo", "v2")); + ASSERT_OK(Flush(1)); + ASSERT_EQ("v2", Get(1, "foo")); + } while (ChangeOptions()); +} + +TEST_F(DBTest, WrongLevel0Config) { + Options options = CurrentOptions(); + Close(); + ASSERT_OK(DestroyDB(dbname_, options)); + options.level0_stop_writes_trigger = 1; + options.level0_slowdown_writes_trigger = 2; + options.level0_file_num_compaction_trigger = 3; + ASSERT_OK(DB::Open(options, dbname_, &db_)); +} + +#ifndef ROCKSDB_LITE +TEST_F(DBTest, GetOrderedByLevels) { + do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "foo", "v1")); + Compact(1, "a", "z"); + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_OK(Put(1, "foo", "v2")); + ASSERT_EQ("v2", Get(1, "foo")); + ASSERT_OK(Flush(1)); + ASSERT_EQ("v2", Get(1, "foo")); + } while (ChangeOptions()); +} + +TEST_F(DBTest, GetPicksCorrectFile) { + do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + // Arrange to have multiple files in a non-level-0 level. 
+ ASSERT_OK(Put(1, "a", "va")); + Compact(1, "a", "b"); + ASSERT_OK(Put(1, "x", "vx")); + Compact(1, "x", "y"); + ASSERT_OK(Put(1, "f", "vf")); + Compact(1, "f", "g"); + ASSERT_EQ("va", Get(1, "a")); + ASSERT_EQ("vf", Get(1, "f")); + ASSERT_EQ("vx", Get(1, "x")); + } while (ChangeOptions()); +} + +TEST_F(DBTest, GetEncountersEmptyLevel) { + do { + Options options = CurrentOptions(); + options.disableDataSync = true; + CreateAndReopenWithCF({"pikachu"}, options); + // Arrange for the following to happen: + // * sstable A in level 0 + // * nothing in level 1 + // * sstable B in level 2 + // Then do enough Get() calls to arrange for an automatic compaction + // of sstable A. A bug would cause the compaction to be marked as + // occurring at level 1 (instead of the correct level 0). + + // Step 1: First place sstables in levels 0 and 2 + Put(1, "a", "begin"); + Put(1, "z", "end"); + ASSERT_OK(Flush(1)); + dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); + dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); + Put(1, "a", "begin"); + Put(1, "z", "end"); + ASSERT_OK(Flush(1)); + ASSERT_GT(NumTableFilesAtLevel(0, 1), 0); + ASSERT_GT(NumTableFilesAtLevel(2, 1), 0); + + // Step 2: clear level 1 if necessary. + dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); + ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1); + ASSERT_EQ(NumTableFilesAtLevel(1, 1), 0); + ASSERT_EQ(NumTableFilesAtLevel(2, 1), 1); + + // Step 3: read a bunch of times + for (int i = 0; i < 1000; i++) { + ASSERT_EQ("NOT_FOUND", Get(1, "missing")); + } + + // Step 4: Wait for compaction to finish + dbfull()->TEST_WaitForCompact(); + + ASSERT_EQ(NumTableFilesAtLevel(0, 1), 1); // XXX + } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction)); +} +#endif // ROCKSDB_LITE + +TEST_F(DBTest, CheckLock) { + do { + DB* localdb; + Options options = CurrentOptions(); + ASSERT_OK(TryReopen(options)); + + // second open should fail + ASSERT_TRUE(!(DB::Open(options, dbname_, &localdb)).ok()); + } while (ChangeCompactOptions()); +} + +TEST_F(DBTest, FlushMultipleMemtable) { + do { + Options options = CurrentOptions(); + WriteOptions writeOpt = WriteOptions(); + writeOpt.disableWAL = true; + options.max_write_buffer_number = 4; + options.min_write_buffer_number_to_merge = 3; + options.max_write_buffer_number_to_maintain = -1; + CreateAndReopenWithCF({"pikachu"}, options); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v1")); + ASSERT_OK(Flush(1)); + ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1")); + + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_EQ("v1", Get(1, "bar")); + ASSERT_OK(Flush(1)); + } while (ChangeCompactOptions()); +} + +TEST_F(DBTest, FlushEmptyColumnFamily) { + // Block flush thread and disable compaction thread + env_->SetBackgroundThreads(1, Env::HIGH); + env_->SetBackgroundThreads(1, Env::LOW); + test::SleepingBackgroundTask sleeping_task_low; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low, + Env::Priority::LOW); + test::SleepingBackgroundTask sleeping_task_high; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, + &sleeping_task_high, Env::Priority::HIGH); + + Options options = CurrentOptions(); + // disable compaction + options.disable_auto_compactions = true; + WriteOptions writeOpt = WriteOptions(); + writeOpt.disableWAL = true; + options.max_write_buffer_number = 2; + options.min_write_buffer_number_to_merge = 1; + options.max_write_buffer_number_to_maintain = 1; + CreateAndReopenWithCF({"pikachu"}, options); + + // 
Compaction can still go through even if no thread can flush the
+  // mem table.
+  ASSERT_OK(Flush(0));
+  ASSERT_OK(Flush(1));
+
+  // Insert can go through
+  ASSERT_OK(dbfull()->Put(writeOpt, handles_[0], "foo", "v1"));
+  ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1"));
+
+  ASSERT_EQ("v1", Get(0, "foo"));
+  ASSERT_EQ("v1", Get(1, "bar"));
+
+  sleeping_task_high.WakeUp();
+  sleeping_task_high.WaitUntilDone();
+
+  // Flush can still go through.
+  ASSERT_OK(Flush(0));
+  ASSERT_OK(Flush(1));
+
+  sleeping_task_low.WakeUp();
+  sleeping_task_low.WaitUntilDone();
+}
+
+TEST_F(DBTest, FLUSH) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    WriteOptions writeOpt = WriteOptions();
+    writeOpt.disableWAL = true;
+    SetPerfLevel(kEnableTime);
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v1"));
+    // this will now also flush the last 2 writes
+    ASSERT_OK(Flush(1));
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1"));
+
+    perf_context.Reset();
+    Get(1, "foo");
+    ASSERT_TRUE((int)perf_context.get_from_output_files_time > 0);
+
+    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+    ASSERT_EQ("v1", Get(1, "foo"));
+    ASSERT_EQ("v1", Get(1, "bar"));
+
+    writeOpt.disableWAL = true;
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v2"));
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v2"));
+    ASSERT_OK(Flush(1));
+
+    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+    ASSERT_EQ("v2", Get(1, "bar"));
+    perf_context.Reset();
+    ASSERT_EQ("v2", Get(1, "foo"));
+    ASSERT_TRUE((int)perf_context.get_from_output_files_time > 0);
+
+    writeOpt.disableWAL = false;
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v3"));
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v3"));
+    ASSERT_OK(Flush(1));
+
+    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+    // 'foo' should be there because its put
+    // has WAL enabled.
+    ASSERT_EQ("v3", Get(1, "foo"));
+    ASSERT_EQ("v3", Get(1, "bar"));
+
+    SetPerfLevel(kDisable);
+  } while (ChangeCompactOptions());
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBTest, FlushSchedule) {
+  Options options = CurrentOptions();
+  options.disable_auto_compactions = true;
+  options.level0_stop_writes_trigger = 1 << 10;
+  options.level0_slowdown_writes_trigger = 1 << 10;
+  options.min_write_buffer_number_to_merge = 1;
+  options.max_write_buffer_number_to_maintain = 1;
+  options.max_write_buffer_number = 2;
+  options.write_buffer_size = 120 * 1024;
+  CreateAndReopenWithCF({"pikachu"}, options);
+  std::vector<std::thread> threads;
+
+  std::atomic<int> thread_num(0);
+  // each column family will have 5 threads, each thread generating 2 memtables.
+  // each column family should end up with 10 table files
+  std::function<void()> fill_memtable_func = [&]() {
+    int a = thread_num.fetch_add(1);
+    Random rnd(a);
+    WriteOptions wo;
+    // this should fill up 2 memtables
+    for (int k = 0; k < 5000; ++k) {
+      ASSERT_OK(db_->Put(wo, handles_[a & 1], RandomString(&rnd, 13), ""));
+    }
+  };
+
+  for (int i = 0; i < 10; ++i) {
+    threads.emplace_back(fill_memtable_func);
+  }
+
+  for (auto& t : threads) {
+    t.join();
+  }
+
+  auto default_tables = GetNumberOfSstFilesForColumnFamily(db_, "default");
+  auto pikachu_tables = GetNumberOfSstFilesForColumnFamily(db_, "pikachu");
+  ASSERT_LE(default_tables, static_cast<uint64_t>(10));
+  ASSERT_GT(default_tables, static_cast<uint64_t>(0));
+  ASSERT_LE(pikachu_tables, static_cast<uint64_t>(10));
+  ASSERT_GT(pikachu_tables, static_cast<uint64_t>(0));
+}
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBTest, ManifestRollOver) {
+  do {
+    Options options;
+    options.max_manifest_file_size = 10;  // 10 bytes
+    options = CurrentOptions(options);
+    CreateAndReopenWithCF({"pikachu"}, options);
+    {
+      ASSERT_OK(Put(1, "manifest_key1", std::string(1000, '1')));
+      ASSERT_OK(Put(1, "manifest_key2", std::string(1000, '2')));
+      ASSERT_OK(Put(1, "manifest_key3", std::string(1000, '3')));
+      uint64_t manifest_before_flush = dbfull()->TEST_Current_Manifest_FileNo();
+      ASSERT_OK(Flush(1));  // This should trigger LogAndApply.
+      uint64_t manifest_after_flush = dbfull()->TEST_Current_Manifest_FileNo();
+      ASSERT_GT(manifest_after_flush, manifest_before_flush);
+      ReopenWithColumnFamilies({"default", "pikachu"}, options);
+      ASSERT_GT(dbfull()->TEST_Current_Manifest_FileNo(), manifest_after_flush);
+      // check if a new manifest file got inserted or not.
+      ASSERT_EQ(std::string(1000, '1'), Get(1, "manifest_key1"));
+      ASSERT_EQ(std::string(1000, '2'), Get(1, "manifest_key2"));
+      ASSERT_EQ(std::string(1000, '3'), Get(1, "manifest_key3"));
+    }
+  } while (ChangeCompactOptions());
+}
+
+TEST_F(DBTest, IdentityAcrossRestarts) {
+  do {
+    std::string id1;
+    ASSERT_OK(db_->GetDbIdentity(id1));
+
+    Options options = CurrentOptions();
+    Reopen(options);
+    std::string id2;
+    ASSERT_OK(db_->GetDbIdentity(id2));
+    // id1 should match id2 because identity was not regenerated
+    ASSERT_EQ(id1.compare(id2), 0);
+
+    std::string idfilename = IdentityFileName(dbname_);
+    ASSERT_OK(env_->DeleteFile(idfilename));
+    Reopen(options);
+    std::string id3;
+    ASSERT_OK(db_->GetDbIdentity(id3));
+    // id1 should NOT match id3 because identity was regenerated
+    ASSERT_NE(id1.compare(id3), 0);
+  } while (ChangeCompactOptions());
+}
+
+namespace {
+class KeepFilter : public CompactionFilter {
+ public:
+  virtual bool Filter(int level, const Slice& key, const Slice& value,
+                      std::string* new_value,
+                      bool* value_changed) const override {
+    return false;
+  }
+
+  virtual const char* Name() const override { return "KeepFilter"; }
+};
+
+class KeepFilterFactory : public CompactionFilterFactory {
+ public:
+  explicit KeepFilterFactory(bool check_context = false)
+      : check_context_(check_context) {}
+
+  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+      const CompactionFilter::Context& context) override {
+    if (check_context_) {
+      EXPECT_EQ(expect_full_compaction_.load(), context.is_full_compaction);
+      EXPECT_EQ(expect_manual_compaction_.load(), context.is_manual_compaction);
+    }
+    return std::unique_ptr<CompactionFilter>(new KeepFilter());
+  }
+
+  virtual const char* Name() const override { return "KeepFilterFactory"; }
+  bool check_context_;
+  std::atomic_bool expect_full_compaction_;
+  std::atomic_bool expect_manual_compaction_;
+};
+
+class DelayFilter : public CompactionFilter {
+ public:
+  explicit DelayFilter(DBTestBase* d) : db_test(d) {}
+  virtual bool Filter(int level, const Slice& key, const Slice& value,
+                      std::string* new_value,
+                      bool* value_changed) const override {
+    db_test->env_->addon_time_.fetch_add(1000);
+    return true;
+  }
+
+  virtual const char* Name() const override { return "DelayFilter"; }
+
+ private:
+  DBTestBase* db_test;
+};
+
+class DelayFilterFactory : public CompactionFilterFactory {
+ public:
+  explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
+  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+      const CompactionFilter::Context& context) override {
+    return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
+  }
+
+  virtual const char* Name() const override { return "DelayFilterFactory"; }
+
+ private:
+  DBTestBase* db_test;
+};
+}  // namespace
+
+#ifndef ROCKSDB_LITE
+static std::string CompressibleString(Random* rnd, int len) {
+  std::string r;
+  test::CompressibleString(rnd, 0.8, len, &r);
+  return r;
+}
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBTest, FailMoreDbPaths) {
+  Options options = CurrentOptions();
+  options.db_paths.emplace_back(dbname_, 10000000);
+  options.db_paths.emplace_back(dbname_ + "_2", 1000000);
+  options.db_paths.emplace_back(dbname_ + "_3", 1000000);
+  options.db_paths.emplace_back(dbname_ + "_4", 1000000);
+  options.db_paths.emplace_back(dbname_ + "_5", 1000000);
+  ASSERT_TRUE(TryReopen(options).IsNotSupported());
+}
+
+void CheckColumnFamilyMeta(const ColumnFamilyMetaData& cf_meta) {
+  uint64_t cf_size = 0;
+  uint64_t cf_csize = 0;
+  size_t file_count = 0;
+  for (auto level_meta : cf_meta.levels) {
+    uint64_t level_size = 0;
+    uint64_t level_csize = 0;
+    file_count += level_meta.files.size();
+    for (auto file_meta : level_meta.files) {
+      level_size += file_meta.size;
+    }
+    ASSERT_EQ(level_meta.size, level_size);
+    cf_size += level_size;
+    cf_csize += level_csize;
+  }
+  ASSERT_EQ(cf_meta.file_count, file_count);
+  ASSERT_EQ(cf_meta.size, cf_size);
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBTest, ColumnFamilyMetaDataTest) {
+  Options options = CurrentOptions();
+  options.create_if_missing = true;
+  DestroyAndReopen(options);
+
+  Random rnd(301);
+  int key_index = 0;
+  ColumnFamilyMetaData cf_meta;
+  for (int i = 0; i < 100; ++i) {
+    GenerateNewFile(&rnd, &key_index);
+    db_->GetColumnFamilyMetaData(&cf_meta);
+    CheckColumnFamilyMeta(cf_meta);
+  }
+}
+
+namespace {
+void MinLevelHelper(DBTest* self, Options& options) {
+  Random rnd(301);
+
+  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
+       num++) {
+    std::vector<std::string> values;
+    // Write 120KB (12 values, each 10K)
+    for (int i = 0; i < 12; i++) {
+      values.push_back(DBTestBase::RandomString(&rnd, 10000));
+      ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
+    }
+    self->dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_EQ(self->NumTableFilesAtLevel(0), num + 1);
+  }
+
+  // generate one more file in level-0, and should trigger level-0 compaction
+  std::vector<std::string> values;
+  for (int i = 0; i < 12; i++) {
+    values.push_back(DBTestBase::RandomString(&rnd, 10000));
+    ASSERT_OK(self->Put(DBTestBase::Key(i), values[i]));
+  }
+  self->dbfull()->TEST_WaitForCompact();
+
+  ASSERT_EQ(self->NumTableFilesAtLevel(0), 0);
+  ASSERT_EQ(self->NumTableFilesAtLevel(1), 1);
+}
+
+// returns false if the calling-Test should be skipped
+bool MinLevelToCompress(CompressionType& type, Options& options, int wbits,
+                        int lev, int strategy) {
+  fprintf(stderr,
+          "Test with compression options : window_bits = %d, level = %d, "
+          "strategy = %d\n",
+          wbits, lev, strategy);
+  options.write_buffer_size = 100 << 10;  // 100KB
+  options.arena_block_size = 4096;
+  options.num_levels = 3;
+  options.level0_file_num_compaction_trigger = 3;
+  options.create_if_missing = true;
+
+  if (Snappy_Supported()) {
+    type = kSnappyCompression;
+    fprintf(stderr, "using snappy\n");
+  } else if (Zlib_Supported()) {
+    type = kZlibCompression;
+    fprintf(stderr, "using zlib\n");
+  } else if (BZip2_Supported()) {
+    type = kBZip2Compression;
+    fprintf(stderr, "using bzip2\n");
+  } else if (LZ4_Supported()) {
+    type = kLZ4Compression;
+    fprintf(stderr, "using lz4\n");
+  } else if (XPRESS_Supported()) {
+    type = kXpressCompression;
+    fprintf(stderr, "using xpress\n");
+  } else if (ZSTD_Supported()) {
+    type = kZSTDNotFinalCompression;
+    fprintf(stderr, "using ZSTD\n");
+  } else {
+    fprintf(stderr, "skipping test, compression disabled\n");
+    return false;
+  }
+  options.compression_per_level.resize(options.num_levels);
+
+  // do not compress L0
+  for (int i = 0; i < 1; i++) {
+    options.compression_per_level[i] = kNoCompression;
+  }
+  for (int i = 1; i < options.num_levels; i++) {
+    options.compression_per_level[i] = type;
+  }
+  return true;
+}
+}  // namespace
+
+TEST_F(DBTest, MinLevelToCompress1) {
+  Options options = CurrentOptions();
+  CompressionType type = kSnappyCompression;
+  if (!MinLevelToCompress(type, options, -14, -1, 0)) {
+    return;
+  }
+  Reopen(options);
+  MinLevelHelper(this, options);
+
+  // do not compress L0 and L1
+  for (int i = 0; i < 2; i++) {
+    options.compression_per_level[i] = kNoCompression;
+  }
+  for (int i = 2; i < options.num_levels; i++) {
+    options.compression_per_level[i] = type;
+  }
+  DestroyAndReopen(options);
+  MinLevelHelper(this, options);
+}
+
+TEST_F(DBTest, MinLevelToCompress2) {
+  Options options = CurrentOptions();
+  CompressionType type = kSnappyCompression;
+  if (!MinLevelToCompress(type, options, 15, -1, 0)) {
+    return;
+  }
+  Reopen(options);
+  MinLevelHelper(this, options);
+
+  // do not compress L0 and L1
+  for (int i = 0; i < 2; i++) {
+    options.compression_per_level[i] = kNoCompression;
+  }
+  for (int i = 2; i < options.num_levels; i++) {
+    options.compression_per_level[i] = type;
+  }
+  DestroyAndReopen(options);
+  MinLevelHelper(this, options);
+}
+
+TEST_F(DBTest, RepeatedWritesToSameKey) {
+  do {
+    Options options = CurrentOptions();
+    options.env = env_;
+    options.write_buffer_size = 100000;  // Small write buffer
+    CreateAndReopenWithCF({"pikachu"}, options);
+
+    // We must have at most one file per level except for level-0,
+    // which may have up to kL0_StopWritesTrigger files.
+    const int kMaxFiles =
+        options.num_levels + options.level0_stop_writes_trigger;
+
+    Random rnd(301);
+    std::string value =
+        RandomString(&rnd, static_cast<int>(2 * options.write_buffer_size));
+    for (int i = 0; i < 5 * kMaxFiles; i++) {
+      ASSERT_OK(Put(1, "key", value));
+      ASSERT_LE(TotalTableFiles(1), kMaxFiles);
+    }
+  } while (ChangeCompactOptions());
+}
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBTest, SparseMerge) {
+  do {
+    Options options = CurrentOptions();
+    options.compression = kNoCompression;
+    CreateAndReopenWithCF({"pikachu"}, options);
+
+    FillLevels("A", "Z", 1);
+
+    // Suppose there is:
+    //    small amount of data with prefix A
+    //    large amount of data with prefix B
+    //    small amount of data with prefix C
+    // and that recent updates have made small changes to all three prefixes.
+    // Check that we do not do a compaction that merges all of B in one shot.
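+    // (The assertions below bound next-level overlap at 20MB to detect that.)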
+ const std::string value(1000, 'x'); + Put(1, "A", "va"); + // Write approximately 100MB of "B" values + for (int i = 0; i < 100000; i++) { + char key[100]; + snprintf(key, sizeof(key), "B%010d", i); + Put(1, key, value); + } + Put(1, "C", "vc"); + ASSERT_OK(Flush(1)); + dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); + + // Make sparse update + Put(1, "A", "va2"); + Put(1, "B100", "bvalue2"); + Put(1, "C", "vc2"); + ASSERT_OK(Flush(1)); + + // Compactions should not cause us to create a situation where + // a file overlaps too much data at the next level. + ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1]), + 20 * 1048576); + dbfull()->TEST_CompactRange(0, nullptr, nullptr); + ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1]), + 20 * 1048576); + dbfull()->TEST_CompactRange(1, nullptr, nullptr); + ASSERT_LE(dbfull()->TEST_MaxNextLevelOverlappingBytes(handles_[1]), + 20 * 1048576); + } while (ChangeCompactOptions()); +} + +#ifndef ROCKSDB_LITE +static bool Between(uint64_t val, uint64_t low, uint64_t high) { + bool result = (val >= low) && (val <= high); + if (!result) { + fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n", + (unsigned long long)(val), (unsigned long long)(low), + (unsigned long long)(high)); + } + return result; +} + +TEST_F(DBTest, ApproximateSizesMemTable) { + Options options = CurrentOptions(); + options.write_buffer_size = 100000000; // Large write buffer + options.compression = kNoCompression; + options.create_if_missing = true; + DestroyAndReopen(options); + + const int N = 128; + Random rnd(301); + for (int i = 0; i < N; i++) { + ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024))); + } + + uint64_t size; + std::string start = Key(50); + std::string end = Key(60); + Range r(start, end); + db_->GetApproximateSizes(&r, 1, &size, true); + ASSERT_GT(size, 6000); + ASSERT_LT(size, 204800); + // Zero if not including mem table + db_->GetApproximateSizes(&r, 1, &size, false); + ASSERT_EQ(size, 0); + + start = Key(500); + end = Key(600); + r = Range(start, end); + db_->GetApproximateSizes(&r, 1, &size, true); + ASSERT_EQ(size, 0); + + for (int i = 0; i < N; i++) { + ASSERT_OK(Put(Key(1000 + i), RandomString(&rnd, 1024))); + } + + start = Key(500); + end = Key(600); + r = Range(start, end); + db_->GetApproximateSizes(&r, 1, &size, true); + ASSERT_EQ(size, 0); + + start = Key(100); + end = Key(1020); + r = Range(start, end); + db_->GetApproximateSizes(&r, 1, &size, true); + ASSERT_GT(size, 6000); + + options.max_write_buffer_number = 8; + options.min_write_buffer_number_to_merge = 5; + options.write_buffer_size = 1024 * N; // Not very large + DestroyAndReopen(options); + + int keys[N * 3]; + for (int i = 0; i < N; i++) { + keys[i * 3] = i * 5; + keys[i * 3 + 1] = i * 5 + 1; + keys[i * 3 + 2] = i * 5 + 2; + } + std::random_shuffle(std::begin(keys), std::end(keys)); + + for (int i = 0; i < N * 3; i++) { + ASSERT_OK(Put(Key(keys[i] + 1000), RandomString(&rnd, 1024))); + } + + start = Key(100); + end = Key(300); + r = Range(start, end); + db_->GetApproximateSizes(&r, 1, &size, true); + ASSERT_EQ(size, 0); + + start = Key(1050); + end = Key(1080); + r = Range(start, end); + db_->GetApproximateSizes(&r, 1, &size, true); + ASSERT_GT(size, 6000); + + start = Key(2100); + end = Key(2300); + r = Range(start, end); + db_->GetApproximateSizes(&r, 1, &size, true); + ASSERT_EQ(size, 0); + + start = Key(1050); + end = Key(1080); + r = Range(start, end); + uint64_t size_with_mt, size_without_mt; + db_->GetApproximateSizes(&r, 1, 
&size_with_mt, true); + ASSERT_GT(size_with_mt, 6000); + db_->GetApproximateSizes(&r, 1, &size_without_mt, false); + ASSERT_EQ(size_without_mt, 0); + + Flush(); + + for (int i = 0; i < N; i++) { + ASSERT_OK(Put(Key(i + 1000), RandomString(&rnd, 1024))); + } + + start = Key(1050); + end = Key(1080); + r = Range(start, end); + db_->GetApproximateSizes(&r, 1, &size_with_mt, true); + db_->GetApproximateSizes(&r, 1, &size_without_mt, false); + ASSERT_GT(size_with_mt, size_without_mt); + ASSERT_GT(size_without_mt, 6000); +} + +TEST_F(DBTest, ApproximateSizes) { + do { + Options options = CurrentOptions(); + options.write_buffer_size = 100000000; // Large write buffer + options.compression = kNoCompression; + options.create_if_missing = true; + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + + ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0)); + ReopenWithColumnFamilies({"default", "pikachu"}, options); + ASSERT_TRUE(Between(Size("", "xyz", 1), 0, 0)); + + // Write 8MB (80 values, each 100K) + ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); + const int N = 80; + static const int S1 = 100000; + static const int S2 = 105000; // Allow some expansion from metadata + Random rnd(301); + for (int i = 0; i < N; i++) { + ASSERT_OK(Put(1, Key(i), RandomString(&rnd, S1))); + } + + // 0 because GetApproximateSizes() does not account for memtable space + ASSERT_TRUE(Between(Size("", Key(50), 1), 0, 0)); + + // Check sizes across recovery by reopening a few times + for (int run = 0; run < 3; run++) { + ReopenWithColumnFamilies({"default", "pikachu"}, options); + + for (int compact_start = 0; compact_start < N; compact_start += 10) { + for (int i = 0; i < N; i += 10) { + ASSERT_TRUE(Between(Size("", Key(i), 1), S1 * i, S2 * i)); + ASSERT_TRUE(Between(Size("", Key(i) + ".suffix", 1), S1 * (i + 1), + S2 * (i + 1))); + ASSERT_TRUE(Between(Size(Key(i), Key(i + 10), 1), S1 * 10, S2 * 10)); + } + ASSERT_TRUE(Between(Size("", Key(50), 1), S1 * 50, S2 * 50)); + ASSERT_TRUE( + Between(Size("", Key(50) + ".suffix", 1), S1 * 50, S2 * 50)); + + std::string cstart_str = Key(compact_start); + std::string cend_str = Key(compact_start + 9); + Slice cstart = cstart_str; + Slice cend = cend_str; + dbfull()->TEST_CompactRange(0, &cstart, &cend, handles_[1]); + } + + ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); + ASSERT_GT(NumTableFilesAtLevel(1, 1), 0); + } + // ApproximateOffsetOf() is not yet implemented in plain table format. 
+ } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction | + kSkipPlainTable | kSkipHashIndex)); +} + +TEST_F(DBTest, ApproximateSizes_MixOfSmallAndLarge) { + do { + Options options = CurrentOptions(); + options.compression = kNoCompression; + CreateAndReopenWithCF({"pikachu"}, options); + + Random rnd(301); + std::string big1 = RandomString(&rnd, 100000); + ASSERT_OK(Put(1, Key(0), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(1), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(2), big1)); + ASSERT_OK(Put(1, Key(3), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(4), big1)); + ASSERT_OK(Put(1, Key(5), RandomString(&rnd, 10000))); + ASSERT_OK(Put(1, Key(6), RandomString(&rnd, 300000))); + ASSERT_OK(Put(1, Key(7), RandomString(&rnd, 10000))); + + // Check sizes across recovery by reopening a few times + for (int run = 0; run < 3; run++) { + ReopenWithColumnFamilies({"default", "pikachu"}, options); + + ASSERT_TRUE(Between(Size("", Key(0), 1), 0, 0)); + ASSERT_TRUE(Between(Size("", Key(1), 1), 10000, 11000)); + ASSERT_TRUE(Between(Size("", Key(2), 1), 20000, 21000)); + ASSERT_TRUE(Between(Size("", Key(3), 1), 120000, 121000)); + ASSERT_TRUE(Between(Size("", Key(4), 1), 130000, 131000)); + ASSERT_TRUE(Between(Size("", Key(5), 1), 230000, 231000)); + ASSERT_TRUE(Between(Size("", Key(6), 1), 240000, 241000)); + ASSERT_TRUE(Between(Size("", Key(7), 1), 540000, 541000)); + ASSERT_TRUE(Between(Size("", Key(8), 1), 550000, 560000)); + + ASSERT_TRUE(Between(Size(Key(3), Key(5), 1), 110000, 111000)); + + dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); + } + // ApproximateOffsetOf() is not yet implemented in plain table format. + } while (ChangeOptions(kSkipPlainTable)); +} +#endif // ROCKSDB_LITE + +#ifndef ROCKSDB_LITE +TEST_F(DBTest, Snapshot) { + anon::OptionsOverride options_override; + options_override.skip_policy = kSkipNoSnapshot; + do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions(options_override)); + Put(0, "foo", "0v1"); + Put(1, "foo", "1v1"); + + const Snapshot* s1 = db_->GetSnapshot(); + ASSERT_EQ(1U, GetNumSnapshots()); + uint64_t time_snap1 = GetTimeOldestSnapshots(); + ASSERT_GT(time_snap1, 0U); + Put(0, "foo", "0v2"); + Put(1, "foo", "1v2"); + + env_->addon_time_.fetch_add(1); + + const Snapshot* s2 = db_->GetSnapshot(); + ASSERT_EQ(2U, GetNumSnapshots()); + ASSERT_EQ(time_snap1, GetTimeOldestSnapshots()); + Put(0, "foo", "0v3"); + Put(1, "foo", "1v3"); + + { + ManagedSnapshot s3(db_); + ASSERT_EQ(3U, GetNumSnapshots()); + ASSERT_EQ(time_snap1, GetTimeOldestSnapshots()); + + Put(0, "foo", "0v4"); + Put(1, "foo", "1v4"); + ASSERT_EQ("0v1", Get(0, "foo", s1)); + ASSERT_EQ("1v1", Get(1, "foo", s1)); + ASSERT_EQ("0v2", Get(0, "foo", s2)); + ASSERT_EQ("1v2", Get(1, "foo", s2)); + ASSERT_EQ("0v3", Get(0, "foo", s3.snapshot())); + ASSERT_EQ("1v3", Get(1, "foo", s3.snapshot())); + ASSERT_EQ("0v4", Get(0, "foo")); + ASSERT_EQ("1v4", Get(1, "foo")); + } + + ASSERT_EQ(2U, GetNumSnapshots()); + ASSERT_EQ(time_snap1, GetTimeOldestSnapshots()); + ASSERT_EQ("0v1", Get(0, "foo", s1)); + ASSERT_EQ("1v1", Get(1, "foo", s1)); + ASSERT_EQ("0v2", Get(0, "foo", s2)); + ASSERT_EQ("1v2", Get(1, "foo", s2)); + ASSERT_EQ("0v4", Get(0, "foo")); + ASSERT_EQ("1v4", Get(1, "foo")); + + db_->ReleaseSnapshot(s1); + ASSERT_EQ("0v2", Get(0, "foo", s2)); + ASSERT_EQ("1v2", Get(1, "foo", s2)); + ASSERT_EQ("0v4", Get(0, "foo")); + ASSERT_EQ("1v4", Get(1, "foo")); + ASSERT_EQ(1U, GetNumSnapshots()); + ASSERT_LT(time_snap1, GetTimeOldestSnapshots()); + + 
db_->ReleaseSnapshot(s2); + ASSERT_EQ(0U, GetNumSnapshots()); + ASSERT_EQ("0v4", Get(0, "foo")); + ASSERT_EQ("1v4", Get(1, "foo")); + } while (ChangeOptions(kSkipHashCuckoo)); +} + +TEST_F(DBTest, HiddenValuesAreRemoved) { + anon::OptionsOverride options_override; + options_override.skip_policy = kSkipNoSnapshot; + do { + Options options = CurrentOptions(options_override); + CreateAndReopenWithCF({"pikachu"}, options); + Random rnd(301); + FillLevels("a", "z", 1); + + std::string big = RandomString(&rnd, 50000); + Put(1, "foo", big); + Put(1, "pastfoo", "v"); + const Snapshot* snapshot = db_->GetSnapshot(); + Put(1, "foo", "tiny"); + Put(1, "pastfoo2", "v2"); // Advance sequence number one more + + ASSERT_OK(Flush(1)); + ASSERT_GT(NumTableFilesAtLevel(0, 1), 0); + + ASSERT_EQ(big, Get(1, "foo", snapshot)); + ASSERT_TRUE(Between(Size("", "pastfoo", 1), 50000, 60000)); + db_->ReleaseSnapshot(snapshot); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny, " + big + " ]"); + Slice x("x"); + dbfull()->TEST_CompactRange(0, nullptr, &x, handles_[1]); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]"); + ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0); + ASSERT_GE(NumTableFilesAtLevel(1, 1), 1); + dbfull()->TEST_CompactRange(1, nullptr, &x, handles_[1]); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ tiny ]"); + + ASSERT_TRUE(Between(Size("", "pastfoo", 1), 0, 1000)); + // ApproximateOffsetOf() is not yet implemented in plain table format, + // which is used by Size(). + // skip HashCuckooRep as it does not support snapshot + } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction | + kSkipPlainTable | kSkipHashCuckoo)); +} +#endif // ROCKSDB_LITE + +TEST_F(DBTest, CompactBetweenSnapshots) { + anon::OptionsOverride options_override; + options_override.skip_policy = kSkipNoSnapshot; + do { + Options options = CurrentOptions(options_override); + options.disable_auto_compactions = true; + CreateAndReopenWithCF({"pikachu"}, options); + Random rnd(301); + FillLevels("a", "z", 1); + + Put(1, "foo", "first"); + const Snapshot* snapshot1 = db_->GetSnapshot(); + Put(1, "foo", "second"); + Put(1, "foo", "third"); + Put(1, "foo", "fourth"); + const Snapshot* snapshot2 = db_->GetSnapshot(); + Put(1, "foo", "fifth"); + Put(1, "foo", "sixth"); + + // All entries (including duplicates) exist + // before any compaction or flush is triggered. + ASSERT_EQ(AllEntriesFor("foo", 1), + "[ sixth, fifth, fourth, third, second, first ]"); + ASSERT_EQ("sixth", Get(1, "foo")); + ASSERT_EQ("fourth", Get(1, "foo", snapshot2)); + ASSERT_EQ("first", Get(1, "foo", snapshot1)); + + // After a flush, "second", "third" and "fifth" should + // be removed + ASSERT_OK(Flush(1)); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ sixth, fourth, first ]"); + + // after we release the snapshot1, only two values left + db_->ReleaseSnapshot(snapshot1); + FillLevels("a", "z", 1); + dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, + nullptr); + + // We have only one valid snapshot snapshot2. Since snapshot1 is + // not valid anymore, "first" should be removed by a compaction. 
+ ASSERT_EQ("sixth", Get(1, "foo")); + ASSERT_EQ("fourth", Get(1, "foo", snapshot2)); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ sixth, fourth ]"); + + // after we release the snapshot2, only one value should be left + db_->ReleaseSnapshot(snapshot2); + FillLevels("a", "z", 1); + dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, + nullptr); + ASSERT_EQ("sixth", Get(1, "foo")); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ sixth ]"); + // skip HashCuckooRep as it does not support snapshot + } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction)); +} + +TEST_F(DBTest, UnremovableSingleDelete) { + // If we compact: + // + // Put(A, v1) Snapshot SingleDelete(A) Put(A, v2) + // + // We do not want to end up with: + // + // Put(A, v1) Snapshot Put(A, v2) + // + // Because a subsequent SingleDelete(A) would delete the Put(A, v2) + // but not Put(A, v1), so Get(A) would return v1. + anon::OptionsOverride options_override; + options_override.skip_policy = kSkipNoSnapshot; + do { + Options options = CurrentOptions(options_override); + options.disable_auto_compactions = true; + CreateAndReopenWithCF({"pikachu"}, options); + + Put(1, "foo", "first"); + const Snapshot* snapshot = db_->GetSnapshot(); + SingleDelete(1, "foo"); + Put(1, "foo", "second"); + ASSERT_OK(Flush(1)); + + ASSERT_EQ("first", Get(1, "foo", snapshot)); + ASSERT_EQ("second", Get(1, "foo")); + + dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, + nullptr); + ASSERT_EQ("[ second, SDEL, first ]", AllEntriesFor("foo", 1)); + + SingleDelete(1, "foo"); + + ASSERT_EQ("first", Get(1, "foo", snapshot)); + ASSERT_EQ("NOT_FOUND", Get(1, "foo")); + + dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr, + nullptr); + + ASSERT_EQ("first", Get(1, "foo", snapshot)); + ASSERT_EQ("NOT_FOUND", Get(1, "foo")); + db_->ReleaseSnapshot(snapshot); + // Skip HashCuckooRep as it does not support single delete. FIFO and + // universal compaction do not apply to the test case. Skip MergePut + // because single delete does not get removed when it encounters a merge. + } while (ChangeOptions(kSkipHashCuckoo | kSkipFIFOCompaction | + kSkipUniversalCompaction | kSkipMergePut)); +} + +#ifndef ROCKSDB_LITE +TEST_F(DBTest, DeletionMarkers1) { + Options options = CurrentOptions(); + options.max_background_flushes = 0; + CreateAndReopenWithCF({"pikachu"}, options); + Put(1, "foo", "v1"); + ASSERT_OK(Flush(1)); + const int last = 2; + MoveFilesToLevel(last, 1); + // foo => v1 is now in last level + ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1); + + // Place a table at level last-1 to prevent merging with preceding mutation + Put(1, "a", "begin"); + Put(1, "z", "end"); + Flush(1); + MoveFilesToLevel(last - 1, 1); + ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1); + ASSERT_EQ(NumTableFilesAtLevel(last - 1, 1), 1); + + Delete(1, "foo"); + Put(1, "foo", "v2"); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, DEL, v1 ]"); + ASSERT_OK(Flush(1)); // Moves to level last-2 + ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]"); + Slice z("z"); + dbfull()->TEST_CompactRange(last - 2, nullptr, &z, handles_[1]); + // DEL eliminated, but v1 remains because we aren't compacting that level + // (DEL can be eliminated because v2 hides v1). + ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]"); + dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr, handles_[1]); + // Merging last-1 w/ last, so we are the base level for "foo", so + // DEL is removed. (as is v1). 
+ ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2 ]"); +} + +TEST_F(DBTest, DeletionMarkers2) { + Options options = CurrentOptions(); + CreateAndReopenWithCF({"pikachu"}, options); + Put(1, "foo", "v1"); + ASSERT_OK(Flush(1)); + const int last = 2; + MoveFilesToLevel(last, 1); + // foo => v1 is now in last level + ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1); + + // Place a table at level last-1 to prevent merging with preceding mutation + Put(1, "a", "begin"); + Put(1, "z", "end"); + Flush(1); + MoveFilesToLevel(last - 1, 1); + ASSERT_EQ(NumTableFilesAtLevel(last, 1), 1); + ASSERT_EQ(NumTableFilesAtLevel(last - 1, 1), 1); + + Delete(1, "foo"); + ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]"); + ASSERT_OK(Flush(1)); // Moves to level last-2 + ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]"); + dbfull()->TEST_CompactRange(last - 2, nullptr, nullptr, handles_[1]); + // DEL kept: "last" file overlaps + ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v1 ]"); + dbfull()->TEST_CompactRange(last - 1, nullptr, nullptr, handles_[1]); + // Merging last-1 w/ last, so we are the base level for "foo", so + // DEL is removed. (as is v1). + ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]"); +} + +TEST_F(DBTest, OverlapInLevel0) { + do { + Options options = CurrentOptions(); + CreateAndReopenWithCF({"pikachu"}, options); + + // Fill levels 1 and 2 to disable the pushing of new memtables to levels > + // 0. + ASSERT_OK(Put(1, "100", "v100")); + ASSERT_OK(Put(1, "999", "v999")); + Flush(1); + MoveFilesToLevel(2, 1); + ASSERT_OK(Delete(1, "100")); + ASSERT_OK(Delete(1, "999")); + Flush(1); + MoveFilesToLevel(1, 1); + ASSERT_EQ("0,1,1", FilesPerLevel(1)); + + // Make files spanning the following ranges in level-0: + // files[0] 200 .. 900 + // files[1] 300 .. 500 + // Note that files are sorted by smallest key. + ASSERT_OK(Put(1, "300", "v300")); + ASSERT_OK(Put(1, "500", "v500")); + Flush(1); + ASSERT_OK(Put(1, "200", "v200")); + ASSERT_OK(Put(1, "600", "v600")); + ASSERT_OK(Put(1, "900", "v900")); + Flush(1); + ASSERT_EQ("2,1,1", FilesPerLevel(1)); + + // Compact away the placeholder files we created initially + dbfull()->TEST_CompactRange(1, nullptr, nullptr, handles_[1]); + dbfull()->TEST_CompactRange(2, nullptr, nullptr, handles_[1]); + ASSERT_EQ("2", FilesPerLevel(1)); + + // Do a memtable compaction. Before bug-fix, the compaction would + // not detect the overlap with level-0 files and would incorrectly place + // the deletion in a deeper level. 
+ ASSERT_OK(Delete(1, "600")); + Flush(1); + ASSERT_EQ("3", FilesPerLevel(1)); + ASSERT_EQ("NOT_FOUND", Get(1, "600")); + } while (ChangeOptions(kSkipUniversalCompaction | kSkipFIFOCompaction)); +} +#endif // ROCKSDB_LITE + +TEST_F(DBTest, ComparatorCheck) { + class NewComparator : public Comparator { + public: + virtual const char* Name() const override { + return "rocksdb.NewComparator"; + } + virtual int Compare(const Slice& a, const Slice& b) const override { + return BytewiseComparator()->Compare(a, b); + } + virtual void FindShortestSeparator(std::string* s, + const Slice& l) const override { + BytewiseComparator()->FindShortestSeparator(s, l); + } + virtual void FindShortSuccessor(std::string* key) const override { + BytewiseComparator()->FindShortSuccessor(key); + } + }; + Options new_options, options; + NewComparator cmp; + do { + options = CurrentOptions(); + CreateAndReopenWithCF({"pikachu"}, options); + new_options = CurrentOptions(); + new_options.comparator = &cmp; + // only the non-default column family has non-matching comparator + Status s = TryReopenWithColumnFamilies( + {"default", "pikachu"}, std::vector({options, new_options})); + ASSERT_TRUE(!s.ok()); + ASSERT_TRUE(s.ToString().find("comparator") != std::string::npos) + << s.ToString(); + } while (ChangeCompactOptions()); +} + +TEST_F(DBTest, CustomComparator) { + class NumberComparator : public Comparator { + public: + virtual const char* Name() const override { + return "test.NumberComparator"; + } + virtual int Compare(const Slice& a, const Slice& b) const override { + return ToNumber(a) - ToNumber(b); + } + virtual void FindShortestSeparator(std::string* s, + const Slice& l) const override { + ToNumber(*s); // Check format + ToNumber(l); // Check format + } + virtual void FindShortSuccessor(std::string* key) const override { + ToNumber(*key); // Check format + } + + private: + static int ToNumber(const Slice& x) { + // Check that there are no extra characters. 
+ EXPECT_TRUE(x.size() >= 2 && x[0] == '[' && x[x.size() - 1] == ']') + << EscapeString(x); + int val; + char ignored; + EXPECT_TRUE(sscanf(x.ToString().c_str(), "[%i]%c", &val, &ignored) == 1) + << EscapeString(x); + return val; + } + }; + Options new_options; + NumberComparator cmp; + do { + new_options = CurrentOptions(); + new_options.create_if_missing = true; + new_options.comparator = &cmp; + new_options.write_buffer_size = 4096; // Compact more often + new_options.arena_block_size = 4096; + new_options = CurrentOptions(new_options); + DestroyAndReopen(new_options); + CreateAndReopenWithCF({"pikachu"}, new_options); + ASSERT_OK(Put(1, "[10]", "ten")); + ASSERT_OK(Put(1, "[0x14]", "twenty")); + for (int i = 0; i < 2; i++) { + ASSERT_EQ("ten", Get(1, "[10]")); + ASSERT_EQ("ten", Get(1, "[0xa]")); + ASSERT_EQ("twenty", Get(1, "[20]")); + ASSERT_EQ("twenty", Get(1, "[0x14]")); + ASSERT_EQ("NOT_FOUND", Get(1, "[15]")); + ASSERT_EQ("NOT_FOUND", Get(1, "[0xf]")); + Compact(1, "[0]", "[9999]"); + } + + for (int run = 0; run < 2; run++) { + for (int i = 0; i < 1000; i++) { + char buf[100]; + snprintf(buf, sizeof(buf), "[%d]", i * 10); + ASSERT_OK(Put(1, buf, buf)); + } + Compact(1, "[0]", "[1000000]"); + } + } while (ChangeCompactOptions()); +} + +TEST_F(DBTest, DBOpen_Options) { + Options options = CurrentOptions(); + std::string dbname = test::TmpDir(env_) + "/db_options_test"; + ASSERT_OK(DestroyDB(dbname, options)); + + // Does not exist, and create_if_missing == false: error + DB* db = nullptr; + options.create_if_missing = false; + Status s = DB::Open(options, dbname, &db); + ASSERT_TRUE(strstr(s.ToString().c_str(), "does not exist") != nullptr); + ASSERT_TRUE(db == nullptr); + + // Does not exist, and create_if_missing == true: OK + options.create_if_missing = true; + s = DB::Open(options, dbname, &db); + ASSERT_OK(s); + ASSERT_TRUE(db != nullptr); + + delete db; + db = nullptr; + + // Does exist, and error_if_exists == true: error + options.create_if_missing = false; + options.error_if_exists = true; + s = DB::Open(options, dbname, &db); + ASSERT_TRUE(strstr(s.ToString().c_str(), "exists") != nullptr); + ASSERT_TRUE(db == nullptr); + + // Does exist, and error_if_exists == false: OK + options.create_if_missing = true; + options.error_if_exists = false; + s = DB::Open(options, dbname, &db); + ASSERT_OK(s); + ASSERT_TRUE(db != nullptr); + + delete db; + db = nullptr; +} + +TEST_F(DBTest, DBOpen_Change_NumLevels) { + Options options = CurrentOptions(); + options.create_if_missing = true; + DestroyAndReopen(options); + ASSERT_TRUE(db_ != nullptr); + CreateAndReopenWithCF({"pikachu"}, options); + + ASSERT_OK(Put(1, "a", "123")); + ASSERT_OK(Put(1, "b", "234")); + Flush(1); + MoveFilesToLevel(3, 1); + Close(); + + options.create_if_missing = false; + options.num_levels = 2; + Status s = TryReopenWithColumnFamilies({"default", "pikachu"}, options); + ASSERT_TRUE(strstr(s.ToString().c_str(), "Invalid argument") != nullptr); + ASSERT_TRUE(db_ == nullptr); +} + +TEST_F(DBTest, DestroyDBMetaDatabase) { + std::string dbname = test::TmpDir(env_) + "/db_meta"; + ASSERT_OK(env_->CreateDirIfMissing(dbname)); + std::string metadbname = MetaDatabaseName(dbname, 0); + ASSERT_OK(env_->CreateDirIfMissing(metadbname)); + std::string metametadbname = MetaDatabaseName(metadbname, 0); + ASSERT_OK(env_->CreateDirIfMissing(metametadbname)); + + // Destroy previous versions if they exist. Using the long way. 
+  Options options = CurrentOptions();
+  ASSERT_OK(DestroyDB(metametadbname, options));
+  ASSERT_OK(DestroyDB(metadbname, options));
+  ASSERT_OK(DestroyDB(dbname, options));
+
+  // Setup databases
+  DB* db = nullptr;
+  ASSERT_OK(DB::Open(options, dbname, &db));
+  delete db;
+  db = nullptr;
+  ASSERT_OK(DB::Open(options, metadbname, &db));
+  delete db;
+  db = nullptr;
+  ASSERT_OK(DB::Open(options, metametadbname, &db));
+  delete db;
+  db = nullptr;
+
+  // Delete databases
+  ASSERT_OK(DestroyDB(dbname, options));
+
+  // Check if deletion worked.
+  options.create_if_missing = false;
+  ASSERT_TRUE(!(DB::Open(options, dbname, &db)).ok());
+  ASSERT_TRUE(!(DB::Open(options, metadbname, &db)).ok());
+  ASSERT_TRUE(!(DB::Open(options, metametadbname, &db)).ok());
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBTest, SnapshotFiles) {
+  do {
+    Options options = CurrentOptions();
+    options.write_buffer_size = 100000000;  // Large write buffer
+    CreateAndReopenWithCF({"pikachu"}, options);
+
+    Random rnd(301);
+
+    // Write 8MB (80 values, each 100K)
+    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
+    std::vector<std::string> values;
+    for (int i = 0; i < 80; i++) {
+      values.push_back(RandomString(&rnd, 100000));
+      ASSERT_OK(Put((i < 40), Key(i), values[i]));
+    }
+
+    // assert that nothing makes it to disk yet.
+    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
+
+    // get a file snapshot
+    uint64_t manifest_number = 0;
+    uint64_t manifest_size = 0;
+    std::vector<std::string> files;
+    dbfull()->DisableFileDeletions();
+    dbfull()->GetLiveFiles(files, &manifest_size);
+
+    // CURRENT, MANIFEST, OPTIONS, *.sst files (one for each CF)
+    ASSERT_EQ(files.size(), 5U);
+
+    uint64_t number = 0;
+    FileType type;
+
+    // copy these files to a new snapshot directory
+    std::string snapdir = dbname_ + ".snapdir/";
+    ASSERT_OK(env_->CreateDirIfMissing(snapdir));
+
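+    // (GetLiveFiles() flushes the memtable by default and returns the live
+    // file names relative to the DB directory, plus the valid size of the
+    // current MANIFEST; with deletions disabled above, the copies made below
+    // form a consistent checkpoint.)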
+    for (size_t i = 0; i < files.size(); i++) {
+      // our clients require that GetLiveFiles returns
+      // files with "/" as first character!
+      ASSERT_EQ(files[i][0], '/');
+      std::string src = dbname_ + files[i];
+      std::string dest = snapdir + files[i];
+
+      uint64_t size;
+      ASSERT_OK(env_->GetFileSize(src, &size));
+
+      // record the number and the size of the
+      // latest manifest file
+      if (ParseFileName(files[i].substr(1), &number, &type)) {
+        if (type == kDescriptorFile) {
+          if (number > manifest_number) {
+            manifest_number = number;
+            ASSERT_GE(size, manifest_size);
+            size = manifest_size;  // copy only valid MANIFEST data
+          }
+        }
+      }
+      CopyFile(src, dest, size);
+    }
+
+    // release file snapshot
+    dbfull()->EnableFileDeletions(false);
+    // overwrite one key, this key should not appear in the snapshot
+    std::vector<std::string> extras;
+    for (unsigned int i = 0; i < 1; i++) {
+      extras.push_back(RandomString(&rnd, 100000));
+      ASSERT_OK(Put(0, Key(i), extras[i]));
+    }
+
+    // verify that data in the snapshot are correct
+    std::vector<ColumnFamilyDescriptor> column_families;
+    column_families.emplace_back("default", ColumnFamilyOptions());
+    column_families.emplace_back("pikachu", ColumnFamilyOptions());
+    std::vector<ColumnFamilyHandle*> cf_handles;
+    DB* snapdb;
+    DBOptions opts;
+    opts.env = env_;
+    opts.create_if_missing = false;
+    Status stat =
+        DB::Open(opts, snapdir, column_families, &cf_handles, &snapdb);
+    ASSERT_OK(stat);
+
+    ReadOptions roptions;
+    std::string val;
+    for (unsigned int i = 0; i < 80; i++) {
+      stat = snapdb->Get(roptions, cf_handles[i < 40], Key(i), &val);
+      ASSERT_EQ(values[i].compare(val), 0);
+    }
+    for (auto cfh : cf_handles) {
+      delete cfh;
+    }
+    delete snapdb;
+
+    // look at the new live files after we added an 'extra' key
+    // and after we took the first snapshot.
+    uint64_t new_manifest_number = 0;
+    uint64_t new_manifest_size = 0;
+    std::vector<std::string> newfiles;
+    dbfull()->DisableFileDeletions();
+    dbfull()->GetLiveFiles(newfiles, &new_manifest_size);
+
+    // find the new manifest file. assert that this manifest file is
+    // the same one as in the previous snapshot. But its size should be
+    // larger because we added an extra key after taking the
+    // previous snapshot.
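+    // (The MANIFEST is an append-only log of version edits, so the flush
+    // triggered since the first snapshot can only have made it grow.)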
+    for (size_t i = 0; i < newfiles.size(); i++) {
+      std::string src = dbname_ + "/" + newfiles[i];
+      // record the lognumber and the size of the
+      // latest manifest file
+      if (ParseFileName(newfiles[i].substr(1), &number, &type)) {
+        if (type == kDescriptorFile) {
+          if (number > new_manifest_number) {
+            uint64_t size;
+            new_manifest_number = number;
+            ASSERT_OK(env_->GetFileSize(src, &size));
+            ASSERT_GE(size, new_manifest_size);
+          }
+        }
+      }
+    }
+    ASSERT_EQ(manifest_number, new_manifest_number);
+    ASSERT_GT(new_manifest_size, manifest_size);
+
+    // release file snapshot
+    dbfull()->EnableFileDeletions(false);
+  } while (ChangeCompactOptions());
+}
+#endif
+
+TEST_F(DBTest, CompactOnFlush) {
+  anon::OptionsOverride options_override;
+  options_override.skip_policy = kSkipNoSnapshot;
+  do {
+    Options options = CurrentOptions(options_override);
+    options.disable_auto_compactions = true;
+    CreateAndReopenWithCF({"pikachu"}, options);
+
+    Put(1, "foo", "v1");
+    ASSERT_OK(Flush(1));
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v1 ]");
+
+    // Write two new keys
+    Put(1, "a", "begin");
+    Put(1, "z", "end");
+    Flush(1);
+
+    // Case 1: Delete followed by a put
+    Delete(1, "foo");
+    Put(1, "foo", "v2");
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, DEL, v1 ]");
+
+    // After the current memtable is flushed, the DEL should
+    // have been removed
+    ASSERT_OK(Flush(1));
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2, v1 ]");
+
+    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                           nullptr);
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v2 ]");
+
+    // Case 2: Delete followed by another delete
+    Delete(1, "foo");
+    Delete(1, "foo");
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, DEL, v2 ]");
+    ASSERT_OK(Flush(1));
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v2 ]");
+    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                           nullptr);
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]");
+
+    // Case 3: Put followed by a delete
+    Put(1, "foo", "v3");
+    Delete(1, "foo");
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL, v3 ]");
+    ASSERT_OK(Flush(1));
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ DEL ]");
+    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                           nullptr);
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]");
+
+    // Case 4: Put followed by another Put
+    Put(1, "foo", "v4");
+    Put(1, "foo", "v5");
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v5, v4 ]");
+    ASSERT_OK(Flush(1));
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v5 ]");
+    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                           nullptr);
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v5 ]");
+
+    // clear database
+    Delete(1, "foo");
+    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                           nullptr);
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]");
+
+    // Case 5: Put followed by snapshot followed by another Put
+    // Both puts should remain.
+    Put(1, "foo", "v6");
+    const Snapshot* snapshot = db_->GetSnapshot();
+    Put(1, "foo", "v7");
+    ASSERT_OK(Flush(1));
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v7, v6 ]");
+    db_->ReleaseSnapshot(snapshot);
+
+    // clear database
+    Delete(1, "foo");
+    dbfull()->CompactRange(CompactRangeOptions(), handles_[1], nullptr,
+                           nullptr);
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ ]");
+
+    // Case 6: snapshot followed by a put followed by another Put
+    // Only the last put should remain.
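+    // (Both puts land after snapshot1 is taken, so the flush only needs to
+    // keep the newest of the two versions.)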
+    const Snapshot* snapshot1 = db_->GetSnapshot();
+    Put(1, "foo", "v8");
+    Put(1, "foo", "v9");
+    ASSERT_OK(Flush(1));
+    ASSERT_EQ(AllEntriesFor("foo", 1), "[ v9 ]");
+    db_->ReleaseSnapshot(snapshot1);
+  } while (ChangeCompactOptions());
+}
+
+TEST_F(DBTest, FlushOneColumnFamily) {
+  Options options = CurrentOptions();
+  CreateAndReopenWithCF({"pikachu", "ilya", "muromec", "dobrynia", "nikitich",
+                         "alyosha", "popovich"},
+                        options);
+
+  ASSERT_OK(Put(0, "Default", "Default"));
+  ASSERT_OK(Put(1, "pikachu", "pikachu"));
+  ASSERT_OK(Put(2, "ilya", "ilya"));
+  ASSERT_OK(Put(3, "muromec", "muromec"));
+  ASSERT_OK(Put(4, "dobrynia", "dobrynia"));
+  ASSERT_OK(Put(5, "nikitich", "nikitich"));
+  ASSERT_OK(Put(6, "alyosha", "alyosha"));
+  ASSERT_OK(Put(7, "popovich", "popovich"));
+
+  for (int i = 0; i < 8; ++i) {
+    Flush(i);
+    auto tables = ListTableFiles(env_, dbname_);
+    ASSERT_EQ(tables.size(), i + 1U);
+  }
+}
+
+TEST_F(DBTest, PurgeInfoLogs) {
+  Options options = CurrentOptions();
+  options.keep_log_file_num = 5;
+  options.create_if_missing = true;
+  for (int mode = 0; mode <= 1; mode++) {
+    if (mode == 1) {
+      options.db_log_dir = dbname_ + "_logs";
+      env_->CreateDirIfMissing(options.db_log_dir);
+    } else {
+      options.db_log_dir = "";
+    }
+    for (int i = 0; i < 8; i++) {
+      Reopen(options);
+    }
+
+    std::vector<std::string> files;
+    env_->GetChildren(options.db_log_dir.empty() ? dbname_ : options.db_log_dir,
+                      &files);
+    int info_log_count = 0;
+    for (std::string file : files) {
+      if (file.find("LOG") != std::string::npos) {
+        info_log_count++;
+      }
+    }
+    ASSERT_EQ(5, info_log_count);
+
+    Destroy(options);
+    // For mode 0, test that DestroyDB() deleted all the info logs under the
+    // DB dir. For mode 1, no info log file should have been put under the DB
+    // dir in the first place.
+    std::vector<std::string> db_files;
+    env_->GetChildren(dbname_, &db_files);
+    for (std::string file : db_files) {
+      ASSERT_TRUE(file.find("LOG") == std::string::npos);
+    }
+
+    if (mode == 1) {
+      // Cleaning up
+      env_->GetChildren(options.db_log_dir, &files);
+      for (std::string file : files) {
+        env_->DeleteFile(options.db_log_dir + "/" + file);
+      }
+      env_->DeleteDir(options.db_log_dir);
+    }
+  }
+}
+
+#ifndef ROCKSDB_LITE
+// Multi-threaded test:
+namespace {
+
+static const int kColumnFamilies = 10;
+static const int kNumThreads = 10;
+static const int kTestSeconds = 10;
+static const int kNumKeys = 1000;
+
+struct MTState {
+  DBTest* test;
+  std::atomic<bool> stop;
+  std::atomic<int> counter[kNumThreads];
+  std::atomic<bool> thread_done[kNumThreads];
+};
+
+struct MTThread {
+  MTState* state;
+  int id;
+};
+
+static void MTThreadBody(void* arg) {
+  MTThread* t = reinterpret_cast<MTThread*>(arg);
+  int id = t->id;
+  DB* db = t->state->test->db_;
+  int counter = 0;
+  fprintf(stderr, "... starting thread %d\n", id);
+  Random rnd(1000 + id);
+  char valbuf[1500];
+  while (t->state->stop.load(std::memory_order_acquire) == false) {
+    t->state->counter[id].store(counter, std::memory_order_release);
+
+    int key = rnd.Uniform(kNumKeys);
+    char keybuf[20];
+    snprintf(keybuf, sizeof(keybuf), "%016d", key);
+
+    if (rnd.OneIn(2)) {
+      // Write values of the form <key, id, counter, cf, unique_id>
+      // into each of the CFs.
+      // We add some padding to force compactions.
+      int unique_id = rnd.Uniform(1000000);
+
+      // Half of the time directly use WriteBatch. Half of the time use
+      // WriteBatchWithIndex.
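+      // (Both paths go through DB::Write(); a WriteBatchWithIndex wraps a
+      // plain WriteBatch and additionally maintains a searchable index, and
+      // GetWriteBatch() hands back that underlying batch.)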
+      if (rnd.OneIn(2)) {
+        WriteBatch batch;
+        for (int cf = 0; cf < kColumnFamilies; ++cf) {
+          snprintf(valbuf, sizeof(valbuf), "%d.%d.%d.%d.%-1000d", key, id,
+                   static_cast<int>(counter), cf, unique_id);
+          batch.Put(t->state->test->handles_[cf], Slice(keybuf), Slice(valbuf));
+        }
+        ASSERT_OK(db->Write(WriteOptions(), &batch));
+      } else {
+        WriteBatchWithIndex batch(db->GetOptions().comparator);
+        for (int cf = 0; cf < kColumnFamilies; ++cf) {
+          snprintf(valbuf, sizeof(valbuf), "%d.%d.%d.%d.%-1000d", key, id,
+                   static_cast<int>(counter), cf, unique_id);
+          batch.Put(t->state->test->handles_[cf], Slice(keybuf), Slice(valbuf));
+        }
+        ASSERT_OK(db->Write(WriteOptions(), batch.GetWriteBatch()));
+      }
+    } else {
+      // Read a value and verify that it matches the pattern written above
+      // and that writes to all column families were atomic (unique_id is the
+      // same)
+      std::vector<Slice> keys(kColumnFamilies, Slice(keybuf));
+      std::vector<std::string> values;
+      std::vector<Status> statuses =
+          db->MultiGet(ReadOptions(), t->state->test->handles_, keys, &values);
+      Status s = statuses[0];
+      // all statuses have to be the same
+      for (size_t i = 1; i < statuses.size(); ++i) {
+        // they are either both ok or both not-found
+        ASSERT_TRUE((s.ok() && statuses[i].ok()) ||
+                    (s.IsNotFound() && statuses[i].IsNotFound()));
+      }
+      if (s.IsNotFound()) {
+        // Key has not yet been written
+      } else {
+        // Check that the writer thread counter is >= the counter in the value
+        ASSERT_OK(s);
+        int unique_id = -1;
+        for (int i = 0; i < kColumnFamilies; ++i) {
+          int k, w, c, cf, u;
+          ASSERT_EQ(5, sscanf(values[i].c_str(), "%d.%d.%d.%d.%d", &k, &w, &c,
+                              &cf, &u))
+              << values[i];
+          ASSERT_EQ(k, key);
+          ASSERT_GE(w, 0);
+          ASSERT_LT(w, kNumThreads);
+          ASSERT_LE(c, t->state->counter[w].load(std::memory_order_acquire));
+          ASSERT_EQ(cf, i);
+          if (i == 0) {
+            unique_id = u;
+          } else {
+            // this checks that updates across column families happened
+            // atomically -- all unique ids are the same
+            ASSERT_EQ(u, unique_id);
+          }
+        }
+      }
+    }
+    counter++;
+  }
+  t->state->thread_done[id].store(true, std::memory_order_release);
+  fprintf(stderr, "... stopping thread %d after %d ops\n", id, counter);
+}
+
+}  // namespace
+
+class MultiThreadedDBTest : public DBTest,
+                            public ::testing::WithParamInterface<int> {
+ public:
+  virtual void SetUp() override { option_config_ = GetParam(); }
+
+  static std::vector<int> GenerateOptionConfigs() {
+    std::vector<int> optionConfigs;
+    for (int optionConfig = kDefault; optionConfig < kEnd; ++optionConfig) {
+      // skip as HashCuckooRep does not support snapshot
+      if (optionConfig != kHashCuckoo) {
+        optionConfigs.push_back(optionConfig);
+      }
+    }
+    return optionConfigs;
+  }
+};
+
+TEST_P(MultiThreadedDBTest, MultiThreaded) {
+  anon::OptionsOverride options_override;
+  options_override.skip_policy = kSkipNoSnapshot;
+  std::vector<std::string> cfs;
+  for (int i = 1; i < kColumnFamilies; ++i) {
+    cfs.push_back(ToString(i));
+  }
+  CreateAndReopenWithCF(cfs, CurrentOptions(options_override));
+  // Initialize state
+  MTState mt;
+  mt.test = this;
+  mt.stop.store(false, std::memory_order_release);
+  for (int id = 0; id < kNumThreads; id++) {
+    mt.counter[id].store(0, std::memory_order_release);
+    mt.thread_done[id].store(false, std::memory_order_release);
+  }
+
+  // Start threads
+  MTThread thread[kNumThreads];
+  for (int id = 0; id < kNumThreads; id++) {
+    thread[id].state = &mt;
+    thread[id].id = id;
+    env_->StartThread(MTThreadBody, &thread[id]);
+  }
+
+  // Let them run for a while
+  env_->SleepForMicroseconds(kTestSeconds * 1000000);
+
+  // Stop the threads and wait for them to finish
+  mt.stop.store(true, std::memory_order_release);
+  for (int id = 0; id < kNumThreads; id++) {
+    while (mt.thread_done[id].load(std::memory_order_acquire) == false) {
+      env_->SleepForMicroseconds(100000);
+    }
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(
+    MultiThreaded, MultiThreadedDBTest,
+    ::testing::ValuesIn(MultiThreadedDBTest::GenerateOptionConfigs()));
+#endif  // ROCKSDB_LITE
+
+// Group commit test:
+namespace {
+
+static const int kGCNumThreads = 4;
+static const int kGCNumKeys = 1000;
+
+struct GCThread {
+  DB* db;
+  int id;
+  std::atomic<bool> done;
+};
+
+static void GCThreadBody(void* arg) {
+  GCThread* t = reinterpret_cast<GCThread*>(arg);
+  int id = t->id;
+  DB* db = t->db;
+  WriteOptions wo;
+
+  for (int i = 0; i < kGCNumKeys; ++i) {
+    std::string kv(ToString(i + id * kGCNumKeys));
+    ASSERT_OK(db->Put(wo, kv, kv));
+  }
+  t->done = true;
+}
+
+}  // namespace
+
+TEST_F(DBTest, GroupCommitTest) {
+  do {
+    Options options = CurrentOptions();
+    options.env = env_;
+    env_->log_write_slowdown_.store(100);
+    options.statistics = rocksdb::CreateDBStatistics();
+    Reopen(options);
+
+    // Start threads
+    GCThread thread[kGCNumThreads];
+    for (int id = 0; id < kGCNumThreads; id++) {
+      thread[id].id = id;
+      thread[id].db = db_;
+      thread[id].done = false;
+      env_->StartThread(GCThreadBody, &thread[id]);
+    }
+
+    for (int id = 0; id < kGCNumThreads; id++) {
+      while (thread[id].done == false) {
+        env_->SleepForMicroseconds(100000);
+      }
+    }
+    env_->log_write_slowdown_.store(0);
+
+    ASSERT_GT(TestGetTickerCount(options, WRITE_DONE_BY_OTHER), 0);
+
+    std::vector<std::string> expected_db;
+    for (int i = 0; i < kGCNumThreads * kGCNumKeys; ++i) {
+      expected_db.push_back(ToString(i));
+    }
+    std::sort(expected_db.begin(), expected_db.end());
+
+    Iterator* itr = db_->NewIterator(ReadOptions());
+    itr->SeekToFirst();
+    for (auto x : expected_db) {
+      ASSERT_TRUE(itr->Valid());
+      ASSERT_EQ(itr->key().ToString(), x);
+      ASSERT_EQ(itr->value().ToString(), x);
+      itr->Next();
+    }
+    ASSERT_TRUE(!itr->Valid());
+    delete itr;
+
+    HistogramData hist_data = {0, 0, 0, 0, 0};
+    options.statistics->histogramData(DB_WRITE, &hist_data);
+    ASSERT_GT(hist_data.average, 0.0);
+  } while (ChangeOptions(kSkipNoSeekToLast));
+}
+
+namespace {
+typedef std::map<std::string, std::string> KVMap;
+}
+
+class ModelDB : public DB {
+ public:
+  class ModelSnapshot : public Snapshot {
+   public:
+    KVMap map_;
+
+    virtual SequenceNumber GetSequenceNumber() const override {
+      // no need to call this
+      assert(false);
+      return 0;
+    }
+  };
+
+  explicit ModelDB(const Options& options) : options_(options) {}
+  using DB::Put;
+  virtual Status Put(const WriteOptions& o, ColumnFamilyHandle* cf,
+                     const Slice& k, const Slice& v) override {
+    WriteBatch batch;
+    batch.Put(cf, k, v);
+    return Write(o, &batch);
+  }
+  using DB::Delete;
+  virtual Status Delete(const WriteOptions& o, ColumnFamilyHandle* cf,
+                        const Slice& key) override {
+    WriteBatch batch;
+    batch.Delete(cf, key);
+    return Write(o, &batch);
+  }
+  using DB::SingleDelete;
+  virtual Status SingleDelete(const WriteOptions& o, ColumnFamilyHandle* cf,
+                              const Slice& key) override {
+    WriteBatch batch;
+    batch.SingleDelete(cf, key);
+    return Write(o, &batch);
+  }
+  using DB::Merge;
+  virtual Status Merge(const WriteOptions& o, ColumnFamilyHandle* cf,
+                       const Slice& k, const Slice& v) override {
+    WriteBatch batch;
+    batch.Merge(cf, k, v);
+    return Write(o, &batch);
+  }
+  using DB::Get;
+  virtual Status Get(const ReadOptions& options, ColumnFamilyHandle* cf,
+                     const Slice& key, std::string* value) override {
+    return Status::NotSupported(key);
+  }
+
+  using DB::MultiGet;
+  virtual std::vector<Status> MultiGet(
+      const ReadOptions& options,
+      const std::vector<ColumnFamilyHandle*>& column_family,
+      const std::vector<Slice>& keys,
+      std::vector<std::string>* values) override {
+    std::vector<Status> s(keys.size(),
+                          Status::NotSupported("Not implemented."));
+    return s;
+  }
+
+#ifndef ROCKSDB_LITE
+  using DB::AddFile;
+  virtual Status AddFile(ColumnFamilyHandle* column_family,
+                         const std::vector<ExternalSstFileInfo>& file_info_list,
+                         bool move_file) override {
+    return Status::NotSupported("Not implemented.");
+  }
+  virtual Status AddFile(ColumnFamilyHandle* column_family,
+                         const std::vector<std::string>& file_path_list,
+                         bool move_file) override {
+    return Status::NotSupported("Not implemented.");
+  }
+
+  using DB::GetPropertiesOfAllTables;
+  virtual Status GetPropertiesOfAllTables(
+      ColumnFamilyHandle* column_family,
+      TablePropertiesCollection* props) override {
+    return Status();
+  }
+
+  virtual Status GetPropertiesOfTablesInRange(
+      ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
+      TablePropertiesCollection* props) override {
+    return Status();
+  }
+#endif  // ROCKSDB_LITE
+
+  using DB::KeyMayExist;
+  virtual bool KeyMayExist(const ReadOptions& options,
+                           ColumnFamilyHandle* column_family, const Slice& key,
+                           std::string* value,
+                           bool* value_found = nullptr) override {
+    if (value_found != nullptr) {
+      *value_found = false;
+    }
+    return true;  // Not Supported directly
+  }
+  using DB::NewIterator;
+  virtual Iterator* NewIterator(const ReadOptions& options,
+                                ColumnFamilyHandle* column_family) override {
+    if (options.snapshot == nullptr) {
+      KVMap* saved = new KVMap;
+      *saved = map_;
+      return new ModelIter(saved, true);
+    } else {
+      const KVMap* snapshot_state =
+          &(reinterpret_cast<const ModelSnapshot*>(options.snapshot)->map_);
+      return new ModelIter(snapshot_state, false);
+    }
+  }
+  virtual Status NewIterators(
+      const ReadOptions& options,
+      const std::vector<ColumnFamilyHandle*>& column_family,
+      std::vector<Iterator*>* iterators) override {
+    return Status::NotSupported("Not supported yet");
+  }
+  virtual const Snapshot* GetSnapshot() override {
+    ModelSnapshot* snapshot = new ModelSnapshot;
+    snapshot->map_ = map_;
+    return snapshot;
+  }
+
+  virtual void ReleaseSnapshot(const Snapshot* snapshot) override {
+    delete reinterpret_cast<const ModelSnapshot*>(snapshot);
+  }
+
+  virtual Status Write(const WriteOptions& options,
+                       WriteBatch* batch) override {
+    class Handler : public WriteBatch::Handler {
+     public:
+      KVMap* map_;
+      virtual void Put(const Slice& key, const Slice& value) override {
+        (*map_)[key.ToString()] = value.ToString();
+      }
+      virtual void Merge(const Slice& key, const Slice& value) override {
+        // ignore merge for now
+        // (*map_)[key.ToString()] = value.ToString();
+      }
+      virtual void Delete(const Slice& key) override {
+        map_->erase(key.ToString());
+      }
+    };
+    Handler handler;
+    handler.map_ = &map_;
+    return batch->Iterate(&handler);
+  }
+
+  using DB::GetProperty;
+  virtual bool GetProperty(ColumnFamilyHandle* column_family,
+                           const Slice& property, std::string* value) override {
+    return false;
+  }
+  using DB::GetIntProperty;
+  virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
+                              const Slice& property, uint64_t* value) override {
+    return false;
+  }
+  using DB::GetAggregatedIntProperty;
+  virtual bool GetAggregatedIntProperty(const Slice& property,
+                                        uint64_t* value) override {
+    return false;
+  }
+  using DB::GetApproximateSizes;
+  virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
+                                   const Range* range, int n, uint64_t* sizes,
+                                   bool include_memtable) override {
+    for (int i = 0; i < n; i++) {
+      sizes[i] = 0;
+    }
+  }
+  using DB::CompactRange;
+  virtual Status CompactRange(const CompactRangeOptions& options,
+                              ColumnFamilyHandle* column_family,
+                              const Slice* start, const Slice* end) override {
+    return Status::NotSupported("Not supported operation.");
+  }
+
+  using DB::CompactFiles;
+  virtual Status CompactFiles(const CompactionOptions& compact_options,
+                              ColumnFamilyHandle* column_family,
+                              const std::vector<std::string>& input_file_names,
+                              const int output_level,
+                              const int output_path_id = -1) override {
+    return Status::NotSupported("Not supported operation.");
+  }
+
+  Status PauseBackgroundWork() override {
+    return Status::NotSupported("Not supported operation.");
+  }
+
+  Status ContinueBackgroundWork() override {
+    return Status::NotSupported("Not supported operation.");
+  }
+
+  Status EnableAutoCompaction(
+      const std::vector<ColumnFamilyHandle*>& column_family_handles) override {
+    return Status::NotSupported("Not supported operation.");
+  }
+
+  using DB::NumberLevels;
+  virtual int NumberLevels(ColumnFamilyHandle* column_family) override {
+    return 1;
+  }
+
+  using DB::MaxMemCompactionLevel;
+  virtual int MaxMemCompactionLevel(
+      ColumnFamilyHandle* column_family) override {
+    return 1;
+  }
+
+  using DB::Level0StopWriteTrigger;
+  virtual int Level0StopWriteTrigger(
+      ColumnFamilyHandle* column_family) override {
+    return -1;
+  }
+
+  virtual const std::string& GetName() const override { return name_; }
+
+  virtual Env* GetEnv() const override { return nullptr; }
+
+  using DB::GetOptions;
+  virtual const Options& GetOptions(
+      ColumnFamilyHandle* column_family) const override {
+    return options_;
+  }
+
+  using DB::GetDBOptions;
+  virtual const DBOptions& GetDBOptions() const override { return options_; }
+
+  using DB::Flush;
+  virtual Status Flush(const rocksdb::FlushOptions& options,
+                       ColumnFamilyHandle* column_family) override {
+    Status ret;
+    return ret;
+  }
+
+  virtual Status SyncWAL() override { return Status::OK(); }
+
+#ifndef ROCKSDB_LITE
+  virtual Status DisableFileDeletions() override { return Status::OK(); }
+
+  virtual Status EnableFileDeletions(bool force) override {
+    return Status::OK();
+  }
+  virtual Status GetLiveFiles(std::vector<std::string>&, uint64_t* size,
+                              bool flush_memtable = true) override {
+    return Status::OK();
+  }
+
+  virtual Status GetSortedWalFiles(VectorLogPtr& files) override {
+    return Status::OK();
+  }
+
+  virtual Status DeleteFile(std::string name) override { return Status::OK(); }
+
+  virtual Status GetUpdatesSince(
+      rocksdb::SequenceNumber, unique_ptr<TransactionLogIterator>*,
+      const TransactionLogIterator::ReadOptions& read_options =
+          TransactionLogIterator::ReadOptions()) override {
+    return Status::NotSupported("Not supported in Model DB");
+  }
+
+  virtual void GetColumnFamilyMetaData(
+      ColumnFamilyHandle* column_family,
+      ColumnFamilyMetaData* metadata) override {}
+#endif  // ROCKSDB_LITE
+
+  virtual Status GetDbIdentity(std::string& identity) const override {
+    return Status::OK();
+  }
+
+  virtual SequenceNumber GetLatestSequenceNumber() const override { return 0; }
+
+  virtual ColumnFamilyHandle* DefaultColumnFamily() const override {
+    return nullptr;
+  }
+
+ private:
+  class ModelIter : public Iterator {
+   public:
+    ModelIter(const KVMap* map, bool owned)
+        : map_(map), owned_(owned), iter_(map_->end()) {}
+    ~ModelIter() {
+      if (owned_) delete map_;
+    }
+    virtual bool Valid() const override { return iter_ != map_->end(); }
+    virtual void SeekToFirst() override { iter_ = map_->begin(); }
+    virtual void SeekToLast() override {
+      if (map_->empty()) {
+        iter_ = map_->end();
+      } else {
+        iter_ = map_->find(map_->rbegin()->first);
+      }
+    }
+    virtual void Seek(const Slice& k) override {
+      iter_ = map_->lower_bound(k.ToString());
+    }
+    virtual void Next() override { ++iter_; }
+    virtual void Prev() override {
+      if (iter_ == map_->begin()) {
+        iter_ = map_->end();
+        return;
+      }
+      --iter_;
+    }
+
+    virtual Slice key() const override { return iter_->first; }
+    virtual Slice value() const override { return iter_->second; }
+    virtual Status status() const override { return Status::OK(); }
+
+   private:
+    const KVMap* const map_;
+    const bool owned_;  // Do we own map_
+    KVMap::const_iterator iter_;
+  };
+  const Options options_;
+  KVMap map_;
+  std::string name_ = "";
+};
+
+static std::string RandomKey(Random* rnd, int minimum = 0) {
+  int len;
+  do {
+    len = (rnd->OneIn(3)
+               ? 1  // Short sometimes to encourage collisions
+               : (rnd->OneIn(100) ? rnd->Skewed(10) : rnd->Uniform(10)));
+  } while (len < minimum);
+  return test::RandomKey(rnd, len);
+}
+
+static bool CompareIterators(int step, DB* model, DB* db,
+                             const Snapshot* model_snap,
+                             const Snapshot* db_snap) {
+  ReadOptions options;
+  options.snapshot = model_snap;
+  Iterator* miter = model->NewIterator(options);
+  options.snapshot = db_snap;
+  Iterator* dbiter = db->NewIterator(options);
+  bool ok = true;
+  int count = 0;
+  for (miter->SeekToFirst(), dbiter->SeekToFirst();
+       ok && miter->Valid() && dbiter->Valid(); miter->Next(), dbiter->Next()) {
+    count++;
+    if (miter->key().compare(dbiter->key()) != 0) {
+      fprintf(stderr, "step %d: Key mismatch: '%s' vs. '%s'\n", step,
+              EscapeString(miter->key()).c_str(),
+              EscapeString(dbiter->key()).c_str());
+      ok = false;
+      break;
+    }
+
+    if (miter->value().compare(dbiter->value()) != 0) {
+      fprintf(stderr, "step %d: Value mismatch for key '%s': '%s' vs. '%s'\n",
+              step, EscapeString(miter->key()).c_str(),
+              EscapeString(miter->value()).c_str(),
+              EscapeString(dbiter->value()).c_str());
+      ok = false;
+    }
+  }
+
+  if (ok) {
+    if (miter->Valid() != dbiter->Valid()) {
+      fprintf(stderr, "step %d: Mismatch at end of iterators: %d vs. %d\n",
+              step, miter->Valid(), dbiter->Valid());
+      ok = false;
+    }
+  }
+  delete miter;
+  delete dbiter;
+  return ok;
+}
+
+class DBTestRandomized : public DBTest,
+                         public ::testing::WithParamInterface<int> {
+ public:
+  virtual void SetUp() override { option_config_ = GetParam(); }
+
+  static std::vector<int> GenerateOptionConfigs() {
+    std::vector<int> option_configs;
+    // skip cuckoo hash as it does not support snapshot.
+    for (int option_config = kDefault; option_config < kEnd; ++option_config) {
+      if (!ShouldSkipOptions(option_config, kSkipDeletesFilterFirst |
+                                                kSkipNoSeekToLast |
+                                                kSkipHashCuckoo)) {
+        option_configs.push_back(option_config);
+      }
+    }
+    option_configs.push_back(kBlockBasedTableWithIndexRestartInterval);
+    return option_configs;
+  }
+};
+
+INSTANTIATE_TEST_CASE_P(
+    DBTestRandomized, DBTestRandomized,
+    ::testing::ValuesIn(DBTestRandomized::GenerateOptionConfigs()));
+
+TEST_P(DBTestRandomized, Randomized) {
+  anon::OptionsOverride options_override;
+  options_override.skip_policy = kSkipNoSnapshot;
+  Options options = CurrentOptions(options_override);
+  DestroyAndReopen(options);
+
+  Random rnd(test::RandomSeed() + GetParam());
+  ModelDB model(options);
+  const int N = 10000;
+  const Snapshot* model_snap = nullptr;
+  const Snapshot* db_snap = nullptr;
+  std::string k, v;
+  for (int step = 0; step < N; step++) {
+    // TODO(sanjay): Test Get() works
+    int p = rnd.Uniform(100);
+    int minimum = 0;
+    if (option_config_ == kHashSkipList || option_config_ == kHashLinkList ||
+        option_config_ == kHashCuckoo ||
+        option_config_ == kPlainTableFirstBytePrefix ||
+        option_config_ == kBlockBasedTableWithWholeKeyHashIndex ||
+        option_config_ == kBlockBasedTableWithPrefixHashIndex) {
+      minimum = 1;
+    }
+    if (p < 45) {  // Put
+      k = RandomKey(&rnd, minimum);
+      v = RandomString(&rnd,
+                       rnd.OneIn(20) ? 100 + rnd.Uniform(100) : rnd.Uniform(8));
+      ASSERT_OK(model.Put(WriteOptions(), k, v));
+      ASSERT_OK(db_->Put(WriteOptions(), k, v));
+    } else if (p < 90) {  // Delete
+      k = RandomKey(&rnd, minimum);
+      ASSERT_OK(model.Delete(WriteOptions(), k));
+      ASSERT_OK(db_->Delete(WriteOptions(), k));
+    } else {  // Multi-element batch
+      WriteBatch b;
+      const int num = rnd.Uniform(8);
+      for (int i = 0; i < num; i++) {
+        if (i == 0 || !rnd.OneIn(10)) {
+          k = RandomKey(&rnd, minimum);
+        } else {
+          // Periodically re-use the same key from the previous iter, so
+          // we have multiple entries in the write batch for the same key
+        }
+        if (rnd.OneIn(2)) {
+          v = RandomString(&rnd, rnd.Uniform(10));
+          b.Put(k, v);
+        } else {
+          b.Delete(k);
+        }
+      }
+      ASSERT_OK(model.Write(WriteOptions(), &b));
+      ASSERT_OK(db_->Write(WriteOptions(), &b));
+    }
+
+    if ((step % 100) == 0) {
+      // For DB instances that use the hash index + block-based table, the
+      // iterator can become invalid when seeking a non-existent key, rather
+      // than returning a key that is close to it.
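+      // (A hash-based index only supports point lookups within a prefix, so
+      // a Seek() to a missing key is not guaranteed to land on the next
+      // larger key the way a binary-search index is.)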
+      if (option_config_ != kBlockBasedTableWithWholeKeyHashIndex &&
+          option_config_ != kBlockBasedTableWithPrefixHashIndex) {
+        ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
+        ASSERT_TRUE(CompareIterators(step, &model, db_, model_snap, db_snap));
+      }
+
+      // Save a snapshot from each DB this time that we'll use next
+      // time we compare things, to make sure the current state is
+      // preserved with the snapshot
+      if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
+      if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
+
+      Reopen(options);
+      ASSERT_TRUE(CompareIterators(step, &model, db_, nullptr, nullptr));
+
+      model_snap = model.GetSnapshot();
+      db_snap = db_->GetSnapshot();
+    }
+  }
+  if (model_snap != nullptr) model.ReleaseSnapshot(model_snap);
+  if (db_snap != nullptr) db_->ReleaseSnapshot(db_snap);
+}
+
+TEST_F(DBTest, MultiGetSimple) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    ASSERT_OK(Put(1, "k1", "v1"));
+    ASSERT_OK(Put(1, "k2", "v2"));
+    ASSERT_OK(Put(1, "k3", "v3"));
+    ASSERT_OK(Put(1, "k4", "v4"));
+    ASSERT_OK(Delete(1, "k4"));
+    ASSERT_OK(Put(1, "k5", "v5"));
+    ASSERT_OK(Delete(1, "no_key"));
+
+    std::vector<Slice> keys({"k1", "k2", "k3", "k4", "k5", "no_key"});
+
+    std::vector<std::string> values(20, "Temporary data to be overwritten");
+    std::vector<ColumnFamilyHandle*> cfs(keys.size(), handles_[1]);
+
+    std::vector<Status> s = db_->MultiGet(ReadOptions(), cfs, keys, &values);
+    ASSERT_EQ(values.size(), keys.size());
+    ASSERT_EQ(values[0], "v1");
+    ASSERT_EQ(values[1], "v2");
+    ASSERT_EQ(values[2], "v3");
+    ASSERT_EQ(values[4], "v5");
+
+    ASSERT_OK(s[0]);
+    ASSERT_OK(s[1]);
+    ASSERT_OK(s[2]);
+    ASSERT_TRUE(s[3].IsNotFound());
+    ASSERT_OK(s[4]);
+    ASSERT_TRUE(s[5].IsNotFound());
+  } while (ChangeCompactOptions());
+}
+
+TEST_F(DBTest, MultiGetEmpty) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    // Empty Key Set
+    std::vector<Slice> keys;
+    std::vector<std::string> values;
+    std::vector<ColumnFamilyHandle*> cfs;
+    std::vector<Status> s = db_->MultiGet(ReadOptions(), cfs, keys, &values);
+    ASSERT_EQ(s.size(), 0U);
+
+    // Empty Database, Empty Key Set
+    Options options = CurrentOptions();
+    options.create_if_missing = true;
+    DestroyAndReopen(options);
+    CreateAndReopenWithCF({"pikachu"}, options);
+    s = db_->MultiGet(ReadOptions(), cfs, keys, &values);
+    ASSERT_EQ(s.size(), 0U);
+
+    // Empty Database, Search for Keys
+    keys.resize(2);
+    keys[0] = "a";
+    keys[1] = "b";
+    cfs.push_back(handles_[0]);
+    cfs.push_back(handles_[1]);
+    s = db_->MultiGet(ReadOptions(), cfs, keys, &values);
+    ASSERT_EQ(static_cast<int>(s.size()), 2);
+    ASSERT_TRUE(s[0].IsNotFound() && s[1].IsNotFound());
+  } while (ChangeCompactOptions());
+}
+
+TEST_F(DBTest, BlockBasedTablePrefixIndexTest) {
+  // create a DB with block prefix index
+  BlockBasedTableOptions table_options;
+  Options options = CurrentOptions();
+  table_options.index_type = BlockBasedTableOptions::kHashSearch;
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
+
+  Reopen(options);
+  ASSERT_OK(Put("k1", "v1"));
+  Flush();
+  ASSERT_OK(Put("k2", "v2"));
+
+  // Reopen it without prefix extractor, make sure everything still works.
+  // RocksDB should just fall back to the binary index.
+  table_options.index_type = BlockBasedTableOptions::kBinarySearch;
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+  options.prefix_extractor.reset();
+
+  Reopen(options);
+  ASSERT_EQ("v1", Get("k1"));
+  ASSERT_EQ("v2", Get("k2"));
+}
+
+TEST_F(DBTest, ChecksumTest) {
+  BlockBasedTableOptions table_options;
+  Options options = CurrentOptions();
+
+  table_options.checksum = kCRC32c;
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+  Reopen(options);
+  ASSERT_OK(Put("a", "b"));
+  ASSERT_OK(Put("c", "d"));
+  ASSERT_OK(Flush());  // table with crc checksum
+
+  table_options.checksum = kxxHash;
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+  Reopen(options);
+  ASSERT_OK(Put("e", "f"));
+  ASSERT_OK(Put("g", "h"));
+  ASSERT_OK(Flush());  // table with xxhash checksum
+
+  table_options.checksum = kCRC32c;
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+  Reopen(options);
+  // verify that both tables can be read back under the crc32c setting
+  ASSERT_EQ("b", Get("a"));
+  ASSERT_EQ("d", Get("c"));
+  ASSERT_EQ("f", Get("e"));
+  ASSERT_EQ("h", Get("g"));
+
+  table_options.checksum = kxxHash;
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+  Reopen(options);
+  // ... and under the xxhash setting as well
+  ASSERT_EQ("b", Get("a"));
+  ASSERT_EQ("d", Get("c"));
+  ASSERT_EQ("f", Get("e"));
+  ASSERT_EQ("h", Get("g"));
+}
+
+#ifndef ROCKSDB_LITE
+TEST_P(DBTestWithParam, FIFOCompactionTest) {
+  for (int iter = 0; iter < 2; ++iter) {
+    // first iteration -- auto compaction
+    // second iteration -- manual compaction
+    Options options;
+    options.compaction_style = kCompactionStyleFIFO;
+    options.write_buffer_size = 100 << 10;  // 100KB
+    options.arena_block_size = 4096;
+    options.compaction_options_fifo.max_table_files_size = 500 << 10;  // 500KB
+    options.compression = kNoCompression;
+    options.create_if_missing = true;
+    options.max_subcompactions = max_subcompactions_;
+    if (iter == 1) {
+      options.disable_auto_compactions = true;
+    }
+    options = CurrentOptions(options);
+    DestroyAndReopen(options);
+
+    Random rnd(301);
+    for (int i = 0; i < 6; ++i) {
+      for (int j = 0; j < 110; ++j) {
+        ASSERT_OK(Put(ToString(i * 100 + j), RandomString(&rnd, 980)));
+      }
+      // flush should happen here
+      ASSERT_OK(dbfull()->TEST_WaitForFlushMemTable());
+    }
+    if (iter == 0) {
+      ASSERT_OK(dbfull()->TEST_WaitForCompact());
+    } else {
+      CompactRangeOptions cro;
+      cro.exclusive_manual_compaction = exclusive_manual_compaction_;
+      ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr));
+    }
+    // only 5 files should survive
+    ASSERT_EQ(NumTableFilesAtLevel(0), 5);
+    for (int i = 0; i < 50; ++i) {
+      // these keys should be deleted in previous compaction
+      ASSERT_EQ("NOT_FOUND", Get(ToString(i)));
+    }
+  }
+}
+#endif  // ROCKSDB_LITE
+
+// verify that we correctly deprecated timeout_hint_us
+TEST_F(DBTest, SimpleWriteTimeoutTest) {
+  WriteOptions write_opt;
+  write_opt.timeout_hint_us = 0;
+  ASSERT_OK(Put(Key(1), Key(1) + std::string(100, 'v'), write_opt));
+  write_opt.timeout_hint_us = 10;
+  ASSERT_NOK(Put(Key(1), Key(1) + std::string(100, 'v'), write_opt));
+}
+
+#ifndef ROCKSDB_LITE
+/*
+ * This test is not reliable enough as it heavily depends on disk behavior.
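+ * The assertions below therefore allow generous slack, checking the measured
+ * ratios against 0.8 and 0.6 rather than the exact 0.7 and 0.5 targets.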
+ */
+TEST_F(DBTest, RateLimitingTest) {
+  Options options = CurrentOptions();
+  options.write_buffer_size = 1 << 20;  // 1MB
+  options.level0_file_num_compaction_trigger = 2;
+  options.target_file_size_base = 1 << 20;     // 1MB
+  options.max_bytes_for_level_base = 4 << 20;  // 4MB
+  options.max_bytes_for_level_multiplier = 4;
+  options.compression = kNoCompression;
+  options.create_if_missing = true;
+  options.env = env_;
+  options.IncreaseParallelism(4);
+  DestroyAndReopen(options);
+
+  WriteOptions wo;
+  wo.disableWAL = true;
+
+  // # no rate limiting
+  Random rnd(301);
+  uint64_t start = env_->NowMicros();
+  // Write ~96M data
+  for (int64_t i = 0; i < (96 << 10); ++i) {
+    ASSERT_OK(
+        Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
+  }
+  uint64_t elapsed = env_->NowMicros() - start;
+  double raw_rate = env_->bytes_written_ * 1000000.0 / elapsed;
+  Close();
+
+  // # rate limiting with 0.7 x threshold
+  options.rate_limiter.reset(
+      NewGenericRateLimiter(static_cast<int64_t>(0.7 * raw_rate)));
+  env_->bytes_written_ = 0;
+  DestroyAndReopen(options);
+
+  start = env_->NowMicros();
+  // Write ~96M data
+  for (int64_t i = 0; i < (96 << 10); ++i) {
+    ASSERT_OK(
+        Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
+  }
+  elapsed = env_->NowMicros() - start;
+  Close();
+  ASSERT_EQ(options.rate_limiter->GetTotalBytesThrough(), env_->bytes_written_);
+  double ratio = env_->bytes_written_ * 1000000 / elapsed / raw_rate;
+  fprintf(stderr, "write rate ratio = %.2lf, expected 0.7\n", ratio);
+  ASSERT_TRUE(ratio < 0.8);
+
+  // # rate limiting with half of the raw_rate
+  options.rate_limiter.reset(
+      NewGenericRateLimiter(static_cast<int64_t>(raw_rate / 2)));
+  env_->bytes_written_ = 0;
+  DestroyAndReopen(options);
+
+  start = env_->NowMicros();
+  // Write ~96M data
+  for (int64_t i = 0; i < (96 << 10); ++i) {
+    ASSERT_OK(
+        Put(RandomString(&rnd, 32), RandomString(&rnd, (1 << 10) + 1), wo));
+  }
+  elapsed = env_->NowMicros() - start;
+  Close();
+  ASSERT_EQ(options.rate_limiter->GetTotalBytesThrough(), env_->bytes_written_);
+  ratio = env_->bytes_written_ * 1000000 / elapsed / raw_rate;
+  fprintf(stderr, "write rate ratio = %.2lf, expected 0.5\n", ratio);
+  ASSERT_LT(ratio, 0.6);
+}
+
+TEST_F(DBTest, TableOptionsSanitizeTest) {
+  Options options = CurrentOptions();
+  options.create_if_missing = true;
+  DestroyAndReopen(options);
+  ASSERT_EQ(db_->GetOptions().allow_mmap_reads, false);
+
+  options.table_factory.reset(new PlainTableFactory());
+  options.prefix_extractor.reset(NewNoopTransform());
+  Destroy(options);
+  ASSERT_TRUE(!TryReopen(options).IsNotSupported());
+
+  // Test for check of prefix_extractor when hash index is used for
+  // block-based table
+  BlockBasedTableOptions to;
+  to.index_type = BlockBasedTableOptions::kHashSearch;
+  options = CurrentOptions();
+  options.create_if_missing = true;
+  options.table_factory.reset(NewBlockBasedTableFactory(to));
+  ASSERT_TRUE(TryReopen(options).IsInvalidArgument());
+  options.prefix_extractor.reset(NewFixedPrefixTransform(1));
+  ASSERT_OK(TryReopen(options));
+}
+
+// On Windows you can have either a memory mapped file or a file
+// with unbuffered access, but not both.
+// So this combination triggers an assert, and the test does not make sense
+// to run.
+#ifndef OS_WIN
+TEST_F(DBTest, MmapAndBufferOptions) {
+  Options options = CurrentOptions();
+
+  options.allow_os_buffer = false;
+  options.allow_mmap_reads = true;
+  ASSERT_NOK(TryReopen(options));
+
+  // All other combinations are acceptable
+  options.allow_os_buffer = true;
+  ASSERT_OK(TryReopen(options));
+
+  options.allow_os_buffer = false;
+  options.allow_mmap_reads = false;
+  ASSERT_OK(TryReopen(options));
+
+  options.allow_os_buffer = true;
+  ASSERT_OK(TryReopen(options));
+}
+#endif
+
+TEST_F(DBTest, ConcurrentMemtableNotSupported) {
+  Options options = CurrentOptions();
+  options.allow_concurrent_memtable_write = true;
+  options.soft_pending_compaction_bytes_limit = 0;
+  options.hard_pending_compaction_bytes_limit = 100;
+  options.create_if_missing = true;
+
+  DestroyDB(dbname_, options);
+  options.memtable_factory.reset(NewHashLinkListRepFactory(4, 0, 3, true, 4));
+  ASSERT_NOK(TryReopen(options));
+
+  options.memtable_factory.reset(new SkipListFactory);
+  ASSERT_OK(TryReopen(options));
+
+  ColumnFamilyOptions cf_options(options);
+  cf_options.memtable_factory.reset(
+      NewHashLinkListRepFactory(4, 0, 3, true, 4));
+  ColumnFamilyHandle* handle;
+  ASSERT_NOK(db_->CreateColumnFamily(cf_options, "name", &handle));
+}
+
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBTest, SanitizeNumThreads) {
+  for (int attempt = 0; attempt < 2; attempt++) {
+    const size_t kTotalTasks = 8;
+    test::SleepingBackgroundTask sleeping_tasks[kTotalTasks];
+
+    Options options = CurrentOptions();
+    if (attempt == 0) {
+      options.max_background_compactions = 3;
+      options.max_background_flushes = 2;
+    }
+    options.create_if_missing = true;
+    DestroyAndReopen(options);
+
+    for (size_t i = 0; i < kTotalTasks; i++) {
+      // Insert 4 tasks into the low priority queue and 4 tasks into the
+      // high priority queue
+      env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask,
+                     &sleeping_tasks[i],
+                     (i < 4) ? Env::Priority::LOW : Env::Priority::HIGH);
+    }
+
+    // Wait 100 milliseconds for them to be scheduled.
+    env_->SleepForMicroseconds(100000);
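+    // (Env thread pools are only ever grown: SanitizeOptions() calls
+    // Env::IncBackgroundThreadsIfNeeded(), so the pool sizes of 3 (LOW) and
+    // 2 (HIGH) set up by the first attempt persist into the second one.)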
+    // pool size 3, total task 4. Queue size should be 1.
+    ASSERT_EQ(1U, options.env->GetThreadPoolQueueLen(Env::Priority::LOW));
+    // pool size 2, total task 4. Queue size should be 2.
+    ASSERT_EQ(2U, options.env->GetThreadPoolQueueLen(Env::Priority::HIGH));
+
+    for (size_t i = 0; i < kTotalTasks; i++) {
+      sleeping_tasks[i].WakeUp();
+      sleeping_tasks[i].WaitUntilDone();
+    }
+
+    ASSERT_OK(Put("abc", "def"));
+    ASSERT_EQ("def", Get("abc"));
+    Flush();
+    ASSERT_EQ("def", Get("abc"));
+  }
+}
+
+TEST_F(DBTest, WriteSingleThreadEntry) {
+  std::vector<std::thread> threads;
+  dbfull()->TEST_LockMutex();
+  auto w = dbfull()->TEST_BeginWrite();
+  threads.emplace_back([&] { Put("a", "b"); });
+  env_->SleepForMicroseconds(10000);
+  threads.emplace_back([&] { Flush(); });
+  env_->SleepForMicroseconds(10000);
+  dbfull()->TEST_UnlockMutex();
+  dbfull()->TEST_LockMutex();
+  dbfull()->TEST_EndWrite(w);
+  dbfull()->TEST_UnlockMutex();
+
+  for (auto& t : threads) {
+    t.join();
+  }
+}
+
+TEST_F(DBTest, DisableDataSyncTest) {
+  env_->sync_counter_.store(0);
+  // iter 0 -- no sync
+  // iter 1 -- sync
+  for (int iter = 0; iter < 2; ++iter) {
+    Options options = CurrentOptions();
+    options.disableDataSync = iter == 0;
+    options.create_if_missing = true;
+    options.num_levels = 10;
+    options.env = env_;
+    Reopen(options);
+    CreateAndReopenWithCF({"pikachu"}, options);
+
+    MakeTables(10, "a", "z");
+    Compact("a", "z");
+
+    if (iter == 0) {
+      ASSERT_EQ(env_->sync_counter_.load(), 0);
+    } else {
+      ASSERT_GT(env_->sync_counter_.load(), 0);
+    }
+    Destroy(options);
+  }
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBTest, DynamicMemtableOptions) {
+  const uint64_t k64KB = 1 << 16;
+  const uint64_t k128KB = 1 << 17;
+  const uint64_t k5KB = 5 * 1024;
+  const int kNumPutsBeforeWaitForFlush = 64;
+  Options options;
+  options.env = env_;
+  options.create_if_missing = true;
+  options.compression = kNoCompression;
+  options.max_background_compactions = 1;
+  options.write_buffer_size = k64KB;
+  options.arena_block_size = 16 * 1024;
+  options.max_write_buffer_number = 2;
+  // Don't trigger compact/slowdown/stop
+  options.level0_file_num_compaction_trigger = 1024;
+  options.level0_slowdown_writes_trigger = 1024;
+  options.level0_stop_writes_trigger = 1024;
+  DestroyAndReopen(options);
+
+  auto gen_l0_kb = [this, kNumPutsBeforeWaitForFlush](int size) {
+    Random rnd(301);
+    for (int i = 0; i < size; i++) {
+      ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024)));
+
+      // The following condition prevents a race condition between flush jobs
+      // acquiring work and this thread filling up multiple memtables. Without
+      // this, the flush might produce fewer files than expected because
+      // multiple memtables are flushed into a single L0 file. This race
+      // condition affects assertion (A).
+      if (i % kNumPutsBeforeWaitForFlush == kNumPutsBeforeWaitForFlush - 1) {
+        dbfull()->TEST_WaitForFlushMemTable();
+      }
+    }
+    dbfull()->TEST_WaitForFlushMemTable();
+  };
+
+  // Test write_buffer_size
+  gen_l0_kb(64);
+  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
+  ASSERT_LT(SizeAtLevel(0), k64KB + k5KB);
+  ASSERT_GT(SizeAtLevel(0), k64KB - k5KB * 2);
+
+  // Clean up L0
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_EQ(NumTableFilesAtLevel(0), 0);
+
+  // Increase buffer size
+  ASSERT_OK(dbfull()->SetOptions({
+      {"write_buffer_size", "131072"},
+  }));
+
+  // The existing memtable is still 64KB in size; after it becomes immutable,
+  // the next memtable will be 128KB in size. Write 256KB total; we should
+  // have a 64KB L0 file, a 128KB L0 file, and a memtable holding 64KB of data.
+  gen_l0_kb(256);
+  ASSERT_EQ(NumTableFilesAtLevel(0), 2);  // (A)
+  ASSERT_LT(SizeAtLevel(0), k128KB + k64KB + 2 * k5KB);
+  ASSERT_GT(SizeAtLevel(0), k128KB + k64KB - 4 * k5KB);
+
+  // Test max_write_buffer_number
+  // Block compaction thread, which will also block the flushes because
+  // max_background_flushes == 0, so flushes are getting executed by the
+  // compaction thread
+  env_->SetBackgroundThreads(1, Env::LOW);
+  test::SleepingBackgroundTask sleeping_task_low;
+  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
+                 Env::Priority::LOW);
+  // Start from scratch and disable compaction/flush. Flush can only happen
+  // during compaction but trigger is pretty high
+  options.max_background_flushes = 0;
+  options.disable_auto_compactions = true;
+  DestroyAndReopen(options);
+
+  // Put until writes are stopped, bounded by 256 puts. We should see stop at
+  // ~128KB
+  int count = 0;
+  Random rnd(301);
+
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DBImpl::DelayWrite:Wait",
+      [&](void* arg) { sleeping_task_low.WakeUp(); });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  while (!sleeping_task_low.WokenUp() && count < 256) {
+    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
+    count++;
+  }
+  ASSERT_GT(static_cast<double>(count), 128 * 0.8);
+  ASSERT_LT(static_cast<double>(count), 128 * 1.2);
+
+  sleeping_task_low.WaitUntilDone();
+
+  // Increase
+  ASSERT_OK(dbfull()->SetOptions({
+      {"max_write_buffer_number", "8"},
+  }));
+  // Clean up memtable and L0
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+
+  sleeping_task_low.Reset();
+  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
+                 Env::Priority::LOW);
+  count = 0;
+  while (!sleeping_task_low.WokenUp() && count < 1024) {
+    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
+    count++;
+  }
+// Windows fails this test. Will tune in the future and figure out an
+// appropriate number
+#ifndef OS_WIN
+  ASSERT_GT(static_cast<double>(count), 512 * 0.8);
+  ASSERT_LT(static_cast<double>(count), 512 * 1.2);
+#endif
+  sleeping_task_low.WaitUntilDone();
+
+  // Decrease
+  ASSERT_OK(dbfull()->SetOptions({
+      {"max_write_buffer_number", "4"},
+  }));
+  // Clean up memtable and L0
+  dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+
+  sleeping_task_low.Reset();
+  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
+                 Env::Priority::LOW);
+
+  count = 0;
+  while (!sleeping_task_low.WokenUp() && count < 1024) {
+    ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), WriteOptions()));
+    count++;
+  }
+// Windows fails this test. Will tune in the future and figure out an
+// appropriate number
+#ifndef OS_WIN
+  ASSERT_GT(static_cast<double>(count), 256 * 0.8);
+  ASSERT_LT(static_cast<double>(count), 266 * 1.2);
+#endif
+  sleeping_task_low.WaitUntilDone();
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+#endif  // ROCKSDB_LITE
+
+#if ROCKSDB_USING_THREAD_STATUS
+namespace {
+void VerifyOperationCount(Env* env, ThreadStatus::OperationType op_type,
+                          int expected_count) {
+  int op_count = 0;
+  std::vector<ThreadStatus> thread_list;
+  ASSERT_OK(env->GetThreadList(&thread_list));
+  for (auto thread : thread_list) {
+    if (thread.operation_type == op_type) {
+      op_count++;
+    }
+  }
+  ASSERT_EQ(op_count, expected_count);
+}
+}  // namespace
+
+TEST_F(DBTest, GetThreadStatus) {
+  Options options;
+  options.env = env_;
+  options.enable_thread_tracking = true;
+  TryReopen(options);
+
+  std::vector<ThreadStatus> thread_list;
+  Status s = env_->GetThreadList(&thread_list);
+
+  for (int i = 0; i < 2; ++i) {
+    // repeat the test with a different number of high / low priority threads
+    const int kTestCount = 3;
+    const unsigned int kHighPriCounts[kTestCount] = {3, 2, 5};
+    const unsigned int kLowPriCounts[kTestCount] = {10, 15, 3};
+    for (int test = 0; test < kTestCount; ++test) {
+      // Change the number of threads in high / low priority pool.
+      env_->SetBackgroundThreads(kHighPriCounts[test], Env::HIGH);
+      env_->SetBackgroundThreads(kLowPriCounts[test], Env::LOW);
+      // Wait to ensure that all the threads have been registered
+      env_->SleepForMicroseconds(100000);
+      s = env_->GetThreadList(&thread_list);
+      ASSERT_OK(s);
+      unsigned int thread_type_counts[ThreadStatus::NUM_THREAD_TYPES];
+      memset(thread_type_counts, 0, sizeof(thread_type_counts));
+      for (auto thread : thread_list) {
+        ASSERT_LT(thread.thread_type, ThreadStatus::NUM_THREAD_TYPES);
+        thread_type_counts[thread.thread_type]++;
+      }
+      // Verify the total number of threads
+      ASSERT_EQ(thread_type_counts[ThreadStatus::HIGH_PRIORITY] +
+                    thread_type_counts[ThreadStatus::LOW_PRIORITY],
+                kHighPriCounts[test] + kLowPriCounts[test]);
+      // Verify the number of high-priority threads
+      ASSERT_EQ(thread_type_counts[ThreadStatus::HIGH_PRIORITY],
+                kHighPriCounts[test]);
+      // Verify the number of low-priority threads
+      ASSERT_EQ(thread_type_counts[ThreadStatus::LOW_PRIORITY],
+                kLowPriCounts[test]);
+    }
+    if (i == 0) {
+      // repeat the test with multiple column families
+      CreateAndReopenWithCF({"pikachu", "about-to-remove"}, options);
+      env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
+                                                                     true);
+    }
+  }
+  db_->DropColumnFamily(handles_[2]);
+  delete handles_[2];
+  handles_.erase(handles_.begin() + 2);
+  env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
+                                                                 true);
+  Close();
+  env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
+                                                                 true);
+}
+
+TEST_F(DBTest, DisableThreadStatus) {
+  Options options;
+  options.env = env_;
+  options.enable_thread_tracking = false;
+  TryReopen(options);
+  CreateAndReopenWithCF({"pikachu", "about-to-remove"}, options);
+  // Verify that none of the column family info exists
+  env_->GetThreadStatusUpdater()->TEST_VerifyColumnFamilyInfoMap(handles_,
+                                                                 false);
+}
+
+TEST_F(DBTest, ThreadStatusFlush) {
+  Options options;
+  options.env = env_;
+  options.write_buffer_size = 100000;  // Small write buffer
+  options.enable_thread_tracking = true;
+  options = CurrentOptions(options);
+
+  rocksdb::SyncPoint::GetInstance()->LoadDependency({
+      {"FlushJob::FlushJob()", "DBTest::ThreadStatusFlush:1"},
+      {"DBTest::ThreadStatusFlush:2", "FlushJob::WriteLevel0Table"},
+ }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + CreateAndReopenWithCF({"pikachu"}, options); + VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 0); + + ASSERT_OK(Put(1, "foo", "v1")); + ASSERT_EQ("v1", Get(1, "foo")); + VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 0); + + uint64_t num_running_flushes = 0; + db_->GetIntProperty(DB::Properties::kNumRunningFlushes, &num_running_flushes); + ASSERT_EQ(num_running_flushes, 0); + + Put(1, "k1", std::string(100000, 'x')); // Fill memtable + Put(1, "k2", std::string(100000, 'y')); // Trigger flush + + // The first sync point is to make sure there's one flush job + // running when we perform VerifyOperationCount(). + TEST_SYNC_POINT("DBTest::ThreadStatusFlush:1"); + VerifyOperationCount(env_, ThreadStatus::OP_FLUSH, 1); + db_->GetIntProperty(DB::Properties::kNumRunningFlushes, &num_running_flushes); + ASSERT_EQ(num_running_flushes, 1); + // This second sync point is to ensure the flush job will not + // be completed until we already perform VerifyOperationCount(). + TEST_SYNC_POINT("DBTest::ThreadStatusFlush:2"); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + +TEST_P(DBTestWithParam, ThreadStatusSingleCompaction) { + const int kTestKeySize = 16; + const int kTestValueSize = 984; + const int kEntrySize = kTestKeySize + kTestValueSize; + const int kEntriesPerBuffer = 100; + Options options; + options.create_if_missing = true; + options.write_buffer_size = kEntrySize * kEntriesPerBuffer; + options.compaction_style = kCompactionStyleLevel; + options.target_file_size_base = options.write_buffer_size; + options.max_bytes_for_level_base = options.target_file_size_base * 2; + options.max_bytes_for_level_multiplier = 2; + options.compression = kNoCompression; + options = CurrentOptions(options); + options.env = env_; + options.enable_thread_tracking = true; + const int kNumL0Files = 4; + options.level0_file_num_compaction_trigger = kNumL0Files; + options.max_subcompactions = max_subcompactions_; + + rocksdb::SyncPoint::GetInstance()->LoadDependency({ + {"DBTest::ThreadStatusSingleCompaction:0", "DBImpl::BGWorkCompaction"}, + {"CompactionJob::Run():Start", "DBTest::ThreadStatusSingleCompaction:1"}, + {"DBTest::ThreadStatusSingleCompaction:2", "CompactionJob::Run():End"}, + }); + for (int tests = 0; tests < 2; ++tests) { + DestroyAndReopen(options); + rocksdb::SyncPoint::GetInstance()->ClearTrace(); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Random rnd(301); + // The Put Phase. + for (int file = 0; file < kNumL0Files; ++file) { + for (int key = 0; key < kEntriesPerBuffer; ++key) { + ASSERT_OK(Put(ToString(key + file * kEntriesPerBuffer), + RandomString(&rnd, kTestValueSize))); + } + Flush(); + } + // This makes sure a compaction won't be scheduled until + // we have done with the above Put Phase. + uint64_t num_running_compactions = 0; + db_->GetIntProperty(DB::Properties::kNumRunningCompactions, + &num_running_compactions); + ASSERT_EQ(num_running_compactions, 0); + TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:0"); + ASSERT_GE(NumTableFilesAtLevel(0), + options.level0_file_num_compaction_trigger); + + // This makes sure at least one compaction is running. + TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:1"); + + if (options.enable_thread_tracking) { + // expecting one single L0 to L1 compaction + VerifyOperationCount(env_, ThreadStatus::OP_COMPACTION, 1); + } else { + // If thread tracking is not enabled, compaction count should be 0. 
+ VerifyOperationCount(env_, ThreadStatus::OP_COMPACTION, 0); + } + db_->GetIntProperty(DB::Properties::kNumRunningCompactions, + &num_running_compactions); + ASSERT_EQ(num_running_compactions, 1); + // TODO(yhchiang): adding assert to verify each compaction stage. + TEST_SYNC_POINT("DBTest::ThreadStatusSingleCompaction:2"); + + // repeat the test with disabling thread tracking. + options.enable_thread_tracking = false; + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + } +} + +TEST_P(DBTestWithParam, PreShutdownManualCompaction) { + Options options = CurrentOptions(); + options.max_background_flushes = 0; + options.max_subcompactions = max_subcompactions_; + CreateAndReopenWithCF({"pikachu"}, options); + + // iter - 0 with 7 levels + // iter - 1 with 3 levels + for (int iter = 0; iter < 2; ++iter) { + MakeTables(3, "p", "q", 1); + ASSERT_EQ("1,1,1", FilesPerLevel(1)); + + // Compaction range falls before files + Compact(1, "", "c"); + ASSERT_EQ("1,1,1", FilesPerLevel(1)); + + // Compaction range falls after files + Compact(1, "r", "z"); + ASSERT_EQ("1,1,1", FilesPerLevel(1)); + + // Compaction range overlaps files + Compact(1, "p1", "p9"); + ASSERT_EQ("0,0,1", FilesPerLevel(1)); + + // Populate a different range + MakeTables(3, "c", "e", 1); + ASSERT_EQ("1,1,2", FilesPerLevel(1)); + + // Compact just the new range + Compact(1, "b", "f"); + ASSERT_EQ("0,0,2", FilesPerLevel(1)); + + // Compact all + MakeTables(1, "a", "z", 1); + ASSERT_EQ("1,0,2", FilesPerLevel(1)); + CancelAllBackgroundWork(db_); + db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr); + ASSERT_EQ("1,0,2", FilesPerLevel(1)); + + if (iter == 0) { + options = CurrentOptions(); + options.max_background_flushes = 0; + options.num_levels = 3; + options.create_if_missing = true; + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + } + } +} + +TEST_F(DBTest, PreShutdownFlush) { + Options options = CurrentOptions(); + options.max_background_flushes = 0; + CreateAndReopenWithCF({"pikachu"}, options); + ASSERT_OK(Put(1, "key", "value")); + CancelAllBackgroundWork(db_); + Status s = + db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr); + ASSERT_TRUE(s.IsShutdownInProgress()); +} + +TEST_P(DBTestWithParam, PreShutdownMultipleCompaction) { + const int kTestKeySize = 16; + const int kTestValueSize = 984; + const int kEntrySize = kTestKeySize + kTestValueSize; + const int kEntriesPerBuffer = 40; + const int kNumL0Files = 4; + + const int kHighPriCount = 3; + const int kLowPriCount = 5; + env_->SetBackgroundThreads(kHighPriCount, Env::HIGH); + env_->SetBackgroundThreads(kLowPriCount, Env::LOW); + + Options options; + options.create_if_missing = true; + options.write_buffer_size = kEntrySize * kEntriesPerBuffer; + options.compaction_style = kCompactionStyleLevel; + options.target_file_size_base = options.write_buffer_size; + options.max_bytes_for_level_base = + options.target_file_size_base * kNumL0Files; + options.compression = kNoCompression; + options = CurrentOptions(options); + options.env = env_; + options.enable_thread_tracking = true; + options.level0_file_num_compaction_trigger = kNumL0Files; + options.max_bytes_for_level_multiplier = 2; + options.max_background_compactions = kLowPriCount; + options.level0_stop_writes_trigger = 1 << 10; + options.level0_slowdown_writes_trigger = 1 << 10; + options.max_subcompactions = max_subcompactions_; + + TryReopen(options); + Random rnd(301); + + std::vector thread_list; + // Delay both flush and compaction + 
rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"FlushJob::FlushJob()", "CompactionJob::Run():Start"}, + {"CompactionJob::Run():Start", + "DBTest::PreShutdownMultipleCompaction:Preshutdown"}, + {"CompactionJob::Run():Start", + "DBTest::PreShutdownMultipleCompaction:VerifyCompaction"}, + {"DBTest::PreShutdownMultipleCompaction:Preshutdown", + "CompactionJob::Run():End"}, + {"CompactionJob::Run():End", + "DBTest::PreShutdownMultipleCompaction:VerifyPreshutdown"}}); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + // Make rocksdb busy + int key = 0; + // check how many threads are doing compaction using GetThreadList + int operation_count[ThreadStatus::NUM_OP_TYPES] = {0}; + for (int file = 0; file < 16 * kNumL0Files; ++file) { + for (int k = 0; k < kEntriesPerBuffer; ++k) { + ASSERT_OK(Put(ToString(key++), RandomString(&rnd, kTestValueSize))); + } + + Status s = env_->GetThreadList(&thread_list); + for (auto thread : thread_list) { + operation_count[thread.operation_type]++; + } + + // Speed up the test + if (operation_count[ThreadStatus::OP_FLUSH] > 1 && + operation_count[ThreadStatus::OP_COMPACTION] > + 0.6 * options.max_background_compactions) { + break; + } + if (file == 15 * kNumL0Files) { + TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:Preshutdown"); + } + } + + TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:Preshutdown"); + ASSERT_GE(operation_count[ThreadStatus::OP_COMPACTION], 1); + CancelAllBackgroundWork(db_); + TEST_SYNC_POINT("DBTest::PreShutdownMultipleCompaction:VerifyPreshutdown"); + dbfull()->TEST_WaitForCompact(); + // Record the number of compactions at a time. + for (int i = 0; i < ThreadStatus::NUM_OP_TYPES; ++i) { + operation_count[i] = 0; + } + Status s = env_->GetThreadList(&thread_list); + for (auto thread : thread_list) { + operation_count[thread.operation_type]++; + } + ASSERT_EQ(operation_count[ThreadStatus::OP_COMPACTION], 0); +} + +TEST_P(DBTestWithParam, PreShutdownCompactionMiddle) { + const int kTestKeySize = 16; + const int kTestValueSize = 984; + const int kEntrySize = kTestKeySize + kTestValueSize; + const int kEntriesPerBuffer = 40; + const int kNumL0Files = 4; + + const int kHighPriCount = 3; + const int kLowPriCount = 5; + env_->SetBackgroundThreads(kHighPriCount, Env::HIGH); + env_->SetBackgroundThreads(kLowPriCount, Env::LOW); + + Options options; + options.create_if_missing = true; + options.write_buffer_size = kEntrySize * kEntriesPerBuffer; + options.compaction_style = kCompactionStyleLevel; + options.target_file_size_base = options.write_buffer_size; + options.max_bytes_for_level_base = + options.target_file_size_base * kNumL0Files; + options.compression = kNoCompression; + options = CurrentOptions(options); + options.env = env_; + options.enable_thread_tracking = true; + options.level0_file_num_compaction_trigger = kNumL0Files; + options.max_bytes_for_level_multiplier = 2; + options.max_background_compactions = kLowPriCount; + options.level0_stop_writes_trigger = 1 << 10; + options.level0_slowdown_writes_trigger = 1 << 10; + options.max_subcompactions = max_subcompactions_; + + TryReopen(options); + Random rnd(301); + + std::vector thread_list; + // Delay both flush and compaction + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"DBTest::PreShutdownCompactionMiddle:Preshutdown", + "CompactionJob::Run():Inprogress"}, + {"CompactionJob::Run():Start", + "DBTest::PreShutdownCompactionMiddle:VerifyCompaction"}, + {"CompactionJob::Run():Inprogress", "CompactionJob::Run():End"}, + 
{"CompactionJob::Run():End", + "DBTest::PreShutdownCompactionMiddle:VerifyPreshutdown"}}); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + // Make rocksdb busy + int key = 0; + // check how many threads are doing compaction using GetThreadList + int operation_count[ThreadStatus::NUM_OP_TYPES] = {0}; + for (int file = 0; file < 16 * kNumL0Files; ++file) { + for (int k = 0; k < kEntriesPerBuffer; ++k) { + ASSERT_OK(Put(ToString(key++), RandomString(&rnd, kTestValueSize))); + } + + Status s = env_->GetThreadList(&thread_list); + for (auto thread : thread_list) { + operation_count[thread.operation_type]++; + } + + // Speed up the test + if (operation_count[ThreadStatus::OP_FLUSH] > 1 && + operation_count[ThreadStatus::OP_COMPACTION] > + 0.6 * options.max_background_compactions) { + break; + } + if (file == 15 * kNumL0Files) { + TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:VerifyCompaction"); + } + } + + ASSERT_GE(operation_count[ThreadStatus::OP_COMPACTION], 1); + CancelAllBackgroundWork(db_); + TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:Preshutdown"); + TEST_SYNC_POINT("DBTest::PreShutdownCompactionMiddle:VerifyPreshutdown"); + dbfull()->TEST_WaitForCompact(); + // Record the number of compactions at a time. + for (int i = 0; i < ThreadStatus::NUM_OP_TYPES; ++i) { + operation_count[i] = 0; + } + Status s = env_->GetThreadList(&thread_list); + for (auto thread : thread_list) { + operation_count[thread.operation_type]++; + } + ASSERT_EQ(operation_count[ThreadStatus::OP_COMPACTION], 0); +} + +#endif // ROCKSDB_USING_THREAD_STATUS + +#ifndef ROCKSDB_LITE +TEST_F(DBTest, FlushOnDestroy) { + WriteOptions wo; + wo.disableWAL = true; + ASSERT_OK(Put("foo", "v1", wo)); + CancelAllBackgroundWork(db_); +} + +TEST_F(DBTest, DynamicLevelCompressionPerLevel) { + if (!Snappy_Supported()) { + return; + } + const int kNKeys = 120; + int keys[kNKeys]; + for (int i = 0; i < kNKeys; i++) { + keys[i] = i; + } + std::random_shuffle(std::begin(keys), std::end(keys)); + + Random rnd(301); + Options options; + options.create_if_missing = true; + options.db_write_buffer_size = 20480; + options.write_buffer_size = 20480; + options.max_write_buffer_number = 2; + options.level0_file_num_compaction_trigger = 2; + options.level0_slowdown_writes_trigger = 2; + options.level0_stop_writes_trigger = 2; + options.target_file_size_base = 20480; + options.level_compaction_dynamic_level_bytes = true; + options.max_bytes_for_level_base = 102400; + options.max_bytes_for_level_multiplier = 4; + options.max_background_compactions = 1; + options.num_levels = 5; + + options.compression_per_level.resize(3); + options.compression_per_level[0] = kNoCompression; + options.compression_per_level[1] = kNoCompression; + options.compression_per_level[2] = kSnappyCompression; + + OnFileDeletionListener* listener = new OnFileDeletionListener(); + options.listeners.emplace_back(listener); + + DestroyAndReopen(options); + + // Insert more than 80K. L4 should be base level. Neither L0 nor L4 should + // be compressed, so total data size should be more than 80K. + for (int i = 0; i < 20; i++) { + ASSERT_OK(Put(Key(keys[i]), CompressibleString(&rnd, 4000))); + } + Flush(); + dbfull()->TEST_WaitForCompact(); + + ASSERT_EQ(NumTableFilesAtLevel(1), 0); + ASSERT_EQ(NumTableFilesAtLevel(2), 0); + ASSERT_EQ(NumTableFilesAtLevel(3), 0); + // Assuming each files' metadata is at least 50 bytes/ + ASSERT_GT(SizeAtLevel(0) + SizeAtLevel(4), 20U * 4000U + 50U * 4); + + // Insert 400KB. 
Some data will be compressed + for (int i = 21; i < 120; i++) { + ASSERT_OK(Put(Key(keys[i]), CompressibleString(&rnd, 4000))); + } + Flush(); + dbfull()->TEST_WaitForCompact(); + ASSERT_EQ(NumTableFilesAtLevel(1), 0); + ASSERT_EQ(NumTableFilesAtLevel(2), 0); + ASSERT_LT(SizeAtLevel(0) + SizeAtLevel(3) + SizeAtLevel(4), + 120U * 4000U + 50U * 24); + // Make sure data in files in L3 is not compacted by removing all files + // in L4 and calculate number of rows + ASSERT_OK(dbfull()->SetOptions({ + {"disable_auto_compactions", "true"}, + })); + ColumnFamilyMetaData cf_meta; + db_->GetColumnFamilyMetaData(&cf_meta); + for (auto file : cf_meta.levels[4].files) { + listener->SetExpectedFileName(dbname_ + file.name); + ASSERT_OK(dbfull()->DeleteFile(file.name)); + } + listener->VerifyMatchedCount(cf_meta.levels[4].files.size()); + + int num_keys = 0; + std::unique_ptr iter(db_->NewIterator(ReadOptions())); + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + num_keys++; + } + ASSERT_OK(iter->status()); + ASSERT_GT(SizeAtLevel(0) + SizeAtLevel(3), num_keys * 4000U + num_keys * 10U); +} + +TEST_F(DBTest, DynamicLevelCompressionPerLevel2) { + if (!Snappy_Supported() || !LZ4_Supported() || !Zlib_Supported()) { + return; + } + const int kNKeys = 500; + int keys[kNKeys]; + for (int i = 0; i < kNKeys; i++) { + keys[i] = i; + } + std::random_shuffle(std::begin(keys), std::end(keys)); + + Random rnd(301); + Options options; + options.create_if_missing = true; + options.db_write_buffer_size = 6000; + options.write_buffer_size = 6000; + options.max_write_buffer_number = 2; + options.level0_file_num_compaction_trigger = 2; + options.level0_slowdown_writes_trigger = 2; + options.level0_stop_writes_trigger = 2; + options.soft_pending_compaction_bytes_limit = 1024 * 1024; + + // Use file size to distinguish levels + // L1: 10, L2: 20, L3 40, L4 80 + // L0 is less than 30 + options.target_file_size_base = 10; + options.target_file_size_multiplier = 2; + + options.level_compaction_dynamic_level_bytes = true; + options.max_bytes_for_level_base = 200; + options.max_bytes_for_level_multiplier = 8; + options.max_background_compactions = 1; + options.num_levels = 5; + std::shared_ptr mtf(new mock::MockTableFactory); + options.table_factory = mtf; + + options.compression_per_level.resize(3); + options.compression_per_level[0] = kNoCompression; + options.compression_per_level[1] = kLZ4Compression; + options.compression_per_level[2] = kZlibCompression; + + DestroyAndReopen(options); + // When base level is L4, L4 is LZ4. 
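The expectations that follow pin down how compression_per_level interacts with dynamic level sizing: entry 0 governs L0 flush output, while the later entries track the current base level, so the same physical level changes codec as the base moves from L4 to L3. A sketch of the configuration; the mapping described here is inferred from this test's assertions:

#include "rocksdb/options.h"

rocksdb::Options DynamicCompressionOptions() {
  rocksdb::Options options;
  options.num_levels = 5;
  options.level_compaction_dynamic_level_bytes = true;
  options.compression_per_level.resize(3);
  options.compression_per_level[0] = rocksdb::kNoCompression;   // L0 output
  options.compression_per_level[1] = rocksdb::kLZ4Compression;  // base level
  options.compression_per_level[2] = rocksdb::kZlibCompression; // base + 1
  return options;
}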
+ std::atomic num_zlib(0); + std::atomic num_lz4(0); + std::atomic num_no(0); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) { + Compaction* compaction = reinterpret_cast(arg); + if (compaction->output_level() == 4) { + ASSERT_TRUE(compaction->output_compression() == kLZ4Compression); + num_lz4.fetch_add(1); + } + }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "FlushJob::WriteLevel0Table:output_compression", [&](void* arg) { + auto* compression = reinterpret_cast(arg); + ASSERT_TRUE(*compression == kNoCompression); + num_no.fetch_add(1); + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + for (int i = 0; i < 100; i++) { + ASSERT_OK(Put(Key(keys[i]), RandomString(&rnd, 200))); + + if (i % 25 == 0) { + dbfull()->TEST_WaitForFlushMemTable(); + } + } + + Flush(); + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks(); + + ASSERT_EQ(NumTableFilesAtLevel(1), 0); + ASSERT_EQ(NumTableFilesAtLevel(2), 0); + ASSERT_EQ(NumTableFilesAtLevel(3), 0); + ASSERT_GT(NumTableFilesAtLevel(4), 0); + ASSERT_GT(num_no.load(), 2); + ASSERT_GT(num_lz4.load(), 0); + int prev_num_files_l4 = NumTableFilesAtLevel(4); + + // After base level turn L4->L3, L3 becomes LZ4 and L4 becomes Zlib + num_lz4.store(0); + num_no.store(0); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "LevelCompactionPicker::PickCompaction:Return", [&](void* arg) { + Compaction* compaction = reinterpret_cast(arg); + if (compaction->output_level() == 4 && compaction->start_level() == 3) { + ASSERT_TRUE(compaction->output_compression() == kZlibCompression); + num_zlib.fetch_add(1); + } else { + ASSERT_TRUE(compaction->output_compression() == kLZ4Compression); + num_lz4.fetch_add(1); + } + }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "FlushJob::WriteLevel0Table:output_compression", [&](void* arg) { + auto* compression = reinterpret_cast(arg); + ASSERT_TRUE(*compression == kNoCompression); + num_no.fetch_add(1); + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + for (int i = 101; i < 500; i++) { + ASSERT_OK(Put(Key(keys[i]), RandomString(&rnd, 200))); + if (i % 100 == 99) { + Flush(); + dbfull()->TEST_WaitForCompact(); + } + } + + rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks(); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + ASSERT_EQ(NumTableFilesAtLevel(1), 0); + ASSERT_EQ(NumTableFilesAtLevel(2), 0); + ASSERT_GT(NumTableFilesAtLevel(3), 0); + ASSERT_GT(NumTableFilesAtLevel(4), prev_num_files_l4); + ASSERT_GT(num_no.load(), 2); + ASSERT_GT(num_lz4.load(), 0); + ASSERT_GT(num_zlib.load(), 0); +} + +TEST_F(DBTest, DynamicCompactionOptions) { + // minimum write buffer size is enforced at 64KB + const uint64_t k32KB = 1 << 15; + const uint64_t k64KB = 1 << 16; + const uint64_t k128KB = 1 << 17; + const uint64_t k1MB = 1 << 20; + const uint64_t k4KB = 1 << 12; + Options options; + options.env = env_; + options.create_if_missing = true; + options.compression = kNoCompression; + options.soft_pending_compaction_bytes_limit = 1024 * 1024; + options.write_buffer_size = k64KB; + options.arena_block_size = 4 * k4KB; + options.max_write_buffer_number = 2; + // Compaction related options + options.level0_file_num_compaction_trigger = 3; + options.level0_slowdown_writes_trigger = 4; + options.level0_stop_writes_trigger = 8; + options.max_grandparent_overlap_factor = 10; + 
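DynamicCompactionOptions, which begins here, drives everything through DB::SetOptions, where every mutable option travels as a string key/value pair. A small sketch of that interface; the values mirror the first adjustment the test makes below:

#include <string>
#include <unordered_map>
#include "rocksdb/db.h"

rocksdb::Status TightenCompaction(rocksdb::DB* db) {
  // Applies to the default column family; an overload taking a
  // ColumnFamilyHandle* covers the others.
  return db->SetOptions({{"level0_file_num_compaction_trigger", "2"},
                         {"target_file_size_base", "32768"}});
}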
options.expanded_compaction_factor = 25;
+  options.source_compaction_factor = 1;
+  options.target_file_size_base = k64KB;
+  options.target_file_size_multiplier = 1;
+  options.max_bytes_for_level_base = k128KB;
+  options.max_bytes_for_level_multiplier = 4;
+
+  // Block flush thread and disable compaction thread
+  env_->SetBackgroundThreads(1, Env::LOW);
+  env_->SetBackgroundThreads(1, Env::HIGH);
+  DestroyAndReopen(options);
+
+  auto gen_l0_kb = [this](int start, int size, int stride) {
+    Random rnd(301);
+    for (int i = 0; i < size; i++) {
+      ASSERT_OK(Put(Key(start + stride * i), RandomString(&rnd, 1024)));
+    }
+    dbfull()->TEST_WaitForFlushMemTable();
+  };
+
+  // Write 3 files that have the same key range.
+  // Since level0_file_num_compaction_trigger is 3, compaction should be
+  // triggered. The compaction should result in one L1 file.
+  gen_l0_kb(0, 64, 1);
+  ASSERT_EQ(NumTableFilesAtLevel(0), 1);
+  gen_l0_kb(0, 64, 1);
+  ASSERT_EQ(NumTableFilesAtLevel(0), 2);
+  gen_l0_kb(0, 64, 1);
+  dbfull()->TEST_WaitForCompact();
+  ASSERT_EQ("0,1", FilesPerLevel());
+  std::vector<LiveFileMetaData> metadata;
+  db_->GetLiveFilesMetaData(&metadata);
+  ASSERT_EQ(1U, metadata.size());
+  ASSERT_LE(metadata[0].size, k64KB + k4KB);
+  ASSERT_GE(metadata[0].size, k64KB - k4KB);
+
+  // Test compaction trigger and target_file_size_base.
+  // Reduce the compaction trigger to 2, and reduce the L1 file size to 32KB.
+  // Writing two 64KB L0 files should trigger a compaction. Since these
+  // two L0 files have the same key range, the compaction merges them and
+  // should result in two 32KB L1 files.
+  ASSERT_OK(dbfull()->SetOptions({{"level0_file_num_compaction_trigger", "2"},
+                                  {"target_file_size_base", ToString(k32KB)}}));
+
+  gen_l0_kb(0, 64, 1);
+  ASSERT_EQ("1,1", FilesPerLevel());
+  gen_l0_kb(0, 64, 1);
+  dbfull()->TEST_WaitForCompact();
+  ASSERT_EQ("0,2", FilesPerLevel());
+  metadata.clear();
+  db_->GetLiveFilesMetaData(&metadata);
+  ASSERT_EQ(2U, metadata.size());
+  ASSERT_LE(metadata[0].size, k32KB + k4KB);
+  ASSERT_GE(metadata[0].size, k32KB - k4KB);
+  ASSERT_LE(metadata[1].size, k32KB + k4KB);
+  ASSERT_GE(metadata[1].size, k32KB - k4KB);
+
+  // Test max_bytes_for_level_base.
+  // Increase the level base size to 1MB and write enough data to fill
+  // L1 and L2. L1 size should be around 1MB while L2 size should be
+  // around 1MB x 4.
+  ASSERT_OK(
+      dbfull()->SetOptions({{"max_bytes_for_level_base", ToString(k1MB)}}));
+
+  // writing 96 x 64KB => 6 * 1024KB
+  // (L1 + L2) = (1 + 4) * 1024KB
+  for (int i = 0; i < 96; ++i) {
+    gen_l0_kb(i, 64, 96);
+  }
+  dbfull()->TEST_WaitForCompact();
+  ASSERT_GT(SizeAtLevel(1), k1MB / 2);
+  ASSERT_LT(SizeAtLevel(1), k1MB + k1MB / 2);
+
+  // Within (0.5, 1.5) of 4MB.
+  ASSERT_GT(SizeAtLevel(2), 2 * k1MB);
+  ASSERT_LT(SizeAtLevel(2), 6 * k1MB);
+
+  // Test max_bytes_for_level_multiplier and
+  // max_bytes_for_level_base. Now, reduce both the multiplier and the level
+  // base. After filling enough data that can fit in L1 - L3, we should see
+  // the L1 size reduce to around 128KB from the 1MB asserted previously.
+  // Same for L2.
+  ASSERT_OK(
+      dbfull()->SetOptions({{"max_bytes_for_level_multiplier", "2"},
+                            {"max_bytes_for_level_base", ToString(k128KB)}}));
+
+  // writing 20 x 64KB = 10 x 128KB
+  // (L1 + L2 + L3) = (1 + 2 + 4) * 128KB
+  for (int i = 0; i < 20; ++i) {
+    gen_l0_kb(i, 64, 32);
+  }
+  dbfull()->TEST_WaitForCompact();
+  uint64_t total_size = SizeAtLevel(1) + SizeAtLevel(2) + SizeAtLevel(3);
+  ASSERT_TRUE(total_size < k128KB * 7 * 1.5);
+
+  // Test level0_stop_writes_trigger.
+  // Clean up memtable and L0.
Block compaction threads. If continue to write + // and flush memtables. We should see put stop after 8 memtable flushes + // since level0_stop_writes_trigger = 8 + dbfull()->TEST_FlushMemTable(true); + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + // Block compaction + test::SleepingBackgroundTask sleeping_task_low; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low, + Env::Priority::LOW); + sleeping_task_low.WaitUntilSleeping(); + ASSERT_EQ(NumTableFilesAtLevel(0), 0); + int count = 0; + Random rnd(301); + WriteOptions wo; + while (count < 64) { + ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), wo)); + dbfull()->TEST_FlushMemTable(true); + count++; + if (dbfull()->TEST_write_controler().IsStopped()) { + sleeping_task_low.WakeUp(); + break; + } + } + // Stop trigger = 8 + ASSERT_EQ(count, 8); + // Unblock + sleeping_task_low.WaitUntilDone(); + + // Now reduce level0_stop_writes_trigger to 6. Clear up memtables and L0. + // Block compaction thread again. Perform the put and memtable flushes + // until we see the stop after 6 memtable flushes. + ASSERT_OK(dbfull()->SetOptions({{"level0_stop_writes_trigger", "6"}})); + dbfull()->TEST_FlushMemTable(true); + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_EQ(NumTableFilesAtLevel(0), 0); + + // Block compaction again + sleeping_task_low.Reset(); + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low, + Env::Priority::LOW); + sleeping_task_low.WaitUntilSleeping(); + count = 0; + while (count < 64) { + ASSERT_OK(Put(Key(count), RandomString(&rnd, 1024), wo)); + dbfull()->TEST_FlushMemTable(true); + count++; + if (dbfull()->TEST_write_controler().IsStopped()) { + sleeping_task_low.WakeUp(); + break; + } + } + ASSERT_EQ(count, 6); + // Unblock + sleeping_task_low.WaitUntilDone(); + + // Test disable_auto_compactions + // Compaction thread is unblocked but auto compaction is disabled. Write + // 4 L0 files and compaction should be triggered. If auto compaction is + // disabled, then TEST_WaitForCompact will be waiting for nothing. Number of + // L0 files do not change after the call. + ASSERT_OK(dbfull()->SetOptions({{"disable_auto_compactions", "true"}})); + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_EQ(NumTableFilesAtLevel(0), 0); + + for (int i = 0; i < 4; ++i) { + ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024))); + // Wait for compaction so that put won't stop + dbfull()->TEST_FlushMemTable(true); + } + dbfull()->TEST_WaitForCompact(); + ASSERT_EQ(NumTableFilesAtLevel(0), 4); + + // Enable auto compaction and perform the same test, # of L0 files should be + // reduced after compaction. 
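The block just above and the one that follows isolate disable_auto_compactions: with it on, L0 files pile up and TEST_WaitForCompact has nothing to wait for; turning it off lets the backlog drain. The same toggle is the usual bulk-load pattern; a sketch, with WriteAll standing in for the caller's load loop:

#include <unordered_map>
#include "rocksdb/db.h"

rocksdb::Status BulkLoad(rocksdb::DB* db, void (*WriteAll)(rocksdb::DB*)) {
  rocksdb::Status s = db->SetOptions({{"disable_auto_compactions", "true"}});
  if (!s.ok()) return s;
  WriteAll(db);  // flushes still run; compactions only queue up
  return db->SetOptions({{"disable_auto_compactions", "false"}});
}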
+ ASSERT_OK(dbfull()->SetOptions({{"disable_auto_compactions", "false"}})); + dbfull()->CompactRange(CompactRangeOptions(), nullptr, nullptr); + ASSERT_EQ(NumTableFilesAtLevel(0), 0); + + for (int i = 0; i < 4; ++i) { + ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024))); + // Wait for compaction so that put won't stop + dbfull()->TEST_FlushMemTable(true); + } + dbfull()->TEST_WaitForCompact(); + ASSERT_LT(NumTableFilesAtLevel(0), 4); +} +#endif // ROCKSDB_LITE + +TEST_F(DBTest, FileCreationRandomFailure) { + Options options; + options.env = env_; + options.create_if_missing = true; + options.write_buffer_size = 100000; // Small write buffer + options.target_file_size_base = 200000; + options.max_bytes_for_level_base = 1000000; + options.max_bytes_for_level_multiplier = 2; + + DestroyAndReopen(options); + Random rnd(301); + + const int kCDTKeysPerBuffer = 4; + const int kTestSize = kCDTKeysPerBuffer * 4096; + const int kTotalIteration = 100; + // the second half of the test involves in random failure + // of file creation. + const int kRandomFailureTest = kTotalIteration / 2; + std::vector values; + for (int i = 0; i < kTestSize; ++i) { + values.push_back("NOT_FOUND"); + } + for (int j = 0; j < kTotalIteration; ++j) { + if (j == kRandomFailureTest) { + env_->non_writeable_rate_.store(90); + } + for (int k = 0; k < kTestSize; ++k) { + // here we expect some of the Put fails. + std::string value = RandomString(&rnd, 100); + Status s = Put(Key(k), Slice(value)); + if (s.ok()) { + // update the latest successful put + values[k] = value; + } + // But everything before we simulate the failure-test should succeed. + if (j < kRandomFailureTest) { + ASSERT_OK(s); + } + } + } + + // If rocksdb does not do the correct job, internal assert will fail here. + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + + // verify we have the latest successful update + for (int k = 0; k < kTestSize; ++k) { + auto v = Get(Key(k)); + ASSERT_EQ(v, values[k]); + } + + // reopen and reverify we have the latest successful update + env_->non_writeable_rate_.store(0); + Reopen(options); + for (int k = 0; k < kTestSize; ++k) { + auto v = Get(Key(k)); + ASSERT_EQ(v, values[k]); + } +} + +#ifndef ROCKSDB_LITE +TEST_F(DBTest, DynamicMiscOptions) { + // Test max_sequential_skip_in_iterations + Options options; + options.env = env_; + options.create_if_missing = true; + options.max_sequential_skip_in_iterations = 16; + options.compression = kNoCompression; + options.statistics = rocksdb::CreateDBStatistics(); + DestroyAndReopen(options); + + auto assert_reseek_count = [this, &options](int key_start, int num_reseek) { + int key0 = key_start; + int key1 = key_start + 1; + int key2 = key_start + 2; + Random rnd(301); + ASSERT_OK(Put(Key(key0), RandomString(&rnd, 8))); + for (int i = 0; i < 10; ++i) { + ASSERT_OK(Put(Key(key1), RandomString(&rnd, 8))); + } + ASSERT_OK(Put(Key(key2), RandomString(&rnd, 8))); + std::unique_ptr iter(db_->NewIterator(ReadOptions())); + iter->Seek(Key(key1)); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(Key(key1)), 0); + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->key().compare(Key(key2)), 0); + ASSERT_EQ(num_reseek, + TestGetTickerCount(options, NUMBER_OF_RESEEKS_IN_ITERATION)); + }; + // No reseek + assert_reseek_count(100, 0); + + ASSERT_OK(dbfull()->SetOptions({{"max_sequential_skip_in_iterations", "4"}})); + // Clear memtable and make new option effective + dbfull()->TEST_FlushMemTable(true); + // Trigger reseek + assert_reseek_count(200, 
1); + + ASSERT_OK( + dbfull()->SetOptions({{"max_sequential_skip_in_iterations", "16"}})); + // Clear memtable and make new option effective + dbfull()->TEST_FlushMemTable(true); + // No reseek + assert_reseek_count(300, 1); + + MutableCFOptions mutable_cf_options; + CreateAndReopenWithCF({"pikachu"}, options); + // Test soft_pending_compaction_bytes_limit, + // hard_pending_compaction_bytes_limit + ASSERT_OK(dbfull()->SetOptions( + handles_[1], {{"soft_pending_compaction_bytes_limit", "200"}, + {"hard_pending_compaction_bytes_limit", "300"}})); + ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1], + &mutable_cf_options)); + ASSERT_EQ(200, mutable_cf_options.soft_pending_compaction_bytes_limit); + ASSERT_EQ(300, mutable_cf_options.hard_pending_compaction_bytes_limit); + // Test report_bg_io_stats + ASSERT_OK( + dbfull()->SetOptions(handles_[1], {{"report_bg_io_stats", "true"}})); + // sanity check + ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1], + &mutable_cf_options)); + ASSERT_EQ(true, mutable_cf_options.report_bg_io_stats); + // Test min_partial_merge_operands + ASSERT_OK( + dbfull()->SetOptions(handles_[1], {{"min_partial_merge_operands", "4"}})); + ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1], + &mutable_cf_options)); + ASSERT_EQ(4, mutable_cf_options.min_partial_merge_operands); + // Test compression + // sanity check + ASSERT_OK(dbfull()->SetOptions({{"compression", "kNoCompression"}})); + ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[0], + &mutable_cf_options)); + ASSERT_EQ(CompressionType::kNoCompression, mutable_cf_options.compression); + ASSERT_OK(dbfull()->SetOptions({{"compression", "kSnappyCompression"}})); + ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[0], + &mutable_cf_options)); + ASSERT_EQ(CompressionType::kSnappyCompression, + mutable_cf_options.compression); + // Test paranoid_file_checks already done in db_block_cache_test + ASSERT_OK( + dbfull()->SetOptions(handles_[1], {{"paranoid_file_checks", "true"}})); + ASSERT_OK(dbfull()->TEST_GetLatestMutableCFOptions(handles_[1], + &mutable_cf_options)); + ASSERT_EQ(true, mutable_cf_options.report_bg_io_stats); +} +#endif // ROCKSDB_LITE + +TEST_F(DBTest, L0L1L2AndUpHitCounter) { + Options options = CurrentOptions(); + options.write_buffer_size = 32 * 1024; + options.target_file_size_base = 32 * 1024; + options.level0_file_num_compaction_trigger = 2; + options.level0_slowdown_writes_trigger = 2; + options.level0_stop_writes_trigger = 4; + options.max_bytes_for_level_base = 64 * 1024; + options.max_write_buffer_number = 2; + options.max_background_compactions = 8; + options.max_background_flushes = 8; + options.statistics = rocksdb::CreateDBStatistics(); + CreateAndReopenWithCF({"mypikachu"}, options); + + int numkeys = 20000; + for (int i = 0; i < numkeys; i++) { + ASSERT_OK(Put(1, Key(i), "val")); + } + ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L0)); + ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L1)); + ASSERT_EQ(0, TestGetTickerCount(options, GET_HIT_L2_AND_UP)); + + ASSERT_OK(Flush(1)); + dbfull()->TEST_WaitForCompact(); + + for (int i = 0; i < numkeys; i++) { + ASSERT_EQ(Get(1, Key(i)), "val"); + } + + ASSERT_GT(TestGetTickerCount(options, GET_HIT_L0), 100); + ASSERT_GT(TestGetTickerCount(options, GET_HIT_L1), 100); + ASSERT_GT(TestGetTickerCount(options, GET_HIT_L2_AND_UP), 100); + + ASSERT_EQ(numkeys, TestGetTickerCount(options, GET_HIT_L0) + + TestGetTickerCount(options, GET_HIT_L1) + + TestGetTickerCount(options, 
GET_HIT_L2_AND_UP)); +} + +TEST_F(DBTest, EncodeDecompressedBlockSizeTest) { + // iter 0 -- zlib + // iter 1 -- bzip2 + // iter 2 -- lz4 + // iter 3 -- lz4HC + // iter 4 -- xpress + CompressionType compressions[] = {kZlibCompression, kBZip2Compression, + kLZ4Compression, kLZ4HCCompression, + kXpressCompression}; + for (auto comp : compressions) { + if (!CompressionTypeSupported(comp)) { + continue; + } + // first_table_version 1 -- generate with table_version == 1, read with + // table_version == 2 + // first_table_version 2 -- generate with table_version == 2, read with + // table_version == 1 + for (int first_table_version = 1; first_table_version <= 2; + ++first_table_version) { + BlockBasedTableOptions table_options; + table_options.format_version = first_table_version; + table_options.filter_policy.reset(NewBloomFilterPolicy(10)); + Options options = CurrentOptions(); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + options.create_if_missing = true; + options.compression = comp; + DestroyAndReopen(options); + + int kNumKeysWritten = 100000; + + Random rnd(301); + for (int i = 0; i < kNumKeysWritten; ++i) { + // compressible string + ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a'))); + } + + table_options.format_version = first_table_version == 1 ? 2 : 1; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + Reopen(options); + for (int i = 0; i < kNumKeysWritten; ++i) { + auto r = Get(Key(i)); + ASSERT_EQ(r.substr(128), std::string(128, 'a')); + } + } + } +} + +TEST_F(DBTest, CompressionStatsTest) { + CompressionType type; + + if (Snappy_Supported()) { + type = kSnappyCompression; + fprintf(stderr, "using snappy\n"); + } else if (Zlib_Supported()) { + type = kZlibCompression; + fprintf(stderr, "using zlib\n"); + } else if (BZip2_Supported()) { + type = kBZip2Compression; + fprintf(stderr, "using bzip2\n"); + } else if (LZ4_Supported()) { + type = kLZ4Compression; + fprintf(stderr, "using lz4\n"); + } else if (XPRESS_Supported()) { + type = kXpressCompression; + fprintf(stderr, "using xpress\n"); + } else if (ZSTD_Supported()) { + type = kZSTDNotFinalCompression; + fprintf(stderr, "using ZSTD\n"); + } else { + fprintf(stderr, "skipping test, compression disabled\n"); + return; + } + + Options options = CurrentOptions(); + options.compression = type; + options.statistics = rocksdb::CreateDBStatistics(); + options.statistics->stats_level_ = StatsLevel::kAll; + DestroyAndReopen(options); + + int kNumKeysWritten = 100000; + + // Check that compressions occur and are counted when compression is turned on + Random rnd(301); + for (int i = 0; i < kNumKeysWritten; ++i) { + // compressible string + ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a'))); + } + ASSERT_OK(Flush()); + ASSERT_GT(options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED), 0); + + for (int i = 0; i < kNumKeysWritten; ++i) { + auto r = Get(Key(i)); + } + ASSERT_GT(options.statistics->getTickerCount(NUMBER_BLOCK_DECOMPRESSED), 0); + + options.compression = kNoCompression; + DestroyAndReopen(options); + uint64_t currentCompressions = + options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED); + uint64_t currentDecompressions = + options.statistics->getTickerCount(NUMBER_BLOCK_DECOMPRESSED); + + // Check that compressions do not occur when turned off + for (int i = 0; i < kNumKeysWritten; ++i) { + // compressible string + ASSERT_OK(Put(Key(i), RandomString(&rnd, 128) + std::string(128, 'a'))); + } + ASSERT_OK(Flush()); + 
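CompressionStatsTest, including the assertion that follows, reads plain statistics tickers rather than sync points; the same counters are available to any application that installs a statistics object. A sketch of reading them back; note the test also raises stats_level_ to kAll, which additionally enables timer stats:

#include <cinttypes>
#include <cstdio>
#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

void ReportBlockCompression(const rocksdb::Options& options) {
  uint64_t c =
      options.statistics->getTickerCount(rocksdb::NUMBER_BLOCK_COMPRESSED);
  uint64_t d =
      options.statistics->getTickerCount(rocksdb::NUMBER_BLOCK_DECOMPRESSED);
  std::printf("blocks compressed=%" PRIu64 " decompressed=%" PRIu64 "\n", c, d);
}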
ASSERT_EQ(options.statistics->getTickerCount(NUMBER_BLOCK_COMPRESSED) + - currentCompressions, 0); + + for (int i = 0; i < kNumKeysWritten; ++i) { + auto r = Get(Key(i)); + } + ASSERT_EQ(options.statistics->getTickerCount(NUMBER_BLOCK_DECOMPRESSED) + - currentDecompressions, 0); +} + +TEST_F(DBTest, MutexWaitStatsDisabledByDefault) { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.statistics = rocksdb::CreateDBStatistics(); + CreateAndReopenWithCF({"pikachu"}, options); + const uint64_t kMutexWaitDelay = 100; + ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, + kMutexWaitDelay); + ASSERT_OK(Put("hello", "rocksdb")); + ASSERT_EQ(TestGetTickerCount(options, DB_MUTEX_WAIT_MICROS), 0); + ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0); +} + +TEST_F(DBTest, MutexWaitStats) { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.statistics = rocksdb::CreateDBStatistics(); + options.statistics->stats_level_ = StatsLevel::kAll; + CreateAndReopenWithCF({"pikachu"}, options); + const uint64_t kMutexWaitDelay = 100; + ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, + kMutexWaitDelay); + ASSERT_OK(Put("hello", "rocksdb")); + ASSERT_GE(TestGetTickerCount(options, DB_MUTEX_WAIT_MICROS), kMutexWaitDelay); + ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0); +} + +TEST_F(DBTest, CloseSpeedup) { + Options options = CurrentOptions(); + options.compaction_style = kCompactionStyleLevel; + options.write_buffer_size = 110 << 10; // 110KB + options.arena_block_size = 4 << 10; + options.level0_file_num_compaction_trigger = 2; + options.num_levels = 4; + options.max_bytes_for_level_base = 400 * 1024; + options.max_write_buffer_number = 16; + + // Block background threads + env_->SetBackgroundThreads(1, Env::LOW); + env_->SetBackgroundThreads(1, Env::HIGH); + test::SleepingBackgroundTask sleeping_task_low; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low, + Env::Priority::LOW); + test::SleepingBackgroundTask sleeping_task_high; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, + &sleeping_task_high, Env::Priority::HIGH); + + std::vector filenames; + env_->GetChildren(dbname_, &filenames); + // Delete archival files. 
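The two mutex-wait tests above differ in exactly one switch: DB_MUTEX_WAIT_MICROS is recorded only when the statistics object runs at StatsLevel::kAll, which is why the first test expects zero. A sketch of opting in; the extra timing has measurable overhead, hence the default level skips it:

#include "rocksdb/options.h"
#include "rocksdb/statistics.h"

rocksdb::Options MutexTimingOptions() {
  rocksdb::Options options;
  options.statistics = rocksdb::CreateDBStatistics();
  options.statistics->stats_level_ = rocksdb::StatsLevel::kAll;
  return options;
}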
+ for (size_t i = 0; i < filenames.size(); ++i) { + env_->DeleteFile(dbname_ + "/" + filenames[i]); + } + env_->DeleteDir(dbname_); + DestroyAndReopen(options); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + env_->SetBackgroundThreads(1, Env::LOW); + env_->SetBackgroundThreads(1, Env::HIGH); + Random rnd(301); + int key_idx = 0; + + // First three 110KB files are not going to level 2 + // After that, (100K, 200K) + for (int num = 0; num < 5; num++) { + GenerateNewFile(&rnd, &key_idx, true); + } + + ASSERT_EQ(0, GetSstFileCount(dbname_)); + + Close(); + ASSERT_EQ(0, GetSstFileCount(dbname_)); + + // Unblock background threads + sleeping_task_high.WakeUp(); + sleeping_task_high.WaitUntilDone(); + sleeping_task_low.WakeUp(); + sleeping_task_low.WaitUntilDone(); + + Destroy(options); +} + +class DelayedMergeOperator : public MergeOperator { + private: + DBTest* db_test_; + + public: + explicit DelayedMergeOperator(DBTest* d) : db_test_(d) {} + + virtual bool FullMergeV2(const MergeOperationInput& merge_in, + MergeOperationOutput* merge_out) const override { + db_test_->env_->addon_time_.fetch_add(1000); + merge_out->new_value = ""; + return true; + } + + virtual const char* Name() const override { return "DelayedMergeOperator"; } +}; + +TEST_F(DBTest, MergeTestTime) { + std::string one, two, three; + PutFixed64(&one, 1); + PutFixed64(&two, 2); + PutFixed64(&three, 3); + + // Enable time profiling + SetPerfLevel(kEnableTime); + this->env_->addon_time_.store(0); + this->env_->time_elapse_only_sleep_ = true; + this->env_->no_sleep_ = true; + Options options = CurrentOptions(); + options.statistics = rocksdb::CreateDBStatistics(); + options.merge_operator.reset(new DelayedMergeOperator(this)); + DestroyAndReopen(options); + + ASSERT_EQ(TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME), 0); + db_->Put(WriteOptions(), "foo", one); + ASSERT_OK(Flush()); + ASSERT_OK(db_->Merge(WriteOptions(), "foo", two)); + ASSERT_OK(Flush()); + ASSERT_OK(db_->Merge(WriteOptions(), "foo", three)); + ASSERT_OK(Flush()); + + ReadOptions opt; + opt.verify_checksums = true; + opt.snapshot = nullptr; + std::string result; + db_->Get(opt, "foo", &result); + + ASSERT_EQ(1000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME)); + + ReadOptions read_options; + std::unique_ptr iter(db_->NewIterator(read_options)); + int count = 0; + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + ASSERT_OK(iter->status()); + ++count; + } + + ASSERT_EQ(1, count); + ASSERT_EQ(2000000, TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME)); +#if ROCKSDB_USING_THREAD_STATUS + ASSERT_GT(TestGetTickerCount(options, FLUSH_WRITE_BYTES), 0); +#endif // ROCKSDB_USING_THREAD_STATUS + this->env_->time_elapse_only_sleep_ = false; +} + +#ifndef ROCKSDB_LITE +TEST_P(DBTestWithParam, MergeCompactionTimeTest) { + SetPerfLevel(kEnableTime); + Options options = CurrentOptions(); + options.compaction_filter_factory = std::make_shared(); + options.statistics = rocksdb::CreateDBStatistics(); + options.merge_operator.reset(new DelayedMergeOperator(this)); + options.compaction_style = kCompactionStyleUniversal; + options.max_subcompactions = max_subcompactions_; + DestroyAndReopen(options); + + for (int i = 0; i < 1000; i++) { + ASSERT_OK(db_->Merge(WriteOptions(), "foo", "TEST")); + ASSERT_OK(Flush()); + } + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + + ASSERT_NE(TestGetTickerCount(options, MERGE_OPERATION_TOTAL_TIME), 0); +} + +TEST_P(DBTestWithParam, FilterCompactionTimeTest) { + Options 
options = CurrentOptions(); + options.compaction_filter_factory = + std::make_shared(this); + options.disable_auto_compactions = true; + options.create_if_missing = true; + options.statistics = rocksdb::CreateDBStatistics(); + options.max_subcompactions = max_subcompactions_; + DestroyAndReopen(options); + + // put some data + for (int table = 0; table < 4; ++table) { + for (int i = 0; i < 10 + table; ++i) { + Put(ToString(table * 100 + i), "val"); + } + Flush(); + } + + CompactRangeOptions cro; + cro.exclusive_manual_compaction = exclusive_manual_compaction_; + ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr)); + ASSERT_EQ(0U, CountLiveFiles()); + + Reopen(options); + + Iterator* itr = db_->NewIterator(ReadOptions()); + itr->SeekToFirst(); + ASSERT_NE(TestGetTickerCount(options, FILTER_OPERATION_TOTAL_TIME), 0); + delete itr; +} +#endif // ROCKSDB_LITE + +TEST_F(DBTest, TestLogCleanup) { + Options options = CurrentOptions(); + options.write_buffer_size = 64 * 1024; // very small + // only two memtables allowed ==> only two log files + options.max_write_buffer_number = 2; + Reopen(options); + + for (int i = 0; i < 100000; ++i) { + Put(Key(i), "val"); + // only 2 memtables will be alive, so logs_to_free needs to always be below + // 2 + ASSERT_LT(dbfull()->TEST_LogsToFreeSize(), static_cast(3)); + } +} + +#ifndef ROCKSDB_LITE +TEST_F(DBTest, EmptyCompactedDB) { + Options options = CurrentOptions(); + options.max_open_files = -1; + Close(); + ASSERT_OK(ReadOnlyReopen(options)); + Status s = Put("new", "value"); + ASSERT_TRUE(s.IsNotSupported()); + Close(); +} +#endif // ROCKSDB_LITE + +#ifndef ROCKSDB_LITE +TEST_F(DBTest, SuggestCompactRangeTest) { + class CompactionFilterFactoryGetContext : public CompactionFilterFactory { + public: + virtual std::unique_ptr CreateCompactionFilter( + const CompactionFilter::Context& context) override { + saved_context = context; + std::unique_ptr empty_filter; + return empty_filter; + } + const char* Name() const override { + return "CompactionFilterFactoryGetContext"; + } + static bool IsManual(CompactionFilterFactory* compaction_filter_factory) { + return reinterpret_cast( + compaction_filter_factory) + ->saved_context.is_manual_compaction; + } + CompactionFilter::Context saved_context; + }; + + Options options = CurrentOptions(); + options.memtable_factory.reset( + new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile)); + options.compaction_style = kCompactionStyleLevel; + options.compaction_filter_factory.reset( + new CompactionFilterFactoryGetContext()); + options.write_buffer_size = 200 << 10; + options.arena_block_size = 4 << 10; + options.level0_file_num_compaction_trigger = 4; + options.num_levels = 4; + options.compression = kNoCompression; + options.max_bytes_for_level_base = 450 << 10; + options.target_file_size_base = 98 << 10; + options.max_grandparent_overlap_factor = 1 << 20; // inf + + Reopen(options); + + Random rnd(301); + + for (int num = 0; num < 3; num++) { + GenerateNewRandomFile(&rnd); + } + + GenerateNewRandomFile(&rnd); + ASSERT_EQ("0,4", FilesPerLevel(0)); + ASSERT_TRUE(!CompactionFilterFactoryGetContext::IsManual( + options.compaction_filter_factory.get())); + + GenerateNewRandomFile(&rnd); + ASSERT_EQ("1,4", FilesPerLevel(0)); + + GenerateNewRandomFile(&rnd); + ASSERT_EQ("2,4", FilesPerLevel(0)); + + GenerateNewRandomFile(&rnd); + ASSERT_EQ("3,4", FilesPerLevel(0)); + + GenerateNewRandomFile(&rnd); + ASSERT_EQ("0,4,4", FilesPerLevel(0)); + + GenerateNewRandomFile(&rnd); + ASSERT_EQ("1,4,4", FilesPerLevel(0)); 
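This test builds toward experimental::SuggestCompactRange, exercised just below: the call only marks files overlapping the key range as compaction candidates, and the actual work happens later on background threads, which is why the test waits after each call. A sketch of the call itself:

#include "rocksdb/db.h"
#include "rocksdb/experimental.h"

rocksdb::Status NudgeCompaction(rocksdb::DB* db) {
  rocksdb::Slice begin("j");
  rocksdb::Slice end("m");  // the range this test uses; any range works
  return rocksdb::experimental::SuggestCompactRange(db, &begin, &end);
}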
+ + GenerateNewRandomFile(&rnd); + ASSERT_EQ("2,4,4", FilesPerLevel(0)); + + GenerateNewRandomFile(&rnd); + ASSERT_EQ("3,4,4", FilesPerLevel(0)); + + GenerateNewRandomFile(&rnd); + ASSERT_EQ("0,4,8", FilesPerLevel(0)); + + GenerateNewRandomFile(&rnd); + ASSERT_EQ("1,4,8", FilesPerLevel(0)); + + // compact it three times + for (int i = 0; i < 3; ++i) { + ASSERT_OK(experimental::SuggestCompactRange(db_, nullptr, nullptr)); + dbfull()->TEST_WaitForCompact(); + } + + // All files are compacted + ASSERT_EQ(0, NumTableFilesAtLevel(0)); + ASSERT_EQ(0, NumTableFilesAtLevel(1)); + + GenerateNewRandomFile(&rnd); + ASSERT_EQ(1, NumTableFilesAtLevel(0)); + + // nonoverlapping with the file on level 0 + Slice start("a"), end("b"); + ASSERT_OK(experimental::SuggestCompactRange(db_, &start, &end)); + dbfull()->TEST_WaitForCompact(); + + // should not compact the level 0 file + ASSERT_EQ(1, NumTableFilesAtLevel(0)); + + start = Slice("j"); + end = Slice("m"); + ASSERT_OK(experimental::SuggestCompactRange(db_, &start, &end)); + dbfull()->TEST_WaitForCompact(); + ASSERT_TRUE(CompactionFilterFactoryGetContext::IsManual( + options.compaction_filter_factory.get())); + + // now it should compact the level 0 file + ASSERT_EQ(0, NumTableFilesAtLevel(0)); + ASSERT_EQ(1, NumTableFilesAtLevel(1)); +} + +TEST_F(DBTest, PromoteL0) { + Options options = CurrentOptions(); + options.disable_auto_compactions = true; + options.write_buffer_size = 10 * 1024 * 1024; + DestroyAndReopen(options); + + // non overlapping ranges + std::vector> ranges = { + {81, 160}, {0, 80}, {161, 240}, {241, 320}}; + + int32_t value_size = 10 * 1024; // 10 KB + + Random rnd(301); + std::map values; + for (const auto& range : ranges) { + for (int32_t j = range.first; j < range.second; j++) { + values[j] = RandomString(&rnd, value_size); + ASSERT_OK(Put(Key(j), values[j])); + } + ASSERT_OK(Flush()); + } + + int32_t level0_files = NumTableFilesAtLevel(0, 0); + ASSERT_EQ(level0_files, ranges.size()); + ASSERT_EQ(NumTableFilesAtLevel(1, 0), 0); // No files in L1 + + // Promote L0 level to L2. + ASSERT_OK(experimental::PromoteL0(db_, db_->DefaultColumnFamily(), 2)); + // We expect that all the files were trivially moved from L0 to L2 + ASSERT_EQ(NumTableFilesAtLevel(0, 0), 0); + ASSERT_EQ(NumTableFilesAtLevel(2, 0), level0_files); + + for (const auto& kv : values) { + ASSERT_EQ(Get(Key(kv.first)), kv.second); + } +} + +TEST_F(DBTest, PromoteL0Failure) { + Options options = CurrentOptions(); + options.disable_auto_compactions = true; + options.write_buffer_size = 10 * 1024 * 1024; + DestroyAndReopen(options); + + // Produce two L0 files with overlapping ranges. + ASSERT_OK(Put(Key(0), "")); + ASSERT_OK(Put(Key(3), "")); + ASSERT_OK(Flush()); + ASSERT_OK(Put(Key(1), "")); + ASSERT_OK(Flush()); + + Status status; + // Fails because L0 has overlapping files. + status = experimental::PromoteL0(db_, db_->DefaultColumnFamily()); + ASSERT_TRUE(status.IsInvalidArgument()); + + ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)); + // Now there is a file in L1. + ASSERT_GE(NumTableFilesAtLevel(1, 0), 1); + + ASSERT_OK(Put(Key(5), "")); + ASSERT_OK(Flush()); + // Fails because L1 is non-empty. 
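Together, PromoteL0 and PromoteL0Failure spell out the contract of the second experimental hook here: files move trivially from L0 to the target level only if the L0 files do not overlap one another and every level from L1 through the target is empty; otherwise InvalidArgument comes back, as the call just below shows. A sketch:

#include "rocksdb/db.h"
#include "rocksdb/experimental.h"

rocksdb::Status TryPromoteL0(rocksdb::DB* db) {
  // target_level = 2 matches the successful case in PromoteL0 above.
  return rocksdb::experimental::PromoteL0(db, db->DefaultColumnFamily(),
                                          /*target_level=*/2);
}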
+ status = experimental::PromoteL0(db_, db_->DefaultColumnFamily()); + ASSERT_TRUE(status.IsInvalidArgument()); +} +#endif // ROCKSDB_LITE + +// Github issue #596 +TEST_F(DBTest, HugeNumberOfLevels) { + Options options = CurrentOptions(); + options.write_buffer_size = 2 * 1024 * 1024; // 2MB + options.max_bytes_for_level_base = 2 * 1024 * 1024; // 2MB + options.num_levels = 12; + options.max_background_compactions = 10; + options.max_bytes_for_level_multiplier = 2; + options.level_compaction_dynamic_level_bytes = true; + DestroyAndReopen(options); + + Random rnd(301); + for (int i = 0; i < 300000; ++i) { + ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024))); + } + + ASSERT_OK(db_->CompactRange(CompactRangeOptions(), nullptr, nullptr)); +} + +TEST_F(DBTest, AutomaticConflictsWithManualCompaction) { + Options options = CurrentOptions(); + options.write_buffer_size = 2 * 1024 * 1024; // 2MB + options.max_bytes_for_level_base = 2 * 1024 * 1024; // 2MB + options.num_levels = 12; + options.max_background_compactions = 10; + options.max_bytes_for_level_multiplier = 2; + options.level_compaction_dynamic_level_bytes = true; + DestroyAndReopen(options); + + Random rnd(301); + for (int i = 0; i < 300000; ++i) { + ASSERT_OK(Put(Key(i), RandomString(&rnd, 1024))); + } + + std::atomic callback_count(0); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BackgroundCompaction()::Conflict", + [&](void* arg) { callback_count.fetch_add(1); }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + CompactRangeOptions croptions; + croptions.exclusive_manual_compaction = false; + ASSERT_OK(db_->CompactRange(croptions, nullptr, nullptr)); + ASSERT_GE(callback_count.load(), 1); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + for (int i = 0; i < 300000; ++i) { + ASSERT_NE("NOT_FOUND", Get(Key(i))); + } +} + +// Github issue #595 +// Large write batch with column families +TEST_F(DBTest, LargeBatchWithColumnFamilies) { + Options options = CurrentOptions(); + options.env = env_; + options.write_buffer_size = 100000; // Small write buffer + CreateAndReopenWithCF({"pikachu"}, options); + int64_t j = 0; + for (int i = 0; i < 5; i++) { + for (int pass = 1; pass <= 3; pass++) { + WriteBatch batch; + size_t write_size = 1024 * 1024 * (5 + i); + fprintf(stderr, "prepare: %" ROCKSDB_PRIszt " MB, pass:%d\n", + (write_size / 1024 / 1024), pass); + for (;;) { + std::string data(3000, j++ % 127 + 20); + data += ToString(j); + batch.Put(handles_[0], Slice(data), Slice(data)); + if (batch.GetDataSize() > write_size) { + break; + } + } + fprintf(stderr, "write: %" ROCKSDB_PRIszt " MB\n", + (batch.GetDataSize() / 1024 / 1024)); + ASSERT_OK(dbfull()->Write(WriteOptions(), &batch)); + fprintf(stderr, "done\n"); + } + } + // make sure we can re-open it. 
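LargeBatchWithColumnFamilies sizes its batches by polling WriteBatch::GetDataSize(), the serialized size of the batch so far. A condensed sketch of that loop; the 3000-byte payload matches the test, while the budget parameter is the caller's choice:

#include <string>
#include "rocksdb/db.h"
#include "rocksdb/write_batch.h"

rocksdb::Status WriteOneChunk(rocksdb::DB* db, rocksdb::ColumnFamilyHandle* cf,
                              size_t budget) {
  rocksdb::WriteBatch batch;
  int64_t j = 0;
  while (batch.GetDataSize() <= budget) {
    std::string data(3000, static_cast<char>(j++ % 127 + 20));
    batch.Put(cf, data, data);  // key == value, as in the test
  }
  return db->Write(rocksdb::WriteOptions(), &batch);
}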
+ ASSERT_OK(TryReopenWithColumnFamilies({"default", "pikachu"}, options)); +} + +// Make sure that Flushes can proceed in parallel with CompactRange() +TEST_F(DBTest, FlushesInParallelWithCompactRange) { + // iter == 0 -- leveled + // iter == 1 -- leveled, but throw in a flush between two levels compacting + // iter == 2 -- universal + for (int iter = 0; iter < 3; ++iter) { + Options options = CurrentOptions(); + if (iter < 2) { + options.compaction_style = kCompactionStyleLevel; + } else { + options.compaction_style = kCompactionStyleUniversal; + } + options.write_buffer_size = 110 << 10; + options.level0_file_num_compaction_trigger = 4; + options.num_levels = 4; + options.compression = kNoCompression; + options.max_bytes_for_level_base = 450 << 10; + options.target_file_size_base = 98 << 10; + options.max_write_buffer_number = 2; + + DestroyAndReopen(options); + + Random rnd(301); + for (int num = 0; num < 14; num++) { + GenerateNewRandomFile(&rnd); + } + + if (iter == 1) { + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"DBImpl::RunManualCompaction()::1", + "DBTest::FlushesInParallelWithCompactRange:1"}, + {"DBTest::FlushesInParallelWithCompactRange:2", + "DBImpl::RunManualCompaction()::2"}}); + } else { + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"CompactionJob::Run():Start", + "DBTest::FlushesInParallelWithCompactRange:1"}, + {"DBTest::FlushesInParallelWithCompactRange:2", + "CompactionJob::Run():End"}}); + } + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + std::vector threads; + threads.emplace_back([&]() { Compact("a", "z"); }); + + TEST_SYNC_POINT("DBTest::FlushesInParallelWithCompactRange:1"); + + // this has to start a flush. if flushes are blocked, this will try to + // create + // 3 memtables, and that will fail because max_write_buffer_number is 2 + for (int num = 0; num < 3; num++) { + GenerateNewRandomFile(&rnd, /* nowait */ true); + } + + TEST_SYNC_POINT("DBTest::FlushesInParallelWithCompactRange:2"); + + for (auto& t : threads) { + t.join(); + } + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + } +} + +TEST_F(DBTest, DelayedWriteRate) { + const int kEntriesPerMemTable = 100; + const int kTotalFlushes = 20; + + Options options = CurrentOptions(); + env_->SetBackgroundThreads(1, Env::LOW); + options.env = env_; + env_->no_sleep_ = true; + options.write_buffer_size = 100000000; + options.max_write_buffer_number = 256; + options.max_background_compactions = 1; + options.level0_file_num_compaction_trigger = 3; + options.level0_slowdown_writes_trigger = 3; + options.level0_stop_writes_trigger = 999999; + options.delayed_write_rate = 20000000; // Start with 200MB/s + options.memtable_factory.reset( + new SpecialSkipListFactory(kEntriesPerMemTable)); + + CreateAndReopenWithCF({"pikachu"}, options); + + // Block compactions + test::SleepingBackgroundTask sleeping_task_low; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low, + Env::Priority::LOW); + + for (int i = 0; i < 3; i++) { + Put(Key(i), std::string(10000, 'x')); + Flush(); + } + + // These writes will be slowed down to 1KB/s + uint64_t estimated_sleep_time = 0; + Random rnd(301); + Put("", ""); + uint64_t cur_rate = options.delayed_write_rate; + for (int i = 0; i < kTotalFlushes; i++) { + uint64_t size_memtable = 0; + for (int j = 0; j < kEntriesPerMemTable; j++) { + auto rand_num = rnd.Uniform(20); + // Spread the size range to more. 
+ size_t entry_size = rand_num * rand_num * rand_num; + WriteOptions wo; + Put(Key(i), std::string(entry_size, 'x'), wo); + size_memtable += entry_size + 18; + // Occasionally sleep a while + if (rnd.Uniform(20) == 6) { + env_->SleepForMicroseconds(2666); + } + } + dbfull()->TEST_WaitForFlushMemTable(); + estimated_sleep_time += size_memtable * 1000000u / cur_rate; + // Slow down twice. One for memtable switch and one for flush finishes. + cur_rate = static_cast(static_cast(cur_rate) / + kSlowdownRatio / kSlowdownRatio); + } + // Estimate the total sleep time fall into the rough range. + ASSERT_GT(env_->addon_time_.load(), + static_cast(estimated_sleep_time / 2)); + ASSERT_LT(env_->addon_time_.load(), + static_cast(estimated_sleep_time * 2)); + + env_->no_sleep_ = false; + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + sleeping_task_low.WakeUp(); + sleeping_task_low.WaitUntilDone(); +} + +TEST_F(DBTest, HardLimit) { + Options options = CurrentOptions(); + options.env = env_; + env_->SetBackgroundThreads(1, Env::LOW); + options.max_write_buffer_number = 256; + options.write_buffer_size = 110 << 10; // 110KB + options.arena_block_size = 4 * 1024; + options.level0_file_num_compaction_trigger = 4; + options.level0_slowdown_writes_trigger = 999999; + options.level0_stop_writes_trigger = 999999; + options.hard_pending_compaction_bytes_limit = 800 << 10; + options.max_bytes_for_level_base = 10000000000u; + options.max_background_compactions = 1; + options.memtable_factory.reset( + new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1)); + + env_->SetBackgroundThreads(1, Env::LOW); + test::SleepingBackgroundTask sleeping_task_low; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low, + Env::Priority::LOW); + + CreateAndReopenWithCF({"pikachu"}, options); + + std::atomic callback_count(0); + rocksdb::SyncPoint::GetInstance()->SetCallBack("DBImpl::DelayWrite:Wait", + [&](void* arg) { + callback_count.fetch_add(1); + sleeping_task_low.WakeUp(); + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Random rnd(301); + int key_idx = 0; + for (int num = 0; num < 5; num++) { + GenerateNewFile(&rnd, &key_idx, true); + dbfull()->TEST_WaitForFlushMemTable(); + } + + ASSERT_EQ(0, callback_count.load()); + + for (int num = 0; num < 5; num++) { + GenerateNewFile(&rnd, &key_idx, true); + dbfull()->TEST_WaitForFlushMemTable(); + } + ASSERT_GE(callback_count.load(), 1); + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + sleeping_task_low.WaitUntilDone(); +} + +#ifndef ROCKSDB_LITE +TEST_F(DBTest, SoftLimit) { + Options options = CurrentOptions(); + options.env = env_; + options.write_buffer_size = 100000; // Small write buffer + options.max_write_buffer_number = 256; + options.level0_file_num_compaction_trigger = 1; + options.level0_slowdown_writes_trigger = 3; + options.level0_stop_writes_trigger = 999999; + options.delayed_write_rate = 20000; // About 200KB/s limited rate + options.soft_pending_compaction_bytes_limit = 160000; + options.target_file_size_base = 99999999; // All into one file + options.max_bytes_for_level_base = 50000; + options.max_bytes_for_level_multiplier = 10; + options.max_background_compactions = 1; + options.compression = kNoCompression; + + Reopen(options); + + // Generating 360KB in Level 3 + for (int i = 0; i < 72; i++) { + Put(Key(i), std::string(5000, 'x')); + if (i % 10 == 0) { + Flush(); + } + } + dbfull()->TEST_WaitForCompact(); + MoveFilesToLevel(3); + + // Generating 360KB in Level 2 + for (int i = 0; i < 72; i++) { 
+ Put(Key(i), std::string(5000, 'x')); + if (i % 10 == 0) { + Flush(); + } + } + dbfull()->TEST_WaitForCompact(); + MoveFilesToLevel(2); + + Put(Key(0), ""); + + test::SleepingBackgroundTask sleeping_task_low; + // Block compactions + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low, + Env::Priority::LOW); + sleeping_task_low.WaitUntilSleeping(); + + // Create 3 L0 files, making score of L0 to be 3. + for (int i = 0; i < 3; i++) { + Put(Key(i), std::string(5000, 'x')); + Put(Key(100 - i), std::string(5000, 'x')); + // Flush the file. File size is around 30KB. + Flush(); + } + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + + sleeping_task_low.WakeUp(); + sleeping_task_low.WaitUntilDone(); + sleeping_task_low.Reset(); + dbfull()->TEST_WaitForCompact(); + + // Now there is one L1 file but doesn't trigger soft_rate_limit + // The L1 file size is around 30KB. + ASSERT_EQ(NumTableFilesAtLevel(1), 1); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + + // Only allow one compactin going through. + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "BackgroundCallCompaction:0", [&](void* arg) { + // Schedule a sleeping task. + sleeping_task_low.Reset(); + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, + &sleeping_task_low, Env::Priority::LOW); + }); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low, + Env::Priority::LOW); + sleeping_task_low.WaitUntilSleeping(); + // Create 3 L0 files, making score of L0 to be 3 + for (int i = 0; i < 3; i++) { + Put(Key(10 + i), std::string(5000, 'x')); + Put(Key(90 - i), std::string(5000, 'x')); + // Flush the file. File size is around 30KB. + Flush(); + } + + // Wake up sleep task to enable compaction to run and waits + // for it to go to sleep state again to make sure one compaction + // goes through. + sleeping_task_low.WakeUp(); + sleeping_task_low.WaitUntilSleeping(); + + // Now there is one L1 file (around 60KB) which exceeds 50KB base by 10KB + // Given level multiplier 10, estimated pending compaction is around 100KB + // doesn't trigger soft_pending_compaction_bytes_limit + ASSERT_EQ(NumTableFilesAtLevel(1), 1); + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + + // Create 3 L0 files, making score of L0 to be 3, higher than L0. + for (int i = 0; i < 3; i++) { + Put(Key(20 + i), std::string(5000, 'x')); + Put(Key(80 - i), std::string(5000, 'x')); + // Flush the file. File size is around 30KB. + Flush(); + } + // Wake up sleep task to enable compaction to run and waits + // for it to go to sleep state again to make sure one compaction + // goes through. + sleeping_task_low.WakeUp(); + sleeping_task_low.WaitUntilSleeping(); + + // Now there is one L1 file (around 90KB) which exceeds 50KB base by 40KB + // L2 size is 360KB, so the estimated level fanout 4, estimated pending + // compaction is around 200KB + // triggerring soft_pending_compaction_bytes_limit + ASSERT_EQ(NumTableFilesAtLevel(1), 1); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + + sleeping_task_low.WakeUp(); + sleeping_task_low.WaitUntilSleeping(); + + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + + // shrink level base so L2 will hit soft limit easier. 
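SoftLimit stresses the write controller's delay path: once estimated pending compaction bytes cross soft_pending_compaction_bytes_limit (or L0 reaches the slowdown trigger), writes are throttled to roughly delayed_write_rate instead of being stopped outright. The two knobs, with the values this test uses:

#include "rocksdb/options.h"

rocksdb::Options SoftLimitOptions() {
  rocksdb::Options options;
  options.soft_pending_compaction_bytes_limit = 160000;  // bytes of debt
  options.delayed_write_rate = 20000;  // roughly 20KB/s while delayed
  return options;
}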
+ ASSERT_OK(dbfull()->SetOptions({ + {"max_bytes_for_level_base", "5000"}, + })); + + Put("", ""); + Flush(); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + + sleeping_task_low.WaitUntilSleeping(); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + sleeping_task_low.WakeUp(); + sleeping_task_low.WaitUntilDone(); +} + +TEST_F(DBTest, LastWriteBufferDelay) { + Options options = CurrentOptions(); + options.env = env_; + options.write_buffer_size = 100000; + options.max_write_buffer_number = 4; + options.delayed_write_rate = 20000; + options.compression = kNoCompression; + options.disable_auto_compactions = true; + int kNumKeysPerMemtable = 3; + options.memtable_factory.reset( + new SpecialSkipListFactory(kNumKeysPerMemtable)); + + Reopen(options); + test::SleepingBackgroundTask sleeping_task; + // Block flushes + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task, + Env::Priority::HIGH); + sleeping_task.WaitUntilSleeping(); + + // Create 3 L0 files, making score of L0 to be 3. + for (int i = 0; i < 3; i++) { + // Fill one mem table + for (int j = 0; j < kNumKeysPerMemtable; j++) { + Put(Key(j), ""); + } + ASSERT_TRUE(!dbfull()->TEST_write_controler().NeedsDelay()); + } + // Inserting a new entry would create a new mem table, triggering slow down. + Put(Key(0), ""); + ASSERT_TRUE(dbfull()->TEST_write_controler().NeedsDelay()); + + sleeping_task.WakeUp(); + sleeping_task.WaitUntilDone(); +} +#endif // ROCKSDB_LITE + +TEST_F(DBTest, FailWhenCompressionNotSupportedTest) { + CompressionType compressions[] = {kZlibCompression, kBZip2Compression, + kLZ4Compression, kLZ4HCCompression, + kXpressCompression}; + for (auto comp : compressions) { + if (!CompressionTypeSupported(comp)) { + // not supported, we should fail the Open() + Options options = CurrentOptions(); + options.compression = comp; + ASSERT_TRUE(!TryReopen(options).ok()); + // Try if CreateColumnFamily also fails + options.compression = kNoCompression; + ASSERT_OK(TryReopen(options)); + ColumnFamilyOptions cf_options(options); + cf_options.compression = comp; + ColumnFamilyHandle* handle; + ASSERT_TRUE(!db_->CreateColumnFamily(cf_options, "name", &handle).ok()); + } + } +} + +#ifndef ROCKSDB_LITE +TEST_F(DBTest, RowCache) { + Options options = CurrentOptions(); + options.statistics = rocksdb::CreateDBStatistics(); + options.row_cache = NewLRUCache(8192); + DestroyAndReopen(options); + + ASSERT_OK(Put("foo", "bar")); + ASSERT_OK(Flush()); + + ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 0); + ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 0); + ASSERT_EQ(Get("foo"), "bar"); + ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 0); + ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 1); + ASSERT_EQ(Get("foo"), "bar"); + ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_HIT), 1); + ASSERT_EQ(TestGetTickerCount(options, ROW_CACHE_MISS), 1); +} +#endif // ROCKSDB_LITE + +TEST_F(DBTest, DeletingOldWalAfterDrop) { + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"Test:AllowFlushes", "DBImpl::BGWorkFlush"}, + {"DBImpl::BGWorkFlush:done", "Test:WaitForFlush"}}); + rocksdb::SyncPoint::GetInstance()->ClearTrace(); + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + Options options = CurrentOptions(); + options.max_total_wal_size = 8192; + options.compression = kNoCompression; + options.write_buffer_size = 1 << 20; + options.level0_file_num_compaction_trigger = (1 << 30); + options.level0_slowdown_writes_trigger = (1 << 30); + options.level0_stop_writes_trigger = 
(1 << 30);
+  options.disable_auto_compactions = true;
+  DestroyAndReopen(options);
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  CreateColumnFamilies({"cf1", "cf2"}, options);
+  ASSERT_OK(Put(0, "key1", DummyString(8192)));
+  ASSERT_OK(Put(0, "key2", DummyString(8192)));
+  // the oldest wal should now be getting_flushed
+  ASSERT_OK(db_->DropColumnFamily(handles_[0]));
+  // all flushes should now do nothing because their CF is dropped
+  TEST_SYNC_POINT("Test:AllowFlushes");
+  TEST_SYNC_POINT("Test:WaitForFlush");
+  uint64_t lognum1 = dbfull()->TEST_LogfileNumber();
+  ASSERT_OK(Put(1, "key3", DummyString(8192)));
+  ASSERT_OK(Put(1, "key4", DummyString(8192)));
+  // new wal should have been created
+  uint64_t lognum2 = dbfull()->TEST_LogfileNumber();
+  EXPECT_GT(lognum2, lognum1);
+}
+
+TEST_F(DBTest, UnsupportedManualSync) {
+  DestroyAndReopen(CurrentOptions());
+  env_->is_wal_sync_thread_safe_.store(false);
+  Status s = db_->SyncWAL();
+  ASSERT_TRUE(s.IsNotSupported());
+}
+
+INSTANTIATE_TEST_CASE_P(DBTestWithParam, DBTestWithParam,
+                        ::testing::Combine(::testing::Values(1, 4),
+                                           ::testing::Bool()));
+
+TEST_F(DBTest, PauseBackgroundWorkTest) {
+  Options options = CurrentOptions();
+  options.write_buffer_size = 100000;  // Small write buffer
+  Reopen(options);
+
+  std::vector<std::thread> threads;
+  std::atomic<bool> done(false);
+  db_->PauseBackgroundWork();
+  threads.emplace_back([&]() {
+    Random rnd(301);
+    for (int i = 0; i < 10000; ++i) {
+      Put(RandomString(&rnd, 10), RandomString(&rnd, 10));
+    }
+    done.store(true);
+  });
+  env_->SleepForMicroseconds(200000);
+  // make sure the thread is not done
+  ASSERT_EQ(false, done.load());
+  db_->ContinueBackgroundWork();
+  for (auto& t : threads) {
+    t.join();
+  }
+  // now it's done
+  ASSERT_EQ(true, done.load());
+}
+
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+  rocksdb::port::InstallStackTraceHandler();
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/external/rocksdb/db/db_test2.cc b/external/rocksdb/db/db_test2.cc
new file mode 100755
index 0000000000..b4e989389f
--- /dev/null
+++ b/external/rocksdb/db/db_test2.cc
@@ -0,0 +1,1841 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
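+// db_test2.cc collects DBTest cases split out of the very large db_test.cc:
+// prefix bloom filters with a reverse comparator, write buffers shared across
+// column families and databases, WAL filters, preset compression
+// dictionaries, the persistent cache and merge-operand pinning. Everything
+// below builds on the DBTestBase helpers declared in db/db_test_util.h.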
+#include <atomic>
+#include <cstdlib>
+#include <functional>
+
+#include "db/db_test_util.h"
+#include "port/stack_trace.h"
+#include "rocksdb/persistent_cache.h"
+#include "rocksdb/wal_filter.h"
+
+namespace rocksdb {
+
+class DBTest2 : public DBTestBase {
+ public:
+  DBTest2() : DBTestBase("/db_test2") {}
+};
+
+class PrefixFullBloomWithReverseComparator
+    : public DBTestBase,
+      public ::testing::WithParamInterface<bool> {
+ public:
+  PrefixFullBloomWithReverseComparator()
+      : DBTestBase("/prefix_bloom_reverse") {}
+  virtual void SetUp() override { if_cache_filter_ = GetParam(); }
+  bool if_cache_filter_;
+};
+
+TEST_P(PrefixFullBloomWithReverseComparator,
+       PrefixFullBloomWithReverseComparator) {
+  Options options = last_options_;
+  options.comparator = ReverseBytewiseComparator();
+  options.prefix_extractor.reset(NewCappedPrefixTransform(3));
+  options.statistics = rocksdb::CreateDBStatistics();
+  BlockBasedTableOptions bbto;
+  if (if_cache_filter_) {
+    bbto.no_block_cache = false;
+    bbto.cache_index_and_filter_blocks = true;
+    bbto.block_cache = NewLRUCache(1);
+  }
+  bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
+  bbto.whole_key_filtering = false;
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+  DestroyAndReopen(options);
+
+  ASSERT_OK(dbfull()->Put(WriteOptions(), "bar123", "foo"));
+  ASSERT_OK(dbfull()->Put(WriteOptions(), "bar234", "foo2"));
+  ASSERT_OK(dbfull()->Put(WriteOptions(), "foo123", "foo3"));
+
+  dbfull()->Flush(FlushOptions());
+
+  if (bbto.block_cache) {
+    bbto.block_cache->EraseUnRefEntries();
+  }
+
+  unique_ptr<Iterator> iter(db_->NewIterator(ReadOptions()));
+  iter->Seek("bar345");
+  ASSERT_OK(iter->status());
+  ASSERT_TRUE(iter->Valid());
+  ASSERT_EQ("bar234", iter->key().ToString());
+  ASSERT_EQ("foo2", iter->value().ToString());
+  iter->Next();
+  ASSERT_TRUE(iter->Valid());
+  ASSERT_EQ("bar123", iter->key().ToString());
+  ASSERT_EQ("foo", iter->value().ToString());
+
+  iter->Seek("foo234");
+  ASSERT_OK(iter->status());
+  ASSERT_TRUE(iter->Valid());
+  ASSERT_EQ("foo123", iter->key().ToString());
+  ASSERT_EQ("foo3", iter->value().ToString());
+
+  iter->Seek("bar");
+  ASSERT_OK(iter->status());
+  ASSERT_TRUE(!iter->Valid());
+}
+
+INSTANTIATE_TEST_CASE_P(PrefixFullBloomWithReverseComparator,
+                        PrefixFullBloomWithReverseComparator, testing::Bool());
+
+TEST_F(DBTest2, IteratorPropertyVersionNumber) {
+  Put("", "");
+  Iterator* iter1 = db_->NewIterator(ReadOptions());
+  std::string prop_value;
+  ASSERT_OK(
+      iter1->GetProperty("rocksdb.iterator.super-version-number", &prop_value));
+  uint64_t version_number1 =
+      static_cast<uint64_t>(std::atoi(prop_value.c_str()));
+
+  Put("", "");
+  Flush();
+
+  Iterator* iter2 = db_->NewIterator(ReadOptions());
+  ASSERT_OK(
+      iter2->GetProperty("rocksdb.iterator.super-version-number", &prop_value));
+  uint64_t version_number2 =
+      static_cast<uint64_t>(std::atoi(prop_value.c_str()));
+
+  ASSERT_GT(version_number2, version_number1);
+
+  Put("", "");
+
+  Iterator* iter3 = db_->NewIterator(ReadOptions());
+  ASSERT_OK(
+      iter3->GetProperty("rocksdb.iterator.super-version-number", &prop_value));
+  uint64_t version_number3 =
+      static_cast<uint64_t>(std::atoi(prop_value.c_str()));
+
+  ASSERT_EQ(version_number2, version_number3);
+
+  iter1->SeekToFirst();
+  ASSERT_OK(
+      iter1->GetProperty("rocksdb.iterator.super-version-number", &prop_value));
+  uint64_t version_number1_new =
+      static_cast<uint64_t>(std::atoi(prop_value.c_str()));
+  ASSERT_EQ(version_number1, version_number1_new);
+
+  delete iter1;
+  delete iter2;
+  delete iter3;
+}
+
+TEST_F(DBTest2, CacheIndexAndFilterWithDBRestart) {
+  Options
options = CurrentOptions(); + options.create_if_missing = true; + options.statistics = rocksdb::CreateDBStatistics(); + BlockBasedTableOptions table_options; + table_options.cache_index_and_filter_blocks = true; + table_options.filter_policy.reset(NewBloomFilterPolicy(20)); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + CreateAndReopenWithCF({"pikachu"}, options); + + Put(1, "a", "begin"); + Put(1, "z", "end"); + ASSERT_OK(Flush(1)); + TryReopenWithColumnFamilies({"default", "pikachu"}, options); + + std::string value; + value = Get(1, "a"); +} + +#ifndef ROCKSDB_LITE +class DBTestSharedWriteBufferAcrossCFs + : public DBTestBase, + public testing::WithParamInterface { + public: + DBTestSharedWriteBufferAcrossCFs() + : DBTestBase("/db_test_shared_write_buffer") {} + void SetUp() override { use_old_interface_ = GetParam(); } + bool use_old_interface_; +}; + +TEST_P(DBTestSharedWriteBufferAcrossCFs, SharedWriteBufferAcrossCFs) { + Options options = CurrentOptions(); + if (use_old_interface_) { + options.db_write_buffer_size = 100000; // this is the real limit + } else { + options.write_buffer_manager.reset(new WriteBufferManager(100000)); + } + options.write_buffer_size = 500000; // this is never hit + CreateAndReopenWithCF({"pikachu", "dobrynia", "nikitich"}, options); + + // Trigger a flush on CF "nikitich" + ASSERT_OK(Put(0, Key(1), DummyString(1))); + ASSERT_OK(Put(1, Key(1), DummyString(1))); + ASSERT_OK(Put(3, Key(1), DummyString(90000))); + ASSERT_OK(Put(2, Key(2), DummyString(20000))); + ASSERT_OK(Put(2, Key(1), DummyString(1))); + dbfull()->TEST_WaitForFlushMemTable(handles_[0]); + dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + dbfull()->TEST_WaitForFlushMemTable(handles_[2]); + dbfull()->TEST_WaitForFlushMemTable(handles_[3]); + { + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"), + static_cast(0)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"), + static_cast(0)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"), + static_cast(0)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"), + static_cast(1)); + } + + // "dobrynia": 20KB + // Flush 'dobrynia' + ASSERT_OK(Put(3, Key(2), DummyString(40000))); + ASSERT_OK(Put(2, Key(2), DummyString(70000))); + ASSERT_OK(Put(0, Key(1), DummyString(1))); + dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + dbfull()->TEST_WaitForFlushMemTable(handles_[2]); + dbfull()->TEST_WaitForFlushMemTable(handles_[3]); + { + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"), + static_cast(0)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"), + static_cast(0)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"), + static_cast(1)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"), + static_cast(1)); + } + + // "nikitich" still has data of 80KB + // Inserting Data in "dobrynia" triggers "nikitich" flushing. 
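+  // "nikitich" already holds ~80KB, so the 40KB added below pushes the shared
+  // 100KB budget over the limit and the write buffer manager flushes the
+  // column family with the largest memtable -- "nikitich" again (its SST
+  // count goes from 1 to 2 in the block that follows).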
+  ASSERT_OK(Put(3, Key(2), DummyString(40000)));
+  ASSERT_OK(Put(2, Key(2), DummyString(40000)));
+  ASSERT_OK(Put(0, Key(1), DummyString(1)));
+  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  dbfull()->TEST_WaitForFlushMemTable(handles_[2]);
+  dbfull()->TEST_WaitForFlushMemTable(handles_[3]);
+  {
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
+              static_cast<uint64_t>(0));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
+              static_cast<uint64_t>(0));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
+              static_cast<uint64_t>(1));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
+              static_cast<uint64_t>(2));
+  }
+
+  // "dobrynia" still has 40KB
+  ASSERT_OK(Put(1, Key(2), DummyString(20000)));
+  ASSERT_OK(Put(0, Key(1), DummyString(10000)));
+  ASSERT_OK(Put(0, Key(1), DummyString(1)));
+  dbfull()->TEST_WaitForFlushMemTable(handles_[0]);
+  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  dbfull()->TEST_WaitForFlushMemTable(handles_[2]);
+  dbfull()->TEST_WaitForFlushMemTable(handles_[3]);
+  // This should trigger no flush
+  {
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
+              static_cast<uint64_t>(0));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
+              static_cast<uint64_t>(0));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
+              static_cast<uint64_t>(1));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
+              static_cast<uint64_t>(2));
+  }
+
+  // "default": 10KB, "pikachu": 20KB, "dobrynia": 40KB
+  ASSERT_OK(Put(1, Key(2), DummyString(40000)));
+  ASSERT_OK(Put(0, Key(1), DummyString(1)));
+  dbfull()->TEST_WaitForFlushMemTable(handles_[0]);
+  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  dbfull()->TEST_WaitForFlushMemTable(handles_[2]);
+  dbfull()->TEST_WaitForFlushMemTable(handles_[3]);
+  // This should trigger a flush of "pikachu"
+  {
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
+              static_cast<uint64_t>(0));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
+              static_cast<uint64_t>(1));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
+              static_cast<uint64_t>(1));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
+              static_cast<uint64_t>(2));
+  }
+
+  // "default": 10KB, "dobrynia": 40KB
+  // Some remaining writes so 'default', 'dobrynia' and 'nikitich' flush on
+  // closure.
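+  // Reopening drops the memtables, so the three column families named above
+  // each gain one SST file while "pikachu" stays at one -- the counts
+  // asserted below are the previous counts plus the closure flushes.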
+ ASSERT_OK(Put(3, Key(1), DummyString(1))); + ReopenWithColumnFamilies({"default", "pikachu", "dobrynia", "nikitich"}, + options); + { + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"), + static_cast(1)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"), + static_cast(1)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"), + static_cast(2)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"), + static_cast(3)); + } +} + +INSTANTIATE_TEST_CASE_P(DBTestSharedWriteBufferAcrossCFs, + DBTestSharedWriteBufferAcrossCFs, ::testing::Bool()); + +TEST_F(DBTest2, SharedWriteBufferLimitAcrossDB) { + std::string dbname2 = test::TmpDir(env_) + "/db_shared_wb_db2"; + Options options = CurrentOptions(); + options.write_buffer_size = 500000; // this is never hit + options.write_buffer_manager.reset(new WriteBufferManager(100000)); + CreateAndReopenWithCF({"cf1", "cf2"}, options); + + ASSERT_OK(DestroyDB(dbname2, options)); + DB* db2 = nullptr; + ASSERT_OK(DB::Open(options, dbname2, &db2)); + + WriteOptions wo; + + // Trigger a flush on cf2 + ASSERT_OK(Put(0, Key(1), DummyString(1))); + ASSERT_OK(Put(1, Key(1), DummyString(1))); + ASSERT_OK(Put(2, Key(1), DummyString(90000))); + + // Insert to DB2 + ASSERT_OK(db2->Put(wo, Key(2), DummyString(20000))); + + ASSERT_OK(Put(2, Key(1), DummyString(1))); + dbfull()->TEST_WaitForFlushMemTable(handles_[0]); + dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + dbfull()->TEST_WaitForFlushMemTable(handles_[2]); + static_cast(db2)->TEST_WaitForFlushMemTable(); + { + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"), + static_cast(0)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf1"), + static_cast(0)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf2"), + static_cast(1)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db2, "default"), + static_cast(0)); + } + + // db2: 20KB + ASSERT_OK(db2->Put(wo, Key(2), DummyString(40000))); + ASSERT_OK(db2->Put(wo, Key(3), DummyString(70000))); + ASSERT_OK(db2->Put(wo, Key(1), DummyString(1))); + dbfull()->TEST_WaitForFlushMemTable(handles_[0]); + dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + dbfull()->TEST_WaitForFlushMemTable(handles_[2]); + static_cast(db2)->TEST_WaitForFlushMemTable(); + { + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"), + static_cast(0)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf1"), + static_cast(0)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf2"), + static_cast(1)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db2, "default"), + static_cast(1)); + } + + // + // Inserting Data in db2 and db_ triggers flushing in db_. 
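+  // Both databases share one WriteBufferManager, so db2's 70KB counts
+  // against db_ as well; the 45KB written to "cf2" below pushes the shared
+  // total over the limit and db_ flushes ("cf2" goes from one SST file to
+  // two while db2's default CF stays at one).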
+ ASSERT_OK(db2->Put(wo, Key(3), DummyString(70000))); + ASSERT_OK(Put(2, Key(2), DummyString(45000))); + ASSERT_OK(Put(0, Key(1), DummyString(1))); + dbfull()->TEST_WaitForFlushMemTable(handles_[0]); + dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + dbfull()->TEST_WaitForFlushMemTable(handles_[2]); + static_cast(db2)->TEST_WaitForFlushMemTable(); + { + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"), + static_cast(0)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf1"), + static_cast(0)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "cf2"), + static_cast(2)); + ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db2, "default"), + static_cast(1)); + } + + delete db2; + ASSERT_OK(DestroyDB(dbname2, options)); +} + +namespace { + void ValidateKeyExistence(DB* db, const std::vector& keys_must_exist, + const std::vector& keys_must_not_exist) { + // Ensure that expected keys exist + std::vector values; + if (keys_must_exist.size() > 0) { + std::vector status_list = + db->MultiGet(ReadOptions(), keys_must_exist, &values); + for (size_t i = 0; i < keys_must_exist.size(); i++) { + ASSERT_OK(status_list[i]); + } + } + + // Ensure that given keys don't exist + if (keys_must_not_exist.size() > 0) { + std::vector status_list = + db->MultiGet(ReadOptions(), keys_must_not_exist, &values); + for (size_t i = 0; i < keys_must_not_exist.size(); i++) { + ASSERT_TRUE(status_list[i].IsNotFound()); + } + } + } + +} // namespace + +TEST_F(DBTest2, WalFilterTest) { + class TestWalFilter : public WalFilter { + private: + // Processing option that is requested to be applied at the given index + WalFilter::WalProcessingOption wal_processing_option_; + // Index at which to apply wal_processing_option_ + // At other indexes default wal_processing_option::kContinueProcessing is + // returned. + size_t apply_option_at_record_index_; + // Current record index, incremented with each record encountered. + size_t current_record_index_; + + public: + TestWalFilter(WalFilter::WalProcessingOption wal_processing_option, + size_t apply_option_for_record_index) + : wal_processing_option_(wal_processing_option), + apply_option_at_record_index_(apply_option_for_record_index), + current_record_index_(0) {} + + virtual WalProcessingOption LogRecord(const WriteBatch& batch, + WriteBatch* new_batch, + bool* batch_changed) const override { + WalFilter::WalProcessingOption option_to_return; + + if (current_record_index_ == apply_option_at_record_index_) { + option_to_return = wal_processing_option_; + } + else { + option_to_return = WalProcessingOption::kContinueProcessing; + } + + // Filter is passed as a const object for RocksDB to not modify the + // object, however we modify it for our own purpose here and hence + // cast the constness away. 
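+      // (Declaring current_record_index_ as a mutable member would avoid the
+      // const_cast, at the cost of hiding the mutation; the WalFilter
+      // interface is what forces LogRecord() to be const.)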
+      (const_cast<TestWalFilter*>(this)->current_record_index_)++;
+
+      return option_to_return;
+    }
+
+    virtual const char* Name() const override { return "TestWalFilter"; }
+  };
+
+  // Create 3 batches with two keys each
+  std::vector<std::vector<std::string>> batch_keys(3);
+
+  batch_keys[0].push_back("key1");
+  batch_keys[0].push_back("key2");
+  batch_keys[1].push_back("key3");
+  batch_keys[1].push_back("key4");
+  batch_keys[2].push_back("key5");
+  batch_keys[2].push_back("key6");
+
+  // Test with all WAL processing options
+  for (int option = 0;
+       option < static_cast<int>(
+           WalFilter::WalProcessingOption::kWalProcessingOptionMax);
+       option++) {
+    Options options = OptionsForLogIterTest();
+    DestroyAndReopen(options);
+    CreateAndReopenWithCF({ "pikachu" }, options);
+
+    // Write given keys in given batches
+    for (size_t i = 0; i < batch_keys.size(); i++) {
+      WriteBatch batch;
+      for (size_t j = 0; j < batch_keys[i].size(); j++) {
+        batch.Put(handles_[0], batch_keys[i][j], DummyString(1024));
+      }
+      dbfull()->Write(WriteOptions(), &batch);
+    }
+
+    WalFilter::WalProcessingOption wal_processing_option =
+        static_cast<WalFilter::WalProcessingOption>(option);
+
+    // Create a test filter that would apply wal_processing_option at the first
+    // record
+    size_t apply_option_for_record_index = 1;
+    TestWalFilter test_wal_filter(wal_processing_option,
+                                  apply_option_for_record_index);
+
+    // Reopen database with option to use WAL filter
+    options = OptionsForLogIterTest();
+    options.wal_filter = &test_wal_filter;
+    Status status =
+        TryReopenWithColumnFamilies({ "default", "pikachu" }, options);
+    if (wal_processing_option ==
+        WalFilter::WalProcessingOption::kCorruptedRecord) {
+      assert(!status.ok());
+      // In case of corruption we can turn off paranoid_checks to reopen
+      // the database
+      options.paranoid_checks = false;
+      ReopenWithColumnFamilies({ "default", "pikachu" }, options);
+    } else {
+      assert(status.ok());
+    }
+
+    // Compute which keys we expect to be found
+    // and which we expect not to be found after recovery.
+    std::vector<Slice> keys_must_exist;
+    std::vector<Slice> keys_must_not_exist;
+    switch (wal_processing_option) {
+      case WalFilter::WalProcessingOption::kCorruptedRecord:
+      case WalFilter::WalProcessingOption::kContinueProcessing: {
+        fprintf(stderr, "Testing with complete WAL processing\n");
+        // we expect all records to be processed
+        for (size_t i = 0; i < batch_keys.size(); i++) {
+          for (size_t j = 0; j < batch_keys[i].size(); j++) {
+            keys_must_exist.push_back(Slice(batch_keys[i][j]));
+          }
+        }
+        break;
+      }
+      case WalFilter::WalProcessingOption::kIgnoreCurrentRecord: {
+        fprintf(stderr,
+                "Testing with ignoring record %" ROCKSDB_PRIszt " only\n",
+                apply_option_for_record_index);
+        // We expect the record with apply_option_for_record_index to be not
+        // found.
+        for (size_t i = 0; i < batch_keys.size(); i++) {
+          for (size_t j = 0; j < batch_keys[i].size(); j++) {
+            if (i == apply_option_for_record_index) {
+              keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
+            } else {
+              keys_must_exist.push_back(Slice(batch_keys[i][j]));
+            }
+          }
+        }
+        break;
+      }
+      case WalFilter::WalProcessingOption::kStopReplay: {
+        fprintf(stderr,
+                "Testing with stopping replay from record %" ROCKSDB_PRIszt
+                "\n",
+                apply_option_for_record_index);
+        // We expect records beyond apply_option_for_record_index to be not
+        // found.
+        for (size_t i = 0; i < batch_keys.size(); i++) {
+          for (size_t j = 0; j < batch_keys[i].size(); j++) {
+            if (i >= apply_option_for_record_index) {
+              keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
+            } else {
+              keys_must_exist.push_back(Slice(batch_keys[i][j]));
+            }
+          }
+        }
+        break;
+      }
+      default:
+        assert(false);  // unhandled case
+    }
+
+    bool checked_after_reopen = false;
+
+    while (true) {
+      // Ensure that expected keys exist
+      // and unexpected keys don't exist after recovery
+      ValidateKeyExistence(db_, keys_must_exist, keys_must_not_exist);
+
+      if (checked_after_reopen) {
+        break;
+      }
+
+      // reopen database again to make sure previous log(s) are not used
+      // (even if they were skipped)
+      // reopen database with option to use WAL filter
+      options = OptionsForLogIterTest();
+      ReopenWithColumnFamilies({ "default", "pikachu" }, options);
+
+      checked_after_reopen = true;
+    }
+  }
+}
+
+TEST_F(DBTest2, WalFilterTestWithChangeBatch) {
+  class ChangeBatchHandler : public WriteBatch::Handler {
+   private:
+    // Batch to insert keys in
+    WriteBatch* new_write_batch_;
+    // Number of keys to add in the new batch
+    size_t num_keys_to_add_in_new_batch_;
+    // Number of keys added to new batch
+    size_t num_keys_added_;
+
+   public:
+    ChangeBatchHandler(WriteBatch* new_write_batch,
+                       size_t num_keys_to_add_in_new_batch)
+        : new_write_batch_(new_write_batch),
+          num_keys_to_add_in_new_batch_(num_keys_to_add_in_new_batch),
+          num_keys_added_(0) {}
+    virtual void Put(const Slice& key, const Slice& value) override {
+      if (num_keys_added_ < num_keys_to_add_in_new_batch_) {
+        new_write_batch_->Put(key, value);
+        ++num_keys_added_;
+      }
+    }
+  };
+
+  class TestWalFilterWithChangeBatch : public WalFilter {
+   private:
+    // Index at which to start changing records
+    size_t change_records_from_index_;
+    // Number of keys to add in the new batch
+    size_t num_keys_to_add_in_new_batch_;
+    // Current record index, incremented with each record encountered.
+    size_t current_record_index_;
+
+   public:
+    TestWalFilterWithChangeBatch(size_t change_records_from_index,
+                                 size_t num_keys_to_add_in_new_batch)
+        : change_records_from_index_(change_records_from_index),
+          num_keys_to_add_in_new_batch_(num_keys_to_add_in_new_batch),
+          current_record_index_(0) {}
+
+    virtual WalProcessingOption LogRecord(const WriteBatch& batch,
+                                          WriteBatch* new_batch,
+                                          bool* batch_changed) const override {
+      if (current_record_index_ >= change_records_from_index_) {
+        ChangeBatchHandler handler(new_batch, num_keys_to_add_in_new_batch_);
+        batch.Iterate(&handler);
+        *batch_changed = true;
+      }
+
+      // Filter is passed as a const object for RocksDB to not modify the
+      // object, however we modify it for our own purpose here and hence
+      // cast the constness away.
+      (const_cast<TestWalFilterWithChangeBatch*>(this)
+           ->current_record_index_)++;
+
+      return WalProcessingOption::kContinueProcessing;
+    }
+
+    virtual const char* Name() const override {
+      return "TestWalFilterWithChangeBatch";
+    }
+  };
+
+  std::vector<std::vector<std::string>> batch_keys(3);
+
+  batch_keys[0].push_back("key1");
+  batch_keys[0].push_back("key2");
+  batch_keys[1].push_back("key3");
+  batch_keys[1].push_back("key4");
+  batch_keys[2].push_back("key5");
+  batch_keys[2].push_back("key6");
+
+  Options options = OptionsForLogIterTest();
+  DestroyAndReopen(options);
+  CreateAndReopenWithCF({ "pikachu" }, options);
+
+  // Write given keys in given batches
+  for (size_t i = 0; i < batch_keys.size(); i++) {
+    WriteBatch batch;
+    for (size_t j = 0; j < batch_keys[i].size(); j++) {
+      batch.Put(handles_[0], batch_keys[i][j], DummyString(1024));
+    }
+    dbfull()->Write(WriteOptions(), &batch);
+  }
+
+  // Create a test filter that would apply wal_processing_option at the first
+  // record
+  size_t change_records_from_index = 1;
+  size_t num_keys_to_add_in_new_batch = 1;
+  TestWalFilterWithChangeBatch test_wal_filter_with_change_batch(
+      change_records_from_index, num_keys_to_add_in_new_batch);
+
+  // Reopen database with option to use WAL filter
+  options = OptionsForLogIterTest();
+  options.wal_filter = &test_wal_filter_with_change_batch;
+  ReopenWithColumnFamilies({ "default", "pikachu" }, options);
+
+  // Ensure that all keys exist before change_records_from_index_
+  // And after that index only a single key exists
+  // as our filter adds only a single key for each batch
+  std::vector<Slice> keys_must_exist;
+  std::vector<Slice> keys_must_not_exist;
+
+  for (size_t i = 0; i < batch_keys.size(); i++) {
+    for (size_t j = 0; j < batch_keys[i].size(); j++) {
+      if (i >= change_records_from_index && j >= num_keys_to_add_in_new_batch) {
+        keys_must_not_exist.push_back(Slice(batch_keys[i][j]));
+      } else {
+        keys_must_exist.push_back(Slice(batch_keys[i][j]));
+      }
+    }
+  }
+
+  bool checked_after_reopen = false;
+
+  while (true) {
+    // Ensure that expected keys exist
+    // and unexpected keys don't exist after recovery
+    ValidateKeyExistence(db_, keys_must_exist, keys_must_not_exist);
+
+    if (checked_after_reopen) {
+      break;
+    }
+
+    // reopen database again to make sure previous log(s) are not used
+    // (even if they were skipped)
+    // reopen database with option to use WAL filter
+    options = OptionsForLogIterTest();
+    ReopenWithColumnFamilies({ "default", "pikachu" }, options);
+
+    checked_after_reopen = true;
+  }
+}
+
+TEST_F(DBTest2, WalFilterTestWithChangeBatchExtraKeys) {
+  class TestWalFilterWithChangeBatchAddExtraKeys : public WalFilter {
+   public:
+    virtual WalProcessingOption LogRecord(const WriteBatch& batch,
+                                          WriteBatch* new_batch,
+                                          bool* batch_changed) const override {
+      *new_batch = batch;
+      new_batch->Put("key_extra", "value_extra");
+      *batch_changed = true;
+      return WalProcessingOption::kContinueProcessing;
+    }
+
+    virtual const char* Name() const override {
+      return "WalFilterTestWithChangeBatchExtraKeys";
+    }
+  };
+
+  std::vector<std::vector<std::string>> batch_keys(3);
+
+  batch_keys[0].push_back("key1");
+  batch_keys[0].push_back("key2");
+  batch_keys[1].push_back("key3");
+  batch_keys[1].push_back("key4");
+  batch_keys[2].push_back("key5");
+  batch_keys[2].push_back("key6");
+
+  Options options = OptionsForLogIterTest();
+  DestroyAndReopen(options);
+  CreateAndReopenWithCF({ "pikachu" }, options);
+
+  // Write given keys in given batches
+  for (size_t i = 0; i < batch_keys.size(); i++) {
+    WriteBatch batch;
+    for (size_t j = 0; j < batch_keys[i].size(); j++) {
batch.Put(handles_[0], batch_keys[i][j], DummyString(1024)); + } + dbfull()->Write(WriteOptions(), &batch); + } + + // Create a test filter that would add extra keys + TestWalFilterWithChangeBatchAddExtraKeys test_wal_filter_extra_keys; + + // Reopen database with option to use WAL filter + options = OptionsForLogIterTest(); + options.wal_filter = &test_wal_filter_extra_keys; + Status status = TryReopenWithColumnFamilies({"default", "pikachu"}, options); + ASSERT_TRUE(status.IsNotSupported()); + + // Reopen without filter, now reopen should succeed - previous + // attempt to open must not have altered the db. + options = OptionsForLogIterTest(); + ReopenWithColumnFamilies({ "default", "pikachu" }, options); + + std::vector keys_must_exist; + std::vector keys_must_not_exist; // empty vector + + for (size_t i = 0; i < batch_keys.size(); i++) { + for (size_t j = 0; j < batch_keys[i].size(); j++) { + keys_must_exist.push_back(Slice(batch_keys[i][j])); + } + } + + ValidateKeyExistence(db_, keys_must_exist, keys_must_not_exist); +} + +TEST_F(DBTest2, WalFilterTestWithColumnFamilies) { + class TestWalFilterWithColumnFamilies : public WalFilter { + private: + // column_family_id -> log_number map (provided to WALFilter) + std::map cf_log_number_map_; + // column_family_name -> column_family_id map (provided to WALFilter) + std::map cf_name_id_map_; + // column_family_name -> keys_found_in_wal map + // We store keys that are applicable to the column_family + // during recovery (i.e. aren't already flushed to SST file(s)) + // for verification against the keys we expect. + std::map> cf_wal_keys_; + public: + virtual void ColumnFamilyLogNumberMap( + const std::map& cf_lognumber_map, + const std::map& cf_name_id_map) override { + cf_log_number_map_ = cf_lognumber_map; + cf_name_id_map_ = cf_name_id_map; + } + + virtual WalProcessingOption LogRecordFound(unsigned long long log_number, + const std::string& log_file_name, + const WriteBatch& batch, + WriteBatch* new_batch, + bool* batch_changed) override { + class LogRecordBatchHandler : public WriteBatch::Handler { + private: + const std::map & cf_log_number_map_; + std::map> & cf_wal_keys_; + unsigned long long log_number_; + public: + LogRecordBatchHandler(unsigned long long current_log_number, + const std::map & cf_log_number_map, + std::map> & cf_wal_keys) : + cf_log_number_map_(cf_log_number_map), + cf_wal_keys_(cf_wal_keys), + log_number_(current_log_number){} + + virtual Status PutCF(uint32_t column_family_id, const Slice& key, + const Slice& /*value*/) override { + auto it = cf_log_number_map_.find(column_family_id); + assert(it != cf_log_number_map_.end()); + unsigned long long log_number_for_cf = it->second; + // If the current record is applicable for column_family_id + // (i.e. isn't flushed to SST file(s) for column_family_id) + // add it to the cf_wal_keys_ map for verification. 
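+          // cf_log_number_map_ records, per column family, the oldest log
+          // that can still hold unflushed data for it; records from older
+          // logs were already persisted to SST files, so only records with
+          // log_number_ >= log_number_for_cf count as recovered keys.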
+ if (log_number_ >= log_number_for_cf) { + cf_wal_keys_[column_family_id].push_back(std::string(key.data(), + key.size())); + } + return Status::OK(); + } + } handler(log_number, cf_log_number_map_, cf_wal_keys_); + + batch.Iterate(&handler); + + return WalProcessingOption::kContinueProcessing; + } + + virtual const char* Name() const override { + return "WalFilterTestWithColumnFamilies"; + } + + const std::map>& GetColumnFamilyKeys() { + return cf_wal_keys_; + } + + const std::map & GetColumnFamilyNameIdMap() { + return cf_name_id_map_; + } + }; + + std::vector> batch_keys_pre_flush(3); + + batch_keys_pre_flush[0].push_back("key1"); + batch_keys_pre_flush[0].push_back("key2"); + batch_keys_pre_flush[1].push_back("key3"); + batch_keys_pre_flush[1].push_back("key4"); + batch_keys_pre_flush[2].push_back("key5"); + batch_keys_pre_flush[2].push_back("key6"); + + Options options = OptionsForLogIterTest(); + DestroyAndReopen(options); + CreateAndReopenWithCF({ "pikachu" }, options); + + // Write given keys in given batches + for (size_t i = 0; i < batch_keys_pre_flush.size(); i++) { + WriteBatch batch; + for (size_t j = 0; j < batch_keys_pre_flush[i].size(); j++) { + batch.Put(handles_[0], batch_keys_pre_flush[i][j], DummyString(1024)); + batch.Put(handles_[1], batch_keys_pre_flush[i][j], DummyString(1024)); + } + dbfull()->Write(WriteOptions(), &batch); + } + + //Flush default column-family + db_->Flush(FlushOptions(), handles_[0]); + + // Do some more writes + std::vector> batch_keys_post_flush(3); + + batch_keys_post_flush[0].push_back("key7"); + batch_keys_post_flush[0].push_back("key8"); + batch_keys_post_flush[1].push_back("key9"); + batch_keys_post_flush[1].push_back("key10"); + batch_keys_post_flush[2].push_back("key11"); + batch_keys_post_flush[2].push_back("key12"); + + // Write given keys in given batches + for (size_t i = 0; i < batch_keys_post_flush.size(); i++) { + WriteBatch batch; + for (size_t j = 0; j < batch_keys_post_flush[i].size(); j++) { + batch.Put(handles_[0], batch_keys_post_flush[i][j], DummyString(1024)); + batch.Put(handles_[1], batch_keys_post_flush[i][j], DummyString(1024)); + } + dbfull()->Write(WriteOptions(), &batch); + } + + // On Recovery we should only find the second batch applicable to default CF + // But both batches applicable to pikachu CF + + // Create a test filter that would add extra keys + TestWalFilterWithColumnFamilies test_wal_filter_column_families; + + // Reopen database with option to use WAL filter + options = OptionsForLogIterTest(); + options.wal_filter = &test_wal_filter_column_families; + Status status = + TryReopenWithColumnFamilies({ "default", "pikachu" }, options); + ASSERT_TRUE(status.ok()); + + // verify that handles_[0] only has post_flush keys + // while handles_[1] has pre and post flush keys + auto cf_wal_keys = test_wal_filter_column_families.GetColumnFamilyKeys(); + auto name_id_map = test_wal_filter_column_families.GetColumnFamilyNameIdMap(); + size_t index = 0; + auto keys_cf = cf_wal_keys[name_id_map[kDefaultColumnFamilyName]]; + //default column-family, only post_flush keys are expected + for (size_t i = 0; i < batch_keys_post_flush.size(); i++) { + for (size_t j = 0; j < batch_keys_post_flush[i].size(); j++) { + Slice key_from_the_log(keys_cf[index++]); + Slice batch_key(batch_keys_post_flush[i][j]); + ASSERT_TRUE(key_from_the_log.compare(batch_key) == 0); + } + } + ASSERT_TRUE(index == keys_cf.size()); + + index = 0; + keys_cf = cf_wal_keys[name_id_map["pikachu"]]; + //pikachu column-family, all keys are expected + for 
(size_t i = 0; i < batch_keys_pre_flush.size(); i++) { + for (size_t j = 0; j < batch_keys_pre_flush[i].size(); j++) { + Slice key_from_the_log(keys_cf[index++]); + Slice batch_key(batch_keys_pre_flush[i][j]); + ASSERT_TRUE(key_from_the_log.compare(batch_key) == 0); + } + } + + for (size_t i = 0; i < batch_keys_post_flush.size(); i++) { + for (size_t j = 0; j < batch_keys_post_flush[i].size(); j++) { + Slice key_from_the_log(keys_cf[index++]); + Slice batch_key(batch_keys_post_flush[i][j]); + ASSERT_TRUE(key_from_the_log.compare(batch_key) == 0); + } + } + ASSERT_TRUE(index == keys_cf.size()); +} + +TEST_F(DBTest2, PresetCompressionDict) { + const size_t kBlockSizeBytes = 4 << 10; + const size_t kL0FileBytes = 128 << 10; + const size_t kApproxPerBlockOverheadBytes = 50; + const int kNumL0Files = 5; + + Options options; + options.arena_block_size = kBlockSizeBytes; + options.compaction_style = kCompactionStyleUniversal; + options.create_if_missing = true; + options.disable_auto_compactions = true; + options.level0_file_num_compaction_trigger = kNumL0Files; + options.memtable_factory.reset( + new SpecialSkipListFactory(kL0FileBytes / kBlockSizeBytes)); + options.num_levels = 2; + options.target_file_size_base = kL0FileBytes; + options.target_file_size_multiplier = 2; + options.write_buffer_size = kL0FileBytes; + BlockBasedTableOptions table_options; + table_options.block_size = kBlockSizeBytes; + std::vector compression_types; + if (Zlib_Supported()) { + compression_types.push_back(kZlibCompression); + } +#if LZ4_VERSION_NUMBER >= 10400 // r124+ + compression_types.push_back(kLZ4Compression); + compression_types.push_back(kLZ4HCCompression); +#endif // LZ4_VERSION_NUMBER >= 10400 +#if ZSTD_VERSION_NUMBER >= 500 // v0.5.0+ + compression_types.push_back(kZSTDNotFinalCompression); +#endif // ZSTD_VERSION_NUMBER >= 500 + + for (auto compression_type : compression_types) { + options.compression = compression_type; + size_t prev_out_bytes; + for (int i = 0; i < 2; ++i) { + // First iteration: compress without preset dictionary + // Second iteration: compress with preset dictionary + // To make sure the compression dictionary was actually used, we verify + // the compressed size is smaller in the second iteration. Also in the + // second iteration, verify the data we get out is the same data we put + // in. 
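+      // Every key stores the same random block (seq_data), so a dictionary
+      // sampled from the file gives the compressor cross-block context that
+      // plain per-block compression cannot exploit.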
+ if (i) { + options.compression_opts.max_dict_bytes = kBlockSizeBytes; + } else { + options.compression_opts.max_dict_bytes = 0; + } + + options.statistics = rocksdb::CreateDBStatistics(); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + CreateAndReopenWithCF({"pikachu"}, options); + Random rnd(301); + std::string seq_data = + RandomString(&rnd, kBlockSizeBytes - kApproxPerBlockOverheadBytes); + + ASSERT_EQ(0, NumTableFilesAtLevel(0, 1)); + for (int j = 0; j < kNumL0Files; ++j) { + for (size_t k = 0; k < kL0FileBytes / kBlockSizeBytes + 1; ++k) { + ASSERT_OK(Put(1, Key(static_cast( + j * (kL0FileBytes / kBlockSizeBytes) + k)), + seq_data)); + } + dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + ASSERT_EQ(j + 1, NumTableFilesAtLevel(0, 1)); + } + db_->CompactRange(CompactRangeOptions(), handles_[1], nullptr, nullptr); + ASSERT_EQ(0, NumTableFilesAtLevel(0, 1)); + ASSERT_GT(NumTableFilesAtLevel(1, 1), 0); + + size_t out_bytes = 0; + std::vector files; + GetSstFiles(dbname_, &files); + for (const auto& file : files) { + uint64_t curr_bytes; + env_->GetFileSize(dbname_ + "/" + file, &curr_bytes); + out_bytes += static_cast(curr_bytes); + } + + for (size_t j = 0; j < kNumL0Files * (kL0FileBytes / kBlockSizeBytes); + j++) { + ASSERT_EQ(seq_data, Get(1, Key(static_cast(j)))); + } + if (i) { + ASSERT_GT(prev_out_bytes, out_bytes); + } + prev_out_bytes = out_bytes; + DestroyAndReopen(options); + } + } +} + +class CompactionCompressionListener : public EventListener { + public: + explicit CompactionCompressionListener(Options* db_options) + : db_options_(db_options) {} + + void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override { + // Figure out last level with files + int bottommost_level = 0; + for (int level = 0; level < db->NumberLevels(); level++) { + std::string files_at_level; + ASSERT_TRUE( + db->GetProperty("rocksdb.num-files-at-level" + NumberToString(level), + &files_at_level)); + if (files_at_level != "0") { + bottommost_level = level; + } + } + + if (db_options_->bottommost_compression != kDisableCompressionOption && + ci.output_level == bottommost_level && ci.output_level >= 2) { + ASSERT_EQ(ci.compression, db_options_->bottommost_compression); + } else if (db_options_->compression_per_level.size() != 0) { + ASSERT_EQ(ci.compression, + db_options_->compression_per_level[ci.output_level]); + } else { + ASSERT_EQ(ci.compression, db_options_->compression); + } + max_level_checked = std::max(max_level_checked, ci.output_level); + } + + int max_level_checked = 0; + const Options* db_options_; +}; + +TEST_F(DBTest2, CompressionOptions) { + if (!Zlib_Supported() || !Snappy_Supported()) { + return; + } + + Options options = CurrentOptions(); + options.level0_file_num_compaction_trigger = 2; + options.max_bytes_for_level_base = 100; + options.max_bytes_for_level_multiplier = 2; + options.num_levels = 7; + options.max_background_compactions = 1; + options.base_background_compactions = 1; + + CompactionCompressionListener* listener = + new CompactionCompressionListener(&options); + options.listeners.emplace_back(listener); + + const int kKeySize = 5; + const int kValSize = 20; + Random rnd(301); + + for (int iter = 0; iter <= 2; iter++) { + listener->max_level_checked = 0; + + if (iter == 0) { + // Use different compression algorithms for different levels but + // always use Zlib for bottommost level + options.compression_per_level = {kNoCompression, kNoCompression, + kNoCompression, kSnappyCompression, + kSnappyCompression, kSnappyCompression, + 
kZlibCompression}; + options.compression = kNoCompression; + options.bottommost_compression = kZlibCompression; + } else if (iter == 1) { + // Use Snappy except for bottommost level use ZLib + options.compression_per_level = {}; + options.compression = kSnappyCompression; + options.bottommost_compression = kZlibCompression; + } else if (iter == 2) { + // Use Snappy everywhere + options.compression_per_level = {}; + options.compression = kSnappyCompression; + options.bottommost_compression = kDisableCompressionOption; + } + + DestroyAndReopen(options); + // Write 10 random files + for (int i = 0; i < 10; i++) { + for (int j = 0; j < 5; j++) { + ASSERT_OK( + Put(RandomString(&rnd, kKeySize), RandomString(&rnd, kValSize))); + } + ASSERT_OK(Flush()); + dbfull()->TEST_WaitForCompact(); + } + + // Make sure that we wrote enough to check all 7 levels + ASSERT_EQ(listener->max_level_checked, 6); + } +} + +class CompactionStallTestListener : public EventListener { + public: + CompactionStallTestListener() : compacted_files_cnt_(0) {} + + void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override { + ASSERT_EQ(ci.cf_name, "default"); + ASSERT_EQ(ci.base_input_level, 0); + ASSERT_EQ(ci.compaction_reason, CompactionReason::kLevelL0FilesNum); + compacted_files_cnt_ += ci.input_files.size(); + } + std::atomic compacted_files_cnt_; +}; + +TEST_F(DBTest2, CompactionStall) { + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"DBImpl::BGWorkCompaction", "DBTest2::CompactionStall:0"}, + {"DBImpl::BGWorkCompaction", "DBTest2::CompactionStall:1"}, + {"DBTest2::CompactionStall:2", + "DBImpl::NotifyOnCompactionCompleted::UnlockMutex"}}); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + Options options = CurrentOptions(); + options.level0_file_num_compaction_trigger = 4; + options.max_background_compactions = 40; + CompactionStallTestListener* listener = new CompactionStallTestListener(); + options.listeners.emplace_back(listener); + DestroyAndReopen(options); + + Random rnd(301); + + // 4 Files in L0 + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 10; j++) { + ASSERT_OK(Put(RandomString(&rnd, 10), RandomString(&rnd, 10))); + } + ASSERT_OK(Flush()); + } + + // Wait for compaction to be triggered + TEST_SYNC_POINT("DBTest2::CompactionStall:0"); + + // Clear "DBImpl::BGWorkCompaction" SYNC_POINT since we want to hold it again + // at DBTest2::CompactionStall::1 + rocksdb::SyncPoint::GetInstance()->ClearTrace(); + + // Another 6 L0 files to trigger compaction again + for (int i = 0; i < 6; i++) { + for (int j = 0; j < 10; j++) { + ASSERT_OK(Put(RandomString(&rnd, 10), RandomString(&rnd, 10))); + } + ASSERT_OK(Flush()); + } + + // Wait for another compaction to be triggered + TEST_SYNC_POINT("DBTest2::CompactionStall:1"); + + // Hold NotifyOnCompactionCompleted in the unlock mutex section + TEST_SYNC_POINT("DBTest2::CompactionStall:2"); + + dbfull()->TEST_WaitForCompact(); + ASSERT_LT(NumTableFilesAtLevel(0), + options.level0_file_num_compaction_trigger); + ASSERT_GT(listener->compacted_files_cnt_.load(), + 10 - options.level0_file_num_compaction_trigger); + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + +#endif // ROCKSDB_LITE + +TEST_F(DBTest2, FirstSnapshotTest) { + Options options; + options.write_buffer_size = 100000; // Small write buffer + options = CurrentOptions(options); + CreateAndReopenWithCF({"pikachu"}, options); + + // This snapshot will have sequence number 0 what is expected behaviour. 
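+  // Nothing has been written yet, so the snapshot is taken at sequence
+  // number 0; the test checks that releasing such a snapshot after the
+  // flushes below still works.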
+ const Snapshot* s1 = db_->GetSnapshot(); + + Put(1, "k1", std::string(100000, 'x')); // Fill memtable + Put(1, "k2", std::string(100000, 'y')); // Trigger flush + + db_->ReleaseSnapshot(s1); +} + +class PinL0IndexAndFilterBlocksTest : public DBTestBase, + public testing::WithParamInterface { + public: + PinL0IndexAndFilterBlocksTest() : DBTestBase("/db_pin_l0_index_bloom_test") {} + virtual void SetUp() override { infinite_max_files_ = GetParam(); } + + void CreateTwoLevels(Options* options) { + if (infinite_max_files_) { + options->max_open_files = -1; + } + options->create_if_missing = true; + options->statistics = rocksdb::CreateDBStatistics(); + BlockBasedTableOptions table_options; + table_options.cache_index_and_filter_blocks = true; + table_options.pin_l0_filter_and_index_blocks_in_cache = true; + table_options.filter_policy.reset(NewBloomFilterPolicy(20)); + options->table_factory.reset(new BlockBasedTableFactory(table_options)); + CreateAndReopenWithCF({"pikachu"}, *options); + + Put(1, "a", "begin"); + Put(1, "z", "end"); + ASSERT_OK(Flush(1)); + // move this table to L1 + dbfull()->TEST_CompactRange(0, nullptr, nullptr, handles_[1]); + + // reset block cache + table_options.block_cache = NewLRUCache(64 * 1024); + options->table_factory.reset(NewBlockBasedTableFactory(table_options)); + TryReopenWithColumnFamilies({"default", "pikachu"}, *options); + // create new table at L0 + Put(1, "a2", "begin2"); + Put(1, "z2", "end2"); + ASSERT_OK(Flush(1)); + + table_options.block_cache->EraseUnRefEntries(); + } + + bool infinite_max_files_; +}; + +TEST_P(PinL0IndexAndFilterBlocksTest, + IndexAndFilterBlocksOfNewTableAddedToCacheWithPinning) { + Options options = CurrentOptions(); + if (infinite_max_files_) { + options.max_open_files = -1; + } + options.create_if_missing = true; + options.statistics = rocksdb::CreateDBStatistics(); + BlockBasedTableOptions table_options; + table_options.cache_index_and_filter_blocks = true; + table_options.pin_l0_filter_and_index_blocks_in_cache = true; + table_options.filter_policy.reset(NewBloomFilterPolicy(20)); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + CreateAndReopenWithCF({"pikachu"}, options); + + ASSERT_OK(Put(1, "key", "val")); + // Create a new table. + ASSERT_OK(Flush(1)); + + // index/filter blocks added to block cache right after table creation. + ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS)); + ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); + ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS)); + ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT)); + + // only index/filter were added + ASSERT_EQ(2, TestGetTickerCount(options, BLOCK_CACHE_ADD)); + ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_DATA_MISS)); + + std::string value; + // Miss and hit count should remain the same, they're all pinned. + db_->KeyMayExist(ReadOptions(), handles_[1], "key", &value); + ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS)); + ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT)); + ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS)); + ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT)); + + // Miss and hit count should remain the same, they're all pinned. 
+  value = Get(1, "key");
+  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
+  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
+  ASSERT_EQ(1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
+  ASSERT_EQ(0, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
+}
+
+TEST_P(PinL0IndexAndFilterBlocksTest,
+       MultiLevelIndexAndFilterBlocksCachedWithPinning) {
+  Options options = CurrentOptions();
+  PinL0IndexAndFilterBlocksTest::CreateTwoLevels(&options);
+  // get base cache values
+  uint64_t fm = TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS);
+  uint64_t fh = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
+  uint64_t im = TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS);
+  uint64_t ih = TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT);
+
+  std::string value;
+  // this should be read from L0
+  // so cache values don't change
+  value = Get(1, "a2");
+  ASSERT_EQ(fm, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
+  ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
+  ASSERT_EQ(im, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
+  ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
+
+  // this should be read from L1
+  // the file is opened, prefetching results in a cache filter miss
+  // the block is loaded and added to the cache,
+  // then the get results in a cache hit for L1
+  // When we have infinite max_files, there is still a cache miss because we
+  // have reset the block cache
+  value = Get(1, "a");
+  ASSERT_EQ(fm + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
+  ASSERT_EQ(im + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
+}
+
+TEST_P(PinL0IndexAndFilterBlocksTest, DisablePrefetchingNonL0IndexAndFilter) {
+  Options options = CurrentOptions();
+  PinL0IndexAndFilterBlocksTest::CreateTwoLevels(&options);
+
+  // Get base cache values
+  uint64_t fm = TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS);
+  uint64_t fh = TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT);
+  uint64_t im = TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS);
+  uint64_t ih = TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT);
+
+  // Reopen database. If max_open_files is set as -1, table readers will be
+  // preloaded. This will trigger a BlockBasedTable::Open() and prefetch
+  // L0 index and filter. Level 1's prefetching is disabled in DB::Open()
+  TryReopenWithColumnFamilies({"default", "pikachu"}, options);
+
+  if (infinite_max_files_) {
+    // After reopen, cache misses are increased by one because we read (and
+    // only read) filter and index on L0
+    ASSERT_EQ(fm + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
+    ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
+    ASSERT_EQ(im + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
+    ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
+  } else {
+    // If max_open_files is not -1, we do not preload table readers, so there
+    // is no change.
+    ASSERT_EQ(fm, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
+    ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
+    ASSERT_EQ(im, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
+    ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
+  }
+  std::string value;
+  // this should be read from L0
+  value = Get(1, "a2");
+  // If max_open_files is -1, we have pinned index and filter in Rep, so there
+  // will not be changes in index and filter misses or hits. If max_open_files
+  // is not -1, Get() will open a TableReader and prefetch index and filter.
+  ASSERT_EQ(fm + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
+  ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
+  ASSERT_EQ(im + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
+  ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
+
+  // this should be read from L1
+  value = Get(1, "a");
+  if (infinite_max_files_) {
+    // In the infinite max files case, there's a cache miss in executing Get()
+    // because index and filter are not prefetched before.
+    ASSERT_EQ(fm + 2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
+    ASSERT_EQ(fh, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
+    ASSERT_EQ(im + 2, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
+    ASSERT_EQ(ih, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
+  } else {
+    // In this case, the cache miss count will be increased by one in
+    // BlockBasedTable::Open() because this is not in the DB::Open() code path,
+    // so we will prefetch L1's index and filter. The cache hit count will also
+    // be increased by one because Get() will read index and filter from the
+    // block cache prefetched in the previous Open() call.
+    ASSERT_EQ(fm + 2, TestGetTickerCount(options, BLOCK_CACHE_FILTER_MISS));
+    ASSERT_EQ(fh + 1, TestGetTickerCount(options, BLOCK_CACHE_FILTER_HIT));
+    ASSERT_EQ(im + 2, TestGetTickerCount(options, BLOCK_CACHE_INDEX_MISS));
+    ASSERT_EQ(ih + 1, TestGetTickerCount(options, BLOCK_CACHE_INDEX_HIT));
+  }
+}
+
+INSTANTIATE_TEST_CASE_P(PinL0IndexAndFilterBlocksTest,
+                        PinL0IndexAndFilterBlocksTest, ::testing::Bool());
+#ifndef ROCKSDB_LITE
+static void UniqueIdCallback(void* arg) {
+  int* result = reinterpret_cast<int*>(arg);
+  if (*result == -1) {
+    *result = 0;
+  }
+
+  rocksdb::SyncPoint::GetInstance()->ClearTrace();
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "GetUniqueIdFromFile:FS_IOC_GETVERSION", UniqueIdCallback);
+}
+
+class MockPersistentCache : public PersistentCache {
+ public:
+  explicit MockPersistentCache(const bool is_compressed, const size_t max_size)
+      : is_compressed_(is_compressed), max_size_(max_size) {
+    rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+    rocksdb::SyncPoint::GetInstance()->SetCallBack(
+        "GetUniqueIdFromFile:FS_IOC_GETVERSION", UniqueIdCallback);
+  }
+
+  virtual ~MockPersistentCache() {}
+
+  Status Insert(const Slice& page_key, const char* data,
+                const size_t size) override {
+    MutexLock _(&lock_);
+
+    if (size_ > max_size_) {
+      size_ -= data_.begin()->second.size();
+      data_.erase(data_.begin());
+    }
+
+    data_.insert(std::make_pair(page_key.ToString(), std::string(data, size)));
+    size_ += size;
+    return Status::OK();
+  }
+
+  Status Lookup(const Slice& page_key, std::unique_ptr<char[]>* data,
+                size_t* size) override {
+    MutexLock _(&lock_);
+    auto it = data_.find(page_key.ToString());
+    if (it == data_.end()) {
+      return Status::NotFound();
+    }
+
+    assert(page_key.ToString() == it->first);
+    data->reset(new char[it->second.size()]);
+    memcpy(data->get(), it->second.c_str(), it->second.size());
+    *size = it->second.size();
+    return Status::OK();
+  }
+
+  bool IsCompressed() override { return is_compressed_; }
+
+  port::Mutex lock_;
+  std::map<std::string, std::string> data_;
+  const bool is_compressed_ = true;
+  size_t size_ = 0;
+  const size_t max_size_ = 10 * 1024;  // 10KiB
+};
+
+TEST_F(DBTest2, PersistentCache) {
+  int num_iter = 80;
+
+  Options options;
+  options.write_buffer_size = 64 * 1024;  // small write buffer
+  options.statistics = rocksdb::CreateDBStatistics();
+  options = CurrentOptions(options);
+
+  auto bsizes = {/*no block cache*/ 0, /*1M*/ 1 * 1024 * 1024};
+  auto types = {/*compressed*/ 1, /*uncompressed*/ 0};
+  for (auto bsize : bsizes) {
+    for (auto type : types) {
+      BlockBasedTableOptions table_options;
+      table_options.persistent_cache.reset(
+          new MockPersistentCache(type, 10 * 1024));
+      table_options.no_block_cache = true;
+      table_options.block_cache = bsize ? NewLRUCache(bsize) : nullptr;
+      table_options.block_cache_compressed = nullptr;
+      options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+
+      DestroyAndReopen(options);
+      CreateAndReopenWithCF({"pikachu"}, options);
+      // default column family doesn't have block cache
+      Options no_block_cache_opts;
+      no_block_cache_opts.statistics = options.statistics;
+      no_block_cache_opts = CurrentOptions(no_block_cache_opts);
+      BlockBasedTableOptions table_options_no_bc;
+      table_options_no_bc.no_block_cache = true;
+      no_block_cache_opts.table_factory.reset(
+          NewBlockBasedTableFactory(table_options_no_bc));
+      ReopenWithColumnFamilies(
+          {"default", "pikachu"},
+          std::vector<Options>({no_block_cache_opts, options}));
+
+      Random rnd(301);
+
+      // Write 8MB (80 values, each 100K)
+      ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
+      std::vector<std::string> values;
+      std::string str;
+      for (int i = 0; i < num_iter; i++) {
+        if (i % 4 == 0) {  // high compression ratio
+          str = RandomString(&rnd, 1000);
+        }
+        values.push_back(str);
+        ASSERT_OK(Put(1, Key(i), values[i]));
+      }
+
+      // flush all data from memtable so that reads are from block cache
+      ASSERT_OK(Flush(1));
+
+      for (int i = 0; i < num_iter; i++) {
+        ASSERT_EQ(Get(1, Key(i)), values[i]);
+      }
+
+      auto hit = options.statistics->getTickerCount(PERSISTENT_CACHE_HIT);
+      auto miss = options.statistics->getTickerCount(PERSISTENT_CACHE_MISS);
+
+      ASSERT_GT(hit, 0);
+      ASSERT_GT(miss, 0);
+    }
+  }
+}
+
+namespace {
+void CountSyncPoint() {
+  TEST_SYNC_POINT_CALLBACK("DBTest2::MarkedPoint", nullptr /* arg */);
+}
+}  // namespace
+
+TEST_F(DBTest2, SyncPointMarker) {
+  std::atomic<int> sync_point_called(0);
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DBTest2::MarkedPoint",
+      [&](void* arg) { sync_point_called.fetch_add(1); });
+
+  // The first dependency enforces that Marker is loaded before MarkedPoint.
+  // The second checks that thread 1's MarkedPoint should be disabled here.
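+  // Unlike a plain dependency, a marker also binds the predecessor's thread:
+  // "MarkedPoint" is only processed on the thread that passed "Marker"
+  // (thread 2), so thread 1's call runs as a no-op instead of deadlocking on
+  // the Thread1First dependency, and the counter below ends up at exactly 1.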
+  // Execution order:
+  //   | Thread 1     | Thread 2     |
+  //   |              | Marker       |
+  //   | MarkedPoint  |              |
+  //   | Thread1First |              |
+  //   |              | MarkedPoint  |
+  rocksdb::SyncPoint::GetInstance()->LoadDependencyAndMarkers(
+      {{"DBTest2::SyncPointMarker:Thread1First", "DBTest2::MarkedPoint"}},
+      {{"DBTest2::SyncPointMarker:Marker", "DBTest2::MarkedPoint"}});
+
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  std::function<void()> func1 = [&]() {
+    CountSyncPoint();
+    TEST_SYNC_POINT("DBTest2::SyncPointMarker:Thread1First");
+  };
+
+  std::function<void()> func2 = [&]() {
+    TEST_SYNC_POINT("DBTest2::SyncPointMarker:Marker");
+    CountSyncPoint();
+  };
+
+  auto thread1 = std::thread(func1);
+  auto thread2 = std::thread(func2);
+  thread1.join();
+  thread2.join();
+
+  // The callback is only executed once
+  ASSERT_EQ(sync_point_called.load(), 1);
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+#endif
+
+class MergeOperatorPinningTest : public DBTest2,
+                                 public testing::WithParamInterface<bool> {
+ public:
+  MergeOperatorPinningTest() { disable_block_cache_ = GetParam(); }
+
+  bool disable_block_cache_;
+};
+
+INSTANTIATE_TEST_CASE_P(MergeOperatorPinningTest, MergeOperatorPinningTest,
+                        ::testing::Bool());
+
+#ifndef ROCKSDB_LITE
+TEST_P(MergeOperatorPinningTest, OperandsMultiBlocks) {
+  Options options = CurrentOptions();
+  BlockBasedTableOptions table_options;
+  table_options.block_size = 1;  // every block will contain one entry
+  table_options.no_block_cache = disable_block_cache_;
+  options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+  options.merge_operator = MergeOperators::CreateStringAppendTESTOperator();
+  options.level0_slowdown_writes_trigger = (1 << 30);
+  options.level0_stop_writes_trigger = (1 << 30);
+  options.disable_auto_compactions = true;
+  DestroyAndReopen(options);
+
+  const int kKeysPerFile = 10;
+  const int kOperandsPerKeyPerFile = 7;
+  const int kOperandSize = 100;
+  // Files to write in L0 before compacting to lower level
+  const int kFilesPerLevel = 3;
+
+  Random rnd(301);
+  std::map<std::string, std::string> true_data;
+  int batch_num = 1;
+  int lvl_to_fill = 4;
+  int key_id = 0;
+  while (true) {
+    for (int j = 0; j < kKeysPerFile; j++) {
+      std::string key = Key(key_id % 35);
+      key_id++;
+      for (int k = 0; k < kOperandsPerKeyPerFile; k++) {
+        std::string val = RandomString(&rnd, kOperandSize);
+        ASSERT_OK(db_->Merge(WriteOptions(), key, val));
+        if (true_data[key].size() == 0) {
+          true_data[key] = val;
+        } else {
+          true_data[key] += "," + val;
+        }
+      }
+    }
+
+    if (lvl_to_fill == -1) {
+      // Keep the last batch in the memtable and stop
+      break;
+    }
+
+    ASSERT_OK(Flush());
+    if (batch_num % kFilesPerLevel == 0) {
+      if (lvl_to_fill != 0) {
+        MoveFilesToLevel(lvl_to_fill);
+      }
+      lvl_to_fill--;
+    }
+    batch_num++;
+  }
+
+  // 3 L0 files
+  // 1 L1 file
+  // 3 L2 files
+  // 1 L3 file
+  // 3 L4 Files
+  ASSERT_EQ(FilesPerLevel(), "3,1,3,1,3");
+
+  // Verify Get()
+  for (auto kv : true_data) {
+    ASSERT_EQ(Get(kv.first), kv.second);
+  }
+
+  Iterator* iter = db_->NewIterator(ReadOptions());
+
+  // Verify Iterator::Next()
+  auto data_iter = true_data.begin();
+  for (iter->SeekToFirst(); iter->Valid(); iter->Next(), data_iter++) {
+    ASSERT_EQ(iter->key().ToString(), data_iter->first);
+    ASSERT_EQ(iter->value().ToString(), data_iter->second);
+  }
+  ASSERT_EQ(data_iter, true_data.end());
+
+  // Verify Iterator::Prev()
+  auto data_rev = true_data.rbegin();
+  for (iter->SeekToLast(); iter->Valid(); iter->Prev(), data_rev++) {
+    ASSERT_EQ(iter->key().ToString(), data_rev->first);
+    ASSERT_EQ(iter->value().ToString(),
+  }
+  ASSERT_EQ(data_rev, true_data.rend());
+
+  // Verify Iterator::Seek()
+  for (auto kv : true_data) {
+    iter->Seek(kv.first);
+    ASSERT_EQ(kv.first, iter->key().ToString());
+    ASSERT_EQ(kv.second, iter->value().ToString());
+  }
+
+  delete iter;
+}
+
+TEST_P(MergeOperatorPinningTest, Randomized) {
+  do {
+    Options options = CurrentOptions();
+    options.merge_operator = MergeOperators::CreateMaxOperator();
+    BlockBasedTableOptions table_options;
+    table_options.no_block_cache = disable_block_cache_;
+    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+    DestroyAndReopen(options);
+
+    Random rnd(301);
+    std::map<std::string, std::string> true_data;
+
+    const int kTotalMerges = 10000;
+    // Every key gets ~10 operands
+    const int kKeyRange = kTotalMerges / 10;
+    const int kOperandSize = 20;
+    const int kNumPutBefore = kKeyRange / 10;  // 10% value
+    const int kNumPutAfter = kKeyRange / 10;   // 10% overwrite
+    const int kNumDelete = kKeyRange / 10;     // 10% delete
+
+    // kNumPutBefore keys will have base values
+    for (int i = 0; i < kNumPutBefore; i++) {
+      std::string key = Key(rnd.Next() % kKeyRange);
+      std::string value = RandomString(&rnd, kOperandSize);
+      ASSERT_OK(db_->Put(WriteOptions(), key, value));
+
+      true_data[key] = value;
+    }
+
+    // Do kTotalMerges merges
+    for (int i = 0; i < kTotalMerges; i++) {
+      std::string key = Key(rnd.Next() % kKeyRange);
+      std::string value = RandomString(&rnd, kOperandSize);
+      ASSERT_OK(db_->Merge(WriteOptions(), key, value));
+
+      if (true_data[key] < value) {
+        true_data[key] = value;
+      }
+    }
+
+    // Overwrite random kNumPutAfter keys
+    for (int i = 0; i < kNumPutAfter; i++) {
+      std::string key = Key(rnd.Next() % kKeyRange);
+      std::string value = RandomString(&rnd, kOperandSize);
+      ASSERT_OK(db_->Put(WriteOptions(), key, value));
+
+      true_data[key] = value;
+    }
+
+    // Delete random kNumDelete keys
+    for (int i = 0; i < kNumDelete; i++) {
+      std::string key = Key(rnd.Next() % kKeyRange);
+      ASSERT_OK(db_->Delete(WriteOptions(), key));
+
+      true_data.erase(key);
+    }
+
+    VerifyDBFromMap(true_data);
+
+    // Skip HashCuckoo since it does not support merge operators
+  } while (ChangeOptions(kSkipMergePut | kSkipHashCuckoo));
+}
+
+class MergeOperatorHook : public MergeOperator {
+ public:
+  explicit MergeOperatorHook(std::shared_ptr<MergeOperator> _merge_op)
+      : merge_op_(_merge_op) {}
+
+  virtual bool FullMergeV2(const MergeOperationInput& merge_in,
+                           MergeOperationOutput* merge_out) const override {
+    before_merge_();
+    bool res = merge_op_->FullMergeV2(merge_in, merge_out);
+    after_merge_();
+    return res;
+  }
+
+  virtual const char* Name() const override { return merge_op_->Name(); }
+
+  std::shared_ptr<MergeOperator> merge_op_;
+  std::function<void()> before_merge_ = []() {};
+  std::function<void()> after_merge_ = []() {};
+};
+
+TEST_P(MergeOperatorPinningTest, EvictCacheBeforeMerge) {
+  Options options = CurrentOptions();
+
+  auto merge_hook =
+      std::make_shared<MergeOperatorHook>(MergeOperators::CreateMaxOperator());
+  options.merge_operator = merge_hook;
+  options.disable_auto_compactions = true;
+  options.level0_slowdown_writes_trigger = (1 << 30);
+  options.level0_stop_writes_trigger = (1 << 30);
+  options.max_open_files = 20;
+  BlockBasedTableOptions bbto;
+  bbto.no_block_cache = disable_block_cache_;
+  if (bbto.no_block_cache == false) {
+    bbto.block_cache = NewLRUCache(64 * 1024 * 1024);
+  } else {
+    bbto.block_cache = nullptr;
+  }
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+  DestroyAndReopen(options);
+
+  const int kNumOperands = 30;
+  const int kNumKeys = 1000;
+  const int kOperandSize = 100;
+  Random rnd(301);
+
+  // 1000 keys; every key has 30 operands, and every operand is in a
+  // different file
+  std::map<std::string, std::string> true_data;
+  for (int i = 0; i < kNumOperands; i++) {
+    for (int j = 0; j < kNumKeys; j++) {
+      std::string k = Key(j);
+      std::string v = RandomString(&rnd, kOperandSize);
+      ASSERT_OK(db_->Merge(WriteOptions(), k, v));
+
+      true_data[k] = std::max(true_data[k], v);
+    }
+    ASSERT_OK(Flush());
+  }
+
+  std::vector<uint64_t> file_numbers = ListTableFiles(env_, dbname_);
+  ASSERT_EQ(file_numbers.size(), kNumOperands);
+  int merge_cnt = 0;
+
+  // Code executed before merge operation
+  merge_hook->before_merge_ = [&]() {
+    // Evict all tables from cache before every merge operation
+    for (uint64_t num : file_numbers) {
+      TableCache::Evict(dbfull()->TEST_table_cache(), num);
+    }
+    // Decrease cache capacity to force all unrefed blocks to be evicted
+    if (bbto.block_cache) {
+      bbto.block_cache->SetCapacity(1);
+    }
+    merge_cnt++;
+  };
+
+  // Code executed after merge operation
+  merge_hook->after_merge_ = [&]() {
+    // Increase capacity again after doing the merge
+    if (bbto.block_cache) {
+      bbto.block_cache->SetCapacity(64 * 1024 * 1024);
+    }
+  };
+
+  VerifyDBFromMap(true_data);
+  ASSERT_EQ(merge_cnt, kNumKeys * 4 /* get + next + prev + seek */);
+
+  db_->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+
+  VerifyDBFromMap(true_data);
+}
+#endif  // ROCKSDB_LITE
+
+TEST_F(DBTest2, MaxSuccessiveMergesInRecovery) {
+  Options options;
+  options = CurrentOptions(options);
+  options.merge_operator = MergeOperators::CreatePutOperator();
+  DestroyAndReopen(options);
+
+  db_->Put(WriteOptions(), "foo", "bar");
+  ASSERT_OK(db_->Merge(WriteOptions(), "foo", "bar"));
+  ASSERT_OK(db_->Merge(WriteOptions(), "foo", "bar"));
+  ASSERT_OK(db_->Merge(WriteOptions(), "foo", "bar"));
+  ASSERT_OK(db_->Merge(WriteOptions(), "foo", "bar"));
+  ASSERT_OK(db_->Merge(WriteOptions(), "foo", "bar"));
+
+  options.max_successive_merges = 3;
+  Reopen(options);
+}
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+  rocksdb::port::InstallStackTraceHandler();
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/external/rocksdb/db/db_test_util.cc b/external/rocksdb/db/db_test_util.cc
new file mode 100755
index 0000000000..7ab9b3d109
--- /dev/null
+++ b/external/rocksdb/db/db_test_util.cc
@@ -0,0 +1,1155 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
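+
+// A usage sketch (illustrative only; "MyDBTest" and its path are hypothetical
+// names, while Put(), Get() and ChangeOptions() are real helpers defined in
+// this file):
+//
+//   class MyDBTest : public DBTestBase {
+//    public:
+//     MyDBTest() : DBTestBase("/my_db_test") {}
+//   };
+//
+//   TEST_F(MyDBTest, PutThenGet) {
+//     do {
+//       ASSERT_OK(Put("key", "value"));
+//       ASSERT_EQ(Get("key"), "value");
+//     } while (ChangeOptions());  // re-run under every option configuration
+//   }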
+
+#include "db/db_test_util.h"
+
+namespace rocksdb {
+
+// Special Env used to delay background operations
+
+SpecialEnv::SpecialEnv(Env* base)
+    : EnvWrapper(base),
+      rnd_(301),
+      sleep_counter_(this),
+      addon_time_(0),
+      time_elapse_only_sleep_(false),
+      no_sleep_(false) {
+  delay_sstable_sync_.store(false, std::memory_order_release);
+  drop_writes_.store(false, std::memory_order_release);
+  no_space_.store(false, std::memory_order_release);
+  non_writable_.store(false, std::memory_order_release);
+  count_random_reads_ = false;
+  count_sequential_reads_ = false;
+  manifest_sync_error_.store(false, std::memory_order_release);
+  manifest_write_error_.store(false, std::memory_order_release);
+  log_write_error_.store(false, std::memory_order_release);
+  random_file_open_counter_.store(0, std::memory_order_relaxed);
+  delete_count_.store(0, std::memory_order_relaxed);
+  num_open_wal_file_.store(0);
+  log_write_slowdown_ = 0;
+  bytes_written_ = 0;
+  sync_counter_ = 0;
+  non_writeable_rate_ = 0;
+  new_writable_count_ = 0;
+  non_writable_count_ = 0;
+  table_write_callback_ = nullptr;
+}
+
+DBTestBase::DBTestBase(const std::string path)
+    : option_config_(kDefault),
+      mem_env_(!getenv("MEM_ENV") ? nullptr : new MockEnv(Env::Default())),
+      env_(new SpecialEnv(mem_env_ ? mem_env_ : Env::Default())) {
+  env_->SetBackgroundThreads(1, Env::LOW);
+  env_->SetBackgroundThreads(1, Env::HIGH);
+  dbname_ = test::TmpDir(env_) + path;
+  alternative_wal_dir_ = dbname_ + "/wal";
+  alternative_db_log_dir_ = dbname_ + "/db_log_dir";
+  auto options = CurrentOptions();
+  auto delete_options = options;
+  delete_options.wal_dir = alternative_wal_dir_;
+  EXPECT_OK(DestroyDB(dbname_, delete_options));
+  // Destroy again with default options, for the case where no alternative
+  // WAL dir was used.
+  EXPECT_OK(DestroyDB(dbname_, options));
+  db_ = nullptr;
+  Reopen(options);
+  Random::GetTLSInstance()->Reset(0xdeadbeef);
+}
+
+DBTestBase::~DBTestBase() {
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+  rocksdb::SyncPoint::GetInstance()->LoadDependency({});
+  rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks();
+  Close();
+  Options options;
+  options.db_paths.emplace_back(dbname_, 0);
+  options.db_paths.emplace_back(dbname_ + "_2", 0);
+  options.db_paths.emplace_back(dbname_ + "_3", 0);
+  options.db_paths.emplace_back(dbname_ + "_4", 0);
+  EXPECT_OK(DestroyDB(dbname_, options));
+  delete env_;
+}
+
+bool DBTestBase::ShouldSkipOptions(int option_config, int skip_mask) {
+#ifdef ROCKSDB_LITE
+  // These options are not supported in ROCKSDB_LITE
+  if (option_config == kHashSkipList ||
+      option_config == kPlainTableFirstBytePrefix ||
+      option_config == kPlainTableCappedPrefix ||
+      option_config == kPlainTableCappedPrefixNonMmap ||
+      option_config == kPlainTableAllBytesPrefix ||
+      option_config == kVectorRep || option_config == kHashLinkList ||
+      option_config == kHashCuckoo || option_config == kUniversalCompaction ||
+      option_config == kUniversalCompactionMultiLevel ||
+      option_config == kUniversalSubcompactions ||
+      option_config == kFIFOCompaction ||
+      option_config == kConcurrentSkipList) {
+    return true;
+  }
+#endif
+
+  if ((skip_mask & kSkipUniversalCompaction) &&
+      (option_config == kUniversalCompaction ||
+       option_config == kUniversalCompactionMultiLevel)) {
+    return true;
+  }
+  if ((skip_mask & kSkipMergePut) && option_config == kMergePut) {
+    return true;
+  }
+  if ((skip_mask & kSkipNoSeekToLast) &&
+      (option_config == kHashLinkList || option_config == kHashSkipList)) {
+    return true;
+  }
+  if ((skip_mask & kSkipPlainTable) &&
+      (option_config == kPlainTableAllBytesPrefix ||
+       option_config == kPlainTableFirstBytePrefix ||
+       option_config == kPlainTableCappedPrefix ||
+       option_config == kPlainTableCappedPrefixNonMmap)) {
+    return true;
+  }
+  if ((skip_mask & kSkipHashIndex) &&
+      (option_config == kBlockBasedTableWithPrefixHashIndex ||
+       option_config == kBlockBasedTableWithWholeKeyHashIndex)) {
+    return true;
+  }
+  if ((skip_mask & kSkipHashCuckoo) && (option_config == kHashCuckoo)) {
+    return true;
+  }
+  if ((skip_mask & kSkipFIFOCompaction) && option_config == kFIFOCompaction) {
+    return true;
+  }
+  if ((skip_mask & kSkipMmapReads) && option_config == kWalDirAndMmapReads) {
+    return true;
+  }
+  return false;
+}
+
+// Switch to a fresh database with the next option configuration to
+// test. Return false if there are no more configurations to test.
+bool DBTestBase::ChangeOptions(int skip_mask) {
+  for (option_config_++; option_config_ < kEnd; option_config_++) {
+    if (ShouldSkipOptions(option_config_, skip_mask)) {
+      continue;
+    }
+    break;
+  }
+
+  if (option_config_ >= kEnd) {
+    Destroy(last_options_);
+    return false;
+  } else {
+    auto options = CurrentOptions();
+    options.create_if_missing = true;
+    DestroyAndReopen(options);
+    return true;
+  }
+}
+
+// Switch between different compaction styles.
+bool DBTestBase::ChangeCompactOptions() {
+  if (option_config_ == kDefault) {
+    option_config_ = kUniversalCompaction;
+    Destroy(last_options_);
+    auto options = CurrentOptions();
+    options.create_if_missing = true;
+    TryReopen(options);
+    return true;
+  } else if (option_config_ == kUniversalCompaction) {
+    option_config_ = kUniversalCompactionMultiLevel;
+    Destroy(last_options_);
+    auto options = CurrentOptions();
+    options.create_if_missing = true;
+    TryReopen(options);
+    return true;
+  } else if (option_config_ == kUniversalCompactionMultiLevel) {
+    option_config_ = kLevelSubcompactions;
+    Destroy(last_options_);
+    auto options = CurrentOptions();
+    assert(options.max_subcompactions > 1);
+    TryReopen(options);
+    return true;
+  } else if (option_config_ == kLevelSubcompactions) {
+    option_config_ = kUniversalSubcompactions;
+    Destroy(last_options_);
+    auto options = CurrentOptions();
+    assert(options.max_subcompactions > 1);
+    TryReopen(options);
+    return true;
+  } else {
+    return false;
+  }
+}
+
+// Switch between different filter policies
+// Jump from kDefault to kFilter to kFullFilter
+bool DBTestBase::ChangeFilterOptions() {
+  if (option_config_ == kDefault) {
+    option_config_ = kFilter;
+  } else if (option_config_ == kFilter) {
+    option_config_ = kFullFilterWithNewTableReaderForCompactions;
+  } else {
+    return false;
+  }
+  Destroy(last_options_);
+
+  auto options = CurrentOptions();
+  options.create_if_missing = true;
+  TryReopen(options);
+  return true;
+}
+
+// Return the current option configuration.
+Options DBTestBase::CurrentOptions(
+    const anon::OptionsOverride& options_override) {
+  Options options;
+  options.write_buffer_size = 4090 * 4096;
+  options.target_file_size_base = 2 * 1024 * 1024;
+  options.max_bytes_for_level_base = 10 * 1024 * 1024;
+  options.max_open_files = 5000;
+  options.base_background_compactions = -1;
+  options.wal_recovery_mode = WALRecoveryMode::kTolerateCorruptedTailRecords;
+  options.compaction_pri = CompactionPri::kByCompensatedSize;
+
+  return CurrentOptions(options, options_override);
+}
+
+Options DBTestBase::CurrentOptions(
+    const Options& defaultOptions,
+    const anon::OptionsOverride& options_override) {
+  // This redundant copy minimizes code change without triggering lint errors.
+  Options options = defaultOptions;
+  XFUNC_TEST("", "dbtest_options", inplace_options1, GetXFTestOptions,
+             reinterpret_cast<Options*>(&options),
+             options_override.skip_policy);
+  BlockBasedTableOptions table_options;
+  bool set_block_based_table_factory = true;
+  switch (option_config_) {
+#ifndef ROCKSDB_LITE
+    case kHashSkipList:
+      options.prefix_extractor.reset(NewFixedPrefixTransform(1));
+      options.memtable_factory.reset(NewHashSkipListRepFactory(16));
+      break;
+    case kPlainTableFirstBytePrefix:
+      options.table_factory.reset(new PlainTableFactory());
+      options.prefix_extractor.reset(NewFixedPrefixTransform(1));
+      options.allow_mmap_reads = true;
+      options.max_sequential_skip_in_iterations = 999999;
+      set_block_based_table_factory = false;
+      break;
+    case kPlainTableCappedPrefix:
+      options.table_factory.reset(new PlainTableFactory());
+      options.prefix_extractor.reset(NewCappedPrefixTransform(8));
+      options.allow_mmap_reads = true;
+      options.max_sequential_skip_in_iterations = 999999;
+      set_block_based_table_factory = false;
+      break;
+    case kPlainTableCappedPrefixNonMmap:
+      options.table_factory.reset(new PlainTableFactory());
+      options.prefix_extractor.reset(NewCappedPrefixTransform(8));
+      options.allow_mmap_reads = false;
+      options.max_sequential_skip_in_iterations = 999999;
+      set_block_based_table_factory = false;
+      break;
+    case kPlainTableAllBytesPrefix:
+      options.table_factory.reset(new PlainTableFactory());
+      options.prefix_extractor.reset(NewNoopTransform());
+      options.allow_mmap_reads = true;
+      options.max_sequential_skip_in_iterations = 999999;
+      set_block_based_table_factory = false;
+      break;
+    case kVectorRep:
+      options.memtable_factory.reset(new VectorRepFactory(100));
+      break;
+    case kHashLinkList:
+      options.prefix_extractor.reset(NewFixedPrefixTransform(1));
+      options.memtable_factory.reset(
+          NewHashLinkListRepFactory(4, 0, 3, true, 4));
+      break;
+    case kHashCuckoo:
+      options.memtable_factory.reset(
+          NewHashCuckooRepFactory(options.write_buffer_size));
+      break;
+#endif  // ROCKSDB_LITE
+    case kMergePut:
+      options.merge_operator = MergeOperators::CreatePutOperator();
+      break;
+    case kFilter:
+      table_options.filter_policy.reset(NewBloomFilterPolicy(10, true));
+      break;
+    case kFullFilterWithNewTableReaderForCompactions:
+      table_options.filter_policy.reset(NewBloomFilterPolicy(10, false));
+      options.new_table_reader_for_compaction_inputs = true;
+      options.compaction_readahead_size = 10 * 1024 * 1024;
+      break;
+    case kUncompressed:
+      options.compression = kNoCompression;
+      break;
+    case kNumLevel_3:
+      options.num_levels = 3;
+      break;
+    case kDBLogDir:
+      options.db_log_dir = alternative_db_log_dir_;
+      break;
+    case kWalDirAndMmapReads:
+      options.wal_dir = alternative_wal_dir_;
+      // mmap reads should be orthogonal to WalDir setting, so we piggyback to
+      // this option config to test mmap reads as well
+      options.allow_mmap_reads = true;
+      break;
+    case kManifestFileSize:
+      options.max_manifest_file_size = 50;  // 50 bytes
+      break;
+    case kPerfOptions:
+      options.soft_rate_limit = 2.0;
+      options.delayed_write_rate = 8 * 1024 * 1024;
+      options.report_bg_io_stats = true;
+      // TODO(3.13) -- test more options
+      break;
+    case kUniversalCompaction:
+      options.compaction_style = kCompactionStyleUniversal;
+      options.num_levels = 1;
+      break;
+    case kUniversalCompactionMultiLevel:
+      options.compaction_style = kCompactionStyleUniversal;
+      options.num_levels = 8;
+      break;
+    case kCompressedBlockCache:
+      options.allow_mmap_writes = true;
+      table_options.block_cache_compressed = NewLRUCache(8 * 1024 * 1024);
+      break;
+    case kInfiniteMaxOpenFiles:
+      options.max_open_files = -1;
+      break;
+    case kxxHashChecksum: {
+      table_options.checksum = kxxHash;
+      break;
+    }
+    case kFIFOCompaction: {
+      options.compaction_style = kCompactionStyleFIFO;
+      break;
+    }
+    case kBlockBasedTableWithPrefixHashIndex: {
+      table_options.index_type = BlockBasedTableOptions::kHashSearch;
+      options.prefix_extractor.reset(NewFixedPrefixTransform(1));
+      break;
+    }
+    case kBlockBasedTableWithWholeKeyHashIndex: {
+      table_options.index_type = BlockBasedTableOptions::kHashSearch;
+      options.prefix_extractor.reset(NewNoopTransform());
+      break;
+    }
+    case kBlockBasedTableWithIndexRestartInterval: {
+      table_options.index_block_restart_interval = 8;
+      break;
+    }
+    case kOptimizeFiltersForHits: {
+      options.optimize_filters_for_hits = true;
+      set_block_based_table_factory = true;
+      break;
+    }
+    case kRowCache: {
+      options.row_cache = NewLRUCache(1024 * 1024);
+      break;
+    }
+    case kRecycleLogFiles: {
+      options.recycle_log_file_num = 2;
+      break;
+    }
+    case kLevelSubcompactions: {
+      options.max_subcompactions = 4;
+      break;
+    }
+    case kUniversalSubcompactions: {
+      options.compaction_style = kCompactionStyleUniversal;
+      options.num_levels = 8;
+      options.max_subcompactions = 4;
+      break;
+    }
+    case kConcurrentSkipList: {
+      options.allow_concurrent_memtable_write = true;
+      options.enable_write_thread_adaptive_yield = true;
+      break;
+    }
+
+    default:
+      break;
+  }
+
+  if (options_override.filter_policy) {
+    table_options.filter_policy = options_override.filter_policy;
+  }
+  if (set_block_based_table_factory) {
+    options.table_factory.reset(NewBlockBasedTableFactory(table_options));
+  }
+  options.env = env_;
+  options.create_if_missing = true;
+  options.fail_if_options_file_error = true;
+  return options;
+}
+
+void DBTestBase::CreateColumnFamilies(const std::vector<std::string>& cfs,
+                                      const Options& options) {
+  ColumnFamilyOptions cf_opts(options);
+  size_t cfi = handles_.size();
+  handles_.resize(cfi + cfs.size());
+  for (auto cf : cfs) {
+    ASSERT_OK(db_->CreateColumnFamily(cf_opts, cf, &handles_[cfi++]));
+  }
+}
+
+void DBTestBase::CreateAndReopenWithCF(const std::vector<std::string>& cfs,
+                                       const Options& options) {
+  CreateColumnFamilies(cfs, options);
+  std::vector<std::string> cfs_plus_default = cfs;
+  cfs_plus_default.insert(cfs_plus_default.begin(), kDefaultColumnFamilyName);
+  ReopenWithColumnFamilies(cfs_plus_default, options);
+}
+
+void DBTestBase::ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
+                                          const std::vector<Options>& options) {
+  ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
+}
+
+void DBTestBase::ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
+                                          const Options& options) {
+  ASSERT_OK(TryReopenWithColumnFamilies(cfs, options));
+}
+
+Status DBTestBase::TryReopenWithColumnFamilies(
+    const std::vector<std::string>& cfs, const std::vector<Options>& options) {
+  Close();
+  EXPECT_EQ(cfs.size(), options.size());
+  std::vector<ColumnFamilyDescriptor> column_families;
+  for (size_t i = 0; i < cfs.size(); ++i) {
+    column_families.push_back(ColumnFamilyDescriptor(cfs[i], options[i]));
+  }
+  DBOptions db_opts = DBOptions(options[0]);
+  return DB::Open(db_opts, dbname_, column_families, &handles_, &db_);
+}
+
+Status DBTestBase::TryReopenWithColumnFamilies(
+    const std::vector<std::string>& cfs, const Options& options) {
+  Close();
+  std::vector<Options> v_opts(cfs.size(), options);
+  return TryReopenWithColumnFamilies(cfs, v_opts);
+}
+
+void DBTestBase::Reopen(const Options& options) {
+  ASSERT_OK(TryReopen(options));
+}
+
+void DBTestBase::Close() {
+  for (auto h : handles_) {
+    db_->DestroyColumnFamilyHandle(h);
+  }
+  handles_.clear();
+  delete db_;
+  db_ = nullptr;
+}
+
+void DBTestBase::DestroyAndReopen(const Options& options) {
+  // Destroy using last options
+  Destroy(last_options_);
+  ASSERT_OK(TryReopen(options));
+}
+
+void DBTestBase::Destroy(const Options& options) {
+  Close();
+  ASSERT_OK(DestroyDB(dbname_, options));
+}
+
+Status DBTestBase::ReadOnlyReopen(const Options& options) {
+  return DB::OpenForReadOnly(options, dbname_, &db_);
+}
+
+Status DBTestBase::TryReopen(const Options& options) {
+  Close();
+  last_options_ = options;
+  return DB::Open(options, dbname_, &db_);
+}
+
+Status DBTestBase::Flush(int cf) {
+  if (cf == 0) {
+    return db_->Flush(FlushOptions());
+  } else {
+    return db_->Flush(FlushOptions(), handles_[cf]);
+  }
+}
+
+Status DBTestBase::Put(const Slice& k, const Slice& v, WriteOptions wo) {
+  if (kMergePut == option_config_) {
+    return db_->Merge(wo, k, v);
+  } else {
+    return db_->Put(wo, k, v);
+  }
+}
+
+Status DBTestBase::Put(int cf, const Slice& k, const Slice& v,
+                       WriteOptions wo) {
+  if (kMergePut == option_config_) {
+    return db_->Merge(wo, handles_[cf], k, v);
+  } else {
+    return db_->Put(wo, handles_[cf], k, v);
+  }
+}
+
+Status DBTestBase::Delete(const std::string& k) {
+  return db_->Delete(WriteOptions(), k);
+}
+
+Status DBTestBase::Delete(int cf, const std::string& k) {
+  return db_->Delete(WriteOptions(), handles_[cf], k);
+}
+
+Status DBTestBase::SingleDelete(const std::string& k) {
+  return db_->SingleDelete(WriteOptions(), k);
+}
+
+Status DBTestBase::SingleDelete(int cf, const std::string& k) {
+  return db_->SingleDelete(WriteOptions(), handles_[cf], k);
+}
+
+std::string DBTestBase::Get(const std::string& k, const Snapshot* snapshot) {
+  ReadOptions options;
+  options.verify_checksums = true;
+  options.snapshot = snapshot;
+  std::string result;
+  Status s = db_->Get(options, k, &result);
+  if (s.IsNotFound()) {
+    result = "NOT_FOUND";
+  } else if (!s.ok()) {
+    result = s.ToString();
+  }
+  return result;
+}
+
+std::string DBTestBase::Get(int cf, const std::string& k,
+                            const Snapshot* snapshot) {
+  ReadOptions options;
+  options.verify_checksums = true;
+  options.snapshot = snapshot;
+  std::string result;
+  Status s = db_->Get(options, handles_[cf], k, &result);
+  if (s.IsNotFound()) {
+    result = "NOT_FOUND";
+  } else if (!s.ok()) {
+    result = s.ToString();
+  }
+  return result;
+}
+
+uint64_t DBTestBase::GetNumSnapshots() {
+  uint64_t int_num;
+  EXPECT_TRUE(dbfull()->GetIntProperty("rocksdb.num-snapshots", &int_num));
+  return int_num;
+}
+
+uint64_t DBTestBase::GetTimeOldestSnapshots() {
+  uint64_t int_num;
+  EXPECT_TRUE(
+      dbfull()->GetIntProperty("rocksdb.oldest-snapshot-time", &int_num));
+  return int_num;
+}
+
+// Return a string that contains all key,value pairs in order,
+// formatted like "(k1->v1)(k2->v2)".
+std::string DBTestBase::Contents(int cf) {
+  std::vector<std::string> forward;
+  std::string result;
+  Iterator* iter = (cf == 0) ? db_->NewIterator(ReadOptions())
+                             : db_->NewIterator(ReadOptions(), handles_[cf]);
+  for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+    std::string s = IterStatus(iter);
+    result.push_back('(');
+    result.append(s);
+    result.push_back(')');
+    forward.push_back(s);
+  }
+
+  // Check reverse iteration results are the reverse of forward results
+  unsigned int matched = 0;
+  for (iter->SeekToLast(); iter->Valid(); iter->Prev()) {
+    EXPECT_LT(matched, forward.size());
+    EXPECT_EQ(IterStatus(iter), forward[forward.size() - matched - 1]);
+    matched++;
+  }
+  EXPECT_EQ(matched, forward.size());
+
+  delete iter;
+  return result;
+}
+
+std::string DBTestBase::AllEntriesFor(const Slice& user_key, int cf) {
+  Arena arena;
+  ScopedArenaIterator iter;
+  if (cf == 0) {
+    iter.set(dbfull()->NewInternalIterator(&arena));
+  } else {
+    iter.set(dbfull()->NewInternalIterator(&arena, handles_[cf]));
+  }
+  InternalKey target(user_key, kMaxSequenceNumber, kTypeValue);
+  iter->Seek(target.Encode());
+  std::string result;
+  if (!iter->status().ok()) {
+    result = iter->status().ToString();
+  } else {
+    result = "[ ";
+    bool first = true;
+    while (iter->Valid()) {
+      ParsedInternalKey ikey(Slice(), 0, kTypeValue);
+      if (!ParseInternalKey(iter->key(), &ikey)) {
+        result += "CORRUPTED";
+      } else {
+        if (!last_options_.comparator->Equal(ikey.user_key, user_key)) {
+          break;
+        }
+        if (!first) {
+          result += ", ";
+        }
+        first = false;
+        switch (ikey.type) {
+          case kTypeValue:
+            result += iter->value().ToString();
+            break;
+          case kTypeMerge:
+            // keep it the same as kTypeValue for testing kMergePut
+            result += iter->value().ToString();
+            break;
+          case kTypeDeletion:
+            result += "DEL";
+            break;
+          case kTypeSingleDeletion:
+            result += "SDEL";
+            break;
+          default:
+            assert(false);
+            break;
+        }
+      }
+      iter->Next();
+    }
+    if (!first) {
+      result += " ";
+    }
+    result += "]";
+  }
+  return result;
+}
+
+#ifndef ROCKSDB_LITE
+int DBTestBase::NumSortedRuns(int cf) {
+  ColumnFamilyMetaData cf_meta;
+  if (cf == 0) {
+    db_->GetColumnFamilyMetaData(&cf_meta);
+  } else {
+    db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
+  }
+  int num_sr = static_cast<int>(cf_meta.levels[0].files.size());
+  for (size_t i = 1U; i < cf_meta.levels.size(); i++) {
+    if (cf_meta.levels[i].files.size() > 0) {
+      num_sr++;
+    }
+  }
+  return num_sr;
+}
+
+uint64_t DBTestBase::TotalSize(int cf) {
+  ColumnFamilyMetaData cf_meta;
+  if (cf == 0) {
+    db_->GetColumnFamilyMetaData(&cf_meta);
+  } else {
+    db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
+  }
+  return cf_meta.size;
+}
+
+uint64_t DBTestBase::SizeAtLevel(int level) {
+  std::vector<LiveFileMetaData> metadata;
+  db_->GetLiveFilesMetaData(&metadata);
+  uint64_t sum = 0;
+  for (const auto& m : metadata) {
+    if (m.level == level) {
+      sum += m.size;
+    }
+  }
+  return sum;
+}
+
+size_t DBTestBase::TotalLiveFiles(int cf) {
+  ColumnFamilyMetaData cf_meta;
+  if (cf == 0) {
+    db_->GetColumnFamilyMetaData(&cf_meta);
+  } else {
+    db_->GetColumnFamilyMetaData(handles_[cf], &cf_meta);
+  }
+  size_t num_files = 0;
+  for (auto& level : cf_meta.levels) {
+    num_files += level.files.size();
+  }
+  return num_files;
+}
+
+size_t DBTestBase::CountLiveFiles() {
+  std::vector<LiveFileMetaData> metadata;
+  db_->GetLiveFilesMetaData(&metadata);
+  return metadata.size();
+}
+#endif  // ROCKSDB_LITE
+
+int DBTestBase::NumTableFilesAtLevel(int level, int cf) {
+  std::string property;
+  if (cf == 0) {
+    // default cfd
+    EXPECT_TRUE(db_->GetProperty(
+        "rocksdb.num-files-at-level" + NumberToString(level), &property));
+  } else {
+    EXPECT_TRUE(db_->GetProperty(
+        handles_[cf], "rocksdb.num-files-at-level" + NumberToString(level),
+        &property));
+  }
+  return atoi(property.c_str());
+}
+
+double DBTestBase::CompressionRatioAtLevel(int level, int cf) {
+  std::string property;
+  if (cf == 0) {
+    // default cfd
+    EXPECT_TRUE(db_->GetProperty(
+        "rocksdb.compression-ratio-at-level" + NumberToString(level),
+        &property));
+  } else {
+    EXPECT_TRUE(db_->GetProperty(
+        handles_[cf],
+        "rocksdb.compression-ratio-at-level" + NumberToString(level),
+        &property));
+  }
+  return std::stod(property);
+}
+
+int DBTestBase::TotalTableFiles(int cf, int levels) {
+  if (levels == -1) {
+    levels = CurrentOptions().num_levels;
+  }
+  int result = 0;
+  for (int level = 0; level < levels; level++) {
+    result += NumTableFilesAtLevel(level, cf);
+  }
+  return result;
+}
+
+// Return spread of files per level
+std::string DBTestBase::FilesPerLevel(int cf) {
+  int num_levels =
+      (cf == 0) ? db_->NumberLevels() : db_->NumberLevels(handles_[cf]);
+  std::string result;
+  size_t last_non_zero_offset = 0;
+  for (int level = 0; level < num_levels; level++) {
+    int f = NumTableFilesAtLevel(level, cf);
+    char buf[100];
+    snprintf(buf, sizeof(buf), "%s%d", (level ? "," : ""), f);
+    result += buf;
+    if (f > 0) {
+      last_non_zero_offset = result.size();
+    }
+  }
+  result.resize(last_non_zero_offset);
+  return result;
+}
+
+size_t DBTestBase::CountFiles() {
+  std::vector<std::string> files;
+  env_->GetChildren(dbname_, &files);
+
+  std::vector<std::string> logfiles;
+  if (dbname_ != last_options_.wal_dir) {
+    env_->GetChildren(last_options_.wal_dir, &logfiles);
+  }
+
+  return files.size() + logfiles.size();
+}
+
+uint64_t DBTestBase::Size(const Slice& start, const Slice& limit, int cf) {
+  Range r(start, limit);
+  uint64_t size;
+  if (cf == 0) {
+    db_->GetApproximateSizes(&r, 1, &size);
+  } else {
+    db_->GetApproximateSizes(handles_[cf], &r, 1, &size);
+  }
+  return size;
+}
+
+void DBTestBase::Compact(int cf, const Slice& start, const Slice& limit,
+                         uint32_t target_path_id) {
+  CompactRangeOptions compact_options;
+  compact_options.target_path_id = target_path_id;
+  ASSERT_OK(db_->CompactRange(compact_options, handles_[cf], &start, &limit));
+}
+
+void DBTestBase::Compact(int cf, const Slice& start, const Slice& limit) {
+  ASSERT_OK(
+      db_->CompactRange(CompactRangeOptions(), handles_[cf], &start, &limit));
+}
+
+void DBTestBase::Compact(const Slice& start, const Slice& limit) {
+  ASSERT_OK(db_->CompactRange(CompactRangeOptions(), &start, &limit));
+}
+
+// Do n memtable compactions, each of which produces an sstable
+// covering the range [small,large].
+void DBTestBase::MakeTables(int n, const std::string& small,
+                            const std::string& large, int cf) {
+  for (int i = 0; i < n; i++) {
+    ASSERT_OK(Put(cf, small, "begin"));
+    ASSERT_OK(Put(cf, large, "end"));
+    ASSERT_OK(Flush(cf));
+    MoveFilesToLevel(n - i - 1, cf);
+  }
+}
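+
+// Usage sketch (illustrative): a test that needs newly flushed sstables to
+// stay in L0 typically seeds every level first, e.g.
+//   FillLevels("", "~", /*cf=*/0);
+// which uses MakeTables() above to place one covering table on each level.
+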
+// Prevent pushing of new sstables into deeper levels by adding
+// tables that cover a specified range to all levels.
+void DBTestBase::FillLevels(const std::string& smallest,
+                            const std::string& largest, int cf) {
+  MakeTables(db_->NumberLevels(handles_[cf]), smallest, largest, cf);
+}
+
+void DBTestBase::MoveFilesToLevel(int level, int cf) {
+  for (int l = 0; l < level; ++l) {
+    if (cf > 0) {
+      dbfull()->TEST_CompactRange(l, nullptr, nullptr, handles_[cf]);
+    } else {
+      dbfull()->TEST_CompactRange(l, nullptr, nullptr);
+    }
+  }
+}
+
+void DBTestBase::DumpFileCounts(const char* label) {
+  fprintf(stderr, "---\n%s:\n", label);
+  fprintf(stderr, "maxoverlap: %" PRIu64 "\n",
+          dbfull()->TEST_MaxNextLevelOverlappingBytes());
+  for (int level = 0; level < db_->NumberLevels(); level++) {
+    int num = NumTableFilesAtLevel(level);
+    if (num > 0) {
+      fprintf(stderr, "  level %3d : %d files\n", level, num);
+    }
+  }
+}
+
+std::string DBTestBase::DumpSSTableList() {
+  std::string property;
+  db_->GetProperty("rocksdb.sstables", &property);
+  return property;
+}
+
+void DBTestBase::GetSstFiles(std::string path,
+                             std::vector<std::string>* files) {
+  env_->GetChildren(path, files);
+
+  files->erase(
+      std::remove_if(files->begin(), files->end(), [](std::string name) {
+        uint64_t number;
+        FileType type;
+        return !(ParseFileName(name, &number, &type) && type == kTableFile);
+      }), files->end());
+}
+
+int DBTestBase::GetSstFileCount(std::string path) {
+  std::vector<std::string> files;
+  GetSstFiles(path, &files);
+  return static_cast<int>(files.size());
+}
+
+// this will generate non-overlapping files since it keeps increasing key_idx
+void DBTestBase::GenerateNewFile(int cf, Random* rnd, int* key_idx,
+                                 bool nowait) {
+  for (int i = 0; i < KNumKeysByGenerateNewFile; i++) {
+    ASSERT_OK(Put(cf, Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990)));
+    (*key_idx)++;
+  }
+  if (!nowait) {
+    dbfull()->TEST_WaitForFlushMemTable();
+    dbfull()->TEST_WaitForCompact();
+  }
+}
+
+// this will generate non-overlapping files since it keeps increasing key_idx
+void DBTestBase::GenerateNewFile(Random* rnd, int* key_idx, bool nowait) {
+  for (int i = 0; i < KNumKeysByGenerateNewFile; i++) {
+    ASSERT_OK(Put(Key(*key_idx), RandomString(rnd, (i == 99) ? 1 : 990)));
+    (*key_idx)++;
+  }
+  if (!nowait) {
+    dbfull()->TEST_WaitForFlushMemTable();
+    dbfull()->TEST_WaitForCompact();
+  }
+}
+
+const int DBTestBase::kNumKeysByGenerateNewRandomFile = 51;
+
+void DBTestBase::GenerateNewRandomFile(Random* rnd, bool nowait) {
+  for (int i = 0; i < kNumKeysByGenerateNewRandomFile; i++) {
+    ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 2000)));
+  }
+  ASSERT_OK(Put("key" + RandomString(rnd, 7), RandomString(rnd, 200)));
+  if (!nowait) {
+    dbfull()->TEST_WaitForFlushMemTable();
+    dbfull()->TEST_WaitForCompact();
+  }
+}
+
+std::string DBTestBase::IterStatus(Iterator* iter) {
+  std::string result;
+  if (iter->Valid()) {
+    result = iter->key().ToString() + "->" + iter->value().ToString();
+  } else {
+    result = "(invalid)";
+  }
+  return result;
+}
+
+Options DBTestBase::OptionsForLogIterTest() {
+  Options options = CurrentOptions();
+  options.create_if_missing = true;
+  options.WAL_ttl_seconds = 1000;
+  return options;
+}
+
+std::string DBTestBase::DummyString(size_t len, char c) {
+  return std::string(len, c);
+}
+
+void DBTestBase::VerifyIterLast(std::string expected_key, int cf) {
+  Iterator* iter;
+  ReadOptions ro;
+  if (cf == 0) {
+    iter = db_->NewIterator(ro);
+  } else {
+    iter = db_->NewIterator(ro, handles_[cf]);
+  }
+  iter->SeekToLast();
+  ASSERT_EQ(IterStatus(iter), expected_key);
+  delete iter;
+}
+
+// Used to test InplaceUpdate
+
+// If the previous value is nullptr or delta is larger than the previous
+// value, sets newValue with delta.
+// If the previous value is not empty, updates the previous value in place
+// with a 'b' string of (previous value size - 1).
+UpdateStatus DBTestBase::updateInPlaceSmallerSize(char* prevValue,
+                                                  uint32_t* prevSize,
+                                                  Slice delta,
+                                                  std::string* newValue) {
+  if (prevValue == nullptr) {
+    *newValue = std::string(delta.size(), 'c');
+    return UpdateStatus::UPDATED;
+  } else {
+    *prevSize = *prevSize - 1;
+    std::string str_b = std::string(*prevSize, 'b');
+    memcpy(prevValue, str_b.c_str(), str_b.size());
+    return UpdateStatus::UPDATED_INPLACE;
+  }
+}
+
+UpdateStatus DBTestBase::updateInPlaceSmallerVarintSize(char* prevValue,
+                                                        uint32_t* prevSize,
+                                                        Slice delta,
+                                                        std::string* newValue) {
+  if (prevValue == nullptr) {
+    *newValue = std::string(delta.size(), 'c');
+    return UpdateStatus::UPDATED;
+  } else {
+    *prevSize = 1;
+    std::string str_b = std::string(*prevSize, 'b');
+    memcpy(prevValue, str_b.c_str(), str_b.size());
+    return UpdateStatus::UPDATED_INPLACE;
+  }
+}
+
+UpdateStatus DBTestBase::updateInPlaceLargerSize(char* prevValue,
+                                                 uint32_t* prevSize,
+                                                 Slice delta,
+                                                 std::string* newValue) {
+  *newValue = std::string(delta.size(), 'c');
+  return UpdateStatus::UPDATED;
+}
+
+UpdateStatus DBTestBase::updateInPlaceNoAction(char* prevValue,
+                                               uint32_t* prevSize, Slice delta,
+                                               std::string* newValue) {
+  return UpdateStatus::UPDATE_FAILED;
+}
+
+// Utility method to test InplaceUpdate
+void DBTestBase::validateNumberOfEntries(int numValues, int cf) {
+  ScopedArenaIterator iter;
+  Arena arena;
+  if (cf != 0) {
+    iter.set(dbfull()->NewInternalIterator(&arena, handles_[cf]));
+  } else {
+    iter.set(dbfull()->NewInternalIterator(&arena));
+  }
+  iter->SeekToFirst();
+  ASSERT_EQ(iter->status().ok(), true);
+  int seq = numValues;
+  while (iter->Valid()) {
+    ParsedInternalKey ikey;
+    ikey.sequence = -1;
+    ASSERT_EQ(ParseInternalKey(iter->key(), &ikey), true);
+
+    // checks sequence number for updates
+    ASSERT_EQ(ikey.sequence, (unsigned)seq--);
+    iter->Next();
+  }
+  ASSERT_EQ(0, seq);
+}
+
+void DBTestBase::CopyFile(const std::string& source,
+                          const std::string& destination, uint64_t size) {
+  const EnvOptions soptions;
+  unique_ptr<SequentialFile> srcfile;
+  ASSERT_OK(env_->NewSequentialFile(source, &srcfile, soptions));
+  unique_ptr<WritableFile> destfile;
+  ASSERT_OK(env_->NewWritableFile(destination, &destfile, soptions));
+
+  if (size == 0) {
+    // default argument means copy everything
+    ASSERT_OK(env_->GetFileSize(source, &size));
+  }
+
+  char buffer[4096];
+  Slice slice;
+  while (size > 0) {
+    uint64_t one = std::min(uint64_t(sizeof(buffer)), size);
+    ASSERT_OK(srcfile->Read(one, &slice, buffer));
+    ASSERT_OK(destfile->Append(slice));
+    size -= slice.size();
+  }
+  ASSERT_OK(destfile->Close());
+}
+
+std::unordered_map<std::string, uint64_t> DBTestBase::GetAllSSTFiles(
+    uint64_t* total_size) {
+  std::unordered_map<std::string, uint64_t> res;
+
+  if (total_size) {
+    *total_size = 0;
+  }
+  std::vector<std::string> files;
+  env_->GetChildren(dbname_, &files);
+  for (auto& file_name : files) {
+    uint64_t number;
+    FileType type;
+    std::string file_path = dbname_ + "/" + file_name;
+    if (ParseFileName(file_name, &number, &type) && type == kTableFile) {
+      uint64_t file_size = 0;
+      env_->GetFileSize(file_path, &file_size);
+      res[file_path] = file_size;
+      if (total_size) {
+        *total_size += file_size;
+      }
+    }
+  }
+  return res;
+}
+
+std::vector<uint64_t> DBTestBase::ListTableFiles(Env* env,
+                                                 const std::string& path) {
+  std::vector<std::string> files;
+  std::vector<uint64_t> file_numbers;
+  env->GetChildren(path, &files);
+  uint64_t number;
+  FileType type;
+  for (size_t i = 0; i < files.size(); ++i) {
+    if (ParseFileName(files[i], &number, &type)) {
+      if (type == kTableFile) {
+        file_numbers.push_back(number);
+      }
+    }
+  }
+  return file_numbers;
+}
+
+void DBTestBase::VerifyDBFromMap(std::map<std::string, std::string> true_data) {
+  for (auto& kv : true_data) {
+    ASSERT_EQ(Get(kv.first), kv.second);
+  }
+
+  ReadOptions ro;
+  ro.total_order_seek = true;
+  Iterator* iter = db_->NewIterator(ro);
+  // Verify Iterator::Next()
+  auto data_iter = true_data.begin();
+  for (iter->SeekToFirst(); iter->Valid(); iter->Next(), data_iter++) {
+    ASSERT_EQ(iter->key().ToString(), data_iter->first);
+    ASSERT_EQ(iter->value().ToString(), data_iter->second);
+  }
+  ASSERT_EQ(data_iter, true_data.end());
+
+  // Verify Iterator::Prev()
+  auto data_rev = true_data.rbegin();
+  for (iter->SeekToLast(); iter->Valid(); iter->Prev(), data_rev++) {
+    ASSERT_EQ(iter->key().ToString(), data_rev->first);
+    ASSERT_EQ(iter->value().ToString(), data_rev->second);
+  }
+  ASSERT_EQ(data_rev, true_data.rend());
+
+  // Verify Iterator::Seek()
+  for (auto kv : true_data) {
+    iter->Seek(kv.first);
+    ASSERT_EQ(kv.first, iter->key().ToString());
+    ASSERT_EQ(kv.second, iter->value().ToString());
+  }
+
+  delete iter;
+}
+
+#ifndef ROCKSDB_LITE
+
+Status DBTestBase::GenerateAndAddExternalFile(const Options options,
+                                              std::vector<int> keys,
+                                              size_t file_id) {
+  std::string file_path =
+      test::TmpDir(env_) + "/sst_files/" + ToString(file_id);
+  SstFileWriter sst_file_writer(EnvOptions(), options, options.comparator);
+
+  Status s = sst_file_writer.Open(file_path);
+  if (!s.ok()) {
+    return s;
+  }
+  for (auto& entry : keys) {
+    std::string k = Key(entry);
+    std::string v = k + ToString(file_id);
+    s = sst_file_writer.Add(k, v);
+    if (!s.ok()) {
+      return s;
+    }
+  }
+  s = sst_file_writer.Finish();
+
+  if (s.ok()) {
+    s = db_->AddFile(std::vector<std::string>(1, file_path));
+  }
+
+  return s;
+}
+
+uint64_t DBTestBase::GetNumberOfSstFilesForColumnFamily(
+    DB* db, std::string column_family_name) {
+  std::vector<LiveFileMetaData> metadata;
+  db->GetLiveFilesMetaData(&metadata);
+  uint64_t result = 0;
+  for (auto& fileMetadata : metadata) {
+    result += (fileMetadata.column_family_name == column_family_name);
+  }
+  return result;
+}
+#endif  // ROCKSDB_LITE
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/db_test_util.h b/external/rocksdb/db/db_test_util.h
new file mode 100755
index 0000000000..237dc51e43
--- /dev/null
+++ b/external/rocksdb/db/db_test_util.h
@@ -0,0 +1,828 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <fcntl.h>
+#include <inttypes.h>
+#ifndef OS_WIN
+#include <unistd.h>
+#endif
+
+#include <algorithm>
+#include <map>
+#include <set>
+#include <string>
+#include <thread>
+#include <unordered_set>
+#include <utility>
+#include <vector>
+
+#include "db/db_impl.h"
+#include "db/dbformat.h"
+#include "db/filename.h"
+#include "memtable/hash_linklist_rep.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/compaction_filter.h"
+#include "rocksdb/convenience.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/options.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/sst_file_writer.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/table.h"
+#include "rocksdb/utilities/checkpoint.h"
+#include "table/block_based_table_factory.h"
+#include "table/mock_table.h"
+#include "table/plain_table_factory.h"
+#include "table/scoped_arena_iterator.h"
+#include "util/compression.h"
+#include "util/mock_env.h"
+#include "util/mutexlock.h"
+
+#include "util/string_util.h"
+// SyncPoint is not supported in Released Windows Mode.
+#if !(defined NDEBUG) || !defined(OS_WIN)
+#include "util/sync_point.h"
+#endif  // !(defined NDEBUG) || !defined(OS_WIN)
+#include "util/testharness.h"
+#include "util/testutil.h"
+#include "util/xfunc.h"
+#include "utilities/merge_operators.h"
+
+namespace rocksdb {
+
+namespace anon {
+class AtomicCounter {
+ public:
+  explicit AtomicCounter(Env* env = NULL)
+      : env_(env), cond_count_(&mu_), count_(0) {}
+
+  void Increment() {
+    MutexLock l(&mu_);
+    count_++;
+    cond_count_.SignalAll();
+  }
+
+  int Read() {
+    MutexLock l(&mu_);
+    return count_;
+  }
+
+  bool WaitFor(int count) {
+    MutexLock l(&mu_);
+
+    uint64_t start = env_->NowMicros();
+    while (count_ < count) {
+      uint64_t now = env_->NowMicros();
+      cond_count_.TimedWait(now + /*1s*/ 1 * 1000 * 1000);
+      if (env_->NowMicros() - start > /*10s*/ 10 * 1000 * 1000) {
+        return false;
+      }
+      if (count_ < count) {
+        GTEST_LOG_(WARNING) << "WaitFor is taking more time than usual";
+      }
+    }
+
+    return true;
+  }
+
+  void Reset() {
+    MutexLock l(&mu_);
+    count_ = 0;
+    cond_count_.SignalAll();
+  }
+
+ private:
+  Env* env_;
+  port::Mutex mu_;
+  port::CondVar cond_count_;
+  int count_;
+};
+
+struct OptionsOverride {
+  std::shared_ptr<const FilterPolicy> filter_policy = nullptr;
+
+  // Used as a bit mask of individual enums in which to skip an XF test point
+  int skip_policy = 0;
+};
+
+}  // namespace anon
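+
+// Usage sketch (illustrative; names as defined below): a test that wants a
+// memtable flush after every N inserts can plug SpecialSkipListFactory into
+// its options, e.g.
+//   Options options = CurrentOptions();
+//   options.memtable_factory.reset(new SpecialSkipListFactory(5));
+//   Reopen(options);
+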
+// A hacky skip list mem table that triggers flush after a fixed number of
+// entries.
+class SpecialMemTableRep : public MemTableRep {
+ public:
+  explicit SpecialMemTableRep(MemTableAllocator* allocator,
+                              MemTableRep* memtable, int num_entries_flush)
+      : MemTableRep(allocator),
+        memtable_(memtable),
+        num_entries_flush_(num_entries_flush),
+        num_entries_(0) {}
+
+  virtual KeyHandle Allocate(const size_t len, char** buf) override {
+    return memtable_->Allocate(len, buf);
+  }
+
+  // Insert key into the list.
+  // REQUIRES: nothing that compares equal to key is currently in the list.
+  virtual void Insert(KeyHandle handle) override {
+    memtable_->Insert(handle);
+    num_entries_++;
+  }
+
+  // Returns true iff an entry that compares equal to key is in the list.
+  virtual bool Contains(const char* key) const override {
+    return memtable_->Contains(key);
+  }
+
+  virtual size_t ApproximateMemoryUsage() override {
+    // Return a high memory usage when number of entries exceeds the threshold
+    // to trigger a flush.
+    return (num_entries_ < num_entries_flush_) ? 0 : 1024 * 1024 * 1024;
+  }
+
+  virtual void Get(const LookupKey& k, void* callback_args,
+                   bool (*callback_func)(void* arg,
+                                         const char* entry)) override {
+    memtable_->Get(k, callback_args, callback_func);
+  }
+
+  uint64_t ApproximateNumEntries(const Slice& start_ikey,
+                                 const Slice& end_ikey) override {
+    return memtable_->ApproximateNumEntries(start_ikey, end_ikey);
+  }
+
+  virtual MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override {
+    return memtable_->GetIterator(arena);
+  }
+
+  virtual ~SpecialMemTableRep() override {}
+
+ private:
+  unique_ptr<MemTableRep> memtable_;
+  int num_entries_flush_;
+  int num_entries_;
+};
+
+// The factory for the hacky skip list mem table that triggers flush after
+// number of entries exceeds a threshold.
+class SpecialSkipListFactory : public MemTableRepFactory {
+ public:
+  // After number of inserts exceeds `num_entries_flush` in a mem table,
+  // trigger flush.
+  explicit SpecialSkipListFactory(int num_entries_flush)
+      : num_entries_flush_(num_entries_flush) {}
+
+  virtual MemTableRep* CreateMemTableRep(
+      const MemTableRep::KeyComparator& compare, MemTableAllocator* allocator,
+      const SliceTransform* transform, Logger* logger) override {
+    return new SpecialMemTableRep(
+        allocator,
+        factory_.CreateMemTableRep(compare, allocator, transform, 0),
+        num_entries_flush_);
+  }
+  virtual const char* Name() const override { return "SkipListFactory"; }
+
+ private:
+  SkipListFactory factory_;
+  int num_entries_flush_;
+};
+
+// Special Env used to delay background operations
+class SpecialEnv : public EnvWrapper {
+ public:
+  explicit SpecialEnv(Env* base);
+
+  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
+                         const EnvOptions& soptions) override {
+    class SSTableFile : public WritableFile {
+     private:
+      SpecialEnv* env_;
+      unique_ptr<WritableFile> base_;
+
+     public:
+      SSTableFile(SpecialEnv* env, unique_ptr<WritableFile>&& base)
+          : env_(env), base_(std::move(base)) {}
+      Status Append(const Slice& data) override {
+        if (env_->table_write_callback_) {
+          (*env_->table_write_callback_)();
+        }
+        if (env_->drop_writes_.load(std::memory_order_acquire)) {
+          // Drop writes on the floor
+          return Status::OK();
+        } else if (env_->no_space_.load(std::memory_order_acquire)) {
+          return Status::IOError("No space left on device");
+        } else {
+          env_->bytes_written_ += data.size();
+          return base_->Append(data);
+        }
+      }
+      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
+      Status Close() override {
+// SyncPoint is not supported in Released Windows Mode.
+#if !(defined NDEBUG) || !defined(OS_WIN)
+        // Check preallocation size
+        // preallocation size is never passed to base file.
+        size_t preallocation_size = preallocation_block_size();
+        TEST_SYNC_POINT_CALLBACK("DBTestWritableFile.GetPreallocationStatus",
+                                 &preallocation_size);
+#endif  // !(defined NDEBUG) || !defined(OS_WIN)
+        return base_->Close();
+      }
+      Status Flush() override { return base_->Flush(); }
+      Status Sync() override {
+        ++env_->sync_counter_;
+        while (env_->delay_sstable_sync_.load(std::memory_order_acquire)) {
+          env_->SleepForMicroseconds(100000);
+        }
+        return base_->Sync();
+      }
+      void SetIOPriority(Env::IOPriority pri) override {
+        base_->SetIOPriority(pri);
+      }
+      Env::IOPriority GetIOPriority() override {
+        return base_->GetIOPriority();
+      }
+    };
+    class ManifestFile : public WritableFile {
+     public:
+      ManifestFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
+          : env_(env), base_(std::move(b)) {}
+      Status Append(const Slice& data) override {
+        if (env_->manifest_write_error_.load(std::memory_order_acquire)) {
+          return Status::IOError("simulated writer error");
+        } else {
+          return base_->Append(data);
+        }
+      }
+      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
+      Status Close() override { return base_->Close(); }
+      Status Flush() override { return base_->Flush(); }
+      Status Sync() override {
+        ++env_->sync_counter_;
+        if (env_->manifest_sync_error_.load(std::memory_order_acquire)) {
+          return Status::IOError("simulated sync error");
+        } else {
+          return base_->Sync();
+        }
+      }
+      uint64_t GetFileSize() override { return base_->GetFileSize(); }
+
+     private:
+      SpecialEnv* env_;
+      unique_ptr<WritableFile> base_;
+    };
+    class WalFile : public WritableFile {
+     public:
+      WalFile(SpecialEnv* env, unique_ptr<WritableFile>&& b)
+          : env_(env), base_(std::move(b)) {
+        env_->num_open_wal_file_.fetch_add(1);
+      }
+      virtual ~WalFile() { env_->num_open_wal_file_.fetch_add(-1); }
+      Status Append(const Slice& data) override {
+#if !(defined NDEBUG) || !defined(OS_WIN)
+        TEST_SYNC_POINT("SpecialEnv::WalFile::Append:1");
+#endif
+        Status s;
+        if (env_->log_write_error_.load(std::memory_order_acquire)) {
+          s = Status::IOError("simulated writer error");
+        } else {
+          int slowdown =
+              env_->log_write_slowdown_.load(std::memory_order_acquire);
+          if (slowdown > 0) {
+            env_->SleepForMicroseconds(slowdown);
+          }
+          s = base_->Append(data);
+        }
+#if !(defined NDEBUG) || !defined(OS_WIN)
+        TEST_SYNC_POINT("SpecialEnv::WalFile::Append:2");
+#endif
+        return s;
+      }
+      Status Truncate(uint64_t size) override { return base_->Truncate(size); }
+      Status Close() override { return base_->Close(); }
+      Status Flush() override { return base_->Flush(); }
+      Status Sync() override {
+        ++env_->sync_counter_;
+        return base_->Sync();
+      }
+      bool IsSyncThreadSafe() const override {
+        return env_->is_wal_sync_thread_safe_.load();
+      }
+
+     private:
+      SpecialEnv* env_;
+      unique_ptr<WritableFile> base_;
+    };
+
+    if (non_writeable_rate_.load(std::memory_order_acquire) > 0) {
+      uint32_t random_number;
+      {
+        MutexLock l(&rnd_mutex_);
+        random_number = rnd_.Uniform(100);
+      }
+      if (random_number < non_writeable_rate_.load()) {
+        return Status::IOError("simulated random write error");
+      }
+    }
+
+    new_writable_count_++;
+
+    if (non_writable_count_.load() > 0) {
+      non_writable_count_--;
+      return Status::IOError("simulated write error");
+    }
+
+    Status s = target()->NewWritableFile(f, r, soptions);
+    if (s.ok()) {
+      if (strstr(f.c_str(), ".sst") != nullptr) {
+        r->reset(new SSTableFile(this, std::move(*r)));
+      } else if (strstr(f.c_str(), "MANIFEST") != nullptr) {
+        r->reset(new ManifestFile(this, std::move(*r)));
+      } else if (strstr(f.c_str(), "log") != nullptr) {
+        r->reset(new WalFile(this, std::move(*r)));
+      }
+    }
+    return s;
+  }
+
+  Status NewRandomAccessFile(const std::string& f,
+                             unique_ptr<RandomAccessFile>* r,
+                             const EnvOptions& soptions) override {
+    class CountingFile : public RandomAccessFile {
+     public:
+      CountingFile(unique_ptr<RandomAccessFile>&& target,
+                   anon::AtomicCounter* counter,
+                   std::atomic<size_t>* bytes_read)
+          : target_(std::move(target)),
+            counter_(counter),
+            bytes_read_(bytes_read) {}
+      virtual Status Read(uint64_t offset, size_t n, Slice* result,
+                          char* scratch) const override {
+        counter_->Increment();
+        Status s = target_->Read(offset, n, result, scratch);
+        *bytes_read_ += result->size();
+        return s;
+      }
+
+     private:
+      unique_ptr<RandomAccessFile> target_;
+      anon::AtomicCounter* counter_;
+      std::atomic<size_t>* bytes_read_;
+    };
+
+    Status s = target()->NewRandomAccessFile(f, r, soptions);
+    random_file_open_counter_++;
+    if (s.ok() && count_random_reads_) {
+      r->reset(new CountingFile(std::move(*r), &random_read_counter_,
+                                &random_read_bytes_counter_));
+    }
+    return s;
+  }
+
+  Status NewSequentialFile(const std::string& f, unique_ptr<SequentialFile>* r,
+                           const EnvOptions& soptions) override {
+    class CountingFile : public SequentialFile {
+     public:
+      CountingFile(unique_ptr<SequentialFile>&& target,
+                   anon::AtomicCounter* counter)
+          : target_(std::move(target)), counter_(counter) {}
+      virtual Status Read(size_t n, Slice* result, char* scratch) override {
+        counter_->Increment();
+        return target_->Read(n, result, scratch);
+      }
+      virtual Status Skip(uint64_t n) override { return target_->Skip(n); }
+
+     private:
+      unique_ptr<SequentialFile> target_;
+      anon::AtomicCounter* counter_;
+    };
+
+    Status s = target()->NewSequentialFile(f, r, soptions);
+    if (s.ok() && count_sequential_reads_) {
+      r->reset(new CountingFile(std::move(*r), &sequential_read_counter_));
+    }
+    return s;
+  }
+
+  virtual void SleepForMicroseconds(int micros) override {
+    sleep_counter_.Increment();
+    if (no_sleep_ || time_elapse_only_sleep_) {
+      addon_time_.fetch_add(micros);
+    }
+    if (!no_sleep_) {
+      target()->SleepForMicroseconds(micros);
+    }
+  }
+
+  virtual Status GetCurrentTime(int64_t* unix_time) override {
+    Status s;
+    if (!time_elapse_only_sleep_) {
+      s = target()->GetCurrentTime(unix_time);
+    }
+    if (s.ok()) {
+      *unix_time += addon_time_.load();
+    }
+    return s;
+  }
+
+  virtual uint64_t NowNanos() override {
+    return (time_elapse_only_sleep_ ? 0 : target()->NowNanos()) +
+           addon_time_.load() * 1000;
+  }
+
+  virtual uint64_t NowMicros() override {
+    return (time_elapse_only_sleep_ ? 0 : target()->NowMicros()) +
+           addon_time_.load();
+  }
+
+  virtual Status DeleteFile(const std::string& fname) override {
+    delete_count_.fetch_add(1);
+    return target()->DeleteFile(fname);
+  }
+
+  Random rnd_;
+  port::Mutex rnd_mutex_;  // Lock to protect rnd_
+
+  // sstable Sync() calls are blocked while this is set to true.
+  std::atomic<bool> delay_sstable_sync_;
+
+  // Drop writes on the floor while this is set to true.
+  std::atomic<bool> drop_writes_;
+
+  // Simulate no-space errors while this is set to true.
+  std::atomic<bool> no_space_;
+
+  // Simulate a non-writable file system while this is set to true.
+  std::atomic<bool> non_writable_;
+
+  // Force sync of manifest files to fail while this is set to true.
+  std::atomic<bool> manifest_sync_error_;
+
+  // Force writes to manifest files to fail while this is set to true.
+  std::atomic<bool> manifest_write_error_;
+
+  // Force writes to log files to fail while this is set to true.
+  std::atomic<bool> log_write_error_;
+
+  // Slow down every log write, in micro-seconds.
+  std::atomic<int> log_write_slowdown_;
+
+  // Number of WAL files that are still open for write.
+  std::atomic<int> num_open_wal_file_;
+
+  bool count_random_reads_;
+  anon::AtomicCounter random_read_counter_;
+  std::atomic<size_t> random_read_bytes_counter_;
+  std::atomic<int> random_file_open_counter_;
+
+  bool count_sequential_reads_;
+  anon::AtomicCounter sequential_read_counter_;
+
+  anon::AtomicCounter sleep_counter_;
+
+  std::atomic<uint64_t> bytes_written_;
+
+  std::atomic<int> sync_counter_;
+
+  std::atomic<uint32_t> non_writeable_rate_;
+
+  std::atomic<int> new_writable_count_;
+
+  std::atomic<int> non_writable_count_;
+
+  std::function<void()>* table_write_callback_;
+
+  std::atomic<int64_t> addon_time_;
+
+  std::atomic<int> delete_count_;
+
+  bool time_elapse_only_sleep_;
+
+  bool no_sleep_;
+
+  std::atomic<bool> is_wal_sync_thread_safe_{true};
+};
+
+#ifndef ROCKSDB_LITE
+class OnFileDeletionListener : public EventListener {
+ public:
+  OnFileDeletionListener() : matched_count_(0), expected_file_name_("") {}
+
+  void SetExpectedFileName(const std::string file_name) {
+    expected_file_name_ = file_name;
+  }
+
+  void VerifyMatchedCount(size_t expected_value) {
+    ASSERT_EQ(matched_count_, expected_value);
+  }
+
+  void OnTableFileDeleted(const TableFileDeletionInfo& info) override {
+    if (expected_file_name_ != "") {
+      ASSERT_EQ(expected_file_name_, info.file_path);
+      expected_file_name_ = "";
+      matched_count_++;
+    }
+  }
+
+ private:
+  size_t matched_count_;
+  std::string expected_file_name_;
+};
+#endif
+
+class DBTestBase : public testing::Test {
+ protected:
+  // Sequence of option configurations to try
+  enum OptionConfig {
+    kDefault = 0,
+    kBlockBasedTableWithPrefixHashIndex = 1,
+    kBlockBasedTableWithWholeKeyHashIndex = 2,
+    kPlainTableFirstBytePrefix = 3,
+    kPlainTableCappedPrefix = 4,
+    kPlainTableCappedPrefixNonMmap = 5,
+    kPlainTableAllBytesPrefix = 6,
+    kVectorRep = 7,
+    kHashLinkList = 8,
+    kHashCuckoo = 9,
+    kMergePut = 10,
+    kFilter = 11,
+    kFullFilterWithNewTableReaderForCompactions = 12,
+    kUncompressed = 13,
+    kNumLevel_3 = 14,
+    kDBLogDir = 15,
+    kWalDirAndMmapReads = 16,
+    kManifestFileSize = 17,
+    kPerfOptions = 18,
+    kHashSkipList = 19,
+    kUniversalCompaction = 20,
+    kUniversalCompactionMultiLevel = 21,
+    kCompressedBlockCache = 22,
+    kInfiniteMaxOpenFiles = 23,
+    kxxHashChecksum = 24,
+    kFIFOCompaction = 25,
+    kOptimizeFiltersForHits = 26,
+    kRowCache = 27,
+    kRecycleLogFiles = 28,
+    kConcurrentSkipList = 29,
+    kEnd = 30,
+    // Configurations below kEnd are not visited by ChangeOptions()
+    kLevelSubcompactions = 31,
+    kUniversalSubcompactions = 32,
+    kBlockBasedTableWithIndexRestartInterval = 33,
+  };
+  int option_config_;
+
+ public:
+  std::string dbname_;
+  std::string alternative_wal_dir_;
+  std::string alternative_db_log_dir_;
+  MockEnv* mem_env_;
+  SpecialEnv* env_;
+  DB* db_;
+  std::vector<ColumnFamilyHandle*> handles_;
+
+  Options last_options_;
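+
+  // Usage sketch (illustrative): a test body can be re-run across all option
+  // configurations while skipping inapplicable ones via the OptionSkip
+  // constants declared below, e.g.
+  //   do {
+  //     ...
+  //   } while (ChangeOptions(kSkipPlainTable | kSkipHashCuckoo));
+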
+  // Skip some options, as they may not be applicable to a specific test.
+  // To add more skip constants, use values 4, 8, 16, etc.
+  enum OptionSkip {
+    kNoSkip = 0,
+    kSkipDeletesFilterFirst = 1,
+    kSkipUniversalCompaction = 2,
+    kSkipMergePut = 4,
+    kSkipPlainTable = 8,
+    kSkipHashIndex = 16,
+    kSkipNoSeekToLast = 32,
+    kSkipHashCuckoo = 64,
+    kSkipFIFOCompaction = 128,
+    kSkipMmapReads = 256,
+  };
+
+  explicit DBTestBase(const std::string path);
+
+  ~DBTestBase();
+
+  static std::string RandomString(Random* rnd, int len) {
+    std::string r;
+    test::RandomString(rnd, len, &r);
+    return r;
+  }
+
+  static std::string Key(int i) {
+    char buf[100];
+    snprintf(buf, sizeof(buf), "key%06d", i);
+    return std::string(buf);
+  }
+
+  static bool ShouldSkipOptions(int option_config, int skip_mask = kNoSkip);
+
+  // Switch to a fresh database with the next option configuration to
+  // test. Return false if there are no more configurations to test.
+  bool ChangeOptions(int skip_mask = kNoSkip);
+
+  // Switch between a small set of compaction styles and subcompaction
+  // configurations.
+  bool ChangeCompactOptions();
+
+  // Switch between different filter policies
+  // Jump from kDefault to kFilter to kFullFilter
+  bool ChangeFilterOptions();
+
+  // Return the current option configuration.
+  Options CurrentOptions(
+      const anon::OptionsOverride& options_override = anon::OptionsOverride());
+
+  Options CurrentOptions(
+      const Options& defaultOptions,
+      const anon::OptionsOverride& options_override = anon::OptionsOverride());
+
+  DBImpl* dbfull() { return reinterpret_cast<DBImpl*>(db_); }
+
+  void CreateColumnFamilies(const std::vector<std::string>& cfs,
+                            const Options& options);
+
+  void CreateAndReopenWithCF(const std::vector<std::string>& cfs,
+                             const Options& options);
+
+  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
+                                const std::vector<Options>& options);
+
+  void ReopenWithColumnFamilies(const std::vector<std::string>& cfs,
+                                const Options& options);
+
+  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
+                                     const std::vector<Options>& options);
+
+  Status TryReopenWithColumnFamilies(const std::vector<std::string>& cfs,
+                                     const Options& options);
+
+  void Reopen(const Options& options);
+
+  void Close();
+
+  void DestroyAndReopen(const Options& options);
+
+  void Destroy(const Options& options);
+
+  Status ReadOnlyReopen(const Options& options);
+
+  Status TryReopen(const Options& options);
+
+  Status Flush(int cf = 0);
+
+  Status Put(const Slice& k, const Slice& v, WriteOptions wo = WriteOptions());
+
+  Status Put(int cf, const Slice& k, const Slice& v,
+             WriteOptions wo = WriteOptions());
+
+  Status Delete(const std::string& k);
+
+  Status Delete(int cf, const std::string& k);
+
+  Status SingleDelete(const std::string& k);
+
+  Status SingleDelete(int cf, const std::string& k);
+
+  std::string Get(const std::string& k, const Snapshot* snapshot = nullptr);
+
+  std::string Get(int cf, const std::string& k,
+                  const Snapshot* snapshot = nullptr);
+
+  uint64_t GetNumSnapshots();
+
+  uint64_t GetTimeOldestSnapshots();
+
+  // Return a string that contains all key,value pairs in order,
+  // formatted like "(k1->v1)(k2->v2)".
+  std::string Contents(int cf = 0);
+
+  std::string AllEntriesFor(const Slice& user_key, int cf = 0);
+
+#ifndef ROCKSDB_LITE
+  int NumSortedRuns(int cf = 0);
+
+  uint64_t TotalSize(int cf = 0);
+
+  uint64_t SizeAtLevel(int level);
+
+  size_t TotalLiveFiles(int cf = 0);
+
+  size_t CountLiveFiles();
+#endif  // ROCKSDB_LITE
+
+  int NumTableFilesAtLevel(int level, int cf = 0);
+
+  double CompressionRatioAtLevel(int level, int cf = 0);
+
+  int TotalTableFiles(int cf = 0, int levels = -1);
+
+  // Return spread of files per level
+  std::string FilesPerLevel(int cf = 0);
+
+  size_t CountFiles();
+
+  uint64_t Size(const Slice& start, const Slice& limit, int cf = 0);
+
+  void Compact(int cf, const Slice& start, const Slice& limit,
+               uint32_t target_path_id);
+
+  void Compact(int cf, const Slice& start, const Slice& limit);
+
+  void Compact(const Slice& start, const Slice& limit);
+
+  // Do n memtable compactions, each of which produces an sstable
+  // covering the range [small,large].
+  void MakeTables(int n, const std::string& small, const std::string& large,
+                  int cf = 0);
+
+  // Prevent pushing of new sstables into deeper levels by adding
+  // tables that cover a specified range to all levels.
+  void FillLevels(const std::string& smallest, const std::string& largest,
+                  int cf);
+
+  void MoveFilesToLevel(int level, int cf = 0);
+
+  void DumpFileCounts(const char* label);
+
+  std::string DumpSSTableList();
+
+  void GetSstFiles(std::string path, std::vector<std::string>* files);
+
+  int GetSstFileCount(std::string path);
+
+  // this will generate non-overlapping files since it keeps increasing key_idx
+  void GenerateNewFile(Random* rnd, int* key_idx, bool nowait = false);
+
+  void GenerateNewFile(int fd, Random* rnd, int* key_idx, bool nowait = false);
+
+  static const int kNumKeysByGenerateNewRandomFile;
+  static const int KNumKeysByGenerateNewFile = 100;
+
+  void GenerateNewRandomFile(Random* rnd, bool nowait = false);
+
+  std::string IterStatus(Iterator* iter);
+
+  Options OptionsForLogIterTest();
+
+  std::string DummyString(size_t len, char c = 'a');
+
+  void VerifyIterLast(std::string expected_key, int cf = 0);
+
+  // Used to test InplaceUpdate
+
+  // If the previous value is nullptr or delta is larger than the previous
+  // value, sets newValue with delta.
+  // If the previous value is not empty,
+  // updates the previous value with a 'b' string of previous value size - 1.
+  static UpdateStatus updateInPlaceSmallerSize(char* prevValue,
+                                               uint32_t* prevSize, Slice delta,
+                                               std::string* newValue);
+
+  static UpdateStatus updateInPlaceSmallerVarintSize(char* prevValue,
+                                                     uint32_t* prevSize,
+                                                     Slice delta,
+                                                     std::string* newValue);
+
+  static UpdateStatus updateInPlaceLargerSize(char* prevValue,
+                                              uint32_t* prevSize, Slice delta,
+                                              std::string* newValue);
+
+  static UpdateStatus updateInPlaceNoAction(char* prevValue, uint32_t* prevSize,
+                                            Slice delta, std::string* newValue);
+
+  // Utility method to test InplaceUpdate
+  void validateNumberOfEntries(int numValues, int cf = 0);
+
+  void CopyFile(const std::string& source, const std::string& destination,
+                uint64_t size = 0);
+
+  std::unordered_map<std::string, uint64_t> GetAllSSTFiles(
+      uint64_t* total_size = nullptr);
+
+  std::vector<uint64_t> ListTableFiles(Env* env, const std::string& path);
+
+  void VerifyDBFromMap(std::map<std::string, std::string> true_data);
+
+#ifndef ROCKSDB_LITE
+  Status GenerateAndAddExternalFile(const Options options,
+                                    std::vector<int> keys, size_t file_id);
+
+  uint64_t GetNumberOfSstFilesForColumnFamily(DB* db,
+                                              std::string column_family_name);
+#endif  // ROCKSDB_LITE
+
+  uint64_t TestGetTickerCount(const Options& options, Tickers ticker_type) {
+    return options.statistics->getTickerCount(ticker_type);
+  }
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/db_universal_compaction_test.cc b/external/rocksdb/db/db_universal_compaction_test.cc
new file mode 100755
index 0000000000..7eab802d41
--- /dev/null
+++ b/external/rocksdb/db/db_universal_compaction_test.cc
@@ -0,0 +1,1337 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
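+//
+// This suite exercises universal (size-tiered) compaction. Every TEST_P below
+// runs once per parameter tuple supplied by the ::testing::Combine calls at
+// the bottom of the file; the tuple is (num_levels,
+// exclusive_manual_compaction) and is unpacked in
+// DBTestUniversalCompactionBase::SetUp().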
+
+#include "db/db_test_util.h"
+#include "port/stack_trace.h"
+#if !defined(ROCKSDB_LITE)
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+static std::string CompressibleString(Random* rnd, int len) {
+  std::string r;
+  test::CompressibleString(rnd, 0.8, len, &r);
+  return r;
+}
+
+class DBTestUniversalCompactionBase
+    : public DBTestBase,
+      public ::testing::WithParamInterface<std::tuple<int, bool>> {
+ public:
+  explicit DBTestUniversalCompactionBase(
+      const std::string& path) : DBTestBase(path) {}
+  virtual void SetUp() override {
+    num_levels_ = std::get<0>(GetParam());
+    exclusive_manual_compaction_ = std::get<1>(GetParam());
+  }
+  int num_levels_;
+  bool exclusive_manual_compaction_;
+};
+
+class DBTestUniversalCompaction : public DBTestUniversalCompactionBase {
+ public:
+  DBTestUniversalCompaction() :
+      DBTestUniversalCompactionBase("/db_universal_compaction_test") {}
+};
+
+namespace {
+void VerifyCompactionResult(
+    const ColumnFamilyMetaData& cf_meta,
+    const std::set<std::string>& overlapping_file_numbers) {
+#ifndef NDEBUG
+  for (auto& level : cf_meta.levels) {
+    for (auto& file : level.files) {
+      assert(overlapping_file_numbers.find(file.name) ==
+             overlapping_file_numbers.end());
+    }
+  }
+#endif
+}
+
+class KeepFilter : public CompactionFilter {
+ public:
+  virtual bool Filter(int level, const Slice& key, const Slice& value,
+                      std::string* new_value, bool* value_changed) const
+      override {
+    return false;
+  }
+
+  virtual const char* Name() const override { return "KeepFilter"; }
+};
+
+class KeepFilterFactory : public CompactionFilterFactory {
+ public:
+  explicit KeepFilterFactory(bool check_context = false)
+      : check_context_(check_context) {}
+
+  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+      const CompactionFilter::Context& context) override {
+    if (check_context_) {
+      EXPECT_EQ(expect_full_compaction_.load(), context.is_full_compaction);
+      EXPECT_EQ(expect_manual_compaction_.load(), context.is_manual_compaction);
+    }
+    return std::unique_ptr<CompactionFilter>(new KeepFilter());
+  }
+
+  virtual const char* Name() const override { return "KeepFilterFactory"; }
+  bool check_context_;
+  std::atomic_bool expect_full_compaction_;
+  std::atomic_bool expect_manual_compaction_;
+};
+
+class DelayFilter : public CompactionFilter {
+ public:
+  explicit DelayFilter(DBTestBase* d) : db_test(d) {}
+  virtual bool Filter(int level, const Slice& key, const Slice& value,
+                      std::string* new_value,
+                      bool* value_changed) const override {
+    db_test->env_->addon_time_.fetch_add(1000);
+    return true;
+  }
+
+  virtual const char* Name() const override { return "DelayFilter"; }
+
+ private:
+  DBTestBase* db_test;
+};
+
+class DelayFilterFactory : public CompactionFilterFactory {
+ public:
+  explicit DelayFilterFactory(DBTestBase* d) : db_test(d) {}
+  virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter(
+      const CompactionFilter::Context& context) override {
+    return std::unique_ptr<CompactionFilter>(new DelayFilter(db_test));
+  }
+
+  virtual const char* Name() const override { return "DelayFilterFactory"; }
+
+ private:
+  DBTestBase* db_test;
+};
+}  // namespace
+
+// Make sure we don't trigger a problem if the trigger condition is given
+// as 0, which is invalid.
+TEST_P(DBTestUniversalCompaction, UniversalCompactionSingleSortedRun) {
+  Options options = CurrentOptions();
+
+  options.compaction_style = kCompactionStyleUniversal;
+  options.num_levels = num_levels_;
+  // Config universal compaction to always compact to one single sorted run.
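+  // Setting the trigger to 0 is invalid; the DB sanitizes it up to 1 (checked
+  // below), and max_size_amplification_percent = 0 makes the
+  // size-amplification check fire after every flush, so all sorted runs keep
+  // collapsing into a single run.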
+  options.level0_file_num_compaction_trigger = 0;
+  options.compaction_options_universal.size_ratio = 10;
+  options.compaction_options_universal.min_merge_width = 2;
+  options.compaction_options_universal.max_size_amplification_percent = 0;
+
+  options.write_buffer_size = 105 << 10;     // 105KB
+  options.arena_block_size = 4 << 10;
+  options.target_file_size_base = 32 << 10;  // 32KB
+  KeepFilterFactory* filter = new KeepFilterFactory(true);
+  filter->expect_manual_compaction_.store(false);
+  options.compaction_filter_factory.reset(filter);
+
+  DestroyAndReopen(options);
+  ASSERT_EQ(1, db_->GetOptions().level0_file_num_compaction_trigger);
+
+  Random rnd(301);
+  int key_idx = 0;
+
+  filter->expect_full_compaction_.store(true);
+
+  for (int num = 0; num < 16; num++) {
+    // Write a 100KB file. It should immediately be compacted into one file.
+    GenerateNewFile(&rnd, &key_idx);
+    dbfull()->TEST_WaitForCompact();
+    ASSERT_EQ(NumSortedRuns(0), 1);
+  }
+  ASSERT_OK(Put(Key(key_idx), ""));
+  dbfull()->TEST_WaitForCompact();
+  ASSERT_EQ(NumSortedRuns(0), 1);
+}
+
+TEST_P(DBTestUniversalCompaction, OptimizeFiltersForHits) {
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.compaction_options_universal.size_ratio = 5;
+  options.num_levels = num_levels_;
+  options.write_buffer_size = 105 << 10;     // 105KB
+  options.arena_block_size = 4 << 10;
+  options.target_file_size_base = 32 << 10;  // 32KB
+  // trigger compaction if there are >= 4 files
+  options.level0_file_num_compaction_trigger = 4;
+  BlockBasedTableOptions bbto;
+  bbto.cache_index_and_filter_blocks = true;
+  bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
+  bbto.whole_key_filtering = true;
+  options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+  options.optimize_filters_for_hits = true;
+  options.statistics = rocksdb::CreateDBStatistics();
+  options.memtable_factory.reset(new SpecialSkipListFactory(3));
+
+  DestroyAndReopen(options);
+
+  // block compaction from happening
+  env_->SetBackgroundThreads(1, Env::LOW);
+  test::SleepingBackgroundTask sleeping_task_low;
+  env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low,
+                 Env::Priority::LOW);
+
+  for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) {
+    Put(Key(num * 10), "val");
+    if (num) {
+      dbfull()->TEST_WaitForFlushMemTable();
+    }
+    Put(Key(30 + num * 10), "val");
+    Put(Key(60 + num * 10), "val");
+  }
+  Put("", "");
+  dbfull()->TEST_WaitForFlushMemTable();
+
+  // Query a set of non-existent keys
+  for (int i = 5; i < 90; i += 10) {
+    ASSERT_EQ(Get(Key(i)), "NOT_FOUND");
+  }
+
+  // Make sure the bloom filter is used at least once.
+  ASSERT_GT(TestGetTickerCount(options, BLOOM_FILTER_USEFUL), 0);
+  auto prev_counter = TestGetTickerCount(options, BLOOM_FILTER_USEFUL);
+
+  // Make sure the bloom filter is used for all but the last L0 file when
+  // looking up a non-existent key that's in the range of all L0 files.
+  ASSERT_EQ(Get(Key(35)), "NOT_FOUND");
+  ASSERT_EQ(prev_counter + NumTableFilesAtLevel(0) - 1,
+            TestGetTickerCount(options, BLOOM_FILTER_USEFUL));
+  prev_counter = TestGetTickerCount(options, BLOOM_FILTER_USEFUL);
+
+  // Unblock compaction and wait for it to finish.
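+  // (The SleepingBackgroundTask scheduled above still occupies the single
+  // LOW-priority background thread, so no compaction could have run until it
+  // is woken up here.)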
+  sleeping_task_low.WakeUp();
+  dbfull()->TEST_WaitForCompact();
+
+  // The same queries will not trigger the bloom filter
+  for (int i = 5; i < 90; i += 10) {
+    ASSERT_EQ(Get(Key(i)), "NOT_FOUND");
+  }
+  ASSERT_EQ(prev_counter, TestGetTickerCount(options, BLOOM_FILTER_USEFUL));
+}
+
+// TODO(kailiu) The tests on UniversalCompaction have some issues:
+//  1. A lot of magic numbers ("11" or "12").
+//  2. They make assumptions about the memtable flush conditions, which may
+//     change from time to time.
+TEST_P(DBTestUniversalCompaction, UniversalCompactionTrigger) {
+  Options options;
+  options.compaction_style = kCompactionStyleUniversal;
+  options.compaction_options_universal.size_ratio = 5;
+  options.num_levels = num_levels_;
+  options.write_buffer_size = 105 << 10;     // 105KB
+  options.arena_block_size = 4 << 10;
+  options.target_file_size_base = 32 << 10;  // 32KB
+  // trigger compaction if there are >= 4 files
+  options.level0_file_num_compaction_trigger = 4;
+  KeepFilterFactory* filter = new KeepFilterFactory(true);
+  filter->expect_manual_compaction_.store(false);
+  options.compaction_filter_factory.reset(filter);
+
+  options = CurrentOptions(options);
+  DestroyAndReopen(options);
+  CreateAndReopenWithCF({"pikachu"}, options);
+
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DBTestWritableFile.GetPreallocationStatus", [&](void* arg) {
+        ASSERT_TRUE(arg != nullptr);
+        size_t preallocation_size = *(static_cast<size_t*>(arg));
+        if (num_levels_ > 3) {
+          ASSERT_LE(preallocation_size, options.target_file_size_base * 1.1);
+        }
+      });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  Random rnd(301);
+  int key_idx = 0;
+
+  filter->expect_full_compaction_.store(true);
+  // Stage 1:
+  //   Generate a set of files at level 0, but don't trigger level-0
+  //   compaction.
+  for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
+       num++) {
+    // Write 100KB
+    GenerateNewFile(1, &rnd, &key_idx);
+  }
+
+  // Generate one more file at level-0, which should trigger level-0
+  // compaction.
+  GenerateNewFile(1, &rnd, &key_idx);
+  // Suppose each file flushed from mem table has size 1. Now we compact
+  // (level0_file_num_compaction_trigger)=4 files and should have a big
+  // file of size 4.
+  ASSERT_EQ(NumSortedRuns(1), 1);
+
+  // Stage 2:
+  //   Now we have one file at level 0, with size 4. We also have some data in
+  //   mem table. Let's continue generating new files at level 0, but don't
+  //   trigger level-0 compaction.
+  //   First, clean up memtable before inserting new data. This will generate
+  //   a level-0 file, with size around 0.4 (according to previously written
+  //   data amount).
+  filter->expect_full_compaction_.store(false);
+  ASSERT_OK(Flush(1));
+  for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
+       num++) {
+    GenerateNewFile(1, &rnd, &key_idx);
+    ASSERT_EQ(NumSortedRuns(1), num + 3);
+  }
+
+  // Generate one more file at level-0, which should trigger level-0
+  // compaction.
+  GenerateNewFile(1, &rnd, &key_idx);
+  // Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
+  // After compaction, we should have 2 files, with size 4, 2.4.
+  ASSERT_EQ(NumSortedRuns(1), 2);
+
+  // Stage 3:
+  //   Now we have 2 files at level 0, with size 4 and 2.4. Continue
+  //   generating new files at level 0.
+  for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
+       num++) {
+    GenerateNewFile(1, &rnd, &key_idx);
+    ASSERT_EQ(NumSortedRuns(1), num + 3);
+  }
+
+  // Generate one more file at level-0, which should trigger level-0
+  // compaction.
+ GenerateNewFile(1, &rnd, &key_idx); + // Before compaction, we have 4 files at level 0, with size 4, 2.4, 1, 1. + // After compaction, we should have 3 files, with size 4, 2.4, 2. + ASSERT_EQ(NumSortedRuns(1), 3); + + // Stage 4: + // Now we have 3 files at level 0, with size 4, 2.4, 2. Let's generate a + // new file of size 1. + GenerateNewFile(1, &rnd, &key_idx); + dbfull()->TEST_WaitForCompact(); + // Level-0 compaction is triggered, but no file will be picked up. + ASSERT_EQ(NumSortedRuns(1), 4); + + // Stage 5: + // Now we have 4 files at level 0, with size 4, 2.4, 2, 1. Let's generate + // a new file of size 1. + filter->expect_full_compaction_.store(true); + GenerateNewFile(1, &rnd, &key_idx); + dbfull()->TEST_WaitForCompact(); + // All files at level 0 will be compacted into a single one. + ASSERT_EQ(NumSortedRuns(1), 1); + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + +TEST_P(DBTestUniversalCompaction, UniversalCompactionSizeAmplification) { + Options options = CurrentOptions(); + options.compaction_style = kCompactionStyleUniversal; + options.num_levels = num_levels_; + options.write_buffer_size = 100 << 10; // 100KB + options.target_file_size_base = 32 << 10; // 32KB + options.level0_file_num_compaction_trigger = 3; + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + + // Trigger compaction if size amplification exceeds 110% + options.compaction_options_universal.max_size_amplification_percent = 110; + options = CurrentOptions(options); + ReopenWithColumnFamilies({"default", "pikachu"}, options); + + Random rnd(301); + int key_idx = 0; + + // Generate two files in Level 0. Both files are approx the same size. + for (int num = 0; num < options.level0_file_num_compaction_trigger - 1; + num++) { + // Write 110KB (11 values, each 10K) + for (int i = 0; i < 11; i++) { + ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 10000))); + key_idx++; + } + dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + ASSERT_EQ(NumSortedRuns(1), num + 1); + } + ASSERT_EQ(NumSortedRuns(1), 2); + + // Flush whatever is remaining in memtable. This is typically + // small, which should not trigger size ratio based compaction + // but will instead trigger size amplification. 
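+  // (Universal compaction estimates size amplification as the total size of
+  // all newer sorted runs divided by the size of the oldest run; with
+  // max_size_amplification_percent = 110, everything is merged into a single
+  // run once the newer data exceeds 1.1x the oldest run.)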
+  ASSERT_OK(Flush(1));
+
+  dbfull()->TEST_WaitForCompact();
+
+  // Verify that size amplification did occur
+  ASSERT_EQ(NumSortedRuns(1), 1);
+}
+
+TEST_P(DBTestUniversalCompaction, CompactFilesOnUniversalCompaction) {
+  const int kTestKeySize = 16;
+  const int kTestValueSize = 984;
+  const int kEntrySize = kTestKeySize + kTestValueSize;
+  const int kEntriesPerBuffer = 10;
+
+  ChangeCompactOptions();
+  Options options;
+  options.create_if_missing = true;
+  options.compaction_style = kCompactionStyleLevel;
+  options.num_levels = 1;
+  options.target_file_size_base = options.write_buffer_size;
+  options.compression = kNoCompression;
+  options = CurrentOptions(options);
+  options.write_buffer_size = kEntrySize * kEntriesPerBuffer;
+  CreateAndReopenWithCF({"pikachu"}, options);
+  ASSERT_EQ(options.compaction_style, kCompactionStyleUniversal);
+  Random rnd(301);
+  for (int key = 1024 * kEntriesPerBuffer; key >= 0; --key) {
+    ASSERT_OK(Put(1, ToString(key), RandomString(&rnd, kTestValueSize)));
+  }
+  dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+  dbfull()->TEST_WaitForCompact();
+  ColumnFamilyMetaData cf_meta;
+  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
+  std::vector<std::string> compaction_input_file_names;
+  for (auto file : cf_meta.levels[0].files) {
+    if (rnd.OneIn(2)) {
+      compaction_input_file_names.push_back(file.name);
+    }
+  }
+
+  if (compaction_input_file_names.size() == 0) {
+    compaction_input_file_names.push_back(
+        cf_meta.levels[0].files[0].name);
+  }
+
+  // expect failure since universal compaction only allows L0 output
+  ASSERT_FALSE(dbfull()
+                   ->CompactFiles(CompactionOptions(), handles_[1],
+                                  compaction_input_file_names, 1)
+                   .ok());
+
+  // expect ok and verify that the compacted files no longer exist.
+  ASSERT_OK(dbfull()->CompactFiles(
+      CompactionOptions(), handles_[1],
+      compaction_input_file_names, 0));
+
+  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
+  VerifyCompactionResult(
+      cf_meta,
+      std::set<std::string>(compaction_input_file_names.begin(),
+                            compaction_input_file_names.end()));
+
+  compaction_input_file_names.clear();
+
+  // Pick the first and the last file, expect everything is
+  // compacted into one single file.
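+  // (With kCompactionStyleUniversal and num_levels = 1, every CompactFiles
+  // call must target output level 0; that is why output level 1 was rejected
+  // above.)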
+  compaction_input_file_names.push_back(
+      cf_meta.levels[0].files[0].name);
+  compaction_input_file_names.push_back(
+      cf_meta.levels[0].files[
+          cf_meta.levels[0].files.size() - 1].name);
+  ASSERT_OK(dbfull()->CompactFiles(
+      CompactionOptions(), handles_[1],
+      compaction_input_file_names, 0));
+
+  dbfull()->GetColumnFamilyMetaData(handles_[1], &cf_meta);
+  ASSERT_EQ(cf_meta.levels[0].files.size(), 1U);
+}
+
+TEST_P(DBTestUniversalCompaction, UniversalCompactionTargetLevel) {
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.write_buffer_size = 100 << 10;  // 100KB
+  options.num_levels = 7;
+  options.disable_auto_compactions = true;
+  DestroyAndReopen(options);
+
+  // Generate 3 overlapping files
+  Random rnd(301);
+  for (int i = 0; i < 210; i++) {
+    ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
+  }
+  ASSERT_OK(Flush());
+
+  for (int i = 200; i < 300; i++) {
+    ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
+  }
+  ASSERT_OK(Flush());
+
+  for (int i = 250; i < 260; i++) {
+    ASSERT_OK(Put(Key(i), RandomString(&rnd, 100)));
+  }
+  ASSERT_OK(Flush());
+
+  ASSERT_EQ("3", FilesPerLevel(0));
+  // Compact all files into 1 file and put it in L4
+  CompactRangeOptions compact_options;
+  compact_options.change_level = true;
+  compact_options.target_level = 4;
+  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
+  db_->CompactRange(compact_options, nullptr, nullptr);
+  ASSERT_EQ("0,0,0,0,1", FilesPerLevel(0));
+}
+
+
+class DBTestUniversalCompactionMultiLevels
+    : public DBTestUniversalCompactionBase {
+ public:
+  DBTestUniversalCompactionMultiLevels() :
+      DBTestUniversalCompactionBase(
+          "/db_universal_compaction_multi_levels_test") {}
+};
+
+TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionMultiLevels) {
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.num_levels = num_levels_;
+  options.write_buffer_size = 100 << 10;  // 100KB
+  options.level0_file_num_compaction_trigger = 8;
+  options.max_background_compactions = 3;
+  options.target_file_size_base = 32 * 1024;
+  CreateAndReopenWithCF({"pikachu"}, options);
+
+  // Trigger compaction if size amplification exceeds 110%
+  options.compaction_options_universal.max_size_amplification_percent = 110;
+  options = CurrentOptions(options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);
+
+  Random rnd(301);
+  int num_keys = 100000;
+  for (int i = 0; i < num_keys * 2; i++) {
+    ASSERT_OK(Put(1, Key(i % num_keys), Key(i)));
+  }
+
+  dbfull()->TEST_WaitForCompact();
+
+  for (int i = num_keys; i < num_keys * 2; i++) {
+    ASSERT_EQ(Get(1, Key(i % num_keys)), Key(i));
+  }
+}
+
+// Tests universal compaction with trivial move enabled
+TEST_P(DBTestUniversalCompactionMultiLevels, UniversalCompactionTrivialMove) {
+  int32_t trivial_move = 0;
+  int32_t non_trivial_move = 0;
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DBImpl::BackgroundCompaction:TrivialMove",
+      [&](void* arg) { trivial_move++; });
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) {
+        non_trivial_move++;
+        ASSERT_TRUE(arg != nullptr);
+        int output_level = *(static_cast<int*>(arg));
+        ASSERT_EQ(output_level, 0);
+      });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.compaction_options_universal.allow_trivial_move = true;
+  options.num_levels = 3;
+  options.write_buffer_size = 100 << 10;  // 100KB
+  options.level0_file_num_compaction_trigger = 3;
+  options.max_background_compactions = 2;
+  options.target_file_size_base = 32 * 1024;
+  DestroyAndReopen(options);
+  CreateAndReopenWithCF({"pikachu"}, options);
+
+  // Trigger compaction if size amplification exceeds 110%
+  options.compaction_options_universal.max_size_amplification_percent = 110;
+  options = CurrentOptions(options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);
+
+  Random rnd(301);
+  int num_keys = 150000;
+  for (int i = 0; i < num_keys; i++) {
+    ASSERT_OK(Put(1, Key(i), Key(i)));
+  }
+  std::vector<std::string> values;
+
+  ASSERT_OK(Flush(1));
+  dbfull()->TEST_WaitForCompact();
+
+  ASSERT_GT(trivial_move, 0);
+  ASSERT_GT(non_trivial_move, 0);
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+INSTANTIATE_TEST_CASE_P(DBTestUniversalCompactionMultiLevels,
+                        DBTestUniversalCompactionMultiLevels,
+                        ::testing::Combine(::testing::Values(3, 20),
+                                           ::testing::Bool()));
+
+class DBTestUniversalCompactionParallel :
+    public DBTestUniversalCompactionBase {
+ public:
+  DBTestUniversalCompactionParallel() :
+      DBTestUniversalCompactionBase(
+          "/db_universal_compaction_parallel_test") {}
+};
+
+TEST_P(DBTestUniversalCompactionParallel, UniversalCompactionParallel) {
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.num_levels = num_levels_;
+  options.write_buffer_size = 1 << 10;  // 1KB
+  options.level0_file_num_compaction_trigger = 3;
+  options.max_background_compactions = 3;
+  options.max_background_flushes = 3;
+  options.target_file_size_base = 1 * 1024;
+  options.compaction_options_universal.max_size_amplification_percent = 110;
+  DestroyAndReopen(options);
+  CreateAndReopenWithCF({"pikachu"}, options);
+
+  // Delay every compaction so multiple compactions will happen.
+  std::atomic<int> num_compactions_running(0);
+  std::atomic<bool> has_parallel(false);
+  rocksdb::SyncPoint::GetInstance()->SetCallBack("CompactionJob::Run():Start",
+                                                 [&](void* arg) {
+    if (num_compactions_running.fetch_add(1) > 0) {
+      has_parallel.store(true);
+      return;
+    }
+    for (int nwait = 0; nwait < 20000; nwait++) {
+      if (has_parallel.load() || num_compactions_running.load() > 1) {
+        has_parallel.store(true);
+        break;
+      }
+      env_->SleepForMicroseconds(1000);
+    }
+  });
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "CompactionJob::Run():End",
+      [&](void* arg) { num_compactions_running.fetch_add(-1); });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  options = CurrentOptions(options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);
+
+  Random rnd(301);
+  int num_keys = 30000;
+  for (int i = 0; i < num_keys * 2; i++) {
+    ASSERT_OK(Put(1, Key(i % num_keys), Key(i)));
+  }
+  dbfull()->TEST_WaitForCompact();
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+  ASSERT_EQ(num_compactions_running.load(), 0);
+  ASSERT_TRUE(has_parallel.load());
+
+  for (int i = num_keys; i < num_keys * 2; i++) {
+    ASSERT_EQ(Get(1, Key(i % num_keys)), Key(i));
+  }
+
+  // Reopen and check.
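+  // (Reopening replays the WAL and rebuilds the version from the manifest,
+  // checking that the parallel compactions left consistent, durable state.)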
+ ReopenWithColumnFamilies({"default", "pikachu"}, options); + for (int i = num_keys; i < num_keys * 2; i++) { + ASSERT_EQ(Get(1, Key(i % num_keys)), Key(i)); + } +} + +INSTANTIATE_TEST_CASE_P(DBTestUniversalCompactionParallel, + DBTestUniversalCompactionParallel, + ::testing::Combine(::testing::Values(1, 10), + ::testing::Bool())); + +TEST_P(DBTestUniversalCompaction, UniversalCompactionOptions) { + Options options = CurrentOptions(); + options.compaction_style = kCompactionStyleUniversal; + options.write_buffer_size = 105 << 10; // 105KB + options.arena_block_size = 4 << 10; // 4KB + options.target_file_size_base = 32 << 10; // 32KB + options.level0_file_num_compaction_trigger = 4; + options.num_levels = num_levels_; + options.compaction_options_universal.compression_size_percent = -1; + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + + Random rnd(301); + int key_idx = 0; + + for (int num = 0; num < options.level0_file_num_compaction_trigger; num++) { + // Write 100KB (100 values, each 1K) + for (int i = 0; i < 100; i++) { + ASSERT_OK(Put(1, Key(key_idx), RandomString(&rnd, 990))); + key_idx++; + } + dbfull()->TEST_WaitForFlushMemTable(handles_[1]); + + if (num < options.level0_file_num_compaction_trigger - 1) { + ASSERT_EQ(NumSortedRuns(1), num + 1); + } + } + + dbfull()->TEST_WaitForCompact(); + ASSERT_EQ(NumSortedRuns(1), 1); +} + +TEST_P(DBTestUniversalCompaction, UniversalCompactionStopStyleSimilarSize) { + Options options = CurrentOptions(); + options.compaction_style = kCompactionStyleUniversal; + options.write_buffer_size = 105 << 10; // 105KB + options.arena_block_size = 4 << 10; // 4KB + options.target_file_size_base = 32 << 10; // 32KB + // trigger compaction if there are >= 4 files + options.level0_file_num_compaction_trigger = 4; + options.compaction_options_universal.size_ratio = 10; + options.compaction_options_universal.stop_style = + kCompactionStopStyleSimilarSize; + options.num_levels = num_levels_; + DestroyAndReopen(options); + + Random rnd(301); + int key_idx = 0; + + // Stage 1: + // Generate a set of files at level 0, but don't trigger level-0 + // compaction. + for (int num = 0; num < options.level0_file_num_compaction_trigger - 1; + num++) { + // Write 100KB (100 values, each 1K) + for (int i = 0; i < 100; i++) { + ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990))); + key_idx++; + } + dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_EQ(NumSortedRuns(), num + 1); + } + + // Generate one more file at level-0, which should trigger level-0 + // compaction. + for (int i = 0; i < 100; i++) { + ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990))); + key_idx++; + } + dbfull()->TEST_WaitForCompact(); + // Suppose each file flushed from mem table has size 1. Now we compact + // (level0_file_num_compaction_trigger+1)=4 files and should have a big + // file of size 4. + ASSERT_EQ(NumSortedRuns(), 1); + + // Stage 2: + // Now we have one file at level 0, with size 4. We also have some data in + // mem table. Let's continue generating new files at level 0, but don't + // trigger level-0 compaction. + // First, clean up memtable before inserting new data. This will generate + // a level-0 file, with size around 0.4 (according to previously written + // data amount). 
+  dbfull()->Flush(FlushOptions());
+  for (int num = 0; num < options.level0_file_num_compaction_trigger - 3;
+       num++) {
+    // Write 100KB (100 values, each 1K)
+    for (int i = 0; i < 100; i++) {
+      ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
+      key_idx++;
+    }
+    dbfull()->TEST_WaitForFlushMemTable();
+    ASSERT_EQ(NumSortedRuns(), num + 3);
+  }
+
+  // Generate one more file at level-0, which should trigger level-0
+  // compaction.
+  for (int i = 0; i < 100; i++) {
+    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
+    key_idx++;
+  }
+  dbfull()->TEST_WaitForCompact();
+  // Before compaction, we have 4 files at level 0, with size 4, 0.4, 1, 1.
+  // After compaction, we should have 3 files, with size 4, 0.4, 2.
+  ASSERT_EQ(NumSortedRuns(), 3);
+  // Stage 3:
+  //   Now we have 3 files at level 0, with size 4, 0.4, 2. Generate one
+  //   more file at level-0, which should trigger level-0 compaction.
+  for (int i = 0; i < 100; i++) {
+    ASSERT_OK(Put(Key(key_idx), RandomString(&rnd, 990)));
+    key_idx++;
+  }
+  dbfull()->TEST_WaitForCompact();
+  // Level-0 compaction is triggered, but no file will be picked up.
+  ASSERT_EQ(NumSortedRuns(), 4);
+}
+
+TEST_P(DBTestUniversalCompaction, UniversalCompactionCompressRatio1) {
+  if (!Snappy_Supported()) {
+    return;
+  }
+
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.write_buffer_size = 100 << 10;     // 100KB
+  options.target_file_size_base = 32 << 10;  // 32KB
+  options.level0_file_num_compaction_trigger = 2;
+  options.num_levels = num_levels_;
+  options.compaction_options_universal.compression_size_percent = 70;
+  DestroyAndReopen(options);
+
+  Random rnd(301);
+  int key_idx = 0;
+
+  // The first compaction (2) is compressed.
+  for (int num = 0; num < 2; num++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
+      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
+      key_idx++;
+    }
+    dbfull()->TEST_WaitForFlushMemTable();
+    dbfull()->TEST_WaitForCompact();
+  }
+  ASSERT_LT(TotalSize(), 110000U * 2 * 0.9);
+
+  // The second compaction (4) is compressed
+  for (int num = 0; num < 2; num++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
+      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
+      key_idx++;
+    }
+    dbfull()->TEST_WaitForFlushMemTable();
+    dbfull()->TEST_WaitForCompact();
+  }
+  ASSERT_LT(TotalSize(), 110000 * 4 * 0.9);
+
+  // The third compaction (2 4) is compressed since this time it is
+  // (1 1 3.2) and 3.2/5.2 doesn't reach the ratio.
+  for (int num = 0; num < 2; num++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
+      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
+      key_idx++;
+    }
+    dbfull()->TEST_WaitForFlushMemTable();
+    dbfull()->TEST_WaitForCompact();
+  }
+  ASSERT_LT(TotalSize(), 110000 * 6 * 0.9);
+
+  // When the compaction grows to (2 4 8), the latest output is no longer
+  // compressed.
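+  // (compression_size_percent = 70 means only outputs expected to land in the
+  // oldest ~70% of the data are compressed; an output covering the newest
+  // ~30% is written uncompressed, which the ASSERT_GT below detects through
+  // the larger total size.)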
+  for (int num = 0; num < 8; num++) {
+    // Write 110KB (11 values, each 10K)
+    for (int i = 0; i < 11; i++) {
+      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
+      key_idx++;
+    }
+    dbfull()->TEST_WaitForFlushMemTable();
+    dbfull()->TEST_WaitForCompact();
+  }
+  ASSERT_GT(TotalSize(), 110000 * 11 * 0.8 + 110000 * 2);
+}
+
+TEST_P(DBTestUniversalCompaction, UniversalCompactionCompressRatio2) {
+  if (!Snappy_Supported()) {
+    return;
+  }
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.write_buffer_size = 100 << 10;     // 100KB
+  options.target_file_size_base = 32 << 10;  // 32KB
+  options.level0_file_num_compaction_trigger = 2;
+  options.num_levels = num_levels_;
+  options.compaction_options_universal.compression_size_percent = 95;
+  DestroyAndReopen(options);
+
+  Random rnd(301);
+  int key_idx = 0;
+
+  // When the compaction grows to (2 4 8), the latest output is still
+  // compressed, given the 95% compression_size_percent.
+  for (int num = 0; num < 14; num++) {
+    // Write 120KB (12 values, each 10K)
+    for (int i = 0; i < 12; i++) {
+      ASSERT_OK(Put(Key(key_idx), CompressibleString(&rnd, 10000)));
+      key_idx++;
+    }
+    dbfull()->TEST_WaitForFlushMemTable();
+    dbfull()->TEST_WaitForCompact();
+  }
+  ASSERT_LT(TotalSize(), 120000U * 12 * 0.8 + 120000 * 2);
+}
+
+// Test that checks trivial move in universal compaction
+TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest1) {
+  int32_t trivial_move = 0;
+  int32_t non_trivial_move = 0;
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DBImpl::BackgroundCompaction:TrivialMove",
+      [&](void* arg) { trivial_move++; });
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DBImpl::BackgroundCompaction:NonTrivial", [&](void* arg) {
+        non_trivial_move++;
+        ASSERT_TRUE(arg != nullptr);
+        int output_level = *(static_cast<int*>(arg));
+        ASSERT_EQ(output_level, 0);
+      });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.compaction_options_universal.allow_trivial_move = true;
+  options.num_levels = 2;
+  options.write_buffer_size = 100 << 10;  // 100KB
+  options.level0_file_num_compaction_trigger = 3;
+  options.max_background_compactions = 1;
+  options.target_file_size_base = 32 * 1024;
+  DestroyAndReopen(options);
+  CreateAndReopenWithCF({"pikachu"}, options);
+
+  // Trigger compaction if size amplification exceeds 110%
+  options.compaction_options_universal.max_size_amplification_percent = 110;
+  options = CurrentOptions(options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);
+
+  Random rnd(301);
+  int num_keys = 250000;
+  for (int i = 0; i < num_keys; i++) {
+    ASSERT_OK(Put(1, Key(i), Key(i)));
+  }
+  std::vector<std::string> values;
+
+  ASSERT_OK(Flush(1));
+  dbfull()->TEST_WaitForCompact();
+
+  ASSERT_GT(trivial_move, 0);
+  ASSERT_GT(non_trivial_move, 0);
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+// Test that checks trivial move in universal compaction
+TEST_P(DBTestUniversalCompaction, UniversalCompactionTrivialMoveTest2) {
+  int32_t trivial_move = 0;
+  int32_t non_trivial_move = 0;
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DBImpl::BackgroundCompaction:TrivialMove",
+      [&](void* arg) { trivial_move++; });
+  rocksdb::SyncPoint::GetInstance()->SetCallBack(
+      "DBImpl::BackgroundCompaction:NonTrivial",
+      [&](void* arg) { non_trivial_move++; });
+
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.compaction_options_universal.allow_trivial_move = true;
+  options.num_levels = 15;
+  options.write_buffer_size = 100 << 10;  // 100KB
+  options.level0_file_num_compaction_trigger = 8;
+  options.max_background_compactions = 4;
+  options.target_file_size_base = 64 * 1024;
+  DestroyAndReopen(options);
+  CreateAndReopenWithCF({"pikachu"}, options);
+
+  // Trigger compaction if size amplification exceeds 110%
+  options.compaction_options_universal.max_size_amplification_percent = 110;
+  options = CurrentOptions(options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);
+
+  Random rnd(301);
+  int num_keys = 500000;
+  for (int i = 0; i < num_keys; i++) {
+    ASSERT_OK(Put(1, Key(i), Key(i)));
+  }
+  std::vector<std::string> values;
+
+  ASSERT_OK(Flush(1));
+  dbfull()->TEST_WaitForCompact();
+
+  ASSERT_GT(trivial_move, 0);
+  ASSERT_EQ(non_trivial_move, 0);
+
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+TEST_P(DBTestUniversalCompaction, UniversalCompactionFourPaths) {
+  Options options = CurrentOptions();
+  options.db_paths.emplace_back(dbname_, 300 * 1024);
+  options.db_paths.emplace_back(dbname_ + "_2", 300 * 1024);
+  options.db_paths.emplace_back(dbname_ + "_3", 500 * 1024);
+  options.db_paths.emplace_back(dbname_ + "_4", 1024 * 1024 * 1024);
+  options.memtable_factory.reset(
+      new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
+  options.compaction_style = kCompactionStyleUniversal;
+  options.compaction_options_universal.size_ratio = 5;
+  options.write_buffer_size = 111 << 10;  // 114KB
+  options.arena_block_size = 4 << 10;
+  options.level0_file_num_compaction_trigger = 2;
+  options.num_levels = 1;
+
+  std::vector<std::string> filenames;
+  env_->GetChildren(options.db_paths[1].path, &filenames);
+  // Delete archival files.
+  for (size_t i = 0; i < filenames.size(); ++i) {
+    env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]);
+  }
+  env_->DeleteDir(options.db_paths[1].path);
+  Reopen(options);
+
+  Random rnd(301);
+  int key_idx = 0;
+
+  // First three 110KB files are not going to the second path.
+  // After that, (100K, 200K)
+  for (int num = 0; num < 3; num++) {
+    GenerateNewFile(&rnd, &key_idx);
+  }
+
+  // Another 110KB triggers a compaction to a 400K file in the second path
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
+
+  // (1, 4)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
+  ASSERT_EQ(1, GetSstFileCount(dbname_));
+
+  // (1,1,4) -> (2, 4)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+  ASSERT_EQ(0, GetSstFileCount(dbname_));
+
+  // (1, 2, 4)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+  ASSERT_EQ(1, GetSstFileCount(dbname_));
+
+  // (1, 1, 2, 4) -> (8)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
+
+  // (1, 8)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
+  ASSERT_EQ(1, GetSstFileCount(dbname_));
+
+  // (1, 1, 8) -> (2, 8)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+
+  // (1, 2, 8)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+  ASSERT_EQ(1, GetSstFileCount(dbname_));
+
+  // (1, 1, 2, 8) -> (4, 8)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
+
+  // (1, 4, 8)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[3].path));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[2].path));
+  ASSERT_EQ(1, GetSstFileCount(dbname_));
+
+  for (int i = 0; i < key_idx; i++) {
+    auto v = Get(Key(i));
+    ASSERT_NE(v, "NOT_FOUND");
+    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
+  }
+
+  Reopen(options);
+
+  for (int i = 0; i < key_idx; i++) {
+    auto v = Get(Key(i));
+    ASSERT_NE(v, "NOT_FOUND");
+    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
+  }
+
+  Destroy(options);
+}
+
+TEST_P(DBTestUniversalCompaction, IncreaseUniversalCompactionNumLevels) {
+  std::function<void(int)> verify_func = [&](int num_keys_in_db) {
+    std::string keys_in_db;
+    Iterator* iter = dbfull()->NewIterator(ReadOptions(), handles_[1]);
+    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+      keys_in_db.append(iter->key().ToString());
+      keys_in_db.push_back(',');
+    }
+    delete iter;
+
+    std::string expected_keys;
+    for (int i = 0; i <= num_keys_in_db; i++) {
+      expected_keys.append(Key(i));
+      expected_keys.push_back(',');
+    }
+
+    ASSERT_EQ(keys_in_db, expected_keys);
+  };
+
+  Random rnd(301);
+  int max_key1 = 200;
+  int max_key2 = 600;
+  int max_key3 = 800;
+  const int KNumKeysPerFile = 10;
+
+  // Stage 1: open a DB with universal compaction, num_levels=1
+  Options options = CurrentOptions();
+  options.compaction_style = kCompactionStyleUniversal;
+  options.num_levels = 1;
+  options.write_buffer_size = 200 << 10;  // 200KB
+  options.level0_file_num_compaction_trigger = 3;
+  options.memtable_factory.reset(new SpecialSkipListFactory(KNumKeysPerFile));
+  options = CurrentOptions(options);
+  CreateAndReopenWithCF({"pikachu"}, options);
+
+  for (int i = 0; i <= max_key1; i++) {
+    // each value is 10K
+    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
+
+    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+    dbfull()->TEST_WaitForCompact();
+  }
+  ASSERT_OK(Flush(1));
+  dbfull()->TEST_WaitForCompact();
+
+  // Stage 2: reopen with universal compaction, num_levels=4
+  options.compaction_style = kCompactionStyleUniversal;
+  options.num_levels = 4;
+  options = CurrentOptions(options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);
+
+  verify_func(max_key1);
+
+  // Insert more keys
+  for (int i = max_key1 + 1; i <= max_key2; i++) {
+    // each value is 10K
+    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
+    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+    dbfull()->TEST_WaitForCompact();
+  }
+  ASSERT_OK(Flush(1));
+  dbfull()->TEST_WaitForCompact();
+
+  verify_func(max_key2);
+  // Compaction to non-L0 has happened.
+  ASSERT_GT(NumTableFilesAtLevel(options.num_levels - 1, 1), 0);
+
+  // Stage 3: revert it back to one level and reopen with num_levels=1.
+  options.num_levels = 4;
+  options.target_file_size_base = INT_MAX;
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);
+  // Compact all to level 0
+  CompactRangeOptions compact_options;
+  compact_options.change_level = true;
+  compact_options.target_level = 0;
+  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
+  dbfull()->CompactRange(compact_options, handles_[1], nullptr, nullptr);
+  // Need to restart it once to remove higher level records in manifest.
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);
+  // Final reopen
+  options.compaction_style = kCompactionStyleUniversal;
+  options.num_levels = 1;
+  options = CurrentOptions(options);
+  ReopenWithColumnFamilies({"default", "pikachu"}, options);
+
+  // Insert more keys
+  for (int i = max_key2 + 1; i <= max_key3; i++) {
+    // each value is 10K
+    ASSERT_OK(Put(1, Key(i), RandomString(&rnd, 10000)));
+    dbfull()->TEST_WaitForFlushMemTable(handles_[1]);
+    dbfull()->TEST_WaitForCompact();
+  }
+  ASSERT_OK(Flush(1));
+  dbfull()->TEST_WaitForCompact();
+  verify_func(max_key3);
+}
+
+
+TEST_P(DBTestUniversalCompaction, UniversalCompactionSecondPathRatio) {
+  if (!Snappy_Supported()) {
+    return;
+  }
+  Options options = CurrentOptions();
+  options.db_paths.emplace_back(dbname_, 500 * 1024);
+  options.db_paths.emplace_back(dbname_ + "_2", 1024 * 1024 * 1024);
+  options.compaction_style = kCompactionStyleUniversal;
+  options.compaction_options_universal.size_ratio = 5;
+  options.write_buffer_size = 111 << 10;  // 114KB
+  options.arena_block_size = 4 << 10;
+  options.level0_file_num_compaction_trigger = 2;
+  options.num_levels = 1;
+  options.memtable_factory.reset(
+      new SpecialSkipListFactory(KNumKeysByGenerateNewFile - 1));
+
+  std::vector<std::string> filenames;
+  env_->GetChildren(options.db_paths[1].path, &filenames);
+  // Delete archival files.
+  for (size_t i = 0; i < filenames.size(); ++i) {
+    env_->DeleteFile(options.db_paths[1].path + "/" + filenames[i]);
+  }
+  env_->DeleteDir(options.db_paths[1].path);
+  Reopen(options);
+
+  Random rnd(301);
+  int key_idx = 0;
+
+  // First three 110KB files are not going to the second path.
+  // After that, (100K, 200K)
+  for (int num = 0; num < 3; num++) {
+    GenerateNewFile(&rnd, &key_idx);
+  }
+
+  // Another 110KB triggers a compaction to a 400K file in the second path
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+
+  // (1, 4)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+  ASSERT_EQ(1, GetSstFileCount(dbname_));
+
+  // (1,1,4) -> (2, 4)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+  ASSERT_EQ(1, GetSstFileCount(dbname_));
+
+  // (1, 2, 4)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+  ASSERT_EQ(2, GetSstFileCount(dbname_));
+
+  // (1, 1, 2, 4) -> (8)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+  ASSERT_EQ(0, GetSstFileCount(dbname_));
+
+  // (1, 8)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+  ASSERT_EQ(1, GetSstFileCount(dbname_));
+
+  // (1, 1, 8) -> (2, 8)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+  ASSERT_EQ(1, GetSstFileCount(dbname_));
+
+  // (1, 2, 8)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+  ASSERT_EQ(2, GetSstFileCount(dbname_));
+
+  // (1, 1, 2, 8) -> (4, 8)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
+  ASSERT_EQ(0, GetSstFileCount(dbname_));
+
+  // (1, 4, 8)
+  GenerateNewFile(&rnd, &key_idx);
+  ASSERT_EQ(2, GetSstFileCount(options.db_paths[1].path));
+  ASSERT_EQ(1, GetSstFileCount(dbname_));
+
+  for (int i = 0; i < key_idx; i++) {
+    auto v = Get(Key(i));
+    ASSERT_NE(v, "NOT_FOUND");
+    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
+  }
+
+  Reopen(options);
+
+  for (int i = 0; i < key_idx; i++) {
+    auto v = Get(Key(i));
+    ASSERT_NE(v, "NOT_FOUND");
+    ASSERT_TRUE(v.size() == 1 || v.size() == 990);
+  }
+
+  Destroy(options);
+}
+
+INSTANTIATE_TEST_CASE_P(UniversalCompactionNumLevels, DBTestUniversalCompaction,
+                        ::testing::Combine(::testing::Values(1, 3, 5),
+                                           ::testing::Bool()));
+
+class DBTestUniversalManualCompactionOutputPathId
+    : public DBTestUniversalCompactionBase {
+ public:
+  DBTestUniversalManualCompactionOutputPathId() :
+      DBTestUniversalCompactionBase(
+          "/db_universal_compaction_manual_pid_test") {}
+};
+
+TEST_P(DBTestUniversalManualCompactionOutputPathId,
+       ManualCompactionOutputPathId) {
+  Options options = CurrentOptions();
+  options.create_if_missing = true;
+  options.db_paths.emplace_back(dbname_, 1000000000);
+  options.db_paths.emplace_back(dbname_ + "_2", 1000000000);
+  options.compaction_style = kCompactionStyleUniversal;
+  options.num_levels = num_levels_;
+  options.target_file_size_base = 1 << 30;  // Big size
+  options.level0_file_num_compaction_trigger = 10;
+  Destroy(options);
+  DestroyAndReopen(options);
+  CreateAndReopenWithCF({"pikachu"}, options);
+  MakeTables(3, "p", "q", 1);
+  dbfull()->TEST_WaitForCompact();
+  ASSERT_EQ(2, TotalLiveFiles(1));
+  ASSERT_EQ(2, GetSstFileCount(options.db_paths[0].path));
+  ASSERT_EQ(0, GetSstFileCount(options.db_paths[1].path));
+
+  // Full compaction to DB path 1
+  CompactRangeOptions compact_options;
+  compact_options.target_path_id = 1;
+  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
+  db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);
+  ASSERT_EQ(1, TotalLiveFiles(1));
+  ASSERT_EQ(0, GetSstFileCount(options.db_paths[0].path));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+
+  ReopenWithColumnFamilies({kDefaultColumnFamilyName, "pikachu"}, options);
+  ASSERT_EQ(1, TotalLiveFiles(1));
+  ASSERT_EQ(0, GetSstFileCount(options.db_paths[0].path));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+
+  MakeTables(1, "p", "q", 1);
+  ASSERT_EQ(2, TotalLiveFiles(1));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[0].path));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+
+  ReopenWithColumnFamilies({kDefaultColumnFamilyName, "pikachu"}, options);
+  ASSERT_EQ(2, TotalLiveFiles(1));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[0].path));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[1].path));
+
+  // Full compaction to DB path 0
+  compact_options.target_path_id = 0;
+  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
+  db_->CompactRange(compact_options, handles_[1], nullptr, nullptr);
+  ASSERT_EQ(1, TotalLiveFiles(1));
+  ASSERT_EQ(1, GetSstFileCount(options.db_paths[0].path));
+  ASSERT_EQ(0, GetSstFileCount(options.db_paths[1].path));
+
+  // Fail when compacting to an invalid path ID
+  compact_options.target_path_id = 2;
+  compact_options.exclusive_manual_compaction = exclusive_manual_compaction_;
+  ASSERT_TRUE(db_->CompactRange(compact_options, handles_[1], nullptr, nullptr)
+                  .IsInvalidArgument());
+}
+
+INSTANTIATE_TEST_CASE_P(DBTestUniversalManualCompactionOutputPathId,
+                        DBTestUniversalManualCompactionOutputPathId,
+                        ::testing::Combine(::testing::Values(1, 8),
+                                           ::testing::Bool()));
+
+}  // namespace rocksdb
+
+#endif  // !defined(ROCKSDB_LITE)
+
+int main(int argc, char** argv) {
+#if !defined(ROCKSDB_LITE)
+  rocksdb::port::InstallStackTraceHandler();
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+#else
+  return 0;
+#endif
+}
diff --git a/external/rocksdb/db/db_wal_test.cc b/external/rocksdb/db/db_wal_test.cc
new file mode 100755
index 0000000000..145bd25fb9
--- /dev/null
+++ b/external/rocksdb/db/db_wal_test.cc
@@ -0,0 +1,1016 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
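+//
+// Tests for write-ahead-log behavior: writes with the WAL disabled, log
+// rolling across reopens, SyncWAL() running concurrently with writes and
+// flushes (coordinated through sync points), and recovery from WAL files,
+// including logs that carry data for only a subset of column families.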
+
+#include "db/db_test_util.h"
+#include "port/stack_trace.h"
+#include "util/options_helper.h"
+#include "util/sync_point.h"
+
+#include <thread>
+
+namespace rocksdb {
+class DBWALTest : public DBTestBase {
+ public:
+  DBWALTest() : DBTestBase("/db_wal_test") {}
+};
+
+TEST_F(DBWALTest, WAL) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    WriteOptions writeOpt = WriteOptions();
+    writeOpt.disableWAL = true;
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v1"));
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v1"));
+
+    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+    ASSERT_EQ("v1", Get(1, "foo"));
+    ASSERT_EQ("v1", Get(1, "bar"));
+
+    writeOpt.disableWAL = false;
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v2"));
+    writeOpt.disableWAL = true;
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v2"));
+
+    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+    // Both values should be present.
+    ASSERT_EQ("v2", Get(1, "bar"));
+    ASSERT_EQ("v2", Get(1, "foo"));
+
+    writeOpt.disableWAL = true;
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "bar", "v3"));
+    writeOpt.disableWAL = false;
+    ASSERT_OK(dbfull()->Put(writeOpt, handles_[1], "foo", "v3"));
+
+    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+    // Again, both values should be present.
+    ASSERT_EQ("v3", Get(1, "foo"));
+    ASSERT_EQ("v3", Get(1, "bar"));
+  } while (ChangeCompactOptions());
+}
+
+TEST_F(DBWALTest, RollLog) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    ASSERT_OK(Put(1, "foo", "v1"));
+    ASSERT_OK(Put(1, "baz", "v5"));
+
+    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+    for (int i = 0; i < 10; i++) {
+      ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+    }
+    ASSERT_OK(Put(1, "foo", "v4"));
+    for (int i = 0; i < 10; i++) {
+      ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+    }
+  } while (ChangeOptions());
+}
+
+TEST_F(DBWALTest, SyncWALNotBlockWrite) {
+  Options options = CurrentOptions();
+  options.max_write_buffer_number = 4;
+  DestroyAndReopen(options);
+
+  ASSERT_OK(Put("foo1", "bar1"));
+  ASSERT_OK(Put("foo5", "bar5"));
+
+  rocksdb::SyncPoint::GetInstance()->LoadDependency({
+      {"WritableFileWriter::SyncWithoutFlush:1",
+       "DBWALTest::SyncWALNotBlockWrite:1"},
+      {"DBWALTest::SyncWALNotBlockWrite:2",
+       "WritableFileWriter::SyncWithoutFlush:2"},
+  });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  std::thread thread([&]() { ASSERT_OK(db_->SyncWAL()); });
+
+  TEST_SYNC_POINT("DBWALTest::SyncWALNotBlockWrite:1");
+  ASSERT_OK(Put("foo2", "bar2"));
+  ASSERT_OK(Put("foo3", "bar3"));
+  FlushOptions fo;
+  fo.wait = false;
+  ASSERT_OK(db_->Flush(fo));
+  ASSERT_OK(Put("foo4", "bar4"));
+
+  TEST_SYNC_POINT("DBWALTest::SyncWALNotBlockWrite:2");
+
+  thread.join();
+
+  ASSERT_EQ(Get("foo1"), "bar1");
+  ASSERT_EQ(Get("foo2"), "bar2");
+  ASSERT_EQ(Get("foo3"), "bar3");
+  ASSERT_EQ(Get("foo4"), "bar4");
+  ASSERT_EQ(Get("foo5"), "bar5");
+  rocksdb::SyncPoint::GetInstance()->DisableProcessing();
+}
+
+TEST_F(DBWALTest, SyncWALNotWaitWrite) {
+  ASSERT_OK(Put("foo1", "bar1"));
+  ASSERT_OK(Put("foo3", "bar3"));
+
+  rocksdb::SyncPoint::GetInstance()->LoadDependency({
+      {"SpecialEnv::WalFile::Append:1", "DBWALTest::SyncWALNotWaitWrite:1"},
+      {"DBWALTest::SyncWALNotWaitWrite:2", "SpecialEnv::WalFile::Append:2"},
+  });
+  rocksdb::SyncPoint::GetInstance()->EnableProcessing();
+
+  std::thread thread([&]() { ASSERT_OK(Put("foo2", "bar2")); });
+
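+  // (LoadDependency pairs are {predecessor, successor}: the sync point below
+  // does not fire until the background Put has reached
+  // SpecialEnv::WalFile::Append:1, so SyncWAL() runs while that append is
+  // still in flight; the second pair then releases the append only after the
+  // main thread passes SyncWALNotWaitWrite:2.)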
TEST_SYNC_POINT("DBWALTest::SyncWALNotWaitWrite:1"); + ASSERT_OK(db_->SyncWAL()); + TEST_SYNC_POINT("DBWALTest::SyncWALNotWaitWrite:2"); + + thread.join(); + + ASSERT_EQ(Get("foo1"), "bar1"); + ASSERT_EQ(Get("foo2"), "bar2"); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + +TEST_F(DBWALTest, Recover) { + do { + CreateAndReopenWithCF({"pikachu"}, CurrentOptions()); + ASSERT_OK(Put(1, "foo", "v1")); + ASSERT_OK(Put(1, "baz", "v5")); + + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + ASSERT_EQ("v1", Get(1, "foo")); + + ASSERT_EQ("v1", Get(1, "foo")); + ASSERT_EQ("v5", Get(1, "baz")); + ASSERT_OK(Put(1, "bar", "v2")); + ASSERT_OK(Put(1, "foo", "v3")); + + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + ASSERT_EQ("v3", Get(1, "foo")); + ASSERT_OK(Put(1, "foo", "v4")); + ASSERT_EQ("v4", Get(1, "foo")); + ASSERT_EQ("v2", Get(1, "bar")); + ASSERT_EQ("v5", Get(1, "baz")); + } while (ChangeOptions()); +} + +TEST_F(DBWALTest, RecoverWithTableHandle) { + do { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.disable_auto_compactions = true; + options.avoid_flush_during_recovery = false; + DestroyAndReopen(options); + CreateAndReopenWithCF({"pikachu"}, options); + + ASSERT_OK(Put(1, "foo", "v1")); + ASSERT_OK(Put(1, "bar", "v2")); + ASSERT_OK(Flush(1)); + ASSERT_OK(Put(1, "foo", "v3")); + ASSERT_OK(Put(1, "bar", "v4")); + ASSERT_OK(Flush(1)); + ASSERT_OK(Put(1, "big", std::string(100, 'a'))); + ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions()); + + std::vector> files; + dbfull()->TEST_GetFilesMetaData(handles_[1], &files); + size_t total_files = 0; + for (const auto& level : files) { + total_files += level.size(); + } + ASSERT_EQ(total_files, 3); + for (const auto& level : files) { + for (const auto& file : level) { + if (kInfiniteMaxOpenFiles == option_config_) { + ASSERT_TRUE(file.table_reader_handle != nullptr); + } else { + ASSERT_TRUE(file.table_reader_handle == nullptr); + } + } + } + } while (ChangeOptions()); +} + +TEST_F(DBWALTest, IgnoreRecoveredLog) { + std::string backup_logs = dbname_ + "/backup_logs"; + + // delete old files in backup_logs directory + env_->CreateDirIfMissing(backup_logs); + std::vector old_files; + env_->GetChildren(backup_logs, &old_files); + for (auto& file : old_files) { + if (file != "." && file != "..") { + env_->DeleteFile(backup_logs + "/" + file); + } + } + + do { + Options options = CurrentOptions(); + options.create_if_missing = true; + options.merge_operator = MergeOperators::CreateUInt64AddOperator(); + options.wal_dir = dbname_ + "/logs"; + DestroyAndReopen(options); + + // fill up the DB + std::string one, two; + PutFixed64(&one, 1); + PutFixed64(&two, 2); + ASSERT_OK(db_->Merge(WriteOptions(), Slice("foo"), Slice(one))); + ASSERT_OK(db_->Merge(WriteOptions(), Slice("foo"), Slice(one))); + ASSERT_OK(db_->Merge(WriteOptions(), Slice("bar"), Slice(one))); + + // copy the logs to backup + std::vector logs; + env_->GetChildren(options.wal_dir, &logs); + for (auto& log : logs) { + if (log != ".." && log != ".") { + CopyFile(options.wal_dir + "/" + log, backup_logs + "/" + log); + } + } + + // recover the DB + Reopen(options); + ASSERT_EQ(two, Get("foo")); + ASSERT_EQ(one, Get("bar")); + Close(); + + // copy the logs from backup back to wal dir + for (auto& log : logs) { + if (log != ".." 
+        CopyFile(backup_logs + "/" + log, options.wal_dir + "/" + log);
+      }
+    }
+    // This should ignore the log files; recovery should not happen again.
+    // If recovery did happen, the same merge operator would be called twice,
+    // leading to incorrect results.
+    Reopen(options);
+    ASSERT_EQ(two, Get("foo"));
+    ASSERT_EQ(one, Get("bar"));
+    Close();
+    Destroy(options);
+    Reopen(options);
+    Close();
+
+    // Copy the logs from backup back to the WAL dir.
+    env_->CreateDirIfMissing(options.wal_dir);
+    for (auto& log : logs) {
+      if (log != ".." && log != ".") {
+        CopyFile(backup_logs + "/" + log, options.wal_dir + "/" + log);
+      }
+    }
+    // Assert that we successfully recovered only from the logs, even though
+    // we destroyed the DB.
+    Reopen(options);
+    ASSERT_EQ(two, Get("foo"));
+    ASSERT_EQ(one, Get("bar"));
+
+    // Recovery will fail if the DB directory doesn't exist.
+    Destroy(options);
+    // Copy the logs from backup back to the WAL dir.
+    env_->CreateDirIfMissing(options.wal_dir);
+    for (auto& log : logs) {
+      if (log != ".." && log != ".") {
+        CopyFile(backup_logs + "/" + log, options.wal_dir + "/" + log);
+        // We won't be needing this file anymore.
+        env_->DeleteFile(backup_logs + "/" + log);
+      }
+    }
+    Status s = TryReopen(options);
+    ASSERT_TRUE(!s.ok());
+  } while (ChangeOptions(kSkipHashCuckoo));
+}
+
+TEST_F(DBWALTest, RecoveryWithEmptyLog) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    ASSERT_OK(Put(1, "foo", "v1"));
+    ASSERT_OK(Put(1, "foo", "v2"));
+    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+    ASSERT_OK(Put(1, "foo", "v3"));
+    ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+    ASSERT_EQ("v3", Get(1, "foo"));
+  } while (ChangeOptions());
+}
+
+#ifndef ROCKSDB_LITE
+TEST_F(DBWALTest, GetSortedWalFiles) {
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    VectorLogPtr log_files;
+    ASSERT_OK(dbfull()->GetSortedWalFiles(log_files));
+    ASSERT_EQ(0, log_files.size());
+
+    ASSERT_OK(Put(1, "foo", "v1"));
+    ASSERT_OK(dbfull()->GetSortedWalFiles(log_files));
+    ASSERT_EQ(1, log_files.size());
+  } while (ChangeOptions());
+}
+
+TEST_F(DBWALTest, RecoveryWithLogDataForSomeCFs) {
+  // Test for regression of WAL cleanup missing files that don't contain data
+  // for every column family.
+  do {
+    CreateAndReopenWithCF({"pikachu"}, CurrentOptions());
+    ASSERT_OK(Put(1, "foo", "v1"));
+    ASSERT_OK(Put(1, "foo", "v2"));
+    std::array<uint64_t, 2> earliest_log_nums;
+    for (int i = 0; i < 2; ++i) {
+      if (i > 0) {
+        ReopenWithColumnFamilies({"default", "pikachu"}, CurrentOptions());
+      }
+      VectorLogPtr log_files;
+      ASSERT_OK(dbfull()->GetSortedWalFiles(log_files));
+      if (log_files.size() > 0) {
+        earliest_log_nums[i] = log_files[0]->LogNumber();
+      } else {
+        earliest_log_nums[i] = port::kMaxUint64;
+      }
+    }
+    // Check that at least the first WAL was cleaned up during the recovery.
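+    // (Descriptive note, not in the upstream test: earliest_log_nums[0] was
+    // sampled before the reopen and [1] after it; a strictly larger earliest
+    // log number after recovery implies the pre-recovery WAL was deleted.)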
+    ASSERT_LT(earliest_log_nums[0], earliest_log_nums[1]);
+  } while (ChangeOptions());
+}
+
+TEST_F(DBWALTest, RecoverWithLargeLog) {
+  do {
+    {
+      Options options = CurrentOptions();
+      CreateAndReopenWithCF({"pikachu"}, options);
+      ASSERT_OK(Put(1, "big1", std::string(200000, '1')));
+      ASSERT_OK(Put(1, "big2", std::string(200000, '2')));
+      ASSERT_OK(Put(1, "small3", std::string(10, '3')));
+      ASSERT_OK(Put(1, "small4", std::string(10, '4')));
+      ASSERT_EQ(NumTableFilesAtLevel(0, 1), 0);
+    }
+
+    // Make sure that if we reopen with a small write buffer size, we flush
+    // table files in the middle of a large log file.
+    Options options;
+    options.write_buffer_size = 100000;
+    options = CurrentOptions(options);
+    ReopenWithColumnFamilies({"default", "pikachu"}, options);
+    ASSERT_EQ(NumTableFilesAtLevel(0, 1), 3);
+    ASSERT_EQ(std::string(200000, '1'), Get(1, "big1"));
+    ASSERT_EQ(std::string(200000, '2'), Get(1, "big2"));
+    ASSERT_EQ(std::string(10, '3'), Get(1, "small3"));
+    ASSERT_EQ(std::string(10, '4'), Get(1, "small4"));
+    ASSERT_GT(NumTableFilesAtLevel(0, 1), 1);
+  } while (ChangeCompactOptions());
+}
+
+// In https://reviews.facebook.net/D20661 we changed the recovery behavior:
+// previously, for each log file, each column family's memtable was flushed,
+// even if it was empty. Now we try to create the smallest number of table
+// files by merging updates from multiple logs.
+TEST_F(DBWALTest, RecoverCheckFileAmountWithSmallWriteBuffer) {
+  Options options = CurrentOptions();
+  options.write_buffer_size = 5000000;
+  CreateAndReopenWithCF({"pikachu", "dobrynia", "nikitich"}, options);
+
+  // Since we will reopen the DB with a smaller write_buffer_size,
+  // each key will go to a new SST file.
+  ASSERT_OK(Put(1, Key(10), DummyString(1000000)));
+  ASSERT_OK(Put(1, Key(10), DummyString(1000000)));
+  ASSERT_OK(Put(1, Key(10), DummyString(1000000)));
+  ASSERT_OK(Put(1, Key(10), DummyString(1000000)));
+
+  ASSERT_OK(Put(3, Key(10), DummyString(1)));
+  // Make 'dobrynia' flush and a new WAL file be created.
+  ASSERT_OK(Put(2, Key(10), DummyString(7500000)));
+  ASSERT_OK(Put(2, Key(1), DummyString(1)));
+  dbfull()->TEST_WaitForFlushMemTable(handles_[2]);
+  {
+    auto tables = ListTableFiles(env_, dbname_);
+    ASSERT_EQ(tables.size(), static_cast<size_t>(1));
+    // Make sure 'dobrynia' was flushed: check the SST file count.
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
+              static_cast<uint64_t>(1));
+  }
+  // New WAL file.
+  ASSERT_OK(Put(1, Key(1), DummyString(1)));
+  ASSERT_OK(Put(1, Key(1), DummyString(1)));
+  ASSERT_OK(Put(3, Key(10), DummyString(1)));
+  ASSERT_OK(Put(3, Key(10), DummyString(1)));
+  ASSERT_OK(Put(3, Key(10), DummyString(1)));
+
+  options.write_buffer_size = 4096;
+  options.arena_block_size = 4096;
+  ReopenWithColumnFamilies({"default", "pikachu", "dobrynia", "nikitich"},
+                           options);
+  {
+    // No inserts => 'default' is empty.
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
+              static_cast<uint64_t>(0));
+    // The first 4 keys go to separate SSTs + 1 more SST for the 2 smaller
+    // keys.
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
+              static_cast<uint64_t>(5));
+    // 1 SST for the big key + 1 SST for the small one.
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
+              static_cast<uint64_t>(2));
+    // 1 SST for all keys.
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
+              static_cast<uint64_t>(1));
+  }
+}
+
+// In https://reviews.facebook.net/D20661 we changed the recovery behavior:
+// previously, for each log file, each column family's memtable was flushed,
+// even if it was empty.
+// Now we try to create the smallest number of table files by merging
+// updates from multiple logs.
+TEST_F(DBWALTest, RecoverCheckFileAmount) {
+  Options options = CurrentOptions();
+  options.write_buffer_size = 100000;
+  options.arena_block_size = 4 * 1024;
+  options.avoid_flush_during_recovery = false;
+  CreateAndReopenWithCF({"pikachu", "dobrynia", "nikitich"}, options);
+
+  ASSERT_OK(Put(0, Key(1), DummyString(1)));
+  ASSERT_OK(Put(1, Key(1), DummyString(1)));
+  ASSERT_OK(Put(2, Key(1), DummyString(1)));
+
+  // Make the 'nikitich' memtable flush.
+  ASSERT_OK(Put(3, Key(10), DummyString(1002400)));
+  ASSERT_OK(Put(3, Key(1), DummyString(1)));
+  dbfull()->TEST_WaitForFlushMemTable(handles_[3]);
+  // 4 memtables are not flushed; 1 SST file.
+  {
+    auto tables = ListTableFiles(env_, dbname_);
+    ASSERT_EQ(tables.size(), static_cast<size_t>(1));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
+              static_cast<uint64_t>(1));
+  }
+  // The memtable for 'nikitich' has been flushed and a new WAL file opened;
+  // 4 memtables are still not flushed.
+
+  // Write to the new WAL file.
+  ASSERT_OK(Put(0, Key(1), DummyString(1)));
+  ASSERT_OK(Put(1, Key(1), DummyString(1)));
+  ASSERT_OK(Put(2, Key(1), DummyString(1)));
+
+  // Fill up 'nikitich' one more time.
+  ASSERT_OK(Put(3, Key(10), DummyString(1002400)));
+  // Make it flush.
+  ASSERT_OK(Put(3, Key(1), DummyString(1)));
+  dbfull()->TEST_WaitForFlushMemTable(handles_[3]);
+  // There are still 4 memtables not flushed, and 2 SST tables.
+  ASSERT_OK(Put(0, Key(1), DummyString(1)));
+  ASSERT_OK(Put(1, Key(1), DummyString(1)));
+  ASSERT_OK(Put(2, Key(1), DummyString(1)));
+
+  {
+    auto tables = ListTableFiles(env_, dbname_);
+    ASSERT_EQ(tables.size(), static_cast<size_t>(2));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
+              static_cast<uint64_t>(2));
+  }
+
+  ReopenWithColumnFamilies({"default", "pikachu", "dobrynia", "nikitich"},
+                           options);
+  {
+    std::vector<uint64_t> table_files = ListTableFiles(env_, dbname_);
+    // Check that records for 'default', 'dobrynia' and 'pikachu' from the
+    // first, second and third WALs went to the same SST.
+    // So there are 6 SSTs: three for 'nikitich', one for 'default', one for
+    // 'dobrynia', and one for 'pikachu'.
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "default"),
+              static_cast<uint64_t>(1));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "nikitich"),
+              static_cast<uint64_t>(3));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "dobrynia"),
+              static_cast<uint64_t>(1));
+    ASSERT_EQ(GetNumberOfSstFilesForColumnFamily(db_, "pikachu"),
+              static_cast<uint64_t>(1));
+  }
+}
+
+TEST_F(DBWALTest, SyncMultipleLogs) {
+  const uint64_t kNumBatches = 2;
+  const int kBatchSize = 1000;
+
+  Options options = CurrentOptions();
+  options.create_if_missing = true;
+  options.write_buffer_size = 4096;
+  Reopen(options);
+
+  WriteBatch batch;
+  WriteOptions wo;
+  wo.sync = true;
+
+  for (uint64_t b = 0; b < kNumBatches; b++) {
+    batch.Clear();
+    for (int i = 0; i < kBatchSize; i++) {
+      batch.Put(Key(i), DummyString(128));
+    }
+
+    dbfull()->Write(wo, &batch);
+  }
+
+  ASSERT_OK(dbfull()->SyncWAL());
+}
+
+//
+// Test WAL recovery for the various modes available
+//
+class RecoveryTestHelper {
+ public:
+  // Number of WAL files to generate
+  static const int kWALFilesCount = 10;
+  // Starting number for the WAL file name, like 00010.log
+  static const int kWALFileOffset = 10;
+  // Keys to be written per WAL file
+  static const int kKeysPerWALFile = 1024;
+  // Size of the value
+  static const int kValueSize = 10;
+
+  // Create WAL files with values filled in
+  static void FillData(DBWALTest* test, const Options& options,
+                       const size_t wal_count, size_t* count) {
+    const DBOptions& db_options = options;
+
+    *count = 0;
+
+    shared_ptr<Cache> table_cache = NewLRUCache(50000, 16);
+    EnvOptions env_options;
+    WriteBufferManager write_buffer_manager(db_options.db_write_buffer_size);
+
+    unique_ptr<VersionSet> versions;
+    unique_ptr<WalManager> wal_manager;
+    WriteController write_controller;
+
+    versions.reset(new VersionSet(test->dbname_, &db_options, env_options,
+                                  table_cache.get(), &write_buffer_manager,
+                                  &write_controller));
+
+    wal_manager.reset(new WalManager(db_options, env_options));
+
+    std::unique_ptr<log::Writer> current_log_writer;
+
+    for (size_t j = kWALFileOffset; j < wal_count + kWALFileOffset; j++) {
+      uint64_t current_log_number = j;
+      std::string fname = LogFileName(test->dbname_, current_log_number);
+      unique_ptr<WritableFile> file;
+      ASSERT_OK(db_options.env->NewWritableFile(fname, &file, env_options));
+      unique_ptr<WritableFileWriter> file_writer(
+          new WritableFileWriter(std::move(file), env_options));
+      current_log_writer.reset(
+          new log::Writer(std::move(file_writer), current_log_number,
+                          db_options.recycle_log_file_num > 0));
+
+      for (int i = 0; i < kKeysPerWALFile; i++) {
+        std::string key = "key" + ToString((*count)++);
+        std::string value = test->DummyString(kValueSize);
+        assert(current_log_writer.get() != nullptr);
+        uint64_t seq = versions->LastSequence() + 1;
+        WriteBatch batch;
+        batch.Put(key, value);
+        WriteBatchInternal::SetSequence(&batch, seq);
+        current_log_writer->AddRecord(WriteBatchInternal::Contents(&batch));
+        versions->SetLastSequence(seq);
+      }
+    }
+  }
+
+  // Recreate and fill the store with some data
+  static size_t FillData(DBWALTest* test, Options* options) {
+    options->create_if_missing = true;
+    test->DestroyAndReopen(*options);
+    test->Close();
+
+    size_t count = 0;
+    FillData(test, *options, kWALFilesCount, &count);
+    return count;
+  }
+
+  // Read back all the keys we wrote and return the number of keys found
+  static size_t GetData(DBWALTest* test) {
+    size_t count = 0;
+    for (size_t i = 0; i < kWALFilesCount * kKeysPerWALFile; i++) {
+      if (test->Get("key" + ToString(i)) != "NOT_FOUND") {
+        ++count;
+      }
+    }
+    return count;
+  }
+
+  // Manually corrupt the specified WAL
+  static void CorruptWAL(DBWALTest* test, const Options& options,
+                         const double off, const double len,
+                         const int wal_file_id, const bool trunc = false) {
+    Env* env = options.env;
+    std::string fname = LogFileName(test->dbname_, wal_file_id);
+    uint64_t size;
+    ASSERT_OK(env->GetFileSize(fname, &size));
+    ASSERT_GT(size, 0);
+#ifdef OS_WIN
+    // The Windows disk cache behaves differently: when we truncate, the
+    // original content is still in the cache because the original handle is
+    // still open. Generally, Windows prohibits shared access to files; it is
+    // not needed for the WAL, but we allow it here to induce corruption in
+    // various tests.
+    test->Close();
+#endif
+    if (trunc) {
+      ASSERT_EQ(0, truncate(fname.c_str(), static_cast<int64_t>(size * off)));
+    } else {
+      InduceCorruption(fname, static_cast<size_t>(size * off),
+                       static_cast<size_t>(size * len));
+    }
+  }
+
+  // Overwrite data with 'a' from offset for length len
+  static void InduceCorruption(const std::string& filename, size_t offset,
+                               size_t len) {
+    ASSERT_GT(len, 0U);
+
+    int fd = open(filename.c_str(), O_RDWR);
+
+    // On Windows, long is 32-bit
+    ASSERT_LE(offset, std::numeric_limits<long>::max());
+
+    ASSERT_GT(fd, 0);
+    ASSERT_EQ(offset, lseek(fd, static_cast<long>(offset), SEEK_SET));
+
+    void* buf = alloca(len);
+    memset(buf, 'a', len);
+    ASSERT_EQ(len, write(fd, buf, static_cast<unsigned int>(len)));
+
+    close(fd);
+  }
+};
+
+// Test scope:
+// - We expect to open the data store when there are incomplete trailing
+//   writes at the end of any of the logs.
+// - We do not expect to open the data store for corruption.
+TEST_F(DBWALTest, kTolerateCorruptedTailRecords) {
+  const int jstart = RecoveryTestHelper::kWALFileOffset;
+  const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
+
+  for (auto trunc : {true, false}) {        /* Corruption style */
+    for (int i = 0; i < 4; i++) {           /* Corruption offset position */
+      for (int j = jstart; j < jend; j++) { /* WAL file */
+        // Fill data for testing
+        Options options = CurrentOptions();
+        const size_t row_count = RecoveryTestHelper::FillData(this, &options);
+        // Test checksum failure or parsing
+        RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
+                                       /*len%=*/.1, /*wal=*/j, trunc);
+
+        if (trunc) {
+          options.wal_recovery_mode =
+              WALRecoveryMode::kTolerateCorruptedTailRecords;
+          options.create_if_missing = false;
+          ASSERT_OK(TryReopen(options));
+          const size_t recovered_row_count = RecoveryTestHelper::GetData(this);
+          ASSERT_TRUE(i == 0 || recovered_row_count > 0);
+          ASSERT_LT(recovered_row_count, row_count);
+        } else {
+          options.wal_recovery_mode =
+              WALRecoveryMode::kTolerateCorruptedTailRecords;
+          ASSERT_NOK(TryReopen(options));
+        }
+      }
+    }
+  }
+}
+
+// Test scope:
+// We don't expect the data store to be opened if there is any corruption
+// (leading, middle or trailing -- incomplete writes or corruption)
+TEST_F(DBWALTest, kAbsoluteConsistency) {
+  const int jstart = RecoveryTestHelper::kWALFileOffset;
+  const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
+
+  // Verify clean-slate behavior
+  Options options = CurrentOptions();
+  const size_t row_count = RecoveryTestHelper::FillData(this, &options);
+  options.wal_recovery_mode = WALRecoveryMode::kAbsoluteConsistency;
+  options.create_if_missing = false;
+  ASSERT_OK(TryReopen(options));
+  ASSERT_EQ(RecoveryTestHelper::GetData(this), row_count);
+
+  for (auto trunc : {true, false}) { /* Corruption style */
+    for (int i = 0; i < 4; i++) {    /* Corruption offset position */
+      if (trunc && i == 0) {
+        continue;
+      }
+
+      for (int j = jstart; j < jend; j++) { /* WAL files */
+        // Fill with new data
+        RecoveryTestHelper::FillData(this, &options);
+        // Corrupt the WAL
+        RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
+                                       /*len%=*/.1, j, trunc);
+        // Verify
+        options.wal_recovery_mode = WALRecoveryMode::kAbsoluteConsistency;
+        options.create_if_missing = false;
+        ASSERT_NOK(TryReopen(options));
+      }
+    }
+  }
+}
+
+// Test scope:
+// - We expect to open the data store under all circumstances.
+// - We expect only data up to the point where the first error was
+//   encountered.
+TEST_F(DBWALTest, kPointInTimeRecovery) {
+  const int jstart = RecoveryTestHelper::kWALFileOffset;
+  const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
+  const int maxkeys =
+      RecoveryTestHelper::kWALFilesCount * RecoveryTestHelper::kKeysPerWALFile;
+
+  for (auto trunc : {true, false}) {        /* Corruption style */
+    for (int i = 0; i < 4; i++) {           /* Offset of corruption */
+      for (int j = jstart; j < jend; j++) { /* WAL file */
+        // Fill data for testing
+        Options options = CurrentOptions();
+        const size_t row_count = RecoveryTestHelper::FillData(this, &options);
+
+        // Corrupt the WAL
+        RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
+                                       /*len%=*/.1, j, trunc);
+
+        // Verify
+        options.wal_recovery_mode = WALRecoveryMode::kPointInTimeRecovery;
+        options.create_if_missing = false;
+        ASSERT_OK(TryReopen(options));
+
+        // Probe data for invariants: recovered keys must form a contiguous
+        // prefix, so once one key is missing, all later keys are missing too.
+        size_t recovered_row_count = RecoveryTestHelper::GetData(this);
+        ASSERT_LT(recovered_row_count, row_count);
+
+        bool expect_data = true;
+        for (int k = 0; k < maxkeys; ++k) {
+          bool found = Get("key" + ToString(k)) != "NOT_FOUND";
+          if (expect_data && !found) {
+            expect_data = false;
+          }
+          ASSERT_EQ(found, expect_data);
+        }
+
+        const size_t min = RecoveryTestHelper::kKeysPerWALFile *
+                           (j - RecoveryTestHelper::kWALFileOffset);
+        ASSERT_GE(recovered_row_count, min);
+        if (!trunc && i != 0) {
+          const size_t max = RecoveryTestHelper::kKeysPerWALFile *
+                             (j - RecoveryTestHelper::kWALFileOffset + 1);
+          ASSERT_LE(recovered_row_count, max);
+        }
+      }
+    }
+  }
+}
+
+// Test scope:
+// - We expect to open the data store under all scenarios.
+// - We expect to have recovered records past the corruption zone.
+TEST_F(DBWALTest, kSkipAnyCorruptedRecords) {
+  const int jstart = RecoveryTestHelper::kWALFileOffset;
+  const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
+
+  for (auto trunc : {true, false}) {        /* Corruption style */
+    for (int i = 0; i < 4; i++) {           /* Corruption offset */
+      for (int j = jstart; j < jend; j++) { /* WAL files */
+        // Fill data for testing
+        Options options = CurrentOptions();
+        const size_t row_count = RecoveryTestHelper::FillData(this, &options);
+
+        // Corrupt the WAL
+        RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
+                                       /*len%=*/.1, j, trunc);
+
+        // Verify behavior
+        options.wal_recovery_mode = WALRecoveryMode::kSkipAnyCorruptedRecords;
+        options.create_if_missing = false;
+        ASSERT_OK(TryReopen(options));
+
+        // Probe data for invariants
+        size_t recovered_row_count = RecoveryTestHelper::GetData(this);
+        ASSERT_LT(recovered_row_count, row_count);
+
+        if (!trunc) {
+          ASSERT_TRUE(i != 0 || recovered_row_count > 0);
+        }
+      }
+    }
+  }
+}
+
+TEST_F(DBWALTest, AvoidFlushDuringRecovery) {
+  Options options = CurrentOptions();
+  options.disable_auto_compactions = true;
+  options.avoid_flush_during_recovery = false;
+
+  // Test with flush after recovery.
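+  // (Descriptive note, not in the upstream test: with
+  // avoid_flush_during_recovery == false, WAL records replayed during
+  // DB::Open are flushed into new SST files, which is why TotalTableFiles()
+  // grows across the Reopen() calls below.)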
+ Reopen(options); + ASSERT_OK(Put("foo", "v1")); + ASSERT_OK(Put("bar", "v2")); + ASSERT_OK(Flush()); + ASSERT_OK(Put("foo", "v3")); + ASSERT_OK(Put("bar", "v4")); + ASSERT_EQ(1, TotalTableFiles()); + // Reopen DB. Check if WAL logs flushed. + Reopen(options); + ASSERT_EQ("v3", Get("foo")); + ASSERT_EQ("v4", Get("bar")); + ASSERT_EQ(2, TotalTableFiles()); + + // Test without flush after recovery. + options.avoid_flush_during_recovery = true; + DestroyAndReopen(options); + ASSERT_OK(Put("foo", "v5")); + ASSERT_OK(Put("bar", "v6")); + ASSERT_OK(Flush()); + ASSERT_OK(Put("foo", "v7")); + ASSERT_OK(Put("bar", "v8")); + ASSERT_EQ(1, TotalTableFiles()); + // Reopen DB. WAL logs should not be flushed this time. + Reopen(options); + ASSERT_EQ("v7", Get("foo")); + ASSERT_EQ("v8", Get("bar")); + ASSERT_EQ(1, TotalTableFiles()); + + // Force flush with allow_2pc. + options.avoid_flush_during_recovery = true; + options.allow_2pc = true; + ASSERT_OK(Put("foo", "v9")); + ASSERT_OK(Put("bar", "v10")); + ASSERT_OK(Flush()); + ASSERT_OK(Put("foo", "v11")); + ASSERT_OK(Put("bar", "v12")); + Reopen(options); + ASSERT_EQ("v11", Get("foo")); + ASSERT_EQ("v12", Get("bar")); + ASSERT_EQ(2, TotalTableFiles()); +} + +TEST_F(DBWALTest, RecoverWithoutFlush) { + Options options = CurrentOptions(); + options.avoid_flush_during_recovery = true; + options.create_if_missing = false; + options.disable_auto_compactions = true; + options.write_buffer_size = 64 * 1024 * 1024; + + size_t count = RecoveryTestHelper::FillData(this, &options); + auto validateData = [this, count]() { + for (size_t i = 0; i < count; i++) { + ASSERT_NE(Get("key" + ToString(i)), "NOT_FOUND"); + } + }; + Reopen(options); + validateData(); + // Insert some data without flush + ASSERT_OK(Put("foo", "foo_v1")); + ASSERT_OK(Put("bar", "bar_v1")); + Reopen(options); + validateData(); + ASSERT_EQ(Get("foo"), "foo_v1"); + ASSERT_EQ(Get("bar"), "bar_v1"); + // Insert again and reopen + ASSERT_OK(Put("foo", "foo_v2")); + ASSERT_OK(Put("bar", "bar_v2")); + Reopen(options); + validateData(); + ASSERT_EQ(Get("foo"), "foo_v2"); + ASSERT_EQ(Get("bar"), "bar_v2"); + // manual flush and insert again + Flush(); + ASSERT_EQ(Get("foo"), "foo_v2"); + ASSERT_EQ(Get("bar"), "bar_v2"); + ASSERT_OK(Put("foo", "foo_v3")); + ASSERT_OK(Put("bar", "bar_v3")); + Reopen(options); + validateData(); + ASSERT_EQ(Get("foo"), "foo_v3"); + ASSERT_EQ(Get("bar"), "bar_v3"); +} + +TEST_F(DBWALTest, RecoverWithoutFlushMultipleCF) { + const std::string kSmallValue = "v"; + const std::string kLargeValue = DummyString(1024); + Options options = CurrentOptions(); + options.avoid_flush_during_recovery = true; + options.create_if_missing = false; + options.disable_auto_compactions = true; + + auto countWalFiles = [this]() { + VectorLogPtr log_files; + dbfull()->GetSortedWalFiles(log_files); + return log_files.size(); + }; + + // Create DB with multiple column families and multiple log files. + CreateAndReopenWithCF({"one", "two"}, options); + ASSERT_OK(Put(0, "key1", kSmallValue)); + ASSERT_OK(Put(1, "key2", kLargeValue)); + Flush(1); + ASSERT_EQ(1, countWalFiles()); + ASSERT_OK(Put(0, "key3", kSmallValue)); + ASSERT_OK(Put(2, "key4", kLargeValue)); + Flush(2); + ASSERT_EQ(2, countWalFiles()); + + // Reopen, insert and flush. 
+  options.db_write_buffer_size = 64 * 1024 * 1024;
+  ReopenWithColumnFamilies({"default", "one", "two"}, options);
+  ASSERT_EQ(Get(0, "key1"), kSmallValue);
+  ASSERT_EQ(Get(1, "key2"), kLargeValue);
+  ASSERT_EQ(Get(0, "key3"), kSmallValue);
+  ASSERT_EQ(Get(2, "key4"), kLargeValue);
+  // Insert more data.
+  ASSERT_OK(Put(0, "key5", kLargeValue));
+  ASSERT_OK(Put(1, "key6", kLargeValue));
+  ASSERT_EQ(3, countWalFiles());
+  Flush(1);
+  ASSERT_OK(Put(2, "key7", kLargeValue));
+  ASSERT_EQ(4, countWalFiles());
+
+  // Reopen twice and validate.
+  for (int i = 0; i < 2; i++) {
+    ReopenWithColumnFamilies({"default", "one", "two"}, options);
+    ASSERT_EQ(Get(0, "key1"), kSmallValue);
+    ASSERT_EQ(Get(1, "key2"), kLargeValue);
+    ASSERT_EQ(Get(0, "key3"), kSmallValue);
+    ASSERT_EQ(Get(2, "key4"), kLargeValue);
+    ASSERT_EQ(Get(0, "key5"), kLargeValue);
+    ASSERT_EQ(Get(1, "key6"), kLargeValue);
+    ASSERT_EQ(Get(2, "key7"), kLargeValue);
+    ASSERT_EQ(4, countWalFiles());
+  }
+}
+
+// In this test we are trying to do the following:
+//   1. Create a DB with a corrupted WAL log;
+//   2. Open it with avoid_flush_during_recovery = true;
+//   3. Append more data without flushing, which creates a new WAL log;
+//   4. Open again. See if it can correctly handle the previous corruption.
+TEST_F(DBWALTest, RecoverFromCorruptedWALWithoutFlush) {
+  const int jstart = RecoveryTestHelper::kWALFileOffset;
+  const int jend = jstart + RecoveryTestHelper::kWALFilesCount;
+  const int kAppendKeys = 100;
+  Options options = CurrentOptions();
+  options.avoid_flush_during_recovery = true;
+  options.create_if_missing = false;
+  options.disable_auto_compactions = true;
+  options.write_buffer_size = 64 * 1024 * 1024;
+
+  auto getAll = [this]() {
+    std::vector<std::pair<std::string, std::string>> data;
+    ReadOptions ropt;
+    Iterator* iter = dbfull()->NewIterator(ropt);
+    for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+      data.push_back(
+          std::make_pair(iter->key().ToString(), iter->value().ToString()));
+    }
+    delete iter;
+    return data;
+  };
+  for (auto& mode : wal_recovery_mode_string_map) {
+    options.wal_recovery_mode = mode.second;
+    for (auto trunc : {true, false}) {
+      for (int i = 0; i < 4; i++) {
+        for (int j = jstart; j < jend; j++) {
+          // Create a corrupted WAL
+          RecoveryTestHelper::FillData(this, &options);
+          RecoveryTestHelper::CorruptWAL(this, options, /*off=*/i * .3,
+                                         /*len%=*/.1, /*wal=*/j, trunc);
+          // Skip the test if the DB won't open.
+          if (!TryReopen(options).ok()) {
+            ASSERT_TRUE(options.wal_recovery_mode ==
+                            WALRecoveryMode::kAbsoluteConsistency ||
+                        (!trunc &&
+                         options.wal_recovery_mode ==
+                             WALRecoveryMode::kTolerateCorruptedTailRecords));
+            continue;
+          }
+          ASSERT_OK(TryReopen(options));
+          // Append some more data.
+          for (int k = 0; k < kAppendKeys; k++) {
+            std::string key = "extra_key" + ToString(k);
+            std::string value = DummyString(RecoveryTestHelper::kValueSize);
+            ASSERT_OK(Put(key, value));
+          }
+          // Save data for comparison.
+          auto data = getAll();
+          // Reopen. Verify data.
+          ASSERT_OK(TryReopen(options));
+          auto actual_data = getAll();
+          ASSERT_EQ(data, actual_data);
+        }
+      }
+    }
+  }
+}
+
+#endif  // ROCKSDB_LITE
+
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+  rocksdb::port::InstallStackTraceHandler();
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/external/rocksdb/db/dbformat.cc b/external/rocksdb/db/dbformat.cc
new file mode 100755
index 0000000000..d840aea86b
--- /dev/null
+++ b/external/rocksdb/db/dbformat.cc
@@ -0,0 +1,162 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "db/dbformat.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+#include <stdio.h>
+#include "port/port.h"
+#include "util/coding.h"
+#include "util/perf_context_imp.h"
+
+namespace rocksdb {
+
+uint64_t PackSequenceAndType(uint64_t seq, ValueType t) {
+  assert(seq <= kMaxSequenceNumber);
+  assert(IsValueType(t));
+  return (seq << 8) | t;
+}
+
+void UnPackSequenceAndType(uint64_t packed, uint64_t* seq, ValueType* t) {
+  *seq = packed >> 8;
+  *t = static_cast<ValueType>(packed & 0xff);
+
+  assert(*seq <= kMaxSequenceNumber);
+  assert(IsValueType(*t));
+}
+
+void AppendInternalKey(std::string* result, const ParsedInternalKey& key) {
+  result->append(key.user_key.data(), key.user_key.size());
+  PutFixed64(result, PackSequenceAndType(key.sequence, key.type));
+}
+
+std::string ParsedInternalKey::DebugString(bool hex) const {
+  char buf[50];
+  snprintf(buf, sizeof(buf), "' @ %" PRIu64 ": %d", sequence,
+           static_cast<int>(type));
+  std::string result = "'";
+  result += user_key.ToString(hex);
+  result += buf;
+  return result;
+}
+
+std::string InternalKey::DebugString(bool hex) const {
+  std::string result;
+  ParsedInternalKey parsed;
+  if (ParseInternalKey(rep_, &parsed)) {
+    result = parsed.DebugString(hex);
+  } else {
+    result = "(bad)";
+    result.append(EscapeString(rep_));
+  }
+  return result;
+}
+
+const char* InternalKeyComparator::Name() const {
+  return name_.c_str();
+}
+
+int InternalKeyComparator::Compare(const Slice& akey, const Slice& bkey) const {
+  // Order by:
+  //    increasing user key (according to user-supplied comparator)
+  //    decreasing sequence number
+  //    decreasing type (though sequence# should be enough to disambiguate)
+  int r = user_comparator_->Compare(ExtractUserKey(akey), ExtractUserKey(bkey));
+  PERF_COUNTER_ADD(user_key_comparison_count, 1);
+  if (r == 0) {
+    const uint64_t anum = DecodeFixed64(akey.data() + akey.size() - 8);
+    const uint64_t bnum = DecodeFixed64(bkey.data() + bkey.size() - 8);
+    if (anum > bnum) {
+      r = -1;
+    } else if (anum < bnum) {
+      r = +1;
+    }
+  }
+  return r;
+}
+
+int InternalKeyComparator::Compare(const ParsedInternalKey& a,
+                                   const ParsedInternalKey& b) const {
+  // Order by:
+  //    increasing user key (according to user-supplied comparator)
+  //    decreasing sequence number
+  //    decreasing type (though sequence# should be enough to disambiguate)
+  int r = user_comparator_->Compare(a.user_key, b.user_key);
+  PERF_COUNTER_ADD(user_key_comparison_count, 1);
+  if (r == 0) {
+    if (a.sequence > b.sequence) {
+      r = -1;
+    } else if (a.sequence < b.sequence) {
+      r = +1;
+    } else if (a.type > b.type) {
+      r = -1;
+    } else if (a.type < b.type) {
+      r = +1;
+    }
+  }
+  return r;
+}
+
+void InternalKeyComparator::FindShortestSeparator(
+    std::string* start,
+    const Slice& limit) const {
+  // Attempt to shorten the user portion of the key
+  Slice user_start = ExtractUserKey(*start);
+  Slice user_limit = ExtractUserKey(limit);
+  std::string tmp(user_start.data(), user_start.size());
+  user_comparator_->FindShortestSeparator(&tmp, user_limit);
+  if (tmp.size() < user_start.size() &&
+      user_comparator_->Compare(user_start, tmp) < 0) {
+    // User key has become shorter physically, but larger logically.
+    // Tack on the earliest possible number to the shortened user key.
+    PutFixed64(&tmp,
+               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
+    assert(this->Compare(*start, tmp) < 0);
+    assert(this->Compare(tmp, limit) < 0);
+    start->swap(tmp);
+  }
+}
+
+void InternalKeyComparator::FindShortSuccessor(std::string* key) const {
+  Slice user_key = ExtractUserKey(*key);
+  std::string tmp(user_key.data(), user_key.size());
+  user_comparator_->FindShortSuccessor(&tmp);
+  if (tmp.size() < user_key.size() &&
+      user_comparator_->Compare(user_key, tmp) < 0) {
+    // User key has become shorter physically, but larger logically.
+    // Tack on the earliest possible number to the shortened user key.
+    PutFixed64(&tmp,
+               PackSequenceAndType(kMaxSequenceNumber, kValueTypeForSeek));
+    assert(this->Compare(*key, tmp) < 0);
+    key->swap(tmp);
+  }
+}
+
+LookupKey::LookupKey(const Slice& _user_key, SequenceNumber s) {
+  size_t usize = _user_key.size();
+  size_t needed = usize + 13;  // A conservative estimate
+  char* dst;
+  if (needed <= sizeof(space_)) {
+    dst = space_;
+  } else {
+    dst = new char[needed];
+  }
+  start_ = dst;
+  // NOTE: We don't support user keys of more than 2GB :)
+  dst = EncodeVarint32(dst, static_cast<uint32_t>(usize + 8));
+  kstart_ = dst;
+  memcpy(dst, _user_key.data(), usize);
+  dst += usize;
+  EncodeFixed64(dst, PackSequenceAndType(s, kValueTypeForSeek));
+  dst += 8;
+  end_ = dst;
+}
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/dbformat.h b/external/rocksdb/db/dbformat.h
new file mode 100755
index 0000000000..5bd154a7a9
--- /dev/null
+++ b/external/rocksdb/db/dbformat.h
@@ -0,0 +1,491 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#include <stdio.h>
+#include <string>
+#include "rocksdb/comparator.h"
+#include "rocksdb/db.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/table.h"
+#include "rocksdb/types.h"
+#include "util/coding.h"
+#include "util/logging.h"
+
+namespace rocksdb {
+
+class InternalKey;
+
+// Value types encoded as the last component of internal keys.
+// DO NOT CHANGE THESE ENUM VALUES: they are embedded in the on-disk
+// data structures.
+// The highest bit of the value type needs to be reserved to SST tables
+// for them to do more flexible encoding.
+enum ValueType : unsigned char {
+  kTypeDeletion = 0x0,
+  kTypeValue = 0x1,
+  kTypeMerge = 0x2,
+  kTypeLogData = 0x3,               // WAL only.
+  kTypeColumnFamilyDeletion = 0x4,  // WAL only.
+  kTypeColumnFamilyValue = 0x5,     // WAL only.
+  kTypeColumnFamilyMerge = 0x6,     // WAL only.
+  kTypeSingleDeletion = 0x7,
+  kTypeColumnFamilySingleDeletion = 0x8,  // WAL only.
+  kTypeBeginPrepareXID = 0x9,             // WAL only.
+  kTypeEndPrepareXID = 0xA,               // WAL only.
+  kTypeCommitXID = 0xB,                   // WAL only.
+  kTypeRollbackXID = 0xC,                 // WAL only.
+  kTypeNoop = 0xD,                        // WAL only.
+  kMaxValue = 0x7F                        // Not used for storing records.
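+  // (Descriptive note, not in the upstream header: of the types above, only
+  // kTypeDeletion, kTypeValue, kTypeMerge and kTypeSingleDeletion can appear
+  // in memtables and SST files; the rest are WAL-only markers. See
+  // IsValueType() below.)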
+};
+
+// kValueTypeForSeek defines the ValueType that should be passed when
+// constructing a ParsedInternalKey object for seeking to a particular
+// sequence number (since we sort sequence numbers in decreasing order
+// and the value type is embedded as the low 8 bits in the sequence
+// number in internal keys, we need to use the highest-numbered
+// ValueType, not the lowest).
+static const ValueType kValueTypeForSeek = kTypeSingleDeletion;
+
+// Checks whether a type is a value type (i.e. a type used in memtables and
+// sst files).
+inline bool IsValueType(ValueType t) {
+  return t <= kTypeMerge || t == kTypeSingleDeletion;
+}
+
+// We leave eight bits empty at the bottom so a type and sequence#
+// can be packed together into 64-bits.
+static const SequenceNumber kMaxSequenceNumber = ((0x1ull << 56) - 1);
+
+struct ParsedInternalKey {
+  Slice user_key;
+  SequenceNumber sequence;
+  ValueType type;
+
+  ParsedInternalKey() {}  // Intentionally left uninitialized (for speed)
+  ParsedInternalKey(const Slice& u, const SequenceNumber& seq, ValueType t)
+      : user_key(u), sequence(seq), type(t) {}
+  std::string DebugString(bool hex = false) const;
+};
+
+// Return the length of the encoding of "key".
+inline size_t InternalKeyEncodingLength(const ParsedInternalKey& key) {
+  return key.user_key.size() + 8;
+}
+
+// Pack a sequence number and a ValueType into a uint64_t
+extern uint64_t PackSequenceAndType(uint64_t seq, ValueType t);
+
+// Given the result of PackSequenceAndType, store the sequence number in *seq
+// and the ValueType in *t.
+extern void UnPackSequenceAndType(uint64_t packed, uint64_t* seq,
+                                  ValueType* t);
+
+// Append the serialization of "key" to *result.
+extern void AppendInternalKey(std::string* result,
+                              const ParsedInternalKey& key);
+
+// Attempt to parse an internal key from "internal_key". On success,
+// stores the parsed data in "*result", and returns true.
+//
+// On error, returns false and leaves "*result" in an undefined state.
+extern bool ParseInternalKey(const Slice& internal_key,
+                             ParsedInternalKey* result);
+
+// Returns the user key portion of an internal key.
+inline Slice ExtractUserKey(const Slice& internal_key) {
+  assert(internal_key.size() >= 8);
+  return Slice(internal_key.data(), internal_key.size() - 8);
+}
+
+inline ValueType ExtractValueType(const Slice& internal_key) {
+  assert(internal_key.size() >= 8);
+  const size_t n = internal_key.size();
+  uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
+  unsigned char c = num & 0xff;
+  return static_cast<ValueType>(c);
+}
+
+// A comparator for internal keys that uses a specified comparator for
+// the user key portion and breaks ties by decreasing sequence number.
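+// For instance (an illustrative aside, not part of the upstream header),
+// under this comparator "foo"@seq=7 sorts before "foo"@seq=5: the higher
+// sequence number is the more recent entry for the same user key and must
+// be encountered first during iteration.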
+class InternalKeyComparator : public Comparator {
+ private:
+  const Comparator* user_comparator_;
+  std::string name_;
+
+ public:
+  explicit InternalKeyComparator(const Comparator* c)
+      : user_comparator_(c),
+        name_("rocksdb.InternalKeyComparator:" +
+              std::string(user_comparator_->Name())) {}
+  virtual ~InternalKeyComparator() {}
+
+  virtual const char* Name() const override;
+  virtual int Compare(const Slice& a, const Slice& b) const override;
+  virtual void FindShortestSeparator(std::string* start,
+                                     const Slice& limit) const override;
+  virtual void FindShortSuccessor(std::string* key) const override;
+
+  const Comparator* user_comparator() const { return user_comparator_; }
+
+  int Compare(const InternalKey& a, const InternalKey& b) const;
+  int Compare(const ParsedInternalKey& a, const ParsedInternalKey& b) const;
+};
+
+// Modules in this directory should keep internal keys wrapped inside
+// the following class instead of plain strings so that we do not
+// incorrectly use string comparisons instead of an InternalKeyComparator.
+class InternalKey {
+ private:
+  std::string rep_;
+
+ public:
+  InternalKey() {}  // Leave rep_ as empty to indicate it is invalid
+  InternalKey(const Slice& _user_key, SequenceNumber s, ValueType t) {
+    AppendInternalKey(&rep_, ParsedInternalKey(_user_key, s, t));
+  }
+
+  // Sets the internal key to be bigger than or equal to all internal keys
+  // with this user key.
+  void SetMaxPossibleForUserKey(const Slice& _user_key) {
+    AppendInternalKey(&rep_, ParsedInternalKey(_user_key, kMaxSequenceNumber,
+                                               kValueTypeForSeek));
+  }
+
+  // Sets the internal key to be smaller than or equal to all internal keys
+  // with this user key.
+  void SetMinPossibleForUserKey(const Slice& _user_key) {
+    AppendInternalKey(
+        &rep_, ParsedInternalKey(_user_key, 0, static_cast<ValueType>(0)));
+  }
+
+  bool Valid() const {
+    ParsedInternalKey parsed;
+    return ParseInternalKey(Slice(rep_), &parsed);
+  }
+
+  void DecodeFrom(const Slice& s) { rep_.assign(s.data(), s.size()); }
+  Slice Encode() const {
+    assert(!rep_.empty());
+    return rep_;
+  }
+
+  Slice user_key() const { return ExtractUserKey(rep_); }
+  size_t size() { return rep_.size(); }
+
+  void Set(const Slice& _user_key, SequenceNumber s, ValueType t) {
+    SetFrom(ParsedInternalKey(_user_key, s, t));
+  }
+
+  void SetFrom(const ParsedInternalKey& p) {
+    rep_.clear();
+    AppendInternalKey(&rep_, p);
+  }
+
+  void Clear() { rep_.clear(); }
+
+  std::string DebugString(bool hex = false) const;
+};
+
+inline int InternalKeyComparator::Compare(const InternalKey& a,
+                                          const InternalKey& b) const {
+  return Compare(a.Encode(), b.Encode());
+}
+
+inline bool ParseInternalKey(const Slice& internal_key,
+                             ParsedInternalKey* result) {
+  const size_t n = internal_key.size();
+  if (n < 8) return false;
+  uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
+  unsigned char c = num & 0xff;
+  result->sequence = num >> 8;
+  result->type = static_cast<ValueType>(c);
+  assert(result->type <= ValueType::kMaxValue);
+  result->user_key = Slice(internal_key.data(), n - 8);
+  return IsValueType(result->type);
+}
+
+// Update the sequence number in the internal key.
+// Guarantees not to invalidate ikey.data().
+inline void UpdateInternalKey(std::string* ikey, uint64_t seq, ValueType t) {
+  size_t ikey_sz = ikey->size();
+  assert(ikey_sz >= 8);
+  uint64_t newval = (seq << 8) | t;
+
+  // Note: Since C++11, strings are guaranteed to be stored contiguously and
+  // string::operator[]() is guaranteed not to change ikey.data().
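+  // (Illustrative check, not in the upstream source: the tag layout written
+  // below matches PackSequenceAndType, e.g. seq 5 + kTypeValue -> 0x501.)
+  static_assert(((uint64_t{5} << 8) | kTypeValue) == 0x501,
+                "tag = (seq << 8) | type");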
+  EncodeFixed64(&(*ikey)[ikey_sz - 8], newval);
+}
+
+// Get the sequence number from the internal key
+inline uint64_t GetInternalKeySeqno(const Slice& internal_key) {
+  const size_t n = internal_key.size();
+  assert(n >= 8);
+  uint64_t num = DecodeFixed64(internal_key.data() + n - 8);
+  return num >> 8;
+}
+
+// A helper class useful for DBImpl::Get()
+class LookupKey {
+ public:
+  // Initialize *this for looking up user_key at a snapshot with
+  // the specified sequence number.
+  LookupKey(const Slice& _user_key, SequenceNumber sequence);
+
+  ~LookupKey();
+
+  // Return a key suitable for lookup in a MemTable.
+  Slice memtable_key() const {
+    return Slice(start_, static_cast<size_t>(end_ - start_));
+  }
+
+  // Return an internal key (suitable for passing to an internal iterator)
+  Slice internal_key() const {
+    return Slice(kstart_, static_cast<size_t>(end_ - kstart_));
+  }
+
+  // Return the user key
+  Slice user_key() const {
+    return Slice(kstart_, static_cast<size_t>(end_ - kstart_ - 8));
+  }
+
+ private:
+  // We construct a char array of the form:
+  //    klength  varint32            <-- start_
+  //    userkey  char[klength]       <-- kstart_
+  //    tag      uint64
+  //                                 <-- end_
+  // The array is a suitable MemTable key.
+  // The suffix starting with "userkey" can be used as an InternalKey.
+  const char* start_;
+  const char* kstart_;
+  const char* end_;
+  char space_[200];  // Avoid allocation for short keys
+
+  // No copying allowed
+  LookupKey(const LookupKey&);
+  void operator=(const LookupKey&);
+};
+
+inline LookupKey::~LookupKey() {
+  if (start_ != space_) delete[] start_;
+}
+
+class IterKey {
+ public:
+  IterKey()
+      : buf_(space_), buf_size_(sizeof(space_)), key_(buf_), key_size_(0) {}
+
+  ~IterKey() { ResetBuffer(); }
+
+  Slice GetKey() const { return Slice(key_, key_size_); }
+
+  Slice GetUserKey() const {
+    assert(key_size_ >= 8);
+    return Slice(key_, key_size_ - 8);
+  }
+
+  size_t Size() const { return key_size_; }
+
+  void Clear() { key_size_ = 0; }
+
+  // Append "non_shared_data" to its back, from "shared_len".
+  // This function is used in Block::Iter::ParseNextKey.
+  // shared_len: bytes in [0, shared_len-1] will be retained.
+  // non_shared_data: data to be appended; its length must be
+  // >= non_shared_len.
+  void TrimAppend(const size_t shared_len, const char* non_shared_data,
+                  const size_t non_shared_len) {
+    assert(shared_len <= key_size_);
+    size_t total_size = shared_len + non_shared_len;
+
+    if (IsKeyPinned() /* key is not in buf_ */) {
+      // Copy the key from external memory to buf_ (copy shared_len bytes)
+      EnlargeBufferIfNeeded(total_size);
+      memcpy(buf_, key_, shared_len);
+    } else if (total_size > buf_size_) {
+      // Need to allocate space; delete previous space
+      char* p = new char[total_size];
+      memcpy(p, key_, shared_len);
+
+      if (buf_ != space_) {
+        delete[] buf_;
+      }
+
+      buf_ = p;
+      buf_size_ = total_size;
+    }
+
+    memcpy(buf_ + shared_len, non_shared_data, non_shared_len);
+    key_ = buf_;
+    key_size_ = total_size;
+  }
+
+  Slice SetKey(const Slice& key, bool copy = true) {
+    size_t size = key.size();
+    if (copy) {
+      // Copy key to buf_
+      EnlargeBufferIfNeeded(size);
+      memcpy(buf_, key.data(), size);
+      key_ = buf_;
+    } else {
+      // Update key_ to point to external memory
+      key_ = key.data();
+    }
+    key_size_ = size;
+    return Slice(key_, key_size_);
+  }
+
+  // Copies the content of key, updates the reference to the user key in ikey
+  // and returns a Slice referencing the new copy.
+  Slice SetKey(const Slice& key, ParsedInternalKey* ikey) {
+    size_t key_n = key.size();
+    assert(key_n >= 8);
+    SetKey(key);
+    ikey->user_key = Slice(key_, key_n - 8);
+    return Slice(key_, key_n);
+  }
+
+  // Update the sequence number in the internal key. Guarantees not to
+  // invalidate slices to the key (and the user key).
+  void UpdateInternalKey(uint64_t seq, ValueType t) {
+    assert(!IsKeyPinned());
+    assert(key_size_ >= 8);
+    uint64_t newval = (seq << 8) | t;
+    EncodeFixed64(&buf_[key_size_ - 8], newval);
+  }
+
+  bool IsKeyPinned() const { return (key_ != buf_); }
+
+  void SetInternalKey(const Slice& key_prefix, const Slice& user_key,
+                      SequenceNumber s,
+                      ValueType value_type = kValueTypeForSeek) {
+    size_t psize = key_prefix.size();
+    size_t usize = user_key.size();
+    EnlargeBufferIfNeeded(psize + usize + sizeof(uint64_t));
+    if (psize > 0) {
+      memcpy(buf_, key_prefix.data(), psize);
+    }
+    memcpy(buf_ + psize, user_key.data(), usize);
+    EncodeFixed64(buf_ + usize + psize, PackSequenceAndType(s, value_type));
+
+    key_ = buf_;
+    key_size_ = psize + usize + sizeof(uint64_t);
+  }
+
+  void SetInternalKey(const Slice& user_key, SequenceNumber s,
+                      ValueType value_type = kValueTypeForSeek) {
+    SetInternalKey(Slice(), user_key, s, value_type);
+  }
+
+  void Reserve(size_t size) {
+    EnlargeBufferIfNeeded(size);
+    key_size_ = size;
+  }
+
+  void SetInternalKey(const ParsedInternalKey& parsed_key) {
+    SetInternalKey(Slice(), parsed_key);
+  }
+
+  void SetInternalKey(const Slice& key_prefix,
+                      const ParsedInternalKey& parsed_key_suffix) {
+    SetInternalKey(key_prefix, parsed_key_suffix.user_key,
+                   parsed_key_suffix.sequence, parsed_key_suffix.type);
+  }
+
+  void EncodeLengthPrefixedKey(const Slice& key) {
+    auto size = key.size();
+    EnlargeBufferIfNeeded(size + static_cast<size_t>(VarintLength(size)));
+    char* ptr = EncodeVarint32(buf_, static_cast<uint32_t>(size));
+    memcpy(ptr, key.data(), size);
+    key_ = buf_;
+  }
+
+ private:
+  char* buf_;
+  size_t buf_size_;
+  const char* key_;
+  size_t key_size_;
+  char space_[32];  // Avoid allocation for short keys
+
+  void ResetBuffer() {
+    if (buf_ != space_) {
+      delete[] buf_;
+      buf_ = space_;
+    }
+    buf_size_ = sizeof(space_);
+    key_size_ = 0;
+  }
+
+  // Enlarge the buffer size if needed based on key_size.
+  // By default, the statically allocated buffer is used. Once there is a key
+  // larger than the statically allocated buffer, another buffer is
+  // dynamically allocated, until a larger key buffer is requested. In that
+  // case, we reallocate the buffer and delete the old one.
+  void EnlargeBufferIfNeeded(size_t key_size) {
+    // If the size is smaller than the buffer size, continue using the
+    // current buffer, or the statically allocated one as the default.
+    if (key_size > buf_size_) {
+      // Need to enlarge the buffer.
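+      // (Descriptive note, not in the upstream source: ResetBuffer() first
+      // releases any previously heap-allocated buffer and points buf_ back
+      // at space_, so the allocation below is the only one kept alive.)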
+      ResetBuffer();
+      buf_ = new char[key_size];
+      buf_size_ = key_size;
+    }
+  }
+
+  // No copying allowed
+  IterKey(const IterKey&) = delete;
+  void operator=(const IterKey&) = delete;
+};
+
+class InternalKeySliceTransform : public SliceTransform {
+ public:
+  explicit InternalKeySliceTransform(const SliceTransform* transform)
+      : transform_(transform) {}
+
+  virtual const char* Name() const override { return transform_->Name(); }
+
+  virtual Slice Transform(const Slice& src) const override {
+    auto user_key = ExtractUserKey(src);
+    return transform_->Transform(user_key);
+  }
+
+  virtual bool InDomain(const Slice& src) const override {
+    auto user_key = ExtractUserKey(src);
+    return transform_->InDomain(user_key);
+  }
+
+  virtual bool InRange(const Slice& dst) const override {
+    auto user_key = ExtractUserKey(dst);
+    return transform_->InRange(user_key);
+  }
+
+  const SliceTransform* user_prefix_extractor() const { return transform_; }
+
+ private:
+  // Like the comparator, InternalKeySliceTransform will not take care of the
+  // deletion of transform_.
+  const SliceTransform* const transform_;
+};
+
+// Read the key of a record from a write batch.
+// If this record represents the default column family then cf_record
+// must be passed as false; otherwise it must be passed as true.
+extern bool ReadKeyFromWriteBatchEntry(Slice* input, Slice* key,
+                                       bool cf_record);
+
+// Read a record from a write batch piece from input.
+// tag, column_family, key, value and blob are return values. Callers own the
+// Slice they point to.
+// Tag is defined as ValueType.
+// input will be advanced to after the record.
+extern Status ReadRecordFromWriteBatch(Slice* input, char* tag,
+                                       uint32_t* column_family, Slice* key,
+                                       Slice* value, Slice* blob, Slice* xid);
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/dbformat_test.cc b/external/rocksdb/db/dbformat_test.cc
new file mode 100755
index 0000000000..b431690cc9
--- /dev/null
+++ b/external/rocksdb/db/dbformat_test.cc
@@ -0,0 +1,200 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+ +#include "db/dbformat.h" +#include "util/logging.h" +#include "util/testharness.h" + +namespace rocksdb { + +static std::string IKey(const std::string& user_key, + uint64_t seq, + ValueType vt) { + std::string encoded; + AppendInternalKey(&encoded, ParsedInternalKey(user_key, seq, vt)); + return encoded; +} + +static std::string Shorten(const std::string& s, const std::string& l) { + std::string result = s; + InternalKeyComparator(BytewiseComparator()).FindShortestSeparator(&result, l); + return result; +} + +static std::string ShortSuccessor(const std::string& s) { + std::string result = s; + InternalKeyComparator(BytewiseComparator()).FindShortSuccessor(&result); + return result; +} + +static void TestKey(const std::string& key, + uint64_t seq, + ValueType vt) { + std::string encoded = IKey(key, seq, vt); + + Slice in(encoded); + ParsedInternalKey decoded("", 0, kTypeValue); + + ASSERT_TRUE(ParseInternalKey(in, &decoded)); + ASSERT_EQ(key, decoded.user_key.ToString()); + ASSERT_EQ(seq, decoded.sequence); + ASSERT_EQ(vt, decoded.type); + + ASSERT_TRUE(!ParseInternalKey(Slice("bar"), &decoded)); +} + +class FormatTest : public testing::Test {}; + +TEST_F(FormatTest, InternalKey_EncodeDecode) { + const char* keys[] = { "", "k", "hello", "longggggggggggggggggggggg" }; + const uint64_t seq[] = { + 1, 2, 3, + (1ull << 8) - 1, 1ull << 8, (1ull << 8) + 1, + (1ull << 16) - 1, 1ull << 16, (1ull << 16) + 1, + (1ull << 32) - 1, 1ull << 32, (1ull << 32) + 1 + }; + for (unsigned int k = 0; k < sizeof(keys) / sizeof(keys[0]); k++) { + for (unsigned int s = 0; s < sizeof(seq) / sizeof(seq[0]); s++) { + TestKey(keys[k], seq[s], kTypeValue); + TestKey("hello", 1, kTypeDeletion); + } + } +} + +TEST_F(FormatTest, InternalKeyShortSeparator) { + // When user keys are same + ASSERT_EQ(IKey("foo", 100, kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), + IKey("foo", 99, kTypeValue))); + ASSERT_EQ(IKey("foo", 100, kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), + IKey("foo", 101, kTypeValue))); + ASSERT_EQ(IKey("foo", 100, kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), + IKey("foo", 100, kTypeValue))); + ASSERT_EQ(IKey("foo", 100, kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), + IKey("foo", 100, kTypeDeletion))); + + // When user keys are misordered + ASSERT_EQ(IKey("foo", 100, kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), + IKey("bar", 99, kTypeValue))); + + // When user keys are different, but correctly ordered + ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek), + Shorten(IKey("foo", 100, kTypeValue), + IKey("hello", 200, kTypeValue))); + + ASSERT_EQ(IKey("ABC2", kMaxSequenceNumber, kValueTypeForSeek), + Shorten(IKey("ABC1AAAAA", 100, kTypeValue), + IKey("ABC2ABB", 200, kTypeValue))); + + ASSERT_EQ(IKey("AAA2", kMaxSequenceNumber, kValueTypeForSeek), + Shorten(IKey("AAA1AAA", 100, kTypeValue), + IKey("AAA2AA", 200, kTypeValue))); + + ASSERT_EQ( + IKey("AAA2", kMaxSequenceNumber, kValueTypeForSeek), + Shorten(IKey("AAA1AAA", 100, kTypeValue), IKey("AAA4", 200, kTypeValue))); + + ASSERT_EQ( + IKey("AAA1B", kMaxSequenceNumber, kValueTypeForSeek), + Shorten(IKey("AAA1AAA", 100, kTypeValue), IKey("AAA2", 200, kTypeValue))); + + ASSERT_EQ(IKey("AAA2", kMaxSequenceNumber, kValueTypeForSeek), + Shorten(IKey("AAA1AAA", 100, kTypeValue), + IKey("AAA2A", 200, kTypeValue))); + + ASSERT_EQ( + IKey("AAA1", 100, kTypeValue), + Shorten(IKey("AAA1", 100, kTypeValue), IKey("AAA2", 200, kTypeValue))); + + // When start user key is prefix of limit user key + ASSERT_EQ(IKey("foo", 100, 
kTypeValue), + Shorten(IKey("foo", 100, kTypeValue), + IKey("foobar", 200, kTypeValue))); + + // When limit user key is prefix of start user key + ASSERT_EQ(IKey("foobar", 100, kTypeValue), + Shorten(IKey("foobar", 100, kTypeValue), + IKey("foo", 200, kTypeValue))); +} + +TEST_F(FormatTest, InternalKeyShortestSuccessor) { + ASSERT_EQ(IKey("g", kMaxSequenceNumber, kValueTypeForSeek), + ShortSuccessor(IKey("foo", 100, kTypeValue))); + ASSERT_EQ(IKey("\xff\xff", 100, kTypeValue), + ShortSuccessor(IKey("\xff\xff", 100, kTypeValue))); +} + +TEST_F(FormatTest, IterKeyOperation) { + IterKey k; + const char p[] = "abcdefghijklmnopqrstuvwxyz"; + const char q[] = "0123456789"; + + ASSERT_EQ(std::string(k.GetKey().data(), k.GetKey().size()), + std::string("")); + + k.TrimAppend(0, p, 3); + ASSERT_EQ(std::string(k.GetKey().data(), k.GetKey().size()), + std::string("abc")); + + k.TrimAppend(1, p, 3); + ASSERT_EQ(std::string(k.GetKey().data(), k.GetKey().size()), + std::string("aabc")); + + k.TrimAppend(0, p, 26); + ASSERT_EQ(std::string(k.GetKey().data(), k.GetKey().size()), + std::string("abcdefghijklmnopqrstuvwxyz")); + + k.TrimAppend(26, q, 10); + ASSERT_EQ(std::string(k.GetKey().data(), k.GetKey().size()), + std::string("abcdefghijklmnopqrstuvwxyz0123456789")); + + k.TrimAppend(36, q, 1); + ASSERT_EQ(std::string(k.GetKey().data(), k.GetKey().size()), + std::string("abcdefghijklmnopqrstuvwxyz01234567890")); + + k.TrimAppend(26, q, 1); + ASSERT_EQ(std::string(k.GetKey().data(), k.GetKey().size()), + std::string("abcdefghijklmnopqrstuvwxyz0")); + + // Size going up, memory allocation is triggered + k.TrimAppend(27, p, 26); + ASSERT_EQ(std::string(k.GetKey().data(), k.GetKey().size()), + std::string("abcdefghijklmnopqrstuvwxyz0" + "abcdefghijklmnopqrstuvwxyz")); +} + +TEST_F(FormatTest, UpdateInternalKey) { + std::string user_key("abcdefghijklmnopqrstuvwxyz"); + uint64_t new_seq = 0x123456; + ValueType new_val_type = kTypeDeletion; + + std::string ikey; + AppendInternalKey(&ikey, ParsedInternalKey(user_key, 100U, kTypeValue)); + size_t ikey_size = ikey.size(); + UpdateInternalKey(&ikey, new_seq, new_val_type); + ASSERT_EQ(ikey_size, ikey.size()); + + Slice in(ikey); + ParsedInternalKey decoded; + ASSERT_TRUE(ParseInternalKey(in, &decoded)); + ASSERT_EQ(user_key, decoded.user_key.ToString()); + ASSERT_EQ(new_seq, decoded.sequence); + ASSERT_EQ(new_val_type, decoded.type); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/deletefile_test.cc b/external/rocksdb/db/deletefile_test.cc new file mode 100755 index 0000000000..c8257ccd81 --- /dev/null +++ b/external/rocksdb/db/deletefile_test.cc @@ -0,0 +1,509 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+
+#ifndef ROCKSDB_LITE
+
+#include <stdlib.h>
+#include <map>
+#include <string>
+#include <vector>
+#include "db/db_impl.h"
+#include "db/filename.h"
+#include "db/version_set.h"
+#include "db/write_batch_internal.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/transaction_log.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace rocksdb {
+
+class DeleteFileTest : public testing::Test {
+ public:
+  std::string dbname_;
+  Options options_;
+  DB* db_;
+  Env* env_;
+  int numlevels_;
+
+  DeleteFileTest() {
+    db_ = nullptr;
+    env_ = Env::Default();
+    options_.delete_obsolete_files_period_micros = 0;  // always do full purge
+    options_.enable_thread_tracking = true;
+    options_.write_buffer_size = 1024 * 1024 * 1000;
+    options_.target_file_size_base = 1024 * 1024 * 1000;
+    options_.max_bytes_for_level_base = 1024 * 1024 * 1000;
+    options_.WAL_ttl_seconds = 300;     // Used to test log files
+    options_.WAL_size_limit_MB = 1024;  // Used to test log files
+    dbname_ = test::TmpDir() + "/deletefile_test";
+    options_.wal_dir = dbname_ + "/wal_files";
+
+    // Clean up all the files that might have been there before.
+    std::vector<std::string> old_files;
+    env_->GetChildren(dbname_, &old_files);
+    for (auto file : old_files) {
+      env_->DeleteFile(dbname_ + "/" + file);
+    }
+    env_->GetChildren(options_.wal_dir, &old_files);
+    for (auto file : old_files) {
+      env_->DeleteFile(options_.wal_dir + "/" + file);
+    }
+
+    DestroyDB(dbname_, options_);
+    numlevels_ = 7;
+    EXPECT_OK(ReopenDB(true));
+  }
+
+  Status ReopenDB(bool create) {
+    delete db_;
+    if (create) {
+      DestroyDB(dbname_, options_);
+    }
+    db_ = nullptr;
+    options_.create_if_missing = create;
+    return DB::Open(options_, dbname_, &db_);
+  }
+
+  void CloseDB() {
+    delete db_;
+    db_ = nullptr;
+  }
+
+  void AddKeys(int numkeys, int startkey = 0) {
+    WriteOptions options;
+    options.sync = false;
+    ReadOptions roptions;
+    for (int i = startkey; i < (numkeys + startkey); i++) {
+      std::string temp = ToString(i);
+      Slice key(temp);
+      Slice value(temp);
+      ASSERT_OK(db_->Put(options, key, value));
+    }
+  }
+
+  int numKeysInLevels(std::vector<LiveFileMetaData>& metadata,
+                      std::vector<int>* keysperlevel = nullptr) {
+    if (keysperlevel != nullptr) {
+      keysperlevel->resize(numlevels_);
+    }
+
+    int numKeys = 0;
+    for (size_t i = 0; i < metadata.size(); i++) {
+      int startkey = atoi(metadata[i].smallestkey.c_str());
+      int endkey = atoi(metadata[i].largestkey.c_str());
+      int numkeysinfile = (endkey - startkey + 1);
+      numKeys += numkeysinfile;
+      if (keysperlevel != nullptr) {
+        (*keysperlevel)[(int)metadata[i].level] += numkeysinfile;
+      }
+      fprintf(stderr, "level %d name %s smallest %s largest %s\n",
+              metadata[i].level, metadata[i].name.c_str(),
+              metadata[i].smallestkey.c_str(),
+              metadata[i].largestkey.c_str());
+    }
+    return numKeys;
+  }
+
+  void CreateTwoLevels() {
+    AddKeys(50000, 10000);
+    DBImpl* dbi = reinterpret_cast<DBImpl*>(db_);
+    ASSERT_OK(dbi->TEST_FlushMemTable());
+    ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
+    for (int i = 0; i < 2; ++i) {
+      ASSERT_OK(dbi->TEST_CompactRange(i, nullptr, nullptr));
+    }
+
+    AddKeys(50000, 10000);
+    ASSERT_OK(dbi->TEST_FlushMemTable());
+    ASSERT_OK(dbi->TEST_WaitForFlushMemTable());
+    ASSERT_OK(dbi->TEST_CompactRange(0, nullptr, nullptr));
+  }
+
+  void CheckFileTypeCounts(std::string& dir, int required_log,
+                           int required_sst, int required_manifest) {
+    std::vector<std::string> filenames;
+    env_->GetChildren(dir, &filenames);
+
+    int log_cnt = 0, sst_cnt = 0, manifest_cnt = 0;
+    for (auto file : filenames) {
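+      // (Descriptive note, not in the upstream test: ParseFileName()
+      // recognizes the standard RocksDB file-name patterns, e.g. 000005.log,
+      // 000007.sst and MANIFEST-000002, and reports each file's type, so
+      // unrelated files are simply skipped by the counters below.)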
uint64_t number; + FileType type; + if (ParseFileName(file, &number, &type)) { + log_cnt += (type == kLogFile); + sst_cnt += (type == kTableFile); + manifest_cnt += (type == kDescriptorFile); + } + } + ASSERT_EQ(required_log, log_cnt); + ASSERT_EQ(required_sst, sst_cnt); + ASSERT_EQ(required_manifest, manifest_cnt); + } + + static void DoSleep(void* arg) { + auto test = reinterpret_cast(arg); + test->env_->SleepForMicroseconds(2 * 1000 * 1000); + } + + // An empty job to guard all jobs are processed + static void GuardFinish(void* arg) { + TEST_SYNC_POINT("DeleteFileTest::GuardFinish"); + } +}; + +TEST_F(DeleteFileTest, AddKeysAndQueryLevels) { + CreateTwoLevels(); + std::vector metadata; + db_->GetLiveFilesMetaData(&metadata); + + std::string level1file = ""; + int level1keycount = 0; + std::string level2file = ""; + int level2keycount = 0; + int level1index = 0; + int level2index = 1; + + ASSERT_EQ((int)metadata.size(), 2); + if (metadata[0].level == 2) { + level1index = 1; + level2index = 0; + } + + level1file = metadata[level1index].name; + int startkey = atoi(metadata[level1index].smallestkey.c_str()); + int endkey = atoi(metadata[level1index].largestkey.c_str()); + level1keycount = (endkey - startkey + 1); + level2file = metadata[level2index].name; + startkey = atoi(metadata[level2index].smallestkey.c_str()); + endkey = atoi(metadata[level2index].largestkey.c_str()); + level2keycount = (endkey - startkey + 1); + + // COntrolled setup. Levels 1 and 2 should both have 50K files. + // This is a little fragile as it depends on the current + // compaction heuristics. + ASSERT_EQ(level1keycount, 50000); + ASSERT_EQ(level2keycount, 50000); + + Status status = db_->DeleteFile("0.sst"); + ASSERT_TRUE(status.IsInvalidArgument()); + + // intermediate level files cannot be deleted. + status = db_->DeleteFile(level1file); + ASSERT_TRUE(status.IsInvalidArgument()); + + // Lowest level file deletion should succeed. 
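+  // (Deleting a file from the bottommost populated level is safe because no
+  // older version of any key can be buried beneath it; DeleteFile() rejects
+  // every other level with InvalidArgument for exactly that reason.)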
+ ASSERT_OK(db_->DeleteFile(level2file)); + + CloseDB(); +} + +TEST_F(DeleteFileTest, PurgeObsoleteFilesTest) { + CreateTwoLevels(); + // there should be only one (empty) log file because CreateTwoLevels() + // flushes the memtables to disk + CheckFileTypeCounts(options_.wal_dir, 1, 0, 0); + // 2 ssts, 1 manifest + CheckFileTypeCounts(dbname_, 0, 2, 1); + std::string first("0"), last("999999"); + CompactRangeOptions compact_options; + compact_options.change_level = true; + compact_options.target_level = 2; + Slice first_slice(first), last_slice(last); + db_->CompactRange(compact_options, &first_slice, &last_slice); + // 1 sst after compaction + CheckFileTypeCounts(dbname_, 0, 1, 1); + + // this time, we keep an iterator alive + ReopenDB(true); + Iterator *itr = 0; + CreateTwoLevels(); + itr = db_->NewIterator(ReadOptions()); + db_->CompactRange(compact_options, &first_slice, &last_slice); + // 3 sst after compaction with live iterator + CheckFileTypeCounts(dbname_, 0, 3, 1); + delete itr; + // 1 sst after iterator deletion + CheckFileTypeCounts(dbname_, 0, 1, 1); + + CloseDB(); +} + +TEST_F(DeleteFileTest, BackgroundPurgeTest) { + std::string first("0"), last("999999"); + CompactRangeOptions compact_options; + compact_options.change_level = true; + compact_options.target_level = 2; + Slice first_slice(first), last_slice(last); + + // We keep an iterator alive + Iterator* itr = 0; + CreateTwoLevels(); + ReadOptions options; + options.background_purge_on_iterator_cleanup = true; + itr = db_->NewIterator(options); + db_->CompactRange(compact_options, &first_slice, &last_slice); + // 3 sst after compaction with live iterator + CheckFileTypeCounts(dbname_, 0, 3, 1); + test::SleepingBackgroundTask sleeping_task_before; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, + &sleeping_task_before, Env::Priority::HIGH); + delete itr; + test::SleepingBackgroundTask sleeping_task_after; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, + &sleeping_task_after, Env::Priority::HIGH); + + // Make sure no purges are executed foreground + CheckFileTypeCounts(dbname_, 0, 3, 1); + sleeping_task_before.WakeUp(); + sleeping_task_before.WaitUntilDone(); + + // Make sure all background purges are executed + sleeping_task_after.WakeUp(); + sleeping_task_after.WaitUntilDone(); + // 1 sst after iterator deletion + CheckFileTypeCounts(dbname_, 0, 1, 1); + + CloseDB(); +} + +// This test is to reproduce a bug that read invalid ReadOption in iterator +// cleanup function +TEST_F(DeleteFileTest, BackgroundPurgeCopyOptions) { + std::string first("0"), last("999999"); + CompactRangeOptions compact_options; + compact_options.change_level = true; + compact_options.target_level = 2; + Slice first_slice(first), last_slice(last); + + // We keep an iterator alive + Iterator* itr = 0; + CreateTwoLevels(); + ReadOptions* options = new ReadOptions(); + options->background_purge_on_iterator_cleanup = true; + itr = db_->NewIterator(*options); + // ReadOptions is deleted, but iterator cleanup function should not be + // affected + delete options; + + db_->CompactRange(compact_options, &first_slice, &last_slice); + // 3 sst after compaction with live iterator + CheckFileTypeCounts(dbname_, 0, 3, 1); + delete itr; + + test::SleepingBackgroundTask sleeping_task_after; + env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, + &sleeping_task_after, Env::Priority::HIGH); + + // Make sure all background purges are executed + sleeping_task_after.WakeUp(); + sleeping_task_after.WaitUntilDone(); + // 1 sst after 
iterator deletion + CheckFileTypeCounts(dbname_, 0, 1, 1); + + CloseDB(); +} + +TEST_F(DeleteFileTest, BackgroundPurgeTestMultipleJobs) { + std::string first("0"), last("999999"); + CompactRangeOptions compact_options; + compact_options.change_level = true; + compact_options.target_level = 2; + Slice first_slice(first), last_slice(last); + + // We keep an iterator alive + CreateTwoLevels(); + ReadOptions options; + options.background_purge_on_iterator_cleanup = true; + Iterator* itr1 = db_->NewIterator(options); + CreateTwoLevels(); + Iterator* itr2 = db_->NewIterator(options); + db_->CompactRange(compact_options, &first_slice, &last_slice); + // 5 sst files after 2 compactions with 2 live iterators + CheckFileTypeCounts(dbname_, 0, 5, 1); + + // ~DBImpl should wait until all BGWorkPurge are finished + rocksdb::SyncPoint::GetInstance()->LoadDependency( + {{"DBImpl::~DBImpl:WaitJob", "DBImpl::BGWorkPurge"}, + {"DeleteFileTest::GuardFinish", + "DeleteFileTest::BackgroundPurgeTestMultipleJobs:DBClose"}}); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + delete itr1; + env_->Schedule(&DeleteFileTest::DoSleep, this, Env::Priority::HIGH); + delete itr2; + env_->Schedule(&DeleteFileTest::GuardFinish, nullptr, Env::Priority::HIGH); + CloseDB(); + + TEST_SYNC_POINT("DeleteFileTest::BackgroundPurgeTestMultipleJobs:DBClose"); + // 1 sst after iterator deletion + CheckFileTypeCounts(dbname_, 0, 1, 1); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); +} + +TEST_F(DeleteFileTest, DeleteFileWithIterator) { + CreateTwoLevels(); + ReadOptions options; + Iterator* it = db_->NewIterator(options); + std::vector metadata; + db_->GetLiveFilesMetaData(&metadata); + + std::string level2file = ""; + + ASSERT_EQ((int)metadata.size(), 2); + if (metadata[0].level == 1) { + level2file = metadata[1].name; + } else { + level2file = metadata[0].name; + } + + Status status = db_->DeleteFile(level2file); + fprintf(stdout, "Deletion status %s: %s\n", + level2file.c_str(), status.ToString().c_str()); + ASSERT_TRUE(status.ok()); + it->SeekToFirst(); + int numKeysIterated = 0; + while(it->Valid()) { + numKeysIterated++; + it->Next(); + } + ASSERT_EQ(numKeysIterated, 50000); + delete it; + CloseDB(); +} + +TEST_F(DeleteFileTest, DeleteLogFiles) { + AddKeys(10, 0); + VectorLogPtr logfiles; + db_->GetSortedWalFiles(logfiles); + ASSERT_GT(logfiles.size(), 0UL); + // Take the last log file which is expected to be alive and try to delete it + // Should not succeed because live logs are not allowed to be deleted + std::unique_ptr alive_log = std::move(logfiles.back()); + ASSERT_EQ(alive_log->Type(), kAliveLogFile); + ASSERT_OK(env_->FileExists(options_.wal_dir + "/" + alive_log->PathName())); + fprintf(stdout, "Deleting alive log file %s\n", + alive_log->PathName().c_str()); + ASSERT_TRUE(!db_->DeleteFile(alive_log->PathName()).ok()); + ASSERT_OK(env_->FileExists(options_.wal_dir + "/" + alive_log->PathName())); + logfiles.clear(); + + // Call Flush to bring about a new working log file and add more keys + // Call Flush again to flush out memtable and move alive log to archived log + // and try to delete the archived log file + FlushOptions fopts; + db_->Flush(fopts); + AddKeys(10, 0); + db_->Flush(fopts); + db_->GetSortedWalFiles(logfiles); + ASSERT_GT(logfiles.size(), 0UL); + std::unique_ptr archived_log = std::move(logfiles.front()); + ASSERT_EQ(archived_log->Type(), kArchivedLogFile); + ASSERT_OK( + env_->FileExists(options_.wal_dir + "/" + archived_log->PathName())); + fprintf(stdout, "Deleting archived log 
file %s\n", + archived_log->PathName().c_str()); + ASSERT_OK(db_->DeleteFile(archived_log->PathName())); + ASSERT_EQ(Status::NotFound(), env_->FileExists(options_.wal_dir + "/" + + archived_log->PathName())); + CloseDB(); +} + +TEST_F(DeleteFileTest, DeleteNonDefaultColumnFamily) { + CloseDB(); + DBOptions db_options; + db_options.create_if_missing = true; + db_options.create_missing_column_families = true; + std::vector column_families; + column_families.emplace_back(); + column_families.emplace_back("new_cf", ColumnFamilyOptions()); + + std::vector handles; + rocksdb::DB* db; + ASSERT_OK(DB::Open(db_options, dbname_, column_families, &handles, &db)); + + Random rnd(5); + for (int i = 0; i < 1000; ++i) { + ASSERT_OK(db->Put(WriteOptions(), handles[1], test::RandomKey(&rnd, 10), + test::RandomKey(&rnd, 10))); + } + ASSERT_OK(db->Flush(FlushOptions(), handles[1])); + for (int i = 0; i < 1000; ++i) { + ASSERT_OK(db->Put(WriteOptions(), handles[1], test::RandomKey(&rnd, 10), + test::RandomKey(&rnd, 10))); + } + ASSERT_OK(db->Flush(FlushOptions(), handles[1])); + + std::vector metadata; + db->GetLiveFilesMetaData(&metadata); + ASSERT_EQ(2U, metadata.size()); + ASSERT_EQ("new_cf", metadata[0].column_family_name); + ASSERT_EQ("new_cf", metadata[1].column_family_name); + auto old_file = metadata[0].smallest_seqno < metadata[1].smallest_seqno + ? metadata[0].name + : metadata[1].name; + auto new_file = metadata[0].smallest_seqno > metadata[1].smallest_seqno + ? metadata[0].name + : metadata[1].name; + ASSERT_TRUE(db->DeleteFile(new_file).IsInvalidArgument()); + ASSERT_OK(db->DeleteFile(old_file)); + + { + std::unique_ptr itr(db->NewIterator(ReadOptions(), handles[1])); + int count = 0; + for (itr->SeekToFirst(); itr->Valid(); itr->Next()) { + ASSERT_OK(itr->status()); + ++count; + } + ASSERT_EQ(count, 1000); + } + + delete handles[0]; + delete handles[1]; + delete db; + + ASSERT_OK(DB::Open(db_options, dbname_, column_families, &handles, &db)); + { + std::unique_ptr itr(db->NewIterator(ReadOptions(), handles[1])); + int count = 0; + for (itr->SeekToFirst(); itr->Valid(); itr->Next()) { + ASSERT_OK(itr->status()); + ++count; + } + ASSERT_EQ(count, 1000); + } + + delete handles[0]; + delete handles[1]; + delete db; +} + +} //namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + +#else +#include + +int main(int argc, char** argv) { + fprintf(stderr, + "SKIPPED as DBImpl::DeleteFile is not supported in ROCKSDB_LITE\n"); + return 0; +} + +#endif // !ROCKSDB_LITE diff --git a/external/rocksdb/db/event_helpers.cc b/external/rocksdb/db/event_helpers.cc new file mode 100755 index 0000000000..9249837c2b --- /dev/null +++ b/external/rocksdb/db/event_helpers.cc @@ -0,0 +1,137 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "db/event_helpers.h" + +namespace rocksdb { + +namespace { +template +inline T SafeDivide(T a, T b) { + return b == 0 ? 
0 : a / b; +} +} // namespace + +void EventHelpers::AppendCurrentTime(JSONWriter* jwriter) { + *jwriter << "time_micros" + << std::chrono::duration_cast( + std::chrono::system_clock::now().time_since_epoch()).count(); +} + +#ifndef ROCKSDB_LITE +void EventHelpers::NotifyTableFileCreationStarted( + const std::vector>& listeners, + const std::string& db_name, const std::string& cf_name, + const std::string& file_path, int job_id, TableFileCreationReason reason) { + TableFileCreationBriefInfo info; + info.db_name = db_name; + info.cf_name = cf_name; + info.file_path = file_path; + info.job_id = job_id; + info.reason = reason; + for (auto& listener : listeners) { + listener->OnTableFileCreationStarted(info); + } +} +#endif // !ROCKSDB_LITE + +void EventHelpers::LogAndNotifyTableFileCreationFinished( + EventLogger* event_logger, + const std::vector>& listeners, + const std::string& db_name, const std::string& cf_name, + const std::string& file_path, int job_id, const FileDescriptor& fd, + const TableProperties& table_properties, TableFileCreationReason reason, + const Status& s) { + if (s.ok() && event_logger) { + JSONWriter jwriter; + AppendCurrentTime(&jwriter); + jwriter << "cf_name" << cf_name << "job" << job_id << "event" + << "table_file_creation" + << "file_number" << fd.GetNumber() << "file_size" + << fd.GetFileSize(); + + // table_properties + { + jwriter << "table_properties"; + jwriter.StartObject(); + + // basic properties: + jwriter << "data_size" << table_properties.data_size << "index_size" + << table_properties.index_size << "filter_size" + << table_properties.filter_size << "raw_key_size" + << table_properties.raw_key_size << "raw_average_key_size" + << SafeDivide(table_properties.raw_key_size, + table_properties.num_entries) + << "raw_value_size" << table_properties.raw_value_size + << "raw_average_value_size" + << SafeDivide(table_properties.raw_value_size, + table_properties.num_entries) + << "num_data_blocks" << table_properties.num_data_blocks + << "num_entries" << table_properties.num_entries + << "filter_policy_name" << table_properties.filter_policy_name; + + // user collected properties + for (const auto& prop : table_properties.readable_properties) { + jwriter << prop.first << prop.second; + } + jwriter.EndObject(); + } + jwriter.EndObject(); + + event_logger->Log(jwriter); + } + +#ifndef ROCKSDB_LITE + if (listeners.size() == 0) { + return; + } + TableFileCreationInfo info; + info.db_name = db_name; + info.cf_name = cf_name; + info.file_path = file_path; + info.file_size = fd.file_size; + info.job_id = job_id; + info.table_properties = table_properties; + info.reason = reason; + info.status = s; + for (auto& listener : listeners) { + listener->OnTableFileCreated(info); + } +#endif // !ROCKSDB_LITE +} + +void EventHelpers::LogAndNotifyTableFileDeletion( + EventLogger* event_logger, int job_id, + uint64_t file_number, const std::string& file_path, + const Status& status, const std::string& dbname, + const std::vector>& listeners) { + + JSONWriter jwriter; + AppendCurrentTime(&jwriter); + + jwriter << "job" << job_id + << "event" << "table_file_deletion" + << "file_number" << file_number; + if (!status.ok()) { + jwriter << "status" << status.ToString(); + } + + jwriter.EndObject(); + + event_logger->Log(jwriter); + +#ifndef ROCKSDB_LITE + TableFileDeletionInfo info; + info.db_name = dbname; + info.job_id = job_id; + info.file_path = file_path; + info.status = status; + for (auto listener : listeners) { + listener->OnTableFileDeleted(info); + } +#endif // !ROCKSDB_LITE 
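+  // (Only the listener fan-out above is compiled out under ROCKSDB_LITE;
+  // the JSON record handed to the EventLogger is written in both builds.)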
+} + +} // namespace rocksdb diff --git a/external/rocksdb/db/event_helpers.h b/external/rocksdb/db/event_helpers.h new file mode 100755 index 0000000000..e9c111f20f --- /dev/null +++ b/external/rocksdb/db/event_helpers.h @@ -0,0 +1,48 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#pragma once + +#include +#include +#include + +#include "db/column_family.h" +#include "db/version_edit.h" +#include "rocksdb/listener.h" +#include "rocksdb/table_properties.h" +#include "util/event_logger.h" + +namespace rocksdb { + +class EventHelpers { + public: + static void AppendCurrentTime(JSONWriter* json_writer); +#ifndef ROCKSDB_LITE + static void NotifyTableFileCreationStarted( + const std::vector>& listeners, + const std::string& db_name, const std::string& cf_name, + const std::string& file_path, int job_id, TableFileCreationReason reason); +#endif // !ROCKSDB_LITE + static void LogAndNotifyTableFileCreationFinished( + EventLogger* event_logger, + const std::vector>& listeners, + const std::string& db_name, const std::string& cf_name, + const std::string& file_path, int job_id, const FileDescriptor& fd, + const TableProperties& table_properties, TableFileCreationReason reason, + const Status& s); + static void LogAndNotifyTableFileDeletion( + EventLogger* event_logger, int job_id, + uint64_t file_number, const std::string& file_path, + const Status& status, const std::string& db_name, + const std::vector>& listeners); + + private: + static void LogAndNotifyTableFileCreation( + EventLogger* event_logger, + const std::vector>& listeners, + const FileDescriptor& fd, const TableFileCreationInfo& info); +}; + +} // namespace rocksdb diff --git a/external/rocksdb/db/experimental.cc b/external/rocksdb/db/experimental.cc new file mode 100755 index 0000000000..26b2113d2a --- /dev/null +++ b/external/rocksdb/db/experimental.cc @@ -0,0 +1,51 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
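The helpers in event_helpers.{h,cc} above ultimately fan out to user-registered EventListener callbacks. A minimal sketch of the receiving side, assuming only the public listener API (DeletionLogger is an illustrative name; the callback signature and TableFileDeletionInfo fields are the ones populated above):

    #include <cstdio>
    #include <memory>
    #include "rocksdb/listener.h"
    #include "rocksdb/options.h"

    class DeletionLogger : public rocksdb::EventListener {
     public:
      void OnTableFileDeleted(
          const rocksdb::TableFileDeletionInfo& info) override {
        std::fprintf(stderr, "db=%s job=%d deleted=%s status=%s\n",
                     info.db_name.c_str(), info.job_id, info.file_path.c_str(),
                     info.status.ToString().c_str());
      }
    };

    // Registration: options.listeners.emplace_back(
    //     std::make_shared<DeletionLogger>());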
+ +#include "rocksdb/experimental.h" + +#include "db/db_impl.h" + +namespace rocksdb { +namespace experimental { + +#ifndef ROCKSDB_LITE + +Status SuggestCompactRange(DB* db, ColumnFamilyHandle* column_family, + const Slice* begin, const Slice* end) { + auto dbimpl = dynamic_cast(db); + if (dbimpl == nullptr) { + return Status::InvalidArgument("Didn't recognize DB object"); + } + + return dbimpl->SuggestCompactRange(column_family, begin, end); +} + +Status PromoteL0(DB* db, ColumnFamilyHandle* column_family, int target_level) { + auto dbimpl = dynamic_cast(db); + if (dbimpl == nullptr) { + return Status::InvalidArgument("Didn't recognize DB object"); + } + return dbimpl->PromoteL0(column_family, target_level); +} + +#else // ROCKSDB_LITE + +Status SuggestCompactRange(DB* db, ColumnFamilyHandle* column_family, + const Slice* begin, const Slice* end) { + return Status::NotSupported("Not supported in RocksDB LITE"); +} + +Status PromoteL0(DB* db, ColumnFamilyHandle* column_family, int target_level) { + return Status::NotSupported("Not supported in RocksDB LITE"); +} + +#endif // ROCKSDB_LITE + +Status SuggestCompactRange(DB* db, const Slice* begin, const Slice* end) { + return SuggestCompactRange(db, db->DefaultColumnFamily(), begin, end); +} + +} // namespace experimental +} // namespace rocksdb diff --git a/external/rocksdb/db/fault_injection_test.cc b/external/rocksdb/db/fault_injection_test.cc new file mode 100755 index 0000000000..0883de77b1 --- /dev/null +++ b/external/rocksdb/db/fault_injection_test.cc @@ -0,0 +1,511 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright 2014 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +// This test uses a custom Env to keep track of the state of a filesystem as of +// the last "sync". It then checks for data loss errors by purposely dropping +// file data (or entire files) not protected by a "sync". 
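The comment above is the whole idea of the test; the real machinery lives in util/fault_injection_test_env.h. As a rough sketch of the bookkeeping it implies (SyncTracker and its members are illustrative), the wrapper Env only needs to remember, per file, how many bytes the last sync covered:

    #include <map>
    #include <string>

    class SyncTracker {
      std::map<std::string, size_t> synced_;   // bytes durable as of last sync
      std::map<std::string, size_t> written_;  // bytes written, synced or not
     public:
      void Append(const std::string& f, size_t n) { written_[f] += n; }
      void Sync(const std::string& f) { synced_[f] = written_[f]; }
      // On a simulated crash, every file is truncated back to its synced
      // prefix; anything past that is the data the test expects to lose.
      size_t SurvivingSize(const std::string& f) { return synced_[f]; }
    };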
+ +#include "db/db_impl.h" +#include "db/filename.h" +#include "db/log_format.h" +#include "db/version_set.h" +#include "rocksdb/cache.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/table.h" +#include "rocksdb/write_batch.h" +#include "util/fault_injection_test_env.h" +#include "util/logging.h" +#include "util/mock_env.h" +#include "util/mutexlock.h" +#include "util/sync_point.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { + +static const int kValueSize = 1000; +static const int kMaxNumValues = 2000; +static const size_t kNumIterations = 3; + +class FaultInjectionTest : public testing::Test, + public testing::WithParamInterface { + protected: + enum OptionConfig { + kDefault, + kDifferentDataDir, + kWalDir, + kSyncWal, + kWalDirSyncWal, + kMultiLevels, + kEnd, + }; + int option_config_; + // When need to make sure data is persistent, sync WAL + bool sync_use_wal_; + // When need to make sure data is persistent, call DB::CompactRange() + bool sync_use_compact_; + + bool sequential_order_; + + protected: + public: + enum ExpectedVerifResult { kValExpectFound, kValExpectNoError }; + enum ResetMethod { + kResetDropUnsyncedData, + kResetDropRandomUnsyncedData, + kResetDeleteUnsyncedFiles, + kResetDropAndDeleteUnsynced + }; + + std::unique_ptr base_env_; + FaultInjectionTestEnv* env_; + std::string dbname_; + shared_ptr tiny_cache_; + Options options_; + DB* db_; + + FaultInjectionTest() + : option_config_(kDefault), + sync_use_wal_(false), + sync_use_compact_(true), + base_env_(nullptr), + env_(NULL), + db_(NULL) { + } + + ~FaultInjectionTest() { + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks(); + } + + bool ChangeOptions() { + option_config_++; + if (option_config_ >= kEnd) { + return false; + } else { + if (option_config_ == kMultiLevels) { + base_env_.reset(new MockEnv(Env::Default())); + } + return true; + } + } + + // Return the current option configuration. + Options CurrentOptions() { + sync_use_wal_ = false; + sync_use_compact_ = true; + Options options; + switch (option_config_) { + case kWalDir: + options.wal_dir = test::TmpDir(env_) + "/fault_test_wal"; + break; + case kDifferentDataDir: + options.db_paths.emplace_back(test::TmpDir(env_) + "/fault_test_data", + 1000000U); + break; + case kSyncWal: + sync_use_wal_ = true; + sync_use_compact_ = false; + break; + case kWalDirSyncWal: + options.wal_dir = test::TmpDir(env_) + "/fault_test_wal"; + sync_use_wal_ = true; + sync_use_compact_ = false; + break; + case kMultiLevels: + options.write_buffer_size = 64 * 1024; + options.target_file_size_base = 64 * 1024; + options.level0_file_num_compaction_trigger = 2; + options.level0_slowdown_writes_trigger = 2; + options.level0_stop_writes_trigger = 4; + options.max_bytes_for_level_base = 128 * 1024; + options.max_write_buffer_number = 2; + options.max_background_compactions = 8; + options.max_background_flushes = 8; + sync_use_wal_ = true; + sync_use_compact_ = false; + break; + default: + break; + } + return options; + } + + Status NewDB() { + assert(db_ == NULL); + assert(tiny_cache_ == nullptr); + assert(env_ == NULL); + + env_ = + new FaultInjectionTestEnv(base_env_ ? 
base_env_.get() : Env::Default()); + + options_ = CurrentOptions(); + options_.env = env_; + options_.paranoid_checks = true; + + BlockBasedTableOptions table_options; + tiny_cache_ = NewLRUCache(100); + table_options.block_cache = tiny_cache_; + options_.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + dbname_ = test::TmpDir() + "/fault_test"; + + EXPECT_OK(DestroyDB(dbname_, options_)); + + options_.create_if_missing = true; + Status s = OpenDB(); + options_.create_if_missing = false; + return s; + } + + void SetUp() override { + sequential_order_ = GetParam(); + ASSERT_OK(NewDB()); + } + + void TearDown() override { + CloseDB(); + + Status s = DestroyDB(dbname_, options_); + + delete env_; + env_ = NULL; + + tiny_cache_.reset(); + + ASSERT_OK(s); + } + + void Build(const WriteOptions& write_options, int start_idx, int num_vals) { + std::string key_space, value_space; + WriteBatch batch; + for (int i = start_idx; i < start_idx + num_vals; i++) { + Slice key = Key(i, &key_space); + batch.Clear(); + batch.Put(key, Value(i, &value_space)); + ASSERT_OK(db_->Write(write_options, &batch)); + } + } + + Status ReadValue(int i, std::string* val) const { + std::string key_space, value_space; + Slice key = Key(i, &key_space); + Value(i, &value_space); + ReadOptions options; + return db_->Get(options, key, val); + } + + Status Verify(int start_idx, int num_vals, + ExpectedVerifResult expected) const { + std::string val; + std::string value_space; + Status s; + for (int i = start_idx; i < start_idx + num_vals && s.ok(); i++) { + Value(i, &value_space); + s = ReadValue(i, &val); + if (s.ok()) { + EXPECT_EQ(value_space, val); + } + if (expected == kValExpectFound) { + if (!s.ok()) { + fprintf(stderr, "Error when read %dth record (expect found): %s\n", i, + s.ToString().c_str()); + return s; + } + } else if (!s.ok() && !s.IsNotFound()) { + fprintf(stderr, "Error when read %dth record: %s\n", i, + s.ToString().c_str()); + return s; + } + } + return Status::OK(); + } + + // Return the ith key + Slice Key(int i, std::string* storage) const { + int num = i; + if (!sequential_order_) { + // random transfer + const int m = 0x5bd1e995; + num *= m; + num ^= num << 24; + } + char buf[100]; + snprintf(buf, sizeof(buf), "%016d", num); + storage->assign(buf, strlen(buf)); + return Slice(*storage); + } + + // Return the value to associate with the specified key + Slice Value(int k, std::string* storage) const { + Random r(k); + return test::RandomString(&r, kValueSize, storage); + } + + void CloseDB() { + delete db_; + db_ = NULL; + } + + Status OpenDB() { + CloseDB(); + env_->ResetState(); + return DB::Open(options_, dbname_, &db_); + } + + void DeleteAllData() { + Iterator* iter = db_->NewIterator(ReadOptions()); + WriteOptions options; + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + ASSERT_OK(db_->Delete(WriteOptions(), iter->key())); + } + + delete iter; + + FlushOptions flush_options; + flush_options.wait = true; + db_->Flush(flush_options); + } + + // rnd cannot be null for kResetDropRandomUnsyncedData + void ResetDBState(ResetMethod reset_method, Random* rnd = nullptr) { + env_->AssertNoOpenFile(); + switch (reset_method) { + case kResetDropUnsyncedData: + ASSERT_OK(env_->DropUnsyncedFileData()); + break; + case kResetDropRandomUnsyncedData: + ASSERT_OK(env_->DropRandomUnsyncedFileData(rnd)); + break; + case kResetDeleteUnsyncedFiles: + ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync()); + break; + case kResetDropAndDeleteUnsynced: + ASSERT_OK(env_->DropUnsyncedFileData()); + 
ASSERT_OK(env_->DeleteFilesCreatedAfterLastDirSync()); + break; + default: + assert(false); + } + } + + void PartialCompactTestPreFault(int num_pre_sync, int num_post_sync) { + DeleteAllData(); + + WriteOptions write_options; + write_options.sync = sync_use_wal_; + + Build(write_options, 0, num_pre_sync); + if (sync_use_compact_) { + db_->CompactRange(CompactRangeOptions(), nullptr, nullptr); + } + write_options.sync = false; + Build(write_options, num_pre_sync, num_post_sync); + } + + void PartialCompactTestReopenWithFault(ResetMethod reset_method, + int num_pre_sync, int num_post_sync, + Random* rnd = nullptr) { + env_->SetFilesystemActive(false); + CloseDB(); + ResetDBState(reset_method, rnd); + ASSERT_OK(OpenDB()); + ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::kValExpectFound)); + ASSERT_OK(Verify(num_pre_sync, num_post_sync, + FaultInjectionTest::kValExpectNoError)); + WaitCompactionFinish(); + ASSERT_OK(Verify(0, num_pre_sync, FaultInjectionTest::kValExpectFound)); + ASSERT_OK(Verify(num_pre_sync, num_post_sync, + FaultInjectionTest::kValExpectNoError)); + } + + void NoWriteTestPreFault() { + } + + void NoWriteTestReopenWithFault(ResetMethod reset_method) { + CloseDB(); + ResetDBState(reset_method); + ASSERT_OK(OpenDB()); + } + + void WaitCompactionFinish() { + static_cast(db_->GetRootDB())->TEST_WaitForCompact(); + ASSERT_OK(db_->Put(WriteOptions(), "", "")); + } +}; + +TEST_P(FaultInjectionTest, FaultTest) { + do { + Random rnd(301); + + for (size_t idx = 0; idx < kNumIterations; idx++) { + int num_pre_sync = rnd.Uniform(kMaxNumValues); + int num_post_sync = rnd.Uniform(kMaxNumValues); + + PartialCompactTestPreFault(num_pre_sync, num_post_sync); + PartialCompactTestReopenWithFault(kResetDropUnsyncedData, num_pre_sync, + num_post_sync); + NoWriteTestPreFault(); + NoWriteTestReopenWithFault(kResetDropUnsyncedData); + + PartialCompactTestPreFault(num_pre_sync, num_post_sync); + PartialCompactTestReopenWithFault(kResetDropRandomUnsyncedData, + num_pre_sync, num_post_sync, &rnd); + NoWriteTestPreFault(); + NoWriteTestReopenWithFault(kResetDropUnsyncedData); + + // Setting a separate data path won't pass the test as we don't sync + // it after creating new files, + PartialCompactTestPreFault(num_pre_sync, num_post_sync); + PartialCompactTestReopenWithFault(kResetDropAndDeleteUnsynced, + num_pre_sync, num_post_sync); + NoWriteTestPreFault(); + NoWriteTestReopenWithFault(kResetDropAndDeleteUnsynced); + + PartialCompactTestPreFault(num_pre_sync, num_post_sync); + // No new files created so we expect all values since no files will be + // dropped. + PartialCompactTestReopenWithFault(kResetDeleteUnsyncedFiles, num_pre_sync, + num_post_sync); + NoWriteTestPreFault(); + NoWriteTestReopenWithFault(kResetDeleteUnsyncedFiles); + } + } while (ChangeOptions()); +} + +// Previous log file is not fsynced if sync is forced after log rolling. +TEST_P(FaultInjectionTest, WriteOptionSyncTest) { + test::SleepingBackgroundTask sleeping_task_low; + env_->SetBackgroundThreads(1, Env::HIGH); + // Block the job queue to prevent flush job from running. 
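+  // (SetBackgroundThreads(1, Env::HIGH) above leaves a single HIGH-priority
+  // worker, so parking one sleeping task on it stalls any queued flush until
+  // WakeUp() is called.)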
+ env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low, + Env::Priority::HIGH); + sleeping_task_low.WaitUntilSleeping(); + + WriteOptions write_options; + write_options.sync = false; + + std::string key_space, value_space; + ASSERT_OK( + db_->Put(write_options, Key(1, &key_space), Value(1, &value_space))); + FlushOptions flush_options; + flush_options.wait = false; + ASSERT_OK(db_->Flush(flush_options)); + write_options.sync = true; + ASSERT_OK( + db_->Put(write_options, Key(2, &key_space), Value(2, &value_space))); + + env_->SetFilesystemActive(false); + NoWriteTestReopenWithFault(kResetDropAndDeleteUnsynced); + sleeping_task_low.WakeUp(); + + ASSERT_OK(OpenDB()); + std::string val; + Value(2, &value_space); + ASSERT_OK(ReadValue(2, &val)); + ASSERT_EQ(value_space, val); + + Value(1, &value_space); + ASSERT_OK(ReadValue(1, &val)); + ASSERT_EQ(value_space, val); +} + +TEST_P(FaultInjectionTest, UninstalledCompaction) { + options_.target_file_size_base = 32 * 1024; + options_.write_buffer_size = 100 << 10; // 100KB + options_.level0_file_num_compaction_trigger = 6; + options_.level0_stop_writes_trigger = 1 << 10; + options_.level0_slowdown_writes_trigger = 1 << 10; + options_.max_background_compactions = 1; + OpenDB(); + + if (!sequential_order_) { + rocksdb::SyncPoint::GetInstance()->LoadDependency({ + {"FaultInjectionTest::FaultTest:0", "DBImpl::BGWorkCompaction"}, + {"CompactionJob::Run():End", "FaultInjectionTest::FaultTest:1"}, + {"FaultInjectionTest::FaultTest:2", + "DBImpl::BackgroundCompaction:NonTrivial:AfterRun"}, + }); + } + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + int kNumKeys = 1000; + Build(WriteOptions(), 0, kNumKeys); + FlushOptions flush_options; + flush_options.wait = true; + db_->Flush(flush_options); + ASSERT_OK(db_->Put(WriteOptions(), "", "")); + TEST_SYNC_POINT("FaultInjectionTest::FaultTest:0"); + TEST_SYNC_POINT("FaultInjectionTest::FaultTest:1"); + env_->SetFilesystemActive(false); + TEST_SYNC_POINT("FaultInjectionTest::FaultTest:2"); + CloseDB(); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + ResetDBState(kResetDropUnsyncedData); + + std::atomic opened(false); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::Open:Opened", [&](void* arg) { opened.store(true); }); + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "DBImpl::BGWorkCompaction", + [&](void* arg) { ASSERT_TRUE(opened.load()); }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + ASSERT_OK(OpenDB()); + ASSERT_OK(Verify(0, kNumKeys, FaultInjectionTest::kValExpectFound)); + WaitCompactionFinish(); + ASSERT_OK(Verify(0, kNumKeys, FaultInjectionTest::kValExpectFound)); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + rocksdb::SyncPoint::GetInstance()->ClearAllCallBacks(); +} + +TEST_P(FaultInjectionTest, ManualLogSyncTest) { + test::SleepingBackgroundTask sleeping_task_low; + env_->SetBackgroundThreads(1, Env::HIGH); + // Block the job queue to prevent flush job from running. 
+ env_->Schedule(&test::SleepingBackgroundTask::DoSleepTask, &sleeping_task_low, + Env::Priority::HIGH); + sleeping_task_low.WaitUntilSleeping(); + + WriteOptions write_options; + write_options.sync = false; + + std::string key_space, value_space; + ASSERT_OK( + db_->Put(write_options, Key(1, &key_space), Value(1, &value_space))); + FlushOptions flush_options; + flush_options.wait = false; + ASSERT_OK(db_->Flush(flush_options)); + ASSERT_OK( + db_->Put(write_options, Key(2, &key_space), Value(2, &value_space))); + ASSERT_OK(db_->SyncWAL()); + + env_->SetFilesystemActive(false); + NoWriteTestReopenWithFault(kResetDropAndDeleteUnsynced); + sleeping_task_low.WakeUp(); + + ASSERT_OK(OpenDB()); + std::string val; + Value(2, &value_space); + ASSERT_OK(ReadValue(2, &val)); + ASSERT_EQ(value_space, val); + + Value(1, &value_space); + ASSERT_OK(ReadValue(1, &val)); + ASSERT_EQ(value_space, val); +} + +INSTANTIATE_TEST_CASE_P(FaultTest, FaultInjectionTest, ::testing::Bool()); + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/file_indexer.cc b/external/rocksdb/db/file_indexer.cc new file mode 100755 index 0000000000..9b31c2bd65 --- /dev/null +++ b/external/rocksdb/db/file_indexer.cc @@ -0,0 +1,213 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/file_indexer.h" +#include +#include "rocksdb/comparator.h" +#include "db/version_edit.h" + +namespace rocksdb { + +FileIndexer::FileIndexer(const Comparator* ucmp) + : num_levels_(0), ucmp_(ucmp), level_rb_(nullptr) {} + +size_t FileIndexer::NumLevelIndex() const { return next_level_index_.size(); } + +size_t FileIndexer::LevelIndexSize(size_t level) const { + if (level >= next_level_index_.size()) { + return 0; + } + return next_level_index_[level].num_index; +} + +void FileIndexer::GetNextLevelIndex(const size_t level, const size_t file_index, + const int cmp_smallest, + const int cmp_largest, int32_t* left_bound, + int32_t* right_bound) const { + assert(level > 0); + + // Last level, no hint + if (level == num_levels_ - 1) { + *left_bound = 0; + *right_bound = -1; + return; + } + + assert(level < num_levels_ - 1); + assert(static_cast(file_index) <= level_rb_[level]); + + const IndexUnit* index_units = next_level_index_[level].index_units; + const auto& index = index_units[file_index]; + + if (cmp_smallest < 0) { + *left_bound = (level > 0 && file_index > 0) + ? 
index_units[file_index - 1].largest_lb + : 0; + *right_bound = index.smallest_rb; + } else if (cmp_smallest == 0) { + *left_bound = index.smallest_lb; + *right_bound = index.smallest_rb; + } else if (cmp_smallest > 0 && cmp_largest < 0) { + *left_bound = index.smallest_lb; + *right_bound = index.largest_rb; + } else if (cmp_largest == 0) { + *left_bound = index.largest_lb; + *right_bound = index.largest_rb; + } else if (cmp_largest > 0) { + *left_bound = index.largest_lb; + *right_bound = level_rb_[level + 1]; + } else { + assert(false); + } + + assert(*left_bound >= 0); + assert(*left_bound <= *right_bound + 1); + assert(*right_bound <= level_rb_[level + 1]); +} + +void FileIndexer::UpdateIndex(Arena* arena, const size_t num_levels, + std::vector* const files) { + if (files == nullptr) { + return; + } + if (num_levels == 0) { // uint_32 0-1 would cause bad behavior + num_levels_ = num_levels; + return; + } + assert(level_rb_ == nullptr); // level_rb_ should be init here + + num_levels_ = num_levels; + next_level_index_.resize(num_levels); + + char* mem = arena->AllocateAligned(num_levels_ * sizeof(int32_t)); + level_rb_ = new (mem) int32_t[num_levels_]; + for (size_t i = 0; i < num_levels_; i++) { + level_rb_[i] = -1; + } + + // L1 - Ln-1 + for (size_t level = 1; level < num_levels_ - 1; ++level) { + const auto& upper_files = files[level]; + const int32_t upper_size = static_cast(upper_files.size()); + const auto& lower_files = files[level + 1]; + level_rb_[level] = static_cast(upper_files.size()) - 1; + if (upper_size == 0) { + continue; + } + IndexLevel& index_level = next_level_index_[level]; + index_level.num_index = upper_size; + mem = arena->AllocateAligned(upper_size * sizeof(IndexUnit)); + index_level.index_units = new (mem) IndexUnit[upper_size]; + + CalculateLB( + upper_files, lower_files, &index_level, + [this](const FileMetaData * a, const FileMetaData * b)->int { + return ucmp_->Compare(a->smallest.user_key(), b->largest.user_key()); + }, + [](IndexUnit* index, int32_t f_idx) { index->smallest_lb = f_idx; }); + CalculateLB( + upper_files, lower_files, &index_level, + [this](const FileMetaData * a, const FileMetaData * b)->int { + return ucmp_->Compare(a->largest.user_key(), b->largest.user_key()); + }, + [](IndexUnit* index, int32_t f_idx) { index->largest_lb = f_idx; }); + CalculateRB( + upper_files, lower_files, &index_level, + [this](const FileMetaData * a, const FileMetaData * b)->int { + return ucmp_->Compare(a->smallest.user_key(), b->smallest.user_key()); + }, + [](IndexUnit* index, int32_t f_idx) { index->smallest_rb = f_idx; }); + CalculateRB( + upper_files, lower_files, &index_level, + [this](const FileMetaData * a, const FileMetaData * b)->int { + return ucmp_->Compare(a->largest.user_key(), b->smallest.user_key()); + }, + [](IndexUnit* index, int32_t f_idx) { index->largest_rb = f_idx; }); + } + + level_rb_[num_levels_ - 1] = + static_cast(files[num_levels_ - 1].size()) - 1; +} + +void FileIndexer::CalculateLB( + const std::vector& upper_files, + const std::vector& lower_files, IndexLevel* index_level, + std::function cmp_op, + std::function set_index) { + const int32_t upper_size = static_cast(upper_files.size()); + const int32_t lower_size = static_cast(lower_files.size()); + int32_t upper_idx = 0; + int32_t lower_idx = 0; + + IndexUnit* index = index_level->index_units; + while (upper_idx < upper_size && lower_idx < lower_size) { + int cmp = cmp_op(upper_files[upper_idx], lower_files[lower_idx]); + + if (cmp == 0) { + set_index(&index[upper_idx], lower_idx); + 
++upper_idx; + ++lower_idx; + } else if (cmp > 0) { + // Lower level's file (largest) is smaller, a key won't hit in that + // file. Move to next lower file + ++lower_idx; + } else { + // Lower level's file becomes larger, update the index, and + // move to the next upper file + set_index(&index[upper_idx], lower_idx); + ++upper_idx; + } + } + + while (upper_idx < upper_size) { + // Lower files are exhausted, that means the remaining upper files are + // greater than any lower files. Set the index to be the lower level size. + set_index(&index[upper_idx], lower_size); + ++upper_idx; + } +} + +void FileIndexer::CalculateRB( + const std::vector& upper_files, + const std::vector& lower_files, IndexLevel* index_level, + std::function cmp_op, + std::function set_index) { + const int32_t upper_size = static_cast(upper_files.size()); + const int32_t lower_size = static_cast(lower_files.size()); + int32_t upper_idx = upper_size - 1; + int32_t lower_idx = lower_size - 1; + + IndexUnit* index = index_level->index_units; + while (upper_idx >= 0 && lower_idx >= 0) { + int cmp = cmp_op(upper_files[upper_idx], lower_files[lower_idx]); + + if (cmp == 0) { + set_index(&index[upper_idx], lower_idx); + --upper_idx; + --lower_idx; + } else if (cmp < 0) { + // Lower level's file (smallest) is larger, a key won't hit in that + // file. Move to next lower file. + --lower_idx; + } else { + // Lower level's file becomes smaller, update the index, and move to + // the next the upper file + set_index(&index[upper_idx], lower_idx); + --upper_idx; + } + } + while (upper_idx >= 0) { + // Lower files are exhausted, that means the remaining upper files are + // smaller than any lower files. Set it to -1. + set_index(&index[upper_idx], -1); + --upper_idx; + } +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/file_indexer.h b/external/rocksdb/db/file_indexer.h new file mode 100755 index 0000000000..5eb10bc4dc --- /dev/null +++ b/external/rocksdb/db/file_indexer.h @@ -0,0 +1,142 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#include +#include +#include +#include +#include "port/port.h" +#include "util/arena.h" +#include "util/autovector.h" + +namespace rocksdb { + +class Comparator; +struct FileMetaData; +struct FdWithKeyRange; +struct FileLevel; + +// The file tree structure in Version is prebuilt and the range of each file +// is known. On Version::Get(), it uses binary search to find a potential file +// and then check if a target key can be found in the file by comparing the key +// to each file's smallest and largest key. The results of these comparisons +// can be reused beyond checking if a key falls into a file's range. +// With some pre-calculated knowledge, each key comparison that has been done +// can serve as a hint to narrow down further searches: if a key compared to +// be smaller than a file's smallest or largest, that comparison can be used +// to find out the right bound of next binary search. 
Similarly, if a key +// compared to be larger than a file's smallest or largest, it can be utilized +// to find out the left bound of next binary search. +// With these hints: it can greatly reduce the range of binary search, +// especially for bottom levels, given that one file most likely overlaps with +// only N files from level below (where N is max_bytes_for_level_multiplier). +// So on level L, we will only look at ~N files instead of N^L files on the +// naive approach. +class FileIndexer { + public: + explicit FileIndexer(const Comparator* ucmp); + + size_t NumLevelIndex() const; + + size_t LevelIndexSize(size_t level) const; + + // Return a file index range in the next level to search for a key based on + // smallest and largest key comparison for the current file specified by + // level and file_index. When *left_index < *right_index, both index should + // be valid and fit in the vector size. + void GetNextLevelIndex(const size_t level, const size_t file_index, + const int cmp_smallest, const int cmp_largest, + int32_t* left_bound, int32_t* right_bound) const; + + void UpdateIndex(Arena* arena, const size_t num_levels, + std::vector* const files); + + enum { + // MSVC version 1800 still does not have constexpr for ::max() + kLevelMaxIndex = rocksdb::port::kMaxInt32 + }; + + private: + size_t num_levels_; + const Comparator* ucmp_; + + struct IndexUnit { + IndexUnit() + : smallest_lb(0), largest_lb(0), smallest_rb(-1), largest_rb(-1) {} + // During file search, a key is compared against smallest and largest + // from a FileMetaData. It can have 3 possible outcomes: + // (1) key is smaller than smallest, implying it is also smaller than + // larger. Precalculated index based on "smallest < smallest" can + // be used to provide right bound. + // (2) key is in between smallest and largest. + // Precalculated index based on "smallest > greatest" can be used to + // provide left bound. + // Precalculated index based on "largest < smallest" can be used to + // provide right bound. + // (3) key is larger than largest, implying it is also larger than smallest. + // Precalculated index based on "largest > largest" can be used to + // provide left bound. + // + // As a result, we will need to do: + // Compare smallest (<=) and largest keys from upper level file with + // smallest key from lower level to get a right bound. + // Compare smallest (>=) and largest keys from upper level file with + // largest key from lower level to get a left bound. + // + // Example: + // level 1: [50 - 60] + // level 2: [1 - 40], [45 - 55], [58 - 80] + // A key 35, compared to be less than 50, 3rd file on level 2 can be + // skipped according to rule (1). LB = 0, RB = 1. + // A key 53, sits in the middle 50 and 60. 1st file on level 2 can be + // skipped according to rule (2)-a, but the 3rd file cannot be skipped + // because 60 is greater than 58. LB = 1, RB = 2. + // A key 70, compared to be larger than 60. 1st and 2nd file can be skipped + // according to rule (3). LB = 2, RB = 2. 
+ // + // Point to a left most file in a lower level that may contain a key, + // which compares greater than smallest of a FileMetaData (upper level) + int32_t smallest_lb; + // Point to a left most file in a lower level that may contain a key, + // which compares greater than largest of a FileMetaData (upper level) + int32_t largest_lb; + // Point to a right most file in a lower level that may contain a key, + // which compares smaller than smallest of a FileMetaData (upper level) + int32_t smallest_rb; + // Point to a right most file in a lower level that may contain a key, + // which compares smaller than largest of a FileMetaData (upper level) + int32_t largest_rb; + }; + + // Data structure to store IndexUnits in a whole level + struct IndexLevel { + size_t num_index; + IndexUnit* index_units; + + IndexLevel() : num_index(0), index_units(nullptr) {} + }; + + void CalculateLB( + const std::vector& upper_files, + const std::vector& lower_files, IndexLevel* index_level, + std::function cmp_op, + std::function set_index); + + void CalculateRB( + const std::vector& upper_files, + const std::vector& lower_files, IndexLevel* index_level, + std::function cmp_op, + std::function set_index); + + autovector next_level_index_; + int32_t* level_rb_; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/db/file_indexer_test.cc b/external/rocksdb/db/file_indexer_test.cc new file mode 100755 index 0000000000..9b3cdd4d6d --- /dev/null +++ b/external/rocksdb/db/file_indexer_test.cc @@ -0,0 +1,350 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
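The four bounds documented above are consumed by GetNextLevelIndex() (shown earlier in file_indexer.cc), which picks between them from the signs of two key comparisons. A condensed restatement of that case analysis, with Hints mirroring IndexUnit and every name illustrative:

    #include <cstdint>

    struct Hints {  // mirrors IndexUnit
      int32_t smallest_lb, largest_lb, smallest_rb, largest_rb;
    };

    // cmp_smallest / cmp_largest are the results (-1, 0, +1) of comparing the
    // search key with the current file's smallest / largest key.
    void PickBounds(const Hints& h, int cmp_smallest, int cmp_largest,
                    int32_t prev_largest_lb, int32_t level_rb,
                    int32_t* left, int32_t* right) {
      if (cmp_smallest < 0) {        // key sorts before this file
        *left = prev_largest_lb;     // 0 for the first file on the level
        *right = h.smallest_rb;
      } else if (cmp_smallest == 0) {
        *left = h.smallest_lb;
        *right = h.smallest_rb;
      } else if (cmp_largest < 0) {  // key falls inside this file's range
        *left = h.smallest_lb;
        *right = h.largest_rb;
      } else if (cmp_largest == 0) {
        *left = h.largest_lb;
        *right = h.largest_rb;
      } else {                       // key sorts after this file
        *left = h.largest_lb;
        *right = level_rb;           // level_rb_[level + 1] in the real code
      }
    }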
+
+#include <string>
+#include "db/file_indexer.h"
+#include "db/dbformat.h"
+#include "db/version_edit.h"
+#include "port/stack_trace.h"
+#include "rocksdb/comparator.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace rocksdb {
+
+class IntComparator : public Comparator {
+ public:
+  int Compare(const Slice& a, const Slice& b) const override {
+    assert(a.size() == 8);
+    assert(b.size() == 8);
+    int64_t diff = *reinterpret_cast<const int64_t*>(a.data()) -
+                   *reinterpret_cast<const int64_t*>(b.data());
+    if (diff < 0) {
+      return -1;
+    } else if (diff == 0) {
+      return 0;
+    } else {
+      return 1;
+    }
+  }
+
+  const char* Name() const override { return "IntComparator"; }
+
+  void FindShortestSeparator(std::string* start,
+                             const Slice& limit) const override {}
+
+  void FindShortSuccessor(std::string* key) const override {}
+};
+
+class FileIndexerTest : public testing::Test {
+ public:
+  FileIndexerTest()
+      : kNumLevels(4), files(new std::vector<FileMetaData*>[kNumLevels]) {}
+
+  ~FileIndexerTest() {
+    ClearFiles();
+    delete[] files;
+  }
+
+  void AddFile(int level, int64_t smallest, int64_t largest) {
+    auto* f = new FileMetaData();
+    f->smallest = IntKey(smallest);
+    f->largest = IntKey(largest);
+    files[level].push_back(f);
+  }
+
+  InternalKey IntKey(int64_t v) {
+    return InternalKey(Slice(reinterpret_cast<char*>(&v), 8), 0, kTypeValue);
+  }
+
+  void ClearFiles() {
+    for (uint32_t i = 0; i < kNumLevels; ++i) {
+      for (auto* f : files[i]) {
+        delete f;
+      }
+      files[i].clear();
+    }
+  }
+
+  void GetNextLevelIndex(const uint32_t level, const uint32_t file_index,
+                         const int cmp_smallest, const int cmp_largest,
+                         int32_t* left_index, int32_t* right_index) {
+    *left_index = 100;
+    *right_index = 100;
+    indexer->GetNextLevelIndex(level, file_index, cmp_smallest, cmp_largest,
+                               left_index, right_index);
+  }
+
+  int32_t left = 100;
+  int32_t right = 100;
+  const uint32_t kNumLevels;
+  IntComparator ucmp;
+  FileIndexer* indexer;
+
+  std::vector<FileMetaData*>* files;
+};
+
+// Case 0: Empty
+TEST_F(FileIndexerTest, Empty) {
+  Arena arena;
+  indexer = new FileIndexer(&ucmp);
+  indexer->UpdateIndex(&arena, 0, files);
+  delete indexer;
+}
+
+// Case 1: no overlap, files are on the left of next level files
+TEST_F(FileIndexerTest, no_overlap_left) {
+  Arena arena;
+  indexer = new FileIndexer(&ucmp);
+  // level 1
+  AddFile(1, 100, 200);
+  AddFile(1, 300, 400);
+  AddFile(1, 500, 600);
+  // level 2
+  AddFile(2, 1500, 1600);
+  AddFile(2, 1601, 1699);
+  AddFile(2, 1700, 1800);
+  // level 3
+  AddFile(3, 2500, 2600);
+  AddFile(3, 2601, 2699);
+  AddFile(3, 2700, 2800);
+  indexer->UpdateIndex(&arena, kNumLevels, files);
+  for (uint32_t level = 1; level < 3; ++level) {
+    for (uint32_t f = 0; f < 3; ++f) {
+      GetNextLevelIndex(level, f, -1, -1, &left, &right);
+      ASSERT_EQ(0, left);
+      ASSERT_EQ(-1, right);
+      GetNextLevelIndex(level, f, 0, -1, &left, &right);
+      ASSERT_EQ(0, left);
+      ASSERT_EQ(-1, right);
+      GetNextLevelIndex(level, f, 1, -1, &left, &right);
+      ASSERT_EQ(0, left);
+      ASSERT_EQ(-1, right);
+      GetNextLevelIndex(level, f, 1, 0, &left, &right);
+      ASSERT_EQ(0, left);
+      ASSERT_EQ(-1, right);
+      GetNextLevelIndex(level, f, 1, 1, &left, &right);
+      ASSERT_EQ(0, left);
+      ASSERT_EQ(2, right);
+    }
+  }
+  delete indexer;
+  ClearFiles();
+}
+
+// Case 2: no overlap, files are on the right of next level files
+TEST_F(FileIndexerTest, no_overlap_right) {
+  Arena arena;
+  indexer = new FileIndexer(&ucmp);
+  // level 1
+  AddFile(1, 2100, 2200);
+  AddFile(1, 2300, 2400);
+  AddFile(1, 2500, 2600);
+  // level 2
+  AddFile(2, 1500, 1600);
+  AddFile(2, 1501, 1699);
+  AddFile(2, 1700, 1800);
+ // level 3 + AddFile(3, 500, 600); + AddFile(3, 501, 699); + AddFile(3, 700, 800); + indexer->UpdateIndex(&arena, kNumLevels, files); + for (uint32_t level = 1; level < 3; ++level) { + for (uint32_t f = 0; f < 3; ++f) { + GetNextLevelIndex(level, f, -1, -1, &left, &right); + ASSERT_EQ(f == 0 ? 0 : 3, left); + ASSERT_EQ(2, right); + GetNextLevelIndex(level, f, 0, -1, &left, &right); + ASSERT_EQ(3, left); + ASSERT_EQ(2, right); + GetNextLevelIndex(level, f, 1, -1, &left, &right); + ASSERT_EQ(3, left); + ASSERT_EQ(2, right); + GetNextLevelIndex(level, f, 1, -1, &left, &right); + ASSERT_EQ(3, left); + ASSERT_EQ(2, right); + GetNextLevelIndex(level, f, 1, 0, &left, &right); + ASSERT_EQ(3, left); + ASSERT_EQ(2, right); + GetNextLevelIndex(level, f, 1, 1, &left, &right); + ASSERT_EQ(3, left); + ASSERT_EQ(2, right); + } + } + delete indexer; +} + +// Case 3: empty L2 +TEST_F(FileIndexerTest, empty_L2) { + Arena arena; + indexer = new FileIndexer(&ucmp); + for (uint32_t i = 1; i < kNumLevels; ++i) { + ASSERT_EQ(0U, indexer->LevelIndexSize(i)); + } + // level 1 + AddFile(1, 2100, 2200); + AddFile(1, 2300, 2400); + AddFile(1, 2500, 2600); + // level 3 + AddFile(3, 500, 600); + AddFile(3, 501, 699); + AddFile(3, 700, 800); + indexer->UpdateIndex(&arena, kNumLevels, files); + for (uint32_t f = 0; f < 3; ++f) { + GetNextLevelIndex(1, f, -1, -1, &left, &right); + ASSERT_EQ(0, left); + ASSERT_EQ(-1, right); + GetNextLevelIndex(1, f, 0, -1, &left, &right); + ASSERT_EQ(0, left); + ASSERT_EQ(-1, right); + GetNextLevelIndex(1, f, 1, -1, &left, &right); + ASSERT_EQ(0, left); + ASSERT_EQ(-1, right); + GetNextLevelIndex(1, f, 1, -1, &left, &right); + ASSERT_EQ(0, left); + ASSERT_EQ(-1, right); + GetNextLevelIndex(1, f, 1, 0, &left, &right); + ASSERT_EQ(0, left); + ASSERT_EQ(-1, right); + GetNextLevelIndex(1, f, 1, 1, &left, &right); + ASSERT_EQ(0, left); + ASSERT_EQ(-1, right); + } + delete indexer; + ClearFiles(); +} + +// Case 4: mixed +TEST_F(FileIndexerTest, mixed) { + Arena arena; + indexer = new FileIndexer(&ucmp); + // level 1 + AddFile(1, 100, 200); + AddFile(1, 250, 400); + AddFile(1, 450, 500); + // level 2 + AddFile(2, 100, 150); // 0 + AddFile(2, 200, 250); // 1 + AddFile(2, 251, 300); // 2 + AddFile(2, 301, 350); // 3 + AddFile(2, 500, 600); // 4 + // level 3 + AddFile(3, 0, 50); + AddFile(3, 100, 200); + AddFile(3, 201, 250); + indexer->UpdateIndex(&arena, kNumLevels, files); + // level 1, 0 + GetNextLevelIndex(1, 0, -1, -1, &left, &right); + ASSERT_EQ(0, left); + ASSERT_EQ(0, right); + GetNextLevelIndex(1, 0, 0, -1, &left, &right); + ASSERT_EQ(0, left); + ASSERT_EQ(0, right); + GetNextLevelIndex(1, 0, 1, -1, &left, &right); + ASSERT_EQ(0, left); + ASSERT_EQ(1, right); + GetNextLevelIndex(1, 0, 1, 0, &left, &right); + ASSERT_EQ(1, left); + ASSERT_EQ(1, right); + GetNextLevelIndex(1, 0, 1, 1, &left, &right); + ASSERT_EQ(1, left); + ASSERT_EQ(4, right); + // level 1, 1 + GetNextLevelIndex(1, 1, -1, -1, &left, &right); + ASSERT_EQ(1, left); + ASSERT_EQ(1, right); + GetNextLevelIndex(1, 1, 0, -1, &left, &right); + ASSERT_EQ(1, left); + ASSERT_EQ(1, right); + GetNextLevelIndex(1, 1, 1, -1, &left, &right); + ASSERT_EQ(1, left); + ASSERT_EQ(3, right); + GetNextLevelIndex(1, 1, 1, 0, &left, &right); + ASSERT_EQ(4, left); + ASSERT_EQ(3, right); + GetNextLevelIndex(1, 1, 1, 1, &left, &right); + ASSERT_EQ(4, left); + ASSERT_EQ(4, right); + // level 1, 2 + GetNextLevelIndex(1, 2, -1, -1, &left, &right); + ASSERT_EQ(4, left); + ASSERT_EQ(3, right); + GetNextLevelIndex(1, 2, 0, -1, &left, &right); + 
ASSERT_EQ(4, left); + ASSERT_EQ(3, right); + GetNextLevelIndex(1, 2, 1, -1, &left, &right); + ASSERT_EQ(4, left); + ASSERT_EQ(4, right); + GetNextLevelIndex(1, 2, 1, 0, &left, &right); + ASSERT_EQ(4, left); + ASSERT_EQ(4, right); + GetNextLevelIndex(1, 2, 1, 1, &left, &right); + ASSERT_EQ(4, left); + ASSERT_EQ(4, right); + // level 2, 0 + GetNextLevelIndex(2, 0, -1, -1, &left, &right); + ASSERT_EQ(0, left); + ASSERT_EQ(1, right); + GetNextLevelIndex(2, 0, 0, -1, &left, &right); + ASSERT_EQ(1, left); + ASSERT_EQ(1, right); + GetNextLevelIndex(2, 0, 1, -1, &left, &right); + ASSERT_EQ(1, left); + ASSERT_EQ(1, right); + GetNextLevelIndex(2, 0, 1, 0, &left, &right); + ASSERT_EQ(1, left); + ASSERT_EQ(1, right); + GetNextLevelIndex(2, 0, 1, 1, &left, &right); + ASSERT_EQ(1, left); + ASSERT_EQ(2, right); + // level 2, 1 + GetNextLevelIndex(2, 1, -1, -1, &left, &right); + ASSERT_EQ(1, left); + ASSERT_EQ(1, right); + GetNextLevelIndex(2, 1, 0, -1, &left, &right); + ASSERT_EQ(1, left); + ASSERT_EQ(1, right); + GetNextLevelIndex(2, 1, 1, -1, &left, &right); + ASSERT_EQ(1, left); + ASSERT_EQ(2, right); + GetNextLevelIndex(2, 1, 1, 0, &left, &right); + ASSERT_EQ(2, left); + ASSERT_EQ(2, right); + GetNextLevelIndex(2, 1, 1, 1, &left, &right); + ASSERT_EQ(2, left); + ASSERT_EQ(2, right); + // level 2, [2 - 4], no overlap + for (uint32_t f = 2; f <= 4; ++f) { + GetNextLevelIndex(2, f, -1, -1, &left, &right); + ASSERT_EQ(f == 2 ? 2 : 3, left); + ASSERT_EQ(2, right); + GetNextLevelIndex(2, f, 0, -1, &left, &right); + ASSERT_EQ(3, left); + ASSERT_EQ(2, right); + GetNextLevelIndex(2, f, 1, -1, &left, &right); + ASSERT_EQ(3, left); + ASSERT_EQ(2, right); + GetNextLevelIndex(2, f, 1, 0, &left, &right); + ASSERT_EQ(3, left); + ASSERT_EQ(2, right); + GetNextLevelIndex(2, f, 1, 1, &left, &right); + ASSERT_EQ(3, left); + ASSERT_EQ(2, right); + } + delete indexer; + ClearFiles(); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/filename.cc b/external/rocksdb/db/filename.cc new file mode 100755 index 0000000000..d1f0958bbc --- /dev/null +++ b/external/rocksdb/db/filename.cc @@ -0,0 +1,400 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include "db/filename.h" +#include + +#include +#include +#include +#include "db/dbformat.h" +#include "rocksdb/env.h" +#include "util/file_reader_writer.h" +#include "util/logging.h" +#include "util/stop_watch.h" +#include "util/string_util.h" +#include "util/sync_point.h" + +namespace rocksdb { + +static const std::string kRocksDbTFileExt = "sst"; +static const std::string kLevelDbTFileExt = "ldb"; + +// Given a path, flatten the path name by replacing all chars not in +// {[0-9,a-z,A-Z,-,_,.]} with _. And append '_LOG\0' at the end. +// Return the number of chars stored in dest not including the trailing '\0'. 
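+// For example, "/tmp/my db" is flattened to "tmp_my_db_LOG": the illegal
+// leading '/' is dropped outright (the i > 0 check below), while the inner
+// '/' and ' ' each become '_' before the "_LOG" suffix is appended.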
+static size_t GetInfoLogPrefix(const std::string& path, char* dest, int len) {
+  const char suffix[] = "_LOG";
+
+  size_t write_idx = 0;
+  size_t i = 0;
+  size_t src_len = path.size();
+
+  while (i < src_len && write_idx < len - sizeof(suffix)) {
+    if ((path[i] >= 'a' && path[i] <= 'z') ||
+        (path[i] >= '0' && path[i] <= '9') ||
+        (path[i] >= 'A' && path[i] <= 'Z') ||
+        path[i] == '-' ||
+        path[i] == '.' ||
+        path[i] == '_') {
+      dest[write_idx++] = path[i];
+    } else {
+      if (i > 0) {
+        dest[write_idx++] = '_';
+      }
+    }
+    i++;
+  }
+  assert(sizeof(suffix) <= len - write_idx);
+  // "\0" is automatically added by snprintf
+  snprintf(dest + write_idx, len - write_idx, suffix);
+  write_idx += sizeof(suffix) - 1;
+  return write_idx;
+}
+
+static std::string MakeFileName(const std::string& name, uint64_t number,
+                                const char* suffix) {
+  char buf[100];
+  snprintf(buf, sizeof(buf), "/%06llu.%s",
+           static_cast<unsigned long long>(number),
+           suffix);
+  return name + buf;
+}
+
+std::string LogFileName(const std::string& name, uint64_t number) {
+  assert(number > 0);
+  return MakeFileName(name, number, "log");
+}
+
+std::string ArchivalDirectory(const std::string& dir) {
+  return dir + "/" + ARCHIVAL_DIR;
+}
+std::string ArchivedLogFileName(const std::string& name, uint64_t number) {
+  assert(number > 0);
+  return MakeFileName(name + "/" + ARCHIVAL_DIR, number, "log");
+}
+
+std::string MakeTableFileName(const std::string& path, uint64_t number) {
+  return MakeFileName(path, number, kRocksDbTFileExt.c_str());
+}
+
+std::string Rocks2LevelTableFileName(const std::string& fullname) {
+  assert(fullname.size() > kRocksDbTFileExt.size() + 1);
+  if (fullname.size() <= kRocksDbTFileExt.size() + 1) {
+    return "";
+  }
+  return fullname.substr(0, fullname.size() - kRocksDbTFileExt.size()) +
+         kLevelDbTFileExt;
+}
+
+uint64_t TableFileNameToNumber(const std::string& name) {
+  uint64_t number = 0;
+  uint64_t base = 1;
+  int pos = static_cast<int>(name.find_last_of('.'));
+  while (--pos >= 0 && name[pos] >= '0' && name[pos] <= '9') {
+    number += (name[pos] - '0') * base;
+    base *= 10;
+  }
+  return number;
+}
+
+std::string TableFileName(const std::vector<DbPath>& db_paths, uint64_t number,
+                          uint32_t path_id) {
+  assert(number > 0);
+  std::string path;
+  if (path_id >= db_paths.size()) {
+    path = db_paths.back().path;
+  } else {
+    path = db_paths[path_id].path;
+  }
+  return MakeTableFileName(path, number);
+}
+
+void FormatFileNumber(uint64_t number, uint32_t path_id, char* out_buf,
+                      size_t out_buf_size) {
+  if (path_id == 0) {
+    snprintf(out_buf, out_buf_size, "%" PRIu64, number);
+  } else {
+    snprintf(out_buf, out_buf_size, "%" PRIu64
+                                    "(path "
+                                    "%" PRIu32 ")",
+             number, path_id);
+  }
+}
+
+std::string DescriptorFileName(const std::string& dbname, uint64_t number) {
+  assert(number > 0);
+  char buf[100];
+  snprintf(buf, sizeof(buf), "/MANIFEST-%06llu",
+           static_cast<unsigned long long>(number));
+  return dbname + buf;
+}
+
+std::string CurrentFileName(const std::string& dbname) {
+  return dbname + "/CURRENT";
+}
+
+std::string LockFileName(const std::string& dbname) {
+  return dbname + "/LOCK";
+}
+
+std::string TempFileName(const std::string& dbname, uint64_t number) {
+  return MakeFileName(dbname, number, kTempFileNameSuffix.c_str());
+}
+
+InfoLogPrefix::InfoLogPrefix(bool has_log_dir,
+                             const std::string& db_absolute_path) {
+  if (!has_log_dir) {
+    const char kInfoLogPrefix[] = "LOG";
+    // "\0" is automatically added to the end
+    snprintf(buf, sizeof(buf), kInfoLogPrefix);
+    prefix = Slice(buf, sizeof(kInfoLogPrefix) - 1);
+  } else {
+    size_t len = GetInfoLogPrefix(db_absolute_path, buf, sizeof(buf));
+    prefix = Slice(buf, len);
+  }
+}
+
+std::string InfoLogFileName(const std::string& dbname,
+    const std::string& db_path, const std::string& log_dir) {
+  if (log_dir.empty()) {
+    return dbname + "/LOG";
+  }
+
+  InfoLogPrefix info_log_prefix(true, db_path);
+  return log_dir + "/" + info_log_prefix.buf;
+}
+
+// Return the name of the old info log file for "dbname".
+std::string OldInfoLogFileName(const std::string& dbname, uint64_t ts,
+    const std::string& db_path, const std::string& log_dir) {
+  char buf[50];
+  snprintf(buf, sizeof(buf), "%llu", static_cast<unsigned long long>(ts));
+
+  if (log_dir.empty()) {
+    return dbname + "/LOG.old." + buf;
+  }
+
+  InfoLogPrefix info_log_prefix(true, db_path);
+  return log_dir + "/" + info_log_prefix.buf + ".old." + buf;
+}
+
+std::string OptionsFileName(const std::string& dbname, uint64_t file_num) {
+  char buffer[256];
+  snprintf(buffer, sizeof(buffer), "%s%06" PRIu64,
+           kOptionsFileNamePrefix.c_str(), file_num);
+  return dbname + "/" + buffer;
+}
+
+std::string TempOptionsFileName(const std::string& dbname, uint64_t file_num) {
+  char buffer[256];
+  snprintf(buffer, sizeof(buffer), "%s%06" PRIu64 ".%s",
+           kOptionsFileNamePrefix.c_str(), file_num,
+           kTempFileNameSuffix.c_str());
+  return dbname + "/" + buffer;
+}
+
+std::string MetaDatabaseName(const std::string& dbname, uint64_t number) {
+  char buf[100];
+  snprintf(buf, sizeof(buf), "/METADB-%llu",
+           static_cast<unsigned long long>(number));
+  return dbname + buf;
+}
+
+std::string IdentityFileName(const std::string& dbname) {
+  return dbname + "/IDENTITY";
+}
+
+// Owned filenames have the form:
+//    dbname/IDENTITY
+//    dbname/CURRENT
+//    dbname/LOCK
+//    dbname/<info_log_name_prefix>
+//    dbname/<info_log_name_prefix>.old.[0-9]+
+//    dbname/MANIFEST-[0-9]+
+//    dbname/[0-9]+.(log|sst)
+//    dbname/METADB-[0-9]+
+//    dbname/OPTIONS-[0-9]+
+//    dbname/OPTIONS-[0-9]+.dbtmp
+// Disregards / at the beginning
+bool ParseFileName(const std::string& fname,
+                   uint64_t* number,
+                   FileType* type,
+                   WalFileType* log_type) {
+  return ParseFileName(fname, number, "", type, log_type);
+}
+
+bool ParseFileName(const std::string& fname, uint64_t* number,
+                   const Slice& info_log_name_prefix, FileType* type,
+                   WalFileType* log_type) {
+  Slice rest(fname);
+  if (fname.length() > 1 && fname[0] == '/') {
+    rest.remove_prefix(1);
+  }
+  if (rest == "IDENTITY") {
+    *number = 0;
+    *type = kIdentityFile;
+  } else if (rest == "CURRENT") {
+    *number = 0;
+    *type = kCurrentFile;
+  } else if (rest == "LOCK") {
+    *number = 0;
+    *type = kDBLockFile;
+  } else if (info_log_name_prefix.size() > 0 &&
+             rest.starts_with(info_log_name_prefix)) {
+    rest.remove_prefix(info_log_name_prefix.size());
+    if (rest == "" || rest == ".old") {
+      *number = 0;
+      *type = kInfoLogFile;
+    } else if (rest.starts_with(".old.")) {
+      uint64_t ts_suffix;
+      // sizeof also counts the trailing '\0'.
+ rest.remove_prefix(sizeof(".old.") - 1); + if (!ConsumeDecimalNumber(&rest, &ts_suffix)) { + return false; + } + *number = ts_suffix; + *type = kInfoLogFile; + } + } else if (rest.starts_with("MANIFEST-")) { + rest.remove_prefix(strlen("MANIFEST-")); + uint64_t num; + if (!ConsumeDecimalNumber(&rest, &num)) { + return false; + } + if (!rest.empty()) { + return false; + } + *type = kDescriptorFile; + *number = num; + } else if (rest.starts_with("METADB-")) { + rest.remove_prefix(strlen("METADB-")); + uint64_t num; + if (!ConsumeDecimalNumber(&rest, &num)) { + return false; + } + if (!rest.empty()) { + return false; + } + *type = kMetaDatabase; + *number = num; + } else if (rest.starts_with(kOptionsFileNamePrefix)) { + uint64_t ts_suffix; + bool is_temp_file = false; + rest.remove_prefix(kOptionsFileNamePrefix.size()); + const std::string kTempFileNameSuffixWithDot = + std::string(".") + kTempFileNameSuffix; + if (rest.ends_with(kTempFileNameSuffixWithDot)) { + rest.remove_suffix(kTempFileNameSuffixWithDot.size()); + is_temp_file = true; + } + if (!ConsumeDecimalNumber(&rest, &ts_suffix)) { + return false; + } + *number = ts_suffix; + *type = is_temp_file ? kTempFile : kOptionsFile; + } else { + // Avoid strtoull() to keep filename format independent of the + // current locale + bool archive_dir_found = false; + if (rest.starts_with(ARCHIVAL_DIR)) { + if (rest.size() <= ARCHIVAL_DIR.size()) { + return false; + } + rest.remove_prefix(ARCHIVAL_DIR.size() + 1); // Add 1 to remove / also + if (log_type) { + *log_type = kArchivedLogFile; + } + archive_dir_found = true; + } + uint64_t num; + if (!ConsumeDecimalNumber(&rest, &num)) { + return false; + } + if (rest.size() <= 1 || rest[0] != '.') { + return false; + } + rest.remove_prefix(1); + + Slice suffix = rest; + if (suffix == Slice("log")) { + *type = kLogFile; + if (log_type && !archive_dir_found) { + *log_type = kAliveLogFile; + } + } else if (archive_dir_found) { + return false; // Archive dir can contain only log files + } else if (suffix == Slice(kRocksDbTFileExt) || + suffix == Slice(kLevelDbTFileExt)) { + *type = kTableFile; + } else if (suffix == Slice(kTempFileNameSuffix)) { + *type = kTempFile; + } else { + return false; + } + *number = num; + } + return true; +} + +Status SetCurrentFile(Env* env, const std::string& dbname, + uint64_t descriptor_number, + Directory* directory_to_fsync) { + // Remove leading "dbname/" and add newline to manifest file name + std::string manifest = DescriptorFileName(dbname, descriptor_number); + Slice contents = manifest; + assert(contents.starts_with(dbname + "/")); + contents.remove_prefix(dbname.size() + 1); + std::string tmp = TempFileName(dbname, descriptor_number); + Status s = WriteStringToFile(env, contents.ToString() + "\n", tmp, true); + if (s.ok()) { + TEST_KILL_RANDOM("SetCurrentFile:0", rocksdb_kill_odds * REDUCE_ODDS2); + s = env->RenameFile(tmp, CurrentFileName(dbname)); + TEST_KILL_RANDOM("SetCurrentFile:1", rocksdb_kill_odds * REDUCE_ODDS2); + } + if (s.ok()) { + if (directory_to_fsync != nullptr) { + directory_to_fsync->Fsync(); + } + } else { + env->DeleteFile(tmp); + } + return s; +} + +Status SetIdentityFile(Env* env, const std::string& dbname) { + std::string id = env->GenerateUniqueId(); + assert(!id.empty()); + // Reserve the filename dbname/000000.dbtmp for the temporary identity file + std::string tmp = TempFileName(dbname, 0); + Status s = WriteStringToFile(env, id, tmp, true); + if (s.ok()) { + s = env->RenameFile(tmp, IdentityFileName(dbname)); + } + if (!s.ok()) { + 
env->DeleteFile(tmp);
+  }
+  return s;
+}
+
+Status SyncManifest(Env* env, const DBOptions* db_options,
+                    WritableFileWriter* file) {
+  TEST_KILL_RANDOM("SyncManifest:0", rocksdb_kill_odds * REDUCE_ODDS2);
+  if (db_options->disableDataSync) {
+    return Status::OK();
+  } else {
+    StopWatch sw(env, db_options->statistics.get(), MANIFEST_FILE_SYNC_MICROS);
+    return file->Sync(db_options->use_fsync);
+  }
+}
+
+} // namespace rocksdb
diff --git a/external/rocksdb/db/filename.h b/external/rocksdb/db/filename.h
new file mode 100755
index 0000000000..9a0a1eee33
--- /dev/null
+++ b/external/rocksdb/db/filename.h
@@ -0,0 +1,165 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// File names used by DB code
+
+#pragma once
+#include <stdint.h>
+#include <unordered_map>
+#include <string>
+#include <vector>
+
+#include "port/port.h"
+#include "rocksdb/options.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/status.h"
+#include "rocksdb/transaction_log.h"
+
+namespace rocksdb {
+
+class Env;
+class Directory;
+class WritableFileWriter;
+
+enum FileType {
+  kLogFile,
+  kDBLockFile,
+  kTableFile,
+  kDescriptorFile,
+  kCurrentFile,
+  kTempFile,
+  kInfoLogFile,  // Either the current one, or an old one
+  kMetaDatabase,
+  kIdentityFile,
+  kOptionsFile
+};
+
+// Return the name of the log file with the specified number
+// in the db named by "dbname". The result will be prefixed with
+// "dbname".
+extern std::string LogFileName(const std::string& dbname, uint64_t number);
+
+static const std::string ARCHIVAL_DIR = "archive";
+
+extern std::string ArchivalDirectory(const std::string& dbname);
+
+// Return the name of the archived log file with the specified number
+// in the db named by "dbname". The result will be prefixed with "dbname".
+extern std::string ArchivedLogFileName(const std::string& dbname,
+                                       uint64_t num);
+
+extern std::string MakeTableFileName(const std::string& name, uint64_t number);
+
+// Return the name of sstable with LevelDB suffix
+// created from RocksDB sstable suffixed name
+extern std::string Rocks2LevelTableFileName(const std::string& fullname);
+
+// the reverse function of MakeTableFileName
+// TODO(yhchiang): could merge this function with ParseFileName()
+extern uint64_t TableFileNameToNumber(const std::string& name);
+
+// Return the name of the sstable with the specified number
+// in the db named by "dbname". The result will be prefixed with
+// "dbname".
+extern std::string TableFileName(const std::vector<DbPath>& db_paths,
+                                 uint64_t number, uint32_t path_id);
+
+// Sufficient buffer size for FormatFileNumber.
+const size_t kFormatFileNumberBufSize = 38;
+
+extern void FormatFileNumber(uint64_t number, uint32_t path_id, char* out_buf,
+                             size_t out_buf_size);
+
+// Return the name of the descriptor file for the db named by
+// "dbname" and the specified incarnation number. The result will be
+// prefixed with "dbname".
+extern std::string DescriptorFileName(const std::string& dbname,
+                                      uint64_t number);
+
+// Return the name of the current file. This file contains the name
+// of the current manifest file. The result will be prefixed with
+// "dbname".
+extern std::string CurrentFileName(const std::string& dbname);
+
+// Return the name of the lock file for the db named by
+// "dbname". The result will be prefixed with "dbname".
+extern std::string LockFileName(const std::string& dbname);
+
+// Return the name of a temporary file owned by the db named "dbname".
+// The result will be prefixed with "dbname".
+extern std::string TempFileName(const std::string& dbname, uint64_t number);
+
+// A helper structure for prefix of info log names.
+struct InfoLogPrefix {
+  char buf[260];
+  Slice prefix;
+  // Prefix with DB absolute path encoded
+  explicit InfoLogPrefix(bool has_log_dir, const std::string& db_absolute_path);
+  // Default Prefix
+  explicit InfoLogPrefix();
+};
+
+// Return the name of the info log file for "dbname".
+extern std::string InfoLogFileName(const std::string& dbname,
+                                   const std::string& db_path = "",
+                                   const std::string& log_dir = "");
+
+// Return the name of the old info log file for "dbname".
+extern std::string OldInfoLogFileName(const std::string& dbname, uint64_t ts,
+                                      const std::string& db_path = "",
+                                      const std::string& log_dir = "");
+
+static const std::string kOptionsFileNamePrefix = "OPTIONS-";
+static const std::string kTempFileNameSuffix = "dbtmp";
+
+// Return an options file name given the "dbname" and file number.
+// Format: OPTIONS-[number]
+extern std::string OptionsFileName(const std::string& dbname,
+                                   uint64_t file_num);
+
+// Return a temp options file name given the "dbname" and file number.
+// Format: OPTIONS-[number].dbtmp
+extern std::string TempOptionsFileName(const std::string& dbname,
+                                       uint64_t file_num);
+
+// Return the name to use for a metadatabase. The result will be prefixed with
+// "dbname".
+extern std::string MetaDatabaseName(const std::string& dbname,
+                                    uint64_t number);
+
+// Return the name of the Identity file which stores a unique number for the db
+// that will get regenerated if the db loses all its data and is recreated fresh
+// either from a backup-image or empty
+extern std::string IdentityFileName(const std::string& dbname);
+
+// If filename is a rocksdb file, store the type of the file in *type.
+// The number encoded in the filename is stored in *number. If the
+// filename was successfully parsed, returns true. Else return false.
+// info_log_name_prefix is the path of info logs.
+extern bool ParseFileName(const std::string& filename, uint64_t* number,
+                          const Slice& info_log_name_prefix, FileType* type,
+                          WalFileType* log_type = nullptr);
+// Same as previous function, but skip info log files.
+extern bool ParseFileName(const std::string& filename, uint64_t* number,
+                          FileType* type, WalFileType* log_type = nullptr);
+
+// Make the CURRENT file point to the descriptor file with the
+// specified number.
+extern Status SetCurrentFile(Env* env, const std::string& dbname,
+                             uint64_t descriptor_number,
+                             Directory* directory_to_fsync);
+
+// Make the IDENTITY file for the db
+extern Status SetIdentityFile(Env* env, const std::string& dbname);
+
+// Sync manifest file `file`.
+extern Status SyncManifest(Env* env, const DBOptions* db_options,
+                           WritableFileWriter* file);
+
+} // namespace rocksdb
diff --git a/external/rocksdb/db/filename_test.cc b/external/rocksdb/db/filename_test.cc
new file mode 100755
index 0000000000..0f8e37e7fd
--- /dev/null
+++ b/external/rocksdb/db/filename_test.cc
@@ -0,0 +1,180 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/filename.h" + +#include "db/dbformat.h" +#include "port/port.h" +#include "util/logging.h" +#include "util/testharness.h" + +namespace rocksdb { + +class FileNameTest : public testing::Test {}; + +TEST_F(FileNameTest, Parse) { + Slice db; + FileType type; + uint64_t number; + + char kDefautInfoLogDir = 1; + char kDifferentInfoLogDir = 2; + char kNoCheckLogDir = 4; + char kAllMode = kDefautInfoLogDir | kDifferentInfoLogDir | kNoCheckLogDir; + + // Successful parses + static struct { + const char* fname; + uint64_t number; + FileType type; + char mode; + } cases[] = { + {"100.log", 100, kLogFile, kAllMode}, + {"0.log", 0, kLogFile, kAllMode}, + {"0.sst", 0, kTableFile, kAllMode}, + {"CURRENT", 0, kCurrentFile, kAllMode}, + {"LOCK", 0, kDBLockFile, kAllMode}, + {"MANIFEST-2", 2, kDescriptorFile, kAllMode}, + {"MANIFEST-7", 7, kDescriptorFile, kAllMode}, + {"METADB-2", 2, kMetaDatabase, kAllMode}, + {"METADB-7", 7, kMetaDatabase, kAllMode}, + {"LOG", 0, kInfoLogFile, kDefautInfoLogDir}, + {"LOG.old", 0, kInfoLogFile, kDefautInfoLogDir}, + {"LOG.old.6688", 6688, kInfoLogFile, kDefautInfoLogDir}, + {"rocksdb_dir_LOG", 0, kInfoLogFile, kDifferentInfoLogDir}, + {"rocksdb_dir_LOG.old", 0, kInfoLogFile, kDifferentInfoLogDir}, + {"rocksdb_dir_LOG.old.6688", 6688, kInfoLogFile, kDifferentInfoLogDir}, + {"18446744073709551615.log", 18446744073709551615ull, kLogFile, + kAllMode}, }; + for (char mode : {kDifferentInfoLogDir, kDefautInfoLogDir, kNoCheckLogDir}) { + for (unsigned int i = 0; i < sizeof(cases) / sizeof(cases[0]); i++) { + InfoLogPrefix info_log_prefix(mode != kDefautInfoLogDir, "/rocksdb/dir"); + if (cases[i].mode & mode) { + std::string f = cases[i].fname; + if (mode == kNoCheckLogDir) { + ASSERT_TRUE(ParseFileName(f, &number, &type)) << f; + } else { + ASSERT_TRUE(ParseFileName(f, &number, info_log_prefix.prefix, &type)) + << f; + } + ASSERT_EQ(cases[i].type, type) << f; + ASSERT_EQ(cases[i].number, number) << f; + } + } + } + + // Errors + static const char* errors[] = { + "", + "foo", + "foo-dx-100.log", + ".log", + "", + "manifest", + "CURREN", + "CURRENTX", + "MANIFES", + "MANIFEST", + "MANIFEST-", + "XMANIFEST-3", + "MANIFEST-3x", + "META", + "METADB", + "METADB-", + "XMETADB-3", + "METADB-3x", + "LOC", + "LOCKx", + "LO", + "LOGx", + "18446744073709551616.log", + "184467440737095516150.log", + "100", + "100.", + "100.lop" + }; + for (unsigned int i = 0; i < sizeof(errors) / sizeof(errors[0]); i++) { + std::string f = errors[i]; + ASSERT_TRUE(!ParseFileName(f, &number, &type)) << f; + }; +} + +TEST_F(FileNameTest, InfoLogFileName) { + std::string dbname = ("/data/rocksdb"); + std::string db_absolute_path; + Env::Default()->GetAbsolutePath(dbname, &db_absolute_path); + + ASSERT_EQ("/data/rocksdb/LOG", InfoLogFileName(dbname, db_absolute_path, "")); + ASSERT_EQ("/data/rocksdb/LOG.old.666", + OldInfoLogFileName(dbname, 666u, db_absolute_path, "")); + + ASSERT_EQ("/data/rocksdb_log/data_rocksdb_LOG", + InfoLogFileName(dbname, db_absolute_path, "/data/rocksdb_log")); + ASSERT_EQ( + "/data/rocksdb_log/data_rocksdb_LOG.old.666", + 
OldInfoLogFileName(dbname, 666u, db_absolute_path, "/data/rocksdb_log")); +} + +TEST_F(FileNameTest, Construction) { + uint64_t number; + FileType type; + std::string fname; + + fname = CurrentFileName("foo"); + ASSERT_EQ("foo/", std::string(fname.data(), 4)); + ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); + ASSERT_EQ(0U, number); + ASSERT_EQ(kCurrentFile, type); + + fname = LockFileName("foo"); + ASSERT_EQ("foo/", std::string(fname.data(), 4)); + ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); + ASSERT_EQ(0U, number); + ASSERT_EQ(kDBLockFile, type); + + fname = LogFileName("foo", 192); + ASSERT_EQ("foo/", std::string(fname.data(), 4)); + ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); + ASSERT_EQ(192U, number); + ASSERT_EQ(kLogFile, type); + + fname = TableFileName({DbPath("bar", 0)}, 200, 0); + std::string fname1 = + TableFileName({DbPath("foo", 0), DbPath("bar", 0)}, 200, 1); + ASSERT_EQ(fname, fname1); + ASSERT_EQ("bar/", std::string(fname.data(), 4)); + ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); + ASSERT_EQ(200U, number); + ASSERT_EQ(kTableFile, type); + + fname = DescriptorFileName("bar", 100); + ASSERT_EQ("bar/", std::string(fname.data(), 4)); + ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); + ASSERT_EQ(100U, number); + ASSERT_EQ(kDescriptorFile, type); + + fname = TempFileName("tmp", 999); + ASSERT_EQ("tmp/", std::string(fname.data(), 4)); + ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); + ASSERT_EQ(999U, number); + ASSERT_EQ(kTempFile, type); + + fname = MetaDatabaseName("met", 100); + ASSERT_EQ("met/", std::string(fname.data(), 4)); + ASSERT_TRUE(ParseFileName(fname.c_str() + 4, &number, &type)); + ASSERT_EQ(100U, number); + ASSERT_EQ(kMetaDatabase, type); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/flush_job.cc b/external/rocksdb/db/flush_job.cc new file mode 100755 index 0000000000..bb38f485dd --- /dev/null +++ b/external/rocksdb/db/flush_job.cc @@ -0,0 +1,330 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
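+//
+// A FlushJob converts the contents of one or more immutable memtables into a
+// single level-0 SST file. PickMemTable() selects the memtables to flush
+// under the DB mutex; Run() then builds the table in WriteLevel0Table(),
+// which releases the mutex around the actual I/O, and installs the result
+// in the version set.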
+
+#include "db/flush_job.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+
+#include <algorithm>
+#include <vector>
+
+#include "db/builder.h"
+#include "db/db_iter.h"
+#include "db/dbformat.h"
+#include "db/event_helpers.h"
+#include "db/filename.h"
+#include "db/log_reader.h"
+#include "db/log_writer.h"
+#include "db/memtable.h"
+#include "db/memtable_list.h"
+#include "db/merge_context.h"
+#include "db/version_set.h"
+#include "port/likely.h"
+#include "port/port.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/status.h"
+#include "rocksdb/table.h"
+#include "table/block.h"
+#include "table/block_based_table_factory.h"
+#include "table/merger.h"
+#include "table/table_builder.h"
+#include "table/two_level_iterator.h"
+#include "util/coding.h"
+#include "util/event_logger.h"
+#include "util/file_util.h"
+#include "util/iostats_context_imp.h"
+#include "util/log_buffer.h"
+#include "util/logging.h"
+#include "util/mutexlock.h"
+#include "util/perf_context_imp.h"
+#include "util/stop_watch.h"
+#include "util/sync_point.h"
+#include "util/thread_status_util.h"
+
+namespace rocksdb {
+
+FlushJob::FlushJob(const std::string& dbname, ColumnFamilyData* cfd,
+                   const DBOptions& db_options,
+                   const MutableCFOptions& mutable_cf_options,
+                   const EnvOptions& env_options, VersionSet* versions,
+                   InstrumentedMutex* db_mutex,
+                   std::atomic<bool>* shutting_down,
+                   std::vector<SequenceNumber> existing_snapshots,
+                   SequenceNumber earliest_write_conflict_snapshot,
+                   JobContext* job_context, LogBuffer* log_buffer,
+                   Directory* db_directory, Directory* output_file_directory,
+                   CompressionType output_compression, Statistics* stats,
+                   EventLogger* event_logger, bool measure_io_stats)
+    : dbname_(dbname),
+      cfd_(cfd),
+      db_options_(db_options),
+      mutable_cf_options_(mutable_cf_options),
+      env_options_(env_options),
+      versions_(versions),
+      db_mutex_(db_mutex),
+      shutting_down_(shutting_down),
+      existing_snapshots_(std::move(existing_snapshots)),
+      earliest_write_conflict_snapshot_(earliest_write_conflict_snapshot),
+      job_context_(job_context),
+      log_buffer_(log_buffer),
+      db_directory_(db_directory),
+      output_file_directory_(output_file_directory),
+      output_compression_(output_compression),
+      stats_(stats),
+      event_logger_(event_logger),
+      measure_io_stats_(measure_io_stats),
+      pick_memtable_called(false) {
+  // Update the thread status to indicate flush.
+  ReportStartedFlush();
+  TEST_SYNC_POINT("FlushJob::FlushJob()");
+}
+
+FlushJob::~FlushJob() {
+  ThreadStatusUtil::ResetThreadStatus();
+}
+
+void FlushJob::ReportStartedFlush() {
+  ThreadStatusUtil::SetColumnFamily(cfd_, cfd_->ioptions()->env,
+                                    cfd_->options()->enable_thread_tracking);
+  ThreadStatusUtil::SetThreadOperation(ThreadStatus::OP_FLUSH);
+  ThreadStatusUtil::SetThreadOperationProperty(
+      ThreadStatus::COMPACTION_JOB_ID,
+      job_context_->job_id);
+  IOSTATS_RESET(bytes_written);
+}
+
+void FlushJob::ReportFlushInputSize(const autovector<MemTable*>& mems) {
+  uint64_t input_size = 0;
+  for (auto* mem : mems) {
+    input_size += mem->ApproximateMemoryUsage();
+  }
+  ThreadStatusUtil::IncreaseThreadOperationProperty(
+      ThreadStatus::FLUSH_BYTES_MEMTABLES,
+      input_size);
+}
+
+void FlushJob::RecordFlushIOStats() {
+  RecordTick(stats_, FLUSH_WRITE_BYTES, IOSTATS(bytes_written));
+  ThreadStatusUtil::IncreaseThreadOperationProperty(
+      ThreadStatus::FLUSH_BYTES_WRITTEN, IOSTATS(bytes_written));
+  IOSTATS_RESET(bytes_written);
+}
+
+void FlushJob::PickMemTable() {
+  db_mutex_->AssertHeld();
+  assert(!pick_memtable_called);
+  pick_memtable_called = true;
+  // Save the contents of the earliest memtable as a new Table
+  cfd_->imm()->PickMemtablesToFlush(&mems_);
+  if (mems_.empty()) {
+    return;
+  }
+
+  ReportFlushInputSize(mems_);
+
+  // entries mems are (implicitly) sorted in ascending order by their created
+  // time. We will use the first memtable's `edit` to keep the meta info for
+  // this flush.
+  MemTable* m = mems_[0];
+  edit_ = m->GetEdits();
+  edit_->SetPrevLogNumber(0);
+  // SetLogNumber(log_num) indicates logs with number smaller than log_num
+  // will no longer be picked up for recovery.
+  edit_->SetLogNumber(mems_.back()->GetNextLogNumber());
+  edit_->SetColumnFamily(cfd_->GetID());
+
+  // path 0 for level 0 file.
+  meta_.fd = FileDescriptor(versions_->NewFileNumber(), 0, 0);
+
+  base_ = cfd_->current();
+  base_->Ref();  // it is likely that we do not need this reference
+}
+
+Status FlushJob::Run(FileMetaData* file_meta) {
+  db_mutex_->AssertHeld();
+  assert(pick_memtable_called);
+  AutoThreadOperationStageUpdater stage_run(
+      ThreadStatus::STAGE_FLUSH_RUN);
+  if (mems_.empty()) {
+    LogToBuffer(log_buffer_, "[%s] Nothing in memtable to flush",
+                cfd_->GetName().c_str());
+    return Status::OK();
+  }
+
+  // I/O measurement variables
+  PerfLevel prev_perf_level = PerfLevel::kEnableTime;
+  uint64_t prev_write_nanos = 0;
+  uint64_t prev_fsync_nanos = 0;
+  uint64_t prev_range_sync_nanos = 0;
+  uint64_t prev_prepare_write_nanos = 0;
+  if (measure_io_stats_) {
+    prev_perf_level = GetPerfLevel();
+    SetPerfLevel(PerfLevel::kEnableTime);
+    prev_write_nanos = IOSTATS(write_nanos);
+    prev_fsync_nanos = IOSTATS(fsync_nanos);
+    prev_range_sync_nanos = IOSTATS(range_sync_nanos);
+    prev_prepare_write_nanos = IOSTATS(prepare_write_nanos);
+  }
+
+  // This will release and re-acquire the mutex.
+  Status s = WriteLevel0Table();
+
+  if (s.ok() &&
+      (shutting_down_->load(std::memory_order_acquire) || cfd_->IsDropped())) {
+    s = Status::ShutdownInProgress(
+        "Database shutdown or Column family drop during flush");
+  }
+
+  if (!s.ok()) {
+    cfd_->imm()->RollbackMemtableFlush(mems_, meta_.fd.GetNumber());
+  } else {
+    TEST_SYNC_POINT("FlushJob::InstallResults");
+    // Replace immutable memtable with the generated Table
+    s = cfd_->imm()->InstallMemtableFlushResults(
+        cfd_, mutable_cf_options_, mems_, versions_, db_mutex_,
+        meta_.fd.GetNumber(), &job_context_->memtables_to_free, db_directory_,
+        log_buffer_);
+  }
+
+  if (s.ok() && file_meta != nullptr) {
+    *file_meta = meta_;
+  }
+  RecordFlushIOStats();
+
+  auto stream = event_logger_->LogToBuffer(log_buffer_);
+  stream << "job" << job_context_->job_id << "event"
+         << "flush_finished";
+  stream << "lsm_state";
+  stream.StartArray();
+  auto vstorage = cfd_->current()->storage_info();
+  for (int level = 0; level < vstorage->num_levels(); ++level) {
+    stream << vstorage->NumLevelFiles(level);
+  }
+  stream.EndArray();
+  stream << "immutable_memtables" << cfd_->imm()->NumNotFlushed();
+
+  if (measure_io_stats_) {
+    if (prev_perf_level != PerfLevel::kEnableTime) {
+      SetPerfLevel(prev_perf_level);
+    }
+    stream << "file_write_nanos" << (IOSTATS(write_nanos) - prev_write_nanos);
+    stream << "file_range_sync_nanos"
+           << (IOSTATS(range_sync_nanos) - prev_range_sync_nanos);
+    stream << "file_fsync_nanos" << (IOSTATS(fsync_nanos) - prev_fsync_nanos);
+    stream << "file_prepare_write_nanos"
+           << (IOSTATS(prepare_write_nanos) - prev_prepare_write_nanos);
+  }
+
+  return s;
+}
+
+Status FlushJob::WriteLevel0Table() {
+  AutoThreadOperationStageUpdater stage_updater(
+      ThreadStatus::STAGE_FLUSH_WRITE_L0);
+  db_mutex_->AssertHeld();
+  const uint64_t start_micros = db_options_.env->NowMicros();
+  Status s;
+  {
+    db_mutex_->Unlock();
+    if (log_buffer_) {
+      log_buffer_->FlushBufferToLog();
+    }
+    std::vector<InternalIterator*> memtables;
+    ReadOptions ro;
+    ro.total_order_seek = true;
+    Arena arena;
+    uint64_t total_num_entries = 0, total_num_deletes = 0;
+    size_t total_memory_usage = 0;
+    for (MemTable* m : mems_) {
+      Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+          "[%s] [JOB %d] Flushing memtable with next log file: %" PRIu64 "\n",
+          cfd_->GetName().c_str(), job_context_->job_id, m->GetNextLogNumber());
+      memtables.push_back(m->NewIterator(ro, &arena));
+      total_num_entries += m->num_entries();
+      total_num_deletes += m->num_deletes();
+      total_memory_usage += m->ApproximateMemoryUsage();
+    }
+
+    event_logger_->Log() << "job" << job_context_->job_id << "event"
+                         << "flush_started"
+                         << "num_memtables" << mems_.size() << "num_entries"
+                         << total_num_entries << "num_deletes"
+                         << total_num_deletes << "memory_usage"
+                         << total_memory_usage;
+
+    {
+      ScopedArenaIterator iter(
+          NewMergingIterator(&cfd_->internal_comparator(), &memtables[0],
+                             static_cast<int>(memtables.size()), &arena));
+      Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+          "[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": started",
+          cfd_->GetName().c_str(), job_context_->job_id, meta_.fd.GetNumber());
+
+      TEST_SYNC_POINT_CALLBACK("FlushJob::WriteLevel0Table:output_compression",
+                               &output_compression_);
+      s = BuildTable(
+          dbname_, db_options_.env, *cfd_->ioptions(), mutable_cf_options_,
+          env_options_, cfd_->table_cache(), iter.get(), &meta_,
+          cfd_->internal_comparator(), cfd_->int_tbl_prop_collector_factories(),
+          cfd_->GetID(), cfd_->GetName(), existing_snapshots_,
+          earliest_write_conflict_snapshot_,
output_compression_, + cfd_->ioptions()->compression_opts, + mutable_cf_options_.paranoid_file_checks, cfd_->internal_stats(), + TableFileCreationReason::kFlush, event_logger_, job_context_->job_id, + Env::IO_HIGH, &table_properties_, 0 /* level */); + LogFlush(db_options_.info_log); + } + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "[%s] [JOB %d] Level-0 flush table #%" PRIu64 ": %" PRIu64 + " bytes %s" + "%s", + cfd_->GetName().c_str(), job_context_->job_id, meta_.fd.GetNumber(), + meta_.fd.GetFileSize(), s.ToString().c_str(), + meta_.marked_for_compaction ? " (needs compaction)" : ""); + + if (!db_options_.disableDataSync && output_file_directory_ != nullptr) { + output_file_directory_->Fsync(); + } + TEST_SYNC_POINT("FlushJob::WriteLevel0Table"); + db_mutex_->Lock(); + } + base_->Unref(); + + // Note that if file_size is zero, the file has been deleted and + // should not be added to the manifest. + if (s.ok() && meta_.fd.GetFileSize() > 0) { + // if we have more than 1 background thread, then we cannot + // insert files directly into higher levels because some other + // threads could be concurrently producing compacted files for + // that key range. + // Add file to L0 + edit_->AddFile(0 /* level */, meta_.fd.GetNumber(), meta_.fd.GetPathId(), + meta_.fd.GetFileSize(), meta_.smallest, meta_.largest, + meta_.smallest_seqno, meta_.largest_seqno, + meta_.marked_for_compaction); + } + + // Note that here we treat flush as level 0 compaction in internal stats + InternalStats::CompactionStats stats(1); + stats.micros = db_options_.env->NowMicros() - start_micros; + stats.bytes_written = meta_.fd.GetFileSize(); + cfd_->internal_stats()->AddCompactionStats(0 /* level */, stats); + cfd_->internal_stats()->AddCFStats(InternalStats::BYTES_FLUSHED, + meta_.fd.GetFileSize()); + RecordFlushIOStats(); + return s; +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/flush_job.h b/external/rocksdb/db/flush_job.h new file mode 100755 index 0000000000..5dc6a98710 --- /dev/null +++ b/external/rocksdb/db/flush_job.h @@ -0,0 +1,107 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
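+//
+// Typical call sequence for the FlushJob declared below (both calls with the
+// DB mutex held, as the implementation asserts):
+//
+//   FlushJob job(dbname, cfd, db_options, mutable_cf_options, ...);
+//   job.PickMemTable();              // choose the immutable memtables
+//   Status s = job.Run(&file_meta);  // build and install the L0 file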
+#pragma once
+
+#include <atomic>
+#include <deque>
+#include <limits>
+#include <set>
+#include <utility>
+#include <vector>
+#include <string>
+
+#include "db/column_family.h"
+#include "db/dbformat.h"
+#include "db/flush_scheduler.h"
+#include "db/internal_stats.h"
+#include "db/job_context.h"
+#include "db/log_writer.h"
+#include "db/memtable_list.h"
+#include "db/snapshot_impl.h"
+#include "db/version_edit.h"
+#include "db/write_controller.h"
+#include "db/write_thread.h"
+#include "port/port.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/transaction_log.h"
+#include "table/scoped_arena_iterator.h"
+#include "util/autovector.h"
+#include "util/event_logger.h"
+#include "util/instrumented_mutex.h"
+#include "util/stop_watch.h"
+#include "util/thread_local.h"
+
+namespace rocksdb {
+
+class MemTable;
+class TableCache;
+class Version;
+class VersionEdit;
+class VersionSet;
+class Arena;
+
+class FlushJob {
+ public:
+  // TODO(icanadi) make effort to reduce number of parameters here
+  // IMPORTANT: mutable_cf_options needs to be alive while FlushJob is alive
+  FlushJob(const std::string& dbname, ColumnFamilyData* cfd,
+           const DBOptions& db_options,
+           const MutableCFOptions& mutable_cf_options,
+           const EnvOptions& env_options, VersionSet* versions,
+           InstrumentedMutex* db_mutex, std::atomic<bool>* shutting_down,
+           std::vector<SequenceNumber> existing_snapshots,
+           SequenceNumber earliest_write_conflict_snapshot,
+           JobContext* job_context, LogBuffer* log_buffer,
+           Directory* db_directory, Directory* output_file_directory,
+           CompressionType output_compression, Statistics* stats,
+           EventLogger* event_logger, bool measure_io_stats);
+
+  ~FlushJob();
+
+  // Require db_mutex held
+  void PickMemTable();
+  Status Run(FileMetaData* file_meta = nullptr);
+  TableProperties GetTableProperties() const { return table_properties_; }
+
+ private:
+  void ReportStartedFlush();
+  void ReportFlushInputSize(const autovector<MemTable*>& mems);
+  void RecordFlushIOStats();
+  Status WriteLevel0Table();
+  const std::string& dbname_;
+  ColumnFamilyData* cfd_;
+  const DBOptions& db_options_;
+  const MutableCFOptions& mutable_cf_options_;
+  const EnvOptions& env_options_;
+  VersionSet* versions_;
+  InstrumentedMutex* db_mutex_;
+  std::atomic<bool>* shutting_down_;
+  std::vector<SequenceNumber> existing_snapshots_;
+  SequenceNumber earliest_write_conflict_snapshot_;
+  JobContext* job_context_;
+  LogBuffer* log_buffer_;
+  Directory* db_directory_;
+  Directory* output_file_directory_;
+  CompressionType output_compression_;
+  Statistics* stats_;
+  EventLogger* event_logger_;
+  TableProperties table_properties_;
+  bool measure_io_stats_;
+
+  // Variables below are set by PickMemTable():
+  FileMetaData meta_;
+  autovector<MemTable*> mems_;
+  VersionEdit* edit_;
+  Version* base_;
+  bool pick_memtable_called;
+};
+
+} // namespace rocksdb
diff --git a/external/rocksdb/db/flush_job_test.cc b/external/rocksdb/db/flush_job_test.cc
new file mode 100755
index 0000000000..9648c01e4a
--- /dev/null
+++ b/external/rocksdb/db/flush_job_test.cc
@@ -0,0 +1,218 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
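+//
+// Tests for FlushJob. The fixture builds a real VersionSet on top of a mock
+// table factory, so each test can run a flush and then verify the contents
+// of the single SST file it produced via AssertSingleFile().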
+
+#include <algorithm>
+#include <map>
+#include <string>
+
+#include "db/column_family.h"
+#include "db/flush_job.h"
+#include "db/version_set.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/write_buffer_manager.h"
+#include "table/mock_table.h"
+#include "util/file_reader_writer.h"
+#include "util/string_util.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace rocksdb {
+
+// TODO(icanadi) Mock out everything else:
+// 1. VersionSet
+// 2. Memtable
+class FlushJobTest : public testing::Test {
+ public:
+  FlushJobTest()
+      : env_(Env::Default()),
+        dbname_(test::TmpDir() + "/flush_job_test"),
+        table_cache_(NewLRUCache(50000, 16)),
+        write_buffer_manager_(db_options_.db_write_buffer_size),
+        versions_(new VersionSet(dbname_, &db_options_, env_options_,
+                                 table_cache_.get(), &write_buffer_manager_,
+                                 &write_controller_)),
+        shutting_down_(false),
+        mock_table_factory_(new mock::MockTableFactory()) {
+    EXPECT_OK(env_->CreateDirIfMissing(dbname_));
+    db_options_.db_paths.emplace_back(dbname_,
+                                      std::numeric_limits<uint64_t>::max());
+    // TODO(icanadi) Remove this once we mock out VersionSet
+    NewDB();
+    std::vector<ColumnFamilyDescriptor> column_families;
+    cf_options_.table_factory = mock_table_factory_;
+    column_families.emplace_back(kDefaultColumnFamilyName, cf_options_);
+
+    EXPECT_OK(versions_->Recover(column_families, false));
+  }
+
+  void NewDB() {
+    VersionEdit new_db;
+    new_db.SetLogNumber(0);
+    new_db.SetNextFile(2);
+    new_db.SetLastSequence(0);
+
+    const std::string manifest = DescriptorFileName(dbname_, 1);
+    unique_ptr<WritableFile> file;
+    Status s = env_->NewWritableFile(
+        manifest, &file, env_->OptimizeForManifestWrite(env_options_));
+    ASSERT_OK(s);
+    unique_ptr<WritableFileWriter> file_writer(
+        new WritableFileWriter(std::move(file), EnvOptions()));
+    {
+      log::Writer log(std::move(file_writer), 0, false);
+      std::string record;
+      new_db.EncodeTo(&record);
+      s = log.AddRecord(record);
+    }
+    ASSERT_OK(s);
+    // Make "CURRENT" file that points to the new manifest file.
+    s = SetCurrentFile(env_, dbname_, 1, nullptr);
+  }
+
+  Env* env_;
+  std::string dbname_;
+  EnvOptions env_options_;
+  std::shared_ptr<Cache> table_cache_;
+  WriteController write_controller_;
+  DBOptions db_options_;
+  WriteBufferManager write_buffer_manager_;
+  ColumnFamilyOptions cf_options_;
+  std::unique_ptr<VersionSet> versions_;
+  InstrumentedMutex mutex_;
+  std::atomic<bool> shutting_down_;
+  std::shared_ptr<mock::MockTableFactory> mock_table_factory_;
+};
+
+TEST_F(FlushJobTest, Empty) {
+  JobContext job_context(0);
+  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
+  EventLogger event_logger(db_options_.info_log.get());
+  FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
+                     db_options_, *cfd->GetLatestMutableCFOptions(),
+                     env_options_, versions_.get(), &mutex_, &shutting_down_,
+                     {}, kMaxSequenceNumber, &job_context, nullptr, nullptr,
+                     nullptr, kNoCompression, nullptr, &event_logger, false);
+  {
+    InstrumentedMutexLock l(&mutex_);
+    flush_job.PickMemTable();
+    ASSERT_OK(flush_job.Run());
+  }
+  job_context.Clean();
+}
+
+TEST_F(FlushJobTest, NonEmpty) {
+  JobContext job_context(0);
+  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
+  auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
+                                           kMaxSequenceNumber);
+  new_mem->Ref();
+  auto inserted_keys = mock::MakeMockFile();
+  // Test data:
+  //   seqno [    1,    2 ... 8998, 8999, 9000, 9001, 9002 ... 9999 ]
+  //   key   [ 1001, 1002 ... 9998, 9999,    0,    1,    2 ...  999 ]
+  // Expected:
+  //   smallest_key   = "0"
+  //   largest_key    = "9999"
+  //   smallest_seqno = 1
+  //   largest_seqno  = 9999
+  for (int i = 1; i < 10000; ++i) {
+    std::string key(ToString((i + 1000) % 10000));
+    std::string value("value" + key);
+    new_mem->Add(SequenceNumber(i), kTypeValue, key, value);
+    InternalKey internal_key(key, SequenceNumber(i), kTypeValue);
+    inserted_keys.insert({internal_key.Encode().ToString(), value});
+  }
+
+  autovector<MemTable*> to_delete;
+  cfd->imm()->Add(new_mem, &to_delete);
+  for (auto& m : to_delete) {
+    delete m;
+  }
+
+  EventLogger event_logger(db_options_.info_log.get());
+  FlushJob flush_job(dbname_, versions_->GetColumnFamilySet()->GetDefault(),
+                     db_options_, *cfd->GetLatestMutableCFOptions(),
+                     env_options_, versions_.get(), &mutex_, &shutting_down_,
+                     {}, kMaxSequenceNumber, &job_context, nullptr, nullptr,
+                     nullptr, kNoCompression, nullptr, &event_logger, true);
+  FileMetaData fd;
+  mutex_.Lock();
+  flush_job.PickMemTable();
+  ASSERT_OK(flush_job.Run(&fd));
+  mutex_.Unlock();
+  ASSERT_EQ(ToString(0), fd.smallest.user_key().ToString());
+  ASSERT_EQ(ToString(9999), fd.largest.user_key().ToString());
+  ASSERT_EQ(1, fd.smallest_seqno);
+  ASSERT_EQ(9999, fd.largest_seqno);
+  mock_table_factory_->AssertSingleFile(inserted_keys);
+  job_context.Clean();
+}
+
+TEST_F(FlushJobTest, Snapshots) {
+  JobContext job_context(0);
+  auto cfd = versions_->GetColumnFamilySet()->GetDefault();
+  auto new_mem = cfd->ConstructNewMemtable(*cfd->GetLatestMutableCFOptions(),
+                                           kMaxSequenceNumber);
+
+  std::vector<SequenceNumber> snapshots;
+  std::set<SequenceNumber> snapshots_set;
+  int keys = 10000;
+  int max_inserts_per_keys = 8;
+
+  Random rnd(301);
+  for (int i = 0; i < keys / 2; ++i) {
+    snapshots.push_back(rnd.Uniform(keys * (max_inserts_per_keys / 2)) + 1);
+    snapshots_set.insert(snapshots.back());
+  }
+  std::sort(snapshots.begin(), snapshots.end());
+
+  new_mem->Ref();
+  SequenceNumber current_seqno = 0;
+  auto inserted_keys = mock::MakeMockFile();
+  for (int i = 1; i < keys; ++i) {
+    std::string key(ToString(i));
+    int insertions = rnd.Uniform(max_inserts_per_keys);
+    for (int j = 0; j < insertions; ++j) {
+      std::string value(test::RandomHumanReadableString(&rnd, 10));
+      auto seqno = ++current_seqno;
+      new_mem->Add(SequenceNumber(seqno), kTypeValue, key, value);
+      // a key is visible only if:
+      // 1. it's the last one written (j == insertions - 1)
+      // 2. there's a snapshot pointing at it
+      bool visible = (j == insertions - 1) ||
+                     (snapshots_set.find(seqno) != snapshots_set.end());
+      if (visible) {
+        InternalKey internal_key(key, seqno, kTypeValue);
+        inserted_keys.insert({internal_key.Encode().ToString(), value});
+      }
+    }
+  }
+
+  autovector<MemTable*> to_delete;
+  cfd->imm()->Add(new_mem, &to_delete);
+  for (auto& m : to_delete) {
+    delete m;
+  }
+
+  EventLogger event_logger(db_options_.info_log.get());
+  FlushJob flush_job(
+      dbname_, versions_->GetColumnFamilySet()->GetDefault(), db_options_,
+      *cfd->GetLatestMutableCFOptions(), env_options_, versions_.get(), &mutex_,
+      &shutting_down_, snapshots, kMaxSequenceNumber, &job_context, nullptr,
+      nullptr, nullptr, kNoCompression, nullptr, &event_logger, true);
+  mutex_.Lock();
+  flush_job.PickMemTable();
+  ASSERT_OK(flush_job.Run());
+  mutex_.Unlock();
+  mock_table_factory_->AssertSingleFile(inserted_keys);
+  job_context.Clean();
+}
+
+} // namespace rocksdb
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/external/rocksdb/db/flush_scheduler.cc b/external/rocksdb/db/flush_scheduler.cc
new file mode 100755
index 0000000000..a961f7f0bc
--- /dev/null
+++ b/external/rocksdb/db/flush_scheduler.cc
@@ -0,0 +1,84 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "db/flush_scheduler.h"
+
+#include <cassert>
+
+#include "db/column_family.h"
+
+namespace rocksdb {
+
+void FlushScheduler::ScheduleFlush(ColumnFamilyData* cfd) {
+#ifndef NDEBUG
+  {
+    std::lock_guard<std::mutex> lock(checking_mutex_);
+    assert(checking_set_.count(cfd) == 0);
+    checking_set_.insert(cfd);
+  }
+#endif  // NDEBUG
+  cfd->Ref();
+// Suppress false positive clang analyzer warnings.
+#ifndef __clang_analyzer__
+  Node* node = new Node{cfd, head_.load(std::memory_order_relaxed)};
+  while (!head_.compare_exchange_strong(
+      node->next, node, std::memory_order_relaxed, std::memory_order_relaxed)) {
+    // failing CAS updates the first param, so we are already set for
+    // retry. TakeNextColumnFamily won't happen until after another
+    // inter-thread synchronization, so we don't even need release
+    // semantics for this CAS
+  }
+#endif  // __clang_analyzer__
+}
+
+ColumnFamilyData* FlushScheduler::TakeNextColumnFamily() {
+  while (true) {
+    if (Empty()) {
+      return nullptr;
+    }
+
+    // dequeue the head
+    Node* node = head_.load(std::memory_order_relaxed);
+    head_.store(node->next, std::memory_order_relaxed);
+    ColumnFamilyData* cfd = node->column_family;
+    delete node;
+
+#ifndef NDEBUG
+    {
+      auto iter = checking_set_.find(cfd);
+      assert(iter != checking_set_.end());
+      checking_set_.erase(iter);
+    }
+#endif  // NDEBUG
+
+    if (!cfd->IsDropped()) {
+      // success
+      return cfd;
+    }
+
+    // no longer relevant, retry
+    if (cfd->Unref()) {
+      delete cfd;
+    }
+  }
+}
+
+bool FlushScheduler::Empty() {
+  auto rv = head_.load(std::memory_order_relaxed) == nullptr;
+  assert(rv == checking_set_.empty());
+  return rv;
+}
+
+void FlushScheduler::Clear() {
+  ColumnFamilyData* cfd;
+  while ((cfd = TakeNextColumnFamily()) != nullptr) {
+    if (cfd->Unref()) {
+      delete cfd;
+    }
+  }
+  assert(Empty());
+}
+
+} // namespace rocksdb
diff --git a/external/rocksdb/db/flush_scheduler.h b/external/rocksdb/db/flush_scheduler.h
new file mode 100755
index 0000000000..820bd7b71c
--- /dev/null
+++ b/external/rocksdb/db/flush_scheduler.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <stdint.h>
+#include <atomic>
+#include <mutex>
+#include <set>
+
+namespace rocksdb {
+
+class ColumnFamilyData;
+
+// Unless otherwise noted, all methods on FlushScheduler should be called
+// only with the DB mutex held or from a single-threaded recovery context.
+class FlushScheduler {
+ public:
+  FlushScheduler() : head_(nullptr) {}
+
+  // May be called from multiple threads at once, but not concurrent with
+  // any other method calls on this instance
+  void ScheduleFlush(ColumnFamilyData* cfd);
+
+  // Removes and returns Ref()-ed column family. Client needs to Unref().
+  // Filters column families that have been dropped.
+  ColumnFamilyData* TakeNextColumnFamily();
+
+  bool Empty();
+
+  void Clear();
+
+ private:
+  struct Node {
+    ColumnFamilyData* column_family;
+    Node* next;
+  };
+
+  std::atomic<Node*> head_;
+#ifndef NDEBUG
+  std::mutex checking_mutex_;
+  std::set<ColumnFamilyData*> checking_set_;
+#endif  // NDEBUG
+};
+
+} // namespace rocksdb
diff --git a/external/rocksdb/db/forward_iterator.cc b/external/rocksdb/db/forward_iterator.cc
new file mode 100755
index 0000000000..f7eb8ca248
--- /dev/null
+++ b/external/rocksdb/db/forward_iterator.cc
@@ -0,0 +1,781 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
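+//
+// ForwardIterator only supports forward iteration: the LevelIterator helper
+// below returns Status::NotSupported() from SeekToLast() and Prev(), and
+// ForwardIterator itself merges the mutable memtable iterator with a
+// min-heap of immutable iterators (immutable_min_heap_) ordered by the
+// internal key comparator.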
+
+#ifndef ROCKSDB_LITE
+#include "db/forward_iterator.h"
+
+#include <limits>
+
+#include <string>
+#include <utility>
+
+#include "db/column_family.h"
+#include "db/db_impl.h"
+#include "db/db_iter.h"
+#include "db/dbformat.h"
+#include "db/job_context.h"
+#include "rocksdb/env.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/slice_transform.h"
+#include "table/merger.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+// Usage:
+//     LevelIterator iter;
+//     iter.SetFileIndex(file_index);
+//     iter.Seek(target);
+//     iter.Next()
+class LevelIterator : public InternalIterator {
+ public:
+  LevelIterator(const ColumnFamilyData* const cfd,
+                const ReadOptions& read_options,
+                const std::vector<FileMetaData*>& files)
+      : cfd_(cfd), read_options_(read_options), files_(files), valid_(false),
+        file_index_(std::numeric_limits<uint32_t>::max()) {}
+
+  void SetFileIndex(uint32_t file_index) {
+    assert(file_index < files_.size());
+    if (file_index != file_index_) {
+      file_index_ = file_index;
+      Reset();
+    }
+    valid_ = false;
+  }
+  void Reset() {
+    assert(file_index_ < files_.size());
+    file_iter_.reset(cfd_->table_cache()->NewIterator(
+        read_options_, *(cfd_->soptions()), cfd_->internal_comparator(),
+        files_[file_index_]->fd, nullptr /* table_reader_ptr */, nullptr,
+        false));
+  }
+  void SeekToLast() override {
+    status_ = Status::NotSupported("LevelIterator::SeekToLast()");
+    valid_ = false;
+  }
+  void Prev() override {
+    status_ = Status::NotSupported("LevelIterator::Prev()");
+    valid_ = false;
+  }
+  bool Valid() const override {
+    return valid_;
+  }
+  void SeekToFirst() override {
+    SetFileIndex(0);
+    file_iter_->SeekToFirst();
+    valid_ = file_iter_->Valid();
+  }
+  void Seek(const Slice& internal_key) override {
+    assert(file_iter_ != nullptr);
+    file_iter_->Seek(internal_key);
+    valid_ = file_iter_->Valid();
+  }
+  void Next() override {
+    assert(valid_);
+    file_iter_->Next();
+    for (;;) {
+      if (file_iter_->status().IsIncomplete() || file_iter_->Valid()) {
+        valid_ = !file_iter_->status().IsIncomplete();
+        return;
+      }
+      if (file_index_ + 1 >= files_.size()) {
+        valid_ = false;
+        return;
+      }
+      SetFileIndex(file_index_ + 1);
+      file_iter_->SeekToFirst();
+    }
+  }
+  Slice key() const override {
+    assert(valid_);
+    return file_iter_->key();
+  }
+  Slice value() const override {
+    assert(valid_);
+    return file_iter_->value();
+  }
+  Status status() const override {
+    if (!status_.ok()) {
+      return status_;
+    } else if (file_iter_ && !file_iter_->status().ok()) {
+      return file_iter_->status();
+    }
+    return Status::OK();
+  }
+
+ private:
+  const ColumnFamilyData* const cfd_;
+  const ReadOptions& read_options_;
+  const std::vector<FileMetaData*>& files_;
+
+  bool valid_;
+  uint32_t file_index_;
+  Status status_;
+  std::unique_ptr<InternalIterator> file_iter_;
+};
+
+ForwardIterator::ForwardIterator(DBImpl* db, const ReadOptions& read_options,
+                                 ColumnFamilyData* cfd,
+                                 SuperVersion* current_sv)
+    : db_(db),
+      read_options_(read_options),
+      cfd_(cfd),
+      prefix_extractor_(cfd->ioptions()->prefix_extractor),
+      user_comparator_(cfd->user_comparator()),
+      immutable_min_heap_(MinIterComparator(&cfd_->internal_comparator())),
+      sv_(current_sv),
+      mutable_iter_(nullptr),
+      current_(nullptr),
+      valid_(false),
+      status_(Status::OK()),
+      immutable_status_(Status::OK()),
+      has_iter_trimmed_for_upper_bound_(false),
+      current_over_upper_bound_(false),
+      is_prev_set_(false),
+      is_prev_inclusive_(false) {
+  if (sv_) {
+    RebuildIterators(false);
+  }
+}
+
+ForwardIterator::~ForwardIterator() {
+  Cleanup(true);
+}
+
+void ForwardIterator::SVCleanup() {
+  if (sv_ != nullptr &&
sv_->Unref()) { + // Job id == 0 means that this is not our background process, but rather + // user thread + JobContext job_context(0); + db_->mutex_.Lock(); + sv_->Cleanup(); + db_->FindObsoleteFiles(&job_context, false, true); + if (read_options_.background_purge_on_iterator_cleanup) { + db_->ScheduleBgLogWriterClose(&job_context); + } + db_->mutex_.Unlock(); + delete sv_; + if (job_context.HaveSomethingToDelete()) { + db_->PurgeObsoleteFiles( + job_context, read_options_.background_purge_on_iterator_cleanup); + } + job_context.Clean(); + } +} + +void ForwardIterator::Cleanup(bool release_sv) { + if (mutable_iter_ != nullptr) { + mutable_iter_->~InternalIterator(); + } + for (auto* m : imm_iters_) { + m->~InternalIterator(); + } + imm_iters_.clear(); + for (auto* f : l0_iters_) { + delete f; + } + l0_iters_.clear(); + for (auto* l : level_iters_) { + delete l; + } + level_iters_.clear(); + + if (release_sv) { + SVCleanup(); + } +} + +bool ForwardIterator::Valid() const { + // See UpdateCurrent(). + return valid_ ? !current_over_upper_bound_ : false; +} + +void ForwardIterator::SeekToFirst() { + if (sv_ == nullptr) { + RebuildIterators(true); + } else if (sv_->version_number != cfd_->GetSuperVersionNumber()) { + RenewIterators(); + } else if (immutable_status_.IsIncomplete()) { + ResetIncompleteIterators(); + } + SeekInternal(Slice(), true); +} + +bool ForwardIterator::IsOverUpperBound(const Slice& internal_key) const { + return !(read_options_.iterate_upper_bound == nullptr || + cfd_->internal_comparator().user_comparator()->Compare( + ExtractUserKey(internal_key), + *read_options_.iterate_upper_bound) < 0); +} + +void ForwardIterator::Seek(const Slice& internal_key) { + if (IsOverUpperBound(internal_key)) { + valid_ = false; + } + if (sv_ == nullptr) { + RebuildIterators(true); + } else if (sv_->version_number != cfd_->GetSuperVersionNumber()) { + RenewIterators(); + } else if (immutable_status_.IsIncomplete()) { + ResetIncompleteIterators(); + } + SeekInternal(internal_key, false); +} + +void ForwardIterator::SeekInternal(const Slice& internal_key, + bool seek_to_first) { + assert(mutable_iter_); + // mutable + seek_to_first ? mutable_iter_->SeekToFirst() : + mutable_iter_->Seek(internal_key); + + // immutable + // TODO(ljin): NeedToSeekImmutable has negative impact on performance + // if it turns to need to seek immutable often. We probably want to have + // an option to turn it off. + if (seek_to_first || NeedToSeekImmutable(internal_key)) { + immutable_status_ = Status::OK(); + if ((has_iter_trimmed_for_upper_bound_) && + (cfd_->internal_comparator().InternalKeyComparator::Compare( + prev_key_.GetKey(), internal_key) > 0)) { + // Some iterators are trimmed. Need to rebuild. + RebuildIterators(true); + // Already seeked mutable iter, so seek again + seek_to_first ? mutable_iter_->SeekToFirst() + : mutable_iter_->Seek(internal_key); + } + { + auto tmp = MinIterHeap(MinIterComparator(&cfd_->internal_comparator())); + immutable_min_heap_.swap(tmp); + } + for (size_t i = 0; i < imm_iters_.size(); i++) { + auto* m = imm_iters_[i]; + seek_to_first ? 
m->SeekToFirst() : m->Seek(internal_key);
+      if (!m->status().ok()) {
+        immutable_status_ = m->status();
+      } else if (m->Valid()) {
+        immutable_min_heap_.push(m);
+      }
+    }
+
+    Slice user_key;
+    if (!seek_to_first) {
+      user_key = ExtractUserKey(internal_key);
+    }
+    const VersionStorageInfo* vstorage = sv_->current->storage_info();
+    const std::vector<FileMetaData*>& l0 = vstorage->LevelFiles(0);
+    for (size_t i = 0; i < l0.size(); ++i) {
+      if (!l0_iters_[i]) {
+        continue;
+      }
+      if (seek_to_first) {
+        l0_iters_[i]->SeekToFirst();
+      } else {
+        // If the target key passes over the largest key, we are sure Next()
+        // won't go over this file.
+        if (user_comparator_->Compare(user_key,
+                                      l0[i]->largest.user_key()) > 0) {
+          if (read_options_.iterate_upper_bound != nullptr) {
+            has_iter_trimmed_for_upper_bound_ = true;
+            delete l0_iters_[i];
+            l0_iters_[i] = nullptr;
+          }
+          continue;
+        }
+        l0_iters_[i]->Seek(internal_key);
+      }
+
+      if (!l0_iters_[i]->status().ok()) {
+        immutable_status_ = l0_iters_[i]->status();
+      } else if (l0_iters_[i]->Valid()) {
+        if (!IsOverUpperBound(l0_iters_[i]->key())) {
+          immutable_min_heap_.push(l0_iters_[i]);
+        } else {
+          has_iter_trimmed_for_upper_bound_ = true;
+          delete l0_iters_[i];
+          l0_iters_[i] = nullptr;
+        }
+      }
+    }
+
+    int32_t search_left_bound = 0;
+    int32_t search_right_bound = FileIndexer::kLevelMaxIndex;
+    for (int32_t level = 1; level < vstorage->num_levels(); ++level) {
+      const std::vector<FileMetaData*>& level_files =
+          vstorage->LevelFiles(level);
+      if (level_files.empty()) {
+        search_left_bound = 0;
+        search_right_bound = FileIndexer::kLevelMaxIndex;
+        continue;
+      }
+      if (level_iters_[level - 1] == nullptr) {
+        continue;
+      }
+      uint32_t f_idx = 0;
+      const auto& indexer = vstorage->file_indexer();
+      if (!seek_to_first) {
+        if (search_left_bound == search_right_bound) {
+          f_idx = search_left_bound;
+        } else if (search_left_bound < search_right_bound) {
+          f_idx =
+              FindFileInRange(level_files, internal_key, search_left_bound,
+                              search_right_bound == FileIndexer::kLevelMaxIndex
+                                  ? static_cast<uint32_t>(level_files.size())
+                                  : search_right_bound);
+        } else {
+          // search_left_bound > search_right_bound
+          // There are only 2 cases this can happen:
+          // (1) target key is smaller than left most file
+          // (2) target key is larger than right most file
+          assert(search_left_bound == (int32_t)level_files.size() ||
+                 search_right_bound == -1);
+          if (search_right_bound == -1) {
+            assert(search_left_bound == 0);
+            f_idx = 0;
+          } else {
+            indexer.GetNextLevelIndex(
+                level, level_files.size() - 1,
+                1, 1, &search_left_bound, &search_right_bound);
+            continue;
+          }
+        }
+
+        // Prepare hints for the next level
+        if (f_idx < level_files.size()) {
+          int cmp_smallest = user_comparator_->Compare(
+              user_key, level_files[f_idx]->smallest.user_key());
+          assert(user_comparator_->Compare(
+                     user_key, level_files[f_idx]->largest.user_key()) <= 0);
+          indexer.GetNextLevelIndex(level, f_idx, cmp_smallest, -1,
+                                    &search_left_bound, &search_right_bound);
+        } else {
+          indexer.GetNextLevelIndex(
+              level, level_files.size() - 1,
+              1, 1, &search_left_bound, &search_right_bound);
+        }
+      }
+
+      // Seek
+      if (f_idx < level_files.size()) {
+        level_iters_[level - 1]->SetFileIndex(f_idx);
+        seek_to_first ?
level_iters_[level - 1]->SeekToFirst() : + level_iters_[level - 1]->Seek(internal_key); + + if (!level_iters_[level - 1]->status().ok()) { + immutable_status_ = level_iters_[level - 1]->status(); + } else if (level_iters_[level - 1]->Valid()) { + if (!IsOverUpperBound(level_iters_[level - 1]->key())) { + immutable_min_heap_.push(level_iters_[level - 1]); + } else { + // Nothing in this level is interesting. Remove. + has_iter_trimmed_for_upper_bound_ = true; + delete level_iters_[level - 1]; + level_iters_[level - 1] = nullptr; + } + } + } + } + + if (seek_to_first) { + is_prev_set_ = false; + } else { + prev_key_.SetKey(internal_key); + is_prev_set_ = true; + is_prev_inclusive_ = true; + } + + TEST_SYNC_POINT_CALLBACK("ForwardIterator::SeekInternal:Immutable", this); + } else if (current_ && current_ != mutable_iter_) { + // current_ is one of immutable iterators, push it back to the heap + immutable_min_heap_.push(current_); + } + + UpdateCurrent(); + TEST_SYNC_POINT_CALLBACK("ForwardIterator::SeekInternal:Return", this); +} + +void ForwardIterator::Next() { + assert(valid_); + bool update_prev_key = false; + + if (sv_ == nullptr || + sv_->version_number != cfd_->GetSuperVersionNumber()) { + std::string current_key = key().ToString(); + Slice old_key(current_key.data(), current_key.size()); + + if (sv_ == nullptr) { + RebuildIterators(true); + } else { + RenewIterators(); + } + SeekInternal(old_key, false); + if (!valid_ || key().compare(old_key) != 0) { + return; + } + } else if (current_ != mutable_iter_) { + // It is going to advance immutable iterator + + if (is_prev_set_ && prefix_extractor_) { + // advance prev_key_ to current_ only if they share the same prefix + update_prev_key = + prefix_extractor_->Transform(prev_key_.GetKey()).compare( + prefix_extractor_->Transform(current_->key())) == 0; + } else { + update_prev_key = true; + } + + + if (update_prev_key) { + prev_key_.SetKey(current_->key()); + is_prev_set_ = true; + is_prev_inclusive_ = false; + } + } + + current_->Next(); + if (current_ != mutable_iter_) { + if (!current_->status().ok()) { + immutable_status_ = current_->status(); + } else if ((current_->Valid()) && (!IsOverUpperBound(current_->key()))) { + immutable_min_heap_.push(current_); + } else { + if ((current_->Valid()) && (IsOverUpperBound(current_->key()))) { + // remove the current iterator + DeleteCurrentIter(); + current_ = nullptr; + } + if (update_prev_key) { + mutable_iter_->Seek(prev_key_.GetKey()); + } + } + } + UpdateCurrent(); + TEST_SYNC_POINT_CALLBACK("ForwardIterator::Next:Return", this); +} + +Slice ForwardIterator::key() const { + assert(valid_); + return current_->key(); +} + +Slice ForwardIterator::value() const { + assert(valid_); + return current_->value(); +} + +Status ForwardIterator::status() const { + if (!status_.ok()) { + return status_; + } else if (!mutable_iter_->status().ok()) { + return mutable_iter_->status(); + } + + return immutable_status_; +} + +Status ForwardIterator::GetProperty(std::string prop_name, std::string* prop) { + assert(prop != nullptr); + if (prop_name == "rocksdb.iterator.super-version-number") { + *prop = ToString(sv_->version_number); + return Status::OK(); + } + return Status::InvalidArgument(); +} + +void ForwardIterator::RebuildIterators(bool refresh_sv) { + // Clean up + Cleanup(refresh_sv); + if (refresh_sv) { + // New + sv_ = cfd_->GetReferencedSuperVersion(&(db_->mutex_)); + } + mutable_iter_ = sv_->mem->NewIterator(read_options_, &arena_); + sv_->imm->AddIterators(read_options_, &imm_iters_, &arena_); + 
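+  // L0 files whose smallest key already lies beyond iterate_upper_bound can
+  // never yield visible entries; the loop below skips creating iterators for
+  // them and records the trim so that a later Seek() knows it may need to
+  // rebuild them.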
has_iter_trimmed_for_upper_bound_ = false; + + const auto* vstorage = sv_->current->storage_info(); + const auto& l0_files = vstorage->LevelFiles(0); + l0_iters_.reserve(l0_files.size()); + for (const auto* l0 : l0_files) { + if ((read_options_.iterate_upper_bound != nullptr) && + cfd_->internal_comparator().user_comparator()->Compare( + l0->smallest.user_key(), *read_options_.iterate_upper_bound) > 0) { + has_iter_trimmed_for_upper_bound_ = true; + l0_iters_.push_back(nullptr); + continue; + } + l0_iters_.push_back(cfd_->table_cache()->NewIterator( + read_options_, *cfd_->soptions(), cfd_->internal_comparator(), l0->fd)); + } + BuildLevelIterators(vstorage); + current_ = nullptr; + is_prev_set_ = false; +} + +void ForwardIterator::RenewIterators() { + SuperVersion* svnew; + assert(sv_); + svnew = cfd_->GetReferencedSuperVersion(&(db_->mutex_)); + + if (mutable_iter_ != nullptr) { + mutable_iter_->~InternalIterator(); + } + for (auto* m : imm_iters_) { + m->~InternalIterator(); + } + imm_iters_.clear(); + + mutable_iter_ = svnew->mem->NewIterator(read_options_, &arena_); + svnew->imm->AddIterators(read_options_, &imm_iters_, &arena_); + + const auto* vstorage = sv_->current->storage_info(); + const auto& l0_files = vstorage->LevelFiles(0); + const auto* vstorage_new = svnew->current->storage_info(); + const auto& l0_files_new = vstorage_new->LevelFiles(0); + size_t iold, inew; + bool found; + std::vector l0_iters_new; + l0_iters_new.reserve(l0_files_new.size()); + + for (inew = 0; inew < l0_files_new.size(); inew++) { + found = false; + for (iold = 0; iold < l0_files.size(); iold++) { + if (l0_files[iold] == l0_files_new[inew]) { + found = true; + break; + } + } + if (found) { + if (l0_iters_[iold] == nullptr) { + l0_iters_new.push_back(nullptr); + TEST_SYNC_POINT_CALLBACK("ForwardIterator::RenewIterators:Null", this); + } else { + l0_iters_new.push_back(l0_iters_[iold]); + l0_iters_[iold] = nullptr; + TEST_SYNC_POINT_CALLBACK("ForwardIterator::RenewIterators:Copy", this); + } + continue; + } + l0_iters_new.push_back(cfd_->table_cache()->NewIterator( + read_options_, *cfd_->soptions(), cfd_->internal_comparator(), + l0_files_new[inew]->fd)); + } + + for (auto* f : l0_iters_) { + delete f; + } + l0_iters_.clear(); + l0_iters_ = l0_iters_new; + + for (auto* l : level_iters_) { + delete l; + } + level_iters_.clear(); + BuildLevelIterators(vstorage_new); + current_ = nullptr; + is_prev_set_ = false; + SVCleanup(); + sv_ = svnew; +} + +void ForwardIterator::BuildLevelIterators(const VersionStorageInfo* vstorage) { + level_iters_.reserve(vstorage->num_levels() - 1); + for (int32_t level = 1; level < vstorage->num_levels(); ++level) { + const auto& level_files = vstorage->LevelFiles(level); + if ((level_files.empty()) || + ((read_options_.iterate_upper_bound != nullptr) && + (user_comparator_->Compare(*read_options_.iterate_upper_bound, + level_files[0]->smallest.user_key()) < + 0))) { + level_iters_.push_back(nullptr); + if (!level_files.empty()) { + has_iter_trimmed_for_upper_bound_ = true; + } + } else { + level_iters_.push_back( + new LevelIterator(cfd_, read_options_, level_files)); + } + } +} + +void ForwardIterator::ResetIncompleteIterators() { + const auto& l0_files = sv_->current->storage_info()->LevelFiles(0); + for (size_t i = 0; i < l0_iters_.size(); ++i) { + assert(i < l0_files.size()); + if (!l0_iters_[i] || !l0_iters_[i]->status().IsIncomplete()) { + continue; + } + delete l0_iters_[i]; + l0_iters_[i] = cfd_->table_cache()->NewIterator( + read_options_, *cfd_->soptions(), 
cfd_->internal_comparator(), + l0_files[i]->fd); + } + + for (auto* level_iter : level_iters_) { + if (level_iter && level_iter->status().IsIncomplete()) { + level_iter->Reset(); + } + } + + current_ = nullptr; + is_prev_set_ = false; +} + +void ForwardIterator::UpdateCurrent() { + if (immutable_min_heap_.empty() && !mutable_iter_->Valid()) { + current_ = nullptr; + } else if (immutable_min_heap_.empty()) { + current_ = mutable_iter_; + } else if (!mutable_iter_->Valid()) { + current_ = immutable_min_heap_.top(); + immutable_min_heap_.pop(); + } else { + current_ = immutable_min_heap_.top(); + assert(current_ != nullptr); + assert(current_->Valid()); + int cmp = cfd_->internal_comparator().InternalKeyComparator::Compare( + mutable_iter_->key(), current_->key()); + assert(cmp != 0); + if (cmp > 0) { + immutable_min_heap_.pop(); + } else { + current_ = mutable_iter_; + } + } + valid_ = (current_ != nullptr); + if (!status_.ok()) { + status_ = Status::OK(); + } + + // Upper bound doesn't apply to the memtable iterator. We want Valid() to + // return false when all iterators are over iterate_upper_bound, but can't + // just set valid_ to false, as that would effectively disable the tailing + // optimization (Seek() would be called on all immutable iterators regardless + // of whether the target key is greater than prev_key_). + current_over_upper_bound_ = valid_ && IsOverUpperBound(current_->key()); +} + +bool ForwardIterator::NeedToSeekImmutable(const Slice& target) { + // We maintain the interval (prev_key_, immutable_min_heap_.top()->key()) + // such that there are no records with keys within that range in + // immutable_min_heap_. Since immutable structures (SST files and immutable + // memtables) can't change in this version, we don't need to do a seek if + // 'target' belongs to that interval (immutable_min_heap_.top() is already + // at the correct position). + + if (!valid_ || !current_ || !is_prev_set_ || !immutable_status_.ok()) { + return true; + } + Slice prev_key = prev_key_.GetKey(); + if (prefix_extractor_ && prefix_extractor_->Transform(target).compare( + prefix_extractor_->Transform(prev_key)) != 0) { + return true; + } + if (cfd_->internal_comparator().InternalKeyComparator::Compare( + prev_key, target) >= (is_prev_inclusive_ ? 1 : 0)) { + return true; + } + + if (immutable_min_heap_.empty() && current_ == mutable_iter_) { + // Nothing to seek on. + return false; + } + if (cfd_->internal_comparator().InternalKeyComparator::Compare( + target, current_ == mutable_iter_ ? 
immutable_min_heap_.top()->key() + : current_->key()) > 0) { + return true; + } + return false; +} + +void ForwardIterator::DeleteCurrentIter() { + const VersionStorageInfo* vstorage = sv_->current->storage_info(); + const std::vector& l0 = vstorage->LevelFiles(0); + for (size_t i = 0; i < l0.size(); ++i) { + if (!l0_iters_[i]) { + continue; + } + if (l0_iters_[i] == current_) { + has_iter_trimmed_for_upper_bound_ = true; + delete l0_iters_[i]; + l0_iters_[i] = nullptr; + return; + } + } + + for (int32_t level = 1; level < vstorage->num_levels(); ++level) { + if (level_iters_[level - 1] == nullptr) { + continue; + } + if (level_iters_[level - 1] == current_) { + has_iter_trimmed_for_upper_bound_ = true; + delete level_iters_[level - 1]; + level_iters_[level - 1] = nullptr; + } + } +} + +bool ForwardIterator::TEST_CheckDeletedIters(int* pdeleted_iters, + int* pnum_iters) { + bool retval = false; + int deleted_iters = 0; + int num_iters = 0; + + const VersionStorageInfo* vstorage = sv_->current->storage_info(); + const std::vector& l0 = vstorage->LevelFiles(0); + for (size_t i = 0; i < l0.size(); ++i) { + if (!l0_iters_[i]) { + retval = true; + deleted_iters++; + } else { + num_iters++; + } + } + + for (int32_t level = 1; level < vstorage->num_levels(); ++level) { + if ((level_iters_[level - 1] == nullptr) && + (!vstorage->LevelFiles(level).empty())) { + retval = true; + deleted_iters++; + } else if (!vstorage->LevelFiles(level).empty()) { + num_iters++; + } + } + if ((!retval) && num_iters <= 1) { + retval = true; + } + if (pdeleted_iters) { + *pdeleted_iters = deleted_iters; + } + if (pnum_iters) { + *pnum_iters = num_iters; + } + return retval; +} + +uint32_t ForwardIterator::FindFileInRange( + const std::vector& files, const Slice& internal_key, + uint32_t left, uint32_t right) { + while (left < right) { + uint32_t mid = (left + right) / 2; + const FileMetaData* f = files[mid]; + if (cfd_->internal_comparator().InternalKeyComparator::Compare( + f->largest.Encode(), internal_key) < 0) { + // Key at "mid.largest" is < "target". Therefore all + // files at or before "mid" are uninteresting. + left = mid + 1; + } else { + // Key at "mid.largest" is >= "target". Therefore all files + // after "mid" are uninteresting. + right = mid; + } + } + return right; +} + +} // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/db/forward_iterator.h b/external/rocksdb/db/forward_iterator.h new file mode 100755 index 0000000000..b5beeceefc --- /dev/null +++ b/external/rocksdb/db/forward_iterator.h @@ -0,0 +1,136 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
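FindFileInRange() above is a plain lower-bound binary search over a level's files, which are sorted and non-overlapping by key range. A minimal standalone sketch of the same idea, with a hypothetical FileMeta struct (a plain string largest key standing in for FileMetaData and the internal key comparator):

#include <cstdint>
#include <string>
#include <vector>

// Hypothetical stand-in for FileMetaData; only the largest key matters here.
struct FileMeta {
  std::string largest;
};

// Returns the index of the first file whose largest key is >= target, i.e.
// the first file that could contain the target. Files before it end too
// early to be interesting, exactly as in ForwardIterator::FindFileInRange().
uint32_t FindFileInRange(const std::vector<FileMeta*>& files,
                         const std::string& target, uint32_t left,
                         uint32_t right) {
  while (left < right) {
    uint32_t mid = (left + right) / 2;
    if (files[mid]->largest < target) {
      left = mid + 1;  // files[0..mid] all end before target; skip them
    } else {
      right = mid;     // files[mid] may still contain target; keep it
    }
  }
  return right;
}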
+#pragma once
+
+#ifndef ROCKSDB_LITE
+
+#include <string>
+#include <vector>
+#include <queue>
+
+#include "rocksdb/db.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/options.h"
+#include "db/dbformat.h"
+#include "table/internal_iterator.h"
+#include "util/arena.h"
+
+namespace rocksdb {
+
+class DBImpl;
+class Env;
+struct SuperVersion;
+class ColumnFamilyData;
+class LevelIterator;
+class VersionStorageInfo;
+struct FileMetaData;
+
+class MinIterComparator {
+ public:
+  explicit MinIterComparator(const Comparator* comparator) :
+    comparator_(comparator) {}
+
+  bool operator()(InternalIterator* a, InternalIterator* b) {
+    return comparator_->Compare(a->key(), b->key()) > 0;
+  }
+ private:
+  const Comparator* comparator_;
+};
+
+typedef std::priority_queue<InternalIterator*,
+                            std::vector<InternalIterator*>,
+                            MinIterComparator> MinIterHeap;
+
+/**
+ * ForwardIterator is a special type of iterator that only supports Seek()
+ * and Next(). It is expected to perform better than TailingIterator by
+ * removing the encapsulation and making all information accessible within
+ * the iterator. In the current implementation, the snapshot is taken at the
+ * time Seek() is called; subsequent Next() calls do not see values written
+ * after that point.
+ */
+class ForwardIterator : public InternalIterator {
+ public:
+  ForwardIterator(DBImpl* db, const ReadOptions& read_options,
+                  ColumnFamilyData* cfd, SuperVersion* current_sv = nullptr);
+  virtual ~ForwardIterator();
+
+  void SeekToLast() override {
+    status_ = Status::NotSupported("ForwardIterator::SeekToLast()");
+    valid_ = false;
+  }
+  void Prev() override {
+    status_ = Status::NotSupported("ForwardIterator::Prev");
+    valid_ = false;
+  }
+
+  virtual bool Valid() const override;
+  void SeekToFirst() override;
+  virtual void Seek(const Slice& target) override;
+  virtual void Next() override;
+  virtual Slice key() const override;
+  virtual Slice value() const override;
+  virtual Status status() const override;
+  virtual Status GetProperty(std::string prop_name, std::string* prop) override;
+
+  bool TEST_CheckDeletedIters(int* deleted_iters, int* num_iters);
+
+ private:
+  void Cleanup(bool release_sv);
+  void SVCleanup();
+  void RebuildIterators(bool refresh_sv);
+  void RenewIterators();
+  void BuildLevelIterators(const VersionStorageInfo* vstorage);
+  void ResetIncompleteIterators();
+  void SeekInternal(const Slice& internal_key, bool seek_to_first);
+  void UpdateCurrent();
+  bool NeedToSeekImmutable(const Slice& internal_key);
+  void DeleteCurrentIter();
+  uint32_t FindFileInRange(
+      const std::vector<FileMetaData*>& files, const Slice& internal_key,
+      uint32_t left, uint32_t right);
+
+  bool IsOverUpperBound(const Slice& internal_key) const;
+
+  DBImpl* const db_;
+  const ReadOptions read_options_;
+  ColumnFamilyData* const cfd_;
+  const SliceTransform* const prefix_extractor_;
+  const Comparator* user_comparator_;
+  MinIterHeap immutable_min_heap_;
+
+  SuperVersion* sv_;
+  InternalIterator* mutable_iter_;
+  std::vector<InternalIterator*> imm_iters_;
+  std::vector<InternalIterator*> l0_iters_;
+  std::vector<LevelIterator*> level_iters_;
+  InternalIterator* current_;
+  bool valid_;
+
+  // Internal iterator status; set only by one of the unsupported methods.
+  Status status_;
+  // Status of immutable iterators, maintained here to avoid iterating over
+  // all of them in status().
+  Status immutable_status_;
+  // Indicates that at least one of the immutable iterators pointed to a key
+  // larger than iterate_upper_bound and was therefore destroyed. Seek() may
+  // need to rebuild such iterators.
+  bool has_iter_trimmed_for_upper_bound_;
+  // Is current key larger than iterate_upper_bound?
If so, makes Valid() + // return false. + bool current_over_upper_bound_; + + // Left endpoint of the range of keys that immutable iterators currently + // cover. When Seek() is called with a key that's within that range, immutable + // iterators don't need to be moved; see NeedToSeekImmutable(). This key is + // included in the range after a Seek(), but excluded when advancing the + // iterator using Next(). + IterKey prev_key_; + bool is_prev_set_; + bool is_prev_inclusive_; + + Arena arena_; +}; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/db/forward_iterator_bench.cc b/external/rocksdb/db/forward_iterator_bench.cc new file mode 100755 index 0000000000..0f44a9e448 --- /dev/null +++ b/external/rocksdb/db/forward_iterator_bench.cc @@ -0,0 +1,374 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#if !defined(GFLAGS) || defined(ROCKSDB_LITE) +#include +int main() { + fprintf(stderr, "Please install gflags to run rocksdb tools\n"); + return 1; +} +#elif defined(OS_MACOSX) || defined(OS_WIN) +// Block forward_iterator_bench under MAC and Windows +int main() { return 0; } +#else +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "rocksdb/cache.h" +#include "rocksdb/db.h" +#include "rocksdb/status.h" +#include "rocksdb/table.h" +#include "util/testharness.h" + +const int MAX_SHARDS = 100000; + +DEFINE_int32(writers, 8, ""); +DEFINE_int32(readers, 8, ""); +DEFINE_int64(rate, 100000, ""); +DEFINE_int64(value_size, 300, ""); +DEFINE_int64(shards, 1000, ""); +DEFINE_int64(memtable_size, 500000000, ""); +DEFINE_int64(block_cache_size, 300000000, ""); +DEFINE_int64(block_size, 65536, ""); +DEFINE_double(runtime, 300.0, ""); +DEFINE_bool(cache_only_first, true, ""); +DEFINE_bool(iterate_upper_bound, true, ""); + +struct Stats { + char pad1[128] __attribute__((__unused__)); + std::atomic written{0}; + char pad2[128] __attribute__((__unused__)); + std::atomic read{0}; + std::atomic cache_misses{0}; + char pad3[128] __attribute__((__unused__)); +} stats; + +struct Key { + Key() {} + Key(uint64_t shard_in, uint64_t seqno_in) + : shard_be(htobe64(shard_in)), seqno_be(htobe64(seqno_in)) {} + + uint64_t shard() const { return be64toh(shard_be); } + uint64_t seqno() const { return be64toh(seqno_be); } + + private: + uint64_t shard_be; + uint64_t seqno_be; +} __attribute__((__packed__)); + +struct Reader; +struct Writer; + +struct ShardState { + char pad1[128] __attribute__((__unused__)); + std::atomic last_written{0}; + Writer* writer; + Reader* reader; + char pad2[128] __attribute__((__unused__)); + std::atomic last_read{0}; + std::unique_ptr it; + std::unique_ptr it_cacheonly; + Key upper_bound; + rocksdb::Slice upper_bound_slice; + char pad3[128] __attribute__((__unused__)); +}; + +struct Reader { + public: + explicit Reader(std::vector* shard_states, rocksdb::DB* db) + : shard_states_(shard_states), db_(db) { + sem_init(&sem_, 0, 0); + thread_ = std::thread(&Reader::run, this); + } + + void run() { + while (1) { + sem_wait(&sem_); + if (done_.load()) { + break; + } + + uint64_t shard; + { + std::lock_guard guard(queue_mutex_); + assert(!shards_pending_queue_.empty()); + 
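+        // Pop one pending shard id under the queue mutex; the bitset mirrors
+        // queue membership so that onWrite() enqueues each shard at most once.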
shard = shards_pending_queue_.front(); + shards_pending_queue_.pop(); + shards_pending_set_.reset(shard); + } + readOnceFromShard(shard); + } + } + + void readOnceFromShard(uint64_t shard) { + ShardState& state = (*shard_states_)[shard]; + if (!state.it) { + // Initialize iterators + rocksdb::ReadOptions options; + options.tailing = true; + if (FLAGS_iterate_upper_bound) { + state.upper_bound = Key(shard, std::numeric_limits::max()); + state.upper_bound_slice = rocksdb::Slice( + (const char*)&state.upper_bound, sizeof(state.upper_bound)); + options.iterate_upper_bound = &state.upper_bound_slice; + } + + state.it.reset(db_->NewIterator(options)); + + if (FLAGS_cache_only_first) { + options.read_tier = rocksdb::ReadTier::kBlockCacheTier; + state.it_cacheonly.reset(db_->NewIterator(options)); + } + } + + const uint64_t upto = state.last_written.load(); + for (rocksdb::Iterator* it : {state.it_cacheonly.get(), state.it.get()}) { + if (it == nullptr) { + continue; + } + if (state.last_read.load() >= upto) { + break; + } + bool need_seek = true; + for (uint64_t seq = state.last_read.load() + 1; seq <= upto; ++seq) { + if (need_seek) { + Key from(shard, state.last_read.load() + 1); + it->Seek(rocksdb::Slice((const char*)&from, sizeof(from))); + need_seek = false; + } else { + it->Next(); + } + if (it->status().IsIncomplete()) { + ++::stats.cache_misses; + break; + } + assert(it->Valid()); + assert(it->key().size() == sizeof(Key)); + Key key; + memcpy(&key, it->key().data(), it->key().size()); + // fprintf(stderr, "Expecting (%ld, %ld) read (%ld, %ld)\n", + // shard, seq, key.shard(), key.seqno()); + assert(key.shard() == shard); + assert(key.seqno() == seq); + state.last_read.store(seq); + ++::stats.read; + } + } + } + + void onWrite(uint64_t shard) { + { + std::lock_guard guard(queue_mutex_); + if (!shards_pending_set_.test(shard)) { + shards_pending_queue_.push(shard); + shards_pending_set_.set(shard); + sem_post(&sem_); + } + } + } + + ~Reader() { + done_.store(true); + sem_post(&sem_); + thread_.join(); + } + + private: + char pad1[128] __attribute__((__unused__)); + std::vector* shard_states_; + rocksdb::DB* db_; + std::thread thread_; + sem_t sem_; + std::mutex queue_mutex_; + std::bitset shards_pending_set_; + std::queue shards_pending_queue_; + std::atomic done_{false}; + char pad2[128] __attribute__((__unused__)); +}; + +struct Writer { + explicit Writer(std::vector* shard_states, rocksdb::DB* db) + : shard_states_(shard_states), db_(db) {} + + void start() { thread_ = std::thread(&Writer::run, this); } + + void run() { + std::queue workq; + std::chrono::steady_clock::time_point deadline( + std::chrono::steady_clock::now() + + std::chrono::nanoseconds((uint64_t)(1000000000 * FLAGS_runtime))); + std::vector my_shards; + for (int i = 1; i <= FLAGS_shards; ++i) { + if ((*shard_states_)[i].writer == this) { + my_shards.push_back(i); + } + } + + std::mt19937 rng{std::random_device()()}; + std::uniform_int_distribution shard_dist( + 0, static_cast(my_shards.size()) - 1); + std::string value(FLAGS_value_size, '*'); + + while (1) { + auto now = std::chrono::steady_clock::now(); + if (FLAGS_runtime >= 0 && now >= deadline) { + break; + } + if (workq.empty()) { + for (int i = 0; i < FLAGS_rate; i += FLAGS_writers) { + std::chrono::nanoseconds offset(1000000000LL * i / FLAGS_rate); + workq.push(now + offset); + } + } + while (!workq.empty() && workq.front() < now) { + workq.pop(); + uint64_t shard = my_shards[shard_dist(rng)]; + ShardState& state = (*shard_states_)[shard]; + uint64_t seqno = 
state.last_written.load() + 1; + Key key(shard, seqno); + // fprintf(stderr, "Writing (%ld, %ld)\n", shard, seqno); + rocksdb::Status status = + db_->Put(rocksdb::WriteOptions(), + rocksdb::Slice((const char*)&key, sizeof(key)), + rocksdb::Slice(value)); + assert(status.ok()); + state.last_written.store(seqno); + state.reader->onWrite(shard); + ++::stats.written; + } + std::this_thread::sleep_for(std::chrono::milliseconds(1)); + } + // fprintf(stderr, "Writer done\n"); + } + + ~Writer() { thread_.join(); } + + private: + char pad1[128] __attribute__((__unused__)); + std::vector* shard_states_; + rocksdb::DB* db_; + std::thread thread_; + char pad2[128] __attribute__((__unused__)); +}; + +struct StatsThread { + explicit StatsThread(rocksdb::DB* db) + : db_(db), thread_(&StatsThread::run, this) {} + + void run() { + // using namespace std::chrono; + auto tstart = std::chrono::steady_clock::now(), tlast = tstart; + uint64_t wlast = 0, rlast = 0; + while (!done_.load()) { + { + std::unique_lock lock(cvm_); + cv_.wait_for(lock, std::chrono::seconds(1)); + } + auto now = std::chrono::steady_clock::now(); + double elapsed = + std::chrono::duration_cast >( + now - tlast).count(); + uint64_t w = ::stats.written.load(); + uint64_t r = ::stats.read.load(); + fprintf(stderr, + "%s elapsed %4lds | written %10ld | w/s %10.0f | read %10ld | " + "r/s %10.0f | cache misses %10ld\n", + db_->GetEnv()->TimeToString(time(nullptr)).c_str(), + std::chrono::duration_cast(now - tstart) + .count(), + w, (w - wlast) / elapsed, r, (r - rlast) / elapsed, + ::stats.cache_misses.load()); + wlast = w; + rlast = r; + tlast = now; + } + } + + ~StatsThread() { + { + std::lock_guard guard(cvm_); + done_.store(true); + } + cv_.notify_all(); + thread_.join(); + } + + private: + rocksdb::DB* db_; + std::mutex cvm_; + std::condition_variable cv_; + std::thread thread_; + std::atomic done_{false}; +}; + +int main(int argc, char** argv) { + GFLAGS::ParseCommandLineFlags(&argc, &argv, true); + + std::mt19937 rng{std::random_device()()}; + rocksdb::Status status; + std::string path = rocksdb::test::TmpDir() + "/forward_iterator_test"; + fprintf(stderr, "db path is %s\n", path.c_str()); + rocksdb::Options options; + options.create_if_missing = true; + options.compression = rocksdb::CompressionType::kNoCompression; + options.compaction_style = rocksdb::CompactionStyle::kCompactionStyleNone; + options.level0_slowdown_writes_trigger = 99999; + options.level0_stop_writes_trigger = 99999; + options.allow_os_buffer = false; + options.write_buffer_size = FLAGS_memtable_size; + rocksdb::BlockBasedTableOptions table_options; + table_options.block_cache = rocksdb::NewLRUCache(FLAGS_block_cache_size); + table_options.block_size = FLAGS_block_size; + options.table_factory.reset( + rocksdb::NewBlockBasedTableFactory(table_options)); + + status = rocksdb::DestroyDB(path, options); + assert(status.ok()); + rocksdb::DB* db_raw; + status = rocksdb::DB::Open(options, path, &db_raw); + assert(status.ok()); + std::unique_ptr db(db_raw); + + std::vector shard_states(FLAGS_shards + 1); + std::deque readers; + while (static_cast(readers.size()) < FLAGS_readers) { + readers.emplace_back(&shard_states, db_raw); + } + std::deque writers; + while (static_cast(writers.size()) < FLAGS_writers) { + writers.emplace_back(&shard_states, db_raw); + } + + // Each shard gets a random reader and random writer assigned to it + for (int i = 1; i <= FLAGS_shards; ++i) { + std::uniform_int_distribution reader_dist(0, FLAGS_readers - 1); + std::uniform_int_distribution 
<int> writer_dist(0, FLAGS_writers - 1);
+    shard_states[i].reader = &readers[reader_dist(rng)];
+    shard_states[i].writer = &writers[writer_dist(rng)];
+  }
+
+  StatsThread stats_thread(db_raw);
+  for (Writer& w : writers) {
+    w.start();
+  }
+
+  writers.clear();
+  readers.clear();
+}
+#endif  // !defined(GFLAGS) || defined(ROCKSDB_LITE)
diff --git a/external/rocksdb/db/inlineskiplist.h b/external/rocksdb/db/inlineskiplist.h
new file mode 100755
index 0000000000..cfd47f39f4
--- /dev/null
+++ b/external/rocksdb/db/inlineskiplist.h
@@ -0,0 +1,657 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional
+// grant of patent rights can be found in the PATENTS file in the same
+// directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved. Use of
+// this source code is governed by a BSD-style license that can be found
+// in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// InlineSkipList is derived from SkipList (skiplist.h), but it optimizes
+// the memory layout by requiring that the key storage be allocated through
+// the skip list instance. For the common case of SkipList<const char*,
+// Cmp> this saves 1 pointer per skip list node and gives better cache
+// locality, at the expense of wasted padding from using AllocateAligned
+// instead of Allocate for the keys. The unused padding will be from
+// 0 to sizeof(void*)-1 bytes, and the space savings are sizeof(void*)
+// bytes, so despite the padding the space used is always less than
+// SkipList<const char*, ..>.
+//
+// Thread safety
+// -------------
+//
+// Writes via Insert require external synchronization, most likely a mutex.
+// InsertConcurrently can be safely called concurrently with reads and
+// with other concurrent inserts. Reads require a guarantee that the
+// InlineSkipList will not be destroyed while the read is in progress.
+// Apart from that, reads progress without any internal locking or
+// synchronization.
+//
+// Invariants:
+//
+// (1) Allocated nodes are never deleted until the InlineSkipList is
+// destroyed. This is trivially guaranteed by the code since we never
+// delete any skip list nodes.
+//
+// (2) The contents of a Node except for the next/prev pointers are
+// immutable after the Node has been linked into the InlineSkipList.
+// Only Insert() modifies the list, and it is careful to initialize a
+// node and use release-stores to publish the nodes in one or more lists.
+//
+// ... prev vs. next pointer ordering ...
+//
+
+#pragma once
+#include <assert.h>
+#include <stdlib.h>
+#include <atomic>
+#include "port/port.h"
+#include "util/allocator.h"
+#include "util/random.h"
+
+namespace rocksdb {
+
+template <class Comparator>
+class InlineSkipList {
+ private:
+  struct Node;
+
+ public:
+  // Create a new InlineSkipList object that will use "cmp" for comparing
+  // keys, and will allocate memory using "*allocator". Objects allocated
+  // in the allocator must remain allocated for the lifetime of the
+  // skiplist object.
+  explicit InlineSkipList(Comparator cmp, Allocator* allocator,
+                          int32_t max_height = 12,
+                          int32_t branching_factor = 4);
+
+  // Allocates a key and a skip-list node, returning a pointer to the key
+  // portion of the node. This method is thread-safe if the allocator
+  // is thread-safe.
+  char* AllocateKey(size_t key_size);
+
+  // Inserts a key allocated by AllocateKey, after the actual key value
+  // has been filled in.
+ // + // REQUIRES: nothing that compares equal to key is currently in the list. + // REQUIRES: no concurrent calls to INSERT + void Insert(const char* key); + + // Like Insert, but external synchronization is not required. + void InsertConcurrently(const char* key); + + // Returns true iff an entry that compares equal to key is in the list. + bool Contains(const char* key) const; + + // Return estimated number of entries smaller than `key`. + uint64_t EstimateCount(const char* key) const; + + // Iteration over the contents of a skip list + class Iterator { + public: + // Initialize an iterator over the specified list. + // The returned iterator is not valid. + explicit Iterator(const InlineSkipList* list); + + // Change the underlying skiplist used for this iterator + // This enables us not changing the iterator without deallocating + // an old one and then allocating a new one + void SetList(const InlineSkipList* list); + + // Returns true iff the iterator is positioned at a valid node. + bool Valid() const; + + // Returns the key at the current position. + // REQUIRES: Valid() + const char* key() const; + + // Advances to the next position. + // REQUIRES: Valid() + void Next(); + + // Advances to the previous position. + // REQUIRES: Valid() + void Prev(); + + // Advance to the first entry with a key >= target + void Seek(const char* target); + + // Position at the first entry in list. + // Final state of iterator is Valid() iff list is not empty. + void SeekToFirst(); + + // Position at the last entry in list. + // Final state of iterator is Valid() iff list is not empty. + void SeekToLast(); + + private: + const InlineSkipList* list_; + Node* node_; + // Intentionally copyable + }; + + private: + enum MaxPossibleHeightEnum : uint16_t { kMaxPossibleHeight = 32 }; + + const uint16_t kMaxHeight_; + const uint16_t kBranching_; + const uint32_t kScaledInverseBranching_; + + // Immutable after construction + Comparator const compare_; + Allocator* const allocator_; // Allocator used for allocations of nodes + + Node* const head_; + + // Modified only by Insert(). Read racily by readers, but stale + // values are ok. + std::atomic max_height_; // Height of the entire list + + // Used for optimizing sequential insert patterns. Tricky. prev_height_ + // of zero means prev_ is undefined. Otherwise: prev_[i] for i up + // to max_height_ - 1 (inclusive) is the predecessor of prev_[0], and + // prev_height_ is the height of prev_[0]. prev_[0] can only be equal + // to head when max_height_ and prev_height_ are both 1. + Node** prev_; + std::atomic prev_height_; + + inline int GetMaxHeight() const { + return max_height_.load(std::memory_order_relaxed); + } + + int RandomHeight(); + + Node* AllocateNode(size_t key_size, int height); + + bool Equal(const char* a, const char* b) const { + return (compare_(a, b) == 0); + } + + // Return true if key is greater than the data stored in "n". Null n + // is considered infinite. + bool KeyIsAfterNode(const char* key, Node* n) const; + + // Returns the earliest node with a key >= key. + // Return nullptr if there is no such node. + Node* FindGreaterOrEqual(const char* key) const; + + // Return the latest node with a key < key. + // Return head_ if there is no such node. + // Fills prev[level] with pointer to previous node at "level" for every + // level in [0..max_height_-1], if prev is non-null. + Node* FindLessThan(const char* key, Node** prev = nullptr) const; + + // Return the last node in the list. + // Return head_ if list is empty. 
+ Node* FindLast() const; + + // Traverses a single level of the list, setting *out_prev to the last + // node before the key and *out_next to the first node after. Assumes + // that the key is not present in the skip list. On entry, before should + // point to a node that is before the key, and after should point to + // a node that is after the key. after should be nullptr if a good after + // node isn't conveniently available. + void FindLevelSplice(const char* key, Node* before, Node* after, int level, + Node** out_prev, Node** out_next); + + // No copying allowed + InlineSkipList(const InlineSkipList&); + InlineSkipList& operator=(const InlineSkipList&); +}; + +// Implementation details follow + +// The Node data type is more of a pointer into custom-managed memory than +// a traditional C++ struct. The key is stored in the bytes immediately +// after the struct, and the next_ pointers for nodes with height > 1 are +// stored immediately _before_ the struct. This avoids the need to include +// any pointer or sizing data, which reduces per-node memory overheads. +template +struct InlineSkipList::Node { + // Stores the height of the node in the memory location normally used for + // next_[0]. This is used for passing data from AllocateKey to Insert. + void StashHeight(const int height) { + assert(sizeof(int) <= sizeof(next_[0])); + memcpy(&next_[0], &height, sizeof(int)); + } + + // Retrieves the value passed to StashHeight. Undefined after a call + // to SetNext or NoBarrier_SetNext. + int UnstashHeight() const { + int rv; + memcpy(&rv, &next_[0], sizeof(int)); + return rv; + } + + const char* Key() const { return reinterpret_cast(&next_[1]); } + + // Accessors/mutators for links. Wrapped in methods so we can add + // the appropriate barriers as necessary, and perform the necessary + // addressing trickery for storing links below the Node in memory. + Node* Next(int n) { + assert(n >= 0); + // Use an 'acquire load' so that we observe a fully initialized + // version of the returned Node. + return (next_[-n].load(std::memory_order_acquire)); + } + + void SetNext(int n, Node* x) { + assert(n >= 0); + // Use a 'release store' so that anybody who reads through this + // pointer observes a fully initialized version of the inserted node. + next_[-n].store(x, std::memory_order_release); + } + + bool CASNext(int n, Node* expected, Node* x) { + assert(n >= 0); + return next_[-n].compare_exchange_strong(expected, x); + } + + // No-barrier variants that can be safely used in a few locations. + Node* NoBarrier_Next(int n) { + assert(n >= 0); + return next_[-n].load(std::memory_order_relaxed); + } + + void NoBarrier_SetNext(int n, Node* x) { + assert(n >= 0); + next_[-n].store(x, std::memory_order_relaxed); + } + + private: + // next_[0] is the lowest level link (level 0). Higher levels are + // stored _earlier_, so level 1 is at next_[-1]. 
+ std::atomic next_[1]; +}; + +template +inline InlineSkipList::Iterator::Iterator( + const InlineSkipList* list) { + SetList(list); +} + +template +inline void InlineSkipList::Iterator::SetList( + const InlineSkipList* list) { + list_ = list; + node_ = nullptr; +} + +template +inline bool InlineSkipList::Iterator::Valid() const { + return node_ != nullptr; +} + +template +inline const char* InlineSkipList::Iterator::key() const { + assert(Valid()); + return node_->Key(); +} + +template +inline void InlineSkipList::Iterator::Next() { + assert(Valid()); + node_ = node_->Next(0); +} + +template +inline void InlineSkipList::Iterator::Prev() { + // Instead of using explicit "prev" links, we just search for the + // last node that falls before key. + assert(Valid()); + node_ = list_->FindLessThan(node_->Key()); + if (node_ == list_->head_) { + node_ = nullptr; + } +} + +template +inline void InlineSkipList::Iterator::Seek(const char* target) { + node_ = list_->FindGreaterOrEqual(target); +} + +template +inline void InlineSkipList::Iterator::SeekToFirst() { + node_ = list_->head_->Next(0); +} + +template +inline void InlineSkipList::Iterator::SeekToLast() { + node_ = list_->FindLast(); + if (node_ == list_->head_) { + node_ = nullptr; + } +} + +template +int InlineSkipList::RandomHeight() { + auto rnd = Random::GetTLSInstance(); + + // Increase height with probability 1 in kBranching + int height = 1; + while (height < kMaxHeight_ && height < kMaxPossibleHeight && + rnd->Next() < kScaledInverseBranching_) { + height++; + } + assert(height > 0); + assert(height <= kMaxHeight_); + assert(height <= kMaxPossibleHeight); + return height; +} + +template +bool InlineSkipList::KeyIsAfterNode(const char* key, + Node* n) const { + // nullptr n is considered infinite + return (n != nullptr) && (compare_(n->Key(), key) < 0); +} + +template +typename InlineSkipList::Node* +InlineSkipList::FindGreaterOrEqual(const char* key) const { + // Note: It looks like we could reduce duplication by implementing + // this function as FindLessThan(key)->Next(0), but we wouldn't be able + // to exit early on equality and the result wouldn't even be correct. + // A concurrent insert might occur after FindLessThan(key) but before + // we get a chance to call Next(0). + Node* x = head_; + int level = GetMaxHeight() - 1; + Node* last_bigger = nullptr; + while (true) { + Node* next = x->Next(level); + // Make sure the lists are sorted + assert(x == head_ || next == nullptr || KeyIsAfterNode(next->Key(), x)); + // Make sure we haven't overshot during our search + assert(x == head_ || KeyIsAfterNode(key, x)); + int cmp = (next == nullptr || next == last_bigger) + ? 
1
+                  : compare_(next->Key(), key);
+    if (cmp == 0 || (cmp > 0 && level == 0)) {
+      return next;
+    } else if (cmp < 0) {
+      // Keep searching in this list
+      x = next;
+    } else {
+      // Switch to next list, reuse compare_() result
+      last_bigger = next;
+      level--;
+    }
+  }
+}
+
+template <class Comparator>
+typename InlineSkipList<Comparator>::Node*
+InlineSkipList<Comparator>::FindLessThan(const char* key, Node** prev) const {
+  Node* x = head_;
+  int level = GetMaxHeight() - 1;
+  // KeyIsAfter(key, last_not_after) is definitely false
+  Node* last_not_after = nullptr;
+  while (true) {
+    Node* next = x->Next(level);
+    assert(x == head_ || next == nullptr || KeyIsAfterNode(next->Key(), x));
+    assert(x == head_ || KeyIsAfterNode(key, x));
+    if (next != last_not_after && KeyIsAfterNode(key, next)) {
+      // Keep searching in this list
+      x = next;
+    } else {
+      if (prev != nullptr) {
+        prev[level] = x;
+      }
+      if (level == 0) {
+        return x;
+      } else {
+        // Switch to next list, reuse KeyIsAfterNode() result
+        last_not_after = next;
+        level--;
+      }
+    }
+  }
+}
+
+template <class Comparator>
+typename InlineSkipList<Comparator>::Node*
+InlineSkipList<Comparator>::FindLast() const {
+  Node* x = head_;
+  int level = GetMaxHeight() - 1;
+  while (true) {
+    Node* next = x->Next(level);
+    if (next == nullptr) {
+      if (level == 0) {
+        return x;
+      } else {
+        // Switch to next list
+        level--;
+      }
+    } else {
+      x = next;
+    }
+  }
+}
+
+template <class Comparator>
+uint64_t InlineSkipList<Comparator>::EstimateCount(const char* key) const {
+  uint64_t count = 0;
+
+  Node* x = head_;
+  int level = GetMaxHeight() - 1;
+  while (true) {
+    assert(x == head_ || compare_(x->Key(), key) < 0);
+    Node* next = x->Next(level);
+    if (next == nullptr || compare_(next->Key(), key) >= 0) {
+      if (level == 0) {
+        return count;
+      } else {
+        // Switch to next list
+        count *= kBranching_;
+        level--;
+      }
+    } else {
+      x = next;
+      count++;
+    }
+  }
+}
+
+template <class Comparator>
+InlineSkipList<Comparator>::InlineSkipList(const Comparator cmp,
+                                           Allocator* allocator,
+                                           int32_t max_height,
+                                           int32_t branching_factor)
+    : kMaxHeight_(max_height),
+      kBranching_(branching_factor),
+      kScaledInverseBranching_((Random::kMaxNext + 1) / kBranching_),
+      compare_(cmp),
+      allocator_(allocator),
+      head_(AllocateNode(0, max_height)),
+      max_height_(1),
+      prev_height_(1) {
+  assert(max_height > 0 && kMaxHeight_ == static_cast<uint32_t>(max_height));
+  assert(branching_factor > 1 &&
+         kBranching_ == static_cast<uint32_t>(branching_factor));
+  assert(kScaledInverseBranching_ > 0);
+  // Allocate the prev_ Node* array, directly from the passed-in allocator.
+  // prev_ does not need to be freed, as its life cycle is tied up with
+  // the allocator as a whole.
+  prev_ = reinterpret_cast<Node**>(
+      allocator_->AllocateAligned(sizeof(Node*) * kMaxHeight_));
+  for (int i = 0; i < kMaxHeight_; i++) {
+    head_->SetNext(i, nullptr);
+    prev_[i] = head_;
+  }
+}
+
+template <class Comparator>
+char* InlineSkipList<Comparator>::AllocateKey(size_t key_size) {
+  return const_cast<char*>(AllocateNode(key_size, RandomHeight())->Key());
+}
+
+template <class Comparator>
+typename InlineSkipList<Comparator>::Node*
+InlineSkipList<Comparator>::AllocateNode(size_t key_size, int height) {
+  auto prefix = sizeof(std::atomic<Node*>) * (height - 1);
+
+  // prefix is space for the height - 1 pointers that we store before
+  // the Node instance (next_[-(height - 1) .. -1]). Node starts at
+  // raw + prefix, and holds the bottom-most (level 0) skip list pointer
+  // next_[0]. key_size is the bytes for the key, which comes just after
+  // the Node.
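+  //
+  // Worked example (illustrative numbers, not from the original source):
+  // with height == 3 and 8-byte pointers, prefix == 2 * 8 == 16 and the
+  // allocation below is laid out as
+  //
+  //   raw + 0                     -> next_[-2]  (level 2 link)
+  //   raw + 8                     -> next_[-1]  (level 1 link)
+  //   raw + 16 == (char*)x        -> next_[0]   (level 0 link)
+  //   raw + 16 + sizeof(Node) ... -> key_size bytes of key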
+  char* raw = allocator_->AllocateAligned(prefix + sizeof(Node) + key_size);
+  Node* x = reinterpret_cast<Node*>(raw + prefix);
+
+  // Once we've linked the node into the skip list we don't actually need
+  // to know its height, because we can implicitly use the fact that we
+  // traversed into a node at level h to know that h is a valid level
+  // for that node. We need to convey the height to the Insert step,
+  // however, so that it can perform the proper links. Since we're not
+  // using the pointers at the moment, StashHeight temporarily borrows
+  // storage from next_[0] for that purpose.
+  x->StashHeight(height);
+  return x;
+}
+
+template <class Comparator>
+void InlineSkipList<Comparator>::Insert(const char* key) {
+  // InsertConcurrently often can't maintain the prev_ invariants, so
+  // it just sets prev_height_ to zero, letting us know that we should
+  // ignore it. A relaxed load suffices here because write thread
+  // synchronization separates Insert calls from InsertConcurrently calls.
+  auto prev_height = prev_height_.load(std::memory_order_relaxed);
+
+  // fast path for sequential insertion
+  if (prev_height > 0 && !KeyIsAfterNode(key, prev_[0]->NoBarrier_Next(0)) &&
+      (prev_[0] == head_ || KeyIsAfterNode(key, prev_[0]))) {
+    assert(prev_[0] != head_ || (prev_height == 1 && GetMaxHeight() == 1));
+
+    // Outside of this method prev_[1..max_height_] is the predecessor
+    // of prev_[0], and prev_height_ refers to prev_[0]. Inside Insert
+    // prev_[0..max_height - 1] is the predecessor of key. Switch from
+    // the external state to the internal
+    for (int i = 1; i < prev_height; i++) {
+      prev_[i] = prev_[0];
+    }
+  } else {
+    // TODO(opt): we could use a NoBarrier predecessor search as an
+    // optimization for architectures where memory_order_acquire needs
+    // a synchronization instruction. Doesn't matter on x86
+    FindLessThan(key, prev_);
+  }
+
+  // Our data structure does not allow duplicate insertion
+  assert(prev_[0]->Next(0) == nullptr || !Equal(key, prev_[0]->Next(0)->Key()));
+
+  // Find the Node that we placed before the key in AllocateKey
+  Node* x = reinterpret_cast<Node*>(const_cast<char*>(key)) - 1;
+  int height = x->UnstashHeight();
+  assert(height >= 1 && height <= kMaxHeight_);
+
+  if (height > GetMaxHeight()) {
+    for (int i = GetMaxHeight(); i < height; i++) {
+      prev_[i] = head_;
+    }
+
+    // It is ok to mutate max_height_ without any synchronization
+    // with concurrent readers. A concurrent reader that observes
+    // the new value of max_height_ will see either the old value of
+    // new level pointers from head_ (nullptr), or a new value set in
+    // the loop below. In the former case the reader will
+    // immediately drop to the next level since nullptr sorts after all
+    // keys. In the latter case the reader will use the new node.
+    max_height_.store(height, std::memory_order_relaxed);
+  }
+
+  for (int i = 0; i < height; i++) {
+    // NoBarrier_SetNext() suffices since we will add a barrier when
+    // we publish a pointer to "x" in prev[i].
+ x->NoBarrier_SetNext(i, prev_[i]->NoBarrier_Next(i)); + prev_[i]->SetNext(i, x); + } + prev_[0] = x; + prev_height_.store(height, std::memory_order_relaxed); +} + +template +void InlineSkipList::FindLevelSplice(const char* key, Node* before, + Node* after, int level, + Node** out_prev, + Node** out_next) { + while (true) { + Node* next = before->Next(level); + assert(before == head_ || next == nullptr || + KeyIsAfterNode(next->Key(), before)); + assert(before == head_ || KeyIsAfterNode(key, before)); + if (next == after || !KeyIsAfterNode(key, next)) { + // found it + *out_prev = before; + *out_next = next; + return; + } + before = next; + } +} + +template +void InlineSkipList::InsertConcurrently(const char* key) { + Node* x = reinterpret_cast(const_cast(key)) - 1; + int height = x->UnstashHeight(); + assert(height >= 1 && height <= kMaxHeight_); + + // We don't have a lock-free algorithm for updating prev_, but we do have + // the option of invalidating the entire sequential-insertion cache. + // prev_'s invariant is that prev_[i] (i > 0) is the predecessor of + // prev_[0] at that level. We're only going to violate that if height + // > 1 and key lands after prev_[height - 1] but before prev_[0]. + // Comparisons are pretty expensive, so an easier version is to just + // clear the cache if height > 1. We only write to prev_height_ if the + // nobody else has, to avoid invalidating the root of the skip list in + // all of the other CPU caches. + if (height > 1 && prev_height_.load(std::memory_order_relaxed) != 0) { + prev_height_.store(0, std::memory_order_relaxed); + } + + int max_height = max_height_.load(std::memory_order_relaxed); + while (height > max_height) { + if (max_height_.compare_exchange_strong(max_height, height)) { + // successfully updated it + max_height = height; + break; + } + // else retry, possibly exiting the loop because somebody else + // increased it + } + assert(max_height <= kMaxPossibleHeight); + + Node* prev[kMaxPossibleHeight + 1]; + Node* next[kMaxPossibleHeight + 1]; + prev[max_height] = head_; + next[max_height] = nullptr; + for (int i = max_height - 1; i >= 0; --i) { + FindLevelSplice(key, prev[i + 1], next[i + 1], i, &prev[i], &next[i]); + } + for (int i = 0; i < height; ++i) { + while (true) { + x->NoBarrier_SetNext(i, next[i]); + if (prev[i]->CASNext(i, next[i], x)) { + // success + break; + } + // CAS failed, we need to recompute prev and next. It is unlikely + // to be helpful to try to use a different level as we redo the + // search, because it should be unlikely that lots of nodes have + // been inserted between prev[i] and next[i]. No point in using + // next[i] as the after hint, because we know it is stale. + FindLevelSplice(key, prev[i], nullptr, i, &prev[i], &next[i]); + } + } +} + +template +bool InlineSkipList::Contains(const char* key) const { + Node* x = FindGreaterOrEqual(key); + if (x != nullptr && Equal(key, x->Key())) { + return true; + } else { + return false; + } +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/inlineskiplist_test.cc b/external/rocksdb/db/inlineskiplist_test.cc new file mode 100755 index 0000000000..5743bacec6 --- /dev/null +++ b/external/rocksdb/db/inlineskiplist_test.cc @@ -0,0 +1,475 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/inlineskiplist.h" +#include +#include "rocksdb/env.h" +#include "util/concurrent_arena.h" +#include "util/hash.h" +#include "util/random.h" +#include "util/testharness.h" + +namespace rocksdb { + +// Our test skip list stores 8-byte unsigned integers +typedef uint64_t Key; + +static const char* Encode(const uint64_t* key) { + return reinterpret_cast(key); +} + +static Key Decode(const char* key) { + Key rv; + memcpy(&rv, key, sizeof(Key)); + return rv; +} + +struct TestComparator { + int operator()(const char* a, const char* b) const { + if (Decode(a) < Decode(b)) { + return -1; + } else if (Decode(a) > Decode(b)) { + return +1; + } else { + return 0; + } + } +}; + +class InlineSkipTest : public testing::Test {}; + +TEST_F(InlineSkipTest, Empty) { + Arena arena; + TestComparator cmp; + InlineSkipList list(cmp, &arena); + Key key = 10; + ASSERT_TRUE(!list.Contains(Encode(&key))); + + InlineSkipList::Iterator iter(&list); + ASSERT_TRUE(!iter.Valid()); + iter.SeekToFirst(); + ASSERT_TRUE(!iter.Valid()); + key = 100; + iter.Seek(Encode(&key)); + ASSERT_TRUE(!iter.Valid()); + iter.SeekToLast(); + ASSERT_TRUE(!iter.Valid()); +} + +TEST_F(InlineSkipTest, InsertAndLookup) { + const int N = 2000; + const int R = 5000; + Random rnd(1000); + std::set keys; + ConcurrentArena arena; + TestComparator cmp; + InlineSkipList list(cmp, &arena); + for (int i = 0; i < N; i++) { + Key key = rnd.Next() % R; + if (keys.insert(key).second) { + char* buf = list.AllocateKey(sizeof(Key)); + memcpy(buf, &key, sizeof(Key)); + list.Insert(buf); + } + } + + for (Key i = 0; i < R; i++) { + if (list.Contains(Encode(&i))) { + ASSERT_EQ(keys.count(i), 1U); + } else { + ASSERT_EQ(keys.count(i), 0U); + } + } + + // Simple iterator tests + { + InlineSkipList::Iterator iter(&list); + ASSERT_TRUE(!iter.Valid()); + + uint64_t zero = 0; + iter.Seek(Encode(&zero)); + ASSERT_TRUE(iter.Valid()); + ASSERT_EQ(*(keys.begin()), Decode(iter.key())); + + iter.SeekToFirst(); + ASSERT_TRUE(iter.Valid()); + ASSERT_EQ(*(keys.begin()), Decode(iter.key())); + + iter.SeekToLast(); + ASSERT_TRUE(iter.Valid()); + ASSERT_EQ(*(keys.rbegin()), Decode(iter.key())); + } + + // Forward iteration test + for (Key i = 0; i < R; i++) { + InlineSkipList::Iterator iter(&list); + iter.Seek(Encode(&i)); + + // Compare against model iterator + std::set::iterator model_iter = keys.lower_bound(i); + for (int j = 0; j < 3; j++) { + if (model_iter == keys.end()) { + ASSERT_TRUE(!iter.Valid()); + break; + } else { + ASSERT_TRUE(iter.Valid()); + ASSERT_EQ(*model_iter, Decode(iter.key())); + ++model_iter; + iter.Next(); + } + } + } + + // Backward iteration test + { + InlineSkipList::Iterator iter(&list); + iter.SeekToLast(); + + // Compare against model iterator + for (std::set::reverse_iterator model_iter = keys.rbegin(); + model_iter != keys.rend(); ++model_iter) { + ASSERT_TRUE(iter.Valid()); + ASSERT_EQ(*model_iter, Decode(iter.key())); + iter.Prev(); + } + ASSERT_TRUE(!iter.Valid()); + } +} + +// We want to make sure that with a single writer and multiple +// concurrent readers (with no synchronization other than when a +// reader's iterator is created), the reader always observes all the +// data that was present in the skip list when the iterator was +// constructor. 
Because insertions are happening concurrently, we may +// also observe new values that were inserted since the iterator was +// constructed, but we should never miss any values that were present +// at iterator construction time. +// +// We generate multi-part keys: +// +// where: +// key is in range [0..K-1] +// gen is a generation number for key +// hash is hash(key,gen) +// +// The insertion code picks a random key, sets gen to be 1 + the last +// generation number inserted for that key, and sets hash to Hash(key,gen). +// +// At the beginning of a read, we snapshot the last inserted +// generation number for each key. We then iterate, including random +// calls to Next() and Seek(). For every key we encounter, we +// check that it is either expected given the initial snapshot or has +// been concurrently added since the iterator started. +class ConcurrentTest { + public: + static const uint32_t K = 8; + + private: + static uint64_t key(Key key) { return (key >> 40); } + static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; } + static uint64_t hash(Key key) { return key & 0xff; } + + static uint64_t HashNumbers(uint64_t k, uint64_t g) { + uint64_t data[2] = {k, g}; + return Hash(reinterpret_cast(data), sizeof(data), 0); + } + + static Key MakeKey(uint64_t k, uint64_t g) { + assert(sizeof(Key) == sizeof(uint64_t)); + assert(k <= K); // We sometimes pass K to seek to the end of the skiplist + assert(g <= 0xffffffffu); + return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff)); + } + + static bool IsValidKey(Key k) { + return hash(k) == (HashNumbers(key(k), gen(k)) & 0xff); + } + + static Key RandomTarget(Random* rnd) { + switch (rnd->Next() % 10) { + case 0: + // Seek to beginning + return MakeKey(0, 0); + case 1: + // Seek to end + return MakeKey(K, 0); + default: + // Seek to middle + return MakeKey(rnd->Next() % K, 0); + } + } + + // Per-key generation + struct State { + std::atomic generation[K]; + void Set(int k, int v) { + generation[k].store(v, std::memory_order_release); + } + int Get(int k) { return generation[k].load(std::memory_order_acquire); } + + State() { + for (unsigned int k = 0; k < K; k++) { + Set(k, 0); + } + } + }; + + // Current state of the test + State current_; + + ConcurrentArena arena_; + + // InlineSkipList is not protected by mu_. We just use a single writer + // thread to modify it. + InlineSkipList list_; + + public: + ConcurrentTest() : list_(TestComparator(), &arena_) {} + + // REQUIRES: No concurrent calls to WriteStep or ConcurrentWriteStep + void WriteStep(Random* rnd) { + const uint32_t k = rnd->Next() % K; + const int g = current_.Get(k) + 1; + const Key new_key = MakeKey(k, g); + char* buf = list_.AllocateKey(sizeof(Key)); + memcpy(buf, &new_key, sizeof(Key)); + list_.Insert(buf); + current_.Set(k, g); + } + + // REQUIRES: No concurrent calls for the same k + void ConcurrentWriteStep(uint32_t k) { + const int g = current_.Get(k) + 1; + const Key new_key = MakeKey(k, g); + char* buf = list_.AllocateKey(sizeof(Key)); + memcpy(buf, &new_key, sizeof(Key)); + list_.InsertConcurrently(buf); + ASSERT_EQ(g, current_.Get(k) + 1); + current_.Set(k, g); + } + + void ReadStep(Random* rnd) { + // Remember the initial committed state of the skiplist. 
+ State initial_state; + for (unsigned int k = 0; k < K; k++) { + initial_state.Set(k, current_.Get(k)); + } + + Key pos = RandomTarget(rnd); + InlineSkipList::Iterator iter(&list_); + iter.Seek(Encode(&pos)); + while (true) { + Key current; + if (!iter.Valid()) { + current = MakeKey(K, 0); + } else { + current = Decode(iter.key()); + ASSERT_TRUE(IsValidKey(current)) << current; + } + ASSERT_LE(pos, current) << "should not go backwards"; + + // Verify that everything in [pos,current) was not present in + // initial_state. + while (pos < current) { + ASSERT_LT(key(pos), K) << pos; + + // Note that generation 0 is never inserted, so it is ok if + // <*,0,*> is missing. + ASSERT_TRUE((gen(pos) == 0U) || + (gen(pos) > static_cast(initial_state.Get( + static_cast(key(pos)))))) + << "key: " << key(pos) << "; gen: " << gen(pos) + << "; initgen: " << initial_state.Get(static_cast(key(pos))); + + // Advance to next key in the valid key space + if (key(pos) < key(current)) { + pos = MakeKey(key(pos) + 1, 0); + } else { + pos = MakeKey(key(pos), gen(pos) + 1); + } + } + + if (!iter.Valid()) { + break; + } + + if (rnd->Next() % 2) { + iter.Next(); + pos = MakeKey(key(pos), gen(pos) + 1); + } else { + Key new_target = RandomTarget(rnd); + if (new_target > pos) { + pos = new_target; + iter.Seek(Encode(&new_target)); + } + } + } + } +}; +const uint32_t ConcurrentTest::K; + +// Simple test that does single-threaded testing of the ConcurrentTest +// scaffolding. +TEST_F(InlineSkipTest, ConcurrentReadWithoutThreads) { + ConcurrentTest test; + Random rnd(test::RandomSeed()); + for (int i = 0; i < 10000; i++) { + test.ReadStep(&rnd); + test.WriteStep(&rnd); + } +} + +TEST_F(InlineSkipTest, ConcurrentInsertWithoutThreads) { + ConcurrentTest test; + Random rnd(test::RandomSeed()); + for (int i = 0; i < 10000; i++) { + test.ReadStep(&rnd); + uint32_t base = rnd.Next(); + for (int j = 0; j < 4; ++j) { + test.ConcurrentWriteStep((base + j) % ConcurrentTest::K); + } + } +} + +class TestState { + public: + ConcurrentTest t_; + int seed_; + std::atomic quit_flag_; + std::atomic next_writer_; + + enum ReaderState { STARTING, RUNNING, DONE }; + + explicit TestState(int s) + : seed_(s), + quit_flag_(false), + state_(STARTING), + pending_writers_(0), + state_cv_(&mu_) {} + + void Wait(ReaderState s) { + mu_.Lock(); + while (state_ != s) { + state_cv_.Wait(); + } + mu_.Unlock(); + } + + void Change(ReaderState s) { + mu_.Lock(); + state_ = s; + state_cv_.Signal(); + mu_.Unlock(); + } + + void AdjustPendingWriters(int delta) { + mu_.Lock(); + pending_writers_ += delta; + if (pending_writers_ == 0) { + state_cv_.Signal(); + } + mu_.Unlock(); + } + + void WaitForPendingWriters() { + mu_.Lock(); + while (pending_writers_ != 0) { + state_cv_.Wait(); + } + mu_.Unlock(); + } + + private: + port::Mutex mu_; + ReaderState state_; + int pending_writers_; + port::CondVar state_cv_; +}; + +static void ConcurrentReader(void* arg) { + TestState* state = reinterpret_cast(arg); + Random rnd(state->seed_); + int64_t reads = 0; + state->Change(TestState::RUNNING); + while (!state->quit_flag_.load(std::memory_order_acquire)) { + state->t_.ReadStep(&rnd); + ++reads; + } + state->Change(TestState::DONE); +} + +static void ConcurrentWriter(void* arg) { + TestState* state = reinterpret_cast(arg); + uint32_t k = state->next_writer_++ % ConcurrentTest::K; + state->t_.ConcurrentWriteStep(k); + state->AdjustPendingWriters(-1); +} + +static void RunConcurrentRead(int run) { + const int seed = test::RandomSeed() + (run * 100); + Random rnd(seed); + 
const int N = 1000; + const int kSize = 1000; + for (int i = 0; i < N; i++) { + if ((i % 100) == 0) { + fprintf(stderr, "Run %d of %d\n", i, N); + } + TestState state(seed + 1); + Env::Default()->Schedule(ConcurrentReader, &state); + state.Wait(TestState::RUNNING); + for (int k = 0; k < kSize; ++k) { + state.t_.WriteStep(&rnd); + } + state.quit_flag_.store(true, std::memory_order_release); + state.Wait(TestState::DONE); + } +} + +static void RunConcurrentInsert(int run, int write_parallelism = 4) { + Env::Default()->SetBackgroundThreads(1 + write_parallelism, + Env::Priority::LOW); + const int seed = test::RandomSeed() + (run * 100); + Random rnd(seed); + const int N = 1000; + const int kSize = 1000; + for (int i = 0; i < N; i++) { + if ((i % 100) == 0) { + fprintf(stderr, "Run %d of %d\n", i, N); + } + TestState state(seed + 1); + Env::Default()->Schedule(ConcurrentReader, &state); + state.Wait(TestState::RUNNING); + for (int k = 0; k < kSize; k += write_parallelism) { + state.next_writer_ = rnd.Next(); + state.AdjustPendingWriters(write_parallelism); + for (int p = 0; p < write_parallelism; ++p) { + Env::Default()->Schedule(ConcurrentWriter, &state); + } + state.WaitForPendingWriters(); + } + state.quit_flag_.store(true, std::memory_order_release); + state.Wait(TestState::DONE); + } +} + +TEST_F(InlineSkipTest, ConcurrentRead1) { RunConcurrentRead(1); } +TEST_F(InlineSkipTest, ConcurrentRead2) { RunConcurrentRead(2); } +TEST_F(InlineSkipTest, ConcurrentRead3) { RunConcurrentRead(3); } +TEST_F(InlineSkipTest, ConcurrentRead4) { RunConcurrentRead(4); } +TEST_F(InlineSkipTest, ConcurrentRead5) { RunConcurrentRead(5); } +TEST_F(InlineSkipTest, ConcurrentInsert1) { RunConcurrentInsert(1); } +TEST_F(InlineSkipTest, ConcurrentInsert2) { RunConcurrentInsert(2); } +TEST_F(InlineSkipTest, ConcurrentInsert3) { RunConcurrentInsert(3); } + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/internal_stats.cc b/external/rocksdb/db/internal_stats.cc new file mode 100755 index 0000000000..5170802374 --- /dev/null +++ b/external/rocksdb/db/internal_stats.cc @@ -0,0 +1,896 @@ +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+ +#include "db/internal_stats.h" + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include +#include +#include +#include +#include +#include "db/column_family.h" + +#include "db/db_impl.h" +#include "util/string_util.h" + +namespace rocksdb { + +#ifndef ROCKSDB_LITE +namespace { +const double kMB = 1048576.0; +const double kGB = kMB * 1024; +const double kMicrosInSec = 1000000.0; + +void PrintLevelStatsHeader(char* buf, size_t len, const std::string& cf_name) { + snprintf( + buf, len, + "\n** Compaction Stats [%s] **\n" + "Level Files Size(MB) Score Read(GB) Rn(GB) Rnp1(GB) " + "Write(GB) Wnew(GB) Moved(GB) W-Amp Rd(MB/s) Wr(MB/s) " + "Comp(sec) Comp(cnt) Avg(sec) KeyIn KeyDrop\n" + "--------------------------------------------------------------------" + "-----------------------------------------------------------" + "--------------------------------------\n", + cf_name.c_str()); +} + +void PrintLevelStats(char* buf, size_t len, const std::string& name, + int num_files, int being_compacted, double total_file_size, + double score, double w_amp, + const InternalStats::CompactionStats& stats) { + uint64_t bytes_read = + stats.bytes_read_non_output_levels + stats.bytes_read_output_level; + int64_t bytes_new = + stats.bytes_written - stats.bytes_read_output_level; + double elapsed = (stats.micros + 1) / kMicrosInSec; + std::string num_input_records = NumberToHumanString(stats.num_input_records); + std::string num_dropped_records = + NumberToHumanString(stats.num_dropped_records); + + snprintf(buf, len, + "%4s %6d/%-3d %8.2f %5.1f " /* Level, Files, Size(MB), Score */ + "%8.1f " /* Read(GB) */ + "%7.1f " /* Rn(GB) */ + "%8.1f " /* Rnp1(GB) */ + "%9.1f " /* Write(GB) */ + "%8.1f " /* Wnew(GB) */ + "%9.1f " /* Moved(GB) */ + "%5.1f " /* W-Amp */ + "%8.1f " /* Rd(MB/s) */ + "%8.1f " /* Wr(MB/s) */ + "%9.0f " /* Comp(sec) */ + "%9d " /* Comp(cnt) */ + "%8.3f " /* Avg(sec) */ + "%7s " /* KeyIn */ + "%6s\n", /* KeyDrop */ + name.c_str(), + num_files, being_compacted, total_file_size / kMB, score, + bytes_read / kGB, stats.bytes_read_non_output_levels / kGB, + stats.bytes_read_output_level / kGB, stats.bytes_written / kGB, + bytes_new / kGB, stats.bytes_moved / kGB, w_amp, + bytes_read / kMB / elapsed, stats.bytes_written / kMB / elapsed, + stats.micros / kMicrosInSec, stats.count, + stats.count == 0 ? 0 : stats.micros / kMicrosInSec / stats.count, + num_input_records.c_str(), num_dropped_records.c_str()); +} + +// Assumes that trailing numbers represent an optional argument. This requires +// property names to not end with numbers. 
+std::pair<Slice, Slice> GetPropertyNameAndArg(const Slice& property) {
+  Slice name = property, arg = property;
+  size_t sfx_len = 0;
+  while (sfx_len < property.size() &&
+         isdigit(property[property.size() - sfx_len - 1])) {
+    ++sfx_len;
+  }
+  name.remove_suffix(sfx_len);
+  arg.remove_prefix(property.size() - sfx_len);
+  return {name, arg};
+}
+}  // anonymous namespace
+
+static const std::string rocksdb_prefix = "rocksdb.";
+
+static const std::string num_files_at_level_prefix = "num-files-at-level";
+static const std::string compression_ratio_at_level_prefix =
+    "compression-ratio-at-level";
+static const std::string allstats = "stats";
+static const std::string sstables = "sstables";
+static const std::string cfstats = "cfstats";
+static const std::string dbstats = "dbstats";
+static const std::string levelstats = "levelstats";
+static const std::string num_immutable_mem_table = "num-immutable-mem-table";
+static const std::string num_immutable_mem_table_flushed =
+    "num-immutable-mem-table-flushed";
+static const std::string mem_table_flush_pending = "mem-table-flush-pending";
+static const std::string compaction_pending = "compaction-pending";
+static const std::string background_errors = "background-errors";
+static const std::string cur_size_active_mem_table =
+    "cur-size-active-mem-table";
+static const std::string cur_size_all_mem_tables = "cur-size-all-mem-tables";
+static const std::string size_all_mem_tables = "size-all-mem-tables";
+static const std::string num_entries_active_mem_table =
+    "num-entries-active-mem-table";
+static const std::string num_entries_imm_mem_tables =
+    "num-entries-imm-mem-tables";
+static const std::string num_deletes_active_mem_table =
+    "num-deletes-active-mem-table";
+static const std::string num_deletes_imm_mem_tables =
+    "num-deletes-imm-mem-tables";
+static const std::string estimate_num_keys = "estimate-num-keys";
+static const std::string estimate_table_readers_mem =
+    "estimate-table-readers-mem";
+static const std::string is_file_deletions_enabled =
+    "is-file-deletions-enabled";
+static const std::string num_snapshots = "num-snapshots";
+static const std::string oldest_snapshot_time = "oldest-snapshot-time";
+static const std::string num_live_versions = "num-live-versions";
+static const std::string current_version_number =
+    "current-super-version-number";
+static const std::string estimate_live_data_size = "estimate-live-data-size";
+static const std::string base_level = "base-level";
+static const std::string total_sst_files_size = "total-sst-files-size";
+static const std::string estimate_pending_comp_bytes =
+    "estimate-pending-compaction-bytes";
+static const std::string aggregated_table_properties =
+    "aggregated-table-properties";
+static const std::string aggregated_table_properties_at_level =
+    aggregated_table_properties + "-at-level";
+static const std::string num_running_compactions = "num-running-compactions";
+static const std::string num_running_flushes = "num-running-flushes";
+
+const std::string DB::Properties::kNumFilesAtLevelPrefix =
+    rocksdb_prefix + num_files_at_level_prefix;
+const std::string DB::Properties::kCompressionRatioAtLevelPrefix =
+    rocksdb_prefix + compression_ratio_at_level_prefix;
+const std::string DB::Properties::kStats = rocksdb_prefix + allstats;
+const std::string DB::Properties::kSSTables = rocksdb_prefix + sstables;
+const std::string DB::Properties::kCFStats = rocksdb_prefix + cfstats;
+const std::string DB::Properties::kDBStats = rocksdb_prefix + dbstats;
+const std::string DB::Properties::kLevelStats =
    rocksdb_prefix + levelstats;
+const std::string DB::Properties::kNumImmutableMemTable =
+    rocksdb_prefix + num_immutable_mem_table;
+const std::string DB::Properties::kNumImmutableMemTableFlushed =
+    rocksdb_prefix + num_immutable_mem_table_flushed;
+const std::string DB::Properties::kMemTableFlushPending =
+    rocksdb_prefix + mem_table_flush_pending;
+const std::string DB::Properties::kCompactionPending =
+    rocksdb_prefix + compaction_pending;
+const std::string DB::Properties::kNumRunningCompactions =
+    rocksdb_prefix + num_running_compactions;
+const std::string DB::Properties::kNumRunningFlushes =
+    rocksdb_prefix + num_running_flushes;
+const std::string DB::Properties::kBackgroundErrors =
+    rocksdb_prefix + background_errors;
+const std::string DB::Properties::kCurSizeActiveMemTable =
+    rocksdb_prefix + cur_size_active_mem_table;
+const std::string DB::Properties::kCurSizeAllMemTables =
+    rocksdb_prefix + cur_size_all_mem_tables;
+const std::string DB::Properties::kSizeAllMemTables =
+    rocksdb_prefix + size_all_mem_tables;
+const std::string DB::Properties::kNumEntriesActiveMemTable =
+    rocksdb_prefix + num_entries_active_mem_table;
+const std::string DB::Properties::kNumEntriesImmMemTables =
+    rocksdb_prefix + num_entries_imm_mem_tables;
+const std::string DB::Properties::kNumDeletesActiveMemTable =
+    rocksdb_prefix + num_deletes_active_mem_table;
+const std::string DB::Properties::kNumDeletesImmMemTables =
+    rocksdb_prefix + num_deletes_imm_mem_tables;
+const std::string DB::Properties::kEstimateNumKeys =
+    rocksdb_prefix + estimate_num_keys;
+const std::string DB::Properties::kEstimateTableReadersMem =
+    rocksdb_prefix + estimate_table_readers_mem;
+const std::string DB::Properties::kIsFileDeletionsEnabled =
+    rocksdb_prefix + is_file_deletions_enabled;
+const std::string DB::Properties::kNumSnapshots =
+    rocksdb_prefix + num_snapshots;
+const std::string DB::Properties::kOldestSnapshotTime =
+    rocksdb_prefix + oldest_snapshot_time;
+const std::string DB::Properties::kNumLiveVersions =
+    rocksdb_prefix + num_live_versions;
+const std::string DB::Properties::kCurrentSuperVersionNumber =
+    rocksdb_prefix + current_version_number;
+const std::string DB::Properties::kEstimateLiveDataSize =
+    rocksdb_prefix + estimate_live_data_size;
+const std::string DB::Properties::kTotalSstFilesSize =
+    rocksdb_prefix + total_sst_files_size;
+const std::string DB::Properties::kBaseLevel = rocksdb_prefix + base_level;
+const std::string DB::Properties::kEstimatePendingCompactionBytes =
+    rocksdb_prefix + estimate_pending_comp_bytes;
+const std::string DB::Properties::kAggregatedTableProperties =
+    rocksdb_prefix + aggregated_table_properties;
+const std::string DB::Properties::kAggregatedTablePropertiesAtLevel =
+    rocksdb_prefix + aggregated_table_properties_at_level;
+
+const std::unordered_map<std::string, DBPropertyInfo>
+    InternalStats::ppt_name_to_info = {
+        {DB::Properties::kNumFilesAtLevelPrefix,
+         {false, &InternalStats::HandleNumFilesAtLevel, nullptr}},
+        {DB::Properties::kCompressionRatioAtLevelPrefix,
+         {false, &InternalStats::HandleCompressionRatioAtLevelPrefix,
+          nullptr}},
+        {DB::Properties::kLevelStats,
+         {false, &InternalStats::HandleLevelStats, nullptr}},
+        {DB::Properties::kStats,
+         {false, &InternalStats::HandleStats, nullptr}},
+        {DB::Properties::kCFStats,
+         {false, &InternalStats::HandleCFStats, nullptr}},
+        {DB::Properties::kDBStats,
+         {false, &InternalStats::HandleDBStats, nullptr}},
+        {DB::Properties::kSSTables,
+         {false, &InternalStats::HandleSsTables, nullptr}},
+        {DB::Properties::kAggregatedTableProperties,
{false, &InternalStats::HandleAggregatedTableProperties, nullptr}}, + {DB::Properties::kAggregatedTablePropertiesAtLevel, + {false, &InternalStats::HandleAggregatedTablePropertiesAtLevel, nullptr}}, + {DB::Properties::kNumImmutableMemTable, + {false, nullptr, &InternalStats::HandleNumImmutableMemTable}}, + {DB::Properties::kNumImmutableMemTableFlushed, + {false, nullptr, &InternalStats::HandleNumImmutableMemTableFlushed}}, + {DB::Properties::kMemTableFlushPending, + {false, nullptr, &InternalStats::HandleMemTableFlushPending}}, + {DB::Properties::kCompactionPending, + {false, nullptr, &InternalStats::HandleCompactionPending}}, + {DB::Properties::kBackgroundErrors, + {false, nullptr, &InternalStats::HandleBackgroundErrors}}, + {DB::Properties::kCurSizeActiveMemTable, + {false, nullptr, &InternalStats::HandleCurSizeActiveMemTable}}, + {DB::Properties::kCurSizeAllMemTables, + {false, nullptr, &InternalStats::HandleCurSizeAllMemTables}}, + {DB::Properties::kSizeAllMemTables, + {false, nullptr, &InternalStats::HandleSizeAllMemTables}}, + {DB::Properties::kNumEntriesActiveMemTable, + {false, nullptr, &InternalStats::HandleNumEntriesActiveMemTable}}, + {DB::Properties::kNumEntriesImmMemTables, + {false, nullptr, &InternalStats::HandleNumEntriesImmMemTables}}, + {DB::Properties::kNumDeletesActiveMemTable, + {false, nullptr, &InternalStats::HandleNumDeletesActiveMemTable}}, + {DB::Properties::kNumDeletesImmMemTables, + {false, nullptr, &InternalStats::HandleNumDeletesImmMemTables}}, + {DB::Properties::kEstimateNumKeys, + {false, nullptr, &InternalStats::HandleEstimateNumKeys}}, + {DB::Properties::kEstimateTableReadersMem, + {true, nullptr, &InternalStats::HandleEstimateTableReadersMem}}, + {DB::Properties::kIsFileDeletionsEnabled, + {false, nullptr, &InternalStats::HandleIsFileDeletionsEnabled}}, + {DB::Properties::kNumSnapshots, + {false, nullptr, &InternalStats::HandleNumSnapshots}}, + {DB::Properties::kOldestSnapshotTime, + {false, nullptr, &InternalStats::HandleOldestSnapshotTime}}, + {DB::Properties::kNumLiveVersions, + {false, nullptr, &InternalStats::HandleNumLiveVersions}}, + {DB::Properties::kCurrentSuperVersionNumber, + {false, nullptr, &InternalStats::HandleCurrentSuperVersionNumber}}, + {DB::Properties::kEstimateLiveDataSize, + {true, nullptr, &InternalStats::HandleEstimateLiveDataSize}}, + {DB::Properties::kBaseLevel, + {false, nullptr, &InternalStats::HandleBaseLevel}}, + {DB::Properties::kTotalSstFilesSize, + {false, nullptr, &InternalStats::HandleTotalSstFilesSize}}, + {DB::Properties::kEstimatePendingCompactionBytes, + {false, nullptr, &InternalStats::HandleEstimatePendingCompactionBytes}}, + {DB::Properties::kNumRunningFlushes, + {false, nullptr, &InternalStats::HandleNumRunningFlushes}}, + {DB::Properties::kNumRunningCompactions, + {false, nullptr, &InternalStats::HandleNumRunningCompactions}}, +}; + +const DBPropertyInfo* GetPropertyInfo(const Slice& property) { + std::string ppt_name = GetPropertyNameAndArg(property).first.ToString(); + auto ppt_info_iter = InternalStats::ppt_name_to_info.find(ppt_name); + if (ppt_info_iter == InternalStats::ppt_name_to_info.end()) { + return nullptr; + } + return &ppt_info_iter->second; +} + +bool InternalStats::GetStringProperty(const DBPropertyInfo& property_info, + const Slice& property, + std::string* value) { + assert(value != nullptr); + assert(property_info.handle_string != nullptr); + Slice arg = GetPropertyNameAndArg(property).second; + return (this->*(property_info.handle_string))(value, arg); +} + +bool 
InternalStats::GetIntProperty(const DBPropertyInfo& property_info,
+                              uint64_t* value, DBImpl* db) {
+  assert(value != nullptr);
+  assert(property_info.handle_int != nullptr &&
+         !property_info.need_out_of_mutex);
+  db->mutex_.AssertHeld();
+  return (this->*(property_info.handle_int))(value, db, nullptr /* version */);
+}
+
+bool InternalStats::GetIntPropertyOutOfMutex(
+    const DBPropertyInfo& property_info, Version* version, uint64_t* value) {
+  assert(value != nullptr);
+  assert(property_info.handle_int != nullptr &&
+         property_info.need_out_of_mutex);
+  return (this->*(property_info.handle_int))(value, nullptr /* db */, version);
+}
+
+bool InternalStats::HandleNumFilesAtLevel(std::string* value, Slice suffix) {
+  uint64_t level;
+  const auto* vstorage = cfd_->current()->storage_info();
+  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
+  if (!ok || static_cast<int>(level) >= number_levels_) {
+    return false;
+  } else {
+    char buf[100];
+    snprintf(buf, sizeof(buf), "%d",
+             vstorage->NumLevelFiles(static_cast<int>(level)));
+    *value = buf;
+    return true;
+  }
+}
+
+bool InternalStats::HandleCompressionRatioAtLevelPrefix(std::string* value,
+                                                        Slice suffix) {
+  uint64_t level;
+  const auto* vstorage = cfd_->current()->storage_info();
+  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
+  if (!ok || level >= static_cast<uint64_t>(number_levels_)) {
+    return false;
+  }
+  *value = ToString(
+      vstorage->GetEstimatedCompressionRatioAtLevel(static_cast<int>(level)));
+  return true;
+}
+
+bool InternalStats::HandleLevelStats(std::string* value, Slice suffix) {
+  char buf[1000];
+  const auto* vstorage = cfd_->current()->storage_info();
+  snprintf(buf, sizeof(buf),
+           "Level Files Size(MB)\n"
+           "--------------------\n");
+  value->append(buf);
+
+  for (int level = 0; level < number_levels_; level++) {
+    snprintf(buf, sizeof(buf), "%3d %8d %8.0f\n", level,
+             vstorage->NumLevelFiles(level),
+             vstorage->NumLevelBytes(level) / kMB);
+    value->append(buf);
+  }
+  return true;
+}
+
+bool InternalStats::HandleStats(std::string* value, Slice suffix) {
+  if (!HandleCFStats(value, suffix)) {
+    return false;
+  }
+  if (!HandleDBStats(value, suffix)) {
+    return false;
+  }
+  return true;
+}
+
+bool InternalStats::HandleCFStats(std::string* value, Slice suffix) {
+  DumpCFStats(value);
+  return true;
+}
+
+bool InternalStats::HandleDBStats(std::string* value, Slice suffix) {
+  DumpDBStats(value);
+  return true;
+}
+
+bool InternalStats::HandleSsTables(std::string* value, Slice suffix) {
+  auto* current = cfd_->current();
+  *value = current->DebugString();
+  return true;
+}
+
+bool InternalStats::HandleAggregatedTableProperties(std::string* value,
+                                                    Slice suffix) {
+  std::shared_ptr<const TableProperties> tp;
+  auto s = cfd_->current()->GetAggregatedTableProperties(&tp);
+  if (!s.ok()) {
+    return false;
+  }
+  *value = tp->ToString();
+  return true;
+}
+
+bool InternalStats::HandleAggregatedTablePropertiesAtLevel(std::string* value,
+                                                           Slice suffix) {
+  uint64_t level;
+  bool ok = ConsumeDecimalNumber(&suffix, &level) && suffix.empty();
+  if (!ok || static_cast<int>(level) >= number_levels_) {
+    return false;
+  }
+  std::shared_ptr<const TableProperties> tp;
+  auto s = cfd_->current()->GetAggregatedTableProperties(
+      &tp, static_cast<int>(level));
+  if (!s.ok()) {
+    return false;
+  }
+  *value = tp->ToString();
+  return true;
+}
+
+bool InternalStats::HandleNumImmutableMemTable(uint64_t* value, DBImpl* db,
+                                               Version* version) {
+  *value = cfd_->imm()->NumNotFlushed();
+  return true;
+}
+
+bool InternalStats::HandleNumImmutableMemTableFlushed(uint64_t* value,
+                                                      DBImpl*
db, + Version* version) { + *value = cfd_->imm()->NumFlushed(); + return true; +} + +bool InternalStats::HandleMemTableFlushPending(uint64_t* value, DBImpl* db, + Version* version) { + // Return number of mem tables that are ready to flush (made immutable) + *value = (cfd_->imm()->IsFlushPending() ? 1 : 0); + return true; +} + +bool InternalStats::HandleNumRunningFlushes(uint64_t* value, DBImpl* db, + Version* version) { + *value = db->num_running_flushes(); + return true; +} + +bool InternalStats::HandleCompactionPending(uint64_t* value, DBImpl* db, + Version* version) { + // 1 if the system already determines at least one compaction is needed. + // 0 otherwise, + const auto* vstorage = cfd_->current()->storage_info(); + *value = (cfd_->compaction_picker()->NeedsCompaction(vstorage) ? 1 : 0); + return true; +} + +bool InternalStats::HandleNumRunningCompactions(uint64_t* value, DBImpl* db, + Version* version) { + *value = db->num_running_compactions_; + return true; +} + +bool InternalStats::HandleBackgroundErrors(uint64_t* value, DBImpl* db, + Version* version) { + // Accumulated number of errors in background flushes or compactions. + *value = GetBackgroundErrorCount(); + return true; +} + +bool InternalStats::HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* db, + Version* version) { + // Current size of the active memtable + *value = cfd_->mem()->ApproximateMemoryUsage(); + return true; +} + +bool InternalStats::HandleCurSizeAllMemTables(uint64_t* value, DBImpl* db, + Version* version) { + // Current size of the active memtable + immutable memtables + *value = cfd_->mem()->ApproximateMemoryUsage() + + cfd_->imm()->ApproximateUnflushedMemTablesMemoryUsage(); + return true; +} + +bool InternalStats::HandleSizeAllMemTables(uint64_t* value, DBImpl* db, + Version* version) { + *value = cfd_->mem()->ApproximateMemoryUsage() + + cfd_->imm()->ApproximateMemoryUsage(); + return true; +} + +bool InternalStats::HandleNumEntriesActiveMemTable(uint64_t* value, DBImpl* db, + Version* version) { + // Current number of entires in the active memtable + *value = cfd_->mem()->num_entries(); + return true; +} + +bool InternalStats::HandleNumEntriesImmMemTables(uint64_t* value, DBImpl* db, + Version* version) { + // Current number of entries in the immutable memtables + *value = cfd_->imm()->current()->GetTotalNumEntries(); + return true; +} + +bool InternalStats::HandleNumDeletesActiveMemTable(uint64_t* value, DBImpl* db, + Version* version) { + // Current number of entires in the active memtable + *value = cfd_->mem()->num_deletes(); + return true; +} + +bool InternalStats::HandleNumDeletesImmMemTables(uint64_t* value, DBImpl* db, + Version* version) { + // Current number of entries in the immutable memtables + *value = cfd_->imm()->current()->GetTotalNumDeletes(); + return true; +} + +bool InternalStats::HandleEstimateNumKeys(uint64_t* value, DBImpl* db, + Version* version) { + // Estimate number of entries in the column family: + // Use estimated entries in tables + total entries in memtables. 
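+  // Each delete is counted twice below: once because the tombstone is itself
+  // an entry in num_entries, and once because it is assumed to shadow one
+  // previously written key. With hypothetical counts of 100 memtable
+  // entries, 10 of them deletes, and 500 estimated active table keys, the
+  // estimate is 100 - 10 * 2 + 500 = 580.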
+ const auto* vstorage = cfd_->current()->storage_info(); + *value = cfd_->mem()->num_entries() + + cfd_->imm()->current()->GetTotalNumEntries() - + (cfd_->mem()->num_deletes() + + cfd_->imm()->current()->GetTotalNumDeletes()) * + 2 + + vstorage->GetEstimatedActiveKeys(); + return true; +} + +bool InternalStats::HandleNumSnapshots(uint64_t* value, DBImpl* db, + Version* version) { + *value = db->snapshots().count(); + return true; +} + +bool InternalStats::HandleOldestSnapshotTime(uint64_t* value, DBImpl* db, + Version* version) { + *value = static_cast(db->snapshots().GetOldestSnapshotTime()); + return true; +} + +bool InternalStats::HandleNumLiveVersions(uint64_t* value, DBImpl* db, + Version* version) { + *value = cfd_->GetNumLiveVersions(); + return true; +} + +bool InternalStats::HandleCurrentSuperVersionNumber(uint64_t* value, DBImpl* db, + Version* version) { + *value = cfd_->GetSuperVersionNumber(); + return true; +} + +bool InternalStats::HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db, + Version* version) { + *value = db->IsFileDeletionsEnabled(); + return true; +} + +bool InternalStats::HandleBaseLevel(uint64_t* value, DBImpl* db, + Version* version) { + const auto* vstorage = cfd_->current()->storage_info(); + *value = vstorage->base_level(); + return true; +} + +bool InternalStats::HandleTotalSstFilesSize(uint64_t* value, DBImpl* db, + Version* version) { + *value = cfd_->GetTotalSstFilesSize(); + return true; +} + +bool InternalStats::HandleEstimatePendingCompactionBytes(uint64_t* value, + DBImpl* db, + Version* version) { + const auto* vstorage = cfd_->current()->storage_info(); + *value = vstorage->estimated_compaction_needed_bytes(); + return true; +} + +bool InternalStats::HandleEstimateTableReadersMem(uint64_t* value, DBImpl* db, + Version* version) { + *value = (version == nullptr) ? 0 : version->GetMemoryUsageByTableReaders(); + return true; +} + +bool InternalStats::HandleEstimateLiveDataSize(uint64_t* value, DBImpl* db, + Version* version) { + const auto* vstorage = cfd_->current()->storage_info(); + *value = vstorage->EstimateLiveDataSize(); + return true; +} + +void InternalStats::DumpDBStats(std::string* value) { + char buf[1000]; + // DB-level stats, only available from default column family + double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec; + double interval_seconds_up = seconds_up - db_stats_snapshot_.seconds_up; + snprintf(buf, sizeof(buf), + "\n** DB Stats **\nUptime(secs): %.1f total, %.1f interval\n", + seconds_up, interval_seconds_up); + value->append(buf); + // Cumulative + uint64_t user_bytes_written = GetDBStats(InternalStats::BYTES_WRITTEN); + uint64_t num_keys_written = GetDBStats(InternalStats::NUMBER_KEYS_WRITTEN); + uint64_t write_other = GetDBStats(InternalStats::WRITE_DONE_BY_OTHER); + uint64_t write_self = GetDBStats(InternalStats::WRITE_DONE_BY_SELF); + uint64_t wal_bytes = GetDBStats(InternalStats::WAL_FILE_BYTES); + uint64_t wal_synced = GetDBStats(InternalStats::WAL_FILE_SYNCED); + uint64_t write_with_wal = GetDBStats(InternalStats::WRITE_WITH_WAL); + uint64_t write_stall_micros = GetDBStats(InternalStats::WRITE_STALL_MICROS); + + const int kHumanMicrosLen = 32; + char human_micros[kHumanMicrosLen]; + + // Data + // writes: total number of write requests. + // keys: total number of key updates issued by all the write requests + // commit groups: number of group commits issued to the DB. Each group can + // contain one or more writes. 
+  //    so writes/keys is the average number of keys per write request, and
+  //    writes/groups is the average group commit size.
+  //
+  // The format is the same for interval stats.
+  snprintf(buf, sizeof(buf),
+           "Cumulative writes: %s writes, %s keys, %s commit groups, "
+           "%.1f writes per commit group, ingest: %.2f GB, %.2f MB/s\n",
+           NumberToHumanString(write_other + write_self).c_str(),
+           NumberToHumanString(num_keys_written).c_str(),
+           NumberToHumanString(write_self).c_str(),
+           (write_other + write_self) / static_cast<double>(write_self + 1),
+           user_bytes_written / kGB, user_bytes_written / kMB / seconds_up);
+  value->append(buf);
+  // WAL
+  snprintf(buf, sizeof(buf),
+           "Cumulative WAL: %s writes, %s syncs, "
+           "%.2f writes per sync, written: %.2f GB, %.2f MB/s\n",
+           NumberToHumanString(write_with_wal).c_str(),
+           NumberToHumanString(wal_synced).c_str(),
+           write_with_wal / static_cast<double>(wal_synced + 1),
+           wal_bytes / kGB, wal_bytes / kMB / seconds_up);
+  value->append(buf);
+  // Stall
+  AppendHumanMicros(write_stall_micros, human_micros, kHumanMicrosLen, true);
+  snprintf(buf, sizeof(buf), "Cumulative stall: %s, %.1f percent\n",
+           human_micros,
+           // 10000 = divide by 1M to get secs, then multiply by 100 for pct
+           write_stall_micros / 10000.0 / std::max(seconds_up, 0.001));
+  value->append(buf);
+
+  // Interval
+  uint64_t interval_write_other = write_other - db_stats_snapshot_.write_other;
+  uint64_t interval_write_self = write_self - db_stats_snapshot_.write_self;
+  uint64_t interval_num_keys_written =
+      num_keys_written - db_stats_snapshot_.num_keys_written;
+  snprintf(buf, sizeof(buf),
+           "Interval writes: %s writes, %s keys, %s commit groups, "
+           "%.1f writes per commit group, ingest: %.2f MB, %.2f MB/s\n",
+           NumberToHumanString(
+               interval_write_other + interval_write_self).c_str(),
+           NumberToHumanString(interval_num_keys_written).c_str(),
+           NumberToHumanString(interval_write_self).c_str(),
+           static_cast<double>(interval_write_other + interval_write_self) /
+               (interval_write_self + 1),
+           (user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB,
+           (user_bytes_written - db_stats_snapshot_.ingest_bytes) / kMB /
+               std::max(interval_seconds_up, 0.001));
+  value->append(buf);
+
+  uint64_t interval_write_with_wal =
+      write_with_wal - db_stats_snapshot_.write_with_wal;
+  uint64_t interval_wal_synced = wal_synced - db_stats_snapshot_.wal_synced;
+  uint64_t interval_wal_bytes = wal_bytes - db_stats_snapshot_.wal_bytes;
+
+  snprintf(buf, sizeof(buf),
+           "Interval WAL: %s writes, %s syncs, "
+           "%.2f writes per sync, written: %.2f MB, %.2f MB/s\n",
+           NumberToHumanString(interval_write_with_wal).c_str(),
+           NumberToHumanString(interval_wal_synced).c_str(),
+           interval_write_with_wal /
+               static_cast<double>(interval_wal_synced + 1),
+           interval_wal_bytes / kGB,
+           interval_wal_bytes / kMB / std::max(interval_seconds_up, 0.001));
+  value->append(buf);
+
+  // Stall
+  AppendHumanMicros(
+      write_stall_micros - db_stats_snapshot_.write_stall_micros,
+      human_micros, kHumanMicrosLen, true);
+  snprintf(buf, sizeof(buf), "Interval stall: %s, %.1f percent\n",
+           human_micros,
+           // 10000 = divide by 1M to get secs, then multiply by 100 for pct
+           (write_stall_micros - db_stats_snapshot_.write_stall_micros) /
+               10000.0 / std::max(interval_seconds_up, 0.001));
+  value->append(buf);
+
+  for (int level = 0; level < number_levels_; level++) {
+    if (!file_read_latency_[level].Empty()) {
+      char buf2[5000];
+      snprintf(buf2, sizeof(buf2),
+               "** Level %d read latency histogram (micros):\n%s\n", level,
+               file_read_latency_[level].ToString().c_str());
+      value->append(buf2);
+    }
+  }
+
+  db_stats_snapshot_.seconds_up = seconds_up;
+  db_stats_snapshot_.ingest_bytes = user_bytes_written;
+  db_stats_snapshot_.write_other = write_other;
+  db_stats_snapshot_.write_self = write_self;
+  db_stats_snapshot_.num_keys_written = num_keys_written;
+  db_stats_snapshot_.wal_bytes = wal_bytes;
+  db_stats_snapshot_.wal_synced = wal_synced;
+  db_stats_snapshot_.write_with_wal = write_with_wal;
+  db_stats_snapshot_.write_stall_micros = write_stall_micros;
+}
+
+void InternalStats::DumpCFStats(std::string* value) {
+  const VersionStorageInfo* vstorage = cfd_->current()->storage_info();
+
+  int num_levels_to_check =
+      (cfd_->ioptions()->compaction_style != kCompactionStyleFIFO)
+          ? vstorage->num_levels() - 1
+          : 1;
+
+  // Compaction scores are sorted based on their value. Restore them to the
+  // level order.
+  std::vector<double> compaction_score(number_levels_, 0);
+  for (int i = 0; i < num_levels_to_check; ++i) {
+    compaction_score[vstorage->CompactionScoreLevel(i)] =
+        vstorage->CompactionScore(i);
+  }
+  // Count # of files being compacted for each level
+  std::vector<int> files_being_compacted(number_levels_, 0);
+  for (int level = 0; level < number_levels_; ++level) {
+    for (auto* f : vstorage->LevelFiles(level)) {
+      if (f->being_compacted) {
+        ++files_being_compacted[level];
+      }
+    }
+  }
+
+  char buf[1000];
+  // Per-ColumnFamily stats
+  PrintLevelStatsHeader(buf, sizeof(buf), cfd_->GetName());
+  value->append(buf);
+
+  CompactionStats stats_sum(0);
+  int total_files = 0;
+  int total_files_being_compacted = 0;
+  double total_file_size = 0;
+  for (int level = 0; level < number_levels_; level++) {
+    int files = vstorage->NumLevelFiles(level);
+    total_files += files;
+    total_files_being_compacted += files_being_compacted[level];
+    if (comp_stats_[level].micros > 0 || files > 0) {
+      stats_sum.Add(comp_stats_[level]);
+      total_file_size += vstorage->NumLevelBytes(level);
+      double w_amp =
+          (comp_stats_[level].bytes_read_non_output_levels == 0) ?
0.0
+              : static_cast<double>(comp_stats_[level].bytes_written) /
+                    comp_stats_[level].bytes_read_non_output_levels;
+      PrintLevelStats(buf, sizeof(buf), "L" + ToString(level), files,
+                      files_being_compacted[level],
+                      static_cast<double>(vstorage->NumLevelBytes(level)),
+                      compaction_score[level],
+                      w_amp, comp_stats_[level]);
+      value->append(buf);
+    }
+  }
+
+  uint64_t flush_ingest = cf_stats_value_[BYTES_FLUSHED];
+  uint64_t add_file_ingest = cf_stats_value_[BYTES_INGESTED_ADD_FILE];
+  uint64_t curr_ingest = flush_ingest + add_file_ingest;
+  // Cumulative summary
+  double w_amp =
+      stats_sum.bytes_written / static_cast<double>(curr_ingest + 1);
+  uint64_t total_stall_count =
+      cf_stats_count_[LEVEL0_SLOWDOWN_TOTAL] +
+      cf_stats_count_[LEVEL0_NUM_FILES_TOTAL] +
+      cf_stats_count_[SOFT_PENDING_COMPACTION_BYTES_LIMIT] +
+      cf_stats_count_[HARD_PENDING_COMPACTION_BYTES_LIMIT] +
+      cf_stats_count_[MEMTABLE_COMPACTION] + cf_stats_count_[MEMTABLE_SLOWDOWN];
+  // Stats summary across levels
+  PrintLevelStats(buf, sizeof(buf), "Sum", total_files,
+                  total_files_being_compacted, total_file_size, 0, w_amp,
+                  stats_sum);
+  value->append(buf);
+  // Interval summary
+  uint64_t interval_flush_ingest =
+      flush_ingest - cf_stats_snapshot_.ingest_bytes_flush;
+  uint64_t interval_add_file_ingest =
+      add_file_ingest - cf_stats_snapshot_.ingest_bytes_add_file;
+  uint64_t interval_ingest =
+      interval_flush_ingest + interval_add_file_ingest + 1;
+  CompactionStats interval_stats(stats_sum);
+  interval_stats.Subtract(cf_stats_snapshot_.comp_stats);
+  w_amp = interval_stats.bytes_written / static_cast<double>(interval_ingest);
+  PrintLevelStats(buf, sizeof(buf), "Int", 0, 0, 0, 0, w_amp, interval_stats);
+  value->append(buf);
+
+  double seconds_up = (env_->NowMicros() - started_at_ + 1) / kMicrosInSec;
+  double interval_seconds_up = seconds_up - cf_stats_snapshot_.seconds_up;
+  snprintf(buf, sizeof(buf), "Uptime(secs): %.1f total, %.1f interval\n",
+           seconds_up, interval_seconds_up);
+  value->append(buf);
+
+  snprintf(buf, sizeof(buf), "Flush(GB): cumulative %.3f, interval %.3f\n",
+           flush_ingest / kGB, interval_flush_ingest / kGB);
+  value->append(buf);
+  snprintf(buf, sizeof(buf), "AddFile(GB): cumulative %.3f, interval %.3f\n",
+           add_file_ingest / kGB, interval_add_file_ingest / kGB);
+  value->append(buf);
+
+  // Compact
+  uint64_t compact_bytes_read = 0;
+  uint64_t compact_bytes_write = 0;
+  uint64_t compact_micros = 0;
+  for (int level = 0; level < number_levels_; level++) {
+    compact_bytes_read += comp_stats_[level].bytes_read_output_level +
+                          comp_stats_[level].bytes_read_non_output_levels;
+    compact_bytes_write += comp_stats_[level].bytes_written;
+    compact_micros += comp_stats_[level].micros;
+  }
+
+  snprintf(buf, sizeof(buf),
+           "Cumulative compaction: %.2f GB write, %.2f MB/s write, "
+           "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
+           compact_bytes_write / kGB, compact_bytes_write / kMB / seconds_up,
+           compact_bytes_read / kGB, compact_bytes_read / kMB / seconds_up,
+           compact_micros / kMicrosInSec);
+  value->append(buf);
+
+  // Compaction interval
+  uint64_t interval_compact_bytes_write =
+      compact_bytes_write - cf_stats_snapshot_.compact_bytes_write;
+  uint64_t interval_compact_bytes_read =
+      compact_bytes_read - cf_stats_snapshot_.compact_bytes_read;
+  uint64_t interval_compact_micros =
+      compact_micros - cf_stats_snapshot_.compact_micros;
+
+  snprintf(
+      buf, sizeof(buf),
+      "Interval compaction: %.2f GB write, %.2f MB/s write, "
+      "%.2f GB read, %.2f MB/s read, %.1f seconds\n",
+      interval_compact_bytes_write / kGB,
+      interval_compact_bytes_write / kMB /
std::max(interval_seconds_up, 0.001),
+      interval_compact_bytes_read / kGB,
+      interval_compact_bytes_read / kMB / std::max(interval_seconds_up, 0.001),
+      interval_compact_micros / kMicrosInSec);
+  value->append(buf);
+  cf_stats_snapshot_.compact_bytes_write = compact_bytes_write;
+  cf_stats_snapshot_.compact_bytes_read = compact_bytes_read;
+  cf_stats_snapshot_.compact_micros = compact_micros;
+
+  snprintf(buf, sizeof(buf),
+           "Stalls(count): %" PRIu64 " level0_slowdown, "
+           "%" PRIu64 " level0_slowdown_with_compaction, "
+           "%" PRIu64 " level0_numfiles, "
+           "%" PRIu64 " level0_numfiles_with_compaction, "
+           "%" PRIu64 " stop for pending_compaction_bytes, "
+           "%" PRIu64 " slowdown for pending_compaction_bytes, "
+           "%" PRIu64 " memtable_compaction, "
+           "%" PRIu64 " memtable_slowdown, "
+           "interval %" PRIu64 " total count\n",
+           cf_stats_count_[LEVEL0_SLOWDOWN_TOTAL],
+           cf_stats_count_[LEVEL0_SLOWDOWN_WITH_COMPACTION],
+           cf_stats_count_[LEVEL0_NUM_FILES_TOTAL],
+           cf_stats_count_[LEVEL0_NUM_FILES_WITH_COMPACTION],
+           cf_stats_count_[HARD_PENDING_COMPACTION_BYTES_LIMIT],
+           cf_stats_count_[SOFT_PENDING_COMPACTION_BYTES_LIMIT],
+           cf_stats_count_[MEMTABLE_COMPACTION],
+           cf_stats_count_[MEMTABLE_SLOWDOWN],
+           total_stall_count - cf_stats_snapshot_.stall_count);
+  value->append(buf);
+
+  cf_stats_snapshot_.ingest_bytes_flush = flush_ingest;
+  cf_stats_snapshot_.ingest_bytes_add_file = add_file_ingest;
+  cf_stats_snapshot_.comp_stats = stats_sum;
+  cf_stats_snapshot_.stall_count = total_stall_count;
+}
+
+#else
+
+const DBPropertyInfo* GetPropertyInfo(const Slice& property) {
+  return nullptr;
+}
+
+#endif  // !ROCKSDB_LITE
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/internal_stats.h b/external/rocksdb/db/internal_stats.h
new file mode 100755
index 0000000000..56e36b692f
--- /dev/null
+++ b/external/rocksdb/db/internal_stats.h
@@ -0,0 +1,449 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+
+#pragma once
+#include "db/version_set.h"
+
+#include <string>
+#include <vector>
+
+class ColumnFamilyData;
+
+namespace rocksdb {
+
+class MemTableList;
+class DBImpl;
+class InternalStats;
+
+// Config for retrieving a property's value.
+struct DBPropertyInfo {
+  bool need_out_of_mutex;
+
+  // gcc had an internal error for initializing union of pointer-to-member-
+  // functions. Workaround is to populate exactly one of the following function
+  // pointers with a non-nullptr value.
+
+  // @param value Value-result argument for storing the property's string value
+  // @param suffix Argument portion of the property. For example, suffix would
+  //      be "5" for the property "rocksdb.num-files-at-level5". So far, only
+  //      certain string properties take an argument.
+  bool (InternalStats::*handle_string)(std::string* value, Slice suffix);
+
+  // @param value Value-result argument for storing the property's uint64 value
+  // @param db Many of the int properties rely on DBImpl methods.
+  // @param version Version is needed in case the property is retrieved without
+  //      holding db mutex, which is only supported for int properties.
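+  // Lookup goes through GetPropertyInfo(), which strips any numeric suffix
+  // before consulting ppt_name_to_info. For example, "rocksdb.stats" maps to
+  // an entry whose handle_string is non-null, while "rocksdb.num-snapshots"
+  // maps to one whose handle_int is non-null.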
+ bool (InternalStats::*handle_int)(uint64_t* value, DBImpl* db, + Version* version); +}; + +extern const DBPropertyInfo* GetPropertyInfo(const Slice& property); + +#ifndef ROCKSDB_LITE +class InternalStats { + public: + enum InternalCFStatsType { + LEVEL0_SLOWDOWN_TOTAL, + LEVEL0_SLOWDOWN_WITH_COMPACTION, + MEMTABLE_COMPACTION, + MEMTABLE_SLOWDOWN, + LEVEL0_NUM_FILES_TOTAL, + LEVEL0_NUM_FILES_WITH_COMPACTION, + SOFT_PENDING_COMPACTION_BYTES_LIMIT, + HARD_PENDING_COMPACTION_BYTES_LIMIT, + WRITE_STALLS_ENUM_MAX, + BYTES_FLUSHED, + BYTES_INGESTED_ADD_FILE, + INTERNAL_CF_STATS_ENUM_MAX, + }; + + enum InternalDBStatsType { + WAL_FILE_BYTES, + WAL_FILE_SYNCED, + BYTES_WRITTEN, + NUMBER_KEYS_WRITTEN, + WRITE_DONE_BY_OTHER, + WRITE_DONE_BY_SELF, + WRITE_WITH_WAL, + WRITE_STALL_MICROS, + INTERNAL_DB_STATS_ENUM_MAX, + }; + + InternalStats(int num_levels, Env* env, ColumnFamilyData* cfd) + : db_stats_{}, + cf_stats_value_{}, + cf_stats_count_{}, + comp_stats_(num_levels), + file_read_latency_(num_levels), + bg_error_count_(0), + number_levels_(num_levels), + env_(env), + cfd_(cfd), + started_at_(env->NowMicros()) {} + + // Per level compaction stats. comp_stats_[level] stores the stats for + // compactions that produced data for the specified "level". + struct CompactionStats { + uint64_t micros; + + // The number of bytes read from all non-output levels + uint64_t bytes_read_non_output_levels; + + // The number of bytes read from the compaction output level. + uint64_t bytes_read_output_level; + + // Total number of bytes written during compaction + uint64_t bytes_written; + + // Total number of bytes moved to the output level + uint64_t bytes_moved; + + // The number of compaction input files in all non-output levels. + int num_input_files_in_non_output_levels; + + // The number of compaction input files in the output level. + int num_input_files_in_output_level; + + // The number of compaction output files. 
+ int num_output_files; + + // Total incoming entries during compaction between levels N and N+1 + uint64_t num_input_records; + + // Accumulated diff number of entries + // (num input entries - num output entires) for compaction levels N and N+1 + uint64_t num_dropped_records; + + // Number of compactions done + int count; + + explicit CompactionStats(int _count = 0) + : micros(0), + bytes_read_non_output_levels(0), + bytes_read_output_level(0), + bytes_written(0), + bytes_moved(0), + num_input_files_in_non_output_levels(0), + num_input_files_in_output_level(0), + num_output_files(0), + num_input_records(0), + num_dropped_records(0), + count(_count) {} + + explicit CompactionStats(const CompactionStats& c) + : micros(c.micros), + bytes_read_non_output_levels(c.bytes_read_non_output_levels), + bytes_read_output_level(c.bytes_read_output_level), + bytes_written(c.bytes_written), + bytes_moved(c.bytes_moved), + num_input_files_in_non_output_levels( + c.num_input_files_in_non_output_levels), + num_input_files_in_output_level( + c.num_input_files_in_output_level), + num_output_files(c.num_output_files), + num_input_records(c.num_input_records), + num_dropped_records(c.num_dropped_records), + count(c.count) {} + + void Add(const CompactionStats& c) { + this->micros += c.micros; + this->bytes_read_non_output_levels += c.bytes_read_non_output_levels; + this->bytes_read_output_level += c.bytes_read_output_level; + this->bytes_written += c.bytes_written; + this->bytes_moved += c.bytes_moved; + this->num_input_files_in_non_output_levels += + c.num_input_files_in_non_output_levels; + this->num_input_files_in_output_level += + c.num_input_files_in_output_level; + this->num_output_files += c.num_output_files; + this->num_input_records += c.num_input_records; + this->num_dropped_records += c.num_dropped_records; + this->count += c.count; + } + + void Subtract(const CompactionStats& c) { + this->micros -= c.micros; + this->bytes_read_non_output_levels -= c.bytes_read_non_output_levels; + this->bytes_read_output_level -= c.bytes_read_output_level; + this->bytes_written -= c.bytes_written; + this->bytes_moved -= c.bytes_moved; + this->num_input_files_in_non_output_levels -= + c.num_input_files_in_non_output_levels; + this->num_input_files_in_output_level -= + c.num_input_files_in_output_level; + this->num_output_files -= c.num_output_files; + this->num_input_records -= c.num_input_records; + this->num_dropped_records -= c.num_dropped_records; + this->count -= c.count; + } + }; + + void AddCompactionStats(int level, const CompactionStats& stats) { + comp_stats_[level].Add(stats); + } + + void IncBytesMoved(int level, uint64_t amount) { + comp_stats_[level].bytes_moved += amount; + } + + void AddCFStats(InternalCFStatsType type, uint64_t value) { + cf_stats_value_[type] += value; + ++cf_stats_count_[type]; + } + + void AddDBStats(InternalDBStatsType type, uint64_t value) { + auto& v = db_stats_[type]; + v.store(v.load(std::memory_order_relaxed) + value, + std::memory_order_relaxed); + } + + uint64_t GetDBStats(InternalDBStatsType type) { + return db_stats_[type].load(std::memory_order_relaxed); + } + + HistogramImpl* GetFileReadHist(int level) { + return &file_read_latency_[level]; + } + + uint64_t GetBackgroundErrorCount() const { return bg_error_count_; } + + uint64_t BumpAndGetBackgroundErrorCount() { return ++bg_error_count_; } + + bool GetStringProperty(const DBPropertyInfo& property_info, + const Slice& property, std::string* value); + + bool GetIntProperty(const DBPropertyInfo& property_info, 
uint64_t* value,
+                      DBImpl* db);
+
+  bool GetIntPropertyOutOfMutex(const DBPropertyInfo& property_info,
+                                Version* version, uint64_t* value);
+
+  // Store a mapping from the user-facing DB::Properties string to our
+  // DBPropertyInfo struct used internally for retrieving properties.
+  static const std::unordered_map<std::string, DBPropertyInfo>
+      ppt_name_to_info;
+
+ private:
+  void DumpDBStats(std::string* value);
+  void DumpCFStats(std::string* value);
+
+  // Per-DB stats
+  std::atomic<uint64_t> db_stats_[INTERNAL_DB_STATS_ENUM_MAX];
+  // Per-ColumnFamily stats
+  uint64_t cf_stats_value_[INTERNAL_CF_STATS_ENUM_MAX];
+  uint64_t cf_stats_count_[INTERNAL_CF_STATS_ENUM_MAX];
+  // Per-ColumnFamily/level compaction stats
+  std::vector<CompactionStats> comp_stats_;
+  std::vector<HistogramImpl> file_read_latency_;
+
+  // Used to compute per-interval statistics
+  struct CFStatsSnapshot {
+    // ColumnFamily-level stats
+    CompactionStats comp_stats;
+    uint64_t ingest_bytes_flush;     // Bytes written to L0 (Flush)
+    uint64_t ingest_bytes_add_file;  // Bytes written to L0 (AddFile)
+    uint64_t stall_count;            // Stall count
+    // Stats from compaction jobs - bytes written, bytes read, duration.
+    uint64_t compact_bytes_write;
+    uint64_t compact_bytes_read;
+    uint64_t compact_micros;
+    double seconds_up;
+
+    CFStatsSnapshot()
+        : comp_stats(0),
+          ingest_bytes_flush(0),
+          ingest_bytes_add_file(0),
+          stall_count(0),
+          compact_bytes_write(0),
+          compact_bytes_read(0),
+          compact_micros(0),
+          seconds_up(0) {}
+  } cf_stats_snapshot_;
+
+  struct DBStatsSnapshot {
+    // DB-level stats
+    uint64_t ingest_bytes;    // Bytes written by user
+    uint64_t wal_bytes;       // Bytes written to WAL
+    uint64_t wal_synced;      // Number of times WAL is synced
+    uint64_t write_with_wal;  // Number of writes that request WAL
+    // These count the number of writes processed by the calling thread or
+    // another thread.
+    uint64_t write_other;
+    uint64_t write_self;
+    // Total number of keys written. write_self and write_other measure the
+    // number of write requests written; each write request can contain
+    // updates to multiple keys. num_keys_written is the total number of keys
+    // updated by all those writes.
+    uint64_t num_keys_written;
+    // Total time writes delayed by stalls.
+    uint64_t write_stall_micros;
+    double seconds_up;
+
+    DBStatsSnapshot()
+        : ingest_bytes(0),
+          wal_bytes(0),
+          wal_synced(0),
+          write_with_wal(0),
+          write_other(0),
+          write_self(0),
+          num_keys_written(0),
+          write_stall_micros(0),
+          seconds_up(0) {}
+  } db_stats_snapshot_;
+
+  // Handler functions for getting property values. They use "value" as a
+  // value-result argument, and return true upon successfully setting "value".
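+  // For example, HandleNumSnapshots(&v, db, nullptr) stores
+  // db->snapshots().count() in v and returns true, while string handlers
+  // such as HandleLevelStats append their report to *value instead.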
+ bool HandleNumFilesAtLevel(std::string* value, Slice suffix); + bool HandleCompressionRatioAtLevelPrefix(std::string* value, Slice suffix); + bool HandleLevelStats(std::string* value, Slice suffix); + bool HandleStats(std::string* value, Slice suffix); + bool HandleCFStats(std::string* value, Slice suffix); + bool HandleDBStats(std::string* value, Slice suffix); + bool HandleSsTables(std::string* value, Slice suffix); + bool HandleAggregatedTableProperties(std::string* value, Slice suffix); + bool HandleAggregatedTablePropertiesAtLevel(std::string* value, Slice suffix); + bool HandleNumImmutableMemTable(uint64_t* value, DBImpl* db, + Version* version); + bool HandleNumImmutableMemTableFlushed(uint64_t* value, DBImpl* db, + Version* version); + bool HandleMemTableFlushPending(uint64_t* value, DBImpl* db, + Version* version); + bool HandleNumRunningFlushes(uint64_t* value, DBImpl* db, Version* version); + bool HandleCompactionPending(uint64_t* value, DBImpl* db, Version* version); + bool HandleNumRunningCompactions(uint64_t* value, DBImpl* db, + Version* version); + bool HandleBackgroundErrors(uint64_t* value, DBImpl* db, Version* version); + bool HandleCurSizeActiveMemTable(uint64_t* value, DBImpl* db, + Version* version); + bool HandleCurSizeAllMemTables(uint64_t* value, DBImpl* db, Version* version); + bool HandleSizeAllMemTables(uint64_t* value, DBImpl* db, Version* version); + bool HandleNumEntriesActiveMemTable(uint64_t* value, DBImpl* db, + Version* version); + bool HandleNumEntriesImmMemTables(uint64_t* value, DBImpl* db, + Version* version); + bool HandleNumDeletesActiveMemTable(uint64_t* value, DBImpl* db, + Version* version); + bool HandleNumDeletesImmMemTables(uint64_t* value, DBImpl* db, + Version* version); + bool HandleEstimateNumKeys(uint64_t* value, DBImpl* db, Version* version); + bool HandleNumSnapshots(uint64_t* value, DBImpl* db, Version* version); + bool HandleOldestSnapshotTime(uint64_t* value, DBImpl* db, Version* version); + bool HandleNumLiveVersions(uint64_t* value, DBImpl* db, Version* version); + bool HandleCurrentSuperVersionNumber(uint64_t* value, DBImpl* db, + Version* version); + bool HandleIsFileDeletionsEnabled(uint64_t* value, DBImpl* db, + Version* version); + bool HandleBaseLevel(uint64_t* value, DBImpl* db, Version* version); + bool HandleTotalSstFilesSize(uint64_t* value, DBImpl* db, Version* version); + bool HandleEstimatePendingCompactionBytes(uint64_t* value, DBImpl* db, + Version* version); + bool HandleEstimateTableReadersMem(uint64_t* value, DBImpl* db, + Version* version); + bool HandleEstimateLiveDataSize(uint64_t* value, DBImpl* db, + Version* version); + + // Total number of background errors encountered. Every time a flush task + // or compaction task fails, this counter is incremented. The failure can + // be caused by any possible reason, including file system errors, out of + // resources, or input file corruption. Failing when retrying the same flush + // or compaction will cause the counter to increase too. 
+  uint64_t bg_error_count_;
+
+  const int number_levels_;
+  Env* env_;
+  ColumnFamilyData* cfd_;
+  const uint64_t started_at_;
+};
+
+#else
+
+class InternalStats {
+ public:
+  enum InternalCFStatsType {
+    LEVEL0_SLOWDOWN_TOTAL,
+    LEVEL0_SLOWDOWN_WITH_COMPACTION,
+    MEMTABLE_COMPACTION,
+    MEMTABLE_SLOWDOWN,
+    LEVEL0_NUM_FILES_TOTAL,
+    LEVEL0_NUM_FILES_WITH_COMPACTION,
+    SOFT_PENDING_COMPACTION_BYTES_LIMIT,
+    HARD_PENDING_COMPACTION_BYTES_LIMIT,
+    WRITE_STALLS_ENUM_MAX,
+    BYTES_FLUSHED,
+    BYTES_INGESTED_ADD_FILE,
+    INTERNAL_CF_STATS_ENUM_MAX,
+  };
+
+  enum InternalDBStatsType {
+    WAL_FILE_BYTES,
+    WAL_FILE_SYNCED,
+    BYTES_WRITTEN,
+    NUMBER_KEYS_WRITTEN,
+    WRITE_DONE_BY_OTHER,
+    WRITE_DONE_BY_SELF,
+    WRITE_WITH_WAL,
+    WRITE_STALL_MICROS,
+    INTERNAL_DB_STATS_ENUM_MAX,
+  };
+
+  InternalStats(int num_levels, Env* env, ColumnFamilyData* cfd) {}
+
+  struct CompactionStats {
+    uint64_t micros;
+    uint64_t bytes_read_non_output_levels;
+    uint64_t bytes_read_output_level;
+    uint64_t bytes_written;
+    uint64_t bytes_moved;
+    int num_input_files_in_non_output_levels;
+    int num_input_files_in_output_level;
+    int num_output_files;
+    uint64_t num_input_records;
+    uint64_t num_dropped_records;
+    int count;
+
+    explicit CompactionStats(int _count = 0) {}
+
+    explicit CompactionStats(const CompactionStats& c) {}
+
+    void Add(const CompactionStats& c) {}
+
+    void Subtract(const CompactionStats& c) {}
+  };
+
+  void AddCompactionStats(int level, const CompactionStats& stats) {}
+
+  void IncBytesMoved(int level, uint64_t amount) {}
+
+  void AddCFStats(InternalCFStatsType type, uint64_t value) {}
+
+  void AddDBStats(InternalDBStatsType type, uint64_t value) {}
+
+  HistogramImpl* GetFileReadHist(int level) { return nullptr; }
+
+  uint64_t GetBackgroundErrorCount() const { return 0; }
+
+  uint64_t BumpAndGetBackgroundErrorCount() { return 0; }
+
+  bool GetStringProperty(const DBPropertyInfo& property_info,
+                         const Slice& property, std::string* value) {
+    return false;
+  }
+
+  bool GetIntProperty(const DBPropertyInfo& property_info, uint64_t* value,
+                      DBImpl* db) const {
+    return false;
+  }
+
+  bool GetIntPropertyOutOfMutex(const DBPropertyInfo& property_info,
+                                Version* version, uint64_t* value) const {
+    return false;
+  }
+};
+#endif  // !ROCKSDB_LITE
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/job_context.h b/external/rocksdb/db/job_context.h
new file mode 100755
index 0000000000..286d522b73
--- /dev/null
+++ b/external/rocksdb/db/job_context.h
@@ -0,0 +1,125 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include "db/log_writer.h"
+
+namespace rocksdb {
+
+class MemTable;
+
+struct JobContext {
+  inline bool HaveSomethingToDelete() const {
+    return full_scan_candidate_files.size() || sst_delete_files.size() ||
+           log_delete_files.size() || manifest_delete_files.size() ||
+           new_superversion != nullptr || superversions_to_free.size() > 0 ||
+           memtables_to_free.size() > 0 || logs_to_free.size() > 0;
+  }
+
+  // Structure to store information for candidate files to delete.
+  struct CandidateFileInfo {
+    std::string file_name;
+    uint32_t path_id;
+    CandidateFileInfo(std::string name, uint32_t path)
+        : file_name(std::move(name)), path_id(path) {}
+    bool operator==(const CandidateFileInfo& other) const {
+      return file_name == other.file_name && path_id == other.path_id;
+    }
+  };
+
+  // Unique job id
+  int job_id;
+
+  // a list of all files that we'll consider deleting
+  // (every once in a while this is filled up with all files
+  // in the DB directory)
+  // (filled only if we're doing full scan)
+  std::vector<CandidateFileInfo> full_scan_candidate_files;
+
+  // the list of all live sst files that cannot be deleted
+  std::vector<FileDescriptor> sst_live;
+
+  // a list of sst files that we need to delete
+  std::vector<FileMetaData*> sst_delete_files;
+
+  // a list of log files that we need to delete
+  std::vector<uint64_t> log_delete_files;
+
+  // a list of manifest files that we need to delete
+  std::vector<std::string> manifest_delete_files;
+
+  // a list of memtables to be freed
+  autovector<MemTable*> memtables_to_free;
+
+  autovector<SuperVersion*> superversions_to_free;
+
+  autovector<log::Writer*> logs_to_free;
+
+  SuperVersion* new_superversion;  // if nullptr no new superversion
+
+  // the current manifest_file_number, log_number and prev_log_number
+  // that corresponds to the set of files in 'live'.
+  uint64_t manifest_file_number;
+  uint64_t pending_manifest_file_number;
+  uint64_t log_number;
+  uint64_t prev_log_number;
+
+  uint64_t min_pending_output = 0;
+  uint64_t prev_total_log_size = 0;
+  size_t num_alive_log_files = 0;
+  uint64_t size_log_to_delete = 0;
+
+  explicit JobContext(int _job_id, bool create_superversion = false) {
+    job_id = _job_id;
+    manifest_file_number = 0;
+    pending_manifest_file_number = 0;
+    log_number = 0;
+    prev_log_number = 0;
+    new_superversion = create_superversion ? new SuperVersion() : nullptr;
+  }
+
+  // For a non-empty JobContext, Clean() has to be called at least once before
+  // destruction (see the asserts in ~JobContext()). Should be called with the
+  // DB mutex unlocked. The destructor doesn't call Clean() to avoid
+  // accidentally doing a potentially slow Clean() with the DB mutex locked.
+  void Clean() {
+    // free pending memtables
+    for (auto m : memtables_to_free) {
+      delete m;
+    }
+    // free superversions
+    for (auto s : superversions_to_free) {
+      delete s;
+    }
+    for (auto l : logs_to_free) {
+      delete l;
+    }
+    // if new_superversion was not used, it will be non-nullptr and needs
+    // to be freed here
+    delete new_superversion;
+
+    memtables_to_free.clear();
+    superversions_to_free.clear();
+    logs_to_free.clear();
+    new_superversion = nullptr;
+  }
+
+  ~JobContext() {
+    assert(memtables_to_free.size() == 0);
+    assert(superversions_to_free.size() == 0);
+    assert(new_superversion == nullptr);
+    assert(logs_to_free.size() == 0);
+  }
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/listener_test.cc b/external/rocksdb/db/listener_test.cc
new file mode 100755
index 0000000000..000fba6836
--- /dev/null
+++ b/external/rocksdb/db/listener_test.cc
@@ -0,0 +1,765 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+ +#include "db/db_impl.h" +#include "db/db_test_util.h" +#include "db/dbformat.h" +#include "db/filename.h" +#include "db/version_set.h" +#include "db/write_batch_internal.h" +#include "memtable/hash_linklist_rep.h" +#include "rocksdb/cache.h" +#include "rocksdb/compaction_filter.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/filter_policy.h" +#include "rocksdb/options.h" +#include "rocksdb/perf_context.h" +#include "rocksdb/slice.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/table.h" +#include "rocksdb/table_properties.h" +#include "table/block_based_table_factory.h" +#include "table/plain_table_factory.h" +#include "util/hash.h" +#include "util/logging.h" +#include "util/mutexlock.h" +#include "util/rate_limiter.h" +#include "util/statistics.h" +#include "util/string_util.h" +#include "util/sync_point.h" +#include "util/testharness.h" +#include "util/testutil.h" +#include "utilities/merge_operators.h" + +#ifndef ROCKSDB_LITE + +namespace rocksdb { + +class EventListenerTest : public DBTestBase { + public: + EventListenerTest() : DBTestBase("/listener_test") {} + + const size_t k110KB = 110 << 10; +}; + +struct TestPropertiesCollector : public rocksdb::TablePropertiesCollector { + virtual rocksdb::Status AddUserKey(const rocksdb::Slice& key, + const rocksdb::Slice& value, + rocksdb::EntryType type, + rocksdb::SequenceNumber seq, + uint64_t file_size) override { + return Status::OK(); + } + virtual rocksdb::Status Finish( + rocksdb::UserCollectedProperties* properties) override { + properties->insert({"0", "1"}); + return Status::OK(); + } + + virtual const char* Name() const override { + return "TestTablePropertiesCollector"; + } + + rocksdb::UserCollectedProperties GetReadableProperties() const override { + rocksdb::UserCollectedProperties ret; + ret["2"] = "3"; + return ret; + } +}; + +class TestPropertiesCollectorFactory : public TablePropertiesCollectorFactory { + public: + virtual TablePropertiesCollector* CreateTablePropertiesCollector( + TablePropertiesCollectorFactory::Context context) override { + return new TestPropertiesCollector; + } + const char* Name() const override { return "TestTablePropertiesCollector"; } +}; + +class TestCompactionListener : public EventListener { + public: + void OnCompactionCompleted(DB *db, const CompactionJobInfo& ci) override { + std::lock_guard lock(mutex_); + compacted_dbs_.push_back(db); + ASSERT_GT(ci.input_files.size(), 0U); + ASSERT_GT(ci.output_files.size(), 0U); + ASSERT_EQ(db->GetEnv()->GetThreadID(), ci.thread_id); + ASSERT_GT(ci.thread_id, 0U); + + for (auto fl : {ci.input_files, ci.output_files}) { + for (auto fn : fl) { + auto it = ci.table_properties.find(fn); + ASSERT_NE(it, ci.table_properties.end()); + auto tp = it->second; + ASSERT_TRUE(tp != nullptr); + ASSERT_EQ(tp->user_collected_properties.find("0")->second, "1"); + } + } + } + + std::vector compacted_dbs_; + std::mutex mutex_; +}; + +TEST_F(EventListenerTest, OnSingleDBCompactionTest) { + const int kTestKeySize = 16; + const int kTestValueSize = 984; + const int kEntrySize = kTestKeySize + kTestValueSize; + const int kEntriesPerBuffer = 100; + const int kNumL0Files = 4; + + Options options; + options.create_if_missing = true; + options.write_buffer_size = kEntrySize * kEntriesPerBuffer; + options.compaction_style = kCompactionStyleLevel; + options.target_file_size_base = options.write_buffer_size; + options.max_bytes_for_level_base = options.target_file_size_base * 2; + options.max_bytes_for_level_multiplier = 2; + options.compression 
= kNoCompression; +#if ROCKSDB_USING_THREAD_STATUS + options.enable_thread_tracking = true; +#endif // ROCKSDB_USING_THREAD_STATUS + options.level0_file_num_compaction_trigger = kNumL0Files; + options.table_properties_collector_factories.push_back( + std::make_shared<TestPropertiesCollectorFactory>()); + + TestCompactionListener* listener = new TestCompactionListener(); + options.listeners.emplace_back(listener); + std::vector<std::string> cf_names = { + "pikachu", "ilya", "muromec", "dobrynia", + "nikitich", "alyosha", "popovich"}; + CreateAndReopenWithCF(cf_names, options); + ASSERT_OK(Put(1, "pikachu", std::string(90000, 'p'))); + ASSERT_OK(Put(2, "ilya", std::string(90000, 'i'))); + ASSERT_OK(Put(3, "muromec", std::string(90000, 'm'))); + ASSERT_OK(Put(4, "dobrynia", std::string(90000, 'd'))); + ASSERT_OK(Put(5, "nikitich", std::string(90000, 'n'))); + ASSERT_OK(Put(6, "alyosha", std::string(90000, 'a'))); + ASSERT_OK(Put(7, "popovich", std::string(90000, 'p'))); + for (int i = 1; i < 8; ++i) { + ASSERT_OK(Flush(i)); + const Slice kRangeStart = "a"; + const Slice kRangeEnd = "z"; + ASSERT_OK(dbfull()->CompactRange(CompactRangeOptions(), handles_[i], + &kRangeStart, &kRangeEnd)); + dbfull()->TEST_WaitForFlushMemTable(); + dbfull()->TEST_WaitForCompact(); + } + + ASSERT_EQ(listener->compacted_dbs_.size(), cf_names.size()); + for (size_t i = 0; i < cf_names.size(); ++i) { + ASSERT_EQ(listener->compacted_dbs_[i], db_); + } +} + +// This simple Listener can only handle one flush at a time. +class TestFlushListener : public EventListener { + public: + explicit TestFlushListener(Env* env) + : slowdown_count(0), stop_count(0), db_closed(), env_(env) { + db_closed = false; + } + void OnTableFileCreated( + const TableFileCreationInfo& info) override { + // remember the info for later checking the FlushJobInfo. + prev_fc_info_ = info; + ASSERT_GT(info.db_name.size(), 0U); + ASSERT_GT(info.cf_name.size(), 0U); + ASSERT_GT(info.file_path.size(), 0U); + ASSERT_GT(info.job_id, 0); + ASSERT_GT(info.table_properties.data_size, 0U); + ASSERT_GT(info.table_properties.raw_key_size, 0U); + ASSERT_GT(info.table_properties.raw_value_size, 0U); + ASSERT_GT(info.table_properties.num_data_blocks, 0U); + ASSERT_GT(info.table_properties.num_entries, 0U); + +#if ROCKSDB_USING_THREAD_STATUS + // Verify the id of the current thread that created this table + // file matches the id of any active flush or compaction thread. + uint64_t thread_id = env_->GetThreadID(); + std::vector<ThreadStatus> thread_list; + ASSERT_OK(env_->GetThreadList(&thread_list)); + bool found_match = false; + for (auto thread_status : thread_list) { + if (thread_status.operation_type == ThreadStatus::OP_FLUSH || + thread_status.operation_type == ThreadStatus::OP_COMPACTION) { + if (thread_id == thread_status.thread_id) { + found_match = true; + break; + } + } + } + ASSERT_TRUE(found_match); +#endif // ROCKSDB_USING_THREAD_STATUS + } + + void OnFlushCompleted( + DB* db, const FlushJobInfo& info) override { + flushed_dbs_.push_back(db); + flushed_column_family_names_.push_back(info.cf_name); + if (info.triggered_writes_slowdown) { + slowdown_count++; + } + if (info.triggered_writes_stop) { + stop_count++; + } + // verify whether the previously created file matches the flushed file.
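+ // (Editor's note, an assumption based on the checks below: OnTableFileCreated + // is taken to fire before OnFlushCompleted for the same flush job, so + // prev_fc_info_ recorded there should describe exactly the file that this + // callback reports.)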
+ ASSERT_EQ(prev_fc_info_.db_name, db->GetName()); + ASSERT_EQ(prev_fc_info_.cf_name, info.cf_name); + ASSERT_EQ(prev_fc_info_.job_id, info.job_id); + ASSERT_EQ(prev_fc_info_.file_path, info.file_path); + ASSERT_EQ(db->GetEnv()->GetThreadID(), info.thread_id); + ASSERT_GT(info.thread_id, 0U); + ASSERT_EQ(info.table_properties.user_collected_properties.find("0")->second, + "1"); + } + + std::vector<std::string> flushed_column_family_names_; + std::vector<DB*> flushed_dbs_; + int slowdown_count; + int stop_count; + bool db_closing; + std::atomic_bool db_closed; + TableFileCreationInfo prev_fc_info_; + + protected: + Env* env_; +}; + +TEST_F(EventListenerTest, OnSingleDBFlushTest) { + Options options; + options.write_buffer_size = k110KB; +#if ROCKSDB_USING_THREAD_STATUS + options.enable_thread_tracking = true; +#endif // ROCKSDB_USING_THREAD_STATUS + TestFlushListener* listener = new TestFlushListener(options.env); + options.listeners.emplace_back(listener); + std::vector<std::string> cf_names = { + "pikachu", "ilya", "muromec", "dobrynia", + "nikitich", "alyosha", "popovich"}; + options.table_properties_collector_factories.push_back( + std::make_shared<TestPropertiesCollectorFactory>()); + CreateAndReopenWithCF(cf_names, options); + + ASSERT_OK(Put(1, "pikachu", std::string(90000, 'p'))); + ASSERT_OK(Put(2, "ilya", std::string(90000, 'i'))); + ASSERT_OK(Put(3, "muromec", std::string(90000, 'm'))); + ASSERT_OK(Put(4, "dobrynia", std::string(90000, 'd'))); + ASSERT_OK(Put(5, "nikitich", std::string(90000, 'n'))); + ASSERT_OK(Put(6, "alyosha", std::string(90000, 'a'))); + ASSERT_OK(Put(7, "popovich", std::string(90000, 'p'))); + for (int i = 1; i < 8; ++i) { + ASSERT_OK(Flush(i)); + dbfull()->TEST_WaitForFlushMemTable(); + ASSERT_EQ(listener->flushed_dbs_.size(), i); + ASSERT_EQ(listener->flushed_column_family_names_.size(), i); + } + + // make sure call-back functions are called in the right order + for (size_t i = 0; i < cf_names.size(); ++i) { + ASSERT_EQ(listener->flushed_dbs_[i], db_); + ASSERT_EQ(listener->flushed_column_family_names_[i], cf_names[i]); + } +} + +TEST_F(EventListenerTest, MultiCF) { + Options options; + options.write_buffer_size = k110KB; +#if ROCKSDB_USING_THREAD_STATUS + options.enable_thread_tracking = true; +#endif // ROCKSDB_USING_THREAD_STATUS + TestFlushListener* listener = new TestFlushListener(options.env); + options.listeners.emplace_back(listener); + options.table_properties_collector_factories.push_back( + std::make_shared<TestPropertiesCollectorFactory>()); + std::vector<std::string> cf_names = { + "pikachu", "ilya", "muromec", "dobrynia", + "nikitich", "alyosha", "popovich"}; + CreateAndReopenWithCF(cf_names, options); + + ASSERT_OK(Put(1, "pikachu", std::string(90000, 'p'))); + ASSERT_OK(Put(2, "ilya", std::string(90000, 'i'))); + ASSERT_OK(Put(3, "muromec", std::string(90000, 'm'))); + ASSERT_OK(Put(4, "dobrynia", std::string(90000, 'd'))); + ASSERT_OK(Put(5, "nikitich", std::string(90000, 'n'))); + ASSERT_OK(Put(6, "alyosha", std::string(90000, 'a'))); + ASSERT_OK(Put(7, "popovich", std::string(90000, 'p'))); + for (int i = 1; i < 8; ++i) { + ASSERT_OK(Flush(i)); + ASSERT_EQ(listener->flushed_dbs_.size(), i); + ASSERT_EQ(listener->flushed_column_family_names_.size(), i); + } + + // make sure call-back functions are called in the right order + for (size_t i = 0; i < cf_names.size(); i++) { + ASSERT_EQ(listener->flushed_dbs_[i], db_); + ASSERT_EQ(listener->flushed_column_family_names_[i], cf_names[i]); + } +} + +TEST_F(EventListenerTest, MultiDBMultiListeners) { + Options options; +#if ROCKSDB_USING_THREAD_STATUS + options.enable_thread_tracking = true; +#endif
// ROCKSDB_USING_THREAD_STATUS + options.table_properties_collector_factories.push_back( + std::make_shared<TestPropertiesCollectorFactory>()); + std::vector<TestFlushListener*> listeners; + const int kNumDBs = 5; + const int kNumListeners = 10; + for (int i = 0; i < kNumListeners; ++i) { + listeners.emplace_back(new TestFlushListener(options.env)); + } + + std::vector<std::string> cf_names = { + "pikachu", "ilya", "muromec", "dobrynia", + "nikitich", "alyosha", "popovich"}; + + options.create_if_missing = true; + for (int i = 0; i < kNumListeners; ++i) { + options.listeners.emplace_back(listeners[i]); + } + DBOptions db_opts(options); + ColumnFamilyOptions cf_opts(options); + + std::vector<DB*> dbs; + std::vector<std::vector<ColumnFamilyHandle*>> vec_handles; + + for (int d = 0; d < kNumDBs; ++d) { + ASSERT_OK(DestroyDB(dbname_ + ToString(d), options)); + DB* db; + std::vector<ColumnFamilyHandle*> handles; + ASSERT_OK(DB::Open(options, dbname_ + ToString(d), &db)); + for (size_t c = 0; c < cf_names.size(); ++c) { + ColumnFamilyHandle* handle; + db->CreateColumnFamily(cf_opts, cf_names[c], &handle); + handles.push_back(handle); + } + + vec_handles.push_back(std::move(handles)); + dbs.push_back(db); + } + + for (int d = 0; d < kNumDBs; ++d) { + for (size_t c = 0; c < cf_names.size(); ++c) { + ASSERT_OK(dbs[d]->Put(WriteOptions(), vec_handles[d][c], + cf_names[c], cf_names[c])); + } + } + + for (size_t c = 0; c < cf_names.size(); ++c) { + for (int d = 0; d < kNumDBs; ++d) { + ASSERT_OK(dbs[d]->Flush(FlushOptions(), vec_handles[d][c])); + reinterpret_cast<DBImpl*>(dbs[d])->TEST_WaitForFlushMemTable(); + } + } + + for (auto* listener : listeners) { + int pos = 0; + for (size_t c = 0; c < cf_names.size(); ++c) { + for (int d = 0; d < kNumDBs; ++d) { + ASSERT_EQ(listener->flushed_dbs_[pos], dbs[d]); + ASSERT_EQ(listener->flushed_column_family_names_[pos], cf_names[c]); + pos++; + } + } + } + + + for (auto handles : vec_handles) { + for (auto h : handles) { + delete h; + } + handles.clear(); + } + vec_handles.clear(); + + for (auto db : dbs) { + delete db; + } +} + +TEST_F(EventListenerTest, DisableBGCompaction) { + Options options; +#if ROCKSDB_USING_THREAD_STATUS + options.enable_thread_tracking = true; +#endif // ROCKSDB_USING_THREAD_STATUS + TestFlushListener* listener = new TestFlushListener(options.env); + const int kCompactionTrigger = 1; + const int kSlowdownTrigger = 5; + const int kStopTrigger = 100; + options.level0_file_num_compaction_trigger = kCompactionTrigger; + options.level0_slowdown_writes_trigger = kSlowdownTrigger; + options.level0_stop_writes_trigger = kStopTrigger; + options.max_write_buffer_number = 10; + options.listeners.emplace_back(listener); + // BG compaction is disabled. The number of L0 files will simply keep + // increasing in this test. + options.compaction_style = kCompactionStyleNone; + options.compression = kNoCompression; + options.write_buffer_size = 100000; // Small write buffer + options.table_properties_collector_factories.push_back( + std::make_shared<TestPropertiesCollectorFactory>()); + + CreateAndReopenWithCF({"pikachu"}, options); + ColumnFamilyMetaData cf_meta; + db_->GetColumnFamilyMetaData(handles_[1], &cf_meta); + + // keep writing until writes are forced to stop.
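+ // (Editor's note: with compaction disabled, each Flush() below should add + // one L0 file, so cf_meta.file_count grows monotonically and every flush + // past level0_slowdown_writes_trigger should set triggered_writes_slowdown, + // which the final assertion counts.)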
+ for (int i = 0; static_cast<int>(cf_meta.file_count) < kSlowdownTrigger * 10; + ++i) { + Put(1, ToString(i), std::string(10000, 'x'), WriteOptions()); + db_->Flush(FlushOptions(), handles_[1]); + db_->GetColumnFamilyMetaData(handles_[1], &cf_meta); + } + ASSERT_GE(listener->slowdown_count, kSlowdownTrigger * 9); +} + +class TestCompactionReasonListener : public EventListener { + public: + void OnCompactionCompleted(DB* db, const CompactionJobInfo& ci) override { + std::lock_guard<std::mutex> lock(mutex_); + compaction_reasons_.push_back(ci.compaction_reason); + } + + std::vector<CompactionReason> compaction_reasons_; + std::mutex mutex_; +}; + +TEST_F(EventListenerTest, CompactionReasonLevel) { + Options options; + options.create_if_missing = true; + options.memtable_factory.reset( + new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile)); + + TestCompactionReasonListener* listener = new TestCompactionReasonListener(); + options.listeners.emplace_back(listener); + + options.level0_file_num_compaction_trigger = 4; + options.compaction_style = kCompactionStyleLevel; + + DestroyAndReopen(options); + Random rnd(301); + + // Write 4 files in L0 + for (int i = 0; i < 4; i++) { + GenerateNewRandomFile(&rnd); + } + dbfull()->TEST_WaitForCompact(); + + ASSERT_EQ(listener->compaction_reasons_.size(), 1); + ASSERT_EQ(listener->compaction_reasons_[0], + CompactionReason::kLevelL0FilesNum); + + DestroyAndReopen(options); + + // Write 3 non-overlapping files in L0 + for (int k = 1; k <= 30; k++) { + ASSERT_OK(Put(Key(k), Key(k))); + if (k % 10 == 0) { + Flush(); + } + } + + // Do a trivial move from L0 -> L1 + db_->CompactRange(CompactRangeOptions(), nullptr, nullptr); + + options.max_bytes_for_level_base = 1; + Close(); + listener->compaction_reasons_.clear(); + Reopen(options); + + dbfull()->TEST_WaitForCompact(); + ASSERT_GT(listener->compaction_reasons_.size(), 1); + + for (auto compaction_reason : listener->compaction_reasons_) { + ASSERT_EQ(compaction_reason, CompactionReason::kLevelMaxLevelSize); + } + + options.disable_auto_compactions = true; + Close(); + listener->compaction_reasons_.clear(); + Reopen(options); + + Put("key", "value"); + CompactRangeOptions cro; + cro.bottommost_level_compaction = BottommostLevelCompaction::kForce; + ASSERT_OK(db_->CompactRange(cro, nullptr, nullptr)); + ASSERT_GT(listener->compaction_reasons_.size(), 0); + for (auto compaction_reason : listener->compaction_reasons_) { + ASSERT_EQ(compaction_reason, CompactionReason::kManualCompaction); + } +} + +TEST_F(EventListenerTest, CompactionReasonUniversal) { + Options options; + options.create_if_missing = true; + options.memtable_factory.reset( + new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile)); + + TestCompactionReasonListener* listener = new TestCompactionReasonListener(); + options.listeners.emplace_back(listener); + + options.compaction_style = kCompactionStyleUniversal; + + Random rnd(301); + + options.level0_file_num_compaction_trigger = 8; + options.compaction_options_universal.max_size_amplification_percent = 100000; + options.compaction_options_universal.size_ratio = 100000; + DestroyAndReopen(options); + listener->compaction_reasons_.clear(); + + // Write 8 files in L0 + for (int i = 0; i < 8; i++) { + GenerateNewRandomFile(&rnd); + } + dbfull()->TEST_WaitForCompact(); + + ASSERT_GT(listener->compaction_reasons_.size(), 0); + for (auto compaction_reason : listener->compaction_reasons_) { + ASSERT_EQ(compaction_reason, CompactionReason::kUniversalSortedRunNum); + } + +
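+ // (Editor's note: the next phase drops max_size_amplification_percent to 1 + // so that size amplification, rather than sorted-run count, should be the + // reported trigger, i.e. kUniversalSizeAmplification.)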
options.level0_file_num_compaction_trigger = 8; + options.compaction_options_universal.max_size_amplification_percent = 1; + options.compaction_options_universal.size_ratio = 100000; + + DestroyAndReopen(options); + listener->compaction_reasons_.clear(); + + // Write 8 files in L0 + for (int i = 0; i < 8; i++) { + GenerateNewRandomFile(&rnd); + } + dbfull()->TEST_WaitForCompact(); + + ASSERT_GT(listener->compaction_reasons_.size(), 0); + for (auto compaction_reason : listener->compaction_reasons_) { + ASSERT_EQ(compaction_reason, CompactionReason::kUniversalSizeAmplification); + } + + options.disable_auto_compactions = true; + Close(); + listener->compaction_reasons_.clear(); + Reopen(options); + + db_->CompactRange(CompactRangeOptions(), nullptr, nullptr); + + ASSERT_GT(listener->compaction_reasons_.size(), 0); + for (auto compaction_reason : listener->compaction_reasons_) { + ASSERT_EQ(compaction_reason, CompactionReason::kManualCompaction); + } +} + +TEST_F(EventListenerTest, CompactionReasonFIFO) { + Options options; + options.create_if_missing = true; + options.memtable_factory.reset( + new SpecialSkipListFactory(DBTestBase::kNumKeysByGenerateNewRandomFile)); + + TestCompactionReasonListener* listener = new TestCompactionReasonListener(); + options.listeners.emplace_back(listener); + + options.level0_file_num_compaction_trigger = 4; + options.compaction_style = kCompactionStyleFIFO; + options.compaction_options_fifo.max_table_files_size = 1; + + DestroyAndReopen(options); + Random rnd(301); + + // Write 4 files in L0 + for (int i = 0; i < 4; i++) { + GenerateNewRandomFile(&rnd); + } + dbfull()->TEST_WaitForCompact(); + + ASSERT_GT(listener->compaction_reasons_.size(), 0); + for (auto compaction_reason : listener->compaction_reasons_) { + ASSERT_EQ(compaction_reason, CompactionReason::kFIFOMaxSize); + } +} + +class TableFileCreationListener : public EventListener { + public: + class TestEnv : public EnvWrapper { + public: + TestEnv() : EnvWrapper(Env::Default()) {} + + void SetStatus(Status s) { status_ = s; } + + Status NewWritableFile(const std::string& fname, + std::unique_ptr<WritableFile>* result, + const EnvOptions& options) { + if (fname.size() > 4 && fname.substr(fname.size() - 4) == ".sst") { + if (!status_.ok()) { + return status_; + } + } + return Env::Default()->NewWritableFile(fname, result, options); + } + + private: + Status status_; + }; + + TableFileCreationListener() { + for (int i = 0; i < 2; i++) { + started_[i] = finished_[i] = failure_[i] = 0; + } + } + + int Index(TableFileCreationReason reason) { + int idx; + switch (reason) { + case TableFileCreationReason::kFlush: + idx = 0; + break; + case TableFileCreationReason::kCompaction: + idx = 1; + break; + default: + idx = -1; + } + return idx; + } + + void CheckAndResetCounters(int flush_started, int flush_finished, + int flush_failure, int compaction_started, + int compaction_finished, int compaction_failure) { + ASSERT_EQ(started_[0], flush_started); + ASSERT_EQ(finished_[0], flush_finished); + ASSERT_EQ(failure_[0], flush_failure); + ASSERT_EQ(started_[1], compaction_started); + ASSERT_EQ(finished_[1], compaction_finished); + ASSERT_EQ(failure_[1], compaction_failure); + for (int i = 0; i < 2; i++) { + started_[i] = finished_[i] = failure_[i] = 0; + } + } + + void OnTableFileCreationStarted( + const TableFileCreationBriefInfo& info) override { + int idx = Index(info.reason); + if (idx >= 0) { + started_[idx]++; + } + ASSERT_GT(info.db_name.size(), 0U); + ASSERT_GT(info.cf_name.size(), 0U); + ASSERT_GT(info.file_path.size(),
0U); + ASSERT_GT(info.job_id, 0); + } + + void OnTableFileCreated(const TableFileCreationInfo& info) override { + int idx = Index(info.reason); + if (idx >= 0) { + finished_[idx]++; + } + ASSERT_GT(info.db_name.size(), 0U); + ASSERT_GT(info.cf_name.size(), 0U); + ASSERT_GT(info.file_path.size(), 0U); + ASSERT_GT(info.job_id, 0); + if (info.status.ok()) { + ASSERT_GT(info.table_properties.data_size, 0U); + ASSERT_GT(info.table_properties.raw_key_size, 0U); + ASSERT_GT(info.table_properties.raw_value_size, 0U); + ASSERT_GT(info.table_properties.num_data_blocks, 0U); + ASSERT_GT(info.table_properties.num_entries, 0U); + } else { + if (idx >= 0) { + failure_[idx]++; + } + } + } + + TestEnv test_env; + int started_[2]; + int finished_[2]; + int failure_[2]; +}; + +TEST_F(EventListenerTest, TableFileCreationListenersTest) { + auto listener = std::make_shared(); + Options options; + options.create_if_missing = true; + options.listeners.push_back(listener); + options.env = &listener->test_env; + DestroyAndReopen(options); + + ASSERT_OK(Put("foo", "aaa")); + ASSERT_OK(Put("bar", "bbb")); + ASSERT_OK(Flush()); + dbfull()->TEST_WaitForFlushMemTable(); + listener->CheckAndResetCounters(1, 1, 0, 0, 0, 0); + + ASSERT_OK(Put("foo", "aaa1")); + ASSERT_OK(Put("bar", "bbb1")); + listener->test_env.SetStatus(Status::NotSupported("not supported")); + ASSERT_NOK(Flush()); + listener->CheckAndResetCounters(1, 1, 1, 0, 0, 0); + listener->test_env.SetStatus(Status::OK()); + + Reopen(options); + ASSERT_OK(Put("foo", "aaa2")); + ASSERT_OK(Put("bar", "bbb2")); + ASSERT_OK(Flush()); + dbfull()->TEST_WaitForFlushMemTable(); + listener->CheckAndResetCounters(1, 1, 0, 0, 0, 0); + + const Slice kRangeStart = "a"; + const Slice kRangeEnd = "z"; + dbfull()->CompactRange(CompactRangeOptions(), &kRangeStart, &kRangeEnd); + dbfull()->TEST_WaitForCompact(); + listener->CheckAndResetCounters(0, 0, 0, 1, 1, 0); + + ASSERT_OK(Put("foo", "aaa3")); + ASSERT_OK(Put("bar", "bbb3")); + ASSERT_OK(Flush()); + listener->test_env.SetStatus(Status::NotSupported("not supported")); + dbfull()->CompactRange(CompactRangeOptions(), &kRangeStart, &kRangeEnd); + dbfull()->TEST_WaitForCompact(); + listener->CheckAndResetCounters(1, 1, 0, 1, 1, 1); +} + +class MemTableSealedListener : public EventListener { +private: + SequenceNumber latest_seq_number_; +public: + MemTableSealedListener() {} + void OnMemTableSealed(const MemTableInfo& info) override { + latest_seq_number_ = info.first_seqno; + } + + void OnFlushCompleted(DB* /*db*/, + const FlushJobInfo& flush_job_info) override { + ASSERT_LE(flush_job_info.smallest_seqno, latest_seq_number_); + } +}; + +TEST_F(EventListenerTest, MemTableSealedListenerTest) { + auto listener = std::make_shared(); + Options options; + options.create_if_missing = true; + options.listeners.push_back(listener); + DestroyAndReopen(options); + + for (unsigned int i = 0; i < 10; i++) { + std::string tag = std::to_string(i); + ASSERT_OK(Put("foo"+tag, "aaa")); + ASSERT_OK(Put("bar"+tag, "bbb")); + + ASSERT_OK(Flush()); + } +} +} // namespace rocksdb + + +#endif // ROCKSDB_LITE + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/log_format.h b/external/rocksdb/db/log_format.h new file mode 100755 index 0000000000..cf48a202f4 --- /dev/null +++ b/external/rocksdb/db/log_format.h @@ -0,0 +1,45 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +// Log format information shared by reader and writer. +// See ../doc/log_format.txt for more detail. + +#pragma once +namespace rocksdb { +namespace log { + +enum RecordType { + // Zero is reserved for preallocated files + kZeroType = 0, + kFullType = 1, + + // For fragments + kFirstType = 2, + kMiddleType = 3, + kLastType = 4, + + // For recycled log files + kRecyclableFullType = 5, + kRecyclableFirstType = 6, + kRecyclableMiddleType = 7, + kRecyclableLastType = 8, +}; +static const int kMaxRecordType = kRecyclableLastType; + +static const unsigned int kBlockSize = 32768; + +// Header is checksum (4 bytes), type (1 byte), length (2 bytes). +static const int kHeaderSize = 4 + 1 + 2; + +// Recyclable header is checksum (4 bytes), type (1 byte), log number +// (4 bytes), length (2 bytes). +static const int kRecyclableHeaderSize = 4 + 1 + 4 + 2; + +} // namespace log +} // namespace rocksdb diff --git a/external/rocksdb/db/log_reader.cc b/external/rocksdb/db/log_reader.cc new file mode 100755 index 0000000000..2da16a2863 --- /dev/null +++ b/external/rocksdb/db/log_reader.cc @@ -0,0 +1,432 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+ +#include "db/log_reader.h" + +#include <stdio.h> +#include "rocksdb/env.h" +#include "util/coding.h" +#include "util/crc32c.h" +#include "util/file_reader_writer.h" + +namespace rocksdb { +namespace log { + +Reader::Reporter::~Reporter() { +} + +Reader::Reader(std::shared_ptr<Logger> info_log, + unique_ptr<SequentialFileReader>&& _file, Reporter* reporter, + bool checksum, uint64_t initial_offset, uint64_t log_num) + : info_log_(info_log), + file_(std::move(_file)), + reporter_(reporter), + checksum_(checksum), + backing_store_(new char[kBlockSize]), + buffer_(), + eof_(false), + read_error_(false), + eof_offset_(0), + last_record_offset_(0), + end_of_buffer_offset_(0), + initial_offset_(initial_offset), + log_number_(log_num), + recycled_(false) {} + +Reader::~Reader() { + delete[] backing_store_; +} + +bool Reader::SkipToInitialBlock() { + size_t initial_offset_in_block = initial_offset_ % kBlockSize; + uint64_t block_start_location = initial_offset_ - initial_offset_in_block; + + // Don't search a block if we'd be in the trailer + if (initial_offset_in_block > kBlockSize - 6) { + block_start_location += kBlockSize; + } + + end_of_buffer_offset_ = block_start_location; + + // Skip to start of first block that can contain the initial record + if (block_start_location > 0) { + Status skip_status = file_->Skip(block_start_location); + if (!skip_status.ok()) { + ReportDrop(static_cast<size_t>(block_start_location), skip_status); + return false; + } + } + + return true; +} + +// For kAbsoluteConsistency, on clean shutdown we don't expect any error +// in the log files. For other modes, we can ignore only incomplete records +// in the last log file, which are presumably due to a write in progress +// during restart (or from log recycling). +// +// TODO krad: Evaluate if we need to move to a more strict mode where we +// restrict the inconsistency to only the last log +bool Reader::ReadRecord(Slice* record, std::string* scratch, + WALRecoveryMode wal_recovery_mode) { + if (last_record_offset_ < initial_offset_) { + if (!SkipToInitialBlock()) { + return false; + } + } + + scratch->clear(); + record->clear(); + bool in_fragmented_record = false; + // Record offset of the logical record that we're reading + // 0 is a dummy value to make compilers happy + uint64_t prospective_record_offset = 0; + + Slice fragment; + while (true) { + uint64_t physical_record_offset = end_of_buffer_offset_ - buffer_.size(); + size_t drop_size = 0; + const unsigned int record_type = ReadPhysicalRecord(&fragment, &drop_size); + switch (record_type) { + case kFullType: + case kRecyclableFullType: + if (in_fragmented_record && !scratch->empty()) { + // Handle bug in earlier versions of log::Writer where + // it could emit an empty kFirstType record at the tail end + // of a block followed by a kFullType or kFirstType record + // at the beginning of the next block. + ReportCorruption(scratch->size(), "partial record without end(1)"); + } + prospective_record_offset = physical_record_offset; + scratch->clear(); + *record = fragment; + last_record_offset_ = prospective_record_offset; + return true; + + case kFirstType: + case kRecyclableFirstType: + if (in_fragmented_record && !scratch->empty()) { + // Handle bug in earlier versions of log::Writer where + // it could emit an empty kFirstType record at the tail end + // of a block followed by a kFullType or kFirstType record + // at the beginning of the next block.
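+ // (Editor's sketch of the legacy layout this guards against, assuming the + // 7-byte header: | ... | kFirstType len=0 | <block boundary> | kFullType + // "data" | ... ; the dangling empty first fragment leaves scratch in a + // fragmented state, which is reported just below as a partial record + // without end.)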
+ ReportCorruption(scratch->size(), "partial record without end(2)"); + } + prospective_record_offset = physical_record_offset; + scratch->assign(fragment.data(), fragment.size()); + in_fragmented_record = true; + break; + + case kMiddleType: + case kRecyclableMiddleType: + if (!in_fragmented_record) { + ReportCorruption(fragment.size(), + "missing start of fragmented record(1)"); + } else { + scratch->append(fragment.data(), fragment.size()); + } + break; + + case kLastType: + case kRecyclableLastType: + if (!in_fragmented_record) { + ReportCorruption(fragment.size(), + "missing start of fragmented record(2)"); + } else { + scratch->append(fragment.data(), fragment.size()); + *record = Slice(*scratch); + last_record_offset_ = prospective_record_offset; + return true; + } + break; + + case kBadHeader: + if (wal_recovery_mode == WALRecoveryMode::kAbsoluteConsistency) { + // in clean shutdown we don't expect any error in the log files + ReportCorruption(drop_size, "truncated header"); + } + // fall-thru + + case kEof: + if (in_fragmented_record) { + if (wal_recovery_mode == WALRecoveryMode::kAbsoluteConsistency) { + // in clean shutdown we don't expect any error in the log files + ReportCorruption(scratch->size(), "error reading trailing data"); + } + // This can be caused by the writer dying immediately after + // writing a physical record but before completing the next; don't + // treat it as a corruption, just ignore the entire logical record. + scratch->clear(); + } + return false; + + case kOldRecord: + if (wal_recovery_mode != WALRecoveryMode::kSkipAnyCorruptedRecords) { + // Treat a record from a previous instance of the log as EOF. + if (in_fragmented_record) { + if (wal_recovery_mode == WALRecoveryMode::kAbsoluteConsistency) { + // in clean shutdown we don't expect any error in the log files + ReportCorruption(scratch->size(), "error reading trailing data"); + } + // This can be caused by the writer dying immediately after + // writing a physical record but before completing the next; don't + // treat it as a corruption, just ignore the entire logical record. + scratch->clear(); + } + return false; + } + // fall-thru + + case kBadRecord: + if (in_fragmented_record) { + ReportCorruption(scratch->size(), "error in middle of record"); + in_fragmented_record = false; + scratch->clear(); + } + break; + + case kBadRecordLen: + case kBadRecordChecksum: + if (recycled_ && + wal_recovery_mode == + WALRecoveryMode::kTolerateCorruptedTailRecords) { + scratch->clear(); + return false; + } + if (record_type == kBadRecordLen) { + ReportCorruption(drop_size, "bad record length"); + } else { + ReportCorruption(drop_size, "checksum mismatch"); + } + if (in_fragmented_record) { + ReportCorruption(scratch->size(), "error in middle of record"); + in_fragmented_record = false; + scratch->clear(); + } + break; + + default: { + char buf[40]; + snprintf(buf, sizeof(buf), "unknown record type %u", record_type); + ReportCorruption( + (fragment.size() + (in_fragmented_record ? 
scratch->size() : 0)), + buf); + in_fragmented_record = false; + scratch->clear(); + break; + } + } + } + return false; +} + +uint64_t Reader::LastRecordOffset() { + return last_record_offset_; +} + +void Reader::UnmarkEOF() { + if (read_error_) { + return; + } + + eof_ = false; + + if (eof_offset_ == 0) { + return; + } + + // If the EOF was in the middle of a block (a partial block was read) we have + // to read the rest of the block as ReadPhysicalRecord can only read full + // blocks and expects the file position indicator to be aligned to the start + // of a block. + // + // consumed_bytes + buffer_size() + remaining == kBlockSize + + size_t consumed_bytes = eof_offset_ - buffer_.size(); + size_t remaining = kBlockSize - eof_offset_; + + // backing_store_ is used to concatenate what is left in buffer_ and + // the remainder of the block. If buffer_ already uses backing_store_, + // we just append the new data. + if (buffer_.data() != backing_store_ + consumed_bytes) { + // Buffer_ does not use backing_store_ for storage. + // Copy what is left in buffer_ to backing_store. + memmove(backing_store_ + consumed_bytes, buffer_.data(), buffer_.size()); + } + + Slice read_buffer; + Status status = file_->Read(remaining, &read_buffer, + backing_store_ + eof_offset_); + + size_t added = read_buffer.size(); + end_of_buffer_offset_ += added; + + if (!status.ok()) { + if (added > 0) { + ReportDrop(added, status); + } + + read_error_ = true; + return; + } + + if (read_buffer.data() != backing_store_ + eof_offset_) { + // Read did not write to backing_store_ + memmove(backing_store_ + eof_offset_, read_buffer.data(), + read_buffer.size()); + } + + buffer_ = Slice(backing_store_ + consumed_bytes, + eof_offset_ + added - consumed_bytes); + + if (added < remaining) { + eof_ = true; + eof_offset_ += added; + } else { + eof_offset_ = 0; + } +} + +void Reader::ReportCorruption(size_t bytes, const char* reason) { + ReportDrop(bytes, Status::Corruption(reason)); +} + +void Reader::ReportDrop(size_t bytes, const Status& reason) { + if (reporter_ != nullptr && + end_of_buffer_offset_ - buffer_.size() - bytes >= initial_offset_) { + reporter_->Corruption(bytes, reason); + } +} + +bool Reader::ReadMore(size_t* drop_size, int *error) { + if (!eof_ && !read_error_) { + // Last read was a full read, so this is a trailer to skip + buffer_.clear(); + Status status = file_->Read(kBlockSize, &buffer_, backing_store_); + end_of_buffer_offset_ += buffer_.size(); + if (!status.ok()) { + buffer_.clear(); + ReportDrop(kBlockSize, status); + read_error_ = true; + *error = kEof; + return false; + } else if (buffer_.size() < (size_t)kBlockSize) { + eof_ = true; + eof_offset_ = buffer_.size(); + } + return true; + } else { + // Note that if buffer_ is non-empty, we have a truncated header at the + // end of the file, which can be caused by the writer crashing in the + // middle of writing the header. Unless explicitly requested we don't + // consider this an error, just report EOF.
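+ // (Editor's example, hypothetical numbers: a writer crash after emitting + // only 3 of the 7 header bytes leaves buffer_.size() == 3 here; ReadMore + // then returns kBadHeader, and whether that surfaces as corruption or as + // a silent EOF depends on the WALRecoveryMode checked in ReadRecord.)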
+ if (buffer_.size()) { + *drop_size = buffer_.size(); + buffer_.clear(); + *error = kBadHeader; + return false; + } + buffer_.clear(); + *error = kEof; + return false; + } +} + +unsigned int Reader::ReadPhysicalRecord(Slice* result, size_t* drop_size) { + while (true) { + // We need at least the minimum header size + if (buffer_.size() < (size_t)kHeaderSize) { + int r; + if (!ReadMore(drop_size, &r)) { + return r; + } + continue; + } + + // Parse the header + const char* header = buffer_.data(); + const uint32_t a = static_cast<uint32_t>(header[4]) & 0xff; + const uint32_t b = static_cast<uint32_t>(header[5]) & 0xff; + const unsigned int type = header[6]; + const uint32_t length = a | (b << 8); + int header_size = kHeaderSize; + if (type >= kRecyclableFullType && type <= kRecyclableLastType) { + if (end_of_buffer_offset_ - buffer_.size() == 0) { + recycled_ = true; + } + header_size = kRecyclableHeaderSize; + // We need enough for the larger header + if (buffer_.size() < (size_t)kRecyclableHeaderSize) { + int r; + if (!ReadMore(drop_size, &r)) { + return r; + } + continue; + } + const uint32_t log_num = DecodeFixed32(header + 7); + if (log_num != log_number_) { + return kOldRecord; + } + } + if (header_size + length > buffer_.size()) { + *drop_size = buffer_.size(); + buffer_.clear(); + if (!eof_) { + return kBadRecordLen; + } + // If the end of the file has been reached without reading |length| bytes + // of payload, assume the writer died in the middle of writing the record. + // Don't report a corruption unless requested. + if (*drop_size) { + return kBadHeader; + } + return kEof; + } + + if (type == kZeroType && length == 0) { + // Skip zero length record without reporting any drops since + // such records are produced by the mmap based writing code in + // env_posix.cc that preallocates file regions. + // NOTE: this should never happen in DB written by new RocksDB versions, + // since we turn off mmap writes to manifest and log files + buffer_.clear(); + return kBadRecord; + } + + // Check crc + if (checksum_) { + uint32_t expected_crc = crc32c::Unmask(DecodeFixed32(header)); + uint32_t actual_crc = crc32c::Value(header + 6, length + header_size - 6); + if (actual_crc != expected_crc) { + // Drop the rest of the buffer since "length" itself may have + // been corrupted and if we trust it, we could find some + // fragment of a real log record that just happens to look + // like a valid log record. + *drop_size = buffer_.size(); + buffer_.clear(); + return kBadRecordChecksum; + } + } + + buffer_.remove_prefix(header_size + length); + + // Skip physical record that started before initial_offset_ + if (end_of_buffer_offset_ - buffer_.size() - header_size - length < + initial_offset_) { + result->clear(); + return kBadRecord; + } + + *result = Slice(header + header_size, length); + return type; + } +} + +} // namespace log +} // namespace rocksdb diff --git a/external/rocksdb/db/log_reader.h b/external/rocksdb/db/log_reader.h new file mode 100755 index 0000000000..4451185468 --- /dev/null +++ b/external/rocksdb/db/log_reader.h @@ -0,0 +1,160 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#include <memory> +#include <stdint.h> + +#include "db/log_format.h" +#include "rocksdb/slice.h" +#include "rocksdb/status.h" +#include "rocksdb/options.h" + +namespace rocksdb { + +class SequentialFileReader; +class Logger; +using std::unique_ptr; + +namespace log { + +/** + * Reader is a general purpose log stream reader implementation. The actual job + * of reading from the device is implemented by the SequentialFile interface. + * + * Please see Writer for details on the file and record layout. + */ +class Reader { + public: + // Interface for reporting errors. + class Reporter { + public: + virtual ~Reporter(); + + // Some corruption was detected. "size" is the approximate number + // of bytes dropped due to the corruption. + virtual void Corruption(size_t bytes, const Status& status) = 0; + }; + + // Create a reader that will return log records from "*file". + // "*file" must remain live while this Reader is in use. + // + // If "reporter" is non-nullptr, it is notified whenever some data is + // dropped due to a detected corruption. "*reporter" must remain + // live while this Reader is in use. + // + // If "checksum" is true, verify checksums if available. + // + // The Reader will start reading at the first record located at physical + // position >= initial_offset within the file. + Reader(std::shared_ptr<Logger> info_log, + unique_ptr<SequentialFileReader>&& file, + Reporter* reporter, bool checksum, uint64_t initial_offset, + uint64_t log_num); + + ~Reader(); + + // Read the next record into *record. Returns true if read + // successfully, false if we hit end of the input. May use + // "*scratch" as temporary storage. The contents filled in *record + // will only be valid until the next mutating operation on this + // reader or the next mutation to *scratch. + bool ReadRecord(Slice* record, std::string* scratch, + WALRecoveryMode wal_recovery_mode = + WALRecoveryMode::kTolerateCorruptedTailRecords); + + // Returns the physical offset of the last record returned by ReadRecord. + // + // Undefined before the first call to ReadRecord. + uint64_t LastRecordOffset(); + + // returns true if the reader has encountered an eof condition. + bool IsEOF() { + return eof_; + } + + // when we know more data has been written to the file. we can use this + // function to force the reader to look again in the file. + // Also aligns the file position indicator to the start of the next block + // by reading the rest of the data from the EOF position to the end of the + // block that was partially read. + void UnmarkEOF(); + + SequentialFileReader* file() { return file_.get(); } + + private: + std::shared_ptr<Logger> info_log_; + const unique_ptr<SequentialFileReader> file_; + Reporter* const reporter_; + bool const checksum_; + char* const backing_store_; + Slice buffer_; + bool eof_; // Last Read() indicated EOF by returning < kBlockSize + bool read_error_; // Error occurred while reading from file + + // Offset of the file position indicator within the last block when an + // EOF was detected. + size_t eof_offset_; + + // Offset of the last record returned by ReadRecord. + uint64_t last_record_offset_; + // Offset of the first location past the end of buffer_.
+ uint64_t end_of_buffer_offset_; + + // Offset at which to start looking for the first record to return + uint64_t const initial_offset_; + + // which log number this is + uint64_t const log_number_; + + // Whether this is a recycled log file + bool recycled_; + + // Extend record types with the following special values + enum { + kEof = kMaxRecordType + 1, + // Returned whenever we find an invalid physical record. + // Currently there are three situations in which this happens: + // * The record has an invalid CRC (ReadPhysicalRecord reports a drop) + // * The record is a 0-length record (No drop is reported) + // * The record is below constructor's initial_offset (No drop is reported) + kBadRecord = kMaxRecordType + 2, + // Returned when we fail to read a valid header. + kBadHeader = kMaxRecordType + 3, + // Returned when we read an old record from a previous user of the log. + kOldRecord = kMaxRecordType + 4, + // Returned when we get a bad record length + kBadRecordLen = kMaxRecordType + 5, + // Returned when we get a bad record checksum + kBadRecordChecksum = kMaxRecordType + 6, + }; + + // Skips all blocks that are completely before "initial_offset_". + // + // Returns true on success. Handles reporting. + bool SkipToInitialBlock(); + + // Return type, or one of the preceding special values + unsigned int ReadPhysicalRecord(Slice* result, size_t* drop_size); + + // Read some more + bool ReadMore(size_t* drop_size, int *error); + + // Reports dropped bytes to the reporter. + // buffer_ must be updated to remove the dropped bytes prior to invocation. + void ReportCorruption(size_t bytes, const char* reason); + void ReportDrop(size_t bytes, const Status& reason); + + // No copying allowed + Reader(const Reader&); + void operator=(const Reader&); +}; + +} // namespace log +} // namespace rocksdb diff --git a/external/rocksdb/db/log_test.cc b/external/rocksdb/db/log_test.cc new file mode 100755 index 0000000000..c92f09137c --- /dev/null +++ b/external/rocksdb/db/log_test.cc @@ -0,0 +1,739 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/log_reader.h" +#include "db/log_writer.h" +#include "rocksdb/env.h" +#include "util/coding.h" +#include "util/crc32c.h" +#include "util/file_reader_writer.h" +#include "util/random.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { +namespace log { + +// Construct a string of the specified length made out of the supplied +// partial string. 
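+// (Editor's example: BigString("foo", 8) appends "foo" until the result is at +// least 8 bytes, giving "foofoofoo", then resizes down to "foofoofo".)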
+static std::string BigString(const std::string& partial_string, size_t n) { + std::string result; + while (result.size() < n) { + result.append(partial_string); + } + result.resize(n); + return result; +} + +// Construct a string from a number +static std::string NumberString(int n) { + char buf[50]; + snprintf(buf, sizeof(buf), "%d.", n); + return std::string(buf); +} + +// Return a skewed potentially long string +static std::string RandomSkewedString(int i, Random* rnd) { + return BigString(NumberString(i), rnd->Skewed(17)); +} + +class LogTest : public ::testing::TestWithParam<int> { + private: + class StringSource : public SequentialFile { + public: + Slice& contents_; + bool force_error_; + size_t force_error_position_; + bool force_eof_; + size_t force_eof_position_; + bool returned_partial_; + explicit StringSource(Slice& contents) : + contents_(contents), + force_error_(false), + force_error_position_(0), + force_eof_(false), + force_eof_position_(0), + returned_partial_(false) { } + + virtual Status Read(size_t n, Slice* result, char* scratch) override { + EXPECT_TRUE(!returned_partial_) << "must not Read() after eof/error"; + + if (force_error_) { + if (force_error_position_ >= n) { + force_error_position_ -= n; + } else { + *result = Slice(contents_.data(), force_error_position_); + contents_.remove_prefix(force_error_position_); + force_error_ = false; + returned_partial_ = true; + return Status::Corruption("read error"); + } + } + + if (contents_.size() < n) { + n = contents_.size(); + returned_partial_ = true; + } + + if (force_eof_) { + if (force_eof_position_ >= n) { + force_eof_position_ -= n; + } else { + force_eof_ = false; + n = force_eof_position_; + returned_partial_ = true; + } + } + + // By using scratch we ensure that caller has control over the + // lifetime of result.data() + memcpy(scratch, contents_.data(), n); + *result = Slice(scratch, n); + + contents_.remove_prefix(n); + return Status::OK(); + } + + virtual Status Skip(uint64_t n) override { + if (n > contents_.size()) { + contents_.clear(); + return Status::NotFound("in-memory file skipped past end"); + } + + contents_.remove_prefix(n); + + return Status::OK(); + } + }; + + class ReportCollector : public Reader::Reporter { + public: + size_t dropped_bytes_; + std::string message_; + + ReportCollector() : dropped_bytes_(0) { } + virtual void Corruption(size_t bytes, const Status& status) override { + dropped_bytes_ += bytes; + message_.append(status.ToString()); + } + }; + + std::string& dest_contents() { + auto dest = + dynamic_cast<test::StringSink*>(writer_.file()->writable_file()); + assert(dest); + return dest->contents_; + } + + const std::string& dest_contents() const { + auto dest = + dynamic_cast<test::StringSink*>(writer_.file()->writable_file()); + assert(dest); + return dest->contents_; + } + + void reset_source_contents() { + auto src = dynamic_cast<StringSource*>(reader_.file()->file()); + assert(src); + src->contents_ = dest_contents(); + } + + Slice reader_contents_; + unique_ptr<WritableFileWriter> dest_holder_; + unique_ptr<SequentialFileReader> source_holder_; + ReportCollector report_; + Writer writer_; + Reader reader_; + + // Record metadata for testing initial offset functionality + static size_t initial_offset_record_sizes_[]; + uint64_t initial_offset_last_record_offsets_[4]; + + public: + LogTest() + : reader_contents_(), + dest_holder_(test::GetWritableFileWriter( + new test::StringSink(&reader_contents_))), + source_holder_( + test::GetSequentialFileReader(new StringSource(reader_contents_))), + writer_(std::move(dest_holder_), 123, GetParam()), + reader_(NULL,
std::move(source_holder_), &report_, true /*checksum*/, + 0 /*initial_offset*/, 123) { + int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize; + initial_offset_last_record_offsets_[0] = 0; + initial_offset_last_record_offsets_[1] = header_size + 10000; + initial_offset_last_record_offsets_[2] = 2 * (header_size + 10000); + initial_offset_last_record_offsets_[3] = 2 * (header_size + 10000) + + (2 * log::kBlockSize - 1000) + + 3 * header_size; + } + + Slice* get_reader_contents() { return &reader_contents_; } + + void Write(const std::string& msg) { + writer_.AddRecord(Slice(msg)); + } + + size_t WrittenBytes() const { + return dest_contents().size(); + } + + std::string Read(const WALRecoveryMode wal_recovery_mode = + WALRecoveryMode::kTolerateCorruptedTailRecords) { + std::string scratch; + Slice record; + if (reader_.ReadRecord(&record, &scratch, wal_recovery_mode)) { + return record.ToString(); + } else { + return "EOF"; + } + } + + void IncrementByte(int offset, int delta) { + dest_contents()[offset] += delta; + } + + void SetByte(int offset, char new_byte) { + dest_contents()[offset] = new_byte; + } + + void ShrinkSize(int bytes) { + auto dest = + dynamic_cast<test::StringSink*>(writer_.file()->writable_file()); + assert(dest); + dest->Drop(bytes); + } + + void FixChecksum(int header_offset, int len, bool recyclable) { + // Compute crc of type/len/data + int header_size = recyclable ? kRecyclableHeaderSize : kHeaderSize; + uint32_t crc = crc32c::Value(&dest_contents()[header_offset + 6], + header_size - 6 + len); + crc = crc32c::Mask(crc); + EncodeFixed32(&dest_contents()[header_offset], crc); + } + + void ForceError(size_t position = 0) { + auto src = dynamic_cast<StringSource*>(reader_.file()->file()); + src->force_error_ = true; + src->force_error_position_ = position; + } + + size_t DroppedBytes() const { + return report_.dropped_bytes_; + } + + std::string ReportMessage() const { + return report_.message_; + } + + void ForceEOF(size_t position = 0) { + auto src = dynamic_cast<StringSource*>(reader_.file()->file()); + src->force_eof_ = true; + src->force_eof_position_ = position; + } + + void UnmarkEOF() { + auto src = dynamic_cast<StringSource*>(reader_.file()->file()); + src->returned_partial_ = false; + reader_.UnmarkEOF(); + } + + bool IsEOF() { + return reader_.IsEOF(); + } + + // Returns OK iff recorded error message contains "msg" + std::string MatchError(const std::string& msg) const { + if (report_.message_.find(msg) == std::string::npos) { + return report_.message_; + } else { + return "OK"; + } + } + + void WriteInitialOffsetLog() { + for (int i = 0; i < 4; i++) { + std::string record(initial_offset_record_sizes_[i], + static_cast<char>('a' + i)); + Write(record); + } + } + + void CheckOffsetPastEndReturnsNoRecords(uint64_t offset_past_end) { + WriteInitialOffsetLog(); + unique_ptr<SequentialFileReader> file_reader( + test::GetSequentialFileReader(new StringSource(reader_contents_))); + unique_ptr<Reader> offset_reader( + new Reader(NULL, std::move(file_reader), &report_, + true /*checksum*/, WrittenBytes() + offset_past_end, 123)); + Slice record; + std::string scratch; + ASSERT_TRUE(!offset_reader->ReadRecord(&record, &scratch)); + } + + void CheckInitialOffsetRecord(uint64_t initial_offset, + int expected_record_offset) { + WriteInitialOffsetLog(); + unique_ptr<SequentialFileReader> file_reader( + test::GetSequentialFileReader(new StringSource(reader_contents_))); + unique_ptr<Reader> offset_reader( + new Reader(NULL, std::move(file_reader), &report_, + true /*checksum*/, initial_offset, 123)); + Slice record; + std::string scratch; + ASSERT_TRUE(offset_reader->ReadRecord(&record,
&scratch)); + ASSERT_EQ(initial_offset_record_sizes_[expected_record_offset], + record.size()); + ASSERT_EQ(initial_offset_last_record_offsets_[expected_record_offset], + offset_reader->LastRecordOffset()); + ASSERT_EQ((char)('a' + expected_record_offset), record.data()[0]); + } + +}; + +size_t LogTest::initial_offset_record_sizes_[] = + {10000, // Two sizable records in first block + 10000, + 2 * log::kBlockSize - 1000, // Span three blocks + 1}; + +TEST_P(LogTest, Empty) { ASSERT_EQ("EOF", Read()); } + +TEST_P(LogTest, ReadWrite) { + Write("foo"); + Write("bar"); + Write(""); + Write("xxxx"); + ASSERT_EQ("foo", Read()); + ASSERT_EQ("bar", Read()); + ASSERT_EQ("", Read()); + ASSERT_EQ("xxxx", Read()); + ASSERT_EQ("EOF", Read()); + ASSERT_EQ("EOF", Read()); // Make sure reads at eof work +} + +TEST_P(LogTest, ManyBlocks) { + for (int i = 0; i < 100000; i++) { + Write(NumberString(i)); + } + for (int i = 0; i < 100000; i++) { + ASSERT_EQ(NumberString(i), Read()); + } + ASSERT_EQ("EOF", Read()); +} + +TEST_P(LogTest, Fragmentation) { + Write("small"); + Write(BigString("medium", 50000)); + Write(BigString("large", 100000)); + ASSERT_EQ("small", Read()); + ASSERT_EQ(BigString("medium", 50000), Read()); + ASSERT_EQ(BigString("large", 100000), Read()); + ASSERT_EQ("EOF", Read()); +} + +TEST_P(LogTest, MarginalTrailer) { + // Make a trailer that is exactly the same length as an empty record. + int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize; + const int n = kBlockSize - 2 * header_size; + Write(BigString("foo", n)); + ASSERT_EQ((unsigned int)(kBlockSize - header_size), WrittenBytes()); + Write(""); + Write("bar"); + ASSERT_EQ(BigString("foo", n), Read()); + ASSERT_EQ("", Read()); + ASSERT_EQ("bar", Read()); + ASSERT_EQ("EOF", Read()); +} + +TEST_P(LogTest, MarginalTrailer2) { + // Make a trailer that is exactly the same length as an empty record. + int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize; + const int n = kBlockSize - 2 * header_size; + Write(BigString("foo", n)); + ASSERT_EQ((unsigned int)(kBlockSize - header_size), WrittenBytes()); + Write("bar"); + ASSERT_EQ(BigString("foo", n), Read()); + ASSERT_EQ("bar", Read()); + ASSERT_EQ("EOF", Read()); + ASSERT_EQ(0U, DroppedBytes()); + ASSERT_EQ("", ReportMessage()); +} + +TEST_P(LogTest, ShortTrailer) { + int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize; + const int n = kBlockSize - 2 * header_size + 4; + Write(BigString("foo", n)); + ASSERT_EQ((unsigned int)(kBlockSize - header_size + 4), WrittenBytes()); + Write(""); + Write("bar"); + ASSERT_EQ(BigString("foo", n), Read()); + ASSERT_EQ("", Read()); + ASSERT_EQ("bar", Read()); + ASSERT_EQ("EOF", Read()); +} + +TEST_P(LogTest, AlignedEof) { + int header_size = GetParam() ? 
kRecyclableHeaderSize : kHeaderSize; + const int n = kBlockSize - 2 * header_size + 4; + Write(BigString("foo", n)); + ASSERT_EQ((unsigned int)(kBlockSize - header_size + 4), WrittenBytes()); + ASSERT_EQ(BigString("foo", n), Read()); + ASSERT_EQ("EOF", Read()); +} + +TEST_P(LogTest, RandomRead) { + const int N = 500; + Random write_rnd(301); + for (int i = 0; i < N; i++) { + Write(RandomSkewedString(i, &write_rnd)); + } + Random read_rnd(301); + for (int i = 0; i < N; i++) { + ASSERT_EQ(RandomSkewedString(i, &read_rnd), Read()); + } + ASSERT_EQ("EOF", Read()); +} + +// Tests of all the error paths in log_reader.cc follow: + +TEST_P(LogTest, ReadError) { + Write("foo"); + ForceError(); + ASSERT_EQ("EOF", Read()); + ASSERT_EQ((unsigned int)kBlockSize, DroppedBytes()); + ASSERT_EQ("OK", MatchError("read error")); +} + +TEST_P(LogTest, BadRecordType) { + Write("foo"); + // Type is stored in header[6] + IncrementByte(6, 100); + FixChecksum(0, 3, false); + ASSERT_EQ("EOF", Read()); + ASSERT_EQ(3U, DroppedBytes()); + ASSERT_EQ("OK", MatchError("unknown record type")); +} + +TEST_P(LogTest, TruncatedTrailingRecordIsIgnored) { + Write("foo"); + ShrinkSize(4); // Drop all payload as well as a header byte + ASSERT_EQ("EOF", Read()); + // Truncated last record is ignored, not treated as an error + ASSERT_EQ(0U, DroppedBytes()); + ASSERT_EQ("", ReportMessage()); +} + +TEST_P(LogTest, TruncatedTrailingRecordIsNotIgnored) { + Write("foo"); + ShrinkSize(4); // Drop all payload as well as a header byte + ASSERT_EQ("EOF", Read(WALRecoveryMode::kAbsoluteConsistency)); + // In kAbsoluteConsistency mode, the truncated last record is not ignored; + // it is reported as an error + ASSERT_GT(DroppedBytes(), 0U); + ASSERT_EQ("OK", MatchError("Corruption: truncated header")); +} + +TEST_P(LogTest, BadLength) { + int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize; + const int kPayloadSize = kBlockSize - header_size; + Write(BigString("bar", kPayloadSize)); + Write("foo"); + // Least significant size byte is stored in header[4]. + IncrementByte(4, 1); + if (!GetParam()) { + ASSERT_EQ("foo", Read()); + ASSERT_EQ(kBlockSize, DroppedBytes()); + ASSERT_EQ("OK", MatchError("bad record length")); + } else { + ASSERT_EQ("EOF", Read()); + } +} + +TEST_P(LogTest, BadLengthAtEndIsIgnored) { + Write("foo"); + ShrinkSize(1); + ASSERT_EQ("EOF", Read()); + ASSERT_EQ(0U, DroppedBytes()); + ASSERT_EQ("", ReportMessage()); +} + +TEST_P(LogTest, BadLengthAtEndIsNotIgnored) { + Write("foo"); + ShrinkSize(1); + ASSERT_EQ("EOF", Read(WALRecoveryMode::kAbsoluteConsistency)); + ASSERT_GT(DroppedBytes(), 0U); + ASSERT_EQ("OK", MatchError("Corruption: truncated header")); +} + +TEST_P(LogTest, ChecksumMismatch) { + Write("foooooo"); + IncrementByte(0, 14); + ASSERT_EQ("EOF", Read()); + if (!GetParam()) { + ASSERT_EQ(14U, DroppedBytes()); + ASSERT_EQ("OK", MatchError("checksum mismatch")); + } else { + ASSERT_EQ(0U, DroppedBytes()); + ASSERT_EQ("", ReportMessage()); + } +} + +TEST_P(LogTest, UnexpectedMiddleType) { + Write("foo"); + SetByte(6, GetParam() ? kRecyclableMiddleType : kMiddleType); + FixChecksum(0, 3, !!GetParam()); + ASSERT_EQ("EOF", Read()); + ASSERT_EQ(3U, DroppedBytes()); + ASSERT_EQ("OK", MatchError("missing start")); +} + +TEST_P(LogTest, UnexpectedLastType) { + Write("foo"); + SetByte(6, GetParam() ?
kRecyclableLastType : kLastType); + FixChecksum(0, 3, !!GetParam()); + ASSERT_EQ("EOF", Read()); + ASSERT_EQ(3U, DroppedBytes()); + ASSERT_EQ("OK", MatchError("missing start")); +} + +TEST_P(LogTest, UnexpectedFullType) { + Write("foo"); + Write("bar"); + SetByte(6, GetParam() ? kRecyclableFirstType : kFirstType); + FixChecksum(0, 3, !!GetParam()); + ASSERT_EQ("bar", Read()); + ASSERT_EQ("EOF", Read()); + ASSERT_EQ(3U, DroppedBytes()); + ASSERT_EQ("OK", MatchError("partial record without end")); +} + +TEST_P(LogTest, UnexpectedFirstType) { + Write("foo"); + Write(BigString("bar", 100000)); + SetByte(6, GetParam() ? kRecyclableFirstType : kFirstType); + FixChecksum(0, 3, !!GetParam()); + ASSERT_EQ(BigString("bar", 100000), Read()); + ASSERT_EQ("EOF", Read()); + ASSERT_EQ(3U, DroppedBytes()); + ASSERT_EQ("OK", MatchError("partial record without end")); +} + +TEST_P(LogTest, MissingLastIsIgnored) { + Write(BigString("bar", kBlockSize)); + // Remove the LAST block, including header. + ShrinkSize(14); + ASSERT_EQ("EOF", Read()); + ASSERT_EQ("", ReportMessage()); + ASSERT_EQ(0U, DroppedBytes()); +} + +TEST_P(LogTest, MissingLastIsNotIgnored) { + Write(BigString("bar", kBlockSize)); + // Remove the LAST block, including header. + ShrinkSize(14); + ASSERT_EQ("EOF", Read(WALRecoveryMode::kAbsoluteConsistency)); + ASSERT_GT(DroppedBytes(), 0U); + ASSERT_EQ("OK", MatchError("Corruption: error reading trailing data")); +} + +TEST_P(LogTest, PartialLastIsIgnored) { + Write(BigString("bar", kBlockSize)); + // Cause a bad record length in the LAST block. + ShrinkSize(1); + ASSERT_EQ("EOF", Read()); + ASSERT_EQ("", ReportMessage()); + ASSERT_EQ(0U, DroppedBytes()); +} + +TEST_P(LogTest, PartialLastIsNotIgnored) { + Write(BigString("bar", kBlockSize)); + // Cause a bad record length in the LAST block. + ShrinkSize(1); + ASSERT_EQ("EOF", Read(WALRecoveryMode::kAbsoluteConsistency)); + ASSERT_GT(DroppedBytes(), 0U); + ASSERT_EQ("OK", MatchError( + "Corruption: truncated headerCorruption: " + "error reading trailing data")); +} + +TEST_P(LogTest, ErrorJoinsRecords) { + // Consider two fragmented records: + // first(R1) last(R1) first(R2) last(R2) + // where the middle two fragments disappear. We do not want + // first(R1),last(R2) to get joined and returned as a valid record. + + // Write records that span two blocks + Write(BigString("foo", kBlockSize)); + Write(BigString("bar", kBlockSize)); + Write("correct"); + + // Wipe the middle block + for (unsigned int offset = kBlockSize; offset < 2*kBlockSize; offset++) { + SetByte(offset, 'x'); + } + + if (!GetParam()) { + ASSERT_EQ("correct", Read()); + ASSERT_EQ("EOF", Read()); + size_t dropped = DroppedBytes(); + ASSERT_LE(dropped, 2 * kBlockSize + 100); + ASSERT_GE(dropped, 2 * kBlockSize); + } else { + ASSERT_EQ("EOF", Read()); + } +} + +TEST_P(LogTest, ReadStart) { CheckInitialOffsetRecord(0, 0); } + +TEST_P(LogTest, ReadSecondOneOff) { CheckInitialOffsetRecord(1, 1); } + +TEST_P(LogTest, ReadSecondTenThousand) { CheckInitialOffsetRecord(10000, 1); } + +TEST_P(LogTest, ReadSecondStart) { + int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize; + CheckInitialOffsetRecord(10000 + header_size, 1); +} + +TEST_P(LogTest, ReadThirdOneOff) { + int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize; + CheckInitialOffsetRecord(10000 + header_size + 1, 2); +} + +TEST_P(LogTest, ReadThirdStart) { + int header_size = GetParam() ? 
kRecyclableHeaderSize : kHeaderSize; + CheckInitialOffsetRecord(20000 + 2 * header_size, 2); +} + +TEST_P(LogTest, ReadFourthOneOff) { + int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize; + CheckInitialOffsetRecord(20000 + 2 * header_size + 1, 3); +} + +TEST_P(LogTest, ReadFourthFirstBlockTrailer) { + CheckInitialOffsetRecord(log::kBlockSize - 4, 3); +} + +TEST_P(LogTest, ReadFourthMiddleBlock) { + CheckInitialOffsetRecord(log::kBlockSize + 1, 3); +} + +TEST_P(LogTest, ReadFourthLastBlock) { + CheckInitialOffsetRecord(2 * log::kBlockSize + 1, 3); +} + +TEST_P(LogTest, ReadFourthStart) { + int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize; + CheckInitialOffsetRecord( + 2 * (header_size + 1000) + (2 * log::kBlockSize - 1000) + 3 * header_size, + 3); +} + +TEST_P(LogTest, ReadEnd) { CheckOffsetPastEndReturnsNoRecords(0); } + +TEST_P(LogTest, ReadPastEnd) { CheckOffsetPastEndReturnsNoRecords(5); } + +TEST_P(LogTest, ClearEofSingleBlock) { + Write("foo"); + Write("bar"); + int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize; + ForceEOF(3 + header_size + 2); + ASSERT_EQ("foo", Read()); + UnmarkEOF(); + ASSERT_EQ("bar", Read()); + ASSERT_TRUE(IsEOF()); + ASSERT_EQ("EOF", Read()); + Write("xxx"); + UnmarkEOF(); + ASSERT_EQ("xxx", Read()); + ASSERT_TRUE(IsEOF()); +} + +TEST_P(LogTest, ClearEofMultiBlock) { + size_t num_full_blocks = 5; + int header_size = GetParam() ? kRecyclableHeaderSize : kHeaderSize; + size_t n = (kBlockSize - header_size) * num_full_blocks + 25; + Write(BigString("foo", n)); + Write(BigString("bar", n)); + ForceEOF(n + num_full_blocks * header_size + header_size + 3); + ASSERT_EQ(BigString("foo", n), Read()); + ASSERT_TRUE(IsEOF()); + UnmarkEOF(); + ASSERT_EQ(BigString("bar", n), Read()); + ASSERT_TRUE(IsEOF()); + Write(BigString("xxx", n)); + UnmarkEOF(); + ASSERT_EQ(BigString("xxx", n), Read()); + ASSERT_TRUE(IsEOF()); +} + +TEST_P(LogTest, ClearEofError) { + // If an error occurs during Read() in UnmarkEOF(), the records contained + // in the buffer should be returned on subsequent calls of ReadRecord() + // until no more full records are left, whereafter ReadRecord() should return + // false to indicate that it cannot read any further. 
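+  // (Concretely: the first Read() below consumes "foo" and leaves "bar"
+  // buffered; ForceError(0) then makes the next file read fail, so
+  // UnmarkEOF() cannot see "xxx", but the already-buffered "bar" must still
+  // be returned before Read() reports "EOF".)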
+ + Write("foo"); + Write("bar"); + UnmarkEOF(); + ASSERT_EQ("foo", Read()); + ASSERT_TRUE(IsEOF()); + Write("xxx"); + ForceError(0); + UnmarkEOF(); + ASSERT_EQ("bar", Read()); + ASSERT_EQ("EOF", Read()); +} + +TEST_P(LogTest, ClearEofError2) { + Write("foo"); + Write("bar"); + UnmarkEOF(); + ASSERT_EQ("foo", Read()); + Write("xxx"); + ForceError(3); + UnmarkEOF(); + ASSERT_EQ("bar", Read()); + ASSERT_EQ("EOF", Read()); + ASSERT_EQ(3U, DroppedBytes()); + ASSERT_EQ("OK", MatchError("read error")); +} + +TEST_P(LogTest, Recycle) { + if (!GetParam()) { + return; // test is only valid for recycled logs + } + Write("foo"); + Write("bar"); + Write("baz"); + Write("bif"); + Write("blitz"); + while (get_reader_contents()->size() < log::kBlockSize * 2) { + Write("xxxxxxxxxxxxxxxx"); + } + unique_ptr dest_holder(test::GetWritableFileWriter( + new test::OverwritingStringSink(get_reader_contents()))); + Writer recycle_writer(std::move(dest_holder), 123, true); + recycle_writer.AddRecord(Slice("foooo")); + recycle_writer.AddRecord(Slice("bar")); + ASSERT_GE(get_reader_contents()->size(), log::kBlockSize * 2); + ASSERT_EQ("foooo", Read()); + ASSERT_EQ("bar", Read()); + ASSERT_EQ("EOF", Read()); +} + +INSTANTIATE_TEST_CASE_P(bool, LogTest, ::testing::Values(0, 2)); + +} // namespace log +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/log_writer.cc b/external/rocksdb/db/log_writer.cc new file mode 100755 index 0000000000..3277088bef --- /dev/null +++ b/external/rocksdb/db/log_writer.cc @@ -0,0 +1,138 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/log_writer.h" + +#include +#include "rocksdb/env.h" +#include "util/coding.h" +#include "util/crc32c.h" +#include "util/file_reader_writer.h" + +namespace rocksdb { +namespace log { + +Writer::Writer(unique_ptr&& dest, + uint64_t log_number, bool recycle_log_files) + : dest_(std::move(dest)), + block_offset_(0), + log_number_(log_number), + recycle_log_files_(recycle_log_files) { + for (int i = 0; i <= kMaxRecordType; i++) { + char t = static_cast(i); + type_crc_[i] = crc32c::Value(&t, 1); + } +} + +Writer::~Writer() { +} + +Status Writer::AddRecord(const Slice& slice) { + const char* ptr = slice.data(); + size_t left = slice.size(); + + // Header size varies depending on whether we are recycling or not. + const int header_size = + recycle_log_files_ ? kRecyclableHeaderSize : kHeaderSize; + + // Fragment the record if necessary and emit it. 
Note that if slice + // is empty, we still want to iterate once to emit a single + // zero-length record + Status s; + bool begin = true; + do { + const int64_t leftover = kBlockSize - block_offset_; + assert(leftover >= 0); + if (leftover < header_size) { + // Switch to a new block + if (leftover > 0) { + // Fill the trailer (literal below relies on kHeaderSize and + // kRecyclableHeaderSize being <= 11) + assert(header_size <= 11); + dest_->Append( + Slice("\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00", leftover)); + } + block_offset_ = 0; + } + + // Invariant: we never leave < header_size bytes in a block. + assert(static_cast(kBlockSize - block_offset_) >= header_size); + + const size_t avail = kBlockSize - block_offset_ - header_size; + const size_t fragment_length = (left < avail) ? left : avail; + + RecordType type; + const bool end = (left == fragment_length); + if (begin && end) { + type = recycle_log_files_ ? kRecyclableFullType : kFullType; + } else if (begin) { + type = recycle_log_files_ ? kRecyclableFirstType : kFirstType; + } else if (end) { + type = recycle_log_files_ ? kRecyclableLastType : kLastType; + } else { + type = recycle_log_files_ ? kRecyclableMiddleType : kMiddleType; + } + + s = EmitPhysicalRecord(type, ptr, fragment_length); + ptr += fragment_length; + left -= fragment_length; + begin = false; + } while (s.ok() && left > 0); + return s; +} + +Status Writer::EmitPhysicalRecord(RecordType t, const char* ptr, size_t n) { + assert(n <= 0xffff); // Must fit in two bytes + + size_t header_size; + char buf[kRecyclableHeaderSize]; + + // Format the header + buf[4] = static_cast(n & 0xff); + buf[5] = static_cast(n >> 8); + buf[6] = static_cast(t); + + uint32_t crc = type_crc_[t]; + if (t < kRecyclableFullType) { + // Legacy record format + assert(block_offset_ + kHeaderSize + n <= kBlockSize); + header_size = kHeaderSize; + } else { + // Recyclable record format + assert(block_offset_ + kRecyclableHeaderSize + n <= kBlockSize); + header_size = kRecyclableHeaderSize; + + // Only encode low 32-bits of the 64-bit log number. This means + // we will fail to detect an old record if we recycled a log from + // ~4 billion logs ago, but that is effectively impossible, and + // even if it were we'dbe far more likely to see a false positive + // on the 32-bit CRC. + EncodeFixed32(buf + 7, static_cast(log_number_)); + crc = crc32c::Extend(crc, buf + 7, 4); + } + + // Compute the crc of the record type and the payload. + crc = crc32c::Extend(crc, ptr, n); + crc = crc32c::Mask(crc); // Adjust for storage + EncodeFixed32(buf, crc); + + // Write the header and the payload + Status s = dest_->Append(Slice(buf, header_size)); + if (s.ok()) { + s = dest_->Append(Slice(ptr, n)); + if (s.ok()) { + s = dest_->Flush(); + } + } + block_offset_ += header_size + n; + return s; +} + +} // namespace log +} // namespace rocksdb diff --git a/external/rocksdb/db/log_writer.h b/external/rocksdb/db/log_writer.h new file mode 100755 index 0000000000..11267b33d9 --- /dev/null +++ b/external/rocksdb/db/log_writer.h @@ -0,0 +1,105 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. 
See the AUTHORS file for names of contributors. +#pragma once + +#include + +#include + +#include "db/log_format.h" +#include "rocksdb/slice.h" +#include "rocksdb/status.h" + +namespace rocksdb { + +class WritableFileWriter; + +using std::unique_ptr; + +namespace log { + +/** + * Writer is a general purpose log stream writer. It provides an append-only + * abstraction for writing data. The details of the how the data is written is + * handled by the WriteableFile sub-class implementation. + * + * File format: + * + * File is broken down into variable sized records. The format of each record + * is described below. + * +-----+-------------+--+----+----------+------+-- ... ----+ + * File | r0 | r1 |P | r2 | r3 | r4 | | + * +-----+-------------+--+----+----------+------+-- ... ----+ + * <--- kBlockSize ------>|<-- kBlockSize ------>| + * rn = variable size records + * P = Padding + * + * Data is written out in kBlockSize chunks. If next record does not fit + * into the space left, the leftover space will be padded with \0. + * + * Legacy record format: + * + * +---------+-----------+-----------+--- ... ---+ + * |CRC (4B) | Size (2B) | Type (1B) | Payload | + * +---------+-----------+-----------+--- ... ---+ + * + * CRC = 32bit hash computed over the payload using CRC + * Size = Length of the payload data + * Type = Type of record + * (kZeroType, kFullType, kFirstType, kLastType, kMiddleType ) + * The type is used to group a bunch of records together to represent + * blocks that are larger than kBlockSize + * Payload = Byte stream as long as specified by the payload size + * + * Recyclable record format: + * + * +---------+-----------+-----------+----------------+--- ... ---+ + * |CRC (4B) | Size (2B) | Type (1B) | Log number (4B)| Payload | + * +---------+-----------+-----------+----------------+--- ... ---+ + * + * Same as above, with the addition of + * Log number = 32bit log file number, so that we can distinguish between + * records written by the most recent log writer vs a previous one. + */ +class Writer { + public: + // Create a writer that will append data to "*dest". + // "*dest" must be initially empty. + // "*dest" must remain live while this Writer is in use. + explicit Writer(unique_ptr&& dest, + uint64_t log_number, bool recycle_log_files); + ~Writer(); + + Status AddRecord(const Slice& slice); + + WritableFileWriter* file() { return dest_.get(); } + const WritableFileWriter* file() const { return dest_.get(); } + + uint64_t get_log_number() const { return log_number_; } + + private: + unique_ptr dest_; + size_t block_offset_; // Current offset in block + uint64_t log_number_; + bool recycle_log_files_; + + // crc32c values for all supported record types. These are + // pre-computed to reduce the overhead of computing the crc of the + // record type stored in the header. + uint32_t type_crc_[kMaxRecordType + 1]; + + Status EmitPhysicalRecord(RecordType type, const char* ptr, size_t length); + + // No copying allowed + Writer(const Writer&); + void operator=(const Writer&); +}; + +} // namespace log +} // namespace rocksdb diff --git a/external/rocksdb/db/managed_iterator.cc b/external/rocksdb/db/managed_iterator.cc new file mode 100755 index 0000000000..1d47f933df --- /dev/null +++ b/external/rocksdb/db/managed_iterator.cc @@ -0,0 +1,260 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef ROCKSDB_LITE + +#include "db/managed_iterator.h" + +#include +#include +#include + +#include "db/column_family.h" +#include "db/db_impl.h" +#include "db/db_iter.h" +#include "db/dbformat.h" +#include "db/xfunc_test_points.h" +#include "rocksdb/env.h" +#include "rocksdb/slice.h" +#include "rocksdb/slice_transform.h" +#include "table/merger.h" +#include "util/xfunc.h" + +namespace rocksdb { + +namespace { +// Helper class that locks a mutex on construction and unlocks the mutex when +// the destructor of the MutexLock object is invoked. +// +// Typical usage: +// +// void MyClass::MyMethod() { +// MILock l(&mu_); // mu_ is an instance variable +// ... some complex code, possibly with multiple return paths ... +// } + +class MILock { + public: + explicit MILock(std::mutex* mu, ManagedIterator* mi) : mu_(mu), mi_(mi) { + this->mu_->lock(); + } + ~MILock() { + this->mu_->unlock(); + XFUNC_TEST("managed_xftest_release", "managed_unlock", managed_unlock1, + xf_manage_release, mi_); + } + ManagedIterator* GetManagedIterator() { return mi_; } + + private: + std::mutex* const mu_; + ManagedIterator* mi_; + // No copying allowed + MILock(const MILock&) = delete; + void operator=(const MILock&) = delete; +}; +} // anonymous namespace + +// +// Synchronization between modifiers, releasers, creators +// If iterator operation, wait till (!in_use), set in_use, do op, reset in_use +// if modifying mutable_iter, atomically exchange in_use: +// return if in_use set / otherwise set in use, +// atomically replace new iter with old , reset in use +// The releaser is the new operation and it holds a lock for a very short time +// The existing non-const iterator operations are supposed to be single +// threaded and hold the lock for the duration of the operation +// The existing const iterator operations use the cached key/values +// and don't do any locking. 
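As a reader's aid, here is a minimal, self-contained sketch of the locking discipline described in the comment above. The names Guard, in_use, IteratorOp and TryRelease are illustrative stand-ins, not RocksDB API:

#include <mutex>

// RAII guard in the spirit of MILock above: lock on construction, unlock on
// destruction, so every return path of an iterator operation releases in_use.
class Guard {
 public:
  explicit Guard(std::mutex* mu) : mu_(mu) { mu_->lock(); }
  ~Guard() { mu_->unlock(); }
  Guard(const Guard&) = delete;
  Guard& operator=(const Guard&) = delete;

 private:
  std::mutex* const mu_;
};

std::mutex in_use;  // stands in for ManagedIterator::in_use_

void IteratorOp() {
  Guard g(&in_use);  // held for the whole operation, like MILock
  // ... Seek()/Next()/Prev() work on the wrapped iterator goes here ...
}

// The releaser must never block an in-flight operation: it frees the
// underlying iterator only if it can take the mutex immediately, mirroring
// the TryLock() path in ManagedIterator::ReleaseIter().
bool TryRelease() {
  if (!in_use.try_lock()) {
    return false;  // iterator is busy; try again later
  }
  // ... drop the wrapped iterator here (in_use held very briefly) ...
  in_use.unlock();
  return true;
}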
+ManagedIterator::ManagedIterator(DBImpl* db, const ReadOptions& read_options,
+                                 ColumnFamilyData* cfd)
+    : db_(db),
+      read_options_(read_options),
+      cfd_(cfd),
+      svnum_(cfd->GetSuperVersionNumber()),
+      mutable_iter_(nullptr),
+      valid_(false),
+      snapshot_created_(false),
+      release_supported_(true) {
+  read_options_.managed = false;
+  if ((!read_options_.tailing) && (read_options_.snapshot == nullptr)) {
+    // Take the snapshot outside of assert() so the side effect survives
+    // NDEBUG builds.
+    read_options_.snapshot = db_->GetSnapshot();
+    assert(read_options_.snapshot != nullptr);
+    snapshot_created_ = true;
+  }
+  cfh_.SetCFD(cfd);
+  mutable_iter_ = unique_ptr<Iterator>(db->NewIterator(read_options_, &cfh_));
+  XFUNC_TEST("managed_xftest_dropold", "managed_create", xf_managed_create1,
+             xf_manage_create, this);
+}
+
+ManagedIterator::~ManagedIterator() {
+  Lock();
+  if (snapshot_created_) {
+    db_->ReleaseSnapshot(read_options_.snapshot);
+    snapshot_created_ = false;
+    read_options_.snapshot = nullptr;
+  }
+  UnLock();
+}
+
+bool ManagedIterator::Valid() const { return valid_; }
+
+void ManagedIterator::SeekToLast() {
+  MILock l(&in_use_, this);
+  if (NeedToRebuild()) {
+    RebuildIterator();
+  }
+  assert(mutable_iter_ != nullptr);
+  mutable_iter_->SeekToLast();
+  if (mutable_iter_->status().ok()) {
+    UpdateCurrent();
+  }
+}
+
+void ManagedIterator::SeekToFirst() {
+  MILock l(&in_use_, this);
+  SeekInternal(Slice(), true);
+}
+
+void ManagedIterator::Seek(const Slice& user_key) {
+  MILock l(&in_use_, this);
+  SeekInternal(user_key, false);
+}
+
+void ManagedIterator::SeekInternal(const Slice& user_key, bool seek_to_first) {
+  if (NeedToRebuild()) {
+    RebuildIterator();
+  }
+  assert(mutable_iter_ != nullptr);
+  if (seek_to_first) {
+    mutable_iter_->SeekToFirst();
+  } else {
+    mutable_iter_->Seek(user_key);
+  }
+  UpdateCurrent();
+}
+
+void ManagedIterator::Prev() {
+  if (!valid_) {
+    status_ = Status::InvalidArgument("Iterator value invalid");
+    return;
+  }
+  MILock l(&in_use_, this);
+  if (NeedToRebuild()) {
+    std::string current_key = key().ToString();
+    Slice old_key(current_key);
+    RebuildIterator();
+    SeekInternal(old_key, false);
+    UpdateCurrent();
+    if (!valid_) {
+      return;
+    }
+    if (key().compare(old_key) != 0) {
+      valid_ = false;
+      status_ = Status::Incomplete("Cannot do Prev now");
+      return;
+    }
+  }
+  mutable_iter_->Prev();
+  if (mutable_iter_->status().ok()) {
+    UpdateCurrent();
+    status_ = Status::OK();
+  } else {
+    status_ = mutable_iter_->status();
+  }
+}
+
+void ManagedIterator::Next() {
+  if (!valid_) {
+    status_ = Status::InvalidArgument("Iterator value invalid");
+    return;
+  }
+  MILock l(&in_use_, this);
+  if (NeedToRebuild()) {
+    std::string current_key = key().ToString();
+    Slice old_key(current_key.data(), cached_key_.Size());
+    RebuildIterator();
+    SeekInternal(old_key, false);
+    UpdateCurrent();
+    if (!valid_) {
+      return;
+    }
+    if (key().compare(old_key) != 0) {
+      valid_ = false;
+      status_ = Status::Incomplete("Cannot do Next now");
+      return;
+    }
+  }
+  mutable_iter_->Next();
+  UpdateCurrent();
+}
+
+Slice ManagedIterator::key() const {
+  assert(valid_);
+  return cached_key_.GetKey();
+}
+
+Slice ManagedIterator::value() const {
+  assert(valid_);
+  return cached_value_.GetKey();
+}
+
+Status ManagedIterator::status() const { return status_; }
+
+void ManagedIterator::RebuildIterator() {
+  svnum_ = cfd_->GetSuperVersionNumber();
+  mutable_iter_ = unique_ptr<Iterator>(db_->NewIterator(read_options_, &cfh_));
+}
+
+void ManagedIterator::UpdateCurrent() {
+  assert(mutable_iter_ != nullptr);
+
+  valid_ = mutable_iter_->Valid();
+  if (!valid_) {
+    status_ = mutable_iter_->status();
+    return;
+  }
+
+  status_ =
Status::OK(); + cached_key_.SetKey(mutable_iter_->key()); + cached_value_.SetKey(mutable_iter_->value()); +} + +void ManagedIterator::ReleaseIter(bool only_old) { + if ((mutable_iter_ == nullptr) || (!release_supported_)) { + return; + } + if (svnum_ != cfd_->GetSuperVersionNumber() || !only_old) { + if (!TryLock()) { // Don't release iter if in use + return; + } + mutable_iter_ = nullptr; // in_use for a very short time + UnLock(); + } +} + +bool ManagedIterator::NeedToRebuild() { + if ((mutable_iter_ == nullptr) || (status_.IsIncomplete()) || + (!only_drop_old_ && (svnum_ != cfd_->GetSuperVersionNumber()))) { + return true; + } + return false; +} + +void ManagedIterator::Lock() { + in_use_.lock(); + return; +} + +bool ManagedIterator::TryLock() { return in_use_.try_lock(); } + +void ManagedIterator::UnLock() { + in_use_.unlock(); + XFUNC_TEST("managed_xftest_release", "managed_unlock", managed_unlock1, + xf_manage_release, this); +} + +} // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/db/managed_iterator.h b/external/rocksdb/db/managed_iterator.h new file mode 100755 index 0000000000..d9a87596ea --- /dev/null +++ b/external/rocksdb/db/managed_iterator.h @@ -0,0 +1,84 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#pragma once + +#ifndef ROCKSDB_LITE + +#include +#include +#include +#include + +#include "db/column_family.h" +#include "rocksdb/db.h" +#include "rocksdb/iterator.h" +#include "rocksdb/options.h" +#include "util/arena.h" + +namespace rocksdb { + +class DBImpl; +struct SuperVersion; +class ColumnFamilyData; + +/** + * ManagedIterator is a special type of iterator that supports freeing the + * underlying iterator and still being able to access the current key/value + * pair. This is done by copying the key/value pair so that clients can + * continue to access the data without getting a SIGSEGV. + * The underlying iterator can be freed manually through the call to + * ReleaseIter or automatically (as needed on space pressure or age.) + * The iterator is recreated using the saved original arguments. 
+ */ +class ManagedIterator : public Iterator { + public: + ManagedIterator(DBImpl* db, const ReadOptions& read_options, + ColumnFamilyData* cfd); + virtual ~ManagedIterator(); + + virtual void SeekToLast() override; + virtual void Prev() override; + virtual bool Valid() const override; + void SeekToFirst() override; + virtual void Seek(const Slice& target) override; + virtual void Next() override; + virtual Slice key() const override; + virtual Slice value() const override; + virtual Status status() const override; + void ReleaseIter(bool only_old); + void SetDropOld(bool only_old) { + only_drop_old_ = read_options_.tailing || only_old; + } + + private: + void RebuildIterator(); + void UpdateCurrent(); + void SeekInternal(const Slice& user_key, bool seek_to_first); + bool NeedToRebuild(); + void Lock(); + bool TryLock(); + void UnLock(); + DBImpl* const db_; + ReadOptions read_options_; + ColumnFamilyData* const cfd_; + ColumnFamilyHandleInternal cfh_; + + uint64_t svnum_; + std::unique_ptr mutable_iter_; + // internal iterator status + Status status_; + bool valid_; + + IterKey cached_key_; + IterKey cached_value_; + + bool only_drop_old_ = true; + bool snapshot_created_; + bool release_supported_; + std::mutex in_use_; // is managed iterator in use +}; + +} // namespace rocksdb +#endif // !ROCKSDB_LITE diff --git a/external/rocksdb/db/manual_compaction_test.cc b/external/rocksdb/db/manual_compaction_test.cc new file mode 100755 index 0000000000..0ff52d184d --- /dev/null +++ b/external/rocksdb/db/manual_compaction_test.cc @@ -0,0 +1,155 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Test for issue 178: a manual compaction causes deleted data to reappear. +#include +#include +#include + +#include "rocksdb/db.h" +#include "rocksdb/compaction_filter.h" +#include "rocksdb/slice.h" +#include "rocksdb/write_batch.h" +#include "util/testharness.h" +#include "port/port.h" + +using namespace rocksdb; + +namespace { + +const int kNumKeys = 1100000; + +std::string Key1(int i) { + char buf[100]; + snprintf(buf, sizeof(buf), "my_key_%d", i); + return buf; +} + +std::string Key2(int i) { + return Key1(i) + "_xxx"; +} + +class ManualCompactionTest : public testing::Test { + public: + ManualCompactionTest() { + // Get rid of any state from an old run. 
+ dbname_ = rocksdb::test::TmpDir() + "/rocksdb_cbug_test"; + DestroyDB(dbname_, rocksdb::Options()); + } + + std::string dbname_; +}; + +class DestroyAllCompactionFilter : public CompactionFilter { + public: + DestroyAllCompactionFilter() {} + + virtual bool Filter(int level, const Slice& key, const Slice& existing_value, + std::string* new_value, + bool* value_changed) const override { + return existing_value.ToString() == "destroy"; + } + + virtual const char* Name() const override { + return "DestroyAllCompactionFilter"; + } +}; + +TEST_F(ManualCompactionTest, CompactTouchesAllKeys) { + for (int iter = 0; iter < 2; ++iter) { + DB* db; + Options options; + if (iter == 0) { // level compaction + options.num_levels = 3; + options.compaction_style = kCompactionStyleLevel; + } else { // universal compaction + options.compaction_style = kCompactionStyleUniversal; + } + options.create_if_missing = true; + options.compression = rocksdb::kNoCompression; + options.compaction_filter = new DestroyAllCompactionFilter(); + ASSERT_OK(DB::Open(options, dbname_, &db)); + + db->Put(WriteOptions(), Slice("key1"), Slice("destroy")); + db->Put(WriteOptions(), Slice("key2"), Slice("destroy")); + db->Put(WriteOptions(), Slice("key3"), Slice("value3")); + db->Put(WriteOptions(), Slice("key4"), Slice("destroy")); + + Slice key4("key4"); + db->CompactRange(CompactRangeOptions(), nullptr, &key4); + Iterator* itr = db->NewIterator(ReadOptions()); + itr->SeekToFirst(); + ASSERT_TRUE(itr->Valid()); + ASSERT_EQ("key3", itr->key().ToString()); + itr->Next(); + ASSERT_TRUE(!itr->Valid()); + delete itr; + + delete options.compaction_filter; + delete db; + DestroyDB(dbname_, options); + } +} + +TEST_F(ManualCompactionTest, Test) { + // Open database. Disable compression since it affects the creation + // of layers and the code below is trying to test against a very + // specific scenario. 
+ rocksdb::DB* db; + rocksdb::Options db_options; + db_options.create_if_missing = true; + db_options.compression = rocksdb::kNoCompression; + ASSERT_OK(rocksdb::DB::Open(db_options, dbname_, &db)); + + // create first key range + rocksdb::WriteBatch batch; + for (int i = 0; i < kNumKeys; i++) { + batch.Put(Key1(i), "value for range 1 key"); + } + ASSERT_OK(db->Write(rocksdb::WriteOptions(), &batch)); + + // create second key range + batch.Clear(); + for (int i = 0; i < kNumKeys; i++) { + batch.Put(Key2(i), "value for range 2 key"); + } + ASSERT_OK(db->Write(rocksdb::WriteOptions(), &batch)); + + // delete second key range + batch.Clear(); + for (int i = 0; i < kNumKeys; i++) { + batch.Delete(Key2(i)); + } + ASSERT_OK(db->Write(rocksdb::WriteOptions(), &batch)); + + // compact database + std::string start_key = Key1(0); + std::string end_key = Key1(kNumKeys - 1); + rocksdb::Slice least(start_key.data(), start_key.size()); + rocksdb::Slice greatest(end_key.data(), end_key.size()); + + // commenting out the line below causes the example to work correctly + db->CompactRange(CompactRangeOptions(), &least, &greatest); + + // count the keys + rocksdb::Iterator* iter = db->NewIterator(rocksdb::ReadOptions()); + int num_keys = 0; + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + num_keys++; + } + delete iter; + ASSERT_EQ(kNumKeys, num_keys) << "Bad number of keys"; + + // close database + delete db; + DestroyDB(dbname_, rocksdb::Options()); +} + +} // anonymous namespace + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/memtable.cc b/external/rocksdb/db/memtable.cc new file mode 100755 index 0000000000..d8e35a289c --- /dev/null +++ b/external/rocksdb/db/memtable.cc @@ -0,0 +1,802 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+ +#include "db/memtable.h" + +#include +#include +#include + +#include "db/dbformat.h" +#include "db/merge_context.h" +#include "db/merge_helper.h" +#include "db/pinned_iterators_manager.h" +#include "rocksdb/comparator.h" +#include "rocksdb/env.h" +#include "rocksdb/iterator.h" +#include "rocksdb/merge_operator.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/write_buffer_manager.h" +#include "table/internal_iterator.h" +#include "table/merger.h" +#include "util/arena.h" +#include "util/coding.h" +#include "util/murmurhash.h" +#include "util/mutexlock.h" +#include "util/perf_context_imp.h" +#include "util/statistics.h" +#include "util/stop_watch.h" + +namespace rocksdb { + +MemTableOptions::MemTableOptions(const ImmutableCFOptions& ioptions, + const MutableCFOptions& mutable_cf_options) + : write_buffer_size(mutable_cf_options.write_buffer_size), + arena_block_size(mutable_cf_options.arena_block_size), + memtable_prefix_bloom_bits( + static_cast( + static_cast(mutable_cf_options.write_buffer_size) * + mutable_cf_options.memtable_prefix_bloom_size_ratio) * + 8u), + memtable_huge_page_size(mutable_cf_options.memtable_huge_page_size), + inplace_update_support(ioptions.inplace_update_support), + inplace_update_num_locks(mutable_cf_options.inplace_update_num_locks), + inplace_callback(ioptions.inplace_callback), + max_successive_merges(mutable_cf_options.max_successive_merges), + statistics(ioptions.statistics), + merge_operator(ioptions.merge_operator), + info_log(ioptions.info_log) {} + +MemTable::MemTable(const InternalKeyComparator& cmp, + const ImmutableCFOptions& ioptions, + const MutableCFOptions& mutable_cf_options, + WriteBufferManager* write_buffer_manager, + SequenceNumber earliest_seq) + : comparator_(cmp), + moptions_(ioptions, mutable_cf_options), + refs_(0), + kArenaBlockSize(OptimizeBlockSize(moptions_.arena_block_size)), + arena_(moptions_.arena_block_size, + mutable_cf_options.memtable_huge_page_size), + allocator_(&arena_, write_buffer_manager), + table_(ioptions.memtable_factory->CreateMemTableRep( + comparator_, &allocator_, ioptions.prefix_extractor, + ioptions.info_log)), + data_size_(0), + num_entries_(0), + num_deletes_(0), + flush_in_progress_(false), + flush_completed_(false), + file_number_(0), + first_seqno_(0), + earliest_seqno_(earliest_seq), + mem_next_logfile_number_(0), + min_prep_log_referenced_(0), + locks_(moptions_.inplace_update_support + ? moptions_.inplace_update_num_locks + : 0), + prefix_extractor_(ioptions.prefix_extractor), + flush_state_(FLUSH_NOT_REQUESTED), + env_(ioptions.env) { + UpdateFlushState(); + // something went wrong if we need to flush before inserting anything + assert(!ShouldScheduleFlush()); + + if (prefix_extractor_ && moptions_.memtable_prefix_bloom_bits > 0) { + prefix_bloom_.reset(new DynamicBloom( + &allocator_, moptions_.memtable_prefix_bloom_bits, + ioptions.bloom_locality, 6 /* hard coded 6 probes */, nullptr, + moptions_.memtable_huge_page_size, ioptions.info_log)); + } +} + +MemTable::~MemTable() { assert(refs_ == 0); } + +size_t MemTable::ApproximateMemoryUsage() { + size_t arena_usage = arena_.ApproximateMemoryUsage(); + size_t table_usage = table_->ApproximateMemoryUsage(); + // let MAX_USAGE = std::numeric_limits::max() + // then if arena_usage + total_usage >= MAX_USAGE, return MAX_USAGE. + // the following variation is to avoid numeric overflow. 
+  if (arena_usage >= std::numeric_limits<size_t>::max() - table_usage) {
+    return std::numeric_limits<size_t>::max();
+  }
+  // otherwise, return the actual usage
+  return arena_usage + table_usage;
+}
+
+bool MemTable::ShouldFlushNow() const {
+  // Often we cannot allocate arena blocks that exactly match the buffer
+  // size. Thus we have to decide if we should over-allocate or
+  // under-allocate.
+  // This constant can be interpreted as: if we still have more than
+  // "kAllowOverAllocationRatio * kArenaBlockSize" space left, we'd try to
+  // over-allocate one more block.
+  const double kAllowOverAllocationRatio = 0.6;
+
+  // If the arena still has room for a new block allocation, we can safely
+  // say it shouldn't flush.
+  auto allocated_memory =
+      table_->ApproximateMemoryUsage() + arena_.MemoryAllocatedBytes();
+
+  // if we can still allocate one more block without exceeding the
+  // over-allocation ratio, then we should not flush.
+  if (allocated_memory + kArenaBlockSize <
+      moptions_.write_buffer_size +
+      kArenaBlockSize * kAllowOverAllocationRatio) {
+    return false;
+  }
+
+  // if the user keeps adding entries that exceed moptions.write_buffer_size,
+  // we need to flush earlier even though we still have much available
+  // memory left.
+  if (allocated_memory > moptions_.write_buffer_size +
+      kArenaBlockSize * kAllowOverAllocationRatio) {
+    return true;
+  }
+
+  // In this code path, the arena has already allocated its "last block",
+  // which means the total allocated memory is either:
+  //  (1) "moderately" over-allocated (by no more than 0.6 * arena block
+  //  size), or
+  //  (2) less than the write buffer size, but we'll stop here since if we
+  //  allocate a new arena block, we'll over-allocate too much more (half of
+  //  the arena block size) memory.
+  //
+  // In either case, to avoid over-allocating, the last block will stop
+  // allocation when its usage reaches a certain ratio, and we carefully
+  // choose "0.75 full" as the stop condition because it addresses the
+  // following issue with great simplicity: What if the next inserted entry's
+  // size is bigger than AllocatedAndUnused()?
+  //
+  // The answer is: if the entry size is also bigger than 0.25 *
+  // kArenaBlockSize, a dedicated block will be allocated for it; otherwise
+  // arena will anyway skip the AllocatedAndUnused() and allocate a new,
+  // empty and regular block. In either case, we *overly* over-allocated.
+  //
+  // Therefore, setting the last block to be at most "0.75 full" avoids both
+  // cases.
+  //
+  // NOTE: the average percentage of wasted space of this approach can be
+  // estimated as: "arena block size * 0.25 / write buffer size". Users who
+  // specify a small write buffer size and/or a big arena block size may
+  // suffer.
+  return arena_.AllocatedAndUnused() < kArenaBlockSize / 4;
+}
+
+void MemTable::UpdateFlushState() {
+  auto state = flush_state_.load(std::memory_order_relaxed);
+  if (state == FLUSH_NOT_REQUESTED && ShouldFlushNow()) {
+    // ignore CAS failure, because that means somebody else requested
+    // a flush
+    flush_state_.compare_exchange_strong(state, FLUSH_REQUESTED,
+                                         std::memory_order_relaxed,
+                                         std::memory_order_relaxed);
+  }
+}
+
+int MemTable::KeyComparator::operator()(const char* prefix_len_key1,
+                                        const char* prefix_len_key2) const {
+  // Internal keys are encoded as length-prefixed strings.
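+  // (Assumed example of the encoding: user key "abc" with sequence number 7
+  // and type kTypeValue forms an 11-byte internal key -- "abc" plus an
+  // 8-byte packed (7 << 8 | kTypeValue) tag -- and is stored here as
+  // varint32(11) followed by those 11 bytes; GetLengthPrefixedSlice() below
+  // strips the varint prefix before the keys are compared.)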
+ Slice k1 = GetLengthPrefixedSlice(prefix_len_key1); + Slice k2 = GetLengthPrefixedSlice(prefix_len_key2); + return comparator.Compare(k1, k2); +} + +int MemTable::KeyComparator::operator()(const char* prefix_len_key, + const Slice& key) + const { + // Internal keys are encoded as length-prefixed strings. + Slice a = GetLengthPrefixedSlice(prefix_len_key); + return comparator.Compare(a, key); +} + +Slice MemTableRep::UserKey(const char* key) const { + Slice slice = GetLengthPrefixedSlice(key); + return Slice(slice.data(), slice.size() - 8); +} + +KeyHandle MemTableRep::Allocate(const size_t len, char** buf) { + *buf = allocator_->Allocate(len); + return static_cast(*buf); +} + +// Encode a suitable internal key target for "target" and return it. +// Uses *scratch as scratch space, and the returned pointer will point +// into this scratch space. +const char* EncodeKey(std::string* scratch, const Slice& target) { + scratch->clear(); + PutVarint32(scratch, static_cast(target.size())); + scratch->append(target.data(), target.size()); + return scratch->data(); +} + +class MemTableIterator : public InternalIterator { + public: + MemTableIterator(const MemTable& mem, const ReadOptions& read_options, + Arena* arena) + : bloom_(nullptr), + prefix_extractor_(mem.prefix_extractor_), + valid_(false), + arena_mode_(arena != nullptr), + value_pinned_(!mem.GetMemTableOptions()->inplace_update_support) { + if (prefix_extractor_ != nullptr && !read_options.total_order_seek) { + bloom_ = mem.prefix_bloom_.get(); + iter_ = mem.table_->GetDynamicPrefixIterator(arena); + } else { + iter_ = mem.table_->GetIterator(arena); + } + } + + ~MemTableIterator() { +#ifndef NDEBUG + // Assert that the MemTableIterator is never deleted while + // Pinning is Enabled. + assert(!pinned_iters_mgr_ || + (pinned_iters_mgr_ && !pinned_iters_mgr_->PinningEnabled())); +#endif + if (arena_mode_) { + iter_->~Iterator(); + } else { + delete iter_; + } + } + +#ifndef NDEBUG + virtual void SetPinnedItersMgr( + PinnedIteratorsManager* pinned_iters_mgr) override { + pinned_iters_mgr_ = pinned_iters_mgr; + } + PinnedIteratorsManager* pinned_iters_mgr_ = nullptr; +#endif + + virtual bool Valid() const override { return valid_; } + virtual void Seek(const Slice& k) override { + PERF_TIMER_GUARD(seek_on_memtable_time); + PERF_COUNTER_ADD(seek_on_memtable_count, 1); + if (bloom_ != nullptr) { + if (!bloom_->MayContain( + prefix_extractor_->Transform(ExtractUserKey(k)))) { + PERF_COUNTER_ADD(bloom_memtable_miss_count, 1); + valid_ = false; + return; + } else { + PERF_COUNTER_ADD(bloom_memtable_hit_count, 1); + } + } + iter_->Seek(k, nullptr); + valid_ = iter_->Valid(); + } + virtual void SeekToFirst() override { + iter_->SeekToFirst(); + valid_ = iter_->Valid(); + } + virtual void SeekToLast() override { + iter_->SeekToLast(); + valid_ = iter_->Valid(); + } + virtual void Next() override { + assert(Valid()); + iter_->Next(); + valid_ = iter_->Valid(); + } + virtual void Prev() override { + assert(Valid()); + iter_->Prev(); + valid_ = iter_->Valid(); + } + virtual Slice key() const override { + assert(Valid()); + return GetLengthPrefixedSlice(iter_->key()); + } + virtual Slice value() const override { + assert(Valid()); + Slice key_slice = GetLengthPrefixedSlice(iter_->key()); + return GetLengthPrefixedSlice(key_slice.data() + key_slice.size()); + } + + virtual Status status() const override { return Status::OK(); } + + virtual bool IsKeyPinned() const override { + // memtable data is always pinned + return true; + } + + virtual bool 
IsValuePinned() const override { + // memtable value is always pinned, except if we allow inplace update. + return value_pinned_; + } + + private: + DynamicBloom* bloom_; + const SliceTransform* const prefix_extractor_; + MemTableRep::Iterator* iter_; + bool valid_; + bool arena_mode_; + bool value_pinned_; + + // No copying allowed + MemTableIterator(const MemTableIterator&); + void operator=(const MemTableIterator&); +}; + +InternalIterator* MemTable::NewIterator(const ReadOptions& read_options, + Arena* arena) { + assert(arena != nullptr); + auto mem = arena->AllocateAligned(sizeof(MemTableIterator)); + return new (mem) MemTableIterator(*this, read_options, arena); +} + +port::RWMutex* MemTable::GetLock(const Slice& key) { + static murmur_hash hash; + return &locks_[hash(key) % locks_.size()]; +} + +uint64_t MemTable::ApproximateSize(const Slice& start_ikey, + const Slice& end_ikey) { + uint64_t entry_count = table_->ApproximateNumEntries(start_ikey, end_ikey); + if (entry_count == 0) { + return 0; + } + uint64_t n = num_entries_.load(std::memory_order_relaxed); + if (n == 0) { + return 0; + } + if (entry_count > n) { + // table_->ApproximateNumEntries() is just an estimate so it can be larger + // than actual entries we have. Cap it to entries we have to limit the + // inaccuracy. + entry_count = n; + } + uint64_t data_size = data_size_.load(std::memory_order_relaxed); + return entry_count * (data_size / n); +} + +void MemTable::Add(SequenceNumber s, ValueType type, + const Slice& key, /* user key */ + const Slice& value, bool allow_concurrent, + MemTablePostProcessInfo* post_process_info) { + // Format of an entry is concatenation of: + // key_size : varint32 of internal_key.size() + // key bytes : char[internal_key.size()] + // value_size : varint32 of value.size() + // value bytes : char[value.size()] + uint32_t key_size = static_cast(key.size()); + uint32_t val_size = static_cast(value.size()); + uint32_t internal_key_size = key_size + 8; + const uint32_t encoded_len = VarintLength(internal_key_size) + + internal_key_size + VarintLength(val_size) + + val_size; + char* buf = nullptr; + KeyHandle handle = table_->Allocate(encoded_len, &buf); + + char* p = EncodeVarint32(buf, internal_key_size); + memcpy(p, key.data(), key_size); + p += key_size; + uint64_t packed = PackSequenceAndType(s, type); + EncodeFixed64(p, packed); + p += 8; + p = EncodeVarint32(p, val_size); + memcpy(p, value.data(), val_size); + assert((unsigned)(p + val_size - buf) == (unsigned)encoded_len); + if (!allow_concurrent) { + table_->Insert(handle); + + // this is a bit ugly, but is the way to avoid locked instructions + // when incrementing an atomic + num_entries_.store(num_entries_.load(std::memory_order_relaxed) + 1, + std::memory_order_relaxed); + data_size_.store(data_size_.load(std::memory_order_relaxed) + encoded_len, + std::memory_order_relaxed); + if (type == kTypeDeletion) { + num_deletes_.store(num_deletes_.load(std::memory_order_relaxed) + 1, + std::memory_order_relaxed); + } + + if (prefix_bloom_) { + assert(prefix_extractor_); + prefix_bloom_->Add(prefix_extractor_->Transform(key)); + } + + // The first sequence number inserted into the memtable + assert(first_seqno_ == 0 || s > first_seqno_); + if (first_seqno_ == 0) { + first_seqno_.store(s, std::memory_order_relaxed); + + if (earliest_seqno_ == kMaxSequenceNumber) { + earliest_seqno_.store(GetFirstSequenceNumber(), + std::memory_order_relaxed); + } + assert(first_seqno_.load() >= earliest_seqno_.load()); + } + assert(post_process_info == 
nullptr);
+    UpdateFlushState();
+  } else {
+    table_->InsertConcurrently(handle);
+
+    assert(post_process_info != nullptr);
+    post_process_info->num_entries++;
+    post_process_info->data_size += encoded_len;
+    if (type == kTypeDeletion) {
+      post_process_info->num_deletes++;
+    }
+
+    if (prefix_bloom_) {
+      assert(prefix_extractor_);
+      prefix_bloom_->AddConcurrently(prefix_extractor_->Transform(key));
+    }
+
+    // atomically update first_seqno_ and earliest_seqno_.
+    uint64_t cur_seq_num = first_seqno_.load(std::memory_order_relaxed);
+    while ((cur_seq_num == 0 || s < cur_seq_num) &&
+           !first_seqno_.compare_exchange_weak(cur_seq_num, s)) {
+    }
+    uint64_t cur_earliest_seqno =
+        earliest_seqno_.load(std::memory_order_relaxed);
+    while (
+        (cur_earliest_seqno == kMaxSequenceNumber || s < cur_earliest_seqno) &&
+        !earliest_seqno_.compare_exchange_weak(cur_earliest_seqno, s)) {
+    }
+  }
+}
+
+// Callback from MemTable::Get()
+namespace {
+
+struct Saver {
+  Status* status;
+  const LookupKey* key;
+  bool* found_final_value;  // Is value set correctly? Used by KeyMayExist
+  bool* merge_in_progress;
+  std::string* value;
+  SequenceNumber seq;
+  const MergeOperator* merge_operator;
+  // the merge operations encountered;
+  MergeContext* merge_context;
+  MemTable* mem;
+  Logger* logger;
+  Statistics* statistics;
+  bool inplace_update_support;
+  Env* env_;
+};
+}  // namespace
+
+static bool SaveValue(void* arg, const char* entry) {
+  Saver* s = reinterpret_cast<Saver*>(arg);
+  MergeContext* merge_context = s->merge_context;
+  const MergeOperator* merge_operator = s->merge_operator;
+
+  assert(s != nullptr && merge_context != nullptr);
+
+  // entry format is:
+  //    klength  varint32
+  //    userkey  char[klength-8]
+  //    tag      uint64
+  //    vlength  varint32
+  //    value    char[vlength]
+  // Check that it belongs to same user key. We do not check the
+  // sequence number since the Seek() call above should have skipped
+  // all entries with overly large sequence numbers.
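+  // (Worked example, for illustration only: user key "k1", sequence 9, type
+  // kTypeValue, value "v" is laid out as varint32(10) | "k1" | 8-byte tag
+  // (9 << 8 | kTypeValue) | varint32(1) | "v"; key_ptr below points just
+  // past the leading varint, so Slice(key_ptr, key_length - 8) is the user
+  // key.)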
+ uint32_t key_length; + const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length); + if (s->mem->GetInternalKeyComparator().user_comparator()->Equal( + Slice(key_ptr, key_length - 8), s->key->user_key())) { + // Correct user key + const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8); + ValueType type; + UnPackSequenceAndType(tag, &s->seq, &type); + + switch (type) { + case kTypeValue: { + if (s->inplace_update_support) { + s->mem->GetLock(s->key->user_key())->ReadLock(); + } + Slice v = GetLengthPrefixedSlice(key_ptr + key_length); + *(s->status) = Status::OK(); + if (*(s->merge_in_progress)) { + *(s->status) = MergeHelper::TimedFullMerge( + merge_operator, s->key->user_key(), &v, + merge_context->GetOperands(), s->value, s->logger, s->statistics, + s->env_); + } else if (s->value != nullptr) { + s->value->assign(v.data(), v.size()); + } + if (s->inplace_update_support) { + s->mem->GetLock(s->key->user_key())->ReadUnlock(); + } + *(s->found_final_value) = true; + return false; + } + case kTypeDeletion: + case kTypeSingleDeletion: { + if (*(s->merge_in_progress)) { + *(s->status) = MergeHelper::TimedFullMerge( + merge_operator, s->key->user_key(), nullptr, + merge_context->GetOperands(), s->value, s->logger, s->statistics, + s->env_); + } else { + *(s->status) = Status::NotFound(); + } + *(s->found_final_value) = true; + return false; + } + case kTypeMerge: { + if (!merge_operator) { + *(s->status) = Status::InvalidArgument( + "merge_operator is not properly initialized."); + // Normally we continue the loop (return true) when we see a merge + // operand. But in case of an error, we should stop the loop + // immediately and pretend we have found the value to stop further + // seek. Otherwise, the later call will override this error status. + *(s->found_final_value) = true; + return false; + } + Slice v = GetLengthPrefixedSlice(key_ptr + key_length); + *(s->merge_in_progress) = true; + merge_context->PushOperand( + v, s->inplace_update_support == false /* operand_pinned */); + return true; + } + default: + assert(false); + return true; + } + } + + // s->state could be Corrupt, merge or notfound + return false; +} + +bool MemTable::Get(const LookupKey& key, std::string* value, Status* s, + MergeContext* merge_context, SequenceNumber* seq) { + // The sequence number is updated synchronously in version_set.h + if (IsEmpty()) { + // Avoiding recording stats for speed. + return false; + } + PERF_TIMER_GUARD(get_from_memtable_time); + + Slice user_key = key.user_key(); + bool found_final_value = false; + bool merge_in_progress = s->IsMergeInProgress(); + bool const may_contain = + nullptr == prefix_bloom_ + ? 
false + : prefix_bloom_->MayContain(prefix_extractor_->Transform(user_key)); + if (prefix_bloom_ && !may_contain) { + // iter is null if prefix bloom says the key does not exist + PERF_COUNTER_ADD(bloom_memtable_miss_count, 1); + *seq = kMaxSequenceNumber; + } else { + if (prefix_bloom_) { + PERF_COUNTER_ADD(bloom_memtable_hit_count, 1); + } + Saver saver; + saver.status = s; + saver.found_final_value = &found_final_value; + saver.merge_in_progress = &merge_in_progress; + saver.key = &key; + saver.value = value; + saver.seq = kMaxSequenceNumber; + saver.mem = this; + saver.merge_context = merge_context; + saver.merge_operator = moptions_.merge_operator; + saver.logger = moptions_.info_log; + saver.inplace_update_support = moptions_.inplace_update_support; + saver.statistics = moptions_.statistics; + saver.env_ = env_; + table_->Get(key, &saver, SaveValue); + + *seq = saver.seq; + } + + // No change to value, since we have not yet found a Put/Delete + if (!found_final_value && merge_in_progress) { + *s = Status::MergeInProgress(); + } + PERF_COUNTER_ADD(get_from_memtable_count, 1); + return found_final_value; +} + +void MemTable::Update(SequenceNumber seq, + const Slice& key, + const Slice& value) { + LookupKey lkey(key, seq); + Slice mem_key = lkey.memtable_key(); + + std::unique_ptr iter( + table_->GetDynamicPrefixIterator()); + iter->Seek(lkey.internal_key(), mem_key.data()); + + if (iter->Valid()) { + // entry format is: + // key_length varint32 + // userkey char[klength-8] + // tag uint64 + // vlength varint32 + // value char[vlength] + // Check that it belongs to same user key. We do not check the + // sequence number since the Seek() call above should have skipped + // all entries with overly large sequence numbers. + const char* entry = iter->key(); + uint32_t key_length = 0; + const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length); + if (comparator_.comparator.user_comparator()->Equal( + Slice(key_ptr, key_length - 8), lkey.user_key())) { + // Correct user key + const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8); + ValueType type; + SequenceNumber unused; + UnPackSequenceAndType(tag, &unused, &type); + switch (type) { + case kTypeValue: { + Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length); + uint32_t prev_size = static_cast(prev_value.size()); + uint32_t new_size = static_cast(value.size()); + + // Update value, if new value size <= previous value size + if (new_size <= prev_size ) { + char* p = EncodeVarint32(const_cast(key_ptr) + key_length, + new_size); + WriteLock wl(GetLock(lkey.user_key())); + memcpy(p, value.data(), value.size()); + assert((unsigned)((p + value.size()) - entry) == + (unsigned)(VarintLength(key_length) + key_length + + VarintLength(value.size()) + value.size())); + return; + } + } + default: + // If the latest value is kTypeDeletion, kTypeMerge or kTypeLogData + // we don't have enough space for update inplace + Add(seq, kTypeValue, key, value); + return; + } + } + } + + // key doesn't exist + Add(seq, kTypeValue, key, value); +} + +bool MemTable::UpdateCallback(SequenceNumber seq, + const Slice& key, + const Slice& delta) { + LookupKey lkey(key, seq); + Slice memkey = lkey.memtable_key(); + + std::unique_ptr iter( + table_->GetDynamicPrefixIterator()); + iter->Seek(lkey.internal_key(), memkey.data()); + + if (iter->Valid()) { + // entry format is: + // key_length varint32 + // userkey char[klength-8] + // tag uint64 + // vlength varint32 + // value char[vlength] + // Check that it belongs to same user key. 
We do not check the + // sequence number since the Seek() call above should have skipped + // all entries with overly large sequence numbers. + const char* entry = iter->key(); + uint32_t key_length = 0; + const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length); + if (comparator_.comparator.user_comparator()->Equal( + Slice(key_ptr, key_length - 8), lkey.user_key())) { + // Correct user key + const uint64_t tag = DecodeFixed64(key_ptr + key_length - 8); + ValueType type; + uint64_t unused; + UnPackSequenceAndType(tag, &unused, &type); + switch (type) { + case kTypeValue: { + Slice prev_value = GetLengthPrefixedSlice(key_ptr + key_length); + uint32_t prev_size = static_cast(prev_value.size()); + + char* prev_buffer = const_cast(prev_value.data()); + uint32_t new_prev_size = prev_size; + + std::string str_value; + WriteLock wl(GetLock(lkey.user_key())); + auto status = moptions_.inplace_callback(prev_buffer, &new_prev_size, + delta, &str_value); + if (status == UpdateStatus::UPDATED_INPLACE) { + // Value already updated by callback. + assert(new_prev_size <= prev_size); + if (new_prev_size < prev_size) { + // overwrite the new prev_size + char* p = EncodeVarint32(const_cast(key_ptr) + key_length, + new_prev_size); + if (VarintLength(new_prev_size) < VarintLength(prev_size)) { + // shift the value buffer as well. + memcpy(p, prev_buffer, new_prev_size); + } + } + RecordTick(moptions_.statistics, NUMBER_KEYS_UPDATED); + UpdateFlushState(); + return true; + } else if (status == UpdateStatus::UPDATED) { + Add(seq, kTypeValue, key, Slice(str_value)); + RecordTick(moptions_.statistics, NUMBER_KEYS_WRITTEN); + UpdateFlushState(); + return true; + } else if (status == UpdateStatus::UPDATE_FAILED) { + // No action required. Return. + UpdateFlushState(); + return true; + } + } + default: + break; + } + } + } + // If the latest value is not kTypeValue + // or key doesn't exist + return false; +} + +size_t MemTable::CountSuccessiveMergeEntries(const LookupKey& key) { + Slice memkey = key.memtable_key(); + + // A total ordered iterator is costly for some memtablerep (prefix aware + // reps). By passing in the user key, we allow efficient iterator creation. + // The iterator only needs to be ordered within the same user key. 
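+  // (For a hash-based memtablerep, a total-order iterator may have to visit
+  // or merge every bucket, whereas the dynamic prefix iterator obtained
+  // below can confine the scan to the bucket holding this user key.)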
+ std::unique_ptr iter( + table_->GetDynamicPrefixIterator()); + iter->Seek(key.internal_key(), memkey.data()); + + size_t num_successive_merges = 0; + + for (; iter->Valid(); iter->Next()) { + const char* entry = iter->key(); + uint32_t key_length = 0; + const char* iter_key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length); + if (!comparator_.comparator.user_comparator()->Equal( + Slice(iter_key_ptr, key_length - 8), key.user_key())) { + break; + } + + const uint64_t tag = DecodeFixed64(iter_key_ptr + key_length - 8); + ValueType type; + uint64_t unused; + UnPackSequenceAndType(tag, &unused, &type); + if (type != kTypeMerge) { + break; + } + + ++num_successive_merges; + } + + return num_successive_merges; +} + +void MemTableRep::Get(const LookupKey& k, void* callback_args, + bool (*callback_func)(void* arg, const char* entry)) { + auto iter = GetDynamicPrefixIterator(); + for (iter->Seek(k.internal_key(), k.memtable_key().data()); + iter->Valid() && callback_func(callback_args, iter->key()); + iter->Next()) { + } +} + +void MemTable::RefLogContainingPrepSection(uint64_t log) { + assert(log > 0); + auto cur = min_prep_log_referenced_.load(); + while ((log < cur || cur == 0) && + !min_prep_log_referenced_.compare_exchange_strong(cur, log)) { + cur = min_prep_log_referenced_.load(); + } +} + +uint64_t MemTable::GetMinLogContainingPrepSection() { + return min_prep_log_referenced_.load(); +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/memtable.h b/external/rocksdb/db/memtable.h new file mode 100755 index 0000000000..62931613e2 --- /dev/null +++ b/external/rocksdb/db/memtable.h @@ -0,0 +1,400 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#include +#include +#include +#include +#include +#include +#include "db/dbformat.h" +#include "db/skiplist.h" +#include "db/version_edit.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/immutable_options.h" +#include "db/memtable_allocator.h" +#include "util/concurrent_arena.h" +#include "util/dynamic_bloom.h" +#include "util/instrumented_mutex.h" +#include "util/mutable_cf_options.h" + +namespace rocksdb { + +class Mutex; +class MemTableIterator; +class MergeContext; +class InternalIterator; + +struct MemTableOptions { + explicit MemTableOptions( + const ImmutableCFOptions& ioptions, + const MutableCFOptions& mutable_cf_options); + size_t write_buffer_size; + size_t arena_block_size; + uint32_t memtable_prefix_bloom_bits; + size_t memtable_huge_page_size; + bool inplace_update_support; + size_t inplace_update_num_locks; + UpdateStatus (*inplace_callback)(char* existing_value, + uint32_t* existing_value_size, + Slice delta_value, + std::string* merged_value); + size_t max_successive_merges; + Statistics* statistics; + MergeOperator* merge_operator; + Logger* info_log; +}; + +// Batched counters to updated when inserting keys in one write batch. +// In post process of the write batch, these can be updated together. +// Only used in concurrent memtable insert case. 
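+// (Hypothetical usage sketch: each concurrent writer fills its own local
+// MemTablePostProcessInfo via MemTable::Add(..., true /*allow_concurrent*/,
+// &info), and a single post-processing step then calls
+// MemTable::BatchPostProcess(info) to fold the counters into the memtable's
+// atomics in one place.)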
+struct MemTablePostProcessInfo { + uint64_t data_size = 0; + uint64_t num_entries = 0; + uint64_t num_deletes = 0; +}; + +// Note: Many of the methods in this class have comments indicating that +// external synchromization is required as these methods are not thread-safe. +// It is up to higher layers of code to decide how to prevent concurrent +// invokation of these methods. This is usually done by acquiring either +// the db mutex or the single writer thread. +// +// Some of these methods are documented to only require external +// synchronization if this memtable is immutable. Calling MarkImmutable() is +// not sufficient to guarantee immutability. It is up to higher layers of +// code to determine if this MemTable can still be modified by other threads. +// Eg: The Superversion stores a pointer to the current MemTable (that can +// be modified) and a separate list of the MemTables that can no longer be +// written to (aka the 'immutable memtables'). +class MemTable { + public: + struct KeyComparator : public MemTableRep::KeyComparator { + const InternalKeyComparator comparator; + explicit KeyComparator(const InternalKeyComparator& c) : comparator(c) { } + virtual int operator()(const char* prefix_len_key1, + const char* prefix_len_key2) const override; + virtual int operator()(const char* prefix_len_key, + const Slice& key) const override; + }; + + // MemTables are reference counted. The initial reference count + // is zero and the caller must call Ref() at least once. + // + // earliest_seq should be the current SequenceNumber in the db such that any + // key inserted into this memtable will have an equal or larger seq number. + // (When a db is first created, the earliest sequence number will be 0). + // If the earliest sequence number is not known, kMaxSequenceNumber may be + // used, but this may prevent some transactions from succeeding until the + // first key is inserted into the memtable. + explicit MemTable(const InternalKeyComparator& comparator, + const ImmutableCFOptions& ioptions, + const MutableCFOptions& mutable_cf_options, + WriteBufferManager* write_buffer_manager, + SequenceNumber earliest_seq); + + // Do not delete this MemTable unless Unref() indicates it not in use. + ~MemTable(); + + // Increase reference count. + // REQUIRES: external synchronization to prevent simultaneous + // operations on the same MemTable. + void Ref() { ++refs_; } + + // Drop reference count. + // If the refcount goes to zero return this memtable, otherwise return null. + // REQUIRES: external synchronization to prevent simultaneous + // operations on the same MemTable. + MemTable* Unref() { + --refs_; + assert(refs_ >= 0); + if (refs_ <= 0) { + return this; + } + return nullptr; + } + + // Returns an estimate of the number of bytes of data in use by this + // data structure. + // + // REQUIRES: external synchronization to prevent simultaneous + // operations on the same MemTable (unless this Memtable is immutable). + size_t ApproximateMemoryUsage(); + + // This method heuristically determines if the memtable should continue to + // host more data. 
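+  // (State machine behind flush_state_: UpdateFlushState() in memtable.cc
+  // advances FLUSH_NOT_REQUESTED -> FLUSH_REQUESTED once ShouldFlushNow()
+  // fires; ShouldScheduleFlush() below reports that state, and
+  // MarkFlushScheduled() claims FLUSH_REQUESTED -> FLUSH_SCHEDULED so that
+  // exactly one caller ends up scheduling the flush.)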
+  // This method heuristically determines if the memtable should continue to
+  // host more data.
+  bool ShouldScheduleFlush() const {
+    return flush_state_.load(std::memory_order_relaxed) == FLUSH_REQUESTED;
+  }
+
+  // Returns true if a flush should be scheduled and the caller should
+  // be the one to schedule it
+  bool MarkFlushScheduled() {
+    auto before = FLUSH_REQUESTED;
+    return flush_state_.compare_exchange_strong(before, FLUSH_SCHEDULED,
+                                                std::memory_order_relaxed,
+                                                std::memory_order_relaxed);
+  }
+
+  // Return an iterator that yields the contents of the memtable.
+  //
+  // The caller must ensure that the underlying MemTable remains live
+  // while the returned iterator is live. The keys returned by this
+  // iterator are internal keys encoded by AppendInternalKey in the
+  // db/dbformat.{h,cc} module.
+  //
+  // By default, it returns an iterator for prefix seek if prefix_extractor
+  // is configured in Options.
+  // arena: If not null, the arena needs to be used to allocate the Iterator.
+  //        Calling ~Iterator of the iterator will destroy all the states but
+  //        those allocated in arena.
+  InternalIterator* NewIterator(const ReadOptions& read_options, Arena* arena);
+
+  // Add an entry into memtable that maps key to value at the
+  // specified sequence number and with the specified type.
+  // Typically value will be empty if type == kTypeDeletion.
+  //
+  // REQUIRES: if allow_concurrent = false, external synchronization to prevent
+  // simultaneous operations on the same MemTable.
+  void Add(SequenceNumber seq, ValueType type, const Slice& key,
+           const Slice& value, bool allow_concurrent = false,
+           MemTablePostProcessInfo* post_process_info = nullptr);
+
+  // If memtable contains a value for key, store it in *value and return true.
+  // If memtable contains a deletion for key, store a NotFound() error
+  // in *status and return true.
+  // If memtable contains a Merge operation as the most recent entry for a
+  // key, and the merge process does not stop (not reaching a value or delete),
+  // prepend the current merge operand to *operands,
+  // store MergeInProgress in *s, and return false.
+  // Else, return false.
+  // If any operation was found, its most recent sequence number
+  // will be stored in *seq on success (regardless of whether true/false is
+  // returned). Otherwise, *seq will be set to kMaxSequenceNumber.
+  // On success, *s may be set to OK, NotFound, or MergeInProgress. Any other
+  // status returned indicates a corruption or other unexpected error.
+  bool Get(const LookupKey& key, std::string* value, Status* s,
+           MergeContext* merge_context, SequenceNumber* seq);
+
+  bool Get(const LookupKey& key, std::string* value, Status* s,
+           MergeContext* merge_context) {
+    SequenceNumber seq;
+    return Get(key, value, s, merge_context, &seq);
+  }
+
+  // Attempts to update the new_value inplace, else does normal Add
+  // Pseudocode
+  //   if key exists in current memtable && prev_value is of type kTypeValue
+  //     if sizeof(new_value) <= sizeof(prev_value)
+  //       update inplace
+  //     else add(key, new_value)
+  //   else add(key, new_value)
+  //
+  // REQUIRES: external synchronization to prevent simultaneous
+  // operations on the same MemTable.
+  void Update(SequenceNumber seq,
+              const Slice& key,
+              const Slice& value);
+  // If prev_value for key exists, attempts to update it inplace,
+  // else returns false
+  // Pseudocode
+  //   if key exists in current memtable && prev_value is of type kTypeValue
+  //     new_value = delta(prev_value)
+  //     if sizeof(new_value) <= sizeof(prev_value)
+  //       update inplace
+  //     else add(key, new_value)
+  //   else return false
+  //
+  // REQUIRES: external synchronization to prevent simultaneous
+  // operations on the same MemTable.
+  bool UpdateCallback(SequenceNumber seq,
+                      const Slice& key,
+                      const Slice& delta);
+
+  // Returns the number of successive merge entries starting from the newest
+  // entry for the key up to the last non-merge entry or last entry for the
+  // key in the memtable.
+  size_t CountSuccessiveMergeEntries(const LookupKey& key);
+
+  // Update counters and flush status after inserting a whole write batch
+  // Used in concurrent memtable inserts.
+  void BatchPostProcess(const MemTablePostProcessInfo& update_counters) {
+    num_entries_.fetch_add(update_counters.num_entries,
+                           std::memory_order_relaxed);
+    data_size_.fetch_add(update_counters.data_size, std::memory_order_relaxed);
+    if (update_counters.num_deletes != 0) {
+      num_deletes_.fetch_add(update_counters.num_deletes,
+                             std::memory_order_relaxed);
+    }
+    UpdateFlushState();
+  }
+
+  // Get total number of entries in the mem table.
+  // REQUIRES: external synchronization to prevent simultaneous
+  // operations on the same MemTable (unless this Memtable is immutable).
+  uint64_t num_entries() const {
+    return num_entries_.load(std::memory_order_relaxed);
+  }
+
+  // Get total number of deletes in the mem table.
+  // REQUIRES: external synchronization to prevent simultaneous
+  // operations on the same MemTable (unless this Memtable is immutable).
+  uint64_t num_deletes() const {
+    return num_deletes_.load(std::memory_order_relaxed);
+  }
+
+  // Returns the edits area that is needed for flushing the memtable
+  VersionEdit* GetEdits() { return &edit_; }
+
+  // Returns true if no entry has been inserted into the mem table.
+  // REQUIRES: external synchronization to prevent simultaneous
+  // operations on the same MemTable (unless this Memtable is immutable).
+  bool IsEmpty() const { return first_seqno_ == 0; }
+
+  // Returns the sequence number of the first element that was inserted
+  // into the memtable.
+  // REQUIRES: external synchronization to prevent simultaneous
+  // operations on the same MemTable (unless this Memtable is immutable).
+  SequenceNumber GetFirstSequenceNumber() {
+    return first_seqno_.load(std::memory_order_relaxed);
+  }
+
+  // Returns the sequence number that is guaranteed to be smaller than or equal
+  // to the sequence number of any key that could be inserted into this
+  // memtable. It can then be assumed that any write with a larger (or equal)
+  // sequence number will be present in this memtable or a later memtable.
+  //
+  // If the earliest sequence number could not be determined,
+  // kMaxSequenceNumber will be returned.
+  SequenceNumber GetEarliestSequenceNumber() {
+    return earliest_seqno_.load(std::memory_order_relaxed);
+  }
+
+  // Returns the next active logfile number when this memtable is about to
+  // be flushed to storage
+  // REQUIRES: external synchronization to prevent simultaneous
+  // operations on the same MemTable.
+  uint64_t GetNextLogNumber() { return mem_next_logfile_number_; }
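BatchPostProcess above is the publish step of a batching pattern: a concurrent writer accumulates per-batch statistics in a local MemTablePostProcessInfo and then issues a handful of relaxed atomic adds instead of one per inserted key. The same pattern in isolation, as a std-only sketch (all names here are illustrative):

```cpp
#include <atomic>
#include <cstdint>

struct SharedCounters {              // shared between writer threads
  std::atomic<uint64_t> num_entries{0};
  std::atomic<uint64_t> data_size{0};
};

struct LocalInfo {                   // per-batch scratch, like
  uint64_t num_entries = 0;          // MemTablePostProcessInfo above
  uint64_t data_size = 0;
};

void PostProcess(SharedCounters& shared, const LocalInfo& local) {
  // one relaxed read-modify-write per counter, not one per inserted key
  shared.num_entries.fetch_add(local.num_entries, std::memory_order_relaxed);
  shared.data_size.fetch_add(local.data_size, std::memory_order_relaxed);
}
```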
+  // Sets the next active logfile number when this memtable is about to
+  // be flushed to storage
+  // REQUIRES: external synchronization to prevent simultaneous
+  // operations on the same MemTable.
+  void SetNextLogNumber(uint64_t num) { mem_next_logfile_number_ = num; }
+
+  // If this memtable contains data from a committed
+  // two phase transaction we must take note of the
+  // log which contains that data so we can know
+  // when to release that log.
+  void RefLogContainingPrepSection(uint64_t log);
+  uint64_t GetMinLogContainingPrepSection();
+
+  // Notify the underlying storage that no more items will be added.
+  // REQUIRES: external synchronization to prevent simultaneous
+  // operations on the same MemTable.
+  // After MarkImmutable() is called, you should not attempt to
+  // write anything to this MemTable(). (i.e. do not call Add() or Update()).
+  void MarkImmutable() {
+    table_->MarkReadOnly();
+    allocator_.DoneAllocating();
+  }
+
+  // Return true if the current MemTableRep supports merge operator.
+  bool IsMergeOperatorSupported() const {
+    return table_->IsMergeOperatorSupported();
+  }
+
+  // Return true if the current MemTableRep supports snapshots.
+  // Inplace update prevents snapshots.
+  bool IsSnapshotSupported() const {
+    return table_->IsSnapshotSupported() && !moptions_.inplace_update_support;
+  }
+
+  uint64_t ApproximateSize(const Slice& start_ikey, const Slice& end_ikey);
+
+  // Get the lock associated for the key
+  port::RWMutex* GetLock(const Slice& key);
+
+  const InternalKeyComparator& GetInternalKeyComparator() const {
+    return comparator_.comparator;
+  }
+
+  const MemTableOptions* GetMemTableOptions() const { return &moptions_; }
+
+ private:
+  enum FlushStateEnum { FLUSH_NOT_REQUESTED, FLUSH_REQUESTED, FLUSH_SCHEDULED };
+
+  friend class MemTableIterator;
+  friend class MemTableBackwardIterator;
+  friend class MemTableList;
+
+  KeyComparator comparator_;
+  const MemTableOptions moptions_;
+  int refs_;
+  const size_t kArenaBlockSize;
+  ConcurrentArena arena_;
+  MemTableAllocator allocator_;
+  unique_ptr<MemTableRep> table_;
+
+  // Total data size of all data inserted
+  std::atomic<uint64_t> data_size_;
+  std::atomic<uint64_t> num_entries_;
+  std::atomic<uint64_t> num_deletes_;
+
+  // These are used to manage memtable flushes to storage
+  bool flush_in_progress_;  // started the flush
+  bool flush_completed_;    // finished the flush
+  uint64_t file_number_;    // filled up after flush is complete
+
+  // The updates to be applied to the transaction log when this
+  // memtable is flushed to storage.
+  VersionEdit edit_;
+
+  // The sequence number of the kv that was inserted first
+  std::atomic<SequenceNumber> first_seqno_;
+
+  // The db sequence number at the time of creation or kMaxSequenceNumber
+  // if not set.
+  std::atomic<SequenceNumber> earliest_seqno_;
+
+  // The log files earlier than this number can be deleted.
+  uint64_t mem_next_logfile_number_;
+
+  // The earliest log containing a prepared section
+  // which has been inserted into this memtable.
+  std::atomic<uint64_t> min_prep_log_referenced_;
+
+  // rw locks for inplace updates
+  std::vector<port::RWMutex> locks_;
+
+  const SliceTransform* const prefix_extractor_;
+  std::unique_ptr<DynamicBloom> prefix_bloom_;
+
+  std::atomic<FlushStateEnum> flush_state_;
+
+  Env* env_;
+
+  // Returns a heuristic flush decision
+  bool ShouldFlushNow() const;
+
+  // Updates flush_state_ using ShouldFlushNow()
+  void UpdateFlushState();
+
+  // No copying allowed
+  MemTable(const MemTable&);
+  MemTable& operator=(const MemTable&);
+};
+
+extern const char* EncodeKey(std::string* scratch, const Slice& target);
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/memtable_allocator.cc b/external/rocksdb/db/memtable_allocator.cc
new file mode 100755
index 0000000000..9a7204dd88
--- /dev/null
+++ b/external/rocksdb/db/memtable_allocator.cc
@@ -0,0 +1,59 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/memtable_allocator.h"
+
+#include <assert.h>
+#include "rocksdb/write_buffer_manager.h"
+#include "util/arena.h"
+
+namespace rocksdb {
+
+MemTableAllocator::MemTableAllocator(Allocator* allocator,
+                                     WriteBufferManager* write_buffer_manager)
+    : allocator_(allocator),
+      write_buffer_manager_(write_buffer_manager),
+      bytes_allocated_(0) {}
+
+MemTableAllocator::~MemTableAllocator() { DoneAllocating(); }
+
+char* MemTableAllocator::Allocate(size_t bytes) {
+  assert(write_buffer_manager_ != nullptr);
+  if (write_buffer_manager_->enabled()) {
+    bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
+    write_buffer_manager_->ReserveMem(bytes);
+  }
+  return allocator_->Allocate(bytes);
+}
+
+char* MemTableAllocator::AllocateAligned(size_t bytes, size_t huge_page_size,
+                                         Logger* logger) {
+  assert(write_buffer_manager_ != nullptr);
+  if (write_buffer_manager_->enabled()) {
+    bytes_allocated_.fetch_add(bytes, std::memory_order_relaxed);
+    write_buffer_manager_->ReserveMem(bytes);
+  }
+  return allocator_->AllocateAligned(bytes, huge_page_size, logger);
+}
+
+void MemTableAllocator::DoneAllocating() {
+  if (write_buffer_manager_ != nullptr) {
+    if (write_buffer_manager_->enabled()) {
+      write_buffer_manager_->FreeMem(
+          bytes_allocated_.load(std::memory_order_relaxed));
+    } else {
+      assert(bytes_allocated_.load(std::memory_order_relaxed) == 0);
+    }
+    write_buffer_manager_ = nullptr;
+  }
+}
+
+size_t MemTableAllocator::BlockSize() const { return allocator_->BlockSize(); }
+
+}  // namespace rocksdb
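MemTableAllocator is a decorator: every allocation is forwarded to the wrapped Allocator and simultaneously charged against the WriteBufferManager, and DoneAllocating() refunds the running total exactly once. The decorator shape reduced to std-only parts, as a sketch (the callbacks stand in for the wrapped allocator and the budget manager):

```cpp
#include <atomic>
#include <cstddef>
#include <functional>

// Sketch of the metering pattern above: forward allocations and keep a
// relaxed running total so a budget tracker can be refunded later.
class MeteredAllocator {
 public:
  MeteredAllocator(std::function<char*(size_t)> alloc,
                   std::function<void(size_t)> reserve)
      : alloc_(std::move(alloc)), reserve_(std::move(reserve)), bytes_(0) {}

  char* Allocate(size_t n) {
    bytes_.fetch_add(n, std::memory_order_relaxed);  // track the charge,
    reserve_(n);                                     // notify the budget,
    return alloc_(n);                                // then forward
  }

  size_t BytesAllocated() const {
    return bytes_.load(std::memory_order_relaxed);   // total to refund later
  }

 private:
  std::function<char*(size_t)> alloc_;
  std::function<void(size_t)> reserve_;
  std::atomic<size_t> bytes_;
};
```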
diff --git a/external/rocksdb/db/memtable_allocator.h b/external/rocksdb/db/memtable_allocator.h
new file mode 100755
index 0000000000..050e13b365
--- /dev/null
+++ b/external/rocksdb/db/memtable_allocator.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// This is used by the MemTable to allocate write buffer memory. It connects
+// to WriteBufferManager so we can track and enforce overall write buffer
+// limits.
+
+#pragma once
+
+#include <atomic>
+#include "rocksdb/write_buffer_manager.h"
+#include "util/allocator.h"
+
+namespace rocksdb {
+
+class Logger;
+
+class MemTableAllocator : public Allocator {
+ public:
+  explicit MemTableAllocator(Allocator* allocator,
+                             WriteBufferManager* write_buffer_manager);
+  ~MemTableAllocator();
+
+  // Allocator interface
+  char* Allocate(size_t bytes) override;
+  char* AllocateAligned(size_t bytes, size_t huge_page_size = 0,
+                        Logger* logger = nullptr) override;
+  size_t BlockSize() const override;
+
+  // Call when we're finished allocating memory so we can free it from
+  // the write buffer's limit.
+  void DoneAllocating();
+
+ private:
+  Allocator* allocator_;
+  WriteBufferManager* write_buffer_manager_;
+  std::atomic<size_t> bytes_allocated_;
+
+  // No copying allowed
+  MemTableAllocator(const MemTableAllocator&);
+  void operator=(const MemTableAllocator&);
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/memtable_list.cc b/external/rocksdb/db/memtable_list.cc
new file mode 100755
index 0000000000..799887ed1d
--- /dev/null
+++ b/external/rocksdb/db/memtable_list.cc
@@ -0,0 +1,441 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+#include "db/memtable_list.h"
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+#include <string>
+#include "db/memtable.h"
+#include "db/version_set.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/iterator.h"
+#include "table/merger.h"
+#include "util/coding.h"
+#include "util/log_buffer.h"
+#include "util/sync_point.h"
+#include "util/thread_status_util.h"
+
+namespace rocksdb {
+
+class InternalKeyComparator;
+class Mutex;
+class VersionSet;
+
+void MemTableListVersion::AddMemTable(MemTable* m) {
+  memlist_.push_front(m);
+  *parent_memtable_list_memory_usage_ += m->ApproximateMemoryUsage();
+}
+
+void MemTableListVersion::UnrefMemTable(autovector<MemTable*>* to_delete,
+                                        MemTable* m) {
+  if (m->Unref()) {
+    to_delete->push_back(m);
+    assert(*parent_memtable_list_memory_usage_ >= m->ApproximateMemoryUsage());
+    *parent_memtable_list_memory_usage_ -= m->ApproximateMemoryUsage();
+  }
+}
+
+MemTableListVersion::MemTableListVersion(
+    size_t* parent_memtable_list_memory_usage, MemTableListVersion* old)
+    : max_write_buffer_number_to_maintain_(
+          old->max_write_buffer_number_to_maintain_),
+      parent_memtable_list_memory_usage_(parent_memtable_list_memory_usage) {
+  if (old != nullptr) {
+    memlist_ = old->memlist_;
+    for (auto& m : memlist_) {
+      m->Ref();
+    }
+
+    memlist_history_ = old->memlist_history_;
+    for (auto& m : memlist_history_) {
+      m->Ref();
+    }
+  }
+}
+
+MemTableListVersion::MemTableListVersion(
+    size_t* parent_memtable_list_memory_usage,
+    int max_write_buffer_number_to_maintain)
+    : max_write_buffer_number_to_maintain_(max_write_buffer_number_to_maintain),
+      parent_memtable_list_memory_usage_(parent_memtable_list_memory_usage) {}
+
+void MemTableListVersion::Ref() { ++refs_; }
+
+// called by superversion::clean()
+void MemTableListVersion::Unref(autovector<MemTable*>* to_delete) {
+  assert(refs_ >= 1);
+  --refs_;
+  if (refs_ == 0) {
+    // if to_delete is equal to nullptr it means we're confident
+    // that refs_ will not be zero
+    assert(to_delete != nullptr);
+    for (const auto& m : memlist_) {
+      UnrefMemTable(to_delete, m);
+    }
+    for (const auto& m : memlist_history_) {
+      UnrefMemTable(to_delete, m);
+    }
+    delete this;
+  }
+}
+
+int MemTableList::NumNotFlushed() const {
+  int size = static_cast<int>(current_->memlist_.size());
+  assert(num_flush_not_started_ <= size);
+  return size;
+}
+
+int MemTableList::NumFlushed() const {
+  return static_cast<int>(current_->memlist_history_.size());
+}
+
+// Search all the memtables starting from the most recent one.
+// Return the most recent value found, if any.
+// Operands stores the list of merge operations to apply, so far.
+bool MemTableListVersion::Get(const LookupKey& key, std::string* value,
+                              Status* s, MergeContext* merge_context,
+                              SequenceNumber* seq) {
+  return GetFromList(&memlist_, key, value, s, merge_context, seq);
+}
+
+bool MemTableListVersion::GetFromHistory(const LookupKey& key,
+                                         std::string* value, Status* s,
+                                         MergeContext* merge_context,
+                                         SequenceNumber* seq) {
+  return GetFromList(&memlist_history_, key, value, s, merge_context, seq);
+}
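GetFromList, defined just below, can stop at the first memtable that answers because memlist_ runs from newest to oldest and only the most recent operation on a key is authoritative. The search shape over a std-only stand-in, as a sketch:

```cpp
#include <list>
#include <map>
#include <string>

// Sketch: probe stand-in "memtables" newest-first and stop at the first
// hit, mirroring the reverse-chronological search below.
bool GetNewestFirst(
    const std::list<std::map<std::string, std::string>>& memtables,
    const std::string& key, std::string* value) {
  for (const auto& table : memtables) {  // front() is the newest table
    auto it = table.find(key);
    if (it != table.end()) {
      *value = it->second;  // older tables can only hold stale versions
      return true;
    }
  }
  return false;
}
```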
+
+bool MemTableListVersion::GetFromList(std::list<MemTable*>* list,
+                                      const LookupKey& key, std::string* value,
+                                      Status* s, MergeContext* merge_context,
+                                      SequenceNumber* seq) {
+  *seq = kMaxSequenceNumber;
+
+  for (auto& memtable : *list) {
+    SequenceNumber current_seq = kMaxSequenceNumber;
+
+    bool done = memtable->Get(key, value, s, merge_context, &current_seq);
+    if (*seq == kMaxSequenceNumber) {
+      // Store the most recent sequence number of any operation on this key.
+      // Since we only care about the most recent change, we only need to
+      // return the first operation found when searching memtables in
+      // reverse-chronological order.
+      *seq = current_seq;
+    }
+
+    if (done) {
+      assert(*seq != kMaxSequenceNumber);
+      return true;
+    }
+  }
+  return false;
+}
+
+void MemTableListVersion::AddIterators(
+    const ReadOptions& options, std::vector<InternalIterator*>* iterator_list,
+    Arena* arena) {
+  for (auto& m : memlist_) {
+    iterator_list->push_back(m->NewIterator(options, arena));
+  }
+}
+
+void MemTableListVersion::AddIterators(
+    const ReadOptions& options, MergeIteratorBuilder* merge_iter_builder) {
+  for (auto& m : memlist_) {
+    merge_iter_builder->AddIterator(
+        m->NewIterator(options, merge_iter_builder->GetArena()));
+  }
+}
+
+uint64_t MemTableListVersion::GetTotalNumEntries() const {
+  uint64_t total_num = 0;
+  for (auto& m : memlist_) {
+    total_num += m->num_entries();
+  }
+  return total_num;
+}
+
+uint64_t MemTableListVersion::ApproximateSize(const Slice& start_ikey,
+                                              const Slice& end_ikey) {
+  uint64_t total_size = 0;
+  for (auto& m : memlist_) {
+    total_size += m->ApproximateSize(start_ikey, end_ikey);
+  }
+  return total_size;
+}
+
+uint64_t MemTableListVersion::GetTotalNumDeletes() const {
+  uint64_t total_num = 0;
+  for (auto& m : memlist_) {
+    total_num += m->num_deletes();
+  }
+  return total_num;
+}
+
+SequenceNumber MemTableListVersion::GetEarliestSequenceNumber(
+    bool include_history) const {
+  if (include_history && !memlist_history_.empty()) {
+    return memlist_history_.back()->GetEarliestSequenceNumber();
+  } else if (!memlist_.empty()) {
+    return memlist_.back()->GetEarliestSequenceNumber();
+  } else {
+    return kMaxSequenceNumber;
+  }
+}
+
+// caller is responsible for referencing m
+void MemTableListVersion::Add(MemTable* m, autovector<MemTable*>* to_delete) {
+  assert(refs_ == 1);  // only when refs_ == 1 is MemTableListVersion mutable
+  AddMemTable(m);
+
+  TrimHistory(to_delete);
+}
+
+// Removes m from list of memtables not flushed. Caller should NOT Unref m.
+void MemTableListVersion::Remove(MemTable* m,
+                                 autovector<MemTable*>* to_delete) {
+  assert(refs_ == 1);  // only when refs_ == 1 is MemTableListVersion mutable
+  memlist_.remove(m);
+
+  if (max_write_buffer_number_to_maintain_ > 0) {
+    memlist_history_.push_front(m);
+    TrimHistory(to_delete);
+  } else {
+    UnrefMemTable(to_delete, m);
+  }
+}
+
+// Make sure we don't use up too much space in history
+void MemTableListVersion::TrimHistory(autovector<MemTable*>* to_delete) {
+  while (memlist_.size() + memlist_history_.size() >
+             static_cast<size_t>(max_write_buffer_number_to_maintain_) &&
+         !memlist_history_.empty()) {
+    MemTable* x = memlist_history_.back();
+    memlist_history_.pop_back();
+
+    UnrefMemTable(to_delete, x);
+  }
+}
+
+// Returns true if there is at least one memtable on which flush has
+// not yet started.
+bool MemTableList::IsFlushPending() const {
+  if ((flush_requested_ && num_flush_not_started_ >= 1) ||
+      (num_flush_not_started_ >= min_write_buffer_number_to_merge_)) {
+    assert(imm_flush_needed.load(std::memory_order_relaxed));
+    return true;
+  }
+  return false;
+}
+
+// Returns the memtables that need to be flushed.
+void MemTableList::PickMemtablesToFlush(autovector<MemTable*>* ret) {
+  AutoThreadOperationStageUpdater stage_updater(
+      ThreadStatus::STAGE_PICK_MEMTABLES_TO_FLUSH);
+  const auto& memlist = current_->memlist_;
+  for (auto it = memlist.rbegin(); it != memlist.rend(); ++it) {
+    MemTable* m = *it;
+    if (!m->flush_in_progress_) {
+      assert(!m->flush_completed_);
+      num_flush_not_started_--;
+      if (num_flush_not_started_ == 0) {
+        imm_flush_needed.store(false, std::memory_order_release);
+      }
+      m->flush_in_progress_ = true;  // flushing will start very soon
+      ret->push_back(m);
+    }
+  }
+  flush_requested_ = false;  // start-flush request is complete
+}
+
+void MemTableList::RollbackMemtableFlush(const autovector<MemTable*>& mems,
+                                         uint64_t file_number) {
+  AutoThreadOperationStageUpdater stage_updater(
+      ThreadStatus::STAGE_MEMTABLE_ROLLBACK);
+  assert(!mems.empty());
+
+  // If the flush was not successful, then just reset state.
+  // Maybe a succeeding attempt to flush will be successful.
+  for (MemTable* m : mems) {
+    assert(m->flush_in_progress_);
+    assert(m->file_number_ == 0);
+
+    m->flush_in_progress_ = false;
+    m->flush_completed_ = false;
+    m->edit_.Clear();
+    num_flush_not_started_++;
+  }
+  imm_flush_needed.store(true, std::memory_order_release);
+}
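Together with IsFlushPending(), the two functions above define the background flush lifecycle: pick, attempt to build an SST, and either install the result or roll the memtables back so a later attempt can pick them again. A hedged sketch of that caller-side loop, assuming the vendored headers in this diff; BuildTableFromMemtables is a hypothetical stand-in for the real flush job:

```cpp
#include "db/memtable_list.h"  // vendored header later in this diff
#include "util/autovector.h"

// Hypothetical: writes the picked memtables out as an SST file.
rocksdb::Status BuildTableFromMemtables(
    const rocksdb::autovector<rocksdb::MemTable*>& mems);

void FlushOnce(rocksdb::MemTableList* imm) {
  rocksdb::autovector<rocksdb::MemTable*> picked;
  imm->PickMemtablesToFlush(&picked);  // marks them flush_in_progress_
  if (picked.empty()) {
    return;
  }
  if (!BuildTableFromMemtables(picked).ok()) {
    // Failure: reset state so a later attempt can pick these tables again.
    imm->RollbackMemtableFlush(picked, 0 /* file_number */);
    return;
  }
  // On success the caller would go on to InstallMemtableFlushResults().
}
```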
+
+// Record a successful flush in the manifest file
+Status MemTableList::InstallMemtableFlushResults(
+    ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options,
+    const autovector<MemTable*>& mems, VersionSet* vset, InstrumentedMutex* mu,
+    uint64_t file_number, autovector<MemTable*>* to_delete,
+    Directory* db_directory, LogBuffer* log_buffer) {
+  AutoThreadOperationStageUpdater stage_updater(
+      ThreadStatus::STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS);
+  mu->AssertHeld();
+
+  // flush was successful
+  for (size_t i = 0; i < mems.size(); ++i) {
+    // All the edits are associated with the first memtable of this batch.
+    assert(i == 0 || mems[i]->GetEdits()->NumEntries() == 0);
+
+    mems[i]->flush_completed_ = true;
+    mems[i]->file_number_ = file_number;
+  }
+
+  // if some other thread is already committing, then return
+  Status s;
+  if (commit_in_progress_) {
+    TEST_SYNC_POINT("MemTableList::InstallMemtableFlushResults:InProgress");
+    return s;
+  }
+
+  // Only a single thread can be executing this piece of code
+  commit_in_progress_ = true;
+
+  // Retry until all completed flushes are committed. New flushes can finish
+  // while the current thread is writing manifest where mutex is released.
+  while (s.ok()) {
+    auto& memlist = current_->memlist_;
+    if (memlist.empty() || !memlist.back()->flush_completed_) {
+      break;
+    }
+    // scan all memtables from the earliest, and commit those
+    // (in that order) that have finished flushing. Memtables
+    // are always committed in the order that they were created.
+    uint64_t batch_file_number = 0;
+    size_t batch_count = 0;
+    autovector<VersionEdit*> edit_list;
+    // enumerate from the last (earliest) element to see how many batches
+    // have finished
+    for (auto it = memlist.rbegin(); it != memlist.rend(); ++it) {
+      MemTable* m = *it;
+      if (!m->flush_completed_) {
+        break;
+      }
+      if (it == memlist.rbegin() || batch_file_number != m->file_number_) {
+        batch_file_number = m->file_number_;
+        LogToBuffer(log_buffer,
+                    "[%s] Level-0 commit table #%" PRIu64 " started",
+                    cfd->GetName().c_str(), m->file_number_);
+        edit_list.push_back(&m->edit_);
+      }
+      batch_count++;
+    }
+
+    if (batch_count > 0) {
+      // this can release and reacquire the mutex.
+      s = vset->LogAndApply(cfd, mutable_cf_options, edit_list, mu,
+                            db_directory);
+
+      // we will be changing the version in the next code path,
+      // so we better create a new one, since versions are immutable
+      InstallNewVersion();
+
+      // All the later memtables that have the same filenum
+      // are part of the same batch. They can be committed now.
+      uint64_t mem_id = 1;  // how many memtables have been flushed.
+      if (s.ok()) {         // commit new state
+        while (batch_count-- > 0) {
+          MemTable* m = current_->memlist_.back();
+          LogToBuffer(log_buffer, "[%s] Level-0 commit table #%" PRIu64
+                                  ": memtable #%" PRIu64 " done",
+                      cfd->GetName().c_str(), m->file_number_, mem_id);
+          assert(m->file_number_ > 0);
+          current_->Remove(m, to_delete);
+          ++mem_id;
+        }
+      } else {
+        for (auto it = current_->memlist_.rbegin(); batch_count-- > 0; it++) {
+          MemTable* m = *it;
+          // commit failed. setup state so that we can flush again.
+          LogToBuffer(log_buffer, "Level-0 commit table #%" PRIu64
+                                  ": memtable #%" PRIu64 " failed",
+                      m->file_number_, mem_id);
+          m->flush_completed_ = false;
+          m->flush_in_progress_ = false;
+          m->edit_.Clear();
+          num_flush_not_started_++;
+          m->file_number_ = 0;
+          imm_flush_needed.store(true, std::memory_order_release);
+          ++mem_id;
+        }
+      }
+    }
+  }
+  commit_in_progress_ = false;
+  return s;
+}
+
+// New memtables are inserted at the front of the list.
+void MemTableList::Add(MemTable* m, autovector<MemTable*>* to_delete) {
+  assert(static_cast<int>(current_->memlist_.size()) >= num_flush_not_started_);
+  InstallNewVersion();
+  // this method is used to move mutable memtable into an immutable list.
+  // since mutable memtable is already refcounted by the DBImpl,
+  // and when moving to the immutable list we don't unref it,
+  // we don't have to ref the memtable here. we just take over the
+  // reference from the DBImpl.
+  current_->Add(m, to_delete);
+  m->MarkImmutable();
+  num_flush_not_started_++;
+  if (num_flush_not_started_ == 1) {
+    imm_flush_needed.store(true, std::memory_order_release);
+  }
+}
+
+// Returns an estimate of the number of bytes of data in use.
+size_t MemTableList::ApproximateUnflushedMemTablesMemoryUsage() {
+  size_t total_size = 0;
+  for (auto& memtable : current_->memlist_) {
+    total_size += memtable->ApproximateMemoryUsage();
+  }
+  return total_size;
+}
+
+size_t MemTableList::ApproximateMemoryUsage() { return current_memory_usage_; }
+
+void MemTableList::InstallNewVersion() {
+  if (current_->refs_ == 1) {
+    // we're the only one using the version, just keep using it
+  } else {
+    // somebody else holds the current version, we need to create new one
+    MemTableListVersion* version = current_;
+    current_ = new MemTableListVersion(&current_memory_usage_, current_);
+    current_->Ref();
+    version->Unref();
+  }
+}
+
+uint64_t MemTableList::GetMinLogContainingPrepSection() {
+  uint64_t min_log = 0;
+
+  for (auto& m : current_->memlist_) {
+    // this mem has been flushed; it no longer
+    // needs to hold on to its prep section
+    if (m->flush_completed_) {
+      continue;
+    }
+
+    auto log = m->GetMinLogContainingPrepSection();
+
+    if (log > 0 && (min_log == 0 || log < min_log)) {
+      min_log = log;
+    }
+  }
+
+  return min_log;
+}
+
+}  // namespace rocksdb
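InstallNewVersion above is a copy-on-write step: keep mutating the version while this thread holds the only reference, otherwise clone it and retire the shared copy. The same idea in miniature, a std-only sketch with shared_ptr standing in for the manual refcount (a single writer is assumed, as the db mutex guarantees above):

```cpp
#include <memory>
#include <vector>

using Version = std::vector<int>;  // stand-in for MemTableListVersion

void InstallNewVersionSketch(std::shared_ptr<Version>& current) {
  if (current.use_count() > 1) {
    // Readers still hold the old version: clone, leave theirs untouched.
    current = std::make_shared<Version>(*current);
  }
  // Otherwise we are the only user and may mutate `current` in place.
}
```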
diff --git a/external/rocksdb/db/memtable_list.h b/external/rocksdb/db/memtable_list.h
new file mode 100755
index 0000000000..7f633f026b
--- /dev/null
+++ b/external/rocksdb/db/memtable_list.h
@@ -0,0 +1,241 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+#pragma once
+
+#include <string>
+#include <list>
+#include <vector>
+#include <set>
+#include <deque>
+
+#include "db/dbformat.h"
+#include "db/filename.h"
+#include "db/memtable.h"
+#include "db/skiplist.h"
+#include "rocksdb/db.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/types.h"
+#include "util/autovector.h"
+#include "util/instrumented_mutex.h"
+#include "util/log_buffer.h"
+
+namespace rocksdb {
+
+class ColumnFamilyData;
+class InternalKeyComparator;
+class InstrumentedMutex;
+class MergeIteratorBuilder;
+
+// Keeps a list of immutable memtables in a vector. The list is immutable
+// if refcount is bigger than one. It is used as a state for Get() and
+// Iterator code paths.
+//
+// This class is not thread-safe. External synchronization is required
+// (such as holding the db mutex or being on the write thread).
+class MemTableListVersion {
+ public:
+  explicit MemTableListVersion(size_t* parent_memtable_list_memory_usage,
+                               MemTableListVersion* old = nullptr);
+  explicit MemTableListVersion(size_t* parent_memtable_list_memory_usage,
+                               int max_write_buffer_number_to_maintain);
+
+  void Ref();
+  void Unref(autovector<MemTable*>* to_delete = nullptr);
+
+  // Search all the memtables starting from the most recent one.
+  // Return the most recent value found, if any.
+  //
+  // If any operation was found for this key, its most recent sequence number
+  // will be stored in *seq on success (regardless of whether true/false is
+  // returned). Otherwise, *seq will be set to kMaxSequenceNumber.
+  bool Get(const LookupKey& key, std::string* value, Status* s,
+           MergeContext* merge_context, SequenceNumber* seq);
+
+  bool Get(const LookupKey& key, std::string* value, Status* s,
+           MergeContext* merge_context) {
+    SequenceNumber seq;
+    return Get(key, value, s, merge_context, &seq);
+  }
+
+  // Similar to Get(), but searches the Memtable history of memtables that
+  // have already been flushed. Should only be used from in-memory only
+  // queries (such as Transaction validation) as the history may contain
+  // writes that are also present in the SST files.
+  bool GetFromHistory(const LookupKey& key, std::string* value, Status* s,
+                      MergeContext* merge_context, SequenceNumber* seq);
+  bool GetFromHistory(const LookupKey& key, std::string* value, Status* s,
+                      MergeContext* merge_context) {
+    SequenceNumber seq;
+    return GetFromHistory(key, value, s, merge_context, &seq);
+  }
+
+  void AddIterators(const ReadOptions& options,
+                    std::vector<InternalIterator*>* iterator_list,
+                    Arena* arena);
+
+  void AddIterators(const ReadOptions& options,
+                    MergeIteratorBuilder* merge_iter_builder);
+
+  uint64_t GetTotalNumEntries() const;
+
+  uint64_t GetTotalNumDeletes() const;
+
+  uint64_t ApproximateSize(const Slice& start_ikey, const Slice& end_ikey);
+
+  // Returns the value of MemTable::GetEarliestSequenceNumber() on the most
+  // recent MemTable in this list or kMaxSequenceNumber if the list is empty.
+  // If include_history=true, will also search Memtables in MemTableList
+  // History.
+  SequenceNumber GetEarliestSequenceNumber(bool include_history = false) const;
+
+ private:
+  // REQUIRE: m is an immutable memtable
+  void Add(MemTable* m, autovector<MemTable*>* to_delete);
+  // REQUIRE: m is an immutable memtable
+  void Remove(MemTable* m, autovector<MemTable*>* to_delete);
+
+  void TrimHistory(autovector<MemTable*>* to_delete);
+
+  bool GetFromList(std::list<MemTable*>* list, const LookupKey& key,
+                   std::string* value, Status* s, MergeContext* merge_context,
+                   SequenceNumber* seq);
+
+  void AddMemTable(MemTable* m);
+
+  void UnrefMemTable(autovector<MemTable*>* to_delete, MemTable* m);
+
+  friend class MemTableList;
+
+  // Immutable MemTables that have not yet been flushed.
+  std::list<MemTable*> memlist_;
+
+  // MemTables that have already been flushed
+  // (used during Transaction validation)
+  std::list<MemTable*> memlist_history_;
+
+  // Maximum number of MemTables to keep in memory (including both flushed
+  // and not-yet-flushed tables).
+  const int max_write_buffer_number_to_maintain_;
+
+  int refs_ = 0;
+
+  size_t* parent_memtable_list_memory_usage_;
+};
+
+// This class stores references to all the immutable memtables.
+// The memtables are flushed to L0 as soon as possible and in
+// any order. If there are more than one immutable memtable, their
+// flushes can occur concurrently. However, they are 'committed'
+// to the manifest in FIFO order to maintain correctness and
+// recoverability from a crash.
+//
+// Other than imm_flush_needed, this class is not thread-safe and requires
+// external synchronization (such as holding the db mutex or being on the
+// write thread.)
+class MemTableList {
+ public:
+  // A list of memtables.
+  explicit MemTableList(int min_write_buffer_number_to_merge,
+                        int max_write_buffer_number_to_maintain)
+      : imm_flush_needed(false),
+        min_write_buffer_number_to_merge_(min_write_buffer_number_to_merge),
+        current_(new MemTableListVersion(&current_memory_usage_,
+                                         max_write_buffer_number_to_maintain)),
+        num_flush_not_started_(0),
+        commit_in_progress_(false),
+        flush_requested_(false) {
+    current_->Ref();
+    current_memory_usage_ = 0;
+  }
+
+  // Should not delete MemTableList without making sure MemTableList::current()
+  // is Unref()'d.
+  ~MemTableList() {}
+
+  MemTableListVersion* current() { return current_; }
+
+  // so that background threads can detect non-nullptr pointer to
+  // determine whether there is anything more to start flushing.
+  std::atomic<bool> imm_flush_needed;
+
+  // Returns the total number of memtables in the list that haven't yet
+  // been flushed and logged.
+  int NumNotFlushed() const;
+
+  // Returns total number of memtables in the list that have been
+  // completely flushed and logged.
+  int NumFlushed() const;
+
+  // Returns true if there is at least one memtable on which flush has
+  // not yet started.
+  bool IsFlushPending() const;
+
+  // Returns the earliest memtables that need to be flushed. The returned
+  // memtables are guaranteed to be in the ascending order of created time.
+  void PickMemtablesToFlush(autovector<MemTable*>* mems);
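GetFromHistory is what makes in-memory conflict checks possible after a flush: a caller can still learn the newest sequence number that touched a key even though the memtable holding it has already been written out. A sketch of such a check, assuming only the interfaces declared in this diff (HasConflict and snapshot_seq are illustrative names, not vendored code):

```cpp
#include "db/memtable_list.h"   // vendored headers in this diff
#include "db/merge_context.h"

// Illustrative: did anyone write `lkey` after the transaction's snapshot?
bool HasConflict(rocksdb::MemTableListVersion* v,
                 const rocksdb::LookupKey& lkey,
                 rocksdb::SequenceNumber snapshot_seq) {
  std::string value;
  rocksdb::Status s;
  rocksdb::MergeContext ctx;
  rocksdb::SequenceNumber seq = rocksdb::kMaxSequenceNumber;
  bool found = v->Get(lkey, &value, &s, &ctx, &seq);
  if (!found) {
    found = v->GetFromHistory(lkey, &value, &s, &ctx, &seq);
  }
  return found && seq > snapshot_seq;  // a newer write means a conflict
}
```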
+
+  // Reset status of the given memtable list back to pending state so that
+  // they can get picked up again on the next round of flush.
+  void RollbackMemtableFlush(const autovector<MemTable*>& mems,
+                             uint64_t file_number);
+
+  // Commit a successful flush in the manifest file
+  Status InstallMemtableFlushResults(
+      ColumnFamilyData* cfd, const MutableCFOptions& mutable_cf_options,
+      const autovector<MemTable*>& m, VersionSet* vset, InstrumentedMutex* mu,
+      uint64_t file_number, autovector<MemTable*>* to_delete,
+      Directory* db_directory, LogBuffer* log_buffer);
+
+  // New memtables are inserted at the front of the list.
+  // Takes ownership of the reference held on *m by the caller of Add().
+  void Add(MemTable* m, autovector<MemTable*>* to_delete);
+
+  // Returns an estimate of the number of bytes of data in use.
+  size_t ApproximateMemoryUsage();
+
+  // Returns an estimate of the number of bytes of data used by
+  // the unflushed mem-tables.
+  size_t ApproximateUnflushedMemTablesMemoryUsage();
+
+  // Request a flush of all existing memtables to storage. This will
+  // cause future calls to IsFlushPending() to return true if this list is
+  // non-empty (regardless of the min_write_buffer_number_to_merge
+  // parameter). This flush request will persist until the next time
+  // PickMemtablesToFlush() is called.
+  void FlushRequested() { flush_requested_ = true; }
+
+  // Copying allowed
+  // MemTableList(const MemTableList&);
+  // void operator=(const MemTableList&);
+
+  size_t* current_memory_usage() { return &current_memory_usage_; }
+
+  uint64_t GetMinLogContainingPrepSection();
+
+ private:
+  // DB mutex held
+  void InstallNewVersion();
+
+  const int min_write_buffer_number_to_merge_;
+
+  MemTableListVersion* current_;
+
+  // the number of elements that still need flushing
+  int num_flush_not_started_;
+
+  // committing in progress
+  bool commit_in_progress_;
+
+  // Requested a flush of all memtables to storage
+  bool flush_requested_;
+
+  // The current memory usage.
+  size_t current_memory_usage_;
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/memtable_list_test.cc b/external/rocksdb/db/memtable_list_test.cc
new file mode 100755
index 0000000000..42420b876c
--- /dev/null
+++ b/external/rocksdb/db/memtable_list_test.cc
@@ -0,0 +1,601 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "db/memtable_list.h"
+#include <algorithm>
+#include <string>
+#include <vector>
+#include "db/merge_context.h"
+#include "db/version_set.h"
+#include "db/write_controller.h"
+#include "rocksdb/db.h"
+#include "rocksdb/status.h"
+#include "rocksdb/write_buffer_manager.h"
+#include "util/string_util.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace rocksdb {
+
+class MemTableListTest : public testing::Test {
+ public:
+  std::string dbname;
+  DB* db;
+  Options options;
+
+  MemTableListTest() : db(nullptr) {
+    dbname = test::TmpDir() + "/memtable_list_test";
+  }
+
+  // Create a test db if not yet created
+  void CreateDB() {
+    if (db == nullptr) {
+      options.create_if_missing = true;
+      DestroyDB(dbname, options);
+      Status s = DB::Open(options, dbname, &db);
+      EXPECT_OK(s);
+    }
+  }
+
+  ~MemTableListTest() {
+    if (db) {
+      delete db;
+      DestroyDB(dbname, options);
+    }
+  }
+
+  // Calls MemTableList::InstallMemtableFlushResults() and sets up all
+  // structures needed to call this function.
+  Status Mock_InstallMemtableFlushResults(
+      MemTableList* list, const MutableCFOptions& mutable_cf_options,
+      const autovector<MemTable*>& m, autovector<MemTable*>* to_delete) {
+    // Create a mock Logger
+    test::NullLogger logger;
+    LogBuffer log_buffer(DEBUG_LEVEL, &logger);
+
+    // Create a mock VersionSet
+    DBOptions db_options;
+    EnvOptions env_options;
+    shared_ptr<Cache> table_cache(NewLRUCache(50000, 16));
+    WriteBufferManager write_buffer_manager(db_options.db_write_buffer_size);
+    WriteController write_controller(10000000u);
+
+    CreateDB();
+    VersionSet versions(dbname, &db_options, env_options, table_cache.get(),
+                        &write_buffer_manager, &write_controller);
+
+    // Create mock default ColumnFamilyData
+    ColumnFamilyOptions cf_options;
+    std::vector<ColumnFamilyDescriptor> column_families;
+    column_families.emplace_back(kDefaultColumnFamilyName, cf_options);
+    EXPECT_OK(versions.Recover(column_families, false));
+
+    auto column_family_set = versions.GetColumnFamilySet();
+    auto cfd = column_family_set->GetColumnFamily(0);
+    EXPECT_TRUE(cfd != nullptr);
+
+    // Create dummy mutex.
+    InstrumentedMutex mutex;
+    InstrumentedMutexLock l(&mutex);
+
+    return list->InstallMemtableFlushResults(cfd, mutable_cf_options, m,
+                                             &versions, &mutex, 1, to_delete,
+                                             nullptr, &log_buffer);
+  }
+};
+
+TEST_F(MemTableListTest, Empty) {
+  // Create an empty MemTableList and validate basic functions.
+  MemTableList list(1, 0);
+
+  ASSERT_EQ(0, list.NumNotFlushed());
+  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
+  ASSERT_FALSE(list.IsFlushPending());
+
+  autovector<MemTable*> mems;
+  list.PickMemtablesToFlush(&mems);
+  ASSERT_EQ(0, mems.size());
+
+  autovector<MemTable*> to_delete;
+  list.current()->Unref(&to_delete);
+  ASSERT_EQ(0, to_delete.size());
+}
+
+TEST_F(MemTableListTest, GetTest) {
+  // Create MemTableList
+  int min_write_buffer_number_to_merge = 2;
+  int max_write_buffer_number_to_maintain = 0;
+  MemTableList list(min_write_buffer_number_to_merge,
+                    max_write_buffer_number_to_maintain);
+
+  SequenceNumber seq = 1;
+  std::string value;
+  Status s;
+  MergeContext merge_context;
+  autovector<MemTable*> to_delete;
+
+  LookupKey lkey("key1", seq);
+  bool found = list.current()->Get(lkey, &value, &s, &merge_context);
+  ASSERT_FALSE(found);
+
+  // Create a MemTable
+  InternalKeyComparator cmp(BytewiseComparator());
+  auto factory = std::make_shared<SkipListFactory>();
+  options.memtable_factory = factory;
+  ImmutableCFOptions ioptions(options);
+
+  WriteBufferManager wb(options.db_write_buffer_size);
+  MemTable* mem =
+      new MemTable(cmp, ioptions, MutableCFOptions(options, ioptions), &wb,
+                   kMaxSequenceNumber);
+  mem->Ref();
+
+  // Write some keys to this memtable.
+  mem->Add(++seq, kTypeDeletion, "key1", "");
+  mem->Add(++seq, kTypeValue, "key2", "value2");
+  mem->Add(++seq, kTypeValue, "key1", "value1");
+  mem->Add(++seq, kTypeValue, "key2", "value2.2");
+
+  // Fetch the newly written keys
+  merge_context.Clear();
+  found = mem->Get(LookupKey("key1", seq), &value, &s, &merge_context);
+  ASSERT_TRUE(s.ok() && found);
+  ASSERT_EQ(value, "value1");
+
+  merge_context.Clear();
+  found = mem->Get(LookupKey("key1", 2), &value, &s, &merge_context);
+  // MemTable found out that this key is *not* found (at this sequence#)
+  ASSERT_TRUE(found && s.IsNotFound());
+
+  merge_context.Clear();
+  found = mem->Get(LookupKey("key2", seq), &value, &s, &merge_context);
+  ASSERT_TRUE(s.ok() && found);
+  ASSERT_EQ(value, "value2.2");
+
+  ASSERT_EQ(4, mem->num_entries());
+  ASSERT_EQ(1, mem->num_deletes());
+
+  // Add memtable to list
+  list.Add(mem, &to_delete);
+
+  SequenceNumber saved_seq = seq;
+
+  // Create another memtable and write some keys to it
+  WriteBufferManager wb2(options.db_write_buffer_size);
+  MemTable* mem2 =
+      new MemTable(cmp, ioptions, MutableCFOptions(options, ioptions), &wb2,
+                   kMaxSequenceNumber);
+  mem2->Ref();
+
+  mem2->Add(++seq, kTypeDeletion, "key1", "");
+  mem2->Add(++seq, kTypeValue, "key2", "value2.3");
+
+  // Add second memtable to list
+  list.Add(mem2, &to_delete);
+
+  // Fetch keys via MemTableList
+  merge_context.Clear();
+  found =
+      list.current()->Get(LookupKey("key1", seq), &value, &s, &merge_context);
+  ASSERT_TRUE(found && s.IsNotFound());
+
+  merge_context.Clear();
+  found = list.current()->Get(LookupKey("key1", saved_seq), &value, &s,
+                              &merge_context);
+  ASSERT_TRUE(s.ok() && found);
+  ASSERT_EQ("value1", value);
+
+  merge_context.Clear();
+  found =
+      list.current()->Get(LookupKey("key2", seq), &value, &s, &merge_context);
+  ASSERT_TRUE(s.ok() && found);
+  ASSERT_EQ(value, "value2.3");
+
+  merge_context.Clear();
+  found = list.current()->Get(LookupKey("key2", 1), &value, &s, &merge_context);
+  ASSERT_FALSE(found);
+
+  ASSERT_EQ(2, list.NumNotFlushed());
+
+  list.current()->Unref(&to_delete);
+  for (MemTable* m : to_delete) {
+    delete m;
+  }
+}
+
+TEST_F(MemTableListTest, GetFromHistoryTest) {
+  // Create MemTableList
+  int min_write_buffer_number_to_merge = 2;
+  int max_write_buffer_number_to_maintain = 2;
+  MemTableList list(min_write_buffer_number_to_merge,
+                    max_write_buffer_number_to_maintain);
+
+  SequenceNumber seq = 1;
+  std::string value;
+  Status s;
+  MergeContext merge_context;
+  autovector<MemTable*> to_delete;
+
+  LookupKey lkey("key1", seq);
+  bool found = list.current()->Get(lkey, &value, &s, &merge_context);
+  ASSERT_FALSE(found);
+
+  // Create a MemTable
+  InternalKeyComparator cmp(BytewiseComparator());
+  auto factory = std::make_shared<SkipListFactory>();
+  options.memtable_factory = factory;
+  ImmutableCFOptions ioptions(options);
+
+  WriteBufferManager wb(options.db_write_buffer_size);
+  MemTable* mem =
+      new MemTable(cmp, ioptions, MutableCFOptions(options, ioptions), &wb,
+                   kMaxSequenceNumber);
+  mem->Ref();
+
+  // Write some keys to this memtable.
+  mem->Add(++seq, kTypeDeletion, "key1", "");
+  mem->Add(++seq, kTypeValue, "key2", "value2");
+  mem->Add(++seq, kTypeValue, "key2", "value2.2");
+
+  // Fetch the newly written keys
+  merge_context.Clear();
+  found = mem->Get(LookupKey("key1", seq), &value, &s, &merge_context);
+  // MemTable found out that this key is *not* found (at this sequence#)
+  ASSERT_TRUE(found && s.IsNotFound());
+
+  merge_context.Clear();
+  found = mem->Get(LookupKey("key2", seq), &value, &s, &merge_context);
+  ASSERT_TRUE(s.ok() && found);
+  ASSERT_EQ(value, "value2.2");
+
+  // Add memtable to list
+  list.Add(mem, &to_delete);
+  ASSERT_EQ(0, to_delete.size());
+
+  // Fetch keys via MemTableList
+  merge_context.Clear();
+  found =
+      list.current()->Get(LookupKey("key1", seq), &value, &s, &merge_context);
+  ASSERT_TRUE(found && s.IsNotFound());
+
+  merge_context.Clear();
+  found =
+      list.current()->Get(LookupKey("key2", seq), &value, &s, &merge_context);
+  ASSERT_TRUE(s.ok() && found);
+  ASSERT_EQ("value2.2", value);
+
+  // Flush this memtable from the list.
+  // (It will then be a part of the memtable history).
+  autovector<MemTable*> to_flush;
+  list.PickMemtablesToFlush(&to_flush);
+  ASSERT_EQ(1, to_flush.size());
+
+  s = Mock_InstallMemtableFlushResults(
+      &list, MutableCFOptions(options, ioptions), to_flush, &to_delete);
+  ASSERT_OK(s);
+  ASSERT_EQ(0, list.NumNotFlushed());
+  ASSERT_EQ(1, list.NumFlushed());
+  ASSERT_EQ(0, to_delete.size());
+
+  // Verify keys are no longer in MemTableList
+  merge_context.Clear();
+  found =
+      list.current()->Get(LookupKey("key1", seq), &value, &s, &merge_context);
+  ASSERT_FALSE(found);
+
+  merge_context.Clear();
+  found =
+      list.current()->Get(LookupKey("key2", seq), &value, &s, &merge_context);
+  ASSERT_FALSE(found);
+
+  // Verify keys are present in history
+  merge_context.Clear();
+  found = list.current()->GetFromHistory(LookupKey("key1", seq), &value, &s,
+                                         &merge_context);
+  ASSERT_TRUE(found && s.IsNotFound());
+
+  merge_context.Clear();
+  found = list.current()->GetFromHistory(LookupKey("key2", seq), &value, &s,
+                                         &merge_context);
+  ASSERT_TRUE(found);
+  ASSERT_EQ("value2.2", value);
+
+  // Create another memtable and write some keys to it
+  WriteBufferManager wb2(options.db_write_buffer_size);
+  MemTable* mem2 =
+      new MemTable(cmp, ioptions, MutableCFOptions(options, ioptions), &wb2,
+                   kMaxSequenceNumber);
+  mem2->Ref();
+
+  mem2->Add(++seq, kTypeDeletion, "key1", "");
+  mem2->Add(++seq, kTypeValue, "key3", "value3");
+
+  // Add second memtable to list
+  list.Add(mem2, &to_delete);
+  ASSERT_EQ(0, to_delete.size());
+
+  to_flush.clear();
+  list.PickMemtablesToFlush(&to_flush);
+  ASSERT_EQ(1, to_flush.size());
+
+  // Flush second memtable
+  s = Mock_InstallMemtableFlushResults(
+      &list, MutableCFOptions(options, ioptions), to_flush, &to_delete);
+  ASSERT_OK(s);
+  ASSERT_EQ(0, list.NumNotFlushed());
+  ASSERT_EQ(2, list.NumFlushed());
+  ASSERT_EQ(0, to_delete.size());
+
+  // Add a third memtable to push the first memtable out of the history
+  WriteBufferManager wb3(options.db_write_buffer_size);
+  MemTable* mem3 =
+      new MemTable(cmp, ioptions, MutableCFOptions(options, ioptions), &wb3,
+                   kMaxSequenceNumber);
+  mem3->Ref();
+  list.Add(mem3, &to_delete);
+  ASSERT_EQ(1, list.NumNotFlushed());
+  ASSERT_EQ(1, list.NumFlushed());
+  ASSERT_EQ(1, to_delete.size());
+
+  // Verify keys are no longer in MemTableList
+  merge_context.Clear();
+  found =
+      list.current()->Get(LookupKey("key1", seq), &value, &s, &merge_context);
+  ASSERT_FALSE(found);
+
+  merge_context.Clear();
+  found =
+      list.current()->Get(LookupKey("key2", seq), &value, &s, &merge_context);
+  ASSERT_FALSE(found);
+
+  merge_context.Clear();
+  found =
+      list.current()->Get(LookupKey("key3", seq), &value, &s, &merge_context);
+  ASSERT_FALSE(found);
+
+  // Verify that the second memtable's keys are in the history
+  merge_context.Clear();
+  found = list.current()->GetFromHistory(LookupKey("key1", seq), &value, &s,
+                                         &merge_context);
+  ASSERT_TRUE(found && s.IsNotFound());
+
+  merge_context.Clear();
+  found = list.current()->GetFromHistory(LookupKey("key3", seq), &value, &s,
+                                         &merge_context);
+  ASSERT_TRUE(found);
+  ASSERT_EQ("value3", value);
+
+  // Verify that key2 from the first memtable is no longer in the history
+  merge_context.Clear();
+  found =
+      list.current()->Get(LookupKey("key2", seq), &value, &s, &merge_context);
+  ASSERT_FALSE(found);
+
+  // Cleanup
+  list.current()->Unref(&to_delete);
+  ASSERT_EQ(3, to_delete.size());
+  for (MemTable* m : to_delete) {
+    delete m;
+  }
+}
+
+TEST_F(MemTableListTest, FlushPendingTest) {
+  const int num_tables = 5;
+  SequenceNumber seq = 1;
+  Status s;
+
+  auto factory = std::make_shared<SkipListFactory>();
+  options.memtable_factory = factory;
+  ImmutableCFOptions ioptions(options);
+  InternalKeyComparator cmp(BytewiseComparator());
+  WriteBufferManager wb(options.db_write_buffer_size);
+  autovector<MemTable*> to_delete;
+
+  // Create MemTableList
+  int min_write_buffer_number_to_merge = 3;
+  int max_write_buffer_number_to_maintain = 7;
+  MemTableList list(min_write_buffer_number_to_merge,
+                    max_write_buffer_number_to_maintain);
+
+  // Create some MemTables
+  std::vector<MemTable*> tables;
+  MutableCFOptions mutable_cf_options(options, ioptions);
+  for (int i = 0; i < num_tables; i++) {
+    MemTable* mem = new MemTable(cmp, ioptions, mutable_cf_options, &wb,
+                                 kMaxSequenceNumber);
+    mem->Ref();
+
+    std::string value;
+    MergeContext merge_context;
+
+    mem->Add(++seq, kTypeValue, "key1", ToString(i));
+    mem->Add(++seq, kTypeValue, "keyN" + ToString(i), "valueN");
+    mem->Add(++seq, kTypeValue, "keyX" + ToString(i), "value");
+    mem->Add(++seq, kTypeValue, "keyM" + ToString(i), "valueM");
+    mem->Add(++seq, kTypeDeletion, "keyX" + ToString(i), "");
+
+    tables.push_back(mem);
+  }
+
+  // Nothing to flush
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
+  autovector<MemTable*> to_flush;
+  list.PickMemtablesToFlush(&to_flush);
+  ASSERT_EQ(0, to_flush.size());
+
+  // Request a flush even though there is nothing to flush
+  list.FlushRequested();
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
+
+  // Attempt to 'flush' to clear request for flush
+  list.PickMemtablesToFlush(&to_flush);
+  ASSERT_EQ(0, to_flush.size());
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
+
+  // Request a flush again
+  list.FlushRequested();
+  // No flush pending since the list is empty.
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
+
+  // Add 2 tables
+  list.Add(tables[0], &to_delete);
+  list.Add(tables[1], &to_delete);
+  ASSERT_EQ(2, list.NumNotFlushed());
+  ASSERT_EQ(0, to_delete.size());
+
+  // Even though we have less than the minimum to flush, a flush is
+  // pending since we had previously requested a flush and never called
+  // PickMemtablesToFlush() to clear the flush.
+  ASSERT_TRUE(list.IsFlushPending());
+  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
+
+  // Pick tables to flush
+  list.PickMemtablesToFlush(&to_flush);
+  ASSERT_EQ(2, to_flush.size());
+  ASSERT_EQ(2, list.NumNotFlushed());
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
+
+  // Revert flush
+  list.RollbackMemtableFlush(to_flush, 0);
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
+  to_flush.clear();
+
+  // Add another table
+  list.Add(tables[2], &to_delete);
+  // We now have the minimum to flush regardless of whether FlushRequested()
+  // was called.
+  ASSERT_TRUE(list.IsFlushPending());
+  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
+  ASSERT_EQ(0, to_delete.size());
+
+  // Pick tables to flush
+  list.PickMemtablesToFlush(&to_flush);
+  ASSERT_EQ(3, to_flush.size());
+  ASSERT_EQ(3, list.NumNotFlushed());
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
+
+  // Pick tables to flush again
+  autovector<MemTable*> to_flush2;
+  list.PickMemtablesToFlush(&to_flush2);
+  ASSERT_EQ(0, to_flush2.size());
+  ASSERT_EQ(3, list.NumNotFlushed());
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
+
+  // Add another table
+  list.Add(tables[3], &to_delete);
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
+  ASSERT_EQ(0, to_delete.size());
+
+  // Request a flush again
+  list.FlushRequested();
+  ASSERT_TRUE(list.IsFlushPending());
+  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
+
+  // Pick tables to flush again
+  list.PickMemtablesToFlush(&to_flush2);
+  ASSERT_EQ(1, to_flush2.size());
+  ASSERT_EQ(4, list.NumNotFlushed());
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
+
+  // Rollback first pick of tables
+  list.RollbackMemtableFlush(to_flush, 0);
+  ASSERT_TRUE(list.IsFlushPending());
+  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
+  to_flush.clear();
+
+  // Add another table
+  list.Add(tables[4], &to_delete);
+  ASSERT_EQ(5, list.NumNotFlushed());
+  // We now have the minimum to flush regardless of whether FlushRequested()
+  // was called.
+  ASSERT_TRUE(list.IsFlushPending());
+  ASSERT_TRUE(list.imm_flush_needed.load(std::memory_order_acquire));
+  ASSERT_EQ(0, to_delete.size());
+
+  // Pick tables to flush
+  list.PickMemtablesToFlush(&to_flush);
+  // Should pick 4 of 5 since 1 table has been picked in to_flush2
+  ASSERT_EQ(4, to_flush.size());
+  ASSERT_EQ(5, list.NumNotFlushed());
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
+
+  // Pick tables to flush again
+  autovector<MemTable*> to_flush3;
+  list.PickMemtablesToFlush(&to_flush3);
+  ASSERT_EQ(0, to_flush3.size());  // nothing not in progress of being flushed
+  ASSERT_EQ(5, list.NumNotFlushed());
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
+
+  // Flush the 4 memtables that were picked in to_flush
+  s = Mock_InstallMemtableFlushResults(
+      &list, MutableCFOptions(options, ioptions), to_flush, &to_delete);
+  ASSERT_OK(s);
+
+  // Note:  now to_flush contains tables[0,1,2,4].  to_flush2 contains
+  // tables[3].
+  // Current implementation will only commit memtables in the order they were
+  // created. So InstallMemtableFlushResults will install the first 3 tables
+  // in to_flush and stop when it encounters a table not yet flushed.
+  ASSERT_EQ(2, list.NumNotFlushed());
+  int num_in_history = std::min(3, max_write_buffer_number_to_maintain);
+  ASSERT_EQ(num_in_history, list.NumFlushed());
+  ASSERT_EQ(5 - list.NumNotFlushed() - num_in_history, to_delete.size());
+
+  // Request a flush again. Should be nothing to flush
+  list.FlushRequested();
+  ASSERT_FALSE(list.IsFlushPending());
+  ASSERT_FALSE(list.imm_flush_needed.load(std::memory_order_acquire));
+
+  // Flush the 1 memtable that was picked in to_flush2
+  s = MemTableListTest::Mock_InstallMemtableFlushResults(
+      &list, MutableCFOptions(options, ioptions), to_flush2, &to_delete);
+  ASSERT_OK(s);
+
+  // This will actually install 2 tables. The 1 we told it to flush, and also
+  // tables[4] which has been waiting for tables[3] to commit.
+  ASSERT_EQ(0, list.NumNotFlushed());
+  num_in_history = std::min(5, max_write_buffer_number_to_maintain);
+  ASSERT_EQ(num_in_history, list.NumFlushed());
+  ASSERT_EQ(5 - list.NumNotFlushed() - num_in_history, to_delete.size());
+
+  for (const auto& m : to_delete) {
+    // Refcount should be 0 after calling InstallMemtableFlushResults.
+    // Verify this, by Ref'ing then UnRef'ing:
+    m->Ref();
+    ASSERT_EQ(m, m->Unref());
+    delete m;
+  }
+  to_delete.clear();
+
+  list.current()->Unref(&to_delete);
+  int to_delete_size = std::min(5, max_write_buffer_number_to_maintain);
+  ASSERT_EQ(to_delete_size, to_delete.size());
+
+  for (const auto& m : to_delete) {
+    // Refcount should be 0 after calling InstallMemtableFlushResults.
+    // Verify this, by Ref'ing then UnRef'ing:
+    m->Ref();
+    ASSERT_EQ(m, m->Unref());
+    delete m;
+  }
+  to_delete.clear();
+}
+
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/external/rocksdb/db/memtablerep_bench.cc b/external/rocksdb/db/memtablerep_bench.cc
new file mode 100755
index 0000000000..b5875618b9
--- /dev/null
+++ b/external/rocksdb/db/memtablerep_bench.cc
@@ -0,0 +1,697 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#define __STDC_FORMAT_MACROS
+
+#ifndef GFLAGS
+#include <cstdio>
+int main() {
+  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
+  return 1;
+}
+#else
+
+#include <gflags/gflags.h>
+
+#include <atomic>
+#include <iostream>
+#include <memory>
+#include <thread>
+#include <type_traits>
+#include <vector>
+
+#include "db/dbformat.h"
+#include "db/memtable.h"
+#include "port/port.h"
+#include "port/stack_trace.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/options.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/write_buffer_manager.h"
+#include "util/arena.h"
+#include "util/mutexlock.h"
+#include "util/stop_watch.h"
+#include "util/testutil.h"
+
+using GFLAGS::ParseCommandLineFlags;
+using GFLAGS::RegisterFlagValidator;
+using GFLAGS::SetUsageMessage;
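Everything configurable in this benchmark goes through gflags: each DEFINE_* below creates a FLAGS_ variable plus a matching --flag command-line switch, and ParseCommandLineFlags fills them in at startup. A minimal stand-alone use of the same machinery, as a sketch (the bench's GFLAGS alias is assumed to resolve to the gflags namespace):

```cpp
#include <cstdio>
#include <gflags/gflags.h>

// Creates FLAGS_num_operations and the --num_operations=N switch.
DEFINE_int32(num_operations, 1000000, "operations to run");

int main(int argc, char** argv) {
  gflags::ParseCommandLineFlags(&argc, &argv, true);  // strips parsed flags
  std::printf("running %d operations\n", FLAGS_num_operations);
  return 0;
}
```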
Options:\n" + "\tfillrandom -- write N random values\n" + "\tfillseq -- write N values in sequential order\n" + "\treadrandom -- read N values in random order\n" + "\treadseq -- scan the DB\n" + "\treadwrite -- 1 thread writes while N - 1 threads " + "do random\n" + "\t reads\n" + "\tseqreadwrite -- 1 thread writes while N - 1 threads " + "do scans\n"); + +DEFINE_string(memtablerep, "skiplist", + "Which implementation of memtablerep to use. See " + "include/memtablerep.h for\n" + " more details. Options:\n" + "\tskiplist -- backed by a skiplist\n" + "\tvector -- backed by an std::vector\n" + "\thashskiplist -- backed by a hash skip list\n" + "\thashlinklist -- backed by a hash linked list\n" + "\tcuckoo -- backed by a cuckoo hash table"); + +DEFINE_int64(bucket_count, 1000000, + "bucket_count parameter to pass into NewHashSkiplistRepFactory or " + "NewHashLinkListRepFactory"); + +DEFINE_int32( + hashskiplist_height, 4, + "skiplist_height parameter to pass into NewHashSkiplistRepFactory"); + +DEFINE_int32( + hashskiplist_branching_factor, 4, + "branching_factor parameter to pass into NewHashSkiplistRepFactory"); + +DEFINE_int32( + huge_page_tlb_size, 0, + "huge_page_tlb_size parameter to pass into NewHashLinkListRepFactory"); + +DEFINE_int32(bucket_entries_logging_threshold, 4096, + "bucket_entries_logging_threshold parameter to pass into " + "NewHashLinkListRepFactory"); + +DEFINE_bool(if_log_bucket_dist_when_flash, true, + "if_log_bucket_dist_when_flash parameter to pass into " + "NewHashLinkListRepFactory"); + +DEFINE_int32( + threshold_use_skiplist, 256, + "threshold_use_skiplist parameter to pass into NewHashLinkListRepFactory"); + +DEFINE_int64( + write_buffer_size, 256, + "write_buffer_size parameter to pass into NewHashCuckooRepFactory"); + +DEFINE_int64( + average_data_size, 64, + "average_data_size parameter to pass into NewHashCuckooRepFactory"); + +DEFINE_int64( + hash_function_count, 4, + "hash_function_count parameter to pass into NewHashCuckooRepFactory"); + +DEFINE_int32( + num_threads, 1, + "Number of concurrent threads to run. If the benchmark includes writes,\n" + "then at most one thread will be a writer"); + +DEFINE_int32(num_operations, 1000000, + "Number of operations to do for write and random read benchmarks"); + +DEFINE_int32(num_scans, 10, + "Number of times for each thread to scan the memtablerep for " + "sequential read " + "benchmarks"); + +DEFINE_int32(item_size, 100, "Number of bytes each item should be"); + +DEFINE_int32(prefix_length, 8, + "Prefix length to pass into NewFixedPrefixTransform"); + +/* VectorRep settings */ +DEFINE_int64(vectorrep_count, 0, + "Number of entries to reserve on VectorRep initialization"); + +DEFINE_int64(seed, 0, + "Seed base for random number generators. " + "When 0 it is deterministic."); + +namespace rocksdb { + +namespace { +struct CallbackVerifyArgs { + bool found; + LookupKey* key; + MemTableRep* table; + InternalKeyComparator* comparator; +}; +} // namespace + +// Helper for quickly generating random data. 
+class RandomGenerator { + private: + std::string data_; + unsigned int pos_; + + public: + RandomGenerator() { + Random rnd(301); + auto size = (unsigned)std::max(1048576, FLAGS_item_size); + test::RandomString(&rnd, size, &data_); + pos_ = 0; + } + + Slice Generate(unsigned int len) { + assert(len <= data_.size()); + if (pos_ + len > data_.size()) { + pos_ = 0; + } + pos_ += len; + return Slice(data_.data() + pos_ - len, len); + } +}; + +enum WriteMode { SEQUENTIAL, RANDOM, UNIQUE_RANDOM }; + +class KeyGenerator { + public: + KeyGenerator(Random64* rand, WriteMode mode, uint64_t num) + : rand_(rand), mode_(mode), num_(num), next_(0) { + if (mode_ == UNIQUE_RANDOM) { + // NOTE: if memory consumption of this approach becomes a concern, + // we can either break it into pieces and only random shuffle a section + // each time. Alternatively, use a bit map implementation + // (https://reviews.facebook.net/differential/diff/54627/) + values_.resize(num_); + for (uint64_t i = 0; i < num_; ++i) { + values_[i] = i; + } + std::shuffle( + values_.begin(), values_.end(), + std::default_random_engine(static_cast(FLAGS_seed))); + } + } + + uint64_t Next() { + switch (mode_) { + case SEQUENTIAL: + return next_++; + case RANDOM: + return rand_->Next() % num_; + case UNIQUE_RANDOM: + return values_[next_++]; + } + assert(false); + return std::numeric_limits::max(); + } + + private: + Random64* rand_; + WriteMode mode_; + const uint64_t num_; + uint64_t next_; + std::vector values_; +}; + +class BenchmarkThread { + public: + explicit BenchmarkThread(MemTableRep* table, KeyGenerator* key_gen, + uint64_t* bytes_written, uint64_t* bytes_read, + uint64_t* sequence, uint64_t num_ops, + uint64_t* read_hits) + : table_(table), + key_gen_(key_gen), + bytes_written_(bytes_written), + bytes_read_(bytes_read), + sequence_(sequence), + num_ops_(num_ops), + read_hits_(read_hits) {} + + virtual void operator()() = 0; + virtual ~BenchmarkThread() {} + + protected: + MemTableRep* table_; + KeyGenerator* key_gen_; + uint64_t* bytes_written_; + uint64_t* bytes_read_; + uint64_t* sequence_; + uint64_t num_ops_; + uint64_t* read_hits_; + RandomGenerator generator_; +}; + +class FillBenchmarkThread : public BenchmarkThread { + public: + FillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen, + uint64_t* bytes_written, uint64_t* bytes_read, + uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits) + : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence, + num_ops, read_hits) {} + + void FillOne() { + char* buf = nullptr; + auto internal_key_size = 16; + auto encoded_len = + FLAGS_item_size + VarintLength(internal_key_size) + internal_key_size; + KeyHandle handle = table_->Allocate(encoded_len, &buf); + assert(buf != nullptr); + char* p = EncodeVarint32(buf, internal_key_size); + auto key = key_gen_->Next(); + EncodeFixed64(p, key); + p += 8; + EncodeFixed64(p, ++(*sequence_)); + p += 8; + Slice bytes = generator_.Generate(FLAGS_item_size); + memcpy(p, bytes.data(), FLAGS_item_size); + p += FLAGS_item_size; + assert(p == buf + encoded_len); + table_->Insert(handle); + *bytes_written_ += encoded_len; + } + + void operator()() override { + for (unsigned int i = 0; i < num_ops_; ++i) { + FillOne(); + } + } +}; + +class ConcurrentFillBenchmarkThread : public FillBenchmarkThread { + public: + ConcurrentFillBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen, + uint64_t* bytes_written, uint64_t* bytes_read, + uint64_t* sequence, uint64_t num_ops, + uint64_t* read_hits, + std::atomic_int* threads_done) + 
: FillBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence, + num_ops, read_hits) { + threads_done_ = threads_done; + } + + void operator()() override { + // # of read threads will be total threads - write threads (always 1). Loop + // while all reads complete. + while ((*threads_done_).load() < (FLAGS_num_threads - 1)) { + FillOne(); + } + } + + private: + std::atomic_int* threads_done_; +}; + +class ReadBenchmarkThread : public BenchmarkThread { + public: + ReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen, + uint64_t* bytes_written, uint64_t* bytes_read, + uint64_t* sequence, uint64_t num_ops, uint64_t* read_hits) + : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence, + num_ops, read_hits) {} + + static bool callback(void* arg, const char* entry) { + CallbackVerifyArgs* callback_args = static_cast(arg); + assert(callback_args != nullptr); + uint32_t key_length; + const char* key_ptr = GetVarint32Ptr(entry, entry + 5, &key_length); + if ((callback_args->comparator) + ->user_comparator() + ->Equal(Slice(key_ptr, key_length - 8), + callback_args->key->user_key())) { + callback_args->found = true; + } + return false; + } + + void ReadOne() { + std::string user_key; + auto key = key_gen_->Next(); + PutFixed64(&user_key, key); + LookupKey lookup_key(user_key, *sequence_); + InternalKeyComparator internal_key_comp(BytewiseComparator()); + CallbackVerifyArgs verify_args; + verify_args.found = false; + verify_args.key = &lookup_key; + verify_args.table = table_; + verify_args.comparator = &internal_key_comp; + table_->Get(lookup_key, &verify_args, callback); + if (verify_args.found) { + *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size; + ++*read_hits_; + } + } + void operator()() override { + for (unsigned int i = 0; i < num_ops_; ++i) { + ReadOne(); + } + } +}; + +class SeqReadBenchmarkThread : public BenchmarkThread { + public: + SeqReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen, + uint64_t* bytes_written, uint64_t* bytes_read, + uint64_t* sequence, uint64_t num_ops, + uint64_t* read_hits) + : BenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence, + num_ops, read_hits) {} + + void ReadOneSeq() { + std::unique_ptr iter(table_->GetIterator()); + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + // pretend to read the value + *bytes_read_ += VarintLength(16) + 16 + FLAGS_item_size; + } + ++*read_hits_; + } + + void operator()() override { + for (unsigned int i = 0; i < num_ops_; ++i) { + { ReadOneSeq(); } + } + } +}; + +class ConcurrentReadBenchmarkThread : public ReadBenchmarkThread { + public: + ConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen, + uint64_t* bytes_written, uint64_t* bytes_read, + uint64_t* sequence, uint64_t num_ops, + uint64_t* read_hits, + std::atomic_int* threads_done) + : ReadBenchmarkThread(table, key_gen, bytes_written, bytes_read, sequence, + num_ops, read_hits) { + threads_done_ = threads_done; + } + + void operator()() override { + for (unsigned int i = 0; i < num_ops_; ++i) { + ReadOne(); + } + ++*threads_done_; + } + + private: + std::atomic_int* threads_done_; +}; + +class SeqConcurrentReadBenchmarkThread : public SeqReadBenchmarkThread { + public: + SeqConcurrentReadBenchmarkThread(MemTableRep* table, KeyGenerator* key_gen, + uint64_t* bytes_written, + uint64_t* bytes_read, uint64_t* sequence, + uint64_t num_ops, uint64_t* read_hits, + std::atomic_int* threads_done) + : SeqReadBenchmarkThread(table, key_gen, bytes_written, bytes_read, + sequence, num_ops, 
read_hits) { + threads_done_ = threads_done; + } + + void operator()() override { + for (unsigned int i = 0; i < num_ops_; ++i) { + ReadOneSeq(); + } + ++*threads_done_; + } + + private: + std::atomic_int* threads_done_; +}; + +class Benchmark { + public: + explicit Benchmark(MemTableRep* table, KeyGenerator* key_gen, + uint64_t* sequence, uint32_t num_threads) + : table_(table), + key_gen_(key_gen), + sequence_(sequence), + num_threads_(num_threads) {} + + virtual ~Benchmark() {} + virtual void Run() { + std::cout << "Number of threads: " << num_threads_ << std::endl; + std::vector threads; + uint64_t bytes_written = 0; + uint64_t bytes_read = 0; + uint64_t read_hits = 0; + StopWatchNano timer(Env::Default(), true); + RunThreads(&threads, &bytes_written, &bytes_read, true, &read_hits); + auto elapsed_time = static_cast(timer.ElapsedNanos() / 1000); + std::cout << "Elapsed time: " << static_cast(elapsed_time) << " us" + << std::endl; + + if (bytes_written > 0) { + auto MiB_written = static_cast(bytes_written) / (1 << 20); + auto write_throughput = MiB_written / (elapsed_time / 1000000); + std::cout << "Total bytes written: " << MiB_written << " MiB" + << std::endl; + std::cout << "Write throughput: " << write_throughput << " MiB/s" + << std::endl; + auto us_per_op = elapsed_time / num_write_ops_per_thread_; + std::cout << "write us/op: " << us_per_op << std::endl; + } + if (bytes_read > 0) { + auto MiB_read = static_cast(bytes_read) / (1 << 20); + auto read_throughput = MiB_read / (elapsed_time / 1000000); + std::cout << "Total bytes read: " << MiB_read << " MiB" << std::endl; + std::cout << "Read throughput: " << read_throughput << " MiB/s" + << std::endl; + auto us_per_op = elapsed_time / num_read_ops_per_thread_; + std::cout << "read us/op: " << us_per_op << std::endl; + } + } + + virtual void RunThreads(std::vector* threads, + uint64_t* bytes_written, uint64_t* bytes_read, + bool write, uint64_t* read_hits) = 0; + + protected: + MemTableRep* table_; + KeyGenerator* key_gen_; + uint64_t* sequence_; + uint64_t num_write_ops_per_thread_; + uint64_t num_read_ops_per_thread_; + const uint32_t num_threads_; +}; + +class FillBenchmark : public Benchmark { + public: + explicit FillBenchmark(MemTableRep* table, KeyGenerator* key_gen, + uint64_t* sequence) + : Benchmark(table, key_gen, sequence, 1) { + num_write_ops_per_thread_ = FLAGS_num_operations; + } + + void RunThreads(std::vector* threads, uint64_t* bytes_written, + uint64_t* bytes_read, bool write, + uint64_t* read_hits) override { + FillBenchmarkThread(table_, key_gen_, bytes_written, bytes_read, sequence_, + num_write_ops_per_thread_, read_hits)(); + } +}; + +class ReadBenchmark : public Benchmark { + public: + explicit ReadBenchmark(MemTableRep* table, KeyGenerator* key_gen, + uint64_t* sequence) + : Benchmark(table, key_gen, sequence, FLAGS_num_threads) { + num_read_ops_per_thread_ = FLAGS_num_operations / FLAGS_num_threads; + } + + void RunThreads(std::vector* threads, uint64_t* bytes_written, + uint64_t* bytes_read, bool write, + uint64_t* read_hits) override { + for (int i = 0; i < FLAGS_num_threads; ++i) { + threads->emplace_back( + ReadBenchmarkThread(table_, key_gen_, bytes_written, bytes_read, + sequence_, num_read_ops_per_thread_, read_hits)); + } + for (auto& thread : *threads) { + thread.join(); + } + std::cout << "read hit%: " + << (static_cast(*read_hits) / FLAGS_num_operations) * 100 + << std::endl; + } +}; + +class SeqReadBenchmark : public Benchmark { + public: + explicit SeqReadBenchmark(MemTableRep* table, uint64_t* 
sequence) + : Benchmark(table, nullptr, sequence, FLAGS_num_threads) { + num_read_ops_per_thread_ = FLAGS_num_scans; + } + + void RunThreads(std::vector* threads, uint64_t* bytes_written, + uint64_t* bytes_read, bool write, + uint64_t* read_hits) override { + for (int i = 0; i < FLAGS_num_threads; ++i) { + threads->emplace_back(SeqReadBenchmarkThread( + table_, key_gen_, bytes_written, bytes_read, sequence_, + num_read_ops_per_thread_, read_hits)); + } + for (auto& thread : *threads) { + thread.join(); + } + } +}; + +template +class ReadWriteBenchmark : public Benchmark { + public: + explicit ReadWriteBenchmark(MemTableRep* table, KeyGenerator* key_gen, + uint64_t* sequence) + : Benchmark(table, key_gen, sequence, FLAGS_num_threads) { + num_read_ops_per_thread_ = + FLAGS_num_threads <= 1 + ? 0 + : (FLAGS_num_operations / (FLAGS_num_threads - 1)); + num_write_ops_per_thread_ = FLAGS_num_operations; + } + + void RunThreads(std::vector* threads, uint64_t* bytes_written, + uint64_t* bytes_read, bool write, + uint64_t* read_hits) override { + std::atomic_int threads_done; + threads_done.store(0); + threads->emplace_back(ConcurrentFillBenchmarkThread( + table_, key_gen_, bytes_written, bytes_read, sequence_, + num_write_ops_per_thread_, read_hits, &threads_done)); + for (int i = 1; i < FLAGS_num_threads; ++i) { + threads->emplace_back( + ReadThreadType(table_, key_gen_, bytes_written, bytes_read, sequence_, + num_read_ops_per_thread_, read_hits, &threads_done)); + } + for (auto& thread : *threads) { + thread.join(); + } + } +}; + +} // namespace rocksdb + +void PrintWarnings() { +#if defined(__GNUC__) && !defined(__OPTIMIZE__) + fprintf(stdout, + "WARNING: Optimization is disabled: benchmarks unnecessarily slow\n"); +#endif +#ifndef NDEBUG + fprintf(stdout, + "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n"); +#endif +} + +int main(int argc, char** argv) { + rocksdb::port::InstallStackTraceHandler(); + SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) + + " [OPTIONS]..."); + ParseCommandLineFlags(&argc, &argv, true); + + PrintWarnings(); + + rocksdb::Options options; + + std::unique_ptr factory; + if (FLAGS_memtablerep == "skiplist") { + factory.reset(new rocksdb::SkipListFactory); +#ifndef ROCKSDB_LITE + } else if (FLAGS_memtablerep == "vector") { + factory.reset(new rocksdb::VectorRepFactory); + } else if (FLAGS_memtablerep == "hashskiplist") { + factory.reset(rocksdb::NewHashSkipListRepFactory( + FLAGS_bucket_count, FLAGS_hashskiplist_height, + FLAGS_hashskiplist_branching_factor)); + options.prefix_extractor.reset( + rocksdb::NewFixedPrefixTransform(FLAGS_prefix_length)); + } else if (FLAGS_memtablerep == "hashlinklist") { + factory.reset(rocksdb::NewHashLinkListRepFactory( + FLAGS_bucket_count, FLAGS_huge_page_tlb_size, + FLAGS_bucket_entries_logging_threshold, + FLAGS_if_log_bucket_dist_when_flash, FLAGS_threshold_use_skiplist)); + options.prefix_extractor.reset( + rocksdb::NewFixedPrefixTransform(FLAGS_prefix_length)); + } else if (FLAGS_memtablerep == "cuckoo") { + factory.reset(rocksdb::NewHashCuckooRepFactory( + FLAGS_write_buffer_size, FLAGS_average_data_size, + static_cast(FLAGS_hash_function_count))); + options.prefix_extractor.reset( + rocksdb::NewFixedPrefixTransform(FLAGS_prefix_length)); +#endif // ROCKSDB_LITE + } else { + fprintf(stdout, "Unknown memtablerep: %s\n", FLAGS_memtablerep.c_str()); + exit(1); + } + + rocksdb::InternalKeyComparator internal_key_comp( + rocksdb::BytewiseComparator()); + rocksdb::MemTable::KeyComparator 
key_comp(internal_key_comp); + rocksdb::Arena arena; + rocksdb::WriteBufferManager wb(FLAGS_write_buffer_size); + rocksdb::MemTableAllocator memtable_allocator(&arena, &wb); + uint64_t sequence; + auto createMemtableRep = [&] { + sequence = 0; + return factory->CreateMemTableRep(key_comp, &memtable_allocator, + options.prefix_extractor.get(), + options.info_log.get()); + }; + std::unique_ptr memtablerep; + rocksdb::Random64 rng(FLAGS_seed); + const char* benchmarks = FLAGS_benchmarks.c_str(); + while (benchmarks != nullptr) { + std::unique_ptr key_gen; + const char* sep = strchr(benchmarks, ','); + rocksdb::Slice name; + if (sep == nullptr) { + name = benchmarks; + benchmarks = nullptr; + } else { + name = rocksdb::Slice(benchmarks, sep - benchmarks); + benchmarks = sep + 1; + } + std::unique_ptr benchmark; + if (name == rocksdb::Slice("fillseq")) { + memtablerep.reset(createMemtableRep()); + key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::SEQUENTIAL, + FLAGS_num_operations)); + benchmark.reset(new rocksdb::FillBenchmark(memtablerep.get(), + key_gen.get(), &sequence)); + } else if (name == rocksdb::Slice("fillrandom")) { + memtablerep.reset(createMemtableRep()); + key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::UNIQUE_RANDOM, + FLAGS_num_operations)); + benchmark.reset(new rocksdb::FillBenchmark(memtablerep.get(), + key_gen.get(), &sequence)); + } else if (name == rocksdb::Slice("readrandom")) { + key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::RANDOM, + FLAGS_num_operations)); + benchmark.reset(new rocksdb::ReadBenchmark(memtablerep.get(), + key_gen.get(), &sequence)); + } else if (name == rocksdb::Slice("readseq")) { + key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::SEQUENTIAL, + FLAGS_num_operations)); + benchmark.reset( + new rocksdb::SeqReadBenchmark(memtablerep.get(), &sequence)); + } else if (name == rocksdb::Slice("readwrite")) { + memtablerep.reset(createMemtableRep()); + key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::RANDOM, + FLAGS_num_operations)); + benchmark.reset(new rocksdb::ReadWriteBenchmark< + rocksdb::ConcurrentReadBenchmarkThread>(memtablerep.get(), + key_gen.get(), &sequence)); + } else if (name == rocksdb::Slice("seqreadwrite")) { + memtablerep.reset(createMemtableRep()); + key_gen.reset(new rocksdb::KeyGenerator(&rng, rocksdb::RANDOM, + FLAGS_num_operations)); + benchmark.reset(new rocksdb::ReadWriteBenchmark< + rocksdb::SeqConcurrentReadBenchmarkThread>(memtablerep.get(), + key_gen.get(), &sequence)); + } else { + std::cout << "WARNING: skipping unknown benchmark '" << name.ToString() + << std::endl; + continue; + } + std::cout << "Running " << name.ToString() << std::endl; + benchmark->Run(); + } + + return 0; +} + +#endif // GFLAGS diff --git a/external/rocksdb/db/merge_context.h b/external/rocksdb/db/merge_context.h new file mode 100755 index 0000000000..bf8bff0b6e --- /dev/null +++ b/external/rocksdb/db/merge_context.h @@ -0,0 +1,116 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +#pragma once +#include +#include +#include "db/dbformat.h" +#include "rocksdb/slice.h" + +namespace rocksdb { + +const std::vector empty_operand_list; + +// The merge context for merging a user key. 
+// When doing a Get(), DB will create such a class and pass it when
+// issuing Get() operation to memtables and version_set. The operands
+// will be fetched from the context when issuing partial or full merge.
+class MergeContext {
+ public:
+  // Clear all the operands
+  void Clear() {
+    if (operand_list_) {
+      operand_list_->clear();
+      copied_operands_->clear();
+    }
+  }
+
+  // Push a merge operand
+  void PushOperand(const Slice& operand_slice, bool operand_pinned = false) {
+    Initialize();
+    SetDirectionBackward();
+
+    if (operand_pinned) {
+      operand_list_->push_back(operand_slice);
+    } else {
+      // We need to have our own copy of the operand since it's not pinned
+      copied_operands_->emplace_back(
+          new std::string(operand_slice.data(), operand_slice.size()));
+      operand_list_->push_back(*copied_operands_->back());
+    }
+  }
+
+  // Push back a merge operand
+  void PushOperandBack(const Slice& operand_slice,
+                       bool operand_pinned = false) {
+    Initialize();
+    SetDirectionForward();
+
+    if (operand_pinned) {
+      operand_list_->push_back(operand_slice);
+    } else {
+      // We need to have our own copy of the operand since it's not pinned
+      copied_operands_->emplace_back(
+          new std::string(operand_slice.data(), operand_slice.size()));
+      operand_list_->push_back(*copied_operands_->back());
+    }
+  }
+
+  // Return the total number of operands in the list
+  size_t GetNumOperands() const {
+    if (!operand_list_) {
+      return 0;
+    }
+    return operand_list_->size();
+  }
+
+  // Get the operand at the index.
+  Slice GetOperand(int index) {
+    assert(operand_list_);
+
+    SetDirectionForward();
+    return (*operand_list_)[index];
+  }
+
+  // Return all the operands.
+  const std::vector<Slice>& GetOperands() {
+    if (!operand_list_) {
+      return empty_operand_list;
+    }
+
+    SetDirectionForward();
+    return *operand_list_;
+  }
+
+ private:
+  void Initialize() {
+    if (!operand_list_) {
+      operand_list_.reset(new std::vector<Slice>());
+      copied_operands_.reset(new std::vector<std::unique_ptr<std::string>>());
+    }
+  }
+
+  void SetDirectionForward() {
+    if (operands_reversed_ == true) {
+      std::reverse(operand_list_->begin(), operand_list_->end());
+      operands_reversed_ = false;
+    }
+  }
+
+  void SetDirectionBackward() {
+    if (operands_reversed_ == false) {
+      std::reverse(operand_list_->begin(), operand_list_->end());
+      operands_reversed_ = true;
+    }
+  }
+
+  // List of operands
+  std::unique_ptr<std::vector<Slice>> operand_list_;
+  // Copies of operands that are not pinned.
+  std::unique_ptr<std::vector<std::unique_ptr<std::string>>> copied_operands_;
+  bool operands_reversed_ = true;
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/merge_helper.cc b/external/rocksdb/db/merge_helper.cc
new file mode 100755
index 0000000000..a3d3823fc9
--- /dev/null
+++ b/external/rocksdb/db/merge_helper.cc
@@ -0,0 +1,326 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
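The MergeContext class above is the small piece of state that the merge path fills while walking a key's history: operands are collected newest-first, and the list is only reversed when a forward read is requested. A minimal usage sketch follows (not part of the patch; MergeContextSketch is a hypothetical name, and only the merge_context.h API shown above is assumed). The merge_helper.cc sources that consume this context continue right after it.

    #include <cassert>
    #include <string>

    #include "db/merge_context.h"

    void MergeContextSketch() {
      rocksdb::MergeContext ctx;
      std::string newest = "op-newest", oldest = "op-oldest";
      // MergeUntil() walks internal keys from newest to oldest, so the newest
      // operand is pushed first; PushOperand() keeps the list in that order.
      ctx.PushOperand(rocksdb::Slice(newest), /*operand_pinned=*/true);
      // An unpinned operand is copied into copied_operands_ before being
      // referenced from the operand list.
      ctx.PushOperand(rocksdb::Slice(oldest), /*operand_pinned=*/false);
      assert(ctx.GetNumOperands() == 2);
      // GetOperands() flips the list to forward order: oldest operand first.
      assert(ctx.GetOperands()[0] == rocksdb::Slice(oldest));
    }

Keeping the list reversed during collection makes each PushOperand() a plain O(1) push_back; the single std::reverse is paid only when the read direction actually changes.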
+ +#include "db/merge_helper.h" + +#include +#include + +#include "db/dbformat.h" +#include "rocksdb/comparator.h" +#include "rocksdb/db.h" +#include "rocksdb/merge_operator.h" +#include "table/internal_iterator.h" +#include "util/perf_context_imp.h" +#include "util/statistics.h" + +namespace rocksdb { + +Status MergeHelper::TimedFullMerge(const MergeOperator* merge_operator, + const Slice& key, const Slice* value, + const std::vector& operands, + std::string* result, Logger* logger, + Statistics* statistics, Env* env, + Slice* result_operand) { + assert(merge_operator != nullptr); + + if (operands.size() == 0) { + assert(value != nullptr && result != nullptr); + result->assign(value->data(), value->size()); + return Status::OK(); + } + + bool success; + Slice tmp_result_operand(nullptr, 0); + const MergeOperator::MergeOperationInput merge_in(key, value, operands, + logger); + MergeOperator::MergeOperationOutput merge_out(*result, tmp_result_operand); + { + // Setup to time the merge + StopWatchNano timer(env, statistics != nullptr); + PERF_TIMER_GUARD(merge_operator_time_nanos); + + // Do the merge + success = merge_operator->FullMergeV2(merge_in, &merge_out); + + if (tmp_result_operand.data()) { + // FullMergeV2 result is an existing operand + if (result_operand != nullptr) { + *result_operand = tmp_result_operand; + } else { + result->assign(tmp_result_operand.data(), tmp_result_operand.size()); + } + } else if (result_operand) { + *result_operand = Slice(nullptr, 0); + } + + RecordTick(statistics, MERGE_OPERATION_TOTAL_TIME, + statistics ? timer.ElapsedNanos() : 0); + } + + if (!success) { + RecordTick(statistics, NUMBER_MERGE_FAILURES); + return Status::Corruption("Error: Could not perform merge."); + } + + return Status::OK(); +} + +// PRE: iter points to the first merge type entry +// POST: iter points to the first entry beyond the merge process (or the end) +// keys_, operands_ are updated to reflect the merge result. +// keys_ stores the list of keys encountered while merging. +// operands_ stores the list of merge operands encountered while merging. +// keys_[i] corresponds to operands_[i] for each i. +Status MergeHelper::MergeUntil(InternalIterator* iter, + const SequenceNumber stop_before, + const bool at_bottom) { + // Get a copy of the internal key, before it's invalidated by iter->Next() + // Also maintain the list of merge operands seen. + assert(HasOperator()); + keys_.clear(); + merge_context_.Clear(); + assert(user_merge_operator_); + bool first_key = true; + + // We need to parse the internal key again as the parsed key is + // backed by the internal key! + // Assume no internal key corruption as it has been successfully parsed + // by the caller. 
+ // original_key_is_iter variable is just caching the information: + // original_key_is_iter == (iter->key().ToString() == original_key) + bool original_key_is_iter = true; + std::string original_key = iter->key().ToString(); + // Important: + // orig_ikey is backed by original_key if keys_.empty() + // orig_ikey is backed by keys_.back() if !keys_.empty() + ParsedInternalKey orig_ikey; + ParseInternalKey(original_key, &orig_ikey); + + Status s; + bool hit_the_next_user_key = false; + for (; iter->Valid(); iter->Next(), original_key_is_iter = false) { + ParsedInternalKey ikey; + assert(keys_.size() == merge_context_.GetNumOperands()); + + if (!ParseInternalKey(iter->key(), &ikey)) { + // stop at corrupted key + if (assert_valid_internal_key_) { + assert(!"Corrupted internal key not expected."); + return Status::Corruption("Corrupted internal key not expected."); + } + break; + } else if (first_key) { + assert(user_comparator_->Equal(ikey.user_key, orig_ikey.user_key)); + first_key = false; + } else if (!user_comparator_->Equal(ikey.user_key, orig_ikey.user_key)) { + // hit a different user key, stop right here + hit_the_next_user_key = true; + break; + } else if (stop_before && ikey.sequence <= stop_before) { + // hit an entry that's visible by the previous snapshot, can't touch that + break; + } + + // At this point we are guaranteed that we need to process this key. + + assert(IsValueType(ikey.type)); + if (ikey.type != kTypeMerge) { + if (ikey.type != kTypeValue && ikey.type != kTypeDeletion) { + // Merges operands can only be used with puts and deletions, single + // deletions are not supported. + assert(false); + // release build doesn't have asserts, so we return error status + return Status::InvalidArgument( + " Merges operands can only be used with puts and deletions, single " + "deletions are not supported."); + } + + // hit a put/delete + // => merge the put value or a nullptr with operands_ + // => store result in operands_.back() (and update keys_.back()) + // => change the entry type to kTypeValue for keys_.back() + // We are done! Success! + + // If there are no operands, just return the Status::OK(). That will cause + // the compaction iterator to write out the key we're currently at, which + // is the put/delete we just encountered. + if (keys_.empty()) { + return Status::OK(); + } + + // TODO(noetzli) If the merge operator returns false, we are currently + // (almost) silently dropping the put/delete. That's probably not what we + // want. + const Slice val = iter->value(); + const Slice* val_ptr = (kTypeValue == ikey.type) ? &val : nullptr; + std::string merge_result; + s = TimedFullMerge(user_merge_operator_, ikey.user_key, val_ptr, + merge_context_.GetOperands(), &merge_result, logger_, + stats_, env_); + + // We store the result in keys_.back() and operands_.back() + // if nothing went wrong (i.e.: no operand corruption on disk) + if (s.ok()) { + // The original key encountered + original_key = std::move(keys_.back()); + orig_ikey.type = kTypeValue; + UpdateInternalKey(&original_key, orig_ikey.sequence, orig_ikey.type); + keys_.clear(); + merge_context_.Clear(); + keys_.emplace_front(std::move(original_key)); + merge_context_.PushOperand(merge_result); + } + + // move iter to the next entry + iter->Next(); + return s; + } else { + // hit a merge + // => if there is a compaction filter, apply it. + // => merge the operand into the front of the operands_ list + // if not filtered + // => then continue because we haven't yet seen a Put/Delete. 
+ // + // Keep queuing keys and operands until we either meet a put / delete + // request or later did a partial merge. + + Slice value_slice = iter->value(); + // add an operand to the list if: + // 1) it's included in one of the snapshots. in that case we *must* write + // it out, no matter what compaction filter says + // 2) it's not filtered by a compaction filter + if (ikey.sequence <= latest_snapshot_ || + !FilterMerge(orig_ikey.user_key, value_slice)) { + if (original_key_is_iter) { + // this is just an optimization that saves us one memcpy + keys_.push_front(std::move(original_key)); + } else { + keys_.push_front(iter->key().ToString()); + } + if (keys_.size() == 1) { + // we need to re-anchor the orig_ikey because it was anchored by + // original_key before + ParseInternalKey(keys_.back(), &orig_ikey); + } + merge_context_.PushOperand(value_slice, + iter->IsValuePinned() /* operand_pinned */); + } + } + } + + if (merge_context_.GetNumOperands() == 0) { + // we filtered out all the merge operands + return Status::OK(); + } + + // We are sure we have seen this key's entire history if we are at the + // last level and exhausted all internal keys of this user key. + // NOTE: !iter->Valid() does not necessarily mean we hit the + // beginning of a user key, as versions of a user key might be + // split into multiple files (even files on the same level) + // and some files might not be included in the compaction/merge. + // + // There are also cases where we have seen the root of history of this + // key without being sure of it. Then, we simply miss the opportunity + // to combine the keys. Since VersionSet::SetupOtherInputs() always makes + // sure that all merge-operands on the same level get compacted together, + // this will simply lead to these merge operands moving to the next level. + // + // So, we only perform the following logic (to merge all operands together + // without a Put/Delete) if we are certain that we have seen the end of key. + bool surely_seen_the_beginning = hit_the_next_user_key && at_bottom; + if (surely_seen_the_beginning) { + // do a final merge with nullptr as the existing value and say + // bye to the merge type (it's now converted to a Put) + assert(kTypeMerge == orig_ikey.type); + assert(merge_context_.GetNumOperands() >= 1); + assert(merge_context_.GetNumOperands() == keys_.size()); + std::string merge_result; + s = TimedFullMerge(user_merge_operator_, orig_ikey.user_key, nullptr, + merge_context_.GetOperands(), &merge_result, logger_, + stats_, env_); + if (s.ok()) { + // The original key encountered + // We are certain that keys_ is not empty here (see assertions couple of + // lines before). + original_key = std::move(keys_.back()); + orig_ikey.type = kTypeValue; + UpdateInternalKey(&original_key, orig_ikey.sequence, orig_ikey.type); + keys_.clear(); + merge_context_.Clear(); + keys_.emplace_front(std::move(original_key)); + merge_context_.PushOperand(merge_result); + } + } else { + // We haven't seen the beginning of the key nor a Put/Delete. + // Attempt to use the user's associative merge function to + // merge the stacked merge operands into a single operand. + // + // TODO(noetzli) The docblock of MergeUntil suggests that a successful + // partial merge returns Status::OK(). Should we change the status code + // after a successful partial merge? 
+    s = Status::MergeInProgress();
+    if (merge_context_.GetNumOperands() >= 2 &&
+        merge_context_.GetNumOperands() >= min_partial_merge_operands_) {
+      bool merge_success = false;
+      std::string merge_result;
+      {
+        StopWatchNano timer(env_, stats_ != nullptr);
+        PERF_TIMER_GUARD(merge_operator_time_nanos);
+        merge_success = user_merge_operator_->PartialMergeMulti(
+            orig_ikey.user_key,
+            std::deque<Slice>(merge_context_.GetOperands().begin(),
+                              merge_context_.GetOperands().end()),
+            &merge_result, logger_);
+        RecordTick(stats_, MERGE_OPERATION_TOTAL_TIME,
+                   stats_ ? timer.ElapsedNanosSafe() : 0);
+      }
+      if (merge_success) {
+        // Merging of operands (associative merge) was successful.
+        // Replace operands with the merge result
+        merge_context_.Clear();
+        merge_context_.PushOperand(merge_result);
+        keys_.erase(keys_.begin(), keys_.end() - 1);
+      }
+    }
+  }
+
+  return s;
+}
+
+MergeOutputIterator::MergeOutputIterator(const MergeHelper* merge_helper)
+    : merge_helper_(merge_helper) {
+  it_keys_ = merge_helper_->keys().rend();
+  it_values_ = merge_helper_->values().rend();
+}
+
+void MergeOutputIterator::SeekToFirst() {
+  const auto& keys = merge_helper_->keys();
+  const auto& values = merge_helper_->values();
+  assert(keys.size() == values.size());
+  it_keys_ = keys.rbegin();
+  it_values_ = values.rbegin();
+}
+
+void MergeOutputIterator::Next() {
+  ++it_keys_;
+  ++it_values_;
+}
+
+bool MergeHelper::FilterMerge(const Slice& user_key, const Slice& value_slice) {
+  if (compaction_filter_ == nullptr) {
+    return false;
+  }
+  if (stats_ != nullptr) {
+    filter_timer_.Start();
+  }
+  bool to_delete =
+      compaction_filter_->FilterMergeOperand(level_, user_key, value_slice);
+  total_filter_time_ += filter_timer_.ElapsedNanosSafe();
+  return to_delete;
+}
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/merge_helper.h b/external/rocksdb/db/merge_helper.h
new file mode 100755
index 0000000000..7cd992f34f
--- /dev/null
+++ b/external/rocksdb/db/merge_helper.h
@@ -0,0 +1,174 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
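To make the full-merge path of merge_helper.cc above concrete, here is a sketch of calling the static MergeHelper::TimedFullMerge() directly with a toy associative operator. AppendOperator and FullMergeSketch are hypothetical illustrations, not part of the patch; only the TimedFullMerge() signature shown in this patch is assumed.

    #include <string>
    #include <vector>

    #include "db/merge_helper.h"
    #include "rocksdb/env.h"
    #include "rocksdb/merge_operator.h"

    // Toy associative operator that concatenates operands.
    class AppendOperator : public rocksdb::AssociativeMergeOperator {
     public:
      bool Merge(const rocksdb::Slice& key, const rocksdb::Slice* existing_value,
                 const rocksdb::Slice& value, std::string* new_value,
                 rocksdb::Logger* logger) const override {
        new_value->clear();
        if (existing_value != nullptr) {
          new_value->assign(existing_value->data(), existing_value->size());
        }
        new_value->append(value.data(), value.size());
        return true;
      }

      const char* Name() const override { return "AppendOperator"; }
    };

    rocksdb::Status FullMergeSketch(std::string* result) {
      AppendOperator op;
      rocksdb::Slice base("a");
      // Operands arrive oldest-first, exactly as MergeContext::GetOperands()
      // hands them to TimedFullMerge() inside MergeUntil().
      std::vector<rocksdb::Slice> operands = {rocksdb::Slice("b"),
                                              rocksdb::Slice("c")};
      // Passing a null Statistics pointer makes the timing block a no-op.
      return rocksdb::MergeHelper::TimedFullMerge(
          &op, rocksdb::Slice("key"), &base, operands, result,
          nullptr /* logger */, nullptr /* statistics */,
          rocksdb::Env::Default());
    }

With base value "a" and operands {"b", "c"}, result ends up holding "abc" and the returned status is OK; an empty operand list would simply copy the base value, as the implementation above shows.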
+//
+#ifndef MERGE_HELPER_H
+#define MERGE_HELPER_H
+
+#include <deque>
+#include <string>
+#include <vector>
+
+#include "db/dbformat.h"
+#include "db/merge_context.h"
+#include "rocksdb/compaction_filter.h"
+#include "rocksdb/env.h"
+#include "rocksdb/slice.h"
+#include "util/stop_watch.h"
+
+namespace rocksdb {
+
+class Comparator;
+class Iterator;
+class Logger;
+class MergeOperator;
+class Statistics;
+class InternalIterator;
+
+class MergeHelper {
+ public:
+  MergeHelper(Env* env, const Comparator* user_comparator,
+              const MergeOperator* user_merge_operator,
+              const CompactionFilter* compaction_filter, Logger* logger,
+              unsigned min_partial_merge_operands,
+              bool assert_valid_internal_key, SequenceNumber latest_snapshot,
+              int level = 0, Statistics* stats = nullptr)
+      : env_(env),
+        user_comparator_(user_comparator),
+        user_merge_operator_(user_merge_operator),
+        compaction_filter_(compaction_filter),
+        logger_(logger),
+        min_partial_merge_operands_(min_partial_merge_operands),
+        assert_valid_internal_key_(assert_valid_internal_key),
+        latest_snapshot_(latest_snapshot),
+        level_(level),
+        keys_(),
+        filter_timer_(env_),
+        total_filter_time_(0U),
+        stats_(stats) {
+    assert(user_comparator_ != nullptr);
+  }
+
+  // Wrapper around MergeOperator::FullMergeV2() that records perf statistics.
+  // Result of merge will be written to result if status returned is OK.
+  // If operands is empty, the value will simply be copied to result.
+  // Returns one of the following statuses:
+  // - OK: Entries were successfully merged.
+  // - Corruption: Merge operator reported unsuccessful merge.
+  static Status TimedFullMerge(const MergeOperator* merge_operator,
+                               const Slice& key, const Slice* value,
+                               const std::vector<Slice>& operands,
+                               std::string* result, Logger* logger,
+                               Statistics* statistics, Env* env,
+                               Slice* result_operand = nullptr);
+
+  // Merge entries until we hit
+  //     - a corrupted key
+  //     - a Put/Delete,
+  //     - a different user key,
+  //     - a specific sequence number (snapshot boundary),
+  //  or - the end of iteration
+  // iter: (IN)  points to the first merge type entry
+  //       (OUT) points to the first entry not included in the merge process
+  // stop_before: (IN) a sequence number that merge should not cross.
+  //                   0 means no restriction
+  // at_bottom:   (IN) true if the iterator covers the bottom level, which
+  //                   means we could reach the start of the history of this
+  //                   user key.
+  //
+  // Returns one of the following statuses:
+  // - OK: Entries were successfully merged.
+  // - MergeInProgress: Put/Delete not encountered and unable to merge operands.
+  // - Corruption: Merge operator reported unsuccessful merge or a corrupted
+  //   key has been encountered and not expected (applies only when compiling
+  //   with asserts removed).
+  //
+  // REQUIRED: The first key in the input is not corrupted.
+  Status MergeUntil(InternalIterator* iter,
+                    const SequenceNumber stop_before = 0,
+                    const bool at_bottom = false);
+
+  // Filters a merge operand using the compaction filter specified
+  // in the constructor. Returns true if the operand should be filtered out.
+  bool FilterMerge(const Slice& user_key, const Slice& value_slice);
+
+  // Query the merge result
+  // These are valid until the next MergeUntil call
+  // If the merging was successful:
+  //   - keys() contains a single element with the latest sequence number of
+  //     the merges. The type will be Put or Merge. See IMPORTANT 1 note, below.
+  //   - values() contains a single element with the result of merging all the
+  //     operands together
+  //
+  // IMPORTANT 1: the key type could change after the MergeUntil call.
+  //        Put/Delete + Merge + ... + Merge => Put
+  //        Merge + ... + Merge => Merge
+  //
+  // If the merge operator is not associative, and if a Put/Delete is not found
+  // then the merging will be unsuccessful. In this case:
+  //   - keys() contains the list of internal keys seen in order of iteration.
+  //   - values() contains the list of values (merges) seen in the same order.
+  //     values() is parallel to keys() so that the first entry in
+  //     keys() is the key associated with the first entry in values()
+  //     and so on. These lists will be the same length.
+  //     All of these pairs will be merges over the same user key.
+  //     See IMPORTANT 2 note below.
+  //
+  // IMPORTANT 2: The entries were traversed in order from BACK to FRONT.
+  //              So keys().back() was the first key seen by iterator.
+  // TODO: Re-style this comment to be like the first one
+  const std::deque<std::string>& keys() const { return keys_; }
+  const std::vector<Slice>& values() const {
+    return merge_context_.GetOperands();
+  }
+  uint64_t TotalFilterTime() const { return total_filter_time_; }
+  bool HasOperator() const { return user_merge_operator_ != nullptr; }
+
+ private:
+  Env* env_;
+  const Comparator* user_comparator_;
+  const MergeOperator* user_merge_operator_;
+  const CompactionFilter* compaction_filter_;
+  Logger* logger_;
+  unsigned min_partial_merge_operands_;
+  bool assert_valid_internal_key_;  // enforce no internal key corruption?
+  SequenceNumber latest_snapshot_;
+  int level_;
+
+  // the scratch area that holds the result of MergeUntil
+  // valid up to the next MergeUntil call
+
+  // Keeps track of the sequence of keys seen
+  std::deque<std::string> keys_;
+  // Parallel with keys_; stores the operands
+  mutable MergeContext merge_context_;
+
+  StopWatchNano filter_timer_;
+  uint64_t total_filter_time_;
+  Statistics* stats_;
+};
+
+// MergeOutputIterator can be used to iterate over the result of a merge.
+class MergeOutputIterator {
+ public:
+  // The MergeOutputIterator is bound to a MergeHelper instance.
+  explicit MergeOutputIterator(const MergeHelper* merge_helper);
+
+  // Seeks to the first record in the output.
+  void SeekToFirst();
+  // Advances to the next record in the output.
+  void Next();
+
+  Slice key() { return Slice(*it_keys_); }
+  Slice value() { return Slice(*it_values_); }
+  bool Valid() { return it_keys_ != merge_helper_->keys().rend(); }
+
+ private:
+  const MergeHelper* merge_helper_;
+  std::deque<std::string>::const_reverse_iterator it_keys_;
+  std::vector<Slice>::const_reverse_iterator it_values_;
+};
+
+}  // namespace rocksdb
+
+#endif
diff --git a/external/rocksdb/db/merge_helper_test.cc b/external/rocksdb/db/merge_helper_test.cc
new file mode 100755
index 0000000000..b21f560782
--- /dev/null
+++ b/external/rocksdb/db/merge_helper_test.cc
@@ -0,0 +1,289 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
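A short sketch of draining a finished merge through the MergeOutputIterator declared in merge_helper.h above (DrainMergeResult is a hypothetical helper, not part of the patch; only interfaces shown in this patch are assumed). Because keys_ is kept back-to-front, the reverse iterators start at keys().back(), i.e. at the first, highest-sequence key the merge saw:

    #include "db/merge_helper.h"

    void DrainMergeResult(const rocksdb::MergeHelper& merge_helper) {
      // Typically called after MergeHelper::MergeUntil() has returned, e.g.
      // with Status::MergeInProgress() and several stacked operands.
      rocksdb::MergeOutputIterator out(&merge_helper);
      for (out.SeekToFirst(); out.Valid(); out.Next()) {
        rocksdb::Slice key = out.key();      // internal key, newest first
        rocksdb::Slice value = out.value();  // matching operand / merge result
        (void)key;
        (void)value;  // consume the (key, value) pair here
      }
    }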
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "db/merge_helper.h"
+#include "rocksdb/comparator.h"
+#include "util/coding.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+#include "utilities/merge_operators.h"
+
+namespace rocksdb {
+
+class MergeHelperTest : public testing::Test {
+ public:
+  MergeHelperTest() { env_ = Env::Default(); }
+
+  ~MergeHelperTest() = default;
+
+  Status Run(SequenceNumber stop_before, bool at_bottom,
+             SequenceNumber latest_snapshot = 0) {
+    iter_.reset(new test::VectorIterator(ks_, vs_));
+    iter_->SeekToFirst();
+    merge_helper_.reset(new MergeHelper(env_, BytewiseComparator(),
+                                        merge_op_.get(), filter_.get(), nullptr,
+                                        2U, false, latest_snapshot));
+    return merge_helper_->MergeUntil(iter_.get(), stop_before, at_bottom);
+  }
+
+  void AddKeyVal(const std::string& user_key, const SequenceNumber& seq,
+                 const ValueType& t, const std::string& val,
+                 bool corrupt = false) {
+    InternalKey ikey(user_key, seq, t);
+    if (corrupt) {
+      test::CorruptKeyType(&ikey);
+    }
+    ks_.push_back(ikey.Encode().ToString());
+    vs_.push_back(val);
+  }
+
+  Env* env_;
+  std::unique_ptr<test::VectorIterator> iter_;
+  std::shared_ptr<MergeOperator> merge_op_;
+  std::unique_ptr<MergeHelper> merge_helper_;
+  std::vector<std::string> ks_;
+  std::vector<std::string> vs_;
+  std::unique_ptr<test::FilterNumber> filter_;
+};
+
+// If MergeHelper encounters a new key on the last level, we know that
+// the key has no more history and it can merge keys.
+TEST_F(MergeHelperTest, MergeAtBottomSuccess) {
+  merge_op_ = MergeOperators::CreateUInt64AddOperator();
+
+  AddKeyVal("a", 20, kTypeMerge, test::EncodeInt(1U));
+  AddKeyVal("a", 10, kTypeMerge, test::EncodeInt(3U));
+  AddKeyVal("b", 10, kTypeMerge, test::EncodeInt(4U));  // <- iter_ after merge
+
+  ASSERT_TRUE(Run(0, true).ok());
+  ASSERT_EQ(ks_[2], iter_->key());
+  ASSERT_EQ(test::KeyStr("a", 20, kTypeValue), merge_helper_->keys()[0]);
+  ASSERT_EQ(test::EncodeInt(4U), merge_helper_->values()[0]);
+  ASSERT_EQ(1U, merge_helper_->keys().size());
+  ASSERT_EQ(1U, merge_helper_->values().size());
+}
+
+// Merging with a value results in a successful merge.
+TEST_F(MergeHelperTest, MergeValue) {
+  merge_op_ = MergeOperators::CreateUInt64AddOperator();
+
+  AddKeyVal("a", 40, kTypeMerge, test::EncodeInt(1U));
+  AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(3U));
+  AddKeyVal("a", 20, kTypeValue, test::EncodeInt(4U));  // <- iter_ after merge
+  AddKeyVal("a", 10, kTypeMerge, test::EncodeInt(1U));
+
+  ASSERT_TRUE(Run(0, false).ok());
+  ASSERT_EQ(ks_[3], iter_->key());
+  ASSERT_EQ(test::KeyStr("a", 40, kTypeValue), merge_helper_->keys()[0]);
+  ASSERT_EQ(test::EncodeInt(8U), merge_helper_->values()[0]);
+  ASSERT_EQ(1U, merge_helper_->keys().size());
+  ASSERT_EQ(1U, merge_helper_->values().size());
+}
+
+// Merging stops before a snapshot.
+TEST_F(MergeHelperTest, SnapshotBeforeValue) { + merge_op_ = MergeOperators::CreateUInt64AddOperator(); + + AddKeyVal("a", 50, kTypeMerge, test::EncodeInt(1U)); + AddKeyVal("a", 40, kTypeMerge, test::EncodeInt(3U)); // <- iter_ after merge + AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(1U)); + AddKeyVal("a", 20, kTypeValue, test::EncodeInt(4U)); + AddKeyVal("a", 10, kTypeMerge, test::EncodeInt(1U)); + + ASSERT_TRUE(Run(31, true).IsMergeInProgress()); + ASSERT_EQ(ks_[2], iter_->key()); + ASSERT_EQ(test::KeyStr("a", 50, kTypeMerge), merge_helper_->keys()[0]); + ASSERT_EQ(test::EncodeInt(4U), merge_helper_->values()[0]); + ASSERT_EQ(1U, merge_helper_->keys().size()); + ASSERT_EQ(1U, merge_helper_->values().size()); +} + +// MergeHelper preserves the operand stack for merge operators that +// cannot do a partial merge. +TEST_F(MergeHelperTest, NoPartialMerge) { + merge_op_ = MergeOperators::CreateStringAppendTESTOperator(); + + AddKeyVal("a", 50, kTypeMerge, "v2"); + AddKeyVal("a", 40, kTypeMerge, "v"); // <- iter_ after merge + AddKeyVal("a", 30, kTypeMerge, "v"); + + ASSERT_TRUE(Run(31, true).IsMergeInProgress()); + ASSERT_EQ(ks_[2], iter_->key()); + ASSERT_EQ(test::KeyStr("a", 40, kTypeMerge), merge_helper_->keys()[0]); + ASSERT_EQ("v", merge_helper_->values()[0]); + ASSERT_EQ(test::KeyStr("a", 50, kTypeMerge), merge_helper_->keys()[1]); + ASSERT_EQ("v2", merge_helper_->values()[1]); + ASSERT_EQ(2U, merge_helper_->keys().size()); + ASSERT_EQ(2U, merge_helper_->values().size()); +} + +// A single operand can not be merged. +TEST_F(MergeHelperTest, SingleOperand) { + merge_op_ = MergeOperators::CreateUInt64AddOperator(); + + AddKeyVal("a", 50, kTypeMerge, test::EncodeInt(1U)); + + ASSERT_TRUE(Run(31, true).IsMergeInProgress()); + ASSERT_FALSE(iter_->Valid()); + ASSERT_EQ(test::KeyStr("a", 50, kTypeMerge), merge_helper_->keys()[0]); + ASSERT_EQ(test::EncodeInt(1U), merge_helper_->values()[0]); + ASSERT_EQ(1U, merge_helper_->keys().size()); + ASSERT_EQ(1U, merge_helper_->values().size()); +} + +// Merging with a deletion turns the deletion into a value +TEST_F(MergeHelperTest, MergeDeletion) { + merge_op_ = MergeOperators::CreateUInt64AddOperator(); + + AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(3U)); + AddKeyVal("a", 20, kTypeDeletion, ""); + + ASSERT_TRUE(Run(15, false).ok()); + ASSERT_FALSE(iter_->Valid()); + ASSERT_EQ(test::KeyStr("a", 30, kTypeValue), merge_helper_->keys()[0]); + ASSERT_EQ(test::EncodeInt(3U), merge_helper_->values()[0]); + ASSERT_EQ(1U, merge_helper_->keys().size()); + ASSERT_EQ(1U, merge_helper_->values().size()); +} + +// The merge helper stops upon encountering a corrupt key +TEST_F(MergeHelperTest, CorruptKey) { + merge_op_ = MergeOperators::CreateUInt64AddOperator(); + + AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(3U)); + AddKeyVal("a", 25, kTypeMerge, test::EncodeInt(1U)); + // Corrupt key + AddKeyVal("a", 20, kTypeDeletion, "", true); // <- iter_ after merge + + ASSERT_TRUE(Run(15, false).IsMergeInProgress()); + ASSERT_EQ(ks_[2], iter_->key()); + ASSERT_EQ(test::KeyStr("a", 30, kTypeMerge), merge_helper_->keys()[0]); + ASSERT_EQ(test::EncodeInt(4U), merge_helper_->values()[0]); + ASSERT_EQ(1U, merge_helper_->keys().size()); + ASSERT_EQ(1U, merge_helper_->values().size()); +} + +// The compaction filter is called on every merge operand +TEST_F(MergeHelperTest, FilterMergeOperands) { + merge_op_ = MergeOperators::CreateUInt64AddOperator(); + filter_.reset(new test::FilterNumber(5U)); + + AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(3U)); + AddKeyVal("a", 
29, kTypeMerge, test::EncodeInt(5U)); // Filtered + AddKeyVal("a", 28, kTypeMerge, test::EncodeInt(3U)); + AddKeyVal("a", 27, kTypeMerge, test::EncodeInt(1U)); + AddKeyVal("a", 26, kTypeMerge, test::EncodeInt(5U)); // Filtered + AddKeyVal("a", 25, kTypeValue, test::EncodeInt(1U)); + + ASSERT_TRUE(Run(15, false).ok()); + ASSERT_FALSE(iter_->Valid()); + MergeOutputIterator merge_output_iter(merge_helper_.get()); + merge_output_iter.SeekToFirst(); + ASSERT_EQ(test::KeyStr("a", 30, kTypeValue), + merge_output_iter.key().ToString()); + ASSERT_EQ(test::EncodeInt(8U), merge_output_iter.value().ToString()); + merge_output_iter.Next(); + ASSERT_FALSE(merge_output_iter.Valid()); +} + +TEST_F(MergeHelperTest, FilterAllMergeOperands) { + merge_op_ = MergeOperators::CreateUInt64AddOperator(); + filter_.reset(new test::FilterNumber(5U)); + + AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(5U)); + AddKeyVal("a", 29, kTypeMerge, test::EncodeInt(5U)); + AddKeyVal("a", 28, kTypeMerge, test::EncodeInt(5U)); + AddKeyVal("a", 27, kTypeMerge, test::EncodeInt(5U)); + AddKeyVal("a", 26, kTypeMerge, test::EncodeInt(5U)); + AddKeyVal("a", 25, kTypeMerge, test::EncodeInt(5U)); + + // filtered out all + ASSERT_TRUE(Run(15, false).ok()); + ASSERT_FALSE(iter_->Valid()); + MergeOutputIterator merge_output_iter(merge_helper_.get()); + merge_output_iter.SeekToFirst(); + ASSERT_FALSE(merge_output_iter.Valid()); + + // we have one operand that will survive because it's a delete + AddKeyVal("a", 24, kTypeDeletion, test::EncodeInt(5U)); + AddKeyVal("b", 23, kTypeValue, test::EncodeInt(5U)); + ASSERT_TRUE(Run(15, true).ok()); + merge_output_iter = MergeOutputIterator(merge_helper_.get()); + ASSERT_TRUE(iter_->Valid()); + merge_output_iter.SeekToFirst(); + ASSERT_FALSE(merge_output_iter.Valid()); + + // when all merge operands are filtered out, we leave the iterator pointing to + // the Put/Delete that survived + ASSERT_EQ(test::KeyStr("a", 24, kTypeDeletion), iter_->key().ToString()); + ASSERT_EQ(test::EncodeInt(5U), iter_->value().ToString()); +} + +// Make sure that merge operands are filtered at the beginning +TEST_F(MergeHelperTest, FilterFirstMergeOperand) { + merge_op_ = MergeOperators::CreateUInt64AddOperator(); + filter_.reset(new test::FilterNumber(5U)); + + AddKeyVal("a", 31, kTypeMerge, test::EncodeInt(5U)); // Filtered + AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(5U)); // Filtered + AddKeyVal("a", 29, kTypeMerge, test::EncodeInt(2U)); + AddKeyVal("a", 28, kTypeMerge, test::EncodeInt(1U)); + AddKeyVal("a", 27, kTypeMerge, test::EncodeInt(3U)); + AddKeyVal("a", 26, kTypeMerge, test::EncodeInt(5U)); // Filtered + AddKeyVal("a", 25, kTypeMerge, test::EncodeInt(5U)); // Filtered + AddKeyVal("b", 24, kTypeValue, test::EncodeInt(5U)); // next user key + + ASSERT_OK(Run(15, true)); + ASSERT_TRUE(iter_->Valid()); + MergeOutputIterator merge_output_iter(merge_helper_.get()); + merge_output_iter.SeekToFirst(); + // sequence number is 29 here, because the first merge operand got filtered + // out + ASSERT_EQ(test::KeyStr("a", 29, kTypeValue), + merge_output_iter.key().ToString()); + ASSERT_EQ(test::EncodeInt(6U), merge_output_iter.value().ToString()); + merge_output_iter.Next(); + ASSERT_FALSE(merge_output_iter.Valid()); + + // make sure that we're passing user keys into the filter + ASSERT_EQ("a", filter_->last_merge_operand_key()); +} + +// Make sure that merge operands are not filtered out if there's a snapshot +// pointing at them +TEST_F(MergeHelperTest, DontFilterMergeOperandsBeforeSnapshotTest) { + merge_op_ = 
MergeOperators::CreateUInt64AddOperator();
+  filter_.reset(new test::FilterNumber(5U));
+
+  AddKeyVal("a", 31, kTypeMerge, test::EncodeInt(5U));
+  AddKeyVal("a", 30, kTypeMerge, test::EncodeInt(5U));
+  AddKeyVal("a", 29, kTypeMerge, test::EncodeInt(2U));
+  AddKeyVal("a", 28, kTypeMerge, test::EncodeInt(1U));
+  AddKeyVal("a", 27, kTypeMerge, test::EncodeInt(3U));
+  AddKeyVal("a", 26, kTypeMerge, test::EncodeInt(5U));
+  AddKeyVal("a", 25, kTypeMerge, test::EncodeInt(5U));
+  AddKeyVal("b", 24, kTypeValue, test::EncodeInt(5U));
+
+  ASSERT_OK(Run(15, true, 32));
+  ASSERT_TRUE(iter_->Valid());
+  MergeOutputIterator merge_output_iter(merge_helper_.get());
+  merge_output_iter.SeekToFirst();
+  ASSERT_EQ(test::KeyStr("a", 31, kTypeValue),
+            merge_output_iter.key().ToString());
+  ASSERT_EQ(test::EncodeInt(26U), merge_output_iter.value().ToString());
+  merge_output_iter.Next();
+  ASSERT_FALSE(merge_output_iter.Valid());
+}
+
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/external/rocksdb/db/merge_operator.cc b/external/rocksdb/db/merge_operator.cc
new file mode 100755
index 0000000000..d4149f67ec
--- /dev/null
+++ b/external/rocksdb/db/merge_operator.cc
@@ -0,0 +1,86 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+/**
+ * Back-end implementation details specific to the Merge Operator.
+ */
+
+#include "rocksdb/merge_operator.h"
+
+namespace rocksdb {
+
+bool MergeOperator::FullMergeV2(const MergeOperationInput& merge_in,
+                                MergeOperationOutput* merge_out) const {
+  // If FullMergeV2 is not implemented, we convert the operand_list to
+  // std::deque and pass it to FullMerge
+  std::deque<std::string> operand_list_str;
+  for (auto& op : merge_in.operand_list) {
+    operand_list_str.emplace_back(op.data(), op.size());
+  }
+  return FullMerge(merge_in.key, merge_in.existing_value, operand_list_str,
+                   &merge_out->new_value, merge_in.logger);
+}
+
+// The default implementation of PartialMergeMulti, which invokes
+// PartialMerge multiple times internally and merges two operands at
+// a time.
+bool MergeOperator::PartialMergeMulti(const Slice& key,
+                                      const std::deque<Slice>& operand_list,
+                                      std::string* new_value,
+                                      Logger* logger) const {
+  assert(operand_list.size() >= 2);
+  // Simply loop through the operands
+  Slice temp_slice(operand_list[0]);
+
+  for (size_t i = 1; i < operand_list.size(); ++i) {
+    auto& operand = operand_list[i];
+    std::string temp_value;
+    if (!PartialMerge(key, temp_slice, operand, &temp_value, logger)) {
+      return false;
+    }
+    swap(temp_value, *new_value);
+    temp_slice = Slice(*new_value);
+  }
+
+  // The result will be in *new_value. All merges succeeded.
+  return true;
+}
+
+// Given a "real" merge from the library, call the user's
+// associative merge function one-by-one on each of the operands.
+// NOTE: It is assumed that the client's merge-operator will handle any errors.
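As the default PartialMergeMulti() above shows, operands are folded pairwise left-to-right through PartialMerge(): for operands [o1, o2, o3] the result is PM(PM(o1, o2), o3). Here is a sketch with the stock UInt64Add operator from utilities/merge_operators.h, before the AssociativeMergeOperator overrides below (PartialMergeMultiSketch is a hypothetical name; operands are fixed64-encoded as elsewhere in these tests):

    #include <deque>
    #include <string>

    #include "rocksdb/merge_operator.h"
    #include "rocksdb/slice.h"
    #include "util/coding.h"
    #include "utilities/merge_operators.h"

    bool PartialMergeMultiSketch(std::string* result) {
      auto op = rocksdb::MergeOperators::CreateUInt64AddOperator();
      std::deque<std::string> storage;  // keeps the encoded operands alive
      std::deque<rocksdb::Slice> operands;
      for (uint64_t v : {1u, 2u, 3u}) {
        std::string encoded;
        rocksdb::PutFixed64(&encoded, v);
        storage.push_back(encoded);
        operands.emplace_back(storage.back());
      }
      // Folds as ((1 + 2) + 3); *result ends up holding fixed64(6).
      return op->PartialMergeMulti(rocksdb::Slice("counter"), operands,
                                   result, nullptr /* logger */);
    }

The deque of strings owns the encoded operands while the Slices merely reference them; std::deque::push_back never invalidates references to existing elements, so the Slices stay valid.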
+bool AssociativeMergeOperator::FullMergeV2( + const MergeOperationInput& merge_in, + MergeOperationOutput* merge_out) const { + // Simply loop through the operands + Slice temp_existing; + const Slice* existing_value = merge_in.existing_value; + for (const auto& operand : merge_in.operand_list) { + std::string temp_value; + if (!Merge(merge_in.key, existing_value, operand, &temp_value, + merge_in.logger)) { + return false; + } + swap(temp_value, merge_out->new_value); + temp_existing = Slice(merge_out->new_value); + existing_value = &temp_existing; + } + + // The result will be in *new_value. All merges succeeded. + return true; +} + +// Call the user defined simple merge on the operands; +// NOTE: It is assumed that the client's merge-operator will handle any errors. +bool AssociativeMergeOperator::PartialMerge( + const Slice& key, + const Slice& left_operand, + const Slice& right_operand, + std::string* new_value, + Logger* logger) const { + return Merge(key, &left_operand, right_operand, new_value, logger); +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/merge_test.cc b/external/rocksdb/db/merge_test.cc new file mode 100755 index 0000000000..020f33ba6f --- /dev/null +++ b/external/rocksdb/db/merge_test.cc @@ -0,0 +1,515 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +#include +#include +#include + +#include "port/stack_trace.h" +#include "rocksdb/cache.h" +#include "rocksdb/comparator.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/merge_operator.h" +#include "rocksdb/utilities/db_ttl.h" +#include "db/dbformat.h" +#include "db/db_impl.h" +#include "db/write_batch_internal.h" +#include "utilities/merge_operators.h" +#include "util/testharness.h" + +using namespace rocksdb; + +namespace { +size_t num_merge_operator_calls; +void resetNumMergeOperatorCalls() { num_merge_operator_calls = 0; } + +size_t num_partial_merge_calls; +void resetNumPartialMergeCalls() { num_partial_merge_calls = 0; } +} + +class CountMergeOperator : public AssociativeMergeOperator { + public: + CountMergeOperator() { + mergeOperator_ = MergeOperators::CreateUInt64AddOperator(); + } + + virtual bool Merge(const Slice& key, + const Slice* existing_value, + const Slice& value, + std::string* new_value, + Logger* logger) const override { + assert(new_value->empty()); + ++num_merge_operator_calls; + if (existing_value == nullptr) { + new_value->assign(value.data(), value.size()); + return true; + } + + return mergeOperator_->PartialMerge( + key, + *existing_value, + value, + new_value, + logger); + } + + virtual bool PartialMergeMulti(const Slice& key, + const std::deque& operand_list, + std::string* new_value, + Logger* logger) const override { + assert(new_value->empty()); + ++num_partial_merge_calls; + return mergeOperator_->PartialMergeMulti(key, operand_list, new_value, + logger); + } + + virtual const char* Name() const override { + return "UInt64AddOperator"; + } + + private: + std::shared_ptr mergeOperator_; +}; + +namespace { +std::shared_ptr OpenDb(const std::string& dbname, const bool ttl = false, + const size_t max_successive_merges = 0, + const uint32_t min_partial_merge_operands = 2) { + DB* db; + Options options; + options.create_if_missing = true; + options.merge_operator = std::make_shared(); + 
+  options.max_successive_merges = max_successive_merges;
+  options.min_partial_merge_operands = min_partial_merge_operands;
+  Status s;
+  DestroyDB(dbname, Options());
+// DBWithTTL is not supported in ROCKSDB_LITE
+#ifndef ROCKSDB_LITE
+  if (ttl) {
+    std::cout << "Opening database with TTL\n";
+    DBWithTTL* db_with_ttl;
+    s = DBWithTTL::Open(options, dbname, &db_with_ttl);
+    db = db_with_ttl;
+  } else {
+    s = DB::Open(options, dbname, &db);
+  }
+#else
+  assert(!ttl);
+  s = DB::Open(options, dbname, &db);
+#endif  // !ROCKSDB_LITE
+  if (!s.ok()) {
+    std::cerr << s.ToString() << std::endl;
+    assert(false);
+  }
+  return std::shared_ptr<DB>(db);
+}
+}  // namespace
+
+// Imagine we are maintaining a set of uint64 counters.
+// Each counter has a distinct name. And we would like
+// to support four high level operations:
+// set, add, get and remove
+// This is a quick implementation without a Merge operation.
+class Counters {
+ protected:
+  std::shared_ptr<DB> db_;
+
+  WriteOptions put_option_;
+  ReadOptions get_option_;
+  WriteOptions delete_option_;
+
+  uint64_t default_;
+
+ public:
+  explicit Counters(std::shared_ptr<DB> db, uint64_t defaultCount = 0)
+      : db_(db),
+        put_option_(),
+        get_option_(),
+        delete_option_(),
+        default_(defaultCount) {
+    assert(db_);
+  }
+
+  virtual ~Counters() {}
+
+  // public interface of Counters.
+  // All four functions return false
+  // if the underlying level db operation failed.
+
+  // mapped to a leveldb Put
+  bool set(const std::string& key, uint64_t value) {
+    // just treat the internal rep of int64 as the string
+    Slice slice((char *)&value, sizeof(value));
+    auto s = db_->Put(put_option_, key, slice);
+
+    if (s.ok()) {
+      return true;
+    } else {
+      std::cerr << s.ToString() << std::endl;
+      return false;
+    }
+  }
+
+  // mapped to a rocksdb Delete
+  bool remove(const std::string& key) {
+    auto s = db_->Delete(delete_option_, key);
+
+    if (s.ok()) {
+      return true;
+    } else {
+      std::cerr << s.ToString() << std::endl;
+      return false;
+    }
+  }
+
+  // mapped to a rocksdb Get
+  bool get(const std::string& key, uint64_t* value) {
+    std::string str;
+    auto s = db_->Get(get_option_, key, &str);
+
+    if (s.IsNotFound()) {
+      // return default value if not found;
+      *value = default_;
+      return true;
+    } else if (s.ok()) {
+      // deserialization
+      if (str.size() != sizeof(uint64_t)) {
+        std::cerr << "value corruption\n";
+        return false;
+      }
+      *value = DecodeFixed64(&str[0]);
+      return true;
+    } else {
+      std::cerr << s.ToString() << std::endl;
+      return false;
+    }
+  }
+
+  // 'add' is implemented as get -> modify -> set
+  // An alternative is a single merge operation, see MergeBasedCounters
+  virtual bool add(const std::string& key, uint64_t value) {
+    uint64_t base = default_;
+    return get(key, &base) && set(key, base + value);
+  }
+
+  // convenience functions for testing
+  void assert_set(const std::string& key, uint64_t value) {
+    assert(set(key, value));
+  }
+
+  void assert_remove(const std::string& key) { assert(remove(key)); }
+
+  uint64_t assert_get(const std::string& key) {
+    uint64_t value = default_;
+    int result = get(key, &value);
+    assert(result);
+    if (result == 0) exit(1);  // Disable unused variable warning.
+    return value;
+  }
+
+  void assert_add(const std::string& key, uint64_t value) {
+    int result = add(key, value);
+    assert(result);
+    if (result == 0) exit(1);  // Disable unused variable warning.
+  }
+};
+
+// Implement 'add' directly with the new Merge operation
+class MergeBasedCounters : public Counters {
+ private:
+  WriteOptions merge_option_;  // for merge
+
+ public:
+  explicit MergeBasedCounters(std::shared_ptr<DB> db, uint64_t defaultCount = 0)
+      : Counters(db, defaultCount),
+        merge_option_() {
+  }
+
+  // mapped to a rocksdb Merge operation
+  virtual bool add(const std::string& key, uint64_t value) override {
+    char encoded[sizeof(uint64_t)];
+    EncodeFixed64(encoded, value);
+    Slice slice(encoded, sizeof(uint64_t));
+    auto s = db_->Merge(merge_option_, key, slice);
+
+    if (s.ok()) {
+      return true;
+    } else {
+      std::cerr << s.ToString() << std::endl;
+      return false;
+    }
+  }
+};
+
+namespace {
+void dumpDb(DB* db) {
+  auto it = unique_ptr<Iterator>(db->NewIterator(ReadOptions()));
+  for (it->SeekToFirst(); it->Valid(); it->Next()) {
+    uint64_t value = DecodeFixed64(it->value().data());
+    std::cout << it->key().ToString() << ": " << value << std::endl;
+  }
+  assert(it->status().ok());  // Check for any errors found during the scan
+}
+
+void testCounters(Counters& counters, DB* db, bool test_compaction) {
+  FlushOptions o;
+  o.wait = true;
+
+  counters.assert_set("a", 1);
+
+  if (test_compaction) db->Flush(o);
+
+  assert(counters.assert_get("a") == 1);
+
+  counters.assert_remove("b");
+
+  // default value is 0 if non-existent
+  assert(counters.assert_get("b") == 0);
+
+  counters.assert_add("a", 2);
+
+  if (test_compaction) db->Flush(o);
+
+  // 1+2 = 3
+  assert(counters.assert_get("a") == 3);
+
+  dumpDb(db);
+
+  std::cout << "1\n";
+
+  // 1+...+49 = 49*50/2 = 1225
+  uint64_t sum = 0;
+  for (int i = 1; i < 50; i++) {
+    counters.assert_add("b", i);
+    sum += i;
+  }
+  assert(counters.assert_get("b") == sum);
+
+  std::cout << "2\n";
+  dumpDb(db);
+
+  std::cout << "3\n";
+
+  if (test_compaction) {
+    db->Flush(o);
+
+    std::cout << "Compaction started ...\n";
+    db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+    std::cout << "Compaction ended\n";
+
+    dumpDb(db);
+
+    assert(counters.assert_get("a") == 3);
+    assert(counters.assert_get("b") == sum);
+  }
+}
+
+void testSuccessiveMerge(Counters& counters, size_t max_num_merges,
+                         size_t num_merges) {
+  counters.assert_remove("z");
+  uint64_t sum = 0;
+
+  for (size_t i = 1; i <= num_merges; ++i) {
+    resetNumMergeOperatorCalls();
+    counters.assert_add("z", i);
+    sum += i;
+
+    if (i % (max_num_merges + 1) == 0) {
+      assert(num_merge_operator_calls == max_num_merges + 1);
+    } else {
+      assert(num_merge_operator_calls == 0);
+    }
+
+    resetNumMergeOperatorCalls();
+    assert(counters.assert_get("z") == sum);
+    assert(num_merge_operator_calls == i % (max_num_merges + 1));
+  }
+}
+
+void testPartialMerge(Counters* counters, DB* db, size_t max_merge,
+                      size_t min_merge, size_t count) {
+  FlushOptions o;
+  o.wait = true;
+
+  // Test case 1: partial merge should be called when the number of merge
+  //              operands exceeds the threshold.
+  uint64_t tmp_sum = 0;
+  resetNumPartialMergeCalls();
+  for (size_t i = 1; i <= count; i++) {
+    counters->assert_add("b", i);
+    tmp_sum += i;
+  }
+  db->Flush(o);
+  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_EQ(tmp_sum, counters->assert_get("b"));
+  if (count > max_merge) {
+    // in this case, FullMerge should be called instead.
+    ASSERT_EQ(num_partial_merge_calls, 0U);
+  } else {
+    // if count >= min_merge, then partial merge should be called once.
+    ASSERT_EQ((count >= min_merge), (num_partial_merge_calls == 1));
+  }
+
+  // Test case 2: partial merge should not be called when a put is found.
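+  // (A put supplies a full base value, so compaction can resolve the operand
+  // stack with FullMerge directly instead of combining operands pairwise.)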
+  resetNumPartialMergeCalls();
+  tmp_sum = 0;
+  db->Put(rocksdb::WriteOptions(), "c", "10");
+  for (size_t i = 1; i <= count; i++) {
+    counters->assert_add("c", i);
+    tmp_sum += i;
+  }
+  db->Flush(o);
+  db->CompactRange(CompactRangeOptions(), nullptr, nullptr);
+  ASSERT_EQ(tmp_sum, counters->assert_get("c"));
+  ASSERT_EQ(num_partial_merge_calls, 0U);
+}
+
+void testSingleBatchSuccessiveMerge(DB* db, size_t max_num_merges,
+                                    size_t num_merges) {
+  assert(num_merges > max_num_merges);
+
+  Slice key("BatchSuccessiveMerge");
+  uint64_t merge_value = 1;
+  Slice merge_value_slice((char *)&merge_value, sizeof(merge_value));
+
+  // Create the batch
+  WriteBatch batch;
+  for (size_t i = 0; i < num_merges; ++i) {
+    batch.Merge(key, merge_value_slice);
+  }
+
+  // Apply to memtable and count the number of merges
+  resetNumMergeOperatorCalls();
+  {
+    Status s = db->Write(WriteOptions(), &batch);
+    assert(s.ok());
+  }
+  ASSERT_EQ(
+      num_merge_operator_calls,
+      static_cast<size_t>(num_merges - (num_merges % (max_num_merges + 1))));
+
+  // Get the value
+  resetNumMergeOperatorCalls();
+  std::string get_value_str;
+  {
+    Status s = db->Get(ReadOptions(), key, &get_value_str);
+    assert(s.ok());
+  }
+  assert(get_value_str.size() == sizeof(uint64_t));
+  uint64_t get_value = DecodeFixed64(&get_value_str[0]);
+  ASSERT_EQ(get_value, num_merges * merge_value);
+  ASSERT_EQ(num_merge_operator_calls,
+            static_cast<size_t>((num_merges % (max_num_merges + 1))));
+}
+
+void runTest(int argc, const std::string& dbname, const bool use_ttl = false) {
+  bool compact = false;
+  if (argc > 1) {
+    compact = true;
+    std::cout << "Turn on Compaction\n";
+  }
+
+  {
+    auto db = OpenDb(dbname, use_ttl);
+
+    {
+      std::cout << "Test read-modify-write counters... \n";
+      Counters counters(db, 0);
+      testCounters(counters, db.get(), true);
+    }
+
+    {
+      std::cout << "Test merge-based counters... \n";
+      MergeBasedCounters counters(db, 0);
+      testCounters(counters, db.get(), compact);
+    }
+  }
+
+  DestroyDB(dbname, Options());
+
+  {
+    std::cout << "Test merge in memtable... 
\n"; + size_t max_merge = 5; + auto db = OpenDb(dbname, use_ttl, max_merge); + MergeBasedCounters counters(db, 0); + testCounters(counters, db.get(), compact); + testSuccessiveMerge(counters, max_merge, max_merge * 2); + testSingleBatchSuccessiveMerge(db.get(), 5, 7); + DestroyDB(dbname, Options()); + } + + { + std::cout << "Test Partial-Merge\n"; + size_t max_merge = 100; + for (uint32_t min_merge = 5; min_merge < 25; min_merge += 5) { + for (uint32_t count = min_merge - 1; count <= min_merge + 1; count++) { + auto db = OpenDb(dbname, use_ttl, max_merge, min_merge); + MergeBasedCounters counters(db, 0); + testPartialMerge(&counters, db.get(), max_merge, min_merge, count); + DestroyDB(dbname, Options()); + } + { + auto db = OpenDb(dbname, use_ttl, max_merge, min_merge); + MergeBasedCounters counters(db, 0); + testPartialMerge(&counters, db.get(), max_merge, min_merge, + min_merge * 10); + DestroyDB(dbname, Options()); + } + } + } + + { + std::cout << "Test merge-operator not set after reopen\n"; + { + auto db = OpenDb(dbname); + MergeBasedCounters counters(db, 0); + counters.add("test-key", 1); + counters.add("test-key", 1); + counters.add("test-key", 1); + db->CompactRange(CompactRangeOptions(), nullptr, nullptr); + } + + DB* reopen_db; + ASSERT_OK(DB::Open(Options(), dbname, &reopen_db)); + std::string value; + ASSERT_TRUE(!(reopen_db->Get(ReadOptions(), "test-key", &value).ok())); + delete reopen_db; + DestroyDB(dbname, Options()); + } + + /* Temporary remove this test + { + std::cout << "Test merge-operator not set after reopen (recovery case)\n"; + { + auto db = OpenDb(dbname); + MergeBasedCounters counters(db, 0); + counters.add("test-key", 1); + counters.add("test-key", 1); + counters.add("test-key", 1); + } + + DB* reopen_db; + ASSERT_TRUE(DB::Open(Options(), dbname, &reopen_db).IsInvalidArgument()); + } + */ +} +} // namespace + +int main(int argc, char *argv[]) { + //TODO: Make this test like a general rocksdb unit-test + rocksdb::port::InstallStackTraceHandler(); + runTest(argc, test::TmpDir() + "/merge_testdb"); +// DBWithTTL is not supported in ROCKSDB_LITE +#ifndef ROCKSDB_LITE + runTest(argc, test::TmpDir() + "/merge_testdbttl", true); // Run test on TTL database +#endif // !ROCKSDB_LITE + printf("Passed all tests!\n"); + return 0; +} diff --git a/external/rocksdb/db/options_file_test.cc b/external/rocksdb/db/options_file_test.cc new file mode 100755 index 0000000000..fbbc8c5529 --- /dev/null +++ b/external/rocksdb/db/options_file_test.cc @@ -0,0 +1,119 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+
+#ifndef ROCKSDB_LITE
+#include <string>
+
+#include "db/db_impl.h"
+#include "db/db_test_util.h"
+#include "rocksdb/options.h"
+#include "rocksdb/table.h"
+#include "util/testharness.h"
+
+namespace rocksdb {
+class OptionsFileTest : public testing::Test {
+ public:
+  OptionsFileTest() : dbname_(test::TmpDir() + "/options_file_test") {}
+
+  std::string dbname_;
+};
+
+namespace {
+void UpdateOptionsFiles(DB* db,
+                        std::unordered_set<std::string>* filename_history,
+                        int* options_files_count) {
+  std::vector<std::string> filenames;
+  db->GetEnv()->GetChildren(db->GetName(), &filenames);
+  uint64_t number;
+  FileType type;
+  *options_files_count = 0;
+  for (auto filename : filenames) {
+    if (ParseFileName(filename, &number, &type) && type == kOptionsFile) {
+      filename_history->insert(filename);
+      (*options_files_count)++;
+    }
+  }
+}
+
+// Verify whether the current Options Files are the latest ones.
+void VerifyOptionsFileName(
+    DB* db, const std::unordered_set<std::string>& past_filenames) {
+  std::vector<std::string> filenames;
+  std::unordered_set<std::string> current_filenames;
+  db->GetEnv()->GetChildren(db->GetName(), &filenames);
+  uint64_t number;
+  FileType type;
+  for (auto filename : filenames) {
+    if (ParseFileName(filename, &number, &type) && type == kOptionsFile) {
+      current_filenames.insert(filename);
+    }
+  }
+  for (auto past_filename : past_filenames) {
+    if (current_filenames.find(past_filename) != current_filenames.end()) {
+      continue;
+    }
+    for (auto filename : current_filenames) {
+      ASSERT_GT(filename, past_filename);
+    }
+  }
+}
+}  // namespace
+
+TEST_F(OptionsFileTest, NumberOfOptionsFiles) {
+  const int kReopenCount = 20;
+  Options opt;
+  opt.create_if_missing = true;
+  DestroyDB(dbname_, opt);
+  std::unordered_set<std::string> filename_history;
+  DB* db;
+  for (int i = 0; i < kReopenCount; ++i) {
+    ASSERT_OK(DB::Open(opt, dbname_, &db));
+    int num_options_files = 0;
+    UpdateOptionsFiles(db, &filename_history, &num_options_files);
+    ASSERT_GT(num_options_files, 0);
+    ASSERT_LE(num_options_files, 2);
+    // Make sure we always keep the latest option files.
+    VerifyOptionsFileName(db, filename_history);
+    delete db;
+  }
+}
+
+TEST_F(OptionsFileTest, OptionsFileName) {
+  const uint64_t kOptionsFileNum = 12345;
+  uint64_t number;
+  FileType type;
+
+  auto options_file_name = OptionsFileName("", kOptionsFileNum);
+  ASSERT_TRUE(ParseFileName(options_file_name, &number, &type, nullptr));
+  ASSERT_EQ(type, kOptionsFile);
+  ASSERT_EQ(number, kOptionsFileNum);
+
+  const uint64_t kTempOptionsFileNum = 54352;
+  auto temp_options_file_name = TempOptionsFileName("", kTempOptionsFileNum);
+  ASSERT_TRUE(ParseFileName(temp_options_file_name, &number, &type, nullptr));
+  ASSERT_NE(temp_options_file_name.find(kTempFileNameSuffix),
+            std::string::npos);
+  ASSERT_EQ(type, kTempFile);
+  ASSERT_EQ(number, kTempOptionsFileNum);
+}
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+#if !(defined NDEBUG) || !defined(OS_WIN)
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+#else
+  return 0;
+#endif  // !(defined NDEBUG) || !defined(OS_WIN)
+}
+#else
+
+#include <cstdio>
+
+int main(int argc, char** argv) {
+  printf("Skipped as Options file is not supported in RocksDBLite.\n");
+  return 0;
+}
+#endif  // !ROCKSDB_LITE
diff --git a/external/rocksdb/db/perf_context_test.cc b/external/rocksdb/db/perf_context_test.cc
new file mode 100755
index 0000000000..90d724dbe4
--- /dev/null
+++ b/external/rocksdb/db/perf_context_test.cc
@@ -0,0 +1,689 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +#include +#include +#include +#include + +#include "rocksdb/db.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/perf_context.h" +#include "rocksdb/slice_transform.h" +#include "util/histogram.h" +#include "util/instrumented_mutex.h" +#include "util/stop_watch.h" +#include "util/string_util.h" +#include "util/testharness.h" +#include "util/thread_status_util.h" +#include "utilities/merge_operators.h" + +bool FLAGS_random_key = false; +bool FLAGS_use_set_based_memetable = false; +int FLAGS_total_keys = 100; +int FLAGS_write_buffer_size = 1000000000; +int FLAGS_max_write_buffer_number = 8; +int FLAGS_min_write_buffer_number_to_merge = 7; +bool FLAGS_verbose = false; + +// Path to the database on file system +const std::string kDbName = rocksdb::test::TmpDir() + "/perf_context_test"; + +namespace rocksdb { + +std::shared_ptr OpenDb(bool read_only = false) { + DB* db; + Options options; + options.create_if_missing = true; + options.max_open_files = -1; + options.write_buffer_size = FLAGS_write_buffer_size; + options.max_write_buffer_number = FLAGS_max_write_buffer_number; + options.min_write_buffer_number_to_merge = + FLAGS_min_write_buffer_number_to_merge; + + if (FLAGS_use_set_based_memetable) { +#ifndef ROCKSDB_LITE + options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(0)); + options.memtable_factory.reset(NewHashSkipListRepFactory()); +#endif // ROCKSDB_LITE + } + + Status s; + if (!read_only) { + s = DB::Open(options, kDbName, &db); + } else { + s = DB::OpenForReadOnly(options, kDbName, &db); + } + EXPECT_OK(s); + return std::shared_ptr(db); +} + +class PerfContextTest : public testing::Test {}; + +TEST_F(PerfContextTest, SeekIntoDeletion) { + DestroyDB(kDbName, Options()); + auto db = OpenDb(); + WriteOptions write_options; + ReadOptions read_options; + + for (int i = 0; i < FLAGS_total_keys; ++i) { + std::string key = "k" + ToString(i); + std::string value = "v" + ToString(i); + + db->Put(write_options, key, value); + } + + for (int i = 0; i < FLAGS_total_keys -1 ; ++i) { + std::string key = "k" + ToString(i); + db->Delete(write_options, key); + } + + HistogramImpl hist_get; + HistogramImpl hist_get_time; + for (int i = 0; i < FLAGS_total_keys - 1; ++i) { + std::string key = "k" + ToString(i); + std::string value; + + perf_context.Reset(); + StopWatchNano timer(Env::Default()); + timer.Start(); + auto status = db->Get(read_options, key, &value); + auto elapsed_nanos = timer.ElapsedNanos(); + ASSERT_TRUE(status.IsNotFound()); + hist_get.Add(perf_context.user_key_comparison_count); + hist_get_time.Add(elapsed_nanos); + } + + if (FLAGS_verbose) { + std::cout << "Get user key comparison: \n" << hist_get.ToString() + << "Get time: \n" << hist_get_time.ToString(); + } + + { + HistogramImpl hist_seek_to_first; + std::unique_ptr iter(db->NewIterator(read_options)); + + perf_context.Reset(); + StopWatchNano timer(Env::Default(), true); + iter->SeekToFirst(); + hist_seek_to_first.Add(perf_context.user_key_comparison_count); + auto elapsed_nanos = timer.ElapsedNanos(); + + if (FLAGS_verbose) { + std::cout << "SeekToFirst uesr key comparison: \n" + << hist_seek_to_first.ToString() + << "ikey skipped: " << perf_context.internal_key_skipped_count + << "\n" + << "idelete skipped: " + << perf_context.internal_delete_skipped_count << "\n" + << "elapsed: 
" << elapsed_nanos << "\n"; + } + } + + HistogramImpl hist_seek; + for (int i = 0; i < FLAGS_total_keys; ++i) { + std::unique_ptr iter(db->NewIterator(read_options)); + std::string key = "k" + ToString(i); + + perf_context.Reset(); + StopWatchNano timer(Env::Default(), true); + iter->Seek(key); + auto elapsed_nanos = timer.ElapsedNanos(); + hist_seek.Add(perf_context.user_key_comparison_count); + if (FLAGS_verbose) { + std::cout << "seek cmp: " << perf_context.user_key_comparison_count + << " ikey skipped " << perf_context.internal_key_skipped_count + << " idelete skipped " + << perf_context.internal_delete_skipped_count + << " elapsed: " << elapsed_nanos << "ns\n"; + } + + perf_context.Reset(); + ASSERT_TRUE(iter->Valid()); + StopWatchNano timer2(Env::Default(), true); + iter->Next(); + auto elapsed_nanos2 = timer2.ElapsedNanos(); + if (FLAGS_verbose) { + std::cout << "next cmp: " << perf_context.user_key_comparison_count + << "elapsed: " << elapsed_nanos2 << "ns\n"; + } + } + + if (FLAGS_verbose) { + std::cout << "Seek uesr key comparison: \n" << hist_seek.ToString(); + } +} + +TEST_F(PerfContextTest, StopWatchNanoOverhead) { + // profile the timer cost by itself! + const int kTotalIterations = 1000000; + std::vector timings(kTotalIterations); + + StopWatchNano timer(Env::Default(), true); + for (auto& timing : timings) { + timing = timer.ElapsedNanos(true /* reset */); + } + + HistogramImpl histogram; + for (const auto timing : timings) { + histogram.Add(timing); + } + + if (FLAGS_verbose) { + std::cout << histogram.ToString(); + } +} + +TEST_F(PerfContextTest, StopWatchOverhead) { + // profile the timer cost by itself! + const int kTotalIterations = 1000000; + uint64_t elapsed = 0; + std::vector timings(kTotalIterations); + + StopWatch timer(Env::Default(), nullptr, 0, &elapsed); + for (auto& timing : timings) { + timing = elapsed; + } + + HistogramImpl histogram; + uint64_t prev_timing = 0; + for (const auto timing : timings) { + histogram.Add(timing - prev_timing); + prev_timing = timing; + } + + if (FLAGS_verbose) { + std::cout << histogram.ToString(); + } +} + +void ProfileQueries(bool enabled_time = false) { + DestroyDB(kDbName, Options()); // Start this test with a fresh DB + + auto db = OpenDb(); + + WriteOptions write_options; + ReadOptions read_options; + + HistogramImpl hist_put; + + HistogramImpl hist_get; + HistogramImpl hist_get_snapshot; + HistogramImpl hist_get_memtable; + HistogramImpl hist_get_files; + HistogramImpl hist_get_post_process; + HistogramImpl hist_num_memtable_checked; + + HistogramImpl hist_mget; + HistogramImpl hist_mget_snapshot; + HistogramImpl hist_mget_memtable; + HistogramImpl hist_mget_files; + HistogramImpl hist_mget_post_process; + HistogramImpl hist_mget_num_memtable_checked; + + HistogramImpl hist_write_pre_post; + HistogramImpl hist_write_wal_time; + HistogramImpl hist_write_memtable_time; + + uint64_t total_db_mutex_nanos = 0; + + if (FLAGS_verbose) { + std::cout << "Inserting " << FLAGS_total_keys << " key/value pairs\n...\n"; + } + + std::vector keys; + const int kFlushFlag = -1; + for (int i = 0; i < FLAGS_total_keys; ++i) { + keys.push_back(i); + if (i == FLAGS_total_keys / 2) { + // Issuing a flush in the middle. 
+ keys.push_back(kFlushFlag); + } + } + + if (FLAGS_random_key) { + std::random_shuffle(keys.begin(), keys.end()); + } +#ifndef NDEBUG + ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 1U); +#endif + int num_mutex_waited = 0; + for (const int i : keys) { + if (i == kFlushFlag) { + FlushOptions fo; + db->Flush(fo); + continue; + } + + std::string key = "k" + ToString(i); + std::string value = "v" + ToString(i); + + std::vector values; + + perf_context.Reset(); + db->Put(write_options, key, value); + if (++num_mutex_waited > 3) { +#ifndef NDEBUG + ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0U); +#endif + } + hist_write_pre_post.Add(perf_context.write_pre_and_post_process_time); + hist_write_wal_time.Add(perf_context.write_wal_time); + hist_write_memtable_time.Add(perf_context.write_memtable_time); + hist_put.Add(perf_context.user_key_comparison_count); + total_db_mutex_nanos += perf_context.db_mutex_lock_nanos; + } +#ifndef NDEBUG + ThreadStatusUtil::TEST_SetStateDelay(ThreadStatus::STATE_MUTEX_WAIT, 0U); +#endif + + for (const int i : keys) { + if (i == kFlushFlag) { + continue; + } + std::string key = "k" + ToString(i); + std::string expected_value = "v" + ToString(i); + std::string value; + + std::vector multiget_keys = {Slice(key)}; + std::vector values; + + perf_context.Reset(); + ASSERT_OK(db->Get(read_options, key, &value)); + ASSERT_EQ(expected_value, value); + hist_get_snapshot.Add(perf_context.get_snapshot_time); + hist_get_memtable.Add(perf_context.get_from_memtable_time); + hist_get_files.Add(perf_context.get_from_output_files_time); + hist_num_memtable_checked.Add(perf_context.get_from_memtable_count); + hist_get_post_process.Add(perf_context.get_post_process_time); + hist_get.Add(perf_context.user_key_comparison_count); + + perf_context.Reset(); + db->MultiGet(read_options, multiget_keys, &values); + hist_mget_snapshot.Add(perf_context.get_snapshot_time); + hist_mget_memtable.Add(perf_context.get_from_memtable_time); + hist_mget_files.Add(perf_context.get_from_output_files_time); + hist_mget_num_memtable_checked.Add(perf_context.get_from_memtable_count); + hist_mget_post_process.Add(perf_context.get_post_process_time); + hist_mget.Add(perf_context.user_key_comparison_count); + } + + if (FLAGS_verbose) { + std::cout << "Put uesr key comparison: \n" << hist_put.ToString() + << "Get uesr key comparison: \n" << hist_get.ToString() + << "MultiGet uesr key comparison: \n" << hist_get.ToString(); + std::cout << "Put(): Pre and Post Process Time: \n" + << hist_write_pre_post.ToString() << " Writing WAL time: \n" + << hist_write_wal_time.ToString() << "\n" + << " Writing Mem Table time: \n" + << hist_write_memtable_time.ToString() << "\n" + << " Total DB mutex nanos: \n" << total_db_mutex_nanos << "\n"; + + std::cout << "Get(): Time to get snapshot: \n" + << hist_get_snapshot.ToString() + << " Time to get value from memtables: \n" + << hist_get_memtable.ToString() << "\n" + << " Time to get value from output files: \n" + << hist_get_files.ToString() << "\n" + << " Number of memtables checked: \n" + << hist_num_memtable_checked.ToString() << "\n" + << " Time to post process: \n" << hist_get_post_process.ToString() + << "\n"; + + std::cout << "MultiGet(): Time to get snapshot: \n" + << hist_mget_snapshot.ToString() + << " Time to get value from memtables: \n" + << hist_mget_memtable.ToString() << "\n" + << " Time to get value from output files: \n" + << hist_mget_files.ToString() << "\n" + << " Number of memtables checked: \n" + << 
hist_mget_num_memtable_checked.ToString() << "\n" + << " Time to post process: \n" + << hist_mget_post_process.ToString() << "\n"; + } + + if (enabled_time) { + ASSERT_GT(hist_get.Average(), 0); + ASSERT_GT(hist_get_snapshot.Average(), 0); + ASSERT_GT(hist_get_memtable.Average(), 0); + ASSERT_GT(hist_get_files.Average(), 0); + ASSERT_GT(hist_get_post_process.Average(), 0); + ASSERT_GT(hist_num_memtable_checked.Average(), 0); + + ASSERT_GT(hist_mget.Average(), 0); + ASSERT_GT(hist_mget_snapshot.Average(), 0); + ASSERT_GT(hist_mget_memtable.Average(), 0); + ASSERT_GT(hist_mget_files.Average(), 0); + ASSERT_GT(hist_mget_post_process.Average(), 0); + ASSERT_GT(hist_mget_num_memtable_checked.Average(), 0); +#ifndef NDEBUG + ASSERT_GT(total_db_mutex_nanos, 2000U); +#endif + } + + db.reset(); + db = OpenDb(true); + + hist_get.Clear(); + hist_get_snapshot.Clear(); + hist_get_memtable.Clear(); + hist_get_files.Clear(); + hist_get_post_process.Clear(); + hist_num_memtable_checked.Clear(); + + hist_mget.Clear(); + hist_mget_snapshot.Clear(); + hist_mget_memtable.Clear(); + hist_mget_files.Clear(); + hist_mget_post_process.Clear(); + hist_mget_num_memtable_checked.Clear(); + + for (const int i : keys) { + if (i == kFlushFlag) { + continue; + } + std::string key = "k" + ToString(i); + std::string expected_value = "v" + ToString(i); + std::string value; + + std::vector multiget_keys = {Slice(key)}; + std::vector values; + + perf_context.Reset(); + ASSERT_OK(db->Get(read_options, key, &value)); + ASSERT_EQ(expected_value, value); + hist_get_snapshot.Add(perf_context.get_snapshot_time); + hist_get_memtable.Add(perf_context.get_from_memtable_time); + hist_get_files.Add(perf_context.get_from_output_files_time); + hist_num_memtable_checked.Add(perf_context.get_from_memtable_count); + hist_get_post_process.Add(perf_context.get_post_process_time); + hist_get.Add(perf_context.user_key_comparison_count); + + perf_context.Reset(); + db->MultiGet(read_options, multiget_keys, &values); + hist_mget_snapshot.Add(perf_context.get_snapshot_time); + hist_mget_memtable.Add(perf_context.get_from_memtable_time); + hist_mget_files.Add(perf_context.get_from_output_files_time); + hist_mget_num_memtable_checked.Add(perf_context.get_from_memtable_count); + hist_mget_post_process.Add(perf_context.get_post_process_time); + hist_mget.Add(perf_context.user_key_comparison_count); + } + + if (FLAGS_verbose) { + std::cout << "ReadOnly Get uesr key comparison: \n" << hist_get.ToString() + << "ReadOnly MultiGet uesr key comparison: \n" + << hist_mget.ToString(); + + std::cout << "ReadOnly Get(): Time to get snapshot: \n" + << hist_get_snapshot.ToString() + << " Time to get value from memtables: \n" + << hist_get_memtable.ToString() << "\n" + << " Time to get value from output files: \n" + << hist_get_files.ToString() << "\n" + << " Number of memtables checked: \n" + << hist_num_memtable_checked.ToString() << "\n" + << " Time to post process: \n" << hist_get_post_process.ToString() + << "\n"; + + std::cout << "ReadOnly MultiGet(): Time to get snapshot: \n" + << hist_mget_snapshot.ToString() + << " Time to get value from memtables: \n" + << hist_mget_memtable.ToString() << "\n" + << " Time to get value from output files: \n" + << hist_mget_files.ToString() << "\n" + << " Number of memtables checked: \n" + << hist_mget_num_memtable_checked.ToString() << "\n" + << " Time to post process: \n" + << hist_mget_post_process.ToString() << "\n"; + } + + if (enabled_time) { + ASSERT_GT(hist_get.Average(), 0); + 
ASSERT_GT(hist_get_memtable.Average(), 0); + ASSERT_GT(hist_get_files.Average(), 0); + ASSERT_GT(hist_num_memtable_checked.Average(), 0); + // In read-only mode Get(), no super version operation is needed + ASSERT_EQ(hist_get_post_process.Average(), 0); + ASSERT_EQ(hist_get_snapshot.Average(), 0); + + ASSERT_GT(hist_mget.Average(), 0); + ASSERT_GT(hist_mget_snapshot.Average(), 0); + ASSERT_GT(hist_mget_memtable.Average(), 0); + ASSERT_GT(hist_mget_files.Average(), 0); + ASSERT_GT(hist_mget_post_process.Average(), 0); + ASSERT_GT(hist_mget_num_memtable_checked.Average(), 0); + } +} + +#ifndef ROCKSDB_LITE +TEST_F(PerfContextTest, KeyComparisonCount) { + SetPerfLevel(kEnableCount); + ProfileQueries(); + + SetPerfLevel(kDisable); + ProfileQueries(); + + SetPerfLevel(kEnableTime); + ProfileQueries(true); +} +#endif // ROCKSDB_LITE + +// make perf_context_test +// export ROCKSDB_TESTS=PerfContextTest.SeekKeyComparison +// For one memtable: +// ./perf_context_test --write_buffer_size=500000 --total_keys=10000 +// For two memtables: +// ./perf_context_test --write_buffer_size=250000 --total_keys=10000 +// Specify --random_key=1 to shuffle the key before insertion +// Results show that, for sequential insertion, worst-case Seek Key comparison +// is close to the total number of keys (linear), when there is only one +// memtable. When there are two memtables, even the avg Seek Key comparison +// starts to become linear to the input size. + +TEST_F(PerfContextTest, SeekKeyComparison) { + DestroyDB(kDbName, Options()); + auto db = OpenDb(); + WriteOptions write_options; + ReadOptions read_options; + + if (FLAGS_verbose) { + std::cout << "Inserting " << FLAGS_total_keys << " key/value pairs\n...\n"; + } + + std::vector keys; + for (int i = 0; i < FLAGS_total_keys; ++i) { + keys.push_back(i); + } + + if (FLAGS_random_key) { + std::random_shuffle(keys.begin(), keys.end()); + } + + HistogramImpl hist_put_time; + HistogramImpl hist_wal_time; + HistogramImpl hist_time_diff; + + SetPerfLevel(kEnableTime); + StopWatchNano timer(Env::Default()); + for (const int i : keys) { + std::string key = "k" + ToString(i); + std::string value = "v" + ToString(i); + + perf_context.Reset(); + timer.Start(); + db->Put(write_options, key, value); + auto put_time = timer.ElapsedNanos(); + hist_put_time.Add(put_time); + hist_wal_time.Add(perf_context.write_wal_time); + hist_time_diff.Add(put_time - perf_context.write_wal_time); + } + + if (FLAGS_verbose) { + std::cout << "Put time:\n" << hist_put_time.ToString() << "WAL time:\n" + << hist_wal_time.ToString() << "time diff:\n" + << hist_time_diff.ToString(); + } + + HistogramImpl hist_seek; + HistogramImpl hist_next; + + for (int i = 0; i < FLAGS_total_keys; ++i) { + std::string key = "k" + ToString(i); + std::string value = "v" + ToString(i); + + std::unique_ptr iter(db->NewIterator(read_options)); + perf_context.Reset(); + iter->Seek(key); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(iter->value().ToString(), value); + hist_seek.Add(perf_context.user_key_comparison_count); + } + + std::unique_ptr iter(db->NewIterator(read_options)); + for (iter->SeekToFirst(); iter->Valid();) { + perf_context.Reset(); + iter->Next(); + hist_next.Add(perf_context.user_key_comparison_count); + } + + if (FLAGS_verbose) { + std::cout << "Seek:\n" << hist_seek.ToString() << "Next:\n" + << hist_next.ToString(); + } +} + +TEST_F(PerfContextTest, DBMutexLockCounter) { + int stats_code[] = {0, static_cast(DB_MUTEX_WAIT_MICROS)}; + for (PerfLevel perf_level : + {PerfLevel::kEnableTimeExceptForMutex, 
PerfLevel::kEnableTime}) { + for (int c = 0; c < 2; ++c) { + InstrumentedMutex mutex(nullptr, Env::Default(), stats_code[c]); + mutex.Lock(); + std::thread child_thread([&] { + SetPerfLevel(perf_level); + perf_context.Reset(); + ASSERT_EQ(perf_context.db_mutex_lock_nanos, 0); + mutex.Lock(); + mutex.Unlock(); + if (perf_level == PerfLevel::kEnableTimeExceptForMutex || + stats_code[c] != DB_MUTEX_WAIT_MICROS) { + ASSERT_EQ(perf_context.db_mutex_lock_nanos, 0); + } else { + // increment the counter only when it's a DB Mutex + ASSERT_GT(perf_context.db_mutex_lock_nanos, 0); + } + }); + Env::Default()->SleepForMicroseconds(100); + mutex.Unlock(); + child_thread.join(); + } + } +} + +TEST_F(PerfContextTest, FalseDBMutexWait) { + SetPerfLevel(kEnableTime); + int stats_code[] = {0, static_cast(DB_MUTEX_WAIT_MICROS)}; + for (int c = 0; c < 2; ++c) { + InstrumentedMutex mutex(nullptr, Env::Default(), stats_code[c]); + InstrumentedCondVar lock(&mutex); + perf_context.Reset(); + mutex.Lock(); + lock.TimedWait(100); + mutex.Unlock(); + if (stats_code[c] == static_cast(DB_MUTEX_WAIT_MICROS)) { + // increment the counter only when it's a DB Mutex + ASSERT_GT(perf_context.db_condition_wait_nanos, 0); + } else { + ASSERT_EQ(perf_context.db_condition_wait_nanos, 0); + } + } +} + +TEST_F(PerfContextTest, ToString) { + perf_context.Reset(); + perf_context.block_read_count = 12345; + + std::string zero_included = perf_context.ToString(); + ASSERT_NE(std::string::npos, zero_included.find("= 0")); + ASSERT_NE(std::string::npos, zero_included.find("= 12345")); + + std::string zero_excluded = perf_context.ToString(true); + ASSERT_EQ(std::string::npos, zero_excluded.find("= 0")); + ASSERT_NE(std::string::npos, zero_excluded.find("= 12345")); +} + +TEST_F(PerfContextTest, MergeOperatorTime) { + DestroyDB(kDbName, Options()); + DB* db; + Options options; + options.create_if_missing = true; + options.merge_operator = MergeOperators::CreateStringAppendOperator(); + Status s = DB::Open(options, kDbName, &db); + EXPECT_OK(s); + + std::string val; + ASSERT_OK(db->Merge(WriteOptions(), "k1", "val1")); + ASSERT_OK(db->Merge(WriteOptions(), "k1", "val2")); + ASSERT_OK(db->Merge(WriteOptions(), "k1", "val3")); + ASSERT_OK(db->Merge(WriteOptions(), "k1", "val4")); + + SetPerfLevel(kEnableTime); + perf_context.Reset(); + ASSERT_OK(db->Get(ReadOptions(), "k1", &val)); + EXPECT_GT(perf_context.merge_operator_time_nanos, 0); + + ASSERT_OK(db->Flush(FlushOptions())); + + perf_context.Reset(); + ASSERT_OK(db->Get(ReadOptions(), "k1", &val)); + EXPECT_GT(perf_context.merge_operator_time_nanos, 0); + + ASSERT_OK(db->CompactRange(CompactRangeOptions(), nullptr, nullptr)); + + perf_context.Reset(); + ASSERT_OK(db->Get(ReadOptions(), "k1", &val)); + EXPECT_GT(perf_context.merge_operator_time_nanos, 0); + + delete db; +} +} + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + + for (int i = 1; i < argc; i++) { + int n; + char junk; + + if (sscanf(argv[i], "--write_buffer_size=%d%c", &n, &junk) == 1) { + FLAGS_write_buffer_size = n; + } + + if (sscanf(argv[i], "--total_keys=%d%c", &n, &junk) == 1) { + FLAGS_total_keys = n; + } + + if (sscanf(argv[i], "--random_key=%d%c", &n, &junk) == 1 && + (n == 0 || n == 1)) { + FLAGS_random_key = n; + } + + if (sscanf(argv[i], "--use_set_based_memetable=%d%c", &n, &junk) == 1 && + (n == 0 || n == 1)) { + FLAGS_use_set_based_memetable = n; + } + + if (sscanf(argv[i], "--verbose=%d%c", &n, &junk) == 1 && + (n == 0 || n == 1)) { + FLAGS_verbose = n; + } + } + + if 
(FLAGS_verbose) {
+    std::cout << kDbName << "\n";
+  }
+
+  return RUN_ALL_TESTS();
+}
diff --git a/external/rocksdb/db/pinned_iterators_manager.h b/external/rocksdb/db/pinned_iterators_manager.h
new file mode 100755
index 0000000000..38cb89ce0c
--- /dev/null
+++ b/external/rocksdb/db/pinned_iterators_manager.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+#pragma once
+#include <algorithm>
+#include <memory>
+#include <vector>
+
+#include "table/internal_iterator.h"
+
+namespace rocksdb {
+
+// PinnedIteratorsManager will be notified whenever we need to pin an Iterator
+// and it will be responsible for deleting pinned Iterators when they are
+// not needed anymore.
+class PinnedIteratorsManager {
+ public:
+  PinnedIteratorsManager() : pinning_enabled(false), pinned_iters_(nullptr) {}
+  ~PinnedIteratorsManager() { ReleasePinnedIterators(); }
+
+  // Enable Iterators pinning
+  void StartPinning() {
+    if (!pinning_enabled) {
+      pinning_enabled = true;
+      if (!pinned_iters_) {
+        pinned_iters_.reset(new std::vector<InternalIterator*>());
+      }
+    }
+  }
+
+  // Is pinning enabled ?
+  bool PinningEnabled() { return pinning_enabled; }
+
+  // Take ownership of iter if pinning is enabled and delete it when
+  // ReleasePinnedIterators() is called
+  void PinIteratorIfNeeded(InternalIterator* iter) {
+    if (!pinning_enabled || !iter) {
+      return;
+    }
+    pinned_iters_->push_back(iter);
+  }
+
+  // Release pinned Iterators
+  inline void ReleasePinnedIterators() {
+    if (pinning_enabled) {
+      pinning_enabled = false;
+
+      // Remove duplicate pointers, then drop the moved-from tail so every
+      // iterator is deleted exactly once (std::unique alone does not shrink
+      // the vector).
+      std::sort(pinned_iters_->begin(), pinned_iters_->end());
+      pinned_iters_->erase(
+          std::unique(pinned_iters_->begin(), pinned_iters_->end()),
+          pinned_iters_->end());
+
+      for (auto& iter : *pinned_iters_) {
+        delete iter;
+      }
+      pinned_iters_->clear();
+    }
+  }
+
+ private:
+  bool pinning_enabled;
+  std::unique_ptr<std::vector<InternalIterator*>> pinned_iters_;
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/plain_table_db_test.cc b/external/rocksdb/db/plain_table_db_test.cc
new file mode 100755
index 0000000000..fdf98b0bf9
--- /dev/null
+++ b/external/rocksdb/db/plain_table_db_test.cc
@@ -0,0 +1,1183 @@
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
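Stepping back to the PinnedIteratorsManager above, a minimal usage sketch (editor's illustration; NewSomeInternalIterator() is a hypothetical factory for an InternalIterator):

// Usage sketch: pin an iterator so Slices pointing into its data stay valid.
rocksdb::PinnedIteratorsManager mgr;
mgr.StartPinning();
rocksdb::InternalIterator* it = NewSomeInternalIterator();  // hypothetical
// ... consume it; any Slice referencing its blocks remains usable ...
mgr.PinIteratorIfNeeded(it);   // mgr takes ownership of it
mgr.ReleasePinnedIterators();  // deletes every pinned iterator exactly once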
+ +#ifndef ROCKSDB_LITE + +#include +#include + +#include "db/db_impl.h" +#include "db/filename.h" +#include "db/version_set.h" +#include "db/write_batch_internal.h" +#include "rocksdb/cache.h" +#include "rocksdb/compaction_filter.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/filter_policy.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/table.h" +#include "table/meta_blocks.h" +#include "table/bloom_block.h" +#include "table/table_builder.h" +#include "table/plain_table_factory.h" +#include "table/plain_table_key_coding.h" +#include "table/plain_table_reader.h" +#include "util/hash.h" +#include "util/logging.h" +#include "util/mutexlock.h" +#include "util/string_util.h" +#include "util/testharness.h" +#include "util/testutil.h" +#include "utilities/merge_operators.h" + +using std::unique_ptr; + +namespace rocksdb { +class PlainTableKeyDecoderTest : public testing::Test {}; + +TEST_F(PlainTableKeyDecoderTest, ReadNonMmap) { + std::string tmp; + Random rnd(301); + const uint32_t kLength = 2222; + Slice contents = test::RandomString(&rnd, kLength, &tmp); + test::StringSource* string_source = + new test::StringSource(contents, 0, false); + + unique_ptr file_reader( + test::GetRandomAccessFileReader(string_source)); + unique_ptr file_info(new PlainTableReaderFileInfo( + std::move(file_reader), EnvOptions(), kLength)); + + { + PlainTableFileReader reader(file_info.get()); + + const uint32_t kReadSize = 77; + for (uint32_t pos = 0; pos < kLength; pos += kReadSize) { + uint32_t read_size = std::min(kLength - pos, kReadSize); + Slice out; + ASSERT_TRUE(reader.Read(pos, read_size, &out)); + ASSERT_EQ(0, out.compare(tmp.substr(pos, read_size))); + } + + ASSERT_LT(uint32_t(string_source->total_reads()), kLength / kReadSize / 2); + } + + std::vector>> reads = { + {{600, 30}, {590, 30}, {600, 20}, {600, 40}}, + {{800, 20}, {100, 20}, {500, 20}, {1500, 20}, {100, 20}, {80, 20}}, + {{1000, 20}, {500, 20}, {1000, 50}}, + {{1000, 20}, {500, 20}, {500, 20}}, + {{1000, 20}, {500, 20}, {200, 20}, {500, 20}}, + {{1000, 20}, {500, 20}, {200, 20}, {1000, 50}}, + {{600, 500}, {610, 20}, {100, 20}}, + {{500, 100}, {490, 100}, {550, 50}}, + }; + + std::vector num_file_reads = {2, 6, 2, 2, 4, 3, 2, 2}; + + for (size_t i = 0; i < reads.size(); i++) { + string_source->set_total_reads(0); + PlainTableFileReader reader(file_info.get()); + for (auto p : reads[i]) { + Slice out; + ASSERT_TRUE(reader.Read(p.first, p.second, &out)); + ASSERT_EQ(0, out.compare(tmp.substr(p.first, p.second))); + } + ASSERT_EQ(num_file_reads[i], string_source->total_reads()); + } +} + +class PlainTableDBTest : public testing::Test, + public testing::WithParamInterface { + protected: + private: + std::string dbname_; + Env* env_; + DB* db_; + + bool mmap_mode_; + Options last_options_; + + public: + PlainTableDBTest() : env_(Env::Default()) {} + + ~PlainTableDBTest() { + delete db_; + EXPECT_OK(DestroyDB(dbname_, Options())); + } + + void SetUp() override { + mmap_mode_ = GetParam(); + dbname_ = test::TmpDir() + "/plain_table_db_test"; + EXPECT_OK(DestroyDB(dbname_, Options())); + db_ = nullptr; + Reopen(); + } + + // Return the current option configuration. 
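+  // (Roughly: bloom_bits_per_key sizes the bloom filter, hash_table_ratio
+  // controls hash-index utilization (0 disables the hash index in favor of
+  // binary search), index_sparseness is the number of keys per index entry
+  // within a prefix, and huge_page_tlb_size > 0 backs the index with huge
+  // pages.)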
+ Options CurrentOptions() { + Options options; + + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 0; + plain_table_options.bloom_bits_per_key = 2; + plain_table_options.hash_table_ratio = 0.8; + plain_table_options.index_sparseness = 3; + plain_table_options.huge_page_tlb_size = 0; + plain_table_options.encoding_type = kPrefix; + plain_table_options.full_scan_mode = false; + plain_table_options.store_index_in_file = false; + + options.table_factory.reset(NewPlainTableFactory(plain_table_options)); + options.memtable_factory.reset(NewHashLinkListRepFactory(4, 0, 3, true)); + + options.prefix_extractor.reset(NewFixedPrefixTransform(8)); + options.allow_mmap_reads = mmap_mode_; + return options; + } + + DBImpl* dbfull() { + return reinterpret_cast(db_); + } + + void Reopen(Options* options = nullptr) { + ASSERT_OK(TryReopen(options)); + } + + void Close() { + delete db_; + db_ = nullptr; + } + + void DestroyAndReopen(Options* options = nullptr) { + //Destroy using last options + Destroy(&last_options_); + ASSERT_OK(TryReopen(options)); + } + + void Destroy(Options* options) { + delete db_; + db_ = nullptr; + ASSERT_OK(DestroyDB(dbname_, *options)); + } + + Status PureReopen(Options* options, DB** db) { + return DB::Open(*options, dbname_, db); + } + + Status TryReopen(Options* options = nullptr) { + delete db_; + db_ = nullptr; + Options opts; + if (options != nullptr) { + opts = *options; + } else { + opts = CurrentOptions(); + opts.create_if_missing = true; + } + last_options_ = opts; + + return DB::Open(opts, dbname_, &db_); + } + + Status Put(const Slice& k, const Slice& v) { + return db_->Put(WriteOptions(), k, v); + } + + Status Delete(const std::string& k) { + return db_->Delete(WriteOptions(), k); + } + + std::string Get(const std::string& k, const Snapshot* snapshot = nullptr) { + ReadOptions options; + options.snapshot = snapshot; + std::string result; + Status s = db_->Get(options, k, &result); + if (s.IsNotFound()) { + result = "NOT_FOUND"; + } else if (!s.ok()) { + result = s.ToString(); + } + return result; + } + + + int NumTableFilesAtLevel(int level) { + std::string property; + EXPECT_TRUE(db_->GetProperty( + "rocksdb.num-files-at-level" + NumberToString(level), &property)); + return atoi(property.c_str()); + } + + // Return spread of files per level + std::string FilesPerLevel() { + std::string result; + size_t last_non_zero_offset = 0; + for (int level = 0; level < db_->NumberLevels(); level++) { + int f = NumTableFilesAtLevel(level); + char buf[100]; + snprintf(buf, sizeof(buf), "%s%d", (level ? 
"," : ""), f); + result += buf; + if (f > 0) { + last_non_zero_offset = result.size(); + } + } + result.resize(last_non_zero_offset); + return result; + } + + std::string IterStatus(Iterator* iter) { + std::string result; + if (iter->Valid()) { + result = iter->key().ToString() + "->" + iter->value().ToString(); + } else { + result = "(invalid)"; + } + return result; + } +}; + +TEST_P(PlainTableDBTest, Empty) { + ASSERT_TRUE(dbfull() != nullptr); + ASSERT_EQ("NOT_FOUND", Get("0000000000000foo")); +} + +extern const uint64_t kPlainTableMagicNumber; + +class TestPlainTableReader : public PlainTableReader { + public: + TestPlainTableReader(const EnvOptions& env_options, + const InternalKeyComparator& icomparator, + EncodingType encoding_type, uint64_t file_size, + int bloom_bits_per_key, double hash_table_ratio, + size_t index_sparseness, + const TableProperties* table_properties, + unique_ptr&& file, + const ImmutableCFOptions& ioptions, + bool* expect_bloom_not_match, bool store_index_in_file, + uint32_t column_family_id, + const std::string& column_family_name) + : PlainTableReader(ioptions, std::move(file), env_options, icomparator, + encoding_type, file_size, table_properties), + expect_bloom_not_match_(expect_bloom_not_match) { + Status s = MmapDataIfNeeded(); + EXPECT_TRUE(s.ok()); + + s = PopulateIndex(const_cast(table_properties), + bloom_bits_per_key, hash_table_ratio, index_sparseness, + 2 * 1024 * 1024); + EXPECT_TRUE(s.ok()); + + TableProperties* props = const_cast(table_properties); + EXPECT_EQ(column_family_id, static_cast(props->column_family_id)); + EXPECT_EQ(column_family_name, props->column_family_name); + if (store_index_in_file) { + auto bloom_version_ptr = props->user_collected_properties.find( + PlainTablePropertyNames::kBloomVersion); + EXPECT_TRUE(bloom_version_ptr != props->user_collected_properties.end()); + EXPECT_EQ(bloom_version_ptr->second, std::string("1")); + if (ioptions.bloom_locality > 0) { + auto num_blocks_ptr = props->user_collected_properties.find( + PlainTablePropertyNames::kNumBloomBlocks); + EXPECT_TRUE(num_blocks_ptr != props->user_collected_properties.end()); + } + } + } + + virtual ~TestPlainTableReader() {} + + private: + virtual bool MatchBloom(uint32_t hash) const override { + bool ret = PlainTableReader::MatchBloom(hash); + if (*expect_bloom_not_match_) { + EXPECT_TRUE(!ret); + } else { + EXPECT_TRUE(ret); + } + return ret; + } + bool* expect_bloom_not_match_; +}; + +extern const uint64_t kPlainTableMagicNumber; +class TestPlainTableFactory : public PlainTableFactory { + public: + explicit TestPlainTableFactory(bool* expect_bloom_not_match, + const PlainTableOptions& options, + uint32_t column_family_id, + std::string column_family_name) + : PlainTableFactory(options), + bloom_bits_per_key_(options.bloom_bits_per_key), + hash_table_ratio_(options.hash_table_ratio), + index_sparseness_(options.index_sparseness), + store_index_in_file_(options.store_index_in_file), + expect_bloom_not_match_(expect_bloom_not_match), + column_family_id_(column_family_id), + column_family_name_(std::move(column_family_name)) {} + + Status NewTableReader( + const TableReaderOptions& table_reader_options, + unique_ptr&& file, uint64_t file_size, + unique_ptr* table, + bool prefetch_index_and_filter_in_cache) const override { + TableProperties* props = nullptr; + auto s = + ReadTableProperties(file.get(), file_size, kPlainTableMagicNumber, + table_reader_options.ioptions, &props); + EXPECT_TRUE(s.ok()); + + if (store_index_in_file_) { + BlockHandle bloom_block_handle; 
+ s = FindMetaBlock(file.get(), file_size, kPlainTableMagicNumber, + table_reader_options.ioptions, + BloomBlockBuilder::kBloomBlock, &bloom_block_handle); + EXPECT_TRUE(s.ok()); + + BlockHandle index_block_handle; + s = FindMetaBlock(file.get(), file_size, kPlainTableMagicNumber, + table_reader_options.ioptions, + PlainTableIndexBuilder::kPlainTableIndexBlock, + &index_block_handle); + EXPECT_TRUE(s.ok()); + } + + auto& user_props = props->user_collected_properties; + auto encoding_type_prop = + user_props.find(PlainTablePropertyNames::kEncodingType); + assert(encoding_type_prop != user_props.end()); + EncodingType encoding_type = static_cast( + DecodeFixed32(encoding_type_prop->second.c_str())); + + std::unique_ptr new_reader(new TestPlainTableReader( + table_reader_options.env_options, + table_reader_options.internal_comparator, encoding_type, file_size, + bloom_bits_per_key_, hash_table_ratio_, index_sparseness_, props, + std::move(file), table_reader_options.ioptions, expect_bloom_not_match_, + store_index_in_file_, column_family_id_, column_family_name_)); + + *table = std::move(new_reader); + return s; + } + + private: + int bloom_bits_per_key_; + double hash_table_ratio_; + size_t index_sparseness_; + bool store_index_in_file_; + bool* expect_bloom_not_match_; + const uint32_t column_family_id_; + const std::string column_family_name_; +}; + +TEST_P(PlainTableDBTest, Flush) { + for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024; + huge_page_tlb_size += 2 * 1024 * 1024) { + for (EncodingType encoding_type : {kPlain, kPrefix}) { + for (int bloom_bits = 0; bloom_bits <= 117; bloom_bits += 117) { + for (int total_order = 0; total_order <= 1; total_order++) { + for (int store_index_in_file = 0; store_index_in_file <= 1; + ++store_index_in_file) { + if (!bloom_bits && store_index_in_file) { + continue; + } + + Options options = CurrentOptions(); + options.create_if_missing = true; + // Set only one bucket to force bucket conflict. 
+ // Test index interval for the same prefix to be 1, 2 and 4 + if (total_order) { + options.prefix_extractor.reset(); + + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 0; + plain_table_options.bloom_bits_per_key = bloom_bits; + plain_table_options.hash_table_ratio = 0; + plain_table_options.index_sparseness = 2; + plain_table_options.huge_page_tlb_size = huge_page_tlb_size; + plain_table_options.encoding_type = encoding_type; + plain_table_options.full_scan_mode = false; + plain_table_options.store_index_in_file = store_index_in_file; + + options.table_factory.reset( + NewPlainTableFactory(plain_table_options)); + } else { + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 0; + plain_table_options.bloom_bits_per_key = bloom_bits; + plain_table_options.hash_table_ratio = 0.75; + plain_table_options.index_sparseness = 16; + plain_table_options.huge_page_tlb_size = huge_page_tlb_size; + plain_table_options.encoding_type = encoding_type; + plain_table_options.full_scan_mode = false; + plain_table_options.store_index_in_file = store_index_in_file; + + options.table_factory.reset( + NewPlainTableFactory(plain_table_options)); + } + DestroyAndReopen(&options); + uint64_t int_num; + ASSERT_TRUE(dbfull()->GetIntProperty( + "rocksdb.estimate-table-readers-mem", &int_num)); + ASSERT_EQ(int_num, 0U); + + ASSERT_OK(Put("1000000000000foo", "v1")); + ASSERT_OK(Put("0000000000000bar", "v2")); + ASSERT_OK(Put("1000000000000foo", "v3")); + dbfull()->TEST_FlushMemTable(); + + ASSERT_TRUE(dbfull()->GetIntProperty( + "rocksdb.estimate-table-readers-mem", &int_num)); + ASSERT_GT(int_num, 0U); + + TablePropertiesCollection ptc; + reinterpret_cast(dbfull())->GetPropertiesOfAllTables(&ptc); + ASSERT_EQ(1U, ptc.size()); + auto row = ptc.begin(); + auto tp = row->second; + + if (!store_index_in_file) { + ASSERT_EQ(total_order ? "4" : "12", + (tp->user_collected_properties) + .at("plain_table_hash_table_size")); + ASSERT_EQ("0", (tp->user_collected_properties) + .at("plain_table_sub_index_size")); + } else { + ASSERT_EQ("0", (tp->user_collected_properties) + .at("plain_table_hash_table_size")); + ASSERT_EQ("0", (tp->user_collected_properties) + .at("plain_table_sub_index_size")); + } + ASSERT_EQ("v3", Get("1000000000000foo")); + ASSERT_EQ("v2", Get("0000000000000bar")); + } + } + } + } + } +} + +TEST_P(PlainTableDBTest, Flush2) { + for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024; + huge_page_tlb_size += 2 * 1024 * 1024) { + for (EncodingType encoding_type : {kPlain, kPrefix}) { + for (int bloom_bits = 0; bloom_bits <= 117; bloom_bits += 117) { + for (int total_order = 0; total_order <= 1; total_order++) { + for (int store_index_in_file = 0; store_index_in_file <= 1; + ++store_index_in_file) { + if (encoding_type == kPrefix && total_order) { + continue; + } + if (!bloom_bits && store_index_in_file) { + continue; + } + if (total_order && store_index_in_file) { + continue; + } + bool expect_bloom_not_match = false; + Options options = CurrentOptions(); + options.create_if_missing = true; + // Set only one bucket to force bucket conflict. 
+ // Test index interval for the same prefix to be 1, 2 and 4 + PlainTableOptions plain_table_options; + if (total_order) { + options.prefix_extractor = nullptr; + plain_table_options.hash_table_ratio = 0; + plain_table_options.index_sparseness = 2; + } else { + plain_table_options.hash_table_ratio = 0.75; + plain_table_options.index_sparseness = 16; + } + plain_table_options.user_key_len = kPlainTableVariableLength; + plain_table_options.bloom_bits_per_key = bloom_bits; + plain_table_options.huge_page_tlb_size = huge_page_tlb_size; + plain_table_options.encoding_type = encoding_type; + plain_table_options.store_index_in_file = store_index_in_file; + options.table_factory.reset(new TestPlainTableFactory( + &expect_bloom_not_match, plain_table_options, + 0 /* column_family_id */, kDefaultColumnFamilyName)); + + DestroyAndReopen(&options); + ASSERT_OK(Put("0000000000000bar", "b")); + ASSERT_OK(Put("1000000000000foo", "v1")); + dbfull()->TEST_FlushMemTable(); + + ASSERT_OK(Put("1000000000000foo", "v2")); + dbfull()->TEST_FlushMemTable(); + ASSERT_EQ("v2", Get("1000000000000foo")); + + ASSERT_OK(Put("0000000000000eee", "v3")); + dbfull()->TEST_FlushMemTable(); + ASSERT_EQ("v3", Get("0000000000000eee")); + + ASSERT_OK(Delete("0000000000000bar")); + dbfull()->TEST_FlushMemTable(); + ASSERT_EQ("NOT_FOUND", Get("0000000000000bar")); + + ASSERT_OK(Put("0000000000000eee", "v5")); + ASSERT_OK(Put("9000000000000eee", "v5")); + dbfull()->TEST_FlushMemTable(); + ASSERT_EQ("v5", Get("0000000000000eee")); + + // Test Bloom Filter + if (bloom_bits > 0) { + // Neither key nor value should exist. + expect_bloom_not_match = true; + ASSERT_EQ("NOT_FOUND", Get("5_not00000000bar")); + // Key doesn't exist any more but prefix exists. + if (total_order) { + ASSERT_EQ("NOT_FOUND", Get("1000000000000not")); + ASSERT_EQ("NOT_FOUND", Get("0000000000000not")); + } + expect_bloom_not_match = false; + } + } + } + } + } + } +} + +TEST_P(PlainTableDBTest, Iterator) { + for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024; + huge_page_tlb_size += 2 * 1024 * 1024) { + for (EncodingType encoding_type : {kPlain, kPrefix}) { + for (int bloom_bits = 0; bloom_bits <= 117; bloom_bits += 117) { + for (int total_order = 0; total_order <= 1; total_order++) { + if (encoding_type == kPrefix && total_order == 1) { + continue; + } + bool expect_bloom_not_match = false; + Options options = CurrentOptions(); + options.create_if_missing = true; + // Set only one bucket to force bucket conflict. 
+ // Test index interval for the same prefix to be 1, 2 and 4 + if (total_order) { + options.prefix_extractor = nullptr; + + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 16; + plain_table_options.bloom_bits_per_key = bloom_bits; + plain_table_options.hash_table_ratio = 0; + plain_table_options.index_sparseness = 2; + plain_table_options.huge_page_tlb_size = huge_page_tlb_size; + plain_table_options.encoding_type = encoding_type; + + options.table_factory.reset(new TestPlainTableFactory( + &expect_bloom_not_match, plain_table_options, + 0 /* column_family_id */, kDefaultColumnFamilyName)); + } else { + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 16; + plain_table_options.bloom_bits_per_key = bloom_bits; + plain_table_options.hash_table_ratio = 0.75; + plain_table_options.index_sparseness = 16; + plain_table_options.huge_page_tlb_size = huge_page_tlb_size; + plain_table_options.encoding_type = encoding_type; + + options.table_factory.reset(new TestPlainTableFactory( + &expect_bloom_not_match, plain_table_options, + 0 /* column_family_id */, kDefaultColumnFamilyName)); + } + DestroyAndReopen(&options); + + ASSERT_OK(Put("1000000000foo002", "v_2")); + ASSERT_OK(Put("0000000000000bar", "random")); + ASSERT_OK(Put("1000000000foo001", "v1")); + ASSERT_OK(Put("3000000000000bar", "bar_v")); + ASSERT_OK(Put("1000000000foo003", "v__3")); + ASSERT_OK(Put("1000000000foo004", "v__4")); + ASSERT_OK(Put("1000000000foo005", "v__5")); + ASSERT_OK(Put("1000000000foo007", "v__7")); + ASSERT_OK(Put("1000000000foo008", "v__8")); + dbfull()->TEST_FlushMemTable(); + ASSERT_EQ("v1", Get("1000000000foo001")); + ASSERT_EQ("v__3", Get("1000000000foo003")); + Iterator* iter = dbfull()->NewIterator(ReadOptions()); + iter->Seek("1000000000foo000"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo001", iter->key().ToString()); + ASSERT_EQ("v1", iter->value().ToString()); + + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo002", iter->key().ToString()); + ASSERT_EQ("v_2", iter->value().ToString()); + + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo003", iter->key().ToString()); + ASSERT_EQ("v__3", iter->value().ToString()); + + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo004", iter->key().ToString()); + ASSERT_EQ("v__4", iter->value().ToString()); + + iter->Seek("3000000000000bar"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("3000000000000bar", iter->key().ToString()); + ASSERT_EQ("bar_v", iter->value().ToString()); + + iter->Seek("1000000000foo000"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo001", iter->key().ToString()); + ASSERT_EQ("v1", iter->value().ToString()); + + iter->Seek("1000000000foo005"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo005", iter->key().ToString()); + ASSERT_EQ("v__5", iter->value().ToString()); + + iter->Seek("1000000000foo006"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo007", iter->key().ToString()); + ASSERT_EQ("v__7", iter->value().ToString()); + + iter->Seek("1000000000foo008"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("1000000000foo008", iter->key().ToString()); + ASSERT_EQ("v__8", iter->value().ToString()); + + if (total_order == 0) { + iter->Seek("1000000000foo009"); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("3000000000000bar", iter->key().ToString()); + } + + // Test Bloom Filter + if (bloom_bits > 0) { + if (!total_order) { + // Neither key nor value should exist. 
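+            // (Setting expect_bloom_not_match makes TestPlainTableReader's
+            // MatchBloom override assert a bloom miss for the lookups that
+            // follow, verifying the filter actually screens out these keys.)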
+ expect_bloom_not_match = true; + iter->Seek("2not000000000bar"); + ASSERT_TRUE(!iter->Valid()); + ASSERT_EQ("NOT_FOUND", Get("2not000000000bar")); + expect_bloom_not_match = false; + } else { + expect_bloom_not_match = true; + ASSERT_EQ("NOT_FOUND", Get("2not000000000bar")); + expect_bloom_not_match = false; + } + } + + delete iter; + } + } + } + } +} + +namespace { +std::string MakeLongKey(size_t length, char c) { + return std::string(length, c); +} +} // namespace + +TEST_P(PlainTableDBTest, IteratorLargeKeys) { + Options options = CurrentOptions(); + + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 0; + plain_table_options.bloom_bits_per_key = 0; + plain_table_options.hash_table_ratio = 0; + + options.table_factory.reset(NewPlainTableFactory(plain_table_options)); + options.create_if_missing = true; + options.prefix_extractor.reset(); + DestroyAndReopen(&options); + + std::string key_list[] = { + MakeLongKey(30, '0'), + MakeLongKey(16, '1'), + MakeLongKey(32, '2'), + MakeLongKey(60, '3'), + MakeLongKey(90, '4'), + MakeLongKey(50, '5'), + MakeLongKey(26, '6') + }; + + for (size_t i = 0; i < 7; i++) { + ASSERT_OK(Put(key_list[i], ToString(i))); + } + + dbfull()->TEST_FlushMemTable(); + + Iterator* iter = dbfull()->NewIterator(ReadOptions()); + iter->Seek(key_list[0]); + + for (size_t i = 0; i < 7; i++) { + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(key_list[i], iter->key().ToString()); + ASSERT_EQ(ToString(i), iter->value().ToString()); + iter->Next(); + } + + ASSERT_TRUE(!iter->Valid()); + + delete iter; +} + +namespace { +std::string MakeLongKeyWithPrefix(size_t length, char c) { + return "00000000" + std::string(length - 8, c); +} +} // namespace + +TEST_P(PlainTableDBTest, IteratorLargeKeysWithPrefix) { + Options options = CurrentOptions(); + + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 16; + plain_table_options.bloom_bits_per_key = 0; + plain_table_options.hash_table_ratio = 0.8; + plain_table_options.index_sparseness = 3; + plain_table_options.huge_page_tlb_size = 0; + plain_table_options.encoding_type = kPrefix; + + options.table_factory.reset(NewPlainTableFactory(plain_table_options)); + options.create_if_missing = true; + DestroyAndReopen(&options); + + std::string key_list[] = { + MakeLongKeyWithPrefix(30, '0'), MakeLongKeyWithPrefix(16, '1'), + MakeLongKeyWithPrefix(32, '2'), MakeLongKeyWithPrefix(60, '3'), + MakeLongKeyWithPrefix(90, '4'), MakeLongKeyWithPrefix(50, '5'), + MakeLongKeyWithPrefix(26, '6')}; + + for (size_t i = 0; i < 7; i++) { + ASSERT_OK(Put(key_list[i], ToString(i))); + } + + dbfull()->TEST_FlushMemTable(); + + Iterator* iter = dbfull()->NewIterator(ReadOptions()); + iter->Seek(key_list[0]); + + for (size_t i = 0; i < 7; i++) { + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(key_list[i], iter->key().ToString()); + ASSERT_EQ(ToString(i), iter->value().ToString()); + iter->Next(); + } + + ASSERT_TRUE(!iter->Valid()); + + delete iter; +} + +TEST_P(PlainTableDBTest, IteratorReverseSuffixComparator) { + Options options = CurrentOptions(); + options.create_if_missing = true; + // Set only one bucket to force bucket conflict. 
+ // Test index interval for the same prefix to be 1, 2 and 4
+ test::SimpleSuffixReverseComparator comp;
+ options.comparator = &comp;
+ DestroyAndReopen(&options);
+
+ ASSERT_OK(Put("1000000000foo002", "v_2"));
+ ASSERT_OK(Put("0000000000000bar", "random"));
+ ASSERT_OK(Put("1000000000foo001", "v1"));
+ ASSERT_OK(Put("3000000000000bar", "bar_v"));
+ ASSERT_OK(Put("1000000000foo003", "v__3"));
+ ASSERT_OK(Put("1000000000foo004", "v__4"));
+ ASSERT_OK(Put("1000000000foo005", "v__5"));
+ ASSERT_OK(Put("1000000000foo007", "v__7"));
+ ASSERT_OK(Put("1000000000foo008", "v__8"));
+ dbfull()->TEST_FlushMemTable();
+ ASSERT_EQ("v1", Get("1000000000foo001"));
+ ASSERT_EQ("v__3", Get("1000000000foo003"));
+ Iterator* iter = dbfull()->NewIterator(ReadOptions());
+ iter->Seek("1000000000foo009");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("1000000000foo008", iter->key().ToString());
+ ASSERT_EQ("v__8", iter->value().ToString());
+
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("1000000000foo007", iter->key().ToString());
+ ASSERT_EQ("v__7", iter->value().ToString());
+
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("1000000000foo005", iter->key().ToString());
+ ASSERT_EQ("v__5", iter->value().ToString());
+
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("1000000000foo004", iter->key().ToString());
+ ASSERT_EQ("v__4", iter->value().ToString());
+
+ iter->Seek("3000000000000bar");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("3000000000000bar", iter->key().ToString());
+ ASSERT_EQ("bar_v", iter->value().ToString());
+
+ iter->Seek("1000000000foo005");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("1000000000foo005", iter->key().ToString());
+ ASSERT_EQ("v__5", iter->value().ToString());
+
+ iter->Seek("1000000000foo006");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("1000000000foo005", iter->key().ToString());
+ ASSERT_EQ("v__5", iter->value().ToString());
+
+ iter->Seek("1000000000foo008");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("1000000000foo008", iter->key().ToString());
+ ASSERT_EQ("v__8", iter->value().ToString());
+
+ iter->Seek("1000000000foo000");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("3000000000000bar", iter->key().ToString());
+
+ delete iter;
+}
+
+TEST_P(PlainTableDBTest, HashBucketConflict) {
+ for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
+ huge_page_tlb_size += 2 * 1024 * 1024) {
+ for (unsigned char i = 1; i <= 3; i++) {
+ Options options = CurrentOptions();
+ options.create_if_missing = true;
+ // Set only one bucket to force bucket conflict.
+ // Test index interval for the same prefix to be 1, 2 and 4
+
+ PlainTableOptions plain_table_options;
+ plain_table_options.user_key_len = 16;
+ plain_table_options.bloom_bits_per_key = 0;
+ plain_table_options.hash_table_ratio = 0;
+ plain_table_options.index_sparseness = 1 << (i - 1);
+ plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
+
+ options.table_factory.reset(NewPlainTableFactory(plain_table_options));
+
+ DestroyAndReopen(&options);
+ ASSERT_OK(Put("5000000000000fo0", "v1"));
+ ASSERT_OK(Put("5000000000000fo1", "v2"));
+ ASSERT_OK(Put("5000000000000fo2", "v"));
+ ASSERT_OK(Put("2000000000000fo0", "v3"));
+ ASSERT_OK(Put("2000000000000fo1", "v4"));
+ ASSERT_OK(Put("2000000000000fo2", "v"));
+ ASSERT_OK(Put("2000000000000fo3", "v"));
+
+ dbfull()->TEST_FlushMemTable();
+
+ ASSERT_EQ("v1", Get("5000000000000fo0"));
+ ASSERT_EQ("v2", Get("5000000000000fo1"));
+ ASSERT_EQ("v3", Get("2000000000000fo0"));
+ ASSERT_EQ("v4", Get("2000000000000fo1"));
+
+ ASSERT_EQ("NOT_FOUND", Get("5000000000000bar"));
+ ASSERT_EQ("NOT_FOUND", Get("2000000000000bar"));
+ ASSERT_EQ("NOT_FOUND", Get("5000000000000fo8"));
+ ASSERT_EQ("NOT_FOUND", Get("2000000000000fo8"));
+
+ ReadOptions ro;
+ Iterator* iter = dbfull()->NewIterator(ro);
+
+ iter->Seek("5000000000000fo0");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("5000000000000fo0", iter->key().ToString());
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("5000000000000fo1", iter->key().ToString());
+
+ iter->Seek("5000000000000fo1");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("5000000000000fo1", iter->key().ToString());
+
+ iter->Seek("2000000000000fo0");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("2000000000000fo0", iter->key().ToString());
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("2000000000000fo1", iter->key().ToString());
+
+ iter->Seek("2000000000000fo1");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("2000000000000fo1", iter->key().ToString());
+
+ iter->Seek("2000000000000bar");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("2000000000000fo0", iter->key().ToString());
+
+ iter->Seek("5000000000000bar");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("5000000000000fo0", iter->key().ToString());
+
+ iter->Seek("2000000000000fo8");
+ ASSERT_TRUE(!iter->Valid() ||
+ options.comparator->Compare(iter->key(), "20000001") > 0);
+
+ iter->Seek("5000000000000fo8");
+ ASSERT_TRUE(!iter->Valid());
+
+ iter->Seek("1000000000000fo2");
+ ASSERT_TRUE(!iter->Valid());
+
+ iter->Seek("3000000000000fo2");
+ ASSERT_TRUE(!iter->Valid());
+
+ iter->Seek("8000000000000fo2");
+ ASSERT_TRUE(!iter->Valid());
+
+ delete iter;
+ }
+ }
+}
+
+TEST_P(PlainTableDBTest, HashBucketConflictReverseSuffixComparator) {
+ for (size_t huge_page_tlb_size = 0; huge_page_tlb_size <= 2 * 1024 * 1024;
+ huge_page_tlb_size += 2 * 1024 * 1024) {
+ for (unsigned char i = 1; i <= 3; i++) {
+ Options options = CurrentOptions();
+ options.create_if_missing = true;
+ test::SimpleSuffixReverseComparator comp;
+ options.comparator = &comp;
+ // Set only one bucket to force bucket conflict.
+ // Test index interval for the same prefix to be 1, 2 and 4
+
+ PlainTableOptions plain_table_options;
+ plain_table_options.user_key_len = 16;
+ plain_table_options.bloom_bits_per_key = 0;
+ plain_table_options.hash_table_ratio = 0;
+ plain_table_options.index_sparseness = 1 << (i - 1);
+ plain_table_options.huge_page_tlb_size = huge_page_tlb_size;
+
+ options.table_factory.reset(NewPlainTableFactory(plain_table_options));
+ DestroyAndReopen(&options);
+ ASSERT_OK(Put("5000000000000fo0", "v1"));
+ ASSERT_OK(Put("5000000000000fo1", "v2"));
+ ASSERT_OK(Put("5000000000000fo2", "v"));
+ ASSERT_OK(Put("2000000000000fo0", "v3"));
+ ASSERT_OK(Put("2000000000000fo1", "v4"));
+ ASSERT_OK(Put("2000000000000fo2", "v"));
+ ASSERT_OK(Put("2000000000000fo3", "v"));
+
+ dbfull()->TEST_FlushMemTable();
+
+ ASSERT_EQ("v1", Get("5000000000000fo0"));
+ ASSERT_EQ("v2", Get("5000000000000fo1"));
+ ASSERT_EQ("v3", Get("2000000000000fo0"));
+ ASSERT_EQ("v4", Get("2000000000000fo1"));
+
+ ASSERT_EQ("NOT_FOUND", Get("5000000000000bar"));
+ ASSERT_EQ("NOT_FOUND", Get("2000000000000bar"));
+ ASSERT_EQ("NOT_FOUND", Get("5000000000000fo8"));
+ ASSERT_EQ("NOT_FOUND", Get("2000000000000fo8"));
+
+ ReadOptions ro;
+ Iterator* iter = dbfull()->NewIterator(ro);
+
+ iter->Seek("5000000000000fo1");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("5000000000000fo1", iter->key().ToString());
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("5000000000000fo0", iter->key().ToString());
+
+ iter->Seek("5000000000000fo1");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("5000000000000fo1", iter->key().ToString());
+
+ iter->Seek("2000000000000fo1");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("2000000000000fo1", iter->key().ToString());
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("2000000000000fo0", iter->key().ToString());
+
+ iter->Seek("2000000000000fo1");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("2000000000000fo1", iter->key().ToString());
+
+ iter->Seek("2000000000000var");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("2000000000000fo3", iter->key().ToString());
+
+ iter->Seek("5000000000000var");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("5000000000000fo2", iter->key().ToString());
+
+ std::string seek_key = "2000000000000bar";
+ iter->Seek(seek_key);
+ ASSERT_TRUE(!iter->Valid() ||
+ options.prefix_extractor->Transform(iter->key()) !=
+ options.prefix_extractor->Transform(seek_key));
+
+ iter->Seek("1000000000000fo2");
+ ASSERT_TRUE(!iter->Valid());
+
+ iter->Seek("3000000000000fo2");
+ ASSERT_TRUE(!iter->Valid());
+
+ iter->Seek("8000000000000fo2");
+ ASSERT_TRUE(!iter->Valid());
+
+ delete iter;
+ }
+ }
+}
+
+TEST_P(PlainTableDBTest, NonExistingKeyToNonEmptyBucket) {
+ Options options = CurrentOptions();
+ options.create_if_missing = true;
+ // Set only one bucket to force bucket conflict.
+ // Test index interval for the same prefix to be 1, 2 and 4
+ PlainTableOptions plain_table_options;
+ plain_table_options.user_key_len = 16;
+ plain_table_options.bloom_bits_per_key = 0;
+ plain_table_options.hash_table_ratio = 0;
+ plain_table_options.index_sparseness = 5;
+
+ options.table_factory.reset(NewPlainTableFactory(plain_table_options));
+ DestroyAndReopen(&options);
+ ASSERT_OK(Put("5000000000000fo0", "v1"));
+ ASSERT_OK(Put("5000000000000fo1", "v2"));
+ ASSERT_OK(Put("5000000000000fo2", "v3"));
+
+ dbfull()->TEST_FlushMemTable();
+
+ ASSERT_EQ("v1", Get("5000000000000fo0"));
+ ASSERT_EQ("v2", Get("5000000000000fo1"));
+ ASSERT_EQ("v3", Get("5000000000000fo2"));
+
+ ASSERT_EQ("NOT_FOUND", Get("8000000000000bar"));
+ ASSERT_EQ("NOT_FOUND", Get("1000000000000bar"));
+
+ Iterator* iter = dbfull()->NewIterator(ReadOptions());
+
+ iter->Seek("5000000000000bar");
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_EQ("5000000000000fo0", iter->key().ToString());
+
+ iter->Seek("5000000000000fo8");
+ ASSERT_TRUE(!iter->Valid());
+
+ iter->Seek("1000000000000fo2");
+ ASSERT_TRUE(!iter->Valid());
+
+ iter->Seek("8000000000000fo2");
+ ASSERT_TRUE(!iter->Valid());
+
+ delete iter;
+}
+
+static std::string Key(int i) {
+ char buf[100];
+ snprintf(buf, sizeof(buf), "key_______%06d", i);
+ return std::string(buf);
+}
+
+static std::string RandomString(Random* rnd, int len) {
+ std::string r;
+ test::RandomString(rnd, len, &r);
+ return r;
+}
+
+TEST_P(PlainTableDBTest, CompactionTrigger) {
+ Options options = CurrentOptions();
+ options.write_buffer_size = 120 << 10; // 120KB
+ options.num_levels = 3;
+ options.level0_file_num_compaction_trigger = 3;
+ Reopen(&options);
+
+ Random rnd(301);
+
+ for (int num = 0; num < options.level0_file_num_compaction_trigger - 1;
+ num++) {
+ std::vector<std::string> values;
+ // Write 120KB (10 values, each 12K)
+ for (int i = 0; i < 10; i++) {
+ values.push_back(RandomString(&rnd, 12000));
+ ASSERT_OK(Put(Key(i), values[i]));
+ }
+ ASSERT_OK(Put(Key(999), ""));
+ dbfull()->TEST_WaitForFlushMemTable();
+ ASSERT_EQ(NumTableFilesAtLevel(0), num + 1);
+ }
+
+ // Generate one more file in level-0, which should trigger level-0 compaction.
+ std::vector<std::string> values;
+ for (int i = 0; i < 12; i++) {
+ values.push_back(RandomString(&rnd, 10000));
+ ASSERT_OK(Put(Key(i), values[i]));
+ }
+ ASSERT_OK(Put(Key(999), ""));
+ dbfull()->TEST_WaitForCompact();
+
+ ASSERT_EQ(NumTableFilesAtLevel(0), 0);
+ ASSERT_EQ(NumTableFilesAtLevel(1), 1);
+}
+
+TEST_P(PlainTableDBTest, AdaptiveTable) {
+ Options options = CurrentOptions();
+ options.create_if_missing = true;
+
+ options.table_factory.reset(NewPlainTableFactory());
+ DestroyAndReopen(&options);
+
+ ASSERT_OK(Put("1000000000000foo", "v1"));
+ ASSERT_OK(Put("0000000000000bar", "v2"));
+ ASSERT_OK(Put("1000000000000foo", "v3"));
+ dbfull()->TEST_FlushMemTable();
+
+ options.create_if_missing = false;
+ std::shared_ptr<TableFactory> dummy_factory;
+ std::shared_ptr<TableFactory> block_based_factory(
+ NewBlockBasedTableFactory());
+ options.table_factory.reset(NewAdaptiveTableFactory(
+ block_based_factory, dummy_factory, dummy_factory));
+ Reopen(&options);
+ ASSERT_EQ("v3", Get("1000000000000foo"));
+ ASSERT_EQ("v2", Get("0000000000000bar"));
+
+ ASSERT_OK(Put("2000000000000foo", "v4"));
+ ASSERT_OK(Put("3000000000000bar", "v5"));
+ dbfull()->TEST_FlushMemTable();
+ ASSERT_EQ("v4", Get("2000000000000foo"));
+ ASSERT_EQ("v5", Get("3000000000000bar"));
+
+ Reopen(&options);
+ ASSERT_EQ("v3", Get("1000000000000foo"));
+ ASSERT_EQ("v2", Get("0000000000000bar"));
+ ASSERT_EQ("v4", Get("2000000000000foo"));
Get("2000000000000foo")); + ASSERT_EQ("v5", Get("3000000000000bar")); + + options.table_factory.reset(NewBlockBasedTableFactory()); + Reopen(&options); + ASSERT_NE("v3", Get("1000000000000foo")); + + options.table_factory.reset(NewPlainTableFactory()); + Reopen(&options); + ASSERT_NE("v5", Get("3000000000000bar")); +} + +INSTANTIATE_TEST_CASE_P(PlainTableDBTest, PlainTableDBTest, ::testing::Bool()); + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + +#else +#include + +int main(int argc, char** argv) { + fprintf(stderr, "SKIPPED as plain table is not supported in ROCKSDB_LITE\n"); + return 0; +} + +#endif // !ROCKSDB_LITE diff --git a/external/rocksdb/db/prefix_test.cc b/external/rocksdb/db/prefix_test.cc new file mode 100755 index 0000000000..1fb22994ea --- /dev/null +++ b/external/rocksdb/db/prefix_test.cc @@ -0,0 +1,654 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef ROCKSDB_LITE + +#ifndef GFLAGS +#include +int main() { + fprintf(stderr, "Please install gflags to run this test... Skipping...\n"); + return 0; +} +#else + +#include +#include +#include + +#include +#include "rocksdb/comparator.h" +#include "rocksdb/db.h" +#include "rocksdb/filter_policy.h" +#include "rocksdb/perf_context.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/table.h" +#include "util/histogram.h" +#include "util/stop_watch.h" +#include "util/string_util.h" +#include "util/testharness.h" + +using GFLAGS::ParseCommandLineFlags; + +DEFINE_bool(trigger_deadlock, false, + "issue delete in range scan to trigger PrefixHashMap deadlock"); +DEFINE_int32(bucket_count, 100000, "number of buckets"); +DEFINE_uint64(num_locks, 10001, "number of locks"); +DEFINE_bool(random_prefix, false, "randomize prefix"); +DEFINE_uint64(total_prefixes, 100000, "total number of prefixes"); +DEFINE_uint64(items_per_prefix, 1, "total number of values per prefix"); +DEFINE_int64(write_buffer_size, 33554432, ""); +DEFINE_int32(max_write_buffer_number, 2, ""); +DEFINE_int32(min_write_buffer_number_to_merge, 1, ""); +DEFINE_int32(skiplist_height, 4, ""); +DEFINE_double(memtable_prefix_bloom_size_ratio, 0.1, ""); +DEFINE_int32(memtable_huge_page_size, 2 * 1024 * 1024, ""); +DEFINE_int32(value_size, 40, ""); + +// Path to the database on file system +const std::string kDbName = rocksdb::test::TmpDir() + "/prefix_test"; + +namespace rocksdb { + +struct TestKey { + uint64_t prefix; + uint64_t sorted; + + TestKey(uint64_t _prefix, uint64_t _sorted) + : prefix(_prefix), sorted(_sorted) {} +}; + +// return a slice backed by test_key +inline Slice TestKeyToSlice(const TestKey& test_key) { + return Slice((const char*)&test_key, sizeof(test_key)); +} + +inline const TestKey* SliceToTestKey(const Slice& slice) { + return (const TestKey*)slice.data(); +} + +class TestKeyComparator : public Comparator { + public: + + // Compare needs to be aware of the possibility of a and/or b is + // prefix only + virtual int Compare(const Slice& a, const Slice& b) const override { + const TestKey* key_a = SliceToTestKey(a); + const TestKey* key_b = SliceToTestKey(b); + if (key_a->prefix != key_b->prefix) { + if (key_a->prefix < key_b->prefix) return -1; + if (key_a->prefix > 
key_b->prefix) return 1; + } else { + EXPECT_TRUE(key_a->prefix == key_b->prefix); + // note, both a and b could be prefix only + if (a.size() != b.size()) { + // one of them is prefix + EXPECT_TRUE( + (a.size() == sizeof(uint64_t) && b.size() == sizeof(TestKey)) || + (b.size() == sizeof(uint64_t) && a.size() == sizeof(TestKey))); + if (a.size() < b.size()) return -1; + if (a.size() > b.size()) return 1; + } else { + // both a and b are prefix + if (a.size() == sizeof(uint64_t)) { + return 0; + } + + // both a and b are whole key + EXPECT_TRUE(a.size() == sizeof(TestKey) && b.size() == sizeof(TestKey)); + if (key_a->sorted < key_b->sorted) return -1; + if (key_a->sorted > key_b->sorted) return 1; + if (key_a->sorted == key_b->sorted) return 0; + } + } + return 0; + } + + virtual const char* Name() const override { + return "TestKeyComparator"; + } + + virtual void FindShortestSeparator(std::string* start, + const Slice& limit) const override {} + + virtual void FindShortSuccessor(std::string* key) const override {} +}; + +namespace { +void PutKey(DB* db, WriteOptions write_options, uint64_t prefix, + uint64_t suffix, const Slice& value) { + TestKey test_key(prefix, suffix); + Slice key = TestKeyToSlice(test_key); + ASSERT_OK(db->Put(write_options, key, value)); +} + +void SeekIterator(Iterator* iter, uint64_t prefix, uint64_t suffix) { + TestKey test_key(prefix, suffix); + Slice key = TestKeyToSlice(test_key); + iter->Seek(key); +} + +const std::string kNotFoundResult = "NOT_FOUND"; + +std::string Get(DB* db, const ReadOptions& read_options, uint64_t prefix, + uint64_t suffix) { + TestKey test_key(prefix, suffix); + Slice key = TestKeyToSlice(test_key); + + std::string result; + Status s = db->Get(read_options, key, &result); + if (s.IsNotFound()) { + result = kNotFoundResult; + } else if (!s.ok()) { + result = s.ToString(); + } + return result; +} + +class SamePrefixTransform : public SliceTransform { + private: + const Slice prefix_; + std::string name_; + + public: + explicit SamePrefixTransform(const Slice& prefix) + : prefix_(prefix), name_("rocksdb.SamePrefix." 
+ prefix.ToString()) {}
+
+ virtual const char* Name() const override { return name_.c_str(); }
+
+ virtual Slice Transform(const Slice& src) const override {
+ assert(InDomain(src));
+ return prefix_;
+ }
+
+ virtual bool InDomain(const Slice& src) const override {
+ if (src.size() >= prefix_.size()) {
+ return Slice(src.data(), prefix_.size()) == prefix_;
+ }
+ return false;
+ }
+
+ virtual bool InRange(const Slice& dst) const override {
+ return dst == prefix_;
+ }
+};
+
+} // namespace
+
+class PrefixTest : public testing::Test {
+ public:
+ std::shared_ptr<DB> OpenDb() {
+ DB* db;
+
+ options.create_if_missing = true;
+ options.write_buffer_size = FLAGS_write_buffer_size;
+ options.max_write_buffer_number = FLAGS_max_write_buffer_number;
+ options.min_write_buffer_number_to_merge =
+ FLAGS_min_write_buffer_number_to_merge;
+
+ options.memtable_prefix_bloom_size_ratio =
+ FLAGS_memtable_prefix_bloom_size_ratio;
+ options.memtable_huge_page_size = FLAGS_memtable_huge_page_size;
+
+ options.prefix_extractor.reset(NewFixedPrefixTransform(8));
+ BlockBasedTableOptions bbto;
+ bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
+ bbto.whole_key_filtering = false;
+ options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+
+ Status s = DB::Open(options, kDbName, &db);
+ EXPECT_OK(s);
+ return std::shared_ptr<DB>(db);
+ }
+
+ void FirstOption() {
+ option_config_ = kBegin;
+ }
+
+ bool NextOptions(int bucket_count) {
+ // skip some options
+ option_config_++;
+ if (option_config_ < kEnd) {
+ options.prefix_extractor.reset(NewFixedPrefixTransform(8));
+ switch(option_config_) {
+ case kHashSkipList:
+ options.memtable_factory.reset(
+ NewHashSkipListRepFactory(bucket_count, FLAGS_skiplist_height));
+ return true;
+ case kHashLinkList:
+ options.memtable_factory.reset(
+ NewHashLinkListRepFactory(bucket_count));
+ return true;
+ case kHashLinkListHugePageTlb:
+ options.memtable_factory.reset(
+ NewHashLinkListRepFactory(bucket_count, 2 * 1024 * 1024));
+ return true;
+ case kHashLinkListTriggerSkipList:
+ options.memtable_factory.reset(
+ NewHashLinkListRepFactory(bucket_count, 0, 3));
+ return true;
+ default:
+ return false;
+ }
+ }
+ return false;
+ }
+
+ PrefixTest() : option_config_(kBegin) {
+ options.comparator = new TestKeyComparator();
+ }
+ ~PrefixTest() {
+ delete options.comparator;
+ }
+ protected:
+ enum OptionConfig {
+ kBegin,
+ kHashSkipList,
+ kHashLinkList,
+ kHashLinkListHugePageTlb,
+ kHashLinkListTriggerSkipList,
+ kEnd
+ };
+ int option_config_;
+ Options options;
+};
+
+TEST(SamePrefixTest, InDomainTest) {
+ DB* db;
+ Options options;
+ options.create_if_missing = true;
+ options.prefix_extractor.reset(new SamePrefixTransform("HHKB"));
+ BlockBasedTableOptions bbto;
+ bbto.filter_policy.reset(NewBloomFilterPolicy(10, false));
+ bbto.whole_key_filtering = false;
+ options.table_factory.reset(NewBlockBasedTableFactory(bbto));
+ WriteOptions write_options;
+ ReadOptions read_options;
+ {
+ ASSERT_OK(DB::Open(options, kDbName, &db));
+ ASSERT_OK(db->Put(write_options, "HHKB pro2", "Mar 24, 2006"));
+ ASSERT_OK(db->Put(write_options, "HHKB pro2 Type-S", "June 29, 2011"));
+ ASSERT_OK(db->Put(write_options, "Realforce 87u", "idk"));
+ db->Flush(FlushOptions());
+ std::string result;
+ auto db_iter = db->NewIterator(ReadOptions());
+
+ db_iter->Seek("Realforce 87u");
+ ASSERT_TRUE(db_iter->Valid());
+ ASSERT_OK(db_iter->status());
+ ASSERT_EQ(db_iter->key(), "Realforce 87u");
+ ASSERT_EQ(db_iter->value(), "idk");
+
+ delete db_iter;
+ delete db;
+ ASSERT_OK(DestroyDB(kDbName, Options()));
+ }
+
+ {
+ ASSERT_OK(DB::Open(options, kDbName, &db));
+ ASSERT_OK(db->Put(write_options, "pikachu", "1"));
+ ASSERT_OK(db->Put(write_options, "Meowth", "1"));
+ ASSERT_OK(db->Put(write_options, "Mewtwo", "idk"));
+ db->Flush(FlushOptions());
+ std::string result;
+ auto db_iter = db->NewIterator(ReadOptions());
+
+ db_iter->Seek("Mewtwo");
+ ASSERT_TRUE(db_iter->Valid());
+ ASSERT_OK(db_iter->status());
+ delete db_iter;
+ delete db;
+ ASSERT_OK(DestroyDB(kDbName, Options()));
+ }
+}
+
+TEST_F(PrefixTest, TestResult) {
+ for (int num_buckets = 1; num_buckets <= 2; num_buckets++) {
+ FirstOption();
+ while (NextOptions(num_buckets)) {
+ std::cout << "*** Mem table: " << options.memtable_factory->Name()
+ << " number of buckets: " << num_buckets
+ << std::endl;
+ DestroyDB(kDbName, Options());
+ auto db = OpenDb();
+ WriteOptions write_options;
+ ReadOptions read_options;
+
+ // 1. Insert one row.
+ Slice v16("v16");
+ PutKey(db.get(), write_options, 1, 6, v16);
+ std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
+ SeekIterator(iter.get(), 1, 6);
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v16 == iter->value());
+ SeekIterator(iter.get(), 1, 5);
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v16 == iter->value());
+ SeekIterator(iter.get(), 1, 5);
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v16 == iter->value());
+ iter->Next();
+ ASSERT_TRUE(!iter->Valid());
+
+ SeekIterator(iter.get(), 2, 0);
+ ASSERT_TRUE(!iter->Valid());
+
+ ASSERT_EQ(v16.ToString(), Get(db.get(), read_options, 1, 6));
+ ASSERT_EQ(kNotFoundResult, Get(db.get(), read_options, 1, 5));
+ ASSERT_EQ(kNotFoundResult, Get(db.get(), read_options, 1, 7));
+ ASSERT_EQ(kNotFoundResult, Get(db.get(), read_options, 0, 6));
+ ASSERT_EQ(kNotFoundResult, Get(db.get(), read_options, 2, 6));
+
+ // 2. Insert an entry for the same prefix as the last entry in the bucket.
+ Slice v17("v17");
+ PutKey(db.get(), write_options, 1, 7, v17);
+ iter.reset(db->NewIterator(read_options));
+ SeekIterator(iter.get(), 1, 7);
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v17 == iter->value());
+
+ SeekIterator(iter.get(), 1, 6);
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v16 == iter->value());
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v17 == iter->value());
+ iter->Next();
+ ASSERT_TRUE(!iter->Valid());
+
+ SeekIterator(iter.get(), 2, 0);
+ ASSERT_TRUE(!iter->Valid());
+
+ // 3. Insert an entry for the same prefix as the head of the bucket.
+ Slice v15("v15");
+ PutKey(db.get(), write_options, 1, 5, v15);
+ iter.reset(db->NewIterator(read_options));
+
+ SeekIterator(iter.get(), 1, 7);
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v17 == iter->value());
+
+ SeekIterator(iter.get(), 1, 5);
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v15 == iter->value());
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v16 == iter->value());
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v17 == iter->value());
+
+ SeekIterator(iter.get(), 1, 5);
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v15 == iter->value());
+
+ ASSERT_EQ(v15.ToString(), Get(db.get(), read_options, 1, 5));
+ ASSERT_EQ(v16.ToString(), Get(db.get(), read_options, 1, 6));
+ ASSERT_EQ(v17.ToString(), Get(db.get(), read_options, 1, 7));
+
+ // 4.
Insert an entry with a larger prefix + Slice v22("v22"); + PutKey(db.get(), write_options, 2, 2, v22); + iter.reset(db->NewIterator(read_options)); + + SeekIterator(iter.get(), 2, 2); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v22 == iter->value()); + SeekIterator(iter.get(), 2, 0); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v22 == iter->value()); + + SeekIterator(iter.get(), 1, 5); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v15 == iter->value()); + + SeekIterator(iter.get(), 1, 7); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v17 == iter->value()); + + // 5. Insert an entry with a smaller prefix + Slice v02("v02"); + PutKey(db.get(), write_options, 0, 2, v02); + iter.reset(db->NewIterator(read_options)); + + SeekIterator(iter.get(), 0, 2); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v02 == iter->value()); + SeekIterator(iter.get(), 0, 0); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v02 == iter->value()); + + SeekIterator(iter.get(), 2, 0); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v22 == iter->value()); + + SeekIterator(iter.get(), 1, 5); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v15 == iter->value()); + + SeekIterator(iter.get(), 1, 7); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v17 == iter->value()); + + // 6. Insert to the beginning and the end of the first prefix + Slice v13("v13"); + Slice v18("v18"); + PutKey(db.get(), write_options, 1, 3, v13); + PutKey(db.get(), write_options, 1, 8, v18); + iter.reset(db->NewIterator(read_options)); + SeekIterator(iter.get(), 1, 7); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v17 == iter->value()); + + SeekIterator(iter.get(), 1, 3); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v13 == iter->value()); + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v15 == iter->value()); + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v16 == iter->value()); + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v17 == iter->value()); + iter->Next(); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v18 == iter->value()); + + SeekIterator(iter.get(), 0, 0); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v02 == iter->value()); + + SeekIterator(iter.get(), 2, 0); + ASSERT_TRUE(iter->Valid()); + ASSERT_TRUE(v22 == iter->value()); + + ASSERT_EQ(v22.ToString(), Get(db.get(), read_options, 2, 2)); + ASSERT_EQ(v02.ToString(), Get(db.get(), read_options, 0, 2)); + ASSERT_EQ(v13.ToString(), Get(db.get(), read_options, 1, 3)); + ASSERT_EQ(v15.ToString(), Get(db.get(), read_options, 1, 5)); + ASSERT_EQ(v16.ToString(), Get(db.get(), read_options, 1, 6)); + ASSERT_EQ(v17.ToString(), Get(db.get(), read_options, 1, 7)); + ASSERT_EQ(v18.ToString(), Get(db.get(), read_options, 1, 8)); + } + } +} + +// Show results in prefix +TEST_F(PrefixTest, PrefixValid) { + for (int num_buckets = 1; num_buckets <= 2; num_buckets++) { + FirstOption(); + while (NextOptions(num_buckets)) { + std::cout << "*** Mem table: " << options.memtable_factory->Name() + << " number of buckets: " << num_buckets << std::endl; + DestroyDB(kDbName, Options()); + auto db = OpenDb(); + WriteOptions write_options; + ReadOptions read_options; + + // Insert keys with common prefix and one key with different + Slice v16("v16"); + Slice v17("v17"); + Slice v18("v18"); + Slice v19("v19"); + PutKey(db.get(), write_options, 12345, 6, v16); + PutKey(db.get(), write_options, 12345, 7, v17); + PutKey(db.get(), write_options, 12345, 8, v18); + PutKey(db.get(), write_options, 12345, 9, v19); + PutKey(db.get(), write_options, 12346, 8, v16); + db->Flush(FlushOptions()); + db->Delete(write_options, 
TestKeyToSlice(TestKey(12346, 8)));
+ db->Flush(FlushOptions());
+ read_options.prefix_same_as_start = true;
+ std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
+ SeekIterator(iter.get(), 12345, 6);
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v16 == iter->value());
+
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v17 == iter->value());
+
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v18 == iter->value());
+
+ iter->Next();
+ ASSERT_TRUE(iter->Valid());
+ ASSERT_TRUE(v19 == iter->value());
+ iter->Next();
+ ASSERT_FALSE(iter->Valid());
+ ASSERT_EQ(kNotFoundResult, Get(db.get(), read_options, 12346, 8));
+
+ // Verify seeking past the prefix won't return a result.
+ SeekIterator(iter.get(), 12345, 10);
+ ASSERT_TRUE(!iter->Valid());
+ }
+ }
+}
+
+TEST_F(PrefixTest, DynamicPrefixIterator) {
+ while (NextOptions(FLAGS_bucket_count)) {
+ std::cout << "*** Mem table: " << options.memtable_factory->Name()
+ << std::endl;
+ DestroyDB(kDbName, Options());
+ auto db = OpenDb();
+ WriteOptions write_options;
+ ReadOptions read_options;
+
+ std::vector<uint64_t> prefixes;
+ for (uint64_t i = 0; i < FLAGS_total_prefixes; ++i) {
+ prefixes.push_back(i);
+ }
+
+ if (FLAGS_random_prefix) {
+ std::random_shuffle(prefixes.begin(), prefixes.end());
+ }
+
+ HistogramImpl hist_put_time;
+ HistogramImpl hist_put_comparison;
+
+ // insert x random prefix, each with y continuous element.
+ for (auto prefix : prefixes) {
+ for (uint64_t sorted = 0; sorted < FLAGS_items_per_prefix; sorted++) {
+ TestKey test_key(prefix, sorted);
+
+ Slice key = TestKeyToSlice(test_key);
+ std::string value(FLAGS_value_size, 0);
+
+ perf_context.Reset();
+ StopWatchNano timer(Env::Default(), true);
+ ASSERT_OK(db->Put(write_options, key, value));
+ hist_put_time.Add(timer.ElapsedNanos());
+ hist_put_comparison.Add(perf_context.user_key_comparison_count);
+ }
+ }
+
+ std::cout << "Put key comparison: \n" << hist_put_comparison.ToString()
+ << "Put time: \n" << hist_put_time.ToString();
+
+ // test seek existing keys
+ HistogramImpl hist_seek_time;
+ HistogramImpl hist_seek_comparison;
+
+ std::unique_ptr<Iterator> iter(db->NewIterator(read_options));
+
+ for (auto prefix : prefixes) {
+ TestKey test_key(prefix, FLAGS_items_per_prefix / 2);
+ Slice key = TestKeyToSlice(test_key);
+ std::string value = "v" + ToString(0);
+
+ perf_context.Reset();
+ StopWatchNano timer(Env::Default(), true);
+ auto key_prefix = options.prefix_extractor->Transform(key);
+ uint64_t total_keys = 0;
+ for (iter->Seek(key);
+ iter->Valid() && iter->key().starts_with(key_prefix);
+ iter->Next()) {
+ if (FLAGS_trigger_deadlock) {
+ std::cout << "Behold the deadlock!\n";
+ db->Delete(write_options, iter->key());
+ }
+ total_keys++;
+ }
+ hist_seek_time.Add(timer.ElapsedNanos());
+ hist_seek_comparison.Add(perf_context.user_key_comparison_count);
+ ASSERT_EQ(total_keys, FLAGS_items_per_prefix - FLAGS_items_per_prefix/2);
+ }
+
+ std::cout << "Seek key comparison: \n"
+ << hist_seek_comparison.ToString()
+ << "Seek time: \n"
+ << hist_seek_time.ToString();
+
+ // test non-existing keys
+ HistogramImpl hist_no_seek_time;
+ HistogramImpl hist_no_seek_comparison;
+
+ for (auto prefix = FLAGS_total_prefixes;
+ prefix < FLAGS_total_prefixes + 10000;
+ prefix++) {
+ TestKey test_key(prefix, 0);
+ Slice key = TestKeyToSlice(test_key);
+
+ perf_context.Reset();
+ StopWatchNano timer(Env::Default(), true);
+ iter->Seek(key);
+ hist_no_seek_time.Add(timer.ElapsedNanos());
+ hist_no_seek_comparison.Add(perf_context.user_key_comparison_count);
+ ASSERT_TRUE(!iter->Valid());
+ }
+
+ std::cout << "non-existing Seek key comparison: \n"
+ << hist_no_seek_comparison.ToString()
+ << "non-existing Seek time: \n"
+ << hist_no_seek_time.ToString();
+ }
+}
+
+} // namespace rocksdb
+
+int main(int argc, char** argv) {
+ ::testing::InitGoogleTest(&argc, argv);
+ ParseCommandLineFlags(&argc, &argv, true);
+ std::cout << kDbName << "\n";
+
+ return RUN_ALL_TESTS();
+}
+
+#endif // GFLAGS
+
+#else
+#include <stdio.h>
+
+int main(int argc, char** argv) {
+ fprintf(stderr,
+ "SKIPPED as HashSkipList and HashLinkList are not supported in "
+ "ROCKSDB_LITE\n");
+ return 0;
+}
+
+#endif // !ROCKSDB_LITE
diff --git a/external/rocksdb/db/repair.cc b/external/rocksdb/db/repair.cc
new file mode 100755
index 0000000000..a46e33f2dd
--- /dev/null
+++ b/external/rocksdb/db/repair.cc
@@ -0,0 +1,620 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Repairer does best-effort recovery to recover as much data as possible after
+// a disaster without compromising consistency. It does not guarantee bringing
+// the database to a time-consistent state.
+//
+// Repair process is broken into 4 phases:
+// (a) Find files
+// (b) Convert logs to tables
+// (c) Extract metadata
+// (d) Write Descriptor
+//
+// (a) Find files
+//
+// The repairer goes through all the files in the directory, and classifies them
+// based on their file name. Any file that cannot be identified by name will be
+// ignored.
+//
+// (b) Convert logs to table
+//
+// Every log file that is active is replayed. All sections of the file where the
+// checksum does not match are skipped over. We intentionally give preference to
+// data consistency.
+//
+// (c) Extract metadata
+//
+// We scan every table to compute
+// (1) smallest/largest for the table
+// (2) largest sequence number in the table
+//
+// If we are unable to scan the file, then we ignore the table.
+//
+// (d) Write Descriptor
+//
+// We generate descriptor contents:
+// - log number is set to zero
+// - next-file-number is set to 1 + largest file number we found
+// - last-sequence-number is set to largest sequence# found across
+// all tables (see 2c)
+// - compaction pointers are cleared
+// - every table file is added at level 0
+//
+// Possible optimization 1:
+// (a) Compute total size and use to pick appropriate max-level M
+// (b) Sort tables by largest sequence# in the table
+// (c) For each table: if it overlaps earlier table, place in level-0,
+// else place in level-M.
+// (d) We can provide options for time consistent recovery and unsafe recovery
+// (ignore checksum failure when applicable)
+// Possible optimization 2:
+// Store per-table metadata (smallest, largest, largest-seq#, ...)
+// in the table's meta section to speed up ScanTable.
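+//
+// Illustrative usage sketch (the path below is hypothetical; the RepairDB()
+// overloads themselves are defined at the bottom of this file): a process
+// that has lost its MANIFEST could attempt a repair before reopening the
+// database.
+//
+//   rocksdb::Options options;
+//   rocksdb::Status s = rocksdb::RepairDB("/path/to/db", options);
+//   if (s.ok()) {
+//     rocksdb::DB* db = nullptr;
+//     s = rocksdb::DB::Open(options, "/path/to/db", &db);
+//     // ... read back whatever data was recovered, then:
+//     delete db;
+//   }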
+
+#ifndef ROCKSDB_LITE
+
+#ifndef __STDC_FORMAT_MACROS
+#define __STDC_FORMAT_MACROS
+#endif
+
+#include <inttypes.h>
+#include "db/builder.h"
+#include "db/db_impl.h"
+#include "db/dbformat.h"
+#include "db/filename.h"
+#include "db/log_reader.h"
+#include "db/log_writer.h"
+#include "db/memtable.h"
+#include "db/table_cache.h"
+#include "db/version_edit.h"
+#include "db/write_batch_internal.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/immutable_options.h"
+#include "rocksdb/options.h"
+#include "rocksdb/write_buffer_manager.h"
+#include "table/scoped_arena_iterator.h"
+#include "util/file_reader_writer.h"
+#include "util/string_util.h"
+
+namespace rocksdb {
+
+namespace {
+
+class Repairer {
+ public:
+ Repairer(const std::string& dbname, const DBOptions& db_options,
+ const std::vector<ColumnFamilyDescriptor>& column_families,
+ const ColumnFamilyOptions& default_cf_opts,
+ const ColumnFamilyOptions& unknown_cf_opts, bool create_unknown_cfs)
+ : dbname_(dbname),
+ env_(db_options.env),
+ env_options_(),
+ db_options_(SanitizeOptions(dbname_, db_options)),
+ icmp_(default_cf_opts.comparator),
+ default_cf_opts_(default_cf_opts),
+ default_cf_iopts_(
+ ImmutableCFOptions(Options(db_options_, default_cf_opts))),
+ unknown_cf_opts_(unknown_cf_opts),
+ create_unknown_cfs_(create_unknown_cfs),
+ raw_table_cache_(
+ // TableCache can be small since we expect each table to be opened
+ // once.
+ NewLRUCache(10, db_options_.table_cache_numshardbits)),
+ table_cache_(new TableCache(default_cf_iopts_, env_options_,
+ raw_table_cache_.get())),
+ wb_(db_options_.db_write_buffer_size),
+ wc_(db_options_.delayed_write_rate),
+ vset_(dbname_, &db_options_, env_options_, raw_table_cache_.get(), &wb_,
+ &wc_),
+ next_file_number_(1) {
+ for (const auto& cfd : column_families) {
+ cf_name_to_opts_[cfd.name] = cfd.options;
+ }
+ }
+
+ const ColumnFamilyOptions* GetColumnFamilyOptions(
+ const std::string& cf_name) {
+ if (cf_name_to_opts_.find(cf_name) == cf_name_to_opts_.end()) {
+ if (create_unknown_cfs_) {
+ return &unknown_cf_opts_;
+ }
+ return nullptr;
+ }
+ return &cf_name_to_opts_[cf_name];
+ }
+
+ // Adds a column family to the VersionSet with cf_options_ and updates
+ // manifest.
+ Status AddColumnFamily(const std::string& cf_name, uint32_t cf_id) { + const auto* cf_opts = GetColumnFamilyOptions(cf_name); + if (cf_opts == nullptr) { + return Status::Corruption("Encountered unknown column family with name=" + + cf_name + ", id=" + ToString(cf_id)); + } + Options opts(db_options_, *cf_opts); + MutableCFOptions mut_cf_opts(opts, ImmutableCFOptions(opts)); + + VersionEdit edit; + edit.SetComparatorName(opts.comparator->Name()); + edit.SetLogNumber(0); + edit.SetColumnFamily(cf_id); + ColumnFamilyData* cfd; + cfd = nullptr; + edit.AddColumnFamily(cf_name); + + mutex_.Lock(); + Status status = vset_.LogAndApply(cfd, mut_cf_opts, &edit, &mutex_, + nullptr /* db_directory */, + false /* new_descriptor_log */, cf_opts); + mutex_.Unlock(); + return status; + } + + ~Repairer() { + delete table_cache_; + } + + Status Run() { + Status status = FindFiles(); + if (status.ok()) { + // Discard older manifests and start a fresh one + for (size_t i = 0; i < manifests_.size(); i++) { + ArchiveFile(dbname_ + "/" + manifests_[i]); + } + // Just create a DBImpl temporarily so we can reuse NewDB() + DBImpl* db_impl = new DBImpl(db_options_, dbname_); + status = db_impl->NewDB(); + delete db_impl; + } + if (status.ok()) { + // Recover using the fresh manifest created by NewDB() + status = + vset_.Recover({{kDefaultColumnFamilyName, default_cf_opts_}}, false); + } + if (status.ok()) { + // Need to scan existing SST files first so the column families are + // created before we process WAL files + ExtractMetaData(); + + // ExtractMetaData() uses table_fds_ to know which SST files' metadata to + // extract -- we need to clear it here since metadata for existing SST + // files has been extracted already + table_fds_.clear(); + ConvertLogFilesToTables(); + ExtractMetaData(); + status = AddTables(); + } + if (status.ok()) { + uint64_t bytes = 0; + for (size_t i = 0; i < tables_.size(); i++) { + bytes += tables_[i].meta.fd.GetFileSize(); + } + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "**** Repaired rocksdb %s; " + "recovered %" ROCKSDB_PRIszt " files; %" PRIu64 + "bytes. " + "Some data may have been lost. 
" + "****", + dbname_.c_str(), tables_.size(), bytes); + } + return status; + } + + private: + struct TableInfo { + FileMetaData meta; + uint32_t column_family_id; + std::string column_family_name; + SequenceNumber min_sequence; + SequenceNumber max_sequence; + }; + + std::string const dbname_; + Env* const env_; + const EnvOptions env_options_; + const DBOptions db_options_; + const InternalKeyComparator icmp_; + const ColumnFamilyOptions default_cf_opts_; + const ImmutableCFOptions default_cf_iopts_; // table_cache_ holds reference + const ColumnFamilyOptions unknown_cf_opts_; + const bool create_unknown_cfs_; + std::shared_ptr raw_table_cache_; + TableCache* table_cache_; + WriteBufferManager wb_; + WriteController wc_; + VersionSet vset_; + std::unordered_map cf_name_to_opts_; + InstrumentedMutex mutex_; + + std::vector manifests_; + std::vector table_fds_; + std::vector logs_; + std::vector tables_; + uint64_t next_file_number_; + + Status FindFiles() { + std::vector filenames; + bool found_file = false; + for (size_t path_id = 0; path_id < db_options_.db_paths.size(); path_id++) { + Status status = + env_->GetChildren(db_options_.db_paths[path_id].path, &filenames); + if (!status.ok()) { + return status; + } + if (!filenames.empty()) { + found_file = true; + } + + uint64_t number; + FileType type; + for (size_t i = 0; i < filenames.size(); i++) { + if (ParseFileName(filenames[i], &number, &type)) { + if (type == kDescriptorFile) { + assert(path_id == 0); + manifests_.push_back(filenames[i]); + } else { + if (number + 1 > next_file_number_) { + next_file_number_ = number + 1; + } + if (type == kLogFile) { + assert(path_id == 0); + logs_.push_back(number); + } else if (type == kTableFile) { + table_fds_.emplace_back(number, static_cast(path_id), + 0); + } else { + // Ignore other files + } + } + } + } + } + if (!found_file) { + return Status::Corruption(dbname_, "repair found no files"); + } + return Status::OK(); + } + + void ConvertLogFilesToTables() { + for (size_t i = 0; i < logs_.size(); i++) { + std::string logname = LogFileName(dbname_, logs_[i]); + Status status = ConvertLogToTable(logs_[i]); + if (!status.ok()) { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "Log #%" PRIu64 ": ignoring conversion error: %s", logs_[i], + status.ToString().c_str()); + } + ArchiveFile(logname); + } + } + + Status ConvertLogToTable(uint64_t log) { + struct LogReporter : public log::Reader::Reporter { + Env* env; + std::shared_ptr info_log; + uint64_t lognum; + virtual void Corruption(size_t bytes, const Status& s) override { + // We print error messages for corruption, but continue repairing. + Log(InfoLogLevel::ERROR_LEVEL, info_log, + "Log #%" PRIu64 ": dropping %d bytes; %s", lognum, + static_cast(bytes), s.ToString().c_str()); + } + }; + + // Open the log file + std::string logname = LogFileName(dbname_, log); + unique_ptr lfile; + Status status = env_->NewSequentialFile(logname, &lfile, env_options_); + if (!status.ok()) { + return status; + } + unique_ptr lfile_reader( + new SequentialFileReader(std::move(lfile))); + + // Create the log reader. + LogReporter reporter; + reporter.env = env_; + reporter.info_log = db_options_.info_log; + reporter.lognum = log; + // We intentionally make log::Reader do checksumming so that + // corruptions cause entire commits to be skipped instead of + // propagating bad information (like overly large sequence + // numbers). 
+ log::Reader reader(db_options_.info_log, std::move(lfile_reader), &reporter,
+ true /*enable checksum*/, 0 /*initial_offset*/, log);
+
+ // Initialize per-column family memtables
+ for (auto* cfd : *vset_.GetColumnFamilySet()) {
+ cfd->CreateNewMemtable(*cfd->GetLatestMutableCFOptions(),
+ kMaxSequenceNumber);
+ }
+ auto cf_mems = new ColumnFamilyMemTablesImpl(vset_.GetColumnFamilySet());
+
+ // Read all the records and add to a memtable
+ std::string scratch;
+ Slice record;
+ WriteBatch batch;
+ int counter = 0;
+ while (reader.ReadRecord(&record, &scratch)) {
+ if (record.size() < WriteBatchInternal::kHeader) {
+ reporter.Corruption(
+ record.size(), Status::Corruption("log record too small"));
+ continue;
+ }
+ WriteBatchInternal::SetContents(&batch, record);
+ status = WriteBatchInternal::InsertInto(&batch, cf_mems, nullptr);
+ if (status.ok()) {
+ counter += WriteBatchInternal::Count(&batch);
+ } else {
+ Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+ "Log #%" PRIu64 ": ignoring %s", log, status.ToString().c_str());
+ status = Status::OK(); // Keep going with rest of file
+ }
+ }
+
+ // Dump a table for each column family with entries in this log file.
+ for (auto* cfd : *vset_.GetColumnFamilySet()) {
+ // Do not record a version edit for this conversion to a Table
+ // since ExtractMetaData() will also generate edits.
+ MemTable* mem = cfd->mem();
+ if (mem->IsEmpty()) {
+ continue;
+ }
+
+ FileMetaData meta;
+ meta.fd = FileDescriptor(next_file_number_++, 0, 0);
+ ReadOptions ro;
+ ro.total_order_seek = true;
+ Arena arena;
+ ScopedArenaIterator iter(mem->NewIterator(ro, &arena));
+ status = BuildTable(
+ dbname_, env_, *cfd->ioptions(), *cfd->GetLatestMutableCFOptions(),
+ env_options_, table_cache_, iter.get(), &meta,
+ cfd->internal_comparator(), cfd->int_tbl_prop_collector_factories(),
+ cfd->GetID(), cfd->GetName(), {}, kMaxSequenceNumber, kNoCompression,
+ CompressionOptions(), false, nullptr /* internal_stats */,
+ TableFileCreationReason::kRecovery);
+ Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+ "Log #%" PRIu64 ": %d ops saved to Table #%" PRIu64 " %s", log,
+ counter, meta.fd.GetNumber(), status.ToString().c_str());
+ if (status.ok()) {
+ if (meta.fd.GetFileSize() > 0) {
+ table_fds_.push_back(meta.fd);
+ }
+ } else {
+ break;
+ }
+ }
+ delete cf_mems;
+ return status;
+ }
+
+ void ExtractMetaData() {
+ for (size_t i = 0; i < table_fds_.size(); i++) {
+ TableInfo t;
+ t.meta.fd = table_fds_[i];
+ Status status = ScanTable(&t);
+ if (!status.ok()) {
+ std::string fname = TableFileName(
+ db_options_.db_paths, t.meta.fd.GetNumber(), t.meta.fd.GetPathId());
+ char file_num_buf[kFormatFileNumberBufSize];
+ FormatFileNumber(t.meta.fd.GetNumber(), t.meta.fd.GetPathId(),
+ file_num_buf, sizeof(file_num_buf));
+ Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+ "Table #%s: ignoring %s", file_num_buf, status.ToString().c_str());
+ ArchiveFile(fname);
+ } else {
+ tables_.push_back(t);
+ }
+ }
+ }
+
+ Status ScanTable(TableInfo* t) {
+ std::string fname = TableFileName(
+ db_options_.db_paths, t->meta.fd.GetNumber(), t->meta.fd.GetPathId());
+ int counter = 0;
+ uint64_t file_size;
+ Status status = env_->GetFileSize(fname, &file_size);
+ t->meta.fd = FileDescriptor(t->meta.fd.GetNumber(), t->meta.fd.GetPathId(),
+ file_size);
+ std::shared_ptr<const TableProperties> props;
+ if (status.ok()) {
+ status = table_cache_->GetTableProperties(env_options_, icmp_, t->meta.fd,
+ &props);
+ }
+ if (status.ok()) {
+ t->column_family_id = static_cast<uint32_t>(props->column_family_id);
+ if (t->column_family_id ==
+ TablePropertiesCollectorFactory::Context::kUnknownColumnFamily) {
+ Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log,
+ "Table #%" PRIu64
+ ": column family unknown (probably due to legacy format); "
+ "adding to default column family id 0.",
+ t->meta.fd.GetNumber());
+ t->column_family_id = 0;
+ }
+
+ if (vset_.GetColumnFamilySet()->GetColumnFamily(t->column_family_id) ==
+ nullptr) {
+ status =
+ AddColumnFamily(props->column_family_name, t->column_family_id);
+ }
+ }
+ ColumnFamilyData* cfd = nullptr;
+ if (status.ok()) {
+ cfd = vset_.GetColumnFamilySet()->GetColumnFamily(t->column_family_id);
+ if (cfd->GetName() != props->column_family_name) {
+ Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
+ "Table #%" PRIu64
+ ": inconsistent column family name '%s'; expected '%s' for column "
+ "family id %" PRIu32 ".",
+ t->meta.fd.GetNumber(), props->column_family_name.c_str(),
+ cfd->GetName().c_str(), t->column_family_id);
+ status = Status::Corruption(dbname_, "inconsistent column family name");
+ }
+ }
+ if (status.ok()) {
+ InternalIterator* iter = table_cache_->NewIterator(
+ ReadOptions(), env_options_, cfd->internal_comparator(), t->meta.fd);
+ bool empty = true;
+ ParsedInternalKey parsed;
+ t->min_sequence = 0;
+ t->max_sequence = 0;
+ for (iter->SeekToFirst(); iter->Valid(); iter->Next()) {
+ Slice key = iter->key();
+ if (!ParseInternalKey(key, &parsed)) {
+ Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log,
+ "Table #%" PRIu64 ": unparsable key %s", t->meta.fd.GetNumber(),
+ EscapeString(key).c_str());
+ continue;
+ }
+
+ counter++;
+ if (empty) {
+ empty = false;
+ t->meta.smallest.DecodeFrom(key);
+ }
+ t->meta.largest.DecodeFrom(key);
+ if (parsed.sequence < t->min_sequence) {
+ t->min_sequence = parsed.sequence;
+ }
+ if (parsed.sequence > t->max_sequence) {
+ t->max_sequence = parsed.sequence;
+ }
+ }
+ if (!iter->status().ok()) {
+ status = iter->status();
+ }
+ delete iter;
+
+ Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log,
+ "Table #%" PRIu64 ": %d entries %s", t->meta.fd.GetNumber(), counter,
+ status.ToString().c_str());
+ }
+ return status;
+ }
+
+ Status AddTables() {
+ std::unordered_map<uint32_t, std::vector<const TableInfo*>> cf_id_to_tables;
+ SequenceNumber max_sequence = 0;
+ for (size_t i = 0; i < tables_.size(); i++) {
+ cf_id_to_tables[tables_[i].column_family_id].push_back(&tables_[i]);
+ if (max_sequence < tables_[i].max_sequence) {
+ max_sequence = tables_[i].max_sequence;
+ }
+ }
+ vset_.SetLastSequence(max_sequence);
+
+ for (const auto& cf_id_and_tables : cf_id_to_tables) {
+ auto* cfd =
+ vset_.GetColumnFamilySet()->GetColumnFamily(cf_id_and_tables.first);
+ VersionEdit edit;
+ edit.SetComparatorName(cfd->user_comparator()->Name());
+ edit.SetLogNumber(0);
+ edit.SetNextFile(next_file_number_);
+ edit.SetColumnFamily(cfd->GetID());
+
+ // TODO(opt): separate out into multiple levels
+ for (const auto* table : cf_id_and_tables.second) {
+ edit.AddFile(0, table->meta.fd.GetNumber(), table->meta.fd.GetPathId(),
+ table->meta.fd.GetFileSize(), table->meta.smallest,
+ table->meta.largest, table->min_sequence,
+ table->max_sequence, table->meta.marked_for_compaction);
+ }
+ mutex_.Lock();
+ Status status = vset_.LogAndApply(
+ cfd, *cfd->GetLatestMutableCFOptions(), &edit, &mutex_,
+ nullptr /* db_directory */, false /* new_descriptor_log */);
+ mutex_.Unlock();
+ if (!status.ok()) {
+ return status;
+ }
+ }
+ return Status::OK();
+ }
+
+ void ArchiveFile(const std::string& fname) {
+ // Move into another directory. E.g., for
+ // dir/foo
+ // rename to
+ // dir/lost/foo
+ const char* slash = strrchr(fname.c_str(), '/');
+ std::string new_dir;
+ if (slash != nullptr) {
+ new_dir.assign(fname.data(), slash - fname.data());
+ }
+ new_dir.append("/lost");
+ env_->CreateDir(new_dir); // Ignore error
+ std::string new_file = new_dir;
+ new_file.append("/");
+ new_file.append((slash == nullptr) ? fname.c_str() : slash + 1);
+ Status s = env_->RenameFile(fname, new_file);
+ Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, "Archiving %s: %s\n",
+ fname.c_str(), s.ToString().c_str());
+ }
+};
+
+Status GetDefaultCFOptions(
+ const std::vector<ColumnFamilyDescriptor>& column_families,
+ ColumnFamilyOptions* res) {
+ assert(res != nullptr);
+ auto iter = std::find_if(column_families.begin(), column_families.end(),
+ [](const ColumnFamilyDescriptor& cfd) {
+ return cfd.name == kDefaultColumnFamilyName;
+ });
+ if (iter == column_families.end()) {
+ return Status::InvalidArgument(
+ "column_families", "Must contain entry for default column family");
+ }
+ *res = iter->options;
+ return Status::OK();
+}
+} // anonymous namespace
+
+Status RepairDB(const std::string& dbname, const DBOptions& db_options,
+ const std::vector<ColumnFamilyDescriptor>& column_families) {
+ ColumnFamilyOptions default_cf_opts;
+ Status status = GetDefaultCFOptions(column_families, &default_cf_opts);
+ if (status.ok()) {
+ Repairer repairer(dbname, db_options, column_families, default_cf_opts,
+ ColumnFamilyOptions() /* unknown_cf_opts */,
+ false /* create_unknown_cfs */);
+ status = repairer.Run();
+ }
+ return status;
+}
+
+Status RepairDB(const std::string& dbname, const DBOptions& db_options,
+ const std::vector<ColumnFamilyDescriptor>& column_families,
+ const ColumnFamilyOptions& unknown_cf_opts) {
+ ColumnFamilyOptions default_cf_opts;
+ Status status = GetDefaultCFOptions(column_families, &default_cf_opts);
+ if (status.ok()) {
+ Repairer repairer(dbname, db_options, column_families, default_cf_opts,
+ unknown_cf_opts, true /* create_unknown_cfs */);
+ status = repairer.Run();
+ }
+ return status;
+}
+
+Status RepairDB(const std::string& dbname, const Options& options) {
+ DBOptions db_options(options);
+ ColumnFamilyOptions cf_options(options);
+ Repairer repairer(dbname, db_options, {}, cf_options /* default_cf_opts */,
+ cf_options /* unknown_cf_opts */,
+ true /* create_unknown_cfs */);
+ return repairer.Run();
+}
+
+} // namespace rocksdb
+
+#endif // ROCKSDB_LITE
diff --git a/external/rocksdb/db/repair_test.cc b/external/rocksdb/db/repair_test.cc
new file mode 100755
index 0000000000..93e5113b35
--- /dev/null
+++ b/external/rocksdb/db/repair_test.cc
@@ -0,0 +1,276 @@
+// Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#include "db/db_impl.h"
+#include "db/db_test_util.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/db.h"
+#include "rocksdb/transaction_log.h"
+#include "util/file_util.h"
+#include "util/string_util.h"
+
+namespace rocksdb {
+
+class RepairTest : public DBTestBase {
+ public:
+ RepairTest() : DBTestBase("/repair_test") {}
+
+ std::string GetFirstSstPath() {
+ uint64_t manifest_size;
+ std::vector<std::string> files;
+ db_->GetLiveFiles(files, &manifest_size);
+ auto sst_iter =
+ std::find_if(files.begin(), files.end(), [](const std::string& file) {
+ uint64_t number;
+ FileType type;
+ bool ok = ParseFileName(file, &number, &type);
+ return ok && type == kTableFile;
+ });
+ return sst_iter == files.end() ? "" : dbname_ + *sst_iter;
+ }
+};
+
+TEST_F(RepairTest, LostManifest) {
+ // Add a couple SST files, delete the manifest, and verify RepairDB() saves
+ // the day.
+ Put("key", "val");
+ Flush();
+ Put("key2", "val2");
+ Flush();
+ // Need to get path before Close() deletes db_, but delete it after Close() to
+ // ensure Close() didn't change the manifest.
+ std::string manifest_path =
+ DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo());
+
+ Close();
+ ASSERT_OK(env_->FileExists(manifest_path));
+ ASSERT_OK(env_->DeleteFile(manifest_path));
+ ASSERT_OK(RepairDB(dbname_, CurrentOptions()));
+ Reopen(CurrentOptions());
+
+ ASSERT_EQ(Get("key"), "val");
+ ASSERT_EQ(Get("key2"), "val2");
+}
+
+TEST_F(RepairTest, CorruptManifest) {
+ // Manifest is in an invalid format. Expect a full recovery.
+ Put("key", "val");
+ Flush();
+ Put("key2", "val2");
+ Flush();
+ // Need to get path before Close() deletes db_, but overwrite it after Close()
+ // to ensure Close() didn't change the manifest.
+ std::string manifest_path =
+ DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo());
+
+ Close();
+ ASSERT_OK(env_->FileExists(manifest_path));
+ CreateFile(env_, manifest_path, "blah");
+ ASSERT_OK(RepairDB(dbname_, CurrentOptions()));
+ Reopen(CurrentOptions());
+
+ ASSERT_EQ(Get("key"), "val");
+ ASSERT_EQ(Get("key2"), "val2");
+}
+
+TEST_F(RepairTest, IncompleteManifest) {
+ // In this case, the manifest is valid but does not reference all of the SST
+ // files. Expect a full recovery.
+ Put("key", "val");
+ Flush();
+ std::string orig_manifest_path =
+ DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo());
+ CopyFile(orig_manifest_path, orig_manifest_path + ".tmp");
+ Put("key2", "val2");
+ Flush();
+ // Need to get path before Close() deletes db_, but overwrite it after Close()
+ // to ensure Close() didn't change the manifest.
+ std::string new_manifest_path =
+ DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo());
+
+ Close();
+ ASSERT_OK(env_->FileExists(new_manifest_path));
+ // Replace the manifest with one that is only aware of the first SST file.
+ CopyFile(orig_manifest_path + ".tmp", new_manifest_path);
+ ASSERT_OK(RepairDB(dbname_, CurrentOptions()));
+ Reopen(CurrentOptions());
+
+ ASSERT_EQ(Get("key"), "val");
+ ASSERT_EQ(Get("key2"), "val2");
+}
+
+TEST_F(RepairTest, LostSst) {
+ // Delete one of the SST files but preserve the manifest that refers to it,
+ // then verify the DB is still usable for the intact SST.
+ Put("key", "val"); + Flush(); + Put("key2", "val2"); + Flush(); + auto sst_path = GetFirstSstPath(); + ASSERT_FALSE(sst_path.empty()); + ASSERT_OK(env_->DeleteFile(sst_path)); + + Close(); + ASSERT_OK(RepairDB(dbname_, CurrentOptions())); + Reopen(CurrentOptions()); + + // Exactly one of the key-value pairs should be in the DB now. + ASSERT_TRUE((Get("key") == "val") != (Get("key2") == "val2")); +} + +TEST_F(RepairTest, CorruptSst) { + // Corrupt one of the SST files but preserve the manifest that refers to it, + // then verify the DB is still usable for the intact SST. + Put("key", "val"); + Flush(); + Put("key2", "val2"); + Flush(); + auto sst_path = GetFirstSstPath(); + ASSERT_FALSE(sst_path.empty()); + CreateFile(env_, sst_path, "blah"); + + Close(); + ASSERT_OK(RepairDB(dbname_, CurrentOptions())); + Reopen(CurrentOptions()); + + // Exactly one of the key-value pairs should be in the DB now. + ASSERT_TRUE((Get("key") == "val") != (Get("key2") == "val2")); +} + +TEST_F(RepairTest, UnflushedSst) { + // This test case invokes repair while some data is unflushed, then verifies + // that data is in the db. + Put("key", "val"); + VectorLogPtr wal_files; + ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files)); + ASSERT_EQ(wal_files.size(), 1); + uint64_t total_ssts_size; + GetAllSSTFiles(&total_ssts_size); + ASSERT_EQ(total_ssts_size, 0); + // Need to get path before Close() deletes db_, but delete it after Close() to + // ensure Close() didn't change the manifest. + std::string manifest_path = + DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo()); + + Close(); + ASSERT_OK(env_->FileExists(manifest_path)); + ASSERT_OK(env_->DeleteFile(manifest_path)); + ASSERT_OK(RepairDB(dbname_, CurrentOptions())); + Reopen(CurrentOptions()); + + ASSERT_OK(dbfull()->GetSortedWalFiles(wal_files)); + ASSERT_EQ(wal_files.size(), 0); + GetAllSSTFiles(&total_ssts_size); + ASSERT_GT(total_ssts_size, 0); + ASSERT_EQ(Get("key"), "val"); +} + +TEST_F(RepairTest, RepairMultipleColumnFamilies) { + // Verify repair logic associates SST files with their original column + // families. + const int kNumCfs = 3; + const int kEntriesPerCf = 2; + DestroyAndReopen(CurrentOptions()); + CreateAndReopenWithCF({"pikachu1", "pikachu2"}, CurrentOptions()); + for (int i = 0; i < kNumCfs; ++i) { + for (int j = 0; j < kEntriesPerCf; ++j) { + Put(i, "key" + ToString(j), "val" + ToString(j)); + if (j == kEntriesPerCf - 1 && i == kNumCfs - 1) { + // Leave one unflushed so we can verify WAL entries are properly + // associated with column families. + continue; + } + Flush(i); + } + } + + // Need to get path before Close() deletes db_, but delete it after Close() to + // ensure Close() doesn't re-create the manifest. + std::string manifest_path = + DescriptorFileName(dbname_, dbfull()->TEST_Current_Manifest_FileNo()); + Close(); + ASSERT_OK(env_->FileExists(manifest_path)); + ASSERT_OK(env_->DeleteFile(manifest_path)); + + ASSERT_OK(RepairDB(dbname_, CurrentOptions())); + + ReopenWithColumnFamilies({"default", "pikachu1", "pikachu2"}, + CurrentOptions()); + for (int i = 0; i < kNumCfs; ++i) { + for (int j = 0; j < kEntriesPerCf; ++j) { + ASSERT_EQ(Get(i, "key" + ToString(j)), "val" + ToString(j)); + } + } +} + +TEST_F(RepairTest, RepairColumnFamilyOptions) { + // Verify repair logic uses correct ColumnFamilyOptions when repairing a + // database with different options for column families. 
+ const int kNumCfs = 2; + const int kEntriesPerCf = 2; + + Options opts(CurrentOptions()), rev_opts(CurrentOptions()); + opts.comparator = BytewiseComparator(); + rev_opts.comparator = ReverseBytewiseComparator(); + + DestroyAndReopen(opts); + CreateColumnFamilies({"reverse"}, rev_opts); + ReopenWithColumnFamilies({"default", "reverse"}, + std::vector{opts, rev_opts}); + for (int i = 0; i < kNumCfs; ++i) { + for (int j = 0; j < kEntriesPerCf; ++j) { + Put(i, "key" + ToString(j), "val" + ToString(j)); + if (i == kNumCfs - 1 && j == kEntriesPerCf - 1) { + // Leave one unflushed so we can verify RepairDB's flush logic + continue; + } + Flush(i); + } + } + Close(); + + // RepairDB() records the comparator in the manifest, and DB::Open would fail + // if a different comparator were used. + ASSERT_OK(RepairDB(dbname_, opts, {{"default", opts}, {"reverse", rev_opts}}, + opts /* unknown_cf_opts */)); + ASSERT_OK(TryReopenWithColumnFamilies({"default", "reverse"}, + std::vector{opts, rev_opts})); + for (int i = 0; i < kNumCfs; ++i) { + for (int j = 0; j < kEntriesPerCf; ++j) { + ASSERT_EQ(Get(i, "key" + ToString(j)), "val" + ToString(j)); + } + } + + // Examine table properties to verify RepairDB() used the right options when + // converting WAL->SST + TablePropertiesCollection fname_to_props; + db_->GetPropertiesOfAllTables(handles_[1], &fname_to_props); + ASSERT_EQ(fname_to_props.size(), 2U); + for (const auto& fname_and_props : fname_to_props) { + ASSERT_EQ(InternalKeyComparator(rev_opts.comparator).Name(), + fname_and_props.second->comparator_name); + } + + // Also check comparator when it's provided via "unknown" CF options + ASSERT_OK(RepairDB(dbname_, opts, {{"default", opts}}, + rev_opts /* unknown_cf_opts */)); + ASSERT_OK(TryReopenWithColumnFamilies({"default", "reverse"}, + std::vector{opts, rev_opts})); + for (int i = 0; i < kNumCfs; ++i) { + for (int j = 0; j < kEntriesPerCf; ++j) { + ASSERT_EQ(Get(i, "key" + ToString(j)), "val" + ToString(j)); + } + } +} +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/skiplist.h b/external/rocksdb/db/skiplist.h new file mode 100755 index 0000000000..3fdbd8f545 --- /dev/null +++ b/external/rocksdb/db/skiplist.h @@ -0,0 +1,477 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +// Thread safety +// ------------- +// +// Writes require external synchronization, most likely a mutex. +// Reads require a guarantee that the SkipList will not be destroyed +// while the read is in progress. Apart from that, reads progress +// without any internal locking or synchronization. +// +// Invariants: +// +// (1) Allocated nodes are never deleted until the SkipList is +// destroyed. This is trivially guaranteed by the code since we +// never delete any skip list nodes. +// +// (2) The contents of a Node except for the next/prev pointers are +// immutable after the Node has been linked into the SkipList. 
+// Only Insert() modifies the list, and it is careful to initialize +// a node and use release-stores to publish the nodes in one or +// more lists. +// +// ... prev vs. next pointer ordering ... +// + +#pragma once +#include +#include +#include +#include "port/port.h" +#include "util/allocator.h" +#include "util/random.h" + +namespace rocksdb { + +template +class SkipList { + private: + struct Node; + + public: + // Create a new SkipList object that will use "cmp" for comparing keys, + // and will allocate memory using "*allocator". Objects allocated in the + // allocator must remain allocated for the lifetime of the skiplist object. + explicit SkipList(Comparator cmp, Allocator* allocator, + int32_t max_height = 12, int32_t branching_factor = 4); + + // Insert key into the list. + // REQUIRES: nothing that compares equal to key is currently in the list. + void Insert(const Key& key); + + // Returns true iff an entry that compares equal to key is in the list. + bool Contains(const Key& key) const; + + // Return estimated number of entries smaller than `key`. + uint64_t EstimateCount(const Key& key) const; + + // Iteration over the contents of a skip list + class Iterator { + public: + // Initialize an iterator over the specified list. + // The returned iterator is not valid. + explicit Iterator(const SkipList* list); + + // Change the underlying skiplist used for this iterator + // This enables us not changing the iterator without deallocating + // an old one and then allocating a new one + void SetList(const SkipList* list); + + // Returns true iff the iterator is positioned at a valid node. + bool Valid() const; + + // Returns the key at the current position. + // REQUIRES: Valid() + const Key& key() const; + + // Advances to the next position. + // REQUIRES: Valid() + void Next(); + + // Advances to the previous position. + // REQUIRES: Valid() + void Prev(); + + // Advance to the first entry with a key >= target + void Seek(const Key& target); + + // Position at the first entry in list. + // Final state of iterator is Valid() iff list is not empty. + void SeekToFirst(); + + // Position at the last entry in list. + // Final state of iterator is Valid() iff list is not empty. + void SeekToLast(); + + private: + const SkipList* list_; + Node* node_; + // Intentionally copyable + }; + + private: + const uint16_t kMaxHeight_; + const uint16_t kBranching_; + const uint32_t kScaledInverseBranching_; + + // Immutable after construction + Comparator const compare_; + Allocator* const allocator_; // Allocator used for allocations of nodes + + Node* const head_; + + // Modified only by Insert(). Read racily by readers, but stale + // values are ok. + std::atomic max_height_; // Height of the entire list + + // Used for optimizing sequential insert patterns. Tricky. prev_[i] for + // i up to max_height_ is the predecessor of prev_[0] and prev_height_ + // is the height of prev_[0]. prev_[0] can only be equal to head before + // insertion, in which case max_height_ and prev_height_ are 1. + Node** prev_; + int32_t prev_height_; + + inline int GetMaxHeight() const { + return max_height_.load(std::memory_order_relaxed); + } + + Node* NewNode(const Key& key, int height); + int RandomHeight(); + bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); } + + // Return true if key is greater than the data stored in "n" + bool KeyIsAfterNode(const Key& key, Node* n) const; + + // Returns the earliest node with a key >= key. + // Return nullptr if there is no such node. 
+ Node* FindGreaterOrEqual(const Key& key) const; + + // Return the latest node with a key < key. + // Return head_ if there is no such node. + // Fills prev[level] with pointer to previous node at "level" for every + // level in [0..max_height_-1], if prev is non-null. + Node* FindLessThan(const Key& key, Node** prev = nullptr) const; + + // Return the last node in the list. + // Return head_ if list is empty. + Node* FindLast() const; + + // No copying allowed + SkipList(const SkipList&); + void operator=(const SkipList&); +}; + +// Implementation details follow +template +struct SkipList::Node { + explicit Node(const Key& k) : key(k) { } + + Key const key; + + // Accessors/mutators for links. Wrapped in methods so we can + // add the appropriate barriers as necessary. + Node* Next(int n) { + assert(n >= 0); + // Use an 'acquire load' so that we observe a fully initialized + // version of the returned Node. + return (next_[n].load(std::memory_order_acquire)); + } + void SetNext(int n, Node* x) { + assert(n >= 0); + // Use a 'release store' so that anybody who reads through this + // pointer observes a fully initialized version of the inserted node. + next_[n].store(x, std::memory_order_release); + } + + // No-barrier variants that can be safely used in a few locations. + Node* NoBarrier_Next(int n) { + assert(n >= 0); + return next_[n].load(std::memory_order_relaxed); + } + void NoBarrier_SetNext(int n, Node* x) { + assert(n >= 0); + next_[n].store(x, std::memory_order_relaxed); + } + + private: + // Array of length equal to the node height. next_[0] is lowest level link. + std::atomic next_[1]; +}; + +template +typename SkipList::Node* +SkipList::NewNode(const Key& key, int height) { + char* mem = allocator_->AllocateAligned( + sizeof(Node) + sizeof(std::atomic) * (height - 1)); + return new (mem) Node(key); +} + +template +inline SkipList::Iterator::Iterator(const SkipList* list) { + SetList(list); +} + +template +inline void SkipList::Iterator::SetList(const SkipList* list) { + list_ = list; + node_ = nullptr; +} + +template +inline bool SkipList::Iterator::Valid() const { + return node_ != nullptr; +} + +template +inline const Key& SkipList::Iterator::key() const { + assert(Valid()); + return node_->key; +} + +template +inline void SkipList::Iterator::Next() { + assert(Valid()); + node_ = node_->Next(0); +} + +template +inline void SkipList::Iterator::Prev() { + // Instead of using explicit "prev" links, we just search for the + // last node that falls before key. 
+ assert(Valid()); + node_ = list_->FindLessThan(node_->key); + if (node_ == list_->head_) { + node_ = nullptr; + } +} + +template +inline void SkipList::Iterator::Seek(const Key& target) { + node_ = list_->FindGreaterOrEqual(target); +} + +template +inline void SkipList::Iterator::SeekToFirst() { + node_ = list_->head_->Next(0); +} + +template +inline void SkipList::Iterator::SeekToLast() { + node_ = list_->FindLast(); + if (node_ == list_->head_) { + node_ = nullptr; + } +} + +template +int SkipList::RandomHeight() { + auto rnd = Random::GetTLSInstance(); + + // Increase height with probability 1 in kBranching + int height = 1; + while (height < kMaxHeight_ && rnd->Next() < kScaledInverseBranching_) { + height++; + } + assert(height > 0); + assert(height <= kMaxHeight_); + return height; +} + +template +bool SkipList::KeyIsAfterNode(const Key& key, Node* n) const { + // nullptr n is considered infinite + return (n != nullptr) && (compare_(n->key, key) < 0); +} + +template +typename SkipList::Node* SkipList:: + FindGreaterOrEqual(const Key& key) const { + // Note: It looks like we could reduce duplication by implementing + // this function as FindLessThan(key)->Next(0), but we wouldn't be able + // to exit early on equality and the result wouldn't even be correct. + // A concurrent insert might occur after FindLessThan(key) but before + // we get a chance to call Next(0). + Node* x = head_; + int level = GetMaxHeight() - 1; + Node* last_bigger = nullptr; + while (true) { + Node* next = x->Next(level); + // Make sure the lists are sorted + assert(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x)); + // Make sure we haven't overshot during our search + assert(x == head_ || KeyIsAfterNode(key, x)); + int cmp = (next == nullptr || next == last_bigger) + ? 
1 : compare_(next->key, key); + if (cmp == 0 || (cmp > 0 && level == 0)) { + return next; + } else if (cmp < 0) { + // Keep searching in this list + x = next; + } else { + // Switch to next list, reuse compare_() result + last_bigger = next; + level--; + } + } +} + +template +typename SkipList::Node* +SkipList::FindLessThan(const Key& key, Node** prev) const { + Node* x = head_; + int level = GetMaxHeight() - 1; + // KeyIsAfter(key, last_not_after) is definitely false + Node* last_not_after = nullptr; + while (true) { + Node* next = x->Next(level); + assert(x == head_ || next == nullptr || KeyIsAfterNode(next->key, x)); + assert(x == head_ || KeyIsAfterNode(key, x)); + if (next != last_not_after && KeyIsAfterNode(key, next)) { + // Keep searching in this list + x = next; + } else { + if (prev != nullptr) { + prev[level] = x; + } + if (level == 0) { + return x; + } else { + // Switch to next list, reuse KeyIsAfterNode() result + last_not_after = next; + level--; + } + } + } +} + +template +typename SkipList::Node* SkipList::FindLast() + const { + Node* x = head_; + int level = GetMaxHeight() - 1; + while (true) { + Node* next = x->Next(level); + if (next == nullptr) { + if (level == 0) { + return x; + } else { + // Switch to next list + level--; + } + } else { + x = next; + } + } +} + +template +uint64_t SkipList::EstimateCount(const Key& key) const { + uint64_t count = 0; + + Node* x = head_; + int level = GetMaxHeight() - 1; + while (true) { + assert(x == head_ || compare_(x->key, key) < 0); + Node* next = x->Next(level); + if (next == nullptr || compare_(next->key, key) >= 0) { + if (level == 0) { + return count; + } else { + // Switch to next list + count *= kBranching_; + level--; + } + } else { + x = next; + count++; + } + } +} + +template +SkipList::SkipList(const Comparator cmp, Allocator* allocator, + int32_t max_height, + int32_t branching_factor) + : kMaxHeight_(max_height), + kBranching_(branching_factor), + kScaledInverseBranching_((Random::kMaxNext + 1) / kBranching_), + compare_(cmp), + allocator_(allocator), + head_(NewNode(0 /* any key will do */, max_height)), + max_height_(1), + prev_height_(1) { + assert(max_height > 0 && kMaxHeight_ == static_cast(max_height)); + assert(branching_factor > 0 && + kBranching_ == static_cast(branching_factor)); + assert(kScaledInverseBranching_ > 0); + // Allocate the prev_ Node* array, directly from the passed-in allocator. + // prev_ does not need to be freed, as its life cycle is tied up with + // the allocator as a whole. + prev_ = reinterpret_cast( + allocator_->AllocateAligned(sizeof(Node*) * kMaxHeight_)); + for (int i = 0; i < kMaxHeight_; i++) { + head_->SetNext(i, nullptr); + prev_[i] = head_; + } +} + +template +void SkipList::Insert(const Key& key) { + // fast path for sequential insertion + if (!KeyIsAfterNode(key, prev_[0]->NoBarrier_Next(0)) && + (prev_[0] == head_ || KeyIsAfterNode(key, prev_[0]))) { + assert(prev_[0] != head_ || (prev_height_ == 1 && GetMaxHeight() == 1)); + + // Outside of this method prev_[1..max_height_] is the predecessor + // of prev_[0], and prev_height_ refers to prev_[0]. Inside Insert + // prev_[0..max_height - 1] is the predecessor of key. Switch from + // the external state to the internal + for (int i = 1; i < prev_height_; i++) { + prev_[i] = prev_[0]; + } + } else { + // TODO(opt): we could use a NoBarrier predecessor search as an + // optimization for architectures where memory_order_acquire needs + // a synchronization instruction.
Doesn't matter on x86 + FindLessThan(key, prev_); + } + + // Our data structure does not allow duplicate insertion + assert(prev_[0]->Next(0) == nullptr || !Equal(key, prev_[0]->Next(0)->key)); + + int height = RandomHeight(); + if (height > GetMaxHeight()) { + for (int i = GetMaxHeight(); i < height; i++) { + prev_[i] = head_; + } + //fprintf(stderr, "Change height from %d to %d\n", max_height_, height); + + // It is ok to mutate max_height_ without any synchronization + // with concurrent readers. A concurrent reader that observes + // the new value of max_height_ will see either the old value of + // new level pointers from head_ (nullptr), or a new value set in + // the loop below. In the former case the reader will + // immediately drop to the next level since nullptr sorts after all + // keys. In the latter case the reader will use the new node. + max_height_.store(height, std::memory_order_relaxed); + } + + Node* x = NewNode(key, height); + for (int i = 0; i < height; i++) { + // NoBarrier_SetNext() suffices since we will add a barrier when + // we publish a pointer to "x" in prev[i]. + x->NoBarrier_SetNext(i, prev_[i]->NoBarrier_Next(i)); + prev_[i]->SetNext(i, x); + } + prev_[0] = x; + prev_height_ = height; +} + +template +bool SkipList::Contains(const Key& key) const { + Node* x = FindGreaterOrEqual(key); + if (x != nullptr && Equal(key, x->key)) { + return true; + } else { + return false; + } +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/skiplist_test.cc b/external/rocksdb/db/skiplist_test.cc new file mode 100755 index 0000000000..b4f98e34ca --- /dev/null +++ b/external/rocksdb/db/skiplist_test.cc @@ -0,0 +1,378 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
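The acquire/release discipline in the skiplist comments above is the entire synchronization story for readers: Insert() fully initializes a node, publishes it through SetNext()'s release store, and readers observe it through Next()'s acquire load. A standalone sketch of that publication idiom (illustrative, not part of the patch):

#include <atomic>

struct Payload { int a; int b; };
std::atomic<Payload*> g_slot{nullptr};

// Writer: initialize the object completely, then publish the pointer with a
// release store (mirrors Node::SetNext()).
void Publish() {
  Payload* p = new Payload{1, 2};
  g_slot.store(p, std::memory_order_release);
}

// Reader: the acquire load guarantees that everything written before the
// release store is visible once the pointer is seen (mirrors Node::Next()).
int Read() {
  Payload* p = g_slot.load(std::memory_order_acquire);
  return p == nullptr ? 0 : p->a + p->b;
}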
+ +#include "db/skiplist.h" +#include +#include "rocksdb/env.h" +#include "util/arena.h" +#include "util/hash.h" +#include "util/random.h" +#include "util/testharness.h" + +namespace rocksdb { + +typedef uint64_t Key; + +struct TestComparator { + int operator()(const Key& a, const Key& b) const { + if (a < b) { + return -1; + } else if (a > b) { + return +1; + } else { + return 0; + } + } +}; + +class SkipTest : public testing::Test {}; + +TEST_F(SkipTest, Empty) { + Arena arena; + TestComparator cmp; + SkipList list(cmp, &arena); + ASSERT_TRUE(!list.Contains(10)); + + SkipList::Iterator iter(&list); + ASSERT_TRUE(!iter.Valid()); + iter.SeekToFirst(); + ASSERT_TRUE(!iter.Valid()); + iter.Seek(100); + ASSERT_TRUE(!iter.Valid()); + iter.SeekToLast(); + ASSERT_TRUE(!iter.Valid()); +} + +TEST_F(SkipTest, InsertAndLookup) { + const int N = 2000; + const int R = 5000; + Random rnd(1000); + std::set keys; + Arena arena; + TestComparator cmp; + SkipList list(cmp, &arena); + for (int i = 0; i < N; i++) { + Key key = rnd.Next() % R; + if (keys.insert(key).second) { + list.Insert(key); + } + } + + for (int i = 0; i < R; i++) { + if (list.Contains(i)) { + ASSERT_EQ(keys.count(i), 1U); + } else { + ASSERT_EQ(keys.count(i), 0U); + } + } + + // Simple iterator tests + { + SkipList::Iterator iter(&list); + ASSERT_TRUE(!iter.Valid()); + + iter.Seek(0); + ASSERT_TRUE(iter.Valid()); + ASSERT_EQ(*(keys.begin()), iter.key()); + + iter.SeekToFirst(); + ASSERT_TRUE(iter.Valid()); + ASSERT_EQ(*(keys.begin()), iter.key()); + + iter.SeekToLast(); + ASSERT_TRUE(iter.Valid()); + ASSERT_EQ(*(keys.rbegin()), iter.key()); + } + + // Forward iteration test + for (int i = 0; i < R; i++) { + SkipList::Iterator iter(&list); + iter.Seek(i); + + // Compare against model iterator + std::set::iterator model_iter = keys.lower_bound(i); + for (int j = 0; j < 3; j++) { + if (model_iter == keys.end()) { + ASSERT_TRUE(!iter.Valid()); + break; + } else { + ASSERT_TRUE(iter.Valid()); + ASSERT_EQ(*model_iter, iter.key()); + ++model_iter; + iter.Next(); + } + } + } + + // Backward iteration test + { + SkipList::Iterator iter(&list); + iter.SeekToLast(); + + // Compare against model iterator + for (std::set::reverse_iterator model_iter = keys.rbegin(); + model_iter != keys.rend(); + ++model_iter) { + ASSERT_TRUE(iter.Valid()); + ASSERT_EQ(*model_iter, iter.key()); + iter.Prev(); + } + ASSERT_TRUE(!iter.Valid()); + } +} + +// We want to make sure that with a single writer and multiple +// concurrent readers (with no synchronization other than when a +// reader's iterator is created), the reader always observes all the +// data that was present in the skip list when the iterator was +// constructor. Because insertions are happening concurrently, we may +// also observe new values that were inserted since the iterator was +// constructed, but we should never miss any values that were present +// at iterator construction time. +// +// We generate multi-part keys: +// +// where: +// key is in range [0..K-1] +// gen is a generation number for key +// hash is hash(key,gen) +// +// The insertion code picks a random key, sets gen to be 1 + the last +// generation number inserted for that key, and sets hash to Hash(key,gen). +// +// At the beginning of a read, we snapshot the last inserted +// generation number for each key. We then iterate, including random +// calls to Next() and Seek(). For every key we encounter, we +// check that it is either expected given the initial snapshot or has +// been concurrently added since the iterator started. 
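The layout described above packs three fields into one 64-bit key. A self-contained round-trip check of that packing, mirroring the MakeKey()/key()/gen()/hash() helpers defined just below (sample values are arbitrary):

#include <cassert>
#include <cstdint>

void CheckKeyLayout() {
  uint64_t k = 3, g = 7, h = 0x5a;
  uint64_t packed = (k << 40) | (g << 8) | (h & 0xff);
  assert((packed >> 40) == k);                 // key(): bits 40..63
  assert(((packed >> 8) & 0xffffffffu) == g);  // gen(): bits 8..39
  assert((packed & 0xff) == h);                // hash(): bits 0..7
}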
+class ConcurrentTest { + private: + static const uint32_t K = 4; + + static uint64_t key(Key key) { return (key >> 40); } + static uint64_t gen(Key key) { return (key >> 8) & 0xffffffffu; } + static uint64_t hash(Key key) { return key & 0xff; } + + static uint64_t HashNumbers(uint64_t k, uint64_t g) { + uint64_t data[2] = { k, g }; + return Hash(reinterpret_cast(data), sizeof(data), 0); + } + + static Key MakeKey(uint64_t k, uint64_t g) { + assert(sizeof(Key) == sizeof(uint64_t)); + assert(k <= K); // We sometimes pass K to seek to the end of the skiplist + assert(g <= 0xffffffffu); + return ((k << 40) | (g << 8) | (HashNumbers(k, g) & 0xff)); + } + + static bool IsValidKey(Key k) { + return hash(k) == (HashNumbers(key(k), gen(k)) & 0xff); + } + + static Key RandomTarget(Random* rnd) { + switch (rnd->Next() % 10) { + case 0: + // Seek to beginning + return MakeKey(0, 0); + case 1: + // Seek to end + return MakeKey(K, 0); + default: + // Seek to middle + return MakeKey(rnd->Next() % K, 0); + } + } + + // Per-key generation + struct State { + std::atomic generation[K]; + void Set(int k, int v) { + generation[k].store(v, std::memory_order_release); + } + int Get(int k) { return generation[k].load(std::memory_order_acquire); } + + State() { + for (unsigned int k = 0; k < K; k++) { + Set(k, 0); + } + } + }; + + // Current state of the test + State current_; + + Arena arena_; + + // SkipList is not protected by mu_. We just use a single writer + // thread to modify it. + SkipList list_; + + public: + ConcurrentTest() : list_(TestComparator(), &arena_) {} + + // REQUIRES: External synchronization + void WriteStep(Random* rnd) { + const uint32_t k = rnd->Next() % K; + const int g = current_.Get(k) + 1; + const Key new_key = MakeKey(k, g); + list_.Insert(new_key); + current_.Set(k, g); + } + + void ReadStep(Random* rnd) { + // Remember the initial committed state of the skiplist. + State initial_state; + for (unsigned int k = 0; k < K; k++) { + initial_state.Set(k, current_.Get(k)); + } + + Key pos = RandomTarget(rnd); + SkipList::Iterator iter(&list_); + iter.Seek(pos); + while (true) { + Key current; + if (!iter.Valid()) { + current = MakeKey(K, 0); + } else { + current = iter.key(); + ASSERT_TRUE(IsValidKey(current)) << current; + } + ASSERT_LE(pos, current) << "should not go backwards"; + + // Verify that everything in [pos,current) was not present in + // initial_state. + while (pos < current) { + ASSERT_LT(key(pos), K) << pos; + + // Note that generation 0 is never inserted, so it is ok if + // <*,0,*> is missing. + ASSERT_TRUE((gen(pos) == 0U) || + (gen(pos) > static_cast(initial_state.Get( + static_cast(key(pos)))))) + << "key: " << key(pos) << "; gen: " << gen(pos) + << "; initgen: " << initial_state.Get(static_cast(key(pos))); + + // Advance to next key in the valid key space + if (key(pos) < key(current)) { + pos = MakeKey(key(pos) + 1, 0); + } else { + pos = MakeKey(key(pos), gen(pos) + 1); + } + } + + if (!iter.Valid()) { + break; + } + + if (rnd->Next() % 2) { + iter.Next(); + pos = MakeKey(key(pos), gen(pos) + 1); + } else { + Key new_target = RandomTarget(rnd); + if (new_target > pos) { + pos = new_target; + iter.Seek(new_target); + } + } + } + } +}; +const uint32_t ConcurrentTest::K; + +// Simple test that does single-threaded testing of the ConcurrentTest +// scaffolding. 
+TEST_F(SkipTest, ConcurrentWithoutThreads) { + ConcurrentTest test; + Random rnd(test::RandomSeed()); + for (int i = 0; i < 10000; i++) { + test.ReadStep(&rnd); + test.WriteStep(&rnd); + } +} + +class TestState { + public: + ConcurrentTest t_; + int seed_; + std::atomic quit_flag_; + + enum ReaderState { + STARTING, + RUNNING, + DONE + }; + + explicit TestState(int s) + : seed_(s), quit_flag_(false), state_(STARTING), state_cv_(&mu_) {} + + void Wait(ReaderState s) { + mu_.Lock(); + while (state_ != s) { + state_cv_.Wait(); + } + mu_.Unlock(); + } + + void Change(ReaderState s) { + mu_.Lock(); + state_ = s; + state_cv_.Signal(); + mu_.Unlock(); + } + + private: + port::Mutex mu_; + ReaderState state_; + port::CondVar state_cv_; +}; + +static void ConcurrentReader(void* arg) { + TestState* state = reinterpret_cast(arg); + Random rnd(state->seed_); + int64_t reads = 0; + state->Change(TestState::RUNNING); + while (!state->quit_flag_.load(std::memory_order_acquire)) { + state->t_.ReadStep(&rnd); + ++reads; + } + state->Change(TestState::DONE); +} + +static void RunConcurrent(int run) { + const int seed = test::RandomSeed() + (run * 100); + Random rnd(seed); + const int N = 1000; + const int kSize = 1000; + for (int i = 0; i < N; i++) { + if ((i % 100) == 0) { + fprintf(stderr, "Run %d of %d\n", i, N); + } + TestState state(seed + 1); + Env::Default()->Schedule(ConcurrentReader, &state); + state.Wait(TestState::RUNNING); + for (int k = 0; k < kSize; k++) { + state.t_.WriteStep(&rnd); + } + state.quit_flag_.store(true, std::memory_order_release); + state.Wait(TestState::DONE); + } +} + +TEST_F(SkipTest, Concurrent1) { RunConcurrent(1); } +TEST_F(SkipTest, Concurrent2) { RunConcurrent(2); } +TEST_F(SkipTest, Concurrent3) { RunConcurrent(3); } +TEST_F(SkipTest, Concurrent4) { RunConcurrent(4); } +TEST_F(SkipTest, Concurrent5) { RunConcurrent(5); } + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/snapshot_impl.cc b/external/rocksdb/db/snapshot_impl.cc new file mode 100755 index 0000000000..5c4f6abaa8 --- /dev/null +++ b/external/rocksdb/db/snapshot_impl.cc @@ -0,0 +1,26 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "rocksdb/snapshot.h" + +#include "rocksdb/db.h" + +namespace rocksdb { + +ManagedSnapshot::ManagedSnapshot(DB* db) : db_(db), + snapshot_(db->GetSnapshot()) {} + +ManagedSnapshot::ManagedSnapshot(DB* db, const Snapshot* _snapshot) + : db_(db), snapshot_(_snapshot) {} + +ManagedSnapshot::~ManagedSnapshot() { + if (snapshot_) { + db_->ReleaseSnapshot(snapshot_); + } +} + +const Snapshot* ManagedSnapshot::snapshot() { return snapshot_;} + +} // namespace rocksdb diff --git a/external/rocksdb/db/snapshot_impl.h b/external/rocksdb/db/snapshot_impl.h new file mode 100755 index 0000000000..aaac7a0e3a --- /dev/null +++ b/external/rocksdb/db/snapshot_impl.h @@ -0,0 +1,130 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#include + +#include "rocksdb/db.h" + +namespace rocksdb { + +class SnapshotList; + +// Snapshots are kept in a doubly-linked list in the DB. +// Each SnapshotImpl corresponds to a particular sequence number. +class SnapshotImpl : public Snapshot { + public: + SequenceNumber number_; // const after creation + + virtual SequenceNumber GetSequenceNumber() const override { return number_; } + + private: + friend class SnapshotList; + + // SnapshotImpl is kept in a doubly-linked circular list + SnapshotImpl* prev_; + SnapshotImpl* next_; + + SnapshotList* list_; // just for sanity checks + + int64_t unix_time_; + + // Will this snapshot be used by a Transaction to do write-conflict checking? + bool is_write_conflict_boundary_; +}; + +class SnapshotList { + public: + SnapshotList() { + list_.prev_ = &list_; + list_.next_ = &list_; + list_.number_ = 0xFFFFFFFFL; // placeholder marker, for debugging + count_ = 0; + } + + bool empty() const { return list_.next_ == &list_; } + SnapshotImpl* oldest() const { assert(!empty()); return list_.next_; } + SnapshotImpl* newest() const { assert(!empty()); return list_.prev_; } + + const SnapshotImpl* New(SnapshotImpl* s, SequenceNumber seq, + uint64_t unix_time, bool is_write_conflict_boundary) { + s->number_ = seq; + s->unix_time_ = unix_time; + s->is_write_conflict_boundary_ = is_write_conflict_boundary; + s->list_ = this; + s->next_ = &list_; + s->prev_ = list_.prev_; + s->prev_->next_ = s; + s->next_->prev_ = s; + count_++; + return s; + } + + // Do not responsible to free the object. + void Delete(const SnapshotImpl* s) { + assert(s->list_ == this); + s->prev_->next_ = s->next_; + s->next_->prev_ = s->prev_; + count_--; + } + + // retrieve all snapshot numbers. They are sorted in ascending order. + std::vector GetAll( + SequenceNumber* oldest_write_conflict_snapshot = nullptr) { + std::vector ret; + + if (oldest_write_conflict_snapshot != nullptr) { + *oldest_write_conflict_snapshot = kMaxSequenceNumber; + } + + if (empty()) { + return ret; + } + SnapshotImpl* s = &list_; + while (s->next_ != &list_) { + ret.push_back(s->next_->number_); + + if (oldest_write_conflict_snapshot != nullptr && + *oldest_write_conflict_snapshot == kMaxSequenceNumber && + s->next_->is_write_conflict_boundary_) { + // If this is the first write-conflict boundary snapshot in the list, + // it is the oldest + *oldest_write_conflict_snapshot = s->next_->number_; + } + + s = s->next_; + } + return ret; + } + + // get the sequence number of the most recent snapshot + SequenceNumber GetNewest() { + if (empty()) { + return 0; + } + return newest()->number_; + } + + int64_t GetOldestSnapshotTime() const { + if (empty()) { + return 0; + } else { + return oldest()->unix_time_; + } + } + + uint64_t count() const { return count_; } + + private: + // Dummy head of doubly-linked list of snapshots + SnapshotImpl list_; + uint64_t count_; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/db/table_cache.cc b/external/rocksdb/db/table_cache.cc new file mode 100755 index 0000000000..ff79975667 --- /dev/null +++ b/external/rocksdb/db/table_cache.cc @@ -0,0 +1,377 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/table_cache.h" + +#include "db/dbformat.h" +#include "db/filename.h" +#include "db/version_edit.h" + +#include "rocksdb/statistics.h" +#include "table/internal_iterator.h" +#include "table/iterator_wrapper.h" +#include "table/table_builder.h" +#include "table/table_reader.h" +#include "table/get_context.h" +#include "util/coding.h" +#include "util/file_reader_writer.h" +#include "util/perf_context_imp.h" +#include "util/stop_watch.h" +#include "util/sync_point.h" + +namespace rocksdb { + +namespace { + +template +static void DeleteEntry(const Slice& key, void* value) { + T* typed_value = reinterpret_cast(value); + delete typed_value; +} + +static void UnrefEntry(void* arg1, void* arg2) { + Cache* cache = reinterpret_cast(arg1); + Cache::Handle* h = reinterpret_cast(arg2); + cache->Release(h); +} + +static void DeleteTableReader(void* arg1, void* arg2) { + TableReader* table_reader = reinterpret_cast(arg1); + delete table_reader; +} + +static Slice GetSliceForFileNumber(const uint64_t* file_number) { + return Slice(reinterpret_cast(file_number), + sizeof(*file_number)); +} + +#ifndef ROCKSDB_LITE + +void AppendVarint64(IterKey* key, uint64_t v) { + char buf[10]; + auto ptr = EncodeVarint64(buf, v); + key->TrimAppend(key->Size(), buf, ptr - buf); +} + +#endif // ROCKSDB_LITE + +} // namespace + +TableCache::TableCache(const ImmutableCFOptions& ioptions, + const EnvOptions& env_options, Cache* const cache) + : ioptions_(ioptions), env_options_(env_options), cache_(cache) { + if (ioptions_.row_cache) { + // If the same cache is shared by multiple instances, we need to + // disambiguate its entries. 
+ PutVarint64(&row_cache_id_, ioptions_.row_cache->NewId()); + } +} + +TableCache::~TableCache() { +} + +TableReader* TableCache::GetTableReaderFromHandle(Cache::Handle* handle) { + return reinterpret_cast(cache_->Value(handle)); +} + +void TableCache::ReleaseHandle(Cache::Handle* handle) { + cache_->Release(handle); +} + +Status TableCache::GetTableReader( + const EnvOptions& env_options, + const InternalKeyComparator& internal_comparator, const FileDescriptor& fd, + bool sequential_mode, size_t readahead, bool record_read_stats, + HistogramImpl* file_read_hist, unique_ptr* table_reader, + bool skip_filters, int level, bool prefetch_index_and_filter_in_cache) { + std::string fname = + TableFileName(ioptions_.db_paths, fd.GetNumber(), fd.GetPathId()); + unique_ptr file; + Status s = ioptions_.env->NewRandomAccessFile(fname, &file, env_options); + + RecordTick(ioptions_.statistics, NO_FILE_OPENS); + if (s.ok()) { + if (readahead > 0) { + file = NewReadaheadRandomAccessFile(std::move(file), readahead); + } + if (!sequential_mode && ioptions_.advise_random_on_open) { + file->Hint(RandomAccessFile::RANDOM); + } + StopWatch sw(ioptions_.env, ioptions_.statistics, TABLE_OPEN_IO_MICROS); + std::unique_ptr file_reader( + new RandomAccessFileReader(std::move(file), ioptions_.env, + ioptions_.statistics, record_read_stats, + file_read_hist)); + s = ioptions_.table_factory->NewTableReader( + TableReaderOptions(ioptions_, env_options, internal_comparator, + skip_filters, level), + std::move(file_reader), fd.GetFileSize(), table_reader, + prefetch_index_and_filter_in_cache); + TEST_SYNC_POINT("TableCache::GetTableReader:0"); + } + return s; +} + +void TableCache::EraseHandle(const FileDescriptor& fd, Cache::Handle* handle) { + ReleaseHandle(handle); + uint64_t number = fd.GetNumber(); + Slice key = GetSliceForFileNumber(&number); + cache_->Erase(key); +} + +Status TableCache::FindTable(const EnvOptions& env_options, + const InternalKeyComparator& internal_comparator, + const FileDescriptor& fd, Cache::Handle** handle, + const bool no_io, bool record_read_stats, + HistogramImpl* file_read_hist, bool skip_filters, + int level, + bool prefetch_index_and_filter_in_cache) { + PERF_TIMER_GUARD(find_table_nanos); + Status s; + uint64_t number = fd.GetNumber(); + Slice key = GetSliceForFileNumber(&number); + *handle = cache_->Lookup(key); + TEST_SYNC_POINT_CALLBACK("TableCache::FindTable:0", + const_cast(&no_io)); + + if (*handle == nullptr) { + if (no_io) { // Don't do IO and return a not-found status + return Status::Incomplete("Table not found in table_cache, no_io is set"); + } + unique_ptr table_reader; + s = GetTableReader(env_options, internal_comparator, fd, + false /* sequential mode */, 0 /* readahead */, + record_read_stats, file_read_hist, &table_reader, + skip_filters, level, prefetch_index_and_filter_in_cache); + if (!s.ok()) { + assert(table_reader == nullptr); + RecordTick(ioptions_.statistics, NO_FILE_ERRORS); + // We do not cache error results so that if the error is transient, + // or somebody repairs the file, we recover automatically. + } else { + s = cache_->Insert(key, table_reader.get(), 1, &DeleteEntry, + handle); + if (s.ok()) { + // Release ownership of table reader. 
+ table_reader.release(); + } + } + } + return s; +} + +InternalIterator* TableCache::NewIterator( + const ReadOptions& options, const EnvOptions& env_options, + const InternalKeyComparator& icomparator, const FileDescriptor& fd, + TableReader** table_reader_ptr, HistogramImpl* file_read_hist, + bool for_compaction, Arena* arena, bool skip_filters, int level) { + PERF_TIMER_GUARD(new_table_iterator_nanos); + + if (table_reader_ptr != nullptr) { + *table_reader_ptr = nullptr; + } + + TableReader* table_reader = nullptr; + Cache::Handle* handle = nullptr; + + size_t readahead = 0; + bool create_new_table_reader = false; + if (for_compaction) { + if (ioptions_.new_table_reader_for_compaction_inputs) { + readahead = ioptions_.compaction_readahead_size; + create_new_table_reader = true; + } + } else { + readahead = options.readahead_size; + create_new_table_reader = readahead > 0; + } + + if (create_new_table_reader) { + unique_ptr table_reader_unique_ptr; + Status s = GetTableReader( + env_options, icomparator, fd, true /* sequential_mode */, readahead, + !for_compaction /* record stats */, nullptr, &table_reader_unique_ptr, + false /* skip_filters */, level); + if (!s.ok()) { + return NewErrorInternalIterator(s, arena); + } + table_reader = table_reader_unique_ptr.release(); + } else { + table_reader = fd.table_reader; + if (table_reader == nullptr) { + Status s = FindTable(env_options, icomparator, fd, &handle, + options.read_tier == kBlockCacheTier /* no_io */, + !for_compaction /* record read_stats */, + file_read_hist, skip_filters, level); + if (!s.ok()) { + return NewErrorInternalIterator(s, arena); + } + table_reader = GetTableReaderFromHandle(handle); + } + } + + InternalIterator* result = + table_reader->NewIterator(options, arena, skip_filters); + + if (create_new_table_reader) { + assert(handle == nullptr); + result->RegisterCleanup(&DeleteTableReader, table_reader, nullptr); + } else if (handle != nullptr) { + result->RegisterCleanup(&UnrefEntry, cache_, handle); + } + + if (for_compaction) { + table_reader->SetupForCompaction(); + } + if (table_reader_ptr != nullptr) { + *table_reader_ptr = table_reader; + } + + return result; +} + +Status TableCache::Get(const ReadOptions& options, + const InternalKeyComparator& internal_comparator, + const FileDescriptor& fd, const Slice& k, + GetContext* get_context, HistogramImpl* file_read_hist, + bool skip_filters, int level) { + TableReader* t = fd.table_reader; + Status s; + Cache::Handle* handle = nullptr; + std::string* row_cache_entry = nullptr; + +#ifndef ROCKSDB_LITE + IterKey row_cache_key; + std::string row_cache_entry_buffer; + + // Check row cache if enabled. Since row cache does not currently store + // sequence numbers, we cannot use it if we need to fetch the sequence. + if (ioptions_.row_cache && !get_context->NeedToReadSequence()) { + uint64_t fd_number = fd.GetNumber(); + auto user_key = ExtractUserKey(k); + // We use the user key as cache key instead of the internal key, + // otherwise the whole cache would be invalidated every time the + // sequence key increases. However, to support caching snapshot + // reads, we append the sequence number (incremented by 1 to + // distinguish from 0) only in this case. + uint64_t seq_no = + options.snapshot == nullptr ? 0 : 1 + GetInternalKeySeqno(k); + + // Compute row cache key. 
+ row_cache_key.TrimAppend(row_cache_key.Size(), row_cache_id_.data(), + row_cache_id_.size()); + AppendVarint64(&row_cache_key, fd_number); + AppendVarint64(&row_cache_key, seq_no); + row_cache_key.TrimAppend(row_cache_key.Size(), user_key.data(), + user_key.size()); + + if (auto row_handle = ioptions_.row_cache->Lookup(row_cache_key.GetKey())) { + auto found_row_cache_entry = static_cast( + ioptions_.row_cache->Value(row_handle)); + replayGetContextLog(*found_row_cache_entry, user_key, get_context); + ioptions_.row_cache->Release(row_handle); + RecordTick(ioptions_.statistics, ROW_CACHE_HIT); + return Status::OK(); + } + + // Not found, setting up the replay log. + RecordTick(ioptions_.statistics, ROW_CACHE_MISS); + row_cache_entry = &row_cache_entry_buffer; + } +#endif // ROCKSDB_LITE + + if (!t) { + s = FindTable(env_options_, internal_comparator, fd, &handle, + options.read_tier == kBlockCacheTier /* no_io */, + true /* record_read_stats */, file_read_hist, skip_filters, + level); + if (s.ok()) { + t = GetTableReaderFromHandle(handle); + } + } + if (s.ok()) { + get_context->SetReplayLog(row_cache_entry); // nullptr if no cache. + s = t->Get(options, k, get_context, skip_filters); + get_context->SetReplayLog(nullptr); + if (handle != nullptr) { + ReleaseHandle(handle); + } + } else if (options.read_tier == kBlockCacheTier && s.IsIncomplete()) { + // Couldn't find Table in cache but treat as kFound if no_io set + get_context->MarkKeyMayExist(); + return Status::OK(); + } + +#ifndef ROCKSDB_LITE + // Put the replay log in row cache only if something was found. + if (s.ok() && row_cache_entry && !row_cache_entry->empty()) { + size_t charge = + row_cache_key.Size() + row_cache_entry->size() + sizeof(std::string); + void* row_ptr = new std::string(std::move(*row_cache_entry)); + ioptions_.row_cache->Insert(row_cache_key.GetKey(), row_ptr, charge, + &DeleteEntry); + } +#endif // ROCKSDB_LITE + + return s; +} + +Status TableCache::GetTableProperties( + const EnvOptions& env_options, + const InternalKeyComparator& internal_comparator, const FileDescriptor& fd, + std::shared_ptr* properties, bool no_io) { + Status s; + auto table_reader = fd.table_reader; + // table already been pre-loaded? + if (table_reader) { + *properties = table_reader->GetTableProperties(); + + return s; + } + + Cache::Handle* table_handle = nullptr; + s = FindTable(env_options, internal_comparator, fd, &table_handle, no_io); + if (!s.ok()) { + return s; + } + assert(table_handle); + auto table = GetTableReaderFromHandle(table_handle); + *properties = table->GetTableProperties(); + ReleaseHandle(table_handle); + return s; +} + +size_t TableCache::GetMemoryUsageByTableReader( + const EnvOptions& env_options, + const InternalKeyComparator& internal_comparator, + const FileDescriptor& fd) { + Status s; + auto table_reader = fd.table_reader; + // table already been pre-loaded? 
+ if (table_reader) { + return table_reader->ApproximateMemoryUsage(); + } + + Cache::Handle* table_handle = nullptr; + s = FindTable(env_options, internal_comparator, fd, &table_handle, true); + if (!s.ok()) { + return 0; + } + assert(table_handle); + auto table = GetTableReaderFromHandle(table_handle); + auto ret = table->ApproximateMemoryUsage(); + ReleaseHandle(table_handle); + return ret; +} + +void TableCache::Evict(Cache* cache, uint64_t file_number) { + cache->Erase(GetSliceForFileNumber(&file_number)); +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/table_cache.h b/external/rocksdb/db/table_cache.h new file mode 100755 index 0000000000..cdd0f23dcb --- /dev/null +++ b/external/rocksdb/db/table_cache.h @@ -0,0 +1,127 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +// Thread-safe (provides internal synchronization) + +#pragma once +#include +#include +#include + +#include "db/dbformat.h" +#include "port/port.h" +#include "rocksdb/cache.h" +#include "rocksdb/env.h" +#include "rocksdb/table.h" +#include "rocksdb/options.h" +#include "table/table_reader.h" + +namespace rocksdb { + +class Env; +class Arena; +struct FileDescriptor; +class GetContext; +class HistogramImpl; +class InternalIterator; + +class TableCache { + public: + TableCache(const ImmutableCFOptions& ioptions, + const EnvOptions& storage_options, Cache* cache); + ~TableCache(); + + // Return an iterator for the specified file number (the corresponding + // file length must be exactly "file_size" bytes). If "tableptr" is + // non-nullptr, also sets "*tableptr" to point to the Table object + // underlying the returned iterator, or nullptr if no Table object underlies + // the returned iterator. The returned "*tableptr" object is owned by + // the cache and should not be deleted, and is valid for as long as the + // returned iterator is live. + // @param skip_filters Disables loading/accessing the filter block + // @param level The level this table is at, -1 for "not set / don't know" + InternalIterator* NewIterator( + const ReadOptions& options, const EnvOptions& toptions, + const InternalKeyComparator& internal_comparator, + const FileDescriptor& file_fd, TableReader** table_reader_ptr = nullptr, + HistogramImpl* file_read_hist = nullptr, bool for_compaction = false, + Arena* arena = nullptr, bool skip_filters = false, int level = -1); + + // If a seek to internal key "k" in specified file finds an entry, + // call (*handle_result)(arg, found_key, found_value) repeatedly until + // it returns false. 
+ // @param skip_filters Disables loading/accessing the filter block + // @param level The level this table is at, -1 for "not set / don't know" + Status Get(const ReadOptions& options, + const InternalKeyComparator& internal_comparator, + const FileDescriptor& file_fd, const Slice& k, + GetContext* get_context, HistogramImpl* file_read_hist = nullptr, + bool skip_filters = false, int level = -1); + + // Evict any entry for the specified file number + static void Evict(Cache* cache, uint64_t file_number); + + // Clean table handle and erase it from the table cache + // Used in DB close, or the file is not live anymore. + void EraseHandle(const FileDescriptor& fd, Cache::Handle* handle); + + // Find table reader + // @param skip_filters Disables loading/accessing the filter block + // @param level == -1 means not specified + Status FindTable(const EnvOptions& toptions, + const InternalKeyComparator& internal_comparator, + const FileDescriptor& file_fd, Cache::Handle**, + const bool no_io = false, bool record_read_stats = true, + HistogramImpl* file_read_hist = nullptr, + bool skip_filters = false, int level = -1, + bool prefetch_index_and_filter_in_cache = true); + + // Get TableReader from a cache handle. + TableReader* GetTableReaderFromHandle(Cache::Handle* handle); + + // Get the table properties of a given table. + // @no_io: indicates if we should load table to the cache if it is not present + // in table cache yet. + // @returns: `properties` will be reset on success. Please note that we will + // return Status::Incomplete() if table is not present in cache and + // we set `no_io` to be true. + Status GetTableProperties(const EnvOptions& toptions, + const InternalKeyComparator& internal_comparator, + const FileDescriptor& file_meta, + std::shared_ptr* properties, + bool no_io = false); + + // Return total memory usage of the table reader of the file. + // 0 if table reader of the file is not loaded. + size_t GetMemoryUsageByTableReader( + const EnvOptions& toptions, + const InternalKeyComparator& internal_comparator, + const FileDescriptor& fd); + + // Release the handle from a cache + void ReleaseHandle(Cache::Handle* handle); + + private: + // Build a table reader + Status GetTableReader(const EnvOptions& env_options, + const InternalKeyComparator& internal_comparator, + const FileDescriptor& fd, bool sequential_mode, + size_t readahead, bool record_read_stats, + HistogramImpl* file_read_hist, + unique_ptr* table_reader, + bool skip_filters = false, int level = -1, + bool prefetch_index_and_filter_in_cache = true); + + const ImmutableCFOptions& ioptions_; + const EnvOptions& env_options_; + Cache* const cache_; + std::string row_cache_id_; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/db/table_properties_collector.cc b/external/rocksdb/db/table_properties_collector.cc new file mode 100755 index 0000000000..c5ec4754a2 --- /dev/null +++ b/external/rocksdb/db/table_properties_collector.cc @@ -0,0 +1,134 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
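The row-cache fast path in TableCache::Get() above only engages when a row cache has been configured. In application code that is done through the public options; a sketch, with an arbitrary 64 MiB capacity:

#include <rocksdb/cache.h>
#include <rocksdb/options.h>

rocksdb::Options MakeRowCachedOptions() {
  rocksdb::Options options;
  // On a hit, point lookups are served from this cache and skip the SST
  // read entirely; misses populate it via the replay-log mechanism above.
  options.row_cache = rocksdb::NewLRUCache(64 << 20);
  return options;
}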
+ +#include "db/table_properties_collector.h" + +#include "db/dbformat.h" +#include "util/coding.h" +#include "util/string_util.h" + +namespace rocksdb { + +Status InternalKeyPropertiesCollector::InternalAdd(const Slice& key, + const Slice& value, + uint64_t file_size) { + ParsedInternalKey ikey; + if (!ParseInternalKey(key, &ikey)) { + return Status::InvalidArgument("Invalid internal key"); + } + + // Note: We count both, deletions and single deletions here. + if (ikey.type == ValueType::kTypeDeletion || + ikey.type == ValueType::kTypeSingleDeletion) { + ++deleted_keys_; + } else if (ikey.type == ValueType::kTypeMerge) { + ++merge_operands_; + } + + return Status::OK(); +} + +Status InternalKeyPropertiesCollector::Finish( + UserCollectedProperties* properties) { + assert(properties); + assert(properties->find( + InternalKeyTablePropertiesNames::kDeletedKeys) == properties->end()); + assert(properties->find(InternalKeyTablePropertiesNames::kMergeOperands) == + properties->end()); + + std::string val_deleted_keys; + PutVarint64(&val_deleted_keys, deleted_keys_); + properties->insert( + {InternalKeyTablePropertiesNames::kDeletedKeys, val_deleted_keys}); + + std::string val_merge_operands; + PutVarint64(&val_merge_operands, merge_operands_); + properties->insert( + {InternalKeyTablePropertiesNames::kMergeOperands, val_merge_operands}); + + return Status::OK(); +} + +UserCollectedProperties +InternalKeyPropertiesCollector::GetReadableProperties() const { + return {{"kDeletedKeys", ToString(deleted_keys_)}, + {"kMergeOperands", ToString(merge_operands_)}}; +} + +namespace { + +EntryType GetEntryType(ValueType value_type) { + switch (value_type) { + case kTypeValue: + return kEntryPut; + case kTypeDeletion: + return kEntryDelete; + case kTypeSingleDeletion: + return kEntrySingleDelete; + case kTypeMerge: + return kEntryMerge; + default: + return kEntryOther; + } +} + +uint64_t GetUint64Property(const UserCollectedProperties& props, + const std::string property_name, + bool* property_present) { + auto pos = props.find(property_name); + if (pos == props.end()) { + *property_present = false; + return 0; + } + Slice raw = pos->second; + uint64_t val = 0; + *property_present = true; + return GetVarint64(&raw, &val) ? 
val : 0; +} + +} // namespace + +Status UserKeyTablePropertiesCollector::InternalAdd(const Slice& key, + const Slice& value, + uint64_t file_size) { + ParsedInternalKey ikey; + if (!ParseInternalKey(key, &ikey)) { + return Status::InvalidArgument("Invalid internal key"); + } + + return collector_->AddUserKey(ikey.user_key, value, GetEntryType(ikey.type), + ikey.sequence, file_size); +} + +Status UserKeyTablePropertiesCollector::Finish( + UserCollectedProperties* properties) { + return collector_->Finish(properties); +} + +UserCollectedProperties +UserKeyTablePropertiesCollector::GetReadableProperties() const { + return collector_->GetReadableProperties(); +} + + +const std::string InternalKeyTablePropertiesNames::kDeletedKeys + = "rocksdb.deleted.keys"; +const std::string InternalKeyTablePropertiesNames::kMergeOperands = + "rocksdb.merge.operands"; + +uint64_t GetDeletedKeys( + const UserCollectedProperties& props) { + bool property_present_ignored; + return GetUint64Property(props, InternalKeyTablePropertiesNames::kDeletedKeys, + &property_present_ignored); +} + +uint64_t GetMergeOperands(const UserCollectedProperties& props, + bool* property_present) { + return GetUint64Property( + props, InternalKeyTablePropertiesNames::kMergeOperands, property_present); +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/table_properties_collector.h b/external/rocksdb/db/table_properties_collector.h new file mode 100755 index 0000000000..b28cbfc006 --- /dev/null +++ b/external/rocksdb/db/table_properties_collector.h @@ -0,0 +1,137 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file defines a collection of statistics collectors. +#pragma once + +#include "rocksdb/table_properties.h" + +#include +#include +#include + +namespace rocksdb { + +struct InternalKeyTablePropertiesNames { + static const std::string kDeletedKeys; + static const std::string kMergeOperands; +}; + +// Base class for internal table properties collector. +class IntTblPropCollector { + public: + virtual ~IntTblPropCollector() {} + virtual Status Finish(UserCollectedProperties* properties) = 0; + + virtual const char* Name() const = 0; + + // @params key the user key that is inserted into the table. + // @params value the value that is inserted into the table. + virtual Status InternalAdd(const Slice& key, const Slice& value, + uint64_t file_size) = 0; + + virtual UserCollectedProperties GetReadableProperties() const = 0; + + virtual bool NeedCompact() const { return false; } +}; + +// Factory for internal table properties collector. +class IntTblPropCollectorFactory { + public: + virtual ~IntTblPropCollectorFactory() {} + // has to be thread-safe + virtual IntTblPropCollector* CreateIntTblPropCollector( + uint32_t column_family_id) = 0; + + // The name of the properties collector can be used for debugging purpose. + virtual const char* Name() const = 0; +}; + +// Collecting the statistics for internal keys. Visible only by internal +// rocksdb modules. 
+class InternalKeyPropertiesCollector : public IntTblPropCollector { + public: + virtual Status InternalAdd(const Slice& key, const Slice& value, + uint64_t file_size) override; + + virtual Status Finish(UserCollectedProperties* properties) override; + + virtual const char* Name() const override { + return "InternalKeyPropertiesCollector"; + } + + UserCollectedProperties GetReadableProperties() const override; + + private: + uint64_t deleted_keys_ = 0; + uint64_t merge_operands_ = 0; +}; + +class InternalKeyPropertiesCollectorFactory + : public IntTblPropCollectorFactory { + public: + virtual IntTblPropCollector* CreateIntTblPropCollector( + uint32_t column_family_id) override { + return new InternalKeyPropertiesCollector(); + } + + virtual const char* Name() const override { + return "InternalKeyPropertiesCollectorFactory"; + } +}; + +// When rocksdb creates a new table, it will encode all "user keys" into +// "internal keys", which contain meta information of a given entry. +// +// This class extracts user key from the encoded internal key when Add() is +// invoked. +class UserKeyTablePropertiesCollector : public IntTblPropCollector { + public: + // transfer of ownership + explicit UserKeyTablePropertiesCollector(TablePropertiesCollector* collector) + : collector_(collector) {} + + virtual ~UserKeyTablePropertiesCollector() {} + + virtual Status InternalAdd(const Slice& key, const Slice& value, + uint64_t file_size) override; + + virtual Status Finish(UserCollectedProperties* properties) override; + + virtual const char* Name() const override { return collector_->Name(); } + + UserCollectedProperties GetReadableProperties() const override; + + virtual bool NeedCompact() const override { + return collector_->NeedCompact(); + } + + protected: + std::unique_ptr<TablePropertiesCollector> collector_; +}; + +class UserKeyTablePropertiesCollectorFactory + : public IntTblPropCollectorFactory { + public: + explicit UserKeyTablePropertiesCollectorFactory( + std::shared_ptr<TablePropertiesCollectorFactory> user_collector_factory) + : user_collector_factory_(user_collector_factory) {} + virtual IntTblPropCollector* CreateIntTblPropCollector( + uint32_t column_family_id) override { + TablePropertiesCollectorFactory::Context context; + context.column_family_id = column_family_id; + return new UserKeyTablePropertiesCollector( + user_collector_factory_->CreateTablePropertiesCollector(context)); + } + + virtual const char* Name() const override { + return user_collector_factory_->Name(); + } + + private: + std::shared_ptr<TablePropertiesCollectorFactory> user_collector_factory_; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/db/table_properties_collector_test.cc b/external/rocksdb/db/table_properties_collector_test.cc new file mode 100755 index 0000000000..2033172673 --- /dev/null +++ b/external/rocksdb/db/table_properties_collector_test.cc @@ -0,0 +1,498 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory.
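+// Added note (not in the upstream file): these tests feed keys through +// NewTableBuilder in internal-key form, i.e. the user key followed by a +// packed (sequence << 8 | type) trailer as defined in db/dbformat.h.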
+ +#include <map> +#include <memory> +#include <string> +#include <utility> +#include <vector> + +#include "db/db_impl.h" +#include "db/dbformat.h" +#include "db/table_properties_collector.h" +#include "rocksdb/immutable_options.h" +#include "rocksdb/table.h" +#include "table/block_based_table_factory.h" +#include "table/meta_blocks.h" +#include "table/plain_table_factory.h" +#include "table/table_builder.h" +#include "util/coding.h" +#include "util/file_reader_writer.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { + +class TablePropertiesTest : public testing::Test, + public testing::WithParamInterface<bool> { + public: + virtual void SetUp() override { backward_mode_ = GetParam(); } + + bool backward_mode_; +}; + +// Utilities test functions +namespace { +static const uint32_t kTestColumnFamilyId = 66; +static const std::string kTestColumnFamilyName = "test_column_fam"; + +void MakeBuilder(const Options& options, const ImmutableCFOptions& ioptions, + const InternalKeyComparator& internal_comparator, + const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>* + int_tbl_prop_collector_factories, + std::unique_ptr<WritableFileWriter>* writable, + std::unique_ptr<TableBuilder>* builder) { + unique_ptr<WritableFile> wf(new test::StringSink); + writable->reset(new WritableFileWriter(std::move(wf), EnvOptions())); + + builder->reset(NewTableBuilder( + ioptions, internal_comparator, int_tbl_prop_collector_factories, + kTestColumnFamilyId, kTestColumnFamilyName, + writable->get(), options.compression, options.compression_opts)); +} +} // namespace + +// Collects keys that start with "A" in a table. +class RegularKeysStartWithA: public TablePropertiesCollector { + public: + const char* Name() const override { return "RegularKeysStartWithA"; } + + Status Finish(UserCollectedProperties* properties) override { + std::string encoded; + std::string encoded_num_puts; + std::string encoded_num_deletes; + std::string encoded_num_single_deletes; + std::string encoded_num_size_changes; + PutVarint32(&encoded, count_); + PutVarint32(&encoded_num_puts, num_puts_); + PutVarint32(&encoded_num_deletes, num_deletes_); + PutVarint32(&encoded_num_single_deletes, num_single_deletes_); + PutVarint32(&encoded_num_size_changes, num_size_changes_); + *properties = UserCollectedProperties{ + {"TablePropertiesTest", message_}, + {"Count", encoded}, + {"NumPuts", encoded_num_puts}, + {"NumDeletes", encoded_num_deletes}, + {"NumSingleDeletes", encoded_num_single_deletes}, + {"NumSizeChanges", encoded_num_size_changes}, + }; + return Status::OK(); + } + + Status AddUserKey(const Slice& user_key, const Slice& value, EntryType type, + SequenceNumber seq, uint64_t file_size) override { + // simply assume all user keys are not empty. + if (user_key.data()[0] == 'A') { + ++count_; + } + if (type == kEntryPut) { + num_puts_++; + } else if (type == kEntryDelete) { + num_deletes_++; + } else if (type == kEntrySingleDelete) { + num_single_deletes_++; + } + if (file_size < file_size_) { + message_ = "File size should not decrease."; + } else if (file_size != file_size_) { + num_size_changes_++; + } + + return Status::OK(); + } + + virtual UserCollectedProperties GetReadableProperties() const override { + return UserCollectedProperties{}; + } + + private: + std::string message_ = "Rocksdb"; + uint32_t count_ = 0; + uint32_t num_puts_ = 0; + uint32_t num_deletes_ = 0; + uint32_t num_single_deletes_ = 0; + uint32_t num_size_changes_ = 0; + uint64_t file_size_ = 0; +}; + +// Collects keys that start with "A" in a table.
Backward compatible mode +// It is also used to test internal key table property collector +class RegularKeysStartWithABackwardCompatible + : public TablePropertiesCollector { + public: + const char* Name() const override { return "RegularKeysStartWithA"; } + + Status Finish(UserCollectedProperties* properties) override { + std::string encoded; + PutVarint32(&encoded, count_); + *properties = UserCollectedProperties{{"TablePropertiesTest", "Rocksdb"}, + {"Count", encoded}}; + return Status::OK(); + } + + Status Add(const Slice& user_key, const Slice& value) override { + // simply assume all user keys are not empty. + if (user_key.data()[0] == 'A') { + ++count_; + } + return Status::OK(); + } + + virtual UserCollectedProperties GetReadableProperties() const override { + return UserCollectedProperties{}; + } + + private: + uint32_t count_ = 0; +}; + +class RegularKeysStartWithAInternal : public IntTblPropCollector { + public: + const char* Name() const override { return "RegularKeysStartWithA"; } + + Status Finish(UserCollectedProperties* properties) override { + std::string encoded; + PutVarint32(&encoded, count_); + *properties = UserCollectedProperties{{"TablePropertiesTest", "Rocksdb"}, + {"Count", encoded}}; + return Status::OK(); + } + + Status InternalAdd(const Slice& user_key, const Slice& value, + uint64_t file_size) override { + // simply assume all user keys are not empty. + if (user_key.data()[0] == 'A') { + ++count_; + } + return Status::OK(); + } + + virtual UserCollectedProperties GetReadableProperties() const override { + return UserCollectedProperties{}; + } + + private: + uint32_t count_ = 0; +}; + +class RegularKeysStartWithAFactory : public IntTblPropCollectorFactory, + public TablePropertiesCollectorFactory { + public: + explicit RegularKeysStartWithAFactory(bool backward_mode) + : backward_mode_(backward_mode) {} + virtual TablePropertiesCollector* CreateTablePropertiesCollector( + TablePropertiesCollectorFactory::Context context) override { + EXPECT_EQ(kTestColumnFamilyId, context.column_family_id); + if (!backward_mode_) { + return new RegularKeysStartWithA(); + } else { + return new RegularKeysStartWithABackwardCompatible(); + } + } + virtual IntTblPropCollector* CreateIntTblPropCollector( + uint32_t column_family_id) override { + return new RegularKeysStartWithAInternal(); + } + const char* Name() const override { return "RegularKeysStartWithA"; } + + bool backward_mode_; +}; + +class FlushBlockEveryThreePolicy : public FlushBlockPolicy { + public: + virtual bool Update(const Slice& key, const Slice& value) override { + return (++count_ % 3U == 0); + } + + private: + uint64_t count_ = 0; +}; + +class FlushBlockEveryThreePolicyFactory : public FlushBlockPolicyFactory { + public: + explicit FlushBlockEveryThreePolicyFactory() {} + + const char* Name() const override { + return "FlushBlockEveryThreePolicyFactory"; + } + + FlushBlockPolicy* NewFlushBlockPolicy( + const BlockBasedTableOptions& table_options, + const BlockBuilder& data_block_builder) const override { + return new FlushBlockEveryThreePolicy; + } +}; + +extern const uint64_t kBlockBasedTableMagicNumber; +extern const uint64_t kPlainTableMagicNumber; +namespace { +void TestCustomizedTablePropertiesCollector( + bool backward_mode, uint64_t magic_number, bool test_int_tbl_prop_collector, + const Options& options, const InternalKeyComparator& internal_comparator) { + // make sure the entries will be inserted in order.
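+ // (Added note: std::map keeps these entries sorted by key, which matters + // because TableBuilder::Add() in the loop below expects keys in ascending + // order.)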
+ std::map<std::pair<std::string, ValueType>, std::string> kvs = { + {{"About ", kTypeValue}, "val5"}, // starts with 'A' + {{"Abstract", kTypeValue}, "val2"}, // starts with 'A' + {{"Around ", kTypeValue}, "val7"}, // starts with 'A' + {{"Beyond ", kTypeValue}, "val3"}, + {{"Builder ", kTypeValue}, "val1"}, + {{"Love ", kTypeDeletion}, ""}, + {{"Cancel ", kTypeValue}, "val4"}, + {{"Find ", kTypeValue}, "val6"}, + {{"Rocks ", kTypeDeletion}, ""}, + {{"Foo ", kTypeSingleDeletion}, ""}, + }; + + // -- Step 1: build table + std::unique_ptr<TableBuilder> builder; + std::unique_ptr<WritableFileWriter> writer; + const ImmutableCFOptions ioptions(options); + std::vector<std::unique_ptr<IntTblPropCollectorFactory>> + int_tbl_prop_collector_factories; + if (test_int_tbl_prop_collector) { + int_tbl_prop_collector_factories.emplace_back( + new RegularKeysStartWithAFactory(backward_mode)); + } else { + GetIntTblPropCollectorFactory(options, &int_tbl_prop_collector_factories); + } + MakeBuilder(options, ioptions, internal_comparator, + &int_tbl_prop_collector_factories, &writer, &builder); + + SequenceNumber seqNum = 0U; + for (const auto& kv : kvs) { + InternalKey ikey(kv.first.first, seqNum++, kv.first.second); + builder->Add(ikey.Encode(), kv.second); + } + ASSERT_OK(builder->Finish()); + writer->Flush(); + + // -- Step 2: Read properties + test::StringSink* fwf = + static_cast<test::StringSink*>(writer->writable_file()); + std::unique_ptr<RandomAccessFileReader> fake_file_reader( + test::GetRandomAccessFileReader( + new test::StringSource(fwf->contents()))); + TableProperties* props; + Status s = ReadTableProperties(fake_file_reader.get(), fwf->contents().size(), + magic_number, ioptions, &props); + std::unique_ptr<TableProperties> props_guard(props); + ASSERT_OK(s); + + auto user_collected = props->user_collected_properties; + + ASSERT_NE(user_collected.find("TablePropertiesTest"), user_collected.end()); + ASSERT_EQ("Rocksdb", user_collected.at("TablePropertiesTest")); + + uint32_t starts_with_A = 0; + ASSERT_NE(user_collected.find("Count"), user_collected.end()); + Slice key(user_collected.at("Count")); + ASSERT_TRUE(GetVarint32(&key, &starts_with_A)); + ASSERT_EQ(3u, starts_with_A); + + if (!backward_mode && !test_int_tbl_prop_collector) { + uint32_t num_puts; + ASSERT_NE(user_collected.find("NumPuts"), user_collected.end()); + Slice key_puts(user_collected.at("NumPuts")); + ASSERT_TRUE(GetVarint32(&key_puts, &num_puts)); + ASSERT_EQ(7u, num_puts); + + uint32_t num_deletes; + ASSERT_NE(user_collected.find("NumDeletes"), user_collected.end()); + Slice key_deletes(user_collected.at("NumDeletes")); + ASSERT_TRUE(GetVarint32(&key_deletes, &num_deletes)); + ASSERT_EQ(2u, num_deletes); + + uint32_t num_single_deletes; + ASSERT_NE(user_collected.find("NumSingleDeletes"), user_collected.end()); + Slice key_single_deletes(user_collected.at("NumSingleDeletes")); + ASSERT_TRUE(GetVarint32(&key_single_deletes, &num_single_deletes)); + ASSERT_EQ(1u, num_single_deletes); + + uint32_t num_size_changes; + ASSERT_NE(user_collected.find("NumSizeChanges"), user_collected.end()); + Slice key_size_changes(user_collected.at("NumSizeChanges")); + ASSERT_TRUE(GetVarint32(&key_size_changes, &num_size_changes)); + ASSERT_GE(num_size_changes, 2u); + } +} +} // namespace + +TEST_P(TablePropertiesTest, CustomizedTablePropertiesCollector) { + // Test properties collectors with internal keys or regular keys + // for block based table + for (bool encode_as_internal : { true, false }) { + Options options; + BlockBasedTableOptions table_options; + table_options.flush_block_policy_factory = + std::make_shared<FlushBlockEveryThreePolicyFactory>(); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + +
test::PlainInternalKeyComparator ikc(options.comparator); + std::shared_ptr<TablePropertiesCollectorFactory> collector_factory( + new RegularKeysStartWithAFactory(backward_mode_)); + options.table_properties_collector_factories.resize(1); + options.table_properties_collector_factories[0] = collector_factory; + + TestCustomizedTablePropertiesCollector(backward_mode_, + kBlockBasedTableMagicNumber, + encode_as_internal, options, ikc); + +#ifndef ROCKSDB_LITE // PlainTable is not supported in Lite + // test plain table + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 8; + plain_table_options.bloom_bits_per_key = 8; + plain_table_options.hash_table_ratio = 0; + + options.table_factory = + std::make_shared<PlainTableFactory>(plain_table_options); + TestCustomizedTablePropertiesCollector(backward_mode_, + kPlainTableMagicNumber, + encode_as_internal, options, ikc); +#endif // !ROCKSDB_LITE + } +} + +namespace { +void TestInternalKeyPropertiesCollector( + bool backward_mode, uint64_t magic_number, bool sanitized, + std::shared_ptr<TableFactory> table_factory) { + InternalKey keys[] = { + InternalKey("A ", 0, ValueType::kTypeValue), + InternalKey("B ", 1, ValueType::kTypeValue), + InternalKey("C ", 2, ValueType::kTypeValue), + InternalKey("W ", 3, ValueType::kTypeDeletion), + InternalKey("X ", 4, ValueType::kTypeDeletion), + InternalKey("Y ", 5, ValueType::kTypeDeletion), + InternalKey("Z ", 6, ValueType::kTypeDeletion), + InternalKey("a ", 7, ValueType::kTypeSingleDeletion), + InternalKey("b ", 8, ValueType::kTypeMerge), + InternalKey("c ", 9, ValueType::kTypeMerge), + }; + + std::unique_ptr<TableBuilder> builder; + std::unique_ptr<WritableFileWriter> writable; + Options options; + test::PlainInternalKeyComparator pikc(options.comparator); + + std::vector<std::unique_ptr<IntTblPropCollectorFactory>> + int_tbl_prop_collector_factories; + options.table_factory = table_factory; + if (sanitized) { + options.table_properties_collector_factories.emplace_back( + new RegularKeysStartWithAFactory(backward_mode)); + // with sanitization, even regular properties collector will be able to + // handle internal keys. + auto comparator = options.comparator; + // HACK: Set options.info_log to avoid writing log in + // SanitizeOptions().
+ options.info_log = std::make_shared<test::NullLogger>(); + options = SanitizeOptions("db", // just a placeholder + &pikc, + options); + GetIntTblPropCollectorFactory(options, &int_tbl_prop_collector_factories); + options.comparator = comparator; + } else { + int_tbl_prop_collector_factories.emplace_back( + new InternalKeyPropertiesCollectorFactory); + } + const ImmutableCFOptions ioptions(options); + + for (int iter = 0; iter < 2; ++iter) { + MakeBuilder(options, ioptions, pikc, &int_tbl_prop_collector_factories, + &writable, &builder); + for (const auto& k : keys) { + builder->Add(k.Encode(), "val"); + } + + ASSERT_OK(builder->Finish()); + writable->Flush(); + + test::StringSink* fwf = + static_cast<test::StringSink*>(writable->writable_file()); + unique_ptr<RandomAccessFileReader> reader(test::GetRandomAccessFileReader( + new test::StringSource(fwf->contents()))); + TableProperties* props; + Status s = + ReadTableProperties(reader.get(), fwf->contents().size(), magic_number, + ioptions, &props); + ASSERT_OK(s); + + std::unique_ptr<TableProperties> props_guard(props); + auto user_collected = props->user_collected_properties; + uint64_t deleted = GetDeletedKeys(user_collected); + ASSERT_EQ(5u, deleted); // deletes + single-deletes + + bool property_present; + uint64_t merges = GetMergeOperands(user_collected, &property_present); + ASSERT_TRUE(property_present); + ASSERT_EQ(2u, merges); + + if (sanitized) { + uint32_t starts_with_A = 0; + ASSERT_NE(user_collected.find("Count"), user_collected.end()); + Slice key(user_collected.at("Count")); + ASSERT_TRUE(GetVarint32(&key, &starts_with_A)); + ASSERT_EQ(1u, starts_with_A); + + if (!backward_mode) { + uint32_t num_puts; + ASSERT_NE(user_collected.find("NumPuts"), user_collected.end()); + Slice key_puts(user_collected.at("NumPuts")); + ASSERT_TRUE(GetVarint32(&key_puts, &num_puts)); + ASSERT_EQ(3u, num_puts); + + uint32_t num_deletes; + ASSERT_NE(user_collected.find("NumDeletes"), user_collected.end()); + Slice key_deletes(user_collected.at("NumDeletes")); + ASSERT_TRUE(GetVarint32(&key_deletes, &num_deletes)); + ASSERT_EQ(4u, num_deletes); + + uint32_t num_single_deletes; + ASSERT_NE(user_collected.find("NumSingleDeletes"), + user_collected.end()); + Slice key_single_deletes(user_collected.at("NumSingleDeletes")); + ASSERT_TRUE(GetVarint32(&key_single_deletes, &num_single_deletes)); + ASSERT_EQ(1u, num_single_deletes); + } + } + } +} +} // namespace + +TEST_P(TablePropertiesTest, InternalKeyPropertiesCollector) { + TestInternalKeyPropertiesCollector( + backward_mode_, kBlockBasedTableMagicNumber, true /* sanitize */, + std::make_shared<BlockBasedTableFactory>()); + if (backward_mode_) { + TestInternalKeyPropertiesCollector( + backward_mode_, kBlockBasedTableMagicNumber, false /* not sanitize */, + std::make_shared<BlockBasedTableFactory>()); + } + +#ifndef ROCKSDB_LITE // PlainTable is not supported in Lite + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 8; + plain_table_options.bloom_bits_per_key = 8; + plain_table_options.hash_table_ratio = 0; + + TestInternalKeyPropertiesCollector( + backward_mode_, kPlainTableMagicNumber, false /* not sanitize */, + std::make_shared<PlainTableFactory>(plain_table_options)); +#endif // !ROCKSDB_LITE +} + +INSTANTIATE_TEST_CASE_P(InternalKeyPropertiesCollector, TablePropertiesTest, + ::testing::Bool()); + +INSTANTIATE_TEST_CASE_P(CustomizedTablePropertiesCollector, TablePropertiesTest, + ::testing::Bool()); + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/transaction_log_impl.cc
b/external/rocksdb/db/transaction_log_impl.cc new file mode 100755 index 0000000000..91ed054127 --- /dev/null +++ b/external/rocksdb/db/transaction_log_impl.cc @@ -0,0 +1,271 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef ROCKSDB_LITE +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include "db/transaction_log_impl.h" +#include <inttypes.h> +#include "db/write_batch_internal.h" +#include "util/file_reader_writer.h" + +namespace rocksdb { + +TransactionLogIteratorImpl::TransactionLogIteratorImpl( + const std::string& dir, const DBOptions* options, + const TransactionLogIterator::ReadOptions& read_options, + const EnvOptions& soptions, const SequenceNumber seq, + std::unique_ptr<VectorLogPtr> files, VersionSet const* const versions) + : dir_(dir), + options_(options), + read_options_(read_options), + soptions_(soptions), + startingSequenceNumber_(seq), + files_(std::move(files)), + started_(false), + isValid_(false), + currentFileIndex_(0), + currentBatchSeq_(0), + currentLastSeq_(0), + versions_(versions) { + assert(files_ != nullptr); + assert(versions_ != nullptr); + + reporter_.env = options_->env; + reporter_.info_log = options_->info_log.get(); + SeekToStartSequence(); // Seek till starting sequence +} + +Status TransactionLogIteratorImpl::OpenLogFile( + const LogFile* logFile, unique_ptr<SequentialFileReader>* file_reader) { + Env* env = options_->env; + unique_ptr<SequentialFile> file; + Status s; + if (logFile->Type() == kArchivedLogFile) { + std::string fname = ArchivedLogFileName(dir_, logFile->LogNumber()); + s = env->NewSequentialFile(fname, &file, soptions_); + } else { + std::string fname = LogFileName(dir_, logFile->LogNumber()); + s = env->NewSequentialFile(fname, &file, soptions_); + if (!s.ok()) { + // If cannot open file in DB directory. + // Try the archive dir, as it could have moved in the meanwhile. + fname = ArchivedLogFileName(dir_, logFile->LogNumber()); + s = env->NewSequentialFile(fname, &file, soptions_); + } + } + if (s.ok()) { + file_reader->reset(new SequentialFileReader(std::move(file))); + } + return s; +} + +BatchResult TransactionLogIteratorImpl::GetBatch() { + assert(isValid_); // cannot call in a non-valid state.
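+ // (Added note: writeBatchPtr is moved out of the iterator below, so each + // batch can be fetched once; advance with Next() before calling GetBatch() + // again.)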
+ BatchResult result; + result.sequence = currentBatchSeq_; + result.writeBatchPtr = std::move(currentBatch_); + return result; +} + +Status TransactionLogIteratorImpl::status() { + return currentStatus_; +} + +bool TransactionLogIteratorImpl::Valid() { + return started_ && isValid_; +} + +bool TransactionLogIteratorImpl::RestrictedRead( + Slice* record, + std::string* scratch) { + // Don't read if no more complete entries to read from logs + if (currentLastSeq_ >= versions_->LastSequence()) { + return false; + } + return currentLogReader_->ReadRecord(record, scratch); +} + +void TransactionLogIteratorImpl::SeekToStartSequence( + uint64_t startFileIndex, + bool strict) { + std::string scratch; + Slice record; + started_ = false; + isValid_ = false; + if (files_->size() <= startFileIndex) { + return; + } + Status s = OpenLogReader(files_->at(startFileIndex).get()); + if (!s.ok()) { + currentStatus_ = s; + reporter_.Info(currentStatus_.ToString().c_str()); + return; + } + while (RestrictedRead(&record, &scratch)) { + if (record.size() < WriteBatchInternal::kHeader) { + reporter_.Corruption( + record.size(), Status::Corruption("very small log record")); + continue; + } + UpdateCurrentWriteBatch(record); + if (currentLastSeq_ >= startingSequenceNumber_) { + if (strict && currentBatchSeq_ != startingSequenceNumber_) { + currentStatus_ = Status::Corruption("Gap in sequence number. Could not " + "seek to required sequence number"); + reporter_.Info(currentStatus_.ToString().c_str()); + return; + } else if (strict) { + reporter_.Info("Could seek required sequence number. Iterator will " + "continue."); + } + isValid_ = true; + started_ = true; // set started_ as we could seek till starting sequence + return; + } else { + isValid_ = false; + } + } + + // Could not find start sequence in first file. Normally this must be the + // only file. Otherwise log the error and let the iterator return next entry + // If strict is set, we want to seek exactly till the start sequence and it + // should have been present in the file we scanned above + if (strict) { + currentStatus_ = Status::Corruption("Gap in sequence number. Could not " + "seek to required sequence number"); + reporter_.Info(currentStatus_.ToString().c_str()); + } else if (files_->size() != 1) { + currentStatus_ = Status::Corruption("Start sequence was not found, " + "skipping to the next available"); + reporter_.Info(currentStatus_.ToString().c_str()); + // Let NextImpl find the next available entry. 
started_ remains false + // because we don't want to check for gaps while moving to start sequence + NextImpl(true); + } +} + +void TransactionLogIteratorImpl::Next() { + return NextImpl(false); +} + +void TransactionLogIteratorImpl::NextImpl(bool internal) { + std::string scratch; + Slice record; + isValid_ = false; + if (!internal && !started_) { + // Runs every time until we can seek to the start sequence + return SeekToStartSequence(); + } + while (true) { + assert(currentLogReader_); + if (currentLogReader_->IsEOF()) { + currentLogReader_->UnmarkEOF(); + } + while (RestrictedRead(&record, &scratch)) { + if (record.size() < WriteBatchInternal::kHeader) { + reporter_.Corruption( + record.size(), Status::Corruption("very small log record")); + continue; + } else { + // started_ should be true if called by application + assert(internal || started_); + // started_ should be false if called internally + assert(!internal || !started_); + UpdateCurrentWriteBatch(record); + if (internal && !started_) { + started_ = true; + } + return; + } + } + + // Open the next file + if (currentFileIndex_ < files_->size() - 1) { + ++currentFileIndex_; + Status s = OpenLogReader(files_->at(currentFileIndex_).get()); + if (!s.ok()) { + isValid_ = false; + currentStatus_ = s; + return; + } + } else { + isValid_ = false; + if (currentLastSeq_ == versions_->LastSequence()) { + currentStatus_ = Status::OK(); + } else { + currentStatus_ = Status::Corruption("NO MORE DATA LEFT"); + } + return; + } + } +} + +bool TransactionLogIteratorImpl::IsBatchExpected( + const WriteBatch* batch, + const SequenceNumber expectedSeq) { + assert(batch); + SequenceNumber batchSeq = WriteBatchInternal::Sequence(batch); + if (batchSeq != expectedSeq) { + char buf[200]; + snprintf(buf, sizeof(buf), + "Discontinuity in log records. Got seq=%" PRIu64 + ", Expected seq=%" PRIu64 ", Last flushed seq=%" PRIu64 + ". Log iterator will reseek the correct batch.", + batchSeq, expectedSeq, versions_->LastSequence()); + reporter_.Info(buf); + return false; + } + return true; +} + +void TransactionLogIteratorImpl::UpdateCurrentWriteBatch(const Slice& record) { + std::unique_ptr<WriteBatch> batch(new WriteBatch()); + WriteBatchInternal::SetContents(batch.get(), record); + + SequenceNumber expectedSeq = currentLastSeq_ + 1; + // If the iterator has started, then confirm that we get continuous batches + if (started_ && !IsBatchExpected(batch.get(), expectedSeq)) { + // Seek to the batch having expected sequence number + if (expectedSeq < files_->at(currentFileIndex_)->StartSequence()) { + // Expected batch must lie in the previous log file + // Avoid underflow.
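+ // (Added worked example: if currentLastSeq_ is 99 and the batch just read + // starts at 150, expectedSeq is 100; the iterator reseeks to 100, stepping + // back one file when 100 precedes the current file's StartSequence().)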
+ if (currentFileIndex_ != 0) { + currentFileIndex_--; + } + } + startingSequenceNumber_ = expectedSeq; + // currentStatus_ will be set to Ok if reseek succeeds + currentStatus_ = Status::NotFound("Gap in sequence numbers"); + return SeekToStartSequence(currentFileIndex_, true); + } + + currentBatchSeq_ = WriteBatchInternal::Sequence(batch.get()); + currentLastSeq_ = currentBatchSeq_ + + WriteBatchInternal::Count(batch.get()) - 1; + // currentBatchSeq_ can only change here + assert(currentLastSeq_ <= versions_->LastSequence()); + + currentBatch_ = std::move(batch); + isValid_ = true; + currentStatus_ = Status::OK(); +} + +Status TransactionLogIteratorImpl::OpenLogReader(const LogFile* logFile) { + unique_ptr<SequentialFileReader> file; + Status s = OpenLogFile(logFile, &file); + if (!s.ok()) { + return s; + } + assert(file); + currentLogReader_.reset(new log::Reader( + options_->info_log, std::move(file), &reporter_, + read_options_.verify_checksums_, 0, logFile->LogNumber())); + return Status::OK(); +} +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/db/transaction_log_impl.h b/external/rocksdb/db/transaction_log_impl.h new file mode 100755 index 0000000000..d4a2468e7f --- /dev/null +++ b/external/rocksdb/db/transaction_log_impl.h @@ -0,0 +1,126 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef ROCKSDB_LITE +#pragma once +#include <vector> + +#include "rocksdb/env.h" +#include "rocksdb/options.h" +#include "rocksdb/types.h" +#include "rocksdb/transaction_log.h" +#include "db/version_set.h" +#include "db/log_reader.h" +#include "db/filename.h" +#include "port/port.h" + +namespace rocksdb { + +class LogFileImpl : public LogFile { + public: + LogFileImpl(uint64_t logNum, WalFileType logType, SequenceNumber startSeq, + uint64_t sizeBytes) : + logNumber_(logNum), + type_(logType), + startSequence_(startSeq), + sizeFileBytes_(sizeBytes) { + } + + std::string PathName() const override { + if (type_ == kArchivedLogFile) { + return ArchivedLogFileName("", logNumber_); + } + return LogFileName("", logNumber_); + } + + uint64_t LogNumber() const override { return logNumber_; } + + WalFileType Type() const override { return type_; } + + SequenceNumber StartSequence() const override { return startSequence_; } + + uint64_t SizeFileBytes() const override { return sizeFileBytes_; } + + bool operator < (const LogFile& that) const { + return LogNumber() < that.LogNumber(); + } + + private: + uint64_t logNumber_; + WalFileType type_; + SequenceNumber startSequence_; + uint64_t sizeFileBytes_; + +}; + +class TransactionLogIteratorImpl : public TransactionLogIterator { + public: + TransactionLogIteratorImpl( + const std::string& dir, const DBOptions* options, + const TransactionLogIterator::ReadOptions& read_options, + const EnvOptions& soptions, const SequenceNumber seqNum, + std::unique_ptr<VectorLogPtr> files, VersionSet const* const versions); + + virtual bool Valid() override; + + virtual void Next() override; + + virtual Status status() override; + + virtual BatchResult GetBatch() override; + + private: + const std::string& dir_; + const DBOptions* options_; + const TransactionLogIterator::ReadOptions read_options_; + const EnvOptions& soptions_; + SequenceNumber startingSequenceNumber_; + std::unique_ptr<VectorLogPtr> files_; + bool started_; + bool isValid_; // not valid
when it starts off. + Status currentStatus_; + size_t currentFileIndex_; + std::unique_ptr<WriteBatch> currentBatch_; + unique_ptr<log::Reader> currentLogReader_; + Status OpenLogFile(const LogFile* logFile, + unique_ptr<SequentialFileReader>* file); + + struct LogReporter : public log::Reader::Reporter { + Env* env; + Logger* info_log; + virtual void Corruption(size_t bytes, const Status& s) override { + Log(InfoLogLevel::ERROR_LEVEL, info_log, + "dropping %" ROCKSDB_PRIszt " bytes; %s", bytes, + s.ToString().c_str()); + } + virtual void Info(const char* s) { + Log(InfoLogLevel::INFO_LEVEL, info_log, "%s", s); + } + } reporter_; + + SequenceNumber currentBatchSeq_; // sequence number at start of current batch + SequenceNumber currentLastSeq_; // last sequence in the current batch + // Used only to get latest seq. num + // TODO(icanadi) can this be just a callback? + VersionSet const* const versions_; + + // Reads from transaction log only if the writebatch record has been written + bool RestrictedRead(Slice* record, std::string* scratch); + // Seeks to startingSequenceNumber reading from startFileIndex in files_. + // If strict is set, then it must get a batch starting with + // startingSequenceNumber + void SeekToStartSequence(uint64_t startFileIndex = 0, bool strict = false); + // Implementation of Next. SeekToStartSequence calls it internally with + // internal=true to let it find next entry even if it has to jump gaps because + // the iterator may start off from the first available entry but promises to + // be continuous after that + void NextImpl(bool internal = false); + // Check if batch is expected, else return false + bool IsBatchExpected(const WriteBatch* batch, SequenceNumber expectedSeq); + // Update current batch if a continuous batch is found, else return false + void UpdateCurrentWriteBatch(const Slice& record); + Status OpenLogReader(const LogFile* file); +}; +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/db/version_builder.cc b/external/rocksdb/db/version_builder.cc new file mode 100755 index 0000000000..2837686be5 --- /dev/null +++ b/external/rocksdb/db/version_builder.cc @@ -0,0 +1,379 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors.
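+// Added note (not upstream): level-0 files are ordered newest-first by +// sequence number, all other levels by smallest key. A minimal sketch of the +// level-0 rule implemented by NewestFirstBySeqNo below: +// +// FileMetaData a, b; +// a.smallest_seqno = 200; b.smallest_seqno = 100; +// assert(NewestFirstBySeqNo(&a, &b)); // the newer file sorts first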
+ +#include "db/version_builder.h" + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "db/dbformat.h" +#include "db/internal_stats.h" +#include "db/table_cache.h" +#include "db/version_set.h" +#include "table/table_reader.h" + +namespace rocksdb { + +bool NewestFirstBySeqNo(FileMetaData* a, FileMetaData* b) { + if (a->smallest_seqno != b->smallest_seqno) { + return a->smallest_seqno > b->smallest_seqno; + } + if (a->largest_seqno != b->largest_seqno) { + return a->largest_seqno > b->largest_seqno; + } + // Break ties by file number + return a->fd.GetNumber() > b->fd.GetNumber(); +} + +namespace { +bool BySmallestKey(FileMetaData* a, FileMetaData* b, + const InternalKeyComparator* cmp) { + int r = cmp->Compare(a->smallest, b->smallest); + if (r != 0) { + return (r < 0); + } + // Break ties by file number + return (a->fd.GetNumber() < b->fd.GetNumber()); +} +} // namespace + +class VersionBuilder::Rep { + private: + // Helper to sort files_ in v + // kLevel0 -- NewestFirstBySeqNo + // kLevelNon0 -- BySmallestKey + struct FileComparator { + enum SortMethod { kLevel0 = 0, kLevelNon0 = 1, } sort_method; + const InternalKeyComparator* internal_comparator; + + bool operator()(FileMetaData* f1, FileMetaData* f2) const { + switch (sort_method) { + case kLevel0: + return NewestFirstBySeqNo(f1, f2); + case kLevelNon0: + return BySmallestKey(f1, f2, internal_comparator); + } + assert(false); + return false; + } + }; + + struct LevelState { + std::unordered_set deleted_files; + // Map from file number to file meta data. + std::unordered_map added_files; + }; + + const EnvOptions& env_options_; + Logger* info_log_; + TableCache* table_cache_; + VersionStorageInfo* base_vstorage_; + LevelState* levels_; + FileComparator level_zero_cmp_; + FileComparator level_nonzero_cmp_; + + public: + Rep(const EnvOptions& env_options, Logger* info_log, TableCache* table_cache, + VersionStorageInfo* base_vstorage) + : env_options_(env_options), + info_log_(info_log), + table_cache_(table_cache), + base_vstorage_(base_vstorage) { + levels_ = new LevelState[base_vstorage_->num_levels()]; + level_zero_cmp_.sort_method = FileComparator::kLevel0; + level_nonzero_cmp_.sort_method = FileComparator::kLevelNon0; + level_nonzero_cmp_.internal_comparator = + base_vstorage_->InternalComparator(); + } + + ~Rep() { + for (int level = 0; level < base_vstorage_->num_levels(); level++) { + const auto& added = levels_[level].added_files; + for (auto& pair : added) { + UnrefFile(pair.second); + } + } + + delete[] levels_; + } + + void UnrefFile(FileMetaData* f) { + f->refs--; + if (f->refs <= 0) { + if (f->table_reader_handle) { + assert(table_cache_ != nullptr); + table_cache_->ReleaseHandle(f->table_reader_handle); + f->table_reader_handle = nullptr; + } + delete f; + } + } + + void CheckConsistency(VersionStorageInfo* vstorage) { +#ifndef NDEBUG + // make sure the files are sorted correctly + for (int level = 0; level < vstorage->num_levels(); level++) { + auto& level_files = vstorage->LevelFiles(level); + for (size_t i = 1; i < level_files.size(); i++) { + auto f1 = level_files[i - 1]; + auto f2 = level_files[i]; + if (level == 0) { + assert(level_zero_cmp_(f1, f2)); + assert(f1->largest_seqno > f2->largest_seqno || + // We can have multiple files with seqno = 0 as a result of + // using DB::AddFile() + (f1->largest_seqno == 0 && f2->largest_seqno == 0)); + } else { + assert(level_nonzero_cmp_(f1, f2)); + + // 
Make sure there is no overlap in levels > 0 + if (vstorage->InternalComparator()->Compare(f1->largest, + f2->smallest) >= 0) { + fprintf(stderr, "overlapping ranges in same level %s vs. %s\n", + (f1->largest).DebugString().c_str(), + (f2->smallest).DebugString().c_str()); + abort(); + } + } + } + } +#endif + } + + void CheckConsistencyForDeletes(VersionEdit* edit, uint64_t number, + int level) { +#ifndef NDEBUG + // a file to be deleted better exist in the previous version + bool found = false; + for (int l = 0; !found && l < base_vstorage_->num_levels(); l++) { + const std::vector<FileMetaData*>& base_files = + base_vstorage_->LevelFiles(l); + for (size_t i = 0; i < base_files.size(); i++) { + FileMetaData* f = base_files[i]; + if (f->fd.GetNumber() == number) { + found = true; + break; + } + } + } + // if the file did not exist in the previous version, then it + // is possibly moved from lower level to higher level in current + // version + for (int l = level + 1; !found && l < base_vstorage_->num_levels(); l++) { + auto& level_added = levels_[l].added_files; + auto got = level_added.find(number); + if (got != level_added.end()) { + found = true; + break; + } + } + + // maybe this file was added in a previous edit that was Applied + if (!found) { + auto& level_added = levels_[level].added_files; + auto got = level_added.find(number); + if (got != level_added.end()) { + found = true; + } + } + if (!found) { + fprintf(stderr, "not found %" PRIu64 "\n", number); + } + assert(found); +#endif + } + + // Apply all of the edits in *edit to the current state. + void Apply(VersionEdit* edit) { + CheckConsistency(base_vstorage_); + + // Delete files + const VersionEdit::DeletedFileSet& del = edit->GetDeletedFiles(); + for (const auto& del_file : del) { + const auto level = del_file.first; + const auto number = del_file.second; + levels_[level].deleted_files.insert(number); + CheckConsistencyForDeletes(edit, number, level); + + auto existing = levels_[level].added_files.find(number); + if (existing != levels_[level].added_files.end()) { + UnrefFile(existing->second); + levels_[level].added_files.erase(number); + } + } + + // Add new files + for (const auto& new_file : edit->GetNewFiles()) { + const int level = new_file.first; + FileMetaData* f = new FileMetaData(new_file.second); + f->refs = 1; + + assert(levels_[level].added_files.find(f->fd.GetNumber()) == + levels_[level].added_files.end()); + levels_[level].deleted_files.erase(f->fd.GetNumber()); + levels_[level].added_files[f->fd.GetNumber()] = f; + } + } + + // Save the current state in *v. + void SaveTo(VersionStorageInfo* vstorage) { + CheckConsistency(base_vstorage_); + CheckConsistency(vstorage); + + for (int level = 0; level < base_vstorage_->num_levels(); level++) { + const auto& cmp = (level == 0) ? level_zero_cmp_ : level_nonzero_cmp_; + // Merge the set of added files with the set of pre-existing files. + // Drop any deleted files. Store the result in *v. + const auto& base_files = base_vstorage_->LevelFiles(level); + auto base_iter = base_files.begin(); + auto base_end = base_files.end(); + const auto& unordered_added_files = levels_[level].added_files; + vstorage->Reserve(level, + base_files.size() + unordered_added_files.size()); + + // Sort added files for the level.
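+ // (Added note: the loop below is a two-way merge. added_files is sorted + // with the level's comparator, and for each added file all base files + // that sort before it, located via std::upper_bound, are emitted first, + // keeping the level sorted.)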
+ std::vector<FileMetaData*> added_files; + added_files.reserve(unordered_added_files.size()); + for (const auto& pair : unordered_added_files) { + added_files.push_back(pair.second); + } + std::sort(added_files.begin(), added_files.end(), cmp); + +#ifndef NDEBUG + FileMetaData* prev_file = nullptr; +#endif + + for (const auto& added : added_files) { +#ifndef NDEBUG + if (level > 0 && prev_file != nullptr) { + assert(base_vstorage_->InternalComparator()->Compare( + prev_file->smallest, added->smallest) <= 0); + } + prev_file = added; +#endif + + // Add all smaller files listed in base_ + for (auto bpos = std::upper_bound(base_iter, base_end, added, cmp); + base_iter != bpos; ++base_iter) { + MaybeAddFile(vstorage, level, *base_iter); + } + + MaybeAddFile(vstorage, level, added); + } + + // Add remaining base files + for (; base_iter != base_end; ++base_iter) { + MaybeAddFile(vstorage, level, *base_iter); + } + } + + CheckConsistency(vstorage); + } + + void LoadTableHandlers(InternalStats* internal_stats, int max_threads, + bool prefetch_index_and_filter_in_cache) { + assert(table_cache_ != nullptr); + // <file metadata, level> + std::vector<std::pair<FileMetaData*, int>> files_meta; + for (int level = 0; level < base_vstorage_->num_levels(); level++) { + for (auto& file_meta_pair : levels_[level].added_files) { + auto* file_meta = file_meta_pair.second; + assert(!file_meta->table_reader_handle); + files_meta.emplace_back(file_meta, level); + } + } + + std::atomic<size_t> next_file_meta_idx(0); + std::function<void()> load_handlers_func = [&]() { + while (true) { + size_t file_idx = next_file_meta_idx.fetch_add(1); + if (file_idx >= files_meta.size()) { + break; + } + + auto* file_meta = files_meta[file_idx].first; + int level = files_meta[file_idx].second; + table_cache_->FindTable(env_options_, + *(base_vstorage_->InternalComparator()), + file_meta->fd, &file_meta->table_reader_handle, + false /*no_io */, true /* record_read_stats */, + internal_stats->GetFileReadHist(level), false, + level, prefetch_index_and_filter_in_cache); + if (file_meta->table_reader_handle != nullptr) { + // Load table_reader + file_meta->fd.table_reader = table_cache_->GetTableReaderFromHandle( + file_meta->table_reader_handle); + } + } + }; + + if (max_threads <= 1) { + load_handlers_func(); + } else { + std::vector<std::thread> threads; + for (int i = 0; i < max_threads; i++) { + threads.emplace_back(load_handlers_func); + } + + for (auto& t : threads) { + t.join(); + } + } + } + + void MaybeAddFile(VersionStorageInfo* vstorage, int level, FileMetaData* f) { + if (levels_[level].deleted_files.count(f->fd.GetNumber()) > 0) { + // f is a to-be-deleted table file + vstorage->RemoveCurrentStats(f); + } else { + vstorage->AddFile(level, f, info_log_); + } + } +}; + +VersionBuilder::VersionBuilder(const EnvOptions& env_options, + TableCache* table_cache, + VersionStorageInfo* base_vstorage, + Logger* info_log) + : rep_(new Rep(env_options, info_log, table_cache, base_vstorage)) {} +VersionBuilder::~VersionBuilder() { delete rep_; } +void VersionBuilder::CheckConsistency(VersionStorageInfo* vstorage) { + rep_->CheckConsistency(vstorage); +} +void VersionBuilder::CheckConsistencyForDeletes(VersionEdit* edit, + uint64_t number, int level) { + rep_->CheckConsistencyForDeletes(edit, number, level); +} +void VersionBuilder::Apply(VersionEdit* edit) { rep_->Apply(edit); } +void VersionBuilder::SaveTo(VersionStorageInfo* vstorage) { + rep_->SaveTo(vstorage); +} +void VersionBuilder::LoadTableHandlers( + InternalStats* internal_stats, int max_threads, + bool prefetch_index_and_filter_in_cache) { +
rep_->LoadTableHandlers(internal_stats, max_threads, + prefetch_index_and_filter_in_cache); +} +void VersionBuilder::MaybeAddFile(VersionStorageInfo* vstorage, int level, + FileMetaData* f) { + rep_->MaybeAddFile(vstorage, level, f); +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/version_builder.h b/external/rocksdb/db/version_builder.h new file mode 100755 index 0000000000..44ff75939b --- /dev/null +++ b/external/rocksdb/db/version_builder.h @@ -0,0 +1,44 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +#pragma once +#include "rocksdb/env.h" + +namespace rocksdb { + +class TableCache; +class VersionStorageInfo; +class VersionEdit; +struct FileMetaData; +class InternalStats; + +// A helper class so we can efficiently apply a whole sequence +// of edits to a particular state without creating intermediate +// Versions that contain full copies of the intermediate state. +class VersionBuilder { + public: + VersionBuilder(const EnvOptions& env_options, TableCache* table_cache, + VersionStorageInfo* base_vstorage, Logger* info_log = nullptr); + ~VersionBuilder(); + void CheckConsistency(VersionStorageInfo* vstorage); + void CheckConsistencyForDeletes(VersionEdit* edit, uint64_t number, + int level); + void Apply(VersionEdit* edit); + void SaveTo(VersionStorageInfo* vstorage); + void LoadTableHandlers(InternalStats* internal_stats, int max_threads, + bool prefetch_index_and_filter_in_cache); + void MaybeAddFile(VersionStorageInfo* vstorage, int level, FileMetaData* f); + + private: + class Rep; + Rep* rep_; +}; + +extern bool NewestFirstBySeqNo(FileMetaData* a, FileMetaData* b); +} // namespace rocksdb diff --git a/external/rocksdb/db/version_builder_test.cc b/external/rocksdb/db/version_builder_test.cc new file mode 100755 index 0000000000..2a87dc2380 --- /dev/null +++ b/external/rocksdb/db/version_builder_test.cc @@ -0,0 +1,305 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
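+// Added note (not upstream): the fixture below registers files with +// refs == 0 and deletes them in its destructor once unreferenced; tests call +// UpdateVersionStorageInfo() after Add() so derived per-level state is +// recomputed before assertions.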
+ +#include +#include "db/version_edit.h" +#include "db/version_set.h" +#include "util/logging.h" +#include "util/string_util.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { + +class VersionBuilderTest : public testing::Test { + public: + const Comparator* ucmp_; + InternalKeyComparator icmp_; + Options options_; + ImmutableCFOptions ioptions_; + MutableCFOptions mutable_cf_options_; + VersionStorageInfo vstorage_; + uint32_t file_num_; + CompactionOptionsFIFO fifo_options_; + std::vector size_being_compacted_; + + VersionBuilderTest() + : ucmp_(BytewiseComparator()), + icmp_(ucmp_), + ioptions_(options_), + mutable_cf_options_(options_, ioptions_), + vstorage_(&icmp_, ucmp_, options_.num_levels, kCompactionStyleLevel, + nullptr), + file_num_(1) { + mutable_cf_options_.RefreshDerivedOptions(ioptions_); + size_being_compacted_.resize(options_.num_levels); + } + + ~VersionBuilderTest() { + for (int i = 0; i < vstorage_.num_levels(); i++) { + for (auto* f : vstorage_.LevelFiles(i)) { + if (--f->refs == 0) { + delete f; + } + } + } + } + + InternalKey GetInternalKey(const char* ukey, + SequenceNumber smallest_seq = 100) { + return InternalKey(ukey, smallest_seq, kTypeValue); + } + + void Add(int level, uint32_t file_number, const char* smallest, + const char* largest, uint64_t file_size = 0, uint32_t path_id = 0, + SequenceNumber smallest_seq = 100, SequenceNumber largest_seq = 100, + uint64_t num_entries = 0, uint64_t num_deletions = 0, + bool sampled = false, SequenceNumber smallest_seqno = 0, + SequenceNumber largest_seqno = 0) { + assert(level < vstorage_.num_levels()); + FileMetaData* f = new FileMetaData; + f->fd = FileDescriptor(file_number, path_id, file_size); + f->smallest = GetInternalKey(smallest, smallest_seq); + f->largest = GetInternalKey(largest, largest_seq); + f->smallest_seqno = smallest_seqno; + f->largest_seqno = largest_seqno; + f->compensated_file_size = file_size; + f->refs = 0; + f->num_entries = num_entries; + f->num_deletions = num_deletions; + vstorage_.AddFile(level, f); + if (sampled) { + f->init_stats_from_file = true; + vstorage_.UpdateAccumulatedStats(f); + } + } + + void UpdateVersionStorageInfo() { + vstorage_.UpdateFilesByCompactionPri(mutable_cf_options_); + vstorage_.UpdateNumNonEmptyLevels(); + vstorage_.GenerateFileIndexer(); + vstorage_.GenerateLevelFilesBrief(); + vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_); + vstorage_.GenerateLevel0NonOverlapping(); + vstorage_.SetFinalized(); + } +}; + +void UnrefFilesInVersion(VersionStorageInfo* new_vstorage) { + for (int i = 0; i < new_vstorage->num_levels(); i++) { + for (auto* f : new_vstorage->LevelFiles(i)) { + if (--f->refs == 0) { + delete f; + } + } + } +} + +TEST_F(VersionBuilderTest, ApplyAndSaveTo) { + Add(0, 1U, "150", "200", 100U); + + Add(1, 66U, "150", "200", 100U); + Add(1, 88U, "201", "300", 100U); + + Add(2, 6U, "150", "179", 100U); + Add(2, 7U, "180", "220", 100U); + Add(2, 8U, "221", "300", 100U); + + Add(3, 26U, "150", "170", 100U); + Add(3, 27U, "171", "179", 100U); + Add(3, 28U, "191", "220", 100U); + Add(3, 29U, "221", "300", 100U); + UpdateVersionStorageInfo(); + + VersionEdit version_edit; + version_edit.AddFile(2, 666, 0, 100U, GetInternalKey("301"), + GetInternalKey("350"), 200, 200, false); + version_edit.DeleteFile(3, 27U); + + EnvOptions env_options; + + VersionBuilder version_builder(env_options, nullptr, &vstorage_); + + VersionStorageInfo new_vstorage(&icmp_, ucmp_, options_.num_levels, + kCompactionStyleLevel, nullptr); + 
version_builder.Apply(&version_edit); + version_builder.SaveTo(&new_vstorage); + + ASSERT_EQ(400U, new_vstorage.NumLevelBytes(2)); + ASSERT_EQ(300U, new_vstorage.NumLevelBytes(3)); + + UnrefFilesInVersion(&new_vstorage); +} + +TEST_F(VersionBuilderTest, ApplyAndSaveToDynamic) { + ioptions_.level_compaction_dynamic_level_bytes = true; + + Add(0, 1U, "150", "200", 100U, 0, 200U, 200U, 0, 0, false, 200U, 200U); + Add(0, 88U, "201", "300", 100U, 0, 100U, 100U, 0, 0, false, 100U, 100U); + + Add(4, 6U, "150", "179", 100U); + Add(4, 7U, "180", "220", 100U); + Add(4, 8U, "221", "300", 100U); + + Add(5, 26U, "150", "170", 100U); + Add(5, 27U, "171", "179", 100U); + UpdateVersionStorageInfo(); + + VersionEdit version_edit; + version_edit.AddFile(3, 666, 0, 100U, GetInternalKey("301"), + GetInternalKey("350"), 200, 200, false); + version_edit.DeleteFile(0, 1U); + version_edit.DeleteFile(0, 88U); + + EnvOptions env_options; + + VersionBuilder version_builder(env_options, nullptr, &vstorage_); + + VersionStorageInfo new_vstorage(&icmp_, ucmp_, options_.num_levels, + kCompactionStyleLevel, nullptr); + version_builder.Apply(&version_edit); + version_builder.SaveTo(&new_vstorage); + + ASSERT_EQ(0U, new_vstorage.NumLevelBytes(0)); + ASSERT_EQ(100U, new_vstorage.NumLevelBytes(3)); + ASSERT_EQ(300U, new_vstorage.NumLevelBytes(4)); + ASSERT_EQ(200U, new_vstorage.NumLevelBytes(5)); + + UnrefFilesInVersion(&new_vstorage); +} + +TEST_F(VersionBuilderTest, ApplyAndSaveToDynamic2) { + ioptions_.level_compaction_dynamic_level_bytes = true; + + Add(0, 1U, "150", "200", 100U, 0, 200U, 200U, 0, 0, false, 200U, 200U); + Add(0, 88U, "201", "300", 100U, 0, 100U, 100U, 0, 0, false, 100U, 100U); + + Add(4, 6U, "150", "179", 100U); + Add(4, 7U, "180", "220", 100U); + Add(4, 8U, "221", "300", 100U); + + Add(5, 26U, "150", "170", 100U); + Add(5, 27U, "171", "179", 100U); + UpdateVersionStorageInfo(); + + VersionEdit version_edit; + version_edit.AddFile(4, 666, 0, 100U, GetInternalKey("301"), + GetInternalKey("350"), 200, 200, false); + version_edit.DeleteFile(0, 1U); + version_edit.DeleteFile(0, 88U); + version_edit.DeleteFile(4, 6U); + version_edit.DeleteFile(4, 7U); + version_edit.DeleteFile(4, 8U); + + EnvOptions env_options; + + VersionBuilder version_builder(env_options, nullptr, &vstorage_); + + VersionStorageInfo new_vstorage(&icmp_, ucmp_, options_.num_levels, + kCompactionStyleLevel, nullptr); + version_builder.Apply(&version_edit); + version_builder.SaveTo(&new_vstorage); + + ASSERT_EQ(0U, new_vstorage.NumLevelBytes(0)); + ASSERT_EQ(100U, new_vstorage.NumLevelBytes(4)); + ASSERT_EQ(200U, new_vstorage.NumLevelBytes(5)); + + UnrefFilesInVersion(&new_vstorage); +} + +TEST_F(VersionBuilderTest, ApplyMultipleAndSaveTo) { + UpdateVersionStorageInfo(); + + VersionEdit version_edit; + version_edit.AddFile(2, 666, 0, 100U, GetInternalKey("301"), + GetInternalKey("350"), 200, 200, false); + version_edit.AddFile(2, 676, 0, 100U, GetInternalKey("401"), + GetInternalKey("450"), 200, 200, false); + version_edit.AddFile(2, 636, 0, 100U, GetInternalKey("601"), + GetInternalKey("650"), 200, 200, false); + version_edit.AddFile(2, 616, 0, 100U, GetInternalKey("501"), + GetInternalKey("550"), 200, 200, false); + version_edit.AddFile(2, 606, 0, 100U, GetInternalKey("701"), + GetInternalKey("750"), 200, 200, false); + + EnvOptions env_options; + + VersionBuilder version_builder(env_options, nullptr, &vstorage_); + + VersionStorageInfo new_vstorage(&icmp_, ucmp_, options_.num_levels, + kCompactionStyleLevel, nullptr); + 
version_builder.Apply(&version_edit); + version_builder.SaveTo(&new_vstorage); + + ASSERT_EQ(500U, new_vstorage.NumLevelBytes(2)); + + UnrefFilesInVersion(&new_vstorage); +} + +TEST_F(VersionBuilderTest, ApplyDeleteAndSaveTo) { + UpdateVersionStorageInfo(); + + EnvOptions env_options; + VersionBuilder version_builder(env_options, nullptr, &vstorage_); + VersionStorageInfo new_vstorage(&icmp_, ucmp_, options_.num_levels, + kCompactionStyleLevel, nullptr); + + VersionEdit version_edit; + version_edit.AddFile(2, 666, 0, 100U, GetInternalKey("301"), + GetInternalKey("350"), 200, 200, false); + version_edit.AddFile(2, 676, 0, 100U, GetInternalKey("401"), + GetInternalKey("450"), 200, 200, false); + version_edit.AddFile(2, 636, 0, 100U, GetInternalKey("601"), + GetInternalKey("650"), 200, 200, false); + version_edit.AddFile(2, 616, 0, 100U, GetInternalKey("501"), + GetInternalKey("550"), 200, 200, false); + version_edit.AddFile(2, 606, 0, 100U, GetInternalKey("701"), + GetInternalKey("750"), 200, 200, false); + version_builder.Apply(&version_edit); + + VersionEdit version_edit2; + version_edit.AddFile(2, 808, 0, 100U, GetInternalKey("901"), + GetInternalKey("950"), 200, 200, false); + version_edit2.DeleteFile(2, 616); + version_edit2.DeleteFile(2, 636); + version_edit.AddFile(2, 806, 0, 100U, GetInternalKey("801"), + GetInternalKey("850"), 200, 200, false); + version_builder.Apply(&version_edit2); + + version_builder.SaveTo(&new_vstorage); + + ASSERT_EQ(300U, new_vstorage.NumLevelBytes(2)); + + UnrefFilesInVersion(&new_vstorage); +} + +TEST_F(VersionBuilderTest, EstimatedActiveKeys) { + const uint32_t kTotalSamples = 20; + const uint32_t kNumLevels = 5; + const uint32_t kFilesPerLevel = 8; + const uint32_t kNumFiles = kNumLevels * kFilesPerLevel; + const uint32_t kEntriesPerFile = 1000; + const uint32_t kDeletionsPerFile = 100; + for (uint32_t i = 0; i < kNumFiles; ++i) { + Add(static_cast<int>(i / kFilesPerLevel), i + 1, + ToString((i + 100) * 1000).c_str(), + ToString((i + 100) * 1000 + 999).c_str(), + 100U, 0, 100, 100, + kEntriesPerFile, kDeletionsPerFile, + (i < kTotalSamples)); + } + // minus 2X for the number of deletion entries because: + // 1x for deletion entry does not count as a data entry. + // 1x for each deletion entry will actually remove one data entry. + ASSERT_EQ(vstorage_.GetEstimatedActiveKeys(), + (kEntriesPerFile - 2 * kDeletionsPerFile) * kNumFiles); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/version_edit.cc b/external/rocksdb/db/version_edit.cc new file mode 100755 index 0000000000..a030b67934 --- /dev/null +++ b/external/rocksdb/db/version_edit.cc @@ -0,0 +1,590 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/version_edit.h" + +#include "db/version_set.h" +#include "util/coding.h" +#include "util/event_logger.h" +#include "util/sync_point.h" +#include "rocksdb/slice.h" + +namespace rocksdb { + +// Tag numbers for serialized VersionEdit.
These numbers are written to +// disk and should not be changed. +enum Tag { + kComparator = 1, + kLogNumber = 2, + kNextFileNumber = 3, + kLastSequence = 4, + kCompactPointer = 5, + kDeletedFile = 6, + kNewFile = 7, + // 8 was used for large value refs + kPrevLogNumber = 9, + + // these are new formats divergent from open source leveldb + kNewFile2 = 100, + kNewFile3 = 102, + kNewFile4 = 103, // 4th (the latest) format version of adding files + kColumnFamily = 200, // specify column family for version edit + kColumnFamilyAdd = 201, + kColumnFamilyDrop = 202, + kMaxColumnFamily = 203, +}; + +enum CustomTag { + kTerminate = 1, // The end of customized fields + kNeedCompaction = 2, + kPathId = 65, +}; +// If this bit for the custom tag is set, opening DB should fail if +// we don't know this field. +uint32_t kCustomTagNonSafeIgnoreMask = 1 << 6; + +uint64_t PackFileNumberAndPathId(uint64_t number, uint64_t path_id) { + assert(number <= kFileNumberMask); + return number | (path_id * (kFileNumberMask + 1)); +} + +void VersionEdit::Clear() { + comparator_.clear(); + max_level_ = 0; + log_number_ = 0; + prev_log_number_ = 0; + last_sequence_ = 0; + next_file_number_ = 0; + max_column_family_ = 0; + has_comparator_ = false; + has_log_number_ = false; + has_prev_log_number_ = false; + has_next_file_number_ = false; + has_last_sequence_ = false; + has_max_column_family_ = false; + deleted_files_.clear(); + new_files_.clear(); + column_family_ = 0; + is_column_family_add_ = 0; + is_column_family_drop_ = 0; + column_family_name_.clear(); +} + +bool VersionEdit::EncodeTo(std::string* dst) const { + if (has_comparator_) { + PutVarint32(dst, kComparator); + PutLengthPrefixedSlice(dst, comparator_); + } + if (has_log_number_) { + PutVarint32Varint64(dst, kLogNumber, log_number_); + } + if (has_prev_log_number_) { + PutVarint32Varint64(dst, kPrevLogNumber, prev_log_number_); + } + if (has_next_file_number_) { + PutVarint32Varint64(dst, kNextFileNumber, next_file_number_); + } + if (has_last_sequence_) { + PutVarint32Varint64(dst, kLastSequence, last_sequence_); + } + if (has_max_column_family_) { + PutVarint32Varint32(dst, kMaxColumnFamily, max_column_family_); + } + + for (const auto& deleted : deleted_files_) { + PutVarint32Varint32Varint64(dst, kDeletedFile, deleted.first /* level */, + deleted.second /* file number */); + } + + for (size_t i = 0; i < new_files_.size(); i++) { + const FileMetaData& f = new_files_[i].second; + if (!f.smallest.Valid() || !f.largest.Valid()) { + return false; + } + bool has_customized_fields = false; + if (f.marked_for_compaction) { + PutVarint32(dst, kNewFile4); + has_customized_fields = true; + } else if (f.fd.GetPathId() == 0) { + // Use older format to make sure user can roll back the build if they + // don't config multiple DB paths. 
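+ // (Added layout sketch of the kNewFile2 record emitted below: + // varint32 tag | varint32 level | varint64 file number | varint64 size | + // length-prefixed smallest key | length-prefixed largest key | + // varint64 smallest_seqno | varint64 largest_seqno.)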
+      PutVarint32(dst, kNewFile2);
+    } else {
+      PutVarint32(dst, kNewFile3);
+    }
+    PutVarint32Varint64(dst, new_files_[i].first /* level */, f.fd.GetNumber());
+    if (f.fd.GetPathId() != 0 && !has_customized_fields) {
+      // kNewFile3
+      PutVarint32(dst, f.fd.GetPathId());
+    }
+    PutVarint64(dst, f.fd.GetFileSize());
+    PutLengthPrefixedSlice(dst, f.smallest.Encode());
+    PutLengthPrefixedSlice(dst, f.largest.Encode());
+    PutVarint64Varint64(dst, f.smallest_seqno, f.largest_seqno);
+    if (has_customized_fields) {
+      // Customized fields' format:
+      // +-----------------------------+
+      // | 1st field's tag (varint32)  |
+      // +-----------------------------+
+      // | 1st field's size (varint32) |
+      // +-----------------------------+
+      // |    bytes for 1st field      |
+      // |  (based on size decoded)    |
+      // +-----------------------------+
+      // |                             |
+      // |          ......             |
+      // |                             |
+      // +-----------------------------+
+      // | last field's size (varint32)|
+      // +-----------------------------+
+      // |    bytes for last field     |
+      // |  (based on size decoded)    |
+      // +-----------------------------+
+      // | terminating tag (varint32)  |
+      // +-----------------------------+
+      //
+      // Customized encoding for fields:
+      //   tag kPathId: 1 byte as path_id
+      //   tag kNeedCompaction:
+      //        now only can take one char value 1 indicating need-compaction
+      //
+      if (f.fd.GetPathId() != 0) {
+        PutVarint32(dst, CustomTag::kPathId);
+        char p = static_cast<char>(f.fd.GetPathId());
+        PutLengthPrefixedSlice(dst, Slice(&p, 1));
+      }
+      if (f.marked_for_compaction) {
+        PutVarint32(dst, CustomTag::kNeedCompaction);
+        char p = static_cast<char>(1);
+        PutLengthPrefixedSlice(dst, Slice(&p, 1));
+      }
+      TEST_SYNC_POINT_CALLBACK("VersionEdit::EncodeTo:NewFile4:CustomizeFields",
+                               dst);
+
+      PutVarint32(dst, CustomTag::kTerminate);
+    }
+  }
+
+  // 0 is default and does not need to be explicitly written
+  if (column_family_ != 0) {
+    PutVarint32Varint32(dst, kColumnFamily, column_family_);
+  }
+
+  if (is_column_family_add_) {
+    PutVarint32(dst, kColumnFamilyAdd);
+    PutLengthPrefixedSlice(dst, Slice(column_family_name_));
+  }
+
+  if (is_column_family_drop_) {
+    PutVarint32(dst, kColumnFamilyDrop);
+  }
+  return true;
+}
+
+static bool GetInternalKey(Slice* input, InternalKey* dst) {
+  Slice str;
+  if (GetLengthPrefixedSlice(input, &str)) {
+    dst->DecodeFrom(str);
+    return dst->Valid();
+  } else {
+    return false;
+  }
+}
+
+bool VersionEdit::GetLevel(Slice* input, int* level, const char** msg) {
+  uint32_t v;
+  if (GetVarint32(input, &v)) {
+    *level = v;
+    if (max_level_ < *level) {
+      max_level_ = *level;
+    }
+    return true;
+  } else {
+    return false;
+  }
+}
+
+const char* VersionEdit::DecodeNewFile4From(Slice* input) {
+  const char* msg = nullptr;
+  int level;
+  FileMetaData f;
+  uint64_t number;
+  uint32_t path_id = 0;
+  uint64_t file_size;
+  if (GetLevel(input, &level, &msg) && GetVarint64(input, &number) &&
+      GetVarint64(input, &file_size) && GetInternalKey(input, &f.smallest) &&
+      GetInternalKey(input, &f.largest) &&
+      GetVarint64(input, &f.smallest_seqno) &&
+      GetVarint64(input, &f.largest_seqno)) {
+    // See comments in VersionEdit::EncodeTo() for format of customized fields
+    while (true) {
+      uint32_t custom_tag;
+      Slice field;
+      if (!GetVarint32(input, &custom_tag)) {
+        return "new-file4 custom field";
+      }
+      if (custom_tag == kTerminate) {
+        break;
+      }
+      if (!GetLengthPrefixedSlice(input, &field)) {
+        return "new-file4 custom field length prefixed slice error";
+      }
+      switch (custom_tag) {
+        case kPathId:
+          if (field.size() != 1) {
+            return "path_id field wrong size";
+          }
+          path_id = field[0];
+          if (path_id > 3) {
+            return "path_id wrong value";
+          }
+          break;
+        case kNeedCompaction:
+          if (field.size() != 1) {
+            return "need_compaction field wrong size";
+          }
+          f.marked_for_compaction = (field[0] == 1);
+          break;
+        default:
+          if ((custom_tag & kCustomTagNonSafeIgnoreMask) != 0) {
+            // Should not proceed if cannot understand it
+            return "new-file4 custom field not supported";
+          }
+          break;
+      }
+    }
+  } else {
+    return "new-file4 entry";
+  }
+  f.fd = FileDescriptor(number, path_id, file_size);
+  new_files_.push_back(std::make_pair(level, f));
+  return nullptr;
+}
+
+Status VersionEdit::DecodeFrom(const Slice& src) {
+  Clear();
+  Slice input = src;
+  const char* msg = nullptr;
+  uint32_t tag;
+
+  // Temporary storage for parsing
+  int level;
+  FileMetaData f;
+  Slice str;
+  InternalKey key;
+
+  while (msg == nullptr && GetVarint32(&input, &tag)) {
+    switch (tag) {
+      case kComparator:
+        if (GetLengthPrefixedSlice(&input, &str)) {
+          comparator_ = str.ToString();
+          has_comparator_ = true;
+        } else {
+          msg = "comparator name";
+        }
+        break;
+
+      case kLogNumber:
+        if (GetVarint64(&input, &log_number_)) {
+          has_log_number_ = true;
+        } else {
+          msg = "log number";
+        }
+        break;
+
+      case kPrevLogNumber:
+        if (GetVarint64(&input, &prev_log_number_)) {
+          has_prev_log_number_ = true;
+        } else {
+          msg = "previous log number";
+        }
+        break;
+
+      case kNextFileNumber:
+        if (GetVarint64(&input, &next_file_number_)) {
+          has_next_file_number_ = true;
+        } else {
+          msg = "next file number";
+        }
+        break;
+
+      case kLastSequence:
+        if (GetVarint64(&input, &last_sequence_)) {
+          has_last_sequence_ = true;
+        } else {
+          msg = "last sequence number";
+        }
+        break;
+
+      case kMaxColumnFamily:
+        if (GetVarint32(&input, &max_column_family_)) {
+          has_max_column_family_ = true;
+        } else {
+          msg = "max column family";
+        }
+        break;
+
+      case kCompactPointer:
+        if (GetLevel(&input, &level, &msg) &&
+            GetInternalKey(&input, &key)) {
+          // we don't use compact pointers anymore,
+          // but we should not fail if they are still
+          // in the manifest
+        } else {
+          if (!msg) {
+            msg = "compaction pointer";
+          }
+        }
+        break;
+
+      case kDeletedFile: {
+        uint64_t number;
+        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number)) {
+          deleted_files_.insert(std::make_pair(level, number));
+        } else {
+          if (!msg) {
+            msg = "deleted file";
+          }
+        }
+        break;
+      }
+
+      case kNewFile: {
+        uint64_t number;
+        uint64_t file_size;
+        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number) &&
+            GetVarint64(&input, &file_size) &&
+            GetInternalKey(&input, &f.smallest) &&
+            GetInternalKey(&input, &f.largest)) {
+          f.fd = FileDescriptor(number, 0, file_size);
+          new_files_.push_back(std::make_pair(level, f));
+        } else {
+          if (!msg) {
+            msg = "new-file entry";
+          }
+        }
+        break;
+      }
+      case kNewFile2: {
+        uint64_t number;
+        uint64_t file_size;
+        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number) &&
+            GetVarint64(&input, &file_size) &&
+            GetInternalKey(&input, &f.smallest) &&
+            GetInternalKey(&input, &f.largest) &&
+            GetVarint64(&input, &f.smallest_seqno) &&
+            GetVarint64(&input, &f.largest_seqno)) {
+          f.fd = FileDescriptor(number, 0, file_size);
+          new_files_.push_back(std::make_pair(level, f));
+        } else {
+          if (!msg) {
+            msg = "new-file2 entry";
+          }
+        }
+        break;
+      }
+
+      case kNewFile3: {
+        uint64_t number;
+        uint32_t path_id;
+        uint64_t file_size;
+        if (GetLevel(&input, &level, &msg) && GetVarint64(&input, &number) &&
+            GetVarint32(&input, &path_id) && GetVarint64(&input, &file_size) &&
+            GetInternalKey(&input, &f.smallest) &&
GetInternalKey(&input, &f.largest) && + GetVarint64(&input, &f.smallest_seqno) && + GetVarint64(&input, &f.largest_seqno)) { + f.fd = FileDescriptor(number, path_id, file_size); + new_files_.push_back(std::make_pair(level, f)); + } else { + if (!msg) { + msg = "new-file3 entry"; + } + } + break; + } + + case kNewFile4: { + msg = DecodeNewFile4From(&input); + break; + } + + case kColumnFamily: + if (!GetVarint32(&input, &column_family_)) { + if (!msg) { + msg = "set column family id"; + } + } + break; + + case kColumnFamilyAdd: + if (GetLengthPrefixedSlice(&input, &str)) { + is_column_family_add_ = true; + column_family_name_ = str.ToString(); + } else { + if (!msg) { + msg = "column family add"; + } + } + break; + + case kColumnFamilyDrop: + is_column_family_drop_ = true; + break; + + default: + msg = "unknown tag"; + break; + } + } + + if (msg == nullptr && !input.empty()) { + msg = "invalid tag"; + } + + Status result; + if (msg != nullptr) { + result = Status::Corruption("VersionEdit", msg); + } + return result; +} + +std::string VersionEdit::DebugString(bool hex_key) const { + std::string r; + r.append("VersionEdit {"); + if (has_comparator_) { + r.append("\n Comparator: "); + r.append(comparator_); + } + if (has_log_number_) { + r.append("\n LogNumber: "); + AppendNumberTo(&r, log_number_); + } + if (has_prev_log_number_) { + r.append("\n PrevLogNumber: "); + AppendNumberTo(&r, prev_log_number_); + } + if (has_next_file_number_) { + r.append("\n NextFileNumber: "); + AppendNumberTo(&r, next_file_number_); + } + if (has_last_sequence_) { + r.append("\n LastSeq: "); + AppendNumberTo(&r, last_sequence_); + } + for (DeletedFileSet::const_iterator iter = deleted_files_.begin(); + iter != deleted_files_.end(); + ++iter) { + r.append("\n DeleteFile: "); + AppendNumberTo(&r, iter->first); + r.append(" "); + AppendNumberTo(&r, iter->second); + } + for (size_t i = 0; i < new_files_.size(); i++) { + const FileMetaData& f = new_files_[i].second; + r.append("\n AddFile: "); + AppendNumberTo(&r, new_files_[i].first); + r.append(" "); + AppendNumberTo(&r, f.fd.GetNumber()); + r.append(" "); + AppendNumberTo(&r, f.fd.GetFileSize()); + r.append(" "); + r.append(f.smallest.DebugString(hex_key)); + r.append(" .. 
"); + r.append(f.largest.DebugString(hex_key)); + } + r.append("\n ColumnFamily: "); + AppendNumberTo(&r, column_family_); + if (is_column_family_add_) { + r.append("\n ColumnFamilyAdd: "); + r.append(column_family_name_); + } + if (is_column_family_drop_) { + r.append("\n ColumnFamilyDrop"); + } + if (has_max_column_family_) { + r.append("\n MaxColumnFamily: "); + AppendNumberTo(&r, max_column_family_); + } + r.append("\n}\n"); + return r; +} + +std::string VersionEdit::DebugJSON(int edit_num, bool hex_key) const { + JSONWriter jw; + jw << "EditNumber" << edit_num; + + if (has_comparator_) { + jw << "Comparator" << comparator_; + } + if (has_log_number_) { + jw << "LogNumber" << log_number_; + } + if (has_prev_log_number_) { + jw << "PrevLogNumber" << prev_log_number_; + } + if (has_next_file_number_) { + jw << "NextFileNumber" << next_file_number_; + } + if (has_last_sequence_) { + jw << "LastSeq" << last_sequence_; + } + + if (!deleted_files_.empty()) { + jw << "DeletedFiles"; + jw.StartArray(); + + for (DeletedFileSet::const_iterator iter = deleted_files_.begin(); + iter != deleted_files_.end(); + ++iter) { + jw.StartArrayedObject(); + jw << "Level" << iter->first; + jw << "FileNumber" << iter->second; + jw.EndArrayedObject(); + } + + jw.EndArray(); + } + + if (!new_files_.empty()) { + jw << "AddedFiles"; + jw.StartArray(); + + for (size_t i = 0; i < new_files_.size(); i++) { + jw.StartArrayedObject(); + jw << "Level" << new_files_[i].first; + const FileMetaData& f = new_files_[i].second; + jw << "FileNumber" << f.fd.GetNumber(); + jw << "FileSize" << f.fd.GetFileSize(); + jw << "SmallestIKey" << f.smallest.DebugString(hex_key); + jw << "LargestIKey" << f.largest.DebugString(hex_key); + jw.EndArrayedObject(); + } + + jw.EndArray(); + } + + jw << "ColumnFamily" << column_family_; + + if (is_column_family_add_) { + jw << "ColumnFamilyAdd" << column_family_name_; + } + if (is_column_family_drop_) { + jw << "ColumnFamilyDrop" << column_family_name_; + } + if (has_max_column_family_) { + jw << "MaxColumnFamily" << max_column_family_; + } + + jw.EndObject(); + + return jw.Get(); +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/version_edit.h b/external/rocksdb/db/version_edit.h new file mode 100755 index 0000000000..6bd5b23a1a --- /dev/null +++ b/external/rocksdb/db/version_edit.h @@ -0,0 +1,287 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#include +#include +#include +#include +#include +#include "rocksdb/cache.h" +#include "db/dbformat.h" +#include "util/arena.h" +#include "util/autovector.h" + +namespace rocksdb { + +class VersionSet; + +const uint64_t kFileNumberMask = 0x3FFFFFFFFFFFFFFF; + +extern uint64_t PackFileNumberAndPathId(uint64_t number, uint64_t path_id); + +// A copyable structure contains information needed to read data from an SST +// file. It can contains a pointer to a table reader opened for the file, or +// file number and size, which can be used to create a new table reader for it. 
+// The behavior is undefined when a copied of the structure is used when the +// file is not in any live version any more. +struct FileDescriptor { + // Table reader in table_reader_handle + TableReader* table_reader; + uint64_t packed_number_and_path_id; + uint64_t file_size; // File size in bytes + + FileDescriptor() : FileDescriptor(0, 0, 0) {} + + FileDescriptor(uint64_t number, uint32_t path_id, uint64_t _file_size) + : table_reader(nullptr), + packed_number_and_path_id(PackFileNumberAndPathId(number, path_id)), + file_size(_file_size) {} + + FileDescriptor& operator=(const FileDescriptor& fd) { + table_reader = fd.table_reader; + packed_number_and_path_id = fd.packed_number_and_path_id; + file_size = fd.file_size; + return *this; + } + + uint64_t GetNumber() const { + return packed_number_and_path_id & kFileNumberMask; + } + uint32_t GetPathId() const { + return static_cast( + packed_number_and_path_id / (kFileNumberMask + 1)); + } + uint64_t GetFileSize() const { return file_size; } +}; + +struct FileMetaData { + int refs; + FileDescriptor fd; + InternalKey smallest; // Smallest internal key served by table + InternalKey largest; // Largest internal key served by table + bool being_compacted; // Is this file undergoing compaction? + SequenceNumber smallest_seqno; // The smallest seqno in this file + SequenceNumber largest_seqno; // The largest seqno in this file + + // Needs to be disposed when refs becomes 0. + Cache::Handle* table_reader_handle; + + // Stats for compensating deletion entries during compaction + + // File size compensated by deletion entry. + // This is updated in Version::UpdateAccumulatedStats() first time when the + // file is created or loaded. After it is updated (!= 0), it is immutable. + uint64_t compensated_file_size; + // These values can mutate, but they can only be read or written from + // single-threaded LogAndApply thread + uint64_t num_entries; // the number of entries. + uint64_t num_deletions; // the number of deletion entries. + uint64_t raw_key_size; // total uncompressed key size. + uint64_t raw_value_size; // total uncompressed value size. + bool init_stats_from_file; // true if the data-entry stats of this file + // has initialized from file. + + bool marked_for_compaction; // True if client asked us nicely to compact this + // file. + + FileMetaData() + : refs(0), + being_compacted(false), + smallest_seqno(kMaxSequenceNumber), + largest_seqno(0), + table_reader_handle(nullptr), + compensated_file_size(0), + num_entries(0), + num_deletions(0), + raw_key_size(0), + raw_value_size(0), + init_stats_from_file(false), + marked_for_compaction(false) {} + + // REQUIRED: Keys must be given to the function in sorted order (it expects + // the last key to be the largest). 
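// Editorial sketch, not part of the patch: how the packing above behaves.
// The file number lives in the low 62 bits and the path id in the top two
// (kFileNumberMask == 0x3FFFFFFFFFFFFFFF), which is why path ids above 3 are
// rejected when decoding kNewFile4 records. Assumes <cassert>.
FileDescriptor fd(/*number=*/42, /*path_id=*/3, /*file_size=*/4096);
assert(fd.GetNumber() == 42);     // low bits survive the round trip
assert(fd.GetPathId() == 3);      // high bits decode back to the path id
assert(fd.GetFileSize() == 4096); // size is stored separately, unpacked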
+ void UpdateBoundaries(const Slice& key, SequenceNumber seqno) { + if (smallest.size() == 0) { + smallest.DecodeFrom(key); + } + largest.DecodeFrom(key); + smallest_seqno = std::min(smallest_seqno, seqno); + largest_seqno = std::max(largest_seqno, seqno); + } +}; + +// A compressed copy of file meta data that just contain +// smallest and largest key's slice +struct FdWithKeyRange { + FileDescriptor fd; + Slice smallest_key; // slice that contain smallest key + Slice largest_key; // slice that contain largest key + + FdWithKeyRange() + : fd(), + smallest_key(), + largest_key() { + } + + FdWithKeyRange(FileDescriptor _fd, Slice _smallest_key, Slice _largest_key) + : fd(_fd), smallest_key(_smallest_key), largest_key(_largest_key) {} +}; + +// Data structure to store an array of FdWithKeyRange in one level +// Actual data is guaranteed to be stored closely +struct LevelFilesBrief { + size_t num_files; + FdWithKeyRange* files; + LevelFilesBrief() { + num_files = 0; + files = nullptr; + } +}; + +class VersionEdit { + public: + VersionEdit() { Clear(); } + ~VersionEdit() { } + + void Clear(); + + void SetComparatorName(const Slice& name) { + has_comparator_ = true; + comparator_ = name.ToString(); + } + void SetLogNumber(uint64_t num) { + has_log_number_ = true; + log_number_ = num; + } + void SetPrevLogNumber(uint64_t num) { + has_prev_log_number_ = true; + prev_log_number_ = num; + } + void SetNextFile(uint64_t num) { + has_next_file_number_ = true; + next_file_number_ = num; + } + void SetLastSequence(SequenceNumber seq) { + has_last_sequence_ = true; + last_sequence_ = seq; + } + void SetMaxColumnFamily(uint32_t max_column_family) { + has_max_column_family_ = true; + max_column_family_ = max_column_family; + } + + // Add the specified file at the specified number. + // REQUIRES: This version has not been saved (see VersionSet::SaveTo) + // REQUIRES: "smallest" and "largest" are smallest and largest keys in file + void AddFile(int level, uint64_t file, uint32_t file_path_id, + uint64_t file_size, const InternalKey& smallest, + const InternalKey& largest, const SequenceNumber& smallest_seqno, + const SequenceNumber& largest_seqno, + bool marked_for_compaction) { + assert(smallest_seqno <= largest_seqno); + FileMetaData f; + f.fd = FileDescriptor(file, file_path_id, file_size); + f.smallest = smallest; + f.largest = largest; + f.smallest_seqno = smallest_seqno; + f.largest_seqno = largest_seqno; + f.marked_for_compaction = marked_for_compaction; + new_files_.emplace_back(level, std::move(f)); + } + + void AddFile(int level, const FileMetaData& f) { + assert(f.smallest_seqno <= f.largest_seqno); + new_files_.emplace_back(level, f); + } + + // Delete the specified "file" from the specified "level". 
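// Editorial sketch, not part of the patch: the typical shape of an edit that
// installs one new table file and retires another, using the AddFile overload
// above and the InternalKey constructor from db/dbformat.h.
VersionEdit edit;
edit.SetLogNumber(11);
edit.AddFile(/*level=*/1, /*file=*/42, /*file_path_id=*/0, /*file_size=*/4096,
             InternalKey("a", /*seq=*/100, kTypeValue),  // smallest key
             InternalKey("z", /*seq=*/200, kTypeValue),  // largest key
             /*smallest_seqno=*/100, /*largest_seqno=*/200,
             /*marked_for_compaction=*/false);
edit.DeleteFile(/*level=*/1, /*file=*/17);  // declared just below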
+  void DeleteFile(int level, uint64_t file) {
+    deleted_files_.insert({level, file});
+  }
+
+  // Number of edits
+  size_t NumEntries() { return new_files_.size() + deleted_files_.size(); }
+
+  bool IsColumnFamilyManipulation() {
+    return is_column_family_add_ || is_column_family_drop_;
+  }
+
+  void SetColumnFamily(uint32_t column_family_id) {
+    column_family_ = column_family_id;
+  }
+
+  // set column family ID by calling SetColumnFamily()
+  void AddColumnFamily(const std::string& name) {
+    assert(!is_column_family_drop_);
+    assert(!is_column_family_add_);
+    assert(NumEntries() == 0);
+    is_column_family_add_ = true;
+    column_family_name_ = name;
+  }
+
+  // set column family ID by calling SetColumnFamily()
+  void DropColumnFamily() {
+    assert(!is_column_family_drop_);
+    assert(!is_column_family_add_);
+    assert(NumEntries() == 0);
+    is_column_family_drop_ = true;
+  }
+
+  // return true on success.
+  bool EncodeTo(std::string* dst) const;
+  Status DecodeFrom(const Slice& src);
+
+  const char* DecodeNewFile4From(Slice* input);
+
+  typedef std::set<std::pair<int, uint64_t>> DeletedFileSet;
+
+  const DeletedFileSet& GetDeletedFiles() { return deleted_files_; }
+  const std::vector<std::pair<int, FileMetaData>>& GetNewFiles() {
+    return new_files_;
+  }
+
+  std::string DebugString(bool hex_key = false) const;
+  std::string DebugJSON(int edit_num, bool hex_key = false) const;
+
+ private:
+  friend class VersionSet;
+  friend class Version;
+
+  bool GetLevel(Slice* input, int* level, const char** msg);
+
+  int max_level_;
+  std::string comparator_;
+  uint64_t log_number_;
+  uint64_t prev_log_number_;
+  uint64_t next_file_number_;
+  uint32_t max_column_family_;
+  SequenceNumber last_sequence_;
+  bool has_comparator_;
+  bool has_log_number_;
+  bool has_prev_log_number_;
+  bool has_next_file_number_;
+  bool has_last_sequence_;
+  bool has_max_column_family_;
+
+  DeletedFileSet deleted_files_;
+  std::vector<std::pair<int, FileMetaData>> new_files_;
+
+  // Each version edit record should have column_family_id set
+  // If it's not set, it is default (0)
+  uint32_t column_family_;
+  // a version edit can be either column_family add or
+  // column_family drop. If it's column family add,
+  // it also includes column family name.
+  bool is_column_family_drop_;
+  bool is_column_family_add_;
+  std::string column_family_name_;
+};
+
+} // namespace rocksdb
diff --git a/external/rocksdb/db/version_edit_test.cc b/external/rocksdb/db/version_edit_test.cc
new file mode 100755
index 0000000000..ab109be600
--- /dev/null
+++ b/external/rocksdb/db/version_edit_test.cc
@@ -0,0 +1,189 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
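One detail worth pulling out before the tests: forward compatibility of kNewFile4 records hinges on a single bit of each custom tag. A hedged sketch of the rule DecodeNewFile4From applies (the helper name here is hypothetical, not from the patch):

bool SafeToIgnore(uint32_t custom_tag) {
  // Bit 6 set (kCustomTagNonSafeIgnoreMask) means an old binary must fail
  // the open rather than silently drop the field.
  return (custom_tag & (1u << 6)) == 0;
}
// Tags 33 and 22, injected by ForwardCompatibleNewFile4 below, are safely
// skipped; NewFile4NotSupportedField feeds a malformed record and expects
// decoding to fail.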
+ +#include "db/version_edit.h" +#include "util/sync_point.h" +#include "util/testharness.h" + +namespace rocksdb { + +static void TestEncodeDecode(const VersionEdit& edit) { + std::string encoded, encoded2; + edit.EncodeTo(&encoded); + VersionEdit parsed; + Status s = parsed.DecodeFrom(encoded); + ASSERT_TRUE(s.ok()) << s.ToString(); + parsed.EncodeTo(&encoded2); + ASSERT_EQ(encoded, encoded2); +} + +class VersionEditTest : public testing::Test {}; + +TEST_F(VersionEditTest, EncodeDecode) { + static const uint64_t kBig = 1ull << 50; + static const uint32_t kBig32Bit = 1ull << 30; + + VersionEdit edit; + for (int i = 0; i < 4; i++) { + TestEncodeDecode(edit); + edit.AddFile(3, kBig + 300 + i, kBig32Bit + 400 + i, 0, + InternalKey("foo", kBig + 500 + i, kTypeValue), + InternalKey("zoo", kBig + 600 + i, kTypeDeletion), + kBig + 500 + i, kBig + 600 + i, false); + edit.DeleteFile(4, kBig + 700 + i); + } + + edit.SetComparatorName("foo"); + edit.SetLogNumber(kBig + 100); + edit.SetNextFile(kBig + 200); + edit.SetLastSequence(kBig + 1000); + TestEncodeDecode(edit); +} + +TEST_F(VersionEditTest, EncodeDecodeNewFile4) { + static const uint64_t kBig = 1ull << 50; + + VersionEdit edit; + edit.AddFile(3, 300, 3, 100, InternalKey("foo", kBig + 500, kTypeValue), + InternalKey("zoo", kBig + 600, kTypeDeletion), kBig + 500, + kBig + 600, true); + edit.AddFile(4, 301, 3, 100, InternalKey("foo", kBig + 501, kTypeValue), + InternalKey("zoo", kBig + 601, kTypeDeletion), kBig + 501, + kBig + 601, false); + edit.AddFile(5, 302, 0, 100, InternalKey("foo", kBig + 502, kTypeValue), + InternalKey("zoo", kBig + 602, kTypeDeletion), kBig + 502, + kBig + 602, true); + + edit.DeleteFile(4, 700); + + edit.SetComparatorName("foo"); + edit.SetLogNumber(kBig + 100); + edit.SetNextFile(kBig + 200); + edit.SetLastSequence(kBig + 1000); + TestEncodeDecode(edit); + + std::string encoded, encoded2; + edit.EncodeTo(&encoded); + VersionEdit parsed; + Status s = parsed.DecodeFrom(encoded); + ASSERT_TRUE(s.ok()) << s.ToString(); + auto& new_files = parsed.GetNewFiles(); + ASSERT_TRUE(new_files[0].second.marked_for_compaction); + ASSERT_TRUE(!new_files[1].second.marked_for_compaction); + ASSERT_TRUE(new_files[2].second.marked_for_compaction); + ASSERT_EQ(3, new_files[0].second.fd.GetPathId()); + ASSERT_EQ(3, new_files[1].second.fd.GetPathId()); + ASSERT_EQ(0, new_files[2].second.fd.GetPathId()); +} + +TEST_F(VersionEditTest, ForwardCompatibleNewFile4) { + static const uint64_t kBig = 1ull << 50; + VersionEdit edit; + edit.AddFile(3, 300, 3, 100, InternalKey("foo", kBig + 500, kTypeValue), + InternalKey("zoo", kBig + 600, kTypeDeletion), kBig + 500, + kBig + 600, true); + edit.AddFile(4, 301, 3, 100, InternalKey("foo", kBig + 501, kTypeValue), + InternalKey("zoo", kBig + 601, kTypeDeletion), kBig + 501, + kBig + 601, false); + edit.DeleteFile(4, 700); + + edit.SetComparatorName("foo"); + edit.SetLogNumber(kBig + 100); + edit.SetNextFile(kBig + 200); + edit.SetLastSequence(kBig + 1000); + + std::string encoded; + + // Call back function to add extra customized builds. 
+ bool first = true; + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "VersionEdit::EncodeTo:NewFile4:CustomizeFields", [&](void* arg) { + std::string* str = reinterpret_cast(arg); + PutVarint32(str, 33); + const std::string str1 = "random_string"; + PutLengthPrefixedSlice(str, str1); + if (first) { + first = false; + PutVarint32(str, 22); + const std::string str2 = "s"; + PutLengthPrefixedSlice(str, str2); + } + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + edit.EncodeTo(&encoded); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + + VersionEdit parsed; + Status s = parsed.DecodeFrom(encoded); + ASSERT_TRUE(s.ok()) << s.ToString(); + ASSERT_TRUE(!first); + auto& new_files = parsed.GetNewFiles(); + ASSERT_TRUE(new_files[0].second.marked_for_compaction); + ASSERT_TRUE(!new_files[1].second.marked_for_compaction); + ASSERT_EQ(3, new_files[0].second.fd.GetPathId()); + ASSERT_EQ(3, new_files[1].second.fd.GetPathId()); + ASSERT_EQ(1u, parsed.GetDeletedFiles().size()); +} + +TEST_F(VersionEditTest, NewFile4NotSupportedField) { + static const uint64_t kBig = 1ull << 50; + VersionEdit edit; + edit.AddFile(3, 300, 3, 100, InternalKey("foo", kBig + 500, kTypeValue), + InternalKey("zoo", kBig + 600, kTypeDeletion), kBig + 500, + kBig + 600, true); + + edit.SetComparatorName("foo"); + edit.SetLogNumber(kBig + 100); + edit.SetNextFile(kBig + 200); + edit.SetLastSequence(kBig + 1000); + + std::string encoded; + + // Call back function to add extra customized builds. + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "VersionEdit::EncodeTo:NewFile4:CustomizeFields", [&](void* arg) { + std::string* str = reinterpret_cast(arg); + const std::string str1 = "s"; + PutLengthPrefixedSlice(str, str1); + }); + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + edit.EncodeTo(&encoded); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + + VersionEdit parsed; + Status s = parsed.DecodeFrom(encoded); + ASSERT_NOK(s); +} + +TEST_F(VersionEditTest, EncodeEmptyFile) { + VersionEdit edit; + edit.AddFile(0, 0, 0, 0, InternalKey(), InternalKey(), 0, 0, false); + std::string buffer; + ASSERT_TRUE(!edit.EncodeTo(&buffer)); +} + +TEST_F(VersionEditTest, ColumnFamilyTest) { + VersionEdit edit; + edit.SetColumnFamily(2); + edit.AddColumnFamily("column_family"); + edit.SetMaxColumnFamily(5); + TestEncodeDecode(edit); + + edit.Clear(); + edit.SetColumnFamily(3); + edit.DropColumnFamily(); + TestEncodeDecode(edit); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/version_set.cc b/external/rocksdb/db/version_set.cc new file mode 100755 index 0000000000..8bd6890ac2 --- /dev/null +++ b/external/rocksdb/db/version_set.cc @@ -0,0 +1,3550 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
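The two preceding tests drive EncodeTo through a sync point rather than a test-only API. The pattern, reduced to its core (a sketch assuming only the util/sync_point.h facility and coding helpers already seen above):

rocksdb::SyncPoint::GetInstance()->SetCallBack(
    "VersionEdit::EncodeTo:NewFile4:CustomizeFields", [](void* arg) {
      auto* dst = static_cast<std::string*>(arg);  // the half-built record
      PutVarint32(dst, 33);                        // ignorable custom tag
      PutLengthPrefixedSlice(dst, "extra");        // its payload
    });
rocksdb::SyncPoint::GetInstance()->EnableProcessing();
// ... every EncodeTo() call now appends the extra field ...
rocksdb::SyncPoint::GetInstance()->DisableProcessing();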
+ +#include "db/version_set.h" + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "db/compaction.h" +#include "db/filename.h" +#include "db/internal_stats.h" +#include "db/log_reader.h" +#include "db/log_writer.h" +#include "db/memtable.h" +#include "db/merge_context.h" +#include "db/merge_helper.h" +#include "db/pinned_iterators_manager.h" +#include "db/table_cache.h" +#include "db/version_builder.h" +#include "rocksdb/env.h" +#include "rocksdb/merge_operator.h" +#include "rocksdb/write_buffer_manager.h" +#include "table/format.h" +#include "table/get_context.h" +#include "table/internal_iterator.h" +#include "table/merger.h" +#include "table/meta_blocks.h" +#include "table/plain_table_factory.h" +#include "table/table_reader.h" +#include "table/two_level_iterator.h" +#include "util/coding.h" +#include "util/file_reader_writer.h" +#include "util/logging.h" +#include "util/perf_context_imp.h" +#include "util/stop_watch.h" +#include "util/sync_point.h" + +namespace rocksdb { + +namespace { + +// Find File in LevelFilesBrief data structure +// Within an index range defined by left and right +int FindFileInRange(const InternalKeyComparator& icmp, + const LevelFilesBrief& file_level, + const Slice& key, + uint32_t left, + uint32_t right) { + while (left < right) { + uint32_t mid = (left + right) / 2; + const FdWithKeyRange& f = file_level.files[mid]; + if (icmp.InternalKeyComparator::Compare(f.largest_key, key) < 0) { + // Key at "mid.largest" is < "target". Therefore all + // files at or before "mid" are uninteresting. + left = mid + 1; + } else { + // Key at "mid.largest" is >= "target". Therefore all files + // after "mid" are uninteresting. + right = mid; + } + } + return right; +} + +// Class to help choose the next file to search for the particular key. +// Searches and returns files level by level. +// We can search level-by-level since entries never hop across +// levels. Therefore we are guaranteed that if we find data +// in a smaller level, later levels are irrelevant (unless we +// are MergeInProgress). +class FilePicker { + public: + FilePicker(std::vector* files, const Slice& user_key, + const Slice& ikey, autovector* file_levels, + unsigned int num_levels, FileIndexer* file_indexer, + const Comparator* user_comparator, + const InternalKeyComparator* internal_comparator) + : num_levels_(num_levels), + curr_level_(static_cast(-1)), + returned_file_level_(static_cast(-1)), + hit_file_level_(static_cast(-1)), + search_left_bound_(0), + search_right_bound_(FileIndexer::kLevelMaxIndex), +#ifndef NDEBUG + files_(files), +#endif + level_files_brief_(file_levels), + is_hit_file_last_in_level_(false), + user_key_(user_key), + ikey_(ikey), + file_indexer_(file_indexer), + user_comparator_(user_comparator), + internal_comparator_(internal_comparator) { + // Setup member variables to search first level. + search_ended_ = !PrepareNextLevel(); + if (!search_ended_) { + // Prefetch Level 0 table data to avoid cache miss if possible. + for (unsigned int i = 0; i < (*level_files_brief_)[0].num_files; ++i) { + auto* r = (*level_files_brief_)[0].files[i].fd.table_reader; + if (r) { + r->Prepare(ikey); + } + } + } + } + + int GetCurrentLevel() { return returned_file_level_; } + + FdWithKeyRange* GetNextFile() { + while (!search_ended_) { // Loops over different levels. + while (curr_index_in_curr_level_ < curr_file_level_->num_files) { + // Loops over all files in current level. 
+ FdWithKeyRange* f = &curr_file_level_->files[curr_index_in_curr_level_]; + hit_file_level_ = curr_level_; + is_hit_file_last_in_level_ = + curr_index_in_curr_level_ == curr_file_level_->num_files - 1; + int cmp_largest = -1; + + // Do key range filtering of files or/and fractional cascading if: + // (1) not all the files are in level 0, or + // (2) there are more than 3 Level 0 files + // If there are only 3 or less level 0 files in the system, we skip + // the key range filtering. In this case, more likely, the system is + // highly tuned to minimize number of tables queried by each query, + // so it is unlikely that key range filtering is more efficient than + // querying the files. + if (num_levels_ > 1 || curr_file_level_->num_files > 3) { + // Check if key is within a file's range. If search left bound and + // right bound point to the same find, we are sure key falls in + // range. + assert( + curr_level_ == 0 || + curr_index_in_curr_level_ == start_index_in_curr_level_ || + user_comparator_->Compare(user_key_, + ExtractUserKey(f->smallest_key)) <= 0); + + int cmp_smallest = user_comparator_->Compare(user_key_, + ExtractUserKey(f->smallest_key)); + if (cmp_smallest >= 0) { + cmp_largest = user_comparator_->Compare(user_key_, + ExtractUserKey(f->largest_key)); + } + + // Setup file search bound for the next level based on the + // comparison results + if (curr_level_ > 0) { + file_indexer_->GetNextLevelIndex(curr_level_, + curr_index_in_curr_level_, + cmp_smallest, cmp_largest, + &search_left_bound_, + &search_right_bound_); + } + // Key falls out of current file's range + if (cmp_smallest < 0 || cmp_largest > 0) { + if (curr_level_ == 0) { + ++curr_index_in_curr_level_; + continue; + } else { + // Search next level. + break; + } + } + } +#ifndef NDEBUG + // Sanity check to make sure that the files are correctly sorted + if (prev_file_) { + if (curr_level_ != 0) { + int comp_sign = internal_comparator_->Compare( + prev_file_->largest_key, f->smallest_key); + assert(comp_sign < 0); + } else { + // level == 0, the current file cannot be newer than the previous + // one. Use compressed data structure, has no attribute seqNo + assert(curr_index_in_curr_level_ > 0); + assert(!NewestFirstBySeqNo(files_[0][curr_index_in_curr_level_], + files_[0][curr_index_in_curr_level_-1])); + } + } + prev_file_ = f; +#endif + returned_file_level_ = curr_level_; + if (curr_level_ > 0 && cmp_largest < 0) { + // No more files to search in this level. + search_ended_ = !PrepareNextLevel(); + } else { + ++curr_index_in_curr_level_; + } + return f; + } + // Start searching next level. + search_ended_ = !PrepareNextLevel(); + } + // Search ended. + return nullptr; + } + + // getter for current file level + // for GET_HIT_L0, GET_HIT_L1 & GET_HIT_L2_AND_UP counts + unsigned int GetHitFileLevel() { return hit_file_level_; } + + // Returns true if the most recent "hit file" (i.e., one returned by + // GetNextFile()) is at the last index in its level. 
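// Editorial aside, not part of the patch: the per-file membership test that
// GetNextFile applies, stripped of the FileIndexer bookkeeping. A hedged
// sketch over plain user-key Slices (the real code extracts user keys from
// the stored internal keys first):
static bool KeyInFileRange(const Comparator* ucmp, const Slice& user_key,
                           const Slice& smallest, const Slice& largest) {
  // Corresponds to cmp_smallest >= 0 && cmp_largest <= 0 in the code above.
  return ucmp->Compare(user_key, smallest) >= 0 &&
         ucmp->Compare(user_key, largest) <= 0;
}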
+ bool IsHitFileLastInLevel() { return is_hit_file_last_in_level_; } + + private: + unsigned int num_levels_; + unsigned int curr_level_; + unsigned int returned_file_level_; + unsigned int hit_file_level_; + int32_t search_left_bound_; + int32_t search_right_bound_; +#ifndef NDEBUG + std::vector* files_; +#endif + autovector* level_files_brief_; + bool search_ended_; + bool is_hit_file_last_in_level_; + LevelFilesBrief* curr_file_level_; + unsigned int curr_index_in_curr_level_; + unsigned int start_index_in_curr_level_; + Slice user_key_; + Slice ikey_; + FileIndexer* file_indexer_; + const Comparator* user_comparator_; + const InternalKeyComparator* internal_comparator_; +#ifndef NDEBUG + FdWithKeyRange* prev_file_; +#endif + + // Setup local variables to search next level. + // Returns false if there are no more levels to search. + bool PrepareNextLevel() { + curr_level_++; + while (curr_level_ < num_levels_) { + curr_file_level_ = &(*level_files_brief_)[curr_level_]; + if (curr_file_level_->num_files == 0) { + // When current level is empty, the search bound generated from upper + // level must be [0, -1] or [0, FileIndexer::kLevelMaxIndex] if it is + // also empty. + assert(search_left_bound_ == 0); + assert(search_right_bound_ == -1 || + search_right_bound_ == FileIndexer::kLevelMaxIndex); + // Since current level is empty, it will need to search all files in + // the next level + search_left_bound_ = 0; + search_right_bound_ = FileIndexer::kLevelMaxIndex; + curr_level_++; + continue; + } + + // Some files may overlap each other. We find + // all files that overlap user_key and process them in order from + // newest to oldest. In the context of merge-operator, this can occur at + // any level. Otherwise, it only occurs at Level-0 (since Put/Deletes + // are always compacted into a single entry). + int32_t start_index; + if (curr_level_ == 0) { + // On Level-0, we read through all files to check for overlap. + start_index = 0; + } else { + // On Level-n (n>=1), files are sorted. Binary search to find the + // earliest file whose largest key >= ikey. Search left bound and + // right bound are used to narrow the range. + if (search_left_bound_ == search_right_bound_) { + start_index = search_left_bound_; + } else if (search_left_bound_ < search_right_bound_) { + if (search_right_bound_ == FileIndexer::kLevelMaxIndex) { + search_right_bound_ = + static_cast(curr_file_level_->num_files) - 1; + } + start_index = + FindFileInRange(*internal_comparator_, *curr_file_level_, ikey_, + static_cast(search_left_bound_), + static_cast(search_right_bound_)); + } else { + // search_left_bound > search_right_bound, key does not exist in + // this level. Since no comparison is done in this level, it will + // need to search all files in the next level. + search_left_bound_ = 0; + search_right_bound_ = FileIndexer::kLevelMaxIndex; + curr_level_++; + continue; + } + } + start_index_in_curr_level_ = start_index; + curr_index_in_curr_level_ = start_index; +#ifndef NDEBUG + prev_file_ = nullptr; +#endif + return true; + } + // curr_level_ = num_levels_. So, no more levels to search. 
+ return false; + } +}; +} // anonymous namespace + +VersionStorageInfo::~VersionStorageInfo() { delete[] files_; } + +Version::~Version() { + assert(refs_ == 0); + + // Remove from linked list + prev_->next_ = next_; + next_->prev_ = prev_; + + // Drop references to files + for (int level = 0; level < storage_info_.num_levels_; level++) { + for (size_t i = 0; i < storage_info_.files_[level].size(); i++) { + FileMetaData* f = storage_info_.files_[level][i]; + assert(f->refs > 0); + f->refs--; + if (f->refs <= 0) { + if (f->table_reader_handle) { + cfd_->table_cache()->EraseHandle(f->fd, f->table_reader_handle); + f->table_reader_handle = nullptr; + } + vset_->obsolete_files_.push_back(f); + } + } + } +} + +int FindFile(const InternalKeyComparator& icmp, + const LevelFilesBrief& file_level, + const Slice& key) { + return FindFileInRange(icmp, file_level, key, 0, + static_cast(file_level.num_files)); +} + +void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level, + const std::vector& files, + Arena* arena) { + assert(file_level); + assert(arena); + + size_t num = files.size(); + file_level->num_files = num; + char* mem = arena->AllocateAligned(num * sizeof(FdWithKeyRange)); + file_level->files = new (mem)FdWithKeyRange[num]; + + for (size_t i = 0; i < num; i++) { + Slice smallest_key = files[i]->smallest.Encode(); + Slice largest_key = files[i]->largest.Encode(); + + // Copy key slice to sequential memory + size_t smallest_size = smallest_key.size(); + size_t largest_size = largest_key.size(); + mem = arena->AllocateAligned(smallest_size + largest_size); + memcpy(mem, smallest_key.data(), smallest_size); + memcpy(mem + smallest_size, largest_key.data(), largest_size); + + FdWithKeyRange& f = file_level->files[i]; + f.fd = files[i]->fd; + f.smallest_key = Slice(mem, smallest_size); + f.largest_key = Slice(mem + smallest_size, largest_size); + } +} + +static bool AfterFile(const Comparator* ucmp, + const Slice* user_key, const FdWithKeyRange* f) { + // nullptr user_key occurs before all keys and is therefore never after *f + return (user_key != nullptr && + ucmp->Compare(*user_key, ExtractUserKey(f->largest_key)) > 0); +} + +static bool BeforeFile(const Comparator* ucmp, + const Slice* user_key, const FdWithKeyRange* f) { + // nullptr user_key occurs after all keys and is therefore never before *f + return (user_key != nullptr && + ucmp->Compare(*user_key, ExtractUserKey(f->smallest_key)) < 0); +} + +bool SomeFileOverlapsRange( + const InternalKeyComparator& icmp, + bool disjoint_sorted_files, + const LevelFilesBrief& file_level, + const Slice* smallest_user_key, + const Slice* largest_user_key) { + const Comparator* ucmp = icmp.user_comparator(); + if (!disjoint_sorted_files) { + // Need to check against all files + for (size_t i = 0; i < file_level.num_files; i++) { + const FdWithKeyRange* f = &(file_level.files[i]); + if (AfterFile(ucmp, smallest_user_key, f) || + BeforeFile(ucmp, largest_user_key, f)) { + // No overlap + } else { + return true; // Overlap + } + } + return false; + } + + // Binary search over file list + uint32_t index = 0; + if (smallest_user_key != nullptr) { + // Find the earliest possible internal key for smallest_user_key + InternalKey small; + small.SetMaxPossibleForUserKey(*smallest_user_key); + index = FindFile(icmp, file_level, small.Encode()); + } + + if (index >= file_level.num_files) { + // beginning of range is after all files, so no overlap. 
+ return false; + } + + return !BeforeFile(ucmp, largest_user_key, &file_level.files[index]); +} + +namespace { + +// An internal iterator. For a given version/level pair, yields +// information about the files in the level. For a given entry, key() +// is the largest key that occurs in the file, and value() is an +// 16-byte value containing the file number and file size, both +// encoded using EncodeFixed64. +class LevelFileNumIterator : public InternalIterator { + public: + LevelFileNumIterator(const InternalKeyComparator& icmp, + const LevelFilesBrief* flevel) + : icmp_(icmp), + flevel_(flevel), + index_(static_cast(flevel->num_files)), + current_value_(0, 0, 0) { // Marks as invalid + } + virtual bool Valid() const override { return index_ < flevel_->num_files; } + virtual void Seek(const Slice& target) override { + index_ = FindFile(icmp_, *flevel_, target); + } + virtual void SeekToFirst() override { index_ = 0; } + virtual void SeekToLast() override { + index_ = (flevel_->num_files == 0) + ? 0 + : static_cast(flevel_->num_files) - 1; + } + virtual void Next() override { + assert(Valid()); + index_++; + } + virtual void Prev() override { + assert(Valid()); + if (index_ == 0) { + index_ = static_cast(flevel_->num_files); // Marks as invalid + } else { + index_--; + } + } + Slice key() const override { + assert(Valid()); + return flevel_->files[index_].largest_key; + } + Slice value() const override { + assert(Valid()); + + auto file_meta = flevel_->files[index_]; + current_value_ = file_meta.fd; + return Slice(reinterpret_cast(¤t_value_), + sizeof(FileDescriptor)); + } + virtual Status status() const override { return Status::OK(); } + + private: + const InternalKeyComparator icmp_; + const LevelFilesBrief* flevel_; + uint32_t index_; + mutable FileDescriptor current_value_; +}; + +class LevelFileIteratorState : public TwoLevelIteratorState { + public: + // @param skip_filters Disables loading/accessing the filter block + LevelFileIteratorState(TableCache* table_cache, + const ReadOptions& read_options, + const EnvOptions& env_options, + const InternalKeyComparator& icomparator, + HistogramImpl* file_read_hist, bool for_compaction, + bool prefix_enabled, bool skip_filters, int level) + : TwoLevelIteratorState(prefix_enabled), + table_cache_(table_cache), + read_options_(read_options), + env_options_(env_options), + icomparator_(icomparator), + file_read_hist_(file_read_hist), + for_compaction_(for_compaction), + skip_filters_(skip_filters), + level_(level) {} + + InternalIterator* NewSecondaryIterator(const Slice& meta_handle) override { + if (meta_handle.size() != sizeof(FileDescriptor)) { + return NewErrorInternalIterator( + Status::Corruption("FileReader invoked with unexpected value")); + } else { + const FileDescriptor* fd = + reinterpret_cast(meta_handle.data()); + return table_cache_->NewIterator( + read_options_, env_options_, icomparator_, *fd, + nullptr /* don't need reference to table*/, file_read_hist_, + for_compaction_, nullptr /* arena */, skip_filters_, level_); + } + } + + bool PrefixMayMatch(const Slice& internal_key) override { + return true; + } + + private: + TableCache* table_cache_; + const ReadOptions read_options_; + const EnvOptions& env_options_; + const InternalKeyComparator& icomparator_; + HistogramImpl* file_read_hist_; + bool for_compaction_; + bool skip_filters_; + int level_; +}; + +// A wrapper of version builder which references the current version in +// constructor and unref it in the destructor. 
+// Both of the constructor and destructor need to be called inside DB Mutex. +class BaseReferencedVersionBuilder { + public: + explicit BaseReferencedVersionBuilder(ColumnFamilyData* cfd) + : version_builder_(new VersionBuilder( + cfd->current()->version_set()->env_options(), cfd->table_cache(), + cfd->current()->storage_info(), cfd->ioptions()->info_log)), + version_(cfd->current()) { + version_->Ref(); + } + ~BaseReferencedVersionBuilder() { + delete version_builder_; + version_->Unref(); + } + VersionBuilder* version_builder() { return version_builder_; } + + private: + VersionBuilder* version_builder_; + Version* version_; +}; +} // anonymous namespace + +Status Version::GetTableProperties(std::shared_ptr* tp, + const FileMetaData* file_meta, + const std::string* fname) const { + auto table_cache = cfd_->table_cache(); + auto ioptions = cfd_->ioptions(); + Status s = table_cache->GetTableProperties( + vset_->env_options_, cfd_->internal_comparator(), file_meta->fd, + tp, true /* no io */); + if (s.ok()) { + return s; + } + + // We only ignore error type `Incomplete` since it's by design that we + // disallow table when it's not in table cache. + if (!s.IsIncomplete()) { + return s; + } + + // 2. Table is not present in table cache, we'll read the table properties + // directly from the properties block in the file. + std::unique_ptr file; + if (fname != nullptr) { + s = ioptions->env->NewRandomAccessFile( + *fname, &file, vset_->env_options_); + } else { + s = ioptions->env->NewRandomAccessFile( + TableFileName(vset_->db_options_->db_paths, file_meta->fd.GetNumber(), + file_meta->fd.GetPathId()), + &file, vset_->env_options_); + } + if (!s.ok()) { + return s; + } + + TableProperties* raw_table_properties; + // By setting the magic number to kInvalidTableMagicNumber, we can by + // pass the magic number check in the footer. + std::unique_ptr file_reader( + new RandomAccessFileReader(std::move(file))); + s = ReadTableProperties( + file_reader.get(), file_meta->fd.GetFileSize(), + Footer::kInvalidTableMagicNumber /* table's magic number */, *ioptions, &raw_table_properties); + if (!s.ok()) { + return s; + } + RecordTick(ioptions->statistics, NUMBER_DIRECT_LOAD_TABLE_PROPERTIES); + + *tp = std::shared_ptr(raw_table_properties); + return s; +} + +Status Version::GetPropertiesOfAllTables(TablePropertiesCollection* props) { + Status s; + for (int level = 0; level < storage_info_.num_levels_; level++) { + s = GetPropertiesOfAllTables(props, level); + if (!s.ok()) { + return s; + } + } + + return Status::OK(); +} + +Status Version::GetPropertiesOfAllTables(TablePropertiesCollection* props, + int level) { + for (const auto& file_meta : storage_info_.files_[level]) { + auto fname = + TableFileName(vset_->db_options_->db_paths, file_meta->fd.GetNumber(), + file_meta->fd.GetPathId()); + // 1. If the table is already present in table cache, load table + // properties from there. + std::shared_ptr table_properties; + Status s = GetTableProperties(&table_properties, file_meta, &fname); + if (s.ok()) { + props->insert({fname, table_properties}); + } else { + return s; + } + } + + return Status::OK(); +} + +Status Version::GetPropertiesOfTablesInRange( + const Range* range, std::size_t n, TablePropertiesCollection* props) const { + for (int level = 0; level < storage_info_.num_non_empty_levels(); level++) { + for (decltype(n) i = 0; i < n; i++) { + // Convert user_key into a corresponding internal key. 
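// Editorial aside, not part of the patch: BaseReferencedVersionBuilder above
// is the RAII form of the manual Ref()/Unref() discipline used throughout
// this file. A minimal sketch of the same idea (illustrative only; the real
// class also owns a VersionBuilder, and both its constructor and destructor
// must run under the DB mutex):
class VersionRefGuard {
 public:
  explicit VersionRefGuard(Version* v) : v_(v) { v_->Ref(); }
  ~VersionRefGuard() { v_->Unref(); }
 private:
  Version* v_;
};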
+ InternalKey k1(range[i].start, kMaxSequenceNumber, kValueTypeForSeek); + InternalKey k2(range[i].limit, kMaxSequenceNumber, kValueTypeForSeek); + std::vector files; + storage_info_.GetOverlappingInputs(level, &k1, &k2, &files, -1, nullptr, + false); + for (const auto& file_meta : files) { + auto fname = + TableFileName(vset_->db_options_->db_paths, + file_meta->fd.GetNumber(), file_meta->fd.GetPathId()); + if (props->count(fname) == 0) { + // 1. If the table is already present in table cache, load table + // properties from there. + std::shared_ptr table_properties; + Status s = GetTableProperties(&table_properties, file_meta, &fname); + if (s.ok()) { + props->insert({fname, table_properties}); + } else { + return s; + } + } + } + } + } + + return Status::OK(); +} + +Status Version::GetAggregatedTableProperties( + std::shared_ptr* tp, int level) { + TablePropertiesCollection props; + Status s; + if (level < 0) { + s = GetPropertiesOfAllTables(&props); + } else { + s = GetPropertiesOfAllTables(&props, level); + } + if (!s.ok()) { + return s; + } + + auto* new_tp = new TableProperties(); + for (const auto& item : props) { + new_tp->Add(*item.second); + } + tp->reset(new_tp); + return Status::OK(); +} + +size_t Version::GetMemoryUsageByTableReaders() { + size_t total_usage = 0; + for (auto& file_level : storage_info_.level_files_brief_) { + for (size_t i = 0; i < file_level.num_files; i++) { + total_usage += cfd_->table_cache()->GetMemoryUsageByTableReader( + vset_->env_options_, cfd_->internal_comparator(), + file_level.files[i].fd); + } + } + return total_usage; +} + +void Version::GetColumnFamilyMetaData(ColumnFamilyMetaData* cf_meta) { + assert(cf_meta); + assert(cfd_); + + cf_meta->name = cfd_->GetName(); + cf_meta->size = 0; + cf_meta->file_count = 0; + cf_meta->levels.clear(); + + auto* ioptions = cfd_->ioptions(); + auto* vstorage = storage_info(); + + for (int level = 0; level < cfd_->NumberLevels(); level++) { + uint64_t level_size = 0; + cf_meta->file_count += vstorage->LevelFiles(level).size(); + std::vector files; + for (const auto& file : vstorage->LevelFiles(level)) { + uint32_t path_id = file->fd.GetPathId(); + std::string file_path; + if (path_id < ioptions->db_paths.size()) { + file_path = ioptions->db_paths[path_id].path; + } else { + assert(!ioptions->db_paths.empty()); + file_path = ioptions->db_paths.back().path; + } + files.emplace_back( + MakeTableFileName("", file->fd.GetNumber()), + file_path, + file->fd.GetFileSize(), + file->smallest_seqno, + file->largest_seqno, + file->smallest.user_key().ToString(), + file->largest.user_key().ToString(), + file->being_compacted); + level_size += file->fd.GetFileSize(); + } + cf_meta->levels.emplace_back( + level, level_size, std::move(files)); + cf_meta->size += level_size; + } +} + + +uint64_t VersionStorageInfo::GetEstimatedActiveKeys() const { + // Estimation will be inaccurate when: + // (1) there exist merge keys + // (2) keys are directly overwritten + // (3) deletion on non-existing keys + // (4) low number of samples + if (current_num_samples_ == 0) { + return 0; + } + + if (current_num_non_deletions_ <= current_num_deletions_) { + return 0; + } + + uint64_t est = current_num_non_deletions_ - current_num_deletions_; + + uint64_t file_count = 0; + for (int level = 0; level < num_levels_; ++level) { + file_count += files_[level].size(); + } + + if (current_num_samples_ < file_count) { + // casting to avoid overflowing + return + static_cast( + (est * static_cast(file_count) / current_num_samples_) + ); + } else { + 
return est; + } +} + +double VersionStorageInfo::GetEstimatedCompressionRatioAtLevel( + int level) const { + assert(level < num_levels_); + uint64_t sum_file_size_bytes = 0; + uint64_t sum_data_size_bytes = 0; + for (auto* file_meta : files_[level]) { + sum_file_size_bytes += file_meta->fd.GetFileSize(); + sum_data_size_bytes += file_meta->raw_key_size + file_meta->raw_value_size; + } + if (sum_file_size_bytes == 0) { + return -1.0; + } + return static_cast(sum_data_size_bytes) / sum_file_size_bytes; +} + +void Version::AddIterators(const ReadOptions& read_options, + const EnvOptions& soptions, + MergeIteratorBuilder* merge_iter_builder) { + assert(storage_info_.finalized_); + + if (storage_info_.num_non_empty_levels() == 0) { + // No file in the Version. + return; + } + + auto* arena = merge_iter_builder->GetArena(); + + // Merge all level zero files together since they may overlap + for (size_t i = 0; i < storage_info_.LevelFilesBrief(0).num_files; i++) { + const auto& file = storage_info_.LevelFilesBrief(0).files[i]; + merge_iter_builder->AddIterator(cfd_->table_cache()->NewIterator( + read_options, soptions, cfd_->internal_comparator(), file.fd, nullptr, + cfd_->internal_stats()->GetFileReadHist(0), false, arena, + false /* skip_filters */, 0 /* level */)); + } + + // For levels > 0, we can use a concatenating iterator that sequentially + // walks through the non-overlapping files in the level, opening them + // lazily. + for (int level = 1; level < storage_info_.num_non_empty_levels(); level++) { + if (storage_info_.LevelFilesBrief(level).num_files != 0) { + auto* mem = arena->AllocateAligned(sizeof(LevelFileIteratorState)); + auto* state = new (mem) + LevelFileIteratorState(cfd_->table_cache(), read_options, soptions, + cfd_->internal_comparator(), + cfd_->internal_stats()->GetFileReadHist(level), + false /* for_compaction */, + cfd_->ioptions()->prefix_extractor != nullptr, + IsFilterSkipped(level), level); + mem = arena->AllocateAligned(sizeof(LevelFileNumIterator)); + auto* first_level_iter = new (mem) LevelFileNumIterator( + cfd_->internal_comparator(), &storage_info_.LevelFilesBrief(level)); + merge_iter_builder->AddIterator( + NewTwoLevelIterator(state, first_level_iter, arena, false)); + } + } +} + +VersionStorageInfo::VersionStorageInfo( + const InternalKeyComparator* internal_comparator, + const Comparator* user_comparator, int levels, + CompactionStyle compaction_style, VersionStorageInfo* ref_vstorage) + : internal_comparator_(internal_comparator), + user_comparator_(user_comparator), + // cfd is nullptr if Version is dummy + num_levels_(levels), + num_non_empty_levels_(0), + file_indexer_(user_comparator), + compaction_style_(compaction_style), + files_(new std::vector[num_levels_]), + base_level_(num_levels_ == 1 ? 
-1 : 1), + files_by_compaction_pri_(num_levels_), + level0_non_overlapping_(false), + next_file_to_compact_by_size_(num_levels_), + compaction_score_(num_levels_), + compaction_level_(num_levels_), + l0_delay_trigger_count_(0), + accumulated_file_size_(0), + accumulated_raw_key_size_(0), + accumulated_raw_value_size_(0), + accumulated_num_non_deletions_(0), + accumulated_num_deletions_(0), + current_num_non_deletions_(0), + current_num_deletions_(0), + current_num_samples_(0), + estimated_compaction_needed_bytes_(0), + finalized_(false) { + if (ref_vstorage != nullptr) { + accumulated_file_size_ = ref_vstorage->accumulated_file_size_; + accumulated_raw_key_size_ = ref_vstorage->accumulated_raw_key_size_; + accumulated_raw_value_size_ = ref_vstorage->accumulated_raw_value_size_; + accumulated_num_non_deletions_ = + ref_vstorage->accumulated_num_non_deletions_; + accumulated_num_deletions_ = ref_vstorage->accumulated_num_deletions_; + current_num_non_deletions_ = ref_vstorage->current_num_non_deletions_; + current_num_deletions_ = ref_vstorage->current_num_deletions_; + current_num_samples_ = ref_vstorage->current_num_samples_; + } +} + +Version::Version(ColumnFamilyData* column_family_data, VersionSet* vset, + uint64_t version_number) + : env_(vset->env_), + cfd_(column_family_data), + info_log_((cfd_ == nullptr) ? nullptr : cfd_->ioptions()->info_log), + db_statistics_((cfd_ == nullptr) ? nullptr + : cfd_->ioptions()->statistics), + table_cache_((cfd_ == nullptr) ? nullptr : cfd_->table_cache()), + merge_operator_((cfd_ == nullptr) ? nullptr + : cfd_->ioptions()->merge_operator), + storage_info_((cfd_ == nullptr) ? nullptr : &cfd_->internal_comparator(), + (cfd_ == nullptr) ? nullptr : cfd_->user_comparator(), + cfd_ == nullptr ? 0 : cfd_->NumberLevels(), + cfd_ == nullptr ? kCompactionStyleLevel + : cfd_->ioptions()->compaction_style, + (cfd_ == nullptr || cfd_->current() == nullptr) + ? nullptr + : cfd_->current()->storage_info()), + vset_(vset), + next_(this), + prev_(this), + refs_(0), + version_number_(version_number) {} + +void Version::Get(const ReadOptions& read_options, const LookupKey& k, + std::string* value, Status* status, + MergeContext* merge_context, bool* value_found, + bool* key_exists, SequenceNumber* seq) { + Slice ikey = k.internal_key(); + Slice user_key = k.user_key(); + + assert(status->ok() || status->IsMergeInProgress()); + + if (key_exists != nullptr) { + // will falsify below if not found + *key_exists = true; + } + + PinnedIteratorsManager pinned_iters_mgr; + GetContext get_context( + user_comparator(), merge_operator_, info_log_, db_statistics_, + status->ok() ? GetContext::kNotFound : GetContext::kMerge, user_key, + value, value_found, merge_context, this->env_, seq, + merge_operator_ ? 
&pinned_iters_mgr : nullptr);
+
+  // Pin blocks that we read to hold merge operands
+  if (merge_operator_) {
+    pinned_iters_mgr.StartPinning();
+  }
+
+  FilePicker fp(
+      storage_info_.files_, user_key, ikey, &storage_info_.level_files_brief_,
+      storage_info_.num_non_empty_levels_, &storage_info_.file_indexer_,
+      user_comparator(), internal_comparator());
+  FdWithKeyRange* f = fp.GetNextFile();
+  while (f != nullptr) {
+    *status = table_cache_->Get(
+        read_options, *internal_comparator(), f->fd, ikey, &get_context,
+        cfd_->internal_stats()->GetFileReadHist(fp.GetHitFileLevel()),
+        IsFilterSkipped(static_cast<int>(fp.GetHitFileLevel()),
+                        fp.IsHitFileLastInLevel()),
+        fp.GetCurrentLevel());
+    // TODO: examine the behavior for corrupted key
+    if (!status->ok()) {
+      return;
+    }
+
+    switch (get_context.State()) {
+      case GetContext::kNotFound:
+        // Keep searching in other files
+        break;
+      case GetContext::kFound:
+        if (fp.GetHitFileLevel() == 0) {
+          RecordTick(db_statistics_, GET_HIT_L0);
+        } else if (fp.GetHitFileLevel() == 1) {
+          RecordTick(db_statistics_, GET_HIT_L1);
+        } else if (fp.GetHitFileLevel() >= 2) {
+          RecordTick(db_statistics_, GET_HIT_L2_AND_UP);
+        }
+        return;
+      case GetContext::kDeleted:
+        // Use empty error message for speed
+        *status = Status::NotFound();
+        return;
+      case GetContext::kCorrupt:
+        *status = Status::Corruption("corrupted key for ", user_key);
+        return;
+      case GetContext::kMerge:
+        break;
+    }
+    f = fp.GetNextFile();
+  }
+
+  if (GetContext::kMerge == get_context.State()) {
+    if (!merge_operator_) {
+      *status = Status::InvalidArgument(
+          "merge_operator is not properly initialized.");
+      return;
+    }
+    // merge_operands are in saver and we hit the beginning of the key history
+    // do a final merge of nullptr and operands;
+    *status = MergeHelper::TimedFullMerge(merge_operator_, user_key, nullptr,
+                                          merge_context->GetOperands(), value,
+                                          info_log_, db_statistics_, env_);
+  } else {
+    if (key_exists != nullptr) {
+      *key_exists = false;
+    }
+    *status = Status::NotFound();  // Use an empty error message for speed
+  }
+}
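The probe loop above visits candidate files newest-first (FilePicker yields L0 files in order, then at most one file per deeper level) and stops at the first authoritative outcome; only kMerge keeps the search alive while operands accumulate. A minimal standalone sketch of that control flow, using hypothetical names (ProbeState, GetFromSortedRuns) that are not RocksDB's API:

    #include <functional>
    #include <optional>
    #include <string>
    #include <vector>

    enum class ProbeState { kNotFound, kFound, kDeleted, kMerge };

    // Each probe checks one sorted run; runs are ordered newest to oldest.
    std::optional<std::string> GetFromSortedRuns(
        const std::vector<std::function<ProbeState(std::string*)>>& runs) {
      std::string value;
      for (const auto& probe : runs) {
        switch (probe(&value)) {
          case ProbeState::kFound:    return value;         // newest version wins
          case ProbeState::kDeleted:  return std::nullopt;  // tombstone shadows older runs
          case ProbeState::kMerge:    break;  // keep collecting merge operands
          case ProbeState::kNotFound: break;  // fall through to an older run
        }
      }
      return std::nullopt;  // key history exhausted
    }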
+bool Version::IsFilterSkipped(int level, bool is_file_last_in_level) {
+  // Reaching the bottom level implies misses at all upper levels, so we'll
+  // skip checking the filters when we predict a hit.
+  return cfd_->ioptions()->optimize_filters_for_hits &&
+         (level > 0 || is_file_last_in_level) &&
+         level == storage_info_.num_non_empty_levels() - 1;
+}
+
+void VersionStorageInfo::GenerateLevelFilesBrief() {
+  level_files_brief_.resize(num_non_empty_levels_);
+  for (int level = 0; level < num_non_empty_levels_; level++) {
+    DoGenerateLevelFilesBrief(
+        &level_files_brief_[level], files_[level], &arena_);
+  }
+}
+
+void Version::PrepareApply(
+    const MutableCFOptions& mutable_cf_options,
+    bool update_stats) {
+  UpdateAccumulatedStats(update_stats);
+  storage_info_.UpdateNumNonEmptyLevels();
+  storage_info_.CalculateBaseBytes(*cfd_->ioptions(), mutable_cf_options);
+  storage_info_.UpdateFilesByCompactionPri(mutable_cf_options);
+  storage_info_.GenerateFileIndexer();
+  storage_info_.GenerateLevelFilesBrief();
+  storage_info_.GenerateLevel0NonOverlapping();
+}
+
+bool Version::MaybeInitializeFileMetaData(FileMetaData* file_meta) {
+  if (file_meta->init_stats_from_file ||
+      file_meta->compensated_file_size > 0) {
+    return false;
+  }
+  std::shared_ptr<const TableProperties> tp;
+  Status s = GetTableProperties(&tp, file_meta);
+  file_meta->init_stats_from_file = true;
+  if (!s.ok()) {
+    Log(InfoLogLevel::ERROR_LEVEL, vset_->db_options_->info_log,
+        "Unable to load table properties for file %" PRIu64 " --- %s\n",
+        file_meta->fd.GetNumber(), s.ToString().c_str());
+    return false;
+  }
+  if (tp.get() == nullptr) return false;
+  file_meta->num_entries = tp->num_entries;
+  file_meta->num_deletions = GetDeletedKeys(tp->user_collected_properties);
+  file_meta->raw_value_size = tp->raw_value_size;
+  file_meta->raw_key_size = tp->raw_key_size;
+
+  return true;
+}
+
+void VersionStorageInfo::UpdateAccumulatedStats(FileMetaData* file_meta) {
+  assert(file_meta->init_stats_from_file);
+  accumulated_file_size_ += file_meta->fd.GetFileSize();
+  accumulated_raw_key_size_ += file_meta->raw_key_size;
+  accumulated_raw_value_size_ += file_meta->raw_value_size;
+  accumulated_num_non_deletions_ +=
+      file_meta->num_entries - file_meta->num_deletions;
+  accumulated_num_deletions_ += file_meta->num_deletions;
+
+  current_num_non_deletions_ +=
+      file_meta->num_entries - file_meta->num_deletions;
+  current_num_deletions_ += file_meta->num_deletions;
+  current_num_samples_++;
+}
+
+void VersionStorageInfo::RemoveCurrentStats(FileMetaData* file_meta) {
+  if (file_meta->init_stats_from_file) {
+    current_num_non_deletions_ -=
+        file_meta->num_entries - file_meta->num_deletions;
+    current_num_deletions_ -= file_meta->num_deletions;
+    current_num_samples_--;
+  }
+}
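The two counter families updated above serve different purposes: the accumulated_* counters only ever grow as files are sampled, while the current_* counters follow the files live in this version. A hypothetical standalone sketch (not RocksDB's API) of how such counters yield the average value size consumed by the compensation logic further below:

    #include <cstdint>

    struct FileStats {
      uint64_t num_entries;
      uint64_t num_deletions;
      uint64_t raw_value_size;
    };

    struct StatsAccumulator {
      uint64_t accumulated_raw_value_size = 0;
      uint64_t accumulated_num_non_deletions = 0;

      void Add(const FileStats& f) {
        accumulated_raw_value_size += f.raw_value_size;
        accumulated_num_non_deletions += f.num_entries - f.num_deletions;
      }

      // Average size of a live (non-deletion) value over everything sampled.
      uint64_t GetAverageValueSize() const {
        return accumulated_num_non_deletions == 0
                   ? 0
                   : accumulated_raw_value_size / accumulated_num_non_deletions;
      }
    };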
+void Version::UpdateAccumulatedStats(bool update_stats) {
+  if (update_stats) {
+    // maximum number of table properties loaded from files.
+    const int kMaxInitCount = 20;
+    int init_count = 0;
+    // here only the first kMaxInitCount files which haven't been
+    // initialized from file will be updated with num_deletions.
+    // The motivation here is to cap the maximum I/O per Version creation.
+    // The reason for choosing files from lower-level instead of higher-level
+    // is that such design is able to propagate the initialization from
+    // lower-level to higher-level: when the num_deletions of lower-level
+    // files are updated, the lower-level files get an accurate
+    // compensated_file_size, so lower-level to higher-level compaction
+    // will be triggered, which creates higher-level files whose
+    // num_deletions will be updated here.
+    for (int level = 0;
+         level < storage_info_.num_levels_ && init_count < kMaxInitCount;
+         ++level) {
+      for (auto* file_meta : storage_info_.files_[level]) {
+        if (MaybeInitializeFileMetaData(file_meta)) {
+          // each FileMeta will be initialized only once.
+          storage_info_.UpdateAccumulatedStats(file_meta);
+          // when option "max_open_files" is -1, all the file metadata has
+          // already been read, so MaybeInitializeFileMetaData() won't incur
+          // any I/O cost.
+          if (vset_->db_options_->max_open_files == -1) {
+            continue;
+          }
+          if (++init_count >= kMaxInitCount) {
+            break;
+          }
+        }
+      }
+    }
+    // In case all sampled files contain only deletion entries, we load the
+    // table properties of a file in a higher level to initialize that value.
+    for (int level = storage_info_.num_levels_ - 1;
+         storage_info_.accumulated_raw_value_size_ == 0 && level >= 0;
+         --level) {
+      for (int i = static_cast<int>(storage_info_.files_[level].size()) - 1;
+           storage_info_.accumulated_raw_value_size_ == 0 && i >= 0; --i) {
+        if (MaybeInitializeFileMetaData(storage_info_.files_[level][i])) {
+          storage_info_.UpdateAccumulatedStats(storage_info_.files_[level][i]);
+        }
+      }
+    }
+  }
+
+  storage_info_.ComputeCompensatedSizes();
+}
+
+void VersionStorageInfo::ComputeCompensatedSizes() {
+  static const int kDeletionWeightOnCompaction = 2;
+  uint64_t average_value_size = GetAverageValueSize();
+
+  // compute the compensated size
+  for (int level = 0; level < num_levels_; level++) {
+    for (auto* file_meta : files_[level]) {
+      // Here we only compute compensated_file_size for those file_meta
+      // which compensated_file_size is uninitialized (== 0). This is true only
+      // for files that have been created right now and no other thread has
+      // access to them. That's why we can safely mutate compensated_file_size.
+      if (file_meta->compensated_file_size == 0) {
+        file_meta->compensated_file_size = file_meta->fd.GetFileSize();
+        // We boost the size of deletion entries of a file only when the
+        // number of deletion entries is greater than the number of
+        // non-deletion entries in the file. The motivation here is that in
+        // a stable workload, the number of deletion entries should be roughly
+        // equal to the number of non-deletion entries. If we compensated the
+        // size of deletion entries in a stable workload, the deletion
+        // compensation logic might introduce unwanted effects which change
+        // the shape of the LSM tree.
+        if (file_meta->num_deletions * 2 >= file_meta->num_entries) {
+          file_meta->compensated_file_size +=
+              (file_meta->num_deletions * 2 - file_meta->num_entries) *
+              average_value_size * kDeletionWeightOnCompaction;
+        }
+      }
+    }
+  }
+}
+
+int VersionStorageInfo::MaxInputLevel() const {
+  if (compaction_style_ == kCompactionStyleLevel) {
+    return num_levels() - 2;
+  }
+  return 0;
+}
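A small worked sketch of the deletion-compensation rule above, as standalone code (hypothetical helper, not RocksDB's API): a file dominated by tombstones has its effective size boosted so compaction prefers to pick it up.

    #include <cstdint>

    uint64_t CompensatedSize(uint64_t file_size, uint64_t num_entries,
                             uint64_t num_deletions, uint64_t avg_value_size) {
      const uint64_t kDeletionWeightOnCompaction = 2;
      uint64_t compensated = file_size;
      // Boost only when deletions outnumber non-deletions
      // (2d >= n is equivalent to d >= n - d).
      if (num_deletions * 2 >= num_entries) {
        compensated += (num_deletions * 2 - num_entries) * avg_value_size *
                       kDeletionWeightOnCompaction;
      }
      return compensated;
    }
    // E.g. a 1 MB file with 1000 entries, 800 of them deletions, and a
    // 512-byte average value is treated as 1 MB + 600 * 512 * 2 ≈ 1.6 MB.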
+void VersionStorageInfo::EstimateCompactionBytesNeeded(
+    const MutableCFOptions& mutable_cf_options) {
+  // Only implemented for level-based compaction
+  if (compaction_style_ != kCompactionStyleLevel) {
+    estimated_compaction_needed_bytes_ = 0;
+    return;
+  }
+
+  // Start from Level 0, if level 0 qualifies compaction to level 1,
+  // we estimate the size of compaction.
+  // Then we move on to the next level and see whether it qualifies compaction
+  // to the next level. The size of the level is estimated as the actual size
+  // on the level plus the input bytes from the previous level if there is any.
+  // If it exceeds, take the exceeded bytes as compaction input and add the
+  // size of the compaction to the total size.
+  // We keep doing this for level 2, 3, etc., until the last level and return
+  // the accumulated bytes.
+
+  uint64_t bytes_compact_to_next_level = 0;
+  // Level 0
+  bool level0_compact_triggered = false;
+  if (static_cast<int>(files_[0].size()) >
+      mutable_cf_options.level0_file_num_compaction_trigger) {
+    level0_compact_triggered = true;
+    for (auto* f : files_[0]) {
+      bytes_compact_to_next_level += f->fd.GetFileSize();
+    }
+    estimated_compaction_needed_bytes_ = bytes_compact_to_next_level;
+  } else {
+    estimated_compaction_needed_bytes_ = 0;
+  }
+
+  // Level 1 and up.
+  uint64_t bytes_next_level = 0;
+  for (int level = base_level(); level <= MaxInputLevel(); level++) {
+    uint64_t level_size = 0;
+    if (bytes_next_level > 0) {
+#ifndef NDEBUG
+      uint64_t level_size2 = 0;
+      for (auto* f : files_[level]) {
+        level_size2 += f->fd.GetFileSize();
+      }
+      assert(level_size2 == bytes_next_level);
+#endif
+      level_size = bytes_next_level;
+      bytes_next_level = 0;
+    } else {
+      for (auto* f : files_[level]) {
+        level_size += f->fd.GetFileSize();
+      }
+    }
+    if (level == base_level() && level0_compact_triggered) {
+      // Add base level size to compaction if level0 compaction triggered.
+      estimated_compaction_needed_bytes_ += level_size;
+    }
+    // Add size added by previous compaction
+    level_size += bytes_compact_to_next_level;
+    bytes_compact_to_next_level = 0;
+    uint64_t level_target = MaxBytesForLevel(level);
+    if (level_size > level_target) {
+      bytes_compact_to_next_level = level_size - level_target;
+      // Estimate the actual compaction fan-out ratio as size ratio between
+      // the two levels.
+
+      assert(bytes_next_level == 0);
+      if (level + 1 < num_levels_) {
+        for (auto* f : files_[level + 1]) {
+          bytes_next_level += f->fd.GetFileSize();
+        }
+      }
+      if (bytes_next_level > 0) {
+        assert(level_size > 0);
+        estimated_compaction_needed_bytes_ += static_cast<uint64_t>(
+            static_cast<double>(bytes_compact_to_next_level) *
+            (static_cast<double>(bytes_next_level) /
+                 static_cast<double>(level_size) +
+             1));
+      }
+    }
+  }
+}
+
+void VersionStorageInfo::ComputeCompactionScore(
+    const MutableCFOptions& mutable_cf_options) {
+  for (int level = 0; level <= MaxInputLevel(); level++) {
+    double score;
+    if (level == 0) {
+      // We treat level-0 specially by bounding the number of files
+      // instead of number of bytes for two reasons:
+      //
+      // (1) With larger write-buffer sizes, it is nice not to do too
+      // many level-0 compactions.
+      //
+      // (2) The files in level-0 are merged on every read and
+      // therefore we wish to avoid too many files when the individual
+      // file size is small (perhaps because of a small write-buffer
+      // setting, or very high compression ratios, or lots of
+      // overwrites/deletions).
+      int num_sorted_runs = 0;
+      uint64_t total_size = 0;
+      for (auto* f : files_[level]) {
+        if (!f->being_compacted) {
+          total_size += f->compensated_file_size;
+          num_sorted_runs++;
+        }
+      }
+      if (compaction_style_ == kCompactionStyleUniversal) {
+        // For universal compaction, we use level0 score to indicate
+        // compaction score for the whole DB. Adding other levels as if
+        // they are L0 files.
+        for (int i = 1; i < num_levels(); i++) {
+          if (!files_[i].empty() && !files_[i][0]->being_compacted) {
+            num_sorted_runs++;
+          }
+        }
+      }
+
+      if (compaction_style_ == kCompactionStyleFIFO) {
+        score = static_cast<double>(total_size) /
+                mutable_cf_options.compaction_options_fifo.max_table_files_size;
+      } else {
+        score = static_cast<double>(num_sorted_runs) /
+                mutable_cf_options.level0_file_num_compaction_trigger;
+      }
+    } else {
+      // Compute the ratio of current size to size limit.
+      uint64_t level_bytes_no_compacting = 0;
+      for (auto f : files_[level]) {
+        if (!f->being_compacted) {
+          level_bytes_no_compacting += f->compensated_file_size;
+        }
+      }
+      score = static_cast<double>(level_bytes_no_compacting) /
+              MaxBytesForLevel(level);
+    }
+    compaction_level_[level] = level;
+    compaction_score_[level] = score;
+  }
+
+  // sort all the levels based on their score. Higher scores get listed
+  // first. Use bubble sort because the number of entries is small.
+  for (int i = 0; i < num_levels() - 2; i++) {
+    for (int j = i + 1; j < num_levels() - 1; j++) {
+      if (compaction_score_[i] < compaction_score_[j]) {
+        double score = compaction_score_[i];
+        int level = compaction_level_[i];
+        compaction_score_[i] = compaction_score_[j];
+        compaction_level_[i] = compaction_level_[j];
+        compaction_score_[j] = score;
+        compaction_level_[j] = level;
+      }
+    }
+  }
+  ComputeFilesMarkedForCompaction();
+  EstimateCompactionBytesNeeded(mutable_cf_options);
+}
+
+void VersionStorageInfo::ComputeFilesMarkedForCompaction() {
+  files_marked_for_compaction_.clear();
+  int last_qualify_level = 0;
+
+  // Do not include files from the last level with data.
+  // If the table properties collector suggests a file on the last level,
+  // we should not move it to a new level.
+  for (int level = num_levels() - 1; level >= 1; level--) {
+    if (!files_[level].empty()) {
+      last_qualify_level = level - 1;
+      break;
+    }
+  }
+
+  for (int level = 0; level <= last_qualify_level; level++) {
+    for (auto* f : files_[level]) {
+      if (!f->being_compacted && f->marked_for_compaction) {
+        files_marked_for_compaction_.emplace_back(level, f);
+      }
+    }
+  }
+}
+
+namespace {
+
+// used to sort files by size
+struct Fsize {
+  size_t index;
+  FileMetaData* file;
+};
+
+// Comparator that is used to sort files based on their size
+// In normal mode: descending size
+bool CompareCompensatedSizeDescending(const Fsize& first, const Fsize& second) {
+  return (first.file->compensated_file_size >
+          second.file->compensated_file_size);
+}
+}  // anonymous namespace
+
+void VersionStorageInfo::AddFile(int level, FileMetaData* f, Logger* info_log) {
+  auto* level_files = &files_[level];
+  // Must not overlap
+#ifndef NDEBUG
+  if (level > 0 && !level_files->empty() &&
+      internal_comparator_->Compare(
+          (*level_files)[level_files->size() - 1]->largest, f->smallest) >= 0) {
+    auto* f2 = (*level_files)[level_files->size() - 1];
+    if (info_log != nullptr) {
+      Error(info_log, "Adding new file %" PRIu64
+                      " range (%s, %s) to level %d but overlapping "
+                      "with existing file %" PRIu64 " %s %s",
+            f->fd.GetNumber(), f->smallest.DebugString(true).c_str(),
+            f->largest.DebugString(true).c_str(), level, f2->fd.GetNumber(),
+            f2->smallest.DebugString(true).c_str(),
+            f2->largest.DebugString(true).c_str());
+      LogFlush(info_log);
+    }
+    assert(false);
+  }
+#endif
+  f->refs++;
+  level_files->push_back(f);
+}
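A hedged standalone sketch of the (score, level) selection idea above (hypothetical code, not RocksDB's API): compute one score per level, where L0 is judged by file count and deeper levels by bytes over target, then order levels so the neediest is compacted first.

    #include <algorithm>
    #include <cstdint>
    #include <utility>
    #include <vector>

    // A score above 1.0 means "over target". level_targets[0] is unused;
    // both vectors are assumed to be the same length.
    std::vector<std::pair<double, int>> RankLevels(
        const std::vector<uint64_t>& level_bytes,
        const std::vector<uint64_t>& level_targets, size_t l0_files,
        int l0_trigger) {
      std::vector<std::pair<double, int>> ranked;
      ranked.emplace_back(static_cast<double>(l0_files) / l0_trigger, 0);
      for (size_t i = 1; i < level_bytes.size(); i++) {
        ranked.emplace_back(
            static_cast<double>(level_bytes[i]) / level_targets[i],
            static_cast<int>(i));
      }
      std::sort(ranked.begin(), ranked.end(),
                [](const auto& a, const auto& b) { return a.first > b.first; });
      return ranked;  // highest-pressure level first
    }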
+// Version::PrepareApply() needs to be called before calling this function, or
+// the following functions need to have been called:
+// 1. UpdateNumNonEmptyLevels();
+// 2. CalculateBaseBytes();
+// 3. UpdateFilesByCompactionPri();
+// 4. GenerateFileIndexer();
+// 5. GenerateLevelFilesBrief();
+// 6. GenerateLevel0NonOverlapping();
+void VersionStorageInfo::SetFinalized() {
+  finalized_ = true;
+#ifndef NDEBUG
+  if (compaction_style_ != kCompactionStyleLevel) {
+    // Not level based compaction.
+    return;
+  }
+  assert(base_level_ < 0 || num_levels() == 1 ||
+         (base_level_ >= 1 && base_level_ < num_levels()));
+  // Verify all levels newer than base_level are empty except L0
+  for (int level = 1; level < base_level(); level++) {
+    assert(NumLevelBytes(level) == 0);
+  }
+  uint64_t max_bytes_prev_level = 0;
+  for (int level = base_level(); level < num_levels() - 1; level++) {
+    if (LevelFiles(level).size() == 0) {
+      continue;
+    }
+    assert(MaxBytesForLevel(level) >= max_bytes_prev_level);
+    max_bytes_prev_level = MaxBytesForLevel(level);
+  }
+  int num_empty_non_l0_level = 0;
+  for (int level = 0; level < num_levels(); level++) {
+    assert(LevelFiles(level).size() == 0 ||
+           LevelFiles(level).size() == LevelFilesBrief(level).num_files);
+    if (level > 0 && NumLevelBytes(level) > 0) {
+      num_empty_non_l0_level++;
+    }
+    if (LevelFiles(level).size() > 0) {
+      assert(level < num_non_empty_levels());
+    }
+  }
+  assert(compaction_level_.size() > 0);
+  assert(compaction_level_.size() == compaction_score_.size());
+#endif
+}
+
+void VersionStorageInfo::UpdateNumNonEmptyLevels() {
+  num_non_empty_levels_ = num_levels_;
+  for (int i = num_levels_ - 1; i >= 0; i--) {
+    if (files_[i].size() != 0) {
+      return;
+    } else {
+      num_non_empty_levels_ = i;
+    }
+  }
+}
+
+namespace {
+// Sort `temp` based on ratio of overlapping size over file size
+void SortFileByOverlappingRatio(
+    const InternalKeyComparator& icmp, const std::vector<FileMetaData*>& files,
+    const std::vector<FileMetaData*>& next_level_files,
+    std::vector<Fsize>* temp) {
+  std::unordered_map<uint64_t, uint64_t> file_to_order;
+  auto next_level_it = next_level_files.begin();
+
+  for (auto& file : files) {
+    uint64_t overlapping_bytes = 0;
+    // Skip files in the next level that are smaller than the current file
+    while (next_level_it != next_level_files.end() &&
+           icmp.Compare((*next_level_it)->largest, file->smallest) < 0) {
+      next_level_it++;
+    }
+
+    while (next_level_it != next_level_files.end() &&
+           icmp.Compare((*next_level_it)->smallest, file->largest) < 0) {
+      overlapping_bytes += (*next_level_it)->fd.file_size;
+
+      if (icmp.Compare((*next_level_it)->largest, file->largest) > 0) {
+        // the next-level file crosses the largest-key boundary of the
+        // current file.
+        break;
+      }
+      next_level_it++;
+    }
+
+    assert(file->fd.file_size != 0);
+    file_to_order[file->fd.GetNumber()] =
+        overlapping_bytes * 1024u / file->fd.file_size;
+  }
+
+  std::sort(temp->begin(), temp->end(),
+            [&](const Fsize& f1, const Fsize& f2) -> bool {
+              return file_to_order[f1.file->fd.GetNumber()] <
+                     file_to_order[f2.file->fd.GetNumber()];
+            });
+}
+}  // namespace
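A standalone sketch of the overlapping-ratio heuristic above (hypothetical types, not RocksDB's API): a file whose key range overlaps little data in the next level is cheap to compact, so it sorts first.

    #include <cstdint>
    #include <vector>

    struct Range { int smallest; int largest; uint64_t bytes; };

    // Scaled ratio of next-level bytes overlapped per byte of the file.
    // next_level is assumed sorted by key and non-overlapping; f.bytes != 0.
    uint64_t OverlapScore(const Range& f, const std::vector<Range>& next_level) {
      uint64_t overlapping = 0;
      for (const auto& n : next_level) {
        if (n.largest < f.smallest) continue;  // entirely before the file
        if (n.smallest > f.largest) break;     // entirely after: done
        overlapping += n.bytes;
      }
      return overlapping * 1024u / f.bytes;    // lower score compacts first
    }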
+void VersionStorageInfo::UpdateFilesByCompactionPri(
+    const MutableCFOptions& mutable_cf_options) {
+  if (compaction_style_ == kCompactionStyleFIFO ||
+      compaction_style_ == kCompactionStyleUniversal) {
+    // don't need this
+    return;
+  }
+  // No need to sort the highest level because it is never compacted.
+  for (int level = 0; level < num_levels() - 1; level++) {
+    const std::vector<FileMetaData*>& files = files_[level];
+    auto& files_by_compaction_pri = files_by_compaction_pri_[level];
+    assert(files_by_compaction_pri.size() == 0);
+
+    // populate a temp vector for sorting based on size
+    std::vector<Fsize> temp(files.size());
+    for (size_t i = 0; i < files.size(); i++) {
+      temp[i].index = i;
+      temp[i].file = files[i];
+    }
+
+    // sort the top number_of_files_to_sort_ based on file size
+    size_t num = VersionStorageInfo::kNumberFilesToSort;
+    if (num > temp.size()) {
+      num = temp.size();
+    }
+    switch (mutable_cf_options.compaction_pri) {
+      case kByCompensatedSize:
+        std::partial_sort(temp.begin(), temp.begin() + num, temp.end(),
+                          CompareCompensatedSizeDescending);
+        break;
+      case kOldestLargestSeqFirst:
+        std::sort(temp.begin(), temp.end(),
+                  [this](const Fsize& f1, const Fsize& f2) -> bool {
+                    return f1.file->largest_seqno < f2.file->largest_seqno;
+                  });
+        break;
+      case kOldestSmallestSeqFirst:
+        std::sort(temp.begin(), temp.end(),
+                  [this](const Fsize& f1, const Fsize& f2) -> bool {
+                    return f1.file->smallest_seqno < f2.file->smallest_seqno;
+                  });
+        break;
+      case kMinOverlappingRatio:
+        SortFileByOverlappingRatio(*internal_comparator_, files_[level],
+                                   files_[level + 1], &temp);
+        break;
+      default:
+        assert(false);
+    }
+    assert(temp.size() == files.size());
+
+    // initialize files_by_compaction_pri_
+    for (size_t i = 0; i < temp.size(); i++) {
+      files_by_compaction_pri.push_back(static_cast<int>(temp[i].index));
+    }
+    next_file_to_compact_by_size_[level] = 0;
+    assert(files_[level].size() == files_by_compaction_pri_[level].size());
+  }
+}
+
+void VersionStorageInfo::GenerateLevel0NonOverlapping() {
+  assert(!finalized_);
+  level0_non_overlapping_ = true;
+  if (level_files_brief_.size() == 0) {
+    return;
+  }
+
+  // A copy of L0 files sorted by smallest key
+  std::vector<FdWithKeyRange> level0_sorted_file(
+      level_files_brief_[0].files,
+      level_files_brief_[0].files + level_files_brief_[0].num_files);
+  std::sort(level0_sorted_file.begin(), level0_sorted_file.end(),
+            [this](const FdWithKeyRange& f1, const FdWithKeyRange& f2) -> bool {
+              return (internal_comparator_->Compare(f1.smallest_key,
+                                                    f2.smallest_key) < 0);
+            });
+
+  for (size_t i = 1; i < level0_sorted_file.size(); ++i) {
+    FdWithKeyRange& f = level0_sorted_file[i];
+    FdWithKeyRange& prev = level0_sorted_file[i - 1];
+    if (internal_comparator_->Compare(prev.largest_key, f.smallest_key) >= 0) {
+      level0_non_overlapping_ = false;
+      break;
+    }
+  }
+}
+
+void Version::Ref() {
+  ++refs_;
+}
+
+bool Version::Unref() {
+  assert(refs_ >= 1);
+  --refs_;
+  if (refs_ == 0) {
+    delete this;
+    return true;
+  }
+  return false;
+}
+
+bool VersionStorageInfo::OverlapInLevel(int level,
+                                        const Slice* smallest_user_key,
+                                        const Slice* largest_user_key) {
+  if (level >= num_non_empty_levels_) {
+    // empty level, no overlap
+    return false;
+  }
+  return SomeFileOverlapsRange(*internal_comparator_, (level > 0),
+                               level_files_brief_[level], smallest_user_key,
+                               largest_user_key);
+}
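A minimal sketch of the level-0 overlap check above (hypothetical standalone code, comparing keys as raw strings for simplicity): sort files by smallest key, then L0 is non-overlapping iff every file starts strictly after the previous one ends.

    #include <algorithm>
    #include <string>
    #include <utility>
    #include <vector>

    using KeyRange = std::pair<std::string, std::string>;  // (smallest, largest)

    bool Level0NonOverlapping(std::vector<KeyRange> files) {
      std::sort(files.begin(), files.end(),
                [](const KeyRange& a, const KeyRange& b) {
                  return a.first < b.first;
                });
      for (size_t i = 1; i < files.size(); ++i) {
        if (files[i - 1].second >= files[i].first) {
          return false;  // previous file's largest key reaches into this one
        }
      }
      return true;
    }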
+// Store in "*inputs" all files in "level" that overlap [begin,end]
+// If hint_index is specified, then it points to a file in the
+// overlapping range.
+// The file_index returns a pointer to any file in an overlapping range.
+void VersionStorageInfo::GetOverlappingInputs(
+    int level, const InternalKey* begin, const InternalKey* end,
+    std::vector<FileMetaData*>* inputs, int hint_index, int* file_index,
+    bool expand_range) const {
+  if (level >= num_non_empty_levels_) {
+    // this level is empty, no overlapping inputs
+    return;
+  }
+
+  inputs->clear();
+  Slice user_begin, user_end;
+  if (begin != nullptr) {
+    user_begin = begin->user_key();
+  }
+  if (end != nullptr) {
+    user_end = end->user_key();
+  }
+  if (file_index) {
+    *file_index = -1;
+  }
+  const Comparator* user_cmp = user_comparator_;
+  if (begin != nullptr && end != nullptr && level > 0) {
+    GetOverlappingInputsBinarySearch(level, user_begin, user_end, inputs,
+                                     hint_index, file_index);
+    return;
+  }
+  for (size_t i = 0; i < level_files_brief_[level].num_files; ) {
+    FdWithKeyRange* f = &(level_files_brief_[level].files[i++]);
+    const Slice file_start = ExtractUserKey(f->smallest_key);
+    const Slice file_limit = ExtractUserKey(f->largest_key);
+    if (begin != nullptr && user_cmp->Compare(file_limit, user_begin) < 0) {
+      // "f" is completely before specified range; skip it
+    } else if (end != nullptr && user_cmp->Compare(file_start, user_end) > 0) {
+      // "f" is completely after specified range; skip it
+    } else {
+      inputs->push_back(files_[level][i - 1]);
+      if (level == 0 && expand_range) {
+        // Level-0 files may overlap each other. So check if the newly
+        // added file has expanded the range. If so, restart search.
+        if (begin != nullptr && user_cmp->Compare(file_start, user_begin) < 0) {
+          user_begin = file_start;
+          inputs->clear();
+          i = 0;
+        } else if (end != nullptr
+                   && user_cmp->Compare(file_limit, user_end) > 0) {
+          user_end = file_limit;
+          inputs->clear();
+          i = 0;
+        }
+      } else if (file_index) {
+        *file_index = static_cast<int>(i) - 1;
+      }
+    }
+  }
+}
+
+// Store in "*inputs" all files in "level" that overlap [begin,end]
+// Employ binary search to find at least one file that overlaps the
+// specified range. From that file, iterate backwards and
+// forwards to find all overlapping files.
+void VersionStorageInfo::GetOverlappingInputsBinarySearch(
+    int level, const Slice& user_begin, const Slice& user_end,
+    std::vector<FileMetaData*>* inputs, int hint_index, int* file_index) const {
+  assert(level > 0);
+  int min = 0;
+  int mid = 0;
+  int max = static_cast<int>(files_[level].size()) - 1;
+  bool foundOverlap = false;
+  const Comparator* user_cmp = user_comparator_;
+
+  // if the caller already knows the index of a file that has overlap,
+  // then we can skip the binary search.
+  if (hint_index != -1) {
+    mid = hint_index;
+    foundOverlap = true;
+  }
+
+  while (!foundOverlap && min <= max) {
+    mid = (min + max) / 2;
+    FdWithKeyRange* f = &(level_files_brief_[level].files[mid]);
+    const Slice file_start = ExtractUserKey(f->smallest_key);
+    const Slice file_limit = ExtractUserKey(f->largest_key);
+    if (user_cmp->Compare(file_limit, user_begin) < 0) {
+      min = mid + 1;
+    } else if (user_cmp->Compare(user_end, file_start) < 0) {
+      max = mid - 1;
+    } else {
+      foundOverlap = true;
+      break;
+    }
+  }
+
+  // If there were no overlapping files, return immediately.
+  if (!foundOverlap) {
+    return;
+  }
+  // returns the index where an overlap is found
+  if (file_index) {
+    *file_index = mid;
+  }
+  ExtendOverlappingInputs(level, user_begin, user_end, inputs, mid);
+}
+
+// Store in "*inputs" all files in "level" that overlap [begin,end]
+// The midIndex specifies the index of at least one file that
+// overlaps the specified range. From that file, iterate backward
+// and forward to find all overlapping files.
+// Use FileLevel in the search to make it faster.
+void VersionStorageInfo::ExtendOverlappingInputs(
+    int level, const Slice& user_begin, const Slice& user_end,
+    std::vector<FileMetaData*>* inputs, unsigned int midIndex) const {
+  const Comparator* user_cmp = user_comparator_;
+  const FdWithKeyRange* files = level_files_brief_[level].files;
+#ifndef NDEBUG
+  {
+    // assert that the file at midIndex overlaps with the range
+    assert(midIndex < level_files_brief_[level].num_files);
+    const FdWithKeyRange* f = &files[midIndex];
+    const Slice fstart = ExtractUserKey(f->smallest_key);
+    const Slice flimit = ExtractUserKey(f->largest_key);
+    if (user_cmp->Compare(fstart, user_begin) >= 0) {
+      assert(user_cmp->Compare(fstart, user_end) <= 0);
+    } else {
+      assert(user_cmp->Compare(flimit, user_begin) >= 0);
+    }
+  }
+#endif
+  int startIndex = midIndex + 1;
+  int endIndex = midIndex;
+  int count __attribute__((unused)) = 0;
+
+  // check backwards from 'mid' to lower indices
+  for (int i = midIndex; i >= 0; i--) {
+    const FdWithKeyRange* f = &files[i];
+    const Slice file_limit = ExtractUserKey(f->largest_key);
+    if (user_cmp->Compare(file_limit, user_begin) >= 0) {
+      startIndex = i;
+      assert((count++, true));
+    } else {
+      break;
+    }
+  }
+  // check forward from 'mid+1' to higher indices
+  for (unsigned int i = midIndex + 1;
+       i < level_files_brief_[level].num_files; i++) {
+    const FdWithKeyRange* f = &files[i];
+    const Slice file_start = ExtractUserKey(f->smallest_key);
+    if (user_cmp->Compare(file_start, user_end) <= 0) {
+      assert((count++, true));
+      endIndex = i;
+    } else {
+      break;
+    }
+  }
+  assert(count == endIndex - startIndex + 1);
+
+  // insert overlapping files into vector
+  for (int i = startIndex; i <= endIndex; i++) {
+    FileMetaData* f = files_[level][i];
+    inputs->push_back(f);
+  }
+}
+
+// Returns true iff the first or last file in inputs contains
+// an overlapping user key to the file "just outside" of it (i.e.
+// just after the last file, or just before the first file)
+// REQUIRES: "*inputs" is a sorted list of non-overlapping files
+bool VersionStorageInfo::HasOverlappingUserKey(
+    const std::vector<FileMetaData*>* inputs, int level) {
+
+  // If inputs empty, there is no overlap.
+  // If level == 0, it is assumed that all needed files were already included.
+  if (inputs->empty() || level == 0) {
+    return false;
+  }
+
+  const Comparator* user_cmp = user_comparator_;
+  const rocksdb::LevelFilesBrief& file_level = level_files_brief_[level];
+  const FdWithKeyRange* files = level_files_brief_[level].files;
+  const size_t kNumFiles = file_level.num_files;
+
+  // Check the last file in inputs against the file after it
+  size_t last_file = FindFile(*internal_comparator_, file_level,
+                              inputs->back()->largest.Encode());
+  assert(last_file < kNumFiles);  // File should exist!
+  if (last_file < kNumFiles - 1) {  // If not the last file
+    const Slice last_key_in_input = ExtractUserKey(
+        files[last_file].largest_key);
+    const Slice first_key_after = ExtractUserKey(
+        files[last_file + 1].smallest_key);
+    if (user_cmp->Equal(last_key_in_input, first_key_after)) {
+      // The last user key in input overlaps with the next file's first key
+      return true;
+    }
+  }
+
+  // Check the first file in inputs against the file just before it
+  size_t first_file = FindFile(*internal_comparator_, file_level,
+                               inputs->front()->smallest.Encode());
+  assert(first_file <= last_file);  // File should exist!
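A hedged sketch of the search strategy used by the two functions above (standalone, hypothetical types): binary-search for any one overlapping interval, then widen to a contiguous run, which is valid because the level's files are sorted and disjoint.

    #include <utility>
    #include <vector>

    struct Interval { int lo; int hi; };

    // Returns [first, last] indices of intervals in v overlapping [lo, hi],
    // or {-1, -1} when none do. v is sorted and non-overlapping.
    std::pair<int, int> FindOverlapRun(const std::vector<Interval>& v, int lo,
                                       int hi) {
      int min = 0, max = static_cast<int>(v.size()) - 1, mid = -1;
      bool found = false;
      while (!found && min <= max) {
        mid = (min + max) / 2;
        if (v[mid].hi < lo) min = mid + 1;       // wholly before: go right
        else if (hi < v[mid].lo) max = mid - 1;  // wholly after: go left
        else found = true;
      }
      if (!found) return {-1, -1};
      int first = mid, last = mid;
      while (first > 0 && v[first - 1].hi >= lo) first--;  // extend left
      while (last + 1 < static_cast<int>(v.size()) && v[last + 1].lo <= hi)
        last++;                                            // extend right
      return {first, last};
    }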
+ if (first_file > 0) { // If not first file + const Slice& first_key_in_input = ExtractUserKey( + files[first_file].smallest_key); + const Slice& last_key_before = ExtractUserKey( + files[first_file-1].largest_key); + if (user_cmp->Equal(first_key_in_input, last_key_before)) { + // The first user key in input overlaps with the previous file's last key + return true; + } + } + + return false; +} + +uint64_t VersionStorageInfo::NumLevelBytes(int level) const { + assert(level >= 0); + assert(level < num_levels()); + return TotalFileSize(files_[level]); +} + +const char* VersionStorageInfo::LevelSummary( + LevelSummaryStorage* scratch) const { + int len = 0; + if (compaction_style_ == kCompactionStyleLevel && num_levels() > 1) { + assert(base_level_ < static_cast(level_max_bytes_.size())); + len = snprintf(scratch->buffer, sizeof(scratch->buffer), + "base level %d max bytes base %" PRIu64 " ", base_level_, + level_max_bytes_[base_level_]); + } + len += + snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, "files["); + for (int i = 0; i < num_levels(); i++) { + int sz = sizeof(scratch->buffer) - len; + int ret = snprintf(scratch->buffer + len, sz, "%d ", int(files_[i].size())); + if (ret < 0 || ret >= sz) break; + len += ret; + } + if (len > 0) { + // overwrite the last space + --len; + } + len += snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, + "] max score %.2f", compaction_score_[0]); + + if (!files_marked_for_compaction_.empty()) { + snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, + " (%" ROCKSDB_PRIszt " files need compaction)", + files_marked_for_compaction_.size()); + } + + return scratch->buffer; +} + +const char* VersionStorageInfo::LevelFileSummary(FileSummaryStorage* scratch, + int level) const { + int len = snprintf(scratch->buffer, sizeof(scratch->buffer), "files_size["); + for (const auto& f : files_[level]) { + int sz = sizeof(scratch->buffer) - len; + char sztxt[16]; + AppendHumanBytes(f->fd.GetFileSize(), sztxt, sizeof(sztxt)); + int ret = snprintf(scratch->buffer + len, sz, + "#%" PRIu64 "(seq=%" PRIu64 ",sz=%s,%d) ", + f->fd.GetNumber(), f->smallest_seqno, sztxt, + static_cast(f->being_compacted)); + if (ret < 0 || ret >= sz) + break; + len += ret; + } + // overwrite the last space (only if files_[level].size() is non-zero) + if (files_[level].size() && len > 0) { + --len; + } + snprintf(scratch->buffer + len, sizeof(scratch->buffer) - len, "]"); + return scratch->buffer; +} + +int64_t VersionStorageInfo::MaxNextLevelOverlappingBytes() { + uint64_t result = 0; + std::vector overlaps; + for (int level = 1; level < num_levels() - 1; level++) { + for (const auto& f : files_[level]) { + GetOverlappingInputs(level + 1, &f->smallest, &f->largest, &overlaps); + const uint64_t sum = TotalFileSize(overlaps); + if (sum > result) { + result = sum; + } + } + } + return result; +} + +uint64_t VersionStorageInfo::MaxBytesForLevel(int level) const { + // Note: the result for level zero is not really used since we set + // the level-0 compaction threshold based on number of files. + assert(level >= 0); + assert(level < static_cast(level_max_bytes_.size())); + return level_max_bytes_[level]; +} + +void VersionStorageInfo::CalculateBaseBytes(const ImmutableCFOptions& ioptions, + const MutableCFOptions& options) { + // Special logic to set number of sorted runs. + // It is to match the previous behavior when all files are in L0. 
+  int num_l0_count = static_cast<int>(files_[0].size());
+  if (compaction_style_ == kCompactionStyleUniversal) {
+    // For universal compaction, we use level0 score to indicate
+    // compaction score for the whole DB. Adding other levels as if
+    // they are L0 files.
+    for (int i = 1; i < num_levels(); i++) {
+      if (!files_[i].empty()) {
+        num_l0_count++;
+      }
+    }
+  }
+  set_l0_delay_trigger_count(num_l0_count);
+
+  level_max_bytes_.resize(ioptions.num_levels);
+  if (!ioptions.level_compaction_dynamic_level_bytes) {
+    base_level_ = (ioptions.compaction_style == kCompactionStyleLevel) ? 1 : -1;
+
+    // Calculate for static bytes base case
+    for (int i = 0; i < ioptions.num_levels; ++i) {
+      if (i == 0 && ioptions.compaction_style == kCompactionStyleUniversal) {
+        level_max_bytes_[i] = options.max_bytes_for_level_base;
+      } else if (i > 1) {
+        level_max_bytes_[i] = MultiplyCheckOverflow(
+            MultiplyCheckOverflow(level_max_bytes_[i - 1],
+                                  options.max_bytes_for_level_multiplier),
+            options.MaxBytesMultiplerAdditional(i - 1));
+      } else {
+        level_max_bytes_[i] = options.max_bytes_for_level_base;
+      }
+    }
+  } else {
+    uint64_t max_level_size = 0;
+
+    int first_non_empty_level = -1;
+    // Find the size of the non-L0 level holding the most data.
+    // Cannot use the size of the last level because it can be empty or less
+    // than previous levels after compaction.
+    for (int i = 1; i < num_levels_; i++) {
+      uint64_t total_size = 0;
+      for (const auto& f : files_[i]) {
+        total_size += f->fd.GetFileSize();
+      }
+      if (total_size > 0 && first_non_empty_level == -1) {
+        first_non_empty_level = i;
+      }
+      if (total_size > max_level_size) {
+        max_level_size = total_size;
+      }
+    }
+
+    // Prefill every level's max bytes to disallow compaction from there.
+    for (int i = 0; i < num_levels_; i++) {
+      level_max_bytes_[i] = std::numeric_limits<uint64_t>::max();
+    }
+
+    if (max_level_size == 0) {
+      // No data for L1 and up. L0 compacts to last level directly.
+      // No compaction from L1+ needs to be scheduled.
+      base_level_ = num_levels_ - 1;
+    } else {
+      uint64_t base_bytes_max = options.max_bytes_for_level_base;
+      uint64_t base_bytes_min =
+          base_bytes_max / options.max_bytes_for_level_multiplier;
+
+      // See whether we can make the last level's target size equal
+      // max_level_size
+      uint64_t cur_level_size = max_level_size;
+      for (int i = num_levels_ - 2; i >= first_non_empty_level; i--) {
+        // Round up after dividing
+        cur_level_size /= options.max_bytes_for_level_multiplier;
+      }
+
+      // Calculate base level and its size.
+      uint64_t base_level_size;
+      if (cur_level_size <= base_bytes_min) {
+        // Case 1. If we make the target size of the last level max_level_size,
+        // the target size of the first non-empty level would be smaller than
+        // base_bytes_min. We set it to base_bytes_min.
+        base_level_size = base_bytes_min + 1U;
+        base_level_ = first_non_empty_level;
+        Warn(ioptions.info_log,
+             "More existing levels in DB than needed. "
+             "max_bytes_for_level_multiplier may not be guaranteed.");
+      } else {
+        // Find base level (where L0 data is compacted to).
+        base_level_ = first_non_empty_level;
+        while (base_level_ > 1 && cur_level_size > base_bytes_max) {
+          --base_level_;
+          cur_level_size =
+              cur_level_size / options.max_bytes_for_level_multiplier;
+        }
+        if (cur_level_size > base_bytes_max) {
+          // Even L1 will be too large
+          assert(base_level_ == 1);
+          base_level_size = base_bytes_max;
+        } else {
+          base_level_size = cur_level_size;
+        }
+      }
+
+      uint64_t level_size = base_level_size;
+      for (int i = base_level_; i < num_levels_; i++) {
+        if (i > base_level_) {
+          level_size = MultiplyCheckOverflow(
+              level_size, options.max_bytes_for_level_multiplier);
+        }
+        level_max_bytes_[i] = level_size;
+      }
+    }
+  }
+}
+
+uint64_t VersionStorageInfo::EstimateLiveDataSize() const {
+  // Estimate the live data size by adding up the size of the last level for
+  // all key ranges. Note: the estimate depends on the ordering of files in
+  // level 0 because files in level 0 can be overlapping.
+  uint64_t size = 0;
+
+  auto ikey_lt = [this](InternalKey* x, InternalKey* y) {
+    return internal_comparator_->Compare(*x, *y) < 0;
+  };
+  // (Ordered) map of largest keys in non-overlapping files
+  std::map<InternalKey*, FileMetaData*, decltype(ikey_lt)> ranges(ikey_lt);
+
+  for (int l = num_levels_ - 1; l >= 0; l--) {
+    bool found_end = false;
+    for (auto file : files_[l]) {
+      // Find the first file where the largest key is larger than the smallest
+      // key of the current file. If this file does not overlap with the
+      // current file, none of the files in the map does. If there is
+      // no potential overlap, we can safely insert the rest of this level
+      // (if the level is not 0) into the map without checking again because
+      // the elements in the level are sorted and non-overlapping.
+      auto lb = (found_end && l != 0) ?
+          ranges.end() : ranges.lower_bound(&file->smallest);
+      found_end = (lb == ranges.end());
+      if (found_end || internal_comparator_->Compare(
+              file->largest, (*lb).second->smallest) < 0) {
+        ranges.emplace_hint(lb, &file->largest, file);
+        size += file->fd.file_size;
+      }
+    }
+  }
+  return size;
+}
+
+void Version::AddLiveFiles(std::vector<FileDescriptor>* live) {
+  for (int level = 0; level < storage_info_.num_levels(); level++) {
+    const std::vector<FileMetaData*>& files = storage_info_.files_[level];
+    for (const auto& file : files) {
+      live->push_back(file->fd);
+    }
+  }
+}
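A worked sketch of the dynamic level-target calculation above (standalone code with assumed example numbers; real option handling lives in MutableCFOptions): walking down from the last level's actual size by the multiplier picks the target for the first non-empty level, which in turn determines the base level.

    #include <cstdint>
    #include <cstdio>

    int main() {
      const double multiplier = 10.0;                // max_bytes_for_level_multiplier
      const uint64_t base_bytes_max = 256ull << 20;  // max_bytes_for_level_base
      uint64_t cur = 300ull << 30;                   // last level holds ~300 GB
      int num_levels = 7, first_non_empty = 1;
      // Shrink by the multiplier once per level above the last one.
      for (int i = num_levels - 2; i >= first_non_empty; i--) {
        cur = static_cast<uint64_t>(cur / multiplier);
      }
      int base_level = first_non_empty;
      // 300 GB / 10^5 is roughly 3 MB, far below the 256 MB cap, so the base
      // level stays at L1 here; a much larger database would push it deeper.
      std::printf("base level %d, target %llu bytes, cap %llu\n", base_level,
                  (unsigned long long)cur, (unsigned long long)base_bytes_max);
      return 0;
    }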
"); + r.append(files[i]->largest.DebugString(hex)); + r.append("]\n"); + } + } + return r; +} + +// this is used to batch writes to the manifest file +struct VersionSet::ManifestWriter { + Status status; + bool done; + InstrumentedCondVar cv; + ColumnFamilyData* cfd; + const autovector& edit_list; + + explicit ManifestWriter(InstrumentedMutex* mu, ColumnFamilyData* _cfd, + const autovector& e) + : done(false), cv(mu), cfd(_cfd), edit_list(e) {} +}; + +VersionSet::VersionSet(const std::string& dbname, const DBOptions* db_options, + const EnvOptions& storage_options, Cache* table_cache, + WriteBufferManager* write_buffer_manager, + WriteController* write_controller) + : column_family_set_( + new ColumnFamilySet(dbname, db_options, storage_options, table_cache, + write_buffer_manager, write_controller)), + env_(db_options->env), + dbname_(dbname), + db_options_(db_options), + next_file_number_(2), + manifest_file_number_(0), // Filled by Recover() + pending_manifest_file_number_(0), + last_sequence_(0), + prev_log_number_(0), + current_version_number_(0), + manifest_file_size_(0), + env_options_(storage_options), + env_options_compactions_(env_options_) {} + +void CloseTables(void* ptr, size_t) { + TableReader* table_reader = reinterpret_cast(ptr); + table_reader->Close(); +} + +VersionSet::~VersionSet() { + // we need to delete column_family_set_ because its destructor depends on + // VersionSet + column_family_set_->get_table_cache()->ApplyToAllCacheEntries(&CloseTables, + false); + column_family_set_.reset(); + for (auto file : obsolete_files_) { + delete file; + } + obsolete_files_.clear(); +} + +void VersionSet::AppendVersion(ColumnFamilyData* column_family_data, + Version* v) { + // compute new compaction score + v->storage_info()->ComputeCompactionScore( + *column_family_data->GetLatestMutableCFOptions()); + + // Mark v finalized + v->storage_info_.SetFinalized(); + + // Make "v" current + assert(v->refs_ == 0); + Version* current = column_family_data->current(); + assert(v != current); + if (current != nullptr) { + assert(current->refs_ > 0); + current->Unref(); + } + column_family_data->SetCurrent(v); + v->Ref(); + + // Append to linked list + v->prev_ = column_family_data->dummy_versions()->prev_; + v->next_ = column_family_data->dummy_versions(); + v->prev_->next_ = v; + v->next_->prev_ = v; +} + +Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data, + const MutableCFOptions& mutable_cf_options, + const autovector& edit_list, + InstrumentedMutex* mu, Directory* db_directory, + bool new_descriptor_log, + const ColumnFamilyOptions* new_cf_options) { + mu->AssertHeld(); + // num of edits + auto num_edits = edit_list.size(); + if (num_edits == 0) { + return Status::OK(); + } else if (num_edits > 1) { +#ifndef NDEBUG + // no group commits for column family add or drop + for (auto& edit : edit_list) { + assert(!edit->IsColumnFamilyManipulation()); + } +#endif + } + + // column_family_data can be nullptr only if this is column_family_add. 
+Status VersionSet::LogAndApply(ColumnFamilyData* column_family_data,
+                               const MutableCFOptions& mutable_cf_options,
+                               const autovector<VersionEdit*>& edit_list,
+                               InstrumentedMutex* mu, Directory* db_directory,
+                               bool new_descriptor_log,
+                               const ColumnFamilyOptions* new_cf_options) {
+  mu->AssertHeld();
+  // num of edits
+  auto num_edits = edit_list.size();
+  if (num_edits == 0) {
+    return Status::OK();
+  } else if (num_edits > 1) {
+#ifndef NDEBUG
+    // no group commits for column family add or drop
+    for (auto& edit : edit_list) {
+      assert(!edit->IsColumnFamilyManipulation());
+    }
+#endif
+  }
+
+  // column_family_data can be nullptr only if this is column_family_add.
+  // in that case, we also need to specify ColumnFamilyOptions
+  if (column_family_data == nullptr) {
+    assert(num_edits == 1);
+    assert(edit_list[0]->is_column_family_add_);
+    assert(new_cf_options != nullptr);
+  }
+
+  // queue our request
+  ManifestWriter w(mu, column_family_data, edit_list);
+  manifest_writers_.push_back(&w);
+  while (!w.done && &w != manifest_writers_.front()) {
+    w.cv.Wait();
+  }
+  if (w.done) {
+    return w.status;
+  }
+  if (column_family_data != nullptr && column_family_data->IsDropped()) {
+    // if column family is dropped by the time we get here, no need to write
+    // anything to the manifest
+    manifest_writers_.pop_front();
+    // Notify new head of write queue
+    if (!manifest_writers_.empty()) {
+      manifest_writers_.front()->cv.Signal();
+    }
+    // we steal this code to also inform about cf-drop
+    return Status::ShutdownInProgress();
+  }
+
+  autovector<VersionEdit*> batch_edits;
+  Version* v = nullptr;
+  std::unique_ptr<BaseReferencedVersionBuilder> builder_guard(nullptr);
+
+  // process all requests in the queue
+  ManifestWriter* last_writer = &w;
+  assert(!manifest_writers_.empty());
+  assert(manifest_writers_.front() == &w);
+  if (w.edit_list.front()->IsColumnFamilyManipulation()) {
+    // no group commits for column family add or drop
+    LogAndApplyCFHelper(w.edit_list.front());
+    batch_edits.push_back(w.edit_list.front());
+  } else {
+    v = new Version(column_family_data, this, current_version_number_++);
+    builder_guard.reset(new BaseReferencedVersionBuilder(column_family_data));
+    auto* builder = builder_guard->version_builder();
+    for (const auto& writer : manifest_writers_) {
+      if (writer->edit_list.front()->IsColumnFamilyManipulation() ||
+          writer->cfd->GetID() != column_family_data->GetID()) {
+        // no group commits for column family add or drop
+        // also, group commits across column families are not supported
+        break;
+      }
+      last_writer = writer;
+      for (const auto& edit : writer->edit_list) {
+        LogAndApplyHelper(column_family_data, builder, v, edit, mu);
+        batch_edits.push_back(edit);
+      }
+    }
+    builder->SaveTo(v->storage_info());
+  }
+
+  // Initialize new descriptor log file if necessary by creating
+  // a temporary file that contains a snapshot of the current version.
+  uint64_t new_manifest_file_size = 0;
+  Status s;
+
+  assert(pending_manifest_file_number_ == 0);
+  if (!descriptor_log_ ||
+      manifest_file_size_ > db_options_->max_manifest_file_size) {
+    pending_manifest_file_number_ = NewFileNumber();
+    batch_edits.back()->SetNextFile(next_file_number_.load());
+    new_descriptor_log = true;
+  } else {
+    pending_manifest_file_number_ = manifest_file_number_;
+  }
+
+  if (new_descriptor_log) {
+    // if we're writing out new snapshot make sure to persist max column family
+    if (column_family_set_->GetMaxColumnFamily() > 0) {
+      w.edit_list.front()->SetMaxColumnFamily(
+          column_family_set_->GetMaxColumnFamily());
+    }
+  }
+
+  // Unlock during expensive operations. New writes cannot get here
+  // because &w is ensuring that all new writes get queued.
+  {
+
+    mu->Unlock();
+
+    TEST_SYNC_POINT("VersionSet::LogAndApply:WriteManifest");
+    if (!w.edit_list.front()->IsColumnFamilyManipulation() &&
+        db_options_->max_open_files == -1) {
+      // unlimited table cache. Pre-load table handle now.
+      // Need to do it out of the mutex.
+      builder_guard->version_builder()->LoadTableHandlers(
+          column_family_data->internal_stats(),
+          column_family_data->ioptions()->optimize_filters_for_hits,
+          true /* prefetch_index_and_filter_in_cache */);
+    }
+
+    // This is fine because everything inside of this block is serialized --
+    // only one thread can be here at the same time
+    if (new_descriptor_log) {
+      // create manifest file
+      Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
+          "Creating manifest %" PRIu64 "\n", pending_manifest_file_number_);
+      unique_ptr<WritableFile> descriptor_file;
+      EnvOptions opt_env_opts = env_->OptimizeForManifestWrite(env_options_);
+      s = NewWritableFile(
+          env_, DescriptorFileName(dbname_, pending_manifest_file_number_),
+          &descriptor_file, opt_env_opts);
+      if (s.ok()) {
+        descriptor_file->SetPreallocationBlockSize(
+            db_options_->manifest_preallocation_size);
+
+        unique_ptr<WritableFileWriter> file_writer(
+            new WritableFileWriter(std::move(descriptor_file), opt_env_opts));
+        descriptor_log_.reset(
+            new log::Writer(std::move(file_writer), 0, false));
+        s = WriteSnapshot(descriptor_log_.get());
+      }
+    }
+
+    if (!w.edit_list.front()->IsColumnFamilyManipulation()) {
+      // These are CPU-heavy operations that should be done outside the mutex.
+      v->PrepareApply(mutable_cf_options, true);
+    }
+
+    // Write new record to MANIFEST log
+    if (s.ok()) {
+      for (auto& e : batch_edits) {
+        std::string record;
+        if (!e->EncodeTo(&record)) {
+          s = Status::Corruption(
+              "Unable to Encode VersionEdit:" + e->DebugString(true));
+          break;
+        }
+        TEST_KILL_RANDOM("VersionSet::LogAndApply:BeforeAddRecord",
+                         rocksdb_kill_odds * REDUCE_ODDS2);
+        s = descriptor_log_->AddRecord(record);
+        if (!s.ok()) {
+          break;
+        }
+      }
+      if (s.ok()) {
+        s = SyncManifest(env_, db_options_, descriptor_log_->file());
+      }
+      if (!s.ok()) {
+        Log(InfoLogLevel::ERROR_LEVEL, db_options_->info_log,
+            "MANIFEST write: %s\n", s.ToString().c_str());
+      }
+    }
+
+    // If we just created a new descriptor file, install it by writing a
+    // new CURRENT file that points to it.
+    if (s.ok() && new_descriptor_log) {
+      s = SetCurrentFile(env_, dbname_, pending_manifest_file_number_,
+                         db_options_->disableDataSync ? nullptr : db_directory);
+    }
+
+    if (s.ok()) {
+      // find offset in manifest file where this version is stored.
+      new_manifest_file_size = descriptor_log_->file()->GetFileSize();
+    }
+
+    if (w.edit_list.front()->is_column_family_drop_) {
+      TEST_SYNC_POINT("VersionSet::LogAndApply::ColumnFamilyDrop:0");
+      TEST_SYNC_POINT("VersionSet::LogAndApply::ColumnFamilyDrop:1");
+      TEST_SYNC_POINT("VersionSet::LogAndApply::ColumnFamilyDrop:2");
+    }
+
+    LogFlush(db_options_->info_log);
+    TEST_SYNC_POINT("VersionSet::LogAndApply:WriteManifestDone");
+    mu->Lock();
+  }
+
+  // Append the old manifest file to the obsolete_manifests_ list to be deleted
+  // by PurgeObsoleteFiles later.
+  if (s.ok() && new_descriptor_log) {
+    obsolete_manifests_.emplace_back(
+        DescriptorFileName("", manifest_file_number_));
+  }
+
+  // Install the new version
+  if (s.ok()) {
+    if (w.edit_list.front()->is_column_family_add_) {
+      // no group commit on column family add
+      assert(batch_edits.size() == 1);
+      assert(new_cf_options != nullptr);
+      CreateColumnFamily(*new_cf_options, w.edit_list.front());
+    } else if (w.edit_list.front()->is_column_family_drop_) {
+      assert(batch_edits.size() == 1);
+      column_family_data->SetDropped();
+      if (column_family_data->Unref()) {
+        delete column_family_data;
+      }
+    } else {
+      uint64_t max_log_number_in_batch = 0;
+      for (auto& e : batch_edits) {
+        if (e->has_log_number_) {
+          max_log_number_in_batch =
+              std::max(max_log_number_in_batch, e->log_number_);
+        }
+      }
+      if (max_log_number_in_batch != 0) {
+        assert(column_family_data->GetLogNumber() <= max_log_number_in_batch);
+        column_family_data->SetLogNumber(max_log_number_in_batch);
+      }
+      AppendVersion(column_family_data, v);
+    }
+
+    manifest_file_number_ = pending_manifest_file_number_;
+    manifest_file_size_ = new_manifest_file_size;
+    prev_log_number_ = w.edit_list.front()->prev_log_number_;
+  } else {
+    std::string version_edits;
+    for (auto& e : batch_edits) {
+      version_edits = version_edits + "\n" + e->DebugString(true);
+    }
+    Log(InfoLogLevel::ERROR_LEVEL, db_options_->info_log,
+        "[%s] Error in committing version edit to MANIFEST: %s",
+        column_family_data ? column_family_data->GetName().c_str() : "",
+        version_edits.c_str());
+    delete v;
+    if (new_descriptor_log) {
+      Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
+          "Deleting manifest %" PRIu64 " current manifest %" PRIu64 "\n",
+          manifest_file_number_, pending_manifest_file_number_);
+      descriptor_log_.reset();
+      env_->DeleteFile(
+          DescriptorFileName(dbname_, pending_manifest_file_number_));
+    }
+  }
+  pending_manifest_file_number_ = 0;
+
+  // wake up all the waiting writers
+  while (true) {
+    ManifestWriter* ready = manifest_writers_.front();
+    manifest_writers_.pop_front();
+    if (ready != &w) {
+      ready->status = s;
+      ready->done = true;
+      ready->cv.Signal();
+    }
+    if (ready == last_writer) break;
+  }
+  // Notify new head of write queue
+  if (!manifest_writers_.empty()) {
+    manifest_writers_.front()->cv.Signal();
+  }
+  return s;
+}
+
+void VersionSet::LogAndApplyCFHelper(VersionEdit* edit) {
+  assert(edit->IsColumnFamilyManipulation());
+  edit->SetNextFile(next_file_number_.load());
+  edit->SetLastSequence(last_sequence_);
+  if (edit->is_column_family_drop_) {
+    // if we drop a column family, we have to make sure to save the max
+    // column family, so that we don't reuse an existing ID
+    edit->SetMaxColumnFamily(column_family_set_->GetMaxColumnFamily());
+  }
+}
+
+void VersionSet::LogAndApplyHelper(ColumnFamilyData* cfd,
+                                   VersionBuilder* builder, Version* v,
+                                   VersionEdit* edit, InstrumentedMutex* mu) {
+  mu->AssertHeld();
+  assert(!edit->IsColumnFamilyManipulation());
+
+  if (edit->has_log_number_) {
+    assert(edit->log_number_ >= cfd->GetLogNumber());
+    assert(edit->log_number_ < next_file_number_.load());
+  }
+
+  if (!edit->has_prev_log_number_) {
+    edit->SetPrevLogNumber(prev_log_number_);
+  }
+  edit->SetNextFile(next_file_number_.load());
+  edit->SetLastSequence(last_sequence_);
+
+  builder->Apply(edit);
+}
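A hedged standalone sketch (hypothetical types, not RocksDB's API) of the MANIFEST replay model that Recover() implements below: the current file set is the fold of a sequence of VersionEdit-like records over an initially empty state.

    #include <cstdint>
    #include <set>
    #include <vector>

    struct Edit {
      std::vector<uint64_t> added_files;
      std::vector<uint64_t> deleted_files;
    };

    std::set<uint64_t> Replay(const std::vector<Edit>& manifest) {
      std::set<uint64_t> live;
      for (const Edit& e : manifest) {  // apply records in log order
        for (uint64_t f : e.deleted_files) live.erase(f);
        for (uint64_t f : e.added_files) live.insert(f);
      }
      return live;  // the equivalent of the rebuilt Version's file set
    }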
+Status VersionSet::Recover(
+    const std::vector<ColumnFamilyDescriptor>& column_families,
+    bool read_only) {
+  std::unordered_map<std::string, ColumnFamilyOptions> cf_name_to_options;
+  for (auto cf : column_families) {
+    cf_name_to_options.insert({cf.name, cf.options});
+  }
+  // keeps track of column families in the manifest that were not found in
+  // the column_families parameter. If those column families are not dropped
+  // by subsequent manifest records, Recover() will return a failure status
+  std::unordered_map<uint32_t, std::string> column_families_not_found;
+
+  // Read "CURRENT" file, which contains a pointer to the current manifest file
+  std::string manifest_filename;
+  Status s = ReadFileToString(
+      env_, CurrentFileName(dbname_), &manifest_filename
+  );
+  if (!s.ok()) {
+    return s;
+  }
+  if (manifest_filename.empty() ||
+      manifest_filename.back() != '\n') {
+    return Status::Corruption("CURRENT file does not end with newline");
+  }
+  // remove the trailing '\n'
+  manifest_filename.resize(manifest_filename.size() - 1);
+  FileType type;
+  bool parse_ok =
+      ParseFileName(manifest_filename, &manifest_file_number_, &type);
+  if (!parse_ok || type != kDescriptorFile) {
+    return Status::Corruption("CURRENT file corrupted");
+  }
+
+  Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
+      "Recovering from manifest file: %s\n",
+      manifest_filename.c_str());
+
+  manifest_filename = dbname_ + "/" + manifest_filename;
+  unique_ptr<SequentialFileReader> manifest_file_reader;
+  {
+    unique_ptr<SequentialFile> manifest_file;
+    s = env_->NewSequentialFile(manifest_filename, &manifest_file,
+                                env_options_);
+    if (!s.ok()) {
+      return s;
+    }
+    manifest_file_reader.reset(
+        new SequentialFileReader(std::move(manifest_file)));
+  }
+  uint64_t current_manifest_file_size;
+  s = env_->GetFileSize(manifest_filename, &current_manifest_file_size);
+  if (!s.ok()) {
+    return s;
+  }
+
+  bool have_log_number = false;
+  bool have_prev_log_number = false;
+  bool have_next_file = false;
+  bool have_last_sequence = false;
+  uint64_t next_file = 0;
+  uint64_t last_sequence = 0;
+  uint64_t log_number = 0;
+  uint64_t previous_log_number = 0;
+  uint32_t max_column_family = 0;
+  std::unordered_map<uint32_t, BaseReferencedVersionBuilder*> builders;
+
+  // add default column family
+  auto default_cf_iter = cf_name_to_options.find(kDefaultColumnFamilyName);
+  if (default_cf_iter == cf_name_to_options.end()) {
+    return Status::InvalidArgument("Default column family not specified");
+  }
+  VersionEdit default_cf_edit;
+  default_cf_edit.AddColumnFamily(kDefaultColumnFamilyName);
+  default_cf_edit.SetColumnFamily(0);
+  ColumnFamilyData* default_cfd =
+      CreateColumnFamily(default_cf_iter->second, &default_cf_edit);
+  builders.insert({0, new BaseReferencedVersionBuilder(default_cfd)});
+
+  {
+    VersionSet::LogReporter reporter;
+    reporter.status = &s;
+    log::Reader reader(nullptr, std::move(manifest_file_reader), &reporter,
+                       true /*checksum*/, 0 /*initial_offset*/, 0);
+    Slice record;
+    std::string scratch;
+    while (reader.ReadRecord(&record, &scratch) && s.ok()) {
+      VersionEdit edit;
+      s = edit.DecodeFrom(record);
+      if (!s.ok()) {
+        break;
+      }
+
+      // Not found means that user didn't supply that column
+      // family option AND we encountered column family add
+      // record. Once we encounter column family drop record,
+      // we will delete the column family from
+      // column_families_not_found.
+ bool cf_in_not_found = + column_families_not_found.find(edit.column_family_) != + column_families_not_found.end(); + // in builders means that user supplied that column family + // option AND that we encountered column family add record + bool cf_in_builders = + builders.find(edit.column_family_) != builders.end(); + + // they can't both be true + assert(!(cf_in_not_found && cf_in_builders)); + + ColumnFamilyData* cfd = nullptr; + + if (edit.is_column_family_add_) { + if (cf_in_builders || cf_in_not_found) { + s = Status::Corruption( + "Manifest adding the same column family twice"); + break; + } + auto cf_options = cf_name_to_options.find(edit.column_family_name_); + if (cf_options == cf_name_to_options.end()) { + column_families_not_found.insert( + {edit.column_family_, edit.column_family_name_}); + } else { + cfd = CreateColumnFamily(cf_options->second, &edit); + builders.insert( + {edit.column_family_, new BaseReferencedVersionBuilder(cfd)}); + } + } else if (edit.is_column_family_drop_) { + if (cf_in_builders) { + auto builder = builders.find(edit.column_family_); + assert(builder != builders.end()); + delete builder->second; + builders.erase(builder); + cfd = column_family_set_->GetColumnFamily(edit.column_family_); + if (cfd->Unref()) { + delete cfd; + cfd = nullptr; + } else { + // who else can have reference to cfd!? + assert(false); + } + } else if (cf_in_not_found) { + column_families_not_found.erase(edit.column_family_); + } else { + s = Status::Corruption( + "Manifest - dropping non-existing column family"); + break; + } + } else if (!cf_in_not_found) { + if (!cf_in_builders) { + s = Status::Corruption( + "Manifest record referencing unknown column family"); + break; + } + + cfd = column_family_set_->GetColumnFamily(edit.column_family_); + // this should never happen since cf_in_builders is true + assert(cfd != nullptr); + if (edit.max_level_ >= cfd->current()->storage_info()->num_levels()) { + s = Status::InvalidArgument( + "db has more levels than options.num_levels"); + break; + } + + // if it is not column family add or column family drop, + // then it's a file add/delete, which should be forwarded + // to builder + auto builder = builders.find(edit.column_family_); + assert(builder != builders.end()); + builder->second->version_builder()->Apply(&edit); + } + + if (cfd != nullptr) { + if (edit.has_log_number_) { + if (cfd->GetLogNumber() > edit.log_number_) { + Log(InfoLogLevel::WARN_LEVEL, db_options_->info_log, + "MANIFEST corruption detected, but ignored - Log numbers in " + "records NOT monotonically increasing"); + } else { + cfd->SetLogNumber(edit.log_number_); + have_log_number = true; + } + } + if (edit.has_comparator_ && + edit.comparator_ != cfd->user_comparator()->Name()) { + s = Status::InvalidArgument( + cfd->user_comparator()->Name(), + "does not match existing comparator " + edit.comparator_); + break; + } + } + + if (edit.has_prev_log_number_) { + previous_log_number = edit.prev_log_number_; + have_prev_log_number = true; + } + + if (edit.has_next_file_number_) { + next_file = edit.next_file_number_; + have_next_file = true; + } + + if (edit.has_max_column_family_) { + max_column_family = edit.max_column_family_; + } + + if (edit.has_last_sequence_) { + last_sequence = edit.last_sequence_; + have_last_sequence = true; + } + } + } + + if (s.ok()) { + if (!have_next_file) { + s = Status::Corruption("no meta-nextfile entry in descriptor"); + } else if (!have_log_number) { + s = Status::Corruption("no meta-lognumber entry in descriptor"); + } else if 
+    } else if (!have_last_sequence) {
+      s = Status::Corruption("no last-sequence-number entry in descriptor");
+    }
+
+    if (!have_prev_log_number) {
+      previous_log_number = 0;
+    }
+
+    column_family_set_->UpdateMaxColumnFamily(max_column_family);
+
+    MarkFileNumberUsedDuringRecovery(previous_log_number);
+    MarkFileNumberUsedDuringRecovery(log_number);
+  }
+
+  // there were some column families in the MANIFEST that weren't specified
+  // in the argument. This is OK in read_only mode
+  if (read_only == false && !column_families_not_found.empty()) {
+    std::string list_of_not_found;
+    for (const auto& cf : column_families_not_found) {
+      list_of_not_found += ", " + cf.second;
+    }
+    list_of_not_found = list_of_not_found.substr(2);
+    s = Status::InvalidArgument(
+        "You have to open all column families. Column families not opened: " +
+        list_of_not_found);
+  }
+
+  if (s.ok()) {
+    for (auto cfd : *column_family_set_) {
+      if (cfd->IsDropped()) {
+        continue;
+      }
+      auto builders_iter = builders.find(cfd->GetID());
+      assert(builders_iter != builders.end());
+      auto* builder = builders_iter->second->version_builder();
+
+      if (db_options_->max_open_files == -1) {
+        // unlimited table cache. Pre-load table handle now.
+        // Need to do it out of the mutex.
+        builder->LoadTableHandlers(
+            cfd->internal_stats(), db_options_->max_file_opening_threads,
+            false /* prefetch_index_and_filter_in_cache */);
+      }
+
+      Version* v = new Version(cfd, this, current_version_number_++);
+      builder->SaveTo(v->storage_info());
+
+      // Install recovered version
+      v->PrepareApply(*cfd->GetLatestMutableCFOptions(),
+                      !(db_options_->skip_stats_update_on_db_open));
+      AppendVersion(cfd, v);
+    }
+
+    manifest_file_size_ = current_manifest_file_size;
+    next_file_number_.store(next_file + 1);
+    last_sequence_ = last_sequence;
+    prev_log_number_ = previous_log_number;
+
+    Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
+        "Recovered from manifest file:%s succeeded,"
+        "manifest_file_number is %lu, next_file_number is %lu, "
+        "last_sequence is %lu, log_number is %lu,"
+        "prev_log_number is %lu,"
+        "max_column_family is %u\n",
+        manifest_filename.c_str(), (unsigned long)manifest_file_number_,
+        (unsigned long)next_file_number_.load(), (unsigned long)last_sequence_,
+        (unsigned long)log_number, (unsigned long)prev_log_number_,
+        column_family_set_->GetMaxColumnFamily());
+
+    for (auto cfd : *column_family_set_) {
+      if (cfd->IsDropped()) {
+        continue;
+      }
+      Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
+          "Column family [%s] (ID %u), log number is %" PRIu64 "\n",
+          cfd->GetName().c_str(), cfd->GetID(), cfd->GetLogNumber());
+    }
+  }
+
+  for (auto& builder : builders) {
+    delete builder.second;
+  }
+
+  return s;
+}
+
+Status VersionSet::ListColumnFamilies(std::vector<std::string>* column_families,
+                                      const std::string& dbname, Env* env) {
+  // these are just for performance reasons, not correctness,
+  // so we're fine using the defaults
+  EnvOptions soptions;
+  // Read "CURRENT" file, which contains a pointer to the current manifest file
+  std::string current;
+  Status s = ReadFileToString(env, CurrentFileName(dbname), &current);
+  if (!s.ok()) {
+    return s;
+  }
+  if (current.empty() || current[current.size()-1] != '\n') {
+    return Status::Corruption("CURRENT file does not end with newline");
+  }
+  current.resize(current.size() - 1);
+
+  std::string dscname = dbname + "/" + current;
+
+  unique_ptr<SequentialFileReader> file_reader;
+  {
+    unique_ptr<SequentialFile> file;
+    s = env->NewSequentialFile(dscname, &file, soptions);
+    if (!s.ok()) {
+      return s;
+    }
+    file_reader.reset(new SequentialFileReader(std::move(file)));
+  }
+
+  std::map<uint32_t, std::string> column_family_names;
+  // default column family is always implicitly there
+  column_family_names.insert({0, kDefaultColumnFamilyName});
+  VersionSet::LogReporter reporter;
+  reporter.status = &s;
+  log::Reader reader(NULL, std::move(file_reader), &reporter, true /*checksum*/,
+                     0 /*initial_offset*/, 0);
+  Slice record;
+  std::string scratch;
+  while (reader.ReadRecord(&record, &scratch) && s.ok()) {
+    VersionEdit edit;
+    s = edit.DecodeFrom(record);
+    if (!s.ok()) {
+      break;
+    }
+    if (edit.is_column_family_add_) {
+      if (column_family_names.find(edit.column_family_) !=
+          column_family_names.end()) {
+        s = Status::Corruption("Manifest adding the same column family twice");
+        break;
+      }
+      column_family_names.insert(
+          {edit.column_family_, edit.column_family_name_});
+    } else if (edit.is_column_family_drop_) {
+      if (column_family_names.find(edit.column_family_) ==
+          column_family_names.end()) {
+        s = Status::Corruption(
+            "Manifest - dropping non-existing column family");
+        break;
+      }
+      column_family_names.erase(edit.column_family_);
+    }
+  }
+
+  column_families->clear();
+  if (s.ok()) {
+    for (const auto& iter : column_family_names) {
+      column_families->push_back(iter.second);
+    }
+  }
+
+  return s;
+}
+
+#ifndef ROCKSDB_LITE
+Status VersionSet::ReduceNumberOfLevels(const std::string& dbname,
+                                        const Options* options,
+                                        const EnvOptions& env_options,
+                                        int new_levels) {
+  if (new_levels <= 1) {
+    return Status::InvalidArgument(
+        "Number of levels needs to be bigger than 1");
+  }
+
+  ColumnFamilyOptions cf_options(*options);
+  std::shared_ptr<Cache> tc(NewLRUCache(options->max_open_files - 10,
+                                        options->table_cache_numshardbits));
+  WriteController wc(options->delayed_write_rate);
+  WriteBufferManager wb(options->db_write_buffer_size);
+  VersionSet versions(dbname, options, env_options, tc.get(), &wb, &wc);
+  Status status;
+
+  std::vector<ColumnFamilyDescriptor> dummy;
+  ColumnFamilyDescriptor dummy_descriptor(kDefaultColumnFamilyName,
+                                          ColumnFamilyOptions(*options));
+  dummy.push_back(dummy_descriptor);
+  status = versions.Recover(dummy);
+  if (!status.ok()) {
+    return status;
+  }
+
+  Version* current_version =
+      versions.GetColumnFamilySet()->GetDefault()->current();
+  auto* vstorage = current_version->storage_info();
+  int current_levels = vstorage->num_levels();
+
+  if (current_levels <= new_levels) {
+    return Status::OK();
+  }
+
+  // Make sure there are files on only one level from
+  // (new_levels-1) to (current_levels-1)
+  int first_nonempty_level = -1;
+  int first_nonempty_level_filenum = 0;
+  for (int i = new_levels - 1; i < current_levels; i++) {
+    int file_num = vstorage->NumLevelFiles(i);
+    if (file_num != 0) {
+      if (first_nonempty_level < 0) {
+        first_nonempty_level = i;
+        first_nonempty_level_filenum = file_num;
+      } else {
+        char msg[255];
+        snprintf(msg, sizeof(msg),
+                 "Found at least two levels containing files: "
+                 "[%d:%d],[%d:%d].\n",
+                 first_nonempty_level, first_nonempty_level_filenum, i,
+                 file_num);
+        return Status::InvalidArgument(msg);
+      }
+    }
+  }
+
+  // we need to allocate an array sized to the old number of levels to
+  // avoid a SIGSEGV in WriteSnapshot();
+  // however, all levels greater than or equal to new_levels will be empty
+  std::vector<FileMetaData*>* new_files_list =
+      new std::vector<FileMetaData*>[current_levels];
+  for (int i = 0; i < new_levels - 1; i++) {
+    new_files_list[i] = vstorage->LevelFiles(i);
+  }
+
+  if (first_nonempty_level > 0) {
+    new_files_list[new_levels - 1] = vstorage->LevelFiles(first_nonempty_level);
+  }
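+  // Install the shrunken level array in place of the old one (freed first).
+  // Note (illustrative, not part of the original change): the new layout
+  // only becomes durable once LogAndApply() at the end of this function
+  // writes it out through a new MANIFEST (new_descriptor_log == true).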
+  delete[] vstorage->files_;
+  vstorage->files_ = new_files_list;
+  vstorage->num_levels_ = new_levels;
+
+  MutableCFOptions mutable_cf_options(*options, ImmutableCFOptions(*options));
+  VersionEdit ve;
+  InstrumentedMutex dummy_mutex;
+  InstrumentedMutexLock l(&dummy_mutex);
+  return versions.LogAndApply(
+      versions.GetColumnFamilySet()->GetDefault(),
+      mutable_cf_options, &ve, &dummy_mutex, nullptr, true);
+}
+
+Status VersionSet::DumpManifest(Options& options, std::string& dscname,
+                                bool verbose, bool hex, bool json) {
+  // Open the specified manifest file.
+  unique_ptr<SequentialFileReader> file_reader;
+  Status s;
+  {
+    unique_ptr<SequentialFile> file;
+    s = options.env->NewSequentialFile(dscname, &file, env_options_);
+    if (!s.ok()) {
+      return s;
+    }
+    file_reader.reset(new SequentialFileReader(std::move(file)));
+  }
+
+  bool have_prev_log_number = false;
+  bool have_next_file = false;
+  bool have_last_sequence = false;
+  uint64_t next_file = 0;
+  uint64_t last_sequence = 0;
+  uint64_t previous_log_number = 0;
+  int count = 0;
+  std::unordered_map<uint32_t, std::string> comparators;
+  std::unordered_map<uint32_t, BaseReferencedVersionBuilder*> builders;
+
+  // add default column family
+  VersionEdit default_cf_edit;
+  default_cf_edit.AddColumnFamily(kDefaultColumnFamilyName);
+  default_cf_edit.SetColumnFamily(0);
+  ColumnFamilyData* default_cfd =
+      CreateColumnFamily(ColumnFamilyOptions(options), &default_cf_edit);
+  builders.insert({0, new BaseReferencedVersionBuilder(default_cfd)});
+
+  {
+    VersionSet::LogReporter reporter;
+    reporter.status = &s;
+    log::Reader reader(NULL, std::move(file_reader), &reporter,
+                       true /*checksum*/, 0 /*initial_offset*/, 0);
+    Slice record;
+    std::string scratch;
+    while (reader.ReadRecord(&record, &scratch) && s.ok()) {
+      VersionEdit edit;
+      s = edit.DecodeFrom(record);
+      if (!s.ok()) {
+        break;
+      }
+
+      // Write out each individual edit
+      if (verbose && !json) {
+        printf("%s\n", edit.DebugString(hex).c_str());
+      } else if (json) {
+        printf("%s\n", edit.DebugJSON(count, hex).c_str());
+      }
+      count++;
+
+      bool cf_in_builders =
+          builders.find(edit.column_family_) != builders.end();
+
+      if (edit.has_comparator_) {
+        comparators.insert({edit.column_family_, edit.comparator_});
+      }
+
+      ColumnFamilyData* cfd = nullptr;
+
+      if (edit.is_column_family_add_) {
+        if (cf_in_builders) {
+          s = Status::Corruption(
+              "Manifest adding the same column family twice");
+          break;
+        }
+        cfd = CreateColumnFamily(ColumnFamilyOptions(options), &edit);
+        builders.insert(
+            {edit.column_family_, new BaseReferencedVersionBuilder(cfd)});
+      } else if (edit.is_column_family_drop_) {
+        if (!cf_in_builders) {
+          s = Status::Corruption(
+              "Manifest - dropping non-existing column family");
+          break;
+        }
+        auto builder_iter = builders.find(edit.column_family_);
+        delete builder_iter->second;
+        builders.erase(builder_iter);
+        comparators.erase(edit.column_family_);
+        cfd = column_family_set_->GetColumnFamily(edit.column_family_);
+        assert(cfd != nullptr);
+        cfd->Unref();
+        delete cfd;
+        cfd = nullptr;
+      } else {
+        if (!cf_in_builders) {
+          s = Status::Corruption(
+              "Manifest record referencing unknown column family");
+          break;
+        }
+
+        cfd = column_family_set_->GetColumnFamily(edit.column_family_);
+        // this should never happen since cf_in_builders is true
+        assert(cfd != nullptr);
+
+        // if it is not a column family add or column family drop,
+        // then it's a file add/delete, which should be forwarded
+        // to the builder
+        auto builder = builders.find(edit.column_family_);
+        assert(builder != builders.end());
+        builder->second->version_builder()->Apply(&edit);
+      }
+
+      if (cfd != nullptr && edit.has_log_number_) {
+        cfd->SetLogNumber(edit.log_number_);
+      }
+
+      if (edit.has_prev_log_number_) {
+        previous_log_number = edit.prev_log_number_;
+        have_prev_log_number = true;
+      }
+
+      if (edit.has_next_file_number_) {
+        next_file = edit.next_file_number_;
+        have_next_file = true;
+      }
+
+      if (edit.has_last_sequence_) {
+        last_sequence = edit.last_sequence_;
+        have_last_sequence = true;
+      }
+
+      if (edit.has_max_column_family_) {
+        column_family_set_->UpdateMaxColumnFamily(edit.max_column_family_);
+      }
+    }
+  }
+  file_reader.reset();
+
+  if (s.ok()) {
+    if (!have_next_file) {
+      s = Status::Corruption("no meta-nextfile entry in descriptor");
+      printf("no meta-nextfile entry in descriptor");
+    } else if (!have_last_sequence) {
+      printf("no last-sequence-number entry in descriptor");
+      s = Status::Corruption("no last-sequence-number entry in descriptor");
+    }
+
+    if (!have_prev_log_number) {
+      previous_log_number = 0;
+    }
+  }
+
+  if (s.ok()) {
+    for (auto cfd : *column_family_set_) {
+      if (cfd->IsDropped()) {
+        continue;
+      }
+      auto builders_iter = builders.find(cfd->GetID());
+      assert(builders_iter != builders.end());
+      auto builder = builders_iter->second->version_builder();
+
+      Version* v = new Version(cfd, this, current_version_number_++);
+      builder->SaveTo(v->storage_info());
+      v->PrepareApply(*cfd->GetLatestMutableCFOptions(), false);
+
+      printf("--------------- Column family \"%s\" (ID %u) --------------\n",
+             cfd->GetName().c_str(), (unsigned int)cfd->GetID());
+      printf("log number: %lu\n", (unsigned long)cfd->GetLogNumber());
+      auto comparator = comparators.find(cfd->GetID());
+      if (comparator != comparators.end()) {
+        printf("comparator: %s\n", comparator->second.c_str());
+      } else {
+        printf("comparator: <NO COMPARATOR>\n");
+      }
+      printf("%s \n", v->DebugString(hex).c_str());
+      delete v;
+    }
+
+    // Free builders
+    for (auto& builder : builders) {
+      delete builder.second;
+    }
+
+    next_file_number_.store(next_file + 1);
+    last_sequence_ = last_sequence;
+    prev_log_number_ = previous_log_number;
+
+    printf(
+        "next_file_number %lu last_sequence "
+        "%lu prev_log_number %lu max_column_family %u\n",
+        (unsigned long)next_file_number_.load(), (unsigned long)last_sequence,
+        (unsigned long)previous_log_number,
+        column_family_set_->GetMaxColumnFamily());
+  }
+
+  return s;
+}
+#endif  // ROCKSDB_LITE
+
+void VersionSet::MarkFileNumberUsedDuringRecovery(uint64_t number) {
+  // only called during recovery, which is single-threaded, so this works
+  // because there can't be concurrent calls
+  if (next_file_number_.load(std::memory_order_relaxed) <= number) {
+    next_file_number_.store(number + 1, std::memory_order_relaxed);
+  }
+}
+
+Status VersionSet::WriteSnapshot(log::Writer* log) {
+  // TODO: Break up into multiple records to reduce memory usage on recovery?
+
+  // WARNING: This method doesn't hold a mutex!!
+
+  // This is done without the DB mutex held, but only within the
+  // single-threaded LogAndApply. Column family manipulations can only happen
+  // within LogAndApply (the same single thread), so we're safe to iterate.
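+  // Illustrative sketch of the two records the loop below emits per column
+  // family (identifiers and values are hypothetical, for exposition only):
+  //
+  //   VersionEdit identity;
+  //   identity.AddColumnFamily("my_cf");       // omitted for the default CF
+  //   identity.SetColumnFamily(7);
+  //   identity.SetComparatorName("leveldb.BytewiseComparator");
+  //
+  //   VersionEdit files;
+  //   files.SetColumnFamily(7);
+  //   files.AddFile(...);                      // one per live SST per level
+  //   files.SetLogNumber(42);
+  //
+  // Each edit is EncodeTo()'d into a string and appended via log->AddRecord().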
+  for (auto cfd : *column_family_set_) {
+    if (cfd->IsDropped()) {
+      continue;
+    }
+    {
+      // Store column family info
+      VersionEdit edit;
+      if (cfd->GetID() != 0) {
+        // default column family is always there,
+        // no need to explicitly write it
+        edit.AddColumnFamily(cfd->GetName());
+        edit.SetColumnFamily(cfd->GetID());
+      }
+      edit.SetComparatorName(
+          cfd->internal_comparator().user_comparator()->Name());
+      std::string record;
+      if (!edit.EncodeTo(&record)) {
+        return Status::Corruption(
+            "Unable to Encode VersionEdit:" + edit.DebugString(true));
+      }
+      Status s = log->AddRecord(record);
+      if (!s.ok()) {
+        return s;
+      }
+    }
+
+    {
+      // Save files
+      VersionEdit edit;
+      edit.SetColumnFamily(cfd->GetID());
+
+      for (int level = 0; level < cfd->NumberLevels(); level++) {
+        for (const auto& f :
+             cfd->current()->storage_info()->LevelFiles(level)) {
+          edit.AddFile(level, f->fd.GetNumber(), f->fd.GetPathId(),
+                       f->fd.GetFileSize(), f->smallest, f->largest,
+                       f->smallest_seqno, f->largest_seqno,
+                       f->marked_for_compaction);
+        }
+      }
+      edit.SetLogNumber(cfd->GetLogNumber());
+      std::string record;
+      if (!edit.EncodeTo(&record)) {
+        return Status::Corruption(
+            "Unable to Encode VersionEdit:" + edit.DebugString(true));
+      }
+      Status s = log->AddRecord(record);
+      if (!s.ok()) {
+        return s;
+      }
+    }
+  }
+
+  return Status::OK();
+}
+
+// TODO(aekmekji): in CompactionJob::GenSubcompactionBoundaries(), this
+// function is called repeatedly with consecutive pairs of slices. For example
+// if the slice list is [a, b, c, d] this function is called with arguments
+// (a,b) then (b,c) then (c,d). Knowing this, an optimization is possible where
+// we avoid doing binary search for the keys b and c twice and instead somehow
+// maintain state of where they first appear in the files.
+uint64_t VersionSet::ApproximateSize(Version* v, const Slice& start,
+                                     const Slice& end, int start_level,
+                                     int end_level) {
+  // pre-condition
+  assert(v->cfd_->internal_comparator().Compare(start, end) <= 0);
+
+  uint64_t size = 0;
+  const auto* vstorage = v->storage_info();
+  end_level = end_level == -1
+                  ? vstorage->num_non_empty_levels()
+                  : std::min(end_level, vstorage->num_non_empty_levels());
+
+  assert(start_level <= end_level);
+
+  for (int level = start_level; level < end_level; level++) {
+    const LevelFilesBrief& files_brief = vstorage->LevelFilesBrief(level);
+    if (!files_brief.num_files) {
+      // empty level, skip exploration
+      continue;
+    }
+
+    if (!level) {
+      // level 0 files are not in sorted order, handle this case explicitly
+      size += ApproximateSizeLevel0(v, files_brief, start, end);
+      continue;
+    }
+
+    assert(level > 0);
+    assert(files_brief.num_files > 0);
+
+    // identify the file position for the starting key
+    const uint64_t idx_start = FindFileInRange(
+        v->cfd_->internal_comparator(), files_brief, start,
+        /*start=*/0, static_cast<uint32_t>(files_brief.num_files - 1));
+    assert(idx_start < files_brief.num_files);
+
+    // scan all files from the starting position until the ending position
+    // inferred from the sorted order
+    for (uint64_t i = idx_start; i < files_brief.num_files; i++) {
+      uint64_t val;
+      val = ApproximateSize(v, files_brief.files[i], end);
+      if (!val) {
+        // the files after this will not have the range
+        break;
+      }
+
+      size += val;
+
+      if (i == idx_start) {
+        // subtract the bytes needed to be scanned to get to the starting
+        // key
+        val = ApproximateSize(v, files_brief.files[i], start);
+        assert(size >= val);
+        size -= val;
+      }
+    }
+  }
+
+  return size;
+}
+
+uint64_t VersionSet::ApproximateSizeLevel0(Version* v,
+                                           const LevelFilesBrief& files_brief,
+                                           const Slice& key_start,
+                                           const Slice& key_end) {
+  // level 0 files are not in sorted order, we need to iterate through
+  // the list to compute the total bytes that require scanning
+  uint64_t size = 0;
+  for (size_t i = 0; i < files_brief.num_files; i++) {
+    const uint64_t start = ApproximateSize(v, files_brief.files[i], key_start);
+    const uint64_t end = ApproximateSize(v, files_brief.files[i], key_end);
+    assert(end >= start);
+    size += end - start;
+  }
+  return size;
+}
+
+uint64_t VersionSet::ApproximateSize(Version* v, const FdWithKeyRange& f,
+                                     const Slice& key) {
+  // pre-condition
+  assert(v);
+
+  uint64_t result = 0;
+  if (v->cfd_->internal_comparator().Compare(f.largest_key, key) <= 0) {
+    // Entire file is before "key", so just add the file size
+    result = f.fd.GetFileSize();
+  } else if (v->cfd_->internal_comparator().Compare(f.smallest_key, key) > 0) {
+    // Entire file is after "key", so ignore
+    result = 0;
+  } else {
+    // "key" falls in the range for this table. Add the
+    // approximate offset of "key" within the table.
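+    // Worked example (illustrative numbers): for a 100-byte file covering
+    // keys ["d", "m"], a lookup key "h" whose in-table offset is reported
+    // as 40 contributes 40 bytes; a file entirely before "h" contributes
+    // its full size, and a file entirely after "h" contributes 0.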
+    TableReader* table_reader_ptr;
+    InternalIterator* iter = v->cfd_->table_cache()->NewIterator(
+        ReadOptions(), env_options_, v->cfd_->internal_comparator(), f.fd,
+        &table_reader_ptr);
+    if (table_reader_ptr != nullptr) {
+      result = table_reader_ptr->ApproximateOffsetOf(key);
+    }
+    delete iter;
+  }
+  return result;
+}
+
+void VersionSet::AddLiveFiles(std::vector<FileDescriptor>* live_list) {
+  // pre-calculate space requirement
+  int64_t total_files = 0;
+  for (auto cfd : *column_family_set_) {
+    Version* dummy_versions = cfd->dummy_versions();
+    for (Version* v = dummy_versions->next_; v != dummy_versions;
+         v = v->next_) {
+      const auto* vstorage = v->storage_info();
+      for (int level = 0; level < vstorage->num_levels(); level++) {
+        total_files += vstorage->LevelFiles(level).size();
+      }
+    }
+  }
+
+  // just one time extension to the right size
+  live_list->reserve(live_list->size() + static_cast<size_t>(total_files));
+
+  for (auto cfd : *column_family_set_) {
+    auto* current = cfd->current();
+    bool found_current = false;
+    Version* dummy_versions = cfd->dummy_versions();
+    for (Version* v = dummy_versions->next_; v != dummy_versions;
+         v = v->next_) {
+      v->AddLiveFiles(live_list);
+      if (v == current) {
+        found_current = true;
+      }
+    }
+    if (!found_current && current != nullptr) {
+      // Should never happen unless it is a bug.
+      assert(false);
+      current->AddLiveFiles(live_list);
+    }
+  }
+}
+
+InternalIterator* VersionSet::MakeInputIterator(const Compaction* c) {
+  auto cfd = c->column_family_data();
+  ReadOptions read_options;
+  read_options.verify_checksums =
+      c->mutable_cf_options()->verify_checksums_in_compaction;
+  read_options.fill_cache = false;
+  if (c->ShouldFormSubcompactions()) {
+    read_options.total_order_seek = true;
+  }
+
+  // Level-0 files have to be merged together. For other levels,
+  // we will make a concatenating iterator per level.
+  // TODO(opt): use concatenating iterator for level-0 if there is no overlap
+  const size_t space = (c->level() == 0 ? c->input_levels(0)->num_files +
+                                              c->num_input_levels() - 1
+                                        : c->num_input_levels());
+  InternalIterator** list = new InternalIterator*[space];
+  size_t num = 0;
+  for (size_t which = 0; which < c->num_input_levels(); which++) {
+    if (c->input_levels(which)->num_files != 0) {
+      if (c->level(which) == 0) {
+        const LevelFilesBrief* flevel = c->input_levels(which);
+        for (size_t i = 0; i < flevel->num_files; i++) {
+          list[num++] = cfd->table_cache()->NewIterator(
+              read_options, env_options_compactions_,
+              cfd->internal_comparator(), flevel->files[i].fd, nullptr,
+              nullptr, /* no per level latency histogram */
+              true /* for_compaction */, nullptr /* arena */,
+              false /* skip_filters */, (int)which /* level */);
+        }
+      } else {
+        // Create concatenating iterator for the files from this level
+        list[num++] = NewTwoLevelIterator(
+            new LevelFileIteratorState(
+                cfd->table_cache(), read_options, env_options_,
+                cfd->internal_comparator(),
+                nullptr /* no per level latency histogram */,
+                true /* for_compaction */, false /* prefix enabled */,
+                false /* skip_filters */, (int)which /* level */),
+            new LevelFileNumIterator(cfd->internal_comparator(),
+                                     c->input_levels(which)));
+      }
+    }
+  }
+  assert(num <= space);
+  InternalIterator* result =
+      NewMergingIterator(&c->column_family_data()->internal_comparator(), list,
+                         static_cast<int>(num));
+  delete[] list;
+  return result;
+}
+
+// verify that the files listed in this compaction are present
+// in the current version
+bool VersionSet::VerifyCompactionFileConsistency(Compaction* c) {
+#ifndef NDEBUG
+  Version* version = c->column_family_data()->current();
+  const VersionStorageInfo* vstorage = version->storage_info();
+  if (c->input_version() != version) {
+    Log(InfoLogLevel::INFO_LEVEL, db_options_->info_log,
+        "[%s] compaction output being applied to a different base version from"
+        " input version",
+        c->column_family_data()->GetName().c_str());
+
+    if (vstorage->compaction_style_ == kCompactionStyleLevel &&
+        c->start_level() == 0 && c->num_input_levels() > 2U) {
+      // We are doing an L0->base_level compaction. The assumption is that, if
+      // the base level is not L1, levels from L1 to base_level - 1 are empty.
+      // This is ensured by allowing only one L0-to-base compaction at a time
+      // in level-based compaction, so during that time no compaction/flush
+      // can put files to those levels.
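+      // e.g. for an L0 -> L4 compaction (base_level == 4), levels 1..3 must
+      // remain empty while it runs; a file observed on any of them means the
+      // base version changed underneath this compaction.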
+      for (int l = c->start_level() + 1; l < c->output_level(); l++) {
+        if (vstorage->NumLevelFiles(l) != 0) {
+          return false;
+        }
+      }
+    }
+  }
+
+  for (size_t input = 0; input < c->num_input_levels(); ++input) {
+    int level = c->level(input);
+    for (size_t i = 0; i < c->num_input_files(input); ++i) {
+      uint64_t number = c->input(input, i)->fd.GetNumber();
+      bool found = false;
+      for (size_t j = 0; j < vstorage->files_[level].size(); j++) {
+        FileMetaData* f = vstorage->files_[level][j];
+        if (f->fd.GetNumber() == number) {
+          found = true;
+          break;
+        }
+      }
+      if (!found) {
+        return false;  // input files non-existent in current version
+      }
+    }
+  }
+#endif
+  return true;  // everything good
+}
+
+Status VersionSet::GetMetadataForFile(uint64_t number, int* filelevel,
+                                      FileMetaData** meta,
+                                      ColumnFamilyData** cfd) {
+  for (auto cfd_iter : *column_family_set_) {
+    Version* version = cfd_iter->current();
+    const auto* vstorage = version->storage_info();
+    for (int level = 0; level < vstorage->num_levels(); level++) {
+      for (const auto& file : vstorage->LevelFiles(level)) {
+        if (file->fd.GetNumber() == number) {
+          *meta = file;
+          *filelevel = level;
+          *cfd = cfd_iter;
+          return Status::OK();
+        }
+      }
+    }
+  }
+  return Status::NotFound("File not present in any level");
+}
+
+void VersionSet::GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata) {
+  for (auto cfd : *column_family_set_) {
+    if (cfd->IsDropped()) {
+      continue;
+    }
+    for (int level = 0; level < cfd->NumberLevels(); level++) {
+      for (const auto& file :
+           cfd->current()->storage_info()->LevelFiles(level)) {
+        LiveFileMetaData filemetadata;
+        filemetadata.column_family_name = cfd->GetName();
+        uint32_t path_id = file->fd.GetPathId();
+        if (path_id < db_options_->db_paths.size()) {
+          filemetadata.db_path = db_options_->db_paths[path_id].path;
+        } else {
+          assert(!db_options_->db_paths.empty());
+          filemetadata.db_path = db_options_->db_paths.back().path;
+        }
+        filemetadata.name = MakeTableFileName("", file->fd.GetNumber());
+        filemetadata.level = level;
+        filemetadata.size = file->fd.GetFileSize();
+        filemetadata.smallestkey = file->smallest.user_key().ToString();
+        filemetadata.largestkey = file->largest.user_key().ToString();
+        filemetadata.smallest_seqno = file->smallest_seqno;
+        filemetadata.largest_seqno = file->largest_seqno;
+        metadata->push_back(filemetadata);
+      }
+    }
+  }
+}
+
+void VersionSet::GetObsoleteFiles(std::vector<FileMetaData*>* files,
+                                  std::vector<std::string>* manifest_filenames,
+                                  uint64_t min_pending_output) {
+  assert(manifest_filenames->empty());
+  obsolete_manifests_.swap(*manifest_filenames);
+  std::vector<FileMetaData*> pending_files;
+  for (auto f : obsolete_files_) {
+    if (f->fd.GetNumber() < min_pending_output) {
+      files->push_back(f);
+    } else {
+      pending_files.push_back(f);
+    }
+  }
+  obsolete_files_.swap(pending_files);
+}
+
+ColumnFamilyData* VersionSet::CreateColumnFamily(
+    const ColumnFamilyOptions& cf_options, VersionEdit* edit) {
+  assert(edit->is_column_family_add_);
+
+  Version* dummy_versions = new Version(nullptr, this);
+  // Ref() dummy version once so that later we can call Unref() to delete it
+  // by avoiding calling "delete" explicitly (~Version is private)
+  dummy_versions->Ref();
+  auto new_cfd = column_family_set_->CreateColumnFamily(
+      edit->column_family_name_, edit->column_family_, dummy_versions,
+      cf_options);
+
+  Version* v = new Version(new_cfd, this, current_version_number_++);
+
+  // Fill level target base information.
+  v->storage_info()->CalculateBaseBytes(*new_cfd->ioptions(),
+                                        *new_cfd->GetLatestMutableCFOptions());
+  AppendVersion(new_cfd, v);
+  // GetLatestMutableCFOptions() is safe here without mutex since the
+  // cfd is not available to clients yet
+  new_cfd->CreateNewMemtable(*new_cfd->GetLatestMutableCFOptions(),
+                             LastSequence());
+  new_cfd->SetLogNumber(edit->log_number_);
+  return new_cfd;
+}
+
+uint64_t VersionSet::GetNumLiveVersions(Version* dummy_versions) {
+  uint64_t count = 0;
+  for (Version* v = dummy_versions->next_; v != dummy_versions; v = v->next_) {
+    count++;
+  }
+  return count;
+}
+
+uint64_t VersionSet::GetTotalSstFilesSize(Version* dummy_versions) {
+  std::unordered_set<uint64_t> unique_files;
+  uint64_t total_files_size = 0;
+  for (Version* v = dummy_versions->next_; v != dummy_versions; v = v->next_) {
+    VersionStorageInfo* storage_info = v->storage_info();
+    for (int level = 0; level < storage_info->num_levels_; level++) {
+      for (const auto& file_meta : storage_info->LevelFiles(level)) {
+        if (unique_files.find(file_meta->fd.packed_number_and_path_id) ==
+            unique_files.end()) {
+          unique_files.insert(file_meta->fd.packed_number_and_path_id);
+          total_files_size += file_meta->fd.GetFileSize();
+        }
+      }
+    }
+  }
+  return total_files_size;
+}
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/version_set.h b/external/rocksdb/db/version_set.h
new file mode 100755
index 0000000000..8fe7c47ce2
--- /dev/null
+++ b/external/rocksdb/db/version_set.h
@@ -0,0 +1,797 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// The representation of a DBImpl consists of a set of Versions. The
+// newest version is called "current". Older versions may be kept
+// around to provide a consistent view to live iterators.
+//
+// Each Version keeps track of a set of Table files per level. The
+// entire set of versions is maintained in a VersionSet.
+//
+// Version,VersionSet are thread-compatible, but require external
+// synchronization on all accesses.
+
+#pragma once
+#include <atomic>
+#include <deque>
+#include <limits>
+#include <map>
+#include <memory>
+#include <set>
+#include <string>
+#include <utility>
+#include <vector>
+
+#include "db/dbformat.h"
+#include "db/version_builder.h"
+#include "db/version_edit.h"
+#include "port/port.h"
+#include "db/table_cache.h"
+#include "db/compaction.h"
+#include "db/compaction_picker.h"
+#include "db/column_family.h"
+#include "db/log_reader.h"
+#include "db/file_indexer.h"
+#include "db/write_controller.h"
+#include "rocksdb/env.h"
+#include "util/instrumented_mutex.h"
+
+namespace rocksdb {
+
+namespace log {
+class Writer;
+}
+
+class Compaction;
+class InternalIterator;
+class LogBuffer;
+class LookupKey;
+class MemTable;
+class Version;
+class VersionSet;
+class WriteBufferManager;
+class MergeContext;
+class ColumnFamilySet;
+class TableCache;
+class MergeIteratorBuilder;
+
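+// A sketch of how FindFile() below is typically used for a point lookup on a
+// sorted level (illustrative only; "target" is a hypothetical internal key):
+//
+//   int idx = FindFile(icmp, level_files, target.Encode());
+//   if (static_cast<size_t>(idx) < level_files.num_files) {
+//     // level_files.files[idx] is the only file that may contain target.
+//   }
+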
+// Return the smallest index i such that file_level.files[i]->largest >= key.
+// Return file_level.num_files if there is no such file.
+// REQUIRES: "file_level.files" contains a sorted list of
+// non-overlapping files.
+extern int FindFile(const InternalKeyComparator& icmp,
+                    const LevelFilesBrief& file_level, const Slice& key);
+
+// Returns true iff some file in "files" overlaps the user key range
+// [*smallest,*largest].
+// smallest==nullptr represents a key smaller than all keys in the DB.
+// largest==nullptr represents a key larger than all keys in the DB.
+// REQUIRES: If disjoint_sorted_files, file_level.files[]
+// contains disjoint ranges in sorted order.
+extern bool SomeFileOverlapsRange(const InternalKeyComparator& icmp,
+                                  bool disjoint_sorted_files,
+                                  const LevelFilesBrief& file_level,
+                                  const Slice* smallest_user_key,
+                                  const Slice* largest_user_key);
+
+// Generate LevelFilesBrief from a vector of FileMetaData*.
+// Would copy smallest_key and largest_key data to sequential memory
+// arena: Arena used to allocate the memory
+extern void DoGenerateLevelFilesBrief(LevelFilesBrief* file_level,
+                                      const std::vector<FileMetaData*>& files,
+                                      Arena* arena);
+
+class VersionStorageInfo {
+ public:
+  VersionStorageInfo(const InternalKeyComparator* internal_comparator,
+                     const Comparator* user_comparator, int num_levels,
+                     CompactionStyle compaction_style,
+                     VersionStorageInfo* src_vstorage);
+  ~VersionStorageInfo();
+
+  void Reserve(int level, size_t size) { files_[level].reserve(size); }
+
+  void AddFile(int level, FileMetaData* f, Logger* info_log = nullptr);
+
+  void SetFinalized();
+
+  // Update num_non_empty_levels_.
+  void UpdateNumNonEmptyLevels();
+
+  void GenerateFileIndexer() {
+    file_indexer_.UpdateIndex(&arena_, num_non_empty_levels_, files_);
+  }
+
+  // Update the accumulated stats from a file-meta.
+  void UpdateAccumulatedStats(FileMetaData* file_meta);
+
+  // Decrease the current stats from a to-be-deleted file-meta
+  void RemoveCurrentStats(FileMetaData* file_meta);
+
+  void ComputeCompensatedSizes();
+
+  // Updates internal structures that keep track of compaction scores.
+  // We use compaction scores to figure out which compaction to do next.
+  // REQUIRES: db_mutex held!!
+  // TODO find a better way to pass compaction_options_fifo.
+  void ComputeCompactionScore(const MutableCFOptions& mutable_cf_options);
+
+  // Estimate est_comp_needed_bytes_
+  void EstimateCompactionBytesNeeded(
+      const MutableCFOptions& mutable_cf_options);
+
+  // This computes files_marked_for_compaction_ and is called by
+  // ComputeCompactionScore()
+  void ComputeFilesMarkedForCompaction();
+
+  // Generate level_files_brief_ from files_
+  void GenerateLevelFilesBrief();
+  // Sort all files for this version based on their file size and
+  // record results in files_by_compaction_pri_. The largest files are listed
+  // first.
+  void UpdateFilesByCompactionPri(const MutableCFOptions& mutable_cf_options);
+
+  void GenerateLevel0NonOverlapping();
+  bool level0_non_overlapping() const {
+    return level0_non_overlapping_;
+  }
+
+  int MaxInputLevel() const;
+
+  // Return level number that has idx'th highest score
+  int CompactionScoreLevel(int idx) const { return compaction_level_[idx]; }
+
+  // Return idx'th highest score
+  double CompactionScore(int idx) const { return compaction_score_[idx]; }
+
+  void GetOverlappingInputs(
+      int level, const InternalKey* begin,  // nullptr means before all keys
+      const InternalKey* end,               // nullptr means after all keys
+      std::vector<FileMetaData*>* inputs,
+      int hint_index = -1,         // index of overlap file
+      int* file_index = nullptr,   // return index of overlap file
+      bool expand_range = true)    // if set, returns files which overlap the
+      const;                       // range and overlap each other. If false,
+                                   // then just files intersecting the range
+
+  void GetOverlappingInputsBinarySearch(
+      int level,
+      const Slice& begin,  // nullptr means before all keys
+      const Slice& end,    // nullptr means after all keys
+      std::vector<FileMetaData*>* inputs,
+      int hint_index,          // index of overlap file
+      int* file_index) const;  // return index of overlap file
+
+  void ExtendOverlappingInputs(
+      int level,
+      const Slice& begin,  // nullptr means before all keys
+      const Slice& end,    // nullptr means after all keys
+      std::vector<FileMetaData*>* inputs,
+      unsigned int index) const;  // start extending from this index
+
+  // Returns true iff some file in the specified level overlaps
+  // some part of [*smallest_user_key,*largest_user_key].
+  // smallest_user_key==NULL represents a key smaller than all keys in the DB.
+  // largest_user_key==NULL represents a key larger than all keys in the DB.
+  bool OverlapInLevel(int level, const Slice* smallest_user_key,
+                      const Slice* largest_user_key);
+
+  // Returns true iff the first or last file in inputs contains
+  // an overlapping user key to the file "just outside" of it (i.e.
+  // just after the last file, or just before the first file)
+  // REQUIRES: "*inputs" is a sorted list of non-overlapping files
+  bool HasOverlappingUserKey(const std::vector<FileMetaData*>* inputs,
+                             int level);
+
+  int num_levels() const { return num_levels_; }
+
+  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+  int num_non_empty_levels() const {
+    assert(finalized_);
+    return num_non_empty_levels_;
+  }
+
+  // REQUIRES: This version has been finalized.
+  // (CalculateBaseBytes() is called)
+  // This may or may not return the number of level-0 files. It is to keep
+  // backward-compatible behavior in universal compaction.
+  int l0_delay_trigger_count() const { return l0_delay_trigger_count_; }
+
+  void set_l0_delay_trigger_count(int v) { l0_delay_trigger_count_ = v; }
+
+  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+  int NumLevelFiles(int level) const {
+    assert(finalized_);
+    return static_cast<int>(files_[level].size());
+  }
+
+  // Return the combined file size of all files at the specified level.
+  uint64_t NumLevelBytes(int level) const;
+
+  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+  const std::vector<FileMetaData*>& LevelFiles(int level) const {
+    return files_[level];
+  }
+
+  const rocksdb::LevelFilesBrief& LevelFilesBrief(int level) const {
+    assert(level < static_cast<int>(level_files_brief_.size()));
+    return level_files_brief_[level];
+  }
+
+  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+  const std::vector<int>& FilesByCompactionPri(int level) const {
+    assert(finalized_);
+    return files_by_compaction_pri_[level];
+  }
+
+  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+  // REQUIRES: DB mutex held during access
+  const autovector<std::pair<int, FileMetaData*>>& FilesMarkedForCompaction()
+      const {
+    assert(finalized_);
+    return files_marked_for_compaction_;
+  }
+
+  int base_level() const { return base_level_; }
+
+  // REQUIRES: lock is held
+  // Set the index that is used to offset into files_by_compaction_pri_ to find
+  // the next compaction candidate file.
+  void SetNextCompactionIndex(int level, int index) {
+    next_file_to_compact_by_size_[level] = index;
+  }
+
+  // REQUIRES: lock is held
+  int NextCompactionIndex(int level) const {
+    return next_file_to_compact_by_size_[level];
+  }
+
+  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+  const FileIndexer& file_indexer() const {
+    assert(finalized_);
+    return file_indexer_;
+  }
+
+  // Only the first few entries of files_by_compaction_pri_ are sorted.
+  // There is no need to sort all the files because it is likely
+  // that on a running system, we need to look at only the first
+  // few largest files because a new version is created every few
+  // seconds/minutes (because of concurrent compactions).
+  static const size_t kNumberFilesToSort = 50;
+
+  // Return a human-readable short (single-line) summary of the number
+  // of files per level. Uses *scratch as backing store.
+  struct LevelSummaryStorage {
+    char buffer[1000];
+  };
+  struct FileSummaryStorage {
+    char buffer[3000];
+  };
+  const char* LevelSummary(LevelSummaryStorage* scratch) const;
+  // Return a human-readable short (single-line) summary of files
+  // in a specified level. Uses *scratch as backing store.
+  const char* LevelFileSummary(FileSummaryStorage* scratch, int level) const;
+
+  // Return the maximum overlapping data (in bytes) at next level for any
+  // file at a level >= 1.
+  int64_t MaxNextLevelOverlappingBytes();
+
+  // Return a human readable string that describes this version's contents.
+  std::string DebugString(bool hex = false) const;
+
+  uint64_t GetAverageValueSize() const {
+    if (accumulated_num_non_deletions_ == 0) {
+      return 0;
+    }
+    assert(accumulated_raw_key_size_ + accumulated_raw_value_size_ > 0);
+    assert(accumulated_file_size_ > 0);
+    return accumulated_raw_value_size_ / accumulated_num_non_deletions_ *
+           accumulated_file_size_ /
+           (accumulated_raw_key_size_ + accumulated_raw_value_size_);
+  }
+
+  uint64_t GetEstimatedActiveKeys() const;
+
+  double GetEstimatedCompressionRatioAtLevel(int level) const;
+
+  // re-initializes the index that is used to offset into
+  // files_by_compaction_pri_
+  // to find the next compaction candidate file.
+  void ResetNextCompactionIndex(int level) {
+    next_file_to_compact_by_size_[level] = 0;
+  }
+
+  const InternalKeyComparator* InternalComparator() {
+    return internal_comparator_;
+  }
+
+  // Returns maximum total bytes of data on a given level.
+  uint64_t MaxBytesForLevel(int level) const;
+
+  // Must be called after any change to MutableCFOptions.
+  void CalculateBaseBytes(const ImmutableCFOptions& ioptions,
+                          const MutableCFOptions& options);
+
+  // Returns an estimate of the amount of live data in bytes.
+  uint64_t EstimateLiveDataSize() const;
+
+  uint64_t estimated_compaction_needed_bytes() const {
+    return estimated_compaction_needed_bytes_;
+  }
+
+  void TEST_set_estimated_compaction_needed_bytes(uint64_t v) {
+    estimated_compaction_needed_bytes_ = v;
+  }
+
+ private:
+  const InternalKeyComparator* internal_comparator_;
+  const Comparator* user_comparator_;
+  int num_levels_;            // Number of levels
+  int num_non_empty_levels_;  // Number of levels. Any level larger than it
+                              // is guaranteed to be empty.
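+                              // e.g. with num_levels_ == 7 and files present
+                              // only on L0 and L4, num_non_empty_levels_ == 5:
+                              // levels 5 and 6 are guaranteed empty.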
+  // Per-level max bytes
+  std::vector<uint64_t> level_max_bytes_;
+
+  // A short brief metadata of files per level
+  autovector<rocksdb::LevelFilesBrief> level_files_brief_;
+  FileIndexer file_indexer_;
+  Arena arena_;  // Used to allocate space for file_levels_
+
+  CompactionStyle compaction_style_;
+
+  // List of files per level, files in each level are arranged
+  // in increasing order of keys
+  std::vector<FileMetaData*>* files_;
+
+  // Level that L0 data should be compacted to. All levels < base_level_ should
+  // be empty. -1 if it is not level-compaction so it's not applicable.
+  int base_level_;
+
+  // A list for the same set of files that are stored in files_,
+  // but files in each level are now sorted based on file
+  // size. The file with the largest size is at the front.
+  // This vector stores the index of the file from files_.
+  std::vector<std::vector<int>> files_by_compaction_pri_;
+
+  // If true, means that files in L0 have keys with non-overlapping ranges
+  bool level0_non_overlapping_;
+
+  // An index into files_by_compaction_pri_ that specifies the first
+  // file that is not yet compacted
+  std::vector<int> next_file_to_compact_by_size_;
+
+  // Only the first few entries of files_by_compaction_pri_ are sorted.
+  // There is no need to sort all the files because it is likely
+  // that on a running system, we need to look at only the first
+  // few largest files because a new version is created every few
+  // seconds/minutes (because of concurrent compactions).
+  static const size_t number_of_files_to_sort_ = 50;
+
+  // This vector contains the list of files marked for compaction and not
+  // currently being compacted. It is protected by the DB mutex. It is
+  // calculated in ComputeCompactionScore()
+  autovector<std::pair<int, FileMetaData*>> files_marked_for_compaction_;
+
+  // Level that should be compacted next and its compaction score.
+  // Score < 1 means compaction is not strictly needed. These fields
+  // are initialized by Finalize().
+  // The most critical level to be compacted is listed first.
+  // These are used to pick the best compaction level
+  std::vector<double> compaction_score_;
+  std::vector<int> compaction_level_;
+  int l0_delay_trigger_count_ = 0;  // Count used to trigger slow down and stop
+                                    // for number of L0 files.
+
+  // the following are the sampled temporary stats.
+  // the current accumulated size of sampled files.
+  uint64_t accumulated_file_size_;
+  // the current accumulated size of all raw keys based on the sampled files.
+  uint64_t accumulated_raw_key_size_;
+  // the current accumulated size of all raw values based on the sampled files.
+  uint64_t accumulated_raw_value_size_;
+  // total number of non-deletion entries
+  uint64_t accumulated_num_non_deletions_;
+  // total number of deletion entries
+  uint64_t accumulated_num_deletions_;
+  // current number of non-deletion entries
+  uint64_t current_num_non_deletions_;
+  // current number of deletion entries
+  uint64_t current_num_deletions_;
+  // current number of file samples
+  uint64_t current_num_samples_;
+  // Estimated bytes needed to be compacted until all levels' size is down to
+  // target sizes.
+  uint64_t estimated_compaction_needed_bytes_;
+
+  bool finalized_;
+
+  friend class Version;
+  friend class VersionSet;
+  // No copying allowed
+  VersionStorageInfo(const VersionStorageInfo&) = delete;
+  void operator=(const VersionStorageInfo&) = delete;
+};
+
+class Version {
+ public:
+  // Append to *iters a sequence of iterators that will
+  // yield the contents of this Version when merged together.
+  // REQUIRES: This version has been saved (see VersionSet::SaveTo)
+  void AddIterators(const ReadOptions&, const EnvOptions& soptions,
+                    MergeIteratorBuilder* merger_iter_builder);
+
+  // Lookup the value for key. If found, store it in *val and
+  // return OK. Else return a non-OK status.
+  // Uses *operands to store merge_operator operations to apply later.
+  //
+  // If the ReadOptions.read_tier is set to do a read-only fetch, then
+  // *value_found will be set to false if it cannot be determined whether
+  // this value exists without doing IO.
+  //
+  // If the key is Deleted, *status will be set to NotFound and
+  // *key_exists will be set to true.
+  // If no key was found, *status will be set to NotFound and
+  // *key_exists will be set to false.
+  // If seq is non-null, *seq will be set to the sequence number found
+  // for the key if a key was found.
+  //
+  // REQUIRES: lock is not held
+  void Get(const ReadOptions&, const LookupKey& key, std::string* val,
+           Status* status, MergeContext* merge_context,
+           bool* value_found = nullptr, bool* key_exists = nullptr,
+           SequenceNumber* seq = nullptr);
+
+  // Loads some stats information from files. Call without mutex held. It needs
+  // to be called before applying the version to the version set.
+  void PrepareApply(const MutableCFOptions& mutable_cf_options,
+                    bool update_stats);
+
+  // Reference count management (so Versions do not disappear out from
+  // under live iterators)
+  void Ref();
+  // Decrease reference count. Delete the object if no reference left
+  // and return true. Otherwise, return false.
+  bool Unref();
+
+  // Add all files listed in the current version to *live.
+  void AddLiveFiles(std::vector<FileDescriptor>* live);
+
+  // Return a human readable string that describes this version's contents.
+  std::string DebugString(bool hex = false) const;
+
+  // Returns the version number of this version
+  uint64_t GetVersionNumber() const { return version_number_; }
+
+  // REQUIRES: lock is held
+  // On success, "tp" will contain the table properties of the file
+  // specified in "file_meta". If the file name of "file_meta" is
+  // known ahead, passing it by a non-null "fname" can save a
+  // file-name conversion.
+  Status GetTableProperties(std::shared_ptr<const TableProperties>* tp,
+                            const FileMetaData* file_meta,
+                            const std::string* fname = nullptr) const;
+
+  // REQUIRES: lock is held
+  // On success, *props will be populated with all SSTables' table properties.
+  // The keys of `props` are the sst file names, the values of `props` are the
+  // tables' properties, represented as shared_ptr.
+  Status GetPropertiesOfAllTables(TablePropertiesCollection* props);
+  Status GetPropertiesOfAllTables(TablePropertiesCollection* props, int level);
+  Status GetPropertiesOfTablesInRange(const Range* range, std::size_t n,
+                                      TablePropertiesCollection* props) const;
+
+  // REQUIRES: lock is held
+  // On success, "tp" will contain the aggregated table property among
+  // the table properties of all sst files in this version.
+  Status GetAggregatedTableProperties(
+      std::shared_ptr<const TableProperties>* tp, int level = -1);
+
+  uint64_t GetEstimatedActiveKeys() {
+    return storage_info_.GetEstimatedActiveKeys();
+  }
+
+  size_t GetMemoryUsageByTableReaders();
+
+  ColumnFamilyData* cfd() const { return cfd_; }
+
+  // Return the next Version in the linked list. Used for debug only
+  Version* TEST_Next() const {
+    return next_;
+  }
+
+  VersionStorageInfo* storage_info() { return &storage_info_; }
+
+  VersionSet* version_set() { return vset_; }
+
+  void GetColumnFamilyMetaData(ColumnFamilyMetaData* cf_meta);
+
+ private:
+  Env* env_;
+  friend class VersionSet;
+
+  const InternalKeyComparator* internal_comparator() const {
+    return storage_info_.internal_comparator_;
+  }
+  const Comparator* user_comparator() const {
+    return storage_info_.user_comparator_;
+  }
+
+  bool PrefixMayMatch(const ReadOptions& read_options,
+                      InternalIterator* level_iter,
+                      const Slice& internal_prefix) const;
+
+  // Returns true if the filter blocks in the specified level will not be
+  // checked during read operations. In certain cases (trivial move or preload),
+  // the filter block may already be cached, but we still do not access it such
+  // that it eventually expires from the cache.
+  bool IsFilterSkipped(int level, bool is_file_last_in_level = false);
+
+  // The helper function of UpdateAccumulatedStats, which may fill the missing
+  // fields of file_meta from its associated TableProperties.
+  // Returns true if it does initialize FileMetaData.
+  bool MaybeInitializeFileMetaData(FileMetaData* file_meta);
+
+  // Update the accumulated stats associated with the current version.
+  // These accumulated stats will be used in compaction.
+  void UpdateAccumulatedStats(bool update_stats);
+
+  // Sort all files for this version based on their file size and
+  // record results in files_by_compaction_pri_. The largest files are listed
+  // first.
+  void UpdateFilesByCompactionPri();
+
+  ColumnFamilyData* cfd_;  // ColumnFamilyData to which this Version belongs
+  Logger* info_log_;
+  Statistics* db_statistics_;
+  TableCache* table_cache_;
+  const MergeOperator* merge_operator_;
+
+  VersionStorageInfo storage_info_;
+  VersionSet* vset_;  // VersionSet to which this Version belongs
+  Version* next_;     // Next version in linked list
+  Version* prev_;     // Previous version in linked list
+  int refs_;          // Number of live refs to this version
+
+  // A version number that uniquely represents this version. This is
+  // used for debugging and logging purposes only.
+  uint64_t version_number_;
+
+  Version(ColumnFamilyData* cfd, VersionSet* vset, uint64_t version_number = 0);
+
+  ~Version();
+
+  // No copying allowed
+  Version(const Version&);
+  void operator=(const Version&);
+};
+
+class VersionSet {
+ public:
+  VersionSet(const std::string& dbname, const DBOptions* db_options,
+             const EnvOptions& env_options, Cache* table_cache,
+             WriteBufferManager* write_buffer_manager,
+             WriteController* write_controller);
+  ~VersionSet();
+
+  // Apply *edit to the current version to form a new descriptor that
+  // is both saved to persistent state and installed as the new
+  // current version. Will release *mu while actually writing to the file.
+  // column_family_options has to be set if edit is a column family add.
+  // REQUIRES: *mu is held on entry.
+  // REQUIRES: no other thread concurrently calls LogAndApply()
+  Status LogAndApply(
+      ColumnFamilyData* column_family_data,
+      const MutableCFOptions& mutable_cf_options, VersionEdit* edit,
+      InstrumentedMutex* mu, Directory* db_directory = nullptr,
+      bool new_descriptor_log = false,
+      const ColumnFamilyOptions* column_family_options = nullptr) {
+    autovector<VersionEdit*> edit_list;
+    edit_list.push_back(edit);
+    return LogAndApply(column_family_data, mutable_cf_options, edit_list, mu,
+                       db_directory, new_descriptor_log, column_family_options);
+  }
+  // The batch version. If edit_list.size() > 1, caller must ensure that
+  // no edit in the list is a column family add or drop.
+  Status LogAndApply(
+      ColumnFamilyData* column_family_data,
+      const MutableCFOptions& mutable_cf_options,
+      const autovector<VersionEdit*>& edit_list, InstrumentedMutex* mu,
+      Directory* db_directory = nullptr, bool new_descriptor_log = false,
+      const ColumnFamilyOptions* column_family_options = nullptr);
+
+  // Recover the last saved descriptor from persistent storage.
+  // If read_only == true, Recover() will not complain if some column families
+  // are not opened
+  Status Recover(const std::vector<ColumnFamilyDescriptor>& column_families,
+                 bool read_only = false);
+
+  // Reads a manifest file and returns a list of column families in
+  // column_families.
+  static Status ListColumnFamilies(std::vector<std::string>* column_families,
+                                   const std::string& dbname, Env* env);
+
+#ifndef ROCKSDB_LITE
+  // Try to reduce the number of levels. This call is valid when
+  // only one level from the new max level to the old
+  // max level contains files.
+  // The call is static, since the number of levels is immutable during
+  // the lifetime of a RocksDB instance. It reduces the number of levels
+  // in a DB by applying changes to the manifest.
+  // For example, a db currently has 7 levels [0-6], and a call
+  // to reduce to 5 [0-4] can only be executed when only one level
+  // among [4-6] contains files.
+  static Status ReduceNumberOfLevels(const std::string& dbname,
+                                     const Options* options,
+                                     const EnvOptions& env_options,
+                                     int new_levels);
+
+  // printf contents (for debugging)
+  Status DumpManifest(Options& options, std::string& manifestFileName,
+                      bool verbose, bool hex = false, bool json = false);
+
+#endif  // ROCKSDB_LITE
+
+  // Return the current manifest file number
+  uint64_t manifest_file_number() const { return manifest_file_number_; }
+
+  uint64_t options_file_number() const { return options_file_number_; }
+
+  uint64_t pending_manifest_file_number() const {
+    return pending_manifest_file_number_;
+  }
+
+  uint64_t current_next_file_number() const { return next_file_number_.load(); }
+
+  // Allocate and return a new file number
+  uint64_t NewFileNumber() { return next_file_number_.fetch_add(1); }
+
+  // Return the last sequence number.
+  uint64_t LastSequence() const {
+    return last_sequence_.load(std::memory_order_acquire);
+  }
+
+  // Set the last sequence number to s.
+  void SetLastSequence(uint64_t s) {
+    assert(s >= last_sequence_);
+    last_sequence_.store(s, std::memory_order_release);
+  }
+
+  // Mark the specified file number as used.
+  // REQUIRED: this is only called during single-threaded recovery
+  void MarkFileNumberUsedDuringRecovery(uint64_t number);
+
+  // Return the log file number for the log file that is currently
+  // being compacted, or zero if there is no such log file.
+  uint64_t prev_log_number() const { return prev_log_number_; }
+
+  // Returns the minimum log number such that all
+  // log numbers less than or equal to it can be deleted
+  uint64_t MinLogNumber() const {
+    uint64_t min_log_num = std::numeric_limits<uint64_t>::max();
+    for (auto cfd : *column_family_set_) {
+      // It's safe to ignore dropped column families here:
+      // cfd->IsDropped() becomes true after the drop is persisted in MANIFEST.
+      if (min_log_num > cfd->GetLogNumber() && !cfd->IsDropped()) {
+        min_log_num = cfd->GetLogNumber();
+      }
+    }
+    return min_log_num;
+  }
+
+  // Create an iterator that reads over the compaction inputs for "*c".
+  // The caller should delete the iterator when no longer needed.
+  InternalIterator* MakeInputIterator(const Compaction* c);
+
+  // Add all files listed in any live version to *live.
+  void AddLiveFiles(std::vector<FileDescriptor>* live_list);
+
+  // Return the approximate size of data to be scanned for range [start, end)
+  // in levels [start_level, end_level). If end_level == -1 it will search
+  // through all non-empty levels
+  uint64_t ApproximateSize(Version* v, const Slice& start, const Slice& end,
+                           int start_level = 0, int end_level = -1);
+
+  // Return the size of the current manifest file
+  uint64_t manifest_file_size() const { return manifest_file_size_; }
+
+  // verify that the files that we started with for a compaction
+  // still exist in the current version and in the same original level.
+  // This ensures that a concurrent compaction did not erroneously
+  // pick the same files to compact.
+  bool VerifyCompactionFileConsistency(Compaction* c);
+
+  Status GetMetadataForFile(uint64_t number, int* filelevel,
+                            FileMetaData** metadata, ColumnFamilyData** cfd);
+
+  // This function doesn't support leveldb SST filenames
+  void GetLiveFilesMetaData(std::vector<LiveFileMetaData>* metadata);
+
+  void GetObsoleteFiles(std::vector<FileMetaData*>* files,
+                        std::vector<std::string>* manifest_filenames,
+                        uint64_t min_pending_output);
+
+  ColumnFamilySet* GetColumnFamilySet() { return column_family_set_.get(); }
+  const EnvOptions& env_options() { return env_options_; }
+
+  static uint64_t GetNumLiveVersions(Version* dummy_versions);
+
+  static uint64_t GetTotalSstFilesSize(Version* dummy_versions);
+
+ private:
+  struct ManifestWriter;
+
+  friend class Version;
+  friend class DBImpl;
+
+  struct LogReporter : public log::Reader::Reporter {
+    Status* status;
+    virtual void Corruption(size_t bytes, const Status& s) override {
+      if (this->status->ok()) *this->status = s;
+    }
+  };
+
+  // ApproximateSize helper
+  uint64_t ApproximateSizeLevel0(Version* v, const LevelFilesBrief& files_brief,
+                                 const Slice& start, const Slice& end);
+
+  uint64_t ApproximateSize(Version* v, const FdWithKeyRange& f,
+                           const Slice& key);
+
+  // Save current contents to *log
+  Status WriteSnapshot(log::Writer* log);
+
+  void AppendVersion(ColumnFamilyData* column_family_data, Version* v);
+
+  ColumnFamilyData* CreateColumnFamily(const ColumnFamilyOptions& cf_options,
+                                       VersionEdit* edit);
+
+  std::unique_ptr<ColumnFamilySet> column_family_set_;
+
+  Env* const env_;
+  const std::string dbname_;
+  const DBOptions* const db_options_;
+  std::atomic<uint64_t> next_file_number_;
+  uint64_t manifest_file_number_;
+  uint64_t options_file_number_;
+  uint64_t pending_manifest_file_number_;
+  std::atomic<uint64_t> last_sequence_;
+  uint64_t prev_log_number_;  // 0 or backing store for memtable being compacted
+
+  // Opened lazily
+  unique_ptr<log::Writer> descriptor_log_;
+
+  // generates an increasing version number for every new version
+  uint64_t current_version_number_;
+
+  // Queue of writers to the manifest file
+  std::deque<ManifestWriter*> manifest_writers_;
+
+  // Current size of manifest file
+  uint64_t manifest_file_size_;
+
+  std::vector<FileMetaData*> obsolete_files_;
+  std::vector<std::string> obsolete_manifests_;
+
+  // env options for all reads and writes except compactions
+  const EnvOptions& env_options_;
+
+  // env options used for compactions. This is a copy of
+  // env_options_ but with readaheads set to readahead_compactions_.
+  const EnvOptions env_options_compactions_;
+
+  // No copying allowed
+  VersionSet(const VersionSet&);
+  void operator=(const VersionSet&);
+
+  void LogAndApplyCFHelper(VersionEdit* edit);
+  void LogAndApplyHelper(ColumnFamilyData* cfd, VersionBuilder* b, Version* v,
+                         VersionEdit* edit, InstrumentedMutex* mu);
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/version_set_test.cc b/external/rocksdb/db/version_set_test.cc
new file mode 100755
index 0000000000..98b20a1109
--- /dev/null
+++ b/external/rocksdb/db/version_set_test.cc
@@ -0,0 +1,458 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include "db/version_set.h"
+#include "util/logging.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+namespace rocksdb {
+
+class GenerateLevelFilesBriefTest : public testing::Test {
+ public:
+  std::vector<FileMetaData*> files_;
+  LevelFilesBrief file_level_;
+  Arena arena_;
+
+  GenerateLevelFilesBriefTest() { }
+
+  ~GenerateLevelFilesBriefTest() {
+    for (size_t i = 0; i < files_.size(); i++) {
+      delete files_[i];
+    }
+  }
+
+  void Add(const char* smallest, const char* largest,
+           SequenceNumber smallest_seq = 100,
+           SequenceNumber largest_seq = 100) {
+    FileMetaData* f = new FileMetaData;
+    f->fd = FileDescriptor(files_.size() + 1, 0, 0);
+    f->smallest = InternalKey(smallest, smallest_seq, kTypeValue);
+    f->largest = InternalKey(largest, largest_seq, kTypeValue);
+    files_.push_back(f);
+  }
+
+  int Compare() {
+    int diff = 0;
+    for (size_t i = 0; i < files_.size(); i++) {
+      if (file_level_.files[i].fd.GetNumber() != files_[i]->fd.GetNumber()) {
+        diff++;
+      }
+    }
+    return diff;
+  }
+};
+
+TEST_F(GenerateLevelFilesBriefTest, Empty) {
+  DoGenerateLevelFilesBrief(&file_level_, files_, &arena_);
+  ASSERT_EQ(0u, file_level_.num_files);
+  ASSERT_EQ(0, Compare());
+}
+
+TEST_F(GenerateLevelFilesBriefTest, Single) {
+  Add("p", "q");
+  DoGenerateLevelFilesBrief(&file_level_, files_, &arena_);
+  ASSERT_EQ(1u, file_level_.num_files);
+  ASSERT_EQ(0, Compare());
+}
+
+TEST_F(GenerateLevelFilesBriefTest, Multiple) {
+  Add("150", "200");
+  Add("200", "250");
+  Add("300", "350");
+  Add("400", "450");
+  DoGenerateLevelFilesBrief(&file_level_, files_, &arena_);
+  ASSERT_EQ(4u, file_level_.num_files);
+  ASSERT_EQ(0, Compare());
+}
+
+class CountingLogger : public Logger {
+ public:
+  CountingLogger() : log_count(0) {}
+  using Logger::Logv;
+  virtual void Logv(const char* format, va_list ap) override { log_count++; }
+  int log_count;
+};
+
+Options GetOptionsWithNumLevels(int num_levels,
+                                std::shared_ptr<Logger> logger) {
+  Options opt;
+  opt.num_levels = num_levels;
+  opt.info_log = logger;
+  return opt;
+}
+
+class VersionStorageInfoTest : public testing::Test {
+ public:
+  const Comparator* ucmp_;
+  InternalKeyComparator icmp_;
+  std::shared_ptr<Logger> logger_;
+  Options options_;
+  ImmutableCFOptions ioptions_;
+  MutableCFOptions mutable_cf_options_;
+  VersionStorageInfo vstorage_;
+
+  InternalKey GetInternalKey(const char* ukey,
+                             SequenceNumber smallest_seq = 100) {
+    return InternalKey(ukey, smallest_seq, kTypeValue);
+  }
+
+  VersionStorageInfoTest()
+      : ucmp_(BytewiseComparator()),
+        icmp_(ucmp_),
+        logger_(new CountingLogger()),
+        options_(GetOptionsWithNumLevels(6, logger_)),
+        ioptions_(options_),
+        mutable_cf_options_(options_, ioptions_),
+        vstorage_(&icmp_, ucmp_, 6, kCompactionStyleLevel, nullptr) {}
+
+  ~VersionStorageInfoTest() {
+    for (int i = 0; i < vstorage_.num_levels(); i++) {
+      for (auto* f : vstorage_.LevelFiles(i)) {
+        if (--f->refs == 0) {
+          delete f;
+        }
+      }
+    }
+  }
+
+  void Add(int level, uint32_t file_number, const char* smallest,
+           const char* largest, uint64_t file_size = 0) {
+    assert(level < vstorage_.num_levels());
+    FileMetaData* f = new FileMetaData;
+    f->fd = FileDescriptor(file_number, 0, file_size);
+    f->smallest = GetInternalKey(smallest, 0);
+    f->largest = GetInternalKey(largest, 0);
+    f->compensated_file_size = file_size;
+    f->refs = 0;
+    f->num_entries = 0;
+    f->num_deletions = 0;
+    vstorage_.AddFile(level, f);
+  }
+};
+
+TEST_F(VersionStorageInfoTest, MaxBytesForLevelStatic) {
+  ioptions_.level_compaction_dynamic_level_bytes = false;
+  mutable_cf_options_.max_bytes_for_level_base = 10;
+  mutable_cf_options_.max_bytes_for_level_multiplier = 5;
+  Add(4, 100U, "1", "2");
+  Add(5, 101U, "1", "2");
+
+  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(1), 10U);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(2), 50U);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(3), 250U);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(4), 1250U);
+
+  ASSERT_EQ(0, logger_->log_count);
+}
+
+TEST_F(VersionStorageInfoTest, MaxBytesForLevelDynamic) {
+  ioptions_.level_compaction_dynamic_level_bytes = true;
+  mutable_cf_options_.max_bytes_for_level_base = 1000;
+  mutable_cf_options_.max_bytes_for_level_multiplier = 5;
+  Add(5, 1U, "1", "2", 500U);
+
+  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
+  ASSERT_EQ(0, logger_->log_count);
+  ASSERT_EQ(vstorage_.base_level(), 5);
+
+  Add(5, 2U, "3", "4", 550U);
+  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
+  ASSERT_EQ(0, logger_->log_count);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(4), 210U);
+  ASSERT_EQ(vstorage_.base_level(), 4);
+
+  Add(4, 3U, "3", "4", 550U);
+  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
+  ASSERT_EQ(0, logger_->log_count);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(4), 210U);
+  ASSERT_EQ(vstorage_.base_level(), 4);
+
+  Add(3, 4U, "3", "4", 250U);
+  Add(3, 5U, "5", "7", 300U);
+  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
+  ASSERT_EQ(1, logger_->log_count);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(4), 1005U);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(3), 201U);
+  ASSERT_EQ(vstorage_.base_level(), 3);
+
+  Add(1, 6U, "3", "4", 5U);
+  Add(1, 7U, "8", "9", 5U);
+  logger_->log_count = 0;
+  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
+  ASSERT_EQ(1, logger_->log_count);
+  ASSERT_GT(vstorage_.MaxBytesForLevel(4), 1005U);
+  ASSERT_GT(vstorage_.MaxBytesForLevel(3), 1005U);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(2), 1005U);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(1), 201U);
+  ASSERT_EQ(vstorage_.base_level(), 1);
+}
+
+TEST_F(VersionStorageInfoTest, MaxBytesForLevelDynamicLotsOfData) {
+  ioptions_.level_compaction_dynamic_level_bytes = true;
+  mutable_cf_options_.max_bytes_for_level_base = 100;
+  mutable_cf_options_.max_bytes_for_level_multiplier = 2;
+  Add(0, 1U, "1", "2", 50U);
+  Add(1, 2U, "1", "2", 50U);
+  Add(2, 3U, "1", "2", 500U);
+  Add(3, 4U, "1", "2", 500U);
+  Add(4, 5U, "1", "2", 1700U);
+  Add(5, 6U, "1", "2", 500U);
+
+  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(4), 800U);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(3), 400U);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(2), 200U);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(1), 100U);
+  ASSERT_EQ(vstorage_.base_level(), 1);
+  ASSERT_EQ(0, logger_->log_count);
+}
+
+TEST_F(VersionStorageInfoTest, MaxBytesForLevelDynamicLargeLevel) {
+  uint64_t kOneGB = 1000U * 1000U * 1000U;
+  ioptions_.level_compaction_dynamic_level_bytes = true;
+  mutable_cf_options_.max_bytes_for_level_base = 10U * kOneGB;
+  mutable_cf_options_.max_bytes_for_level_multiplier = 10;
+  Add(0, 1U, "1", "2", 50U);
+  Add(3, 4U, "1", "2", 32U * kOneGB);
+  Add(4, 5U, "1", "2", 500U * kOneGB);
+  Add(5, 6U, "1", "2", 3000U * kOneGB);
+
+  vstorage_.CalculateBaseBytes(ioptions_, mutable_cf_options_);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(5), 3000U * kOneGB);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(4), 300U * kOneGB);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(3), 30U * kOneGB);
+  ASSERT_EQ(vstorage_.MaxBytesForLevel(2), 3U * kOneGB);
+  ASSERT_EQ(vstorage_.base_level(), 2);
+  ASSERT_EQ(0, logger_->log_count);
+}
+
+TEST_F(VersionStorageInfoTest, EstimateLiveDataSize) {
+  // Test whether the overlaps are detected as expected
+  Add(1, 1U, "4", "7", 1U);  // Perfect overlap with last level
+  Add(2, 2U, "3", "5", 1U);  // Partial overlap with last level
+  Add(2, 3U, "6", "8", 1U);  // Partial overlap with last level
+  Add(3, 4U, "1", "9", 1U);  // Contains range of last level
+  Add(4, 5U, "4", "5", 1U);  // Inside range of last level
+  Add(4, 5U, "6", "7", 1U);  // Inside range of last level
+  Add(5, 6U, "4", "7", 10U);
+  ASSERT_EQ(10U, vstorage_.EstimateLiveDataSize());
+}
+
+TEST_F(VersionStorageInfoTest, EstimateLiveDataSize2) {
+  Add(0, 1U, "9", "9", 1U);  // Level 0 is not ordered
+  Add(0, 1U, "5", "6", 1U);  // Ignored because of [5,6] in l1
+  Add(1, 1U, "1", "2", 1U);  // Ignored because of [2,3] in l2
+  Add(1, 2U, "3", "4", 1U);  // Ignored because of [2,3] in l2
+  Add(1, 3U, "5", "6", 1U);
+  Add(2, 4U, "2", "3", 1U);
+  Add(3, 5U, "7", "8", 1U);
+  ASSERT_EQ(4U, vstorage_.EstimateLiveDataSize());
+}
+
+class FindLevelFileTest : public testing::Test {
+ public:
+  LevelFilesBrief file_level_;
+  bool disjoint_sorted_files_;
+  Arena arena_;
+
+  FindLevelFileTest() : disjoint_sorted_files_(true) { }
+
+  ~FindLevelFileTest() {
+  }
+
+  void LevelFileInit(size_t num = 0) {
+    char* mem = arena_.AllocateAligned(num * sizeof(FdWithKeyRange));
+    file_level_.files = new (mem) FdWithKeyRange[num];
+    file_level_.num_files = 0;
+  }
+
+  void Add(const char* smallest, const char* largest,
+           SequenceNumber smallest_seq = 100,
+           SequenceNumber largest_seq = 100) {
+    InternalKey smallest_key = InternalKey(smallest, smallest_seq, kTypeValue);
+    InternalKey largest_key = InternalKey(largest, largest_seq, kTypeValue);
+
+    Slice smallest_slice = smallest_key.Encode();
+    Slice largest_slice = largest_key.Encode();
+
+    char* mem = arena_.AllocateAligned(
+        smallest_slice.size() + largest_slice.size());
+    memcpy(mem, smallest_slice.data(), smallest_slice.size());
+    memcpy(mem + smallest_slice.size(), largest_slice.data(),
+           largest_slice.size());
+
+    // add to file_level_
+    size_t num = file_level_.num_files;
+    auto& file = file_level_.files[num];
+    file.fd = FileDescriptor(num + 1, 0, 0);
+    file.smallest_key = Slice(mem, smallest_slice.size());
+    file.largest_key = Slice(mem + smallest_slice.size(),
+                             largest_slice.size());
+    file_level_.num_files++;
+  }
+
+  int Find(const char* key) {
InternalKey target(key, 100, kTypeValue); + InternalKeyComparator cmp(BytewiseComparator()); + return FindFile(cmp, file_level_, target.Encode()); + } + + bool Overlaps(const char* smallest, const char* largest) { + InternalKeyComparator cmp(BytewiseComparator()); + Slice s(smallest != nullptr ? smallest : ""); + Slice l(largest != nullptr ? largest : ""); + return SomeFileOverlapsRange(cmp, disjoint_sorted_files_, file_level_, + (smallest != nullptr ? &s : nullptr), + (largest != nullptr ? &l : nullptr)); + } +}; + +TEST_F(FindLevelFileTest, LevelEmpty) { + LevelFileInit(0); + + ASSERT_EQ(0, Find("foo")); + ASSERT_TRUE(! Overlaps("a", "z")); + ASSERT_TRUE(! Overlaps(nullptr, "z")); + ASSERT_TRUE(! Overlaps("a", nullptr)); + ASSERT_TRUE(! Overlaps(nullptr, nullptr)); +} + +TEST_F(FindLevelFileTest, LevelSingle) { + LevelFileInit(1); + + Add("p", "q"); + ASSERT_EQ(0, Find("a")); + ASSERT_EQ(0, Find("p")); + ASSERT_EQ(0, Find("p1")); + ASSERT_EQ(0, Find("q")); + ASSERT_EQ(1, Find("q1")); + ASSERT_EQ(1, Find("z")); + + ASSERT_TRUE(! Overlaps("a", "b")); + ASSERT_TRUE(! Overlaps("z1", "z2")); + ASSERT_TRUE(Overlaps("a", "p")); + ASSERT_TRUE(Overlaps("a", "q")); + ASSERT_TRUE(Overlaps("a", "z")); + ASSERT_TRUE(Overlaps("p", "p1")); + ASSERT_TRUE(Overlaps("p", "q")); + ASSERT_TRUE(Overlaps("p", "z")); + ASSERT_TRUE(Overlaps("p1", "p2")); + ASSERT_TRUE(Overlaps("p1", "z")); + ASSERT_TRUE(Overlaps("q", "q")); + ASSERT_TRUE(Overlaps("q", "q1")); + + ASSERT_TRUE(! Overlaps(nullptr, "j")); + ASSERT_TRUE(! Overlaps("r", nullptr)); + ASSERT_TRUE(Overlaps(nullptr, "p")); + ASSERT_TRUE(Overlaps(nullptr, "p1")); + ASSERT_TRUE(Overlaps("q", nullptr)); + ASSERT_TRUE(Overlaps(nullptr, nullptr)); +} + +TEST_F(FindLevelFileTest, LevelMultiple) { + LevelFileInit(4); + + Add("150", "200"); + Add("200", "250"); + Add("300", "350"); + Add("400", "450"); + ASSERT_EQ(0, Find("100")); + ASSERT_EQ(0, Find("150")); + ASSERT_EQ(0, Find("151")); + ASSERT_EQ(0, Find("199")); + ASSERT_EQ(0, Find("200")); + ASSERT_EQ(1, Find("201")); + ASSERT_EQ(1, Find("249")); + ASSERT_EQ(1, Find("250")); + ASSERT_EQ(2, Find("251")); + ASSERT_EQ(2, Find("299")); + ASSERT_EQ(2, Find("300")); + ASSERT_EQ(2, Find("349")); + ASSERT_EQ(2, Find("350")); + ASSERT_EQ(3, Find("351")); + ASSERT_EQ(3, Find("400")); + ASSERT_EQ(3, Find("450")); + ASSERT_EQ(4, Find("451")); + + ASSERT_TRUE(! Overlaps("100", "149")); + ASSERT_TRUE(! Overlaps("251", "299")); + ASSERT_TRUE(! Overlaps("451", "500")); + ASSERT_TRUE(! Overlaps("351", "399")); + + ASSERT_TRUE(Overlaps("100", "150")); + ASSERT_TRUE(Overlaps("100", "200")); + ASSERT_TRUE(Overlaps("100", "300")); + ASSERT_TRUE(Overlaps("100", "400")); + ASSERT_TRUE(Overlaps("100", "500")); + ASSERT_TRUE(Overlaps("375", "400")); + ASSERT_TRUE(Overlaps("450", "450")); + ASSERT_TRUE(Overlaps("450", "500")); +} + +TEST_F(FindLevelFileTest, LevelMultipleNullBoundaries) { + LevelFileInit(4); + + Add("150", "200"); + Add("200", "250"); + Add("300", "350"); + Add("400", "450"); + ASSERT_TRUE(! Overlaps(nullptr, "149")); + ASSERT_TRUE(! 
Overlaps("451", nullptr)); + ASSERT_TRUE(Overlaps(nullptr, nullptr)); + ASSERT_TRUE(Overlaps(nullptr, "150")); + ASSERT_TRUE(Overlaps(nullptr, "199")); + ASSERT_TRUE(Overlaps(nullptr, "200")); + ASSERT_TRUE(Overlaps(nullptr, "201")); + ASSERT_TRUE(Overlaps(nullptr, "400")); + ASSERT_TRUE(Overlaps(nullptr, "800")); + ASSERT_TRUE(Overlaps("100", nullptr)); + ASSERT_TRUE(Overlaps("200", nullptr)); + ASSERT_TRUE(Overlaps("449", nullptr)); + ASSERT_TRUE(Overlaps("450", nullptr)); +} + +TEST_F(FindLevelFileTest, LevelOverlapSequenceChecks) { + LevelFileInit(1); + + Add("200", "200", 5000, 3000); + ASSERT_TRUE(! Overlaps("199", "199")); + ASSERT_TRUE(! Overlaps("201", "300")); + ASSERT_TRUE(Overlaps("200", "200")); + ASSERT_TRUE(Overlaps("190", "200")); + ASSERT_TRUE(Overlaps("200", "210")); +} + +TEST_F(FindLevelFileTest, LevelOverlappingFiles) { + LevelFileInit(2); + + Add("150", "600"); + Add("400", "500"); + disjoint_sorted_files_ = false; + ASSERT_TRUE(! Overlaps("100", "149")); + ASSERT_TRUE(! Overlaps("601", "700")); + ASSERT_TRUE(Overlaps("100", "150")); + ASSERT_TRUE(Overlaps("100", "200")); + ASSERT_TRUE(Overlaps("100", "300")); + ASSERT_TRUE(Overlaps("100", "400")); + ASSERT_TRUE(Overlaps("100", "500")); + ASSERT_TRUE(Overlaps("375", "400")); + ASSERT_TRUE(Overlaps("450", "450")); + ASSERT_TRUE(Overlaps("450", "500")); + ASSERT_TRUE(Overlaps("450", "700")); + ASSERT_TRUE(Overlaps("600", "700")); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/wal_manager.cc b/external/rocksdb/db/wal_manager.cc new file mode 100755 index 0000000000..e57c120408 --- /dev/null +++ b/external/rocksdb/db/wal_manager.cc @@ -0,0 +1,478 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "db/wal_manager.h" + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include +#include +#include +#include + +#include "db/filename.h" +#include "db/transaction_log_impl.h" +#include "db/log_reader.h" +#include "db/log_writer.h" +#include "db/write_batch_internal.h" +#include "port/port.h" +#include "rocksdb/env.h" +#include "rocksdb/options.h" +#include "rocksdb/write_batch.h" +#include "util/coding.h" +#include "util/file_reader_writer.h" +#include "util/logging.h" +#include "util/mutexlock.h" +#include "util/sync_point.h" +#include "util/string_util.h" + +namespace rocksdb { + +#ifndef ROCKSDB_LITE + +Status WalManager::GetSortedWalFiles(VectorLogPtr& files) { + // First get sorted files in db dir, then get sorted files from archived + // dir, to avoid a race condition where a log file is moved to archived + // dir in between. + Status s; + // list wal files in main db dir. 
+ VectorLogPtr logs; + s = GetSortedWalsOfType(db_options_.wal_dir, logs, kAliveLogFile); + if (!s.ok()) { + return s; + } + + // Reproduce the race condition where a log file is moved + // to archived dir, between these two sync points, used in + // (DBTest,TransactionLogIteratorRace) + TEST_SYNC_POINT("WalManager::GetSortedWalFiles:1"); + TEST_SYNC_POINT("WalManager::GetSortedWalFiles:2"); + + files.clear(); + // list wal files in archive dir. + std::string archivedir = ArchivalDirectory(db_options_.wal_dir); + Status exists = env_->FileExists(archivedir); + if (exists.ok()) { + s = GetSortedWalsOfType(archivedir, files, kArchivedLogFile); + if (!s.ok()) { + return s; + } + } else if (!exists.IsNotFound()) { + assert(s.IsIOError()); + return s; + } + + uint64_t latest_archived_log_number = 0; + if (!files.empty()) { + latest_archived_log_number = files.back()->LogNumber(); + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "Latest Archived log: %" PRIu64, + latest_archived_log_number); + } + + files.reserve(files.size() + logs.size()); + for (auto& log : logs) { + if (log->LogNumber() > latest_archived_log_number) { + files.push_back(std::move(log)); + } else { + // When the race condition happens, we could see the + // same log in both db dir and archived dir. Simply + // ignore the one in db dir. Note that, if we read + // archived dir first, we would have missed the log file. + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "%s already moved to archive", log->PathName().c_str()); + } + } + + return s; +} + +Status WalManager::GetUpdatesSince( + SequenceNumber seq, std::unique_ptr* iter, + const TransactionLogIterator::ReadOptions& read_options, + VersionSet* version_set) { + + // Get all sorted Wal Files. + // Do binary search and open files and find the seq number. + + std::unique_ptr wal_files(new VectorLogPtr); + Status s = GetSortedWalFiles(*wal_files); + if (!s.ok()) { + return s; + } + + s = RetainProbableWalFiles(*wal_files, seq); + if (!s.ok()) { + return s; + } + iter->reset(new TransactionLogIteratorImpl( + db_options_.wal_dir, &db_options_, read_options, env_options_, seq, + std::move(wal_files), version_set)); + return (*iter)->status(); +} + +// 1. Go through all archived files and +// a. if ttl is enabled, delete outdated files +// b. if archive size limit is enabled, delete empty files, +// compute file number and size. +// 2. If size limit is enabled: +// a. compute how many files should be deleted +// b. get sorted non-empty archived logs +// c. delete what should be deleted +void WalManager::PurgeObsoleteWALFiles() { + bool const ttl_enabled = db_options_.WAL_ttl_seconds > 0; + bool const size_limit_enabled = db_options_.WAL_size_limit_MB > 0; + if (!ttl_enabled && !size_limit_enabled) { + return; + } + + int64_t current_time; + Status s = env_->GetCurrentTime(¤t_time); + if (!s.ok()) { + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "Can't get current time: %s", s.ToString().c_str()); + assert(false); + return; + } + uint64_t const now_seconds = static_cast(current_time); + uint64_t const time_to_check = (ttl_enabled && !size_limit_enabled) + ? 
db_options_.WAL_ttl_seconds / 2 + : kDefaultIntervalToDeleteObsoleteWAL; + + if (purge_wal_files_last_run_ + time_to_check > now_seconds) { + return; + } + + purge_wal_files_last_run_ = now_seconds; + + std::string archival_dir = ArchivalDirectory(db_options_.wal_dir); + std::vector files; + s = env_->GetChildren(archival_dir, &files); + if (!s.ok()) { + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "Can't get archive files: %s", s.ToString().c_str()); + assert(false); + return; + } + + size_t log_files_num = 0; + uint64_t log_file_size = 0; + + for (auto& f : files) { + uint64_t number; + FileType type; + if (ParseFileName(f, &number, &type) && type == kLogFile) { + std::string const file_path = archival_dir + "/" + f; + if (ttl_enabled) { + uint64_t file_m_time; + s = env_->GetFileModificationTime(file_path, &file_m_time); + if (!s.ok()) { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "Can't get file mod time: %s: %s", + file_path.c_str(), s.ToString().c_str()); + continue; + } + if (now_seconds - file_m_time > db_options_.WAL_ttl_seconds) { + s = env_->DeleteFile(file_path); + if (!s.ok()) { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "Can't delete file: %s: %s", + file_path.c_str(), s.ToString().c_str()); + continue; + } else { + MutexLock l(&read_first_record_cache_mutex_); + read_first_record_cache_.erase(number); + } + continue; + } + } + + if (size_limit_enabled) { + uint64_t file_size; + s = env_->GetFileSize(file_path, &file_size); + if (!s.ok()) { + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "Unable to get file size: %s: %s", + file_path.c_str(), s.ToString().c_str()); + return; + } else { + if (file_size > 0) { + log_file_size = std::max(log_file_size, file_size); + ++log_files_num; + } else { + s = env_->DeleteFile(file_path); + if (!s.ok()) { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "Unable to delete file: %s: %s", + file_path.c_str(), s.ToString().c_str()); + continue; + } else { + MutexLock l(&read_first_record_cache_mutex_); + read_first_record_cache_.erase(number); + } + } + } + } + } + } + + if (0 == log_files_num || !size_limit_enabled) { + return; + } + + size_t const files_keep_num = + db_options_.WAL_size_limit_MB * 1024 * 1024 / log_file_size; + if (log_files_num <= files_keep_num) { + return; + } + + size_t files_del_num = log_files_num - files_keep_num; + VectorLogPtr archived_logs; + GetSortedWalsOfType(archival_dir, archived_logs, kArchivedLogFile); + + if (files_del_num > archived_logs.size()) { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "Trying to delete more archived log files than " + "exist. 
Deleting all"); + files_del_num = archived_logs.size(); + } + + for (size_t i = 0; i < files_del_num; ++i) { + std::string const file_path = archived_logs[i]->PathName(); + s = env_->DeleteFile(db_options_.wal_dir + "/" + file_path); + if (!s.ok()) { + Log(InfoLogLevel::WARN_LEVEL, db_options_.info_log, + "Unable to delete file: %s: %s", file_path.c_str(), + s.ToString().c_str()); + continue; + } else { + MutexLock l(&read_first_record_cache_mutex_); + read_first_record_cache_.erase(archived_logs[i]->LogNumber()); + } + } +} + +void WalManager::ArchiveWALFile(const std::string& fname, uint64_t number) { + auto archived_log_name = ArchivedLogFileName(db_options_.wal_dir, number); + // The sync point below is used in (DBTest,TransactionLogIteratorRace) + TEST_SYNC_POINT("WalManager::PurgeObsoleteFiles:1"); + Status s = env_->RenameFile(fname, archived_log_name); + // The sync point below is used in (DBTest,TransactionLogIteratorRace) + TEST_SYNC_POINT("WalManager::PurgeObsoleteFiles:2"); + Log(InfoLogLevel::INFO_LEVEL, db_options_.info_log, + "Move log file %s to %s -- %s\n", fname.c_str(), + archived_log_name.c_str(), s.ToString().c_str()); +} + +namespace { +struct CompareLogByPointer { + bool operator()(const std::unique_ptr& a, + const std::unique_ptr& b) { + LogFileImpl* a_impl = dynamic_cast(a.get()); + LogFileImpl* b_impl = dynamic_cast(b.get()); + return *a_impl < *b_impl; + } +}; +} + +Status WalManager::GetSortedWalsOfType(const std::string& path, + VectorLogPtr& log_files, + WalFileType log_type) { + std::vector all_files; + const Status status = env_->GetChildren(path, &all_files); + if (!status.ok()) { + return status; + } + log_files.reserve(all_files.size()); + for (const auto& f : all_files) { + uint64_t number; + FileType type; + if (ParseFileName(f, &number, &type) && type == kLogFile) { + SequenceNumber sequence; + Status s = ReadFirstRecord(log_type, number, &sequence); + if (!s.ok()) { + return s; + } + if (sequence == 0) { + // empty file + continue; + } + + // Reproduce the race condition where a log file is moved + // to archived dir, between these two sync points, used in + // (DBTest,TransactionLogIteratorRace) + TEST_SYNC_POINT("WalManager::GetSortedWalsOfType:1"); + TEST_SYNC_POINT("WalManager::GetSortedWalsOfType:2"); + + uint64_t size_bytes; + s = env_->GetFileSize(LogFileName(path, number), &size_bytes); + // re-try in case the alive log file has been moved to archive. + std::string archived_file = ArchivedLogFileName(path, number); + if (!s.ok() && log_type == kAliveLogFile && + env_->FileExists(archived_file).ok()) { + s = env_->GetFileSize(archived_file, &size_bytes); + if (!s.ok() && env_->FileExists(archived_file).IsNotFound()) { + // oops, the file just got deleted from archived dir! move on + s = Status::OK(); + continue; + } + } + if (!s.ok()) { + return s; + } + + log_files.push_back(std::unique_ptr( + new LogFileImpl(number, log_type, sequence, size_bytes))); + } + } + CompareLogByPointer compare_log_files; + std::sort(log_files.begin(), log_files.end(), compare_log_files); + return status; +} + +Status WalManager::RetainProbableWalFiles(VectorLogPtr& all_logs, + const SequenceNumber target) { + int64_t start = 0; // signed to avoid overflow when target is < first file. + int64_t end = static_cast(all_logs.size()) - 1; + // Binary Search. avoid opening all files. + while (end >= start) { + int64_t mid = start + (end - start) / 2; // Avoid overflow. 
+ SequenceNumber current_seq_num = all_logs.at(mid)->StartSequence(); + if (current_seq_num == target) { + end = mid; + break; + } else if (current_seq_num < target) { + start = mid + 1; + } else { + end = mid - 1; + } + } + // end could be -ve. + size_t start_index = std::max(static_cast(0), end); + // The last wal file is always included + all_logs.erase(all_logs.begin(), all_logs.begin() + start_index); + return Status::OK(); +} + +Status WalManager::ReadFirstRecord(const WalFileType type, + const uint64_t number, + SequenceNumber* sequence) { + *sequence = 0; + if (type != kAliveLogFile && type != kArchivedLogFile) { + Log(InfoLogLevel::ERROR_LEVEL, db_options_.info_log, + "[WalManger] Unknown file type %s", ToString(type).c_str()); + return Status::NotSupported( + "File Type Not Known " + ToString(type)); + } + { + MutexLock l(&read_first_record_cache_mutex_); + auto itr = read_first_record_cache_.find(number); + if (itr != read_first_record_cache_.end()) { + *sequence = itr->second; + return Status::OK(); + } + } + Status s; + if (type == kAliveLogFile) { + std::string fname = LogFileName(db_options_.wal_dir, number); + s = ReadFirstLine(fname, number, sequence); + if (env_->FileExists(fname).ok() && !s.ok()) { + // return any error that is not caused by non-existing file + return s; + } + } + + if (type == kArchivedLogFile || !s.ok()) { + // check if the file got moved to archive. + std::string archived_file = + ArchivedLogFileName(db_options_.wal_dir, number); + s = ReadFirstLine(archived_file, number, sequence); + // maybe the file was deleted from archive dir. If that's the case, return + // Status::OK(). The caller with identify this as empty file because + // *sequence == 0 + if (!s.ok() && env_->FileExists(archived_file).IsNotFound()) { + return Status::OK(); + } + } + + if (s.ok() && *sequence != 0) { + MutexLock l(&read_first_record_cache_mutex_); + read_first_record_cache_.insert({number, *sequence}); + } + return s; +} + +// the function returns status.ok() and sequence == 0 if the file exists, but is +// empty +Status WalManager::ReadFirstLine(const std::string& fname, + const uint64_t number, + SequenceNumber* sequence) { + struct LogReporter : public log::Reader::Reporter { + Env* env; + Logger* info_log; + const char* fname; + + Status* status; + bool ignore_error; // true if db_options_.paranoid_checks==false + virtual void Corruption(size_t bytes, const Status& s) override { + Log(InfoLogLevel::WARN_LEVEL, info_log, + "[WalManager] %s%s: dropping %d bytes; %s", + (this->ignore_error ? 
"(ignoring error) " : ""), fname, + static_cast(bytes), s.ToString().c_str()); + if (this->status->ok()) { + // only keep the first error + *this->status = s; + } + } + }; + + std::unique_ptr file; + Status status = env_->NewSequentialFile(fname, &file, env_options_); + unique_ptr file_reader( + new SequentialFileReader(std::move(file))); + + if (!status.ok()) { + return status; + } + + LogReporter reporter; + reporter.env = env_; + reporter.info_log = db_options_.info_log.get(); + reporter.fname = fname.c_str(); + reporter.status = &status; + reporter.ignore_error = !db_options_.paranoid_checks; + log::Reader reader(db_options_.info_log, std::move(file_reader), &reporter, + true /*checksum*/, 0 /*initial_offset*/, number); + std::string scratch; + Slice record; + + if (reader.ReadRecord(&record, &scratch) && + (status.ok() || !db_options_.paranoid_checks)) { + if (record.size() < WriteBatchInternal::kHeader) { + reporter.Corruption(record.size(), + Status::Corruption("log record too small")); + // TODO read record's till the first no corrupt entry? + } else { + WriteBatch batch; + WriteBatchInternal::SetContents(&batch, record); + *sequence = WriteBatchInternal::Sequence(&batch); + return Status::OK(); + } + } + + // ReadRecord returns false on EOF, which means that the log file is empty. we + // return status.ok() in that case and set sequence number to 0 + *sequence = 0; + return status; +} + +#endif // ROCKSDB_LITE +} // namespace rocksdb diff --git a/external/rocksdb/db/wal_manager.h b/external/rocksdb/db/wal_manager.h new file mode 100755 index 0000000000..d27985b9d6 --- /dev/null +++ b/external/rocksdb/db/wal_manager.h @@ -0,0 +1,96 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+#pragma once
+
+#include <atomic>
+#include <deque>
+#include <limits>
+#include <set>
+#include <utility>
+#include <vector>
+#include <string>
+#include <memory>
+
+#include "port/port.h"
+
+#include "rocksdb/env.h"
+#include "rocksdb/options.h"
+#include "rocksdb/types.h"
+#include "rocksdb/transaction_log.h"
+#include "rocksdb/status.h"
+
+#include "db/version_set.h"
+
+namespace rocksdb {
+
+#ifndef ROCKSDB_LITE
+class WalManager {
+ public:
+  WalManager(const DBOptions& db_options, const EnvOptions& env_options)
+      : db_options_(db_options),
+        env_options_(env_options),
+        env_(db_options.env),
+        purge_wal_files_last_run_(0) {}
+
+  Status GetSortedWalFiles(VectorLogPtr& files);
+
+  Status GetUpdatesSince(
+      SequenceNumber seq_number, std::unique_ptr<TransactionLogIterator>* iter,
+      const TransactionLogIterator::ReadOptions& read_options,
+      VersionSet* version_set);
+
+  void PurgeObsoleteWALFiles();
+
+  void ArchiveWALFile(const std::string& fname, uint64_t number);
+
+  Status TEST_ReadFirstRecord(const WalFileType type, const uint64_t number,
+                              SequenceNumber* sequence) {
+    return ReadFirstRecord(type, number, sequence);
+  }
+
+  Status TEST_ReadFirstLine(const std::string& fname, const uint64_t number,
+                            SequenceNumber* sequence) {
+    return ReadFirstLine(fname, number, sequence);
+  }
+
+ private:
+  Status GetSortedWalsOfType(const std::string& path, VectorLogPtr& log_files,
+                             WalFileType type);
+  // Requires: all_logs should be sorted with earliest log file first
+  // Retains all log files in all_logs which contain updates with seq no.
+  // greater than or equal to the requested SequenceNumber.
+  Status RetainProbableWalFiles(VectorLogPtr& all_logs,
+                                const SequenceNumber target);
+
+  Status ReadFirstRecord(const WalFileType type, const uint64_t number,
+                         SequenceNumber* sequence);
+
+  Status ReadFirstLine(const std::string& fname, const uint64_t number,
+                       SequenceNumber* sequence);
+
+  // ------- state from DBImpl ------
+  const DBOptions& db_options_;
+  const EnvOptions& env_options_;
+  Env* env_;
+
+  // ------- WalManager state -------
+  // cache for ReadFirstRecord() calls
+  std::unordered_map<uint64_t, SequenceNumber> read_first_record_cache_;
+  port::Mutex read_first_record_cache_mutex_;
+
+  // last time when PurgeObsoleteWALFiles ran.
+  uint64_t purge_wal_files_last_run_;
+
+  // if ttl deletion is enabled and the archive size limit is disabled,
+  // obsolete files are deleted at this interval (in seconds).
+  static const uint64_t kDefaultIntervalToDeleteObsoleteWAL = 600;
+};
+
+#endif  // ROCKSDB_LITE
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/wal_manager_test.cc b/external/rocksdb/db/wal_manager_test.cc
new file mode 100755
index 0000000000..9c7ac50bac
--- /dev/null
+++ b/external/rocksdb/db/wal_manager_test.cc
@@ -0,0 +1,308 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
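Before the tests, a brief usage note: WalManager::GetUpdatesSince() is the machinery behind the public DB::GetUpdatesSince() API. A minimal, hypothetical caller might look like the sketch below (it assumes an open rocksdb::DB* whose WALs are being retained, e.g. via WAL_ttl_seconds):

#include <memory>
#include "rocksdb/db.h"
#include "rocksdb/transaction_log.h"

// Replay every write batch recorded after sequence number `since`.
void TailUpdates(rocksdb::DB* db, rocksdb::SequenceNumber since) {
  std::unique_ptr<rocksdb::TransactionLogIterator> iter;
  rocksdb::Status s = db->GetUpdatesSince(since, &iter);
  while (s.ok() && iter && iter->Valid()) {
    rocksdb::BatchResult res = iter->GetBatch();
    // res.sequence is the first sequence number in the batch;
    // res.writeBatchPtr owns the WriteBatch to inspect or replay.
    iter->Next();
  }
}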
+ +#ifndef ROCKSDB_LITE + +#include +#include + +#include "rocksdb/cache.h" +#include "rocksdb/write_batch.h" +#include "rocksdb/write_buffer_manager.h" + +#include "db/wal_manager.h" +#include "db/log_writer.h" +#include "db/column_family.h" +#include "db/version_set.h" +#include "util/file_reader_writer.h" +#include "util/mock_env.h" +#include "util/string_util.h" +#include "util/testharness.h" +#include "util/testutil.h" +#include "table/mock_table.h" +#include "db/db_impl.h" + +namespace rocksdb { + +// TODO(icanadi) mock out VersionSet +// TODO(icanadi) move other WalManager-specific tests from db_test here +class WalManagerTest : public testing::Test { + public: + WalManagerTest() + : env_(new MockEnv(Env::Default())), + dbname_(test::TmpDir() + "/wal_manager_test"), + table_cache_(NewLRUCache(50000, 16)), + write_buffer_manager_(db_options_.db_write_buffer_size), + current_log_number_(0) { + DestroyDB(dbname_, Options()); + } + + void Init() { + ASSERT_OK(env_->CreateDirIfMissing(dbname_)); + ASSERT_OK(env_->CreateDirIfMissing(ArchivalDirectory(dbname_))); + db_options_.db_paths.emplace_back(dbname_, + std::numeric_limits::max()); + db_options_.wal_dir = dbname_; + db_options_.env = env_.get(); + + versions_.reset(new VersionSet(dbname_, &db_options_, env_options_, + table_cache_.get(), &write_buffer_manager_, + &write_controller_)); + + wal_manager_.reset(new WalManager(db_options_, env_options_)); + } + + void Reopen() { + wal_manager_.reset(new WalManager(db_options_, env_options_)); + } + + // NOT thread safe + void Put(const std::string& key, const std::string& value) { + assert(current_log_writer_.get() != nullptr); + uint64_t seq = versions_->LastSequence() + 1; + WriteBatch batch; + batch.Put(key, value); + WriteBatchInternal::SetSequence(&batch, seq); + current_log_writer_->AddRecord(WriteBatchInternal::Contents(&batch)); + versions_->SetLastSequence(seq); + } + + // NOT thread safe + void RollTheLog(bool archived) { + current_log_number_++; + std::string fname = ArchivedLogFileName(dbname_, current_log_number_); + unique_ptr file; + ASSERT_OK(env_->NewWritableFile(fname, &file, env_options_)); + unique_ptr file_writer( + new WritableFileWriter(std::move(file), env_options_)); + current_log_writer_.reset(new log::Writer(std::move(file_writer), 0, false)); + } + + void CreateArchiveLogs(int num_logs, int entries_per_log) { + for (int i = 1; i <= num_logs; ++i) { + RollTheLog(true); + for (int k = 0; k < entries_per_log; ++k) { + Put(ToString(k), std::string(1024, 'a')); + } + } + } + + std::unique_ptr OpenTransactionLogIter( + const SequenceNumber seq) { + unique_ptr iter; + Status status = wal_manager_->GetUpdatesSince( + seq, &iter, TransactionLogIterator::ReadOptions(), versions_.get()); + EXPECT_OK(status); + return iter; + } + + std::unique_ptr env_; + std::string dbname_; + WriteController write_controller_; + EnvOptions env_options_; + std::shared_ptr table_cache_; + DBOptions db_options_; + WriteBufferManager write_buffer_manager_; + std::unique_ptr versions_; + std::unique_ptr wal_manager_; + + std::unique_ptr current_log_writer_; + uint64_t current_log_number_; +}; + +TEST_F(WalManagerTest, ReadFirstRecordCache) { + Init(); + std::string path = dbname_ + "/000001.log"; + unique_ptr file; + ASSERT_OK(env_->NewWritableFile(path, &file, EnvOptions())); + + SequenceNumber s; + ASSERT_OK(wal_manager_->TEST_ReadFirstLine(path, 1 /* number */, &s)); + ASSERT_EQ(s, 0U); + + ASSERT_OK( + wal_manager_->TEST_ReadFirstRecord(kAliveLogFile, 1 /* number */, &s)); + ASSERT_EQ(s, 
0U); + + unique_ptr file_writer( + new WritableFileWriter(std::move(file), EnvOptions())); + log::Writer writer(std::move(file_writer), 1, + db_options_.recycle_log_file_num > 0); + WriteBatch batch; + batch.Put("foo", "bar"); + WriteBatchInternal::SetSequence(&batch, 10); + writer.AddRecord(WriteBatchInternal::Contents(&batch)); + + // TODO(icanadi) move SpecialEnv outside of db_test, so we can reuse it here. + // Waiting for lei to finish with db_test + // env_->count_sequential_reads_ = true; + // sequential_read_counter_ sanity test + // ASSERT_EQ(env_->sequential_read_counter_.Read(), 0); + + ASSERT_OK(wal_manager_->TEST_ReadFirstRecord(kAliveLogFile, 1, &s)); + ASSERT_EQ(s, 10U); + // did a read + // TODO(icanadi) move SpecialEnv outside of db_test, so we can reuse it here + // ASSERT_EQ(env_->sequential_read_counter_.Read(), 1); + + ASSERT_OK(wal_manager_->TEST_ReadFirstRecord(kAliveLogFile, 1, &s)); + ASSERT_EQ(s, 10U); + // no new reads since the value is cached + // TODO(icanadi) move SpecialEnv outside of db_test, so we can reuse it here + // ASSERT_EQ(env_->sequential_read_counter_.Read(), 1); +} + +namespace { +uint64_t GetLogDirSize(std::string dir_path, Env* env) { + uint64_t dir_size = 0; + std::vector files; + env->GetChildren(dir_path, &files); + for (auto& f : files) { + uint64_t number; + FileType type; + if (ParseFileName(f, &number, &type) && type == kLogFile) { + std::string const file_path = dir_path + "/" + f; + uint64_t file_size; + env->GetFileSize(file_path, &file_size); + dir_size += file_size; + } + } + return dir_size; +} +std::vector ListSpecificFiles( + Env* env, const std::string& path, const FileType expected_file_type) { + std::vector files; + std::vector file_numbers; + env->GetChildren(path, &files); + uint64_t number; + FileType type; + for (size_t i = 0; i < files.size(); ++i) { + if (ParseFileName(files[i], &number, &type)) { + if (type == expected_file_type) { + file_numbers.push_back(number); + } + } + } + return file_numbers; +} + +int CountRecords(TransactionLogIterator* iter) { + int count = 0; + SequenceNumber lastSequence = 0; + BatchResult res; + while (iter->Valid()) { + res = iter->GetBatch(); + EXPECT_TRUE(res.sequence > lastSequence); + ++count; + lastSequence = res.sequence; + EXPECT_OK(iter->status()); + iter->Next(); + } + return count; +} +} // namespace + +TEST_F(WalManagerTest, WALArchivalSizeLimit) { + db_options_.WAL_ttl_seconds = 0; + db_options_.WAL_size_limit_MB = 1000; + Init(); + + // TEST : Create WalManager with huge size limit and no ttl. + // Create some archived files and call PurgeObsoleteWALFiles(). + // Count the archived log files that survived. + // Assert that all of them did. + // Change size limit. Re-open WalManager. + // Assert that archive is not greater than WAL_size_limit_MB after + // PurgeObsoleteWALFiles() + // Set ttl and time_to_check_ to small values. Re-open db. + // Assert that there are no archived logs left. 
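+  // Note: the limit is enforced approximately -- PurgeObsoleteWALFiles()
+  // derives how many files to keep by dividing WAL_size_limit_MB by the
+  // size of the largest archived log, rather than summing exact file sizes.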
+ + std::string archive_dir = ArchivalDirectory(dbname_); + CreateArchiveLogs(20, 5000); + + std::vector log_files = + ListSpecificFiles(env_.get(), archive_dir, kLogFile); + ASSERT_EQ(log_files.size(), 20U); + + db_options_.WAL_size_limit_MB = 8; + Reopen(); + wal_manager_->PurgeObsoleteWALFiles(); + + uint64_t archive_size = GetLogDirSize(archive_dir, env_.get()); + ASSERT_TRUE(archive_size <= db_options_.WAL_size_limit_MB * 1024 * 1024); + + db_options_.WAL_ttl_seconds = 1; + env_->FakeSleepForMicroseconds(2 * 1000 * 1000); + Reopen(); + wal_manager_->PurgeObsoleteWALFiles(); + + log_files = ListSpecificFiles(env_.get(), archive_dir, kLogFile); + ASSERT_TRUE(log_files.empty()); +} + +TEST_F(WalManagerTest, WALArchivalTtl) { + db_options_.WAL_ttl_seconds = 1000; + Init(); + + // TEST : Create WalManager with a ttl and no size limit. + // Create some archived log files and call PurgeObsoleteWALFiles(). + // Assert that files are not deleted + // Reopen db with small ttl. + // Assert that all archived logs was removed. + + std::string archive_dir = ArchivalDirectory(dbname_); + CreateArchiveLogs(20, 5000); + + std::vector log_files = + ListSpecificFiles(env_.get(), archive_dir, kLogFile); + ASSERT_GT(log_files.size(), 0U); + + db_options_.WAL_ttl_seconds = 1; + env_->FakeSleepForMicroseconds(3 * 1000 * 1000); + Reopen(); + wal_manager_->PurgeObsoleteWALFiles(); + + log_files = ListSpecificFiles(env_.get(), archive_dir, kLogFile); + ASSERT_TRUE(log_files.empty()); +} + +TEST_F(WalManagerTest, TransactionLogIteratorMoveOverZeroFiles) { + Init(); + RollTheLog(false); + Put("key1", std::string(1024, 'a')); + // Create a zero record WAL file. + RollTheLog(false); + RollTheLog(false); + + Put("key2", std::string(1024, 'a')); + + auto iter = OpenTransactionLogIter(0); + ASSERT_EQ(2, CountRecords(iter.get())); +} + +TEST_F(WalManagerTest, TransactionLogIteratorJustEmptyFile) { + Init(); + RollTheLog(false); + auto iter = OpenTransactionLogIter(0); + // Check that an empty iterator is returned + ASSERT_TRUE(!iter->Valid()); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + +#else +#include + +int main(int argc, char** argv) { + fprintf(stderr, "SKIPPED as WalManager is not supported in ROCKSDB_LITE\n"); + return 0; +} + +#endif // !ROCKSDB_LITE diff --git a/external/rocksdb/db/write_batch.cc b/external/rocksdb/db/write_batch.cc new file mode 100755 index 0000000000..61c2eb6f44 --- /dev/null +++ b/external/rocksdb/db/write_batch.cc @@ -0,0 +1,1173 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
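The format comment just below fixes a 12-byte header for WriteBatch::rep_: an 8-byte little-endian sequence number followed by a 4-byte little-endian count. As a self-contained illustration, here are hypothetical helpers mirroring what DecodeFixed64/DecodeFixed32 do on a little-endian host:

#include <cstdint>
#include <cstring>
#include <string>

// Read the 8-byte sequence number at offset 0 of rep_
// (caller must ensure rep.size() >= 12).
uint64_t PeekSequence(const std::string& rep) {
  uint64_t seq = 0;
  std::memcpy(&seq, rep.data(), sizeof(seq));  // little-endian host assumed
  return seq;
}

// Read the 4-byte entry count at offset 8 of rep_.
uint32_t PeekCount(const std::string& rep) {
  uint32_t count = 0;
  std::memcpy(&count, rep.data() + 8, sizeof(count));
  return count;
}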
+// +// WriteBatch::rep_ := +// sequence: fixed64 +// count: fixed32 +// data: record[count] +// record := +// kTypeValue varstring varstring +// kTypeDeletion varstring +// kTypeSingleDeletion varstring +// kTypeMerge varstring varstring +// kTypeColumnFamilyValue varint32 varstring varstring +// kTypeColumnFamilyDeletion varint32 varstring varstring +// kTypeColumnFamilySingleDeletion varint32 varstring varstring +// kTypeColumnFamilyMerge varint32 varstring varstring +// kTypeBeginPrepareXID varstring +// kTypeEndPrepareXID +// kTypeCommitXID varstring +// kTypeRollbackXID varstring +// kTypeNoop +// varstring := +// len: varint32 +// data: uint8[len] + +#include "rocksdb/write_batch.h" + +#include +#include +#include +#include + +#include "db/column_family.h" +#include "db/db_impl.h" +#include "db/dbformat.h" +#include "db/flush_scheduler.h" +#include "db/memtable.h" +#include "db/merge_context.h" +#include "db/snapshot_impl.h" +#include "db/write_batch_internal.h" +#include "rocksdb/merge_operator.h" +#include "util/coding.h" +#include "util/perf_context_imp.h" +#include "util/statistics.h" + +namespace rocksdb { + +// anon namespace for file-local types +namespace { + +enum ContentFlags : uint32_t { + DEFERRED = 1 << 0, + HAS_PUT = 1 << 1, + HAS_DELETE = 1 << 2, + HAS_SINGLE_DELETE = 1 << 3, + HAS_MERGE = 1 << 4, + HAS_BEGIN_PREPARE = 1 << 5, + HAS_END_PREPARE = 1 << 6, + HAS_COMMIT = 1 << 7, + HAS_ROLLBACK = 1 << 8, +}; + +struct BatchContentClassifier : public WriteBatch::Handler { + uint32_t content_flags = 0; + + Status PutCF(uint32_t, const Slice&, const Slice&) override { + content_flags |= ContentFlags::HAS_PUT; + return Status::OK(); + } + + Status DeleteCF(uint32_t, const Slice&) override { + content_flags |= ContentFlags::HAS_DELETE; + return Status::OK(); + } + + Status SingleDeleteCF(uint32_t, const Slice&) override { + content_flags |= ContentFlags::HAS_SINGLE_DELETE; + return Status::OK(); + } + + Status MergeCF(uint32_t, const Slice&, const Slice&) override { + content_flags |= ContentFlags::HAS_MERGE; + return Status::OK(); + } + + Status MarkBeginPrepare() override { + content_flags |= ContentFlags::HAS_BEGIN_PREPARE; + return Status::OK(); + } + + Status MarkEndPrepare(const Slice&) override { + content_flags |= ContentFlags::HAS_END_PREPARE; + return Status::OK(); + } + + Status MarkCommit(const Slice&) override { + content_flags |= ContentFlags::HAS_COMMIT; + return Status::OK(); + } + + Status MarkRollback(const Slice&) override { + content_flags |= ContentFlags::HAS_ROLLBACK; + return Status::OK(); + } +}; + +} // anon namespace + + +struct SavePoint { + size_t size; // size of rep_ + int count; // count of elements in rep_ + uint32_t content_flags; +}; + +struct SavePoints { + std::stack stack; +}; + +WriteBatch::WriteBatch(size_t reserved_bytes) + : save_points_(nullptr), content_flags_(0), rep_() { + rep_.reserve((reserved_bytes > WriteBatchInternal::kHeader) ? 
+ reserved_bytes : WriteBatchInternal::kHeader); + rep_.resize(WriteBatchInternal::kHeader); +} + +WriteBatch::WriteBatch(const std::string& rep) + : save_points_(nullptr), + content_flags_(ContentFlags::DEFERRED), + rep_(rep) {} + +WriteBatch::WriteBatch(const WriteBatch& src) + : save_points_(src.save_points_), + content_flags_(src.content_flags_.load(std::memory_order_relaxed)), + rep_(src.rep_) {} + +WriteBatch::WriteBatch(WriteBatch&& src) + : save_points_(std::move(src.save_points_)), + content_flags_(src.content_flags_.load(std::memory_order_relaxed)), + rep_(std::move(src.rep_)) {} + +WriteBatch& WriteBatch::operator=(const WriteBatch& src) { + if (&src != this) { + this->~WriteBatch(); + new (this) WriteBatch(src); + } + return *this; +} + +WriteBatch& WriteBatch::operator=(WriteBatch&& src) { + if (&src != this) { + this->~WriteBatch(); + new (this) WriteBatch(std::move(src)); + } + return *this; +} + +WriteBatch::~WriteBatch() { delete save_points_; } + +WriteBatch::Handler::~Handler() { } + +void WriteBatch::Handler::LogData(const Slice& blob) { + // If the user has not specified something to do with blobs, then we ignore + // them. +} + +bool WriteBatch::Handler::Continue() { + return true; +} + +void WriteBatch::Clear() { + rep_.clear(); + rep_.resize(WriteBatchInternal::kHeader); + + content_flags_.store(0, std::memory_order_relaxed); + + if (save_points_ != nullptr) { + while (!save_points_->stack.empty()) { + save_points_->stack.pop(); + } + } +} + +int WriteBatch::Count() const { + return WriteBatchInternal::Count(this); +} + +uint32_t WriteBatch::ComputeContentFlags() const { + auto rv = content_flags_.load(std::memory_order_relaxed); + if ((rv & ContentFlags::DEFERRED) != 0) { + BatchContentClassifier classifier; + Iterate(&classifier); + rv = classifier.content_flags; + + // this method is conceptually const, because it is performing a lazy + // computation that doesn't affect the abstract state of the batch. 
+ // content_flags_ is marked mutable so that we can perform the + // following assignment + content_flags_.store(rv, std::memory_order_relaxed); + } + return rv; +} + +bool WriteBatch::HasPut() const { + return (ComputeContentFlags() & ContentFlags::HAS_PUT) != 0; +} + +bool WriteBatch::HasDelete() const { + return (ComputeContentFlags() & ContentFlags::HAS_DELETE) != 0; +} + +bool WriteBatch::HasSingleDelete() const { + return (ComputeContentFlags() & ContentFlags::HAS_SINGLE_DELETE) != 0; +} + +bool WriteBatch::HasMerge() const { + return (ComputeContentFlags() & ContentFlags::HAS_MERGE) != 0; +} + +bool ReadKeyFromWriteBatchEntry(Slice* input, Slice* key, bool cf_record) { + assert(input != nullptr && key != nullptr); + // Skip tag byte + input->remove_prefix(1); + + if (cf_record) { + // Skip column_family bytes + uint32_t cf; + if (!GetVarint32(input, &cf)) { + return false; + } + } + + // Extract key + return GetLengthPrefixedSlice(input, key); +} + +bool WriteBatch::HasBeginPrepare() const { + return (ComputeContentFlags() & ContentFlags::HAS_BEGIN_PREPARE) != 0; +} + +bool WriteBatch::HasEndPrepare() const { + return (ComputeContentFlags() & ContentFlags::HAS_END_PREPARE) != 0; +} + +bool WriteBatch::HasCommit() const { + return (ComputeContentFlags() & ContentFlags::HAS_COMMIT) != 0; +} + +bool WriteBatch::HasRollback() const { + return (ComputeContentFlags() & ContentFlags::HAS_ROLLBACK) != 0; +} + +Status ReadRecordFromWriteBatch(Slice* input, char* tag, + uint32_t* column_family, Slice* key, + Slice* value, Slice* blob, Slice* xid) { + assert(key != nullptr && value != nullptr); + *tag = (*input)[0]; + input->remove_prefix(1); + *column_family = 0; // default + switch (*tag) { + case kTypeColumnFamilyValue: + if (!GetVarint32(input, column_family)) { + return Status::Corruption("bad WriteBatch Put"); + } + // intentional fallthrough + case kTypeValue: + if (!GetLengthPrefixedSlice(input, key) || + !GetLengthPrefixedSlice(input, value)) { + return Status::Corruption("bad WriteBatch Put"); + } + break; + case kTypeColumnFamilyDeletion: + case kTypeColumnFamilySingleDeletion: + if (!GetVarint32(input, column_family)) { + return Status::Corruption("bad WriteBatch Delete"); + } + // intentional fallthrough + case kTypeDeletion: + case kTypeSingleDeletion: + if (!GetLengthPrefixedSlice(input, key)) { + return Status::Corruption("bad WriteBatch Delete"); + } + break; + case kTypeColumnFamilyMerge: + if (!GetVarint32(input, column_family)) { + return Status::Corruption("bad WriteBatch Merge"); + } + // intentional fallthrough + case kTypeMerge: + if (!GetLengthPrefixedSlice(input, key) || + !GetLengthPrefixedSlice(input, value)) { + return Status::Corruption("bad WriteBatch Merge"); + } + break; + case kTypeLogData: + assert(blob != nullptr); + if (!GetLengthPrefixedSlice(input, blob)) { + return Status::Corruption("bad WriteBatch Blob"); + } + break; + case kTypeNoop: + case kTypeBeginPrepareXID: + break; + case kTypeEndPrepareXID: + if (!GetLengthPrefixedSlice(input, xid)) { + return Status::Corruption("bad EndPrepare XID"); + } + break; + case kTypeCommitXID: + if (!GetLengthPrefixedSlice(input, xid)) { + return Status::Corruption("bad Commit XID"); + } + break; + case kTypeRollbackXID: + if (!GetLengthPrefixedSlice(input, xid)) { + return Status::Corruption("bad Rollback XID"); + } + break; + default: + return Status::Corruption("unknown WriteBatch tag"); + } + return Status::OK(); +} + +Status WriteBatch::Iterate(Handler* handler) const { + Slice input(rep_); + if (input.size() < 
WriteBatchInternal::kHeader) { + return Status::Corruption("malformed WriteBatch (too small)"); + } + + input.remove_prefix(WriteBatchInternal::kHeader); + Slice key, value, blob, xid; + int found = 0; + Status s; + while (s.ok() && !input.empty() && handler->Continue()) { + char tag = 0; + uint32_t column_family = 0; // default + + s = ReadRecordFromWriteBatch(&input, &tag, &column_family, &key, &value, + &blob, &xid); + if (!s.ok()) { + return s; + } + + switch (tag) { + case kTypeColumnFamilyValue: + case kTypeValue: + assert(content_flags_.load(std::memory_order_relaxed) & + (ContentFlags::DEFERRED | ContentFlags::HAS_PUT)); + s = handler->PutCF(column_family, key, value); + found++; + break; + case kTypeColumnFamilyDeletion: + case kTypeDeletion: + assert(content_flags_.load(std::memory_order_relaxed) & + (ContentFlags::DEFERRED | ContentFlags::HAS_DELETE)); + s = handler->DeleteCF(column_family, key); + found++; + break; + case kTypeColumnFamilySingleDeletion: + case kTypeSingleDeletion: + assert(content_flags_.load(std::memory_order_relaxed) & + (ContentFlags::DEFERRED | ContentFlags::HAS_SINGLE_DELETE)); + s = handler->SingleDeleteCF(column_family, key); + found++; + break; + case kTypeColumnFamilyMerge: + case kTypeMerge: + assert(content_flags_.load(std::memory_order_relaxed) & + (ContentFlags::DEFERRED | ContentFlags::HAS_MERGE)); + s = handler->MergeCF(column_family, key, value); + found++; + break; + case kTypeLogData: + handler->LogData(blob); + break; + case kTypeBeginPrepareXID: + assert(content_flags_.load(std::memory_order_relaxed) & + (ContentFlags::DEFERRED | ContentFlags::HAS_BEGIN_PREPARE)); + handler->MarkBeginPrepare(); + break; + case kTypeEndPrepareXID: + assert(content_flags_.load(std::memory_order_relaxed) & + (ContentFlags::DEFERRED | ContentFlags::HAS_END_PREPARE)); + handler->MarkEndPrepare(xid); + break; + case kTypeCommitXID: + assert(content_flags_.load(std::memory_order_relaxed) & + (ContentFlags::DEFERRED | ContentFlags::HAS_COMMIT)); + handler->MarkCommit(xid); + break; + case kTypeRollbackXID: + assert(content_flags_.load(std::memory_order_relaxed) & + (ContentFlags::DEFERRED | ContentFlags::HAS_ROLLBACK)); + handler->MarkRollback(xid); + break; + case kTypeNoop: + break; + default: + return Status::Corruption("unknown WriteBatch tag"); + } + } + if (!s.ok()) { + return s; + } + if (found != WriteBatchInternal::Count(this)) { + return Status::Corruption("WriteBatch has wrong count"); + } else { + return Status::OK(); + } +} + +int WriteBatchInternal::Count(const WriteBatch* b) { + return DecodeFixed32(b->rep_.data() + 8); +} + +void WriteBatchInternal::SetCount(WriteBatch* b, int n) { + EncodeFixed32(&b->rep_[8], n); +} + +SequenceNumber WriteBatchInternal::Sequence(const WriteBatch* b) { + return SequenceNumber(DecodeFixed64(b->rep_.data())); +} + +void WriteBatchInternal::SetSequence(WriteBatch* b, SequenceNumber seq) { + EncodeFixed64(&b->rep_[0], seq); +} + +size_t WriteBatchInternal::GetFirstOffset(WriteBatch* b) { + return WriteBatchInternal::kHeader; +} + +void WriteBatchInternal::Put(WriteBatch* b, uint32_t column_family_id, + const Slice& key, const Slice& value) { + WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1); + if (column_family_id == 0) { + b->rep_.push_back(static_cast(kTypeValue)); + } else { + b->rep_.push_back(static_cast(kTypeColumnFamilyValue)); + PutVarint32(&b->rep_, column_family_id); + } + PutLengthPrefixedSlice(&b->rep_, key); + PutLengthPrefixedSlice(&b->rep_, value); + b->content_flags_.store( + 
b->content_flags_.load(std::memory_order_relaxed) | ContentFlags::HAS_PUT, + std::memory_order_relaxed); +} + +void WriteBatch::Put(ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) { + WriteBatchInternal::Put(this, GetColumnFamilyID(column_family), key, value); +} + +void WriteBatchInternal::Put(WriteBatch* b, uint32_t column_family_id, + const SliceParts& key, const SliceParts& value) { + WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1); + if (column_family_id == 0) { + b->rep_.push_back(static_cast(kTypeValue)); + } else { + b->rep_.push_back(static_cast(kTypeColumnFamilyValue)); + PutVarint32(&b->rep_, column_family_id); + } + PutLengthPrefixedSliceParts(&b->rep_, key); + PutLengthPrefixedSliceParts(&b->rep_, value); + b->content_flags_.store( + b->content_flags_.load(std::memory_order_relaxed) | ContentFlags::HAS_PUT, + std::memory_order_relaxed); +} + +void WriteBatch::Put(ColumnFamilyHandle* column_family, const SliceParts& key, + const SliceParts& value) { + WriteBatchInternal::Put(this, GetColumnFamilyID(column_family), key, value); +} + +void WriteBatchInternal::InsertNoop(WriteBatch* b) { + b->rep_.push_back(static_cast(kTypeNoop)); +} + +void WriteBatchInternal::MarkEndPrepare(WriteBatch* b, const Slice& xid) { + // a manually constructed batch can only contain one prepare section + assert(b->rep_[12] == static_cast(kTypeNoop)); + + // all savepoints up to this point are cleared + if (b->save_points_ != nullptr) { + while (!b->save_points_->stack.empty()) { + b->save_points_->stack.pop(); + } + } + + // rewrite noop as begin marker + b->rep_[12] = static_cast(kTypeBeginPrepareXID); + b->rep_.push_back(static_cast(kTypeEndPrepareXID)); + PutLengthPrefixedSlice(&b->rep_, xid); + b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) | + ContentFlags::HAS_END_PREPARE | + ContentFlags::HAS_BEGIN_PREPARE, + std::memory_order_relaxed); +} + +void WriteBatchInternal::MarkCommit(WriteBatch* b, const Slice& xid) { + b->rep_.push_back(static_cast(kTypeCommitXID)); + PutLengthPrefixedSlice(&b->rep_, xid); + b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) | + ContentFlags::HAS_COMMIT, + std::memory_order_relaxed); +} + +void WriteBatchInternal::MarkRollback(WriteBatch* b, const Slice& xid) { + b->rep_.push_back(static_cast(kTypeRollbackXID)); + PutLengthPrefixedSlice(&b->rep_, xid); + b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) | + ContentFlags::HAS_ROLLBACK, + std::memory_order_relaxed); +} + +void WriteBatchInternal::Delete(WriteBatch* b, uint32_t column_family_id, + const Slice& key) { + WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1); + if (column_family_id == 0) { + b->rep_.push_back(static_cast(kTypeDeletion)); + } else { + b->rep_.push_back(static_cast(kTypeColumnFamilyDeletion)); + PutVarint32(&b->rep_, column_family_id); + } + PutLengthPrefixedSlice(&b->rep_, key); + b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) | + ContentFlags::HAS_DELETE, + std::memory_order_relaxed); +} + +void WriteBatch::Delete(ColumnFamilyHandle* column_family, const Slice& key) { + WriteBatchInternal::Delete(this, GetColumnFamilyID(column_family), key); +} + +void WriteBatchInternal::Delete(WriteBatch* b, uint32_t column_family_id, + const SliceParts& key) { + WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1); + if (column_family_id == 0) { + b->rep_.push_back(static_cast(kTypeDeletion)); + } else { + 
b->rep_.push_back(static_cast(kTypeColumnFamilyDeletion)); + PutVarint32(&b->rep_, column_family_id); + } + PutLengthPrefixedSliceParts(&b->rep_, key); + b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) | + ContentFlags::HAS_DELETE, + std::memory_order_relaxed); +} + +void WriteBatch::Delete(ColumnFamilyHandle* column_family, + const SliceParts& key) { + WriteBatchInternal::Delete(this, GetColumnFamilyID(column_family), key); +} + +void WriteBatchInternal::SingleDelete(WriteBatch* b, uint32_t column_family_id, + const Slice& key) { + WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1); + if (column_family_id == 0) { + b->rep_.push_back(static_cast(kTypeSingleDeletion)); + } else { + b->rep_.push_back(static_cast(kTypeColumnFamilySingleDeletion)); + PutVarint32(&b->rep_, column_family_id); + } + PutLengthPrefixedSlice(&b->rep_, key); + b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) | + ContentFlags::HAS_SINGLE_DELETE, + std::memory_order_relaxed); +} + +void WriteBatch::SingleDelete(ColumnFamilyHandle* column_family, + const Slice& key) { + WriteBatchInternal::SingleDelete(this, GetColumnFamilyID(column_family), key); +} + +void WriteBatchInternal::SingleDelete(WriteBatch* b, uint32_t column_family_id, + const SliceParts& key) { + WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1); + if (column_family_id == 0) { + b->rep_.push_back(static_cast(kTypeSingleDeletion)); + } else { + b->rep_.push_back(static_cast(kTypeColumnFamilySingleDeletion)); + PutVarint32(&b->rep_, column_family_id); + } + PutLengthPrefixedSliceParts(&b->rep_, key); + b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) | + ContentFlags::HAS_SINGLE_DELETE, + std::memory_order_relaxed); +} + +void WriteBatch::SingleDelete(ColumnFamilyHandle* column_family, + const SliceParts& key) { + WriteBatchInternal::SingleDelete(this, GetColumnFamilyID(column_family), key); +} + +void WriteBatchInternal::Merge(WriteBatch* b, uint32_t column_family_id, + const Slice& key, const Slice& value) { + WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1); + if (column_family_id == 0) { + b->rep_.push_back(static_cast(kTypeMerge)); + } else { + b->rep_.push_back(static_cast(kTypeColumnFamilyMerge)); + PutVarint32(&b->rep_, column_family_id); + } + PutLengthPrefixedSlice(&b->rep_, key); + PutLengthPrefixedSlice(&b->rep_, value); + b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) | + ContentFlags::HAS_MERGE, + std::memory_order_relaxed); +} + +void WriteBatch::Merge(ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) { + WriteBatchInternal::Merge(this, GetColumnFamilyID(column_family), key, value); +} + +void WriteBatchInternal::Merge(WriteBatch* b, uint32_t column_family_id, + const SliceParts& key, + const SliceParts& value) { + WriteBatchInternal::SetCount(b, WriteBatchInternal::Count(b) + 1); + if (column_family_id == 0) { + b->rep_.push_back(static_cast(kTypeMerge)); + } else { + b->rep_.push_back(static_cast(kTypeColumnFamilyMerge)); + PutVarint32(&b->rep_, column_family_id); + } + PutLengthPrefixedSliceParts(&b->rep_, key); + PutLengthPrefixedSliceParts(&b->rep_, value); + b->content_flags_.store(b->content_flags_.load(std::memory_order_relaxed) | + ContentFlags::HAS_MERGE, + std::memory_order_relaxed); +} + +void WriteBatch::Merge(ColumnFamilyHandle* column_family, + const SliceParts& key, + const SliceParts& value) { + WriteBatchInternal::Merge(this, 
+
+void WriteBatch::Merge(ColumnFamilyHandle* column_family,
+                       const SliceParts& key,
+                       const SliceParts& value) {
+  WriteBatchInternal::Merge(this, GetColumnFamilyID(column_family),
+                            key, value);
+}
+
+void WriteBatch::PutLogData(const Slice& blob) {
+  rep_.push_back(static_cast<char>(kTypeLogData));
+  PutLengthPrefixedSlice(&rep_, blob);
+}
+
+void WriteBatch::SetSavePoint() {
+  if (save_points_ == nullptr) {
+    save_points_ = new SavePoints();
+  }
+  // Record the length and count of the current batch of writes.
+  save_points_->stack.push(SavePoint{
+      GetDataSize(), Count(), content_flags_.load(std::memory_order_relaxed)});
+}
+
+Status WriteBatch::RollbackToSavePoint() {
+  if (save_points_ == nullptr || save_points_->stack.size() == 0) {
+    return Status::NotFound();
+  }
+
+  // Pop the most recent savepoint off the stack
+  SavePoint savepoint = save_points_->stack.top();
+  save_points_->stack.pop();
+
+  assert(savepoint.size <= rep_.size());
+  assert(savepoint.count <= Count());
+
+  if (savepoint.size == rep_.size()) {
+    // No changes to roll back
+  } else if (savepoint.size == 0) {
+    // Roll back everything
+    Clear();
+  } else {
+    rep_.resize(savepoint.size);
+    WriteBatchInternal::SetCount(this, savepoint.count);
+    content_flags_.store(savepoint.content_flags, std::memory_order_relaxed);
+  }
+
+  return Status::OK();
+}
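Because a batch is just a flat byte string plus a record count, a savepoint only has to remember a (size, count, flags) triple, and rollback is a truncation. A self-contained model of that scheme, independent of RocksDB:

#include <cstddef>
#include <stack>
#include <string>

struct MiniBatch {
  std::string rep;  // stand-in for the serialized record stream
  int count = 0;    // number of records in rep

  struct SavePoint {
    size_t size;
    int count;
  };
  std::stack<SavePoint> saves;

  void Put(const std::string& k, const std::string& v) {
    rep += k + v;  // a real batch would append an encoded record here
    ++count;
  }
  void SetSavePoint() { saves.push(SavePoint{rep.size(), count}); }
  bool RollbackToSavePoint() {  // RocksDB returns Status::NotFound() instead
    if (saves.empty()) return false;
    SavePoint sp = saves.top();
    saves.pop();
    rep.resize(sp.size);  // drop every record appended since the savepoint
    count = sp.count;
    return true;
  }
};

int main() {
  MiniBatch b;
  b.Put("a", "1");
  b.SetSavePoint();
  b.Put("b", "2");
  b.RollbackToSavePoint();
  return (b.count == 1 && b.rep == "a1") ? 0 : 1;
}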
+
+class MemTableInserter : public WriteBatch::Handler {
+ public:
+  SequenceNumber sequence_;
+  ColumnFamilyMemTables* const cf_mems_;
+  FlushScheduler* const flush_scheduler_;
+  const bool ignore_missing_column_families_;
+  const uint64_t recovering_log_number_;
+  // log number that all Memtables inserted into should reference
+  uint64_t log_number_ref_;
+  DBImpl* db_;
+  const bool concurrent_memtable_writes_;
+  bool* has_valid_writes_;
+  typedef std::map<MemTable*, MemTablePostProcessInfo> MemPostInfoMap;
+  MemPostInfoMap mem_post_info_map_;
+  // current recovered transaction we are rebuilding (recovery)
+  WriteBatch* rebuilding_trx_;
+
+  // cf_mems should not be shared with concurrent inserters
+  MemTableInserter(SequenceNumber sequence, ColumnFamilyMemTables* cf_mems,
+                   FlushScheduler* flush_scheduler,
+                   bool ignore_missing_column_families,
+                   uint64_t recovering_log_number, DB* db,
+                   bool concurrent_memtable_writes,
+                   bool* has_valid_writes = nullptr)
+      : sequence_(sequence),
+        cf_mems_(cf_mems),
+        flush_scheduler_(flush_scheduler),
+        ignore_missing_column_families_(ignore_missing_column_families),
+        recovering_log_number_(recovering_log_number),
+        log_number_ref_(0),
+        db_(reinterpret_cast<DBImpl*>(db)),
+        concurrent_memtable_writes_(concurrent_memtable_writes),
+        has_valid_writes_(has_valid_writes),
+        rebuilding_trx_(nullptr) {
+    assert(cf_mems_);
+  }
+
+  void set_log_number_ref(uint64_t log) { log_number_ref_ = log; }
+
+  SequenceNumber get_final_sequence() { return sequence_; }
+
+  void PostProcess() {
+    for (auto& pair : mem_post_info_map_) {
+      pair.first->BatchPostProcess(pair.second);
+    }
+  }
+
+  bool SeekToColumnFamily(uint32_t column_family_id, Status* s) {
+    // If we are in a concurrent mode, it is the caller's responsibility
+    // to clone the original ColumnFamilyMemTables so that each thread
+    // has its own instance. Otherwise, it must be guaranteed that there
+    // is no concurrent access.
+    bool found = cf_mems_->Seek(column_family_id);
+    if (!found) {
+      if (ignore_missing_column_families_) {
+        *s = Status::OK();
+      } else {
+        *s = Status::InvalidArgument(
+            "Invalid column family specified in write batch");
+      }
+      return false;
+    }
+    if (recovering_log_number_ != 0 &&
+        recovering_log_number_ < cf_mems_->GetLogNumber()) {
+      // This is true only in the recovery environment (recovering_log_number_
+      // is always 0 in the non-recovery, regular write code-path).
+      // If recovering_log_number_ < cf_mems_->GetLogNumber(), the column
+      // family already contains updates from this log. We can't apply updates
+      // twice because of update-in-place or merge workloads -- ignore the
+      // update.
+      *s = Status::OK();
+      return false;
+    }
+
+    if (has_valid_writes_ != nullptr) {
+      *has_valid_writes_ = true;
+    }
+
+    if (log_number_ref_ > 0) {
+      cf_mems_->GetMemTable()->RefLogContainingPrepSection(log_number_ref_);
+    }
+
+    return true;
+  }
+
+  virtual Status PutCF(uint32_t column_family_id, const Slice& key,
+                       const Slice& value) override {
+    if (rebuilding_trx_ != nullptr) {
+      WriteBatchInternal::Put(rebuilding_trx_, column_family_id, key, value);
+      return Status::OK();
+    }
+
+    Status seek_status;
+    if (!SeekToColumnFamily(column_family_id, &seek_status)) {
+      ++sequence_;
+      return seek_status;
+    }
+
+    MemTable* mem = cf_mems_->GetMemTable();
+    auto* moptions = mem->GetMemTableOptions();
+    if (!moptions->inplace_update_support) {
+      mem->Add(sequence_, kTypeValue, key, value, concurrent_memtable_writes_,
+               get_post_process_info(mem));
+    } else if (moptions->inplace_callback == nullptr) {
+      assert(!concurrent_memtable_writes_);
+      mem->Update(sequence_, key, value);
+      RecordTick(moptions->statistics, NUMBER_KEYS_UPDATED);
+    } else {
+      assert(!concurrent_memtable_writes_);
+      if (mem->UpdateCallback(sequence_, key, value)) {
+      } else {
+        // key not found in memtable. Do sst get, update, add
+        SnapshotImpl read_from_snapshot;
+        read_from_snapshot.number_ = sequence_;
+        ReadOptions ropts;
+        ropts.snapshot = &read_from_snapshot;
+
+        std::string prev_value;
+        std::string merged_value;
+
+        auto cf_handle = cf_mems_->GetColumnFamilyHandle();
+        if (cf_handle == nullptr) {
+          cf_handle = db_->DefaultColumnFamily();
+        }
+        Status s = db_->Get(ropts, cf_handle, key, &prev_value);
+
+        char* prev_buffer = const_cast<char*>(prev_value.c_str());
+        uint32_t prev_size = static_cast<uint32_t>(prev_value.size());
+        auto status = moptions->inplace_callback(s.ok() ? prev_buffer : nullptr,
+                                                 s.ok() ? &prev_size : nullptr,
+                                                 value, &merged_value);
+        if (status == UpdateStatus::UPDATED_INPLACE) {
+          // prev_value is updated in-place with the final value.
+          mem->Add(sequence_, kTypeValue, key, Slice(prev_buffer, prev_size));
+          RecordTick(moptions->statistics, NUMBER_KEYS_WRITTEN);
+        } else if (status == UpdateStatus::UPDATED) {
+          // merged_value contains the final value.
+          mem->Add(sequence_, kTypeValue, key, Slice(merged_value));
+          RecordTick(moptions->statistics, NUMBER_KEYS_WRITTEN);
+        }
+      }
+    }
+    // Since all Puts are logged in transaction logs (if enabled), always bump
+    // the sequence number, even if the update eventually fails and does not
+    // result in a memtable add/update.
+ sequence_++; + CheckMemtableFull(); + return Status::OK(); + } + + Status DeleteImpl(uint32_t column_family_id, const Slice& key, + ValueType delete_type) { + MemTable* mem = cf_mems_->GetMemTable(); + mem->Add(sequence_, delete_type, key, Slice(), concurrent_memtable_writes_, + get_post_process_info(mem)); + sequence_++; + CheckMemtableFull(); + return Status::OK(); + } + + virtual Status DeleteCF(uint32_t column_family_id, + const Slice& key) override { + if (rebuilding_trx_ != nullptr) { + WriteBatchInternal::Delete(rebuilding_trx_, column_family_id, key); + return Status::OK(); + } + + Status seek_status; + if (!SeekToColumnFamily(column_family_id, &seek_status)) { + ++sequence_; + return seek_status; + } + + return DeleteImpl(column_family_id, key, kTypeDeletion); + } + + virtual Status SingleDeleteCF(uint32_t column_family_id, + const Slice& key) override { + if (rebuilding_trx_ != nullptr) { + WriteBatchInternal::SingleDelete(rebuilding_trx_, column_family_id, key); + return Status::OK(); + } + + Status seek_status; + if (!SeekToColumnFamily(column_family_id, &seek_status)) { + ++sequence_; + return seek_status; + } + + return DeleteImpl(column_family_id, key, kTypeSingleDeletion); + } + + virtual Status MergeCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + assert(!concurrent_memtable_writes_); + if (rebuilding_trx_ != nullptr) { + WriteBatchInternal::Merge(rebuilding_trx_, column_family_id, key, value); + return Status::OK(); + } + + Status seek_status; + if (!SeekToColumnFamily(column_family_id, &seek_status)) { + ++sequence_; + return seek_status; + } + + MemTable* mem = cf_mems_->GetMemTable(); + auto* moptions = mem->GetMemTableOptions(); + bool perform_merge = false; + + if (moptions->max_successive_merges > 0 && db_ != nullptr) { + LookupKey lkey(key, sequence_); + + // Count the number of successive merges at the head + // of the key in the memtable + size_t num_merges = mem->CountSuccessiveMergeEntries(lkey); + + if (num_merges >= moptions->max_successive_merges) { + perform_merge = true; + } + } + + if (perform_merge) { + // 1) Get the existing value + std::string get_value; + + // Pass in the sequence number so that we also include previous merge + // operations in the same batch. + SnapshotImpl read_from_snapshot; + read_from_snapshot.number_ = sequence_; + ReadOptions read_options; + read_options.snapshot = &read_from_snapshot; + + auto cf_handle = cf_mems_->GetColumnFamilyHandle(); + if (cf_handle == nullptr) { + cf_handle = db_->DefaultColumnFamily(); + } + db_->Get(read_options, cf_handle, key, &get_value); + Slice get_value_slice = Slice(get_value); + + // 2) Apply this merge + auto merge_operator = moptions->merge_operator; + assert(merge_operator); + + std::string new_value; + + Status merge_status = MergeHelper::TimedFullMerge( + merge_operator, key, &get_value_slice, {value}, &new_value, + moptions->info_log, moptions->statistics, Env::Default()); + + if (!merge_status.ok()) { + // Failed to merge! 
+        // Store the delta in memtable
+        perform_merge = false;
+      } else {
+        // 3) Add value to memtable
+        mem->Add(sequence_, kTypeValue, key, new_value);
+      }
+    }
+
+    if (!perform_merge) {
+      // Add merge operator to memtable
+      mem->Add(sequence_, kTypeMerge, key, value);
+    }
+
+    sequence_++;
+    CheckMemtableFull();
+    return Status::OK();
+  }
+
+  void CheckMemtableFull() {
+    if (flush_scheduler_ != nullptr) {
+      auto* cfd = cf_mems_->current();
+      assert(cfd != nullptr);
+      if (cfd->mem()->ShouldScheduleFlush() &&
+          cfd->mem()->MarkFlushScheduled()) {
+        // MarkFlushScheduled only returns true if we are the one that
+        // should take action, so no need to dedup further
+        flush_scheduler_->ScheduleFlush(cfd);
+      }
+    }
+  }
+
+  Status MarkBeginPrepare() override {
+    assert(rebuilding_trx_ == nullptr);
+    assert(db_);
+
+    if (recovering_log_number_ != 0) {
+      // during recovery we rebuild a hollow transaction
+      // from all encountered prepare sections of the wal
+      if (db_->allow_2pc() == false) {
+        return Status::NotSupported(
+            "WAL contains prepared transactions. Open with "
+            "TransactionDB::Open().");
+      }
+
+      // we are now iterating through a prepared section
+      rebuilding_trx_ = new WriteBatch();
+      if (has_valid_writes_ != nullptr) {
+        *has_valid_writes_ = true;
+      }
+    } else {
+      // in non-recovery we ignore prepare markers
+      // and insert the values directly, making sure we have a
+      // log for each insertion to reference.
+      assert(log_number_ref_ > 0);
+    }
+
+    return Status::OK();
+  }
+
+  Status MarkEndPrepare(const Slice& name) override {
+    assert(db_);
+    assert((rebuilding_trx_ != nullptr) == (recovering_log_number_ != 0));
+
+    if (recovering_log_number_ != 0) {
+      assert(db_->allow_2pc());
+      db_->InsertRecoveredTransaction(recovering_log_number_, name.ToString(),
+                                      rebuilding_trx_);
+      rebuilding_trx_ = nullptr;
+    } else {
+      assert(rebuilding_trx_ == nullptr);
+      assert(log_number_ref_ > 0);
+    }
+
+    return Status::OK();
+  }
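The prepare/commit markers give a two-phase-commit batch a fixed shape: a noop slot at byte 12 that MarkEndPrepare() later rewrites into kTypeBeginPrepareXID, the prepared records, a kTypeEndPrepareXID record carrying the XID, and eventually a separate kTypeCommitXID (or kTypeRollbackXID) marker. The sketch below builds one such batch through the internal API, mirroring the PrepareCommit test that appears later in this diff; it needs RocksDB's internal header, so it is illustrative rather than a supported public usage.

#include "db/write_batch_internal.h"
#include "rocksdb/write_batch.h"

int main() {
  rocksdb::WriteBatch batch;
  // Reserve the byte right after the 12-byte header; MarkEndPrepare()
  // asserts on it and rewrites it as the begin-prepare marker.
  rocksdb::WriteBatchInternal::InsertNoop(&batch);
  batch.Put("k1", "v1");  // records that belong to the prepared section
  rocksdb::WriteBatchInternal::MarkEndPrepare(&batch, rocksdb::Slice("xid1"));
  // In a real 2PC flow the commit marker is written once the transaction
  // commits; a rollback would use MarkRollback() instead.
  rocksdb::WriteBatchInternal::MarkCommit(&batch, rocksdb::Slice("xid1"));
  return batch.Count() == 1 ? 0 : 1;  // markers do not bump the record count
}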
+
+  Status MarkCommit(const Slice& name) override {
+    assert(db_);
+
+    Status s;
+
+    if (recovering_log_number_ != 0) {
+      // in recovery, when we encounter a commit marker
+      // we look this transaction up in our set of rebuilt transactions
+      // and commit it.
+      auto trx = db_->GetRecoveredTransaction(name.ToString());
+
+      // the log containing the prepared section may have
+      // been released in the last incarnation because the
+      // data was flushed to L0
+      if (trx != nullptr) {
+        // at this point individual CF lognumbers will prevent
+        // duplicate re-insertion of values.
+        assert(log_number_ref_ == 0);
+        // all inserts must reference this trx log number
+        log_number_ref_ = trx->log_number_;
+        s = trx->batch_->Iterate(this);
+        log_number_ref_ = 0;
+
+        if (s.ok()) {
+          db_->DeleteRecoveredTransaction(name.ToString());
+        }
+        if (has_valid_writes_ != nullptr) {
+          *has_valid_writes_ = true;
+        }
+      }
+    } else {
+      // in non-recovery we simply ignore this tag
+    }
+
+    return s;
+  }
+
+  Status MarkRollback(const Slice& name) override {
+    assert(db_);
+
+    if (recovering_log_number_ != 0) {
+      auto trx = db_->GetRecoveredTransaction(name.ToString());
+
+      // the log containing the transaction's prep section
+      // may have been released in the previous incarnation
+      // because we knew it had been rolled back
+      if (trx != nullptr) {
+        db_->DeleteRecoveredTransaction(name.ToString());
+      }
+    } else {
+      // in non-recovery we simply ignore this tag
+    }
+
+    return Status::OK();
+  }
+
+ private:
+  MemTablePostProcessInfo* get_post_process_info(MemTable* mem) {
+    if (!concurrent_memtable_writes_) {
+      // No need to batch counters locally if we don't use concurrent mode.
+      return nullptr;
+    }
+    return &mem_post_info_map_[mem];
+  }
+};
+
+// This function can only be called in these conditions:
+// 1) During Recovery()
+// 2) During Write(), in a single-threaded write thread
+// 3) During Write(), in a concurrent context where memtables have been cloned
+// The reason is that it calls memtables->Seek(), which has a stateful cache
+Status WriteBatchInternal::InsertInto(
+    const autovector<WriteThread::Writer*>& writers, SequenceNumber sequence,
+    ColumnFamilyMemTables* memtables, FlushScheduler* flush_scheduler,
+    bool ignore_missing_column_families, uint64_t log_number, DB* db,
+    bool concurrent_memtable_writes) {
+  MemTableInserter inserter(sequence, memtables, flush_scheduler,
+                            ignore_missing_column_families, log_number, db,
+                            concurrent_memtable_writes);
+  for (size_t i = 0; i < writers.size(); i++) {
+    auto w = writers[i];
+    if (!w->ShouldWriteToMemtable()) {
+      continue;
+    }
+    inserter.set_log_number_ref(w->log_ref);
+    w->status = w->batch->Iterate(&inserter);
+    if (!w->status.ok()) {
+      return w->status;
+    }
+  }
+  return Status::OK();
+}
+
+Status WriteBatchInternal::InsertInto(WriteThread::Writer* writer,
+                                      ColumnFamilyMemTables* memtables,
+                                      FlushScheduler* flush_scheduler,
+                                      bool ignore_missing_column_families,
+                                      uint64_t log_number, DB* db,
+                                      bool concurrent_memtable_writes) {
+  MemTableInserter inserter(WriteBatchInternal::Sequence(writer->batch),
+                            memtables, flush_scheduler,
+                            ignore_missing_column_families, log_number, db,
+                            concurrent_memtable_writes);
+  assert(writer->ShouldWriteToMemtable());
+  inserter.set_log_number_ref(writer->log_ref);
+  Status s = writer->batch->Iterate(&inserter);
+  if (concurrent_memtable_writes) {
+    inserter.PostProcess();
+  }
+  return s;
+}
+
+Status WriteBatchInternal::InsertInto(
+    const WriteBatch* batch, ColumnFamilyMemTables* memtables,
+    FlushScheduler* flush_scheduler, bool ignore_missing_column_families,
+    uint64_t log_number, DB* db, bool concurrent_memtable_writes,
+    SequenceNumber* last_seq_used, bool* has_valid_writes) {
+  MemTableInserter inserter(WriteBatchInternal::Sequence(batch), memtables,
+                            flush_scheduler, ignore_missing_column_families,
+                            log_number, db, concurrent_memtable_writes,
+                            has_valid_writes);
+  Status s = batch->Iterate(&inserter);
+  if (last_seq_used != nullptr) {
+    *last_seq_used = inserter.get_final_sequence();
+  }
+  if (concurrent_memtable_writes) {
+    inserter.PostProcess();
+  }
+  return s;
+}
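All three InsertInto overloads reduce to the same mechanism: WriteBatch::Iterate() decodes rep_ record by record and dispatches each one to a Handler, with MemTableInserter being the handler that applies records to memtables. The same public mechanism can be used directly; here is a minimal sketch, separate from the patch, that replays a batch to count its puts.

#include "rocksdb/write_batch.h"

// A handler that counts Put records; Iterate() feeds it each decoded record
// the same way MemTableInserter above is fed during a write.
class CountingHandler : public rocksdb::WriteBatch::Handler {
 public:
  int puts = 0;
  rocksdb::Status PutCF(uint32_t /*column_family_id*/,
                        const rocksdb::Slice& /*key*/,
                        const rocksdb::Slice& /*value*/) override {
    ++puts;
    return rocksdb::Status::OK();
  }
};

int main() {
  rocksdb::WriteBatch batch;
  batch.Put("k1", "v1");
  batch.Put("k2", "v2");
  CountingHandler handler;
  rocksdb::Status s = batch.Iterate(&handler);
  return (s.ok() && handler.puts == 2) ? 0 : 1;
}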
+
+void WriteBatchInternal::SetContents(WriteBatch* b, const Slice& contents) {
+  assert(contents.size() >= WriteBatchInternal::kHeader);
+  b->rep_.assign(contents.data(), contents.size());
+  b->content_flags_.store(ContentFlags::DEFERRED, std::memory_order_relaxed);
+}
+
+void WriteBatchInternal::Append(WriteBatch* dst, const WriteBatch* src) {
+  SetCount(dst, Count(dst) + Count(src));
+  assert(src->rep_.size() >= WriteBatchInternal::kHeader);
+  dst->rep_.append(src->rep_.data() + WriteBatchInternal::kHeader,
+                   src->rep_.size() - WriteBatchInternal::kHeader);
+  dst->content_flags_.store(
+      dst->content_flags_.load(std::memory_order_relaxed) |
+          src->content_flags_.load(std::memory_order_relaxed),
+      std::memory_order_relaxed);
+}
+
+size_t WriteBatchInternal::AppendedByteSize(size_t leftByteSize,
+                                            size_t rightByteSize) {
+  if (leftByteSize == 0 || rightByteSize == 0) {
+    return leftByteSize + rightByteSize;
+  } else {
+    return leftByteSize + rightByteSize - WriteBatchInternal::kHeader;
+  }
+}
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/write_batch_base.cc b/external/rocksdb/db/write_batch_base.cc
new file mode 100755
index 0000000000..3936fbd922
--- /dev/null
+++ b/external/rocksdb/db/write_batch_base.cc
@@ -0,0 +1,76 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "rocksdb/write_batch_base.h"
+
+#include <string>
+
+#include "rocksdb/slice.h"
+
+namespace rocksdb {
+
+// Simple implementation of SlicePart variants of Put(). Child classes
+// can override these methods with more performant solutions if they choose.
+void WriteBatchBase::Put(ColumnFamilyHandle* column_family, + const SliceParts& key, const SliceParts& value) { + std::string key_buf, value_buf; + Slice key_slice(key, &key_buf); + Slice value_slice(value, &value_buf); + + Put(column_family, key_slice, value_slice); +} + +void WriteBatchBase::Put(const SliceParts& key, const SliceParts& value) { + std::string key_buf, value_buf; + Slice key_slice(key, &key_buf); + Slice value_slice(value, &value_buf); + + Put(key_slice, value_slice); +} + +void WriteBatchBase::Delete(ColumnFamilyHandle* column_family, + const SliceParts& key) { + std::string key_buf; + Slice key_slice(key, &key_buf); + Delete(column_family, key_slice); +} + +void WriteBatchBase::Delete(const SliceParts& key) { + std::string key_buf; + Slice key_slice(key, &key_buf); + Delete(key_slice); +} + +void WriteBatchBase::SingleDelete(ColumnFamilyHandle* column_family, + const SliceParts& key) { + std::string key_buf; + Slice key_slice(key, &key_buf); + SingleDelete(column_family, key_slice); +} + +void WriteBatchBase::SingleDelete(const SliceParts& key) { + std::string key_buf; + Slice key_slice(key, &key_buf); + SingleDelete(key_slice); +} + +void WriteBatchBase::Merge(ColumnFamilyHandle* column_family, + const SliceParts& key, const SliceParts& value) { + std::string key_buf, value_buf; + Slice key_slice(key, &key_buf); + Slice value_slice(value, &value_buf); + + Merge(column_family, key_slice, value_slice); +} + +void WriteBatchBase::Merge(const SliceParts& key, const SliceParts& value) { + std::string key_buf, value_buf; + Slice key_slice(key, &key_buf); + Slice value_slice(value, &value_buf); + + Merge(key_slice, value_slice); +} + +} // namespace rocksdb diff --git a/external/rocksdb/db/write_batch_internal.h b/external/rocksdb/db/write_batch_internal.h new file mode 100755 index 0000000000..cc036cbf6b --- /dev/null +++ b/external/rocksdb/db/write_batch_internal.h @@ -0,0 +1,180 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
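The WriteBatchBase fallbacks just above all use the same trick: a Slice constructor that takes a SliceParts plus a scratch std::string, concatenates the parts into the buffer, and hands one contiguous Slice to the ordinary single-slice overloads. A short usage sketch of the public SliceParts API, separate from the patch and assuming only RocksDB's public headers:

#include "rocksdb/slice.h"
#include "rocksdb/write_batch.h"

int main() {
  rocksdb::WriteBatch batch;
  // A key assembled from two fragments; no up-front concatenation needed.
  rocksdb::Slice key_parts[2] = {rocksdb::Slice("user:"), rocksdb::Slice("42")};
  rocksdb::Slice value("v1");
  // Equivalent to batch.Put("user:42", "v1"). WriteBatch has a native
  // SliceParts path; other WriteBatchBase children may fall back to the
  // flattening helpers above.
  batch.Put(rocksdb::SliceParts(key_parts, 2), rocksdb::SliceParts(&value, 1));
  return batch.Count() == 1 ? 0 : 1;
}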
+
+#pragma once
+#include <vector>
+#include "db/write_thread.h"
+#include "rocksdb/types.h"
+#include "rocksdb/write_batch.h"
+#include "rocksdb/db.h"
+#include "rocksdb/options.h"
+#include "util/autovector.h"
+
+namespace rocksdb {
+
+class MemTable;
+class FlushScheduler;
+class ColumnFamilyData;
+
+class ColumnFamilyMemTables {
+ public:
+  virtual ~ColumnFamilyMemTables() {}
+  virtual bool Seek(uint32_t column_family_id) = 0;
+  // returns true if the update to memtable should be ignored
+  // (useful when recovering from log whose updates have already
+  // been processed)
+  virtual uint64_t GetLogNumber() const = 0;
+  virtual MemTable* GetMemTable() const = 0;
+  virtual ColumnFamilyHandle* GetColumnFamilyHandle() = 0;
+  virtual ColumnFamilyData* current() { return nullptr; }
+};
+
+class ColumnFamilyMemTablesDefault : public ColumnFamilyMemTables {
+ public:
+  explicit ColumnFamilyMemTablesDefault(MemTable* mem)
+      : ok_(false), mem_(mem) {}
+
+  bool Seek(uint32_t column_family_id) override {
+    ok_ = (column_family_id == 0);
+    return ok_;
+  }
+
+  uint64_t GetLogNumber() const override { return 0; }
+
+  MemTable* GetMemTable() const override {
+    assert(ok_);
+    return mem_;
+  }
+
+  ColumnFamilyHandle* GetColumnFamilyHandle() override { return nullptr; }
+
+ private:
+  bool ok_;
+  MemTable* mem_;
+};
+
+// WriteBatchInternal provides static methods for manipulating a
+// WriteBatch that we don't want in the public WriteBatch interface.
+class WriteBatchInternal {
+ public:
+
+  // WriteBatch header has an 8-byte sequence number followed by a 4-byte count.
+  static const size_t kHeader = 12;
+
+  // WriteBatch methods with column_family_id instead of ColumnFamilyHandle*
+  static void Put(WriteBatch* batch, uint32_t column_family_id,
+                  const Slice& key, const Slice& value);
+
+  static void Put(WriteBatch* batch, uint32_t column_family_id,
+                  const SliceParts& key, const SliceParts& value);
+
+  static void Delete(WriteBatch* batch, uint32_t column_family_id,
+                     const SliceParts& key);
+
+  static void Delete(WriteBatch* batch, uint32_t column_family_id,
+                     const Slice& key);
+
+  static void SingleDelete(WriteBatch* batch, uint32_t column_family_id,
+                           const SliceParts& key);
+
+  static void SingleDelete(WriteBatch* batch, uint32_t column_family_id,
+                           const Slice& key);
+
+  static void Merge(WriteBatch* batch, uint32_t column_family_id,
+                    const Slice& key, const Slice& value);
+
+  static void Merge(WriteBatch* batch, uint32_t column_family_id,
+                    const SliceParts& key, const SliceParts& value);
+
+  static void MarkEndPrepare(WriteBatch* batch, const Slice& xid);
+
+  static void MarkRollback(WriteBatch* batch, const Slice& xid);
+
+  static void MarkCommit(WriteBatch* batch, const Slice& xid);
+
+  static void InsertNoop(WriteBatch* batch);
+
+  // Return the number of entries in the batch.
+  static int Count(const WriteBatch* batch);
+
+  // Set the count for the number of entries in the batch.
+  static void SetCount(WriteBatch* batch, int n);
+
+  // Return the sequence number for the start of this batch.
+  static SequenceNumber Sequence(const WriteBatch* batch);
+
+  // Store the specified number as the sequence number for the start of
+  // this batch.
+  static void SetSequence(WriteBatch* batch, SequenceNumber seq);
+
+  // Returns the offset of the first entry in the batch.
+  // This offset is only valid if the batch is not empty.
+  static size_t GetFirstOffset(WriteBatch* batch);
+
+  static Slice Contents(const WriteBatch* batch) {
+    return Slice(batch->rep_);
+  }
+
+  static size_t ByteSize(const WriteBatch* batch) {
+    return batch->rep_.size();
+  }
+
+  static void SetContents(WriteBatch* batch, const Slice& contents);
+
+  // Inserts batches[i] into memtable, for i in 0..num_batches-1 inclusive.
+  //
+  // If ignore_missing_column_families == true, a WriteBatch
+  // referencing a non-existing column family will be ignored.
+  // If ignore_missing_column_families == false, processing of the
+  // batches will be stopped if a reference is found to a non-existing
+  // column family and InvalidArgument() will be returned. The writes
+  // in batches may be only partially applied at that point.
+  //
+  // If log_number is non-zero, the memtable will be updated only if
+  // memtables->GetLogNumber() >= log_number.
+  //
+  // If flush_scheduler is non-null, it will be invoked if the memtable
+  // should be flushed.
+  //
+  // Under concurrent use, the caller is responsible for making sure that
+  // the memtables object itself is thread-local.
+  static Status InsertInto(const autovector<WriteThread::Writer*>& batches,
+                           SequenceNumber sequence,
+                           ColumnFamilyMemTables* memtables,
+                           FlushScheduler* flush_scheduler,
+                           bool ignore_missing_column_families = false,
+                           uint64_t log_number = 0, DB* db = nullptr,
+                           bool concurrent_memtable_writes = false);
+
+  // Convenience form of InsertInto when you have only one batch.
+  // last_seq_used returns the last sequence number used in a MemTable insert.
+  static Status InsertInto(const WriteBatch* batch,
+                           ColumnFamilyMemTables* memtables,
+                           FlushScheduler* flush_scheduler,
+                           bool ignore_missing_column_families = false,
+                           uint64_t log_number = 0, DB* db = nullptr,
+                           bool concurrent_memtable_writes = false,
+                           SequenceNumber* last_seq_used = nullptr,
+                           bool* has_valid_writes = nullptr);
+
+  static Status InsertInto(WriteThread::Writer* writer,
+                           ColumnFamilyMemTables* memtables,
+                           FlushScheduler* flush_scheduler,
+                           bool ignore_missing_column_families = false,
+                           uint64_t log_number = 0, DB* db = nullptr,
+                           bool concurrent_memtable_writes = false);
+
+  static void Append(WriteBatch* dst, const WriteBatch* src);
+
+  // Returns the byte size of appending a WriteBatch with ByteSize
+  // leftByteSize and a WriteBatch with ByteSize rightByteSize
+  static size_t AppendedByteSize(size_t leftByteSize, size_t rightByteSize);
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/write_batch_test.cc b/external/rocksdb/db/write_batch_test.cc
new file mode 100755
index 0000000000..cc351a5a6b
--- /dev/null
+++ b/external/rocksdb/db/write_batch_test.cc
@@ -0,0 +1,798 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
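kHeader above pins down the on-the-wire prefix of every batch: bytes 0-7 hold the starting sequence number and bytes 8-11 the record count, both little-endian fixed-width integers. A standalone sketch, separate from the patch, that decodes the header from a batch's public Data() view (the decoders are hand-rolled stand-ins; RocksDB uses its util/coding.h helpers):

#include <cstdint>
#include <string>
#include "rocksdb/write_batch.h"

// Little-endian fixed-width decoders (illustrative stand-ins).
static uint64_t ReadLE64(const char* p) {
  uint64_t v = 0;
  for (int i = 7; i >= 0; --i) v = (v << 8) | static_cast<uint8_t>(p[i]);
  return v;
}
static uint32_t ReadLE32(const char* p) {
  uint32_t v = 0;
  for (int i = 3; i >= 0; --i) v = (v << 8) | static_cast<uint8_t>(p[i]);
  return v;
}

int main() {
  rocksdb::WriteBatch batch;
  batch.Put("k", "v");
  const std::string& rep = batch.Data();      // 12-byte header + records
  uint64_t seq = ReadLE64(rep.data());        // still 0: assigned at write time
  uint32_t count = ReadLE32(rep.data() + 8);  // 1 after the single Put
  return (seq == 0 && count == 1) ? 0 : 1;
}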
+ +#include "rocksdb/db.h" + +#include +#include "db/column_family.h" +#include "db/memtable.h" +#include "db/write_batch_internal.h" +#include "rocksdb/env.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/utilities/write_batch_with_index.h" +#include "rocksdb/write_buffer_manager.h" +#include "table/scoped_arena_iterator.h" +#include "util/logging.h" +#include "util/string_util.h" +#include "util/testharness.h" + +namespace rocksdb { + +static std::string PrintContents(WriteBatch* b) { + InternalKeyComparator cmp(BytewiseComparator()); + auto factory = std::make_shared(); + Options options; + options.memtable_factory = factory; + ImmutableCFOptions ioptions(options); + WriteBufferManager wb(options.db_write_buffer_size); + MemTable* mem = + new MemTable(cmp, ioptions, MutableCFOptions(options, ioptions), &wb, + kMaxSequenceNumber); + mem->Ref(); + std::string state; + ColumnFamilyMemTablesDefault cf_mems_default(mem); + Status s = WriteBatchInternal::InsertInto(b, &cf_mems_default, nullptr); + int count = 0; + int put_count = 0; + int delete_count = 0; + int single_delete_count = 0; + int merge_count = 0; + Arena arena; + ScopedArenaIterator iter(mem->NewIterator(ReadOptions(), &arena)); + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + ParsedInternalKey ikey; + memset((void *)&ikey, 0, sizeof(ikey)); + EXPECT_TRUE(ParseInternalKey(iter->key(), &ikey)); + switch (ikey.type) { + case kTypeValue: + state.append("Put("); + state.append(ikey.user_key.ToString()); + state.append(", "); + state.append(iter->value().ToString()); + state.append(")"); + count++; + put_count++; + break; + case kTypeDeletion: + state.append("Delete("); + state.append(ikey.user_key.ToString()); + state.append(")"); + count++; + delete_count++; + break; + case kTypeSingleDeletion: + state.append("SingleDelete("); + state.append(ikey.user_key.ToString()); + state.append(")"); + count++; + single_delete_count++; + break; + case kTypeMerge: + state.append("Merge("); + state.append(ikey.user_key.ToString()); + state.append(", "); + state.append(iter->value().ToString()); + state.append(")"); + count++; + merge_count++; + break; + default: + assert(false); + break; + } + state.append("@"); + state.append(NumberToString(ikey.sequence)); + } + EXPECT_EQ(b->HasPut(), put_count > 0); + EXPECT_EQ(b->HasDelete(), delete_count > 0); + EXPECT_EQ(b->HasSingleDelete(), single_delete_count > 0); + EXPECT_EQ(b->HasMerge(), merge_count > 0); + if (!s.ok()) { + state.append(s.ToString()); + } else if (count != WriteBatchInternal::Count(b)) { + state.append("CountMismatch()"); + } + delete mem->Unref(); + return state; +} + +class WriteBatchTest : public testing::Test {}; + +TEST_F(WriteBatchTest, Empty) { + WriteBatch batch; + ASSERT_EQ("", PrintContents(&batch)); + ASSERT_EQ(0, WriteBatchInternal::Count(&batch)); + ASSERT_EQ(0, batch.Count()); +} + +TEST_F(WriteBatchTest, Multiple) { + WriteBatch batch; + batch.Put(Slice("foo"), Slice("bar")); + batch.Delete(Slice("box")); + batch.Put(Slice("baz"), Slice("boo")); + WriteBatchInternal::SetSequence(&batch, 100); + ASSERT_EQ(100U, WriteBatchInternal::Sequence(&batch)); + ASSERT_EQ(3, WriteBatchInternal::Count(&batch)); + ASSERT_EQ("Put(baz, boo)@102" + "Delete(box)@101" + "Put(foo, bar)@100", + PrintContents(&batch)); + ASSERT_EQ(3, batch.Count()); +} + +TEST_F(WriteBatchTest, Corruption) { + WriteBatch batch; + batch.Put(Slice("foo"), Slice("bar")); + batch.Delete(Slice("box")); + WriteBatchInternal::SetSequence(&batch, 200); + Slice contents = 
WriteBatchInternal::Contents(&batch); + WriteBatchInternal::SetContents(&batch, + Slice(contents.data(),contents.size()-1)); + ASSERT_EQ("Put(foo, bar)@200" + "Corruption: bad WriteBatch Delete", + PrintContents(&batch)); +} + +TEST_F(WriteBatchTest, Append) { + WriteBatch b1, b2; + WriteBatchInternal::SetSequence(&b1, 200); + WriteBatchInternal::SetSequence(&b2, 300); + WriteBatchInternal::Append(&b1, &b2); + ASSERT_EQ("", + PrintContents(&b1)); + ASSERT_EQ(0, b1.Count()); + b2.Put("a", "va"); + WriteBatchInternal::Append(&b1, &b2); + ASSERT_EQ("Put(a, va)@200", + PrintContents(&b1)); + ASSERT_EQ(1, b1.Count()); + b2.Clear(); + b2.Put("b", "vb"); + WriteBatchInternal::Append(&b1, &b2); + ASSERT_EQ("Put(a, va)@200" + "Put(b, vb)@201", + PrintContents(&b1)); + ASSERT_EQ(2, b1.Count()); + b2.Delete("foo"); + WriteBatchInternal::Append(&b1, &b2); + ASSERT_EQ("Put(a, va)@200" + "Put(b, vb)@202" + "Put(b, vb)@201" + "Delete(foo)@203", + PrintContents(&b1)); + ASSERT_EQ(4, b1.Count()); +} + +TEST_F(WriteBatchTest, SingleDeletion) { + WriteBatch batch; + WriteBatchInternal::SetSequence(&batch, 100); + ASSERT_EQ("", PrintContents(&batch)); + ASSERT_EQ(0, batch.Count()); + batch.Put("a", "va"); + ASSERT_EQ("Put(a, va)@100", PrintContents(&batch)); + ASSERT_EQ(1, batch.Count()); + batch.SingleDelete("a"); + ASSERT_EQ( + "SingleDelete(a)@101" + "Put(a, va)@100", + PrintContents(&batch)); + ASSERT_EQ(2, batch.Count()); +} + +namespace { + struct TestHandler : public WriteBatch::Handler { + std::string seen; + virtual Status PutCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + if (column_family_id == 0) { + seen += "Put(" + key.ToString() + ", " + value.ToString() + ")"; + } else { + seen += "PutCF(" + ToString(column_family_id) + ", " + + key.ToString() + ", " + value.ToString() + ")"; + } + return Status::OK(); + } + virtual Status DeleteCF(uint32_t column_family_id, + const Slice& key) override { + if (column_family_id == 0) { + seen += "Delete(" + key.ToString() + ")"; + } else { + seen += "DeleteCF(" + ToString(column_family_id) + ", " + + key.ToString() + ")"; + } + return Status::OK(); + } + virtual Status SingleDeleteCF(uint32_t column_family_id, + const Slice& key) override { + if (column_family_id == 0) { + seen += "SingleDelete(" + key.ToString() + ")"; + } else { + seen += "SingleDeleteCF(" + ToString(column_family_id) + ", " + + key.ToString() + ")"; + } + return Status::OK(); + } + virtual Status MergeCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + if (column_family_id == 0) { + seen += "Merge(" + key.ToString() + ", " + value.ToString() + ")"; + } else { + seen += "MergeCF(" + ToString(column_family_id) + ", " + + key.ToString() + ", " + value.ToString() + ")"; + } + return Status::OK(); + } + virtual void LogData(const Slice& blob) override { + seen += "LogData(" + blob.ToString() + ")"; + } + virtual Status MarkBeginPrepare() override { + seen += "MarkBeginPrepare()"; + return Status::OK(); + } + virtual Status MarkEndPrepare(const Slice& xid) override { + seen += "MarkEndPrepare(" + xid.ToString() + ")"; + return Status::OK(); + } + virtual Status MarkCommit(const Slice& xid) override { + seen += "MarkCommit(" + xid.ToString() + ")"; + return Status::OK(); + } + virtual Status MarkRollback(const Slice& xid) override { + seen += "MarkRollback(" + xid.ToString() + ")"; + return Status::OK(); + } + }; +} + +TEST_F(WriteBatchTest, PutNotImplemented) { + WriteBatch batch; + batch.Put(Slice("k1"), Slice("v1")); + 
ASSERT_EQ(1, batch.Count()); + ASSERT_EQ("Put(k1, v1)@0", PrintContents(&batch)); + + WriteBatch::Handler handler; + ASSERT_OK(batch.Iterate(&handler)); +} + +TEST_F(WriteBatchTest, DeleteNotImplemented) { + WriteBatch batch; + batch.Delete(Slice("k2")); + ASSERT_EQ(1, batch.Count()); + ASSERT_EQ("Delete(k2)@0", PrintContents(&batch)); + + WriteBatch::Handler handler; + ASSERT_OK(batch.Iterate(&handler)); +} + +TEST_F(WriteBatchTest, SingleDeleteNotImplemented) { + WriteBatch batch; + batch.SingleDelete(Slice("k2")); + ASSERT_EQ(1, batch.Count()); + ASSERT_EQ("SingleDelete(k2)@0", PrintContents(&batch)); + + WriteBatch::Handler handler; + ASSERT_OK(batch.Iterate(&handler)); +} + +TEST_F(WriteBatchTest, MergeNotImplemented) { + WriteBatch batch; + batch.Merge(Slice("foo"), Slice("bar")); + ASSERT_EQ(1, batch.Count()); + ASSERT_EQ("Merge(foo, bar)@0", PrintContents(&batch)); + + WriteBatch::Handler handler; + ASSERT_OK(batch.Iterate(&handler)); +} + +TEST_F(WriteBatchTest, Blob) { + WriteBatch batch; + batch.Put(Slice("k1"), Slice("v1")); + batch.Put(Slice("k2"), Slice("v2")); + batch.Put(Slice("k3"), Slice("v3")); + batch.PutLogData(Slice("blob1")); + batch.Delete(Slice("k2")); + batch.SingleDelete(Slice("k3")); + batch.PutLogData(Slice("blob2")); + batch.Merge(Slice("foo"), Slice("bar")); + ASSERT_EQ(6, batch.Count()); + ASSERT_EQ( + "Merge(foo, bar)@5" + "Put(k1, v1)@0" + "Delete(k2)@3" + "Put(k2, v2)@1" + "SingleDelete(k3)@4" + "Put(k3, v3)@2", + PrintContents(&batch)); + + TestHandler handler; + batch.Iterate(&handler); + ASSERT_EQ( + "Put(k1, v1)" + "Put(k2, v2)" + "Put(k3, v3)" + "LogData(blob1)" + "Delete(k2)" + "SingleDelete(k3)" + "LogData(blob2)" + "Merge(foo, bar)", + handler.seen); +} + +TEST_F(WriteBatchTest, PrepareCommit) { + WriteBatch batch; + WriteBatchInternal::InsertNoop(&batch); + batch.Put(Slice("k1"), Slice("v1")); + batch.Put(Slice("k2"), Slice("v2")); + batch.SetSavePoint(); + WriteBatchInternal::MarkEndPrepare(&batch, Slice("xid1")); + Status s = batch.RollbackToSavePoint(); + ASSERT_EQ(s, Status::NotFound()); + WriteBatchInternal::MarkCommit(&batch, Slice("xid1")); + WriteBatchInternal::MarkRollback(&batch, Slice("xid1")); + ASSERT_EQ(2, batch.Count()); + + TestHandler handler; + batch.Iterate(&handler); + ASSERT_EQ( + "MarkBeginPrepare()" + "Put(k1, v1)" + "Put(k2, v2)" + "MarkEndPrepare(xid1)" + "MarkCommit(xid1)" + "MarkRollback(xid1)", + handler.seen); +} + +// It requires more than 30GB of memory to run the test. With single memory +// allocation of more than 30GB. +// Not all platform can run it. Also it runs a long time. So disable it. +TEST_F(WriteBatchTest, DISABLED_ManyUpdates) { + // Insert key and value of 3GB and push total batch size to 12GB. 
+ static const size_t kKeyValueSize = 4u; + static const uint32_t kNumUpdates = 3 << 30; + std::string raw(kKeyValueSize, 'A'); + WriteBatch batch(kNumUpdates * (4 + kKeyValueSize * 2) + 1024u); + char c = 'A'; + for (uint32_t i = 0; i < kNumUpdates; i++) { + if (c > 'Z') { + c = 'A'; + } + raw[0] = c; + raw[raw.length() - 1] = c; + c++; + batch.Put(raw, raw); + } + + ASSERT_EQ(kNumUpdates, batch.Count()); + + struct NoopHandler : public WriteBatch::Handler { + uint32_t num_seen = 0; + char expected_char = 'A'; + virtual Status PutCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + EXPECT_EQ(kKeyValueSize, key.size()); + EXPECT_EQ(kKeyValueSize, value.size()); + EXPECT_EQ(expected_char, key[0]); + EXPECT_EQ(expected_char, value[0]); + EXPECT_EQ(expected_char, key[kKeyValueSize - 1]); + EXPECT_EQ(expected_char, value[kKeyValueSize - 1]); + expected_char++; + if (expected_char > 'Z') { + expected_char = 'A'; + } + ++num_seen; + return Status::OK(); + } + virtual Status DeleteCF(uint32_t column_family_id, + const Slice& key) override { + EXPECT_TRUE(false); + return Status::OK(); + } + virtual Status SingleDeleteCF(uint32_t column_family_id, + const Slice& key) override { + EXPECT_TRUE(false); + return Status::OK(); + } + virtual Status MergeCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + EXPECT_TRUE(false); + return Status::OK(); + } + virtual void LogData(const Slice& blob) override { EXPECT_TRUE(false); } + virtual bool Continue() override { return num_seen < kNumUpdates; } + } handler; + + batch.Iterate(&handler); + ASSERT_EQ(kNumUpdates, handler.num_seen); +} + +// The test requires more than 18GB memory to run it, with single memory +// allocation of more than 12GB. Not all the platform can run it. So disable it. +TEST_F(WriteBatchTest, DISABLED_LargeKeyValue) { + // Insert key and value of 3GB and push total batch size to 12GB. 
+ static const size_t kKeyValueSize = 3221225472u; + std::string raw(kKeyValueSize, 'A'); + WriteBatch batch(12884901888u + 1024u); + for (char i = 0; i < 2; i++) { + raw[0] = 'A' + i; + raw[raw.length() - 1] = 'A' - i; + batch.Put(raw, raw); + } + + ASSERT_EQ(2, batch.Count()); + + struct NoopHandler : public WriteBatch::Handler { + int num_seen = 0; + virtual Status PutCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + EXPECT_EQ(kKeyValueSize, key.size()); + EXPECT_EQ(kKeyValueSize, value.size()); + EXPECT_EQ('A' + num_seen, key[0]); + EXPECT_EQ('A' + num_seen, value[0]); + EXPECT_EQ('A' - num_seen, key[kKeyValueSize - 1]); + EXPECT_EQ('A' - num_seen, value[kKeyValueSize - 1]); + ++num_seen; + return Status::OK(); + } + virtual Status DeleteCF(uint32_t column_family_id, + const Slice& key) override { + EXPECT_TRUE(false); + return Status::OK(); + } + virtual Status SingleDeleteCF(uint32_t column_family_id, + const Slice& key) override { + EXPECT_TRUE(false); + return Status::OK(); + } + virtual Status MergeCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + EXPECT_TRUE(false); + return Status::OK(); + } + virtual void LogData(const Slice& blob) override { EXPECT_TRUE(false); } + virtual bool Continue() override { return num_seen < 2; } + } handler; + + batch.Iterate(&handler); + ASSERT_EQ(2, handler.num_seen); +} + +TEST_F(WriteBatchTest, Continue) { + WriteBatch batch; + + struct Handler : public TestHandler { + int num_seen = 0; + virtual Status PutCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + ++num_seen; + return TestHandler::PutCF(column_family_id, key, value); + } + virtual Status DeleteCF(uint32_t column_family_id, + const Slice& key) override { + ++num_seen; + return TestHandler::DeleteCF(column_family_id, key); + } + virtual Status SingleDeleteCF(uint32_t column_family_id, + const Slice& key) override { + ++num_seen; + return TestHandler::SingleDeleteCF(column_family_id, key); + } + virtual Status MergeCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + ++num_seen; + return TestHandler::MergeCF(column_family_id, key, value); + } + virtual void LogData(const Slice& blob) override { + ++num_seen; + TestHandler::LogData(blob); + } + virtual bool Continue() override { return num_seen < 5; } + } handler; + + batch.Put(Slice("k1"), Slice("v1")); + batch.Put(Slice("k2"), Slice("v2")); + batch.PutLogData(Slice("blob1")); + batch.Delete(Slice("k1")); + batch.SingleDelete(Slice("k2")); + batch.PutLogData(Slice("blob2")); + batch.Merge(Slice("foo"), Slice("bar")); + batch.Iterate(&handler); + ASSERT_EQ( + "Put(k1, v1)" + "Put(k2, v2)" + "LogData(blob1)" + "Delete(k1)" + "SingleDelete(k2)", + handler.seen); +} + +TEST_F(WriteBatchTest, PutGatherSlices) { + WriteBatch batch; + batch.Put(Slice("foo"), Slice("bar")); + + { + // Try a write where the key is one slice but the value is two + Slice key_slice("baz"); + Slice value_slices[2] = { Slice("header"), Slice("payload") }; + batch.Put(SliceParts(&key_slice, 1), + SliceParts(value_slices, 2)); + } + + { + // One where the key is composite but the value is a single slice + Slice key_slices[3] = { Slice("key"), Slice("part2"), Slice("part3") }; + Slice value_slice("value"); + batch.Put(SliceParts(key_slices, 3), + SliceParts(&value_slice, 1)); + } + + WriteBatchInternal::SetSequence(&batch, 100); + ASSERT_EQ("Put(baz, headerpayload)@101" + "Put(foo, bar)@100" + "Put(keypart2part3, value)@102", + 
PrintContents(&batch)); + ASSERT_EQ(3, batch.Count()); +} + +namespace { +class ColumnFamilyHandleImplDummy : public ColumnFamilyHandleImpl { + public: + explicit ColumnFamilyHandleImplDummy(int id) + : ColumnFamilyHandleImpl(nullptr, nullptr, nullptr), id_(id) {} + uint32_t GetID() const override { return id_; } + const Comparator* user_comparator() const override { + return BytewiseComparator(); + } + + private: + uint32_t id_; +}; +} // namespace anonymous + +TEST_F(WriteBatchTest, ColumnFamiliesBatchTest) { + WriteBatch batch; + ColumnFamilyHandleImplDummy zero(0), two(2), three(3), eight(8); + batch.Put(&zero, Slice("foo"), Slice("bar")); + batch.Put(&two, Slice("twofoo"), Slice("bar2")); + batch.Put(&eight, Slice("eightfoo"), Slice("bar8")); + batch.Delete(&eight, Slice("eightfoo")); + batch.SingleDelete(&two, Slice("twofoo")); + batch.Merge(&three, Slice("threethree"), Slice("3three")); + batch.Put(&zero, Slice("foo"), Slice("bar")); + batch.Merge(Slice("omom"), Slice("nom")); + + TestHandler handler; + batch.Iterate(&handler); + ASSERT_EQ( + "Put(foo, bar)" + "PutCF(2, twofoo, bar2)" + "PutCF(8, eightfoo, bar8)" + "DeleteCF(8, eightfoo)" + "SingleDeleteCF(2, twofoo)" + "MergeCF(3, threethree, 3three)" + "Put(foo, bar)" + "Merge(omom, nom)", + handler.seen); +} + +#ifndef ROCKSDB_LITE +TEST_F(WriteBatchTest, ColumnFamiliesBatchWithIndexTest) { + WriteBatchWithIndex batch; + ColumnFamilyHandleImplDummy zero(0), two(2), three(3), eight(8); + batch.Put(&zero, Slice("foo"), Slice("bar")); + batch.Put(&two, Slice("twofoo"), Slice("bar2")); + batch.Put(&eight, Slice("eightfoo"), Slice("bar8")); + batch.Delete(&eight, Slice("eightfoo")); + batch.SingleDelete(&two, Slice("twofoo")); + batch.Merge(&three, Slice("threethree"), Slice("3three")); + batch.Put(&zero, Slice("foo"), Slice("bar")); + batch.Merge(Slice("omom"), Slice("nom")); + + std::unique_ptr iter; + + iter.reset(batch.NewIterator(&eight)); + iter->Seek("eightfoo"); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(WriteType::kPutRecord, iter->Entry().type); + ASSERT_EQ("eightfoo", iter->Entry().key.ToString()); + ASSERT_EQ("bar8", iter->Entry().value.ToString()); + + iter->Next(); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(WriteType::kDeleteRecord, iter->Entry().type); + ASSERT_EQ("eightfoo", iter->Entry().key.ToString()); + + iter->Next(); + ASSERT_OK(iter->status()); + ASSERT_TRUE(!iter->Valid()); + + iter.reset(batch.NewIterator(&two)); + iter->Seek("twofoo"); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(WriteType::kPutRecord, iter->Entry().type); + ASSERT_EQ("twofoo", iter->Entry().key.ToString()); + ASSERT_EQ("bar2", iter->Entry().value.ToString()); + + iter->Next(); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(WriteType::kSingleDeleteRecord, iter->Entry().type); + ASSERT_EQ("twofoo", iter->Entry().key.ToString()); + + iter->Next(); + ASSERT_OK(iter->status()); + ASSERT_TRUE(!iter->Valid()); + + iter.reset(batch.NewIterator()); + iter->Seek("gggg"); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(WriteType::kMergeRecord, iter->Entry().type); + ASSERT_EQ("omom", iter->Entry().key.ToString()); + ASSERT_EQ("nom", iter->Entry().value.ToString()); + + iter->Next(); + ASSERT_OK(iter->status()); + ASSERT_TRUE(!iter->Valid()); + + iter.reset(batch.NewIterator(&zero)); + iter->Seek("foo"); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(WriteType::kPutRecord, iter->Entry().type); + 
ASSERT_EQ("foo", iter->Entry().key.ToString()); + ASSERT_EQ("bar", iter->Entry().value.ToString()); + + iter->Next(); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(WriteType::kPutRecord, iter->Entry().type); + ASSERT_EQ("foo", iter->Entry().key.ToString()); + ASSERT_EQ("bar", iter->Entry().value.ToString()); + + iter->Next(); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ(WriteType::kMergeRecord, iter->Entry().type); + ASSERT_EQ("omom", iter->Entry().key.ToString()); + ASSERT_EQ("nom", iter->Entry().value.ToString()); + + iter->Next(); + ASSERT_OK(iter->status()); + ASSERT_TRUE(!iter->Valid()); + + TestHandler handler; + batch.GetWriteBatch()->Iterate(&handler); + ASSERT_EQ( + "Put(foo, bar)" + "PutCF(2, twofoo, bar2)" + "PutCF(8, eightfoo, bar8)" + "DeleteCF(8, eightfoo)" + "SingleDeleteCF(2, twofoo)" + "MergeCF(3, threethree, 3three)" + "Put(foo, bar)" + "Merge(omom, nom)", + handler.seen); +} +#endif // !ROCKSDB_LITE + +TEST_F(WriteBatchTest, SavePointTest) { + Status s; + WriteBatch batch; + batch.SetSavePoint(); + + batch.Put("A", "a"); + batch.Put("B", "b"); + batch.SetSavePoint(); + + batch.Put("C", "c"); + batch.Delete("A"); + batch.SetSavePoint(); + batch.SetSavePoint(); + + ASSERT_OK(batch.RollbackToSavePoint()); + ASSERT_EQ( + "Delete(A)@3" + "Put(A, a)@0" + "Put(B, b)@1" + "Put(C, c)@2", + PrintContents(&batch)); + + ASSERT_OK(batch.RollbackToSavePoint()); + ASSERT_OK(batch.RollbackToSavePoint()); + ASSERT_EQ( + "Put(A, a)@0" + "Put(B, b)@1", + PrintContents(&batch)); + + batch.Delete("A"); + batch.Put("B", "bb"); + + ASSERT_OK(batch.RollbackToSavePoint()); + ASSERT_EQ("", PrintContents(&batch)); + + s = batch.RollbackToSavePoint(); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ("", PrintContents(&batch)); + + batch.Put("D", "d"); + batch.Delete("A"); + + batch.SetSavePoint(); + + batch.Put("A", "aaa"); + + ASSERT_OK(batch.RollbackToSavePoint()); + ASSERT_EQ( + "Delete(A)@1" + "Put(D, d)@0", + PrintContents(&batch)); + + batch.SetSavePoint(); + + batch.Put("D", "d"); + batch.Delete("A"); + + ASSERT_OK(batch.RollbackToSavePoint()); + ASSERT_EQ( + "Delete(A)@1" + "Put(D, d)@0", + PrintContents(&batch)); + + s = batch.RollbackToSavePoint(); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ( + "Delete(A)@1" + "Put(D, d)@0", + PrintContents(&batch)); + + WriteBatch batch2; + + s = batch2.RollbackToSavePoint(); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ("", PrintContents(&batch2)); + + batch2.Delete("A"); + batch2.SetSavePoint(); + + s = batch2.RollbackToSavePoint(); + ASSERT_OK(s); + ASSERT_EQ("Delete(A)@0", PrintContents(&batch2)); + + batch2.Clear(); + ASSERT_EQ("", PrintContents(&batch2)); + + batch2.SetSavePoint(); + + batch2.Delete("B"); + ASSERT_EQ("Delete(B)@0", PrintContents(&batch2)); + + batch2.SetSavePoint(); + s = batch2.RollbackToSavePoint(); + ASSERT_OK(s); + ASSERT_EQ("Delete(B)@0", PrintContents(&batch2)); + + s = batch2.RollbackToSavePoint(); + ASSERT_OK(s); + ASSERT_EQ("", PrintContents(&batch2)); + + s = batch2.RollbackToSavePoint(); + ASSERT_TRUE(s.IsNotFound()); + ASSERT_EQ("", PrintContents(&batch2)); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/db/write_callback.h b/external/rocksdb/db/write_callback.h new file mode 100755 index 0000000000..93c80d6510 --- /dev/null +++ b/external/rocksdb/db/write_callback.h @@ -0,0 +1,27 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#pragma once + +#include "rocksdb/status.h" + +namespace rocksdb { + +class DB; + +class WriteCallback { + public: + virtual ~WriteCallback() {} + + // Will be called while on the write thread before the write executes. If + // this function returns a non-OK status, the write will be aborted and this + // status will be returned to the caller of DB::Write(). + virtual Status Callback(DB* db) = 0; + + // return true if writes with this callback can be batched with other writes + virtual bool AllowWriteBatching() = 0; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/db/write_callback_test.cc b/external/rocksdb/db/write_callback_test.cc new file mode 100755 index 0000000000..33aaab7f44 --- /dev/null +++ b/external/rocksdb/db/write_callback_test.cc @@ -0,0 +1,355 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef ROCKSDB_LITE + +#include +#include +#include + +#include "db/db_impl.h" +#include "db/write_callback.h" +#include "rocksdb/db.h" +#include "rocksdb/write_batch.h" +#include "util/logging.h" +#include "util/sync_point.h" +#include "util/testharness.h" + +using std::string; + +namespace rocksdb { + +class WriteCallbackTest : public testing::Test { + public: + string dbname; + + WriteCallbackTest() { + dbname = test::TmpDir() + "/write_callback_testdb"; + } +}; + +class WriteCallbackTestWriteCallback1 : public WriteCallback { + public: + bool was_called = false; + + Status Callback(DB *db) override { + was_called = true; + + // Make sure db is a DBImpl + DBImpl* db_impl = dynamic_cast (db); + if (db_impl == nullptr) { + return Status::InvalidArgument(""); + } + + return Status::OK(); + } + + bool AllowWriteBatching() override { return true; } +}; + +class WriteCallbackTestWriteCallback2 : public WriteCallback { + public: + Status Callback(DB *db) override { + return Status::Busy(); + } + bool AllowWriteBatching() override { return true; } +}; + +class MockWriteCallback : public WriteCallback { + public: + bool should_fail_ = false; + bool was_called_ = false; + bool allow_batching_ = false; + + Status Callback(DB* db) override { + was_called_ = true; + if (should_fail_) { + return Status::Busy(); + } else { + return Status::OK(); + } + } + + bool AllowWriteBatching() override { return allow_batching_; } +}; + +TEST_F(WriteCallbackTest, WriteWithCallbackTest) { + struct WriteOP { + WriteOP(bool should_fail = false) { callback_.should_fail_ = should_fail; } + + void Put(const string& key, const string& val) { + kvs_.push_back(std::make_pair(key, val)); + write_batch_.Put(key, val); + } + + void Clear() { + kvs_.clear(); + write_batch_.Clear(); + callback_.was_called_ = false; + } + + MockWriteCallback callback_; + WriteBatch write_batch_; + std::vector> kvs_; + }; + + std::vector> write_scenarios = { + {true}, + {false}, + {false, false}, + {true, true}, + {true, false}, + {false, true}, + {false, false, false}, + {true, true, true}, + {false, true, false}, + {true, false, true}, + {true, false, false, false, false}, + {false, false, false, false, true}, + {false, false, true, false, true}, + 
}; + + for (auto& allow_parallel : {true, false}) { + for (auto& allow_batching : {true, false}) { + for (auto& enable_WAL : {true, false}) { + for (auto& write_group : write_scenarios) { + Options options; + options.create_if_missing = true; + options.allow_concurrent_memtable_write = allow_parallel; + + ReadOptions read_options; + DB* db; + DBImpl* db_impl; + + DestroyDB(dbname, options); + ASSERT_OK(DB::Open(options, dbname, &db)); + + db_impl = dynamic_cast(db); + ASSERT_TRUE(db_impl); + + std::atomic threads_waiting(0); + std::atomic seq(db_impl->GetLatestSequenceNumber()); + ASSERT_EQ(db_impl->GetLatestSequenceNumber(), 0); + + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "WriteThread::JoinBatchGroup:Wait", [&](void* arg) { + uint64_t cur_threads_waiting = 0; + bool is_leader = false; + bool is_last = false; + + // who am i + do { + cur_threads_waiting = threads_waiting.load(); + is_leader = (cur_threads_waiting == 0); + is_last = (cur_threads_waiting == write_group.size() - 1); + } while (!threads_waiting.compare_exchange_strong( + cur_threads_waiting, cur_threads_waiting + 1)); + + // check my state + auto* writer = reinterpret_cast(arg); + + if (is_leader) { + ASSERT_TRUE(writer->state == + WriteThread::State::STATE_GROUP_LEADER); + } else { + ASSERT_TRUE(writer->state == WriteThread::State::STATE_INIT); + } + + // (meta test) the first WriteOP should indeed be the first + // and the last should be the last (all others can be out of + // order) + if (is_leader) { + ASSERT_TRUE(writer->callback->Callback(nullptr).ok() == + !write_group.front().callback_.should_fail_); + } else if (is_last) { + ASSERT_TRUE(writer->callback->Callback(nullptr).ok() == + !write_group.back().callback_.should_fail_); + } + + // wait for friends + while (threads_waiting.load() < write_group.size()) { + } + }); + + rocksdb::SyncPoint::GetInstance()->SetCallBack( + "WriteThread::JoinBatchGroup:DoneWaiting", [&](void* arg) { + // check my state + auto* writer = reinterpret_cast(arg); + + if (!allow_batching) { + // no batching so everyone should be a leader + ASSERT_TRUE(writer->state == + WriteThread::State::STATE_GROUP_LEADER); + } else if (!allow_parallel) { + ASSERT_TRUE(writer->state == + WriteThread::State::STATE_COMPLETED); + } + }); + + std::atomic thread_num(0); + std::atomic dummy_key(0); + std::function write_with_callback_func = [&]() { + uint32_t i = thread_num.fetch_add(1); + Random rnd(i); + + // leaders gotta lead + while (i > 0 && threads_waiting.load() < 1) { + } + + // loser has to lose + while (i == write_group.size() - 1 && + threads_waiting.load() < write_group.size() - 1) { + } + + auto& write_op = write_group.at(i); + write_op.Clear(); + write_op.callback_.allow_batching_ = allow_batching; + + // insert some keys + for (uint32_t j = 0; j < rnd.Next() % 50; j++) { + // grab unique key + char my_key = 0; + do { + my_key = dummy_key.load(); + } while (!dummy_key.compare_exchange_strong(my_key, my_key + 1)); + + string skey(5, my_key); + string sval(10, my_key); + write_op.Put(skey, sval); + + if (!write_op.callback_.should_fail_) { + seq.fetch_add(1); + } + } + + WriteOptions woptions; + woptions.disableWAL = !enable_WAL; + woptions.sync = enable_WAL; + Status s = db_impl->WriteWithCallback( + woptions, &write_op.write_batch_, &write_op.callback_); + + if (write_op.callback_.should_fail_) { + ASSERT_TRUE(s.IsBusy()); + } else { + ASSERT_OK(s); + } + }; + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + + // do all the writes + std::vector threads; + for (uint32_t i = 0; i 
< write_group.size(); i++) { + threads.emplace_back(write_with_callback_func); + } + for (auto& t : threads) { + t.join(); + } + + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + + // check for keys + string value; + for (auto& w : write_group) { + ASSERT_TRUE(w.callback_.was_called_); + for (auto& kvp : w.kvs_) { + if (w.callback_.should_fail_) { + ASSERT_TRUE( + db->Get(read_options, kvp.first, &value).IsNotFound()); + } else { + ASSERT_OK(db->Get(read_options, kvp.first, &value)); + ASSERT_EQ(value, kvp.second); + } + } + } + + ASSERT_EQ(seq.load(), db_impl->GetLatestSequenceNumber()); + + delete db; + DestroyDB(dbname, options); + } + } + } + } +} + +TEST_F(WriteCallbackTest, WriteCallBackTest) { + Options options; + WriteOptions write_options; + ReadOptions read_options; + string value; + DB* db; + DBImpl* db_impl; + + DestroyDB(dbname, options); + + options.create_if_missing = true; + Status s = DB::Open(options, dbname, &db); + ASSERT_OK(s); + + db_impl = dynamic_cast (db); + ASSERT_TRUE(db_impl); + + WriteBatch wb; + + wb.Put("a", "value.a"); + wb.Delete("x"); + + // Test a simple Write + s = db->Write(write_options, &wb); + ASSERT_OK(s); + + s = db->Get(read_options, "a", &value); + ASSERT_OK(s); + ASSERT_EQ("value.a", value); + + // Test WriteWithCallback + WriteCallbackTestWriteCallback1 callback1; + WriteBatch wb2; + + wb2.Put("a", "value.a2"); + + s = db_impl->WriteWithCallback(write_options, &wb2, &callback1); + ASSERT_OK(s); + ASSERT_TRUE(callback1.was_called); + + s = db->Get(read_options, "a", &value); + ASSERT_OK(s); + ASSERT_EQ("value.a2", value); + + // Test WriteWithCallback for a callback that fails + WriteCallbackTestWriteCallback2 callback2; + WriteBatch wb3; + + wb3.Put("a", "value.a3"); + + s = db_impl->WriteWithCallback(write_options, &wb3, &callback2); + ASSERT_NOK(s); + + s = db->Get(read_options, "a", &value); + ASSERT_OK(s); + ASSERT_EQ("value.a2", value); + + delete db; + DestroyDB(dbname, options); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + +#else +#include + +int main(int argc, char** argv) { + fprintf(stderr, + "SKIPPED as WriteWithCallback is not supported in ROCKSDB_LITE\n"); + return 0; +} + +#endif // !ROCKSDB_LITE diff --git a/external/rocksdb/db/write_controller.cc b/external/rocksdb/db/write_controller.cc new file mode 100755 index 0000000000..d46d8d3ddc --- /dev/null +++ b/external/rocksdb/db/write_controller.cc @@ -0,0 +1,121 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "db/write_controller.h" + +#include +#include +#include "rocksdb/env.h" + +namespace rocksdb { + +std::unique_ptr WriteController::GetStopToken() { + ++total_stopped_; + return std::unique_ptr(new StopWriteToken(this)); +} + +std::unique_ptr WriteController::GetDelayToken( + uint64_t write_rate) { + total_delayed_++; + // Reset counters. 
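+  // (A fresh token should start from a clean slate: inheriting byte credit or
+  // a stale refill timestamp from an earlier token would let the first writes
+  // after the delay is re-enabled bypass the intended throttling.)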
+  last_refill_time_ = 0;
+  bytes_left_ = 0;
+  set_delayed_write_rate(write_rate);
+  return std::unique_ptr<WriteControllerToken>(new DelayWriteToken(this));
+}
+
+std::unique_ptr<WriteControllerToken>
+WriteController::GetCompactionPressureToken() {
+  ++total_compaction_pressure_;
+  return std::unique_ptr<WriteControllerToken>(
+      new CompactionPressureToken(this));
+}
+
+bool WriteController::IsStopped() const { return total_stopped_ > 0; }
+// This is inside DB mutex, so we can't sleep and need to minimize
+// frequency to get time.
+// If it turns out to be a performance issue, we can redesign the thread
+// synchronization model here.
+// The function trusts that the caller will sleep for the number of
+// microseconds returned.
+uint64_t WriteController::GetDelay(Env* env, uint64_t num_bytes) {
+  if (total_stopped_ > 0) {
+    return 0;
+  }
+  if (total_delayed_ == 0) {
+    return 0;
+  }
+
+  const uint64_t kMicrosPerSecond = 1000000;
+  const uint64_t kRefillInterval = 1024U;
+
+  if (bytes_left_ >= num_bytes) {
+    bytes_left_ -= num_bytes;
+    return 0;
+  }
+  // The frequency to get time inside DB mutex is less than one per refill
+  // interval.
+  auto time_now = env->NowMicros();
+
+  uint64_t sleep_debt = 0;
+  uint64_t time_since_last_refill = 0;
+  if (last_refill_time_ != 0) {
+    if (last_refill_time_ > time_now) {
+      sleep_debt = last_refill_time_ - time_now;
+    } else {
+      time_since_last_refill = time_now - last_refill_time_;
+      bytes_left_ +=
+          static_cast<uint64_t>(static_cast<double>(time_since_last_refill) /
+                                kMicrosPerSecond * delayed_write_rate_);
+      if (time_since_last_refill >= kRefillInterval &&
+          bytes_left_ > num_bytes) {
+        // If refill interval already passed and we have enough bytes
+        // return without extra sleeping.
+        last_refill_time_ = time_now;
+        bytes_left_ -= num_bytes;
+        return 0;
+      }
+    }
+  }
+
+  uint64_t single_refill_amount =
+      delayed_write_rate_ * kRefillInterval / kMicrosPerSecond;
+  if (bytes_left_ + single_refill_amount >= num_bytes) {
+    // Wait until a refill interval.
+    // Never sleep for less than one refill interval, so we don't have to
+    // query the clock again for such short waits.
+    bytes_left_ = bytes_left_ + single_refill_amount - num_bytes;
+    last_refill_time_ = time_now + kRefillInterval;
+    return kRefillInterval + sleep_debt;
+  }
+
+  // Need to refill more than one interval. Need to sleep longer. Check
+  // whether expiration will hit
+
+  // Sleep just until `num_bytes` is allowed.
+  uint64_t sleep_amount =
+      static_cast<uint64_t>(num_bytes /
+                            static_cast<double>(delayed_write_rate_) *
+                            kMicrosPerSecond) +
+      sleep_debt;
+  last_refill_time_ = time_now + sleep_amount;
+  return sleep_amount;
+}
+
+StopWriteToken::~StopWriteToken() {
+  assert(controller_->total_stopped_ >= 1);
+  --controller_->total_stopped_;
+}
+
+DelayWriteToken::~DelayWriteToken() {
+  controller_->total_delayed_--;
+  assert(controller_->total_delayed_ >= 0);
+}
+
+CompactionPressureToken::~CompactionPressureToken() {
+  controller_->total_compaction_pressure_--;
+  assert(controller_->total_compaction_pressure_ >= 0);
+}
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/write_controller.h b/external/rocksdb/db/write_controller.h
new file mode 100755
index 0000000000..0520471774
--- /dev/null
+++ b/external/rocksdb/db/write_controller.h
@@ -0,0 +1,115 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <stdint.h>
+
+#include <memory>
+
+namespace rocksdb {
+
+class Env;
+class WriteControllerToken;
+
+// WriteController is controlling write stalls in our write code-path. Write
+// stalls happen when compaction can't keep up with write rate.
+// All of the methods here (including WriteControllerToken's destructors) need
+// to be called while holding DB mutex
+class WriteController {
+ public:
+  explicit WriteController(uint64_t _delayed_write_rate = 1024u * 1024u * 32u)
+      : total_stopped_(0),
+        total_delayed_(0),
+        total_compaction_pressure_(0),
+        bytes_left_(0),
+        last_refill_time_(0) {
+    set_delayed_write_rate(_delayed_write_rate);
+  }
+  ~WriteController() = default;
+
+  // When an actor (column family) requests a stop token, all writes will be
+  // stopped until the stop token is released (deleted)
+  std::unique_ptr<WriteControllerToken> GetStopToken();
+  // When an actor (column family) requests a delay token, total delay for all
+  // writes to the DB will be controlled under the delayed write rate. Every
+  // write needs to call GetDelay() with number of bytes writing to the DB,
+  // which returns number of microseconds to sleep.
+  std::unique_ptr<WriteControllerToken> GetDelayToken(
+      uint64_t delayed_write_rate);
+  // When an actor (column family) requests a moderate token, compaction
+  // threads will be increased
+  std::unique_ptr<WriteControllerToken> GetCompactionPressureToken();
+
+  // These three methods query the state of the WriteController.
+  bool IsStopped() const;
+  bool NeedsDelay() const { return total_delayed_ > 0; }
+  bool NeedSpeedupCompaction() const {
+    return IsStopped() || NeedsDelay() || total_compaction_pressure_ > 0;
+  }
+  // Returns how many microseconds the caller needs to sleep after the call.
+  // num_bytes: how many bytes to put into the DB.
+  // Prerequisite: DB mutex held.
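+  // Illustrative example (not from the source): with delayed_write_rate_ set
+  // to 10000000 (10MB/s) and an empty bytes_left_ budget, a call
+  // GetDelay(env, 20000000) asks to write 20MB and is told to sleep
+  // 20000000 / 10000000 * 1000000 = 2000000 microseconds, i.e. 2 seconds,
+  // matching the first expectation in write_controller_test.cc below.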
+ uint64_t GetDelay(Env* env, uint64_t num_bytes); + void set_delayed_write_rate(uint64_t write_rate) { + // avoid divide 0 + if (write_rate == 0) { + write_rate = 1u; + } + delayed_write_rate_ = write_rate; + } + uint64_t delayed_write_rate() const { return delayed_write_rate_; } + + private: + friend class WriteControllerToken; + friend class StopWriteToken; + friend class DelayWriteToken; + friend class CompactionPressureToken; + + int total_stopped_; + int total_delayed_; + int total_compaction_pressure_; + uint64_t bytes_left_; + uint64_t last_refill_time_; + uint64_t delayed_write_rate_; +}; + +class WriteControllerToken { + public: + explicit WriteControllerToken(WriteController* controller) + : controller_(controller) {} + virtual ~WriteControllerToken() {} + + protected: + WriteController* controller_; + + private: + // no copying allowed + WriteControllerToken(const WriteControllerToken&) = delete; + void operator=(const WriteControllerToken&) = delete; +}; + +class StopWriteToken : public WriteControllerToken { + public: + explicit StopWriteToken(WriteController* controller) + : WriteControllerToken(controller) {} + virtual ~StopWriteToken(); +}; + +class DelayWriteToken : public WriteControllerToken { + public: + explicit DelayWriteToken(WriteController* controller) + : WriteControllerToken(controller) {} + virtual ~DelayWriteToken(); +}; + +class CompactionPressureToken : public WriteControllerToken { + public: + explicit CompactionPressureToken(WriteController* controller) + : WriteControllerToken(controller) {} + virtual ~CompactionPressureToken(); +}; + +} // namespace rocksdb diff --git a/external/rocksdb/db/write_controller_test.cc b/external/rocksdb/db/write_controller_test.cc new file mode 100755 index 0000000000..db9a9db1b3 --- /dev/null +++ b/external/rocksdb/db/write_controller_test.cc @@ -0,0 +1,131 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+//
+#include "db/write_controller.h"
+
+#include "rocksdb/env.h"
+#include "util/testharness.h"
+
+namespace rocksdb {
+
+class WriteControllerTest : public testing::Test {};
+
+class TimeSetEnv : public EnvWrapper {
+ public:
+  explicit TimeSetEnv() : EnvWrapper(nullptr) {}
+  uint64_t now_micros_ = 6666;
+  virtual uint64_t NowMicros() override { return now_micros_; }
+};
+
+TEST_F(WriteControllerTest, ChangeDelayRateTest) {
+  TimeSetEnv env;
+  WriteController controller(10000000u);
+  auto delay_token_0 =
+      controller.GetDelayToken(controller.delayed_write_rate());
+  ASSERT_EQ(static_cast<uint64_t>(2000000),
+            controller.GetDelay(&env, 20000000u));
+  auto delay_token_1 = controller.GetDelayToken(2000000u);
+  ASSERT_EQ(static_cast<uint64_t>(10000000),
+            controller.GetDelay(&env, 20000000u));
+  auto delay_token_2 = controller.GetDelayToken(1000000u);
+  ASSERT_EQ(static_cast<uint64_t>(20000000),
+            controller.GetDelay(&env, 20000000u));
+  auto delay_token_3 = controller.GetDelayToken(20000000u);
+  ASSERT_EQ(static_cast<uint64_t>(1000000),
+            controller.GetDelay(&env, 20000000u));
+  auto delay_token_4 =
+      controller.GetDelayToken(controller.delayed_write_rate() * 2);
+  ASSERT_EQ(static_cast<uint64_t>(500000),
+            controller.GetDelay(&env, 20000000u));
+}
+
+TEST_F(WriteControllerTest, SanityTest) {
+  WriteController controller(10000000u);
+  auto stop_token_1 = controller.GetStopToken();
+  auto stop_token_2 = controller.GetStopToken();
+
+  ASSERT_TRUE(controller.IsStopped());
+  stop_token_1.reset();
+  ASSERT_TRUE(controller.IsStopped());
+  stop_token_2.reset();
+  ASSERT_FALSE(controller.IsStopped());
+
+  TimeSetEnv env;
+
+  auto delay_token_1 = controller.GetDelayToken(10000000u);
+  ASSERT_EQ(static_cast<uint64_t>(2000000),
+            controller.GetDelay(&env, 20000000u));
+
+  env.now_micros_ += 1999900u;  // sleep debt 1000
+
+  auto delay_token_2 = controller.GetDelayToken(10000000u);
+  // Rate reset after changing the token.
+  ASSERT_EQ(static_cast<uint64_t>(2000000),
+            controller.GetDelay(&env, 20000000u));
+
+  env.now_micros_ += 1999900u;  // sleep debt 1000
+
+  // One refill: 10240 bytes allowed, 1000 used, 9240 left
+  ASSERT_EQ(static_cast<uint64_t>(1124), controller.GetDelay(&env, 1000u));
+  env.now_micros_ += 1124u;  // sleep debt 0
+
+  delay_token_2.reset();
+  // 1000 used, 8240 left
+  ASSERT_EQ(static_cast<uint64_t>(0), controller.GetDelay(&env, 1000u));
+
+  env.now_micros_ += 100u;  // sleep credit 100
+  // 1000 used, 7240 left
+  ASSERT_EQ(static_cast<uint64_t>(0), controller.GetDelay(&env, 1000u));
+
+  env.now_micros_ += 100u;  // sleep credit 200
+  // One refill: 10240 filled, sleep credit generates 2000. 8000 used
+  // 7240 + 10240 + 2000 - 8000 = 11480 left
+  ASSERT_EQ(static_cast<uint64_t>(1024u), controller.GetDelay(&env, 8000u));
+
+  env.now_micros_ += 200u;  // sleep debt 824
+  // 1000 used, 10480 left.
+  ASSERT_EQ(static_cast<uint64_t>(0), controller.GetDelay(&env, 1000u));
+
+  env.now_micros_ += 200u;  // sleep debt 624
+  // Out of bound sleep, still 10480 left
+  ASSERT_EQ(static_cast<uint64_t>(3000624u),
+            controller.GetDelay(&env, 30000000u));
+
+  env.now_micros_ += 3000724u;  // sleep credit 100
+  // 6000 used, 4480 left.
+  ASSERT_EQ(static_cast<uint64_t>(0), controller.GetDelay(&env, 6000u));
+
+  env.now_micros_ += 200u;  // sleep credit 300
+  // One refill, credit 4480 balance + 3000 credit + 10240 refill
+  // Use 8000, 9720 left
+  ASSERT_EQ(static_cast<uint64_t>(1024u), controller.GetDelay(&env, 8000u));
+
+  env.now_micros_ += 3024u;  // sleep credit 2000
+
+  // 1720 left
+  ASSERT_EQ(static_cast<uint64_t>(0u), controller.GetDelay(&env, 8000u));
+
+  // 1720 balance + 20000 credit = 20170 left
+  // Use 8000, 12170 left
+  ASSERT_EQ(static_cast<uint64_t>(0u), controller.GetDelay(&env, 8000u));
+
+  // 4170 left
+  ASSERT_EQ(static_cast<uint64_t>(0u), controller.GetDelay(&env, 8000u));
+
+  // Need a refill
+  ASSERT_EQ(static_cast<uint64_t>(1024u), controller.GetDelay(&env, 9000u));
+
+  delay_token_1.reset();
+  ASSERT_EQ(static_cast<uint64_t>(0), controller.GetDelay(&env, 30000000u));
+  delay_token_1.reset();
+  ASSERT_FALSE(controller.IsStopped());
+}
+
+}  // namespace rocksdb
+
+int main(int argc, char** argv) {
+  ::testing::InitGoogleTest(&argc, argv);
+  return RUN_ALL_TESTS();
+}
diff --git a/external/rocksdb/db/write_thread.cc b/external/rocksdb/db/write_thread.cc
new file mode 100755
index 0000000000..531da55df3
--- /dev/null
+++ b/external/rocksdb/db/write_thread.cc
@@ -0,0 +1,440 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "db/write_thread.h"
+#include <chrono>
+#include <mutex>
+#include <thread>
+#include "db/column_family.h"
+#include "port/port.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+WriteThread::WriteThread(uint64_t max_yield_usec, uint64_t slow_yield_usec)
+    : max_yield_usec_(max_yield_usec),
+      slow_yield_usec_(slow_yield_usec),
+      newest_writer_(nullptr) {}
+
+uint8_t WriteThread::BlockingAwaitState(Writer* w, uint8_t goal_mask) {
+  // We're going to block. Lazily create the mutex. We guarantee
+  // propagation of this construction to the waker via the
+  // STATE_LOCKED_WAITING state. The waker won't try to touch the mutex
+  // or the condvar unless they CAS away the STATE_LOCKED_WAITING that
+  // we install below.
+  w->CreateMutex();
+
+  auto state = w->state.load(std::memory_order_acquire);
+  assert(state != STATE_LOCKED_WAITING);
+  if ((state & goal_mask) == 0 &&
+      w->state.compare_exchange_strong(state, STATE_LOCKED_WAITING)) {
+    // we have permission (and an obligation) to use StateMutex
+    std::unique_lock<std::mutex> guard(w->StateMutex());
+    w->StateCV().wait(guard, [w] {
+      return w->state.load(std::memory_order_relaxed) != STATE_LOCKED_WAITING;
+    });
+    state = w->state.load(std::memory_order_relaxed);
+  }
+  // else tricky. Goal is met or CAS failed. In the latter case the waker
+  // must have changed the state, and compare_exchange_strong has updated
+  // our local variable with the new one. At the moment WriteThread never
+  // waits for a transition across intermediate states, so we know that
+  // since a state change has occurred the goal must have been met.
+  assert((state & goal_mask) != 0);
+  return state;
+}
+
+uint8_t WriteThread::AwaitState(Writer* w, uint8_t goal_mask,
+                                AdaptationContext* ctx) {
+  uint8_t state;
+
+  // On a modern Xeon each loop takes about 7 nanoseconds (most of which
+  // is the effect of the pause instruction), so 200 iterations is a bit
+  // more than a microsecond. This is long enough that waits longer than
+  // this can amortize the cost of accessing the clock and yielding.
+  for (uint32_t tries = 0; tries < 200; ++tries) {
+    state = w->state.load(std::memory_order_acquire);
+    if ((state & goal_mask) != 0) {
+      return state;
+    }
+    port::AsmVolatilePause();
+  }
+
+  // If we're only going to end up waiting a short period of time,
+  // it can be a lot more efficient to call std::this_thread::yield()
+  // in a loop than to block in StateMutex(). For reference, on my 4.0
+  // SELinux test server with support for syscall auditing enabled, the
+  // minimum latency between FUTEX_WAKE to returning from FUTEX_WAIT is
+  // 2.7 usec, and the average is more like 10 usec. That can be a big
+  // drag on RocksDB's single-writer design. Of course, spinning is a
+  // bad idea if other threads are waiting to run or if we're going to
+  // wait for a long time. How do we decide?
+  //
+  // We break waiting into 3 categories: short-uncontended,
+  // short-contended, and long. If we had an oracle, then we would always
+  // spin for short-uncontended, always block for long, and our choice for
+  // short-contended might depend on whether we were trying to optimize
+  // RocksDB throughput or avoid being greedy with system resources.
+  //
+  // Bucketing into short or long is easy by measuring elapsed time.
+  // Differentiating short-uncontended from short-contended is a bit
+  // trickier, but not too bad. We could look for involuntary context
+  // switches using getrusage(RUSAGE_THREAD, ..), but it's less work
+  // (portability code and CPU) to just look for yield calls that take
+  // longer than we expect. sched_yield() doesn't actually result in any
+  // context switch overhead if there are no other runnable processes
+  // on the current core, in which case it usually takes less than
+  // a microsecond.
+  //
+  // There are two primary tunables here: the threshold between "short"
+  // and "long" waits, and the threshold at which we suspect that a yield
+  // is slow enough to indicate we should probably block. If these
+  // thresholds are chosen well then CPU-bound workloads that don't
+  // have more threads than cores will experience few context switches
+  // (voluntary or involuntary), and the total number of context switches
+  // (voluntary and involuntary) will not be dramatically larger (maybe
+  // 2x) than the number of voluntary context switches that occur when
+  // --max_yield_wait_micros=0.
+  //
+  // There's another constant, which is the number of slow yields we will
+  // tolerate before reversing our previous decision. Solitary slow
+  // yields are pretty common (low-priority small jobs ready to run),
+  // so this should be at least 2. We set this conservatively to 3 so
+  // that we can also immediately schedule a ctx adaptation, rather than
+  // waiting for the next update_ctx.
+ + const size_t kMaxSlowYieldsWhileSpinning = 3; + + bool update_ctx = false; + bool would_spin_again = false; + + if (max_yield_usec_ > 0) { + update_ctx = Random::GetTLSInstance()->OneIn(256); + + if (update_ctx || ctx->value.load(std::memory_order_relaxed) >= 0) { + // we're updating the adaptation statistics, or spinning has > + // 50% chance of being shorter than max_yield_usec_ and causing no + // involuntary context switches + auto spin_begin = std::chrono::steady_clock::now(); + + // this variable doesn't include the final yield (if any) that + // causes the goal to be met + size_t slow_yield_count = 0; + + auto iter_begin = spin_begin; + while ((iter_begin - spin_begin) <= + std::chrono::microseconds(max_yield_usec_)) { + std::this_thread::yield(); + + state = w->state.load(std::memory_order_acquire); + if ((state & goal_mask) != 0) { + // success + would_spin_again = true; + break; + } + + auto now = std::chrono::steady_clock::now(); + if (now == iter_begin || + now - iter_begin >= std::chrono::microseconds(slow_yield_usec_)) { + // conservatively count it as a slow yield if our clock isn't + // accurate enough to measure the yield duration + ++slow_yield_count; + if (slow_yield_count >= kMaxSlowYieldsWhileSpinning) { + // Not just one ivcsw, but several. Immediately update ctx + // and fall back to blocking + update_ctx = true; + break; + } + } + iter_begin = now; + } + } + } + + if ((state & goal_mask) == 0) { + state = BlockingAwaitState(w, goal_mask); + } + + if (update_ctx) { + auto v = ctx->value.load(std::memory_order_relaxed); + // fixed point exponential decay with decay constant 1/1024, with +1 + // and -1 scaled to avoid overflow for int32_t + v = v + (v / 1024) + (would_spin_again ? 1 : -1) * 16384; + ctx->value.store(v, std::memory_order_relaxed); + } + + assert((state & goal_mask) != 0); + return state; +} + +void WriteThread::SetState(Writer* w, uint8_t new_state) { + auto state = w->state.load(std::memory_order_acquire); + if (state == STATE_LOCKED_WAITING || + !w->state.compare_exchange_strong(state, new_state)) { + assert(state == STATE_LOCKED_WAITING); + + std::lock_guard guard(w->StateMutex()); + assert(w->state.load(std::memory_order_relaxed) != new_state); + w->state.store(new_state, std::memory_order_relaxed); + w->StateCV().notify_one(); + } +} + +void WriteThread::LinkOne(Writer* w, bool* linked_as_leader) { + assert(w->state == STATE_INIT); + + Writer* writers = newest_writer_.load(std::memory_order_relaxed); + while (true) { + w->link_older = writers; + if (newest_writer_.compare_exchange_strong(writers, w)) { + if (writers == nullptr) { + // this isn't part of the WriteThread machinery, but helps with + // debugging and is checked by an assert in WriteImpl + w->state.store(STATE_GROUP_LEADER, std::memory_order_relaxed); + } + *linked_as_leader = (writers == nullptr); + return; + } + } +} + +void WriteThread::CreateMissingNewerLinks(Writer* head) { + while (true) { + Writer* next = head->link_older; + if (next == nullptr || next->link_newer != nullptr) { + assert(next == nullptr || next->link_newer == head); + break; + } + next->link_newer = head; + head = next; + } +} + +void WriteThread::JoinBatchGroup(Writer* w) { + static AdaptationContext ctx("JoinBatchGroup"); + + assert(w->batch != nullptr); + bool linked_as_leader; + LinkOne(w, &linked_as_leader); + + TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:Wait", w); + + if (!linked_as_leader) { + AwaitState(w, + STATE_GROUP_LEADER | STATE_PARALLEL_FOLLOWER | STATE_COMPLETED, + &ctx); + 
TEST_SYNC_POINT_CALLBACK("WriteThread::JoinBatchGroup:DoneWaiting", w);
+  }
+}
+
+size_t WriteThread::EnterAsBatchGroupLeader(
+    Writer* leader, WriteThread::Writer** last_writer,
+    autovector<WriteThread::Writer*>* write_batch_group) {
+  assert(leader->link_older == nullptr);
+  assert(leader->batch != nullptr);
+
+  size_t size = WriteBatchInternal::ByteSize(leader->batch);
+  write_batch_group->push_back(leader);
+
+  // Allow the group to grow up to a maximum size, but if the
+  // original write is small, limit the growth so we do not slow
+  // down the small write too much.
+  size_t max_size = 1 << 20;
+  if (size <= (128 << 10)) {
+    max_size = size + (128 << 10);
+  }
+
+  *last_writer = leader;
+
+  Writer* newest_writer = newest_writer_.load(std::memory_order_acquire);
+
+  // This is safe regardless of any db mutex status of the caller. Previous
+  // calls to ExitAsGroupLeader either didn't call CreateMissingNewerLinks
+  // (they emptied the list and then we added ourself as leader) or had to
+  // explicitly wake us up (the list was non-empty when we added ourself,
+  // so we have already received our MarkJoined).
+  CreateMissingNewerLinks(newest_writer);
+
+  // Tricky. Iteration start (leader) is exclusive and finish
+  // (newest_writer) is inclusive. Iteration goes from old to new.
+  Writer* w = leader;
+  while (w != newest_writer) {
+    w = w->link_newer;
+
+    if (w->sync && !leader->sync) {
+      // Do not include a sync write into a batch handled by a non-sync write.
+      break;
+    }
+
+    if (!w->disableWAL && leader->disableWAL) {
+      // Do not include a write that needs WAL into a batch that has
+      // WAL disabled.
+      break;
+    }
+
+    if (w->batch == nullptr) {
+      // Do not include those writes with nullptr batch. Those are not writes,
+      // those are something else. They want to be alone
+      break;
+    }
+
+    if (w->callback != nullptr && !w->callback->AllowWriteBatching()) {
+      // don't batch writes that don't want to be batched
+      break;
+    }
+
+    auto batch_size = WriteBatchInternal::ByteSize(w->batch);
+    if (size + batch_size > max_size) {
+      // Do not make batch too big
+      break;
+    }
+
+    size += batch_size;
+    write_batch_group->push_back(w);
+    w->in_batch_group = true;
+    *last_writer = w;
+  }
+  return size;
+}
+
+void WriteThread::LaunchParallelFollowers(ParallelGroup* pg,
+                                          SequenceNumber sequence) {
+  // EnterAsBatchGroupLeader already created the links from leader to
+  // newer writers in the group
+
+  pg->leader->parallel_group = pg;
+
+  Writer* w = pg->leader;
+  w->sequence = sequence;
+
+  while (w != pg->last_writer) {
+    // Writers that won't write don't get sequence allotment
+    if (!w->CallbackFailed()) {
+      sequence += WriteBatchInternal::Count(w->batch);
+    }
+    w = w->link_newer;
+
+    w->sequence = sequence;
+    w->parallel_group = pg;
+    SetState(w, STATE_PARALLEL_FOLLOWER);
+  }
+}
+
+bool WriteThread::CompleteParallelWorker(Writer* w) {
+  static AdaptationContext ctx("CompleteParallelWorker");
+
+  auto* pg = w->parallel_group;
+  if (!w->status.ok()) {
+    std::lock_guard<std::mutex> guard(w->StateMutex());
+    pg->status = w->status;
+  }
+
+  auto leader = pg->leader;
+  auto early_exit_allowed = pg->early_exit_allowed;
+
+  if (pg->running.load(std::memory_order_acquire) > 1 && pg->running-- > 1) {
+    // we're not the last one
+    AwaitState(w, STATE_COMPLETED, &ctx);
+
+    // Caller only needs to perform exit duties if early exit doesn't
+    // apply and this is the leader. Can't touch pg here. Whoever set
+    // our state to STATE_COMPLETED copied pg->status to w.status for us.
+ return w == leader && !(early_exit_allowed && w->status.ok()); + } + // else we're the last parallel worker + + if (w == leader || (early_exit_allowed && pg->status.ok())) { + // this thread should perform exit duties + w->status = pg->status; + return true; + } else { + // We're the last parallel follower but early commit is not + // applicable. Wake up the leader and then wait for it to exit. + assert(w->state == STATE_PARALLEL_FOLLOWER); + SetState(leader, STATE_COMPLETED); + AwaitState(w, STATE_COMPLETED, &ctx); + return false; + } +} + +void WriteThread::EarlyExitParallelGroup(Writer* w) { + auto* pg = w->parallel_group; + + assert(w->state == STATE_PARALLEL_FOLLOWER); + assert(pg->status.ok()); + ExitAsBatchGroupLeader(pg->leader, pg->last_writer, pg->status); + assert(w->status.ok()); + assert(w->state == STATE_COMPLETED); + SetState(pg->leader, STATE_COMPLETED); +} + +void WriteThread::ExitAsBatchGroupLeader(Writer* leader, Writer* last_writer, + Status status) { + assert(leader->link_older == nullptr); + + Writer* head = newest_writer_.load(std::memory_order_acquire); + if (head != last_writer || + !newest_writer_.compare_exchange_strong(head, nullptr)) { + // Either w wasn't the head during the load(), or it was the head + // during the load() but somebody else pushed onto the list before + // we did the compare_exchange_strong (causing it to fail). In the + // latter case compare_exchange_strong has the effect of re-reading + // its first param (head). No need to retry a failing CAS, because + // only a departing leader (which we are at the moment) can remove + // nodes from the list. + assert(head != last_writer); + + // After walking link_older starting from head (if not already done) + // we will be able to traverse w->link_newer below. This function + // can only be called from an active leader, only a leader can + // clear newest_writer_, we didn't, and only a clear newest_writer_ + // could cause the next leader to start their work without a call + // to MarkJoined, so we can definitely conclude that no other leader + // work is going on here (with or without db mutex). + CreateMissingNewerLinks(head); + assert(last_writer->link_newer->link_older == last_writer); + last_writer->link_newer->link_older = nullptr; + + // Next leader didn't self-identify, because newest_writer_ wasn't + // nullptr when they enqueued (we were definitely enqueued before them + // and are still in the list). That means leader handoff occurs when + // we call MarkJoined + SetState(last_writer->link_newer, STATE_GROUP_LEADER); + } + // else nobody else was waiting, although there might already be a new + // leader now + + while (last_writer != leader) { + last_writer->status = status; + // we need to read link_older before calling SetState, because as soon + // as it is marked committed the other thread's Await may return and + // deallocate the Writer. 
+    auto next = last_writer->link_older;
+    SetState(last_writer, STATE_COMPLETED);
+
+    last_writer = next;
+  }
+}
+
+void WriteThread::EnterUnbatched(Writer* w, InstrumentedMutex* mu) {
+  static AdaptationContext ctx("EnterUnbatched");
+
+  assert(w->batch == nullptr);
+  bool linked_as_leader;
+  LinkOne(w, &linked_as_leader);
+  if (!linked_as_leader) {
+    mu->Unlock();
+    TEST_SYNC_POINT("WriteThread::EnterUnbatched:Wait");
+    AwaitState(w, STATE_GROUP_LEADER, &ctx);
+    mu->Lock();
+  }
+}
+
+void WriteThread::ExitUnbatched(Writer* w) {
+  Status dummy_status;
+  ExitAsBatchGroupLeader(w, w, dummy_status);
+}
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/write_thread.h b/external/rocksdb/db/write_thread.h
new file mode 100755
index 0000000000..87ffff6f95
--- /dev/null
+++ b/external/rocksdb/db/write_thread.h
@@ -0,0 +1,294 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <assert.h>
+#include <stdint.h>
+#include <atomic>
+#include <chrono>
+#include <condition_variable>
+#include <mutex>
+#include <type_traits>
+#include <vector>
+
+#include "db/write_callback.h"
+#include "rocksdb/status.h"
+#include "rocksdb/types.h"
+#include "rocksdb/write_batch.h"
+#include "util/autovector.h"
+#include "util/instrumented_mutex.h"
+
+namespace rocksdb {
+
+class WriteThread {
+ public:
+  enum State : uint8_t {
+    // The initial state of a writer. This is a Writer that is
+    // waiting in JoinBatchGroup. This state can be left when another
+    // thread informs the waiter that it has become a group leader
+    // (-> STATE_GROUP_LEADER), when a leader that has chosen to be
+    // non-parallel informs a follower that its writes have been committed
+    // (-> STATE_COMPLETED), or when a leader that has chosen to perform
+    // updates in parallel and needs this Writer to apply its batch (->
+    // STATE_PARALLEL_FOLLOWER).
+    STATE_INIT = 1,
+
+    // The state used to inform a waiting Writer that it has become the
+    // leader, and it should now build a write batch group. Tricky:
+    // this state is not used if newest_writer_ is empty when a writer
+    // enqueues itself, because there is no need to wait (or even to
+    // create the mutex and condvar used to wait) in that case. This is
+    // a terminal state unless the leader chooses to make this a parallel
+    // batch, in which case the last parallel worker to finish will move
+    // the leader to STATE_COMPLETED.
+    STATE_GROUP_LEADER = 2,
+
+    // A Writer that has returned as a follower in a parallel group.
+    // It should apply its batch to the memtable and then call
+    // CompleteParallelWorker. When someone calls ExitAsBatchGroupLeader
+    // or EarlyExitParallelGroup this state will get transitioned to
+    // STATE_COMPLETED.
+    STATE_PARALLEL_FOLLOWER = 4,
+
+    // A follower whose writes have been applied, or a parallel leader
+    // whose followers have all finished their work. This is a terminal
+    // state.
+    STATE_COMPLETED = 8,
+
+    // A state indicating that the thread may be waiting using StateMutex()
+    // and StateCondVar()
+    STATE_LOCKED_WAITING = 16,
+  };
+
+  struct Writer;
+
+  struct ParallelGroup {
+    Writer* leader;
+    Writer* last_writer;
+    SequenceNumber last_sequence;
+    bool early_exit_allowed;
+    // before running goes to zero, status needs leader->StateMutex()
+    Status status;
+    std::atomic<uint32_t> running;
+  };
+
+  // Information kept for every waiting writer.
+  struct Writer {
+    WriteBatch* batch;
+    bool sync;
+    bool disableWAL;
+    bool disable_memtable;
+    uint64_t log_used;   // log number that this batch was inserted into
+    uint64_t log_ref;    // log number that memtable insert should reference
+    bool in_batch_group;
+    WriteCallback* callback;
+    bool made_waitable;          // records lazy construction of mutex and cv
+    std::atomic<uint8_t> state;  // write under StateMutex() or pre-link
+    ParallelGroup* parallel_group;
+    SequenceNumber sequence;  // the sequence number to use
+    Status status;            // status of memtable inserter
+    Status callback_status;   // status returned by callback->Callback()
+    std::aligned_storage<sizeof(std::mutex)>::type state_mutex_bytes;
+    std::aligned_storage<sizeof(std::condition_variable)>::type state_cv_bytes;
+    Writer* link_older;  // read/write only before linking, or as leader
+    Writer* link_newer;  // lazy, read/write only before linking, or as leader
+
+    Writer()
+        : batch(nullptr),
+          sync(false),
+          disableWAL(false),
+          disable_memtable(false),
+          log_used(0),
+          log_ref(0),
+          in_batch_group(false),
+          callback(nullptr),
+          made_waitable(false),
+          state(STATE_INIT),
+          parallel_group(nullptr),
+          link_older(nullptr),
+          link_newer(nullptr) {}
+
+    ~Writer() {
+      if (made_waitable) {
+        StateMutex().~mutex();
+        StateCV().~condition_variable();
+      }
+    }
+
+    bool CheckCallback(DB* db) {
+      if (callback != nullptr) {
+        callback_status = callback->Callback(db);
+      }
+      return callback_status.ok();
+    }
+
+    void CreateMutex() {
+      if (!made_waitable) {
+        // Note that made_waitable is tracked separately from state
+        // transitions, because we can't atomically create the mutex and
+        // link into the list.
+        made_waitable = true;
+        new (&state_mutex_bytes) std::mutex;
+        new (&state_cv_bytes) std::condition_variable;
+      }
+    }
+
+    // returns the aggregate status of this Writer
+    Status FinalStatus() {
+      if (!status.ok()) {
+        // a non-ok memtable write status takes precedence
+        assert(callback == nullptr || callback_status.ok());
+        return status;
+      } else if (!callback_status.ok()) {
+        // if the callback failed then that is the status we want
+        // because a memtable insert should not have been attempted
+        assert(callback != nullptr);
+        assert(status.ok());
+        return callback_status;
+      } else {
+        // if there is no callback then we only care about
+        // the memtable insert status
+        assert(callback == nullptr || callback_status.ok());
+        return status;
+      }
+    }
+
+    bool CallbackFailed() {
+      return (callback != nullptr) && !callback_status.ok();
+    }
+
+    bool ShouldWriteToMemtable() {
+      return !CallbackFailed() && !disable_memtable;
+    }
+
+    bool ShouldWriteToWAL() { return !CallbackFailed() && !disableWAL; }
+
+    // No other mutexes may be acquired while holding StateMutex(), it is
+    // always last in the order
+    std::mutex& StateMutex() {
+      assert(made_waitable);
+      return *static_cast<std::mutex*>(static_cast<void*>(&state_mutex_bytes));
+    }
+
+    std::condition_variable& StateCV() {
+      assert(made_waitable);
+      return *static_cast<std::condition_variable*>(
+          static_cast<void*>(&state_cv_bytes));
+    }
+  };
+
+  WriteThread(uint64_t max_yield_usec, uint64_t slow_yield_usec);
+
+  // IMPORTANT: None of the methods in this class rely on the db mutex
+  // for correctness. All of the methods except JoinBatchGroup and
+  // EnterUnbatched may be called either with or without the db mutex held.
+  // Correctness is maintained by ensuring that only a single thread is
+  // a leader at a time.
+
+  // Registers w as ready to become part of a batch group, waits until the
+  // caller should perform some work, and returns the current state of the
+  // writer. If w has become the leader of a write batch group, returns
+  // STATE_GROUP_LEADER. If w has been made part of a sequential batch
+  // group and the leader has performed the write, returns STATE_COMPLETED.
+  // If w has been made part of a parallel batch group and is responsible
+  // for updating the memtable, returns STATE_PARALLEL_FOLLOWER.
+  //
+  // The db mutex SHOULD NOT be held when calling this function, because
+  // it will block.
+  //
+  // Writer* w: Writer to be executed as part of a batch group
+  void JoinBatchGroup(Writer* w);
+
+  // Constructs a write batch group led by leader, which should be a
+  // Writer passed to JoinBatchGroup on the current thread.
+  //
+  // Writer* leader: Writer that is STATE_GROUP_LEADER
+  // Writer** last_writer: Out-param that identifies the last follower
+  // autovector<Writer*>* write_batch_group: Out-param of group members
+  // returns: Total batch group byte size
+  size_t EnterAsBatchGroupLeader(
+      Writer* leader, Writer** last_writer,
+      autovector<Writer*>* write_batch_group);
+
+  // Causes JoinBatchGroup to return STATE_PARALLEL_FOLLOWER for all of the
+  // non-leader members of this write batch group. Sets Writer::sequence
+  // before waking them up.
+  //
+  // ParallelGroup* pg: Extra state used to coordinate the parallel add
+  // SequenceNumber sequence: Starting sequence number to assign to Writer-s
+  void LaunchParallelFollowers(ParallelGroup* pg, SequenceNumber sequence);
+
+  // Reports the completion of w's batch to the parallel group leader, and
+  // waits for the rest of the parallel batch to complete. Returns true
+  // if this thread is the last to complete, and hence should advance
+  // the sequence number and then call EarlyExitParallelGroup, false if
+  // someone else has already taken responsibility for that.
+  bool CompleteParallelWorker(Writer* w);
+
+  // This method performs an early completion of a parallel write group,
+  // where the cleanup work of the leader is performed by a follower who
+  // happens to be the last parallel worker to complete.
+  void EarlyExitParallelGroup(Writer* w);
+
+  // Unlinks the Writer-s in a batch group, wakes up the non-leaders,
+  // and wakes up the next leader (if any).
+  //
+  // Writer* leader: From EnterAsBatchGroupLeader
+  // Writer* last_writer: Value of out-param of EnterAsBatchGroupLeader
+  // Status status: Status of write operation
+  void ExitAsBatchGroupLeader(Writer* leader, Writer* last_writer,
+                              Status status);
+
+  // Waits for all preceding writers (unlocking mu while waiting), then
+  // registers w as the currently proceeding writer.
+  //
+  // Writer* w: A Writer not eligible for batching
+  // InstrumentedMutex* mu: The db mutex, to unlock while waiting
+  // REQUIRES: db mutex held
+  void EnterUnbatched(Writer* w, InstrumentedMutex* mu);
+
+  // Completes a Writer begun with EnterUnbatched, unblocking subsequent
+  // writers.
+  void ExitUnbatched(Writer* w);
+
+  struct AdaptationContext {
+    const char* name;
+    std::atomic<int32_t> value;
+
+    explicit AdaptationContext(const char* name0) : name(name0), value(0) {}
+  };
+
+ private:
+  uint64_t max_yield_usec_;
+  uint64_t slow_yield_usec_;
+
+  // Points to the newest pending Writer. Only leader can remove
+  // elements, adding can be done lock-free by anybody
+  std::atomic<Writer*> newest_writer_;
+
+  // Waits for w->state & goal_mask using w->StateMutex(). Returns
+  // the state that satisfies goal_mask.
+  uint8_t BlockingAwaitState(Writer* w, uint8_t goal_mask);
+
+  // Blocks until w->state & goal_mask, returning the state value
+  // that satisfied the predicate.
Uses ctx to adaptively use + // std::this_thread::yield() to avoid mutex overheads. ctx should be + // a context-dependent static. + uint8_t AwaitState(Writer* w, uint8_t goal_mask, AdaptationContext* ctx); + + void SetState(Writer* w, uint8_t new_state); + + // Links w into the newest_writer_ list. Sets *linked_as_leader to + // true if w was linked directly into the leader position. Safe to + // call from multiple threads without external locking. + void LinkOne(Writer* w, bool* linked_as_leader); + + // Computes any missing link_newer links. Should not be called + // concurrently with itself. + void CreateMissingNewerLinks(Writer* head); +}; + +} // namespace rocksdb diff --git a/external/rocksdb/db/xfunc_test_points.cc b/external/rocksdb/db/xfunc_test_points.cc new file mode 100755 index 0000000000..67e96dd059 --- /dev/null +++ b/external/rocksdb/db/xfunc_test_points.cc @@ -0,0 +1,145 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "db/xfunc_test_points.h" +#include "util/xfunc.h" + +namespace rocksdb { + +#ifdef XFUNC + +void xf_manage_release(ManagedIterator* iter) { + if (!(XFuncPoint::GetSkip() & kSkipNoPrefix)) { + iter->ReleaseIter(false); + } +} + +void xf_manage_create(ManagedIterator* iter) { iter->SetDropOld(false); } + +void xf_manage_new(DBImpl* db, ReadOptions* read_options, + bool is_snapshot_supported) { + if ((!XFuncPoint::Check("managed_xftest_dropold") && + (!XFuncPoint::Check("managed_xftest_release"))) || + (!read_options->managed)) { + return; + } + if ((!read_options->tailing) && (read_options->snapshot == nullptr) && + (!is_snapshot_supported)) { + read_options->managed = false; + return; + } + if (db->GetOptions().prefix_extractor != nullptr) { + if (strcmp(db->GetOptions().table_factory.get()->Name(), "PlainTable")) { + if (!(XFuncPoint::GetSkip() & kSkipNoPrefix)) { + read_options->total_order_seek = true; + } + } else { + read_options->managed = false; + } + } +} + +class XFTransactionWriteHandler : public WriteBatch::Handler { + public: + Transaction* txn_; + DBImpl* db_impl_; + + XFTransactionWriteHandler(Transaction* txn, DBImpl* db_impl) + : txn_(txn), db_impl_(db_impl) {} + + virtual Status PutCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + InstrumentedMutexLock l(&db_impl_->mutex_); + + ColumnFamilyHandle* cfh = db_impl_->GetColumnFamilyHandle(column_family_id); + if (cfh == nullptr) { + return Status::InvalidArgument( + "XFUNC test could not find column family " + "handle for id ", + ToString(column_family_id)); + } + + txn_->Put(cfh, key, value); + + return Status::OK(); + } + + virtual Status MergeCF(uint32_t column_family_id, const Slice& key, + const Slice& value) override { + InstrumentedMutexLock l(&db_impl_->mutex_); + + ColumnFamilyHandle* cfh = db_impl_->GetColumnFamilyHandle(column_family_id); + if (cfh == nullptr) { + return Status::InvalidArgument( + "XFUNC test could not find column family " + "handle for id ", + ToString(column_family_id)); + } + + txn_->Merge(cfh, key, value); + + return Status::OK(); + } + + virtual Status DeleteCF(uint32_t column_family_id, + const Slice& key) override { + InstrumentedMutexLock l(&db_impl_->mutex_); + + ColumnFamilyHandle* cfh = db_impl_->GetColumnFamilyHandle(column_family_id); + if (cfh == nullptr) { + 
return Status::InvalidArgument(
+          "XFUNC test could not find column family "
+          "handle for id ",
+          ToString(column_family_id));
+    }
+
+    txn_->Delete(cfh, key);
+
+    return Status::OK();
+  }
+
+  virtual void LogData(const Slice& blob) override { txn_->PutLogData(blob); }
+};
+
+// Whenever DBImpl::Write is called, create a transaction and do the write via
+// the transaction.
+void xf_transaction_write(const WriteOptions& write_options,
+                          const DBOptions& db_options, WriteBatch* my_batch,
+                          WriteCallback* callback, DBImpl* db_impl, Status* s,
+                          bool* write_attempted) {
+  if (callback != nullptr) {
+    // We may already be in a transaction, don't force a transaction
+    *write_attempted = false;
+    return;
+  }
+
+  OptimisticTransactionDB* txn_db = new OptimisticTransactionDB(db_impl);
+  Transaction* txn = Transaction::BeginTransaction(txn_db, write_options);
+
+  XFTransactionWriteHandler handler(txn, db_impl);
+  *s = my_batch->Iterate(&handler);
+
+  if (!s->ok()) {
+    Log(InfoLogLevel::ERROR_LEVEL, db_options.info_log,
+        "XFUNC test could not iterate batch. status: %s\n",
+        s->ToString().c_str());
+  }
+
+  *s = txn->Commit();
+
+  if (!s->ok()) {
+    Log(InfoLogLevel::ERROR_LEVEL, db_options.info_log,
+        "XFUNC test could not commit transaction. status: %s\n",
+        s->ToString().c_str());
+  }
+
+  *write_attempted = true;
+  delete txn;
+  delete txn_db;
+}
+
+#endif  // XFUNC
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/db/xfunc_test_points.h b/external/rocksdb/db/xfunc_test_points.h
new file mode 100755
index 0000000000..8ed9f2c736
--- /dev/null
+++ b/external/rocksdb/db/xfunc_test_points.h
@@ -0,0 +1,33 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include "db/db_impl.h"
+#include "db/managed_iterator.h"
+#include "db/write_callback.h"
+#include "rocksdb/options.h"
+#include "rocksdb/write_batch.h"
+#include "util/xfunc.h"
+
+namespace rocksdb {
+
+#ifdef XFUNC
+
+// DB-specific test points for the cross-functional test framework (see
+// util/xfunc.h).
+void xf_manage_release(ManagedIterator* iter); +void xf_manage_create(ManagedIterator* iter); +void xf_manage_new(DBImpl* db, ReadOptions* readoptions, + bool is_snapshot_supported); +void xf_transaction_write(const WriteOptions& write_options, + const DBOptions& db_options, + class WriteBatch* my_batch, + class WriteCallback* callback, DBImpl* db_impl, + Status* success, bool* write_attempted); + +#endif // XFUNC + +} // namespace rocksdb diff --git a/external/rocksdb/doc/doc.css b/external/rocksdb/doc/doc.css new file mode 100755 index 0000000000..700c564e43 --- /dev/null +++ b/external/rocksdb/doc/doc.css @@ -0,0 +1,89 @@ +body { + margin-left: 0.5in; + margin-right: 0.5in; + background: white; + color: black; +} + +h1 { + margin-left: -0.2in; + font-size: 14pt; +} +h2 { + margin-left: -0in; + font-size: 12pt; +} +h3 { + margin-left: -0in; +} +h4 { + margin-left: -0in; +} +hr { + margin-left: -0in; +} + +/* Definition lists: definition term bold */ +dt { + font-weight: bold; +} + +address { + text-align: center; +} +code,samp,var { + color: blue; +} +kbd { + color: #600000; +} +div.note p { + float: right; + width: 3in; + margin-right: 0%; + padding: 1px; + border: 2px solid #6060a0; + background-color: #fffff0; +} + +ul { + margin-top: -0em; + margin-bottom: -0em; +} + +ol { + margin-top: -0em; + margin-bottom: -0em; +} + +UL.nobullets { + list-style-type: none; + list-style-image: none; + margin-left: -1em; +} + +p { + margin: 1em 0 1em 0; + padding: 0 0 0 0; +} + +pre { + line-height: 1.3em; + padding: 0.4em 0 0.8em 0; + margin: 0 0 0 0; + border: 0 0 0 0; + color: blue; +} + +.datatable { + margin-left: auto; + margin-right: auto; + margin-top: 2em; + margin-bottom: 2em; + border: 1px solid; +} + +.datatable td,th { + padding: 0 0.5em 0 0.5em; + text-align: right; +} diff --git a/external/rocksdb/doc/index.html b/external/rocksdb/doc/index.html new file mode 100755 index 0000000000..94f7cb888d --- /dev/null +++ b/external/rocksdb/doc/index.html @@ -0,0 +1,827 @@ + + + + +RocksDB + + + +

+RocksDB
+
+The Facebook Database Engineering Team
+
+Built on earlier work on leveldb by Sanjay Ghemawat
+(sanjay@google.com) and Jeff Dean (jeff@google.com)
+
+The rocksdb library provides a persistent key value store. Keys and
+values are arbitrary byte arrays. The keys are ordered within the key
+value store according to a user-specified comparator function.

+

Opening A Database

+

+A rocksdb database has a name which corresponds to a file system +directory. All of the contents of database are stored in this +directory. The following example shows how to open a database, +creating it if necessary: +

+

+  #include <assert>
+  #include "rocksdb/db.h"
+
+  rocksdb::DB* db;
+  rocksdb::Options options;
+  options.create_if_missing = true;
+  rocksdb::Status status = rocksdb::DB::Open(options, "/tmp/testdb", &db);
+  assert(status.ok());
+  ...
+
+If you want to raise an error if the database already exists, add +the following line before the rocksdb::DB::Open call: +
+  options.error_if_exists = true;
+
+

Status

+

+You may have noticed the rocksdb::Status type above. Values of this +type are returned by most functions in rocksdb that may encounter an +error. You can check if such a result is ok, and also print an +associated error message: +

+

+   rocksdb::Status s = ...;
+   if (!s.ok()) cerr << s.ToString() << endl;
+
+

Closing A Database

+

+When you are done with a database, just delete the database object. +Example: +

+

+  ... open the db as described above ...
+  ... do something with db ...
+  delete db;
+
+

Reads And Writes

+

+The database provides Put, Delete, and Get methods to +modify/query the database. For example, the following code +moves the value stored under key1 to key2. +

+  std::string value;
+  rocksdb::Status s = db->Get(rocksdb::ReadOptions(), key1, &value);
+  if (s.ok()) s = db->Put(rocksdb::WriteOptions(), key2, value);
+  if (s.ok()) s = db->Delete(rocksdb::WriteOptions(), key1);
+
+ +

Atomic Updates

+

+Note that if the process dies after the Put of key2 but before the +delete of key1, the same value may be left stored under multiple keys. +Such problems can be avoided by using the WriteBatch class to +atomically apply a set of updates: +

+

+  #include "rocksdb/write_batch.h"
+  ...
+  std::string value;
+  rocksdb::Status s = db->Get(rocksdb::ReadOptions(), key1, &value);
+  if (s.ok()) {
+    rocksdb::WriteBatch batch;
+    batch.Delete(key1);
+    batch.Put(key2, value);
+    s = db->Write(rocksdb::WriteOptions(), &batch);
+  }
+
+The WriteBatch holds a sequence of edits to be made to the database, +and these edits within the batch are applied in order. Note that we +called Delete before Put so that if key1 is identical to key2, +we do not end up erroneously dropping the value entirely. +

+Apart from its atomicity benefits, WriteBatch may also be used to +speed up bulk updates by placing lots of individual mutations into the +same batch. + +
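+For example, a bulk update might batch many mutations into a single
+Write call (a minimal sketch; the key and value names are illustrative):
+
+  rocksdb::WriteBatch batch;
+  for (int i = 0; i < 1000; i++) {
+    batch.Put("key" + std::to_string(i), "value" + std::to_string(i));
+  }
+  rocksdb::Status s = db->Write(rocksdb::WriteOptions(), &batch);
+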

Synchronous Writes

+By default, each write to rocksdb is asynchronous: it
+returns after pushing the write from the process into the operating
+system. The transfer from operating system memory to the underlying
+persistent storage happens asynchronously. The sync flag
+can be turned on for a particular write to make the write operation
+not return until the data being written has been pushed all the way to
+persistent storage. (On Posix systems, this is implemented by calling
+either fsync(...) or fdatasync(...) or
+msync(..., MS_SYNC) before the write operation returns.)
+
+  rocksdb::WriteOptions write_options;
+  write_options.sync = true;
+  db->Put(write_options, ...);
+
+Asynchronous writes are often more than a thousand times as fast as +synchronous writes. The downside of asynchronous writes is that a +crash of the machine may cause the last few updates to be lost. Note +that a crash of just the writing process (i.e., not a reboot) will not +cause any loss since even when sync is false, an update +is pushed from the process memory into the operating system before it +is considered done. + +

+Asynchronous writes can often be used safely. For example, when +loading a large amount of data into the database you can handle lost +updates by restarting the bulk load after a crash. A hybrid scheme is +also possible where every Nth write is synchronous, and in the event +of a crash, the bulk load is restarted just after the last synchronous +write finished by the previous run. (The synchronous write can update +a marker that describes where to restart on a crash.) + +
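+A rough sketch of the every-Nth-write scheme (keys and updates stand in
+for the application's own data; the interval is illustrative):
+
+  rocksdb::WriteOptions async_options;  // sync is false by default
+  rocksdb::WriteOptions sync_options;
+  sync_options.sync = true;
+  for (size_t i = 0; i < updates.size(); i++) {
+    // every 1000th write is synchronous and doubles as a restart marker
+    bool marker = (i % 1000 == 999);
+    db->Put(marker ? sync_options : async_options, keys[i], updates[i]);
+  }
+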

+WriteBatch provides an alternative to asynchronous writes. +Multiple updates may be placed in the same WriteBatch and +applied together using a synchronous write (i.e., +write_options.sync is set to true). The extra cost of +the synchronous write will be amortized across all of the writes in +the batch. + +

+We also provide a way to completely disable Write Ahead Log for a +particular write. If you set write_option.disableWAL to true, the +write will not go to the log at all and may be lost in an event of +process crash. + +
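+For example (key and value as in the earlier examples):
+
+  rocksdb::WriteOptions write_options;
+  write_options.disableWAL = true;
+  db->Put(write_options, key, value);  // may be lost if the process crashes
+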

+When opening a DB, you can disable syncing of data files by setting +Options::disableDataSync to true. This can be useful when doing +bulk-loading or big idempotent operations. Once the operation is +finished, you can manually call sync() to flush all dirty buffers +to stable storage. + +

+RocksDB by default uses faster fdatasync() to sync files. If you want +to use fsync(), you can set Options::use_fsync to true. You should set +this to true on filesystems like ext3 that can lose files after a +reboot. + +
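+Both of these knobs live on Options (a minimal sketch; you would
+normally set only the one you need):
+
+  rocksdb::Options options;
+  options.disableDataSync = true;  // for the bulk-load scheme above
+  options.use_fsync = true;        // prefer fsync(), e.g. on ext3
+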

+

Concurrency

+

+A database may only be opened by one process at a time.
+The rocksdb implementation acquires a lock from the
+operating system to prevent misuse. Within a single process, the
+same rocksdb::DB object may be safely shared by multiple
+concurrent threads. I.e., different threads may write into or fetch
+iterators or call Get on the same database without any
+external synchronization (the rocksdb implementation will
+automatically do the required synchronization). However, other objects
+(like Iterator and WriteBatch) may require external synchronization.
+If two threads share such an object, they must protect access to it
+using their own locking protocol. More details are available in
+the public header files.
+

+

Merge operators

+

+Merge operators provide efficient support for read-modify-write operations.
+More on the interface and implementation can be found at the links below;
+a short usage sketch follows them.

+ + Merge Operator +

+ + Merge Operator Implementation + +
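+A minimal usage sketch (MyMergeOperator stands in for an
+application-defined rocksdb::MergeOperator implementation, as described
+in the pages above):
+
+  rocksdb::Options options;
+  options.create_if_missing = true;
+  options.merge_operator.reset(new MyMergeOperator);  // hypothetical class
+  rocksdb::DB* db;
+  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/testdb", &db);
+  assert(s.ok());
+  s = db->Merge(rocksdb::WriteOptions(), "key", "operand");
+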

+

Iteration

+

+The following example demonstrates how to print all key,value pairs +in a database. +

+

+  rocksdb::Iterator* it = db->NewIterator(rocksdb::ReadOptions());
+  for (it->SeekToFirst(); it->Valid(); it->Next()) {
+    cout << it->key().ToString() << ": "  << it->value().ToString() << endl;
+  }
+  assert(it->status().ok());  // Check for any errors found during the scan
+  delete it;
+
+The following variation shows how to process just the keys in the +range [start,limit): +

+

+  for (it->Seek(start);
+       it->Valid() && it->key().ToString() < limit;
+       it->Next()) {
+    ...
+  }
+
+You can also process entries in reverse order. (Caveat: reverse +iteration may be somewhat slower than forward iteration.) +

+

+  for (it->SeekToLast(); it->Valid(); it->Prev()) {
+    ...
+  }
+
+

Snapshots

+

+Snapshots provide consistent read-only views over the entire state of +the key-value store. ReadOptions::snapshot may be non-NULL to indicate +that a read should operate on a particular version of the DB state. +If ReadOptions::snapshot is NULL, the read will operate on an +implicit snapshot of the current state. +

+Snapshots are created by the DB::GetSnapshot() method: +

+

+  rocksdb::ReadOptions options;
+  options.snapshot = db->GetSnapshot();
+  ... apply some updates to db ...
+  rocksdb::Iterator* iter = db->NewIterator(options);
+  ... read using iter to view the state when the snapshot was created ...
+  delete iter;
+  db->ReleaseSnapshot(options.snapshot);
+
+Note that when a snapshot is no longer needed, it should be released +using the DB::ReleaseSnapshot interface. This allows the +implementation to get rid of state that was being maintained just to +support reading as of that snapshot. +

Slice

+

+The return value of the it->key() and it->value() calls above +are instances of the rocksdb::Slice type. Slice is a simple +structure that contains a length and a pointer to an external byte +array. Returning a Slice is a cheaper alternative to returning a +std::string since we do not need to copy potentially large keys and +values. In addition, rocksdb methods do not return null-terminated +C-style strings since rocksdb keys and values are allowed to +contain '\0' bytes. +

+C++ strings and null-terminated C-style strings can be easily converted +to a Slice: +

+

+   rocksdb::Slice s1 = "hello";
+
+   std::string str("world");
+   rocksdb::Slice s2 = str;
+
+A Slice can be easily converted back to a C++ string: +
+   std::string str = s1.ToString();
+   assert(str == std::string("hello"));
+
+Be careful when using Slices since it is up to the caller to ensure that +the external byte array into which the Slice points remains live while +the Slice is in use. For example, the following is buggy: +

+

+   rocksdb::Slice slice;
+   if (...) {
+     std::string str = ...;
+     slice = str;
+   }
+   Use(slice);
+
+When the if statement goes out of scope, str will be destroyed and the +backing storage for slice will disappear. +

+

Comparators

+

+The preceding examples used the default ordering function for keys,
+which orders bytes lexicographically. You can, however, supply a custom
+comparator when opening a database. For example, suppose each
+database key consists of two numbers and we want to sort by the first
+number, breaking ties by the second number. First, define a proper
+subclass of rocksdb::Comparator that expresses these rules:

+

+  class TwoPartComparator : public rocksdb::Comparator {
+   public:
+    // Three-way comparison function:
+    //   if a < b: negative result
+    //   if a > b: positive result
+    //   else: zero result
+    int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const {
+      int a1, a2, b1, b2;
+      ParseKey(a, &a1, &a2);
+      ParseKey(b, &b1, &b2);
+      if (a1 < b1) return -1;
+      if (a1 > b1) return +1;
+      if (a2 < b2) return -1;
+      if (a2 > b2) return +1;
+      return 0;
+    }
+
+    // Ignore the following methods for now:
+    const char* Name() const { return "TwoPartComparator"; }
+    void FindShortestSeparator(std::string*, const rocksdb::Slice&) const { }
+    void FindShortSuccessor(std::string*) const { }
+  };
+
+Now create a database using this custom comparator: +

+

+  TwoPartComparator cmp;
+  rocksdb::DB* db;
+  rocksdb::Options options;
+  options.create_if_missing = true;
+  options.comparator = &cmp;
+  rocksdb::Status status = rocksdb::DB::Open(options, "/tmp/testdb", &db);
+  ...
+
+

Backwards compatibility

+

+The result of the comparator's Name method is attached to the +database when it is created, and is checked on every subsequent +database open. If the name changes, the rocksdb::DB::Open call will +fail. Therefore, change the name if and only if the new key format +and comparison function are incompatible with existing databases, and +it is ok to discard the contents of all existing databases. +

+You can however still gradually evolve your key format over time with +a little bit of pre-planning. For example, you could store a version +number at the end of each key (one byte should suffice for most uses). +When you wish to switch to a new key format (e.g., adding an optional +third part to the keys processed by TwoPartComparator), +(a) keep the same comparator name (b) increment the version number +for new keys (c) change the comparator function so it uses the +version numbers found in the keys to decide how to interpret them. + + +
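+A sketch of step (c), assuming a hypothetical helper that reads the
+trailing version byte:
+
+  int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const {
+    int va = VersionOf(a);  // hypothetical: reads the trailing version byte
+    int vb = VersionOf(b);
+    // decode each key according to its own version; keys without the
+    // optional third part get a default value, so old and new keys
+    // remain mutually comparable
+    ...
+  }
+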

+

MemTable and Table factories

+

+By default, we keep the data in memory in a skiplist memtable and the data +on disk in a table format described here: + + RocksDB Table Format. +

+Since one of the goals of RocksDB is to have +different parts of the system easily pluggable, we support different +implementations of both memtable and table format. You can supply +your own memtable factory by setting Options::memtable_factory +and your own table factory by setting Options::table_factory. +For available memtable factories, please refer to +rocksdb/memtablerep.h, and for table factories to +rocksdb/table.h. Both features are in active development, +so please be wary of any API changes that might break your application +going forward. +
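+As a sketch, plugging in the (default) skiplist memtable factory and a +block-based table factory might look like this: +
+  #include "rocksdb/memtablerep.h"
+  #include "rocksdb/table.h"
+
+  rocksdb::Options options;
+  options.memtable_factory.reset(new rocksdb::SkipListFactory);   // the default
+  rocksdb::BlockBasedTableOptions table_options;
+  options.table_factory.reset(
+      rocksdb::NewBlockBasedTableFactory(table_options));
+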

+You can also read more about memtables here: + +Memtables wiki + + +

+

Performance

+

+Performance can be tuned by changing the default values of the +types defined in include/rocksdb/options.h. + +

+

Block size

+

+rocksdb groups adjacent keys together into the same block and such a +block is the unit of transfer to and from persistent storage. The +default block size is approximately 4096 uncompressed bytes. +Applications that mostly do bulk scans over the contents of the +database may wish to increase this size. Applications that do a lot +of point reads of small values may wish to switch to a smaller block +size if performance measurements indicate an improvement. There isn't +much benefit in using blocks smaller than one kilobyte, or larger than +a few megabytes. Also note that compression will be more effective +with larger block sizes. To change the block size, use +Options::block_size. +
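+For example (note that newer releases move this knob into +BlockBasedTableOptions::block_size): +
+  rocksdb::Options options;
+  options.block_size = 16 * 1024;  // larger blocks favor scans and compression
+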

+

Write buffer

+

+Options::write_buffer_size specifies the amount of data +to build up in memory before converting to a sorted on-disk file. +Larger values increase performance, especially during bulk loads. +Up to max_write_buffer_number write buffers may be held in memory +at the same time, +so you may wish to adjust this parameter to control memory usage. +Also, a larger write buffer will result in a longer recovery time +the next time the database is opened. +A related option is +Options::max_write_buffer_number, which is the maximum number +of write buffers that are built up in memory. The default is 2, so that +while one write buffer is being flushed to storage, new writes can continue +to the other write buffer. +Options::min_write_buffer_number_to_merge is the minimum number +of write buffers that will be merged together before writing to storage. +If set to 1, then all write buffers are flushed to L0 as individual files; +this increases read amplification because a get request has to check all +of these files. An in-memory merge may also result in writing less +data to storage if there are duplicate records in each of these +individual write buffers. Default: 1 +
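+For example, a write-heavy application might use something like the following +(the exact values are illustrative, not recommendations): +
+  rocksdb::Options options;
+  options.write_buffer_size = 64 * 1024 * 1024;   // 64MB per memtable
+  options.max_write_buffer_number = 3;            // at most 3 memtables in memory
+  options.min_write_buffer_number_to_merge = 1;   // flush memtables individually
+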

+

Compression

+

+Each block is individually compressed before being written to +persistent storage. Compression is on by default since the default +compression method is very fast, and is automatically disabled for +uncompressible data. In rare cases, applications may want to disable +compression entirely, but should only do so if benchmarks show a +performance improvement: +

+

+  rocksdb::Options options;
+  options.compression = rocksdb::kNoCompression;
+  ... rocksdb::DB::Open(options, name, ...) ....
+
+

Cache

+

+The contents of the database are stored in a set of files in the +filesystem and each file stores a sequence of compressed blocks. If +options.block_cache is non-NULL, it is used to cache frequently +used uncompressed block contents. If options.block_cache_compressed +is non-NULL, it is used to cache frequently used compressed blocks. Compressed +cache is an alternative to OS cache, which also caches compressed blocks. If +compressed cache is used, the OS cache will be disabled automatically by setting +options.allow_os_buffer to false. +

+

+  #include "rocksdb/cache.h"
+
+  rocksdb::Options options;
+  options.block_cache = rocksdb::NewLRUCache(100 * 1048576);  // 100MB uncompressed cache
+  options.block_cache_compressed = rocksdb::NewLRUCache(100 * 1048576);  // 100MB compressed cache
+  rocksdb::DB* db;
+  rocksdb::DB::Open(options, name, &db);
+  ... use the db ...
+  delete db;
+  delete options.block_cache;
+  delete options.block_cache_compressed;
+
+

+When performing a bulk read, the application may wish to disable +caching so that the data processed by the bulk read does not end up +displacing most of the cached contents. A per-iterator option can be +used to achieve this: +

+

+  rocksdb::ReadOptions options;
+  options.fill_cache = false;
+  rocksdb::Iterator* it = db->NewIterator(options);
+  for (it->SeekToFirst(); it->Valid(); it->Next()) {
+    ...
+  }
+
+

+You can also disable the block cache by setting options.no_block_cache +to true. +

Key Layout

+

+Note that the unit of disk transfer and caching is a block. Adjacent +keys (according to the database sort order) will usually be placed in +the same block. Therefore the application can improve its performance +by placing keys that are accessed together near each other and placing +infrequently used keys in a separate region of the key space. +

+For example, suppose we are implementing a simple file system on top +of rocksdb. The types of entries we might wish to store are: +

+

+   filename -> permission-bits, length, list of file_block_ids
+   file_block_id -> data
+
+We might want to prefix filename keys with one letter (say '/') and the +file_block_id keys with a different letter (say '0') so that scans +over just the metadata do not force us to fetch and cache bulky file +contents. +
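+A minimal sketch of such key construction (the helper names are illustrative): +
+   std::string MetadataKey(const std::string& filename) {
+     return "/" + filename;          // metadata region of the key space
+   }
+   std::string DataKey(const std::string& file_block_id) {
+     return "0" + file_block_id;     // bulky file contents live elsewhere
+   }
+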

+

Filters

+

+Because of the way rocksdb data is organized on disk, +a single Get() call may involve multiple reads from disk. +The optional FilterPolicy mechanism can be used to reduce +the number of disk reads substantially. +

+   rocksdb::Options options;
+   options.filter_policy = NewBloomFilter(10);
+   rocksdb::DB* db;
+   rocksdb::DB::Open(options, "/tmp/testdb", &db);
+   ... use the database ...
+   delete db;
+   delete options.filter_policy;
+
+The preceding code associates a +Bloom filter +based filtering policy with the database. Bloom-filter-based +filtering relies on keeping some number of bits of data in memory per +key (in this case 10 bits per key, since that is the argument we passed +to NewBloomFilter). This filter will reduce the number of unnecessary +disk reads needed for Get() calls by a factor of +approximately 100. Increasing the bits per key will lead to a +larger reduction at the cost of more memory usage. We recommend that +applications whose working set does not fit in memory and that do a +lot of random reads set a filter policy. +

+If you are using a custom comparator, you should ensure that the filter +policy you are using is compatible with your comparator. For example, +consider a comparator that ignores trailing spaces when comparing keys. +NewBloomFilter must not be used with such a comparator. +Instead, the application should provide a custom filter policy that +also ignores trailing spaces. For example: +

+  class CustomFilterPolicy : public rocksdb::FilterPolicy {
+   private:
+    FilterPolicy* builtin_policy_;
+   public:
+    CustomFilterPolicy() : builtin_policy_(NewBloomFilter(10)) { }
+    ~CustomFilterPolicy() { delete builtin_policy_; }
+
+    const char* Name() const { return "IgnoreTrailingSpacesFilter"; }
+
+    void CreateFilter(const Slice* keys, int n, std::string* dst) const {
+      // Use builtin bloom filter code after removing trailing spaces
+      std::vector<Slice> trimmed(n);
+      for (int i = 0; i < n; i++) {
+        trimmed[i] = RemoveTrailingSpaces(keys[i]);
+      }
+      builtin_policy_->CreateFilter(&trimmed[0], n, dst);
+    }
+
+    bool KeyMayMatch(const Slice& key, const Slice& filter) const {
+      // Use builtin bloom filter code after removing trailing spaces
+      return builtin_policy_->KeyMayMatch(RemoveTrailingSpaces(key), filter);
+    }
+  };
+
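+RemoveTrailingSpaces above is an application-supplied helper, not part of +rocksdb. A minimal sketch (safe because shortening a Slice only shrinks its +length; the data pointer still refers to the caller's bytes): +
+  rocksdb::Slice RemoveTrailingSpaces(const rocksdb::Slice& key) {
+    size_t len = key.size();
+    while (len > 0 && key.data()[len - 1] == ' ') {
+      --len;
+    }
+    return rocksdb::Slice(key.data(), len);
+  }
+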
+

+Advanced applications may provide a filter policy that does not use +a bloom filter but uses some other mechanism for summarizing a set +of keys. See rocksdb/filter_policy.h for details. +

+

Checksums

+

+rocksdb associates checksums with all data it stores in the file system. +There are two separate controls provided over how aggressively these +checksums are verified: +

+

    +
  • ReadOptions::verify_checksums may be set to true to force + checksum verification of all data that is read from the file system on + behalf of a particular read. By default, no such verification is + done. +

    +

  • Options::paranoid_checks may be set to true before opening a + database to make the database implementation raise an error as soon as + it detects an internal corruption. Depending on which portion of the + database has been corrupted, the error may be raised when the database + is opened, or later by another database operation. By default, + paranoid checking is off so that the database can be used even if + parts of its persistent storage have been corrupted. +

    + If a database is corrupted (perhaps it cannot be opened when + paranoid checking is turned on), the rocksdb::RepairDB function + may be used to recover as much of the data as possible, as in the + sketch after this list. +

    +

+ +
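+A minimal sketch tying these controls together: +
+  rocksdb::ReadOptions read_options;
+  read_options.verify_checksums = true;  // verify all data read for this request
+
+  rocksdb::Options options;
+  options.paranoid_checks = true;        // surface corruption as early as possible
+
+  // If the database cannot be opened due to corruption, attempt recovery:
+  rocksdb::Status s = rocksdb::RepairDB("/tmp/testdb", options);
+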

+

Compaction

+

+You can read more on Compactions here: + + Multi-threaded compactions + +

+Here we give an overview of the options that impact the behavior of compactions: +

    +

    +

  • Options::compaction_style - RocksDB currently supports two +compaction algorithms - Universal style and Level style. This option switches +between the two. Can be kCompactionStyleUniversal or kCompactionStyleLevel. +If this is kCompactionStyleUniversal, then you can configure universal style +parameters with Options::compaction_options_universal. +

    +

  • Options::disable_auto_compactions - Disable automatic compactions. +Manual compactions can still be issued on this database. +

    +

  • Options::compaction_filter - Allows an application to modify/delete +a key-value during background compaction. The client must provide +compaction_filter_factory if it requires a new compaction filter to be used +for different compaction processes. The client should specify only one of the +filter or the factory, not both. +

    +

  • Options::compaction_filter_factory - a factory that provides +compaction filter objects which allow an application to modify/delete a +key-value during background compaction. +
+

+Other options impacting the performance of compactions and when they get triggered +are listed below; a combined configuration sketch follows the list: +

    +

    +

  • Options::access_hint_on_compaction_start - Specify the file access +pattern once a compaction is started. It will be applied to all input files of a compaction. Default: NORMAL +

    +

  • Options::level0_file_num_compaction_trigger - Number of files to trigger level-0 compaction. +A negative value means that level-0 compaction will not be triggered by number of files at all. +

    +

  • Options::max_mem_compaction_level - Maximum level to which a new compacted memtable is pushed if it +does not create overlap. We try to push to level 2 to avoid the relatively expensive level 0=>1 compactions and to avoid some +expensive manifest file operations. We do not push all the way to the largest level since that can generate a lot of wasted disk +space if the same key space is being repeatedly overwritten. +

    +

  • Options::target_file_size_base and Options::target_file_size_multiplier - +Target file size for compaction. target_file_size_base is the per-file size for level-1. +The target file size for level L can be calculated as target_file_size_base * (target_file_size_multiplier ^ (L-1)). +For example, if target_file_size_base is 2MB and target_file_size_multiplier is 10, then each file on level-1 will +be 2MB, each file on level-2 will be 20MB, and each file on level-3 will be 200MB. Default target_file_size_base is 2MB +and default target_file_size_multiplier is 1. +

    +

  • Options::expanded_compaction_factor - Maximum number of bytes in all compacted files. We avoid expanding +the lower level file set of a compaction if it would make the total compaction cover more than +(expanded_compaction_factor * targetFileSizeLevel()) many bytes. +

    +

  • Options::source_compaction_factor - Maximum number of bytes in all source files to be compacted in a +single compaction run. We avoid picking too many files in the source level so that the total source bytes of a +compaction do not exceed (source_compaction_factor * targetFileSizeLevel()) many bytes. +Default: 1, i.e. pick one maxfilesize amount of data as the source of a compaction. +

    +

  • Options::max_grandparent_overlap_factor - Control maximum bytes of overlaps in grandparent (i.e., level+2) before we +stop building a single file in a level->level+1 compaction. +

    +

  • Options::max_background_compactions - Maximum number of concurrent background jobs, submitted to +the default LOW priority thread pool. +
+ +
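+As a combined sketch of the options above (the values are illustrative, not +recommendations): +
+  rocksdb::Options options;
+  options.compaction_style = rocksdb::kCompactionStyleLevel;
+  options.level0_file_num_compaction_trigger = 4;    // compact L0 after 4 files
+  options.target_file_size_base = 2 * 1024 * 1024;   // 2MB files on level-1
+  options.target_file_size_multiplier = 1;
+  options.max_background_compactions = 2;
+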

+You can learn more about all of those options in rocksdb/options.h + +

Universal style compaction specific settings

+

+If you're using Universal style compaction, there is an object CompactionOptionsUniversal +that holds all the different options for that compaction. The exact definition is in +rocksdb/universal_compaction.h and you can set it in Options::compaction_options_universal. +Here we give a short overview of the options in CompactionOptionsUniversal; a +configuration sketch follows the list: +

    +

    +

  • CompactionOptionsUniversal::size_ratio - Percentage flexibility while comparing file size. If the candidate file(s) + size is 1% smaller than the next file's size, then the next file is included in + this candidate set. Default: 1 +

    +

  • CompactionOptionsUniversal::min_merge_width - The minimum number of files in a single compaction run. Default: 2 +

    +

  • CompactionOptionsUniversal::max_merge_width - The maximum number of files in a single compaction run. Default: UINT_MAX +

    +

  • CompactionOptionsUniversal::max_size_amplification_percent - The size amplification is defined as the amount (in percentage) of +additional storage needed to store a single byte of data in the database. For example, a size amplification of 2% means that a database that +contains 100 bytes of user-data may occupy up to 102 bytes of physical storage. By this definition, a fully compacted database has +a size amplification of 0%. RocksDB uses the following heuristic to calculate size amplification: it assumes that all files excluding +the earliest file contribute to the size amplification. Default: 200, which means that a 100 byte database could require up to +300 bytes of storage. +

    +

  • CompactionOptionsUniversal::compression_size_percent - If this option is set to -1 (the default value), all the output files +will use the compression type specified. If this option is not negative, we will try to make sure the compressed +size is just above this value. In normal cases, at least this percentage +of data will be compressed. +When we are compacting to a new file, here is the criterion for whether +it needs to be compressed: assume the list of files sorted +by generation time is [ A1...An B1...Bm C1...Ct ], +where A1 is the newest and Ct is the oldest, and we are going to compact +B1...Bm. We calculate the total size of all the files as total_size, as +well as the total size of C1...Ct as total_C; the compaction output file +will be compressed iff total_C / total_size < this percentage. +

    +

  • CompactionOptionsUniversal::stop_style - The algorithm used to stop picking files into a single compaction run. +Can be kCompactionStopStyleSimilarSize (pick files of similar size) or kCompactionStopStyleTotalSize (total size of picked files > next file). +Default: kCompactionStopStyleTotalSize +
+ +
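+A configuration sketch (values are illustrative): +
+  rocksdb::Options options;
+  options.compaction_style = rocksdb::kCompactionStyleUniversal;
+  options.compaction_options_universal.size_ratio = 1;
+  options.compaction_options_universal.min_merge_width = 2;
+  options.compaction_options_universal.max_size_amplification_percent = 200;
+  options.compaction_options_universal.stop_style =
+      rocksdb::kCompactionStopStyleTotalSize;
+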

Thread pools

+

+A thread pool is associated with an Env environment object. The client has to create a thread pool by setting the number of background +threads using the method Env::SetBackgroundThreads() defined in rocksdb/env.h. +We use the thread pool for compactions and memtable flushes. +Since memtable flushes are on the critical code path (a stalled memtable flush can stall writes, increasing p99 latency), we suggest +having two thread pools - with priorities HIGH and LOW. Memtable flushes can be set up to be scheduled on the HIGH thread pool. +There are two options available for configuration of background compactions and flushes: +

    +

    +

  • Options::max_background_compactions - Maximum number of concurrent background jobs, +submitted to the default LOW priority thread pool. +

    +

  • Options::max_background_flushes - Maximum number of concurrent background memtable flush jobs, submitted to +the HIGH priority thread pool. By default, all background jobs (major compaction and memtable flush) go +to the LOW priority pool. If this option is set to a positive number, memtable flush jobs will be submitted to the HIGH priority pool. +It is important when the same Env is shared by multiple db instances. Without a separate pool, long running major compaction jobs could +potentially block memtable flush jobs of other db instances, leading to unnecessary Put stalls. +
+

+

+  #include "rocksdb/env.h"
+  #include "rocksdb/db.h"
+
+  auto env = rocksdb::Env::Default();
+  env->SetBackgroundThreads(2, rocksdb::Env::LOW);
+  env->SetBackgroundThreads(1, rocksdb::Env::HIGH);
+  rocksdb::DB* db;
+  rocksdb::Options options;
+  options.env = env;
+  options.max_background_compactions = 2;
+  options.max_background_flushes = 1;
+  rocksdb::Status status = rocksdb::DB::Open(options, "/tmp/testdb", &db);
+  assert(status.ok());
+  ...
+
+

Approximate Sizes

+

+The GetApproximateSizes method can be used to get the approximate +number of bytes of file system space used by one or more key ranges. +

+

+   rocksdb::Range ranges[2];
+   ranges[0] = rocksdb::Range("a", "c");
+   ranges[1] = rocksdb::Range("x", "z");
+   uint64_t sizes[2];
+   rocksdb::Status s = db->GetApproximateSizes(ranges, 2, sizes);
+
+The preceding call will set sizes[0] to the approximate number of +bytes of file system space used by the key range [a..c) and +sizes[1] to the approximate number of bytes used by the key range +[x..z). +

+

Environment

+

+All file operations (and other operating system calls) issued by the +rocksdb implementation are routed through a rocksdb::Env object. +Sophisticated clients may wish to provide their own Env +implementation to get better control. For example, an application may +introduce artificial delays in the file IO paths to limit the impact +of rocksdb on other activities in the system. +

+

+  class SlowEnv : public rocksdb::Env {
+    .. implementation of the Env interface ...
+  };
+
+  SlowEnv env;
+  rocksdb::Options options;
+  options.env = &env;
+  rocksdb::Status s = rocksdb::DB::Open(options, ...);
+
+
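+In practice it is often easier to subclass rocksdb::EnvWrapper, which forwards +every call to a wrapped Env so that only the methods of interest need to be +overridden. A sketch (the delay policy is illustrative): +
+  class SlowEnv : public rocksdb::EnvWrapper {
+   public:
+    explicit SlowEnv(rocksdb::Env* base) : rocksdb::EnvWrapper(base) {}
+
+    rocksdb::Status NewWritableFile(const std::string& fname,
+                                    std::unique_ptr<rocksdb::WritableFile>* result,
+                                    const rocksdb::EnvOptions& opts) override {
+      target()->SleepForMicroseconds(1000);  // artificial 1ms delay per file
+      return target()->NewWritableFile(fname, result, opts);
+    }
+  };
+
+  SlowEnv env(rocksdb::Env::Default());
+  rocksdb::Options options;
+  options.env = &env;
+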

Porting

+

+rocksdb may be ported to a new platform by providing platform-specific +implementations of the types/methods/functions exported by +rocksdb/port/port.h. See rocksdb/port/port_example.h for more +details. +

+In addition, the new platform may need a new default rocksdb::Env +implementation. See rocksdb/util/env_posix.h for an example. + +

Statistics

+

+To be able to efficiently tune your application, it is always helpful if you +have access to usage statistics. You can collect those statistics by setting +Options::table_properties_collectors or +Options::statistics. For more information, refer to +rocksdb/table_properties.h and rocksdb/statistics.h. +These should not add significant overhead to your application and we +recommend exporting them to other monitoring tools. + +
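+For example, a sketch of enabling the built-in statistics object: +
+  #include "rocksdb/statistics.h"
+
+  rocksdb::Options options;
+  options.statistics = rocksdb::CreateDBStatistics();
+  ... open and use the db ...
+  // Periodically export the accumulated counters and histograms:
+  std::string counters = options.statistics->ToString();
+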

Purging WAL files

+

+By default, old write-ahead logs are deleted automatically when they fall out +of scope and the application no longer needs them. There are options that +enable the user to archive the logs and then delete them lazily, either in a +TTL fashion or based on a size limit. + +The options are Options::WAL_ttl_seconds and +Options::WAL_size_limit_MB. Here is how they can be used +(a configuration sketch follows the list): +

    +
  • +

    +If both are set to 0, logs will be deleted as soon as possible and will never get into the archive. +

  • +

    +If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, WAL +files will be checked every 10 min and if the total size is greater than +WAL_size_limit_MB, they will be deleted starting with the +earliest until the size limit is met. All empty files will be deleted. +

  • +

    +If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then +WAL files will be checked every WAL_ttl_seconds / 2 and those +that are older than WAL_ttl_seconds will be deleted. +

  • +

    +If both are not 0, WAL files will be checked every 10 min and both +checks will be performed, with the TTL check being applied first. +

+ +
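+A configuration sketch (values are illustrative): +
+  rocksdb::Options options;
+  options.WAL_ttl_seconds = 3600;   // archive logs and keep them for an hour
+  options.WAL_size_limit_MB = 10;   // cap the archive at 10MB
+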

Other Information

+

+Details about the rocksdb implementation may be found in +the following documents: +

+ + + diff --git a/external/rocksdb/doc/log_format.txt b/external/rocksdb/doc/log_format.txt new file mode 100755 index 0000000000..5222d81131 --- /dev/null +++ b/external/rocksdb/doc/log_format.txt @@ -0,0 +1,75 @@ +The log file contents are a sequence of 32KB blocks. The only +exception is that the tail of the file may contain a partial block. + +Each block consists of a sequence of records: + block := record* trailer? + record := + checksum: uint32 // crc32c of type and data[] + length: uint16 + type: uint8 // One of FULL, FIRST, MIDDLE, LAST + data: uint8[length] + +A record never starts within the last six bytes of a block (since it +won't fit). Any leftover bytes here form the trailer, which must +consist entirely of zero bytes and must be skipped by readers. + +Aside: if exactly seven bytes are left in the current block, and a new +non-zero length record is added, the writer must emit a FIRST record +(which contains zero bytes of user data) to fill up the trailing seven +bytes of the block and then emit all of the user data in subsequent +blocks. + +More types may be added in the future. Some Readers may skip record +types they do not understand, others may report that some data was +skipped. + +FULL == 1 +FIRST == 2 +MIDDLE == 3 +LAST == 4 + +The FULL record contains the contents of an entire user record. + +FIRST, MIDDLE, LAST are types used for user records that have been +split into multiple fragments (typically because of block boundaries). +FIRST is the type of the first fragment of a user record, LAST is the +type of the last fragment of a user record, and MID is the type of all +interior fragments of a user record. + +Example: consider a sequence of user records: + A: length 1000 + B: length 97270 + C: length 8000 +A will be stored as a FULL record in the first block. + +B will be split into three fragments: first fragment occupies the rest +of the first block, second fragment occupies the entirety of the +second block, and the third fragment occupies a prefix of the third +block. This will leave six bytes free in the third block, which will +be left empty as the trailer. + +C will be stored as a FULL record in the fourth block. + +=================== + +Some benefits over the recordio format: + +(1) We do not need any heuristics for resyncing - just go to next +block boundary and scan. If there is a corruption, skip to the next +block. As a side-benefit, we do not get confused when part of the +contents of one log file are embedded as a record inside another log +file. + +(2) Splitting at approximate boundaries (e.g., for mapreduce) is +simple: find the next block boundary and skip records until we +hit a FULL or FIRST record. + +(3) We do not need extra buffering for large records. + +Some downsides compared to recordio format: + +(1) No packing of tiny records. This could be fixed by adding a new +record type, so it is a shortcoming of the current implementation, +not necessarily the format. + +(2) No compression. Again, this could be fixed by adding new record types. 
diff --git a/external/rocksdb/doc/rockslogo.jpg b/external/rocksdb/doc/rockslogo.jpg new file mode 100755 index 0000000000..363905af5c Binary files /dev/null and b/external/rocksdb/doc/rockslogo.jpg differ diff --git a/external/rocksdb/doc/rockslogo.png b/external/rocksdb/doc/rockslogo.png new file mode 100755 index 0000000000..1961360770 Binary files /dev/null and b/external/rocksdb/doc/rockslogo.png differ diff --git a/external/rocksdb/examples/.gitignore b/external/rocksdb/examples/.gitignore new file mode 100755 index 0000000000..8c06e7972c --- /dev/null +++ b/external/rocksdb/examples/.gitignore @@ -0,0 +1,7 @@ +c_simple_example +column_families_example +compact_files_example +compaction_filter_example +optimistic_transaction_example +simple_example +transaction_example diff --git a/external/rocksdb/examples/Makefile b/external/rocksdb/examples/Makefile new file mode 100755 index 0000000000..b04f9a8fba --- /dev/null +++ b/external/rocksdb/examples/Makefile @@ -0,0 +1,46 @@ +include ../make_config.mk + +ifndef DISABLE_JEMALLOC + ifdef JEMALLOC + PLATFORM_CXXFLAGS += "-DROCKSDB_JEMALLOC" + endif + EXEC_LDFLAGS := $(JEMALLOC_LIB) $(EXEC_LDFLAGS) -lpthread + PLATFORM_CXXFLAGS += $(JEMALLOC_INCLUDE) +endif + +.PHONY: clean librocksdb + +all: simple_example column_families_example compact_files_example c_simple_example optimistic_transaction_example transaction_example + +simple_example: librocksdb simple_example.cc + $(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS) + +column_families_example: librocksdb column_families_example.cc + $(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS) + +compaction_filter_example: librocksdb compaction_filter_example.cc + $(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS) + +compact_files_example: librocksdb compact_files_example.cc + $(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS) + +.c.o: + $(CC) $(CFLAGS) -c $< -o $@ -I../include + +c_simple_example: librocksdb c_simple_example.o + $(CXX) $@.o -o$@ ../librocksdb.a $(PLATFORM_LDFLAGS) $(EXEC_LDFLAGS) + +optimistic_transaction_example: librocksdb optimistic_transaction_example.cc + $(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS) + +transaction_example: librocksdb transaction_example.cc + $(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS) + +options_file_example: librocksdb options_file_example.cc + $(CXX) $(CXXFLAGS) $@.cc -o$@ ../librocksdb.a -I../include -O2 -std=c++11 $(PLATFORM_LDFLAGS) $(PLATFORM_CXXFLAGS) $(EXEC_LDFLAGS) + +clean: + rm -rf ./simple_example ./column_families_example ./compact_files_example ./compaction_filter_example ./c_simple_example c_simple_example.o ./optimistic_transaction_example ./transaction_example + +librocksdb: + cd .. && $(MAKE) static_lib diff --git a/external/rocksdb/examples/README.md b/external/rocksdb/examples/README.md new file mode 100755 index 0000000000..f4ba2384b8 --- /dev/null +++ b/external/rocksdb/examples/README.md @@ -0,0 +1,2 @@ +1. Compile RocksDB first by executing `make static_lib` in parent dir +2. 
Compile all examples: `cd examples/; make all` diff --git a/external/rocksdb/examples/c_simple_example.c b/external/rocksdb/examples/c_simple_example.c new file mode 100755 index 0000000000..ab19f3bfbc --- /dev/null +++ b/external/rocksdb/examples/c_simple_example.c @@ -0,0 +1,74 @@ +#include +#include +#include +#include + +#include "rocksdb/c.h" + +#include // sysconf() - get CPU count + +const char DBPath[] = "/tmp/rocksdb_simple_example"; +const char DBBackupPath[] = "/tmp/rocksdb_simple_example_backup"; + +int main(int argc, char **argv) { + rocksdb_t *db; + rocksdb_backup_engine_t *be; + rocksdb_options_t *options = rocksdb_options_create(); + // Optimize RocksDB. This is the easiest way to + // get RocksDB to perform well + long cpus = sysconf(_SC_NPROCESSORS_ONLN); // get # of online cores + rocksdb_options_increase_parallelism(options, (int)(cpus)); + rocksdb_options_optimize_level_style_compaction(options, 0); + // create the DB if it's not already present + rocksdb_options_set_create_if_missing(options, 1); + + // open DB + char *err = NULL; + db = rocksdb_open(options, DBPath, &err); + assert(!err); + + // open Backup Engine that we will use for backing up our database + be = rocksdb_backup_engine_open(options, DBBackupPath, &err); + assert(!err); + + // Put key-value + rocksdb_writeoptions_t *writeoptions = rocksdb_writeoptions_create(); + const char key[] = "key"; + const char *value = "value"; + rocksdb_put(db, writeoptions, key, strlen(key), value, strlen(value) + 1, + &err); + assert(!err); + // Get value + rocksdb_readoptions_t *readoptions = rocksdb_readoptions_create(); + size_t len; + char *returned_value = + rocksdb_get(db, readoptions, key, strlen(key), &len, &err); + assert(!err); + assert(strcmp(returned_value, "value") == 0); + free(returned_value); + + // create new backup in a directory specified by DBBackupPath + rocksdb_backup_engine_create_new_backup(be, db, &err); + assert(!err); + + rocksdb_close(db); + + // If something is wrong, you might want to restore data from last backup + rocksdb_restore_options_t *restore_options = rocksdb_restore_options_create(); + rocksdb_backup_engine_restore_db_from_latest_backup(be, DBPath, DBPath, + restore_options, &err); + assert(!err); + rocksdb_restore_options_destroy(restore_options); + + db = rocksdb_open(options, DBPath, &err); + assert(!err); + + // cleanup + rocksdb_writeoptions_destroy(writeoptions); + rocksdb_readoptions_destroy(readoptions); + rocksdb_options_destroy(options); + rocksdb_backup_engine_close(be); + rocksdb_close(db); + + return 0; +} diff --git a/external/rocksdb/examples/column_families_example.cc b/external/rocksdb/examples/column_families_example.cc new file mode 100755 index 0000000000..f2dec691ea --- /dev/null +++ b/external/rocksdb/examples/column_families_example.cc @@ -0,0 +1,72 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+#include +#include +#include + +#include "rocksdb/db.h" +#include "rocksdb/slice.h" +#include "rocksdb/options.h" + +using namespace rocksdb; + +std::string kDBPath = "/tmp/rocksdb_column_families_example"; + +int main() { + // open DB + Options options; + options.create_if_missing = true; + DB* db; + Status s = DB::Open(options, kDBPath, &db); + assert(s.ok()); + + // create column family + ColumnFamilyHandle* cf; + s = db->CreateColumnFamily(ColumnFamilyOptions(), "new_cf", &cf); + assert(s.ok()); + + // close DB + delete cf; + delete db; + + // open DB with two column families + std::vector column_families; + // have to open default column family + column_families.push_back(ColumnFamilyDescriptor( + kDefaultColumnFamilyName, ColumnFamilyOptions())); + // open the new one, too + column_families.push_back(ColumnFamilyDescriptor( + "new_cf", ColumnFamilyOptions())); + std::vector handles; + s = DB::Open(DBOptions(), kDBPath, column_families, &handles, &db); + assert(s.ok()); + + // put and get from non-default column family + s = db->Put(WriteOptions(), handles[1], Slice("key"), Slice("value")); + assert(s.ok()); + std::string value; + s = db->Get(ReadOptions(), handles[1], Slice("key"), &value); + assert(s.ok()); + + // atomic write + WriteBatch batch; + batch.Put(handles[0], Slice("key2"), Slice("value2")); + batch.Put(handles[1], Slice("key3"), Slice("value3")); + batch.Delete(handles[0], Slice("key")); + s = db->Write(WriteOptions(), &batch); + assert(s.ok()); + + // drop column family + s = db->DropColumnFamily(handles[1]); + assert(s.ok()); + + // close db + for (auto handle : handles) { + delete handle; + } + delete db; + + return 0; +} diff --git a/external/rocksdb/examples/compact_files_example.cc b/external/rocksdb/examples/compact_files_example.cc new file mode 100755 index 0000000000..023ae403b7 --- /dev/null +++ b/external/rocksdb/examples/compact_files_example.cc @@ -0,0 +1,171 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// An example code demonstrating how to use CompactFiles, EventListener, +// and GetColumnFamilyMetaData APIs to implement custom compaction algorithm. + +#include +#include +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/options.h" + +using namespace rocksdb; +std::string kDBPath = "/tmp/rocksdb_compact_files_example"; +struct CompactionTask; + +// This is an example interface of external-compaction algorithm. +// Compaction algorithm can be implemented outside the core-RocksDB +// code by using the pluggable compaction APIs that RocksDb provides. +class Compactor : public EventListener { + public: + // Picks and returns a compaction task given the specified DB + // and column family. It is the caller's responsibility to + // destroy the returned CompactionTask. Returns "nullptr" + // if it cannot find a proper compaction task. + virtual CompactionTask* PickCompaction( + DB* db, const std::string& cf_name) = 0; + + // Schedule and run the specified compaction task in background. + virtual void ScheduleCompaction(CompactionTask *task) = 0; +}; + +// Example structure that describes a compaction task. 
+struct CompactionTask { + CompactionTask( + DB* _db, Compactor* _compactor, + const std::string& _column_family_name, + const std::vector& _input_file_names, + const int _output_level, + const CompactionOptions& _compact_options, + bool _retry_on_fail) + : db(_db), + compactor(_compactor), + column_family_name(_column_family_name), + input_file_names(_input_file_names), + output_level(_output_level), + compact_options(_compact_options), + retry_on_fail(_retry_on_fail) {} + DB* db; + Compactor* compactor; + const std::string& column_family_name; + std::vector input_file_names; + int output_level; + CompactionOptions compact_options; + bool retry_on_fail; +}; + +// A simple compaction algorithm that always compacts everything +// to the highest level whenever possible. +class FullCompactor : public Compactor { + public: + explicit FullCompactor(const Options options) : options_(options) { + compact_options_.compression = options_.compression; + compact_options_.output_file_size_limit = + options_.target_file_size_base; + } + + // When flush happens, it determines whether to trigger compaction. If + // triggered_writes_stop is true, it will also set the retry flag of + // compaction-task to true. + void OnFlushCompleted( + DB* db, const FlushJobInfo& info) override { + CompactionTask* task = PickCompaction(db, info.cf_name); + if (task != nullptr) { + if (info.triggered_writes_stop) { + task->retry_on_fail = true; + } + // Schedule compaction in a different thread. + ScheduleCompaction(task); + } + } + + // Always pick a compaction which includes all files whenever possible. + CompactionTask* PickCompaction( + DB* db, const std::string& cf_name) override { + ColumnFamilyMetaData cf_meta; + db->GetColumnFamilyMetaData(&cf_meta); + + std::vector input_file_names; + for (auto level : cf_meta.levels) { + for (auto file : level.files) { + if (file.being_compacted) { + return nullptr; + } + input_file_names.push_back(file.name); + } + } + return new CompactionTask( + db, this, cf_name, input_file_names, + options_.num_levels - 1, compact_options_, false); + } + + // Schedule the specified compaction task in background. + void ScheduleCompaction(CompactionTask* task) override { + options_.env->Schedule(&FullCompactor::CompactFiles, task); + } + + static void CompactFiles(void* arg) { + std::unique_ptr task( + reinterpret_cast(arg)); + assert(task); + assert(task->db); + Status s = task->db->CompactFiles( + task->compact_options, + task->input_file_names, + task->output_level); + printf("CompactFiles() finished with status %s\n", s.ToString().c_str()); + if (!s.ok() && !s.IsIOError() && task->retry_on_fail) { + // If a compaction task with its retry_on_fail=true failed, + // try to schedule another compaction in case the reason + // is not an IO error. + CompactionTask* new_task = task->compactor->PickCompaction( + task->db, task->column_family_name); + task->compactor->ScheduleCompaction(new_task); + } + } + + private: + Options options_; + CompactionOptions compact_options_; +}; + +int main() { + Options options; + options.create_if_missing = true; + // Disable RocksDB background compaction. + options.compaction_style = kCompactionStyleNone; + // Small slowdown and stop trigger for experimental purpose. 
+ options.level0_slowdown_writes_trigger = 3; + options.level0_stop_writes_trigger = 5; + options.IncreaseParallelism(5); + options.listeners.emplace_back(new FullCompactor(options)); + + DB* db = nullptr; + DestroyDB(kDBPath, options); + Status s = DB::Open(options, kDBPath, &db); + assert(s.ok()); + assert(db); + + // if background compaction is not working, write will stall + // because of options.level0_stop_writes_trigger + for (int i = 1000; i < 99999; ++i) { + db->Put(WriteOptions(), std::to_string(i), + std::string(500, 'a' + (i % 26))); + } + + // verify the values are still there + std::string value; + for (int i = 1000; i < 99999; ++i) { + db->Get(ReadOptions(), std::to_string(i), + &value); + assert(value == std::string(500, 'a' + (i % 26))); + } + + // close the db. + delete db; + + return 0; +} diff --git a/external/rocksdb/examples/compaction_filter_example.cc b/external/rocksdb/examples/compaction_filter_example.cc new file mode 100755 index 0000000000..77dbd9af76 --- /dev/null +++ b/external/rocksdb/examples/compaction_filter_example.cc @@ -0,0 +1,83 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include +#include +#include + +class MyMerge : public rocksdb::MergeOperator { + public: + virtual bool FullMergeV2(const MergeOperationInput& merge_in, + MergeOperationOutput* merge_out) const override { + merge_out->new_value.clear(); + if (merge_in.existing_value != nullptr) { + merge_out->new_value.assign(merge_in.existing_value->data(), + merge_in.existing_value->size()); + } + for (const rocksdb::Slice& m : merge_in.operand_list) { + fprintf(stderr, "Merge(%s)\n", m.ToString().c_str()); + // the compaction filter filters out bad values + assert(m.ToString() != "bad"); + merge_out->new_value.assign(m.data(), m.size()); + } + return true; + } + + const char* Name() const override { return "MyMerge"; } +}; + +class MyFilter : public rocksdb::CompactionFilter { + public: + bool Filter(int level, const rocksdb::Slice& key, + const rocksdb::Slice& existing_value, std::string* new_value, + bool* value_changed) const override { + fprintf(stderr, "Filter(%s)\n", key.ToString().c_str()); + ++count_; + assert(*value_changed == false); + return false; + } + + bool FilterMergeOperand(int level, const rocksdb::Slice& key, + const rocksdb::Slice& existing_value) const override { + fprintf(stderr, "FilterMerge(%s)\n", key.ToString().c_str()); + ++merge_count_; + return existing_value == "bad"; + } + + const char* Name() const override { return "MyFilter"; } + + mutable int count_ = 0; + mutable int merge_count_ = 0; +}; + +int main() { + rocksdb::DB* raw_db; + rocksdb::Status status; + + MyFilter filter; + + system("rm -rf /tmp/rocksmergetest"); + rocksdb::Options options; + options.create_if_missing = true; + options.merge_operator.reset(new MyMerge); + options.compaction_filter = &filter; + status = rocksdb::DB::Open(options, "/tmp/rocksmergetest", &raw_db); + assert(status.ok()); + std::unique_ptr db(raw_db); + + rocksdb::WriteOptions wopts; + db->Merge(wopts, "0", "bad"); // This is filtered out + db->Merge(wopts, "1", "data1"); + db->Merge(wopts, "1", "bad"); + db->Merge(wopts, "1", "data2"); + db->Merge(wopts, "1", "bad"); + db->Merge(wopts, "3", "data3"); + db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, 
nullptr); + fprintf(stderr, "filter.count_ = %d\n", filter.count_); + assert(filter.count_ == 1); + fprintf(stderr, "filter.merge_count_ = %d\n", filter.merge_count_); + assert(filter.merge_count_ == 5); +} diff --git a/external/rocksdb/examples/optimistic_transaction_example.cc b/external/rocksdb/examples/optimistic_transaction_example.cc new file mode 100755 index 0000000000..d28a305b34 --- /dev/null +++ b/external/rocksdb/examples/optimistic_transaction_example.cc @@ -0,0 +1,142 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef ROCKSDB_LITE + +#include "rocksdb/db.h" +#include "rocksdb/options.h" +#include "rocksdb/slice.h" +#include "rocksdb/utilities/transaction.h" +#include "rocksdb/utilities/optimistic_transaction_db.h" + +using namespace rocksdb; + +std::string kDBPath = "/tmp/rocksdb_transaction_example"; + +int main() { + // open DB + Options options; + options.create_if_missing = true; + DB* db; + OptimisticTransactionDB* txn_db; + + Status s = OptimisticTransactionDB::Open(options, kDBPath, &txn_db); + assert(s.ok()); + db = txn_db->GetBaseDB(); + + WriteOptions write_options; + ReadOptions read_options; + OptimisticTransactionOptions txn_options; + std::string value; + + //////////////////////////////////////////////////////// + // + // Simple OptimisticTransaction Example ("Read Committed") + // + //////////////////////////////////////////////////////// + + // Start a transaction + Transaction* txn = txn_db->BeginTransaction(write_options); + assert(txn); + + // Read a key in this transaction + s = txn->Get(read_options, "abc", &value); + assert(s.IsNotFound()); + + // Write a key in this transaction + txn->Put("abc", "def"); + + // Read a key OUTSIDE this transaction. Does not affect txn. + s = db->Get(read_options, "abc", &value); + + // Write a key OUTSIDE of this transaction. + // Does not affect txn since this is an unrelated key. If we wrote key 'abc' + // here, the transaction would fail to commit. + s = db->Put(write_options, "xyz", "zzz"); + + // Commit transaction + s = txn->Commit(); + assert(s.ok()); + delete txn; + + //////////////////////////////////////////////////////// + // + // "Repeatable Read" (Snapshot Isolation) Example + // -- Using a single Snapshot + // + //////////////////////////////////////////////////////// + + // Set a snapshot at start of transaction by setting set_snapshot=true + txn_options.set_snapshot = true; + txn = txn_db->BeginTransaction(write_options, txn_options); + + const Snapshot* snapshot = txn->GetSnapshot(); + + // Write a key OUTSIDE of transaction + db->Put(write_options, "abc", "xyz"); + + // Read a key using the snapshot + read_options.snapshot = snapshot; + s = txn->GetForUpdate(read_options, "abc", &value); + assert(value == "def"); + + // Attempt to commit transaction + s = txn->Commit(); + + // Transaction could not commit since the write outside of the txn conflicted + // with the read! 
+ assert(s.IsBusy()); + + delete txn; + // Clear snapshot from read options since it is no longer valid + read_options.snapshot = nullptr; + snapshot = nullptr; + + //////////////////////////////////////////////////////// + // + // "Read Committed" (Monotonic Atomic Views) Example + // --Using multiple Snapshots + // + //////////////////////////////////////////////////////// + + // In this example, we set the snapshot multiple times. This is probably + // only necessary if you have very strict isolation requirements to + // implement. + + // Set a snapshot at start of transaction + txn_options.set_snapshot = true; + txn = txn_db->BeginTransaction(write_options, txn_options); + + // Do some reads and writes to key "x" + read_options.snapshot = db->GetSnapshot(); + s = txn->Get(read_options, "x", &value); + txn->Put("x", "x"); + + // Do a write outside of the transaction to key "y" + s = db->Put(write_options, "y", "y"); + + // Set a new snapshot in the transaction + txn->SetSnapshot(); + read_options.snapshot = db->GetSnapshot(); + + // Do some reads and writes to key "y" + s = txn->GetForUpdate(read_options, "y", &value); + txn->Put("y", "y"); + + // Commit. Since the snapshot was advanced, the write done outside of the + // transaction does not prevent this transaction from Committing. + s = txn->Commit(); + assert(s.ok()); + delete txn; + // Clear snapshot from read options since it is no longer valid + read_options.snapshot = nullptr; + + // Cleanup + delete txn_db; + DestroyDB(kDBPath, options); + return 0; +} + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/examples/options_file_example.cc b/external/rocksdb/examples/options_file_example.cc new file mode 100755 index 0000000000..360ccddf23 --- /dev/null +++ b/external/rocksdb/examples/options_file_example.cc @@ -0,0 +1,113 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file demonstrates how to use the utility functions defined in +// rocksdb/utilities/options_util.h to open a rocksdb database without +// remembering all the rocksdb options. 
+#include +#include +#include + +#include "rocksdb/cache.h" +#include "rocksdb/compaction_filter.h" +#include "rocksdb/db.h" +#include "rocksdb/options.h" +#include "rocksdb/slice.h" +#include "rocksdb/table.h" +#include "rocksdb/utilities/options_util.h" + +using namespace rocksdb; + +std::string kDBPath = "/tmp/rocksdb_options_file_example"; + +namespace { +// A dummy compaction filter +class DummyCompactionFilter : public CompactionFilter { + public: + virtual ~DummyCompactionFilter() {} + virtual bool Filter(int level, const Slice& key, const Slice& existing_value, + std::string* new_value, bool* value_changed) const { + return false; + } + virtual const char* Name() const { return "DummyCompactionFilter"; } +}; + +} // namespace + +int main() { + DBOptions db_opt; + db_opt.create_if_missing = true; + + std::vector cf_descs; + cf_descs.push_back({kDefaultColumnFamilyName, ColumnFamilyOptions()}); + cf_descs.push_back({"new_cf", ColumnFamilyOptions()}); + + // initialize BlockBasedTableOptions + auto cache = NewLRUCache(1 * 1024 * 1024 * 1024); + BlockBasedTableOptions bbt_opts; + bbt_opts.block_size = 32 * 1024; + bbt_opts.block_cache = cache; + + // initialize column families options + std::unique_ptr compaction_filter; + compaction_filter.reset(new DummyCompactionFilter()); + cf_descs[0].options.table_factory.reset(NewBlockBasedTableFactory(bbt_opts)); + cf_descs[0].options.compaction_filter = compaction_filter.get(); + cf_descs[1].options.table_factory.reset(NewBlockBasedTableFactory(bbt_opts)); + + // destroy and open DB + DB* db; + Status s = DestroyDB(kDBPath, Options(db_opt, cf_descs[0].options)); + assert(s.ok()); + s = DB::Open(Options(db_opt, cf_descs[0].options), kDBPath, &db); + assert(s.ok()); + + // Create column family, and rocksdb will persist the options. + ColumnFamilyHandle* cf; + s = db->CreateColumnFamily(ColumnFamilyOptions(), "new_cf", &cf); + assert(s.ok()); + + // close DB + delete cf; + delete db; + + // In the following code, we will reopen the rocksdb instance using + // the options file stored in the db directory. + + // Load the options file. + DBOptions loaded_db_opt; + std::vector loaded_cf_descs; + s = LoadLatestOptions(kDBPath, Env::Default(), &loaded_db_opt, + &loaded_cf_descs); + assert(s.ok()); + assert(loaded_db_opt.create_if_missing == db_opt.create_if_missing); + + // Initialize pointer options for each column family + for (size_t i = 0; i < loaded_cf_descs.size(); ++i) { + auto* loaded_bbt_opt = reinterpret_cast( + loaded_cf_descs[0].options.table_factory->GetOptions()); + // Expect the same as BlockBasedTableOptions will be loaded form file. + assert(loaded_bbt_opt->block_size == bbt_opts.block_size); + // However, block_cache needs to be manually initialized as documented + // in rocksdb/utilities/options_util.h. + loaded_bbt_opt->block_cache = cache; + } + // In addition, as pointer options are initialized with default value, + // we need to properly initialized all the pointer options if non-defalut + // values are used before calling DB::Open(). + assert(loaded_cf_descs[0].options.compaction_filter == nullptr); + loaded_cf_descs[0].options.compaction_filter = compaction_filter.get(); + + // reopen the db using the loaded options. 
+ std::vector handles; + s = DB::Open(loaded_db_opt, kDBPath, loaded_cf_descs, &handles, &db); + assert(s.ok()); + + // close DB + for (auto* handle : handles) { + delete handle; + } + delete db; +} diff --git a/external/rocksdb/examples/rocksdb_option_file_example.ini b/external/rocksdb/examples/rocksdb_option_file_example.ini new file mode 100755 index 0000000000..7dc0704298 --- /dev/null +++ b/external/rocksdb/examples/rocksdb_option_file_example.ini @@ -0,0 +1,144 @@ +# This is a RocksDB option file. +# +# A typical RocksDB options file has four sections, which are +# Version section, DBOptions section, at least one CFOptions +# section, and one TableOptions section for each column family. +# The RocksDB options file in general follows the basic INI +# file format with the following extensions / modifications: +# +# * Escaped characters +# We escaped the following characters: +# - \n -- line feed - new line +# - \r -- carriage return +# - \\ -- backslash \ +# - \: -- colon symbol : +# - \# -- hash tag # +# * Comments +# We support # style comments. Comments can appear at the ending +# part of a line. +# * Statements +# A statement is of the form option_name = value. +# Each statement contains a '=', where extra white-spaces +# are supported. However, we don't support multi-lined statement. +# Furthermore, each line can only contain at most one statement. +# * Sections +# Sections are of the form [SecitonTitle "SectionArgument"], +# where section argument is optional. +# * List +# We use colon-separated string to represent a list. +# For instance, n1:n2:n3:n4 is a list containing four values. +# +# Below is an example of a RocksDB options file: +[Version] + rocksdb_version=4.3.0 + options_file_version=1.1 + +[DBOptions] + stats_dump_period_sec=600 + max_manifest_file_size=18446744073709551615 + bytes_per_sync=8388608 + delayed_write_rate=2097152 + WAL_ttl_seconds=0 + WAL_size_limit_MB=0 + max_subcompactions=1 + wal_dir= + wal_bytes_per_sync=0 + db_write_buffer_size=0 + keep_log_file_num=1000 + table_cache_numshardbits=4 + max_file_opening_threads=1 + writable_file_max_buffer_size=1048576 + random_access_max_buffer_size=1048576 + use_fsync=false + max_total_wal_size=0 + max_open_files=-1 + skip_stats_update_on_db_open=false + max_background_compactions=16 + manifest_preallocation_size=4194304 + max_background_flushes=7 + is_fd_close_on_exec=true + max_log_file_size=0 + advise_random_on_open=true + create_missing_column_families=false + paranoid_checks=true + delete_obsolete_files_period_micros=21600000000 + disable_data_sync=false + log_file_time_to_roll=0 + compaction_readahead_size=0 + create_if_missing=false + use_adaptive_mutex=false + enable_thread_tracking=false + disableDataSync=false + allow_fallocate=true + error_if_exists=false + recycle_log_file_num=0 + skip_log_error_on_recovery=false + allow_mmap_reads=false + allow_os_buffer=true + db_log_dir= + new_table_reader_for_compaction_inputs=true + allow_mmap_writes=false + + +[CFOptions "default"] + compaction_style=kCompactionStyleLevel + compaction_filter=nullptr + num_levels=6 + table_factory=BlockBasedTable + comparator=leveldb.BytewiseComparator + max_sequential_skip_in_iterations=8 + soft_rate_limit=0.000000 + max_bytes_for_level_base=1073741824 + memtable_prefix_bloom_probes=6 + memtable_prefix_bloom_bits=0 + memtable_prefix_bloom_huge_page_tlb_size=0 + max_successive_merges=0 + arena_block_size=16777216 + min_write_buffer_number_to_merge=1 + target_file_size_multiplier=1 + source_compaction_factor=1 + 
max_bytes_for_level_multiplier=8 + compaction_filter_factory=nullptr + max_write_buffer_number=8 + level0_stop_writes_trigger=20 + compression=kSnappyCompression + level0_file_num_compaction_trigger=4 + purge_redundant_kvs_while_flush=true + max_write_buffer_number_to_maintain=0 + memtable_factory=SkipListFactory + max_grandparent_overlap_factor=8 + expanded_compaction_factor=25 + hard_pending_compaction_bytes_limit=137438953472 + inplace_update_num_locks=10000 + level_compaction_dynamic_level_bytes=true + level0_slowdown_writes_trigger=12 + filter_deletes=false + verify_checksums_in_compaction=true + min_partial_merge_operands=2 + paranoid_file_checks=false + target_file_size_base=134217728 + optimize_filters_for_hits=false + merge_operator=PutOperator + compression_per_level=kNoCompression:kNoCompression:kNoCompression:kSnappyCompression:kSnappyCompression:kSnappyCompression + compaction_measure_io_stats=false + prefix_extractor=nullptr + bloom_locality=0 + write_buffer_size=134217728 + disable_auto_compactions=false + inplace_update_support=false + +[TableOptions/BlockBasedTable "default"] + format_version=2 + whole_key_filtering=true + skip_table_builder_flush=false + no_block_cache=false + checksum=kCRC32c + filter_policy=rocksdb.BuiltinBloomFilter + block_size_deviation=10 + block_size=8192 + block_restart_interval=16 + cache_index_and_filter_blocks=false + pin_l0_filter_and_index_blocks_in_cache=false + index_type=kBinarySearch + hash_index_allow_collision=true + flush_block_policy_factory=FlushBlockBySizePolicyFactory diff --git a/external/rocksdb/examples/simple_example.cc b/external/rocksdb/examples/simple_example.cc new file mode 100755 index 0000000000..453443479b --- /dev/null +++ b/external/rocksdb/examples/simple_example.cc @@ -0,0 +1,55 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#include +#include + +#include "rocksdb/db.h" +#include "rocksdb/slice.h" +#include "rocksdb/options.h" + +using namespace rocksdb; + +std::string kDBPath = "/tmp/rocksdb_simple_example"; + +int main() { + DB* db; + Options options; + // Optimize RocksDB. This is the easiest way to get RocksDB to perform well + options.IncreaseParallelism(); + options.OptimizeLevelStyleCompaction(); + // create the DB if it's not already present + options.create_if_missing = true; + + // open DB + Status s = DB::Open(options, kDBPath, &db); + assert(s.ok()); + + // Put key-value + s = db->Put(WriteOptions(), "key1", "value"); + assert(s.ok()); + std::string value; + // get value + s = db->Get(ReadOptions(), "key1", &value); + assert(s.ok()); + assert(value == "value"); + + // atomically apply a set of updates + { + WriteBatch batch; + batch.Delete("key1"); + batch.Put("key2", value); + s = db->Write(WriteOptions(), &batch); + } + + s = db->Get(ReadOptions(), "key1", &value); + assert(s.IsNotFound()); + + db->Get(ReadOptions(), "key2", &value); + assert(value == "value"); + + delete db; + + return 0; +} diff --git a/external/rocksdb/examples/transaction_example.cc b/external/rocksdb/examples/transaction_example.cc new file mode 100755 index 0000000000..914f1bc304 --- /dev/null +++ b/external/rocksdb/examples/transaction_example.cc @@ -0,0 +1,144 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef ROCKSDB_LITE + +#include "rocksdb/db.h" +#include "rocksdb/options.h" +#include "rocksdb/slice.h" +#include "rocksdb/utilities/transaction.h" +#include "rocksdb/utilities/transaction_db.h" + +using namespace rocksdb; + +std::string kDBPath = "/tmp/rocksdb_transaction_example"; + +int main() { + // open DB + Options options; + TransactionDBOptions txn_db_options; + options.create_if_missing = true; + TransactionDB* txn_db; + + Status s = TransactionDB::Open(options, txn_db_options, kDBPath, &txn_db); + assert(s.ok()); + + WriteOptions write_options; + ReadOptions read_options; + TransactionOptions txn_options; + std::string value; + + //////////////////////////////////////////////////////// + // + // Simple OptimisticTransaction Example ("Read Committed") + // + //////////////////////////////////////////////////////// + + // Start a transaction + Transaction* txn = txn_db->BeginTransaction(write_options); + assert(txn); + + // Read a key in this transaction + s = txn->Get(read_options, "abc", &value); + assert(s.IsNotFound()); + + // Write a key in this transaction + s = txn->Put("abc", "def"); + assert(s.ok()); + + // Read a key OUTSIDE this transaction. Does not affect txn. + s = txn_db->Get(read_options, "abc", &value); + + // Write a key OUTSIDE of this transaction. + // Does not affect txn since this is an unrelated key. If we wrote key 'abc' + // here, the transaction would fail to commit. + s = txn_db->Put(write_options, "xyz", "zzz"); + + // Commit transaction + s = txn->Commit(); + assert(s.ok()); + delete txn; + + //////////////////////////////////////////////////////// + // + // "Repeatable Read" (Snapshot Isolation) Example + // -- Using a single Snapshot + // + //////////////////////////////////////////////////////// + + // Set a snapshot at start of transaction by setting set_snapshot=true + txn_options.set_snapshot = true; + txn = txn_db->BeginTransaction(write_options, txn_options); + + const Snapshot* snapshot = txn->GetSnapshot(); + + // Write a key OUTSIDE of transaction + s = txn_db->Put(write_options, "abc", "xyz"); + assert(s.ok()); + + // Attempt to read a key using the snapshot. This will fail since + // the previous write outside this txn conflicts with this read. + read_options.snapshot = snapshot; + s = txn->GetForUpdate(read_options, "abc", &value); + assert(s.IsBusy()); + + txn->Rollback(); + + delete txn; + // Clear snapshot from read options since it is no longer valid + read_options.snapshot = nullptr; + snapshot = nullptr; + + //////////////////////////////////////////////////////// + // + // "Read Committed" (Monotonic Atomic Views) Example + // --Using multiple Snapshots + // + //////////////////////////////////////////////////////// + + // In this example, we set the snapshot multiple times. This is probably + // only necessary if you have very strict isolation requirements to + // implement. 
+
+  ////////////////////////////////////////////////////////
+  //
+  // "Read Committed" (Monotonic Atomic Views) Example
+  //   -- Using multiple Snapshots
+  //
+  ////////////////////////////////////////////////////////
+
+  // In this example, we set the snapshot multiple times. This is probably
+  // only necessary if you have very strict isolation requirements to
+  // implement.
+
+  // Set a snapshot at start of transaction
+  txn_options.set_snapshot = true;
+  txn = txn_db->BeginTransaction(write_options, txn_options);
+
+  // Do some reads and writes to key "x"
+  read_options.snapshot = txn_db->GetSnapshot();
+  s = txn->Get(read_options, "x", &value);
+  txn->Put("x", "x");
+
+  // Do a write outside of the transaction to key "y"
+  s = txn_db->Put(write_options, "y", "y");
+
+  // Set a new snapshot in the transaction
+  txn->SetSnapshot();
+  txn->SetSavePoint();
+  read_options.snapshot = txn_db->GetSnapshot();
+
+  // Do some reads and writes to key "y"
+  // Since the snapshot was advanced, the write done outside of the
+  // transaction does not conflict.
+  s = txn->GetForUpdate(read_options, "y", &value);
+  txn->Put("y", "y");
+
+  // Decide we want to revert the last write from this transaction.
+  txn->RollbackToSavePoint();
+
+  // Commit.
+  s = txn->Commit();
+  assert(s.ok());
+  delete txn;
+  // Clear snapshot from read options since it is no longer valid
+  read_options.snapshot = nullptr;
+
+  // Cleanup
+  delete txn_db;
+  DestroyDB(kDBPath, options);
+  return 0;
+}
+
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/hdfs/README b/external/rocksdb/hdfs/README
new file mode 100755
index 0000000000..9036511692
--- /dev/null
+++ b/external/rocksdb/hdfs/README
@@ -0,0 +1,23 @@
+This directory contains the hdfs extensions needed to make rocksdb store
+files in HDFS.
+
+It has been compiled and tested against CDH 4.4 (2.0.0+1475-1.cdh4.4.0.p0.23~precise-cdh4.4.0).
+
+The configuration assumes that the packages libhdfs0 and libhdfs0-dev are
+installed, which basically means that hdfs.h is in /usr/include and libhdfs is in /usr/lib.
+
+The env_hdfs.h file defines the rocksdb objects that are needed to talk to an
+underlying filesystem.
+
+If you want to compile rocksdb with hdfs support, please set the following
+environment variables appropriately (also defined in setup.sh for convenience):
+  USE_HDFS=1
+  JAVA_HOME=/usr/local/jdk-7u79-64
+  LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/jdk-7u79-64/jre/lib/amd64/server:/usr/local/jdk-7u79-64/jre/lib/amd64/:./snappy/libs
+  make clean all db_bench
+
+To run db_bench,
+  set CLASSPATH to include your hadoop distribution
+  db_bench --hdfs="hdfs://hbaseudbperf001.snc1.facebook.com:9000"
+
+
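Note: once a build with USE_HDFS=1 succeeds, an application points RocksDB at HDFS by swapping in the HdfsEnv declared in the header below. A minimal sketch under that assumption (the namenode address and DB path are placeholders, not values from the diff):

#include <cassert>
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "hdfs/env_hdfs.h"

// Sketch: route all RocksDB file I/O through libhdfs via HdfsEnv.
int main() {
  rocksdb::HdfsEnv* hdfs_env =
      new rocksdb::HdfsEnv("hdfs://namenode:9000/");  // placeholder address

  rocksdb::Options options;
  options.create_if_missing = true;
  options.env = hdfs_env;  // SST and log files now live in HDFS

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/rocksdb/example_db", &db);
  assert(s.ok());

  // ... use db as usual ...

  delete db;       // the Env must outlive the DB
  delete hdfs_env;
  return 0;
}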
diff --git a/external/rocksdb/hdfs/env_hdfs.h b/external/rocksdb/hdfs/env_hdfs.h
new file mode 100755
index 0000000000..ab27e0440e
--- /dev/null
+++ b/external/rocksdb/hdfs/env_hdfs.h
@@ -0,0 +1,369 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+
+#pragma once
+#include <algorithm>
+#include <stdio.h>
+#include <sys/time.h>
+#include <time.h>
+#include "port/sys_time.h"
+#include "rocksdb/env.h"
+#include "rocksdb/status.h"
+
+#ifdef USE_HDFS
+#include <hdfs.h>
+
+namespace rocksdb {
+
+// Thrown during execution when there is an issue with the supplied
+// arguments.
+class HdfsUsageException : public std::exception { };
+
+// A simple exception that indicates something went wrong that is not
+// recoverable. The intention is for the message to be printed (with
+// nothing else) and the process terminate.
+class HdfsFatalException : public std::exception {
+public:
+  explicit HdfsFatalException(const std::string& s) : what_(s) { }
+  virtual ~HdfsFatalException() throw() { }
+  virtual const char* what() const throw() {
+    return what_.c_str();
+  }
+private:
+  const std::string what_;
+};
+
+//
+// The HDFS environment for rocksdb. This class overrides all the
+// file/dir access methods and delegates the thread-mgmt methods to the
+// default posix environment.
+//
+class HdfsEnv : public Env {
+
+ public:
+  explicit HdfsEnv(const std::string& fsname) : fsname_(fsname) {
+    posixEnv = Env::Default();
+    fileSys_ = connectToPath(fsname_);
+  }
+
+  virtual ~HdfsEnv() {
+    fprintf(stderr, "Destroying HdfsEnv::Default()\n");
+    hdfsDisconnect(fileSys_);
+  }
+
+  virtual Status NewSequentialFile(const std::string& fname,
+                                   std::unique_ptr<SequentialFile>* result,
+                                   const EnvOptions& options);
+
+  virtual Status NewRandomAccessFile(const std::string& fname,
+                                     std::unique_ptr<RandomAccessFile>* result,
+                                     const EnvOptions& options);
+
+  virtual Status NewWritableFile(const std::string& fname,
+                                 std::unique_ptr<WritableFile>* result,
+                                 const EnvOptions& options);
+
+  virtual Status NewDirectory(const std::string& name,
+                              std::unique_ptr<Directory>* result);
+
+  virtual Status FileExists(const std::string& fname);
+
+  virtual Status GetChildren(const std::string& path,
+                             std::vector<std::string>* result);
+
+  virtual Status DeleteFile(const std::string& fname);
+
+  virtual Status CreateDir(const std::string& name);
+
+  virtual Status CreateDirIfMissing(const std::string& name);
+
+  virtual Status DeleteDir(const std::string& name);
+
+  virtual Status GetFileSize(const std::string& fname, uint64_t* size);
+
+  virtual Status GetFileModificationTime(const std::string& fname,
+                                         uint64_t* file_mtime);
+
+  virtual Status RenameFile(const std::string& src, const std::string& target);
+
+  virtual Status LinkFile(const std::string& src, const std::string& target) {
+    return Status::NotSupported(); // not supported
+  }
+
+  virtual Status LockFile(const std::string& fname, FileLock** lock);
+
+  virtual Status UnlockFile(FileLock* lock);
+
+  virtual Status NewLogger(const std::string& fname,
+                           std::shared_ptr<Logger>* result);
+
+  virtual void Schedule(void (*function)(void* arg), void* arg,
+                        Priority pri = LOW, void* tag = nullptr,
+                        void (*unschedFunction)(void* arg) = 0) {
+    posixEnv->Schedule(function, arg, pri, tag, unschedFunction);
+  }
+
+  virtual int UnSchedule(void* tag, Priority pri) {
+    return posixEnv->UnSchedule(tag, pri);
+  }
+
+  virtual void StartThread(void (*function)(void* arg), void* arg) {
+    posixEnv->StartThread(function, arg);
+  }
+
+  virtual void WaitForJoin() { posixEnv->WaitForJoin(); }
+
+  virtual unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const
+      override {
+    return posixEnv->GetThreadPoolQueueLen(pri);
+  }
+
+  virtual Status GetTestDirectory(std::string* path) {
+    return posixEnv->GetTestDirectory(path);
+  }
+
+  virtual uint64_t NowMicros() {
+    return posixEnv->NowMicros();
+  }
+
+  virtual void SleepForMicroseconds(int micros) {
+    posixEnv->SleepForMicroseconds(micros);
+  }
+
+  virtual Status GetHostName(char* name, uint64_t len) {
+    return posixEnv->GetHostName(name, len);
+  }
+
+  virtual Status GetCurrentTime(int64_t* unix_time) {
+    return posixEnv->GetCurrentTime(unix_time);
+  }
+
+  virtual Status GetAbsolutePath(const std::string& db_path,
+                                 std::string* output_path) {
+    return posixEnv->GetAbsolutePath(db_path, output_path);
+  }
+
+  virtual void SetBackgroundThreads(int number, Priority pri = LOW) {
+    posixEnv->SetBackgroundThreads(number, pri);
+  }
+  virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) override {
+    posixEnv->IncBackgroundThreadsIfNeeded(number, pri);
+  }
+
+  virtual std::string TimeToString(uint64_t number) {
+    return posixEnv->TimeToString(number);
+  }
+
+  static uint64_t gettid() {
+    assert(sizeof(pthread_t) <= sizeof(uint64_t));
+    return (uint64_t)pthread_self();
+  }
+
+  virtual uint64_t GetThreadID() const override {
+    return HdfsEnv::gettid();
+  }
+
+ private:
+  std::string fsname_;  // string of the form "hdfs://hostname:port/"
+  hdfsFS fileSys_;      // a single FileSystem object for all files
+  Env* posixEnv;        // This object is derived from Env, but not from
+                        // posixEnv. We have posixEnv as an encapsulated
+                        // object here so that we can use posix timers,
+                        // posix threads, etc.
+
+  static const std::string kProto;
+  static const std::string pathsep;
+
+  /**
+   * If the URI is specified of the form hdfs://server:port/path,
+   * then connect to the specified cluster,
+   * else connect to default.
+   */
+  hdfsFS connectToPath(const std::string& uri) {
+    if (uri.empty()) {
+      return nullptr;
+    }
+    if (uri.find(kProto) != 0) {
+      // uri doesn't start with hdfs:// -> use default:0, which is special
+      // to libhdfs.
+      return hdfsConnectNewInstance("default", 0);
+    }
+    const std::string hostport = uri.substr(kProto.length());
+
+    std::vector<std::string> parts;
+    split(hostport, ':', parts);
+    if (parts.size() != 2) {
+      throw HdfsFatalException("Bad uri for hdfs " + uri);
+    }
+    // parts[0] = hosts, parts[1] = port/xxx/yyy
+    std::string host(parts[0]);
+    std::string remaining(parts[1]);
+
+    int rem = remaining.find(pathsep);
+    std::string portStr = (rem == 0 ? remaining :
+                           remaining.substr(0, rem));
+
+    tPort port;
+    port = atoi(portStr.c_str());
+    if (port == 0) {
+      throw HdfsFatalException("Bad host-port for hdfs " + uri);
+    }
+    hdfsFS fs = hdfsConnectNewInstance(host.c_str(), port);
+    return fs;
+  }
+
+  void split(const std::string &s, char delim,
+             std::vector<std::string> &elems) {
+    elems.clear();
+    size_t prev = 0;
+    size_t pos = s.find(delim);
+    while (pos != std::string::npos) {
+      // keep only the token itself, not text past the delimiter
+      elems.push_back(s.substr(prev, pos - prev));
+      prev = pos + 1;
+      pos = s.find(delim, prev);
+    }
+    elems.push_back(s.substr(prev, s.size()));
+  }
+};
+
+}  // namespace rocksdb
+
+#else  // USE_HDFS
+
+namespace rocksdb {
+
+static const Status notsup;
+
+class HdfsEnv : public Env {
+
+ public:
+  explicit HdfsEnv(const std::string& fsname) {
+    fprintf(stderr, "You have not built rocksdb with HDFS support\n");
+    fprintf(stderr, "Please see hdfs/README for details\n");
+    abort();
+  }
+
+  virtual ~HdfsEnv() {
+  }
+
+  virtual Status NewSequentialFile(const std::string& fname,
+                                   unique_ptr<SequentialFile>* result,
+                                   const EnvOptions& options) override;
+
+  virtual Status NewRandomAccessFile(const std::string& fname,
+                                     unique_ptr<RandomAccessFile>* result,
+                                     const EnvOptions& options) override {
+    return notsup;
+  }
+
+  virtual Status NewWritableFile(const std::string& fname,
+                                 unique_ptr<WritableFile>* result,
+                                 const EnvOptions& options) override {
+    return notsup;
+  }
+
+  virtual Status NewDirectory(const std::string& name,
+                              unique_ptr<Directory>* result) override {
+    return notsup;
+  }
+
+  virtual Status FileExists(const std::string& fname) override {
+    return notsup;
+  }
+
+  virtual Status GetChildren(const std::string& path,
+                             std::vector<std::string>* result) override {
+    return notsup;
+  }
+
+  virtual Status DeleteFile(const std::string& fname) override {
+    return notsup;
+  }
+
+  virtual Status CreateDir(const std::string& name) override { return notsup; }
+
+  virtual Status CreateDirIfMissing(const std::string& name) override {
+    return notsup;
+  }
+
+  virtual Status DeleteDir(const std::string& name) override { return notsup; }
+
+  virtual Status GetFileSize(const std::string& fname,
+                             uint64_t* size) override {
+    return notsup;
+  }
+
+  virtual Status GetFileModificationTime(const std::string& fname,
+                                         uint64_t* time) override {
+    return notsup;
+  }
+
+  virtual Status RenameFile(const std::string& src,
+                            const std::string& target) override {
+    return notsup;
+  }
+
+  virtual Status LinkFile(const std::string& src,
+                          const std::string& target) override {
+    return notsup;
+  }
+
+  virtual Status LockFile(const std::string& fname, FileLock** lock) override {
+    return notsup;
+  }
+
+  virtual Status UnlockFile(FileLock* lock) override { return notsup; }
+
+  virtual Status NewLogger(const std::string& fname,
+                           shared_ptr<Logger>* result) override {
+    return notsup;
+  }
+
+  virtual void Schedule(void (*function)(void* arg), void* arg,
+                        Priority pri = LOW, void* tag = nullptr,
+                        void (*unschedFunction)(void* arg) = 0) override {}
+
+  virtual int UnSchedule(void* tag, Priority pri) override { return 0; }
+
+  virtual void StartThread(void (*function)(void* arg), void* arg) override {}
+
+  virtual void WaitForJoin() override {}
+
+  virtual unsigned int GetThreadPoolQueueLen(
+      Priority pri = LOW) const override {
+    return 0;
+  }
+
+  virtual Status GetTestDirectory(std::string* path) override { return notsup; }
+
+  virtual uint64_t NowMicros() override { return 0; }
+
+  virtual void SleepForMicroseconds(int micros) override {}
+
+  virtual Status GetHostName(char* name, uint64_t len) override {
+    return notsup;
+  }
+
+  virtual Status GetCurrentTime(int64_t* unix_time) override { return notsup; }
+
+  virtual Status GetAbsolutePath(const std::string& db_path,
+                                 std::string* outputpath) override {
+    return notsup;
+  }
+
+  virtual void SetBackgroundThreads(int number, Priority pri = LOW) override {}
+  virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) override {
+  }
+  virtual std::string TimeToString(uint64_t number) override { return ""; }
+
+  virtual uint64_t GetThreadID() const override {
+    return 0;
+  }
+};
+}
+
+#endif  // USE_HDFS
diff --git a/external/rocksdb/hdfs/setup.sh b/external/rocksdb/hdfs/setup.sh
new file mode 100755
index 0000000000..ac69b525df
--- /dev/null
+++ b/external/rocksdb/hdfs/setup.sh
@@ -0,0 +1,7 @@
+export USE_HDFS=1
+export LD_LIBRARY_PATH=$JAVA_HOME/jre/lib/amd64/server:$JAVA_HOME/jre/lib/amd64:/usr/lib/hadoop/lib/native
+
+export CLASSPATH=
+for f in `find /usr/lib/hadoop-hdfs | grep jar`; do export CLASSPATH=$CLASSPATH:$f; done
+for f in `find /usr/lib/hadoop | grep jar`; do export CLASSPATH=$CLASSPATH:$f; done
+for f in `find /usr/lib/hadoop/client | grep jar`; do export CLASSPATH=$CLASSPATH:$f; done
diff --git a/external/rocksdb/include/rocksdb/c.h b/external/rocksdb/include/rocksdb/c.h
new file mode 100755
index 0000000000..e269aa9b48
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/c.h
@@ -0,0 +1,958 @@
+/* Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+   This source code is licensed under the BSD-style license found in the
+   LICENSE file in the root directory of this source tree. An additional grant
+   of patent rights can be found in the PATENTS file in the same directory.
+   Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+   Use of this source code is governed by a BSD-style license that can be
+   found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+  C bindings for rocksdb. May be useful as a stable ABI that can be
+  used by programs that keep rocksdb in a shared library, or for
+  a JNI api.
+
+  Does not support:
+  . getters for the option types
+  . custom comparators that implement key shortening
+  . capturing post-write-snapshot
+  . custom iter, db, env, cache implementations using just the C bindings
+
+  Some conventions:
+
+  (1) We expose just opaque struct pointers and functions to clients.
+  This allows us to change internal representations without having to
+  recompile clients.
+
+  (2) For simplicity, there is no equivalent to the Slice type. Instead,
+  the caller has to pass the pointer and length as separate
+  arguments.
+
+  (3) Errors are represented by a null-terminated c string. NULL
+  means no error. All operations that can raise an error are passed
+  a "char** errptr" as the last argument. One of the following must
+  be true on entry:
+     *errptr == NULL
+     *errptr points to a malloc()ed null-terminated error message
+  On success, a leveldb routine leaves *errptr unchanged.
+  On failure, leveldb frees the old value of *errptr and
+  sets *errptr to a malloc()ed error message.
+
+  (4) Bools have the type unsigned char (0 == false; rest == true)
+
+  (5) All of the pointer arguments must be non-NULL.
+*/
+
+#ifndef STORAGE_ROCKSDB_INCLUDE_C_H_
+#define STORAGE_ROCKSDB_INCLUDE_C_H_
+
+#pragma once
+
+#ifdef _WIN32
+#ifdef ROCKSDB_DLL
+#ifdef ROCKSDB_LIBRARY_EXPORTS
+#define ROCKSDB_LIBRARY_API __declspec(dllexport)
+#else
+#define ROCKSDB_LIBRARY_API __declspec(dllimport)
+#endif
+#else
+#define ROCKSDB_LIBRARY_API
+#endif
+#else
+#define ROCKSDB_LIBRARY_API
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdarg.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/* Exported types */
+
+typedef struct rocksdb_t rocksdb_t;
+typedef struct rocksdb_backup_engine_t rocksdb_backup_engine_t;
+typedef struct rocksdb_backup_engine_info_t rocksdb_backup_engine_info_t;
+typedef struct rocksdb_restore_options_t rocksdb_restore_options_t;
+typedef struct rocksdb_cache_t rocksdb_cache_t;
+typedef struct rocksdb_compactionfilter_t rocksdb_compactionfilter_t;
+typedef struct rocksdb_compactionfiltercontext_t
+    rocksdb_compactionfiltercontext_t;
+typedef struct rocksdb_compactionfilterfactory_t
+    rocksdb_compactionfilterfactory_t;
+typedef struct rocksdb_comparator_t rocksdb_comparator_t;
+typedef struct rocksdb_env_t rocksdb_env_t;
+typedef struct rocksdb_fifo_compaction_options_t rocksdb_fifo_compaction_options_t;
+typedef struct rocksdb_filelock_t rocksdb_filelock_t;
+typedef struct rocksdb_filterpolicy_t rocksdb_filterpolicy_t;
+typedef struct rocksdb_flushoptions_t rocksdb_flushoptions_t;
+typedef struct rocksdb_iterator_t rocksdb_iterator_t;
+typedef struct rocksdb_logger_t rocksdb_logger_t;
+typedef struct rocksdb_mergeoperator_t rocksdb_mergeoperator_t;
+typedef struct rocksdb_options_t rocksdb_options_t;
+typedef struct rocksdb_block_based_table_options_t
+    rocksdb_block_based_table_options_t;
+typedef struct rocksdb_cuckoo_table_options_t
+    rocksdb_cuckoo_table_options_t;
+typedef struct rocksdb_randomfile_t rocksdb_randomfile_t;
+typedef struct rocksdb_readoptions_t rocksdb_readoptions_t;
+typedef struct rocksdb_seqfile_t rocksdb_seqfile_t;
+typedef struct rocksdb_slicetransform_t rocksdb_slicetransform_t;
+typedef struct rocksdb_snapshot_t rocksdb_snapshot_t;
+typedef struct rocksdb_writablefile_t rocksdb_writablefile_t;
+typedef struct rocksdb_writebatch_t rocksdb_writebatch_t;
+typedef struct rocksdb_writeoptions_t rocksdb_writeoptions_t;
+typedef struct
rocksdb_universal_compaction_options_t rocksdb_universal_compaction_options_t; +typedef struct rocksdb_livefiles_t rocksdb_livefiles_t; +typedef struct rocksdb_column_family_handle_t rocksdb_column_family_handle_t; + +/* DB operations */ + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open( + const rocksdb_options_t* options, const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_for_read_only( + const rocksdb_options_t* options, const char* name, + unsigned char error_if_log_file_exist, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_backup_engine_t* rocksdb_backup_engine_open( + const rocksdb_options_t* options, const char* path, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_create_new_backup( + rocksdb_backup_engine_t* be, rocksdb_t* db, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_purge_old_backups( + rocksdb_backup_engine_t* be, uint32_t num_backups_to_keep, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_restore_options_t* +rocksdb_restore_options_create(); +extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_destroy( + rocksdb_restore_options_t* opt); +extern ROCKSDB_LIBRARY_API void rocksdb_restore_options_set_keep_log_files( + rocksdb_restore_options_t* opt, int v); + +extern ROCKSDB_LIBRARY_API void +rocksdb_backup_engine_restore_db_from_latest_backup( + rocksdb_backup_engine_t* be, const char* db_dir, const char* wal_dir, + const rocksdb_restore_options_t* restore_options, char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_backup_engine_info_t* +rocksdb_backup_engine_get_backup_info(rocksdb_backup_engine_t* be); + +extern ROCKSDB_LIBRARY_API int rocksdb_backup_engine_info_count( + const rocksdb_backup_engine_info_t* info); + +extern ROCKSDB_LIBRARY_API int64_t +rocksdb_backup_engine_info_timestamp(const rocksdb_backup_engine_info_t* info, + int index); + +extern ROCKSDB_LIBRARY_API uint32_t +rocksdb_backup_engine_info_backup_id(const rocksdb_backup_engine_info_t* info, + int index); + +extern ROCKSDB_LIBRARY_API uint64_t +rocksdb_backup_engine_info_size(const rocksdb_backup_engine_info_t* info, + int index); + +extern ROCKSDB_LIBRARY_API uint32_t rocksdb_backup_engine_info_number_files( + const rocksdb_backup_engine_info_t* info, int index); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_info_destroy( + const rocksdb_backup_engine_info_t* info); + +extern ROCKSDB_LIBRARY_API void rocksdb_backup_engine_close( + rocksdb_backup_engine_t* be); + +extern ROCKSDB_LIBRARY_API rocksdb_t* rocksdb_open_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char** column_family_names, + const rocksdb_options_t** column_family_options, + rocksdb_column_family_handle_t** column_family_handles, char** errptr); + +extern ROCKSDB_LIBRARY_API rocksdb_t* +rocksdb_open_for_read_only_column_families( + const rocksdb_options_t* options, const char* name, int num_column_families, + const char** column_family_names, + const rocksdb_options_t** column_family_options, + rocksdb_column_family_handle_t** column_family_handles, + unsigned char error_if_log_file_exist, char** errptr); + +extern ROCKSDB_LIBRARY_API char** rocksdb_list_column_families( + const rocksdb_options_t* options, const char* name, size_t* lencf, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_list_column_families_destroy( + char** list, size_t len); + +extern ROCKSDB_LIBRARY_API rocksdb_column_family_handle_t* +rocksdb_create_column_family(rocksdb_t* 
db, + const rocksdb_options_t* column_family_options, + const char* column_family_name, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_drop_column_family( + rocksdb_t* db, rocksdb_column_family_handle_t* handle, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_column_family_handle_destroy( + rocksdb_column_family_handle_t*); + +extern ROCKSDB_LIBRARY_API void rocksdb_close(rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_put( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_put_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_merge( + rocksdb_t* db, const rocksdb_writeoptions_t* options, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_merge_cf( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, const char* val, size_t vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_write( + rocksdb_t* db, const rocksdb_writeoptions_t* options, + rocksdb_writebatch_t* batch, char** errptr); + +/* Returns NULL if not found. A malloc()ed array otherwise. + Stores the length of the array in *vallen. */ +extern ROCKSDB_LIBRARY_API char* rocksdb_get( + rocksdb_t* db, const rocksdb_readoptions_t* options, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +extern ROCKSDB_LIBRARY_API char* rocksdb_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family, const char* key, + size_t keylen, size_t* vallen, char** errptr); + +// if values_list[i] == NULL and errs[i] == NULL, +// then we got status.IsNotFound(), which we will not return. +// all errors except status status.ok() and status.IsNotFound() are returned. +// +// errs, values_list and values_list_sizes must be num_keys in length, +// allocated by the caller. +// errs is a list of strings as opposed to the conventional one error, +// where errs[i] is the status for retrieval of keys_list[i]. +// each non-NULL errs entry is a malloc()ed, null terminated string. +// each non-NULL values_list entry is a malloc()ed array, with +// the length for each stored in values_list_sizes[i]. 
+extern ROCKSDB_LIBRARY_API void rocksdb_multi_get( + rocksdb_t* db, const rocksdb_readoptions_t* options, size_t num_keys, + const char* const* keys_list, const size_t* keys_list_sizes, + char** values_list, size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API void rocksdb_multi_get_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + const rocksdb_column_family_handle_t* const* column_families, + size_t num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, char** values_list, + size_t* values_list_sizes, char** errs); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator( + rocksdb_t* db, const rocksdb_readoptions_t* options); + +extern ROCKSDB_LIBRARY_API rocksdb_iterator_t* rocksdb_create_iterator_cf( + rocksdb_t* db, const rocksdb_readoptions_t* options, + rocksdb_column_family_handle_t* column_family); + +extern ROCKSDB_LIBRARY_API void rocksdb_create_iterators( + rocksdb_t *db, rocksdb_readoptions_t* opts, + rocksdb_column_family_handle_t** column_families, + rocksdb_iterator_t** iterators, size_t size, char** errptr); + +extern ROCKSDB_LIBRARY_API const rocksdb_snapshot_t* rocksdb_create_snapshot( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_release_snapshot( + rocksdb_t* db, const rocksdb_snapshot_t* snapshot); + +/* Returns NULL if property name is unknown. + Else returns a pointer to a malloc()-ed null-terminated value. */ +extern ROCKSDB_LIBRARY_API char* rocksdb_property_value(rocksdb_t* db, + const char* propname); + +extern ROCKSDB_LIBRARY_API char* rocksdb_property_value_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* propname); + +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes( + rocksdb_t* db, int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint64_t* sizes); + +extern ROCKSDB_LIBRARY_API void rocksdb_approximate_sizes_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + int num_ranges, const char* const* range_start_key, + const size_t* range_start_key_len, const char* const* range_limit_key, + const size_t* range_limit_key_len, uint64_t* sizes); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range(rocksdb_t* db, + const char* start_key, + size_t start_key_len, + const char* limit_key, + size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_compact_range_cf( + rocksdb_t* db, rocksdb_column_family_handle_t* column_family, + const char* start_key, size_t start_key_len, const char* limit_key, + size_t limit_key_len); + +extern ROCKSDB_LIBRARY_API void rocksdb_delete_file(rocksdb_t* db, + const char* name); + +extern ROCKSDB_LIBRARY_API const rocksdb_livefiles_t* rocksdb_livefiles( + rocksdb_t* db); + +extern ROCKSDB_LIBRARY_API void rocksdb_flush( + rocksdb_t* db, const rocksdb_flushoptions_t* options, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_disable_file_deletions(rocksdb_t* db, + char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_enable_file_deletions( + rocksdb_t* db, unsigned char force, char** errptr); + +/* Management operations */ + +extern ROCKSDB_LIBRARY_API void rocksdb_destroy_db( + const rocksdb_options_t* options, const char* name, char** errptr); + +extern ROCKSDB_LIBRARY_API void rocksdb_repair_db( + const rocksdb_options_t* options, const char* name, char** errptr); + +/* Iterator */ + +extern ROCKSDB_LIBRARY_API void rocksdb_iter_destroy(rocksdb_iterator_t*); 
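Note: the errptr and malloc() conventions described at the top of this header are easiest to see in use. A minimal sketch against the functions declared here (the DB path is a placeholder; written in C++ for consistency with the rest of the vendored examples, though plain C works the same way):

#include <cstdio>
#include "rocksdb/c.h"

// Sketch: convention (3) in action. Every fallible call takes a char** that
// stays NULL on success; returned buffers are malloc()ed and must be
// released with rocksdb_free().
int main() {
  char* err = nullptr;

  rocksdb_options_t* options = rocksdb_options_create();
  rocksdb_options_set_create_if_missing(options, 1);

  rocksdb_t* db = rocksdb_open(options, "/tmp/rocksdb_c_example", &err);
  if (err != nullptr) {
    std::fprintf(stderr, "open failed: %s\n", err);
    rocksdb_free(err);
    return 1;
  }

  rocksdb_writeoptions_t* wo = rocksdb_writeoptions_create();
  rocksdb_put(db, wo, "key", 3, "value", 5, &err);
  if (err != nullptr) {
    std::fprintf(stderr, "put failed: %s\n", err);
    rocksdb_free(err);
    err = nullptr;
  }

  rocksdb_readoptions_t* ro = rocksdb_readoptions_create();
  size_t vlen = 0;
  char* val = rocksdb_get(db, ro, "key", 3, &vlen, &err);  // NULL if not found
  if (val != nullptr) {
    std::fprintf(stdout, "got %.*s\n", (int)vlen, val);
    rocksdb_free(val);  // caller owns the malloc()ed value
  }

  rocksdb_readoptions_destroy(ro);
  rocksdb_writeoptions_destroy(wo);
  rocksdb_close(db);
  rocksdb_options_destroy(options);
  return 0;
}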
+extern ROCKSDB_LIBRARY_API unsigned char rocksdb_iter_valid( + const rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_first(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek_to_last(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_seek(rocksdb_iterator_t*, + const char* k, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_next(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_prev(rocksdb_iterator_t*); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_key( + const rocksdb_iterator_t*, size_t* klen); +extern ROCKSDB_LIBRARY_API const char* rocksdb_iter_value( + const rocksdb_iterator_t*, size_t* vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_iter_get_error( + const rocksdb_iterator_t*, char** errptr); + +/* Write batch */ + +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create(); +extern ROCKSDB_LIBRARY_API rocksdb_writebatch_t* rocksdb_writebatch_create_from( + const char* rep, size_t size); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_destroy( + rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_clear(rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API int rocksdb_writebatch_count(rocksdb_writebatch_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put(rocksdb_writebatch_t*, + const char* key, + size_t klen, + const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_putv_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge(rocksdb_writebatch_t*, + const char* key, + size_t klen, + const char* val, + size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_merge_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen, const char* val, size_t vlen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes, int num_values, + const char* const* values_list, const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_mergev_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes, + int num_values, const char* const* values_list, + const size_t* values_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete(rocksdb_writebatch_t*, + const char* key, + size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_delete_cf( + rocksdb_writebatch_t*, rocksdb_column_family_handle_t* column_family, + const char* key, size_t klen); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_deletev( + rocksdb_writebatch_t* b, int num_keys, const char* const* keys_list, + const size_t* keys_list_sizes); +extern ROCKSDB_LIBRARY_API void 
rocksdb_writebatch_deletev_cf( + rocksdb_writebatch_t* b, rocksdb_column_family_handle_t* column_family, + int num_keys, const char* const* keys_list, const size_t* keys_list_sizes); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_put_log_data( + rocksdb_writebatch_t*, const char* blob, size_t len); +extern ROCKSDB_LIBRARY_API void rocksdb_writebatch_iterate( + rocksdb_writebatch_t*, void* state, + void (*put)(void*, const char* k, size_t klen, const char* v, size_t vlen), + void (*deleted)(void*, const char* k, size_t klen)); +extern ROCKSDB_LIBRARY_API const char* rocksdb_writebatch_data( + rocksdb_writebatch_t*, size_t* size); + +/* Block based table options */ + +extern ROCKSDB_LIBRARY_API rocksdb_block_based_table_options_t* +rocksdb_block_based_options_create(); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_destroy( + rocksdb_block_based_table_options_t* options); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_size( + rocksdb_block_based_table_options_t* options, size_t block_size); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_block_size_deviation( + rocksdb_block_based_table_options_t* options, int block_size_deviation); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_block_restart_interval( + rocksdb_block_based_table_options_t* options, int block_restart_interval); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_filter_policy( + rocksdb_block_based_table_options_t* options, + rocksdb_filterpolicy_t* filter_policy); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_no_block_cache( + rocksdb_block_based_table_options_t* options, unsigned char no_block_cache); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_block_cache( + rocksdb_block_based_table_options_t* options, rocksdb_cache_t* block_cache); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_block_cache_compressed( + rocksdb_block_based_table_options_t* options, + rocksdb_cache_t* block_cache_compressed); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_whole_key_filtering( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_format_version( + rocksdb_block_based_table_options_t*, int); +enum { + rocksdb_block_based_table_index_type_binary_search = 0, + rocksdb_block_based_table_index_type_hash_search = 1, +}; +extern ROCKSDB_LIBRARY_API void rocksdb_block_based_options_set_index_type( + rocksdb_block_based_table_options_t*, int); // uses one of the above enums +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_hash_index_allow_collision( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_cache_index_and_filter_blocks( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_pin_l0_filter_and_index_blocks_in_cache( + rocksdb_block_based_table_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_block_based_options_set_skip_table_builder_flush( + rocksdb_block_based_table_options_t* options, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_block_based_table_factory( + rocksdb_options_t* opt, rocksdb_block_based_table_options_t* table_options); + +/* Cuckoo table options */ + +extern ROCKSDB_LIBRARY_API rocksdb_cuckoo_table_options_t* +rocksdb_cuckoo_options_create(); +extern ROCKSDB_LIBRARY_API void 
rocksdb_cuckoo_options_destroy( + rocksdb_cuckoo_table_options_t* options); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_hash_ratio( + rocksdb_cuckoo_table_options_t* options, double v); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_max_search_depth( + rocksdb_cuckoo_table_options_t* options, uint32_t v); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_cuckoo_block_size( + rocksdb_cuckoo_table_options_t* options, uint32_t v); +extern ROCKSDB_LIBRARY_API void +rocksdb_cuckoo_options_set_identity_as_first_hash( + rocksdb_cuckoo_table_options_t* options, unsigned char v); +extern ROCKSDB_LIBRARY_API void rocksdb_cuckoo_options_set_use_module_hash( + rocksdb_cuckoo_table_options_t* options, unsigned char v); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_cuckoo_table_factory( + rocksdb_options_t* opt, rocksdb_cuckoo_table_options_t* table_options); + +/* Options */ + +extern ROCKSDB_LIBRARY_API rocksdb_options_t* rocksdb_options_create(); +extern ROCKSDB_LIBRARY_API void rocksdb_options_destroy(rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_increase_parallelism( + rocksdb_options_t* opt, int total_threads); +extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_for_point_lookup( + rocksdb_options_t* opt, uint64_t block_cache_size_mb); +extern ROCKSDB_LIBRARY_API void rocksdb_options_optimize_level_style_compaction( + rocksdb_options_t* opt, uint64_t memtable_memory_budget); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_optimize_universal_style_compaction( + rocksdb_options_t* opt, uint64_t memtable_memory_budget); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter( + rocksdb_options_t*, rocksdb_compactionfilter_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_filter_factory( + rocksdb_options_t*, rocksdb_compactionfilterfactory_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_compaction_readahead_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_comparator( + rocksdb_options_t*, rocksdb_comparator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_merge_operator( + rocksdb_options_t*, rocksdb_mergeoperator_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_uint64add_merge_operator( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression_per_level( + rocksdb_options_t* opt, int* level_values, size_t num_levels); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_create_if_missing( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_create_missing_column_families(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_error_if_exists( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_paranoid_checks( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_env(rocksdb_options_t*, + rocksdb_env_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log(rocksdb_options_t*, + rocksdb_logger_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_info_log_level( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_write_buffer_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_write_buffer_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_open_files( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void 
rocksdb_options_set_max_total_wal_size( + rocksdb_options_t* opt, uint64_t n); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression_options( + rocksdb_options_t*, int, int, int, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_prefix_extractor( + rocksdb_options_t*, rocksdb_slicetransform_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_num_levels( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_level0_file_num_compaction_trigger(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_level0_slowdown_writes_trigger(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_level0_stop_writes_trigger( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_mem_compaction_level( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_base( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_target_file_size_multiplier( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_bytes_for_level_base( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_bytes_for_level_multiplier(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_expanded_compaction_factor( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_grandparent_overlap_factor(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_bytes_for_level_multiplier_additional( + rocksdb_options_t*, int* level_values, size_t num_levels); +extern ROCKSDB_LIBRARY_API void rocksdb_options_enable_statistics( + rocksdb_options_t*); + +/* returns a pointer to a malloc()-ed, null terminated string */ +extern ROCKSDB_LIBRARY_API char* rocksdb_options_statistics_get_string( + rocksdb_options_t* opt); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_write_buffer_number( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_min_write_buffer_number_to_merge(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_write_buffer_number_to_maintain(rocksdb_options_t*, + int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_compactions( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_background_flushes( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_log_file_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_log_file_time_to_roll( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_keep_log_file_num( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_recycle_log_file_num( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_soft_rate_limit( + rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hard_rate_limit( + rocksdb_options_t*, double); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_rate_limit_delay_max_milliseconds(rocksdb_options_t*, + unsigned int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_manifest_file_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_table_cache_numshardbits( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_table_cache_remove_scan_count_limit(rocksdb_options_t*, 
+ int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_arena_block_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_fsync( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_db_log_dir( + rocksdb_options_t*, const char*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_wal_dir(rocksdb_options_t*, + const char*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_ttl_seconds( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_WAL_size_limit_MB( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_manifest_preallocation_size( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_purge_redundant_kvs_while_flush(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_os_buffer( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_reads( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_allow_mmap_writes( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_is_fd_close_on_exec( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_skip_log_error_on_recovery( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_stats_dump_period_sec( + rocksdb_options_t*, unsigned int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_advise_random_on_open( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_access_hint_on_compaction_start(rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_use_adaptive_mutex( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bytes_per_sync( + rocksdb_options_t*, uint64_t); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_verify_checksums_in_compaction(rocksdb_options_t*, + unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_filter_deletes( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_max_sequential_skip_in_iterations(rocksdb_options_t*, + uint64_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_disable_data_sync( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_disable_auto_compactions( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_delete_obsolete_files_period_micros(rocksdb_options_t*, + uint64_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_source_compaction_factor( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_options_prepare_for_bulk_load( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_vector_rep( + rocksdb_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_skip_list_rep( + rocksdb_options_t*, size_t, int32_t, int32_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_hash_link_list_rep( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_plain_table_factory( + rocksdb_options_t*, uint32_t, int, double, size_t); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_level_to_compress( + rocksdb_options_t* opt, int level); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_prefix_bloom_bits( + rocksdb_options_t*, uint32_t); +extern ROCKSDB_LIBRARY_API void 
+rocksdb_options_set_memtable_prefix_bloom_probes(rocksdb_options_t*, uint32_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_memtable_huge_page_size( + rocksdb_options_t*, size_t); + +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_max_successive_merges( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_min_partial_merge_operands( + rocksdb_options_t*, uint32_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_bloom_locality( + rocksdb_options_t*, uint32_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_support( + rocksdb_options_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_inplace_update_num_locks( + rocksdb_options_t*, size_t); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_report_bg_io_stats( + rocksdb_options_t*, int); + +enum { + rocksdb_no_compression = 0, + rocksdb_snappy_compression = 1, + rocksdb_zlib_compression = 2, + rocksdb_bz2_compression = 3, + rocksdb_lz4_compression = 4, + rocksdb_lz4hc_compression = 5 +}; +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compression( + rocksdb_options_t*, int); + +enum { + rocksdb_level_compaction = 0, + rocksdb_universal_compaction = 1, + rocksdb_fifo_compaction = 2 +}; +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_compaction_style( + rocksdb_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_options_set_universal_compaction_options( + rocksdb_options_t*, rocksdb_universal_compaction_options_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_options_set_fifo_compaction_options( + rocksdb_options_t* opt, rocksdb_fifo_compaction_options_t* fifo); + +/* Compaction Filter */ + +extern ROCKSDB_LIBRARY_API rocksdb_compactionfilter_t* +rocksdb_compactionfilter_create( + void* state, void (*destructor)(void*), + unsigned char (*filter)(void*, int level, const char* key, + size_t key_length, const char* existing_value, + size_t value_length, char** new_value, + size_t* new_value_length, + unsigned char* value_changed), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilter_destroy( + rocksdb_compactionfilter_t*); + +/* Compaction Filter Context */ + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactionfiltercontext_is_full_compaction( + rocksdb_compactionfiltercontext_t* context); + +extern ROCKSDB_LIBRARY_API unsigned char +rocksdb_compactionfiltercontext_is_manual_compaction( + rocksdb_compactionfiltercontext_t* context); + +/* Compaction Filter Factory */ + +extern ROCKSDB_LIBRARY_API rocksdb_compactionfilterfactory_t* +rocksdb_compactionfilterfactory_create( + void* state, void (*destructor)(void*), + rocksdb_compactionfilter_t* (*create_compaction_filter)( + void*, rocksdb_compactionfiltercontext_t* context), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_compactionfilterfactory_destroy( + rocksdb_compactionfilterfactory_t*); + +/* Comparator */ + +extern ROCKSDB_LIBRARY_API rocksdb_comparator_t* rocksdb_comparator_create( + void* state, void (*destructor)(void*), + int (*compare)(void*, const char* a, size_t alen, const char* b, + size_t blen), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_comparator_destroy( + rocksdb_comparator_t*); + +/* Filter policy */ + +extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* rocksdb_filterpolicy_create( + void* state, void (*destructor)(void*), + char* (*create_filter)(void*, const char* const* key_array, + const size_t* key_length_array, int num_keys, + size_t* filter_length), + unsigned char 
(*key_may_match)(void*, const char* key, size_t length, + const char* filter, size_t filter_length), + void (*delete_filter)(void*, const char* filter, size_t filter_length), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_filterpolicy_destroy( + rocksdb_filterpolicy_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_bloom(int bits_per_key); +extern ROCKSDB_LIBRARY_API rocksdb_filterpolicy_t* +rocksdb_filterpolicy_create_bloom_full(int bits_per_key); + +/* Merge Operator */ + +extern ROCKSDB_LIBRARY_API rocksdb_mergeoperator_t* +rocksdb_mergeoperator_create( + void* state, void (*destructor)(void*), + char* (*full_merge)(void*, const char* key, size_t key_length, + const char* existing_value, + size_t existing_value_length, + const char* const* operands_list, + const size_t* operands_list_length, int num_operands, + unsigned char* success, size_t* new_value_length), + char* (*partial_merge)(void*, const char* key, size_t key_length, + const char* const* operands_list, + const size_t* operands_list_length, int num_operands, + unsigned char* success, size_t* new_value_length), + void (*delete_value)(void*, const char* value, size_t value_length), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API void rocksdb_mergeoperator_destroy( + rocksdb_mergeoperator_t*); + +/* Read options */ + +extern ROCKSDB_LIBRARY_API rocksdb_readoptions_t* rocksdb_readoptions_create(); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_destroy( + rocksdb_readoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_verify_checksums( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_fill_cache( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_snapshot( + rocksdb_readoptions_t*, const rocksdb_snapshot_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_iterate_upper_bound( + rocksdb_readoptions_t*, const char* key, size_t keylen); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_read_tier( + rocksdb_readoptions_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_tailing( + rocksdb_readoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_readoptions_set_readahead_size( + rocksdb_readoptions_t*, size_t); + +/* Write options */ + +extern ROCKSDB_LIBRARY_API rocksdb_writeoptions_t* +rocksdb_writeoptions_create(); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_destroy( + rocksdb_writeoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_set_sync( + rocksdb_writeoptions_t*, unsigned char); +extern ROCKSDB_LIBRARY_API void rocksdb_writeoptions_disable_WAL( + rocksdb_writeoptions_t* opt, int disable); + +/* Flush options */ + +extern ROCKSDB_LIBRARY_API rocksdb_flushoptions_t* +rocksdb_flushoptions_create(); +extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_destroy( + rocksdb_flushoptions_t*); +extern ROCKSDB_LIBRARY_API void rocksdb_flushoptions_set_wait( + rocksdb_flushoptions_t*, unsigned char); + +/* Cache */ + +extern ROCKSDB_LIBRARY_API rocksdb_cache_t* rocksdb_cache_create_lru( + size_t capacity); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_destroy(rocksdb_cache_t* cache); +extern ROCKSDB_LIBRARY_API void rocksdb_cache_set_capacity( + rocksdb_cache_t* cache, size_t capacity); + +/* Env */ + +extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_default_env(); +extern ROCKSDB_LIBRARY_API rocksdb_env_t* rocksdb_create_mem_env(); +extern ROCKSDB_LIBRARY_API void 
rocksdb_env_set_background_threads( + rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API void +rocksdb_env_set_high_priority_background_threads(rocksdb_env_t* env, int n); +extern ROCKSDB_LIBRARY_API void rocksdb_env_join_all_threads( + rocksdb_env_t* env); +extern ROCKSDB_LIBRARY_API void rocksdb_env_destroy(rocksdb_env_t*); + +/* SliceTransform */ + +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* +rocksdb_slicetransform_create( + void* state, void (*destructor)(void*), + char* (*transform)(void*, const char* key, size_t length, + size_t* dst_length), + unsigned char (*in_domain)(void*, const char* key, size_t length), + unsigned char (*in_range)(void*, const char* key, size_t length), + const char* (*name)(void*)); +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* + rocksdb_slicetransform_create_fixed_prefix(size_t); +extern ROCKSDB_LIBRARY_API rocksdb_slicetransform_t* +rocksdb_slicetransform_create_noop(); +extern ROCKSDB_LIBRARY_API void rocksdb_slicetransform_destroy( + rocksdb_slicetransform_t*); + +/* Universal Compaction options */ + +enum { + rocksdb_similar_size_compaction_stop_style = 0, + rocksdb_total_size_compaction_stop_style = 1 +}; + +extern ROCKSDB_LIBRARY_API rocksdb_universal_compaction_options_t* +rocksdb_universal_compaction_options_create(); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_size_ratio( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_min_merge_width( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_max_merge_width( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_max_size_amplification_percent( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_compression_size_percent( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API void +rocksdb_universal_compaction_options_set_stop_style( + rocksdb_universal_compaction_options_t*, int); +extern ROCKSDB_LIBRARY_API void rocksdb_universal_compaction_options_destroy( + rocksdb_universal_compaction_options_t*); + +extern ROCKSDB_LIBRARY_API rocksdb_fifo_compaction_options_t* +rocksdb_fifo_compaction_options_create(); +extern ROCKSDB_LIBRARY_API void +rocksdb_fifo_compaction_options_set_max_table_files_size( + rocksdb_fifo_compaction_options_t* fifo_opts, uint64_t size); +extern ROCKSDB_LIBRARY_API void rocksdb_fifo_compaction_options_destroy( + rocksdb_fifo_compaction_options_t* fifo_opts); + +extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_count( + const rocksdb_livefiles_t*); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_name( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API int rocksdb_livefiles_level( + const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API size_t +rocksdb_livefiles_size(const rocksdb_livefiles_t*, int index); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_smallestkey( + const rocksdb_livefiles_t*, int index, size_t* size); +extern ROCKSDB_LIBRARY_API const char* rocksdb_livefiles_largestkey( + const rocksdb_livefiles_t*, int index, size_t* size); +extern ROCKSDB_LIBRARY_API void rocksdb_livefiles_destroy( + const rocksdb_livefiles_t*); + +/* Utility Helpers */ + +extern ROCKSDB_LIBRARY_API void rocksdb_get_options_from_string( + const rocksdb_options_t* base_options, const char* 
opts_str,
+    rocksdb_options_t* new_options, char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range(
+    rocksdb_t* db, const char* start_key, size_t start_key_len,
+    const char* limit_key, size_t limit_key_len, char** errptr);
+
+extern ROCKSDB_LIBRARY_API void rocksdb_delete_file_in_range_cf(
+    rocksdb_t* db, rocksdb_column_family_handle_t* column_family,
+    const char* start_key, size_t start_key_len, const char* limit_key,
+    size_t limit_key_len, char** errptr);
+
+// referring to convention (3), this should be used by client
+// to free memory that was malloc()ed
+extern ROCKSDB_LIBRARY_API void rocksdb_free(void* ptr);
+
+#ifdef __cplusplus
+}  /* end extern "C" */
+#endif
+
+#endif  /* STORAGE_ROCKSDB_INCLUDE_C_H_ */
diff --git a/external/rocksdb/include/rocksdb/cache.h b/external/rocksdb/include/rocksdb/cache.h
new file mode 100755
index 0000000000..53cb6c60f6
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/cache.h
@@ -0,0 +1,154 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// A Cache is an interface that maps keys to values. It has internal
+// synchronization and may be safely accessed concurrently from
+// multiple threads. It may automatically evict entries to make room
+// for new entries. Values have a specified charge against the cache
+// capacity. For example, a cache where the values are variable
+// length strings may use the length of the string as the charge for
+// the string.
+//
+// A builtin cache implementation with a least-recently-used eviction
+// policy is provided. Clients may use their own implementations if
+// they want something more sophisticated (like scan-resistance, a
+// custom eviction policy, variable cache sizing, etc.)
+
+#ifndef STORAGE_ROCKSDB_INCLUDE_CACHE_H_
+#define STORAGE_ROCKSDB_INCLUDE_CACHE_H_
+
+#include <memory>
+#include <stdint.h>
+#include "rocksdb/slice.h"
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+
+class Cache;
+
+// Create a new cache with a fixed size capacity. The cache is sharded
+// to 2^num_shard_bits shards, by hash of the key. The total capacity
+// is divided and evenly assigned to each shard.
+extern std::shared_ptr<Cache> NewLRUCache(size_t capacity,
+                                          int num_shard_bits = 6,
+                                          bool strict_capacity_limit = false);
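Note: the usual way to apply NewLRUCache is to share one block cache across databases through the block-based table factory. A hedged sketch (BlockBasedTableOptions and NewBlockBasedTableFactory come from rocksdb/table.h; the sizes here are arbitrary examples, not values from the diff):

#include "rocksdb/cache.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

// Sketch: wire a shared 512 MB LRU block cache into the table factory.
rocksdb::Options MakeOptionsWithSharedCache() {
  std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(
      512 * 1024 * 1024,  // capacity in bytes
      6,                  // 2^6 = 64 shards to reduce lock contention
      false);             // don't fail inserts when full; evict instead

  rocksdb::BlockBasedTableOptions table_options;
  table_options.block_cache = cache;

  rocksdb::Options options;
  options.table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  return options;
}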
+ // + // If handle is nullptr, it is as if Release is called immediately after + // insert. In case of error the value will be cleaned up. + // + // When the inserted entry is no longer needed, the key and + // value will be passed to "deleter". + virtual Status Insert(const Slice& key, void* value, size_t charge, + void (*deleter)(const Slice& key, void* value), + Handle** handle = nullptr) = 0; + + // If the cache has no mapping for "key", returns nullptr. + // + // Else return a handle that corresponds to the mapping. The caller + // must call this->Release(handle) when the returned mapping is no + // longer needed. + virtual Handle* Lookup(const Slice& key) = 0; + + // Release a mapping returned by a previous Lookup(). + // REQUIRES: handle must not have been released yet. + // REQUIRES: handle must have been returned by a method on *this. + virtual void Release(Handle* handle) = 0; + + // Return the value encapsulated in a handle returned by a + // successful Lookup(). + // REQUIRES: handle must not have been released yet. + // REQUIRES: handle must have been returned by a method on *this. + virtual void* Value(Handle* handle) = 0; + + // If the cache contains an entry for key, erase it. Note that the + // underlying entry will be kept around until all existing handles + // to it have been released. + virtual void Erase(const Slice& key) = 0; + // Return a new numeric id. May be used by multiple clients who are + // sharding the same cache to partition the key space. Typically the + // client will allocate a new id at startup and prepend the id to + // its cache keys. + virtual uint64_t NewId() = 0; + + // Sets the maximum configured capacity of the cache. When the new + // capacity is less than the old capacity and the existing usage is + // greater than the new capacity, the implementation will do its best to + // purge the released entries from the cache in order to lower the usage. + virtual void SetCapacity(size_t capacity) = 0; + + // Set whether to return error on insertion when cache reaches its full + // capacity. + virtual void SetStrictCapacityLimit(bool strict_capacity_limit) = 0; + + // Get the flag whether to return error on insertion when cache reaches its + // full capacity. + virtual bool HasStrictCapacityLimit() const = 0; + + // Returns the maximum configured capacity of the cache. + virtual size_t GetCapacity() const = 0; + + // Returns the memory size for the entries residing in the cache. + virtual size_t GetUsage() const = 0; + + // Returns the memory size for a specific entry in the cache. + virtual size_t GetUsage(Handle* handle) const = 0; + + // Returns the memory size for the entries in use by the system. + virtual size_t GetPinnedUsage() const = 0; + + // Call this on shutdown if you want to speed it up. Cache will disown + // any underlying data and will not free it on delete. This call will leak + // memory - call this only if you're shutting down the process. + // Any attempt to use the cache after this call will fail terribly. + // Always delete the DB object before calling this method! + virtual void DisownData(){ + // default implementation is noop + }; + + // Apply callback to all entries in the cache. + // If thread_safe is true, it will also lock the accesses. Otherwise, it will + // access the cache without the lock held. + virtual void ApplyToAllCacheEntries(void (*callback)(void*, size_t), + bool thread_safe) = 0; + + // Remove all entries. + // Prerequisite: no entry is referenced. + virtual void EraseUnRefEntries() = 0;
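For illustration, a minimal sketch of direct Cache usage against the interface declared above; error handling is abbreviated and the key/value are arbitrary:

#include <cstdio>
#include <memory>
#include <string>
#include "rocksdb/cache.h"

// Deleter invoked by the cache on eviction or Erase().
static void StringDeleter(const rocksdb::Slice& /*key*/, void* value) {
  delete static_cast<std::string*>(value);
}

int main() {
  std::shared_ptr<rocksdb::Cache> cache = rocksdb::NewLRUCache(1 << 20);
  auto* value = new std::string("cached-value");
  // Charge the string's size against the 1 MiB capacity.
  rocksdb::Status s =
      cache->Insert("key1", value, value->size(), StringDeleter);
  if (s.ok()) {
    rocksdb::Cache::Handle* h = cache->Lookup("key1");
    if (h != nullptr) {
      std::printf("%s\n", static_cast<std::string*>(cache->Value(h))->c_str());
      cache->Release(h);  // every successful Lookup() needs a Release()
    }
  }
  return 0;
}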
+ + private: + // No copying allowed + Cache(const Cache&); + Cache& operator=(const Cache&); +}; + +} // namespace rocksdb + +#endif // STORAGE_ROCKSDB_INCLUDE_CACHE_H_ diff --git a/external/rocksdb/include/rocksdb/compaction_filter.h b/external/rocksdb/include/rocksdb/compaction_filter.h new file mode 100755 index 0000000000..acdc3aa1bb --- /dev/null +++ b/external/rocksdb/include/rocksdb/compaction_filter.h @@ -0,0 +1,137 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// Copyright (c) 2013 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef STORAGE_ROCKSDB_INCLUDE_COMPACTION_FILTER_H_ +#define STORAGE_ROCKSDB_INCLUDE_COMPACTION_FILTER_H_ + +#include <memory> +#include <string> +#include <vector> + +namespace rocksdb { + +class Slice; +class SliceTransform; + +// Context information of a compaction run +struct CompactionFilterContext { + // Does this compaction run include all data files + bool is_full_compaction; + // Is this compaction requested by the client (true), + // or is it occurring as an automatic compaction process + bool is_manual_compaction; +}; + +// CompactionFilter allows an application to modify/delete a key-value at +// the time of compaction. + +class CompactionFilter { + public: + // Context information of a compaction run + struct Context { + // Does this compaction run include all data files + bool is_full_compaction; + // Is this compaction requested by the client (true), + // or is it occurring as an automatic compaction process + bool is_manual_compaction; + // Which column family this compaction is for. + uint32_t column_family_id; + }; + + virtual ~CompactionFilter() {} + + // The compaction process invokes this + // method for each kv that is being compacted. A return value + // of false indicates that the kv should be preserved in the + // output of this compaction run and a return value of true + // indicates that this key-value should be removed from the + // output of the compaction. The application can inspect + // the existing value of the key and make a decision based on it. + // + // Key-Values that are results of merge operation during compaction are not + // passed into this function. Currently, when you have a mix of Put()s and + // Merge()s on a same key, we only guarantee to process the merge operands + // through the compaction filters. Put()s might be processed, or might not. + // + // When the value is to be preserved, the application has the option + // to modify the existing_value and pass it back through new_value. + // value_changed needs to be set to true in this case. + // + // If you use the snapshot feature of RocksDB (i.e. call GetSnapshot() API on + // a DB* object), CompactionFilter might not be very useful for you. Due to + // guarantees we need to maintain, compaction process will not call Filter() + // on any keys that were written before the latest snapshot. In other words, + // compaction will only call Filter() on keys written after your most recent + // call to GetSnapshot(). In most cases, Filter() will not be called very + // often. This is something we're fixing.
See the discussion at: + // https://www.facebook.com/groups/mysqlonrocksdb/permalink/999723240091865/ + // + // If multithreaded compaction is being used *and* a single CompactionFilter + // instance was supplied via Options::compaction_filter, this method may be + // called from different threads concurrently. The application must ensure + // that the call is thread-safe. + // + // If the CompactionFilter was created by a factory, then it will only ever + // be used by a single thread that is doing the compaction run, and this + // call does not need to be thread-safe. However, multiple filters may be + // in existence and operating concurrently. + // + // The last paragraph is not true if you set max_subcompactions to more than + // 1. In that case, subcompactions from multiple threads may call a single + // CompactionFilter concurrently. + virtual bool Filter(int level, + const Slice& key, + const Slice& existing_value, + std::string* new_value, + bool* value_changed) const = 0; + + // The compaction process invokes this method on every merge operand. If this + // method returns true, the merge operand will be ignored and not written out + // in the compaction output. + // + // Note: If you are using a TransactionDB, it is not recommended to implement + // FilterMergeOperand(). If a Merge operation is filtered out, TransactionDB + // may not realize there is a write conflict and may allow a Transaction to + // Commit that should have failed. Instead, it is better to implement any + // Merge filtering inside the MergeOperator. + virtual bool FilterMergeOperand(int level, const Slice& key, + const Slice& operand) const { + return false; + } + + // By default, compaction will only call Filter() on keys written after the + // most recent call to GetSnapshot(). However, if the compaction filter + // overrides IgnoreSnapshots to make it return true, the compaction filter + // will be called even if the keys were written before the last snapshot. + // This behavior is to be used only when we want to delete a set of keys + // irrespective of snapshots. In particular, care should be taken + // to understand that the values of these keys will change even if we are + // using a snapshot. + virtual bool IgnoreSnapshots() const { return false; } + + // Returns a name that identifies this compaction filter. + // The name will be printed to the LOG file on start up for diagnosis. + virtual const char* Name() const = 0; +}; + +// Each compaction will create a new CompactionFilter, allowing the +// application to know about different compactions +class CompactionFilterFactory { + public: + virtual ~CompactionFilterFactory() { } + + virtual std::unique_ptr<CompactionFilter> CreateCompactionFilter( + const CompactionFilter::Context& context) = 0; + + // Returns a name that identifies this compaction filter factory. + virtual const char* Name() const = 0; +}; + +} // namespace rocksdb + +#endif // STORAGE_ROCKSDB_INCLUDE_COMPACTION_FILTER_H_ diff --git a/external/rocksdb/include/rocksdb/compaction_job_stats.h b/external/rocksdb/include/rocksdb/compaction_job_stats.h new file mode 100755 index 0000000000..cfd81f80e9 --- /dev/null +++ b/external/rocksdb/include/rocksdb/compaction_job_stats.h @@ -0,0 +1,85 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory.
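A hedged sketch of the CompactionFilter interface above; the "tmp:" key prefix is a hypothetical application convention, not part of RocksDB:

#include <string>
#include "rocksdb/compaction_filter.h"
#include "rocksdb/slice.h"

// Drops any key carrying the (hypothetical) "tmp:" prefix from the
// compaction output; everything else is preserved unmodified.
class DropTmpKeysFilter : public rocksdb::CompactionFilter {
 public:
  bool Filter(int /*level*/, const rocksdb::Slice& key,
              const rocksdb::Slice& /*existing_value*/,
              std::string* /*new_value*/,
              bool* /*value_changed*/) const override {
    return key.starts_with("tmp:");  // true == remove from output
  }
  const char* Name() const override { return "DropTmpKeysFilter"; }
};

An instance would typically be wired in through the column family options' compaction_filter field, or via a CompactionFilterFactory when per-compaction state is needed.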
+ +#pragma once +#include <stddef.h> +#include <stdint.h> +#include <string> + +namespace rocksdb { +struct CompactionJobStats { + CompactionJobStats() { Reset(); } + void Reset(); + // Aggregate the CompactionJobStats from another instance with this one + void Add(const CompactionJobStats& stats); + + // the elapsed time in microseconds of this compaction. + uint64_t elapsed_micros; + + // the number of compaction input records. + uint64_t num_input_records; + // the number of compaction input files. + size_t num_input_files; + // the number of compaction input files at the output level. + size_t num_input_files_at_output_level; + + // the number of compaction output records. + uint64_t num_output_records; + // the number of compaction output files. + size_t num_output_files; + + // true if the compaction is a manual compaction + bool is_manual_compaction; + + // the size of the compaction input in bytes. + uint64_t total_input_bytes; + // the size of the compaction output in bytes. + uint64_t total_output_bytes; + + // number of records being replaced by newer record associated with same key. + // this could be a new value or a deletion entry for that key so this field + // sums up all updated and deleted keys + uint64_t num_records_replaced; + + // the sum of the uncompressed input keys in bytes. + uint64_t total_input_raw_key_bytes; + // the sum of the uncompressed input values in bytes. + uint64_t total_input_raw_value_bytes; + + // the number of deletion entries before compaction. Deletion entries + // can disappear after compaction because they expired + uint64_t num_input_deletion_records; + // number of deletion records that were found obsolete and discarded + // because it is not possible to delete any more keys with this entry + // (i.e. all possible deletions resulting from it have been completed) + uint64_t num_expired_deletion_records; + + // number of corrupt keys (ParseInternalKey returned false when applied to + // the key) encountered and written out. + uint64_t num_corrupt_keys; + + // Following counters are only populated if + // options.report_bg_io_stats = true; + + // Time spent on file's Append() call. + uint64_t file_write_nanos; + + // Time spent on sync file range. + uint64_t file_range_sync_nanos; + + // Time spent on file fsync. + uint64_t file_fsync_nanos; + + // Time spent on preparing file write (fallocate, etc.) + uint64_t file_prepare_write_nanos; + + // 0-terminated strings storing the first 8 bytes of the smallest and + // largest key in the output. + static const size_t kMaxPrefixLength = 8; + + std::string smallest_output_key_prefix; + std::string largest_output_key_prefix; +}; +} // namespace rocksdb diff --git a/external/rocksdb/include/rocksdb/comparator.h b/external/rocksdb/include/rocksdb/comparator.h new file mode 100755 index 0000000000..1c67b0d4eb --- /dev/null +++ b/external/rocksdb/include/rocksdb/comparator.h @@ -0,0 +1,80 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors.
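A hedged sketch of consuming the CompactionJobStats counters above, assuming the EventListener hook from rocksdb/listener.h, whose CompactionJobInfo exposes a stats member of this type:

#include <cinttypes>
#include <cstdio>
#include "rocksdb/listener.h"

class CompactionStatsLogger : public rocksdb::EventListener {
 public:
  void OnCompactionCompleted(rocksdb::DB* /*db*/,
                             const rocksdb::CompactionJobInfo& info) override {
    // Log a few of the counters documented above.
    std::printf("compaction: %" PRIu64 " -> %" PRIu64 " records in %" PRIu64 " us\n",
                info.stats.num_input_records, info.stats.num_output_records,
                info.stats.elapsed_micros);
  }
};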
+ +#ifndef STORAGE_ROCKSDB_INCLUDE_COMPARATOR_H_ +#define STORAGE_ROCKSDB_INCLUDE_COMPARATOR_H_ + +#include <string> + +namespace rocksdb { + +class Slice; + +// A Comparator object provides a total order across slices that are +// used as keys in an sstable or a database. A Comparator implementation +// must be thread-safe since rocksdb may invoke its methods concurrently +// from multiple threads. +class Comparator { + public: + virtual ~Comparator(); + + // Three-way comparison. Returns value: + // < 0 iff "a" < "b", + // == 0 iff "a" == "b", + // > 0 iff "a" > "b" + virtual int Compare(const Slice& a, const Slice& b) const = 0; + + // Compares two slices for equality. The following invariant should always + // hold (and is the default implementation): + // Equal(a, b) iff Compare(a, b) == 0 + // Overwrite only if equality comparisons can be done more efficiently than + // three-way comparisons. + virtual bool Equal(const Slice& a, const Slice& b) const { + return Compare(a, b) == 0; + } + + // The name of the comparator. Used to check for comparator + // mismatches (i.e., a DB created with one comparator is + // accessed using a different comparator). + // + // The client of this package should switch to a new name whenever + // the comparator implementation changes in a way that will cause + // the relative ordering of any two keys to change. + // + // Names starting with "rocksdb." are reserved and should not be used + // by any clients of this package. + virtual const char* Name() const = 0; + + // Advanced functions: these are used to reduce the space requirements + // for internal data structures like index blocks. + + // If *start < limit, changes *start to a short string in [start,limit). + // Simple comparator implementations may return with *start unchanged, + // i.e., an implementation of this method that does nothing is correct. + virtual void FindShortestSeparator( + std::string* start, + const Slice& limit) const = 0; + + // Changes *key to a short string >= *key. + // Simple comparator implementations may return with *key unchanged, + // i.e., an implementation of this method that does nothing is correct. + virtual void FindShortSuccessor(std::string* key) const = 0; +}; + +// Return a builtin comparator that uses lexicographic byte-wise +// ordering. The result remains the property of this module and +// must not be deleted. +extern const Comparator* BytewiseComparator(); + +// Return a builtin comparator that uses reverse lexicographic byte-wise +// ordering. +extern const Comparator* ReverseBytewiseComparator(); + +} // namespace rocksdb + +#endif // STORAGE_ROCKSDB_INCLUDE_COMPARATOR_H_ diff --git a/external/rocksdb/include/rocksdb/convenience.h b/external/rocksdb/include/rocksdb/convenience.h new file mode 100755 index 0000000000..07b267eb5d --- /dev/null +++ b/external/rocksdb/include/rocksdb/convenience.h @@ -0,0 +1,112 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#pragma once + +#include <memory> +#include <string> +#include <unordered_map> + +#include "rocksdb/db.h" +#include "rocksdb/options.h" +#include "rocksdb/table.h" + +namespace rocksdb { + +#ifndef ROCKSDB_LITE
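Relating to the Comparator interface above, a hedged sketch of a minimal custom ordering (length-first, then byte-wise); the no-op Find* overrides are explicitly permitted by the interface comments:

#include <string>
#include "rocksdb/comparator.h"
#include "rocksdb/slice.h"

class LengthFirstComparator : public rocksdb::Comparator {
 public:
  int Compare(const rocksdb::Slice& a, const rocksdb::Slice& b) const override {
    if (a.size() != b.size()) return a.size() < b.size() ? -1 : 1;
    return a.compare(b);  // equal length: fall back to byte-wise order
  }
  // Must not start with "rocksdb."; changing the order means changing the name.
  const char* Name() const override { return "mydb.LengthFirstComparator"; }
  void FindShortestSeparator(std::string*, const rocksdb::Slice&) const override {}
  void FindShortSuccessor(std::string*) const override {}
};

It would be installed through the options' comparator field; the database must then always be reopened with the same ordering.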
+// Take a map of option name and option value, apply them into the +// base_options, and return the new options as a result. +// +// If input_strings_escaped is set to true, then each escaped character +// prefixed by '\' in the values of the opts_map will be further +// converted back to the raw string before assigning to the associated +// options. +Status GetColumnFamilyOptionsFromMap( + const ColumnFamilyOptions& base_options, + const std::unordered_map<std::string, std::string>& opts_map, + ColumnFamilyOptions* new_options, bool input_strings_escaped = false); + +// Take a map of option name and option value, apply them into the +// base_options, and return the new options as a result. +// +// If input_strings_escaped is set to true, then each escaped character +// prefixed by '\' in the values of the opts_map will be further +// converted back to the raw string before assigning to the associated +// options. +Status GetDBOptionsFromMap( + const DBOptions& base_options, + const std::unordered_map<std::string, std::string>& opts_map, + DBOptions* new_options, bool input_strings_escaped = false); + +Status GetBlockBasedTableOptionsFromMap( + const BlockBasedTableOptions& table_options, + const std::unordered_map<std::string, std::string>& opts_map, + BlockBasedTableOptions* new_table_options, + bool input_strings_escaped = false); + +Status GetPlainTableOptionsFromMap( + const PlainTableOptions& table_options, + const std::unordered_map<std::string, std::string>& opts_map, + PlainTableOptions* new_table_options, + bool input_strings_escaped = false); + +// Take a string representation of option names and values, apply them into the +// base_options, and return the new options as a result. The string has the +// following format: +// "write_buffer_size=1024;max_write_buffer_number=2" +// Nested options config is also possible. For example, you can define +// BlockBasedTableOptions as part of the string for block-based table factory: +// "write_buffer_size=1024;block_based_table_factory={block_size=4k};" +// "max_write_buffer_num=2" +Status GetColumnFamilyOptionsFromString( + const ColumnFamilyOptions& base_options, + const std::string& opts_str, + ColumnFamilyOptions* new_options); + +Status GetDBOptionsFromString( + const DBOptions& base_options, + const std::string& opts_str, + DBOptions* new_options); + +Status GetStringFromDBOptions(std::string* opts_str, + const DBOptions& db_options, + const std::string& delimiter = "; "); + +Status GetStringFromColumnFamilyOptions(std::string* opts_str, + const ColumnFamilyOptions& db_options, + const std::string& delimiter = "; "); + +Status GetStringFromCompressionType(std::string* compression_str, + CompressionType compression_type); + +Status GetBlockBasedTableOptionsFromString( + const BlockBasedTableOptions& table_options, + const std::string& opts_str, + BlockBasedTableOptions* new_table_options); + +Status GetPlainTableOptionsFromString( + const PlainTableOptions& table_options, + const std::string& opts_str, + PlainTableOptions* new_table_options); + +Status GetMemTableRepFactoryFromString( + const std::string& opts_str, + std::unique_ptr<MemTableRepFactory>* new_mem_factory); + +Status GetOptionsFromString(const Options& base_options, + const std::string& opts_str, Options* new_options); + +// Request stopping background work; if wait is true, wait until it's done. +void CancelAllBackgroundWork(DB* db, bool wait = false);
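A hedged usage sketch of GetOptionsFromString() with the "name=value;..." syntax documented above; the option values are arbitrary:

#include <cassert>
#include "rocksdb/convenience.h"

int main() {
  rocksdb::Options base;
  rocksdb::Options tuned;
  // Nested block-based table options use the brace syntax shown above.
  rocksdb::Status s = rocksdb::GetOptionsFromString(
      base,
      "write_buffer_size=1048576;max_write_buffer_number=4;"
      "block_based_table_factory={block_size=16384}",
      &tuned);
  assert(s.ok());
  return 0;
}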
+// Delete files which are entirely in the given range. +// Could leave some keys in the range which are in files which are not +// entirely in the range. +// Snapshots before the delete might not see the data in the given range. +Status DeleteFilesInRange(DB* db, ColumnFamilyHandle* column_family, + const Slice* begin, const Slice* end); +#endif // ROCKSDB_LITE + +} // namespace rocksdb diff --git a/external/rocksdb/include/rocksdb/db.h b/external/rocksdb/include/rocksdb/db.h new file mode 100755 index 0000000000..3a1d6b33cc --- /dev/null +++ b/external/rocksdb/include/rocksdb/db.h @@ -0,0 +1,936 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef STORAGE_ROCKSDB_INCLUDE_DB_H_ +#define STORAGE_ROCKSDB_INCLUDE_DB_H_ + +#include <stdint.h> +#include <stdio.h> +#include <memory> +#include <string> +#include <unordered_map> +#include <vector> +#include "rocksdb/immutable_options.h" +#include "rocksdb/iterator.h" +#include "rocksdb/listener.h" +#include "rocksdb/metadata.h" +#include "rocksdb/options.h" +#include "rocksdb/snapshot.h" +#include "rocksdb/sst_file_writer.h" +#include "rocksdb/thread_status.h" +#include "rocksdb/transaction_log.h" +#include "rocksdb/types.h" +#include "rocksdb/version.h" + +#ifdef _WIN32 +// Windows API macro interference +#undef DeleteFile +#endif + + +namespace rocksdb { + +struct Options; +struct DBOptions; +struct ColumnFamilyOptions; +struct ReadOptions; +struct WriteOptions; +struct FlushOptions; +struct CompactionOptions; +struct CompactRangeOptions; +struct TableProperties; +struct ExternalSstFileInfo; +class WriteBatch; +class Env; +class EventListener; + +using std::unique_ptr; + +extern const std::string kDefaultColumnFamilyName; +struct ColumnFamilyDescriptor { + std::string name; + ColumnFamilyOptions options; + ColumnFamilyDescriptor() + : name(kDefaultColumnFamilyName), options(ColumnFamilyOptions()) {} + ColumnFamilyDescriptor(const std::string& _name, + const ColumnFamilyOptions& _options) + : name(_name), options(_options) {} +}; + +class ColumnFamilyHandle { + public: + virtual ~ColumnFamilyHandle() {} + // Returns the name of the column family associated with the current handle. + virtual const std::string& GetName() const = 0; + // Returns the ID of the column family associated with the current handle. + virtual uint32_t GetID() const = 0; + // Fills "*desc" with the up-to-date descriptor of the column family + // associated with this handle. Since it fills "*desc" with the up-to-date + // information, this call might internally lock and release DB mutex to + // access the up-to-date CF options. In addition, all the pointer-typed + // options cannot be referenced any longer than the original options exist. + // + // Note that this function is not supported in RocksDBLite. + virtual Status GetDescriptor(ColumnFamilyDescriptor* desc) = 0; +}; + +static const int kMajorVersion = __ROCKSDB_MAJOR__; +static const int kMinorVersion = __ROCKSDB_MINOR__; + +// A range of keys +struct Range { + Slice start; // Included in the range + Slice limit; // Not included in the range + + Range() { } + Range(const Slice& s, const Slice& l) : start(s), limit(l) { } +};
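A hedged sketch of opening a database with column families, per the DB::Open() contract documented on the class just below; the path and the "users" family are illustrative:

#include <cassert>
#include <vector>
#include "rocksdb/db.h"

int main() {
  rocksdb::DBOptions db_options;
  db_options.create_if_missing = true;
  db_options.create_missing_column_families = true;
  std::vector<rocksdb::ColumnFamilyDescriptor> families = {
      {rocksdb::kDefaultColumnFamilyName, rocksdb::ColumnFamilyOptions()},
      {"users", rocksdb::ColumnFamilyOptions()}};
  std::vector<rocksdb::ColumnFamilyHandle*> handles;
  rocksdb::DB* db = nullptr;
  rocksdb::Status s =
      rocksdb::DB::Open(db_options, "/tmp/testdb", families, &handles, &db);
  assert(s.ok());  // ALL existing column families must be listed, or Open fails
  // Close every column family handle before deleting the DB.
  for (auto* h : handles) db->DestroyColumnFamilyHandle(h);
  delete db;
  return 0;
}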
+// A collection of table properties objects, where +// key: is the table's file name. +// value: the table properties object of the given table. +typedef std::unordered_map<std::string, std::shared_ptr<const TableProperties>> + TablePropertiesCollection; + +// A DB is a persistent ordered map from keys to values. +// A DB is safe for concurrent access from multiple threads without +// any external synchronization. +class DB { + public: + // Open the database with the specified "name". + // Stores a pointer to a heap-allocated database in *dbptr and returns + // OK on success. + // Stores nullptr in *dbptr and returns a non-OK status on error. + // Caller should delete *dbptr when it is no longer needed. + static Status Open(const Options& options, + const std::string& name, + DB** dbptr); + + // Open the database for read only. All DB interfaces + // that modify data, like put/delete, will return error. + // If the db is opened in read only mode, then no compactions + // will happen. + // + // Not supported in ROCKSDB_LITE, in which case the function will + // return Status::NotSupported. + static Status OpenForReadOnly(const Options& options, + const std::string& name, DB** dbptr, + bool error_if_log_file_exist = false); + + // Open the database for read only with column families. When opening a DB + // read only, you can specify only a subset of column families in the + // database that should be opened. However, you always need to specify the + // default column family. The default column family name is 'default' and + // it's stored in rocksdb::kDefaultColumnFamilyName. + // + // Not supported in ROCKSDB_LITE, in which case the function will + // return Status::NotSupported. + static Status OpenForReadOnly( + const DBOptions& db_options, const std::string& name, + const std::vector<ColumnFamilyDescriptor>& column_families, + std::vector<ColumnFamilyHandle*>* handles, DB** dbptr, + bool error_if_log_file_exist = false); + + // Open DB with column families. + // db_options specify database specific options + // column_families is the vector of all column families in the database, + // containing column family name and options. You need to open ALL column + // families in the database. To get the list of column families, you can use + // ListColumnFamilies(). Also, you can open only a subset of column families + // for read-only access. + // The default column family name is 'default' and it's stored + // in rocksdb::kDefaultColumnFamilyName. + // If everything is OK, handles will on return be the same size + // as column_families --- handles[i] will be a handle that you + // will use to operate on column family column_families[i]. + // Before deleting the DB, you have to close all column families by calling + // DestroyColumnFamilyHandle() with all the handles. + static Status Open(const DBOptions& db_options, const std::string& name, + const std::vector<ColumnFamilyDescriptor>& column_families, + std::vector<ColumnFamilyHandle*>* handles, DB** dbptr); + + // ListColumnFamilies will open the DB specified by argument name + // and return the list of all column families in that DB + // through the column_families argument. The ordering of + // column families in column_families is unspecified. + static Status ListColumnFamilies(const DBOptions& db_options, + const std::string& name, + std::vector<std::string>* column_families); + + DB() { } + virtual ~DB(); + + // Create a column_family and return the handle of the column family + // through the argument handle. + virtual Status CreateColumnFamily(const ColumnFamilyOptions& options, + const std::string& column_family_name, + ColumnFamilyHandle** handle); + + // Drop a column family specified by column_family handle. This call + // only records a drop record in the manifest and prevents the column + // family from flushing and compacting.
+ virtual Status DropColumnFamily(ColumnFamilyHandle* column_family); + // Close a column family specified by column_family handle and destroy + // the column family handle specified to avoid double deletion. This call + // deletes the column family handle by default. Use this method to + // close column family instead of deleting column family handle directly + virtual Status DestroyColumnFamilyHandle(ColumnFamilyHandle* column_family); + + // Set the database entry for "key" to "value". + // If "key" already exists, it will be overwritten. + // Returns OK on success, and a non-OK status on error. + // Note: consider setting options.sync = true. + virtual Status Put(const WriteOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) = 0; + virtual Status Put(const WriteOptions& options, const Slice& key, + const Slice& value) { + return Put(options, DefaultColumnFamily(), key, value); + } + + // Remove the database entry (if any) for "key". Returns OK on + // success, and a non-OK status on error. It is not an error if "key" + // did not exist in the database. + // Note: consider setting options.sync = true. + virtual Status Delete(const WriteOptions& options, + ColumnFamilyHandle* column_family, + const Slice& key) = 0; + virtual Status Delete(const WriteOptions& options, const Slice& key) { + return Delete(options, DefaultColumnFamily(), key); + } + + // Remove the database entry for "key". Requires that the key exists + // and was not overwritten. Returns OK on success, and a non-OK status + // on error. It is not an error if "key" did not exist in the database. + // + // If a key is overwritten (by calling Put() multiple times), then the result + // of calling SingleDelete() on this key is undefined. SingleDelete() only + // behaves correctly if there has been only one Put() for this key since the + // previous call to SingleDelete() for this key. + // + // This feature is currently an experimental performance optimization + // for a very specific workload. It is up to the caller to ensure that + // SingleDelete is only used for a key that is not deleted using Delete() or + // written using Merge(). Mixing SingleDelete operations with Deletes and + // Merges can result in undefined behavior. + // + // Note: consider setting options.sync = true. + virtual Status SingleDelete(const WriteOptions& options, + ColumnFamilyHandle* column_family, + const Slice& key) = 0; + virtual Status SingleDelete(const WriteOptions& options, const Slice& key) { + return SingleDelete(options, DefaultColumnFamily(), key); + } + + // Merge the database entry for "key" with "value". Returns OK on success, + // and a non-OK status on error. The semantics of this operation is + // determined by the user provided merge_operator when opening DB. + // Note: consider setting options.sync = true. + virtual Status Merge(const WriteOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) = 0; + virtual Status Merge(const WriteOptions& options, const Slice& key, + const Slice& value) { + return Merge(options, DefaultColumnFamily(), key, value); + } + + // Apply the specified updates to the database. + // If `updates` contains no update, WAL will still be synced if + // options.sync=true. + // Returns OK on success, non-OK on failure. + // Note: consider setting options.sync = true. 
+ virtual Status Write(const WriteOptions& options, WriteBatch* updates) = 0; + + // If the database contains an entry for "key" store the + // corresponding value in *value and return OK. + // + // If there is no entry for "key" leave *value unchanged and return + // a status for which Status::IsNotFound() returns true. + // + // May return some other Status on an error. + virtual Status Get(const ReadOptions& options, + ColumnFamilyHandle* column_family, const Slice& key, + std::string* value) = 0; + virtual Status Get(const ReadOptions& options, const Slice& key, std::string* value) { + return Get(options, DefaultColumnFamily(), key, value); + } + + // If keys[i] does not exist in the database, then the i'th returned + // status will be one for which Status::IsNotFound() is true, and + // (*values)[i] will be set to some arbitrary value (often ""). Otherwise, + // the i'th returned status will have Status::ok() true, and (*values)[i] + // will store the value associated with keys[i]. + // + // (*values) will always be resized to be the same size as (keys). + // Similarly, the number of returned statuses will be the number of keys. + // Note: keys will not be "de-duplicated". Duplicate keys will return + // duplicate values in order. + virtual std::vector<Status> MultiGet( + const ReadOptions& options, + const std::vector<ColumnFamilyHandle*>& column_family, + const std::vector<Slice>& keys, std::vector<std::string>* values) = 0; + virtual std::vector<Status> MultiGet(const ReadOptions& options, + const std::vector<Slice>& keys, + std::vector<std::string>* values) { + return MultiGet(options, std::vector<ColumnFamilyHandle*>( + keys.size(), DefaultColumnFamily()), + keys, values); + } + + // If the key definitely does not exist in the database, then this method + // returns false, else true. If the caller wants to obtain value when the key + // is found in memory, a bool for 'value_found' must be passed. 'value_found' + // will be true on return if value has been set properly. + // This check is potentially lighter-weight than invoking DB::Get(). One way + // to make this lighter weight is to avoid doing any IOs. + // Default implementation here returns true and sets 'value_found' to false + virtual bool KeyMayExist(const ReadOptions& /*options*/, + ColumnFamilyHandle* /*column_family*/, + const Slice& /*key*/, std::string* /*value*/, + bool* value_found = nullptr) { + if (value_found != nullptr) { + *value_found = false; + } + return true; + } + virtual bool KeyMayExist(const ReadOptions& options, const Slice& key, + std::string* value, bool* value_found = nullptr) { + return KeyMayExist(options, DefaultColumnFamily(), key, value, value_found); + } + + // Return a heap-allocated iterator over the contents of the database. + // The result of NewIterator() is initially invalid (caller must + // call one of the Seek methods on the iterator before using it). + // + // Caller should delete the iterator when it is no longer needed. + // The returned iterator should be deleted before this db is deleted. + virtual Iterator* NewIterator(const ReadOptions& options, + ColumnFamilyHandle* column_family) = 0; + virtual Iterator* NewIterator(const ReadOptions& options) { + return NewIterator(options, DefaultColumnFamily()); + } + // Returns iterators from a consistent database state across multiple + // column families. Iterators are heap allocated and need to be deleted + // before the db is deleted + virtual Status NewIterators( + const ReadOptions& options, + const std::vector<ColumnFamilyHandle*>& column_families, + std::vector<Iterator*>* iterators) = 0;
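A hedged sketch of the basic read/write path documented above, assuming db was opened as in the earlier sketch:

#include <cassert>
#include <string>
#include "rocksdb/db.h"

void ReadWriteExample(rocksdb::DB* db) {
  rocksdb::WriteOptions wo;
  wo.sync = true;  // the comments above suggest considering synchronous writes
  assert(db->Put(wo, "key1", "value1").ok());

  std::string value;
  rocksdb::Status s = db->Get(rocksdb::ReadOptions(), "key1", &value);
  assert(s.ok() && value == "value1");
  s = db->Get(rocksdb::ReadOptions(), "missing", &value);
  assert(s.IsNotFound());  // *value is left unchanged in this case

  // Iterators are heap allocated and must be deleted before the DB.
  rocksdb::Iterator* it = db->NewIterator(rocksdb::ReadOptions());
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    // it->key() / it->value() yield Slices valid until the next move
  }
  delete it;
}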
+ + // Return a handle to the current DB state. Iterators created with + // this handle will all observe a stable snapshot of the current DB + // state. The caller must call ReleaseSnapshot(result) when the + // snapshot is no longer needed. + // + // nullptr will be returned if the DB fails to take a snapshot or does + // not support snapshots. + virtual const Snapshot* GetSnapshot() = 0; + + // Release a previously acquired snapshot. The caller must not + // use "snapshot" after this call. + virtual void ReleaseSnapshot(const Snapshot* snapshot) = 0; + +#ifndef ROCKSDB_LITE + // Contains all valid property arguments for GetProperty(). + // + // NOTE: Property names cannot end in numbers since those are interpreted as + // arguments, e.g., see kNumFilesAtLevelPrefix. + struct Properties { + // "rocksdb.num-files-at-level<N>" - returns string containing the number + // of files at level <N>, where <N> is an ASCII representation of a + // level number (e.g., "0"). + static const std::string kNumFilesAtLevelPrefix; + + // "rocksdb.compression-ratio-at-level<N>" - returns string containing the + // compression ratio of data at level <N>, where <N> is an ASCII + // representation of a level number (e.g., "0"). Here, compression + // ratio is defined as uncompressed data size / compressed file size. + // Returns "-1.0" if no open files at level <N>. + static const std::string kCompressionRatioAtLevelPrefix; + + // "rocksdb.stats" - returns a multi-line string containing the data + // described by kCFStats followed by the data described by kDBStats. + static const std::string kStats; + + // "rocksdb.sstables" - returns a multi-line string summarizing current + // SST files. + static const std::string kSSTables; + + // "rocksdb.cfstats" - returns a multi-line string with general column + // family stats per-level over db's lifetime ("L<n>"), aggregated over + // db's lifetime ("Sum"), and aggregated over the interval since the + // last retrieval ("Int"). + static const std::string kCFStats; + + // "rocksdb.dbstats" - returns a multi-line string with general database + // stats, both cumulative (over the db's lifetime) and interval (since + // the last retrieval of kDBStats). + static const std::string kDBStats; + + // "rocksdb.levelstats" - returns multi-line string containing the number + // of files per level and total size of each level (MB). + static const std::string kLevelStats; + + // "rocksdb.num-immutable-mem-table" - returns number of immutable + // memtables that have not yet been flushed. + static const std::string kNumImmutableMemTable; + + // "rocksdb.num-immutable-mem-table-flushed" - returns number of immutable + // memtables that have already been flushed. + static const std::string kNumImmutableMemTableFlushed; + + // "rocksdb.mem-table-flush-pending" - returns 1 if a memtable flush is + // pending; otherwise, returns 0. + static const std::string kMemTableFlushPending; + + // "rocksdb.num-running-flushes" - returns the number of currently running + // flushes. + static const std::string kNumRunningFlushes; + + // "rocksdb.compaction-pending" - returns 1 if at least one compaction is + // pending; otherwise, returns 0. + static const std::string kCompactionPending; + + // "rocksdb.num-running-compactions" - returns the number of currently + // running compactions. + static const std::string kNumRunningCompactions; + + // "rocksdb.background-errors" - returns accumulated number of background + // errors. + static const std::string kBackgroundErrors; + + // "rocksdb.cur-size-active-mem-table" - returns approximate size of active + // memtable (bytes).
+ static const std::string kCurSizeActiveMemTable; + + // "rocksdb.cur-size-all-mem-tables" - returns approximate size of active + // and unflushed immutable memtables (bytes). + static const std::string kCurSizeAllMemTables; + + // "rocksdb.size-all-mem-tables" - returns approximate size of active, + // unflushed immutable, and pinned immutable memtables (bytes). + static const std::string kSizeAllMemTables; + + // "rocksdb.num-entries-active-mem-table" - returns total number of entries + // in the active memtable. + static const std::string kNumEntriesActiveMemTable; + + // "rocksdb.num-entries-imm-mem-tables" - returns total number of entries + // in the unflushed immutable memtables. + static const std::string kNumEntriesImmMemTables; + + // "rocksdb.num-deletes-active-mem-table" - returns total number of delete + // entries in the active memtable. + static const std::string kNumDeletesActiveMemTable; + + // "rocksdb.num-deletes-imm-mem-tables" - returns total number of delete + // entries in the unflushed immutable memtables. + static const std::string kNumDeletesImmMemTables; + + // "rocksdb.estimate-num-keys" - returns estimated number of total keys in + // the active and unflushed immutable memtables. + static const std::string kEstimateNumKeys; + + // "rocksdb.estimate-table-readers-mem" - returns estimated memory used for + // reading SST tables, excluding memory used in block cache (e.g., + // filter and index blocks). + static const std::string kEstimateTableReadersMem; + + // "rocksdb.is-file-deletions-enabled" - returns 0 if deletion of obsolete + // files is enabled; otherwise, returns a non-zero number. + static const std::string kIsFileDeletionsEnabled; + + // "rocksdb.num-snapshots" - returns number of unreleased snapshots of the + // database. + static const std::string kNumSnapshots; + + // "rocksdb.oldest-snapshot-time" - returns number representing unix + // timestamp of oldest unreleased snapshot. + static const std::string kOldestSnapshotTime; + + // "rocksdb.num-live-versions" - returns number of live versions. `Version` + // is an internal data structure. See version_set.h for details. More + // live versions often mean more SST files are held from being deleted, + // by iterators or unfinished compactions. + static const std::string kNumLiveVersions; + + // "rocksdb.current-super-version-number" - returns number of the current + // LSM version. It is a uint64_t integer number, incremented after there is + // any change to the LSM tree. The number is not preserved after restarting + // the DB. After DB restart, it will start from 0 again. + static const std::string kCurrentSuperVersionNumber; + + // "rocksdb.estimate-live-data-size" - returns an estimate of the amount of + // live data in bytes. + static const std::string kEstimateLiveDataSize; + + // "rocksdb.total-sst-files-size" - returns total size (bytes) of all SST + // files. + // WARNING: may slow down online queries if there are too many files. + static const std::string kTotalSstFilesSize; + + // "rocksdb.base-level" - returns the level to which L0 data will be + // compacted. + static const std::string kBaseLevel;
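A short sketch of querying these property names through GetProperty()/GetIntProperty(), which are declared just below:

#include <cinttypes>
#include <cstdio>
#include <string>
#include "rocksdb/db.h"

void DumpDbStats(rocksdb::DB* db) {
  uint64_t num_keys = 0;
  // Integer-valued property documented above.
  if (db->GetIntProperty("rocksdb.estimate-num-keys", &num_keys)) {
    std::printf("~%" PRIu64 " keys\n", num_keys);
  }
  std::string stats;
  // Multi-line string property (kCFStats followed by kDBStats).
  if (db->GetProperty("rocksdb.stats", &stats)) {
    std::printf("%s\n", stats.c_str());
  }
}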
+ // "rocksdb.estimate-pending-compaction-bytes" - returns estimated total + // number of bytes compaction needs to rewrite to get all levels down + // to under target size. Not valid for compactions other than level-based. + static const std::string kEstimatePendingCompactionBytes; + + // "rocksdb.aggregated-table-properties" - returns a string representation + // of the aggregated table properties of the target column family. + static const std::string kAggregatedTableProperties; + + // "rocksdb.aggregated-table-properties-at-level<N>", same as the previous + // one but only returns the aggregated table properties of the + // specified level <N> at the target column family. + static const std::string kAggregatedTablePropertiesAtLevel; + }; +#endif /* ROCKSDB_LITE */ + + // DB implementations can export properties about their state via this method. + // If "property" is a valid property understood by this DB implementation (see + // Properties struct above for valid options), fills "*value" with its current + // value and returns true. Otherwise, returns false. + virtual bool GetProperty(ColumnFamilyHandle* column_family, + const Slice& property, std::string* value) = 0; + virtual bool GetProperty(const Slice& property, std::string* value) { + return GetProperty(DefaultColumnFamily(), property, value); + } + + // Similar to GetProperty(), but only works for a subset of properties whose + // return value is an integer. Return the value by integer. Supported + // properties: + // "rocksdb.num-immutable-mem-table" + // "rocksdb.mem-table-flush-pending" + // "rocksdb.compaction-pending" + // "rocksdb.background-errors" + // "rocksdb.cur-size-active-mem-table" + // "rocksdb.cur-size-all-mem-tables" + // "rocksdb.size-all-mem-tables" + // "rocksdb.num-entries-active-mem-table" + // "rocksdb.num-entries-imm-mem-tables" + // "rocksdb.num-deletes-active-mem-table" + // "rocksdb.num-deletes-imm-mem-tables" + // "rocksdb.estimate-num-keys" + // "rocksdb.estimate-table-readers-mem" + // "rocksdb.is-file-deletions-enabled" + // "rocksdb.num-snapshots" + // "rocksdb.oldest-snapshot-time" + // "rocksdb.num-live-versions" + // "rocksdb.current-super-version-number" + // "rocksdb.estimate-live-data-size" + // "rocksdb.total-sst-files-size" + // "rocksdb.base-level" + // "rocksdb.estimate-pending-compaction-bytes" + // "rocksdb.num-running-compactions" + // "rocksdb.num-running-flushes" + virtual bool GetIntProperty(ColumnFamilyHandle* column_family, + const Slice& property, uint64_t* value) = 0; + virtual bool GetIntProperty(const Slice& property, uint64_t* value) { + return GetIntProperty(DefaultColumnFamily(), property, value); + } + + // Same as GetIntProperty(), but this one returns the aggregated int + // property from all column families. + virtual bool GetAggregatedIntProperty(const Slice& property, + uint64_t* value) = 0; + + // For each i in [0,n-1], store in "sizes[i]", the approximate + // file system space used by keys in "[range[i].start .. range[i].limit)". + // + // Note that the returned sizes measure file system space usage, so + // if the user data compresses by a factor of ten, the returned + // sizes will be one-tenth the size of the corresponding user data size. + // + // If include_memtable is set to true, then the result will also + // include recently written data in the mem-tables if + // the mem-table type supports it.
+ virtual void GetApproximateSizes(ColumnFamilyHandle* column_family, + const Range* range, int n, uint64_t* sizes, + bool include_memtable = false) = 0; + virtual void GetApproximateSizes(const Range* range, int n, uint64_t* sizes, + bool include_memtable = false) { + GetApproximateSizes(DefaultColumnFamily(), range, n, sizes, + include_memtable); + } + + // Compact the underlying storage for the key range [*begin,*end]. + // The actual compaction interval might be a superset of [*begin, *end]. + // In particular, deleted and overwritten versions are discarded, + // and the data is rearranged to reduce the cost of operations + // needed to access the data. This operation should typically only + // be invoked by users who understand the underlying implementation. + // + // begin==nullptr is treated as a key before all keys in the database. + // end==nullptr is treated as a key after all keys in the database. + // Therefore the following call will compact the entire database: + // db->CompactRange(options, nullptr, nullptr); + // Note that after the entire database is compacted, all data are pushed + // down to the last level containing any data. If the total data size after + // compaction is reduced, that level might not be appropriate for hosting all + // the files. In this case, the client could set options.change_level to true, + // to move the files back to the minimum level capable of holding the data set + // or a given level (specified by non-negative options.target_level). + virtual Status CompactRange(const CompactRangeOptions& options, + ColumnFamilyHandle* column_family, + const Slice* begin, const Slice* end) = 0; + virtual Status CompactRange(const CompactRangeOptions& options, + const Slice* begin, const Slice* end) { + return CompactRange(options, DefaultColumnFamily(), begin, end); + } + +#if defined(__GNUC__) || defined(__clang__) + __attribute__((__deprecated__)) +#elif _WIN32 + __declspec(deprecated) +#endif + virtual Status + CompactRange(ColumnFamilyHandle* column_family, const Slice* begin, + const Slice* end, bool change_level = false, + int target_level = -1, uint32_t target_path_id = 0) { + CompactRangeOptions options; + options.change_level = change_level; + options.target_level = target_level; + options.target_path_id = target_path_id; + return CompactRange(options, column_family, begin, end); + } +#if defined(__GNUC__) || defined(__clang__) + __attribute__((__deprecated__)) +#elif _WIN32 + __declspec(deprecated) +#endif + virtual Status + CompactRange(const Slice* begin, const Slice* end, bool change_level = false, + int target_level = -1, uint32_t target_path_id = 0) { + CompactRangeOptions options; + options.change_level = change_level; + options.target_level = target_level; + options.target_path_id = target_path_id; + return CompactRange(options, DefaultColumnFamily(), begin, end); + } + + virtual Status SetOptions( + ColumnFamilyHandle* /*column_family*/, + const std::unordered_map<std::string, std::string>& /*new_options*/) { + return Status::NotSupported("Not implemented"); + } + virtual Status SetOptions( + const std::unordered_map<std::string, std::string>& new_options) { + return SetOptions(DefaultColumnFamily(), new_options); + } + + // CompactFiles() inputs a list of files specified by file numbers and + // compacts them to the specified level. Note that the behavior is different + // from CompactRange() in that CompactFiles() performs the compaction job + // using the CURRENT thread.
+ // + // @see GetDataBaseMetaData + // @see GetColumnFamilyMetaData + virtual Status CompactFiles( + const CompactionOptions& compact_options, + ColumnFamilyHandle* column_family, + const std::vector<std::string>& input_file_names, + const int output_level, const int output_path_id = -1) = 0; + + virtual Status CompactFiles( + const CompactionOptions& compact_options, + const std::vector<std::string>& input_file_names, + const int output_level, const int output_path_id = -1) { + return CompactFiles(compact_options, DefaultColumnFamily(), + input_file_names, output_level, output_path_id); + } + + // This function will wait until all currently running background processes + // finish. After it returns, no background process will be run until + // ContinueBackgroundWork() is called. + virtual Status PauseBackgroundWork() = 0; + virtual Status ContinueBackgroundWork() = 0; + + // This function will enable automatic compactions for the given column + // families if they were previously disabled. The function will first set the + // disable_auto_compactions option for each column family to 'false', after + // which it will schedule a flush/compaction. + // + // NOTE: Setting disable_auto_compactions to 'false' through SetOptions() API + // does NOT schedule a flush/compaction afterwards, and only changes the + // parameter itself within the column family option. + // + virtual Status EnableAutoCompaction( + const std::vector<ColumnFamilyHandle*>& column_family_handles) = 0; + + // Number of levels used for this DB. + virtual int NumberLevels(ColumnFamilyHandle* column_family) = 0; + virtual int NumberLevels() { return NumberLevels(DefaultColumnFamily()); } + + // Maximum level to which a new compacted memtable is pushed if it + // does not create overlap. + virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family) = 0; + virtual int MaxMemCompactionLevel() { + return MaxMemCompactionLevel(DefaultColumnFamily()); + } + + // Number of files in level-0 that would stop writes. + virtual int Level0StopWriteTrigger(ColumnFamilyHandle* column_family) = 0; + virtual int Level0StopWriteTrigger() { + return Level0StopWriteTrigger(DefaultColumnFamily()); + } + + // Get DB name -- the exact same name that was provided as an argument to + // DB::Open() + virtual const std::string& GetName() const = 0; + + // Get Env object from the DB + virtual Env* GetEnv() const = 0; + + // Get DB Options that we use. During the process of opening the + // column family, the options provided when calling DB::Open() or + // DB::CreateColumnFamily() will have been "sanitized" and transformed + // in an implementation-defined manner. + virtual const Options& GetOptions(ColumnFamilyHandle* column_family) + const = 0; + virtual const Options& GetOptions() const { + return GetOptions(DefaultColumnFamily()); + } + + virtual const DBOptions& GetDBOptions() const = 0; + + // Flush all mem-table data. + virtual Status Flush(const FlushOptions& options, + ColumnFamilyHandle* column_family) = 0; + virtual Status Flush(const FlushOptions& options) { + return Flush(options, DefaultColumnFamily()); + } + + // Sync the WAL. Note that Write() followed by SyncWAL() is not exactly the + // same as Write() with sync=true: in the latter case the changes won't be + // visible until the sync is done. + // Currently only works if allow_mmap_writes = false in Options. + virtual Status SyncWAL() = 0; + + // The sequence number of the most recent transaction. + virtual SequenceNumber GetLatestSequenceNumber() const = 0; + +#ifndef ROCKSDB_LITE +
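A hedged sketch of the flush and background-work controls above:

#include <cassert>
#include "rocksdb/db.h"

void FlushAndPause(rocksdb::DB* db) {
  rocksdb::FlushOptions fo;
  fo.wait = true;  // block until the memtable flush completes
  assert(db->Flush(fo).ok());

  assert(db->PauseBackgroundWork().ok());     // no flushes/compactions run
  // ... e.g. copy live files for an ad-hoc snapshot ...
  assert(db->ContinueBackgroundWork().ok());  // resume background work
}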
+ // Prevent file deletions. Compactions will continue to occur, + // but no obsolete files will be deleted. Calling this multiple + // times has the same effect as calling it once. + virtual Status DisableFileDeletions() = 0; + + // Allow compactions to delete obsolete files. + // If force == true, the call to EnableFileDeletions() will guarantee that + // file deletions are enabled after the call, even if DisableFileDeletions() + // was called multiple times before. + // If force == false, EnableFileDeletions will only enable file deletion + // after it's been called at least as many times as DisableFileDeletions(), + // enabling the two methods to be called by two threads concurrently without + // synchronization -- i.e., file deletions will be enabled only after both + // threads call EnableFileDeletions() + virtual Status EnableFileDeletions(bool force = true) = 0; + + // GetLiveFiles followed by GetSortedWalFiles can generate a lossless backup + + // Retrieve the list of all files in the database. The files are + // relative to the dbname and are not absolute paths. The valid size of the + // manifest file is returned in manifest_file_size. The manifest file is an + // ever growing file, but only the portion specified by manifest_file_size is + // valid for this snapshot. + // Setting flush_memtable to true does Flush before recording the live files. + // Setting flush_memtable to false is useful when we don't want to wait for + // flush which may have to wait for compaction to complete taking an + // indeterminate time. + // + // In case you have multiple column families, even if flush_memtable is true, + // you still need to call GetSortedWalFiles after GetLiveFiles to compensate + // for new data that arrived to already-flushed column families while other + // column families were flushing + virtual Status GetLiveFiles(std::vector<std::string>&, + uint64_t* manifest_file_size, + bool flush_memtable = true) = 0; + + // Retrieve the sorted list of all WAL files with earliest file first + virtual Status GetSortedWalFiles(VectorLogPtr& files) = 0; + + // Sets iter to an iterator that is positioned at a write-batch containing + // seq_number. If the sequence number is non-existent, it returns an iterator + // at the first available seq_no after the requested seq_no. + // Returns Status::OK if iterator is valid. + // Must set WAL_ttl_seconds or WAL_size_limit_MB to large values to + // use this API, else the WAL files will get + // cleared aggressively and the iterator might keep getting invalid before + // an update is read. + virtual Status GetUpdatesSince( + SequenceNumber seq_number, unique_ptr<TransactionLogIterator>* iter, + const TransactionLogIterator::ReadOptions& + read_options = TransactionLogIterator::ReadOptions()) = 0; + +// Windows API macro interference +#undef DeleteFile + // Delete the file name from the db directory and update the internal state to + // reflect that. Supports deletion of sst and log files only. 'name' must be + // a path relative to the db directory, e.g. 000001.sst, /archive/000003.log + virtual Status DeleteFile(std::string name) = 0; + + // Returns a list of all table files with their level, start key + // and end key + virtual void GetLiveFilesMetaData( + std::vector<LiveFileMetaData>* /*metadata*/) {} + + // Obtains the meta data of the specified column family of the DB. + // Status::NotFound() will be returned if the current DB does not have + // any column family matching the specified name. + // + // If cf_name is not specified, then the metadata of the default + // column family will be returned.
+ virtual void GetColumnFamilyMetaData(ColumnFamilyHandle* /*column_family*/, + ColumnFamilyMetaData* /*metadata*/) {} + + // Get the metadata of the default column family. + void GetColumnFamilyMetaData( + ColumnFamilyMetaData* metadata) { + GetColumnFamilyMetaData(DefaultColumnFamily(), metadata); + } + + // Batch load table files whose paths are stored in "file_path_list" into + // "column_family". A vector of ExternalSstFileInfo can be used + // instead of "file_path_list" to do a blind batch add that won't + // need to read the file; move_file can be set to true to + // move the files instead of copying them. + // + // Current Requirements: + // (1) The key ranges of the files don't overlap with each other. + // (2) The key range of any file in the list doesn't overlap with + // existing keys or tombstones in the DB. + // (3) No snapshots are held. + // + // Notes: We will try to ingest the files to the lowest possible level + // even if the file compression doesn't match the level compression. + virtual Status AddFile(ColumnFamilyHandle* column_family, + const std::vector<std::string>& file_path_list, + bool move_file = false) = 0; + virtual Status AddFile(const std::vector<std::string>& file_path_list, + bool move_file = false) { + return AddFile(DefaultColumnFamily(), file_path_list, move_file); + } +#if defined(__GNUC__) || defined(__clang__) + __attribute__((__deprecated__)) +#elif _WIN32 + __declspec(deprecated) +#endif + virtual Status + AddFile(ColumnFamilyHandle* column_family, const std::string& file_path, + bool move_file = false) { + return AddFile(column_family, std::vector<std::string>(1, file_path), + move_file); + } +#if defined(__GNUC__) || defined(__clang__) + __attribute__((__deprecated__)) +#elif _WIN32 + __declspec(deprecated) +#endif + virtual Status + AddFile(const std::string& file_path, bool move_file = false) { + return AddFile(DefaultColumnFamily(), + std::vector<std::string>(1, file_path), move_file); + } + + // Load table file with information "file_info" into "column_family" + virtual Status AddFile(ColumnFamilyHandle* column_family, + const std::vector<ExternalSstFileInfo>& file_info_list, + bool move_file = false) = 0; + virtual Status AddFile(const std::vector<ExternalSstFileInfo>& file_info_list, + bool move_file = false) { + return AddFile(DefaultColumnFamily(), file_info_list, move_file); + } +#if defined(__GNUC__) || defined(__clang__) + __attribute__((__deprecated__)) +#elif _WIN32 + __declspec(deprecated) +#endif + virtual Status + AddFile(ColumnFamilyHandle* column_family, + const ExternalSstFileInfo* file_info, bool move_file = false) { + return AddFile(column_family, + std::vector<ExternalSstFileInfo>(1, *file_info), move_file); + } +#if defined(__GNUC__) || defined(__clang__) + __attribute__((__deprecated__)) +#elif _WIN32 + __declspec(deprecated) +#endif + virtual Status + AddFile(const ExternalSstFileInfo* file_info, bool move_file = false) { + return AddFile(DefaultColumnFamily(), + std::vector<ExternalSstFileInfo>(1, *file_info), move_file); + } + +#endif // ROCKSDB_LITE + + // Sets the globally unique ID created at database creation time by invoking + // Env::GenerateUniqueId(), in identity.
+
+  // Sets the globally unique ID created at database creation time by invoking
+  // Env::GenerateUniqueId(), in identity. Returns Status::OK if identity could
+  // be set properly
+  virtual Status GetDbIdentity(std::string& identity) const = 0;
+
+  // Returns default column family handle
+  virtual ColumnFamilyHandle* DefaultColumnFamily() const = 0;
+
+#ifndef ROCKSDB_LITE
+  virtual Status GetPropertiesOfAllTables(ColumnFamilyHandle* column_family,
+                                          TablePropertiesCollection* props) = 0;
+  virtual Status GetPropertiesOfAllTables(TablePropertiesCollection* props) {
+    return GetPropertiesOfAllTables(DefaultColumnFamily(), props);
+  }
+  virtual Status GetPropertiesOfTablesInRange(
+      ColumnFamilyHandle* column_family, const Range* range, std::size_t n,
+      TablePropertiesCollection* props) = 0;
+#endif  // ROCKSDB_LITE
+
+  // Needed for StackableDB
+  virtual DB* GetRootDB() { return this; }
+
+ private:
+  // No copying allowed
+  DB(const DB&);
+  void operator=(const DB&);
+};
+
+// Destroy the contents of the specified database.
+// Be very careful using this method.
+Status DestroyDB(const std::string& name, const Options& options);
+
+#ifndef ROCKSDB_LITE
+// If a DB cannot be opened, you may attempt to call this method to
+// resurrect as much of the contents of the database as possible.
+// Some data may be lost, so be careful when calling this function
+// on a database that contains important information.
+//
+// With this API, we will warn and skip data associated with column families not
+// specified in column_families.
+//
+// @param column_families Descriptors for known column families
+Status RepairDB(const std::string& dbname, const DBOptions& db_options,
+                const std::vector<ColumnFamilyDescriptor>& column_families);
+
+// @param unknown_cf_opts Options for column families encountered during the
+// repair that were not specified in column_families.
+Status RepairDB(const std::string& dbname, const DBOptions& db_options,
+                const std::vector<ColumnFamilyDescriptor>& column_families,
+                const ColumnFamilyOptions& unknown_cf_opts);
+
+// @param options These options will be used for the database and for ALL column
+// families encountered during the repair
+Status RepairDB(const std::string& dbname, const Options& options);
+
+#endif
+
+}  // namespace rocksdb
+
+#endif  // STORAGE_ROCKSDB_INCLUDE_DB_H_
diff --git a/external/rocksdb/include/rocksdb/db_bench_tool.h b/external/rocksdb/include/rocksdb/db_bench_tool.h
new file mode 100755
index 0000000000..0e33ae96e2
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/db_bench_tool.h
@@ -0,0 +1,9 @@
+// Copyright (c) 2013-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+#pragma once
+
+namespace rocksdb {
+int db_bench_tool(int argc, char** argv);
+}  // namespace rocksdb
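
Editor's note: an illustrative, last-resort use of the simplest RepairDB() overload declared above, for when DB::Open() keeps failing (path hypothetical):

    rocksdb::Options options;
    rocksdb::Status s = rocksdb::RepairDB("/var/lib/mydb", options);
    if (s.ok()) {
      rocksdb::DB* db = nullptr;
      s = rocksdb::DB::Open(options, "/var/lib/mydb", &db);
      // inspect what survived, then: delete db;
    }
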
diff --git a/external/rocksdb/include/rocksdb/db_dump_tool.h b/external/rocksdb/include/rocksdb/db_dump_tool.h
new file mode 100755
index 0000000000..1acc631762
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/db_dump_tool.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+#ifndef ROCKSDB_LITE
+
+#include <string>
+
+#include "rocksdb/db.h"
+
+namespace rocksdb {
+
+struct DumpOptions {
+  // Database that will be dumped
+  std::string db_path;
+  // File location that will contain dump output
+  std::string dump_location;
+  // Don't include db information header in the dump
+  bool anonymous = false;
+};
+
+class DbDumpTool {
+ public:
+  bool Run(const DumpOptions& dump_options,
+           rocksdb::Options options = rocksdb::Options());
+};
+
+struct UndumpOptions {
+  // Database that we will load the dumped file into
+  std::string db_path;
+  // File location of the dumped file that will be loaded
+  std::string dump_location;
+  // Compact the db after loading the dumped file
+  bool compact_db = false;
+};
+
+class DbUndumpTool {
+ public:
+  bool Run(const UndumpOptions& undump_options,
+           rocksdb::Options options = rocksdb::Options());
+};
+}  // namespace rocksdb
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/include/rocksdb/env.h b/external/rocksdb/include/rocksdb/env.h
new file mode 100755
index 0000000000..2d33da27cb
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/env.h
@@ -0,0 +1,990 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// An Env is an interface used by the rocksdb implementation to access
+// operating system functionality like the filesystem etc. Callers
+// may wish to provide a custom Env object when opening a database to
+// get fine-grain control; e.g., to rate limit file system operations.
+//
+// All Env implementations are safe for concurrent access from
+// multiple threads without any external synchronization.
+
+#ifndef STORAGE_ROCKSDB_INCLUDE_ENV_H_
+#define STORAGE_ROCKSDB_INCLUDE_ENV_H_
+
+#include <stdint.h>
+#include <cstdarg>
+#include <limits>
+#include <memory>
+#include <string>
+#include <vector>
+#include "rocksdb/status.h"
+#include "rocksdb/thread_status.h"
+
+#ifdef _WIN32
+// Windows API macro interference
+#undef DeleteFile
+#undef GetCurrentTime
+#endif
+
+namespace rocksdb {
+
+class FileLock;
+class Logger;
+class RandomAccessFile;
+class SequentialFile;
+class Slice;
+class WritableFile;
+class Directory;
+struct DBOptions;
+class RateLimiter;
+class ThreadStatusUpdater;
+struct ThreadStatus;
+
+using std::unique_ptr;
+using std::shared_ptr;
+
+
+// Options while opening a file to read/write
+struct EnvOptions {
+
+  // construct with default Options
+  EnvOptions();
+
+  // construct from Options
+  explicit EnvOptions(const DBOptions& options);
+
+  // If true, then allow caching of data in environment buffers
+  bool use_os_buffer = true;
+
+  // If true, then use mmap to read data
+  bool use_mmap_reads = false;
+
+  // If true, then use mmap to write data
+  bool use_mmap_writes = true;
+
+  // If true, then use O_DIRECT for reading data
+  bool use_direct_reads = false;
+
+  // If true, then use O_DIRECT for writing data
+  bool use_direct_writes = false;
+
+  // If false, fallocate() calls are bypassed
+  bool allow_fallocate = true;
+
+  // If true, set the FD_CLOEXEC on open fd.
+  bool set_fd_cloexec = true;
+
+  // Allows OS to incrementally sync files to disk while they are being
+  // written, in the background. Issue one request for every bytes_per_sync
+  // written. 0 turns it off.
+  // Default: 0
+  uint64_t bytes_per_sync = 0;
+
+  // If true, we will preallocate the file with FALLOC_FL_KEEP_SIZE flag, which
+  // means that file size won't change as part of preallocation.
+  // If false, preallocation will also change the file size. This option will
+  // improve the performance in workloads where you sync the data on every
+  // write. By default, we set it to true for MANIFEST writes and false for
+  // WAL writes
+  bool fallocate_with_keep_size = true;
+
+  // See DBOptions doc
+  size_t compaction_readahead_size;
+
+  // See DBOptions doc
+  size_t random_access_max_buffer_size;
+
+  // See DBOptions doc
+  size_t writable_file_max_buffer_size = 1024 * 1024;
+
+  // If not nullptr, write rate limiting is enabled for flush and compaction
+  RateLimiter* rate_limiter = nullptr;
+};
+
+class Env {
+ public:
+  struct FileAttributes {
+    // File name
+    std::string name;
+
+    // Size of file in bytes
+    uint64_t size_bytes;
+  };
+
+  Env() : thread_status_updater_(nullptr) {}
+
+  virtual ~Env();
+
+  // Return a default environment suitable for the current operating
+  // system. Sophisticated users may wish to provide their own Env
+  // implementation instead of relying on this default environment.
+  //
+  // The result of Default() belongs to rocksdb and must never be deleted.
+  static Env* Default();
+
+  // Create a brand new sequentially-readable file with the specified name.
+  // On success, stores a pointer to the new file in *result and returns OK.
+  // On failure stores nullptr in *result and returns non-OK. If the file does
+  // not exist, returns a non-OK status.
+  //
+  // The returned file will only be accessed by one thread at a time.
+  virtual Status NewSequentialFile(const std::string& fname,
+                                   unique_ptr<SequentialFile>* result,
+                                   const EnvOptions& options) = 0;
+
+  // Create a brand new random access read-only file with the
+  // specified name. On success, stores a pointer to the new file in
+  // *result and returns OK. On failure stores nullptr in *result and
+  // returns non-OK. If the file does not exist, returns a non-OK
+  // status.
+  //
+  // The returned file may be concurrently accessed by multiple threads.
+  virtual Status NewRandomAccessFile(const std::string& fname,
+                                     unique_ptr<RandomAccessFile>* result,
+                                     const EnvOptions& options) = 0;
+
+  // Create an object that writes to a new file with the specified
+  // name. Deletes any existing file with the same name and creates a
+  // new file. On success, stores a pointer to the new file in
+  // *result and returns OK. On failure stores nullptr in *result and
+  // returns non-OK.
+  //
+  // The returned file will only be accessed by one thread at a time.
+  virtual Status NewWritableFile(const std::string& fname,
+                                 unique_ptr<WritableFile>* result,
+                                 const EnvOptions& options) = 0;
+
+  // Reuse an existing file by renaming it and opening it as writable.
+  virtual Status ReuseWritableFile(const std::string& fname,
+                                   const std::string& old_fname,
+                                   unique_ptr<WritableFile>* result,
+                                   const EnvOptions& options);
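
Editor's note: a sketch of the usual create/append/sync sequence against the factory methods above, assuming the default Env and a hypothetical file name (rocksdb/slice.h provides Slice):

    rocksdb::Env* env = rocksdb::Env::Default();
    std::unique_ptr<rocksdb::WritableFile> file;
    rocksdb::Status s =
        env->NewWritableFile("/tmp/example.dat", &file, rocksdb::EnvOptions());
    if (s.ok()) s = file->Append(rocksdb::Slice("hello"));
    if (s.ok()) s = file->Sync();   // data only; Fsync() may also sync metadata
    if (s.ok()) s = file->Close();
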
+
+  // Create an object that represents a directory. Will fail if directory
+  // doesn't exist. If the directory exists, it will open the directory
+  // and create a new Directory object.
+  //
+  // On success, stores a pointer to the new Directory in
+  // *result and returns OK. On failure stores nullptr in *result and
+  // returns non-OK.
+  virtual Status NewDirectory(const std::string& name,
+                              unique_ptr<Directory>* result) = 0;
+
+  // Returns OK if the named file exists.
+  // NotFound if the named file does not exist,
+  // the calling process does not have permission to determine
+  // whether this file exists, or if the path is invalid.
+  // IOError if an IO Error was encountered
+  virtual Status FileExists(const std::string& fname) = 0;
+
+  // Store in *result the names of the children of the specified directory.
+  // The names are relative to "dir".
+  // Original contents of *results are dropped.
+  virtual Status GetChildren(const std::string& dir,
+                             std::vector<std::string>* result) = 0;
+
+  // Store in *result the attributes of the children of the specified directory.
+  // In case the implementation lists the directory prior to iterating the files
+  // and files are concurrently deleted, the deleted files will be omitted from
+  // result.
+  // The name attributes are relative to "dir".
+  // Original contents of *results are dropped.
+  virtual Status GetChildrenFileAttributes(const std::string& dir,
+                                           std::vector<FileAttributes>* result);
+
+  // Delete the named file.
+  virtual Status DeleteFile(const std::string& fname) = 0;
+
+  // Create the specified directory. Returns error if directory exists.
+  virtual Status CreateDir(const std::string& dirname) = 0;
+
+  // Creates directory if missing. Returns OK if it exists, or if it was
+  // successfully created.
+  virtual Status CreateDirIfMissing(const std::string& dirname) = 0;
+
+  // Delete the specified directory.
+  virtual Status DeleteDir(const std::string& dirname) = 0;
+
+  // Store the size of fname in *file_size.
+  virtual Status GetFileSize(const std::string& fname, uint64_t* file_size) = 0;
+
+  // Store the last modification time of fname in *file_mtime.
+  virtual Status GetFileModificationTime(const std::string& fname,
+                                         uint64_t* file_mtime) = 0;
+  // Rename file src to target.
+  virtual Status RenameFile(const std::string& src,
+                            const std::string& target) = 0;
+
+  // Hard Link file src to target.
+  virtual Status LinkFile(const std::string& src, const std::string& target) {
+    return Status::NotSupported("LinkFile is not supported for this Env");
+  }
+
+  // Lock the specified file. Used to prevent concurrent access to
+  // the same db by multiple processes. On failure, stores nullptr in
+  // *lock and returns non-OK.
+  //
+  // On success, stores a pointer to the object that represents the
+  // acquired lock in *lock and returns OK. The caller should call
+  // UnlockFile(*lock) to release the lock. If the process exits,
+  // the lock will be automatically released.
+  //
+  // If somebody else already holds the lock, finishes immediately
+  // with a failure. I.e., this call does not wait for existing locks
+  // to go away.
+  //
+  // May create the named file if it does not already exist.
+  virtual Status LockFile(const std::string& fname, FileLock** lock) = 0;
+
+  // Release the lock acquired by a previous successful call to LockFile.
+  // REQUIRES: lock was returned by a successful LockFile() call
+  // REQUIRES: lock has not already been unlocked.
+  virtual Status UnlockFile(FileLock* lock) = 0;
+
+  // Priority for scheduling job in thread pool
+  enum Priority { LOW, HIGH, TOTAL };
+
+  // Priority for requesting bytes in rate limiter scheduler
+  enum IOPriority {
+    IO_LOW = 0,
+    IO_HIGH = 1,
+    IO_TOTAL = 2
+  };
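
Editor's note: LockFile() is what guards a DB directory's LOCK file against a second process. A hedged manual-use sketch (path hypothetical; assumes an Env* env):

    rocksdb::FileLock* lock = nullptr;
    rocksdb::Status s = env->LockFile("/tmp/mydb/LOCK", &lock);
    if (s.ok()) {
      // ... section that must not run in two processes at once ...
      env->UnlockFile(lock);
    }
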
+ + // "function" may run in an unspecified thread. Multiple functions + // added to the same Env may run concurrently in different threads. + // I.e., the caller may not assume that background work items are + // serialized. + // When the UnSchedule function is called, the unschedFunction + // registered at the time of Schedule is invoked with arg as a parameter. + virtual void Schedule(void (*function)(void* arg), void* arg, + Priority pri = LOW, void* tag = nullptr, + void (*unschedFunction)(void* arg) = 0) = 0; + + // Arrange to remove jobs for given arg from the queue_ if they are not + // already scheduled. Caller is expected to have exclusive lock on arg. + virtual int UnSchedule(void* arg, Priority pri) { return 0; } + + // Start a new thread, invoking "function(arg)" within the new thread. + // When "function(arg)" returns, the thread will be destroyed. + virtual void StartThread(void (*function)(void* arg), void* arg) = 0; + + // Wait for all threads started by StartThread to terminate. + virtual void WaitForJoin() {} + + // Get thread pool queue length for specific thrad pool. + virtual unsigned int GetThreadPoolQueueLen(Priority pri = LOW) const { + return 0; + } + + // *path is set to a temporary directory that can be used for testing. It may + // or many not have just been created. The directory may or may not differ + // between runs of the same process, but subsequent calls will return the + // same directory. + virtual Status GetTestDirectory(std::string* path) = 0; + + // Create and return a log file for storing informational messages. + virtual Status NewLogger(const std::string& fname, + shared_ptr* result) = 0; + + // Returns the number of micro-seconds since some fixed point in time. Only + // useful for computing deltas of time. + // However, it is often used as system time such as in GenericRateLimiter + // and other places so a port needs to return system time in order to work. + virtual uint64_t NowMicros() = 0; + + // Returns the number of nano-seconds since some fixed point in time. Only + // useful for computing deltas of time in one run. + // Default implementation simply relies on NowMicros + virtual uint64_t NowNanos() { + return NowMicros() * 1000; + } + + // Sleep/delay the thread for the perscribed number of micro-seconds. + virtual void SleepForMicroseconds(int micros) = 0; + + // Get the current host name. + virtual Status GetHostName(char* name, uint64_t len) = 0; + + // Get the number of seconds since the Epoch, 1970-01-01 00:00:00 (UTC). + virtual Status GetCurrentTime(int64_t* unix_time) = 0; + + // Get full directory name for this db. + virtual Status GetAbsolutePath(const std::string& db_path, + std::string* output_path) = 0; + + // The number of background worker threads of a specific thread pool + // for this environment. 'LOW' is the default pool. + // default number: 1 + virtual void SetBackgroundThreads(int number, Priority pri = LOW) = 0; + + // Enlarge number of background worker threads of a specific thread pool + // for this environment if it is smaller than specified. 'LOW' is the default + // pool. + virtual void IncBackgroundThreadsIfNeeded(int number, Priority pri) = 0; + + // Lower IO priority for threads from the specified pool. 
+
+  // Lower IO priority for threads from the specified pool.
+  virtual void LowerThreadPoolIOPriority(Priority pool = LOW) {}
+
+  // Converts seconds-since-Jan-01-1970 to a printable string
+  virtual std::string TimeToString(uint64_t time) = 0;
+
+  // Generates a unique id that can be used to identify a db
+  virtual std::string GenerateUniqueId();
+
+  // OptimizeForLogWrite will create a new EnvOptions object that is a copy of
+  // the EnvOptions in the parameters, but is optimized for writing log files.
+  // Default implementation returns the copy of the same object.
+  virtual EnvOptions OptimizeForLogWrite(const EnvOptions& env_options,
+                                         const DBOptions& db_options) const;
+  // OptimizeForManifestWrite will create a new EnvOptions object that is a copy
+  // of the EnvOptions in the parameters, but is optimized for writing manifest
+  // files. Default implementation returns the copy of the same object.
+  virtual EnvOptions OptimizeForManifestWrite(const EnvOptions& env_options)
+      const;
+
+  // Returns the status of all threads that belong to the current Env.
+  virtual Status GetThreadList(std::vector<ThreadStatus>* thread_list) {
+    return Status::NotSupported("Not supported.");
+  }
+
+  // Returns the pointer to ThreadStatusUpdater. This function will be
+  // used in RocksDB internally to update thread status and supports
+  // GetThreadList().
+  virtual ThreadStatusUpdater* GetThreadStatusUpdater() const {
+    return thread_status_updater_;
+  }
+
+  // Returns the ID of the current thread.
+  virtual uint64_t GetThreadID() const;
+
+ protected:
+  // The pointer to an internal structure that will update the
+  // status of each thread.
+  ThreadStatusUpdater* thread_status_updater_;
+
+ private:
+  // No copying allowed
+  Env(const Env&);
+  void operator=(const Env&);
+};
+
+// The factory function to construct a ThreadStatusUpdater. Any Env
+// that supports GetThreadList() feature should call this function in its
+// constructor to initialize thread_status_updater_.
+ThreadStatusUpdater* CreateThreadStatusUpdater();
+
+// A file abstraction for reading sequentially through a file
+class SequentialFile {
+ public:
+  SequentialFile() { }
+  virtual ~SequentialFile();
+
+  // Read up to "n" bytes from the file. "scratch[0..n-1]" may be
+  // written by this routine. Sets "*result" to the data that was
+  // read (including if fewer than "n" bytes were successfully read).
+  // May set "*result" to point at data in "scratch[0..n-1]", so
+  // "scratch[0..n-1]" must be live when "*result" is used.
+  // If an error was encountered, returns a non-OK status.
+  //
+  // REQUIRES: External synchronization
+  virtual Status Read(size_t n, Slice* result, char* scratch) = 0;
+
+  // Skip "n" bytes from the file. This is guaranteed to be no
+  // slower than reading the same data, but may be faster.
+  //
+  // If end of file is reached, skipping will stop at the end of the
+  // file, and Skip will return OK.
+  //
+  // REQUIRES: External synchronization
+  virtual Status Skip(uint64_t n) = 0;
+
+  // Remove any kind of caching of data from the offset to offset+length
+  // of this file. If the length is 0, then it refers to the end of file.
+  // If the system is not caching the file contents, then this is a noop.
+  virtual Status InvalidateCache(size_t offset, size_t length) {
+    return Status::NotSupported("InvalidateCache not supported.");
+  }
+};
+
+// A file abstraction for randomly reading the contents of a file.
+class RandomAccessFile {
+ public:
+  RandomAccessFile() { }
+  virtual ~RandomAccessFile();
+ // "scratch[0..n-1]" may be written by this routine. Sets "*result" + // to the data that was read (including if fewer than "n" bytes were + // successfully read). May set "*result" to point at data in + // "scratch[0..n-1]", so "scratch[0..n-1]" must be live when + // "*result" is used. If an error was encountered, returns a non-OK + // status. + // + // Safe for concurrent use by multiple threads. + virtual Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const = 0; + + // Used by the file_reader_writer to decide if the ReadAhead wrapper + // should simply forward the call and do not enact buffering or locking. + virtual bool ShouldForwardRawRequest() const { + return false; + } + + // For cases when read-ahead is implemented in the platform dependent + // layer + virtual void EnableReadAhead() {} + + // Tries to get an unique ID for this file that will be the same each time + // the file is opened (and will stay the same while the file is open). + // Furthermore, it tries to make this ID at most "max_size" bytes. If such an + // ID can be created this function returns the length of the ID and places it + // in "id"; otherwise, this function returns 0, in which case "id" + // may not have been modified. + // + // This function guarantees, for IDs from a given environment, two unique ids + // cannot be made equal to eachother by adding arbitrary bytes to one of + // them. That is, no unique ID is the prefix of another. + // + // This function guarantees that the returned ID will not be interpretable as + // a single varint. + // + // Note: these IDs are only valid for the duration of the process. + virtual size_t GetUniqueId(char* id, size_t max_size) const { + return 0; // Default implementation to prevent issues with backwards + // compatibility. + }; + + enum AccessPattern { NORMAL, RANDOM, SEQUENTIAL, WILLNEED, DONTNEED }; + + virtual void Hint(AccessPattern pattern) {} + + // Remove any kind of caching of data from the offset to offset+length + // of this file. If the length is 0, then it refers to the end of file. + // If the system is not caching the file contents, then this is a noop. + virtual Status InvalidateCache(size_t offset, size_t length) { + return Status::NotSupported("InvalidateCache not supported."); + } +}; + +// A file abstraction for sequential writing. The implementation +// must provide buffering since callers may append small fragments +// at a time to the file. +class WritableFile { + public: + WritableFile() + : last_preallocated_block_(0), + preallocation_block_size_(0), + io_priority_(Env::IO_TOTAL) { + } + virtual ~WritableFile(); + + // Indicates if the class makes use of unbuffered I/O + virtual bool UseOSBuffer() const { + return true; + } + + const size_t c_DefaultPageSize = 4 * 1024; + + // This is needed when you want to allocate + // AlignedBuffer for use with file I/O classes + // Used for unbuffered file I/O when UseOSBuffer() returns false + virtual size_t GetRequiredBufferAlignment() const { + return c_DefaultPageSize; + } + + virtual Status Append(const Slice& data) = 0; + + // Positioned write for unbuffered access default forward + // to simple append as most of the tests are buffered by default + virtual Status PositionedAppend(const Slice& /* data */, uint64_t /* offset */) { + return Status::NotSupported(); + } + + // Truncate is necessary to trim the file to the correct size + // before closing. It is not always possible to keep track of the file + // size due to whole pages writes. 
+
+// A file abstraction for sequential writing. The implementation
+// must provide buffering since callers may append small fragments
+// at a time to the file.
+class WritableFile {
+ public:
+  WritableFile()
+      : last_preallocated_block_(0),
+        preallocation_block_size_(0),
+        io_priority_(Env::IO_TOTAL) {
+  }
+  virtual ~WritableFile();
+
+  // Indicates if the class makes use of unbuffered I/O
+  virtual bool UseOSBuffer() const {
+    return true;
+  }
+
+  const size_t c_DefaultPageSize = 4 * 1024;
+
+  // This is needed when you want to allocate
+  // AlignedBuffer for use with file I/O classes
+  // Used for unbuffered file I/O when UseOSBuffer() returns false
+  virtual size_t GetRequiredBufferAlignment() const {
+    return c_DefaultPageSize;
+  }
+
+  virtual Status Append(const Slice& data) = 0;
+
+  // Positioned write for unbuffered access; the default forwards
+  // to simple append as most of the tests are buffered by default
+  virtual Status PositionedAppend(const Slice& /* data */, uint64_t /* offset */) {
+    return Status::NotSupported();
+  }
+
+  // Truncate is necessary to trim the file to the correct size
+  // before closing. It is not always possible to keep track of the file
+  // size due to whole pages writes. The behavior is undefined if called
+  // with other writes to follow.
+  virtual Status Truncate(uint64_t size) {
+    return Status::OK();
+  }
+  virtual Status Close() = 0;
+  virtual Status Flush() = 0;
+  virtual Status Sync() = 0;  // sync data
+
+  /*
+   * Sync data and/or metadata as well.
+   * By default, sync only data.
+   * Override this method for environments where we need to sync
+   * metadata as well.
+   */
+  virtual Status Fsync() {
+    return Sync();
+  }
+
+  // true if Sync() and Fsync() are safe to call concurrently with Append()
+  // and Flush().
+  virtual bool IsSyncThreadSafe() const {
+    return false;
+  }
+
+  // Indicates to the upper layers whether the current WritableFile
+  // implementation uses direct IO.
+  virtual bool UseDirectIO() const { return false; }
+
+  /*
+   * Change the priority in rate limiter if rate limiting is enabled.
+   * If rate limiting is not enabled, this call has no effect.
+   */
+  virtual void SetIOPriority(Env::IOPriority pri) {
+    io_priority_ = pri;
+  }
+
+  virtual Env::IOPriority GetIOPriority() { return io_priority_; }
+
+  /*
+   * Get the size of valid data in the file.
+   */
+  virtual uint64_t GetFileSize() {
+    return 0;
+  }
+
+  /*
+   * Get and set the default pre-allocation block size for writes to
+   * this file. If non-zero, then Allocate will be used to extend the
+   * underlying storage of a file (generally via fallocate) if the Env
+   * instance supports it.
+   */
+  virtual void SetPreallocationBlockSize(size_t size) {
+    preallocation_block_size_ = size;
+  }
+
+  virtual void GetPreallocationStatus(size_t* block_size,
+                                      size_t* last_allocated_block) {
+    *last_allocated_block = last_preallocated_block_;
+    *block_size = preallocation_block_size_;
+  }
+
+  // For documentation, refer to RandomAccessFile::GetUniqueId()
+  virtual size_t GetUniqueId(char* id, size_t max_size) const {
+    return 0;  // Default implementation to prevent issues with backwards
+               // compatibility.
+  }
+
+  // Remove any kind of caching of data from the offset to offset+length
+  // of this file. If the length is 0, then it refers to the end of file.
+  // If the system is not caching the file contents, then this is a noop.
+  // This call has no effect on dirty pages in the cache.
+  virtual Status InvalidateCache(size_t offset, size_t length) {
+    return Status::NotSupported("InvalidateCache not supported.");
+  }
+
+  // Sync a file range with disk.
+  // offset is the starting byte of the file range to be synchronized.
+  // nbytes specifies the length of the range to be synchronized.
+  // This asks the OS to initiate flushing the cached data to disk,
+  // without waiting for completion.
+  // Default implementation does nothing.
+  virtual Status RangeSync(uint64_t offset, uint64_t nbytes) { return Status::OK(); }
+
+  // PrepareWrite performs any necessary preparation for a write
+  // before the write actually occurs. This allows for pre-allocation
+  // of space on devices where it can result in less file
+  // fragmentation and/or less waste from over-zealous filesystem
+  // pre-allocation.
+  virtual void PrepareWrite(size_t offset, size_t len) {
+    if (preallocation_block_size_ == 0) {
+      return;
+    }
+    // If this write would cross one or more preallocation blocks,
+    // determine what the last preallocation block necessary to
+    // cover this write would be and Allocate to that point.
+    const auto block_size = preallocation_block_size_;
+    size_t new_last_preallocated_block =
+        (offset + len + block_size - 1) / block_size;
+    if (new_last_preallocated_block > last_preallocated_block_) {
+      size_t num_spanned_blocks =
+          new_last_preallocated_block - last_preallocated_block_;
+      Allocate(block_size * last_preallocated_block_,
+               block_size * num_spanned_blocks);
+      last_preallocated_block_ = new_last_preallocated_block;
+    }
+  }
+
+ protected:
+  /*
+   * Pre-allocate space for a file.
+   */
+  virtual Status Allocate(uint64_t offset, uint64_t len) {
+    return Status::OK();
+  }
+
+  size_t preallocation_block_size() { return preallocation_block_size_; }
+
+ private:
+  size_t last_preallocated_block_;
+  size_t preallocation_block_size_;
+  // No copying allowed
+  WritableFile(const WritableFile&);
+  void operator=(const WritableFile&);
+
+ protected:
+  friend class WritableFileWrapper;
+  friend class WritableFileMirror;
+
+  Env::IOPriority io_priority_;
+};
+
+// Directory object represents collection of files and implements
+// filesystem operations that can be executed on directories.
+class Directory {
+ public:
+  virtual ~Directory() {}
+  // Fsync directory. Can be called concurrently from multiple threads.
+  virtual Status Fsync() = 0;
+};
+
+enum InfoLogLevel : unsigned char {
+  DEBUG_LEVEL = 0,
+  INFO_LEVEL,
+  WARN_LEVEL,
+  ERROR_LEVEL,
+  FATAL_LEVEL,
+  HEADER_LEVEL,
+  NUM_INFO_LOG_LEVELS,
+};
+
+// An interface for writing log messages.
+class Logger {
+ public:
+  size_t kDoNotSupportGetLogFileSize = std::numeric_limits<size_t>::max();
+
+  explicit Logger(const InfoLogLevel log_level = InfoLogLevel::INFO_LEVEL)
+      : log_level_(log_level) {}
+  virtual ~Logger();
+
+  // Write a header to the log file with the specified format
+  // It is recommended that you log all header information at the start of the
+  // application. But it is not enforced.
+  virtual void LogHeader(const char* format, va_list ap) {
+    // Default implementation does a simple INFO level log write.
+    // Please override as per the logger class requirement.
+    Logv(format, ap);
+  }
+
+  // Write an entry to the log file with the specified format.
+  virtual void Logv(const char* format, va_list ap) = 0;
+
+  // Write an entry to the log file with the specified log level
+  // and format. Any log with level under the internal log level
+  // of *this (see @SetInfoLogLevel and @GetInfoLogLevel) will not be
+  // printed.
+  virtual void Logv(const InfoLogLevel log_level, const char* format, va_list ap);
+
+  virtual size_t GetLogFileSize() const { return kDoNotSupportGetLogFileSize; }
+  // Flush to the OS buffers
+  virtual void Flush() {}
+  virtual InfoLogLevel GetInfoLogLevel() const { return log_level_; }
+  virtual void SetInfoLogLevel(const InfoLogLevel log_level) {
+    log_level_ = log_level;
+  }
+
+ private:
+  // No copying allowed
+  Logger(const Logger&);
+  void operator=(const Logger&);
+  InfoLogLevel log_level_;
+};
+
+
+// Identifies a locked file.
+class FileLock {
+ public:
+  FileLock() { }
+  virtual ~FileLock();
+ private:
+  // No copying allowed
+  FileLock(const FileLock&);
+  void operator=(const FileLock&);
+};
+
+extern void LogFlush(const shared_ptr<Logger>& info_log);
+
+extern void Log(const InfoLogLevel log_level,
+                const shared_ptr<Logger>& info_log, const char* format, ...);
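
Editor's note: the level-aware Log() overload declared just above is the usual entry point; a hedged sketch with a logger obtained from Env::NewLogger (log path hypothetical, assumes an Env* env):

    rocksdb::shared_ptr<rocksdb::Logger> logger;
    if (env->NewLogger("/tmp/example_LOG", &logger).ok()) {
      rocksdb::Log(rocksdb::InfoLogLevel::WARN_LEVEL, logger,
                   "compaction backlog: %d jobs", 7);
      logger->Flush();  // push buffered lines to the OS
    }
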
+
+// a set of log functions with different log levels.
+extern void Header(const shared_ptr<Logger>& info_log, const char* format, ...);
+extern void Debug(const shared_ptr<Logger>& info_log, const char* format, ...);
+extern void Info(const shared_ptr<Logger>& info_log, const char* format, ...);
+extern void Warn(const shared_ptr<Logger>& info_log, const char* format, ...);
+extern void Error(const shared_ptr<Logger>& info_log, const char* format, ...);
+extern void Fatal(const shared_ptr<Logger>& info_log, const char* format, ...);
+
+// Log the specified data to *info_log if info_log is non-nullptr.
+// The default info log level is InfoLogLevel::INFO_LEVEL.
+extern void Log(const shared_ptr<Logger>& info_log, const char* format, ...)
+# if defined(__GNUC__) || defined(__clang__)
+    __attribute__((__format__ (__printf__, 2, 3)))
+# endif
+    ;
+
+extern void LogFlush(Logger *info_log);
+
+extern void Log(const InfoLogLevel log_level, Logger* info_log,
+                const char* format, ...);
+
+// The default info log level is InfoLogLevel::INFO_LEVEL.
+extern void Log(Logger* info_log, const char* format, ...)
+# if defined(__GNUC__) || defined(__clang__)
+    __attribute__((__format__ (__printf__, 2, 3)))
+# endif
+    ;
+
+// a set of log functions with different log levels.
+extern void Header(Logger* info_log, const char* format, ...);
+extern void Debug(Logger* info_log, const char* format, ...);
+extern void Info(Logger* info_log, const char* format, ...);
+extern void Warn(Logger* info_log, const char* format, ...);
+extern void Error(Logger* info_log, const char* format, ...);
+extern void Fatal(Logger* info_log, const char* format, ...);
+
+// A utility routine: write "data" to the named file.
+extern Status WriteStringToFile(Env* env, const Slice& data,
+                                const std::string& fname,
+                                bool should_sync = false);
+
+// A utility routine: read contents of named file into *data
+extern Status ReadFileToString(Env* env, const std::string& fname,
+                               std::string* data);
+
+// An implementation of Env that forwards all calls to another Env.
+// May be useful to clients who wish to override just part of the
+// functionality of another Env.
+class EnvWrapper : public Env { + public: + // Initialize an EnvWrapper that delegates all calls to *t + explicit EnvWrapper(Env* t) : target_(t) { } + virtual ~EnvWrapper(); + + // Return the target to which this Env forwards all calls + Env* target() const { return target_; } + + // The following text is boilerplate that forwards all methods to target() + Status NewSequentialFile(const std::string& f, unique_ptr* r, + const EnvOptions& options) override { + return target_->NewSequentialFile(f, r, options); + } + Status NewRandomAccessFile(const std::string& f, + unique_ptr* r, + const EnvOptions& options) override { + return target_->NewRandomAccessFile(f, r, options); + } + Status NewWritableFile(const std::string& f, unique_ptr* r, + const EnvOptions& options) override { + return target_->NewWritableFile(f, r, options); + } + Status ReuseWritableFile(const std::string& fname, + const std::string& old_fname, + unique_ptr* r, + const EnvOptions& options) override { + return target_->ReuseWritableFile(fname, old_fname, r, options); + } + virtual Status NewDirectory(const std::string& name, + unique_ptr* result) override { + return target_->NewDirectory(name, result); + } + Status FileExists(const std::string& f) override { + return target_->FileExists(f); + } + Status GetChildren(const std::string& dir, + std::vector* r) override { + return target_->GetChildren(dir, r); + } + Status GetChildrenFileAttributes( + const std::string& dir, std::vector* result) override { + return target_->GetChildrenFileAttributes(dir, result); + } + Status DeleteFile(const std::string& f) override { + return target_->DeleteFile(f); + } + Status CreateDir(const std::string& d) override { + return target_->CreateDir(d); + } + Status CreateDirIfMissing(const std::string& d) override { + return target_->CreateDirIfMissing(d); + } + Status DeleteDir(const std::string& d) override { + return target_->DeleteDir(d); + } + Status GetFileSize(const std::string& f, uint64_t* s) override { + return target_->GetFileSize(f, s); + } + + Status GetFileModificationTime(const std::string& fname, + uint64_t* file_mtime) override { + return target_->GetFileModificationTime(fname, file_mtime); + } + + Status RenameFile(const std::string& s, const std::string& t) override { + return target_->RenameFile(s, t); + } + + Status LinkFile(const std::string& s, const std::string& t) override { + return target_->LinkFile(s, t); + } + + Status LockFile(const std::string& f, FileLock** l) override { + return target_->LockFile(f, l); + } + + Status UnlockFile(FileLock* l) override { return target_->UnlockFile(l); } + + void Schedule(void (*f)(void* arg), void* a, Priority pri, + void* tag = nullptr, void (*u)(void* arg) = 0) override { + return target_->Schedule(f, a, pri, tag, u); + } + + int UnSchedule(void* tag, Priority pri) override { + return target_->UnSchedule(tag, pri); + } + + void StartThread(void (*f)(void*), void* a) override { + return target_->StartThread(f, a); + } + void WaitForJoin() override { return target_->WaitForJoin(); } + virtual unsigned int GetThreadPoolQueueLen( + Priority pri = LOW) const override { + return target_->GetThreadPoolQueueLen(pri); + } + virtual Status GetTestDirectory(std::string* path) override { + return target_->GetTestDirectory(path); + } + virtual Status NewLogger(const std::string& fname, + shared_ptr* result) override { + return target_->NewLogger(fname, result); + } + uint64_t NowMicros() override { return target_->NowMicros(); } + void SleepForMicroseconds(int micros) override { + 
target_->SleepForMicroseconds(micros); + } + Status GetHostName(char* name, uint64_t len) override { + return target_->GetHostName(name, len); + } + Status GetCurrentTime(int64_t* unix_time) override { + return target_->GetCurrentTime(unix_time); + } + Status GetAbsolutePath(const std::string& db_path, + std::string* output_path) override { + return target_->GetAbsolutePath(db_path, output_path); + } + void SetBackgroundThreads(int num, Priority pri) override { + return target_->SetBackgroundThreads(num, pri); + } + + void IncBackgroundThreadsIfNeeded(int num, Priority pri) override { + return target_->IncBackgroundThreadsIfNeeded(num, pri); + } + + void LowerThreadPoolIOPriority(Priority pool = LOW) override { + target_->LowerThreadPoolIOPriority(pool); + } + + std::string TimeToString(uint64_t time) override { + return target_->TimeToString(time); + } + + Status GetThreadList(std::vector* thread_list) override { + return target_->GetThreadList(thread_list); + } + + ThreadStatusUpdater* GetThreadStatusUpdater() const override { + return target_->GetThreadStatusUpdater(); + } + + uint64_t GetThreadID() const override { + return target_->GetThreadID(); + } + + private: + Env* target_; +}; + +// An implementation of WritableFile that forwards all calls to another +// WritableFile. May be useful to clients who wish to override just part of the +// functionality of another WritableFile. +// It's declared as friend of WritableFile to allow forwarding calls to +// protected virtual methods. +class WritableFileWrapper : public WritableFile { + public: + explicit WritableFileWrapper(WritableFile* t) : target_(t) { } + + Status Append(const Slice& data) override { return target_->Append(data); } + Status PositionedAppend(const Slice& data, uint64_t offset) override { + return target_->PositionedAppend(data, offset); + } + Status Truncate(uint64_t size) override { return target_->Truncate(size); } + Status Close() override { return target_->Close(); } + Status Flush() override { return target_->Flush(); } + Status Sync() override { return target_->Sync(); } + Status Fsync() override { return target_->Fsync(); } + bool IsSyncThreadSafe() const override { return target_->IsSyncThreadSafe(); } + void SetIOPriority(Env::IOPriority pri) override { + target_->SetIOPriority(pri); + } + Env::IOPriority GetIOPriority() override { return target_->GetIOPriority(); } + uint64_t GetFileSize() override { return target_->GetFileSize(); } + void GetPreallocationStatus(size_t* block_size, + size_t* last_allocated_block) override { + target_->GetPreallocationStatus(block_size, last_allocated_block); + } + size_t GetUniqueId(char* id, size_t max_size) const override { + return target_->GetUniqueId(id, max_size); + } + Status InvalidateCache(size_t offset, size_t length) override { + return target_->InvalidateCache(offset, length); + } + + virtual void SetPreallocationBlockSize(size_t size) override { + target_->SetPreallocationBlockSize(size); + } + virtual void PrepareWrite(size_t offset, size_t len) override { + target_->PrepareWrite(offset, len); + } + + protected: + Status Allocate(uint64_t offset, uint64_t len) override { + return target_->Allocate(offset, len); + } + Status RangeSync(uint64_t offset, uint64_t nbytes) override { + return target_->RangeSync(offset, nbytes); + } + + private: + WritableFile* target_; +}; + +// Returns a new environment that stores its data in memory and delegates +// all non-file-storage tasks to base_env. The caller must delete the result +// when it is no longer needed. 
+// *base_env must remain live while the result is in use.
+Env* NewMemEnv(Env* base_env);
+
+// Returns a new environment that is used for HDFS environment.
+// This is a factory method for HdfsEnv declared in hdfs/env_hdfs.h
+Status NewHdfsEnv(Env** hdfs_env, const std::string& fsname);
+
+}  // namespace rocksdb
+
+#endif  // STORAGE_ROCKSDB_INCLUDE_ENV_H_
diff --git a/external/rocksdb/include/rocksdb/experimental.h b/external/rocksdb/include/rocksdb/experimental.h
new file mode 100755
index 0000000000..70ad0b914b
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/experimental.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include "rocksdb/db.h"
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+namespace experimental {
+
+// Supported only for Leveled compaction
+Status SuggestCompactRange(DB* db, ColumnFamilyHandle* column_family,
+                           const Slice* begin, const Slice* end);
+Status SuggestCompactRange(DB* db, const Slice* begin, const Slice* end);
+
+// Move all L0 files to target_level skipping compaction.
+// This operation succeeds only if the files in L0 have disjoint ranges; this
+// is guaranteed to happen, for instance, if keys are inserted in sorted
+// order. Furthermore, all levels between 1 and target_level must be empty.
+// If any of the above conditions is violated, InvalidArgument will be
+// returned.
+Status PromoteL0(DB* db, ColumnFamilyHandle* column_family,
+                 int target_level = 1);
+
+}  // namespace experimental
+}  // namespace rocksdb
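
Editor's note: a sketch of the two experimental calls above: hint that a range should be compacted, then promote freshly loaded, non-overlapping L0 files (assumes keys were written in sorted order; db is a hypothetical open rocksdb::DB*):

    rocksdb::Slice begin("a"), end("m");
    rocksdb::Status s =
        rocksdb::experimental::SuggestCompactRange(db, &begin, &end);
    if (s.ok()) {
      s = rocksdb::experimental::PromoteL0(db, db->DefaultColumnFamily(),
                                           /*target_level=*/1);
    }
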
diff --git a/external/rocksdb/include/rocksdb/filter_policy.h b/external/rocksdb/include/rocksdb/filter_policy.h
new file mode 100755
index 0000000000..2c1588a232
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/filter_policy.h
@@ -0,0 +1,132 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// A database can be configured with a custom FilterPolicy object.
+// This object is responsible for creating a small filter from a set
+// of keys. These filters are stored in rocksdb and are consulted
+// automatically by rocksdb to decide whether or not to read some
+// information from disk. In many cases, a filter can cut down the
+// number of disk seeks from a handful to a single disk seek per
+// DB::Get() call.
+//
+// Most people will want to use the builtin bloom filter support (see
+// NewBloomFilterPolicy() below).
+
+#ifndef STORAGE_ROCKSDB_INCLUDE_FILTER_POLICY_H_
+#define STORAGE_ROCKSDB_INCLUDE_FILTER_POLICY_H_
+
+#include <memory>
+#include <string>
+
+namespace rocksdb {
+
+class Slice;
+
+// A class that takes a bunch of keys, then generates filter
+class FilterBitsBuilder {
+ public:
+  virtual ~FilterBitsBuilder() {}
+
+  // Add Key to filter; you could use any way to store the key,
+  // such as storing hashes or original keys.
+  // Keys are in sorted order and duplicated keys are possible.
+  virtual void AddKey(const Slice& key) = 0;
+
+  // Generate the filter using the keys that are added.
+  // The return value of this function is the filter bits;
+  // the ownership of actual data is set to buf
+  virtual Slice Finish(std::unique_ptr<const char[]>* buf) = 0;
+};
+
+// A class that checks if a key can be in filter
+// It should be initialized by Slice generated by BitsBuilder
+class FilterBitsReader {
+ public:
+  virtual ~FilterBitsReader() {}
+
+  // Check if the entry matches the bits in filter
+  virtual bool MayMatch(const Slice& entry) = 0;
+};
+
+// We add a new format of filter block called full filter block
+// This new interface gives you more space for customization
+//
+// For the full filter block, you can plug in your version by implementing
+// the FilterBitsBuilder and FilterBitsReader
+//
+// There are two sets of interface in FilterPolicy
+// Set 1: CreateFilter, KeyMayMatch: used for blockbased filter
+// Set 2: GetFilterBitsBuilder, GetFilterBitsReader, they are used for
+// full filter.
+// Set 1 MUST be implemented correctly, Set 2 is optional
+// RocksDB would first try using functions in Set 2. If they return nullptr,
+// it would use Set 1 instead.
+// You can choose the filter type in NewBloomFilterPolicy
+class FilterPolicy {
+ public:
+  virtual ~FilterPolicy();
+
+  // Return the name of this policy. Note that if the filter encoding
+  // changes in an incompatible way, the name returned by this method
+  // must be changed. Otherwise, old incompatible filters may be
+  // passed to methods of this type.
+  virtual const char* Name() const = 0;
+
+  // keys[0,n-1] contains a list of keys (potentially with duplicates)
+  // that are ordered according to the user supplied comparator.
+  // Append a filter that summarizes keys[0,n-1] to *dst.
+  //
+  // Warning: do not change the initial contents of *dst. Instead,
+  // append the newly constructed filter to *dst.
+  virtual void CreateFilter(const Slice* keys, int n, std::string* dst)
+      const = 0;
+
+  // "filter" contains the data appended by a preceding call to
+  // CreateFilter() on this class. This method must return true if
+  // the key was in the list of keys passed to CreateFilter().
+  // This method may return true or false if the key was not on the
+  // list, but it should aim to return false with a high probability.
+  virtual bool KeyMayMatch(const Slice& key, const Slice& filter) const = 0;
+
+  // Get the FilterBitsBuilder, which is ONLY used for full filter block
+  // It contains an interface to take an individual key, then generate filter
+  virtual FilterBitsBuilder* GetFilterBitsBuilder() const {
+    return nullptr;
+  }
+
+  // Get the FilterBitsReader, which is ONLY used for full filter block
+  // It contains an interface to tell if a key can be in filter
+  // The input slice should NOT be deleted by FilterPolicy
+  virtual FilterBitsReader* GetFilterBitsReader(const Slice& contents) const {
+    return nullptr;
+  }
+};
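
Editor's note: most callers never implement FilterPolicy by hand; they plug the bloom factory declared just below into the block-based table options. A hedged configuration sketch (rocksdb/table.h provides the table factory):

    rocksdb::BlockBasedTableOptions table_options;
    table_options.filter_policy.reset(
        rocksdb::NewBloomFilterPolicy(10, /*use_block_based_builder=*/false));
    rocksdb::Options options;
    options.table_factory.reset(
        rocksdb::NewBlockBasedTableFactory(table_options));
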
+
+// Return a new filter policy that uses a bloom filter with approximately
+// the specified number of bits per key.
+//
+// bits_per_key: bits per key in bloom filter. A good value for bits_per_key
+// is 10, which yields a filter with ~ 1% false positive rate.
+// use_block_based_builder: use block based filter rather than full filter.
+// If you want to build a full filter, it needs to be set to false.
+//
+// Callers must delete the result after any database that is using the
+// result has been closed.
+//
+// Note: if you are using a custom comparator that ignores some parts
+// of the keys being compared, you must not use NewBloomFilterPolicy()
+// and must provide your own FilterPolicy that also ignores the
+// corresponding parts of the keys. For example, if the comparator
+// ignores trailing spaces, it would be incorrect to use a
+// FilterPolicy (like NewBloomFilterPolicy) that does not ignore
+// trailing spaces in keys.
+extern const FilterPolicy* NewBloomFilterPolicy(int bits_per_key,
+    bool use_block_based_builder = true);
+}
+
+#endif  // STORAGE_ROCKSDB_INCLUDE_FILTER_POLICY_H_
diff --git a/external/rocksdb/include/rocksdb/flush_block_policy.h b/external/rocksdb/include/rocksdb/flush_block_policy.h
new file mode 100755
index 0000000000..022e0be4a9
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/flush_block_policy.h
@@ -0,0 +1,60 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <string>
+#include "rocksdb/table.h"
+
+namespace rocksdb {
+
+class Slice;
+class BlockBuilder;
+struct Options;
+
+// FlushBlockPolicy provides a configurable way to determine when to flush a
+// block in the block based tables.
+class FlushBlockPolicy {
+ public:
+  // Keep track of the key/value sequences and return the boolean value to
+  // determine if the table builder should flush the current data block.
+  virtual bool Update(const Slice& key,
+                      const Slice& value) = 0;
+
+  virtual ~FlushBlockPolicy() { }
+};
+
+class FlushBlockPolicyFactory {
+ public:
+  // Return the name of the flush block policy.
+  virtual const char* Name() const = 0;
+
+  // Return a new block flush policy that flushes data blocks by data size.
+  // FlushBlockPolicy may need to access the metadata of the data block
+  // builder to determine when to flush the blocks.
+  //
+  // Callers must delete the result after any database that is using the
+  // result has been closed.
+  virtual FlushBlockPolicy* NewFlushBlockPolicy(
+      const BlockBasedTableOptions& table_options,
+      const BlockBuilder& data_block_builder) const = 0;
+
+  virtual ~FlushBlockPolicyFactory() { }
+};
+
+class FlushBlockBySizePolicyFactory : public FlushBlockPolicyFactory {
+ public:
+  FlushBlockBySizePolicyFactory() {}
+
+  virtual const char* Name() const override {
+    return "FlushBlockBySizePolicyFactory";
+  }
+
+  virtual FlushBlockPolicy* NewFlushBlockPolicy(
+      const BlockBasedTableOptions& table_options,
+      const BlockBuilder& data_block_builder) const override;
+};
+
+}  // rocksdb
diff --git a/external/rocksdb/include/rocksdb/immutable_options.h b/external/rocksdb/include/rocksdb/immutable_options.h
new file mode 100755
index 0000000000..9417aa2e97
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/immutable_options.h
@@ -0,0 +1,107 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <string>
+#include <vector>
+#include "rocksdb/options.h"
+
+namespace rocksdb {
+
+// ImmutableCFOptions is a data struct used by RocksDB internally. It contains a
+// subset of Options that should not be changed during the entire lifetime
+// of DB.
+// You shouldn't need to access this data structure unless you are
+// implementing a new TableFactory. Raw pointers defined in this struct do
+// not have ownership of the data they point to. Options contains shared_ptr
+// to these data.
+struct ImmutableCFOptions {
+  explicit ImmutableCFOptions(const Options& options);
+
+  CompactionStyle compaction_style;
+
+  CompactionOptionsUniversal compaction_options_universal;
+  CompactionOptionsFIFO compaction_options_fifo;
+
+  const SliceTransform* prefix_extractor;
+
+  const Comparator* comparator;
+
+  MergeOperator* merge_operator;
+
+  const CompactionFilter* compaction_filter;
+
+  CompactionFilterFactory* compaction_filter_factory;
+
+  bool inplace_update_support;
+
+  UpdateStatus (*inplace_callback)(char* existing_value,
+                                   uint32_t* existing_value_size,
+                                   Slice delta_value,
+                                   std::string* merged_value);
+
+  Logger* info_log;
+
+  Statistics* statistics;
+
+  InfoLogLevel info_log_level;
+
+  Env* env;
+
+  uint64_t delayed_write_rate;
+
+  // Allow the OS to mmap file for reading sst tables. Default: false
+  bool allow_mmap_reads;
+
+  // Allow the OS to mmap file for writing. Default: false
+  bool allow_mmap_writes;
+
+  std::vector<DbPath> db_paths;
+
+  MemTableRepFactory* memtable_factory;
+
+  TableFactory* table_factory;
+
+  Options::TablePropertiesCollectorFactories
+      table_properties_collector_factories;
+
+  bool advise_random_on_open;
+
+  // This option is required by PlainTableReader. May need to move it
+  // to PlainTableOptions just like bloom_bits_per_key
+  uint32_t bloom_locality;
+
+  bool purge_redundant_kvs_while_flush;
+
+  bool disable_data_sync;
+
+  bool use_fsync;
+
+  std::vector<CompressionType> compression_per_level;
+
+  CompressionType bottommost_compression;
+
+  CompressionOptions compression_opts;
+
+  bool level_compaction_dynamic_level_bytes;
+
+  Options::AccessHint access_hint_on_compaction_start;
+
+  bool new_table_reader_for_compaction_inputs;
+
+  size_t compaction_readahead_size;
+
+  int num_levels;
+
+  bool optimize_filters_for_hits;
+
+  // A vector of EventListeners whose call-back functions will be called
+  // when specific RocksDB events happen.
+  std::vector<std::shared_ptr<EventListener>> listeners;
+
+  std::shared_ptr<Cache> row_cache;
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/include/rocksdb/iostats_context.h b/external/rocksdb/include/rocksdb/iostats_context.h
new file mode 100755
index 0000000000..632fe44c82
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/iostats_context.h
@@ -0,0 +1,57 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+#pragma once
+
+#include <stdint.h>
+#include <string>
+
+#include "rocksdb/perf_level.h"
+
+// A thread local context for gathering io-stats efficiently and transparently.
+// Use SetPerfLevel(PerfLevel::kEnableTime) to enable time stats.
+
+namespace rocksdb {
+
+struct IOStatsContext {
+  // reset all io-stats counter to zero
+  void Reset();
+
+  std::string ToString(bool exclude_zero_counters = false) const;
+
+  // the thread pool id
+  uint64_t thread_pool_id;
+
+  // number of bytes that has been written.
+  uint64_t bytes_written;
+  // number of bytes that has been read.
+  uint64_t bytes_read;
+
+  // time spent in open() and fopen().
+  uint64_t open_nanos;
+  // time spent in fallocate().
+  uint64_t allocate_nanos;
+  // time spent in write() and pwrite().
+  uint64_t write_nanos;
+  // time spent in read() and pread()
+  uint64_t read_nanos;
+  // time spent in sync_file_range().
+  uint64_t range_sync_nanos;
+  // time spent in fsync
+  uint64_t fsync_nanos;
+  // time spent in preparing write (fallocate etc).
+  uint64_t prepare_write_nanos;
+  // time spent in Logger::Logv().
+  uint64_t logger_nanos;
+};
+
+#ifndef IOS_CROSS_COMPILE
+# ifdef _WIN32
+extern __declspec(thread) IOStatsContext iostats_context;
+# else
+extern __thread IOStatsContext iostats_context;
+# endif
+#endif  // IOS_CROSS_COMPILE
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/include/rocksdb/iterator.h b/external/rocksdb/include/rocksdb/iterator.h
new file mode 100755
index 0000000000..c39ce46413
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/iterator.h
@@ -0,0 +1,125 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// An iterator yields a sequence of key/value pairs from a source.
+// The following class defines the interface. Multiple implementations
+// are provided by this library. In particular, iterators are provided
+// to access the contents of a Table or a DB.
+//
+// Multiple threads can invoke const methods on an Iterator without
+// external synchronization, but if any of the threads may call a
+// non-const method, all threads accessing the same Iterator must use
+// external synchronization.
+
+#ifndef STORAGE_ROCKSDB_INCLUDE_ITERATOR_H_
+#define STORAGE_ROCKSDB_INCLUDE_ITERATOR_H_
+
+#include <string>
+#include "rocksdb/slice.h"
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+
+class Cleanable {
+ public:
+  Cleanable();
+  ~Cleanable();
+  // Clients are allowed to register function/arg1/arg2 triples that
+  // will be invoked when this iterator is destroyed.
+  //
+  // Note that unlike all of the preceding methods, this method is
+  // not abstract and therefore clients should not override it.
+  typedef void (*CleanupFunction)(void* arg1, void* arg2);
+  void RegisterCleanup(CleanupFunction function, void* arg1, void* arg2);
+
+ protected:
+  struct Cleanup {
+    CleanupFunction function;
+    void* arg1;
+    void* arg2;
+    Cleanup* next;
+  };
+  Cleanup cleanup_;
+};
+
+class Iterator : public Cleanable {
+ public:
+  Iterator() {}
+  virtual ~Iterator() {}
+
+  // An iterator is either positioned at a key/value pair, or
+  // not valid. This method returns true iff the iterator is valid.
+  virtual bool Valid() const = 0;
+
+  // Position at the first key in the source. The iterator is Valid()
+  // after this call iff the source is not empty.
+  virtual void SeekToFirst() = 0;
+
+  // Position at the last key in the source. The iterator is
+  // Valid() after this call iff the source is not empty.
+  virtual void SeekToLast() = 0;
+
+  // Position at the first key in the source that is at or past target.
+  // The iterator is Valid() after this call iff the source contains
+  // an entry that comes at or past target.
+  virtual void Seek(const Slice& target) = 0;
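
Editor's note: a typical full scan written against the positioning and accessor methods of this class (assumes an open rocksdb::DB* db; NewIterator is declared in rocksdb/db.h, and process() is a hypothetical sink):

    rocksdb::Iterator* it = db->NewIterator(rocksdb::ReadOptions());
    for (it->SeekToFirst(); it->Valid(); it->Next()) {
      // key()/value() slices are only valid until the next move
      process(it->key().ToString(), it->value().ToString());
    }
    assert(it->status().ok());  // distinguish end-of-data from an error
    delete it;
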
+ // REQUIRES: Valid() + virtual void Next() = 0; + + // Moves to the previous entry in the source. After this call, Valid() is + // true iff the iterator was not positioned at the first entry in source. + // REQUIRES: Valid() + virtual void Prev() = 0; + + // Return the key for the current entry. The underlying storage for + // the returned slice is valid only until the next modification of + // the iterator. + // REQUIRES: Valid() + virtual Slice key() const = 0; + + // Return the value for the current entry. The underlying storage for + // the returned slice is valid only until the next modification of + // the iterator. + // REQUIRES: !AtEnd() && !AtStart() + virtual Slice value() const = 0; + + // If an error has occurred, return it. Else return an ok status. + // If non-blocking IO is requested and this operation cannot be + // satisfied without doing some IO, then this returns Status::Incomplete(). + virtual Status status() const = 0; + + // Property "rocksdb.iterator.is-key-pinned": + // If returning "1", this means that the Slice returned by key() is valid + // as long as the iterator is not deleted. + // It is guaranteed to always return "1" if + // - Iterator created with ReadOptions::pin_data = true + // - DB tables were created with + // BlockBasedTableOptions::use_delta_encoding = false. + // Property "rocksdb.iterator.super-version-number": + // LSM version used by the iterator. The same format as DB Property + // kCurrentSuperVersionNumber. See its comment for more information. + virtual Status GetProperty(std::string prop_name, std::string* prop); + + private: + // No copying allowed + Iterator(const Iterator&); + void operator=(const Iterator&); +}; + +// Return an empty iterator (yields nothing). +extern Iterator* NewEmptyIterator(); + +// Return an empty iterator with the specified status. +extern Iterator* NewErrorIterator(const Status& status); + +} // namespace rocksdb + +#endif // STORAGE_ROCKSDB_INCLUDE_ITERATOR_H_ diff --git a/external/rocksdb/include/rocksdb/ldb_tool.h b/external/rocksdb/include/rocksdb/ldb_tool.h new file mode 100755 index 0000000000..8a6918ba47 --- /dev/null +++ b/external/rocksdb/include/rocksdb/ldb_tool.h @@ -0,0 +1,41 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#ifndef ROCKSDB_LITE +#pragma once +#include +#include +#include "rocksdb/db.h" +#include "rocksdb/options.h" + +namespace rocksdb { + +// An interface for converting a slice to a readable string +class SliceFormatter { + public: + virtual ~SliceFormatter() {} + virtual std::string Format(const Slice& s) const = 0; +}; + +// Options for customizing ldb tool (beyond the DB Options) +struct LDBOptions { + // Create LDBOptions with default values for all fields + LDBOptions(); + + // Key formatter that converts a slice to a readable string. 
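A minimal sketch of the canonical scan loop over the Iterator interface above (assuming a live rocksdb::DB*):

#include <cassert>
#include <cstdio>
#include "rocksdb/db.h"

void PrintAll(rocksdb::DB* db) {
  rocksdb::Iterator* it = db->NewIterator(rocksdb::ReadOptions());
  for (it->SeekToFirst(); it->Valid(); it->Next()) {
    printf("%s -> %s\n", it->key().ToString().c_str(),
           it->value().ToString().c_str());
  }
  assert(it->status().ok());  // distinguish "end of data" from an error
  delete it;                  // NewIterator() transfers ownership to the caller
}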
+ // Default: Slice::ToString() + std::shared_ptr<SliceFormatter> key_formatter; +}; + +class LDBTool { + public: + void Run( + int argc, char** argv, Options db_options = Options(), + const LDBOptions& ldb_options = LDBOptions(), + const std::vector<ColumnFamilyDescriptor>* column_families = nullptr); +}; + +} // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/include/rocksdb/listener.h b/external/rocksdb/include/rocksdb/listener.h new file mode 100755 index 0000000000..fde0db592d --- /dev/null +++ b/external/rocksdb/include/rocksdb/listener.h @@ -0,0 +1,292 @@ +// Copyright (c) 2014 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once + +#include +#include +#include +#include +#include "rocksdb/compaction_job_stats.h" +#include "rocksdb/status.h" +#include "rocksdb/table_properties.h" + +namespace rocksdb { + +typedef std::unordered_map<std::string, std::shared_ptr<const TableProperties>> + TablePropertiesCollection; + +class DB; +class Status; +struct CompactionJobStats; +enum CompressionType : unsigned char; + +enum class TableFileCreationReason { + kFlush, + kCompaction, + kRecovery, +}; + +struct TableFileCreationBriefInfo { + // the name of the database where the file was created + std::string db_name; + // the name of the column family where the file was created. + std::string cf_name; + // the path to the created file. + std::string file_path; + // the id of the job (which could be flush or compaction) that + // created the file. + int job_id; + // reason for creating the table. + TableFileCreationReason reason; +}; + +struct TableFileCreationInfo : public TableFileCreationBriefInfo { + TableFileCreationInfo() = default; + explicit TableFileCreationInfo(TableProperties&& prop) + : table_properties(prop) {} + // the size of the file. + uint64_t file_size; + // Detailed properties of the created file. + TableProperties table_properties; + // The status indicating whether the creation was successful or not. + Status status; +}; + +enum class CompactionReason { + kUnknown, + // [Level] number of L0 files > level0_file_num_compaction_trigger + kLevelL0FilesNum, + // [Level] total size of level > MaxBytesForLevel() + kLevelMaxLevelSize, + // [Universal] Compacting for size amplification + kUniversalSizeAmplification, + // [Universal] Compacting for size ratio + kUniversalSizeRatio, + // [Universal] number of sorted runs > level0_file_num_compaction_trigger + kUniversalSortedRunNum, + // [FIFO] total size > max_table_files_size + kFIFOMaxSize, + // Manual compaction + kManualCompaction, + // DB::SuggestCompactRange() marked files for compaction + kFilesMarkedForCompaction, +}; + +#ifndef ROCKSDB_LITE + +struct TableFileDeletionInfo { + // The name of the database where the file was deleted. + std::string db_name; + // The path to the deleted file. + std::string file_path; + // The id of the job which deleted the file. + int job_id; + // The status indicating whether the deletion was successful or not. + Status status; +}; + +struct FlushJobInfo { + // the name of the column family + std::string cf_name; + // the path to the newly created file + std::string file_path; + // the id of the thread that completed this flush job. + uint64_t thread_id; + // the job id, which is unique in the same thread. + int job_id; + // If true, then rocksdb is currently slowing-down all writes to prevent + // creating too many Level 0 files as compaction does not seem able to + // keep up with the write request speed.
This indicates that there are + // too many files in Level 0. + bool triggered_writes_slowdown; + // If true, then rocksdb is currently blocking any writes to prevent + // creating more L0 files. This indicates that there are too many + // files in level 0. Compactions should try to compact L0 files down + // to lower levels as soon as possible. + bool triggered_writes_stop; + // The smallest sequence number in the newly created file + SequenceNumber smallest_seqno; + // The largest sequence number in the newly created file + SequenceNumber largest_seqno; + // Table properties of the table being flushed + TableProperties table_properties; +}; + +struct CompactionJobInfo { + CompactionJobInfo() = default; + explicit CompactionJobInfo(const CompactionJobStats& _stats) : + stats(_stats) {} + + // the name of the column family where the compaction happened. + std::string cf_name; + // the status indicating whether the compaction was successful or not. + Status status; + // the id of the thread that completed this compaction job. + uint64_t thread_id; + // the job id, which is unique in the same thread. + int job_id; + // the smallest input level of the compaction. + int base_input_level; + // the output level of the compaction. + int output_level; + // the names of the compaction input files. + std::vector<std::string> input_files; + + // the names of the compaction output files. + std::vector<std::string> output_files; + // Table properties for input and output tables. + // The map is keyed by values from input_files and output_files. + TablePropertiesCollection table_properties; + + // Reason to run the compaction + CompactionReason compaction_reason; + + // Compression algorithm used for output files + CompressionType compression; + + // This variable stores detailed information + // about this compaction. + CompactionJobStats stats; +}; + +struct MemTableInfo { + // the name of the column family to which the memtable belongs + std::string cf_name; + // Sequence number of the first element that was inserted + // into the memtable. + SequenceNumber first_seqno; + // Sequence number that is guaranteed to be smaller than or equal + // to the sequence number of any key that could be inserted into this + // memtable. It can then be assumed that any write with a larger (or equal) + // sequence number will be present in this memtable or a later memtable. + SequenceNumber earliest_seqno; + // Total number of entries in memtable + uint64_t num_entries; + // Total number of deletes in memtable + uint64_t num_deletes; + +}; + +// EventListener class contains a set of call-back functions that will +// be called when a specific RocksDB event happens, such as a flush. It can +// be used as a building block for developing custom features such as +// stats collectors or external compaction algorithms. +// +// Note that call-back functions should not run for an extended period of +// time before the function returns, otherwise RocksDB may be blocked. +// For example, it is not suggested to do DB::CompactFiles() (as it may +// run for a long while) or issue many of DB::Put() (as Put may be blocked +// in certain cases) in the same thread in the EventListener callback. +// However, doing DB::CompactFiles() and DB::Put() in another thread is +// considered safe. +// +// [Threading] All EventListener callbacks will be called using the +// actual thread that is involved in that specific event. For example, it +// is the RocksDB background flush thread that does the actual flush that +// calls EventListener::OnFlushCompleted().
+// +// [Locking] All EventListener callbacks are designed to be called without +// the current thread holding any DB mutex. This is to prevent potential +// deadlock and performance issues when using EventListener callbacks +// in a complex way. However, all EventListener call-back functions +// should not run for an extended period of time before the function +// returns, otherwise RocksDB may be blocked. For example, it is not +// suggested to do DB::CompactFiles() (as it may run for a long while) +// or issue many of DB::Put() (as Put may be blocked in certain cases) +// in the same thread in the EventListener callback. However, doing +// DB::CompactFiles() and DB::Put() in a thread other than the +// EventListener callback thread is considered safe. +class EventListener { + public: + // A call-back function to RocksDB which will be called whenever a + // registered RocksDB flushes a file. The default implementation is + // no-op. + // + // Note that this function must be implemented in a way such that + // it does not run for an extended period of time before returning. + // Otherwise, RocksDB may be blocked. + virtual void OnFlushCompleted(DB* /*db*/, + const FlushJobInfo& /*flush_job_info*/) {} + + // A call-back function for RocksDB which will be called whenever + // an SST file is deleted. Different from OnCompactionCompleted and + // OnFlushCompleted, this call-back is designed for an external logging + // service and thus only provides string parameters instead + // of a pointer to DB. Applications that build logic based + // on file creations and deletions are advised to implement + // OnFlushCompleted and OnCompactionCompleted. + // + // Note that if applications would like to use the passed reference + // outside this function call, they should make their own copies of + // the passed value. + virtual void OnTableFileDeleted(const TableFileDeletionInfo& /*info*/) {} + + // A call-back function for RocksDB which will be called whenever + // a registered RocksDB compacts a file. The default implementation + // is a no-op. + // + // Note that this function must be implemented in a way such that + // it does not run for an extended period of time before returning. + // Otherwise, RocksDB may be blocked. + // + // @param db a pointer to the rocksdb instance which just compacted + // a file. + // @param ci a reference to a CompactionJobInfo struct. 'ci' is released + // after this function is returned, and must be copied if it is needed + // outside of this function. + virtual void OnCompactionCompleted(DB* /*db*/, + const CompactionJobInfo& /*ci*/) {} + + // A call-back function for RocksDB which will be called whenever + // an SST file is created. Different from OnCompactionCompleted and + // OnFlushCompleted, this call-back is designed for an external logging + // service and thus only provides string parameters instead + // of a pointer to DB. Applications that build logic based + // on file creations and deletions are advised to implement + // OnFlushCompleted and OnCompactionCompleted. + // + // Historically it was only called if the file was successfully created; + // now it is also called on failure. Users can check info.status + // to see whether it succeeded or not. + // + // Note that if applications would like to use the passed reference + // outside this function call, they should make their own copies of + // the passed value.
+ virtual void OnTableFileCreated(const TableFileCreationInfo& /*info*/) {} + + // A call-back function for RocksDB which will be called before + // an SST file is created. It will be followed by OnTableFileCreated after + // the creation finishes. + // + // Note that if applications would like to use the passed reference + // outside this function call, they should make their own copies of + // the passed value. + virtual void OnTableFileCreationStarted( + const TableFileCreationBriefInfo& /*info*/) {} + + // A call-back function for RocksDB which will be called before + // a memtable is made immutable. + // + // Note that this function must be implemented in a way such that + // it does not run for an extended period of time before returning. + // Otherwise, RocksDB may be blocked. + // + // Note that if applications would like to use the passed reference + // outside this function call, they should make their own copies of + // the passed value. + virtual void OnMemTableSealed( + const MemTableInfo& /*info*/) {} + + virtual ~EventListener() {} +}; + +#else + +class EventListener { +}; + +#endif // ROCKSDB_LITE + +} // namespace rocksdb diff --git a/external/rocksdb/include/rocksdb/memtablerep.h b/external/rocksdb/include/rocksdb/memtablerep.h new file mode 100755 index 0000000000..f6f0309469 --- /dev/null +++ b/external/rocksdb/include/rocksdb/memtablerep.h @@ -0,0 +1,329 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file contains the interface that must be implemented by any collection +// to be used as the backing store for a MemTable. Such a collection must +// satisfy the following properties: +// (1) It does not store duplicate items. +// (2) It uses MemTableRep::KeyComparator to compare items for iteration and +// equality. +// (3) It can be accessed concurrently by multiple readers and must support +// reads that happen during a write. However, it needn't support multiple +// concurrent writes. +// (4) Items are never deleted. +// The liberal use of assertions is encouraged to enforce (1). +// +// The factory will be passed a MemTableAllocator object when a new MemTableRep +// is requested. +// +// Users can implement their own memtable representations. We include three +// types built in: +// - SkipListRep: This is the default; it is backed by a skip list. +// - HashSkipListRep: The memtable rep that is best used for keys that are +// structured like "prefix:suffix" where iteration within a prefix is +// common and iteration across different prefixes is rare. It is backed by +// a hash map where each bucket is a skip list. +// - VectorRep: This is backed by an unordered std::vector. On iteration, the +// vector is sorted. It is intelligent about sorting; once MarkReadOnly() +// has been called, the vector will only be sorted once. It is optimized for +// random-write-heavy workloads. +// +// The hash-based and vector implementations are designed for situations in +// which iteration over the entire collection is rare, since doing so requires +// all the keys to be copied into a sorted data structure.
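A minimal EventListener sketch against the interface above; the class name is hypothetical, and the callback must return quickly per the threading notes:

#include <cstdio>
#include <memory>
#include "rocksdb/db.h"
#include "rocksdb/listener.h"
#include "rocksdb/options.h"

class FlushLogger : public rocksdb::EventListener {
 public:
  void OnFlushCompleted(rocksdb::DB* /*db*/,
                        const rocksdb::FlushJobInfo& info) override {
    // Copy/print only; doing DB::Put()/CompactFiles() here could block RocksDB.
    printf("flushed cf=%s to %s\n", info.cf_name.c_str(),
           info.file_path.c_str());
  }
};

// Listeners are registered on the options used to open the DB:
void InstallFlushLogger(rocksdb::Options* options) {
  options->listeners.emplace_back(std::make_shared<FlushLogger>());
}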
+ +#pragma once + +#include +#include +#include +#include + +namespace rocksdb { + +class Arena; +class MemTableAllocator; +class LookupKey; +class Slice; +class SliceTransform; +class Logger; + +typedef void* KeyHandle; + +class MemTableRep { + public: + // KeyComparator provides a means to compare keys, which are internal keys + // concatenated with values. + class KeyComparator { + public: + // Compare a and b. Return a negative value if a is less than b, 0 if they + // are equal, and a positive value if a is greater than b + virtual int operator()(const char* prefix_len_key1, + const char* prefix_len_key2) const = 0; + + virtual int operator()(const char* prefix_len_key, + const Slice& key) const = 0; + + virtual ~KeyComparator() { } + }; + + explicit MemTableRep(MemTableAllocator* allocator) : allocator_(allocator) {} + + // Allocate a buf of len size for storing key. The idea is that a + // specific memtable representation knows its underlying data structure + // better. By allowing it to allocate memory, it can possibly put + // correlated stuff in consecutive memory area to make processor + // prefetching more efficient. + virtual KeyHandle Allocate(const size_t len, char** buf); + + // Insert key into the collection. (The caller will pack key and value into a + // single buffer and pass that in as the parameter to Insert). + // REQUIRES: nothing that compares equal to key is currently in the + // collection, and no concurrent modifications to the table in progress + virtual void Insert(KeyHandle handle) = 0; + + // Like Insert(handle), but may be called concurrent with other calls + // to InsertConcurrently for other handles + virtual void InsertConcurrently(KeyHandle handle) { +#ifndef ROCKSDB_LITE + throw std::runtime_error("concurrent insert not supported"); +#else + abort(); +#endif + } + + // Returns true iff an entry that compares equal to key is in the collection. + virtual bool Contains(const char* key) const = 0; + + // Notify this table rep that it will no longer be added to. By default, + // does nothing. After MarkReadOnly() is called, this table rep will + // not be written to (ie No more calls to Allocate(), Insert(), + // or any writes done directly to entries accessed through the iterator.) + virtual void MarkReadOnly() { } + + // Look up key from the mem table, since the first key in the mem table whose + // user_key matches the one given k, call the function callback_func(), with + // callback_args directly forwarded as the first parameter, and the mem table + // key as the second parameter. If the return value is false, then terminates. + // Otherwise, go through the next key. + // + // It's safe for Get() to terminate after having finished all the potential + // key for the k.user_key(), or not. + // + // Default: + // Get() function with a default value of dynamically construct an iterator, + // seek and call the call back function. + virtual void Get(const LookupKey& k, void* callback_args, + bool (*callback_func)(void* arg, const char* entry)); + + virtual uint64_t ApproximateNumEntries(const Slice& start_ikey, + const Slice& end_key) { + return 0; + } + + // Report an approximation of how much memory has been used other than memory + // that was allocated through the allocator. Safe to call from any thread. + virtual size_t ApproximateMemoryUsage() = 0; + + virtual ~MemTableRep() { } + + // Iteration over the contents of a skip collection + class Iterator { + public: + // Initialize an iterator over the specified collection. 
+ // The returned iterator is not valid. + // explicit Iterator(const MemTableRep* collection); + virtual ~Iterator() {} + + // Returns true iff the iterator is positioned at a valid node. + virtual bool Valid() const = 0; + + // Returns the key at the current position. + // REQUIRES: Valid() + virtual const char* key() const = 0; + + // Advances to the next position. + // REQUIRES: Valid() + virtual void Next() = 0; + + // Advances to the previous position. + // REQUIRES: Valid() + virtual void Prev() = 0; + + // Advance to the first entry with a key >= target + virtual void Seek(const Slice& internal_key, const char* memtable_key) = 0; + + // Position at the first entry in collection. + // Final state of iterator is Valid() iff collection is not empty. + virtual void SeekToFirst() = 0; + + // Position at the last entry in collection. + // Final state of iterator is Valid() iff collection is not empty. + virtual void SeekToLast() = 0; + }; + + // Return an iterator over the keys in this representation. + // arena: If not null, the arena needs to be used to allocate the Iterator. + // When destroying the iterator, the caller will not call "delete" + // but Iterator::~Iterator() directly. The destructor needs to destroy + // all the states but those allocated in arena. + virtual Iterator* GetIterator(Arena* arena = nullptr) = 0; + + // Return an iterator that has a special Seek semantics. The result of + // a Seek might only include keys with the same prefix as the target key. + // arena: If not null, the arena is used to allocate the Iterator. + // When destroying the iterator, the caller will not call "delete" + // but Iterator::~Iterator() directly. The destructor needs to destroy + // all the states but those allocated in arena. + virtual Iterator* GetDynamicPrefixIterator(Arena* arena = nullptr) { + return GetIterator(arena); + } + + // Return true if the current MemTableRep supports merge operator. + // Default: true + virtual bool IsMergeOperatorSupported() const { return true; } + + // Return true if the current MemTableRep supports snapshot + // Default: true + virtual bool IsSnapshotSupported() const { return true; } + + protected: + // When *key is an internal key concatenated with the value, returns the + // user key. + virtual Slice UserKey(const char* key) const; + + MemTableAllocator* allocator_; +}; + +// This is the base class for all factories that are used by RocksDB to create +// new MemTableRep objects +class MemTableRepFactory { + public: + virtual ~MemTableRepFactory() {} + virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator&, + MemTableAllocator*, + const SliceTransform*, + Logger* logger) = 0; + virtual const char* Name() const = 0; + + // Return true if the current MemTableRep supports concurrent inserts + // Default: false + virtual bool IsInsertConcurrentlySupported() const { return false; } +}; + +// This uses a skip list to store keys. It is the default. +// +// Parameters: +// lookahead: If non-zero, each iterator's seek operation will start the +// search from the previously visited record (doing at most 'lookahead' +// steps). This is an optimization for the access pattern including many +// seeks with consecutive keys. 
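Per the lookahead parameter documented above, a sketch of installing a non-default skip-list factory (the value 8 is illustrative):

#include <memory>
#include "rocksdb/memtablerep.h"
#include "rocksdb/options.h"

void UseLookaheadSkipList(rocksdb::Options* options) {
  // Each Seek() first scans up to 8 nodes forward from the previously
  // visited record, which helps seek patterns with consecutive keys.
  options->memtable_factory.reset(new rocksdb::SkipListFactory(8));
}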
+class SkipListFactory : public MemTableRepFactory { + public: + explicit SkipListFactory(size_t lookahead = 0) : lookahead_(lookahead) {} + + virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator&, + MemTableAllocator*, + const SliceTransform*, + Logger* logger) override; + virtual const char* Name() const override { return "SkipListFactory"; } + + bool IsInsertConcurrentlySupported() const override { return true; } + + private: + const size_t lookahead_; +}; + +#ifndef ROCKSDB_LITE +// This creates MemTableReps that are backed by an std::vector. On iteration, +// the vector is sorted. This is useful for workloads where iteration is very +// rare and writes are generally not issued after reads begin. +// +// Parameters: +// count: Passed to the constructor of the underlying std::vector of each +// VectorRep. On initialization, the underlying array will have space +// reserved for at least count entries. +class VectorRepFactory : public MemTableRepFactory { + const size_t count_; + + public: + explicit VectorRepFactory(size_t count = 0) : count_(count) { } + virtual MemTableRep* CreateMemTableRep(const MemTableRep::KeyComparator&, + MemTableAllocator*, + const SliceTransform*, + Logger* logger) override; + virtual const char* Name() const override { + return "VectorRepFactory"; + } +}; + +// This factory creates memtables that contain a fixed array of buckets, each +// pointing to a skiplist (null if the bucket is empty). +// bucket_count: number of fixed array buckets +// skiplist_height: the max height of the skiplist +// skiplist_branching_factor: probabilistic size ratio between adjacent +// link lists in the skiplist +extern MemTableRepFactory* NewHashSkipListRepFactory( + size_t bucket_count = 1000000, int32_t skiplist_height = 4, + int32_t skiplist_branching_factor = 4 +); + +// This factory creates memtables based on a hash table: +// it contains a fixed array of buckets, each pointing to either a linked list +// or a skip list if the number of entries inside the bucket exceeds +// threshold_use_skiplist. +// @bucket_count: number of fixed array buckets +// @huge_page_tlb_size: if <=0, allocate the hash table bytes from malloc. +// Otherwise from huge page TLB. The user needs to reserve +// huge pages for it to be allocated, like: +// sysctl -w vm.nr_hugepages=20 +// See linux doc Documentation/vm/hugetlbpage.txt +// @bucket_entries_logging_threshold: if the number of entries in one bucket +// exceeds this number, log about it. +// @if_log_bucket_dist_when_flash: if true, log the distribution of the number +// of entries when flushing. +// @threshold_use_skiplist: a bucket switches to a skip list if the number of +// entries exceeds this parameter. +extern MemTableRepFactory* NewHashLinkListRepFactory( + size_t bucket_count = 50000, size_t huge_page_tlb_size = 0, + int bucket_entries_logging_threshold = 4096, + bool if_log_bucket_dist_when_flash = true, + uint32_t threshold_use_skiplist = 256); + +// This factory creates a cuckoo-hashing based mem-table representation. +// Cuckoo-hash is a closed-hash strategy, in which all key/value pairs +// are stored in the bucket array itself instead of in some data structures +// external to the bucket array. In addition, each key in cuckoo hash +// has a constant number of possible buckets in the bucket array. These +// two properties together make cuckoo hash more memory efficient and +// give it constant worst-case read time. Cuckoo hash is best suited for +// point-lookup workloads.
+// +// When inserting a key / value, it first checks whether one of its possible +// buckets is empty. If so, the key / value will be inserted to that vacant +// bucket. Otherwise, one of the keys originally stored in one of these +// possible buckets will be "kicked out" and move to one of its possible +// buckets (and possibly kicks out another victim.) In the current +// implementation, such "kick-out" path is bounded. If it cannot find a +// "kick-out" path for a specific key, this key will be stored in a backup +// structure, and the current memtable to be forced to immutable. +// +// Note that currently this mem-table representation does not support +// snapshot (i.e., it only queries latest state) and iterators. In addition, +// MultiGet operation might also lose its atomicity due to the lack of +// snapshot support. +// +// Parameters: +// write_buffer_size: the write buffer size in bytes. +// average_data_size: the average size of key + value in bytes. This value +// together with write_buffer_size will be used to compute the number +// of buckets. +// hash_function_count: the number of hash functions that will be used by +// the cuckoo-hash. The number also equals to the number of possible +// buckets each key will have. +extern MemTableRepFactory* NewHashCuckooRepFactory( + size_t write_buffer_size, size_t average_data_size = 64, + unsigned int hash_function_count = 4); +#endif // ROCKSDB_LITE +} // namespace rocksdb diff --git a/external/rocksdb/include/rocksdb/merge_operator.h b/external/rocksdb/include/rocksdb/merge_operator.h new file mode 100755 index 0000000000..b3ca013b74 --- /dev/null +++ b/external/rocksdb/include/rocksdb/merge_operator.h @@ -0,0 +1,229 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_ +#define STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_ + +#include +#include +#include +#include + +#include "rocksdb/slice.h" + +namespace rocksdb { + +class Slice; +class Logger; + +// The Merge Operator +// +// Essentially, a MergeOperator specifies the SEMANTICS of a merge, which only +// client knows. It could be numeric addition, list append, string +// concatenation, edit data structure, ... , anything. +// The library, on the other hand, is concerned with the exercise of this +// interface, at the right time (during get, iteration, compaction...) +// +// To use merge, the client needs to provide an object implementing one of +// the following interfaces: +// a) AssociativeMergeOperator - for most simple semantics (always take +// two values, and merge them into one value, which is then put back +// into rocksdb); numeric addition and string concatenation are examples; +// +// b) MergeOperator - the generic class for all the more abstract / complex +// operations; one method (FullMergeV2) to merge a Put/Delete value with a +// merge operand; and another method (PartialMerge) that merges multiple +// operands together. this is especially useful if your key values have +// complex structures but you would still like to support client-specific +// incremental updates. +// +// AssociativeMergeOperator is simpler to implement. MergeOperator is simply +// more powerful. +// +// Refer to rocksdb-merge wiki for more details and example implementations. 
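A configuration sketch for the hash-based factories declared above; hash memtables bucket keys by prefix, so this assumes a prefix extractor is also set (NewFixedPrefixTransform comes from rocksdb/slice_transform.h; the 4-byte prefix and bucket count are assumptions):

#include "rocksdb/memtablerep.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"

void UseHashSkipListMemtable(rocksdb::Options* options) {
  options->prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(4));
  options->memtable_factory.reset(
      rocksdb::NewHashSkipListRepFactory(/*bucket_count=*/1 << 20));
}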
+// +class MergeOperator { + public: + virtual ~MergeOperator() {} + + // Gives the client a way to express the read -> modify -> write semantics + // key: (IN) The key that's associated with this merge operation. + // Client could multiplex the merge operator based on it + // if the key space is partitioned and different subspaces + // refer to different types of data which have different + // merge operation semantics + // existing: (IN) null indicates that the key does not exist before this op + // operand_list:(IN) the sequence of merge operations to apply, front() first. + // new_value:(OUT) Client is responsible for filling the merge result here. + // The string that new_value is pointing to will be empty. + // logger: (IN) Client could use this to log errors during merge. + // + // Return true on success. + // All values passed in will be client-specific values. So if this method + // returns false, it is because client specified bad data or there was + // internal corruption. This will be treated as an error by the library. + // + // Also make use of the *logger for error messages. + virtual bool FullMerge(const Slice& key, + const Slice* existing_value, + const std::deque<std::string>& operand_list, + std::string* new_value, + Logger* logger) const { + // deprecated, please use FullMergeV2() + assert(false); + return false; + } + + struct MergeOperationInput { + explicit MergeOperationInput(const Slice& _key, + const Slice* _existing_value, + const std::vector<Slice>& _operand_list, + Logger* _logger) + : key(_key), + existing_value(_existing_value), + operand_list(_operand_list), + logger(_logger) {} + + // The key associated with the merge operation. + const Slice& key; + // The existing value of the current key, nullptr means that the + // value does not exist. + const Slice* existing_value; + // A list of operands to apply. + const std::vector<Slice>& operand_list; + // Logger could be used by client to log any errors that happen during + // the merge operation. + Logger* logger; + }; + + struct MergeOperationOutput { + explicit MergeOperationOutput(std::string& _new_value, + Slice& _existing_operand) + : new_value(_new_value), existing_operand(_existing_operand) {} + + // Client is responsible for filling the merge result here. + std::string& new_value; + // If the merge result is one of the existing operands (or existing_value), + // client can set this field to the operand (or existing_value) instead of + // using new_value. + Slice& existing_operand; + }; + + virtual bool FullMergeV2(const MergeOperationInput& merge_in, + MergeOperationOutput* merge_out) const; + + // This function performs merge(left_op, right_op) + // when both the operands are themselves merge operation types + // that you would have passed to a DB::Merge() call in the same order + // (i.e.: DB::Merge(key,left_op), followed by DB::Merge(key,right_op)). + // + // PartialMerge should combine them into a single merge operation that is + // saved into *new_value, and then it should return true. + // *new_value should be constructed such that a call to + // DB::Merge(key, *new_value) would yield the same result as a call + // to DB::Merge(key, left_op) followed by DB::Merge(key, right_op). + // + // The string that new_value is pointing to will be empty. + // + // The default implementation of PartialMergeMulti will use this function + // as a helper, for backward compatibility.
Any successor class of + // MergeOperator should either implement PartialMerge or PartialMergeMulti, + // although implementing PartialMergeMulti is suggested as it is in general + // more efficient to merge multiple operands at a time than two + // operands at a time. + // + // If it is impossible or infeasible to combine the two operations, + // leave new_value unchanged and return false. The library will + // internally keep track of the operations, and apply them in the + // correct order once a base-value (a Put/Delete/End-of-Database) is seen. + // + // TODO: Presently there is no way to differentiate between error/corruption + // and simply "return false". For now, the client should simply return + // false in any case it cannot perform partial-merge, regardless of reason. + // If there is corruption in the data, handle it in the FullMergeV2() function + // and return false there. The default implementation of PartialMerge will + // always return false. + virtual bool PartialMerge(const Slice& key, const Slice& left_operand, + const Slice& right_operand, std::string* new_value, + Logger* logger) const { + return false; + } + + // This function performs merge when all the operands are themselves merge + // operation types that you would have passed to a DB::Merge() call in the + // same order (front() first) + // (i.e. DB::Merge(key, operand_list[0]), followed by + // DB::Merge(key, operand_list[1]), ...) + // + // PartialMergeMulti should combine them into a single merge operation that is + // saved into *new_value, and then it should return true. *new_value should + // be constructed such that a call to DB::Merge(key, *new_value) would yield + // the same result as sequential individual calls to DB::Merge(key, operand) + // for each operand in operand_list from front() to back(). + // + // The string that new_value is pointing to will be empty. + // + // The PartialMergeMulti function will be called only when the list of + // operands is long enough. The minimum number of operands that will be + // passed to the function is specified by the "min_partial_merge_operands" + // option. + // + // In the default implementation, PartialMergeMulti will invoke PartialMerge + // multiple times, where each time it only merges two operands. Developers + // should either implement PartialMergeMulti, or implement PartialMerge which + // serves as the helper function of the default PartialMergeMulti. + virtual bool PartialMergeMulti(const Slice& key, + const std::deque<Slice>& operand_list, + std::string* new_value, Logger* logger) const; + + // The name of the MergeOperator. Used to check for MergeOperator + // mismatches (i.e., a DB created with one MergeOperator is + // accessed using a different MergeOperator) + // TODO: the name is currently not stored persistently and thus + // no checking is enforced. Client is responsible for providing + // consistent MergeOperator between DB opens. + virtual const char* Name() const = 0; +}; + +// The simpler, associative merge operator. +class AssociativeMergeOperator : public MergeOperator { + public: + virtual ~AssociativeMergeOperator() {} + + // Gives the client a way to express the read -> modify -> write semantics + // key: (IN) The key that's associated with this merge operation. + // existing_value:(IN) null indicates the key does not exist before this op + // value: (IN) the value to update/merge the existing_value with + // new_value: (OUT) Client is responsible for filling the merge result + // here.
The string that new_value is pointing to will be empty. + // logger: (IN) Client could use this to log errors during merge. + // + // Return true on success. + // All values passed in will be client-specific values. So if this method + // returns false, it is because client specified bad data or there was + // internal corruption. The client should assume that this will be treated + // as an error by the library. + virtual bool Merge(const Slice& key, + const Slice* existing_value, + const Slice& value, + std::string* new_value, + Logger* logger) const = 0; + + + private: + // Default implementations of the MergeOperator functions + virtual bool FullMergeV2(const MergeOperationInput& merge_in, + MergeOperationOutput* merge_out) const override; + + virtual bool PartialMerge(const Slice& key, + const Slice& left_operand, + const Slice& right_operand, + std::string* new_value, + Logger* logger) const override; +}; + +} // namespace rocksdb + +#endif // STORAGE_ROCKSDB_INCLUDE_MERGE_OPERATOR_H_ diff --git a/external/rocksdb/include/rocksdb/metadata.h b/external/rocksdb/include/rocksdb/metadata.h new file mode 100755 index 0000000000..5425146d7b --- /dev/null +++ b/external/rocksdb/include/rocksdb/metadata.h @@ -0,0 +1,92 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#pragma once + +#include + +#include +#include +#include + +#include "rocksdb/types.h" + +namespace rocksdb { +struct ColumnFamilyMetaData; +struct LevelMetaData; +struct SstFileMetaData; + +// The metadata that describes a column family. +struct ColumnFamilyMetaData { + ColumnFamilyMetaData() : size(0), name("") {} + ColumnFamilyMetaData(const std::string& _name, uint64_t _size, + const std::vector<LevelMetaData>&& _levels) : + size(_size), name(_name), levels(_levels) {} + + // The size of this column family in bytes, which is equal to the sum of + // the file size of its "levels". + uint64_t size; + // The number of files in this column family. + size_t file_count; + // The name of the column family. + std::string name; + // The metadata of all levels in this column family. + std::vector<LevelMetaData> levels; +}; + +// The metadata that describes a level. +struct LevelMetaData { + LevelMetaData(int _level, uint64_t _size, + const std::vector<SstFileMetaData>&& _files) : + level(_level), size(_size), + files(_files) {} + + // The level which this meta data describes. + const int level; + // The size of this level in bytes, which is equal to the sum of + // the file size of its "files". + const uint64_t size; + // The metadata of all sst files in this level. + const std::vector<SstFileMetaData> files; +}; + +// The metadata that describes a SST file. +struct SstFileMetaData { + SstFileMetaData() {} + SstFileMetaData(const std::string& _file_name, + const std::string& _path, uint64_t _size, + SequenceNumber _smallest_seqno, + SequenceNumber _largest_seqno, + const std::string& _smallestkey, + const std::string& _largestkey, + bool _being_compacted) : + size(_size), name(_file_name), + db_path(_path), smallest_seqno(_smallest_seqno), largest_seqno(_largest_seqno), + smallestkey(_smallestkey), largestkey(_largestkey), + being_compacted(_being_compacted) {} + + // File size in bytes. + uint64_t size; + // The name of the file. + std::string name; + // The full path where the file is located.
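A classic AssociativeMergeOperator sketch matching the Merge() contract above: an 8-byte little-endian counter merged by addition (the class name and value layout are assumptions of this example):

#include <cstdint>
#include <cstring>
#include <string>
#include "rocksdb/merge_operator.h"

class UInt64AddOperator : public rocksdb::AssociativeMergeOperator {
 public:
  bool Merge(const rocksdb::Slice& /*key*/, const rocksdb::Slice* existing,
             const rocksdb::Slice& value, std::string* new_value,
             rocksdb::Logger* /*logger*/) const override {
    uint64_t base = 0, delta = 0;
    if (existing && existing->size() == sizeof(base)) {
      memcpy(&base, existing->data(), sizeof(base));  // missing key => base 0
    }
    if (value.size() == sizeof(delta)) {
      memcpy(&delta, value.data(), sizeof(delta));
    }
    base += delta;
    new_value->assign(reinterpret_cast<const char*>(&base), sizeof(base));
    return true;  // false would be treated as data corruption by the library
  }
  const char* Name() const override { return "UInt64AddOperator"; }
};
// Installed via: options.merge_operator = std::make_shared<UInt64AddOperator>();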
+ std::string db_path; + + SequenceNumber smallest_seqno; // Smallest sequence number in file. + SequenceNumber largest_seqno; // Largest sequence number in file. + std::string smallestkey; // Smallest user defined key in the file. + std::string largestkey; // Largest user defined key in the file. + bool being_compacted; // true if the file is currently being compacted. +}; + +// The full set of metadata associated with each SST file. +struct LiveFileMetaData : SstFileMetaData { + std::string column_family_name; // Name of the column family + int level; // Level at which this file resides. +}; + + + +} // namespace rocksdb diff --git a/external/rocksdb/include/rocksdb/options.h b/external/rocksdb/include/rocksdb/options.h new file mode 100755 index 0000000000..ac14fa570a --- /dev/null +++ b/external/rocksdb/include/rocksdb/options.h @@ -0,0 +1,1628 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_ +#define STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_ + +#include +#include +#include +#include +#include +#include +#include + +#include "rocksdb/listener.h" +#include "rocksdb/universal_compaction.h" +#include "rocksdb/version.h" +#include "rocksdb/write_buffer_manager.h" + +#ifdef max +#undef max +#endif + +namespace rocksdb { + +class Cache; +class CompactionFilter; +class CompactionFilterFactory; +class Comparator; +class Env; +enum InfoLogLevel : unsigned char; +class SstFileManager; +class FilterPolicy; +class Logger; +class MergeOperator; +class Snapshot; +class TableFactory; +class MemTableRepFactory; +class TablePropertiesCollectorFactory; +class RateLimiter; +class Slice; +class SliceTransform; +class Statistics; +class InternalKeyComparator; +class WalFilter; + +// DB contents are stored in a set of blocks, each of which holds a +// sequence of key,value pairs. Each block may be compressed before +// being stored in a file. The following enum describes which +// compression method (if any) is used to compress a block. +enum CompressionType : unsigned char { + // NOTE: do not change the values of existing entries, as these are + // part of the persistent format on disk. + kNoCompression = 0x0, + kSnappyCompression = 0x1, + kZlibCompression = 0x2, + kBZip2Compression = 0x3, + kLZ4Compression = 0x4, + kLZ4HCCompression = 0x5, + kXpressCompression = 0x6, + // zstd format is not finalized yet so it's subject to changes. + kZSTDNotFinalCompression = 0x40, + + // kDisableCompressionOption is used to disable some compression options. + kDisableCompressionOption = 0xff, +}; + +enum CompactionStyle : char { + // level based compaction style + kCompactionStyleLevel = 0x0, + // Universal compaction style + // Not supported in ROCKSDB_LITE. + kCompactionStyleUniversal = 0x1, + // FIFO compaction style + // Not supported in ROCKSDB_LITE + kCompactionStyleFIFO = 0x2, + // Disable background compaction. Compaction jobs are submitted + // via CompactFiles(). 
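The metadata structs above are filled in by DB::GetColumnFamilyMetaData() (declared in rocksdb/db.h); a small dump sketch:

#include <cstdio>
#include "rocksdb/db.h"
#include "rocksdb/metadata.h"

void PrintLevelSizes(rocksdb::DB* db) {
  rocksdb::ColumnFamilyMetaData meta;
  db->GetColumnFamilyMetaData(&meta);  // default column family
  for (const auto& level : meta.levels) {
    printf("L%d: %llu bytes in %zu files\n", level.level,
           static_cast<unsigned long long>(level.size), level.files.size());
  }
}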
+ // Not supported in ROCKSDB_LITE + kCompactionStyleNone = 0x3, +}; + +// In level-based compaction, this determines which file from a level is +// picked to merge into the next level. We suggest trying +// kMinOverlappingRatio first when tuning your database. +enum CompactionPri : char { + // Slightly prioritize larger files by size compensated by #deletes + kByCompensatedSize = 0x0, + // First compact files whose data's latest update time is oldest. + // Try this if you only update some hot keys in small ranges. + kOldestLargestSeqFirst = 0x1, + // First compact files whose range hasn't been compacted to the next level + // for the longest. If your updates are random across the key space, + // write amplification is slightly better with this option. + kOldestSmallestSeqFirst = 0x2, + // First compact files whose ratio between overlapping size in next level + // and its size is the smallest. In many cases this can optimize write + // amplification. + kMinOverlappingRatio = 0x3, +}; + +enum class WALRecoveryMode : char { + // Original levelDB recovery + // We tolerate incomplete records in trailing data on all logs + // Use case : This is legacy behavior (default) + kTolerateCorruptedTailRecords = 0x00, + // Recover from clean shutdown + // We don't expect to find any corruption in the WAL + // Use case : This is ideal for unit tests and rare applications that + // require a high consistency guarantee + kAbsoluteConsistency = 0x01, + // Recover to point-in-time consistency + // We stop the WAL playback on discovering WAL inconsistency + // Use case : Ideal for systems that have disk controller cache, like + // hard disk or SSD without super capacitor, that stores related data + kPointInTimeRecovery = 0x02, + // Recovery after a disaster + // We ignore any corruption in the WAL and try to salvage as much data as + // possible + // Use case : Ideal for last-ditch efforts to recover data, or systems that + // operate with low grade unrelated data + kSkipAnyCorruptedRecords = 0x03, +}; + +struct CompactionOptionsFIFO { + // once the total size of table files reaches this, we will delete the oldest + // table file + // Default: 1GB + uint64_t max_table_files_size; + + CompactionOptionsFIFO() : max_table_files_size(1 * 1024 * 1024 * 1024) {} +}; + +// Compression options for different compression algorithms like Zlib +struct CompressionOptions { + int window_bits; + int level; + int strategy; + // Maximum size of dictionary used to prime the compression library. Currently + // this dictionary will be constructed by sampling the first output file in a + // subcompaction when the target level is bottommost. This dictionary will be + // loaded into the compression library before compressing/uncompressing each + // data block of subsequent files in the subcompaction. Effectively, this + // improves compression ratios when there are repetitions across data blocks. + // A value of 0 indicates the feature is disabled. + // Default: 0. + uint32_t max_dict_bytes; + + CompressionOptions() + : window_bits(-14), level(-1), strategy(0), max_dict_bytes(0) {} + CompressionOptions(int wbits, int _lev, int _strategy, int _max_dict_bytes) + : window_bits(wbits), + level(_lev), + strategy(_strategy), + max_dict_bytes(_max_dict_bytes) {} +}; + +enum UpdateStatus { // Return status for in-place update callback + UPDATE_FAILED = 0, // Nothing to update + UPDATED_INPLACE = 1, // Value updated inplace + UPDATED = 2, // No inplace update.
Merged value set +}; + +struct DbPath { + std::string path; + uint64_t target_size; // Target size of total files under the path, in bytes. + + DbPath() : target_size(0) {} + DbPath(const std::string& p, uint64_t t) : path(p), target_size(t) {} +}; + +struct Options; + +struct ColumnFamilyOptions { + // This function reverts options to a previous version. Only 4.6 or later + // versions are supported. + ColumnFamilyOptions* OldDefaults(int rocksdb_major_version = 4, + int rocksdb_minor_version = 6); + + // Some functions that make it easier to optimize RocksDB + // Use this if your DB is very small (like under 1GB) and you don't want to + // spend lots of memory for memtables. + ColumnFamilyOptions* OptimizeForSmallDb(); + + // Use this if you don't need to keep the data sorted, i.e. you'll never use + // an iterator, only Put() and Get() API calls + // + // Not supported in ROCKSDB_LITE + ColumnFamilyOptions* OptimizeForPointLookup( + uint64_t block_cache_size_mb); + + // Default values for some parameters in ColumnFamilyOptions are not + // optimized for heavy workloads and big datasets, which means you might + // observe write stalls under some conditions. As a starting point for tuning + // RocksDB options, use the following two functions: + // * OptimizeLevelStyleCompaction -- optimizes level style compaction + // * OptimizeUniversalStyleCompaction -- optimizes universal style compaction + // Universal style compaction is focused on reducing Write Amplification + // Factor for big data sets, but increases Space Amplification. You can learn + // more about the different styles here: + // https://github.com/facebook/rocksdb/wiki/Rocksdb-Architecture-Guide + // Make sure to also call IncreaseParallelism(), which will provide the + // biggest performance gains. + // Note: we might use more memory than memtable_memory_budget during high + // write rate periods + // + // OptimizeUniversalStyleCompaction is not supported in ROCKSDB_LITE + ColumnFamilyOptions* OptimizeLevelStyleCompaction( + uint64_t memtable_memory_budget = 512 * 1024 * 1024); + ColumnFamilyOptions* OptimizeUniversalStyleCompaction( + uint64_t memtable_memory_budget = 512 * 1024 * 1024); + + // ------------------- + // Parameters that affect behavior + + // Comparator used to define the order of keys in the table. + // Default: a comparator that uses lexicographic byte-wise ordering + // + // REQUIRES: The client must ensure that the comparator supplied + // here has the same name and orders keys *exactly* the same as the + // comparator provided to previous open calls on the same DB. + const Comparator* comparator; + + // REQUIRES: The client must provide a merge operator if Merge operation + // needs to be accessed. Calling Merge on a DB without a merge operator + // would result in Status::NotSupported. The client must ensure that the + // merge operator supplied here has the same name and *exactly* the same + // semantics as the merge operator provided to previous open calls on + // the same DB. The only exception is reserved for upgrade, where a DB + // previously without a merge operator is introduced to Merge operation + // for the first time. It's necessary to specify a merge operator when + // opening the DB in this case. + // Default: nullptr + std::shared_ptr<MergeOperator> merge_operator; + + // A single CompactionFilter instance to call into during compaction. + // Allows an application to modify/delete a key-value during background + // compaction.
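A sketch wiring the FIFO knobs from CompactionOptionsFIFO above (the 512MB cap is an illustrative value):

#include "rocksdb/options.h"

void UseFifoCompaction(rocksdb::Options* options) {
  // FIFO keeps a bounded set of L0 files and deletes the oldest ones,
  // which suits time-series or cache-like data.
  options->compaction_style = rocksdb::kCompactionStyleFIFO;
  options->compaction_options_fifo.max_table_files_size =
      512ull * 1024 * 1024;
}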
+ // + // If the client requires a new compaction filter to be used for different + // compaction runs, it can specify compaction_filter_factory instead of this + // option. The client should specify only one of the two. + // compaction_filter takes precedence over compaction_filter_factory if + // client specifies both. + // + // If multithreaded compaction is being used, the supplied CompactionFilter + // instance may be used from different threads concurrently and so should be + // thread-safe. + // + // Default: nullptr + const CompactionFilter* compaction_filter; + + // This is a factory that provides compaction filter objects which allow + // an application to modify/delete a key-value during background compaction. + // + // A new filter will be created on each compaction run. If multithreaded + // compaction is being used, each created CompactionFilter will only be used + // from a single thread and so does not need to be thread-safe. + // + // Default: nullptr + std::shared_ptr<CompactionFilterFactory> compaction_filter_factory; + + // ------------------- + // Parameters that affect performance + + // Amount of data to build up in memory (backed by an unsorted log + // on disk) before converting to a sorted on-disk file. + // + // Larger values increase performance, especially during bulk loads. + // Up to max_write_buffer_number write buffers may be held in memory + // at the same time, + // so you may wish to adjust this parameter to control memory usage. + // Also, a larger write buffer will result in a longer recovery time + // the next time the database is opened. + // + // Note that write_buffer_size is enforced per column family. + // See db_write_buffer_size for sharing memory across column families. + // + // Default: 64MB + // + // Dynamically changeable through SetOptions() API + size_t write_buffer_size; + + // The maximum number of write buffers that are built up in memory. + // The default and the minimum number is 2, so that when 1 write buffer + // is being flushed to storage, new writes can continue to the other + // write buffer. + // If max_write_buffer_number > 3, writing will be slowed down to + // options.delayed_write_rate if we are writing to the last write buffer + // allowed. + // + // Default: 2 + // + // Dynamically changeable through SetOptions() API + int max_write_buffer_number; + + // The minimum number of write buffers that will be merged together + // before writing to storage. If set to 1, then + // all write buffers are flushed to L0 as individual files and this increases + // read amplification because a get request has to check all of these + // files. Also, an in-memory merge may result in writing less + // data to storage if there are duplicate records in each of these + // individual write buffers. Default: 1 + int min_write_buffer_number_to_merge; + + // The total maximum number of write buffers to maintain in memory including + // copies of buffers that have already been flushed. Unlike + // max_write_buffer_number, this parameter does not affect flushing. + // This controls the minimum amount of write history that will be available + // in memory for conflict checking when Transactions are used. + // + // When using an OptimisticTransactionDB: + // If this value is too low, some transactions may fail at commit time due + // to not being able to determine whether there were any write conflicts.
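A tuning sketch for the three write-buffer knobs documented above (the numbers are illustrative, not recommendations):

#include "rocksdb/options.h"

void TuneWriteBuffers(rocksdb::Options* options) {
  options->write_buffer_size = 128 * 1024 * 1024;  // 128MB per memtable
  options->max_write_buffer_number = 4;            // at most 4 held in memory
  options->min_write_buffer_number_to_merge = 2;   // merge 2 before flushing
}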
+ // + // When using a TransactionDB: + // If Transaction::SetSnapshot is used, TransactionDB will read either + // in-memory write buffers or SST files to do write-conflict checking. + // Increasing this value can reduce the number of reads to SST files + // done for conflict detection. + // + // Setting this value to 0 will cause write buffers to be freed immediately + // after they are flushed. + // If this value is set to -1, 'max_write_buffer_number' will be used. + // + // Default: + // If using a TransactionDB/OptimisticTransactionDB, the default value will + // be set to the value of 'max_write_buffer_number' if it is not explicitly + // set by the user. Otherwise, the default is 0. + int max_write_buffer_number_to_maintain; + + // Compress blocks using the specified compression algorithm. This + // parameter can be changed dynamically. + // + // Default: kSnappyCompression, if it's supported. If snappy is not linked + // with the library, the default is kNoCompression. + // + // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz: + // ~200-500MB/s compression + // ~400-800MB/s decompression + // Note that these speeds are significantly faster than most + // persistent storage speeds, and therefore it is typically never + // worth switching to kNoCompression. Even if the input data is + // incompressible, the kSnappyCompression implementation will + // efficiently detect that and will switch to uncompressed mode. + CompressionType compression; + + // Different levels can have different compression policies. There + // are cases where most lower levels would like to use quick compression + // algorithms while the higher levels (which have more data) use + // compression algorithms that have better compression but could + // be slower. This array, if non-empty, should have an entry for + // each level of the database; these override the value specified in + // the previous field 'compression'. + // + // NOTICE if level_compaction_dynamic_level_bytes=true, + // compression_per_level[0] still determines L0, but other elements + // of the array are based on the base level (the level L0 files are merged + // to), and may not match the level users see from the info log for metadata. + // If L0 files are merged to level-n, then, for i>0, compression_per_level[i] + // determines the compaction type for level n+i-1. + // For example, if we have 5 levels, and we determine to merge L0 + // data to L4 (which means L1..L3 will be empty), then the new files going to + // L4 use compression type compression_per_level[1]. + // If L0 is now merged to L2, data going to L2 will be compressed + // according to compression_per_level[1], L3 using compression_per_level[2] + // and L4 using compression_per_level[3]. Compaction for each level can + // change as the data grows. + std::vector<CompressionType> compression_per_level; + + // Compression algorithm that will be used for the bottommost level that + // contains files. If level-compaction is used, this option will only affect + // levels after the base level. + // + // Default: kDisableCompressionOption (Disabled) + CompressionType bottommost_compression; + + // different options for compression algorithms + CompressionOptions compression_opts; + + // If non-nullptr, use the specified function to determine the + // prefixes for keys. These prefixes will be placed in the filter. + // Depending on the workload, this can reduce the read-IOP cost of + // scans when a prefix is passed via ReadOptions to + // db.NewIterator().
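A sketch of the per-level compression list described above; it assumes the common behavior that levels beyond the end of the list reuse its last entry:

#include "rocksdb/options.h"

void MixCompressionByLevel(rocksdb::Options* options) {
  // Cheap (or no) compression where data is rewritten often, stronger below.
  options->compression_per_level = {rocksdb::kNoCompression,
                                    rocksdb::kSnappyCompression,
                                    rocksdb::kZlibCompression};
}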
+
+ // If non-nullptr, use the specified function to determine the
+ // prefixes for keys. These prefixes will be placed in the filter.
+ // Depending on the workload, this can reduce the read-IOP cost of
+ // scans when a prefix is passed via ReadOptions to
+ // db.NewIterator(). For prefix filtering to work properly,
+ // "prefix_extractor" and "comparator" must be such that the following
+ // properties hold:
+ //
+ // 1) key.starts_with(prefix(key))
+ // 2) Compare(prefix(key), key) <= 0.
+ // 3) If Compare(k1, k2) <= 0, then Compare(prefix(k1), prefix(k2)) <= 0
+ // 4) prefix(prefix(key)) == prefix(key)
+ //
+ // Default: nullptr
+ std::shared_ptr<const SliceTransform> prefix_extractor;
+
+ // Number of levels for this database
+ int num_levels;
+
+ // Number of files to trigger level-0 compaction. A value < 0 means that
+ // level-0 compaction will not be triggered by the number of files at all.
+ //
+ // Default: 4
+ //
+ // Dynamically changeable through SetOptions() API
+ int level0_file_num_compaction_trigger;
+
+ // Soft limit on number of level-0 files. We start slowing down writes at this
+ // point. A value < 0 means that no write slowdown will be triggered by
+ // the number of files in level-0.
+ //
+ // Dynamically changeable through SetOptions() API
+ int level0_slowdown_writes_trigger;
+
+ // Maximum number of level-0 files. We stop writes at this point.
+ //
+ // Dynamically changeable through SetOptions() API
+ int level0_stop_writes_trigger;
+
+ // This does not do anything anymore. Deprecated.
+ int max_mem_compaction_level;
+
+ // Target file size for compaction.
+ // target_file_size_base is the per-file size for level-1.
+ // Target file size for level L can be calculated by
+ // target_file_size_base * (target_file_size_multiplier ^ (L-1)).
+ // For example, if target_file_size_base is 2MB and
+ // target_file_size_multiplier is 10, then each file on level-1 will
+ // be 2MB, each file on level-2 will be 20MB,
+ // and each file on level-3 will be 200MB.
+ //
+ // Default: 64MB.
+ //
+ // Dynamically changeable through SetOptions() API
+ uint64_t target_file_size_base;
+
+ // By default target_file_size_multiplier is 1, which means
+ // by default files in different levels will have similar size.
+ //
+ // Dynamically changeable through SetOptions() API
+ int target_file_size_multiplier;
+
+ // Control maximum total data size for a level.
+ // max_bytes_for_level_base is the max total for level-1.
+ // Maximum number of bytes for level L can be calculated as
+ // (max_bytes_for_level_base) * (max_bytes_for_level_multiplier ^ (L-1)).
+ // For example, if max_bytes_for_level_base is 200MB, and if
+ // max_bytes_for_level_multiplier is 10, total data size for level-1
+ // will be 200MB, total file size for level-2 will be 2GB,
+ // and total file size for level-3 will be 20GB.
+ //
+ // Default: 256MB.
+ //
+ // Dynamically changeable through SetOptions() API
+ uint64_t max_bytes_for_level_base;
+
+ // If true, RocksDB will pick the target size of each level dynamically.
+ // We will pick a base level b >= 1. L0 will be directly merged into level b,
+ // instead of always into level 1. Levels 1 to b-1 need to be empty.
+ // We try to pick b and its target size so that
+ // 1. target size is in the range of
+ //    (max_bytes_for_level_base / max_bytes_for_level_multiplier,
+ //     max_bytes_for_level_base]
+ // 2. target size of the last level (level num_levels-1) equals the actual
+ //    size of the level.
+ // At the same time max_bytes_for_level_multiplier and
+ // max_bytes_for_level_multiplier_additional are still satisfied.
+ //
+ // With this option on, starting from an empty DB, we make the last level the
+ // base level, which means merging L0 data into the last level, until it
+ // exceeds max_bytes_for_level_base. Then we make the second-to-last level
+ // the base level and start merging L0 data into it, with its target size
+ // set to 1/max_bytes_for_level_multiplier of the last level's actual size.
+ // As data accumulates further, the base level moves to the third-to-last
+ // level, and so on.
+ //
+ // For example, assume max_bytes_for_level_multiplier=10, num_levels=6,
+ // and max_bytes_for_level_base=10MB.
+ // Target sizes of levels 1 to 5 start as:
+ // [- - - - 10MB]
+ // with the base level being level 5. Target sizes of levels 1 to 4 are not
+ // applicable because they will not be used.
+ // When the size of level 5 grows to more than 10MB, say 11MB, we make
+ // level 4 the base level and the targets look like:
+ // [- - - 1.1MB 11MB]
+ // As data accumulates, the size targets are tuned based on the actual data
+ // in level 5. When level 5 has 50MB of data, the targets look like:
+ // [- - - 5MB 50MB]
+ // until level 5's actual size exceeds 100MB, say 101MB. Now if we kept
+ // level 4 as the base level, its target size would need to be 10.1MB, which
+ // doesn't satisfy the target size range. So now we make level 3 the base
+ // level, and the target sizes of the levels look like:
+ // [- - 1.01MB 10.1MB 101MB]
+ // In the same way, as level 5 grows further, all levels' targets grow,
+ // like
+ // [- - 5MB 50MB 500MB]
+ // until level 5 exceeds 1000MB and becomes 1001MB, when we make level 2 the
+ // base level and the levels' target sizes become:
+ // [- 1.001MB 10.01MB 100.1MB 1001MB]
+ // and so on.
+ //
+ // This gives max_bytes_for_level_multiplier priority over
+ // max_bytes_for_level_base, for a more predictable LSM tree shape. It is
+ // useful for limiting worst-case space amplification.
+ //
+ // max_bytes_for_level_multiplier_additional is ignored with this flag on.
+ //
+ // Turning this feature on or off for an existing DB can cause an unexpected
+ // LSM tree structure, so it's not recommended.
+ //
+ // NOTE: this option is experimental
+ //
+ // Default: false
+ bool level_compaction_dynamic_level_bytes;
+
+ // Default: 10.
+ //
+ // Dynamically changeable through SetOptions() API
+ int max_bytes_for_level_multiplier;
+
+ // Different max-size multipliers for different levels.
+ // These are multiplied by max_bytes_for_level_multiplier to arrive
+ // at the max-size of each level.
+ //
+ // Default: 1
+ //
+ // Dynamically changeable through SetOptions() API
+ std::vector<int> max_bytes_for_level_multiplier_additional;
+
+ // Maximum number of bytes in all compacted files. We avoid expanding
+ // the lower-level file set of a compaction if it would make the
+ // total compaction cover more than
+ // (expanded_compaction_factor * targetFileSizeLevel()) bytes.
+ //
+ // Dynamically changeable through SetOptions() API
+ int expanded_compaction_factor;
+
+ // Maximum number of bytes in all source files to be compacted in a
+ // single compaction run. We avoid picking too many files in the
+ // source level so that the total source bytes of the compaction
+ // do not exceed
+ // (source_compaction_factor * targetFileSizeLevel()) bytes.
+ // Default: 1, i.e. pick one max-file-size's worth of data as the source of
+ // a compaction.
+ //
+ // Dynamically changeable through SetOptions() API
+ int source_compaction_factor;
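Several of the fields above are marked "Dynamically changeable through SetOptions() API". A sketch of what that looks like against an already-open DB, assuming the DB::SetOptions overload that takes option names and values as strings:

    #include <string>
    #include <unordered_map>
    #include "rocksdb/db.h"

    // Illustrative only: relax L0 compaction triggers on a live DB.
    void RelaxCompactionTriggers(rocksdb::DB* db) {
      std::unordered_map<std::string, std::string> changes = {
          {"level0_file_num_compaction_trigger", "8"},
          {"level0_slowdown_writes_trigger", "17"},
          {"target_file_size_base", "67108864"},  // 64MB, passed as a string
      };
      db->SetOptions(changes);  // check the returned status in real code
    }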
+
+ // Control maximum bytes of overlaps in grandparent (i.e., level+2) before we
+ // stop building a single file in a level->level+1 compaction.
+ //
+ // Dynamically changeable through SetOptions() API
+ int max_grandparent_overlap_factor;
+
+ // DEPRECATED -- this option is no longer used.
+ // Puts are delayed to options.delayed_write_rate when any level has a
+ // compaction score that exceeds soft_rate_limit. This is ignored when == 0.0.
+ //
+ // Default: 0 (disabled)
+ //
+ // Dynamically changeable through SetOptions() API
+ double soft_rate_limit;
+
+ // DEPRECATED -- this option is no longer used.
+ double hard_rate_limit;
+
+ // All writes will be slowed down to at least delayed_write_rate if the
+ // estimated bytes needed for compaction exceed this threshold.
+ //
+ // Default: 64GB
+ uint64_t soft_pending_compaction_bytes_limit;
+
+ // All writes are stopped if the estimated bytes needed for compaction
+ // exceed this threshold.
+ //
+ // Default: 256GB
+ uint64_t hard_pending_compaction_bytes_limit;
+
+ // DEPRECATED -- this option is no longer used.
+ unsigned int rate_limit_delay_max_milliseconds;
+
+ // Size of one block in arena memory allocation.
+ // If <= 0, a proper value is automatically calculated (usually 1/8 of
+ // write_buffer_size, rounded up to a multiple of 4KB).
+ //
+ // There are two additional restrictions on the specified size:
+ // (1) the size should be in the range [4096, 2 << 30], and
+ // (2) it should be a multiple of the CPU word size (which helps with
+ // memory alignment).
+ //
+ // We'll automatically check and adjust the size number to make sure it
+ // conforms to the restrictions.
+ //
+ // Default: 0
+ //
+ // Dynamically changeable through SetOptions() API
+ size_t arena_block_size;
+
+ // Disable automatic compactions. Manual compactions can still
+ // be issued on this column family.
+ //
+ // Dynamically changeable through SetOptions() API
+ bool disable_auto_compactions;
+
+ // DEPRECATED
+ // Does not have any effect.
+ bool purge_redundant_kvs_while_flush;
+
+ // The compaction style. Default: kCompactionStyleLevel
+ CompactionStyle compaction_style;
+
+ // If compaction_style is kCompactionStyleLevel, this determines which files
+ // within each level are prioritized to be picked for compaction.
+ // Default: kByCompensatedSize
+ CompactionPri compaction_pri;
+
+ // If true, compaction will verify the checksum on every read that happens
+ // as part of compaction.
+ //
+ // Default: true
+ //
+ // Dynamically changeable through SetOptions() API
+ bool verify_checksums_in_compaction;
+
+ // The options needed to support Universal Style compactions
+ CompactionOptionsUniversal compaction_options_universal;
+
+ // The options for FIFO compaction style
+ CompactionOptionsFIFO compaction_options_fifo;
+
+ // An iterator's Next() sequentially skips over keys with the same
+ // user key unless this option is set. This number specifies the number
+ // of keys (with the same user key) that will be sequentially
+ // skipped before a reseek is issued.
+ //
+ // Default: 8
+ //
+ // Dynamically changeable through SetOptions() API
+ uint64_t max_sequential_skip_in_iterations;
+
+ // This is a factory that provides MemTableRep objects.
+ // Default: a factory that provides a skip-list-based implementation of
+ // MemTableRep.
+ std::shared_ptr<MemTableRepFactory> memtable_factory;
+
+ // This is a factory that provides TableFactory objects.
+ // Default: a block-based table factory that provides a default
+ // implementation of TableBuilder and TableReader with default
+ // BlockBasedTableOptions.
+ std::shared_ptr<TableFactory> table_factory;
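As the note just below explains, block-based table knobs live in BlockBasedTableOptions, and a customized setup goes through NewBlockBasedTableFactory(). A hedged sketch of that wiring (types and functions from rocksdb/table.h, rocksdb/cache.h, and rocksdb/filter_policy.h; the cache and block sizes are arbitrary):

    #include "rocksdb/cache.h"
    #include "rocksdb/filter_policy.h"
    #include "rocksdb/options.h"
    #include "rocksdb/table.h"

    // Illustrative only: customize block-based table options via the factory.
    rocksdb::ColumnFamilyOptions MakeTableFactoryCfOptions() {
      rocksdb::BlockBasedTableOptions table_opts;
      table_opts.block_size = 16 * 1024;                         // 16KB blocks
      table_opts.block_cache = rocksdb::NewLRUCache(256 << 20);  // 256MB cache
      table_opts.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
      rocksdb::ColumnFamilyOptions cf;
      cf.table_factory.reset(rocksdb::NewBlockBasedTableFactory(table_opts));
      return cf;
    }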
+
+ // Block-based table related options are moved to BlockBasedTableOptions.
+ // Related options that were originally here but are now moved include:
+ //   no_block_cache
+ //   block_cache
+ //   block_cache_compressed
+ //   block_size
+ //   block_size_deviation
+ //   block_restart_interval
+ //   filter_policy
+ //   whole_key_filtering
+ // If you'd like to customize some of these options, you will need to
+ // use NewBlockBasedTableFactory() to construct a new table factory.
+
+ // This option allows the user to collect their own statistics of interest
+ // about the tables.
+ // Default: empty vector -- no user-defined statistics collection will be
+ // performed.
+ typedef std::vector<std::shared_ptr<TablePropertiesCollectorFactory>>
+     TablePropertiesCollectorFactories;
+ TablePropertiesCollectorFactories table_properties_collector_factories;
+
+ // Allows thread-safe inplace updates. If this is true, there is no way to
+ // achieve point-in-time consistency using snapshot or iterator (assuming
+ // concurrent updates). Hence iterator and multi-get will return results
+ // which are not consistent as of any point-in-time.
+ // If the inplace_callback function is not set,
+ // Put(key, new_value) will update the existing_value in place iff
+ //   * key exists in current memtable
+ //   * new sizeof(new_value) <= sizeof(existing_value)
+ //   * existing_value for that key is a put i.e. kTypeValue
+ // If the inplace_callback function is set, check the doc for
+ // inplace_callback.
+ // Default: false.
+ bool inplace_update_support;
+
+ // Number of locks used for inplace update.
+ // Default: 10000, if inplace_update_support = true, else 0.
+ //
+ // Dynamically changeable through SetOptions() API
+ size_t inplace_update_num_locks;
+
+ // existing_value - pointer to the previous value (from both memtable and
+ //                  sst). nullptr if the key doesn't exist.
+ // existing_value_size - pointer to the size of existing_value.
+ //                  nullptr if the key doesn't exist.
+ // delta_value - delta value to be merged with the existing_value.
+ //                  Stored in transaction logs.
+ // merged_value - set when the delta is applied to the previous value.
+
+ // Applicable only when inplace_update_support is true,
+ // this callback function is called at the time of updating the memtable
+ // as part of a Put operation, let's say Put(key, delta_value). It allows the
+ // 'delta_value' specified as part of the Put operation to be merged with
+ // an 'existing_value' of the key in the database.
+
+ // If the merged value is smaller in size than the 'existing_value',
+ // then this function can update the 'existing_value' buffer in place and
+ // the corresponding 'existing_value_size' pointer, if it wishes to.
+ // The callback should return UpdateStatus::UPDATED_INPLACE in this case.
+ // (Note that the snapshot semantics of the rocksdb Iterator are no longer
+ // atomic then.)
+
+ // If the merged value is larger in size than the 'existing_value' or the
+ // application does not wish to modify the 'existing_value' buffer in place,
+ // then the merged value should be returned via *merge_value. It is set by
+ // merging the 'existing_value' and the Put 'delta_value'. The callback should
+ // return UpdateStatus::UPDATED in this case. This merged value will be added
+ // to the memtable.
+
+ // If merging fails or the application does not wish to take any action,
+ // then the callback should return UpdateStatus::UPDATE_FAILED.
+
+ // Please remember that the original call from the application is Put(key,
+ // delta_value). So the transaction log (if enabled) will still contain (key,
+ // delta_value). The 'merged_value' is not stored in the transaction log.
+ // Hence the inplace_callback function should be consistent across db reopens.
+
+ // Default: nullptr
+ UpdateStatus (*inplace_callback)(char* existing_value,
+                                  uint32_t* existing_value_size,
+                                  Slice delta_value,
+                                  std::string* merged_value);
+
+ // If prefix_extractor is set and memtable_prefix_bloom_size_ratio is not 0,
+ // create a prefix bloom filter for the memtable with a size of
+ // write_buffer_size * memtable_prefix_bloom_size_ratio.
+ // If it is larger than 0.25, it is sanitized to 0.25.
+ //
+ // Default: 0 (disabled)
+ //
+ // Dynamically changeable through SetOptions() API
+ double memtable_prefix_bloom_size_ratio;
+
+ // Page size for huge pages for the arena used by the memtable. If <= 0, it
+ // won't allocate from huge pages but from malloc.
+ // Users are responsible for reserving huge pages for it to allocate from.
+ // For example:
+ //   sysctl -w vm.nr_hugepages=20
+ // See the linux doc Documentation/vm/hugetlbpage.txt.
+ // If there aren't enough free huge pages available, it will fall back to
+ // malloc.
+ //
+ // Dynamically changeable through SetOptions() API
+ size_t memtable_huge_page_size;
+
+ // Control locality of bloom filter probes to improve cache miss rate.
+ // This option only applies to memtable prefix bloom and plaintable
+ // prefix bloom. It essentially limits every bloom checking to one cache
+ // line. This optimization is turned off when set to 0; a positive number
+ // turns it on.
+ // Default: 0
+ uint32_t bloom_locality;
+
+ // Maximum number of successive merge operations on a key in the memtable.
+ //
+ // When a merge operation is added to the memtable and the maximum number of
+ // successive merges is reached, the value of the key will be calculated and
+ // inserted into the memtable instead of the merge operation. This will
+ // ensure that there are never more than max_successive_merges merge
+ // operations in the memtable.
+ //
+ // Default: 0 (disabled)
+ //
+ // Dynamically changeable through SetOptions() API
+ size_t max_successive_merges;
+
+ // The number of partial merge operands to accumulate before partial
+ // merge will be performed. Partial merge will not be called
+ // if the list of values to merge is shorter than
+ // min_partial_merge_operands.
+ //
+ // If min_partial_merge_operands < 2, then it will be treated as 2.
+ //
+ // Default: 2
+ uint32_t min_partial_merge_operands;
+
+ // This flag specifies that the implementation should optimize the filters
+ // mainly for cases where keys are found rather than also optimizing for keys
+ // missed. This would be used in cases where the application knows that
+ // there are very few misses or the performance in the case of misses is not
+ // important.
+ //
+ // For now, this flag allows us to not store filters for the last level, i.e.
+ // the largest level which contains data of the LSM store. For keys which
+ // are hits, the filters in this level are not useful because we will search
+ // for the data anyway. NOTE: the filters in other levels are still useful
+ // even for key hits because they tell us whether to look in that level or go
+ // to the higher level.
+ //
+ // Default: false
+ bool optimize_filters_for_hits;
+
+ // After writing every SST file, reopen it and read all the keys.
+ // Default: false
+ bool paranoid_file_checks;
+
+ // Measure IO stats in compactions and flushes, if true.
+ // Default: false
+ bool report_bg_io_stats;
+
+ // Create ColumnFamilyOptions with default values for all fields
+ ColumnFamilyOptions();
+ // Create ColumnFamilyOptions from Options
+ explicit ColumnFamilyOptions(const Options& options);
+
+ void Dump(Logger* log) const;
+};
+
+struct DBOptions {
+ // This function resets the options to their values as of RocksDB 4.6.
+ DBOptions* OldDefaults(int rocksdb_major_version = 4,
+                        int rocksdb_minor_version = 6);
+
+ // Some functions that make it easier to optimize RocksDB
+
+ // Use this if your DB is very small (like under 1GB) and you don't want to
+ // spend lots of memory for memtables.
+ DBOptions* OptimizeForSmallDb();
+
+#ifndef ROCKSDB_LITE
+ // By default, RocksDB uses only one background thread for flush and
+ // compaction. Calling this function will set it up such that a total of
+ // `total_threads` is used. A good value for `total_threads` is the number
+ // of cores. You almost definitely want to call this function if your system
+ // is bottlenecked by RocksDB.
+ DBOptions* IncreaseParallelism(int total_threads = 16);
+#endif  // ROCKSDB_LITE
+
+ // If true, the database will be created if it is missing.
+ // Default: false
+ bool create_if_missing;
+
+ // If true, missing column families will be automatically created.
+ // Default: false
+ bool create_missing_column_families;
+
+ // If true, an error is raised if the database already exists.
+ // Default: false
+ bool error_if_exists;
+
+ // If true, RocksDB will aggressively check consistency of the data.
+ // Also, if any of the writes to the database fails (Put, Delete, Merge,
+ // Write), the database will switch to read-only mode and fail all other
+ // Write operations.
+ // In most cases you want this to be set to true.
+ // Default: true
+ bool paranoid_checks;
+
+ // Use the specified object to interact with the environment,
+ // e.g. to read/write files, schedule background work, etc.
+ // Default: Env::Default()
+ Env* env;
+
+ // Use to control the write rate of flush and compaction. Flush has higher
+ // priority than compaction. Rate limiting is disabled if nullptr.
+ // If a rate limiter is enabled, bytes_per_sync is set to 1MB by default.
+ // Default: nullptr
+ std::shared_ptr<RateLimiter> rate_limiter;
+
+ // Use to track SST files and control their file deletion rate.
+ //
+ // Features:
+ //  - Throttle the deletion rate of the SST files.
+ //  - Keep track of the total size of all SST files.
+ //  - Set a maximum allowed space limit for SST files that, when reached,
+ //    makes the DB stop doing any further flushes or compactions and sets
+ //    the background error.
+ //  - Can be shared between multiple dbs.
+ // Limitations:
+ //  - Only tracks and throttles deletes of SST files in the
+ //    first db_path (db_name if db_paths is empty).
+ //
+ // Default: nullptr
+ std::shared_ptr<SstFileManager> sst_file_manager;
+
+ // Any internal progress/error information generated by the db will
+ // be written to info_log if it is non-nullptr, or to a file stored
+ // in the same directory as the DB contents if info_log is nullptr.
+ // Default: nullptr
+ std::shared_ptr<Logger> info_log;
+
+ InfoLogLevel info_log_level;
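A minimal sketch of a typical DBOptions setup using the fields and helpers declared above (the helper function name is illustrative):

    #include "rocksdb/options.h"

    // Illustrative only: a typical DBOptions setup.
    rocksdb::DBOptions MakeDbOptions() {
      rocksdb::DBOptions db_opts;
      db_opts.create_if_missing = true;  // create the DB on first open
      db_opts.paranoid_checks = true;    // the default, kept explicit here
      db_opts.IncreaseParallelism(8);    // 8 threads across flush + compaction
      return db_opts;
    }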
+
+ // Number of open files that can be used by the DB. You may need to
+ // increase this if your database has a large working set. Value -1 means
+ // files opened are always kept open. You can estimate the number of files
+ // based on target_file_size_base and target_file_size_multiplier for
+ // level-based compaction. For universal-style compaction, you can usually
+ // set it to -1.
+ // Default: -1
+ int max_open_files;
+
+ // If max_open_files is -1, DB will open all files on DB::Open(). You can
+ // use this option to increase the number of threads used to open the files.
+ // Default: 16
+ int max_file_opening_threads;
+
+ // Once write-ahead logs exceed this size, we will start forcing the flush of
+ // column families whose memtables are backed by the oldest live WAL file
+ // (i.e. the ones that are causing all the space amplification). If set to 0
+ // (default), we will dynamically choose the WAL size limit to be
+ // [sum of all write_buffer_size * max_write_buffer_number] * 4
+ // Default: 0
+ uint64_t max_total_wal_size;
+
+ // If non-null, then we should collect metrics about database operations.
+ // Statistics objects should not be shared between DB instances as
+ // they do not use any locks to prevent concurrent updates.
+ std::shared_ptr<Statistics> statistics;
+
+ // If true, then the contents of manifest and data files are not synced
+ // to stable storage. Their contents remain in the OS buffers until the
+ // OS decides to flush them. This option is good for bulk-loading
+ // of data. Once the bulk-loading is complete, please issue a
+ // sync to the OS to flush all dirty buffers to stable storage.
+ // Default: false
+ bool disableDataSync;
+
+ // If true, then every store to stable storage will issue an fsync.
+ // If false, then every store to stable storage will issue an fdatasync.
+ // This parameter should be set to true while storing data to
+ // a filesystem like ext3 that can lose files after a reboot.
+ // Default: false
+ bool use_fsync;
+
+ // A list of paths where SST files can be put into, with its target size.
+ // Newer data is placed into paths specified earlier in the vector while
+ // older data gradually moves to paths specified later in the vector.
+ //
+ // For example, if you have a flash device with 10GB allocated for the DB,
+ // as well as a hard drive of 2TB, you should configure it as:
+ //   [{"/flash_path", 10GB}, {"/hard_drive", 2TB}]
+ //
+ // The system will try to guarantee data under each path is close to but
+ // not larger than the target size. But current and future file sizes used
+ // when determining where to place a file are based on best-effort
+ // estimation, which means there is a chance that the actual size under the
+ // directory is slightly more than the target size under some workloads.
+ // Users should give some buffer room for those cases.
+ //
+ // If none of the paths has sufficient room to place a file, the file will
+ // be placed in the last path anyway, regardless of the target size.
+ //
+ // Placing newer data in earlier paths is also best-effort. Users should
+ // expect user files to be placed in higher levels in some extreme cases.
+ //
+ // If left empty, only one path will be used, which is db_name passed when
+ // opening the DB.
+ // Default: empty
+ std::vector<DbPath> db_paths;
+
+ // This specifies the info LOG dir.
+ // If it is empty, the log files will be in the same dir as data.
+ // If it is non-empty, the log files will be in the specified dir,
+ // and the db data dir's absolute path will be used as the log file
+ // name's prefix.
+ std::string db_log_dir;
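The db_paths example above, expressed in code (this assumes the DbPath helper struct declared earlier in this header takes a path and a target size):

    #include "rocksdb/options.h"

    // Illustrative only: mirror the flash + hard-drive layout described above.
    rocksdb::DBOptions MakeTieredPathOptions() {
      rocksdb::DBOptions db_opts;
      db_opts.db_paths.emplace_back("/flash_path", 10ull << 30);  // 10GB
      db_opts.db_paths.emplace_back("/hard_drive", 2ull << 40);   // 2TB
      return db_opts;
    }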
+
+ // This specifies the absolute dir path for write-ahead logs (WAL).
+ // If it is empty, the log files will be in the same dir as data;
+ // dbname is used as the data dir by default.
+ // If it is non-empty, the log files will be kept in the specified dir.
+ // When destroying the db,
+ // all log files in wal_dir and the dir itself are deleted.
+ std::string wal_dir;
+
+ // The periodicity when obsolete files get deleted. The default
+ // value is 6 hours. Files that go out of scope through the compaction
+ // process will still get deleted automatically on every compaction,
+ // regardless of this setting.
+ uint64_t delete_obsolete_files_period_micros;
+
+ // Suggested number of concurrent background compaction jobs, submitted to
+ // the default LOW priority thread pool.
+ //
+ // Default: 1
+ int base_background_compactions;
+
+ // Maximum number of concurrent background compaction jobs, submitted to
+ // the default LOW priority thread pool.
+ // We first try to schedule compactions based on
+ // `base_background_compactions`. If the compaction cannot catch up, we
+ // will increase the number of compaction threads up to
+ // `max_background_compactions`.
+ //
+ // If you're increasing this, also consider increasing the number of threads
+ // in the LOW priority thread pool. For more information, see
+ // Env::SetBackgroundThreads
+ // Default: 1
+ int max_background_compactions;
+
+ // This value represents the maximum number of threads that will
+ // concurrently perform a compaction job by breaking it into multiple,
+ // smaller ones that are run simultaneously.
+ // Default: 1 (i.e. no subcompactions)
+ uint32_t max_subcompactions;
+
+ // Maximum number of concurrent background memtable flush jobs, submitted to
+ // the HIGH priority thread pool.
+ //
+ // By default, all background jobs (major compaction and memtable flush) go
+ // to the LOW priority pool. If this option is set to a positive number,
+ // memtable flush jobs will be submitted to the HIGH priority pool.
+ // This is important when the same Env is shared by multiple db instances.
+ // Without a separate pool, long-running major compaction jobs could
+ // potentially block memtable flush jobs of other db instances, leading to
+ // unnecessary Put stalls.
+ //
+ // If you're increasing this, also consider increasing the number of threads
+ // in the HIGH priority thread pool. For more information, see
+ // Env::SetBackgroundThreads
+ // Default: 1
+ int max_background_flushes;
+
+ // Specify the maximal size of the info log file. If the log file
+ // is larger than `max_log_file_size`, a new info log file will
+ // be created.
+ // If max_log_file_size == 0, all logs will be written to one
+ // log file.
+ size_t max_log_file_size;
+
+ // Time for the info log file to roll (in seconds).
+ // If specified with a non-zero value, the log file will be rolled
+ // if it has been active longer than `log_file_time_to_roll`.
+ // Default: 0 (disabled)
+ size_t log_file_time_to_roll;
+
+ // Maximum number of info log files to be kept.
+ // Default: 1000
+ size_t keep_log_file_num;
+
+ // Recycle log files.
+ // If non-zero, we will reuse previously written log files for new
+ // logs, overwriting the old data. The value indicates how many
+ // such files we will keep around at any point in time for later
+ // use. This is more efficient because the blocks are already
+ // allocated and fdatasync does not need to update the inode after
+ // each write.
+ // Default: 0
+ size_t recycle_log_file_num;
+
+ // The manifest file is rolled over on reaching this limit.
+ // The older manifest file will be deleted.
+ // The default value is MAX_INT so that roll-over does not take place.
+ uint64_t max_manifest_file_size;
+
+ // Number of shards used for table cache.
+ int table_cache_numshardbits;
+
+ // DEPRECATED
+ // int table_cache_remove_scan_count_limit;
+
+ // The following two fields affect how archived logs will be deleted.
+ // 1. If both are set to 0, logs will be deleted asap and will not get into
+ //    the archive.
+ // 2. If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0,
+ //    WAL files will be checked every 10 min and if total size is greater
+ //    than WAL_size_limit_MB, they will be deleted starting with the
+ //    earliest until size_limit is met. All empty files will be deleted.
+ // 3. If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then
+ //    WAL files will be checked every WAL_ttl_seconds / 2 and those that
+ //    are older than WAL_ttl_seconds will be deleted.
+ // 4. If both are not 0, WAL files will be checked every 10 min and both
+ //    checks will be performed with ttl being first.
+ uint64_t WAL_ttl_seconds;
+ uint64_t WAL_size_limit_MB;
+
+ // Number of bytes to preallocate (via fallocate) the manifest
+ // files. Default is 4MB, which is reasonable to reduce random IO
+ // as well as prevent overallocation for mounts that preallocate
+ // large amounts of data (such as xfs's allocsize option).
+ size_t manifest_preallocation_size;
+
+ // Hint the OS that it should not buffer disk I/O. Enabling this
+ // parameter may improve performance but increases pressure on the
+ // system cache.
+ //
+ // The exact behavior of this parameter is platform dependent.
+ //
+ // On POSIX systems, after RocksDB reads data from disk it will
+ // mark the pages as "unneeded". The operating system may - or may not
+ // - evict these pages from memory, reducing pressure on the system
+ // cache. If the disk block is requested again this can result in
+ // additional disk I/O.
+ //
+ // On Windows systems, files will be opened in "unbuffered I/O" mode,
+ // which means that data read from the disk will not be cached or
+ // buffered. The hardware buffer of the devices may however still
+ // be used. Memory mapped files are not impacted by this parameter.
+ //
+ // Default: true
+ bool allow_os_buffer;
+
+ // Allow the OS to mmap files for reading sst tables. Default: false
+ bool allow_mmap_reads;
+
+ // Allow the OS to mmap files for writing.
+ // DB::SyncWAL() only works if this is set to false.
+ // Default: false
+ bool allow_mmap_writes;
+
+ // If false, fallocate() calls are bypassed.
+ bool allow_fallocate;
+
+ // Disable child processes from inheriting open files. Default: true
+ bool is_fd_close_on_exec;
+
+ // DEPRECATED -- this option is no longer used.
+ bool skip_log_error_on_recovery;
+
+ // If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec.
+ // Default: 600 (10 min)
+ unsigned int stats_dump_period_sec;
+
+ // If set to true, will hint the underlying file system that the file
+ // access pattern is random when an SST file is opened.
+ // Default: true
+ bool advise_random_on_open;
+
+ // Amount of data to build up in memtables across all column
+ // families before writing to disk.
+ //
+ // This is distinct from write_buffer_size, which enforces a limit
+ // for a single memtable.
+ //
+ // This feature is disabled by default. Specify a non-zero value
+ // to enable it.
+ //
+ // Default: 0 (disabled)
+ size_t db_write_buffer_size;
+
+ // The memory usage of memtables will be reported to this object. The same
+ // object can be passed into multiple DBs and it will track the sum of the
+ // sizes of all the DBs.
+ // If the total size of all live memtables of all the DBs exceeds
+ // a limit, a flush will be triggered in the next DB to which the next write
+ // is issued.
+ //
+ // If the object is only passed to one DB, the behavior is the same as
+ // db_write_buffer_size. When write_buffer_manager is set, the value set
+ // overrides db_write_buffer_size.
+ //
+ // This feature is disabled by default. Specify a non-zero value
+ // to enable it.
+ //
+ // Default: nullptr
+ std::shared_ptr<WriteBufferManager> write_buffer_manager;
+
+ // Specify the file access pattern once a compaction is started.
+ // It will be applied to all input files of a compaction.
+ // Default: NORMAL
+ enum AccessHint {
+   NONE,
+   NORMAL,
+   SEQUENTIAL,
+   WILLNEED
+ };
+ AccessHint access_hint_on_compaction_start;
+
+ // If true, always create a new file descriptor and new table reader
+ // for compaction inputs. Turning this parameter on may introduce extra
+ // memory usage in the table reader, if it allocates extra memory
+ // for indexes. This will allow file descriptor prefetch options
+ // to be set for compaction input files without impacting file
+ // descriptors for the same file used by user queries.
+ // It is suggested to enable
+ // BlockBasedTableOptions.cache_index_and_filter_blocks
+ // for this mode if using block-based tables.
+ //
+ // Default: false
+ bool new_table_reader_for_compaction_inputs;
+
+ // If non-zero, we perform bigger reads when doing compaction. If you're
+ // running RocksDB on spinning disks, you should set this to at least 2MB.
+ // That way RocksDB's compaction does sequential instead of random reads.
+ //
+ // When non-zero, we also force new_table_reader_for_compaction_inputs to
+ // true.
+ //
+ // Default: 0
+ size_t compaction_readahead_size;
+
+ // This is the maximum buffer size that is used by WinMmapReadableFile in
+ // unbuffered disk I/O mode. We need to maintain an aligned buffer for
+ // reads. We allow the buffer to grow until the specified value and then
+ // for bigger requests allocate one-shot buffers. In unbuffered mode we
+ // always bypass the read-ahead buffer at ReadaheadRandomAccessFile.
+ // When read-ahead is required we then make use of the
+ // compaction_readahead_size value and always try to read ahead. With
+ // read-ahead we always pre-allocate the buffer to the size instead of
+ // growing it up to a limit.
+ //
+ // This option is currently honored only on Windows.
+ //
+ // Default: 1 MB
+ //
+ // Special value: 0 - means do not maintain a per-instance buffer. Allocate
+ //                a per-request buffer and avoid locking.
+ size_t random_access_max_buffer_size;
+
+ // This is the maximum buffer size that is used by WritableFileWriter.
+ // On Windows, we need to maintain an aligned buffer for writes.
+ // We allow the buffer to grow until its size hits the limit.
+ //
+ // Default: 1024 * 1024 (1 MB)
+ size_t writable_file_max_buffer_size;
+
+ // Use adaptive mutex, which spins in user space before resorting
+ // to the kernel. This could reduce context switches when the mutex is not
+ // heavily contended. However, if the mutex is hot, we could end up
+ // wasting spin time.
+ // Default: false
+ bool use_adaptive_mutex;
+
+ // Create DBOptions with default values for all fields
+ DBOptions();
+ // Create DBOptions from Options
+ explicit DBOptions(const Options& options);
+
+ void Dump(Logger* log) const;
+
+ // Allows the OS to incrementally sync files to disk while they are being
+ // written, asynchronously, in the background. This operation can be used
+ // to smooth out write I/Os over time.
+ // Users shouldn't rely on it for persistence guarantees.
+ // Issue one request for every bytes_per_sync written. 0 turns it off.
+ // Default: 0
+ //
+ // You may consider using rate_limiter to regulate the write rate to the
+ // device. When the rate limiter is enabled, it automatically sets
+ // bytes_per_sync to 1MB.
+ //
+ // This option applies to table files.
+ uint64_t bytes_per_sync;
+
+ // Same as bytes_per_sync, but applies to WAL files.
+ // Default: 0, turned off
+ uint64_t wal_bytes_per_sync;
+
+ // A vector of EventListeners whose callback functions will be called
+ // when specific RocksDB events happen.
+ std::vector<std::shared_ptr<EventListener>> listeners;
+
+ // If true, then the status of the threads involved in this DB will
+ // be tracked and available via the GetThreadList() API.
+ //
+ // Default: false
+ bool enable_thread_tracking;
+
+ // The limited write rate to the DB if soft_pending_compaction_bytes_limit
+ // or level0_slowdown_writes_trigger is triggered, or we are writing to the
+ // last mem table allowed and we allow more than 3 mem tables. It is
+ // calculated using the size of user write requests before compression.
+ // RocksDB may decide to slow down more if the compaction
+ // falls further behind.
+ // Unit: bytes per second.
+ //
+ // Default: 2MB/s
+ uint64_t delayed_write_rate;
+
+ // If true, allow multiple writers to update mem tables in parallel.
+ // Only some memtable factories support concurrent writes; currently it
+ // is implemented only for SkipListFactory. Concurrent memtable writes
+ // are not compatible with inplace_update_support or filter_deletes.
+ // It is strongly recommended to set enable_write_thread_adaptive_yield
+ // if you are going to use this feature.
+ //
+ // Default: false
+ bool allow_concurrent_memtable_write;
+
+ // If true, threads synchronizing with the write batch group leader will
+ // wait for up to write_thread_max_yield_usec before blocking on a mutex.
+ // This can substantially improve throughput for concurrent workloads,
+ // regardless of whether allow_concurrent_memtable_write is enabled.
+ //
+ // Default: false
+ bool enable_write_thread_adaptive_yield;
+
+ // The maximum number of microseconds that a write operation will use
+ // a yielding spin loop to coordinate with other write threads before
+ // blocking on a mutex. (Assuming write_thread_slow_yield_usec is
+ // set properly) increasing this value is likely to increase RocksDB
+ // throughput at the expense of increased CPU usage.
+ //
+ // Default: 100
+ uint64_t write_thread_max_yield_usec;
+
+ // The latency in microseconds after which a std::this_thread::yield
+ // call (sched_yield on Linux) is considered to be a signal that
+ // other processes or threads would like to use the current core.
+ // Increasing this makes writer threads more likely to take CPU
+ // by spinning, which will show up as an increase in the number of
+ // involuntary context switches.
+ //
+ // Default: 3
+ uint64_t write_thread_slow_yield_usec;
+
+ // If true, then DB::Open() will not update the statistics used to optimize
+ // compaction decisions by loading table properties from many files.
+ // Turning off this feature will improve DBOpen time, especially on
+ // disk-based storage.
+ //
+ // Default: false
+ bool skip_stats_update_on_db_open;
+
+ // Recovery mode to control the consistency while replaying the WAL.
+ // Default: kPointInTimeRecovery
+ WALRecoveryMode wal_recovery_mode;
+
+ // If set to false, then recovery will fail when a prepared
+ // transaction is encountered in the WAL.
+ bool allow_2pc = false;
+
+ // A global cache for table-level rows.
+ // Default: nullptr (disabled)
+ // Not supported in ROCKSDB_LITE mode!
+ std::shared_ptr<Cache> row_cache;
+
+#ifndef ROCKSDB_LITE
+ // A filter object supplied to be invoked while processing write-ahead-logs
+ // (WALs) during recovery. The filter provides a way to inspect log
+ // records, ignoring a particular record or skipping replay.
+ // The filter is invoked at startup and, currently, from a single thread.
+ WalFilter* wal_filter;
+#endif  // ROCKSDB_LITE
+
+ // If true, then DB::Open / CreateColumnFamily / DropColumnFamily
+ // / SetOptions will fail if the options file is not detected or not
+ // properly persisted.
+ //
+ // DEFAULT: false
+ bool fail_if_options_file_error;
+
+ // If true, then print malloc stats together with rocksdb.stats
+ // when printing to LOG.
+ // DEFAULT: false
+ bool dump_malloc_stats;
+
+ // By default RocksDB replays WAL logs and flushes them on DB open, which may
+ // create very small SST files. If this option is enabled, RocksDB will try
+ // to avoid (but not guarantee not to) flush during recovery. Also, existing
+ // WAL logs will be kept, so that if a crash happened before flush, we still
+ // have logs to recover from.
+ //
+ // DEFAULT: false
+ bool avoid_flush_during_recovery;
+};
+
+// Options to control the behavior of a database (passed to DB::Open)
+struct Options : public DBOptions, public ColumnFamilyOptions {
+ // Create an Options object with default values for all fields.
+ Options() : DBOptions(), ColumnFamilyOptions() {}
+
+ Options(const DBOptions& db_options,
+         const ColumnFamilyOptions& column_family_options)
+     : DBOptions(db_options), ColumnFamilyOptions(column_family_options) {}
+
+ // This function resets the options to their values as of RocksDB 4.6.
+ Options* OldDefaults(int rocksdb_major_version = 4,
+                      int rocksdb_minor_version = 6);
+
+ void Dump(Logger* log) const;
+
+ void DumpCFOptions(Logger* log) const;
+
+ // Some functions that make it easier to optimize RocksDB
+
+ // Set appropriate parameters for bulk loading.
+ // The reason that this is a function that returns "this" instead of a
+ // constructor is to enable chaining of multiple similar calls in the
+ // future.
+ //
+ // All data will be in level 0 without any automatic compaction.
+ // It's recommended to manually call CompactRange(NULL, NULL) before reading
+ // from the database, because otherwise the read can be very slow.
+ Options* PrepareForBulkLoad();
+
+ // Use this if your DB is very small (like under 1GB) and you don't want to
+ // spend lots of memory for memtables.
+ Options* OptimizeForSmallDb();
+};
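A sketch of the bulk-load flow these helpers are meant for, combining PrepareForBulkLoad() with the manual compaction recommended above (paths and names are illustrative; CompactRangeOptions is declared further below in this header):

    #include <cassert>
    #include "rocksdb/db.h"
    #include "rocksdb/options.h"

    // Illustrative only: bulk-load into L0, then compact before serving reads.
    void BulkLoad() {
      rocksdb::Options options;
      options.create_if_missing = true;
      options.PrepareForBulkLoad();  // everything lands in L0, no auto-compaction

      rocksdb::DB* db = nullptr;
      rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/bulk_db", &db);
      assert(s.ok());

      // ... load data with Put()/WriteBatch here ...

      // Manual compaction over the whole key range, as the comment recommends.
      db->CompactRange(rocksdb::CompactRangeOptions(), nullptr, nullptr);
      delete db;
    }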
+
+//
+// An application can issue a read request (via Get/Iterators) and specify
+// if that read should process data that ALREADY resides on a specified cache
+// level. For example, if an application specifies kBlockCacheTier then the
+// Get call will process data that is already present in the memtable or
+// the block cache. It will not page in data from the OS cache or data that
+// resides in storage.
+enum ReadTier {
+ kReadAllTier = 0x0,    // data in memtable, block cache, OS cache or storage
+ kBlockCacheTier = 0x1, // data in memtable or block cache
+ kPersistedTier = 0x2   // persisted data. When WAL is disabled, this option
+                        // will skip data in the memtable.
+                        // Note that this ReadTier currently only supports
+                        // Get and MultiGet and does not support iterators.
+};
+
+// Options that control read operations
+struct ReadOptions {
+ // If true, all data read from underlying storage will be
+ // verified against corresponding checksums.
+ // Default: true
+ bool verify_checksums;
+
+ // Should the "data block"/"index block"/"filter block" read for this
+ // iteration be cached in memory?
+ // Callers may wish to set this field to false for bulk scans.
+ // Default: true
+ bool fill_cache;
+
+ // If this option is set and the memtable implementation allows, Seek
+ // might only return keys with the same prefix as the seek key.
+ //
+ // ! DEPRECATED: prefix_seek is on by default when prefix_extractor
+ // is configured
+ // bool prefix_seek;
+
+ // If "snapshot" is non-nullptr, read as of the supplied snapshot
+ // (which must belong to the DB that is being read and which must
+ // not have been released). If "snapshot" is nullptr, use an implicit
+ // snapshot of the state at the beginning of this read operation.
+ // Default: nullptr
+ const Snapshot* snapshot;
+
+ // If "prefix" is non-nullptr, and ReadOptions is being passed to
+ // db.NewIterator, only return results when the key begins with this
+ // prefix. This field is ignored by other calls (e.g., Get).
+ // Options.prefix_extractor must also be set, and
+ // prefix_extractor.InRange(prefix) must be true. The iterator
+ // returned by NewIterator when this option is set will behave just
+ // as if the underlying store did not contain any non-matching keys,
+ // with two exceptions. Seek() only accepts keys starting with the
+ // prefix, and SeekToLast() is not supported. A prefix filter with this
+ // option will sometimes reduce the number of read IOPs.
+ // Default: nullptr
+ //
+ // ! DEPRECATED
+ // const Slice* prefix;
+
+ // "iterate_upper_bound" defines the extent up to which the forward iterator
+ // can return entries. Once the bound is reached, Valid() will be false.
+ // "iterate_upper_bound" is exclusive, i.e. the bound value is
+ // not a valid entry. If prefix_extractor is not null, the Seek target
+ // and iterate_upper_bound need to have the same prefix.
+ // This is because ordering is not guaranteed outside of the prefix domain.
+ // There is no lower bound on the iterator. If needed, that can be easily
+ // implemented.
+ //
+ // Default: nullptr
+ const Slice* iterate_upper_bound;
+
+ // Specify if this read request should process data that ALREADY
+ // resides on a particular cache. If the required data is not
+ // found at the specified cache, then Status::Incomplete is returned.
+ // Default: kReadAllTier
+ ReadTier read_tier;
+
+ // Specify to create a tailing iterator -- a special iterator that has a
+ // view of the complete database (i.e. it can also be used to read newly
+ // added data) and is optimized for sequential reads. It will return records
+ // that were inserted into the database after the creation of the iterator.
+ // Default: false
+ // Not supported in ROCKSDB_LITE mode!
+ bool tailing;
+
+ // Specify to create a managed iterator -- a special iterator that
+ // uses less resources by having the ability to free its underlying
+ // resources on request.
+ // Default: false
+ // Not supported in ROCKSDB_LITE mode!
+ bool managed;
+
+ // Enable a total order seek regardless of the index format (e.g. hash index)
+ // used in the table. Some table formats (e.g. plain table) may not support
+ // this option.
+ // If true when calling Get(), we also skip the prefix bloom when reading
+ // from a block-based table. It provides a way to read existing data after
+ // changing the implementation of the prefix extractor.
+ bool total_order_seek;
+
+ // Enforce that the iterator only iterates over the same prefix as the seek.
+ // This option is effective only for prefix seeks, i.e. prefix_extractor is
+ // non-null for the column family and total_order_seek is false. Unlike
+ // iterate_upper_bound, prefix_same_as_start only works within a prefix
+ // but in both directions.
+ // Default: false
+ bool prefix_same_as_start;
+
+ // Keep the blocks loaded by the iterator pinned in memory as long as the
+ // iterator is not deleted. If used when reading from tables created with
+ // BlockBasedTableOptions::use_delta_encoding = false,
+ // the Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed
+ // to return 1.
+ // Default: false
+ bool pin_data;
+
+ // If true, when PurgeObsoleteFile is called in CleanupIteratorState, we
+ // schedule a background job in the flush job queue and delete obsolete files
+ // in the background.
+ // Default: false
+ bool background_purge_on_iterator_cleanup;
+
+ // If non-zero, NewIterator will create a new table reader which
+ // performs reads of the given size. Using a large size (> 2MB) can
+ // improve the performance of forward iteration on spinning disks.
+ // Default: 0
+ size_t readahead_size;
+
+ ReadOptions();
+ ReadOptions(bool cksum, bool cache);
+};
+
+// Options that control write operations
+struct WriteOptions {
+ // If true, the write will be flushed from the operating system
+ // buffer cache (by calling WritableFile::Sync()) before the write
+ // is considered complete. If this flag is true, writes will be
+ // slower.
+ //
+ // If this flag is false, and the machine crashes, some recent
+ // writes may be lost. Note that if it is just the process that
+ // crashes (i.e., the machine does not reboot), no writes will be
+ // lost even if sync==false.
+ //
+ // In other words, a DB write with sync==false has similar
+ // crash semantics as the "write()" system call. A DB write
+ // with sync==true has similar crash semantics to a "write()"
+ // system call followed by "fdatasync()".
+ //
+ // Default: false
+ bool sync;
+
+ // If true, writes will not first go to the write-ahead log,
+ // and the write may be lost after a crash.
+ bool disableWAL;
+
+ // This option is deprecated. It's not used anymore.
+ uint64_t timeout_hint_us;
+
+ // If true, and the user is trying to write to column families that don't
+ // exist (they were dropped), ignore the write (don't return an error). If
+ // there are multiple writes in a WriteBatch, other writes will succeed.
+ // Default: false
+ bool ignore_missing_column_families;
+
+ WriteOptions()
+     : sync(false),
+       disableWAL(false),
+       timeout_hint_us(0),
+       ignore_missing_column_families(false) {}
+};
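A small usage sketch of the sync flag described above (the helper name is illustrative):

    #include "rocksdb/db.h"
    #include "rocksdb/options.h"

    // Illustrative only: a durable Put that syncs the WAL before returning.
    rocksdb::Status DurablePut(rocksdb::DB* db,
                               const rocksdb::Slice& key,
                               const rocksdb::Slice& value) {
      rocksdb::WriteOptions write_opts;
      write_opts.sync = true;  // survives a machine crash, at a latency cost
      return db->Put(write_opts, key, value);
    }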
+
+// Options that control flush operations
+struct FlushOptions {
+ // If true, the flush will wait until the flush is done.
+ // Default: true
+ bool wait;
+
+ FlushOptions() : wait(true) {}
+};
+
+// Create a Logger from the provided DBOptions
+extern Status CreateLoggerFromOptions(const std::string& dbname,
+                                      const DBOptions& options,
+                                      std::shared_ptr<Logger>* logger);
+
+// CompactionOptions are used in the CompactFiles() call.
+struct CompactionOptions {
+ // Compaction output compression type
+ // Default: snappy
+ CompressionType compression;
+ // Compaction will create files of size `output_file_size_limit`.
+ // Default: MAX, which means that compaction will create a single file
+ uint64_t output_file_size_limit;
+
+ CompactionOptions()
+     : compression(kSnappyCompression),
+       output_file_size_limit(std::numeric_limits<uint64_t>::max()) {}
+};
+
+// For level-based compaction, we can configure if we want to skip/force
+// bottommost level compaction.
+enum class BottommostLevelCompaction {
+ // Skip bottommost level compaction
+ kSkip,
+ // Only compact bottommost level if there is a compaction filter
+ // This is the default option
+ kIfHaveCompactionFilter,
+ // Always compact bottommost level
+ kForce,
+};
+
+// CompactRangeOptions is used by the CompactRange() call.
+struct CompactRangeOptions {
+ // If true, no other compaction will run at the same time as this
+ // manual compaction
+ bool exclusive_manual_compaction = true;
+ // If true, compacted files will be moved to the minimum level capable
+ // of holding the data, or to the given level (specified by a non-negative
+ // target_level).
+ bool change_level = false;
+ // If change_level is true and target_level has a non-negative value,
+ // compacted files will be moved to target_level.
+ int target_level = -1;
+ // Compaction outputs will be placed in options.db_paths[target_path_id].
+ // Behavior is undefined if target_path_id is out of range.
+ uint32_t target_path_id = 0;
+ // By default level-based compaction will only compact the bottommost level
+ // if there is a compaction filter
+ BottommostLevelCompaction bottommost_level_compaction =
+     BottommostLevelCompaction::kIfHaveCompactionFilter;
+};
+
+}  // namespace rocksdb
+
+#endif  // STORAGE_ROCKSDB_INCLUDE_OPTIONS_H_
diff --git a/external/rocksdb/include/rocksdb/perf_context.h b/external/rocksdb/include/rocksdb/perf_context.h
new file mode 100755
index 0000000000..1d73fecaa1
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/perf_context.h
@@ -0,0 +1,128 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
+#define STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
+
+#include <stdint.h>
+#include <string>
+
+#include "rocksdb/perf_level.h"
+
+namespace rocksdb {
+
+// A thread-local context for gathering performance counters efficiently
+// and transparently.
+// Use SetPerfLevel(PerfLevel::kEnableTime) to enable time stats.
+
+struct PerfContext {
+
+ void Reset(); // reset all performance counters to zero
+
+ std::string ToString(bool exclude_zero_counters = false) const;
+
+ uint64_t user_key_comparison_count; // total number of user key comparisons
+ uint64_t block_cache_hit_count;     // total number of block cache hits
+ uint64_t block_read_count;          // total number of block reads (with IO)
+ uint64_t block_read_byte;           // total number of bytes from block reads
+ uint64_t block_read_time;           // total nanos spent on block reads
+ uint64_t block_checksum_time;       // total nanos spent on block checksum
+ uint64_t block_decompress_time;     // total nanos spent on block decompression
+ // total number of internal keys skipped over during iteration.
+ // There are several reasons for it:
+ // 1. when calling Next(), the iterator is in the position of the previous
+ //    key, so that we'll need to skip it. It means this counter will always
+ //    be incremented in Next().
+ // 2. when calling Next(), we need to skip internal entries for the previous
+ //    keys that are overwritten.
+ // 3. when calling Next(), Seek() or SeekToFirst(): after the previous key
+ //    (for Next()), the seek key (for Seek()), or the beginning (for
+ //    SeekToFirst()), there may be one or more deleted keys before the next
+ //    valid key that the operation should place the iterator on. We need
+ //    to skip both the tombstones and the updates hidden by the tombstones.
+ //    The tombstones are not included in this counter, while previous
+ //    updates hidden by the tombstones are included here.
+ // 4. symmetric cases for Prev() and SeekToLast()
+ // We sometimes also skip entries of updates more recent than the snapshot
+ // we read from, but they are not included in this counter.
+ //
+ uint64_t internal_key_skipped_count;
+ // Total number of deletes and single deletes skipped over during iteration.
+ // When calling Next(), Seek() or SeekToFirst(): after the previous position
+ // (for Next()), the seek key (for Seek()), or the beginning (for
+ // SeekToFirst()), there may be one or more deleted keys before the next
+ // valid key. Every deleted key is counted once. We don't recount here if
+ // there are still older updates invalidated by the tombstones.
+ //
+ uint64_t internal_delete_skipped_count;
+
+ uint64_t get_snapshot_time;       // total nanos spent on getting snapshot
+ uint64_t get_from_memtable_time;  // total nanos spent on querying memtables
+ uint64_t get_from_memtable_count; // number of mem tables queried
+ // total nanos spent after Get() finds a key
+ uint64_t get_post_process_time;
+ uint64_t get_from_output_files_time; // total nanos reading from output files
+ // total nanos spent on seeking memtable
+ uint64_t seek_on_memtable_time;
+ // number of seeks issued on memtable
+ uint64_t seek_on_memtable_count;
+ // total nanos spent on seeking child iters
+ uint64_t seek_child_seek_time;
+ // number of seeks issued in child iterators
+ uint64_t seek_child_seek_count;
+ uint64_t seek_min_heap_time; // total nanos spent on the merge heap
+ // total nanos spent on seeking the internal entries
+ uint64_t seek_internal_seek_time;
+ // total nanos spent on iterating internal entries to find the next
+ // user entry
+ uint64_t find_next_user_entry_time;
+
+ // total nanos spent on writing to the WAL
+ uint64_t write_wal_time;
+ // total nanos spent on writing to mem tables
+ uint64_t write_memtable_time;
+ // total nanos spent on delaying write
+ uint64_t write_delay_time;
+ // total nanos spent on writing a record, excluding the above three times
+ uint64_t write_pre_and_post_process_time;
+
+ uint64_t db_mutex_lock_nanos; // time spent on acquiring the DB mutex
+ // Time spent waiting on a condition variable created with the DB mutex.
+ uint64_t db_condition_wait_nanos;
+ // Time spent on the merge operator.
+ uint64_t merge_operator_time_nanos;
+
+ // Time spent on reading an index block from block cache or an SST file
+ uint64_t read_index_block_nanos;
+ // Time spent on reading a filter block from block cache or an SST file
+ uint64_t read_filter_block_nanos;
+ // Time spent on creating a data block iterator
+ uint64_t new_table_block_iter_nanos;
+ // Time spent on creating an iterator of an SST file.
+ uint64_t new_table_iterator_nanos;
+ // Time spent on seeking a key in data/index blocks
+ uint64_t block_seek_nanos;
+ // Time spent on finding or creating a table reader
+ uint64_t find_table_nanos;
+ // total number of mem table bloom hits
+ uint64_t bloom_memtable_hit_count;
+ // total number of mem table bloom misses
+ uint64_t bloom_memtable_miss_count;
+ // total number of SST table bloom hits
+ uint64_t bloom_sst_hit_count;
+ // total number of SST table bloom misses
+ uint64_t bloom_sst_miss_count;
+};
+
+#if defined(NPERF_CONTEXT) || defined(IOS_CROSS_COMPILE)
+extern PerfContext perf_context;
+#elif _WIN32
+extern __declspec(thread) PerfContext perf_context;
+#else
+extern __thread PerfContext perf_context;
+#endif
+
+}  // namespace rocksdb
+
+#endif  // STORAGE_ROCKSDB_INCLUDE_PERF_CONTEXT_H
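A sketch of how this context is typically consumed, using SetPerfLevel from rocksdb/perf_level.h (declared next in this patch) and the thread-local perf_context declared above (the helper name is illustrative):

    #include <iostream>
    #include <string>
    #include "rocksdb/db.h"
    #include "rocksdb/options.h"
    #include "rocksdb/perf_context.h"
    #include "rocksdb/perf_level.h"

    // Illustrative only: profile a single Get() with the thread-local context.
    void TimedGet(rocksdb::DB* db, const rocksdb::Slice& key) {
      rocksdb::SetPerfLevel(rocksdb::PerfLevel::kEnableTime);
      rocksdb::perf_context.Reset();  // zero the counters for this thread

      std::string value;
      db->Get(rocksdb::ReadOptions(), key, &value);

      std::cout << rocksdb::perf_context.ToString(/*exclude_zero_counters=*/true)
                << std::endl;
      rocksdb::SetPerfLevel(rocksdb::PerfLevel::kDisable);
    }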
diff --git a/external/rocksdb/include/rocksdb/perf_level.h b/external/rocksdb/include/rocksdb/perf_level.h
new file mode 100755
index 0000000000..5fddac57c2
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/perf_level.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef INCLUDE_ROCKSDB_PERF_LEVEL_H_
+#define INCLUDE_ROCKSDB_PERF_LEVEL_H_
+
+#include <stdint.h>
+#include <string>
+
+namespace rocksdb {
+
+// How much perf stats to collect. Affects perf_context and iostats_context.
+
+enum PerfLevel : unsigned char {
+ kUninitialized = 0,            // unknown setting
+ kDisable = 1,                  // disable perf stats
+ kEnableCount = 2,              // enable only count stats
+ kEnableTimeExceptForMutex = 3, // Other than count stats, also enable time
+                                // stats except for mutexes
+ kEnableTime = 4,               // enable count and time stats
+ kOutOfBounds = 5               // N.B. Must always be the last value!
+};
+
+// set the perf stats level for the current thread
+void SetPerfLevel(PerfLevel level);
+
+// get the current perf stats level for the current thread
+PerfLevel GetPerfLevel();
+
+}  // namespace rocksdb
+
+#endif  // INCLUDE_ROCKSDB_PERF_LEVEL_H_
diff --git a/external/rocksdb/include/rocksdb/persistent_cache.h b/external/rocksdb/include/rocksdb/persistent_cache.h
new file mode 100755
index 0000000000..ef49da5ab6
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/persistent_cache.h
@@ -0,0 +1,49 @@
+// Copyright (c) 2013, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#pragma once
+
+#include <memory>
+#include <string>
+
+#include "rocksdb/slice.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+
+// PersistentCache
+//
+// Persistent cache interface for caching IO pages on a persistent medium. The
+// cache interface is specifically designed for a persistent read cache.
+class PersistentCache {
+ public:
+ virtual ~PersistentCache() {}
+
+ // Insert into the page cache
+ //
+ // page_key   Identifier to identify a page uniquely across restarts
+ // data       Page data
+ // size       Size of the page
+ virtual Status Insert(const Slice& key, const char* data,
+                       const size_t size) = 0;
+
+ // Look up the page cache by page identifier
+ //
+ // page_key   Page identifier
+ // buf        Buffer where the data should be copied
+ // size       Size of the page
+ virtual Status Lookup(const Slice& key, std::unique_ptr<char[]>* data,
+                       size_t* size) = 0;
+
+ // Is the cache storing uncompressed data?
+ //
+ // True if the cache is configured to store uncompressed data,
+ // false otherwise.
+ virtual bool IsCompressed() = 0;
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/include/rocksdb/rate_limiter.h b/external/rocksdb/include/rocksdb/rate_limiter.h
new file mode 100755
index 0000000000..b1bf3f4271
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/rate_limiter.h
@@ -0,0 +1,64 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+
+#include "rocksdb/env.h"
+
+namespace rocksdb {
+
+class RateLimiter {
+ public:
+ virtual ~RateLimiter() {}
+
+ // This API allows the user to dynamically change the rate limiter's bytes
+ // per second.
+  // REQUIRED: bytes_per_second > 0
+  virtual void SetBytesPerSecond(int64_t bytes_per_second) = 0;
+
+  // Request tokens to write bytes. If this request cannot be satisfied,
+  // the call is blocked. The caller is responsible for making sure
+  // bytes <= GetSingleBurstBytes()
+  virtual void Request(const int64_t bytes, const Env::IOPriority pri) = 0;
+
+  // Max bytes that can be granted in a single burst
+  virtual int64_t GetSingleBurstBytes() const = 0;
+
+  // Total bytes that go through the rate limiter
+  virtual int64_t GetTotalBytesThrough(
+      const Env::IOPriority pri = Env::IO_TOTAL) const = 0;
+
+  // Total # of requests that go through the rate limiter
+  virtual int64_t GetTotalRequests(
+      const Env::IOPriority pri = Env::IO_TOTAL) const = 0;
+};
+
+// Create a RateLimiter object, which can be shared among RocksDB instances to
+// control the write rate of flush and compaction.
+// @rate_bytes_per_sec: this is the only parameter you want to set most of the
+// time. It controls the total write rate of compaction and flush in bytes per
+// second. Currently, RocksDB does not enforce the rate limit for anything
+// other than flush and compaction, e.g. write to WAL.
+// @refill_period_us: this controls how often tokens are refilled. For example,
+// when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to
+// 100ms, then 1MB is refilled every 100ms internally. A larger value can lead
+// to burstier writes while a smaller value introduces more CPU overhead.
+// The default should work for most cases.
+// @fairness: RateLimiter accepts high-pri requests and low-pri requests.
+// A low-pri request is usually blocked in favor of a high-pri request.
+// Currently, RocksDB assigns low-pri to requests from compaction and high-pri
+// to requests from flush. Low-pri requests can get blocked if flush requests
+// come in continuously. This fairness parameter grants low-pri requests
+// permission with 1/fairness chance even when high-pri requests exist, to
+// avoid starvation. Leaving it at the default of 10 should be fine for most
+// cases.
+extern RateLimiter* NewGenericRateLimiter(
+    int64_t rate_bytes_per_sec,
+    int64_t refill_period_us = 100 * 1000,
+    int32_t fairness = 10);
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/include/rocksdb/slice.h b/external/rocksdb/include/rocksdb/slice.h
new file mode 100755
index 0000000000..38d494ed98
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/slice.h
@@ -0,0 +1,160 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Slice is a simple structure containing a pointer into some external
+// storage and a size. The user of a Slice must ensure that the slice
+// is not used after the corresponding external storage has been
+// deallocated.
+//
+// Multiple threads can invoke const methods on a Slice without
+// external synchronization, but if any of the threads may call a
+// non-const method, all threads accessing the same Slice must use
+// external synchronization.
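The rate limiter above is usually attached through the DBOptions::rate_limiter field declared in rocksdb/options.h. A minimal sketch under that assumption (the database path is hypothetical):

    #include "rocksdb/db.h"
    #include "rocksdb/options.h"
    #include "rocksdb/rate_limiter.h"

    rocksdb::Options options;
    options.create_if_missing = true;
    // Cap flush/compaction writes at 10 MB/s; the refill period (100 ms)
    // and fairness (10) defaults are kept.
    options.rate_limiter.reset(
        rocksdb::NewGenericRateLimiter(10 * 1024 * 1024));
    rocksdb::DB* db = nullptr;
    rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/ratelimited_db", &db);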
+
+#ifndef STORAGE_ROCKSDB_INCLUDE_SLICE_H_
+#define STORAGE_ROCKSDB_INCLUDE_SLICE_H_
+
+#include <assert.h>
+#include <cstdio>
+#include <stddef.h>
+#include <string.h>
+#include <string>
+
+namespace rocksdb {
+
+class Slice {
+ public:
+  // Create an empty slice.
+  Slice() : data_(""), size_(0) { }
+
+  // Create a slice that refers to d[0,n-1].
+  Slice(const char* d, size_t n) : data_(d), size_(n) { }
+
+  // Create a slice that refers to the contents of "s"
+  /* implicit */
+  Slice(const std::string& s) : data_(s.data()), size_(s.size()) { }
+
+  // Create a slice that refers to s[0,strlen(s)-1]
+  /* implicit */
+  Slice(const char* s) : data_(s), size_(strlen(s)) { }
+
+  // Create a single slice from SliceParts using buf as storage.
+  // buf must exist as long as the returned Slice exists.
+  Slice(const struct SliceParts& parts, std::string* buf);
+
+  // Return a pointer to the beginning of the referenced data
+  const char* data() const { return data_; }
+
+  // Return the length (in bytes) of the referenced data
+  size_t size() const { return size_; }
+
+  // Return true iff the length of the referenced data is zero
+  bool empty() const { return size_ == 0; }
+
+  // Return the ith byte in the referenced data.
+  // REQUIRES: n < size()
+  char operator[](size_t n) const {
+    assert(n < size());
+    return data_[n];
+  }
+
+  // Change this slice to refer to an empty array
+  void clear() { data_ = ""; size_ = 0; }
+
+  // Drop the first "n" bytes from this slice.
+  void remove_prefix(size_t n) {
+    assert(n <= size());
+    data_ += n;
+    size_ -= n;
+  }
+
+  // Drop the last "n" bytes from this slice.
+  void remove_suffix(size_t n) {
+    assert(n <= size());
+    size_ -= n;
+  }
+
+  // Return a string that contains a copy of the referenced data.
+  // When hex is true, returns a hex-encoded (0-9A-F) string of twice the
+  // length.
+  std::string ToString(bool hex = false) const;
+
+  // Decodes the current slice, interpreted as a hexadecimal string, into
+  // result. Returns true if successful; returns false if this isn't a valid
+  // hex string (e.g. not coming from Slice::ToString(true)).
+  // This slice is expected to have an even number of 0-9A-F characters;
+  // lowercase (a-f) is also accepted.
+  bool DecodeHex(std::string* result) const;
+
+  // Three-way comparison.  Returns value:
+  //   <  0 iff "*this" <  "b",
+  //   == 0 iff "*this" == "b",
+  //   >  0 iff "*this" >  "b"
+  int compare(const Slice& b) const;
+
+  // Return true iff "x" is a prefix of "*this"
+  bool starts_with(const Slice& x) const {
+    return ((size_ >= x.size_) &&
+            (memcmp(data_, x.data_, x.size_) == 0));
+  }
+
+  // Return true iff "x" is a suffix of "*this"
+  bool ends_with(const Slice& x) const {
+    return ((size_ >= x.size_) &&
+            (memcmp(data_ + size_ - x.size_, x.data_, x.size_) == 0));
+  }
+
+  // Compare two slices and return the offset of the first byte where they
+  // differ
+  size_t difference_offset(const Slice& b) const;
+
+  // private: make these public for rocksdbjni access
+  const char* data_;
+  size_t size_;
+
+  // Intentionally copyable
+};
+
+// A set of Slices that are virtually concatenated together.  'parts' points
+// to an array of Slices.  The number of elements in the array is 'num_parts'.
+struct SliceParts {
+  SliceParts(const Slice* _parts, int _num_parts) :
+      parts(_parts), num_parts(_num_parts) { }
+  SliceParts() : parts(nullptr), num_parts(0) {}
+
+  const Slice* parts;
+  int num_parts;
+};
+
+inline bool operator==(const Slice& x, const Slice& y) {
+  return ((x.size() == y.size()) &&
+          (memcmp(x.data(), y.data(), x.size()) == 0));
+}
+
+inline bool operator!=(const Slice& x, const Slice& y) {
+  return !(x == y);
+}
+
+inline int Slice::compare(const Slice& b) const {
+  const size_t min_len = (size_ < b.size_) ? size_ : b.size_;
+  int r = memcmp(data_, b.data_, min_len);
+  if (r == 0) {
+    if (size_ < b.size_) r = -1;
+    else if (size_ > b.size_) r = +1;
+  }
+  return r;
+}
+
+inline size_t Slice::difference_offset(const Slice& b) const {
+  size_t off = 0;
+  const size_t len = (size_ < b.size_) ? size_ : b.size_;
+  for (; off < len; off++) {
+    if (data_[off] != b.data_[off]) break;
+  }
+  return off;
+}
+
+}  // namespace rocksdb
+
+#endif  // STORAGE_ROCKSDB_INCLUDE_SLICE_H_
diff --git a/external/rocksdb/include/rocksdb/slice_transform.h b/external/rocksdb/include/rocksdb/slice_transform.h
new file mode 100755
index 0000000000..d123258121
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/slice_transform.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2012 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Class for specifying user-defined functions which perform a
+// transformation on a slice.  It is not required that every slice
+// belong to the domain and/or range of a function.  Subclasses should
+// define InDomain and InRange to determine which slices are in either
+// of these sets respectively.
+
+#ifndef STORAGE_ROCKSDB_INCLUDE_SLICE_TRANSFORM_H_
+#define STORAGE_ROCKSDB_INCLUDE_SLICE_TRANSFORM_H_
+
+#include <string>
+
+namespace rocksdb {
+
+class Slice;
+
+class SliceTransform {
+ public:
+  virtual ~SliceTransform() {};
+
+  // Return the name of this transformation.
+  virtual const char* Name() const = 0;
+
+  // Transform a src in the domain to a dst in the range.
+  virtual Slice Transform(const Slice& src) const = 0;
+
+  // Determine whether src is a valid input to which the function can be
+  // applied.
+  virtual bool InDomain(const Slice& src) const = 0;
+
+  // Determine whether dst=Transform(src) for some src.
+  virtual bool InRange(const Slice& dst) const = 0;
+
+  // Transform(s)=Transform(`prefix`) for any s with `prefix` as a prefix.
+  //
+  // This function is not used by RocksDB itself, but is meant for users. If
+  // users pass Options by string to RocksDB, they might not know what prefix
+  // extractor they are using. This function helps users determine: if they
+  // want to iterate all keys prefixed by `prefix`, whether it is
+  // safe to use the prefix bloom filter and seek to key `prefix`.
+  // If this function returns true, this means a user can Seek() to a prefix
+  // using the bloom filter. Otherwise, the user needs to skip the bloom
+  // filter by setting ReadOptions.total_order_seek = true.
+  //
+  // Here is an example: Suppose we implement a slice transform that returns
+  // the first part of the string after splitting it using delimiter ",":
+  // 1. SameResultWhenAppended("abc,") should return true. If applying a
+  //    prefix bloom filter using it, all slices matching "abc,.*" will be
+  //    extracted to "abc,", so any SST file or memtable containing any of
+  //    those keys will not be filtered out.
+  // 2. SameResultWhenAppended("abc") should return false. A user is not
+  //    guaranteed to see all the keys matching "abc.*" if the user seeks to
+  //    "abc" against a DB with the same setting. If one SST file only
+  //    contains "abcd,e", the file can be filtered out and the key will be
+  //    invisible.
+  //
+  // i.e., an implementation always returning false is safe.
+  virtual bool SameResultWhenAppended(const Slice& prefix) const {
+    return false;
+  }
+};
+
+extern const SliceTransform* NewFixedPrefixTransform(size_t prefix_len);
+
+extern const SliceTransform* NewCappedPrefixTransform(size_t cap_len);
+
+extern const SliceTransform* NewNoopTransform();
+
+}
+
+#endif  // STORAGE_ROCKSDB_INCLUDE_SLICE_TRANSFORM_H_
diff --git a/external/rocksdb/include/rocksdb/snapshot.h b/external/rocksdb/include/rocksdb/snapshot.h
new file mode 100755
index 0000000000..d8d999dc25
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/snapshot.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include "rocksdb/types.h"
+
+namespace rocksdb {
+
+class DB;
+
+// Abstract handle to a particular state of a DB.
+// A Snapshot is an immutable object and can therefore be safely
+// accessed from multiple threads without any external synchronization.
+//
+// To create a Snapshot, call DB::GetSnapshot().
+// To destroy a Snapshot, call DB::ReleaseSnapshot(snapshot).
+class Snapshot {
+ public:
+  // returns Snapshot's sequence number
+  virtual SequenceNumber GetSequenceNumber() const = 0;
+
+ protected:
+  virtual ~Snapshot();
+};
+
+// Simple RAII wrapper class for Snapshot.
+// Constructing this object will create a snapshot.  Destructing will
+// release the snapshot.
+class ManagedSnapshot {
+ public:
+  explicit ManagedSnapshot(DB* db);
+
+  // Instead of creating a snapshot, take ownership of the input snapshot.
+  ManagedSnapshot(DB* db, const Snapshot* _snapshot);
+
+  ~ManagedSnapshot();
+
+  const Snapshot* snapshot();
+
+ private:
+  DB* db_;
+  const Snapshot* snapshot_;
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/include/rocksdb/sst_dump_tool.h b/external/rocksdb/include/rocksdb/sst_dump_tool.h
new file mode 100755
index 0000000000..0dd94caba0
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/sst_dump_tool.h
@@ -0,0 +1,17 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
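ManagedSnapshot above is the RAII counterpart of DB::GetSnapshot()/DB::ReleaseSnapshot(). A minimal sketch of the intended pattern, assuming an already-open rocksdb::DB* (the key is hypothetical):

    #include <string>
    #include "rocksdb/db.h"
    #include "rocksdb/snapshot.h"

    void ReadAtSnapshot(rocksdb::DB* db) {
      rocksdb::ManagedSnapshot snap(db);  // snapshot taken in the constructor
      rocksdb::ReadOptions read_options;
      read_options.snapshot = snap.snapshot();  // pin reads to this state
      std::string value;
      rocksdb::Status s = db->Get(read_options, "some-key", &value);
      // ... use value if s.ok() ...
    }  // snapshot released here by ~ManagedSnapshot()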
+#ifndef ROCKSDB_LITE
+#pragma once
+
+namespace rocksdb {
+
+class SSTDumpTool {
+ public:
+  int Run(int argc, char** argv);
+};
+
+}  // namespace rocksdb
+
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/include/rocksdb/sst_file_manager.h b/external/rocksdb/include/rocksdb/sst_file_manager.h
new file mode 100755
index 0000000000..bee243e4a9
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/sst_file_manager.h
@@ -0,0 +1,80 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <memory>
+#include <string>
+#include <unordered_map>
+
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+
+class Env;
+class Logger;
+
+// SstFileManager is used to track SST files in the DB and control their
+// deletion rate.
+// All SstFileManager public functions are thread-safe.
+class SstFileManager {
+ public:
+  virtual ~SstFileManager() {}
+
+  // Update the maximum allowed space that should be used by RocksDB. If
+  // the total size of the SST files exceeds max_allowed_space, writes to
+  // RocksDB will fail.
+  //
+  // Setting max_allowed_space to 0 will disable this feature; maximum allowed
+  // space will be infinite (default value).
+  //
+  // thread-safe.
+  virtual void SetMaxAllowedSpaceUsage(uint64_t max_allowed_space) = 0;
+
+  // Return true if the total size of SST files exceeded the maximum allowed
+  // space usage.
+  //
+  // thread-safe.
+  virtual bool IsMaxAllowedSpaceReached() = 0;
+
+  // Return the total size of all tracked files.
+  // thread-safe
+  virtual uint64_t GetTotalSize() = 0;
+
+  // Return a map containing all tracked files and their corresponding sizes.
+  // thread-safe
+  virtual std::unordered_map<std::string, uint64_t> GetTrackedFiles() = 0;
+
+  // Return the delete rate limit in bytes per second.
+  // thread-safe
+  virtual int64_t GetDeleteRateBytesPerSecond() = 0;
+};
+
+// Create a new SstFileManager that can be shared among multiple RocksDB
+// instances to track SST files and control their deletion rate.
+//
+// @param env: Pointer to Env object, please see "rocksdb/env.h".
+// @param info_log: If not nullptr, info_log will be used to log errors.
+//
+// == Deletion rate limiting specific arguments ==
+// @param trash_dir: Path to the directory where deleted files will be moved
+//    to be deleted in a background thread while applying rate limiting. If
+//    this directory doesn't exist, it will be created. This directory should
+//    not be used by any other process or any other SstFileManager. Set to ""
+//    to disable deletion rate limiting.
+// @param rate_bytes_per_sec: How many bytes should be deleted per second. If
+//    this value is set to 1024 (1 KB / sec) and we deleted a file of size 4 KB
+//    in 1 second, we will wait for another 3 seconds before we delete other
+//    files. Set to 0 to disable deletion rate limiting.
+// @param delete_exisitng_trash: If set to true, the newly created
+//    SstFileManager will delete files that already exist in trash_dir.
+// @param status: If not nullptr, status will contain any errors that happened
+//    during creating the missing trash_dir or deleting existing files in
+//    trash.
+extern SstFileManager* NewSstFileManager(
+    Env* env, std::shared_ptr<Logger> info_log = nullptr,
+    std::string trash_dir = "", int64_t rate_bytes_per_sec = 0,
+    bool delete_exisitng_trash = true, Status* status = nullptr);
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/include/rocksdb/sst_file_writer.h b/external/rocksdb/include/rocksdb/sst_file_writer.h
new file mode 100755
index 0000000000..530bed186e
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/sst_file_writer.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+#include <string>
+#include "rocksdb/env.h"
+#include "rocksdb/immutable_options.h"
+#include "rocksdb/types.h"
+
+namespace rocksdb {
+
+class Comparator;
+
+// Table Properties that are specific to tables created by SstFileWriter.
+struct ExternalSstFilePropertyNames {
+  // value of this property is a fixed int32 number.
+  static const std::string kVersion;
+};
+
+// ExternalSstFileInfo includes information about an sst file created
+// using SstFileWriter
+struct ExternalSstFileInfo {
+  ExternalSstFileInfo() {}
+  ExternalSstFileInfo(const std::string& _file_path,
+                      const std::string& _smallest_key,
+                      const std::string& _largest_key,
+                      SequenceNumber _sequence_number, uint64_t _file_size,
+                      int32_t _num_entries, int32_t _version)
+      : file_path(_file_path),
+        smallest_key(_smallest_key),
+        largest_key(_largest_key),
+        sequence_number(_sequence_number),
+        file_size(_file_size),
+        num_entries(_num_entries),
+        version(_version) {}
+
+  std::string file_path;           // external sst file path
+  std::string smallest_key;        // smallest user key in file
+  std::string largest_key;         // largest user key in file
+  SequenceNumber sequence_number;  // sequence number of all keys in file
+  uint64_t file_size;              // file size in bytes
+  uint64_t num_entries;            // number of entries in file
+  int32_t version;                 // file version
+};
+
+// SstFileWriter is used to create sst files that can be added to the database
+// later. All keys in files generated by SstFileWriter will have sequence
+// number = 0.
+class SstFileWriter {
+ public:
+  SstFileWriter(const EnvOptions& env_options, const Options& options,
+                const Comparator* user_comparator);
+
+  ~SstFileWriter();
+
+  // Prepare SstFileWriter to write into file located at "file_path".
+  Status Open(const std::string& file_path);
+
+  // Add key, value to currently opened file
+  // REQUIRES: key is after any previously added key according to comparator.
+  Status Add(const Slice& user_key, const Slice& value);
+
+  // Finalize writing to sst file and close file.
+  //
+  // An optional ExternalSstFileInfo pointer can be passed to the function
+  // which will be populated with information about the created sst file
+  Status Finish(ExternalSstFileInfo* file_info = nullptr);
+
+ private:
+  class SstFileWriterPropertiesCollectorFactory;
+  class SstFileWriterPropertiesCollector;
+  struct Rep;
+  Rep* rep_;
+};
+}  // namespace rocksdb
diff --git a/external/rocksdb/include/rocksdb/statistics.h b/external/rocksdb/include/rocksdb/statistics.h
new file mode 100755
index 0000000000..16fba0906b
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/statistics.h
@@ -0,0 +1,423 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
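The SstFileWriter declared above follows an Open / Add-in-sorted-order / Finish protocol. A minimal sketch with default Options (the output path and keys are hypothetical):

    #include "rocksdb/options.h"
    #include "rocksdb/sst_file_writer.h"

    rocksdb::Options options;
    rocksdb::SstFileWriter writer(rocksdb::EnvOptions(), options,
                                  options.comparator);
    rocksdb::Status s = writer.Open("/tmp/example.sst");
    if (s.ok()) {
      // Keys must be added in increasing order of the comparator.
      s = writer.Add("key1", "value1");
      if (s.ok()) s = writer.Add("key2", "value2");
    }
    rocksdb::ExternalSstFileInfo info;
    if (s.ok()) s = writer.Finish(&info);  // fills file_size, num_entries, ...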
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
+#define STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
+
+#include <atomic>
+#include <cstddef>
+#include <cstdint>
+#include <string>
+#include <memory>
+#include <vector>
+
+namespace rocksdb {
+
+/**
+ * Keep adding tickers here.
+ * 1. Any ticker should be added before TICKER_ENUM_MAX.
+ * 2. Add a readable string in TickersNameMap below for the newly added ticker.
+ */
+enum Tickers : uint32_t {
+  // total block cache misses
+  // REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS +
+  //                               BLOCK_CACHE_FILTER_MISS +
+  //                               BLOCK_CACHE_DATA_MISS;
+  BLOCK_CACHE_MISS = 0,
+  // total block cache hit
+  // REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT +
+  //                              BLOCK_CACHE_FILTER_HIT +
+  //                              BLOCK_CACHE_DATA_HIT;
+  BLOCK_CACHE_HIT,
+  // # of blocks added to block cache.
+  BLOCK_CACHE_ADD,
+  // # of failures when adding blocks to block cache.
+  BLOCK_CACHE_ADD_FAILURES,
+  // # of times cache miss when accessing index block from block cache.
+  BLOCK_CACHE_INDEX_MISS,
+  // # of times cache hit when accessing index block from block cache.
+  BLOCK_CACHE_INDEX_HIT,
+  // # of bytes of index blocks inserted into cache
+  BLOCK_CACHE_INDEX_BYTES_INSERT,
+  // # of bytes of index block erased from cache
+  BLOCK_CACHE_INDEX_BYTES_EVICT,
+  // # of times cache miss when accessing filter block from block cache.
+  BLOCK_CACHE_FILTER_MISS,
+  // # of times cache hit when accessing filter block from block cache.
+  BLOCK_CACHE_FILTER_HIT,
+  // # of bytes of bloom filter blocks inserted into cache
+  BLOCK_CACHE_FILTER_BYTES_INSERT,
+  // # of bytes of bloom filter block erased from cache
+  BLOCK_CACHE_FILTER_BYTES_EVICT,
+  // # of times cache miss when accessing data block from block cache.
+  BLOCK_CACHE_DATA_MISS,
+  // # of times cache hit when accessing data block from block cache.
+  BLOCK_CACHE_DATA_HIT,
+  // # of bytes read from cache.
+  BLOCK_CACHE_BYTES_READ,
+  // # of bytes written into cache.
+  BLOCK_CACHE_BYTES_WRITE,
+
+  // # of times bloom filter has avoided file reads.
+  BLOOM_FILTER_USEFUL,
+
+  // # persistent cache hit
+  PERSISTENT_CACHE_HIT,
+  // # persistent cache miss
+  PERSISTENT_CACHE_MISS,
+
+  // # of memtable hits.
+  MEMTABLE_HIT,
+  // # of memtable misses.
+  MEMTABLE_MISS,
+
+  // # of Get() queries served by L0
+  GET_HIT_L0,
+  // # of Get() queries served by L1
+  GET_HIT_L1,
+  // # of Get() queries served by L2 and up
+  GET_HIT_L2_AND_UP,
+
+  /**
+   * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction.
+   * There are currently 3 reasons.
+   */
+  COMPACTION_KEY_DROP_NEWER_ENTRY,  // key was written with a newer value.
+  COMPACTION_KEY_DROP_OBSOLETE,     // The key is obsolete.
+  COMPACTION_KEY_DROP_USER,  // user compaction function has dropped the key.
+
+  // Number of keys written to the database via the Put and Write calls
+  NUMBER_KEYS_WRITTEN,
+  // Number of keys read
+  NUMBER_KEYS_READ,
+  // Number of keys updated, if in-place update is enabled
+  NUMBER_KEYS_UPDATED,
+  // The number of uncompressed bytes issued by DB::Put(), DB::Delete(),
+  // DB::Merge(), and DB::Write().
+  BYTES_WRITTEN,
+  // The number of uncompressed bytes read from DB::Get().  It could be
+  // either from memtables, cache, or table files.
+  // For the number of logical bytes read from DB::MultiGet(),
+  // please use NUMBER_MULTIGET_BYTES_READ.
+  BYTES_READ,
+  // The number of calls to seek/next/prev
+  NUMBER_DB_SEEK,
+  NUMBER_DB_NEXT,
+  NUMBER_DB_PREV,
+  // The number of calls to seek/next/prev that returned data
+  NUMBER_DB_SEEK_FOUND,
+  NUMBER_DB_NEXT_FOUND,
+  NUMBER_DB_PREV_FOUND,
+  // The number of uncompressed bytes read from an iterator.
+  // Includes size of key and value.
+  ITER_BYTES_READ,
+  NO_FILE_CLOSES,
+  NO_FILE_OPENS,
+  NO_FILE_ERRORS,
+  // DEPRECATED Time system had to wait to do L0-L1 compactions
+  STALL_L0_SLOWDOWN_MICROS,
+  // DEPRECATED Time system had to wait to move memtable to L1.
+  STALL_MEMTABLE_COMPACTION_MICROS,
+  // DEPRECATED write throttle because of too many files in L0
+  STALL_L0_NUM_FILES_MICROS,
+  // Writer has to wait for compaction or flush to finish.
+  STALL_MICROS,
+  // The wait time for db mutex.
+  // Disabled by default. To enable it set stats level to kAll
+  DB_MUTEX_WAIT_MICROS,
+  RATE_LIMIT_DELAY_MILLIS,
+  NO_ITERATORS,  // number of iterators currently open
+
+  // Number of MultiGet calls, keys read, and bytes read
+  NUMBER_MULTIGET_CALLS,
+  NUMBER_MULTIGET_KEYS_READ,
+  NUMBER_MULTIGET_BYTES_READ,
+
+  // Number of delete records that were not required to be
+  // written to storage because the key did not exist
+  NUMBER_FILTERED_DELETES,
+  NUMBER_MERGE_FAILURES,
+  SEQUENCE_NUMBER,
+
+  // number of times bloom was checked before creating iterator on a
+  // file, and the number of times the check was useful in avoiding
+  // iterator creation (and thus likely IOPs).
+  BLOOM_FILTER_PREFIX_CHECKED,
+  BLOOM_FILTER_PREFIX_USEFUL,
+
+  // Number of times we had to reseek inside an iteration to skip
+  // over a large number of keys with the same userkey.
+  NUMBER_OF_RESEEKS_IN_ITERATION,
+
+  // Record the number of calls to GetUpdatesSince. Useful to keep track of
+  // transaction log iterator refreshes
+  GET_UPDATES_SINCE_CALLS,
+  BLOCK_CACHE_COMPRESSED_MISS,  // miss in the compressed block cache
+  BLOCK_CACHE_COMPRESSED_HIT,   // hit in the compressed block cache
+  // Number of blocks added to compressed block cache
+  BLOCK_CACHE_COMPRESSED_ADD,
+  // Number of failures when adding blocks to compressed block cache
+  BLOCK_CACHE_COMPRESSED_ADD_FAILURES,
+  WAL_FILE_SYNCED,  // Number of times WAL sync is done
+  WAL_FILE_BYTES,   // Number of bytes written to WAL
+
+  // Writes can be processed by requesting thread or by the thread at the
+  // head of the writers queue.
+  WRITE_DONE_BY_SELF,
+  WRITE_DONE_BY_OTHER,  // Equivalent to writes done for others
+  WRITE_TIMEDOUT,       // Number of writes that timed out
+  WRITE_WITH_WAL,       // Number of Write calls that request WAL
+  COMPACT_READ_BYTES,   // Bytes read during compaction
+  COMPACT_WRITE_BYTES,  // Bytes written during compaction
+  FLUSH_WRITE_BYTES,    // Bytes written during flush
+
+  // Number of table's properties loaded directly from file, without creating
+  // a table reader object.
+  NUMBER_DIRECT_LOAD_TABLE_PROPERTIES,
+  NUMBER_SUPERVERSION_ACQUIRES,
+  NUMBER_SUPERVERSION_RELEASES,
+  NUMBER_SUPERVERSION_CLEANUPS,
+
+  // # of compressions/decompressions executed
+  NUMBER_BLOCK_COMPRESSED,
+  NUMBER_BLOCK_DECOMPRESSED,
+
+  NUMBER_BLOCK_NOT_COMPRESSED,
+  MERGE_OPERATION_TOTAL_TIME,
+  FILTER_OPERATION_TOTAL_TIME,
+
+  // Row cache.
+  ROW_CACHE_HIT,
+  ROW_CACHE_MISS,
+
+  TICKER_ENUM_MAX
+};
+
+// The order of items listed in Tickers should be the same as
+// the order listed in TickersNameMap
+const std::vector<std::pair<Tickers, std::string>> TickersNameMap = {
+    {BLOCK_CACHE_MISS, "rocksdb.block.cache.miss"},
+    {BLOCK_CACHE_HIT, "rocksdb.block.cache.hit"},
+    {BLOCK_CACHE_ADD, "rocksdb.block.cache.add"},
+    {BLOCK_CACHE_ADD_FAILURES, "rocksdb.block.cache.add.failures"},
+    {BLOCK_CACHE_INDEX_MISS, "rocksdb.block.cache.index.miss"},
+    {BLOCK_CACHE_INDEX_HIT, "rocksdb.block.cache.index.hit"},
+    {BLOCK_CACHE_INDEX_BYTES_INSERT, "rocksdb.block.cache.index.bytes.insert"},
+    {BLOCK_CACHE_INDEX_BYTES_EVICT, "rocksdb.block.cache.index.bytes.evict"},
+    {BLOCK_CACHE_FILTER_MISS, "rocksdb.block.cache.filter.miss"},
+    {BLOCK_CACHE_FILTER_HIT, "rocksdb.block.cache.filter.hit"},
+    {BLOCK_CACHE_FILTER_BYTES_INSERT,
+     "rocksdb.block.cache.filter.bytes.insert"},
+    {BLOCK_CACHE_FILTER_BYTES_EVICT, "rocksdb.block.cache.filter.bytes.evict"},
+    {BLOCK_CACHE_DATA_MISS, "rocksdb.block.cache.data.miss"},
+    {BLOCK_CACHE_DATA_HIT, "rocksdb.block.cache.data.hit"},
+    {BLOCK_CACHE_BYTES_READ, "rocksdb.block.cache.bytes.read"},
+    {BLOCK_CACHE_BYTES_WRITE, "rocksdb.block.cache.bytes.write"},
+    {BLOOM_FILTER_USEFUL, "rocksdb.bloom.filter.useful"},
+    {PERSISTENT_CACHE_HIT, "rocksdb.persistent.cache.hit"},
+    {PERSISTENT_CACHE_MISS, "rocksdb.persistent.cache.miss"},
+    {MEMTABLE_HIT, "rocksdb.memtable.hit"},
+    {MEMTABLE_MISS, "rocksdb.memtable.miss"},
+    {GET_HIT_L0, "rocksdb.l0.hit"},
+    {GET_HIT_L1, "rocksdb.l1.hit"},
+    {GET_HIT_L2_AND_UP, "rocksdb.l2andup.hit"},
+    {COMPACTION_KEY_DROP_NEWER_ENTRY, "rocksdb.compaction.key.drop.new"},
+    {COMPACTION_KEY_DROP_OBSOLETE, "rocksdb.compaction.key.drop.obsolete"},
+    {COMPACTION_KEY_DROP_USER, "rocksdb.compaction.key.drop.user"},
+    {NUMBER_KEYS_WRITTEN, "rocksdb.number.keys.written"},
+    {NUMBER_KEYS_READ, "rocksdb.number.keys.read"},
+    {NUMBER_KEYS_UPDATED, "rocksdb.number.keys.updated"},
+    {BYTES_WRITTEN, "rocksdb.bytes.written"},
+    {BYTES_READ, "rocksdb.bytes.read"},
+    {NUMBER_DB_SEEK, "rocksdb.number.db.seek"},
+    {NUMBER_DB_NEXT, "rocksdb.number.db.next"},
+    {NUMBER_DB_PREV, "rocksdb.number.db.prev"},
+    {NUMBER_DB_SEEK_FOUND, "rocksdb.number.db.seek.found"},
+    {NUMBER_DB_NEXT_FOUND, "rocksdb.number.db.next.found"},
+    {NUMBER_DB_PREV_FOUND, "rocksdb.number.db.prev.found"},
+    {ITER_BYTES_READ, "rocksdb.db.iter.bytes.read"},
+    {NO_FILE_CLOSES, "rocksdb.no.file.closes"},
+    {NO_FILE_OPENS, "rocksdb.no.file.opens"},
+    {NO_FILE_ERRORS, "rocksdb.no.file.errors"},
+    {STALL_L0_SLOWDOWN_MICROS, "rocksdb.l0.slowdown.micros"},
+    {STALL_MEMTABLE_COMPACTION_MICROS, "rocksdb.memtable.compaction.micros"},
+    {STALL_L0_NUM_FILES_MICROS, "rocksdb.l0.num.files.stall.micros"},
+    {STALL_MICROS, "rocksdb.stall.micros"},
+    {DB_MUTEX_WAIT_MICROS, "rocksdb.db.mutex.wait.micros"},
+    {RATE_LIMIT_DELAY_MILLIS, "rocksdb.rate.limit.delay.millis"},
+    {NO_ITERATORS, "rocksdb.num.iterators"},
+    {NUMBER_MULTIGET_CALLS, "rocksdb.number.multiget.get"},
+    {NUMBER_MULTIGET_KEYS_READ, "rocksdb.number.multiget.keys.read"},
+    {NUMBER_MULTIGET_BYTES_READ, "rocksdb.number.multiget.bytes.read"},
+    {NUMBER_FILTERED_DELETES, "rocksdb.number.deletes.filtered"},
+    {NUMBER_MERGE_FAILURES, "rocksdb.number.merge.failures"},
+    {SEQUENCE_NUMBER, "rocksdb.sequence.number"},
+    {BLOOM_FILTER_PREFIX_CHECKED, "rocksdb.bloom.filter.prefix.checked"},
+    {BLOOM_FILTER_PREFIX_USEFUL, "rocksdb.bloom.filter.prefix.useful"},
+    {NUMBER_OF_RESEEKS_IN_ITERATION, "rocksdb.number.reseeks.iteration"},
+    {GET_UPDATES_SINCE_CALLS, "rocksdb.getupdatessince.calls"},
+    {BLOCK_CACHE_COMPRESSED_MISS, "rocksdb.block.cachecompressed.miss"},
+    {BLOCK_CACHE_COMPRESSED_HIT, "rocksdb.block.cachecompressed.hit"},
+    {BLOCK_CACHE_COMPRESSED_ADD, "rocksdb.block.cachecompressed.add"},
+    {BLOCK_CACHE_COMPRESSED_ADD_FAILURES,
+     "rocksdb.block.cachecompressed.add.failures"},
+    {WAL_FILE_SYNCED, "rocksdb.wal.synced"},
+    {WAL_FILE_BYTES, "rocksdb.wal.bytes"},
+    {WRITE_DONE_BY_SELF, "rocksdb.write.self"},
+    {WRITE_DONE_BY_OTHER, "rocksdb.write.other"},
+    {WRITE_TIMEDOUT, "rocksdb.write.timeout"},
+    {WRITE_WITH_WAL, "rocksdb.write.wal"},
+    {COMPACT_READ_BYTES, "rocksdb.compact.read.bytes"},
+    {COMPACT_WRITE_BYTES, "rocksdb.compact.write.bytes"},
+    {FLUSH_WRITE_BYTES, "rocksdb.flush.write.bytes"},
+    {NUMBER_DIRECT_LOAD_TABLE_PROPERTIES,
+     "rocksdb.number.direct.load.table.properties"},
+    {NUMBER_SUPERVERSION_ACQUIRES, "rocksdb.number.superversion_acquires"},
+    {NUMBER_SUPERVERSION_RELEASES, "rocksdb.number.superversion_releases"},
+    {NUMBER_SUPERVERSION_CLEANUPS, "rocksdb.number.superversion_cleanups"},
+    {NUMBER_BLOCK_COMPRESSED, "rocksdb.number.block.compressed"},
+    {NUMBER_BLOCK_DECOMPRESSED, "rocksdb.number.block.decompressed"},
+    {NUMBER_BLOCK_NOT_COMPRESSED, "rocksdb.number.block.not_compressed"},
+    {MERGE_OPERATION_TOTAL_TIME, "rocksdb.merge.operation.time.nanos"},
+    {FILTER_OPERATION_TOTAL_TIME, "rocksdb.filter.operation.time.nanos"},
+    {ROW_CACHE_HIT, "rocksdb.row.cache.hit"},
+    {ROW_CACHE_MISS, "rocksdb.row.cache.miss"},
+};
+
+/**
+ * Keep adding histograms here.
+ * Any histogram should have a value less than HISTOGRAM_ENUM_MAX.
+ * Add a new histogram by assigning it the current value of HISTOGRAM_ENUM_MAX,
+ * add a string representation in HistogramsNameMap below,
+ * and increment HISTOGRAM_ENUM_MAX.
+ */
+enum Histograms : uint32_t {
+  DB_GET = 0,
+  DB_WRITE,
+  COMPACTION_TIME,
+  SUBCOMPACTION_SETUP_TIME,
+  TABLE_SYNC_MICROS,
+  COMPACTION_OUTFILE_SYNC_MICROS,
+  WAL_FILE_SYNC_MICROS,
+  MANIFEST_FILE_SYNC_MICROS,
+  // TIME SPENT IN IO DURING TABLE OPEN
+  TABLE_OPEN_IO_MICROS,
+  DB_MULTIGET,
+  READ_BLOCK_COMPACTION_MICROS,
+  READ_BLOCK_GET_MICROS,
+  WRITE_RAW_BLOCK_MICROS,
+  STALL_L0_SLOWDOWN_COUNT,
+  STALL_MEMTABLE_COMPACTION_COUNT,
+  STALL_L0_NUM_FILES_COUNT,
+  HARD_RATE_LIMIT_DELAY_COUNT,
+  SOFT_RATE_LIMIT_DELAY_COUNT,
+  NUM_FILES_IN_SINGLE_COMPACTION,
+  DB_SEEK,
+  WRITE_STALL,
+  SST_READ_MICROS,
+  // The number of subcompactions actually scheduled during a compaction
+  NUM_SUBCOMPACTIONS_SCHEDULED,
+  // Value size distribution in each operation
+  BYTES_PER_READ,
+  BYTES_PER_WRITE,
+  BYTES_PER_MULTIGET,
+
+  // number of bytes compressed/decompressed
+  // number of bytes is when uncompressed; i.e. before/after respectively
+  BYTES_COMPRESSED,
+  BYTES_DECOMPRESSED,
+  COMPRESSION_TIMES_NANOS,
+  DECOMPRESSION_TIMES_NANOS,
+
+  HISTOGRAM_ENUM_MAX,  // TODO(ldemailly): enforce HistogramsNameMap match
+};
+
+const std::vector<std::pair<Histograms, std::string>> HistogramsNameMap = {
+    {DB_GET, "rocksdb.db.get.micros"},
+    {DB_WRITE, "rocksdb.db.write.micros"},
+    {COMPACTION_TIME, "rocksdb.compaction.times.micros"},
+    {SUBCOMPACTION_SETUP_TIME, "rocksdb.subcompaction.setup.times.micros"},
+    {TABLE_SYNC_MICROS, "rocksdb.table.sync.micros"},
+    {COMPACTION_OUTFILE_SYNC_MICROS, "rocksdb.compaction.outfile.sync.micros"},
+    {WAL_FILE_SYNC_MICROS, "rocksdb.wal.file.sync.micros"},
+    {MANIFEST_FILE_SYNC_MICROS, "rocksdb.manifest.file.sync.micros"},
+    {TABLE_OPEN_IO_MICROS, "rocksdb.table.open.io.micros"},
+    {DB_MULTIGET, "rocksdb.db.multiget.micros"},
+    {READ_BLOCK_COMPACTION_MICROS, "rocksdb.read.block.compaction.micros"},
+    {READ_BLOCK_GET_MICROS, "rocksdb.read.block.get.micros"},
+    {WRITE_RAW_BLOCK_MICROS, "rocksdb.write.raw.block.micros"},
+    {STALL_L0_SLOWDOWN_COUNT, "rocksdb.l0.slowdown.count"},
+    {STALL_MEMTABLE_COMPACTION_COUNT, "rocksdb.memtable.compaction.count"},
+    {STALL_L0_NUM_FILES_COUNT, "rocksdb.num.files.stall.count"},
+    {HARD_RATE_LIMIT_DELAY_COUNT, "rocksdb.hard.rate.limit.delay.count"},
+    {SOFT_RATE_LIMIT_DELAY_COUNT, "rocksdb.soft.rate.limit.delay.count"},
+    {NUM_FILES_IN_SINGLE_COMPACTION, "rocksdb.numfiles.in.singlecompaction"},
+    {DB_SEEK, "rocksdb.db.seek.micros"},
+    {WRITE_STALL, "rocksdb.db.write.stall"},
+    {SST_READ_MICROS, "rocksdb.sst.read.micros"},
+    {NUM_SUBCOMPACTIONS_SCHEDULED, "rocksdb.num.subcompactions.scheduled"},
+    {BYTES_PER_READ, "rocksdb.bytes.per.read"},
+    {BYTES_PER_WRITE, "rocksdb.bytes.per.write"},
+    {BYTES_PER_MULTIGET, "rocksdb.bytes.per.multiget"},
+    {BYTES_COMPRESSED, "rocksdb.bytes.compressed"},
+    {BYTES_DECOMPRESSED, "rocksdb.bytes.decompressed"},
+    {COMPRESSION_TIMES_NANOS, "rocksdb.compression.times.nanos"},
+    {DECOMPRESSION_TIMES_NANOS, "rocksdb.decompression.times.nanos"},
+};
+
+struct HistogramData {
+  double median;
+  double percentile95;
+  double percentile99;
+  double average;
+  double standard_deviation;
+};
+
+enum StatsLevel {
+  // Collect all stats except the counters requiring to get time inside the
+  // mutex lock.
+  kExceptTimeForMutex,
+  // Collect all stats except time inside mutex lock AND time spent on
+  // compression.
+  kExceptDetailedTimers,
+  // Collect all stats, including measuring duration of mutex operations.
+  // If getting time is expensive on the platform to run, it can
+  // reduce scalability to more threads, especially for writes.
+  kAll,
+};
+
+// Analyze the performance of a db
+class Statistics {
+ public:
+  virtual ~Statistics() {}
+
+  virtual uint64_t getTickerCount(uint32_t tickerType) const = 0;
+  virtual void histogramData(uint32_t type,
+                             HistogramData* const data) const = 0;
+  virtual std::string getHistogramString(uint32_t type) const { return ""; }
+  virtual void recordTick(uint32_t tickerType, uint64_t count = 0) = 0;
+  virtual void setTickerCount(uint32_t tickerType, uint64_t count) = 0;
+  virtual void measureTime(uint32_t histogramType, uint64_t time) = 0;
+
+  // String representation of the statistic object.
+  virtual std::string ToString() const {
+    // Do nothing by default
+    return std::string("ToString(): not implemented");
+  }
+
+  // Override this function to disable particular histogram collection
+  virtual bool HistEnabledForType(uint32_t type) const {
+    return type < HISTOGRAM_ENUM_MAX;
+  }
+
+  StatsLevel stats_level_ = kExceptTimeForMutex;
+};
+
+// Create a concrete DBStatistics object
+std::shared_ptr<Statistics> CreateDBStatistics();
+
+}  // namespace rocksdb
+
+#endif  // STORAGE_ROCKSDB_INCLUDE_STATISTICS_H_
diff --git a/external/rocksdb/include/rocksdb/status.h b/external/rocksdb/include/rocksdb/status.h
new file mode 100755
index 0000000000..bff15ee0f7
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/status.h
@@ -0,0 +1,274 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// A Status encapsulates the result of an operation.  It may indicate success,
+// or it may indicate an error with an associated error message.
+//
+// Multiple threads can invoke const methods on a Status without
+// external synchronization, but if any of the threads may call a
+// non-const method, all threads accessing the same Status must use
+// external synchronization.
+
+#ifndef STORAGE_ROCKSDB_INCLUDE_STATUS_H_
+#define STORAGE_ROCKSDB_INCLUDE_STATUS_H_
+
+#include <string>
+#include "rocksdb/slice.h"
+
+namespace rocksdb {
+
+class Status {
+ public:
+  // Create a success status.
+  Status() : code_(kOk), subcode_(kNone), state_(nullptr) {}
+  ~Status() { delete[] state_; }
+
+  // Copy the specified status.
+  Status(const Status& s);
+  Status& operator=(const Status& s);
+  Status(Status&& s)
+#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900))
+      noexcept
+#endif
+      ;
+  Status& operator=(Status&& s)
+#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900))
+      noexcept
+#endif
+      ;
+  bool operator==(const Status& rhs) const;
+  bool operator!=(const Status& rhs) const;
+
+  enum Code {
+    kOk = 0,
+    kNotFound = 1,
+    kCorruption = 2,
+    kNotSupported = 3,
+    kInvalidArgument = 4,
+    kIOError = 5,
+    kMergeInProgress = 6,
+    kIncomplete = 7,
+    kShutdownInProgress = 8,
+    kTimedOut = 9,
+    kAborted = 10,
+    kBusy = 11,
+    kExpired = 12,
+    kTryAgain = 13,
+  };
+
+  Code code() const { return code_; }
+
+  enum SubCode {
+    kNone = 0,
+    kMutexTimeout = 1,
+    kLockTimeout = 2,
+    kLockLimit = 3,
+    kMaxSubCode
+  };
+
+  SubCode subcode() const { return subcode_; }
+
+  // Return a success status.
+  static Status OK() { return Status(); }
+
+  // Return error status of an appropriate type.
+  static Status NotFound(const Slice& msg, const Slice& msg2 = Slice()) {
+    return Status(kNotFound, msg, msg2);
+  }
+  // Fast path for not found without malloc.
+  static Status NotFound(SubCode msg = kNone) { return Status(kNotFound, msg); }
+
+  static Status Corruption(const Slice& msg, const Slice& msg2 = Slice()) {
+    return Status(kCorruption, msg, msg2);
+  }
+  static Status Corruption(SubCode msg = kNone) {
+    return Status(kCorruption, msg);
+  }
+
+  static Status NotSupported(const Slice& msg, const Slice& msg2 = Slice()) {
+    return Status(kNotSupported, msg, msg2);
+  }
+  static Status NotSupported(SubCode msg = kNone) {
+    return Status(kNotSupported, msg);
+  }
+
+  static Status InvalidArgument(const Slice& msg, const Slice& msg2 = Slice()) {
+    return Status(kInvalidArgument, msg, msg2);
+  }
+  static Status InvalidArgument(SubCode msg = kNone) {
+    return Status(kInvalidArgument, msg);
+  }
+
+  static Status IOError(const Slice& msg, const Slice& msg2 = Slice()) {
+    return Status(kIOError, msg, msg2);
+  }
+  static Status IOError(SubCode msg = kNone) { return Status(kIOError, msg); }
+
+  static Status MergeInProgress(const Slice& msg, const Slice& msg2 = Slice()) {
+    return Status(kMergeInProgress, msg, msg2);
+  }
+  static Status MergeInProgress(SubCode msg = kNone) {
+    return Status(kMergeInProgress, msg);
+  }
+
+  static Status Incomplete(const Slice& msg, const Slice& msg2 = Slice()) {
+    return Status(kIncomplete, msg, msg2);
+  }
+  static Status Incomplete(SubCode msg = kNone) {
+    return Status(kIncomplete, msg);
+  }
+
+  static Status ShutdownInProgress(SubCode msg = kNone) {
+    return Status(kShutdownInProgress, msg);
+  }
+  static Status ShutdownInProgress(const Slice& msg,
+                                   const Slice& msg2 = Slice()) {
+    return Status(kShutdownInProgress, msg, msg2);
+  }
+  static Status Aborted(SubCode msg = kNone) { return Status(kAborted, msg); }
+  static Status Aborted(const Slice& msg, const Slice& msg2 = Slice()) {
+    return Status(kAborted, msg, msg2);
+  }
+
+  static Status Busy(SubCode msg = kNone) { return Status(kBusy, msg); }
+  static Status Busy(const Slice& msg, const Slice& msg2 = Slice()) {
+    return Status(kBusy, msg, msg2);
+  }
+
+  static Status TimedOut(SubCode msg = kNone) { return Status(kTimedOut, msg); }
+  static Status TimedOut(const Slice& msg, const Slice& msg2 = Slice()) {
+    return Status(kTimedOut, msg, msg2);
+  }
+
+  static Status Expired(SubCode msg = kNone) { return Status(kExpired, msg); }
+  static Status Expired(const Slice& msg, const Slice& msg2 = Slice()) {
+    return Status(kExpired, msg, msg2);
+  }
+
+  static Status TryAgain(SubCode msg = kNone) { return Status(kTryAgain, msg); }
+  static Status TryAgain(const Slice& msg, const Slice& msg2 = Slice()) {
+    return Status(kTryAgain, msg, msg2);
+  }
+
+  // Returns true iff the status indicates success.
+  bool ok() const { return code() == kOk; }
+
+  // Returns true iff the status indicates a NotFound error.
+  bool IsNotFound() const { return code() == kNotFound; }
+
+  // Returns true iff the status indicates a Corruption error.
+  bool IsCorruption() const { return code() == kCorruption; }
+
+  // Returns true iff the status indicates a NotSupported error.
+  bool IsNotSupported() const { return code() == kNotSupported; }
+
+  // Returns true iff the status indicates an InvalidArgument error.
+  bool IsInvalidArgument() const { return code() == kInvalidArgument; }
+
+  // Returns true iff the status indicates an IOError.
+  bool IsIOError() const { return code() == kIOError; }
+
+  // Returns true iff the status indicates a MergeInProgress.
+  bool IsMergeInProgress() const { return code() == kMergeInProgress; }
+
+  // Returns true iff the status indicates Incomplete
+  bool IsIncomplete() const { return code() == kIncomplete; }
+
+  // Returns true iff the status indicates Shutdown In progress
+  bool IsShutdownInProgress() const { return code() == kShutdownInProgress; }
+
+  bool IsTimedOut() const { return code() == kTimedOut; }
+
+  bool IsAborted() const { return code() == kAborted; }
+
+  // Returns true iff the status indicates that a resource is Busy and
+  // temporarily could not be acquired.
+  bool IsBusy() const { return code() == kBusy; }
+
+  // Returns true iff the status indicated that the operation has Expired.
+  bool IsExpired() const { return code() == kExpired; }
+
+  // Returns true iff the status indicates a TryAgain error.
+  // This usually means that the operation failed, but may succeed if
+  // re-attempted.
+  bool IsTryAgain() const { return code() == kTryAgain; }
+
+  // Return a string representation of this status suitable for printing.
+  // Returns the string "OK" for success.
+  std::string ToString() const;
+
+ private:
+  // A nullptr state_ (which is always the case for OK) means the message
+  // is empty. Otherwise, state_ is of the following form:
+  //    state_[0..3] == length of message
+  //    state_[4..]  == message
+  Code code_;
+  SubCode subcode_;
+  const char* state_;
+
+  static const char* msgs[static_cast<size_t>(kMaxSubCode)];
+
+  explicit Status(Code _code, SubCode _subcode = kNone)
+      : code_(_code), subcode_(_subcode), state_(nullptr) {}
+
+  Status(Code _code, const Slice& msg, const Slice& msg2);
+  static const char* CopyState(const char* s);
+};
+
+inline Status::Status(const Status& s) : code_(s.code_), subcode_(s.subcode_) {
+  state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
+}
+inline Status& Status::operator=(const Status& s) {
+  // The following condition catches both aliasing (when this == &s),
+  // and the common case where both s and *this are ok.
+  if (this != &s) {
+    code_ = s.code_;
+    subcode_ = s.subcode_;
+    delete[] state_;
+    state_ = (s.state_ == nullptr) ? nullptr : CopyState(s.state_);
+  }
+  return *this;
+}
+
+inline Status::Status(Status&& s)
+#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900))
+    noexcept
+#endif
+    : Status() {
+  *this = std::move(s);
+}
+
+inline Status& Status::operator=(Status&& s)
+#if !(defined _MSC_VER) || ((defined _MSC_VER) && (_MSC_VER >= 1900))
+    noexcept
+#endif
+{
+  if (this != &s) {
+    code_ = std::move(s.code_);
+    s.code_ = kOk;
+    subcode_ = std::move(s.subcode_);
+    s.subcode_ = kNone;
+    delete[] state_;
+    state_ = nullptr;
+    std::swap(state_, s.state_);
+  }
+  return *this;
+}
+
+inline bool Status::operator==(const Status& rhs) const {
+  return (code_ == rhs.code_);
+}
+
+inline bool Status::operator!=(const Status& rhs) const {
+  return !(*this == rhs);
+}
+
+}  // namespace rocksdb
+
+#endif  // STORAGE_ROCKSDB_INCLUDE_STATUS_H_
diff --git a/external/rocksdb/include/rocksdb/table.h b/external/rocksdb/include/rocksdb/table.h
new file mode 100755
index 0000000000..3332753165
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/table.h
@@ -0,0 +1,465 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
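Since Status carries errors by value rather than by exception, callers are expected to branch on the predicates above. A brief sketch, assuming an already-open rocksdb::DB* (the key is hypothetical):

    #include <iostream>
    #include <string>
    #include "rocksdb/db.h"

    void CheckedGet(rocksdb::DB* db) {
      std::string value;
      rocksdb::Status s =
          db->Get(rocksdb::ReadOptions(), "missing-key", &value);
      if (s.IsNotFound()) {
        std::cout << "key not found" << std::endl;  // absence is not an error
      } else if (!s.ok()) {
        std::cerr << s.ToString() << std::endl;     // e.g. "IO error: ..."
      }
    }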
+//
+// Currently we support two types of tables: plain table and block-based table.
+//   1. Block-based table: this is the default table type that we inherited
+//      from LevelDB, which was designed for storing data on hard disk or
+//      flash device.
+//   2. Plain table: it is one of RocksDB's SST file formats optimized
+//      for low query latency on pure-memory or really low-latency media.
+//
+// A tutorial of rocksdb table formats is available here:
+//   https://github.com/facebook/rocksdb/wiki/A-Tutorial-of-RocksDB-SST-formats
+//
+// Example code is also available
+//   https://github.com/facebook/rocksdb/wiki/A-Tutorial-of-RocksDB-SST-formats#wiki-examples
+
+#pragma once
+#include <memory>
+#include <string>
+#include <unordered_map>
+
+#include "rocksdb/env.h"
+#include "rocksdb/immutable_options.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+
+// -- Block-based Table
+class FlushBlockPolicyFactory;
+class PersistentCache;
+class RandomAccessFile;
+struct TableReaderOptions;
+struct TableBuilderOptions;
+class TableBuilder;
+class TableReader;
+class WritableFileWriter;
+struct EnvOptions;
+struct Options;
+
+using std::unique_ptr;
+
+enum ChecksumType : char {
+  kNoChecksum = 0x0,  // not yet supported. Will fail
+  kCRC32c = 0x1,
+  kxxHash = 0x2,
+};
+
+// For advanced user only
+struct BlockBasedTableOptions {
+  // @flush_block_policy_factory creates the instances of flush block policy,
+  // which provides a configurable way to determine when to flush a block in
+  // the block based tables. If not set, table builder will use the default
+  // block flush policy, which cuts blocks by block size (please refer to
+  // `FlushBlockBySizePolicy`).
+  std::shared_ptr<FlushBlockPolicyFactory> flush_block_policy_factory;
+
+  // TODO(kailiu) Temporarily disable this feature by making the default value
+  // to be false.
+  //
+  // Indicating if we'd put index/filter blocks to the block cache.
+  // If not specified, each "table reader" object will pre-load index/filter
+  // block during table initialization.
+  bool cache_index_and_filter_blocks = false;
+
+  // if cache_index_and_filter_blocks is true and the below is true, then
+  // filter and index blocks are stored in the cache, but a reference is
+  // held in the "table reader" object so the blocks are pinned and only
+  // evicted from cache when the table reader is freed.
+  bool pin_l0_filter_and_index_blocks_in_cache = false;
+
+  // The index type that will be used for this table.
+  enum IndexType : char {
+    // A space efficient index block that is optimized for
+    // binary-search-based index.
+    kBinarySearch,
+
+    // The hash index, if enabled, will do the hash lookup when
+    // `Options.prefix_extractor` is provided.
+    kHashSearch,
+  };
+
+  IndexType index_type = kBinarySearch;
+
+  // This option is now deprecated. No matter what value it is set to,
+  // it will behave as if hash_index_allow_collision=true.
+  bool hash_index_allow_collision = true;
+
+  // Use the specified checksum type. Newly created table files will be
+  // protected with this checksum type. Old table files will still be readable,
+  // even though they have a different checksum type.
+  ChecksumType checksum = kCRC32c;
+
+  // Disable block cache. If this is set to true,
+  // then no block cache should be used, and the block_cache should
+  // point to a nullptr object.
+  bool no_block_cache = false;
+
+  // If non-NULL use the specified cache for blocks.
+  // If NULL, rocksdb will automatically create and use an 8MB internal cache.
+  std::shared_ptr<Cache> block_cache = nullptr;
+
+  // If non-NULL use the specified cache for pages read from device.
+  // If NULL, no page cache is used.
+  std::shared_ptr<PersistentCache> persistent_cache = nullptr;
+
+  // If non-NULL use the specified cache for compressed blocks.
+  // If NULL, rocksdb will not use a compressed block cache.
+  std::shared_ptr<Cache> block_cache_compressed = nullptr;
+
+  // Approximate size of user data packed per block.  Note that the
+  // block size specified here corresponds to uncompressed data.  The
+  // actual size of the unit read from disk may be smaller if
+  // compression is enabled.  This parameter can be changed dynamically.
+  size_t block_size = 4 * 1024;
+
+  // This is used to close a block before it reaches the configured
+  // 'block_size'. If the percentage of free space in the current block is less
+  // than this specified number and adding a new record to the block will
+  // exceed the configured block size, then this block will be closed and the
+  // new record will be written to the next block.
+  int block_size_deviation = 10;
+
+  // Number of keys between restart points for delta encoding of keys.
+  // This parameter can be changed dynamically.  Most clients should
+  // leave this parameter alone.  The minimum value allowed is 1.  Any smaller
+  // value will be silently overwritten with 1.
+  int block_restart_interval = 16;
+
+  // Same as block_restart_interval but used for the index block.
+  int index_block_restart_interval = 1;
+
+  // Use delta encoding to compress keys in blocks.
+  // ReadOptions::pin_data requires this option to be disabled.
+  //
+  // Default: true
+  bool use_delta_encoding = true;
+
+  // If non-nullptr, use the specified filter policy to reduce disk reads.
+  // Many applications will benefit from passing the result of
+  // NewBloomFilterPolicy() here.
+  std::shared_ptr<const FilterPolicy> filter_policy = nullptr;
+
+  // If true, place whole keys in the filter (not just prefixes).
+  // This must generally be true for gets to be efficient.
+  bool whole_key_filtering = true;
+
+  // If true, a block will not be explicitly flushed to disk during building
+  // an SstTable. Instead, the buffer in WritableFileWriter will take
+  // care of the flushing when it is full.
+  //
+  // On Windows, this option helps a lot when unbuffered I/O
+  // (allow_os_buffer = false) is used, since it avoids small
+  // unbuffered disk writes.
+  //
+  // Users may also adjust writable_file_max_buffer_size to optimize disk I/O
+  // size.
+  //
+  // Default: false
+  bool skip_table_builder_flush = false;
+
+  // Verify that decompressing the compressed block gives back the input. This
+  // is a verification mode that we use to detect bugs in compression
+  // algorithms.
+  bool verify_compression = false;
+
+  // We currently have three versions:
+  // 0 -- This version is currently written out by all RocksDB's versions by
+  // default. Can be read by really old RocksDB's. Doesn't support changing
+  // checksum (default is CRC32).
+  // 1 -- Can be read by RocksDB's versions since 3.0. Supports non-default
+  // checksum, like xxHash. It is written by RocksDB when
+  // BlockBasedTableOptions::checksum is something other than kCRC32c. (version
+  // 0 is silently upconverted)
+  // 2 -- Can be read by RocksDB's versions since 3.10. Changes the way we
+  // encode compressed blocks with LZ4, BZip2 and Zlib compression. If you
+  // don't plan to run RocksDB before version 3.10, you should probably use
+  // this.
+  // This option only affects newly written tables.
+  // When reading existing tables, the information about version is read from
+  // the footer.
+  uint32_t format_version = 2;
+};
+
+// Table Properties that are specific to block-based table properties.
+struct BlockBasedTablePropertyNames {
+  // value of this property is a fixed int32 number.
+  static const std::string kIndexType;
+  // value is "1" for true and "0" for false.
+  static const std::string kWholeKeyFiltering;
+  // value is "1" for true and "0" for false.
+  static const std::string kPrefixFiltering;
+};
+
+// Create default block based table factory.
+extern TableFactory* NewBlockBasedTableFactory(
+    const BlockBasedTableOptions& table_options = BlockBasedTableOptions());
+
+#ifndef ROCKSDB_LITE
+
+enum EncodingType : char {
+  // Always write full keys without any special encoding.
+  kPlain,
+  // Find opportunity to write the same prefix once for multiple rows.
+  // In some cases, when a key follows a previous key with the same prefix,
+  // instead of writing out the full key, it just writes out the size of the
+  // shared prefix, as well as other bytes, to save some bytes.
+  //
+  // When using this option, the user is required to use the same prefix
+  // extractor to make sure the same prefix will be extracted from the same
+  // key. The Name() value of the prefix extractor will be stored in the file.
+  // When reopening the file, the name of the options.prefix_extractor given
+  // will be bitwise compared to the prefix extractors stored in the file. An
+  // error will be returned if the two don't match.
+  kPrefix,
+};
+
+// Table Properties that are specific to plain table properties.
+struct PlainTablePropertyNames {
+  static const std::string kPrefixExtractorName;
+  static const std::string kEncodingType;
+  static const std::string kBloomVersion;
+  static const std::string kNumBloomBlocks;
+};
+
+const uint32_t kPlainTableVariableLength = 0;
+
+struct PlainTableOptions {
+  // @user_key_len: plain table has optimization for fixed-size keys, which can
+  //                be specified via user_key_len. Alternatively, you can pass
+  //                `kPlainTableVariableLength` if your keys have variable
+  //                lengths.
+  uint32_t user_key_len = kPlainTableVariableLength;
+
+  // @bloom_bits_per_key: the number of bits used for the bloom filter per
+  //                      prefix. You may disable it by passing a zero.
+  int bloom_bits_per_key = 10;
+
+  // @hash_table_ratio: the desired utilization of the hash table used for
+  //                    prefix hashing.
+  //                    hash_table_ratio = number of prefixes / #buckets in the
+  //                    hash table
+  double hash_table_ratio = 0.75;
+
+  // @index_sparseness: inside each prefix, need to build one index record for
+  //                    how many keys for binary search inside each hash
+  //                    bucket. For encoding type kPrefix, the value will be
+  //                    used when writing to determine an interval to rewrite
+  //                    the full key. It will also be used as a suggestion and
+  //                    satisfied when possible.
+  size_t index_sparseness = 16;
+
+  // @huge_page_tlb_size: if <=0, allocate hash indexes and blooms from malloc.
+  //                      Otherwise from huge page TLB. The user needs to
+  //                      reserve huge pages for it to be allocated, like:
+  //                          sysctl -w vm.nr_hugepages=20
+  //                      See linux doc Documentation/vm/hugetlbpage.txt
+  size_t huge_page_tlb_size = 0;
+
+  // @encoding_type: how to encode the keys. See enum EncodingType above for
+  //                 the choices. The value will determine how to encode keys
+  //                 when writing to a new SST file.
+  //                 This value will be stored inside the SST file, which will
+  //                 be used when reading from the file, making it possible
+  //                 for users to choose a different encoding type when
+  //                 reopening a DB. Files with different encoding types can
+  //                 co-exist in the same DB and can be read.
+  EncodingType encoding_type = kPlain;
+
+  // @full_scan_mode: mode for reading the whole file one record by one
+  //                  without using the index.
+  bool full_scan_mode = false;
+
+  // @store_index_in_file: compute plain table index and bloom filter during
+  //                       file building and store it in file. When reading
+  //                       file, index will be mmaped instead of recomputed.
+  bool store_index_in_file = false;
+};
+
+// -- Plain Table with prefix-only seek
+// For this factory, you need to set Options.prefix_extractor properly to make
+// it work. Lookup will start with a prefix hash lookup for the key prefix.
+// Inside the hash bucket found, a binary search is executed for hash
+// conflicts. Finally, a linear search is used.
+
+extern TableFactory* NewPlainTableFactory(const PlainTableOptions& options =
+                                              PlainTableOptions());
+
+struct CuckooTablePropertyNames {
+  // The key that is used to fill empty buckets.
+  static const std::string kEmptyKey;
+  // Fixed length of value.
+  static const std::string kValueLength;
+  // Number of hash functions used in Cuckoo Hash.
+  static const std::string kNumHashFunc;
+  // It denotes the number of buckets in a Cuckoo Block. Given a key and a
+  // particular hash function, a Cuckoo Block is a set of consecutive buckets,
+  // where starting bucket id is given by the hash function on the key. In
+  // case of a collision during inserting the key, the builder tries to insert
+  // the key in other locations of the cuckoo block before using the next hash
+  // function. This reduces cache miss during read operation in case of
+  // collision.
+  static const std::string kCuckooBlockSize;
+  // Size of the hash table. Use this number to compute the modulo of the hash
+  // function. The actual number of buckets will be kMaxHashTableSize +
+  // kCuckooBlockSize - 1. The last kCuckooBlockSize-1 buckets are used to
+  // accommodate the Cuckoo Block from the end of the hash table, due to the
+  // cache friendly implementation.
+  static const std::string kHashTableSize;
+  // Denotes whether the keys sorted in the file are Internal Keys (if false)
+  // or User Keys only (if true).
+  static const std::string kIsLastLevel;
+  // Indicates if the identity function is used for the first hash function.
+  static const std::string kIdentityAsFirstHash;
+  // Indicates whether modulo or bitwise AND is used to calculate the hash
+  // value.
+  static const std::string kUseModuleHash;
+  // Fixed user key length
+  static const std::string kUserKeyLength;
+};
+
+struct CuckooTableOptions {
+  // Determines the utilization of hash tables. Smaller values
+  // result in larger hash tables with fewer collisions.
+  double hash_table_ratio = 0.9;
+  // A property used by the builder to determine the depth to go to
+  // to search for a path to displace elements in case of
+  // collision. See Builder.MakeSpaceForKey method. Higher
+  // values result in more efficient hash tables with fewer
+  // lookups but take more time to build.
+  uint32_t max_search_depth = 100;
+  // In case of collision while inserting, the builder
+  // attempts to insert in the next cuckoo_block_size
+  // locations before skipping over to the next Cuckoo hash
+  // function. This makes lookups more cache friendly in case
+  // of collisions.
+ uint32_t cuckoo_block_size = 5; + // If this option is enabled, user key is treated as uint64_t and its value + // is used as hash value directly. This option changes builder's behavior. + // Reader ignore this option and behave according to what specified in table + // property. + bool identity_as_first_hash = false; + // If this option is set to true, module is used during hash calculation. + // This often yields better space efficiency at the cost of performance. + // If this optino is set to false, # of entries in table is constrained to be + // power of two, and bit and is used to calculate hash, which is faster in + // general. + bool use_module_hash = true; +}; + +// Cuckoo Table Factory for SST table format using Cache Friendly Cuckoo Hashing +extern TableFactory* NewCuckooTableFactory( + const CuckooTableOptions& table_options = CuckooTableOptions()); + +#endif // ROCKSDB_LITE + +class RandomAccessFileReader; + +// A base class for table factories. +class TableFactory { + public: + virtual ~TableFactory() {} + + // The type of the table. + // + // The client of this package should switch to a new name whenever + // the table format implementation changes. + // + // Names starting with "rocksdb." are reserved and should not be used + // by any clients of this package. + virtual const char* Name() const = 0; + + // Returns a Table object table that can fetch data from file specified + // in parameter file. It's the caller's responsibility to make sure + // file is in the correct format. + // + // NewTableReader() is called in three places: + // (1) TableCache::FindTable() calls the function when table cache miss + // and cache the table object returned. + // (2) SstFileReader (for SST Dump) opens the table and dump the table + // contents using the interator of the table. + // (3) DBImpl::AddFile() calls this function to read the contents of + // the sst file it's attempting to add + // + // table_reader_options is a TableReaderOptions which contain all the + // needed parameters and configuration to open the table. + // file is a file handler to handle the file for the table. + // file_size is the physical file size of the file. + // table_reader is the output table reader. + virtual Status NewTableReader( + const TableReaderOptions& table_reader_options, + unique_ptr&& file, uint64_t file_size, + unique_ptr* table_reader, + bool prefetch_index_and_filter_in_cache = true) const = 0; + + // Return a table builder to write to a file for this table type. + // + // It is called in several places: + // (1) When flushing memtable to a level-0 output file, it creates a table + // builder (In DBImpl::WriteLevel0Table(), by calling BuildTable()) + // (2) During compaction, it gets the builder for writing compaction output + // files in DBImpl::OpenCompactionOutputFile(). + // (3) When recovering from transaction logs, it creates a table builder to + // write to a level-0 output file (In DBImpl::WriteLevel0TableForRecovery, + // by calling BuildTable()) + // (4) When running Repairer, it creates a table builder to convert logs to + // SST files (In Repairer::ConvertLogToTable() by calling BuildTable()) + // + // ImmutableCFOptions is a subset of Options that can not be altered. + // Multiple configured can be acceseed from there, including and not limited + // to compression options. file is a handle of a writable file. + // It is the caller's responsibility to keep the file open and close the file + // after closing the table builder. 
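[Editor's aside: the plain-table factory declared above only works with a prefix extractor configured, since its lookup path (prefix hash, then in-bucket binary search, then linear scan) depends on one. A hedged sketch, assuming rocksdb/slice_transform.h; the 8-byte fixed prefix and the name MakePlainTableOptions are arbitrary illustrative choices. The NewTableBuilder discussion continues below.]

#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"
#include "rocksdb/table.h"

// Illustrative sketch: plain table with kPrefix encoding. Real keys must
// actually share the fixed 8-byte prefix layout chosen here.
rocksdb::Options MakePlainTableOptions() {
  rocksdb::PlainTableOptions plain_options;
  plain_options.user_key_len = rocksdb::kPlainTableVariableLength;
  plain_options.encoding_type = rocksdb::kPrefix;

  rocksdb::Options options;
  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(8));
  options.table_factory.reset(rocksdb::NewPlainTableFactory(plain_options));
  options.allow_mmap_reads = true;  // plain table is built around mmap reads
  return options;
}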
compression_type is the compression type + // to use in this table. + virtual TableBuilder* NewTableBuilder( + const TableBuilderOptions& table_builder_options, + uint32_t column_family_id, WritableFileWriter* file) const = 0; + + // Sanitizes the specified DB Options and ColumnFamilyOptions. + // + // If the function cannot find a way to sanitize the input DB Options, + // a non-ok Status will be returned. + virtual Status SanitizeOptions( + const DBOptions& db_opts, + const ColumnFamilyOptions& cf_opts) const = 0; + + // Return a string that contains printable format of table configurations. + // RocksDB prints configurations at DB Open(). + virtual std::string GetPrintableTableOptions() const = 0; + + // Returns the raw pointer of the table options that is used by this + // TableFactory, or nullptr if this function is not supported. + // Since the return value is a raw pointer, the TableFactory owns the + // pointer and the caller should not delete the pointer. + // + // In certan case, it is desirable to alter the underlying options when the + // TableFactory is not used by any open DB by casting the returned pointer + // to the right class. For instance, if BlockBasedTableFactory is used, + // then the pointer can be casted to BlockBasedTableOptions. + // + // Note that changing the underlying TableFactory options while the + // TableFactory is currently used by any open DB is undefined behavior. + // Developers should use DB::SetOption() instead to dynamically change + // options while the DB is open. + virtual void* GetOptions() { return nullptr; } +}; + +#ifndef ROCKSDB_LITE +// Create a special table factory that can open either of the supported +// table formats, based on setting inside the SST files. It should be used to +// convert a DB from one table format to another. +// @table_factory_to_write: the table factory used when writing to new files. +// @block_based_table_factory: block based table factory to use. If NULL, use +// a default one. +// @plain_table_factory: plain table factory to use. If NULL, use a default one. +// @cuckoo_table_factory: cuckoo table factory to use. If NULL, use a default one. +extern TableFactory* NewAdaptiveTableFactory( + std::shared_ptr table_factory_to_write = nullptr, + std::shared_ptr block_based_table_factory = nullptr, + std::shared_ptr plain_table_factory = nullptr, + std::shared_ptr cuckoo_table_factory = nullptr); + +#endif // ROCKSDB_LITE + +} // namespace rocksdb diff --git a/external/rocksdb/include/rocksdb/table_properties.h b/external/rocksdb/include/rocksdb/table_properties.h new file mode 100755 index 0000000000..ee95e84f77 --- /dev/null +++ b/external/rocksdb/include/rocksdb/table_properties.h @@ -0,0 +1,199 @@ +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +#pragma once + +#include +#include +#include +#include "rocksdb/status.h" +#include "rocksdb/types.h" + +namespace rocksdb { + +// -- Table Properties +// Other than basic table properties, each table may also have the user +// collected properties. +// The value of the user-collected properties are encoded as raw bytes -- +// users have to interprete these values by themselves. 
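[Editor's aside: to make the user-collected-properties idea concrete, per-file properties, including the user-collected map, can be read back from an open DB. A hedged sketch assuming rocksdb/db.h and an already-open DB; DumpUserProperties is an illustrative name. The header's own prefix-scan note follows below.]

#include <cstdio>
#include "rocksdb/db.h"

// Illustrative sketch: print every user-collected property of every live
// SST file. TablePropertiesCollection maps file name -> TableProperties.
// Values are raw bytes, so only printable ones will render sensibly.
void DumpUserProperties(rocksdb::DB* db) {
  rocksdb::TablePropertiesCollection files;
  rocksdb::Status s = db->GetPropertiesOfAllTables(&files);
  if (!s.ok()) return;
  for (const auto& file : files) {
    for (const auto& kv : file.second->user_collected_properties) {
      std::printf("%s: %s = %s\n", file.first.c_str(), kv.first.c_str(),
                  kv.second.c_str());
    }
  }
}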
+// Note: To do prefix seek/scan in `UserCollectedProperties`, you can do +// something similar to: +// +// UserCollectedProperties props = ...; +// for (auto pos = props.lower_bound(prefix); +// pos != props.end() && pos->first.compare(0, prefix.size(), prefix) == 0; +// ++pos) { +// ... +// } +typedef std::map UserCollectedProperties; + +// table properties' human-readable names in the property block. +struct TablePropertiesNames { + static const std::string kDataSize; + static const std::string kIndexSize; + static const std::string kFilterSize; + static const std::string kRawKeySize; + static const std::string kRawValueSize; + static const std::string kNumDataBlocks; + static const std::string kNumEntries; + static const std::string kFormatVersion; + static const std::string kFixedKeyLen; + static const std::string kFilterPolicy; + static const std::string kColumnFamilyName; + static const std::string kColumnFamilyId; + static const std::string kComparator; + static const std::string kMergeOperator; + static const std::string kPropertyCollectors; + static const std::string kCompression; +}; + +extern const std::string kPropertiesBlock; +extern const std::string kCompressionDictBlock; + +enum EntryType { + kEntryPut, + kEntryDelete, + kEntrySingleDelete, + kEntryMerge, + kEntryOther, +}; + +// `TablePropertiesCollector` provides the mechanism for users to collect +// their own properties that they are interested in. This class is essentially +// a collection of callback functions that will be invoked during table +// building. It is construced with TablePropertiesCollectorFactory. The methods +// don't need to be thread-safe, as we will create exactly one +// TablePropertiesCollector object per table and then call it sequentially +class TablePropertiesCollector { + public: + virtual ~TablePropertiesCollector() {} + + // DEPRECATE User defined collector should implement AddUserKey(), though + // this old function still works for backward compatible reason. + // Add() will be called when a new key/value pair is inserted into the table. + // @params key the user key that is inserted into the table. + // @params value the value that is inserted into the table. + virtual Status Add(const Slice& /*key*/, const Slice& /*value*/) { + return Status::InvalidArgument( + "TablePropertiesCollector::Add() deprecated."); + } + + // AddUserKey() will be called when a new key/value pair is inserted into the + // table. + // @params key the user key that is inserted into the table. + // @params value the value that is inserted into the table. + virtual Status AddUserKey(const Slice& key, const Slice& value, + EntryType /*type*/, SequenceNumber /*seq*/, + uint64_t /*file_size*/) { + // For backwards-compatibility. + return Add(key, value); + } + + // Finish() will be called when a table has already been built and is ready + // for writing the properties block. + // @params properties User will add their collected statistics to + // `properties`. + virtual Status Finish(UserCollectedProperties* properties) = 0; + + // Return the human-readable properties, where the key is property name and + // the value is the human-readable form of value. + virtual UserCollectedProperties GetReadableProperties() const = 0; + + // The name of the properties collector can be used for debugging purpose. + virtual const char* Name() const = 0; + + // EXPERIMENTAL Return whether the output file should be further compacted + virtual bool NeedCompact() const { return false; } +}; + +// Constructs TablePropertiesCollector. 
Internals create a new +// TablePropertiesCollector for each new table +class TablePropertiesCollectorFactory { + public: + struct Context { + uint32_t column_family_id; + static const uint32_t kUnknownColumnFamily; + }; + + virtual ~TablePropertiesCollectorFactory() {} + // has to be thread-safe + virtual TablePropertiesCollector* CreateTablePropertiesCollector( + TablePropertiesCollectorFactory::Context context) = 0; + + // The name of the properties collector can be used for debugging purpose. + virtual const char* Name() const = 0; +}; + +// TableProperties contains a bunch of read-only properties of its associated +// table. +struct TableProperties { + public: + // the total size of all data blocks. + uint64_t data_size = 0; + // the size of index block. + uint64_t index_size = 0; + // the size of filter block. + uint64_t filter_size = 0; + // total raw key size + uint64_t raw_key_size = 0; + // total raw value size + uint64_t raw_value_size = 0; + // the number of blocks in this table + uint64_t num_data_blocks = 0; + // the number of entries in this table + uint64_t num_entries = 0; + // format version, reserved for backward compatibility + uint64_t format_version = 0; + // If 0, key is variable length. Otherwise number of bytes for each key. + uint64_t fixed_key_len = 0; + // ID of column family for this SST file, corresponding to the CF identified + // by column_family_name. + uint64_t column_family_id = + rocksdb::TablePropertiesCollectorFactory::Context::kUnknownColumnFamily; + + // Name of the column family with which this SST file is associated. + // If column family is unknown, `column_family_name` will be an empty string. + std::string column_family_name; + + // The name of the filter policy used in this table. + // If no filter policy is used, `filter_policy_name` will be an empty string. + std::string filter_policy_name; + + // The name of the comparator used in this table. + std::string comparator_name; + + // The name of the merge operator used in this table. + // If no merge operator is used, `merge_operator_name` will be "nullptr". + std::string merge_operator_name; + + // The names of the property collectors factories used in this table + // separated by commas + // {collector_name[1]},{collector_name[2]},{collector_name[3]} .. + std::string property_collectors_names; + + // The compression algo used to compress the SST files. + std::string compression_name; + + // user collected properties + UserCollectedProperties user_collected_properties; + UserCollectedProperties readable_properties; + + // convert this object to a human readable form + // @prop_delim: delimiter for each property. + std::string ToString(const std::string& prop_delim = "; ", + const std::string& kv_delim = "=") const; + + // Aggregate the numerical member variables of the specified + // TableProperties. + void Add(const TableProperties& tp); +}; + +// Extra properties +// Below is a list of non-basic properties that are collected by database +// itself. Especially some properties regarding to the internal keys (which +// is unknown to `table`). 
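[Editor's aside: a sketch of the collector pair defined above: a collector that counts Put entries, and the factory that creates one instance per table file. Everything here is hypothetical example code; PutCounterCollector, PutCounterFactory, and the "example.put-count" key are not part of RocksDB. The two extern helpers declared right after this aside read exactly this kind of property map.]

#include <cstdint>
#include <string>
#include "rocksdb/table_properties.h"

// Hypothetical example: count Put entries per table file.
class PutCounterCollector : public rocksdb::TablePropertiesCollector {
 public:
  rocksdb::Status AddUserKey(const rocksdb::Slice& /*key*/,
                             const rocksdb::Slice& /*value*/,
                             rocksdb::EntryType type,
                             rocksdb::SequenceNumber /*seq*/,
                             uint64_t /*file_size*/) override {
    if (type == rocksdb::kEntryPut) ++put_count_;
    return rocksdb::Status::OK();
  }
  rocksdb::Status Finish(rocksdb::UserCollectedProperties* props) override {
    props->insert({"example.put-count", std::to_string(put_count_)});
    return rocksdb::Status::OK();
  }
  rocksdb::UserCollectedProperties GetReadableProperties() const override {
    return {{"example.put-count", std::to_string(put_count_)}};
  }
  const char* Name() const override { return "PutCounterCollector"; }

 private:
  uint64_t put_count_ = 0;
};

// The factory must be thread-safe; the collector it returns need not be,
// since one collector serves exactly one table build.
class PutCounterFactory : public rocksdb::TablePropertiesCollectorFactory {
 public:
  rocksdb::TablePropertiesCollector* CreateTablePropertiesCollector(
      Context /*context*/) override {
    return new PutCounterCollector();  // RocksDB takes ownership
  }
  const char* Name() const override { return "PutCounterFactory"; }
};

[Such a factory would typically be registered through ColumnFamilyOptions::table_properties_collector_factories before opening the DB.]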
+extern uint64_t GetDeletedKeys(const UserCollectedProperties& props); +extern uint64_t GetMergeOperands(const UserCollectedProperties& props, + bool* property_present); + +} // namespace rocksdb diff --git a/external/rocksdb/include/rocksdb/thread_status.h b/external/rocksdb/include/rocksdb/thread_status.h new file mode 100755 index 0000000000..0cdea2b519 --- /dev/null +++ b/external/rocksdb/include/rocksdb/thread_status.h @@ -0,0 +1,195 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file defines the structures for exposing run-time status of any +// rocksdb-related thread. Such run-time status can be obtained via +// GetThreadList() API. +// +// Note that all thread-status features are still under-development, and +// thus APIs and class definitions might subject to change at this point. +// Will remove this comment once the APIs have been finalized. + +#pragma once + +#include +#include +#include +#include +#include +#include + +#ifndef ROCKSDB_USING_THREAD_STATUS +#define ROCKSDB_USING_THREAD_STATUS \ + !defined(ROCKSDB_LITE) && \ + !defined(NROCKSDB_THREAD_STATUS) && \ + !defined(OS_MACOSX) && \ + !defined(IOS_CROSS_COMPILE) +#endif + +namespace rocksdb { + +// TODO(yhchiang): remove this function once c++14 is available +// as std::max will be able to cover this. +// Current MS compiler does not support constexpr +template +struct constexpr_max { + static const int result = (A > B) ? A : B; +}; + +// A structure that describes the current status of a thread. +// The status of active threads can be fetched using +// rocksdb::GetThreadList(). +struct ThreadStatus { + // The type of a thread. + enum ThreadType : int { + HIGH_PRIORITY = 0, // RocksDB BG thread in high-pri thread pool + LOW_PRIORITY, // RocksDB BG thread in low-pri thread pool + USER, // User thread (Non-RocksDB BG thread) + NUM_THREAD_TYPES + }; + + // The type used to refer to a thread operation. + // A thread operation describes high-level action of a thread. + // Examples include compaction and flush. + enum OperationType : int { + OP_UNKNOWN = 0, + OP_COMPACTION, + OP_FLUSH, + NUM_OP_TYPES + }; + + enum OperationStage : int { + STAGE_UNKNOWN = 0, + STAGE_FLUSH_RUN, + STAGE_FLUSH_WRITE_L0, + STAGE_COMPACTION_PREPARE, + STAGE_COMPACTION_RUN, + STAGE_COMPACTION_PROCESS_KV, + STAGE_COMPACTION_INSTALL, + STAGE_COMPACTION_SYNC_FILE, + STAGE_PICK_MEMTABLES_TO_FLUSH, + STAGE_MEMTABLE_ROLLBACK, + STAGE_MEMTABLE_INSTALL_FLUSH_RESULTS, + NUM_OP_STAGES + }; + + enum CompactionPropertyType : int { + COMPACTION_JOB_ID = 0, + COMPACTION_INPUT_OUTPUT_LEVEL, + COMPACTION_PROP_FLAGS, + COMPACTION_TOTAL_INPUT_BYTES, + COMPACTION_BYTES_READ, + COMPACTION_BYTES_WRITTEN, + NUM_COMPACTION_PROPERTIES + }; + + enum FlushPropertyType : int { + FLUSH_JOB_ID = 0, + FLUSH_BYTES_MEMTABLES, + FLUSH_BYTES_WRITTEN, + NUM_FLUSH_PROPERTIES + }; + + // The maximum number of properties of an operation. + // This number should be set to the biggest NUM_XXX_PROPERTIES. + static const int kNumOperationProperties = + constexpr_max::result; + + // The type used to refer to a thread state. + // A state describes lower-level action of a thread + // such as reading / writing a file or waiting for a mutex. 
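[Editor's aside: a hedged usage sketch ahead of the StateType enum and constructor that follow. The list of per-thread statuses is fetched through the Env, and thread tracking must have been enabled via DBOptions::enable_thread_tracking on the open DBs; PrintThreadWork is an illustrative name.]

#include <cstdio>
#include <vector>
#include "rocksdb/env.h"
#include "rocksdb/thread_status.h"

// Hedged sketch: print what each tracked RocksDB thread is currently doing.
void PrintThreadWork() {
  std::vector<rocksdb::ThreadStatus> thread_list;
  rocksdb::Status s = rocksdb::Env::Default()->GetThreadList(&thread_list);
  if (!s.ok()) return;
  for (const auto& ts : thread_list) {
    std::printf(
        "%s is running %s on db '%s'\n",
        rocksdb::ThreadStatus::GetThreadTypeName(ts.thread_type).c_str(),
        rocksdb::ThreadStatus::GetOperationName(ts.operation_type).c_str(),
        ts.db_name.c_str());
  }
}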
+ enum StateType : int { + STATE_UNKNOWN = 0, + STATE_MUTEX_WAIT = 1, + NUM_STATE_TYPES + }; + + ThreadStatus(const uint64_t _id, + const ThreadType _thread_type, + const std::string& _db_name, + const std::string& _cf_name, + const OperationType _operation_type, + const uint64_t _op_elapsed_micros, + const OperationStage _operation_stage, + const uint64_t _op_props[], + const StateType _state_type) : + thread_id(_id), thread_type(_thread_type), + db_name(_db_name), + cf_name(_cf_name), + operation_type(_operation_type), + op_elapsed_micros(_op_elapsed_micros), + operation_stage(_operation_stage), + state_type(_state_type) { + for (int i = 0; i < kNumOperationProperties; ++i) { + op_properties[i] = _op_props[i]; + } + } + + // An unique ID for the thread. + const uint64_t thread_id; + + // The type of the thread, it could be HIGH_PRIORITY, + // LOW_PRIORITY, and USER + const ThreadType thread_type; + + // The name of the DB instance where the thread is currently + // involved with. It would be set to empty string if the thread + // does not involve in any DB operation. + const std::string db_name; + + // The name of the column family where the thread is currently + // It would be set to empty string if the thread does not involve + // in any column family. + const std::string cf_name; + + // The operation (high-level action) that the current thread is involved. + const OperationType operation_type; + + // The elapsed time in micros of the current thread operation. + const uint64_t op_elapsed_micros; + + // An integer showing the current stage where the thread is involved + // in the current operation. + const OperationStage operation_stage; + + // A list of properties that describe some details about the current + // operation. Same field in op_properties[] might have different + // meanings for different operations. + uint64_t op_properties[kNumOperationProperties]; + + // The state (lower-level action) that the current thread is involved. + const StateType state_type; + + // The followings are a set of utility functions for interpreting + // the information of ThreadStatus + + static const std::string& GetThreadTypeName(ThreadType thread_type); + + // Obtain the name of an operation given its type. + static const std::string& GetOperationName(OperationType op_type); + + static const std::string MicrosToString(uint64_t op_elapsed_time); + + // Obtain a human-readable string describing the specified operation stage. + static const std::string& GetOperationStageName( + OperationStage stage); + + // Obtain the name of the "i"th operation property of the + // specified operation. + static const std::string& GetOperationPropertyName( + OperationType op_type, int i); + + // Translate the "i"th property of the specified operation given + // a property value. + static std::map + InterpretOperationProperties( + OperationType op_type, const uint64_t* op_properties); + + // Obtain the name of a state given its type. + static const std::string& GetStateName(StateType state_type); +}; + + +} // namespace rocksdb diff --git a/external/rocksdb/include/rocksdb/transaction_log.h b/external/rocksdb/include/rocksdb/transaction_log.h new file mode 100755 index 0000000000..1fb93ace16 --- /dev/null +++ b/external/rocksdb/include/rocksdb/transaction_log.h @@ -0,0 +1,125 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_ +#define STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_ + +#include "rocksdb/status.h" +#include "rocksdb/types.h" +#include "rocksdb/write_batch.h" +#include +#include + +namespace rocksdb { + +class LogFile; +typedef std::vector> VectorLogPtr; + +enum WalFileType { + /* Indicates that WAL file is in archive directory. WAL files are moved from + * the main db directory to archive directory once they are not live and stay + * there until cleaned up. Files are cleaned depending on archive size + * (Options::WAL_size_limit_MB) and time since last cleaning + * (Options::WAL_ttl_seconds). + */ + kArchivedLogFile = 0, + + /* Indicates that WAL file is live and resides in the main db directory */ + kAliveLogFile = 1 +} ; + +class LogFile { + public: + LogFile() {} + virtual ~LogFile() {} + + // Returns log file's pathname relative to the main db dir + // Eg. For a live-log-file = /000003.log + // For an archived-log-file = /archive/000003.log + virtual std::string PathName() const = 0; + + + // Primary identifier for log file. + // This is directly proportional to creation time of the log file + virtual uint64_t LogNumber() const = 0; + + // Log file can be either alive or archived + virtual WalFileType Type() const = 0; + + // Starting sequence number of writebatch written in this log file + virtual SequenceNumber StartSequence() const = 0; + + // Size of log file on disk in Bytes + virtual uint64_t SizeFileBytes() const = 0; +}; + +struct BatchResult { + SequenceNumber sequence = 0; + std::unique_ptr writeBatchPtr; + + // Add empty __ctor and __dtor for the rule of five + // However, preserve the original semantics and prohibit copying + // as the unique_ptr member does not copy. + BatchResult() {} + + ~BatchResult() {} + + BatchResult(const BatchResult&) = delete; + + BatchResult& operator=(const BatchResult&) = delete; + + BatchResult(BatchResult&& bResult) + : sequence(std::move(bResult.sequence)), + writeBatchPtr(std::move(bResult.writeBatchPtr)) {} + + BatchResult& operator=(BatchResult&& bResult) { + sequence = std::move(bResult.sequence); + writeBatchPtr = std::move(bResult.writeBatchPtr); + return *this; + } +}; + +// A TransactionLogIterator is used to iterate over the transactions in a db. +// One run of the iterator is continuous, i.e. the iterator will stop at the +// beginning of any gap in sequences +class TransactionLogIterator { + public: + TransactionLogIterator() {} + virtual ~TransactionLogIterator() {} + + // An iterator is either positioned at a WriteBatch or not valid. + // This method returns true if the iterator is valid. + // Can read data from a valid iterator. + virtual bool Valid() = 0; + + // Moves the iterator to the next WriteBatch. + // REQUIRES: Valid() to be true. + virtual void Next() = 0; + + // Returns ok if the iterator is valid. + // Returns the Error when something has gone wrong. + virtual Status status() = 0; + + // If valid return's the current write_batch and the sequence number of the + // earliest transaction contained in the batch. + // ONLY use if Valid() is true and status() is OK. + virtual BatchResult GetBatch() = 0; + + // The read options for TransactionLogIterator. + struct ReadOptions { + // If true, all data read from underlying storage will be + // verified against corresponding checksums. 
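[Editor's aside: the iterator declared above is obtained from a live DB rather than constructed directly. A minimal sketch, assuming rocksdb/db.h and its GetUpdatesSince() accessor; DumpSince is an illustrative name. The verify_checksums_ field's default note follows below.]

#include <memory>
#include "rocksdb/db.h"

// Hedged sketch: walk all write batches recorded at or after `seq`.
rocksdb::Status DumpSince(rocksdb::DB* db, rocksdb::SequenceNumber seq) {
  std::unique_ptr<rocksdb::TransactionLogIterator> iter;
  rocksdb::Status s = db->GetUpdatesSince(seq, &iter);
  if (!s.ok()) return s;
  for (; iter->Valid(); iter->Next()) {
    rocksdb::BatchResult batch = iter->GetBatch();
    // batch.sequence is the first sequence number inside the batch;
    // batch.writeBatchPtr holds the WriteBatch itself.
  }
  return iter->status();
}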
+ // Default: true + bool verify_checksums_; + + ReadOptions() : verify_checksums_(true) {} + + explicit ReadOptions(bool verify_checksums) + : verify_checksums_(verify_checksums) {} + }; +}; +} // namespace rocksdb + +#endif // STORAGE_ROCKSDB_INCLUDE_TRANSACTION_LOG_ITERATOR_H_ diff --git a/external/rocksdb/include/rocksdb/types.h b/external/rocksdb/include/rocksdb/types.h new file mode 100755 index 0000000000..6a477cab89 --- /dev/null +++ b/external/rocksdb/include/rocksdb/types.h @@ -0,0 +1,20 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef STORAGE_ROCKSDB_INCLUDE_TYPES_H_ +#define STORAGE_ROCKSDB_INCLUDE_TYPES_H_ + +#include + +namespace rocksdb { + +// Define all public custom types here. + +// Represents a sequence number in a WAL file. +typedef uint64_t SequenceNumber; + +} // namespace rocksdb + +#endif // STORAGE_ROCKSDB_INCLUDE_TYPES_H_ diff --git a/external/rocksdb/include/rocksdb/universal_compaction.h b/external/rocksdb/include/rocksdb/universal_compaction.h new file mode 100755 index 0000000000..11490e413a --- /dev/null +++ b/external/rocksdb/include/rocksdb/universal_compaction.h @@ -0,0 +1,90 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H +#define STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H + +#include +#include +#include + +namespace rocksdb { + +// +// Algorithm used to make a compaction request stop picking new files +// into a single compaction run +// +enum CompactionStopStyle { + kCompactionStopStyleSimilarSize, // pick files of similar size + kCompactionStopStyleTotalSize // total size of picked files > next file +}; + +class CompactionOptionsUniversal { + public: + + // Percentage flexibilty while comparing file size. If the candidate file(s) + // size is 1% smaller than the next file's size, then include next file into + // this candidate set. // Default: 1 + unsigned int size_ratio; + + // The minimum number of files in a single compaction run. Default: 2 + unsigned int min_merge_width; + + // The maximum number of files in a single compaction run. Default: UINT_MAX + unsigned int max_merge_width; + + // The size amplification is defined as the amount (in percentage) of + // additional storage needed to store a single byte of data in the database. + // For example, a size amplification of 2% means that a database that + // contains 100 bytes of user-data may occupy upto 102 bytes of + // physical storage. By this definition, a fully compacted database has + // a size amplification of 0%. Rocksdb uses the following heuristic + // to calculate size amplification: it assumes that all files excluding + // the earliest file contribute to the size amplification. + // Default: 200, which means that a 100 byte database could require upto + // 300 bytes of storage. + unsigned int max_size_amplification_percent; + + // If this option is set to be -1 (the default value), all the output files + // will follow compression type specified. 
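[Editor's aside: a sketch of selecting universal compaction with the option fields defined in this class; the compression_size_percent discussion continues below. Assumes only rocksdb/options.h; MakeUniversalOptions is an illustrative name, and the values shown are the documented defaults.]

#include "rocksdb/options.h"

// Illustrative sketch: a DB configured for universal compaction.
rocksdb::Options MakeUniversalOptions() {
  rocksdb::Options options;
  options.compaction_style = rocksdb::kCompactionStyleUniversal;
  options.compaction_options_universal.size_ratio = 1;
  options.compaction_options_universal.min_merge_width = 2;
  options.compaction_options_universal.max_size_amplification_percent = 200;
  options.compaction_options_universal.stop_style =
      rocksdb::kCompactionStopStyleTotalSize;
  return options;
}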
+ // + // If this option is not negative, we will try to make sure compressed + // size is just above this value. In normal cases, at least this percentage + // of data will be compressed. + // When we are compacting to a new file, here is the criteria whether + // it needs to be compressed: assuming here are the list of files sorted + // by generation time: + // A1...An B1...Bm C1...Ct + // where A1 is the newest and Ct is the oldest, and we are going to compact + // B1...Bm, we calculate the total size of all the files as total_size, as + // well as the total size of C1...Ct as total_C, the compaction output file + // will be compressed iff + // total_C / total_size < this percentage + // Default: -1 + int compression_size_percent; + + // The algorithm used to stop picking files into a single compaction run + // Default: kCompactionStopStyleTotalSize + CompactionStopStyle stop_style; + + // Option to optimize the universal multi level compaction by enabling + // trivial move for non overlapping files. + // Default: false + bool allow_trivial_move; + + // Default set of parameters + CompactionOptionsUniversal() + : size_ratio(1), + min_merge_width(2), + max_merge_width(UINT_MAX), + max_size_amplification_percent(200), + compression_size_percent(-1), + stop_style(kCompactionStopStyleTotalSize), + allow_trivial_move(false) {} +}; + +} // namespace rocksdb + +#endif // STORAGE_ROCKSDB_UNIVERSAL_COMPACTION_OPTIONS_H diff --git a/external/rocksdb/include/rocksdb/utilities/backupable_db.h b/external/rocksdb/include/rocksdb/utilities/backupable_db.h new file mode 100755 index 0000000000..27c1b49ac2 --- /dev/null +++ b/external/rocksdb/include/rocksdb/utilities/backupable_db.h @@ -0,0 +1,313 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#ifndef ROCKSDB_LITE + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include +#include +#include +#include +#include + +#include "rocksdb/utilities/stackable_db.h" + +#include "rocksdb/env.h" +#include "rocksdb/status.h" + +namespace rocksdb { + +struct BackupableDBOptions { + // Where to keep the backup files. Has to be different than dbname_ + // Best to set this to dbname_ + "/backups" + // Required + std::string backup_dir; + + // Backup Env object. It will be used for backup file I/O. If it's + // nullptr, backups will be written out using DBs Env. If it's + // non-nullptr, backup's I/O will be performed using this object. + // If you want to have backups on HDFS, use HDFS Env here! + // Default: nullptr + Env* backup_env; + + // If share_table_files == true, backup will assume that table files with + // same name have the same contents. This enables incremental backups and + // avoids unnecessary data copies. + // If share_table_files == false, each backup will be on its own and will + // not share any data with other backups. + // default: true + bool share_table_files; + + // Backup info and error messages will be written to info_log + // if non-nullptr. 
+ // Default: nullptr + Logger* info_log; + + // If sync == true, we can guarantee you'll get consistent backup even + // on a machine crash/reboot. Backup process is slower with sync enabled. + // If sync == false, we don't guarantee anything on machine reboot. However, + // chances are some of the backups are consistent. + // Default: true + bool sync; + + // If true, it will delete whatever backups there are already + // Default: false + bool destroy_old_data; + + // If false, we won't backup log files. This option can be useful for backing + // up in-memory databases where log file are persisted, but table files are in + // memory. + // Default: true + bool backup_log_files; + + // Max bytes that can be transferred in a second during backup. + // If 0, go as fast as you can + // Default: 0 + uint64_t backup_rate_limit; + + // Backup rate limiter. Used to control transfer speed for backup. If this is + // not null, backup_rate_limit is ignored. + // Default: nullptr + std::shared_ptr backup_rate_limiter{nullptr}; + + // Max bytes that can be transferred in a second during restore. + // If 0, go as fast as you can + // Default: 0 + uint64_t restore_rate_limit; + + // Restore rate limiter. Used to control transfer speed during restore. If + // this is not null, restore_rate_limit is ignored. + // Default: nullptr + std::shared_ptr restore_rate_limiter{nullptr}; + + // Only used if share_table_files is set to true. If true, will consider that + // backups can come from different databases, hence a sst is not uniquely + // identifed by its name, but by the triple (file name, crc32, file length) + // Default: false + // Note: this is an experimental option, and you'll need to set it manually + // *turn it on only if you know what you're doing* + bool share_files_with_checksum; + + // Up to this many background threads will copy files for CreateNewBackup() + // and RestoreDBFromBackup() + // Default: 1 + int max_background_operations; + + // During backup user can get callback every time next + // callback_trigger_interval_size bytes being copied. + // Default: 4194304 + uint64_t callback_trigger_interval_size; + + void Dump(Logger* logger) const; + + explicit BackupableDBOptions( + const std::string& _backup_dir, Env* _backup_env = nullptr, + bool _share_table_files = true, Logger* _info_log = nullptr, + bool _sync = true, bool _destroy_old_data = false, + bool _backup_log_files = true, uint64_t _backup_rate_limit = 0, + uint64_t _restore_rate_limit = 0, int _max_background_operations = 1, + uint64_t _callback_trigger_interval_size = 4 * 1024 * 1024) + : backup_dir(_backup_dir), + backup_env(_backup_env), + share_table_files(_share_table_files), + info_log(_info_log), + sync(_sync), + destroy_old_data(_destroy_old_data), + backup_log_files(_backup_log_files), + backup_rate_limit(_backup_rate_limit), + restore_rate_limit(_restore_rate_limit), + share_files_with_checksum(false), + max_background_operations(_max_background_operations), + callback_trigger_interval_size(_callback_trigger_interval_size) { + assert(share_table_files || !share_files_with_checksum); + } +}; + +struct RestoreOptions { + // If true, restore won't overwrite the existing log files in wal_dir. It will + // also move all log files from archive directory to wal_dir. Use this option + // in combination with BackupableDBOptions::backup_log_files = false for + // persisting in-memory databases. 
+ // Default: false + bool keep_log_files; + + explicit RestoreOptions(bool _keep_log_files = false) + : keep_log_files(_keep_log_files) {} +}; + +typedef uint32_t BackupID; + +struct BackupInfo { + BackupID backup_id; + int64_t timestamp; + uint64_t size; + + uint32_t number_files; + std::string app_metadata; + + BackupInfo() {} + + BackupInfo(BackupID _backup_id, int64_t _timestamp, uint64_t _size, + uint32_t _number_files, const std::string& _app_metadata) + : backup_id(_backup_id), + timestamp(_timestamp), + size(_size), + number_files(_number_files), + app_metadata(_app_metadata) {} +}; + +class BackupStatistics { + public: + BackupStatistics() { + number_success_backup = 0; + number_fail_backup = 0; + } + + BackupStatistics(uint32_t _number_success_backup, + uint32_t _number_fail_backup) + : number_success_backup(_number_success_backup), + number_fail_backup(_number_fail_backup) {} + + ~BackupStatistics() {} + + void IncrementNumberSuccessBackup(); + void IncrementNumberFailBackup(); + + uint32_t GetNumberSuccessBackup() const; + uint32_t GetNumberFailBackup() const; + + std::string ToString() const; + + private: + uint32_t number_success_backup; + uint32_t number_fail_backup; +}; + +// A backup engine for accessing information about backups and restoring from +// them. +class BackupEngineReadOnly { + public: + virtual ~BackupEngineReadOnly() {} + + static Status Open(Env* db_env, const BackupableDBOptions& options, + BackupEngineReadOnly** backup_engine_ptr); + + // Returns info about backups in backup_info + // You can GetBackupInfo safely, even with other BackupEngine performing + // backups on the same directory + virtual void GetBackupInfo(std::vector* backup_info) = 0; + + // Returns info about corrupt backups in corrupt_backups + virtual void GetCorruptedBackups( + std::vector* corrupt_backup_ids) = 0; + + // Restoring DB from backup is NOT safe when there is another BackupEngine + // running that might call DeleteBackup() or PurgeOldBackups(). It is caller's + // responsibility to synchronize the operation, i.e. don't delete the backup + // when you're restoring from it + // See also the corresponding doc in BackupEngine + virtual Status RestoreDBFromBackup( + BackupID backup_id, const std::string& db_dir, const std::string& wal_dir, + const RestoreOptions& restore_options = RestoreOptions()) = 0; + + // See the corresponding doc in BackupEngine + virtual Status RestoreDBFromLatestBackup( + const std::string& db_dir, const std::string& wal_dir, + const RestoreOptions& restore_options = RestoreOptions()) = 0; + + // checks that each file exists and that the size of the file matches our + // expectations. it does not check file checksum. + // Returns Status::OK() if all checks are good + virtual Status VerifyBackup(BackupID backup_id) = 0; +}; + +// A backup engine for creating new backups. +class BackupEngine { + public: + virtual ~BackupEngine() {} + + // BackupableDBOptions have to be the same as the ones used in previous + // BackupEngines for the same backup directory. 
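[Editor's aside: putting BackupEngine together with the options above; its Open() is declared just below. A hedged sketch in which the /tmp paths and the name BackupThenRestore are illustrative.]

#include "rocksdb/utilities/backupable_db.h"

// Hedged sketch: take a backup of an open DB, then restore the most recent
// backup into a separate directory.
rocksdb::Status BackupThenRestore(rocksdb::DB* db) {
  rocksdb::BackupEngine* backup_engine = nullptr;
  rocksdb::Status s = rocksdb::BackupEngine::Open(
      rocksdb::Env::Default(),
      rocksdb::BackupableDBOptions("/tmp/rocksdb_backups"), &backup_engine);
  if (!s.ok()) return s;
  s = backup_engine->CreateNewBackup(db, /*flush_before_backup=*/true);
  if (s.ok()) {
    s = backup_engine->RestoreDBFromLatestBackup("/tmp/rocksdb_restored",
                                                 "/tmp/rocksdb_restored");
  }
  delete backup_engine;
  return s;
}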
+ static Status Open(Env* db_env, + const BackupableDBOptions& options, + BackupEngine** backup_engine_ptr); + + // same as CreateNewBackup, but stores extra application metadata + virtual Status CreateNewBackupWithMetadata( + DB* db, const std::string& app_metadata, bool flush_before_backup = false, + std::function progress_callback = []() {}) = 0; + + // Captures the state of the database in the latest backup + // NOT a thread safe call + virtual Status CreateNewBackup(DB* db, bool flush_before_backup = false, + std::function progress_callback = + []() {}) { + return CreateNewBackupWithMetadata(db, "", flush_before_backup, + progress_callback); + } + + // deletes old backups, keeping latest num_backups_to_keep alive + virtual Status PurgeOldBackups(uint32_t num_backups_to_keep) = 0; + + // deletes a specific backup + virtual Status DeleteBackup(BackupID backup_id) = 0; + + // Call this from another thread if you want to stop the backup + // that is currently happening. It will return immediatelly, will + // not wait for the backup to stop. + // The backup will stop ASAP and the call to CreateNewBackup will + // return Status::Incomplete(). It will not clean up after itself, but + // the state will remain consistent. The state will be cleaned up + // next time you create BackupableDB or RestoreBackupableDB. + virtual void StopBackup() = 0; + + // Returns info about backups in backup_info + virtual void GetBackupInfo(std::vector* backup_info) = 0; + + // Returns info about corrupt backups in corrupt_backups + virtual void GetCorruptedBackups( + std::vector* corrupt_backup_ids) = 0; + + // restore from backup with backup_id + // IMPORTANT -- if options_.share_table_files == true, + // options_.share_files_with_checksum == false, you restore DB from some + // backup that is not the latest, and you start creating new backups from the + // new DB, they will probably fail. + // + // Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3. + // If you add new data to the DB and try creating a new backup now, the + // database will diverge from backups 4 and 5 and the new backup will fail. + // If you want to create new backup, you will first have to delete backups 4 + // and 5. + virtual Status RestoreDBFromBackup( + BackupID backup_id, const std::string& db_dir, const std::string& wal_dir, + const RestoreOptions& restore_options = RestoreOptions()) = 0; + + // restore from the latest backup + virtual Status RestoreDBFromLatestBackup( + const std::string& db_dir, const std::string& wal_dir, + const RestoreOptions& restore_options = RestoreOptions()) = 0; + + // checks that each file exists and that the size of the file matches our + // expectations. it does not check file checksum. + // Returns Status::OK() if all checks are good + virtual Status VerifyBackup(BackupID backup_id) = 0; + + // Will delete all the files we don't need anymore + // It will do the full scan of the files/ directory and delete all the + // files that are not referenced. + virtual Status GarbageCollect() = 0; +}; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/include/rocksdb/utilities/checkpoint.h b/external/rocksdb/include/rocksdb/utilities/checkpoint.h new file mode 100755 index 0000000000..b4523c25ef --- /dev/null +++ b/external/rocksdb/include/rocksdb/utilities/checkpoint.h @@ -0,0 +1,36 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// A checkpoint is an openable snapshot of a database at a point in time. + +#pragma once +#ifndef ROCKSDB_LITE + +#include +#include "rocksdb/status.h" + +namespace rocksdb { + +class DB; + +class Checkpoint { + public: + // Creates a Checkpoint object to be used for creating openable sbapshots + static Status Create(DB* db, Checkpoint** checkpoint_ptr); + + // Builds an openable snapshot of RocksDB on the same disk, which + // accepts an output directory on the same disk, and under the directory + // (1) hard-linked SST files pointing to existing live SST files + // SST files will be copied if output directory is on a different filesystem + // (2) a copied manifest files and other files + // The directory should not already exist and will be created by this API. + // The directory will be an absolute path + virtual Status CreateCheckpoint(const std::string& checkpoint_dir); + + virtual ~Checkpoint() {} +}; + +} // namespace rocksdb +#endif // !ROCKSDB_LITE diff --git a/external/rocksdb/include/rocksdb/utilities/convenience.h b/external/rocksdb/include/rocksdb/utilities/convenience.h new file mode 100755 index 0000000000..b0ac15c6df --- /dev/null +++ b/external/rocksdb/include/rocksdb/utilities/convenience.h @@ -0,0 +1,10 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#pragma once + +// This file was moved to rocksdb/convenience.h" + +#include "rocksdb/convenience.h" diff --git a/external/rocksdb/include/rocksdb/utilities/db_ttl.h b/external/rocksdb/include/rocksdb/utilities/db_ttl.h new file mode 100755 index 0000000000..09107c50c4 --- /dev/null +++ b/external/rocksdb/include/rocksdb/utilities/db_ttl.h @@ -0,0 +1,68 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#pragma once +#ifndef ROCKSDB_LITE + +#include +#include + +#include "rocksdb/utilities/stackable_db.h" +#include "rocksdb/db.h" + +namespace rocksdb { + +// Database with TTL support. +// +// USE-CASES: +// This API should be used to open the db when key-values inserted are +// meant to be removed from the db in a non-strict 'ttl' amount of time +// Therefore, this guarantees that key-values inserted will remain in the +// db for >= ttl amount of time and the db will make efforts to remove the +// key-values as soon as possible after ttl seconds of their insertion. +// +// BEHAVIOUR: +// TTL is accepted in seconds +// (int32_t)Timestamp(creation) is suffixed to values in Put internally +// Expired TTL values deleted in compaction only:(Timestamp+ttl=5 +// read_only=true opens in the usual read-only mode. 
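[Editor's aside: a sketch of the TTL API described here; the behaviour and constraint notes continue below. It opens a wrapped DB whose entries become eligible for removal roughly an hour after insertion. The path and the name OpenWithTtl are illustrative.]

#include "rocksdb/utilities/db_ttl.h"

// Hedged sketch: entries older than ~3600s are dropped during compaction.
rocksdb::Status OpenWithTtl(rocksdb::DBWithTTL** db_out) {
  rocksdb::Options options;
  options.create_if_missing = true;
  return rocksdb::DBWithTTL::Open(options, "/tmp/ttl_db", db_out,
                                  /*ttl=*/3600, /*read_only=*/false);
}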
Compactions will not be +// triggered(neither manual nor automatic), so no expired entries removed +// +// CONSTRAINTS: +// Not specifying/passing or non-positive TTL behaves like TTL = infinity +// +// !!!WARNING!!!: +// Calling DB::Open directly to re-open a db created by this API will get +// corrupt values(timestamp suffixed) and no ttl effect will be there +// during the second Open, so use this API consistently to open the db +// Be careful when passing ttl with a small positive value because the +// whole database may be deleted in a small amount of time + +class DBWithTTL : public StackableDB { + public: + virtual Status CreateColumnFamilyWithTtl( + const ColumnFamilyOptions& options, const std::string& column_family_name, + ColumnFamilyHandle** handle, int ttl) = 0; + + static Status Open(const Options& options, const std::string& dbname, + DBWithTTL** dbptr, int32_t ttl = 0, + bool read_only = false); + + static Status Open(const DBOptions& db_options, const std::string& dbname, + const std::vector& column_families, + std::vector* handles, + DBWithTTL** dbptr, std::vector ttls, + bool read_only = false); + + protected: + explicit DBWithTTL(DB* db) : StackableDB(db) {} +}; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/include/rocksdb/utilities/document_db.h b/external/rocksdb/include/rocksdb/utilities/document_db.h new file mode 100755 index 0000000000..52f2257058 --- /dev/null +++ b/external/rocksdb/include/rocksdb/utilities/document_db.h @@ -0,0 +1,149 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#pragma once +#ifndef ROCKSDB_LITE + +#include +#include + +#include "rocksdb/utilities/stackable_db.h" +#include "rocksdb/utilities/json_document.h" +#include "rocksdb/db.h" + +namespace rocksdb { + +// IMPORTANT: DocumentDB is a work in progress. It is unstable and we might +// change the API without warning. Talk to RocksDB team before using this in +// production ;) + +// DocumentDB is a layer on top of RocksDB that provides a very simple JSON API. +// When creating a DB, you specify a list of indexes you want to keep on your +// data. You can insert a JSON document to the DB, which is automatically +// indexed. Every document added to the DB needs to have "_id" field which is +// automatically indexed and is an unique primary key. All other indexes are +// non-unique. + +// NOTE: field names in the JSON are NOT allowed to start with '$' or +// contain '.'. We don't currently enforce that rule, but will start behaving +// badly. + +// Cursor is what you get as a result of executing query. 
To get all +// results from a query, call Next() on a Cursor while Valid() returns true +class Cursor { + public: + Cursor() = default; + virtual ~Cursor() {} + + virtual bool Valid() const = 0; + virtual void Next() = 0; + // Lifecycle of the returned JSONDocument is until the next Next() call + virtual const JSONDocument& document() const = 0; + virtual Status status() const = 0; + + private: + // No copying allowed + Cursor(const Cursor&); + void operator=(const Cursor&); +}; + +struct DocumentDBOptions { + int background_threads = 4; + uint64_t memtable_size = 128 * 1024 * 1024; // 128 MB + uint64_t cache_size = 1 * 1024 * 1024 * 1024; // 1 GB +}; + +// TODO(icanadi) Add `JSONDocument* info` parameter to all calls that can be +// used by the caller to get more information about the call execution (number +// of dropped records, number of updated records, etc.) +class DocumentDB : public StackableDB { + public: + struct IndexDescriptor { + // Currently, you can only define an index on a single field. To specify an + // index on a field X, set index description to JSON "{X: 1}" + // Currently the value needs to be 1, which means ascending. + // In the future, we plan to also support indexes on multiple keys, where + // you could mix ascending sorting (1) with descending sorting indexes (-1) + JSONDocument* description; + std::string name; + }; + + // Open DocumentDB with specified indexes. The list of indexes has to be + // complete, i.e. include all indexes present in the DB, except the primary + // key index. + // Otherwise, Open() will return an error + static Status Open(const DocumentDBOptions& options, const std::string& name, + const std::vector& indexes, + DocumentDB** db, bool read_only = false); + + explicit DocumentDB(DB* db) : StackableDB(db) {} + + // Create a new index. It will stop all writes for the duration of the call. + // All current documents in the DB are scanned and corresponding index entries + // are created + virtual Status CreateIndex(const WriteOptions& write_options, + const IndexDescriptor& index) = 0; + + // Drop an index. Client is responsible to make sure that index is not being + // used by currently executing queries + virtual Status DropIndex(const std::string& name) = 0; + + // Insert a document to the DB. The document needs to have a primary key "_id" + // which can either be a string or an integer. Otherwise the write will fail + // with InvalidArgument. + virtual Status Insert(const WriteOptions& options, + const JSONDocument& document) = 0; + + // Deletes all documents matching a filter atomically + virtual Status Remove(const ReadOptions& read_options, + const WriteOptions& write_options, + const JSONDocument& query) = 0; + + // Does this sequence of operations: + // 1. Find all documents matching a filter + // 2. For all documents, atomically: + // 2.1. apply the update operators + // 2.2. update the secondary indexes + // + // Currently only $set update operator is supported. + // Syntax is: {$set: {key1: value1, key2: value2, etc...}} + // This operator will change a document's key1 field to value1, key2 to + // value2, etc. New values will be set even if a document didn't have an entry + // for the specified key. + // + // You can not change a primary key of a document. 
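[Editor's aside: a hedged sketch of the basic write path before the Update/Query details conclude below. ParseJSON is assumed to come from rocksdb/utilities/json_document.h (included by this header); the document contents and the name InsertOne are illustrative, and DocumentDB itself is marked unstable above.]

#include <memory>
#include "rocksdb/utilities/document_db.h"

// Hedged sketch: insert one document keyed by the mandatory "_id" field.
rocksdb::Status InsertOne(rocksdb::DocumentDB* db) {
  std::unique_ptr<rocksdb::JSONDocument> doc(
      rocksdb::JSONDocument::ParseJSON("{\"_id\": \"user1\", \"age\": 33}"));
  if (!doc) {
    return rocksdb::Status::InvalidArgument("malformed JSON");
  }
  return db->Insert(rocksdb::WriteOptions(), *doc);
}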
+ // + // Update example: Update({id: {$gt: 5}, $index: id}, {$set: {enabled: true}}) + virtual Status Update(const ReadOptions& read_options, + const WriteOptions& write_options, + const JSONDocument& filter, + const JSONDocument& updates) = 0; + + // query has to be an array in which every element is an operator. Currently + // only $filter operator is supported. Syntax of $filter operator is: + // {$filter: {key1: condition1, key2: condition2, etc.}} where conditions can + // be either: + // 1) a single value in which case the condition is equality condition, or + // 2) a defined operators, like {$gt: 4}, which will match all documents that + // have key greater than 4. + // + // Supported operators are: + // 1) $gt -- greater than + // 2) $gte -- greater than or equal + // 3) $lt -- less than + // 4) $lte -- less than or equal + // If you want the filter to use an index, you need to specify it like this: + // {$filter: {...(conditions)..., $index: index_name}} + // + // Example query: + // * [{$filter: {name: John, age: {$gte: 18}, $index: age}}] + // will return all Johns whose age is greater or equal to 18 and it will use + // index "age" to satisfy the query. + virtual Cursor* Query(const ReadOptions& read_options, + const JSONDocument& query) = 0; +}; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/include/rocksdb/utilities/env_librados.h b/external/rocksdb/include/rocksdb/utilities/env_librados.h new file mode 100755 index 0000000000..5c10ea7ccf --- /dev/null +++ b/external/rocksdb/include/rocksdb/utilities/env_librados.h @@ -0,0 +1,186 @@ +// -*- mode:C++; tab-width:8; c-basic-offset:2; indent-tabs-mode:t -*- +// vim: ts=8 sw=2 smarttab +#ifndef ROCKSDB_UTILITIES_ENV_LIBRADOS_H +#define ROCKSDB_UTILITIES_ENV_LIBRADOS_H + +#include +#include + +#include "rocksdb/status.h" +#include "rocksdb/utilities/env_mirror.h" + +#include + +namespace rocksdb { +class LibradosWritableFile; + +class EnvLibrados : public EnvWrapper { +public: + // Create a brand new sequentially-readable file with the specified name. + // On success, stores a pointer to the new file in *result and returns OK. + // On failure stores nullptr in *result and returns non-OK. If the file does + // not exist, returns a non-OK status. + // + // The returned file will only be accessed by one thread at a time. + Status NewSequentialFile( + const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options); + + // Create a brand new random access read-only file with the + // specified name. On success, stores a pointer to the new file in + // *result and returns OK. On failure stores nullptr in *result and + // returns non-OK. If the file does not exist, returns a non-OK + // status. + // + // The returned file may be concurrently accessed by multiple threads. + Status NewRandomAccessFile( + const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options); + + // Create an object that writes to a new file with the specified + // name. Deletes any existing file with the same name and creates a + // new file. On success, stores a pointer to the new file in + // *result and returns OK. On failure stores nullptr in *result and + // returns non-OK. + // + // The returned file will only be accessed by one thread at a time. + Status NewWritableFile( + const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options); + + // Reuse an existing file by renaming it and opening it as writable. 
+ Status ReuseWritableFile( + const std::string& fname, + const std::string& old_fname, + std::unique_ptr* result, + const EnvOptions& options); + + // Create an object that represents a directory. Will fail if directory + // doesn't exist. If the directory exists, it will open the directory + // and create a new Directory object. + // + // On success, stores a pointer to the new Directory in + // *result and returns OK. On failure stores nullptr in *result and + // returns non-OK. + Status NewDirectory( + const std::string& name, + std::unique_ptr* result); + + // Returns OK if the named file exists. + // NotFound if the named file does not exist, + // the calling process does not have permission to determine + // whether this file exists, or if the path is invalid. + // IOError if an IO Error was encountered + Status FileExists(const std::string& fname); + + // Store in *result the names of the children of the specified directory. + // The names are relative to "dir". + // Original contents of *results are dropped. + Status GetChildren(const std::string& dir, + std::vector* result); + + // Delete the named file. + Status DeleteFile(const std::string& fname); + + // Create the specified directory. Returns error if directory exists. + Status CreateDir(const std::string& dirname); + + // Creates directory if missing. Return Ok if it exists, or successful in + // Creating. + Status CreateDirIfMissing(const std::string& dirname); + + // Delete the specified directory. + Status DeleteDir(const std::string& dirname); + + // Store the size of fname in *file_size. + Status GetFileSize(const std::string& fname, uint64_t* file_size); + + // Store the last modification time of fname in *file_mtime. + Status GetFileModificationTime(const std::string& fname, + uint64_t* file_mtime); + // Rename file src to target. + Status RenameFile(const std::string& src, + const std::string& target); + // Hard Link file src to target. + Status LinkFile(const std::string& src, const std::string& target); + + // Lock the specified file. Used to prevent concurrent access to + // the same db by multiple processes. On failure, stores nullptr in + // *lock and returns non-OK. + // + // On success, stores a pointer to the object that represents the + // acquired lock in *lock and returns OK. The caller should call + // UnlockFile(*lock) to release the lock. If the process exits, + // the lock will be automatically released. + // + // If somebody else already holds the lock, finishes immediately + // with a failure. I.e., this call does not wait for existing locks + // to go away. + // + // May create the named file if it does not already exist. + Status LockFile(const std::string& fname, FileLock** lock); + + // Release the lock acquired by a previous successful call to LockFile. + // REQUIRES: lock was returned by a successful LockFile() call + // REQUIRES: lock has not already been unlocked. + Status UnlockFile(FileLock* lock); + + // Get full directory name for this db. 
+ Status GetAbsolutePath(const std::string& db_path, + std::string* output_path); + + // Generate unique id + std::string GenerateUniqueId(); + + // Get default EnvLibrados + static EnvLibrados* Default(); + + explicit EnvLibrados(const std::string& db_name, + const std::string& config_path, + const std::string& db_pool); + + explicit EnvLibrados(const std::string& client_name, // first 3 parameters are for RADOS client init + const std::string& cluster_name, + const uint64_t flags, + const std::string& db_name, + const std::string& config_path, + const std::string& db_pool, + const std::string& wal_dir, + const std::string& wal_pool, + const uint64_t write_buffer_size); + ~EnvLibrados() { + _rados.shutdown(); + } +private: + std::string _client_name; + std::string _cluster_name; + uint64_t _flags; + std::string _db_name; // get from user, readable string; Also used as db_id for db metadata + std::string _config_path; + librados::Rados _rados; // RADOS client + std::string _db_pool_name; + librados::IoCtx _db_pool_ioctx; // IoCtx for connecting db_pool + std::string _wal_dir; // WAL dir path + std::string _wal_pool_name; + librados::IoCtx _wal_pool_ioctx; // IoCtx for connecting wal_pool + uint64_t _write_buffer_size; // WritableFile buffer max size + + /* private function to communicate with rados */ + std::string _CreateFid(); + Status _GetFid(const std::string& fname, std::string& fid); + Status _GetFid(const std::string& fname, std::string& fid, int fid_len); + Status _RenameFid(const std::string& old_fname, const std::string& new_fname); + Status _AddFid(const std::string& fname, const std::string& fid); + Status _DelFid(const std::string& fname); + Status _GetSubFnames( + const std::string& dirname, + std::vector * result + ); + librados::IoCtx* _GetIoctx(const std::string& prefix); + friend class LibradosWritableFile; +}; +} +#endif diff --git a/external/rocksdb/include/rocksdb/utilities/env_mirror.h b/external/rocksdb/include/rocksdb/utilities/env_mirror.h new file mode 100755 index 0000000000..021fbfa45b --- /dev/null +++ b/external/rocksdb/include/rocksdb/utilities/env_mirror.h @@ -0,0 +1,166 @@ +// Copyright (c) 2015, Red Hat, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +// MirrorEnv is an Env implementation that mirrors all file-related +// operations to two backing Env's (provided at construction time). +// Writes are mirrored. For read operations, we do the read from both +// backends and assert that the results match. +// +// This is useful when implementing a new Env and ensuring that the +// semantics and behavior are correct (in that they match that of an +// existing, stable Env, like the default POSIX one). 
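[Editor's aside: the mirroring scheme described above is used like any other Env: point Options::env at the mirror and run a normal workload, letting the internal asserts flag divergence. A hedged sketch; the env-under-test argument, the path, and the name OpenMirrored are illustrative.]

#include "rocksdb/db.h"
#include "rocksdb/utilities/env_mirror.h"

// Hedged sketch: cross-check a new Env implementation against the default
// POSIX Env. Any mismatch in read results trips EnvMirror's asserts.
rocksdb::Status OpenMirrored(rocksdb::Env* env_under_test, rocksdb::DB** db) {
  // The mirror must outlive the DB; leaked here for brevity.
  rocksdb::EnvMirror* mirror =
      new rocksdb::EnvMirror(rocksdb::Env::Default(), env_under_test);
  rocksdb::Options options;
  options.create_if_missing = true;
  options.env = mirror;
  return rocksdb::DB::Open(options, "/tmp/mirror_db", db);
}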
diff --git a/external/rocksdb/include/rocksdb/utilities/env_mirror.h b/external/rocksdb/include/rocksdb/utilities/env_mirror.h
new file mode 100755
index 0000000000..021fbfa45b
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/env_mirror.h
@@ -0,0 +1,166 @@
+// Copyright (c) 2015, Red Hat, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// MirrorEnv is an Env implementation that mirrors all file-related
+// operations to two backing Env's (provided at construction time).
+// Writes are mirrored. For read operations, we do the read from both
+// backends and assert that the results match.
+//
+// This is useful when implementing a new Env and ensuring that the
+// semantics and behavior are correct (in that they match that of an
+// existing, stable Env, like the default POSIX one).
+
+#ifndef ROCKSDB_LITE
+
+#ifndef STORAGE_ROCKSDB_INCLUDE_UTILITIES_ENVMIRROR_H_
+#define STORAGE_ROCKSDB_INCLUDE_UTILITIES_ENVMIRROR_H_
+
+#include <algorithm>
+#include <string>
+#include <vector>
+#include "rocksdb/env.h"
+
+namespace rocksdb {
+
+class SequentialFileMirror;
+class RandomAccessFileMirror;
+class WritableFileMirror;
+
+class EnvMirror : public EnvWrapper {
+  Env* a_, *b_;
+
+ public:
+  EnvMirror(Env* a, Env* b) : EnvWrapper(a), a_(a), b_(b) {}
+
+  Status NewSequentialFile(const std::string& f,
+                           unique_ptr<SequentialFile>* r,
+                           const EnvOptions& options) override;
+  Status NewRandomAccessFile(const std::string& f,
+                             unique_ptr<RandomAccessFile>* r,
+                             const EnvOptions& options) override;
+  Status NewWritableFile(const std::string& f, unique_ptr<WritableFile>* r,
+                         const EnvOptions& options) override;
+  Status ReuseWritableFile(const std::string& fname,
+                           const std::string& old_fname,
+                           unique_ptr<WritableFile>* r,
+                           const EnvOptions& options) override;
+  virtual Status NewDirectory(const std::string& name,
+                              unique_ptr<Directory>* result) override {
+    unique_ptr<Directory> br;
+    Status as = a_->NewDirectory(name, result);
+    Status bs = b_->NewDirectory(name, &br);
+    assert(as == bs);
+    return as;
+  }
+  Status FileExists(const std::string& f) override {
+    Status as = a_->FileExists(f);
+    Status bs = b_->FileExists(f);
+    assert(as == bs);
+    return as;
+  }
+  Status GetChildren(const std::string& dir,
+                     std::vector<std::string>* r) override {
+    std::vector<std::string> ar, br;
+    Status as = a_->GetChildren(dir, &ar);
+    Status bs = b_->GetChildren(dir, &br);
+    assert(as == bs);
+    std::sort(ar.begin(), ar.end());
+    std::sort(br.begin(), br.end());
+    if (!as.ok() || ar != br) {
+      assert(0 == "getchildren results don't match");
+    }
+    *r = ar;
+    return as;
+  }
+  Status DeleteFile(const std::string& f) override {
+    Status as = a_->DeleteFile(f);
+    Status bs = b_->DeleteFile(f);
+    assert(as == bs);
+    return as;
+  }
+  Status CreateDir(const std::string& d) override {
+    Status as = a_->CreateDir(d);
+    Status bs = b_->CreateDir(d);
+    assert(as == bs);
+    return as;
+  }
+  Status CreateDirIfMissing(const std::string& d) override {
+    Status as = a_->CreateDirIfMissing(d);
+    Status bs = b_->CreateDirIfMissing(d);
+    assert(as == bs);
+    return as;
+  }
+  Status DeleteDir(const std::string& d) override {
+    Status as = a_->DeleteDir(d);
+    Status bs = b_->DeleteDir(d);
+    assert(as == bs);
+    return as;
+  }
+  Status GetFileSize(const std::string& f, uint64_t* s) override {
+    uint64_t asize, bsize;
+    Status as = a_->GetFileSize(f, &asize);
+    Status bs = b_->GetFileSize(f, &bsize);
+    assert(as == bs);
+    assert(!as.ok() || asize == bsize);
+    *s = asize;
+    return as;
+  }
+
+  Status GetFileModificationTime(const std::string& fname,
+                                 uint64_t* file_mtime) override {
+    uint64_t amtime, bmtime;
+    Status as = a_->GetFileModificationTime(fname, &amtime);
+    Status bs = b_->GetFileModificationTime(fname, &bmtime);
+    assert(as == bs);
+    assert(!as.ok() || amtime - bmtime < 10000 || bmtime - amtime < 10000);
+    *file_mtime = amtime;
+    return as;
+  }
+
+  Status RenameFile(const std::string& s, const std::string& t) override {
+    Status as = a_->RenameFile(s, t);
+    Status bs = b_->RenameFile(s, t);
+    assert(as == bs);
+    return as;
+  }
+
+  Status LinkFile(const std::string& s, const std::string& t) override {
+    Status as = a_->LinkFile(s, t);
+    Status bs = b_->LinkFile(s, t);
+    assert(as == bs);
+    return as;
+  }
+
+  class FileLockMirror : public FileLock {
+   public:
+    FileLock* a_, *b_;
+    FileLockMirror(FileLock* a, FileLock* b) : a_(a), b_(b) {}
+  };
+
+  Status LockFile(const std::string& f, FileLock** l) override {
+    FileLock* al, *bl;
+    Status as = a_->LockFile(f, &al);
+    Status bs = b_->LockFile(f, &bl);
+    assert(as == bs);
+    if (as.ok()) *l = new FileLockMirror(al, bl);
+    return as;
+  }
+
+  Status UnlockFile(FileLock* l) override {
+    FileLockMirror* ml = static_cast<FileLockMirror*>(l);
+    Status as = a_->UnlockFile(ml->a_);
+    Status bs = b_->UnlockFile(ml->b_);
+    assert(as == bs);
+    return as;
+  }
+};
+
+}  // namespace rocksdb
+
+#endif  // STORAGE_ROCKSDB_INCLUDE_UTILITIES_ENVMIRROR_H_
+
+#endif  // ROCKSDB_LITE
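A short usage sketch for the mirroring pattern the header comment describes: run an Env under test side by side with the default POSIX Env, so that every file operation is cross-checked by the asserts above. Function and path names are illustrative only:

#include "rocksdb/db.h"
#include "rocksdb/utilities/env_mirror.h"

rocksdb::Status OpenMirrored(rocksdb::Env* env_under_test, rocksdb::DB** db) {
  // Reads and writes go to both Envs; mismatches trip the asserts above.
  rocksdb::EnvMirror* mirror =
      new rocksdb::EnvMirror(rocksdb::Env::Default(), env_under_test);
  rocksdb::Options options;
  options.create_if_missing = true;
  options.env = mirror;
  return rocksdb::DB::Open(options, "/tmp/mirror_test", db);
}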
diff --git a/external/rocksdb/include/rocksdb/utilities/env_registry.h b/external/rocksdb/include/rocksdb/utilities/env_registry.h
new file mode 100755
index 0000000000..4074c87f00
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/env_registry.h
@@ -0,0 +1,45 @@
+// Copyright (c) 2016-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#ifndef ROCKSDB_LITE
+
+#include <functional>
+#include <memory>
+#include <string>
+
+#include "rocksdb/env.h"
+
+namespace rocksdb {
+
+// Returns a new Env when called with a URI string. Populates the unique_ptr
+// argument if granting ownership to caller.
+typedef std::function<Env*(const std::string&, std::unique_ptr<Env>*)>
+    EnvFactoryFunc;
+
+// Creates a new Env using the registered factory function corresponding to a
+// prefix of uri.
+//
+// If no prefixes match, returns nullptr. If multiple prefixes match, the
+// factory function used is unspecified.
+//
+// Populates env_guard with result pointer if caller is granted ownership.
+Env* NewEnvFromUri(const std::string& uri, std::unique_ptr<Env>* env_guard);
+
+// To register an Env factory function, initialize an EnvRegistrar object with
+// static storage duration. For example:
+//
+//   static EnvRegistrar hdfs_reg("hdfs://", &CreateHdfsEnv);
+//
+// Then, calling NewEnvFromUri("hdfs://some_path", ...) will use CreateHdfsEnv
+// to make a new Env.
+class EnvRegistrar {
+ public:
+  explicit EnvRegistrar(std::string uri_prefix, EnvFactoryFunc env_factory);
+};
+
+}  // namespace rocksdb
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/include/rocksdb/utilities/flashcache.h b/external/rocksdb/include/rocksdb/utilities/flashcache.h
new file mode 100755
index 0000000000..b54d245f06
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/flashcache.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <memory>
+#include "rocksdb/env.h"
+
+namespace rocksdb {
+
+// This API is experimental. We will mark it stable once we run it in
+// production for a while.
+// NewFlashcacheAwareEnv() creates an Env that blacklists all background
+// threads (used for flush and compaction) from using flashcache to cache their
+// reads. Reads from compaction threads don't need to be cached because they
+// are soon going to be made obsolete (due to the nature of compaction).
+// Usually you would pass Env::Default() as base.
+// cachedev_fd is a file descriptor of the flashcache device. The caller has
+// to open the flashcache device before calling this API.
+extern std::unique_ptr<Env> NewFlashcacheAwareEnv(Env* base,
+                                                  const int cachedev_fd);
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/include/rocksdb/utilities/geo_db.h b/external/rocksdb/include/rocksdb/utilities/geo_db.h
new file mode 100755
index 0000000000..37e5ebdc74
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/geo_db.h
@@ -0,0 +1,114 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+
+#ifndef ROCKSDB_LITE
+#pragma once
+#include <string>
+#include <vector>
+
+#include "rocksdb/utilities/stackable_db.h"
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+
+//
+// Configurable options needed for setting up a Geo database
+//
+struct GeoDBOptions {
+  // Backup info and error messages will be written to info_log
+  // if non-nullptr.
+  // Default: nullptr
+  Logger* info_log;
+
+  explicit GeoDBOptions(Logger* _info_log = nullptr) : info_log(_info_log) {}
+};
+
+//
+// A position in the earth's geoid
+//
+class GeoPosition {
+ public:
+  double latitude;
+  double longitude;
+
+  explicit GeoPosition(double la = 0, double lo = 0)
+      : latitude(la), longitude(lo) {}
+};
+
+//
+// Description of an object on the Geoid. It is located by a GPS location,
+// and is identified by the id. The value associated with this object is
+// an opaque string 'value'. Different objects identified by unique id's
+// can have the same gps-location associated with them.
+//
+class GeoObject {
+ public:
+  GeoPosition position;
+  std::string id;
+  std::string value;
+
+  GeoObject() {}
+
+  GeoObject(const GeoPosition& pos, const std::string& i,
+            const std::string& val)
+      : position(pos), id(i), value(val) {}
+};
+
+class GeoIterator {
+ public:
+  GeoIterator() = default;
+  virtual ~GeoIterator() {}
+  virtual void Next() = 0;
+  virtual bool Valid() const = 0;
+  virtual const GeoObject& geo_object() = 0;
+  virtual Status status() const = 0;
+};
+
+//
+// Stack your DB with GeoDB to be able to get geo-spatial support
+//
+class GeoDB : public StackableDB {
+ public:
+  // GeoDBOptions have to be the same as the ones used in a previous
+  // incarnation of the DB
+  //
+  // GeoDB owns the pointer `DB* db` now. You should not delete it or
+  // use it after the invocation of GeoDB
+  // GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {}
+  GeoDB(DB* db, const GeoDBOptions& options) : StackableDB(db) {}
+  virtual ~GeoDB() {}
+
+  // Insert a new object into the location database. The object is
+  // uniquely identified by the id. If an object with the same id already
+  // exists in the db, then the old one is overwritten by the new
+  // object being inserted here.
+  virtual Status Insert(const GeoObject& object) = 0;
+
+  // Retrieve the value of the object located at the specified GPS
+  // location and identified by the 'id'.
+  virtual Status GetByPosition(const GeoPosition& pos, const Slice& id,
+                               std::string* value) = 0;
+
+  // Retrieve the value of the object identified by the 'id'. This method
+  // could be potentially slower than GetByPosition
+  virtual Status GetById(const Slice& id, GeoObject* object) = 0;
+
+  // Delete the specified object
+  virtual Status Remove(const Slice& id) = 0;
+
+  // Returns an iterator for the items within a circular radius from the
+  // specified gps location. If 'number_of_values' is specified,
+  // then the iterator is capped to that number of objects.
+  // The radius is specified in 'meters'.
+  virtual GeoIterator* SearchRadial(const GeoPosition& pos, double radius,
+                                    int number_of_values = INT_MAX) = 0;
+};
+
+}  // namespace rocksdb
+#endif  // ROCKSDB_LITE
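The interface above is abstract (a concrete GeoDB implementation comes from elsewhere in the utilities tree), but the intended insert/search flow looks roughly like this sketch:

#include "rocksdb/utilities/geo_db.h"

void InsertAndSearch(rocksdb::GeoDB* geo) {
  rocksdb::GeoObject obj(rocksdb::GeoPosition(37.77, -122.41), "id1", "blob");
  if (!geo->Insert(obj).ok()) return;
  // Everything within ~1000 meters; a few non-intersecting extras may appear.
  rocksdb::GeoIterator* it =
      geo->SearchRadial(rocksdb::GeoPosition(37.77, -122.41), 1000);
  for (; it->Valid(); it->Next()) {
    const rocksdb::GeoObject& found = it->geo_object();
    (void)found;  // consume found.id / found.value here
  }
  delete it;
}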
diff --git a/external/rocksdb/include/rocksdb/utilities/info_log_finder.h b/external/rocksdb/include/rocksdb/utilities/info_log_finder.h
new file mode 100755
index 0000000000..4b7530c28b
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/info_log_finder.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <string>
+#include <vector>
+
+#include "rocksdb/db.h"
+#include "rocksdb/options.h"
+
+namespace rocksdb {
+
+// This function can be used to list the Information logs,
+// given the db pointer.
+Status GetInfoLogList(DB* db, std::vector<std::string>* info_log_list);
+}  // namespace rocksdb
diff --git a/external/rocksdb/include/rocksdb/utilities/json_document.h b/external/rocksdb/include/rocksdb/utilities/json_document.h
new file mode 100755
index 0000000000..9473258c8d
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/json_document.h
@@ -0,0 +1,195 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+#pragma once
+#ifndef ROCKSDB_LITE
+
+#include <deque>
+#include <map>
+#include <memory>
+#include <string>
+#include <unordered_map>
+#include <utility>
+#include <vector>
+
+#include "rocksdb/slice.h"
+
+// We use JSONDocument for DocumentDB API
+// Implementation inspired by folly::dynamic, rapidjson and fbson
+
+namespace fbson {
+class FbsonValue;
+class ObjectVal;
+template <typename T>
+class FbsonWriterT;
+class FbsonOutStream;
+typedef FbsonWriterT<FbsonOutStream> FbsonWriter;
+}  // namespace fbson
+
+namespace rocksdb {
+
+// NOTE: none of this is thread-safe
+class JSONDocument {
+ public:
+  // return nullptr on parse failure
+  static JSONDocument* ParseJSON(const char* json);
+
+  enum Type {
+    kNull,
+    kArray,
+    kBool,
+    kDouble,
+    kInt64,
+    kObject,
+    kString,
+  };
+
+  /* implicit */ JSONDocument();  // null
+  /* implicit */ JSONDocument(bool b);
+  /* implicit */ JSONDocument(double d);
+  /* implicit */ JSONDocument(int8_t i);
+  /* implicit */ JSONDocument(int16_t i);
+  /* implicit */ JSONDocument(int32_t i);
+  /* implicit */ JSONDocument(int64_t i);
+  /* implicit */ JSONDocument(const std::string& s);
+  /* implicit */ JSONDocument(const char* s);
+  // constructs JSONDocument of specific type with default value
+  explicit JSONDocument(Type _type);
+
+  JSONDocument(const JSONDocument& json_document);
+
+  JSONDocument(JSONDocument&& json_document);
+
+  Type type() const;
+
+  // REQUIRES: IsObject()
+  bool Contains(const std::string& key) const;
+  // REQUIRES: IsObject()
+  // Returns non-owner object
+  JSONDocument operator[](const std::string& key) const;
+
+  // REQUIRES: IsArray() == true || IsObject() == true
+  size_t Count() const;
+
+  // REQUIRES: IsArray()
+  // Returns non-owner object
+  JSONDocument operator[](size_t i) const;
+
+  JSONDocument& operator=(JSONDocument jsonDocument);
+
+  bool IsNull() const;
+  bool IsArray() const;
+  bool IsBool() const;
+  bool IsDouble() const;
+  bool IsInt64() const;
+  bool IsObject() const;
+  bool IsString() const;
+
+  // REQUIRES: IsBool() == true
+  bool GetBool() const;
+  // REQUIRES: IsDouble() == true
+  double GetDouble() const;
+  // REQUIRES: IsInt64() == true
+  int64_t GetInt64() const;
+  // REQUIRES: IsString() == true
+  std::string GetString() const;
+
+  bool operator==(const JSONDocument& rhs) const;
+
+  bool operator!=(const JSONDocument& rhs) const;
+
+  JSONDocument Copy() const;
+
+  bool IsOwner() const;
+
+  std::string DebugString() const;
+
+ private:
+  class ItemsIteratorGenerator;
+
+ public:
+  // REQUIRES: IsObject()
+  ItemsIteratorGenerator Items() const;
+
+  // appends serialized object to dst
+  void Serialize(std::string* dst) const;
+  // returns nullptr if Slice doesn't represent valid serialized JSONDocument
+  static JSONDocument* Deserialize(const Slice& src);
+
+ private:
+  friend class JSONDocumentBuilder;
+
+  JSONDocument(fbson::FbsonValue* val, bool makeCopy);
+
+  void InitFromValue(const fbson::FbsonValue* val);
+
+  // iteration on objects
+  class const_item_iterator {
+   private:
+    class Impl;
+
+   public:
+    typedef std::pair<std::string, JSONDocument> value_type;
+    explicit const_item_iterator(Impl* impl);
+    const_item_iterator(const_item_iterator&&);
+    const_item_iterator& operator++();
+    bool operator!=(const const_item_iterator& other);
+    value_type operator*();
+    ~const_item_iterator();
+
+   private:
+    friend class ItemsIteratorGenerator;
+    std::unique_ptr<Impl> it_;
+  };
+
+  class ItemsIteratorGenerator {
+   public:
+    explicit ItemsIteratorGenerator(const fbson::ObjectVal& object);
+    const_item_iterator begin() const;
+
+    const_item_iterator end() const;
+
+   private:
+    const fbson::ObjectVal& object_;
+  };
+
+  std::unique_ptr<char[]> data_;
+  mutable fbson::FbsonValue* value_;
+
+  // Our serialization format's first byte specifies the encoding version. That
+  // way, we can easily change our format while providing backwards
+  // compatibility. This constant specifies the current version of the
+  // serialization format
+  static const char kSerializationFormatVersion;
+};
+
+class JSONDocumentBuilder {
+ public:
+  JSONDocumentBuilder();
+
+  explicit JSONDocumentBuilder(fbson::FbsonOutStream* out);
+
+  void Reset();
+
+  bool WriteStartArray();
+
+  bool WriteEndArray();
+
+  bool WriteStartObject();
+
+  bool WriteEndObject();
+
+  bool WriteKeyValue(const std::string& key, const JSONDocument& value);
+
+  bool WriteJSONDocument(const JSONDocument& value);
+
+  JSONDocument GetJSONDocument();
+
+  ~JSONDocumentBuilder();
+
+ private:
+  std::unique_ptr<fbson::FbsonWriter> writer_;
+};
+
+}  // namespace rocksdb
+
+#endif  // ROCKSDB_LITE
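A small sketch of the parse/serialize round trip this class supports; note that ParseJSON returns nullptr on malformed input, so the result must be checked:

#include <memory>
#include "rocksdb/utilities/json_document.h"

void JsonRoundTrip() {
  std::unique_ptr<rocksdb::JSONDocument> doc(
      rocksdb::JSONDocument::ParseJSON("{\"name\": \"alpha\", \"value\": 10}"));
  if (!doc || !doc->IsObject()) return;
  if (doc->Contains("value") && (*doc)["value"].IsInt64()) {
    int64_t v = (*doc)["value"].GetInt64();
    (void)v;
  }
  std::string serialized;
  doc->Serialize(&serialized);  // compact fbson-backed encoding, not JSON text
  std::unique_ptr<rocksdb::JSONDocument> copy(
      rocksdb::JSONDocument::Deserialize(rocksdb::Slice(serialized)));
}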
diff --git a/external/rocksdb/include/rocksdb/utilities/ldb_cmd.h b/external/rocksdb/include/rocksdb/utilities/ldb_cmd.h
new file mode 100755
index 0000000000..7df0cd76f0
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/ldb_cmd.h
@@ -0,0 +1,251 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+#pragma once
+
+#ifndef ROCKSDB_LITE
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <algorithm>
+#include <functional>
+#include <map>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "rocksdb/env.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/ldb_tool.h"
+#include "rocksdb/options.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/utilities/db_ttl.h"
+#include "rocksdb/utilities/ldb_cmd_execute_result.h"
+
+namespace rocksdb {
+
+class LDBCommand {
+ public:
+  // Command-line arguments
+  static const std::string ARG_DB;
+  static const std::string ARG_PATH;
+  static const std::string ARG_HEX;
+  static const std::string ARG_KEY_HEX;
+  static const std::string ARG_VALUE_HEX;
+  static const std::string ARG_CF_NAME;
+  static const std::string ARG_TTL;
+  static const std::string ARG_TTL_START;
+  static const std::string ARG_TTL_END;
+  static const std::string ARG_TIMESTAMP;
+  static const std::string ARG_FROM;
+  static const std::string ARG_TO;
+  static const std::string ARG_MAX_KEYS;
+  static const std::string ARG_BLOOM_BITS;
+  static const std::string ARG_FIX_PREFIX_LEN;
+  static const std::string ARG_COMPRESSION_TYPE;
+  static const std::string ARG_COMPRESSION_MAX_DICT_BYTES;
+  static const std::string ARG_BLOCK_SIZE;
+  static const std::string ARG_AUTO_COMPACTION;
+  static const std::string ARG_DB_WRITE_BUFFER_SIZE;
+  static const std::string ARG_WRITE_BUFFER_SIZE;
+  static const std::string ARG_FILE_SIZE;
+  static const std::string ARG_CREATE_IF_MISSING;
+  static const std::string ARG_NO_VALUE;
+
+  struct ParsedParams {
+    std::string cmd;
+    std::vector<std::string> cmd_params;
+    std::map<std::string, std::string> option_map;
+    std::vector<std::string> flags;
+  };
+
+  static LDBCommand* SelectCommand(const ParsedParams& parsed_params);
+
+  static LDBCommand* InitFromCmdLineArgs(
+      const std::vector<std::string>& args, const Options& options,
+      const LDBOptions& ldb_options,
+      const std::vector<ColumnFamilyDescriptor>* column_families,
+      const std::function<LDBCommand*(const ParsedParams&)>& selector =
+          SelectCommand);
+
+  static LDBCommand* InitFromCmdLineArgs(
+      int argc, char** argv, const Options& options,
+      const LDBOptions& ldb_options,
+      const std::vector<ColumnFamilyDescriptor>* column_families);
+
+  bool ValidateCmdLineOptions();
+
+  virtual Options PrepareOptionsForOpenDB();
+
+  virtual void SetDBOptions(Options options) { options_ = options; }
+
+  virtual void SetColumnFamilies(
+      const std::vector<ColumnFamilyDescriptor>* column_families) {
+    if (column_families != nullptr) {
+      column_families_ = *column_families;
+    } else {
+      column_families_.clear();
+    }
+  }
+
+  void SetLDBOptions(const LDBOptions& ldb_options) {
+    ldb_options_ = ldb_options;
+  }
+
+  virtual bool NoDBOpen() { return false; }
+
+  virtual ~LDBCommand() { CloseDB(); }
+
+  /* Run the command, and return the execute result. */
+  void Run();
+
+  virtual void DoCommand() = 0;
+
+  LDBCommandExecuteResult GetExecuteState() { return exec_state_; }
+
+  void ClearPreviousRunState() { exec_state_.Reset(); }
+
+  // Consider using Slice::DecodeHex directly instead if you don't need the
+  // 0x prefix
+  static std::string HexToString(const std::string& str);
+
+  // Consider using Slice::ToString(true) directly instead if
+  // you don't need the 0x prefix
+  static std::string StringToHex(const std::string& str);
+
+  static const char* DELIM;
+
+ protected:
+  LDBCommandExecuteResult exec_state_;
+  std::string db_path_;
+  std::string column_family_name_;
+  DB* db_;
+  DBWithTTL* db_ttl_;
+  std::map<std::string, ColumnFamilyHandle*> cf_handles_;
+
+  /**
+   * true implies that this command can work if the db is opened in read-only
+   * mode.
+   */
+  bool is_read_only_;
+
+  /** If true, the key is input/output as hex in get/put/scan/delete etc. */
+  bool is_key_hex_;
+
+  /** If true, the value is input/output as hex in get/put/scan/delete etc. */
+  bool is_value_hex_;
+
+  /** If true, the value is treated as timestamp suffixed */
+  bool is_db_ttl_;
+
+  // If true, the kvs are output with their insert/modify timestamp in a ttl db
+  bool timestamp_;
+
+  /**
+   * Map of options passed on the command-line.
+   */
+  const std::map<std::string, std::string> option_map_;
+
+  /**
+   * Flags passed on the command-line.
+   */
+  const std::vector<std::string> flags_;
+
+  /** List of command-line options valid for this command */
+  const std::vector<std::string> valid_cmd_line_options_;
+
+  bool ParseKeyValue(const std::string& line, std::string* key,
+                     std::string* value, bool is_key_hex, bool is_value_hex);
+
+  LDBCommand(const std::map<std::string, std::string>& options,
+             const std::vector<std::string>& flags, bool is_read_only,
+             const std::vector<std::string>& valid_cmd_line_options);
+
+  void OpenDB();
+
+  void CloseDB();
+
+  ColumnFamilyHandle* GetCfHandle();
+
+  static std::string PrintKeyValue(const std::string& key,
+                                   const std::string& value, bool is_key_hex,
+                                   bool is_value_hex);
+
+  static std::string PrintKeyValue(const std::string& key,
+                                   const std::string& value, bool is_hex);
+
+  /**
+   * Return true if the specified flag is present in the specified flags vector
+   */
+  static bool IsFlagPresent(const std::vector<std::string>& flags,
+                            const std::string& flag) {
+    return (std::find(flags.begin(), flags.end(), flag) != flags.end());
+  }
+
+  static std::string HelpRangeCmdArgs();
+
+  /**
+   * A helper function that returns a list of command line options
+   * used by this command. It includes the common options and the ones
+   * passed in.
+   */
+  static std::vector<std::string> BuildCmdLineOptions(
+      std::vector<std::string> options);
+
+  bool ParseIntOption(const std::map<std::string, std::string>& options,
+                      const std::string& option, int& value,
+                      LDBCommandExecuteResult& exec_state);
+
+  bool ParseStringOption(const std::map<std::string, std::string>& options,
+                         const std::string& option, std::string* value);
+
+  Options options_;
+  std::vector<ColumnFamilyDescriptor> column_families_;
+  LDBOptions ldb_options_;
+
+ private:
+  /**
+   * Interpret command line options and flags to determine if the key
+   * should be input/output in hex.
+   */
+  bool IsKeyHex(const std::map<std::string, std::string>& options,
+                const std::vector<std::string>& flags);
+
+  /**
+   * Interpret command line options and flags to determine if the value
+   * should be input/output in hex.
+   */
+  bool IsValueHex(const std::map<std::string, std::string>& options,
+                  const std::vector<std::string>& flags);
+
+  /**
+   * Returns the value of the specified option as a boolean.
+   * default_val is used if the option is not found in options.
+   * Throws an exception if the value of the option is not
+   * "true" or "false" (case insensitive).
+   */
+  bool ParseBooleanOption(const std::map<std::string, std::string>& options,
+                          const std::string& option, bool default_val);
+
+  /**
+   * Converts val to a boolean.
+   * val must be either true or false (case insensitive).
+   * Otherwise an exception is thrown.
+   */
+  bool StringToBool(std::string val);
+};
+
+class LDBCommandRunner {
+ public:
+  static void PrintHelp(const char* exec_name);
+
+  static void RunCommand(
+      int argc, char** argv, Options options, const LDBOptions& ldb_options,
+      const std::vector<ColumnFamilyDescriptor>* column_families);
+};
+
+}  // namespace rocksdb
+
+#endif  // ROCKSDB_LITE
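The same entry point the ldb tool uses can be driven programmatically. A sketch of running a single `get` against a database path; the argument strings mirror the ldb command line, and passing nullptr for column_families targets the default column family:

#include <string>
#include <vector>
#include "rocksdb/utilities/ldb_cmd.h"

int RunLdbGet(const std::string& db_path, const std::string& key) {
  std::vector<std::string> args = {"--db=" + db_path, "get", key};
  rocksdb::LDBCommand* cmd = rocksdb::LDBCommand::InitFromCmdLineArgs(
      args, rocksdb::Options(), rocksdb::LDBOptions(), nullptr);
  if (cmd == nullptr) return 1;  // unrecognized command or options
  cmd->Run();
  bool ok = cmd->GetExecuteState().IsSucceed();
  delete cmd;
  return ok ? 0 : 1;
}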
diff --git a/external/rocksdb/include/rocksdb/utilities/ldb_cmd_execute_result.h b/external/rocksdb/include/rocksdb/utilities/ldb_cmd_execute_result.h
new file mode 100755
index 0000000000..94f271c86e
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/ldb_cmd_execute_result.h
@@ -0,0 +1,75 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+#pragma once
+
+#ifdef FAILED
+#undef FAILED
+#endif
+
+namespace rocksdb {
+
+class LDBCommandExecuteResult {
+ public:
+  enum State {
+    EXEC_NOT_STARTED = 0,
+    EXEC_SUCCEED = 1,
+    EXEC_FAILED = 2,
+  };
+
+  LDBCommandExecuteResult() : state_(EXEC_NOT_STARTED), message_("") {}
+
+  LDBCommandExecuteResult(State state, std::string& msg)
+      : state_(state), message_(msg) {}
+
+  std::string ToString() {
+    std::string ret;
+    switch (state_) {
+      case EXEC_SUCCEED:
+        break;
+      case EXEC_FAILED:
+        ret.append("Failed: ");
+        break;
+      case EXEC_NOT_STARTED:
+        ret.append("Not started: ");
+    }
+    if (!message_.empty()) {
+      ret.append(message_);
+    }
+    return ret;
+  }
+
+  void Reset() {
+    state_ = EXEC_NOT_STARTED;
+    message_ = "";
+  }
+
+  bool IsSucceed() { return state_ == EXEC_SUCCEED; }
+
+  bool IsNotStarted() { return state_ == EXEC_NOT_STARTED; }
+
+  bool IsFailed() { return state_ == EXEC_FAILED; }
+
+  static LDBCommandExecuteResult Succeed(std::string msg) {
+    return LDBCommandExecuteResult(EXEC_SUCCEED, msg);
+  }
+
+  static LDBCommandExecuteResult Failed(std::string msg) {
+    return LDBCommandExecuteResult(EXEC_FAILED, msg);
+  }
+
+ private:
+  State state_;
+  std::string message_;
+
+  bool operator==(const LDBCommandExecuteResult&);
+  bool operator!=(const LDBCommandExecuteResult&);
+};
+
+}
diff --git a/external/rocksdb/include/rocksdb/utilities/leveldb_options.h b/external/rocksdb/include/rocksdb/utilities/leveldb_options.h
new file mode 100755
index 0000000000..d17bcb0d0d
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/leveldb_options.h
@@ -0,0 +1,144 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+
+#include <stddef.h>
+
+namespace rocksdb {
+
+class Cache;
+class Comparator;
+class Env;
+class FilterPolicy;
+class Logger;
+struct Options;
+class Snapshot;
+
+enum CompressionType : unsigned char;
+
+// Options to control the behavior of a database (passed to
+// DB::Open). A LevelDBOptions object can be initialized as though
+// it were a LevelDB Options object, and then it can be converted into
+// a RocksDB Options object.
+struct LevelDBOptions {
+  // -------------------
+  // Parameters that affect behavior
+
+  // Comparator used to define the order of keys in the table.
+  // Default: a comparator that uses lexicographic byte-wise ordering
+  //
+  // REQUIRES: The client must ensure that the comparator supplied
+  // here has the same name and orders keys *exactly* the same as the
+  // comparator provided to previous open calls on the same DB.
+  const Comparator* comparator;
+
+  // If true, the database will be created if it is missing.
+  // Default: false
+  bool create_if_missing;
+
+  // If true, an error is raised if the database already exists.
+  // Default: false
+  bool error_if_exists;
+
+  // If true, the implementation will do aggressive checking of the
+  // data it is processing and will stop early if it detects any
+  // errors. This may have unforeseen ramifications: for example, a
+  // corruption of one DB entry may cause a large number of entries to
+  // become unreadable or for the entire DB to become unopenable.
+  // Default: false
+  bool paranoid_checks;
+
+  // Use the specified object to interact with the environment,
+  // e.g. to read/write files, schedule background work, etc.
+  // Default: Env::Default()
+  Env* env;
+
+  // Any internal progress/error information generated by the db will
+  // be written to info_log if it is non-NULL, or to a file stored
+  // in the same directory as the DB contents if info_log is NULL.
+  // Default: NULL
+  Logger* info_log;
+
+  // -------------------
+  // Parameters that affect performance
+
+  // Amount of data to build up in memory (backed by an unsorted log
+  // on disk) before converting to a sorted on-disk file.
+  //
+  // Larger values increase performance, especially during bulk loads.
+  // Up to two write buffers may be held in memory at the same time,
+  // so you may wish to adjust this parameter to control memory usage.
+  // Also, a larger write buffer will result in a longer recovery time
+  // the next time the database is opened.
+  //
+  // Default: 4MB
+  size_t write_buffer_size;
+
+  // Number of open files that can be used by the DB. You may need to
+  // increase this if your database has a large working set (budget
+  // one open file per 2MB of working set).
+  //
+  // Default: 1000
+  int max_open_files;
+
+  // Control over blocks (user data is stored in a set of blocks, and
+  // a block is the unit of reading from disk).
+
+  // If non-NULL, use the specified cache for blocks.
+  // If NULL, leveldb will automatically create and use an 8MB internal cache.
+  // Default: NULL
+  Cache* block_cache;
+
+  // Approximate size of user data packed per block. Note that the
+  // block size specified here corresponds to uncompressed data. The
+  // actual size of the unit read from disk may be smaller if
+  // compression is enabled. This parameter can be changed dynamically.
+  //
+  // Default: 4K
+  size_t block_size;
+
+  // Number of keys between restart points for delta encoding of keys.
+  // This parameter can be changed dynamically. Most clients should
+  // leave this parameter alone.
+  //
+  // Default: 16
+  int block_restart_interval;
+
+  // Compress blocks using the specified compression algorithm. This
+  // parameter can be changed dynamically.
+  //
+  // Default: kSnappyCompression, which gives lightweight but fast
+  // compression.
+  //
+  // Typical speeds of kSnappyCompression on an Intel(R) Core(TM)2 2.4GHz:
+  //    ~200-500MB/s compression
+  //    ~400-800MB/s decompression
+  // Note that these speeds are significantly faster than most
+  // persistent storage speeds, and therefore it is typically never
+  // worth switching to kNoCompression. Even if the input data is
+  // incompressible, the kSnappyCompression implementation will
+  // efficiently detect that and will switch to uncompressed mode.
+  CompressionType compression;
+
+  // If non-NULL, use the specified filter policy to reduce disk reads.
+  // Many applications will benefit from passing the result of
+  // NewBloomFilterPolicy() here.
+  //
+  // Default: NULL
+  const FilterPolicy* filter_policy;
+
+  // Create a LevelDBOptions object with default values for all fields.
+  LevelDBOptions();
+};
+
+// Converts a LevelDBOptions object into a RocksDB Options object.
+Options ConvertOptions(const LevelDBOptions& leveldb_options);
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/include/rocksdb/utilities/memory_util.h b/external/rocksdb/include/rocksdb/utilities/memory_util.h
new file mode 100755
index 0000000000..d89bb6adc4
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/memory_util.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef ROCKSDB_LITE
+
+#pragma once
+
+#include <map>
+#include <string>
+#include <unordered_set>
+#include <vector>
+
+#include "rocksdb/cache.h"
+#include "rocksdb/db.h"
+
+namespace rocksdb {
+
+// Returns the current memory usage of the specified DB instances.
+class MemoryUtil {
+ public:
+  enum UsageType : int {
+    // Memory usage of all the mem-tables.
+    kMemTableTotal = 0,
+    // Memory usage of those un-flushed mem-tables.
+    kMemTableUnFlushed = 1,
+    // Memory usage of all the table readers.
+    kTableReadersTotal = 2,
+    // Memory usage by Cache.
+    kCacheTotal = 3,
+    kNumUsageTypes = 4
+  };
+
+  // Returns the approximate memory usage of different types in the input
+  // list of DBs and Cache set. For instance, in the output map
+  // usage_by_type, usage_by_type[kMemTableTotal] will store the memory
+  // usage of all the mem-tables from all the input rocksdb instances.
+  //
+  // Note that for memory usage inside Cache class, we will
+  // only report the usage of the input "cache_set" without
+  // including those Cache usage inside the input list "dbs"
+  // of DBs.
+  static Status GetApproximateMemoryUsageByType(
+      const std::vector<DB*>& dbs,
+      const std::unordered_set<const Cache*> cache_set,
+      std::map<MemoryUtil::UsageType, uint64_t>* usage_by_type);
+};
+}  // namespace rocksdb
+#endif  // !ROCKSDB_LITE
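A sketch of the intended call pattern for MemoryUtil: collect per-type usage for a set of open DBs plus any shared block caches, then read out the buckets of interest:

#include <map>
#include <unordered_set>
#include <vector>
#include "rocksdb/utilities/memory_util.h"

void ReportMemory(const std::vector<rocksdb::DB*>& dbs,
                  const std::unordered_set<const rocksdb::Cache*>& caches) {
  std::map<rocksdb::MemoryUtil::UsageType, uint64_t> usage;
  rocksdb::Status s = rocksdb::MemoryUtil::GetApproximateMemoryUsageByType(
      dbs, caches, &usage);
  if (s.ok()) {
    uint64_t memtables = usage[rocksdb::MemoryUtil::kMemTableTotal];
    uint64_t readers = usage[rocksdb::MemoryUtil::kTableReadersTotal];
    (void)memtables;
    (void)readers;  // e.g. export both to a metrics system
  }
}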
diff --git a/external/rocksdb/include/rocksdb/utilities/optimistic_transaction_db.h b/external/rocksdb/include/rocksdb/utilities/optimistic_transaction_db.h
new file mode 100755
index 0000000000..b2c2f99a87
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/optimistic_transaction_db.h
@@ -0,0 +1,76 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+#ifndef ROCKSDB_LITE
+
+#include <string>
+#include <vector>
+
+#include "rocksdb/comparator.h"
+#include "rocksdb/db.h"
+
+namespace rocksdb {
+
+class Transaction;
+
+// Database with Transaction support.
+//
+// See optimistic_transaction.h and examples/transaction_example.cc
+
+// Options to use when starting an Optimistic Transaction
+struct OptimisticTransactionOptions {
+  // Setting set_snapshot=true is the same as calling SetSnapshot().
+  bool set_snapshot = false;
+
+  // Should be set if the DB has a non-default comparator.
+  // See comment in WriteBatchWithIndex constructor.
+  const Comparator* cmp = BytewiseComparator();
+};
+
+class OptimisticTransactionDB {
+ public:
+  // Open an OptimisticTransactionDB similar to DB::Open().
+  static Status Open(const Options& options, const std::string& dbname,
+                     OptimisticTransactionDB** dbptr);
+
+  static Status Open(const DBOptions& db_options, const std::string& dbname,
+                     const std::vector<ColumnFamilyDescriptor>& column_families,
+                     std::vector<ColumnFamilyHandle*>* handles,
+                     OptimisticTransactionDB** dbptr);
+
+  virtual ~OptimisticTransactionDB() {}
+
+  // Starts a new Transaction.
+  //
+  // Caller is responsible for deleting the returned transaction when no
+  // longer needed.
+  //
+  // If old_txn is not null, BeginTransaction will reuse this Transaction
+  // handle instead of allocating a new one. This is an optimization to avoid
+  // extra allocations when repeatedly creating transactions.
+  virtual Transaction* BeginTransaction(
+      const WriteOptions& write_options,
+      const OptimisticTransactionOptions& txn_options =
+          OptimisticTransactionOptions(),
+      Transaction* old_txn = nullptr) = 0;
+
+  // Return the underlying Database that was opened
+  virtual DB* GetBaseDB() = 0;
+
+ protected:
+  // To Create an OptimisticTransactionDB, call Open()
+  explicit OptimisticTransactionDB(DB* db) {}
+  OptimisticTransactionDB() {}
+
+ private:
+  // No copying allowed
+  OptimisticTransactionDB(const OptimisticTransactionDB&);
+  void operator=(const OptimisticTransactionDB&);
+};
+
+}  // namespace rocksdb
+
+#endif  // ROCKSDB_LITE
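A sketch of the begin/commit cycle; with optimistic concurrency the conflict check happens at Commit(), which is where a non-OK status surfaces if another writer touched the same keys:

#include "rocksdb/utilities/optimistic_transaction_db.h"
#include "rocksdb/utilities/transaction.h"

rocksdb::Status OptimisticWrite(const std::string& path) {
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::OptimisticTransactionDB* txn_db = nullptr;
  rocksdb::Status s =
      rocksdb::OptimisticTransactionDB::Open(options, path, &txn_db);
  if (!s.ok()) return s;
  rocksdb::Transaction* txn = txn_db->BeginTransaction(rocksdb::WriteOptions());
  txn->Put("key", "value");
  s = txn->Commit();  // a conflicting concurrent write fails here
  delete txn;
  delete txn_db;
  return s;
}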
diff --git a/external/rocksdb/include/rocksdb/utilities/option_change_migration.h b/external/rocksdb/include/rocksdb/utilities/option_change_migration.h
new file mode 100755
index 0000000000..aa14a02999
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/option_change_migration.h
@@ -0,0 +1,19 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <string>
+#include "rocksdb/options.h"
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+// Try to migrate a DB created with old_opts to use new_opts.
+// Multiple column families are not supported.
+// It is best-effort. No guarantee to succeed.
+// A full compaction may be executed.
+Status OptionChangeMigration(std::string dbname, const Options& old_opts,
+                             const Options& new_opts);
+}  // namespace rocksdb
diff --git a/external/rocksdb/include/rocksdb/utilities/options_util.h b/external/rocksdb/include/rocksdb/utilities/options_util.h
new file mode 100755
index 0000000000..1d961a2bb8
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/options_util.h
@@ -0,0 +1,86 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+// This file contains utility functions for RocksDB Options.
+#pragma once
+
+#ifndef ROCKSDB_LITE
+
+#include <string>
+#include <vector>
+
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/options.h"
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+// Constructs the DBOptions and ColumnFamilyDescriptors by loading the
+// latest RocksDB options file stored in the specified rocksdb database.
+//
+// Note that all the pointer options (except table_factory, which will
+// be described in more detail below) will be initialized with the default
+// values. Developers can further initialize them after this function call.
+// Below is an example list of pointer options which will be initialized:
+//
+// * env
+// * memtable_factory
+// * compaction_filter_factory
+// * prefix_extractor
+// * comparator
+// * merge_operator
+// * compaction_filter
+//
+// For table_factory, this function further supports deserializing
+// BlockBasedTableFactory and its BlockBasedTableOptions except the
+// pointer options of BlockBasedTableOptions (flush_block_policy_factory,
+// block_cache, and block_cache_compressed), which will be initialized with
+// default values. Developers can further specify these three options by
+// casting the return value of TableFactory::GetOptions() to
+// BlockBasedTableOptions and making necessary changes.
+//
+// examples/options_file_example.cc demonstrates how to use this function
+// to open a RocksDB instance.
+//
+// @return the function returns an OK status when it went successfully. If
+//     the specified "dbpath" does not contain any option file, then a
+//     Status::NotFound will be returned. A return value other than
+//     Status::OK or Status::NotFound indicates there's some error related
+//     to the options file itself.
+//
+// @see LoadOptionsFromFile
+Status LoadLatestOptions(const std::string& dbpath, Env* env,
+                         DBOptions* db_options,
+                         std::vector<ColumnFamilyDescriptor>* cf_descs);
+
+// Similar to LoadLatestOptions, this function constructs the DBOptions
+// and ColumnFamilyDescriptors based on the specified RocksDB Options file.
+//
+// @see LoadLatestOptions
+Status LoadOptionsFromFile(const std::string& options_file_name, Env* env,
+                           DBOptions* db_options,
+                           std::vector<ColumnFamilyDescriptor>* cf_descs);
+
+// Returns the latest options file name under the specified db path.
+Status GetLatestOptionsFileName(const std::string& dbpath, Env* env,
+                                std::string* options_file_name);
+
+// Returns Status::OK if the input DBOptions and ColumnFamilyDescriptors
+// are compatible with the latest options stored in the specified DB path.
+//
+// If the return status is non-ok, it means the specified RocksDB instance
+// might not be correctly opened with the input set of options. Currently,
+// changing one of the following options will fail the compatibility check:
+//
+// * comparator
+// * prefix_extractor
+// * table_factory
+// * merge_operator
+Status CheckOptionsCompatibility(
+    const std::string& dbpath, Env* env, const DBOptions& db_options,
+    const std::vector<ColumnFamilyDescriptor>& cf_descs);
+
+}  // namespace rocksdb
+#endif  // !ROCKSDB_LITE
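A sketch of the reopen-with-stored-options flow the comments above describe (mirroring examples/options_file_example.cc):

#include <string>
#include <vector>
#include "rocksdb/utilities/options_util.h"

rocksdb::Status ReopenWithStoredOptions(const std::string& dbpath,
                                        rocksdb::DB** db) {
  rocksdb::DBOptions db_opts;
  std::vector<rocksdb::ColumnFamilyDescriptor> cf_descs;
  rocksdb::Status s = rocksdb::LoadLatestOptions(
      dbpath, rocksdb::Env::Default(), &db_opts, &cf_descs);
  if (!s.ok()) return s;  // NotFound => dbpath has no options file
  std::vector<rocksdb::ColumnFamilyHandle*> handles;
  return rocksdb::DB::Open(db_opts, dbpath, cf_descs, &handles, db);
}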
diff --git a/external/rocksdb/include/rocksdb/utilities/sim_cache.h b/external/rocksdb/include/rocksdb/utilities/sim_cache.h
new file mode 100755
index 0000000000..cc8a01beca
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/sim_cache.h
@@ -0,0 +1,66 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include <stdint.h>
+#include <memory>
+#include <string>
+#include "rocksdb/cache.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+
+class SimCache;
+
+// For instrumentation purposes, use NewSimCache instead of the NewLRUCache
+// API. NewSimCache is a wrapper function returning a SimCache instance that,
+// besides the Cache interface, provides the additional interface of the
+// SimCache class to predict the block cache hit rate without actually
+// allocating the memory. It can help users tune their current block cache
+// size, and determine how efficiently they are using the memory.
+extern std::shared_ptr<SimCache> NewSimCache(std::shared_ptr<Cache> cache,
+                                             size_t sim_capacity,
+                                             int num_shard_bits);
+
+class SimCache : public Cache {
+ public:
+  SimCache() {}
+
+  virtual ~SimCache() {}
+
+  // returns the maximum configured capacity of the simcache for simulation
+  virtual size_t GetSimCapacity() const = 0;
+
+  // simcache doesn't provide an internal handler reference to the user, so
+  // PinnedUsage is always 0 and the behavior will not be exactly consistent
+  // with the real cache.
+  // returns the memory size for the entries residing in the simcache.
+  virtual size_t GetSimUsage() const = 0;
+
+  // sets the maximum configured capacity of the simcache. When the new
+  // capacity is less than the old capacity and the existing usage is
+  // greater than the new capacity, the implementation will purge old entries
+  // to fit the new capacity.
+  virtual void SetSimCapacity(size_t capacity) = 0;
+
+  // returns the lookup count of the simcache
+  virtual uint64_t get_lookup_counter() const = 0;
+  // returns the hit count of the simcache
+  virtual uint64_t get_hit_counter() const = 0;
+  // returns the hit rate of the simcache
+  virtual double get_hit_rate() const = 0;
+  // resets the lookup and hit counters
+  virtual void reset_counter() = 0;
+  // String representation of the statistics of the simcache
+  virtual std::string ToString() const = 0;
+
+ private:
+  SimCache(const SimCache&);
+  SimCache& operator=(const SimCache&);
+};
+
+}  // namespace rocksdb
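A sketch of the instrumentation pattern: allocate a modest real cache, wrap it in a SimCache sized at the capacity being evaluated, and read the simulated hit rate later. The sizes below are arbitrary:

#include "rocksdb/table.h"
#include "rocksdb/utilities/sim_cache.h"

void AttachSimCache(rocksdb::Options* options) {
  std::shared_ptr<rocksdb::Cache> real_cache =
      rocksdb::NewLRUCache(64 << 20);  // 64MB actually allocated
  // Simulate a 1GB cache: hit/miss counters reflect the larger capacity.
  std::shared_ptr<rocksdb::SimCache> sim =
      rocksdb::NewSimCache(real_cache, 1 << 30, 6 /* num_shard_bits */);
  rocksdb::BlockBasedTableOptions table_options;
  table_options.block_cache = sim;
  options->table_factory.reset(
      rocksdb::NewBlockBasedTableFactory(table_options));
  // Later: sim->get_hit_rate() or sim->ToString() for the simulated stats.
}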
diff --git a/external/rocksdb/include/rocksdb/utilities/spatial_db.h b/external/rocksdb/include/rocksdb/utilities/spatial_db.h
new file mode 100755
index 0000000000..108915fd77
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/spatial_db.h
@@ -0,0 +1,261 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+#ifndef ROCKSDB_LITE
+
+#include <string>
+#include <vector>
+
+#include "rocksdb/db.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/utilities/stackable_db.h"
+
+namespace rocksdb {
+namespace spatial {
+
+// NOTE: SpatialDB is experimental and we might change its API without warning.
+// Please talk to us before developing against SpatialDB API.
+//
+// SpatialDB is a support for spatial indexes built on top of RocksDB.
+// When creating a new SpatialDB, clients specify a list of spatial indexes to
+// build on their data. Each spatial index is defined by the area and
+// granularity. If you're storing map data, different spatial index
+// granularities can be used for different zoom levels.
+//
+// Each element inserted into SpatialDB has:
+// * a bounding box, which determines how the element will be indexed
+// * string blob, which will usually be WKB representation of the polygon
+//   (http://en.wikipedia.org/wiki/Well-known_text)
+// * feature set, which is a map of key-value pairs, where value can be null,
+//   int, double, bool, string
+// * a list of indexes to insert the element in
+//
+// Each query is executed on a single spatial index. Query guarantees that it
+// will return all elements intersecting the specified bounding box, but it
+// might also return some extra non-intersecting elements.
+
+// Variant is a class that can be many things: null, bool, int, double or
+// string. It is used to store different value types in FeatureSet (see below)
+struct Variant {
+  // Don't change the values here, they are persisted on disk
+  enum Type {
+    kNull = 0x0,
+    kBool = 0x1,
+    kInt = 0x2,
+    kDouble = 0x3,
+    kString = 0x4,
+  };
+
+  Variant() : type_(kNull) {}
+  /* implicit */ Variant(bool b) : type_(kBool) { data_.b = b; }
+  /* implicit */ Variant(uint64_t i) : type_(kInt) { data_.i = i; }
+  /* implicit */ Variant(double d) : type_(kDouble) { data_.d = d; }
+  /* implicit */ Variant(const std::string& s) : type_(kString) {
+    new (&data_.s) std::string(s);
+  }
+
+  Variant(const Variant& v) : type_(v.type_) { Init(v, data_); }
+
+  Variant& operator=(const Variant& v);
+
+  Variant(Variant&& rhs) : type_(kNull) { *this = std::move(rhs); }
+
+  Variant& operator=(Variant&& v);
+
+  ~Variant() { Destroy(type_, data_); }
+
+  Type type() const { return type_; }
+  bool get_bool() const { return data_.b; }
+  uint64_t get_int() const { return data_.i; }
+  double get_double() const { return data_.d; }
+  const std::string& get_string() const { return *GetStringPtr(data_); }
+
+  bool operator==(const Variant& other) const;
+  bool operator!=(const Variant& other) const { return !(*this == other); }
+
+ private:
+  Type type_;
+
+  union Data {
+    bool b;
+    uint64_t i;
+    double d;
+    // Current version of MS compiler not C++11 compliant so can not put
+    // std::string here; however, even then we still need the rest of the
+    // maintenance.
+    char s[sizeof(std::string)];
+  } data_;
+
+  // Avoid type-punned aliasing problem
+  static std::string* GetStringPtr(Data& d) {
+    void* p = d.s;
+    return reinterpret_cast<std::string*>(p);
+  }
+
+  static const std::string* GetStringPtr(const Data& d) {
+    const void* p = d.s;
+    return reinterpret_cast<const std::string*>(p);
+  }
+
+  static void Init(const Variant&, Data&);
+
+  static void Destroy(Type t, Data& d) {
+    if (t == kString) {
+      using std::string;
+      GetStringPtr(d)->~string();
+    }
+  }
+};
+
+// FeatureSet is a map of key-value pairs. One feature set is associated with
+// each element in SpatialDB. It can be used to add rich data about the
+// element.
+class FeatureSet {
+ private:
+  typedef std::unordered_map<std::string, Variant> map;
+
+ public:
+  class iterator {
+   public:
+    /* implicit */ iterator(const map::const_iterator itr) : itr_(itr) {}
+    iterator& operator++() {
+      ++itr_;
+      return *this;
+    }
+    bool operator!=(const iterator& other) { return itr_ != other.itr_; }
+    bool operator==(const iterator& other) { return itr_ == other.itr_; }
+    map::value_type operator*() { return *itr_; }
+
+   private:
+    map::const_iterator itr_;
+  };
+  FeatureSet() = default;
+
+  FeatureSet* Set(const std::string& key, const Variant& value);
+  bool Contains(const std::string& key) const;
+  // REQUIRES: Contains(key)
+  const Variant& Get(const std::string& key) const;
+  iterator Find(const std::string& key) const;
+
+  iterator begin() const { return map_.begin(); }
+  iterator end() const { return map_.end(); }
+
+  void Clear();
+  size_t Size() const { return map_.size(); }
+
+  void Serialize(std::string* output) const;
+  // REQUIRED: empty FeatureSet
+  bool Deserialize(const Slice& input);
+
+  std::string DebugString() const;
+
+ private:
+  map map_;
+};
+
+// BoundingBox is a helper structure for defining rectangles representing
+// bounding boxes of spatial elements.
+template <typename T>
+struct BoundingBox {
+  T min_x, min_y, max_x, max_y;
+  BoundingBox() = default;
+  BoundingBox(T _min_x, T _min_y, T _max_x, T _max_y)
+      : min_x(_min_x), min_y(_min_y), max_x(_max_x), max_y(_max_y) {}
+
+  bool Intersects(const BoundingBox& a) const {
+    return !(min_x > a.max_x || min_y > a.max_y || a.min_x > max_x ||
+             a.min_y > max_y);
+  }
+};
+
+struct SpatialDBOptions {
+  uint64_t cache_size = 1 * 1024 * 1024 * 1024LL;  // 1GB
+  int num_threads = 16;
+  bool bulk_load = true;
+};
+
+// Cursor is used to return data from the query to the client. To get all the
+// data from the query, just call Next() while Valid() is true
+class Cursor {
+ public:
+  Cursor() = default;
+  virtual ~Cursor() {}
+
+  virtual bool Valid() const = 0;
+  // REQUIRES: Valid()
+  virtual void Next() = 0;
+
+  // Lifetime of the underlying storage until the next call to Next()
+  // REQUIRES: Valid()
+  virtual const Slice blob() = 0;
+  // Lifetime of the underlying storage until the next call to Next()
+  // REQUIRES: Valid()
+  virtual const FeatureSet& feature_set() = 0;
+
+  virtual Status status() const = 0;
+
+ private:
+  // No copying allowed
+  Cursor(const Cursor&);
+  void operator=(const Cursor&);
+};
+
+// SpatialIndexOptions defines a spatial index that will be built on the data
+struct SpatialIndexOptions {
+  // Spatial indexes are referenced by names
+  std::string name;
+  // An area that is indexed. If the element is not intersecting with spatial
+  // index's bbox, it will not be inserted into the index
+  BoundingBox<double> bbox;
+  // tile_bits controls the granularity of the spatial index. Each dimension
+  // of the bbox will be split into (1 << tile_bits) tiles, so there will be a
+  // total of (1 << tile_bits)^2 tiles. It is recommended to configure the size
+  // of each tile to be approximately the size of the query on that spatial
+  // index
+  uint32_t tile_bits;
+  SpatialIndexOptions() {}
+  SpatialIndexOptions(const std::string& _name,
+                      const BoundingBox<double>& _bbox, uint32_t _tile_bits)
+      : name(_name), bbox(_bbox), tile_bits(_tile_bits) {}
+};
+
+class SpatialDB : public StackableDB {
+ public:
+  // Creates the SpatialDB with the specified list of indexes.
+  // REQUIRED: db doesn't exist
+  static Status Create(const SpatialDBOptions& options, const std::string& name,
+                       const std::vector<SpatialIndexOptions>& spatial_indexes);
+
+  // Open the existing SpatialDB. The resulting db object will be returned
+  // through the db parameter.
+  // REQUIRED: db was created using SpatialDB::Create
+  static Status Open(const SpatialDBOptions& options, const std::string& name,
+                     SpatialDB** db, bool read_only = false);
+
+  explicit SpatialDB(DB* db) : StackableDB(db) {}
+
+  // Insert the element into the DB. The element will be inserted into the
+  // specified spatial_indexes, based on the specified bbox.
+  // REQUIRES: spatial_indexes.size() > 0
+  virtual Status Insert(const WriteOptions& write_options,
+                        const BoundingBox<double>& bbox, const Slice& blob,
+                        const FeatureSet& feature_set,
+                        const std::vector<std::string>& spatial_indexes) = 0;
+
+  // Calling Compact() after inserting a bunch of elements should speed up
+  // reading. This is especially useful if you use SpatialDBOptions::bulk_load.
+  // num_threads determines how many threads we'll use for compactions.
+  // Setting this to a bigger number will use more IO and CPU, but finish
+  // faster
+  virtual Status Compact(int num_threads = 1) = 0;
+
+  // Query the specified spatial_index. Query will return all elements that
+  // intersect bbox, but it may also return some extra elements.
+  virtual Cursor* Query(const ReadOptions& read_options,
+                        const BoundingBox<double>& bbox,
+                        const std::string& spatial_index) = 0;
+};
+
+}  // namespace spatial
+}  // namespace rocksdb
+#endif  // ROCKSDB_LITE
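A create/open/insert sketch for the experimental API above; the single index here covers the unit square with 2^10 x 2^10 tiles, and the blob string stands in for a real WKB payload:

#include <vector>
#include "rocksdb/utilities/spatial_db.h"

using rocksdb::spatial::BoundingBox;
using rocksdb::spatial::FeatureSet;
using rocksdb::spatial::SpatialDB;
using rocksdb::spatial::SpatialDBOptions;
using rocksdb::spatial::SpatialIndexOptions;

rocksdb::Status SpatialExample(const std::string& name) {
  std::vector<SpatialIndexOptions> indexes = {
      SpatialIndexOptions("index", BoundingBox<double>(0, 0, 1, 1), 10)};
  rocksdb::Status s = SpatialDB::Create(SpatialDBOptions(), name, indexes);
  if (!s.ok()) return s;
  SpatialDB* db = nullptr;
  s = SpatialDB::Open(SpatialDBOptions(), name, &db);
  if (!s.ok()) return s;
  FeatureSet feature_set;
  feature_set.Set("type", std::string("building"));
  s = db->Insert(rocksdb::WriteOptions(),
                 BoundingBox<double>(0.1, 0.1, 0.2, 0.2), "wkb-blob",
                 feature_set, {"index"});
  delete db;
  return s;
}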
diff --git a/external/rocksdb/include/rocksdb/utilities/stackable_db.h b/external/rocksdb/include/rocksdb/utilities/stackable_db.h
new file mode 100755
index 0000000000..7565088e0d
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/stackable_db.h
@@ -0,0 +1,321 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#include <string>
+#include "rocksdb/db.h"
+
+#ifdef _WIN32
+// Windows API macro interference
+#undef DeleteFile
+#endif
+
+
+namespace rocksdb {
+
+// This class contains APIs to stack rocksdb wrappers. Eg. stack TTL over base db
+class StackableDB : public DB {
+ public:
+  // StackableDB is the owner of db now!
+  explicit StackableDB(DB* db) : db_(db) {}
+
+  ~StackableDB() {
+    delete db_;
+  }
+
+  virtual DB* GetBaseDB() {
+    return db_;
+  }
+
+  virtual DB* GetRootDB() override { return db_->GetRootDB(); }
+
+  virtual Status CreateColumnFamily(const ColumnFamilyOptions& options,
+                                    const std::string& column_family_name,
+                                    ColumnFamilyHandle** handle) override {
+    return db_->CreateColumnFamily(options, column_family_name, handle);
+  }
+
+  virtual Status DropColumnFamily(ColumnFamilyHandle* column_family) override {
+    return db_->DropColumnFamily(column_family);
+  }
+
+  virtual Status DestroyColumnFamilyHandle(
+      ColumnFamilyHandle* column_family) override {
+    return db_->DestroyColumnFamilyHandle(column_family);
+  }
+
+  using DB::Put;
+  virtual Status Put(const WriteOptions& options,
+                     ColumnFamilyHandle* column_family, const Slice& key,
+                     const Slice& val) override {
+    return db_->Put(options, column_family, key, val);
+  }
+
+  using DB::Get;
+  virtual Status Get(const ReadOptions& options,
+                     ColumnFamilyHandle* column_family, const Slice& key,
+                     std::string* value) override {
+    return db_->Get(options, column_family, key, value);
+  }
+
+  using DB::MultiGet;
+  virtual std::vector<Status> MultiGet(
+      const ReadOptions& options,
+      const std::vector<ColumnFamilyHandle*>& column_family,
+      const std::vector<Slice>& keys,
+      std::vector<std::string>* values) override {
+    return db_->MultiGet(options, column_family, keys, values);
+  }
+
+  using DB::AddFile;
+  virtual Status AddFile(ColumnFamilyHandle* column_family,
+                         const std::vector<ExternalSstFileInfo>& file_info_list,
+                         bool move_file) override {
+    return db_->AddFile(column_family, file_info_list, move_file);
+  }
+  virtual Status AddFile(ColumnFamilyHandle* column_family,
+                         const std::vector<std::string>& file_path_list,
+                         bool move_file) override {
+    return db_->AddFile(column_family, file_path_list, move_file);
+  }
+
+  using DB::KeyMayExist;
+  virtual bool KeyMayExist(const ReadOptions& options,
+                           ColumnFamilyHandle* column_family, const Slice& key,
+                           std::string* value,
+                           bool* value_found = nullptr) override {
+    return db_->KeyMayExist(options, column_family, key, value, value_found);
+  }
+
+  using DB::Delete;
+  virtual Status Delete(const WriteOptions& wopts,
+                        ColumnFamilyHandle* column_family,
+                        const Slice& key) override {
+    return db_->Delete(wopts, column_family, key);
+  }
+
+  using DB::SingleDelete;
+  virtual Status SingleDelete(const WriteOptions& wopts,
+                              ColumnFamilyHandle* column_family,
+                              const Slice& key) override {
+    return db_->SingleDelete(wopts, column_family, key);
+  }
+
+  using DB::Merge;
+  virtual Status Merge(const WriteOptions& options,
+                       ColumnFamilyHandle* column_family, const Slice& key,
+                       const Slice& value) override {
+    return db_->Merge(options, column_family, key, value);
+  }
+
+
+  virtual Status Write(const WriteOptions& opts, WriteBatch* updates)
+      override {
+    return db_->Write(opts, updates);
+  }
+
+  using DB::NewIterator;
+  virtual Iterator* NewIterator(const ReadOptions& opts,
+                                ColumnFamilyHandle* column_family) override {
+    return db_->NewIterator(opts, column_family);
+  }
+
+  virtual Status NewIterators(
+      const ReadOptions& options,
+      const std::vector<ColumnFamilyHandle*>& column_families,
+      std::vector<Iterator*>* iterators) override {
+    return db_->NewIterators(options, column_families, iterators);
+  }
+
+
+  virtual const Snapshot* GetSnapshot() override {
+    return db_->GetSnapshot();
+  }
+
+  virtual void ReleaseSnapshot(const Snapshot* snapshot) override {
+    return db_->ReleaseSnapshot(snapshot);
+  }
+
+  using DB::GetProperty;
+  virtual bool GetProperty(ColumnFamilyHandle* column_family,
+                           const Slice& property, std::string* value) override {
+    return db_->GetProperty(column_family, property, value);
+  }
+
+  using DB::GetIntProperty;
+  virtual bool GetIntProperty(ColumnFamilyHandle* column_family,
+                              const Slice& property, uint64_t* value) override {
+    return db_->GetIntProperty(column_family, property, value);
+  }
+
+  using DB::GetAggregatedIntProperty;
+  virtual bool GetAggregatedIntProperty(const Slice& property,
+                                        uint64_t* value) override {
+    return db_->GetAggregatedIntProperty(property, value);
+  }
+
+  using DB::GetApproximateSizes;
+  virtual void GetApproximateSizes(ColumnFamilyHandle* column_family,
+                                   const Range* r, int n, uint64_t* sizes,
+                                   bool include_memtable = false) override {
+    return db_->GetApproximateSizes(column_family, r, n, sizes,
+                                    include_memtable);
+  }
+
+  using DB::CompactRange;
+  virtual Status CompactRange(const CompactRangeOptions& options,
+                              ColumnFamilyHandle* column_family,
+                              const Slice* begin, const Slice* end) override {
+    return db_->CompactRange(options, column_family, begin, end);
+  }
+
+  using DB::CompactFiles;
+  virtual Status CompactFiles(
+      const CompactionOptions& compact_options,
+      ColumnFamilyHandle* column_family,
+      const std::vector<std::string>& input_file_names,
+      const int output_level, const int output_path_id = -1) override {
+    return db_->CompactFiles(
+        compact_options, column_family, input_file_names,
+        output_level, output_path_id);
+  }
+
+  virtual Status PauseBackgroundWork() override {
+    return db_->PauseBackgroundWork();
+  }
+  virtual Status ContinueBackgroundWork() override {
+    return db_->ContinueBackgroundWork();
+  }
+
+  virtual Status EnableAutoCompaction(
+      const std::vector<ColumnFamilyHandle*>& column_family_handles) override {
+    return db_->EnableAutoCompaction(column_family_handles);
+  }
+
+  using DB::NumberLevels;
+  virtual int NumberLevels(ColumnFamilyHandle* column_family) override {
+    return db_->NumberLevels(column_family);
+  }
+
+  using DB::MaxMemCompactionLevel;
+  virtual int MaxMemCompactionLevel(ColumnFamilyHandle* column_family)
+      override {
+    return db_->MaxMemCompactionLevel(column_family);
+  }
+
+  using DB::Level0StopWriteTrigger;
+  virtual int Level0StopWriteTrigger(ColumnFamilyHandle* column_family)
+      override {
+    return db_->Level0StopWriteTrigger(column_family);
+  }
+
+  virtual const std::string& GetName() const override {
+    return db_->GetName();
+  }
+
+  virtual Env* GetEnv() const override {
+    return db_->GetEnv();
+  }
+
+  using DB::GetOptions;
+  virtual const Options& GetOptions(ColumnFamilyHandle* column_family) const
+      override {
+    return db_->GetOptions(column_family);
+  }
db_->GetOptions(column_family); + } + + using DB::GetDBOptions; + virtual const DBOptions& GetDBOptions() const override { + return db_->GetDBOptions(); + } + + using DB::Flush; + virtual Status Flush(const FlushOptions& fopts, + ColumnFamilyHandle* column_family) override { + return db_->Flush(fopts, column_family); + } + + virtual Status SyncWAL() override { + return db_->SyncWAL(); + } + +#ifndef ROCKSDB_LITE + + virtual Status DisableFileDeletions() override { + return db_->DisableFileDeletions(); + } + + virtual Status EnableFileDeletions(bool force) override { + return db_->EnableFileDeletions(force); + } + + virtual void GetLiveFilesMetaData( + std::vector* metadata) override { + db_->GetLiveFilesMetaData(metadata); + } + + virtual void GetColumnFamilyMetaData( + ColumnFamilyHandle *column_family, + ColumnFamilyMetaData* cf_meta) override { + db_->GetColumnFamilyMetaData(column_family, cf_meta); + } + +#endif // ROCKSDB_LITE + + virtual Status GetLiveFiles(std::vector& vec, uint64_t* mfs, + bool flush_memtable = true) override { + return db_->GetLiveFiles(vec, mfs, flush_memtable); + } + + virtual SequenceNumber GetLatestSequenceNumber() const override { + return db_->GetLatestSequenceNumber(); + } + + virtual Status GetSortedWalFiles(VectorLogPtr& files) override { + return db_->GetSortedWalFiles(files); + } + + virtual Status DeleteFile(std::string name) override { + return db_->DeleteFile(name); + } + + virtual Status GetDbIdentity(std::string& identity) const override { + return db_->GetDbIdentity(identity); + } + + using DB::SetOptions; + virtual Status SetOptions(ColumnFamilyHandle* column_family_handle, + const std::unordered_map& + new_options) override { + return db_->SetOptions(column_family_handle, new_options); + } + + using DB::GetPropertiesOfAllTables; + virtual Status GetPropertiesOfAllTables( + ColumnFamilyHandle* column_family, + TablePropertiesCollection* props) override { + return db_->GetPropertiesOfAllTables(column_family, props); + } + + using DB::GetPropertiesOfTablesInRange; + virtual Status GetPropertiesOfTablesInRange( + ColumnFamilyHandle* column_family, const Range* range, std::size_t n, + TablePropertiesCollection* props) override { + return db_->GetPropertiesOfTablesInRange(column_family, range, n, props); + } + + virtual Status GetUpdatesSince( + SequenceNumber seq_number, unique_ptr* iter, + const TransactionLogIterator::ReadOptions& read_options) override { + return db_->GetUpdatesSince(seq_number, iter, read_options); + } + + virtual ColumnFamilyHandle* DefaultColumnFamily() const override { + return db_->DefaultColumnFamily(); + } + + protected: + DB* db_; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/include/rocksdb/utilities/table_properties_collectors.h b/external/rocksdb/include/rocksdb/utilities/table_properties_collectors.h new file mode 100755 index 0000000000..68a88e7180 --- /dev/null +++ b/external/rocksdb/include/rocksdb/utilities/table_properties_collectors.h @@ -0,0 +1,29 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
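The StackableDB interface above forwards every call to the wrapped DB, which makes it a natural base for decorators (DBWithTTL and TransactionDB below are built this way). A minimal illustrative sketch, not part of the patch, with a hypothetical wrapper that counts Put() calls:

#include <atomic>
#include <cstdint>
#include "rocksdb/utilities/stackable_db.h"

// Hypothetical decorator: counts Put() calls, then forwards to the wrapped DB.
class CountingDB : public rocksdb::StackableDB {
 public:
  explicit CountingDB(rocksdb::DB* db) : rocksdb::StackableDB(db) {}

  using rocksdb::StackableDB::Put;
  rocksdb::Status Put(const rocksdb::WriteOptions& options,
                      rocksdb::ColumnFamilyHandle* column_family,
                      const rocksdb::Slice& key,
                      const rocksdb::Slice& value) override {
    puts_.fetch_add(1, std::memory_order_relaxed);
    return db_->Put(options, column_family, key, value);  // db_ is the wrapped DB
  }

  uint64_t put_count() const { return puts_.load(std::memory_order_relaxed); }

 private:
  std::atomic<uint64_t> puts_{0};
};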
diff --git a/external/rocksdb/include/rocksdb/utilities/table_properties_collectors.h b/external/rocksdb/include/rocksdb/utilities/table_properties_collectors.h
new file mode 100755
index 0000000000..68a88e7180
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/table_properties_collectors.h
@@ -0,0 +1,29 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+#ifndef ROCKSDB_LITE
+#include <memory>
+
+#include "rocksdb/table_properties.h"
+
+namespace rocksdb {
+
+// Creates a factory of a table property collector that marks an SST
+// file as needing compaction when it observes at least "D" deletion
+// entries in any "N" consecutive entries.
+//
+// @param sliding_window_size "N". Note that this number will be
+//     rounded up to the smallest multiple of 128 that is no less
+//     than the specified size.
+// @param deletion_trigger "D". Note that even when "N" is changed,
+//     the specified number for "D" will not be changed.
+extern std::shared_ptr<TablePropertiesCollectorFactory> NewCompactOnDeletionCollectorFactory(size_t sliding_window_size, size_t deletion_trigger);
+}  // namespace rocksdb
+
+#endif  // !ROCKSDB_LITE
diff --git a/external/rocksdb/include/rocksdb/utilities/transaction.h b/external/rocksdb/include/rocksdb/utilities/transaction.h
new file mode 100755
index 0000000000..5a6c0cf45c
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/transaction.h
@@ -0,0 +1,429 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#ifndef ROCKSDB_LITE
+
+#include <string>
+#include <vector>
+
+#include "rocksdb/comparator.h"
+#include "rocksdb/db.h"
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+
+class Iterator;
+class TransactionDB;
+class WriteBatchWithIndex;
+
+typedef std::string TransactionName;
+
+// Provides notification to the caller of SetSnapshotOnNextOperation when
+// the actual snapshot gets created
+class TransactionNotifier {
+ public:
+  virtual ~TransactionNotifier() {}
+
+  // Implement this method to receive notification when a snapshot is
+  // requested via SetSnapshotOnNextOperation.
+  virtual void SnapshotCreated(const Snapshot* newSnapshot) = 0;
+};
+
+// Provides BEGIN/COMMIT/ROLLBACK transactions.
+//
+// To use transactions, you must first create either an OptimisticTransactionDB
+// or a TransactionDB. See examples/[optimistic_]transaction_example.cc for
+// more information.
+//
+// To create a transaction, use [Optimistic]TransactionDB::BeginTransaction().
+//
+// It is up to the caller to synchronize access to this object.
+//
+// See examples/transaction_example.cc for some simple examples.
+//
+// TODO(agiardullo): Not yet implemented
+//  -PerfContext statistics
+//  -Support for using Transactions with DBWithTTL
+class Transaction {
+ public:
+  virtual ~Transaction() {}
+
+  // If a transaction has a snapshot set, the transaction will ensure that
+  // any keys successfully written (or fetched via GetForUpdate()) have not
+  // been modified outside of this transaction since the time the snapshot was
+  // set.
+  // If a snapshot has not been set, the transaction guarantees that keys have
+  // not been modified since the time each key was first written (or fetched
+  // via GetForUpdate()).
+  //
+  // Using SetSnapshot() will provide stricter isolation guarantees at the
+  // expense of potentially more transaction failures due to conflicts with
+  // other writes.
+  //
+  // Calling SetSnapshot() has no effect on keys written before this function
+  // has been called.
+  //
+  // SetSnapshot() may be called multiple times if you would like to change
+  // the snapshot used for different operations in this transaction.
+  //
+  // Calling SetSnapshot will not affect the version of data returned by Get()
+  // methods. See Transaction::Get() for more details.
+  virtual void SetSnapshot() = 0;
+
+  // Similar to SetSnapshot(), but will not change the current snapshot
+  // until Put/Merge/Delete/GetForUpdate/MultiGetForUpdate is called.
+  // By calling this function, the transaction will essentially call
+  // SetSnapshot() for you right before performing the next write/GetForUpdate.
+  //
+  // Calling SetSnapshotOnNextOperation() will not affect what snapshot is
+  // returned by GetSnapshot() until the next write/GetForUpdate is executed.
+  //
+  // When the snapshot is created the notifier's SnapshotCreated method will
+  // be called so that the caller can get access to the snapshot.
+  //
+  // This is an optimization to reduce the likelihood of conflicts that
+  // could occur in between the time SetSnapshot() is called and the first
+  // write/GetForUpdate operation. E.g. this prevents the following
+  // race condition:
+  //
+  //   txn1->SetSnapshot();
+  //                                    txn2->Put("A", ...);
+  //                                    txn2->Commit();
+  //   txn1->GetForUpdate(opts, "A", ...);  // FAIL!
+  virtual void SetSnapshotOnNextOperation(std::shared_ptr<TransactionNotifier> notifier = nullptr) = 0;
+
+  // Returns the Snapshot created by the last call to SetSnapshot().
+  //
+  // REQUIRED: The returned Snapshot is only valid up until the next time
+  // SetSnapshot()/SetSnapshotOnNextOperation() is called, ClearSnapshot()
+  // is called, or the Transaction is deleted.
+  virtual const Snapshot* GetSnapshot() const = 0;
+
+  // Clears the current snapshot (i.e. no snapshot will be 'set')
+  //
+  // This removes any snapshot that currently exists or is set to be created
+  // on the next update operation (SetSnapshotOnNextOperation).
+  //
+  // Calling ClearSnapshot() has no effect on keys written before this function
+  // has been called.
+  //
+  // If a reference to a snapshot was retrieved via GetSnapshot(), it will no
+  // longer be valid and should be discarded after a call to ClearSnapshot().
+  virtual void ClearSnapshot() = 0;
+
+  // Prepare the current transaction for 2PC
+  virtual Status Prepare() = 0;
+
+  // Write all batched keys to the db atomically.
+  //
+  // Returns OK on success.
+  //
+  // May return any error status that could be returned by DB::Write().
+  //
+  // If this transaction was created by an OptimisticTransactionDB(),
+  // Status::Busy() may be returned if the transaction could not guarantee
+  // that there are no write conflicts. Status::TryAgain() may be returned
+  // if the memtable history size is not large enough
+  // (see max_write_buffer_number_to_maintain).
+  //
+  // If this transaction was created by a TransactionDB(), Status::Expired()
+  // may be returned if this transaction has lived for longer than
+  // TransactionOptions.expiration.
+  virtual Status Commit() = 0;
+
+  // Discard all batched writes in this transaction.
+  virtual Status Rollback() = 0;
+
+  // Records the state of the transaction for future calls to
+  // RollbackToSavePoint(). May be called multiple times to set multiple save
+  // points.
+  virtual void SetSavePoint() = 0;
+
+  // Undo all operations in this transaction (Put, Merge, Delete, PutLogData)
+  // since the most recent call to SetSavePoint() and removes the most recent
+  // SetSavePoint().
+  // If there is no previous call to SetSavePoint(), returns Status::NotFound()
+  virtual Status RollbackToSavePoint() = 0;
+
+  // This function is similar to DB::Get() except it will also read pending
+  // changes in this transaction. Currently, this function will return
+  // Status::MergeInProgress if the most recent write to the queried key in
+  // this batch is a Merge.
+  //
+  // If read_options.snapshot is not set, the current version of the key will
+  // be read. Calling SetSnapshot() does not affect the version of the data
+  // returned.
+  //
+  // Note that setting read_options.snapshot will affect what is read from the
+  // DB but will NOT change which keys are read from this transaction (the keys
+  // in this transaction do not yet belong to any snapshot and will be fetched
+  // regardless).
+  virtual Status Get(const ReadOptions& options, ColumnFamilyHandle* column_family, const Slice& key, std::string* value) = 0;
+
+  virtual Status Get(const ReadOptions& options, const Slice& key, std::string* value) = 0;
+
+  virtual std::vector<Status> MultiGet(const ReadOptions& options, const std::vector<ColumnFamilyHandle*>& column_family, const std::vector<Slice>& keys, std::vector<std::string>* values) = 0;
+
+  virtual std::vector<Status> MultiGet(const ReadOptions& options, const std::vector<Slice>& keys, std::vector<std::string>* values) = 0;
+
+  // Read this key and ensure that this transaction will only
+  // be able to be committed if this key is not written outside this
+  // transaction after it has first been read (or after the snapshot if a
+  // snapshot is set in this transaction). The transaction behavior is the
+  // same regardless of whether the key exists or not.
+  //
+  // Note: Currently, this function will return Status::MergeInProgress
+  // if the most recent write to the queried key in this batch is a Merge.
+  //
+  // The values returned by this function are similar to Transaction::Get().
+  // If value == nullptr, then this function will not read any data, but will
+  // still ensure that this key cannot be written to by outside of this
+  // transaction.
+  //
+  // If this transaction was created by an OptimisticTransaction, GetForUpdate()
+  // could cause commit() to fail. Otherwise, it could return any error
+  // that could be returned by DB::Get().
+  //
+  // If this transaction was created by a TransactionDB, it can return
+  // Status::OK() on success,
+  // Status::Busy() if there is a write conflict,
+  // Status::TimedOut() if a lock could not be acquired,
+  // Status::TryAgain() if the memtable history size is not large enough
+  //   (see max_write_buffer_number_to_maintain),
+  // Status::MergeInProgress() if merge operations cannot be resolved,
+  // or other errors if this key could not be read.
+  virtual Status GetForUpdate(const ReadOptions& options, ColumnFamilyHandle* column_family, const Slice& key, std::string* value) = 0;
+
+  virtual Status GetForUpdate(const ReadOptions& options, const Slice& key, std::string* value) = 0;
+
+  virtual std::vector<Status> MultiGetForUpdate(const ReadOptions& options, const std::vector<ColumnFamilyHandle*>& column_family, const std::vector<Slice>& keys, std::vector<std::string>* values) = 0;
+
+  virtual std::vector<Status> MultiGetForUpdate(const ReadOptions& options, const std::vector<Slice>& keys, std::vector<std::string>* values) = 0;
+
+  // Returns an iterator that will iterate on all keys in the default
+  // column family including both keys in the DB and uncommitted keys in this
+  // transaction.
+  //
+  // Setting read_options.snapshot will affect what is read from the
+  // DB but will NOT change which keys are read from this transaction (the keys
+  // in this transaction do not yet belong to any snapshot and will be fetched
+  // regardless).
+  //
+  // Caller is responsible for deleting the returned Iterator.
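The Get() semantics above imply that a transaction always observes its own pending writes, even before Commit(). A short illustrative sketch, assuming a TransactionDB* opened as described in transaction_db.h further below:

#include <cassert>
#include <string>
#include "rocksdb/utilities/transaction_db.h"

void ReadYourOwnWrites(rocksdb::TransactionDB* txn_db) {
  rocksdb::Transaction* txn = txn_db->BeginTransaction(rocksdb::WriteOptions());
  rocksdb::Status s = txn->Put("key", "value1");  // buffered, not yet committed
  std::string value;
  s = txn->Get(rocksdb::ReadOptions(), "key", &value);
  assert(s.ok() && value == "value1");  // the pending write is visible
  s = txn->Commit();                    // atomically publishes the write
  delete txn;
}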
+ // + // The returned iterator is only valid until Commit(), Rollback(), or + // RollbackToSavePoint() is called. + virtual Iterator* GetIterator(const ReadOptions& read_options) = 0; + + virtual Iterator* GetIterator(const ReadOptions& read_options, + ColumnFamilyHandle* column_family) = 0; + + // Put, Merge, Delete, and SingleDelete behave similarly to the corresponding + // functions in WriteBatch, but will also do conflict checking on the + // keys being written. + // + // If this Transaction was created on an OptimisticTransactionDB, these + // functions should always return Status::OK(). + // + // If this Transaction was created on a TransactionDB, the status returned + // can be: + // Status::OK() on success, + // Status::Busy() if there is a write conflict, + // Status::TimedOut() if a lock could not be acquired, + // Status::TryAgain() if the memtable history size is not large enough + // (See max_write_buffer_number_to_maintain) + // or other errors on unexpected failures. + virtual Status Put(ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) = 0; + virtual Status Put(const Slice& key, const Slice& value) = 0; + virtual Status Put(ColumnFamilyHandle* column_family, const SliceParts& key, + const SliceParts& value) = 0; + virtual Status Put(const SliceParts& key, const SliceParts& value) = 0; + + virtual Status Merge(ColumnFamilyHandle* column_family, const Slice& key, + const Slice& value) = 0; + virtual Status Merge(const Slice& key, const Slice& value) = 0; + + virtual Status Delete(ColumnFamilyHandle* column_family, + const Slice& key) = 0; + virtual Status Delete(const Slice& key) = 0; + virtual Status Delete(ColumnFamilyHandle* column_family, + const SliceParts& key) = 0; + virtual Status Delete(const SliceParts& key) = 0; + + virtual Status SingleDelete(ColumnFamilyHandle* column_family, + const Slice& key) = 0; + virtual Status SingleDelete(const Slice& key) = 0; + virtual Status SingleDelete(ColumnFamilyHandle* column_family, + const SliceParts& key) = 0; + virtual Status SingleDelete(const SliceParts& key) = 0; + + // PutUntracked() will write a Put to the batch of operations to be committed + // in this transaction. This write will only happen if this transaction + // gets committed successfully. But unlike Transaction::Put(), + // no conflict checking will be done for this key. + // + // If this Transaction was created on a TransactionDB, this function will + // still acquire locks necessary to make sure this write doesn't cause + // conflicts in other transactions and may return Status::Busy(). 
+  virtual Status PutUntracked(ColumnFamilyHandle* column_family, const Slice& key, const Slice& value) = 0;
+  virtual Status PutUntracked(const Slice& key, const Slice& value) = 0;
+  virtual Status PutUntracked(ColumnFamilyHandle* column_family, const SliceParts& key, const SliceParts& value) = 0;
+  virtual Status PutUntracked(const SliceParts& key, const SliceParts& value) = 0;
+
+  virtual Status MergeUntracked(ColumnFamilyHandle* column_family, const Slice& key, const Slice& value) = 0;
+  virtual Status MergeUntracked(const Slice& key, const Slice& value) = 0;
+
+  virtual Status DeleteUntracked(ColumnFamilyHandle* column_family, const Slice& key) = 0;
+  virtual Status DeleteUntracked(const Slice& key) = 0;
+  virtual Status DeleteUntracked(ColumnFamilyHandle* column_family, const SliceParts& key) = 0;
+  virtual Status DeleteUntracked(const SliceParts& key) = 0;
+
+  // Similar to WriteBatch::PutLogData
+  virtual void PutLogData(const Slice& blob) = 0;
+
+  // By default, all Put/Merge/Delete operations will be indexed in the
+  // transaction so that Get/GetForUpdate/GetIterator can search for these
+  // keys.
+  //
+  // If the caller does not want to fetch the keys about to be written,
+  // they may want to avoid indexing as a performance optimization.
+  // Calling DisableIndexing() will turn off indexing for all future
+  // Put/Merge/Delete operations until EnableIndexing() is called.
+  //
+  // If a key is Put/Merge/Deleted after DisableIndexing is called and then
+  // is fetched via Get/GetForUpdate/GetIterator, the result of the fetch is
+  // undefined.
+  virtual void DisableIndexing() = 0;
+  virtual void EnableIndexing() = 0;
+
+  // Returns the number of distinct keys being tracked by this transaction.
+  // If this transaction was created by a TransactionDB, this is the number of
+  // keys that are currently locked by this transaction.
+  // If this transaction was created by an OptimisticTransactionDB, this is the
+  // number of keys that need to be checked for conflicts at commit time.
+  virtual uint64_t GetNumKeys() const = 0;
+
+  // Returns the number of Puts/Deletes/Merges that have been applied to this
+  // transaction so far.
+  virtual uint64_t GetNumPuts() const = 0;
+  virtual uint64_t GetNumDeletes() const = 0;
+  virtual uint64_t GetNumMerges() const = 0;
+
+  // Returns the elapsed time in milliseconds since this Transaction began.
+  virtual uint64_t GetElapsedTime() const = 0;
+
+  // Fetch the underlying write batch that contains all pending changes to be
+  // committed.
+  //
+  // Note: You should not write or delete anything from the batch directly and
+  // should only use the functions in the Transaction class to
+  // write to this transaction.
+  virtual WriteBatchWithIndex* GetWriteBatch() = 0;
+
+  // Change the value of TransactionOptions.lock_timeout (in milliseconds) for
+  // this transaction.
+  // Has no effect on OptimisticTransactions.
+  virtual void SetLockTimeout(int64_t timeout) = 0;
+
+  // Return the WriteOptions that will be used during Commit()
+  virtual WriteOptions* GetWriteOptions() = 0;
+
+  // Reset the WriteOptions that will be used during Commit().
+  virtual void SetWriteOptions(const WriteOptions& write_options) = 0;
+
+  // If this key was previously fetched in this transaction using
+  // GetForUpdate/MultiGetForUpdate(), calling UndoGetForUpdate will tell
+  // the transaction that it no longer needs to do any conflict checking
+  // for this key.
+  //
+  // If a key has been fetched N times via GetForUpdate/MultiGetForUpdate(),
+  // then UndoGetForUpdate will only have an effect if it is also called N
+  // times. If this key has been written to in this transaction,
+  // UndoGetForUpdate() will have no effect.
+  //
+  // If SetSavePoint() has been called after the GetForUpdate(),
+  // UndoGetForUpdate() will not have any effect.
+  //
+  // If this Transaction was created by an OptimisticTransactionDB,
+  // calling UndoGetForUpdate can affect whether this key is conflict checked
+  // at commit time.
+  // If this Transaction was created by a TransactionDB,
+  // calling UndoGetForUpdate may release any held locks for this key.
+  virtual void UndoGetForUpdate(ColumnFamilyHandle* column_family, const Slice& key) = 0;
+  virtual void UndoGetForUpdate(const Slice& key) = 0;
+
+  virtual Status RebuildFromWriteBatch(WriteBatch* src_batch) = 0;
+
+  virtual WriteBatch* GetCommitTimeWriteBatch() = 0;
+
+  virtual void SetLogNumber(uint64_t log) { log_number_ = log; }
+
+  virtual uint64_t GetLogNumber() { return log_number_; }
+
+  virtual Status SetName(const TransactionName& name) = 0;
+
+  virtual TransactionName GetName() { return name_; }
+
+  enum ExecutionStatus {
+    STARTED = 0,
+    AWAITING_PREPARE = 1,
+    PREPARED = 2,
+    AWAITING_COMMIT = 3,
+    COMMITED = 4,
+    AWAITING_ROLLBACK = 5,
+    ROLLEDBACK = 6,
+    LOCKS_STOLEN = 7,
+  };
+
+  // Execution status of the transaction.
+  std::atomic<ExecutionStatus> exec_status_;
+
+ protected:
+  explicit Transaction(const TransactionDB* db) {}
+  Transaction() {}
+
+  // the log in which the prepared section for this txn resides
+  // (for two phase commit)
+  uint64_t log_number_;
+  TransactionName name_;
+
+ private:
+  // No copying allowed
+  Transaction(const Transaction&);
+  void operator=(const Transaction&);
+};
+
+}  // namespace rocksdb
+
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/include/rocksdb/utilities/transaction_db.h b/external/rocksdb/include/rocksdb/utilities/transaction_db.h
new file mode 100755
index 0000000000..4b64754245
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/transaction_db.h
@@ -0,0 +1,142 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+#ifndef ROCKSDB_LITE
+
+#include <string>
+#include <vector>
+
+#include "rocksdb/comparator.h"
+#include "rocksdb/db.h"
+#include "rocksdb/utilities/stackable_db.h"
+#include "rocksdb/utilities/transaction.h"
+
+// Database with Transaction support.
+//
+// See transaction.h and examples/transaction_example.cc
+
+namespace rocksdb {
+
+class TransactionDBMutexFactory;
+
+struct TransactionDBOptions {
+  // Specifies the maximum number of keys that can be locked at the same time
+  // per column family.
+  // If the number of locked keys is greater than max_num_locks, transaction
+  // writes (or GetForUpdate) will return an error.
+  // If this value is not positive, no limit will be enforced.
+  int64_t max_num_locks = -1;
+
+  // Increasing this value will increase the concurrency by dividing the lock
+  // table (per column family) into more sub-tables, each with their own
+  // separate mutex.
+  size_t num_stripes = 16;
+
+  // If positive, specifies the default wait timeout in milliseconds when
+  // a transaction attempts to lock a key if not specified by
+  // TransactionOptions::lock_timeout.
+  //
+  // If 0, no waiting is done if a lock cannot instantly be acquired.
+  // If negative, there is no timeout. Not using a timeout is not recommended
+  // as it can lead to deadlocks. Currently, there is no deadlock-detection to
+  // recover from a deadlock.
+  int64_t transaction_lock_timeout = 1000;  // 1 second
+
+  // If positive, specifies the wait timeout in milliseconds when writing a key
+  // OUTSIDE of a transaction (i.e. by calling DB::Put(), Merge(), Delete(), or
+  // Write() directly).
+  // If 0, no waiting is done if a lock cannot instantly be acquired.
+  // If negative, there is no timeout and the write will block indefinitely
+  // when acquiring a lock.
+  //
+  // Not using a timeout can lead to deadlocks. Currently, there
+  // is no deadlock-detection to recover from a deadlock. While DB writes
+  // cannot deadlock with other DB writes, they can deadlock with a transaction.
+  // A negative timeout should only be used if all transactions have a small
+  // expiration set.
+  int64_t default_lock_timeout = 1000;  // 1 second
+
+  // If set, the TransactionDB will use this implementation of a mutex and
+  // condition variable for all transaction locking instead of the default
+  // mutex/condvar implementation.
+  std::shared_ptr<TransactionDBMutexFactory> custom_mutex_factory;
+};
+
+struct TransactionOptions {
+  // Setting set_snapshot=true is the same as calling
+  // Transaction::SetSnapshot().
+  bool set_snapshot = false;
+
+  // TODO(agiardullo): TransactionDB does not yet support comparators that
+  // allow two non-equal keys to be equivalent, i.e. cmp->Compare(a,b) should
+  // only return 0 if a.compare(b) returns 0.
+
+  // If positive, specifies the wait timeout in milliseconds when
+  // a transaction attempts to lock a key.
+  //
+  // If 0, no waiting is done if a lock cannot instantly be acquired.
+  // If negative, TransactionDBOptions::transaction_lock_timeout will be used.
+  int64_t lock_timeout = -1;
+
+  // Expiration duration in milliseconds. If non-negative, transactions that
+  // last longer than this many milliseconds will fail to commit. If not set,
+  // a forgotten transaction that is never committed, rolled back, or deleted
+  // will never relinquish any locks it holds. This could prevent keys from
+  // being written by other writers.
+  int64_t expiration = -1;
+};
+
+class TransactionDB : public StackableDB {
+ public:
+  // Open a TransactionDB similar to DB::Open().
+  static Status Open(const Options& options, const TransactionDBOptions& txn_db_options, const std::string& dbname, TransactionDB** dbptr);
+
+  static Status Open(const DBOptions& db_options, const TransactionDBOptions& txn_db_options, const std::string& dbname, const std::vector<ColumnFamilyDescriptor>& column_families, std::vector<ColumnFamilyHandle*>* handles, TransactionDB** dbptr);
+
+  virtual ~TransactionDB() {}
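Putting the pieces together, a sketch of the usual open/lock/commit cycle using BeginTransaction() declared just below; the database path and key names are hypothetical, and error handling is abbreviated:

#include <cassert>
#include <string>
#include "rocksdb/utilities/transaction_db.h"

void LockAndCommit() {
  rocksdb::Options options;
  options.create_if_missing = true;
  rocksdb::TransactionDBOptions txn_db_options;  // defaults: 1 s lock timeouts

  rocksdb::TransactionDB* txn_db = nullptr;
  rocksdb::Status s = rocksdb::TransactionDB::Open(options, txn_db_options, "/tmp/testdb", &txn_db);
  assert(s.ok());

  rocksdb::Transaction* txn = txn_db->BeginTransaction(rocksdb::WriteOptions());
  std::string value;
  s = txn->GetForUpdate(rocksdb::ReadOptions(), "counter", &value);  // locks "counter"
  if (s.ok() || s.IsNotFound()) {
    s = txn->Put("counter", "1");
  }
  if (s.ok()) {
    s = txn->Commit();  // may return Busy()/TimedOut()/Expired(), see above
  }
  delete txn;
  delete txn_db;
}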
+
+  // Starts a new Transaction.
+  //
+  // Caller is responsible for deleting the returned transaction when no
+  // longer needed.
+  //
+  // If old_txn is not null, BeginTransaction will reuse this Transaction
+  // handle instead of allocating a new one. This is an optimization to avoid
+  // extra allocations when repeatedly creating transactions.
+  virtual Transaction* BeginTransaction(const WriteOptions& write_options, const TransactionOptions& txn_options = TransactionOptions(), Transaction* old_txn = nullptr) = 0;
+
+  virtual Transaction* GetTransactionByName(const TransactionName& name) = 0;
+  virtual void GetAllPreparedTransactions(std::vector<Transaction*>* trans) = 0;
+
+ protected:
+  // To create a TransactionDB, call Open()
+  explicit TransactionDB(DB* db) : StackableDB(db) {}
+
+ private:
+  // No copying allowed
+  TransactionDB(const TransactionDB&);
+  void operator=(const TransactionDB&);
+};
+
+}  // namespace rocksdb
+
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/include/rocksdb/utilities/transaction_db_mutex.h b/external/rocksdb/include/rocksdb/utilities/transaction_db_mutex.h
new file mode 100755
index 0000000000..cedf542955
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/transaction_db_mutex.h
@@ -0,0 +1,92 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+#ifndef ROCKSDB_LITE
+
+#include <memory>
+
+#include "rocksdb/status.h"
+
+namespace rocksdb {
+
+// The TransactionDBMutex and TransactionDBCondVar APIs allow applications to
+// implement custom mutexes and condition variables to be used by a
+// TransactionDB when locking keys.
+//
+// To open a TransactionDB with a custom TransactionDBMutexFactory, set
+// TransactionDBOptions.custom_mutex_factory.
+
+class TransactionDBMutex {
+ public:
+  virtual ~TransactionDBMutex() {}
+
+  // Attempt to acquire lock. Return OK on success, or other Status on failure.
+  // If returned status is OK, TransactionDB will eventually call UnLock().
+  virtual Status Lock() = 0;
+
+  // Attempt to acquire lock. If timeout is non-negative, operation may be
+  // failed after this many microseconds.
+  // Returns OK on success,
+  // TimedOut if timed out,
+  // or other Status on failure.
+  // If returned status is OK, TransactionDB will eventually call UnLock().
+  virtual Status TryLockFor(int64_t timeout_time) = 0;
+
+  // Unlock Mutex that was successfully locked by Lock() or TryLockFor()
+  virtual void UnLock() = 0;
+};
+
+class TransactionDBCondVar {
+ public:
+  virtual ~TransactionDBCondVar() {}
+
+  // Block current thread until condition variable is notified by a call to
+  // Notify() or NotifyAll(). Wait() will be called with mutex locked.
+  // Returns OK if notified.
+  // Returns non-OK if TransactionDB should stop waiting and fail the operation.
+  // May return OK spuriously even if not notified.
+  virtual Status Wait(std::shared_ptr<TransactionDBMutex> mutex) = 0;
+
+  // Block current thread until condition variable is notified by a call to
+  // Notify() or NotifyAll(), or if the timeout is reached.
+  // Wait() will be called with mutex locked.
+  //
+  // If timeout is non-negative, operation should be failed after this many
+  // microseconds.
+  // If implementing a custom version of this class, the implementation may
+  // choose to ignore the timeout.
+  //
+  // Returns OK if notified.
+  // Returns TimedOut if timeout is reached.
+  // Returns other status if TransactionDB should otherwise stop waiting and
+  // fail the operation.
+  // May return OK spuriously even if not notified.
+  virtual Status WaitFor(std::shared_ptr<TransactionDBMutex> mutex, int64_t timeout_time) = 0;
+
+  // If any threads are waiting on *this, unblock at least one of the
+  // waiting threads.
+  virtual void Notify() = 0;
+
+  // Unblocks all threads waiting on *this.
+  virtual void NotifyAll() = 0;
+};
+
+// Factory class that can allocate mutexes and condition variables.
+class TransactionDBMutexFactory {
+ public:
+  // Create a TransactionDBMutex object.
+  virtual std::shared_ptr<TransactionDBMutex> AllocateMutex() = 0;
+
+  // Create a TransactionDBCondVar object.
+  virtual std::shared_ptr<TransactionDBCondVar> AllocateCondVar() = 0;
+
+  virtual ~TransactionDBMutexFactory() {}
+};
+
+}  // namespace rocksdb
+
+#endif  // ROCKSDB_LITE
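A custom lock implementation only has to satisfy the three interfaces above. A rough sketch on top of the standard library, similar in spirit to the built-in default; since the contract permits spurious wakeups, no predicate needs to be tracked:

#include <chrono>
#include <condition_variable>
#include <memory>
#include <mutex>
#include "rocksdb/utilities/transaction_db_mutex.h"

class StdMutex : public rocksdb::TransactionDBMutex {
 public:
  rocksdb::Status Lock() override {
    mutex_.lock();
    return rocksdb::Status::OK();
  }
  rocksdb::Status TryLockFor(int64_t timeout_time) override {
    if (timeout_time < 0) return Lock();  // negative timeout: block indefinitely
    return mutex_.try_lock_for(std::chrono::microseconds(timeout_time))
               ? rocksdb::Status::OK()
               : rocksdb::Status::TimedOut();
  }
  void UnLock() override { mutex_.unlock(); }
  std::timed_mutex mutex_;
};

class StdCondVar : public rocksdb::TransactionDBCondVar {
 public:
  rocksdb::Status Wait(std::shared_ptr<rocksdb::TransactionDBMutex> mutex) override {
    auto* m = static_cast<StdMutex*>(mutex.get());
    std::unique_lock<std::timed_mutex> lock(m->mutex_, std::adopt_lock);
    cv_.wait(lock);
    lock.release();  // the caller still holds the mutex on return
    return rocksdb::Status::OK();
  }
  rocksdb::Status WaitFor(std::shared_ptr<rocksdb::TransactionDBMutex> mutex, int64_t timeout_time) override {
    auto* m = static_cast<StdMutex*>(mutex.get());
    std::unique_lock<std::timed_mutex> lock(m->mutex_, std::adopt_lock);
    auto r = cv_.wait_for(lock, std::chrono::microseconds(timeout_time));
    lock.release();
    return r == std::cv_status::timeout ? rocksdb::Status::TimedOut() : rocksdb::Status::OK();
  }
  void Notify() override { cv_.notify_one(); }
  void NotifyAll() override { cv_.notify_all(); }
  std::condition_variable_any cv_;
};

class StdMutexFactory : public rocksdb::TransactionDBMutexFactory {
 public:
  std::shared_ptr<rocksdb::TransactionDBMutex> AllocateMutex() override { return std::make_shared<StdMutex>(); }
  std::shared_ptr<rocksdb::TransactionDBCondVar> AllocateCondVar() override { return std::make_shared<StdCondVar>(); }
};

Such a factory would be installed through TransactionDBOptions::custom_mutex_factory before TransactionDB::Open().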
diff --git a/external/rocksdb/include/rocksdb/utilities/utility_db.h b/external/rocksdb/include/rocksdb/utilities/utility_db.h
new file mode 100755
index 0000000000..a34a638980
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/utility_db.h
@@ -0,0 +1,35 @@
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#ifndef ROCKSDB_LITE
+#include <vector>
+#include <string>
+
+#include "rocksdb/utilities/stackable_db.h"
+#include "rocksdb/utilities/db_ttl.h"
+#include "rocksdb/db.h"
+
+namespace rocksdb {
+
+// Please don't use this class. It's deprecated.
+class UtilityDB {
+ public:
+  // This function is here only for backwards compatibility. Please use the
+  // functions defined in DBWithTTL (rocksdb/utilities/db_ttl.h)
+  // (deprecated)
+#if defined(__GNUC__) || defined(__clang__)
+  __attribute__((deprecated))
+#elif _WIN32
+  __declspec(deprecated)
+#endif
+  static Status OpenTtlDB(const Options& options, const std::string& name, StackableDB** dbptr, int32_t ttl = 0, bool read_only = false);
+};
+
+}  // namespace rocksdb
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/include/rocksdb/utilities/write_batch_with_index.h b/external/rocksdb/include/rocksdb/utilities/write_batch_with_index.h
new file mode 100755
index 0000000000..ccfd67e5ea
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/utilities/write_batch_with_index.h
@@ -0,0 +1,205 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// A WriteBatchWithIndex with a binary searchable index built for all the keys
+// inserted.
+#pragma once
+
+#ifndef ROCKSDB_LITE
+
+#include <string>
+
+#include "rocksdb/comparator.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/status.h"
+#include "rocksdb/write_batch.h"
+#include "rocksdb/write_batch_base.h"
+
+namespace rocksdb {
+
+class ColumnFamilyHandle;
+class Comparator;
+class DB;
+struct ReadOptions;
+struct DBOptions;
+
+enum WriteType {
+  kPutRecord,
+  kMergeRecord,
+  kDeleteRecord,
+  kSingleDeleteRecord,
+  kLogDataRecord,
+  kXIDRecord,
+};
+
+// An entry for a Put, Merge, Delete, or SingleDelete record in a write batch.
+// Used in WBWIIterator.
+struct WriteEntry {
+  WriteType type;
+  Slice key;
+  Slice value;
+};
+
+// Iterator of one column family out of a WriteBatchWithIndex.
+class WBWIIterator {
+ public:
+  virtual ~WBWIIterator() {}
+
+  virtual bool Valid() const = 0;
+
+  virtual void SeekToFirst() = 0;
+
+  virtual void SeekToLast() = 0;
+
+  virtual void Seek(const Slice& key) = 0;
+
+  virtual void Next() = 0;
+
+  virtual void Prev() = 0;
+
+  // The returned WriteEntry is only valid until the next mutation of the
+  // WriteBatchWithIndex.
+  virtual WriteEntry Entry() const = 0;
+
+  virtual Status status() const = 0;
+};
+
+// A WriteBatchWithIndex with a binary searchable index built for all the keys
+// inserted.
+// In Put(), Merge(), Delete(), or SingleDelete(), the same function of the
+// wrapped WriteBatch will be called. At the same time, indexes will be built.
+// By calling GetWriteBatch(), a user will get the WriteBatch for the data
+// they inserted, which can be used for DB::Write().
+// A user can call NewIterator() to create an iterator.
+class WriteBatchWithIndex : public WriteBatchBase {
+ public:
+  // backup_index_comparator: the backup comparator used to compare keys
+  //                          within the same column family. If a column family
+  //                          is not given in the interface, or we can't find a
+  //                          column family from the column family handle
+  //                          passed in, backup_index_comparator will be used
+  //                          for the column family.
+  // reserved_bytes: reserved bytes in underlying WriteBatch
+  // overwrite_key: if true, overwrite the key in the index when inserting
+  //                the same key as previously, so iterator will never
+  //                show two entries with the same key.
+  explicit WriteBatchWithIndex(const Comparator* backup_index_comparator = BytewiseComparator(), size_t reserved_bytes = 0, bool overwrite_key = false);
+  virtual ~WriteBatchWithIndex();
+
+  using WriteBatchBase::Put;
+  void Put(ColumnFamilyHandle* column_family, const Slice& key, const Slice& value) override;
+
+  void Put(const Slice& key, const Slice& value) override;
+
+  using WriteBatchBase::Merge;
+  void Merge(ColumnFamilyHandle* column_family, const Slice& key, const Slice& value) override;
+
+  void Merge(const Slice& key, const Slice& value) override;
+
+  using WriteBatchBase::Delete;
+  void Delete(ColumnFamilyHandle* column_family, const Slice& key) override;
+  void Delete(const Slice& key) override;
+
+  using WriteBatchBase::SingleDelete;
+  void SingleDelete(ColumnFamilyHandle* column_family, const Slice& key) override;
+  void SingleDelete(const Slice& key) override;
+
+  using WriteBatchBase::PutLogData;
+  void PutLogData(const Slice& blob) override;
+
+  using WriteBatchBase::Clear;
+  void Clear() override;
+
+  using WriteBatchBase::GetWriteBatch;
+  WriteBatch* GetWriteBatch() override;
+
+  // Create an iterator of a column family. User can call iterator.Seek() to
+  // search to the next entry of or after a key. Keys will be iterated in the
+  // order given by index_comparator. For multiple updates on the same key,
+  // each update will be returned as a separate entry, in the order of update
+  // time.
+  //
+  // The returned iterator should be deleted by the caller.
+  WBWIIterator* NewIterator(ColumnFamilyHandle* column_family);
+  // Create an iterator of the default column family.
+  WBWIIterator* NewIterator();
+
+  // Will create a new Iterator that will use WBWIIterator as a delta and
+  // base_iterator as base.
+  //
+  // This function is only supported if the WriteBatchWithIndex was
+  // constructed with overwrite_key=true.
+  //
+  // The returned iterator should be deleted by the caller.
+ // The base_iterator is now 'owned' by the returned iterator. Deleting the + // returned iterator will also delete the base_iterator. + Iterator* NewIteratorWithBase(ColumnFamilyHandle* column_family, + Iterator* base_iterator); + // default column family + Iterator* NewIteratorWithBase(Iterator* base_iterator); + + // Similar to DB::Get() but will only read the key from this batch. + // If the batch does not have enough data to resolve Merge operations, + // MergeInProgress status may be returned. + Status GetFromBatch(ColumnFamilyHandle* column_family, + const DBOptions& options, const Slice& key, + std::string* value); + + // Similar to previous function but does not require a column_family. + // Note: An InvalidArgument status will be returned if there are any Merge + // operators for this key. Use previous method instead. + Status GetFromBatch(const DBOptions& options, const Slice& key, + std::string* value) { + return GetFromBatch(nullptr, options, key, value); + } + + // Similar to DB::Get() but will also read writes from this batch. + // + // This function will query both this batch and the DB and then merge + // the results using the DB's merge operator (if the batch contains any + // merge requests). + // + // Setting read_options.snapshot will affect what is read from the DB + // but will NOT change which keys are read from the batch (the keys in + // this batch do not yet belong to any snapshot and will be fetched + // regardless). + Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options, + const Slice& key, std::string* value); + Status GetFromBatchAndDB(DB* db, const ReadOptions& read_options, + ColumnFamilyHandle* column_family, const Slice& key, + std::string* value); + + // Records the state of the batch for future calls to RollbackToSavePoint(). + // May be called multiple times to set multiple save points. + void SetSavePoint() override; + + // Remove all entries in this batch (Put, Merge, Delete, SingleDelete, + // PutLogData) since the most recent call to SetSavePoint() and removes the + // most recent save point. + // If there is no previous call to SetSavePoint(), behaves the same as + // Clear(). + // + // Calling RollbackToSavePoint invalidates any open iterators on this batch. + // + // Returns Status::OK() on success, + // Status::NotFound() if no previous call to SetSavePoint(), + // or other Status on corruption. + Status RollbackToSavePoint() override; + + private: + struct Rep; + Rep* rep; +}; + +} // namespace rocksdb + +#endif // !ROCKSDB_LITE diff --git a/external/rocksdb/include/rocksdb/version.h b/external/rocksdb/include/rocksdb/version.h new file mode 100755 index 0000000000..d9cc91fae1 --- /dev/null +++ b/external/rocksdb/include/rocksdb/version.h @@ -0,0 +1,16 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#pragma once + +#define ROCKSDB_MAJOR 4 +#define ROCKSDB_MINOR 11 +#define ROCKSDB_PATCH 2 + +// Do not use these. We made the mistake of declaring macros starting with +// double underscore. Now we have to live with our choice. 
We'll deprecate these
+// at some point.
+#define __ROCKSDB_MAJOR__ ROCKSDB_MAJOR
+#define __ROCKSDB_MINOR__ ROCKSDB_MINOR
+#define __ROCKSDB_PATCH__ ROCKSDB_PATCH
diff --git a/external/rocksdb/include/rocksdb/wal_filter.h b/external/rocksdb/include/rocksdb/wal_filter.h
new file mode 100755
index 0000000000..131fe87e7c
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/wal_filter.h
@@ -0,0 +1,101 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+#include <map>
+#include <string>
+
+namespace rocksdb {
+
+class WriteBatch;
+
+// WalFilter allows an application to inspect write-ahead-log (WAL)
+// records or modify their processing on recovery.
+// Please see the details below.
+class WalFilter {
+ public:
+  enum class WalProcessingOption {
+    // Continue processing as usual
+    kContinueProcessing = 0,
+    // Ignore the current record but continue processing of log(s)
+    kIgnoreCurrentRecord = 1,
+    // Stop replay of logs and discard logs.
+    // Logs won't be replayed on subsequent recovery.
+    kStopReplay = 2,
+    // Corrupted record detected by filter
+    kCorruptedRecord = 3,
+    // Marker for enum count
+    kWalProcessingOptionMax = 4
+  };
+
+  virtual ~WalFilter() {}
+
+  // Provide a ColumnFamily->LogNumber map to the filter
+  // so that the filter can determine whether a log number applies to a given
+  // column family (i.e. that log hasn't been flushed to SST already for the
+  // column family).
+  // We also pass in a name->id map, as only the name is known during
+  // recovery (handles are opened post-recovery),
+  // while write batch callbacks happen in terms of column family id.
+  //
+  // @params cf_lognumber_map column_family_id to lognumber map
+  // @params cf_name_id_map   column_family_name to column_family_id map
+  virtual void ColumnFamilyLogNumberMap(const std::map<uint32_t, uint64_t>& cf_lognumber_map, const std::map<std::string, uint32_t>& cf_name_id_map) {}
+
+  // LogRecord is invoked for each log record encountered in all the logs
+  // during replay on recovery. This method can be used to:
+  //   * inspect the record (using the batch parameter)
+  //   * ignore the current record
+  //     (by returning WalProcessingOption::kIgnoreCurrentRecord)
+  //   * report a corrupted record
+  //     (by returning WalProcessingOption::kCorruptedRecord)
+  //   * stop log replay
+  //     (by returning kStopReplay) - please note that this implies
+  //     discarding the logs from the current record onwards.
+  //
+  // @params log_number     log_number of the current log.
+  //                        The filter might use this to determine if the log
+  //                        record is applicable to a certain column family.
+  // @params log_file_name  log file name - only for informational purposes
+  // @params batch          batch encountered in the log during recovery
+  // @params new_batch      new_batch to populate if the filter wants to change
+  //                        the batch (for example to filter some records out,
+  //                        or alter some records).
+  //                        Please note that the new batch MUST NOT contain
+  //                        more records than the original, else recovery would
+  //                        fail.
+  // @params batch_changed  Whether the batch was changed by the filter.
+  //                        It must be set to true if new_batch was populated,
+  //                        else new_batch has no effect.
+  // @returns Processing option for the current record.
+  //          Please see the WalProcessingOption enum above for details.
+  virtual WalProcessingOption LogRecordFound(unsigned long long log_number, const std::string& log_file_name, const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) {
+    // Default implementation falls back to older function for compatibility
+    return LogRecord(batch, new_batch, batch_changed);
+  }
+
+  // Please see the comments for LogRecord above. This function is for
+  // compatibility only and contains a subset of parameters.
+  // New code should use the function above.
+  virtual WalProcessingOption LogRecord(const WriteBatch& batch, WriteBatch* new_batch, bool* batch_changed) const {
+    return WalProcessingOption::kContinueProcessing;
+  }
+
+  // Returns a name that identifies this WAL filter.
+  // The name will be printed to the LOG file on start up for diagnosis.
+  virtual const char* Name() const = 0;
+};
+
+}  // namespace rocksdb
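A concrete filter only needs Name() plus one of the two hooks above. An illustrative sketch that skips every WAL record during recovery; the class name is hypothetical, and it would be wired up through the DB options' wal_filter pointer (where available in this build) before opening the database:

#include "rocksdb/wal_filter.h"

class SkipAllWalFilter : public rocksdb::WalFilter {
 public:
  WalProcessingOption LogRecord(const rocksdb::WriteBatch& /*batch*/,
                                rocksdb::WriteBatch* /*new_batch*/,
                                bool* /*batch_changed*/) const override {
    return WalProcessingOption::kIgnoreCurrentRecord;  // drop this record, keep replaying
  }
  const char* Name() const override { return "SkipAllWalFilter"; }
};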
diff --git a/external/rocksdb/include/rocksdb/write_batch.h b/external/rocksdb/include/rocksdb/write_batch.h
new file mode 100755
index 0000000000..be6374cb39
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/write_batch.h
@@ -0,0 +1,275 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// WriteBatch holds a collection of updates to apply atomically to a DB.
+//
+// The updates are applied in the order in which they are added
+// to the WriteBatch. For example, the value of "key" will be "v3"
+// after the following batch is written:
+//
+//    batch.Put("key", "v1");
+//    batch.Delete("key");
+//    batch.Put("key", "v2");
+//    batch.Put("key", "v3");
+//
+// Multiple threads can invoke const methods on a WriteBatch without
+// external synchronization, but if any of the threads may call a
+// non-const method, all threads accessing the same WriteBatch must use
+// external synchronization.
+
+#ifndef STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
+#define STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
+
+#include <atomic>
+#include <stack>
+#include <string>
+#include <stdint.h>
+#include "rocksdb/status.h"
+#include "rocksdb/write_batch_base.h"
+
+namespace rocksdb {
+
+class Slice;
+class ColumnFamilyHandle;
+struct SavePoints;
+struct SliceParts;
+
+class WriteBatch : public WriteBatchBase {
+ public:
+  explicit WriteBatch(size_t reserved_bytes = 0);
+  ~WriteBatch();
+
+  using WriteBatchBase::Put;
+  // Store the mapping "key->value" in the database.
+  void Put(ColumnFamilyHandle* column_family, const Slice& key, const Slice& value) override;
+  void Put(const Slice& key, const Slice& value) override { Put(nullptr, key, value); }
+
+  // Variant of Put() that gathers output like writev(2). The key and value
+  // that will be written to the database are concatenations of arrays of
+  // slices.
+  void Put(ColumnFamilyHandle* column_family, const SliceParts& key, const SliceParts& value) override;
+  void Put(const SliceParts& key, const SliceParts& value) override { Put(nullptr, key, value); }
+
+  using WriteBatchBase::Delete;
+  // If the database contains a mapping for "key", erase it. Else do nothing.
+  void Delete(ColumnFamilyHandle* column_family, const Slice& key) override;
+  void Delete(const Slice& key) override { Delete(nullptr, key); }
+
+  // variant that takes SliceParts
+  void Delete(ColumnFamilyHandle* column_family, const SliceParts& key) override;
+  void Delete(const SliceParts& key) override { Delete(nullptr, key); }
+
+  using WriteBatchBase::SingleDelete;
+  // WriteBatch implementation of DB::SingleDelete(). See db.h.
+  void SingleDelete(ColumnFamilyHandle* column_family, const Slice& key) override;
+  void SingleDelete(const Slice& key) override { SingleDelete(nullptr, key); }
+
+  // variant that takes SliceParts
+  void SingleDelete(ColumnFamilyHandle* column_family, const SliceParts& key) override;
+  void SingleDelete(const SliceParts& key) override { SingleDelete(nullptr, key); }
+
+  using WriteBatchBase::Merge;
+  // Merge "value" with the existing value of "key" in the database.
+  // "key->merge(existing, value)"
+  void Merge(ColumnFamilyHandle* column_family, const Slice& key, const Slice& value) override;
+  void Merge(const Slice& key, const Slice& value) override { Merge(nullptr, key, value); }
+
+  // variant that takes SliceParts
+  void Merge(ColumnFamilyHandle* column_family, const SliceParts& key, const SliceParts& value) override;
+  void Merge(const SliceParts& key, const SliceParts& value) override { Merge(nullptr, key, value); }
+
+  using WriteBatchBase::PutLogData;
+  // Append a blob of arbitrary size to the records in this batch. The blob will
+  // be stored in the transaction log but not in any other file. In particular,
+  // it will not be persisted to the SST files. When iterating over this
+  // WriteBatch, WriteBatch::Handler::LogData will be called with the contents
+  // of the blob as it is encountered. Blobs, puts, deletes, and merges will be
+  // encountered in the same order in which they were inserted. The blob will
+  // NOT consume sequence number(s) and will NOT increase the count of the batch.
+  //
+  // Example application: add timestamps to the transaction log for use in
+  // replication.
+  void PutLogData(const Slice& blob) override;
+
+  using WriteBatchBase::Clear;
+  // Clear all updates buffered in this batch.
+  void Clear() override;
+
+  // Records the state of the batch for future calls to RollbackToSavePoint().
+  // May be called multiple times to set multiple save points.
+  void SetSavePoint() override;
+
+  // Remove all entries in this batch (Put, Merge, Delete, PutLogData) since the
+  // most recent call to SetSavePoint() and removes the most recent save point.
+  // If there is no previous call to SetSavePoint(), Status::NotFound()
+  // will be returned.
+  // Otherwise returns Status::OK().
+  Status RollbackToSavePoint() override;
+
+  // Support for iterating over the contents of a batch.
+  class Handler {
+   public:
+    virtual ~Handler();
+    // The default implementation will just call Put without column family for
+    // backwards compatibility. If the column family is not default,
+    // the function is a no-op.
+    virtual Status PutCF(uint32_t column_family_id, const Slice& key, const Slice& value) {
+      if (column_family_id == 0) {
+        // Put() historically doesn't return status. We didn't want to be
+        // backwards incompatible so we didn't change the return status
+        // (this is a public API).
We do an ordinary get and return Status::OK() + Put(key, value); + return Status::OK(); + } + return Status::InvalidArgument( + "non-default column family and PutCF not implemented"); + } + virtual void Put(const Slice& /*key*/, const Slice& /*value*/) {} + + virtual Status DeleteCF(uint32_t column_family_id, const Slice& key) { + if (column_family_id == 0) { + Delete(key); + return Status::OK(); + } + return Status::InvalidArgument( + "non-default column family and DeleteCF not implemented"); + } + virtual void Delete(const Slice& /*key*/) {} + + virtual Status SingleDeleteCF(uint32_t column_family_id, const Slice& key) { + if (column_family_id == 0) { + SingleDelete(key); + return Status::OK(); + } + return Status::InvalidArgument( + "non-default column family and SingleDeleteCF not implemented"); + } + virtual void SingleDelete(const Slice& /*key*/) {} + + // Merge and LogData are not pure virtual. Otherwise, we would break + // existing clients of Handler on a source code level. The default + // implementation of Merge does nothing. + virtual Status MergeCF(uint32_t column_family_id, const Slice& key, + const Slice& value) { + if (column_family_id == 0) { + Merge(key, value); + return Status::OK(); + } + return Status::InvalidArgument( + "non-default column family and MergeCF not implemented"); + } + virtual void Merge(const Slice& /*key*/, const Slice& /*value*/) {} + + // The default implementation of LogData does nothing. + virtual void LogData(const Slice& blob); + + virtual Status MarkBeginPrepare() { + return Status::InvalidArgument("MarkBeginPrepare() handler not defined."); + } + + virtual Status MarkEndPrepare(const Slice& xid) { + return Status::InvalidArgument("MarkEndPrepare() handler not defined."); + } + + virtual Status MarkRollback(const Slice& xid) { + return Status::InvalidArgument( + "MarkRollbackPrepare() handler not defined."); + } + + virtual Status MarkCommit(const Slice& xid) { + return Status::InvalidArgument("MarkCommit() handler not defined."); + } + + // Continue is called by WriteBatch::Iterate. If it returns false, + // iteration is halted. Otherwise, it continues iterating. The default + // implementation always returns true. + virtual bool Continue(); + }; + Status Iterate(Handler* handler) const; + + // Retrieve the serialized version of this batch. + const std::string& Data() const { return rep_; } + + // Retrieve data size of the batch. 
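Iterate() above replays a batch through a Handler, one callback per record. A minimal illustrative sketch that tallies updates:

#include "rocksdb/write_batch.h"

class CountingHandler : public rocksdb::WriteBatch::Handler {
 public:
  void Put(const rocksdb::Slice& /*key*/, const rocksdb::Slice& /*value*/) override { ++puts; }
  void Delete(const rocksdb::Slice& /*key*/) override { ++deletes; }
  int puts = 0;
  int deletes = 0;
};

void CountBatch() {
  rocksdb::WriteBatch batch;
  batch.Put("a", "1");
  batch.Delete("b");
  CountingHandler handler;
  rocksdb::Status s = batch.Iterate(&handler);  // handler.puts == 1, handler.deletes == 1
}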
+  size_t GetDataSize() const { return rep_.size(); }
+
+  // Returns the number of updates in the batch
+  int Count() const;
+
+  // Returns true if PutCF will be called during Iterate
+  bool HasPut() const;
+
+  // Returns true if DeleteCF will be called during Iterate
+  bool HasDelete() const;
+
+  // Returns true if SingleDeleteCF will be called during Iterate
+  bool HasSingleDelete() const;
+
+  // Returns true if MergeCF will be called during Iterate
+  bool HasMerge() const;
+
+  // Returns true if MarkBeginPrepare will be called during Iterate
+  bool HasBeginPrepare() const;
+
+  // Returns true if MarkEndPrepare will be called during Iterate
+  bool HasEndPrepare() const;
+
+  // Returns true if MarkCommit will be called during Iterate
+  bool HasCommit() const;
+
+  // Returns true if MarkRollback will be called during Iterate
+  bool HasRollback() const;
+
+  using WriteBatchBase::GetWriteBatch;
+  WriteBatch* GetWriteBatch() override { return this; }
+
+  // Constructor with a serialized string object
+  explicit WriteBatch(const std::string& rep);
+
+  WriteBatch(const WriteBatch& src);
+  WriteBatch(WriteBatch&& src);
+  WriteBatch& operator=(const WriteBatch& src);
+  WriteBatch& operator=(WriteBatch&& src);
+
+ private:
+  friend class WriteBatchInternal;
+  SavePoints* save_points_;
+
+  // For HasXYZ. Mutable to allow lazy computation of results
+  mutable std::atomic<uint32_t> content_flags_;
+
+  // Performs deferred computation of content_flags if necessary
+  uint32_t ComputeContentFlags() const;
+
+ protected:
+  std::string rep_;  // See comment in write_batch.cc for the format of rep_
+
+  // Intentionally copyable
+};
+
+}  // namespace rocksdb
+
+#endif  // STORAGE_ROCKSDB_INCLUDE_WRITE_BATCH_H_
diff --git a/external/rocksdb/include/rocksdb/write_batch_base.h b/external/rocksdb/include/rocksdb/write_batch_base.h
new file mode 100755
index 0000000000..86ccbaa185
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/write_batch_base.h
@@ -0,0 +1,99 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+
+namespace rocksdb {
+
+class Slice;
+class Status;
+class ColumnFamilyHandle;
+class WriteBatch;
+struct SliceParts;
+
+// Abstract base class that defines the basic interface for a write batch.
+// See WriteBatch for a basic implementation and WriteBatchWithIndex for an
+// indexed implementation.
+class WriteBatchBase {
+ public:
+  virtual ~WriteBatchBase() {}
+
+  // Store the mapping "key->value" in the database.
+  virtual void Put(ColumnFamilyHandle* column_family, const Slice& key, const Slice& value) = 0;
+  virtual void Put(const Slice& key, const Slice& value) = 0;
+
+  // Variant of Put() that gathers output like writev(2). The key and value
+  // that will be written to the database are concatenations of arrays of
+  // slices.
+  virtual void Put(ColumnFamilyHandle* column_family, const SliceParts& key, const SliceParts& value);
+  virtual void Put(const SliceParts& key, const SliceParts& value);
+
+  // Merge "value" with the existing value of "key" in the database.
+  // "key->merge(existing, value)"
+  virtual void Merge(ColumnFamilyHandle* column_family, const Slice& key, const Slice& value) = 0;
+  virtual void Merge(const Slice& key, const Slice& value) = 0;
+
+  // variant that takes SliceParts
+  virtual void Merge(ColumnFamilyHandle* column_family, const SliceParts& key, const SliceParts& value);
+  virtual void Merge(const SliceParts& key, const SliceParts& value);
+
+  // If the database contains a mapping for "key", erase it. Else do nothing.
+  virtual void Delete(ColumnFamilyHandle* column_family, const Slice& key) = 0;
+  virtual void Delete(const Slice& key) = 0;
+
+  // variant that takes SliceParts
+  virtual void Delete(ColumnFamilyHandle* column_family, const SliceParts& key);
+  virtual void Delete(const SliceParts& key);
+
+  // If the database contains a mapping for "key", erase it. Expects that the
+  // key was not overwritten. Else do nothing.
+  virtual void SingleDelete(ColumnFamilyHandle* column_family, const Slice& key) = 0;
+  virtual void SingleDelete(const Slice& key) = 0;
+
+  // variant that takes SliceParts
+  virtual void SingleDelete(ColumnFamilyHandle* column_family, const SliceParts& key);
+  virtual void SingleDelete(const SliceParts& key);
+
+  // Append a blob of arbitrary size to the records in this batch. The blob will
+  // be stored in the transaction log but not in any other file. In particular,
+  // it will not be persisted to the SST files. When iterating over this
+  // WriteBatch, WriteBatch::Handler::LogData will be called with the contents
+  // of the blob as it is encountered. Blobs, puts, deletes, and merges will be
+  // encountered in the same order in which they were inserted. The blob will
+  // NOT consume sequence number(s) and will NOT increase the count of the batch.
+  //
+  // Example application: add timestamps to the transaction log for use in
+  // replication.
+  virtual void PutLogData(const Slice& blob) = 0;
+
+  // Clear all updates buffered in this batch.
+  virtual void Clear() = 0;
+
+  // Convert this batch into a WriteBatch. This is an abstracted way of
+  // converting any WriteBatchBase (e.g. WriteBatchWithIndex) into a basic
+  // WriteBatch.
+  virtual WriteBatch* GetWriteBatch() = 0;
+
+  // Records the state of the batch for future calls to RollbackToSavePoint().
+  // May be called multiple times to set multiple save points.
+  virtual void SetSavePoint() = 0;
+
+  // Remove all entries in this batch (Put, Merge, Delete, PutLogData) since the
+  // most recent call to SetSavePoint() and removes the most recent save point.
+  // If there is no previous call to SetSavePoint(), behaves the same as
+  // Clear().
+  virtual Status RollbackToSavePoint() = 0;
+};
+
+}  // namespace rocksdb
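The savepoint pair at the end of the interface above allows part of a batch to be undone before it is handed to DB::Write(). A small illustrative sketch:

#include "rocksdb/write_batch.h"

void PartialRollback() {
  rocksdb::WriteBatch batch;
  batch.Put("k1", "v1");
  batch.SetSavePoint();
  batch.Put("k2", "v2");
  rocksdb::Status s = batch.RollbackToSavePoint();  // drops the Put of "k2"
  // batch now holds only k1 -> v1; db->Write(WriteOptions(), &batch) would
  // apply just that update.
}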
diff --git a/external/rocksdb/include/rocksdb/write_buffer_manager.h b/external/rocksdb/include/rocksdb/write_buffer_manager.h
new file mode 100755
index 0000000000..7b4f095f9e
--- /dev/null
+++ b/external/rocksdb/include/rocksdb/write_buffer_manager.h
@@ -0,0 +1,62 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// WriteBufferManager is for managing memory allocation for one or more
+// MemTables.
+
+#pragma once
+
+#include <atomic>
+#include <cstddef>
+
+namespace rocksdb {
+
+class WriteBufferManager {
+ public:
+  // _buffer_size = 0 indicates no limit. Memory won't be tracked,
+  // memory_usage() won't be valid and ShouldFlush() will always return true.
+  explicit WriteBufferManager(size_t _buffer_size)
+      : buffer_size_(_buffer_size), memory_used_(0) {}
+
+  ~WriteBufferManager() {}
+
+  bool enabled() const { return buffer_size_ != 0; }
+
+  // Only valid if enabled()
+  size_t memory_usage() const {
+    return memory_used_.load(std::memory_order_relaxed);
+  }
+  size_t buffer_size() const { return buffer_size_; }
+
+  // Should only be called from write thread
+  bool ShouldFlush() const {
+    return enabled() && memory_usage() >= buffer_size();
+  }
+
+  // Should only be called from write thread
+  void ReserveMem(size_t mem) {
+    if (enabled()) {
+      memory_used_.fetch_add(mem, std::memory_order_relaxed);
+    }
+  }
+  void FreeMem(size_t mem) {
+    if (enabled()) {
+      memory_used_.fetch_sub(mem, std::memory_order_relaxed);
+    }
+  }
+
+ private:
+  const size_t buffer_size_;
+  std::atomic<size_t> memory_used_;
+
+  // No copying allowed
+  WriteBufferManager(const WriteBufferManager&) = delete;
+  WriteBufferManager& operator=(const WriteBufferManager&) = delete;
+};
+}  // namespace rocksdb
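Before the build-system files that follow, it is worth seeing how the header-only WriteBufferManager above is meant to be used. A minimal sketch under assumed values (the 64 MB budget and both reservation sizes are invented for illustration):

    // Illustrative only: one global write-buffer budget shared by two owners.
    #include <cstdio>
    #include "rocksdb/write_buffer_manager.h"

    int main() {
      rocksdb::WriteBufferManager wbm(64u << 20);  // 64 MB shared budget

      wbm.ReserveMem(40u << 20);  // memtable A grows to 40 MB
      wbm.ReserveMem(30u << 20);  // memtable B grows to 30 MB; total 70 MB

      if (wbm.ShouldFlush()) {    // true here: 70 MB >= 64 MB
        std::printf("usage %zu over budget %zu: pick a memtable to flush\n",
                    wbm.memory_usage(), wbm.buffer_size());
        wbm.FreeMem(40u << 20);   // flushing memtable A returns its memory
      }
      return 0;
    }

Because the counter uses relaxed atomics, ShouldFlush() is a heuristic signal rather than a strict barrier, which is why the header says it should only be consulted from the write thread.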
diff --git a/external/rocksdb/java/CMakeLists.txt b/external/rocksdb/java/CMakeLists.txt
new file mode 100755
index 0000000000..5f2c942b45
--- /dev/null
+++ b/external/rocksdb/java/CMakeLists.txt
@@ -0,0 +1,165 @@
+cmake_minimum_required(VERSION 2.6)
+
+set(JNI_NATIVE_SOURCES
+  rocksjni/backupenginejni.cc
+  rocksjni/backupablejni.cc
+  rocksjni/checkpoint.cc
+  rocksjni/columnfamilyhandle.cc
+  rocksjni/compaction_filter.cc
+  rocksjni/comparator.cc
+  rocksjni/comparatorjnicallback.cc
+  rocksjni/env.cc
+  rocksjni/filter.cc
+  rocksjni/iterator.cc
+  rocksjni/loggerjnicallback.cc
+  rocksjni/memtablejni.cc
+  rocksjni/merge_operator.cc
+  rocksjni/options.cc
+  rocksjni/ratelimiterjni.cc
+  rocksjni/remove_emptyvalue_compactionfilterjni.cc
+  rocksjni/restorejni.cc
+  rocksjni/rocksjni.cc
+  rocksjni/slice.cc
+  rocksjni/snapshot.cc
+  rocksjni/statistics.cc
+  rocksjni/table.cc
+  rocksjni/transaction_log.cc
+  rocksjni/ttl.cc
+  rocksjni/write_batch.cc
+  rocksjni/writebatchhandlerjnicallback.cc
+  rocksjni/write_batch_with_index.cc
+  rocksjni/write_batch_test.cc
+)
+
+set(NATIVE_JAVA_CLASSES
+  org.rocksdb.AbstractCompactionFilter
+  org.rocksdb.AbstractComparator
+  org.rocksdb.AbstractSlice
+  org.rocksdb.BackupEngine
+  org.rocksdb.BackupableDB
+  org.rocksdb.BackupableDBOptions
+  org.rocksdb.BlockBasedTableConfig
+  org.rocksdb.BloomFilter
+  org.rocksdb.Checkpoint
+  org.rocksdb.ColumnFamilyHandle
+  org.rocksdb.ColumnFamilyOptions
+  org.rocksdb.Comparator
+  org.rocksdb.ComparatorOptions
+  org.rocksdb.DBOptions
+  org.rocksdb.DirectComparator
+  org.rocksdb.DirectSlice
+  org.rocksdb.Env
+  org.rocksdb.FlushOptions
+  org.rocksdb.Filter
+  org.rocksdb.GenericRateLimiterConfig
+  org.rocksdb.HashLinkedListMemTableConfig
+  org.rocksdb.HashSkipListMemTableConfig
+  org.rocksdb.Logger
+  org.rocksdb.MergeOperator
+  org.rocksdb.Options
+  org.rocksdb.PlainTableConfig
+  org.rocksdb.ReadOptions
+  org.rocksdb.RemoveEmptyValueCompactionFilter
+  org.rocksdb.RestoreBackupableDB
+  org.rocksdb.RestoreOptions
+  org.rocksdb.RocksDB
+  org.rocksdb.RocksEnv
+  org.rocksdb.RocksIterator
+  org.rocksdb.RocksMemEnv
+  org.rocksdb.SkipListMemTableConfig
+  org.rocksdb.Slice
+  org.rocksdb.Statistics
+  org.rocksdb.TransactionLogIterator
+  org.rocksdb.TtlDB
+  org.rocksdb.VectorMemTableConfig
+  org.rocksdb.Snapshot
+  org.rocksdb.StringAppendOperator
+  org.rocksdb.WriteBatch
+  org.rocksdb.WriteBatch.Handler
+  org.rocksdb.WriteOptions
+  org.rocksdb.WriteBatchWithIndex
+  org.rocksdb.WBWIRocksIterator
+  org.rocksdb.WriteBatchTest
+  org.rocksdb.WriteBatchTestInternalHelper
+)
+
+include_directories($ENV{JAVA_HOME}/include)
+include_directories($ENV{JAVA_HOME}/include/win32)
+include_directories(${PROJECT_SOURCE_DIR}/java)
+
+set(JAVA_TEST_LIBDIR ${PROJECT_SOURCE_DIR}/java/test-libs)
+set(JAVA_TMP_JAR ${JAVA_TEST_LIBDIR}/tmp.jar)
+set(JAVA_JUNIT_JAR ${JAVA_TEST_LIBDIR}/junit-4.12.jar)
+set(JAVA_HAMCR_JAR ${JAVA_TEST_LIBDIR}/hamcrest-core-1.3.jar)
+set(JAVA_MOCKITO_JAR ${JAVA_TEST_LIBDIR}/mockito-all-1.10.19.jar)
+set(JAVA_CGLIB_JAR ${JAVA_TEST_LIBDIR}/cglib-2.2.2.jar)
+set(JAVA_ASSERTJ_JAR ${JAVA_TEST_LIBDIR}/assertj-core-1.7.1.jar)
+set(JAVA_TESTCLASSPATH "${JAVA_JUNIT_JAR}\;${JAVA_HAMCR_JAR}\;${JAVA_MOCKITO_JAR}\;${JAVA_CGLIB_JAR}\;${JAVA_ASSERTJ_JAR}")
+
+if(NOT EXISTS ${PROJECT_SOURCE_DIR}/java/classes)
+  execute_process(COMMAND mkdir ${PROJECT_SOURCE_DIR}/java/classes)
+endif()
+
+if(NOT EXISTS ${JAVA_TEST_LIBDIR})
+  execute_process(COMMAND mkdir ${JAVA_TEST_LIBDIR})
+endif()
+
+if(NOT EXISTS ${JAVA_JUNIT_JAR})
+  message("Downloading ${JAVA_JUNIT_JAR}")
+  file(DOWNLOAD http://search.maven.org/remotecontent?filepath=junit/junit/4.12/junit-4.12.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
+  list(GET downloadStatus 0 error_code)
+  if(NOT error_code EQUAL 0)
+    message(FATAL_ERROR "Failed downloading ${JAVA_JUNIT_JAR}")
+  endif()
+  file(RENAME ${JAVA_TMP_JAR} ${JAVA_JUNIT_JAR})
+endif()
+if(NOT EXISTS ${JAVA_HAMCR_JAR})
+  message("Downloading ${JAVA_HAMCR_JAR}")
+  file(DOWNLOAD http://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
+  list(GET downloadStatus 0 error_code)
+  if(NOT error_code EQUAL 0)
+    message(FATAL_ERROR "Failed downloading ${JAVA_HAMCR_JAR}")
+  endif()
+  file(RENAME ${JAVA_TMP_JAR} ${JAVA_HAMCR_JAR})
+endif()
+if(NOT EXISTS ${JAVA_MOCKITO_JAR})
+  message("Downloading ${JAVA_MOCKITO_JAR}")
+  file(DOWNLOAD http://search.maven.org/remotecontent?filepath=org/mockito/mockito-all/1.10.19/mockito-all-1.10.19.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
+  list(GET downloadStatus 0 error_code)
+  if(NOT error_code EQUAL 0)
+    message(FATAL_ERROR "Failed downloading ${JAVA_MOCKITO_JAR}")
+  endif()
+  file(RENAME ${JAVA_TMP_JAR} ${JAVA_MOCKITO_JAR})
+endif()
+if(NOT EXISTS ${JAVA_CGLIB_JAR})
+  message("Downloading ${JAVA_CGLIB_JAR}")
+  file(DOWNLOAD http://search.maven.org/remotecontent?filepath=cglib/cglib/2.2.2/cglib-2.2.2.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
+  list(GET downloadStatus 0 error_code)
+  if(NOT error_code EQUAL 0)
+    message(FATAL_ERROR "Failed downloading ${JAVA_CGLIB_JAR}")
+  endif()
+  file(RENAME ${JAVA_TMP_JAR} ${JAVA_CGLIB_JAR})
+endif()
+if(NOT EXISTS ${JAVA_ASSERTJ_JAR})
+  message("Downloading ${JAVA_ASSERTJ_JAR}")
+  file(DOWNLOAD http://central.maven.org/maven2/org/assertj/assertj-core/1.7.1/assertj-core-1.7.1.jar ${JAVA_TMP_JAR} STATUS downloadStatus)
+  list(GET downloadStatus 0 error_code)
+  if(NOT error_code EQUAL 0)
+    message(FATAL_ERROR "Failed downloading ${JAVA_ASSERTJ_JAR}")
+  endif()
+  file(RENAME ${JAVA_TMP_JAR} ${JAVA_ASSERTJ_JAR})
+endif()
+
+if(WIN32)
+  set(JAVAC cmd /c javac)
+  set(JAVAH cmd /c javah)
+else()
+  set(JAVAC javac)
+  set(JAVAH javah)
+endif()
+
+execute_process(COMMAND ${JAVAC} -cp ${JAVA_TESTCLASSPATH} -d ${PROJECT_SOURCE_DIR}/java/classes ${PROJECT_SOURCE_DIR}/java/src/main/java/org/rocksdb/util/*.java ${PROJECT_SOURCE_DIR}/java/src/main/java/org/rocksdb/*.java ${PROJECT_SOURCE_DIR}/java/src/test/java/org/rocksdb/*.java)
+execute_process(COMMAND ${JAVAH} -cp ${PROJECT_SOURCE_DIR}/java/classes -d ${PROJECT_SOURCE_DIR}/java/include -jni ${NATIVE_JAVA_CLASSES})
+add_library(rocksdbjni${ARTIFACT_SUFFIX} SHARED ${JNI_NATIVE_SOURCES})
+set_target_properties(rocksdbjni${ARTIFACT_SUFFIX} PROPERTIES COMPILE_FLAGS "/Fd${CMAKE_CFG_INTDIR}/rocksdbjni${ARTIFACT_SUFFIX}.pdb")
+target_link_libraries(rocksdbjni${ARTIFACT_SUFFIX} rocksdblib${ARTIFACT_SUFFIX} ${LIBS})
diff --git a/external/rocksdb/java/HISTORY-JAVA.md b/external/rocksdb/java/HISTORY-JAVA.md
new file mode 100755
index 0000000000..731886a610
--- /dev/null
+++ b/external/rocksdb/java/HISTORY-JAVA.md
@@ -0,0 +1,86 @@
+# RocksJava Change Log
+
+## 3.13 (8/4/2015)
+### New Features
+* Exposed BackupEngine API.
+* Added CappedPrefixExtractor support. To use such an extractor, simply call useCappedPrefixExtractor in either Options or ColumnFamilyOptions.
+* Added RemoveEmptyValueCompactionFilter.
+
+## 3.10.0 (3/24/2015)
+### New Features
+* Added compression per level API.
+* MemEnv is now available in RocksJava via the RocksMemEnv class.
+* lz4 compression is now included in the rocksjava static library when running `make rocksdbjavastatic`.
+
+### Public API Changes
+* Overflowing a size_t when setting rocksdb options now throws an IllegalArgumentException, which removes the necessity for a developer to catch these Exceptions explicitly.
+* The set and get functions for tableCacheRemoveScanCountLimit are deprecated.
+
+
+## By 01/31/2015
+### New Features
+* WriteBatchWithIndex support.
+* Iterator support for WriteBatch and WriteBatchWithIndex.
+* GetUpdatesSince support.
+* Snapshots now carry information about the related sequence number.
+* TTL DB support.
+
+## By 11/14/2014
+### New Features
+* Full support for Column Family.
+* Slice and Comparator support.
+* Default merge operator support.
+* RateLimiter support.
+
+## By 06/15/2014
+### New Features
+* Added basic Java binding for rocksdb::Env such that multiple RocksDB instances can share the same thread pool and environment.
+* Added RestoreBackupableDB.
+
+## By 05/30/2014
+### Internal Framework Improvement
+* Added disOwnNativeHandle to RocksObject, which allows a RocksObject to give up the ownership of its native handle. This method is useful when sharing and transferring the ownership of RocksDB C++ resources.
+
+## By 05/15/2014
+### New Features
+* Added RocksObject --- the base class of all RocksDB classes which holds some RocksDB resources in the C++ side.
+* Use environmental variable JAVA_HOME in Makefile for RocksJava.
+### Public API changes
+* Renamed org.rocksdb.Iterator to org.rocksdb.RocksIterator to avoid a potential conflict with the Java built-in Iterator.
+
+## By 04/30/2014
+### New Features
+* Added Java binding for MultiGet.
+* Added static method RocksDB.loadLibrary(), which loads necessary library files.
+* Added Java bindings for 60+ rocksdb::Options.
+* Added Java binding for BloomFilter.
+* Added Java binding for ReadOptions.
+* Added Java binding for memtables.
+* Added Java binding for sst formats.
+* Added Java binding for RocksDB Iterator which enables sequential scan operation.
+* Added Java binding for Statistics.
+* Added Java binding for BackupableDB.
+ +### DB Benchmark +* Added filluniquerandom, readseq benchmark. +* 70+ command-line options. +* Enabled BloomFilter configuration. + +## By 04/15/2014 +### New Features +* Added Java binding for WriteOptions. +* Added Java binding for WriteBatch, which enables batch-write. +* Added Java binding for rocksdb::Options. +* Added Java binding for block cache. +* Added Java version DB Benchmark. + +### DB Benchmark +* Added readwhilewriting benchmark. + +### Internal Framework Improvement +* Avoid a potential byte-array-copy between c++ and Java in RocksDB.get. +* Added SizeUnit in org.rocksdb.util to store consts like KB and GB. + +### 03/28/2014 +* RocksJava project started. +* Added Java binding for RocksDB, which supports Open, Close, Get and Put. diff --git a/external/rocksdb/java/Makefile b/external/rocksdb/java/Makefile new file mode 100755 index 0000000000..dcfcc0d309 --- /dev/null +++ b/external/rocksdb/java/Makefile @@ -0,0 +1,191 @@ +NATIVE_JAVA_CLASSES = org.rocksdb.AbstractCompactionFilter\ + org.rocksdb.AbstractComparator\ + org.rocksdb.AbstractSlice\ + org.rocksdb.BackupEngine\ + org.rocksdb.BackupableDBOptions\ + org.rocksdb.BlockBasedTableConfig\ + org.rocksdb.BloomFilter\ + org.rocksdb.Checkpoint\ + org.rocksdb.ColumnFamilyHandle\ + org.rocksdb.ColumnFamilyOptions\ + org.rocksdb.Comparator\ + org.rocksdb.ComparatorOptions\ + org.rocksdb.DBOptions\ + org.rocksdb.DirectComparator\ + org.rocksdb.DirectSlice\ + org.rocksdb.Env\ + org.rocksdb.FlushOptions\ + org.rocksdb.Filter\ + org.rocksdb.GenericRateLimiterConfig\ + org.rocksdb.HashLinkedListMemTableConfig\ + org.rocksdb.HashSkipListMemTableConfig\ + org.rocksdb.Logger\ + org.rocksdb.MergeOperator\ + org.rocksdb.Options\ + org.rocksdb.PlainTableConfig\ + org.rocksdb.ReadOptions\ + org.rocksdb.RemoveEmptyValueCompactionFilter\ + org.rocksdb.RestoreOptions\ + org.rocksdb.RocksDB\ + org.rocksdb.RocksEnv\ + org.rocksdb.RocksIterator\ + org.rocksdb.RocksMemEnv\ + org.rocksdb.SkipListMemTableConfig\ + org.rocksdb.Slice\ + org.rocksdb.Statistics\ + org.rocksdb.TransactionLogIterator\ + org.rocksdb.TtlDB\ + org.rocksdb.VectorMemTableConfig\ + org.rocksdb.Snapshot\ + org.rocksdb.StringAppendOperator\ + org.rocksdb.WriteBatch\ + org.rocksdb.WriteBatch.Handler\ + org.rocksdb.WriteOptions\ + org.rocksdb.WriteBatchWithIndex\ + org.rocksdb.WBWIRocksIterator + +NATIVE_JAVA_TEST_CLASSES = org.rocksdb.WriteBatchTest\ + org.rocksdb.WriteBatchTestInternalHelper + +ROCKSDB_MAJOR = $(shell egrep "ROCKSDB_MAJOR.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3) +ROCKSDB_MINOR = $(shell egrep "ROCKSDB_MINOR.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3) +ROCKSDB_PATCH = $(shell egrep "ROCKSDB_PATCH.[0-9]" ../include/rocksdb/version.h | cut -d ' ' -f 3) + +NATIVE_INCLUDE = ./include +ARCH := $(shell getconf LONG_BIT) +ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-linux$(ARCH).jar +ifeq ($(PLATFORM), OS_MACOSX) +ROCKSDB_JAR = rocksdbjni-$(ROCKSDB_MAJOR).$(ROCKSDB_MINOR).$(ROCKSDB_PATCH)-osx.jar +endif + +JAVA_TESTS = org.rocksdb.BackupableDBOptionsTest\ + org.rocksdb.BackupEngineTest\ + org.rocksdb.BlockBasedTableConfigTest\ + org.rocksdb.util.BytewiseComparatorTest\ + org.rocksdb.CheckPointTest\ + org.rocksdb.ColumnFamilyOptionsTest\ + org.rocksdb.ColumnFamilyTest\ + org.rocksdb.ComparatorOptionsTest\ + org.rocksdb.ComparatorTest\ + org.rocksdb.CompressionOptionsTest\ + org.rocksdb.DBOptionsTest\ + org.rocksdb.DirectComparatorTest\ + org.rocksdb.DirectSliceTest\ + org.rocksdb.util.EnvironmentTest\ + 
org.rocksdb.FilterTest\ + org.rocksdb.FlushTest\ + org.rocksdb.InfoLogLevelTest\ + org.rocksdb.KeyMayExistTest\ + org.rocksdb.LoggerTest\ + org.rocksdb.MemTableTest\ + org.rocksdb.MergeTest\ + org.rocksdb.MixedOptionsTest\ + org.rocksdb.NativeLibraryLoaderTest\ + org.rocksdb.OptionsTest\ + org.rocksdb.PlainTableConfigTest\ + org.rocksdb.ReadOnlyTest\ + org.rocksdb.ReadOptionsTest\ + org.rocksdb.RocksDBTest\ + org.rocksdb.RocksEnvTest\ + org.rocksdb.RocksIteratorTest\ + org.rocksdb.RocksMemEnvTest\ + org.rocksdb.util.SizeUnitTest\ + org.rocksdb.SliceTest\ + org.rocksdb.SnapshotTest\ + org.rocksdb.TransactionLogIteratorTest\ + org.rocksdb.TtlDBTest\ + org.rocksdb.StatisticsCollectorTest\ + org.rocksdb.WriteBatchHandlerTest\ + org.rocksdb.WriteBatchTest\ + org.rocksdb.WriteBatchThreadedTest\ + org.rocksdb.WriteOptionsTest\ + org.rocksdb.WriteBatchWithIndexTest + +MAIN_SRC = src/main/java +TEST_SRC = src/test/java +OUTPUT = target +MAIN_CLASSES = $(OUTPUT)/classes +TEST_CLASSES = $(OUTPUT)/test-classes +JAVADOC = $(OUTPUT)/apidocs + +BENCHMARK_MAIN_SRC = benchmark/src/main/java +BENCHMARK_OUTPUT = benchmark/target +BENCHMARK_MAIN_CLASSES = $(BENCHMARK_OUTPUT)/classes + +SAMPLES_MAIN_SRC = samples/src/main/java +SAMPLES_OUTPUT = samples/target +SAMPLES_MAIN_CLASSES = $(SAMPLES_OUTPUT)/classes + +JAVA_TEST_LIBDIR = test-libs +JAVA_JUNIT_JAR = $(JAVA_TEST_LIBDIR)/junit-4.12.jar +JAVA_HAMCR_JAR = $(JAVA_TEST_LIBDIR)/hamcrest-core-1.3.jar +JAVA_MOCKITO_JAR = $(JAVA_TEST_LIBDIR)/mockito-all-1.10.19.jar +JAVA_CGLIB_JAR = $(JAVA_TEST_LIBDIR)/cglib-2.2.2.jar +JAVA_ASSERTJ_JAR = $(JAVA_TEST_LIBDIR)/assertj-core-1.7.1.jar +JAVA_TESTCLASSPATH = $(JAVA_JUNIT_JAR):$(JAVA_HAMCR_JAR):$(JAVA_MOCKITO_JAR):$(JAVA_CGLIB_JAR):$(JAVA_ASSERTJ_JAR) + +MVN_LOCAL = ~/.m2/repository + +clean: + $(AM_V_at)rm -rf include/* + $(AM_V_at)rm -rf test-libs/ + $(AM_V_at)rm -rf $(OUTPUT) + $(AM_V_at)rm -rf $(BENCHMARK_OUTPUT) + $(AM_V_at)rm -rf $(SAMPLES_OUTPUT) + + +javadocs: + $(AM_V_GEN)mkdir -p $(JAVADOC) + $(AM_V_at)javadoc -d $(JAVADOC) -sourcepath $(MAIN_SRC) -subpackages org + +javalib: java java_test javadocs + +java: + $(AM_V_GEN)mkdir -p $(MAIN_CLASSES) + $(AM_V_at)javac -d $(MAIN_CLASSES)\ + $(MAIN_SRC)/org/rocksdb/util/*.java\ + $(MAIN_SRC)/org/rocksdb/*.java + $(AM_V_at)@cp ../HISTORY.md ./HISTORY-CPP.md + $(AM_V_at)@rm -f ./HISTORY-CPP.md + $(AM_V_at)javah -cp $(MAIN_CLASSES) -d $(NATIVE_INCLUDE) -jni $(NATIVE_JAVA_CLASSES) + +sample: java + $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES) + $(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBSample.java + $(AM_V_at)@rm -rf /tmp/rocksdbjni + $(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found + java -ea -Xcheck:jni -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBSample /tmp/rocksdbjni + $(AM_V_at)@rm -rf /tmp/rocksdbjni + $(AM_V_at)@rm -rf /tmp/rocksdbjni_not_found + +column_family_sample: java + $(AM_V_GEN)mkdir -p $(SAMPLES_MAIN_CLASSES) + $(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(SAMPLES_MAIN_CLASSES) $(SAMPLES_MAIN_SRC)/RocksDBColumnFamilySample.java + $(AM_V_at)@rm -rf /tmp/rocksdbjni + java -ea -Xcheck:jni -Djava.library.path=target -cp $(MAIN_CLASSES):$(SAMPLES_MAIN_CLASSES) RocksDBColumnFamilySample /tmp/rocksdbjni + $(AM_V_at)@rm -rf /tmp/rocksdbjni + +resolve_test_deps: + test -s "$(JAVA_TEST_LIBDIR)" || mkdir -p "$(JAVA_TEST_LIBDIR)" + test -s "$(JAVA_JUNIT_JAR)" || cp $(MVN_LOCAL)/junit/junit/4.12/junit-4.12.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o $(JAVA_JUNIT_JAR) 
http://search.maven.org/remotecontent?filepath=junit/junit/4.12/junit-4.12.jar
+	test -s "$(JAVA_HAMCR_JAR)" || cp $(MVN_LOCAL)/org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o $(JAVA_HAMCR_JAR) http://search.maven.org/remotecontent?filepath=org/hamcrest/hamcrest-core/1.3/hamcrest-core-1.3.jar
+	test -s "$(JAVA_MOCKITO_JAR)" || cp $(MVN_LOCAL)/org/mockito/mockito-all/1.10.19/mockito-all-1.10.19.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o "$(JAVA_MOCKITO_JAR)" http://search.maven.org/remotecontent?filepath=org/mockito/mockito-all/1.10.19/mockito-all-1.10.19.jar
+	test -s "$(JAVA_CGLIB_JAR)" || cp $(MVN_LOCAL)/cglib/cglib/2.2.2/cglib-2.2.2.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o "$(JAVA_CGLIB_JAR)" http://search.maven.org/remotecontent?filepath=cglib/cglib/2.2.2/cglib-2.2.2.jar
+	test -s "$(JAVA_ASSERTJ_JAR)" || cp $(MVN_LOCAL)/org/assertj/assertj-core/1.7.1/assertj-core-1.7.1.jar $(JAVA_TEST_LIBDIR) || curl -k -L -o "$(JAVA_ASSERTJ_JAR)" http://central.maven.org/maven2/org/assertj/assertj-core/1.7.1/assertj-core-1.7.1.jar
+
+java_test: resolve_test_deps
+	$(AM_V_GEN)mkdir -p $(TEST_CLASSES)
+	$(AM_V_at)javac -cp $(MAIN_CLASSES):$(JAVA_TESTCLASSPATH) -d $(TEST_CLASSES)\
+		$(TEST_SRC)/org/rocksdb/test/*.java\
+		$(TEST_SRC)/org/rocksdb/util/*.java\
+		$(TEST_SRC)/org/rocksdb/*.java
+	$(AM_V_at)javah -cp $(MAIN_CLASSES):$(TEST_CLASSES) -d $(NATIVE_INCLUDE) -jni $(NATIVE_JAVA_TEST_CLASSES)
+
+test: java resolve_test_deps java_test run_test
+
+run_test:
+	java -ea -Xcheck:jni -Djava.library.path=target -cp "$(MAIN_CLASSES):$(TEST_CLASSES):$(JAVA_TESTCLASSPATH):target/*" org.rocksdb.test.RocksJunitRunner $(JAVA_TESTS)
+
+db_bench: java
+	$(AM_V_GEN)mkdir -p $(BENCHMARK_MAIN_CLASSES)
+	$(AM_V_at)javac -cp $(MAIN_CLASSES) -d $(BENCHMARK_MAIN_CLASSES) $(BENCHMARK_MAIN_SRC)/org/rocksdb/benchmark/*.java
diff --git a/external/rocksdb/java/RELEASE.md b/external/rocksdb/java/RELEASE.md
new file mode 100755
index 0000000000..cb9aaf987b
--- /dev/null
+++ b/external/rocksdb/java/RELEASE.md
@@ -0,0 +1,54 @@
+## Cross-building
+
+RocksDB can be built as a single self-contained cross-platform JAR. The cross-platform JAR can be used on any 64-bit OSX system, 32-bit Linux system, or 64-bit Linux system.
+
+Building a cross-platform JAR requires:
+
+ * [Vagrant](https://www.vagrantup.com/)
+ * [Virtualbox](https://www.virtualbox.org/)
+ * A Mac OSX machine that can compile RocksDB.
+ * Java 7 set as JAVA_HOME.
+
+Once you have these items, run this make command from RocksDB's root source directory:
+
+    make jclean clean rocksdbjavastaticrelease
+
+This command will build RocksDB natively on OSX, and will then spin up two Vagrant Virtualbox Ubuntu images to build RocksDB for both 32-bit and 64-bit Linux.
+
+You can find all native binaries and JARs in the java/target directory upon completion:
+
+    librocksdbjni-linux32.so
+    librocksdbjni-linux64.so
+    librocksdbjni-osx.jnilib
+    rocksdbjni-3.5.0-javadoc.jar
+    rocksdbjni-3.5.0-linux32.jar
+    rocksdbjni-3.5.0-linux64.jar
+    rocksdbjni-3.5.0-osx.jar
+    rocksdbjni-3.5.0-sources.jar
+    rocksdbjni-3.5.0.jar
+
+## Maven publication
+
+Set ~/.m2/settings.xml to contain:
+
+    <settings>
+      <servers>
+        <server>
+          <id>sonatype-nexus-staging</id>
+          <username>your-sonatype-jira-username</username>
+          <password>your-sonatype-jira-password</password>
+        </server>
+      </servers>
+    </settings>
+
+From RocksDB's root directory, first build the Java static JARs:
+
+    make jclean clean rocksdbjavastaticpublish
+
+This command will [stage the JAR artifacts on the Sonatype staging repository](http://central.sonatype.org/pages/manual-staging-bundle-creation-and-deployment.html). To release the staged artifacts:
+
+1. Go to [https://oss.sonatype.org/#stagingRepositories](https://oss.sonatype.org/#stagingRepositories) and search for "rocksdb" in the upper right-hand search box.
+2. Select the rocksdb staging repository, and inspect its contents.
+3. If all is well, follow [these steps](https://oss.sonatype.org/#stagingRepositories) to close the repository and release it.
+
+After the release has occurred, the artifacts will be synced to Maven central within 24-48 hours.
diff --git a/external/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java b/external/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
new file mode 100755
index 0000000000..1f805b1e20
--- /dev/null
+++ b/external/rocksdb/java/benchmark/src/main/java/org/rocksdb/benchmark/DbBenchmark.java
@@ -0,0 +1,1668 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+/**
+ * Copyright (C) 2011 the original author or authors.
+ * See the notice.md file distributed with this work for additional
+ * information regarding copyright ownership.
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */ +package org.rocksdb.benchmark; + +import java.io.IOException; +import java.lang.Runnable; +import java.lang.Math; +import java.io.File; +import java.lang.reflect.Constructor; +import java.lang.reflect.InvocationTargetException; +import java.nio.ByteBuffer; +import java.nio.file.Files; +import java.util.Collection; +import java.util.Date; +import java.util.EnumMap; +import java.util.List; +import java.util.Map; +import java.util.Random; +import java.util.concurrent.TimeUnit; +import java.util.Arrays; +import java.util.ArrayList; +import java.util.concurrent.Callable; +import java.util.concurrent.Executors; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.Future; +import java.util.concurrent.TimeUnit; +import org.rocksdb.*; +import org.rocksdb.RocksMemEnv; +import org.rocksdb.util.SizeUnit; + +class Stats { + int id_; + long start_; + long finish_; + double seconds_; + long done_; + long found_; + long lastOpTime_; + long nextReport_; + long bytes_; + StringBuilder message_; + boolean excludeFromMerge_; + + // TODO(yhchiang): use the following arguments: + // (Long)Flag.stats_interval + // (Integer)Flag.stats_per_interval + + Stats(int id) { + id_ = id; + nextReport_ = 100; + done_ = 0; + bytes_ = 0; + seconds_ = 0; + start_ = System.nanoTime(); + lastOpTime_ = start_; + finish_ = start_; + found_ = 0; + message_ = new StringBuilder(""); + excludeFromMerge_ = false; + } + + void merge(final Stats other) { + if (other.excludeFromMerge_) { + return; + } + + done_ += other.done_; + found_ += other.found_; + bytes_ += other.bytes_; + seconds_ += other.seconds_; + if (other.start_ < start_) start_ = other.start_; + if (other.finish_ > finish_) finish_ = other.finish_; + + // Just keep the messages from one thread + if (message_.length() == 0) { + message_ = other.message_; + } + } + + void stop() { + finish_ = System.nanoTime(); + seconds_ = (double) (finish_ - start_) * 1e-9; + } + + void addMessage(String msg) { + if (message_.length() > 0) { + message_.append(" "); + } + message_.append(msg); + } + + void setId(int id) { id_ = id; } + void setExcludeFromMerge() { excludeFromMerge_ = true; } + + void finishedSingleOp(int bytes) { + done_++; + lastOpTime_ = System.nanoTime(); + bytes_ += bytes; + if (done_ >= nextReport_) { + if (nextReport_ < 1000) { + nextReport_ += 100; + } else if (nextReport_ < 5000) { + nextReport_ += 500; + } else if (nextReport_ < 10000) { + nextReport_ += 1000; + } else if (nextReport_ < 50000) { + nextReport_ += 5000; + } else if (nextReport_ < 100000) { + nextReport_ += 10000; + } else if (nextReport_ < 500000) { + nextReport_ += 50000; + } else { + nextReport_ += 100000; + } + System.err.printf("... Task %s finished %d ops%30s\r", id_, done_, ""); + } + } + + void report(String name) { + // Pretend at least one op was done in case we are running a benchmark + // that does not call FinishedSingleOp(). + if (done_ < 1) done_ = 1; + + StringBuilder extra = new StringBuilder(""); + if (bytes_ > 0) { + // Rate is computed on actual elapsed time, not the sum of per-thread + // elapsed times. + double elapsed = (finish_ - start_) * 1e-9; + extra.append(String.format("%6.1f MB/s", (bytes_ / 1048576.0) / elapsed)); + } + extra.append(message_.toString()); + double elapsed = (finish_ - start_); + double throughput = (double) done_ / (elapsed * 1e-9); + + System.out.format("%-12s : %11.3f micros/op %d ops/sec;%s%s\n", + name, (elapsed * 1e-6) / done_, + (long) throughput, (extra.length() == 0 ? 
"" : " "), extra.toString()); + } +} + +public class DbBenchmark { + enum Order { + SEQUENTIAL, + RANDOM + } + + enum DBState { + FRESH, + EXISTING + } + + static { + RocksDB.loadLibrary(); + } + + abstract class BenchmarkTask implements Callable { + // TODO(yhchiang): use (Integer)Flag.perf_level. + public BenchmarkTask( + int tid, long randSeed, long numEntries, long keyRange) { + tid_ = tid; + rand_ = new Random(randSeed + tid * 1000); + numEntries_ = numEntries; + keyRange_ = keyRange; + stats_ = new Stats(tid); + } + + @Override public Stats call() throws RocksDBException { + stats_.start_ = System.nanoTime(); + runTask(); + stats_.finish_ = System.nanoTime(); + return stats_; + } + + abstract protected void runTask() throws RocksDBException; + + protected int tid_; + protected Random rand_; + protected long numEntries_; + protected long keyRange_; + protected Stats stats_; + + protected void getFixedKey(byte[] key, long sn) { + generateKeyFromLong(key, sn); + } + + protected void getRandomKey(byte[] key, long range) { + generateKeyFromLong(key, Math.abs(rand_.nextLong() % range)); + } + } + + abstract class WriteTask extends BenchmarkTask { + public WriteTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch) { + super(tid, randSeed, numEntries, keyRange); + writeOpt_ = writeOpt; + entriesPerBatch_ = entriesPerBatch; + maxWritesPerSecond_ = -1; + } + + public WriteTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch, long maxWritesPerSecond) { + super(tid, randSeed, numEntries, keyRange); + writeOpt_ = writeOpt; + entriesPerBatch_ = entriesPerBatch; + maxWritesPerSecond_ = maxWritesPerSecond; + } + + @Override public void runTask() throws RocksDBException { + if (numEntries_ != DbBenchmark.this.num_) { + stats_.message_.append(String.format(" (%d ops)", numEntries_)); + } + byte[] key = new byte[keySize_]; + byte[] value = new byte[valueSize_]; + + try { + if (entriesPerBatch_ == 1) { + for (long i = 0; i < numEntries_; ++i) { + getKey(key, i, keyRange_); + DbBenchmark.this.gen_.generate(value); + db_.put(writeOpt_, key, value); + stats_.finishedSingleOp(keySize_ + valueSize_); + writeRateControl(i); + if (isFinished()) { + return; + } + } + } else { + for (long i = 0; i < numEntries_; i += entriesPerBatch_) { + WriteBatch batch = new WriteBatch(); + for (long j = 0; j < entriesPerBatch_; j++) { + getKey(key, i + j, keyRange_); + DbBenchmark.this.gen_.generate(value); + batch.put(key, value); + stats_.finishedSingleOp(keySize_ + valueSize_); + } + db_.write(writeOpt_, batch); + batch.dispose(); + writeRateControl(i); + if (isFinished()) { + return; + } + } + } + } catch (InterruptedException e) { + // thread has been terminated. 
+ } + } + + protected void writeRateControl(long writeCount) + throws InterruptedException { + if (maxWritesPerSecond_ <= 0) return; + long minInterval = + writeCount * TimeUnit.SECONDS.toNanos(1) / maxWritesPerSecond_; + long interval = System.nanoTime() - stats_.start_; + if (minInterval - interval > TimeUnit.MILLISECONDS.toNanos(1)) { + TimeUnit.NANOSECONDS.sleep(minInterval - interval); + } + } + + abstract protected void getKey(byte[] key, long id, long range); + protected WriteOptions writeOpt_; + protected long entriesPerBatch_; + protected long maxWritesPerSecond_; + } + + class WriteSequentialTask extends WriteTask { + public WriteSequentialTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch) { + super(tid, randSeed, numEntries, keyRange, + writeOpt, entriesPerBatch); + } + public WriteSequentialTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch, + long maxWritesPerSecond) { + super(tid, randSeed, numEntries, keyRange, + writeOpt, entriesPerBatch, + maxWritesPerSecond); + } + @Override protected void getKey(byte[] key, long id, long range) { + getFixedKey(key, id); + } + } + + class WriteRandomTask extends WriteTask { + public WriteRandomTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch) { + super(tid, randSeed, numEntries, keyRange, + writeOpt, entriesPerBatch); + } + public WriteRandomTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch, + long maxWritesPerSecond) { + super(tid, randSeed, numEntries, keyRange, + writeOpt, entriesPerBatch, + maxWritesPerSecond); + } + @Override protected void getKey(byte[] key, long id, long range) { + getRandomKey(key, range); + } + } + + class WriteUniqueRandomTask extends WriteTask { + static final int MAX_BUFFER_SIZE = 10000000; + public WriteUniqueRandomTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch) { + super(tid, randSeed, numEntries, keyRange, + writeOpt, entriesPerBatch); + initRandomKeySequence(); + } + public WriteUniqueRandomTask( + int tid, long randSeed, long numEntries, long keyRange, + WriteOptions writeOpt, long entriesPerBatch, + long maxWritesPerSecond) { + super(tid, randSeed, numEntries, keyRange, + writeOpt, entriesPerBatch, + maxWritesPerSecond); + initRandomKeySequence(); + } + @Override protected void getKey(byte[] key, long id, long range) { + generateKeyFromLong(key, nextUniqueRandom()); + } + + protected void initRandomKeySequence() { + bufferSize_ = MAX_BUFFER_SIZE; + if (bufferSize_ > keyRange_) { + bufferSize_ = (int) keyRange_; + } + currentKeyCount_ = bufferSize_; + keyBuffer_ = new long[MAX_BUFFER_SIZE]; + for (int k = 0; k < bufferSize_; ++k) { + keyBuffer_[k] = k; + } + } + + /** + * Semi-randomly return the next unique key. It is guaranteed to be + * fully random if keyRange_ <= MAX_BUFFER_SIZE. + */ + long nextUniqueRandom() { + if (bufferSize_ == 0) { + System.err.println("bufferSize_ == 0."); + return 0; + } + int r = rand_.nextInt(bufferSize_); + // randomly pick one from the keyBuffer + long randKey = keyBuffer_[r]; + if (currentKeyCount_ < keyRange_) { + // if we have not yet inserted all keys, insert next new key to [r]. + keyBuffer_[r] = currentKeyCount_++; + } else { + // move the last element to [r] and decrease the size by 1. 
+ keyBuffer_[r] = keyBuffer_[--bufferSize_]; + } + return randKey; + } + + int bufferSize_; + long currentKeyCount_; + long[] keyBuffer_; + } + + class ReadRandomTask extends BenchmarkTask { + public ReadRandomTask( + int tid, long randSeed, long numEntries, long keyRange) { + super(tid, randSeed, numEntries, keyRange); + } + @Override public void runTask() throws RocksDBException { + byte[] key = new byte[keySize_]; + byte[] value = new byte[valueSize_]; + for (long i = 0; i < numEntries_; i++) { + getRandomKey(key, keyRange_); + int len = db_.get(key, value); + if (len != RocksDB.NOT_FOUND) { + stats_.found_++; + stats_.finishedSingleOp(keySize_ + valueSize_); + } else { + stats_.finishedSingleOp(keySize_); + } + if (isFinished()) { + return; + } + } + } + } + + class ReadSequentialTask extends BenchmarkTask { + public ReadSequentialTask( + int tid, long randSeed, long numEntries, long keyRange) { + super(tid, randSeed, numEntries, keyRange); + } + @Override public void runTask() throws RocksDBException { + RocksIterator iter = db_.newIterator(); + long i; + for (iter.seekToFirst(), i = 0; + iter.isValid() && i < numEntries_; + iter.next(), ++i) { + stats_.found_++; + stats_.finishedSingleOp(iter.key().length + iter.value().length); + if (isFinished()) { + iter.dispose(); + return; + } + } + iter.dispose(); + } + } + + public DbBenchmark(Map flags) throws Exception { + benchmarks_ = (List) flags.get(Flag.benchmarks); + num_ = (Integer) flags.get(Flag.num); + threadNum_ = (Integer) flags.get(Flag.threads); + reads_ = (Integer) (flags.get(Flag.reads) == null ? + flags.get(Flag.num) : flags.get(Flag.reads)); + keySize_ = (Integer) flags.get(Flag.key_size); + valueSize_ = (Integer) flags.get(Flag.value_size); + compressionRatio_ = (Double) flags.get(Flag.compression_ratio); + useExisting_ = (Boolean) flags.get(Flag.use_existing_db); + randSeed_ = (Long) flags.get(Flag.seed); + databaseDir_ = (String) flags.get(Flag.db); + writesPerSeconds_ = (Integer) flags.get(Flag.writes_per_second); + memtable_ = (String) flags.get(Flag.memtablerep); + maxWriteBufferNumber_ = (Integer) flags.get(Flag.max_write_buffer_number); + prefixSize_ = (Integer) flags.get(Flag.prefix_size); + keysPerPrefix_ = (Integer) flags.get(Flag.keys_per_prefix); + hashBucketCount_ = (Long) flags.get(Flag.hash_bucket_count); + usePlainTable_ = (Boolean) flags.get(Flag.use_plain_table); + useMemenv_ = (Boolean) flags.get(Flag.use_mem_env); + flags_ = flags; + finishLock_ = new Object(); + // options.setPrefixSize((Integer)flags_.get(Flag.prefix_size)); + // options.setKeysPerPrefix((Long)flags_.get(Flag.keys_per_prefix)); + compressionType_ = (String) flags.get(Flag.compression_type); + compression_ = CompressionType.NO_COMPRESSION; + try { + if (compressionType_!=null) { + final CompressionType compressionType = + CompressionType.getCompressionType(compressionType_); + if (compressionType != null && + compressionType != CompressionType.NO_COMPRESSION) { + System.loadLibrary(compressionType.getLibraryName()); + } + + } + } catch (UnsatisfiedLinkError e) { + System.err.format("Unable to load %s library:%s%n" + + "No compression is used.%n", + compressionType_, e.toString()); + compressionType_ = "none"; + } + gen_ = new RandomGenerator(randSeed_, compressionRatio_); + } + + private void prepareReadOptions(ReadOptions options) { + options.setVerifyChecksums((Boolean)flags_.get(Flag.verify_checksum)); + options.setTailing((Boolean)flags_.get(Flag.use_tailing_iterator)); + } + + private void prepareWriteOptions(WriteOptions options) 
{ + options.setSync((Boolean)flags_.get(Flag.sync)); + options.setDisableWAL((Boolean)flags_.get(Flag.disable_wal)); + } + + private void prepareOptions(Options options) throws RocksDBException { + if (!useExisting_) { + options.setCreateIfMissing(true); + } else { + options.setCreateIfMissing(false); + } + if (useMemenv_) { + options.setEnv(new RocksMemEnv()); + } + switch (memtable_) { + case "skip_list": + options.setMemTableConfig(new SkipListMemTableConfig()); + break; + case "vector": + options.setMemTableConfig(new VectorMemTableConfig()); + break; + case "hash_linkedlist": + options.setMemTableConfig( + new HashLinkedListMemTableConfig() + .setBucketCount(hashBucketCount_)); + options.useFixedLengthPrefixExtractor(prefixSize_); + break; + case "hash_skiplist": + case "prefix_hash": + options.setMemTableConfig( + new HashSkipListMemTableConfig() + .setBucketCount(hashBucketCount_)); + options.useFixedLengthPrefixExtractor(prefixSize_); + break; + default: + System.err.format( + "unable to detect the specified memtable, " + + "use the default memtable factory %s%n", + options.memTableFactoryName()); + break; + } + if (usePlainTable_) { + options.setTableFormatConfig( + new PlainTableConfig().setKeySize(keySize_)); + } else { + BlockBasedTableConfig table_options = new BlockBasedTableConfig(); + table_options.setBlockSize((Long)flags_.get(Flag.block_size)) + .setBlockCacheSize((Long)flags_.get(Flag.cache_size)) + .setCacheNumShardBits( + (Integer)flags_.get(Flag.cache_numshardbits)); + options.setTableFormatConfig(table_options); + } + options.setWriteBufferSize( + (Long)flags_.get(Flag.write_buffer_size)); + options.setMaxWriteBufferNumber( + (Integer)flags_.get(Flag.max_write_buffer_number)); + options.setMaxBackgroundCompactions( + (Integer)flags_.get(Flag.max_background_compactions)); + options.getEnv().setBackgroundThreads( + (Integer)flags_.get(Flag.max_background_compactions)); + options.setMaxBackgroundFlushes( + (Integer)flags_.get(Flag.max_background_flushes)); + options.setMaxOpenFiles( + (Integer)flags_.get(Flag.open_files)); + options.setDisableDataSync( + (Boolean)flags_.get(Flag.disable_data_sync)); + options.setUseFsync( + (Boolean)flags_.get(Flag.use_fsync)); + options.setWalDir( + (String)flags_.get(Flag.wal_dir)); + options.setDeleteObsoleteFilesPeriodMicros( + (Integer)flags_.get(Flag.delete_obsolete_files_period_micros)); + options.setTableCacheNumshardbits( + (Integer)flags_.get(Flag.table_cache_numshardbits)); + options.setAllowMmapReads( + (Boolean)flags_.get(Flag.mmap_read)); + options.setAllowMmapWrites( + (Boolean)flags_.get(Flag.mmap_write)); + options.setAdviseRandomOnOpen( + (Boolean)flags_.get(Flag.advise_random_on_open)); + options.setUseAdaptiveMutex( + (Boolean)flags_.get(Flag.use_adaptive_mutex)); + options.setBytesPerSync( + (Long)flags_.get(Flag.bytes_per_sync)); + options.setBloomLocality( + (Integer)flags_.get(Flag.bloom_locality)); + options.setMinWriteBufferNumberToMerge( + (Integer)flags_.get(Flag.min_write_buffer_number_to_merge)); + options.setMemtablePrefixBloomSizeRatio((Double) flags_.get(Flag.memtable_bloom_size_ratio)); + options.setNumLevels( + (Integer)flags_.get(Flag.num_levels)); + options.setTargetFileSizeBase( + (Integer)flags_.get(Flag.target_file_size_base)); + options.setTargetFileSizeMultiplier( + (Integer)flags_.get(Flag.target_file_size_multiplier)); + options.setMaxBytesForLevelBase( + (Integer)flags_.get(Flag.max_bytes_for_level_base)); + options.setMaxBytesForLevelMultiplier( + 
        (Integer)flags_.get(Flag.max_bytes_for_level_multiplier));
+    options.setLevelZeroStopWritesTrigger(
+        (Integer)flags_.get(Flag.level0_stop_writes_trigger));
+    options.setLevelZeroSlowdownWritesTrigger(
+        (Integer)flags_.get(Flag.level0_slowdown_writes_trigger));
+    options.setLevelZeroFileNumCompactionTrigger(
+        (Integer)flags_.get(Flag.level0_file_num_compaction_trigger));
+    options.setSoftRateLimit(
+        (Double)flags_.get(Flag.soft_rate_limit));
+    options.setHardRateLimit(
+        (Double)flags_.get(Flag.hard_rate_limit));
+    options.setRateLimitDelayMaxMilliseconds(
+        (Integer)flags_.get(Flag.rate_limit_delay_max_milliseconds));
+    options.setMaxGrandparentOverlapFactor(
+        (Integer)flags_.get(Flag.max_grandparent_overlap_factor));
+    options.setDisableAutoCompactions(
+        (Boolean)flags_.get(Flag.disable_auto_compactions));
+    options.setSourceCompactionFactor(
+        (Integer)flags_.get(Flag.source_compaction_factor));
+    options.setMaxSuccessiveMerges(
+        (Integer)flags_.get(Flag.max_successive_merges));
+    options.setWalTtlSeconds((Long)flags_.get(Flag.wal_ttl_seconds));
+    options.setWalSizeLimitMB((Long)flags_.get(Flag.wal_size_limit_MB));
+    if(flags_.get(Flag.java_comparator) != null) {
+      options.setComparator(
+          (AbstractComparator)flags_.get(Flag.java_comparator));
+    }
+
+    /* TODO(yhchiang): enable the following parameters
+    options.setCompressionType((String)flags_.get(Flag.compression_type));
+    options.setCompressionLevel((Integer)flags_.get(Flag.compression_level));
+    options.setMinLevelToCompress((Integer)flags_.get(Flag.min_level_to_compress));
+    options.setHdfs((String)flags_.get(Flag.hdfs)); // env
+    options.setStatistics((Boolean)flags_.get(Flag.statistics));
+    options.setUniversalSizeRatio(
+        (Integer)flags_.get(Flag.universal_size_ratio));
+    options.setUniversalMinMergeWidth(
+        (Integer)flags_.get(Flag.universal_min_merge_width));
+    options.setUniversalMaxMergeWidth(
+        (Integer)flags_.get(Flag.universal_max_merge_width));
+    options.setUniversalMaxSizeAmplificationPercent(
+        (Integer)flags_.get(Flag.universal_max_size_amplification_percent));
+    options.setUniversalCompressionSizePercent(
+        (Integer)flags_.get(Flag.universal_compression_size_percent));
+    // TODO(yhchiang): add RocksDB.openForReadOnly() to enable Flag.readonly
+    // TODO(yhchiang): enable Flag.merge_operator by switch
+    options.setAccessHintOnCompactionStart(
+        (String)flags_.get(Flag.compaction_fadvice));
+    // available values of fadvice are "NONE", "NORMAL", "SEQUENTIAL", "WILLNEED" for fadvice
+    */
+  }
+
+  private void run() throws RocksDBException {
+    if (!useExisting_) {
+      destroyDb();
+    }
+    Options options = new Options();
+    prepareOptions(options);
+    open(options);
+
+    printHeader(options);
+
+    for (String benchmark : benchmarks_) {
+      List<Callable<Stats>> tasks = new ArrayList<Callable<Stats>>();
+      List<Callable<Stats>> bgTasks = new ArrayList<Callable<Stats>>();
+      WriteOptions writeOpt = new WriteOptions();
+      prepareWriteOptions(writeOpt);
+      ReadOptions readOpt = new ReadOptions();
+      prepareReadOptions(readOpt);
+      int currentTaskId = 0;
+      boolean known = true;
+
+      switch (benchmark) {
+        case "fillseq":
+          tasks.add(new WriteSequentialTask(
+              currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
+          break;
+        case "fillbatch":
+          tasks.add(new WriteRandomTask(
+              currentTaskId++, randSeed_, num_ / 1000, num_, writeOpt, 1000));
+          break;
+        case "fillrandom":
+          tasks.add(new WriteRandomTask(
+              currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
+          break;
+        case "filluniquerandom":
+          tasks.add(new WriteUniqueRandomTask(
+              currentTaskId++, randSeed_, num_, num_, writeOpt, 1));
+          break;
+        case "fillsync":
+          writeOpt.setSync(true);
+          tasks.add(new WriteRandomTask(
+              currentTaskId++, randSeed_, num_ / 1000, num_ / 1000,
+              writeOpt, 1));
+          break;
+        case "readseq":
+          for (int t = 0; t < threadNum_; ++t) {
+            tasks.add(new ReadSequentialTask(
+                currentTaskId++, randSeed_, reads_ / threadNum_, num_));
+          }
+          break;
+        case "readrandom":
+          for (int t = 0; t < threadNum_; ++t) {
+            tasks.add(new ReadRandomTask(
+                currentTaskId++, randSeed_, reads_ / threadNum_, num_));
+          }
+          break;
+        case "readwhilewriting":
+          WriteTask writeTask = new WriteRandomTask(
+              -1, randSeed_, Long.MAX_VALUE, num_, writeOpt, 1, writesPerSeconds_);
+          writeTask.stats_.setExcludeFromMerge();
+          bgTasks.add(writeTask);
+          for (int t = 0; t < threadNum_; ++t) {
+            tasks.add(new ReadRandomTask(
+                currentTaskId++, randSeed_, reads_ / threadNum_, num_));
+          }
+          break;
+        case "readhot":
+          for (int t = 0; t < threadNum_; ++t) {
+            tasks.add(new ReadRandomTask(
+                currentTaskId++, randSeed_, reads_ / threadNum_, num_ / 100));
+          }
+          break;
+        case "delete":
+          destroyDb();
+          open(options);
+          break;
+        default:
+          known = false;
+          System.err.println("Unknown benchmark: " + benchmark);
+          break;
+      }
+      if (known) {
+        ExecutorService executor = Executors.newCachedThreadPool();
+        ExecutorService bgExecutor = Executors.newCachedThreadPool();
+        try {
+          // measure only the main executor time
+          List<Future<Stats>> bgResults = new ArrayList<Future<Stats>>();
+          for (Callable<Stats> bgTask : bgTasks) {
+            bgResults.add(bgExecutor.submit(bgTask));
+          }
+          start();
+          List<Future<Stats>> results = executor.invokeAll(tasks);
+          executor.shutdown();
+          boolean finished = executor.awaitTermination(10, TimeUnit.SECONDS);
+          if (!finished) {
+            System.out.format(
+                "Benchmark %s was not finished before timeout.",
+                benchmark);
+            executor.shutdownNow();
+          }
+          setFinished(true);
+          bgExecutor.shutdown();
+          finished = bgExecutor.awaitTermination(10, TimeUnit.SECONDS);
+          if (!finished) {
+            System.out.format(
+                "Benchmark %s was not finished before timeout.",
+                benchmark);
+            bgExecutor.shutdownNow();
+          }
+
+          stop(benchmark, results, currentTaskId);
+        } catch (InterruptedException e) {
+          System.err.println(e);
+        }
+      }
+      writeOpt.dispose();
+      readOpt.dispose();
+    }
+    options.dispose();
+    db_.close();
+  }
+
+  private void printHeader(Options options) {
+    int kKeySize = 16;
+    System.out.printf("Keys: %d bytes each\n", kKeySize);
+    System.out.printf("Values: %d bytes each (%d bytes after compression)\n",
+        valueSize_,
+        (int) (valueSize_ * compressionRatio_ + 0.5));
+    System.out.printf("Entries: %d\n", num_);
+    System.out.printf("RawSize: %.1f MB (estimated)\n",
+        ((double)(kKeySize + valueSize_) * num_) / SizeUnit.MB);
+    System.out.printf("FileSize: %.1f MB (estimated)\n",
+        (((kKeySize + valueSize_ * compressionRatio_) * num_) / SizeUnit.MB));
+    System.out.format("Memtable Factory: %s%n", options.memTableFactoryName());
+    System.out.format("Prefix: %d bytes%n", prefixSize_);
+    System.out.format("Compression: %s%n", compressionType_);
+    printWarnings();
+    System.out.printf("------------------------------------------------\n");
+  }
+
+  void printWarnings() {
+    boolean assertsEnabled = false;
+    assert assertsEnabled = true; // Intentional side effect!!!
+    if (assertsEnabled) {
+      System.out.printf(
+          "WARNING: Assertions are enabled; benchmarks unnecessarily slow\n");
+    }
+  }
+
+  private void open(Options options) throws RocksDBException {
+    System.out.println("Using database directory: " + databaseDir_);
+    db_ = RocksDB.open(options, databaseDir_);
+  }
+
+  private void start() {
+    setFinished(false);
+    startTime_ = System.nanoTime();
+  }
+
+  private void stop(
+      String benchmark, List<Future<Stats>> results, int concurrentThreads) {
+    long endTime = System.nanoTime();
+    double elapsedSeconds =
+        1.0d * (endTime - startTime_) / TimeUnit.SECONDS.toNanos(1);
+
+    Stats stats = new Stats(-1);
+    int taskFinishedCount = 0;
+    for (Future<Stats> result : results) {
+      if (result.isDone()) {
+        try {
+          Stats taskStats = result.get(3, TimeUnit.SECONDS);
+          if (!result.isCancelled()) {
+            taskFinishedCount++;
+          }
+          stats.merge(taskStats);
+        } catch (Exception e) {
+          // then it's not successful, the output will indicate this
+        }
+      }
+    }
+    String extra = "";
+    if (benchmark.indexOf("read") >= 0) {
+      extra = String.format(" %d / %d found; ", stats.found_, stats.done_);
+    } else {
+      extra = String.format(" %d ops done; ", stats.done_);
+    }
+
+    System.out.printf(
+        "%-16s : %11.5f micros/op; %6.1f MB/s;%s %d / %d task(s) finished.\n",
+        benchmark, elapsedSeconds / stats.done_ * 1e6,
+        (stats.bytes_ / 1048576.0) / elapsedSeconds, extra,
+        taskFinishedCount, concurrentThreads);
+  }
+
+  public void generateKeyFromLong(byte[] slice, long n) {
+    assert(n >= 0);
+    int startPos = 0;
+
+    if (keysPerPrefix_ > 0) {
+      long numPrefix = (num_ + keysPerPrefix_ - 1) / keysPerPrefix_;
+      long prefix = n % numPrefix;
+      int bytesToFill = Math.min(prefixSize_, 8);
+      for (int i = 0; i < bytesToFill; ++i) {
+        slice[i] = (byte) (prefix % 256);
+        prefix /= 256;
+      }
+      for (int i = 8; i < bytesToFill; ++i) {
+        slice[i] = '0';
+      }
+      startPos = bytesToFill;
+    }
+
+    for (int i = slice.length - 1; i >= startPos; --i) {
+      slice[i] = (byte) ('0' + (n % 10));
+      n /= 10;
+    }
+  }
+
+  private void destroyDb() {
+    if (db_ != null) {
+      db_.close();
+    }
+    // TODO(yhchiang): develop our own FileUtil
+    // FileUtil.deleteDir(databaseDir_);
+  }
+
+  private void printStats() {
+  }
+
+  static void printHelp() {
+    System.out.println("usage:");
+    for (Flag flag : Flag.values()) {
+      System.out.format("  --%s%n\t%s%n",
+          flag.name(),
+          flag.desc());
+      if (flag.getDefaultValue() != null) {
+        System.out.format("\tDEFAULT: %s%n",
+            flag.getDefaultValue().toString());
+      }
+    }
+  }
+
+  public static void main(String[] args) throws Exception {
+    Map<Flag, Object> flags = new EnumMap<Flag, Object>(Flag.class);
+    for (Flag flag : Flag.values()) {
+      if (flag.getDefaultValue() != null) {
+        flags.put(flag, flag.getDefaultValue());
+      }
+    }
+    for (String arg : args) {
+      boolean valid = false;
+      if (arg.equals("--help") || arg.equals("-h")) {
+        printHelp();
+        System.exit(0);
+      }
+      if (arg.startsWith("--")) {
+        try {
+          String[] parts = arg.substring(2).split("=");
+          if (parts.length >= 1) {
+            Flag key = Flag.valueOf(parts[0]);
+            if (key != null) {
+              Object value = null;
+              if (parts.length >= 2) {
+                value = key.parseValue(parts[1]);
+              }
+              flags.put(key, value);
+              valid = true;
+            }
+          }
+        }
+        catch (Exception e) {
+        }
+      }
+      if (!valid) {
+        System.err.println("Invalid argument " + arg);
+        System.exit(1);
+      }
+    }
+    new DbBenchmark(flags).run();
+  }
+
+  private enum Flag {
+    benchmarks(
+        Arrays.asList(
+            "fillseq",
+            "readrandom",
+            "fillrandom"),
+        "Comma-separated list of operations to run in the specified order\n" +
+            "\tActual benchmarks:\n" +
+            "\t\tfillseq -- write N
values in sequential key order in async mode.\n" + + "\t\tfillrandom -- write N values in random key order in async mode.\n" + + "\t\tfillbatch -- write N/1000 batch where each batch has 1000 values\n" + + "\t\t in random key order in sync mode.\n" + + "\t\tfillsync -- write N/100 values in random key order in sync mode.\n" + + "\t\tfill100K -- write N/1000 100K values in random order in async mode.\n" + + "\t\treadseq -- read N times sequentially.\n" + + "\t\treadrandom -- read N times in random order.\n" + + "\t\treadhot -- read N times in random order from 1% section of DB.\n" + + "\t\treadwhilewriting -- measure the read performance of multiple readers\n" + + "\t\t with a bg single writer. The write rate of the bg\n" + + "\t\t is capped by --writes_per_second.\n" + + "\tMeta Operations:\n" + + "\t\tdelete -- delete DB") { + @Override public Object parseValue(String value) { + return new ArrayList(Arrays.asList(value.split(","))); + } + }, + compression_ratio(0.5d, + "Arrange to generate values that shrink to this fraction of\n" + + "\ttheir original size after compression.") { + @Override public Object parseValue(String value) { + return Double.parseDouble(value); + } + }, + use_existing_db(false, + "If true, do not destroy the existing database. If you set this\n" + + "\tflag and also specify a benchmark that wants a fresh database,\n" + + "\tthat benchmark will fail.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + num(1000000, + "Number of key/values to place in database.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + threads(1, + "Number of concurrent threads to run.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + reads(null, + "Number of read operations to do. If negative, do --nums reads.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + key_size(16, + "The size of each key in bytes.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + value_size(100, + "The size of each value in bytes.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + write_buffer_size(4 * SizeUnit.MB, + "Number of bytes to buffer in memtable before compacting\n" + + "\t(initialized to default value by 'main'.)") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + max_write_buffer_number(2, + "The number of in-memory memtables. Each memtable is of size\n" + + "\twrite_buffer_size.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + prefix_size(0, "Controls the prefix size for HashSkipList, HashLinkedList,\n" + + "\tand plain table.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + keys_per_prefix(0, "Controls the average number of keys generated\n" + + "\tper prefix, 0 means no special handling of the prefix,\n" + + "\ti.e. use the prefix comes with the generated random number.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + memtablerep("skip_list", + "The memtable format. 
Available options are\n" + + "\tskip_list,\n" + + "\tvector,\n" + + "\thash_linkedlist,\n" + + "\thash_skiplist (prefix_hash.)") { + @Override public Object parseValue(String value) { + return value; + } + }, + hash_bucket_count(SizeUnit.MB, + "The number of hash buckets used in the hash-bucket-based\n" + + "\tmemtables. Memtables that currently support this argument are\n" + + "\thash_linkedlist and hash_skiplist.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + writes_per_second(10000, + "The write-rate of the background writer used in the\n" + + "\t`readwhilewriting` benchmark. Non-positive number indicates\n" + + "\tusing an unbounded write-rate in `readwhilewriting` benchmark.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + use_plain_table(false, + "Use plain-table sst format.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + cache_size(-1L, + "Number of bytes to use as a cache of uncompressed data.\n" + + "\tNegative means use default settings.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + seed(0L, + "Seed base for random number generators.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + num_levels(7, + "The total number of levels.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + numdistinct(1000, + "Number of distinct keys to use. Used in RandomWithVerify to\n" + + "\tread/write on fewer keys so that gets are more likely to find the\n" + + "\tkey and puts are more likely to update the same key.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + merge_keys(-1, + "Number of distinct keys to use for MergeRandom and\n" + + "\tReadRandomMergeRandom.\n" + + "\tIf negative, there will be FLAGS_num keys.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + bloom_locality(0,"Control bloom filter probes locality.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + duration(0,"Time in seconds for the random-ops tests to run.\n" + + "\tWhen 0 then num & reads determine the test duration.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + num_multi_db(0, + "Number of DBs used in the benchmark. 0 means single DB.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + histogram(false,"Print histogram of operation timings.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + min_write_buffer_number_to_merge( + defaultOptions_.minWriteBufferNumberToMerge(), + "The minimum number of write buffers that will be merged together\n" + + "\tbefore writing to storage. This is cheap because it is an\n" + + "\tin-memory merge. If this feature is not enabled, then all these\n" + + "\twrite buffers are flushed to L0 as separate files and this\n" + + "\tincreases read amplification because a get request has to check\n" + + "\tin all of these files. 
Also, an in-memory merge may result in\n" + + "\twriting less data to storage if there are duplicate records\n" + + "\tin each of these individual write buffers.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + max_background_compactions( + defaultOptions_.maxBackgroundCompactions(), + "The maximum number of concurrent background compactions\n" + + "\tthat can occur in parallel.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + max_background_flushes( + defaultOptions_.maxBackgroundFlushes(), + "The maximum number of concurrent background flushes\n" + + "\tthat can occur in parallel.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + /* TODO(yhchiang): enable the following + compaction_style((int32_t) defaultOptions_.compactionStyle(), + "style of compaction: level-based vs universal.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + },*/ + universal_size_ratio(0, + "Percentage flexibility while comparing file size\n" + + "\t(for universal compaction only).") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + universal_min_merge_width(0,"The minimum number of files in a\n" + + "\tsingle compaction run (for universal compaction only).") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + universal_max_merge_width(0,"The max number of files to compact\n" + + "\tin universal style compaction.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + universal_max_size_amplification_percent(0, + "The max size amplification for universal style compaction.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + universal_compression_size_percent(-1, + "The percentage of the database to compress for universal\n" + + "\tcompaction. -1 means compress everything.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + block_size(defaultBlockBasedTableOptions_.blockSize(), + "Number of bytes in a block.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + compressed_cache_size(-1, + "Number of bytes to use as a cache of compressed data.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + open_files(defaultOptions_.maxOpenFiles(), + "Maximum number of files to keep open at the same time\n" + + "\t(use default if == 0)") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + bloom_bits(-1,"Bloom filter bits per key. Negative means\n" + + "\tuse default settings.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + memtable_bloom_size_ratio(0, "Ratio of memtable used by the bloom filter.\n" + + "\t0 means no bloom filter.") { + @Override public Object parseValue(String value) { + return Double.parseDouble(value); + } + }, + cache_numshardbits(-1,"Number of shards for the block cache\n" + + "\tis 2 ** cache_numshardbits. 
Negative means use default settings.\n" + + "\tThis is applied only if FLAGS_cache_size is non-negative.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + verify_checksum(false,"Verify checksum for every block read\n" + + "\tfrom storage.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + statistics(false,"Database statistics.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + writes(-1,"Number of write operations to do. If negative, do\n" + + "\t--num reads.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + sync(false,"Sync all writes to disk.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + disable_data_sync(false,"If true, do not wait until data is\n" + + "\tsynced to disk.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + use_fsync(false,"If true, issue fsync instead of fdatasync.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + disable_wal(false,"If true, do not write WAL for write.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + wal_dir("", "If not empty, use the given dir for WAL.") { + @Override public Object parseValue(String value) { + return value; + } + }, + target_file_size_base(2 * 1048576,"Target file size at level-1") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + target_file_size_multiplier(1, + "A multiplier to compute target level-N file size (N >= 2)") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + max_bytes_for_level_base(10 * 1048576, + "Max bytes for level-1") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + max_bytes_for_level_multiplier(10, + "A multiplier to compute max bytes for level-N (N >= 2)") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + level0_stop_writes_trigger(12,"Number of files in level-0\n" + + "\tthat will trigger put stop.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + level0_slowdown_writes_trigger(8,"Number of files in level-0\n" + + "\tthat will slow down writes.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + level0_file_num_compaction_trigger(4,"Number of files in level-0\n" + + "\twhen compactions start.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + readwritepercent(90,"Ratio of reads to reads/writes (expressed\n" + + "\tas percentage) for the ReadRandomWriteRandom workload. The\n" + + "\tdefault value 90 means 90% operations out of all reads and writes\n" + + "\toperations are reads. In other words, 9 gets for every 1 put.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + mergereadpercent(70,"Ratio of merges to merges&reads (expressed\n" + + "\tas percentage) for the ReadRandomMergeRandom workload. The\n" + + "\tdefault value 70 means 70% out of all read and merge operations\n" + + "\tare merges. 
In other words, 7 merges for every 3 gets.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + deletepercent(2,"Percentage of deletes out of reads/writes/\n" + + "\tdeletes (used in RandomWithVerify only). RandomWithVerify\n" + + "\tcalculates writepercent as (100 - FLAGS_readwritepercent -\n" + + "\tdeletepercent), so deletepercent must be smaller than (100 -\n" + + "\tFLAGS_readwritepercent)") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + delete_obsolete_files_period_micros(0,"Option to delete\n" + + "\tobsolete files periodically. 0 means that obsolete files are\n" + + "\tdeleted after every compaction run.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + compression_type("snappy", + "Algorithm used to compress the database.") { + @Override public Object parseValue(String value) { + return value; + } + }, + compression_level(-1, + "Compression level. For zlib this should be -1 for the\n" + + "\tdefault level, or between 0 and 9.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + min_level_to_compress(-1,"If non-negative, compression starts\n" + + "\tfrom this level. Levels with number < min_level_to_compress are\n" + + "\tnot compressed. Otherwise, apply compression_type to\n" + + "\tall levels.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + table_cache_numshardbits(4,"") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + stats_interval(0,"Stats are reported every N operations when\n" + + "\tthis is greater than zero. When 0 the interval grows over time.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + stats_per_interval(0,"Reports additional stats per interval when\n" + + "\tthis is greater than 0.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + perf_level(0,"Level of perf collection.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + soft_rate_limit(0.0,"") { + @Override public Object parseValue(String value) { + return Double.parseDouble(value); + } + }, + hard_rate_limit(0.0,"When not equal to 0 this make threads\n" + + "\tsleep at each stats reporting interval until the compaction\n" + + "\tscore for all levels is less than or equal to this value.") { + @Override public Object parseValue(String value) { + return Double.parseDouble(value); + } + }, + rate_limit_delay_max_milliseconds(1000, + "When hard_rate_limit is set then this is the max time a put will\n" + + "\tbe stalled.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + max_grandparent_overlap_factor(10,"Control maximum bytes of\n" + + "\toverlaps in grandparent (i.e., level+2) before we stop building a\n" + + "\tsingle file in a level->level+1 compaction.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + readonly(false,"Run read only benchmarks.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + disable_auto_compactions(false,"Do not auto trigger compactions.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + source_compaction_factor(1,"Cap the size of data in level-K 
for\n" + + "\ta compaction run that compacts Level-K with Level-(K+1) (for\n" + + "\tK >= 1)") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + wal_ttl_seconds(0L,"Set the TTL for the WAL Files in seconds.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + wal_size_limit_MB(0L,"Set the size limit for the WAL Files\n" + + "\tin MB.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + /* TODO(yhchiang): enable the following + bufferedio(rocksdb::EnvOptions().use_os_buffer, + "Allow buffered io using OS buffers.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + */ + mmap_read(false, + "Allow reads to occur via mmap-ing files.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + mmap_write(false, + "Allow writes to occur via mmap-ing files.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + advise_random_on_open(defaultOptions_.adviseRandomOnOpen(), + "Advise random access on table file open.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + compaction_fadvice("NORMAL", + "Access pattern advice when a file is compacted.") { + @Override public Object parseValue(String value) { + return value; + } + }, + use_tailing_iterator(false, + "Use tailing iterator to access a series of keys instead of get.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + use_adaptive_mutex(defaultOptions_.useAdaptiveMutex(), + "Use adaptive mutex.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + bytes_per_sync(defaultOptions_.bytesPerSync(), + "Allows OS to incrementally sync files to disk while they are\n" + + "\tbeing written, in the background. Issue one request for every\n" + + "\tbytes_per_sync written. 0 turns it off.") { + @Override public Object parseValue(String value) { + return Long.parseLong(value); + } + }, + filter_deletes(false," On true, deletes use bloom-filter and drop\n" + + "\tthe delete if key not present.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + max_successive_merges(0,"Maximum number of successive merge\n" + + "\toperations on a key in the memtable.") { + @Override public Object parseValue(String value) { + return Integer.parseInt(value); + } + }, + db(getTempDir("rocksdb-jni"), + "Use the db with the following name.") { + @Override public Object parseValue(String value) { + return value; + } + }, + use_mem_env(false, "Use RocksMemEnv instead of default filesystem based\n" + + "environment.") { + @Override public Object parseValue(String value) { + return parseBoolean(value); + } + }, + java_comparator(null, "Class name of a Java Comparator to use instead\n" + + "\tof the default C++ ByteWiseComparatorImpl. 
Must be available on\n" + + "\tthe classpath") { + @Override + protected Object parseValue(final String value) { + try { + final ComparatorOptions copt = new ComparatorOptions(); + final Class clsComparator = + (Class)Class.forName(value); + final Constructor cstr = + clsComparator.getConstructor(ComparatorOptions.class); + return cstr.newInstance(copt); + } catch(final ClassNotFoundException cnfe) { + throw new IllegalArgumentException("Java Comparator '" + value + "'" + + " not found on the classpath", cnfe); + } catch(final NoSuchMethodException nsme) { + throw new IllegalArgumentException("Java Comparator '" + value + "'" + + " does not have a public ComparatorOptions constructor", nsme); + } catch(final IllegalAccessException | InstantiationException + | InvocationTargetException ie) { + throw new IllegalArgumentException("Unable to construct Java" + + " Comparator '" + value + "'", ie); + } + } + }; + + private Flag(Object defaultValue, String desc) { + defaultValue_ = defaultValue; + desc_ = desc; + } + + public Object getDefaultValue() { + return defaultValue_; + } + + public String desc() { + return desc_; + } + + public boolean parseBoolean(String value) { + if (value.equals("1")) { + return true; + } else if (value.equals("0")) { + return false; + } + return Boolean.parseBoolean(value); + } + + protected abstract Object parseValue(String value); + + private final Object defaultValue_; + private final String desc_; + } + + private final static String DEFAULT_TEMP_DIR = "/tmp"; + + private static String getTempDir(final String dirName) { + try { + return Files.createTempDirectory(dirName).toAbsolutePath().toString(); + } catch(final IOException ioe) { + System.err.println("Unable to create temp directory, defaulting to: " + + DEFAULT_TEMP_DIR); + return DEFAULT_TEMP_DIR + File.pathSeparator + dirName; + } + } + + private static class RandomGenerator { + private final byte[] data_; + private int dataLength_; + private int position_; + private double compressionRatio_; + Random rand_; + + private RandomGenerator(long seed, double compressionRatio) { + // We use a limited amount of data over and over again and ensure + // that it is larger than the compression window (32KB), and also + byte[] value = new byte[100]; + // large enough to serve all typical value sizes we want to write. + rand_ = new Random(seed); + dataLength_ = value.length * 10000; + data_ = new byte[dataLength_]; + compressionRatio_ = compressionRatio; + int pos = 0; + while (pos < dataLength_) { + compressibleBytes(value); + System.arraycopy(value, 0, data_, pos, + Math.min(value.length, dataLength_ - pos)); + pos += value.length; + } + } + + private void compressibleBytes(byte[] value) { + int baseLength = value.length; + if (compressionRatio_ < 1.0d) { + baseLength = (int) (compressionRatio_ * value.length + 0.5); + } + if (baseLength <= 0) { + baseLength = 1; + } + int pos; + for (pos = 0; pos < baseLength; ++pos) { + value[pos] = (byte) (' ' + rand_.nextInt(95)); // ' ' .. 
'~' + } + while (pos < value.length) { + System.arraycopy(value, 0, value, pos, + Math.min(baseLength, value.length - pos)); + pos += baseLength; + } + } + + private void generate(byte[] value) { + if (position_ + value.length > data_.length) { + position_ = 0; + assert(value.length <= data_.length); + } + position_ += value.length; + System.arraycopy(data_, position_ - value.length, + value, 0, value.length); + } + } + + boolean isFinished() { + synchronized(finishLock_) { + return isFinished_; + } + } + + void setFinished(boolean flag) { + synchronized(finishLock_) { + isFinished_ = flag; + } + } + + RocksDB db_; + final List benchmarks_; + final int num_; + final int reads_; + final int keySize_; + final int valueSize_; + final int threadNum_; + final int writesPerSeconds_; + final long randSeed_; + final boolean useExisting_; + final String databaseDir_; + double compressionRatio_; + RandomGenerator gen_; + long startTime_; + + // env + boolean useMemenv_; + + // memtable related + final int maxWriteBufferNumber_; + final int prefixSize_; + final int keysPerPrefix_; + final String memtable_; + final long hashBucketCount_; + + // sst format related + boolean usePlainTable_; + + Object finishLock_; + boolean isFinished_; + Map flags_; + // as the scope of a static member equals to the scope of the problem, + // we let its c++ pointer to be disposed in its finalizer. + static Options defaultOptions_ = new Options(); + static BlockBasedTableConfig defaultBlockBasedTableOptions_ = + new BlockBasedTableConfig(); + String compressionType_; + CompressionType compression_; +} diff --git a/external/rocksdb/java/crossbuild/Vagrantfile b/external/rocksdb/java/crossbuild/Vagrantfile new file mode 100755 index 0000000000..21cce1201a --- /dev/null +++ b/external/rocksdb/java/crossbuild/Vagrantfile @@ -0,0 +1,26 @@ +# -*- mode: ruby -*- +# vi: set ft=ruby : + +# Vagrantfile API/syntax version. Don't touch unless you know what you're doing! 
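A note on the RandomGenerator defined above: it builds one 100-byte block whose leading compressionRatio fraction is random printable characters, tiles that block into a large shared buffer, and then hands out windows of it, so generated values compress to roughly the requested ratio. A self-contained Java sketch of the same trick (CompressibleBytes and its names are illustrative, not part of the benchmark):

import java.util.Random;

// Illustrative sketch: a value whose first ratio*length bytes are random
// printable characters and whose remainder tiles that prefix, so a block
// compressor can shrink it to roughly ratio of its raw size.
final class CompressibleBytes {
  static byte[] make(int length, double compressionRatio, long seed) {
    final Random rand = new Random(seed);
    int base = length;
    if (compressionRatio < 1.0d) {
      base = (int) (compressionRatio * length + 0.5);
    }
    if (base <= 0) {
      base = 1;
    }
    final byte[] value = new byte[length];
    int pos;
    for (pos = 0; pos < base; ++pos) {
      value[pos] = (byte) (' ' + rand.nextInt(95)); // ' ' .. '~'
    }
    while (pos < length) { // tile the random prefix over the tail
      System.arraycopy(value, 0, value, pos, Math.min(base, length - pos));
      pos += base;
    }
    return value;
  }
}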
+VAGRANTFILE_API_VERSION = "2"
+
+Vagrant.configure(VAGRANTFILE_API_VERSION) do |config|
+
+  config.vm.define "linux32" do |linux32|
+    linux32.vm.box = "hansode/centos-5.6-i386"
+  end
+
+  config.vm.define "linux64" do |linux64|
+    linux64.vm.box = "hansode/centos-5.6-x86_64"
+  end
+
+  config.vm.provider "virtualbox" do |v|
+    v.memory = 2048
+    v.cpus = 4
+  end
+
+  config.vm.provision :shell, path: "build-linux-centos.sh"
+  config.vm.synced_folder "../target", "/rocksdb-build"
+  config.vm.synced_folder "../..", "/rocksdb", type: "rsync"
+  config.vm.boot_timeout = 1200
+end
diff --git a/external/rocksdb/java/crossbuild/build-linux-centos.sh b/external/rocksdb/java/crossbuild/build-linux-centos.sh
new file mode 100755
index 0000000000..2e8f81d94b
--- /dev/null
+++ b/external/rocksdb/java/crossbuild/build-linux-centos.sh
@@ -0,0 +1,24 @@
+#!/usr/bin/env bash
+# install all required packages for rocksdb that are available through yum
+ARCH=$(uname -i)
+sudo yum -y install openssl java-1.7.0-openjdk-devel.$ARCH
+
+# install gcc/g++ 4.8.2 via CERN (http://linux.web.cern.ch/linux/devtoolset/)
+sudo wget -O /etc/yum.repos.d/slc5-devtoolset.repo http://linuxsoft.cern.ch/cern/devtoolset/slc5-devtoolset.repo
+sudo wget -O /etc/pki/rpm-gpg/RPM-GPG-KEY-cern http://ftp.mirrorservice.org/sites/ftp.scientificlinux.org/linux/scientific/51/i386/RPM-GPG-KEYs/RPM-GPG-KEY-cern
+sudo yum -y install devtoolset-2
+
+wget http://gflags.googlecode.com/files/gflags-2.0-no-svn-files.tar.gz
+tar xvfz gflags-2.0-no-svn-files.tar.gz; cd gflags-2.0; scl enable devtoolset-2 ./configure; scl enable devtoolset-2 make; sudo make install
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:/usr/local/lib
+
+# set java home so we can build rocksdb jars
+export JAVA_HOME=/usr/lib/jvm/java-1.7.0
+
+# build rocksdb
+cd /rocksdb
+scl enable devtoolset-2 'make jclean clean'
+scl enable devtoolset-2 'PORTABLE=1 make rocksdbjavastatic'
+cp /rocksdb/java/target/librocksdbjni-* /rocksdb-build
+cp /rocksdb/java/target/rocksdbjni-* /rocksdb-build
+
diff --git a/external/rocksdb/java/crossbuild/build-linux.sh b/external/rocksdb/java/crossbuild/build-linux.sh
new file mode 100755
index 0000000000..48d1c28d92
--- /dev/null
+++ b/external/rocksdb/java/crossbuild/build-linux.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+# install all required packages for rocksdb
+sudo apt-get update
+sudo apt-get -y install git make gcc g++ libgflags-dev libsnappy-dev zlib1g-dev libbz2-dev default-jdk
+
+# set java home so we can build rocksdb jars
+export JAVA_HOME=$(echo /usr/lib/jvm/java-7-openjdk*)
+cd /rocksdb
+make jclean clean
+make -j 4 rocksdbjavastatic
+cp /rocksdb/java/target/librocksdbjni-* /rocksdb-build
+cp /rocksdb/java/target/rocksdbjni-* /rocksdb-build
+sudo shutdown -h now
+
diff --git a/external/rocksdb/java/jdb_bench.sh b/external/rocksdb/java/jdb_bench.sh
new file mode 100755
index 0000000000..9665de785e
--- /dev/null
+++ b/external/rocksdb/java/jdb_bench.sh
@@ -0,0 +1,10 @@
+PLATFORM=64
+if [ `getconf LONG_BIT` != "64" ]
+then
+  PLATFORM=32
+fi
+
+ROCKS_JAR=`find target -name rocksdbjni*.jar`
+
+echo "Running benchmark in $PLATFORM-Bit mode."
+java -server -d$PLATFORM -XX:NewSize=4m -XX:+AggressiveOpts -Djava.library.path=target -cp "${ROCKS_JAR}:benchmark/target/classes" org.rocksdb.benchmark.DbBenchmark $@ diff --git a/external/rocksdb/java/rocksjni.pom b/external/rocksdb/java/rocksjni.pom new file mode 100755 index 0000000000..0251d8f364 --- /dev/null +++ b/external/rocksdb/java/rocksjni.pom @@ -0,0 +1,145 @@ + + + 4.0.0 + RocksDB JNI + http://rocksdb.org/ + org.rocksdb + rocksdbjni + + - + RocksDB fat jar that contains .so files for linux32 and linux64, and jnilib files + for Mac OSX. + + + + Apache License 2.0 + http://www.apache.org/licenses/LICENSE-2.0.html + repo + + + + scm:git:git://github.com/dropwizard/metrics.git + scm:git:git@github.com:dropwizard/metrics.git + http://github.com/dropwizard/metrics/ + HEAD + + + + Facebook + help@facebook.com + America/New_York + + architect + + + + + + 1.7 + 1.7 + UTF-8 + + + + + + org.apache.maven.plugins + maven-compiler-plugin + 3.2 + + ${project.build.source} + ${project.build.target} + ${project.build.sourceEncoding} + + + + org.apache.maven.plugins + maven-surefire-plugin + 2.18.1 + + ${argLine} -ea -Xcheck:jni -Djava.library.path=${project.build.directory} + false + false + + ${project.build.directory}/* + + + + + org.jacoco + jacoco-maven-plugin + 0.7.2.201409121644 + + + + prepare-agent + + + + report + prepare-package + + report + + + + + + org.codehaus.gmaven + groovy-maven-plugin + 2.0 + + + process-classes + + execute + + + + Xenu + + + String fileContents = new File(project.basedir.absolutePath + '/../include/rocksdb/version.h').getText('UTF-8') + matcher = (fileContents =~ /(?s).*ROCKSDB_MAJOR ([0-9]+).*?/) + String major_version = matcher.getAt(0).getAt(1) + matcher = (fileContents =~ /(?s).*ROCKSDB_MINOR ([0-9]+).*?/) + String minor_version = matcher.getAt(0).getAt(1) + matcher = (fileContents =~ /(?s).*ROCKSDB_PATCH ([0-9]+).*?/) + String patch_version = matcher.getAt(0).getAt(1) + String version = String.format('%s.%s.%s', major_version, minor_version, patch_version) + // Set version to be used in pom.properties + project.version = version + // Set version to be set as jar name + project.build.finalName = project.artifactId + "-" + version + + + + + + + + + + + junit + junit + 4.12 + test + + + org.assertj + assertj-core + 1.7.1 + test + + + org.mockito + mockito-all + 1.10.19 + test + + + diff --git a/external/rocksdb/java/rocksjni/backupablejni.cc b/external/rocksdb/java/rocksjni/backupablejni.cc new file mode 100755 index 0000000000..d9976386ca --- /dev/null +++ b/external/rocksdb/java/rocksjni/backupablejni.cc @@ -0,0 +1,211 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::BackupableDB and rocksdb::BackupableDBOptions methods +// from Java side. 
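The setters implemented below map one-to-one onto Java methods of org.rocksdb.BackupableDBOptions: each Java call forwards its own native handle plus the new value, and the C++ side simply assigns the corresponding field. A minimal usage sketch from the Java side, assuming this era's rocksdbjni API and its dispose()-based cleanup:

import org.rocksdb.BackupableDBOptions;

// Sketch: every setter below lands in one of the
// Java_org_rocksdb_BackupableDBOptions_* functions in this file.
public class BackupOptionsExample {
  public static void main(String[] args) {
    final BackupableDBOptions opts =
        new BackupableDBOptions("/tmp/rocksdb-backups"); // newBackupableDBOptions
    opts.setShareTableFiles(true);  // reuse identical SST files across backups
    opts.setSync(true);             // fsync backup files as they are written
    opts.setDestroyOldData(false);
    opts.setBackupLogFiles(true);
    opts.setBackupRateLimit(4L * 1024 * 1024); // bytes per second
    // ... hand opts to a BackupEngine, then release the native handle:
    opts.dispose();
  }
}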
+ +#include +#include +#include +#include +#include + +#include "include/org_rocksdb_BackupableDBOptions.h" +#include "rocksjni/portal.h" +#include "rocksdb/utilities/backupable_db.h" + +/////////////////////////////////////////////////////////////////////////// +// BackupDBOptions + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: newBackupableDBOptions + * Signature: (Ljava/lang/String;)J + */ +jlong Java_org_rocksdb_BackupableDBOptions_newBackupableDBOptions( + JNIEnv* env, jclass jcls, jstring jpath) { + const char* cpath = env->GetStringUTFChars(jpath, NULL); + auto bopt = new rocksdb::BackupableDBOptions(cpath); + env->ReleaseStringUTFChars(jpath, cpath); + return reinterpret_cast(bopt); +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: backupDir + * Signature: (J)Ljava/lang/String; + */ +jstring Java_org_rocksdb_BackupableDBOptions_backupDir( + JNIEnv* env, jobject jopt, jlong jhandle) { + auto bopt = reinterpret_cast(jhandle); + return env->NewStringUTF(bopt->backup_dir.c_str()); +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: setShareTableFiles + * Signature: (JZ)V + */ +void Java_org_rocksdb_BackupableDBOptions_setShareTableFiles( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) { + auto bopt = reinterpret_cast(jhandle); + bopt->share_table_files = flag; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: shareTableFiles + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_BackupableDBOptions_shareTableFiles( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto bopt = reinterpret_cast(jhandle); + return bopt->share_table_files; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: setSync + * Signature: (JZ)V + */ +void Java_org_rocksdb_BackupableDBOptions_setSync( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) { + auto bopt = reinterpret_cast(jhandle); + bopt->sync = flag; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: sync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_BackupableDBOptions_sync( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto bopt = reinterpret_cast(jhandle); + return bopt->sync; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: setDestroyOldData + * Signature: (JZ)V + */ +void Java_org_rocksdb_BackupableDBOptions_setDestroyOldData( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) { + auto bopt = reinterpret_cast(jhandle); + bopt->destroy_old_data = flag; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: destroyOldData + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_BackupableDBOptions_destroyOldData( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto bopt = reinterpret_cast(jhandle); + return bopt->destroy_old_data; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: setBackupLogFiles + * Signature: (JZ)V + */ +void Java_org_rocksdb_BackupableDBOptions_setBackupLogFiles( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) { + auto bopt = reinterpret_cast(jhandle); + bopt->backup_log_files = flag; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: backupLogFiles + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_BackupableDBOptions_backupLogFiles( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto bopt = reinterpret_cast(jhandle); + return bopt->backup_log_files; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: setBackupRateLimit + * Signature: (JJ)V + */ +void Java_org_rocksdb_BackupableDBOptions_setBackupRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle, 
jlong jbackup_rate_limit) { + auto bopt = reinterpret_cast(jhandle); + bopt->backup_rate_limit = jbackup_rate_limit; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: backupRateLimit + * Signature: (J)J + */ +jlong Java_org_rocksdb_BackupableDBOptions_backupRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto bopt = reinterpret_cast(jhandle); + return bopt->backup_rate_limit; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: setRestoreRateLimit + * Signature: (JJ)V + */ +void Java_org_rocksdb_BackupableDBOptions_setRestoreRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jrestore_rate_limit) { + auto bopt = reinterpret_cast(jhandle); + bopt->restore_rate_limit = jrestore_rate_limit; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: restoreRateLimit + * Signature: (J)J + */ +jlong Java_org_rocksdb_BackupableDBOptions_restoreRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto bopt = reinterpret_cast(jhandle); + return bopt->restore_rate_limit; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: setShareFilesWithChecksum + * Signature: (JZ)V + */ +void Java_org_rocksdb_BackupableDBOptions_setShareFilesWithChecksum( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) { + auto bopt = reinterpret_cast(jhandle); + bopt->share_files_with_checksum = flag; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: shareFilesWithChecksum + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_BackupableDBOptions_shareFilesWithChecksum( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto bopt = reinterpret_cast(jhandle); + return bopt->share_files_with_checksum; +} + +/* + * Class: org_rocksdb_BackupableDBOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_BackupableDBOptions_disposeInternal( + JNIEnv* env, jobject jopt, jlong jhandle) { + auto bopt = reinterpret_cast(jhandle); + assert(bopt); + delete bopt; +} diff --git a/external/rocksdb/java/rocksjni/backupenginejni.cc b/external/rocksdb/java/rocksjni/backupenginejni.cc new file mode 100755 index 0000000000..a796d6e5be --- /dev/null +++ b/external/rocksdb/java/rocksjni/backupenginejni.cc @@ -0,0 +1,210 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling C++ rocksdb::BackupEngine methods from the Java side. 
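The functions below back org.rocksdb.BackupEngine: Open hands back a raw BackupEngine pointer that the Java object stores as its handle, every other native receives that handle again, and failures are converted into Java RocksDBExceptions by the ThrowNew calls. A hedged round-trip sketch from the Java side (cleanup method names vary slightly across rocksdbjni versions):

import org.rocksdb.BackupEngine;
import org.rocksdb.BackupableDBOptions;
import org.rocksdb.Env;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

// Sketch: back up a live database and prune old backups.
public class BackupEngineExample {
  public static void main(String[] args) throws Exception {
    final Options options = new Options().setCreateIfMissing(true);
    final RocksDB db = RocksDB.open(options, "/tmp/testdb");
    final BackupableDBOptions bopts =
        new BackupableDBOptions("/tmp/rocksdb-backups");
    final BackupEngine engine = BackupEngine.open(Env.getDefault(), bopts);
    engine.createNewBackup(db, true); // flush memtables before backing up
    engine.purgeOldBackups(5);        // keep only the five newest backups
    // release native handles (dispose()/close() depending on version)
    engine.dispose();
    bopts.dispose();
    db.close();
    options.dispose();
  }
}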
+ +#include +#include + +#include "include/org_rocksdb_BackupEngine.h" +#include "rocksdb/utilities/backupable_db.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_BackupEngine + * Method: open + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_BackupEngine_open( + JNIEnv* env, jclass jcls, jlong env_handle, + jlong backupable_db_options_handle) { + auto* rocks_env = reinterpret_cast(env_handle); + auto* backupable_db_options = + reinterpret_cast( + backupable_db_options_handle); + rocksdb::BackupEngine* backup_engine; + auto status = rocksdb::BackupEngine::Open(rocks_env, + *backupable_db_options, &backup_engine); + + if (status.ok()) { + return reinterpret_cast(backup_engine); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, status); + return 0; + } +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: createNewBackup + * Signature: (JJZ)V + */ +void Java_org_rocksdb_BackupEngine_createNewBackup( + JNIEnv* env, jobject jbe, jlong jbe_handle, jlong db_handle, + jboolean jflush_before_backup) { + auto* db = reinterpret_cast(db_handle); + auto* backup_engine = reinterpret_cast(jbe_handle); + auto status = backup_engine->CreateNewBackup(db, + static_cast(jflush_before_backup)); + + if (status.ok()) { + return; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: getBackupInfo + * Signature: (J)Ljava/util/List; + */ +jobject Java_org_rocksdb_BackupEngine_getBackupInfo( + JNIEnv* env, jobject jbe, jlong jbe_handle) { + auto* backup_engine = reinterpret_cast(jbe_handle); + std::vector backup_infos; + backup_engine->GetBackupInfo(&backup_infos); + return rocksdb::BackupInfoListJni::getBackupInfo(env, backup_infos); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: getCorruptedBackups + * Signature: (J)[I + */ +jintArray Java_org_rocksdb_BackupEngine_getCorruptedBackups( + JNIEnv* env, jobject jbe, jlong jbe_handle) { + auto* backup_engine = reinterpret_cast(jbe_handle); + std::vector backup_ids; + backup_engine->GetCorruptedBackups(&backup_ids); + // store backupids in int array + std::vector int_backup_ids(backup_ids.begin(), backup_ids.end()); + // Store ints in java array + jintArray ret_backup_ids; + // Its ok to loose precision here (64->32) + jsize ret_backup_ids_size = static_cast(backup_ids.size()); + ret_backup_ids = env->NewIntArray(ret_backup_ids_size); + env->SetIntArrayRegion(ret_backup_ids, 0, ret_backup_ids_size, + int_backup_ids.data()); + return ret_backup_ids; +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: garbageCollect + * Signature: (J)V + */ +void Java_org_rocksdb_BackupEngine_garbageCollect( + JNIEnv* env, jobject jbe, jlong jbe_handle) { + auto* backup_engine = reinterpret_cast(jbe_handle); + auto status = backup_engine->GarbageCollect(); + + if (status.ok()) { + return; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: purgeOldBackups + * Signature: (JI)V + */ +void Java_org_rocksdb_BackupEngine_purgeOldBackups( + JNIEnv* env, jobject jbe, jlong jbe_handle, jint jnum_backups_to_keep) { + auto* backup_engine = reinterpret_cast(jbe_handle); + auto status = + backup_engine-> + PurgeOldBackups(static_cast(jnum_backups_to_keep)); + + if (status.ok()) { + return; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: deleteBackup + * Signature: (JI)V + */ +void Java_org_rocksdb_BackupEngine_deleteBackup( + JNIEnv* env, jobject jbe, jlong 
jbe_handle, jint jbackup_id) { + auto* backup_engine = reinterpret_cast(jbe_handle); + auto status = + backup_engine->DeleteBackup(static_cast(jbackup_id)); + + if (status.ok()) { + return; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: restoreDbFromBackup + * Signature: (JILjava/lang/String;Ljava/lang/String;J)V + */ +void Java_org_rocksdb_BackupEngine_restoreDbFromBackup( + JNIEnv* env, jobject jbe, jlong jbe_handle, jint jbackup_id, + jstring jdb_dir, jstring jwal_dir, jlong jrestore_options_handle) { + auto* backup_engine = reinterpret_cast(jbe_handle); + const char* db_dir = env->GetStringUTFChars(jdb_dir, 0); + const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0); + auto* restore_options = + reinterpret_cast(jrestore_options_handle); + auto status = + backup_engine->RestoreDBFromBackup( + static_cast(jbackup_id), db_dir, wal_dir, + *restore_options); + env->ReleaseStringUTFChars(jwal_dir, wal_dir); + env->ReleaseStringUTFChars(jdb_dir, db_dir); + + if (status.ok()) { + return; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: restoreDbFromLatestBackup + * Signature: (JLjava/lang/String;Ljava/lang/String;J)V + */ +void Java_org_rocksdb_BackupEngine_restoreDbFromLatestBackup( + JNIEnv* env, jobject jbe, jlong jbe_handle, jstring jdb_dir, + jstring jwal_dir, jlong jrestore_options_handle) { + auto* backup_engine = reinterpret_cast(jbe_handle); + const char* db_dir = env->GetStringUTFChars(jdb_dir, 0); + const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0); + auto* restore_options = + reinterpret_cast(jrestore_options_handle); + auto status = + backup_engine->RestoreDBFromLatestBackup(db_dir, wal_dir, + *restore_options); + env->ReleaseStringUTFChars(jwal_dir, wal_dir); + env->ReleaseStringUTFChars(jdb_dir, db_dir); + + if (status.ok()) { + return; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, status); +} + +/* + * Class: org_rocksdb_BackupEngine + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_BackupEngine_disposeInternal( + JNIEnv* env, jobject jbe, jlong jbe_handle) { + delete reinterpret_cast(jbe_handle); +} diff --git a/external/rocksdb/java/rocksjni/checkpoint.cc b/external/rocksdb/java/rocksjni/checkpoint.cc new file mode 100755 index 0000000000..45f0fde6b8 --- /dev/null +++ b/external/rocksdb/java/rocksjni/checkpoint.cc @@ -0,0 +1,61 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::Checkpoint methods from Java side. 
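newCheckpoint and createCheckpoint below are the native half of org.rocksdb.Checkpoint, which snapshots a live database into a fresh directory (using hard links for SST files where the filesystem allows). A minimal sketch of the Java side, assuming this era's API:

import org.rocksdb.Checkpoint;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;

// Sketch: snapshot an open database; the target directory must not exist yet.
public class CheckpointExample {
  public static void main(String[] args) throws Exception {
    final Options options = new Options().setCreateIfMissing(true);
    final RocksDB db = RocksDB.open(options, "/tmp/testdb");
    final Checkpoint checkpoint = Checkpoint.create(db); // wraps newCheckpoint
    checkpoint.createCheckpoint("/tmp/testdb-checkpoint");
    checkpoint.dispose();
    db.close();
    options.dispose();
  }
}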
+ +#include +#include +#include +#include + +#include "include/org_rocksdb_Checkpoint.h" +#include "rocksjni/portal.h" +#include "rocksdb/db.h" +#include "rocksdb/utilities/checkpoint.h" +/* + * Class: org_rocksdb_Checkpoint + * Method: newCheckpoint + * Signature: (J)J + */ +jlong Java_org_rocksdb_Checkpoint_newCheckpoint(JNIEnv* env, + jclass jclazz, jlong jdb_handle) { + auto db = reinterpret_cast(jdb_handle); + rocksdb::Checkpoint* checkpoint; + rocksdb::Checkpoint::Create(db, &checkpoint); + return reinterpret_cast(checkpoint); +} + +/* + * Class: org_rocksdb_Checkpoint + * Method: dispose + * Signature: (J)V + */ +void Java_org_rocksdb_Checkpoint_disposeInternal(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto checkpoint = reinterpret_cast(jhandle); + assert(checkpoint); + delete checkpoint; +} + +/* + * Class: org_rocksdb_Checkpoint + * Method: createCheckpoint + * Signature: (JLjava/lang/String;)V + */ +void Java_org_rocksdb_Checkpoint_createCheckpoint( + JNIEnv* env, jobject jobj, jlong jcheckpoint_handle, + jstring jcheckpoint_path) { + auto checkpoint = reinterpret_cast( + jcheckpoint_handle); + const char* checkpoint_path = env->GetStringUTFChars( + jcheckpoint_path, 0); + rocksdb::Status s = checkpoint->CreateCheckpoint( + checkpoint_path); + env->ReleaseStringUTFChars(jcheckpoint_path, checkpoint_path); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} diff --git a/external/rocksdb/java/rocksjni/columnfamilyhandle.cc b/external/rocksdb/java/rocksjni/columnfamilyhandle.cc new file mode 100755 index 0000000000..2a874b1d90 --- /dev/null +++ b/external/rocksdb/java/rocksjni/columnfamilyhandle.cc @@ -0,0 +1,25 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::Iterator methods from Java side. + +#include +#include +#include + +#include "include/org_rocksdb_ColumnFamilyHandle.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_ColumnFamilyHandle + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_ColumnFamilyHandle_disposeInternal( + JNIEnv* env, jobject jobj, jlong handle) { + auto it = reinterpret_cast(handle); + delete it; +} diff --git a/external/rocksdb/java/rocksjni/compaction_filter.cc b/external/rocksdb/java/rocksjni/compaction_filter.cc new file mode 100755 index 0000000000..6cc6c7732a --- /dev/null +++ b/external/rocksdb/java/rocksjni/compaction_filter.cc @@ -0,0 +1,25 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ for +// rocksdb::CompactionFilter. 
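The one-function files just above and below illustrate the ownership contract used throughout these bindings: the Java object carries a jlong that is really a C++ pointer, and disposal reinterpret_casts it back and deletes it exactly once. A hypothetical Java-side sketch of that contract (NativeResource and its members are illustrative only; the real base class in this tree is RocksObject):

// Illustrative sketch of the handle-owning wrapper pattern; not actual
// rocksdbjni source. The native disposeInternal(long) would delete the
// reinterpret_cast'd C++ object, as in the .cc files around this point.
public abstract class NativeResource {
  private long nativeHandle_; // C++ pointer smuggled through JNI as a long

  protected NativeResource(final long nativeHandle) {
    nativeHandle_ = nativeHandle;
  }

  public final synchronized void dispose() {
    if (nativeHandle_ != 0) {
      disposeInternal(nativeHandle_); // native: delete reinterpret_cast'd ptr
      nativeHandle_ = 0;              // guard against double-free
    }
  }

  protected abstract void disposeInternal(long handle);
}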
+ +#include + +#include "include/org_rocksdb_AbstractCompactionFilter.h" +#include "rocksdb/compaction_filter.h" + +// + +/* + * Class: org_rocksdb_AbstractCompactionFilter + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_AbstractCompactionFilter_disposeInternal( + JNIEnv* env, jobject jobj, jlong handle) { + delete reinterpret_cast(handle); +} +// diff --git a/external/rocksdb/java/rocksjni/comparator.cc b/external/rocksdb/java/rocksjni/comparator.cc new file mode 100755 index 0000000000..dd11f10e4c --- /dev/null +++ b/external/rocksdb/java/rocksjni/comparator.cc @@ -0,0 +1,66 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ for +// rocksdb::Comparator. + +#include +#include +#include +#include +#include + +#include "include/org_rocksdb_AbstractComparator.h" +#include "include/org_rocksdb_Comparator.h" +#include "include/org_rocksdb_DirectComparator.h" +#include "rocksjni/comparatorjnicallback.h" +#include "rocksjni/portal.h" + +// + +/* + * Class: org_rocksdb_Comparator + * Method: createNewComparator0 + * Signature: ()J + */ +jlong Java_org_rocksdb_Comparator_createNewComparator0( + JNIEnv* env, jobject jobj, jlong copt_handle) { + const rocksdb::ComparatorJniCallbackOptions* copt = + reinterpret_cast(copt_handle); + const rocksdb::ComparatorJniCallback* c = + new rocksdb::ComparatorJniCallback(env, jobj, copt); + return reinterpret_cast(c); +} +// + +// use_adaptive_mutex)), + mtx_findShortestSeparator(new port::Mutex(copt->use_adaptive_mutex)) { + // Note: Comparator methods may be accessed by multiple threads, + // so we ref the jvm not the env + const jint rs __attribute__((unused)) = env->GetJavaVM(&m_jvm); + assert(rs == JNI_OK); + + // Note: we want to access the Java Comparator instance + // across multiple method calls, so we create a global ref + m_jComparator = env->NewGlobalRef(jComparator); + + // Note: The name of a Comparator will not change during it's lifetime, + // so we cache it in a global var + jmethodID jNameMethodId = AbstractComparatorJni::getNameMethodId(env); + jstring jsName = (jstring)env->CallObjectMethod(m_jComparator, jNameMethodId); + m_name = JniUtil::copyString(env, jsName); // also releases jsName + + m_jCompareMethodId = AbstractComparatorJni::getCompareMethodId(env); + m_jFindShortestSeparatorMethodId = + AbstractComparatorJni::getFindShortestSeparatorMethodId(env); + m_jFindShortSuccessorMethodId = + AbstractComparatorJni::getFindShortSuccessorMethodId(env); +} + +/** + * Attach/Get a JNIEnv for the current native thread + */ +JNIEnv* BaseComparatorJniCallback::getJniEnv() const { + JNIEnv *env; + jint rs __attribute__((unused)) = + m_jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); + assert(rs == JNI_OK); + return env; +} + +const char* BaseComparatorJniCallback::Name() const { + return m_name.c_str(); +} + +int BaseComparatorJniCallback::Compare(const Slice& a, const Slice& b) const { + JNIEnv* m_env = getJniEnv(); + + // TODO(adamretter): slice objects can potentially be cached using thread + // local variables to avoid locking. Could make this configurable depending on + // performance. 
+ mtx_compare->Lock(); + + AbstractSliceJni::setHandle(m_env, m_jSliceA, &a, JNI_FALSE); + AbstractSliceJni::setHandle(m_env, m_jSliceB, &b, JNI_FALSE); + jint result = + m_env->CallIntMethod(m_jComparator, m_jCompareMethodId, m_jSliceA, + m_jSliceB); + + mtx_compare->Unlock(); + + m_jvm->DetachCurrentThread(); + + return result; +} + +void BaseComparatorJniCallback::FindShortestSeparator( + std::string* start, const Slice& limit) const { + if (start == nullptr) { + return; + } + + JNIEnv* m_env = getJniEnv(); + + const char* startUtf = start->c_str(); + jstring jsStart = m_env->NewStringUTF(startUtf); + + // TODO(adamretter): slice object can potentially be cached using thread local + // variable to avoid locking. Could make this configurable depending on + // performance. + mtx_findShortestSeparator->Lock(); + + AbstractSliceJni::setHandle(m_env, m_jSliceLimit, &limit, JNI_FALSE); + jstring jsResultStart = + (jstring)m_env->CallObjectMethod(m_jComparator, + m_jFindShortestSeparatorMethodId, jsStart, m_jSliceLimit); + + mtx_findShortestSeparator->Unlock(); + + m_env->DeleteLocalRef(jsStart); + + if (jsResultStart != nullptr) { + // update start with result + *start = + JniUtil::copyString(m_env, jsResultStart); // also releases jsResultStart + } + + m_jvm->DetachCurrentThread(); +} + +void BaseComparatorJniCallback::FindShortSuccessor(std::string* key) const { + if (key == nullptr) { + return; + } + + JNIEnv* m_env = getJniEnv(); + + const char* keyUtf = key->c_str(); + jstring jsKey = m_env->NewStringUTF(keyUtf); + + jstring jsResultKey = + (jstring)m_env->CallObjectMethod(m_jComparator, + m_jFindShortSuccessorMethodId, jsKey); + + m_env->DeleteLocalRef(jsKey); + + if (jsResultKey != nullptr) { + // updates key with result, also releases jsResultKey. 
+ *key = JniUtil::copyString(m_env, jsResultKey); + } + + m_jvm->DetachCurrentThread(); +} + +BaseComparatorJniCallback::~BaseComparatorJniCallback() { + JNIEnv* m_env = getJniEnv(); + + m_env->DeleteGlobalRef(m_jComparator); + + // Note: do not need to explicitly detach, as this function is effectively + // called from the Java class's disposeInternal method, and so already + // has an attached thread, getJniEnv above is just a no-op Attach to get + // the env jvm->DetachCurrentThread(); +} + +ComparatorJniCallback::ComparatorJniCallback( + JNIEnv* env, jobject jComparator, + const ComparatorJniCallbackOptions* copt) : + BaseComparatorJniCallback(env, jComparator, copt) { + m_jSliceA = env->NewGlobalRef(SliceJni::construct0(env)); + m_jSliceB = env->NewGlobalRef(SliceJni::construct0(env)); + m_jSliceLimit = env->NewGlobalRef(SliceJni::construct0(env)); +} + +ComparatorJniCallback::~ComparatorJniCallback() { + JNIEnv* m_env = getJniEnv(); + m_env->DeleteGlobalRef(m_jSliceA); + m_env->DeleteGlobalRef(m_jSliceB); + m_env->DeleteGlobalRef(m_jSliceLimit); +} + +DirectComparatorJniCallback::DirectComparatorJniCallback( + JNIEnv* env, jobject jComparator, + const ComparatorJniCallbackOptions* copt) : + BaseComparatorJniCallback(env, jComparator, copt) { + m_jSliceA = env->NewGlobalRef(DirectSliceJni::construct0(env)); + m_jSliceB = env->NewGlobalRef(DirectSliceJni::construct0(env)); + m_jSliceLimit = env->NewGlobalRef(DirectSliceJni::construct0(env)); +} + +DirectComparatorJniCallback::~DirectComparatorJniCallback() { + JNIEnv* m_env = getJniEnv(); + m_env->DeleteGlobalRef(m_jSliceA); + m_env->DeleteGlobalRef(m_jSliceB); + m_env->DeleteGlobalRef(m_jSliceLimit); +} +} // namespace rocksdb diff --git a/external/rocksdb/java/rocksjni/comparatorjnicallback.h b/external/rocksdb/java/rocksjni/comparatorjnicallback.h new file mode 100755 index 0000000000..821a91e451 --- /dev/null +++ b/external/rocksdb/java/rocksjni/comparatorjnicallback.h @@ -0,0 +1,95 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the callback "bridge" between Java and C++ for +// rocksdb::Comparator and rocksdb::DirectComparator. + +#ifndef JAVA_ROCKSJNI_COMPARATORJNICALLBACK_H_ +#define JAVA_ROCKSJNI_COMPARATORJNICALLBACK_H_ + +#include +#include +#include "rocksdb/comparator.h" +#include "rocksdb/slice.h" +#include "port/port.h" + +namespace rocksdb { + +struct ComparatorJniCallbackOptions { + // Use adaptive mutex, which spins in the user space before resorting + // to kernel. This could reduce context switch when the mutex is not + // heavily contended. However, if the mutex is hot, we could end up + // wasting spin time. + // Default: false + bool use_adaptive_mutex; + + ComparatorJniCallbackOptions() : use_adaptive_mutex(false) { + } +}; + +/** + * This class acts as a bridge between C++ + * and Java. The methods in this class will be + * called back from the RocksDB storage engine (C++) + * we then callback to the appropriate Java method + * this enables Comparators to be implemented in Java. + * + * The design of this Comparator caches the Java Slice + * objects that are used in the compare and findShortestSeparator + * method callbacks. 
Instead of creating new objects for each callback + * of those functions, by reuse via setHandle we are a lot + * faster; Unfortunately this means that we have to + * introduce independent locking in regions of each of those methods + * via the mutexs mtx_compare and mtx_findShortestSeparator respectively + */ +class BaseComparatorJniCallback : public Comparator { + public: + BaseComparatorJniCallback( + JNIEnv* env, jobject jComparator, + const ComparatorJniCallbackOptions* copt); + virtual ~BaseComparatorJniCallback(); + virtual const char* Name() const; + virtual int Compare(const Slice& a, const Slice& b) const; + virtual void FindShortestSeparator( + std::string* start, const Slice& limit) const; + virtual void FindShortSuccessor(std::string* key) const; + + private: + // used for synchronisation in compare method + port::Mutex* mtx_compare; + // used for synchronisation in findShortestSeparator method + port::Mutex* mtx_findShortestSeparator; + JavaVM* m_jvm; + jobject m_jComparator; + std::string m_name; + jmethodID m_jCompareMethodId; + jmethodID m_jFindShortestSeparatorMethodId; + jmethodID m_jFindShortSuccessorMethodId; + + protected: + JNIEnv* getJniEnv() const; + jobject m_jSliceA; + jobject m_jSliceB; + jobject m_jSliceLimit; +}; + +class ComparatorJniCallback : public BaseComparatorJniCallback { + public: + ComparatorJniCallback( + JNIEnv* env, jobject jComparator, + const ComparatorJniCallbackOptions* copt); + ~ComparatorJniCallback(); +}; + +class DirectComparatorJniCallback : public BaseComparatorJniCallback { + public: + DirectComparatorJniCallback( + JNIEnv* env, jobject jComparator, + const ComparatorJniCallbackOptions* copt); + ~DirectComparatorJniCallback(); +}; +} // namespace rocksdb + +#endif // JAVA_ROCKSJNI_COMPARATORJNICALLBACK_H_ diff --git a/external/rocksdb/java/rocksjni/env.cc b/external/rocksdb/java/rocksjni/env.cc new file mode 100755 index 0000000000..a58f54ea7b --- /dev/null +++ b/external/rocksdb/java/rocksjni/env.cc @@ -0,0 +1,79 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::Env methods from Java side. 
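The Env bindings below expose the process-wide default Env, its two background thread pools (FLUSH_POOL maps to Env::Priority::LOW, COMPACTION_POOL to HIGH), and the in-memory Env that the benchmark's use_mem_env flag selects. A hedged Java-side sketch, assuming this era's constants and the no-argument RocksMemEnv constructor:

import org.rocksdb.Env;
import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksMemEnv;

// Sketch: size the background pools, or run a DB entirely in memory.
public class EnvExample {
  public static void main(String[] args) throws Exception {
    Env.getDefault().setBackgroundThreads(2, Env.FLUSH_POOL);
    Env.getDefault().setBackgroundThreads(4, Env.COMPACTION_POOL);

    final RocksMemEnv memEnv = new RocksMemEnv(); // backed by NewMemEnv()
    final Options options = new Options()
        .setCreateIfMissing(true)
        .setEnv(memEnv);
    final RocksDB db = RocksDB.open(options, "/dir-is-virtual");
    db.close();
    options.dispose();
    memEnv.dispose();
  }
}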
+ +#include "include/org_rocksdb_Env.h" +#include "include/org_rocksdb_RocksEnv.h" +#include "include/org_rocksdb_RocksMemEnv.h" +#include "rocksdb/env.h" + +/* + * Class: org_rocksdb_Env + * Method: getDefaultEnvInternal + * Signature: ()J + */ +jlong Java_org_rocksdb_Env_getDefaultEnvInternal( + JNIEnv* env, jclass jclazz) { + return reinterpret_cast(rocksdb::Env::Default()); +} + +/* + * Class: org_rocksdb_Env + * Method: setBackgroundThreads + * Signature: (JII)V + */ +void Java_org_rocksdb_Env_setBackgroundThreads( + JNIEnv* env, jobject jobj, jlong jhandle, + jint num, jint priority) { + auto* rocks_env = reinterpret_cast(jhandle); + switch (priority) { + case org_rocksdb_Env_FLUSH_POOL: + rocks_env->SetBackgroundThreads(num, rocksdb::Env::Priority::LOW); + break; + case org_rocksdb_Env_COMPACTION_POOL: + rocks_env->SetBackgroundThreads(num, rocksdb::Env::Priority::HIGH); + break; + } +} + +/* + * Class: org_rocksdb_sEnv + * Method: getThreadPoolQueueLen + * Signature: (JI)I + */ +jint Java_org_rocksdb_Env_getThreadPoolQueueLen( + JNIEnv* env, jobject jobj, jlong jhandle, jint pool_id) { + auto* rocks_env = reinterpret_cast(jhandle); + switch (pool_id) { + case org_rocksdb_RocksEnv_FLUSH_POOL: + return rocks_env->GetThreadPoolQueueLen(rocksdb::Env::Priority::LOW); + case org_rocksdb_RocksEnv_COMPACTION_POOL: + return rocks_env->GetThreadPoolQueueLen(rocksdb::Env::Priority::HIGH); + } + return 0; +} + +/* + * Class: org_rocksdb_RocksMemEnv + * Method: createMemEnv + * Signature: ()J + */ +jlong Java_org_rocksdb_RocksMemEnv_createMemEnv( + JNIEnv* env, jclass jclazz) { + return reinterpret_cast(rocksdb::NewMemEnv( + rocksdb::Env::Default())); +} + +/* + * Class: org_rocksdb_RocksMemEnv + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_RocksMemEnv_disposeInternal( + JNIEnv* env, jobject jobj, jlong jhandle) { + delete reinterpret_cast(jhandle); +} diff --git a/external/rocksdb/java/rocksjni/filter.cc b/external/rocksdb/java/rocksjni/filter.cc new file mode 100755 index 0000000000..96ef9856bc --- /dev/null +++ b/external/rocksdb/java/rocksjni/filter.cc @@ -0,0 +1,46 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ for +// rocksdb::FilterPolicy. 
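createNewBloomFilter below wraps the new FilterPolicy in a heap-allocated shared_ptr, so disposeInternal only drops a reference; a table factory that was handed the filter keeps it alive. From the Java side this corresponds to the benchmark's bloom_bits flag; a hedged sketch, assuming this era's BlockBasedTableConfig API:

import org.rocksdb.BlockBasedTableConfig;
import org.rocksdb.BloomFilter;
import org.rocksdb.Options;

// Sketch: 10 bloom bits per key, block-based (not full) filter blocks.
public class BloomFilterExample {
  public static void main(String[] args) {
    // args mirror the JNI: bits_per_key, use_block_base_builder
    final BloomFilter bloom = new BloomFilter(10, false);
    final BlockBasedTableConfig tableConfig =
        new BlockBasedTableConfig().setFilter(bloom);
    final Options options = new Options()
        .setCreateIfMissing(true)
        .setTableFormatConfig(tableConfig);
    // ... open the DB with these options; point lookups on missing keys
    // can now skip most data blocks.
    options.dispose();
  }
}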
+ +#include +#include +#include +#include + +#include "include/org_rocksdb_Filter.h" +#include "include/org_rocksdb_BloomFilter.h" +#include "rocksjni/portal.h" +#include "rocksdb/filter_policy.h" + +/* + * Class: org_rocksdb_BloomFilter + * Method: createBloomFilter + * Signature: (IZ)J + */ +jlong Java_org_rocksdb_BloomFilter_createNewBloomFilter( + JNIEnv* env, jclass jcls, jint bits_per_key, + jboolean use_block_base_builder) { + auto* fp = const_cast( + rocksdb::NewBloomFilterPolicy(bits_per_key, use_block_base_builder)); + auto* pFilterPolicy = + new std::shared_ptr; + *pFilterPolicy = std::shared_ptr(fp); + return reinterpret_cast(pFilterPolicy); +} + +/* + * Class: org_rocksdb_Filter + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_Filter_disposeInternal( + JNIEnv* env, jobject jobj, jlong jhandle) { + + std::shared_ptr *handle = + reinterpret_cast *>(jhandle); + handle->reset(); +} diff --git a/external/rocksdb/java/rocksjni/iterator.cc b/external/rocksdb/java/rocksjni/iterator.cc new file mode 100755 index 0000000000..c5e64adfbe --- /dev/null +++ b/external/rocksdb/java/rocksjni/iterator.cc @@ -0,0 +1,144 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::Iterator methods from Java side. + +#include +#include +#include + +#include "include/org_rocksdb_RocksIterator.h" +#include "rocksjni/portal.h" +#include "rocksdb/iterator.h" + +/* + * Class: org_rocksdb_RocksIterator + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_disposeInternal( + JNIEnv* env, jobject jobj, jlong handle) { + auto it = reinterpret_cast(handle); + delete it; +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: isValid0 + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_RocksIterator_isValid0( + JNIEnv* env, jobject jobj, jlong handle) { + return reinterpret_cast(handle)->Valid(); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: seekToFirst0 + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_seekToFirst0( + JNIEnv* env, jobject jobj, jlong handle) { + reinterpret_cast(handle)->SeekToFirst(); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: seekToLast0 + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_seekToLast0( + JNIEnv* env, jobject jobj, jlong handle) { + reinterpret_cast(handle)->SeekToLast(); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: next0 + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_next0( + JNIEnv* env, jobject jobj, jlong handle) { + reinterpret_cast(handle)->Next(); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: prev0 + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_prev0( + JNIEnv* env, jobject jobj, jlong handle) { + reinterpret_cast(handle)->Prev(); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: seek0 + * Signature: (J[BI)V + */ +void Java_org_rocksdb_RocksIterator_seek0( + JNIEnv* env, jobject jobj, jlong handle, + jbyteArray jtarget, jint jtarget_len) { + auto it = reinterpret_cast(handle); + jbyte* target = env->GetByteArrayElements(jtarget, 0); + rocksdb::Slice target_slice( + reinterpret_cast(target), jtarget_len); + + it->Seek(target_slice); + + 
env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: status0 + * Signature: (J)V + */ +void Java_org_rocksdb_RocksIterator_status0( + JNIEnv* env, jobject jobj, jlong handle) { + auto it = reinterpret_cast(handle); + rocksdb::Status s = it->status(); + + if (s.ok()) { + return; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: key0 + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_RocksIterator_key0( + JNIEnv* env, jobject jobj, jlong handle) { + auto it = reinterpret_cast(handle); + rocksdb::Slice key_slice = it->key(); + + jbyteArray jkey = env->NewByteArray(static_cast(key_slice.size())); + env->SetByteArrayRegion(jkey, 0, static_cast(key_slice.size()), + reinterpret_cast(key_slice.data())); + return jkey; +} + +/* + * Class: org_rocksdb_RocksIterator + * Method: value0 + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_RocksIterator_value0( + JNIEnv* env, jobject jobj, jlong handle) { + auto it = reinterpret_cast(handle); + rocksdb::Slice value_slice = it->value(); + + jbyteArray jkeyValue = + env->NewByteArray(static_cast(value_slice.size())); + env->SetByteArrayRegion(jkeyValue, 0, static_cast(value_slice.size()), + reinterpret_cast(value_slice.data())); + return jkeyValue; +} diff --git a/external/rocksdb/java/rocksjni/loggerjnicallback.cc b/external/rocksdb/java/rocksjni/loggerjnicallback.cc new file mode 100755 index 0000000000..42e97015ee --- /dev/null +++ b/external/rocksdb/java/rocksjni/loggerjnicallback.cc @@ -0,0 +1,234 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the callback "bridge" between Java and C++ for +// rocksdb::Logger. 
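LoggerJniCallback below holds a global reference to the Java logger object and one per InfoLogLevel enum constant, so the hot Logv path never repeats those lookups. On the Java side, an application (or this benchmark) routes RocksDB's internal log through a subclass of org.rocksdb.Logger; a hedged sketch, assuming this era's Logger(Options) constructor:

import org.rocksdb.InfoLogLevel;
import org.rocksdb.Logger;
import org.rocksdb.Options;

// Sketch: capture RocksDB's internal log lines in Java. The native side
// formats the message (see the two-pass buffer logic in Logv) and then
// invokes log() through the cached method id.
public class LoggerExample {
  public static void main(String[] args) {
    final Options options = new Options().setCreateIfMissing(true);
    final Logger logger = new Logger(options) {
      @Override
      protected void log(final InfoLogLevel level, final String msg) {
        System.out.println("[" + level + "] " + msg);
      }
    };
    options.setLogger(logger);
    // ... RocksDB.open(options, path) now logs through the callback.
  }
}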
+ +#include "include/org_rocksdb_Logger.h" + +#include "rocksjni/loggerjnicallback.h" +#include "rocksjni/portal.h" + +namespace rocksdb { + +LoggerJniCallback::LoggerJniCallback( + JNIEnv* env, jobject jlogger) { + const jint rs __attribute__((unused)) = env->GetJavaVM(&m_jvm); + assert(rs == JNI_OK); + + // Note: we want to access the Java Logger instance + // across multiple method calls, so we create a global ref + m_jLogger = env->NewGlobalRef(jlogger); + m_jLogMethodId = LoggerJni::getLogMethodId(env); + + jobject jdebug_level = InfoLogLevelJni::DEBUG_LEVEL(env); + assert(jdebug_level != nullptr); + m_jdebug_level = env->NewGlobalRef(jdebug_level); + + jobject jinfo_level = InfoLogLevelJni::INFO_LEVEL(env); + assert(jinfo_level != nullptr); + m_jinfo_level = env->NewGlobalRef(jinfo_level); + + jobject jwarn_level = InfoLogLevelJni::WARN_LEVEL(env); + assert(jwarn_level != nullptr); + m_jwarn_level = env->NewGlobalRef(jwarn_level); + + jobject jerror_level = InfoLogLevelJni::ERROR_LEVEL(env); + assert(jerror_level != nullptr); + m_jerror_level = env->NewGlobalRef(jerror_level); + + jobject jfatal_level = InfoLogLevelJni::FATAL_LEVEL(env); + assert(jfatal_level != nullptr); + m_jfatal_level = env->NewGlobalRef(jfatal_level); + + jobject jheader_level = InfoLogLevelJni::HEADER_LEVEL(env); + assert(jheader_level != nullptr); + m_jheader_level = env->NewGlobalRef(jheader_level); +} + +/** + * Get JNIEnv for current native thread + */ +JNIEnv* LoggerJniCallback::getJniEnv() const { + JNIEnv *env; + jint rs __attribute__((unused)) = + m_jvm->AttachCurrentThread(reinterpret_cast(&env), NULL); + assert(rs == JNI_OK); + return env; +} + +void LoggerJniCallback::Logv(const char* format, va_list ap) { + // We implement this method because it is virtual but we don't + // use it because we need to know about the log level. +} + +void LoggerJniCallback::Logv(const InfoLogLevel log_level, + const char* format, va_list ap) { + if (GetInfoLogLevel() <= log_level) { + + // determine InfoLogLevel java enum instance + jobject jlog_level; + switch (log_level) { + case rocksdb::InfoLogLevel::DEBUG_LEVEL: + jlog_level = m_jdebug_level; + break; + case rocksdb::InfoLogLevel::INFO_LEVEL: + jlog_level = m_jinfo_level; + break; + case rocksdb::InfoLogLevel::WARN_LEVEL: + jlog_level = m_jwarn_level; + break; + case rocksdb::InfoLogLevel::ERROR_LEVEL: + jlog_level = m_jerror_level; + break; + case rocksdb::InfoLogLevel::FATAL_LEVEL: + jlog_level = m_jfatal_level; + break; + case rocksdb::InfoLogLevel::HEADER_LEVEL: + jlog_level = m_jheader_level; + break; + default: + jlog_level = m_jfatal_level; + break; + } + + // We try twice: the first time with a fixed-size stack allocated buffer, + // and the second time with a much larger dynamically allocated buffer. 
+
+void LoggerJniCallback::Logv(const char* format, va_list ap) {
+  // We implement this method because it is virtual but we don't
+  // use it because we need to know about the log level.
+}
+
+void LoggerJniCallback::Logv(const InfoLogLevel log_level,
+    const char* format, va_list ap) {
+  if (GetInfoLogLevel() <= log_level) {
+
+    // determine InfoLogLevel java enum instance
+    jobject jlog_level;
+    switch (log_level) {
+      case rocksdb::InfoLogLevel::DEBUG_LEVEL:
+        jlog_level = m_jdebug_level;
+        break;
+      case rocksdb::InfoLogLevel::INFO_LEVEL:
+        jlog_level = m_jinfo_level;
+        break;
+      case rocksdb::InfoLogLevel::WARN_LEVEL:
+        jlog_level = m_jwarn_level;
+        break;
+      case rocksdb::InfoLogLevel::ERROR_LEVEL:
+        jlog_level = m_jerror_level;
+        break;
+      case rocksdb::InfoLogLevel::FATAL_LEVEL:
+        jlog_level = m_jfatal_level;
+        break;
+      case rocksdb::InfoLogLevel::HEADER_LEVEL:
+        jlog_level = m_jheader_level;
+        break;
+      default:
+        jlog_level = m_jfatal_level;
+        break;
+    }
+
+    // We try twice: the first time with a fixed-size stack allocated buffer,
+    // and the second time with a much larger dynamically allocated buffer.
+    char buffer[500];
+    for (int iter = 0; iter < 2; iter++) {
+      char* base;
+      int bufsize;
+      if (iter == 0) {
+        bufsize = sizeof(buffer);
+        base = buffer;
+      } else {
+        bufsize = 30000;
+        base = new char[bufsize];
+      }
+      char* p = base;
+      char* limit = base + bufsize;
+      // Print the message
+      if (p < limit) {
+        va_list backup_ap;
+        va_copy(backup_ap, ap);
+        p += vsnprintf(p, limit - p, format, backup_ap);
+        va_end(backup_ap);
+      }
+      // Truncate to available space if necessary
+      if (p >= limit) {
+        if (iter == 0) {
+          continue;  // Try again with larger buffer
+        } else {
+          p = limit - 1;
+        }
+      }
+      assert(p < limit);
+      *p++ = '\0';
+
+      JNIEnv* env = getJniEnv();
+
+      // pass java string to callback handler
+      env->CallVoidMethod(
+          m_jLogger,
+          m_jLogMethodId,
+          jlog_level,
+          env->NewStringUTF(base));
+
+      if (base != buffer) {
+        delete[] base;
+      }
+      break;
+    }
+    m_jvm->DetachCurrentThread();
+  }
+}
+
+LoggerJniCallback::~LoggerJniCallback() {
+  JNIEnv* env = getJniEnv();
+  env->DeleteGlobalRef(m_jLogger);
+
+  env->DeleteGlobalRef(m_jdebug_level);
+  env->DeleteGlobalRef(m_jinfo_level);
+  env->DeleteGlobalRef(m_jwarn_level);
+  env->DeleteGlobalRef(m_jerror_level);
+  env->DeleteGlobalRef(m_jfatal_level);
+  env->DeleteGlobalRef(m_jheader_level);
+
+  m_jvm->DetachCurrentThread();
+}
+
+}  // namespace rocksdb
+
+/*
+ * Class: org_rocksdb_Logger
+ * Method: createNewLoggerOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Logger_createNewLoggerOptions(
+    JNIEnv* env, jobject jobj, jlong joptions) {
+  rocksdb::LoggerJniCallback* c =
+      new rocksdb::LoggerJniCallback(env, jobj);
+  // set log level
+  c->SetInfoLogLevel(reinterpret_cast<rocksdb::Options*>
+      (joptions)->info_log_level);
+  std::shared_ptr<rocksdb::LoggerJniCallback>* pLoggerJniCallback =
+      new std::shared_ptr<rocksdb::LoggerJniCallback>;
+  *pLoggerJniCallback = std::shared_ptr<rocksdb::LoggerJniCallback>(c);
+  return reinterpret_cast<jlong>(pLoggerJniCallback);
+}
+
+/*
+ * Class: org_rocksdb_Logger
+ * Method: createNewLoggerDbOptions
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Logger_createNewLoggerDbOptions(
+    JNIEnv* env, jobject jobj, jlong jdb_options) {
+  rocksdb::LoggerJniCallback* c =
+      new rocksdb::LoggerJniCallback(env, jobj);
+  // set log level
+  c->SetInfoLogLevel(reinterpret_cast<rocksdb::DBOptions*>
+      (jdb_options)->info_log_level);
+  std::shared_ptr<rocksdb::LoggerJniCallback>* pLoggerJniCallback =
+      new std::shared_ptr<rocksdb::LoggerJniCallback>;
+  *pLoggerJniCallback = std::shared_ptr<rocksdb::LoggerJniCallback>(c);
+  return reinterpret_cast<jlong>(pLoggerJniCallback);
+}
+
+/*
+ * Class: org_rocksdb_Logger
+ * Method: setInfoLogLevel
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Logger_setInfoLogLevel(
+    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jlog_level) {
+  std::shared_ptr<rocksdb::LoggerJniCallback>* handle =
+      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback>*>(jhandle);
+  (*handle)->SetInfoLogLevel(static_cast<rocksdb::InfoLogLevel>(jlog_level));
+}
+
+/*
+ * Class: org_rocksdb_Logger
+ * Method: infoLogLevel
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Logger_infoLogLevel(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  std::shared_ptr<rocksdb::LoggerJniCallback>* handle =
+      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback>*>(jhandle);
+  return static_cast<jbyte>((*handle)->GetInfoLogLevel());
+}
+
+/*
+ * Class: org_rocksdb_Logger
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Logger_disposeInternal(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  std::shared_ptr<rocksdb::LoggerJniCallback>* handle =
+      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback>*>(jhandle);
+  handle->reset();
+}
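The Logv implementation above uses a classic two-pass printf idiom: format into a 500-byte stack buffer, and only fall back to a heap allocation (a fixed 30000-byte buffer in this file) when the first pass truncates. A standalone sketch of the same idiom, not part of the patch, sizing the retry buffer exactly instead of using a fixed cap:

#include <cstdarg>
#include <cstdio>
#include <string>
#include <vector>

// Sketch only: format a message, allocating only when the stack buffer
// is too small. vsnprintf returns the number of characters that would
// have been written, which tells us the exact retry size.
std::string formatTwice(const char* format, ...) {
  va_list ap;
  char stackbuf[500];
  va_start(ap, format);
  int needed = vsnprintf(stackbuf, sizeof(stackbuf), format, ap);
  va_end(ap);
  if (needed < 0) {
    return std::string();  // encoding error
  }
  if (static_cast<size_t>(needed) < sizeof(stackbuf)) {
    return std::string(stackbuf, needed);  // common case: no allocation
  }
  std::vector<char> heapbuf(static_cast<size_t>(needed) + 1);
  va_start(ap, format);
  vsnprintf(heapbuf.data(), heapbuf.size(), format, ap);
  va_end(ap);
  return std::string(heapbuf.data(), needed);
}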
diff --git a/external/rocksdb/java/rocksjni/loggerjnicallback.h b/external/rocksdb/java/rocksjni/loggerjnicallback.h
new file mode 100755
index 0000000000..9a7605d24f
--- /dev/null
+++ b/external/rocksdb/java/rocksjni/loggerjnicallback.h
@@ -0,0 +1,50 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the callback "bridge" between Java and C++ for
+// rocksdb::Logger
+
+#ifndef JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_
+#define JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_
+
+#include <jni.h>
+#include <string>
+#include "port/port.h"
+#include "rocksdb/env.h"
+
+namespace rocksdb {
+
+  class LoggerJniCallback : public Logger {
+   public:
+    LoggerJniCallback(JNIEnv* env, jobject jLogger);
+    virtual ~LoggerJniCallback();
+
+    using Logger::SetInfoLogLevel;
+    using Logger::GetInfoLogLevel;
+    // Write an entry to the log file with the specified format.
+    virtual void Logv(const char* format, va_list ap);
+    // Write an entry to the log file with the specified log level
+    // and format. Any log with level under the internal log level
+    // of *this (see @SetInfoLogLevel and @GetInfoLogLevel) will not be
+    // printed.
+    virtual void Logv(const InfoLogLevel log_level,
+        const char* format, va_list ap);
+
+   protected:
+    JNIEnv* getJniEnv() const;
+   private:
+    JavaVM* m_jvm;
+    jobject m_jLogger;
+    jmethodID m_jLogMethodId;
+    jobject m_jdebug_level;
+    jobject m_jinfo_level;
+    jobject m_jwarn_level;
+    jobject m_jerror_level;
+    jobject m_jfatal_level;
+    jobject m_jheader_level;
+  };
+}  // namespace rocksdb
+
+#endif  // JAVA_ROCKSJNI_LOGGERJNICALLBACK_H_
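One convention worth noting before the MemTable bridge that follows: factories created there are returned to Java as raw pointers packed into a jlong, with ownership transferred only when an options object later adopts them (see setMemTableFactory in options.cc below), whereas the Logger above hands Java the address of a heap-allocated std::shared_ptr. A small sketch of both handle flavours, not part of the patch and using an illustrative Widget type:

#include <cstdint>
#include <memory>

struct Widget { int x = 0; };

// Raw-handle flavour: the caller must eventually hand this pointer to an
// owner (e.g. a shared_ptr::reset) or delete it, otherwise it leaks.
int64_t newWidgetHandle() {
  return reinterpret_cast<int64_t>(new Widget());
}

// Shared-handle flavour: the jlong identifies a shared_ptr on the heap;
// deleting the holder releases one reference to the shared object.
int64_t newSharedWidgetHandle() {
  auto* holder = new std::shared_ptr<Widget>(std::make_shared<Widget>());
  return reinterpret_cast<int64_t>(holder);
}

void disposeSharedWidgetHandle(int64_t handle) {
  delete reinterpret_cast<std::shared_ptr<Widget>*>(handle);
}

The shared_ptr flavour lets the C++ side keep a callback alive even after Java disposes its handle; note that the Logger's disposeInternal above calls reset() on the holder rather than deleting it.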
diff --git a/external/rocksdb/java/rocksjni/memtablejni.cc b/external/rocksdb/java/rocksjni/memtablejni.cc
new file mode 100755
index 0000000000..ead038d50b
--- /dev/null
+++ b/external/rocksdb/java/rocksjni/memtablejni.cc
@@ -0,0 +1,90 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++ for MemTables.
+
+#include "rocksjni/portal.h"
+#include "include/org_rocksdb_HashSkipListMemTableConfig.h"
+#include "include/org_rocksdb_HashLinkedListMemTableConfig.h"
+#include "include/org_rocksdb_VectorMemTableConfig.h"
+#include "include/org_rocksdb_SkipListMemTableConfig.h"
+#include "rocksdb/memtablerep.h"
+
+/*
+ * Class: org_rocksdb_HashSkipListMemTableConfig
+ * Method: newMemTableFactoryHandle
+ * Signature: (JII)J
+ */
+jlong Java_org_rocksdb_HashSkipListMemTableConfig_newMemTableFactoryHandle(
+    JNIEnv* env, jobject jobj, jlong jbucket_count,
+    jint jheight, jint jbranching_factor) {
+  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jbucket_count);
+  if (s.ok()) {
+    return reinterpret_cast<jlong>(rocksdb::NewHashSkipListRepFactory(
+        static_cast<size_t>(jbucket_count),
+        static_cast<int32_t>(jheight),
+        static_cast<int32_t>(jbranching_factor)));
+  }
+  rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  return 0;
+}
+
+/*
+ * Class: org_rocksdb_HashLinkedListMemTableConfig
+ * Method: newMemTableFactoryHandle
+ * Signature: (JJIZI)J
+ */
+jlong Java_org_rocksdb_HashLinkedListMemTableConfig_newMemTableFactoryHandle(
+    JNIEnv* env, jobject jobj, jlong jbucket_count, jlong jhuge_page_tlb_size,
+    jint jbucket_entries_logging_threshold,
+    jboolean jif_log_bucket_dist_when_flash, jint jthreshold_use_skiplist) {
+  rocksdb::Status statusBucketCount =
+      rocksdb::check_if_jlong_fits_size_t(jbucket_count);
+  rocksdb::Status statusHugePageTlb =
+      rocksdb::check_if_jlong_fits_size_t(jhuge_page_tlb_size);
+  if (statusBucketCount.ok() && statusHugePageTlb.ok()) {
+    return reinterpret_cast<jlong>(rocksdb::NewHashLinkListRepFactory(
+        static_cast<size_t>(jbucket_count),
+        static_cast<size_t>(jhuge_page_tlb_size),
+        static_cast<int32_t>(jbucket_entries_logging_threshold),
+        static_cast<bool>(jif_log_bucket_dist_when_flash),
+        static_cast<int32_t>(jthreshold_use_skiplist)));
+  }
+  rocksdb::IllegalArgumentExceptionJni::ThrowNew(env,
+      !statusBucketCount.ok() ? statusBucketCount : statusHugePageTlb);
+  return 0;
+}
+
+/*
+ * Class: org_rocksdb_VectorMemTableConfig
+ * Method: newMemTableFactoryHandle
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_VectorMemTableConfig_newMemTableFactoryHandle(
+    JNIEnv* env, jobject jobj, jlong jreserved_size) {
+  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jreserved_size);
+  if (s.ok()) {
+    return reinterpret_cast<jlong>(new rocksdb::VectorRepFactory(
+        static_cast<size_t>(jreserved_size)));
+  }
+  rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  return 0;
+}
+
+/*
+ * Class: org_rocksdb_SkipListMemTableConfig
+ * Method: newMemTableFactoryHandle0
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_SkipListMemTableConfig_newMemTableFactoryHandle0(
+    JNIEnv* env, jobject jobj, jlong jlookahead) {
+  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jlookahead);
+  if (s.ok()) {
+    return reinterpret_cast<jlong>(new rocksdb::SkipListFactory(
+        static_cast<size_t>(jlookahead)));
+  }
+  rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  return 0;
+}
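Each factory constructor above routes its Java long through a range check before narrowing it to size_t. The reason: jlong is always 64-bit, while size_t may be 32-bit, so an unchecked cast could silently truncate. A hedged sketch of what such a check has to guard against, not part of the patch (the real check lives in rocksjni/portal.h as check_if_jlong_fits_size_t):

#include <cstddef>
#include <cstdint>
#include <limits>

// Sketch only: reject values that cannot be represented as size_t.
bool jlongFitsSizeT(int64_t jvalue) {
  // Negative sizes are invalid, and anything above SIZE_MAX would wrap.
  return jvalue >= 0 &&
      static_cast<uint64_t>(jvalue) <= std::numeric_limits<size_t>::max();
}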
diff --git a/external/rocksdb/java/rocksjni/merge_operator.cc b/external/rocksdb/java/rocksjni/merge_operator.cc
new file mode 100755
index 0000000000..68fe9b6351
--- /dev/null
+++ b/external/rocksdb/java/rocksjni/merge_operator.cc
@@ -0,0 +1,37 @@
+// Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com). All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++
+// for rocksdb::MergeOperator.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory>
+#include <string>
+
+#include "include/org_rocksdb_StringAppendOperator.h"
+#include "rocksjni/portal.h"
+#include "rocksdb/db.h"
+#include "rocksdb/options.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/table.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/merge_operator.h"
+#include "utilities/merge_operators.h"
+
+/*
+ * Class: org_rocksdb_StringAppendOperator
+ * Method: newMergeOperatorHandleImpl
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_StringAppendOperator_newMergeOperatorHandleImpl(
+    JNIEnv* env, jobject jobj) {
+  std::shared_ptr<rocksdb::MergeOperator>* op =
+      new std::shared_ptr<rocksdb::MergeOperator>();
+  *op = rocksdb::MergeOperators::CreateFromStringId("stringappend");
+  return reinterpret_cast<jlong>(op);
+}
diff --git a/external/rocksdb/java/rocksjni/options.cc b/external/rocksdb/java/rocksjni/options.cc
new file mode 100755
index 0000000000..755c56d22c
--- /dev/null
+++ b/external/rocksdb/java/rocksjni/options.cc
@@ -0,0 +1,4147 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++ for rocksdb::Options.
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory>
+
+#include "include/org_rocksdb_Options.h"
+#include "include/org_rocksdb_DBOptions.h"
+#include "include/org_rocksdb_ColumnFamilyOptions.h"
+#include "include/org_rocksdb_WriteOptions.h"
+#include "include/org_rocksdb_ReadOptions.h"
+#include "include/org_rocksdb_ComparatorOptions.h"
+#include "include/org_rocksdb_FlushOptions.h"
+
+#include "rocksjni/comparatorjnicallback.h"
+#include "rocksjni/portal.h"
+
+#include "rocksdb/db.h"
+#include "rocksdb/options.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/table.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/rate_limiter.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/convenience.h"
+#include "rocksdb/merge_operator.h"
+#include "utilities/merge_operators.h"
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: newOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_Options_newOptions__(JNIEnv* env, jclass jcls) {
+  rocksdb::Options* op = new rocksdb::Options();
+  return reinterpret_cast<jlong>(op);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: newOptions
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_Options_newOptions__JJ(JNIEnv* env, jclass jcls,
+    jlong jdboptions, jlong jcfoptions) {
+  auto* dbOpt = reinterpret_cast<const rocksdb::DBOptions*>(jdboptions);
+  auto* cfOpt = reinterpret_cast<const rocksdb::ColumnFamilyOptions*>(
+      jcfoptions);
+  rocksdb::Options* op = new rocksdb::Options(*dbOpt, *cfOpt);
+  return reinterpret_cast<jlong>(op);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Options_disposeInternal(
+    JNIEnv* env, jobject jobj, jlong handle) {
+  delete reinterpret_cast<rocksdb::Options*>(handle);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setIncreaseParallelism
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setIncreaseParallelism(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint totalThreads) {
+  reinterpret_cast<rocksdb::Options*>
+      (jhandle)->IncreaseParallelism(static_cast<int>(totalThreads));
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCreateIfMissing
+ * Signature: (JZ)V
+ */
+void
Java_org_rocksdb_Options_setCreateIfMissing( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) { + reinterpret_cast(jhandle)->create_if_missing = flag; +} + +/* + * Class: org_rocksdb_Options + * Method: createIfMissing + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_createIfMissing( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->create_if_missing; +} + +/* + * Class: org_rocksdb_Options + * Method: setCreateMissingColumnFamilies + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setCreateMissingColumnFamilies( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) { + reinterpret_cast + (jhandle)->create_missing_column_families = flag; +} + +/* + * Class: org_rocksdb_Options + * Method: createMissingColumnFamilies + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_createMissingColumnFamilies( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast + (jhandle)->create_missing_column_families; +} + +/* + * Class: org_rocksdb_Options + * Method: setComparatorHandle + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setComparatorHandle__JI( + JNIEnv* env, jobject jobj, jlong jhandle, jint builtinComparator) { + switch (builtinComparator) { + case 1: + reinterpret_cast(jhandle)->comparator = + rocksdb::ReverseBytewiseComparator(); + break; + default: + reinterpret_cast(jhandle)->comparator = + rocksdb::BytewiseComparator(); + break; + } +} + +/* + * Class: org_rocksdb_Options + * Method: setComparatorHandle + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setComparatorHandle__JJ( + JNIEnv* env, jobject jobj, jlong jopt_handle, jlong jcomparator_handle) { + reinterpret_cast(jopt_handle)->comparator = + reinterpret_cast(jcomparator_handle); +} + +/* + * Class: org_rocksdb_Options + * Method: setMergeOperatorName + * Signature: (JJjava/lang/String)V + */ +void Java_org_rocksdb_Options_setMergeOperatorName( + JNIEnv* env, jobject jobj, jlong jhandle, jstring jop_name) { + auto options = reinterpret_cast(jhandle); + const char* op_name = env->GetStringUTFChars(jop_name, 0); + options->merge_operator = rocksdb::MergeOperators::CreateFromStringId( + op_name); + env->ReleaseStringUTFChars(jop_name, op_name); +} + +/* + * Class: org_rocksdb_Options + * Method: setMergeOperator + * Signature: (JJjava/lang/String)V + */ +void Java_org_rocksdb_Options_setMergeOperator( + JNIEnv* env, jobject jobj, jlong jhandle, jlong mergeOperatorHandle) { + reinterpret_cast(jhandle)->merge_operator = + *(reinterpret_cast*> + (mergeOperatorHandle)); +} + +/* + * Class: org_rocksdb_Options + * Method: setWriteBufferSize + * Signature: (JJ)I + */ +void Java_org_rocksdb_Options_setWriteBufferSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jwrite_buffer_size) { + rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jwrite_buffer_size); + if (s.ok()) { + reinterpret_cast(jhandle)->write_buffer_size = + jwrite_buffer_size; + } else { + rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: writeBufferSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_writeBufferSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->write_buffer_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxWriteBufferNumber + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxWriteBufferNumber( + JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_write_buffer_number) { + 
reinterpret_cast(jhandle)->max_write_buffer_number = + jmax_write_buffer_number; +} + +/* + * Class: org_rocksdb_Options + * Method: createStatistics + * Signature: (J)V + */ +void Java_org_rocksdb_Options_createStatistics( + JNIEnv* env, jobject jobj, jlong jOptHandle) { + reinterpret_cast(jOptHandle)->statistics = + rocksdb::CreateDBStatistics(); +} + +/* + * Class: org_rocksdb_Options + * Method: statisticsPtr + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_statisticsPtr( + JNIEnv* env, jobject jobj, jlong jOptHandle) { + auto st = reinterpret_cast(jOptHandle)->statistics.get(); + return reinterpret_cast(st); +} + +/* + * Class: org_rocksdb_Options + * Method: maxWriteBufferNumber + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxWriteBufferNumber( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->max_write_buffer_number; +} + +/* + * Class: org_rocksdb_Options + * Method: errorIfExists + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_errorIfExists( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->error_if_exists; +} + +/* + * Class: org_rocksdb_Options + * Method: setErrorIfExists + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setErrorIfExists( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean error_if_exists) { + reinterpret_cast(jhandle)->error_if_exists = + static_cast(error_if_exists); +} + +/* + * Class: org_rocksdb_Options + * Method: paranoidChecks + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_paranoidChecks( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->paranoid_checks; +} + +/* + * Class: org_rocksdb_Options + * Method: setParanoidChecks + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setParanoidChecks( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean paranoid_checks) { + reinterpret_cast(jhandle)->paranoid_checks = + static_cast(paranoid_checks); +} + +/* + * Class: org_rocksdb_Options + * Method: setEnv + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setEnv( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jenv) { + reinterpret_cast(jhandle)->env = + reinterpret_cast(jenv); +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxTotalWalSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxTotalWalSize( + JNIEnv* env, jobject jobj, jlong jhandle, + jlong jmax_total_wal_size) { + reinterpret_cast(jhandle)->max_total_wal_size = + static_cast(jmax_total_wal_size); +} + +/* + * Class: org_rocksdb_Options + * Method: maxTotalWalSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxTotalWalSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)-> + max_total_wal_size; +} + +/* + * Class: org_rocksdb_Options + * Method: maxOpenFiles + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxOpenFiles( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->max_open_files; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxOpenFiles + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxOpenFiles( + JNIEnv* env, jobject jobj, jlong jhandle, jint max_open_files) { + reinterpret_cast(jhandle)->max_open_files = + static_cast(max_open_files); +} + +/* + * Class: org_rocksdb_Options + * Method: disableDataSync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_disableDataSync( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->disableDataSync; +} + +/* + * Class: 
org_rocksdb_Options + * Method: setDisableDataSync + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setDisableDataSync( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean disableDataSync) { + reinterpret_cast(jhandle)->disableDataSync = + static_cast(disableDataSync); +} + +/* + * Class: org_rocksdb_Options + * Method: useFsync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_useFsync( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->use_fsync; +} + +/* + * Class: org_rocksdb_Options + * Method: setUseFsync + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setUseFsync( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean use_fsync) { + reinterpret_cast(jhandle)->use_fsync = + static_cast(use_fsync); +} + +/* + * Class: org_rocksdb_Options + * Method: dbLogDir + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_Options_dbLogDir( + JNIEnv* env, jobject jobj, jlong jhandle) { + return env->NewStringUTF( + reinterpret_cast(jhandle)->db_log_dir.c_str()); +} + +/* + * Class: org_rocksdb_Options + * Method: setDbLogDir + * Signature: (JLjava/lang/String)V + */ +void Java_org_rocksdb_Options_setDbLogDir( + JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) { + const char* log_dir = env->GetStringUTFChars(jdb_log_dir, 0); + reinterpret_cast(jhandle)->db_log_dir.assign(log_dir); + env->ReleaseStringUTFChars(jdb_log_dir, log_dir); +} + +/* + * Class: org_rocksdb_Options + * Method: walDir + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_Options_walDir( + JNIEnv* env, jobject jobj, jlong jhandle) { + return env->NewStringUTF( + reinterpret_cast(jhandle)->wal_dir.c_str()); +} + +/* + * Class: org_rocksdb_Options + * Method: setWalDir + * Signature: (JLjava/lang/String)V + */ +void Java_org_rocksdb_Options_setWalDir( + JNIEnv* env, jobject jobj, jlong jhandle, jstring jwal_dir) { + const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0); + reinterpret_cast(jhandle)->wal_dir.assign(wal_dir); + env->ReleaseStringUTFChars(jwal_dir, wal_dir); +} + +/* + * Class: org_rocksdb_Options + * Method: deleteObsoleteFilesPeriodMicros + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_deleteObsoleteFilesPeriodMicros( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle) + ->delete_obsolete_files_period_micros; +} + +/* + * Class: org_rocksdb_Options + * Method: setDeleteObsoleteFilesPeriodMicros + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setDeleteObsoleteFilesPeriodMicros( + JNIEnv* env, jobject jobj, jlong jhandle, jlong micros) { + reinterpret_cast(jhandle) + ->delete_obsolete_files_period_micros = + static_cast(micros); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBackgroundCompactions + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxBackgroundCompactions( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->max_background_compactions; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxBackgroundCompactions + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxBackgroundCompactions( + JNIEnv* env, jobject jobj, jlong jhandle, jint max) { + reinterpret_cast(jhandle) + ->max_background_compactions = static_cast(max); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBackgroundFlushes + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxBackgroundFlushes( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->max_background_flushes; +} + +/* + * Class: 
org_rocksdb_Options + * Method: setMaxBackgroundFlushes + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxBackgroundFlushes( + JNIEnv* env, jobject jobj, jlong jhandle, jint max_background_flushes) { + reinterpret_cast(jhandle)->max_background_flushes = + static_cast(max_background_flushes); +} + +/* + * Class: org_rocksdb_Options + * Method: maxLogFileSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxLogFileSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->max_log_file_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxLogFileSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxLogFileSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong max_log_file_size) { + rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(max_log_file_size); + if (s.ok()) { + reinterpret_cast(jhandle)->max_log_file_size = + max_log_file_size; + } else { + rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: logFileTimeToRoll + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_logFileTimeToRoll( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->log_file_time_to_roll; +} + +/* + * Class: org_rocksdb_Options + * Method: setLogFileTimeToRoll + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setLogFileTimeToRoll( + JNIEnv* env, jobject jobj, jlong jhandle, jlong log_file_time_to_roll) { + rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t( + log_file_time_to_roll); + if (s.ok()) { + reinterpret_cast(jhandle)->log_file_time_to_roll = + log_file_time_to_roll; + } else { + rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: keepLogFileNum + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_keepLogFileNum( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->keep_log_file_num; +} + +/* + * Class: org_rocksdb_Options + * Method: setKeepLogFileNum + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setKeepLogFileNum( + JNIEnv* env, jobject jobj, jlong jhandle, jlong keep_log_file_num) { + rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(keep_log_file_num); + if (s.ok()) { + reinterpret_cast(jhandle)->keep_log_file_num = + keep_log_file_num; + } else { + rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: recycleLogFiles + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_recycleLogFileNum(JNIEnv* env, jobject jobj, + jlong jhandle) { + return reinterpret_cast(jhandle)->recycle_log_file_num; +} + +/* + * Class: org_rocksdb_Options + * Method: setRecycleLogFiles + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setRecycleLogFiles(JNIEnv* env, jobject jobj, + jlong jhandle, + jlong recycle_log_file_num) { + rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(recycle_log_file_num); + if (s.ok()) { + reinterpret_cast(jhandle)->recycle_log_file_num = + recycle_log_file_num; + } else { + rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: maxManifestFileSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxManifestFileSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->max_manifest_file_size; +} + +/* + * Method: memTableFactoryName + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_Options_memTableFactoryName( + JNIEnv* env, 
jobject jobj, jlong jhandle) {
+  auto opt = reinterpret_cast<rocksdb::Options*>(jhandle);
+  rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get();
+
+  // Should never be nullptr.
+  // Default memtable factory is SkipListFactory
+  assert(tf);
+
+  // temporary fix for the historical typo
+  if (strcmp(tf->Name(), "HashLinkListRepFactory") == 0) {
+    return env->NewStringUTF("HashLinkedListRepFactory");
+  }
+
+  return env->NewStringUTF(tf->Name());
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxManifestFileSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxManifestFileSize(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong max_manifest_file_size) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->max_manifest_file_size =
+      static_cast<int64_t>(max_manifest_file_size);
+}
+
+/*
+ * Method: setMemTableFactory
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMemTableFactory(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jfactory_handle) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->memtable_factory.reset(
+      reinterpret_cast<rocksdb::MemTableRepFactory*>(jfactory_handle));
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setRateLimiter
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setRateLimiter(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->rate_limiter.reset(
+      reinterpret_cast<rocksdb::RateLimiter*>(jrate_limiter_handle));
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setLogger
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setLogger(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jlogger_handle) {
+  std::shared_ptr<rocksdb::LoggerJniCallback>* pLogger =
+      reinterpret_cast<std::shared_ptr<rocksdb::LoggerJniCallback>*>(
+          jlogger_handle);
+  reinterpret_cast<rocksdb::Options*>(jhandle)->info_log = *pLogger;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setInfoLogLevel
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setInfoLogLevel(
+    JNIEnv* env, jobject jobj, jlong jhandle, jbyte jlog_level) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->info_log_level =
+      static_cast<rocksdb::InfoLogLevel>(jlog_level);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: infoLogLevel
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_infoLogLevel(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return static_cast<jbyte>(
+      reinterpret_cast<rocksdb::Options*>(jhandle)->info_log_level);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: tableCacheNumshardbits
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_tableCacheNumshardbits(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(jhandle)->table_cache_numshardbits;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setTableCacheNumshardbits
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setTableCacheNumshardbits(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint table_cache_numshardbits) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->table_cache_numshardbits =
+      static_cast<int>(table_cache_numshardbits);
+}
+
+/*
+ * Method: useFixedLengthPrefixExtractor
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_useFixedLengthPrefixExtractor(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jprefix_length) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->prefix_extractor.reset(
+      rocksdb::NewFixedPrefixTransform(
+          static_cast<int>(jprefix_length)));
+}
+
+/*
+ * Method: useCappedPrefixExtractor
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_useCappedPrefixExtractor(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jprefix_length) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->prefix_extractor.reset(
+      rocksdb::NewCappedPrefixTransform(
+          static_cast<int>(jprefix_length)));
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: walTtlSeconds
+ * Signature: (J)J
+ */
+jlong
Java_org_rocksdb_Options_walTtlSeconds( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->WAL_ttl_seconds; +} + +/* + * Class: org_rocksdb_Options + * Method: setWalTtlSeconds + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setWalTtlSeconds( + JNIEnv* env, jobject jobj, jlong jhandle, jlong WAL_ttl_seconds) { + reinterpret_cast(jhandle)->WAL_ttl_seconds = + static_cast(WAL_ttl_seconds); +} + +/* + * Class: org_rocksdb_Options + * Method: walTtlSeconds + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_walSizeLimitMB( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->WAL_size_limit_MB; +} + +/* + * Class: org_rocksdb_Options + * Method: setWalSizeLimitMB + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setWalSizeLimitMB( + JNIEnv* env, jobject jobj, jlong jhandle, jlong WAL_size_limit_MB) { + reinterpret_cast(jhandle)->WAL_size_limit_MB = + static_cast(WAL_size_limit_MB); +} + +/* + * Class: org_rocksdb_Options + * Method: manifestPreallocationSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_manifestPreallocationSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle) + ->manifest_preallocation_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setManifestPreallocationSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setManifestPreallocationSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong preallocation_size) { + rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(preallocation_size); + if (s.ok()) { + reinterpret_cast(jhandle)->manifest_preallocation_size = + preallocation_size; + } else { + rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: allowOsBuffer + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_allowOsBuffer( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->allow_os_buffer; +} + +/* + * Class: org_rocksdb_Options + * Method: setAllowOsBuffer + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllowOsBuffer( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_os_buffer) { + reinterpret_cast(jhandle)->allow_os_buffer = + static_cast(allow_os_buffer); +} + +/* + * Method: setTableFactory + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setTableFactory( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jfactory_handle) { + reinterpret_cast(jhandle)->table_factory.reset( + reinterpret_cast(jfactory_handle)); +} + +/* + * Class: org_rocksdb_Options + * Method: allowMmapReads + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_allowMmapReads( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->allow_mmap_reads; +} + +/* + * Class: org_rocksdb_Options + * Method: setAllowMmapReads + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllowMmapReads( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_mmap_reads) { + reinterpret_cast(jhandle)->allow_mmap_reads = + static_cast(allow_mmap_reads); +} + +/* + * Class: org_rocksdb_Options + * Method: allowMmapWrites + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_allowMmapWrites( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->allow_mmap_writes; +} + +/* + * Class: org_rocksdb_Options + * Method: setAllowMmapWrites + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAllowMmapWrites( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_mmap_writes) { + 
reinterpret_cast(jhandle)->allow_mmap_writes = + static_cast(allow_mmap_writes); +} + +/* + * Class: org_rocksdb_Options + * Method: isFdCloseOnExec + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_isFdCloseOnExec( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->is_fd_close_on_exec; +} + +/* + * Class: org_rocksdb_Options + * Method: setIsFdCloseOnExec + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setIsFdCloseOnExec( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean is_fd_close_on_exec) { + reinterpret_cast(jhandle)->is_fd_close_on_exec = + static_cast(is_fd_close_on_exec); +} + +/* + * Class: org_rocksdb_Options + * Method: statsDumpPeriodSec + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_statsDumpPeriodSec( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->stats_dump_period_sec; +} + +/* + * Class: org_rocksdb_Options + * Method: setStatsDumpPeriodSec + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setStatsDumpPeriodSec( + JNIEnv* env, jobject jobj, jlong jhandle, jint stats_dump_period_sec) { + reinterpret_cast(jhandle)->stats_dump_period_sec = + static_cast(stats_dump_period_sec); +} + +/* + * Class: org_rocksdb_Options + * Method: adviseRandomOnOpen + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_adviseRandomOnOpen( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->advise_random_on_open; +} + +/* + * Class: org_rocksdb_Options + * Method: setAdviseRandomOnOpen + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setAdviseRandomOnOpen( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean advise_random_on_open) { + reinterpret_cast(jhandle)->advise_random_on_open = + static_cast(advise_random_on_open); +} + +/* + * Class: org_rocksdb_Options + * Method: useAdaptiveMutex + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_useAdaptiveMutex( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->use_adaptive_mutex; +} + +/* + * Class: org_rocksdb_Options + * Method: setUseAdaptiveMutex + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setUseAdaptiveMutex( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean use_adaptive_mutex) { + reinterpret_cast(jhandle)->use_adaptive_mutex = + static_cast(use_adaptive_mutex); +} + +/* + * Class: org_rocksdb_Options + * Method: bytesPerSync + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_bytesPerSync( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->bytes_per_sync; +} + +/* + * Class: org_rocksdb_Options + * Method: setBytesPerSync + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setBytesPerSync( + JNIEnv* env, jobject jobj, jlong jhandle, jlong bytes_per_sync) { + reinterpret_cast(jhandle)->bytes_per_sync = + static_cast(bytes_per_sync); +} + +/* + * Method: tableFactoryName + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_Options_tableFactoryName( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto opt = reinterpret_cast(jhandle); + rocksdb::TableFactory* tf = opt->table_factory.get(); + + // Should never be nullptr. 
+  // Default table factory is BlockBasedTableFactory
+  assert(tf);
+
+  return env->NewStringUTF(tf->Name());
+}
+
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: minWriteBufferNumberToMerge
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_minWriteBufferNumberToMerge(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(
+      jhandle)->min_write_buffer_number_to_merge;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMinWriteBufferNumberToMerge
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMinWriteBufferNumberToMerge(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jmin_write_buffer_number_to_merge) {
+  reinterpret_cast<rocksdb::Options*>(
+      jhandle)->min_write_buffer_number_to_merge =
+          static_cast<int>(jmin_write_buffer_number_to_merge);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxWriteBufferNumberToMaintain
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_maxWriteBufferNumberToMaintain(JNIEnv* env,
+    jobject jobj,
+    jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(jhandle)
+      ->max_write_buffer_number_to_maintain;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxWriteBufferNumberToMaintain
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMaxWriteBufferNumberToMaintain(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jmax_write_buffer_number_to_maintain) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)
+      ->max_write_buffer_number_to_maintain =
+          static_cast<int>(jmax_write_buffer_number_to_maintain);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompressionType
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setCompressionType(
+    JNIEnv* env, jobject jobj, jlong jhandle, jbyte compression) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->compression =
+      static_cast<rocksdb::CompressionType>(compression);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: compressionType
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_compressionType(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(jhandle)->compression;
+}
+
+/*
+ * Helper method to convert a Java byte array of compression levels
+ * to a CompressionType vector.
+ */
+std::vector<rocksdb::CompressionType> rocksdb_compression_vector_helper(
+    JNIEnv* env, jbyteArray jcompressionLevels) {
+  std::vector<rocksdb::CompressionType> compressionLevels;
+
+  jsize len = env->GetArrayLength(jcompressionLevels);
+  jbyte* jcompressionLevel = env->GetByteArrayElements(jcompressionLevels,
+      NULL);
+  for (int i = 0; i < len; i++) {
+    jbyte jcl = jcompressionLevel[i];
+    compressionLevels.push_back(static_cast<rocksdb::CompressionType>(jcl));
+  }
+  env->ReleaseByteArrayElements(jcompressionLevels, jcompressionLevel,
+      JNI_ABORT);
+
+  return compressionLevels;
+}
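The helper above translates the Java byte array into the Options field it ultimately feeds, compression_per_level, where each entry selects the compression for one LSM level. A hedged sketch of configuring that field directly from C++, not part of the patch (the function name configurePerLevelCompression is illustrative):

#include "rocksdb/options.h"

// Sketch only: per-level compression, entry i applying to level i.
void configurePerLevelCompression(rocksdb::Options* opt) {
  opt->compression_per_level = {
      rocksdb::kNoCompression,      // L0: freshly flushed data, skip the cost
      rocksdb::kNoCompression,      // L1
      rocksdb::kSnappyCompression,  // deeper levels: cheap compression
      rocksdb::kSnappyCompression,
  };
}

Leaving the upper levels uncompressed is a common trade: they are rewritten most often, so compression there costs CPU for little space saving.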
+
+/*
+ * Helper method to convert a CompressionType vector to a Java
+ * byte array.
+ */
+jbyteArray rocksdb_compression_list_helper(JNIEnv* env,
+    std::vector<rocksdb::CompressionType> compressionLevels) {
+  std::unique_ptr<jbyte[]> jbuf =
+      std::unique_ptr<jbyte[]>(new jbyte[compressionLevels.size()]);
+  for (std::vector<rocksdb::CompressionType>::size_type i = 0;
+      i != compressionLevels.size(); i++) {
+    jbuf[i] = compressionLevels[i];
+  }
+  // insert in java array
+  jbyteArray jcompressionLevels = env->NewByteArray(
+      static_cast<jsize>(compressionLevels.size()));
+  env->SetByteArrayRegion(jcompressionLevels, 0,
+      static_cast<jsize>(compressionLevels.size()), jbuf.get());
+  return jcompressionLevels;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompressionPerLevel
+ * Signature: (J[B)V
+ */
+void Java_org_rocksdb_Options_setCompressionPerLevel(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jbyteArray jcompressionLevels) {
+  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
+  std::vector<rocksdb::CompressionType> compressionLevels =
+      rocksdb_compression_vector_helper(env, jcompressionLevels);
+  options->compression_per_level = compressionLevels;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: compressionPerLevel
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_Options_compressionPerLevel(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto* options = reinterpret_cast<rocksdb::Options*>(jhandle);
+  return rocksdb_compression_list_helper(env,
+      options->compression_per_level);
+}
+
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setCompactionStyle
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_Options_setCompactionStyle(
+    JNIEnv* env, jobject jobj, jlong jhandle, jbyte compaction_style) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->compaction_style =
+      static_cast<rocksdb::CompactionStyle>(compaction_style);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: compactionStyle
+ * Signature: (J)B
+ */
+jbyte Java_org_rocksdb_Options_compactionStyle(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(jhandle)->compaction_style;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxTableFilesSizeFIFO
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxTableFilesSizeFIFO(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_table_files_size) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)
+      ->compaction_options_fifo.max_table_files_size =
+          static_cast<uint64_t>(jmax_table_files_size);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxTableFilesSizeFIFO
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxTableFilesSizeFIFO(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(jhandle)
+      ->compaction_options_fifo.max_table_files_size;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: numLevels
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_numLevels(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(jhandle)->num_levels;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setNumLevels
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setNumLevels(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jnum_levels) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->num_levels =
+      static_cast<int>(jnum_levels);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: levelZeroFileNumCompactionTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_levelZeroFileNumCompactionTrigger(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(
+      jhandle)->level0_file_num_compaction_trigger;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setLevelZeroFileNumCompactionTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setLevelZeroFileNumCompactionTrigger(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jlevel0_file_num_compaction_trigger) {
+
reinterpret_cast( + jhandle)->level0_file_num_compaction_trigger = + static_cast(jlevel0_file_num_compaction_trigger); +} + +/* + * Class: org_rocksdb_Options + * Method: levelZeroSlowdownWritesTrigger + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_levelZeroSlowdownWritesTrigger( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->level0_slowdown_writes_trigger; +} + +/* + * Class: org_rocksdb_Options + * Method: setLevelSlowdownWritesTrigger + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setLevelZeroSlowdownWritesTrigger( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jlevel0_slowdown_writes_trigger) { + reinterpret_cast( + jhandle)->level0_slowdown_writes_trigger = + static_cast(jlevel0_slowdown_writes_trigger); +} + +/* + * Class: org_rocksdb_Options + * Method: levelZeroStopWritesTrigger + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_levelZeroStopWritesTrigger( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->level0_stop_writes_trigger; +} + +/* + * Class: org_rocksdb_Options + * Method: setLevelStopWritesTrigger + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setLevelZeroStopWritesTrigger( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jlevel0_stop_writes_trigger) { + reinterpret_cast(jhandle)->level0_stop_writes_trigger = + static_cast(jlevel0_stop_writes_trigger); +} + +/* + * Class: org_rocksdb_Options + * Method: targetFileSizeBase + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_targetFileSizeBase( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->target_file_size_base; +} + +/* + * Class: org_rocksdb_Options + * Method: setTargetFileSizeBase + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setTargetFileSizeBase( + JNIEnv* env, jobject jobj, jlong jhandle, + jlong jtarget_file_size_base) { + reinterpret_cast(jhandle)->target_file_size_base = + static_cast(jtarget_file_size_base); +} + +/* + * Class: org_rocksdb_Options + * Method: targetFileSizeMultiplier + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_targetFileSizeMultiplier( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->target_file_size_multiplier; +} + +/* + * Class: org_rocksdb_Options + * Method: setTargetFileSizeMultiplier + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setTargetFileSizeMultiplier( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jtarget_file_size_multiplier) { + reinterpret_cast( + jhandle)->target_file_size_multiplier = + static_cast(jtarget_file_size_multiplier); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBytesForLevelBase + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxBytesForLevelBase( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->max_bytes_for_level_base; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxBytesForLevelBase + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxBytesForLevelBase( + JNIEnv* env, jobject jobj, jlong jhandle, + jlong jmax_bytes_for_level_base) { + reinterpret_cast( + jhandle)->max_bytes_for_level_base = + static_cast(jmax_bytes_for_level_base); +} + +/* + * Class: org_rocksdb_Options + * Method: levelCompactionDynamicLevelBytes + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_levelCompactionDynamicLevelBytes( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->level_compaction_dynamic_level_bytes; +} + +/* + * Class: 
org_rocksdb_Options + * Method: setLevelCompactionDynamicLevelBytes + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setLevelCompactionDynamicLevelBytes( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jenable_dynamic_level_bytes) { + reinterpret_cast( + jhandle)->level_compaction_dynamic_level_bytes = + (jenable_dynamic_level_bytes); +} + +/* + * Class: org_rocksdb_Options + * Method: maxBytesForLevelMultiplier + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxBytesForLevelMultiplier( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->max_bytes_for_level_multiplier; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxBytesForLevelMultiplier + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxBytesForLevelMultiplier( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jmax_bytes_for_level_multiplier) { + reinterpret_cast( + jhandle)->max_bytes_for_level_multiplier = + static_cast(jmax_bytes_for_level_multiplier); +} + +/* + * Class: org_rocksdb_Options + * Method: expandedCompactionFactor + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_expandedCompactionFactor( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->expanded_compaction_factor; +} + +/* + * Class: org_rocksdb_Options + * Method: setExpandedCompactionFactor + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setExpandedCompactionFactor( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jexpanded_compaction_factor) { + reinterpret_cast( + jhandle)->expanded_compaction_factor = + static_cast(jexpanded_compaction_factor); +} + +/* + * Class: org_rocksdb_Options + * Method: sourceCompactionFactor + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_sourceCompactionFactor( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->source_compaction_factor; +} + +/* + * Class: org_rocksdb_Options + * Method: setSourceCompactionFactor + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setSourceCompactionFactor( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jsource_compaction_factor) { + reinterpret_cast( + jhandle)->source_compaction_factor = + static_cast(jsource_compaction_factor); +} + +/* + * Class: org_rocksdb_Options + * Method: maxGrandparentOverlapFactor + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_maxGrandparentOverlapFactor( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->max_grandparent_overlap_factor; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxGrandparentOverlapFactor + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMaxGrandparentOverlapFactor( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jmax_grandparent_overlap_factor) { + reinterpret_cast( + jhandle)->max_grandparent_overlap_factor = + static_cast(jmax_grandparent_overlap_factor); +} + +/* + * Class: org_rocksdb_Options + * Method: softRateLimit + * Signature: (J)D + */ +jdouble Java_org_rocksdb_Options_softRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->soft_rate_limit; +} + +/* + * Class: org_rocksdb_Options + * Method: setSoftRateLimit + * Signature: (JD)V + */ +void Java_org_rocksdb_Options_setSoftRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle, jdouble jsoft_rate_limit) { + reinterpret_cast(jhandle)->soft_rate_limit = + static_cast(jsoft_rate_limit); +} + +/* + * Class: org_rocksdb_Options + * Method: hardRateLimit + * Signature: (J)D + */ +jdouble 
Java_org_rocksdb_Options_hardRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->hard_rate_limit; +} + +/* + * Class: org_rocksdb_Options + * Method: setHardRateLimit + * Signature: (JD)V + */ +void Java_org_rocksdb_Options_setHardRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle, jdouble jhard_rate_limit) { + reinterpret_cast(jhandle)->hard_rate_limit = + static_cast(jhard_rate_limit); +} + +/* + * Class: org_rocksdb_Options + * Method: rateLimitDelayMaxMilliseconds + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_rateLimitDelayMaxMilliseconds( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->rate_limit_delay_max_milliseconds; +} + +/* + * Class: org_rocksdb_Options + * Method: setRateLimitDelayMaxMilliseconds + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setRateLimitDelayMaxMilliseconds( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jrate_limit_delay_max_milliseconds) { + reinterpret_cast( + jhandle)->rate_limit_delay_max_milliseconds = + static_cast(jrate_limit_delay_max_milliseconds); +} + +/* + * Class: org_rocksdb_Options + * Method: arenaBlockSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_arenaBlockSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->arena_block_size; +} + +/* + * Class: org_rocksdb_Options + * Method: setArenaBlockSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setArenaBlockSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jarena_block_size) { + rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jarena_block_size); + if (s.ok()) { + reinterpret_cast(jhandle)->arena_block_size = + jarena_block_size; + } else { + rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: disableAutoCompactions + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_disableAutoCompactions( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->disable_auto_compactions; +} + +/* + * Class: org_rocksdb_Options + * Method: setDisableAutoCompactions + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setDisableAutoCompactions( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jdisable_auto_compactions) { + reinterpret_cast( + jhandle)->disable_auto_compactions = + static_cast(jdisable_auto_compactions); +} + +/* + * Class: org_rocksdb_Options + * Method: purgeRedundantKvsWhileFlush + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_purgeRedundantKvsWhileFlush( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->purge_redundant_kvs_while_flush; +} + +/* + * Class: org_rocksdb_Options + * Method: setPurgeRedundantKvsWhileFlush + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setPurgeRedundantKvsWhileFlush( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jpurge_redundant_kvs_while_flush) { + reinterpret_cast( + jhandle)->purge_redundant_kvs_while_flush = + static_cast(jpurge_redundant_kvs_while_flush); +} + +/* + * Class: org_rocksdb_Options + * Method: verifyChecksumsInCompaction + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_verifyChecksumsInCompaction( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->verify_checksums_in_compaction; +} + +/* + * Class: org_rocksdb_Options + * Method: setVerifyChecksumsInCompaction + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setVerifyChecksumsInCompaction( + JNIEnv* 
env, jobject jobj, jlong jhandle, + jboolean jverify_checksums_in_compaction) { + reinterpret_cast( + jhandle)->verify_checksums_in_compaction = + static_cast(jverify_checksums_in_compaction); +} + +/* + * Class: org_rocksdb_Options + * Method: maxSequentialSkipInIterations + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_maxSequentialSkipInIterations( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->max_sequential_skip_in_iterations; +} + +/* + * Class: org_rocksdb_Options + * Method: setMaxSequentialSkipInIterations + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setMaxSequentialSkipInIterations( + JNIEnv* env, jobject jobj, jlong jhandle, + jlong jmax_sequential_skip_in_iterations) { + reinterpret_cast( + jhandle)->max_sequential_skip_in_iterations = + static_cast(jmax_sequential_skip_in_iterations); +} + +/* + * Class: org_rocksdb_Options + * Method: inplaceUpdateSupport + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_Options_inplaceUpdateSupport( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->inplace_update_support; +} + +/* + * Class: org_rocksdb_Options + * Method: setInplaceUpdateSupport + * Signature: (JZ)V + */ +void Java_org_rocksdb_Options_setInplaceUpdateSupport( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jinplace_update_support) { + reinterpret_cast( + jhandle)->inplace_update_support = + static_cast(jinplace_update_support); +} + +/* + * Class: org_rocksdb_Options + * Method: inplaceUpdateNumLocks + * Signature: (J)J + */ +jlong Java_org_rocksdb_Options_inplaceUpdateNumLocks( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->inplace_update_num_locks; +} + +/* + * Class: org_rocksdb_Options + * Method: setInplaceUpdateNumLocks + * Signature: (JJ)V + */ +void Java_org_rocksdb_Options_setInplaceUpdateNumLocks( + JNIEnv* env, jobject jobj, jlong jhandle, + jlong jinplace_update_num_locks) { + rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t( + jinplace_update_num_locks); + if (s.ok()) { + reinterpret_cast(jhandle)->inplace_update_num_locks = + jinplace_update_num_locks; + } else { + rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_Options + * Method: memtablePrefixBloomSizeRatio + * Signature: (J)I + */ +jdouble Java_org_rocksdb_Options_memtablePrefixBloomSizeRatio(JNIEnv* env, + jobject jobj, + jlong jhandle) { + return reinterpret_cast(jhandle) + ->memtable_prefix_bloom_size_ratio; +} + +/* + * Class: org_rocksdb_Options + * Method: setMemtablePrefixBloomSizeRatio + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setMemtablePrefixBloomSizeRatio( + JNIEnv* env, jobject jobj, jlong jhandle, + jdouble jmemtable_prefix_bloom_size_ratio) { + reinterpret_cast(jhandle) + ->memtable_prefix_bloom_size_ratio = + static_cast(jmemtable_prefix_bloom_size_ratio); +} + +/* + * Class: org_rocksdb_Options + * Method: bloomLocality + * Signature: (J)I + */ +jint Java_org_rocksdb_Options_bloomLocality( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->bloom_locality; +} + +/* + * Class: org_rocksdb_Options + * Method: setBloomLocality + * Signature: (JI)V + */ +void Java_org_rocksdb_Options_setBloomLocality( + JNIEnv* env, jobject jobj, jlong jhandle, jint jbloom_locality) { + reinterpret_cast(jhandle)->bloom_locality = + static_cast(jbloom_locality); +} + +/* + * Class: org_rocksdb_Options + * Method: maxSuccessiveMerges + * Signature: (J)J + */ +jlong 
+/*
+ * Class: org_rocksdb_Options
+ * Method: maxSuccessiveMerges
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Options_maxSuccessiveMerges(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(jhandle)->max_successive_merges;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMaxSuccessiveMerges
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_setMaxSuccessiveMerges(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jmax_successive_merges) {
+  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
+      jmax_successive_merges);
+  if (s.ok()) {
+    reinterpret_cast<rocksdb::Options*>(jhandle)->max_successive_merges =
+        jmax_successive_merges;
+  } else {
+    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: minPartialMergeOperands
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_Options_minPartialMergeOperands(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(
+      jhandle)->min_partial_merge_operands;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setMinPartialMergeOperands
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_Options_setMinPartialMergeOperands(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jmin_partial_merge_operands) {
+  reinterpret_cast<rocksdb::Options*>(
+      jhandle)->min_partial_merge_operands =
+      static_cast<int32_t>(jmin_partial_merge_operands);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: optimizeFiltersForHits
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_Options_optimizeFiltersForHits(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::Options*>(
+      jhandle)->optimize_filters_for_hits;
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: setOptimizeFiltersForHits
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_Options_setOptimizeFiltersForHits(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jboolean joptimize_filters_for_hits) {
+  reinterpret_cast<rocksdb::Options*>(
+      jhandle)->optimize_filters_for_hits =
+      static_cast<bool>(joptimize_filters_for_hits);
+}
+
+/*
+ * Method: optimizeForPointLookup
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_optimizeForPointLookup(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong block_cache_size_mb) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->
+      OptimizeForPointLookup(block_cache_size_mb);
+}
+
+/*
+ * Method: optimizeLevelStyleCompaction
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_optimizeLevelStyleCompaction(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong memtable_memory_budget) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->
+      OptimizeLevelStyleCompaction(memtable_memory_budget);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: optimizeUniversalStyleCompaction
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_Options_optimizeUniversalStyleCompaction(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong memtable_memory_budget) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->
+      OptimizeUniversalStyleCompaction(memtable_memory_budget);
+}
+
+/*
+ * Class: org_rocksdb_Options
+ * Method: prepareForBulkLoad
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Options_prepareForBulkLoad(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  reinterpret_cast<rocksdb::Options*>(jhandle)->
+      PrepareForBulkLoad();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// rocksdb::ColumnFamilyOptions
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: newColumnFamilyOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_newColumnFamilyOptions(
+    JNIEnv* env, jclass jcls) {
+  rocksdb::ColumnFamilyOptions* op = new rocksdb::ColumnFamilyOptions();
+  return reinterpret_cast<jlong>(op);
+}
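newColumnFamilyOptions above shows the ownership pattern used by every wrapper in this file: the native object is heap-allocated, its address travels to Java as a jlong, every later call casts that handle back, and only disposeInternal frees it. A condensed sketch of the round trip (illustration only, not part of the patch; the function name is hypothetical):

#include <jni.h>
#include <rocksdb/options.h>

// Native side of the handle lifecycle, in the order Java drives it.
void HandleLifecycleSketch() {
  auto* opt = new rocksdb::ColumnFamilyOptions();   // newColumnFamilyOptions
  jlong handle = reinterpret_cast<jlong>(opt);      // value kept in a Java long field
  auto* back = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(handle);
  back->write_buffer_size = 64 << 20;               // what each getter/setter stub does
  delete reinterpret_cast<rocksdb::ColumnFamilyOptions*>(handle);  // disposeInternal
}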
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: getColumnFamilyOptionsFromProps
+ * Signature: (Ljava/lang/String;)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_getColumnFamilyOptionsFromProps(
+    JNIEnv* env, jclass jclazz, jstring jopt_string) {
+  jlong ret_value = 0;
+  rocksdb::ColumnFamilyOptions* cf_options =
+      new rocksdb::ColumnFamilyOptions();
+  const char* opt_string = env->GetStringUTFChars(jopt_string, 0);
+  rocksdb::Status status = rocksdb::GetColumnFamilyOptionsFromString(
+      rocksdb::ColumnFamilyOptions(), opt_string, cf_options);
+  env->ReleaseStringUTFChars(jopt_string, opt_string);
+  // Check if ColumnFamilyOptions creation was possible.
+  if (status.ok()) {
+    ret_value = reinterpret_cast<jlong>(cf_options);
+  } else {
+    // if operation failed the ColumnFamilyOptions need to be deleted
+    // again to prevent a memory leak.
+    delete cf_options;
+  }
+  return ret_value;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_disposeInternal(
+    JNIEnv* env, jobject jobj, jlong handle) {
+  delete reinterpret_cast<rocksdb::ColumnFamilyOptions*>(handle);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: optimizeForPointLookup
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_optimizeForPointLookup(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong block_cache_size_mb) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      OptimizeForPointLookup(block_cache_size_mb);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: optimizeLevelStyleCompaction
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_optimizeLevelStyleCompaction(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong memtable_memory_budget) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      OptimizeLevelStyleCompaction(memtable_memory_budget);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: optimizeUniversalStyleCompaction
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_optimizeUniversalStyleCompaction(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong memtable_memory_budget) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      OptimizeUniversalStyleCompaction(memtable_memory_budget);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setComparatorHandle
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JI(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint builtinComparator) {
+  switch (builtinComparator) {
+    case 1:
+      reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->comparator =
+          rocksdb::ReverseBytewiseComparator();
+      break;
+    default:
+      reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->comparator =
+          rocksdb::BytewiseComparator();
+      break;
+  }
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setComparatorHandle
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setComparatorHandle__JJ(
+    JNIEnv* env, jobject jobj, jlong jopt_handle, jlong jcomparator_handle) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jopt_handle)->comparator =
+      reinterpret_cast<rocksdb::Comparator*>(jcomparator_handle);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMergeOperatorName
+ * Signature: (JLjava/lang/String;)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperatorName(
+    JNIEnv* env, jobject jobj, jlong jhandle, jstring jop_name) {
+  auto options = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  const char* op_name = env->GetStringUTFChars(jop_name, 0);
+  options->merge_operator = rocksdb::MergeOperators::CreateFromStringId(
+      op_name);
+  env->ReleaseStringUTFChars(jop_name, op_name);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMergeOperator
+ * Signature:
(JJjava/lang/String)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMergeOperator( + JNIEnv* env, jobject jobj, jlong jhandle, jlong mergeOperatorHandle) { + reinterpret_cast(jhandle)->merge_operator = + *(reinterpret_cast*> + (mergeOperatorHandle)); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCompactionFilterHandle + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompactionFilterHandle( + JNIEnv* env, jobject jobj, jlong jopt_handle, + jlong jcompactionfilter_handle) { + reinterpret_cast(jopt_handle)-> + compaction_filter = reinterpret_cast + (jcompactionfilter_handle); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setWriteBufferSize + * Signature: (JJ)I + */ +void Java_org_rocksdb_ColumnFamilyOptions_setWriteBufferSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jwrite_buffer_size) { + rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jwrite_buffer_size); + if (s.ok()) { + reinterpret_cast(jhandle)-> + write_buffer_size = jwrite_buffer_size; + } else { + rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: writeBufferSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_writeBufferSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)-> + write_buffer_size; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setMaxWriteBufferNumber + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumber( + JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_write_buffer_number) { + reinterpret_cast(jhandle)-> + max_write_buffer_number = jmax_write_buffer_number; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: maxWriteBufferNumber + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumber( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)-> + max_write_buffer_number; +} + +/* + * Method: setMemTableFactory + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMemTableFactory( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jfactory_handle) { + reinterpret_cast(jhandle)-> + memtable_factory.reset( + reinterpret_cast(jfactory_handle)); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: memTableFactoryName + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_ColumnFamilyOptions_memTableFactoryName( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto opt = reinterpret_cast(jhandle); + rocksdb::MemTableRepFactory* tf = opt->memtable_factory.get(); + + // Should never be nullptr. 
+  // Default memtable factory is SkipListFactory
+  assert(tf);
+
+  // temporary fix for the historical typo
+  if (strcmp(tf->Name(), "HashLinkListRepFactory") == 0) {
+    return env->NewStringUTF("HashLinkedListRepFactory");
+  }
+
+  return env->NewStringUTF(tf->Name());
+}
+
+/*
+ * Method: useFixedLengthPrefixExtractor
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_useFixedLengthPrefixExtractor(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jprefix_length) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(
+          static_cast<size_t>(jprefix_length)));
+}
+
+/*
+ * Method: useCappedPrefixExtractor
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_useCappedPrefixExtractor(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jprefix_length) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      prefix_extractor.reset(rocksdb::NewCappedPrefixTransform(
+          static_cast<size_t>(jprefix_length)));
+}
+
+/*
+ * Method: setTableFactory
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setTableFactory(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong jfactory_handle) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      table_factory.reset(reinterpret_cast<rocksdb::TableFactory*>(
+          jfactory_handle));
+}
+
+/*
+ * Method: tableFactoryName
+ * Signature: (J)Ljava/lang/String
+ */
+jstring Java_org_rocksdb_ColumnFamilyOptions_tableFactoryName(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  auto opt = reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle);
+  rocksdb::TableFactory* tf = opt->table_factory.get();
+
+  // Should never be nullptr.
+  // Default table factory is BlockBasedTableFactory
+  assert(tf);
+
+  return env->NewStringUTF(tf->Name());
+}
+
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: minWriteBufferNumberToMerge
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_minWriteBufferNumberToMerge(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+      jhandle)->min_write_buffer_number_to_merge;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMinWriteBufferNumberToMerge
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMinWriteBufferNumberToMerge(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jmin_write_buffer_number_to_merge) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+      jhandle)->min_write_buffer_number_to_merge =
+      static_cast<int>(jmin_write_buffer_number_to_merge);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxWriteBufferNumberToMaintain
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_maxWriteBufferNumberToMaintain(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
+      ->max_write_buffer_number_to_maintain;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxWriteBufferNumberToMaintain
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxWriteBufferNumberToMaintain(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jmax_write_buffer_number_to_maintain) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
+      ->max_write_buffer_number_to_maintain =
+      static_cast<int>(jmax_write_buffer_number_to_maintain);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setCompressionType
+ * Signature: (JB)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setCompressionType(
+    JNIEnv* env, jobject jobj, jlong jhandle, jbyte compression) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      compression = static_cast<rocksdb::CompressionType>(compression);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: compressionType
+ * Signature: (J)B
+ */
+jbyte
Java_org_rocksdb_ColumnFamilyOptions_compressionType( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)-> + compression; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCompressionPerLevel + * Signature: (J[B)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompressionPerLevel( + JNIEnv* env, jobject jobj, jlong jhandle, + jbyteArray jcompressionLevels) { + auto* options = reinterpret_cast(jhandle); + std::vector compressionLevels = + rocksdb_compression_vector_helper(env, jcompressionLevels); + options->compression_per_level = compressionLevels; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: compressionPerLevel + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_ColumnFamilyOptions_compressionPerLevel( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto* options = reinterpret_cast(jhandle); + return rocksdb_compression_list_helper(env, + options->compression_per_level); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setCompactionStyle + * Signature: (JB)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setCompactionStyle( + JNIEnv* env, jobject jobj, jlong jhandle, jbyte compaction_style) { + reinterpret_cast(jhandle)->compaction_style = + static_cast(compaction_style); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: compactionStyle + * Signature: (J)B + */ +jbyte Java_org_rocksdb_ColumnFamilyOptions_compactionStyle( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast + (jhandle)->compaction_style; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setMaxTableFilesSizeFIFO + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMaxTableFilesSizeFIFO( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jmax_table_files_size) { + reinterpret_cast(jhandle)->compaction_options_fifo.max_table_files_size = + static_cast(jmax_table_files_size); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: maxTableFilesSizeFIFO + * Signature: (J)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_maxTableFilesSizeFIFO( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->compaction_options_fifo.max_table_files_size; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: numLevels + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyOptions_numLevels( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->num_levels; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setNumLevels + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setNumLevels( + JNIEnv* env, jobject jobj, jlong jhandle, jint jnum_levels) { + reinterpret_cast(jhandle)->num_levels = + static_cast(jnum_levels); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: levelZeroFileNumCompactionTrigger + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroFileNumCompactionTrigger( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->level0_file_num_compaction_trigger; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setLevelZeroFileNumCompactionTrigger + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroFileNumCompactionTrigger( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jlevel0_file_num_compaction_trigger) { + reinterpret_cast( + jhandle)->level0_file_num_compaction_trigger = + static_cast(jlevel0_file_num_compaction_trigger); +} + +/* + * Class: 
org_rocksdb_ColumnFamilyOptions
+ * Method: levelZeroSlowdownWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroSlowdownWritesTrigger(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+      jhandle)->level0_slowdown_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevelZeroSlowdownWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroSlowdownWritesTrigger(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jlevel0_slowdown_writes_trigger) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+      jhandle)->level0_slowdown_writes_trigger =
+      static_cast<int>(jlevel0_slowdown_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: levelZeroStopWritesTrigger
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_levelZeroStopWritesTrigger(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+      jhandle)->level0_stop_writes_trigger;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setLevelZeroStopWritesTrigger
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setLevelZeroStopWritesTrigger(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jlevel0_stop_writes_trigger) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      level0_stop_writes_trigger = static_cast<int>(
+          jlevel0_stop_writes_trigger);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxMemCompactionLevel
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_maxMemCompactionLevel(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  // max_mem_compaction_level no longer exists in this version of RocksDB;
+  // the getter and setter are kept as stubs for Java API compatibility.
+  return 0;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxMemCompactionLevel
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxMemCompactionLevel(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jmax_mem_compaction_level) {}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: targetFileSizeBase
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeBase(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      target_file_size_base;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setTargetFileSizeBase
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeBase(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jtarget_file_size_base) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      target_file_size_base = static_cast<uint64_t>(jtarget_file_size_base);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: targetFileSizeMultiplier
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_targetFileSizeMultiplier(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+      jhandle)->target_file_size_multiplier;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setTargetFileSizeMultiplier
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setTargetFileSizeMultiplier(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jtarget_file_size_multiplier) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+      jhandle)->target_file_size_multiplier =
+      static_cast<int>(jtarget_file_size_multiplier);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxBytesForLevelBase
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelBase(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+      jhandle)->max_bytes_for_level_base;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxBytesForLevelBase
+ *
Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelBase( + JNIEnv* env, jobject jobj, jlong jhandle, + jlong jmax_bytes_for_level_base) { + reinterpret_cast( + jhandle)->max_bytes_for_level_base = + static_cast(jmax_bytes_for_level_base); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: levelCompactionDynamicLevelBytes + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ColumnFamilyOptions_levelCompactionDynamicLevelBytes( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->level_compaction_dynamic_level_bytes; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setLevelCompactionDynamicLevelBytes + * Signature: (JZ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setLevelCompactionDynamicLevelBytes( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jenable_dynamic_level_bytes) { + reinterpret_cast( + jhandle)->level_compaction_dynamic_level_bytes = + (jenable_dynamic_level_bytes); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: maxBytesForLevelMultiplier + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyOptions_maxBytesForLevelMultiplier( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->max_bytes_for_level_multiplier; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setMaxBytesForLevelMultiplier + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMaxBytesForLevelMultiplier( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jmax_bytes_for_level_multiplier) { + reinterpret_cast( + jhandle)->max_bytes_for_level_multiplier = + static_cast(jmax_bytes_for_level_multiplier); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: expandedCompactionFactor + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyOptions_expandedCompactionFactor( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->expanded_compaction_factor; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setExpandedCompactionFactor + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setExpandedCompactionFactor( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jexpanded_compaction_factor) { + reinterpret_cast( + jhandle)->expanded_compaction_factor = + static_cast(jexpanded_compaction_factor); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: sourceCompactionFactor + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyOptions_sourceCompactionFactor( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->source_compaction_factor; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setSourceCompactionFactor + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setSourceCompactionFactor( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jsource_compaction_factor) { + reinterpret_cast( + jhandle)->source_compaction_factor = + static_cast(jsource_compaction_factor); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: maxGrandparentOverlapFactor + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyOptions_maxGrandparentOverlapFactor( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->max_grandparent_overlap_factor; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setMaxGrandparentOverlapFactor + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMaxGrandparentOverlapFactor( + JNIEnv* env, jobject jobj, 
jlong jhandle, + jint jmax_grandparent_overlap_factor) { + reinterpret_cast( + jhandle)->max_grandparent_overlap_factor = + static_cast(jmax_grandparent_overlap_factor); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: softRateLimit + * Signature: (J)D + */ +jdouble Java_org_rocksdb_ColumnFamilyOptions_softRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)-> + soft_rate_limit; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setSoftRateLimit + * Signature: (JD)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setSoftRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle, jdouble jsoft_rate_limit) { + reinterpret_cast(jhandle)->soft_rate_limit = + static_cast(jsoft_rate_limit); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: hardRateLimit + * Signature: (J)D + */ +jdouble Java_org_rocksdb_ColumnFamilyOptions_hardRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)-> + hard_rate_limit; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setHardRateLimit + * Signature: (JD)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setHardRateLimit( + JNIEnv* env, jobject jobj, jlong jhandle, jdouble jhard_rate_limit) { + reinterpret_cast(jhandle)->hard_rate_limit = + static_cast(jhard_rate_limit); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: rateLimitDelayMaxMilliseconds + * Signature: (J)I + */ +jint Java_org_rocksdb_ColumnFamilyOptions_rateLimitDelayMaxMilliseconds( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->rate_limit_delay_max_milliseconds; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setRateLimitDelayMaxMilliseconds + * Signature: (JI)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setRateLimitDelayMaxMilliseconds( + JNIEnv* env, jobject jobj, jlong jhandle, + jint jrate_limit_delay_max_milliseconds) { + reinterpret_cast( + jhandle)->rate_limit_delay_max_milliseconds = + static_cast(jrate_limit_delay_max_milliseconds); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: arenaBlockSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_arenaBlockSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)-> + arena_block_size; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setArenaBlockSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setArenaBlockSize( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jarena_block_size) { + rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(jarena_block_size); + if (s.ok()) { + reinterpret_cast(jhandle)-> + arena_block_size = jarena_block_size; + } else { + rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: disableAutoCompactions + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ColumnFamilyOptions_disableAutoCompactions( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->disable_auto_compactions; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setDisableAutoCompactions + * Signature: (JZ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setDisableAutoCompactions( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jdisable_auto_compactions) { + reinterpret_cast( + jhandle)->disable_auto_compactions = + static_cast(jdisable_auto_compactions); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: 
purgeRedundantKvsWhileFlush + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ColumnFamilyOptions_purgeRedundantKvsWhileFlush( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->purge_redundant_kvs_while_flush; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setPurgeRedundantKvsWhileFlush + * Signature: (JZ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setPurgeRedundantKvsWhileFlush( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jpurge_redundant_kvs_while_flush) { + reinterpret_cast( + jhandle)->purge_redundant_kvs_while_flush = + static_cast(jpurge_redundant_kvs_while_flush); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: verifyChecksumsInCompaction + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ColumnFamilyOptions_verifyChecksumsInCompaction( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->verify_checksums_in_compaction; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setVerifyChecksumsInCompaction + * Signature: (JZ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setVerifyChecksumsInCompaction( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jverify_checksums_in_compaction) { + reinterpret_cast( + jhandle)->verify_checksums_in_compaction = + static_cast(jverify_checksums_in_compaction); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: maxSequentialSkipInIterations + * Signature: (J)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_maxSequentialSkipInIterations( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->max_sequential_skip_in_iterations; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setMaxSequentialSkipInIterations + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setMaxSequentialSkipInIterations( + JNIEnv* env, jobject jobj, jlong jhandle, + jlong jmax_sequential_skip_in_iterations) { + reinterpret_cast( + jhandle)->max_sequential_skip_in_iterations = + static_cast(jmax_sequential_skip_in_iterations); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: inplaceUpdateSupport + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateSupport( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->inplace_update_support; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setInplaceUpdateSupport + * Signature: (JZ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateSupport( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jinplace_update_support) { + reinterpret_cast( + jhandle)->inplace_update_support = + static_cast(jinplace_update_support); +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: inplaceUpdateNumLocks + * Signature: (J)J + */ +jlong Java_org_rocksdb_ColumnFamilyOptions_inplaceUpdateNumLocks( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->inplace_update_num_locks; +} + +/* + * Class: org_rocksdb_ColumnFamilyOptions + * Method: setInplaceUpdateNumLocks + * Signature: (JJ)V + */ +void Java_org_rocksdb_ColumnFamilyOptions_setInplaceUpdateNumLocks( + JNIEnv* env, jobject jobj, jlong jhandle, + jlong jinplace_update_num_locks) { + rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t( + jinplace_update_num_locks); + if (s.ok()) { + reinterpret_cast(jhandle)-> + inplace_update_num_locks = jinplace_update_num_locks; + } else { + rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s); + } +} + 
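A note on the "Signature:" strings in these stub comments: they are JNI type descriptors, with the parameter types in parentheses followed by the return type, where Z = boolean, B = byte, I = int, J = long, D = double, V = void, and Ljava/lang/String; stands for String. They have to agree with the Java-side native method declarations, which is why the memtablePrefixBloomSizeRatio pair below, whose native implementations traffic in jdouble, should carry (J)D and (JD)V rather than the (J)I and (JI)V the original comments had.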
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: memtablePrefixBloomSizeRatio
+ * Signature: (J)D
+ */
+jdouble Java_org_rocksdb_ColumnFamilyOptions_memtablePrefixBloomSizeRatio(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
+      ->memtable_prefix_bloom_size_ratio;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMemtablePrefixBloomSizeRatio
+ * Signature: (JD)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMemtablePrefixBloomSizeRatio(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jdouble jmemtable_prefix_bloom_size_ratio) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)
+      ->memtable_prefix_bloom_size_ratio =
+      static_cast<double>(jmemtable_prefix_bloom_size_ratio);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: bloomLocality
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_bloomLocality(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      bloom_locality;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setBloomLocality
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setBloomLocality(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint jbloom_locality) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->bloom_locality =
+      static_cast<int32_t>(jbloom_locality);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: maxSuccessiveMerges
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_ColumnFamilyOptions_maxSuccessiveMerges(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+      max_successive_merges;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMaxSuccessiveMerges
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMaxSuccessiveMerges(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jlong jmax_successive_merges) {
+  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
+      jmax_successive_merges);
+  if (s.ok()) {
+    reinterpret_cast<rocksdb::ColumnFamilyOptions*>(jhandle)->
+        max_successive_merges = jmax_successive_merges;
+  } else {
+    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: minPartialMergeOperands
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_ColumnFamilyOptions_minPartialMergeOperands(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+      jhandle)->min_partial_merge_operands;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setMinPartialMergeOperands
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setMinPartialMergeOperands(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jint jmin_partial_merge_operands) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+      jhandle)->min_partial_merge_operands =
+      static_cast<int32_t>(jmin_partial_merge_operands);
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: optimizeFiltersForHits
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ColumnFamilyOptions_optimizeFiltersForHits(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+      jhandle)->optimize_filters_for_hits;
+}
+
+/*
+ * Class: org_rocksdb_ColumnFamilyOptions
+ * Method: setOptimizeFiltersForHits
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ColumnFamilyOptions_setOptimizeFiltersForHits(
+    JNIEnv* env, jobject jobj, jlong jhandle,
+    jboolean joptimize_filters_for_hits) {
+  reinterpret_cast<rocksdb::ColumnFamilyOptions*>(
+      jhandle)->optimize_filters_for_hits =
+      static_cast<bool>(joptimize_filters_for_hits);
+}
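The option-string parser wrapped by getColumnFamilyOptionsFromProps above takes a semicolon-separated list of option_name=value pairs. A hypothetical native-side caller, sketched under the assumption that the vendored headers expose rocksdb::GetColumnFamilyOptionsFromString as in upstream RocksDB:

#include <rocksdb/convenience.h>
#include <rocksdb/options.h>

// Parse an options string on top of default ColumnFamilyOptions.
rocksdb::Status ParseSketch(rocksdb::ColumnFamilyOptions* out) {
  return rocksdb::GetColumnFamilyOptionsFromString(
      rocksdb::ColumnFamilyOptions(),  // base options to start from
      "write_buffer_size=67108864;max_write_buffer_number=4",
      out);
}
// On failure, `out` must not be trusted; the JNI wrapper above therefore
// deletes the half-initialized object and returns a 0 handle instead.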
+/////////////////////////////////////////////////////////////////////
+// rocksdb::DBOptions
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: newDBOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_DBOptions_newDBOptions(JNIEnv* env,
+    jclass jcls) {
+  rocksdb::DBOptions* dbop = new rocksdb::DBOptions();
+  return reinterpret_cast<jlong>(dbop);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: getDBOptionsFromProps
+ * Signature: (Ljava/lang/String;)J
+ */
+jlong Java_org_rocksdb_DBOptions_getDBOptionsFromProps(
+    JNIEnv* env, jclass jclazz, jstring jopt_string) {
+  jlong ret_value = 0;
+  rocksdb::DBOptions* db_options =
+      new rocksdb::DBOptions();
+  const char* opt_string = env->GetStringUTFChars(jopt_string, 0);
+  rocksdb::Status status = rocksdb::GetDBOptionsFromString(
+      rocksdb::DBOptions(), opt_string, db_options);
+  env->ReleaseStringUTFChars(jopt_string, opt_string);
+  // Check if DBOptions creation was possible.
+  if (status.ok()) {
+    ret_value = reinterpret_cast<jlong>(db_options);
+  } else {
+    // if operation failed the DBOptions need to be deleted
+    // again to prevent a memory leak.
+    delete db_options;
+  }
+  return ret_value;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_DBOptions_disposeInternal(
+    JNIEnv* env, jobject jobj, jlong handle) {
+  delete reinterpret_cast<rocksdb::DBOptions*>(handle);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setIncreaseParallelism
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setIncreaseParallelism(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint totalThreads) {
+  reinterpret_cast<rocksdb::DBOptions*>
+      (jhandle)->IncreaseParallelism(static_cast<int>(totalThreads));
+}
+
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setCreateIfMissing
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setCreateIfMissing(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
+  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
+      create_if_missing = flag;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: createIfMissing
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_createIfMissing(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->create_if_missing;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setCreateMissingColumnFamilies
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setCreateMissingColumnFamilies(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean flag) {
+  reinterpret_cast<rocksdb::DBOptions*>
+      (jhandle)->create_missing_column_families = flag;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: createMissingColumnFamilies
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_createMissingColumnFamilies(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>
+      (jhandle)->create_missing_column_families;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setErrorIfExists
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setErrorIfExists(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean error_if_exists) {
+  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->error_if_exists =
+      static_cast<bool>(error_if_exists);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: errorIfExists
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_errorIfExists(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->error_if_exists;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setParanoidChecks
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setParanoidChecks(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean paranoid_checks) {
+  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->paranoid_checks =
+      static_cast<bool>(paranoid_checks);
+}
+
+/*
+ *
Class: org_rocksdb_DBOptions + * Method: paranoidChecks + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_paranoidChecks( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->paranoid_checks; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setRateLimiter + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setRateLimiter( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jrate_limiter_handle) { + reinterpret_cast(jhandle)->rate_limiter.reset( + reinterpret_cast(jrate_limiter_handle)); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setLogger + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setLogger( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jlogger_handle) { + std::shared_ptr *pLogger = + reinterpret_cast *>( + jlogger_handle); + reinterpret_cast(jhandle)->info_log = *pLogger; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setInfoLogLevel + * Signature: (JB)V + */ +void Java_org_rocksdb_DBOptions_setInfoLogLevel( + JNIEnv* env, jobject jobj, jlong jhandle, jbyte jlog_level) { + reinterpret_cast(jhandle)->info_log_level = + static_cast(jlog_level); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: infoLogLevel + * Signature: (J)B + */ +jbyte Java_org_rocksdb_DBOptions_infoLogLevel( + JNIEnv* env, jobject jobj, jlong jhandle) { + return static_cast( + reinterpret_cast(jhandle)->info_log_level); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setMaxTotalWalSize + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setMaxTotalWalSize( + JNIEnv* env, jobject jobj, jlong jhandle, + jlong jmax_total_wal_size) { + reinterpret_cast(jhandle)->max_total_wal_size = + static_cast(jmax_total_wal_size); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxTotalWalSize + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_maxTotalWalSize( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)-> + max_total_wal_size; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setMaxOpenFiles + * Signature: (JI)V + */ +void Java_org_rocksdb_DBOptions_setMaxOpenFiles( + JNIEnv* env, jobject jobj, jlong jhandle, jint max_open_files) { + reinterpret_cast(jhandle)->max_open_files = + static_cast(max_open_files); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxOpenFiles + * Signature: (J)I + */ +jint Java_org_rocksdb_DBOptions_maxOpenFiles( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->max_open_files; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: createStatistics + * Signature: (J)V + */ +void Java_org_rocksdb_DBOptions_createStatistics( + JNIEnv* env, jobject jobj, jlong jOptHandle) { + reinterpret_cast(jOptHandle)->statistics = + rocksdb::CreateDBStatistics(); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: statisticsPtr + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_statisticsPtr( + JNIEnv* env, jobject jobj, jlong jOptHandle) { + auto st = reinterpret_cast(jOptHandle)-> + statistics.get(); + return reinterpret_cast(st); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setDisableDataSync + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setDisableDataSync( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean disableDataSync) { + reinterpret_cast(jhandle)->disableDataSync = + static_cast(disableDataSync); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: disableDataSync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_disableDataSync( + JNIEnv* env, jobject jobj, jlong jhandle) { + 
return reinterpret_cast(jhandle)->disableDataSync; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setUseFsync + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setUseFsync( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean use_fsync) { + reinterpret_cast(jhandle)->use_fsync = + static_cast(use_fsync); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: useFsync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_useFsync( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->use_fsync; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setDbLogDir + * Signature: (JLjava/lang/String)V + */ +void Java_org_rocksdb_DBOptions_setDbLogDir( + JNIEnv* env, jobject jobj, jlong jhandle, jstring jdb_log_dir) { + const char* log_dir = env->GetStringUTFChars(jdb_log_dir, 0); + reinterpret_cast(jhandle)->db_log_dir.assign(log_dir); + env->ReleaseStringUTFChars(jdb_log_dir, log_dir); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: dbLogDir + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_DBOptions_dbLogDir( + JNIEnv* env, jobject jobj, jlong jhandle) { + return env->NewStringUTF( + reinterpret_cast(jhandle)->db_log_dir.c_str()); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setWalDir + * Signature: (JLjava/lang/String)V + */ +void Java_org_rocksdb_DBOptions_setWalDir( + JNIEnv* env, jobject jobj, jlong jhandle, jstring jwal_dir) { + const char* wal_dir = env->GetStringUTFChars(jwal_dir, 0); + reinterpret_cast(jhandle)->wal_dir.assign(wal_dir); + env->ReleaseStringUTFChars(jwal_dir, wal_dir); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: walDir + * Signature: (J)Ljava/lang/String + */ +jstring Java_org_rocksdb_DBOptions_walDir( + JNIEnv* env, jobject jobj, jlong jhandle) { + return env->NewStringUTF( + reinterpret_cast(jhandle)->wal_dir.c_str()); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setDeleteObsoleteFilesPeriodMicros + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setDeleteObsoleteFilesPeriodMicros( + JNIEnv* env, jobject jobj, jlong jhandle, jlong micros) { + reinterpret_cast(jhandle) + ->delete_obsolete_files_period_micros = + static_cast(micros); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: deleteObsoleteFilesPeriodMicros + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_deleteObsoleteFilesPeriodMicros( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle) + ->delete_obsolete_files_period_micros; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setMaxBackgroundCompactions + * Signature: (JI)V + */ +void Java_org_rocksdb_DBOptions_setMaxBackgroundCompactions( + JNIEnv* env, jobject jobj, jlong jhandle, jint max) { + reinterpret_cast(jhandle) + ->max_background_compactions = static_cast(max); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxBackgroundCompactions + * Signature: (J)I + */ +jint Java_org_rocksdb_DBOptions_maxBackgroundCompactions( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->max_background_compactions; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setMaxBackgroundFlushes + * Signature: (JI)V + */ +void Java_org_rocksdb_DBOptions_setMaxBackgroundFlushes( + JNIEnv* env, jobject jobj, jlong jhandle, jint max_background_flushes) { + reinterpret_cast(jhandle)->max_background_flushes = + static_cast(max_background_flushes); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: maxBackgroundFlushes + * Signature: (J)I + */ +jint 
Java_org_rocksdb_DBOptions_maxBackgroundFlushes(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
+      max_background_flushes;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxLogFileSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxLogFileSize(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong max_log_file_size) {
+  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(max_log_file_size);
+  if (s.ok()) {
+    reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_log_file_size =
+        max_log_file_size;
+  } else {
+    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxLogFileSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_maxLogFileSize(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_log_file_size;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setLogFileTimeToRoll
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setLogFileTimeToRoll(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong log_file_time_to_roll) {
+  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(
+      log_file_time_to_roll);
+  if (s.ok()) {
+    reinterpret_cast<rocksdb::DBOptions*>(jhandle)->log_file_time_to_roll =
+        log_file_time_to_roll;
+  } else {
+    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: logFileTimeToRoll
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_logFileTimeToRoll(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->log_file_time_to_roll;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setKeepLogFileNum
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setKeepLogFileNum(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong keep_log_file_num) {
+  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(keep_log_file_num);
+  if (s.ok()) {
+    reinterpret_cast<rocksdb::DBOptions*>(jhandle)->keep_log_file_num =
+        keep_log_file_num;
+  } else {
+    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: keepLogFileNum
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_keepLogFileNum(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->keep_log_file_num;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setRecycleLogFileNum
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setRecycleLogFileNum(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong recycle_log_file_num) {
+  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(recycle_log_file_num);
+  if (s.ok()) {
+    reinterpret_cast<rocksdb::DBOptions*>(jhandle)->recycle_log_file_num =
+        recycle_log_file_num;
+  } else {
+    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: recycleLogFileNum
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_recycleLogFileNum(JNIEnv* env, jobject jobj,
+                                                   jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->recycle_log_file_num;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setMaxManifestFileSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setMaxManifestFileSize(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong max_manifest_file_size) {
+  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->max_manifest_file_size =
+      static_cast<uint64_t>(max_manifest_file_size);
+}
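The four log-related knobs wrapped above interact on the native side: the first two control when the info log is rolled, the third how many rolled logs survive, and the fourth how many old WAL files may be reused. A brief configuration sketch (illustration only; the function name is hypothetical and the values are arbitrary):

#include <rocksdb/options.h>

rocksdb::DBOptions MakeLoggingOptions() {
  rocksdb::DBOptions db_opts;
  db_opts.max_log_file_size = 16 << 20;  // roll the info log after ~16 MB...
  db_opts.log_file_time_to_roll = 3600;  // ...or after an hour, whichever comes first
  db_opts.keep_log_file_num = 10;        // retain at most 10 rolled info logs
  db_opts.recycle_log_file_num = 4;      // reuse up to 4 old WAL files instead of deleting them
  return db_opts;
}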
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: maxManifestFileSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_maxManifestFileSize(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
+      max_manifest_file_size;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setTableCacheNumshardbits
+ * Signature: (JI)V
+ */
+void Java_org_rocksdb_DBOptions_setTableCacheNumshardbits(
+    JNIEnv* env, jobject jobj, jlong jhandle, jint table_cache_numshardbits) {
+  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->table_cache_numshardbits =
+      static_cast<int>(table_cache_numshardbits);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: tableCacheNumshardbits
+ * Signature: (J)I
+ */
+jint Java_org_rocksdb_DBOptions_tableCacheNumshardbits(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
+      table_cache_numshardbits;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWalTtlSeconds
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWalTtlSeconds(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong WAL_ttl_seconds) {
+  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->WAL_ttl_seconds =
+      static_cast<uint64_t>(WAL_ttl_seconds);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: walTtlSeconds
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_walTtlSeconds(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->WAL_ttl_seconds;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setWalSizeLimitMB
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setWalSizeLimitMB(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong WAL_size_limit_MB) {
+  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->WAL_size_limit_MB =
+      static_cast<uint64_t>(WAL_size_limit_MB);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: walSizeLimitMB
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_walSizeLimitMB(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->WAL_size_limit_MB;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setManifestPreallocationSize
+ * Signature: (JJ)V
+ */
+void Java_org_rocksdb_DBOptions_setManifestPreallocationSize(
+    JNIEnv* env, jobject jobj, jlong jhandle, jlong preallocation_size) {
+  rocksdb::Status s = rocksdb::check_if_jlong_fits_size_t(preallocation_size);
+  if (s.ok()) {
+    reinterpret_cast<rocksdb::DBOptions*>(jhandle)->
+        manifest_preallocation_size = preallocation_size;
+  } else {
+    rocksdb::IllegalArgumentExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: manifestPreallocationSize
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_DBOptions_manifestPreallocationSize(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)
+      ->manifest_preallocation_size;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAllowOsBuffer
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAllowOsBuffer(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_os_buffer) {
+  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_os_buffer =
+      static_cast<bool>(allow_os_buffer);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: allowOsBuffer
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_allowOsBuffer(
+    JNIEnv* env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_os_buffer;
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: setAllowMmapReads
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_DBOptions_setAllowMmapReads(
+    JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_mmap_reads) {
+  reinterpret_cast<rocksdb::DBOptions*>(jhandle)->allow_mmap_reads =
+      static_cast<bool>(allow_mmap_reads);
+}
+
+/*
+ * Class: org_rocksdb_DBOptions
+ * Method: allowMmapReads
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_DBOptions_allowMmapReads(
+    JNIEnv* env, jobject
jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->allow_mmap_reads; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAllowMmapWrites + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAllowMmapWrites( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean allow_mmap_writes) { + reinterpret_cast(jhandle)->allow_mmap_writes = + static_cast(allow_mmap_writes); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: allowMmapWrites + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_allowMmapWrites( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->allow_mmap_writes; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setIsFdCloseOnExec + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setIsFdCloseOnExec( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean is_fd_close_on_exec) { + reinterpret_cast(jhandle)->is_fd_close_on_exec = + static_cast(is_fd_close_on_exec); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: isFdCloseOnExec + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_isFdCloseOnExec( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->is_fd_close_on_exec; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setStatsDumpPeriodSec + * Signature: (JI)V + */ +void Java_org_rocksdb_DBOptions_setStatsDumpPeriodSec( + JNIEnv* env, jobject jobj, jlong jhandle, jint stats_dump_period_sec) { + reinterpret_cast(jhandle)->stats_dump_period_sec = + static_cast(stats_dump_period_sec); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: statsDumpPeriodSec + * Signature: (J)I + */ +jint Java_org_rocksdb_DBOptions_statsDumpPeriodSec( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->stats_dump_period_sec; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setAdviseRandomOnOpen + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setAdviseRandomOnOpen( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean advise_random_on_open) { + reinterpret_cast(jhandle)->advise_random_on_open = + static_cast(advise_random_on_open); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: adviseRandomOnOpen + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_adviseRandomOnOpen( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->advise_random_on_open; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setUseAdaptiveMutex + * Signature: (JZ)V + */ +void Java_org_rocksdb_DBOptions_setUseAdaptiveMutex( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean use_adaptive_mutex) { + reinterpret_cast(jhandle)->use_adaptive_mutex = + static_cast(use_adaptive_mutex); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: useAdaptiveMutex + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_DBOptions_useAdaptiveMutex( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->use_adaptive_mutex; +} + +/* + * Class: org_rocksdb_DBOptions + * Method: setBytesPerSync + * Signature: (JJ)V + */ +void Java_org_rocksdb_DBOptions_setBytesPerSync( + JNIEnv* env, jobject jobj, jlong jhandle, jlong bytes_per_sync) { + reinterpret_cast(jhandle)->bytes_per_sync = + static_cast(bytes_per_sync); +} + +/* + * Class: org_rocksdb_DBOptions + * Method: bytesPerSync + * Signature: (J)J + */ +jlong Java_org_rocksdb_DBOptions_bytesPerSync( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->bytes_per_sync; +} + +////////////////////////////////////////////////////////////////////////////// +// 
rocksdb::WriteOptions + +/* + * Class: org_rocksdb_WriteOptions + * Method: newWriteOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_WriteOptions_newWriteOptions( + JNIEnv* env, jclass jcls) { + rocksdb::WriteOptions* op = new rocksdb::WriteOptions(); + return reinterpret_cast(op); +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: disposeInternal + * Signature: ()V + */ +void Java_org_rocksdb_WriteOptions_disposeInternal( + JNIEnv* env, jobject jwrite_options, jlong jhandle) { + auto write_options = reinterpret_cast(jhandle); + delete write_options; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: setSync + * Signature: (JZ)V + */ +void Java_org_rocksdb_WriteOptions_setSync( + JNIEnv* env, jobject jwrite_options, jlong jhandle, jboolean jflag) { + reinterpret_cast(jhandle)->sync = jflag; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: sync + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteOptions_sync( + JNIEnv* env, jobject jwrite_options, jlong jhandle) { + return reinterpret_cast(jhandle)->sync; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: setDisableWAL + * Signature: (JZ)V + */ +void Java_org_rocksdb_WriteOptions_setDisableWAL( + JNIEnv* env, jobject jwrite_options, jlong jhandle, jboolean jflag) { + reinterpret_cast(jhandle)->disableWAL = jflag; +} + +/* + * Class: org_rocksdb_WriteOptions + * Method: disableWAL + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WriteOptions_disableWAL( + JNIEnv* env, jobject jwrite_options, jlong jhandle) { + return reinterpret_cast(jhandle)->disableWAL; +} + +///////////////////////////////////////////////////////////////////// +// rocksdb::ReadOptions + +/* + * Class: org_rocksdb_ReadOptions + * Method: newReadOptions + * Signature: ()J + */ +jlong Java_org_rocksdb_ReadOptions_newReadOptions( + JNIEnv* env, jclass jcls) { + auto read_opt = new rocksdb::ReadOptions(); + return reinterpret_cast(read_opt); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_ReadOptions_disposeInternal( + JNIEnv* env, jobject jobj, jlong jhandle) { + delete reinterpret_cast(jhandle); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setVerifyChecksums + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setVerifyChecksums( + JNIEnv* env, jobject jobj, jlong jhandle, + jboolean jverify_checksums) { + reinterpret_cast(jhandle)->verify_checksums = + static_cast(jverify_checksums); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: verifyChecksums + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_verifyChecksums( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast( + jhandle)->verify_checksums; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setFillCache + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setFillCache( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean jfill_cache) { + reinterpret_cast(jhandle)->fill_cache = + static_cast(jfill_cache); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: fillCache + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_fillCache( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->fill_cache; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setTailing + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setTailing( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean jtailing) { + reinterpret_cast(jhandle)->tailing = + static_cast(jtailing); +} + +/* + * Class: 
org_rocksdb_ReadOptions + * Method: tailing + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_tailing( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->tailing; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: managed + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_managed( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->managed; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setManaged + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setManaged( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean jmanaged) { + reinterpret_cast(jhandle)->managed = + static_cast(jmanaged); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: totalOrderSeek + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_totalOrderSeek( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->total_order_seek; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setTotalOrderSeek + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setTotalOrderSeek( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean jtotal_order_seek) { + reinterpret_cast(jhandle)->total_order_seek = + static_cast(jtotal_order_seek); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: prefixSameAsStart + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_prefixSameAsStart( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->prefix_same_as_start; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setPrefixSameAsStart + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setPrefixSameAsStart( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean jprefix_same_as_start) { + reinterpret_cast(jhandle)->prefix_same_as_start = + static_cast(jprefix_same_as_start); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: pinData + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_ReadOptions_pinData( + JNIEnv* env, jobject jobj, jlong jhandle) { + return reinterpret_cast(jhandle)->pin_data; +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setPinData + * Signature: (JZ)V + */ +void Java_org_rocksdb_ReadOptions_setPinData( + JNIEnv* env, jobject jobj, jlong jhandle, jboolean jpin_data) { + reinterpret_cast(jhandle)->pin_data = + static_cast(jpin_data); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setSnapshot + * Signature: (JJ)V + */ +void Java_org_rocksdb_ReadOptions_setSnapshot( + JNIEnv* env, jobject jobj, jlong jhandle, jlong jsnapshot) { + reinterpret_cast(jhandle)->snapshot = + reinterpret_cast(jsnapshot); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: snapshot + * Signature: (J)J + */ +jlong Java_org_rocksdb_ReadOptions_snapshot( + JNIEnv* env, jobject jobj, jlong jhandle) { + auto& snapshot = + reinterpret_cast(jhandle)->snapshot; + return reinterpret_cast(snapshot); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: readTier + * Signature: (J)B + */ +jbyte Java_org_rocksdb_ReadOptions_readTier( + JNIEnv* env, jobject jobj, jlong jhandle) { + return static_cast( + reinterpret_cast(jhandle)->read_tier); +} + +/* + * Class: org_rocksdb_ReadOptions + * Method: setReadTier + * Signature: (JB)V + */ +void Java_org_rocksdb_ReadOptions_setReadTier( + JNIEnv* env, jobject jobj, jlong jhandle, jbyte jread_tier) { + reinterpret_cast(jhandle)->read_tier = + static_cast(jread_tier); +} + +///////////////////////////////////////////////////////////////////// +// rocksdb::ComparatorOptions + +/* + * Class: 
org_rocksdb_ComparatorOptions
+ * Method:    newComparatorOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_ComparatorOptions_newComparatorOptions(
+    JNIEnv* env, jclass jcls) {
+  auto comparator_opt = new rocksdb::ComparatorJniCallbackOptions();
+  return reinterpret_cast<jlong>(comparator_opt);
+}
+
+/*
+ * Class:     org_rocksdb_ComparatorOptions
+ * Method:    useAdaptiveMutex
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_ComparatorOptions_useAdaptiveMutex(
+    JNIEnv * env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle)
+      ->use_adaptive_mutex;
+}
+
+/*
+ * Class:     org_rocksdb_ComparatorOptions
+ * Method:    setUseAdaptiveMutex
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_ComparatorOptions_setUseAdaptiveMutex(
+    JNIEnv * env, jobject jobj, jlong jhandle, jboolean juse_adaptive_mutex) {
+  reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle)
+      ->use_adaptive_mutex = static_cast<bool>(juse_adaptive_mutex);
+}
+
+/*
+ * Class:     org_rocksdb_ComparatorOptions
+ * Method:    disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_ComparatorOptions_disposeInternal(
+    JNIEnv * env, jobject jobj, jlong jhandle) {
+  delete reinterpret_cast<rocksdb::ComparatorJniCallbackOptions*>(jhandle);
+}
+
+/////////////////////////////////////////////////////////////////////
+// rocksdb::FlushOptions
+
+/*
+ * Class:     org_rocksdb_FlushOptions
+ * Method:    newFlushOptions
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_FlushOptions_newFlushOptions(
+    JNIEnv* env, jclass jcls) {
+  auto flush_opt = new rocksdb::FlushOptions();
+  return reinterpret_cast<jlong>(flush_opt);
+}
+
+/*
+ * Class:     org_rocksdb_FlushOptions
+ * Method:    setWaitForFlush
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_FlushOptions_setWaitForFlush(
+    JNIEnv * env, jobject jobj, jlong jhandle, jboolean jwait) {
+  reinterpret_cast<rocksdb::FlushOptions*>(jhandle)
+      ->wait = static_cast<bool>(jwait);
+}
+
+/*
+ * Class:     org_rocksdb_FlushOptions
+ * Method:    waitForFlush
+ * Signature: (J)Z
+ */
+jboolean Java_org_rocksdb_FlushOptions_waitForFlush(
+    JNIEnv * env, jobject jobj, jlong jhandle) {
+  return reinterpret_cast<rocksdb::FlushOptions*>(jhandle)
+      ->wait;
+}
+
+/*
+ * Class:     org_rocksdb_FlushOptions
+ * Method:    disposeInternal
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_FlushOptions_disposeInternal(
+    JNIEnv * env, jobject jobj, jlong jhandle) {
+  delete reinterpret_cast<rocksdb::FlushOptions*>(jhandle);
+}
diff --git a/external/rocksdb/java/rocksjni/portal.h b/external/rocksdb/java/rocksjni/portal.h
new file mode 100755
index 0000000000..4d7e502174
--- /dev/null
+++ b/external/rocksdb/java/rocksjni/portal.h
@@ -0,0 +1,780 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+// This file is designed for caching frequently used IDs and providing an
+// efficient portal (i.e., a set of static functions) to access Java code
+// from C++.
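+//
+// Illustrative use of a portal class from a native method -- a hypothetical
+// sketch, not part of this header itself. It assumes a valid JNIEnv* `env`
+// supplied by the JVM and a rocksdb::Status `s` returned by some DB call:
+//
+//   if (!s.ok()) {
+//     // Converts the Status into an org.rocksdb.RocksDBException and
+//     // schedules it to be thrown once control returns to Java.
+//     rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+//     return;
+//   }
+//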
+
+#ifndef JAVA_ROCKSJNI_PORTAL_H_
+#define JAVA_ROCKSJNI_PORTAL_H_
+
+#include <jni.h>
+#include <functional>
+#include <limits>
+#include <string>
+
+#include "rocksdb/db.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/status.h"
+#include "rocksdb/utilities/backupable_db.h"
+#include "rocksdb/utilities/write_batch_with_index.h"
+#include "rocksjni/comparatorjnicallback.h"
+#include "rocksjni/loggerjnicallback.h"
+#include "rocksjni/writebatchhandlerjnicallback.h"
+
+// Remove macro on windows
+#ifdef DELETE
+#undef DELETE
+#endif
+
+namespace rocksdb {
+
+// Detect if jlong overflows size_t
+inline Status check_if_jlong_fits_size_t(const jlong& jvalue) {
+  Status s = Status::OK();
+  if (static_cast<uint64_t>(jvalue) > std::numeric_limits<size_t>::max()) {
+    s = Status::InvalidArgument(Slice("jlong overflows 32 bit value."));
+  }
+  return s;
+}
+
+// Native class template
+template<class PTR, class DERIVED> class RocksDBNativeClass {
+ public:
+  // Get the java class id
+  static jclass getJClass(JNIEnv* env, const char* jclazz_name) {
+    jclass jclazz = env->FindClass(jclazz_name);
+    assert(jclazz != nullptr);
+    return jclazz;
+  }
+};
+
+// Native class template for sub-classes of RocksMutableObject
+template<class PTR, class DERIVED> class NativeRocksMutableObject
+    : public RocksDBNativeClass<PTR, DERIVED> {
+ public:
+
+  static jmethodID getSetNativeHandleMethod(JNIEnv* env) {
+    static jmethodID mid = env->GetMethodID(
+        DERIVED::getJClass(env), "setNativeHandle", "(JZ)V");
+    assert(mid != nullptr);
+    return mid;
+  }
+
+  // Pass the pointer to the java side.
+  static void setHandle(JNIEnv* env, jobject jobj, PTR ptr,
+      jboolean java_owns_handle) {
+    env->CallVoidMethod(jobj, getSetNativeHandleMethod(env),
+        reinterpret_cast<jlong>(ptr), java_owns_handle);
+  }
+};
+
+// Java Exception template
+template<class DERIVED> class RocksDBJavaException {
+ public:
+  // Get the java class id
+  static jclass getJClass(JNIEnv* env, const char* jclazz_name) {
+    jclass jclazz = env->FindClass(jclazz_name);
+    assert(jclazz != nullptr);
+    return jclazz;
+  }
+
+  // Create and throw a java exception by converting the input
+  // Status.
+  //
+  // In case s.ok() is true, then this function will not throw any
+  // exception.
+  static void ThrowNew(JNIEnv* env, Status s) {
+    if (s.ok()) {
+      return;
+    }
+    jstring msg = env->NewStringUTF(s.ToString().c_str());
+    // get the constructor id of org.rocksdb.RocksDBException
+    static jmethodID mid = env->GetMethodID(
+        DERIVED::getJClass(env), "<init>", "(Ljava/lang/String;)V");
+    assert(mid != nullptr);
+
+    env->Throw((jthrowable)env->NewObject(DERIVED::getJClass(env),
+        mid, msg));
+  }
+};
+
+// The portal class for org.rocksdb.RocksDB
+class RocksDBJni : public RocksDBNativeClass<rocksdb::DB*, RocksDBJni> {
+ public:
+  // Get the java class id of org.rocksdb.RocksDB.
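+  // (Note: env->FindClass yields a JNI local reference that is only valid
+  // for the duration of the current native call, so the portal classes look
+  // the class up on each call instead of caching the jclass.)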
+ static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/RocksDB"); + } +}; + +// The portal class for org.rocksdb.RocksDBException +class RocksDBExceptionJni : + public RocksDBJavaException { + public: + // Get the java class id of java.lang.IllegalArgumentException + static jclass getJClass(JNIEnv* env) { + return RocksDBJavaException::getJClass(env, + "org/rocksdb/RocksDBException"); + } +}; + +// The portal class for java.lang.IllegalArgumentException +class IllegalArgumentExceptionJni : + public RocksDBJavaException { + public: + // Get the java class id of java.lang.IllegalArgumentException + static jclass getJClass(JNIEnv* env) { + return RocksDBJavaException::getJClass(env, + "java/lang/IllegalArgumentException"); + } +}; + + +// The portal class for org.rocksdb.Options +class OptionsJni : public RocksDBNativeClass< + rocksdb::Options*, OptionsJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/Options"); + } +}; + +// The portal class for org.rocksdb.DBOptions +class DBOptionsJni : public RocksDBNativeClass< + rocksdb::DBOptions*, DBOptionsJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, "org/rocksdb/DBOptions"); + } +}; + +class ColumnFamilyDescriptorJni { + public: + // Get the java class id of org.rocksdb.ColumnFamilyDescriptor + static jclass getColumnFamilyDescriptorClass(JNIEnv* env) { + jclass jclazz = env->FindClass("org/rocksdb/ColumnFamilyDescriptor"); + assert(jclazz != nullptr); + return jclazz; + } + + // Get the java method id of columnFamilyName + static jmethodID getColumnFamilyNameMethod(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getColumnFamilyDescriptorClass(env), + "columnFamilyName", "()[B"); + assert(mid != nullptr); + return mid; + } + + // Get the java method id of columnFamilyOptions + static jmethodID getColumnFamilyOptionsMethod(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getColumnFamilyDescriptorClass(env), + "columnFamilyOptions", "()Lorg/rocksdb/ColumnFamilyOptions;"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for org.rocksdb.ColumnFamilyOptions +class ColumnFamilyOptionsJni : public RocksDBNativeClass< + rocksdb::ColumnFamilyOptions*, ColumnFamilyOptionsJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/ColumnFamilyOptions"); + } +}; + +// The portal class for org.rocksdb.WriteOptions +class WriteOptionsJni : public RocksDBNativeClass< + rocksdb::WriteOptions*, WriteOptionsJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/WriteOptions"); + } +}; + +// The portal class for org.rocksdb.ReadOptions +class ReadOptionsJni : public RocksDBNativeClass< + rocksdb::ReadOptions*, ReadOptionsJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/ReadOptions"); + } +}; + +// The portal class for org.rocksdb.ReadOptions +class WriteBatchJni : public RocksDBNativeClass< + rocksdb::WriteBatch*, WriteBatchJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/WriteBatch"); + } +}; + +// The portal class for org.rocksdb.WriteBatch.Handler +class WriteBatchHandlerJni : public RocksDBNativeClass< + const rocksdb::WriteBatchHandlerJniCallback*, + WriteBatchHandlerJni> { + public: + static jclass 
getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/WriteBatch$Handler"); + } + + // Get the java method `put` of org.rocksdb.WriteBatch.Handler. + static jmethodID getPutMethodId(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getJClass(env), "put", "([B[B)V"); + assert(mid != nullptr); + return mid; + } + + // Get the java method `merge` of org.rocksdb.WriteBatch.Handler. + static jmethodID getMergeMethodId(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getJClass(env), "merge", "([B[B)V"); + assert(mid != nullptr); + return mid; + } + + // Get the java method `delete` of org.rocksdb.WriteBatch.Handler. + static jmethodID getDeleteMethodId(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getJClass(env), "delete", "([B)V"); + assert(mid != nullptr); + return mid; + } + + // Get the java method `logData` of org.rocksdb.WriteBatch.Handler. + static jmethodID getLogDataMethodId(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getJClass(env), "logData", "([B)V"); + assert(mid != nullptr); + return mid; + } + + // Get the java method `shouldContinue` of org.rocksdb.WriteBatch.Handler. + static jmethodID getContinueMethodId(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getJClass(env), "shouldContinue", "()Z"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for org.rocksdb.WriteBatchWithIndex +class WriteBatchWithIndexJni : public RocksDBNativeClass< + rocksdb::WriteBatchWithIndex*, WriteBatchWithIndexJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/WriteBatch"); + } +}; + +class HistogramDataJni { + public: + static jmethodID getConstructorMethodId(JNIEnv* env, jclass jclazz) { + static jmethodID mid = env->GetMethodID(jclazz, "", "(DDDDD)V"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for org.rocksdb.WriteBatchWithIndex +class BackupableDBOptionsJni : public RocksDBNativeClass< + rocksdb::BackupableDBOptions*, BackupableDBOptionsJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/BackupableDBOptions"); + } +}; + +class BackupEngineJni : public RocksDBNativeClass< + rocksdb::BackupEngine*, BackupEngineJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/BackupEngine"); + } +}; + +// The portal class for org.rocksdb.RocksIterator +class IteratorJni : public RocksDBNativeClass< + rocksdb::Iterator*, IteratorJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/RocksIterator"); + } +}; + +// The portal class for org.rocksdb.Filter +class FilterJni : public RocksDBNativeClass< + std::shared_ptr*, FilterJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/Filter"); + } +}; + +// The portal class for org.rocksdb.ColumnFamilyHandle +class ColumnFamilyHandleJni : public RocksDBNativeClass< + rocksdb::ColumnFamilyHandle*, ColumnFamilyHandleJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/ColumnFamilyHandle"); + } +}; + +// The portal class for org.rocksdb.FlushOptions +class FlushOptionsJni : public RocksDBNativeClass< + rocksdb::FlushOptions*, FlushOptionsJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + 
"org/rocksdb/FlushOptions"); + } +}; + +// The portal class for org.rocksdb.ComparatorOptions +class ComparatorOptionsJni : public RocksDBNativeClass< + rocksdb::ComparatorJniCallbackOptions*, ComparatorOptionsJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/ComparatorOptions"); + } +}; + +// The portal class for org.rocksdb.AbstractComparator +class AbstractComparatorJni : public RocksDBNativeClass< + const rocksdb::BaseComparatorJniCallback*, + AbstractComparatorJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/AbstractComparator"); + } + + // Get the java method `name` of org.rocksdb.Comparator. + static jmethodID getNameMethodId(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getJClass(env), "name", "()Ljava/lang/String;"); + assert(mid != nullptr); + return mid; + } + + // Get the java method `compare` of org.rocksdb.Comparator. + static jmethodID getCompareMethodId(JNIEnv* env) { + static jmethodID mid = env->GetMethodID(getJClass(env), + "compare", + "(Lorg/rocksdb/AbstractSlice;Lorg/rocksdb/AbstractSlice;)I"); + assert(mid != nullptr); + return mid; + } + + // Get the java method `findShortestSeparator` of org.rocksdb.Comparator. + static jmethodID getFindShortestSeparatorMethodId(JNIEnv* env) { + static jmethodID mid = env->GetMethodID(getJClass(env), + "findShortestSeparator", + "(Ljava/lang/String;Lorg/rocksdb/AbstractSlice;)Ljava/lang/String;"); + assert(mid != nullptr); + return mid; + } + + // Get the java method `findShortSuccessor` of org.rocksdb.Comparator. + static jmethodID getFindShortSuccessorMethodId(JNIEnv* env) { + static jmethodID mid = env->GetMethodID(getJClass(env), + "findShortSuccessor", + "(Ljava/lang/String;)Ljava/lang/String;"); + assert(mid != nullptr); + return mid; + } +}; + +// The portal class for org.rocksdb.AbstractSlice +class AbstractSliceJni : public NativeRocksMutableObject< + const rocksdb::Slice*, AbstractSliceJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/AbstractSlice"); + } +}; + +class SliceJni { + public: + // Get the java class id of org.rocksdb.Slice. + static jclass getJClass(JNIEnv* env) { + jclass jclazz = env->FindClass("org/rocksdb/Slice"); + assert(jclazz != nullptr); + return jclazz; + } + + static jobject construct0(JNIEnv* env) { + static jmethodID mid = env->GetMethodID(getJClass(env), "", "()V"); + assert(mid != nullptr); + return env->NewObject(getJClass(env), mid); + } +}; + +class DirectSliceJni { + public: + // Get the java class id of org.rocksdb.DirectSlice. + static jclass getJClass(JNIEnv* env) { + jclass jclazz = env->FindClass("org/rocksdb/DirectSlice"); + assert(jclazz != nullptr); + return jclazz; + } + + static jobject construct0(JNIEnv* env) { + static jmethodID mid = env->GetMethodID(getJClass(env), "", "()V"); + assert(mid != nullptr); + return env->NewObject(getJClass(env), mid); + } +}; + +class ListJni { + public: + // Get the java class id of java.util.List. + static jclass getListClass(JNIEnv* env) { + jclass jclazz = env->FindClass("java/util/List"); + assert(jclazz != nullptr); + return jclazz; + } + + // Get the java class id of java.util.ArrayList. + static jclass getArrayListClass(JNIEnv* env) { + jclass jclazz = env->FindClass("java/util/ArrayList"); + assert(jclazz != nullptr); + return jclazz; + } + + // Get the java class id of java.util.Iterator. 
+ static jclass getIteratorClass(JNIEnv* env) { + jclass jclazz = env->FindClass("java/util/Iterator"); + assert(jclazz != nullptr); + return jclazz; + } + + // Get the java method id of java.util.List.iterator(). + static jmethodID getIteratorMethod(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getListClass(env), "iterator", "()Ljava/util/Iterator;"); + assert(mid != nullptr); + return mid; + } + + // Get the java method id of java.util.Iterator.hasNext(). + static jmethodID getHasNextMethod(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getIteratorClass(env), "hasNext", "()Z"); + assert(mid != nullptr); + return mid; + } + + // Get the java method id of java.util.Iterator.next(). + static jmethodID getNextMethod(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getIteratorClass(env), "next", "()Ljava/lang/Object;"); + assert(mid != nullptr); + return mid; + } + + // Get the java method id of arrayList constructor. + static jmethodID getArrayListConstructorMethodId(JNIEnv* env, jclass jclazz) { + static jmethodID mid = env->GetMethodID( + jclazz, "", "(I)V"); + assert(mid != nullptr); + return mid; + } + + // Get the java method id of java.util.List.add(). + static jmethodID getListAddMethodId(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getListClass(env), "add", "(Ljava/lang/Object;)Z"); + assert(mid != nullptr); + return mid; + } +}; + +class ByteJni { + public: + // Get the java class id of java.lang.Byte. + static jclass getByteClass(JNIEnv* env) { + jclass jclazz = env->FindClass("java/lang/Byte"); + assert(jclazz != nullptr); + return jclazz; + } + + // Get the java method id of java.lang.Byte.byteValue. + static jmethodID getByteValueMethod(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getByteClass(env), "byteValue", "()B"); + assert(mid != nullptr); + return mid; + } +}; + +class BackupInfoJni { + public: + // Get the java class id of org.rocksdb.BackupInfo. + static jclass getJClass(JNIEnv* env) { + jclass jclazz = env->FindClass("org/rocksdb/BackupInfo"); + assert(jclazz != nullptr); + return jclazz; + } + + static jobject construct0(JNIEnv* env, uint32_t backup_id, int64_t timestamp, + uint64_t size, uint32_t number_files) { + static jmethodID mid = env->GetMethodID(getJClass(env), "", + "(IJJI)V"); + assert(mid != nullptr); + return env->NewObject(getJClass(env), mid, + backup_id, timestamp, size, number_files); + } +}; + +class BackupInfoListJni { + public: + static jobject getBackupInfo(JNIEnv* env, + std::vector backup_infos) { + jclass jclazz = env->FindClass("java/util/ArrayList"); + jmethodID mid = rocksdb::ListJni::getArrayListConstructorMethodId( + env, jclazz); + jobject jbackup_info_handle_list = env->NewObject(jclazz, mid, + backup_infos.size()); + // insert in java list + for (std::vector::size_type i = 0; + i != backup_infos.size(); i++) { + rocksdb::BackupInfo backup_info = backup_infos[i]; + jobject obj = rocksdb::BackupInfoJni::construct0(env, + backup_info.backup_id, + backup_info.timestamp, + backup_info.size, + backup_info.number_files); + env->CallBooleanMethod(jbackup_info_handle_list, + rocksdb::ListJni::getListAddMethodId(env), obj); + } + return jbackup_info_handle_list; + } +}; + +class WBWIRocksIteratorJni { + public: + // Get the java class id of org.rocksdb.WBWIRocksIterator. 
+ static jclass getJClass(JNIEnv* env) { + static jclass jclazz = env->FindClass("org/rocksdb/WBWIRocksIterator"); + assert(jclazz != nullptr); + return jclazz; + } + + static jfieldID getWriteEntryField(JNIEnv* env) { + static jfieldID fid = + env->GetFieldID(getJClass(env), "entry", + "Lorg/rocksdb/WBWIRocksIterator$WriteEntry;"); + assert(fid != nullptr); + return fid; + } + + static jobject getWriteEntry(JNIEnv* env, jobject jwbwi_rocks_iterator) { + jobject jwe = + env->GetObjectField(jwbwi_rocks_iterator, getWriteEntryField(env)); + assert(jwe != nullptr); + return jwe; + } +}; + +class WriteTypeJni { + public: + // Get the PUT enum field of org.rocksdb.WBWIRocksIterator.WriteType + static jobject PUT(JNIEnv* env) { + return getEnum(env, "PUT"); + } + + // Get the MERGE enum field of org.rocksdb.WBWIRocksIterator.WriteType + static jobject MERGE(JNIEnv* env) { + return getEnum(env, "MERGE"); + } + + // Get the DELETE enum field of org.rocksdb.WBWIRocksIterator.WriteType + static jobject DELETE(JNIEnv* env) { + return getEnum(env, "DELETE"); + } + + // Get the LOG enum field of org.rocksdb.WBWIRocksIterator.WriteType + static jobject LOG(JNIEnv* env) { + return getEnum(env, "LOG"); + } + + private: + // Get the java class id of org.rocksdb.WBWIRocksIterator.WriteType. + static jclass getJClass(JNIEnv* env) { + jclass jclazz = env->FindClass("org/rocksdb/WBWIRocksIterator$WriteType"); + assert(jclazz != nullptr); + return jclazz; + } + + // Get an enum field of org.rocksdb.WBWIRocksIterator.WriteType + static jobject getEnum(JNIEnv* env, const char name[]) { + jclass jclazz = getJClass(env); + jfieldID jfid = + env->GetStaticFieldID(jclazz, name, + "Lorg/rocksdb/WBWIRocksIterator$WriteType;"); + assert(jfid != nullptr); + return env->GetStaticObjectField(jclazz, jfid); + } +}; + +class WriteEntryJni { + public: + // Get the java class id of org.rocksdb.WBWIRocksIterator.WriteEntry. 
+ static jclass getJClass(JNIEnv* env) { + static jclass jclazz = + env->FindClass("org/rocksdb/WBWIRocksIterator$WriteEntry"); + assert(jclazz != nullptr); + return jclazz; + } +}; + +class InfoLogLevelJni { + public: + // Get the DEBUG_LEVEL enum field of org.rocksdb.InfoLogLevel + static jobject DEBUG_LEVEL(JNIEnv* env) { + return getEnum(env, "DEBUG_LEVEL"); + } + + // Get the INFO_LEVEL enum field of org.rocksdb.InfoLogLevel + static jobject INFO_LEVEL(JNIEnv* env) { + return getEnum(env, "INFO_LEVEL"); + } + + // Get the WARN_LEVEL enum field of org.rocksdb.InfoLogLevel + static jobject WARN_LEVEL(JNIEnv* env) { + return getEnum(env, "WARN_LEVEL"); + } + + // Get the ERROR_LEVEL enum field of org.rocksdb.InfoLogLevel + static jobject ERROR_LEVEL(JNIEnv* env) { + return getEnum(env, "ERROR_LEVEL"); + } + + // Get the FATAL_LEVEL enum field of org.rocksdb.InfoLogLevel + static jobject FATAL_LEVEL(JNIEnv* env) { + return getEnum(env, "FATAL_LEVEL"); + } + + // Get the HEADER_LEVEL enum field of org.rocksdb.InfoLogLevel + static jobject HEADER_LEVEL(JNIEnv* env) { + return getEnum(env, "HEADER_LEVEL"); + } + + private: + // Get the java class id of org.rocksdb.InfoLogLevel + static jclass getJClass(JNIEnv* env) { + jclass jclazz = env->FindClass("org/rocksdb/InfoLogLevel"); + assert(jclazz != nullptr); + return jclazz; + } + + // Get an enum field of org.rocksdb.InfoLogLevel + static jobject getEnum(JNIEnv* env, const char name[]) { + jclass jclazz = getJClass(env); + jfieldID jfid = + env->GetStaticFieldID(jclazz, name, + "Lorg/rocksdb/InfoLogLevel;"); + assert(jfid != nullptr); + return env->GetStaticObjectField(jclazz, jfid); + } +}; + +// The portal class for org.rocksdb.Logger +class LoggerJni : public RocksDBNativeClass< + std::shared_ptr*, LoggerJni> { + public: + static jclass getJClass(JNIEnv* env) { + return RocksDBNativeClass::getJClass(env, + "org/rocksdb/Logger"); + } + + // Get the java method `name` of org.rocksdb.Logger. + static jmethodID getLogMethodId(JNIEnv* env) { + static jmethodID mid = env->GetMethodID( + getJClass(env), "log", + "(Lorg/rocksdb/InfoLogLevel;Ljava/lang/String;)V"); + assert(mid != nullptr); + return mid; + } +}; + +class JniUtil { + public: + /* + * Copies a jstring to a std::string + * and releases the original jstring + */ + static std::string copyString(JNIEnv* env, jstring js) { + const char *utf = env->GetStringUTFChars(js, NULL); + std::string name(utf); + env->ReleaseStringUTFChars(js, utf); + return name; + } + + /* + * Helper for operations on a key and value + * for example WriteBatch->Put + * + * TODO(AR) could be extended to cover returning rocksdb::Status + * from `op` and used for RocksDB->Put etc. + */ + static void kv_op( + std::function op, + JNIEnv* env, jobject jobj, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len) { + jbyte* key = env->GetByteArrayElements(jkey, nullptr); + jbyte* value = env->GetByteArrayElements(jentry_value, nullptr); + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_len); + rocksdb::Slice value_slice(reinterpret_cast(value), + jentry_value_len); + + op(key_slice, value_slice); + + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + env->ReleaseByteArrayElements(jentry_value, value, JNI_ABORT); + } + + /* + * Helper for operations on a key + * for example WriteBatch->Delete + * + * TODO(AR) could be extended to cover returning rocksdb::Status + * from `op` and used for RocksDB->Delete etc. 
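+   *
+   * A hypothetical call site (sketch only, mirroring kv_op above) would
+   * adapt a WriteBatch delete to the `op` callback like so:
+   *
+   *   rocksdb::JniUtil::k_op(
+   *       [&wb] (rocksdb::Slice key) { wb->Delete(key); },
+   *       env, jobj, jkey, jkey_len);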
+   */
+  static void k_op(
+      std::function<void(rocksdb::Slice)> op,
+      JNIEnv* env, jobject jobj,
+      jbyteArray jkey, jint jkey_len) {
+    jbyte* key = env->GetByteArrayElements(jkey, nullptr);
+    rocksdb::Slice key_slice(reinterpret_cast<char*>(key), jkey_len);
+
+    op(key_slice);
+
+    env->ReleaseByteArrayElements(jkey, key, JNI_ABORT);
+  }
+};
+
+}  // namespace rocksdb
+#endif  // JAVA_ROCKSJNI_PORTAL_H_
diff --git a/external/rocksdb/java/rocksjni/ratelimiterjni.cc b/external/rocksdb/java/rocksjni/ratelimiterjni.cc
new file mode 100755
index 0000000000..7b4bc1f224
--- /dev/null
+++ b/external/rocksdb/java/rocksjni/ratelimiterjni.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++ for RateLimiter.
+
+#include "rocksjni/portal.h"
+#include "include/org_rocksdb_GenericRateLimiterConfig.h"
+#include "rocksdb/rate_limiter.h"
+
+/*
+ * Class:     org_rocksdb_GenericRateLimiterConfig
+ * Method:    newRateLimiterHandle
+ * Signature: (JJI)J
+ */
+jlong Java_org_rocksdb_GenericRateLimiterConfig_newRateLimiterHandle(
+    JNIEnv* env, jobject jobj, jlong jrate_bytes_per_second,
+    jlong jrefill_period_micros, jint jfairness) {
+  return reinterpret_cast<jlong>(rocksdb::NewGenericRateLimiter(
+      static_cast<int64_t>(jrate_bytes_per_second),
+      static_cast<int64_t>(jrefill_period_micros),
+      static_cast<int32_t>(jfairness)));
+}
diff --git a/external/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc b/external/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc
new file mode 100755
index 0000000000..ef17efeec6
--- /dev/null
+++ b/external/rocksdb/java/rocksjni/remove_emptyvalue_compactionfilterjni.cc
@@ -0,0 +1,24 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include <jni.h>
+
+#include "include/org_rocksdb_RemoveEmptyValueCompactionFilter.h"
+#include "utilities/compaction_filters/remove_emptyvalue_compactionfilter.h"
+
+
+/*
+ * Class:     org_rocksdb_RemoveEmptyValueCompactionFilter
+ * Method:    createNewRemoveEmptyValueCompactionFilter0
+ * Signature: ()J
+ */
+jlong Java_org_rocksdb_RemoveEmptyValueCompactionFilter_createNewRemoveEmptyValueCompactionFilter0(
+    JNIEnv* env, jclass jcls) {
+  auto* compaction_filter =
+      new rocksdb::RemoveEmptyValueCompactionFilter();
+
+  // set the native handle to our native compaction filter
+  return reinterpret_cast<jlong>(compaction_filter);
+}
diff --git a/external/rocksdb/java/rocksjni/restorejni.cc b/external/rocksdb/java/rocksjni/restorejni.cc
new file mode 100755
index 0000000000..154a9b5f12
--- /dev/null
+++ b/external/rocksdb/java/rocksjni/restorejni.cc
@@ -0,0 +1,39 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling C++ rocksdb::RestoreBackupableDB and rocksdb::RestoreOptions methods
+// from the Java side.
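+//
+// As elsewhere in rocksjni, native objects cross the JNI boundary as raw
+// pointers packed into Java `long` handles: construction returns
+// reinterpret_cast<jlong>(ptr), and disposal casts the handle back to the
+// native type before deleting it, so the Java object controls the lifetime.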
+ +#include +#include +#include +#include + +#include "include/org_rocksdb_RestoreOptions.h" +#include "rocksjni/portal.h" +#include "rocksdb/utilities/backupable_db.h" +/* + * Class: org_rocksdb_RestoreOptions + * Method: newRestoreOptions + * Signature: (Z)J + */ +jlong Java_org_rocksdb_RestoreOptions_newRestoreOptions(JNIEnv* env, + jclass jcls, jboolean keep_log_files) { + auto ropt = new rocksdb::RestoreOptions(keep_log_files); + return reinterpret_cast(ropt); +} + +/* + * Class: org_rocksdb_RestoreOptions + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_RestoreOptions_disposeInternal(JNIEnv* env, jobject jobj, + jlong jhandle) { + auto ropt = reinterpret_cast(jhandle); + assert(ropt); + delete ropt; +} diff --git a/external/rocksdb/java/rocksjni/rocksjni.cc b/external/rocksdb/java/rocksjni/rocksjni.cc new file mode 100755 index 0000000000..4c68178636 --- /dev/null +++ b/external/rocksdb/java/rocksjni/rocksjni.cc @@ -0,0 +1,1589 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::DB methods from Java side. + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "include/org_rocksdb_RocksDB.h" +#include "rocksdb/db.h" +#include "rocksdb/cache.h" +#include "rocksdb/types.h" +#include "rocksjni/portal.h" + +#ifdef min +#undef min +#endif + +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::Open +jlong rocksdb_open_helper(JNIEnv* env, jlong jopt_handle, jstring jdb_path, + std::function open_fn + ) { + auto* opt = reinterpret_cast(jopt_handle); + rocksdb::DB* db = nullptr; + const char* db_path = env->GetStringUTFChars(jdb_path, NULL); + rocksdb::Status s = open_fn(*opt, db_path, &db); + env->ReleaseStringUTFChars(jdb_path, db_path); + + if (s.ok()) { + return reinterpret_cast(db); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return 0; + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: open + * Signature: (JLjava/lang/String;)J + */ +jlong Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2( + JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path) { + return rocksdb_open_helper(env, jopt_handle, jdb_path, + (rocksdb::Status(*) + (const rocksdb::Options&, const std::string&, rocksdb::DB**) + )&rocksdb::DB::Open + ); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: openROnly + * Signature: (JLjava/lang/String;)J + */ +jlong Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2( + JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path) { + return rocksdb_open_helper(env, jopt_handle, jdb_path, []( + const rocksdb::Options& options, + const std::string& db_path, rocksdb::DB** db) { + return rocksdb::DB::OpenForReadOnly(options, db_path, db); + }); +} + +jlongArray rocksdb_open_helper(JNIEnv* env, jlong jopt_handle, + jstring jdb_path, jobjectArray jcolumn_names, jlongArray jcolumn_options, + std::function&, + std::vector*, + rocksdb::DB**)> open_fn + ) { + auto* opt = reinterpret_cast(jopt_handle); + const char* db_path = env->GetStringUTFChars(jdb_path, NULL); + + std::vector column_families; + + jsize len_cols = env->GetArrayLength(jcolumn_names); + jlong* jco = 
env->GetLongArrayElements(jcolumn_options, NULL); + for(int i = 0; i < len_cols; i++) { + jobject jcn = env->GetObjectArrayElement(jcolumn_names, i); + jbyteArray jcn_ba = reinterpret_cast(jcn); + jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, NULL); + const int jcf_name_len = env->GetArrayLength(jcn_ba); + + //TODO(AR) do I need to make a copy of jco[i] ? + + std::string cf_name (reinterpret_cast(jcf_name), jcf_name_len); + rocksdb::ColumnFamilyOptions* cf_options = + reinterpret_cast(jco[i]); + column_families.push_back( + rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options)); + + env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT); + env->DeleteLocalRef(jcn); + } + env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT); + + std::vector handles; + rocksdb::DB* db = nullptr; + rocksdb::Status s = open_fn(*opt, db_path, column_families, + &handles, &db); + + // check if open operation was successful + if (s.ok()) { + jsize resultsLen = 1 + len_cols; //db handle + column family handles + std::unique_ptr results = + std::unique_ptr(new jlong[resultsLen]); + results[0] = reinterpret_cast(db); + for(int i = 1; i <= len_cols; i++) { + results[i] = reinterpret_cast(handles[i - 1]); + } + + jlongArray jresults = env->NewLongArray(resultsLen); + env->SetLongArrayRegion(jresults, 0, resultsLen, results.get()); + return jresults; + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return NULL; + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: openROnly + * Signature: (JLjava/lang/String;[[B[J)[J + */ +jlongArray Java_org_rocksdb_RocksDB_openROnly__JLjava_lang_String_2_3_3B_3J( + JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path, + jobjectArray jcolumn_names, jlongArray jcolumn_options) { + return rocksdb_open_helper(env, jopt_handle, jdb_path, jcolumn_names, + jcolumn_options, []( + const rocksdb::DBOptions& options, const std::string& db_path, + const std::vector& column_families, + std::vector* handles, rocksdb::DB** db) { + return rocksdb::DB::OpenForReadOnly(options, db_path, column_families, + handles, db); + }); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: open + * Signature: (JLjava/lang/String;[[B[J)[J + */ +jlongArray Java_org_rocksdb_RocksDB_open__JLjava_lang_String_2_3_3B_3J( + JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path, + jobjectArray jcolumn_names, jlongArray jcolumn_options) { + return rocksdb_open_helper(env, jopt_handle, jdb_path, jcolumn_names, + jcolumn_options, (rocksdb::Status(*) + (const rocksdb::DBOptions&, const std::string&, + const std::vector&, + std::vector*, rocksdb::DB**) + )&rocksdb::DB::Open + ); +} + +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::ListColumnFamilies + +/* + * Class: org_rocksdb_RocksDB + * Method: listColumnFamilies + * Signature: (JLjava/lang/String;)[[B + */ +jobjectArray Java_org_rocksdb_RocksDB_listColumnFamilies( + JNIEnv* env, jclass jclazz, jlong jopt_handle, jstring jdb_path) { + std::vector column_family_names; + auto* opt = reinterpret_cast(jopt_handle); + const char* db_path = env->GetStringUTFChars(jdb_path, 0); + rocksdb::Status s = rocksdb::DB::ListColumnFamilies(*opt, db_path, + &column_family_names); + env->ReleaseStringUTFChars(jdb_path, db_path); + + jclass jcls_ba = env->FindClass("[B"); + jobjectArray jresults = env->NewObjectArray( + static_cast(column_family_names.size()), jcls_ba, NULL); + if (s.ok()) { + for (std::vector::size_type i = 0; + i < column_family_names.size(); i++) { + jbyteArray jcf_value = + 
env->NewByteArray(static_cast(column_family_names[i].size())); + env->SetByteArrayRegion( + jcf_value, 0, static_cast(column_family_names[i].size()), + reinterpret_cast(column_family_names[i].data())); + env->SetObjectArrayElement(jresults, static_cast(i), jcf_value); + env->DeleteLocalRef(jcf_value); + } + } + return jresults; +} + +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::Put + +void rocksdb_put_helper( + JNIEnv* env, rocksdb::DB* db, const rocksdb::WriteOptions& write_options, + rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len) { + + jbyte* key = env->GetByteArrayElements(jkey, 0); + jbyte* value = env->GetByteArrayElements(jentry_value, 0); + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_len); + rocksdb::Slice value_slice(reinterpret_cast(value), + jentry_value_len); + + rocksdb::Status s; + if (cf_handle != nullptr) { + s = db->Put(write_options, cf_handle, key_slice, value_slice); + } else { + // backwards compatibility + s = db->Put(write_options, key_slice, value_slice); + } + + // trigger java unref on key and value. + // by passing JNI_ABORT, it will simply release the reference without + // copying the result back to the java byte array. + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + env->ReleaseByteArrayElements(jentry_value, value, JNI_ABORT); + + if (s.ok()) { + return; + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: put + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_RocksDB_put__J_3BI_3BI( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len) { + auto db = reinterpret_cast(jdb_handle); + static const rocksdb::WriteOptions default_write_options = + rocksdb::WriteOptions(); + + rocksdb_put_helper(env, db, default_write_options, nullptr, + jkey, jkey_len, + jentry_value, jentry_value_len); +} +/* + * Class: org_rocksdb_RocksDB + * Method: put + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_RocksDB_put__J_3BI_3BIJ( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { + auto db = reinterpret_cast(jdb_handle); + static const rocksdb::WriteOptions default_write_options = + rocksdb::WriteOptions(); + auto cf_handle = reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_put_helper(env, db, default_write_options, cf_handle, + jkey, jkey_len, jentry_value, jentry_value_len); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: put + * Signature: (JJ[BI[BI)V + */ +void Java_org_rocksdb_RocksDB_put__JJ_3BI_3BI( + JNIEnv* env, jobject jdb, + jlong jdb_handle, jlong jwrite_options_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len) { + auto db = reinterpret_cast(jdb_handle); + auto write_options = reinterpret_cast( + jwrite_options_handle); + + rocksdb_put_helper(env, db, *write_options, nullptr, + jkey, jkey_len, + jentry_value, jentry_value_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: put + * Signature: (JJ[BI[BIJ)V + */ +void Java_org_rocksdb_RocksDB_put__JJ_3BI_3BIJ( + JNIEnv* env, jobject jdb, + jlong jdb_handle, jlong jwrite_options_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint 
jentry_value_len, jlong jcf_handle) { + auto db = reinterpret_cast(jdb_handle); + auto write_options = reinterpret_cast( + jwrite_options_handle); + auto cf_handle = reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_put_helper(env, db, *write_options, cf_handle, + jkey, jkey_len, jentry_value, jentry_value_len); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + } +} + +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::Write +/* + * Class: org_rocksdb_RocksDB + * Method: write0 + * Signature: (JJJ)V + */ +void Java_org_rocksdb_RocksDB_write0( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jlong jwrite_options_handle, jlong jwb_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = reinterpret_cast( + jwrite_options_handle); + auto* wb = reinterpret_cast(jwb_handle); + + rocksdb::Status s = db->Write(*write_options, wb); + + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: write1 + * Signature: (JJJ)V + */ +void Java_org_rocksdb_RocksDB_write1( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jlong jwrite_options_handle, jlong jwbwi_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto* write_options = reinterpret_cast( + jwrite_options_handle); + auto* wbwi = reinterpret_cast(jwbwi_handle); + auto* wb = wbwi->GetWriteBatch(); + + rocksdb::Status s = db->Write(*write_options, wb); + + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::KeyMayExist +jboolean key_may_exist_helper(JNIEnv* env, rocksdb::DB* db, + const rocksdb::ReadOptions& read_opt, + rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jkey, jint jkey_len, + jobject jstring_buffer) { + std::string value; + bool value_found = false; + jboolean isCopy; + jbyte* key = env->GetByteArrayElements(jkey, &isCopy); + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_len); + bool keyMayExist; + if (cf_handle != nullptr) { + keyMayExist = db->KeyMayExist(read_opt, cf_handle, key_slice, + &value, &value_found); + } else { + keyMayExist = db->KeyMayExist(read_opt, key_slice, + &value, &value_found); + } + + if (value_found && !value.empty()) { + jclass clazz = env->GetObjectClass(jstring_buffer); + jmethodID mid = env->GetMethodID(clazz, "append", + "(Ljava/lang/String;)Ljava/lang/StringBuffer;"); + jstring new_value_str = env->NewStringUTF(value.c_str()); + env->CallObjectMethod(jstring_buffer, mid, new_value_str); + } + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + return static_cast(keyMayExist); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: keyMayExist + * Signature: (J[BILjava/lang/StringBuffer;)Z + */ +jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BILjava_lang_StringBuffer_2( + JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jkey, jint jkey_len, + jobject jstring_buffer) { + auto* db = reinterpret_cast(jdb_handle); + return key_may_exist_helper(env, db, rocksdb::ReadOptions(), + nullptr, jkey, jkey_len, jstring_buffer); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: keyMayExist + * Signature: (J[BIJLjava/lang/StringBuffer;)Z + */ +jboolean Java_org_rocksdb_RocksDB_keyMayExist__J_3BIJLjava_lang_StringBuffer_2( + JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jkey, jint jkey_len, + jlong jcf_handle, jobject jstring_buffer) { + auto* db = 
reinterpret_cast(jdb_handle); + auto* cf_handle = reinterpret_cast( + jcf_handle); + if (cf_handle != nullptr) { + return key_may_exist_helper(env, db, rocksdb::ReadOptions(), + cf_handle, jkey, jkey_len, jstring_buffer); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + return true; + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: keyMayExist + * Signature: (JJ[BILjava/lang/StringBuffer;)Z + */ +jboolean Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BILjava_lang_StringBuffer_2( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jread_options_handle, + jbyteArray jkey, jint jkey_len, jobject jstring_buffer) { + auto* db = reinterpret_cast(jdb_handle); + auto& read_options = *reinterpret_cast( + jread_options_handle); + return key_may_exist_helper(env, db, read_options, + nullptr, jkey, jkey_len, jstring_buffer); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: keyMayExist + * Signature: (JJ[BIJLjava/lang/StringBuffer;)Z + */ +jboolean Java_org_rocksdb_RocksDB_keyMayExist__JJ_3BIJLjava_lang_StringBuffer_2( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jread_options_handle, + jbyteArray jkey, jint jkey_len, jlong jcf_handle, jobject jstring_buffer) { + auto* db = reinterpret_cast(jdb_handle); + auto& read_options = *reinterpret_cast( + jread_options_handle); + auto* cf_handle = reinterpret_cast( + jcf_handle); + if (cf_handle != nullptr) { + return key_may_exist_helper(env, db, read_options, cf_handle, + jkey, jkey_len, jstring_buffer); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + return true; + } +} + +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::Get + +jbyteArray rocksdb_get_helper( + JNIEnv* env, rocksdb::DB* db, const rocksdb::ReadOptions& read_opt, + rocksdb::ColumnFamilyHandle* column_family_handle, jbyteArray jkey, + jint jkey_len) { + jboolean isCopy; + jbyte* key = env->GetByteArrayElements(jkey, &isCopy); + rocksdb::Slice key_slice( + reinterpret_cast(key), jkey_len); + + std::string value; + rocksdb::Status s; + if (column_family_handle != nullptr) { + s = db->Get(read_opt, column_family_handle, key_slice, &value); + } else { + // backwards compatibility + s = db->Get(read_opt, key_slice, &value); + } + + // trigger java unref on key. + // by passing JNI_ABORT, it will simply release the reference without + // copying the result back to the java byte array. 
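+  // (JNI_ABORT releases the native buffer without copying it back; mode 0
+  // would copy the buffer back into the Java array first, which is pointless
+  // for a key that was only read.)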
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + + if (s.IsNotFound()) { + return nullptr; + } + + if (s.ok()) { + jbyteArray jret_value = env->NewByteArray(static_cast(value.size())); + env->SetByteArrayRegion(jret_value, 0, static_cast(value.size()), + reinterpret_cast(value.c_str())); + return jret_value; + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + + return nullptr; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (J[BI)[B + */ +jbyteArray Java_org_rocksdb_RocksDB_get__J_3BI( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jbyteArray jkey, jint jkey_len) { + return rocksdb_get_helper(env, + reinterpret_cast(jdb_handle), + rocksdb::ReadOptions(), nullptr, + jkey, jkey_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (J[BIJ)[B + */ +jbyteArray Java_org_rocksdb_RocksDB_get__J_3BIJ( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jbyteArray jkey, jint jkey_len, jlong jcf_handle) { + auto db_handle = reinterpret_cast(jdb_handle); + auto cf_handle = reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + return rocksdb_get_helper(env, db_handle, rocksdb::ReadOptions(), + cf_handle, jkey, jkey_len); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + // will never be evaluated + return env->NewByteArray(0); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (JJ[BI)[B + */ +jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BI( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle, + jbyteArray jkey, jint jkey_len) { + return rocksdb_get_helper(env, + reinterpret_cast(jdb_handle), + *reinterpret_cast(jropt_handle), nullptr, + jkey, jkey_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (JJ[BIJ)[B + */ +jbyteArray Java_org_rocksdb_RocksDB_get__JJ_3BIJ( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle, + jbyteArray jkey, jint jkey_len, jlong jcf_handle) { + auto db_handle = reinterpret_cast(jdb_handle); + auto& ro_opt = *reinterpret_cast(jropt_handle); + auto cf_handle = reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle, + jkey, jkey_len); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + // will never be evaluated + return env->NewByteArray(0); + } +} + +jint rocksdb_get_helper( + JNIEnv* env, rocksdb::DB* db, const rocksdb::ReadOptions& read_options, + rocksdb::ColumnFamilyHandle* column_family_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) { + static const int kNotFound = -1; + static const int kStatusError = -2; + + jbyte* key = env->GetByteArrayElements(jkey, 0); + rocksdb::Slice key_slice( + reinterpret_cast(key), jkey_len); + + // TODO(yhchiang): we might save one memory allocation here by adding + // a DB::Get() function which takes preallocated jbyte* as input. + std::string cvalue; + rocksdb::Status s; + if (column_family_handle != nullptr) { + s = db->Get(read_options, column_family_handle, key_slice, &cvalue); + } else { + // backwards compatibility + s = db->Get(read_options, key_slice, &cvalue); + } + + // trigger java unref on key. + // by passing JNI_ABORT, it will simply release the reference without + // copying the result back to the java byte array. 
+ env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + + if (s.IsNotFound()) { + return kNotFound; + } else if (!s.ok()) { + // Here since we are throwing a Java exception from c++ side. + // As a result, c++ does not know calling this function will in fact + // throwing an exception. As a result, the execution flow will + // not stop here, and codes after this throw will still be + // executed. + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + + // Return a dummy const value to avoid compilation error, although + // java side might not have a chance to get the return value :) + return kStatusError; + } + + jint cvalue_len = static_cast(cvalue.size()); + jint length = std::min(jentry_value_len, cvalue_len); + + env->SetByteArrayRegion( + jentry_value, 0, length, + reinterpret_cast(cvalue.c_str())); + return cvalue_len; +} + +// cf multi get +jobjectArray multi_get_helper(JNIEnv* env, jobject jdb, rocksdb::DB* db, + const rocksdb::ReadOptions& rOpt, jobjectArray jkeys, + jlongArray jcolumn_family_handles) { + std::vector cf_handles; + if (jcolumn_family_handles != nullptr) { + jsize len_cols = env->GetArrayLength(jcolumn_family_handles); + jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, NULL); + for (int i = 0; i < len_cols; i++) { + auto* cf_handle = + reinterpret_cast(jcfh[i]); + cf_handles.push_back(cf_handle); + } + env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT); + } + + std::vector keys; + std::vector> keys_to_free; + jsize len_keys = env->GetArrayLength(jkeys); + if(env->EnsureLocalCapacity(len_keys) != 0) { + // out of memory + return NULL; + } + for (int i = 0; i < len_keys; i++) { + jobject jk = env->GetObjectArrayElement(jkeys, i); + jbyteArray jk_ba = reinterpret_cast(jk); + jsize len_key = env->GetArrayLength(jk_ba); + jbyte* jk_val = env->GetByteArrayElements(jk_ba, NULL); + + rocksdb::Slice key_slice(reinterpret_cast(jk_val), len_key); + keys.push_back(key_slice); + + keys_to_free.push_back(std::make_tuple(jk_ba, jk_val, jk)); + } + + std::vector values; + std::vector s; + if (cf_handles.size() == 0) { + s = db->MultiGet(rOpt, keys, &values); + } else { + s = db->MultiGet(rOpt, cf_handles, keys, &values); + } + + // free up allocated byte arrays + for (std::vector>::size_type i = 0; + i < keys_to_free.size(); i++) { + jobject jk; + jbyteArray jk_ba; + jbyte* jk_val; + std::tie(jk_ba, jk_val, jk) = keys_to_free[i]; + env->ReleaseByteArrayElements(jk_ba, jk_val, JNI_ABORT); + env->DeleteLocalRef(jk); + } + + // prepare the results + jclass jcls_ba = env->FindClass("[B"); + jobjectArray jresults = + env->NewObjectArray(static_cast(s.size()), jcls_ba, NULL); + + // add to the jresults + for (std::vector::size_type i = 0; i != s.size(); i++) { + if (s[i].ok()) { + jbyteArray jentry_value = + env->NewByteArray(static_cast(values[i].size())); + env->SetByteArrayRegion( + jentry_value, 0, static_cast(values[i].size()), + reinterpret_cast(values[i].c_str())); + env->SetObjectArrayElement(jresults, static_cast(i), jentry_value); + env->DeleteLocalRef(jentry_value); + } + } + + return jresults; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: multiGet + * Signature: (J[[B)[[B + */ +jobjectArray Java_org_rocksdb_RocksDB_multiGet__J_3_3B( + JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys) { + return multi_get_helper(env, jdb, reinterpret_cast(jdb_handle), + rocksdb::ReadOptions(), jkeys, nullptr); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: multiGet + * Signature: (J[[B[J)[[B + */ +jobjectArray 
Java_org_rocksdb_RocksDB_multiGet__J_3_3B_3J( + JNIEnv* env, jobject jdb, jlong jdb_handle, jobjectArray jkeys, + jlongArray jcolumn_family_handles) { + return multi_get_helper(env, jdb, reinterpret_cast(jdb_handle), + rocksdb::ReadOptions(), jkeys, jcolumn_family_handles); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: multiGet + * Signature: (JJ[[B)[[B + */ +jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle, + jobjectArray jkeys) { + return multi_get_helper(env, jdb, reinterpret_cast(jdb_handle), + *reinterpret_cast(jropt_handle), jkeys, nullptr); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: multiGet + * Signature: (JJ[[B[J)[[B + */ +jobjectArray Java_org_rocksdb_RocksDB_multiGet__JJ_3_3B_3J( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle, + jobjectArray jkeys, jlongArray jcolumn_family_handles) { + return multi_get_helper(env, jdb, reinterpret_cast(jdb_handle), + *reinterpret_cast(jropt_handle), jkeys, + jcolumn_family_handles); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (J[BI[BI)I + */ +jint Java_org_rocksdb_RocksDB_get__J_3BI_3BI( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len) { + return rocksdb_get_helper(env, + reinterpret_cast(jdb_handle), + rocksdb::ReadOptions(), nullptr, + jkey, jkey_len, jentry_value, jentry_value_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (J[BI[BIJ)I + */ +jint Java_org_rocksdb_RocksDB_get__J_3BI_3BIJ( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { + auto db_handle = reinterpret_cast(jdb_handle); + auto cf_handle = reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + return rocksdb_get_helper(env, db_handle, rocksdb::ReadOptions(), cf_handle, + jkey, jkey_len, jentry_value, jentry_value_len); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + // will never be evaluated + return 0; + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (JJ[BI[BI)I + */ +jint Java_org_rocksdb_RocksDB_get__JJ_3BI_3BI( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len) { + return rocksdb_get_helper(env, + reinterpret_cast(jdb_handle), + *reinterpret_cast(jropt_handle), + nullptr, jkey, jkey_len, jentry_value, jentry_value_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: get + * Signature: (JJ[BI[BIJ)I + */ +jint Java_org_rocksdb_RocksDB_get__JJ_3BI_3BIJ( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jropt_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { + auto db_handle = reinterpret_cast(jdb_handle); + auto& ro_opt = *reinterpret_cast(jropt_handle); + auto cf_handle = reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + return rocksdb_get_helper(env, db_handle, ro_opt, cf_handle, jkey, + jkey_len, jentry_value, jentry_value_len); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + // will never be evaluated + return 0; + } +} +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::Delete() +void rocksdb_remove_helper( + JNIEnv* env, 
rocksdb::DB* db, const rocksdb::WriteOptions& write_options, + rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jkey, jint jkey_len) { + jbyte* key = env->GetByteArrayElements(jkey, 0); + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_len); + + rocksdb::Status s; + if (cf_handle != nullptr) { + s = db->Delete(write_options, cf_handle, key_slice); + } else { + // backwards compatibility + s = db->Delete(write_options, key_slice); + } + // trigger java unref on key and value. + // by passing JNI_ABORT, it will simply release the reference without + // copying the result back to the java byte array. + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } + return; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: remove + * Signature: (J[BI)V + */ +void Java_org_rocksdb_RocksDB_remove__J_3BI( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jbyteArray jkey, jint jkey_len) { + auto db = reinterpret_cast(jdb_handle); + static const rocksdb::WriteOptions default_write_options = + rocksdb::WriteOptions(); + rocksdb_remove_helper(env, db, default_write_options, nullptr, + jkey, jkey_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: remove + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_RocksDB_remove__J_3BIJ( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jbyteArray jkey, jint jkey_len, jlong jcf_handle) { + auto db = reinterpret_cast(jdb_handle); + static const rocksdb::WriteOptions default_write_options = + rocksdb::WriteOptions(); + auto cf_handle = reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_remove_helper(env, db, default_write_options, cf_handle, + jkey, jkey_len); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: remove + * Signature: (JJ[BIJ)V + */ +void Java_org_rocksdb_RocksDB_remove__JJ_3BI( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jlong jwrite_options, jbyteArray jkey, jint jkey_len) { + auto db = reinterpret_cast(jdb_handle); + auto write_options = reinterpret_cast(jwrite_options); + rocksdb_remove_helper(env, db, *write_options, nullptr, jkey, jkey_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: remove + * Signature: (JJ[BIJ)V + */ +void Java_org_rocksdb_RocksDB_remove__JJ_3BIJ( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jlong jwrite_options, jbyteArray jkey, jint jkey_len, + jlong jcf_handle) { + auto db = reinterpret_cast(jdb_handle); + auto write_options = reinterpret_cast(jwrite_options); + auto cf_handle = reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_remove_helper(env, db, *write_options, cf_handle, jkey, jkey_len); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + } +} +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::Merge + +void rocksdb_merge_helper( + JNIEnv* env, rocksdb::DB* db, const rocksdb::WriteOptions& write_options, + rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len) { + + jbyte* key = env->GetByteArrayElements(jkey, 0); + jbyte* value = env->GetByteArrayElements(jentry_value, 0); + rocksdb::Slice key_slice(reinterpret_cast(key), jkey_len); + rocksdb::Slice value_slice(reinterpret_cast(value), + jentry_value_len); + + rocksdb::Status s; + if (cf_handle != nullptr) { + s = 
db->Merge(write_options, cf_handle, key_slice, value_slice); + } else { + s = db->Merge(write_options, key_slice, value_slice); + } + + // trigger java unref on key and value. + // by passing JNI_ABORT, it will simply release the reference without + // copying the result back to the java byte array. + env->ReleaseByteArrayElements(jkey, key, JNI_ABORT); + env->ReleaseByteArrayElements(jentry_value, value, JNI_ABORT); + + if (s.ok()) { + return; + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: merge + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_RocksDB_merge__J_3BI_3BI( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len) { + auto db = reinterpret_cast(jdb_handle); + static const rocksdb::WriteOptions default_write_options = + rocksdb::WriteOptions(); + + rocksdb_merge_helper(env, db, default_write_options, + nullptr, jkey, jkey_len, jentry_value, jentry_value_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: merge + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_RocksDB_merge__J_3BI_3BIJ( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { + auto db = reinterpret_cast(jdb_handle); + static const rocksdb::WriteOptions default_write_options = + rocksdb::WriteOptions(); + auto cf_handle = reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_merge_helper(env, db, default_write_options, + cf_handle, jkey, jkey_len, jentry_value, jentry_value_len); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: merge + * Signature: (JJ[BI[BI)V + */ +void Java_org_rocksdb_RocksDB_merge__JJ_3BI_3BI( + JNIEnv* env, jobject jdb, + jlong jdb_handle, jlong jwrite_options_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len) { + auto db = reinterpret_cast(jdb_handle); + auto write_options = reinterpret_cast( + jwrite_options_handle); + + rocksdb_merge_helper(env, db, *write_options, + nullptr, jkey, jkey_len, jentry_value, jentry_value_len); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: merge + * Signature: (JJ[BI[BIJ)V + */ +void Java_org_rocksdb_RocksDB_merge__JJ_3BI_3BIJ( + JNIEnv* env, jobject jdb, + jlong jdb_handle, jlong jwrite_options_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { + auto db = reinterpret_cast(jdb_handle); + auto write_options = reinterpret_cast( + jwrite_options_handle); + auto cf_handle = reinterpret_cast(jcf_handle); + if (cf_handle != nullptr) { + rocksdb_merge_helper(env, db, *write_options, + cf_handle, jkey, jkey_len, jentry_value, jentry_value_len); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, + rocksdb::Status::InvalidArgument("Invalid ColumnFamilyHandle.")); + } +} + +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::~DB() + +/* + * Class: org_rocksdb_RocksDB + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_disposeInternal( + JNIEnv* env, jobject java_db, jlong jhandle) { + delete reinterpret_cast(jhandle); +} + +jlong rocksdb_iterator_helper( + rocksdb::DB* db, rocksdb::ReadOptions read_options, + rocksdb::ColumnFamilyHandle* cf_handle) { + rocksdb::Iterator* iterator = nullptr; + if 
(cf_handle != nullptr) { + iterator = db->NewIterator(read_options, cf_handle); + } else { + iterator = db->NewIterator(read_options); + } + return reinterpret_cast(iterator); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: iterator + * Signature: (J)J + */ +jlong Java_org_rocksdb_RocksDB_iterator__J( + JNIEnv* env, jobject jdb, jlong db_handle) { + auto db = reinterpret_cast(db_handle); + return rocksdb_iterator_helper(db, rocksdb::ReadOptions(), + nullptr); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: iterator + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_RocksDB_iterator__JJ( + JNIEnv* env, jobject jdb, jlong db_handle, + jlong jread_options_handle) { + auto db = reinterpret_cast(db_handle); + auto& read_options = *reinterpret_cast( + jread_options_handle); + return rocksdb_iterator_helper(db, read_options, + nullptr); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: iteratorCF + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_RocksDB_iteratorCF__JJ( + JNIEnv* env, jobject jdb, jlong db_handle, jlong jcf_handle) { + auto db = reinterpret_cast(db_handle); + auto cf_handle = reinterpret_cast(jcf_handle); + return rocksdb_iterator_helper(db, rocksdb::ReadOptions(), + cf_handle); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: iteratorCF + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_RocksDB_iteratorCF__JJJ( + JNIEnv* env, jobject jdb, jlong db_handle, jlong jcf_handle, + jlong jread_options_handle) { + auto db = reinterpret_cast(db_handle); + auto cf_handle = reinterpret_cast(jcf_handle); + auto& read_options = *reinterpret_cast( + jread_options_handle); + return rocksdb_iterator_helper(db, read_options, + cf_handle); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: iterators + * Signature: (J[JJ)[J + */ +jlongArray Java_org_rocksdb_RocksDB_iterators( + JNIEnv* env, jobject jdb, jlong db_handle, + jlongArray jcolumn_family_handles, jlong jread_options_handle) { + auto* db = reinterpret_cast(db_handle); + auto& read_options = *reinterpret_cast( + jread_options_handle); + std::vector cf_handles; + if (jcolumn_family_handles != nullptr) { + jsize len_cols = env->GetArrayLength(jcolumn_family_handles); + jlong* jcfh = env->GetLongArrayElements(jcolumn_family_handles, NULL); + for (int i = 0; i < len_cols; i++) { + auto* cf_handle = + reinterpret_cast(jcfh[i]); + cf_handles.push_back(cf_handle); + } + env->ReleaseLongArrayElements(jcolumn_family_handles, jcfh, JNI_ABORT); + } + + std::vector iterators; + rocksdb::Status s = db->NewIterators(read_options, + cf_handles, &iterators); + if (s.ok()) { + jlongArray jLongArray = + env->NewLongArray(static_cast(iterators.size())); + for (std::vector::size_type i = 0; + i < iterators.size(); i++) { + env->SetLongArrayRegion(jLongArray, static_cast(i), 1, + reinterpret_cast(&iterators[i])); + } + return jLongArray; + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return NULL; + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getDefaultColumnFamily + * Signature: (J)J + */ +jlong Java_org_rocksdb_RocksDB_getDefaultColumnFamily( + JNIEnv* env, jobject jobj, jlong jdb_handle) { + auto* db_handle = reinterpret_cast(jdb_handle); + auto* cf_handle = db_handle->DefaultColumnFamily(); + return reinterpret_cast(cf_handle); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: createColumnFamily + * Signature: (J[BJ)J + */ +jlong Java_org_rocksdb_RocksDB_createColumnFamily( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jbyteArray jcolumn_name, jlong jcolumn_options) { + rocksdb::ColumnFamilyHandle* handle; + auto 
db_handle = reinterpret_cast(jdb_handle); + + jbyte* cfname = env->GetByteArrayElements(jcolumn_name, 0); + const int len = env->GetArrayLength(jcolumn_name); + + auto* cfOptions = + reinterpret_cast(jcolumn_options); + + rocksdb::Status s = db_handle->CreateColumnFamily( + *cfOptions, std::string(reinterpret_cast(cfname), len), &handle); + env->ReleaseByteArrayElements(jcolumn_name, cfname, 0); + + if (s.ok()) { + return reinterpret_cast(handle); + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return 0; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: dropColumnFamily + * Signature: (JJ)V; + */ +void Java_org_rocksdb_RocksDB_dropColumnFamily( + JNIEnv* env, jobject jdb, jlong jdb_handle, jlong jcf_handle) { + auto cf_handle = reinterpret_cast(jcf_handle); + auto db_handle = reinterpret_cast(jdb_handle); + rocksdb::Status s = db_handle->DropColumnFamily(cf_handle); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Method: getSnapshot + * Signature: (J)J + */ +jlong Java_org_rocksdb_RocksDB_getSnapshot( + JNIEnv* env, jobject jdb, jlong db_handle) { + auto db = reinterpret_cast(db_handle); + const rocksdb::Snapshot* snapshot = db->GetSnapshot(); + return reinterpret_cast(snapshot); +} + +/* + * Method: releaseSnapshot + * Signature: (JJ)V + */ +void Java_org_rocksdb_RocksDB_releaseSnapshot( + JNIEnv* env, jobject jdb, jlong db_handle, jlong snapshot_handle) { + auto db = reinterpret_cast(db_handle); + auto snapshot = reinterpret_cast(snapshot_handle); + db->ReleaseSnapshot(snapshot); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getProperty0 + * Signature: (JLjava/lang/String;I)Ljava/lang/String; + */ +jstring Java_org_rocksdb_RocksDB_getProperty0__JLjava_lang_String_2I( + JNIEnv* env, jobject jdb, jlong db_handle, jstring jproperty, + jint jproperty_len) { + auto db = reinterpret_cast(db_handle); + + const char* property = env->GetStringUTFChars(jproperty, 0); + rocksdb::Slice property_slice(property, jproperty_len); + + std::string property_value; + bool retCode = db->GetProperty(property_slice, &property_value); + env->ReleaseStringUTFChars(jproperty, property); + + if (!retCode) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound()); + } + + return env->NewStringUTF(property_value.data()); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getProperty0 + * Signature: (JJLjava/lang/String;I)Ljava/lang/String; + */ +jstring Java_org_rocksdb_RocksDB_getProperty0__JJLjava_lang_String_2I( + JNIEnv* env, jobject jdb, jlong db_handle, jlong jcf_handle, + jstring jproperty, jint jproperty_len) { + auto db = reinterpret_cast(db_handle); + auto cf_handle = reinterpret_cast(jcf_handle); + + const char* property = env->GetStringUTFChars(jproperty, 0); + rocksdb::Slice property_slice(property, jproperty_len); + + std::string property_value; + bool retCode = db->GetProperty(cf_handle, property_slice, &property_value); + env->ReleaseStringUTFChars(jproperty, property); + + if (!retCode) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound()); + } + + return env->NewStringUTF(property_value.data()); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getLongProperty + * Signature: (JLjava/lang/String;I)L; + */ +jlong Java_org_rocksdb_RocksDB_getLongProperty__JLjava_lang_String_2I( + JNIEnv* env, jobject jdb, jlong db_handle, jstring jproperty, + jint jproperty_len) { + auto db = reinterpret_cast(db_handle); + + const char* property = env->GetStringUTFChars(jproperty, 0); + rocksdb::Slice 
property_slice(property, jproperty_len); + + uint64_t property_value = 0; + bool retCode = db->GetIntProperty(property_slice, &property_value); + env->ReleaseStringUTFChars(jproperty, property); + + if (!retCode) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound()); + } + return property_value; +} + +/* + * Class: org_rocksdb_RocksDB + * Method: getLongProperty + * Signature: (JJLjava/lang/String;I)L; + */ +jlong Java_org_rocksdb_RocksDB_getLongProperty__JJLjava_lang_String_2I( + JNIEnv* env, jobject jdb, jlong db_handle, jlong jcf_handle, + jstring jproperty, jint jproperty_len) { + auto db = reinterpret_cast(db_handle); + auto cf_handle = reinterpret_cast(jcf_handle); + + const char* property = env->GetStringUTFChars(jproperty, 0); + rocksdb::Slice property_slice(property, jproperty_len); + + uint64_t property_value; + bool retCode = db->GetIntProperty(cf_handle, property_slice, &property_value); + env->ReleaseStringUTFChars(jproperty, property); + + if (!retCode) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, rocksdb::Status::NotFound()); + } + return property_value; +} + +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::Flush + +void rocksdb_flush_helper( + JNIEnv* env, rocksdb::DB* db, const rocksdb::FlushOptions& flush_options, + rocksdb::ColumnFamilyHandle* column_family_handle) { + rocksdb::Status s; + if (column_family_handle != nullptr) { + s = db->Flush(flush_options, column_family_handle); + } else { + s = db->Flush(flush_options); + } + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_RocksDB + * Method: flush + * Signature: (JJ)V + */ +void Java_org_rocksdb_RocksDB_flush__JJ( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jlong jflush_options) { + auto db = reinterpret_cast(jdb_handle); + auto flush_options = reinterpret_cast(jflush_options); + rocksdb_flush_helper(env, db, *flush_options, nullptr); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: flush + * Signature: (JJJ)V + */ +void Java_org_rocksdb_RocksDB_flush__JJJ( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jlong jflush_options, jlong jcf_handle) { + auto db = reinterpret_cast(jdb_handle); + auto flush_options = reinterpret_cast(jflush_options); + auto cf_handle = reinterpret_cast(jcf_handle); + rocksdb_flush_helper(env, db, *flush_options, cf_handle); +} + +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::CompactRange - Full + +void rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db, + rocksdb::ColumnFamilyHandle* cf_handle, jboolean jreduce_level, + jint jtarget_level, jint jtarget_path_id) { + + rocksdb::Status s; + rocksdb::CompactRangeOptions compact_options; + compact_options.change_level = jreduce_level; + compact_options.target_level = jtarget_level; + compact_options.target_path_id = static_cast(jtarget_path_id); + if (cf_handle != nullptr) { + s = db->CompactRange(compact_options, cf_handle, nullptr, nullptr); + } else { + // backwards compatibility + s = db->CompactRange(compact_options, nullptr, nullptr); + } + + if (s.ok()) { + return; + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: compactRange0 + * Signature: (JZII)V + */ +void Java_org_rocksdb_RocksDB_compactRange0__JZII(JNIEnv* env, + jobject jdb, jlong jdb_handle, jboolean jreduce_level, + jint jtarget_level, jint jtarget_path_id) { + auto db = reinterpret_cast(jdb_handle); + rocksdb_compactrange_helper(env, db, 
nullptr, jreduce_level, + jtarget_level, jtarget_path_id); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: compactRange + * Signature: (JZIIJ)V + */ +void Java_org_rocksdb_RocksDB_compactRange__JZIIJ( + JNIEnv* env, jobject jdb, jlong jdb_handle, + jboolean jreduce_level, jint jtarget_level, + jint jtarget_path_id, jlong jcf_handle) { + auto db = reinterpret_cast(jdb_handle); + auto cf_handle = reinterpret_cast(jcf_handle); + rocksdb_compactrange_helper(env, db, cf_handle, jreduce_level, + jtarget_level, jtarget_path_id); +} + +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::CompactRange - Range + +void rocksdb_compactrange_helper(JNIEnv* env, rocksdb::DB* db, + rocksdb::ColumnFamilyHandle* cf_handle, jbyteArray jbegin, jint jbegin_len, + jbyteArray jend, jint jend_len, jboolean jreduce_level, jint jtarget_level, + jint jtarget_path_id) { + + jbyte* begin = env->GetByteArrayElements(jbegin, 0); + jbyte* end = env->GetByteArrayElements(jend, 0); + const rocksdb::Slice begin_slice(reinterpret_cast(begin), jbegin_len); + const rocksdb::Slice end_slice(reinterpret_cast(end), jend_len); + + rocksdb::Status s; + rocksdb::CompactRangeOptions compact_options; + compact_options.change_level = jreduce_level; + compact_options.target_level = jtarget_level; + compact_options.target_path_id = static_cast(jtarget_path_id); + if (cf_handle != nullptr) { + s = db->CompactRange(compact_options, cf_handle, &begin_slice, &end_slice); + } else { + // backwards compatibility + s = db->CompactRange(compact_options, &begin_slice, &end_slice); + } + + env->ReleaseByteArrayElements(jbegin, begin, JNI_ABORT); + env->ReleaseByteArrayElements(jend, end, JNI_ABORT); + + if (s.ok()) { + return; + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: compactRange0 + * Signature: (J[BI[BIZII)V + */ +void Java_org_rocksdb_RocksDB_compactRange0__J_3BI_3BIZII(JNIEnv* env, + jobject jdb, jlong jdb_handle, jbyteArray jbegin, jint jbegin_len, + jbyteArray jend, jint jend_len, jboolean jreduce_level, + jint jtarget_level, jint jtarget_path_id) { + auto db = reinterpret_cast(jdb_handle); + rocksdb_compactrange_helper(env, db, nullptr, jbegin, jbegin_len, + jend, jend_len, jreduce_level, jtarget_level, jtarget_path_id); +} + +/* + * Class: org_rocksdb_RocksDB + * Method: compactRange + * Signature: (JJ[BI[BIZII)V + */ +void Java_org_rocksdb_RocksDB_compactRange__J_3BI_3BIZIIJ( + JNIEnv* env, jobject jdb, jlong jdb_handle, jbyteArray jbegin, + jint jbegin_len, jbyteArray jend, jint jend_len, + jboolean jreduce_level, jint jtarget_level, + jint jtarget_path_id, jlong jcf_handle) { + auto db = reinterpret_cast(jdb_handle); + auto cf_handle = reinterpret_cast(jcf_handle); + rocksdb_compactrange_helper(env, db, cf_handle, jbegin, jbegin_len, + jend, jend_len, jreduce_level, jtarget_level, jtarget_path_id); +} + +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::PauseBackgroundWork + +/* + * Class: org_rocksdb_RocksDB + * Method: pauseBackgroundWork + * Signature: (J)V + */ +void Java_org_rocksdb_RocksDB_pauseBackgroundWork( + JNIEnv* env, jobject jobj, jlong jdb_handle) { + auto* db = reinterpret_cast(jdb_handle); + auto s = db->PauseBackgroundWork(); + if (s.ok()) { + return; + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +////////////////////////////////////////////////////////////////////////////// +// rocksdb::DB::ContinueBackgroundWork + +/* + * Class: org_rocksdb_RocksDB 
+ * Method: continueBackgroundWork
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_continueBackgroundWork(
+    JNIEnv* env, jobject jobj, jlong jdb_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  auto s = db->ContinueBackgroundWork();
+  if (s.ok()) {
+    return;
+  }
+  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// rocksdb::DB::GetLatestSequenceNumber
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getLatestSequenceNumber
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_RocksDB_getLatestSequenceNumber(JNIEnv* env,
+    jobject jdb, jlong jdb_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  return db->GetLatestSequenceNumber();
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// rocksdb::DB enable/disable file deletions
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: disableFileDeletions
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_RocksDB_disableFileDeletions(JNIEnv* env,
+    jobject jdb, jlong jdb_handle) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::Status s = db->DisableFileDeletions();
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: enableFileDeletions
+ * Signature: (JZ)V
+ */
+void Java_org_rocksdb_RocksDB_enableFileDeletions(JNIEnv* env,
+    jobject jdb, jlong jdb_handle, jboolean jforce) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::Status s = db->EnableFileDeletions(jforce);
+  if (!s.ok()) {
+    rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  }
+}
+
+//////////////////////////////////////////////////////////////////////////////
+// rocksdb::DB::GetUpdatesSince
+
+/*
+ * Class: org_rocksdb_RocksDB
+ * Method: getUpdatesSince
+ * Signature: (JJ)J
+ */
+jlong Java_org_rocksdb_RocksDB_getUpdatesSince(JNIEnv* env,
+    jobject jdb, jlong jdb_handle, jlong jsequence_number) {
+  auto* db = reinterpret_cast<rocksdb::DB*>(jdb_handle);
+  rocksdb::SequenceNumber sequence_number =
+      static_cast<rocksdb::SequenceNumber>(jsequence_number);
+  std::unique_ptr<rocksdb::TransactionLogIterator> iter;
+  rocksdb::Status s = db->GetUpdatesSince(sequence_number, &iter);
+  if (s.ok()) {
+    return reinterpret_cast<jlong>(iter.release());
+  }
+  rocksdb::RocksDBExceptionJni::ThrowNew(env, s);
+  return 0;
+}
diff --git a/external/rocksdb/java/rocksjni/slice.cc b/external/rocksdb/java/rocksjni/slice.cc
new file mode 100755
index 0000000000..e5eb383bd6
--- /dev/null
+++ b/external/rocksdb/java/rocksjni/slice.cc
@@ -0,0 +1,259 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++ for
+// rocksdb::Slice.
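
The ownership rule that drives this file: rocksdb::Slice is a non-owning (pointer, length) view, so the bridge below copies the Java byte[] into a heap buffer and frees that buffer separately in disposeInternalBuf. A minimal standalone sketch of that rule (illustrative only):

    #include <cstring>
    #include "rocksdb/slice.h"

    int main() {
      char* buf = new char[5];
      memcpy(buf, "hello", 5);
      rocksdb::Slice s(buf, 5);  // 's' merely views 'buf'; it does not own it
      // Destroying 's' would not free 'buf'; its owner must delete[] it,
      // mirroring org.rocksdb.Slice#dispose -> disposeInternalBuf below.
      delete[] buf;
      return 0;
    }
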
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#include "include/org_rocksdb_AbstractSlice.h"
+#include "include/org_rocksdb_Slice.h"
+#include "include/org_rocksdb_DirectSlice.h"
+#include "rocksdb/slice.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_Slice
+ * Method: createNewSlice0
+ * Signature: ([BI)J
+ */
+jlong Java_org_rocksdb_Slice_createNewSlice0(
+    JNIEnv* env, jclass jcls, jbyteArray data, jint offset) {
+  const jsize dataSize = env->GetArrayLength(data);
+  const int len = dataSize - offset;
+  jbyte* ptrData = new jbyte[len];
+  env->GetByteArrayRegion(data, offset, len, ptrData);
+
+  const auto* slice = new rocksdb::Slice((const char*)ptrData, len);
+  return reinterpret_cast<jlong>(slice);
+}
+
+/*
+ * Class: org_rocksdb_Slice
+ * Method: createNewSlice1
+ * Signature: ([B)J
+ */
+jlong Java_org_rocksdb_Slice_createNewSlice1(
+    JNIEnv* env, jclass jcls, jbyteArray data) {
+  const int len = env->GetArrayLength(data) + 1;
+
+  jboolean isCopy;
+  jbyte* ptrData = env->GetByteArrayElements(data, &isCopy);
+
+  // NOTE: buf will be deleted in the org.rocksdb.Slice#dispose method
+  char* buf = new char[len];
+  memcpy(buf, ptrData, len - 1);
+  buf[len - 1] = '\0';
+
+  const auto* slice =
+      new rocksdb::Slice(buf, len - 1);
+
+  env->ReleaseByteArrayElements(data, ptrData, JNI_ABORT);
+
+  return reinterpret_cast<jlong>(slice);
+}
+
+/*
+ * Class: org_rocksdb_Slice
+ * Method: data0
+ * Signature: (J)[B
+ */
+jbyteArray Java_org_rocksdb_Slice_data0(
+    JNIEnv* env, jobject jobj, jlong handle) {
+  const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
+  const int len = static_cast<int>(slice->size());
+  const jbyteArray data = env->NewByteArray(len);
+  env->SetByteArrayRegion(data, 0, len,
+      reinterpret_cast<const jbyte*>(slice->data()));
+  return data;
+}
+
+/*
+ * Class: org_rocksdb_Slice
+ * Method: disposeInternalBuf
+ * Signature: (J)V
+ */
+void Java_org_rocksdb_Slice_disposeInternalBuf(
+    JNIEnv* env, jobject jobj, jlong handle) {
+  const auto* slice = reinterpret_cast<rocksdb::Slice*>(handle);
+  delete[] slice->data_;
+}
+
+#include <jni.h>
+#include <stdio.h>
+
+#include "include/org_rocksdb_Snapshot.h"
+#include "rocksdb/db.h"
+#include "rocksjni/portal.h"
+
+/*
+ * Class: org_rocksdb_Snapshot
+ * Method: getSequenceNumber
+ * Signature: (J)J
+ */
+jlong Java_org_rocksdb_Snapshot_getSequenceNumber(JNIEnv* env,
+    jobject jobj, jlong jsnapshot_handle) {
+  auto* snapshot = reinterpret_cast<const rocksdb::Snapshot*>(
+      jsnapshot_handle);
+  return snapshot->GetSequenceNumber();
+}
diff --git a/external/rocksdb/java/rocksjni/statistics.cc b/external/rocksdb/java/rocksjni/statistics.cc
new file mode 100755
index 0000000000..c41ec72c0b
--- /dev/null
+++ b/external/rocksdb/java/rocksjni/statistics.cc
@@ -0,0 +1,50 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++ and enables
+// calling c++ rocksdb::Statistics methods from Java side.
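
As context for the two thin wrappers below, a standalone sketch of how statistics are enabled and queried natively (the ticker choice is an arbitrary example):

    #include <cstdint>
    #include "rocksdb/options.h"
    #include "rocksdb/statistics.h"

    uint64_t CacheMissesSketch() {
      rocksdb::Options options;
      options.statistics = rocksdb::CreateDBStatistics();
      // ... open a DB with these options and issue some reads ...
      return options.statistics->getTickerCount(rocksdb::BLOCK_CACHE_MISS);
    }
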
+
+#include <jni.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include "include/org_rocksdb_Statistics.h"
+#include "rocksjni/portal.h"
+#include "rocksdb/statistics.h"
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: getTickerCount0
+ * Signature: (IJ)J
+ */
+jlong Java_org_rocksdb_Statistics_getTickerCount0(
+    JNIEnv* env, jobject jobj, jint tickerType, jlong handle) {
+  auto st = reinterpret_cast<rocksdb::Statistics*>(handle);
+  assert(st != nullptr);
+
+  return st->getTickerCount(static_cast<rocksdb::Tickers>(tickerType));
+}
+
+/*
+ * Class: org_rocksdb_Statistics
+ * Method: getHistogramData0
+ * Signature: (IJ)Lorg/rocksdb/HistogramData;
+ */
+jobject Java_org_rocksdb_Statistics_getHistogramData0(
+    JNIEnv* env, jobject jobj, jint histogramType, jlong handle) {
+  auto st = reinterpret_cast<rocksdb::Statistics*>(handle);
+  assert(st != nullptr);
+
+  rocksdb::HistogramData data;
+  st->histogramData(static_cast<rocksdb::Histograms>(histogramType),
+      &data);
+
+  // Don't reuse class pointer
+  jclass jclazz = env->FindClass("org/rocksdb/HistogramData");
+  jmethodID mid = rocksdb::HistogramDataJni::getConstructorMethodId(
+      env, jclazz);
+  return env->NewObject(jclazz, mid, data.median, data.percentile95,
+      data.percentile99, data.average, data.standard_deviation);
+}
diff --git a/external/rocksdb/java/rocksjni/table.cc b/external/rocksdb/java/rocksjni/table.cc
new file mode 100755
index 0000000000..204d1ba38f
--- /dev/null
+++ b/external/rocksdb/java/rocksjni/table.cc
@@ -0,0 +1,92 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// This file implements the "bridge" between Java and C++ for the rocksdb
+// table factory options (PlainTableConfig and BlockBasedTableConfig).
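
The two factory functions below translate flat Java parameter lists into these option structs; natively, the same configuration looks like this standalone sketch (cache size and bloom bits are example values):

    #include "rocksdb/cache.h"
    #include "rocksdb/filter_policy.h"
    #include "rocksdb/options.h"
    #include "rocksdb/table.h"

    void ConfigureBlockBasedTableSketch(rocksdb::Options* options) {
      rocksdb::BlockBasedTableOptions table_options;
      table_options.block_cache = rocksdb::NewLRUCache(8 * 1024 * 1024);
      table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10));
      options->table_factory.reset(
          rocksdb::NewBlockBasedTableFactory(table_options));
    }
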
+ +#include +#include "include/org_rocksdb_PlainTableConfig.h" +#include "include/org_rocksdb_BlockBasedTableConfig.h" +#include "rocksdb/table.h" +#include "rocksdb/cache.h" +#include "rocksdb/filter_policy.h" + +/* + * Class: org_rocksdb_PlainTableConfig + * Method: newTableFactoryHandle + * Signature: (IIDIIBZZ)J + */ +jlong Java_org_rocksdb_PlainTableConfig_newTableFactoryHandle( + JNIEnv* env, jobject jobj, jint jkey_size, jint jbloom_bits_per_key, + jdouble jhash_table_ratio, jint jindex_sparseness, + jint jhuge_page_tlb_size, jbyte jencoding_type, + jboolean jfull_scan_mode, jboolean jstore_index_in_file) { + rocksdb::PlainTableOptions options = rocksdb::PlainTableOptions(); + options.user_key_len = jkey_size; + options.bloom_bits_per_key = jbloom_bits_per_key; + options.hash_table_ratio = jhash_table_ratio; + options.index_sparseness = jindex_sparseness; + options.huge_page_tlb_size = jhuge_page_tlb_size; + options.encoding_type = static_cast( + jencoding_type); + options.full_scan_mode = jfull_scan_mode; + options.store_index_in_file = jstore_index_in_file; + return reinterpret_cast(rocksdb::NewPlainTableFactory(options)); +} + +/* + * Class: org_rocksdb_BlockBasedTableConfig + * Method: newTableFactoryHandle + * Signature: (ZJIJIIZIZZZJIBBI)J + */ +jlong Java_org_rocksdb_BlockBasedTableConfig_newTableFactoryHandle( + JNIEnv* env, jobject jobj, jboolean no_block_cache, jlong block_cache_size, + jint block_cache_num_shardbits, jlong block_size, jint block_size_deviation, + jint block_restart_interval, jboolean whole_key_filtering, + jlong jfilterPolicy, jboolean cache_index_and_filter_blocks, + jboolean pin_l0_filter_and_index_blocks_in_cache, + jboolean hash_index_allow_collision, jlong block_cache_compressed_size, + jint block_cache_compressd_num_shard_bits, jbyte jchecksum_type, + jbyte jindex_type, jint jformat_version) { + rocksdb::BlockBasedTableOptions options; + options.no_block_cache = no_block_cache; + + if (!no_block_cache && block_cache_size > 0) { + if (block_cache_num_shardbits > 0) { + options.block_cache = + rocksdb::NewLRUCache(block_cache_size, block_cache_num_shardbits); + } else { + options.block_cache = rocksdb::NewLRUCache(block_cache_size); + } + } + options.block_size = block_size; + options.block_size_deviation = block_size_deviation; + options.block_restart_interval = block_restart_interval; + options.whole_key_filtering = whole_key_filtering; + if (jfilterPolicy > 0) { + std::shared_ptr *pFilterPolicy = + reinterpret_cast *>( + jfilterPolicy); + options.filter_policy = *pFilterPolicy; + } + options.cache_index_and_filter_blocks = cache_index_and_filter_blocks; + options.pin_l0_filter_and_index_blocks_in_cache = + pin_l0_filter_and_index_blocks_in_cache; + options.hash_index_allow_collision = hash_index_allow_collision; + if (block_cache_compressed_size > 0) { + if (block_cache_compressd_num_shard_bits > 0) { + options.block_cache = + rocksdb::NewLRUCache(block_cache_compressed_size, + block_cache_compressd_num_shard_bits); + } else { + options.block_cache = rocksdb::NewLRUCache(block_cache_compressed_size); + } + } + options.checksum = static_cast(jchecksum_type); + options.index_type = static_cast< + rocksdb::BlockBasedTableOptions::IndexType>(jindex_type); + options.format_version = jformat_version; + + return reinterpret_cast(rocksdb::NewBlockBasedTableFactory(options)); +} diff --git a/external/rocksdb/java/rocksjni/transaction_log.cc b/external/rocksdb/java/rocksjni/transaction_log.cc new file mode 100755 index 0000000000..eed8d84b5a --- /dev/null 
+++ b/external/rocksdb/java/rocksjni/transaction_log.cc @@ -0,0 +1,78 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::Iterator methods from Java side. + +#include +#include +#include + +#include "include/org_rocksdb_TransactionLogIterator.h" +#include "rocksdb/transaction_log.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_TransactionLogIterator + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionLogIterator_disposeInternal( + JNIEnv* env, jobject jobj, jlong handle) { + delete reinterpret_cast(handle); +} + +/* + * Class: org_rocksdb_TransactionLogIterator + * Method: isValid + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_TransactionLogIterator_isValid( + JNIEnv* env, jobject jobj, jlong handle) { + return reinterpret_cast(handle)->Valid(); +} + +/* + * Class: org_rocksdb_TransactionLogIterator + * Method: next + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionLogIterator_next( + JNIEnv* env, jobject jobj, jlong handle) { + reinterpret_cast(handle)->Next(); +} + +/* + * Class: org_rocksdb_TransactionLogIterator + * Method: status + * Signature: (J)V + */ +void Java_org_rocksdb_TransactionLogIterator_status( + JNIEnv* env, jobject jobj, jlong handle) { + rocksdb::Status s = reinterpret_cast< + rocksdb::TransactionLogIterator*>(handle)->status(); + if (!s.ok()) { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + } +} + +/* + * Class: org_rocksdb_TransactionLogIterator + * Method: getBatch + * Signature: (J)Lorg/rocksdb/TransactionLogIterator$BatchResult + */ +jobject Java_org_rocksdb_TransactionLogIterator_getBatch( + JNIEnv* env, jobject jobj, jlong handle) { + rocksdb::BatchResult batch_result = + reinterpret_cast(handle)->GetBatch(); + jclass jclazz = env->FindClass( + "org/rocksdb/TransactionLogIterator$BatchResult"); + assert(jclazz != nullptr); + jmethodID mid = env->GetMethodID( + jclazz, "", "(Lorg/rocksdb/TransactionLogIterator;JJ)V"); + assert(mid != nullptr); + return env->NewObject(jclazz, mid, jobj, + batch_result.sequence, batch_result.writeBatchPtr.release()); +} diff --git a/external/rocksdb/java/rocksjni/ttl.cc b/external/rocksdb/java/rocksjni/ttl.cc new file mode 100755 index 0000000000..6b39252f1d --- /dev/null +++ b/external/rocksdb/java/rocksjni/ttl.cc @@ -0,0 +1,142 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::TtlDB methods. +// from Java side. 
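
Before the bridge code, a standalone sketch of the native API it forwards to (the path and TTL value are placeholders); note that expired entries are only purged during compaction, so they may remain visible until one runs:

    #include "rocksdb/utilities/db_ttl.h"

    rocksdb::Status OpenTtlSketch(rocksdb::DBWithTTL** db) {
      rocksdb::Options options;
      options.create_if_missing = true;
      // Keys older than 3600 seconds become candidates for removal
      // the next time compaction touches them.
      return rocksdb::DBWithTTL::Open(options, "/tmp/ttl_example", db, 3600);
    }
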
+ +#include +#include +#include +#include +#include +#include + +#include "include/org_rocksdb_TtlDB.h" +#include "rocksdb/utilities/db_ttl.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_TtlDB + * Method: open + * Signature: (JLjava/lang/String;IZ)J + */ +jlong Java_org_rocksdb_TtlDB_open(JNIEnv* env, + jclass jcls, jlong joptions_handle, jstring jdb_path, + jint jttl, jboolean jread_only) { + auto* opt = reinterpret_cast(joptions_handle); + rocksdb::DBWithTTL* db = nullptr; + const char* db_path = env->GetStringUTFChars(jdb_path, 0); + rocksdb::Status s = rocksdb::DBWithTTL::Open(*opt, db_path, &db, + jttl, jread_only); + env->ReleaseStringUTFChars(jdb_path, db_path); + + // as TTLDB extends RocksDB on the java side, we can reuse + // the RocksDB portal here. + if (s.ok()) { + return reinterpret_cast(db); + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return 0; + } +} + +/* + * Class: org_rocksdb_TtlDB + * Method: openCF + * Signature: (JLjava/lang/String;[[B[J[IZ)[J + */ +jlongArray + Java_org_rocksdb_TtlDB_openCF( + JNIEnv* env, jclass jcls, jlong jopt_handle, jstring jdb_path, + jobjectArray jcolumn_names, jlongArray jcolumn_options, + jintArray jttls, jboolean jread_only) { + auto* opt = reinterpret_cast(jopt_handle); + const char* db_path = env->GetStringUTFChars(jdb_path, NULL); + + std::vector column_families; + + jsize len_cols = env->GetArrayLength(jcolumn_names); + jlong* jco = env->GetLongArrayElements(jcolumn_options, NULL); + for(int i = 0; i < len_cols; i++) { + jobject jcn = env->GetObjectArrayElement(jcolumn_names, i); + jbyteArray jcn_ba = reinterpret_cast(jcn); + jbyte* jcf_name = env->GetByteArrayElements(jcn_ba, NULL); + const int jcf_name_len = env->GetArrayLength(jcn_ba); + + //TODO(AR) do I need to make a copy of jco[i] ? 
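+    // Note: ColumnFamilyDescriptor stores its ColumnFamilyOptions by value,
+    // so the push_back below copies *cf_options; the Java-side options
+    // object behind jco[i] only needs to stay alive until then.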
+ + std::string cf_name (reinterpret_cast(jcf_name), jcf_name_len); + rocksdb::ColumnFamilyOptions* cf_options = + reinterpret_cast(jco[i]); + column_families.push_back( + rocksdb::ColumnFamilyDescriptor(cf_name, *cf_options)); + + env->ReleaseByteArrayElements(jcn_ba, jcf_name, JNI_ABORT); + env->DeleteLocalRef(jcn); + } + env->ReleaseLongArrayElements(jcolumn_options, jco, JNI_ABORT); + + std::vector handles; + rocksdb::DBWithTTL* db = nullptr; + + std::vector ttl_values; + jint* jttlv = env->GetIntArrayElements(jttls, NULL); + jsize len_ttls = env->GetArrayLength(jttls); + for(int i = 0; i < len_ttls; i++) { + ttl_values.push_back(jttlv[i]); + } + env->ReleaseIntArrayElements(jttls, jttlv, JNI_ABORT); + + rocksdb::Status s = rocksdb::DBWithTTL::Open(*opt, db_path, column_families, + &handles, &db, ttl_values, jread_only); + + // check if open operation was successful + if (s.ok()) { + jsize resultsLen = 1 + len_cols; //db handle + column family handles + std::unique_ptr results = + std::unique_ptr(new jlong[resultsLen]); + results[0] = reinterpret_cast(db); + for(int i = 1; i <= len_cols; i++) { + results[i] = reinterpret_cast(handles[i - 1]); + } + + jlongArray jresults = env->NewLongArray(resultsLen); + env->SetLongArrayRegion(jresults, 0, resultsLen, results.get()); + return jresults; + } else { + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return NULL; + } +} + +/* + * Class: org_rocksdb_TtlDB + * Method: createColumnFamilyWithTtl + * Signature: (JLorg/rocksdb/ColumnFamilyDescriptor;[BJI)J; + */ +jlong Java_org_rocksdb_TtlDB_createColumnFamilyWithTtl( + JNIEnv* env, jobject jobj, jlong jdb_handle, + jbyteArray jcolumn_name, jlong jcolumn_options, jint jttl) { + rocksdb::ColumnFamilyHandle* handle; + auto* db_handle = reinterpret_cast(jdb_handle); + + jbyte* cfname = env->GetByteArrayElements(jcolumn_name, 0); + const int len = env->GetArrayLength(jcolumn_name); + + auto* cfOptions = + reinterpret_cast(jcolumn_options); + + rocksdb::Status s = db_handle->CreateColumnFamilyWithTtl( + *cfOptions, std::string(reinterpret_cast(cfname), + len), &handle, jttl); + env->ReleaseByteArrayElements(jcolumn_name, cfname, 0); + + if (s.ok()) { + return reinterpret_cast(handle); + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); + return 0; +} diff --git a/external/rocksdb/java/rocksjni/write_batch.cc b/external/rocksdb/java/rocksjni/write_batch.cc new file mode 100755 index 0000000000..2ae9587025 --- /dev/null +++ b/external/rocksdb/java/rocksjni/write_batch.cc @@ -0,0 +1,271 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::WriteBatch methods from Java side. 
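
For orientation, the JNI methods below drive a WriteBatch exactly as this standalone native sketch does (keys and values are placeholders); the whole batch is applied atomically by DB::Write:

    #include "rocksdb/db.h"
    #include "rocksdb/write_batch.h"

    rocksdb::Status BatchSketch(rocksdb::DB* db) {
      rocksdb::WriteBatch batch;
      batch.Put("key1", "value1");
      batch.Delete("key2");
      batch.Merge("counter", "1");  // needs a merge operator configured
      return db->Write(rocksdb::WriteOptions(), &batch);
    }
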
+#include + +#include "db/memtable.h" +#include "db/write_batch_internal.h" +#include "include/org_rocksdb_WriteBatch.h" +#include "include/org_rocksdb_WriteBatch_Handler.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/immutable_options.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/status.h" +#include "rocksdb/write_batch.h" +#include "rocksdb/write_buffer_manager.h" +#include "rocksjni/portal.h" +#include "rocksjni/writebatchhandlerjnicallback.h" +#include "table/scoped_arena_iterator.h" +#include "util/logging.h" +#include "util/testharness.h" + +/* + * Class: org_rocksdb_WriteBatch + * Method: newWriteBatch + * Signature: (I)J + */ +jlong Java_org_rocksdb_WriteBatch_newWriteBatch( + JNIEnv* env, jclass jcls, jint jreserved_bytes) { + rocksdb::WriteBatch* wb = new rocksdb::WriteBatch( + static_cast(jreserved_bytes)); + return reinterpret_cast(wb); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: count0 + * Signature: (J)I + */ +jint Java_org_rocksdb_WriteBatch_count0(JNIEnv* env, jobject jobj, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return static_cast(wb->Count()); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: clear0 + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatch_clear0(JNIEnv* env, jobject jobj, + jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + wb->Clear(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: setSavePoint0 + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatch_setSavePoint0( + JNIEnv* env, jobject jobj, jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + wb->SetSavePoint(); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: rollbackToSavePoint0 + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatch_rollbackToSavePoint0( + JNIEnv* env, jobject jobj, jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + auto s = wb->RollbackToSavePoint(); + + if (s.ok()) { + return; + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: put + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_WriteBatch_put__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong jwb_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto put = [&wb] (rocksdb::Slice key, rocksdb::Slice value) { + wb->Put(key, value); + }; + rocksdb::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, jentry_value, + jentry_value_len); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: put + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_WriteBatch_put__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwb_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto* cf_handle = reinterpret_cast(jcf_handle); + assert(cf_handle != nullptr); + auto put = [&wb, &cf_handle] (rocksdb::Slice key, rocksdb::Slice value) { + wb->Put(cf_handle, key, value); + }; + rocksdb::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, jentry_value, + jentry_value_len); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: merge + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_WriteBatch_merge__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong jwb_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint 
jentry_value_len) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto merge = [&wb] (rocksdb::Slice key, rocksdb::Slice value) { + wb->Merge(key, value); + }; + rocksdb::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, jentry_value, + jentry_value_len); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: merge + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_WriteBatch_merge__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwb_handle, + jbyteArray jkey, jint jkey_len, + jbyteArray jentry_value, jint jentry_value_len, jlong jcf_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto* cf_handle = reinterpret_cast(jcf_handle); + assert(cf_handle != nullptr); + auto merge = [&wb, &cf_handle] (rocksdb::Slice key, rocksdb::Slice value) { + wb->Merge(cf_handle, key, value); + }; + rocksdb::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, jentry_value, + jentry_value_len); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: remove + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WriteBatch_remove__J_3BI( + JNIEnv* env, jobject jobj, jlong jwb_handle, + jbyteArray jkey, jint jkey_len) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto remove = [&wb] (rocksdb::Slice key) { + wb->Delete(key); + }; + rocksdb::JniUtil::k_op(remove, env, jobj, jkey, jkey_len); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: remove + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_WriteBatch_remove__J_3BIJ( + JNIEnv* env, jobject jobj, jlong jwb_handle, + jbyteArray jkey, jint jkey_len, jlong jcf_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto* cf_handle = reinterpret_cast(jcf_handle); + assert(cf_handle != nullptr); + auto remove = [&wb, &cf_handle] (rocksdb::Slice key) { + wb->Delete(cf_handle, key); + }; + rocksdb::JniUtil::k_op(remove, env, jobj, jkey, jkey_len); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: putLogData + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WriteBatch_putLogData( + JNIEnv* env, jobject jobj, jlong jwb_handle, jbyteArray jblob, + jint jblob_len) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + auto putLogData = [&wb] (rocksdb::Slice blob) { + wb->PutLogData(blob); + }; + rocksdb::JniUtil::k_op(putLogData, env, jobj, jblob, jblob_len); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: iterate + * Signature: (JJ)V + */ +void Java_org_rocksdb_WriteBatch_iterate( + JNIEnv* env , jobject jobj, jlong jwb_handle, jlong handlerHandle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + rocksdb::Status s = wb->Iterate( + reinterpret_cast(handlerHandle)); + + if (s.ok()) { + return; + } + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_WriteBatch + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatch_disposeInternal( + JNIEnv* env, jobject jobj, jlong handle) { + delete reinterpret_cast(handle); +} + +/* + * Class: org_rocksdb_WriteBatch_Handler + * Method: createNewHandler0 + * Signature: ()J + */ +jlong Java_org_rocksdb_WriteBatch_00024Handler_createNewHandler0( + JNIEnv* env, jobject jobj) { + const rocksdb::WriteBatchHandlerJniCallback* h = + new rocksdb::WriteBatchHandlerJniCallback(env, jobj); + return reinterpret_cast(h); +} + +/* + * Class: org_rocksdb_WriteBatch_Handler + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatch_00024Handler_disposeInternal( + JNIEnv* env, jobject jobj, jlong handle) { + 
delete reinterpret_cast(handle); +} diff --git a/external/rocksdb/java/rocksjni/write_batch_test.cc b/external/rocksdb/java/rocksjni/write_batch_test.cc new file mode 100755 index 0000000000..371744e4f9 --- /dev/null +++ b/external/rocksdb/java/rocksjni/write_batch_test.cc @@ -0,0 +1,149 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::WriteBatch methods testing from Java side. +#include + +#include "db/memtable.h" +#include "db/write_batch_internal.h" +#include "include/org_rocksdb_WriteBatch.h" +#include "include/org_rocksdb_WriteBatchTest.h" +#include "include/org_rocksdb_WriteBatchTestInternalHelper.h" +#include "include/org_rocksdb_WriteBatch_Handler.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/immutable_options.h" +#include "rocksdb/memtablerep.h" +#include "rocksdb/status.h" +#include "rocksdb/write_batch.h" +#include "rocksdb/write_buffer_manager.h" +#include "rocksjni/portal.h" +#include "table/scoped_arena_iterator.h" +#include "util/logging.h" +#include "util/testharness.h" + +/* + * Class: org_rocksdb_WriteBatchTest + * Method: getContents + * Signature: (J)[B + */ +jbyteArray Java_org_rocksdb_WriteBatchTest_getContents( + JNIEnv* env, jclass jclazz, jlong jwb_handle) { + auto* b = reinterpret_cast(jwb_handle); + assert(b != nullptr); + + // todo: Currently the following code is directly copied from + // db/write_bench_test.cc. It could be implemented in java once + // all the necessary components can be accessed via jni api. 
+ + rocksdb::InternalKeyComparator cmp(rocksdb::BytewiseComparator()); + auto factory = std::make_shared(); + rocksdb::Options options; + rocksdb::WriteBufferManager wb(options.db_write_buffer_size); + options.memtable_factory = factory; + rocksdb::MemTable* mem = new rocksdb::MemTable( + cmp, rocksdb::ImmutableCFOptions(options), + rocksdb::MutableCFOptions(options, rocksdb::ImmutableCFOptions(options)), + &wb, rocksdb::kMaxSequenceNumber); + mem->Ref(); + std::string state; + rocksdb::ColumnFamilyMemTablesDefault cf_mems_default(mem); + rocksdb::Status s = + rocksdb::WriteBatchInternal::InsertInto(b, &cf_mems_default, nullptr); + int count = 0; + rocksdb::Arena arena; + rocksdb::ScopedArenaIterator iter(mem->NewIterator( + rocksdb::ReadOptions(), &arena)); + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + rocksdb::ParsedInternalKey ikey; + memset(reinterpret_cast(&ikey), 0, sizeof(ikey)); + bool parsed = rocksdb::ParseInternalKey(iter->key(), &ikey); + assert(parsed); + switch (ikey.type) { + case rocksdb::kTypeValue: + state.append("Put("); + state.append(ikey.user_key.ToString()); + state.append(", "); + state.append(iter->value().ToString()); + state.append(")"); + count++; + break; + case rocksdb::kTypeMerge: + state.append("Merge("); + state.append(ikey.user_key.ToString()); + state.append(", "); + state.append(iter->value().ToString()); + state.append(")"); + count++; + break; + case rocksdb::kTypeDeletion: + state.append("Delete("); + state.append(ikey.user_key.ToString()); + state.append(")"); + count++; + break; + default: + assert(false); + break; + } + state.append("@"); + state.append(rocksdb::NumberToString(ikey.sequence)); + } + if (!s.ok()) { + state.append(s.ToString()); + } else if (count != rocksdb::WriteBatchInternal::Count(b)) { + state.append("CountMismatch()"); + } + delete mem->Unref(); + + jbyteArray jstate = env->NewByteArray(static_cast(state.size())); + env->SetByteArrayRegion(jstate, 0, static_cast(state.size()), + reinterpret_cast(state.c_str())); + + return jstate; +} + +/* + * Class: org_rocksdb_WriteBatchTestInternalHelper + * Method: setSequence + * Signature: (JJ)V + */ +void Java_org_rocksdb_WriteBatchTestInternalHelper_setSequence( + JNIEnv* env, jclass jclazz, jlong jwb_handle, jlong jsn) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + rocksdb::WriteBatchInternal::SetSequence( + wb, static_cast(jsn)); +} + +/* + * Class: org_rocksdb_WriteBatchTestInternalHelper + * Method: sequence + * Signature: (J)J + */ +jlong Java_org_rocksdb_WriteBatchTestInternalHelper_sequence( + JNIEnv* env, jclass jclazz, jlong jwb_handle) { + auto* wb = reinterpret_cast(jwb_handle); + assert(wb != nullptr); + + return static_cast(rocksdb::WriteBatchInternal::Sequence(wb)); +} + +/* + * Class: org_rocksdb_WriteBatchTestInternalHelper + * Method: append + * Signature: (JJ)V + */ +void Java_org_rocksdb_WriteBatchTestInternalHelper_append( + JNIEnv* env, jclass jclazz, jlong jwb_handle_1, jlong jwb_handle_2) { + auto* wb1 = reinterpret_cast(jwb_handle_1); + assert(wb1 != nullptr); + auto* wb2 = reinterpret_cast(jwb_handle_2); + assert(wb2 != nullptr); + + rocksdb::WriteBatchInternal::Append(wb1, wb2); +} diff --git a/external/rocksdb/java/rocksjni/write_batch_with_index.cc b/external/rocksdb/java/rocksjni/write_batch_with_index.cc new file mode 100755 index 0000000000..2b8cf778b6 --- /dev/null +++ b/external/rocksdb/java/rocksjni/write_batch_with_index.cc @@ -0,0 +1,441 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the "bridge" between Java and C++ and enables +// calling c++ rocksdb::WriteBatchWithIndex methods from Java side. + +#include "include/org_rocksdb_WBWIRocksIterator.h" +#include "include/org_rocksdb_WriteBatchWithIndex.h" +#include "rocksdb/comparator.h" +#include "rocksdb/utilities/write_batch_with_index.h" +#include "rocksjni/portal.h" + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: newWriteBatchWithIndex + * Signature: ()J + */ +jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__( + JNIEnv* env, jclass jcls) { + rocksdb::WriteBatchWithIndex* wbwi = new rocksdb::WriteBatchWithIndex(); + return reinterpret_cast(wbwi); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: newWriteBatchWithIndex + * Signature: (Z)J + */ +jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__Z( + JNIEnv* env, jclass jcls, jboolean joverwrite_key) { + rocksdb::WriteBatchWithIndex* wbwi = + new rocksdb::WriteBatchWithIndex(rocksdb::BytewiseComparator(), 0, + static_cast(joverwrite_key)); + return reinterpret_cast(wbwi); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: newWriteBatchWithIndex + * Signature: (JIZ)J + */ +jlong Java_org_rocksdb_WriteBatchWithIndex_newWriteBatchWithIndex__JIZ( + JNIEnv* env, jclass jcls, jlong jfallback_index_comparator_handle, + jint jreserved_bytes, jboolean joverwrite_key) { + rocksdb::WriteBatchWithIndex* wbwi = + new rocksdb::WriteBatchWithIndex( + reinterpret_cast(jfallback_index_comparator_handle), + static_cast(jreserved_bytes), static_cast(joverwrite_key)); + return reinterpret_cast(wbwi); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: count0 + * Signature: (J)I + */ +jint Java_org_rocksdb_WriteBatchWithIndex_count0( + JNIEnv* env, jobject jobj, jlong jwbwi_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + + return static_cast(wbwi->GetWriteBatch()->Count()); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: put + * Signature: (J[BI[BI)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_put__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) { + auto* wbwi = reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + auto put = [&wbwi] (rocksdb::Slice key, rocksdb::Slice value) { + wbwi->Put(key, value); + }; + rocksdb::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, jentry_value, + jentry_value_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: put + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_put__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len, + jlong jcf_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + auto* cf_handle = reinterpret_cast(jcf_handle); + assert(cf_handle != nullptr); + auto put = [&wbwi, &cf_handle] (rocksdb::Slice key, rocksdb::Slice value) { + wbwi->Put(cf_handle, key, value); + }; + rocksdb::JniUtil::kv_op(put, env, jobj, jkey, jkey_len, jentry_value, + jentry_value_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: merge + * Signature: (J[BI[BI)V + */ +void 
Java_org_rocksdb_WriteBatchWithIndex_merge__J_3BI_3BI( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len) { + auto* wbwi = reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + auto merge = [&wbwi] (rocksdb::Slice key, rocksdb::Slice value) { + wbwi->Merge(key, value); + }; + rocksdb::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, jentry_value, + jentry_value_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: merge + * Signature: (J[BI[BIJ)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_merge__J_3BI_3BIJ( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jbyteArray jentry_value, jint jentry_value_len, + jlong jcf_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + auto* cf_handle = reinterpret_cast(jcf_handle); + assert(cf_handle != nullptr); + auto merge = [&wbwi, &cf_handle] (rocksdb::Slice key, rocksdb::Slice value) { + wbwi->Merge(cf_handle, key, value); + }; + rocksdb::JniUtil::kv_op(merge, env, jobj, jkey, jkey_len, jentry_value, + jentry_value_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: remove + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_remove__J_3BI( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len) { + auto* wbwi = reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + auto remove = [&wbwi] (rocksdb::Slice key) { + wbwi->Delete(key); + }; + rocksdb::JniUtil::k_op(remove, env, jobj, jkey, jkey_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: remove + * Signature: (J[BIJ)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_remove__J_3BIJ( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jkey, + jint jkey_len, jlong jcf_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + auto* cf_handle = reinterpret_cast(jcf_handle); + assert(cf_handle != nullptr); + auto remove = [&wbwi, &cf_handle] (rocksdb::Slice key) { + wbwi->Delete(cf_handle, key); + }; + rocksdb::JniUtil::k_op(remove, env, jobj, jkey, jkey_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: putLogData + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_putLogData( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jbyteArray jblob, + jint jblob_len) { + auto* wbwi = reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + auto putLogData = [&wbwi] (rocksdb::Slice blob) { + wbwi->PutLogData(blob); + }; + rocksdb::JniUtil::k_op(putLogData, env, jobj, jblob, jblob_len); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: clear + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_clear0( + JNIEnv* env, jobject jobj, jlong jwbwi_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + + wbwi->Clear(); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: setSavePoint0 + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_setSavePoint0( + JNIEnv* env, jobject jobj, jlong jwbwi_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + + wbwi->SetSavePoint(); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: rollbackToSavePoint0 + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_rollbackToSavePoint0( + JNIEnv* env, jobject jobj, jlong jwbwi_handle) { + auto* wbwi = reinterpret_cast(jwbwi_handle); + assert(wbwi != nullptr); + + auto s 
= wbwi->RollbackToSavePoint(); + + if (s.ok()) { + return; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: iterator0 + * Signature: (J)J + */ +jlong Java_org_rocksdb_WriteBatchWithIndex_iterator0( + JNIEnv* env, jobject jobj, jlong jwbwi_handle) { + auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle); + rocksdb::WBWIIterator* wbwi_iterator = wbwi->NewIterator(); + return reinterpret_cast<jlong>(wbwi_iterator); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: iterator1 + * Signature: (JJ)J + */ +jlong Java_org_rocksdb_WriteBatchWithIndex_iterator1( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jcf_handle) { + auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle); + auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle); + rocksdb::WBWIIterator* wbwi_iterator = wbwi->NewIterator(cf_handle); + return reinterpret_cast<jlong>(wbwi_iterator); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: iteratorWithBase + * Signature: (JJJ)J + */ +jlong Java_org_rocksdb_WriteBatchWithIndex_iteratorWithBase( + JNIEnv* env, jobject jobj, jlong jwbwi_handle, jlong jcf_handle, + jlong jbi_handle) { + auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(jwbwi_handle); + auto* cf_handle = reinterpret_cast<rocksdb::ColumnFamilyHandle*>(jcf_handle); + auto* base_iterator = reinterpret_cast<rocksdb::Iterator*>(jbi_handle); + auto* iterator = wbwi->NewIteratorWithBase(cf_handle, base_iterator); + return reinterpret_cast<jlong>(iterator); +} + +/* + * Class: org_rocksdb_WriteBatchWithIndex + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_WriteBatchWithIndex_disposeInternal( + JNIEnv* env, jobject jobj, jlong handle) { + auto* wbwi = reinterpret_cast<rocksdb::WriteBatchWithIndex*>(handle); + delete wbwi; +} + +/* WBWIRocksIterator below */ + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: disposeInternal + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_disposeInternal( + JNIEnv* env, jobject jobj, jlong handle) { + auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle); + delete it; +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: isValid0 + * Signature: (J)Z + */ +jboolean Java_org_rocksdb_WBWIRocksIterator_isValid0( + JNIEnv* env, jobject jobj, jlong handle) { + return reinterpret_cast<rocksdb::WBWIIterator*>(handle)->Valid(); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: seekToFirst0 + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_seekToFirst0( + JNIEnv* env, jobject jobj, jlong handle) { + reinterpret_cast<rocksdb::WBWIIterator*>(handle)->SeekToFirst(); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: seekToLast0 + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_seekToLast0( + JNIEnv* env, jobject jobj, jlong handle) { + reinterpret_cast<rocksdb::WBWIIterator*>(handle)->SeekToLast(); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: next0 + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_next0( + JNIEnv* env, jobject jobj, jlong handle) { + reinterpret_cast<rocksdb::WBWIIterator*>(handle)->Next(); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: prev0 + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_prev0( + JNIEnv* env, jobject jobj, jlong handle) { + reinterpret_cast<rocksdb::WBWIIterator*>(handle)->Prev(); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: seek0 + * Signature: (J[BI)V + */ +void Java_org_rocksdb_WBWIRocksIterator_seek0( + JNIEnv* env, jobject jobj, jlong handle, jbyteArray jtarget, + jint jtarget_len) { + auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle); + jbyte* target = env->GetByteArrayElements(jtarget, 0); + rocksdb::Slice target_slice( + reinterpret_cast<char*>(target), jtarget_len); + +
it->Seek(target_slice); + + env->ReleaseByteArrayElements(jtarget, target, JNI_ABORT); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: status0 + * Signature: (J)V + */ +void Java_org_rocksdb_WBWIRocksIterator_status0( + JNIEnv* env, jobject jobj, jlong handle) { + auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle); + rocksdb::Status s = it->status(); + + if (s.ok()) { + return; + } + + rocksdb::RocksDBExceptionJni::ThrowNew(env, s); +} + +/* + * Class: org_rocksdb_WBWIRocksIterator + * Method: entry1 + * Signature: (J)[J + */ +jlongArray Java_org_rocksdb_WBWIRocksIterator_entry1( + JNIEnv* env, jobject jobj, jlong handle) { + auto* it = reinterpret_cast<rocksdb::WBWIIterator*>(handle); + const rocksdb::WriteEntry& we = it->Entry(); + + jlong results[3]; + + //set the type of the write entry + switch (we.type) { + case rocksdb::kPutRecord: + results[0] = 0x1; + break; + + case rocksdb::kMergeRecord: + results[0] = 0x2; + break; + + case rocksdb::kDeleteRecord: + results[0] = 0x4; + break; + + case rocksdb::kLogDataRecord: + results[0] = 0x8; + break; + + default: + results[0] = 0x0; + } + + //TODO(AR) do we leak buf and value_buf? + + //set the pointer to the key slice + char* buf = new char[we.key.size()]; + memcpy(buf, we.key.data(), we.key.size()); + auto* key_slice = new rocksdb::Slice(buf, we.key.size()); + results[1] = reinterpret_cast<jlong>(key_slice); + + //set the pointer to the value slice + if (we.type == rocksdb::kDeleteRecord || we.type == rocksdb::kLogDataRecord) { + // set native handle of value slice to null if no value available + results[2] = 0; + } else { + char* value_buf = new char[we.value.size()]; + memcpy(value_buf, we.value.data(), we.value.size()); + auto* value_slice = new rocksdb::Slice(value_buf, we.value.size()); + results[2] = reinterpret_cast<jlong>(value_slice); + } + + jlongArray jresults = env->NewLongArray(3); + env->SetLongArrayRegion(jresults, 0, 3, results); + return jresults; +} diff --git a/external/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc b/external/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc new file mode 100755 index 0000000000..b252365184 --- /dev/null +++ b/external/rocksdb/java/rocksjni/writebatchhandlerjnicallback.cc @@ -0,0 +1,104 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the callback "bridge" between Java and C++ for +// rocksdb::WriteBatch::Handler.
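The C++ file that begins here forwards each record of a WriteBatch to a Java object. For orientation, a hedged sketch of the Java counterpart that receives these callbacks; it assumes the usual org.rocksdb.WriteBatch.Handler abstract class, which this hunk does not show:

import org.rocksdb.WriteBatch;

// Counts Put records as the C++ bridge replays a batch into Java.
public class CountingHandler extends WriteBatch.Handler {
  private int puts = 0;

  @Override
  public void put(final byte[] key, final byte[] value) {
    puts++; // invoked from WriteBatchHandlerJniCallback::Put below
  }

  @Override
  public void merge(final byte[] key, final byte[] value) {}

  @Override
  public void delete(final byte[] key) {}

  @Override
  public void logData(final byte[] blob) {}

  public int getPuts() {
    return puts;
  }
}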
+ +#include "rocksjni/writebatchhandlerjnicallback.h" +#include "rocksjni/portal.h" + +namespace rocksdb { +WriteBatchHandlerJniCallback::WriteBatchHandlerJniCallback( + JNIEnv* env, jobject jWriteBatchHandler) + : m_env(env) { + + // Note: we want to access the Java WriteBatchHandler instance + // across multiple method calls, so we create a global ref + m_jWriteBatchHandler = env->NewGlobalRef(jWriteBatchHandler); + + m_jPutMethodId = WriteBatchHandlerJni::getPutMethodId(env); + m_jMergeMethodId = WriteBatchHandlerJni::getMergeMethodId(env); + m_jDeleteMethodId = WriteBatchHandlerJni::getDeleteMethodId(env); + m_jLogDataMethodId = WriteBatchHandlerJni::getLogDataMethodId(env); + m_jContinueMethodId = WriteBatchHandlerJni::getContinueMethodId(env); +} + +void WriteBatchHandlerJniCallback::Put(const Slice& key, const Slice& value) { + const jbyteArray j_key = sliceToJArray(key); + const jbyteArray j_value = sliceToJArray(value); + + m_env->CallVoidMethod( + m_jWriteBatchHandler, + m_jPutMethodId, + j_key, + j_value); + + m_env->DeleteLocalRef(j_value); + m_env->DeleteLocalRef(j_key); +} + +void WriteBatchHandlerJniCallback::Merge(const Slice& key, const Slice& value) { + const jbyteArray j_key = sliceToJArray(key); + const jbyteArray j_value = sliceToJArray(value); + + m_env->CallVoidMethod( + m_jWriteBatchHandler, + m_jMergeMethodId, + j_key, + j_value); + + m_env->DeleteLocalRef(j_value); + m_env->DeleteLocalRef(j_key); +} + +void WriteBatchHandlerJniCallback::Delete(const Slice& key) { + const jbyteArray j_key = sliceToJArray(key); + + m_env->CallVoidMethod( + m_jWriteBatchHandler, + m_jDeleteMethodId, + j_key); + + m_env->DeleteLocalRef(j_key); +} + +void WriteBatchHandlerJniCallback::LogData(const Slice& blob) { + const jbyteArray j_blob = sliceToJArray(blob); + + m_env->CallVoidMethod( + m_jWriteBatchHandler, + m_jLogDataMethodId, + j_blob); + + m_env->DeleteLocalRef(j_blob); +} + +bool WriteBatchHandlerJniCallback::Continue() { + jboolean jContinue = m_env->CallBooleanMethod( + m_jWriteBatchHandler, + m_jContinueMethodId); + + return static_cast<bool>(jContinue == JNI_TRUE); +} + +/* + * Creates a Java Byte Array from the data in a Slice + * + * When calling this function + * you must remember to call env->DeleteLocalRef + * on the result after you have finished with it + */ +jbyteArray WriteBatchHandlerJniCallback::sliceToJArray(const Slice& s) { + jbyteArray ja = m_env->NewByteArray(static_cast<jsize>(s.size())); + m_env->SetByteArrayRegion( + ja, 0, static_cast<jsize>(s.size()), + reinterpret_cast<const jbyte*>(s.data())); + return ja; +} + +WriteBatchHandlerJniCallback::~WriteBatchHandlerJniCallback() { + m_env->DeleteGlobalRef(m_jWriteBatchHandler); +} +} // namespace rocksdb diff --git a/external/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h b/external/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h new file mode 100755 index 0000000000..1c421db030 --- /dev/null +++ b/external/rocksdb/java/rocksjni/writebatchhandlerjnicallback.h @@ -0,0 +1,46 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// This file implements the callback "bridge" between Java and C++ for +// rocksdb::WriteBatch::Handler.
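The Continue() override above lets the Java side stop replay between records. A hedged usage sketch, assuming WriteBatch#iterate(Handler) is the Java entry point that drives this bridge:

// Replays the batch through the JNI bridge; may throw RocksDBException.
try (final WriteBatch batch = new WriteBatch()) {
  batch.put("k1".getBytes(), "v1".getBytes());
  batch.remove("k0".getBytes());

  final CountingHandler handler = new CountingHandler();
  batch.iterate(handler); // each record triggers one callback in the bridge
  assert (handler.getPuts() == 1);
}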
+ +#ifndef JAVA_ROCKSJNI_WRITEBATCHHANDLERJNICALLBACK_H_ +#define JAVA_ROCKSJNI_WRITEBATCHHANDLERJNICALLBACK_H_ + +#include <jni.h> +#include "rocksdb/write_batch.h" + +namespace rocksdb { +/** + * This class acts as a bridge between C++ + * and Java. The methods in this class will be + * called back from the RocksDB storage engine (C++) + * which calls the appropriate Java method. + * This enables Write Batch Handlers to be implemented in Java. + */ +class WriteBatchHandlerJniCallback : public WriteBatch::Handler { + public: + WriteBatchHandlerJniCallback( + JNIEnv* env, jobject jWriteBatchHandler); + ~WriteBatchHandlerJniCallback(); + void Put(const Slice& key, const Slice& value); + void Merge(const Slice& key, const Slice& value); + void Delete(const Slice& key); + void LogData(const Slice& blob); + bool Continue(); + + private: + JNIEnv* m_env; + jobject m_jWriteBatchHandler; + jbyteArray sliceToJArray(const Slice& s); + jmethodID m_jPutMethodId; + jmethodID m_jMergeMethodId; + jmethodID m_jDeleteMethodId; + jmethodID m_jLogDataMethodId; + jmethodID m_jContinueMethodId; +}; +} // namespace rocksdb + +#endif // JAVA_ROCKSJNI_WRITEBATCHHANDLERJNICALLBACK_H_ diff --git a/external/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java b/external/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java new file mode 100755 index 0000000000..7dd0de9cc4 --- /dev/null +++ b/external/rocksdb/java/samples/src/main/java/RocksDBColumnFamilySample.java @@ -0,0 +1,79 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory.
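Looking back at the WriteBatchWithIndex JNI functions earlier in this diff, a hedged Java-side sketch of the API they back; db stands for an already-open RocksDB instance and is an assumption here:

try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex();
     final RocksIterator base = db.newIterator();
     // backed by Java_org_rocksdb_WriteBatchWithIndex_iteratorWithBase
     final RocksIterator it = wbwi.newIteratorWithBase(base)) {
  wbwi.put("key".getBytes(), "value".getBytes());
  it.seek("key".getBytes()); // sees the batched, not-yet-committed write
  assert (it.isValid());
}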
+ +import org.rocksdb.*; + +import java.util.ArrayList; +import java.util.List; + +public class RocksDBColumnFamilySample { + static { + RocksDB.loadLibrary(); + } + + public static void main(String[] args) throws RocksDBException { + if (args.length < 1) { + System.out.println( + "usage: RocksDBColumnFamilySample db_path"); + return; + } + String db_path = args[0]; + + System.out.println("RocksDBColumnFamilySample"); + try(final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, db_path)) { + + assert(db != null); + + // create column family + try(final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf".getBytes(), + new ColumnFamilyOptions()))) { + assert (columnFamilyHandle != null); + } + } + + // open DB with two column families + final List<ColumnFamilyDescriptor> columnFamilyDescriptors = + new ArrayList<>(); + // have to open default column family + columnFamilyDescriptors.add(new ColumnFamilyDescriptor( + RocksDB.DEFAULT_COLUMN_FAMILY, new ColumnFamilyOptions())); + // open the new one, too + columnFamilyDescriptors.add(new ColumnFamilyDescriptor( + "new_cf".getBytes(), new ColumnFamilyOptions())); + final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(); + try(final DBOptions options = new DBOptions(); + final RocksDB db = RocksDB.open(options, db_path, + columnFamilyDescriptors, columnFamilyHandles)) { + assert(db != null); + + try { + // put and get from non-default column family + db.put(columnFamilyHandles.get(0), new WriteOptions(), + "key".getBytes(), "value".getBytes()); + String value = new String(db.get(columnFamilyHandles.get(0), + "key".getBytes())); + + // atomic write + try (final WriteBatch wb = new WriteBatch()) { + wb.put(columnFamilyHandles.get(0), "key2".getBytes(), + "value2".getBytes()); + wb.put(columnFamilyHandles.get(1), "key3".getBytes(), + "value3".getBytes()); + wb.remove(columnFamilyHandles.get(0), "key".getBytes()); + db.write(new WriteOptions(), wb); + } + + // drop column family + db.dropColumnFamily(columnFamilyHandles.get(1)); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } +} diff --git a/external/rocksdb/java/samples/src/main/java/RocksDBSample.java b/external/rocksdb/java/samples/src/main/java/RocksDBSample.java new file mode 100755 index 0000000000..de5bc26d5d --- /dev/null +++ b/external/rocksdb/java/samples/src/main/java/RocksDBSample.java @@ -0,0 +1,300 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory.
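The sample above leans on every handle-owning RocksDB object being AutoCloseable. A minimal hedged sketch of that resource pattern on its own; the path is a placeholder:

try (final Options options = new Options().setCreateIfMissing(true);
     final RocksDB db = RocksDB.open(options, "/tmp/rocksdb-example")) {
  db.put("answer".getBytes(), "42".getBytes());
} // closed in reverse declaration order: db first, then options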
+ +import java.lang.IllegalArgumentException; +import java.util.Arrays; +import java.util.List; +import java.util.Map; +import java.util.ArrayList; + +import org.rocksdb.*; +import org.rocksdb.util.SizeUnit; + +import java.io.IOException; + +public class RocksDBSample { + static { + RocksDB.loadLibrary(); + } + + public static void main(String[] args) { + if (args.length < 1) { + System.out.println("usage: RocksDBSample db_path"); + return; + } + String db_path = args[0]; + String db_path_not_found = db_path + "_not_found"; + + System.out.println("RocksDBSample"); + try (final Options options = new Options(); + final Filter bloomFilter = new BloomFilter(10); + final ReadOptions readOptions = new ReadOptions() + .setFillCache(false)) { + + try (final RocksDB db = RocksDB.open(options, db_path_not_found)) { + assert (false); + } catch (RocksDBException e) { + System.out.format("caught the expected exception -- %s\n", e); + } + + try { + options.setCreateIfMissing(true) + .createStatistics() + .setWriteBufferSize(8 * SizeUnit.KB) + .setMaxWriteBufferNumber(3) + .setMaxBackgroundCompactions(10) + .setCompressionType(CompressionType.SNAPPY_COMPRESSION) + .setCompactionStyle(CompactionStyle.UNIVERSAL); + } catch (IllegalArgumentException e) { + assert (false); + } + + Statistics stats = options.statisticsPtr(); + + assert (options.createIfMissing() == true); + assert (options.writeBufferSize() == 8 * SizeUnit.KB); + assert (options.maxWriteBufferNumber() == 3); + assert (options.maxBackgroundCompactions() == 10); + assert (options.compressionType() == CompressionType.SNAPPY_COMPRESSION); + assert (options.compactionStyle() == CompactionStyle.UNIVERSAL); + + assert (options.memTableFactoryName().equals("SkipListFactory")); + options.setMemTableConfig( + new HashSkipListMemTableConfig() + .setHeight(4) + .setBranchingFactor(4) + .setBucketCount(2000000)); + assert (options.memTableFactoryName().equals("HashSkipListRepFactory")); + + options.setMemTableConfig( + new HashLinkedListMemTableConfig() + .setBucketCount(100000)); + assert (options.memTableFactoryName().equals("HashLinkedListRepFactory")); + + options.setMemTableConfig( + new VectorMemTableConfig().setReservedSize(10000)); + assert (options.memTableFactoryName().equals("VectorRepFactory")); + + options.setMemTableConfig(new SkipListMemTableConfig()); + assert (options.memTableFactoryName().equals("SkipListFactory")); + + options.setTableFormatConfig(new PlainTableConfig()); + // Plain-Table requires mmap read + options.setAllowMmapReads(true); + assert (options.tableFactoryName().equals("PlainTable")); + + options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000, + 10000, 10)); + options.setRateLimiterConfig(new GenericRateLimiterConfig(10000000)); + + final BlockBasedTableConfig table_options = new BlockBasedTableConfig(); + table_options.setBlockCacheSize(64 * SizeUnit.KB) + .setFilter(bloomFilter) + .setCacheNumShardBits(6) + .setBlockSizeDeviation(5) + .setBlockRestartInterval(10) + .setCacheIndexAndFilterBlocks(true) + .setHashIndexAllowCollision(false) + .setBlockCacheCompressedSize(64 * SizeUnit.KB) + .setBlockCacheCompressedNumShardBits(10); + + assert (table_options.blockCacheSize() == 64 * SizeUnit.KB); + assert (table_options.cacheNumShardBits() == 6); + assert (table_options.blockSizeDeviation() == 5); + assert (table_options.blockRestartInterval() == 10); + assert (table_options.cacheIndexAndFilterBlocks() == true); + assert (table_options.hashIndexAllowCollision() == false); + assert 
(table_options.blockCacheCompressedSize() == 64 * SizeUnit.KB); + assert (table_options.blockCacheCompressedNumShardBits() == 10); + + options.setTableFormatConfig(table_options); + assert (options.tableFactoryName().equals("BlockBasedTable")); + + try (final RocksDB db = RocksDB.open(options, db_path)) { + db.put("hello".getBytes(), "world".getBytes()); + byte[] value = db.get("hello".getBytes()); + assert ("world".equals(new String(value))); + String str = db.getProperty("rocksdb.stats"); + assert (str != null && !str.equals("")); + } catch (RocksDBException e) { + System.out.format("[ERROR] caught the unexpected exception -- %s\n", e); + assert (false); + } + + try (final RocksDB db = RocksDB.open(options, db_path)) { + db.put("hello".getBytes(), "world".getBytes()); + byte[] value = db.get("hello".getBytes()); + System.out.format("Get('hello') = %s\n", + new String(value)); + + for (int i = 1; i <= 9; ++i) { + for (int j = 1; j <= 9; ++j) { + db.put(String.format("%dx%d", i, j).getBytes(), + String.format("%d", i * j).getBytes()); + } + } + + for (int i = 1; i <= 9; ++i) { + for (int j = 1; j <= 9; ++j) { + System.out.format("%s ", new String(db.get( + String.format("%dx%d", i, j).getBytes()))); + } + System.out.println(""); + } + + // write batch test + try (final WriteOptions writeOpt = new WriteOptions()) { + for (int i = 10; i <= 19; ++i) { + try (final WriteBatch batch = new WriteBatch()) { + for (int j = 10; j <= 19; ++j) { + batch.put(String.format("%dx%d", i, j).getBytes(), + String.format("%d", i * j).getBytes()); + } + db.write(writeOpt, batch); + } + } + } + for (int i = 10; i <= 19; ++i) { + for (int j = 10; j <= 19; ++j) { + assert (new String( + db.get(String.format("%dx%d", i, j).getBytes())).equals( + String.format("%d", i * j))); + System.out.format("%s ", new String(db.get( + String.format("%dx%d", i, j).getBytes()))); + } + System.out.println(""); + } + + value = db.get("1x1".getBytes()); + assert (value != null); + value = db.get("world".getBytes()); + assert (value == null); + value = db.get(readOptions, "world".getBytes()); + assert (value == null); + + byte[] testKey = "asdf".getBytes(); + byte[] testValue = + "asdfghjkl;'?><MNBVCXZ".getBytes(); + db.put(testKey, testValue); + byte[] insufficientArray = new byte[10]; + byte[] enoughArray = new byte[50]; + int len; + len = db.get(testKey, insufficientArray); + assert (len > insufficientArray.length); + len = db.get("asdfjkl;".getBytes(), enoughArray); + assert (len == RocksDB.NOT_FOUND); + len = db.get(testKey, enoughArray); + assert (len == testValue.length); + + len = db.get(readOptions, testKey, insufficientArray); + assert (len > insufficientArray.length); + len = db.get(readOptions, "asdfjkl;".getBytes(), enoughArray); + assert (len == RocksDB.NOT_FOUND); + len = db.get(readOptions, testKey, enoughArray); + assert (len == testValue.length); + + db.remove(testKey); + len = db.get(testKey, enoughArray); + assert (len == RocksDB.NOT_FOUND); + + // repeat the test with WriteOptions + try (final WriteOptions writeOpts = new WriteOptions()) { + writeOpts.setSync(true); + writeOpts.setDisableWAL(true); + db.put(writeOpts, testKey, testValue); + len = db.get(testKey, enoughArray); + assert (len == testValue.length); + assert (new String(testValue).equals( + new String(enoughArray, 0, len))); + } + + try { + for (TickerType statsType : TickerType.values()) { + stats.getTickerCount(statsType); + } + System.out.println("getTickerCount() passed."); + } catch (Exception e) { + System.out.println("Failed in call to getTickerCount()"); + assert (false); //Should never reach here.
+ } + + try { + for (HistogramType histogramType : HistogramType.values()) { + HistogramData data = stats.geHistogramData(histogramType); + } + System.out.println("geHistogramData() passed."); + } catch (Exception e) { + System.out.println("Failed in call to geHistogramData()"); + assert (false); //Should never reach here. + } + + try (final RocksIterator iterator = db.newIterator()) { + + boolean seekToFirstPassed = false; + for (iterator.seekToFirst(); iterator.isValid(); iterator.next()) { + iterator.status(); + assert (iterator.key() != null); + assert (iterator.value() != null); + seekToFirstPassed = true; + } + if (seekToFirstPassed) { + System.out.println("iterator seekToFirst tests passed."); + } + + boolean seekToLastPassed = false; + for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) { + iterator.status(); + assert (iterator.key() != null); + assert (iterator.value() != null); + seekToLastPassed = true; + } + + if (seekToLastPassed) { + System.out.println("iterator seekToLast tests passed."); + } + + iterator.seekToFirst(); + iterator.seek(iterator.key()); + assert (iterator.key() != null); + assert (iterator.value() != null); + + System.out.println("iterator seek test passed."); + + } + System.out.println("iterator tests passed."); + + final List<byte[]> keys = new ArrayList<>(); + try (final RocksIterator iterator = db.newIterator()) { + for (iterator.seekToLast(); iterator.isValid(); iterator.prev()) { + keys.add(iterator.key()); + } + } + + Map<byte[], byte[]> values = db.multiGet(keys); + assert (values.size() == keys.size()); + for (byte[] value1 : values.values()) { + assert (value1 != null); + } + + values = db.multiGet(new ReadOptions(), keys); + assert (values.size() == keys.size()); + for (byte[] value1 : values.values()) { + assert (value1 != null); + } + } catch (RocksDBException e) { + System.err.println(e); + } + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java new file mode 100755 index 0000000000..7d3c5bcd92 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractCompactionFilter.java @@ -0,0 +1,30 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +/** + * A CompactionFilter allows an application to modify/delete a key-value at + * the time of compaction. + * + * At present we just permit an overriding Java class to wrap a C++ + * implementation + */ +public abstract class AbstractCompactionFilter<T extends AbstractSlice<?>> + extends RocksObject { + + protected AbstractCompactionFilter(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Deletes underlying C++ compaction pointer. + * + * Note that this function should be called only after all + * RocksDB instances referencing the compaction filter are closed. + * Otherwise an undefined behavior will occur.
+ */ + @Override + protected final native void disposeInternal(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java new file mode 100755 index 0000000000..78ee371658 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractComparator.java @@ -0,0 +1,106 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Comparators are used by RocksDB to determine + * the ordering of keys. + * + * This class is package private, implementers + * should extend either of the public abstract classes: + * @see org.rocksdb.Comparator + * @see org.rocksdb.DirectComparator + */ +public abstract class AbstractComparator<T extends AbstractSlice<?>> + extends AbstractImmutableNativeReference { + + protected AbstractComparator() { + super(true); + } + + /** + * The name of the comparator. Used to check for comparator + * mismatches (i.e., a DB created with one comparator is + * accessed using a different comparator). + * + * A new name should be used whenever + * the comparator implementation changes in a way that will cause + * the relative ordering of any two keys to change. + * + * Names starting with "rocksdb." are reserved and should not be used. + * + * @return The name of this comparator implementation + */ + public abstract String name(); + + /** + * Three-way key comparison + * + * @param a Slice access to first key + * @param b Slice access to second key + * + * @return Should return either: + * 1) < 0 if "a" < "b" + * 2) == 0 if "a" == "b" + * 3) > 0 if "a" > "b" + */ + public abstract int compare(final T a, final T b); + + /** + * Used to reduce the space requirements + * for internal data structures like index blocks. + * + * If start < limit, you may return a new start which is a + * shorter string in [start, limit). + * + * Simple comparator implementations may return null if they + * wish to use start unchanged. i.e., an implementation of + * this method that does nothing is correct. + * + * @param start String + * @param limit of type T + * + * @return a shorter start, or null + */ + public String findShortestSeparator(final String start, final T limit) { + return null; + } + + /** + * Used to reduce the space requirements + * for internal data structures like index blocks. + * + * You may return a new short key (key1) where + * key1 ≥ key. + * + * Simple comparator implementations may return null if they + * wish to leave the key unchanged. i.e., an implementation of + * this method that does nothing is correct.
+ * + * @param key String + * + * @return a shorter key, or null + */ + public String findShortSuccessor(final String key) { + return null; + } + + /** + * Deletes underlying C++ comparator pointer. + * + * Note that this function should be called only after all + * RocksDB instances referencing the comparator are closed. + * Otherwise an undefined behavior will occur. + */ + @Override + protected void disposeInternal() { + disposeInternal(getNativeHandle()); + } + + protected abstract long getNativeHandle(); + + private native void disposeInternal(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java new file mode 100755 index 0000000000..b0af31ac37 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractImmutableNativeReference.java @@ -0,0 +1,66 @@ +// Copyright (c) 2016, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.concurrent.atomic.AtomicBoolean; + +/** + * Offers functionality for implementations of + * {@link AbstractNativeReference} which have an immutable reference to the + * underlying native C++ object + */ +public abstract class AbstractImmutableNativeReference + extends AbstractNativeReference { + + /** + * A flag indicating whether the current {@code AbstractNativeReference} is + * responsible to free the underlying C++ object + */ + private final AtomicBoolean owningHandle_; + + protected AbstractImmutableNativeReference(final boolean owningHandle) { + this.owningHandle_ = new AtomicBoolean(owningHandle); + } + + @Override + public boolean isOwningHandle() { + return owningHandle_.get(); + } + + /** + * Releases this {@code AbstractNativeReference} from the responsibility of + * freeing the underlying native C++ object + *
This will prevent the object from attempting to delete the underlying + * native object in its finalizer. This must be used when another object + * takes over ownership of the native object or both will attempt to delete + * the underlying object when garbage collected. + * + * When {@code disOwnNativeHandle()} is called, {@code dispose()} will + * subsequently take no action. As a result, incorrect use of this function + * may cause a memory leak. + *
+ * + * @see #dispose() + */ + protected final void disOwnNativeHandle() { + owningHandle_.set(false); + } + + @Override + public void close() { + if (owningHandle_.compareAndSet(true, false)) { + disposeInternal(); + } + } + + /** + * The helper function of {@link AbstractImmutableNativeReference#dispose()} + * which all subclasses of {@code AbstractImmutableNativeReference} must + * implement to release their underlying native C++ objects. + */ + protected abstract void disposeInternal(); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java new file mode 100755 index 0000000000..c5aae48909 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractNativeReference.java @@ -0,0 +1,76 @@ +// Copyright (c) 2016, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * AbstractNativeReference is the base-class of all RocksDB classes that have + * a pointer to a native C++ {@code rocksdb} object. + *
+ * AbstractNativeReference has the {@link AbstractNativeReference#dispose()} + * method, which frees its associated C++ object. + * + * This function should be called manually, however, if required it will be + * called automatically during the regular Java GC process via + * {@link AbstractNativeReference#finalize()}. + * + * Note - Java can only see the long member variable (which is the C++ pointer + * value to the native object), as such it does not know the real size of the + * object and therefore may assign a low GC priority for it; so it is strongly + * suggested that you manually dispose of objects when you are finished with + * them.
+ */ +public abstract class AbstractNativeReference implements AutoCloseable { + + /** + * Returns true if we are responsible for freeing the underlying C++ object + * + * @return true if we are responsible to free the C++ object + * @see #dispose() + */ + protected abstract boolean isOwningHandle(); + + /** + * Frees the underlying C++ object + *
It is strongly recommended that the developer calls this after they + * have finished using the object. + * + * Note that once an instance of {@link AbstractNativeReference} has been + * disposed, calling any of its functions will lead to undefined + * behavior.
+ */ + @Override + public abstract void close(); + + /** + * @deprecated Instead use {@link AbstractNativeReference#close()} + */ + @Deprecated + public final void dispose() { + close(); + } + + /** + * Simply calls {@link AbstractNativeReference#dispose()} to free + * any underlying C++ object reference which has not yet been manually + * released. + * + * @deprecated You should not rely on GC of Rocks objects, and instead should + * either call {@link AbstractNativeReference#close()} manually or make + * use of some sort of ARM (Automatic Resource Management) such as + * Java 7's try-with-resources + * statement + */ + @Override + @Deprecated + protected void finalize() throws Throwable { + if(isOwningHandle()) { + //TODO(AR) log a warning message... developer should have called close() + } + dispose(); + super.finalize(); + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java new file mode 100755 index 0000000000..a1547b3b33 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractRocksIterator.java @@ -0,0 +1,101 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Base class implementation for Rocks Iterators + * in the Java API + * + *
Multiple threads can invoke const methods on a RocksIterator without + * external synchronization, but if any of the threads may call a + * non-const method, all threads accessing the same RocksIterator must use + * external synchronization.
+ * + * @param <P> The type of the Parent Object from which the Rocks Iterator was + * created. This is used by disposeInternal to avoid double-free + * issues with the underlying C++ object. + * @see org.rocksdb.RocksObject + */ +public abstract class AbstractRocksIterator<P extends RocksObject>
+ extends RocksObject implements RocksIteratorInterface { + final P parent_; + + protected AbstractRocksIterator(final P parent, + final long nativeHandle) { + super(nativeHandle); + // parent must point to a valid RocksDB instance. + assert (parent != null); + // RocksIterator must hold a reference to the related parent instance + // to guarantee that while a GC cycle starts RocksIterator instances + // are freed prior to parent instances. + parent_ = parent; + } + + @Override + public boolean isValid() { + assert (isOwningHandle()); + return isValid0(nativeHandle_); + } + + @Override + public void seekToFirst() { + assert (isOwningHandle()); + seekToFirst0(nativeHandle_); + } + + @Override + public void seekToLast() { + assert (isOwningHandle()); + seekToLast0(nativeHandle_); + } + + @Override + public void seek(byte[] target) { + assert (isOwningHandle()); + seek0(nativeHandle_, target, target.length); + } + + @Override + public void next() { + assert (isOwningHandle()); + next0(nativeHandle_); + } + + @Override + public void prev() { + assert (isOwningHandle()); + prev0(nativeHandle_); + } + + @Override + public void status() throws RocksDBException { + assert (isOwningHandle()); + status0(nativeHandle_); + } + + /** + *
Deletes underlying C++ iterator pointer. + * + * Note: the underlying handle can only be safely deleted if the parent + * instance related to a certain RocksIterator is still valid and initialized. + * Therefore {@code disposeInternal()} checks if the parent is initialized + * before freeing the native handle.
+ */ + @Override + protected void disposeInternal() { + if (parent_.isOwningHandle()) { + disposeInternal(nativeHandle_); + } + } + + abstract boolean isValid0(long handle); + abstract void seekToFirst0(long handle); + abstract void seekToLast0(long handle); + abstract void next0(long handle); + abstract void prev0(long handle); + abstract void seek0(long handle, byte[] target, int targetLen); + abstract void status0(long handle) throws RocksDBException; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java new file mode 100755 index 0000000000..b6335a5f4e --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractSlice.java @@ -0,0 +1,177 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Slices are used by RocksDB to provide + * efficient access to keys and values. + * + * This class is package private, implementers + * should extend either of the public abstract classes: + * @see org.rocksdb.Slice + * @see org.rocksdb.DirectSlice + * + * Regards the lifecycle of Java Slices in RocksDB: + * At present when you configure a Comparator from Java, it creates an + * instance of a C++ BaseComparatorJniCallback subclass and + * passes that to RocksDB as the comparator. That subclass of + * BaseComparatorJniCallback creates the Java + * @see org.rocksdb.AbstractSlice subclass Objects. When you dispose + * the Java @see org.rocksdb.AbstractComparator subclass, it disposes the + * C++ BaseComparatorJniCallback subclass, which in turn destroys the + * Java @see org.rocksdb.AbstractSlice subclass Objects. + */ +public abstract class AbstractSlice extends RocksMutableObject { + + protected AbstractSlice() { + super(); + } + + protected AbstractSlice(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Returns the data of the slice. + * + * @return The slice data. Note, the type of access is + * determined by the subclass + * @see org.rocksdb.AbstractSlice#data0(long) + */ + public T data() { + return data0(getNativeHandle()); + } + + /** + * Access to the data is provided by the + * subtype as it needs to handle the + * generic typing. + * + * @param handle The address of the underlying + * native object. + * + * @return Java typed access to the data. + */ + protected abstract T data0(long handle); + + /** + * Return the length (in bytes) of the data. + * + * @return The length in bytes. + */ + public int size() { + return size0(getNativeHandle()); + } + + /** + * Return true if the length of the + * data is zero. + * + * @return true if there is no data, false otherwise. + */ + public boolean empty() { + return empty0(getNativeHandle()); + } + + /** + * Creates a string representation of the data + * + * @param hex When true, the representation + * will be encoded in hexadecimal. + * + * @return The string representation of the data. 
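AbstractComparator above is the extension point for custom key ordering. A hedged sketch of a reverse-order comparator, assuming the concrete org.rocksdb.Comparator and ComparatorOptions classes referenced in its javadoc:

public class ReverseBytewiseComparator extends Comparator {
  public ReverseBytewiseComparator(final ComparatorOptions copt) {
    super(copt);
  }

  @Override
  public String name() {
    // names starting with "rocksdb." are reserved
    return "example.ReverseBytewiseComparator";
  }

  @Override
  public int compare(final Slice a, final Slice b) {
    return -a.compare(b); // invert the default bytewise order
  }
}

It would be installed via options.setComparator(...) before opening the database.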
+ */ + public String toString(final boolean hex) { + return toString0(getNativeHandle(), hex); + } + + @Override + public String toString() { + return toString(false); + } + + /** + * Three-way key comparison + * + * @param other A slice to compare against + * + * @return Should return either: + * 1) < 0 if this < other + * 2) == 0 if this == other + * 3) > 0 if this > other + */ + public int compare(final AbstractSlice other) { + assert (other != null); + if(!isOwningHandle()) { + return other.isOwningHandle() ? -1 : 0; + } else { + if(!other.isOwningHandle()) { + return 1; + } else { + return compare0(getNativeHandle(), other.getNativeHandle()); + } + } + } + + @Override + public int hashCode() { + return toString().hashCode(); + } + + /** + * If other is a slice object, then + * we defer to {@link #compare(AbstractSlice) compare} + * to check equality, otherwise we return false. + * + * @param other Object to test for equality + * + * @return true when {@code this.compare(other) == 0}, + * false otherwise. + */ + @Override + public boolean equals(final Object other) { + if (other != null && other instanceof AbstractSlice) { + return compare((AbstractSlice)other) == 0; + } else { + return false; + } + } + + /** + * Determines whether this slice starts with + * another slice + * + * @param prefix Another slice which may of may not + * be a prefix of this slice. + * + * @return true when this slice starts with the + * {@code prefix} slice + */ + public boolean startsWith(final AbstractSlice prefix) { + if (prefix != null) { + return startsWith0(getNativeHandle(), prefix.getNativeHandle()); + } else { + return false; + } + } + + protected native static long createNewSliceFromString(final String str); + private native int size0(long handle); + private native boolean empty0(long handle); + private native String toString0(long handle, boolean hex); + private native int compare0(long handle, long otherHandle); + private native boolean startsWith0(long handle, long otherHandle); + + /** + * Deletes underlying C++ slice pointer. + * Note that this function should be called only after all + * RocksDB instances referencing the slice are closed. + * Otherwise an undefined behavior will occur. + */ + @Override + protected final native void disposeInternal(final long handle); + +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java new file mode 100755 index 0000000000..cad7ebbd3a --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/AbstractWriteBatch.java @@ -0,0 +1,113 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +package org.rocksdb; + +public abstract class AbstractWriteBatch extends RocksObject + implements WriteBatchInterface { + + protected AbstractWriteBatch(final long nativeHandle) { + super(nativeHandle); + } + + @Override + public int count() { + assert (isOwningHandle()); + return count0(nativeHandle_); + } + + @Override + public void put(byte[] key, byte[] value) { + assert (isOwningHandle()); + put(nativeHandle_, key, key.length, value, value.length); + } + + @Override + public void put(ColumnFamilyHandle columnFamilyHandle, byte[] key, + byte[] value) { + assert (isOwningHandle()); + put(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_); + } + + @Override + public void merge(byte[] key, byte[] value) { + assert (isOwningHandle()); + merge(nativeHandle_, key, key.length, value, value.length); + } + + @Override + public void merge(ColumnFamilyHandle columnFamilyHandle, byte[] key, + byte[] value) { + assert (isOwningHandle()); + merge(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_); + } + + @Override + public void remove(byte[] key) { + assert (isOwningHandle()); + remove(nativeHandle_, key, key.length); + } + + @Override + public void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key) { + assert (isOwningHandle()); + remove(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_); + } + + @Override + public void putLogData(byte[] blob) { + assert (isOwningHandle()); + putLogData(nativeHandle_, blob, blob.length); + } + + @Override + public void clear() { + assert (isOwningHandle()); + clear0(nativeHandle_); + } + + @Override + public void setSavePoint() { + assert (isOwningHandle()); + setSavePoint0(nativeHandle_); + } + + @Override + public void rollbackToSavePoint() throws RocksDBException { + assert (isOwningHandle()); + rollbackToSavePoint0(nativeHandle_); + } + + abstract int count0(final long handle); + + abstract void put(final long handle, final byte[] key, final int keyLen, + final byte[] value, final int valueLen); + + abstract void put(final long handle, final byte[] key, final int keyLen, + final byte[] value, final int valueLen, final long cfHandle); + + abstract void merge(final long handle, final byte[] key, final int keyLen, + final byte[] value, final int valueLen); + + abstract void merge(final long handle, final byte[] key, final int keyLen, + final byte[] value, final int valueLen, final long cfHandle); + + abstract void remove(final long handle, final byte[] key, + final int keyLen); + + abstract void remove(final long handle, final byte[] key, + final int keyLen, final long cfHandle); + + abstract void putLogData(final long handle, final byte[] blob, + final int blobLen); + + abstract void clear0(final long handle); + + abstract void setSavePoint0(final long handle); + + abstract void rollbackToSavePoint0(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java b/external/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java new file mode 100755 index 0000000000..22f1d359e5 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/BackupEngine.java @@ -0,0 +1,221 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
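A hedged usage sketch for the BackupEngine API that follows; db stands for an open RocksDB instance and the backup path is a placeholder:

try (final BackupableDBOptions bopt =
         new BackupableDBOptions("/tmp/rocksdb-backups");
     final BackupEngine be = BackupEngine.open(Env.getDefault(), bopt)) {
  be.createNewBackup(db, true); // flush memtables first, then back up
  be.purgeOldBackups(5);        // keep only the five newest backups
  for (final BackupInfo info : be.getBackupInfo()) {
    System.out.println(info.backupId() + " @ " + info.timestamp());
  }
}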
+package org.rocksdb; + +import java.util.List; + +/** + * BackupEngine allows you to back up + * and restore the database + * + * Be aware that `new BackupEngine` takes time proportional to the number + * of existing backups. So if you have a slow filesystem to backup (like HDFS) + * and you have a lot of backups then restoring can take some time. + * That's why we recommend limiting the number of backups. + * We also recommend keeping the BackupEngine alive, rather than recreating it + * every time you need to do a backup. + */ +public class BackupEngine extends RocksObject implements AutoCloseable { + + protected BackupEngine(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Opens a new Backup Engine + * + * @param env The environment that the backup engine should operate within + * @param options Any options for the backup engine + * + * @return A new BackupEngine instance + * @throws RocksDBException thrown if the backup engine could not be opened + */ + public static BackupEngine open(final Env env, + final BackupableDBOptions options) throws RocksDBException { + return new BackupEngine(open(env.nativeHandle_, options.nativeHandle_)); + } + + /** + * Captures the state of the database in the latest backup + * + * Just a convenience for {@link #createNewBackup(RocksDB, boolean)} with + * the flushBeforeBackup parameter set to false + * + * @param db The database to back up + * + * Note - This method is not thread safe + * + * @throws RocksDBException thrown if a new backup could not be created + */ + public void createNewBackup(final RocksDB db) throws RocksDBException { + createNewBackup(db, false); + } + + /** + * Captures the state of the database in the latest backup + * + * @param db The database to back up + * @param flushBeforeBackup When true, the Backup Engine will first issue a + * memtable flush and only then copy the DB files to + * the backup directory. Doing so will prevent log + * files from being copied to the backup directory + * (since flush will delete them). + * When false, the Backup Engine will not issue a + * flush before starting the backup. In that case, + * the backup will also include log files + * corresponding to live memtables. The backup will + * always be consistent with the current state of the + * database regardless of the flushBeforeBackup + * parameter. + * + * Note - This method is not thread safe + * + * @throws RocksDBException thrown if a new backup could not be created + */ + public void createNewBackup( + final RocksDB db, final boolean flushBeforeBackup) + throws RocksDBException { + assert (isOwningHandle()); + createNewBackup(nativeHandle_, db.nativeHandle_, flushBeforeBackup); + } + + /** + * Gets information about the available + * backups + * + * @return A list of information about each available backup + */ + public List<BackupInfo> getBackupInfo() { + assert (isOwningHandle()); + return getBackupInfo(nativeHandle_); + } + + /** + *
Returns a list of corrupted backup ids. If there + * is no corrupted backup the method will return an + * empty list. + * + * @return array of backup ids as int ids. + */ + public int[] getCorruptedBackups() { + assert(isOwningHandle()); + return getCorruptedBackups(nativeHandle_); + } + + /** + * Will delete all the files we don't need anymore. It will + * do the full scan of the files/ directory and delete all the + * files that are not referenced.
+ * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void garbageCollect() throws RocksDBException { + assert(isOwningHandle()); + garbageCollect(nativeHandle_); + } + + /** + * Deletes old backups, keeping just the latest numBackupsToKeep + * + * @param numBackupsToKeep The latest n backups to keep + * + * @throws RocksDBException thrown if the old backups could not be deleted + */ + public void purgeOldBackups( + final int numBackupsToKeep) throws RocksDBException { + assert (isOwningHandle()); + purgeOldBackups(nativeHandle_, numBackupsToKeep); + } + + /** + * Deletes a backup + * + * @param backupId The id of the backup to delete + * + * @throws RocksDBException thrown if the backup could not be deleted + */ + public void deleteBackup(final int backupId) throws RocksDBException { + assert (isOwningHandle()); + deleteBackup(nativeHandle_, backupId); + } + + /** + * Restore the database from a backup + * + * IMPORTANT: if options.share_table_files == true and you restore the DB + * from some backup that is not the latest, and you start creating new + * backups from the new DB, they will probably fail! + * + * Example: Let's say you have backups 1, 2, 3, 4, 5 and you restore 3. + * If you add new data to the DB and try creating a new backup now, the + * database will diverge from backups 4 and 5 and the new backup will fail. + * If you want to create new backup, you will first have to delete backups 4 + * and 5. + * + * @param backupId The id of the backup to restore + * @param dbDir The directory to restore the backup to, i.e. where your + * database is + * @param walDir The location of the log files for your database, + * often the same as dbDir + * @param restoreOptions Options for controlling the restore + * + * @throws RocksDBException thrown if the database could not be restored + */ + public void restoreDbFromBackup( + final int backupId, final String dbDir, final String walDir, + final RestoreOptions restoreOptions) throws RocksDBException { + assert (isOwningHandle()); + restoreDbFromBackup(nativeHandle_, backupId, dbDir, walDir, + restoreOptions.nativeHandle_); + } + + /** + * Restore the database from the latest backup + * + * @param dbDir The directory to restore the backup to, i.e. 
where your + * database is + * @param walDir The location of the log files for your database, often the + * same as dbDir + * @param restoreOptions Options for controlling the restore + * + * @throws RocksDBException thrown if the database could not be restored + */ + public void restoreDbFromLatestBackup( + final String dbDir, final String walDir, + final RestoreOptions restoreOptions) throws RocksDBException { + assert (isOwningHandle()); + restoreDbFromLatestBackup(nativeHandle_, dbDir, walDir, + restoreOptions.nativeHandle_); + } + + private native static long open(final long env, + final long backupableDbOptions) throws RocksDBException; + + private native void createNewBackup(final long handle, final long dbHandle, + final boolean flushBeforeBackup) throws RocksDBException; + + private native List<BackupInfo> getBackupInfo(final long handle); + + private native int[] getCorruptedBackups(final long handle); + + private native void garbageCollect(final long handle) throws RocksDBException; + + private native void purgeOldBackups(final long handle, + final int numBackupsToKeep) throws RocksDBException; + + private native void deleteBackup(final long handle, final int backupId) + throws RocksDBException; + + private native void restoreDbFromBackup(final long handle, final int backupId, + final String dbDir, final String walDir, final long restoreOptionsHandle) + throws RocksDBException; + + private native void restoreDbFromLatestBackup(final long handle, + final String dbDir, final String walDir, final long restoreOptionsHandle) + throws RocksDBException; + + @Override protected final native void disposeInternal(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java b/external/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java new file mode 100755 index 0000000000..4f3a628458 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/BackupInfo.java @@ -0,0 +1,67 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +/** + * Instances of this class describe a Backup made by + * {@link org.rocksdb.BackupableDB}. + */ +public class BackupInfo { + + /** + * Package private constructor used to create instances + * of BackupInfo by {@link org.rocksdb.BackupableDB} and + * {@link org.rocksdb.RestoreBackupableDB}. + * + * @param backupId id of backup + * @param timestamp timestamp of backup + * @param size size of backup + * @param numberFiles number of files related to this backup. + */ + BackupInfo(final int backupId, final long timestamp, final long size, + final int numberFiles) { + backupId_ = backupId; + timestamp_ = timestamp; + size_ = size; + numberFiles_ = numberFiles; + } + + /** + * + * @return the backup id. + */ + public int backupId() { + return backupId_; + } + + /** + * + * @return the timestamp of the backup. + */ + public long timestamp() { + return timestamp_; + } + + /** + * + * @return the size of the backup + */ + public long size() { + return size_; + } + + /** + * + * @return the number of files of this backup.
+ */ + public int numberFiles() { + return numberFiles_; + } + + private int backupId_; + private long timestamp_; + private long size_; + private int numberFiles_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/BackupableDB.java b/external/rocksdb/java/src/main/java/org/rocksdb/BackupableDB.java new file mode 100755 index 0000000000..cebd69f674 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/BackupableDB.java @@ -0,0 +1,167 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.List; + +/** + *
A subclass of RocksDB which supports + * backup-related operations.
+ * + * @see org.rocksdb.BackupableDBOptions + */ +public class BackupableDB extends RocksDB { + /** + *
Open a {@code BackupableDB} under the specified path. + * Note that the backup path should be set properly in the + * input BackupableDBOptions.
+ * + * @param opt {@link org.rocksdb.Options} to set for the database. + * @param bopt {@link org.rocksdb.BackupableDBOptions} to use. + * @param db_path Path to store data to. The path for storing the backup + * should be specified in the {@link org.rocksdb.BackupableDBOptions}. + * + * @return {@link BackupableDB} reference to the opened database. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static BackupableDB open( + final Options opt, final BackupableDBOptions bopt, final String db_path) + throws RocksDBException { + + final RocksDB db = RocksDB.open(opt, db_path); + final BackupableDB bdb = new BackupableDB(open(db.nativeHandle_, + bopt.nativeHandle_)); + + // Prevent the RocksDB object from attempting to delete + // the underlying C++ DB object. + db.disOwnNativeHandle(); + + return bdb; + } + + /** + * Captures the state of the database in the latest backup. + * Note that this function is not thread-safe.
+ * + * @param flushBeforeBackup if true, then all data will be flushed + * before creating backup. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void createNewBackup(final boolean flushBeforeBackup) + throws RocksDBException { + assert(isOwningHandle()); + createNewBackup(nativeHandle_, flushBeforeBackup); + } + + /** + *

Deletes old backups, keeping the latest numBackupsToKeep alive.

+ * + * @param numBackupsToKeep Number of latest backups to keep. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void purgeOldBackups(final int numBackupsToKeep) + throws RocksDBException { + assert(isOwningHandle()); + purgeOldBackups(nativeHandle_, numBackupsToKeep); + } + + /** + *

Deletes a specific backup.

+ * + * @param backupId of backup to delete. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void deleteBackup(final int backupId) throws RocksDBException { + assert(isOwningHandle()); + deleteBackup0(nativeHandle_, backupId); + } + + /** + *

Returns a list of {@link BackupInfo} instances, which describe + * already made backups.

+ * + * @return List of {@link BackupInfo} instances. + */ + public List<BackupInfo> getBackupInfos() { + assert(isOwningHandle()); + return getBackupInfo(nativeHandle_); + } + + /** + *

Returns a list of corrupted backup ids. If there + * are no corrupted backups the method will return an + * empty list.

+ * + * @return array of backup ids as ints. + */ + public int[] getCorruptedBackups() { + assert(isOwningHandle()); + return getCorruptedBackups(nativeHandle_); + } + + /** + *

Will delete all the files we don't need anymore. It will + * do a full scan of the files/ directory and delete all the + * files that are not referenced.

+ * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void garbageCollect() throws RocksDBException { + assert(isOwningHandle()); + garbageCollect(nativeHandle_); + } + + /** + *

Close the BackupableDB instance and release its resources.

+ * + *

Internally, {@link BackupableDB} owns the {@code rocksdb::DB} + * pointer to its associated {@link org.rocksdb.RocksDB}. + * The release of that RocksDB pointer is handled in the destructor + * of the C++ {@code rocksdb::BackupableDB} and should be transparent + * to Java developers.
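To make that lifecycle concrete, a minimal end-to-end sketch (not part of this patch; paths are placeholders, and the backup directory must already exist and be writable, as enforced by BackupableDBOptions below):

import org.rocksdb.*;

public class BackupExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options opt = new Options().setCreateIfMissing(true); // setCreateIfMissing assumed from the standard Options API
    BackupableDBOptions bopt = new BackupableDBOptions("/tmp/rocksdb-backups");
    BackupableDB bdb = BackupableDB.open(opt, bopt, "/tmp/rocksdb");
    try {
      bdb.put("key".getBytes(), "value".getBytes()); // put() is inherited from RocksDB
      bdb.createNewBackup(true);                     // flush memtables before backing up
      for (BackupInfo info : bdb.getBackupInfos()) {
        System.out.println(info.backupId() + " @ " + info.timestamp()
            + ": " + info.size() + " bytes in " + info.numberFiles() + " files");
      }
      bdb.purgeOldBackups(2); // keep only the two latest backups
      bdb.garbageCollect();   // drop backup files that are no longer referenced
    } finally {
      bdb.close();
      bopt.dispose();
      opt.dispose();
    }
  }
}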

+ */ + @Override public void close() { + super.close(); + } + + /** + *

A protected constructor that will be used in the static + * factory method {@link #open(Options, BackupableDBOptions, String)}. + *

+ * + * @param nativeHandle The native handle of the C++ BackupableDB object + */ + protected BackupableDB(final long nativeHandle) { + super(nativeHandle); + } + + @Override protected void finalize() throws Throwable { + close(); + super.finalize(); + } + + protected native static long open(final long rocksDBHandle, + final long backupDBOptionsHandle); + protected native void createNewBackup(long handle, boolean flag) + throws RocksDBException; + protected native void purgeOldBackups(long handle, int numBackupsToKeep) + throws RocksDBException; + private native void deleteBackup0(long nativeHandle, int backupId) + throws RocksDBException; + protected native List<BackupInfo> getBackupInfo(long handle); + private native int[] getCorruptedBackups(long handle); + private native void garbageCollect(long handle) + throws RocksDBException; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/BackupableDBOptions.java b/external/rocksdb/java/src/main/java/org/rocksdb/BackupableDBOptions.java new file mode 100755 index 0000000000..89591de82d --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/BackupableDBOptions.java @@ -0,0 +1,272 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.io.File; + +/** + *

BackupableDBOptions to control the behavior of a backupable database. + * It will be used during the creation of a {@link org.rocksdb.BackupableDB}. + *

+ *

Note that dispose() must be called before an Options instance + * becomes out-of-scope to release the allocated memory in C++.
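A small sketch of that dispose() contract (illustrative only; the path is a placeholder):

import org.rocksdb.*;

public class DisposeExample {
  public static void main(String[] args) {
    RocksDB.loadLibrary();
    BackupableDBOptions bopt = new BackupableDBOptions("/tmp/rocksdb-backups");
    try {
      bopt.setSync(true); // ... configure and use the options ...
    } finally {
      bopt.dispose();     // deterministically frees the native C++ allocation
    }
  }
}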

+ * + * @see org.rocksdb.BackupableDB + */ +public class BackupableDBOptions extends RocksObject { + + /** + *

BackupableDBOptions constructor.

+ * + * @param path Where to keep the backup files. Has to be different from the db + * name. Best to set this to {@code db name_ + "/backups"} + * @throws java.lang.IllegalArgumentException if an illegal path is used. + */ + public BackupableDBOptions(final String path) { + super(newBackupableDBOptions(ensureWritableFile(path))); + } + + private static String ensureWritableFile(final String path) { + final File backupPath = path == null ? null : new File(path); + if (backupPath == null || !backupPath.isDirectory() || + !backupPath.canWrite()) { + throw new IllegalArgumentException("Illegal path provided."); + } else { + return path; + } + } + + /** + *

Returns the path to the BackupableDB directory.

+ * + * @return the path to the BackupableDB directory. + */ + public String backupDir() { + assert(isOwningHandle()); + return backupDir(nativeHandle_); + } + + /** + *

Share table files between backups.

+ * + * @param shareTableFiles If {@code share_table_files == true}, backup will + * assume that table files with the same name have the same contents. This + * enables incremental backups and avoids unnecessary data copies. If + * {@code share_table_files == false}, each backup will be on its own and + * will not share any data with other backups. + * + *

Default: true

+ * + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setShareTableFiles(final boolean shareTableFiles) { + assert(isOwningHandle()); + setShareTableFiles(nativeHandle_, shareTableFiles); + return this; + } + + /** + *

Share table files between backups.

+ * + * @return boolean value indicating if SST files will be shared between + * backups. + */ + public boolean shareTableFiles() { + assert(isOwningHandle()); + return shareTableFiles(nativeHandle_); + } + + /** + *

Set synchronous backups.

+ * + * @param sync If {@code sync == true}, we can guarantee you'll get a consistent + * backup even on a machine crash/reboot. The backup process is slower with sync + * enabled. If {@code sync == false}, we don't guarantee anything on machine + * reboot; however, chances are some of the backups are consistent. + * + *

Default: true

+ * + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setSync(final boolean sync) { + assert(isOwningHandle()); + setSync(nativeHandle_, sync); + return this; + } + + /** + *

Returns whether synchronous backups are activated.

+ * + * @return boolean value if synchronous backups are configured. + */ + public boolean sync() { + assert(isOwningHandle()); + return sync(nativeHandle_); + } + + /** + *

Set if old data will be destroyed.

+ * + * @param destroyOldData If true, it will delete whatever backups there are + * already. + * + *

Default: false

+ * + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setDestroyOldData(final boolean destroyOldData) { + assert(isOwningHandle()); + setDestroyOldData(nativeHandle_, destroyOldData); + return this; + } + + /** + *

Returns whether old data will be destroyed when performing new backups.

+ * + * @return boolean value indicating if old data will be destroyed. + */ + public boolean destroyOldData() { + assert(isOwningHandle()); + return destroyOldData(nativeHandle_); + } + + /** + *

Set if log files shall be persisted.

+ * + * @param backupLogFiles If false, we won't back up log files. This option can + * be useful for backing up in-memory databases where log files are + * persisted, but table files are in memory. + * + *

Default: true

+ * + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setBackupLogFiles(final boolean backupLogFiles) { + assert(isOwningHandle()); + setBackupLogFiles(nativeHandle_, backupLogFiles); + return this; + } + + /** + *

Returns whether log files will be persisted.

+ * + * @return boolean value indicating if log files will be persisted. + */ + public boolean backupLogFiles() { + assert(isOwningHandle()); + return backupLogFiles(nativeHandle_); + } + + /** + *

Set backup rate limit.

+ * + * @param backupRateLimit Max bytes that can be transferred in a second during + * backup. If 0 or negative, then go as fast as you can. + * + *

Default: 0

+ * + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setBackupRateLimit(long backupRateLimit) { + assert(isOwningHandle()); + backupRateLimit = (backupRateLimit <= 0) ? 0 : backupRateLimit; + setBackupRateLimit(nativeHandle_, backupRateLimit); + return this; + } + + /** + *

Return the backup rate limit, which describes the max bytes that can be + * transferred in a second during backup.

+ * + * @return numerical value describing the backup transfer limit in bytes per + * second. + */ + public long backupRateLimit() { + assert(isOwningHandle()); + return backupRateLimit(nativeHandle_); + } + + /** + *

Set restore rate limit.

+ * + * @param restoreRateLimit Max bytes that can be transferred in a second + * during restore. If 0 or negative, then go as fast as you can. + * + *

Default: 0

+ * + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setRestoreRateLimit(long restoreRateLimit) { + assert(isOwningHandle()); + restoreRateLimit = (restoreRateLimit <= 0) ? 0 : restoreRateLimit; + setRestoreRateLimit(nativeHandle_, restoreRateLimit); + return this; + } + + /** + *

Return the restore rate limit, which describes the max bytes that can be + * transferred in a second during restore.

+ * + * @return numerical value describing the restore transfer limit in bytes per + * second. + */ + public long restoreRateLimit() { + assert(isOwningHandle()); + return restoreRateLimit(nativeHandle_); + } + + /** + *

Only used if share_table_files is set to true. If true, will consider + * that backups can come from different databases, hence an SST file is not uniquely + * identified by its name, but by the triple (file name, crc32, file length). + *

+ * + * @param shareFilesWithChecksum boolean value indicating if SST files are + * stored using the triple (file name, crc32, file length) rather than just the name. + * + *

Note: this is an experimental option; you'll need to set it manually. + * Turn it on only if you know what you're doing.
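Taken together these setters compose fluently; a hedged configuration sketch (placeholder path; the directory must already exist and be writable, as enforced by the constructor above):

import org.rocksdb.*;

public class BackupOptionsExample {
  public static void main(String[] args) {
    RocksDB.loadLibrary();
    BackupableDBOptions bopt = new BackupableDBOptions("/tmp/rocksdb-backups")
        .setShareTableFiles(true)             // enables incremental backups
        .setSync(true)                        // survive machine crash/reboot
        .setBackupLogFiles(true)
        .setBackupRateLimit(10 * 1024 * 1024) // throttle backups to ~10 MB/s
        .setDestroyOldData(false);
    System.out.println("backing up to " + bopt.backupDir());
    bopt.dispose();
  }
}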

+ * + *

Default: false

+ * + * @return instance of current BackupableDBOptions. + */ + public BackupableDBOptions setShareFilesWithChecksum( + final boolean shareFilesWithChecksum) { + assert(isOwningHandle()); + setShareFilesWithChecksum(nativeHandle_, shareFilesWithChecksum); + return this; + } + + /** + *

Returns whether share files with checksum is active.

+ * + * @return boolean value indicating if share files with checksum + * is active. + */ + public boolean shareFilesWithChecksum() { + assert(isOwningHandle()); + return shareFilesWithChecksum(nativeHandle_); + } + + private native static long newBackupableDBOptions(final String path); + private native String backupDir(long handle); + private native void setShareTableFiles(long handle, boolean flag); + private native boolean shareTableFiles(long handle); + private native void setSync(long handle, boolean flag); + private native boolean sync(long handle); + private native void setDestroyOldData(long handle, boolean flag); + private native boolean destroyOldData(long handle); + private native void setBackupLogFiles(long handle, boolean flag); + private native boolean backupLogFiles(long handle); + private native void setBackupRateLimit(long handle, long rateLimit); + private native long backupRateLimit(long handle); + private native void setRestoreRateLimit(long handle, long rateLimit); + private native long restoreRateLimit(long handle); + private native void setShareFilesWithChecksum(long handle, boolean flag); + private native boolean shareFilesWithChecksum(long handle); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java b/external/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java new file mode 100755 index 0000000000..050eff1c89 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/BlockBasedTableConfig.java @@ -0,0 +1,452 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +/** + * The config for the block based table sst format. + * + * BlockBasedTable is RocksDB's default SST file format. + */ +public class BlockBasedTableConfig extends TableFormatConfig { + + public BlockBasedTableConfig() { + noBlockCache_ = false; + blockCacheSize_ = 8 * 1024 * 1024; + blockCacheNumShardBits_ = 0; + blockSize_ = 4 * 1024; + blockSizeDeviation_ = 10; + blockRestartInterval_ = 16; + wholeKeyFiltering_ = true; + filter_ = null; + cacheIndexAndFilterBlocks_ = false; + pinL0FilterAndIndexBlocksInCache_ = false; + hashIndexAllowCollision_ = true; + blockCacheCompressedSize_ = 0; + blockCacheCompressedNumShardBits_ = 0; + checksumType_ = ChecksumType.kCRC32c; + indexType_ = IndexType.kBinarySearch; + formatVersion_ = 0; + } + + /** + * Disable block cache. If this is set to true, + * then no block cache should be used, and the block_cache should + * point to a {@code nullptr} object. + * Default: false + * + * @param noBlockCache if use block cache + * @return the reference to the current config. + */ + public BlockBasedTableConfig setNoBlockCache(final boolean noBlockCache) { + noBlockCache_ = noBlockCache; + return this; + } + + /** + * @return if block cache is disabled + */ + public boolean noBlockCache() { + return noBlockCache_; + } + + /** + * Set the amount of cache in bytes that will be used by RocksDB. + * If cacheSize is non-positive, then cache will not be used. + * DEFAULT: 8M + * + * @param blockCacheSize block cache size in bytes + * @return the reference to the current config.
+ */ + public BlockBasedTableConfig setBlockCacheSize(final long blockCacheSize) { + blockCacheSize_ = blockCacheSize; + return this; + } + + /** + * @return block cache size in bytes + */ + public long blockCacheSize() { + return blockCacheSize_; + } + + /** + * Controls the number of shards for the block cache. + * This is applied only if cacheSize is set to non-negative. + * + * @param blockCacheNumShardBits the number of shard bits. The resulting + * number of shards would be 2 ^ numShardBits. Any negative + * number means use default settings." + * @return the reference to the current option. + */ + public BlockBasedTableConfig setCacheNumShardBits( + final int blockCacheNumShardBits) { + blockCacheNumShardBits_ = blockCacheNumShardBits; + return this; + } + + /** + * Returns the number of shard bits used in the block cache. + * The resulting number of shards would be 2 ^ (returned value). + * Any negative number means use default settings. + * + * @return the number of shard bits used in the block cache. + */ + public int cacheNumShardBits() { + return blockCacheNumShardBits_; + } + + /** + * Approximate size of user data packed per block. Note that the + * block size specified here corresponds to uncompressed data. The + * actual size of the unit read from disk may be smaller if + * compression is enabled. This parameter can be changed dynamically. + * Default: 4K + * + * @param blockSize block size in bytes + * @return the reference to the current config. + */ + public BlockBasedTableConfig setBlockSize(final long blockSize) { + blockSize_ = blockSize; + return this; + } + + /** + * @return block size in bytes + */ + public long blockSize() { + return blockSize_; + } + + /** + * This is used to close a block before it reaches the configured + * 'block_size'. If the percentage of free space in the current block is less + * than this specified number and adding a new record to the block will + * exceed the configured block size, then this block will be closed and the + * new record will be written to the next block. + * Default is 10. + * + * @param blockSizeDeviation the deviation to block size allowed + * @return the reference to the current config. + */ + public BlockBasedTableConfig setBlockSizeDeviation( + final int blockSizeDeviation) { + blockSizeDeviation_ = blockSizeDeviation; + return this; + } + + /** + * @return the hash table ratio. + */ + public int blockSizeDeviation() { + return blockSizeDeviation_; + } + + /** + * Set block restart interval + * + * @param restartInterval block restart interval. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setBlockRestartInterval( + final int restartInterval) { + blockRestartInterval_ = restartInterval; + return this; + } + + /** + * @return block restart interval + */ + public int blockRestartInterval() { + return blockRestartInterval_; + } + + /** + * If true, place whole keys in the filter (not just prefixes). + * This must generally be true for gets to be efficient. + * Default: true + * + * @param wholeKeyFiltering if enable whole key filtering + * @return the reference to the current config. + */ + public BlockBasedTableConfig setWholeKeyFiltering( + final boolean wholeKeyFiltering) { + wholeKeyFiltering_ = wholeKeyFiltering; + return this; + } + + /** + * @return if whole key filtering is enabled + */ + public boolean wholeKeyFiltering() { + return wholeKeyFiltering_; + } + + /** + * Use the specified filter policy to reduce disk reads. 
+ * + * {@link org.rocksdb.Filter} should not be disposed before options instances + * using this filter is disposed. If {@link Filter#dispose()} function is not + * called, then filter object will be GC'd automatically. + * + * {@link org.rocksdb.Filter} instance can be re-used in multiple options + * instances. + * + * @param filter {@link org.rocksdb.Filter} Filter Policy java instance. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setFilter( + final Filter filter) { + filter_ = filter; + return this; + } + + /** + * Indicating if we'd put index/filter blocks to the block cache. + If not specified, each "table reader" object will pre-load index/filter + block during table initialization. + * + * @return if index and filter blocks should be put in block cache. + */ + public boolean cacheIndexAndFilterBlocks() { + return cacheIndexAndFilterBlocks_; + } + + /** + * Indicating if we'd put index/filter blocks to the block cache. + If not specified, each "table reader" object will pre-load index/filter + block during table initialization. + * + * @param cacheIndexAndFilterBlocks and filter blocks should be put in block cache. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setCacheIndexAndFilterBlocks( + final boolean cacheIndexAndFilterBlocks) { + cacheIndexAndFilterBlocks_ = cacheIndexAndFilterBlocks; + return this; + } + + /** + * Indicating if we'd like to pin L0 index/filter blocks to the block cache. + If not specified, defaults to false. + * + * @return if L0 index and filter blocks should be pinned to the block cache. + */ + public boolean pinL0FilterAndIndexBlocksInCache() { + return pinL0FilterAndIndexBlocksInCache_; + } + + /** + * Indicating if we'd like to pin L0 index/filter blocks to the block cache. + If not specified, defaults to false. + * + * @param pinL0FilterAndIndexBlocksInCache pin blocks in block cache + * @return the reference to the current config. + */ + public BlockBasedTableConfig setPinL0FilterAndIndexBlocksInCache( + final boolean pinL0FilterAndIndexBlocksInCache) { + pinL0FilterAndIndexBlocksInCache_ = pinL0FilterAndIndexBlocksInCache; + return this; + } + + /** + * Influence the behavior when kHashSearch is used. + if false, stores a precise prefix to block range mapping + if true, does not store prefix and allows prefix hash collision + (less memory consumption) + * + * @return if hash collisions should be allowed. + */ + public boolean hashIndexAllowCollision() { + return hashIndexAllowCollision_; + } + + /** + * Influence the behavior when kHashSearch is used. + if false, stores a precise prefix to block range mapping + if true, does not store prefix and allows prefix hash collision + (less memory consumption) + * + * @param hashIndexAllowCollision points out if hash collisions should be allowed. + * @return the reference to the current config. + */ + public BlockBasedTableConfig setHashIndexAllowCollision( + final boolean hashIndexAllowCollision) { + hashIndexAllowCollision_ = hashIndexAllowCollision; + return this; + } + + /** + * Size of compressed block cache. If 0, then block_cache_compressed is set + * to null. + * + * @return size of compressed block cache. + */ + public long blockCacheCompressedSize() { + return blockCacheCompressedSize_; + } + + /** + * Size of compressed block cache. If 0, then block_cache_compressed is set + * to null. + * + * @param blockCacheCompressedSize of compressed block cache. + * @return the reference to the current config. 
+ */ + public BlockBasedTableConfig setBlockCacheCompressedSize( + final long blockCacheCompressedSize) { + blockCacheCompressedSize_ = blockCacheCompressedSize; + return this; + } + + /** + * Controls the number of shards for the block compressed cache. + * This is applied only if blockCompressedCacheSize is set to non-negative. + * + * @return numShardBits the number of shard bits. The resulting + * number of shards would be 2 ^ numShardBits. Any negative + * number means use default settings. + */ + public int blockCacheCompressedNumShardBits() { + return blockCacheCompressedNumShardBits_; + } + + /** + * Controls the number of shards for the block compressed cache. + * This is applied only if blockCompressedCacheSize is set to non-negative. + * + * @param blockCacheCompressedNumShardBits the number of shard bits. The resulting + * number of shards would be 2 ^ numShardBits. Any negative + * number means use default settings." + * @return the reference to the current option. + */ + public BlockBasedTableConfig setBlockCacheCompressedNumShardBits( + final int blockCacheCompressedNumShardBits) { + blockCacheCompressedNumShardBits_ = blockCacheCompressedNumShardBits; + return this; + } + + /** + * Sets the checksum type to be used with this table. + * + * @param checksumType {@link org.rocksdb.ChecksumType} value. + * @return the reference to the current option. + */ + public BlockBasedTableConfig setChecksumType( + final ChecksumType checksumType) { + checksumType_ = checksumType; + return this; + } + + /** + * + * @return the currently set checksum type + */ + public ChecksumType checksumType() { + return checksumType_; + } + + /** + * Sets the index type to used with this table. + * + * @param indexType {@link org.rocksdb.IndexType} value + * @return the reference to the current option. + */ + public BlockBasedTableConfig setIndexType( + final IndexType indexType) { + indexType_ = indexType; + return this; + } + + /** + * + * @return the currently set index type + */ + public IndexType indexType() { + return indexType_; + } + + /** + *

We currently have three versions:
+ * 0 - This version is currently written out by all RocksDB's versions by default. Can be read by really old RocksDB's. Doesn't support changing checksum (default is CRC32).
+ * 1 - Can be read by RocksDB's versions since 3.0. Supports non-default checksum, like xxHash. It is written by RocksDB when BlockBasedTableOptions::checksum is something other than kCRC32c. (version 0 is silently upconverted)
+ * 2 - Can be read by RocksDB's versions since 3.10. Changes the way we encode compressed blocks with LZ4, BZip2 and Zlib compression. If you don't plan to run RocksDB before version 3.10, you should probably use this.
+ *
+ * This option only affects newly written tables. When reading existing tables, the information about version is read from the footer.
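A hedged sketch tying these table-format knobs together; it uses the ColumnFamilyOptions.setTableFormatConfig hookup that appears later in this patch, and goes no further than the methods shown above:

import org.rocksdb.*;

public class TableConfigExample {
  public static void main(String[] args) {
    BlockBasedTableConfig tableConfig = new BlockBasedTableConfig()
        .setBlockSize(16 * 1024)              // 16 KB of uncompressed data per block
        .setBlockCacheSize(64 * 1024 * 1024)  // 64 MB block cache
        .setCacheIndexAndFilterBlocks(true)   // cache index/filter blocks too
        .setChecksumType(ChecksumType.kCRC32c)
        .setFormatVersion(2);                 // readable by RocksDB >= 3.10
    ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setTableFormatConfig(tableConfig);
    System.out.println(cfOpts.tableFactoryName());
    cfOpts.dispose();
  }
}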

+ * + * @param formatVersion integer representing the version to be used. + * @return the reference to the current option. + */ + public BlockBasedTableConfig setFormatVersion( + final int formatVersion) { + assert(formatVersion >= 0 && formatVersion <= 2); + formatVersion_ = formatVersion; + return this; + } + + /** + * + * @return the currently configured format version. + * See also: {@link #setFormatVersion(int)}. + */ + public int formatVersion() { + return formatVersion_; + } + + + + @Override protected long newTableFactoryHandle() { + long filterHandle = 0; + if (filter_ != null) { + filterHandle = filter_.nativeHandle_; + } + + return newTableFactoryHandle(noBlockCache_, blockCacheSize_, + blockCacheNumShardBits_, blockSize_, blockSizeDeviation_, + blockRestartInterval_, wholeKeyFiltering_, + filterHandle, cacheIndexAndFilterBlocks_, + pinL0FilterAndIndexBlocksInCache_, + hashIndexAllowCollision_, blockCacheCompressedSize_, + blockCacheCompressedNumShardBits_, + checksumType_.getValue(), indexType_.getValue(), + formatVersion_); + } + + private native long newTableFactoryHandle( + boolean noBlockCache, long blockCacheSize, int blockCacheNumShardBits, + long blockSize, int blockSizeDeviation, int blockRestartInterval, + boolean wholeKeyFiltering, long filterPolicyHandle, + boolean cacheIndexAndFilterBlocks, boolean pinL0FilterAndIndexBlocksInCache, + boolean hashIndexAllowCollision, long blockCacheCompressedSize, + int blockCacheCompressedNumShardBits, byte checkSumType, + byte indexType, int formatVersion); + + private boolean cacheIndexAndFilterBlocks_; + private boolean pinL0FilterAndIndexBlocksInCache_; + private IndexType indexType_; + private boolean hashIndexAllowCollision_; + private ChecksumType checksumType_; + private boolean noBlockCache_; + private long blockSize_; + private long blockCacheSize_; + private int blockCacheNumShardBits_; + private long blockCacheCompressedSize_; + private int blockCacheCompressedNumShardBits_; + private int blockSizeDeviation_; + private int blockRestartInterval_; + private Filter filter_; + private boolean wholeKeyFiltering_; + private int formatVersion_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java b/external/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java new file mode 100755 index 0000000000..a8c2f7e7f9 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/BloomFilter.java @@ -0,0 +1,79 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Bloom filter policy that uses a bloom filter with approximately + * the specified number of bits per key. + * + *

+ * Note: if you are using a custom comparator that ignores some parts + * of the keys being compared, you must not use this {@code BloomFilter} + * and must provide your own FilterPolicy that also ignores the + * corresponding parts of the keys. For example, if the comparator + * ignores trailing spaces, it would be incorrect to use a + * FilterPolicy (like {@code BloomFilter}) that does not ignore + * trailing spaces in keys.

+ */ +public class BloomFilter extends Filter { + + private static final int DEFAULT_BITS_PER_KEY = 10; + private static final boolean DEFAULT_MODE = true; + + /** + * BloomFilter constructor + * + *

+ * Callers must delete the result after any database that is using the + * result has been closed.

+ */ + public BloomFilter() { + this(DEFAULT_BITS_PER_KEY, DEFAULT_MODE); + } + + /** + * BloomFilter constructor + * + *

+ * bits_per_key: bits per key in bloom filter. A good value for bits_per_key + * is 10, which yields a filter with ~ 1% false positive rate. + *

+ *

+ * Callers must delete the result after any database that is using the + * result has been closed.

+ * + * @param bitsPerKey number of bits to use + */ + public BloomFilter(final int bitsPerKey) { + this(bitsPerKey, DEFAULT_MODE); + } + + /** + * BloomFilter constructor + * + *

+ * bits_per_key: bits per key in bloom filter. A good value for bits_per_key + * is 10, which yields a filter with ~ 1% false positive rate. + *

default bits_per_key: 10

+ * + *

use_block_based_builder: use block based filter rather than full filter. + * If you want to build a full filter, it needs to be set to false. + *

+ *

default mode: block based filter

+ *

+ * Callers must delete the result after any database that is using the + * result has been closed.
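A hedged sketch wiring a bloom filter into the block based table config from earlier in this patch (10 bits per key gives roughly a 1% false positive rate, per the note above):

import org.rocksdb.*;

public class BloomFilterExample {
  public static void main(String[] args) {
    RocksDB.loadLibrary();
    Filter bloom = new BloomFilter(10, false); // 10 bits/key, full (not block based) filter
    BlockBasedTableConfig tableConfig = new BlockBasedTableConfig().setFilter(bloom);
    ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()
        .setTableFormatConfig(tableConfig);
    // Per the javadoc above, keep the filter alive until every options
    // instance using it has been disposed.
    cfOpts.dispose();
    bloom.dispose();
  }
}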

+ * + * @param bitsPerKey number of bits to use + * @param useBlockBasedMode use block based mode or full filter mode + */ + public BloomFilter(final int bitsPerKey, final boolean useBlockBasedMode) { + super(createNewBloomFilter(bitsPerKey, useBlockBasedMode)); + } + + private native static long createNewBloomFilter(final int bitsKeyKey, + final boolean useBlockBasedMode); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java b/external/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java new file mode 100755 index 0000000000..436cb513f1 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/BuiltinComparator.java @@ -0,0 +1,20 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Builtin RocksDB comparators + * + *
    + *
1. BYTEWISE_COMPARATOR - Sorts all keys in ascending bytewise order.
+ * 2. REVERSE_BYTEWISE_COMPARATOR - Sorts all keys in descending bytewise order.
+ */ +public enum BuiltinComparator { + BYTEWISE_COMPARATOR, REVERSE_BYTEWISE_COMPARATOR +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java b/external/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java new file mode 100755 index 0000000000..d867227784 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/Checkpoint.java @@ -0,0 +1,66 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Provides Checkpoint functionality. Checkpoints + * provide persistent snapshots of RocksDB databases. + */ +public class Checkpoint extends RocksObject { + + /** + * Creates a Checkpoint object to be used for creating open-able + * snapshots. + * + * @param db {@link RocksDB} instance. + * @return a Checkpoint instance. + * + * @throws java.lang.IllegalArgumentException if {@link RocksDB} + * instance is null. + * @throws java.lang.IllegalStateException if {@link RocksDB} + * instance is not initialized. + */ + public static Checkpoint create(final RocksDB db) { + if (db == null) { + throw new IllegalArgumentException( + "RocksDB instance shall not be null."); + } else if (!db.isOwningHandle()) { + throw new IllegalStateException( + "RocksDB instance must be initialized."); + } + Checkpoint checkpoint = new Checkpoint(db); + return checkpoint; + } + + /** + *

Builds an open-able snapshot of RocksDB on the same disk, which + * accepts an output directory on the same disk, and under the directory + * (1) hard-linked SST files pointing to existing live SST files + * (2) copied manifest and other files.
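A minimal sketch of the checkpoint flow (paths are placeholders; whether the target directory may pre-exist is an upstream detail not shown in this hunk, so treat that as an assumption):

import org.rocksdb.*;

public class CheckpointExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    Options opt = new Options().setCreateIfMissing(true); // assumed standard Options API
    RocksDB db = RocksDB.open(opt, "/tmp/rocksdb");
    try {
      Checkpoint cp = Checkpoint.create(db); // throws if db is null or already closed
      cp.createCheckpoint("/tmp/rocksdb-checkpoint");
      cp.dispose();
    } finally {
      db.close();
      opt.dispose();
    }
  }
}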

+ * + * @param checkpointPath path to the folder where the snapshot is going + * to be stored. + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void createCheckpoint(final String checkpointPath) + throws RocksDBException { + createCheckpoint(nativeHandle_, checkpointPath); + } + + private Checkpoint(final RocksDB db) { + super(newCheckpoint(db.nativeHandle_)); + this.db_ = db; + } + + private final RocksDB db_; + + private static native long newCheckpoint(long dbHandle); + @Override protected final native void disposeInternal(final long handle); + + private native void createCheckpoint(long handle, String checkpointPath) + throws RocksDBException; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java b/external/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java new file mode 100755 index 0000000000..7f560170c0 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/ChecksumType.java @@ -0,0 +1,39 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Checksum types used in conjunction with BlockBasedTable. + */ +public enum ChecksumType { + /** + * Not implemented yet. + */ + kNoChecksum((byte) 0), + /** + * CRC32 Checksum + */ + kCRC32c((byte) 1), + /** + * XX Hash + */ + kxxHash((byte) 2); + + /** + * Returns the byte value of the enumerations value + * + * @return byte representation + */ + public byte getValue() { + return value_; + } + + private ChecksumType(byte value) { + value_ = value; + } + + private final byte value_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java b/external/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java new file mode 100755 index 0000000000..84581f465c --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyDescriptor.java @@ -0,0 +1,61 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + *

Describes a column family with a + * name and respective Options.

+ */ +public class ColumnFamilyDescriptor { + + /** + *

Creates a new Column Family using a name and default + * options.

+ * + * @param columnFamilyName name of column family. + * @since 3.10.0 + */ + public ColumnFamilyDescriptor(final byte[] columnFamilyName) { + this(columnFamilyName, new ColumnFamilyOptions()); + } + + /** + *

Creates a new Column Family using a name and custom + * options.

+ * + * @param columnFamilyName name of column family. + * @param columnFamilyOptions options to be used with + * column family. + * @since 3.10.0 + */ + public ColumnFamilyDescriptor(final byte[] columnFamilyName, + final ColumnFamilyOptions columnFamilyOptions) { + columnFamilyName_ = columnFamilyName; + columnFamilyOptions_ = columnFamilyOptions; + } + + /** + * Retrieve name of column family. + * + * @return column family name. + * @since 3.10.0 + */ + public byte[] columnFamilyName() { + return columnFamilyName_; + } + + /** + * Retrieve assigned options instance. + * + * @return Options instance assigned to this instance. + */ + public ColumnFamilyOptions columnFamilyOptions() { + return columnFamilyOptions_; + } + + private final byte[] columnFamilyName_; + private final ColumnFamilyOptions columnFamilyOptions_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java b/external/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java new file mode 100755 index 0000000000..6aa22d3fea --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyHandle.java @@ -0,0 +1,42 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * ColumnFamilyHandle class to hold handles to underlying rocksdb + * ColumnFamily Pointers. + */ +public class ColumnFamilyHandle extends RocksObject { + ColumnFamilyHandle(final RocksDB rocksDB, + final long nativeHandle) { + super(nativeHandle); + // rocksDB must point to a valid RocksDB instance; + assert(rocksDB != null); + // ColumnFamilyHandle must hold a reference to the related RocksDB instance + // to guarantee that while a GC cycle starts ColumnFamilyHandle instances + // are freed prior to RocksDB instances. + this.rocksDB_ = rocksDB; + } + + /** + *

Deletes the underlying C++ column family handle pointer.

+ * + *

Note: the underlying handle can only be safely deleted if the RocksDB + * instance related to a certain ColumnFamilyHandle is still valid and + * initialized. Therefore {@code disposeInternal()} checks if the RocksDB is + * initialized before freeing the native handle.
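To see descriptors and handles together, a sketch of the usual open-with-column-families flow; the RocksDB.open overload taking descriptor and handle lists, DBOptions, and RocksDB.DEFAULT_COLUMN_FAMILY are assumed from the upstream 3.10+ Java API rather than shown in this hunk:

import org.rocksdb.*;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

public class ColumnFamilyExample {
  public static void main(String[] args) throws RocksDBException {
    RocksDB.loadLibrary();
    List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
        new ColumnFamilyDescriptor("users".getBytes(), new ColumnFamilyOptions()));
    List<ColumnFamilyHandle> handles = new ArrayList<>();
    DBOptions dbOpts = new DBOptions()
        .setCreateIfMissing(true)
        .setCreateMissingColumnFamilies(true);
    RocksDB db = RocksDB.open(dbOpts, "/tmp/rocksdb", descriptors, handles);
    try {
      db.put(handles.get(1), "alice".getBytes(), "1".getBytes());
    } finally {
      // Dispose handles before the database, per the lifetime note above.
      for (ColumnFamilyHandle handle : handles) {
        handle.dispose();
      }
      db.close();
      dbOpts.dispose();
    }
  }
}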

+ */ + @Override + protected void disposeInternal() { + if(rocksDB_.isOwningHandle()) { + disposeInternal(nativeHandle_); + } + } + + @Override protected final native void disposeInternal(final long handle); + + private final RocksDB rocksDB_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java b/external/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java new file mode 100755 index 0000000000..528e0f2053 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptions.java @@ -0,0 +1,784 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; + +/** + * ColumnFamilyOptions to control the behavior of a database. It will be used + * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). + * + * If {@link #dispose()} function is not called, then it will be GC'd + * automatically and native resources will be released as part of the process. + */ +public class ColumnFamilyOptions extends RocksObject + implements ColumnFamilyOptionsInterface { + static { + RocksDB.loadLibrary(); + } + + /** + * Construct ColumnFamilyOptions. + * + * This constructor will create (by allocating a block of memory) + * an {@code rocksdb::DBOptions} in the c++ side. + */ + public ColumnFamilyOptions() { + super(newColumnFamilyOptions()); + } + + /** + *

Method to get an options instance by using pre-configured + * property values. If one or more values are undefined in + * the context of RocksDB, the method will return a null + * value.

+ * + *

Note: Property keys can be derived from + * getter methods within the options class. Example: the method + * {@code writeBufferSize()} has a property key: + * {@code write_buffer_size}.
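A sketch of the property-based construction just described; keys follow the documented getter-to-key rule (e.g. writeBufferSize() becomes write_buffer_size, and max_write_buffer_number is derived the same way as an assumption):

import org.rocksdb.ColumnFamilyOptions;
import java.util.Properties;

public class OptionsFromPropsExample {
  public static void main(String[] args) {
    Properties props = new Properties();
    props.setProperty("write_buffer_size", "67108864"); // 64 MB, from writeBufferSize()
    props.setProperty("max_write_buffer_number", "4");  // assumed key, from maxWriteBufferNumber()
    ColumnFamilyOptions cfOpts =
        ColumnFamilyOptions.getColumnFamilyOptionsFromProps(props);
    if (cfOpts == null) {
      System.err.println("at least one property key is unknown to RocksDB");
    } else {
      System.out.println(cfOpts.writeBufferSize());
      cfOpts.dispose();
    }
  }
}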

+ * + * @param properties {@link java.util.Properties} instance. + * + * @return {@link org.rocksdb.ColumnFamilyOptions instance} + * or null. + * + * @throws java.lang.IllegalArgumentException if null or empty + * {@link Properties} instance is passed to the method call. + */ + public static ColumnFamilyOptions getColumnFamilyOptionsFromProps( + final Properties properties) { + if (properties == null || properties.size() == 0) { + throw new IllegalArgumentException( + "Properties value must contain at least one value."); + } + ColumnFamilyOptions columnFamilyOptions = null; + StringBuilder stringBuilder = new StringBuilder(); + for (final String name : properties.stringPropertyNames()){ + stringBuilder.append(name); + stringBuilder.append("="); + stringBuilder.append(properties.getProperty(name)); + stringBuilder.append(";"); + } + long handle = getColumnFamilyOptionsFromProps( + stringBuilder.toString()); + if (handle != 0){ + columnFamilyOptions = new ColumnFamilyOptions(handle); + } + return columnFamilyOptions; + } + + @Override + public ColumnFamilyOptions optimizeForPointLookup( + final long blockCacheSizeMb) { + optimizeForPointLookup(nativeHandle_, + blockCacheSizeMb); + return this; + } + + @Override + public ColumnFamilyOptions optimizeLevelStyleCompaction() { + optimizeLevelStyleCompaction(nativeHandle_, + DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET); + return this; + } + + @Override + public ColumnFamilyOptions optimizeLevelStyleCompaction( + final long memtableMemoryBudget) { + optimizeLevelStyleCompaction(nativeHandle_, + memtableMemoryBudget); + return this; + } + + @Override + public ColumnFamilyOptions optimizeUniversalStyleCompaction() { + optimizeUniversalStyleCompaction(nativeHandle_, + DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET); + return this; + } + + @Override + public ColumnFamilyOptions optimizeUniversalStyleCompaction( + final long memtableMemoryBudget) { + optimizeUniversalStyleCompaction(nativeHandle_, + memtableMemoryBudget); + return this; + } + + @Override + public ColumnFamilyOptions setComparator( + final BuiltinComparator builtinComparator) { + assert(isOwningHandle()); + setComparatorHandle(nativeHandle_, builtinComparator.ordinal()); + return this; + } + + @Override + public ColumnFamilyOptions setComparator( + final AbstractComparator> comparator) { + assert (isOwningHandle()); + setComparatorHandle(nativeHandle_, comparator.getNativeHandle()); + comparator_ = comparator; + return this; + } + + @Override + public ColumnFamilyOptions setMergeOperatorName(final String name) { + assert (isOwningHandle()); + if (name == null) { + throw new IllegalArgumentException( + "Merge operator name must not be null."); + } + setMergeOperatorName(nativeHandle_, name); + return this; + } + + @Override + public ColumnFamilyOptions setMergeOperator( + final MergeOperator mergeOperator) { + setMergeOperator(nativeHandle_, mergeOperator.newMergeOperatorHandle()); + return this; + } + + public ColumnFamilyOptions setCompactionFilter( + final AbstractCompactionFilter> + compactionFilter) { + setCompactionFilterHandle(nativeHandle_, compactionFilter.nativeHandle_); + compactionFilter_ = compactionFilter; + return this; + } + + @Override + public ColumnFamilyOptions setWriteBufferSize(final long writeBufferSize) { + assert(isOwningHandle()); + setWriteBufferSize(nativeHandle_, writeBufferSize); + return this; + } + + @Override + public long writeBufferSize() { + assert(isOwningHandle()); + return writeBufferSize(nativeHandle_); + } + + @Override + public ColumnFamilyOptions 
setMaxWriteBufferNumber( + final int maxWriteBufferNumber) { + assert(isOwningHandle()); + setMaxWriteBufferNumber(nativeHandle_, maxWriteBufferNumber); + return this; + } + + @Override + public int maxWriteBufferNumber() { + assert(isOwningHandle()); + return maxWriteBufferNumber(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMinWriteBufferNumberToMerge( + final int minWriteBufferNumberToMerge) { + setMinWriteBufferNumberToMerge(nativeHandle_, minWriteBufferNumberToMerge); + return this; + } + + @Override + public int minWriteBufferNumberToMerge() { + return minWriteBufferNumberToMerge(nativeHandle_); + } + + @Override + public ColumnFamilyOptions useFixedLengthPrefixExtractor(final int n) { + assert(isOwningHandle()); + useFixedLengthPrefixExtractor(nativeHandle_, n); + return this; + } + + @Override + public ColumnFamilyOptions useCappedPrefixExtractor(final int n) { + assert(isOwningHandle()); + useCappedPrefixExtractor(nativeHandle_, n); + return this; + } + + @Override + public ColumnFamilyOptions setCompressionType( + final CompressionType compressionType) { + setCompressionType(nativeHandle_, compressionType.getValue()); + return this; + } + + @Override + public CompressionType compressionType() { + return CompressionType.values()[compressionType(nativeHandle_)]; + } + + @Override + public ColumnFamilyOptions setCompressionPerLevel( + final List compressionLevels) { + final byte[] byteCompressionTypes = new byte[ + compressionLevels.size()]; + for (int i = 0; i < compressionLevels.size(); i++) { + byteCompressionTypes[i] = compressionLevels.get(i).getValue(); + } + setCompressionPerLevel(nativeHandle_, byteCompressionTypes); + return this; + } + + @Override + public List compressionPerLevel() { + final byte[] byteCompressionTypes = + compressionPerLevel(nativeHandle_); + final List compressionLevels = new ArrayList<>(); + for (final Byte byteCompressionType : byteCompressionTypes) { + compressionLevels.add(CompressionType.getCompressionType( + byteCompressionType)); + } + return compressionLevels; + } + + @Override + public ColumnFamilyOptions setNumLevels(final int numLevels) { + setNumLevels(nativeHandle_, numLevels); + return this; + } + + @Override + public int numLevels() { + return numLevels(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setLevelZeroFileNumCompactionTrigger( + final int numFiles) { + setLevelZeroFileNumCompactionTrigger( + nativeHandle_, numFiles); + return this; + } + + @Override + public int levelZeroFileNumCompactionTrigger() { + return levelZeroFileNumCompactionTrigger(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setLevelZeroSlowdownWritesTrigger( + final int numFiles) { + setLevelZeroSlowdownWritesTrigger(nativeHandle_, numFiles); + return this; + } + + @Override + public int levelZeroSlowdownWritesTrigger() { + return levelZeroSlowdownWritesTrigger(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setLevelZeroStopWritesTrigger(final int numFiles) { + setLevelZeroStopWritesTrigger(nativeHandle_, numFiles); + return this; + } + + @Override + public int levelZeroStopWritesTrigger() { + return levelZeroStopWritesTrigger(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxMemCompactionLevel( + final int maxMemCompactionLevel) { + return this; + } + + @Override + public int maxMemCompactionLevel() { + return 0; + } + + @Override + public ColumnFamilyOptions setTargetFileSizeBase( + final long targetFileSizeBase) { + setTargetFileSizeBase(nativeHandle_, targetFileSizeBase); + 
return this; + } + + @Override + public long targetFileSizeBase() { + return targetFileSizeBase(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setTargetFileSizeMultiplier( + final int multiplier) { + setTargetFileSizeMultiplier(nativeHandle_, multiplier); + return this; + } + + @Override + public int targetFileSizeMultiplier() { + return targetFileSizeMultiplier(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxBytesForLevelBase( + final long maxBytesForLevelBase) { + setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase); + return this; + } + + @Override + public long maxBytesForLevelBase() { + return maxBytesForLevelBase(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setLevelCompactionDynamicLevelBytes( + final boolean enableLevelCompactionDynamicLevelBytes) { + setLevelCompactionDynamicLevelBytes(nativeHandle_, + enableLevelCompactionDynamicLevelBytes); + return this; + } + + @Override + public boolean levelCompactionDynamicLevelBytes() { + return levelCompactionDynamicLevelBytes(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxBytesForLevelMultiplier( + final int multiplier) { + setMaxBytesForLevelMultiplier(nativeHandle_, multiplier); + return this; + } + + @Override + public int maxBytesForLevelMultiplier() { + return maxBytesForLevelMultiplier(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setExpandedCompactionFactor( + final int expandedCompactionFactor) { + setExpandedCompactionFactor(nativeHandle_, expandedCompactionFactor); + return this; + } + + @Override + public int expandedCompactionFactor() { + return expandedCompactionFactor(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setSourceCompactionFactor( + final int sourceCompactionFactor) { + setSourceCompactionFactor(nativeHandle_, sourceCompactionFactor); + return this; + } + + @Override + public int sourceCompactionFactor() { + return sourceCompactionFactor(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxGrandparentOverlapFactor( + final int maxGrandparentOverlapFactor) { + setMaxGrandparentOverlapFactor(nativeHandle_, maxGrandparentOverlapFactor); + return this; + } + + @Override + public int maxGrandparentOverlapFactor() { + return maxGrandparentOverlapFactor(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setSoftRateLimit( + final double softRateLimit) { + setSoftRateLimit(nativeHandle_, softRateLimit); + return this; + } + + @Override + public double softRateLimit() { + return softRateLimit(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setHardRateLimit( + final double hardRateLimit) { + setHardRateLimit(nativeHandle_, hardRateLimit); + return this; + } + + @Override + public double hardRateLimit() { + return hardRateLimit(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setRateLimitDelayMaxMilliseconds( + final int rateLimitDelayMaxMilliseconds) { + setRateLimitDelayMaxMilliseconds( + nativeHandle_, rateLimitDelayMaxMilliseconds); + return this; + } + + @Override + public int rateLimitDelayMaxMilliseconds() { + return rateLimitDelayMaxMilliseconds(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setArenaBlockSize( + final long arenaBlockSize) { + setArenaBlockSize(nativeHandle_, arenaBlockSize); + return this; + } + + @Override + public long arenaBlockSize() { + return arenaBlockSize(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setDisableAutoCompactions( + final boolean disableAutoCompactions) { + 
setDisableAutoCompactions(nativeHandle_, disableAutoCompactions); + return this; + } + + @Override + public boolean disableAutoCompactions() { + return disableAutoCompactions(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setPurgeRedundantKvsWhileFlush( + final boolean purgeRedundantKvsWhileFlush) { + setPurgeRedundantKvsWhileFlush( + nativeHandle_, purgeRedundantKvsWhileFlush); + return this; + } + + @Override + public boolean purgeRedundantKvsWhileFlush() { + return purgeRedundantKvsWhileFlush(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setCompactionStyle( + final CompactionStyle compactionStyle) { + setCompactionStyle(nativeHandle_, compactionStyle.getValue()); + return this; + } + + @Override + public CompactionStyle compactionStyle() { + return CompactionStyle.values()[compactionStyle(nativeHandle_)]; + } + + @Override + public ColumnFamilyOptions setMaxTableFilesSizeFIFO( + final long maxTableFilesSize) { + assert(maxTableFilesSize > 0); // unsigned native type + assert(isOwningHandle()); + setMaxTableFilesSizeFIFO(nativeHandle_, maxTableFilesSize); + return this; + } + + @Override + public long maxTableFilesSizeFIFO() { + return maxTableFilesSizeFIFO(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setVerifyChecksumsInCompaction( + final boolean verifyChecksumsInCompaction) { + setVerifyChecksumsInCompaction( + nativeHandle_, verifyChecksumsInCompaction); + return this; + } + + @Override + public boolean verifyChecksumsInCompaction() { + return verifyChecksumsInCompaction(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxSequentialSkipInIterations( + final long maxSequentialSkipInIterations) { + setMaxSequentialSkipInIterations(nativeHandle_, + maxSequentialSkipInIterations); + return this; + } + + @Override + public long maxSequentialSkipInIterations() { + return maxSequentialSkipInIterations(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMemTableConfig( + final MemTableConfig config) { + memTableConfig_ = config; + setMemTableFactory(nativeHandle_, config.newMemTableFactoryHandle()); + return this; + } + + @Override + public String memTableFactoryName() { + assert(isOwningHandle()); + return memTableFactoryName(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setTableFormatConfig( + final TableFormatConfig config) { + tableFormatConfig_ = config; + setTableFactory(nativeHandle_, config.newTableFactoryHandle()); + return this; + } + + @Override + public String tableFactoryName() { + assert(isOwningHandle()); + return tableFactoryName(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setInplaceUpdateSupport( + final boolean inplaceUpdateSupport) { + setInplaceUpdateSupport(nativeHandle_, inplaceUpdateSupport); + return this; + } + + @Override + public boolean inplaceUpdateSupport() { + return inplaceUpdateSupport(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setInplaceUpdateNumLocks( + final long inplaceUpdateNumLocks) { + setInplaceUpdateNumLocks(nativeHandle_, inplaceUpdateNumLocks); + return this; + } + + @Override + public long inplaceUpdateNumLocks() { + return inplaceUpdateNumLocks(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMemtablePrefixBloomSizeRatio( + final double memtablePrefixBloomSizeRatio) { + setMemtablePrefixBloomSizeRatio(nativeHandle_, memtablePrefixBloomSizeRatio); + return this; + } + + @Override + public double memtablePrefixBloomSizeRatio() { + return memtablePrefixBloomSizeRatio(nativeHandle_); + } + + 
@Override + public ColumnFamilyOptions setBloomLocality(int bloomLocality) { + setBloomLocality(nativeHandle_, bloomLocality); + return this; + } + + @Override + public int bloomLocality() { + return bloomLocality(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMaxSuccessiveMerges( + final long maxSuccessiveMerges) { + setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges); + return this; + } + + @Override + public long maxSuccessiveMerges() { + return maxSuccessiveMerges(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setMinPartialMergeOperands( + final int minPartialMergeOperands) { + setMinPartialMergeOperands(nativeHandle_, minPartialMergeOperands); + return this; + } + + @Override + public int minPartialMergeOperands() { + return minPartialMergeOperands(nativeHandle_); + } + + @Override + public ColumnFamilyOptions setOptimizeFiltersForHits( + final boolean optimizeFiltersForHits) { + setOptimizeFiltersForHits(nativeHandle_, optimizeFiltersForHits); + return this; + } + + @Override + public boolean optimizeFiltersForHits() { + return optimizeFiltersForHits(nativeHandle_); + } + + /** + *

Private constructor to be used by + * {@link #getColumnFamilyOptionsFromProps(java.util.Properties)}

+ * + * @param handle native handle to ColumnFamilyOptions instance. + */ + private ColumnFamilyOptions(final long handle) { + super(handle); + } + + private static native long getColumnFamilyOptionsFromProps( + String optString); + + private static native long newColumnFamilyOptions(); + @Override protected final native void disposeInternal(final long handle); + + private native void optimizeForPointLookup(long handle, + long blockCacheSizeMb); + private native void optimizeLevelStyleCompaction(long handle, + long memtableMemoryBudget); + private native void optimizeUniversalStyleCompaction(long handle, + long memtableMemoryBudget); + private native void setComparatorHandle(long handle, int builtinComparator); + private native void setComparatorHandle(long optHandle, + long comparatorHandle); + private native void setMergeOperatorName(long handle, String name); + private native void setMergeOperator(long handle, long mergeOperatorHandle); + private native void setCompactionFilterHandle(long handle, + long compactionFilterHandle); + private native void setWriteBufferSize(long handle, long writeBufferSize) + throws IllegalArgumentException; + private native long writeBufferSize(long handle); + private native void setMaxWriteBufferNumber( + long handle, int maxWriteBufferNumber); + private native int maxWriteBufferNumber(long handle); + private native void setMinWriteBufferNumberToMerge( + long handle, int minWriteBufferNumberToMerge); + private native int minWriteBufferNumberToMerge(long handle); + private native void setCompressionType(long handle, byte compressionType); + private native byte compressionType(long handle); + private native void setCompressionPerLevel(long handle, + byte[] compressionLevels); + private native byte[] compressionPerLevel(long handle); + private native void useFixedLengthPrefixExtractor( + long handle, int prefixLength); + private native void useCappedPrefixExtractor( + long handle, int prefixLength); + private native void setNumLevels( + long handle, int numLevels); + private native int numLevels(long handle); + private native void setLevelZeroFileNumCompactionTrigger( + long handle, int numFiles); + private native int levelZeroFileNumCompactionTrigger(long handle); + private native void setLevelZeroSlowdownWritesTrigger( + long handle, int numFiles); + private native int levelZeroSlowdownWritesTrigger(long handle); + private native void setLevelZeroStopWritesTrigger( + long handle, int numFiles); + private native int levelZeroStopWritesTrigger(long handle); + private native void setTargetFileSizeBase( + long handle, long targetFileSizeBase); + private native long targetFileSizeBase(long handle); + private native void setTargetFileSizeMultiplier( + long handle, int multiplier); + private native int targetFileSizeMultiplier(long handle); + private native void setMaxBytesForLevelBase( + long handle, long maxBytesForLevelBase); + private native long maxBytesForLevelBase(long handle); + private native void setLevelCompactionDynamicLevelBytes( + long handle, boolean enableLevelCompactionDynamicLevelBytes); + private native boolean levelCompactionDynamicLevelBytes( + long handle); + private native void setMaxBytesForLevelMultiplier( + long handle, int multiplier); + private native int maxBytesForLevelMultiplier(long handle); + private native void setExpandedCompactionFactor( + long handle, int expandedCompactionFactor); + private native int expandedCompactionFactor(long handle); + private native void setSourceCompactionFactor( + long handle, int 
sourceCompactionFactor); + private native int sourceCompactionFactor(long handle); + private native void setMaxGrandparentOverlapFactor( + long handle, int maxGrandparentOverlapFactor); + private native int maxGrandparentOverlapFactor(long handle); + private native void setSoftRateLimit( + long handle, double softRateLimit); + private native double softRateLimit(long handle); + private native void setHardRateLimit( + long handle, double hardRateLimit); + private native double hardRateLimit(long handle); + private native void setRateLimitDelayMaxMilliseconds( + long handle, int rateLimitDelayMaxMilliseconds); + private native int rateLimitDelayMaxMilliseconds(long handle); + private native void setArenaBlockSize( + long handle, long arenaBlockSize) + throws IllegalArgumentException; + private native long arenaBlockSize(long handle); + private native void setDisableAutoCompactions( + long handle, boolean disableAutoCompactions); + private native boolean disableAutoCompactions(long handle); + private native void setCompactionStyle(long handle, byte compactionStyle); + private native byte compactionStyle(long handle); + private native void setMaxTableFilesSizeFIFO( + long handle, long max_table_files_size); + private native long maxTableFilesSizeFIFO(long handle); + private native void setPurgeRedundantKvsWhileFlush( + long handle, boolean purgeRedundantKvsWhileFlush); + private native boolean purgeRedundantKvsWhileFlush(long handle); + private native void setVerifyChecksumsInCompaction( + long handle, boolean verifyChecksumsInCompaction); + private native boolean verifyChecksumsInCompaction(long handle); + private native void setMaxSequentialSkipInIterations( + long handle, long maxSequentialSkipInIterations); + private native long maxSequentialSkipInIterations(long handle); + private native void setMemTableFactory(long handle, long factoryHandle); + private native String memTableFactoryName(long handle); + private native void setTableFactory(long handle, long factoryHandle); + private native String tableFactoryName(long handle); + private native void setInplaceUpdateSupport( + long handle, boolean inplaceUpdateSupport); + private native boolean inplaceUpdateSupport(long handle); + private native void setInplaceUpdateNumLocks( + long handle, long inplaceUpdateNumLocks) + throws IllegalArgumentException; + private native long inplaceUpdateNumLocks(long handle); + private native void setMemtablePrefixBloomSizeRatio( + long handle, double memtablePrefixBloomSizeRatio); + private native double memtablePrefixBloomSizeRatio(long handle); + private native void setBloomLocality( + long handle, int bloomLocality); + private native int bloomLocality(long handle); + private native void setMaxSuccessiveMerges( + long handle, long maxSuccessiveMerges) + throws IllegalArgumentException; + private native long maxSuccessiveMerges(long handle); + private native void setMinPartialMergeOperands( + long handle, int minPartialMergeOperands); + private native int minPartialMergeOperands(long handle); + private native void setOptimizeFiltersForHits(long handle, + boolean optimizeFiltersForHits); + private native boolean optimizeFiltersForHits(long handle); + + MemTableConfig memTableConfig_; + TableFormatConfig tableFormatConfig_; + AbstractComparator<? extends AbstractSlice<?>> comparator_; + AbstractCompactionFilter<? extends AbstractSlice<?>> compactionFilter_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java b/external/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java new file mode 100755
index 0000000000..dea3d1b9f9 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/ColumnFamilyOptionsInterface.java @@ -0,0 +1,1144 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.List; + +public interface ColumnFamilyOptionsInterface { + + /** + * Use this if you don't need to keep the data sorted, i.e. you'll never use + * an iterator, only Put() and Get() API calls + * + * @param blockCacheSizeMb Block cache size in MB + * @return the instance of the current Object. + */ + Object optimizeForPointLookup(long blockCacheSizeMb); + + /** + *
<p>Default values for some parameters in ColumnFamilyOptions are not + * optimized for heavy workloads and big datasets, which means you might + * observe write stalls under some conditions. As a starting point for tuning + * RocksDB options, use the following for level style compaction.</p> + * + * <p>Make sure to also call IncreaseParallelism(), which will provide the + * biggest performance gains.</p> + * <p>Note: we might use more memory than memtable_memory_budget during high + * write rate period.</p> + * + * @return the instance of the current Object. + */ + Object optimizeLevelStyleCompaction(); + + /** + * <p>Default values for some parameters in ColumnFamilyOptions are not + * optimized for heavy workloads and big datasets, which means you might + * observe write stalls under some conditions. As a starting point for tuning + * RocksDB options, use the following for level style compaction.</p> + * + * <p>Make sure to also call IncreaseParallelism(), which will provide the + * biggest performance gains.</p> + * <p>Note: we might use more memory than memtable_memory_budget during high + * write rate period.</p> + * + * @param memtableMemoryBudget memory budget in bytes + * @return the instance of the current Object. + */ + Object optimizeLevelStyleCompaction(long memtableMemoryBudget);
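Usage sketch (illustrative, not part of this patch; assumes the RocksJava classes added in this diff are on the classpath):

    // Tune a column family for level-style compaction with a 512 MB
    // memtable budget; the budget value here is only an example.
    ColumnFamilyOptions cfOpts = new ColumnFamilyOptions();
    cfOpts.optimizeLevelStyleCompaction(512L * 1024 * 1024);
    // The companion call mentioned above, on the DB-wide options:
    DBOptions dbOpts = new DBOptions().setIncreaseParallelism(4);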
+ + /** + * <p>Default values for some parameters in ColumnFamilyOptions are not + * optimized for heavy workloads and big datasets, which means you might + * observe write stalls under some conditions. As a starting point for tuning + * RocksDB options, use the following for universal style compaction.</p> + * + * <p>Universal style compaction is focused on reducing Write Amplification + * Factor for big data sets, but increases Space Amplification.</p> + * + * <p>Make sure to also call IncreaseParallelism(), which will provide the + * biggest performance gains.</p> + * + * <p>Note: we might use more memory than memtable_memory_budget during high + * write rate period.</p> + * + * @return the instance of the current Object. + */ + Object optimizeUniversalStyleCompaction(); + + /** + * <p>Default values for some parameters in ColumnFamilyOptions are not + * optimized for heavy workloads and big datasets, which means you might + * observe write stalls under some conditions. As a starting point for tuning + * RocksDB options, use the following for universal style compaction.</p> + * + * <p>Universal style compaction is focused on reducing Write Amplification + * Factor for big data sets, but increases Space Amplification.</p> + * + * <p>Make sure to also call IncreaseParallelism(), which will provide the + * biggest performance gains.</p> + * + * <p>Note: we might use more memory than memtable_memory_budget during high + * write rate period.</p> + * + * @param memtableMemoryBudget memory budget in bytes + * @return the instance of the current Object. + */ + Object optimizeUniversalStyleCompaction(long memtableMemoryBudget); + + /** + * Set {@link BuiltinComparator} to be used with RocksDB. + * + * Note: Comparator can be set once upon database creation. + * + * Default: BytewiseComparator. + * @param builtinComparator a {@link BuiltinComparator} type. + * @return the instance of the current Object. + */ + Object setComparator(BuiltinComparator builtinComparator); + + /** + * Use the specified comparator for key ordering. + * + * The comparator should not be disposed before the options instances that use it are + * disposed. If dispose() is never called, the comparator object will be + * GC'd automatically. + * + * A comparator instance can be re-used in multiple options instances. + * + * @param comparator java instance. + * @return the instance of the current Object. + */ + Object setComparator(AbstractComparator<? extends AbstractSlice<?>> comparator);
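A sketch of the two comparator entry points (the custom comparator below, including its name and ordering, is hypothetical and illustrative only; built-ins are the safer default):

    // Built-in comparator, set once at database creation time.
    cfOpts.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR);

    // Custom comparator: must not be disposed while any options
    // instance still references it.
    ComparatorOptions copt = new ComparatorOptions();
    Comparator reversed = new Comparator(copt) {
      @Override public String name() { return "example.reversed"; } // hypothetical name
      @Override public int compare(Slice a, Slice b) {
        // Reverse of the bytewise string order (illustrative only).
        return -a.toString().compareTo(b.toString());
      }
    };
    cfOpts.setComparator(reversed);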
+ + /** + * <p>Set the merge operator to be used for merging two merge operands + * of the same key. The merge function is invoked during + * compaction and at lookup time, if multiple key/value pairs belonging + * to the same key are found in the database.</p> + * + * @param name the name of the merge function, as defined by + * the MergeOperators factory (see utilities/MergeOperators.h) + * The merge function is specified by name and must be one of the + * standard merge operators provided by RocksDB. The available + * operators are "put", "uint64add", "stringappend" and "stringappendtest". + * @return the instance of the current Object. + */ + Object setMergeOperatorName(String name); + + /** + * <p>Set the merge operator to be used for merging two different key/value + * pairs that share the same key. The merge function is invoked during + * compaction and at lookup time, if multiple key/value pairs belonging + * to the same key are found in the database.</p> + * + * @param mergeOperator {@link MergeOperator} instance. + * @return the instance of the current Object. + */ + Object setMergeOperator(MergeOperator mergeOperator); + + /** + * Amount of data to build up in memory (backed by an unsorted log + * on disk) before converting to a sorted on-disk file. + * + * Larger values increase performance, especially during bulk loads. + * Up to {@code max_write_buffer_number} write buffers may be held in memory + * at the same time, so you may wish to adjust this parameter + * to control memory usage. + * + * Also, a larger write buffer will result in a longer recovery time + * the next time the database is opened. + * + * Default: 4MB + * @param writeBufferSize the size of write buffer. + * @return the instance of the current Object. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + Object setWriteBufferSize(long writeBufferSize); + + /** + * Returns the size of the write buffer. + * + * @return size of write buffer. + * @see #setWriteBufferSize(long) + */ + long writeBufferSize(); + + /** + * The maximum number of write buffers that are built up in memory. + * The default is 2, so that when 1 write buffer is being flushed to + * storage, new writes can continue to the other write buffer. + * Default: 2 + * + * @param maxWriteBufferNumber maximum number of write buffers. + * @return the instance of the current Object. + */ + Object setMaxWriteBufferNumber( + int maxWriteBufferNumber); + + /** + * Returns maximum number of write buffers. + * + * @return maximum number of write buffers. + * @see #setMaxWriteBufferNumber(int) + */ + int maxWriteBufferNumber(); + + /** + * The minimum number of write buffers that will be merged together + * before writing to storage. If set to 1, then + * all write buffers are flushed to L0 as individual files and this increases + * read amplification because a get request has to check in all of these + * files. Also, an in-memory merge may result in writing less + * data to storage if there are duplicate records in each of these + * individual write buffers. Default: 1 + * + * @param minWriteBufferNumberToMerge the minimum number of write buffers + * that will be merged together. + * @return the reference to the current option. + */ + Object setMinWriteBufferNumberToMerge( + int minWriteBufferNumberToMerge); + + /** + * The minimum number of write buffers that will be merged together + * before writing to storage. If set to 1, then + * all write buffers are flushed to L0 as individual files and this increases + * read amplification because a get request has to check in all of these + * files. Also, an in-memory merge may result in writing less + * data to storage if there are duplicate records in each of these + * individual write buffers. Default: 1 + * + * @return the minimum number of write buffers that will be merged together. + */ + int minWriteBufferNumberToMerge(); + + /** + * This prefix-extractor uses the first n bytes of a key as its prefix. + * + * In some hash-based memtable representations such as HashLinkedList + * and HashSkipList, prefixes are used to partition the keys into + * several buckets. The prefix extractor specifies how to + * extract the prefix given a key. + * + * @param n use the first n bytes of a key as its prefix. + * @return the reference to the current option.
+ */ + Object useFixedLengthPrefixExtractor(int n); + + + /** + * Same as the fixed length prefix extractor, except that when the slice is + * shorter than the fixed length, it will use the full key. + * + * @param n use the first n bytes of a key as its prefix. + * @return the reference to the current option. + */ + Object useCappedPrefixExtractor(int n); + + /** + * Compress blocks using the specified compression algorithm. This + * parameter can be changed dynamically. + * + * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. + * + * @param compressionType Compression Type. + * @return the reference to the current option. + */ + Object setCompressionType(CompressionType compressionType); + + /** + * Compress blocks using the specified compression algorithm. This + * parameter can be changed dynamically. + * + * Default: SNAPPY_COMPRESSION, which gives lightweight but fast compression. + * + * @return Compression type. + */ + CompressionType compressionType();
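For instance (values illustrative; method chaining assumes the concrete ColumnFamilyOptions class, whose setters return this), memtable sizing and one of the stock merge operators listed above combine as:

    cfOpts.setWriteBufferSize(64L * 1024 * 1024) // 64 MB memtable
          .setMaxWriteBufferNumber(3)            // keep writing while one flushes
          .setMinWriteBufferNumberToMerge(2)
          .setMergeOperatorName("uint64add");    // stock operator named above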
+ + /** + * <p>Different levels can have different compression + * policies. There are cases where most lower levels + * would like to use quick compression algorithms while + * the higher levels (which have more data) use + * compression algorithms that have better compression + * but could be slower. This array, if non-empty, should + * have an entry for each level of the database; + * these override the value specified in the previous + * field 'compression'.</p> + * + * NOTICE + * <p>If {@code level_compaction_dynamic_level_bytes=true}, + * {@code compression_per_level[0]} still determines {@code L0}, + * but other elements of the array are based on the base level + * (the level {@code L0} files are merged to), and may not + * match the level users see from the info log for metadata.</p> + * + * <p>If {@code L0} files are merged to {@code level - n}, + * then, for {@code i>0}, {@code compression_per_level[i]} + * determines the compaction type for level {@code n+i-1}.</p> + * + * Example + * <p>For example, if we have 5 levels, and we determine to + * merge {@code L0} data to {@code L4} (which means {@code L1..L3} + * will be empty), then the new files that go to {@code L4} use + * compression type {@code compression_per_level[1]}.</p> + * + * <p>If now {@code L0} is merged to {@code L2}, data that goes to + * {@code L2} will be compressed according to + * {@code compression_per_level[1]}, {@code L3} using + * {@code compression_per_level[2]} and {@code L4} using + * {@code compression_per_level[3]}. Compaction for each + * level can change when data grows.</p> + * + * <p>Default: empty</p> + * + * @param compressionLevels list of + * {@link org.rocksdb.CompressionType} instances. + * + * @return the reference to the current option. + */ + Object setCompressionPerLevel( + List<CompressionType> compressionLevels);
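A per-level policy sketch (codec choices and level count are illustrative; each codec must be compiled into the native library):

    import java.util.Arrays;
    import java.util.List;

    List<CompressionType> perLevel = Arrays.asList(
        CompressionType.NO_COMPRESSION,      // L0/L1: hot, keep writes cheap
        CompressionType.NO_COMPRESSION,
        CompressionType.SNAPPY_COMPRESSION,  // middle levels
        CompressionType.SNAPPY_COMPRESSION,
        CompressionType.ZLIB_COMPRESSION,    // cold, large levels
        CompressionType.ZLIB_COMPRESSION);
    cfOpts.setNumLevels(6).setCompressionPerLevel(perLevel);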
+ + /** + * <p>Return the currently set {@link org.rocksdb.CompressionType} + * per level.</p> + * + * <p>See: {@link #setCompressionPerLevel(java.util.List)}</p> + * + * @return list of {@link org.rocksdb.CompressionType} + * instances. + */ + List<CompressionType> compressionPerLevel(); + + /** + * Set the number of levels for this database. + * If level-styled compaction is used, then this number determines + * the total number of levels. + * + * @param numLevels the number of levels. + * @return the reference to the current option. + */ + Object setNumLevels(int numLevels); + + /** + * If level-styled compaction is used, then this number determines + * the total number of levels. + * + * @return the number of levels. + */ + int numLevels(); + + /** + * Number of files to trigger level-0 compaction. A value < 0 means that + * level-0 compaction will not be triggered by number of files at all. + * Default: 4 + * + * @param numFiles the number of files in level-0 to trigger compaction. + * @return the reference to the current option. + */ + Object setLevelZeroFileNumCompactionTrigger( + int numFiles); + + /** + * The number of files in level 0 to trigger compaction from level-0 to + * level-1. A value < 0 means that level-0 compaction will not be + * triggered by number of files at all. + * Default: 4 + * + * @return the number of files in level 0 to trigger compaction. + */ + int levelZeroFileNumCompactionTrigger(); + + /** + * Soft limit on number of level-0 files. We start slowing down writes at this + * point. A value < 0 means that no writing slow down will be triggered by + * number of files in level-0. + * + * @param numFiles soft limit on number of level-0 files. + * @return the reference to the current option. + */ + Object setLevelZeroSlowdownWritesTrigger( + int numFiles); + + /** + * Soft limit on the number of level-0 files. We start slowing down writes + * at this point. A value < 0 means that no writing slow down will be + * triggered by number of files in level-0. + * + * @return the soft limit on the number of level-0 files. + */ + int levelZeroSlowdownWritesTrigger(); + + /** + * Maximum number of level-0 files. We stop writes at this point. + * + * @param numFiles the hard limit of the number of level-0 files. + * @return the reference to the current option. + */ + Object setLevelZeroStopWritesTrigger(int numFiles); + + /** + * Maximum number of level-0 files. We stop writes at this point. + * + * @return the hard limit of the number of level-0 files. + */ + int levelZeroStopWritesTrigger(); + + /** + * This does nothing anymore. Deprecated. + * + * @param maxMemCompactionLevel Unused. + * + * @return the reference to the current option. + */ + @Deprecated + Object setMaxMemCompactionLevel( + int maxMemCompactionLevel); + + /** + * This does nothing anymore. Deprecated. + * + * @return Always returns 0. + */ + @Deprecated + int maxMemCompactionLevel(); + + /** + * The target file size for compaction. + * This targetFileSizeBase determines a level-1 file size. + * Target file size for level L can be calculated by + * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)) + * For example, if targetFileSizeBase is 2MB and + * target_file_size_multiplier is 10, then each file on level-1 will + * be 2MB, and each file on level 2 will be 20MB, + * and each file on level-3 will be 200MB. + * By default targetFileSizeBase is 2MB. + * + * @param targetFileSizeBase the target size of a level-0 file. + * @return the reference to the current option. + * + * @see #setTargetFileSizeMultiplier(int) + */ + Object setTargetFileSizeBase(long targetFileSizeBase); + + /** + * The target file size for compaction.
+ * This targetFileSizeBase determines a level-1 file size. + * Target file size for level L can be calculated by + * targetFileSizeBase * (targetFileSizeMultiplier ^ (L-1)) + * For example, if targetFileSizeBase is 2MB and + * target_file_size_multiplier is 10, then each file on level-1 will + * be 2MB, and each file on level 2 will be 20MB, + * and each file on level-3 will be 200MB. + * by default targetFileSizeBase is 2MB. + * + * @return the target size of a level-0 file. + * + * @see #targetFileSizeMultiplier() + */ + long targetFileSizeBase(); + + /** + * targetFileSizeMultiplier defines the size ratio between a + * level-L file and level-(L+1) file. + * By default target_file_size_multiplier is 1, meaning + * files in different levels have the same target. + * + * @param multiplier the size ratio between a level-(L+1) file + * and level-L file. + * @return the reference to the current option. + */ + Object setTargetFileSizeMultiplier(int multiplier); + + /** + * targetFileSizeMultiplier defines the size ratio between a + * level-(L+1) file and level-L file. + * By default targetFileSizeMultiplier is 1, meaning + * files in different levels have the same target. + * + * @return the size ratio between a level-(L+1) file and level-L file. + */ + int targetFileSizeMultiplier(); + + /** + * The upper-bound of the total size of level-1 files in bytes. + * Maximum number of bytes for level L can be calculated as + * (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1)) + * For example, if maxBytesForLevelBase is 20MB, and if + * max_bytes_for_level_multiplier is 10, total data size for level-1 + * will be 20MB, total file size for level-2 will be 200MB, + * and total file size for level-3 will be 2GB. + * by default 'maxBytesForLevelBase' is 10MB. + * + * @param maxBytesForLevelBase maximum bytes for level base. + * + * @return the reference to the current option. + * @see #setMaxBytesForLevelMultiplier(int) + */ + Object setMaxBytesForLevelBase( + long maxBytesForLevelBase); + + /** + * The upper-bound of the total size of level-1 files in bytes. + * Maximum number of bytes for level L can be calculated as + * (maxBytesForLevelBase) * (maxBytesForLevelMultiplier ^ (L-1)) + * For example, if maxBytesForLevelBase is 20MB, and if + * max_bytes_for_level_multiplier is 10, total data size for level-1 + * will be 20MB, total file size for level-2 will be 200MB, + * and total file size for level-3 will be 2GB. + * by default 'maxBytesForLevelBase' is 10MB. + * + * @return the upper-bound of the total size of level-1 files + * in bytes. + * @see #maxBytesForLevelMultiplier() + */ + long maxBytesForLevelBase(); + + /** + *
+ + /** + * <p>If {@code true}, RocksDB will pick target size of each level + * dynamically. We will pick a base level b >= 1. L0 will be + * directly merged into level b, instead of always into level 1. + * Level 1 to b-1 need to be empty. We try to pick b and its target + * size so that</p> + * + * <ul> + * <li>target size is in the range of + * (max_bytes_for_level_base / max_bytes_for_level_multiplier, + * max_bytes_for_level_base]</li> + * <li>target size of the last level (level num_levels-1) equals to the extra size + * of the level.</li> + * </ul> + * + * <p>At the same time max_bytes_for_level_multiplier and + * max_bytes_for_level_multiplier_additional are still satisfied.</p> + * + * <p>With this option on, from an empty DB, we make the last level the base + * level, which means merging L0 data into the last level, until it exceeds + * max_bytes_for_level_base. And then we make the second last level the + * base level, to start to merge L0 data to the second last level, with its + * target size to be {@code 1/max_bytes_for_level_multiplier} of the last + * level's extra size. As the data accumulates further, we move the base + * level to the third last one, and so on.</p> + * + * <p>Example</p> + * <p>For example, assume {@code max_bytes_for_level_multiplier=10}, + * {@code num_levels=6}, and {@code max_bytes_for_level_base=10MB}.</p> + * + * <p>Target sizes of level 1 to 5 start with:</p> + * {@code [- - - - 10MB]} + * <p>with the base level being level 5. Target sizes of level 1 to 4 are not applicable + * because they will not be used. + * Until the size of Level 5 grows to more than 10MB, say 11MB, we make + * level 4 the base target and now the targets look like:</p> + * {@code [- - - 1.1MB 11MB]} + * <p>While data are accumulated, size targets are tuned based on actual data + * of level 5. When level 5 has 50MB of data, the targets are like:</p> + * {@code [- - - 5MB 50MB]} + * <p>Until level 5's actual size is more than 100MB, say 101MB. Now if we + * keep level 4 as the base level, its target size needs to be 10.1MB, + * which doesn't satisfy the target size range. So now we make level 3 + * the base level and the target sizes of the levels look like:</p> + * {@code [- - 1.01MB 10.1MB 101MB]} + * <p>In the same way, while level 5 further grows, all levels' targets grow, + * like</p> + * {@code [- - 5MB 50MB 500MB]} + * <p>Until level 5 exceeds 1000MB and becomes 1001MB, we make level 2 the + * base level and make the levels' target sizes like this:</p> + * {@code [- 1.001MB 10.01MB 100.1MB 1001MB]} + * <p>and so on...</p> + * + * <p>By doing this, we give {@code max_bytes_for_level_multiplier} priority + * over {@code max_bytes_for_level_base}, for a more predictable LSM tree + * shape. It is useful to limit worst-case space amplification.</p> + * + * <p>{@code max_bytes_for_level_multiplier_additional} is ignored with + * this flag on.</p> + * + * <p>Turning this feature on or off for an existing DB can cause an unexpected + * LSM tree structure, so it's not recommended.</p> + * + * <p>Caution: this option is experimental.</p> + * + * <p>Default: false</p> + * + * @param enableLevelCompactionDynamicLevelBytes boolean value indicating + * if {@code LevelCompactionDynamicLevelBytes} shall be enabled. + * @return the reference to the current option. + */ + Object setLevelCompactionDynamicLevelBytes( + boolean enableLevelCompactionDynamicLevelBytes);
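Given the caveats above, enabling it is a single call, best done only when creating a fresh DB:

    // Experimental (see note above): toggling this on an existing DB
    // can leave an unexpected LSM tree shape, so prefer a new DB.
    cfOpts.setLevelCompactionDynamicLevelBytes(true);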
+ + /** + * <p>Return if {@code LevelCompactionDynamicLevelBytes} is enabled.</p> + * + * <p>For further information see + * {@link #setLevelCompactionDynamicLevelBytes(boolean)}</p>
+ * + * @return boolean value indicating if + * {@code levelCompactionDynamicLevelBytes} is enabled. + */ + boolean levelCompactionDynamicLevelBytes(); + + /** + * The ratio between the total size of level-(L+1) files and the total + * size of level-L files for all L. + * DEFAULT: 10 + * + * @param multiplier the ratio between the total size of level-(L+1) + * files and the total size of level-L files for all L. + * @return the reference to the current option. + * @see #setMaxBytesForLevelBase(long) + */ + Object setMaxBytesForLevelMultiplier(int multiplier); + + /** + * The ratio between the total size of level-(L+1) files and the total + * size of level-L files for all L. + * DEFAULT: 10 + * + * @return the ratio between the total size of level-(L+1) files and + * the total size of level-L files for all L. + * @see #maxBytesForLevelBase() + */ + int maxBytesForLevelMultiplier(); + + /** + * Maximum number of bytes in all compacted files. We avoid expanding + * the lower level file set of a compaction if it would make the + * total compaction cover more than + * (expanded_compaction_factor * targetFileSizeLevel()) many bytes. + * + * @param expandedCompactionFactor the maximum number of bytes in all + * compacted files. + * @return the reference to the current option. + * @see #setSourceCompactionFactor(int) + */ + Object setExpandedCompactionFactor(int expandedCompactionFactor); + + /** + * Maximum number of bytes in all compacted files. We avoid expanding + * the lower level file set of a compaction if it would make the + * total compaction cover more than + * (expanded_compaction_factor * targetFileSizeLevel()) many bytes. + * + * @return the maximum number of bytes in all compacted files. + * @see #sourceCompactionFactor() + */ + int expandedCompactionFactor(); + + /** + * Maximum number of bytes in all source files to be compacted in a + * single compaction run. We avoid picking too many files in the + * source level so that the total source bytes for the compaction + * do not exceed + * (source_compaction_factor * targetFileSizeLevel()) many bytes. + * Default: 1, i.e. pick maxfilesize amount of data as the source of + * a compaction. + * + * @param sourceCompactionFactor the maximum number of bytes in all + * source files to be compacted in a single compaction run. + * @return the reference to the current option. + * @see #setExpandedCompactionFactor(int) + */ + Object setSourceCompactionFactor(int sourceCompactionFactor); + + /** + * Maximum number of bytes in all source files to be compacted in a + * single compaction run. We avoid picking too many files in the + * source level so that the total source bytes for the compaction + * do not exceed + * (source_compaction_factor * targetFileSizeLevel()) many bytes. + * Default: 1, i.e. pick maxfilesize amount of data as the source of + * a compaction. + * + * @return the maximum number of bytes in all source files to be compacted. + * @see #expandedCompactionFactor() + */ + int sourceCompactionFactor(); + + /** + * Control maximum bytes of overlaps in grandparent (i.e., level+2) before we + * stop building a single file in a level->level+1 compaction. + * + * @param maxGrandparentOverlapFactor maximum bytes of overlaps in + * "grandparent" level. + * @return the reference to the current option.
+ */ + Object setMaxGrandparentOverlapFactor( + int maxGrandparentOverlapFactor); + + /** + * Control maximum bytes of overlaps in grandparent (i.e., level+2) before we + * stop building a single file in a level->level+1 compaction. + * + * @return maximum bytes of overlaps in "grandparent" level. + */ + int maxGrandparentOverlapFactor(); + + /** + * Puts are delayed 0-1 ms when any level has a compaction score that exceeds + * soft_rate_limit. This is ignored when == 0.0. + * CONSTRAINT: soft_rate_limit ≤ hard_rate_limit. If this constraint does not + * hold, RocksDB will set soft_rate_limit = hard_rate_limit + * Default: 0 (disabled) + * + * @param softRateLimit the soft-rate-limit of a compaction score + * for put delay. + * @return the reference to the current option. + */ + Object setSoftRateLimit(double softRateLimit); + + /** + * Puts are delayed 0-1 ms when any level has a compaction score that exceeds + * soft_rate_limit. This is ignored when == 0.0. + * CONSTRAINT: soft_rate_limit ≤ hard_rate_limit. If this constraint does not + * hold, RocksDB will set soft_rate_limit = hard_rate_limit + * Default: 0 (disabled) + * + * @return soft-rate-limit for put delay. + */ + double softRateLimit(); + + /** + * Puts are delayed 1ms at a time when any level has a compaction score that + * exceeds hard_rate_limit. This is ignored when ≤ 1.0. + * Default: 0 (disabled) + * + * @param hardRateLimit the hard-rate-limit of a compaction score for put + * delay. + * @return the reference to the current option. + */ + Object setHardRateLimit(double hardRateLimit); + + /** + * Puts are delayed 1ms at a time when any level has a compaction score that + * exceeds hard_rate_limit. This is ignored when ≤ 1.0. + * Default: 0 (disabled) + * + * @return the hard-rate-limit of a compaction score for put delay. + */ + double hardRateLimit(); + + /** + * The maximum time interval a put will be stalled when hard_rate_limit + * is enforced. If 0, then there is no limit. + * Default: 1000 + * + * @param rateLimitDelayMaxMilliseconds the maximum time interval a put + * will be stalled. + * @return the reference to the current option. + */ + Object setRateLimitDelayMaxMilliseconds( + int rateLimitDelayMaxMilliseconds); + + /** + * The maximum time interval a put will be stalled when hard_rate_limit + * is enforced. If 0, then there is no limit. + * Default: 1000 + * + * @return the maximum time interval a put will be stalled when + * hard_rate_limit is enforced. + */ + int rateLimitDelayMaxMilliseconds(); + + /** + * The size of one block in arena memory allocation. + * If ≤ 0, a proper value is automatically calculated (usually 1/10 of + * write_buffer_size). + * + * There are two additional restrictions on the specified size: + * (1) size should be in the range of [4096, 2 << 30] and + * (2) be the multiple of the CPU word (which helps with the memory + * alignment). + * + * We'll automatically check and adjust the size number to make sure it + * conforms to the restrictions. + * Default: 0 + * + * @param arenaBlockSize the size of an arena block + * @return the reference to the current option. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + Object setArenaBlockSize(long arenaBlockSize);
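A sketch of the stall-control knobs just described (values illustrative; note the soft ≤ hard constraint):

    cfOpts.setSoftRateLimit(2.0)                 // start 0-1 ms put delays at score 2.0
          .setHardRateLimit(3.0)                 // 1 ms stalls per put above score 3.0
          .setRateLimitDelayMaxMilliseconds(1000)
          .setArenaBlockSize(0);                 // 0 = derive (~1/10 of write buffer size)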
+ + /** + * The size of one block in arena memory allocation. + * If ≤ 0, a proper value is automatically calculated (usually 1/10 of + * write_buffer_size). + * + * There are two additional restrictions on the specified size: + * (1) size should be in the range of [4096, 2 << 30] and + * (2) be the multiple of the CPU word (which helps with the memory + * alignment). + * + * We'll automatically check and adjust the size number to make sure it + * conforms to the restrictions. + * Default: 0 + * + * @return the size of an arena block + */ + long arenaBlockSize(); + + /** + * Disable automatic compactions. Manual compactions can still + * be issued on this column family + * + * @param disableAutoCompactions true if auto-compactions are disabled. + * @return the reference to the current option. + */ + Object setDisableAutoCompactions(boolean disableAutoCompactions); + + /** + * Disable automatic compactions. Manual compactions can still + * be issued on this column family + * + * @return true if auto-compactions are disabled. + */ + boolean disableAutoCompactions(); + + /** + * Purge duplicate/deleted keys when a memtable is flushed to storage. + * Default: true + * + * @param purgeRedundantKvsWhileFlush true if redundant keys should be + * purged on flush. + * @return the reference to the current option. + */ + Object setPurgeRedundantKvsWhileFlush( + boolean purgeRedundantKvsWhileFlush); + + /** + * Purge duplicate/deleted keys when a memtable is flushed to storage. + * Default: true + * + * @return true if redundant keys are purged on flush. + */ + boolean purgeRedundantKvsWhileFlush(); + + /** + * Set compaction style for DB. + * + * Default: LEVEL. + * + * @param compactionStyle Compaction style. + * @return the reference to the current option. + */ + Object setCompactionStyle(CompactionStyle compactionStyle); + + /** + * Compaction style for DB. + * + * @return Compaction style. + */ + CompactionStyle compactionStyle(); + + /** + * FIFO compaction option. + * The oldest table file will be deleted + * once the sum of table files reaches this size. + * The default value is 1GB (1 * 1024 * 1024 * 1024). + * + * @param maxTableFilesSize the size limit of the total sum of table files. + * @return the instance of the current Object. + */ + Object setMaxTableFilesSizeFIFO(long maxTableFilesSize); + + /** + * FIFO compaction option. + * The oldest table file will be deleted + * once the sum of table files reaches this size. + * The default value is 1GB (1 * 1024 * 1024 * 1024). + * + * @return the size limit of the total sum of table files. + */ + long maxTableFilesSizeFIFO(); + + /** + * If true, compaction will verify checksum on every read that happens + * as part of compaction + * Default: true + * + * @param verifyChecksumsInCompaction true if compaction verifies + * checksum on every read. + * @return the reference to the current option. + */ + Object setVerifyChecksumsInCompaction( + boolean verifyChecksumsInCompaction); + + /** + * If true, compaction will verify checksum on every read that happens + * as part of compaction + * Default: true + * + * @return true if compaction verifies checksum on every read. + */ + boolean verifyChecksumsInCompaction();
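For event-log-like data, the FIFO pieces above combine as (size illustrative):

    cfOpts.setCompactionStyle(CompactionStyle.FIFO)
          .setMaxTableFilesSizeFIFO(1024L * 1024 * 1024); // drop oldest files past ~1 GB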
+ + /** + * An iteration->Next() sequentially skips over keys with the same + * user-key unless this option is set. This number specifies the number + * of keys (with the same userkey) that will be sequentially + * skipped before a reseek is issued. + * Default: 8 + * + * @param maxSequentialSkipInIterations the number of keys that can + * be skipped in an iteration. + * @return the reference to the current option. + */ + Object setMaxSequentialSkipInIterations(long maxSequentialSkipInIterations); + + /** + * An iteration->Next() sequentially skips over keys with the same + * user-key unless this option is set. This number specifies the number + * of keys (with the same userkey) that will be sequentially + * skipped before a reseek is issued. + * Default: 8 + * + * @return the number of keys that can be skipped in an iteration. + */ + long maxSequentialSkipInIterations(); + + /** + * Set the config for mem-table. + * + * @param config the mem-table config. + * @return the instance of the current Object. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + Object setMemTableConfig(MemTableConfig config); + + /** + * Returns the name of the current mem table representation. + * The memtable format can be set using setMemTableConfig. + * + * @return the name of the currently-used memtable factory. + * @see #setMemTableConfig(org.rocksdb.MemTableConfig) + */ + String memTableFactoryName(); + + /** + * Set the config for table format. + * + * @param config the table format config. + * @return the reference of the current Options. + */ + Object setTableFormatConfig(TableFormatConfig config); + + /** + * @return the name of the currently used table factory. + */ + String tableFactoryName(); + + /** + * Allows thread-safe inplace updates. + * If inplace_callback function is not set, + * Put(key, new_value) will update inplace the existing_value iff + * * key exists in current memtable + * * new sizeof(new_value) ≤ sizeof(existing_value) + * * existing_value for that key is a put i.e. kTypeValue + * If inplace_callback function is set, check doc for inplace_callback. + * Default: false. + * + * @param inplaceUpdateSupport true if thread-safe inplace updates + * are allowed. + * @return the reference to the current option. + */ + Object setInplaceUpdateSupport(boolean inplaceUpdateSupport); + + /** + * Allows thread-safe inplace updates. + * If inplace_callback function is not set, + * Put(key, new_value) will update inplace the existing_value iff + * * key exists in current memtable + * * new sizeof(new_value) ≤ sizeof(existing_value) + * * existing_value for that key is a put i.e. kTypeValue + * If inplace_callback function is set, check doc for inplace_callback. + * Default: false. + * + * @return true if thread-safe inplace updates are allowed. + */ + boolean inplaceUpdateSupport(); + + /** + * Number of locks used for inplace update + * Default: 10000, if inplace_update_support = true, else 0. + * + * @param inplaceUpdateNumLocks the number of locks used for + * inplace updates. + * @return the reference to the current option. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + Object setInplaceUpdateNumLocks(long inplaceUpdateNumLocks); + + /** + * Number of locks used for inplace update + * Default: 10000, if inplace_update_support = true, else 0. + * + * @return the number of locks used for inplace update. + */ + long inplaceUpdateNumLocks(); + + /** + * Sets the size ratio of the memtable used by the prefix bloom filter. + * + * This value will be used only when a prefix-extractor is specified. + * + * @param memtablePrefixBloomSizeRatio the size ratio of the memtable + * to be used by the prefix bloom filter. + * @return the reference to the current option.
+ */ + Object setMemtablePrefixBloomSizeRatio(double memtablePrefixBloomSizeRatio); + + /** + * Returns the size ratio of the memtable used by the prefix bloom filter. + * + * This value will be used only when a prefix-extractor is specified. + * + * @return the memtable prefix bloom size ratio. + * @see #useFixedLengthPrefixExtractor(int) + */ + double memtablePrefixBloomSizeRatio(); + + /** + * Control locality of bloom filter probes to improve cache miss rate. + * This option only applies to memtable prefix bloom and plaintable + * prefix bloom. It essentially limits the max number of cache lines each + * bloom filter check can touch. + * This optimization is turned off when set to 0. The number should never + * be greater than the number of probes. This option can boost performance + * for in-memory workloads but should be used with care since it can cause + * a higher false positive rate. + * Default: 0 + * + * @param bloomLocality the level of locality of bloom-filter probes. + * @return the reference to the current option. + */ + Object setBloomLocality(int bloomLocality); + + /** + * Control locality of bloom filter probes to improve cache miss rate. + * This option only applies to memtable prefix bloom and plaintable + * prefix bloom. It essentially limits the max number of cache lines each + * bloom filter check can touch. + * This optimization is turned off when set to 0. The number should never + * be greater than the number of probes. This option can boost performance + * for in-memory workloads but should be used with care since it can cause + * a higher false positive rate. + * Default: 0 + * + * @return the level of locality of bloom-filter probes. + * @see #setBloomLocality(int) + */ + int bloomLocality(); + + /** + * Maximum number of successive merge operations on a key in the memtable. + * + * When a merge operation is added to the memtable and the maximum number of + * successive merges is reached, the value of the key will be calculated and + * inserted into the memtable instead of the merge operation. This will + * ensure that there are never more than max_successive_merges merge + * operations in the memtable. + * + * Default: 0 (disabled) + * + * @param maxSuccessiveMerges the maximum number of successive merges. + * @return the reference to the current option. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + Object setMaxSuccessiveMerges(long maxSuccessiveMerges); + + /** + * Maximum number of successive merge operations on a key in the memtable. + * + * When a merge operation is added to the memtable and the maximum number of + * successive merges is reached, the value of the key will be calculated and + * inserted into the memtable instead of the merge operation. This will + * ensure that there are never more than max_successive_merges merge + * operations in the memtable. + * + * Default: 0 (disabled) + * + * @return the maximum number of successive merges. + */ + long maxSuccessiveMerges(); + + /** + * The number of partial merge operands to accumulate before partial + * merge will be performed. Partial merge will not be called + * if the list of values to merge is less than min_partial_merge_operands. + * + * If min_partial_merge_operands < 2, then it will be treated as 2. + * + * Default: 2 + * + * @param minPartialMergeOperands min partial merge operands + * @return the reference to the current option.
+ */ + Object setMinPartialMergeOperands(int minPartialMergeOperands); + + /** + * The number of partial merge operands to accumulate before partial + * merge will be performed. Partial merge will not be called + * if the list of values to merge is less than min_partial_merge_operands. + * + * If min_partial_merge_operands < 2, then it will be treated as 2. + * + * Default: 2 + * + * @return min partial merge operands + */ + int minPartialMergeOperands(); + + /** + *
<p>This flag specifies that the implementation should optimize the filters + * mainly for cases where keys are found rather than also optimize for keys + * missed. This would be used in cases where the application knows that + * there are very few misses, or the performance in the case of misses is not + * important.</p> + * + * <p>For now, this flag allows us to not store filters for the last level, i.e. + * the largest level which contains data of the LSM store. For keys which + * are hits, the filters in this level are not useful because we will search + * for the data anyway.</p> + * + * <p>NOTE: the filters in other levels are still useful + * even for a key hit because they tell us whether to look in that level or go + * to the higher level.</p> + * + * <p>Default: false</p> + * + * @param optimizeFiltersForHits boolean value indicating if this flag is set. + * @return the reference to the current option. + */ + Object setOptimizeFiltersForHits(boolean optimizeFiltersForHits); + + /** + * <p>Returns the current state of the {@code optimize_filters_for_hits} + * setting.</p> + * + * @return boolean value indicating if the flag + * {@code optimize_filters_for_hits} was set. + */ + boolean optimizeFiltersForHits(); + + /** + * Default memtable memory budget used with the following methods: + * + * <ul> + * <li>{@link #optimizeLevelStyleCompaction()}</li> + * <li>{@link #optimizeUniversalStyleCompaction()}</li> + * </ul> + */ + long DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET = 512 * 1024 * 1024; +}
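As a usage sketch tying the last two items together (illustrative, not part of this patch):

    // Point-lookup-heavy store where misses are rare (see note above):
    cfOpts.setOptimizeFiltersForHits(true);
    // The shared default budget used by the no-arg optimize*Compaction() overloads:
    cfOpts.optimizeUniversalStyleCompaction(
        ColumnFamilyOptionsInterface.DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET);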
diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java b/external/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java new file mode 100755 index 0000000000..22dc7dcf5f --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/CompactionStyle.java @@ -0,0 +1,52 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Enum CompactionStyle + * + * RocksDB supports different styles of compaction. Available + * compaction styles can be chosen using this enumeration. + * + * <ol> + * <li>LEVEL - Level based Compaction style</li> + * <li>UNIVERSAL - Universal Compaction Style is a + * compaction style, targeting the use cases requiring lower write + * amplification, trading off read amplification and space + * amplification.</li> + * <li>FIFO - FIFO compaction style is the simplest + * compaction strategy. It is suited for keeping event log data with + * very low overhead (query log for example). It periodically deletes + * the old data, so it's basically a TTL compaction style.</li> + * </ol>
+ * + * @see + * Universal Compaction + * @see + * FIFO Compaction + */ +public enum CompactionStyle { + LEVEL((byte) 0), + UNIVERSAL((byte) 1), + FIFO((byte) 2); + + private final byte value_; + + private CompactionStyle(byte value) { + value_ = value; + } + + /** + * Returns the byte value of the enumerations value + * + * @return byte representation + */ + public byte getValue() { + return value_; + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/Comparator.java b/external/rocksdb/java/src/main/java/org/rocksdb/Comparator.java new file mode 100755 index 0000000000..009f2e51f4 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/Comparator.java @@ -0,0 +1,32 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Base class for comparators which will receive + * byte[] based access via org.rocksdb.Slice in their + * compare method implementation. + * + * byte[] based slices perform better when small keys + * are involved. When using larger keys consider + * using @see org.rocksdb.DirectComparator + */ +public abstract class Comparator extends AbstractComparator { + + private final long nativeHandle_; + + public Comparator(final ComparatorOptions copt) { + super(); + this.nativeHandle_ = createNewComparator0(copt.nativeHandle_); + } + + @Override + protected final long getNativeHandle() { + return nativeHandle_; + } + + private native long createNewComparator0(final long comparatorOptionsHandle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java b/external/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java new file mode 100755 index 0000000000..3a05befa44 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/ComparatorOptions.java @@ -0,0 +1,51 @@ +package org.rocksdb; + +/** + * This class controls the behaviour + * of Java implementations of + * AbstractComparator + * + * Note that dispose() must be called before a ComparatorOptions + * instance becomes out-of-scope to release the allocated memory in C++. + */ +public class ComparatorOptions extends RocksObject { + public ComparatorOptions() { + super(newComparatorOptions()); + } + + /** + * Use adaptive mutex, which spins in the user space before resorting + * to kernel. This could reduce context switch when the mutex is not + * heavily contended. However, if the mutex is hot, we could end up + * wasting spin time. + * Default: false + * + * @return true if adaptive mutex is used. + */ + public boolean useAdaptiveMutex() { + assert(isOwningHandle()); + return useAdaptiveMutex(nativeHandle_); + } + + /** + * Use adaptive mutex, which spins in the user space before resorting + * to kernel. This could reduce context switch when the mutex is not + * heavily contended. However, if the mutex is hot, we could end up + * wasting spin time. + * Default: false + * + * @param useAdaptiveMutex true if adaptive mutex is used. + * @return the reference to the current comparator options. 
+ */ + public ComparatorOptions setUseAdaptiveMutex(final boolean useAdaptiveMutex) { + assert (isOwningHandle()); + setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex); + return this; + } + + private native static long newComparatorOptions(); + private native boolean useAdaptiveMutex(final long handle); + private native void setUseAdaptiveMutex(final long handle, + final boolean useAdaptiveMutex); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java b/external/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java new file mode 100755 index 0000000000..b4d86166e5 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/CompressionType.java @@ -0,0 +1,94 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Enum CompressionType + * + *
<p>DB contents are stored in a set of blocks, each of which holds a + * sequence of key,value pairs. Each block may be compressed before + * being stored in a file. The following enum describes which + * compression method (if any) is used to compress a block.</p> + */ +public enum CompressionType { + + NO_COMPRESSION((byte) 0, null), + SNAPPY_COMPRESSION((byte) 1, "snappy"), + ZLIB_COMPRESSION((byte) 2, "z"), + BZLIB2_COMPRESSION((byte) 3, "bzip2"), + LZ4_COMPRESSION((byte) 4, "lz4"), + LZ4HC_COMPRESSION((byte) 5, "lz4hc"); + + /** + * <p>Get the CompressionType enumeration value by + * passing the library name to this method.</p> + * + * <p>If the library cannot be found the enumeration + * value {@code NO_COMPRESSION} will be returned.</p> + * + * @param libraryName compression library name. + * + * @return CompressionType instance. + */ + public static CompressionType getCompressionType(String libraryName) { + if (libraryName != null) { + for (CompressionType compressionType : CompressionType.values()) { + if (compressionType.getLibraryName() != null && + compressionType.getLibraryName().equals(libraryName)) { + return compressionType; + } + } + } + return CompressionType.NO_COMPRESSION; + }
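A quick sketch of this lookup helper (the byte-based overload follows next):

    CompressionType snappy = CompressionType.getCompressionType("snappy");
    CompressionType fallback = CompressionType.getCompressionType("no-such-lib");
    // snappy == SNAPPY_COMPRESSION; fallback == NO_COMPRESSION per the contract above.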
+ + /** + * <p>Get the CompressionType enumeration value by + * passing the byte identifier to this method.</p> + * + * <p>If the identifier cannot be found the enumeration + * value {@code NO_COMPRESSION} will be returned.</p> + * + * @param byteIdentifier of CompressionType. + * + * @return CompressionType instance. + */ + public static CompressionType getCompressionType(byte byteIdentifier) { + for (CompressionType compressionType : CompressionType.values()) { + if (compressionType.getValue() == byteIdentifier) { + return compressionType; + } + } + return CompressionType.NO_COMPRESSION; + } + + /** + * <p>Returns the byte value of the enumeration's value.</p> + * + * @return byte representation + */ + public byte getValue() { + return value_; + } + + /** + * <p>Returns the library name of the compression type + * identified by the enumeration value.</p>
+ * + * @return library name + */ + public String getLibraryName() { + return libraryName_; + } + + private CompressionType(byte value, final String libraryName) { + value_ = value; + libraryName_ = libraryName; + } + + private final byte value_; + private final String libraryName_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java b/external/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java new file mode 100755 index 0000000000..878dd4d70f --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/DBOptions.java @@ -0,0 +1,644 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.Properties; + +/** + * DBOptions to control the behavior of a database. It will be used + * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). + * + * If {@link #dispose()} function is not called, then it will be GC'd + * automatically and native resources will be released as part of the process. + */ +public class DBOptions extends RocksObject implements DBOptionsInterface { + static { + RocksDB.loadLibrary(); + } + + /** + * Construct DBOptions. + * + * This constructor will create (by allocating a block of memory) + * an {@code rocksdb::DBOptions} in the c++ side. + */ + public DBOptions() { + super(newDBOptions()); + numShardBits_ = DEFAULT_NUM_SHARD_BITS; + } + + /** + *
<p>Method to get an options instance by using pre-configured + * property values. If one or more values are undefined in + * the context of RocksDB the method will return a null + * value.</p> + * + * <p>Note: Property keys can be derived from + * getter methods within the options class. Example: the method + * {@code allowMmapReads()} has the property key: + * {@code allow_mmap_reads}.</p>
+ * + * @param properties {@link java.util.Properties} instance. + * + * @return {@link org.rocksdb.DBOptions instance} + * or null. + * + * @throws java.lang.IllegalArgumentException if null or empty + * {@link java.util.Properties} instance is passed to the method call. + */ + public static DBOptions getDBOptionsFromProps( + final Properties properties) { + if (properties == null || properties.size() == 0) { + throw new IllegalArgumentException( + "Properties value must contain at least one value."); + } + DBOptions dbOptions = null; + StringBuilder stringBuilder = new StringBuilder(); + for (final String name : properties.stringPropertyNames()){ + stringBuilder.append(name); + stringBuilder.append("="); + stringBuilder.append(properties.getProperty(name)); + stringBuilder.append(";"); + } + long handle = getDBOptionsFromProps( + stringBuilder.toString()); + if (handle != 0){ + dbOptions = new DBOptions(handle); + } + return dbOptions; + } + + @Override + public DBOptions setIncreaseParallelism( + final int totalThreads) { + assert(isOwningHandle()); + setIncreaseParallelism(nativeHandle_, totalThreads); + return this; + } + + @Override + public DBOptions setCreateIfMissing(final boolean flag) { + assert(isOwningHandle()); + setCreateIfMissing(nativeHandle_, flag); + return this; + } + + @Override + public boolean createIfMissing() { + assert(isOwningHandle()); + return createIfMissing(nativeHandle_); + } + + @Override + public DBOptions setCreateMissingColumnFamilies( + final boolean flag) { + assert(isOwningHandle()); + setCreateMissingColumnFamilies(nativeHandle_, flag); + return this; + } + + @Override + public boolean createMissingColumnFamilies() { + assert(isOwningHandle()); + return createMissingColumnFamilies(nativeHandle_); + } + + @Override + public DBOptions setErrorIfExists( + final boolean errorIfExists) { + assert(isOwningHandle()); + setErrorIfExists(nativeHandle_, errorIfExists); + return this; + } + + @Override + public boolean errorIfExists() { + assert(isOwningHandle()); + return errorIfExists(nativeHandle_); + } + + @Override + public DBOptions setParanoidChecks( + final boolean paranoidChecks) { + assert(isOwningHandle()); + setParanoidChecks(nativeHandle_, paranoidChecks); + return this; + } + + @Override + public boolean paranoidChecks() { + assert(isOwningHandle()); + return paranoidChecks(nativeHandle_); + } + + @Override + public DBOptions setRateLimiterConfig( + final RateLimiterConfig config) { + assert(isOwningHandle()); + rateLimiterConfig_ = config; + setRateLimiter(nativeHandle_, config.newRateLimiterHandle()); + return this; + } + + @Override + public DBOptions setLogger(final Logger logger) { + assert(isOwningHandle()); + setLogger(nativeHandle_, logger.nativeHandle_); + return this; + } + + @Override + public DBOptions setInfoLogLevel( + final InfoLogLevel infoLogLevel) { + assert(isOwningHandle()); + setInfoLogLevel(nativeHandle_, infoLogLevel.getValue()); + return this; + } + + @Override + public InfoLogLevel infoLogLevel() { + assert(isOwningHandle()); + return InfoLogLevel.getInfoLogLevel( + infoLogLevel(nativeHandle_)); + } + + @Override + public DBOptions setMaxOpenFiles( + final int maxOpenFiles) { + assert(isOwningHandle()); + setMaxOpenFiles(nativeHandle_, maxOpenFiles); + return this; + } + + @Override + public int maxOpenFiles() { + assert(isOwningHandle()); + return maxOpenFiles(nativeHandle_); + } + + @Override + public DBOptions setMaxTotalWalSize( + final long maxTotalWalSize) { + assert(isOwningHandle()); + 
setMaxTotalWalSize(nativeHandle_, maxTotalWalSize); + return this; + } + + @Override + public long maxTotalWalSize() { + assert(isOwningHandle()); + return maxTotalWalSize(nativeHandle_); + } + + @Override + public DBOptions createStatistics() { + assert(isOwningHandle()); + createStatistics(nativeHandle_); + return this; + } + + @Override + public Statistics statisticsPtr() { + assert(isOwningHandle()); + + long statsPtr = statisticsPtr(nativeHandle_); + if(statsPtr == 0) { + createStatistics(); + statsPtr = statisticsPtr(nativeHandle_); + } + + return new Statistics(statsPtr); + } + + @Override + public DBOptions setDisableDataSync( + final boolean disableDataSync) { + assert(isOwningHandle()); + setDisableDataSync(nativeHandle_, disableDataSync); + return this; + } + + @Override + public boolean disableDataSync() { + assert(isOwningHandle()); + return disableDataSync(nativeHandle_); + } + + @Override + public DBOptions setUseFsync( + final boolean useFsync) { + assert(isOwningHandle()); + setUseFsync(nativeHandle_, useFsync); + return this; + } + + @Override + public boolean useFsync() { + assert(isOwningHandle()); + return useFsync(nativeHandle_); + } + + @Override + public DBOptions setDbLogDir( + final String dbLogDir) { + assert(isOwningHandle()); + setDbLogDir(nativeHandle_, dbLogDir); + return this; + } + + @Override + public String dbLogDir() { + assert(isOwningHandle()); + return dbLogDir(nativeHandle_); + } + + @Override + public DBOptions setWalDir( + final String walDir) { + assert(isOwningHandle()); + setWalDir(nativeHandle_, walDir); + return this; + } + + @Override + public String walDir() { + assert(isOwningHandle()); + return walDir(nativeHandle_); + } + + @Override + public DBOptions setDeleteObsoleteFilesPeriodMicros( + final long micros) { + assert(isOwningHandle()); + setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros); + return this; + } + + @Override + public long deleteObsoleteFilesPeriodMicros() { + assert(isOwningHandle()); + return deleteObsoleteFilesPeriodMicros(nativeHandle_); + } + + @Override + public DBOptions setMaxBackgroundCompactions( + final int maxBackgroundCompactions) { + assert(isOwningHandle()); + setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions); + return this; + } + + @Override + public int maxBackgroundCompactions() { + assert(isOwningHandle()); + return maxBackgroundCompactions(nativeHandle_); + } + + @Override + public DBOptions setMaxBackgroundFlushes( + final int maxBackgroundFlushes) { + assert(isOwningHandle()); + setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes); + return this; + } + + @Override + public int maxBackgroundFlushes() { + assert(isOwningHandle()); + return maxBackgroundFlushes(nativeHandle_); + } + + @Override + public DBOptions setMaxLogFileSize( + final long maxLogFileSize) { + assert(isOwningHandle()); + setMaxLogFileSize(nativeHandle_, maxLogFileSize); + return this; + } + + @Override + public long maxLogFileSize() { + assert(isOwningHandle()); + return maxLogFileSize(nativeHandle_); + } + + @Override + public DBOptions setLogFileTimeToRoll( + final long logFileTimeToRoll) { + assert(isOwningHandle()); + setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll); + return this; + } + + @Override + public long logFileTimeToRoll() { + assert(isOwningHandle()); + return logFileTimeToRoll(nativeHandle_); + } + + @Override + public DBOptions setKeepLogFileNum( + final long keepLogFileNum) { + assert(isOwningHandle()); + setKeepLogFileNum(nativeHandle_, keepLogFileNum); + return this; + } + + 
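As a quick, hedged usage sketch of the Properties-based factory above (the property keys follow the getter-derived naming described in the Javadoc; the chosen values, error handling and explicit dispose() are illustrative only):

import java.util.Properties;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;

public class DBOptionsFromPropsExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary(); // make sure the JNI library is loaded
    final Properties props = new Properties();
    // Getter allowMmapReads() maps to key allow_mmap_reads, etc.
    props.setProperty("allow_mmap_reads", "true");
    props.setProperty("max_background_compactions", "2");
    // getDBOptionsFromProps returns null if the native layer cannot parse an entry.
    final DBOptions dbOptions = DBOptions.getDBOptionsFromProps(props);
    if (dbOptions == null) {
      throw new IllegalStateException("unknown or malformed option");
    }
    dbOptions.dispose();
  }
}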
@Override + public long keepLogFileNum() { + assert(isOwningHandle()); + return keepLogFileNum(nativeHandle_); + } + + @Override + public DBOptions setMaxManifestFileSize( + final long maxManifestFileSize) { + assert(isOwningHandle()); + setMaxManifestFileSize(nativeHandle_, maxManifestFileSize); + return this; + } + + @Override + public long maxManifestFileSize() { + assert(isOwningHandle()); + return maxManifestFileSize(nativeHandle_); + } + + @Override + public DBOptions setTableCacheNumshardbits( + final int tableCacheNumshardbits) { + assert(isOwningHandle()); + setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits); + return this; + } + + @Override + public int tableCacheNumshardbits() { + assert(isOwningHandle()); + return tableCacheNumshardbits(nativeHandle_); + } + + @Override + public DBOptions setWalTtlSeconds( + final long walTtlSeconds) { + assert(isOwningHandle()); + setWalTtlSeconds(nativeHandle_, walTtlSeconds); + return this; + } + + @Override + public long walTtlSeconds() { + assert(isOwningHandle()); + return walTtlSeconds(nativeHandle_); + } + + @Override + public DBOptions setWalSizeLimitMB( + final long sizeLimitMB) { + assert(isOwningHandle()); + setWalSizeLimitMB(nativeHandle_, sizeLimitMB); + return this; + } + + @Override + public long walSizeLimitMB() { + assert(isOwningHandle()); + return walSizeLimitMB(nativeHandle_); + } + + @Override + public DBOptions setManifestPreallocationSize( + final long size) { + assert(isOwningHandle()); + setManifestPreallocationSize(nativeHandle_, size); + return this; + } + + @Override + public long manifestPreallocationSize() { + assert(isOwningHandle()); + return manifestPreallocationSize(nativeHandle_); + } + + @Override + public DBOptions setAllowOsBuffer( + final boolean allowOsBuffer) { + assert(isOwningHandle()); + setAllowOsBuffer(nativeHandle_, allowOsBuffer); + return this; + } + + @Override + public boolean allowOsBuffer() { + assert(isOwningHandle()); + return allowOsBuffer(nativeHandle_); + } + + @Override + public DBOptions setAllowMmapReads( + final boolean allowMmapReads) { + assert(isOwningHandle()); + setAllowMmapReads(nativeHandle_, allowMmapReads); + return this; + } + + @Override + public boolean allowMmapReads() { + assert(isOwningHandle()); + return allowMmapReads(nativeHandle_); + } + + @Override + public DBOptions setAllowMmapWrites( + final boolean allowMmapWrites) { + assert(isOwningHandle()); + setAllowMmapWrites(nativeHandle_, allowMmapWrites); + return this; + } + + @Override + public boolean allowMmapWrites() { + assert(isOwningHandle()); + return allowMmapWrites(nativeHandle_); + } + + @Override + public DBOptions setIsFdCloseOnExec( + final boolean isFdCloseOnExec) { + assert(isOwningHandle()); + setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec); + return this; + } + + @Override + public boolean isFdCloseOnExec() { + assert(isOwningHandle()); + return isFdCloseOnExec(nativeHandle_); + } + + @Override + public DBOptions setStatsDumpPeriodSec( + final int statsDumpPeriodSec) { + assert(isOwningHandle()); + setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec); + return this; + } + + @Override + public int statsDumpPeriodSec() { + assert(isOwningHandle()); + return statsDumpPeriodSec(nativeHandle_); + } + + @Override + public DBOptions setAdviseRandomOnOpen( + final boolean adviseRandomOnOpen) { + assert(isOwningHandle()); + setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen); + return this; + } + + @Override + public boolean adviseRandomOnOpen() { + return 
adviseRandomOnOpen(nativeHandle_); + } + + @Override + public DBOptions setUseAdaptiveMutex( + final boolean useAdaptiveMutex) { + assert(isOwningHandle()); + setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex); + return this; + } + + @Override + public boolean useAdaptiveMutex() { + assert(isOwningHandle()); + return useAdaptiveMutex(nativeHandle_); + } + + @Override + public DBOptions setBytesPerSync( + final long bytesPerSync) { + assert(isOwningHandle()); + setBytesPerSync(nativeHandle_, bytesPerSync); + return this; + } + + @Override + public long bytesPerSync() { + return bytesPerSync(nativeHandle_); + } + + static final int DEFAULT_NUM_SHARD_BITS = -1; + + /** + *

<p>Private constructor to be used by + * {@link #getDBOptionsFromProps(java.util.Properties)}.</p>

+ * + * @param nativeHandle native handle to DBOptions instance. + */ + private DBOptions(final long nativeHandle) { + super(nativeHandle); + } + + private static native long getDBOptionsFromProps( + String optString); + + private native static long newDBOptions(); + @Override protected final native void disposeInternal(final long handle); + + private native void setIncreaseParallelism(long handle, int totalThreads); + private native void setCreateIfMissing(long handle, boolean flag); + private native boolean createIfMissing(long handle); + private native void setCreateMissingColumnFamilies( + long handle, boolean flag); + private native boolean createMissingColumnFamilies(long handle); + private native void setErrorIfExists(long handle, boolean errorIfExists); + private native boolean errorIfExists(long handle); + private native void setParanoidChecks( + long handle, boolean paranoidChecks); + private native boolean paranoidChecks(long handle); + private native void setRateLimiter(long handle, + long rateLimiterHandle); + private native void setLogger(long handle, + long loggerHandle); + private native void setInfoLogLevel(long handle, byte logLevel); + private native byte infoLogLevel(long handle); + private native void setMaxOpenFiles(long handle, int maxOpenFiles); + private native int maxOpenFiles(long handle); + private native void setMaxTotalWalSize(long handle, + long maxTotalWalSize); + private native long maxTotalWalSize(long handle); + private native void createStatistics(long optHandle); + private native long statisticsPtr(long optHandle); + private native void setDisableDataSync(long handle, boolean disableDataSync); + private native boolean disableDataSync(long handle); + private native boolean useFsync(long handle); + private native void setUseFsync(long handle, boolean useFsync); + private native void setDbLogDir(long handle, String dbLogDir); + private native String dbLogDir(long handle); + private native void setWalDir(long handle, String walDir); + private native String walDir(long handle); + private native void setDeleteObsoleteFilesPeriodMicros( + long handle, long micros); + private native long deleteObsoleteFilesPeriodMicros(long handle); + private native void setMaxBackgroundCompactions( + long handle, int maxBackgroundCompactions); + private native int maxBackgroundCompactions(long handle); + private native void setMaxBackgroundFlushes( + long handle, int maxBackgroundFlushes); + private native int maxBackgroundFlushes(long handle); + private native void setMaxLogFileSize(long handle, long maxLogFileSize) + throws IllegalArgumentException; + private native long maxLogFileSize(long handle); + private native void setLogFileTimeToRoll( + long handle, long logFileTimeToRoll) throws IllegalArgumentException; + private native long logFileTimeToRoll(long handle); + private native void setKeepLogFileNum(long handle, long keepLogFileNum) + throws IllegalArgumentException; + private native long keepLogFileNum(long handle); + private native void setMaxManifestFileSize( + long handle, long maxManifestFileSize); + private native long maxManifestFileSize(long handle); + private native void setTableCacheNumshardbits( + long handle, int tableCacheNumshardbits); + private native int tableCacheNumshardbits(long handle); + private native void setWalTtlSeconds(long handle, long walTtlSeconds); + private native long walTtlSeconds(long handle); + private native void setWalSizeLimitMB(long handle, long sizeLimitMB); + private native long walSizeLimitMB(long handle); + private native void 
setManifestPreallocationSize( + long handle, long size) throws IllegalArgumentException; + private native long manifestPreallocationSize(long handle); + private native void setAllowOsBuffer( + long handle, boolean allowOsBuffer); + private native boolean allowOsBuffer(long handle); + private native void setAllowMmapReads( + long handle, boolean allowMmapReads); + private native boolean allowMmapReads(long handle); + private native void setAllowMmapWrites( + long handle, boolean allowMmapWrites); + private native boolean allowMmapWrites(long handle); + private native void setIsFdCloseOnExec( + long handle, boolean isFdCloseOnExec); + private native boolean isFdCloseOnExec(long handle); + private native void setStatsDumpPeriodSec( + long handle, int statsDumpPeriodSec); + private native int statsDumpPeriodSec(long handle); + private native void setAdviseRandomOnOpen( + long handle, boolean adviseRandomOnOpen); + private native boolean adviseRandomOnOpen(long handle); + private native void setUseAdaptiveMutex( + long handle, boolean useAdaptiveMutex); + private native boolean useAdaptiveMutex(long handle); + private native void setBytesPerSync( + long handle, long bytesPerSync); + private native long bytesPerSync(long handle); + + int numShardBits_; + RateLimiterConfig rateLimiterConfig_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java b/external/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java new file mode 100755 index 0000000000..917e26ab08 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/DBOptionsInterface.java @@ -0,0 +1,764 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +public interface DBOptionsInterface { + + /** + *

<p>By default, RocksDB uses only one background thread for flush and + * compaction. Calling this function will set it up such that a total of + * `total_threads` threads is used.</p>

+ * + *

<p>You almost definitely want to call this function if your system is + * bottlenecked by RocksDB.</p>
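To make the parallelism hint above concrete, a minimal sketch, assuming the public DBOptions constructor from this hunk; the per-core thread count mirrors the advice in the Javadoc:

import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;

public class ParallelismExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    final DBOptions options = new DBOptions();
    try {
      // One background thread per core, as suggested above.
      options.setIncreaseParallelism(Runtime.getRuntime().availableProcessors());
    } finally {
      options.dispose();
    }
  }
}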

+ * + * @param totalThreads The total number of threads to be used by RocksDB. + * A good value is the number of cores. + * + * @return the instance of the current Options + */ + Object setIncreaseParallelism(int totalThreads); + + /** + * If this value is set to true, then the database will be created + * if it is missing during {@code RocksDB.open()}. + * Default: false + * + * @param flag a flag indicating whether to create the database if it + * is missing during the {@link RocksDB#open(org.rocksdb.Options, String)} + * operation. + * @return the instance of the current Options + * @see RocksDB#open(org.rocksdb.Options, String) + */ + Object setCreateIfMissing(boolean flag); + + /** + * Return true if the create_if_missing flag is set to true. + * If true, the database will be created if it is missing. + * + * @return true if the createIfMissing option is set to true. + * @see #setCreateIfMissing(boolean) + */ + boolean createIfMissing(); + + /** + *

<p>If true, missing column families will be automatically created.</p>

+ * + *

<p>Default: false</p>
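A sketch combining the two creation flags; it assumes the multi-column-family RocksDB.open overload and ColumnFamilyDescriptor from the same package (not shown in this hunk), and the path and extra family name are purely illustrative:

import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import org.rocksdb.ColumnFamilyDescriptor;
import org.rocksdb.ColumnFamilyHandle;
import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class CreateMissingExample {
  public static void main(final String[] args) throws RocksDBException {
    final DBOptions options = new DBOptions()
        .setCreateIfMissing(true)
        .setCreateMissingColumnFamilies(true);
    final List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
        new ColumnFamilyDescriptor("archive".getBytes())); // created on demand
    final List<ColumnFamilyHandle> handles = new ArrayList<>();
    final RocksDB db = RocksDB.open(options, "/tmp/example-db", descriptors, handles);
    db.close();
    options.dispose();
  }
}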

+ * + * @param flag a flag indicating if missing column families shall be + * created automatically. + * @return the instance of the current Object. + */ + Object setCreateMissingColumnFamilies(boolean flag); + + /** + * Return true if the create_missing_column_families flag is set + * to true. If true, column families will be created if they are missing. + * + * @return true if createMissingColumnFamilies is set to + * true. + * @see #setCreateMissingColumnFamilies(boolean) + */ + boolean createMissingColumnFamilies(); + + /** + * If true, an error will be thrown during RocksDB.open() if the + * database already exists. + * Default: false + * + * @param errorIfExists if true, an exception will be thrown + * during {@code RocksDB.open()} if the database already exists. + * @return the reference to the current option. + * @see RocksDB#open(org.rocksdb.Options, String) + */ + Object setErrorIfExists(boolean errorIfExists); + + /** + * If true, an error will be thrown during RocksDB.open() if the + * database already exists. + * + * @return if true, an error is raised when the specified database + * already exists before open. + */ + boolean errorIfExists(); + + /** + * If true, the implementation will do aggressive checking of the + * data it is processing and will stop early if it detects any + * errors. This may have unforeseen ramifications: for example, a + * corruption of one DB entry may cause a large number of entries to + * become unreadable or for the entire DB to become unopenable. + * If any of the writes to the database fails (Put, Delete, Merge, Write), + * the database will switch to read-only mode and fail all other + * Write operations. + * Default: true + * + * @param paranoidChecks a flag to indicate whether paranoid-check + * is on. + * @return the reference to the current option. + */ + Object setParanoidChecks(boolean paranoidChecks); + + /** + * If true, the implementation will do aggressive checking of the + * data it is processing and will stop early if it detects any + * errors. This may have unforeseen ramifications: for example, a + * corruption of one DB entry may cause a large number of entries to + * become unreadable or for the entire DB to become unopenable. + * If any of the writes to the database fails (Put, Delete, Merge, Write), + * the database will switch to read-only mode and fail all other + * Write operations. + * + * @return a boolean indicating whether paranoid-check is on. + */ + boolean paranoidChecks(); + + /** + * Used to control the write rate of flush and compaction. Flush has higher + * priority than compaction. Rate limiting is disabled if nullptr. + * Default: nullptr + * + * @param config rate limiter config. + * @return the instance of the current Object. + */ + Object setRateLimiterConfig(RateLimiterConfig config); + + /** + *

<p>Any internal progress/error information generated by + * the db will be written to the Logger if it is non-nullptr, + * or to a file stored in the same directory as the DB + * contents if info_log is nullptr.</p>

+ * + *

<p>Default: nullptr</p>

+ * + * @param logger {@link Logger} instance. + * @return the instance of the current Object. + */ + Object setLogger(Logger logger); + + /** + *

<p>Sets the RocksDB log level. Default level is INFO.</p>

+ * + * @param infoLogLevel log level to set. + * @return the instance of the current Object. + */ + Object setInfoLogLevel(InfoLogLevel infoLogLevel); + + /** + *

<p>Returns the currently set log level.</p>

+ * @return {@link org.rocksdb.InfoLogLevel} instance. + */ + InfoLogLevel infoLogLevel(); + + /** + * Number of open files that can be used by the DB. You may need to + * increase this if your database has a large working set. Value -1 means + * files opened are always kept open. You can estimate number of files based + * on {@code target_file_size_base} and {@code target_file_size_multiplier} + * for level-based compaction. For universal-style compaction, you can usually + * set it to -1. + * Default: 5000 + * + * @param maxOpenFiles the maximum number of open files. + * @return the instance of the current Object. + */ + Object setMaxOpenFiles(int maxOpenFiles); + + /** + * Number of open files that can be used by the DB. You may need to + * increase this if your database has a large working set. Value -1 means + * files opened are always kept open. You can estimate number of files based + * on {@code target_file_size_base} and {@code target_file_size_multiplier} + * for level-based compaction. For universal-style compaction, you can usually + * set it to -1. + * + * @return the maximum number of open files. + */ + int maxOpenFiles(); + + /** + *

<p>Once write-ahead logs exceed this size, we will start forcing the + * flush of column families whose memtables are backed by the oldest live + * WAL file (i.e. the ones that are causing all the space amplification).</p> + *

+ *

<p>If set to 0 (default), we will dynamically choose the WAL size limit to + * be [sum of all write_buffer_size * max_write_buffer_number] * 2.</p>

+ *

<p>Default: 0</p>
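A worked sketch of the dynamic default quoted above, with made-up sizes (two column families, 64 MB write buffers, up to three buffers each):

public class WalLimitArithmetic {
  public static void main(final String[] args) {
    final long writeBufferSize = 64L * 1024 * 1024; // 64 MB, illustrative
    final int maxWriteBufferNumber = 3;
    final int columnFamilies = 2;
    // [sum of all write_buffer_size * max_write_buffer_number] * 2
    final long dynamicWalLimit =
        columnFamilies * writeBufferSize * maxWriteBufferNumber * 2;
    System.out.println(dynamicWalLimit); // 805306368 bytes, i.e. 768 MB
  }
}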

+ * + * @param maxTotalWalSize max total wal size. + * @return the instance of the current Object. + */ + Object setMaxTotalWalSize(long maxTotalWalSize); + + /** + *

<p>Returns the max total wal size. Once write-ahead logs exceed this size, + * we will start forcing the flush of column families whose memtables are + * backed by the oldest live WAL file (i.e. the ones that are causing all + * the space amplification).</p>

+ * + *

<p>If set to 0 (default), we will dynamically choose the WAL size limit + * to be [sum of all write_buffer_size * max_write_buffer_number] * 2.</p> + *

+ * + * @return max total wal size + */ + long maxTotalWalSize(); + + /** + *

<p>Creates a statistics object which collects metrics about database operations. + * Statistics objects should not be shared between DB instances, as + * they do not use any locks to prevent concurrent updates.</p>
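A sketch of enabling statistics before a database is opened; the ticker read assumes the TickerType enum and Statistics#getTickerCount from the same package, neither shown in this hunk:

import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;
import org.rocksdb.Statistics;
import org.rocksdb.TickerType;

public class StatisticsExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    final DBOptions options = new DBOptions();
    try {
      options.createStatistics(); // enable before RocksDB.open()
      final Statistics stats = options.statisticsPtr();
      System.out.println(stats.getTickerCount(TickerType.BYTES_READ));
    } finally {
      options.dispose();
    }
  }
}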

+ * + * @return the instance of the current Object. + * @see RocksDB#open(org.rocksdb.Options, String) + */ + Object createStatistics(); + + /** + *

<p>Returns the statistics object. Calls {@link #createStatistics()} if + * C++ returns {@code nullptr} for statistics.</p>

+ * + * @return the instance of the statistics object. + * @see #createStatistics() + */ + Statistics statisticsPtr(); + + /** + *

<p>If true, then the contents of manifest and data files are + * not synced to stable storage. Their contents remain in the + * OS buffers till the OS decides to flush them.</p>

+ * + *

<p>This option is good for bulk-loading of data.</p>

+ * + *

<p>Once the bulk-loading is complete, please issue a sync to + * the OS to flush all dirty buffers to stable storage.</p>

+ * + *

<p>Default: false</p>
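A bulk-loading sketch using this flag via the single-column-family Options class (an implementor of this interface, not shown here); the path and key count are illustrative:

import org.rocksdb.Options;
import org.rocksdb.RocksDB;
import org.rocksdb.RocksDBException;

public class BulkLoadSketch {
  public static void main(final String[] args) throws RocksDBException {
    final Options options = new Options()
        .setCreateIfMissing(true)
        .setDisableDataSync(true); // skip per-write syncs for the load
    final RocksDB db = RocksDB.open(options, "/tmp/bulk-load-db");
    try {
      for (int i = 0; i < 100000; i++) {
        db.put(("key" + i).getBytes(), ("value" + i).getBytes());
      }
    } finally {
      // Afterwards, issue an OS-level sync before relying on the data.
      db.close();
      options.dispose();
    }
  }
}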

+ * + * @param disableDataSync a boolean flag to specify whether to + * disable data sync. + * @return the instance of the current Object. + */ + Object setDisableDataSync(boolean disableDataSync); + + /** + * If true, then the contents of data files are not synced + * to stable storage. Their contents remain in the OS buffers till the + * OS decides to flush them. This option is good for bulk-loading + * of data. Once the bulk-loading is complete, please issue a + * sync to the OS to flush all dirty buffers to stable storage. + * + * @return if true, then data-sync is disabled. + */ + boolean disableDataSync(); + + /** + *

<p>If true, then every store to stable storage will issue an fsync.</p>

+ *

<p>If false, then every store to stable storage will issue an fdatasync. + * This parameter should be set to true while storing data to + * filesystems like ext3 that can lose files after a reboot.</p>

+ *

<p>Default: false</p>

+ * + * @param useFsync a boolean flag to specify whether to use fsync + * @return the instance of the current Object. + */ + Object setUseFsync(boolean useFsync); + + /** + *

<p>If true, then every store to stable storage will issue an fsync.</p>

+ *

<p>If false, then every store to stable storage will issue an fdatasync. + * This parameter should be set to true while storing data to + * filesystems like ext3 that can lose files after a reboot.</p>

+ * + * @return boolean value indicating if fsync is used. + */ + boolean useFsync(); + + /** + * This specifies the info LOG dir. + * If it is empty, the log files will be in the same dir as data. + * If it is non empty, the log files will be in the specified dir, + * and the db data dir's absolute path will be used as the log file + * name's prefix. + * + * @param dbLogDir the path to the info log directory + * @return the instance of the current Object. + */ + Object setDbLogDir(String dbLogDir); + + /** + * Returns the directory of info log. + * + * If it is empty, the log files will be in the same dir as data. + * If it is non empty, the log files will be in the specified dir, + * and the db data dir's absolute path will be used as the log file + * name's prefix. + * + * @return the path to the info log directory + */ + String dbLogDir(); + + /** + * This specifies the absolute dir path for write-ahead logs (WAL). + * If it is empty, the log files will be in the same dir as data, + * and dbname is used as the data dir by default. + * If it is non empty, the log files will be kept in the specified dir. + * When destroying the db, + * all log files in wal_dir and the dir itself are deleted. + * + * @param walDir the path to the write-ahead-log directory. + * @return the instance of the current Object. + */ + Object setWalDir(String walDir); + + /** + * Returns the path to the write-ahead-logs (WAL) directory. + * + * If it is empty, the log files will be in the same dir as data, + * and dbname is used as the data dir by default. + * If it is non empty, the log files will be kept in the specified dir. + * When destroying the db, + * all log files in wal_dir and the dir itself are deleted. + * + * @return the path to the write-ahead-logs (WAL) directory. + */ + String walDir(); + + /** + * The periodicity when obsolete files get deleted. The default + * value is 6 hours. The files that get out of scope by the compaction + * process will still get automatically deleted on every compaction, + * regardless of this setting. + * + * @param micros the time interval in micros + * @return the instance of the current Object. + */ + Object setDeleteObsoleteFilesPeriodMicros(long micros); + + /** + * The periodicity when obsolete files get deleted. The default + * value is 6 hours. The files that get out of scope by the compaction + * process will still get automatically deleted on every compaction, + * regardless of this setting. + * + * @return the time interval in micros when obsolete files will be deleted. + */ + long deleteObsoleteFilesPeriodMicros(); + + /** + * Specifies the maximum number of concurrent background compaction jobs, + * submitted to the default LOW priority thread pool. + * If you're increasing this, also consider increasing the number of threads + * in the LOW priority thread pool. + * Default: 1 + * + * @param maxBackgroundCompactions the maximum number of background + * compaction jobs. + * @return the instance of the current Object. + * + * @see RocksEnv#setBackgroundThreads(int) + * @see RocksEnv#setBackgroundThreads(int, int) + * @see #maxBackgroundFlushes() + */ + Object setMaxBackgroundCompactions(int maxBackgroundCompactions); + + /** + * Returns the maximum number of concurrent background compaction jobs, + * submitted to the default LOW priority thread pool. + * When increasing this number, we may also want to consider increasing the + * number of threads in the LOW priority thread pool. + * Default: 1 + * + * @return the maximum number of concurrent background compaction jobs.
+ * @see RocksEnv#setBackgroundThreads(int) + * @see RocksEnv#setBackgroundThreads(int, int) + */ + int maxBackgroundCompactions(); + + /** + * Specifies the maximum number of concurrent background flush jobs. + * If you're increasing this, also consider increasing the number of threads + * in the HIGH priority thread pool. + * Default: 1 + * + * @param maxBackgroundFlushes number of max concurrent flush jobs + * @return the instance of the current Object. + * + * @see RocksEnv#setBackgroundThreads(int) + * @see RocksEnv#setBackgroundThreads(int, int) + * @see #maxBackgroundCompactions() + */ + Object setMaxBackgroundFlushes(int maxBackgroundFlushes); + + /** + * Returns the maximum number of concurrent background flush jobs. + * If you're increasing this, also consider increasing the number of threads + * in the HIGH priority thread pool. + * Default: 1 + * + * @return the maximum number of concurrent background flush jobs. + * @see RocksEnv#setBackgroundThreads(int) + * @see RocksEnv#setBackgroundThreads(int, int) + */ + int maxBackgroundFlushes(); + + /** + * Specifies the maximum size of an info log file. If the current log file + * is larger than `max_log_file_size`, a new info log file will + * be created. + * If 0, all logs will be written to one log file. + * + * @param maxLogFileSize the maximum size of an info log file. + * @return the instance of the current Object. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + Object setMaxLogFileSize(long maxLogFileSize); + + /** + * Returns the maximum size of an info log file. If the current log file + * is larger than this size, a new info log file will be created. + * If 0, all logs will be written to one log file. + * + * @return the maximum size of the info log file. + */ + long maxLogFileSize(); + + /** + * Specifies the time interval for the info log file to roll (in seconds). + * If specified with non-zero value, log file will be rolled + * if it has been active longer than `log_file_time_to_roll`. + * Default: 0 (disabled) + * + * @param logFileTimeToRoll the time interval in seconds. + * @return the instance of the current Object. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + Object setLogFileTimeToRoll(long logFileTimeToRoll); + + /** + * Returns the time interval for the info log file to roll (in seconds). + * If specified with non-zero value, log file will be rolled + * if it has been active longer than `log_file_time_to_roll`. + * Default: 0 (disabled) + * + * @return the time interval in seconds. + */ + long logFileTimeToRoll(); + + /** + * Specifies the maximum number of info log files to be kept. + * Default: 1000 + * + * @param keepLogFileNum the maximum number of info log files to be kept. + * @return the instance of the current Object. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + Object setKeepLogFileNum(long keepLogFileNum); + + /** + * Returns the maximum number of info log files to be kept. + * Default: 1000 + * + * @return the maximum number of info log files to be kept. + */ + long keepLogFileNum(); + + /** + * Manifest file is rolled over on reaching this limit. + * The older manifest file will be deleted. + * The default value is MAX_INT so that roll-over does not take place.
+ * + * @param maxManifestFileSize the size limit of a manifest file. + * @return the instance of the current Object. + */ + Object setMaxManifestFileSize(long maxManifestFileSize); + + /** + * Manifest file is rolled over on reaching this limit. + * The older manifest file will be deleted. + * The default value is MAX_INT so that roll-over does not take place. + * + * @return the size limit of a manifest file. + */ + long maxManifestFileSize(); + + /** + * Number of shards used for table cache. + * + * @param tableCacheNumshardbits the number of shards + * @return the instance of the current Object. + */ + Object setTableCacheNumshardbits(int tableCacheNumshardbits); + + /** + * Number of shards used for table cache. + * + * @return the number of shards used for table cache. + */ + int tableCacheNumshardbits(); + + /** + * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs + * will be deleted. +
* <ol> + * <li>If both set to 0, logs will be deleted asap and will not get into + * the archive.</li> + * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, + * WAL files will be checked every 10 min and if total size is greater + * than WAL_size_limit_MB, they will be deleted starting with the + * earliest until size_limit is met. All empty files will be deleted.</li> + * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then + * WAL files will be checked every WAL_ttl_seconds / 2 and those that + * are older than WAL_ttl_seconds will be deleted.</li> + * <li>If both are not 0, WAL files will be checked every 10 min and both + * checks will be performed with ttl being first.</li> + * </ol>
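A configuration sketch exercising rule 4 of the list above, with both limits non-zero; the concrete numbers are arbitrary:

import org.rocksdb.DBOptions;
import org.rocksdb.RocksDB;

public class WalArchiveSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    final DBOptions options = new DBOptions();
    try {
      options
          .setWalTtlSeconds(24 * 60 * 60) // archived WALs kept at most a day...
          .setWalSizeLimitMB(1024);       // ...and at most ~1 GB in total
    } finally {
      options.dispose();
    }
  }
}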
+ * + * @param walTtlSeconds the ttl seconds + * @return the instance of the current Object. + * @see #setWalSizeLimitMB(long) + */ + Object setWalTtlSeconds(long walTtlSeconds); + + /** + * WalTtlSeconds() and walSizeLimitMB() affect how archived logs + * will be deleted. + *
* <ol> + * <li>If both set to 0, logs will be deleted asap and will not get into + * the archive.</li> + * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, + * WAL files will be checked every 10 min and if total size is greater + * than WAL_size_limit_MB, they will be deleted starting with the + * earliest until size_limit is met. All empty files will be deleted.</li> + * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then + * WAL files will be checked every WAL_ttl_seconds / 2 and those that + * are older than WAL_ttl_seconds will be deleted.</li> + * <li>If both are not 0, WAL files will be checked every 10 min and both + * checks will be performed with ttl being first.</li> + * </ol>
+ * + * @return the wal-ttl seconds + * @see #walSizeLimitMB() + */ + long walTtlSeconds(); + + /** + * WalTtlSeconds() and walSizeLimitMB() affect how archived logs + * will be deleted. + *
* <ol> + * <li>If both set to 0, logs will be deleted asap and will not get into + * the archive.</li> + * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, + * WAL files will be checked every 10 min and if total size is greater + * than WAL_size_limit_MB, they will be deleted starting with the + * earliest until size_limit is met. All empty files will be deleted.</li> + * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then + * WAL files will be checked every WAL_ttl_seconds / 2 and those that + * are older than WAL_ttl_seconds will be deleted.</li> + * <li>If both are not 0, WAL files will be checked every 10 min and both + * checks will be performed with ttl being first.</li> + * </ol>
+ * + * @param sizeLimitMB size limit in mega-bytes. + * @return the instance of the current Object. + * @see #walSizeLimitMB() + */ + Object setWalSizeLimitMB(long sizeLimitMB); + + /** + * {@link #walTtlSeconds()} and {@link #walSizeLimitMB()} affect how archived logs + * will be deleted. + *
<ol> + * <li>If both set to 0, logs will be deleted asap and will not get into + * the archive.</li> + * <li>If WAL_ttl_seconds is 0 and WAL_size_limit_MB is not 0, + * WAL files will be checked every 10 min and if total size is greater + * than WAL_size_limit_MB, they will be deleted starting with the + * earliest until size_limit is met. All empty files will be deleted.</li> + * <li>If WAL_ttl_seconds is not 0 and WAL_size_limit_MB is 0, then + * WAL files will be checked every WAL_ttl_seconds / 2 and those that + * are older than WAL_ttl_seconds will be deleted.</li> + * <li>If both are not 0, WAL files will be checked every 10 min and both + * checks will be performed with ttl being first.</li> + * </ol>
+ * @return size limit in mega-bytes. + * @see #setWalSizeLimitMB(long) + */ + long walSizeLimitMB(); + + /** + * Number of bytes to preallocate (via fallocate) the manifest + * files. Default is 4mb, which is reasonable to reduce random IO + * as well as prevent overallocation for mounts that preallocate + * large amounts of data (such as xfs's allocsize option). + * + * @param size the size in bytes + * @return the instance of the current Object. + * @throws java.lang.IllegalArgumentException thrown on 32-Bit platforms + * while overflowing the underlying platform specific value. + */ + Object setManifestPreallocationSize(long size); + + /** + * Number of bytes to preallocate (via fallocate) the manifest + * files. Default is 4mb, which is reasonable to reduce random IO + * as well as prevent overallocation for mounts that preallocate + * large amounts of data (such as xfs's allocsize option). + * + * @return size in bytes. + */ + long manifestPreallocationSize(); + + /** + * Data being read from file storage may be buffered in the OS. + * Default: true + * + * @param allowOsBuffer if true, then OS buffering is allowed. + * @return the instance of the current Object. + */ + Object setAllowOsBuffer(boolean allowOsBuffer); + + /** + * Data being read from file storage may be buffered in the OS. + * Default: true + * + * @return if true, then OS buffering is allowed. + */ + boolean allowOsBuffer(); + + /** + * Allow the OS to mmap a file for reading sst tables. + * Default: false + * + * @param allowMmapReads true if mmap reads are allowed. + * @return the instance of the current Object. + */ + Object setAllowMmapReads(boolean allowMmapReads); + + /** + * Allow the OS to mmap a file for reading sst tables. + * Default: false + * + * @return true if mmap reads are allowed. + */ + boolean allowMmapReads(); + + /** + * Allow the OS to mmap a file for writing. Default: false + * + * @param allowMmapWrites true if mmap writes are allowed. + * @return the instance of the current Object. + */ + Object setAllowMmapWrites(boolean allowMmapWrites); + + /** + * Allow the OS to mmap a file for writing. Default: false + * + * @return true if mmap writes are allowed. + */ + boolean allowMmapWrites(); + + /** + * Disable child processes from inheriting open files. Default: true + * + * @param isFdCloseOnExec true if child process inheriting open + * files is disabled. + * @return the instance of the current Object. + */ + Object setIsFdCloseOnExec(boolean isFdCloseOnExec); + + /** + * Disable child processes from inheriting open files. Default: true + * + * @return true if child process inheriting open files is disabled. + */ + boolean isFdCloseOnExec(); + + /** + * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec + * Default: 3600 (1 hour) + * + * @param statsDumpPeriodSec time interval in seconds. + * @return the instance of the current Object. + */ + Object setStatsDumpPeriodSec(int statsDumpPeriodSec); + + /** + * If not zero, dump rocksdb.stats to LOG every stats_dump_period_sec + * Default: 3600 (1 hour) + * + * @return time interval in seconds. + */ + int statsDumpPeriodSec(); + + /** + * If set true, will hint the underlying file system that the file + * access pattern is random, when a sst file is opened. + * Default: true + * + * @param adviseRandomOnOpen true if hinting random access is on. + * @return the instance of the current Object.
+ */ + Object setAdviseRandomOnOpen(boolean adviseRandomOnOpen); + + /** + * If set true, will hint the underlying file system that the file + * access pattern is random, when a sst file is opened. + * Default: true + * + * @return true if hinting random access is on. + */ + boolean adviseRandomOnOpen(); + + /** + * Use adaptive mutex, which spins in the user space before resorting + * to kernel. This could reduce context switch when the mutex is not + * heavily contended. However, if the mutex is hot, we could end up + * wasting spin time. + * Default: false + * + * @param useAdaptiveMutex true if adaptive mutex is used. + * @return the instance of the current Object. + */ + Object setUseAdaptiveMutex(boolean useAdaptiveMutex); + + /** + * Use adaptive mutex, which spins in the user space before resorting + * to kernel. This could reduce context switch when the mutex is not + * heavily contended. However, if the mutex is hot, we could end up + * wasting spin time. + * Default: false + * + * @return true if adaptive mutex is used. + */ + boolean useAdaptiveMutex(); + + /** + * Allows OS to incrementally sync files to disk while they are being + * written, asynchronously, in the background. + * Issue one request for every bytes_per_sync written. 0 turns it off. + * Default: 0 + * + * @param bytesPerSync size in bytes + * @return the instance of the current Object. + */ + Object setBytesPerSync(long bytesPerSync); + + /** + * Allows OS to incrementally sync files to disk while they are being + * written, asynchronously, in the background. + * Issue one request for every bytes_per_sync written. 0 turns it off. + * Default: 0 + * + * @return size in bytes + */ + long bytesPerSync(); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/DirectComparator.java b/external/rocksdb/java/src/main/java/org/rocksdb/DirectComparator.java new file mode 100755 index 0000000000..d288047569 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/DirectComparator.java @@ -0,0 +1,33 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Base class for comparators which will receive + * ByteBuffer based access via org.rocksdb.DirectSlice + * in their compare method implementation. + * + * ByteBuffer based slices perform better when large keys + * are involved. When using smaller keys consider + * using @see org.rocksdb.Comparator + */ +public abstract class DirectComparator extends AbstractComparator { + + private final long nativeHandle_; + + public DirectComparator(final ComparatorOptions copt) { + super(); + this.nativeHandle_ = createNewDirectComparator0(copt.nativeHandle_); + } + + @Override + protected final long getNativeHandle() { + return nativeHandle_; + } + + private native long createNewDirectComparator0( + final long comparatorOptionsHandle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java b/external/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java new file mode 100755 index 0000000000..8f96eb49f6 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/DirectSlice.java @@ -0,0 +1,115 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.nio.ByteBuffer; + +/** + * Base class for slices which will receive direct + * ByteBuffer based access to the underlying data. + * + * ByteBuffer backed slices typically perform better with + * larger keys and values. When using smaller keys and + * values consider using {@link org.rocksdb.Slice} + */ +public class DirectSlice extends AbstractSlice<ByteBuffer> { + public final static DirectSlice NONE = new DirectSlice(); + + /** + * Called from JNI to construct a new Java DirectSlice + * without an underlying C++ object set + * at creation time. + * + * Note: You should be aware that it is intentionally marked as + * package-private. This is so that developers cannot construct their own + * default DirectSlice objects (at present). As developers cannot construct + * their own DirectSlice objects through this, they are not creating + * underlying C++ DirectSlice objects, and so there is nothing to free + * (dispose) from Java. + */ + DirectSlice() { + super(); + } + + /** + * Constructs a slice + * where the data is taken from + * a String. + * + * @param str The string + */ + public DirectSlice(final String str) { + super(createNewSliceFromString(str)); + } + + /** + * Constructs a slice where the data is + * read from the provided + * ByteBuffer up to a certain length + * + * @param data The buffer containing the data + * @param length The length of the data to use for the slice + */ + public DirectSlice(final ByteBuffer data, final int length) { + super(createNewDirectSlice0(ensureDirect(data), length)); + } + + /** + * Constructs a slice where the data is + * read from the provided + * ByteBuffer + * + * @param data The buffer containing the data + */ + public DirectSlice(final ByteBuffer data) { + super(createNewDirectSlice1(ensureDirect(data))); + } + + private static ByteBuffer ensureDirect(final ByteBuffer data) { + // TODO(AR) consider throwing a checked exception, as if it's not direct + // this can SIGSEGV + assert(data.isDirect()); + return data; + } + + /** + * Retrieves the byte at a specific offset + * from the underlying data + * + * @param offset The (zero-based) offset of the byte to retrieve + * + * @return the requested byte + */ + public byte get(int offset) { + return get0(getNativeHandle(), offset); + } + + /** + * Clears the backing slice + */ + public void clear() { + clear0(getNativeHandle()); + } + + /** + * Drops the specified {@code n} + * number of bytes from the start + * of the backing slice + * + * @param n The number of bytes to drop + */ + public void removePrefix(final int n) { + removePrefix0(getNativeHandle(), n); + } + + private native static long createNewDirectSlice0(final ByteBuffer data, + final int length); + private native static long createNewDirectSlice1(final ByteBuffer data); + @Override protected final native ByteBuffer data0(long handle); + private native byte get0(long handle, int offset); + private native void clear0(long handle); + private native void removePrefix0(long handle, int length); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java b/external/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java new file mode 100755 index 0000000000..e27a9853ff --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/EncodingType.java @@ -0,0 +1,55 @@
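A hedged usage sketch for the DirectSlice class added above; note the buffer must be allocated direct, since ensureDirect only asserts and does not convert, and the key bytes here are illustrative:

import java.nio.ByteBuffer;
import org.rocksdb.DirectSlice;
import org.rocksdb.RocksDB;

public class DirectSliceSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    final byte[] key = "a-sample-key".getBytes();
    final ByteBuffer data = ByteBuffer.allocateDirect(key.length);
    data.put(key);
    final DirectSlice slice = new DirectSlice(data, key.length);
    System.out.println((char) slice.get(0)); // prints 'a'
  }
}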
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * EncodingType + * + *

<p>The value will determine how to encode keys + * when writing to a new SST file.</p>

+ * + *

<p>This value will be stored + * inside the SST file and will be used when reading from + * the file, which makes it possible for users to choose a + * different encoding type when reopening a DB. Files with + * different encoding types can co-exist in the same DB and + * can be read.</p>

+ */ +public enum EncodingType { + /** + * Always write full keys without any special encoding. + */ + kPlain((byte) 0), + /** + *

<p>Finds opportunities to write the same prefix once for multiple rows. + * In some cases, when a key follows a previous key with the same prefix, + * instead of writing out the full key, it just writes out the size of the + * shared prefix, as well as other bytes, to save some bytes.</p>

+ * + *

<p>When using this option, the user is required to use the same prefix + * extractor to make sure the same prefix will be extracted from the same key. + * The Name() value of the prefix extractor will be stored in the file. When + * reopening the file, the name of the options.prefix_extractor given will be + * bitwise compared to the prefix extractors stored in the file. An error + * will be returned if the two don't match.</p>

+ */ + kPrefix((byte) 1); + + /** + * Returns the byte value of the enumeration's value. + * + * @return byte representation + */ + public byte getValue() { + return value_; + } + + private EncodingType(byte value) { + value_ = value; + } + + private final byte value_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/Env.java b/external/rocksdb/java/src/main/java/org/rocksdb/Env.java new file mode 100755 index 0000000000..7d30ea5df1 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/Env.java @@ -0,0 +1,92 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Base class for all Env implementations in RocksDB. + */ +public abstract class Env extends RocksObject { + public static final int FLUSH_POOL = 0; + public static final int COMPACTION_POOL = 1; + + /** + *

<p>Returns the default environment suitable for the current operating + * system.</p>

+ * + *

<p>The result of {@code getDefault()} is a singleton whose ownership + * belongs to rocksdb c++. As a result, the returned RocksEnv will not + * have the ownership of its c++ resource, and calling its dispose() + * will be a no-op.</p>

+ * + * @return the default {@link org.rocksdb.RocksEnv} instance. + */ + public static Env getDefault() { + return default_env_; + } + + /** + *

<p>Sets the number of background worker threads of the flush pool + * for this environment.</p>

+ *

<p>Default number: 1</p>
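A short sketch of the thread-pool knobs described here, using the shared default Env; the pool sizes are illustrative:

import org.rocksdb.Env;
import org.rocksdb.RocksDB;

public class EnvThreadsSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    final Env env = Env.getDefault(); // shared singleton; dispose() is a no-op
    env.setBackgroundThreads(2);                      // FLUSH_POOL by default
    env.setBackgroundThreads(4, Env.COMPACTION_POOL);
    System.out.println(env.getThreadPoolQueueLen(Env.COMPACTION_POOL));
  }
}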

+ * + * @param num the number of threads + * + * @return current {@link RocksEnv} instance. + */ + public Env setBackgroundThreads(final int num) { + return setBackgroundThreads(num, FLUSH_POOL); + } + + /** + *

<p>Sets the number of background worker threads of the specified thread + * pool for this environment.</p>

+ * + * @param num the number of threads + * @param poolID the id of the specified thread pool. Should be either + * FLUSH_POOL or COMPACTION_POOL. + * + *

<p>Default number: 1</p>

+ * @return current {@link RocksEnv} instance. + */ + public Env setBackgroundThreads(final int num, final int poolID) { + setBackgroundThreads(nativeHandle_, num, poolID); + return this; + } + + /** + *

<p>Returns the length of the queue associated with the specified + * thread pool.</p>

+ * + * @param poolID the id of the specified thread pool. Should be either + * FLUSH_POOL or COMPACTION_POOL. + * + * @return the thread pool queue length. + */ + public int getThreadPoolQueueLen(final int poolID) { + return getThreadPoolQueueLen(nativeHandle_, poolID); + } + + + protected Env(final long nativeHandle) { + super(nativeHandle); + } + + static { + default_env_ = new RocksEnv(getDefaultEnvInternal()); + } + + /** + *

<p>The static default Env. The ownership of its native handle + * belongs to rocksdb c++ and is not able to be released on the Java + * side.</p>

+ */ + static Env default_env_; + + private static native long getDefaultEnvInternal(); + private native void setBackgroundThreads( + long handle, int num, int priority); + private native int getThreadPoolQueueLen(long handle, int poolID); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/Filter.java b/external/rocksdb/java/src/main/java/org/rocksdb/Filter.java new file mode 100755 index 0000000000..01853d9694 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/Filter.java @@ -0,0 +1,35 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Filters are stored in rocksdb and are consulted automatically + * by rocksdb to decide whether or not to read some + * information from disk. In many cases, a filter can cut down the + * number of disk seeks from a handful to a single disk seek per + * DB::Get() call. + */ +public abstract class Filter extends RocksObject { + + protected Filter(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Deletes underlying C++ filter pointer. + * + * Note that this function should be called only after all + * RocksDB instances referencing the filter are closed. + * Otherwise an undefined behavior will occur. + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + @Override + protected final native void disposeInternal(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java b/external/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java new file mode 100755 index 0000000000..4931b5d85b --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/FlushOptions.java @@ -0,0 +1,46 @@ +package org.rocksdb; + +/** + * FlushOptions to be passed to flush operations of + * {@link org.rocksdb.RocksDB}. + */ +public class FlushOptions extends RocksObject { + + /** + * Construct a new instance of FlushOptions. + */ + public FlushOptions(){ + super(newFlushOptions()); + } + + /** + * Set if the flush operation shall block until it terminates. + * + * @param waitForFlush boolean value indicating if the flush + * operation waits for termination of the flush process. + * + * @return instance of current FlushOptions. + */ + public FlushOptions setWaitForFlush(final boolean waitForFlush) { + assert(isOwningHandle()); + setWaitForFlush(nativeHandle_, waitForFlush); + return this; + } + + /** + * Wait for flush to finish. + * + * @return boolean value indicating if the flush operation + * waits for termination of the flush process. + */ + public boolean waitForFlush() { + assert(isOwningHandle()); + return waitForFlush(nativeHandle_); + } + + private native static long newFlushOptions(); + @Override protected final native void disposeInternal(final long handle); + private native void setWaitForFlush(long handle, + boolean wait); + private native boolean waitForFlush(long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/GenericRateLimiterConfig.java b/external/rocksdb/java/src/main/java/org/rocksdb/GenericRateLimiterConfig.java new file mode 100755 index 0000000000..cc00c6f0ae --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/GenericRateLimiterConfig.java @@ -0,0 +1,66 @@ +// Copyright (c) 2011-present, Facebook, Inc.
All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +/** + * Config for rate limiter, which is used to control write rate of flush and + * compaction. + * + * @see RateLimiterConfig + */ +public class GenericRateLimiterConfig extends RateLimiterConfig { + private static final long DEFAULT_REFILL_PERIOD_MICROS = (100 * 1000); + private static final int DEFAULT_FAIRNESS = 10; + + /** + * GenericRateLimiterConfig constructor + * + * @param rateBytesPerSecond this is the only parameter you want to set + * most of the time. It controls the total write rate of compaction + * and flush in bytes per second. Currently, RocksDB does not enforce + * rate limit for anything other than flush and compaction, e.g. write to WAL. + * @param refillPeriodMicros this controls how often tokens are refilled. For example, + * when rate_bytes_per_sec is set to 10MB/s and refill_period_us is set to + * 100ms, then 1MB is refilled every 100ms internally. Larger value can lead to + * burstier writes while smaller value introduces more CPU overhead. + * The default should work for most cases. + * @param fairness RateLimiter accepts high-pri requests and low-pri requests. + * A low-pri request is usually blocked in favor of hi-pri request. Currently, + * RocksDB assigns low-pri to request from compaction and high-pri to request + * from flush. Low-pri requests can get blocked if flush requests come in + * continuously. This fairness parameter grants low-pri requests permission by + * fairness chance even though high-pri requests exist to avoid starvation. + * You should be good by leaving it at default 10. + */ + public GenericRateLimiterConfig(final long rateBytesPerSecond, + final long refillPeriodMicros, final int fairness) { + rateBytesPerSecond_ = rateBytesPerSecond; + refillPeriodMicros_ = refillPeriodMicros; + fairness_ = fairness; + } + + /** + * GenericRateLimiterConfig constructor + * + * @param rateBytesPerSecond this is the only parameter you want to set + * most of the time. It controls the total write rate of compaction + * and flush in bytes per second. Currently, RocksDB does not enforce + * rate limit for anything other than flush and compaction, e.g. write to WAL. + */ + public GenericRateLimiterConfig(final long rateBytesPerSecond) { + this(rateBytesPerSecond, DEFAULT_REFILL_PERIOD_MICROS, DEFAULT_FAIRNESS); + } + + @Override protected long newRateLimiterHandle() { + return newRateLimiterHandle(rateBytesPerSecond_, refillPeriodMicros_, + fairness_); + } + + private native long newRateLimiterHandle(long rateBytesPerSecond, + long refillPeriodMicros, int fairness); + private final long rateBytesPerSecond_; + private final long refillPeriodMicros_; + private final int fairness_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java b/external/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java new file mode 100755 index 0000000000..d56c46c290 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/HashLinkedListMemTableConfig.java @@ -0,0 +1,173 @@ +package org.rocksdb; + +/** + * The config for hash linked list memtable representation + * Such memtable contains a fix-sized array of buckets, where + * each bucket points to a sorted singly-linked + * list (or null if the bucket is empty). 
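A sketch pairing the GenericRateLimiterConfig added above with DBOptions#setRateLimiterConfig from earlier in this patch; the 10 MB/s cap is an arbitrary example, and the single-argument constructor keeps the documented defaults (100 ms refill period, fairness 10):

import org.rocksdb.DBOptions;
import org.rocksdb.GenericRateLimiterConfig;
import org.rocksdb.RocksDB;

public class RateLimiterSketch {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    final GenericRateLimiterConfig config =
        new GenericRateLimiterConfig(10L * 1024 * 1024); // 10 MB/s
    final DBOptions options = new DBOptions();
    try {
      options.setRateLimiterConfig(config);
    } finally {
      options.dispose();
    }
  }
}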
+ * + * Note that since this mem-table representation relies on the + * key prefix, it is required to invoke one of the usePrefixExtractor + * functions to specify how to extract key prefix given a key. + * If proper prefix-extractor is not set, then RocksDB will + * use the default memtable representation (SkipList) instead + * and post a warning in the LOG. + */ +public class HashLinkedListMemTableConfig extends MemTableConfig { + public static final long DEFAULT_BUCKET_COUNT = 50000; + public static final long DEFAULT_HUGE_PAGE_TLB_SIZE = 0; + public static final int DEFAULT_BUCKET_ENTRIES_LOG_THRES = 4096; + public static final boolean + DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH = true; + public static final int DEFAUL_THRESHOLD_USE_SKIPLIST = 256; + + /** + * HashLinkedListMemTableConfig constructor + */ + public HashLinkedListMemTableConfig() { + bucketCount_ = DEFAULT_BUCKET_COUNT; + hugePageTlbSize_ = DEFAULT_HUGE_PAGE_TLB_SIZE; + bucketEntriesLoggingThreshold_ = DEFAULT_BUCKET_ENTRIES_LOG_THRES; + ifLogBucketDistWhenFlush_ = DEFAULT_IF_LOG_BUCKET_DIST_WHEN_FLUSH; + thresholdUseSkiplist_ = DEFAUL_THRESHOLD_USE_SKIPLIST; + } + + /** + * Set the number of buckets in the fixed-size array used + * in the hash linked-list mem-table. + * + * @param count the number of hash buckets. + * @return the reference to the current HashLinkedListMemTableConfig. + */ + public HashLinkedListMemTableConfig setBucketCount( + final long count) { + bucketCount_ = count; + return this; + } + + /** + * Returns the number of buckets that will be used in the memtable + * created based on this config. + * + * @return the number of buckets + */ + public long bucketCount() { + return bucketCount_; + } + + /** + *

<p>Set the size of huge tlb or allocate the hashtable bytes from + * malloc if {@code size <= 0}.</p> + * + * <p>The user needs to reserve huge pages for it to be allocated, + * like: {@code sysctl -w vm.nr_hugepages=20}</p> + * + * <p>See linux documentation/vm/hugetlbpage.txt</p>

+ * + * @param size if set to {@code <= 0} hashtable bytes from malloc + * @return the reference to the current HashLinkedListMemTableConfig. + */ + public HashLinkedListMemTableConfig setHugePageTlbSize( + final long size) { + hugePageTlbSize_ = size; + return this; + } + + /** + * Returns the size value of hugePageTlbSize. + * + * @return the hugePageTlbSize. + */ + public long hugePageTlbSize() { + return hugePageTlbSize_; + } + + /** + * If number of entries in one bucket exceeds that setting, log + * about it. + * + * @param threshold - number of entries in a single bucket before + * logging starts. + * @return the reference to the current HashLinkedListMemTableConfig. + */ + public HashLinkedListMemTableConfig + setBucketEntriesLoggingThreshold(final int threshold) { + bucketEntriesLoggingThreshold_ = threshold; + return this; + } + + /** + * Returns the maximum number of entries in one bucket before + * logging starts. + * + * @return maximum number of entries in one bucket before logging + * starts. + */ + public int bucketEntriesLoggingThreshold() { + return bucketEntriesLoggingThreshold_; + } + + /** + * If true the distrubition of number of entries will be logged. + * + * @param logDistribution - boolean parameter indicating if number + * of entry distribution shall be logged. + * @return the reference to the current HashLinkedListMemTableConfig. + */ + public HashLinkedListMemTableConfig + setIfLogBucketDistWhenFlush(final boolean logDistribution) { + ifLogBucketDistWhenFlush_ = logDistribution; + return this; + } + + /** + * Returns information about logging the distribution of + * number of entries on flush. + * + * @return if distrubtion of number of entries shall be logged. + */ + public boolean ifLogBucketDistWhenFlush() { + return ifLogBucketDistWhenFlush_; + } + + /** + * Set maximum number of entries in one bucket. Exceeding this val + * leads to a switch from LinkedList to SkipList. + * + * @param threshold maximum number of entries before SkipList is + * used. + * @return the reference to the current HashLinkedListMemTableConfig. + */ + public HashLinkedListMemTableConfig + setThresholdUseSkiplist(final int threshold) { + thresholdUseSkiplist_ = threshold; + return this; + } + + /** + * Returns entries per bucket threshold before LinkedList is + * replaced by SkipList usage for that bucket. + * + * @return entries per bucket threshold before SkipList is used. 
+ */ + public int thresholdUseSkiplist() { + return thresholdUseSkiplist_; + } + + @Override protected long newMemTableFactoryHandle() { + return newMemTableFactoryHandle(bucketCount_, hugePageTlbSize_, + bucketEntriesLoggingThreshold_, ifLogBucketDistWhenFlush_, + thresholdUseSkiplist_); + } + + private native long newMemTableFactoryHandle(long bucketCount, + long hugePageTlbSize, int bucketEntriesLoggingThreshold, + boolean ifLogBucketDistWhenFlush, int thresholdUseSkiplist) + throws IllegalArgumentException; + + private long bucketCount_; + private long hugePageTlbSize_; + private int bucketEntriesLoggingThreshold_; + private boolean ifLogBucketDistWhenFlush_; + private int thresholdUseSkiplist_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java b/external/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java new file mode 100755 index 0000000000..fe1779b1cf --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/HashSkipListMemTableConfig.java @@ -0,0 +1,105 @@ +package org.rocksdb; + +/** + * The config for hash skip-list mem-table representation. + * Such mem-table representation contains a fix-sized array of + * buckets, where each bucket points to a skiplist (or null if the + * bucket is empty). + * + * Note that since this mem-table representation relies on the + * key prefix, it is required to invoke one of the usePrefixExtractor + * functions to specify how to extract key prefix given a key. + * If proper prefix-extractor is not set, then RocksDB will + * use the default memtable representation (SkipList) instead + * and post a warning in the LOG. + */ +public class HashSkipListMemTableConfig extends MemTableConfig { + public static final int DEFAULT_BUCKET_COUNT = 1000000; + public static final int DEFAULT_BRANCHING_FACTOR = 4; + public static final int DEFAULT_HEIGHT = 4; + + /** + * HashSkipListMemTableConfig constructor + */ + public HashSkipListMemTableConfig() { + bucketCount_ = DEFAULT_BUCKET_COUNT; + branchingFactor_ = DEFAULT_BRANCHING_FACTOR; + height_ = DEFAULT_HEIGHT; + } + + /** + * Set the number of hash buckets used in the hash skiplist memtable. + * Default = 1000000. + * + * @param count the number of hash buckets used in the hash + * skiplist memtable. + * @return the reference to the current HashSkipListMemTableConfig. + */ + public HashSkipListMemTableConfig setBucketCount( + final long count) { + bucketCount_ = count; + return this; + } + + /** + * @return the number of hash buckets + */ + public long bucketCount() { + return bucketCount_; + } + + /** + * Set the height of the skip list. Default = 4. + * + * @param height height to set. + * + * @return the reference to the current HashSkipListMemTableConfig. + */ + public HashSkipListMemTableConfig setHeight(final int height) { + height_ = height; + return this; + } + + /** + * @return the height of the skip list. + */ + public int height() { + return height_; + } + + /** + * Set the branching factor used in the hash skip-list memtable. + * This factor controls the probabilistic size ratio between adjacent + * links in the skip list. + * + * @param bf the probabilistic size ratio between adjacent link + * lists in the skip list. + * @return the reference to the current HashSkipListMemTableConfig. + */ + public HashSkipListMemTableConfig setBranchingFactor( + final int bf) { + branchingFactor_ = bf; + return this; + } + + /** + * @return branching factor, the probabilistic size ratio between + * adjacent links in the skip list. 
+ */ + public int branchingFactor() { + return branchingFactor_; + } + + @Override protected long newMemTableFactoryHandle() { + return newMemTableFactoryHandle( + bucketCount_, height_, branchingFactor_); + } + + private native long newMemTableFactoryHandle( + long bucketCount, int height, int branchingFactor) + throws IllegalArgumentException; + + private long bucketCount_; + private int branchingFactor_; + private int height_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java b/external/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java new file mode 100755 index 0000000000..a920f4b4e2 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/HistogramData.java @@ -0,0 +1,44 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +public class HistogramData { + private final double median_; + private final double percentile95_; + private final double percentile99_; + private final double average_; + private final double standardDeviation_; + + public HistogramData(final double median, final double percentile95, + final double percentile99, final double average, + final double standardDeviation) { + median_ = median; + percentile95_ = percentile95; + percentile99_ = percentile99; + average_ = average; + standardDeviation_ = standardDeviation; + } + + public double getMedian() { + return median_; + } + + public double getPercentile95() { + return percentile95_; + } + + public double getPercentile99() { + return percentile99_; + } + + public double getAverage() { + return average_; + } + + public double getStandardDeviation() { + return standardDeviation_; + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java b/external/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java new file mode 100755 index 0000000000..a4459eecc8 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/HistogramType.java @@ -0,0 +1,40 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
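As a quick illustration of how the memtable configs above are meant to be wired up (a hedged sketch, not part of the diff: the class name, bucket sizing, and 8-byte prefix length are illustrative), note that per the Javadoc a prefix extractor must be set on Options, otherwise RocksDB silently falls back to the default SkipList memtable:

import org.rocksdb.HashSkipListMemTableConfig;
import org.rocksdb.Options;

public class MemTableConfigSketch {
  public static Options buildOptions() {
    // hash skip-list memtable with the defaults spelled out explicitly
    final Options options = new Options()
        .setCreateIfMissing(true)
        .setMemTableConfig(new HashSkipListMemTableConfig()
            .setBucketCount(1000000)
            .setHeight(4)
            .setBranchingFactor(4));
    // without a prefix extractor RocksDB falls back to SkipList and only
    // warns in the LOG; fixed 8-byte prefixes are an arbitrary choice here
    options.useFixedLengthPrefixExtractor(8);
    return options;
  }
}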
+ +package org.rocksdb; + +public enum HistogramType { + DB_GET(0), + DB_WRITE(1), + COMPACTION_TIME(2), + TABLE_SYNC_MICROS(3), + COMPACTION_OUTFILE_SYNC_MICROS(4), + WAL_FILE_SYNC_MICROS(5), + MANIFEST_FILE_SYNC_MICROS(6), + // TIME SPENT IN IO DURING TABLE OPEN + TABLE_OPEN_IO_MICROS(7), + DB_MULTIGET(8), + READ_BLOCK_COMPACTION_MICROS(9), + READ_BLOCK_GET_MICROS(10), + WRITE_RAW_BLOCK_MICROS(11), + STALL_L0_SLOWDOWN_COUNT(12), + STALL_MEMTABLE_COMPACTION_COUNT(13), + STALL_L0_NUM_FILES_COUNT(14), + HARD_RATE_LIMIT_DELAY_COUNT(15), + SOFT_RATE_LIMIT_DELAY_COUNT(16), + NUM_FILES_IN_SINGLE_COMPACTION(17), + DB_SEEK(18), + WRITE_STALL(19); + + private final int value_; + + private HistogramType(int value) { + value_ = value; + } + + public int getValue() { + return value_; + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/IndexType.java b/external/rocksdb/java/src/main/java/org/rocksdb/IndexType.java new file mode 100755 index 0000000000..db24a6f681 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/IndexType.java @@ -0,0 +1,37 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * IndexType used in conjunction with BlockBasedTable. + */ +public enum IndexType { + /** + * A space efficient index block that is optimized for + * binary-search-based index. + */ + kBinarySearch((byte) 0), + /** + * The hash index, if enabled, will do the hash lookup when + * {@code Options.prefix_extractor} is provided. + */ + kHashSearch((byte) 1); + + /** + * Returns the byte value of the enumerations value + * + * @return byte representation + */ + public byte getValue() { + return value_; + } + + private IndexType(byte value) { + value_ = value; + } + + private final byte value_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java b/external/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java new file mode 100755 index 0000000000..971c0b2ec5 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/InfoLogLevel.java @@ -0,0 +1,48 @@ +package org.rocksdb; + +/** + * RocksDB log levels. + */ +public enum InfoLogLevel { + DEBUG_LEVEL((byte)0), + INFO_LEVEL((byte)1), + WARN_LEVEL((byte)2), + ERROR_LEVEL((byte)3), + FATAL_LEVEL((byte)4), + HEADER_LEVEL((byte)5), + NUM_INFO_LOG_LEVELS((byte)6); + + private final byte value_; + + private InfoLogLevel(byte value) { + value_ = value; + } + + /** + * Returns the byte value of the enumerations value + * + * @return byte representation + */ + public byte getValue() { + return value_; + } + + /** + * Get InfoLogLevel by byte value. + * + * @param value byte representation of InfoLogLevel. + * + * @return {@link org.rocksdb.InfoLogLevel} instance or null. + * @throws java.lang.IllegalArgumentException if an invalid + * value is provided. 
+ */ + public static InfoLogLevel getInfoLogLevel(byte value) { + for (InfoLogLevel infoLogLevel : InfoLogLevel.values()) { + if (infoLogLevel.getValue() == value){ + return infoLogLevel; + } + } + throw new IllegalArgumentException( + "Illegal value provided for InfoLogLevel."); + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/Logger.java b/external/rocksdb/java/src/main/java/org/rocksdb/Logger.java new file mode 100755 index 0000000000..5db377dde1 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/Logger.java @@ -0,0 +1,111 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + *

<p>This class provides a custom logger functionality + * in Java which wraps {@code RocksDB} logging facilities. + * </p> + * + * <p>Using this class RocksDB can log with common + * Java logging APIs like Log4j or Slf4j without keeping + * database logs in the filesystem.</p> + * + * <strong>Performance</strong> + * <p>There are certain performance penalties using a Java + * {@code Logger} implementation within production code. + * </p> + * + * <p> + * A log level can be set using {@link org.rocksdb.Options} or + * {@link Logger#setInfoLogLevel(InfoLogLevel)}. The set log level + * influences the underlying native code. Each log message is + * checked against the set log level and if the log level is more + * verbose than the set log level, native allocations will be made + * and data structures are allocated. + * </p> + * + * <p>Every log message which will be emitted by native code will + * trigger expensive native to Java transitions. So the preferred + * setting for production use is either + * {@link org.rocksdb.InfoLogLevel#ERROR_LEVEL} or + * {@link org.rocksdb.InfoLogLevel#FATAL_LEVEL}. + * </p>

+ */ +public abstract class Logger extends AbstractImmutableNativeReference { + + final long nativeHandle_; + + /** + *

<p>AbstractLogger constructor.</p> + * + * <p><strong>Important:</strong> the log level set within + * the {@link org.rocksdb.Options} instance will be used as + * maximum log level of RocksDB.</p>

+ * + * @param options {@link org.rocksdb.Options} instance. + */ + public Logger(final Options options) { + super(true); + this.nativeHandle_ = createNewLoggerOptions(options.nativeHandle_); + } + + /** + *

<p>AbstractLogger constructor.</p> + * + * <p><strong>Important:</strong> the log level set within + * the {@link org.rocksdb.DBOptions} instance will be used + * as maximum log level of RocksDB.</p>

+ * + * @param dboptions {@link org.rocksdb.DBOptions} instance. + */ + public Logger(final DBOptions dboptions) { + super(true); + this.nativeHandle_ = createNewLoggerDbOptions(dboptions.nativeHandle_); + } + + /** + * Set {@link org.rocksdb.InfoLogLevel} to AbstractLogger. + * + * @param infoLogLevel {@link org.rocksdb.InfoLogLevel} instance. + */ + public void setInfoLogLevel(final InfoLogLevel infoLogLevel) { + setInfoLogLevel(nativeHandle_, infoLogLevel.getValue()); + } + + /** + * Return the loggers log level. + * + * @return {@link org.rocksdb.InfoLogLevel} instance. + */ + public InfoLogLevel infoLogLevel() { + return InfoLogLevel.getInfoLogLevel( + infoLogLevel(nativeHandle_)); + } + + protected abstract void log(InfoLogLevel infoLogLevel, + String logMsg); + + /** + * Deletes underlying C++ slice pointer. + * Note that this function should be called only after all + * RocksDB instances referencing the slice are closed. + * Otherwise an undefined behavior will occur. + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + protected native long createNewLoggerOptions( + long options); + protected native long createNewLoggerDbOptions( + long dbOptions); + protected native void setInfoLogLevel(long handle, + byte infoLogLevel); + protected native byte infoLogLevel(long handle); + private native void disposeInternal(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java b/external/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java new file mode 100755 index 0000000000..8b854917f9 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/MemTableConfig.java @@ -0,0 +1,29 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +/** + * MemTableConfig is used to config the internal mem-table of a RocksDB. + * It is required for each memtable to have one such sub-class to allow + * Java developers to use it. + * + * To make a RocksDB to use a specific MemTable format, its associated + * MemTableConfig should be properly set and passed into Options + * via Options.setMemTableFactory() and open the db using that Options. + * + * @see Options + */ +public abstract class MemTableConfig { + /** + * This function should only be called by Options.setMemTableConfig(), + * which will create a c++ shared-pointer to the c++ MemTableRepFactory + * that associated with the Java MemTableConfig. + * + * @see Options#setMemTableConfig(MemTableConfig) + * + * @return native handle address to native memory table instance. + */ + abstract protected long newMemTableFactoryHandle(); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java b/external/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java new file mode 100755 index 0000000000..3abea024d4 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/MergeOperator.java @@ -0,0 +1,15 @@ +// Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com). All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
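To make the Logger contract above concrete, here is a hedged sketch of a subclass that forwards RocksDB log lines to stderr; only classes added in this diff are used, and the class name and output format are illustrative:

import org.rocksdb.InfoLogLevel;
import org.rocksdb.Logger;
import org.rocksdb.Options;

public class StderrLogger extends Logger {
  public StderrLogger(final Options options) {
    super(options);
  }

  @Override
  protected void log(final InfoLogLevel infoLogLevel, final String logMsg) {
    // every invocation crosses from native code into Java, so keep it cheap
    System.err.println(infoLogLevel.name() + ": " + logMsg);
  }
}

It would then be registered via options.setLogger(new StderrLogger(options)); as the class Javadoc notes, ERROR_LEVEL or FATAL_LEVEL is the sensible production setting to keep the JNI transition cost down.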
+ +package org.rocksdb; + +/** + * MergeOperator holds an operator to be applied when compacting + * two merge operands held under the same key in order to obtain a single + * value. + */ +public interface MergeOperator { + long newMergeOperatorHandle(); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java b/external/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java new file mode 100755 index 0000000000..96d364cd1c --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/NativeLibraryLoader.java @@ -0,0 +1,124 @@ +package org.rocksdb; + +import java.io.*; +import java.nio.file.Files; +import java.nio.file.StandardCopyOption; + +import org.rocksdb.util.Environment; + +/** + * This class is used to load the RocksDB shared library from within the jar. + * The shared library is extracted to a temp folder and loaded from there. + */ +public class NativeLibraryLoader { + //singleton + private static final NativeLibraryLoader instance = new NativeLibraryLoader(); + private static boolean initialized = false; + + private static final String sharedLibraryName = Environment.getSharedLibraryName("rocksdb"); + private static final String jniLibraryName = Environment.getJniLibraryName("rocksdb"); + private static final String jniLibraryFileName = Environment.getJniLibraryFileName("rocksdb"); + private static final String tempFilePrefix = "librocksdbjni"; + private static final String tempFileSuffix = Environment.getJniLibraryExtension(); + + /** + * Get a reference to the NativeLibraryLoader + * + * @return The NativeLibraryLoader + */ + public static NativeLibraryLoader getInstance() { + return instance; + } + + /** + * Firstly attempts to load the library from java.library.path, + * if that fails then it falls back to extracting + * the library from the classpath + * {@link org.rocksdb.NativeLibraryLoader#loadLibraryFromJar(java.lang.String)} + * + * @param tmpDir A temporary directory to use + * to copy the native library to when loading from the classpath. + * If null, or the empty string, we rely on Java's + * {@link java.io.File#createTempFile(String, String)} + * function to provide a temporary location. + * The temporary file will be registered for deletion + * on exit. + * + * @throws java.io.IOException if a filesystem operation fails. + */ + public synchronized void loadLibrary(final String tmpDir) throws IOException { + try { + System.loadLibrary(sharedLibraryName); + } catch(final UnsatisfiedLinkError ule1) { + try { + System.loadLibrary(jniLibraryName); + } catch(final UnsatisfiedLinkError ule2) { + loadLibraryFromJar(tmpDir); + } + } + } + + /** + * Attempts to extract the native RocksDB library + * from the classpath and load it + * + * @param tmpDir A temporary directory to use + * to copy the native library to. If null, + * or the empty string, we rely on Java's + * {@link java.io.File#createTempFile(String, String)} + * function to provide a temporary location. + * The temporary file will be registered for deletion + * on exit. + * + * @throws java.io.IOException if a filesystem operation fails. 
+ */ + void loadLibraryFromJar(final String tmpDir) + throws IOException { + if (!initialized) { + System.load(loadLibraryFromJarToTemp(tmpDir).getAbsolutePath()); + initialized = true; + } + } + + File loadLibraryFromJarToTemp(final String tmpDir) + throws IOException { + final File temp; + if (tmpDir == null || tmpDir.isEmpty()) { + temp = File.createTempFile(tempFilePrefix, tempFileSuffix); + } else { + temp = new File(tmpDir, jniLibraryFileName); + if (temp.exists() && !temp.delete()) { + throw new RuntimeException("File: " + temp.getAbsolutePath() + + " already exists and cannot be removed."); + } + if (!temp.createNewFile()) { + throw new RuntimeException("File: " + temp.getAbsolutePath() + + " could not be created."); + } + } + + if (!temp.exists()) { + throw new RuntimeException("File " + temp.getAbsolutePath() + " does not exist."); + } else { + temp.deleteOnExit(); + } + + // attempt to copy the library from the Jar file to the temp destination + try (final InputStream is = getClass().getClassLoader(). + getResourceAsStream(jniLibraryFileName)) { + if (is == null) { + throw new RuntimeException(jniLibraryFileName + " was not found inside JAR."); + } else { + Files.copy(is, temp.toPath(), StandardCopyOption.REPLACE_EXISTING); + } + } + + return temp; + } + + /** + * Private constructor to disallow instantiation + */ + private NativeLibraryLoader() { + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/Options.java b/external/rocksdb/java/src/main/java/org/rocksdb/Options.java new file mode 100755 index 0000000000..dcc512f2db --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/Options.java @@ -0,0 +1,1290 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.ArrayList; +import java.util.List; + +/** + * Options to control the behavior of a database. It will be used + * during the creation of a {@link org.rocksdb.RocksDB} (i.e., RocksDB.open()). + * + * If {@link #dispose()} function is not called, then it will be GC'd + * automaticallyand native resources will be released as part of the process. + */ +public class Options extends RocksObject + implements DBOptionsInterface, ColumnFamilyOptionsInterface { + static { + RocksDB.loadLibrary(); + } + /** + * Construct options for opening a RocksDB. + * + * This constructor will create (by allocating a block of memory) + * an {@code rocksdb::Options} in the c++ side. + */ + public Options() { + super(newOptions()); + env_ = Env.getDefault(); + } + + /** + * Construct options for opening a RocksDB. Reusing database options + * and column family options. 
+ * + * @param dbOptions {@link org.rocksdb.DBOptions} instance + * @param columnFamilyOptions {@link org.rocksdb.ColumnFamilyOptions} + * instance + */ + public Options(final DBOptions dbOptions, + final ColumnFamilyOptions columnFamilyOptions) { + super(newOptions(dbOptions.nativeHandle_, + columnFamilyOptions.nativeHandle_)); + env_ = Env.getDefault(); + } + + @Override + public Options setIncreaseParallelism(final int totalThreads) { + assert(isOwningHandle()); + setIncreaseParallelism(nativeHandle_, totalThreads); + return this; + } + + @Override + public Options setCreateIfMissing(final boolean flag) { + assert(isOwningHandle()); + setCreateIfMissing(nativeHandle_, flag); + return this; + } + + @Override + public Options setCreateMissingColumnFamilies(final boolean flag) { + assert(isOwningHandle()); + setCreateMissingColumnFamilies(nativeHandle_, flag); + return this; + } + + /** + * Use the specified object to interact with the environment, + * e.g. to read/write files, schedule background work, etc. + * Default: {@link Env#getDefault()} + * + * @param env {@link Env} instance. + * @return the instance of the current Options. + */ + public Options setEnv(final Env env) { + assert(isOwningHandle()); + setEnv(nativeHandle_, env.nativeHandle_); + env_ = env; + return this; + } + + /** + * Returns the set RocksEnv instance. + * + * @return {@link RocksEnv} instance set in the Options. + */ + public Env getEnv() { + return env_; + } + + /** + *

<p>Set appropriate parameters for bulk loading. + * The reason that this is a function that returns "this" instead of a + * constructor is to enable chaining of multiple similar calls in the future. + * </p> + * + * <p>All data will be in level 0 without any automatic compaction. + * It's recommended to manually call CompactRange(NULL, NULL) before reading + * from the database, because otherwise the read can be very slow.</p>

+ * + * @return the instance of the current Options. + */ + public Options prepareForBulkLoad() { + prepareForBulkLoad(nativeHandle_); + return this; + } + + @Override + public boolean createIfMissing() { + assert(isOwningHandle()); + return createIfMissing(nativeHandle_); + } + + @Override + public boolean createMissingColumnFamilies() { + assert(isOwningHandle()); + return createMissingColumnFamilies(nativeHandle_); + } + + @Override + public Options optimizeForPointLookup( + long blockCacheSizeMb) { + optimizeForPointLookup(nativeHandle_, + blockCacheSizeMb); + return this; + } + + @Override + public Options optimizeLevelStyleCompaction() { + optimizeLevelStyleCompaction(nativeHandle_, + DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET); + return this; + } + + @Override + public Options optimizeLevelStyleCompaction( + long memtableMemoryBudget) { + optimizeLevelStyleCompaction(nativeHandle_, + memtableMemoryBudget); + return this; + } + + @Override + public Options optimizeUniversalStyleCompaction() { + optimizeUniversalStyleCompaction(nativeHandle_, + DEFAULT_COMPACTION_MEMTABLE_MEMORY_BUDGET); + return this; + } + + @Override + public Options optimizeUniversalStyleCompaction( + final long memtableMemoryBudget) { + optimizeUniversalStyleCompaction(nativeHandle_, + memtableMemoryBudget); + return this; + } + + @Override + public Options setComparator(final BuiltinComparator builtinComparator) { + assert(isOwningHandle()); + setComparatorHandle(nativeHandle_, builtinComparator.ordinal()); + return this; + } + + @Override + public Options setComparator( + final AbstractComparator> comparator) { + assert(isOwningHandle()); + setComparatorHandle(nativeHandle_, comparator.getNativeHandle()); + comparator_ = comparator; + return this; + } + + @Override + public Options setMergeOperatorName(final String name) { + assert(isOwningHandle()); + if (name == null) { + throw new IllegalArgumentException( + "Merge operator name must not be null."); + } + setMergeOperatorName(nativeHandle_, name); + return this; + } + + @Override + public Options setMergeOperator(final MergeOperator mergeOperator) { + setMergeOperator(nativeHandle_, mergeOperator.newMergeOperatorHandle()); + return this; + } + + @Override + public Options setWriteBufferSize(final long writeBufferSize) { + assert(isOwningHandle()); + setWriteBufferSize(nativeHandle_, writeBufferSize); + return this; + } + + @Override + public long writeBufferSize() { + assert(isOwningHandle()); + return writeBufferSize(nativeHandle_); + } + + @Override + public Options setMaxWriteBufferNumber(final int maxWriteBufferNumber) { + assert(isOwningHandle()); + setMaxWriteBufferNumber(nativeHandle_, maxWriteBufferNumber); + return this; + } + + @Override + public int maxWriteBufferNumber() { + assert(isOwningHandle()); + return maxWriteBufferNumber(nativeHandle_); + } + + @Override + public boolean errorIfExists() { + assert(isOwningHandle()); + return errorIfExists(nativeHandle_); + } + + @Override + public Options setErrorIfExists(final boolean errorIfExists) { + assert(isOwningHandle()); + setErrorIfExists(nativeHandle_, errorIfExists); + return this; + } + + @Override + public boolean paranoidChecks() { + assert(isOwningHandle()); + return paranoidChecks(nativeHandle_); + } + + @Override + public Options setParanoidChecks(final boolean paranoidChecks) { + assert(isOwningHandle()); + setParanoidChecks(nativeHandle_, paranoidChecks); + return this; + } + + @Override + public int maxOpenFiles() { + assert(isOwningHandle()); + return maxOpenFiles(nativeHandle_); + } 
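Tying this back to the GenericRateLimiterConfig added earlier in this diff, a hedged usage sketch (class name and the 10 MB/s figure are illustrative) of capping background write throughput through Options:

import org.rocksdb.GenericRateLimiterConfig;
import org.rocksdb.Options;

public class RateLimitedOptionsSketch {
  public static Options buildOptions() {
    // cap combined flush + compaction writes at 10 MB/s; per the config's
    // Javadoc, WAL writes are not covered by the limiter
    return new Options()
        .setCreateIfMissing(true)
        .setRateLimiterConfig(
            new GenericRateLimiterConfig(10L * 1024 * 1024));
  }
}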
+ + @Override + public Options setMaxTotalWalSize(final long maxTotalWalSize) { + assert(isOwningHandle()); + setMaxTotalWalSize(nativeHandle_, maxTotalWalSize); + return this; + } + + @Override + public long maxTotalWalSize() { + assert(isOwningHandle()); + return maxTotalWalSize(nativeHandle_); + } + + @Override + public Options setMaxOpenFiles(final int maxOpenFiles) { + assert(isOwningHandle()); + setMaxOpenFiles(nativeHandle_, maxOpenFiles); + return this; + } + + @Override + public boolean disableDataSync() { + assert(isOwningHandle()); + return disableDataSync(nativeHandle_); + } + + @Override + public Options setDisableDataSync(final boolean disableDataSync) { + assert(isOwningHandle()); + setDisableDataSync(nativeHandle_, disableDataSync); + return this; + } + + @Override + public boolean useFsync() { + assert(isOwningHandle()); + return useFsync(nativeHandle_); + } + + @Override + public Options setUseFsync(final boolean useFsync) { + assert(isOwningHandle()); + setUseFsync(nativeHandle_, useFsync); + return this; + } + + @Override + public String dbLogDir() { + assert(isOwningHandle()); + return dbLogDir(nativeHandle_); + } + + @Override + public Options setDbLogDir(final String dbLogDir) { + assert(isOwningHandle()); + setDbLogDir(nativeHandle_, dbLogDir); + return this; + } + + @Override + public String walDir() { + assert(isOwningHandle()); + return walDir(nativeHandle_); + } + + @Override + public Options setWalDir(final String walDir) { + assert(isOwningHandle()); + setWalDir(nativeHandle_, walDir); + return this; + } + + @Override + public long deleteObsoleteFilesPeriodMicros() { + assert(isOwningHandle()); + return deleteObsoleteFilesPeriodMicros(nativeHandle_); + } + + @Override + public Options setDeleteObsoleteFilesPeriodMicros( + final long micros) { + assert(isOwningHandle()); + setDeleteObsoleteFilesPeriodMicros(nativeHandle_, micros); + return this; + } + + @Override + public int maxBackgroundCompactions() { + assert(isOwningHandle()); + return maxBackgroundCompactions(nativeHandle_); + } + + @Override + public Options createStatistics() { + assert(isOwningHandle()); + createStatistics(nativeHandle_); + return this; + } + + @Override + public Statistics statisticsPtr() { + assert(isOwningHandle()); + + long statsPtr = statisticsPtr(nativeHandle_); + if(statsPtr == 0) { + createStatistics(); + statsPtr = statisticsPtr(nativeHandle_); + } + + return new Statistics(statsPtr); + } + + @Override + public Options setMaxBackgroundCompactions( + final int maxBackgroundCompactions) { + assert(isOwningHandle()); + setMaxBackgroundCompactions(nativeHandle_, maxBackgroundCompactions); + return this; + } + + @Override + public int maxBackgroundFlushes() { + assert(isOwningHandle()); + return maxBackgroundFlushes(nativeHandle_); + } + + @Override + public Options setMaxBackgroundFlushes( + final int maxBackgroundFlushes) { + assert(isOwningHandle()); + setMaxBackgroundFlushes(nativeHandle_, maxBackgroundFlushes); + return this; + } + + @Override + public long maxLogFileSize() { + assert(isOwningHandle()); + return maxLogFileSize(nativeHandle_); + } + + @Override + public Options setMaxLogFileSize(final long maxLogFileSize) { + assert(isOwningHandle()); + setMaxLogFileSize(nativeHandle_, maxLogFileSize); + return this; + } + + @Override + public long logFileTimeToRoll() { + assert(isOwningHandle()); + return logFileTimeToRoll(nativeHandle_); + } + + @Override + public Options setLogFileTimeToRoll(final long logFileTimeToRoll) { + assert(isOwningHandle()); + 
setLogFileTimeToRoll(nativeHandle_, logFileTimeToRoll); + return this; + } + + @Override + public long keepLogFileNum() { + assert(isOwningHandle()); + return keepLogFileNum(nativeHandle_); + } + + @Override + public Options setKeepLogFileNum(final long keepLogFileNum) { + assert(isOwningHandle()); + setKeepLogFileNum(nativeHandle_, keepLogFileNum); + return this; + } + + @Override + public long maxManifestFileSize() { + assert(isOwningHandle()); + return maxManifestFileSize(nativeHandle_); + } + + @Override + public Options setMaxManifestFileSize( + final long maxManifestFileSize) { + assert(isOwningHandle()); + setMaxManifestFileSize(nativeHandle_, maxManifestFileSize); + return this; + } + + @Override + public Options setMaxTableFilesSizeFIFO( + final long maxTableFilesSize) { + assert(maxTableFilesSize > 0); // unsigned native type + assert(isOwningHandle()); + setMaxTableFilesSizeFIFO(nativeHandle_, maxTableFilesSize); + return this; + } + + @Override + public long maxTableFilesSizeFIFO() { + return maxTableFilesSizeFIFO(nativeHandle_); + } + + @Override + public int tableCacheNumshardbits() { + assert(isOwningHandle()); + return tableCacheNumshardbits(nativeHandle_); + } + + @Override + public Options setTableCacheNumshardbits( + final int tableCacheNumshardbits) { + assert(isOwningHandle()); + setTableCacheNumshardbits(nativeHandle_, tableCacheNumshardbits); + return this; + } + + @Override + public long walTtlSeconds() { + assert(isOwningHandle()); + return walTtlSeconds(nativeHandle_); + } + + @Override + public Options setWalTtlSeconds(final long walTtlSeconds) { + assert(isOwningHandle()); + setWalTtlSeconds(nativeHandle_, walTtlSeconds); + return this; + } + + @Override + public long walSizeLimitMB() { + assert(isOwningHandle()); + return walSizeLimitMB(nativeHandle_); + } + + @Override + public Options setWalSizeLimitMB(final long sizeLimitMB) { + assert(isOwningHandle()); + setWalSizeLimitMB(nativeHandle_, sizeLimitMB); + return this; + } + + @Override + public long manifestPreallocationSize() { + assert(isOwningHandle()); + return manifestPreallocationSize(nativeHandle_); + } + + @Override + public Options setManifestPreallocationSize(final long size) { + assert(isOwningHandle()); + setManifestPreallocationSize(nativeHandle_, size); + return this; + } + + @Override + public boolean allowOsBuffer() { + assert(isOwningHandle()); + return allowOsBuffer(nativeHandle_); + } + + @Override + public Options setAllowOsBuffer(final boolean allowOsBuffer) { + assert(isOwningHandle()); + setAllowOsBuffer(nativeHandle_, allowOsBuffer); + return this; + } + + @Override + public boolean allowMmapReads() { + assert(isOwningHandle()); + return allowMmapReads(nativeHandle_); + } + + @Override + public Options setAllowMmapReads(final boolean allowMmapReads) { + assert(isOwningHandle()); + setAllowMmapReads(nativeHandle_, allowMmapReads); + return this; + } + + @Override + public boolean allowMmapWrites() { + assert(isOwningHandle()); + return allowMmapWrites(nativeHandle_); + } + + @Override + public Options setAllowMmapWrites(final boolean allowMmapWrites) { + assert(isOwningHandle()); + setAllowMmapWrites(nativeHandle_, allowMmapWrites); + return this; + } + + @Override + public boolean isFdCloseOnExec() { + assert(isOwningHandle()); + return isFdCloseOnExec(nativeHandle_); + } + + @Override + public Options setIsFdCloseOnExec(final boolean isFdCloseOnExec) { + assert(isOwningHandle()); + setIsFdCloseOnExec(nativeHandle_, isFdCloseOnExec); + return this; + } + + @Override + public int 
statsDumpPeriodSec() { + assert(isOwningHandle()); + return statsDumpPeriodSec(nativeHandle_); + } + + @Override + public Options setStatsDumpPeriodSec(final int statsDumpPeriodSec) { + assert(isOwningHandle()); + setStatsDumpPeriodSec(nativeHandle_, statsDumpPeriodSec); + return this; + } + + @Override + public boolean adviseRandomOnOpen() { + return adviseRandomOnOpen(nativeHandle_); + } + + @Override + public Options setAdviseRandomOnOpen(final boolean adviseRandomOnOpen) { + assert(isOwningHandle()); + setAdviseRandomOnOpen(nativeHandle_, adviseRandomOnOpen); + return this; + } + + @Override + public boolean useAdaptiveMutex() { + assert(isOwningHandle()); + return useAdaptiveMutex(nativeHandle_); + } + + @Override + public Options setUseAdaptiveMutex(final boolean useAdaptiveMutex) { + assert(isOwningHandle()); + setUseAdaptiveMutex(nativeHandle_, useAdaptiveMutex); + return this; + } + + @Override + public long bytesPerSync() { + return bytesPerSync(nativeHandle_); + } + + @Override + public Options setBytesPerSync(final long bytesPerSync) { + assert(isOwningHandle()); + setBytesPerSync(nativeHandle_, bytesPerSync); + return this; + } + + @Override + public Options setMemTableConfig(final MemTableConfig config) { + memTableConfig_ = config; + setMemTableFactory(nativeHandle_, config.newMemTableFactoryHandle()); + return this; + } + + @Override + public Options setRateLimiterConfig(final RateLimiterConfig config) { + rateLimiterConfig_ = config; + setRateLimiter(nativeHandle_, config.newRateLimiterHandle()); + return this; + } + + @Override + public Options setLogger(final Logger logger) { + assert(isOwningHandle()); + setLogger(nativeHandle_, logger.nativeHandle_); + return this; + } + + @Override + public Options setInfoLogLevel(final InfoLogLevel infoLogLevel) { + assert(isOwningHandle()); + setInfoLogLevel(nativeHandle_, infoLogLevel.getValue()); + return this; + } + + @Override + public InfoLogLevel infoLogLevel() { + assert(isOwningHandle()); + return InfoLogLevel.getInfoLogLevel( + infoLogLevel(nativeHandle_)); + } + + @Override + public String memTableFactoryName() { + assert(isOwningHandle()); + return memTableFactoryName(nativeHandle_); + } + + @Override + public Options setTableFormatConfig(final TableFormatConfig config) { + tableFormatConfig_ = config; + setTableFactory(nativeHandle_, config.newTableFactoryHandle()); + return this; + } + + @Override + public String tableFactoryName() { + assert(isOwningHandle()); + return tableFactoryName(nativeHandle_); + } + + @Override + public Options useFixedLengthPrefixExtractor(final int n) { + assert(isOwningHandle()); + useFixedLengthPrefixExtractor(nativeHandle_, n); + return this; + } + + @Override + public Options useCappedPrefixExtractor(final int n) { + assert(isOwningHandle()); + useCappedPrefixExtractor(nativeHandle_, n); + return this; + } + + @Override + public CompressionType compressionType() { + return CompressionType.values()[compressionType(nativeHandle_)]; + } + + @Override + public Options setCompressionPerLevel( + final List compressionLevels) { + final byte[] byteCompressionTypes = new byte[ + compressionLevels.size()]; + for (int i = 0; i < compressionLevels.size(); i++) { + byteCompressionTypes[i] = compressionLevels.get(i).getValue(); + } + setCompressionPerLevel(nativeHandle_, byteCompressionTypes); + return this; + } + + @Override + public List compressionPerLevel() { + final byte[] byteCompressionTypes = + compressionPerLevel(nativeHandle_); + final List compressionLevels = new ArrayList<>(); + for (final 
Byte byteCompressionType : byteCompressionTypes) { + compressionLevels.add(CompressionType.getCompressionType( + byteCompressionType)); + } + return compressionLevels; + } + + @Override + public Options setCompressionType(CompressionType compressionType) { + setCompressionType(nativeHandle_, compressionType.getValue()); + return this; + } + + @Override + public CompactionStyle compactionStyle() { + return CompactionStyle.values()[compactionStyle(nativeHandle_)]; + } + + @Override + public Options setCompactionStyle( + final CompactionStyle compactionStyle) { + setCompactionStyle(nativeHandle_, compactionStyle.getValue()); + return this; + } + + @Override + public int numLevels() { + return numLevels(nativeHandle_); + } + + @Override + public Options setNumLevels(int numLevels) { + setNumLevels(nativeHandle_, numLevels); + return this; + } + + @Override + public int levelZeroFileNumCompactionTrigger() { + return levelZeroFileNumCompactionTrigger(nativeHandle_); + } + + @Override + public Options setLevelZeroFileNumCompactionTrigger( + final int numFiles) { + setLevelZeroFileNumCompactionTrigger( + nativeHandle_, numFiles); + return this; + } + + @Override + public int levelZeroSlowdownWritesTrigger() { + return levelZeroSlowdownWritesTrigger(nativeHandle_); + } + + @Override + public Options setLevelZeroSlowdownWritesTrigger( + final int numFiles) { + setLevelZeroSlowdownWritesTrigger(nativeHandle_, numFiles); + return this; + } + + @Override + public int levelZeroStopWritesTrigger() { + return levelZeroStopWritesTrigger(nativeHandle_); + } + + @Override + public Options setLevelZeroStopWritesTrigger( + final int numFiles) { + setLevelZeroStopWritesTrigger(nativeHandle_, numFiles); + return this; + } + + @Override + public int maxMemCompactionLevel() { + return 0; + } + + @Override + public Options setMaxMemCompactionLevel( + final int maxMemCompactionLevel) { + return this; + } + + @Override + public long targetFileSizeBase() { + return targetFileSizeBase(nativeHandle_); + } + + @Override + public Options setTargetFileSizeBase(long targetFileSizeBase) { + setTargetFileSizeBase(nativeHandle_, targetFileSizeBase); + return this; + } + + @Override + public int targetFileSizeMultiplier() { + return targetFileSizeMultiplier(nativeHandle_); + } + + @Override + public Options setTargetFileSizeMultiplier(int multiplier) { + setTargetFileSizeMultiplier(nativeHandle_, multiplier); + return this; + } + + @Override + public Options setMaxBytesForLevelBase(final long maxBytesForLevelBase) { + setMaxBytesForLevelBase(nativeHandle_, maxBytesForLevelBase); + return this; + } + + @Override + public long maxBytesForLevelBase() { + return maxBytesForLevelBase(nativeHandle_); + } + + @Override + public Options setLevelCompactionDynamicLevelBytes( + final boolean enableLevelCompactionDynamicLevelBytes) { + setLevelCompactionDynamicLevelBytes(nativeHandle_, + enableLevelCompactionDynamicLevelBytes); + return this; + } + + @Override + public boolean levelCompactionDynamicLevelBytes() { + return levelCompactionDynamicLevelBytes(nativeHandle_); + } + + @Override + public int maxBytesForLevelMultiplier() { + return maxBytesForLevelMultiplier(nativeHandle_); + } + + @Override + public Options setMaxBytesForLevelMultiplier(final int multiplier) { + setMaxBytesForLevelMultiplier(nativeHandle_, multiplier); + return this; + } + + @Override + public int expandedCompactionFactor() { + return expandedCompactionFactor(nativeHandle_); + } + + @Override + public Options setExpandedCompactionFactor( + final int 
expandedCompactionFactor) { + setExpandedCompactionFactor(nativeHandle_, expandedCompactionFactor); + return this; + } + + @Override + public int sourceCompactionFactor() { + return sourceCompactionFactor(nativeHandle_); + } + + @Override + public Options setSourceCompactionFactor(int sourceCompactionFactor) { + setSourceCompactionFactor(nativeHandle_, sourceCompactionFactor); + return this; + } + + @Override + public int maxGrandparentOverlapFactor() { + return maxGrandparentOverlapFactor(nativeHandle_); + } + + @Override + public Options setMaxGrandparentOverlapFactor( + final int maxGrandparentOverlapFactor) { + setMaxGrandparentOverlapFactor(nativeHandle_, maxGrandparentOverlapFactor); + return this; + } + + @Override + public double softRateLimit() { + return softRateLimit(nativeHandle_); + } + + @Override + public Options setSoftRateLimit(final double softRateLimit) { + setSoftRateLimit(nativeHandle_, softRateLimit); + return this; + } + + @Override + public double hardRateLimit() { + return hardRateLimit(nativeHandle_); + } + + @Override + public Options setHardRateLimit(double hardRateLimit) { + setHardRateLimit(nativeHandle_, hardRateLimit); + return this; + } + + @Override + public int rateLimitDelayMaxMilliseconds() { + return rateLimitDelayMaxMilliseconds(nativeHandle_); + } + + @Override + public Options setRateLimitDelayMaxMilliseconds( + final int rateLimitDelayMaxMilliseconds) { + setRateLimitDelayMaxMilliseconds( + nativeHandle_, rateLimitDelayMaxMilliseconds); + return this; + } + + @Override + public long arenaBlockSize() { + return arenaBlockSize(nativeHandle_); + } + + @Override + public Options setArenaBlockSize(final long arenaBlockSize) { + setArenaBlockSize(nativeHandle_, arenaBlockSize); + return this; + } + + @Override + public boolean disableAutoCompactions() { + return disableAutoCompactions(nativeHandle_); + } + + @Override + public Options setDisableAutoCompactions( + final boolean disableAutoCompactions) { + setDisableAutoCompactions(nativeHandle_, disableAutoCompactions); + return this; + } + + @Override + public boolean purgeRedundantKvsWhileFlush() { + return purgeRedundantKvsWhileFlush(nativeHandle_); + } + + @Override + public Options setPurgeRedundantKvsWhileFlush( + final boolean purgeRedundantKvsWhileFlush) { + setPurgeRedundantKvsWhileFlush( + nativeHandle_, purgeRedundantKvsWhileFlush); + return this; + } + + @Override + public boolean verifyChecksumsInCompaction() { + return verifyChecksumsInCompaction(nativeHandle_); + } + + @Override + public Options setVerifyChecksumsInCompaction( + final boolean verifyChecksumsInCompaction) { + setVerifyChecksumsInCompaction( + nativeHandle_, verifyChecksumsInCompaction); + return this; + } + + @Override + public long maxSequentialSkipInIterations() { + return maxSequentialSkipInIterations(nativeHandle_); + } + + @Override + public Options setMaxSequentialSkipInIterations( + final long maxSequentialSkipInIterations) { + setMaxSequentialSkipInIterations(nativeHandle_, + maxSequentialSkipInIterations); + return this; + } + + @Override + public boolean inplaceUpdateSupport() { + return inplaceUpdateSupport(nativeHandle_); + } + + @Override + public Options setInplaceUpdateSupport( + final boolean inplaceUpdateSupport) { + setInplaceUpdateSupport(nativeHandle_, inplaceUpdateSupport); + return this; + } + + @Override + public long inplaceUpdateNumLocks() { + return inplaceUpdateNumLocks(nativeHandle_); + } + + @Override + public Options setInplaceUpdateNumLocks( + final long inplaceUpdateNumLocks) { + 
setInplaceUpdateNumLocks(nativeHandle_, inplaceUpdateNumLocks); + return this; + } + + @Override + public double memtablePrefixBloomSizeRatio() { + return memtablePrefixBloomSizeRatio(nativeHandle_); + } + + @Override + public Options setMemtablePrefixBloomSizeRatio(final double memtablePrefixBloomSizeRatio) { + setMemtablePrefixBloomSizeRatio(nativeHandle_, memtablePrefixBloomSizeRatio); + return this; + } + + @Override + public int bloomLocality() { + return bloomLocality(nativeHandle_); + } + + @Override + public Options setBloomLocality(final int bloomLocality) { + setBloomLocality(nativeHandle_, bloomLocality); + return this; + } + + @Override + public long maxSuccessiveMerges() { + return maxSuccessiveMerges(nativeHandle_); + } + + @Override + public Options setMaxSuccessiveMerges(long maxSuccessiveMerges) { + setMaxSuccessiveMerges(nativeHandle_, maxSuccessiveMerges); + return this; + } + + @Override + public int minWriteBufferNumberToMerge() { + return minWriteBufferNumberToMerge(nativeHandle_); + } + + @Override + public Options setMinWriteBufferNumberToMerge( + final int minWriteBufferNumberToMerge) { + setMinWriteBufferNumberToMerge(nativeHandle_, minWriteBufferNumberToMerge); + return this; + } + + @Override + public int minPartialMergeOperands() { + return minPartialMergeOperands(nativeHandle_); + } + + @Override + public Options setMinPartialMergeOperands( + final int minPartialMergeOperands) { + setMinPartialMergeOperands(nativeHandle_, minPartialMergeOperands); + return this; + } + + @Override + public Options setOptimizeFiltersForHits( + final boolean optimizeFiltersForHits) { + setOptimizeFiltersForHits(nativeHandle_, optimizeFiltersForHits); + return this; + } + + @Override + public boolean optimizeFiltersForHits() { + return optimizeFiltersForHits(nativeHandle_); + } + + private native static long newOptions(); + private native static long newOptions(long dbOptHandle, + long cfOptHandle); + @Override protected final native void disposeInternal(final long handle); + private native void setEnv(long optHandle, long envHandle); + private native void prepareForBulkLoad(long handle); + + // DB native handles + private native void setIncreaseParallelism(long handle, int totalThreads); + private native void setCreateIfMissing(long handle, boolean flag); + private native boolean createIfMissing(long handle); + private native void setCreateMissingColumnFamilies( + long handle, boolean flag); + private native boolean createMissingColumnFamilies(long handle); + private native void setErrorIfExists(long handle, boolean errorIfExists); + private native boolean errorIfExists(long handle); + private native void setParanoidChecks( + long handle, boolean paranoidChecks); + private native boolean paranoidChecks(long handle); + private native void setRateLimiter(long handle, + long rateLimiterHandle); + private native void setLogger(long handle, + long loggerHandle); + private native void setInfoLogLevel(long handle, byte logLevel); + private native byte infoLogLevel(long handle); + private native void setMaxOpenFiles(long handle, int maxOpenFiles); + private native int maxOpenFiles(long handle); + private native void setMaxTotalWalSize(long handle, + long maxTotalWalSize); + private native long maxTotalWalSize(long handle); + private native void createStatistics(long optHandle); + private native long statisticsPtr(long optHandle); + private native void setDisableDataSync(long handle, boolean disableDataSync); + private native boolean disableDataSync(long handle); + private native boolean 
useFsync(long handle); + private native void setUseFsync(long handle, boolean useFsync); + private native void setDbLogDir(long handle, String dbLogDir); + private native String dbLogDir(long handle); + private native void setWalDir(long handle, String walDir); + private native String walDir(long handle); + private native void setDeleteObsoleteFilesPeriodMicros( + long handle, long micros); + private native long deleteObsoleteFilesPeriodMicros(long handle); + private native void setMaxBackgroundCompactions( + long handle, int maxBackgroundCompactions); + private native int maxBackgroundCompactions(long handle); + private native void setMaxBackgroundFlushes( + long handle, int maxBackgroundFlushes); + private native int maxBackgroundFlushes(long handle); + private native void setMaxLogFileSize(long handle, long maxLogFileSize) + throws IllegalArgumentException; + private native long maxLogFileSize(long handle); + private native void setLogFileTimeToRoll( + long handle, long logFileTimeToRoll) throws IllegalArgumentException; + private native long logFileTimeToRoll(long handle); + private native void setKeepLogFileNum(long handle, long keepLogFileNum) + throws IllegalArgumentException; + private native long keepLogFileNum(long handle); + private native void setMaxManifestFileSize( + long handle, long maxManifestFileSize); + private native long maxManifestFileSize(long handle); + private native void setMaxTableFilesSizeFIFO( + long handle, long maxTableFilesSize); + private native long maxTableFilesSizeFIFO(long handle); + private native void setTableCacheNumshardbits( + long handle, int tableCacheNumshardbits); + private native int tableCacheNumshardbits(long handle); + private native void setWalTtlSeconds(long handle, long walTtlSeconds); + private native long walTtlSeconds(long handle); + private native void setWalSizeLimitMB(long handle, long sizeLimitMB); + private native long walSizeLimitMB(long handle); + private native void setManifestPreallocationSize( + long handle, long size) throws IllegalArgumentException; + private native long manifestPreallocationSize(long handle); + private native void setAllowOsBuffer( + long handle, boolean allowOsBuffer); + private native boolean allowOsBuffer(long handle); + private native void setAllowMmapReads( + long handle, boolean allowMmapReads); + private native boolean allowMmapReads(long handle); + private native void setAllowMmapWrites( + long handle, boolean allowMmapWrites); + private native boolean allowMmapWrites(long handle); + private native void setIsFdCloseOnExec( + long handle, boolean isFdCloseOnExec); + private native boolean isFdCloseOnExec(long handle); + private native void setStatsDumpPeriodSec( + long handle, int statsDumpPeriodSec); + private native int statsDumpPeriodSec(long handle); + private native void setAdviseRandomOnOpen( + long handle, boolean adviseRandomOnOpen); + private native boolean adviseRandomOnOpen(long handle); + private native void setUseAdaptiveMutex( + long handle, boolean useAdaptiveMutex); + private native boolean useAdaptiveMutex(long handle); + private native void setBytesPerSync( + long handle, long bytesPerSync); + private native long bytesPerSync(long handle); + // CF native handles + private native void optimizeForPointLookup(long handle, + long blockCacheSizeMb); + private native void optimizeLevelStyleCompaction(long handle, + long memtableMemoryBudget); + private native void optimizeUniversalStyleCompaction(long handle, + long memtableMemoryBudget); + private native void setComparatorHandle(long 
handle, int builtinComparator); + private native void setComparatorHandle(long optHandle, + long comparatorHandle); + private native void setMergeOperatorName( + long handle, String name); + private native void setMergeOperator( + long handle, long mergeOperatorHandle); + private native void setWriteBufferSize(long handle, long writeBufferSize) + throws IllegalArgumentException; + private native long writeBufferSize(long handle); + private native void setMaxWriteBufferNumber( + long handle, int maxWriteBufferNumber); + private native int maxWriteBufferNumber(long handle); + private native void setMinWriteBufferNumberToMerge( + long handle, int minWriteBufferNumberToMerge); + private native int minWriteBufferNumberToMerge(long handle); + private native void setCompressionType(long handle, byte compressionType); + private native byte compressionType(long handle); + private native void setCompressionPerLevel(long handle, + byte[] compressionLevels); + private native byte[] compressionPerLevel(long handle); + private native void useFixedLengthPrefixExtractor( + long handle, int prefixLength); + private native void useCappedPrefixExtractor( + long handle, int prefixLength); + private native void setNumLevels( + long handle, int numLevels); + private native int numLevels(long handle); + private native void setLevelZeroFileNumCompactionTrigger( + long handle, int numFiles); + private native int levelZeroFileNumCompactionTrigger(long handle); + private native void setLevelZeroSlowdownWritesTrigger( + long handle, int numFiles); + private native int levelZeroSlowdownWritesTrigger(long handle); + private native void setLevelZeroStopWritesTrigger( + long handle, int numFiles); + private native int levelZeroStopWritesTrigger(long handle); + private native void setTargetFileSizeBase( + long handle, long targetFileSizeBase); + private native long targetFileSizeBase(long handle); + private native void setTargetFileSizeMultiplier( + long handle, int multiplier); + private native int targetFileSizeMultiplier(long handle); + private native void setMaxBytesForLevelBase( + long handle, long maxBytesForLevelBase); + private native long maxBytesForLevelBase(long handle); + private native void setLevelCompactionDynamicLevelBytes( + long handle, boolean enableLevelCompactionDynamicLevelBytes); + private native boolean levelCompactionDynamicLevelBytes( + long handle); + private native void setMaxBytesForLevelMultiplier( + long handle, int multiplier); + private native int maxBytesForLevelMultiplier(long handle); + private native void setExpandedCompactionFactor( + long handle, int expandedCompactionFactor); + private native int expandedCompactionFactor(long handle); + private native void setSourceCompactionFactor( + long handle, int sourceCompactionFactor); + private native int sourceCompactionFactor(long handle); + private native void setMaxGrandparentOverlapFactor( + long handle, int maxGrandparentOverlapFactor); + private native int maxGrandparentOverlapFactor(long handle); + private native void setSoftRateLimit( + long handle, double softRateLimit); + private native double softRateLimit(long handle); + private native void setHardRateLimit( + long handle, double hardRateLimit); + private native double hardRateLimit(long handle); + private native void setRateLimitDelayMaxMilliseconds( + long handle, int rateLimitDelayMaxMilliseconds); + private native int rateLimitDelayMaxMilliseconds(long handle); + private native void setArenaBlockSize( + long handle, long arenaBlockSize) throws IllegalArgumentException; + 
private native long arenaBlockSize(long handle); + private native void setDisableAutoCompactions( + long handle, boolean disableAutoCompactions); + private native boolean disableAutoCompactions(long handle); + private native void setCompactionStyle(long handle, byte compactionStyle); + private native byte compactionStyle(long handle); + private native void setPurgeRedundantKvsWhileFlush( + long handle, boolean purgeRedundantKvsWhileFlush); + private native boolean purgeRedundantKvsWhileFlush(long handle); + private native void setVerifyChecksumsInCompaction( + long handle, boolean verifyChecksumsInCompaction); + private native boolean verifyChecksumsInCompaction(long handle); + private native void setMaxSequentialSkipInIterations( + long handle, long maxSequentialSkipInIterations); + private native long maxSequentialSkipInIterations(long handle); + private native void setMemTableFactory(long handle, long factoryHandle); + private native String memTableFactoryName(long handle); + private native void setTableFactory(long handle, long factoryHandle); + private native String tableFactoryName(long handle); + private native void setInplaceUpdateSupport( + long handle, boolean inplaceUpdateSupport); + private native boolean inplaceUpdateSupport(long handle); + private native void setInplaceUpdateNumLocks( + long handle, long inplaceUpdateNumLocks) + throws IllegalArgumentException; + private native long inplaceUpdateNumLocks(long handle); + private native void setMemtablePrefixBloomSizeRatio( + long handle, double memtablePrefixBloomSizeRatio); + private native double memtablePrefixBloomSizeRatio(long handle); + private native void setBloomLocality( + long handle, int bloomLocality); + private native int bloomLocality(long handle); + private native void setMaxSuccessiveMerges( + long handle, long maxSuccessiveMerges) + throws IllegalArgumentException; + private native long maxSuccessiveMerges(long handle); + private native void setMinPartialMergeOperands( + long handle, int minPartialMergeOperands); + private native int minPartialMergeOperands(long handle); + private native void setOptimizeFiltersForHits(long handle, + boolean optimizeFiltersForHits); + private native boolean optimizeFiltersForHits(long handle); + // instance variables + Env env_; + MemTableConfig memTableConfig_; + TableFormatConfig tableFormatConfig_; + RateLimiterConfig rateLimiterConfig_; + AbstractComparator> comparator_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java b/external/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java new file mode 100755 index 0000000000..044c18d803 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/PlainTableConfig.java @@ -0,0 +1,251 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +/** + * The config for plain table sst format. + * + *
<p>PlainTable is a RocksDB's SST file format optimized for low query + * latency on pure-memory or really low-latency media.</p> + * + * <p>It also supports the prefix hash feature.</p>
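An illustrative usage sketch, not part of this patch: a PlainTableConfig is handed to RocksDB through Options.setTableFormatConfig(). The database path and the 8-byte prefix length are assumptions, the mmap and prefix-extractor settings reflect PlainTable's usual requirements, and exception handling is elided.

    // Hypothetical wiring of PlainTableConfig into an Options instance.
    Options options = new Options()
        .setCreateIfMissing(true)
        .setAllowMmapReads(true)                  // PlainTable is read via mmap
        .useFixedLengthPrefixExtractor(8)         // needed for the prefix hash feature
        .setTableFormatConfig(new PlainTableConfig());
    RocksDB db = RocksDB.open(options, "/tmp/plaintable-example"); // placeholder path
    // ... reads and writes ...
    db.dispose();
    options.dispose();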
+ */ +public class PlainTableConfig extends TableFormatConfig { + public static final int VARIABLE_LENGTH = 0; + public static final int DEFAULT_BLOOM_BITS_PER_KEY = 10; + public static final double DEFAULT_HASH_TABLE_RATIO = 0.75; + public static final int DEFAULT_INDEX_SPARSENESS = 16; + public static final int DEFAULT_HUGE_TLB_SIZE = 0; + public static final EncodingType DEFAULT_ENCODING_TYPE = + EncodingType.kPlain; + public static final boolean DEFAULT_FULL_SCAN_MODE = false; + public static final boolean DEFAULT_STORE_INDEX_IN_FILE + = false; + + public PlainTableConfig() { + keySize_ = VARIABLE_LENGTH; + bloomBitsPerKey_ = DEFAULT_BLOOM_BITS_PER_KEY; + hashTableRatio_ = DEFAULT_HASH_TABLE_RATIO; + indexSparseness_ = DEFAULT_INDEX_SPARSENESS; + hugePageTlbSize_ = DEFAULT_HUGE_TLB_SIZE; + encodingType_ = DEFAULT_ENCODING_TYPE; + fullScanMode_ = DEFAULT_FULL_SCAN_MODE; + storeIndexInFile_ = DEFAULT_STORE_INDEX_IN_FILE; + } + + /** + *
<p>Set the length of the user key. If it is set to be + * VARIABLE_LENGTH, then it indicates the user keys are + * of variable length.</p> + * + * <p>Otherwise, all the keys need to have the same length + * in bytes.</p> + * + * <p>DEFAULT: VARIABLE_LENGTH</p>
+ * + * @param keySize the length of the user key. + * @return the reference to the current config. + */ + public PlainTableConfig setKeySize(int keySize) { + keySize_ = keySize; + return this; + } + + /** + * @return the specified size of the user key. If VARIABLE_LENGTH, + * then it indicates variable-length key. + */ + public int keySize() { + return keySize_; + } + + /** + * Set the number of bits per key used by the internal bloom filter + * in the plain table sst format. + * + * @param bitsPerKey the number of bits per key for bloom filer. + * @return the reference to the current config. + */ + public PlainTableConfig setBloomBitsPerKey(int bitsPerKey) { + bloomBitsPerKey_ = bitsPerKey; + return this; + } + + /** + * @return the number of bits per key used for the bloom filter. + */ + public int bloomBitsPerKey() { + return bloomBitsPerKey_; + } + + /** + * hashTableRatio is the desired utilization of the hash table used + * for prefix hashing. The ideal ratio would be the number of + * prefixes / the number of hash buckets. If this value is set to + * zero, then hash table will not be used. + * + * @param ratio the hash table ratio. + * @return the reference to the current config. + */ + public PlainTableConfig setHashTableRatio(double ratio) { + hashTableRatio_ = ratio; + return this; + } + + /** + * @return the hash table ratio. + */ + public double hashTableRatio() { + return hashTableRatio_; + } + + /** + * Index sparseness determines the index interval for keys inside the + * same prefix. This number is equal to the maximum number of linear + * search required after hash and binary search. If it's set to 0, + * then each key will be indexed. + * + * @param sparseness the index sparseness. + * @return the reference to the current config. + */ + public PlainTableConfig setIndexSparseness(int sparseness) { + indexSparseness_ = sparseness; + return this; + } + + /** + * @return the index sparseness. + */ + public long indexSparseness() { + return indexSparseness_; + } + + /** + *
<p>huge_page_tlb_size: if ≤0, allocate hash indexes and blooms + * from malloc, otherwise from huge page TLB.</p> + * + * <p>The user needs to reserve huge pages for it to be allocated, + * like: {@code sysctl -w vm.nr_hugepages=20}</p> + * + * <p>See linux doc Documentation/vm/hugetlbpage.txt</p>
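A short sketch of the two-step setup just described; the 2 MB value is an assumed huge-page size, not something this patch prescribes:

    // 1) Reserve huge pages on the host first, e.g. sysctl -w vm.nr_hugepages=20
    // 2) Then let PlainTable allocate its index and bloom filter from huge page TLB.
    PlainTableConfig config = new PlainTableConfig()
        .setHugePageTlbSize(2 * 1024 * 1024);   // assumed 2 MB huge-page size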
+ * + * @param hugePageTlbSize huge page tlb size + * @return the reference to the current config. + */ + public PlainTableConfig setHugePageTlbSize(int hugePageTlbSize) { + this.hugePageTlbSize_ = hugePageTlbSize; + return this; + } + + /** + * Returns the value for huge page tlb size + * + * @return hugePageTlbSize + */ + public int hugePageTlbSize() { + return hugePageTlbSize_; + } + + /** + * Sets the encoding type. + * + *
<p>This setting determines how to encode + * the keys. See enum {@link EncodingType} for + * the choices.</p> + * + * <p>The value will determine how to encode keys + * when writing to a new SST file. This value will be stored + * inside the SST file which will be used when reading from + * the file, which makes it possible for users to choose + * different encoding type when reopening a DB. Files with + * different encoding types can co-exist in the same DB and + * can be read.</p>
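For illustration, assuming kPrefix is the non-default member of EncodingType (kPlain being the default used above):

    // Hypothetical sketch: switch key encoding from the default kPlain to kPrefix.
    PlainTableConfig config = new PlainTableConfig()
        .setEncodingType(EncodingType.kPrefix);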
+ * + * @param encodingType {@link org.rocksdb.EncodingType} value. + * @return the reference to the current config. + */ + public PlainTableConfig setEncodingType(EncodingType encodingType) { + this.encodingType_ = encodingType; + return this; + } + + /** + * Returns the active EncodingType + * + * @return currently set encoding type + */ + public EncodingType encodingType() { + return encodingType_; + } + + /** + * Set full scan mode, if true the whole file will be read + * one record by one without using the index. + * + * @param fullScanMode boolean value indicating if full + * scan mode shall be enabled. + * @return the reference to the current config. + */ + public PlainTableConfig setFullScanMode(boolean fullScanMode) { + this.fullScanMode_ = fullScanMode; + return this; + } + + /** + * Return if full scan mode is active + * @return boolean value indicating if the full scan mode is + * enabled. + */ + public boolean fullScanMode() { + return fullScanMode_; + } + + /** + *
<p>If set to true: compute plain table index and bloom + * filter during file building and store it in file. + * When reading the file, the index will be mmapped instead + * of being recomputed.</p>
+ * + * @param storeIndexInFile value indicating if index shall + * be stored in a file + * @return the reference to the current config. + */ + public PlainTableConfig setStoreIndexInFile(boolean storeIndexInFile) { + this.storeIndexInFile_ = storeIndexInFile; + return this; + } + + /** + * Return a boolean value indicating if index shall be stored + * in a file. + * + * @return currently set value for store index in file. + */ + public boolean storeIndexInFile() { + return storeIndexInFile_; + } + + @Override protected long newTableFactoryHandle() { + return newTableFactoryHandle(keySize_, bloomBitsPerKey_, + hashTableRatio_, indexSparseness_, hugePageTlbSize_, + encodingType_.getValue(), fullScanMode_, + storeIndexInFile_); + } + + private native long newTableFactoryHandle( + int keySize, int bloomBitsPerKey, + double hashTableRatio, int indexSparseness, + int hugePageTlbSize, byte encodingType, + boolean fullScanMode, boolean storeIndexInFile); + + private int keySize_; + private int bloomBitsPerKey_; + private double hashTableRatio_; + private int indexSparseness_; + private int hugePageTlbSize_; + private EncodingType encodingType_; + private boolean fullScanMode_; + private boolean storeIndexInFile_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/RateLimiterConfig.java b/external/rocksdb/java/src/main/java/org/rocksdb/RateLimiterConfig.java new file mode 100755 index 0000000000..d2e7459e35 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/RateLimiterConfig.java @@ -0,0 +1,23 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +/** + * Config for rate limiter, which is used to control write rate of flush and + * compaction. + */ +public abstract class RateLimiterConfig { + /** + * This function should only be called by + * {@link org.rocksdb.DBOptions#setRateLimiter(long, long)}, which will + * create a c++ shared-pointer to the c++ {@code RateLimiter} that is associated + * with a Java RateLimiterConfig. + * + * @see org.rocksdb.DBOptions#setRateLimiter(long, long) + * + * @return native handle address to rate limiter instance. + */ + abstract protected long newRateLimiterHandle(); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java b/external/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java new file mode 100755 index 0000000000..9bb23d0139 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/ReadOptions.java @@ -0,0 +1,294 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * The class that controls the get behavior. + * + * Note that dispose() must be called before an Options instance + * become out-of-scope to release the allocated memory in c++. + */ +public class ReadOptions extends RocksObject { + public ReadOptions() { + super(newReadOptions()); + } + + /** + * If true, all data read from underlying storage will be + * verified against corresponding checksums. + * Default: true + * + * @return true if checksum verification is on. 
+ */ + public boolean verifyChecksums() { + assert(isOwningHandle()); + return verifyChecksums(nativeHandle_); + } + + /** + * If true, all data read from underlying storage will be + * verified against corresponding checksums. + * Default: true + * + * @param verifyChecksums if true, then checksum verification + * will be performed on every read. + * @return the reference to the current ReadOptions. + */ + public ReadOptions setVerifyChecksums( + final boolean verifyChecksums) { + assert(isOwningHandle()); + setVerifyChecksums(nativeHandle_, verifyChecksums); + return this; + } + + // TODO(yhchiang): this option seems to be block-based table only. + // move this to a better place? + /** + * Fill the cache when loading the block-based sst formated db. + * Callers may wish to set this field to false for bulk scans. + * Default: true + * + * @return true if the fill-cache behavior is on. + */ + public boolean fillCache() { + assert(isOwningHandle()); + return fillCache(nativeHandle_); + } + + /** + * Fill the cache when loading the block-based sst formatted db. + * Callers may wish to set this field to false for bulk scans. + * Default: true + * + * @param fillCache if true, then fill-cache behavior will be + * performed. + * @return the reference to the current ReadOptions. + */ + public ReadOptions setFillCache(final boolean fillCache) { + assert(isOwningHandle()); + setFillCache(nativeHandle_, fillCache); + return this; + } + + /** + * Returns the currently assigned Snapshot instance. + * + * @return the Snapshot assigned to this instance. If no Snapshot + * is assigned null. + */ + public Snapshot snapshot() { + assert(isOwningHandle()); + long snapshotHandle = snapshot(nativeHandle_); + if (snapshotHandle != 0) { + return new Snapshot(snapshotHandle); + } + return null; + } + + /** + *
<p>If "snapshot" is non-nullptr, read as of the supplied snapshot + * (which must belong to the DB that is being read and which must + * not have been released). If "snapshot" is nullptr, use an implicit + * snapshot of the state at the beginning of this read operation.</p> + * <p>Default: null</p>
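A point-in-time read sketched with the APIs in this diff; the key bytes are placeholders and exception handling is elided:

    // Pin a snapshot, read through it, then release it.
    final Snapshot snapshot = db.getSnapshot();
    final ReadOptions readOptions = new ReadOptions();
    readOptions.setSnapshot(snapshot);
    byte[] value = db.get(readOptions, "key".getBytes()); // state as of the snapshot
    readOptions.setSnapshot(null);   // detach before releasing
    db.releaseSnapshot(snapshot);
    readOptions.dispose();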
+ * + * @param snapshot {@link Snapshot} instance + * @return the reference to the current ReadOptions. + */ + public ReadOptions setSnapshot(final Snapshot snapshot) { + assert(isOwningHandle()); + if (snapshot != null) { + setSnapshot(nativeHandle_, snapshot.nativeHandle_); + } else { + setSnapshot(nativeHandle_, 0l); + } + return this; + } + + /** + * Returns the current read tier. + * + * @return the read tier in use, by default {@link ReadTier#READ_ALL_TIER} + */ + public ReadTier readTier() { + assert(isOwningHandle()); + return ReadTier.getReadTier(readTier(nativeHandle_)); + } + + /** + * Specify if this read request should process data that ALREADY + * resides on a particular cache. If the required data is not + * found at the specified cache, then {@link RocksDBException} is thrown. + * + * @param readTier {@link ReadTier} instance + * @return the reference to the current ReadOptions. + */ + public ReadOptions setReadTier(final ReadTier readTier) { + assert(isOwningHandle()); + setReadTier(nativeHandle_, readTier.getValue()); + return this; + } + + /** + * Specify to create a tailing iterator -- a special iterator that has a + * view of the complete database (i.e. it can also be used to read newly + * added data) and is optimized for sequential reads. It will return records + * that were inserted into the database after the creation of the iterator. + * Default: false + * + * Not supported in {@code ROCKSDB_LITE} mode! + * + * @return true if tailing iterator is enabled. + */ + public boolean tailing() { + assert(isOwningHandle()); + return tailing(nativeHandle_); + } + + /** + * Specify to create a tailing iterator -- a special iterator that has a + * view of the complete database (i.e. it can also be used to read newly + * added data) and is optimized for sequential reads. It will return records + * that were inserted into the database after the creation of the iterator. + * Default: false + * Not supported in ROCKSDB_LITE mode! + * + * @param tailing if true, then tailing iterator will be enabled. + * @return the reference to the current ReadOptions. + */ + public ReadOptions setTailing(final boolean tailing) { + assert(isOwningHandle()); + setTailing(nativeHandle_, tailing); + return this; + } + + /** + * Returns whether managed iterators will be used. + * + * @return the setting of whether managed iterators will be used, by default false + */ + public boolean managed() { + assert(isOwningHandle()); + return managed(nativeHandle_); + } + + /** + * Specify to create a managed iterator -- a special iterator that + * uses less resources by having the ability to free its underlying + * resources on request. + * + * @param managed if true, then managed iterators will be enabled. + * @return the reference to the current ReadOptions. + */ + public ReadOptions setManaged(final boolean managed) { + assert(isOwningHandle()); + setManaged(nativeHandle_, managed); + return this; + } + + /** + * Returns whether a total seek order will be used + * + * @return the setting of whether a total seek order will be used + */ + public boolean totalOrderSeek() { + assert(isOwningHandle()); + return totalOrderSeek(nativeHandle_); + } + + /** + * Enable a total order seek regardless of index format (e.g. hash index) + * used in the table. Some table format (e.g. plain table) may not support + * this option. + * + * @param totalOrderSeek if true, then total order seek will be enabled. + * @return the reference to the current ReadOptions. 
+ */ + public ReadOptions setTotalOrderSeek(final boolean totalOrderSeek) { + assert(isOwningHandle()); + setTotalOrderSeek(nativeHandle_, totalOrderSeek); + return this; + } + + /** + * Returns whether the iterator only iterates over the same prefix as the seek + * + * @return the setting of whether the iterator only iterates over the same + * prefix as the seek, default is false + */ + public boolean prefixSameAsStart() { + assert(isOwningHandle()); + return prefixSameAsStart(nativeHandle_); + } + + + /** + * Enforce that the iterator only iterates over the same prefix as the seek. + * This option is effective only for prefix seeks, i.e. prefix_extractor is + * non-null for the column family and {@link #totalOrderSeek()} is false. + * Unlike iterate_upper_bound, {@link #setPrefixSameAsStart(boolean)} only + * works within a prefix but in both directions. + * + * @param prefixSameAsStart if true, then the iterator only iterates over the + * same prefix as the seek + * @return the reference to the current ReadOptions. + */ + public ReadOptions setPrefixSameAsStart(final boolean prefixSameAsStart) { + assert(isOwningHandle()); + setPrefixSameAsStart(nativeHandle_, prefixSameAsStart); + return this; + } + + /** + * Returns whether the blocks loaded by the iterator will be pinned in memory + * + * @return the setting of whether the blocks loaded by the iterator will be + * pinned in memory + */ + public boolean pinData() { + assert(isOwningHandle()); + return pinData(nativeHandle_); + } + + /** + * Keep the blocks loaded by the iterator pinned in memory as long as the + * iterator is not deleted, If used when reading from tables created with + * BlockBasedTableOptions::use_delta_encoding = false, + * Iterator's property "rocksdb.iterator.is-key-pinned" is guaranteed to + * return 1. + * + * @param pinData if true, the blocks loaded by the iterator will be pinned + * @return the reference to the current ReadOptions. 
+ */ + public ReadOptions setPinData(final boolean pinData) { + assert(isOwningHandle()); + setPinData(nativeHandle_, pinData); + return this; + } + + private native static long newReadOptions(); + private native boolean verifyChecksums(long handle); + private native void setVerifyChecksums(long handle, boolean verifyChecksums); + private native boolean fillCache(long handle); + private native void setFillCache(long handle, boolean fillCache); + private native long snapshot(long handle); + private native void setSnapshot(long handle, long snapshotHandle); + private native byte readTier(long handle); + private native void setReadTier(long handle, byte readTierValue); + private native boolean tailing(long handle); + private native void setTailing(long handle, boolean tailing); + private native boolean managed(long handle); + private native void setManaged(long handle, boolean managed); + private native boolean totalOrderSeek(long handle); + private native void setTotalOrderSeek(long handle, boolean totalOrderSeek); + private native boolean prefixSameAsStart(long handle); + private native void setPrefixSameAsStart(long handle, boolean prefixSameAsStart); + private native boolean pinData(long handle); + private native void setPinData(long handle, boolean pinData); + + @Override protected final native void disposeInternal(final long handle); + +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java b/external/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java new file mode 100755 index 0000000000..c6f48214d9 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/ReadTier.java @@ -0,0 +1,48 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * RocksDB {@link ReadOptions} read tiers. + */ +public enum ReadTier { + READ_ALL_TIER((byte)0), + BLOCK_CACHE_TIER((byte)1), + PERSISTED_TIER((byte)2); + + private final byte value; + + ReadTier(final byte value) { + this.value = value; + } + + /** + * Returns the byte value of the enumerations value + * + * @return byte representation + */ + public byte getValue() { + return value; + } + + /** + * Get ReadTier by byte value. + * + * @param value byte representation of ReadTier. + * + * @return {@link org.rocksdb.ReadTier} instance or null. + * @throws java.lang.IllegalArgumentException if an invalid + * value is provided. + */ + public static ReadTier getReadTier(final byte value) { + for (final ReadTier readTier : ReadTier.values()) { + if (readTier.getValue() == value){ + return readTier; + } + } + throw new IllegalArgumentException("Illegal value provided for ReadTier."); + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java b/external/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java new file mode 100755 index 0000000000..1beb45c46f --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/RemoveEmptyValueCompactionFilter.java @@ -0,0 +1,18 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +package org.rocksdb; + +/** + * Just a Java wrapper around EmptyValueCompactionFilter implemented in C++ + */ +public class RemoveEmptyValueCompactionFilter + extends AbstractCompactionFilter { + public RemoveEmptyValueCompactionFilter() { + super(createNewRemoveEmptyValueCompactionFilter0()); + } + + private native static long createNewRemoveEmptyValueCompactionFilter0(); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/RestoreBackupableDB.java b/external/rocksdb/java/src/main/java/org/rocksdb/RestoreBackupableDB.java new file mode 100755 index 0000000000..f303b15070 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/RestoreBackupableDB.java @@ -0,0 +1,158 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.List; + +/** + *
<p>This class is used to access information about backups and + * restore from them.</p> + * + * <p>Note: {@code dispose()} must be called before this instance + * becomes out-of-scope to release the allocated + * memory in c++.</p>
+ * + */ +public class RestoreBackupableDB extends RocksObject { + /** + *
<p>Construct a new RestoreBackupableDB instance.</p>
+ * + * @param options {@link org.rocksdb.BackupableDBOptions} instance + */ + public RestoreBackupableDB(final BackupableDBOptions options) { + super(newRestoreBackupableDB(options.nativeHandle_)); + } + + /** + *
<p>Restore from backup with backup_id.</p> + * + * <p>Important: If options_.share_table_files == true + * and you restore DB from some backup that is not the latest, and you + * start creating new backups from the new DB, they will probably + * fail.</p> + * + * <p>Example: Let's say you have backups 1, 2, 3, 4, 5 + * and you restore 3. If you add new data to the DB and try creating a new + * backup now, the database will diverge from backups 4 and 5 and the new + * backup will fail. If you want to create a new backup, you will first have + * to delete backups 4 and 5.</p>
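The backup-1-through-5 scenario above, sketched against this class; backupDir, dbDir and walDir are assumed paths and exception handling is elided:

    // Drop the now-divergent backups 4 and 5, then restore backup 3.
    BackupableDBOptions backupOptions = new BackupableDBOptions(backupDir);
    RestoreBackupableDB restore = new RestoreBackupableDB(backupOptions);
    restore.deleteBackup(4);
    restore.deleteBackup(5);
    restore.restoreDBFromBackup(3, dbDir, walDir, new RestoreOptions(false));
    restore.dispose();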
+ * + * @param backupId id pointing to backup + * @param dbDir database directory to restore to + * @param walDir directory where wal files are located + * @param restoreOptions {@link org.rocksdb.RestoreOptions} instance. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void restoreDBFromBackup(final long backupId, final String dbDir, + final String walDir, final RestoreOptions restoreOptions) + throws RocksDBException { + assert(isOwningHandle()); + restoreDBFromBackup0(nativeHandle_, backupId, dbDir, walDir, + restoreOptions.nativeHandle_); + } + + /** + *
<p>Restore from the latest backup.</p>
+ * + * @param dbDir database directory to restore to + * @param walDir directory where wal files are located + * @param restoreOptions {@link org.rocksdb.RestoreOptions} instance + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void restoreDBFromLatestBackup(final String dbDir, + final String walDir, final RestoreOptions restoreOptions) + throws RocksDBException { + assert(isOwningHandle()); + restoreDBFromLatestBackup0(nativeHandle_, dbDir, walDir, + restoreOptions.nativeHandle_); + } + + /** + *
<p>Deletes old backups, keeping latest numBackupsToKeep alive.</p>
+ * + * @param numBackupsToKeep of latest backups to keep + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void purgeOldBackups(final int numBackupsToKeep) + throws RocksDBException { + assert(isOwningHandle()); + purgeOldBackups0(nativeHandle_, numBackupsToKeep); + } + + /** + *
<p>Deletes a specific backup.</p>
+ * + * @param backupId of backup to delete. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void deleteBackup(final int backupId) + throws RocksDBException { + assert(isOwningHandle()); + deleteBackup0(nativeHandle_, backupId); + } + + /** + *
<p>Returns a list of {@link BackupInfo} instances, which describe + * already made backups.</p>
+ * + * @return List of {@link BackupInfo} instances. + */ + public List getBackupInfos() { + assert(isOwningHandle()); + return getBackupInfo(nativeHandle_); + } + + /** + *
<p>Returns a list of corrupted backup ids. If there + * is no corrupted backup the method will return an + * empty list.</p>
+ * + * @return array of backup ids as int ids. + */ + public int[] getCorruptedBackups() { + assert(isOwningHandle()); + return getCorruptedBackups(nativeHandle_); + } + + /** + *
<p>Will delete all the files we don't need anymore. It will + * do the full scan of the files/ directory and delete all the + * files that are not referenced.</p>
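A small maintenance pass combining the calls above; the retention count of 5 and the BackupInfo accessors (backupId(), size()) are assumptions about the surrounding API, and exception handling is elided:

    // Inspect existing backups, trim old ones, then drop unreferenced files.
    for (BackupInfo info : restore.getBackupInfos()) {
      System.out.println("backup " + info.backupId() + ", bytes: " + info.size());
    }
    restore.purgeOldBackups(5);   // keep only the five most recent backups
    restore.garbageCollect();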
+ * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void garbageCollect() throws RocksDBException { + assert(isOwningHandle()); + garbageCollect(nativeHandle_); + } + + private native static long newRestoreBackupableDB(final long options); + private native void restoreDBFromBackup0(long nativeHandle, long backupId, + String dbDir, String walDir, long restoreOptions) + throws RocksDBException; + private native void restoreDBFromLatestBackup0(long nativeHandle, + String dbDir, String walDir, long restoreOptions) + throws RocksDBException; + private native void purgeOldBackups0(long nativeHandle, int numBackupsToKeep) + throws RocksDBException; + private native void deleteBackup0(long nativeHandle, int backupId) + throws RocksDBException; + private native List getBackupInfo(long handle); + private native int[] getCorruptedBackups(long handle); + private native void garbageCollect(long handle) + throws RocksDBException; + @Override protected final native void disposeInternal( + final long nativeHandle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java b/external/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java new file mode 100755 index 0000000000..54b0eff28c --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/RestoreOptions.java @@ -0,0 +1,32 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * RestoreOptions to control the behavior of restore. + * + * Note that dispose() must be called before this instance become out-of-scope + * to release the allocated memory in c++. + * + */ +public class RestoreOptions extends RocksObject { + /** + * Constructor + * + * @param keepLogFiles If true, restore won't overwrite the existing log files + * in wal_dir. It will also move all log files from archive directory to + * wal_dir. Use this option in combination with + * BackupableDBOptions::backup_log_files = false for persisting in-memory + * databases. + * Default: false + */ + public RestoreOptions(final boolean keepLogFiles) { + super(newRestoreOptions(keepLogFiles)); + } + + private native static long newRestoreOptions(boolean keepLogFiles); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java b/external/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java new file mode 100755 index 0000000000..dd04ce3f5f --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/RocksDB.java @@ -0,0 +1,1906 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.*; +import java.io.IOException; +import org.rocksdb.util.Environment; + +/** + * A RocksDB is a persistent ordered map from keys to values. It is safe for + * concurrent access from multiple threads without any external synchronization. 
+ * All methods of this class could potentially throw RocksDBException, which + * indicates sth wrong at the RocksDB library side and the call failed. + */ +public class RocksDB extends RocksObject { + public static final byte[] DEFAULT_COLUMN_FAMILY = "default".getBytes(); + public static final int NOT_FOUND = -1; + + static { + RocksDB.loadLibrary(); + } + + /** + * Loads the necessary library files. + * Calling this method twice will have no effect. + * By default the method extracts the shared library for loading at + * java.io.tmpdir, however, you can override this temporary location by + * setting the environment variable ROCKSDB_SHAREDLIB_DIR. + */ + public static synchronized void loadLibrary() { + String tmpDir = System.getenv("ROCKSDB_SHAREDLIB_DIR"); + // loading possibly necessary libraries. + for (CompressionType compressionType : CompressionType.values()) { + try { + if (compressionType.getLibraryName() != null) { + System.loadLibrary(compressionType.getLibraryName()); + } + } catch (UnsatisfiedLinkError e) { + // since it may be optional, we ignore its loading failure here. + } + } + try + { + NativeLibraryLoader.getInstance().loadLibrary(tmpDir); + } + catch (IOException e) + { + throw new RuntimeException("Unable to load the RocksDB shared library" + + e); + } + } + + /** + * Tries to load the necessary library files from the given list of + * directories. + * + * @param paths a list of strings where each describes a directory + * of a library. + */ + public static synchronized void loadLibrary(final List paths) { + for (CompressionType compressionType : CompressionType.values()) { + if (compressionType.equals(CompressionType.NO_COMPRESSION)) { + continue; + } + for (String path : paths) { + try { + System.load(path + "/" + Environment.getSharedLibraryFileName( + compressionType.getLibraryName())); + break; + } catch (UnsatisfiedLinkError e) { + // since they are optional, we ignore loading fails. + } + } + } + boolean success = false; + UnsatisfiedLinkError err = null; + for (String path : paths) { + try { + System.load(path + "/" + + Environment.getJniLibraryFileName("rocksdbjni")); + success = true; + break; + } catch (UnsatisfiedLinkError e) { + err = e; + } + } + if (!success) { + throw err; + } + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance given + * the path to the database using the default options w/ createIfMissing + * set to true. + * + * @param path the path to the rocksdb. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @see Options#setCreateIfMissing(boolean) + */ + public static RocksDB open(final String path) throws RocksDBException { + // This allows to use the rocksjni default Options instead of + // the c++ one. + Options options = new Options(); + options.setCreateIfMissing(true); + return open(options, path); + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance given + * the path to the database using the specified options and db path and a list + * of column family names. + *
<p> + * If opened in read-write mode, every existing column family name must be + * passed within the list to this method.</p> + * <p> + * If opened in read-only mode, only a subset of existing column families must + * be passed to this method.</p> + * <p> + * Options instance *should* not be disposed before all DBs using this options + * instance have been closed. If user doesn't call options dispose explicitly, + * then this options instance will be GC'd automatically.</p> + * <p> + * ColumnFamily handles are disposed when the RocksDB instance is disposed. + * </p>
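A sketch of the column-family open path; the "cf1" name and the path are placeholders, and note that the default column family must always appear in the descriptor list:

    // One descriptor per existing column family, default family included.
    final List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
        new ColumnFamilyDescriptor("cf1".getBytes()));
    final List<ColumnFamilyHandle> handles = new ArrayList<>();
    RocksDB db = RocksDB.open("/tmp/cf-example", descriptors, handles);
    // handles.get(i) now addresses the family described by descriptors.get(i).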
+ * + * @param path the path to the rocksdb. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * on open. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @see DBOptions#setCreateIfMissing(boolean) + */ + public static RocksDB open(final String path, + final List columnFamilyDescriptors, + final List columnFamilyHandles) + throws RocksDBException { + // This allows to use the rocksjni default Options instead of + // the c++ one. + DBOptions options = new DBOptions(); + return open(options, path, columnFamilyDescriptors, columnFamilyHandles); + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance given + * the path to the database using the specified options and db path. + * + *
<p> + * Options instance *should* not be disposed before all DBs using this options + * instance have been closed. If user doesn't call options dispose explicitly, + * then this options instance will be GC'd automatically.</p> + * <p> + * Options instance can be re-used to open multiple DBs if DB statistics are + * not used. If DB statistics are required, then it's recommended to open DB + * with a new Options instance as the underlying native statistics instance does not + * use any locks to prevent concurrent updates.</p>
+ * + * @param options {@link org.rocksdb.Options} instance. + * @param path the path to the rocksdb. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * + * @see Options#setCreateIfMissing(boolean) + */ + public static RocksDB open(final Options options, final String path) + throws RocksDBException { + // when non-default Options is used, keeping an Options reference + // in RocksDB can prevent Java to GC during the life-time of + // the currently-created RocksDB. + final RocksDB db = new RocksDB(open(options.nativeHandle_, path)); + db.storeOptionsInstance(options); + return db; + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance given + * the path to the database using the specified options and db path and a list + * of column family names. + *
<p> + * If opened in read-write mode, every existing column family name must be + * passed within the list to this method.</p> + * <p> + * If opened in read-only mode, only a subset of existing column families must + * be passed to this method.</p> + * <p> + * Options instance *should* not be disposed before all DBs using this options + * instance have been closed. If user doesn't call options dispose explicitly, + * then this options instance will be GC'd automatically.</p> + * <p> + * Options instance can be re-used to open multiple DBs if DB statistics are + * not used. If DB statistics are required, then it's recommended to open DB + * with a new Options instance as the underlying native statistics instance does not + * use any locks to prevent concurrent updates.</p> + * <p> + * ColumnFamily handles are disposed when the RocksDB instance is disposed. + * </p>
+ * + * @param options {@link org.rocksdb.DBOptions} instance. + * @param path the path to the rocksdb. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * on open. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * + * @see DBOptions#setCreateIfMissing(boolean) + */ + public static RocksDB open(final DBOptions options, final String path, + final List columnFamilyDescriptors, + final List columnFamilyHandles) + throws RocksDBException { + + final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; + final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()]; + for (int i = 0; i < columnFamilyDescriptors.size(); i++) { + final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors + .get(i); + cfNames[i] = cfDescriptor.columnFamilyName(); + cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_; + } + + final long[] handles = open(options.nativeHandle_, path, cfNames, + cfOptionHandles); + final RocksDB db = new RocksDB(handles[0]); + db.storeOptionsInstance(options); + + for (int i = 1; i < handles.length; i++) { + columnFamilyHandles.add(new ColumnFamilyHandle(db, handles[i])); + } + + return db; + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance in + * Read-Only mode given the path to the database using the default + * options. + * + * @param path the path to the RocksDB. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static RocksDB openReadOnly(final String path) + throws RocksDBException { + // This allows to use the rocksjni default Options instead of + // the c++ one. + Options options = new Options(); + return openReadOnly(options, path); + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance in + * Read-Only mode given the path to the database using the default + * options. + * + * @param path the path to the RocksDB. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * on open. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static RocksDB openReadOnly(final String path, + final List columnFamilyDescriptors, + final List columnFamilyHandles) + throws RocksDBException { + // This allows to use the rocksjni default Options instead of + // the c++ one. + final DBOptions options = new DBOptions(); + return openReadOnly(options, path, columnFamilyDescriptors, + columnFamilyHandles); + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance in + * Read-Only mode given the path to the database using the specified + * options and db path. + * + * Options instance *should* not be disposed before all DBs using this options + * instance have been closed. If user doesn't call options dispose explicitly, + * then this options instance will be GC'd automatically. + * + * @param options {@link Options} instance. + * @param path the path to the RocksDB. 
+ * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static RocksDB openReadOnly(final Options options, final String path) + throws RocksDBException { + // when non-default Options is used, keeping an Options reference + // in RocksDB can prevent Java to GC during the life-time of + // the currently-created RocksDB. + final RocksDB db = new RocksDB(openROnly(options.nativeHandle_, path)); + db.storeOptionsInstance(options); + return db; + } + + /** + * The factory constructor of RocksDB that opens a RocksDB instance in + * Read-Only mode given the path to the database using the specified + * options and db path. + * + *
<p>This open method allows opening RocksDB using a subset of available + * column families.</p> + * <p>Options instance *should* not be disposed before all DBs using this + * options instance have been closed. If user doesn't call options dispose + * explicitly, then this options instance will be GC'd automatically.</p>
+ * + * @param options {@link DBOptions} instance. + * @param path the path to the RocksDB. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * on open. + * @return a {@link RocksDB} instance on success, null if the specified + * {@link RocksDB} can not be opened. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static RocksDB openReadOnly(final DBOptions options, final String path, + final List columnFamilyDescriptors, + final List columnFamilyHandles) + throws RocksDBException { + // when non-default Options is used, keeping an Options reference + // in RocksDB can prevent Java to GC during the life-time of + // the currently-created RocksDB. + + final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; + final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()]; + for (int i = 0; i < columnFamilyDescriptors.size(); i++) { + final ColumnFamilyDescriptor cfDescriptor = columnFamilyDescriptors + .get(i); + cfNames[i] = cfDescriptor.columnFamilyName(); + cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_; + } + + final long[] handles = openROnly(options.nativeHandle_, path, cfNames, + cfOptionHandles); + final RocksDB db = new RocksDB(handles[0]); + db.storeOptionsInstance(options); + + for (int i = 1; i < handles.length; i++) { + columnFamilyHandles.add(new ColumnFamilyHandle(db, handles[i])); + } + + return db; + } + /** + * Static method to determine all available column families for a + * rocksdb database identified by path + * + * @param options Options for opening the database + * @param path Absolute path to rocksdb database + * @return List<byte[]> List containing the column family names + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public static List listColumnFamilies(final Options options, + final String path) throws RocksDBException { + return Arrays.asList(RocksDB.listColumnFamilies(options.nativeHandle_, + path)); + } + + private void storeOptionsInstance(DBOptionsInterface options) { + options_ = options; + } + + /** + * Set the database entry for "key" to "value". + * + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void put(final byte[] key, final byte[] value) + throws RocksDBException { + put(nativeHandle_, key, key.length, value, value.length); + } + + /** + * Set the database entry for "key" to "value" in the specified + * column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * throws IllegalArgumentException if column family is not present + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final byte[] value) throws RocksDBException { + put(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Set the database entry for "key" to "value". + * + * @param writeOpts {@link org.rocksdb.WriteOptions} instance. + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. 
+ * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void put(final WriteOptions writeOpts, final byte[] key, + final byte[] value) throws RocksDBException { + put(nativeHandle_, writeOpts.nativeHandle_, + key, key.length, value, value.length); + } + + /** + * Set the database entry for "key" to "value" for the specified + * column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param writeOpts {@link org.rocksdb.WriteOptions} instance. + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + * + * throws IllegalArgumentException if column family is not present + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @see IllegalArgumentException + */ + public void put(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpts, final byte[] key, + final byte[] value) throws RocksDBException { + put(nativeHandle_, writeOpts.nativeHandle_, key, key.length, value, + value.length, columnFamilyHandle.nativeHandle_); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, else true. + * + * This check is potentially lighter-weight than invoking DB::Get(). One way + * to make this lighter weight is to avoid doing any IOs. + * + * @param key byte array of a key to search for + * @param value StringBuffer instance which is a out parameter if a value is + * found in block-cache. + * @return boolean value indicating if key does not exist or might exist. + */ + public boolean keyMayExist(final byte[] key, final StringBuffer value){ + return keyMayExist(nativeHandle_, key, key.length, value); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, else true. + * + * This check is potentially lighter-weight than invoking DB::Get(). One way + * to make this lighter weight is to avoid doing any IOs. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key byte array of a key to search for + * @param value StringBuffer instance which is a out parameter if a value is + * found in block-cache. + * @return boolean value indicating if key does not exist or might exist. + */ + public boolean keyMayExist(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final StringBuffer value){ + return keyMayExist(nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_, value); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, else true. + * + * This check is potentially lighter-weight than invoking DB::Get(). One way + * to make this lighter weight is to avoid doing any IOs. + * + * @param readOptions {@link ReadOptions} instance + * @param key byte array of a key to search for + * @param value StringBuffer instance which is a out parameter if a value is + * found in block-cache. + * @return boolean value indicating if key does not exist or might exist. + */ + public boolean keyMayExist(final ReadOptions readOptions, + final byte[] key, final StringBuffer value){ + return keyMayExist(nativeHandle_, readOptions.nativeHandle_, + key, key.length, value); + } + + /** + * If the key definitely does not exist in the database, then this method + * returns false, else true. + * + * This check is potentially lighter-weight than invoking DB::Get(). One way + * to make this lighter weight is to avoid doing any IOs. 
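A quick sketch of the negative check keyMayExist enables; the key bytes are placeholders:

    // keyMayExist can report "definitely absent" without touching disk.
    final StringBuffer inCacheValue = new StringBuffer();
    if (!db.keyMayExist("user:42".getBytes(), inCacheValue)) {
      // The key is guaranteed not to exist; the full get() can be skipped.
    } else if (inCacheValue.length() > 0) {
      // The value was already resident in the block cache.
    }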
+ * + * @param readOptions {@link ReadOptions} instance + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key byte array of a key to search for + * @param value StringBuffer instance which is a out parameter if a value is + * found in block-cache. + * @return boolean value indicating if key does not exist or might exist. + */ + public boolean keyMayExist(final ReadOptions readOptions, + final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final StringBuffer value){ + return keyMayExist(nativeHandle_, readOptions.nativeHandle_, + key, key.length, columnFamilyHandle.nativeHandle_, + value); + } + + /** + * Apply the specified updates to the database. + * + * @param writeOpts WriteOptions instance + * @param updates WriteBatch instance + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void write(final WriteOptions writeOpts, final WriteBatch updates) + throws RocksDBException { + write0(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_); + } + + /** + * Apply the specified updates to the database. + * + * @param writeOpts WriteOptions instance + * @param updates WriteBatchWithIndex instance + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void write(final WriteOptions writeOpts, + final WriteBatchWithIndex updates) throws RocksDBException { + write1(nativeHandle_, writeOpts.nativeHandle_, updates.nativeHandle_); + } + + /** + * Add merge operand for key/value pair. + * + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void merge(final byte[] key, final byte[] value) + throws RocksDBException { + merge(nativeHandle_, key, key.length, value, value.length); + } + + /** + * Add merge operand for key/value pair in a ColumnFamily. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void merge(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key, final byte[] value) throws RocksDBException { + merge(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Add merge operand for key/value pair. + * + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void merge(final WriteOptions writeOpts, final byte[] key, + final byte[] value) throws RocksDBException { + merge(nativeHandle_, writeOpts.nativeHandle_, + key, key.length, value, value.length); + } + + /** + * Add merge operand for key/value pair. + * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param writeOpts {@link WriteOptions} for this write. + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. 
+ */ + public void merge(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpts, final byte[] key, + final byte[] value) throws RocksDBException { + merge(nativeHandle_, writeOpts.nativeHandle_, + key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Get the value associated with the specified key within column family* + * @param key the key to retrieve the value. + * @param value the out-value to receive the retrieved value. + * @return The size of the actual value that matches the specified + * {@code key} in byte. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public int get(final byte[] key, final byte[] value) throws RocksDBException { + return get(nativeHandle_, key, key.length, value, value.length); + } + + /** + * Get the value associated with the specified key within column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value. + * @param value the out-value to receive the retrieved value. + * @return The size of the actual value that matches the specified + * {@code key} in byte. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public int get(final ColumnFamilyHandle columnFamilyHandle, final byte[] key, + final byte[] value) throws RocksDBException, IllegalArgumentException { + return get(nativeHandle_, key, key.length, value, value.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * Get the value associated with the specified key. + * + * @param opt {@link org.rocksdb.ReadOptions} instance. + * @param key the key to retrieve the value. + * @param value the out-value to receive the retrieved value. + * @return The size of the actual value that matches the specified + * {@code key} in byte. If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public int get(final ReadOptions opt, final byte[] key, + final byte[] value) throws RocksDBException { + return get(nativeHandle_, opt.nativeHandle_, + key, key.length, value, value.length); + } + /** + * Get the value associated with the specified key within column family. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param opt {@link org.rocksdb.ReadOptions} instance. + * @param key the key to retrieve the value. + * @param value the out-value to receive the retrieved value. + * @return The size of the actual value that matches the specified + * {@code key} in byte. 
If the return value is greater than the + * length of {@code value}, then it indicates that the size of the + * input buffer {@code value} is insufficient and a partial result will + * be returned. RocksDB.NOT_FOUND will be returned if the value is not + * found. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public int get(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions opt, final byte[] key, final byte[] value) + throws RocksDBException { + return get(nativeHandle_, opt.nativeHandle_, key, key.length, value, + value.length, columnFamilyHandle.nativeHandle_); + } + + /** + * The simplified version of get which returns a new byte array storing + * the value associated with the specified input key if any. null will be + * returned if the specified key is not found. + * + * @param key the key to retrieve the value. + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] get(final byte[] key) throws RocksDBException { + return get(nativeHandle_, key, key.length); + } + + /** + * The simplified version of get which returns a new byte array storing + * the value associated with the specified input key if any. null will be + * returned if the specified key is not found. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value. + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] get(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key) throws RocksDBException { + return get(nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * The simplified version of get which returns a new byte array storing + * the value associated with the specified input key if any. null will be + * returned if the specified key is not found. + * + * @param key the key to retrieve the value. + * @param opt Read options. + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] get(final ReadOptions opt, final byte[] key) + throws RocksDBException { + return get(nativeHandle_, opt.nativeHandle_, key, key.length); + } + + /** + * The simplified version of get which returns a new byte array storing + * the value associated with the specified input key if any. null will be + * returned if the specified key is not found. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the key to retrieve the value. + * @param opt Read options. + * @return a byte array storing the value associated with the input key if + * any. null if it does not find the specified key. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public byte[] get(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions opt, final byte[] key) throws RocksDBException { + return get(nativeHandle_, opt.nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_); + }
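A short, illustrative sketch of the two retrieval styles above (not part of the diff; assumes an already-opened RocksDB instance named db):

    byte[] key = "key1".getBytes();

    // Buffer-based get(): RocksDB.NOT_FOUND signals a missing key; a value
    // longer than the buffer is truncated and its full size is returned.
    byte[] buffer = new byte[32];
    int size = db.get(key, buffer);
    if (size != RocksDB.NOT_FOUND && size <= buffer.length) {
      // the first 'size' bytes of 'buffer' hold the complete value
    }

    // Simplified get(): allocates a result array; null signals a missing key.
    byte[] value = db.get(key);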
+ + /** + * Returns a map of keys for which values were found in DB. + * + * @param keys List of keys for which values need to be retrieved. + * @return Map where key of map is the key passed by user and value for map + * entry is the corresponding value in DB. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public Map<byte[], byte[]> multiGet(final List<byte[]> keys) + throws RocksDBException { + assert(keys.size() != 0); + + final byte[][] values = multiGet(nativeHandle_, + keys.toArray(new byte[keys.size()][])); + + Map<byte[], byte[]> keyValueMap = new HashMap<>(); + for (int i = 0; i < values.length; i++) { + if (values[i] == null) { + continue; + } + + keyValueMap.put(keys.get(i), values[i]); + } + + return keyValueMap; + } + + /** + * Returns a map of keys for which values were found in DB. + *
<p>Note: Every key needs to have a related column family name in + * {@code columnFamilyHandleList}.</p>
+ * + * @param columnFamilyHandleList {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param keys List of keys for which values need to be retrieved. + * @return Map where key of map is the key passed by user and value for map + * entry is the corresponding value in DB. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IllegalArgumentException thrown if the size of passed keys is not + * equal to the amount of passed column family handles. + */ + public Map<byte[], byte[]> multiGet( + final List<ColumnFamilyHandle> columnFamilyHandleList, + final List<byte[]> keys) throws RocksDBException, + IllegalArgumentException { + assert(keys.size() != 0); + // Check if key size equals cfList size. If not, an exception must be + // thrown; otherwise a segmentation fault happens in the native code. + if (keys.size() != columnFamilyHandleList.size()) { + throw new IllegalArgumentException( + "For each key there must be a ColumnFamilyHandle."); + } + final long[] cfHandles = new long[columnFamilyHandleList.size()]; + for (int i = 0; i < columnFamilyHandleList.size(); i++) { + cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; + } + final byte[][] values = multiGet(nativeHandle_, + keys.toArray(new byte[keys.size()][]), cfHandles); + + Map<byte[], byte[]> keyValueMap = new HashMap<>(); + for (int i = 0; i < values.length; i++) { + if (values[i] == null) { + continue; + } + keyValueMap.put(keys.get(i), values[i]); + } + return keyValueMap; + } + + /** + * Returns a map of keys for which values were found in DB. + * + * @param opt Read options. + * @param keys List of keys for which values need to be retrieved. + * @return Map where key of map is the key passed by user and value for map + * entry is the corresponding value in DB. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public Map<byte[], byte[]> multiGet(final ReadOptions opt, + final List<byte[]> keys) throws RocksDBException { + assert(keys.size() != 0); + + final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_, + keys.toArray(new byte[keys.size()][])); + + Map<byte[], byte[]> keyValueMap = new HashMap<>(); + for (int i = 0; i < values.length; i++) { + if (values[i] == null) { + continue; + } + + keyValueMap.put(keys.get(i), values[i]); + } + + return keyValueMap; + } + + /** + * Returns a map of keys for which values were found in DB. + *
<p>Note: Every key needs to have a related column family name in + * {@code columnFamilyHandleList}.</p>
+ * + * @param opt Read options. + * @param columnFamilyHandleList {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param keys List of keys for which values need to be retrieved. + * @return Map where key of map is the key passed by user and value for map + * entry is the corresponding value in DB. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + * @throws IllegalArgumentException thrown if the size of passed keys is not + * equal to the amount of passed column family handles. + */ + public Map<byte[], byte[]> multiGet(final ReadOptions opt, + final List<ColumnFamilyHandle> columnFamilyHandleList, + final List<byte[]> keys) throws RocksDBException { + assert(keys.size() != 0); + // Check if key size equals cfList size. If not, an exception must be + // thrown; otherwise a segmentation fault happens in the native code. + if (keys.size() != columnFamilyHandleList.size()) { + throw new IllegalArgumentException( + "For each key there must be a ColumnFamilyHandle."); + } + final long[] cfHandles = new long[columnFamilyHandleList.size()]; + for (int i = 0; i < columnFamilyHandleList.size(); i++) { + cfHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; + } + final byte[][] values = multiGet(nativeHandle_, opt.nativeHandle_, + keys.toArray(new byte[keys.size()][]), cfHandles); + + Map<byte[], byte[]> keyValueMap = new HashMap<>(); + for (int i = 0; i < values.length; i++) { + if (values[i] == null) { + continue; + } + keyValueMap.put(keys.get(i), values[i]); + } + + return keyValueMap; + } + + /** + * Remove the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param key Key to delete within database + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void remove(final byte[] key) throws RocksDBException { + remove(nativeHandle_, key, key.length); + } + + /** + * Remove the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key Key to delete within database + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void remove(final ColumnFamilyHandle columnFamilyHandle, + final byte[] key) throws RocksDBException { + remove(nativeHandle_, key, key.length, columnFamilyHandle.nativeHandle_); + } + + /** + * Remove the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void remove(final WriteOptions writeOpt, final byte[] key) + throws RocksDBException { + remove(nativeHandle_, writeOpt.nativeHandle_, key, key.length); + }
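A hedged sketch of the multiGet() overloads above (illustrative; assumes an open db plus column family handles obtained from RocksDB.open(); the handle names are hypothetical):

    List<byte[]> keys = Arrays.asList("k1".getBytes(), "k2".getBytes());
    // Keys with no value in the DB are simply absent from the result map.
    Map<byte[], byte[]> found = db.multiGet(keys);

    // Column family variant: exactly one handle per key, in the same order,
    // otherwise an IllegalArgumentException is thrown.
    List<ColumnFamilyHandle> handles = Arrays.asList(defaultCfHandle, otherCfHandle);
    Map<byte[], byte[]> foundPerCf = db.multiGet(handles, keys);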
+ + /** + * Remove the database entry (if any) for "key". Returns OK on + * success, and a non-OK status on error. It is not an error if "key" + * did not exist in the database. + * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param writeOpt WriteOptions to be used with delete operation + * @param key Key to delete within database + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public void remove(final ColumnFamilyHandle columnFamilyHandle, + final WriteOptions writeOpt, final byte[] key) + throws RocksDBException { + remove(nativeHandle_, writeOpt.nativeHandle_, key, key.length, + columnFamilyHandle.nativeHandle_); + } + + /** + * DB implementations can export properties about their state + * via this method on a per column family level. + * + *
<p>If {@code property} is a valid property understood by this DB + * implementation, fills {@code value} with its current value and + * returns true. Otherwise returns false.</p>
+ * + * <p>Valid property names include: + * <ul> + * <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at + * level &lt;N&gt;, where &lt;N&gt; is an ASCII representation of a level + * number (e.g. "0").</li> + * <li>"rocksdb.stats" - returns a multi-line string that describes statistics + * about the internal operation of the DB.</li> + * <li>"rocksdb.sstables" - returns a multi-line string that describes all + * of the sstables that make up the db contents.</li> + * </ul>
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param property to be fetched. See above for examples + * @return property value + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public String getProperty(final ColumnFamilyHandle columnFamilyHandle, + final String property) throws RocksDBException { + return getProperty0(nativeHandle_, columnFamilyHandle.nativeHandle_, + property, property.length()); + } + + /** + * DB implementations can export properties about their state + * via this method. If "property" is a valid property understood by this + * DB implementation, fills "*value" with its current value and returns + * true. Otherwise returns false. + * + *
<p>Valid property names include: + * <ul> + * <li>"rocksdb.num-files-at-level&lt;N&gt;" - return the number of files at + * level &lt;N&gt;, where &lt;N&gt; is an ASCII representation of a level + * number (e.g. "0").</li> + * <li>"rocksdb.stats" - returns a multi-line string that describes statistics + * about the internal operation of the DB.</li> + * <li>"rocksdb.sstables" - returns a multi-line string that describes all + * of the sstables that make up the db contents.</li> + * </ul>
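For instance (an illustrative sketch, not part of the diff; assumes an open db), the properties listed above can be queried like this:

    String stats = db.getProperty("rocksdb.stats");
    String filesAtL0 = db.getProperty("rocksdb.num-files-at-level0");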
+ * + * @param property to be fetched. See above for examples + * @return property value + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public String getProperty(final String property) throws RocksDBException { + return getProperty0(nativeHandle_, property, property.length()); + } + + /** + *
<p>Similar to GetProperty(), but only works for a subset of properties + * whose return value is a numerical value. Return the value as long.</p>
+ * + *
<p>Note: As the returned property is of type + * {@code uint64_t} on the C++ side, the returned value can be negative, + * because Java 7 supports only signed long values.</p>
+ * + *
<p>Java 7: To mitigate the problem of the non-existent + * unsigned long type, values should be encapsulated using + * {@link java.math.BigInteger} to reflect the correct value. The correct + * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
+ * + *
<p>Java 8: In Java 8 the value should be treated as + * unsigned long using provided methods of type {@link Long}.</p>
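An illustrative sketch of the Java 7 mitigation described above ("rocksdb.estimate-num-keys" is one example of a numerical property; assumes an open db):

    long raw = db.getLongProperty("rocksdb.estimate-num-keys");
    BigInteger unsigned = BigInteger.valueOf(raw);
    if (unsigned.signum() < 0) {
      unsigned = unsigned.add(BigInteger.ONE.shiftLeft(64)); // add 2^64
    }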
+ * + * @param property to be fetched. + * + * @return numerical property value. + * + * @throws RocksDBException if an error happens in the underlying native code. + */ + public long getLongProperty(final String property) throws RocksDBException { + return getLongProperty(nativeHandle_, property, property.length()); + } + + /** + *
<p>Similar to GetProperty(), but only works for a subset of properties + * whose return value is a numerical value. Return the value as long.</p>
+ * + *
<p>Note: As the returned property is of type + * {@code uint64_t} on the C++ side, the returned value can be negative, + * because Java 7 supports only signed long values.</p>
+ * + *
<p>Java 7: To mitigate the problem of the non-existent + * unsigned long type, values should be encapsulated using + * {@link java.math.BigInteger} to reflect the correct value. The correct + * behavior is guaranteed if {@code 2^64} is added to negative values.</p>
+ * + *
<p>Java 8: In Java 8 the value should be treated as + * unsigned long using provided methods of type {@link Long}.</p>
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param property to be fetched. + * + * @return numerical property value + * + * @throws RocksDBException if an error happens in the underlying native code. + */ + public long getLongProperty(final ColumnFamilyHandle columnFamilyHandle, + final String property) throws RocksDBException { + return getLongProperty(nativeHandle_, columnFamilyHandle.nativeHandle_, + property, property.length()); + } + + /** + *
<p>Return a heap-allocated iterator over the contents of the + * database. The result of newIterator() is initially invalid + * (caller must call one of the Seek methods on the iterator + * before using it).</p>
+ * + *
<p>Caller should close the iterator when it is no longer needed. + * The returned iterator should be closed before this db is closed.</p>
+ * + * @return instance of iterator object. + */ + public RocksIterator newIterator() { + return new RocksIterator(this, iterator(nativeHandle_)); + }
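A minimal iteration sketch using the method above (illustrative; assumes an open db; the iterator must be released before the db is closed):

    RocksIterator it = db.newIterator();
    try {
      for (it.seekToFirst(); it.isValid(); it.next()) {
        byte[] k = it.key();
        byte[] v = it.value();
        // ... process k/v ...
      }
    } finally {
      it.dispose(); // releases the native iterator
    }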
+ + /** + * <p>Return a heap-allocated iterator over the contents of the + * database. The result of newIterator() is initially invalid + * (caller must call one of the Seek methods on the iterator + * before using it).</p>
+ * + *
<p>Caller should close the iterator when it is no longer needed. + * The returned iterator should be closed before this db is closed.</p>
+ * + * @param readOptions {@link ReadOptions} instance. + * @return instance of iterator object. + */ + public RocksIterator newIterator(final ReadOptions readOptions) { + return new RocksIterator(this, iterator(nativeHandle_, + readOptions.nativeHandle_)); + } + + /** + *
<p>Return a handle to the current DB state. Iterators created with + * this handle will all observe a stable snapshot of the current DB + * state. The caller must call ReleaseSnapshot(result) when the + * snapshot is no longer needed.</p>
+ * + *
<p>null will be returned if the DB fails to take a snapshot or does + * not support snapshot.</p>
+ * + * @return Snapshot {@link Snapshot} instance + */ + public Snapshot getSnapshot() { + long snapshotHandle = getSnapshot(nativeHandle_); + if (snapshotHandle != 0) { + return new Snapshot(snapshotHandle); + } + return null; + } + + /** + * Release a previously acquired snapshot. The caller must not + * use "snapshot" after this call. + * + * @param snapshot {@link Snapshot} instance + */ + public void releaseSnapshot(final Snapshot snapshot) { + if (snapshot != null) { + releaseSnapshot(nativeHandle_, snapshot.nativeHandle_); + } + }
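A usage sketch for the snapshot API above (illustrative; assumes an open db; ReadOptions.setSnapshot() is the RocksJava way to read as of a snapshot):

    Snapshot snapshot = db.getSnapshot();
    ReadOptions readOpts = new ReadOptions();
    try {
      readOpts.setSnapshot(snapshot);
      byte[] valueAtSnapshot = db.get(readOpts, "key1".getBytes());
    } finally {
      db.releaseSnapshot(snapshot);
      readOpts.dispose();
    }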
+ + /** + * <p>Return a heap-allocated iterator over the contents of the + * database. The result of newIterator() is initially invalid + * (caller must call one of the Seek methods on the iterator + * before using it).</p>
+ * + *
<p>Caller should close the iterator when it is no longer needed. + * The returned iterator should be closed before this db is closed.</p>
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @return instance of iterator object. + */ + public RocksIterator newIterator( + final ColumnFamilyHandle columnFamilyHandle) { + return new RocksIterator(this, iteratorCF(nativeHandle_, + columnFamilyHandle.nativeHandle_)); + } + + /** + *
<p>Return a heap-allocated iterator over the contents of the + * database. The result of newIterator() is initially invalid + * (caller must call one of the Seek methods on the iterator + * before using it).</p>
+ * + *
<p>Caller should close the iterator when it is no longer needed. + * The returned iterator should be closed before this db is closed.</p>
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param readOptions {@link ReadOptions} instance. + * @return instance of iterator object. + */ + public RocksIterator newIterator(final ColumnFamilyHandle columnFamilyHandle, + final ReadOptions readOptions) { + return new RocksIterator(this, iteratorCF(nativeHandle_, + columnFamilyHandle.nativeHandle_, readOptions.nativeHandle_)); + } + + /** + * Returns iterators from a consistent database state across multiple + * column families. Iterators are heap allocated and need to be deleted + * before the db is deleted. + * + * @param columnFamilyHandleList {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator} + * instances + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public List<RocksIterator> newIterators( + final List<ColumnFamilyHandle> columnFamilyHandleList) + throws RocksDBException { + return newIterators(columnFamilyHandleList, new ReadOptions()); + } + + /** + * Returns iterators from a consistent database state across multiple + * column families. Iterators are heap allocated and need to be deleted + * before the db is deleted. + * + * @param columnFamilyHandleList {@link java.util.List} containing + * {@link org.rocksdb.ColumnFamilyHandle} instances. + * @param readOptions {@link ReadOptions} instance. + * @return {@link java.util.List} containing {@link org.rocksdb.RocksIterator} + * instances + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public List<RocksIterator> newIterators( + final List<ColumnFamilyHandle> columnFamilyHandleList, + final ReadOptions readOptions) throws RocksDBException { + + final long[] columnFamilyHandles = new long[columnFamilyHandleList.size()]; + for (int i = 0; i < columnFamilyHandleList.size(); i++) { + columnFamilyHandles[i] = columnFamilyHandleList.get(i).nativeHandle_; + } + + final long[] iteratorRefs = iterators(nativeHandle_, columnFamilyHandles, + readOptions.nativeHandle_); + + final List<RocksIterator> iterators = new ArrayList<>( + columnFamilyHandleList.size()); + for (int i = 0; i < columnFamilyHandleList.size(); i++) { + iterators.add(new RocksIterator(this, iteratorRefs[i])); + } + return iterators; + }
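A sketch of the consistent cross-column-family iteration implemented above (illustrative; assumes cfHandles came from RocksDB.open()):

    List<RocksIterator> iters = db.newIterators(cfHandles);
    for (RocksIterator it : iters) {
      it.seekToFirst();
      // ... consume the iterator ...
      it.dispose(); // each iterator must be released before the db
    }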
+ + /** + * <p>Flush all memory table data.</p> + * + * <p>Note: it must be ensured that the FlushOptions instance + * is not GC'ed before this method finishes. If the wait parameter is + * set to false, flush processing is asynchronous.</p>
+ * + * @param flushOptions {@link org.rocksdb.FlushOptions} instance. + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void flush(final FlushOptions flushOptions) + throws RocksDBException { + flush(nativeHandle_, flushOptions.nativeHandle_); + } + + /** + *
<p>Flush all memory table data.</p> + * + * <p>Note: it must be ensured that the FlushOptions instance + * is not GC'ed before this method finishes. If the wait parameter is + * set to false, flush processing is asynchronous.</p>
+ * + * @param flushOptions {@link org.rocksdb.FlushOptions} instance. + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} instance. + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void flush(final FlushOptions flushOptions, + final ColumnFamilyHandle columnFamilyHandle) throws RocksDBException { + flush(nativeHandle_, flushOptions.nativeHandle_, + columnFamilyHandle.nativeHandle_); + }
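A short sketch combining the flush methods above with the compactRange() defined just below (illustrative; assumes an open db):

    FlushOptions flushOpts = new FlushOptions().setWaitForFlush(true);
    try {
      db.flush(flushOpts);  // persist all memtables to SST files
      db.compactRange();    // then compact the whole key space
    } finally {
      flushOpts.dispose();
    }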
+ + /** + * <p>Range compaction of database.</p> + * <p>Note: After the database has been compacted, + * all data will have been pushed down to the last level containing + * any data.</p> + * + * <p>See also</p> + * <ul> + * <li>{@link #compactRange(boolean, int, int)}</li> + * <li>{@link #compactRange(byte[], byte[])}</li> + * <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li> + * </ul>
+ * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange() throws RocksDBException { + compactRange0(nativeHandle_, false, -1, 0); + } + + /** + *
<p>Range compaction of database.</p> + * <p>Note: After the database has been compacted, + * all data will have been pushed down to the last level containing + * any data.</p> + * + * <p>See also</p> + * <ul> + * <li>{@link #compactRange()}</li> + * <li>{@link #compactRange(boolean, int, int)}</li> + * <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li> + * </ul>
+ * + * @param begin start of key range (included in range) + * @param end end of key range (excluded from range) + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange(final byte[] begin, final byte[] end) + throws RocksDBException { + compactRange0(nativeHandle_, begin, begin.length, end, + end.length, false, -1, 0); + } + + /** + *
<p>Range compaction of database.</p> + * <p>Note: After the database has been compacted, + * all data will have been pushed down to the last level containing + * any data.</p> + * + * <p>Compaction outputs should be placed in options.db_paths + * [target_path_id]. Behavior is undefined if target_path_id is + * out of range.</p> + * + * <p>See also</p> + * <ul> + * <li>{@link #compactRange()}</li> + * <li>{@link #compactRange(byte[], byte[])}</li> + * <li>{@link #compactRange(byte[], byte[], boolean, int, int)}</li> + * </ul>
+ * + * @param reduce_level reduce level after compaction + * @param target_level target level to compact to + * @param target_path_id the target path id of output path + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange(final boolean reduce_level, + final int target_level, final int target_path_id) + throws RocksDBException { + compactRange0(nativeHandle_, reduce_level, + target_level, target_path_id); + } + + + /** + *
<p>Range compaction of database.</p> + * <p>Note: After the database has been compacted, + * all data will have been pushed down to the last level containing + * any data.</p> + * + * <p>Compaction outputs should be placed in options.db_paths + * [target_path_id]. Behavior is undefined if target_path_id is + * out of range.</p> + * + * <p>See also</p> + * <ul> + * <li>{@link #compactRange()}</li> + * <li>{@link #compactRange(boolean, int, int)}</li> + * <li>{@link #compactRange(byte[], byte[])}</li> + * </ul>
+ * + * @param begin start of key range (included in range) + * @param end end of key range (excluded from range) + * @param reduce_level reduce level after compaction + * @param target_level target level to compact to + * @param target_path_id the target path id of output path + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange(final byte[] begin, final byte[] end, + final boolean reduce_level, final int target_level, + final int target_path_id) throws RocksDBException { + compactRange0(nativeHandle_, begin, begin.length, end, end.length, + reduce_level, target_level, target_path_id); + } + + /** + *
<p>Range compaction of column family.</p> + * <p>Note: After the database has been compacted, + * all data will have been pushed down to the last level containing + * any data.</p> + * + * <p>See also</p> + * <ul> + * <li>{@link #compactRange(ColumnFamilyHandle, boolean, int, int)}</li> + * <li>{@link #compactRange(ColumnFamilyHandle, byte[], byte[])}</li> + * <li>{@link #compactRange(ColumnFamilyHandle, byte[], byte[], + * boolean, int, int)}</li> + * </ul>
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance. + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange(final ColumnFamilyHandle columnFamilyHandle) + throws RocksDBException { + compactRange(nativeHandle_, false, -1, 0, + columnFamilyHandle.nativeHandle_); + } + + /** + *
<p>Range compaction of column family.</p> + * <p>Note: After the database has been compacted, + * all data will have been pushed down to the last level containing + * any data.</p> + * + * <p>See also</p> + * <ul> + * <li>{@link #compactRange(ColumnFamilyHandle)}</li> + * <li>{@link #compactRange(ColumnFamilyHandle, boolean, int, int)}</li> + * <li>{@link #compactRange(ColumnFamilyHandle, byte[], byte[], + * boolean, int, int)}</li> + * </ul>
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance. + * @param begin start of key range (included in range) + * @param end end of key range (excluded from range) + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange(final ColumnFamilyHandle columnFamilyHandle, + final byte[] begin, final byte[] end) throws RocksDBException { + compactRange(nativeHandle_, begin, begin.length, end, end.length, + false, -1, 0, columnFamilyHandle.nativeHandle_); + } + + /** + *
<p>Range compaction of column family.</p> + * <p>Note: After the database has been compacted, + * all data will have been pushed down to the last level containing + * any data.</p> + * + * <p>Compaction outputs should be placed in options.db_paths + * [target_path_id]. Behavior is undefined if target_path_id is + * out of range.</p> + * + * <p>See also</p> + * <ul> + * <li>{@link #compactRange(ColumnFamilyHandle)}</li> + * <li>{@link #compactRange(ColumnFamilyHandle, byte[], byte[])}</li> + * <li>{@link #compactRange(ColumnFamilyHandle, byte[], byte[], + * boolean, int, int)}</li> + * </ul>
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance. + * @param reduce_level reduce level after compaction + * @param target_level target level to compact to + * @param target_path_id the target path id of output path + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange(final ColumnFamilyHandle columnFamilyHandle, + final boolean reduce_level, final int target_level, + final int target_path_id) throws RocksDBException { + compactRange(nativeHandle_, reduce_level, target_level, + target_path_id, columnFamilyHandle.nativeHandle_); + } + + /** + *
<p>Range compaction of column family.</p> + * <p>Note: After the database has been compacted, + * all data will have been pushed down to the last level containing + * any data.</p> + * + * <p>Compaction outputs should be placed in options.db_paths + * [target_path_id]. Behavior is undefined if target_path_id is + * out of range.</p> + * + * <p>See also</p> + * <ul> + * <li>{@link #compactRange(ColumnFamilyHandle)}</li> + * <li>{@link #compactRange(ColumnFamilyHandle, boolean, int, int)}</li> + * <li>{@link #compactRange(ColumnFamilyHandle, byte[], byte[])}</li> + * </ul>
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance. + * @param begin start of key range (included in range) + * @param end end of key range (excluded from range) + * @param reduce_level reduce level after compaction + * @param target_level target level to compact to + * @param target_path_id the target path id of output path + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void compactRange(final ColumnFamilyHandle columnFamilyHandle, + final byte[] begin, final byte[] end, final boolean reduce_level, + final int target_level, final int target_path_id) + throws RocksDBException { + compactRange(nativeHandle_, begin, begin.length, end, end.length, + reduce_level, target_level, target_path_id, + columnFamilyHandle.nativeHandle_); + } + + /** + * This function will wait until all currently running background processes + * finish. After it returns, no background process will be run until + * {@link #continueBackgroundWork()} is called. + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void pauseBackgroundWork() throws RocksDBException { + pauseBackgroundWork(nativeHandle_); + } + + /** + * Resumes background work which was suspended by + * previously calling {@link #pauseBackgroundWork()}. + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public void continueBackgroundWork() throws RocksDBException { + continueBackgroundWork(nativeHandle_); + }
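A sketch of bracketing a critical section with the pause/resume pair above (illustrative; assumes an open db):

    db.pauseBackgroundWork();
    try {
      // e.g. take a file-system level copy of the DB directory
    } finally {
      db.continueBackgroundWork();
    }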
+ + /** + * <p>The sequence number of the most recent transaction.</p>
+ * + * @return sequence number of the most + * recent transaction. + */ + public long getLatestSequenceNumber() { + return getLatestSequenceNumber(nativeHandle_); + } + + /** + *
<p>Prevent file deletions. Compactions will continue to occur, + * but no obsolete files will be deleted. Calling this multiple + * times has the same effect as calling it once.</p>
+ * + * @throws RocksDBException thrown if operation was not performed + * successfully. + */ + public void disableFileDeletions() throws RocksDBException { + disableFileDeletions(nativeHandle_); + } + + /** + *
<p>Allow compactions to delete obsolete files. + * If force == true, the call to EnableFileDeletions() + * will guarantee that file deletions are enabled after + * the call, even if DisableFileDeletions() was called + * multiple times before.</p>
+ * + *
<p>If force == false, EnableFileDeletions will only + * enable file deletion after it's been called at least + * as many times as DisableFileDeletions(), enabling + * the two methods to be called by two threads + * concurrently without synchronization + * -- i.e., file deletions will be enabled only after both + * threads call EnableFileDeletions().</p>
+ * + * @param force boolean value described above. + * + * @throws RocksDBException thrown if operation was not performed + * successfully. + */ + public void enableFileDeletions(final boolean force) + throws RocksDBException { + enableFileDeletions(nativeHandle_, force); + } + + /** + *
<p>Returns an iterator that is positioned at a write-batch containing + * seq_number. If the sequence number is non-existent, it returns an iterator + * at the first available seq_no after the requested seq_no.</p>
+ * + *
<p>Must set WAL_ttl_seconds or WAL_size_limit_MB to large values to + * use this API, else the WAL files will get + * cleared aggressively and the iterator might keep getting invalid before + * an update is read.</p>
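A sketch of tailing the WAL via the iterator described above (illustrative; assumes an open db configured with WAL_ttl_seconds/WAL_size_limit_MB; BatchResult is the nested result type of RocksJava's TransactionLogIterator):

    TransactionLogIterator walIter = db.getUpdatesSince(0);
    try {
      while (walIter.isValid()) {
        TransactionLogIterator.BatchResult batch = walIter.getBatch();
        long firstSeqInBatch = batch.sequenceNumber();
        // ... replay batch.writeBatch() elsewhere ...
        walIter.next();
      }
    } finally {
      walIter.dispose();
    }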
+ * + * @param sequenceNumber sequence number offset + * + * @return {@link org.rocksdb.TransactionLogIterator} instance. + * + * @throws org.rocksdb.RocksDBException if iterator cannot be retrieved + * from native-side. + */ + public TransactionLogIterator getUpdatesSince(final long sequenceNumber) + throws RocksDBException { + return new TransactionLogIterator( + getUpdatesSince(nativeHandle_, sequenceNumber)); + } + + /** + * Private constructor. + * + * @param nativeHandle The native handle of the C++ RocksDB object + */ + protected RocksDB(final long nativeHandle) { + super(nativeHandle); + } + + // native methods + protected native static long open(final long optionsHandle, + final String path) throws RocksDBException; + + /** + * @param optionsHandle Native handle pointing to an Options object + * @param path The directory path for the database files + * @param columnFamilyNames An array of column family names + * @param columnFamilyOptions An array of native handles pointing to + * ColumnFamilyOptions objects + * + * @return An array of native handles, [0] is the handle of the RocksDB object + * [1..1+n] are handles of the ColumnFamilyReferences + * + * @throws RocksDBException thrown if the database could not be opened + */ + protected native static long[] open(final long optionsHandle, + final String path, final byte[][] columnFamilyNames, + final long[] columnFamilyOptions) throws RocksDBException; + + protected native static long openROnly(final long optionsHandle, + final String path) throws RocksDBException; + + /** + * @param optionsHandle Native handle pointing to an Options object + * @param path The directory path for the database files + * @param columnFamilyNames An array of column family names + * @param columnFamilyOptions An array of native handles pointing to + * ColumnFamilyOptions objects + * + * @return An array of native handles, [0] is the handle of the RocksDB object + * [1..1+n] are handles of the ColumnFamilyReferences + * + * @throws RocksDBException thrown if the database could not be opened + */ + protected native static long[] openROnly(final long optionsHandle, + final String path, final byte[][] columnFamilyNames, + final long[] columnFamilyOptions + ) throws RocksDBException; + + protected native static byte[][] listColumnFamilies( + long optionsHandle, String path) throws RocksDBException; + protected native void put( + long handle, byte[] key, int keyLen, + byte[] value, int valueLen) throws RocksDBException; + protected native void put( + long handle, byte[] key, int keyLen, + byte[] value, int valueLen, long cfHandle) throws RocksDBException; + protected native void put( + long handle, long writeOptHandle, + byte[] key, int keyLen, + byte[] value, int valueLen) throws RocksDBException; + protected native void put( + long handle, long writeOptHandle, + byte[] key, int keyLen, + byte[] value, int valueLen, long cfHandle) throws RocksDBException; + protected native void write0(final long handle, long writeOptHandle, + long wbHandle) throws RocksDBException; + protected native void write1(final long handle, long writeOptHandle, + long wbwiHandle) throws RocksDBException; + protected native boolean keyMayExist(final long handle, final byte[] key, + final int keyLen, final StringBuffer stringBuffer); + protected native boolean keyMayExist(final long handle, final byte[] key, + final int keyLen, final long cfHandle, final StringBuffer stringBuffer); + protected native boolean keyMayExist(final long handle, + final long optionsHandle, final byte[] key, 
final int keyLen, + final StringBuffer stringBuffer); + protected native boolean keyMayExist(final long handle, + final long optionsHandle, final byte[] key, final int keyLen, + final long cfHandle, final StringBuffer stringBuffer); + protected native void merge( + long handle, byte[] key, int keyLen, + byte[] value, int valueLen) throws RocksDBException; + protected native void merge( + long handle, byte[] key, int keyLen, + byte[] value, int valueLen, long cfHandle) throws RocksDBException; + protected native void merge( + long handle, long writeOptHandle, + byte[] key, int keyLen, + byte[] value, int valueLen) throws RocksDBException; + protected native void merge( + long handle, long writeOptHandle, + byte[] key, int keyLen, + byte[] value, int valueLen, long cfHandle) throws RocksDBException; + protected native int get( + long handle, byte[] key, int keyLen, + byte[] value, int valueLen) throws RocksDBException; + protected native int get( + long handle, byte[] key, int keyLen, + byte[] value, int valueLen, long cfHandle) throws RocksDBException; + protected native int get( + long handle, long readOptHandle, byte[] key, int keyLen, + byte[] value, int valueLen) throws RocksDBException; + protected native int get( + long handle, long readOptHandle, byte[] key, int keyLen, + byte[] value, int valueLen, long cfHandle) throws RocksDBException; + protected native byte[][] multiGet(final long dbHandle, final byte[][] keys); + protected native byte[][] multiGet(final long dbHandle, final byte[][] keys, + final long[] columnFamilyHandles); + protected native byte[][] multiGet(final long dbHandle, final long rOptHandle, + final byte[][] keys); + protected native byte[][] multiGet(final long dbHandle, final long rOptHandle, + final byte[][] keys, final long[] columnFamilyHandles); + protected native byte[] get( + long handle, byte[] key, int keyLen) throws RocksDBException; + protected native byte[] get( + long handle, byte[] key, int keyLen, long cfHandle) + throws RocksDBException; + protected native byte[] get( + long handle, long readOptHandle, + byte[] key, int keyLen) throws RocksDBException; + protected native byte[] get( + long handle, long readOptHandle, + byte[] key, int keyLen, long cfHandle) throws RocksDBException; + protected native void remove( + long handle, byte[] key, int keyLen) throws RocksDBException; + protected native void remove( + long handle, byte[] key, int keyLen, long cfHandle) + throws RocksDBException; + protected native void remove( + long handle, long writeOptHandle, + byte[] key, int keyLen) throws RocksDBException; + protected native void remove( + long handle, long writeOptHandle, + byte[] key, int keyLen, long cfHandle) throws RocksDBException; + protected native String getProperty0(long nativeHandle, + String property, int propertyLength) throws RocksDBException; + protected native String getProperty0(long nativeHandle, long cfHandle, + String property, int propertyLength) throws RocksDBException; + protected native long getLongProperty(long nativeHandle, + String property, int propertyLength) throws RocksDBException; + protected native long getLongProperty(long nativeHandle, long cfHandle, + String property, int propertyLength) throws RocksDBException; + protected native long iterator(long handle); + protected native long iterator(long handle, long readOptHandle); + protected native long iteratorCF(long handle, long cfHandle); + protected native long iteratorCF(long handle, long cfHandle, + long readOptHandle); + protected native long[] iterators(final 
long handle, + final long[] columnFamilyHandles, final long readOptHandle) + throws RocksDBException; + protected native long getSnapshot(long nativeHandle); + protected native void releaseSnapshot( + long nativeHandle, long snapshotHandle); + @Override protected final native void disposeInternal(final long handle); + private native long getDefaultColumnFamily(long handle); + private native long createColumnFamily(final long handle, + final byte[] columnFamilyName, final long columnFamilyOptions) + throws RocksDBException; + private native void dropColumnFamily(long handle, long cfHandle) + throws RocksDBException; + private native void flush(long handle, long flushOptHandle) + throws RocksDBException; + private native void flush(long handle, long flushOptHandle, + long cfHandle) throws RocksDBException; + private native void compactRange0(long handle, boolean reduce_level, + int target_level, int target_path_id) throws RocksDBException; + private native void compactRange0(long handle, byte[] begin, int beginLen, + byte[] end, int endLen, boolean reduce_level, int target_level, + int target_path_id) throws RocksDBException; + private native void compactRange(long handle, boolean reduce_level, + int target_level, int target_path_id, long cfHandle) + throws RocksDBException; + private native void compactRange(long handle, byte[] begin, int beginLen, + byte[] end, int endLen, boolean reduce_level, int target_level, + int target_path_id, long cfHandle) throws RocksDBException; + private native void pauseBackgroundWork(long handle) throws RocksDBException; + private native void continueBackgroundWork(long handle) throws RocksDBException; + private native long getLatestSequenceNumber(long handle); + private native void disableFileDeletions(long handle) throws RocksDBException; + private native void enableFileDeletions(long handle, + boolean force) throws RocksDBException; + private native long getUpdatesSince(long handle, long sequenceNumber) + throws RocksDBException; + + protected DBOptionsInterface options_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java b/external/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java new file mode 100755 index 0000000000..ee869f20ff --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/RocksDBException.java @@ -0,0 +1,21 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * A RocksDBException encapsulates the error of an operation. This exception + * type is used to describe an internal error from the c++ rocksdb library. + */ +public class RocksDBException extends Exception { + /** + * The constructor used by a set of public static factory methods. + * + * @param msg the specified error message. + */ + public RocksDBException(final String msg) { + super(msg); + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java b/external/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java new file mode 100755 index 0000000000..72dc22c42c --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/RocksEnv.java @@ -0,0 +1,43 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + *
<p>A RocksEnv is an interface used by the rocksdb implementation to access + * operating system functionality like the filesystem etc.</p>
+ * + *
<p>All Env implementations are safe for concurrent access from + * multiple threads without any external synchronization.</p>
+ */ +public class RocksEnv extends Env { + + /** + *
<p>Package-private constructor that uses the specified native handle + * to construct a RocksEnv.</p>
+ * + *
<p>Note that the ownership of the input handle + * belongs to the caller, and the newly created RocksEnv will not take + * the ownership of the input handle. As a result, calling + * {@code dispose()} of the created RocksEnv will be a no-op.</p>
+ */ + RocksEnv(final long handle) { + super(handle); + disOwnNativeHandle(); + } + + /** + *
<p>The helper function of {@link #dispose()} which all subclasses of + * {@link RocksObject} must implement to release their associated C++ + * resource.</p>
+ * + *
<p>Note: this class is used to provide the default RocksEnv to + * RocksJava. The default env allocation is managed by C++.</p>
+ */ + @Override + protected final void disposeInternal(final long handle) { + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java b/external/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java new file mode 100755 index 0000000000..42e2460cf1 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/RocksIterator.java @@ -0,0 +1,64 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + *
<p>An iterator that yields a sequence of key/value pairs from a source. + * Multiple implementations are provided by this library. + * In particular, iterators are provided + * to access the contents of a Table or a DB.</p>
+ * + *
<p>Multiple threads can invoke const methods on a RocksIterator without + * external synchronization, but if any of the threads may call a + * non-const method, all threads accessing the same RocksIterator must use + * external synchronization.</p>
+ * + * @see org.rocksdb.RocksObject + */ +public class RocksIterator extends AbstractRocksIterator<RocksDB> { + protected RocksIterator(RocksDB rocksDB, long nativeHandle) { + super(rocksDB, nativeHandle); + } + + /** + *
<p>Return the key for the current entry. The underlying storage for + * the returned slice is valid only until the next modification of + * the iterator.</p>
+ * + *
<p>REQUIRES: {@link #isValid()}</p>
+ * + * @return key for the current entry. + */ + public byte[] key() { + assert(isOwningHandle()); + return key0(nativeHandle_); + } + + /** + *
<p>Return the value for the current entry. The underlying storage for + * the returned slice is valid only until the next modification of + * the iterator.</p>
+ * + *
<p>REQUIRES: !AtEnd() && !AtStart()</p>
+ * @return value for the current entry. + */ + public byte[] value() { + assert(isOwningHandle()); + return value0(nativeHandle_); + } + + @Override protected final native void disposeInternal(final long handle); + @Override final native boolean isValid0(long handle); + @Override final native void seekToFirst0(long handle); + @Override final native void seekToLast0(long handle); + @Override final native void next0(long handle); + @Override final native void prev0(long handle); + @Override final native void seek0(long handle, byte[] target, int targetLen); + @Override final native void status0(long handle) throws RocksDBException; + + private native byte[] key0(long handle); + private native byte[] value0(long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java b/external/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java new file mode 100755 index 0000000000..3ac74a90a2 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/RocksIteratorInterface.java @@ -0,0 +1,80 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + *
<p>Defines the interface for an Iterator which provides + * access to data one entry at a time. Multiple implementations + * are provided by this library. In particular, iterators are provided + * to access the contents of a DB and Write Batch.</p>
+ * + *
<p>Multiple threads can invoke const methods on a RocksIterator without + * external synchronization, but if any of the threads may call a + * non-const method, all threads accessing the same RocksIterator must use + * external synchronization.</p>
+ * + * @see org.rocksdb.RocksObject + */ +public interface RocksIteratorInterface { + + /** + *
<p>An iterator is either positioned at an entry, or + * not valid. This method returns true if the iterator is valid.</p>
+ * + * @return true if iterator is valid. + */ + boolean isValid(); + + /** + *
<p>Position at the first entry in the source. The iterator is Valid() + * after this call if the source is not empty.</p>
+ */ + void seekToFirst(); + + /** + *
<p>Position at the last entry in the source. The iterator is + * valid after this call if the source is not empty.</p>
+ */ + void seekToLast(); + + /** + *
<p>Position at the first entry in the source whose key is at or + * past target.</p> + * + * <p>The iterator is valid after this call if the source contains + * a key that comes at or past target.</p>
+ * + * @param target byte array describing a key or a + * key prefix to seek for. + */ + void seek(byte[] target); + + /** + *
<p>Moves to the next entry in the source. After this call, Valid() is + * true if the iterator was not positioned at the last entry in the source.</p> + * + * <p>REQUIRES: {@link #isValid()}</p>
+ */ + void next(); + + /** + *
<p>Moves to the previous entry in the source. After this call, Valid() is + * true if the iterator was not positioned at the first entry in source.</p> + * + * <p>REQUIRES: {@link #isValid()}</p>
+ */ + void prev(); + + /** + *
<p>If an error has occurred, return it. Else return an ok status. + * If non-blocking IO is requested and this operation cannot be + * satisfied without doing some IO, then this returns Status::Incomplete().</p>
+ * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + void status() throws RocksDBException; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java b/external/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java new file mode 100755 index 0000000000..d7854eae17 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/RocksMemEnv.java @@ -0,0 +1,27 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * RocksDB memory environment. + */ +public class RocksMemEnv extends Env { + + /** + *
<p>Creates a new RocksDB environment that stores its data + * in memory and delegates all non-file-storage tasks to + * base_env. The caller must delete the result when it is + * no longer needed.</p>
+ * + *
<p>{@code *base_env} must remain live while the result is in use.</p>
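A sketch of opening a purely in-memory database with this Env (illustrative; assumes Options.setEnv(), which RocksJava provides; the path acts only as a namespace):

    Env memEnv = new RocksMemEnv();
    Options opts = new Options()
        .setCreateIfMissing(true)
        .setEnv(memEnv);
    RocksDB db = RocksDB.open(opts, "/in-memory-only");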
+ */ + public RocksMemEnv() { + super(createMemEnv()); + } + + private static native long createMemEnv(); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java b/external/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java new file mode 100755 index 0000000000..9b9e576dce --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/RocksMutableObject.java @@ -0,0 +1,69 @@ +// Copyright (c) 2016, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * RocksMutableObject is an implementation of {@link AbstractNativeReference} + * whose reference to the underlying native C++ object can change. + * + *
<p>The use of {@code RocksMutableObject} should be kept to a minimum, as it + * has synchronization overheads and introduces complexity. Instead it is + * recommended to use {@link RocksObject} where possible.</p>
+ */ +public abstract class RocksMutableObject extends AbstractNativeReference { + + /** + * A mutable reference to the value of the C++ pointer pointing to some + * underlying native RocksDB C++ object. + */ + private long nativeHandle_; + private boolean owningHandle_; + + protected RocksMutableObject() { + } + + protected RocksMutableObject(final long nativeHandle) { + this.nativeHandle_ = nativeHandle; + this.owningHandle_ = true; + } + + public synchronized void setNativeHandle(final long nativeHandle, + final boolean owningNativeHandle) { + this.nativeHandle_ = nativeHandle; + this.owningHandle_ = owningNativeHandle; + } + + @Override + protected synchronized boolean isOwningHandle() { + return this.owningHandle_; + } + + /** + * Gets the value of the C++ pointer pointing to the underlying + * native C++ object. + * + * @return the pointer value for the native object + */ + protected synchronized long getNativeHandle() { + assert (this.nativeHandle_ != 0); + return this.nativeHandle_; + } + + @Override + public synchronized final void close() { + if (isOwningHandle()) { + disposeInternal(); + this.owningHandle_ = false; + this.nativeHandle_ = 0; + } + } + + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + protected abstract void disposeInternal(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java b/external/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java new file mode 100755 index 0000000000..2a35852c5e --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/RocksObject.java @@ -0,0 +1,41 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * RocksObject is an implementation of {@link AbstractNativeReference} which + * has an immutable and therefore thread-safe reference to the underlying + * native C++ RocksDB object. + *
<p>RocksObject is the base-class of almost all RocksDB classes that have a + * pointer to some underlying native C++ {@code rocksdb} object.</p> + * + * <p>The use of {@code RocksObject} should always be preferred over + * {@link RocksMutableObject}.</p>
+ */ +public abstract class RocksObject extends AbstractImmutableNativeReference { + + /** + * An immutable reference to the value of the C++ pointer pointing to some + * underlying native RocksDB C++ object. + */ + protected final long nativeHandle_; + + protected RocksObject(final long nativeHandle) { + super(true); + this.nativeHandle_ = nativeHandle; + } + + /** + * Deletes underlying C++ object pointer. + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + protected abstract void disposeInternal(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java b/external/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java new file mode 100755 index 0000000000..e31e199161 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/SkipListMemTableConfig.java @@ -0,0 +1,50 @@ +package org.rocksdb; + +/** + * The config for skip-list memtable representation. + */ +public class SkipListMemTableConfig extends MemTableConfig { + + public static final long DEFAULT_LOOKAHEAD = 0; + + /** + * SkipListMemTableConfig constructor + */ + public SkipListMemTableConfig() { + lookahead_ = DEFAULT_LOOKAHEAD; + } + + /** + * Sets lookahead for SkipList + * + * @param lookahead If non-zero, each iterator's seek operation + * will start the search from the previously visited record + * (doing at most 'lookahead' steps). This is an + * optimization for the access pattern including many + * seeks with consecutive keys. + * @return the current instance of SkipListMemTableConfig + */ + public SkipListMemTableConfig setLookahead(final long lookahead) { + lookahead_ = lookahead; + return this; + } + + /** + * Returns the currently set lookahead value. + * + * @return lookahead value + */ + public long lookahead() { + return lookahead_; + } + + + @Override protected long newMemTableFactoryHandle() { + return newMemTableFactoryHandle0(lookahead_); + } + + private native long newMemTableFactoryHandle0(long lookahead) + throws IllegalArgumentException; + + private long lookahead_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/Slice.java b/external/rocksdb/java/src/main/java/org/rocksdb/Slice.java new file mode 100755 index 0000000000..cbb9742a37 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/Slice.java @@ -0,0 +1,86 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + *
+ * Base class for slices which will receive
+ * byte[] based access to the underlying data.
+ *
+ * byte[] backed slices typically perform better with
+ * small keys and values. When using larger keys and
+ * values consider using {@link org.rocksdb.DirectSlice}
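+ *
+ * Editor's note: a short usage sketch (illustrative only, not part of
+ * the original file):
+ * <pre>
+ *   final Slice fromString = new Slice("myKey");
+ *   final Slice fromBytes = new Slice(new byte[]{0x01, 0x02, 0x03});
+ *   final byte[] contents = fromBytes.data(); // copies data out via JNI
+ *   fromString.close();
+ *   fromBytes.close();
+ * </pre>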
+ */ +public class Slice extends AbstractSlice { + /** + *
+ * Called from JNI to construct a new Java Slice
+ * without an underlying C++ object set
+ * at creation time.
+ *
+ * Note: You should be aware that
+ * {@link org.rocksdb.RocksObject#disOwnNativeHandle()} is intentionally
+ * called from the default Slice constructor, and that it is marked as
+ * private. This is so that developers cannot construct their own default
+ * Slice objects (at present). As developers cannot construct their own
+ * Slice objects through this, they are not creating underlying C++ Slice
+ * objects, and so there is nothing to free (dispose) from Java.
+ */ + private Slice() { + super(); + } + + /** + *
+ * Constructs a slice where the data is taken from
+ * a String.
+ * + * @param str String value. + */ + public Slice(final String str) { + super(createNewSliceFromString(str)); + } + + /** + *
+ * Constructs a slice where the data is a copy of
+ * the byte array from a specific offset.
+ * + * @param data byte array. + * @param offset offset within the byte array. + */ + public Slice(final byte[] data, final int offset) { + super(createNewSlice0(data, offset)); + } + + /** + *
+ * Constructs a slice where the data is a copy of
+ * the byte array.
+ * + * @param data byte array. + */ + public Slice(final byte[] data) { + super(createNewSlice1(data)); + } + + /** + *
+ * Deletes underlying C++ slice pointer
+ * and any buffered data.
+ *
+ * Note that this function should be called only after all
+ * RocksDB instances referencing the slice are closed.
+ * Otherwise undefined behavior will occur.
+ */ + @Override + protected void disposeInternal() { + final long nativeHandle = getNativeHandle(); + disposeInternalBuf(nativeHandle); + super.disposeInternal(nativeHandle); + } + + @Override protected final native byte[] data0(long handle); + private native static long createNewSlice0(final byte[] data, + final int length); + private native static long createNewSlice1(final byte[] data); + private native void disposeInternalBuf(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java b/external/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java new file mode 100755 index 0000000000..8475ec9951 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/Snapshot.java @@ -0,0 +1,37 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Snapshot of database + */ +public class Snapshot extends RocksObject { + Snapshot(final long nativeHandle) { + super(nativeHandle); + } + + /** + * Return the associated sequence number; + * + * @return the associated sequence number of + * this snapshot. + */ + public long getSequenceNumber() { + assert(isOwningHandle()); + return getSequenceNumber(nativeHandle_); + } + + /** + * Dont release C++ Snapshot pointer. The pointer + * to the snapshot is released by the database + * instance. + */ + @Override + protected final void disposeInternal(final long handle) { + } + + private native long getSequenceNumber(long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/Statistics.java b/external/rocksdb/java/src/main/java/org/rocksdb/Statistics.java new file mode 100755 index 0000000000..858637763a --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/Statistics.java @@ -0,0 +1,37 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Statistics to analyze the performance of a db. Pointer for statistics object + * is managed by Options class. + */ +public class Statistics { + + private final long statsHandle_; + + public Statistics(final long statsHandle) { + statsHandle_ = statsHandle; + } + + public long getTickerCount(TickerType tickerType) { + assert(isInitialized()); + return getTickerCount0(tickerType.getValue(), statsHandle_); + } + + public HistogramData getHistogramData(final HistogramType histogramType) { + assert(isInitialized()); + return getHistogramData0( + histogramType.getValue(), statsHandle_); + } + + private boolean isInitialized() { + return (statsHandle_ != 0); + } + + private native long getTickerCount0(int tickerType, long handle); + private native HistogramData getHistogramData0(int histogramType, long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java b/external/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java new file mode 100755 index 0000000000..246826d326 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollector.java @@ -0,0 +1,107 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.List; +import java.util.concurrent.Executors; +import java.util.concurrent.ExecutorService; +import java.util.concurrent.TimeUnit; + +/** + *
+ * Helper class to collect DB statistics periodically at a period specified in
+ * constructor. Callback function (provided in constructor) is called with
+ * every statistics collection.
+ *
+ * Caller should call start() to start statistics collection. shutDown()
+ * should be called to stop statistics collection, and must be called before
+ * the Statistics references (provided in the constructor) are disposed.
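+ *
+ * Editor's note: a usage sketch (illustrative only; it assumes statistics
+ * were enabled on the Options, e.g. via createStatistics()/statisticsPtr(),
+ * and elides exception handling):
+ * <pre>
+ *   final Statistics statistics = options.statisticsPtr();
+ *   final StatisticsCollectorCallback callback =
+ *       new StatisticsCollectorCallback() {
+ *     public void tickerCallback(TickerType tickerType, long tickerCount) {
+ *       System.out.println(tickerType + " = " + tickerCount);
+ *     }
+ *     public void histogramCallback(HistogramType histType,
+ *         HistogramData histData) {
+ *       System.out.println(histType + " median = " + histData.getMedian());
+ *     }
+ *   };
+ *   final StatisticsCollector collector = new StatisticsCollector(
+ *       java.util.Collections.singletonList(
+ *           new StatsCollectorInput(statistics, callback)),
+ *       1000); // collect once per second
+ *   collector.start();
+ *   // ... later ...
+ *   collector.shutDown(5000);
+ * </pre>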
+ */ +public class StatisticsCollector { + private final List _statsCollectorInputList; + private final ExecutorService _executorService; + private final int _statsCollectionInterval; + private volatile boolean _isRunning = true; + + /** + * Constructor for statistics collector. + * + * @param statsCollectorInputList List of statistics collector input. + * @param statsCollectionIntervalInMilliSeconds Statistics collection time + * period (specified in milliseconds). + */ + public StatisticsCollector( + final List statsCollectorInputList, + final int statsCollectionIntervalInMilliSeconds) { + _statsCollectorInputList = statsCollectorInputList; + _statsCollectionInterval = statsCollectionIntervalInMilliSeconds; + + _executorService = Executors.newSingleThreadExecutor(); + } + + public void start() { + _executorService.submit(collectStatistics()); + } + + /** + * Shuts down statistics collector. + * + * @param shutdownTimeout Time in milli-seconds to wait for shutdown before + * killing the collection process. + * @throws java.lang.InterruptedException thrown if Threads are interrupted. + */ + public void shutDown(final int shutdownTimeout) throws InterruptedException { + _isRunning = false; + + _executorService.shutdownNow(); + // Wait for collectStatistics runnable to finish so that disposal of + // statistics does not cause any exceptions to be thrown. + _executorService.awaitTermination(shutdownTimeout, TimeUnit.MILLISECONDS); + } + + private Runnable collectStatistics() { + return new Runnable() { + + @Override + public void run() { + while (_isRunning) { + try { + if(Thread.currentThread().isInterrupted()) { + break; + } + for(StatsCollectorInput statsCollectorInput : + _statsCollectorInputList) { + Statistics statistics = statsCollectorInput.getStatistics(); + StatisticsCollectorCallback statsCallback = + statsCollectorInput.getCallback(); + + // Collect ticker data + for(TickerType ticker : TickerType.values()) { + long tickerValue = statistics.getTickerCount(ticker); + statsCallback.tickerCallback(ticker, tickerValue); + } + + // Collect histogram data + for(HistogramType histogramType : HistogramType.values()) { + HistogramData histogramData = + statistics.getHistogramData(histogramType); + statsCallback.histogramCallback(histogramType, histogramData); + } + + Thread.sleep(_statsCollectionInterval); + } + } + catch (InterruptedException e) { + Thread.currentThread().interrupt(); + break; + } + catch (Exception e) { + throw new RuntimeException("Error while calculating statistics", e); + } + } + } + }; + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java b/external/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java new file mode 100755 index 0000000000..18f81790e5 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/StatisticsCollectorCallback.java @@ -0,0 +1,32 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Callback interface provided to StatisticsCollector. + * + * Thread safety: + * StatisticsCollector doesn't make any guarantees about thread safety. 
+ * If the same reference of StatisticsCollectorCallback is passed to multiple + * StatisticsCollector references, then its the responsibility of the + * user to make StatisticsCollectorCallback's implementation thread-safe. + * + */ +public interface StatisticsCollectorCallback { + /** + * Callback function to get ticker values. + * @param tickerType Ticker type. + * @param tickerCount Value of ticker type. + */ + void tickerCallback(TickerType tickerType, long tickerCount); + + /** + * Callback function to get histogram values. + * @param histType Histogram type. + * @param histData Histogram data. + */ + void histogramCallback(HistogramType histType, HistogramData histData); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java b/external/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java new file mode 100755 index 0000000000..a3acede3fc --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/StatsCollectorInput.java @@ -0,0 +1,35 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Contains all information necessary to collect statistics from one instance + * of DB statistics. + */ +public class StatsCollectorInput { + private final Statistics _statistics; + private final StatisticsCollectorCallback _statsCallback; + + /** + * Constructor for StatsCollectorInput. + * + * @param statistics Reference of DB statistics. + * @param statsCallback Reference of statistics callback interface. + */ + public StatsCollectorInput(final Statistics statistics, + final StatisticsCollectorCallback statsCallback) { + _statistics = statistics; + _statsCallback = statsCallback; + } + + public Statistics getStatistics() { + return _statistics; + } + + public StatisticsCollectorCallback getCallback() { + return _statsCallback; + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java b/external/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java new file mode 100755 index 0000000000..52cd43e796 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/StringAppendOperator.java @@ -0,0 +1,17 @@ +// Copyright (c) 2014, Vlad Balan (vlad.gm@gmail.com). All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * StringAppendOperator is a merge operator that concatenates + * two strings. + */ +public class StringAppendOperator implements MergeOperator { + @Override public long newMergeOperatorHandle() { + return newMergeOperatorHandleImpl(); + } + private native long newMergeOperatorHandleImpl(); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java b/external/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java new file mode 100755 index 0000000000..29cd262c2c --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/TableFormatConfig.java @@ -0,0 +1,22 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +/** + * TableFormatConfig is used to config the internal Table format of a RocksDB. + * To make a RocksDB to use a specific Table format, its associated + * TableFormatConfig should be properly set and passed into Options via + * Options.setTableFormatConfig() and open the db using that Options. + */ +public abstract class TableFormatConfig { + /** + *
+ * This function should only be called by Options.setTableFormatConfig(),
+ * which will create a C++ shared pointer to the C++ TableFactory
+ * that is associated with the Java TableFormatConfig.
+ * + * @return native handle address to native table instance. + */ + abstract protected long newTableFactoryHandle(); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/TickerType.java b/external/rocksdb/java/src/main/java/org/rocksdb/TickerType.java new file mode 100755 index 0000000000..9ff819a202 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/TickerType.java @@ -0,0 +1,137 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +public enum TickerType { + // total block cache misses + // REQUIRES: BLOCK_CACHE_MISS == BLOCK_CACHE_INDEX_MISS + + // BLOCK_CACHE_FILTER_MISS + + // BLOCK_CACHE_DATA_MISS; + BLOCK_CACHE_MISS(0), + // total block cache hit + // REQUIRES: BLOCK_CACHE_HIT == BLOCK_CACHE_INDEX_HIT + + // BLOCK_CACHE_FILTER_HIT + + // BLOCK_CACHE_DATA_HIT; + BLOCK_CACHE_HIT(1), + // # of blocks added to block cache. + BLOCK_CACHE_ADD(2), + // # of times cache miss when accessing index block from block cache. + BLOCK_CACHE_INDEX_MISS(3), + // # of times cache hit when accessing index block from block cache. + BLOCK_CACHE_INDEX_HIT(4), + // # of times cache miss when accessing filter block from block cache. + BLOCK_CACHE_FILTER_MISS(5), + // # of times cache hit when accessing filter block from block cache. + BLOCK_CACHE_FILTER_HIT(6), + // # of times cache miss when accessing data block from block cache. + BLOCK_CACHE_DATA_MISS(7), + // # of times cache hit when accessing data block from block cache. + BLOCK_CACHE_DATA_HIT(8), + // # of times bloom filter has avoided file reads. + BLOOM_FILTER_USEFUL(9), + + // # of memtable hits. + MEMTABLE_HIT(10), + // # of memtable misses. + MEMTABLE_MISS(11), + + // # of Get() queries served by L0 + GET_HIT_L0(12), + // # of Get() queries served by L1 + GET_HIT_L1(13), + // # of Get() queries served by L2 and up + GET_HIT_L2_AND_UP(14), + + /** + * COMPACTION_KEY_DROP_* count the reasons for key drop during compaction + * There are 3 reasons currently. + */ + COMPACTION_KEY_DROP_NEWER_ENTRY(15), // key was written with a newer value. + COMPACTION_KEY_DROP_OBSOLETE(16), // The key is obsolete. + COMPACTION_KEY_DROP_USER(17), // user compaction function has dropped the key. + + // Number of keys written to the database via the Put and Write call's + NUMBER_KEYS_WRITTEN(18), + // Number of Keys read, + NUMBER_KEYS_READ(19), + // Number keys updated, if inplace update is enabled + NUMBER_KEYS_UPDATED(20), + // Bytes written / read + BYTES_WRITTEN(21), + BYTES_READ(22), + NO_FILE_CLOSES(23), + NO_FILE_OPENS(24), + NO_FILE_ERRORS(25), + // Time system had to wait to do LO-L1 compactions + STALL_L0_SLOWDOWN_MICROS(26), + // Time system had to wait to move memtable to L1. + STALL_MEMTABLE_COMPACTION_MICROS(27), + // write throttle because of too many files in L0 + STALL_L0_NUM_FILES_MICROS(28), + // Writer has to wait for compaction or flush to finish. + STALL_MICROS(29), + // The wait time for db mutex. 
+ DB_MUTEX_WAIT_MICROS(30), + RATE_LIMIT_DELAY_MILLIS(31), + NO_ITERATORS(32), // number of iterators currently open + + // Number of MultiGet calls, keys read, and bytes read + NUMBER_MULTIGET_CALLS(33), + NUMBER_MULTIGET_KEYS_READ(34), + NUMBER_MULTIGET_BYTES_READ(35), + + // Number of deletes records that were not required to be + // written to storage because key does not exist + NUMBER_FILTERED_DELETES(36), + NUMBER_MERGE_FAILURES(37), + SEQUENCE_NUMBER(38), + + // number of times bloom was checked before creating iterator on a + // file, and the number of times the check was useful in avoiding + // iterator creation (and thus likely IOPs). + BLOOM_FILTER_PREFIX_CHECKED(39), + BLOOM_FILTER_PREFIX_USEFUL(40), + + // Number of times we had to reseek inside an iteration to skip + // over large number of keys with same userkey. + NUMBER_OF_RESEEKS_IN_ITERATION(41), + + // Record the number of calls to GetUpadtesSince. Useful to keep track of + // transaction log iterator refreshes + GET_UPDATES_SINCE_CALLS(42), + BLOCK_CACHE_COMPRESSED_MISS(43), // miss in the compressed block cache + BLOCK_CACHE_COMPRESSED_HIT(44), // hit in the compressed block cache + WAL_FILE_SYNCED(45), // Number of times WAL sync is done + WAL_FILE_BYTES(46), // Number of bytes written to WAL + + // Writes can be processed by requesting thread or by the thread at the + // head of the writers queue. + WRITE_DONE_BY_SELF(47), + WRITE_DONE_BY_OTHER(48), + WRITE_TIMEDOUT(49), // Number of writes ending up with timed-out. + WRITE_WITH_WAL(50), // Number of Write calls that request WAL + COMPACT_READ_BYTES(51), // Bytes read during compaction + COMPACT_WRITE_BYTES(52), // Bytes written during compaction + FLUSH_WRITE_BYTES(53), // Bytes written during flush + + // Number of table's properties loaded directly from file, without creating + // table reader object. + NUMBER_DIRECT_LOAD_TABLE_PROPERTIES(54), + NUMBER_SUPERVERSION_ACQUIRES(55), + NUMBER_SUPERVERSION_RELEASES(56), + NUMBER_SUPERVERSION_CLEANUPS(57), + NUMBER_BLOCK_NOT_COMPRESSED(58); + + private final int value_; + + private TickerType(int value) { + value_ = value; + } + + public int getValue() { + return value_; + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java b/external/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java new file mode 100755 index 0000000000..f9684bd72e --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/TransactionLogIterator.java @@ -0,0 +1,111 @@ +package org.rocksdb; + +/** + *
+ * A TransactionLogIterator is used to iterate over the transactions in a db.
+ * One run of the iterator is continuous, i.e. the iterator will stop at the
+ * beginning of any gap in sequences.
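+ *
+ * Editor's note: a usage sketch (illustrative only; it assumes an open
+ * {@code RocksDB db} with a write-ahead log, and elides exception
+ * handling):
+ * <pre>
+ *   final TransactionLogIterator it = db.getUpdatesSince(0);
+ *   while (it.isValid()) {
+ *     final TransactionLogIterator.BatchResult result = it.getBatch();
+ *     System.out.println("sequence: " + result.sequenceNumber());
+ *     it.next();
+ *   }
+ *   it.status(); // throws RocksDBException if iteration failed
+ * </pre>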
+ */ +public class TransactionLogIterator extends RocksObject { + + /** + *
+ * An iterator is either positioned at a WriteBatch
+ * or not valid. This method returns true if the iterator
+ * is valid. Data can be read from a valid iterator.
+ * + * @return true if iterator position is valid. + */ + public boolean isValid() { + return isValid(nativeHandle_); + } + + /** + *
+ * Moves the iterator to the next WriteBatch.
+ * REQUIRES: Valid() to be true.
+ */ + public void next() { + next(nativeHandle_); + } + + /** + *
+ * Throws RocksDBException if something went wrong.
+ * + * @throws org.rocksdb.RocksDBException if something went + * wrong in the underlying C++ code. + */ + public void status() throws RocksDBException { + status(nativeHandle_); + } + + /** + *
+ * If iterator position is valid, return the current
+ * write_batch and the sequence number of the earliest
+ * transaction contained in the batch.
+ *
+ * ONLY use if Valid() is true and status() is OK.
+ * + * @return {@link org.rocksdb.TransactionLogIterator.BatchResult} + * instance. + */ + public BatchResult getBatch() { + assert(isValid()); + return getBatch(nativeHandle_); + } + + /** + *
+ * TransactionLogIterator constructor.
+ * + * @param nativeHandle address to native address. + */ + TransactionLogIterator(final long nativeHandle) { + super(nativeHandle); + } + + /** + *
+ * BatchResult represents a data structure returned
+ * by a TransactionLogIterator containing a sequence
+ * number and a {@link WriteBatch} instance.
+ */ + public final class BatchResult { + /** + *
+ * Constructor of BatchResult class.
+ * + * @param sequenceNumber related to this BatchResult instance. + * @param nativeHandle to {@link org.rocksdb.WriteBatch} + * native instance. + */ + public BatchResult(final long sequenceNumber, + final long nativeHandle) { + sequenceNumber_ = sequenceNumber; + writeBatch_ = new WriteBatch(nativeHandle); + } + + /** + *
+ * Return sequence number related to this BatchResult.
+ * + * @return Sequence number. + */ + public long sequenceNumber() { + return sequenceNumber_; + } + + /** + *
+ * Return contained {@link org.rocksdb.WriteBatch}
+ * instance.
+ * + * @return {@link org.rocksdb.WriteBatch} instance. + */ + public WriteBatch writeBatch() { + return writeBatch_; + } + + private final long sequenceNumber_; + private final WriteBatch writeBatch_; + } + + @Override protected final native void disposeInternal(final long handle); + private native boolean isValid(long handle); + private native void next(long handle); + private native void status(long handle) + throws RocksDBException; + private native BatchResult getBatch(long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java b/external/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java new file mode 100755 index 0000000000..72704893c2 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/TtlDB.java @@ -0,0 +1,211 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.List; + +/** + * Database with TTL support. + * + *
+ * Use case:
+ * This API should be used to open the db when key-values inserted are
+ * meant to be removed from the db in a non-strict 'ttl' amount of time.
+ * Therefore, this guarantees that key-values inserted will remain in the
+ * db for >= ttl amount of time and the db will make efforts to remove the
+ * key-values as soon as possible after ttl seconds of their insertion.
+ *
+ * Behaviour:
+ * TTL is accepted in seconds.
+ * (int32_t)Timestamp(creation) is suffixed to values in Put internally.
+ * Expired TTL values are deleted in compaction only: (Timestamp+ttl < time_now).
+ * Get/Iterator may return expired entries (compaction has not run on them yet).
+ * A different TTL may be used during different Opens.
+ *
+ * Example:
+ * - Open1 at t=0 with ttl=4 and insert k1,k2, close at t=2
+ * - Open2 at t=3 with ttl=5. Now k1,k2 should be deleted at t>=5
+ *
+ * read_only=true opens in the usual read-only mode. Compactions will not be
+ * triggered (neither manual nor automatic), so no expired entries are removed.
+ *
+ * Constraints:
+ * Not specifying/passing or non-positive TTL behaves like TTL = infinity.
+ *
+ * !!!WARNING!!!
+ * Calling DB::Open directly to re-open a db created by this API will get
+ * corrupt values (timestamp suffixed) and no ttl effect will be there
+ * during the second Open, so use this API consistently to open the db.
+ * Be careful when passing ttl with a small positive value because the
+ * whole database may be deleted in a small amount of time.
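+ *
+ * Editor's note: a usage sketch (illustrative only; the path and ttl are
+ * assumptions, and exception handling is elided):
+ * <pre>
+ *   final Options options = new Options().setCreateIfMissing(true);
+ *   final TtlDB ttlDb = TtlDB.open(options, "/tmp/ttl_db", 5, false);
+ *   ttlDb.put("k1".getBytes(), "v1".getBytes());
+ *   // after ~5 seconds, "k1" becomes eligible for removal at compaction
+ *   ttlDb.close();
+ *   options.dispose();
+ * </pre>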
+ */ +public class TtlDB extends RocksDB { + + /** + *
+ * Opens a TtlDB.
+ *
+ * Database is opened in read-write mode without default TTL.
+ * + * @param options {@link org.rocksdb.Options} instance. + * @param db_path path to database. + * + * @return TtlDB instance. + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public static TtlDB open(final Options options, final String db_path) + throws RocksDBException { + return open(options, db_path, 0, false); + } + + /** + *
+ * Opens a TtlDB.
+ * + * @param options {@link org.rocksdb.Options} instance. + * @param db_path path to database. + * @param ttl time to live for new entries. + * @param readOnly boolean value indicating if database if db is + * opened read-only. + * + * @return TtlDB instance. + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + */ + public static TtlDB open(final Options options, final String db_path, + final int ttl, final boolean readOnly) throws RocksDBException { + return new TtlDB(open(options.nativeHandle_, db_path, ttl, readOnly)); + } + + /** + *
+ * Opens a TtlDB.
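+ *
+ * Editor's note: a sketch of this overload (illustrative only; the column
+ * family names, path and ttl values are assumptions, and exception
+ * handling is elided):
+ * <pre>
+ *   final List<ColumnFamilyDescriptor> descriptors = Arrays.asList(
+ *       new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+ *       new ColumnFamilyDescriptor("my_cf".getBytes()));
+ *   final List<ColumnFamilyHandle> handles = new ArrayList<>();
+ *   final TtlDB ttlDb = TtlDB.open(new DBOptions(), "/tmp/ttl_db",
+ *       descriptors, handles, Arrays.asList(5, 10), false);
+ * </pre>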
+ * + * @param options {@link org.rocksdb.Options} instance. + * @param db_path path to database. + * @param columnFamilyDescriptors list of column family descriptors + * @param columnFamilyHandles will be filled with ColumnFamilyHandle instances + * on open. + * @param ttlValues time to live values per column family handle + * @param readOnly boolean value indicating if database if db is + * opened read-only. + * + * @return TtlDB instance. + * + * @throws RocksDBException thrown if an error occurs within the native + * part of the library. + * @throws java.lang.IllegalArgumentException when there is not a ttl value + * per given column family handle. + */ + public static TtlDB open(final DBOptions options, final String db_path, + final List columnFamilyDescriptors, + final List columnFamilyHandles, + final List ttlValues, final boolean readOnly) + throws RocksDBException { + if (columnFamilyDescriptors.size() != ttlValues.size()) { + throw new IllegalArgumentException("There must be a ttl value per column" + + "family handle."); + } + + final byte[][] cfNames = new byte[columnFamilyDescriptors.size()][]; + final long[] cfOptionHandles = new long[columnFamilyDescriptors.size()]; + for (int i = 0; i < columnFamilyDescriptors.size(); i++) { + final ColumnFamilyDescriptor cfDescriptor = + columnFamilyDescriptors.get(i); + cfNames[i] = cfDescriptor.columnFamilyName(); + cfOptionHandles[i] = cfDescriptor.columnFamilyOptions().nativeHandle_; + } + + final int ttlVals[] = new int[ttlValues.size()]; + for(int i = 0; i < ttlValues.size(); i++) { + ttlVals[i] = ttlValues.get(i); + } + final long[] handles = openCF(options.nativeHandle_, db_path, + cfNames, cfOptionHandles, ttlVals, readOnly); + + final TtlDB ttlDB = new TtlDB(handles[0]); + for (int i = 1; i < handles.length; i++) { + columnFamilyHandles.add(new ColumnFamilyHandle(ttlDB, handles[i])); + } + return ttlDB; + } + + /** + *
+ * Creates a new ttl based column family with a name defined
+ * in given ColumnFamilyDescriptor and allocates a
+ * ColumnFamilyHandle within an internal structure.
+ *
+ * The ColumnFamilyHandle is automatically disposed with DB
+ * disposal.
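+ *
+ * Editor's note: a sketch (illustrative only; the column family name and
+ * ttl are assumptions):
+ * <pre>
+ *   final ColumnFamilyHandle cf = ttlDb.createColumnFamilyWithTtl(
+ *       new ColumnFamilyDescriptor("expiring_cf".getBytes()), 5);
+ * </pre>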
+ * + * @param columnFamilyDescriptor column family to be created. + * @param ttl TTL to set for this column family. + * + * @return {@link org.rocksdb.ColumnFamilyHandle} instance. + * + * @throws RocksDBException thrown if error happens in underlying + * native library. + */ + public ColumnFamilyHandle createColumnFamilyWithTtl( + final ColumnFamilyDescriptor columnFamilyDescriptor, + final int ttl) throws RocksDBException { + return new ColumnFamilyHandle(this, + createColumnFamilyWithTtl(nativeHandle_, + columnFamilyDescriptor.columnFamilyName(), + columnFamilyDescriptor.columnFamilyOptions().nativeHandle_, ttl)); + } + + /** + *
+ * Close the TtlDB instance and release resource.
+ *
+ * Internally, TtlDB owns the {@code rocksdb::DB} pointer
+ * to its associated {@link org.rocksdb.RocksDB}. The release
+ * of that RocksDB pointer is handled in the destructor of the
+ * C++ {@code rocksdb::TtlDB} and should be transparent to
+ * Java developers.
+ */ + @Override + public void close() { + super.close(); + } + + /** + *
+ * A protected constructor that will be used in the static
+ * factory method
+ * {@link #open(Options, String, int, boolean)}
+ * and
+ * {@link #open(DBOptions, String, java.util.List, java.util.List,
+ * java.util.List, boolean)}.
+ * + * @param nativeHandle The native handle of the C++ TtlDB object + */ + protected TtlDB(final long nativeHandle) { + super(nativeHandle); + } + + @Override protected void finalize() throws Throwable { + close(); //TODO(AR) revisit here when implementing AutoCloseable + super.finalize(); + } + + private native static long open(final long optionsHandle, + final String db_path, final int ttl, final boolean readOnly) + throws RocksDBException; + private native static long[] openCF(final long optionsHandle, + final String db_path, final byte[][] columnFamilyNames, + final long[] columnFamilyOptions, final int[] ttlValues, + final boolean readOnly) throws RocksDBException; + private native long createColumnFamilyWithTtl(final long handle, + final byte[] columnFamilyName, final long columnFamilyOptions, int ttl) + throws RocksDBException; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java b/external/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java new file mode 100755 index 0000000000..378340248f --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/VectorMemTableConfig.java @@ -0,0 +1,45 @@ +package org.rocksdb; + +/** + * The config for vector memtable representation. + */ +public class VectorMemTableConfig extends MemTableConfig { + public static final int DEFAULT_RESERVED_SIZE = 0; + + /** + * VectorMemTableConfig constructor + */ + public VectorMemTableConfig() { + reservedSize_ = DEFAULT_RESERVED_SIZE; + } + + /** + * Set the initial size of the vector that will be used + * by the memtable created based on this config. + * + * @param size the initial size of the vector. + * @return the reference to the current config. + */ + public VectorMemTableConfig setReservedSize(final int size) { + reservedSize_ = size; + return this; + } + + /** + * Returns the initial size of the vector used by the memtable + * created based on this config. + * + * @return the initial size of the vector. + */ + public int reservedSize() { + return reservedSize_; + } + + @Override protected long newMemTableFactoryHandle() { + return newMemTableFactoryHandle(reservedSize_); + } + + private native long newMemTableFactoryHandle(long reservedSize) + throws IllegalArgumentException; + private int reservedSize_; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java b/external/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java new file mode 100755 index 0000000000..7eb019d0b2 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/WBWIRocksIterator.java @@ -0,0 +1,172 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +public class WBWIRocksIterator + extends AbstractRocksIterator { + private final WriteEntry entry = new WriteEntry(); + + protected WBWIRocksIterator(final WriteBatchWithIndex wbwi, + final long nativeHandle) { + super(wbwi, nativeHandle); + } + + /** + * Get the current entry + * + * The WriteEntry is only valid + * until the iterator is repositioned. + * If you want to keep the WriteEntry across iterator + * movements, you must make a copy of its data! 
+ * + * Note - This method is not thread-safe with respect to the WriteEntry + * as it performs a non-atomic update across the fields of the WriteEntry + * + * @return The WriteEntry of the current entry + */ + public WriteEntry entry() { + assert(isOwningHandle()); + assert(entry != null); + final long ptrs[] = entry1(nativeHandle_); + + entry.type = WriteType.fromId((byte)ptrs[0]); + entry.key.setNativeHandle(ptrs[1], true); + entry.value.setNativeHandle(ptrs[2], ptrs[2] != 0); + + return entry; + } + + @Override protected final native void disposeInternal(final long handle); + @Override final native boolean isValid0(long handle); + @Override final native void seekToFirst0(long handle); + @Override final native void seekToLast0(long handle); + @Override final native void next0(long handle); + @Override final native void prev0(long handle); + @Override final native void seek0(long handle, byte[] target, int targetLen); + @Override final native void status0(long handle) throws RocksDBException; + + private native long[] entry1(final long handle); + + /** + * Enumeration of the Write operation + * that created the record in the Write Batch + */ + public enum WriteType { + PUT((byte)0x1), + MERGE((byte)0x2), + DELETE((byte)0x4), + LOG((byte)0x8); + + final byte id; + WriteType(final byte id) { + this.id = id; + } + + public static WriteType fromId(final byte id) { + for(final WriteType wt : WriteType.values()) { + if(id == wt.id) { + return wt; + } + } + throw new IllegalArgumentException("No WriteType with id=" + id); + } + } + + /** + * Represents an entry returned by + * {@link org.rocksdb.WBWIRocksIterator#entry()} + * + * It is worth noting that a WriteEntry with + * the type {@link org.rocksdb.WBWIRocksIterator.WriteType#DELETE} + * or {@link org.rocksdb.WBWIRocksIterator.WriteType#LOG} + * will not have a value. + */ + public static class WriteEntry { + WriteType type = null; + final DirectSlice key; + final DirectSlice value; + + /** + * Intentionally private as this + * should only be instantiated in + * this manner by the outer WBWIRocksIterator + * class; The class members are then modified + * by calling {@link org.rocksdb.WBWIRocksIterator#entry()} + */ + private WriteEntry() { + key = new DirectSlice(); + value = new DirectSlice(); + } + + public WriteEntry(WriteType type, DirectSlice key, DirectSlice value) { + this.type = type; + this.key = key; + this.value = value; + } + + /** + * Returns the type of the Write Entry + * + * @return the WriteType of the WriteEntry + */ + public WriteType getType() { + return type; + } + + /** + * Returns the key of the Write Entry + * + * @return The slice containing the key + * of the WriteEntry + */ + public DirectSlice getKey() { + return key; + } + + /** + * Returns the value of the Write Entry + * + * @return The slice containing the value of + * the WriteEntry or null if the WriteEntry has + * no value + */ + public DirectSlice getValue() { + if(!value.isOwningHandle()) { + return null; //TODO(AR) migrate to JDK8 java.util.Optional#empty() + } else { + return value; + } + } + + /** + * Generates a hash code for the Write Entry. NOTE: The hash code is based + * on the string representation of the key, so it may not work correctly + * with exotic custom comparators. + * + * @return The hash code for the Write Entry + */ + @Override + public int hashCode() { + return (key == null) ? 
0 : key.hashCode(); + } + + @Override + public boolean equals(Object other) { + if(other == null) { + return false; + } else if (this == other) { + return true; + } else if(other instanceof WriteEntry) { + final WriteEntry otherWriteEntry = (WriteEntry)other; + return type.equals(otherWriteEntry.type) + && key.equals(otherWriteEntry.key) + && value.equals(otherWriteEntry.value); + } else { + return false; + } + } + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java b/external/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java new file mode 100755 index 0000000000..325f9c0573 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/WriteBatch.java @@ -0,0 +1,134 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * WriteBatch holds a collection of updates to apply atomically to a DB. + * + * The updates are applied in the order in which they are added + * to the WriteBatch. For example, the value of "key" will be "v3" + * after the following batch is written: + * + * batch.put("key", "v1"); + * batch.remove("key"); + * batch.put("key", "v2"); + * batch.put("key", "v3"); + * + * Multiple threads can invoke const methods on a WriteBatch without + * external synchronization, but if any of the threads may call a + * non-const method, all threads accessing the same WriteBatch must use + * external synchronization. + */ +public class WriteBatch extends AbstractWriteBatch { + /** + * Constructs a WriteBatch instance. + */ + public WriteBatch() { + this(0); + } + + /** + * Constructs a WriteBatch instance with a given size. + * + * @param reserved_bytes reserved size for WriteBatch + */ + public WriteBatch(final int reserved_bytes) { + super(newWriteBatch(reserved_bytes)); + } + + /** + * Support for iterating over the contents of a batch. + * + * @param handler A handler that is called back for each + * update present in the batch + * + * @throws RocksDBException If we cannot iterate over the batch + */ + public void iterate(final Handler handler) throws RocksDBException { + iterate(nativeHandle_, handler.nativeHandle_); + } + + /** + *
+ * Private WriteBatch constructor which is used to construct
+ * WriteBatch instances from C++ side. As the reference to this
+ * object is also managed from C++ side the handle will be disowned.
+ * + * @param nativeHandle address of native instance. + */ + WriteBatch(final long nativeHandle) { + super(nativeHandle); + disOwnNativeHandle(); + } + + @Override protected final native void disposeInternal(final long handle); + @Override final native int count0(final long handle); + @Override final native void put(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen); + @Override final native void put(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen, + final long cfHandle); + @Override final native void merge(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen); + @Override final native void merge(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen, + final long cfHandle); + @Override final native void remove(final long handle, final byte[] key, + final int keyLen); + @Override final native void remove(final long handle, final byte[] key, + final int keyLen, final long cfHandle); + @Override final native void putLogData(final long handle, + final byte[] blob, final int blobLen); + @Override final native void clear0(final long handle); + @Override final native void setSavePoint0(final long handle); + @Override final native void rollbackToSavePoint0(final long handle); + + private native static long newWriteBatch(final int reserved_bytes); + private native void iterate(final long handle, final long handlerHandle) + throws RocksDBException; + + + /** + * Handler callback for iterating over the contents of a batch. + */ + public static abstract class Handler + extends AbstractImmutableNativeReference { + private final long nativeHandle_; + public Handler() { + super(true); + this.nativeHandle_ = createNewHandler0(); + } + + public abstract void put(byte[] key, byte[] value); + public abstract void merge(byte[] key, byte[] value); + public abstract void delete(byte[] key); + public abstract void logData(byte[] blob); + + /** + * shouldContinue is called by the underlying iterator + * WriteBatch::Iterate. If it returns false, + * iteration is halted. Otherwise, it continues + * iterating. The default implementation always + * returns true. + * + * @return boolean value indicating if the + * iteration is halted. + */ + public boolean shouldContinue() { + return true; + } + + /** + * Deletes underlying C++ handler pointer. + */ + @Override + protected void disposeInternal() { + disposeInternal(nativeHandle_); + } + + private native long createNewHandler0(); + private native void disposeInternal(final long handle); + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java b/external/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java new file mode 100755 index 0000000000..a077918519 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/WriteBatchInterface.java @@ -0,0 +1,113 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + *
+ * Defines the interface for a Write Batch which
+ * holds a collection of updates to apply atomically to a DB.
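+ *
+ * Editor's note: a usage sketch with the {@link WriteBatch}
+ * implementation (illustrative only; it assumes an open {@code RocksDB db}
+ * and, for merge(), a merge operator configured on the db):
+ * <pre>
+ *   final WriteBatch batch = new WriteBatch();
+ *   batch.put("k1".getBytes(), "v1".getBytes());
+ *   batch.merge("counter".getBytes(), "1".getBytes());
+ *   batch.remove("stale".getBytes());
+ *   db.write(new WriteOptions(), batch); // all updates applied atomically
+ * </pre>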
+ */ +public interface WriteBatchInterface { + + /** + * Returns the number of updates in the batch. + * + * @return number of items in WriteBatch + */ + int count(); + + /** + *
+ * Store the mapping "key->value" in the database.
+ * + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + */ + void put(byte[] key, byte[] value); + + /** + *
+ * Store the mapping "key->value" within given column
+ * family.
+ * + * @param columnFamilyHandle {@link org.rocksdb.ColumnFamilyHandle} + * instance + * @param key the specified key to be inserted. + * @param value the value associated with the specified key. + */ + void put(ColumnFamilyHandle columnFamilyHandle, + byte[] key, byte[] value); + + /** + *
+ * Merge "value" with the existing value of "key" in the database.
+ * "key->merge(existing, value)"
+ * + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + */ + void merge(byte[] key, byte[] value); + + /** + *
+ * Merge "value" with the existing value of "key" in given column family.
+ * "key->merge(existing, value)"
+ * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key the specified key to be merged. + * @param value the value to be merged with the current value for + * the specified key. + */ + void merge(ColumnFamilyHandle columnFamilyHandle, + byte[] key, byte[] value); + + /** + *
+ * If the database contains a mapping for "key", erase it. Else do nothing.
+ * + * @param key Key to delete within database + */ + void remove(byte[] key); + + /** + *
+ * If the column family contains a mapping for "key", erase it.
+ * Else do nothing.
+ * + * @param columnFamilyHandle {@link ColumnFamilyHandle} instance + * @param key Key to delete within database + */ + void remove(ColumnFamilyHandle columnFamilyHandle, byte[] key); + + /** + * Append a blob of arbitrary size to the records in this batch. The blob will + * be stored in the transaction log but not in any other file. In particular, + * it will not be persisted to the SST files. When iterating over this + * WriteBatch, WriteBatch::Handler::LogData will be called with the contents + * of the blob as it is encountered. Blobs, puts, deletes, and merges will be + * encountered in the same order in thich they were inserted. The blob will + * NOT consume sequence number(s) and will NOT increase the count of the batch + * + * Example application: add timestamps to the transaction log for use in + * replication. + * + * @param blob binary object to be inserted + */ + void putLogData(byte[] blob); + + /** + * Clear all updates buffered in this batch + */ + void clear(); + + /** + * Records the state of the batch for future calls to RollbackToSavePoint(). + * May be called multiple times to set multiple save points. + */ + void setSavePoint(); + + /** + * Remove all entries in this batch (Put, Merge, Delete, PutLogData) since + * the most recent call to SetSavePoint() and removes the most recent save + * point. + * + * @throws RocksDBException if there is no previous call to SetSavePoint() + */ + void rollbackToSavePoint() throws RocksDBException; +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java b/external/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java new file mode 100755 index 0000000000..dad908a246 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/WriteBatchWithIndex.java @@ -0,0 +1,170 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Similar to {@link org.rocksdb.WriteBatch} but with a binary searchable + * index built for all the keys inserted. + * + * Calling put, merge, remove or putLogData calls the same function + * as with {@link org.rocksdb.WriteBatch} whilst also building an index. + * + * A user can call {@link org.rocksdb.WriteBatchWithIndex#newIterator()} to + * create an iterator over the write batch or + * {@link org.rocksdb.WriteBatchWithIndex#newIteratorWithBase(org.rocksdb.RocksIterator)} + * to get an iterator for the database with Read-Your-Own-Writes like capability + */ +public class WriteBatchWithIndex extends AbstractWriteBatch { + /** + * Creates a WriteBatchWithIndex where no bytes + * are reserved up-front, bytewise comparison is + * used for fallback key comparisons, + * and duplicate keys operations are retained + */ + public WriteBatchWithIndex() { + super(newWriteBatchWithIndex()); + } + + + /** + * Creates a WriteBatchWithIndex where no bytes + * are reserved up-front, bytewise comparison is + * used for fallback key comparisons, and duplicate key + * assignment is determined by the constructor argument + * + * @param overwriteKey if true, overwrite the key in the index when + * inserting a duplicate key, in this way an iterator will never + * show two entries with the same key. 
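+ *
+ * Editor's note: a read-your-own-writes sketch (illustrative only; it
+ * assumes an open {@code RocksDB db} and elides exception handling):
+ * <pre>
+ *   final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true);
+ *   wbwi.put("k1".getBytes(), "v1".getBytes());
+ *   // iterate over the database view plus the still-pending batch updates
+ *   final RocksIterator it = wbwi.newIteratorWithBase(db.newIterator());
+ *   for (it.seekToFirst(); it.isValid(); it.next()) {
+ *     System.out.println(new String(it.key()));
+ *   }
+ * </pre>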
+ */ + public WriteBatchWithIndex(final boolean overwriteKey) { + super(newWriteBatchWithIndex(overwriteKey)); + } + + /** + * Creates a WriteBatchWithIndex + * + * @param fallbackIndexComparator We fallback to this comparator + * to compare keys within a column family if we cannot determine + * the column family and so look up it's comparator. + * + * @param reservedBytes reserved bytes in underlying WriteBatch + * + * @param overwriteKey if true, overwrite the key in the index when + * inserting a duplicate key, in this way an iterator will never + * show two entries with the same key. + */ + public WriteBatchWithIndex( + final AbstractComparator> + fallbackIndexComparator, final int reservedBytes, + final boolean overwriteKey) { + super(newWriteBatchWithIndex(fallbackIndexComparator.getNativeHandle(), + reservedBytes, overwriteKey)); + } + + /** + * Create an iterator of a column family. User can call + * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to + * search to the next entry of or after a key. Keys will be iterated in the + * order given by index_comparator. For multiple updates on the same key, + * each update will be returned as a separate entry, in the order of update + * time. + * + * @param columnFamilyHandle The column family to iterate over + * @return An iterator for the Write Batch contents, restricted to the column + * family + */ + public WBWIRocksIterator newIterator( + final ColumnFamilyHandle columnFamilyHandle) { + return new WBWIRocksIterator(this, iterator1(nativeHandle_, + columnFamilyHandle.nativeHandle_)); + } + + /** + * Create an iterator of the default column family. User can call + * {@link org.rocksdb.RocksIteratorInterface#seek(byte[])} to + * search to the next entry of or after a key. Keys will be iterated in the + * order given by index_comparator. For multiple updates on the same key, + * each update will be returned as a separate entry, in the order of update + * time. + * + * @return An iterator for the Write Batch contents + */ + public WBWIRocksIterator newIterator() { + return new WBWIRocksIterator(this, iterator0(nativeHandle_)); + } + + /** + * Provides Read-Your-Own-Writes like functionality by + * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator} + * as a delta and baseIterator as a base + * + * @param columnFamilyHandle The column family to iterate over + * @param baseIterator The base iterator, + * e.g. {@link org.rocksdb.RocksDB#newIterator()} + * @return An iterator which shows a view comprised of both the database + * point-in-time from baseIterator and modifications made in this write batch. + */ + public RocksIterator newIteratorWithBase( + final ColumnFamilyHandle columnFamilyHandle, + final RocksIterator baseIterator) { + RocksIterator iterator = new RocksIterator( + baseIterator.parent_, + iteratorWithBase(nativeHandle_, + columnFamilyHandle.nativeHandle_, + baseIterator.nativeHandle_)); + //when the iterator is deleted it will also delete the baseIterator + baseIterator.disOwnNativeHandle(); + return iterator; + } + + /** + * Provides Read-Your-Own-Writes like functionality by + * creating a new Iterator that will use {@link org.rocksdb.WBWIRocksIterator} + * as a delta and baseIterator as a base. Operates on the default column + * family. + * + * @param baseIterator The base iterator, + * e.g. {@link org.rocksdb.RocksDB#newIterator()} + * @return An iterator which shows a view comprised of both the database + * point-in-timefrom baseIterator and modifications made in this write batch. 
+ */ + public RocksIterator newIteratorWithBase(final RocksIterator baseIterator) { + return newIteratorWithBase(baseIterator.parent_.getDefaultColumnFamily(), + baseIterator); + } + + @Override protected final native void disposeInternal(final long handle); + @Override final native int count0(final long handle); + @Override final native void put(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen); + @Override final native void put(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen, + final long cfHandle); + @Override final native void merge(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen); + @Override final native void merge(final long handle, final byte[] key, + final int keyLen, final byte[] value, final int valueLen, + final long cfHandle); + @Override final native void remove(final long handle, final byte[] key, + final int keyLen); + @Override final native void remove(final long handle, final byte[] key, + final int keyLen, final long cfHandle); + @Override final native void putLogData(final long handle, final byte[] blob, + final int blobLen); + @Override final native void clear0(final long handle); + @Override final native void setSavePoint0(final long handle); + @Override final native void rollbackToSavePoint0(final long handle); + + private native static long newWriteBatchWithIndex(); + private native static long newWriteBatchWithIndex(final boolean overwriteKey); + private native static long newWriteBatchWithIndex( + final long fallbackIndexComparatorHandle, final int reservedBytes, + final boolean overwriteKey); + private native long iterator0(final long handle); + private native long iterator1(final long handle, final long cfHandle); + private native long iteratorWithBase(final long handle, + final long baseIteratorHandle, final long cfHandle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java b/external/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java new file mode 100755 index 0000000000..4e7abd873a --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/WriteOptions.java @@ -0,0 +1,101 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Options that control write operations. + * + * Note that developers should call WriteOptions.dispose() to release the + * c++ side memory before a WriteOptions instance runs out of scope. + */ +public class WriteOptions extends RocksObject { + /** + * Construct WriteOptions instance. + */ + public WriteOptions() { + super(newWriteOptions()); + + } + + /** + * If true, the write will be flushed from the operating system + * buffer cache (by calling WritableFile::Sync()) before the write + * is considered complete. If this flag is true, writes will be + * slower. + * + * If this flag is false, and the machine crashes, some recent + * writes may be lost. Note that if it is just the process that + * crashes (i.e., the machine does not reboot), no writes will be + * lost even if sync==false. + * + * In other words, a DB write with sync==false has similar + * crash semantics as the "write()" system call. 
A DB write + * with sync==true has similar crash semantics to a "write()" + * system call followed by "fdatasync()". + * + * Default: false + * + * @param flag a boolean flag to indicate whether a write + * should be synchronized. + * @return the instance of the current WriteOptions. + */ + public WriteOptions setSync(final boolean flag) { + setSync(nativeHandle_, flag); + return this; + } + + /** + * If true, the write will be flushed from the operating system + * buffer cache (by calling WritableFile::Sync()) before the write + * is considered complete. If this flag is true, writes will be + * slower. + * + * If this flag is false, and the machine crashes, some recent + * writes may be lost. Note that if it is just the process that + * crashes (i.e., the machine does not reboot), no writes will be + * lost even if sync==false. + * + * In other words, a DB write with sync==false has similar + * crash semantics as the "write()" system call. A DB write + * with sync==true has similar crash semantics to a "write()" + * system call followed by "fdatasync()". + * + * @return boolean value indicating if sync is active. + */ + public boolean sync() { + return sync(nativeHandle_); + } + + /** + * If true, writes will not first go to the write ahead log, + * and the write may got lost after a crash. + * + * @param flag a boolean flag to specify whether to disable + * write-ahead-log on writes. + * @return the instance of the current WriteOptions. + */ + public WriteOptions setDisableWAL(final boolean flag) { + setDisableWAL(nativeHandle_, flag); + return this; + } + + /** + * If true, writes will not first go to the write ahead log, + * and the write may got lost after a crash. + * + * @return boolean value indicating if WAL is disabled. + */ + public boolean disableWAL() { + return disableWAL(nativeHandle_); + } + + private native static long newWriteOptions(); + private native void setSync(long handle, boolean flag); + private native boolean sync(long handle); + private native void setDisableWAL(long handle, boolean flag); + private native boolean disableWAL(long handle); + @Override protected final native void disposeInternal(final long handle); +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java b/external/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java new file mode 100755 index 0000000000..17337bfc85 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/util/BytewiseComparator.java @@ -0,0 +1,91 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+
+package org.rocksdb.util;
+
+import org.rocksdb.*;
+
+/**
+ * This is a Java Native implementation of the C++
+ * equivalent BytewiseComparatorImpl using {@link Slice}
+ *
+ * The performance of Comparators implemented in Java is always
+ * less than their C++ counterparts due to the bridging overhead,
+ * as such you likely don't want to use this apart from benchmarking
+ * and you most likely instead wanted
+ * {@link org.rocksdb.BuiltinComparator#BYTEWISE_COMPARATOR}
+ */
+public class BytewiseComparator extends Comparator {
+
+  public BytewiseComparator(final ComparatorOptions copt) {
+    super(copt);
+  }
+
+  @Override
+  public String name() {
+    return "rocksdb.java.BytewiseComparator";
+  }
+
+  @Override
+  public int compare(final Slice a, final Slice b) {
+    return compare(a.data(), b.data());
+  }
+
+  @Override
+  public String findShortestSeparator(final String start,
+      final Slice limit) {
+    final byte[] startBytes = start.getBytes();
+    final byte[] limitBytes = limit.data();
+
+    // Find length of common prefix
+    final int min_length = Math.min(startBytes.length, limit.size());
+    int diff_index = 0;
+    while ((diff_index < min_length) &&
+        (startBytes[diff_index] == limitBytes[diff_index])) {
+      diff_index++;
+    }
+
+    if (diff_index >= min_length) {
+      // Do not shorten if one string is a prefix of the other
+    } else {
+      // Mask to an unsigned value: a plain Java byte is signed, so the
+      // test "diff_byte < 0xff" would otherwise always be true.
+      final int diff_byte = startBytes[diff_index] & 0xff;
+      if(diff_byte < 0xff &&
+          diff_byte + 1 < (limitBytes[diff_index] & 0xff)) {
+        final byte shortest[] = new byte[diff_index + 1];
+        System.arraycopy(startBytes, 0, shortest, 0, diff_index + 1);
+        shortest[diff_index]++;
+        assert(compare(shortest, limitBytes) < 0);
+        return new String(shortest);
+      }
+    }
+
+    return null;
+  }
+
+  private static int compare(final byte[] a, final byte[] b) {
+    // Compare as unsigned bytes to match the C++ BytewiseComparatorImpl;
+    // ByteBuffer.compareTo() would compare Java bytes as signed values.
+    final int min_length = Math.min(a.length, b.length);
+    for (int i = 0; i < min_length; i++) {
+      final int diff = (a[i] & 0xff) - (b[i] & 0xff);
+      if (diff != 0) {
+        return diff;
+      }
+    }
+    return a.length - b.length;
+  }
+
+  @Override
+  public String findShortSuccessor(final String key) {
+    final byte[] keyBytes = key.getBytes();
+
+    // Find first character that can be incremented
+    // (masking to unsigned: "byt != 0xff" would always be true for a byte)
+    final int n = keyBytes.length;
+    for (int i = 0; i < n; i++) {
+      final byte byt = keyBytes[i];
+      if ((byt & 0xff) != 0xff) {
+        final byte shortSuccessor[] = new byte[i + 1];
+        System.arraycopy(keyBytes, 0, shortSuccessor, 0, i + 1);
+        shortSuccessor[i]++;
+        return new String(shortSuccessor);
+      }
+    }
+    // *key is a run of 0xffs. Leave it alone.
+
+    return null;
+  }
+}
diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/util/DirectBytewiseComparator.java b/external/rocksdb/java/src/main/java/org/rocksdb/util/DirectBytewiseComparator.java
new file mode 100755
index 0000000000..170f0f42e4
--- /dev/null
+++ b/external/rocksdb/java/src/main/java/org/rocksdb/util/DirectBytewiseComparator.java
@@ -0,0 +1,88 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
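+//
+// Note: this variant operates on off-heap java.nio.ByteBuffer data exposed
+// through DirectSlice rather than on byte[] (Slice), which may avoid an
+// on-heap copy when keys already live in direct buffers.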
+
+package org.rocksdb.util;
+
+import org.rocksdb.ComparatorOptions;
+import org.rocksdb.DirectComparator;
+import org.rocksdb.DirectSlice;
+
+import java.nio.ByteBuffer;
+
+/**
+ * This is a Java Native implementation of the C++
+ * equivalent BytewiseComparatorImpl using {@link DirectSlice}
+ *
+ * The performance of Comparators implemented in Java is always
+ * less than their C++ counterparts due to the bridging overhead,
+ * as such you likely don't want to use this apart from benchmarking
+ * and you most likely instead wanted
+ * {@link org.rocksdb.BuiltinComparator#BYTEWISE_COMPARATOR}
+ */
+public class DirectBytewiseComparator extends DirectComparator {
+
+  public DirectBytewiseComparator(final ComparatorOptions copt) {
+    super(copt);
+  }
+
+  @Override
+  public String name() {
+    return "rocksdb.java.DirectBytewiseComparator";
+  }
+
+  @Override
+  public int compare(final DirectSlice a, final DirectSlice b) {
+    // Compare as unsigned bytes to match the C++ BytewiseComparatorImpl;
+    // ByteBuffer.compareTo() would compare Java bytes as signed values.
+    final int min_length = Math.min(a.size(), b.size());
+    for (int i = 0; i < min_length; i++) {
+      final int diff = (a.get(i) & 0xff) - (b.get(i) & 0xff);
+      if (diff != 0) {
+        return diff;
+      }
+    }
+    return a.size() - b.size();
+  }
+
+  @Override
+  public String findShortestSeparator(final String start,
+      final DirectSlice limit) {
+    final byte[] startBytes = start.getBytes();
+
+    // Find length of common prefix
+    final int min_length = Math.min(startBytes.length, limit.size());
+    int diff_index = 0;
+    while ((diff_index < min_length) &&
+        (startBytes[diff_index] == limit.get(diff_index))) {
+      diff_index++;
+    }
+
+    if (diff_index >= min_length) {
+      // Do not shorten if one string is a prefix of the other
+    } else {
+      // Mask to an unsigned value: a plain Java byte is signed, so the
+      // test "diff_byte < 0xff" would otherwise always be true.
+      final int diff_byte = startBytes[diff_index] & 0xff;
+      if(diff_byte < 0xff &&
+          diff_byte + 1 < (limit.get(diff_index) & 0xff)) {
+        final byte shortest[] = new byte[diff_index + 1];
+        System.arraycopy(startBytes, 0, shortest, 0, diff_index + 1);
+        shortest[diff_index]++;
+        assert(ByteBuffer.wrap(shortest).compareTo(limit.data()) < 0);
+        return new String(shortest);
+      }
+    }
+
+    return null;
+  }
+
+  @Override
+  public String findShortSuccessor(final String key) {
+    final byte[] keyBytes = key.getBytes();
+
+    // Find first character that can be incremented
+    // (masking to unsigned: "byt != 0xff" would always be true for a byte)
+    final int n = keyBytes.length;
+    for (int i = 0; i < n; i++) {
+      final byte byt = keyBytes[i];
+      if ((byt & 0xff) != 0xff) {
+        final byte shortSuccessor[] = new byte[i + 1];
+        System.arraycopy(keyBytes, 0, shortSuccessor, 0, i + 1);
+        shortSuccessor[i]++;
+        return new String(shortSuccessor);
+      }
+    }
+    // *key is a run of 0xffs. Leave it alone.
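+    // (returning null tells RocksDB to leave the key unchanged)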
+ + return null; + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java b/external/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java new file mode 100755 index 0000000000..6fccc43bba --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/util/Environment.java @@ -0,0 +1,72 @@ +package org.rocksdb.util; + +public class Environment { + private static String OS = System.getProperty("os.name").toLowerCase(); + private static String ARCH = System.getProperty("os.arch").toLowerCase(); + + public static boolean isWindows() { + return (OS.contains("win")); + } + + public static boolean isMac() { + return (OS.contains("mac")); + } + + public static boolean isUnix() { + return (OS.contains("nix") || + OS.contains("nux") || + OS.contains("aix")); + } + + public static boolean isSolaris() { + return OS.contains("sunos"); + } + + public static boolean is64Bit() { + return (ARCH.indexOf("64") > 0); + } + + public static String getSharedLibraryName(final String name) { + return name + "jni"; + } + + public static String getSharedLibraryFileName(final String name) { + return appendLibOsSuffix("lib" + getSharedLibraryName(name), true); + } + + public static String getJniLibraryName(final String name) { + if (isUnix()) { + final String arch = (is64Bit()) ? "64" : "32"; + return String.format("%sjni-linux%s", name, arch); + } else if (isMac()) { + return String.format("%sjni-osx", name); + } else if (isSolaris()) { + return String.format("%sjni-solaris%d", name, is64Bit() ? 64 : 32); + } else if (isWindows() && is64Bit()) { + return String.format("%sjni-win64", name); + } + throw new UnsupportedOperationException(); + } + + public static String getJniLibraryFileName(final String name) { + return appendLibOsSuffix("lib" + getJniLibraryName(name), false); + } + + private static String appendLibOsSuffix(final String libraryFileName, final boolean shared) { + if (isUnix() || isSolaris()) { + return libraryFileName + ".so"; + } else if (isMac()) { + return libraryFileName + (shared ? ".dylib" : ".jnilib"); + } else if (isWindows()) { + return libraryFileName + ".dll"; + } + throw new UnsupportedOperationException(); + } + + public static String getJniLibraryExtension() { + if (isWindows()) { + return ".dll"; + } + return (isMac()) ? ".jnilib" : ".so"; + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java b/external/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java new file mode 100755 index 0000000000..beedc185d4 --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/util/ReverseBytewiseComparator.java @@ -0,0 +1,37 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
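+//
+// Ordering sketch: negating the bytewise result inverts the sort order, so
+// keys written as "a", "b", "c" iterate back as "c", "b", "a" under this
+// comparator.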
+ +package org.rocksdb.util; + +import org.rocksdb.BuiltinComparator; +import org.rocksdb.ComparatorOptions; +import org.rocksdb.Slice; + +/** + * This is a Java Native implementation of the C++ + * equivalent ReverseBytewiseComparatorImpl using {@link Slice} + * + * The performance of Comparators implemented in Java is always + * less than their C++ counterparts due to the bridging overhead, + * as such you likely don't want to use this apart from benchmarking + * and you most likely instead wanted + * {@link BuiltinComparator#REVERSE_BYTEWISE_COMPARATOR} + */ +public class ReverseBytewiseComparator extends BytewiseComparator { + + public ReverseBytewiseComparator(final ComparatorOptions copt) { + super(copt); + } + + @Override + public String name() { + return "rocksdb.java.ReverseBytewiseComparator"; + } + + @Override + public int compare(final Slice a, final Slice b) { + return -super.compare(a, b); + } +} diff --git a/external/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java b/external/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java new file mode 100755 index 0000000000..e66fc371cd --- /dev/null +++ b/external/rocksdb/java/src/main/java/org/rocksdb/util/SizeUnit.java @@ -0,0 +1,16 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb.util; + +public class SizeUnit { + public static final long KB = 1024L; + public static final long MB = KB * KB; + public static final long GB = KB * MB; + public static final long TB = KB * GB; + public static final long PB = KB * TB; + + private SizeUnit() {} +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/AbstractComparatorTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/AbstractComparatorTest.java new file mode 100755 index 0000000000..db4b4d7d01 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/AbstractComparatorTest.java @@ -0,0 +1,199 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.io.IOException; +import java.nio.file.*; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; +import static org.rocksdb.Types.byteToInt; +import static org.rocksdb.Types.intToByte; + +/** + * Abstract tests for both Comparator and DirectComparator + */ +public abstract class AbstractComparatorTest { + + /** + * Get a comparator which will expect Integer keys + * and determine an ascending order + * + * @return An integer ascending order key comparator + */ + public abstract AbstractComparator getAscendingIntKeyComparator(); + + /** + * Test which stores random keys into the database + * using an @see getAscendingIntKeyComparator + * it then checks that these keys are read back in + * ascending order + * + * @param db_path A path where we can store database + * files temporarily + * + * @throws java.io.IOException if IO error happens. 
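+   * @throws RocksDBException thrown if an error occurs within the native
+   *     part of the library.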
+   */
+  public void testRoundtrip(final Path db_path) throws IOException,
+      RocksDBException {
+    try (final AbstractComparator comparator = getAscendingIntKeyComparator();
+         final Options opt = new Options()
+             .setCreateIfMissing(true)
+             .setComparator(comparator)) {
+
+      // store 10,000 random integer keys
+      final int ITERATIONS = 10000;
+      try (final RocksDB db = RocksDB.open(opt, db_path.toString())) {
+        final Random random = new Random();
+        for (int i = 0; i < ITERATIONS; i++) {
+          final byte key[] = intToByte(random.nextInt());
+          // does key already exist (avoid duplicates)
+          if (i > 0 && db.get(key) != null) {
+            i--; // generate a different key
+          } else {
+            db.put(key, "value".getBytes());
+          }
+        }
+      }
+
+      // re-open db and read from start to end
+      // integer keys should be in ascending
+      // order as defined by SimpleIntComparator
+      try (final RocksDB db = RocksDB.open(opt, db_path.toString());
+           final RocksIterator it = db.newIterator()) {
+        it.seekToFirst();
+        int lastKey = Integer.MIN_VALUE;
+        int count = 0;
+        for (it.seekToFirst(); it.isValid(); it.next()) {
+          final int thisKey = byteToInt(it.key());
+          assertThat(thisKey).isGreaterThan(lastKey);
+          lastKey = thisKey;
+          count++;
+        }
+        assertThat(count).isEqualTo(ITERATIONS);
+      }
+    }
+  }
+
+  /**
+   * Test which stores random keys into a column family
+   * in the database using the comparator from
+   * {@link #getAscendingIntKeyComparator()}, then checks
+   * that these keys are read back in ascending order.
+   *
+   * @param db_path A path where we can store database
+   *     files temporarily
+   *
+   * @throws java.io.IOException if an IO error happens.
+   * @throws RocksDBException thrown if an error occurs within the native
+   *     part of the library.
+   */
+  public void testRoundtripCf(final Path db_path) throws IOException,
+      RocksDBException {
+
+    try(final AbstractComparator comparator = getAscendingIntKeyComparator()) {
+      final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+          new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+          new ColumnFamilyDescriptor("new_cf".getBytes(),
+              new ColumnFamilyOptions().setComparator(comparator))
+      );
+
+      final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+      try (final DBOptions opt = new DBOptions().
+          setCreateIfMissing(true).
+ setCreateMissingColumnFamilies(true)) { + + // store 10,000 random integer keys + final int ITERATIONS = 10000; + + try (final RocksDB db = RocksDB.open(opt, db_path.toString(), + cfDescriptors, cfHandles)) { + try { + assertThat(cfDescriptors.size()).isEqualTo(2); + assertThat(cfHandles.size()).isEqualTo(2); + + final Random random = new Random(); + for (int i = 0; i < ITERATIONS; i++) { + final byte key[] = intToByte(random.nextInt()); + if (i > 0 && db.get(cfHandles.get(1), key) != null) { + // does key already exist (avoid duplicates) + i--; // generate a different key + } else { + db.put(cfHandles.get(1), key, "value".getBytes()); + } + } + } finally { + for (final ColumnFamilyHandle handle : cfHandles) { + handle.close(); + } + } + cfHandles.clear(); + } + + // re-open db and read from start to end + // integer keys should be in ascending + // order as defined by SimpleIntComparator + try (final RocksDB db = RocksDB.open(opt, db_path.toString(), + cfDescriptors, cfHandles); + final RocksIterator it = db.newIterator(cfHandles.get(1))) { + try { + assertThat(cfDescriptors.size()).isEqualTo(2); + assertThat(cfHandles.size()).isEqualTo(2); + + it.seekToFirst(); + int lastKey = Integer.MIN_VALUE; + int count = 0; + for (it.seekToFirst(); it.isValid(); it.next()) { + final int thisKey = byteToInt(it.key()); + assertThat(thisKey).isGreaterThan(lastKey); + lastKey = thisKey; + count++; + } + + assertThat(count).isEqualTo(ITERATIONS); + + } finally { + for (final ColumnFamilyHandle handle : cfHandles) { + handle.close(); + } + } + cfHandles.clear(); + } + } + } + } + + /** + * Compares integer keys + * so that they are in ascending order + * + * @param a 4-bytes representing an integer key + * @param b 4-bytes representing an integer key + * + * @return negative if a < b, 0 if a == b, positive otherwise + */ + protected final int compareIntKeys(final byte[] a, final byte[] b) { + + final int iA = byteToInt(a); + final int iB = byteToInt(b); + + // protect against int key calculation overflow + final double diff = (double)iA - iB; + final int result; + if (diff < Integer.MIN_VALUE) { + result = Integer.MIN_VALUE; + } else if(diff > Integer.MAX_VALUE) { + result = Integer.MAX_VALUE; + } else { + result = (int)diff; + } + + return result; + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java new file mode 100755 index 0000000000..b50ddf4990 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/BackupEngineTest.java @@ -0,0 +1,240 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public class BackupEngineTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Rule + public TemporaryFolder backupFolder = new TemporaryFolder(); + + @Test + public void backupDb() throws RocksDBException { + // Open empty database. 
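+    // (the second argument to createNewBackup below is flushBeforeBackup:
+    // when true the memtable is flushed first, so the backup does not have
+    // to copy the live WAL files)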
+    try(final Options opt = new Options().setCreateIfMissing(true);
+        final RocksDB db = RocksDB.open(opt,
+            dbFolder.getRoot().getAbsolutePath())) {
+
+      // Fill database with some test values
+      prepareDatabase(db);
+
+      // Create two backups
+      try(final BackupableDBOptions bopt = new BackupableDBOptions(
+          backupFolder.getRoot().getAbsolutePath());
+          final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+        be.createNewBackup(db, false);
+        be.createNewBackup(db, true);
+        verifyNumberOfValidBackups(be, 2);
+      }
+    }
+  }
+
+  @Test
+  public void deleteBackup() throws RocksDBException {
+    // Open empty database.
+    try(final Options opt = new Options().setCreateIfMissing(true);
+        final RocksDB db = RocksDB.open(opt,
+            dbFolder.getRoot().getAbsolutePath())) {
+      // Fill database with some test values
+      prepareDatabase(db);
+      // Create two backups
+      try(final BackupableDBOptions bopt = new BackupableDBOptions(
+          backupFolder.getRoot().getAbsolutePath());
+          final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+        be.createNewBackup(db, false);
+        be.createNewBackup(db, true);
+        final List<BackupInfo> backupInfo =
+            verifyNumberOfValidBackups(be, 2);
+        // Delete the first backup
+        be.deleteBackup(backupInfo.get(0).backupId());
+        final List<BackupInfo> newBackupInfo =
+            verifyNumberOfValidBackups(be, 1);
+
+        // The second backup must remain.
+        assertThat(newBackupInfo.get(0).backupId()).
+            isEqualTo(backupInfo.get(1).backupId());
+      }
+    }
+  }
+
+  @Test
+  public void purgeOldBackups() throws RocksDBException {
+    // Open empty database.
+    try(final Options opt = new Options().setCreateIfMissing(true);
+        final RocksDB db = RocksDB.open(opt,
+            dbFolder.getRoot().getAbsolutePath())) {
+      // Fill database with some test values
+      prepareDatabase(db);
+      // Create four backups
+      try(final BackupableDBOptions bopt = new BackupableDBOptions(
+          backupFolder.getRoot().getAbsolutePath());
+          final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+        be.createNewBackup(db, false);
+        be.createNewBackup(db, true);
+        be.createNewBackup(db, true);
+        be.createNewBackup(db, true);
+        final List<BackupInfo> backupInfo =
+            verifyNumberOfValidBackups(be, 4);
+        // Delete everything except the latest backup
+        be.purgeOldBackups(1);
+        final List<BackupInfo> newBackupInfo =
+            verifyNumberOfValidBackups(be, 1);
+        // The latest backup must remain.
+        assertThat(newBackupInfo.get(0).backupId()).
+            isEqualTo(backupInfo.get(3).backupId());
+      }
+    }
+  }
+
+  @Test
+  public void restoreLatestBackup() throws RocksDBException {
+    try(final Options opt = new Options().setCreateIfMissing(true)) {
+      // Open empty database.
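+      // (db is managed by hand rather than try-with-resources because it
+      // must be closed before the restore below and then reopened)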
+      RocksDB db = null;
+      try {
+        db = RocksDB.open(opt,
+            dbFolder.getRoot().getAbsolutePath());
+        // Fill database with some test values
+        prepareDatabase(db);
+
+        try (final BackupableDBOptions bopt = new BackupableDBOptions(
+            backupFolder.getRoot().getAbsolutePath());
+            final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+          be.createNewBackup(db, true);
+          verifyNumberOfValidBackups(be, 1);
+          db.put("key1".getBytes(), "valueV2".getBytes());
+          db.put("key2".getBytes(), "valueV2".getBytes());
+          be.createNewBackup(db, true);
+          verifyNumberOfValidBackups(be, 2);
+          db.put("key1".getBytes(), "valueV3".getBytes());
+          db.put("key2".getBytes(), "valueV3".getBytes());
+          assertThat(new String(db.get("key1".getBytes()))).endsWith("V3");
+          assertThat(new String(db.get("key2".getBytes()))).endsWith("V3");
+
+          db.close();
+          db = null;
+
+          verifyNumberOfValidBackups(be, 2);
+          // restore db from latest backup
+          try(final RestoreOptions ropts = new RestoreOptions(false)) {
+            be.restoreDbFromLatestBackup(dbFolder.getRoot().getAbsolutePath(),
+                dbFolder.getRoot().getAbsolutePath(), ropts);
+          }
+
+          // Open database again.
+          db = RocksDB.open(opt, dbFolder.getRoot().getAbsolutePath());
+
+          // Values must have suffix V2 because of restoring latest backup.
+          assertThat(new String(db.get("key1".getBytes()))).endsWith("V2");
+          assertThat(new String(db.get("key2".getBytes()))).endsWith("V2");
+        }
+      } finally {
+        if(db != null) {
+          db.close();
+        }
+      }
+    }
+  }
+
+  @Test
+  public void restoreFromBackup()
+      throws RocksDBException {
+    try(final Options opt = new Options().setCreateIfMissing(true)) {
+      RocksDB db = null;
+      try {
+        // Open empty database.
+        db = RocksDB.open(opt,
+            dbFolder.getRoot().getAbsolutePath());
+        // Fill database with some test values
+        prepareDatabase(db);
+        try (final BackupableDBOptions bopt = new BackupableDBOptions(
+            backupFolder.getRoot().getAbsolutePath());
+            final BackupEngine be = BackupEngine.open(opt.getEnv(), bopt)) {
+          be.createNewBackup(db, true);
+          verifyNumberOfValidBackups(be, 1);
+          db.put("key1".getBytes(), "valueV2".getBytes());
+          db.put("key2".getBytes(), "valueV2".getBytes());
+          be.createNewBackup(db, true);
+          verifyNumberOfValidBackups(be, 2);
+          db.put("key1".getBytes(), "valueV3".getBytes());
+          db.put("key2".getBytes(), "valueV3".getBytes());
+          assertThat(new String(db.get("key1".getBytes()))).endsWith("V3");
+          assertThat(new String(db.get("key2".getBytes()))).endsWith("V3");
+
+          //close the database
+          db.close();
+          db = null;
+
+          //restore the backup
+          final List<BackupInfo> backupInfo = verifyNumberOfValidBackups(be, 2);
+          // restore db from first backup
+          be.restoreDbFromBackup(backupInfo.get(0).backupId(),
+              dbFolder.getRoot().getAbsolutePath(),
+              dbFolder.getRoot().getAbsolutePath(),
+              new RestoreOptions(false));
+          // Open database again.
+          db = RocksDB.open(opt,
+              dbFolder.getRoot().getAbsolutePath());
+          // Values must have suffix V1 because the first backup was restored.
+          assertThat(new String(db.get("key1".getBytes()))).endsWith("V1");
+          assertThat(new String(db.get("key2".getBytes()))).endsWith("V1");
+        }
+      } finally {
+        if(db != null) {
+          db.close();
+        }
+      }
+    }
+  }
+
+  /**
+   * Verify backups.
+   *
+   * @param be {@link BackupEngine} instance.
+   * @param expectedNumberOfBackups numerical value
+   * @throws RocksDBException thrown if an error occurs within the native
+   *     part of the library.
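+   * @return the list of {@link BackupInfo} describing the valid backups.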
+   */
+  private List<BackupInfo> verifyNumberOfValidBackups(final BackupEngine be,
+      final int expectedNumberOfBackups) throws RocksDBException {
+    // Verify that backups exist
+    assertThat(be.getCorruptedBackups().length).
+        isEqualTo(0);
+    be.garbageCollect();
+    final List<BackupInfo> backupInfo = be.getBackupInfo();
+    assertThat(backupInfo.size()).
+        isEqualTo(expectedNumberOfBackups);
+    return backupInfo;
+  }
+
+  /**
+   * Fill database with some test values.
+   *
+   * @param db {@link RocksDB} instance.
+   * @throws RocksDBException thrown if an error occurs within the native
+   *     part of the library.
+   */
+  private void prepareDatabase(final RocksDB db)
+      throws RocksDBException {
+    db.put("key1".getBytes(), "valueV1".getBytes());
+    db.put("key2".getBytes(), "valueV1".getBytes());
+  }
+}
diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java
new file mode 100755
index 0000000000..c3836ac9b1
--- /dev/null
+++ b/external/rocksdb/java/src/test/java/org/rocksdb/BackupableDBOptionsTest.java
@@ -0,0 +1,261 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+package org.rocksdb;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+import java.util.Random;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.ExpectedException;
+
+public class BackupableDBOptionsTest {
+
+  private final static String ARBITRARY_PATH =
+      System.getProperty("java.io.tmpdir");
+
+  @ClassRule
+  public static final RocksMemoryResource rocksMemoryResource =
+      new RocksMemoryResource();
+
+  @Rule
+  public ExpectedException exception = ExpectedException.none();
+
+  public static final Random rand = PlatformRandomHelper.
+      getPlatformSpecificRandomFactory();
+
+  @Test
+  public void backupDir() {
+    try (final BackupableDBOptions backupableDBOptions =
+        new BackupableDBOptions(ARBITRARY_PATH)) {
+      assertThat(backupableDBOptions.backupDir()).
+          isEqualTo(ARBITRARY_PATH);
+    }
+  }
+
+  @Test
+  public void shareTableFiles() {
+    try (final BackupableDBOptions backupableDBOptions =
+        new BackupableDBOptions(ARBITRARY_PATH)) {
+      final boolean value = rand.nextBoolean();
+      backupableDBOptions.setShareTableFiles(value);
+      assertThat(backupableDBOptions.shareTableFiles()).
+          isEqualTo(value);
+    }
+  }
+
+  @Test
+  public void sync() {
+    try (final BackupableDBOptions backupableDBOptions =
+        new BackupableDBOptions(ARBITRARY_PATH)) {
+      final boolean value = rand.nextBoolean();
+      backupableDBOptions.setSync(value);
+      assertThat(backupableDBOptions.sync()).isEqualTo(value);
+    }
+  }
+
+  @Test
+  public void destroyOldData() {
+    try (final BackupableDBOptions backupableDBOptions =
+        new BackupableDBOptions(ARBITRARY_PATH)) {
+      final boolean value = rand.nextBoolean();
+      backupableDBOptions.setDestroyOldData(value);
+      assertThat(backupableDBOptions.destroyOldData()).
+          isEqualTo(value);
+    }
+  }
+
+  @Test
+  public void backupLogFiles() {
+    try (final BackupableDBOptions backupableDBOptions =
+        new BackupableDBOptions(ARBITRARY_PATH)) {
+      final boolean value = rand.nextBoolean();
+      backupableDBOptions.setBackupLogFiles(value);
+      assertThat(backupableDBOptions.backupLogFiles()).
+ isEqualTo(value); + } + } + + @Test + public void backupRateLimit() { + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + final long value = Math.abs(rand.nextLong()); + backupableDBOptions.setBackupRateLimit(value); + assertThat(backupableDBOptions.backupRateLimit()). + isEqualTo(value); + // negative will be mapped to 0 + backupableDBOptions.setBackupRateLimit(-1); + assertThat(backupableDBOptions.backupRateLimit()). + isEqualTo(0); + } + } + + @Test + public void restoreRateLimit() { + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + final long value = Math.abs(rand.nextLong()); + backupableDBOptions.setRestoreRateLimit(value); + assertThat(backupableDBOptions.restoreRateLimit()). + isEqualTo(value); + // negative will be mapped to 0 + backupableDBOptions.setRestoreRateLimit(-1); + assertThat(backupableDBOptions.restoreRateLimit()). + isEqualTo(0); + } + } + + @Test + public void shareFilesWithChecksum() { + try (final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH)) { + boolean value = rand.nextBoolean(); + backupableDBOptions.setShareFilesWithChecksum(value); + assertThat(backupableDBOptions.shareFilesWithChecksum()). + isEqualTo(value); + } + } + + @Test + public void failBackupDirIsNull() { + exception.expect(IllegalArgumentException.class); + try (final BackupableDBOptions opts = new BackupableDBOptions(null)) { + //no-op + } + } + + @Test + public void failBackupDirIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.backupDir(); + } + } + + @Test + public void failSetShareTableFilesIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setShareTableFiles(true); + } + } + + @Test + public void failShareTableFilesIfDisposed() { + try (BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.shareTableFiles(); + } + } + + @Test + public void failSetSyncIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setSync(true); + } + } + + @Test + public void failSyncIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.sync(); + } + } + + @Test + public void failSetDestroyOldDataIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setDestroyOldData(true); + } + } + + @Test + public void failDestroyOldDataIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.destroyOldData(); + } + } + + @Test + public void failSetBackupLogFilesIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setBackupLogFiles(true); + } + } + + @Test + public void failBackupLogFilesIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.backupLogFiles(); + } + } + + @Test + public void failSetBackupRateLimitIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setBackupRateLimit(1); + } + } + + @Test + public void failBackupRateLimitIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + 
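+      // the native handle was disposed by the helper above, so this call is
+      // expected to raise the AssertionError registered via exception.expect()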
options.backupRateLimit(); + } + } + + @Test + public void failSetRestoreRateLimitIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setRestoreRateLimit(1); + } + } + + @Test + public void failRestoreRateLimitIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.restoreRateLimit(); + } + } + + @Test + public void failSetShareFilesWithChecksumIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.setShareFilesWithChecksum(true); + } + } + + @Test + public void failShareFilesWithChecksumIfDisposed() { + try (final BackupableDBOptions options = + setupUninitializedBackupableDBOptions(exception)) { + options.shareFilesWithChecksum(); + } + } + + private BackupableDBOptions setupUninitializedBackupableDBOptions( + ExpectedException exception) { + final BackupableDBOptions backupableDBOptions = + new BackupableDBOptions(ARBITRARY_PATH); + backupableDBOptions.close(); + exception.expect(AssertionError.class); + return backupableDBOptions; + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java new file mode 100755 index 0000000000..ccb7b7625f --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/BlockBasedTableConfigTest.java @@ -0,0 +1,171 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class BlockBasedTableConfigTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void noBlockCache() { + BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setNoBlockCache(true); + assertThat(blockBasedTableConfig.noBlockCache()).isTrue(); + } + + @Test + public void blockCacheSize() { + BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockCacheSize(8 * 1024); + assertThat(blockBasedTableConfig.blockCacheSize()). + isEqualTo(8 * 1024); + } + + @Test + public void blockSizeDeviation() { + BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockSizeDeviation(12); + assertThat(blockBasedTableConfig.blockSizeDeviation()). + isEqualTo(12); + } + + @Test + public void blockRestartInterval() { + BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setBlockRestartInterval(15); + assertThat(blockBasedTableConfig.blockRestartInterval()). + isEqualTo(15); + } + + @Test + public void wholeKeyFiltering() { + BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig(); + blockBasedTableConfig.setWholeKeyFiltering(false); + assertThat(blockBasedTableConfig.wholeKeyFiltering()). 
+        isFalse();
+  }
+
+  @Test
+  public void cacheIndexAndFilterBlocks() {
+    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+    blockBasedTableConfig.setCacheIndexAndFilterBlocks(true);
+    assertThat(blockBasedTableConfig.cacheIndexAndFilterBlocks()).
+        isTrue();
+  }
+
+  @Test
+  public void hashIndexAllowCollision() {
+    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+    blockBasedTableConfig.setHashIndexAllowCollision(false);
+    assertThat(blockBasedTableConfig.hashIndexAllowCollision()).
+        isFalse();
+  }
+
+  @Test
+  public void blockCacheCompressedSize() {
+    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+    blockBasedTableConfig.setBlockCacheCompressedSize(40);
+    assertThat(blockBasedTableConfig.blockCacheCompressedSize()).
+        isEqualTo(40);
+  }
+
+  @Test
+  public void checksumType() {
+    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+    assertThat(ChecksumType.values().length).isEqualTo(3);
+    assertThat(ChecksumType.valueOf("kxxHash")).
+        isEqualTo(ChecksumType.kxxHash);
+    blockBasedTableConfig.setChecksumType(ChecksumType.kNoChecksum);
+    blockBasedTableConfig.setChecksumType(ChecksumType.kxxHash);
+    // use isEqualTo so the assertion is actually enforced;
+    // assertThat(x.equals(y)) without a matcher verifies nothing
+    assertThat(blockBasedTableConfig.checksumType()).
+        isEqualTo(ChecksumType.kxxHash);
+  }
+
+  @Test
+  public void indexType() {
+    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+    assertThat(IndexType.values().length).isEqualTo(2);
+    blockBasedTableConfig.setIndexType(IndexType.kHashSearch);
+    assertThat(blockBasedTableConfig.indexType()).
+        isEqualTo(IndexType.kHashSearch);
+    assertThat(IndexType.valueOf("kBinarySearch")).isNotNull();
+    blockBasedTableConfig.setIndexType(IndexType.valueOf("kBinarySearch"));
+    assertThat(blockBasedTableConfig.indexType()).
+        isEqualTo(IndexType.kBinarySearch);
+  }
+
+  @Test
+  public void blockCacheCompressedNumShardBits() {
+    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+    blockBasedTableConfig.setBlockCacheCompressedNumShardBits(4);
+    assertThat(blockBasedTableConfig.blockCacheCompressedNumShardBits()).
+        isEqualTo(4);
+  }
+
+  @Test
+  public void cacheNumShardBits() {
+    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+    blockBasedTableConfig.setCacheNumShardBits(5);
+    assertThat(blockBasedTableConfig.cacheNumShardBits()).
+        isEqualTo(5);
+  }
+
+  @Test
+  public void blockSize() {
+    BlockBasedTableConfig blockBasedTableConfig = new BlockBasedTableConfig();
+    blockBasedTableConfig.setBlockSize(10);
+    assertThat(blockBasedTableConfig.blockSize()).isEqualTo(10);
+  }
+
+  @Test
+  public void blockBasedTableWithFilter() {
+    try(final Options options = new Options()
+        .setTableFormatConfig(new BlockBasedTableConfig()
+            .setFilter(new BloomFilter(10)))) {
+      assertThat(options.tableFactoryName()).
+          isEqualTo("BlockBasedTable");
+    }
+  }
+
+  @Test
+  public void blockBasedTableWithoutFilter() {
+    try(final Options options = new Options().setTableFormatConfig(
+        new BlockBasedTableConfig().setFilter(null))) {
+      assertThat(options.tableFactoryName()).
+ isEqualTo("BlockBasedTable"); + } + } + + @Test + public void blockBasedTableFormatVersion() { + BlockBasedTableConfig config = new BlockBasedTableConfig(); + for (int version=0; version<=2; version++) { + config.setFormatVersion(version); + assertThat(config.formatVersion()).isEqualTo(version); + } + } + + @Test(expected = AssertionError.class) + public void blockBasedTableFormatVersionFailNegative() { + BlockBasedTableConfig config = new BlockBasedTableConfig(); + config.setFormatVersion(-1); + } + + @Test(expected = AssertionError.class) + public void blockBasedTableFormatVersionFailIllegalVersion() { + BlockBasedTableConfig config = new BlockBasedTableConfig(); + config.setFormatVersion(3); + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java new file mode 100755 index 0000000000..e79569fb8b --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/CheckPointTest.java @@ -0,0 +1,82 @@ +package org.rocksdb; + + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.assertj.core.api.Assertions.assertThat; + +public class CheckPointTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Rule + public TemporaryFolder checkpointFolder = new TemporaryFolder(); + + @Test + public void checkPoint() throws RocksDBException { + try (final Options options = new Options(). + setCreateIfMissing(true)) { + + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put("key".getBytes(), "value".getBytes()); + try (final Checkpoint checkpoint = Checkpoint.create(db)) { + checkpoint.createCheckpoint(checkpointFolder. + getRoot().getAbsolutePath() + "/snapshot1"); + db.put("key2".getBytes(), "value2".getBytes()); + checkpoint.createCheckpoint(checkpointFolder. + getRoot().getAbsolutePath() + "/snapshot2"); + } + } + + try (final RocksDB db = RocksDB.open(options, + checkpointFolder.getRoot().getAbsolutePath() + + "/snapshot1")) { + assertThat(new String(db.get("key".getBytes()))). + isEqualTo("value"); + assertThat(db.get("key2".getBytes())).isNull(); + } + + try (final RocksDB db = RocksDB.open(options, + checkpointFolder.getRoot().getAbsolutePath() + + "/snapshot2")) { + assertThat(new String(db.get("key".getBytes()))). + isEqualTo("value"); + assertThat(new String(db.get("key2".getBytes()))). 
+ isEqualTo("value2"); + } + } + } + + @Test(expected = IllegalArgumentException.class) + public void failIfDbIsNull() { + try (final Checkpoint checkpoint = Checkpoint.create(null)) { + + } + } + + @Test(expected = IllegalStateException.class) + public void failIfDbNotInitialized() throws RocksDBException { + try (final RocksDB db = RocksDB.open( + dbFolder.getRoot().getAbsolutePath())) { + db.close(); + Checkpoint.create(db); + } + } + + @Test(expected = RocksDBException.class) + public void failWithIllegalPath() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final Checkpoint checkpoint = Checkpoint.create(db)) { + checkpoint.createCheckpoint("/Z:///:\\C:\\TZ/-"); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java new file mode 100755 index 0000000000..18c5270e8e --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyOptionsTest.java @@ -0,0 +1,472 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import java.util.ArrayList; +import java.util.List; +import java.util.Properties; +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ColumnFamilyOptionsTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + public static final Random rand = PlatformRandomHelper. + getPlatformSpecificRandomFactory(); + + @Test + public void getColumnFamilyOptionsFromProps() { + Properties properties = new Properties(); + properties.put("write_buffer_size", "112"); + properties.put("max_write_buffer_number", "13"); + + try (final ColumnFamilyOptions opt = ColumnFamilyOptions. + getColumnFamilyOptionsFromProps(properties)) { + // setup sample properties + assertThat(opt).isNotNull(); + assertThat(String.valueOf(opt.writeBufferSize())). + isEqualTo(properties.get("write_buffer_size")); + assertThat(String.valueOf(opt.maxWriteBufferNumber())). 
+ isEqualTo(properties.get("max_write_buffer_number")); + } + } + + @Test + public void failColumnFamilyOptionsFromPropsWithIllegalValue() { + // setup sample properties + final Properties properties = new Properties(); + properties.put("tomato", "1024"); + properties.put("burger", "2"); + + try (final ColumnFamilyOptions opt = + ColumnFamilyOptions.getColumnFamilyOptionsFromProps(properties)) { + assertThat(opt).isNull(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void failColumnFamilyOptionsFromPropsWithNullValue() { + try (final ColumnFamilyOptions opt = + ColumnFamilyOptions.getColumnFamilyOptionsFromProps(null)) { + } + } + + @Test(expected = IllegalArgumentException.class) + public void failColumnFamilyOptionsFromPropsWithEmptyProps() { + try (final ColumnFamilyOptions opt = + ColumnFamilyOptions.getColumnFamilyOptionsFromProps( + new Properties())) { + } + } + + @Test + public void writeBufferSize() throws RocksDBException { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setWriteBufferSize(longValue); + assertThat(opt.writeBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void maxWriteBufferNumber() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxWriteBufferNumber(intValue); + assertThat(opt.maxWriteBufferNumber()).isEqualTo(intValue); + } + } + + @Test + public void minWriteBufferNumberToMerge() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setMinWriteBufferNumberToMerge(intValue); + assertThat(opt.minWriteBufferNumberToMerge()).isEqualTo(intValue); + } + } + + @Test + public void numLevels() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setNumLevels(intValue); + assertThat(opt.numLevels()).isEqualTo(intValue); + } + } + + @Test + public void levelZeroFileNumCompactionTrigger() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setLevelZeroFileNumCompactionTrigger(intValue); + assertThat(opt.levelZeroFileNumCompactionTrigger()).isEqualTo(intValue); + } + } + + @Test + public void levelZeroSlowdownWritesTrigger() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setLevelZeroSlowdownWritesTrigger(intValue); + assertThat(opt.levelZeroSlowdownWritesTrigger()).isEqualTo(intValue); + } + } + + @Test + public void levelZeroStopWritesTrigger() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setLevelZeroStopWritesTrigger(intValue); + assertThat(opt.levelZeroStopWritesTrigger()).isEqualTo(intValue); + } + } + + @Test + public void targetFileSizeBase() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setTargetFileSizeBase(longValue); + assertThat(opt.targetFileSizeBase()).isEqualTo(longValue); + } + } + + @Test + public void targetFileSizeMultiplier() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setTargetFileSizeMultiplier(intValue); + assertThat(opt.targetFileSizeMultiplier()).isEqualTo(intValue); + } + } + + @Test + public void maxBytesForLevelBase() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = 
rand.nextLong(); + opt.setMaxBytesForLevelBase(longValue); + assertThat(opt.maxBytesForLevelBase()).isEqualTo(longValue); + } + } + + @Test + public void levelCompactionDynamicLevelBytes() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setLevelCompactionDynamicLevelBytes(boolValue); + assertThat(opt.levelCompactionDynamicLevelBytes()) + .isEqualTo(boolValue); + } + } + + @Test + public void maxBytesForLevelMultiplier() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxBytesForLevelMultiplier(intValue); + assertThat(opt.maxBytesForLevelMultiplier()).isEqualTo(intValue); + } + } + + @Test + public void expandedCompactionFactor() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setExpandedCompactionFactor(intValue); + assertThat(opt.expandedCompactionFactor()).isEqualTo(intValue); + } + } + + @Test + public void sourceCompactionFactor() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setSourceCompactionFactor(intValue); + assertThat(opt.sourceCompactionFactor()).isEqualTo(intValue); + } + } + + @Test + public void maxGrandparentOverlapFactor() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxGrandparentOverlapFactor(intValue); + assertThat(opt.maxGrandparentOverlapFactor()).isEqualTo(intValue); + } + } + + @Test + public void softRateLimit() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final double doubleValue = rand.nextDouble(); + opt.setSoftRateLimit(doubleValue); + assertThat(opt.softRateLimit()).isEqualTo(doubleValue); + } + } + + @Test + public void hardRateLimit() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final double doubleValue = rand.nextDouble(); + opt.setHardRateLimit(doubleValue); + assertThat(opt.hardRateLimit()).isEqualTo(doubleValue); + } + } + + @Test + public void rateLimitDelayMaxMilliseconds() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setRateLimitDelayMaxMilliseconds(intValue); + assertThat(opt.rateLimitDelayMaxMilliseconds()).isEqualTo(intValue); + } + } + + @Test + public void arenaBlockSize() throws RocksDBException { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setArenaBlockSize(longValue); + assertThat(opt.arenaBlockSize()).isEqualTo(longValue); + } + } + + @Test + public void disableAutoCompactions() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setDisableAutoCompactions(boolValue); + assertThat(opt.disableAutoCompactions()).isEqualTo(boolValue); + } + } + + @Test + public void purgeRedundantKvsWhileFlush() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setPurgeRedundantKvsWhileFlush(boolValue); + assertThat(opt.purgeRedundantKvsWhileFlush()).isEqualTo(boolValue); + } + } + + @Test + public void verifyChecksumsInCompaction() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setVerifyChecksumsInCompaction(boolValue); + assertThat(opt.verifyChecksumsInCompaction()).isEqualTo(boolValue); + } + } + + 
@Test + public void maxSequentialSkipInIterations() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxSequentialSkipInIterations(longValue); + assertThat(opt.maxSequentialSkipInIterations()).isEqualTo(longValue); + } + } + + @Test + public void inplaceUpdateSupport() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setInplaceUpdateSupport(boolValue); + assertThat(opt.inplaceUpdateSupport()).isEqualTo(boolValue); + } + } + + @Test + public void inplaceUpdateNumLocks() throws RocksDBException { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setInplaceUpdateNumLocks(longValue); + assertThat(opt.inplaceUpdateNumLocks()).isEqualTo(longValue); + } + } + + @Test + public void memtablePrefixBloomSizeRatio() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final double doubleValue = rand.nextDouble(); + opt.setMemtablePrefixBloomSizeRatio(doubleValue); + assertThat(opt.memtablePrefixBloomSizeRatio()).isEqualTo(doubleValue); + } + } + + @Test + public void bloomLocality() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setBloomLocality(intValue); + assertThat(opt.bloomLocality()).isEqualTo(intValue); + } + } + + @Test + public void maxSuccessiveMerges() throws RocksDBException { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxSuccessiveMerges(longValue); + assertThat(opt.maxSuccessiveMerges()).isEqualTo(longValue); + } + } + + @Test + public void minPartialMergeOperands() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final int intValue = rand.nextInt(); + opt.setMinPartialMergeOperands(intValue); + assertThat(opt.minPartialMergeOperands()).isEqualTo(intValue); + } + } + + @Test + public void optimizeFiltersForHits() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + final boolean aBoolean = rand.nextBoolean(); + opt.setOptimizeFiltersForHits(aBoolean); + assertThat(opt.optimizeFiltersForHits()).isEqualTo(aBoolean); + } + } + + @Test + public void memTable() throws RocksDBException { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + opt.setMemTableConfig(new HashLinkedListMemTableConfig()); + assertThat(opt.memTableFactoryName()). 
+ isEqualTo("HashLinkedListRepFactory"); + } + } + + @Test + public void comparator() throws RocksDBException { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + opt.setComparator(BuiltinComparator.BYTEWISE_COMPARATOR); + } + } + + @Test + public void linkageOfPrepMethods() { + try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) { + options.optimizeUniversalStyleCompaction(); + options.optimizeUniversalStyleCompaction(4000); + options.optimizeLevelStyleCompaction(); + options.optimizeLevelStyleCompaction(3000); + options.optimizeForPointLookup(10); + } + } + + @Test + public void shouldSetTestPrefixExtractor() { + try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) { + options.useFixedLengthPrefixExtractor(100); + options.useFixedLengthPrefixExtractor(10); + } + } + + @Test + public void shouldSetTestCappedPrefixExtractor() { + try (final ColumnFamilyOptions options = new ColumnFamilyOptions()) { + options.useCappedPrefixExtractor(100); + options.useCappedPrefixExtractor(10); + } + } + + @Test + public void compressionTypes() { + try (final ColumnFamilyOptions columnFamilyOptions + = new ColumnFamilyOptions()) { + for (final CompressionType compressionType : + CompressionType.values()) { + columnFamilyOptions.setCompressionType(compressionType); + assertThat(columnFamilyOptions.compressionType()). + isEqualTo(compressionType); + assertThat(CompressionType.valueOf("NO_COMPRESSION")). + isEqualTo(CompressionType.NO_COMPRESSION); + } + } + } + + @Test + public void compressionPerLevel() { + try (final ColumnFamilyOptions columnFamilyOptions + = new ColumnFamilyOptions()) { + assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty(); + List compressionTypeList = new ArrayList<>(); + for (int i = 0; i < columnFamilyOptions.numLevels(); i++) { + compressionTypeList.add(CompressionType.NO_COMPRESSION); + } + columnFamilyOptions.setCompressionPerLevel(compressionTypeList); + compressionTypeList = columnFamilyOptions.compressionPerLevel(); + for (CompressionType compressionType : compressionTypeList) { + assertThat(compressionType).isEqualTo( + CompressionType.NO_COMPRESSION); + } + } + } + + @Test + public void differentCompressionsPerLevel() { + try (final ColumnFamilyOptions columnFamilyOptions + = new ColumnFamilyOptions()) { + columnFamilyOptions.setNumLevels(3); + + assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty(); + List compressionTypeList = new ArrayList<>(); + + compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION); + compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION); + compressionTypeList.add(CompressionType.LZ4_COMPRESSION); + + columnFamilyOptions.setCompressionPerLevel(compressionTypeList); + compressionTypeList = columnFamilyOptions.compressionPerLevel(); + + assertThat(compressionTypeList.size()).isEqualTo(3); + assertThat(compressionTypeList). + containsExactly( + CompressionType.BZLIB2_COMPRESSION, + CompressionType.SNAPPY_COMPRESSION, + CompressionType.LZ4_COMPRESSION); + + } + } + + @Test + public void compactionStyles() { + try (final ColumnFamilyOptions columnFamilyOptions + = new ColumnFamilyOptions()) { + for (final CompactionStyle compactionStyle : + CompactionStyle.values()) { + columnFamilyOptions.setCompactionStyle(compactionStyle); + assertThat(columnFamilyOptions.compactionStyle()). + isEqualTo(compactionStyle); + assertThat(CompactionStyle.valueOf("FIFO")). 
+            isEqualTo(CompactionStyle.FIFO);
+      }
+    }
+  }
+
+  @Test
+  public void maxTableFilesSizeFIFO() {
+    try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) {
+      long longValue = rand.nextLong();
+      // Size has to be positive
+      longValue = (longValue < 0) ? -longValue : longValue;
+      longValue = (longValue == 0) ? longValue + 1 : longValue;
+      opt.setMaxTableFilesSizeFIFO(longValue);
+      assertThat(opt.maxTableFilesSizeFIFO()).
+          isEqualTo(longValue);
+    }
+  }
+}
diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
new file mode 100755
index 0000000000..c5b4fe96af
--- /dev/null
+++ b/external/rocksdb/java/src/test/java/org/rocksdb/ColumnFamilyTest.java
@@ -0,0 +1,605 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+package org.rocksdb;
+
+import java.util.*;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class ColumnFamilyTest {
+
+  @ClassRule
+  public static final RocksMemoryResource rocksMemoryResource =
+      new RocksMemoryResource();
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Test
+  public void listColumnFamilies() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+      // Test listColumnFamilies
+      final List<byte[]> columnFamilyNames = RocksDB.listColumnFamilies(options,
+          dbFolder.getRoot().getAbsolutePath());
+      assertThat(columnFamilyNames).isNotNull();
+      assertThat(columnFamilyNames.size()).isGreaterThan(0);
+      assertThat(columnFamilyNames.size()).isEqualTo(1);
+      assertThat(new String(columnFamilyNames.get(0))).isEqualTo("default");
+    }
+  }
+
+  @Test
+  public void defaultColumnFamily() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+      final ColumnFamilyHandle cfh = db.getDefaultColumnFamily();
+      try {
+        assertThat(cfh).isNotNull();
+
+        final byte[] key = "key".getBytes();
+        final byte[] value = "value".getBytes();
+
+        db.put(cfh, key, value);
+
+        final byte[] actualValue = db.get(cfh, key);
+
+        assertThat(cfh).isNotNull();
+        assertThat(actualValue).isEqualTo(value);
+      } finally {
+        cfh.close();
+      }
+    }
+  }
+
+  @Test
+  public void createColumnFamily() throws RocksDBException {
+    try (final Options options = new Options().setCreateIfMissing(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath())) {
+      final ColumnFamilyHandle columnFamilyHandle = db.createColumnFamily(
+          new ColumnFamilyDescriptor("new_cf".getBytes(),
+              new ColumnFamilyOptions()));
+      try {
+        final List<byte[]> columnFamilyNames = RocksDB.listColumnFamilies(
+            options, dbFolder.getRoot().getAbsolutePath());
+        assertThat(columnFamilyNames).isNotNull();
+        assertThat(columnFamilyNames.size()).isGreaterThan(0);
+        assertThat(columnFamilyNames.size()).isEqualTo(2);
+        assertThat(new String(columnFamilyNames.get(0))).isEqualTo("default");
+        assertThat(new String(columnFamilyNames.get(1))).isEqualTo("new_cf");
+      } finally {
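+        // column family handles are not closed by the db and must be
+        // released explicitly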
columnFamilyHandle.close(); + } + } + } + + @Test + public void openWithColumnFamilies() throws RocksDBException { + final List cfNames = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes()) + ); + + final List columnFamilyHandleList = + new ArrayList<>(); + + // Test open database with column family names + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfNames, + columnFamilyHandleList)) { + + try { + assertThat(columnFamilyHandleList.size()).isEqualTo(2); + db.put("dfkey1".getBytes(), "dfvalue".getBytes()); + db.put(columnFamilyHandleList.get(0), "dfkey2".getBytes(), + "dfvalue".getBytes()); + db.put(columnFamilyHandleList.get(1), "newcfkey1".getBytes(), + "newcfvalue".getBytes()); + + String retVal = new String(db.get(columnFamilyHandleList.get(1), + "newcfkey1".getBytes())); + assertThat(retVal).isEqualTo("newcfvalue"); + assertThat((db.get(columnFamilyHandleList.get(1), + "dfkey1".getBytes()))).isNull(); + db.remove(columnFamilyHandleList.get(1), "newcfkey1".getBytes()); + assertThat((db.get(columnFamilyHandleList.get(1), + "newcfkey1".getBytes()))).isNull(); + db.remove(columnFamilyHandleList.get(0), new WriteOptions(), + "dfkey2".getBytes()); + assertThat(db.get(columnFamilyHandleList.get(0), new ReadOptions(), + "dfkey2".getBytes())).isNull(); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test + public void getWithOutValueAndCf() throws RocksDBException { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY)); + final List columnFamilyHandleList = new ArrayList<>(); + + // Test open database with column family names + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + try { + db.put(columnFamilyHandleList.get(0), new WriteOptions(), + "key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + final byte[] outValue = new byte[5]; + // not found value + int getResult = db.get("keyNotFound".getBytes(), outValue); + assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND); + // found value which fits in outValue + getResult = db.get(columnFamilyHandleList.get(0), "key1".getBytes(), + outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("value".getBytes()); + // found value which fits partially + getResult = db.get(columnFamilyHandleList.get(0), new ReadOptions(), + "key2".getBytes(), outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("12345".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test + public void createWriteDropColumnFamily() throws RocksDBException { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + 
final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + ColumnFamilyHandle tmpColumnFamilyHandle = null; + try { + tmpColumnFamilyHandle = db.createColumnFamily( + new ColumnFamilyDescriptor("tmpCF".getBytes(), + new ColumnFamilyOptions())); + db.put(tmpColumnFamilyHandle, "key".getBytes(), "value".getBytes()); + db.dropColumnFamily(tmpColumnFamilyHandle); + } finally { + if (tmpColumnFamilyHandle != null) { + tmpColumnFamilyHandle.close(); + } + for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test + public void writeBatch() throws RocksDBException { + try (final ColumnFamilyOptions defaultCfOptions = new ColumnFamilyOptions() + .setMergeOperator(new StringAppendOperator())) { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, + defaultCfOptions), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList); + final WriteBatch writeBatch = new WriteBatch(); + final WriteOptions writeOpt = new WriteOptions()) { + try { + writeBatch.put("key".getBytes(), "value".getBytes()); + writeBatch.put(db.getDefaultColumnFamily(), + "mergeKey".getBytes(), "merge".getBytes()); + writeBatch.merge(db.getDefaultColumnFamily(), "mergeKey".getBytes(), + "merge".getBytes()); + writeBatch.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), + "value".getBytes()); + writeBatch.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(), + "value2".getBytes()); + writeBatch.remove("xyz".getBytes()); + writeBatch.remove(columnFamilyHandleList.get(1), "xyz".getBytes()); + db.write(writeOpt, writeBatch); + + assertThat(db.get(columnFamilyHandleList.get(1), + "xyz".getBytes()) == null); + assertThat(new String(db.get(columnFamilyHandleList.get(1), + "newcfkey".getBytes()))).isEqualTo("value"); + assertThat(new String(db.get(columnFamilyHandleList.get(1), + "newcfkey2".getBytes()))).isEqualTo("value2"); + assertThat(new String(db.get("key".getBytes()))).isEqualTo("value"); + // check if key is merged + assertThat(new String(db.get(db.getDefaultColumnFamily(), + "mergeKey".getBytes()))).isEqualTo("merge,merge"); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + } + + @Test + public void iteratorOnColumnFamily() throws RocksDBException { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList)) { + try { + + db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), + "value".getBytes()); + db.put(columnFamilyHandleList.get(1), "newcfkey2".getBytes(), + "value2".getBytes()); + try (final RocksIterator rocksIterator = + db.newIterator(columnFamilyHandleList.get(1))) { + rocksIterator.seekToFirst(); + Map refMap = new HashMap<>(); + refMap.put("newcfkey", 
"value"); + refMap.put("newcfkey2", "value2"); + int i = 0; + while (rocksIterator.isValid()) { + i++; + assertThat(refMap.get(new String(rocksIterator.key()))). + isEqualTo(new String(rocksIterator.value())); + rocksIterator.next(); + } + assertThat(i).isEqualTo(2); + } + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test + public void multiGet() throws RocksDBException { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList)) { + try { + db.put(columnFamilyHandleList.get(0), "key".getBytes(), + "value".getBytes()); + db.put(columnFamilyHandleList.get(1), "newcfkey".getBytes(), + "value".getBytes()); + + final List keys = Arrays.asList(new byte[][]{ + "key".getBytes(), "newcfkey".getBytes() + }); + Map retValues = db.multiGet(columnFamilyHandleList, + keys); + assertThat(retValues.size()).isEqualTo(2); + assertThat(new String(retValues.get(keys.get(0)))) + .isEqualTo("value"); + assertThat(new String(retValues.get(keys.get(1)))) + .isEqualTo("value"); + retValues = db.multiGet(new ReadOptions(), columnFamilyHandleList, + keys); + assertThat(retValues.size()).isEqualTo(2); + assertThat(new String(retValues.get(keys.get(0)))) + .isEqualTo("value"); + assertThat(new String(retValues.get(keys.get(1)))) + .isEqualTo("value"); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test + public void properties() throws RocksDBException { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList)) { + try { + assertThat(db.getProperty("rocksdb.estimate-num-keys")). 
+ isNotNull(); + assertThat(db.getLongProperty(columnFamilyHandleList.get(0), + "rocksdb.estimate-num-keys")).isGreaterThanOrEqualTo(0); + assertThat(db.getProperty("rocksdb.stats")).isNotNull(); + assertThat(db.getProperty(columnFamilyHandleList.get(0), + "rocksdb.sstables")).isNotNull(); + assertThat(db.getProperty(columnFamilyHandleList.get(1), + "rocksdb.estimate-num-keys")).isNotNull(); + assertThat(db.getProperty(columnFamilyHandleList.get(1), + "rocksdb.stats")).isNotNull(); + assertThat(db.getProperty(columnFamilyHandleList.get(1), + "rocksdb.sstables")).isNotNull(); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + + @Test + public void iterators() throws RocksDBException { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + List iterators = null; + try { + iterators = db.newIterators(columnFamilyHandleList); + assertThat(iterators.size()).isEqualTo(2); + RocksIterator iter = iterators.get(0); + iter.seekToFirst(); + final Map defRefMap = new HashMap<>(); + defRefMap.put("dfkey1", "dfvalue"); + defRefMap.put("key", "value"); + while (iter.isValid()) { + assertThat(defRefMap.get(new String(iter.key()))). + isEqualTo(new String(iter.value())); + iter.next(); + } + // iterate over new_cf key/value pairs + final Map cfRefMap = new HashMap<>(); + cfRefMap.put("newcfkey", "value"); + cfRefMap.put("newcfkey2", "value2"); + iter = iterators.get(1); + iter.seekToFirst(); + while (iter.isValid()) { + assertThat(cfRefMap.get(new String(iter.key()))). 
+ isEqualTo(new String(iter.value())); + iter.next(); + } + } finally { + if (iterators != null) { + for (final RocksIterator rocksIterator : iterators) { + rocksIterator.close(); + } + } + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test(expected = RocksDBException.class) + public void failPutDisposedCF() throws RocksDBException { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList)) { + try { + db.dropColumnFamily(columnFamilyHandleList.get(1)); + db.put(columnFamilyHandleList.get(1), "key".getBytes(), + "value".getBytes()); + } finally { + for (ColumnFamilyHandle columnFamilyHandle : columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test(expected = RocksDBException.class) + public void failRemoveDisposedCF() throws RocksDBException { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList)) { + try { + db.dropColumnFamily(columnFamilyHandleList.get(1)); + db.remove(columnFamilyHandleList.get(1), "key".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test(expected = RocksDBException.class) + public void failGetDisposedCF() throws RocksDBException { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + try { + db.dropColumnFamily(columnFamilyHandleList.get(1)); + db.get(columnFamilyHandleList.get(1), "key".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test(expected = RocksDBException.class) + public void failMultiGetWithoutCorrectNumberOfCF() throws RocksDBException { + final List cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes())); + final List columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions options = new DBOptions() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + try { + final List keys = new ArrayList<>(); + keys.add("key".getBytes()); + keys.add("newcfkey".getBytes()); + final List cfCustomList = new ArrayList<>(); + db.multiGet(cfCustomList, keys); + + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + 
columnFamilyHandle.close(); + } + } + } + } + + @Test + public void testByteCreateFolumnFamily() throws RocksDBException { + + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { + final byte[] b0 = new byte[]{(byte) 0x00}; + final byte[] b1 = new byte[]{(byte) 0x01}; + final byte[] b2 = new byte[]{(byte) 0x02}; + ColumnFamilyHandle cf1 = null, cf2 = null, cf3 = null; + try { + cf1 = db.createColumnFamily(new ColumnFamilyDescriptor(b0)); + cf2 = db.createColumnFamily(new ColumnFamilyDescriptor(b1)); + final List families = RocksDB.listColumnFamilies(options, + dbFolder.getRoot().getAbsolutePath()); + assertThat(families).contains("default".getBytes(), b0, b1); + cf3 = db.createColumnFamily(new ColumnFamilyDescriptor(b2)); + } finally { + if (cf1 != null) { + cf1.close(); + } + if (cf2 != null) { + cf2.close(); + } + if (cf3 != null) { + cf3.close(); + } + } + } + } + + @Test + public void testCFNamesWithZeroBytes() throws RocksDBException { + ColumnFamilyHandle cf1 = null, cf2 = null; + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + ) { + try { + final byte[] b0 = new byte[]{0, 0}; + final byte[] b1 = new byte[]{0, 1}; + cf1 = db.createColumnFamily(new ColumnFamilyDescriptor(b0)); + cf2 = db.createColumnFamily(new ColumnFamilyDescriptor(b1)); + final List families = RocksDB.listColumnFamilies(options, + dbFolder.getRoot().getAbsolutePath()); + assertThat(families).contains("default".getBytes(), b0, b1); + } finally { + if (cf1 != null) { + cf1.close(); + } + if (cf2 != null) { + cf2.close(); + } + } + } + } + + @Test + public void testCFNameSimplifiedChinese() throws RocksDBException { + ColumnFamilyHandle columnFamilyHandle = null; + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + ) { + try { + final String simplifiedChinese = "\u7b80\u4f53\u5b57"; + columnFamilyHandle = db.createColumnFamily( + new ColumnFamilyDescriptor(simplifiedChinese.getBytes())); + + final List families = RocksDB.listColumnFamilies(options, + dbFolder.getRoot().getAbsolutePath()); + assertThat(families).contains("default".getBytes(), + simplifiedChinese.getBytes()); + } finally { + if (columnFamilyHandle != null) { + columnFamilyHandle.close(); + } + } + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java new file mode 100755 index 0000000000..fcdd09acba --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/ComparatorOptionsTest.java @@ -0,0 +1,32 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
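+// [Editor's note: the sketch below is illustrative and not part of the
+// upstream RocksDB sources. ComparatorOptions tunes the native wrapper that
+// backs a custom Java comparator; the only accessors assumed here are the
+// setUseAdaptiveMutex()/useAdaptiveMutex() pair exercised by this test.]
+//
+//   try (final ComparatorOptions copt = new ComparatorOptions()) {
+//     copt.setUseAdaptiveMutex(true); // spin briefly before parking threads
+//     // hand copt to a custom org.rocksdb.Comparator subclass
+//   }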
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ComparatorOptionsTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void comparatorOptions() { + try(final ComparatorOptions copt = new ComparatorOptions()) { + + assertThat(copt).isNotNull(); + // UseAdaptiveMutex test + copt.setUseAdaptiveMutex(true); + assertThat(copt.useAdaptiveMutex()).isTrue(); + + copt.setUseAdaptiveMutex(false); + assertThat(copt.useAdaptiveMutex()).isFalse(); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/ComparatorTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/ComparatorTest.java new file mode 100755 index 0000000000..b348218447 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/ComparatorTest.java @@ -0,0 +1,200 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; +import java.nio.file.FileSystems; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ComparatorTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void javaComparator() throws IOException, RocksDBException { + + final AbstractComparatorTest comparatorTest = new AbstractComparatorTest() { + @Override + public AbstractComparator getAscendingIntKeyComparator() { + return new Comparator(new ComparatorOptions()) { + + @Override + public String name() { + return "test.AscendingIntKeyComparator"; + } + + @Override + public int compare(final Slice a, final Slice b) { + return compareIntKeys(a.data(), b.data()); + } + }; + } + }; + + // test the round-tripability of keys written and read with the Comparator + comparatorTest.testRoundtrip(FileSystems.getDefault().getPath( + dbFolder.getRoot().getAbsolutePath())); + } + + @Test + public void javaComparatorCf() throws IOException, RocksDBException { + + final AbstractComparatorTest comparatorTest = new AbstractComparatorTest() { + @Override + public AbstractComparator getAscendingIntKeyComparator() { + return new Comparator(new ComparatorOptions()) { + + @Override + public String name() { + return "test.AscendingIntKeyComparator"; + } + + @Override + public int compare(final Slice a, final Slice b) { + return compareIntKeys(a.data(), b.data()); + } + }; + } + }; + + // test the round-tripability of keys written and read with the Comparator + comparatorTest.testRoundtripCf(FileSystems.getDefault().getPath( + dbFolder.getRoot().getAbsolutePath())); + } + + @Test + public void builtinForwardComparator() + throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setComparator(BuiltinComparator.BYTEWISE_COMPARATOR); + final RocksDB rocksDb = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { + rocksDb.put("abc1".getBytes(), "abc1".getBytes()); + rocksDb.put("abc2".getBytes(), "abc2".getBytes()); + rocksDb.put("abc3".getBytes(), 
"abc3".getBytes()); + + try(final RocksIterator rocksIterator = rocksDb.newIterator()) { + // Iterate over keys using a iterator + rocksIterator.seekToFirst(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc1".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc1".getBytes()); + rocksIterator.next(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc2".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc2".getBytes()); + rocksIterator.next(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc3".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc3".getBytes()); + rocksIterator.next(); + assertThat(rocksIterator.isValid()).isFalse(); + // Get last one + rocksIterator.seekToLast(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc3".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc3".getBytes()); + // Seek for abc + rocksIterator.seek("abc".getBytes()); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc1".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc1".getBytes()); + } + } + } + + @Test + public void builtinReverseComparator() + throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setComparator(BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR); + final RocksDB rocksDb = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { + + rocksDb.put("abc1".getBytes(), "abc1".getBytes()); + rocksDb.put("abc2".getBytes(), "abc2".getBytes()); + rocksDb.put("abc3".getBytes(), "abc3".getBytes()); + + try (final RocksIterator rocksIterator = rocksDb.newIterator()) { + // Iterate over keys using a iterator + rocksIterator.seekToFirst(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc3".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc3".getBytes()); + rocksIterator.next(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc2".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc2".getBytes()); + rocksIterator.next(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc1".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc1".getBytes()); + rocksIterator.next(); + assertThat(rocksIterator.isValid()).isFalse(); + // Get last one + rocksIterator.seekToLast(); + assertThat(rocksIterator.isValid()).isTrue(); + assertThat(rocksIterator.key()).isEqualTo( + "abc1".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc1".getBytes()); + // Will be invalid because abc is after abc1 + rocksIterator.seek("abc".getBytes()); + assertThat(rocksIterator.isValid()).isFalse(); + // Will be abc3 because the next one after abc999 + // is abc3 + rocksIterator.seek("abc999".getBytes()); + assertThat(rocksIterator.key()).isEqualTo( + "abc3".getBytes()); + assertThat(rocksIterator.value()).isEqualTo( + "abc3".getBytes()); + } + } + } + + @Test + public void builtinComparatorEnum(){ + assertThat(BuiltinComparator.BYTEWISE_COMPARATOR.ordinal()) + .isEqualTo(0); + assertThat( + BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR.ordinal()) + .isEqualTo(1); + assertThat(BuiltinComparator.values().length).isEqualTo(2); + 
assertThat(BuiltinComparator.valueOf("BYTEWISE_COMPARATOR")). + isEqualTo(BuiltinComparator.BYTEWISE_COMPARATOR); + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java new file mode 100755 index 0000000000..51b7259f6a --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/CompressionOptionsTest.java @@ -0,0 +1,20 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.Test; + + +public class CompressionOptionsTest { + @Test + public void getCompressionType() { + for (final CompressionType compressionType : CompressionType.values()) { + String libraryName = compressionType.getLibraryName(); + compressionType.equals(CompressionType.getCompressionType( + libraryName)); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java new file mode 100755 index 0000000000..523e537840 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/DBOptionsTest.java @@ -0,0 +1,362 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import java.util.Properties; +import java.util.Random; + +import static org.assertj.core.api.Assertions.assertThat; + +public class DBOptionsTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + public static final Random rand = PlatformRandomHelper. + getPlatformSpecificRandomFactory(); + + @Test + public void getDBOptionsFromProps() { + // setup sample properties + final Properties properties = new Properties(); + properties.put("allow_mmap_reads", "true"); + properties.put("bytes_per_sync", "13"); + try(final DBOptions opt = DBOptions.getDBOptionsFromProps(properties)) { + assertThat(opt).isNotNull(); + assertThat(String.valueOf(opt.allowMmapReads())). + isEqualTo(properties.get("allow_mmap_reads")); + assertThat(String.valueOf(opt.bytesPerSync())). 
+ isEqualTo(properties.get("bytes_per_sync")); + } + } + + @Test + public void failDBOptionsFromPropsWithIllegalValue() { + // setup sample properties + final Properties properties = new Properties(); + properties.put("tomato", "1024"); + properties.put("burger", "2"); + try(final DBOptions opt = DBOptions.getDBOptionsFromProps(properties)) { + assertThat(opt).isNull(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void failDBOptionsFromPropsWithNullValue() { + try(final DBOptions opt = DBOptions.getDBOptionsFromProps(null)) { + //no-op + } + } + + @Test(expected = IllegalArgumentException.class) + public void failDBOptionsFromPropsWithEmptyProps() { + try(final DBOptions opt = DBOptions.getDBOptionsFromProps( + new Properties())) { + //no-op + } + } + + @Test + public void setIncreaseParallelism() { + try(final DBOptions opt = new DBOptions()) { + final int threads = Runtime.getRuntime().availableProcessors() * 2; + opt.setIncreaseParallelism(threads); + } + } + + @Test + public void createIfMissing() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setCreateIfMissing(boolValue); + assertThat(opt.createIfMissing()).isEqualTo(boolValue); + } + } + + @Test + public void createMissingColumnFamilies() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setCreateMissingColumnFamilies(boolValue); + assertThat(opt.createMissingColumnFamilies()).isEqualTo(boolValue); + } + } + + @Test + public void errorIfExists() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setErrorIfExists(boolValue); + assertThat(opt.errorIfExists()).isEqualTo(boolValue); + } + } + + @Test + public void paranoidChecks() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setParanoidChecks(boolValue); + assertThat(opt.paranoidChecks()).isEqualTo(boolValue); + } + } + + @Test + public void maxTotalWalSize() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxTotalWalSize(longValue); + assertThat(opt.maxTotalWalSize()).isEqualTo(longValue); + } + } + + @Test + public void maxOpenFiles() { + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxOpenFiles(intValue); + assertThat(opt.maxOpenFiles()).isEqualTo(intValue); + } + } + + @Test + public void disableDataSync() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setDisableDataSync(boolValue); + assertThat(opt.disableDataSync()).isEqualTo(boolValue); + } + } + + @Test + public void useFsync() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setUseFsync(boolValue); + assertThat(opt.useFsync()).isEqualTo(boolValue); + } + } + + @Test + public void dbLogDir() { + try(final DBOptions opt = new DBOptions()) { + final String str = "path/to/DbLogDir"; + opt.setDbLogDir(str); + assertThat(opt.dbLogDir()).isEqualTo(str); + } + } + + @Test + public void walDir() { + try(final DBOptions opt = new DBOptions()) { + final String str = "path/to/WalDir"; + opt.setWalDir(str); + assertThat(opt.walDir()).isEqualTo(str); + } + } + + @Test + public void deleteObsoleteFilesPeriodMicros() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setDeleteObsoleteFilesPeriodMicros(longValue); + 
assertThat(opt.deleteObsoleteFilesPeriodMicros()).isEqualTo(longValue); + } + } + + @Test + public void maxBackgroundCompactions() { + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxBackgroundCompactions(intValue); + assertThat(opt.maxBackgroundCompactions()).isEqualTo(intValue); + } + } + + @Test + public void maxBackgroundFlushes() { + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setMaxBackgroundFlushes(intValue); + assertThat(opt.maxBackgroundFlushes()).isEqualTo(intValue); + } + } + + @Test + public void maxLogFileSize() throws RocksDBException { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxLogFileSize(longValue); + assertThat(opt.maxLogFileSize()).isEqualTo(longValue); + } + } + + @Test + public void logFileTimeToRoll() throws RocksDBException { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setLogFileTimeToRoll(longValue); + assertThat(opt.logFileTimeToRoll()).isEqualTo(longValue); + } + } + + @Test + public void keepLogFileNum() throws RocksDBException { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setKeepLogFileNum(longValue); + assertThat(opt.keepLogFileNum()).isEqualTo(longValue); + } + } + + @Test + public void maxManifestFileSize() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setMaxManifestFileSize(longValue); + assertThat(opt.maxManifestFileSize()).isEqualTo(longValue); + } + } + + @Test + public void tableCacheNumshardbits() { + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setTableCacheNumshardbits(intValue); + assertThat(opt.tableCacheNumshardbits()).isEqualTo(intValue); + } + } + + @Test + public void walSizeLimitMB() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setWalSizeLimitMB(longValue); + assertThat(opt.walSizeLimitMB()).isEqualTo(longValue); + } + } + + @Test + public void walTtlSeconds() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setWalTtlSeconds(longValue); + assertThat(opt.walTtlSeconds()).isEqualTo(longValue); + } + } + + @Test + public void manifestPreallocationSize() throws RocksDBException { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setManifestPreallocationSize(longValue); + assertThat(opt.manifestPreallocationSize()).isEqualTo(longValue); + } + } + + @Test + public void allowOsBuffer() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowOsBuffer(boolValue); + assertThat(opt.allowOsBuffer()).isEqualTo(boolValue); + } + } + + @Test + public void allowMmapReads() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowMmapReads(boolValue); + assertThat(opt.allowMmapReads()).isEqualTo(boolValue); + } + } + + @Test + public void allowMmapWrites() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowMmapWrites(boolValue); + assertThat(opt.allowMmapWrites()).isEqualTo(boolValue); + } + } + + @Test + public void isFdCloseOnExec() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setIsFdCloseOnExec(boolValue); + 
assertThat(opt.isFdCloseOnExec()).isEqualTo(boolValue); + } + } + + @Test + public void statsDumpPeriodSec() { + try(final DBOptions opt = new DBOptions()) { + final int intValue = rand.nextInt(); + opt.setStatsDumpPeriodSec(intValue); + assertThat(opt.statsDumpPeriodSec()).isEqualTo(intValue); + } + } + + @Test + public void adviseRandomOnOpen() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAdviseRandomOnOpen(boolValue); + assertThat(opt.adviseRandomOnOpen()).isEqualTo(boolValue); + } + } + + @Test + public void useAdaptiveMutex() { + try(final DBOptions opt = new DBOptions()) { + final boolean boolValue = rand.nextBoolean(); + opt.setUseAdaptiveMutex(boolValue); + assertThat(opt.useAdaptiveMutex()).isEqualTo(boolValue); + } + } + + @Test + public void bytesPerSync() { + try(final DBOptions opt = new DBOptions()) { + final long longValue = rand.nextLong(); + opt.setBytesPerSync(longValue); + assertThat(opt.bytesPerSync()).isEqualTo(longValue); + } + } + + @Test + public void rateLimiterConfig() { + try(final DBOptions options = new DBOptions(); + final DBOptions anotherOptions = new DBOptions()) { + final RateLimiterConfig rateLimiterConfig = + new GenericRateLimiterConfig(1000, 100 * 1000, 1); + options.setRateLimiterConfig(rateLimiterConfig); + // Test with parameter initialization + + anotherOptions.setRateLimiterConfig( + new GenericRateLimiterConfig(1000)); + } + } + + @Test + public void statistics() { + try(final DBOptions options = new DBOptions()) { + Statistics statistics = options.createStatistics(). + statisticsPtr(); + assertThat(statistics).isNotNull(); + + try(final DBOptions anotherOptions = new DBOptions()) { + statistics = anotherOptions.statisticsPtr(); + assertThat(statistics).isNotNull(); + } + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/DirectComparatorTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/DirectComparatorTest.java new file mode 100755 index 0000000000..abdbeada9e --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/DirectComparatorTest.java @@ -0,0 +1,52 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
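+// [Editor's note: illustrative sketch, not part of the upstream sources.
+// Unlike Comparator, DirectComparator is handed DirectSlice arguments backed
+// by off-heap ByteBuffers, so keys can be inspected without first copying
+// them into the Java heap; the test below plugs such a comparator into the
+// shared AbstractComparatorTest harness.]
+//
+//   final DirectComparator cmp = new DirectComparator(new ComparatorOptions()) {
+//     @Override
+//     public String name() { return "example.DirectComparator"; }
+//
+//     @Override
+//     public int compare(final DirectSlice a, final DirectSlice b) {
+//       return a.data().compareTo(b.data()); // ByteBuffer order, no copy
+//     }
+//   };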
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.IOException; +import java.nio.file.FileSystems; + +public class DirectComparatorTest { + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void directComparator() throws IOException, RocksDBException { + + final AbstractComparatorTest comparatorTest = new AbstractComparatorTest() { + @Override + public AbstractComparator getAscendingIntKeyComparator() { + return new DirectComparator(new ComparatorOptions()) { + + @Override + public String name() { + return "test.AscendingIntKeyDirectComparator"; + } + + @Override + public int compare(final DirectSlice a, final DirectSlice b) { + final byte ax[] = new byte[4], bx[] = new byte[4]; + a.data().get(ax); + b.data().get(bx); + return compareIntKeys(ax, bx); + } + }; + } + }; + + // test the round-tripability of keys written and read with the DirectComparator + comparatorTest.testRoundtrip(FileSystems.getDefault().getPath( + dbFolder.getRoot().getAbsolutePath())); + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java new file mode 100755 index 0000000000..2d3abea45b --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/DirectSliceTest.java @@ -0,0 +1,74 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
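+// [Editor's note: illustrative sketch, not part of the upstream sources.
+// DirectSlice only accepts direct (off-heap) ByteBuffers; wrapping a plain
+// heap array trips an assertion, which the two expected-AssertionError tests
+// below depend on.]
+//
+//   final byte[] data = "abc".getBytes();
+//   final ByteBuffer direct = ByteBuffer.allocateDirect(data.length);
+//   direct.put(data);
+//   // ok:     new DirectSlice(direct, data.length)
+//   // throws: new DirectSlice(ByteBuffer.wrap(data), data.length)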
+package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import java.nio.ByteBuffer; + +import static org.assertj.core.api.Assertions.assertThat; + +public class DirectSliceTest { + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void directSlice() { + try(final DirectSlice directSlice = new DirectSlice("abc"); + final DirectSlice otherSlice = new DirectSlice("abc")) { + assertThat(directSlice.toString()).isEqualTo("abc"); + // clear first slice + directSlice.clear(); + assertThat(directSlice.toString()).isEmpty(); + // get first char in otherslice + assertThat(otherSlice.get(0)).isEqualTo("a".getBytes()[0]); + // remove prefix + otherSlice.removePrefix(1); + assertThat(otherSlice.toString()).isEqualTo("bc"); + } + } + + @Test + public void directSliceWithByteBuffer() { + final byte[] data = "Some text".getBytes(); + final ByteBuffer buffer = ByteBuffer.allocateDirect(data.length + 1); + buffer.put(data); + buffer.put(data.length, (byte)0); + + try(final DirectSlice directSlice = new DirectSlice(buffer)) { + assertThat(directSlice.toString()).isEqualTo("Some text"); + } + } + + @Test + public void directSliceWithByteBufferAndLength() { + final byte[] data = "Some text".getBytes(); + final ByteBuffer buffer = ByteBuffer.allocateDirect(data.length); + buffer.put(data); + try(final DirectSlice directSlice = new DirectSlice(buffer, 4)) { + assertThat(directSlice.toString()).isEqualTo("Some"); + } + } + + @Test(expected = AssertionError.class) + public void directSliceInitWithoutDirectAllocation() { + final byte[] data = "Some text".getBytes(); + final ByteBuffer buffer = ByteBuffer.wrap(data); + try(final DirectSlice directSlice = new DirectSlice(buffer)) { + //no-op + } + } + + @Test(expected = AssertionError.class) + public void directSlicePrefixInitWithoutDirectAllocation() { + final byte[] data = "Some text".getBytes(); + final ByteBuffer buffer = ByteBuffer.wrap(data); + try(final DirectSlice directSlice = new DirectSlice(buffer, 4)) { + //no-op + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java new file mode 100755 index 0000000000..e5bb60fda4 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/FilterTest.java @@ -0,0 +1,39 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
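+// [Editor's note: illustrative sketch, not part of the upstream sources. A
+// Bloom filter is attached via the table format config; the int argument is
+// bits per key (10 bits per key gives roughly a 1% false-positive rate) and
+// the boolean chooses the block-based variant, matching the three
+// constructors exercised below.]
+//
+//   final Options options = new Options();
+//   final BlockBasedTableConfig tableConfig = new BlockBasedTableConfig();
+//   tableConfig.setFilter(new BloomFilter(10, false)); // full (non-block-based) filter
+//   options.setTableFormatConfig(tableConfig);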
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +public class FilterTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void filter() { + // new Bloom filter + final BlockBasedTableConfig blockConfig = new BlockBasedTableConfig(); + try(final Options options = new Options()) { + + try(final Filter bloomFilter = new BloomFilter()) { + blockConfig.setFilter(bloomFilter); + options.setTableFormatConfig(blockConfig); + } + + try(final Filter bloomFilter = new BloomFilter(10)) { + blockConfig.setFilter(bloomFilter); + options.setTableFormatConfig(blockConfig); + } + + try(final Filter bloomFilter = new BloomFilter(10, false)) { + blockConfig.setFilter(bloomFilter); + options.setTableFormatConfig(blockConfig); + } + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java new file mode 100755 index 0000000000..f3530292ae --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/FlushTest.java @@ -0,0 +1,49 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.assertj.core.api.Assertions.assertThat; + +public class FlushTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void flush() throws RocksDBException { + try(final Options options = new Options() + .setCreateIfMissing(true) + .setMaxWriteBufferNumber(10) + .setMinWriteBufferNumberToMerge(10); + final WriteOptions wOpt = new WriteOptions() + .setDisableWAL(true); + final FlushOptions flushOptions = new FlushOptions() + .setWaitForFlush(true)) { + assertThat(flushOptions.waitForFlush()).isTrue(); + + try(final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put(wOpt, "key1".getBytes(), "value1".getBytes()); + db.put(wOpt, "key2".getBytes(), "value2".getBytes()); + db.put(wOpt, "key3".getBytes(), "value3".getBytes()); + db.put(wOpt, "key4".getBytes(), "value4".getBytes()); + assertThat(db.getProperty("rocksdb.num-entries-active-mem-table")) + .isEqualTo("4"); + db.flush(flushOptions); + assertThat(db.getProperty("rocksdb.num-entries-active-mem-table")) + .isEqualTo("0"); + } + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java new file mode 100755 index 0000000000..48ecfa16a9 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/InfoLogLevelTest.java @@ -0,0 +1,107 @@ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.rocksdb.util.Environment; + +import java.io.IOException; + +import static java.nio.file.Files.readAllBytes; +import static java.nio.file.Paths.get; +import static org.assertj.core.api.Assertions.assertThat; + +public class InfoLogLevelTest { + + @ClassRule + public static final RocksMemoryResource 
rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void testInfoLogLevel() throws RocksDBException, + IOException { + try (final RocksDB db = + RocksDB.open(dbFolder.getRoot().getAbsolutePath())) { + db.put("key".getBytes(), "value".getBytes()); + assertThat(getLogContentsWithoutHeader()).isNotEmpty(); + } + } + + @Test + public void testFatalLogLevel() throws RocksDBException, + IOException { + try (final Options options = new Options(). + setCreateIfMissing(true). + setInfoLogLevel(InfoLogLevel.FATAL_LEVEL); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(options.infoLogLevel()). + isEqualTo(InfoLogLevel.FATAL_LEVEL); + db.put("key".getBytes(), "value".getBytes()); + // As InfoLogLevel is set to FATAL_LEVEL, here we expect the log + // content to be empty. + assertThat(getLogContentsWithoutHeader()).isEmpty(); + } + } + + @Test + public void testFatalLogLevelWithDBOptions() + throws RocksDBException, IOException { + try (final DBOptions dbOptions = new DBOptions(). + setInfoLogLevel(InfoLogLevel.FATAL_LEVEL); + final Options options = new Options(dbOptions, + new ColumnFamilyOptions()). + setCreateIfMissing(true); + final RocksDB db = + RocksDB.open(options, dbFolder.getRoot().getAbsolutePath())) { + assertThat(dbOptions.infoLogLevel()). + isEqualTo(InfoLogLevel.FATAL_LEVEL); + assertThat(options.infoLogLevel()). + isEqualTo(InfoLogLevel.FATAL_LEVEL); + db.put("key".getBytes(), "value".getBytes()); + assertThat(getLogContentsWithoutHeader()).isEmpty(); + } + } + + @Test(expected = IllegalArgumentException.class) + public void failIfIllegalByteValueProvided() { + InfoLogLevel.getInfoLogLevel((byte) -1); + } + + @Test + public void valueOf() { + assertThat(InfoLogLevel.valueOf("DEBUG_LEVEL")). + isEqualTo(InfoLogLevel.DEBUG_LEVEL); + } + + /** + * Read LOG file contents into String. + * + * @return LOG file contents as String. + * @throws IOException if file is not found. + */ + private String getLogContentsWithoutHeader() throws IOException { + final String separator = Environment.isWindows() ? + "\n" : System.getProperty("line.separator"); + final String[] lines = new String(readAllBytes(get( + dbFolder.getRoot().getAbsolutePath() + "/LOG"))).split(separator); + + int first_non_header = lines.length; + // Identify the last line of the header + for (int i = lines.length - 1; i >= 0; --i) { + if (lines[i].indexOf("Options.") >= 0 && lines[i].indexOf(':') >= 0) { + first_non_header = i + 1; + break; + } + } + StringBuilder builder = new StringBuilder(); + for (int i = first_non_header; i < lines.length; ++i) { + builder.append(lines[i]).append(separator); + } + return builder.toString(); + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java new file mode 100755 index 0000000000..bc341c9d21 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/KeyMayExistTest.java @@ -0,0 +1,87 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
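+// [Editor's note: illustrative sketch, not part of the upstream sources.
+// keyMayExist() is a fast, probabilistic lookup: false means the key is
+// definitely absent, true only means it may exist, and the StringBuffer is
+// filled when the value is already available in memory.]
+//
+//   // given an open RocksDB instance `db`:
+//   final StringBuffer value = new StringBuffer();
+//   if (!db.keyMayExist("key".getBytes(), value)) {
+//     // definitely not present: the expensive get() can be skipped
+//   }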
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class KeyMayExistTest {
+
+  @ClassRule
+  public static final RocksMemoryResource rocksMemoryResource =
+      new RocksMemoryResource();
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Test
+  public void keyMayExist() throws RocksDBException {
+    final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList(
+        new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY),
+        new ColumnFamilyDescriptor("new_cf".getBytes())
+    );
+
+    final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>();
+    try (final DBOptions options = new DBOptions()
+        .setCreateIfMissing(true)
+        .setCreateMissingColumnFamilies(true);
+         final RocksDB db = RocksDB.open(options,
+             dbFolder.getRoot().getAbsolutePath(),
+             cfDescriptors, columnFamilyHandleList)) {
+      try {
+        assertThat(columnFamilyHandleList.size()).
+            isEqualTo(2);
+        db.put("key".getBytes(), "value".getBytes());
+        // Test without column family
+        StringBuffer retValue = new StringBuffer();
+        boolean exists = db.keyMayExist("key".getBytes(), retValue);
+        assertThat(exists).isTrue();
+        assertThat(retValue.toString()).isEqualTo("value");
+
+        // Test without column family but with readOptions
+        try (final ReadOptions readOptions = new ReadOptions()) {
+          retValue = new StringBuffer();
+          exists = db.keyMayExist(readOptions, "key".getBytes(), retValue);
+          assertThat(exists).isTrue();
+          assertThat(retValue.toString()).isEqualTo("value");
+        }
+
+        // Test with column family
+        retValue = new StringBuffer();
+        exists = db.keyMayExist(columnFamilyHandleList.get(0), "key".getBytes(),
+            retValue);
+        assertThat(exists).isTrue();
+        assertThat(retValue.toString()).isEqualTo("value");
+
+        // Test with column family and readOptions
+        try (final ReadOptions readOptions = new ReadOptions()) {
+          retValue = new StringBuffer();
+          exists = db.keyMayExist(readOptions,
+              columnFamilyHandleList.get(0), "key".getBytes(),
+              retValue);
+          assertThat(exists).isTrue();
+          assertThat(retValue.toString()).isEqualTo("value");
+        }
+
+        // KeyMayExist in CF1 must return false
+        assertThat(db.keyMayExist(columnFamilyHandleList.get(1),
+            "key".getBytes(), retValue)).isFalse();
+      } finally {
+        for (final ColumnFamilyHandle columnFamilyHandle :
+            columnFamilyHandleList) {
+          columnFamilyHandle.close();
+        }
+      }
+    }
+  }
+}
diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java
new file mode 100755
index 0000000000..f83cff3b7f
--- /dev/null
+++ b/external/rocksdb/java/src/test/java/org/rocksdb/LoggerTest.java
@@ -0,0 +1,238 @@
+package org.rocksdb;
+
+import org.junit.ClassRule;
+import org.junit.Rule;
+import org.junit.Test;
+import org.junit.rules.TemporaryFolder;
+
+import java.util.ArrayList;
+import java.util.Arrays;
+import java.util.List;
+import java.util.concurrent.atomic.AtomicInteger;
+
+import static org.assertj.core.api.Assertions.assertThat;
+
+public class LoggerTest {
+  @ClassRule
+  public static final RocksMemoryResource rocksMemoryResource =
+      new RocksMemoryResource();
+
+  @Rule
+  public TemporaryFolder dbFolder = new TemporaryFolder();
+
+  @Test
+  public void customLogger() throws RocksDBException {
+    final AtomicInteger logMessageCounter = new AtomicInteger();
+    try (final Options options = new Options().
+        setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL).
+        setCreateIfMissing(true);
+         final Logger logger = new Logger(options) {
+           // Create new logger with max log level passed by options
+           @Override
+           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+             assertThat(logMsg).isNotNull();
+             assertThat(logMsg.length()).isGreaterThan(0);
+             logMessageCounter.incrementAndGet();
+           }
+         }
+    ) {
+      // Set custom logger to options
+      options.setLogger(logger);
+
+      try (final RocksDB db = RocksDB.open(options,
+          dbFolder.getRoot().getAbsolutePath())) {
+        // there should be more than zero received log messages in
+        // debug level.
+        assertThat(logMessageCounter.get()).isGreaterThan(0);
+      }
+    }
+  }
+
+  @Test
+  public void warnLogger() throws RocksDBException {
+    final AtomicInteger logMessageCounter = new AtomicInteger();
+    try (final Options options = new Options().
+        setInfoLogLevel(InfoLogLevel.WARN_LEVEL).
+        setCreateIfMissing(true);
+
+         final Logger logger = new Logger(options) {
+           // Create new logger with max log level passed by options
+           @Override
+           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+             assertThat(logMsg).isNotNull();
+             assertThat(logMsg.length()).isGreaterThan(0);
+             logMessageCounter.incrementAndGet();
+           }
+         }
+    ) {
+
+      // Set custom logger to options
+      options.setLogger(logger);
+
+      try (final RocksDB db = RocksDB.open(options,
+          dbFolder.getRoot().getAbsolutePath())) {
+        // there should be zero messages
+        // using warn level as log level.
+        assertThat(logMessageCounter.get()).isEqualTo(0);
+      }
+    }
+  }
+
+
+  @Test
+  public void fatalLogger() throws RocksDBException {
+    final AtomicInteger logMessageCounter = new AtomicInteger();
+    try (final Options options = new Options().
+        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
+        setCreateIfMissing(true);
+
+         final Logger logger = new Logger(options) {
+           // Create new logger with max log level passed by options
+           @Override
+           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+             assertThat(logMsg).isNotNull();
+             assertThat(logMsg.length()).isGreaterThan(0);
+             logMessageCounter.incrementAndGet();
+           }
+         }
+    ) {
+
+      // Set custom logger to options
+      options.setLogger(logger);
+
+      try (final RocksDB db = RocksDB.open(options,
+          dbFolder.getRoot().getAbsolutePath())) {
+        // there should be zero messages
+        // using fatal level as log level.
+        assertThat(logMessageCounter.get()).isEqualTo(0);
+      }
+    }
+  }
+
+  @Test
+  public void dbOptionsLogger() throws RocksDBException {
+    final AtomicInteger logMessageCounter = new AtomicInteger();
+    try (final DBOptions options = new DBOptions().
+        setInfoLogLevel(InfoLogLevel.FATAL_LEVEL).
+        setCreateIfMissing(true);
+         final Logger logger = new Logger(options) {
+           // Create new logger with max log level passed by options
+           @Override
+           protected void log(InfoLogLevel infoLogLevel, String logMsg) {
+             assertThat(logMsg).isNotNull();
+             assertThat(logMsg.length()).isGreaterThan(0);
+             logMessageCounter.incrementAndGet();
+           }
+         }
+    ) {
+      // Set custom logger to options
+      options.setLogger(logger);
+
+      final List<ColumnFamilyDescriptor> cfDescriptors =
+          Arrays.asList(
+              new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY));
+      final List<ColumnFamilyHandle> cfHandles = new ArrayList<>();
+
+      try (final RocksDB db = RocksDB.open(options,
+          dbFolder.getRoot().getAbsolutePath(),
+          cfDescriptors, cfHandles)) {
+        try {
+          // there should be zero messages
+          // using fatal level as log level.
+ assertThat(logMessageCounter.get()).isEqualTo(0); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : cfHandles) { + columnFamilyHandle.close(); + } + } + } + } + } + + @Test + public void setWarnLogLevel() { + final AtomicInteger logMessageCounter = new AtomicInteger(); + try (final Options options = new Options(). + setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). + setCreateIfMissing(true); + final Logger logger = new Logger(options) { + // Create new logger with max log level passed by options + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + assertThat(logMsg).isNotNull(); + assertThat(logMsg.length()).isGreaterThan(0); + logMessageCounter.incrementAndGet(); + } + } + ) { + assertThat(logger.infoLogLevel()). + isEqualTo(InfoLogLevel.FATAL_LEVEL); + logger.setInfoLogLevel(InfoLogLevel.WARN_LEVEL); + assertThat(logger.infoLogLevel()). + isEqualTo(InfoLogLevel.WARN_LEVEL); + } + } + + @Test + public void setInfoLogLevel() { + final AtomicInteger logMessageCounter = new AtomicInteger(); + try (final Options options = new Options(). + setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). + setCreateIfMissing(true); + final Logger logger = new Logger(options) { + // Create new logger with max log level passed by options + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + assertThat(logMsg).isNotNull(); + assertThat(logMsg.length()).isGreaterThan(0); + logMessageCounter.incrementAndGet(); + } + } + ) { + assertThat(logger.infoLogLevel()). + isEqualTo(InfoLogLevel.FATAL_LEVEL); + logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL); + assertThat(logger.infoLogLevel()). + isEqualTo(InfoLogLevel.DEBUG_LEVEL); + } + } + + @Test + public void changeLogLevelAtRuntime() throws RocksDBException { + final AtomicInteger logMessageCounter = new AtomicInteger(); + try (final Options options = new Options(). + setInfoLogLevel(InfoLogLevel.FATAL_LEVEL). + setCreateIfMissing(true); + + // Create new logger with max log level passed by options + final Logger logger = new Logger(options) { + @Override + protected void log(InfoLogLevel infoLogLevel, String logMsg) { + assertThat(logMsg).isNotNull(); + assertThat(logMsg.length()).isGreaterThan(0); + logMessageCounter.incrementAndGet(); + } + } + ) { + // Set custom logger to options + options.setLogger(logger); + + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + // there should be zero messages + // using fatal level as log level. + assertThat(logMessageCounter.get()).isEqualTo(0); + + // change log level to debug level + logger.setInfoLogLevel(InfoLogLevel.DEBUG_LEVEL); + + db.put("key".getBytes(), "value".getBytes()); + db.flush(new FlushOptions().setWaitForFlush(true)); + + // messages shall be received due to previous actions. + assertThat(logMessageCounter.get()).isNotEqualTo(0); + } + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java new file mode 100755 index 0000000000..bbd5e2055b --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/MemTableTest.java @@ -0,0 +1,111 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
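+// [Editor's note: illustrative sketch, not part of the upstream sources. A
+// MemTableConfig selects the in-memory write-buffer representation (skip
+// list by default, hash skip list, hash linked list, or vector); the
+// hash-based variants are typically combined with a prefix extractor.]
+//
+//   try (final Options options = new Options()) {
+//     options.setMemTableConfig(
+//         new HashSkipListMemTableConfig().setBucketCount(2000000));
+//     options.useFixedLengthPrefixExtractor(8); // prefix used for hashing
+//   }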
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class MemTableTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void hashSkipListMemTable() throws RocksDBException { + try(final Options options = new Options()) { + // Test HashSkipListMemTableConfig + HashSkipListMemTableConfig memTableConfig = + new HashSkipListMemTableConfig(); + assertThat(memTableConfig.bucketCount()). + isEqualTo(1000000); + memTableConfig.setBucketCount(2000000); + assertThat(memTableConfig.bucketCount()). + isEqualTo(2000000); + assertThat(memTableConfig.height()). + isEqualTo(4); + memTableConfig.setHeight(5); + assertThat(memTableConfig.height()). + isEqualTo(5); + assertThat(memTableConfig.branchingFactor()). + isEqualTo(4); + memTableConfig.setBranchingFactor(6); + assertThat(memTableConfig.branchingFactor()). + isEqualTo(6); + options.setMemTableConfig(memTableConfig); + } + } + + @Test + public void skipListMemTable() throws RocksDBException { + try(final Options options = new Options()) { + SkipListMemTableConfig skipMemTableConfig = + new SkipListMemTableConfig(); + assertThat(skipMemTableConfig.lookahead()). + isEqualTo(0); + skipMemTableConfig.setLookahead(20); + assertThat(skipMemTableConfig.lookahead()). + isEqualTo(20); + options.setMemTableConfig(skipMemTableConfig); + } + } + + @Test + public void hashLinkedListMemTable() throws RocksDBException { + try(final Options options = new Options()) { + HashLinkedListMemTableConfig hashLinkedListMemTableConfig = + new HashLinkedListMemTableConfig(); + assertThat(hashLinkedListMemTableConfig.bucketCount()). + isEqualTo(50000); + hashLinkedListMemTableConfig.setBucketCount(100000); + assertThat(hashLinkedListMemTableConfig.bucketCount()). + isEqualTo(100000); + assertThat(hashLinkedListMemTableConfig.hugePageTlbSize()). + isEqualTo(0); + hashLinkedListMemTableConfig.setHugePageTlbSize(1); + assertThat(hashLinkedListMemTableConfig.hugePageTlbSize()). + isEqualTo(1); + assertThat(hashLinkedListMemTableConfig. + bucketEntriesLoggingThreshold()). + isEqualTo(4096); + hashLinkedListMemTableConfig. + setBucketEntriesLoggingThreshold(200); + assertThat(hashLinkedListMemTableConfig. + bucketEntriesLoggingThreshold()). + isEqualTo(200); + assertThat(hashLinkedListMemTableConfig. + ifLogBucketDistWhenFlush()).isTrue(); + hashLinkedListMemTableConfig. + setIfLogBucketDistWhenFlush(false); + assertThat(hashLinkedListMemTableConfig. + ifLogBucketDistWhenFlush()).isFalse(); + assertThat(hashLinkedListMemTableConfig. + thresholdUseSkiplist()). + isEqualTo(256); + hashLinkedListMemTableConfig.setThresholdUseSkiplist(29); + assertThat(hashLinkedListMemTableConfig. + thresholdUseSkiplist()). + isEqualTo(29); + options.setMemTableConfig(hashLinkedListMemTableConfig); + } + } + + @Test + public void vectorMemTable() throws RocksDBException { + try(final Options options = new Options()) { + VectorMemTableConfig vectorMemTableConfig = + new VectorMemTableConfig(); + assertThat(vectorMemTableConfig.reservedSize()). + isEqualTo(0); + vectorMemTableConfig.setReservedSize(123); + assertThat(vectorMemTableConfig.reservedSize()). 
+ isEqualTo(123); + options.setMemTableConfig(vectorMemTableConfig); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java new file mode 100755 index 0000000000..d38df31958 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/MergeTest.java @@ -0,0 +1,241 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.Arrays; +import java.util.List; +import java.util.ArrayList; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.assertj.core.api.Assertions.assertThat; + +public class MergeTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void stringOption() + throws InterruptedException, RocksDBException { + try (final Options opt = new Options() + .setCreateIfMissing(true) + .setMergeOperatorName("stringappend"); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + // writing aa under key + db.put("key".getBytes(), "aa".getBytes()); + // merge bb under key + db.merge("key".getBytes(), "bb".getBytes()); + + final byte[] value = db.get("key".getBytes()); + final String strValue = new String(value); + assertThat(strValue).isEqualTo("aa,bb"); + } + } + + @Test + public void cFStringOption() + throws InterruptedException, RocksDBException { + + try (final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions() + .setMergeOperatorName("stringappend"); + final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions() + .setMergeOperatorName("stringappend") + ) { + final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1), + new ColumnFamilyDescriptor("new_cf".getBytes(), cfOpt2) + ); + + final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions opt = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList)) { + try { + // writing aa under key + db.put(columnFamilyHandleList.get(1), + "cfkey".getBytes(), "aa".getBytes()); + // merge bb under key + db.merge(columnFamilyHandleList.get(1), + "cfkey".getBytes(), "bb".getBytes()); + + byte[] value = db.get(columnFamilyHandleList.get(1), + "cfkey".getBytes()); + String strValue = new String(value); + assertThat(strValue).isEqualTo("aa,bb"); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandleList) { + handle.close(); + } + } + } + } + } + + @Test + public void operatorOption() + throws InterruptedException, RocksDBException { + final StringAppendOperator stringAppendOperator = + new StringAppendOperator(); + try (final Options opt = new Options() + .setCreateIfMissing(true) + .setMergeOperator(stringAppendOperator); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + // writing aa under key + db.put("key".getBytes(), "aa".getBytes()); + + // merging bb under key + db.merge("key".getBytes(), "bb".getBytes()); + + final byte[] value = db.get("key".getBytes()); + final String strValue = new String(value); + + assertThat(strValue).isEqualTo("aa,bb"); + } + } + + @Test + public void cFOperatorOption() + throws InterruptedException, RocksDBException { + final StringAppendOperator stringAppendOperator = + new StringAppendOperator(); + try (final ColumnFamilyOptions cfOpt1 = new ColumnFamilyOptions() + .setMergeOperator(stringAppendOperator); + final ColumnFamilyOptions cfOpt2 = new ColumnFamilyOptions() + .setMergeOperator(stringAppendOperator) + ) { + final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpt1), + new ColumnFamilyDescriptor("new_cf".getBytes(), cfOpt2) + ); + final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions opt = new DBOptions() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + columnFamilyHandleList) + ) { + try { + // writing aa under key + db.put(columnFamilyHandleList.get(1), + "cfkey".getBytes(), "aa".getBytes()); + // merge bb under key + db.merge(columnFamilyHandleList.get(1), + "cfkey".getBytes(), "bb".getBytes()); + byte[] value = db.get(columnFamilyHandleList.get(1), + "cfkey".getBytes()); + String strValue = new String(value); + + // Test also with createColumnFamily + try (final ColumnFamilyOptions cfHandleOpts = + new ColumnFamilyOptions() + .setMergeOperator(stringAppendOperator); + final ColumnFamilyHandle cfHandle = + db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf2".getBytes(), + cfHandleOpts)) + ) { + // writing xx under cfkey2 + db.put(cfHandle, "cfkey2".getBytes(), "xx".getBytes()); + // merge yy under cfkey2 + db.merge(cfHandle, new WriteOptions(), "cfkey2".getBytes(), + "yy".getBytes()); + value = db.get(cfHandle, "cfkey2".getBytes()); + String strValueTmpCf = new String(value); + + assertThat(strValue).isEqualTo("aa,bb"); + assertThat(strValueTmpCf).isEqualTo("xx,yy"); + } + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + } + + @Test + public void operatorGcBehaviour() + throws RocksDBException { + final StringAppendOperator stringAppendOperator + = new StringAppendOperator(); + try (final Options opt = new Options() + .setCreateIfMissing(true) + .setMergeOperator(stringAppendOperator); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + + // test reuse + try (final Options opt = new Options() + .setMergeOperator(stringAppendOperator); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + + // test param init + try (final Options opt = new Options() + .setMergeOperator(new StringAppendOperator()); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + + // test replace one with another merge operator instance + try (final Options opt = new Options() + .setMergeOperator(stringAppendOperator)) { + final StringAppendOperator newStringAppendOperator + = new StringAppendOperator(); + opt.setMergeOperator(newStringAppendOperator); + try (final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + } + } + + @Test + public void emptyStringInSetMergeOperatorByName() { + try (final Options opt = new Options() + .setMergeOperatorName(""); + final ColumnFamilyOptions cOpt = new ColumnFamilyOptions() +
.setMergeOperatorName("")) { + //no-op + } + } + + @Test(expected = IllegalArgumentException.class) + public void nullStringInSetMergeOperatorByNameOptions() { + try (final Options opt = new Options()) { + opt.setMergeOperatorName(null); + } + } + + @Test(expected = IllegalArgumentException.class) + public void + nullStringInSetMergeOperatorByNameColumnFamilyOptions() { + try (final ColumnFamilyOptions opt = new ColumnFamilyOptions()) { + opt.setMergeOperatorName(null); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java new file mode 100755 index 0000000000..bbe2957197 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/MixedOptionsTest.java @@ -0,0 +1,55 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class MixedOptionsTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void mixedOptionsTest(){ + // Set a table factory and check the names + try(final Filter bloomFilter = new BloomFilter(); + final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions() + .setTableFormatConfig( + new BlockBasedTableConfig().setFilter(bloomFilter)) + ) { + assertThat(cfOptions.tableFactoryName()).isEqualTo( + "BlockBasedTable"); + cfOptions.setTableFormatConfig(new PlainTableConfig()); + assertThat(cfOptions.tableFactoryName()).isEqualTo("PlainTable"); + // Initialize a dbOptions object from cf options and + // db options + try (final DBOptions dbOptions = new DBOptions(); + final Options options = new Options(dbOptions, cfOptions)) { + assertThat(options.tableFactoryName()).isEqualTo("PlainTable"); + // Free instances + } + } + + // Test Optimize for statements + try(final ColumnFamilyOptions cfOptions = new ColumnFamilyOptions()) { + cfOptions.optimizeUniversalStyleCompaction(); + cfOptions.optimizeLevelStyleCompaction(); + cfOptions.optimizeForPointLookup(1024); + try(final Options options = new Options()) { + options.optimizeLevelStyleCompaction(); + options.optimizeLevelStyleCompaction(400); + options.optimizeUniversalStyleCompaction(); + options.optimizeUniversalStyleCompaction(400); + options.optimizeForPointLookup(1024); + options.prepareForBulkLoad(); + } + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java new file mode 100755 index 0000000000..186108ffb6 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/NativeLibraryLoaderTest.java @@ -0,0 +1,41 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
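+// The tests below cover the helper that extracts the bundled JNI +// library from the rocksdbjni jar into a scratch directory before +// it is loaded. Extracting a second time into the same directory is +// expected to overwrite the first copy rather than fail, which is +// what overridesExistingLibrary() verifies.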
+package org.rocksdb; + +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.rocksdb.util.Environment; + +import java.io.File; +import java.io.IOException; +import java.nio.file.*; + +import static org.assertj.core.api.Assertions.assertThat; + +public class NativeLibraryLoaderTest { + + @Rule + public TemporaryFolder temporaryFolder = new TemporaryFolder(); + + @Test + public void tempFolder() throws IOException { + NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp( + temporaryFolder.getRoot().getAbsolutePath()); + final Path path = Paths.get(temporaryFolder.getRoot().getAbsolutePath(), + Environment.getJniLibraryFileName("rocksdb")); + assertThat(Files.exists(path)).isTrue(); + assertThat(Files.isReadable(path)).isTrue(); + } + + @Test + public void overridesExistingLibrary() throws IOException { + File first = NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp( + temporaryFolder.getRoot().getAbsolutePath()); + NativeLibraryLoader.getInstance().loadLibraryFromJarToTemp( + temporaryFolder.getRoot().getAbsolutePath()); + assertThat(first.exists()).isTrue(); + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java new file mode 100755 index 0000000000..87e2040b95 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/OptionsTest.java @@ -0,0 +1,743 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.ArrayList; +import java.util.List; +import java.util.Random; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + + +public class OptionsTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + public static final Random rand = PlatformRandomHelper. 
+ getPlatformSpecificRandomFactory(); + + @Test + public void setIncreaseParallelism() { + try (final Options opt = new Options()) { + final int threads = Runtime.getRuntime().availableProcessors() * 2; + opt.setIncreaseParallelism(threads); + } + } + + @Test + public void writeBufferSize() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setWriteBufferSize(longValue); + assertThat(opt.writeBufferSize()).isEqualTo(longValue); + } + } + + @Test + public void maxWriteBufferNumber() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMaxWriteBufferNumber(intValue); + assertThat(opt.maxWriteBufferNumber()).isEqualTo(intValue); + } + } + + @Test + public void minWriteBufferNumberToMerge() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMinWriteBufferNumberToMerge(intValue); + assertThat(opt.minWriteBufferNumberToMerge()).isEqualTo(intValue); + } + } + + @Test + public void numLevels() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setNumLevels(intValue); + assertThat(opt.numLevels()).isEqualTo(intValue); + } + } + + @Test + public void levelZeroFileNumCompactionTrigger() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setLevelZeroFileNumCompactionTrigger(intValue); + assertThat(opt.levelZeroFileNumCompactionTrigger()).isEqualTo(intValue); + } + } + + @Test + public void levelZeroSlowdownWritesTrigger() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setLevelZeroSlowdownWritesTrigger(intValue); + assertThat(opt.levelZeroSlowdownWritesTrigger()).isEqualTo(intValue); + } + } + + @Test + public void levelZeroStopWritesTrigger() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setLevelZeroStopWritesTrigger(intValue); + assertThat(opt.levelZeroStopWritesTrigger()).isEqualTo(intValue); + } + } + + @Test + public void targetFileSizeBase() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setTargetFileSizeBase(longValue); + assertThat(opt.targetFileSizeBase()).isEqualTo(longValue); + } + } + + @Test + public void targetFileSizeMultiplier() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setTargetFileSizeMultiplier(intValue); + assertThat(opt.targetFileSizeMultiplier()).isEqualTo(intValue); + } + } + + @Test + public void maxBytesForLevelBase() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMaxBytesForLevelBase(longValue); + assertThat(opt.maxBytesForLevelBase()).isEqualTo(longValue); + } + } + + @Test + public void levelCompactionDynamicLevelBytes() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setLevelCompactionDynamicLevelBytes(boolValue); + assertThat(opt.levelCompactionDynamicLevelBytes()) + .isEqualTo(boolValue); + } + } + + @Test + public void maxBytesForLevelMultiplier() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMaxBytesForLevelMultiplier(intValue); + assertThat(opt.maxBytesForLevelMultiplier()).isEqualTo(intValue); + } + } + + @Test + public void expandedCompactionFactor() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setExpandedCompactionFactor(intValue); + 
assertThat(opt.expandedCompactionFactor()).isEqualTo(intValue); + } + } + + @Test + public void sourceCompactionFactor() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setSourceCompactionFactor(intValue); + assertThat(opt.sourceCompactionFactor()).isEqualTo(intValue); + } + } + + @Test + public void maxGrandparentOverlapFactor() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMaxGrandparentOverlapFactor(intValue); + assertThat(opt.maxGrandparentOverlapFactor()).isEqualTo(intValue); + } + } + + @Test + public void softRateLimit() { + try (final Options opt = new Options()) { + final double doubleValue = rand.nextDouble(); + opt.setSoftRateLimit(doubleValue); + assertThat(opt.softRateLimit()).isEqualTo(doubleValue); + } + } + + @Test + public void hardRateLimit() { + try (final Options opt = new Options()) { + final double doubleValue = rand.nextDouble(); + opt.setHardRateLimit(doubleValue); + assertThat(opt.hardRateLimit()).isEqualTo(doubleValue); + } + } + + @Test + public void rateLimitDelayMaxMilliseconds() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setRateLimitDelayMaxMilliseconds(intValue); + assertThat(opt.rateLimitDelayMaxMilliseconds()).isEqualTo(intValue); + } + } + + @Test + public void arenaBlockSize() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setArenaBlockSize(longValue); + assertThat(opt.arenaBlockSize()).isEqualTo(longValue); + } + } + + @Test + public void disableAutoCompactions() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setDisableAutoCompactions(boolValue); + assertThat(opt.disableAutoCompactions()).isEqualTo(boolValue); + } + } + + @Test + public void purgeRedundantKvsWhileFlush() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setPurgeRedundantKvsWhileFlush(boolValue); + assertThat(opt.purgeRedundantKvsWhileFlush()).isEqualTo(boolValue); + } + } + + @Test + public void verifyChecksumsInCompaction() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setVerifyChecksumsInCompaction(boolValue); + assertThat(opt.verifyChecksumsInCompaction()).isEqualTo(boolValue); + } + } + + @Test + public void maxSequentialSkipInIterations() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMaxSequentialSkipInIterations(longValue); + assertThat(opt.maxSequentialSkipInIterations()).isEqualTo(longValue); + } + } + + @Test + public void inplaceUpdateSupport() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setInplaceUpdateSupport(boolValue); + assertThat(opt.inplaceUpdateSupport()).isEqualTo(boolValue); + } + } + + @Test + public void inplaceUpdateNumLocks() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setInplaceUpdateNumLocks(longValue); + assertThat(opt.inplaceUpdateNumLocks()).isEqualTo(longValue); + } + } + + @Test + public void memtablePrefixBloomSizeRatio() { + try (final Options opt = new Options()) { + final double doubleValue = rand.nextDouble(); + opt.setMemtablePrefixBloomSizeRatio(doubleValue); + assertThat(opt.memtablePrefixBloomSizeRatio()).isEqualTo(doubleValue); + } + } + + @Test + public void bloomLocality() { + try (final Options opt = new 
Options()) { + final int intValue = rand.nextInt(); + opt.setBloomLocality(intValue); + assertThat(opt.bloomLocality()).isEqualTo(intValue); + } + } + + @Test + public void maxSuccessiveMerges() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMaxSuccessiveMerges(longValue); + assertThat(opt.maxSuccessiveMerges()).isEqualTo(longValue); + } + } + + @Test + public void minPartialMergeOperands() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMinPartialMergeOperands(intValue); + assertThat(opt.minPartialMergeOperands()).isEqualTo(intValue); + } + } + + @Test + public void optimizeFiltersForHits() { + try (final Options opt = new Options()) { + final boolean aBoolean = rand.nextBoolean(); + opt.setOptimizeFiltersForHits(aBoolean); + assertThat(opt.optimizeFiltersForHits()).isEqualTo(aBoolean); + } + } + + @Test + public void createIfMissing() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setCreateIfMissing(boolValue); + assertThat(opt.createIfMissing()). + isEqualTo(boolValue); + } + } + + @Test + public void createMissingColumnFamilies() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setCreateMissingColumnFamilies(boolValue); + assertThat(opt.createMissingColumnFamilies()). + isEqualTo(boolValue); + } + } + + @Test + public void errorIfExists() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setErrorIfExists(boolValue); + assertThat(opt.errorIfExists()).isEqualTo(boolValue); + } + } + + @Test + public void paranoidChecks() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setParanoidChecks(boolValue); + assertThat(opt.paranoidChecks()). + isEqualTo(boolValue); + } + } + + @Test + public void maxTotalWalSize() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMaxTotalWalSize(longValue); + assertThat(opt.maxTotalWalSize()). + isEqualTo(longValue); + } + } + + @Test + public void maxOpenFiles() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMaxOpenFiles(intValue); + assertThat(opt.maxOpenFiles()).isEqualTo(intValue); + } + } + + @Test + public void disableDataSync() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setDisableDataSync(boolValue); + assertThat(opt.disableDataSync()). + isEqualTo(boolValue); + } + } + + @Test + public void useFsync() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setUseFsync(boolValue); + assertThat(opt.useFsync()).isEqualTo(boolValue); + } + } + + @Test + public void dbLogDir() { + try (final Options opt = new Options()) { + final String str = "path/to/DbLogDir"; + opt.setDbLogDir(str); + assertThat(opt.dbLogDir()).isEqualTo(str); + } + } + + @Test + public void walDir() { + try (final Options opt = new Options()) { + final String str = "path/to/WalDir"; + opt.setWalDir(str); + assertThat(opt.walDir()).isEqualTo(str); + } + } + + @Test + public void deleteObsoleteFilesPeriodMicros() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setDeleteObsoleteFilesPeriodMicros(longValue); + assertThat(opt.deleteObsoleteFilesPeriodMicros()). 
+ isEqualTo(longValue); + } + } + + @Test + public void maxBackgroundCompactions() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMaxBackgroundCompactions(intValue); + assertThat(opt.maxBackgroundCompactions()). + isEqualTo(intValue); + } + } + + @Test + public void maxBackgroundFlushes() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setMaxBackgroundFlushes(intValue); + assertThat(opt.maxBackgroundFlushes()). + isEqualTo(intValue); + } + } + + @Test + public void maxLogFileSize() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMaxLogFileSize(longValue); + assertThat(opt.maxLogFileSize()).isEqualTo(longValue); + } + } + + @Test + public void logFileTimeToRoll() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setLogFileTimeToRoll(longValue); + assertThat(opt.logFileTimeToRoll()). + isEqualTo(longValue); + } + } + + @Test + public void keepLogFileNum() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setKeepLogFileNum(longValue); + assertThat(opt.keepLogFileNum()).isEqualTo(longValue); + } + } + + @Test + public void maxManifestFileSize() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setMaxManifestFileSize(longValue); + assertThat(opt.maxManifestFileSize()). + isEqualTo(longValue); + } + } + + @Test + public void tableCacheNumshardbits() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setTableCacheNumshardbits(intValue); + assertThat(opt.tableCacheNumshardbits()). + isEqualTo(intValue); + } + } + + @Test + public void walSizeLimitMB() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setWalSizeLimitMB(longValue); + assertThat(opt.walSizeLimitMB()).isEqualTo(longValue); + } + } + + @Test + public void walTtlSeconds() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setWalTtlSeconds(longValue); + assertThat(opt.walTtlSeconds()).isEqualTo(longValue); + } + } + + @Test + public void manifestPreallocationSize() throws RocksDBException { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setManifestPreallocationSize(longValue); + assertThat(opt.manifestPreallocationSize()). 
+ isEqualTo(longValue); + } + } + + @Test + public void allowOsBuffer() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowOsBuffer(boolValue); + assertThat(opt.allowOsBuffer()).isEqualTo(boolValue); + } + } + + @Test + public void allowMmapReads() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowMmapReads(boolValue); + assertThat(opt.allowMmapReads()).isEqualTo(boolValue); + } + } + + @Test + public void allowMmapWrites() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAllowMmapWrites(boolValue); + assertThat(opt.allowMmapWrites()).isEqualTo(boolValue); + } + } + + @Test + public void isFdCloseOnExec() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setIsFdCloseOnExec(boolValue); + assertThat(opt.isFdCloseOnExec()).isEqualTo(boolValue); + } + } + + @Test + public void statsDumpPeriodSec() { + try (final Options opt = new Options()) { + final int intValue = rand.nextInt(); + opt.setStatsDumpPeriodSec(intValue); + assertThat(opt.statsDumpPeriodSec()).isEqualTo(intValue); + } + } + + @Test + public void adviseRandomOnOpen() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setAdviseRandomOnOpen(boolValue); + assertThat(opt.adviseRandomOnOpen()).isEqualTo(boolValue); + } + } + + @Test + public void useAdaptiveMutex() { + try (final Options opt = new Options()) { + final boolean boolValue = rand.nextBoolean(); + opt.setUseAdaptiveMutex(boolValue); + assertThat(opt.useAdaptiveMutex()).isEqualTo(boolValue); + } + } + + @Test + public void bytesPerSync() { + try (final Options opt = new Options()) { + final long longValue = rand.nextLong(); + opt.setBytesPerSync(longValue); + assertThat(opt.bytesPerSync()).isEqualTo(longValue); + } + } + + @Test + public void env() { + try (final Options options = new Options(); + final Env env = Env.getDefault()) { + options.setEnv(env); + assertThat(options.getEnv()).isSameAs(env); + } + } + + @Test + public void linkageOfPrepMethods() { + try (final Options options = new Options()) { + options.optimizeUniversalStyleCompaction(); + options.optimizeUniversalStyleCompaction(4000); + options.optimizeLevelStyleCompaction(); + options.optimizeLevelStyleCompaction(3000); + options.optimizeForPointLookup(10); + options.prepareForBulkLoad(); + } + } + + @Test + public void compressionTypes() { + try (final Options options = new Options()) { + for (final CompressionType compressionType : + CompressionType.values()) { + options.setCompressionType(compressionType); + assertThat(options.compressionType()). + isEqualTo(compressionType); + assertThat(CompressionType.valueOf("NO_COMPRESSION")). 
+ isEqualTo(CompressionType.NO_COMPRESSION); + } + } + + @Test + public void compressionPerLevel() { + try (final ColumnFamilyOptions columnFamilyOptions = + new ColumnFamilyOptions()) { + assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty(); + List<CompressionType> compressionTypeList = + new ArrayList<>(); + for (int i = 0; i < columnFamilyOptions.numLevels(); i++) { + compressionTypeList.add(CompressionType.NO_COMPRESSION); + } + columnFamilyOptions.setCompressionPerLevel(compressionTypeList); + compressionTypeList = columnFamilyOptions.compressionPerLevel(); + for (final CompressionType compressionType : compressionTypeList) { + assertThat(compressionType).isEqualTo( + CompressionType.NO_COMPRESSION); + } + } + } + + @Test + public void differentCompressionsPerLevel() { + try (final ColumnFamilyOptions columnFamilyOptions = + new ColumnFamilyOptions()) { + columnFamilyOptions.setNumLevels(3); + + assertThat(columnFamilyOptions.compressionPerLevel()).isEmpty(); + List<CompressionType> compressionTypeList = new ArrayList<>(); + + compressionTypeList.add(CompressionType.BZLIB2_COMPRESSION); + compressionTypeList.add(CompressionType.SNAPPY_COMPRESSION); + compressionTypeList.add(CompressionType.LZ4_COMPRESSION); + + columnFamilyOptions.setCompressionPerLevel(compressionTypeList); + compressionTypeList = columnFamilyOptions.compressionPerLevel(); + + assertThat(compressionTypeList.size()).isEqualTo(3); + assertThat(compressionTypeList). + containsExactly( + CompressionType.BZLIB2_COMPRESSION, + CompressionType.SNAPPY_COMPRESSION, + CompressionType.LZ4_COMPRESSION); + + } + } + + @Test + public void compactionStyles() { + try (final Options options = new Options()) { + for (final CompactionStyle compactionStyle : + CompactionStyle.values()) { + options.setCompactionStyle(compactionStyle); + assertThat(options.compactionStyle()). + isEqualTo(compactionStyle); + assertThat(CompactionStyle.valueOf("FIFO")). + isEqualTo(CompactionStyle.FIFO); + } + } + } + + @Test + public void maxTableFilesSizeFIFO() { + try (final Options opt = new Options()) { + long longValue = rand.nextLong(); + // Size has to be positive + longValue = (longValue < 0) ? -longValue : longValue; + longValue = (longValue == 0) ? longValue + 1 : longValue; + opt.setMaxTableFilesSizeFIFO(longValue); + assertThat(opt.maxTableFilesSizeFIFO()). + isEqualTo(longValue); + } + } + + @Test + public void rateLimiterConfig() { + try (final Options options = new Options(); + final Options anotherOptions = new Options()) { + final RateLimiterConfig rateLimiterConfig = + new GenericRateLimiterConfig(1000, 100 * 1000, 1); + options.setRateLimiterConfig(rateLimiterConfig); + // Test with parameter initialization + + anotherOptions.setRateLimiterConfig( + new GenericRateLimiterConfig(1000)); + } + } + + @Test + public void shouldSetTestPrefixExtractor() { + try (final Options options = new Options()) { + options.useFixedLengthPrefixExtractor(100); + options.useFixedLengthPrefixExtractor(10); + } + } + + @Test + public void shouldSetTestCappedPrefixExtractor() { + try (final Options options = new Options()) { + options.useCappedPrefixExtractor(100); + options.useCappedPrefixExtractor(10); + } + } + + + @Test + public void shouldTestMemTableFactoryName() + throws RocksDBException { + try (final Options options = new Options()) { + options.setMemTableConfig(new VectorMemTableConfig()); + assertThat(options.memTableFactoryName()).
+ isEqualTo("VectorRepFactory"); + options.setMemTableConfig( + new HashLinkedListMemTableConfig()); + assertThat(options.memTableFactoryName()). + isEqualTo("HashLinkedListRepFactory"); + } + } + + @Test + public void statistics() { + try (final Options options = new Options()) { + Statistics statistics = options.createStatistics(). + statisticsPtr(); + assertThat(statistics).isNotNull(); + try (final Options anotherOptions = new Options()) { + statistics = anotherOptions.statisticsPtr(); + assertThat(statistics).isNotNull(); + } + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java new file mode 100755 index 0000000000..05bd13863d --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/PlainTableConfigTest.java @@ -0,0 +1,89 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class PlainTableConfigTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void keySize() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setKeySize(5); + assertThat(plainTableConfig.keySize()). + isEqualTo(5); + } + + @Test + public void bloomBitsPerKey() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setBloomBitsPerKey(11); + assertThat(plainTableConfig.bloomBitsPerKey()). + isEqualTo(11); + } + + @Test + public void hashTableRatio() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setHashTableRatio(0.95); + assertThat(plainTableConfig.hashTableRatio()). + isEqualTo(0.95); + } + + @Test + public void indexSparseness() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setIndexSparseness(18); + assertThat(plainTableConfig.indexSparseness()). + isEqualTo(18); + } + + @Test + public void hugePageTlbSize() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setHugePageTlbSize(1); + assertThat(plainTableConfig.hugePageTlbSize()). + isEqualTo(1); + } + + @Test + public void encodingType() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setEncodingType(EncodingType.kPrefix); + assertThat(plainTableConfig.encodingType()).isEqualTo( + EncodingType.kPrefix); + } + + @Test + public void fullScanMode() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setFullScanMode(true); + assertThat(plainTableConfig.fullScanMode()).isTrue(); } + + @Test + public void storeIndexInFile() { + PlainTableConfig plainTableConfig = new PlainTableConfig(); + plainTableConfig.setStoreIndexInFile(true); + assertThat(plainTableConfig.storeIndexInFile()). 
+ isTrue(); + } + + @Test + public void plainTableConfig() { + try(final Options opt = new Options()) { + final PlainTableConfig plainTableConfig = new PlainTableConfig(); + opt.setTableFormatConfig(plainTableConfig); + assertThat(opt.tableFactoryName()).isEqualTo("PlainTable"); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java b/external/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java new file mode 100755 index 0000000000..b437e7f97b --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/PlatformRandomHelper.java @@ -0,0 +1,58 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.Random; + +/** + * Helper class to get the appropriate Random class instance dependent + * on the current platform architecture (32bit vs 64bit) + */ +public class PlatformRandomHelper { + /** + * Determine if OS is 32-Bit/64-Bit + * + * @return boolean value indicating if operating system is 64 Bit. + */ + public static boolean isOs64Bit(){ + final boolean is64Bit; + if (System.getProperty("os.name").contains("Windows")) { + is64Bit = (System.getenv("ProgramFiles(x86)") != null); + } else { + is64Bit = (System.getProperty("os.arch").contains("64")); + } + return is64Bit; + } + + /** + * Factory to get a platform specific Random instance + * + * @return {@link java.util.Random} instance. + */ + public static Random getPlatformSpecificRandomFactory(){ + if (isOs64Bit()) { + return new Random(); + } + return new Random32Bit(); + } + + /** + * Random32Bit is a class which overrides {@code nextLong} to + * provide random numbers which fit in size_t. This workaround + * is necessary because there is no unsigned_int < Java 8 + */ + private static class Random32Bit extends Random { + @Override + public long nextLong(){ + return this.nextInt(Integer.MAX_VALUE); + } + } + + /** + * Utility class constructor + */ + private PlatformRandomHelper() { } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java new file mode 100755 index 0000000000..d993c91484 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/ReadOnlyTest.java @@ -0,0 +1,305 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
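+// openReadOnly() can attach to a database that is already held open +// by a read-write instance (readOnlyOpen() below relies on this), +// while every mutating call on a read-only handle (put, remove and +// WriteBatch writes) is expected to surface a RocksDBException, +// which the failTo*() tests assert via @Test(expected = ...).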
+package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ReadOnlyTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void readOnlyOpen() throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put("key".getBytes(), "value".getBytes()); + try (final RocksDB db2 = RocksDB.openReadOnly( + dbFolder.getRoot().getAbsolutePath())) { + assertThat(new String(db2.get("key".getBytes()))). + isEqualTo("value"); + } + } + + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List<ColumnFamilyDescriptor> cfDescriptors = new ArrayList<>(); + cfDescriptors.add(new ColumnFamilyDescriptor( + RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts)); + + final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>(); + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath(), + cfDescriptors, columnFamilyHandleList)) { + try (final ColumnFamilyOptions newCfOpts = new ColumnFamilyOptions(); + final ColumnFamilyOptions newCf2Opts = new ColumnFamilyOptions() + ) { + columnFamilyHandleList.add(db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf".getBytes(), newCfOpts))); + columnFamilyHandleList.add(db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf2".getBytes(), newCf2Opts))); + db.put(columnFamilyHandleList.get(2), "key2".getBytes(), + "value2".getBytes()); + + final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList = + new ArrayList<>(); + try (final RocksDB db2 = RocksDB.openReadOnly( + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + readOnlyColumnFamilyHandleList)) { + try (final ColumnFamilyOptions newCfOpts2 = + new ColumnFamilyOptions(); + final ColumnFamilyOptions newCf2Opts2 = + new ColumnFamilyOptions() + ) { + assertThat(db2.get("key2".getBytes())).isNull(); + assertThat(db2.get(readOnlyColumnFamilyHandleList.get(0), + "key2".getBytes())). + isNull(); + cfDescriptors.clear(); + cfDescriptors.add( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, + newCfOpts2)); + cfDescriptors.add(new ColumnFamilyDescriptor("new_cf2".getBytes(), + newCf2Opts2)); + + final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList2 + = new ArrayList<>(); + try (final RocksDB db3 = RocksDB.openReadOnly( + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + readOnlyColumnFamilyHandleList2)) { + try { + assertThat(new String(db3.get( + readOnlyColumnFamilyHandleList2.get(1), + "key2".getBytes()))).isEqualTo("value2"); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList2) { + columnFamilyHandle.close(); + } + } + } + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + }
+ + @Test(expected = RocksDBException.class) + public void failToWriteInReadOnly() throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true)) { + + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + } + + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) + ); + + final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList = + new ArrayList<>(); + try (final RocksDB rDb = RocksDB.openReadOnly( + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + readOnlyColumnFamilyHandleList)) { + try { + // test that put fails in readonly mode + rDb.put("key".getBytes(), "value".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + } + + @Test(expected = RocksDBException.class) + public void failToCFWriteInReadOnly() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) + ); + final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList = + new ArrayList<>(); + try (final RocksDB rDb = RocksDB.openReadOnly( + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + readOnlyColumnFamilyHandleList)) { + try { + rDb.put(readOnlyColumnFamilyHandleList.get(0), + "key".getBytes(), "value".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + } + + @Test(expected = RocksDBException.class) + public void failToRemoveInReadOnly() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) + ); + + final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList = + new ArrayList<>(); + + try (final RocksDB rDb = RocksDB.openReadOnly( + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + readOnlyColumnFamilyHandleList)) { + try { + rDb.remove("key".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + }
+ + @Test(expected = RocksDBException.class) + public void failToCFRemoveInReadOnly() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) + ); + + final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList = + new ArrayList<>(); + try (final RocksDB rDb = RocksDB.openReadOnly( + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + readOnlyColumnFamilyHandleList)) { + try { + rDb.remove(readOnlyColumnFamilyHandleList.get(0), + "key".getBytes()); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + } + + @Test(expected = RocksDBException.class) + public void failToWriteBatchReadOnly() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) + ); + + final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList = + new ArrayList<>(); + try (final RocksDB rDb = RocksDB.openReadOnly( + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + readOnlyColumnFamilyHandleList); + final WriteBatch wb = new WriteBatch(); + final WriteOptions wOpts = new WriteOptions()) { + try { + wb.put("key".getBytes(), "value".getBytes()); + rDb.write(wOpts, wb); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + } + + @Test(expected = RocksDBException.class) + public void failToCFWriteBatchReadOnly() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + //no-op + } + + try (final ColumnFamilyOptions cfOpts = new ColumnFamilyOptions()) { + final List<ColumnFamilyDescriptor> cfDescriptors = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY, cfOpts) + ); + + final List<ColumnFamilyHandle> readOnlyColumnFamilyHandleList = + new ArrayList<>(); + try (final RocksDB rDb = RocksDB.openReadOnly( + dbFolder.getRoot().getAbsolutePath(), cfDescriptors, + readOnlyColumnFamilyHandleList); + final WriteBatch wb = new WriteBatch(); + final WriteOptions wOpts = new WriteOptions()) { + try { + wb.put(readOnlyColumnFamilyHandleList.get(0), "key".getBytes(), + "value".getBytes()); + rDb.write(wOpts, wb); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + readOnlyColumnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java new file mode 100755 index 0000000000..58ed2ecc6f --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/ReadOptionsTest.java @@ -0,0 +1,175 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.Random; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.ExpectedException; + +import static org.assertj.core.api.Assertions.assertThat; + +public class ReadOptionsTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public ExpectedException exception = ExpectedException.none(); + + @Test + public void verifyChecksum() { + try (final ReadOptions opt = new ReadOptions()) { + final Random rand = new Random(); + final boolean boolValue = rand.nextBoolean(); + opt.setVerifyChecksums(boolValue); + assertThat(opt.verifyChecksums()).isEqualTo(boolValue); + } + } + + @Test + public void fillCache() { + try (final ReadOptions opt = new ReadOptions()) { + final Random rand = new Random(); + final boolean boolValue = rand.nextBoolean(); + opt.setFillCache(boolValue); + assertThat(opt.fillCache()).isEqualTo(boolValue); + } + } + + @Test + public void tailing() { + try (final ReadOptions opt = new ReadOptions()) { + final Random rand = new Random(); + final boolean boolValue = rand.nextBoolean(); + opt.setTailing(boolValue); + assertThat(opt.tailing()).isEqualTo(boolValue); + } + } + + @Test + public void snapshot() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setSnapshot(null); + assertThat(opt.snapshot()).isNull(); + } + } + + @Test + public void readTier() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setReadTier(ReadTier.BLOCK_CACHE_TIER); + assertThat(opt.readTier()).isEqualTo(ReadTier.BLOCK_CACHE_TIER); + } + } + + @Test + public void managed() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setManaged(true); + assertThat(opt.managed()).isTrue(); + } + } + + @Test + public void totalOrderSeek() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setTotalOrderSeek(true); + assertThat(opt.totalOrderSeek()).isTrue(); + } + } + + @Test + public void prefixSameAsStart() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setPrefixSameAsStart(true); + assertThat(opt.prefixSameAsStart()).isTrue(); + } + } + + @Test + public void pinData() { + try (final ReadOptions opt = new ReadOptions()) { + opt.setPinData(true); + assertThat(opt.pinData()).isTrue(); + } + } + + @Test + public void failSetVerifyChecksumUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setVerifyChecksums(true); + } + } + + @Test + public void failVerifyChecksumUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.verifyChecksums(); + } + } + + @Test + public void failSetFillCacheUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setFillCache(true); + } + } + + @Test + public void failFillCacheUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.fillCache(); + } + } + + @Test + public void failSetTailingUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setTailing(true); + } + } + + @Test + public void failTailingUninitialized() { + try (final ReadOptions 
readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.tailing(); + } + } + + @Test + public void failSetSnapshotUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.setSnapshot(null); + } + } + + @Test + public void failSnapshotUninitialized() { + try (final ReadOptions readOptions = + setupUninitializedReadOptions(exception)) { + readOptions.snapshot(); + } + } + + private ReadOptions setupUninitializedReadOptions( + ExpectedException exception) { + final ReadOptions readOptions = new ReadOptions(); + readOptions.close(); + exception.expect(AssertionError.class); + return readOptions; + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java new file mode 100755 index 0000000000..2a31d826f3 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/RocksDBTest.java @@ -0,0 +1,665 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.*; + +import static org.assertj.core.api.Assertions.assertThat; + +public class RocksDBTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + public static final Random rand = PlatformRandomHelper. + getPlatformSpecificRandomFactory(); + + @Test + public void open() throws RocksDBException { + try (final RocksDB db = + RocksDB.open(dbFolder.getRoot().getAbsolutePath())) { + assertThat(db).isNotNull(); + } + } + + @Test + public void open_opt() throws RocksDBException { + try (final Options opt = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(db).isNotNull(); + } + } + + @Test + public void put() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final WriteOptions opt = new WriteOptions()) { + db.put("key1".getBytes(), "value".getBytes()); + db.put(opt, "key2".getBytes(), "12345678".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo( + "value".getBytes()); + assertThat(db.get("key2".getBytes())).isEqualTo( + "12345678".getBytes()); + } + } + + @Test + public void write() throws RocksDBException { + try (final Options options = new Options().setMergeOperator( + new StringAppendOperator()).setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + final WriteOptions opts = new WriteOptions()) { + + try (final WriteBatch wb1 = new WriteBatch()) { + wb1.put("key1".getBytes(), "aa".getBytes()); + wb1.merge("key1".getBytes(), "bb".getBytes()); + + try (final WriteBatch wb2 = new WriteBatch()) { + wb2.put("key2".getBytes(), "xx".getBytes()); + wb2.merge("key2".getBytes(), "yy".getBytes()); + db.write(opts, wb1); + db.write(opts, wb2); + } + } + + assertThat(db.get("key1".getBytes())).isEqualTo( + "aa,bb".getBytes()); + assertThat(db.get("key2".getBytes())).isEqualTo( + "xx,yy".getBytes()); + } + } + + @Test + public void getWithOutValue() throws 
RocksDBException { + try (final RocksDB db = + RocksDB.open(dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + byte[] outValue = new byte[5]; + // not found value + int getResult = db.get("keyNotFound".getBytes(), outValue); + assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND); + // found value which fits in outValue + getResult = db.get("key1".getBytes(), outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("value".getBytes()); + // found value which fits partially + getResult = db.get("key2".getBytes(), outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("12345".getBytes()); + } + } + + @Test + public void getWithOutValueReadOptions() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final ReadOptions rOpt = new ReadOptions()) { + db.put("key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + byte[] outValue = new byte[5]; + // not found value + int getResult = db.get(rOpt, "keyNotFound".getBytes(), + outValue); + assertThat(getResult).isEqualTo(RocksDB.NOT_FOUND); + // found value which fits in outValue + getResult = db.get(rOpt, "key1".getBytes(), outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("value".getBytes()); + // found value which fits partially + getResult = db.get(rOpt, "key2".getBytes(), outValue); + assertThat(getResult).isNotEqualTo(RocksDB.NOT_FOUND); + assertThat(outValue).isEqualTo("12345".getBytes()); + } + } + + @Test + public void multiGet() throws RocksDBException, InterruptedException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final ReadOptions rOpt = new ReadOptions()) { + db.put("key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + List<byte[]> lookupKeys = new ArrayList<>(); + lookupKeys.add("key1".getBytes()); + lookupKeys.add("key2".getBytes()); + Map<byte[], byte[]> results = db.multiGet(lookupKeys); + assertThat(results).isNotNull(); + assertThat(results.values()).isNotNull(); + assertThat(results.values()). + contains("value".getBytes(), "12345678".getBytes()); + // test same method with ReadOptions + results = db.multiGet(rOpt, lookupKeys); + assertThat(results).isNotNull(); + assertThat(results.values()).isNotNull(); + assertThat(results.values()). + contains("value".getBytes(), "12345678".getBytes()); + + // remove existing key + lookupKeys.remove("key2".getBytes());
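+ // note: List.remove(Object) matches via equals(), and byte arrays compare + // by identity in Java, so this call does not actually remove the earlier + // "key2" entry from lookupKeys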
+ // add a non-existing key + lookupKeys.add("key3".getBytes()); + results = db.multiGet(lookupKeys); + assertThat(results).isNotNull(); + assertThat(results.values()).isNotNull(); + assertThat(results.values()). + contains("value".getBytes()); + // test same call with readOptions + results = db.multiGet(rOpt, lookupKeys); + assertThat(results).isNotNull(); + assertThat(results.values()).isNotNull(); + assertThat(results.values()). + contains("value".getBytes()); + } + } + + @Test + public void merge() throws RocksDBException { + try (final Options opt = new Options() + .setCreateIfMissing(true) + .setMergeOperator(new StringAppendOperator()); + final WriteOptions wOpt = new WriteOptions(); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath()) + ) { + db.put("key1".getBytes(), "value".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo( + "value".getBytes()); + // merge key1 with another value portion + db.merge("key1".getBytes(), "value2".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo( + "value,value2".getBytes()); + // merge key1 with another value portion + db.merge(wOpt, "key1".getBytes(), "value3".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo( + "value,value2,value3".getBytes()); + // a merge on a non-existent key shall insert the value + db.merge(wOpt, "key2".getBytes(), "xxxx".getBytes()); + assertThat(db.get("key2".getBytes())).isEqualTo( + "xxxx".getBytes()); + } + } + + @Test + public void remove() throws RocksDBException { + try (final RocksDB db = RocksDB.open(dbFolder.getRoot().getAbsolutePath()); + final WriteOptions wOpt = new WriteOptions()) { + db.put("key1".getBytes(), "value".getBytes()); + db.put("key2".getBytes(), "12345678".getBytes()); + assertThat(db.get("key1".getBytes())).isEqualTo( + "value".getBytes()); + assertThat(db.get("key2".getBytes())).isEqualTo( + "12345678".getBytes()); + db.remove("key1".getBytes()); + db.remove(wOpt, "key2".getBytes()); + assertThat(db.get("key1".getBytes())).isNull(); + assertThat(db.get("key2".getBytes())).isNull(); + } + } + + @Test + public void getIntProperty() throws RocksDBException { + try ( + final Options options = new Options() + .setCreateIfMissing(true) + .setMaxWriteBufferNumber(10) + .setMinWriteBufferNumberToMerge(10); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + final WriteOptions wOpt = new WriteOptions().setDisableWAL(true) + ) { + db.put(wOpt, "key1".getBytes(), "value1".getBytes()); + db.put(wOpt, "key2".getBytes(), "value2".getBytes()); + db.put(wOpt, "key3".getBytes(), "value3".getBytes()); + db.put(wOpt, "key4".getBytes(), "value4".getBytes()); + assertThat(db.getLongProperty("rocksdb.num-entries-active-mem-table")) + .isGreaterThan(0); + assertThat(db.getLongProperty("rocksdb.cur-size-active-mem-table")) + .isGreaterThan(0); + } + } + + @Test + public void fullCompactRange() throws RocksDBException { + try (final Options opt = new Options(). + setCreateIfMissing(true). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false);
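+ // note: this second setDisableAutoCompactions(false) overrides the + // setDisableAutoCompactions(true) earlier in the chain (the last setter + // wins), so auto compactions are actually enabled for this test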
+ final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put((String.valueOf(i)).getBytes(), b); + } + db.compactRange(); + } + } + + @Test + public void fullCompactRangeColumnFamily() + throws RocksDBException { + try ( + final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). + setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions(). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false) + ) { + final List<ColumnFamilyDescriptor> columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts)); + + // open database + final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(); + try (final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, + columnFamilyHandles)) { + try { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put(columnFamilyHandles.get(1), + String.valueOf(i).getBytes(), b); + } + db.compactRange(columnFamilyHandles.get(1)); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + @Test + public void compactRangeWithKeys() + throws RocksDBException { + try (final Options opt = new Options(). + setCreateIfMissing(true). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put((String.valueOf(i)).getBytes(), b); + } + db.compactRange("0".getBytes(), "201".getBytes()); + } + } + + @Test + public void compactRangeWithKeysReduce() + throws RocksDBException { + try ( + final Options opt = new Options(). + setCreateIfMissing(true). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put((String.valueOf(i)).getBytes(), b); + } + db.flush(new FlushOptions().setWaitForFlush(true));
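+ // compactRange(begin, end, reduce_level, target_level, target_path_id): + // with reduce_level=true and target_level=-1 the compacted output should + // be moved to the minimum level still able to hold the data, and + // target_path_id=0 selects the default db path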
+ db.compactRange("0".getBytes(), "201".getBytes(), + true, -1, 0); + } + } + + @Test + public void compactRangeWithKeysColumnFamily() + throws RocksDBException { + try (final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). + setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions(). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false) + ) { + final List<ColumnFamilyDescriptor> columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts) + ); + + // open database + final List<ColumnFamilyHandle> columnFamilyHandles = + new ArrayList<>(); + try (final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, + columnFamilyHandles)) { + try { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put(columnFamilyHandles.get(1), + String.valueOf(i).getBytes(), b); + } + db.compactRange(columnFamilyHandles.get(1), + "0".getBytes(), "201".getBytes()); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + @Test + public void compactRangeWithKeysReduceColumnFamily() + throws RocksDBException { + try (final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). + setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions(). + setDisableAutoCompactions(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(4). + setWriteBufferSize(100 << 10). + setLevelZeroFileNumCompactionTrigger(3). + setTargetFileSizeBase(200 << 10). + setTargetFileSizeMultiplier(1). + setMaxBytesForLevelBase(500 << 10). + setMaxBytesForLevelMultiplier(1). + setDisableAutoCompactions(false) + ) { + final List<ColumnFamilyDescriptor> columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts) + ); + + final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(); + // open database + try (final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, + columnFamilyHandles)) { + try { + // fill database with key/value pairs + byte[] b = new byte[10000]; + for (int i = 0; i < 200; i++) { + rand.nextBytes(b); + db.put(columnFamilyHandles.get(1), + String.valueOf(i).getBytes(), b); + } + db.compactRange(columnFamilyHandles.get(1), "0".getBytes(), + "201".getBytes(), true, -1, 0); + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + @Test + public void compactRangeToLevel() + throws RocksDBException, InterruptedException { + final int NUM_KEYS_PER_L0_FILE = 100; + final int KEY_SIZE = 20; + final int VALUE_SIZE = 300; + final int L0_FILE_SIZE = + NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE); + final int NUM_L0_FILES = 10; + final int TEST_SCALE = 5; + final int KEY_INTERVAL = 100;
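+ // with these constants each L0 file holds about + // 100 * (20 + 300) = 32,000 bytes (~32 KB) of key/value data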
+ try (final Options opt = new Options(). + setCreateIfMissing(true). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(5). + // a write buffer slightly bigger than the L0 file size, + // so that we can ensure a manual flush always + // goes before a background flush happens. + setWriteBufferSize(L0_FILE_SIZE * 2). + // Disable auto L0 -> L1 compaction + setLevelZeroFileNumCompactionTrigger(20). + setTargetFileSizeBase(L0_FILE_SIZE * 100). + setTargetFileSizeMultiplier(1). + // To disable auto compaction + setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100). + setMaxBytesForLevelMultiplier(2). + setDisableAutoCompactions(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath()) + ) { + // fill database with key/value pairs + byte[] value = new byte[VALUE_SIZE]; + int int_key = 0; + for (int round = 0; round < 5; ++round) { + int initial_key = int_key; + for (int f = 1; f <= NUM_L0_FILES; ++f) { + for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) { + int_key += KEY_INTERVAL; + rand.nextBytes(value); +
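+ // "%020d" zero-pads each key to 20 digits (KEY_SIZE) so that + // lexicographic byte order matches numeric key order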
+ db.put(String.format("%020d", int_key).getBytes(), + value); + } + db.flush(new FlushOptions().setWaitForFlush(true)); + // Make sure we did create one more L0 file. + assertThat( + db.getProperty("rocksdb.num-files-at-level0")). + isEqualTo("" + f); + } + + // Compact all L0 files we just created + db.compactRange( + String.format("%020d", initial_key).getBytes(), + String.format("%020d", int_key - 1).getBytes()); + // Make sure there are no L0 files left. + assertThat( + db.getProperty("rocksdb.num-files-at-level0")). + isEqualTo("0"); + // Make sure there are some L1 files. + // Here we only use != 0 instead of a specific number + // as we don't want the test to make any assumptions about + // how compaction works. + assertThat( + db.getProperty("rocksdb.num-files-at-level1")). + isNotEqualTo("0"); + // Because we only compacted those keys we issued + // in this round, there shouldn't be any L1 -> L2 + // compaction. So we expect zero L2 files here. + assertThat( + db.getProperty("rocksdb.num-files-at-level2")). + isEqualTo("0"); + } + } + } + + @Test + public void compactRangeToLevelColumnFamily() + throws RocksDBException { + final int NUM_KEYS_PER_L0_FILE = 100; + final int KEY_SIZE = 20; + final int VALUE_SIZE = 300; + final int L0_FILE_SIZE = + NUM_KEYS_PER_L0_FILE * (KEY_SIZE + VALUE_SIZE); + final int NUM_L0_FILES = 10; + final int TEST_SCALE = 5; + final int KEY_INTERVAL = 100; + + try (final DBOptions opt = new DBOptions(). + setCreateIfMissing(true). + setCreateMissingColumnFamilies(true); + final ColumnFamilyOptions new_cf_opts = new ColumnFamilyOptions(). + setCompactionStyle(CompactionStyle.LEVEL). + setNumLevels(5). + // a write buffer slightly bigger than the L0 file size, + // so that we can ensure a manual flush always + // goes before a background flush happens. + setWriteBufferSize(L0_FILE_SIZE * 2). + // Disable auto L0 -> L1 compaction + setLevelZeroFileNumCompactionTrigger(20). + setTargetFileSizeBase(L0_FILE_SIZE * 100). + setTargetFileSizeMultiplier(1). + // To disable auto compaction + setMaxBytesForLevelBase(NUM_L0_FILES * L0_FILE_SIZE * 100). + setMaxBytesForLevelMultiplier(2). + setDisableAutoCompactions(true) + ) { + final List<ColumnFamilyDescriptor> columnFamilyDescriptors = + Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes(), new_cf_opts) + ); + + final List<ColumnFamilyHandle> columnFamilyHandles = new ArrayList<>(); + // open database + try (final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath(), + columnFamilyDescriptors, + columnFamilyHandles)) { + try { + // fill database with key/value pairs + byte[] value = new byte[VALUE_SIZE]; + int int_key = 0; + for (int round = 0; round < 5; ++round) { + int initial_key = int_key; + for (int f = 1; f <= NUM_L0_FILES; ++f) { + for (int i = 0; i < NUM_KEYS_PER_L0_FILE; ++i) { + int_key += KEY_INTERVAL; + rand.nextBytes(value); + + db.put(columnFamilyHandles.get(1), + String.format("%020d", int_key).getBytes(), + value); + } + db.flush(new FlushOptions().setWaitForFlush(true), + columnFamilyHandles.get(1)); + // Make sure we did create one more L0 file. + assertThat( + db.getProperty(columnFamilyHandles.get(1), + "rocksdb.num-files-at-level0")). + isEqualTo("" + f); + } + + // Compact all L0 files we just created + db.compactRange( + columnFamilyHandles.get(1), + String.format("%020d", initial_key).getBytes(), + String.format("%020d", int_key - 1).getBytes()); + // Make sure there are no L0 files left. + assertThat( + db.getProperty(columnFamilyHandles.get(1), + "rocksdb.num-files-at-level0")). + isEqualTo("0"); + // Make sure there are some L1 files. + // Here we only use != 0 instead of a specific number + // as we don't want the test to make any assumptions about + // how compaction works. + assertThat( + db.getProperty(columnFamilyHandles.get(1), + "rocksdb.num-files-at-level1")). + isNotEqualTo("0"); + // Because we only compacted those keys we issued + // in this round, there shouldn't be any L1 -> L2 + // compaction. So we expect zero L2 files here. + assertThat( + db.getProperty(columnFamilyHandles.get(1), + "rocksdb.num-files-at-level2")). + isEqualTo("0"); + } + } finally { + for (final ColumnFamilyHandle handle : columnFamilyHandles) { + handle.close(); + } + } + } + } + } + + @Test + public void pauseContinueBackgroundWork() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { + db.pauseBackgroundWork(); + db.continueBackgroundWork(); + db.pauseBackgroundWork(); + db.continueBackgroundWork(); + } + } + + @Test + public void enableDisableFileDeletions() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { + db.disableFileDeletions(); + db.enableFileDeletions(false); + db.disableFileDeletions(); + db.enableFileDeletions(true); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/RocksEnvTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/RocksEnvTest.java new file mode 100755 index 0000000000..d89570aad2 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/RocksEnvTest.java @@ -0,0 +1,39 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory.
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class RocksEnvTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void rocksEnv() { + try (final Env rocksEnv = RocksEnv.getDefault()) { + rocksEnv.setBackgroundThreads(5); + // default rocksenv will always return zero for flush pool + // no matter what was set via setBackgroundThreads + assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)). + isEqualTo(0); + rocksEnv.setBackgroundThreads(5, RocksEnv.FLUSH_POOL); + // default rocksenv will always return zero for flush pool + // no matter what was set via setBackgroundThreads + assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.FLUSH_POOL)). + isEqualTo(0); + rocksEnv.setBackgroundThreads(5, RocksEnv.COMPACTION_POOL); + // default rocksenv will always return zero for compaction pool + // no matter what was set via setBackgroundThreads + assertThat(rocksEnv.getThreadPoolQueueLen(RocksEnv.COMPACTION_POOL)). + isEqualTo(0); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java new file mode 100755 index 0000000000..4471df9cce --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/RocksIteratorTest.java @@ -0,0 +1,58 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.assertj.core.api.Assertions.assertThat; + +public class RocksIteratorTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void rocksIterator() throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setCreateMissingColumnFamilies(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value1".getBytes()); + db.put("key2".getBytes(), "value2".getBytes()); + + try (final RocksIterator iterator = db.newIterator()) { + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + assertThat(iterator.value()).isEqualTo("value2".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isFalse(); + iterator.seekToLast(); + iterator.prev(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key1".getBytes()); + assertThat(iterator.value()).isEqualTo("value1".getBytes()); + iterator.seekToFirst(); + iterator.seekToLast(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + assertThat(iterator.value()).isEqualTo("value2".getBytes()); + iterator.status(); + } + } + } +} diff --git 
a/external/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java new file mode 100755 index 0000000000..141f7f8506 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/RocksMemEnvTest.java @@ -0,0 +1,148 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.assertj.core.api.Assertions.assertThat; + +public class RocksMemEnvTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void memEnvFillAndReopen() throws RocksDBException { + + final byte[][] keys = { + "aaa".getBytes(), + "bbb".getBytes(), + "ccc".getBytes() + }; + + final byte[][] values = { + "foo".getBytes(), + "bar".getBytes(), + "baz".getBytes() + }; + + try (final Env env = new RocksMemEnv(); + final Options options = new Options() + .setCreateIfMissing(true) + .setEnv(env); + final FlushOptions flushOptions = new FlushOptions() + .setWaitForFlush(true); + ) { + try (final RocksDB db = RocksDB.open(options, "dir/db")) { + // write key/value pairs using MemEnv + for (int i = 0; i < keys.length; i++) { + db.put(keys[i], values[i]); + } + + // read key/value pairs using MemEnv + for (int i = 0; i < keys.length; i++) { + assertThat(db.get(keys[i])).isEqualTo(values[i]); + } + + // Check iterator access + try (final RocksIterator iterator = db.newIterator()) { + iterator.seekToFirst(); + for (int i = 0; i < keys.length; i++) { + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo(keys[i]); + assertThat(iterator.value()).isEqualTo(values[i]); + iterator.next(); + } + // reached end of database + assertThat(iterator.isValid()).isFalse(); + } + + // flush + db.flush(flushOptions); + + // read key/value pairs after flush using MemEnv + for (int i = 0; i < keys.length; i++) { + assertThat(db.get(keys[i])).isEqualTo(values[i]); + } + } + + options.setCreateIfMissing(false); + + // After reopen the values shall still be in the mem env. + // as long as the env is not freed. + try (final RocksDB db = RocksDB.open(options, "dir/db")) { + // read key/value pairs using MemEnv + for (int i = 0; i < keys.length; i++) { + assertThat(db.get(keys[i])).isEqualTo(values[i]); + } + } + } + } + + @Test + public void multipleDatabaseInstances() throws RocksDBException { + // db - keys + final byte[][] keys = { + "aaa".getBytes(), + "bbb".getBytes(), + "ccc".getBytes() + }; + // otherDb - keys + final byte[][] otherKeys = { + "111".getBytes(), + "222".getBytes(), + "333".getBytes() + }; + // values + final byte[][] values = { + "foo".getBytes(), + "bar".getBytes(), + "baz".getBytes() + }; + + try (final Env env = new RocksMemEnv(); + final Options options = new Options() + .setCreateIfMissing(true) + .setEnv(env); + final RocksDB db = RocksDB.open(options, "dir/db"); + final RocksDB otherDb = RocksDB.open(options, "dir/otherDb") + ) { + // write key/value pairs using MemEnv + // to db and to otherDb. 
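+ // note: both databases share the same in-memory Env but use distinct + // paths ("dir/db" vs "dir/otherDb"), so their key spaces should remain + // fully isolated, as the asserts below verify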
+ for (int i = 0; i < keys.length; i++) { + db.put(keys[i], values[i]); + otherDb.put(otherKeys[i], values[i]); + } + + // verify key/value pairs after flush using MemEnv + for (int i = 0; i < keys.length; i++) { + // verify db + assertThat(db.get(otherKeys[i])).isNull(); + assertThat(db.get(keys[i])).isEqualTo(values[i]); + + // verify otherDb + assertThat(otherDb.get(keys[i])).isNull(); + assertThat(otherDb.get(otherKeys[i])).isEqualTo(values[i]); + } + } + } + + @Test(expected = RocksDBException.class) + public void createIfMissingFalse() throws RocksDBException { + try (final Env env = new RocksMemEnv(); + final Options options = new Options() + .setCreateIfMissing(false) + .setEnv(env); + final RocksDB db = RocksDB.open(options, "db/dir")) { + // shall throw an exception because db dir does not + // exist. + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/RocksMemoryResource.java b/external/rocksdb/java/src/test/java/org/rocksdb/RocksMemoryResource.java new file mode 100755 index 0000000000..6fd1c7e667 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/RocksMemoryResource.java @@ -0,0 +1,24 @@ +package org.rocksdb; + +import org.junit.rules.ExternalResource; + +/** + * Resource to trigger garbage collection after each test + * run. + * + * @deprecated Will be removed with the implementation of + * {@link RocksObject#finalize()} + */ +@Deprecated +public class RocksMemoryResource extends ExternalResource { + + static { + RocksDB.loadLibrary(); + } + + @Override + protected void after() { + System.gc(); + System.runFinalization(); + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java new file mode 100755 index 0000000000..952c9ab866 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/SliceTest.java @@ -0,0 +1,61 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class SliceTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void slice() { + try (final Slice slice = new Slice("testSlice")) { + assertThat(slice.empty()).isFalse(); + assertThat(slice.size()).isEqualTo(9); + assertThat(slice.data()).isEqualTo("testSlice".getBytes()); + } + + try (final Slice otherSlice = new Slice("otherSlice".getBytes())) { + assertThat(otherSlice.data()).isEqualTo("otherSlice".getBytes()); + } + + try (final Slice thirdSlice = new Slice("otherSlice".getBytes(), 5)) { + assertThat(thirdSlice.data()).isEqualTo("Slice".getBytes()); + } + } + + @Test + public void sliceEquals() { + try (final Slice slice = new Slice("abc"); + final Slice slice2 = new Slice("abc")) { + assertThat(slice.equals(slice2)).isTrue(); + assertThat(slice.hashCode() == slice2.hashCode()).isTrue(); + } + } + + @Test + public void sliceStartWith() { + try (final Slice slice = new Slice("matchpoint"); + final Slice match = new Slice("mat"); + final Slice noMatch = new Slice("nomatch")) { + assertThat(slice.startsWith(match)).isTrue(); + assertThat(slice.startsWith(noMatch)).isFalse(); + } + } + + @Test + public void sliceToString() { + try (final Slice slice = new Slice("stringTest")) { + assertThat(slice.toString()).isEqualTo("stringTest"); + assertThat(slice.toString(true)).isNotEqualTo(""); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java new file mode 100755 index 0000000000..581bae50b9 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/SnapshotTest.java @@ -0,0 +1,169 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.assertj.core.api.Assertions.assertThat; + +public class SnapshotTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void snapshots() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put("key".getBytes(), "value".getBytes()); + // Get new Snapshot of database + try (final Snapshot snapshot = db.getSnapshot()) { + assertThat(snapshot.getSequenceNumber()).isGreaterThan(0); + assertThat(snapshot.getSequenceNumber()).isEqualTo(1); + try (final ReadOptions readOptions = new ReadOptions()) { + // set snapshot in ReadOptions + readOptions.setSnapshot(snapshot); + + // retrieve key value pair + assertThat(new String(db.get("key".getBytes()))). 
+ isEqualTo("value"); + // retrieve key value pair created before + // the snapshot was made + assertThat(new String(db.get(readOptions, + "key".getBytes()))).isEqualTo("value"); + // add new key/value pair + db.put("newkey".getBytes(), "newvalue".getBytes()); + // without a snapshot, the latest db entries + // will be taken into account + assertThat(new String(db.get("newkey".getBytes()))). + isEqualTo("newvalue"); + // the snapshot was created before newkey + assertThat(db.get(readOptions, "newkey".getBytes())). + isNull(); + // Retrieve snapshot from read options + try (final Snapshot sameSnapshot = readOptions.snapshot()) { + readOptions.setSnapshot(sameSnapshot); + // results must be the same with a new Snapshot + // instance using the same native pointer + assertThat(new String(db.get(readOptions, + "key".getBytes()))).isEqualTo("value"); + // update key value pair to newvalue + db.put("key".getBytes(), "newvalue".getBytes()); + // a read with the previously created snapshot will + // read the previous version of the key value pair + assertThat(new String(db.get(readOptions, + "key".getBytes()))).isEqualTo("value"); + // a read for newkey using the snapshot must return + // null + assertThat(db.get(readOptions, "newkey".getBytes())). + isNull(); + // setting the snapshot to null in ReadOptions leads + // to no Snapshot being used. + readOptions.setSnapshot(null); + assertThat(new String(db.get(readOptions, + "newkey".getBytes()))).isEqualTo("newvalue"); + // release Snapshot + db.releaseSnapshot(snapshot); + } + } + } + } + + @Test + public void iteratorWithSnapshot() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put("key".getBytes(), "value".getBytes()); + + // Get new Snapshot of database + // set snapshot in ReadOptions + try (final Snapshot snapshot = db.getSnapshot(); + final ReadOptions readOptions = + new ReadOptions().setSnapshot(snapshot)) { + db.put("key2".getBytes(), "value2".getBytes()); + + // iterate over current state of db + try (final RocksIterator iterator = db.newIterator()) { + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isFalse(); + } + + // iterate using a snapshot + try (final RocksIterator snapshotIterator = + db.newIterator(readOptions)) { + snapshotIterator.seekToFirst(); + assertThat(snapshotIterator.isValid()).isTrue(); + assertThat(snapshotIterator.key()).isEqualTo("key".getBytes()); + snapshotIterator.next(); + assertThat(snapshotIterator.isValid()).isFalse(); + } + + // release Snapshot + db.releaseSnapshot(snapshot); + } + } + } + + @Test + public void iteratorWithSnapshotOnColumnFamily() throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + db.put("key".getBytes(), "value".getBytes()); + + // Get new Snapshot of database + // set snapshot in ReadOptions + try (final Snapshot snapshot = db.getSnapshot(); + final ReadOptions readOptions = new ReadOptions() + .setSnapshot(snapshot)) { + db.put("key2".getBytes(), "value2".getBytes()); + + // iterate over current state of column family + try (final RocksIterator iterator = db.newIterator(
db.getDefaultColumnFamily())) { + iterator.seekToFirst(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isTrue(); + assertThat(iterator.key()).isEqualTo("key2".getBytes()); + iterator.next(); + assertThat(iterator.isValid()).isFalse(); + } + + // iterate using a snapshot on default column family + try (final RocksIterator snapshotIterator = db.newIterator( + db.getDefaultColumnFamily(), readOptions)) { + snapshotIterator.seekToFirst(); + assertThat(snapshotIterator.isValid()).isTrue(); + assertThat(snapshotIterator.key()).isEqualTo("key".getBytes()); + snapshotIterator.next(); + assertThat(snapshotIterator.isValid()).isFalse(); + + // release Snapshot + db.releaseSnapshot(snapshot); + } + } + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java new file mode 100755 index 0000000000..9f014d1d3f --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/StatisticsCollectorTest.java @@ -0,0 +1,52 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import java.util.Collections; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.assertj.core.api.Assertions.assertThat; + +public class StatisticsCollectorTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void statisticsCollector() + throws InterruptedException, RocksDBException { + try (final Options opt = new Options() + .createStatistics() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(opt, + dbFolder.getRoot().getAbsolutePath())) { + final Statistics stats = opt.statisticsPtr(); + + final StatsCallbackMock callback = new StatsCallbackMock(); + final StatsCollectorInput statsInput = + new StatsCollectorInput(stats, callback); + + final StatisticsCollector statsCollector = new StatisticsCollector( + Collections.singletonList(statsInput), 100); + statsCollector.start(); + + Thread.sleep(1000); + + assertThat(callback.tickerCallbackCount).isGreaterThan(0); + assertThat(callback.histCallbackCount).isGreaterThan(0); + + statsCollector.shutDown(1000); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java b/external/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java new file mode 100755 index 0000000000..2e28f28efa --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/StatsCallbackMock.java @@ -0,0 +1,20 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +package org.rocksdb; + +public class StatsCallbackMock implements StatisticsCollectorCallback { + public int tickerCallbackCount = 0; + public int histCallbackCount = 0; + + public void tickerCallback(TickerType tickerType, long tickerCount) { + tickerCallbackCount++; + } + + public void histogramCallback(HistogramType histType, + HistogramData histData) { + histCallbackCount++; + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java new file mode 100755 index 0000000000..b619258ecc --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/TransactionLogIteratorTest.java @@ -0,0 +1,138 @@ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TransactionLogIteratorTest { + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void transactionLogIterator() throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + final TransactionLogIterator transactionLogIterator = + db.getUpdatesSince(0)) { + //no-op + } + } + + @Test + public void getBatch() throws RocksDBException { + final int numberOfPuts = 5; + try (final Options options = new Options() + .setCreateIfMissing(true) + .setWalTtlSeconds(1000) + .setWalSizeLimitMB(10); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + for (int i = 0; i < numberOfPuts; i++) { + db.put(String.valueOf(i).getBytes(), + String.valueOf(i).getBytes()); + } + db.flush(new FlushOptions().setWaitForFlush(true)); + + // the latest sequence number is 5 because 5 puts + // were written beforehand + assertThat(db.getLatestSequenceNumber()). + isEqualTo(numberOfPuts); + + // insert 5 writes into a cf + try (final ColumnFamilyHandle cfHandle = db.createColumnFamily( + new ColumnFamilyDescriptor("new_cf".getBytes()))) { + for (int i = 0; i < numberOfPuts; i++) { + db.put(cfHandle, String.valueOf(i).getBytes(), + String.valueOf(i).getBytes()); + } + // the latest sequence number is 10 because + // (5 + 5) puts were written beforehand + assertThat(db.getLatestSequenceNumber()). 
+ isEqualTo(numberOfPuts + numberOfPuts); + + // Get updates since the beginning + try (final TransactionLogIterator transactionLogIterator = + db.getUpdatesSince(0)) { + assertThat(transactionLogIterator.isValid()).isTrue(); + transactionLogIterator.status(); + + // The first sequence number is 1 + final TransactionLogIterator.BatchResult batchResult = + transactionLogIterator.getBatch(); + assertThat(batchResult.sequenceNumber()).isEqualTo(1); + } + } + } + } + + @Test + public void transactionLogIteratorStallAtLastRecord() + throws RocksDBException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setWalTtlSeconds(1000) + .setWalSizeLimitMB(10); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + db.put("key1".getBytes(), "value1".getBytes()); + // Get updates since the beginning + try (final TransactionLogIterator transactionLogIterator = + db.getUpdatesSince(0)) { + transactionLogIterator.status(); + assertThat(transactionLogIterator.isValid()).isTrue(); + transactionLogIterator.next(); + assertThat(transactionLogIterator.isValid()).isFalse(); + transactionLogIterator.status(); + db.put("key2".getBytes(), "value2".getBytes()); + transactionLogIterator.next(); + transactionLogIterator.status(); + assertThat(transactionLogIterator.isValid()).isTrue(); + } + } + } + + @Test + public void transactionLogIteratorCheckAfterRestart() + throws RocksDBException { + final int numberOfKeys = 2; + try (final Options options = new Options() + .setCreateIfMissing(true) + .setWalTtlSeconds(1000) + .setWalSizeLimitMB(10)) { + + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + db.put("key1".getBytes(), "value1".getBytes()); + db.put("key2".getBytes(), "value2".getBytes()); + db.flush(new FlushOptions().setWaitForFlush(true)); + + } + + // reopen + try (final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + assertThat(db.getLatestSequenceNumber()).isEqualTo(numberOfKeys); + + try (final TransactionLogIterator transactionLogIterator = + db.getUpdatesSince(0)) { + for (int i = 0; i < numberOfKeys; i++) { + transactionLogIterator.status(); + assertThat(transactionLogIterator.isValid()).isTrue(); + transactionLogIterator.next(); + } + } + } + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java new file mode 100755 index 0000000000..6539ea4e01 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/TtlDBTest.java @@ -0,0 +1,120 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; +import java.util.concurrent.TimeUnit; + +import static org.assertj.core.api.Assertions.assertThat; + +public class TtlDBTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void ttlDBOpen() throws RocksDBException, InterruptedException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setMaxGrandparentOverlapFactor(0); + final TtlDB ttlDB = TtlDB.open(options, + dbFolder.getRoot().getAbsolutePath()) + ) { + ttlDB.put("key".getBytes(), "value".getBytes()); + assertThat(ttlDB.get("key".getBytes())). + isEqualTo("value".getBytes()); + assertThat(ttlDB.get("key".getBytes())).isNotNull(); + } + } + + @Test + public void ttlDBOpenWithTtl() throws RocksDBException, InterruptedException { + try (final Options options = new Options() + .setCreateIfMissing(true) + .setMaxGrandparentOverlapFactor(0); + final TtlDB ttlDB = TtlDB.open(options, + dbFolder.getRoot().getAbsolutePath(), 1, false); + ) { + ttlDB.put("key".getBytes(), "value".getBytes()); + assertThat(ttlDB.get("key".getBytes())). + isEqualTo("value".getBytes()); + TimeUnit.SECONDS.sleep(2); + ttlDB.compactRange(); + assertThat(ttlDB.get("key".getBytes())).isNull(); + } + } + + @Test + public void ttlDbOpenWithColumnFamilies() throws RocksDBException, + InterruptedException { + final List<ColumnFamilyDescriptor> cfNames = Arrays.asList( + new ColumnFamilyDescriptor(RocksDB.DEFAULT_COLUMN_FAMILY), + new ColumnFamilyDescriptor("new_cf".getBytes()) + ); + final List<Integer> ttlValues = Arrays.asList(0, 1); + + final List<ColumnFamilyHandle> columnFamilyHandleList = new ArrayList<>(); + try (final DBOptions dbOptions = new DBOptions() + .setCreateMissingColumnFamilies(true) + .setCreateIfMissing(true); + final TtlDB ttlDB = TtlDB.open(dbOptions, + dbFolder.getRoot().getAbsolutePath(), cfNames, + columnFamilyHandleList, ttlValues, false)) { + try { + ttlDB.put("key".getBytes(), "value".getBytes()); + assertThat(ttlDB.get("key".getBytes())). + isEqualTo("value".getBytes()); + ttlDB.put(columnFamilyHandleList.get(1), "key".getBytes(), + "value".getBytes()); + assertThat(ttlDB.get(columnFamilyHandleList.get(1), + "key".getBytes())).isEqualTo("value".getBytes()); + TimeUnit.SECONDS.sleep(2); + + ttlDB.compactRange(); + ttlDB.compactRange(columnFamilyHandleList.get(1)); + + assertThat(ttlDB.get("key".getBytes())).isNotNull(); + assertThat(ttlDB.get(columnFamilyHandleList.get(1), + "key".getBytes())).isNull(); + } finally { + for (final ColumnFamilyHandle columnFamilyHandle : + columnFamilyHandleList) { + columnFamilyHandle.close(); + } + } + } + } + + @Test + public void createTtlColumnFamily() throws RocksDBException, + InterruptedException { + try (final Options options = new Options().setCreateIfMissing(true); + final TtlDB ttlDB = TtlDB.open(options, + dbFolder.getRoot().getAbsolutePath()); + final ColumnFamilyHandle columnFamilyHandle = + ttlDB.createColumnFamilyWithTtl( + new ColumnFamilyDescriptor("new_cf".getBytes()), 1)) { + ttlDB.put(columnFamilyHandle, "key".getBytes(), + "value".getBytes()); + assertThat(ttlDB.get(columnFamilyHandle, "key".getBytes())).
+ isEqualTo("value".getBytes()); + TimeUnit.SECONDS.sleep(2); + ttlDB.compactRange(columnFamilyHandle); + assertThat(ttlDB.get(columnFamilyHandle, "key".getBytes())).isNull(); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/Types.java b/external/rocksdb/java/src/test/java/org/rocksdb/Types.java new file mode 100755 index 0000000000..ca5feb4cb1 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/Types.java @@ -0,0 +1,43 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +/** + * Simple type conversion methods + * for use in tests + */ +public class Types { + + /** + * Convert first 4 bytes of a byte array to an int + * + * @param data The byte array + * + * @return An integer + */ + public static int byteToInt(final byte data[]) { + return (data[0] & 0xff) | + ((data[1] & 0xff) << 8) | + ((data[2] & 0xff) << 16) | + ((data[3] & 0xff) << 24); + } + + /** + * Convert an int to 4 bytes + * + * @param v The int + * + * @return A byte array containing 4 bytes + */ + public static byte[] intToByte(final int v) { + return new byte[] { + (byte)((v >>> 0) & 0xff), + (byte)((v >>> 8) & 0xff), + (byte)((v >>> 16) & 0xff), + (byte)((v >>> 24) & 0xff) + }; + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java new file mode 100755 index 0000000000..35c63f2af1 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/WriteBatchHandlerTest.java @@ -0,0 +1,169 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+ +package org.rocksdb; + +import java.io.IOException; +import java.util.ArrayList; +import java.util.Arrays; +import java.util.List; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + + +public class WriteBatchHandlerTest { + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void writeBatchHandler() throws IOException, RocksDBException { + // setup test data + final List<Tuple<Action, Tuple<byte[], byte[]>>> testEvents = Arrays.asList( + new Tuple<>(Action.DELETE, + new Tuple<byte[], byte[]>("k0".getBytes(), null)), + new Tuple<>(Action.PUT, + new Tuple<>("k1".getBytes(), "v1".getBytes())), + new Tuple<>(Action.PUT, + new Tuple<>("k2".getBytes(), "v2".getBytes())), + new Tuple<>(Action.PUT, + new Tuple<>("k3".getBytes(), "v3".getBytes())), + new Tuple<>(Action.LOG, + new Tuple<byte[], byte[]>(null, "log1".getBytes())), + new Tuple<>(Action.MERGE, + new Tuple<>("k2".getBytes(), "v22".getBytes())), + new Tuple<>(Action.DELETE, + new Tuple<byte[], byte[]>("k3".getBytes(), null)) + ); + + // load test data to the write batch + try (final WriteBatch batch = new WriteBatch()) { + for (final Tuple<Action, Tuple<byte[], byte[]>> testEvent : testEvents) { + final Tuple<byte[], byte[]> data = testEvent.value; + switch (testEvent.key) { + + case PUT: + batch.put(data.key, data.value); + break; + + case MERGE: + batch.merge(data.key, data.value); + break; + + case DELETE: + batch.remove(data.key); + break; + + case LOG: + batch.putLogData(data.value); + break; + } + } + + // attempt to read test data back from the WriteBatch by iterating + // with a handler + try (final CapturingWriteBatchHandler handler = + new CapturingWriteBatchHandler()) { + batch.iterate(handler);
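+ // iterate() replays the batch records in the order they were added, + // so the captured events can be compared index-by-index with testEvents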
+ + // compare the results to the test data + final List<Tuple<Action, Tuple<byte[], byte[]>>> actualEvents = + handler.getEvents(); + assertThat(testEvents.size()).isSameAs(actualEvents.size()); + + for (int i = 0; i < testEvents.size(); i++) { + assertThat(equals(testEvents.get(i), actualEvents.get(i))).isTrue(); + } + } + } + } + + private static boolean equals( + final Tuple<Action, Tuple<byte[], byte[]>> expected, + final Tuple<Action, Tuple<byte[], byte[]>> actual) { + if (!expected.key.equals(actual.key)) { + return false; + } + + final Tuple<byte[], byte[]> expectedData = expected.value; + final Tuple<byte[], byte[]> actualData = actual.value; + + return equals(expectedData.key, actualData.key) + && equals(expectedData.value, actualData.value); + } + + private static boolean equals(byte[] expected, byte[] actual) { + if (expected != null) { + return Arrays.equals(expected, actual); + } else { + return actual == null; + } + } + + private static class Tuple<K, V> { + public final K key; + public final V value; + + public Tuple(final K key, final V value) { + this.key = key; + this.value = value; + } + } + + /** + * Enumeration of Write Batch + * event actions + */ + private enum Action { + PUT, + MERGE, + DELETE, + LOG + } + + /** + * A simple WriteBatch Handler which adds a record + * of each event that it receives to a list + */ + private static class CapturingWriteBatchHandler extends WriteBatch.Handler { + + private final List<Tuple<Action, Tuple<byte[], byte[]>>> events + = new ArrayList<>(); + + /** + * Returns a copy of the current events list + * + * @return a list of the events which have happened up to now + */ + public List<Tuple<Action, Tuple<byte[], byte[]>>> getEvents() { + return new ArrayList<>(events); + } + + @Override + public void put(final byte[] key, final byte[] value) { + events.add(new Tuple<>(Action.PUT, new Tuple<>(key, value))); + } + + @Override + public void merge(final byte[] key, final byte[] value) { + events.add(new Tuple<>(Action.MERGE, new Tuple<>(key, value))); + } + + @Override + public void delete(final byte[] key) { + events.add(new Tuple<>(Action.DELETE, + new Tuple<byte[], byte[]>(key, null))); + } + + @Override + public void logData(final byte[] blob) {
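+ // log-data records carry no key, so only the blob is captured (the + // tuple's key component stays null)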
+ events.add(new Tuple<>(Action.LOG, + new Tuple<byte[], byte[]>(null, blob))); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java new file mode 100755 index 0000000000..ba5d003970 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/WriteBatchTest.java @@ -0,0 +1,267 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.UnsupportedEncodingException; +import java.util.Arrays; + +import static org.assertj.core.api.Assertions.assertThat; + +/** + * This class mimics the db/write_batch_test.cc + * in the c++ rocksdb library. + * <p/> + * Not ported yet: + * <p/>
+ * Continue(); + * PutGatherSlices(); + */ +public class WriteBatchTest { + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void emptyWriteBatch() { + try (final WriteBatch batch = new WriteBatch()) { + assertThat(batch.count()).isEqualTo(0); + } + } + + @Test + public void multipleBatchOperations() + throws UnsupportedEncodingException { + try (WriteBatch batch = new WriteBatch()) { + batch.put("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII")); + batch.remove("box".getBytes("US-ASCII")); + batch.put("baz".getBytes("US-ASCII"), "boo".getBytes("US-ASCII")); + + WriteBatchTestInternalHelper.setSequence(batch, 100); + assertThat(WriteBatchTestInternalHelper.sequence(batch)). + isNotNull(). + isEqualTo(100); + assertThat(batch.count()).isEqualTo(3); + assertThat(new String(getContents(batch), "US-ASCII")). + isEqualTo("Put(baz, boo)@102" + + "Delete(box)@101" + + "Put(foo, bar)@100"); + } + } + + @Test + public void testAppendOperation() + throws UnsupportedEncodingException { + try (final WriteBatch b1 = new WriteBatch(); + final WriteBatch b2 = new WriteBatch()) { + WriteBatchTestInternalHelper.setSequence(b1, 200); + WriteBatchTestInternalHelper.setSequence(b2, 300); + WriteBatchTestInternalHelper.append(b1, b2); + assertThat(getContents(b1).length).isEqualTo(0); + assertThat(b1.count()).isEqualTo(0); + b2.put("a".getBytes("US-ASCII"), "va".getBytes("US-ASCII")); + WriteBatchTestInternalHelper.append(b1, b2); + assertThat("Put(a, va)@200".equals(new String(getContents(b1), + "US-ASCII"))).isTrue(); + assertThat(b1.count()).isEqualTo(1); + b2.clear(); + b2.put("b".getBytes("US-ASCII"), "vb".getBytes("US-ASCII")); + WriteBatchTestInternalHelper.append(b1, b2); + assertThat(("Put(a, va)@200" + + "Put(b, vb)@201") + .equals(new String(getContents(b1), "US-ASCII"))).isTrue(); + assertThat(b1.count()).isEqualTo(2); + b2.remove("foo".getBytes("US-ASCII")); + WriteBatchTestInternalHelper.append(b1, b2); + assertThat(("Put(a, va)@200" + + "Put(b, vb)@202" + + "Put(b, vb)@201" + + "Delete(foo)@203") + .equals(new String(getContents(b1), "US-ASCII"))).isTrue(); + assertThat(b1.count()).isEqualTo(4); + } + } + + @Test + public void blobOperation() + throws UnsupportedEncodingException { + try (final WriteBatch batch = new WriteBatch()) { + batch.put("k1".getBytes("US-ASCII"), "v1".getBytes("US-ASCII")); + batch.put("k2".getBytes("US-ASCII"), "v2".getBytes("US-ASCII")); + batch.put("k3".getBytes("US-ASCII"), "v3".getBytes("US-ASCII")); + batch.putLogData("blob1".getBytes("US-ASCII")); + batch.remove("k2".getBytes("US-ASCII")); + batch.putLogData("blob2".getBytes("US-ASCII")); + batch.merge("foo".getBytes("US-ASCII"), "bar".getBytes("US-ASCII")); + assertThat(batch.count()).isEqualTo(5); + assertThat(("Merge(foo, bar)@4" + + "Put(k1, v1)@0" + + "Delete(k2)@3" + + "Put(k2, v2)@1" + + "Put(k3, v3)@2") + .equals(new String(getContents(batch), "US-ASCII"))).isTrue(); + } + } + + @Test + public void savePoints() + throws UnsupportedEncodingException, RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + batch.put("k1".getBytes("US-ASCII"), "v1".getBytes("US-ASCII")); + batch.put("k2".getBytes("US-ASCII"), "v2".getBytes("US-ASCII")); + batch.put("k3".getBytes("US-ASCII"), "v3".getBytes("US-ASCII")); + + assertThat(getFromWriteBatch(batch, "k1")).isEqualTo("v1"); + assertThat(getFromWriteBatch(batch, "k2")).isEqualTo("v2"); + assertThat(getFromWriteBatch(batch,
"k3")).isEqualTo("v3"); + + + batch.setSavePoint(); + + batch.remove("k2".getBytes("US-ASCII")); + batch.put("k3".getBytes("US-ASCII"), "v3-2".getBytes("US-ASCII")); + + assertThat(getFromWriteBatch(batch, "k2")).isNull(); + assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3-2"); + + + batch.setSavePoint(); + + batch.put("k3".getBytes("US-ASCII"), "v3-3".getBytes("US-ASCII")); + batch.put("k4".getBytes("US-ASCII"), "v4".getBytes("US-ASCII")); + + assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3-3"); + assertThat(getFromWriteBatch(batch, "k4")).isEqualTo("v4"); + + + batch.rollbackToSavePoint(); + + assertThat(getFromWriteBatch(batch, "k2")).isNull(); + assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3-2"); + assertThat(getFromWriteBatch(batch, "k4")).isNull(); + + + batch.rollbackToSavePoint(); + + assertThat(getFromWriteBatch(batch, "k1")).isEqualTo("v1"); + assertThat(getFromWriteBatch(batch, "k2")).isEqualTo("v2"); + assertThat(getFromWriteBatch(batch, "k3")).isEqualTo("v3"); + assertThat(getFromWriteBatch(batch, "k4")).isNull(); + } + } + + @Test(expected = RocksDBException.class) + public void restorePoints_withoutSavePoints() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + batch.rollbackToSavePoint(); + } + } + + @Test(expected = RocksDBException.class) + public void restorePoints_withoutSavePoints_nested() throws RocksDBException { + try (final WriteBatch batch = new WriteBatch()) { + + batch.setSavePoint(); + batch.rollbackToSavePoint(); + + // without previous corresponding setSavePoint + batch.rollbackToSavePoint(); + } + } + + static byte[] getContents(final WriteBatch wb) { + return getContents(wb.nativeHandle_); + } + + static String getFromWriteBatch(final WriteBatch wb, final String key) + throws RocksDBException, UnsupportedEncodingException { + final WriteBatchGetter getter = + new WriteBatchGetter(key.getBytes("US-ASCII")); + wb.iterate(getter); + if(getter.getValue() != null) { + return new String(getter.getValue(), "US-ASCII"); + } else { + return null; + } + } + + private static native byte[] getContents(final long writeBatchHandle); + + private static class WriteBatchGetter extends WriteBatch.Handler { + + private final byte[] key; + private byte[] value; + + public WriteBatchGetter(final byte[] key) { + this.key = key; + } + + public byte[] getValue() { + return value; + } + + @Override + public void put(final byte[] key, final byte[] value) { + if(Arrays.equals(this.key, key)) { + this.value = value; + } + } + + @Override + public void merge(final byte[] key, final byte[] value) { + if(Arrays.equals(this.key, key)) { + throw new UnsupportedOperationException(); + } + } + + @Override + public void delete(final byte[] key) { + if(Arrays.equals(this.key, key)) { + this.value = null; + } + } + + @Override + public void logData(final byte[] blob) { + } + } +} + +/** + * Package-private class which provides java api to access + * c++ WriteBatchInternal. 
+ */ +class WriteBatchTestInternalHelper { + static void setSequence(final WriteBatch wb, final long sn) { + setSequence(wb.nativeHandle_, sn); + } + + static long sequence(final WriteBatch wb) { + return sequence(wb.nativeHandle_); + } + + static void append(final WriteBatch wb1, final WriteBatch wb2) { + append(wb1.nativeHandle_, wb2.nativeHandle_); + } + + private static native void setSequence(final long writeBatchHandle, + final long sn); + + private static native long sequence(final long writeBatchHandle); + + private static native void append(final long writeBatchHandle1, + final long writeBatchHandle2); +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java new file mode 100755 index 0000000000..66e1c8966b --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/WriteBatchThreadedTest.java @@ -0,0 +1,104 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb; + +import org.junit.After; +import org.junit.Before; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; +import org.junit.runner.RunWith; +import org.junit.runners.Parameterized; +import org.junit.runners.Parameterized.Parameter; +import org.junit.runners.Parameterized.Parameters; + +import java.nio.ByteBuffer; +import java.util.*; +import java.util.concurrent.*; + +@RunWith(Parameterized.class) +public class WriteBatchThreadedTest { + + @Parameters(name = "WriteBatchThreadedTest(threadCount={0})") + public static Iterable data() { + return Arrays.asList(new Integer[]{1, 10, 50, 100}); + } + + @Parameter + public int threadCount; + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + RocksDB db; + + @Before + public void setUp() throws Exception { + RocksDB.loadLibrary(); + final Options options = new Options() + .setCreateIfMissing(true) + .setIncreaseParallelism(32); + db = RocksDB.open(options, dbFolder.getRoot().getAbsolutePath()); + assert (db != null); + } + + @After + public void tearDown() throws Exception { + if (db != null) { + db.close(); + } + } + + @Test + public void threadedWrites() throws InterruptedException, ExecutionException { + final List> callables = new ArrayList<>(); + for (int i = 0; i < 100; i++) { + final int offset = i * 100; + callables.add(new Callable() { + @Override + public Void call() throws RocksDBException { + final WriteBatch wb = new WriteBatch(); + for (int i = offset; i < offset + 100; i++) { + wb.put(ByteBuffer.allocate(4).putInt(i).array(), + "parallel rocks test".getBytes()); + } + db.write(new WriteOptions(), wb); + + return null; + } + }); + } + + //submit the callables + final ExecutorService executorService = + Executors.newFixedThreadPool(threadCount); + try { + final ExecutorCompletionService completionService = + new ExecutorCompletionService<>(executorService); + final Set> futures = new HashSet<>(); + for (final Callable callable : callables) { + futures.add(completionService.submit(callable)); + } + + while (futures.size() > 0) { + final Future future = completionService.take(); + futures.remove(future); + + try { + future.get(); + } catch (final ExecutionException e) { + for (final Future f : futures) { + f.cancel(true); + } + + 
throw e; + } + } + } finally { + executorService.shutdown(); + executorService.awaitTermination(10, TimeUnit.SECONDS); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java new file mode 100755 index 0000000000..726c6f2915 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/WriteBatchWithIndexTest.java @@ -0,0 +1,315 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Rule; +import org.junit.Test; +import org.junit.rules.TemporaryFolder; + +import java.io.UnsupportedEncodingException; +import java.nio.ByteBuffer; +import java.util.Arrays; + +import static org.assertj.core.api.Assertions.assertThat; + + +public class WriteBatchWithIndexTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Rule + public TemporaryFolder dbFolder = new TemporaryFolder(); + + @Test + public void readYourOwnWrites() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + final byte[] k1 = "key1".getBytes(); + final byte[] v1 = "value1".getBytes(); + final byte[] k2 = "key2".getBytes(); + final byte[] v2 = "value2".getBytes(); + + db.put(k1, v1); + db.put(k2, v2); + + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); + final RocksIterator base = db.newIterator(); + final RocksIterator it = wbwi.newIteratorWithBase(base)) { + + it.seek(k1); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k1); + assertThat(it.value()).isEqualTo(v1); + + it.seek(k2); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k2); + assertThat(it.value()).isEqualTo(v2); + + //put data to the write batch and make sure we can read it. 
+ final byte[] k3 = "key3".getBytes(); + final byte[] v3 = "value3".getBytes(); + wbwi.put(k3, v3); + it.seek(k3); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k3); + assertThat(it.value()).isEqualTo(v3); + + //update k2 in the write batch and check the value + final byte[] v2Other = "otherValue2".getBytes(); + wbwi.put(k2, v2Other); + it.seek(k2); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k2); + assertThat(it.value()).isEqualTo(v2Other); + + //remove k1 and make sure we can read back the write + wbwi.remove(k1); + it.seek(k1); + assertThat(it.key()).isNotEqualTo(k1); + + //reinsert k1 and make sure we see the new value + final byte[] v1Other = "otherValue1".getBytes(); + wbwi.put(k1, v1Other); + it.seek(k1); + assertThat(it.isValid()).isTrue(); + assertThat(it.key()).isEqualTo(k1); + assertThat(it.value()).isEqualTo(v1Other); + } + } + } + + @Test + public void write_writeBatchWithIndex() throws RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + + final byte[] k1 = "key1".getBytes(); + final byte[] v1 = "value1".getBytes(); + final byte[] k2 = "key2".getBytes(); + final byte[] v2 = "value2".getBytes(); + + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + wbwi.put(k1, v1); + wbwi.put(k2, v2); + + db.write(new WriteOptions(), wbwi); + } + + assertThat(db.get(k1)).isEqualTo(v1); + assertThat(db.get(k2)).isEqualTo(v2); + } + } + + @Test + public void iterator() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) { + + final String k1 = "key1"; + final String v1 = "value1"; + final String k2 = "key2"; + final String v2 = "value2"; + final String k3 = "key3"; + final String v3 = "value3"; + final byte[] k1b = k1.getBytes(); + final byte[] v1b = v1.getBytes(); + final byte[] k2b = k2.getBytes(); + final byte[] v2b = v2.getBytes(); + final byte[] k3b = k3.getBytes(); + final byte[] v3b = v3.getBytes(); + + //add put records + wbwi.put(k1b, v1b); + wbwi.put(k2b, v2b); + wbwi.put(k3b, v3b); + + //add a deletion record + final String k4 = "key4"; + final byte[] k4b = k4.getBytes(); + wbwi.remove(k4b); + + final WBWIRocksIterator.WriteEntry[] expected = { + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, + new DirectSlice(k1), new DirectSlice(v1)), + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, + new DirectSlice(k2), new DirectSlice(v2)), + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, + new DirectSlice(k3), new DirectSlice(v3)), + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.DELETE, + new DirectSlice(k4), DirectSlice.NONE) + }; + + try (final WBWIRocksIterator it = wbwi.newIterator()) { + //direct access - seek to key offsets + final int[] testOffsets = {2, 0, 1, 3}; + + for (int i = 0; i < testOffsets.length; i++) { + final int testOffset = testOffsets[i]; + final byte[] key = toArray(expected[testOffset].getKey().data()); + + it.seek(key); + assertThat(it.isValid()).isTrue(); + + final WBWIRocksIterator.WriteEntry entry = it.entry(); + assertThat(entry.equals(expected[testOffset])).isTrue(); + } + + //forward iterative access + int i = 0; + for (it.seekToFirst(); it.isValid(); it.next()) { + assertThat(it.entry().equals(expected[i++])).isTrue(); + } + + //reverse iterative access + i = expected.length - 1; + for (it.seekToLast(); it.isValid(); it.prev()) { + 
assertThat(it.entry().equals(expected[i--])).isTrue(); + } + } + } + } + + @Test + public void zeroByteTests() { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true)) { + final byte[] zeroByteValue = new byte[]{0, 0}; + //add zero byte value + wbwi.put(zeroByteValue, zeroByteValue); + + final ByteBuffer buffer = ByteBuffer.allocateDirect(zeroByteValue.length); + buffer.put(zeroByteValue); + + WBWIRocksIterator.WriteEntry[] expected = { + new WBWIRocksIterator.WriteEntry(WBWIRocksIterator.WriteType.PUT, + new DirectSlice(buffer, zeroByteValue.length), + new DirectSlice(buffer, zeroByteValue.length)) + }; + + try (final WBWIRocksIterator it = wbwi.newIterator()) { + it.seekToFirst(); + assertThat(it.entry().equals(expected[0])).isTrue(); + assertThat(it.entry().hashCode() == expected[0].hashCode()).isTrue(); + } + } + } + + @Test + public void savePoints() + throws UnsupportedEncodingException, RocksDBException { + try (final Options options = new Options().setCreateIfMissing(true); + final RocksDB db = RocksDB.open(options, + dbFolder.getRoot().getAbsolutePath())) { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex(true); + final ReadOptions readOptions = new ReadOptions()) { + wbwi.put("k1".getBytes(), "v1".getBytes()); + wbwi.put("k2".getBytes(), "v2".getBytes()); + wbwi.put("k3".getBytes(), "v3".getBytes()); + + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k1")) + .isEqualTo("v1"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2")) + .isEqualTo("v2"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3")) + .isEqualTo("v3"); + + + wbwi.setSavePoint(); + + wbwi.remove("k2".getBytes()); + wbwi.put("k3".getBytes(), "v3-2".getBytes()); + + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2")) + .isNull(); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3")) + .isEqualTo("v3-2"); + + + wbwi.setSavePoint(); + + wbwi.put("k3".getBytes(), "v3-3".getBytes()); + wbwi.put("k4".getBytes(), "v4".getBytes()); + + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3")) + .isEqualTo("v3-3"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k4")) + .isEqualTo("v4"); + + + wbwi.rollbackToSavePoint(); + + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2")) + .isNull(); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3")) + .isEqualTo("v3-2"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k4")) + .isNull(); + + + wbwi.rollbackToSavePoint(); + + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k1")) + .isEqualTo("v1"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k2")) + .isEqualTo("v2"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k3")) + .isEqualTo("v3"); + assertThat(getFromWriteBatchWithIndex(db, readOptions, wbwi, "k4")) + .isNull(); + } + } + } + + @Test(expected = RocksDBException.class) + public void restorePoints_withoutSavePoints() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + wbwi.rollbackToSavePoint(); + } + } + + @Test(expected = RocksDBException.class) + public void restorePoints_withoutSavePoints_nested() throws RocksDBException { + try (final WriteBatchWithIndex wbwi = new WriteBatchWithIndex()) { + + wbwi.setSavePoint(); + wbwi.rollbackToSavePoint(); + + // without previous corresponding setSavePoint + wbwi.rollbackToSavePoint(); + } + } + + private static String getFromWriteBatchWithIndex(final RocksDB 
db, + final ReadOptions readOptions, final WriteBatchWithIndex wbwi, + final String skey) { + final byte[] key = skey.getBytes(); + try(final RocksIterator baseIterator = db.newIterator(readOptions); + final RocksIterator iterator = wbwi.newIteratorWithBase(baseIterator)) { + iterator.seek(key); + + // Arrays.equals(key, iterator.key()) ensures an exact match in Rocks, + // instead of a nearest match + return iterator.isValid() && + Arrays.equals(key, iterator.key()) ? + new String(iterator.value()) : null; + } + } + + private byte[] toArray(final ByteBuffer buf) { + final byte[] ary = new byte[buf.remaining()]; + buf.get(ary); + return ary; + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java new file mode 100755 index 0000000000..c6af5c8185 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/WriteOptionsTest.java @@ -0,0 +1,32 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb; + +import org.junit.ClassRule; +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class WriteOptionsTest { + + @ClassRule + public static final RocksMemoryResource rocksMemoryResource = + new RocksMemoryResource(); + + @Test + public void writeOptions() { + try (final WriteOptions writeOptions = new WriteOptions()) { + writeOptions.setDisableWAL(true); + assertThat(writeOptions.disableWAL()).isTrue(); + writeOptions.setDisableWAL(false); + assertThat(writeOptions.disableWAL()).isFalse(); + writeOptions.setSync(true); + assertThat(writeOptions.sync()).isTrue(); + writeOptions.setSync(false); + assertThat(writeOptions.sync()).isFalse(); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java b/external/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java new file mode 100755 index 0000000000..044f96b941 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/test/RocksJunitRunner.java @@ -0,0 +1,68 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +package org.rocksdb.test; + +import org.junit.internal.JUnitSystem; +import org.junit.internal.RealSystem; +import org.junit.internal.TextListener; +import org.junit.runner.Description; +import org.junit.runner.JUnitCore; +import org.junit.runner.Result; + +import java.util.ArrayList; +import java.util.List; + +/** + * Custom Junit Runner to print also Test classes + * and executed methods to command prompt. + */ +public class RocksJunitRunner { + + /** + * Listener which overrides default functionality + * to print class and method to system out. 
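The WriteOptionsTest above only toggles the two flags; in application code the same setters express the classic durability trade-off. A hedged sketch (illustrative only, using just the API the test exercises):

import org.rocksdb.RocksDB;
import org.rocksdb.WriteOptions;

public class WriteOptionsExample {
  public static void main(final String[] args) {
    RocksDB.loadLibrary();
    // Durable: each write is synced to storage before the call returns.
    try (final WriteOptions durable = new WriteOptions().setSync(true);
         // Fast but volatile: skips the write-ahead log entirely, so these
         // two flags are normally not combined.
         final WriteOptions volatileWrites =
             new WriteOptions().setDisableWAL(true)) {
      System.out.println(durable.sync() + " " + volatileWrites.disableWAL());
    }
  }
}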
+ */ + static class RocksJunitListener extends TextListener { + + /** + * RocksJunitListener constructor + * + * @param system JUnitSystem + */ + public RocksJunitListener(JUnitSystem system) { + super(system); + } + + @Override + public void testStarted(Description description) { + System.out.format("Run: %s testing now -> %s \n", + description.getClassName(), + description.getMethodName()); + } + } + + /** + * Main method to execute tests + * + * @param args Test classes as String names + */ + public static void main(String[] args){ + JUnitCore runner = new JUnitCore(); + final JUnitSystem system = new RealSystem(); + runner.addListener(new RocksJunitListener(system)); + try { + List> classes = new ArrayList<>(); + for (String arg : args) { + classes.add(Class.forName(arg)); + } + final Result result = runner.run(classes.toArray(new Class[1])); + if(!result.wasSuccessful()) { + System.exit(-1); + } + } catch (ClassNotFoundException e) { + e.printStackTrace(); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java new file mode 100755 index 0000000000..01ea52c49c --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/util/BytewiseComparatorTest.java @@ -0,0 +1,480 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +package org.rocksdb.util; + +import org.junit.Test; +import org.rocksdb.*; +import org.rocksdb.Comparator; + +import java.io.IOException; +import java.nio.charset.StandardCharsets; +import java.nio.file.FileVisitResult; +import java.nio.file.Files; +import java.nio.file.Path; +import java.nio.file.SimpleFileVisitor; +import java.nio.file.attribute.BasicFileAttributes; +import java.util.*; + +import static org.junit.Assert.*; + +/** + * This is a direct port of various C++ + * tests from db/comparator_db_test.cc + * and some code to adapt it to RocksJava + */ +public class BytewiseComparatorTest { + + /** + * Open the database using the C++ BytewiseComparatorImpl + * and test the results against our Java BytewiseComparator + */ + @Test + public void java_vs_cpp_bytewiseComparator() + throws IOException, RocksDBException { + for(int rand_seed = 301; rand_seed < 306; rand_seed++) { + final Path dbDir = Files.createTempDirectory("comparator_db_test"); + try(final RocksDB db = openDatabase(dbDir, + BuiltinComparator.BYTEWISE_COMPARATOR)) { + final Random rnd = new Random(rand_seed); + doRandomIterationTest( + db, + toJavaComparator(new BytewiseComparator(new ComparatorOptions())), + Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i"), + rnd, + 8, 100, 3 + ); + } finally { + removeData(dbDir); + } + } + } + + /** + * Open the database using the Java BytewiseComparator + * and test the results against another Java BytewiseComparator + */ + @Test + public void java_vs_java_bytewiseComparator() + throws IOException, RocksDBException { + for(int rand_seed = 301; rand_seed < 306; rand_seed++) { + final Path dbDir = Files.createTempDirectory("comparator_db_test"); + try(final RocksDB db = openDatabase(dbDir, new BytewiseComparator( + new ComparatorOptions()))) { + final Random rnd = new Random(rand_seed); + doRandomIterationTest( + db, + toJavaComparator(new BytewiseComparator(new 
ComparatorOptions())), + Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i"), + rnd, + 8, 100, 3 + ); + } finally { + removeData(dbDir); + } + } + } + + /** + * Open the database using the C++ BytewiseComparatorImpl + * and test the results against our Java DirectBytewiseComparator + */ + @Test + public void java_vs_cpp_directBytewiseComparator() + throws IOException, RocksDBException { + for(int rand_seed = 301; rand_seed < 306; rand_seed++) { + final Path dbDir = Files.createTempDirectory("comparator_db_test"); + try(final RocksDB db = openDatabase(dbDir, + BuiltinComparator.BYTEWISE_COMPARATOR)) { + final Random rnd = new Random(rand_seed); + doRandomIterationTest( + db, + toJavaComparator(new DirectBytewiseComparator( + new ComparatorOptions()) + ), + Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i"), + rnd, + 8, 100, 3 + ); + } finally { + removeData(dbDir); + } + } + } + + /** + * Open the database using the Java DirectBytewiseComparator + * and test the results against another Java DirectBytewiseComparator + */ + @Test + public void java_vs_java_directBytewiseComparator() + throws IOException, RocksDBException { + for(int rand_seed = 301; rand_seed < 306; rand_seed++) { + final Path dbDir = Files.createTempDirectory("comparator_db_test"); + try(final RocksDB db = openDatabase(dbDir, new DirectBytewiseComparator( + new ComparatorOptions()))) { + final Random rnd = new Random(rand_seed); + doRandomIterationTest( + db, + toJavaComparator(new DirectBytewiseComparator( + new ComparatorOptions()) + ), + Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i"), + rnd, + 8, 100, 3 + ); + } finally { + removeData(dbDir); + } + } + } + + /** + * Open the database using the C++ ReverseBytewiseComparatorImpl + * and test the results against our Java ReverseBytewiseComparator + */ + @Test + public void java_vs_cpp_reverseBytewiseComparator() + throws IOException, RocksDBException { + for(int rand_seed = 301; rand_seed < 306; rand_seed++) { + final Path dbDir = Files.createTempDirectory("comparator_db_test"); + try(final RocksDB db = openDatabase(dbDir, + BuiltinComparator.REVERSE_BYTEWISE_COMPARATOR)) { + final Random rnd = new Random(rand_seed); + doRandomIterationTest( + db, + toJavaComparator( + new ReverseBytewiseComparator(new ComparatorOptions()) + ), + Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i"), + rnd, + 8, 100, 3 + ); + } finally { + removeData(dbDir); + } + } + } + + /** + * Open the database using the Java ReverseBytewiseComparator + * and test the results against another Java ReverseBytewiseComparator + */ + @Test + public void java_vs_java_reverseBytewiseComparator() + throws IOException, RocksDBException { + + for(int rand_seed = 301; rand_seed < 306; rand_seed++) { + final Path dbDir = Files.createTempDirectory("comparator_db_test"); + try(final RocksDB db = openDatabase(dbDir, new ReverseBytewiseComparator( + new ComparatorOptions()))) { + final Random rnd = new Random(rand_seed); + doRandomIterationTest( + db, + toJavaComparator( + new ReverseBytewiseComparator(new ComparatorOptions()) + ), + Arrays.asList("a", "b", "c", "d", "e", "f", "g", "h", "i"), + rnd, + 8, 100, 3 + ); + } finally { + removeData(dbDir); + } + } + } + + private void doRandomIterationTest( + final RocksDB db, final java.util.Comparator javaComparator, + final List source_strings, final Random rnd, + final int num_writes, final int num_iter_ops, + final int num_trigger_flush) throws RocksDBException { + + final TreeMap map = new TreeMap<>(javaComparator); + + for (int i = 0; i 
< num_writes; i++) { + if (num_trigger_flush > 0 && i != 0 && i % num_trigger_flush == 0) { + db.flush(new FlushOptions()); + } + + final int type = rnd.nextInt(2); + final int index = rnd.nextInt(source_strings.size()); + final String key = source_strings.get(index); + switch (type) { + case 0: + // put + map.put(key, key); + db.put(new WriteOptions(), bytes(key), bytes(key)); + break; + case 1: + // delete + if (map.containsKey(key)) { + map.remove(key); + } + db.remove(new WriteOptions(), bytes(key)); + break; + + default: + fail("Should not be able to generate random outside range 1..2"); + } + } + + try(final RocksIterator iter = db.newIterator(new ReadOptions())) { + final KVIter result_iter = new KVIter(map); + + boolean is_valid = false; + for (int i = 0; i < num_iter_ops; i++) { + // Random walk and make sure iter and result_iter returns the + // same key and value + final int type = rnd.nextInt(6); + iter.status(); + switch (type) { + case 0: + // Seek to First + iter.seekToFirst(); + result_iter.seekToFirst(); + break; + case 1: + // Seek to last + iter.seekToLast(); + result_iter.seekToLast(); + break; + case 2: { + // Seek to random key + final int key_idx = rnd.nextInt(source_strings.size()); + final String key = source_strings.get(key_idx); + iter.seek(bytes(key)); + result_iter.seek(bytes(key)); + break; + } + case 3: + // Next + if (is_valid) { + iter.next(); + result_iter.next(); + } else { + continue; + } + break; + case 4: + // Prev + if (is_valid) { + iter.prev(); + result_iter.prev(); + } else { + continue; + } + break; + default: { + assert (type == 5); + final int key_idx = rnd.nextInt(source_strings.size()); + final String key = source_strings.get(key_idx); + final byte[] result = db.get(new ReadOptions(), bytes(key)); + if (!map.containsKey(key)) { + assertNull(result); + } else { + assertArrayEquals(bytes(map.get(key)), result); + } + break; + } + } + + assertEquals(result_iter.isValid(), iter.isValid()); + + is_valid = iter.isValid(); + + if (is_valid) { + assertArrayEquals(bytes(result_iter.key()), iter.key()); + + //note that calling value on a non-valid iterator from the Java API + //results in a SIGSEGV + assertArrayEquals(bytes(result_iter.value()), iter.value()); + } + } + } + } + + /** + * Open the database using a C++ Comparator + */ + private RocksDB openDatabase( + final Path dbDir, final BuiltinComparator cppComparator) + throws IOException, RocksDBException { + final Options options = new Options() + .setCreateIfMissing(true) + .setComparator(cppComparator); + return RocksDB.open(options, dbDir.toAbsolutePath().toString()); + } + + /** + * Open the database using a Java Comparator + */ + private RocksDB openDatabase( + final Path dbDir, + final AbstractComparator> javaComparator) + throws IOException, RocksDBException { + final Options options = new Options() + .setCreateIfMissing(true) + .setComparator(javaComparator); + return RocksDB.open(options, dbDir.toAbsolutePath().toString()); + } + + private void closeDatabase(final RocksDB db) { + db.close(); + } + + private void removeData(final Path dbDir) throws IOException { + Files.walkFileTree(dbDir, new SimpleFileVisitor() { + @Override + public FileVisitResult visitFile( + final Path file, final BasicFileAttributes attrs) + throws IOException { + Files.delete(file); + return FileVisitResult.CONTINUE; + } + + @Override + public FileVisitResult postVisitDirectory( + final Path dir, final IOException exc) throws IOException { + Files.delete(dir); + return FileVisitResult.CONTINUE; + } + }); + } + + 
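The harness above can exercise any comparator exposed through the Java API. For reference, a minimal hand-rolled subclass in the style of the BytewiseComparator variants under test; the class name ReverseComparator is our illustrative choice, not part of the patch:

import org.rocksdb.Comparator;
import org.rocksdb.ComparatorOptions;
import org.rocksdb.Slice;

public class ReverseComparator extends Comparator {
  public ReverseComparator(final ComparatorOptions opts) {
    super(opts);
  }

  @Override
  public String name() {
    return "example.ReverseComparator";
  }

  @Override
  public int compare(final Slice a, final Slice b) {
    // Negated unsigned lexicographic comparison of the slice contents.
    final byte[] ab = a.data();
    final byte[] bb = b.data();
    final int len = Math.min(ab.length, bb.length);
    for (int i = 0; i < len; i++) {
      final int cmp = (ab[i] & 0xff) - (bb[i] & 0xff);
      if (cmp != 0) {
        return -cmp;
      }
    }
    return -(ab.length - bb.length);
  }
}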
private byte[] bytes(final String s) { + return s.getBytes(StandardCharsets.UTF_8); + } + + private java.util.Comparator toJavaComparator( + final Comparator rocksComparator) { + return new java.util.Comparator() { + @Override + public int compare(final String s1, final String s2) { + return rocksComparator.compare(new Slice(s1), new Slice(s2)); + } + }; + } + + private java.util.Comparator toJavaComparator( + final DirectComparator rocksComparator) { + return new java.util.Comparator() { + @Override + public int compare(final String s1, final String s2) { + return rocksComparator.compare(new DirectSlice(s1), + new DirectSlice(s2)); + } + }; + } + + private class KVIter implements RocksIteratorInterface { + + private final List> entries; + private final java.util.Comparator comparator; + private int offset = -1; + + private int lastPrefixMatchIdx = -1; + private int lastPrefixMatch = 0; + + public KVIter(final TreeMap map) { + this.entries = new ArrayList<>(); + final Iterator> iterator = map.entrySet().iterator(); + while(iterator.hasNext()) { + entries.add(iterator.next()); + } + this.comparator = map.comparator(); + } + + + @Override + public boolean isValid() { + return offset > -1 && offset < entries.size(); + } + + @Override + public void seekToFirst() { + offset = 0; + } + + @Override + public void seekToLast() { + offset = entries.size() - 1; + } + + @Override + public void seek(final byte[] target) { + for(offset = 0; offset < entries.size(); offset++) { + if(comparator.compare(entries.get(offset).getKey(), + (K)new String(target, StandardCharsets.UTF_8)) >= 0) { + return; + } + } + } + + /** + * Is `a` a prefix of `b` + * + * @return The length of the matching prefix, or 0 if it is not a prefix + */ + private int isPrefix(final byte[] a, final byte[] b) { + if(b.length >= a.length) { + for(int i = 0; i < a.length; i++) { + if(a[i] != b[i]) { + return i; + } + } + return a.length; + } else { + return 0; + } + } + + @Override + public void next() { + if(offset < entries.size()) { + offset++; + } + } + + @Override + public void prev() { + if(offset >= 0) { + offset--; + } + } + + @Override + public void status() throws RocksDBException { + if(offset < 0 || offset >= entries.size()) { + throw new RocksDBException("Index out of bounds. Size is: " + + entries.size() + ", offset is: " + offset); + } + } + + public K key() { + if(!isValid()) { + if(entries.isEmpty()) { + return (K)""; + } else if(offset == -1){ + return entries.get(0).getKey(); + } else if(offset == entries.size()) { + return entries.get(offset - 1).getKey(); + } else { + return (K)""; + } + } else { + return entries.get(offset).getKey(); + } + } + + public V value() { + if(!isValid()) { + return (V)""; + } else { + return entries.get(offset).getValue(); + } + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java new file mode 100755 index 0000000000..2de1c45f74 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/util/EnvironmentTest.java @@ -0,0 +1,177 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
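EnvironmentTest below swaps the private static final OS/ARCH fields of Environment via reflection so each platform branch can be tested on one machine. Extracted into a generic helper, the idiom looks like this (a sketch; it relies on the classic pre-Java-12 Field internals, and the helper name is ours):

import java.lang.reflect.Field;
import java.lang.reflect.Modifier;

public final class StaticFieldPatcher {
  public static void set(final Class<?> clazz, final String fieldName,
                         final Object value) {
    try {
      final Field field = clazz.getDeclaredField(fieldName);
      field.setAccessible(true);
      // Clear the FINAL modifier so the static field becomes writable.
      final Field modifiers = Field.class.getDeclaredField("modifiers");
      modifiers.setAccessible(true);
      modifiers.setInt(field, field.getModifiers() & ~Modifier.FINAL);
      field.set(null, value);
    } catch (NoSuchFieldException | IllegalAccessException e) {
      throw new RuntimeException(e);
    }
  }
}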
+package org.rocksdb.util; + +import org.junit.AfterClass; +import org.junit.BeforeClass; +import org.junit.Test; + +import java.lang.reflect.Field; +import java.lang.reflect.Modifier; + +import static org.assertj.core.api.Assertions.assertThat; + +public class EnvironmentTest { + private final static String ARCH_FIELD_NAME = "ARCH"; + private final static String OS_FIELD_NAME = "OS"; + + private static String INITIAL_OS; + private static String INITIAL_ARCH; + + @BeforeClass + public static void saveState() { + INITIAL_ARCH = getEnvironmentClassField(ARCH_FIELD_NAME); + INITIAL_OS = getEnvironmentClassField(OS_FIELD_NAME); + } + + @Test + public void mac32() { + setEnvironmentClassFields("mac", "32"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".jnilib"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-osx.jnilib"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.dylib"); + } + + @Test + public void mac64() { + setEnvironmentClassFields("mac", "64"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".jnilib"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-osx.jnilib"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.dylib"); + } + + @Test + public void nix32() { + // Linux + setEnvironmentClassFields("Linux", "32"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-linux32.so"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.so"); + // UNIX + setEnvironmentClassFields("Unix", "32"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-linux32.so"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.so"); + // AIX + setEnvironmentClassFields("aix", "32"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-linux32.so"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.so"); + } + + @Test + public void nix64() { + setEnvironmentClassFields("Linux", "x64"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-linux64.so"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.so"); + // UNIX + setEnvironmentClassFields("Unix", "x64"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-linux64.so"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.so"); + // AIX + setEnvironmentClassFields("aix", "x64"); + assertThat(Environment.isWindows()).isFalse(); + assertThat(Environment.getJniLibraryExtension()). 
+ isEqualTo(".so"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-linux64.so"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.so"); + } + + @Test + public void detectWindows(){ + setEnvironmentClassFields("win", "x64"); + assertThat(Environment.isWindows()).isTrue(); + } + + @Test + public void win64() { + setEnvironmentClassFields("win", "x64"); + assertThat(Environment.isWindows()).isTrue(); + assertThat(Environment.getJniLibraryExtension()). + isEqualTo(".dll"); + assertThat(Environment.getJniLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni-win64.dll"); + assertThat(Environment.getSharedLibraryFileName("rocksdb")). + isEqualTo("librocksdbjni.dll"); + } + + @Test(expected = UnsupportedOperationException.class) + public void win32(){ + setEnvironmentClassFields("win", "32"); + Environment.getJniLibraryFileName("rocksdb"); + } + + private void setEnvironmentClassFields(String osName, + String osArch) { + setEnvironmentClassField(OS_FIELD_NAME, osName); + setEnvironmentClassField(ARCH_FIELD_NAME, osArch); + } + + @AfterClass + public static void restoreState() { + setEnvironmentClassField(OS_FIELD_NAME, INITIAL_OS); + setEnvironmentClassField(ARCH_FIELD_NAME, INITIAL_ARCH); + } + + private static String getEnvironmentClassField(String fieldName) { + final Field field; + try { + field = Environment.class.getDeclaredField(fieldName); + field.setAccessible(true); + final Field modifiersField = Field.class.getDeclaredField("modifiers"); + modifiersField.setAccessible(true); + modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + return (String)field.get(null); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } + + private static void setEnvironmentClassField(String fieldName, String value) { + final Field field; + try { + field = Environment.class.getDeclaredField(fieldName); + field.setAccessible(true); + final Field modifiersField = Field.class.getDeclaredField("modifiers"); + modifiersField.setAccessible(true); + modifiersField.setInt(field, field.getModifiers() & ~Modifier.FINAL); + field.set(null, value); + } catch (NoSuchFieldException | IllegalAccessException e) { + throw new RuntimeException(e); + } + } +} diff --git a/external/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java b/external/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java new file mode 100755 index 0000000000..e74c041030 --- /dev/null +++ b/external/rocksdb/java/src/test/java/org/rocksdb/util/SizeUnitTest.java @@ -0,0 +1,27 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+package org.rocksdb.util; + +import org.junit.Test; + +import static org.assertj.core.api.Assertions.assertThat; + +public class SizeUnitTest { + + public static final long COMPUTATION_UNIT = 1024L; + + @Test + public void sizeUnit() { + assertThat(SizeUnit.KB).isEqualTo(COMPUTATION_UNIT); + assertThat(SizeUnit.MB).isEqualTo( + SizeUnit.KB * COMPUTATION_UNIT); + assertThat(SizeUnit.GB).isEqualTo( + SizeUnit.MB * COMPUTATION_UNIT); + assertThat(SizeUnit.TB).isEqualTo( + SizeUnit.GB * COMPUTATION_UNIT); + assertThat(SizeUnit.PB).isEqualTo( + SizeUnit.TB * COMPUTATION_UNIT); + } +} diff --git a/external/rocksdb/memtable/hash_cuckoo_rep.cc b/external/rocksdb/memtable/hash_cuckoo_rep.cc new file mode 100755 index 0000000000..6ae3e098bf --- /dev/null +++ b/external/rocksdb/memtable/hash_cuckoo_rep.cc @@ -0,0 +1,652 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// + +#ifndef ROCKSDB_LITE +#include "memtable/hash_cuckoo_rep.h" + +#include +#include +#include +#include +#include +#include +#include + +#include "db/memtable.h" +#include "db/skiplist.h" +#include "memtable/stl_wrappers.h" +#include "port/port.h" +#include "rocksdb/memtablerep.h" +#include "util/murmurhash.h" + +namespace rocksdb { +namespace { + +// the default maximum size of the cuckoo path searching queue +static const int kCuckooPathMaxSearchSteps = 100; + +struct CuckooStep { + static const int kNullStep = -1; + // the bucket id in the cuckoo array. + int bucket_id_; + // index of cuckoo-step array that points to its previous step, + // -1 if it is the beginning step. + int prev_step_id_; + // the depth of the current step.
+ unsigned int depth_; + + CuckooStep() : bucket_id_(-1), prev_step_id_(kNullStep), depth_(1) {} + + // MSVC does not support = default yet + CuckooStep(CuckooStep&& o) ROCKSDB_NOEXCEPT { *this = std::move(o); } + + CuckooStep& operator=(CuckooStep&& rhs) { + bucket_id_ = std::move(rhs.bucket_id_); + prev_step_id_ = std::move(rhs.prev_step_id_); + depth_ = std::move(rhs.depth_); + return *this; + } + + CuckooStep(const CuckooStep&) = delete; + CuckooStep& operator=(const CuckooStep&) = delete; + + CuckooStep(int bucket_id, int prev_step_id, int depth) + : bucket_id_(bucket_id), prev_step_id_(prev_step_id), depth_(depth) {} +}; + +class HashCuckooRep : public MemTableRep { + public: + explicit HashCuckooRep(const MemTableRep::KeyComparator& compare, + MemTableAllocator* allocator, + const size_t bucket_count, + const unsigned int hash_func_count, + const size_t approximate_entry_size) + : MemTableRep(allocator), + compare_(compare), + allocator_(allocator), + bucket_count_(bucket_count), + approximate_entry_size_(approximate_entry_size), + cuckoo_path_max_depth_(kDefaultCuckooPathMaxDepth), + occupied_count_(0), + hash_function_count_(hash_func_count), + backup_table_(nullptr) { + char* mem = reinterpret_cast<char*>( + allocator_->Allocate(sizeof(std::atomic<char*>) * bucket_count_)); + cuckoo_array_ = new (mem) std::atomic<char*>[bucket_count_]; + for (unsigned int bid = 0; bid < bucket_count_; ++bid) { + cuckoo_array_[bid].store(nullptr, std::memory_order_relaxed); + } + + cuckoo_path_ = reinterpret_cast<int*>( + allocator_->Allocate(sizeof(int) * (cuckoo_path_max_depth_ + 1))); + is_nearly_full_ = false; + } + + // return false, indicating HashCuckooRep does not support merge operator. + virtual bool IsMergeOperatorSupported() const override { return false; } + + // return false, indicating HashCuckooRep does not support snapshot. + virtual bool IsSnapshotSupported() const override { return false; } + + // Returns true iff an entry that compares equal to key is in the collection. + virtual bool Contains(const char* internal_key) const override; + + virtual ~HashCuckooRep() override {} + + // Insert the specified key (internal_key) into the mem-table. Assertion + // fails if + // the current mem-table already contains the specified key. + virtual void Insert(KeyHandle handle) override; + + // This function returns bucket_count_ * approximate_entry_size_ when any + // of the following happens to disallow further write operations: + // 1. when the fullness reaches kMaxFullness. + // 2. when the backup_table_ is used. + // + // otherwise, this function will always return 0. + virtual size_t ApproximateMemoryUsage() override { + if (is_nearly_full_) { + return bucket_count_ * approximate_entry_size_; + } + return 0; + } + + virtual void Get(const LookupKey& k, void* callback_args, + bool (*callback_func)(void* arg, + const char* entry)) override; + + class Iterator : public MemTableRep::Iterator { + std::shared_ptr<std::vector<const char*>> bucket_; + std::vector<const char*>::const_iterator mutable cit_; + const KeyComparator& compare_; + std::string tmp_; // For passing to EncodeKey + bool mutable sorted_; + void DoSort() const; + + public: + explicit Iterator(std::shared_ptr<std::vector<const char*>> bucket, + const KeyComparator& compare); + + // Initialize an iterator over the specified collection. + // The returned iterator is not valid. + // explicit Iterator(const MemTableRep* collection); + virtual ~Iterator() override{}; + + // Returns true iff the iterator is positioned at a valid node.
+ virtual bool Valid() const override; + + // Returns the key at the current position. + // REQUIRES: Valid() + virtual const char* key() const override; + + // Advances to the next position. + // REQUIRES: Valid() + virtual void Next() override; + + // Advances to the previous position. + // REQUIRES: Valid() + virtual void Prev() override; + + // Advance to the first entry with a key >= target + virtual void Seek(const Slice& user_key, const char* memtable_key) override; + + // Position at the first entry in collection. + // Final state of iterator is Valid() iff collection is not empty. + virtual void SeekToFirst() override; + + // Position at the last entry in collection. + // Final state of iterator is Valid() iff collection is not empty. + virtual void SeekToLast() override; + }; + + struct CuckooStepBuffer { + CuckooStepBuffer() : write_index_(0), read_index_(0) {} + ~CuckooStepBuffer() {} + + int write_index_; + int read_index_; + CuckooStep steps_[kCuckooPathMaxSearchSteps]; + + CuckooStep& NextWriteBuffer() { return steps_[write_index_++]; } + + inline const CuckooStep& ReadNext() { return steps_[read_index_++]; } + + inline bool HasNewWrite() { return write_index_ > read_index_; } + + inline void reset() { + write_index_ = 0; + read_index_ = 0; + } + + inline bool IsFull() { return write_index_ >= kCuckooPathMaxSearchSteps; } + + // returns the number of steps that have been read + inline int ReadCount() { return read_index_; } + + // returns the number of steps that have been written to the buffer. + inline int WriteCount() { return write_index_; } + }; + + private: + const MemTableRep::KeyComparator& compare_; + // the pointer to Allocator to allocate memory, immutable after construction. + MemTableAllocator* const allocator_; + // the number of hash buckets in the hash table. + const size_t bucket_count_; + // approximate size of each entry + const size_t approximate_entry_size_; + // the maximum depth of the cuckoo path. + const unsigned int cuckoo_path_max_depth_; + // the current number of entries in cuckoo_array_ which have been occupied. + size_t occupied_count_; + // the current number of hash functions used in the cuckoo hash. + unsigned int hash_function_count_; + // the backup MemTableRep to handle the case where cuckoo hash cannot find + // a vacant bucket for inserting the key of a put request. + std::shared_ptr<MemTableRep> backup_table_; + // the array to store pointers, pointing to the actual data. + std::atomic<char*>* cuckoo_array_; + // a buffer to store cuckoo path + int* cuckoo_path_; + // a boolean flag indicating whether the fullness of bucket array + // reaches the point to make the current memtable immutable. + bool is_nearly_full_; + + // the default maximum depth of the cuckoo path. + static const unsigned int kDefaultCuckooPathMaxDepth = 10; + + CuckooStepBuffer step_buffer_; + + // returns the bucket id associated with the input slice, based on the + // given hash function id. + unsigned int GetHash(const Slice& slice, const int hash_func_id) const { + // the seeds used in the Murmur hash to produce different hash functions. + static const int kMurmurHashSeeds[HashCuckooRepFactory::kMaxHashCount] = { + 545609244, 1769731426, 763324157, 13099088, 592422103, + 1899789565, 248369300, 1984183468, 1613664382, 1491157517}; + return static_cast<unsigned int>( + MurmurHash(slice.data(), static_cast<int>(slice.size()), + kMurmurHashSeeds[hash_func_id]) % + bucket_count_); + } + + // A cuckoo path is a sequence of bucket ids, where each id points to a + // location of cuckoo_array_.
This path describes the displacement sequence + // of entries in order to store the desired data specified by the input user + // key. The path starts from one of the locations associated with the + // specified user key and ends at a vacant space in the cuckoo array. This + // function will update the cuckoo_path. + // + // @return true if it found a cuckoo path. + bool FindCuckooPath(const char* internal_key, const Slice& user_key, + int* cuckoo_path, size_t* cuckoo_path_length, + int initial_hash_id = 0); + + // Perform quick insert by checking whether there is a vacant bucket in one + // of the possible locations of the input key. If so, then the function will + // return true and the key will be stored in that vacant bucket. + // + // This function is a helper function of FindCuckooPath that discovers the + // first possible steps of a cuckoo path. It begins by computing + // the possible locations of the input key (and stores them in bucket_ids.) + // Then, if one of its possible locations is vacant, then the input key will + // be stored in that vacant space and the function will return true. + // Otherwise, the function will return false indicating a complete search + // of cuckoo-path is needed. + bool QuickInsert(const char* internal_key, const Slice& user_key, + int bucket_ids[], const int initial_hash_id); + + // Returns the pointer to the internal iterator to the buckets where buckets + // are sorted according to the user specified KeyComparator. Note that + // any insert after this function call may affect the sorted nature of + // the returned iterator. + virtual MemTableRep::Iterator* GetIterator(Arena* arena) override { + std::vector<const char*> compact_buckets; + for (unsigned int bid = 0; bid < bucket_count_; ++bid) { + const char* bucket = cuckoo_array_[bid].load(std::memory_order_relaxed); + if (bucket != nullptr) { + compact_buckets.push_back(bucket); + } + } + MemTableRep* backup_table = backup_table_.get(); + if (backup_table != nullptr) { + std::unique_ptr<MemTableRep::Iterator> iter(backup_table->GetIterator()); + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + compact_buckets.push_back(iter->key()); + } + } + if (arena == nullptr) { + return new Iterator( + std::shared_ptr<std::vector<const char*>>( + new std::vector<const char*>(std::move(compact_buckets))), + compare_); + } else { + auto mem = arena->AllocateAligned(sizeof(Iterator)); + return new (mem) Iterator( + std::shared_ptr<std::vector<const char*>>( + new std::vector<const char*>(std::move(compact_buckets))), + compare_); + } + } +};
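// Editorial aside (sketch, not part of the patch): end to end, this rep is
// normally selected through the factory declared in the accompanying
// hash_cuckoo_rep.h. Illustrative wiring via the public options API, with
// the helper function name being ours:
//
//   #include "rocksdb/memtablerep.h"
//   #include "rocksdb/options.h"
//
//   void UseCuckooMemtable(rocksdb::Options* options) {
//     // The factory derives its bucket count from write_buffer_size.
//     options->memtable_factory.reset(
//         rocksdb::NewHashCuckooRepFactory(options->write_buffer_size));
//   }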
+void HashCuckooRep::Get(const LookupKey& key, void* callback_args, + bool (*callback_func)(void* arg, const char* entry)) { + Slice user_key = key.user_key(); + for (unsigned int hid = 0; hid < hash_function_count_; ++hid) { + const char* bucket = + cuckoo_array_[GetHash(user_key, hid)].load(std::memory_order_acquire); + if (bucket != nullptr) { + Slice bucket_user_key = UserKey(bucket); + if (user_key == bucket_user_key) { + callback_func(callback_args, bucket); + break; + } + } else { + // as Put() always stores at the vacant bucket located by the + // hash function with the smallest possible id, finding a vacant + // bucket here in Get() means a miss. + break; + } + } + MemTableRep* backup_table = backup_table_.get(); + if (backup_table != nullptr) { + backup_table->Get(key, callback_args, callback_func); + } +} + +void HashCuckooRep::Insert(KeyHandle handle) { + static const float kMaxFullness = 0.90f; + + auto* key = static_cast<char*>(handle); + int initial_hash_id = 0; + size_t cuckoo_path_length = 0; + auto user_key = UserKey(key); + // find cuckoo path + if (FindCuckooPath(key, user_key, cuckoo_path_, &cuckoo_path_length, + initial_hash_id) == false) { + // if this is reached, we can't find a vacant bucket for this key even + // though we have used up all the hash functions. Then use a backup + // memtable to store such a key, which will further make this mem-table + // become immutable. + if (backup_table_.get() == nullptr) { + VectorRepFactory factory(10); + backup_table_.reset( + factory.CreateMemTableRep(compare_, allocator_, nullptr, nullptr)); + is_nearly_full_ = true; + } + backup_table_->Insert(key); + return; + } + // when reaching this point, it means the insert can be done successfully. + occupied_count_++; + if (occupied_count_ >= bucket_count_ * kMaxFullness) { + is_nearly_full_ = true; + } + + // perform kickout process if the length of cuckoo path > 1. + if (cuckoo_path_length == 0) return; + + // the cuckoo path stores the kickout path in reverse order. + // so the kickout or displacement is actually performed + // in reverse order, which avoids false-negatives on read + // by moving each key involved in the cuckoo path to the new + // location before replacing it. + for (size_t i = 1; i < cuckoo_path_length; ++i) { + int kicked_out_bid = cuckoo_path_[i - 1]; + int current_bid = cuckoo_path_[i]; + // since we only allow one writer at a time, it is safe to do relaxed read. + cuckoo_array_[kicked_out_bid] + .store(cuckoo_array_[current_bid].load(std::memory_order_relaxed), + std::memory_order_release); + } + int insert_key_bid = cuckoo_path_[cuckoo_path_length - 1]; + cuckoo_array_[insert_key_bid].store(key, std::memory_order_release); +} + +bool HashCuckooRep::Contains(const char* internal_key) const { + auto user_key = UserKey(internal_key); + for (unsigned int hid = 0; hid < hash_function_count_; ++hid) { + const char* stored_key = + cuckoo_array_[GetHash(user_key, hid)].load(std::memory_order_acquire); + if (stored_key != nullptr) { + if (compare_(internal_key, stored_key) == 0) { + return true; + } + } + } + return false; +}
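// Editorial aside (sketch, not part of the patch): the synchronization
// contract above in one picture -- Insert() runs single-writer and publishes
// bucket pointers with release stores, while Get()/Contains() read with
// acquire loads, so a reader that observes a pointer also observes the key
// bytes behind it:
//
//   #include <atomic>
//
//   std::atomic<char*> bucket{nullptr};
//
//   void single_writer(char* key) {   // at most one writer, by contract
//     bucket.store(key, std::memory_order_release);
//   }
//
//   const char* any_reader() {        // many concurrent readers
//     return bucket.load(std::memory_order_acquire);
//   }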
+bool HashCuckooRep::QuickInsert(const char* internal_key, const Slice& user_key, + int bucket_ids[], const int initial_hash_id) { + int cuckoo_bucket_id = -1; + + // Below does the following: + // 0. Calculate all possible locations of the input key. + // 1. Check if there is a bucket having the same user_key as the input does. + // 2. If there exists such a bucket, then replace this bucket by the newly + // inserted data and return. This step also performs a duplication check. + // 3. If no such bucket exists but a vacant bucket exists, then insert the + // input data into it. + // 4. If steps 1 to 3 all fail, then return false. + for (unsigned int hid = initial_hash_id; hid < hash_function_count_; ++hid) { + bucket_ids[hid] = GetHash(user_key, hid); + // since only one PUT is allowed at a time, and this is part of the PUT + // operation, we can safely perform a relaxed load. + const char* stored_key = + cuckoo_array_[bucket_ids[hid]].load(std::memory_order_relaxed); + if (stored_key == nullptr) { + if (cuckoo_bucket_id == -1) { + cuckoo_bucket_id = bucket_ids[hid]; + } + } else { + const auto bucket_user_key = UserKey(stored_key); + if (bucket_user_key.compare(user_key) == 0) { + cuckoo_bucket_id = bucket_ids[hid]; + break; + } + } + } + + if (cuckoo_bucket_id != -1) { + cuckoo_array_[cuckoo_bucket_id].store(const_cast<char*>(internal_key), + std::memory_order_release); + return true; + } + + return false; +} + +// Perform pre-check and find the shortest cuckoo path. A cuckoo path +// is a displacement sequence for inserting the specified input key. +// +// @return true if it successfully found a vacant space or cuckoo-path. +// If the return value is true but the length of cuckoo_path is zero, +// then it indicates that a vacant bucket, or a bucket whose user key +// matches the input, was found and a quick insertion was done. +bool HashCuckooRep::FindCuckooPath(const char* internal_key, + const Slice& user_key, int* cuckoo_path, + size_t* cuckoo_path_length, + const int initial_hash_id) { + int bucket_ids[HashCuckooRepFactory::kMaxHashCount]; + *cuckoo_path_length = 0; + + if (QuickInsert(internal_key, user_key, bucket_ids, initial_hash_id)) { + return true; + } + // If this step is reached, then it means: + // 1. no vacant bucket in any of the possible locations of the input key. + // 2. none of the possible locations of the input key has the same user + // key as the input `internal_key`. + + // the front and back indices for the step_queue_ + step_buffer_.reset(); + + for (unsigned int hid = initial_hash_id; hid < hash_function_count_; ++hid) { + /// CuckooStep& current_step = step_queue_[front_pos++]; + CuckooStep& current_step = step_buffer_.NextWriteBuffer(); + current_step.bucket_id_ = bucket_ids[hid]; + current_step.prev_step_id_ = CuckooStep::kNullStep; + current_step.depth_ = 1; + } + + while (step_buffer_.HasNewWrite()) { + int step_id = step_buffer_.read_index_; + const CuckooStep& step = step_buffer_.ReadNext(); + // Since it's a BFS process, the first step with its depth deeper + // than the maximum allowed depth indicates all the remaining steps + // in the step buffer queue will also exceed the maximum depth. + // Return false immediately indicating we can't find a vacant bucket + // for the input key before the maximum allowed depth. + if (step.depth_ >= cuckoo_path_max_depth_) { + return false; + } + // again, we can perform a no-barrier load safely here as the current + // thread is the only writer. + Slice bucket_user_key = + UserKey(cuckoo_array_[step.bucket_id_].load(std::memory_order_relaxed)); + if (step.prev_step_id_ != CuckooStep::kNullStep) { + if (bucket_user_key == user_key) { + // then there is a loop in the current path, stop discovering this path. + continue; + } + } + // if the current bucket stores at its nth location, then we only consider + // its mth location where m > n. This property makes sure that all reads + // will not miss if we do have data associated with the query key. + // + // The n and m in the above statement are the start_hid and hid in the code. + unsigned int start_hid = hash_function_count_; + for (unsigned int hid = 0; hid < hash_function_count_; ++hid) { + bucket_ids[hid] = GetHash(bucket_user_key, hid); + if (step.bucket_id_ == bucket_ids[hid]) { + start_hid = hid; + } + } + // we must have found the bucket which is its current "home".
+
+// Perform a pre-check and find the shortest cuckoo path.  A cuckoo path
+// is a displacement sequence for inserting the specified input key.
+//
+// @return true if it successfully found a vacant space or cuckoo-path.
+//     If the return value is true but the length of cuckoo_path is zero,
+//     then a vacant bucket, or a bucket whose user key matches the input,
+//     has been found and a quick insertion has been done.
+bool HashCuckooRep::FindCuckooPath(const char* internal_key,
+                                   const Slice& user_key, int* cuckoo_path,
+                                   size_t* cuckoo_path_length,
+                                   const int initial_hash_id) {
+  int bucket_ids[HashCuckooRepFactory::kMaxHashCount];
+  *cuckoo_path_length = 0;
+
+  if (QuickInsert(internal_key, user_key, bucket_ids, initial_hash_id)) {
+    return true;
+  }
+  // If this step is reached, then it means:
+  // 1. no vacant bucket in any of the possible locations of the input key.
+  // 2. none of the possible locations of the input key has the same user
+  //    key as the input `internal_key`.
+
+  // the front and back indices for the step_queue_
+  step_buffer_.reset();
+
+  for (unsigned int hid = initial_hash_id; hid < hash_function_count_; ++hid) {
+    /// CuckooStep& current_step = step_queue_[front_pos++];
+    CuckooStep& current_step = step_buffer_.NextWriteBuffer();
+    current_step.bucket_id_ = bucket_ids[hid];
+    current_step.prev_step_id_ = CuckooStep::kNullStep;
+    current_step.depth_ = 1;
+  }
+
+  while (step_buffer_.HasNewWrite()) {
+    int step_id = step_buffer_.read_index_;
+    const CuckooStep& step = step_buffer_.ReadNext();
+    // Since this is a BFS process, the first step whose depth exceeds the
+    // maximum allowed depth indicates that all the remaining steps in the
+    // step buffer queue will exceed the maximum depth as well.
+    // Return false immediately, indicating we can't find a vacant bucket
+    // for the input key within the maximum allowed depth.
+    if (step.depth_ >= cuckoo_path_max_depth_) {
+      return false;
+    }
+    // again, we can perform a no-barrier load safely here as the current
+    // thread is the only writer.
+    Slice bucket_user_key =
+        UserKey(cuckoo_array_[step.bucket_id_].load(std::memory_order_relaxed));
+    if (step.prev_step_id_ != CuckooStep::kNullStep) {
+      if (bucket_user_key == user_key) {
+        // then there is a loop in the current path; stop exploring this path.
+        continue;
+      }
+    }
+    // if the current bucket is stored at its nth location, then we only
+    // consider its mth locations where m > n.  This property makes sure that
+    // no read will miss if we do have data associated with the query key.
+    //
+    // The n and m in the above statement correspond to start_hid and hid in
+    // the code.
+    unsigned int start_hid = hash_function_count_;
+    for (unsigned int hid = 0; hid < hash_function_count_; ++hid) {
+      bucket_ids[hid] = GetHash(bucket_user_key, hid);
+      if (step.bucket_id_ == bucket_ids[hid]) {
+        start_hid = hid;
+      }
+    }
+    // we must have found the bucket that is the key's current "home".
+    assert(start_hid != hash_function_count_);
+
+    // explore all possible next steps from the current step.
+    for (unsigned int hid = start_hid + 1; hid < hash_function_count_; ++hid) {
+      CuckooStep& next_step = step_buffer_.NextWriteBuffer();
+      next_step.bucket_id_ = bucket_ids[hid];
+      next_step.prev_step_id_ = step_id;
+      next_step.depth_ = step.depth_ + 1;
+      // once a vacant bucket is found, trace back all its previous steps
+      // to generate a cuckoo path.
+      if (cuckoo_array_[next_step.bucket_id_].load(std::memory_order_relaxed) ==
+          nullptr) {
+        // store the last step in the cuckoo path.  Note that cuckoo_path
+        // stores steps in reverse order.  This allows us to move keys along
+        // the cuckoo path by storing each key in its new place first, before
+        // removing it from the old place.  This property ensures reads are
+        // not missed while keys move along the cuckoo path.
+        cuckoo_path[(*cuckoo_path_length)++] = next_step.bucket_id_;
+        int depth;
+        for (depth = step.depth_; depth > 0 && step_id != CuckooStep::kNullStep;
+             depth--) {
+          const CuckooStep& prev_step = step_buffer_.steps_[step_id];
+          cuckoo_path[(*cuckoo_path_length)++] = prev_step.bucket_id_;
+          step_id = prev_step.prev_step_id_;
+        }
+        assert(depth == 0 && step_id == CuckooStep::kNullStep);
+        return true;
+      }
+      if (step_buffer_.IsFull()) {
+        // if true, then we have reached the maximum number of cuckoo search
+        // steps.
+        return false;
+      }
+    }
+  }
+
+  // tried all possible paths but were still unable to find a cuckoo path
+  // that leads to a vacant bucket.
+  return false;
+}
+
+HashCuckooRep::Iterator::Iterator(
+    std::shared_ptr<std::vector<const char*>> bucket,
+    const KeyComparator& compare)
+    : bucket_(bucket),
+      cit_(bucket_->end()),
+      compare_(compare),
+      sorted_(false) {}
+
+void HashCuckooRep::Iterator::DoSort() const {
+  if (!sorted_) {
+    std::sort(bucket_->begin(), bucket_->end(),
+              stl_wrappers::Compare(compare_));
+    cit_ = bucket_->begin();
+    sorted_ = true;
+  }
+}
+
+// Returns true iff the iterator is positioned at a valid node.
+bool HashCuckooRep::Iterator::Valid() const {
+  DoSort();
+  return cit_ != bucket_->end();
+}
+
+// Returns the key at the current position.
+// REQUIRES: Valid()
+const char* HashCuckooRep::Iterator::key() const {
+  assert(Valid());
+  return *cit_;
+}
+
+// Advances to the next position.
+// REQUIRES: Valid()
+void HashCuckooRep::Iterator::Next() {
+  assert(Valid());
+  if (cit_ == bucket_->end()) {
+    return;
+  }
+  ++cit_;
+}
+
+// Advances to the previous position.
+// REQUIRES: Valid()
+void HashCuckooRep::Iterator::Prev() {
+  assert(Valid());
+  if (cit_ == bucket_->begin()) {
+    // If you try to go back from the first element, the iterator should be
+    // invalidated.  So we set it to past-the-end.  This means that you can
+    // treat the container circularly.
+    cit_ = bucket_->end();
+  } else {
+    --cit_;
+  }
+}
+
+// Advance to the first entry with a key >= target
+void HashCuckooRep::Iterator::Seek(const Slice& user_key,
+                                   const char* memtable_key) {
+  DoSort();
+  // Do binary search to find the first value not less than the target
+  const char* encoded_key =
+      (memtable_key != nullptr) ? memtable_key : EncodeKey(&tmp_, user_key);
+  cit_ = std::equal_range(bucket_->begin(), bucket_->end(), encoded_key,
+                          [this](const char* a, const char* b) {
+                            return compare_(a, b) < 0;
+                          }).first;
+}
+
+// Position at the first entry in collection.
+// Final state of iterator is Valid() iff collection is not empty.
+void HashCuckooRep::Iterator::SeekToFirst() {
+  DoSort();
+  cit_ = bucket_->begin();
+}
+
+// Position at the last entry in collection.
+// Final state of iterator is Valid() iff collection is not empty.
+void HashCuckooRep::Iterator::SeekToLast() {
+  DoSort();
+  cit_ = bucket_->end();
+  if (bucket_->size() != 0) {
+    --cit_;
+  }
+}
+
+}  // anon namespace
+
+MemTableRep* HashCuckooRepFactory::CreateMemTableRep(
+    const MemTableRep::KeyComparator& compare, MemTableAllocator* allocator,
+    const SliceTransform* transform, Logger* logger) {
+  // The estimated average fullness.  The write performance of any closed
+  // hash degrades as the fullness of the mem-table increases.  Setting
+  // kFullness to a value around 0.7 can better avoid write performance
+  // degradation while keeping efficient memory usage.
+  static const float kFullness = 0.7f;
+  size_t pointer_size = sizeof(std::atomic<char*>);
+  assert(write_buffer_size_ >= (average_data_size_ + pointer_size));
+  size_t bucket_count =
+      static_cast<size_t>(
+          (write_buffer_size_ / (average_data_size_ + pointer_size)) / kFullness +
+          1);
+  unsigned int hash_function_count = hash_function_count_;
+  if (hash_function_count < 2) {
+    hash_function_count = 2;
+  }
+  if (hash_function_count > kMaxHashCount) {
+    hash_function_count = kMaxHashCount;
+  }
+  return new HashCuckooRep(compare, allocator, bucket_count,
+                           hash_function_count,
+                           static_cast<size_t>(
+                               (average_data_size_ + pointer_size) / kFullness)
+                           );
+}
+
+MemTableRepFactory* NewHashCuckooRepFactory(size_t write_buffer_size,
+                                            size_t average_data_size,
+                                            unsigned int hash_function_count) {
+  return new HashCuckooRepFactory(write_buffer_size, average_data_size,
+                                  hash_function_count);
+}
+
+}  // namespace rocksdb
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/memtable/hash_cuckoo_rep.h b/external/rocksdb/memtable/hash_cuckoo_rep.h
new file mode 100755
index 0000000000..173a907b4e
--- /dev/null
+++ b/external/rocksdb/memtable/hash_cuckoo_rep.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#ifndef ROCKSDB_LITE
+#include "port/port.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/memtablerep.h"
+
+namespace rocksdb {
+
+class HashCuckooRepFactory : public MemTableRepFactory {
+ public:
+  // maximum number of hash functions used in the cuckoo hash.
+  static const unsigned int kMaxHashCount = 10;
+
+  explicit HashCuckooRepFactory(size_t write_buffer_size,
+                                size_t average_data_size,
+                                unsigned int hash_function_count)
+      : write_buffer_size_(write_buffer_size),
+        average_data_size_(average_data_size),
+        hash_function_count_(hash_function_count) {}
+
+  virtual ~HashCuckooRepFactory() {}
+
+  virtual MemTableRep* CreateMemTableRep(
+      const MemTableRep::KeyComparator& compare, MemTableAllocator* allocator,
+      const SliceTransform* transform, Logger* logger) override;
+
+  virtual const char* Name() const override { return "HashCuckooRepFactory"; }
+
+ private:
+  size_t write_buffer_size_;
+  size_t average_data_size_;
+  const unsigned int hash_function_count_;
+};
+}  // namespace rocksdb
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/memtable/hash_linklist_rep.cc b/external/rocksdb/memtable/hash_linklist_rep.cc
new file mode 100755
index 0000000000..902c30e8ac
--- /dev/null
+++ b/external/rocksdb/memtable/hash_linklist_rep.cc
@@ -0,0 +1,821 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+
+#ifndef ROCKSDB_LITE
+#include "memtable/hash_linklist_rep.h"
+
+#include <algorithm>
+#include <atomic>
+#include "rocksdb/memtablerep.h"
+#include "util/arena.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/slice_transform.h"
+#include "port/port.h"
+#include "util/histogram.h"
+#include "util/murmurhash.h"
+#include "db/memtable.h"
+#include "db/skiplist.h"
+
+namespace rocksdb {
+namespace {
+
+typedef const char* Key;
+typedef SkipList<Key, const MemTableRep::KeyComparator&> MemtableSkipList;
+typedef std::atomic<void*> Pointer;
+
+// A data structure used as the header of a link list of a hash bucket.
+struct BucketHeader {
+  Pointer next;
+  std::atomic<uint32_t> num_entries;
+
+  explicit BucketHeader(void* n, uint32_t count)
+      : next(n), num_entries(count) {}
+
+  bool IsSkipListBucket() {
+    return next.load(std::memory_order_relaxed) == this;
+  }
+
+  uint32_t GetNumEntries() const {
+    return num_entries.load(std::memory_order_relaxed);
+  }
+
+  // REQUIRES: called from single-threaded Insert()
+  void IncNumEntries() {
+    // Only one thread writes at a time, so there is no need for an atomic
+    // increment.  Update it with a relaxed load and store.
+    num_entries.store(GetNumEntries() + 1, std::memory_order_relaxed);
+  }
+};
+
+// A data structure used as the header of a skip list of a hash bucket.
+struct SkipListBucketHeader {
+  BucketHeader Counting_header;
+  MemtableSkipList skip_list;
+
+  explicit SkipListBucketHeader(const MemTableRep::KeyComparator& cmp,
+                                MemTableAllocator* allocator, uint32_t count)
+      : Counting_header(this,  // Pointing to itself to indicate header type.
+                        count),
+        skip_list(cmp, allocator) {}
+};
+
+struct Node {
+  // Accessors/mutators for links.  Wrapped in methods so we can
+  // add the appropriate barriers as necessary.
+  Node* Next() {
+    // Use an 'acquire load' so that we observe a fully initialized
+    // version of the returned Node.
+    return next_.load(std::memory_order_acquire);
+  }
+  void SetNext(Node* x) {
+    // Use a 'release store' so that anybody who reads through this
+    // pointer observes a fully initialized version of the inserted node.
+    next_.store(x, std::memory_order_release);
+  }
+  // No-barrier variants that can be safely used in a few locations.
+  Node* NoBarrier_Next() {
+    return next_.load(std::memory_order_relaxed);
+  }
+
+  void NoBarrier_SetNext(Node* x) { next_.store(x, std::memory_order_relaxed); }
+
+  // Needed for the placement new below, which is fine
+  Node() {}
+
+ private:
+  std::atomic<Node*> next_;
+
+  // Prohibit copying due to the below
+  Node(const Node&) = delete;
+  Node& operator=(const Node&) = delete;
+
+ public:
+  char key[1];
+};
+
+// Memory structure of the mem table:
+// It is a hash table where each bucket points to one entry, a linked list,
+// or a skip list.  In order to track the total number of records in a bucket
+// and determine whether it should switch to a skip list, a header is added
+// that records the number of entries in the bucket.
+//
+//
+//          +-----> NULL    Case 1. Empty bucket
+//          |
+//          |
+//          | +---> +-------+
+//          |       | Next  +--> NULL
+//          |       +-------+
+//  +-----+ |       |       |  Case 2. One Entry in bucket.
+//  |     +-+       |  Data |          next pointer points to
+//  +-----+         |       |          NULL. All other cases
+//  |     |         |       |          next pointer is not NULL.
+//  +-----+         +-------+
+//  |     +---+
+//  +-----+   +->  +-------+  +>  +-------+  +-> +-------+
+//  |     |   |    | Next  +--+   | Next  +--+   | Next  +-->NULL
+//  +-----+   |    +-------+      +-------+      +-------+
+//  |     +---+    | Count |      |       |      |       |
+//  +-----+        +-------+      |  Data |      |  Data |
+//  |     |                       |       |      |       |
+//  +-----+        Case 3.        |       |      |       |
+//  |     |        A header       +-------+      +-------+
+//  +-----+        points to
+//  |     |        a linked list. Count indicates total number
+//  +-----+        of rows in this bucket.
+//  |     |
+//  +-----+   +->  +-------+ <--+
+//  |     |   |    | Next  +----+
+//  +-----+   |    +-------+        Case 4. A header points to a skip
+//  |     +---+    | Count |        list and its next pointer points to
+//  +-----+        +-------+        itself, to distinguish case 3 from 4.
+//  |     |        |       |        Count is still kept, indicating the
+//  +-----+        | Skip +-->      total number of entries in the bucket,
+//  |     |        | List  |  Data  for debugging purposes.
+//  |     |        |      +-->
+//  +-----+        |       |
+//  |     |        +-------+
+//  +-----+
+//
+// We don't have a data race when changing cases because:
+// (1) When changing from case 2->3, we create a new bucket header, put the
+//     single node there first without changing the original node, and do a
+//     release store when changing the bucket pointer.  In that case, a reader
+//     who sees a stale value of the bucket pointer still reads a valid node,
+//     while a reader who sees the new value reads the header correctly,
+//     because of the release store.
+// (2) When changing case 3->4, a new header is created with the skip list
+//     pointing to the data, before doing a release store to change the
+//     bucket pointer.  The old header and nodes are never changed, so any
+//     reader that sees any of those existing pointers is guaranteed to be
+//     able to iterate to the end of the linked list.
+// (3) The header's next pointer in case 3 might change, but it is never
+//     equal to the header itself, so whether a reader sees a stale or a
+//     newer value, it can still correctly distinguish case 3 from case 4.
+//
+// The reason we use case 2 is to keep the format efficient when the
+// utilization of buckets is relatively low.  If we used case 3 for
+// single-entry buckets, we would waste 12 bytes for every entry,
+// which can be a significant decrease in memory utilization.
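The four-case encoding above can be condensed into a small self-contained sketch (an editorial illustration, not patch content): the discriminator is the first pointer-sized word behind the bucket pointer, and a header marks the skip-list case by pointing its next word at itself.

#include <atomic>
#include <cstdio>

// Minimal model of the bucket encoding: a bucket pointer is either null
// (case 1), a node whose embedded next pointer is null (case 2), a counting
// header whose next points at a node list (case 3), or a header whose next
// points at itself, marking a skip-list bucket (case 4).
struct Header {
  std::atomic<void*> next;
  explicit Header(void* n) : next(n) {}
  bool IsSkipListBucket() { return next.load(std::memory_order_relaxed) == this; }
};

const char* Classify(Header* bucket) {
  if (bucket == nullptr) return "case 1: empty bucket";
  if (bucket->next.load(std::memory_order_relaxed) == nullptr)
    return "case 2: single node (its embedded next pointer is null)";
  return bucket->IsSkipListBucket()
             ? "case 4: skip-list bucket (header points to itself)"
             : "case 3: counting header + sorted linked list";
}

int main() {
  Header skiplist_header(nullptr);
  skiplist_header.next.store(&skiplist_header, std::memory_order_relaxed);
  Header single_node(nullptr);  // models a Node whose first word is its next pointer
  std::printf("%s\n%s\n%s\n", Classify(nullptr), Classify(&single_node),
              Classify(&skiplist_header));
  return 0;
}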
+class HashLinkListRep : public MemTableRep {
+ public:
+  HashLinkListRep(const MemTableRep::KeyComparator& compare,
+                  MemTableAllocator* allocator, const SliceTransform* transform,
+                  size_t bucket_size, uint32_t threshold_use_skiplist,
+                  size_t huge_page_tlb_size, Logger* logger,
+                  int bucket_entries_logging_threshold,
+                  bool if_log_bucket_dist_when_flash);
+
+  virtual KeyHandle Allocate(const size_t len, char** buf) override;
+
+  virtual void Insert(KeyHandle handle) override;
+
+  virtual bool Contains(const char* key) const override;
+
+  virtual size_t ApproximateMemoryUsage() override;
+
+  virtual void Get(const LookupKey& k, void* callback_args,
+                   bool (*callback_func)(void* arg,
+                                         const char* entry)) override;
+
+  virtual ~HashLinkListRep();
+
+  virtual MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override;
+
+  virtual MemTableRep::Iterator* GetDynamicPrefixIterator(
+      Arena* arena = nullptr) override;
+
+ private:
+  friend class DynamicIterator;
+
+  size_t bucket_size_;
+
+  // Maps slices (which are transformed user keys) to buckets of keys sharing
+  // the same transform.
+  Pointer* buckets_;
+
+  const uint32_t threshold_use_skiplist_;
+
+  // The user-supplied transform whose domain is the user keys.
+  const SliceTransform* transform_;
+
+  const MemTableRep::KeyComparator& compare_;
+
+  Logger* logger_;
+  int bucket_entries_logging_threshold_;
+  bool if_log_bucket_dist_when_flash_;
+
+  bool LinkListContains(Node* head, const Slice& key) const;
+
+  SkipListBucketHeader* GetSkipListBucketHeader(Pointer* first_next_pointer)
+      const;
+
+  Node* GetLinkListFirstNode(Pointer* first_next_pointer) const;
+
+  Slice GetPrefix(const Slice& internal_key) const {
+    return transform_->Transform(ExtractUserKey(internal_key));
+  }
+
+  size_t GetHash(const Slice& slice) const {
+    return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0) %
+           bucket_size_;
+  }
+
+  Pointer* GetBucket(size_t i) const {
+    return static_cast<Pointer*>(buckets_[i].load(std::memory_order_acquire));
+  }
+
+  Pointer* GetBucket(const Slice& slice) const {
+    return GetBucket(GetHash(slice));
+  }
+
+  bool Equal(const Slice& a, const Key& b) const {
+    return (compare_(b, a) == 0);
+  }
+
+  bool Equal(const Key& a, const Key& b) const { return (compare_(a, b) == 0); }
+
+  bool KeyIsAfterNode(const Slice& internal_key, const Node* n) const {
+    // nullptr n is considered infinite
+    return (n != nullptr) && (compare_(n->key, internal_key) < 0);
+  }
+
+  bool KeyIsAfterNode(const Key& key, const Node* n) const {
+    // nullptr n is considered infinite
+    return (n != nullptr) && (compare_(n->key, key) < 0);
+  }
+
+  Node* FindGreaterOrEqualInBucket(Node* head, const Slice& key) const;
+
+  class FullListIterator : public MemTableRep::Iterator {
+   public:
+    explicit FullListIterator(MemtableSkipList* list, Allocator* allocator)
+        : iter_(list), full_list_(list), allocator_(allocator) {}
+
+    virtual ~FullListIterator() {
+    }
+
+    // Returns true iff the iterator is positioned at a valid node.
+    virtual bool Valid() const override { return iter_.Valid(); }
+
+    // Returns the key at the current position.
+    // REQUIRES: Valid()
+    virtual const char* key() const override {
+      assert(Valid());
+      return iter_.key();
+    }
+
+    // Advances to the next position.
+    // REQUIRES: Valid()
+    virtual void Next() override {
+      assert(Valid());
+      iter_.Next();
+    }
+
+    // Advances to the previous position.
+    // REQUIRES: Valid()
+    virtual void Prev() override {
+      assert(Valid());
+      iter_.Prev();
+    }
+
+    // Advance to the first entry with a key >= target
+    virtual void Seek(const Slice& internal_key,
+                      const char* memtable_key) override {
+      const char* encoded_key =
+          (memtable_key != nullptr) ?
+              memtable_key : EncodeKey(&tmp_, internal_key);
+      iter_.Seek(encoded_key);
+    }
+
+    // Position at the first entry in collection.
+    // Final state of iterator is Valid() iff collection is not empty.
+    virtual void SeekToFirst() override { iter_.SeekToFirst(); }
+
+    // Position at the last entry in collection.
+    // Final state of iterator is Valid() iff collection is not empty.
+    virtual void SeekToLast() override { iter_.SeekToLast(); }
+   private:
+    MemtableSkipList::Iterator iter_;
+    // To destruct with the iterator.
+    std::unique_ptr<MemtableSkipList> full_list_;
+    std::unique_ptr<Allocator> allocator_;
+    std::string tmp_;  // For passing to EncodeKey
+  };
+
+  class LinkListIterator : public MemTableRep::Iterator {
+   public:
+    explicit LinkListIterator(const HashLinkListRep* const hash_link_list_rep,
+                              Node* head)
+        : hash_link_list_rep_(hash_link_list_rep),
+          head_(head),
+          node_(nullptr) {}
+
+    virtual ~LinkListIterator() {}
+
+    // Returns true iff the iterator is positioned at a valid node.
+    virtual bool Valid() const override { return node_ != nullptr; }
+
+    // Returns the key at the current position.
+    // REQUIRES: Valid()
+    virtual const char* key() const override {
+      assert(Valid());
+      return node_->key;
+    }
+
+    // Advances to the next position.
+    // REQUIRES: Valid()
+    virtual void Next() override {
+      assert(Valid());
+      node_ = node_->Next();
+    }
+
+    // Advances to the previous position.
+    // REQUIRES: Valid()
+    virtual void Prev() override {
+      // Prefix iterator does not support total order.
+      // We simply set the iterator to invalid state
+      Reset(nullptr);
+    }
+
+    // Advance to the first entry with a key >= target
+    virtual void Seek(const Slice& internal_key,
+                      const char* memtable_key) override {
+      node_ = hash_link_list_rep_->FindGreaterOrEqualInBucket(head_,
+                                                              internal_key);
+    }
+
+    // Position at the first entry in collection.
+    // Final state of iterator is Valid() iff collection is not empty.
+    virtual void SeekToFirst() override {
+      // Prefix iterator does not support total order.
+      // We simply set the iterator to invalid state
+      Reset(nullptr);
+    }
+
+    // Position at the last entry in collection.
+    // Final state of iterator is Valid() iff collection is not empty.
+    virtual void SeekToLast() override {
+      // Prefix iterator does not support total order.
+      // We simply set the iterator to invalid state
+      Reset(nullptr);
+    }
+
+   protected:
+    void Reset(Node* head) {
+      head_ = head;
+      node_ = nullptr;
+    }
+   private:
+    friend class HashLinkListRep;
+    const HashLinkListRep* const hash_link_list_rep_;
+    Node* head_;
+    Node* node_;
+
+    virtual void SeekToHead() {
+      node_ = head_;
+    }
+  };
+
+  class DynamicIterator : public HashLinkListRep::LinkListIterator {
+   public:
+    explicit DynamicIterator(HashLinkListRep& memtable_rep)
+        : HashLinkListRep::LinkListIterator(&memtable_rep, nullptr),
+          memtable_rep_(memtable_rep) {}
+
+    // Advance to the first entry with a key >= target
+    virtual void Seek(const Slice& k, const char* memtable_key) override {
+      auto transformed = memtable_rep_.GetPrefix(k);
+      auto* bucket = memtable_rep_.GetBucket(transformed);
+
+      SkipListBucketHeader* skip_list_header =
+          memtable_rep_.GetSkipListBucketHeader(bucket);
+      if (skip_list_header != nullptr) {
+        // The bucket is organized as a skip list
+        if (!skip_list_iter_) {
+          skip_list_iter_.reset(
+              new MemtableSkipList::Iterator(&skip_list_header->skip_list));
+        } else {
+          skip_list_iter_->SetList(&skip_list_header->skip_list);
+        }
+        if (memtable_key != nullptr) {
+          skip_list_iter_->Seek(memtable_key);
+        } else {
+          IterKey encoded_key;
+          encoded_key.EncodeLengthPrefixedKey(k);
+          skip_list_iter_->Seek(encoded_key.GetKey().data());
+        }
+      } else {
+        // The bucket is organized as a linked list
+        skip_list_iter_.reset();
+        Reset(memtable_rep_.GetLinkListFirstNode(bucket));
+        HashLinkListRep::LinkListIterator::Seek(k, memtable_key);
+      }
+    }
+
+    virtual bool Valid() const override {
+      if (skip_list_iter_) {
+        return skip_list_iter_->Valid();
+      }
+      return HashLinkListRep::LinkListIterator::Valid();
+    }
+
+    virtual const char* key() const override {
+      if (skip_list_iter_) {
+        return skip_list_iter_->key();
+      }
+      return HashLinkListRep::LinkListIterator::key();
+    }
+
+    virtual void Next() override {
+      if (skip_list_iter_) {
+        skip_list_iter_->Next();
+      } else {
+        HashLinkListRep::LinkListIterator::Next();
+      }
+    }
+
+   private:
+    // the underlying memtable
+    const HashLinkListRep& memtable_rep_;
+    std::unique_ptr<MemtableSkipList::Iterator> skip_list_iter_;
+  };
+
+  class EmptyIterator : public MemTableRep::Iterator {
+    // This is used when there wasn't a bucket. It is cheaper than
+    // instantiating an empty bucket over which to iterate.
+   public:
+    EmptyIterator() { }
+    virtual bool Valid() const override { return false; }
+    virtual const char* key() const override {
+      assert(false);
+      return nullptr;
+    }
+    virtual void Next() override {}
+    virtual void Prev() override {}
+    virtual void Seek(const Slice& user_key,
+                      const char* memtable_key) override {}
+    virtual void SeekToFirst() override {}
+    virtual void SeekToLast() override {}
+
+   private:
+  };
+};
+
+HashLinkListRep::HashLinkListRep(const MemTableRep::KeyComparator& compare,
+                                 MemTableAllocator* allocator,
+                                 const SliceTransform* transform,
+                                 size_t bucket_size,
+                                 uint32_t threshold_use_skiplist,
+                                 size_t huge_page_tlb_size, Logger* logger,
+                                 int bucket_entries_logging_threshold,
+                                 bool if_log_bucket_dist_when_flash)
+    : MemTableRep(allocator),
+      bucket_size_(bucket_size),
+      // A threshold to use the skip list doesn't make sense if it is less
+      // than 3, so we force a minimum of 3 to simplify the implementation.
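+      // (For example, passing 0 or 1 still yields 3: the insert that would
+      // grow a bucket beyond three entries triggers the skip-list
+      // conversion in Insert() below.)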
+      threshold_use_skiplist_(std::max(threshold_use_skiplist, 3U)),
+      transform_(transform),
+      compare_(compare),
+      logger_(logger),
+      bucket_entries_logging_threshold_(bucket_entries_logging_threshold),
+      if_log_bucket_dist_when_flash_(if_log_bucket_dist_when_flash) {
+  char* mem = allocator_->AllocateAligned(sizeof(Pointer) * bucket_size,
+                                          huge_page_tlb_size, logger);
+
+  buckets_ = new (mem) Pointer[bucket_size];
+
+  for (size_t i = 0; i < bucket_size_; ++i) {
+    buckets_[i].store(nullptr, std::memory_order_relaxed);
+  }
+}
+
+HashLinkListRep::~HashLinkListRep() {
+}
+
+KeyHandle HashLinkListRep::Allocate(const size_t len, char** buf) {
+  char* mem = allocator_->AllocateAligned(sizeof(Node) + len);
+  Node* x = new (mem) Node();
+  *buf = x->key;
+  return static_cast<void*>(x);
+}
+
+SkipListBucketHeader* HashLinkListRep::GetSkipListBucketHeader(
+    Pointer* first_next_pointer) const {
+  if (first_next_pointer == nullptr) {
+    return nullptr;
+  }
+  if (first_next_pointer->load(std::memory_order_relaxed) == nullptr) {
+    // Single entry bucket
+    return nullptr;
+  }
+  // Counting header
+  BucketHeader* header = reinterpret_cast<BucketHeader*>(first_next_pointer);
+  if (header->IsSkipListBucket()) {
+    assert(header->GetNumEntries() > threshold_use_skiplist_);
+    auto* skip_list_bucket_header =
+        reinterpret_cast<SkipListBucketHeader*>(header);
+    assert(skip_list_bucket_header->Counting_header.next.load(
+               std::memory_order_relaxed) == header);
+    return skip_list_bucket_header;
+  }
+  assert(header->GetNumEntries() <= threshold_use_skiplist_);
+  return nullptr;
+}
+
+Node* HashLinkListRep::GetLinkListFirstNode(Pointer* first_next_pointer) const {
+  if (first_next_pointer == nullptr) {
+    return nullptr;
+  }
+  if (first_next_pointer->load(std::memory_order_relaxed) == nullptr) {
+    // Single entry bucket
+    return reinterpret_cast<Node*>(first_next_pointer);
+  }
+  // Counting header
+  BucketHeader* header = reinterpret_cast<BucketHeader*>(first_next_pointer);
+  if (!header->IsSkipListBucket()) {
+    assert(header->GetNumEntries() <= threshold_use_skiplist_);
+    return reinterpret_cast<Node*>(
+        header->next.load(std::memory_order_acquire));
+  }
+  assert(header->GetNumEntries() > threshold_use_skiplist_);
+  return nullptr;
+}
+
+void HashLinkListRep::Insert(KeyHandle handle) {
+  Node* x = static_cast<Node*>(handle);
+  assert(!Contains(x->key));
+  Slice internal_key = GetLengthPrefixedSlice(x->key);
+  auto transformed = GetPrefix(internal_key);
+  auto& bucket = buckets_[GetHash(transformed)];
+  Pointer* first_next_pointer =
+      static_cast<Pointer*>(bucket.load(std::memory_order_relaxed));
+
+  if (first_next_pointer == nullptr) {
+    // Case 1. empty bucket
+    // NoBarrier_SetNext() suffices since we will add a barrier when
+    // we publish a pointer to "x" in the bucket.
+    x->NoBarrier_SetNext(nullptr);
+    bucket.store(x, std::memory_order_release);
+    return;
+  }
+
+  BucketHeader* header = nullptr;
+  if (first_next_pointer->load(std::memory_order_relaxed) == nullptr) {
+    // Case 2. only one entry in the bucket
+    // Need to convert it to a Counting bucket first, then continue below.
+    Node* first = reinterpret_cast<Node*>(first_next_pointer);
+    // Need to add a bucket header.
+    // We have to convert it to a bucket with a header before inserting the
+    // new node.  Otherwise, we might need to change the next pointer of
+    // first, in which case a reader might see that the next pointer is NULL
+    // and wrongly conclude that the node is a bucket header.
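+    // The release store into the bucket below pairs with the acquire load in
+    // GetBucket(), so a concurrent reader observes either the old single
+    // node or the fully constructed header, never a partially initialized
+    // object.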
+    auto* mem = allocator_->AllocateAligned(sizeof(BucketHeader));
+    header = new (mem) BucketHeader(first, 1);
+    bucket.store(header, std::memory_order_release);
+  } else {
+    header = reinterpret_cast<BucketHeader*>(first_next_pointer);
+    if (header->IsSkipListBucket()) {
+      // Case 4. Bucket is already a skip list
+      assert(header->GetNumEntries() > threshold_use_skiplist_);
+      auto* skip_list_bucket_header =
+          reinterpret_cast<SkipListBucketHeader*>(header);
+      // Only one thread can execute Insert() at a time.  No need for an
+      // atomic increment.
+      skip_list_bucket_header->Counting_header.IncNumEntries();
+      skip_list_bucket_header->skip_list.Insert(x->key);
+      return;
+    }
+  }
+
+  if (bucket_entries_logging_threshold_ > 0 &&
+      header->GetNumEntries() ==
+          static_cast<uint32_t>(bucket_entries_logging_threshold_)) {
+    Info(logger_, "HashLinkedList bucket %" ROCKSDB_PRIszt
+                  " has more than %d "
+                  "entries. Key to insert: %s",
+         GetHash(transformed), header->GetNumEntries(),
+         GetLengthPrefixedSlice(x->key).ToString(true).c_str());
+  }
+
+  if (header->GetNumEntries() == threshold_use_skiplist_) {
+    // Case 3. the number of entries reaches the threshold, so we need to
+    // convert the bucket to a skip list.
+    LinkListIterator bucket_iter(
+        this, reinterpret_cast<Node*>(
+                  first_next_pointer->load(std::memory_order_relaxed)));
+    auto mem = allocator_->AllocateAligned(sizeof(SkipListBucketHeader));
+    SkipListBucketHeader* new_skip_list_header = new (mem)
+        SkipListBucketHeader(compare_, allocator_, header->GetNumEntries() + 1);
+    auto& skip_list = new_skip_list_header->skip_list;
+
+    // Add all current entries to the skip list
+    for (bucket_iter.SeekToHead(); bucket_iter.Valid(); bucket_iter.Next()) {
+      skip_list.Insert(bucket_iter.key());
+    }
+
+    // insert the new entry
+    skip_list.Insert(x->key);
+    // Set the bucket
+    bucket.store(new_skip_list_header, std::memory_order_release);
+  } else {
+    // Case 5. Need to insert into the sorted linked list without changing
+    // the header.
+    Node* first =
+        reinterpret_cast<Node*>(header->next.load(std::memory_order_relaxed));
+    assert(first != nullptr);
+    // Advance the counter unless the bucket needs to be converted to a skip
+    // list.  In that case, we have to make sure the previous count never
+    // exceeds threshold_use_skiplist_, to prevent readers from casting the
+    // bucket to the wrong format.
+    header->IncNumEntries();
+
+    Node* cur = first;
+    Node* prev = nullptr;
+    while (true) {
+      if (cur == nullptr) {
+        break;
+      }
+      Node* next = cur->Next();
+      // Make sure the list is sorted.
+      // If cur points to the first node or next is nullptr, this is
+      // trivially satisfied.
+      assert((cur == first) || (next == nullptr) ||
+             KeyIsAfterNode(next->key, cur));
+      if (KeyIsAfterNode(internal_key, cur)) {
+        // Keep searching in this list
+        prev = cur;
+        cur = next;
+      } else {
+        break;
+      }
+    }
+
+    // Our data structure does not allow duplicate insertion
+    assert(cur == nullptr || !Equal(x->key, cur->key));
+
+    // NoBarrier_SetNext() suffices since we will add a barrier when we
+    // publish a pointer to "x", either via prev->SetNext(x) or via the
+    // release store into header->next below.
+    x->NoBarrier_SetNext(cur);
+
+    if (prev) {
+      prev->SetNext(x);
+    } else {
+      header->next.store(static_cast<void*>(x), std::memory_order_release);
+    }
+  }
+}
+
+bool HashLinkListRep::Contains(const char* key) const {
+  Slice internal_key = GetLengthPrefixedSlice(key);
+
+  auto transformed = GetPrefix(internal_key);
+  auto bucket = GetBucket(transformed);
+  if (bucket == nullptr) {
+    return false;
+  }
+
+  SkipListBucketHeader* skip_list_header = GetSkipListBucketHeader(bucket);
+  if (skip_list_header != nullptr) {
+    return skip_list_header->skip_list.Contains(key);
+  } else {
+    return LinkListContains(GetLinkListFirstNode(bucket), internal_key);
+  }
+}
+
+size_t HashLinkListRep::ApproximateMemoryUsage() {
+  // Memory is always allocated from the allocator.
+  return 0;
+}
+
+void HashLinkListRep::Get(const LookupKey& k, void* callback_args,
+                          bool (*callback_func)(void* arg, const char* entry)) {
+  auto transformed = transform_->Transform(k.user_key());
+  auto bucket = GetBucket(transformed);
+
+  auto* skip_list_header = GetSkipListBucketHeader(bucket);
+  if (skip_list_header != nullptr) {
+    // Is a skip list
+    MemtableSkipList::Iterator iter(&skip_list_header->skip_list);
+    for (iter.Seek(k.memtable_key().data());
+         iter.Valid() && callback_func(callback_args, iter.key());
+         iter.Next()) {
+    }
+  } else {
+    auto* link_list_head = GetLinkListFirstNode(bucket);
+    if (link_list_head != nullptr) {
+      LinkListIterator iter(this, link_list_head);
+      for (iter.Seek(k.internal_key(), nullptr);
+           iter.Valid() && callback_func(callback_args, iter.key());
+           iter.Next()) {
+      }
+    }
+  }
+}
+
+MemTableRep::Iterator* HashLinkListRep::GetIterator(Arena* alloc_arena) {
+  // allocate a new arena of similar size to the one currently in use
+  Arena* new_arena = new Arena(allocator_->BlockSize());
+  auto list = new MemtableSkipList(compare_, new_arena);
+  HistogramImpl keys_per_bucket_hist;
+
+  for (size_t i = 0; i < bucket_size_; ++i) {
+    int count = 0;
+    auto* bucket = GetBucket(i);
+    if (bucket != nullptr) {
+      auto* skip_list_header = GetSkipListBucketHeader(bucket);
+      if (skip_list_header != nullptr) {
+        // Is a skip list
+        MemtableSkipList::Iterator itr(&skip_list_header->skip_list);
+        for (itr.SeekToFirst(); itr.Valid(); itr.Next()) {
+          list->Insert(itr.key());
+          count++;
+        }
+      } else {
+        auto* link_list_head = GetLinkListFirstNode(bucket);
+        if (link_list_head != nullptr) {
+          LinkListIterator itr(this, link_list_head);
+          for (itr.SeekToHead(); itr.Valid(); itr.Next()) {
+            list->Insert(itr.key());
+            count++;
+          }
+        }
+      }
+    }
+    if (if_log_bucket_dist_when_flash_) {
+      keys_per_bucket_hist.Add(count);
+    }
+  }
+  if (if_log_bucket_dist_when_flash_ && logger_ != nullptr) {
+    Info(logger_, "hashLinkedList Entry distribution among buckets: %s",
+         keys_per_bucket_hist.ToString().c_str());
+  }
+
+  if (alloc_arena == nullptr) {
+    return new FullListIterator(list, new_arena);
+  } else {
+    auto mem = alloc_arena->AllocateAligned(sizeof(FullListIterator));
+    return new (mem) FullListIterator(list, new_arena);
+  }
+}
+
+MemTableRep::Iterator* HashLinkListRep::GetDynamicPrefixIterator(
+    Arena* alloc_arena) {
+  if (alloc_arena == nullptr) {
+    return new DynamicIterator(*this);
+  } else {
+    auto mem = alloc_arena->AllocateAligned(sizeof(DynamicIterator));
+    return new (mem) DynamicIterator(*this);
+  }
+}
+
+bool HashLinkListRep::LinkListContains(Node* head,
+                                       const Slice& user_key) const {
+  Node* x = FindGreaterOrEqualInBucket(head, user_key);
+  return (x != nullptr && Equal(user_key, x->key));
+}
+
+Node* HashLinkListRep::FindGreaterOrEqualInBucket(Node* head,
+                                                  const Slice& key) const {
+  Node* x = head;
+  while (true) {
+    if (x == nullptr) {
+      return x;
+    }
+    Node* next = x->Next();
+    // Make sure the list is sorted.
+    // If x points to the head or next is nullptr, this is trivially
+    // satisfied.
+    assert((x == head) || (next == nullptr) || KeyIsAfterNode(next->key, x));
+    if (KeyIsAfterNode(key, x)) {
+      // Keep searching in this list
+      x = next;
+    } else {
+      break;
+    }
+  }
+  return x;
+}
+
+}  // anon namespace
+
+MemTableRep* HashLinkListRepFactory::CreateMemTableRep(
+    const MemTableRep::KeyComparator& compare, MemTableAllocator* allocator,
+    const SliceTransform* transform, Logger* logger) {
+  return new HashLinkListRep(compare, allocator, transform, bucket_count_,
+                             threshold_use_skiplist_, huge_page_tlb_size_,
+                             logger, bucket_entries_logging_threshold_,
+                             if_log_bucket_dist_when_flash_);
+}
+
+MemTableRepFactory* NewHashLinkListRepFactory(
+    size_t bucket_count, size_t huge_page_tlb_size,
+    int bucket_entries_logging_threshold, bool if_log_bucket_dist_when_flash,
+    uint32_t threshold_use_skiplist) {
+  return new HashLinkListRepFactory(
+      bucket_count, threshold_use_skiplist, huge_page_tlb_size,
+      bucket_entries_logging_threshold, if_log_bucket_dist_when_flash);
+}
+
+}  // namespace rocksdb
+#endif  // ROCKSDB_LITE
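For context, a minimal sketch of how this factory is typically wired into a database (not part of the patch; it assumes the usual public headers of this vendored RocksDB, and that the trailing parameters of NewHashLinkListRepFactory have the defaults declared in rocksdb/memtablerep.h):

#include "rocksdb/db.h"
#include "rocksdb/memtablerep.h"
#include "rocksdb/options.h"
#include "rocksdb/slice_transform.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Hash-based memtables key their buckets off a prefix of the user key.
  options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(4));
  // 50000 buckets is an arbitrary illustrative value.
  options.memtable_factory.reset(rocksdb::NewHashLinkListRepFactory(50000));

  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/hll_demo", &db);
  if (s.ok()) {
    s = db->Put(rocksdb::WriteOptions(), "key1", "value1");
    delete db;
  }
  return s.ok() ? 0 : 1;
}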
diff --git a/external/rocksdb/memtable/hash_linklist_rep.h b/external/rocksdb/memtable/hash_linklist_rep.h
new file mode 100755
index 0000000000..5197e7cfbb
--- /dev/null
+++ b/external/rocksdb/memtable/hash_linklist_rep.h
@@ -0,0 +1,48 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#ifndef ROCKSDB_LITE
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/memtablerep.h"
+
+namespace rocksdb {
+
+class HashLinkListRepFactory : public MemTableRepFactory {
+ public:
+  explicit HashLinkListRepFactory(size_t bucket_count,
+                                  uint32_t threshold_use_skiplist,
+                                  size_t huge_page_tlb_size,
+                                  int bucket_entries_logging_threshold,
+                                  bool if_log_bucket_dist_when_flash)
+      : bucket_count_(bucket_count),
+        threshold_use_skiplist_(threshold_use_skiplist),
+        huge_page_tlb_size_(huge_page_tlb_size),
+        bucket_entries_logging_threshold_(bucket_entries_logging_threshold),
+        if_log_bucket_dist_when_flash_(if_log_bucket_dist_when_flash) {}
+
+  virtual ~HashLinkListRepFactory() {}
+
+  virtual MemTableRep* CreateMemTableRep(
+      const MemTableRep::KeyComparator& compare, MemTableAllocator* allocator,
+      const SliceTransform* transform, Logger* logger) override;
+
+  virtual const char* Name() const override {
+    return "HashLinkListRepFactory";
+  }
+
+ private:
+  const size_t bucket_count_;
+  const uint32_t threshold_use_skiplist_;
+  const size_t huge_page_tlb_size_;
+  int bucket_entries_logging_threshold_;
+  bool if_log_bucket_dist_when_flash_;
+};
+
+}
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/memtable/hash_skiplist_rep.cc b/external/rocksdb/memtable/hash_skiplist_rep.cc
new file mode 100755
index 0000000000..73a917607b
--- /dev/null
+++ b/external/rocksdb/memtable/hash_skiplist_rep.cc
@@ -0,0 +1,342 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+
+#ifndef ROCKSDB_LITE
+#include "memtable/hash_skiplist_rep.h"
+
+#include <atomic>
+
+#include "rocksdb/memtablerep.h"
+#include "util/arena.h"
+#include "rocksdb/slice.h"
+#include "rocksdb/slice_transform.h"
+#include "port/port.h"
+#include "util/murmurhash.h"
+#include "db/memtable.h"
+#include "db/skiplist.h"
+
+namespace rocksdb {
+namespace {
+
+class HashSkipListRep : public MemTableRep {
+ public:
+  HashSkipListRep(const MemTableRep::KeyComparator& compare,
+                  MemTableAllocator* allocator, const SliceTransform* transform,
+                  size_t bucket_size, int32_t skiplist_height,
+                  int32_t skiplist_branching_factor);
+
+  virtual void Insert(KeyHandle handle) override;
+
+  virtual bool Contains(const char* key) const override;
+
+  virtual size_t ApproximateMemoryUsage() override;
+
+  virtual void Get(const LookupKey& k, void* callback_args,
+                   bool (*callback_func)(void* arg,
+                                         const char* entry)) override;
+
+  virtual ~HashSkipListRep();
+
+  virtual MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override;
+
+  virtual MemTableRep::Iterator* GetDynamicPrefixIterator(
+      Arena* arena = nullptr) override;
+
+ private:
+  friend class DynamicIterator;
+  typedef SkipList<const char*, const MemTableRep::KeyComparator&> Bucket;
+
+  size_t bucket_size_;
+
+  const int32_t skiplist_height_;
+  const int32_t skiplist_branching_factor_;
+
+  // Maps slices (which are transformed user keys) to buckets of keys sharing
+  // the same transform.
+  std::atomic<Bucket*>* buckets_;
+
+  // The user-supplied transform whose domain is the user keys.
+  const SliceTransform* transform_;
+
+  const MemTableRep::KeyComparator& compare_;
+  // immutable after construction
+  MemTableAllocator* const allocator_;
+
+  inline size_t GetHash(const Slice& slice) const {
+    return MurmurHash(slice.data(), static_cast<int>(slice.size()), 0) %
+           bucket_size_;
+  }
+  inline Bucket* GetBucket(size_t i) const {
+    return buckets_[i].load(std::memory_order_acquire);
+  }
+  inline Bucket* GetBucket(const Slice& slice) const {
+    return GetBucket(GetHash(slice));
+  }
+  // Get a bucket from buckets_. If the bucket hasn't been initialized yet,
+  // initialize it before returning.
+  Bucket* GetInitializedBucket(const Slice& transformed);
+
+  class Iterator : public MemTableRep::Iterator {
+   public:
+    explicit Iterator(Bucket* list, bool own_list = true,
+                      Arena* arena = nullptr)
+        : list_(list), iter_(list), own_list_(own_list), arena_(arena) {}
+
+    virtual ~Iterator() {
+      // if we own the list, we should also delete it
+      if (own_list_) {
+        assert(list_ != nullptr);
+        delete list_;
+      }
+    }
+
+    // Returns true iff the iterator is positioned at a valid node.
+    virtual bool Valid() const override {
+      return list_ != nullptr && iter_.Valid();
+    }
+
+    // Returns the key at the current position.
+    // REQUIRES: Valid()
+    virtual const char* key() const override {
+      assert(Valid());
+      return iter_.key();
+    }
+
+    // Advances to the next position.
+    // REQUIRES: Valid()
+    virtual void Next() override {
+      assert(Valid());
+      iter_.Next();
+    }
+
+    // Advances to the previous position.
+    // REQUIRES: Valid()
+    virtual void Prev() override {
+      assert(Valid());
+      iter_.Prev();
+    }
+
+    // Advance to the first entry with a key >= target
+    virtual void Seek(const Slice& internal_key,
+                      const char* memtable_key) override {
+      if (list_ != nullptr) {
+        const char* encoded_key =
+            (memtable_key != nullptr) ?
+                memtable_key : EncodeKey(&tmp_, internal_key);
+        iter_.Seek(encoded_key);
+      }
+    }
+
+    // Position at the first entry in collection.
+    // Final state of iterator is Valid() iff collection is not empty.
+    virtual void SeekToFirst() override {
+      if (list_ != nullptr) {
+        iter_.SeekToFirst();
+      }
+    }
+
+    // Position at the last entry in collection.
+    // Final state of iterator is Valid() iff collection is not empty.
+    virtual void SeekToLast() override {
+      if (list_ != nullptr) {
+        iter_.SeekToLast();
+      }
+    }
+   protected:
+    void Reset(Bucket* list) {
+      if (own_list_) {
+        assert(list_ != nullptr);
+        delete list_;
+      }
+      list_ = list;
+      iter_.SetList(list);
+      own_list_ = false;
+    }
+   private:
+    // if list_ is nullptr, we should NEVER call any methods on iter_
+    // if list_ is nullptr, this Iterator is not Valid()
+    Bucket* list_;
+    Bucket::Iterator iter_;
+    // here we track whether we own list_.  If we own it, we are also
+    // responsible for its cleanup.  This is a poor man's shared_ptr
+    bool own_list_;
+    std::unique_ptr<Arena> arena_;
+    std::string tmp_;  // For passing to EncodeKey
+  };
+
+  class DynamicIterator : public HashSkipListRep::Iterator {
+   public:
+    explicit DynamicIterator(const HashSkipListRep& memtable_rep)
+        : HashSkipListRep::Iterator(nullptr, false),
+          memtable_rep_(memtable_rep) {}
+
+    // Advance to the first entry with a key >= target
+    virtual void Seek(const Slice& k, const char* memtable_key) override {
+      auto transformed = memtable_rep_.transform_->Transform(ExtractUserKey(k));
+      Reset(memtable_rep_.GetBucket(transformed));
+      HashSkipListRep::Iterator::Seek(k, memtable_key);
+    }
+
+    // Position at the first entry in collection.
+    // Final state of iterator is Valid() iff collection is not empty.
+    virtual void SeekToFirst() override {
+      // Prefix iterator does not support total order.
+      // We simply set the iterator to invalid state
+      Reset(nullptr);
+    }
+
+    // Position at the last entry in collection.
+    // Final state of iterator is Valid() iff collection is not empty.
+    virtual void SeekToLast() override {
+      // Prefix iterator does not support total order.
+      // We simply set the iterator to invalid state
+      Reset(nullptr);
+    }
+   private:
+    // the underlying memtable
+    const HashSkipListRep& memtable_rep_;
+  };
+
+  class EmptyIterator : public MemTableRep::Iterator {
+    // This is used when there wasn't a bucket. It is cheaper than
+    // instantiating an empty bucket over which to iterate.
+   public:
+    EmptyIterator() { }
+    virtual bool Valid() const override { return false; }
+    virtual const char* key() const override {
+      assert(false);
+      return nullptr;
+    }
+    virtual void Next() override {}
+    virtual void Prev() override {}
+    virtual void Seek(const Slice& internal_key,
+                      const char* memtable_key) override {}
+    virtual void SeekToFirst() override {}
+    virtual void SeekToLast() override {}
+
+   private:
+  };
+};
+
+HashSkipListRep::HashSkipListRep(const MemTableRep::KeyComparator& compare,
+                                 MemTableAllocator* allocator,
+                                 const SliceTransform* transform,
+                                 size_t bucket_size, int32_t skiplist_height,
+                                 int32_t skiplist_branching_factor)
+    : MemTableRep(allocator),
+      bucket_size_(bucket_size),
+      skiplist_height_(skiplist_height),
+      skiplist_branching_factor_(skiplist_branching_factor),
+      transform_(transform),
+      compare_(compare),
+      allocator_(allocator) {
+  auto mem = allocator->AllocateAligned(
+      sizeof(std::atomic<Bucket*>) * bucket_size);
+  buckets_ = new (mem) std::atomic<Bucket*>[bucket_size];
+
+  for (size_t i = 0; i < bucket_size_; ++i) {
+    buckets_[i].store(nullptr, std::memory_order_relaxed);
+  }
+}
+
+HashSkipListRep::~HashSkipListRep() {
+}
+
+HashSkipListRep::Bucket* HashSkipListRep::GetInitializedBucket(
+    const Slice& transformed) {
+  size_t hash = GetHash(transformed);
+  auto bucket = GetBucket(hash);
+  if (bucket == nullptr) {
+    auto addr = allocator_->AllocateAligned(sizeof(Bucket));
+    bucket = new (addr) Bucket(compare_, allocator_, skiplist_height_,
+                               skiplist_branching_factor_);
+    buckets_[hash].store(bucket, std::memory_order_release);
+  }
+  return bucket;
+}
+
+void HashSkipListRep::Insert(KeyHandle handle) {
+  auto* key = static_cast<char*>(handle);
+  assert(!Contains(key));
+  auto transformed = transform_->Transform(UserKey(key));
+  auto bucket = GetInitializedBucket(transformed);
+  bucket->Insert(key);
+}
+
+bool HashSkipListRep::Contains(const char* key) const {
+  auto transformed = transform_->Transform(UserKey(key));
+  auto bucket = GetBucket(transformed);
+  if (bucket == nullptr) {
+    return false;
+  }
+  return bucket->Contains(key);
+}
+
+size_t HashSkipListRep::ApproximateMemoryUsage() {
+  return 0;
+}
+
+void HashSkipListRep::Get(const LookupKey& k, void* callback_args,
+                          bool (*callback_func)(void* arg, const char* entry)) {
+  auto transformed = transform_->Transform(k.user_key());
+  auto bucket = GetBucket(transformed);
+  if (bucket != nullptr) {
+    Bucket::Iterator iter(bucket);
+    for (iter.Seek(k.memtable_key().data());
+         iter.Valid() && callback_func(callback_args, iter.key());
+         iter.Next()) {
+    }
+  }
+}
+
+MemTableRep::Iterator* HashSkipListRep::GetIterator(Arena* arena) {
+  // allocate a new arena of similar size to the one currently in use
+  Arena* new_arena = new Arena(allocator_->BlockSize());
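+  // new_arena backs the flattened skip list built below; ownership of both
+  // the list and the arena passes to the returned Iterator (own_list_ ==
+  // true), which releases them when it is destroyed.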
+  auto list = new Bucket(compare_, new_arena);
+  for (size_t i = 0; i < bucket_size_; ++i) {
+    auto bucket = GetBucket(i);
+    if (bucket != nullptr) {
+      Bucket::Iterator itr(bucket);
+      for (itr.SeekToFirst(); itr.Valid(); itr.Next()) {
+        list->Insert(itr.key());
+      }
+    }
+  }
+  if (arena == nullptr) {
+    return new Iterator(list, true, new_arena);
+  } else {
+    auto mem = arena->AllocateAligned(sizeof(Iterator));
+    return new (mem) Iterator(list, true, new_arena);
+  }
+}
+
+MemTableRep::Iterator* HashSkipListRep::GetDynamicPrefixIterator(Arena* arena) {
+  if (arena == nullptr) {
+    return new DynamicIterator(*this);
+  } else {
+    auto mem = arena->AllocateAligned(sizeof(DynamicIterator));
+    return new (mem) DynamicIterator(*this);
+  }
+}
+
+}  // anon namespace
+
+MemTableRep* HashSkipListRepFactory::CreateMemTableRep(
+    const MemTableRep::KeyComparator& compare, MemTableAllocator* allocator,
+    const SliceTransform* transform, Logger* logger) {
+  return new HashSkipListRep(compare, allocator, transform, bucket_count_,
+                             skiplist_height_, skiplist_branching_factor_);
+}
+
+MemTableRepFactory* NewHashSkipListRepFactory(
+    size_t bucket_count, int32_t skiplist_height,
+    int32_t skiplist_branching_factor) {
+  return new HashSkipListRepFactory(bucket_count, skiplist_height,
+                                    skiplist_branching_factor);
+}
+
+}  // namespace rocksdb
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/memtable/hash_skiplist_rep.h b/external/rocksdb/memtable/hash_skiplist_rep.h
new file mode 100755
index 0000000000..56a289c4b1
--- /dev/null
+++ b/external/rocksdb/memtable/hash_skiplist_rep.h
@@ -0,0 +1,43 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#ifndef ROCKSDB_LITE
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/memtablerep.h"
+
+namespace rocksdb {
+
+class HashSkipListRepFactory : public MemTableRepFactory {
+ public:
+  explicit HashSkipListRepFactory(
+      size_t bucket_count,
+      int32_t skiplist_height,
+      int32_t skiplist_branching_factor)
+      : bucket_count_(bucket_count),
+        skiplist_height_(skiplist_height),
+        skiplist_branching_factor_(skiplist_branching_factor) { }
+
+  virtual ~HashSkipListRepFactory() {}
+
+  virtual MemTableRep* CreateMemTableRep(
+      const MemTableRep::KeyComparator& compare, MemTableAllocator* allocator,
+      const SliceTransform* transform, Logger* logger) override;
+
+  virtual const char* Name() const override {
+    return "HashSkipListRepFactory";
+  }
+
+ private:
+  const size_t bucket_count_;
+  const int32_t skiplist_height_;
+  const int32_t skiplist_branching_factor_;
+};
+
+}
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/memtable/skiplistrep.cc b/external/rocksdb/memtable/skiplistrep.cc
new file mode 100755
index 0000000000..b8c90c6d6d
--- /dev/null
+++ b/external/rocksdb/memtable/skiplistrep.cc
@@ -0,0 +1,252 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+#include "db/inlineskiplist.h"
+#include "db/memtable.h"
+#include "rocksdb/memtablerep.h"
+#include "util/arena.h"
+
+namespace rocksdb {
+namespace {
+class SkipListRep : public MemTableRep {
+  InlineSkipList<const MemTableRep::KeyComparator&> skip_list_;
+  const MemTableRep::KeyComparator& cmp_;
+  const SliceTransform* transform_;
+  const size_t lookahead_;
+
+  friend class LookaheadIterator;
+public:
+  explicit SkipListRep(const MemTableRep::KeyComparator& compare,
+                       MemTableAllocator* allocator,
+                       const SliceTransform* transform, const size_t lookahead)
+    : MemTableRep(allocator), skip_list_(compare, allocator), cmp_(compare),
+      transform_(transform), lookahead_(lookahead) {
+  }
+
+  virtual KeyHandle Allocate(const size_t len, char** buf) override {
+    *buf = skip_list_.AllocateKey(len);
+    return static_cast<KeyHandle>(*buf);
+  }
+
+  // Insert key into the list.
+  // REQUIRES: nothing that compares equal to key is currently in the list.
+  virtual void Insert(KeyHandle handle) override {
+    skip_list_.Insert(static_cast<char*>(handle));
+  }
+
+  virtual void InsertConcurrently(KeyHandle handle) override {
+    skip_list_.InsertConcurrently(static_cast<char*>(handle));
+  }
+
+  // Returns true iff an entry that compares equal to key is in the list.
+  virtual bool Contains(const char* key) const override {
+    return skip_list_.Contains(key);
+  }
+
+  virtual size_t ApproximateMemoryUsage() override {
+    // All memory is allocated through allocator; nothing to report here
+    return 0;
+  }
+
+  virtual void Get(const LookupKey& k, void* callback_args,
+                   bool (*callback_func)(void* arg,
+                                         const char* entry)) override {
+    SkipListRep::Iterator iter(&skip_list_);
+    Slice dummy_slice;
+    for (iter.Seek(dummy_slice, k.memtable_key().data());
+         iter.Valid() && callback_func(callback_args, iter.key());
+         iter.Next()) {
+    }
+  }
+
+  uint64_t ApproximateNumEntries(const Slice& start_ikey,
+                                 const Slice& end_ikey) override {
+    std::string tmp;
+    uint64_t start_count =
+        skip_list_.EstimateCount(EncodeKey(&tmp, start_ikey));
+    uint64_t end_count = skip_list_.EstimateCount(EncodeKey(&tmp, end_ikey));
+    return (end_count >= start_count) ? (end_count - start_count) : 0;
+  }
+
+  virtual ~SkipListRep() override { }
+
+  // Iteration over the contents of a skip list
+  class Iterator : public MemTableRep::Iterator {
+    InlineSkipList<const MemTableRep::KeyComparator&>::Iterator iter_;
+
+   public:
+    // Initialize an iterator over the specified list.
+    // The returned iterator is not valid.
+    explicit Iterator(
+        const InlineSkipList<const MemTableRep::KeyComparator&>* list)
+        : iter_(list) {}
+
+    virtual ~Iterator() override { }
+
+    // Returns true iff the iterator is positioned at a valid node.
+    virtual bool Valid() const override {
+      return iter_.Valid();
+    }
+
+    // Returns the key at the current position.
+    // REQUIRES: Valid()
+    virtual const char* key() const override {
+      return iter_.key();
+    }
+
+    // Advances to the next position.
+    // REQUIRES: Valid()
+    virtual void Next() override {
+      iter_.Next();
+    }
+
+    // Advances to the previous position.
+    // REQUIRES: Valid()
+    virtual void Prev() override {
+      iter_.Prev();
+    }
+
+    // Advance to the first entry with a key >= target
+    virtual void Seek(const Slice& user_key, const char* memtable_key)
+        override {
+      if (memtable_key != nullptr) {
+        iter_.Seek(memtable_key);
+      } else {
+        iter_.Seek(EncodeKey(&tmp_, user_key));
+      }
+    }
+
+    // Position at the first entry in list.
+    // Final state of iterator is Valid() iff list is not empty.
+    virtual void SeekToFirst() override {
+      iter_.SeekToFirst();
+    }
+
+    // Position at the last entry in list.
+    // Final state of iterator is Valid() iff list is not empty.
+    virtual void SeekToLast() override {
+      iter_.SeekToLast();
+    }
+   protected:
+    std::string tmp_;       // For passing to EncodeKey
+  };
+
+  // Iterator over the contents of a skip list which also keeps track of the
+  // previously visited node. In Seek(), it first examines a few nodes after
+  // that node, falling back to an O(log n) search from the head of the list
+  // only if the target key hasn't been found.
+  class LookaheadIterator : public MemTableRep::Iterator {
+   public:
+    explicit LookaheadIterator(const SkipListRep& rep) :
+        rep_(rep), iter_(&rep_.skip_list_), prev_(iter_) {}
+
+    virtual ~LookaheadIterator() override {}
+
+    virtual bool Valid() const override {
+      return iter_.Valid();
+    }
+
+    virtual const char *key() const override {
+      assert(Valid());
+      return iter_.key();
+    }
+
+    virtual void Next() override {
+      assert(Valid());
+
+      bool advance_prev = true;
+      if (prev_.Valid()) {
+        auto k1 = rep_.UserKey(prev_.key());
+        auto k2 = rep_.UserKey(iter_.key());
+
+        if (k1.compare(k2) == 0) {
+          // same user key, don't move prev_
+          advance_prev = false;
+        } else if (rep_.transform_) {
+          // only advance prev_ if it has the same prefix as iter_
+          auto t1 = rep_.transform_->Transform(k1);
+          auto t2 = rep_.transform_->Transform(k2);
+          advance_prev = t1.compare(t2) == 0;
+        }
+      }
+
+      if (advance_prev) {
+        prev_ = iter_;
+      }
+      iter_.Next();
+    }
+
+    virtual void Prev() override {
+      assert(Valid());
+      iter_.Prev();
+      prev_ = iter_;
+    }
+
+    virtual void Seek(const Slice& internal_key, const char *memtable_key)
+        override {
+      const char *encoded_key =
+        (memtable_key != nullptr) ?
+            memtable_key : EncodeKey(&tmp_, internal_key);
+
+      if (prev_.Valid() && rep_.cmp_(encoded_key, prev_.key()) >= 0) {
+        // prev_.key() is smaller than or equal to our target key; do a quick
+        // linear search (at most lookahead_ steps) starting from prev_
+        iter_ = prev_;
+
+        size_t cur = 0;
+        while (cur++ <= rep_.lookahead_ && iter_.Valid()) {
+          if (rep_.cmp_(encoded_key, iter_.key()) <= 0) {
+            return;
+          }
+          Next();
+        }
+      }
+
+      iter_.Seek(encoded_key);
+      prev_ = iter_;
+    }
+
+    virtual void SeekToFirst() override {
+      iter_.SeekToFirst();
+      prev_ = iter_;
+    }
+
+    virtual void SeekToLast() override {
+      iter_.SeekToLast();
+      prev_ = iter_;
+    }
+
+   protected:
+    std::string tmp_;       // For passing to EncodeKey
+
+   private:
+    const SkipListRep& rep_;
+    InlineSkipList<const MemTableRep::KeyComparator&>::Iterator iter_;
+    InlineSkipList<const MemTableRep::KeyComparator&>::Iterator prev_;
+  };
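+
+  // GetIterator() below instantiates LookaheadIterator only when the factory
+  // was configured with lookahead_ > 0; otherwise the plain Iterator is used.
+  // The lookahead variant trades a second tracked position for cheaper seeks
+  // that move forward by small distances.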
+  virtual MemTableRep::Iterator* GetIterator(Arena* arena = nullptr) override {
+    if (lookahead_ > 0) {
+      void *mem =
+        arena ? arena->AllocateAligned(sizeof(SkipListRep::LookaheadIterator))
+              : operator new(sizeof(SkipListRep::LookaheadIterator));
+      return new (mem) SkipListRep::LookaheadIterator(*this);
+    } else {
+      void *mem =
+        arena ? arena->AllocateAligned(sizeof(SkipListRep::Iterator))
+              : operator new(sizeof(SkipListRep::Iterator));
+      return new (mem) SkipListRep::Iterator(&skip_list_);
+    }
+  }
+};
+}
+
+MemTableRep* SkipListFactory::CreateMemTableRep(
+    const MemTableRep::KeyComparator& compare, MemTableAllocator* allocator,
+    const SliceTransform* transform, Logger* logger) {
+  return new SkipListRep(compare, allocator, transform, lookahead_);
+}
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/memtable/stl_wrappers.h b/external/rocksdb/memtable/stl_wrappers.h
new file mode 100755
index 0000000000..a431330171
--- /dev/null
+++ b/external/rocksdb/memtable/stl_wrappers.h
@@ -0,0 +1,34 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+#pragma once
+
+#include <map>
+#include <string>
+
+#include "rocksdb/comparator.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/slice.h"
+#include "util/coding.h"
+#include "util/murmurhash.h"
+
+namespace rocksdb {
+namespace stl_wrappers {
+
+class Base {
+ protected:
+  const MemTableRep::KeyComparator& compare_;
+  explicit Base(const MemTableRep::KeyComparator& compare)
+      : compare_(compare) {}
+};
+
+struct Compare : private Base {
+  explicit Compare(const MemTableRep::KeyComparator& compare) : Base(compare) {}
+  inline bool operator()(const char* a, const char* b) const {
+    return compare_(a, b) < 0;
+  }
+};
+
+}
+}
diff --git a/external/rocksdb/memtable/vectorrep.cc b/external/rocksdb/memtable/vectorrep.cc
new file mode 100755
index 0000000000..b9d9ebe0a9
--- /dev/null
+++ b/external/rocksdb/memtable/vectorrep.cc
@@ -0,0 +1,292 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+#ifndef ROCKSDB_LITE
+#include "rocksdb/memtablerep.h"
+
+#include <unordered_set>
+#include <set>
+#include <memory>
+#include <algorithm>
+#include <type_traits>
+
+#include "util/arena.h"
+#include "db/memtable.h"
+#include "memtable/stl_wrappers.h"
+#include "port/port.h"
+#include "util/mutexlock.h"
+
+namespace rocksdb {
+namespace {
+
+using namespace stl_wrappers;
+
+class VectorRep : public MemTableRep {
+ public:
+  VectorRep(const KeyComparator& compare, MemTableAllocator* allocator,
+            size_t count);
+
+  // Insert key into the collection. (The caller will pack key and value into a
+  // single buffer and pass that in as the parameter to Insert)
+  // REQUIRES: nothing that compares equal to key is currently in the
+  // collection.
+  virtual void Insert(KeyHandle handle) override;
+
+  // Returns true iff an entry that compares equal to key is in the collection.
diff --git a/external/rocksdb/memtable/vectorrep.cc b/external/rocksdb/memtable/vectorrep.cc
new file mode 100755
index 0000000000..b9d9ebe0a9
--- /dev/null
+++ b/external/rocksdb/memtable/vectorrep.cc
@@ -0,0 +1,292 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+#ifndef ROCKSDB_LITE
+#include "rocksdb/memtablerep.h"
+
+#include <unordered_set>
+#include <set>
+#include <memory>
+#include <algorithm>
+#include <type_traits>
+
+#include "util/arena.h"
+#include "db/memtable.h"
+#include "memtable/stl_wrappers.h"
+#include "port/port.h"
+#include "util/mutexlock.h"
+
+namespace rocksdb {
+namespace {
+
+using namespace stl_wrappers;
+
+class VectorRep : public MemTableRep {
+ public:
+  VectorRep(const KeyComparator& compare, MemTableAllocator* allocator,
+            size_t count);
+
+  // Insert key into the collection. (The caller will pack key and value into a
+  // single buffer and pass that in as the parameter to Insert)
+  // REQUIRES: nothing that compares equal to key is currently in the
+  // collection.
+  virtual void Insert(KeyHandle handle) override;
+
+  // Returns true iff an entry that compares equal to key is in the collection.
+  virtual bool Contains(const char* key) const override;
+
+  virtual void MarkReadOnly() override;
+
+  virtual size_t ApproximateMemoryUsage() override;
+
+  virtual void Get(const LookupKey& k, void* callback_args,
+                   bool (*callback_func)(void* arg,
+                                         const char* entry)) override;
+
+  virtual ~VectorRep() override { }
+
+  class Iterator : public MemTableRep::Iterator {
+    class VectorRep* vrep_;
+    std::shared_ptr<std::vector<const char*>> bucket_;
+    std::vector<const char*>::const_iterator mutable cit_;
+    const KeyComparator& compare_;
+    std::string tmp_;       // For passing to EncodeKey
+    bool mutable sorted_;
+    void DoSort() const;
+   public:
+    explicit Iterator(class VectorRep* vrep,
+                      std::shared_ptr<std::vector<const char*>> bucket,
+                      const KeyComparator& compare);
+
+    // Initialize an iterator over the specified collection.
+    // The returned iterator is not valid.
+    // explicit Iterator(const MemTableRep* collection);
+    virtual ~Iterator() override { };
+
+    // Returns true iff the iterator is positioned at a valid node.
+    virtual bool Valid() const override;
+
+    // Returns the key at the current position.
+    // REQUIRES: Valid()
+    virtual const char* key() const override;
+
+    // Advances to the next position.
+    // REQUIRES: Valid()
+    virtual void Next() override;
+
+    // Advances to the previous position.
+    // REQUIRES: Valid()
+    virtual void Prev() override;
+
+    // Advance to the first entry with a key >= target
+    virtual void Seek(const Slice& user_key, const char* memtable_key) override;
+
+    // Position at the first entry in collection.
+    // Final state of iterator is Valid() iff collection is not empty.
+    virtual void SeekToFirst() override;
+
+    // Position at the last entry in collection.
+    // Final state of iterator is Valid() iff collection is not empty.
+    virtual void SeekToLast() override;
+  };
+
+  // Return an iterator over the keys in this representation.
+  virtual MemTableRep::Iterator* GetIterator(Arena* arena) override;
+
+ private:
+  friend class Iterator;
+  typedef std::vector<const char*> Bucket;
+  std::shared_ptr<Bucket> bucket_;
+  mutable port::RWMutex rwlock_;
+  bool immutable_;
+  bool sorted_;
+  const KeyComparator& compare_;
+};
+
+void VectorRep::Insert(KeyHandle handle) {
+  auto* key = static_cast<char*>(handle);
+  WriteLock l(&rwlock_);
+  assert(!immutable_);
+  bucket_->push_back(key);
+}
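VectorRep::Insert() above is deliberately trivial: append under a short write lock and let the first reader pay for ordering via DoSort() further down. A toy version of the same append-now/sort-on-first-read idea (LazySortedVector is an illustrative stand-in, int payload assumed):

    #include <algorithm>
    #include <vector>

    // Toy append-only rep: writes are O(1); the vector is sorted lazily on the
    // first read after the last insert, mirroring VectorRep's DoSort() idea.
    class LazySortedVector {
     public:
      void Insert(int v) {
        data_.push_back(v);
        sorted_ = false;
      }

      // First read after inserts pays the O(n log n) sort once.
      const std::vector<int>& Sorted() {
        if (!sorted_) {
          std::sort(data_.begin(), data_.end());
          sorted_ = true;
        }
        return data_;
      }

     private:
      std::vector<int> data_;
      bool sorted_ = true;
    };

    int main() {
      LazySortedVector v;
      v.Insert(3); v.Insert(1); v.Insert(2);
      return v.Sorted().front() == 1 ? 0 : 1;
    }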
+
+// Returns true iff an entry that compares equal to key is in the collection.
+bool VectorRep::Contains(const char* key) const {
+  ReadLock l(&rwlock_);
+  return std::find(bucket_->begin(), bucket_->end(), key) != bucket_->end();
+}
+
+void VectorRep::MarkReadOnly() {
+  WriteLock l(&rwlock_);
+  immutable_ = true;
+}
+
+size_t VectorRep::ApproximateMemoryUsage() {
+  return
+    sizeof(bucket_) + sizeof(*bucket_) +
+    bucket_->size() *
+    sizeof(
+      std::remove_reference<decltype(*bucket_)>::type::value_type
+    );
+}
+
+VectorRep::VectorRep(const KeyComparator& compare, MemTableAllocator* allocator,
+                     size_t count)
+  : MemTableRep(allocator),
+    bucket_(new Bucket()),
+    immutable_(false),
+    sorted_(false),
+    compare_(compare) { bucket_.get()->reserve(count); }
+
+VectorRep::Iterator::Iterator(class VectorRep* vrep,
+                              std::shared_ptr<std::vector<const char*>> bucket,
+                              const KeyComparator& compare)
+: vrep_(vrep),
+  bucket_(bucket),
+  cit_(bucket_->end()),
+  compare_(compare),
+  sorted_(false) { }
+
+void VectorRep::Iterator::DoSort() const {
+  // vrep is non-null means that we are working on an immutable memtable
+  if (!sorted_ && vrep_ != nullptr) {
+    WriteLock l(&vrep_->rwlock_);
+    if (!vrep_->sorted_) {
+      std::sort(bucket_->begin(), bucket_->end(), Compare(compare_));
+      cit_ = bucket_->begin();
+      vrep_->sorted_ = true;
+    }
+    sorted_ = true;
+  }
+  if (!sorted_) {
+    std::sort(bucket_->begin(), bucket_->end(), Compare(compare_));
+    cit_ = bucket_->begin();
+    sorted_ = true;
+  }
+  assert(sorted_);
+  assert(vrep_ == nullptr || vrep_->sorted_);
+}
+
+// Returns true iff the iterator is positioned at a valid node.
+bool VectorRep::Iterator::Valid() const {
+  DoSort();
+  return cit_ != bucket_->end();
+}
+
+// Returns the key at the current position.
+// REQUIRES: Valid()
+const char* VectorRep::Iterator::key() const {
+  assert(sorted_);
+  return *cit_;
+}
+
+// Advances to the next position.
+// REQUIRES: Valid()
+void VectorRep::Iterator::Next() {
+  assert(sorted_);
+  if (cit_ == bucket_->end()) {
+    return;
+  }
+  ++cit_;
+}
+
+// Advances to the previous position.
+// REQUIRES: Valid()
+void VectorRep::Iterator::Prev() {
+  assert(sorted_);
+  if (cit_ == bucket_->begin()) {
+    // If you try to go back from the first element, the iterator should be
+    // invalidated. So we set it to past-the-end. This means that you can
+    // treat the container circularly.
+    cit_ = bucket_->end();
+  } else {
+    --cit_;
+  }
+}
+
+// Advance to the first entry with a key >= target
+void VectorRep::Iterator::Seek(const Slice& user_key,
+                               const char* memtable_key) {
+  DoSort();
+  // Do binary search to find first value not less than the target
+  const char* encoded_key =
+      (memtable_key != nullptr) ? memtable_key : EncodeKey(&tmp_, user_key);
+  cit_ = std::equal_range(bucket_->begin(),
+                          bucket_->end(),
+                          encoded_key,
+                          [this] (const char* a, const char* b) {
+                            return compare_(a, b) < 0;
+                          }).first;
+}
+
+// Position at the first entry in collection.
+// Final state of iterator is Valid() iff collection is not empty.
+void VectorRep::Iterator::SeekToFirst() {
+  DoSort();
+  cit_ = bucket_->begin();
+}
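Seek() above relies on std::equal_range(...).first over the sorted bucket, which for a sorted range is exactly what std::lower_bound returns: the first element not less than the target. A small self-contained check of that equivalence:

    #include <algorithm>
    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> v = {10, 20, 20, 30};

      // First element >= 20: both expressions land on index 1.
      auto by_equal_range = std::equal_range(v.begin(), v.end(), 20).first;
      auto by_lower_bound = std::lower_bound(v.begin(), v.end(), 20);
      assert(by_equal_range == by_lower_bound && *by_lower_bound == 20);

      // Seeking past the last key yields end(), i.e. an invalid position.
      assert(std::lower_bound(v.begin(), v.end(), 99) == v.end());
      return 0;
    }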
+
+// Position at the last entry in collection.
+// Final state of iterator is Valid() iff collection is not empty.
+void VectorRep::Iterator::SeekToLast() {
+  DoSort();
+  cit_ = bucket_->end();
+  if (bucket_->size() != 0) {
+    --cit_;
+  }
+}
+
+void VectorRep::Get(const LookupKey& k, void* callback_args,
+                    bool (*callback_func)(void* arg, const char* entry)) {
+  rwlock_.ReadLock();
+  VectorRep* vector_rep;
+  std::shared_ptr<Bucket> bucket;
+  if (immutable_) {
+    vector_rep = this;
+  } else {
+    vector_rep = nullptr;
+    bucket.reset(new Bucket(*bucket_)); // make a copy
+  }
+  VectorRep::Iterator iter(vector_rep, immutable_ ? bucket_ : bucket, compare_);
+  rwlock_.ReadUnlock();
+
+  for (iter.Seek(k.user_key(), k.memtable_key().data());
+       iter.Valid() && callback_func(callback_args, iter.key()); iter.Next()) {
+  }
+}
+
+MemTableRep::Iterator* VectorRep::GetIterator(Arena* arena) {
+  char* mem = nullptr;
+  if (arena != nullptr) {
+    mem = arena->AllocateAligned(sizeof(Iterator));
+  }
+  ReadLock l(&rwlock_);
+  // Do not sort here. The sorting would be done the first time
+  // a Seek is performed on the iterator.
+  if (immutable_) {
+    if (arena == nullptr) {
+      return new Iterator(this, bucket_, compare_);
+    } else {
+      return new (mem) Iterator(this, bucket_, compare_);
+    }
+  } else {
+    std::shared_ptr<Bucket> tmp;
+    tmp.reset(new Bucket(*bucket_)); // make a copy
+    if (arena == nullptr) {
+      return new Iterator(nullptr, tmp, compare_);
+    } else {
+      return new (mem) Iterator(nullptr, tmp, compare_);
+    }
+  }
+}
+} // anon namespace
+
+MemTableRep* VectorRepFactory::CreateMemTableRep(
+    const MemTableRep::KeyComparator& compare, MemTableAllocator* allocator,
+    const SliceTransform*, Logger* logger) {
+  return new VectorRep(compare, allocator, count_);
+}
+} // namespace rocksdb
+#endif  // ROCKSDB_LITE
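VectorRep::Get() above releases the read lock before running the caller's callback: if the rep is still mutable it first clones the bucket under the lock, so iteration never races with concurrent inserts. A condensed sketch of that copy-then-release pattern (SnapshotReader and its members are illustrative, not RocksDB types):

    #include <memory>
    #include <mutex>
    #include <vector>

    // Copy-then-release: take a private snapshot under the lock, drop the lock,
    // then run arbitrary (possibly slow) callbacks against the snapshot.
    class SnapshotReader {
     public:
      void Add(int v) {
        std::lock_guard<std::mutex> l(mu_);
        data_->push_back(v);
      }

      template <typename Callback>
      void ForEach(Callback cb) {
        std::shared_ptr<std::vector<int>> snap;
        {
          std::lock_guard<std::mutex> l(mu_);
          snap = std::make_shared<std::vector<int>>(*data_);  // copy while locked
        }
        for (int v : *snap) {  // callbacks run without the lock held
          if (!cb(v)) break;
        }
      }

     private:
      std::mutex mu_;
      std::shared_ptr<std::vector<int>> data_ =
          std::make_shared<std::vector<int>>();
    };

    int main() {
      SnapshotReader r;
      r.Add(1);
      r.Add(2);
      int sum = 0;
      r.ForEach([&](int v) { sum += v; return true; });
      return sum == 3 ? 0 : 1;
    }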
diff --git a/external/rocksdb/port/README b/external/rocksdb/port/README
new file mode 100755
index 0000000000..422563e25c
--- /dev/null
+++ b/external/rocksdb/port/README
@@ -0,0 +1,10 @@
+This directory contains interfaces and implementations that isolate the
+rest of the package from platform details.
+
+Code in the rest of the package includes "port.h" from this directory.
+"port.h" in turn includes a platform specific "port_<platform>.h" file
+that provides the platform specific implementation.
+
+See port_posix.h for an example of what must be provided in a platform
+specific header file.
+
diff --git a/external/rocksdb/port/dirent.h b/external/rocksdb/port/dirent.h
new file mode 100755
index 0000000000..f927db7e24
--- /dev/null
+++ b/external/rocksdb/port/dirent.h
@@ -0,0 +1,47 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// See port_example.h for documentation for the following types/functions.
+
+#ifndef STORAGE_LEVELDB_PORT_DIRENT_H_
+#define STORAGE_LEVELDB_PORT_DIRENT_H_
+
+#ifdef ROCKSDB_PLATFORM_POSIX
+#include <dirent.h>
+#include <sys/types.h>
+#elif defined(OS_WIN)
+
+namespace rocksdb {
+namespace port {
+
+struct dirent {
+  char d_name[_MAX_PATH]; /* filename */
+};
+
+struct DIR;
+
+DIR* opendir(const char* name);
+
+dirent* readdir(DIR* dirp);
+
+int closedir(DIR* dirp);
+
+}  // namespace port
+
+using port::dirent;
+using port::DIR;
+using port::opendir;
+using port::readdir;
+using port::closedir;
+
+}  // namespace rocksdb
+
+#endif  // OS_WIN
+
+#endif  // STORAGE_LEVELDB_PORT_DIRENT_H_
diff --git a/external/rocksdb/port/likely.h b/external/rocksdb/port/likely.h
new file mode 100755
index 0000000000..d6e6295cc0
--- /dev/null
+++ b/external/rocksdb/port/likely.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#ifndef PORT_LIKELY_H_
+#define PORT_LIKELY_H_
+
+#if defined(__GNUC__) && __GNUC__ >= 4
+#define LIKELY(x)   (__builtin_expect((x), 1))
+#define UNLIKELY(x) (__builtin_expect((x), 0))
+#else
+#define LIKELY(x)   (x)
+#define UNLIKELY(x) (x)
+#endif
+
+#endif  // PORT_LIKELY_H_
diff --git a/external/rocksdb/port/port.h b/external/rocksdb/port/port.h
new file mode 100755
index 0000000000..5f45dbb42c
--- /dev/null
+++ b/external/rocksdb/port/port.h
@@ -0,0 +1,21 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+
+#include <string>
+
+// Include the appropriate platform specific file below. If you are
+// porting to a new platform, see "port_example.h" for documentation
+// of what the new port_<platform>.h file must provide.
+#if defined(ROCKSDB_PLATFORM_POSIX)
+#include "port/port_posix.h"
+#elif defined(OS_WIN)
+#include "port/win/port_win.h"
+#endif
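likely.h above wraps GCC's __builtin_expect so hot code can hint the expected branch direction to the compiler, and compiles to a plain expression elsewhere. A self-contained usage sketch (ReadFirstByte is an invented example, not RocksDB code):

    #include <cstdio>
    #include <cstdlib>

    #if defined(__GNUC__) && __GNUC__ >= 4
    #define LIKELY(x)   (__builtin_expect((x), 1))
    #define UNLIKELY(x) (__builtin_expect((x), 0))
    #else
    #define LIKELY(x)   (x)
    #define UNLIKELY(x) (x)
    #endif

    // Illustrative error path: code layout and branch prediction are tuned
    // for the common case where p is non-null.
    static int ReadFirstByte(const unsigned char* p) {
      if (UNLIKELY(p == nullptr)) {
        std::fprintf(stderr, "null buffer\n");
        std::abort();
      }
      return *p;  // hot path, laid out as the fall-through branch
    }

    int main() {
      unsigned char b = 42;
      return ReadFirstByte(&b) == 42 ? 0 : 1;
    }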
diff --git a/external/rocksdb/port/port_example.h b/external/rocksdb/port/port_example.h
new file mode 100755
index 0000000000..e4bcb329b1
--- /dev/null
+++ b/external/rocksdb/port/port_example.h
@@ -0,0 +1,104 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// This file contains the specification, but not the implementations,
+// of the types/operations/etc. that should be defined by a platform
+// specific port_<platform>.h file. Use this file as a reference for
+// how to port this package to a new platform.
+
+#ifndef STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
+#define STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
+
+namespace rocksdb {
+namespace port {
+
+// TODO(jorlow): Many of these belong more in the environment class rather than
+//               here. We should try moving them and see if it affects perf.
+
+// The following boolean constant must be true on a little-endian machine
+// and false otherwise.
+static const bool kLittleEndian = true /* or some other expression */;
+
+// ------------------ Threading -------------------
+
+// A Mutex represents an exclusive lock.
+class Mutex {
+ public:
+  Mutex();
+  ~Mutex();
+
+  // Lock the mutex. Waits until other lockers have exited.
+  // Will deadlock if the mutex is already locked by this thread.
+  void Lock();
+
+  // Unlock the mutex.
+  // REQUIRES: This mutex was locked by this thread.
+  void Unlock();
+
+  // Optionally crash if this thread does not hold this mutex.
+  // The implementation must be fast, especially if NDEBUG is
+  // defined. The implementation is allowed to skip all checks.
+  void AssertHeld();
+};
+
+class CondVar {
+ public:
+  explicit CondVar(Mutex* mu);
+  ~CondVar();
+
+  // Atomically release *mu and block on this condition variable until
+  // either a call to SignalAll(), or a call to Signal() that picks
+  // this thread to wakeup.
+  // REQUIRES: this thread holds *mu
+  void Wait();
+
+  // If there are some threads waiting, wake up at least one of them.
+  void Signal();
+
+  // Wake up all waiting threads.
+  void SignalAll();
+};
+
+// Thread-safe initialization.
+// Used as follows:
+//      static port::OnceType init_control = LEVELDB_ONCE_INIT;
+//      static void Initializer() { ... do something ...; }
+//      ...
+//      port::InitOnce(&init_control, &Initializer);
+typedef intptr_t OnceType;
+#define LEVELDB_ONCE_INIT 0
+extern void InitOnce(port::OnceType*, void (*initializer)());
+
+// ------------------ Compression -------------------
+
+// Store the snappy compression of "input[0,input_length-1]" in *output.
+// Returns false if snappy is not supported by this port.
+extern bool Snappy_Compress(const char* input, size_t input_length,
+                            std::string* output);
+
+// If input[0,input_length-1] looks like a valid snappy compressed
+// buffer, store the size of the uncompressed data in *result and
+// return true. Else return false.
+extern bool Snappy_GetUncompressedLength(const char* input, size_t length,
+                                         size_t* result);
+
+// Attempt to snappy uncompress input[0,input_length-1] into *output.
+// Returns true if successful, false if the input is invalid lightweight
+// compressed data.
+//
+// REQUIRES: at least the first "n" bytes of output[] must be writable
+// where "n" is the result of a successful call to
+// Snappy_GetUncompressedLength.
+extern bool Snappy_Uncompress(const char* input_data, size_t input_length,
+                              char* output);
+
+}  // namespace port
+}  // namespace rocksdb
+
+#endif  // STORAGE_LEVELDB_PORT_PORT_EXAMPLE_H_
diff --git a/external/rocksdb/port/port_posix.cc b/external/rocksdb/port/port_posix.cc
new file mode 100755
index 0000000000..1ad81ad885
--- /dev/null
+++ b/external/rocksdb/port/port_posix.cc
@@ -0,0 +1,177 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "port/port_posix.h" + +#include +#if defined(__i386__) || defined(__x86_64__) +#include +#endif +#include +#include +#include +#include +#include +#include +#include +#include +#include "util/logging.h" + +namespace rocksdb { +namespace port { + +static int PthreadCall(const char* label, int result) { + if (result != 0 && result != ETIMEDOUT) { + fprintf(stderr, "pthread %s: %s\n", label, strerror(result)); + abort(); + } + return result; +} + +Mutex::Mutex(bool adaptive) { +#ifdef ROCKSDB_PTHREAD_ADAPTIVE_MUTEX + if (!adaptive) { + PthreadCall("init mutex", pthread_mutex_init(&mu_, nullptr)); + } else { + pthread_mutexattr_t mutex_attr; + PthreadCall("init mutex attr", pthread_mutexattr_init(&mutex_attr)); + PthreadCall("set mutex attr", + pthread_mutexattr_settype(&mutex_attr, + PTHREAD_MUTEX_ADAPTIVE_NP)); + PthreadCall("init mutex", pthread_mutex_init(&mu_, &mutex_attr)); + PthreadCall("destroy mutex attr", + pthread_mutexattr_destroy(&mutex_attr)); + } +#else + PthreadCall("init mutex", pthread_mutex_init(&mu_, nullptr)); +#endif // ROCKSDB_PTHREAD_ADAPTIVE_MUTEX +} + +Mutex::~Mutex() { PthreadCall("destroy mutex", pthread_mutex_destroy(&mu_)); } + +void Mutex::Lock() { + PthreadCall("lock", pthread_mutex_lock(&mu_)); +#ifndef NDEBUG + locked_ = true; +#endif +} + +void Mutex::Unlock() { +#ifndef NDEBUG + locked_ = false; +#endif + PthreadCall("unlock", pthread_mutex_unlock(&mu_)); +} + +void Mutex::AssertHeld() { +#ifndef NDEBUG + assert(locked_); +#endif +} + +CondVar::CondVar(Mutex* mu) + : mu_(mu) { + PthreadCall("init cv", pthread_cond_init(&cv_, nullptr)); +} + +CondVar::~CondVar() { PthreadCall("destroy cv", pthread_cond_destroy(&cv_)); } + +void CondVar::Wait() { +#ifndef NDEBUG + mu_->locked_ = false; +#endif + PthreadCall("wait", pthread_cond_wait(&cv_, &mu_->mu_)); +#ifndef NDEBUG + mu_->locked_ = true; +#endif +} + +bool CondVar::TimedWait(uint64_t abs_time_us) { + struct timespec ts; + ts.tv_sec = static_cast(abs_time_us / 1000000); + ts.tv_nsec = static_cast((abs_time_us % 1000000) * 1000); + +#ifndef NDEBUG + mu_->locked_ = false; +#endif + int err = pthread_cond_timedwait(&cv_, &mu_->mu_, &ts); +#ifndef NDEBUG + mu_->locked_ = true; +#endif + if (err == ETIMEDOUT) { + return true; + } + if (err != 0) { + PthreadCall("timedwait", err); + } + return false; +} + +void CondVar::Signal() { + PthreadCall("signal", pthread_cond_signal(&cv_)); +} + +void CondVar::SignalAll() { + PthreadCall("broadcast", pthread_cond_broadcast(&cv_)); +} + +RWMutex::RWMutex() { + PthreadCall("init mutex", pthread_rwlock_init(&mu_, nullptr)); +} + +RWMutex::~RWMutex() { PthreadCall("destroy mutex", pthread_rwlock_destroy(&mu_)); } + +void RWMutex::ReadLock() { PthreadCall("read lock", pthread_rwlock_rdlock(&mu_)); } + +void RWMutex::WriteLock() { PthreadCall("write lock", pthread_rwlock_wrlock(&mu_)); } + +void RWMutex::ReadUnlock() { PthreadCall("read unlock", pthread_rwlock_unlock(&mu_)); } + +void RWMutex::WriteUnlock() { PthreadCall("write unlock", pthread_rwlock_unlock(&mu_)); } + +int PhysicalCoreID() { +#if defined(__i386__) || defined(__x86_64__) + // if you ever find that this function is hot on Linux, you can go from + // ~200 nanos to ~20 nanos by adding the machinery to use __vdso_getcpu + unsigned eax, ebx = 0, ecx, edx; + __get_cpuid(1, &eax, &ebx, 
&ecx, &edx); + return ebx >> 24; +#else + // getcpu or sched_getcpu could work here + return -1; +#endif +} + +void InitOnce(OnceType* once, void (*initializer)()) { + PthreadCall("once", pthread_once(once, initializer)); +} + +void Crash(const std::string& srcfile, int srcline) { + fprintf(stdout, "Crashing at %s:%d\n", srcfile.c_str(), srcline); + fflush(stdout); + kill(getpid(), SIGTERM); +} + +int GetMaxOpenFiles() { +#if defined(RLIMIT_NOFILE) + struct rlimit no_files_limit; + if (getrlimit(RLIMIT_NOFILE, &no_files_limit) != 0) { + return -1; + } + // protect against overflow + if (no_files_limit.rlim_cur >= std::numeric_limits::max()) { + return std::numeric_limits::max(); + } + return static_cast(no_files_limit.rlim_cur); +#endif + return -1; +} + +} // namespace port +} // namespace rocksdb diff --git a/external/rocksdb/port/port_posix.h b/external/rocksdb/port/port_posix.h new file mode 100755 index 0000000000..15c4d0c0ae --- /dev/null +++ b/external/rocksdb/port/port_posix.h @@ -0,0 +1,174 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +// See port_example.h for documentation for the following types/functions. + +#pragma once + +// size_t printf formatting named in the manner of C99 standard formatting +// strings such as PRIu64 +// in fact, we could use that one +#define ROCKSDB_PRIszt "zu" + +#define __declspec(S) + +#define ROCKSDB_NOEXCEPT noexcept + +#undef PLATFORM_IS_LITTLE_ENDIAN +#if defined(OS_MACOSX) + #include + #if defined(__DARWIN_LITTLE_ENDIAN) && defined(__DARWIN_BYTE_ORDER) + #define PLATFORM_IS_LITTLE_ENDIAN \ + (__DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN) + #endif +#elif defined(OS_SOLARIS) + #include + #ifdef _LITTLE_ENDIAN + #define PLATFORM_IS_LITTLE_ENDIAN true + #else + #define PLATFORM_IS_LITTLE_ENDIAN false + #endif +#elif defined(OS_FREEBSD) || defined(OS_OPENBSD) || defined(OS_NETBSD) || \ + defined(OS_DRAGONFLYBSD) || defined(OS_ANDROID) + #include + #include + #define PLATFORM_IS_LITTLE_ENDIAN (_BYTE_ORDER == _LITTLE_ENDIAN) +#else + #include +#endif +#include + +#include +#include +#include +#include + +#ifndef PLATFORM_IS_LITTLE_ENDIAN +#define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN) +#endif + +#if defined(OS_MACOSX) || defined(OS_SOLARIS) || defined(OS_FREEBSD) ||\ + defined(OS_NETBSD) || defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) ||\ + defined(OS_ANDROID) || defined(CYGWIN) +// Use fread/fwrite/fflush on platforms without _unlocked variants +#define fread_unlocked fread +#define fwrite_unlocked fwrite +#define fflush_unlocked fflush +#endif + +#if defined(OS_MACOSX) || defined(OS_FREEBSD) ||\ + defined(OS_OPENBSD) || defined(OS_DRAGONFLYBSD) +// Use fsync() on platforms without fdatasync() +#define fdatasync fsync +#endif + +#if defined(OS_ANDROID) && __ANDROID_API__ < 9 +// fdatasync() was only introduced in API level 9 on Android. Use fsync() +// when targeting older platforms. 
+#define fdatasync fsync +#endif + +namespace rocksdb { +namespace port { + +// For use at db/file_indexer.h kLevelMaxIndex +const int kMaxInt32 = std::numeric_limits::max(); +const uint64_t kMaxUint64 = std::numeric_limits::max(); +const int64_t kMaxInt64 = std::numeric_limits::max(); +const size_t kMaxSizet = std::numeric_limits::max(); + +static const bool kLittleEndian = PLATFORM_IS_LITTLE_ENDIAN; +#undef PLATFORM_IS_LITTLE_ENDIAN + +class CondVar; + +class Mutex { + public: + /* implicit */ Mutex(bool adaptive = false); + ~Mutex(); + + void Lock(); + void Unlock(); + // this will assert if the mutex is not locked + // it does NOT verify that mutex is held by a calling thread + void AssertHeld(); + + private: + friend class CondVar; + pthread_mutex_t mu_; +#ifndef NDEBUG + bool locked_; +#endif + + // No copying + Mutex(const Mutex&); + void operator=(const Mutex&); +}; + +class RWMutex { + public: + RWMutex(); + ~RWMutex(); + + void ReadLock(); + void WriteLock(); + void ReadUnlock(); + void WriteUnlock(); + void AssertHeld() { } + + private: + pthread_rwlock_t mu_; // the underlying platform mutex + + // No copying allowed + RWMutex(const RWMutex&); + void operator=(const RWMutex&); +}; + +class CondVar { + public: + explicit CondVar(Mutex* mu); + ~CondVar(); + void Wait(); + // Timed condition wait. Returns true if timeout occurred. + bool TimedWait(uint64_t abs_time_us); + void Signal(); + void SignalAll(); + private: + pthread_cond_t cv_; + Mutex* mu_; +}; + +static inline void AsmVolatilePause() { +#if defined(__i386__) || defined(__x86_64__) + asm volatile("pause"); +#elif defined(__aarch64__) + asm volatile("wfe"); +#elif defined(__powerpc64__) + asm volatile("or 27,27,27"); +#endif + // it's okay for other platforms to be no-ops +} + +// Returns -1 if not available on this platform +extern int PhysicalCoreID(); + +typedef pthread_once_t OnceType; +#define LEVELDB_ONCE_INIT PTHREAD_ONCE_INIT +extern void InitOnce(OnceType* once, void (*initializer)()); + +#define CACHE_LINE_SIZE 64U + +#define PREFETCH(addr, rw, locality) __builtin_prefetch(addr, rw, locality) + +extern void Crash(const std::string& srcfile, int srcline); + +extern int GetMaxOpenFiles(); + +} // namespace port +} // namespace rocksdb diff --git a/external/rocksdb/port/stack_trace.cc b/external/rocksdb/port/stack_trace.cc new file mode 100755 index 0000000000..bec0c994b8 --- /dev/null +++ b/external/rocksdb/port/stack_trace.cc @@ -0,0 +1,137 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+// +#include "port/stack_trace.h" + +#if defined(ROCKSDB_LITE) || !(defined(ROCKSDB_BACKTRACE) || defined(OS_MACOSX)) || \ + defined(CYGWIN) || defined(OS_FREEBSD) + +// noop + +namespace rocksdb { +namespace port { +void InstallStackTraceHandler() {} +void PrintStack(int first_frames_to_skip) {} +} // namespace port +} // namespace rocksdb + +#else + +#include +#include +#include +#include +#include +#include +#include + +namespace rocksdb { +namespace port { + +namespace { + +#ifdef OS_LINUX +const char* GetExecutableName() { + static char name[1024]; + + char link[1024]; + snprintf(link, sizeof(link), "/proc/%d/exe", getpid()); + auto read = readlink(link, name, sizeof(name) - 1); + if (-1 == read) { + return nullptr; + } else { + name[read] = 0; + return name; + } +} + +void PrintStackTraceLine(const char* symbol, void* frame) { + static const char* executable = GetExecutableName(); + if (symbol) { + fprintf(stderr, "%s ", symbol); + } + if (executable) { + // out source to addr2line, for the address translation + const int kLineMax = 256; + char cmd[kLineMax]; + snprintf(cmd, kLineMax, "addr2line %p -e %s -f -C 2>&1", frame, executable); + auto f = popen(cmd, "r"); + if (f) { + char line[kLineMax]; + while (fgets(line, sizeof(line), f)) { + line[strlen(line) - 1] = 0; // remove newline + fprintf(stderr, "%s\t", line); + } + pclose(f); + } + } else { + fprintf(stderr, " %p", frame); + } + + fprintf(stderr, "\n"); +} +#elif defined(OS_MACOSX) + +void PrintStackTraceLine(const char* symbol, void* frame) { + static int pid = getpid(); + // out source to atos, for the address translation + const int kLineMax = 256; + char cmd[kLineMax]; + snprintf(cmd, kLineMax, "xcrun atos %p -p %d 2>&1", frame, pid); + auto f = popen(cmd, "r"); + if (f) { + char line[kLineMax]; + while (fgets(line, sizeof(line), f)) { + line[strlen(line) - 1] = 0; // remove newline + fprintf(stderr, "%s\t", line); + } + pclose(f); + } else if (symbol) { + fprintf(stderr, "%s ", symbol); + } + + fprintf(stderr, "\n"); +} + +#endif + +} // namespace + +void PrintStack(int first_frames_to_skip) { + const int kMaxFrames = 100; + void* frames[kMaxFrames]; + + auto num_frames = backtrace(frames, kMaxFrames); + auto symbols = backtrace_symbols(frames, num_frames); + + for (int i = first_frames_to_skip; i < num_frames; ++i) { + fprintf(stderr, "#%-2d ", i - first_frames_to_skip); + PrintStackTraceLine((symbols != nullptr) ? symbols[i] : nullptr, frames[i]); + } +} + +static void StackTraceHandler(int sig) { + // reset to default handler + signal(sig, SIG_DFL); + fprintf(stderr, "Received signal %d (%s)\n", sig, strsignal(sig)); + // skip the top three signal handler related frames + PrintStack(3); + // re-signal to default handler (so we still get core dump if needed...) + raise(sig); +} + +void InstallStackTraceHandler() { + // just use the plain old signal as it's simple and sufficient + // for this use case + signal(SIGILL, StackTraceHandler); + signal(SIGSEGV, StackTraceHandler); + signal(SIGBUS, StackTraceHandler); + signal(SIGABRT, StackTraceHandler); +} + +} // namespace port +} // namespace rocksdb + +#endif diff --git a/external/rocksdb/port/stack_trace.h b/external/rocksdb/port/stack_trace.h new file mode 100755 index 0000000000..3108b4d2e1 --- /dev/null +++ b/external/rocksdb/port/stack_trace.h @@ -0,0 +1,19 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. 
An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +#pragma once +namespace rocksdb { +namespace port { + +// Install a signal handler to print callstack on the following signals: +// SIGILL SIGSEGV SIGBUS SIGABRT +// Currently supports linux only. No-op otherwise. +void InstallStackTraceHandler(); + +// Prints stack, skips skip_first_frames frames +void PrintStack(int first_frames_to_skip = 0); + +} // namespace port +} // namespace rocksdb diff --git a/external/rocksdb/port/sys_time.h b/external/rocksdb/port/sys_time.h new file mode 100755 index 0000000000..53e646e69a --- /dev/null +++ b/external/rocksdb/port/sys_time.h @@ -0,0 +1,48 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +// This file is a portable substitute for sys/time.h which does not exist on +// Windows + +#ifndef STORAGE_LEVELDB_PORT_SYS_TIME_H_ +#define STORAGE_LEVELDB_PORT_SYS_TIME_H_ + +#if defined(OS_WIN) && defined(_MSC_VER) + +#include + +namespace rocksdb { + +namespace port { + +// Avoid including winsock2.h for this definition +typedef struct timeval { + long tv_sec; + long tv_usec; +} timeval; + +void gettimeofday(struct timeval* tv, struct timezone* tz); + +inline struct tm* localtime_r(const time_t* timep, struct tm* result) { + errno_t ret = localtime_s(result, timep); + return (ret == 0) ? result : NULL; +} +} + +using port::timeval; +using port::gettimeofday; +using port::localtime_r; +} + +#else +#include +#include +#endif + +#endif // STORAGE_LEVELDB_PORT_SYS_TIME_H_ diff --git a/external/rocksdb/port/util_logger.h b/external/rocksdb/port/util_logger.h new file mode 100755 index 0000000000..05782b0c58 --- /dev/null +++ b/external/rocksdb/port/util_logger.h @@ -0,0 +1,23 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_ +#define STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_ + +// Include the appropriate platform specific file below. If you are +// porting to a new platform, see "port_example.h" for documentation +// of what the new port_.h file must provide. + +#if defined(ROCKSDB_PLATFORM_POSIX) +#include "util/posix_logger.h" +#elif defined(OS_WIN) +#include "port/win/win_logger.h" +#endif + +#endif // STORAGE_LEVELDB_PORT_UTIL_LOGGER_H_ diff --git a/external/rocksdb/port/win/env_default.cc b/external/rocksdb/port/win/env_default.cc new file mode 100755 index 0000000000..09c25c02b8 --- /dev/null +++ b/external/rocksdb/port/win/env_default.cc @@ -0,0 +1,42 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <mutex>
+
+#include <rocksdb/env.h>
+#include "port/win/env_win.h"
+
+namespace rocksdb {
+namespace port {
+
+// We choose to create this on the heap and using std::once for the following
+// reasons
+// 1) Currently available MS compiler does not implement atomic C++11
+//    initialization of function local statics
+// 2) We choose not to destroy the env because joining the threads from the
+//    system loader which destroys the statics (same as from DLLMain) creates
+//    a system loader dead-lock. In this manner any remaining threads are
+//    terminated OK.
+namespace {
+  std::once_flag winenv_once_flag;
+  Env* envptr;
+};
+
+}
+
+Env* Env::Default() {
+  using namespace port;
+  std::call_once(winenv_once_flag, []() { envptr = new WinEnv(); });
+  return envptr;
+}
+
+}
+
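env_default.cc above uses std::call_once instead of a function-local static because, as its comment notes, the MSVC of the day did not implement C++11 magic statics, and the WinEnv is intentionally leaked to avoid the loader-lock deadlock at DLL teardown. A portable sketch of the same once-guarded, never-destroyed singleton (Widget is a placeholder type, not RocksDB code):

    #include <mutex>

    struct Widget {
      int value = 7;
    };

    namespace {
    std::once_flag widget_once;
    Widget* widget_ptr = nullptr;  // intentionally never deleted, like WinEnv
    }

    // Thread-safe lazy initialization without relying on C++11 magic statics.
    Widget* DefaultWidget() {
      std::call_once(widget_once, []() { widget_ptr = new Widget(); });
      return widget_ptr;
    }

    int main() { return DefaultWidget()->value == 7 ? 0 : 1; }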
diff --git a/external/rocksdb/port/win/env_win.cc b/external/rocksdb/port/win/env_win.cc
new file mode 100755
index 0000000000..2a718185f4
--- /dev/null
+++ b/external/rocksdb/port/win/env_win.cc
@@ -0,0 +1,1038 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <algorithm>
+#include <ctime>
+#include <thread>
+
+#include <errno.h>
+#include <process.h>  // _getpid
+#include <io.h>       // _access
+#include <direct.h>   // _rmdir, _mkdir, _getcwd
+#include <sys/types.h>
+#include <sys/stat.h>
+
+#include "rocksdb/env.h"
+#include "rocksdb/slice.h"
+
+#include "port/port.h"
+#include "port/dirent.h"
+#include "port/win/win_logger.h"
+#include "port/win/io_win.h"
+#include "port/win/env_win.h"
+
+#include "util/iostats_context_imp.h"
+
+#include "util/thread_status_updater.h"
+#include "util/thread_status_util.h"
+
+#include <rpc.h>  // For UUID generation
+#include <windows.h>
+
+namespace rocksdb {
+
+ThreadStatusUpdater* CreateThreadStatusUpdater() {
+  return new ThreadStatusUpdater();
+}
+
+namespace {
+
+// RAII helpers for HANDLEs
+const auto CloseHandleFunc = [](HANDLE h) { ::CloseHandle(h); };
+typedef std::unique_ptr<void, decltype(CloseHandleFunc)> UniqueCloseHandlePtr;
+
+void WinthreadCall(const char* label, std::error_code result) {
+  if (0 != result.value()) {
+    fprintf(stderr, "pthread %s: %s\n", label, strerror(result.value()));
+    abort();
+  }
+}
+
+}
+
+namespace port {
+
+WinEnvIO::WinEnvIO(Env* hosted_env)
+    : hosted_env_(hosted_env),
+      page_size_(4 * 1012),
+      allocation_granularity_(page_size_),
+      perf_counter_frequency_(0),
+      GetSystemTimePreciseAsFileTime_(NULL) {
+
+  SYSTEM_INFO sinfo;
+  GetSystemInfo(&sinfo);
+
+  page_size_ = sinfo.dwPageSize;
+  allocation_granularity_ = sinfo.dwAllocationGranularity;
+
+  {
+    LARGE_INTEGER qpf;
+    BOOL ret = QueryPerformanceFrequency(&qpf);
+    assert(ret == TRUE);
+    perf_counter_frequency_ = qpf.QuadPart;
+  }
+
+  HMODULE module = GetModuleHandle("kernel32.dll");
+  if (module != NULL) {
+    GetSystemTimePreciseAsFileTime_ = (FnGetSystemTimePreciseAsFileTime)GetProcAddress(
+        module, "GetSystemTimePreciseAsFileTime");
+  }
+}
+
+WinEnvIO::~WinEnvIO() {
+}
+
+Status WinEnvIO::DeleteFile(const std::string& fname) {
+  Status result;
+
+  if (_unlink(fname.c_str())) {
+    result = IOError("Failed to delete: " + fname, errno);
+  }
+
+  return result;
+}
+
+Status WinEnvIO::GetCurrentTime(int64_t* unix_time) {
+  time_t time = std::time(nullptr);
+  if (time == (time_t)(-1)) {
+    return Status::NotSupported("Failed to get time");
+  }
+
+  *unix_time = time;
+  return Status::OK();
+}
+
+Status WinEnvIO::NewSequentialFile(const std::string& fname,
+                                   std::unique_ptr<SequentialFile>* result,
+                                   const EnvOptions& options) {
+  Status s;
+
+  result->reset();
+
+  // Corruption test needs to rename and delete files of these kind
+  // while they are still open with another handle. For that reason we
+  // allow share_write and delete(allows rename).
+ HANDLE hFile = INVALID_HANDLE_VALUE; + { + IOSTATS_TIMER_GUARD(open_nanos); + hFile = CreateFileA( + fname.c_str(), GENERIC_READ, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, NULL, + OPEN_EXISTING, // Original fopen mode is "rb" + FILE_ATTRIBUTE_NORMAL, NULL); + } + + if (INVALID_HANDLE_VALUE == hFile) { + auto lastError = GetLastError(); + s = IOErrorFromWindowsError("Failed to open NewSequentialFile" + fname, + lastError); + } else { + result->reset(new WinSequentialFile(fname, hFile, options)); + } + return s; +} + +Status WinEnvIO::NewRandomAccessFile(const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options) { + result->reset(); + Status s; + + // Open the file for read-only random access + // Random access is to disable read-ahead as the system reads too much data + DWORD fileFlags = FILE_ATTRIBUTE_READONLY; + + if (!options.use_os_buffer && !options.use_mmap_reads) { + fileFlags |= FILE_FLAG_NO_BUFFERING; + } else { + fileFlags |= FILE_FLAG_RANDOM_ACCESS; + } + + /// Shared access is necessary for corruption test to pass + // almost all tests would work with a possible exception of fault_injection + HANDLE hFile = 0; + { + IOSTATS_TIMER_GUARD(open_nanos); + hFile = + CreateFileA(fname.c_str(), GENERIC_READ, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + NULL, OPEN_EXISTING, fileFlags, NULL); + } + + if (INVALID_HANDLE_VALUE == hFile) { + auto lastError = GetLastError(); + return IOErrorFromWindowsError( + "NewRandomAccessFile failed to Create/Open: " + fname, lastError); + } + + UniqueCloseHandlePtr fileGuard(hFile, CloseHandleFunc); + + // CAUTION! This will map the entire file into the process address space + if (options.use_mmap_reads && sizeof(void*) >= 8) { + // Use mmap when virtual address-space is plentiful. + uint64_t fileSize; + + s = GetFileSize(fname, &fileSize); + + if (s.ok()) { + // Will not map empty files + if (fileSize == 0) { + return IOError( + "NewRandomAccessFile failed to map empty file: " + fname, EINVAL); + } + + HANDLE hMap = CreateFileMappingA(hFile, NULL, PAGE_READONLY, + 0, // Whole file at its present length + 0, + NULL); // Mapping name + + if (!hMap) { + auto lastError = GetLastError(); + return IOErrorFromWindowsError( + "Failed to create file mapping for NewRandomAccessFile: " + fname, + lastError); + } + + UniqueCloseHandlePtr mapGuard(hMap, CloseHandleFunc); + + const void* mapped_region = + MapViewOfFileEx(hMap, FILE_MAP_READ, + 0, // High DWORD of access start + 0, // Low DWORD + fileSize, + NULL); // Let the OS choose the mapping + + if (!mapped_region) { + auto lastError = GetLastError(); + return IOErrorFromWindowsError( + "Failed to MapViewOfFile for NewRandomAccessFile: " + fname, + lastError); + } + + result->reset(new WinMmapReadableFile(fname, hFile, hMap, mapped_region, + fileSize)); + + mapGuard.release(); + fileGuard.release(); + } + } else { + result->reset(new WinRandomAccessFile(fname, hFile, page_size_, options)); + fileGuard.release(); + } + return s; +} + +Status WinEnvIO::NewWritableFile(const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options) { + const size_t c_BufferCapacity = 64 * 1024; + + EnvOptions local_options(options); + + result->reset(); + Status s; + + DWORD fileFlags = FILE_ATTRIBUTE_NORMAL; + + if (!local_options.use_os_buffer && !local_options.use_mmap_writes) { + fileFlags = FILE_FLAG_NO_BUFFERING; + } + + // Desired access. 
We want write-only access here, but if the file is to be memory
+  // mapped there is no write-only mode, so we have to create it
+  // Read/Write. MapViewOfFile, however, can still map a write-only view.
+  DWORD desired_access = GENERIC_WRITE;
+  DWORD shared_mode = FILE_SHARE_READ;
+
+  if (local_options.use_mmap_writes) {
+    desired_access |= GENERIC_READ;
+  } else {
+    // Adding this solely for tests to pass (fault_injection_test,
+    // wal_manager_test).
+    shared_mode |= (FILE_SHARE_WRITE | FILE_SHARE_DELETE);
+  }
+
+  HANDLE hFile = 0;
+  {
+    IOSTATS_TIMER_GUARD(open_nanos);
+    hFile = CreateFileA(
+      fname.c_str(),
+      desired_access,  // Access desired
+      shared_mode,
+      NULL,            // Security attributes
+      CREATE_ALWAYS,   // Posix env says O_CREAT | O_RDWR | O_TRUNC
+      fileFlags,       // Flags
+      NULL);           // Template File
+  }
+
+  if (INVALID_HANDLE_VALUE == hFile) {
+    auto lastError = GetLastError();
+    return IOErrorFromWindowsError(
+        "Failed to create a NewWritableFile: " + fname, lastError);
+  }
+
+  if (options.use_mmap_writes) {
+    // We usually do not use mmapping on SSD and thus we pass memory
+    // page_size
+    result->reset(new WinMmapFile(fname, hFile, page_size_,
+                                  allocation_granularity_, local_options));
+  } else {
+    // Here we want the buffer allocation to be aligned by the SSD page size
+    // and to be a multiple of it
+    result->reset(new WinWritableFile(fname, hFile, page_size_,
+                                      c_BufferCapacity, local_options));
+  }
+  return s;
+}
+
+Status WinEnvIO::NewDirectory(const std::string& name,
+                              std::unique_ptr<Directory>* result) {
+  Status s;
+  // Must be nullptr on failure
+  result->reset();
+  // Must fail if directory does not exist
+  if (!DirExists(name)) {
+    s = IOError("Directory does not exist: " + name, EEXIST);
+  } else {
+    IOSTATS_TIMER_GUARD(open_nanos);
+    result->reset(new WinDirectory);
+  }
+  return s;
+}
+
+Status WinEnvIO::FileExists(const std::string& fname) {
+  // F_OK == 0
+  const int F_OK_ = 0;
+  return _access(fname.c_str(), F_OK_) == 0 ?
Status::OK() + : Status::NotFound(); +} + +Status WinEnvIO::GetChildren(const std::string& dir, + std::vector* result) { + std::vector output; + + Status status; + + auto CloseDir = [](DIR* p) { closedir(p); }; + std::unique_ptr dirp(opendir(dir.c_str()), + CloseDir); + + if (!dirp) { + status = IOError(dir, errno); + } else { + if (result->capacity() > 0) { + output.reserve(result->capacity()); + } + + struct dirent* ent = readdir(dirp.get()); + while (ent) { + output.push_back(ent->d_name); + ent = readdir(dirp.get()); + } + } + + output.swap(*result); + + return status; +} + +Status WinEnvIO::CreateDir(const std::string& name) { + Status result; + + if (_mkdir(name.c_str()) != 0) { + auto code = errno; + result = IOError("Failed to create dir: " + name, code); + } + + return result; +} + +Status WinEnvIO::CreateDirIfMissing(const std::string& name) { + Status result; + + if (DirExists(name)) { + return result; + } + + if (_mkdir(name.c_str()) != 0) { + if (errno == EEXIST) { + result = + Status::IOError("`" + name + "' exists but is not a directory"); + } else { + auto code = errno; + result = IOError("Failed to create dir: " + name, code); + } + } + + return result; +} + +Status WinEnvIO::DeleteDir(const std::string& name) { + Status result; + if (_rmdir(name.c_str()) != 0) { + auto code = errno; + result = IOError("Failed to remove dir: " + name, code); + } + return result; +} + +Status WinEnvIO::GetFileSize(const std::string& fname, + uint64_t* size) { + Status s; + + WIN32_FILE_ATTRIBUTE_DATA attrs; + if (GetFileAttributesExA(fname.c_str(), GetFileExInfoStandard, &attrs)) { + ULARGE_INTEGER file_size; + file_size.HighPart = attrs.nFileSizeHigh; + file_size.LowPart = attrs.nFileSizeLow; + *size = file_size.QuadPart; + } else { + auto lastError = GetLastError(); + s = IOErrorFromWindowsError("Can not get size for: " + fname, lastError); + } + return s; +} + +uint64_t WinEnvIO::FileTimeToUnixTime(const FILETIME& ftTime) { + const uint64_t c_FileTimePerSecond = 10000000U; + // UNIX epoch starts on 1970-01-01T00:00:00Z + // Windows FILETIME starts on 1601-01-01T00:00:00Z + // Therefore, we need to subtract the below number of seconds from + // the seconds that we obtain from FILETIME with an obvious loss of + // precision + const uint64_t c_SecondBeforeUnixEpoch = 11644473600U; + + ULARGE_INTEGER li; + li.HighPart = ftTime.dwHighDateTime; + li.LowPart = ftTime.dwLowDateTime; + + uint64_t result = + (li.QuadPart / c_FileTimePerSecond) - c_SecondBeforeUnixEpoch; + return result; +} + +Status WinEnvIO::GetFileModificationTime(const std::string& fname, + uint64_t* file_mtime) { + Status s; + + WIN32_FILE_ATTRIBUTE_DATA attrs; + if (GetFileAttributesExA(fname.c_str(), GetFileExInfoStandard, &attrs)) { + *file_mtime = FileTimeToUnixTime(attrs.ftLastWriteTime); + } else { + auto lastError = GetLastError(); + s = IOErrorFromWindowsError( + "Can not get file modification time for: " + fname, lastError); + *file_mtime = 0; + } + + return s; +} + +Status WinEnvIO::RenameFile(const std::string& src, + const std::string& target) { + Status result; + + // rename() is not capable of replacing the existing file as on Linux + // so use OS API directly + if (!MoveFileExA(src.c_str(), target.c_str(), MOVEFILE_REPLACE_EXISTING)) { + DWORD lastError = GetLastError(); + + std::string text("Failed to rename: "); + text.append(src).append(" to: ").append(target); + + result = IOErrorFromWindowsError(text, lastError); + } + + return result; +} + +Status WinEnvIO::LinkFile(const std::string& src, + const 
std::string& target) { + Status result; + + if (!CreateHardLinkA(target.c_str(), src.c_str(), NULL)) { + DWORD lastError = GetLastError(); + + std::string text("Failed to link: "); + text.append(src).append(" to: ").append(target); + + result = IOErrorFromWindowsError(text, lastError); + } + + return result; +} + +Status WinEnvIO::LockFile(const std::string& lockFname, + FileLock** lock) { + assert(lock != nullptr); + + *lock = NULL; + Status result; + + // No-sharing, this is a LOCK file + const DWORD ExclusiveAccessON = 0; + + // Obtain exclusive access to the LOCK file + // Previously, instead of NORMAL attr we set DELETE on close and that worked + // well except with fault_injection test that insists on deleting it. + HANDLE hFile = 0; + { + IOSTATS_TIMER_GUARD(open_nanos); + hFile = CreateFileA(lockFname.c_str(), (GENERIC_READ | GENERIC_WRITE), + ExclusiveAccessON, NULL, CREATE_ALWAYS, + FILE_ATTRIBUTE_NORMAL, NULL); + } + + if (INVALID_HANDLE_VALUE == hFile) { + auto lastError = GetLastError(); + result = IOErrorFromWindowsError( + "Failed to create lock file: " + lockFname, lastError); + } else { + *lock = new WinFileLock(hFile); + } + + return result; +} + +Status WinEnvIO::UnlockFile(FileLock* lock) { + Status result; + + assert(lock != nullptr); + + delete lock; + + return result; +} + +Status WinEnvIO::GetTestDirectory(std::string* result) { + std::string output; + + const char* env = getenv("TEST_TMPDIR"); + if (env && env[0] != '\0') { + output = env; + CreateDir(output); + } else { + env = getenv("TMP"); + + if (env && env[0] != '\0') { + output = env; + } else { + output = "c:\\tmp"; + } + + CreateDir(output); + } + + output.append("\\testrocksdb-"); + output.append(std::to_string(_getpid())); + + CreateDir(output); + + output.swap(*result); + + return Status::OK(); +} + +Status WinEnvIO::NewLogger(const std::string& fname, + std::shared_ptr* result) { + Status s; + + result->reset(); + + HANDLE hFile = 0; + { + IOSTATS_TIMER_GUARD(open_nanos); + hFile = CreateFileA( + fname.c_str(), GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_DELETE, // In RocksDb log files are + // renamed and deleted before + // they are closed. This enables + // doing so. + NULL, + CREATE_ALWAYS, // Original fopen mode is "w" + FILE_ATTRIBUTE_NORMAL, NULL); + } + + if (INVALID_HANDLE_VALUE == hFile) { + auto lastError = GetLastError(); + s = IOErrorFromWindowsError("Failed to open LogFile" + fname, lastError); + } else { + { + // With log files we want to set the true creation time as of now + // because the system + // for some reason caches the attributes of the previous file that just + // been renamed from + // this name so auto_roll_logger_test fails + FILETIME ft; + GetSystemTimeAsFileTime(&ft); + // Set creation, last access and last write time to the same value + SetFileTime(hFile, &ft, &ft, &ft); + } + result->reset(new WinLogger(&WinEnvThreads::gettid, hosted_env_, hFile)); + } + return s; +} + +#if (_WIN32_WINNT >= _WIN32_WINNT_WIN8) +uint64_t WinEnvIO::NowMicros() { + + if (GetSystemTimePreciseAsFileTime_ != NULL) { + // all std::chrono clocks on windows proved to return + // values that may repeat that is not good enough for some uses. 
+  const int64_t c_UnixEpochStartTicks = 116444736000000000i64;
+  const int64_t c_FtToMicroSec = 10;
+
+  // This interface needs to return system time and not
+  // just any microseconds because it is often used as an argument
+  // to TimedWait() on condition variable
+  FILETIME ftSystemTime;
+  GetSystemTimePreciseAsFileTime_(&ftSystemTime);
+
+  LARGE_INTEGER li;
+  li.LowPart = ftSystemTime.dwLowDateTime;
+  li.HighPart = ftSystemTime.dwHighDateTime;
+  // Subtract unix epoch start
+  li.QuadPart -= c_UnixEpochStartTicks;
+  // Convert to microsecs
+  li.QuadPart /= c_FtToMicroSec;
+  return li.QuadPart;
+  }
+  using namespace std::chrono;
+  return duration_cast<microseconds>(
+      system_clock::now().time_since_epoch()).count();
+}
+#else
+uint64_t WinEnvIO::NowMicros() {
+  // On Windows 7 and below, where GetSystemTimePreciseAsFileTime is not
+  // available, use QueryPerformanceCounter. Note that this may cause some
+  // of the tests to fail.
+  LARGE_INTEGER li;
+  QueryPerformanceCounter(&li);
+  li.QuadPart *= std::micro::den;
+  li.QuadPart /= perf_counter_frequency_;
+  return li.QuadPart;
+}
+#endif  // (_WIN32_WINNT >= _WIN32_WINNT_WIN8)
+
+uint64_t WinEnvIO::NowNanos() {
+  // all std::chrono clocks on windows have the same resolution that is only
+  // good enough for microseconds but not nanoseconds
+  // On Windows 8 and Windows 2012 Server
+  // GetSystemTimePreciseAsFileTime(&current_time) can be used
+  LARGE_INTEGER li;
+  QueryPerformanceCounter(&li);
+  // Convert to nanoseconds first to avoid loss of precision
+  // and divide by frequency
+  li.QuadPart *= std::nano::den;
+  li.QuadPart /= perf_counter_frequency_;
+  return li.QuadPart;
+}
+
+Status WinEnvIO::GetHostName(char* name, uint64_t len) {
+  Status s;
+  DWORD nSize = static_cast<DWORD>(
+      std::min<uint64_t>(len, std::numeric_limits<DWORD>::max()));
+
+  if (!::GetComputerNameA(name, &nSize)) {
+    auto lastError = GetLastError();
+    s = IOErrorFromWindowsError("GetHostName", lastError);
+  } else {
+    name[nSize] = 0;
+  }
+
+  return s;
+}
+
+Status WinEnvIO::GetAbsolutePath(const std::string& db_path,
+                                 std::string* output_path) {
+  // Check if we already have an absolute path
+  // that starts with a non-dot and has a colon in it
+  if ((!db_path.empty() && (db_path[0] == '/' || db_path[0] == '\\')) ||
+      (db_path.size() > 2 && db_path[0] != '.'
&& + ((db_path[1] == ':' && db_path[2] == '\\') || + (db_path[1] == ':' && db_path[2] == '/')))) { + *output_path = db_path; + return Status::OK(); + } + + std::string result; + result.resize(_MAX_PATH); + + char* ret = _getcwd(&result[0], _MAX_PATH); + if (ret == nullptr) { + return Status::IOError("Failed to get current working directory", + strerror(errno)); + } + + result.resize(strlen(result.data())); + + result.swap(*output_path); + return Status::OK(); +} + +std::string WinEnvIO::TimeToString(uint64_t secondsSince1970) { + std::string result; + + const time_t seconds = secondsSince1970; + const int maxsize = 64; + + struct tm t; + errno_t ret = localtime_s(&t, &seconds); + + if (ret) { + result = std::to_string(seconds); + } else { + result.resize(maxsize); + char* p = &result[0]; + + int len = snprintf(p, maxsize, "%04d/%02d/%02d-%02d:%02d:%02d ", + t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, + t.tm_min, t.tm_sec); + assert(len > 0); + + result.resize(len); + } + + return result; +} + +EnvOptions WinEnvIO::OptimizeForLogWrite(const EnvOptions& env_options, + const DBOptions& db_options) const { + EnvOptions optimized = env_options; + optimized.use_mmap_writes = false; + optimized.bytes_per_sync = db_options.wal_bytes_per_sync; + optimized.use_os_buffer = + true; // This is because we flush only whole pages on unbuffered io and + // the last records are not guaranteed to be flushed. + // TODO(icanadi) it's faster if fallocate_with_keep_size is false, but it + // breaks TransactionLogIteratorStallAtLastRecord unit test. Fix the unit + // test and make this false + optimized.fallocate_with_keep_size = true; + return optimized; +} + +EnvOptions WinEnvIO::OptimizeForManifestWrite( + const EnvOptions& env_options) const { + EnvOptions optimized = env_options; + optimized.use_mmap_writes = false; + optimized.use_os_buffer = true; + optimized.fallocate_with_keep_size = true; + return optimized; +} + +// Returns true iff the named directory exists and is a directory. +bool WinEnvIO::DirExists(const std::string& dname) { + WIN32_FILE_ATTRIBUTE_DATA attrs; + if (GetFileAttributesExA(dname.c_str(), GetFileExInfoStandard, &attrs)) { + return 0 != (attrs.dwFileAttributes & FILE_ATTRIBUTE_DIRECTORY); + } + return false; +} + +//////////////////////////////////////////////////////////////////////// +// WinEnvThreads + +WinEnvThreads::WinEnvThreads(Env* hosted_env) : hosted_env_(hosted_env), thread_pools_(Env::Priority::TOTAL) { + + for (int pool_id = 0; pool_id < Env::Priority::TOTAL; ++pool_id) { + thread_pools_[pool_id].SetThreadPriority( + static_cast(pool_id)); + // This allows later initializing the thread-local-env of each thread. 
thread_pools_[pool_id].SetHostEnv(hosted_env);
+  }
+}
+
+WinEnvThreads::~WinEnvThreads() {
+
+  WaitForJoin();
+
+  for (auto& thpool : thread_pools_) {
+    thpool.JoinAllThreads();
+  }
+}
+
+void WinEnvThreads::Schedule(void(*function)(void*), void* arg, Env::Priority pri,
+                             void* tag, void(*unschedFunction)(void* arg)) {
+  assert(pri >= Env::Priority::LOW && pri <= Env::Priority::HIGH);
+  thread_pools_[pri].Schedule(function, arg, tag, unschedFunction);
+}
+
+int WinEnvThreads::UnSchedule(void* arg, Env::Priority pri) {
+  return thread_pools_[pri].UnSchedule(arg);
+}
+
+namespace {
+
+  struct StartThreadState {
+    void(*user_function)(void*);
+    void* arg;
+  };
+
+  void* StartThreadWrapper(void* arg) {
+    std::unique_ptr<StartThreadState> state(
+        reinterpret_cast<StartThreadState*>(arg));
+    state->user_function(state->arg);
+    return nullptr;
+  }
+
+}
+
+void WinEnvThreads::StartThread(void(*function)(void* arg), void* arg) {
+  std::unique_ptr<StartThreadState> state(new StartThreadState);
+  state->user_function = function;
+  state->arg = arg;
+  try {
+
+    std::thread th(&StartThreadWrapper, state.get());
+    state.release();
+
+    std::lock_guard<std::mutex> lg(mu_);
+    threads_to_join_.push_back(std::move(th));
+
+  } catch (const std::system_error& ex) {
+    WinthreadCall("start thread", ex.code());
+  }
+}
+
+void WinEnvThreads::WaitForJoin() {
+  for (auto& th : threads_to_join_) {
+    th.join();
+  }
+  threads_to_join_.clear();
+}
+
+unsigned int WinEnvThreads::GetThreadPoolQueueLen(Env::Priority pri) const {
+  assert(pri >= Env::Priority::LOW && pri <= Env::Priority::HIGH);
+  return thread_pools_[pri].GetQueueLen();
+}
+
+uint64_t WinEnvThreads::gettid() {
+  uint64_t thread_id = GetCurrentThreadId();
+  return thread_id;
+}
+
+uint64_t WinEnvThreads::GetThreadID() const { return gettid(); }
+
+void WinEnvThreads::SleepForMicroseconds(int micros) {
+  std::this_thread::sleep_for(std::chrono::microseconds(micros));
+}
+
+void WinEnvThreads::SetBackgroundThreads(int num, Env::Priority pri) {
+  assert(pri >= Env::Priority::LOW && pri <= Env::Priority::HIGH);
+  thread_pools_[pri].SetBackgroundThreads(num);
+}
+
+void WinEnvThreads::IncBackgroundThreadsIfNeeded(int num, Env::Priority pri) {
+  assert(pri >= Env::Priority::LOW && pri <= Env::Priority::HIGH);
+  thread_pools_[pri].IncBackgroundThreadsIfNeeded(num);
+}
+
+/////////////////////////////////////////////////////////////////////////
+// WinEnv
+
+WinEnv::WinEnv() : winenv_io_(this), winenv_threads_(this) {
+  // Protected member of the base class
+  thread_status_updater_ = CreateThreadStatusUpdater();
+}
+
+
+WinEnv::~WinEnv() {
+  // All threads must be joined before the deletion of
+  // thread_status_updater_.
+ delete thread_status_updater_; +} + +Status WinEnv::GetThreadList( + std::vector* thread_list) { + assert(thread_status_updater_); + return thread_status_updater_->GetThreadList(thread_list); +} + +Status WinEnv::DeleteFile(const std::string& fname) { + return winenv_io_.DeleteFile(fname); +} + +Status WinEnv::GetCurrentTime(int64_t* unix_time) { + return winenv_io_.GetCurrentTime(unix_time); +} + +Status WinEnv::NewSequentialFile(const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options) { + return winenv_io_.NewSequentialFile(fname, result, options); +} + +Status WinEnv::NewRandomAccessFile(const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options) { + return winenv_io_.NewRandomAccessFile(fname, result, options); +} + +Status WinEnv::NewWritableFile(const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options) { + return winenv_io_.NewWritableFile(fname, result, options); +} + +Status WinEnv::NewDirectory(const std::string& name, + std::unique_ptr* result) { + return winenv_io_.NewDirectory(name, result); +} + +Status WinEnv::FileExists(const std::string& fname) { + return winenv_io_.FileExists(fname); +} + +Status WinEnv::GetChildren(const std::string& dir, + std::vector* result) { + return winenv_io_.GetChildren(dir, result); +} + +Status WinEnv::CreateDir(const std::string& name) { + return winenv_io_.CreateDir(name); +} + +Status WinEnv::CreateDirIfMissing(const std::string& name) { + return winenv_io_.CreateDirIfMissing(name); +} + +Status WinEnv::DeleteDir(const std::string& name) { + return winenv_io_.DeleteDir(name); +} + +Status WinEnv::GetFileSize(const std::string& fname, + uint64_t* size) { + return winenv_io_.GetFileSize(fname, size); +} + +Status WinEnv::GetFileModificationTime(const std::string& fname, + uint64_t* file_mtime) { + return winenv_io_.GetFileModificationTime(fname, file_mtime); +} + +Status WinEnv::RenameFile(const std::string& src, + const std::string& target) { + return winenv_io_.RenameFile(src, target); +} + +Status WinEnv::LinkFile(const std::string& src, + const std::string& target) { + return winenv_io_.LinkFile(src, target); +} + +Status WinEnv::LockFile(const std::string& lockFname, + FileLock** lock) { + return winenv_io_.LockFile(lockFname, lock); +} + +Status WinEnv::UnlockFile(FileLock* lock) { + return winenv_io_.UnlockFile(lock); +} + +Status WinEnv::GetTestDirectory(std::string* result) { + return winenv_io_.GetTestDirectory(result); +} + +Status WinEnv::NewLogger(const std::string& fname, + std::shared_ptr* result) { + return winenv_io_.NewLogger(fname, result); +} + +uint64_t WinEnv::NowMicros() { + return winenv_io_.NowMicros(); +} + +uint64_t WinEnv::NowNanos() { + return winenv_io_.NowNanos(); +} + +Status WinEnv::GetHostName(char* name, uint64_t len) { + return winenv_io_.GetHostName(name, len); +} + +Status WinEnv::GetAbsolutePath(const std::string& db_path, + std::string* output_path) { + return winenv_io_.GetAbsolutePath(db_path, output_path); +} + +std::string WinEnv::TimeToString(uint64_t secondsSince1970) { + return winenv_io_.TimeToString(secondsSince1970); +} + +void WinEnv::Schedule(void(*function)(void*), void* arg, Env::Priority pri, + void* tag, + void(*unschedFunction)(void* arg)) { + return winenv_threads_.Schedule(function, arg, pri, tag, unschedFunction); +} + +int WinEnv::UnSchedule(void* arg, Env::Priority pri) { + return winenv_threads_.UnSchedule(arg, pri); +} + +void WinEnv::StartThread(void(*function)(void* arg), void* arg) { + return 
winenv_threads_.StartThread(function, arg); +} + +void WinEnv::WaitForJoin() { + return winenv_threads_.WaitForJoin(); +} + +unsigned int WinEnv::GetThreadPoolQueueLen(Env::Priority pri) const { + return winenv_threads_.GetThreadPoolQueueLen(pri); +} + +uint64_t WinEnv::GetThreadID() const { + return winenv_threads_.GetThreadID(); +} + +void WinEnv::SleepForMicroseconds(int micros) { + return winenv_threads_.SleepForMicroseconds(micros); +} + +// Allow increasing the number of worker threads. +void WinEnv::SetBackgroundThreads(int num, Env::Priority pri) { + return winenv_threads_.SetBackgroundThreads(num, pri); +} + +void WinEnv::IncBackgroundThreadsIfNeeded(int num, Env::Priority pri) { + return winenv_threads_.IncBackgroundThreadsIfNeeded(num, pri); +} + +EnvOptions WinEnv::OptimizeForLogWrite(const EnvOptions& env_options, + const DBOptions& db_options) const { + return winenv_io_.OptimizeForLogWrite(env_options, db_options); +} + +EnvOptions WinEnv::OptimizeForManifestWrite( + const EnvOptions& env_options) const { + return winenv_io_.OptimizeForManifestWrite(env_options); +} + +} // namespace port + +std::string Env::GenerateUniqueId() { + std::string result; + + UUID uuid; + UuidCreateSequential(&uuid); + + RPC_CSTR rpc_str; + auto status = UuidToStringA(&uuid, &rpc_str); + assert(status == RPC_S_OK); + + result = reinterpret_cast(rpc_str); + + status = RpcStringFreeA(&rpc_str); + assert(status == RPC_S_OK); + + return result; +} + +} // namespace rocksdb diff --git a/external/rocksdb/port/win/env_win.h b/external/rocksdb/port/win/env_win.h new file mode 100755 index 0000000000..9b1e012c90 --- /dev/null +++ b/external/rocksdb/port/win/env_win.h @@ -0,0 +1,276 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +// An Env is an interface used by the rocksdb implementation to access +// operating system functionality like the filesystem etc. Callers +// may wish to provide a custom Env object when opening a database to +// get fine gain control; e.g., to rate limit file system operations. +// +// All Env implementations are safe for concurrent access from +// multiple threads without any external synchronization. + +#pragma once + +#include +#include "util/threadpool.h" + +#include +#include + +namespace rocksdb { +namespace port { + +// Currently not designed for inheritance but rather a replacement +class WinEnvThreads { +public: + + explicit WinEnvThreads(Env* hosted_env); + + ~WinEnvThreads(); + + WinEnvThreads(const WinEnvThreads&) = delete; + WinEnvThreads& operator=(const WinEnvThreads&) = delete; + + void Schedule(void(*function)(void*), void* arg, Env::Priority pri, + void* tag, + void(*unschedFunction)(void* arg)); + + int UnSchedule(void* arg, Env::Priority pri); + + void StartThread(void(*function)(void* arg), void* arg); + + void WaitForJoin(); + + unsigned int GetThreadPoolQueueLen(Env::Priority pri) const; + + static uint64_t gettid(); + + uint64_t GetThreadID() const; + + void SleepForMicroseconds(int micros); + + // Allow increasing the number of worker threads. 
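+  // (Editorial sketch, not part of the original patch: both pools are
+  // indexed by Env::Priority, so a typical RocksDB-style configuration is
+  //
+  //   threads.SetBackgroundThreads(4, Env::Priority::LOW);   // compactions
+  //   threads.SetBackgroundThreads(1, Env::Priority::HIGH);  // flushes
+  //   threads.Schedule(&BGWork, arg, Env::Priority::LOW, tag, &Unsched);
+  //
+  // where threads is a WinEnvThreads instance and BGWork/Unsched are
+  // hypothetical void(void*) callbacks.)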
+ void SetBackgroundThreads(int num, Env::Priority pri); + + void IncBackgroundThreadsIfNeeded(int num, Env::Priority pri); + +private: + + Env* hosted_env_; + mutable std::mutex mu_; + std::vector thread_pools_; + std::vector threads_to_join_; + +}; + +// Designed for inheritance so can be re-used +// but certain parts replaced +class WinEnvIO { +public: + explicit WinEnvIO(Env* hosted_env); + + virtual ~WinEnvIO(); + + virtual Status DeleteFile(const std::string& fname); + + virtual Status GetCurrentTime(int64_t* unix_time); + + virtual Status NewSequentialFile(const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options); + + virtual Status NewRandomAccessFile(const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options); + + virtual Status NewWritableFile(const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options); + + virtual Status NewDirectory(const std::string& name, + std::unique_ptr* result); + + virtual Status FileExists(const std::string& fname); + + virtual Status GetChildren(const std::string& dir, + std::vector* result); + + virtual Status CreateDir(const std::string& name); + + virtual Status CreateDirIfMissing(const std::string& name); + + virtual Status DeleteDir(const std::string& name); + + virtual Status GetFileSize(const std::string& fname, + uint64_t* size); + + static uint64_t FileTimeToUnixTime(const FILETIME& ftTime); + + virtual Status GetFileModificationTime(const std::string& fname, + uint64_t* file_mtime); + + virtual Status RenameFile(const std::string& src, + const std::string& target); + + virtual Status LinkFile(const std::string& src, + const std::string& target); + + virtual Status LockFile(const std::string& lockFname, + FileLock** lock); + + virtual Status UnlockFile(FileLock* lock); + + virtual Status GetTestDirectory(std::string* result); + + virtual Status NewLogger(const std::string& fname, + std::shared_ptr* result); + + virtual uint64_t NowMicros(); + + virtual uint64_t NowNanos(); + + virtual Status GetHostName(char* name, uint64_t len); + + virtual Status GetAbsolutePath(const std::string& db_path, + std::string* output_path); + + virtual std::string TimeToString(uint64_t secondsSince1970); + + virtual EnvOptions OptimizeForLogWrite(const EnvOptions& env_options, + const DBOptions& db_options) const; + + virtual EnvOptions OptimizeForManifestWrite( + const EnvOptions& env_options) const; + + size_t GetPageSize() const { return page_size_; } + + size_t GetAllocationGranularity() const { return allocation_granularity_; } + + uint64_t GetPerfCounterFrequency() const { return perf_counter_frequency_; } + +private: + // Returns true iff the named directory exists and is a directory. 
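+  // (Editorial sketch, not part of the original patch: since WinEnvIO is
+  // designed for inheritance, a test double can override individual
+  // virtuals -- including the private directory probe declared below,
+  // which is legal in C++:
+  //
+  //   class FakeWinEnvIO : public WinEnvIO {
+  //     using WinEnvIO::WinEnvIO;
+  //     bool DirExists(const std::string&) override { return true; }
+  //   };
+  // )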
+ virtual bool DirExists(const std::string& dname); + + typedef VOID(WINAPI * FnGetSystemTimePreciseAsFileTime)(LPFILETIME); + + Env* hosted_env_; + size_t page_size_; + size_t allocation_granularity_; + uint64_t perf_counter_frequency_; + FnGetSystemTimePreciseAsFileTime GetSystemTimePreciseAsFileTime_; +}; + +class WinEnv : public Env { +public: + WinEnv(); + + ~WinEnv(); + + Status DeleteFile(const std::string& fname) override; + + Status GetCurrentTime(int64_t* unix_time) override; + + Status NewSequentialFile(const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options) override; + + Status NewRandomAccessFile(const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options) override; + + Status NewWritableFile(const std::string& fname, + std::unique_ptr* result, + const EnvOptions& options) override; + + Status NewDirectory(const std::string& name, + std::unique_ptr* result) override; + + Status FileExists(const std::string& fname) override; + + Status GetChildren(const std::string& dir, + std::vector* result) override; + + Status CreateDir(const std::string& name) override; + + Status CreateDirIfMissing(const std::string& name) override; + + Status DeleteDir(const std::string& name) override; + + Status GetFileSize(const std::string& fname, + uint64_t* size) override; + + Status GetFileModificationTime(const std::string& fname, + uint64_t* file_mtime) override; + + Status RenameFile(const std::string& src, + const std::string& target) override; + + Status LinkFile(const std::string& src, + const std::string& target) override; + + Status LockFile(const std::string& lockFname, + FileLock** lock) override; + + Status UnlockFile(FileLock* lock) override; + + Status GetTestDirectory(std::string* result) override; + + Status NewLogger(const std::string& fname, + std::shared_ptr* result) override; + + uint64_t NowMicros() override; + + uint64_t NowNanos() override; + + Status GetHostName(char* name, uint64_t len) override; + + Status GetAbsolutePath(const std::string& db_path, + std::string* output_path) override; + + std::string TimeToString(uint64_t secondsSince1970) override; + + Status GetThreadList( + std::vector* thread_list) override; + + void Schedule(void(*function)(void*), void* arg, Env::Priority pri, + void* tag, + void(*unschedFunction)(void* arg)) override; + + int UnSchedule(void* arg, Env::Priority pri) override; + + void StartThread(void(*function)(void* arg), void* arg) override; + + void WaitForJoin(); + + unsigned int GetThreadPoolQueueLen(Env::Priority pri) const override; + + uint64_t GetThreadID() const override; + + void SleepForMicroseconds(int micros) override; + + // Allow increasing the number of worker threads. + void SetBackgroundThreads(int num, Env::Priority pri) override; + + void IncBackgroundThreadsIfNeeded(int num, Env::Priority pri) override; + + EnvOptions OptimizeForLogWrite(const EnvOptions& env_options, + const DBOptions& db_options) const override; + + EnvOptions OptimizeForManifestWrite( + const EnvOptions& env_options) const override; + +private: + + WinEnvIO winenv_io_; + WinEnvThreads winenv_threads_; + +}; + +} +} \ No newline at end of file diff --git a/external/rocksdb/port/win/io_win.cc b/external/rocksdb/port/win/io_win.cc new file mode 100755 index 0000000000..c9ef1f29e1 --- /dev/null +++ b/external/rocksdb/port/win/io_win.cc @@ -0,0 +1,963 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "port/win/io_win.h" + +#include "util/sync_point.h" +#include "util/coding.h" +#include "util/iostats_context_imp.h" +#include "util/sync_point.h" +#include "util/aligned_buffer.h" + + +namespace rocksdb { +namespace port { + +std::string GetWindowsErrSz(DWORD err) { + LPSTR lpMsgBuf; + FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS, + NULL, err, + 0, // Default language + reinterpret_cast(&lpMsgBuf), 0, NULL); + + std::string Err = lpMsgBuf; + LocalFree(lpMsgBuf); + return Err; +} + +// We preserve the original name of this interface to denote the original idea +// behind it. +// All reads happen by a specified offset and pwrite interface does not change +// the position of the file pointer. Judging from the man page and errno it does +// execute +// lseek atomically to return the position of the file back where it was. +// WriteFile() does not +// have this capability. Therefore, for both pread and pwrite the pointer is +// advanced to the next position +// which is fine for writes because they are (should be) sequential. +// Because all the reads/writes happen by the specified offset, the caller in +// theory should not +// rely on the current file offset. +SSIZE_T pwrite(HANDLE hFile, const char* src, size_t numBytes, + uint64_t offset) { + assert(numBytes <= std::numeric_limits::max()); + OVERLAPPED overlapped = { 0 }; + ULARGE_INTEGER offsetUnion; + offsetUnion.QuadPart = offset; + + overlapped.Offset = offsetUnion.LowPart; + overlapped.OffsetHigh = offsetUnion.HighPart; + + SSIZE_T result = 0; + + unsigned long bytesWritten = 0; + + if (FALSE == WriteFile(hFile, src, static_cast(numBytes), &bytesWritten, + &overlapped)) { + result = -1; + } else { + result = bytesWritten; + } + + return result; +} + +// See comments for pwrite above +SSIZE_T pread(HANDLE hFile, char* src, size_t numBytes, uint64_t offset) { + assert(numBytes <= std::numeric_limits::max()); + OVERLAPPED overlapped = { 0 }; + ULARGE_INTEGER offsetUnion; + offsetUnion.QuadPart = offset; + + overlapped.Offset = offsetUnion.LowPart; + overlapped.OffsetHigh = offsetUnion.HighPart; + + SSIZE_T result = 0; + + unsigned long bytesRead = 0; + + if (FALSE == ReadFile(hFile, src, static_cast(numBytes), &bytesRead, + &overlapped)) { + return -1; + } else { + result = bytesRead; + } + + return result; +} + +// SetFileInformationByHandle() is capable of fast pre-allocates. +// However, this does not change the file end position unless the file is +// truncated and the pre-allocated space is not considered filled with zeros. 
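+// Editorial sketch, not part of the original patch: how a caller might use
+// the pwrite() shim above. The helper name is hypothetical and hFile must
+// be opened with GENERIC_WRITE.
+static inline bool ExamplePWriteAll(HANDLE hFile, uint64_t offset,
+                                    const char* data, size_t size) {
+  // pwrite() issues an OVERLAPPED WriteFile() at the absolute offset and
+  // returns -1 on failure, otherwise the number of bytes actually written.
+  SSIZE_T written = pwrite(hFile, data, size, offset);
+  return written >= 0 && static_cast<size_t>(written) == size;
+}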
+Status fallocate(const std::string& filename, HANDLE hFile, + uint64_t to_size) { + Status status; + + FILE_ALLOCATION_INFO alloc_info; + alloc_info.AllocationSize.QuadPart = to_size; + + if (!SetFileInformationByHandle(hFile, FileAllocationInfo, &alloc_info, + sizeof(FILE_ALLOCATION_INFO))) { + auto lastError = GetLastError(); + status = IOErrorFromWindowsError( + "Failed to pre-allocate space: " + filename, lastError); + } + + return status; +} + +Status ftruncate(const std::string& filename, HANDLE hFile, + uint64_t toSize) { + Status status; + + FILE_END_OF_FILE_INFO end_of_file; + end_of_file.EndOfFile.QuadPart = toSize; + + if (!SetFileInformationByHandle(hFile, FileEndOfFileInfo, &end_of_file, + sizeof(FILE_END_OF_FILE_INFO))) { + auto lastError = GetLastError(); + status = IOErrorFromWindowsError("Failed to Set end of file: " + filename, + lastError); + } + + return status; +} + +size_t GetUniqueIdFromFile(HANDLE hFile, char* id, size_t max_size) { + + if (max_size < kMaxVarint64Length * 3) { + return 0; + } + + BY_HANDLE_FILE_INFORMATION FileInfo; + + BOOL result = GetFileInformationByHandle(hFile, &FileInfo); + + TEST_SYNC_POINT_CALLBACK("GetUniqueIdFromFile:FS_IOC_GETVERSION", &result); + + if (!result) { + return 0; + } + + char* rid = id; + rid = EncodeVarint64(rid, uint64_t(FileInfo.dwVolumeSerialNumber)); + rid = EncodeVarint64(rid, uint64_t(FileInfo.nFileIndexHigh)); + rid = EncodeVarint64(rid, uint64_t(FileInfo.nFileIndexLow)); + + assert(rid >= id); + return static_cast(rid - id); +} + +WinMmapReadableFile::WinMmapReadableFile(const std::string& fileName, HANDLE hFile, HANDLE hMap, + const void* mapped_region, size_t length) + : fileName_(fileName), + hFile_(hFile), + hMap_(hMap), + mapped_region_(mapped_region), + length_(length) {} + +WinMmapReadableFile::~WinMmapReadableFile() { + BOOL ret = ::UnmapViewOfFile(mapped_region_); + assert(ret); + + ret = ::CloseHandle(hMap_); + assert(ret); + + ret = ::CloseHandle(hFile_); + assert(ret); +} + +Status WinMmapReadableFile::Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const { + Status s; + + if (offset > length_) { + *result = Slice(); + return IOError(fileName_, EINVAL); + } else if (offset + n > length_) { + n = length_ - offset; + } + *result = + Slice(reinterpret_cast(mapped_region_)+offset, n); + return s; +} + +Status WinMmapReadableFile::InvalidateCache(size_t offset, size_t length) { + return Status::OK(); +} + +size_t WinMmapReadableFile::GetUniqueId(char* id, size_t max_size) const { + return GetUniqueIdFromFile(hFile_, id, max_size); +} + +// Can only truncate or reserve to a sector size aligned if +// used on files that are opened with Unbuffered I/O +Status WinMmapFile::TruncateFile(uint64_t toSize) { + return ftruncate(filename_, hFile_, toSize); +} + +Status WinMmapFile::UnmapCurrentRegion() { + Status status; + + if (mapped_begin_ != nullptr) { + if (!::UnmapViewOfFile(mapped_begin_)) { + status = IOErrorFromWindowsError( + "Failed to unmap file view: " + filename_, GetLastError()); + } + + // Move on to the next portion of the file + file_offset_ += view_size_; + + // UnmapView automatically sends data to disk but not the metadata + // which is good and provides some equivalent of fdatasync() on Linux + // therefore, we donot need separate flag for metadata + mapped_begin_ = nullptr; + mapped_end_ = nullptr; + dst_ = nullptr; + + last_sync_ = nullptr; + pending_sync_ = false; + } + + return status; +} + +Status WinMmapFile::MapNewRegion() { + + Status status; + + assert(mapped_begin_ == 
nullptr); + + size_t minDiskSize = file_offset_ + view_size_; + + if (minDiskSize > reserved_size_) { + status = Allocate(file_offset_, view_size_); + if (!status.ok()) { + return status; + } + } + + // Need to remap + if (hMap_ == NULL || reserved_size_ > mapping_size_) { + + if (hMap_ != NULL) { + // Unmap the previous one + BOOL ret = ::CloseHandle(hMap_); + assert(ret); + hMap_ = NULL; + } + + ULARGE_INTEGER mappingSize; + mappingSize.QuadPart = reserved_size_; + + hMap_ = CreateFileMappingA( + hFile_, + NULL, // Security attributes + PAGE_READWRITE, // There is not a write only mode for mapping + mappingSize.HighPart, // Enable mapping the whole file but the actual + // amount mapped is determined by MapViewOfFile + mappingSize.LowPart, + NULL); // Mapping name + + if (NULL == hMap_) { + return IOErrorFromWindowsError( + "WindowsMmapFile failed to create file mapping for: " + filename_, + GetLastError()); + } + + mapping_size_ = reserved_size_; + } + + ULARGE_INTEGER offset; + offset.QuadPart = file_offset_; + + // View must begin at the granularity aligned offset + mapped_begin_ = reinterpret_cast( + MapViewOfFileEx(hMap_, FILE_MAP_WRITE, offset.HighPart, offset.LowPart, + view_size_, NULL)); + + if (!mapped_begin_) { + status = IOErrorFromWindowsError( + "WindowsMmapFile failed to map file view: " + filename_, + GetLastError()); + } else { + mapped_end_ = mapped_begin_ + view_size_; + dst_ = mapped_begin_; + last_sync_ = mapped_begin_; + pending_sync_ = false; + } + return status; +} + +Status WinMmapFile::PreallocateInternal(uint64_t spaceToReserve) { + return fallocate(filename_, hFile_, spaceToReserve); +} + +WinMmapFile::WinMmapFile(const std::string& fname, HANDLE hFile, size_t page_size, + size_t allocation_granularity, const EnvOptions& options) + : filename_(fname), + hFile_(hFile), + hMap_(NULL), + page_size_(page_size), + allocation_granularity_(allocation_granularity), + reserved_size_(0), + mapping_size_(0), + view_size_(0), + mapped_begin_(nullptr), + mapped_end_(nullptr), + dst_(nullptr), + last_sync_(nullptr), + file_offset_(0), + pending_sync_(false) { + // Allocation granularity must be obtained from GetSystemInfo() and must be + // a power of two. + assert(allocation_granularity > 0); + assert((allocation_granularity & (allocation_granularity - 1)) == 0); + + assert(page_size > 0); + assert((page_size & (page_size - 1)) == 0); + + // Only for memory mapped writes + assert(options.use_mmap_writes); + + // View size must be both the multiple of allocation_granularity AND the + // page size and the granularity is usually a multiple of a page size. 
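+  // (Editorial note, not part of the original patch: with the usual
+  // Windows allocation granularity of 64 KB, the computation just below
+  // gives Roundup(32 * 1024, 65536) == 65536, so view_size_ becomes one
+  // full granularity unit; with a hypothetical 4 KB granularity it would
+  // stay at 32 KB.)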
+ const size_t viewSize = 32 * 1024; // 32Kb similar to the Windows File Cache in buffered mode + view_size_ = Roundup(viewSize, allocation_granularity_); +} + +WinMmapFile::~WinMmapFile() { + if (hFile_) { + this->Close(); + } +} + +Status WinMmapFile::Append(const Slice& data) { + const char* src = data.data(); + size_t left = data.size(); + + while (left > 0) { + assert(mapped_begin_ <= dst_); + size_t avail = mapped_end_ - dst_; + + if (avail == 0) { + Status s = UnmapCurrentRegion(); + if (s.ok()) { + s = MapNewRegion(); + } + + if (!s.ok()) { + return s; + } + } else { + size_t n = std::min(left, avail); + memcpy(dst_, src, n); + dst_ += n; + src += n; + left -= n; + pending_sync_ = true; + } + } + + // Now make sure that the last partial page is padded with zeros if needed + size_t bytesToPad = Roundup(size_t(dst_), page_size_) - size_t(dst_); + if (bytesToPad > 0) { + memset(dst_, 0, bytesToPad); + } + + return Status::OK(); +} + +// Means Close() will properly take care of truncate +// and it does not need any additional information +Status WinMmapFile::Truncate(uint64_t size) { + return Status::OK(); +} + +Status WinMmapFile::Close() { + Status s; + + assert(NULL != hFile_); + + // We truncate to the precise size so no + // uninitialized data at the end. SetEndOfFile + // which we use does not write zeros and it is good. + uint64_t targetSize = GetFileSize(); + + if (mapped_begin_ != nullptr) { + // Sync before unmapping to make sure everything + // is on disk and there is not a lazy writing + // so we are deterministic with the tests + Sync(); + s = UnmapCurrentRegion(); + } + + if (NULL != hMap_) { + BOOL ret = ::CloseHandle(hMap_); + if (!ret && s.ok()) { + auto lastError = GetLastError(); + s = IOErrorFromWindowsError( + "Failed to Close mapping for file: " + filename_, lastError); + } + + hMap_ = NULL; + } + + if (hFile_ != NULL) { + + TruncateFile(targetSize); + + BOOL ret = ::CloseHandle(hFile_); + hFile_ = NULL; + + if (!ret && s.ok()) { + auto lastError = GetLastError(); + s = IOErrorFromWindowsError( + "Failed to close file map handle: " + filename_, lastError); + } + } + + return s; +} + +Status WinMmapFile::Flush() { return Status::OK(); } + +// Flush only data +Status WinMmapFile::Sync() { + Status s; + + // Some writes occurred since last sync + if (dst_ > last_sync_) { + assert(mapped_begin_); + assert(dst_); + assert(dst_ > mapped_begin_); + assert(dst_ < mapped_end_); + + size_t page_begin = + TruncateToPageBoundary(page_size_, last_sync_ - mapped_begin_); + size_t page_end = + TruncateToPageBoundary(page_size_, dst_ - mapped_begin_ - 1); + + // Flush only the amount of that is a multiple of pages + if (!::FlushViewOfFile(mapped_begin_ + page_begin, + (page_end - page_begin) + page_size_)) { + s = IOErrorFromWindowsError("Failed to FlushViewOfFile: " + filename_, + GetLastError()); + } else { + last_sync_ = dst_; + } + } + + return s; +} + +/** +* Flush data as well as metadata to stable storage. +*/ +Status WinMmapFile::Fsync() { + Status s = Sync(); + + // Flush metadata + if (s.ok() && pending_sync_) { + if (!::FlushFileBuffers(hFile_)) { + s = IOErrorFromWindowsError("Failed to FlushFileBuffers: " + filename_, + GetLastError()); + } + pending_sync_ = false; + } + + return s; +} + +/** +* Get the size of valid data in the file. This will not match the +* size that is returned from the filesystem because we use mmap +* to extend file by map_size every time. 
+*/
+uint64_t WinMmapFile::GetFileSize() {
+  size_t used = dst_ - mapped_begin_;
+  return file_offset_ + used;
+}
+
+Status WinMmapFile::InvalidateCache(size_t offset, size_t length) {
+  return Status::OK();
+}
+
+Status WinMmapFile::Allocate(uint64_t offset, uint64_t len) {
+  Status status;
+  TEST_KILL_RANDOM("WinMmapFile::Allocate", rocksdb_kill_odds);
+
+  // Make sure that we reserve an aligned amount of space
+  // since the reservation block size is driven outside so we want
+  // to check if we are ok with reservation here
+  size_t spaceToReserve = Roundup(offset + len, view_size_);
+  // Nothing to do
+  if (spaceToReserve <= reserved_size_) {
+    return status;
+  }
+
+  IOSTATS_TIMER_GUARD(allocate_nanos);
+  status = PreallocateInternal(spaceToReserve);
+  if (status.ok()) {
+    reserved_size_ = spaceToReserve;
+  }
+  return status;
+}
+
+size_t WinMmapFile::GetUniqueId(char* id, size_t max_size) const {
+  return GetUniqueIdFromFile(hFile_, id, max_size);
+}
+
+WinSequentialFile::WinSequentialFile(const std::string& fname, HANDLE f,
+                                     const EnvOptions& options)
+    : filename_(fname),
+      file_(f),
+      use_os_buffer_(options.use_os_buffer)
+{}
+
+WinSequentialFile::~WinSequentialFile() {
+  assert(file_ != INVALID_HANDLE_VALUE);
+  CloseHandle(file_);
+}
+
+Status WinSequentialFile::Read(size_t n, Slice* result, char* scratch) {
+  Status s;
+  size_t r = 0;
+
+  // Windows ReadFile API accepts a DWORD.
+  // While it is possible to read in a loop if n is > UINT_MAX
+  // it is a highly unlikely case.
+  if (n > UINT_MAX) {
+    return IOErrorFromWindowsError(filename_, ERROR_INVALID_PARAMETER);
+  }
+
+  DWORD bytesToRead = static_cast<DWORD>(n);  // cast is safe due to the check above
+  DWORD bytesRead = 0;
+  BOOL ret = ReadFile(file_, scratch, bytesToRead, &bytesRead, NULL);
+  if (ret == TRUE) {
+    r = bytesRead;
+  } else {
+    return IOErrorFromWindowsError(filename_, GetLastError());
+  }
+
+  *result = Slice(scratch, r);
+
+  return s;
+}
+
+Status WinSequentialFile::Skip(uint64_t n) {
+  // Can't handle more than signed max as SetFilePointerEx accepts a signed
+  // 64-bit integer. As such it is a highly unlikely case to have n so large.
+ if (n > _I64_MAX) { + return IOErrorFromWindowsError(filename_, ERROR_INVALID_PARAMETER); + } + + LARGE_INTEGER li; + li.QuadPart = static_cast(n); //cast is safe due to the check above + BOOL ret = SetFilePointerEx(file_, li, NULL, FILE_CURRENT); + if (ret == FALSE) { + return IOErrorFromWindowsError(filename_, GetLastError()); + } + return Status::OK(); +} + +Status WinSequentialFile::InvalidateCache(size_t offset, size_t length) { + return Status::OK(); +} + +SSIZE_T WinRandomAccessFile::ReadIntoBuffer(uint64_t user_offset, uint64_t first_page_start, + size_t bytes_to_read, size_t& left, + AlignedBuffer& buffer, char* dest) const { + assert(buffer.CurrentSize() == 0); + assert(buffer.Capacity() >= bytes_to_read); + + SSIZE_T read = + PositionedReadInternal(buffer.Destination(), bytes_to_read, first_page_start); + + if (read > 0) { + buffer.Size(read); + + // Let's figure out how much we read from the users standpoint + if ((first_page_start + buffer.CurrentSize()) > user_offset) { + assert(first_page_start <= user_offset); + size_t buffer_offset = user_offset - first_page_start; + read = buffer.Read(dest, buffer_offset, left); + } else { + read = 0; + } + left -= read; + } + return read; +} + +SSIZE_T WinRandomAccessFile::ReadIntoOneShotBuffer(uint64_t user_offset, uint64_t first_page_start, + size_t bytes_to_read, size_t& left, + char* dest) const { + AlignedBuffer bigBuffer; + bigBuffer.Alignment(buffer_.Alignment()); + bigBuffer.AllocateNewBuffer(bytes_to_read); + + return ReadIntoBuffer(user_offset, first_page_start, bytes_to_read, left, + bigBuffer, dest); +} + +SSIZE_T WinRandomAccessFile::ReadIntoInstanceBuffer(uint64_t user_offset, + uint64_t first_page_start, + size_t bytes_to_read, size_t& left, + char* dest) const { + SSIZE_T read = ReadIntoBuffer(user_offset, first_page_start, bytes_to_read, + left, buffer_, dest); + + if (read > 0) { + buffered_start_ = first_page_start; + } + + return read; +} + +void WinRandomAccessFile::CalculateReadParameters(uint64_t offset, size_t bytes_requested, + size_t& actual_bytes_toread, + uint64_t& first_page_start) const { + + const size_t alignment = buffer_.Alignment(); + + first_page_start = TruncateToPageBoundary(alignment, offset); + const uint64_t last_page_start = + TruncateToPageBoundary(alignment, offset + bytes_requested - 1); + actual_bytes_toread = (last_page_start - first_page_start) + alignment; +} + +SSIZE_T WinRandomAccessFile::PositionedReadInternal(char* src, size_t numBytes, + uint64_t offset) const { + return pread(hFile_, src, numBytes, offset); +} + +WinRandomAccessFile::WinRandomAccessFile(const std::string& fname, HANDLE hFile, size_t alignment, + const EnvOptions& options) + : filename_(fname), + hFile_(hFile), + use_os_buffer_(options.use_os_buffer), + read_ahead_(false), + compaction_readahead_size_(options.compaction_readahead_size), + random_access_max_buffer_size_(options.random_access_max_buffer_size), + buffer_(), + buffered_start_(0) { + assert(!options.use_mmap_reads); + + // Unbuffered access, use internal buffer for reads + if (!use_os_buffer_) { + // Do not allocate the buffer either until the first request or + // until there is a call to allocate a read-ahead buffer + buffer_.Alignment(alignment); + } +} + +WinRandomAccessFile::~WinRandomAccessFile() { + if (hFile_ != NULL && hFile_ != INVALID_HANDLE_VALUE) { + ::CloseHandle(hFile_); + } +} + +void WinRandomAccessFile::EnableReadAhead() { this->Hint(SEQUENTIAL); } + +Status WinRandomAccessFile::Read(uint64_t offset, size_t n, Slice* result, + char* 
scratch) const { + + Status s; + SSIZE_T r = -1; + size_t left = n; + char* dest = scratch; + + if (n == 0) { + *result = Slice(scratch, 0); + return s; + } + + // When in unbuffered mode we need to do the following changes: + // - use our own aligned buffer + // - always read at the offset of that is a multiple of alignment + if (!use_os_buffer_) { + + uint64_t first_page_start = 0; + size_t actual_bytes_toread = 0; + size_t bytes_requested = left; + + if (!read_ahead_ && random_access_max_buffer_size_ == 0) { + CalculateReadParameters(offset, bytes_requested, actual_bytes_toread, + first_page_start); + + assert(actual_bytes_toread > 0); + + r = ReadIntoOneShotBuffer(offset, first_page_start, + actual_bytes_toread, left, dest); + } else { + + std::unique_lock lock(buffer_mut_); + + // Let's see if at least some of the requested data is already + // in the buffer + if (offset >= buffered_start_ && + offset < (buffered_start_ + buffer_.CurrentSize())) { + size_t buffer_offset = offset - buffered_start_; + r = buffer_.Read(dest, buffer_offset, left); + assert(r >= 0); + + left -= size_t(r); + offset += r; + dest += r; + } + + // Still some left or none was buffered + if (left > 0) { + // Figure out the start/end offset for reading and amount to read + bytes_requested = left; + + if (read_ahead_ && bytes_requested < compaction_readahead_size_) { + bytes_requested = compaction_readahead_size_; + } + + CalculateReadParameters(offset, bytes_requested, actual_bytes_toread, + first_page_start); + + assert(actual_bytes_toread > 0); + + if (buffer_.Capacity() < actual_bytes_toread) { + // If we are in read-ahead mode or the requested size + // exceeds max buffer size then use one-shot + // big buffer otherwise reallocate main buffer + if (read_ahead_ || + (actual_bytes_toread > random_access_max_buffer_size_)) { + // Unlock the mutex since we are not using instance buffer + lock.unlock(); + r = ReadIntoOneShotBuffer(offset, first_page_start, + actual_bytes_toread, left, dest); + } else { + buffer_.AllocateNewBuffer(actual_bytes_toread); + r = ReadIntoInstanceBuffer(offset, first_page_start, + actual_bytes_toread, left, dest); + } + } else { + buffer_.Clear(); + r = ReadIntoInstanceBuffer(offset, first_page_start, + actual_bytes_toread, left, dest); + } + } + } + } else { + r = PositionedReadInternal(scratch, left, offset); + if (r > 0) { + left -= r; + } + } + + *result = Slice(scratch, (r < 0) ? 
0 : n - left); + + if (r < 0) { + s = IOErrorFromLastWindowsError(filename_); + } + return s; +} + +bool WinRandomAccessFile::ShouldForwardRawRequest() const { + return true; +} + +void WinRandomAccessFile::Hint(AccessPattern pattern) { + if (pattern == SEQUENTIAL && !use_os_buffer_ && + compaction_readahead_size_ > 0) { + std::lock_guard lg(buffer_mut_); + if (!read_ahead_) { + read_ahead_ = true; + // This would allocate read-ahead size + 2 alignments + // - one for memory alignment which added implicitly by AlignedBuffer + // - We add one more alignment because we will read one alignment more + // from disk + buffer_.AllocateNewBuffer(compaction_readahead_size_ + + buffer_.Alignment()); + } + } +} + +Status WinRandomAccessFile::InvalidateCache(size_t offset, size_t length) { + return Status::OK(); +} + +size_t WinRandomAccessFile::GetUniqueId(char* id, size_t max_size) const { + return GetUniqueIdFromFile(hFile_, id, max_size); +} + +Status WinWritableFile::PreallocateInternal(uint64_t spaceToReserve) { + return fallocate(filename_, hFile_, spaceToReserve); +} + +WinWritableFile::WinWritableFile(const std::string& fname, HANDLE hFile, size_t alignment, + size_t capacity, const EnvOptions& options) + : filename_(fname), + hFile_(hFile), + use_os_buffer_(options.use_os_buffer), + alignment_(alignment), + filesize_(0), + reservedsize_(0) { + assert(!options.use_mmap_writes); +} + +WinWritableFile::~WinWritableFile() { + if (NULL != hFile_ && INVALID_HANDLE_VALUE != hFile_) { + WinWritableFile::Close(); + } +} + + // Indicates if the class makes use of unbuffered I/O +bool WinWritableFile::UseOSBuffer() const { + return use_os_buffer_; +} + +size_t WinWritableFile::GetRequiredBufferAlignment() const { + return alignment_; +} + +Status WinWritableFile::Append(const Slice& data) { + + // Used for buffered access ONLY + assert(use_os_buffer_); + assert(data.size() < std::numeric_limits::max()); + + Status s; + + DWORD bytesWritten = 0; + if (!WriteFile(hFile_, data.data(), + static_cast(data.size()), &bytesWritten, NULL)) { + auto lastError = GetLastError(); + s = IOErrorFromWindowsError( + "Failed to WriteFile: " + filename_, + lastError); + } else { + assert(size_t(bytesWritten) == data.size()); + filesize_ += data.size(); + } + + return s; +} + +Status WinWritableFile::PositionedAppend(const Slice& data, uint64_t offset) { + Status s; + + SSIZE_T ret = pwrite(hFile_, data.data(), data.size(), offset); + + // Error break + if (ret < 0) { + auto lastError = GetLastError(); + s = IOErrorFromWindowsError( + "Failed to pwrite for: " + filename_, lastError); + } else { + // With positional write it is not clear at all + // if this actually extends the filesize + assert(size_t(ret) == data.size()); + filesize_ += data.size(); + } + return s; +} + + // Need to implement this so the file is truncated correctly + // when buffered and unbuffered mode +Status WinWritableFile::Truncate(uint64_t size) { + Status s = ftruncate(filename_, hFile_, size); + if (s.ok()) { + filesize_ = size; + } + return s; +} + +Status WinWritableFile::Close() { + + Status s; + + assert(INVALID_HANDLE_VALUE != hFile_); + + if (fsync(hFile_) < 0) { + auto lastError = GetLastError(); + s = IOErrorFromWindowsError("fsync failed at Close() for: " + filename_, + lastError); + } + + if (FALSE == ::CloseHandle(hFile_)) { + auto lastError = GetLastError(); + s = IOErrorFromWindowsError("CloseHandle failed for: " + filename_, + lastError); + } + + hFile_ = INVALID_HANDLE_VALUE; + return s; +} + + // write out the cached data to the 
OS cache + // This is now taken care of the WritableFileWriter +Status WinWritableFile::Flush() { + return Status::OK(); +} + +Status WinWritableFile::Sync() { + Status s; + // Calls flush buffers + if (fsync(hFile_) < 0) { + auto lastError = GetLastError(); + s = IOErrorFromWindowsError("fsync failed at Sync() for: " + filename_, + lastError); + } + return s; +} + +Status WinWritableFile::Fsync() { return Sync(); } + +uint64_t WinWritableFile::GetFileSize() { + // Double accounting now here with WritableFileWriter + // and this size will be wrong when unbuffered access is used + // but tests implement their own writable files and do not use WritableFileWrapper + // so we need to squeeze a square peg through + // a round hole here. + return filesize_; +} + +Status WinWritableFile::Allocate(uint64_t offset, uint64_t len) { + Status status; + TEST_KILL_RANDOM("WinWritableFile::Allocate", rocksdb_kill_odds); + + // Make sure that we reserve an aligned amount of space + // since the reservation block size is driven outside so we want + // to check if we are ok with reservation here + size_t spaceToReserve = Roundup(offset + len, alignment_); + // Nothing to do + if (spaceToReserve <= reservedsize_) { + return status; + } + + IOSTATS_TIMER_GUARD(allocate_nanos); + status = PreallocateInternal(spaceToReserve); + if (status.ok()) { + reservedsize_ = spaceToReserve; + } + return status; +} + +size_t WinWritableFile::GetUniqueId(char* id, size_t max_size) const { + return GetUniqueIdFromFile(hFile_, id, max_size); +} + +Status WinDirectory::Fsync() { return Status::OK(); } + +WinFileLock::~WinFileLock() { + BOOL ret = ::CloseHandle(hFile_); + assert(ret); +} + + +} +} + diff --git a/external/rocksdb/port/win/io_win.h b/external/rocksdb/port/win/io_win.h new file mode 100755 index 0000000000..8c3a4ba7a6 --- /dev/null +++ b/external/rocksdb/port/win/io_win.h @@ -0,0 +1,359 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +#pragma once + +#include +#include + +#include "util/aligned_buffer.h" + +#include +#include + +#include + +#include + +namespace rocksdb { +namespace port { + +std::string GetWindowsErrSz(DWORD err); + +inline Status IOErrorFromWindowsError(const std::string& context, DWORD err) { + return Status::IOError(context, GetWindowsErrSz(err)); +} + +inline Status IOErrorFromLastWindowsError(const std::string& context) { + return IOErrorFromWindowsError(context, GetLastError()); +} + +inline Status IOError(const std::string& context, int err_number) { + return Status::IOError(context, strerror(err_number)); +} + +// Note the below two do not set errno because they are used only here in this +// file +// on a Windows handle and, therefore, not necessary. 
Translating GetLastError() +// to errno +// is a sad business +inline int fsync(HANDLE hFile) { + if (!FlushFileBuffers(hFile)) { + return -1; + } + + return 0; +} + +SSIZE_T pwrite(HANDLE hFile, const char* src, size_t numBytes, + uint64_t offset); + +SSIZE_T pread(HANDLE hFile, char* src, size_t numBytes, uint64_t offset); + +Status fallocate(const std::string& filename, HANDLE hFile, + uint64_t to_size); + +Status ftruncate(const std::string& filename, HANDLE hFile, + uint64_t toSize); + + +size_t GetUniqueIdFromFile(HANDLE hFile, char* id, size_t max_size); + +// mmap() based random-access +class WinMmapReadableFile : public RandomAccessFile { + const std::string fileName_; + HANDLE hFile_; + HANDLE hMap_; + + const void* mapped_region_; + const size_t length_; + +public: + // mapped_region_[0,length-1] contains the mmapped contents of the file. + WinMmapReadableFile(const std::string& fileName, HANDLE hFile, HANDLE hMap, + const void* mapped_region, size_t length); + + ~WinMmapReadableFile(); + + virtual Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override; + + virtual Status InvalidateCache(size_t offset, size_t length) override; + + virtual size_t GetUniqueId(char* id, size_t max_size) const override; +}; + +// We preallocate and use memcpy to append new +// data to the file. This is safe since we either properly close the +// file before reading from it, or for log files, the reading code +// knows enough to skip zero suffixes. +class WinMmapFile : public WritableFile { +private: + const std::string filename_; + HANDLE hFile_; + HANDLE hMap_; + + const size_t page_size_; // We flush the mapping view in page_size + // increments. We may decide if this is a memory + // page size or SSD page size + const size_t + allocation_granularity_; // View must start at such a granularity + + size_t reserved_size_; // Preallocated size + + size_t mapping_size_; // The max size of the mapping object + // we want to guess the final file size to minimize the remapping + size_t view_size_; // How much memory to map into a view at a time + + char* mapped_begin_; // Must begin at the file offset that is aligned with + // allocation_granularity_ + char* mapped_end_; + char* dst_; // Where to write next (in range [mapped_begin_,mapped_end_]) + char* last_sync_; // Where have we synced up to + + uint64_t file_offset_; // Offset of mapped_begin_ in file + + // Do we have unsynced writes? + bool pending_sync_; + + // Can only truncate or reserve to a sector size aligned if + // used on files that are opened with Unbuffered I/O + Status TruncateFile(uint64_t toSize); + + Status UnmapCurrentRegion(); + + Status MapNewRegion(); + + virtual Status PreallocateInternal(uint64_t spaceToReserve); + +public: + + WinMmapFile(const std::string& fname, HANDLE hFile, size_t page_size, + size_t allocation_granularity, const EnvOptions& options); + + ~WinMmapFile(); + + virtual Status Append(const Slice& data) override; + + // Means Close() will properly take care of truncate + // and it does not need any additional information + virtual Status Truncate(uint64_t size) override; + + virtual Status Close() override; + + virtual Status Flush() override; + + // Flush only data + virtual Status Sync() override; + + /** + * Flush data as well as metadata to stable storage. + */ + virtual Status Fsync() override; + + /** + * Get the size of valid data in the file. This will not match the + * size that is returned from the filesystem because we use mmap + * to extend file by map_size every time. 
+ */ + virtual uint64_t GetFileSize() override; + + virtual Status InvalidateCache(size_t offset, size_t length) override; + + virtual Status Allocate(uint64_t offset, uint64_t len) override; + + virtual size_t GetUniqueId(char* id, size_t max_size) const override; +}; + +class WinSequentialFile : public SequentialFile { +private: + const std::string filename_; + HANDLE file_; + + // There is no equivalent of advising away buffered pages as in posix. + // To implement this flag we would need to do unbuffered reads which + // will need to be aligned (not sure there is a guarantee that the buffer + // passed in is aligned). + // Hence we currently ignore this flag. It is used only in a few cases + // which should not be perf critical. + // If perf evaluation finds this to be a problem, we can look into + // implementing this. + bool use_os_buffer_; + +public: + WinSequentialFile(const std::string& fname, HANDLE f, + const EnvOptions& options); + + ~WinSequentialFile(); + + virtual Status Read(size_t n, Slice* result, char* scratch) override; + + virtual Status Skip(uint64_t n) override; + + virtual Status InvalidateCache(size_t offset, size_t length) override; +}; + +// pread() based random-access +class WinRandomAccessFile : public RandomAccessFile { + const std::string filename_; + HANDLE hFile_; + const bool use_os_buffer_; + bool read_ahead_; + const size_t compaction_readahead_size_; + const size_t random_access_max_buffer_size_; + mutable std::mutex buffer_mut_; + mutable AlignedBuffer buffer_; + mutable uint64_t + buffered_start_; // file offset set that is currently buffered + + /* + * The function reads a requested amount of bytes into the specified aligned + * buffer Upon success the function sets the length of the buffer to the + * amount of bytes actually read even though it might be less than actually + * requested. It then copies the amount of bytes requested by the user (left) + * to the user supplied buffer (dest) and reduces left by the amount of bytes + * copied to the user buffer + * + * @user_offset [in] - offset on disk where the read was requested by the user + * @first_page_start [in] - actual page aligned disk offset that we want to + * read from + * @bytes_to_read [in] - total amount of bytes that will be read from disk + * which is generally greater or equal to the amount + * that the user has requested due to the + * either alignment requirements or read_ahead in + * effect. + * @left [in/out] total amount of bytes that needs to be copied to the user + * buffer. 
It is reduced by the amount of bytes that actually + * copied + * @buffer - buffer to use + * @dest - user supplied buffer + */ + SSIZE_T ReadIntoBuffer(uint64_t user_offset, uint64_t first_page_start, + size_t bytes_to_read, size_t& left, + AlignedBuffer& buffer, char* dest) const; + + SSIZE_T ReadIntoOneShotBuffer(uint64_t user_offset, uint64_t first_page_start, + size_t bytes_to_read, size_t& left, + char* dest) const; + + SSIZE_T ReadIntoInstanceBuffer(uint64_t user_offset, + uint64_t first_page_start, + size_t bytes_to_read, size_t& left, + char* dest) const; + + void CalculateReadParameters(uint64_t offset, size_t bytes_requested, + size_t& actual_bytes_toread, + uint64_t& first_page_start) const; + + // Override for behavior change + virtual SSIZE_T PositionedReadInternal(char* src, size_t numBytes, + uint64_t offset) const; + +public: + WinRandomAccessFile(const std::string& fname, HANDLE hFile, size_t alignment, + const EnvOptions& options); + + ~WinRandomAccessFile(); + + virtual void EnableReadAhead() override; + + virtual Status Read(uint64_t offset, size_t n, Slice* result, + char* scratch) const override; + + virtual bool ShouldForwardRawRequest() const override; + + virtual void Hint(AccessPattern pattern) override; + + virtual Status InvalidateCache(size_t offset, size_t length) override; + + virtual size_t GetUniqueId(char* id, size_t max_size) const override; +}; + + +// This is a sequential write class. It has been mimicked (as others) after +// the original Posix class. We add support for unbuffered I/O on windows as +// well +// we utilize the original buffer as an alignment buffer to write directly to +// file with no buffering. +// No buffering requires that the provided buffer is aligned to the physical +// sector size (SSD page size) and +// that all SetFilePointer() operations to occur with such an alignment. +// We thus always write in sector/page size increments to the drive and leave +// the tail for the next write OR for Close() at which point we pad with zeros. +// No padding is required for +// buffered access. 
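+// Editorial sketch, not part of the original patch: the alignment split that
+// the unbuffered path described above relies on. With a 4096-byte sector, a
+// 10000-byte logical write has an 8192-byte aligned body and a 1808-byte
+// tail that is carried over or zero-padded at Close(). The helper name is
+// hypothetical; TruncateToPageBoundary() comes from util/aligned_buffer.h.
+inline size_t ExampleAlignedBody(size_t sector_size, size_t bytes) {
+  // Rounds bytes down to a multiple of sector_size.
+  return TruncateToPageBoundary(sector_size, bytes);
+}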
+class WinWritableFile : public WritableFile { +private: + const std::string filename_; + HANDLE hFile_; + const bool use_os_buffer_; // Used to indicate unbuffered access, the file + const uint64_t alignment_; + // must be opened as unbuffered if false + uint64_t filesize_; // How much data is actually written disk + uint64_t reservedsize_; // how far we have reserved space + + virtual Status PreallocateInternal(uint64_t spaceToReserve); + +public: + WinWritableFile(const std::string& fname, HANDLE hFile, size_t alignment, + size_t capacity, const EnvOptions& options); + + ~WinWritableFile(); + + // Indicates if the class makes use of unbuffered I/O + virtual bool UseOSBuffer() const override; + + virtual size_t GetRequiredBufferAlignment() const override; + + virtual Status Append(const Slice& data) override; + + virtual Status PositionedAppend(const Slice& data, uint64_t offset) override; + + // Need to implement this so the file is truncated correctly + // when buffered and unbuffered mode + virtual Status Truncate(uint64_t size) override; + + virtual Status Close() override; + + // write out the cached data to the OS cache + // This is now taken care of the WritableFileWriter + virtual Status Flush() override; + + virtual Status Sync() override; + + virtual Status Fsync() override; + + virtual uint64_t GetFileSize() override; + + virtual Status Allocate(uint64_t offset, uint64_t len) override; + + virtual size_t GetUniqueId(char* id, size_t max_size) const override; +}; + +class WinDirectory : public Directory { +public: + WinDirectory() {} + + virtual Status Fsync() override; +}; + +class WinFileLock : public FileLock { +public: + explicit WinFileLock(HANDLE hFile) : hFile_(hFile) { + assert(hFile != NULL); + assert(hFile != INVALID_HANDLE_VALUE); + } + + ~WinFileLock(); + +private: + HANDLE hFile_; +}; + +} +} diff --git a/external/rocksdb/port/win/port_win.cc b/external/rocksdb/port/win/port_win.cc new file mode 100755 index 0000000000..dd87c35770 --- /dev/null +++ b/external/rocksdb/port/win/port_win.cc @@ -0,0 +1,307 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
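+// (Editorial sketch, not part of the original patch: the dirent emulation
+// implemented below follows the usual POSIX pattern:
+//
+//   DIR* d = opendir("C:\\some\\dir");
+//   for (dirent* e = d ? readdir(d) : nullptr; e != nullptr;
+//        e = readdir(d)) {
+//     // e->d_name holds the entry name
+//   }
+//   if (d != nullptr) closedir(d);
+// )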
+ +#if !defined(OS_WIN) && !defined(WIN32) && !defined(_WIN32) +#error Windows Specific Code +#endif + +#include "port/win/port_win.h" + +#include +#include "port/dirent.h" +#include "port/sys_time.h" + +#include +#include +#include +#include + +#include +#include +#include + +#include "util/logging.h" + +namespace rocksdb { +namespace port { + +void gettimeofday(struct timeval* tv, struct timezone* /* tz */) { + using namespace std::chrono; + + microseconds usNow( + duration_cast(system_clock::now().time_since_epoch())); + + seconds secNow(duration_cast(usNow)); + + tv->tv_sec = static_cast(secNow.count()); + tv->tv_usec = static_cast(usNow.count() - + duration_cast(secNow).count()); +} + +Mutex::~Mutex() {} + +CondVar::~CondVar() {} + +void CondVar::Wait() { + // Caller must ensure that mutex is held prior to calling this method + std::unique_lock lk(mu_->getLock(), std::adopt_lock); +#ifndef NDEBUG + mu_->locked_ = false; +#endif + cv_.wait(lk); +#ifndef NDEBUG + mu_->locked_ = true; +#endif + // Release ownership of the lock as we don't want it to be unlocked when + // it goes out of scope (as we adopted the lock and didn't lock it ourselves) + lk.release(); +} + +bool CondVar::TimedWait(uint64_t abs_time_us) { + + using namespace std::chrono; + + // MSVC++ library implements wait_until in terms of wait_for so + // we need to convert absolute wait into relative wait. + microseconds usAbsTime(abs_time_us); + + microseconds usNow( + duration_cast(system_clock::now().time_since_epoch())); + microseconds relTimeUs = + (usAbsTime > usNow) ? (usAbsTime - usNow) : microseconds::zero(); + + // Caller must ensure that mutex is held prior to calling this method + std::unique_lock lk(mu_->getLock(), std::adopt_lock); +#ifndef NDEBUG + mu_->locked_ = false; +#endif + std::cv_status cvStatus = cv_.wait_for(lk, relTimeUs); +#ifndef NDEBUG + mu_->locked_ = true; +#endif + // Release ownership of the lock as we don't want it to be unlocked when + // it goes out of scope (as we adopted the lock and didn't lock it ourselves) + lk.release(); + + if (cvStatus == std::cv_status::timeout) { + return true; + } + + return false; +} + +void CondVar::Signal() { cv_.notify_one(); } + +void CondVar::SignalAll() { cv_.notify_all(); } + +int PhysicalCoreID() { return GetCurrentProcessorNumber(); } + +void InitOnce(OnceType* once, void (*initializer)()) { + std::call_once(once->flag_, initializer); +} + +// Private structure, exposed only by pointer +struct DIR { + intptr_t handle_; + bool firstread_; + struct __finddata64_t data_; + dirent entry_; + + DIR() : handle_(-1), firstread_(true) {} + + DIR(const DIR&) = delete; + DIR& operator=(const DIR&) = delete; + + ~DIR() { + if (-1 != handle_) { + _findclose(handle_); + } + } +}; + +DIR* opendir(const char* name) { + if (!name || *name == 0) { + errno = ENOENT; + return nullptr; + } + + std::string pattern(name); + pattern.append("\\").append("*"); + + std::unique_ptr
<DIR>
dir(new DIR); + + dir->handle_ = _findfirst64(pattern.c_str(), &dir->data_); + + if (dir->handle_ == -1) { + return nullptr; + } + + strncpy_s(dir->entry_.d_name, dir->data_.name, strlen(dir->data_.name)); + + return dir.release(); +} + +struct dirent* readdir(DIR* dirp) { + if (!dirp || dirp->handle_ == -1) { + errno = EBADF; + return nullptr; + } + + if (dirp->firstread_) { + dirp->firstread_ = false; + return &dirp->entry_; + } + + auto ret = _findnext64(dirp->handle_, &dirp->data_); + + if (ret != 0) { + return nullptr; + } + + strncpy_s(dirp->entry_.d_name, dirp->data_.name, strlen(dirp->data_.name)); + + return &dirp->entry_; +} + +int closedir(DIR* dirp) { + delete dirp; + return 0; +} + +int truncate(const char* path, int64_t len) { + if (path == nullptr) { + errno = EFAULT; + return -1; + } + + if (len < 0) { + errno = EINVAL; + return -1; + } + + HANDLE hFile = + CreateFile(path, GENERIC_READ | GENERIC_WRITE, + FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE, + NULL, // Security attrs + OPEN_EXISTING, // Truncate existing file only + FILE_ATTRIBUTE_NORMAL, NULL); + + if (INVALID_HANDLE_VALUE == hFile) { + auto lastError = GetLastError(); + if (lastError == ERROR_FILE_NOT_FOUND) { + errno = ENOENT; + } else if (lastError == ERROR_ACCESS_DENIED) { + errno = EACCES; + } else { + errno = EIO; + } + return -1; + } + + int result = 0; + FILE_END_OF_FILE_INFO end_of_file; + end_of_file.EndOfFile.QuadPart = len; + + if (!SetFileInformationByHandle(hFile, FileEndOfFileInfo, &end_of_file, + sizeof(FILE_END_OF_FILE_INFO))) { + errno = EIO; + result = -1; + } + + CloseHandle(hFile); + return result; +} + +void Crash(const std::string& srcfile, int srcline) { + fprintf(stdout, "Crashing at %s:%d\n", srcfile.c_str(), srcline); + fflush(stdout); + abort(); +} + +int GetMaxOpenFiles() { return -1; } + +} // namespace port +} // namespace rocksdb + +#ifdef JEMALLOC + +#include "jemalloc/jemalloc.h" + +#ifndef JEMALLOC_NON_INIT + +namespace rocksdb { + +namespace port { + +__declspec(noinline) void WINAPI InitializeJemalloc() { + je_init(); + atexit(je_uninit); +} + +} // port +} // rocksdb + +extern "C" { + +#ifdef _WIN64 + +#pragma comment(linker, "/INCLUDE:p_rocksdb_init_jemalloc") + +typedef void(WINAPI* CRT_Startup_Routine)(void); + +// .CRT section is merged with .rdata on x64 so it must be constant data. 
+// must be of external linkage +// We put this into XCT since we want to run this earlier than C++ static +// constructors +// which are placed into XCU +#pragma const_seg(".CRT$XCT") +extern const CRT_Startup_Routine p_rocksdb_init_jemalloc; +const CRT_Startup_Routine p_rocksdb_init_jemalloc = + rocksdb::port::InitializeJemalloc; +#pragma const_seg() + +#else // _WIN64 + +// x86 untested + +#pragma comment(linker, "/INCLUDE:_p_rocksdb_init_jemalloc") + +#pragma section(".CRT$XCT", read) +JEMALLOC_SECTION(".CRT$XCT") JEMALLOC_ATTR(used) static const void( + WINAPI* p_rocksdb_init_jemalloc)(void) = rocksdb::port::InitializeJemalloc; + +#endif // _WIN64 + +} // extern "C" + +#endif // JEMALLOC_NON_INIT + +// Global operators to be replaced by a linker + +void* operator new(size_t size) { + void* p = je_malloc(size); + if (!p) { + throw std::bad_alloc(); + } + return p; +} + +void* operator new[](size_t size) { + void* p = je_malloc(size); + if (!p) { + throw std::bad_alloc(); + } + return p; +} + +void operator delete(void* p) { je_free(p); } + +void operator delete[](void* p) { je_free(p); } + +#endif // JEMALLOC diff --git a/external/rocksdb/port/win/port_win.h b/external/rocksdb/port/win/port_win.h new file mode 100755 index 0000000000..54f10a24c9 --- /dev/null +++ b/external/rocksdb/port/win/port_win.h @@ -0,0 +1,299 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +// See port_example.h for documentation for the following types/functions. 
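+// (Editorial note, not part of the original patch: the jemalloc hook at the
+// end of port_win.cc relies on CRT initializer ordering -- function pointers
+// placed in section .CRT$XCT run before the C++ static constructors in
+// .CRT$XCU, so je_init() has completed before any global operator new (and
+// therefore je_malloc) can be reached. A minimal sketch of the pattern,
+// with a hypothetical MyInit routine:
+//
+//   #pragma const_seg(".CRT$XCT")
+//   extern const CRT_Startup_Routine p_my_init = &MyInit;
+//   #pragma const_seg()
+// )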
+
+#ifndef STORAGE_LEVELDB_PORT_PORT_WIN_H_
+#define STORAGE_LEVELDB_PORT_PORT_WIN_H_
+
+// Always want minimum headers
+#ifndef WIN32_LEAN_AND_MEAN
+#define WIN32_LEAN_AND_MEAN
+#endif
+
+// Assume little-endian everywhere
+#undef PLATFORM_IS_LITTLE_ENDIAN
+#define PLATFORM_IS_LITTLE_ENDIAN true
+
+#include <windows.h>
+#include <string>
+#include <string.h>
+#include <mutex>
+#include <limits>
+#include <condition_variable>
+
+#include <stdint.h>
+
+#include "rocksdb/options.h"
+
+#undef min
+#undef max
+#undef DeleteFile
+#undef GetCurrentTime
+
+
+#ifndef strcasecmp
+#define strcasecmp _stricmp
+#endif
+
+#undef GetCurrentTime
+#undef DeleteFile
+
+typedef SSIZE_T ssize_t;
+
+// size_t printf formatting named in the manner of C99 standard formatting
+// strings such as PRIu64
+// in fact, we could use that one
+#ifndef ROCKSDB_PRIszt
+#define ROCKSDB_PRIszt "Iu"
+#endif
+
+#define __attribute__(A)
+
+// Thread-local storage, spelled __thread as on Linux
+// (C++11 also has thread_local)
+#ifndef __thread
+#define __thread __declspec(thread)
+#endif
+
+#ifndef PLATFORM_IS_LITTLE_ENDIAN
+#define PLATFORM_IS_LITTLE_ENDIAN (__BYTE_ORDER == __LITTLE_ENDIAN)
+#endif
+
+namespace rocksdb {
+
+#define PREFETCH(addr, rw, locality)
+
+namespace port {
+
+// VS 2015 and later (_MSC_VER >= 1900)
+#if (defined _MSC_VER) && (_MSC_VER >= 1900)
+
+#define ROCKSDB_NOEXCEPT noexcept
+
+// For use at db/file_indexer.h kLevelMaxIndex
+const int kMaxInt32 = std::numeric_limits<int>::max();
+const uint64_t kMaxUint64 = std::numeric_limits<uint64_t>::max();
+const int64_t kMaxInt64 = std::numeric_limits<int64_t>::max();
+
+const size_t kMaxSizet = std::numeric_limits<size_t>::max();
+
+#else  //_MSC_VER
+
+// VS 2015 has snprintf; earlier versions only have _snprintf
+#define snprintf _snprintf
+
+#define ROCKSDB_NOEXCEPT
+// std::numeric_limits::max() is not constexpr just yet
+// therefore, use the same limits
+
+// For use at db/file_indexer.h kLevelMaxIndex
+const int kMaxInt32 = INT32_MAX;
+const int64_t kMaxInt64 = INT64_MAX;
+const uint64_t kMaxUint64 = UINT64_MAX;
+
+#ifdef _WIN64
+const size_t kMaxSizet = UINT64_MAX;
+#else
+const size_t kMaxSizet = UINT_MAX;
+#endif
+
+#endif  //_MSC_VER
+
+const bool kLittleEndian = true;
+
+class CondVar;
+
+class Mutex {
+ public:
+
+  /* implicit */ Mutex(bool adaptive = false)
+#ifndef NDEBUG
+      : locked_(false)
+#endif
+  { }
+
+  ~Mutex();
+
+  void Lock() {
+    mutex_.lock();
+#ifndef NDEBUG
+    locked_ = true;
+#endif
+  }
+
+  void Unlock() {
+#ifndef NDEBUG
+    locked_ = false;
+#endif
+    mutex_.unlock();
+  }
+
+  // this will assert if the mutex is not locked
+  // it does NOT verify that the mutex is held by the calling thread
+  void AssertHeld() {
+#ifndef NDEBUG
+    assert(locked_);
+#endif
+  }
+
+  // No copying allowed
+  Mutex(const Mutex&) = delete;
+  void operator=(const Mutex&) = delete;
+
+ private:
+
+  friend class CondVar;
+
+  std::mutex& getLock() {
+    return mutex_;
+  }
+
+  std::mutex mutex_;
+#ifndef NDEBUG
+  bool locked_;
+#endif
+};
+
+class RWMutex {
+ public:
+  RWMutex() { InitializeSRWLock(&srwLock_); }
+
+  void ReadLock() { AcquireSRWLockShared(&srwLock_); }
+
+  void WriteLock() { AcquireSRWLockExclusive(&srwLock_); }
+
+  void ReadUnlock() { ReleaseSRWLockShared(&srwLock_); }
+
+  void WriteUnlock() { ReleaseSRWLockExclusive(&srwLock_); }
+
+  // Empty as in POSIX
+  void AssertHeld() {}
+
+ private:
+  SRWLOCK srwLock_;
+  // No copying allowed
+  RWMutex(const RWMutex&);
+  void operator=(const RWMutex&);
+};
+
+class CondVar {
+ public:
+  explicit CondVar(Mutex* mu) : mu_(mu) {
+  }
+
+  ~CondVar();
+  void Wait();
+  bool TimedWait(uint64_t expiration_time);
+  void Signal();
+  void SignalAll();
+
+  // Condition var is not copy/move constructible
+  CondVar(const CondVar&) = delete;
+  CondVar& operator=(const CondVar&) = delete;
+
+  CondVar(CondVar&&) = delete;
+  CondVar& operator=(CondVar&&) = delete;
+
+ private:
+  std::condition_variable cv_;
+  Mutex* mu_;
+};
+
+
+// OnceType emulates the POSIX pthread_once() initialization
+// semantics adopted in the project
+struct OnceType {
+
+  struct Init {};
+
+  OnceType() {}
+  OnceType(const Init&) {}
+  OnceType(const OnceType&) = delete;
+  OnceType& operator=(const OnceType&) = delete;
+
+  std::once_flag flag_;
+};
+
+#define LEVELDB_ONCE_INIT port::OnceType::Init()
+extern void InitOnce(OnceType* once, void (*initializer)());
+
+#define CACHE_LINE_SIZE 64U
+
+static inline void AsmVolatilePause() {
+#if defined(_M_IX86) || defined(_M_X64)
+  YieldProcessor();
+#endif
+  // it would be nice to get "wfe" on ARM here
+}
+
+extern int PhysicalCoreID();
+
+// For Thread Local Storage abstraction
+typedef DWORD pthread_key_t;
+
+inline int pthread_key_create(pthread_key_t* key, void (*destructor)(void*)) {
+  // Not used
+  (void)destructor;
+
+  pthread_key_t k = TlsAlloc();
+  if (TLS_OUT_OF_INDEXES == k) {
+    return ENOMEM;
+  }
+
+  *key = k;
+  return 0;
+}
+
+inline int pthread_key_delete(pthread_key_t key) {
+  if (!TlsFree(key)) {
+    return EINVAL;
+  }
+  return 0;
+}
+
+inline int pthread_setspecific(pthread_key_t key, const void* value) {
+  if (!TlsSetValue(key, const_cast<void*>(value))) {
+    return ENOMEM;
+  }
+  return 0;
+}
+
+inline void* pthread_getspecific(pthread_key_t key) {
+  void* result = TlsGetValue(key);
+  if (!result) {
+    if (GetLastError() != ERROR_SUCCESS) {
+      errno = EINVAL;
+    } else {
+      errno = NOERROR;
+    }
+  }
+  return result;
+}
+
+// UNIX equivalents, although the errno numbers will be off;
+// implemented using the C runtime. Note: this does not
+// fill the extended region with zeros if the file grows.
+int truncate(const char* path, int64_t length);
+void Crash(const std::string& srcfile, int srcline);
+extern int GetMaxOpenFiles();
+
+}  // namespace port
+
+using port::pthread_key_t;
+using port::pthread_key_create;
+using port::pthread_key_delete;
+using port::pthread_setspecific;
+using port::pthread_getspecific;
+using port::truncate;
+
+}  // namespace rocksdb
+
+#endif  // STORAGE_LEVELDB_PORT_PORT_WIN_H_
diff --git a/external/rocksdb/port/win/win_logger.cc b/external/rocksdb/port/win/win_logger.cc
new file mode 100755
index 0000000000..3c4ae1f88c
--- /dev/null
+++ b/external/rocksdb/port/win/win_logger.cc
@@ -0,0 +1,160 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Logger implementation that can be shared by all environments
+// where enough posix functionality is available.
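A usage sketch for the TlsAlloc-based pthread shim defined in port_win.h above; it exercises the same Win32 TLS calls the shim wraps (Windows-only, purely illustrative):

#include <windows.h>
#include <cassert>

int main() {
  DWORD key = TlsAlloc();            // what pthread_key_create wraps
  assert(key != TLS_OUT_OF_INDEXES);
  int value = 42;
  TlsSetValue(key, &value);          // pthread_setspecific
  void* got = TlsGetValue(key);      // pthread_getspecific
  assert(got == &value);
  TlsFree(key);                      // pthread_key_delete
  return 0;
}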
+ +#include "port/win/win_logger.h" +#include "port/win/io_win.h" + +#include +#include +#include +#include +#include + +#include "rocksdb/env.h" + +#include "port/sys_time.h" +#include "util/iostats_context_imp.h" + +namespace rocksdb { + +namespace port { + +WinLogger::WinLogger(uint64_t (*gettid)(), Env* env, HANDLE file, + const InfoLogLevel log_level) + : Logger(log_level), + gettid_(gettid), + log_size_(0), + last_flush_micros_(0), + env_(env), + flush_pending_(false), + file_(file) {} + +void WinLogger::DebugWriter(const char* str, int len) { + DWORD bytesWritten = 0; + BOOL ret = WriteFile(file_, str, len, &bytesWritten, NULL); + if (ret == FALSE) { + std::string errSz = GetWindowsErrSz(GetLastError()); + fprintf(stderr, errSz.c_str()); + } +} + +WinLogger::~WinLogger() { close(); } + +void WinLogger::close() { CloseHandle(file_); } + +void WinLogger::Flush() { + if (flush_pending_) { + flush_pending_ = false; + // With Windows API writes go to OS buffers directly so no fflush needed + // unlike with C runtime API. We don't flush all the way to disk + // for perf reasons. + } + + last_flush_micros_ = env_->NowMicros(); +} + +void WinLogger::Logv(const char* format, va_list ap) { + IOSTATS_TIMER_GUARD(logger_nanos); + + const uint64_t thread_id = (*gettid_)(); + + // We try twice: the first time with a fixed-size stack allocated buffer, + // and the second time with a much larger dynamically allocated buffer. + char buffer[500]; + std::unique_ptr largeBuffer; + for (int iter = 0; iter < 2; ++iter) { + char* base; + int bufsize; + if (iter == 0) { + bufsize = sizeof(buffer); + base = buffer; + } else { + bufsize = 30000; + largeBuffer.reset(new char[bufsize]); + base = largeBuffer.get(); + } + + char* p = base; + char* limit = base + bufsize; + + struct timeval now_tv; + gettimeofday(&now_tv, nullptr); + const time_t seconds = now_tv.tv_sec; + struct tm t; + localtime_s(&t, &seconds); + p += snprintf(p, limit - p, "%04d/%02d/%02d-%02d:%02d:%02d.%06d %llx ", + t.tm_year + 1900, t.tm_mon + 1, t.tm_mday, t.tm_hour, + t.tm_min, t.tm_sec, static_cast(now_tv.tv_usec), + static_cast(thread_id)); + + // Print the message + if (p < limit) { + va_list backup_ap; + va_copy(backup_ap, ap); + int done = vsnprintf(p, limit - p, format, backup_ap); + if (done > 0) { + p += done; + } else { + continue; + } + va_end(backup_ap); + } + + // Truncate to available space if necessary + if (p >= limit) { + if (iter == 0) { + continue; // Try again with larger buffer + } else { + p = limit - 1; + } + } + + // Add newline if necessary + if (p == base || p[-1] != '\n') { + *p++ = '\n'; + } + + assert(p <= limit); + const size_t write_size = p - base; + + DWORD bytesWritten = 0; + BOOL ret = WriteFile(file_, base, static_cast(write_size), + &bytesWritten, NULL); + if (ret == FALSE) { + std::string errSz = GetWindowsErrSz(GetLastError()); + fprintf(stderr, errSz.c_str()); + } + + flush_pending_ = true; + assert((bytesWritten == write_size) || (ret == FALSE)); + if (bytesWritten > 0) { + log_size_ += write_size; + } + + uint64_t now_micros = + static_cast(now_tv.tv_sec) * 1000000 + now_tv.tv_usec; + if (now_micros - last_flush_micros_ >= flush_every_seconds_ * 1000000) { + flush_pending_ = false; + // With Windows API writes go to OS buffers directly so no fflush needed + // unlike with C runtime API. We don't flush all the way to disk + // for perf reasons. 
+ last_flush_micros_ = now_micros; + } + break; + } +} + +size_t WinLogger::GetLogFileSize() const { return log_size_; } + +} + +} // namespace rocksdb diff --git a/external/rocksdb/port/win/win_logger.h b/external/rocksdb/port/win/win_logger.h new file mode 100755 index 0000000000..84971363df --- /dev/null +++ b/external/rocksdb/port/win/win_logger.h @@ -0,0 +1,62 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +// Logger implementation that can be shared by all environments +// where enough posix functionality is available. + +#pragma once + +#include + +#include "rocksdb/env.h" + +#include +#include + +namespace rocksdb { + +class Env; + +namespace port { + +class WinLogger : public rocksdb::Logger { + public: + WinLogger(uint64_t (*gettid)(), Env* env, HANDLE file, + const InfoLogLevel log_level = InfoLogLevel::ERROR_LEVEL); + + virtual ~WinLogger(); + + WinLogger(const WinLogger&) = delete; + + WinLogger& operator=(const WinLogger&) = delete; + + void close(); + + void Flush() override; + + void Logv(const char* format, va_list ap) override; + + size_t GetLogFileSize() const override; + + void DebugWriter(const char* str, int len); + + private: + HANDLE file_; + uint64_t (*gettid_)(); // Return the thread id for the current thread + std::atomic_size_t log_size_; + std::atomic_uint_fast64_t last_flush_micros_; + Env* env_; + bool flush_pending_; + + const static uint64_t flush_every_seconds_ = 5; +}; + +} + +} // namespace rocksdb diff --git a/external/rocksdb/port/win/xpress_win.cc b/external/rocksdb/port/win/xpress_win.cc new file mode 100755 index 0000000000..a0206b5780 --- /dev/null +++ b/external/rocksdb/port/win/xpress_win.cc @@ -0,0 +1,267 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
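The Compress()/Decompress() implementations in the file below follow the Windows Compression API's two-call convention: the first call passes a null output buffer, fails with ERROR_INSUFFICIENT_BUFFER, and reports the required size; the second call does the actual work. A standalone sketch of the size-query half (assumes the Windows 8+ SDK's <compressapi.h> and linking against Cabinet.lib; illustrative only, not part of this patch):

#include <windows.h>
#include <compressapi.h>

// Returns true and sets *needed to the compressed-buffer size required
// for `length` input bytes; mirrors the first ::Compress() call below.
bool XpressCompressedSize(const char* input, SIZE_T length, SIZE_T* needed) {
  COMPRESSOR_HANDLE c = NULL;
  if (!CreateCompressor(COMPRESS_ALGORITHM_XPRESS, NULL, &c)) {
    return false;
  }
  SIZE_T size = 0;
  bool ok = false;
  // Null buffer and zero size make Compress() report the size it needs.
  if (!Compress(c, input, length, NULL, 0, &size)) {
    ok = (GetLastError() == ERROR_INSUFFICIENT_BUFFER);
  }
  *needed = size;
  CloseCompressor(c);
  return ok;
}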
+ +#include "port/win/xpress_win.h" +#include + +#include +#include +#include +#include + +#ifdef XPRESS + +#ifdef JEMALLOC +#include +#endif + +// Put this under ifdef so windows systems w/o this +// can still build +#include + +namespace rocksdb { +namespace port { +namespace xpress { + +// Helpers +namespace { + +auto CloseCompressorFun = [](void* h) { + if (NULL != h) { + ::CloseCompressor(reinterpret_cast(h)); + } +}; + +auto CloseDecompressorFun = [](void* h) { + if (NULL != h) { + ::CloseDecompressor(reinterpret_cast(h)); + } +}; + + +#ifdef JEMALLOC +// Make sure compressors use our jemalloc if redirected +PVOID CompressorAlloc(PVOID, SIZE_T size) { + return je_malloc(size); +} + +VOID CompressorFree(PVOID, PVOID p) { + if (p != NULL) { + je_free(p); + } +} + +#endif + +} + +bool Compress(const char* input, size_t length, std::string* output) { + + assert(input != nullptr); + assert(output != nullptr); + + if (length == 0) { + output->clear(); + return true; + } + + COMPRESS_ALLOCATION_ROUTINES* allocRoutinesPtr = nullptr; + +#ifdef JEMALLOC + COMPRESS_ALLOCATION_ROUTINES allocationRoutines; + + // Init. allocation routines + allocationRoutines.Allocate = CompressorAlloc; + allocationRoutines.Free = CompressorFree; + allocationRoutines.UserContext = NULL; + + allocRoutinesPtr = &allocationRoutines; +#endif + + COMPRESSOR_HANDLE compressor = NULL; + + BOOL success = CreateCompressor( + COMPRESS_ALGORITHM_XPRESS, // Compression Algorithm + allocRoutinesPtr, // Optional allocation routine + &compressor); // Handle + + if (!success) { +#ifdef _DEBUG + std::cerr << "XPRESS: Failed to create Compressor LastError: " << + GetLastError() << std::endl; +#endif + return false; + } + + std::unique_ptr + compressorGuard(compressor, CloseCompressorFun); + + SIZE_T compressedBufferSize = 0; + + // Query compressed buffer size. + success = ::Compress( + compressor, // Compressor Handle + const_cast(input), // Input buffer + length, // Uncompressed data size + NULL, // Compressed Buffer + 0, // Compressed Buffer size + &compressedBufferSize); // Compressed Data size + + if (!success) { + + auto lastError = GetLastError(); + + if (lastError != ERROR_INSUFFICIENT_BUFFER) { +#ifdef _DEBUG + std::cerr << + "XPRESS: Failed to estimate compressed buffer size LastError " << + lastError << std::endl; +#endif + return false; + } + } + + assert(compressedBufferSize > 0); + + std::string result; + result.resize(compressedBufferSize); + + SIZE_T compressedDataSize = 0; + + // Compress + success = ::Compress( + compressor, // Compressor Handle + const_cast(input), // Input buffer + length, // Uncompressed data size + &result[0], // Compressed Buffer + compressedBufferSize, // Compressed Buffer size + &compressedDataSize); // Compressed Data size + + if (!success) { +#ifdef _DEBUG + std::cerr << "XPRESS: Failed to compress LastError " << + GetLastError() << std::endl; +#endif + return false; + } + + result.resize(compressedDataSize); + output->swap(result); + + return true; +} + +char* Decompress(const char* input_data, size_t input_length, + int* decompress_size) { + + assert(input_data != nullptr); + assert(decompress_size != nullptr); + + if (input_length == 0) { + return nullptr; + } + + COMPRESS_ALLOCATION_ROUTINES* allocRoutinesPtr = nullptr; + +#ifdef JEMALLOC + COMPRESS_ALLOCATION_ROUTINES allocationRoutines; + + // Init. 
allocation routines + allocationRoutines.Allocate = CompressorAlloc; + allocationRoutines.Free = CompressorFree; + allocationRoutines.UserContext = NULL; + allocRoutinesPtr = &allocationRoutines; +#endif + + DECOMPRESSOR_HANDLE decompressor = NULL; + + BOOL success = CreateDecompressor( + COMPRESS_ALGORITHM_XPRESS, // Compression Algorithm + allocRoutinesPtr, // Optional allocation routine + &decompressor); // Handle + + + if (!success) { +#ifdef _DEBUG + std::cerr << "XPRESS: Failed to create Decompressor LastError " + << GetLastError() << std::endl; +#endif + return nullptr; + } + + std::unique_ptr + compressorGuard(decompressor, CloseDecompressorFun); + + SIZE_T decompressedBufferSize = 0; + + success = ::Decompress( + decompressor, // Compressor Handle + const_cast(input_data), // Compressed data + input_length, // Compressed data size + NULL, // Buffer set to NULL + 0, // Buffer size set to 0 + &decompressedBufferSize); // Decompressed Data size + + if (!success) { + + auto lastError = GetLastError(); + + if (lastError != ERROR_INSUFFICIENT_BUFFER) { +#ifdef _DEBUG + std::cerr + << "XPRESS: Failed to estimate decompressed buffer size LastError " + << lastError << std::endl; +#endif + return nullptr; + } + } + + assert(decompressedBufferSize > 0); + + // On Windows we are limited to a 32-bit int for the + // output data size argument + // so we hopefully never get here + if (decompressedBufferSize > std::numeric_limits::max()) { + assert(false); + return nullptr; + } + + // The callers are deallocating using delete[] + // thus we must allocate with new[] + std::unique_ptr outputBuffer(new char[decompressedBufferSize]); + + SIZE_T decompressedDataSize = 0; + + success = ::Decompress( + decompressor, + const_cast(input_data), + input_length, + outputBuffer.get(), + decompressedBufferSize, + &decompressedDataSize); + + if (!success) { +#ifdef _DEBUG + std::cerr << + "XPRESS: Failed to decompress LastError " << + GetLastError() << std::endl; +#endif + return nullptr; + } + + *decompress_size = static_cast(decompressedDataSize); + + // Return the raw buffer to the caller supporting the tradition + return outputBuffer.release(); +} +} +} +} + +#endif diff --git a/external/rocksdb/port/win/xpress_win.h b/external/rocksdb/port/win/xpress_win.h new file mode 100755 index 0000000000..7d1cbb68b3 --- /dev/null +++ b/external/rocksdb/port/win/xpress_win.h @@ -0,0 +1,26 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once + +#include + +namespace rocksdb { +namespace port { +namespace xpress { + +bool Compress(const char* input, size_t length, std::string* output); + +char* Decompress(const char* input_data, size_t input_length, + int* decompress_size); + +} +} +} + diff --git a/external/rocksdb/port/xpress.h b/external/rocksdb/port/xpress.h new file mode 100755 index 0000000000..db023a2d9e --- /dev/null +++ b/external/rocksdb/port/xpress.h @@ -0,0 +1,17 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once + +// Xpress on Windows is implemeted using Win API +#if defined(ROCKSDB_PLATFORM_POSIX) +#error "Xpress compression not implemented" +#elif defined(OS_WIN) +#include "port/win/xpress_win.h" +#endif diff --git a/external/rocksdb/src.mk b/external/rocksdb/src.mk new file mode 100755 index 0000000000..e647e2b3cb --- /dev/null +++ b/external/rocksdb/src.mk @@ -0,0 +1,353 @@ +# These are the sources from which librocksdb.a is built: +LIB_SOURCES = \ + db/auto_roll_logger.cc \ + db/builder.cc \ + db/c.cc \ + db/column_family.cc \ + db/compacted_db_impl.cc \ + db/compaction.cc \ + db/compaction_iterator.cc \ + db/compaction_job.cc \ + db/compaction_picker.cc \ + db/convenience.cc \ + db/db_filesnapshot.cc \ + db/dbformat.cc \ + db/db_impl.cc \ + db/db_impl_debug.cc \ + db/db_impl_readonly.cc \ + db/db_impl_experimental.cc \ + db/db_impl_add_file.cc \ + db/db_info_dumper.cc \ + db/db_iter.cc \ + db/experimental.cc \ + db/event_helpers.cc \ + db/file_indexer.cc \ + db/filename.cc \ + db/flush_job.cc \ + db/flush_scheduler.cc \ + db/forward_iterator.cc \ + db/internal_stats.cc \ + db/log_reader.cc \ + db/log_writer.cc \ + db/managed_iterator.cc \ + db/memtable_allocator.cc \ + db/memtable.cc \ + db/memtable_list.cc \ + db/merge_helper.cc \ + db/merge_operator.cc \ + db/repair.cc \ + db/snapshot_impl.cc \ + db/table_cache.cc \ + db/table_properties_collector.cc \ + db/transaction_log_impl.cc \ + db/version_builder.cc \ + db/version_edit.cc \ + db/version_set.cc \ + db/wal_manager.cc \ + db/write_batch.cc \ + db/write_batch_base.cc \ + db/write_controller.cc \ + db/write_thread.cc \ + db/xfunc_test_points.cc \ + memtable/hash_cuckoo_rep.cc \ + memtable/hash_linklist_rep.cc \ + memtable/hash_skiplist_rep.cc \ + memtable/skiplistrep.cc \ + memtable/vectorrep.cc \ + port/stack_trace.cc \ + port/port_posix.cc \ + table/adaptive_table_factory.cc \ + table/block_based_filter_block.cc \ + table/block_based_table_builder.cc \ + table/block_based_table_factory.cc \ + table/block_based_table_reader.cc \ + table/block_builder.cc \ + table/block.cc \ + table/block_prefix_index.cc \ + table/bloom_block.cc \ + table/cuckoo_table_builder.cc \ + table/cuckoo_table_factory.cc \ + table/cuckoo_table_reader.cc \ + table/flush_block_policy.cc \ + table/format.cc \ + table/full_filter_block.cc \ + table/get_context.cc \ + table/iterator.cc \ + table/merger.cc \ + table/meta_blocks.cc \ + table/sst_file_writer.cc \ + table/plain_table_builder.cc \ + table/plain_table_factory.cc \ + table/plain_table_index.cc \ + table/plain_table_key_coding.cc \ + table/plain_table_reader.cc \ + table/persistent_cache_helper.cc \ + table/table_properties.cc \ + table/two_level_iterator.cc \ + tools/dump/db_dump_tool.cc \ + util/arena.cc \ + util/bloom.cc \ + util/build_version.cc \ + util/coding.cc \ + util/comparator.cc \ + util/compaction_job_stats_impl.cc \ + util/concurrent_arena.cc \ + util/crc32c.cc \ + util/delete_scheduler.cc \ + util/dynamic_bloom.cc \ + util/env.cc \ + util/env_chroot.cc \ + util/env_hdfs.cc \ + util/env_posix.cc \ + util/file_util.cc \ + 
util/file_reader_writer.cc \ + util/filter_policy.cc \ + util/hash.cc \ + util/histogram.cc \ + util/histogram_windowing.cc \ + util/instrumented_mutex.cc \ + util/iostats_context.cc \ + util/io_posix.cc \ + util/lru_cache.cc \ + util/threadpool.cc \ + util/transaction_test_util.cc \ + util/sharded_cache.cc \ + util/sst_file_manager_impl.cc \ + utilities/backupable/backupable_db.cc \ + utilities/convenience/info_log_finder.cc \ + utilities/checkpoint/checkpoint.cc \ + utilities/compaction_filters/remove_emptyvalue_compactionfilter.cc \ + utilities/document/document_db.cc \ + utilities/document/json_document_builder.cc \ + utilities/document/json_document.cc \ + utilities/env_mirror.cc \ + utilities/env_registry.cc \ + utilities/flashcache/flashcache.cc \ + utilities/geodb/geodb_impl.cc \ + utilities/leveldb_options/leveldb_options.cc \ + utilities/memory/memory_util.cc \ + utilities/merge_operators/put.cc \ + utilities/merge_operators/max.cc \ + utilities/merge_operators/string_append/stringappend2.cc \ + utilities/merge_operators/string_append/stringappend.cc \ + utilities/merge_operators/uint64add.cc \ + utilities/option_change_migration/option_change_migration.cc \ + utilities/options/options_util.cc \ + utilities/persistent_cache/persistent_cache_tier.cc \ + utilities/persistent_cache/volatile_tier_impl.cc \ + utilities/persistent_cache/block_cache_tier_file.cc \ + utilities/persistent_cache/block_cache_tier_metadata.cc \ + utilities/redis/redis_lists.cc \ + utilities/simulator_cache/sim_cache.cc \ + utilities/spatialdb/spatial_db.cc \ + utilities/table_properties_collectors/compact_on_deletion_collector.cc \ + utilities/transactions/optimistic_transaction_impl.cc \ + utilities/transactions/optimistic_transaction_db_impl.cc \ + utilities/transactions/transaction_base.cc \ + utilities/transactions/transaction_db_impl.cc \ + utilities/transactions/transaction_db_mutex_impl.cc \ + utilities/transactions/transaction_lock_mgr.cc \ + utilities/transactions/transaction_impl.cc \ + utilities/transactions/transaction_util.cc \ + utilities/ttl/db_ttl_impl.cc \ + utilities/write_batch_with_index/write_batch_with_index.cc \ + utilities/write_batch_with_index/write_batch_with_index_internal.cc \ + util/event_logger.cc \ + util/log_buffer.cc \ + util/logging.cc \ + util/memenv.cc \ + util/murmurhash.cc \ + util/mutable_cf_options.cc \ + util/options.cc \ + util/options_helper.cc \ + util/options_parser.cc \ + util/options_sanity_check.cc \ + util/perf_context.cc \ + util/perf_level.cc \ + util/random.cc \ + util/rate_limiter.cc \ + util/slice.cc \ + util/statistics.cc \ + util/status.cc \ + util/status_message.cc \ + util/string_util.cc \ + util/sync_point.cc \ + util/thread_local.cc \ + util/thread_status_impl.cc \ + util/thread_status_updater.cc \ + util/thread_status_updater_debug.cc \ + util/thread_status_util.cc \ + util/thread_status_util_debug.cc \ + util/xfunc.cc \ + util/xxhash.cc \ + +TOOL_LIB_SOURCES = \ + tools/ldb_cmd.cc \ + tools/ldb_tool.cc \ + tools/sst_dump_tool.cc \ + +MOCK_LIB_SOURCES = \ + table/mock_table.cc \ + util/mock_env.cc \ + util/fault_injection_test_env.cc + +BENCH_LIB_SOURCES = \ + tools/db_bench_tool.cc + +TEST_LIB_SOURCES = \ + util/testharness.cc \ + util/testutil.cc \ + db/db_test_util.cc + +MAIN_SOURCES = \ + third-party/gtest-1.7.0/fused-src/gtest/gtest-all.cc \ + db/auto_roll_logger_test.cc \ + db/column_family_test.cc \ + db/compaction_job_test.cc \ + db/compaction_job_stats_test.cc \ + db/compaction_picker_test.cc \ + db/comparator_db_test.cc \ + 
db/corruption_test.cc \ + db/cuckoo_table_db_test.cc \ + db/dbformat_test.cc \ + db/db_iter_test.cc \ + db/db_test.cc \ + db/db_block_cache_test.cc \ + db/db_io_failure_test.cc \ + db/db_bloom_filter_test.cc \ + db/db_compaction_filter_test.cc \ + db/db_compaction_test.cc \ + db/db_dynamic_level_test.cc \ + db/db_flush_test.cc \ + db/db_inplace_update_test.cc \ + db/db_iterator_test.cc \ + db/db_log_iter_test.cc \ + db/db_options_test.cc \ + db/db_sst_test.cc \ + db/db_tailing_iter_test.cc \ + db/db_universal_compaction_test.cc \ + db/db_wal_test.cc \ + db/db_table_properties_test.cc \ + db/deletefile_test.cc \ + db/fault_injection_test.cc \ + db/file_indexer_test.cc \ + db/filename_test.cc \ + db/flush_job_test.cc \ + db/inlineskiplist_test.cc \ + db/listener_test.cc \ + db/log_test.cc \ + db/manual_compaction_test.cc \ + db/memtablerep_bench.cc \ + db/merge_test.cc \ + db/options_file_test.cc \ + db/perf_context_test.cc \ + db/plain_table_db_test.cc \ + db/prefix_test.cc \ + db/skiplist_test.cc \ + db/table_properties_collector_test.cc \ + db/version_builder_test.cc \ + db/version_edit_test.cc \ + db/version_set_test.cc \ + db/wal_manager_test.cc \ + db/write_batch_test.cc \ + db/write_controller_test.cc \ + db/write_callback_test.cc \ + table/block_based_filter_block_test.cc \ + table/block_test.cc \ + table/cuckoo_table_builder_test.cc \ + table/cuckoo_table_reader_test.cc \ + table/full_filter_block_test.cc \ + table/merger_test.cc \ + table/table_reader_bench.cc \ + table/table_test.cc \ + tools/db_bench.cc \ + tools/db_bench_tool_test.cc \ + tools/db_sanity_test.cc \ + tools/ldb_cmd_test.cc \ + tools/reduce_levels_test.cc \ + tools/sst_dump_test.cc \ + util/arena_test.cc \ + util/autovector_test.cc \ + util/bloom_test.cc \ + util/cache_bench.cc \ + util/cache_test.cc \ + util/coding_test.cc \ + util/crc32c_test.cc \ + util/dynamic_bloom_test.cc \ + util/env_basic_test.cc \ + util/env_test.cc \ + util/filelock_test.cc \ + util/histogram_test.cc \ + util/statistics_test.cc \ + utilities/backupable/backupable_db_test.cc \ + utilities/checkpoint/checkpoint_test.cc \ + utilities/document/document_db_test.cc \ + utilities/document/json_document_test.cc \ + utilities/env_registry_test.cc \ + utilities/geodb/geodb_test.cc \ + utilities/memory/memory_test.cc \ + utilities/merge_operators/string_append/stringappend_test.cc \ + utilities/option_change_migration/option_change_migration_test.cc \ + utilities/options/options_util_test.cc \ + utilities/redis/redis_lists_test.cc \ + utilities/simulator_cache/sim_cache_test.cc \ + utilities/spatialdb/spatial_db_test.cc \ + utilities/table_properties_collectors/compact_on_deletion_collector_test.cc \ + utilities/transactions/optimistic_transaction_test.cc \ + utilities/transactions/transaction_test.cc \ + utilities/ttl/ttl_test.cc \ + utilities/write_batch_with_index/write_batch_with_index_test.cc \ + util/iostats_context_test.cc \ + util/log_write_bench.cc \ + util/mock_env_test.cc \ + util/options_test.cc \ + util/event_logger_test.cc \ + util/rate_limiter_test.cc \ + util/slice_transform_test.cc \ + util/thread_list_test.cc \ + util/thread_local_test.cc + +JNI_NATIVE_SOURCES = \ + java/rocksjni/backupenginejni.cc \ + java/rocksjni/backupablejni.cc \ + java/rocksjni/checkpoint.cc \ + java/rocksjni/columnfamilyhandle.cc \ + java/rocksjni/compaction_filter.cc \ + java/rocksjni/comparator.cc \ + java/rocksjni/comparatorjnicallback.cc \ + java/rocksjni/env.cc \ + java/rocksjni/filter.cc \ + java/rocksjni/iterator.cc \ + 
java/rocksjni/loggerjnicallback.cc \ + java/rocksjni/memtablejni.cc \ + java/rocksjni/merge_operator.cc \ + java/rocksjni/options.cc \ + java/rocksjni/ratelimiterjni.cc \ + java/rocksjni/remove_emptyvalue_compactionfilterjni.cc \ + java/rocksjni/restorejni.cc \ + java/rocksjni/rocksjni.cc \ + java/rocksjni/slice.cc \ + java/rocksjni/snapshot.cc \ + java/rocksjni/statistics.cc \ + java/rocksjni/table.cc \ + java/rocksjni/transaction_log.cc \ + java/rocksjni/ttl.cc \ + java/rocksjni/write_batch.cc \ + java/rocksjni/writebatchhandlerjnicallback.cc \ + java/rocksjni/write_batch_test.cc \ + java/rocksjni/write_batch_with_index.cc + +# Currently, we do not generate dependencies for +# java/rocksjni/write_batch_test.cc, because its dependent, +# java/include/org_rocksdb_WriteBatch.h is generated. +# TODO/FIXME: fix the above. Otherwise, the current rules would fail: +# java/rocksjni/write_batch_test.cc:13:44: fatal error: include/org_rocksdb_WriteBatch.h: No such file or directory +# #include "include/org_rocksdb_WriteBatch.h" + +# These are the xfunc tests run : +XFUNC_TESTS = \ + "managed_new" \ + "managed_xftest_dropold" \ + "managed_xftest_release" \ + "inplace_lock_test" \ + "transaction" diff --git a/external/rocksdb/table/adaptive_table_factory.cc b/external/rocksdb/table/adaptive_table_factory.cc new file mode 100755 index 0000000000..bb19c417ed --- /dev/null +++ b/external/rocksdb/table/adaptive_table_factory.cc @@ -0,0 +1,122 @@ +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef ROCKSDB_LITE +#include "table/adaptive_table_factory.h" + +#include "table/table_builder.h" +#include "table/format.h" +#include "port/port.h" + +namespace rocksdb { + +AdaptiveTableFactory::AdaptiveTableFactory( + std::shared_ptr table_factory_to_write, + std::shared_ptr block_based_table_factory, + std::shared_ptr plain_table_factory, + std::shared_ptr cuckoo_table_factory) + : table_factory_to_write_(table_factory_to_write), + block_based_table_factory_(block_based_table_factory), + plain_table_factory_(plain_table_factory), + cuckoo_table_factory_(cuckoo_table_factory) { + if (!table_factory_to_write_) { + table_factory_to_write_ = block_based_table_factory_; + } + if (!plain_table_factory_) { + plain_table_factory_.reset(NewPlainTableFactory()); + } + if (!block_based_table_factory_) { + block_based_table_factory_.reset(NewBlockBasedTableFactory()); + } + if (!cuckoo_table_factory_) { + cuckoo_table_factory_.reset(NewCuckooTableFactory()); + } +} + +extern const uint64_t kPlainTableMagicNumber; +extern const uint64_t kLegacyPlainTableMagicNumber; +extern const uint64_t kBlockBasedTableMagicNumber; +extern const uint64_t kLegacyBlockBasedTableMagicNumber; +extern const uint64_t kCuckooTableMagicNumber; + +Status AdaptiveTableFactory::NewTableReader( + const TableReaderOptions& table_reader_options, + unique_ptr&& file, uint64_t file_size, + unique_ptr* table, + bool prefetch_index_and_filter_in_cache) const { + Footer footer; + auto s = ReadFooterFromFile(file.get(), file_size, &footer); + if (!s.ok()) { + return s; + } + if (footer.table_magic_number() == kPlainTableMagicNumber || + footer.table_magic_number() == kLegacyPlainTableMagicNumber) { + return plain_table_factory_->NewTableReader( + table_reader_options, std::move(file), file_size, table); + } else if (footer.table_magic_number() == kBlockBasedTableMagicNumber || + 
footer.table_magic_number() == kLegacyBlockBasedTableMagicNumber) { + return block_based_table_factory_->NewTableReader( + table_reader_options, std::move(file), file_size, table); + } else if (footer.table_magic_number() == kCuckooTableMagicNumber) { + return cuckoo_table_factory_->NewTableReader( + table_reader_options, std::move(file), file_size, table); + } else { + return Status::NotSupported("Unidentified table format"); + } +} + +TableBuilder* AdaptiveTableFactory::NewTableBuilder( + const TableBuilderOptions& table_builder_options, uint32_t column_family_id, + WritableFileWriter* file) const { + return table_factory_to_write_->NewTableBuilder(table_builder_options, + column_family_id, file); +} + +std::string AdaptiveTableFactory::GetPrintableTableOptions() const { + std::string ret; + ret.reserve(20000); + const int kBufferSize = 200; + char buffer[kBufferSize]; + + if (table_factory_to_write_) { + snprintf(buffer, kBufferSize, " write factory (%s) options:\n%s\n", + (table_factory_to_write_->Name() ? table_factory_to_write_->Name() + : ""), + table_factory_to_write_->GetPrintableTableOptions().c_str()); + ret.append(buffer); + } + if (plain_table_factory_) { + snprintf(buffer, kBufferSize, " %s options:\n%s\n", + plain_table_factory_->Name() ? plain_table_factory_->Name() : "", + plain_table_factory_->GetPrintableTableOptions().c_str()); + ret.append(buffer); + } + if (block_based_table_factory_) { + snprintf( + buffer, kBufferSize, " %s options:\n%s\n", + (block_based_table_factory_->Name() ? block_based_table_factory_->Name() + : ""), + block_based_table_factory_->GetPrintableTableOptions().c_str()); + ret.append(buffer); + } + if (cuckoo_table_factory_) { + snprintf(buffer, kBufferSize, " %s options:\n%s\n", + cuckoo_table_factory_->Name() ? cuckoo_table_factory_->Name() : "", + cuckoo_table_factory_->GetPrintableTableOptions().c_str()); + ret.append(buffer); + } + return ret; +} + +extern TableFactory* NewAdaptiveTableFactory( + std::shared_ptr table_factory_to_write, + std::shared_ptr block_based_table_factory, + std::shared_ptr plain_table_factory, + std::shared_ptr cuckoo_table_factory) { + return new AdaptiveTableFactory(table_factory_to_write, + block_based_table_factory, plain_table_factory, cuckoo_table_factory); +} + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/adaptive_table_factory.h b/external/rocksdb/table/adaptive_table_factory.h new file mode 100755 index 0000000000..b7b52ba96f --- /dev/null +++ b/external/rocksdb/table/adaptive_table_factory.h @@ -0,0 +1,62 @@ +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
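A sketch of how the factory declared below (and defined above) is typically wired into DB options: reads dispatch on the file footer's magic number to whichever of the three formats wrote the file, while new files are written with the factory passed first. This assumes the public rocksdb headers are available; the database path is illustrative:

#include <memory>
#include "rocksdb/db.h"
#include "rocksdb/options.h"
#include "rocksdb/table.h"

int main() {
  rocksdb::Options options;
  options.create_if_missing = true;
  // Write new files in the block-based format; read any known format.
  std::shared_ptr<rocksdb::TableFactory> write_factory(
      rocksdb::NewBlockBasedTableFactory());
  options.table_factory.reset(rocksdb::NewAdaptiveTableFactory(
      write_factory, nullptr, nullptr, nullptr));
  rocksdb::DB* db = nullptr;
  rocksdb::Status s = rocksdb::DB::Open(options, "/tmp/adaptive_example", &db);
  delete db;
  return s.ok() ? 0 : 1;
}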
+
+#pragma once
+
+#ifndef ROCKSDB_LITE
+
+#include <string>
+#include "rocksdb/options.h"
+#include "rocksdb/table.h"
+
+namespace rocksdb {
+
+struct EnvOptions;
+
+using std::unique_ptr;
+class Status;
+class RandomAccessFile;
+class WritableFile;
+class Table;
+class TableBuilder;
+
+class AdaptiveTableFactory : public TableFactory {
+ public:
+  ~AdaptiveTableFactory() {}
+
+  explicit AdaptiveTableFactory(
+      std::shared_ptr<TableFactory> table_factory_to_write,
+      std::shared_ptr<TableFactory> block_based_table_factory,
+      std::shared_ptr<TableFactory> plain_table_factory,
+      std::shared_ptr<TableFactory> cuckoo_table_factory);
+
+  const char* Name() const override { return "AdaptiveTableFactory"; }
+
+  Status NewTableReader(
+      const TableReaderOptions& table_reader_options,
+      unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
+      unique_ptr<TableReader>* table,
+      bool prefetch_index_and_filter_in_cache = true) const override;
+
+  TableBuilder* NewTableBuilder(
+      const TableBuilderOptions& table_builder_options,
+      uint32_t column_family_id, WritableFileWriter* file) const override;
+
+  // Sanitizes the specified DB Options.
+  Status SanitizeOptions(const DBOptions& db_opts,
+                         const ColumnFamilyOptions& cf_opts) const override {
+    return Status::OK();
+  }
+
+  std::string GetPrintableTableOptions() const override;
+
+ private:
+  std::shared_ptr<TableFactory> table_factory_to_write_;
+  std::shared_ptr<TableFactory> block_based_table_factory_;
+  std::shared_ptr<TableFactory> plain_table_factory_;
+  std::shared_ptr<TableFactory> cuckoo_table_factory_;
+};
+
+}  // namespace rocksdb
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/table/block.cc b/external/rocksdb/table/block.cc
new file mode 100755
index 0000000000..fa14a00f02
--- /dev/null
+++ b/external/rocksdb/table/block.cc
@@ -0,0 +1,410 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+//
+// Decodes the blocks generated by block_builder.cc.
+
+#include "table/block.h"
+
+#include <algorithm>
+#include <string>
+#include <unordered_map>
+#include <vector>
+
+#include "rocksdb/comparator.h"
+#include "table/format.h"
+#include "table/block_prefix_index.h"
+#include "util/coding.h"
+#include "util/logging.h"
+#include "util/perf_context_imp.h"
+
+namespace rocksdb {
+
+// Helper routine: decode the next block entry starting at "p",
+// storing the number of shared key bytes, non_shared key bytes,
+// and the length of the value in "*shared", "*non_shared", and
+// "*value_length", respectively. Will not dereference past "limit".
+//
+// If any errors are detected, returns nullptr. Otherwise, returns a
+// pointer to the key delta (just past the three decoded values).
+static inline const char* DecodeEntry(const char* p, const char* limit,
+                                      uint32_t* shared,
+                                      uint32_t* non_shared,
+                                      uint32_t* value_length) {
+  if (limit - p < 3) return nullptr;
+  *shared = reinterpret_cast<const unsigned char*>(p)[0];
+  *non_shared = reinterpret_cast<const unsigned char*>(p)[1];
+  *value_length = reinterpret_cast<const unsigned char*>(p)[2];
+  if ((*shared | *non_shared | *value_length) < 128) {
+    // Fast path: all three values are encoded in one byte each
+    p += 3;
+  } else {
+    if ((p = GetVarint32Ptr(p, limit, shared)) == nullptr) return nullptr;
+    if ((p = GetVarint32Ptr(p, limit, non_shared)) == nullptr) return nullptr;
+    if ((p = GetVarint32Ptr(p, limit, value_length)) == nullptr) return nullptr;
+  }
+
+  if (static_cast<uint32_t>(limit - p) < (*non_shared + *value_length)) {
+    return nullptr;
+  }
+  return p;
+}
+
+void BlockIter::Next() {
+  assert(Valid());
+  ParseNextKey();
+}
+
+void BlockIter::Prev() {
+  assert(Valid());
+
+  assert(prev_entries_idx_ == -1 ||
+         static_cast<size_t>(prev_entries_idx_) < prev_entries_.size());
+  // Check if we can use cached prev_entries_
+  if (prev_entries_idx_ > 0 &&
+      prev_entries_[prev_entries_idx_].offset == current_) {
+    // Read cached CachedPrevEntry
+    prev_entries_idx_--;
+    const CachedPrevEntry& current_prev_entry =
+        prev_entries_[prev_entries_idx_];
+
+    const char* key_ptr = nullptr;
+    if (current_prev_entry.key_ptr != nullptr) {
+      // The key is not delta encoded and stored in the data block
+      key_ptr = current_prev_entry.key_ptr;
+      key_pinned_ = true;
+    } else {
+      // The key is delta encoded and stored in prev_entries_keys_buff_
+      key_ptr = prev_entries_keys_buff_.data() + current_prev_entry.key_offset;
+      key_pinned_ = false;
+    }
+    const Slice current_key(key_ptr, current_prev_entry.key_size);
+
+    current_ = current_prev_entry.offset;
+    key_.SetKey(current_key, false /* copy */);
+    value_ = current_prev_entry.value;
+
+    return;
+  }
+
+  // Clear prev entries cache
+  prev_entries_idx_ = -1;
+  prev_entries_.clear();
+  prev_entries_keys_buff_.clear();
+
+  // Scan backwards to a restart point before current_
+  const uint32_t original = current_;
+  while (GetRestartPoint(restart_index_) >= original) {
+    if (restart_index_ == 0) {
+      // No more entries
+      current_ = restarts_;
+      restart_index_ = num_restarts_;
+      return;
+    }
+    restart_index_--;
+  }
+
+  SeekToRestartPoint(restart_index_);
+
+  do {
+    if (!ParseNextKey()) {
+      break;
+    }
+    Slice current_key = key();
+
+    if (key_.IsKeyPinned()) {
+      // The key is not delta encoded
+      prev_entries_.emplace_back(current_, current_key.data(), 0,
+                                 current_key.size(), value());
+    } else {
+      // The key is delta encoded, cache decoded key in buffer
+      size_t new_key_offset = prev_entries_keys_buff_.size();
+      prev_entries_keys_buff_.append(current_key.data(), current_key.size());
+
+      prev_entries_.emplace_back(current_, nullptr, new_key_offset,
+                                 current_key.size(), value());
+    }
+    // Loop until end of current entry hits the start of original entry
+  } while (NextEntryOffset() < original);
+  prev_entries_idx_ = static_cast<int32_t>(prev_entries_.size()) - 1;
+}
+
+void BlockIter::Seek(const Slice& target) {
+  PERF_TIMER_GUARD(block_seek_nanos);
+  if (data_ == nullptr) {  // Not init yet
+    return;
+  }
+  uint32_t index = 0;
+  bool ok = false;
+  if (prefix_index_) {
+    ok = PrefixSeek(target, &index);
+  } else {
+    ok = BinarySeek(target, 0, num_restarts_ - 1, &index);
+  }
+
+  if (!ok) {
+    return;
+  }
+  SeekToRestartPoint(index);
+  // Linear search (within restart block) for first key >= target
+
+  while (true) {
+    if (!ParseNextKey() || Compare(key_.GetKey(), target) >= 0) {
+      return;
+    }
+ } +} + +void BlockIter::SeekToFirst() { + if (data_ == nullptr) { // Not init yet + return; + } + SeekToRestartPoint(0); + ParseNextKey(); +} + +void BlockIter::SeekToLast() { + if (data_ == nullptr) { // Not init yet + return; + } + SeekToRestartPoint(num_restarts_ - 1); + while (ParseNextKey() && NextEntryOffset() < restarts_) { + // Keep skipping + } +} + +void BlockIter::CorruptionError() { + current_ = restarts_; + restart_index_ = num_restarts_; + status_ = Status::Corruption("bad entry in block"); + key_.Clear(); + value_.clear(); +} + +bool BlockIter::ParseNextKey() { + current_ = NextEntryOffset(); + const char* p = data_ + current_; + const char* limit = data_ + restarts_; // Restarts come right after data + if (p >= limit) { + // No more entries to return. Mark as invalid. + current_ = restarts_; + restart_index_ = num_restarts_; + return false; + } + + // Decode next entry + uint32_t shared, non_shared, value_length; + p = DecodeEntry(p, limit, &shared, &non_shared, &value_length); + if (p == nullptr || key_.Size() < shared) { + CorruptionError(); + return false; + } else { + if (shared == 0) { + // If this key dont share any bytes with prev key then we dont need + // to decode it and can use it's address in the block directly. + key_.SetKey(Slice(p, non_shared), false /* copy */); + key_pinned_ = true; + } else { + // This key share `shared` bytes with prev key, we need to decode it + key_.TrimAppend(shared, p, non_shared); + key_pinned_ = false; + } + value_ = Slice(p + non_shared, value_length); + while (restart_index_ + 1 < num_restarts_ && + GetRestartPoint(restart_index_ + 1) < current_) { + ++restart_index_; + } + return true; + } + } + +// Binary search in restart array to find the first restart point +// with a key >= target (TODO: this comment is inaccurate) +bool BlockIter::BinarySeek(const Slice& target, uint32_t left, uint32_t right, + uint32_t* index) { + assert(left <= right); + + while (left < right) { + uint32_t mid = (left + right + 1) / 2; + uint32_t region_offset = GetRestartPoint(mid); + uint32_t shared, non_shared, value_length; + const char* key_ptr = + DecodeEntry(data_ + region_offset, data_ + restarts_, &shared, + &non_shared, &value_length); + if (key_ptr == nullptr || (shared != 0)) { + CorruptionError(); + return false; + } + Slice mid_key(key_ptr, non_shared); + int cmp = Compare(mid_key, target); + if (cmp < 0) { + // Key at "mid" is smaller than "target". Therefore all + // blocks before "mid" are uninteresting. + left = mid; + } else if (cmp > 0) { + // Key at "mid" is >= "target". Therefore all blocks at or + // after "mid" are uninteresting. + right = mid - 1; + } else { + left = right = mid; + } + } + + *index = left; + return true; +} + +// Compare target key and the block key of the block of `block_index`. +// Return -1 if error. 
+int BlockIter::CompareBlockKey(uint32_t block_index, const Slice& target) { + uint32_t region_offset = GetRestartPoint(block_index); + uint32_t shared, non_shared, value_length; + const char* key_ptr = DecodeEntry(data_ + region_offset, data_ + restarts_, + &shared, &non_shared, &value_length); + if (key_ptr == nullptr || (shared != 0)) { + CorruptionError(); + return 1; // Return target is smaller + } + Slice block_key(key_ptr, non_shared); + return Compare(block_key, target); +} + +// Binary search in block_ids to find the first block +// with a key >= target +bool BlockIter::BinaryBlockIndexSeek(const Slice& target, uint32_t* block_ids, + uint32_t left, uint32_t right, + uint32_t* index) { + assert(left <= right); + uint32_t left_bound = left; + + while (left <= right) { + uint32_t mid = (left + right) / 2; + + int cmp = CompareBlockKey(block_ids[mid], target); + if (!status_.ok()) { + return false; + } + if (cmp < 0) { + // Key at "target" is larger than "mid". Therefore all + // blocks before or at "mid" are uninteresting. + left = mid + 1; + } else { + // Key at "target" is <= "mid". Therefore all blocks + // after "mid" are uninteresting. + // If there is only one block left, we found it. + if (left == right) break; + right = mid; + } + } + + if (left == right) { + // In one of the two following cases: + // (1) left is the first one of block_ids + // (2) there is a gap of blocks between block of `left` and `left-1`. + // we can further distinguish the case of key in the block or key not + // existing, by comparing the target key and the key of the previous + // block to the left of the block found. + if (block_ids[left] > 0 && + (left == left_bound || block_ids[left - 1] != block_ids[left] - 1) && + CompareBlockKey(block_ids[left] - 1, target) > 0) { + current_ = restarts_; + return false; + } + + *index = block_ids[left]; + return true; + } else { + assert(left > right); + // Mark iterator invalid + current_ = restarts_; + return false; + } +} + +bool BlockIter::PrefixSeek(const Slice& target, uint32_t* index) { + assert(prefix_index_); + uint32_t* block_ids = nullptr; + uint32_t num_blocks = prefix_index_->GetBlocks(target, &block_ids); + + if (num_blocks == 0) { + current_ = restarts_; + return false; + } else { + return BinaryBlockIndexSeek(target, block_ids, 0, num_blocks - 1, index); + } +} + +uint32_t Block::NumRestarts() const { + assert(size_ >= 2*sizeof(uint32_t)); + return DecodeFixed32(data_ + size_ - sizeof(uint32_t)); +} + +Block::Block(BlockContents&& contents) + : contents_(std::move(contents)), + data_(contents_.data.data()), + size_(contents_.data.size()) { + if (size_ < sizeof(uint32_t)) { + size_ = 0; // Error marker + } else { + restart_offset_ = + static_cast(size_) - (1 + NumRestarts()) * sizeof(uint32_t); + if (restart_offset_ > size_ - sizeof(uint32_t)) { + // The size is too small for NumRestarts() and therefore + // restart_offset_ wrapped around. 
+ size_ = 0; + } + } +} + +InternalIterator* Block::NewIterator(const Comparator* cmp, BlockIter* iter, + bool total_order_seek) { + if (size_ < 2*sizeof(uint32_t)) { + if (iter != nullptr) { + iter->SetStatus(Status::Corruption("bad block contents")); + return iter; + } else { + return NewErrorInternalIterator(Status::Corruption("bad block contents")); + } + } + const uint32_t num_restarts = NumRestarts(); + if (num_restarts == 0) { + if (iter != nullptr) { + iter->SetStatus(Status::OK()); + return iter; + } else { + return NewEmptyInternalIterator(); + } + } else { + BlockPrefixIndex* prefix_index_ptr = + total_order_seek ? nullptr : prefix_index_.get(); + + if (iter != nullptr) { + iter->Initialize(cmp, data_, restart_offset_, num_restarts, + prefix_index_ptr); + } else { + iter = new BlockIter(cmp, data_, restart_offset_, num_restarts, + prefix_index_ptr); + } + } + + return iter; +} + +void Block::SetBlockPrefixIndex(BlockPrefixIndex* prefix_index) { + prefix_index_.reset(prefix_index); +} + +size_t Block::ApproximateMemoryUsage() const { + size_t usage = usable_size(); + if (prefix_index_) { + usage += prefix_index_->ApproximateMemoryUsage(); + } + return usage; +} + +} // namespace rocksdb diff --git a/external/rocksdb/table/block.h b/external/rocksdb/table/block.h new file mode 100755 index 0000000000..81ca2aa412 --- /dev/null +++ b/external/rocksdb/table/block.h @@ -0,0 +1,248 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#include +#include +#include +#include +#ifdef ROCKSDB_MALLOC_USABLE_SIZE +#include +#endif + +#include "db/dbformat.h" +#include "db/pinned_iterators_manager.h" +#include "rocksdb/iterator.h" +#include "rocksdb/options.h" +#include "table/block_prefix_index.h" +#include "table/internal_iterator.h" + +#include "format.h" + +namespace rocksdb { + +struct BlockContents; +class Comparator; +class BlockIter; +class BlockPrefixIndex; + +class Block { + public: + // Initialize the block with the specified contents. + explicit Block(BlockContents&& contents); + + ~Block() = default; + + size_t size() const { return size_; } + const char* data() const { return data_; } + bool cachable() const { return contents_.cachable; } + size_t usable_size() const { +#ifdef ROCKSDB_MALLOC_USABLE_SIZE + if (contents_.allocation.get() != nullptr) { + return malloc_usable_size(contents_.allocation.get()); + } +#endif // ROCKSDB_MALLOC_USABLE_SIZE + return size_; + } + uint32_t NumRestarts() const; + CompressionType compression_type() const { + return contents_.compression_type; + } + + // If hash index lookup is enabled and `use_hash_index` is true. This block + // will do hash lookup for the key prefix. + // + // NOTE: for the hash based lookup, if a key prefix doesn't match any key, + // the iterator will simply be set as "invalid", rather than returning + // the key that is just pass the target key. + // + // If iter is null, return new Iterator + // If iter is not null, update this one and return it as Iterator* + // + // If total_order_seek is true, hash_index_ and prefix_index_ are ignored. 
+ // This option only applies for index block. For data block, hash_index_ + // and prefix_index_ are null, so this option does not matter. + InternalIterator* NewIterator(const Comparator* comparator, + BlockIter* iter = nullptr, + bool total_order_seek = true); + void SetBlockPrefixIndex(BlockPrefixIndex* prefix_index); + + // Report an approximation of how much memory has been used. + size_t ApproximateMemoryUsage() const; + + private: + BlockContents contents_; + const char* data_; // contents_.data.data() + size_t size_; // contents_.data.size() + uint32_t restart_offset_; // Offset in data_ of restart array + std::unique_ptr prefix_index_; + + // No copying allowed + Block(const Block&); + void operator=(const Block&); +}; + +class BlockIter : public InternalIterator { + public: + BlockIter() + : comparator_(nullptr), + data_(nullptr), + restarts_(0), + num_restarts_(0), + current_(0), + restart_index_(0), + status_(Status::OK()), + prefix_index_(nullptr), + key_pinned_(false) {} + + BlockIter(const Comparator* comparator, const char* data, uint32_t restarts, + uint32_t num_restarts, BlockPrefixIndex* prefix_index) + : BlockIter() { + Initialize(comparator, data, restarts, num_restarts, prefix_index); + } + + void Initialize(const Comparator* comparator, const char* data, + uint32_t restarts, uint32_t num_restarts, + BlockPrefixIndex* prefix_index) { + assert(data_ == nullptr); // Ensure it is called only once + assert(num_restarts > 0); // Ensure the param is valid + + comparator_ = comparator; + data_ = data; + restarts_ = restarts; + num_restarts_ = num_restarts; + current_ = restarts_; + restart_index_ = num_restarts_; + prefix_index_ = prefix_index; + } + + void SetStatus(Status s) { + status_ = s; + } + + virtual bool Valid() const override { return current_ < restarts_; } + virtual Status status() const override { return status_; } + virtual Slice key() const override { + assert(Valid()); + return key_.GetKey(); + } + virtual Slice value() const override { + assert(Valid()); + return value_; + } + + virtual void Next() override; + + virtual void Prev() override; + + virtual void Seek(const Slice& target) override; + + virtual void SeekToFirst() override; + + virtual void SeekToLast() override; + +#ifndef NDEBUG + ~BlockIter() { + // Assert that the BlockIter is never deleted while Pinning is Enabled. + assert(!pinned_iters_mgr_ || + (pinned_iters_mgr_ && !pinned_iters_mgr_->PinningEnabled())); + } + virtual void SetPinnedItersMgr( + PinnedIteratorsManager* pinned_iters_mgr) override { + pinned_iters_mgr_ = pinned_iters_mgr; + } + PinnedIteratorsManager* pinned_iters_mgr_ = nullptr; +#endif + + virtual bool IsKeyPinned() const override { return key_pinned_; } + + virtual bool IsValuePinned() const override { return true; } + + private: + const Comparator* comparator_; + const char* data_; // underlying block contents + uint32_t restarts_; // Offset of restart array (list of fixed32) + uint32_t num_restarts_; // Number of uint32_t entries in restart array + + // current_ is offset in data_ of current entry. 
>= restarts_ if !Valid + uint32_t current_; + uint32_t restart_index_; // Index of restart block in which current_ falls + IterKey key_; + Slice value_; + Status status_; + BlockPrefixIndex* prefix_index_; + bool key_pinned_; + + struct CachedPrevEntry { + explicit CachedPrevEntry(uint32_t _offset, const char* _key_ptr, + size_t _key_offset, size_t _key_size, Slice _value) + : offset(_offset), + key_ptr(_key_ptr), + key_offset(_key_offset), + key_size(_key_size), + value(_value) {} + + // offset of entry in block + uint32_t offset; + // Pointer to key data in block (nullptr if key is delta-encoded) + const char* key_ptr; + // offset of key in prev_entries_keys_buff_ (0 if key_ptr is not nullptr) + size_t key_offset; + // size of key + size_t key_size; + // value slice pointing to data in block + Slice value; + }; + std::string prev_entries_keys_buff_; + std::vector prev_entries_; + int32_t prev_entries_idx_ = -1; + + inline int Compare(const Slice& a, const Slice& b) const { + return comparator_->Compare(a, b); + } + + // Return the offset in data_ just past the end of the current entry. + inline uint32_t NextEntryOffset() const { + // NOTE: We don't support files bigger than 2GB + return static_cast((value_.data() + value_.size()) - data_); + } + + uint32_t GetRestartPoint(uint32_t index) { + assert(index < num_restarts_); + return DecodeFixed32(data_ + restarts_ + index * sizeof(uint32_t)); + } + + void SeekToRestartPoint(uint32_t index) { + key_.Clear(); + restart_index_ = index; + // current_ will be fixed by ParseNextKey(); + + // ParseNextKey() starts at the end of value_, so set value_ accordingly + uint32_t offset = GetRestartPoint(index); + value_ = Slice(data_ + offset, 0); + } + + void CorruptionError(); + + bool ParseNextKey(); + + bool BinarySeek(const Slice& target, uint32_t left, uint32_t right, + uint32_t* index); + + int CompareBlockKey(uint32_t block_index, const Slice& target); + + bool BinaryBlockIndexSeek(const Slice& target, uint32_t* block_ids, + uint32_t left, uint32_t right, + uint32_t* index); + + bool PrefixSeek(const Slice& target, uint32_t* index); + +}; + +} // namespace rocksdb diff --git a/external/rocksdb/table/block_based_filter_block.cc b/external/rocksdb/table/block_based_filter_block.cc new file mode 100755 index 0000000000..427c9fe9c0 --- /dev/null +++ b/external/rocksdb/table/block_based_filter_block.cc @@ -0,0 +1,250 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2012 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
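Write-side counterpart to DecodeEntry() in block.cc above: each entry in a block is laid out as three varint32s — shared key bytes, non-shared key bytes, value length — followed by the key delta and the value. The real encoder is BlockBuilder, which is not part of this diff; the sketch below only illustrates that entry layout:

#include <algorithm>
#include <cstdint>
#include <string>

// Varint32 as the decoder above expects: 7 bits per byte,
// least-significant group first, high bit set on continuation bytes.
static void PutVarint32(std::string* dst, uint32_t v) {
  while (v >= 128) {
    dst->push_back(static_cast<char>((v & 0x7f) | 0x80));
    v >>= 7;
  }
  dst->push_back(static_cast<char>(v));
}

// Appends [shared][non_shared][value_len][key delta][value] to *block.
static void EncodeEntry(std::string* block, const std::string& prev_key,
                        const std::string& key, const std::string& value) {
  size_t shared = 0;
  const size_t min_len = std::min(prev_key.size(), key.size());
  while (shared < min_len && prev_key[shared] == key[shared]) {
    shared++;
  }
  PutVarint32(block, static_cast<uint32_t>(shared));
  PutVarint32(block, static_cast<uint32_t>(key.size() - shared));
  PutVarint32(block, static_cast<uint32_t>(value.size()));
  block->append(key.data() + shared, key.size() - shared);
  block->append(value);
}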
+ +#include "table/block_based_filter_block.h" +#include + +#include "db/dbformat.h" +#include "rocksdb/filter_policy.h" +#include "util/coding.h" +#include "util/perf_context_imp.h" +#include "util/string_util.h" + +namespace rocksdb { + +namespace { + +void AppendItem(std::string* props, const std::string& key, + const std::string& value) { + char cspace = ' '; + std::string value_str(""); + size_t i = 0; + const size_t dataLength = 64; + const size_t tabLength = 2; + const size_t offLength = 16; + + value_str.append(&value[i], std::min(size_t(dataLength), value.size())); + i += dataLength; + while (i < value.size()) { + value_str.append("\n"); + value_str.append(offLength, cspace); + value_str.append(&value[i], std::min(size_t(dataLength), value.size() - i)); + i += dataLength; + } + + std::string result(""); + if (key.size() < (offLength - tabLength)) + result.append(size_t((offLength - tabLength)) - key.size(), cspace); + result.append(key); + + props->append(result + ": " + value_str + "\n"); +} + +template +void AppendItem(std::string* props, const TKey& key, const std::string& value) { + std::string key_str = rocksdb::ToString(key); + AppendItem(props, key_str, value); +} +} // namespace + + +// See doc/table_format.txt for an explanation of the filter block format. + +// Generate new filter every 2KB of data +static const size_t kFilterBaseLg = 11; +static const size_t kFilterBase = 1 << kFilterBaseLg; + +BlockBasedFilterBlockBuilder::BlockBasedFilterBlockBuilder( + const SliceTransform* prefix_extractor, + const BlockBasedTableOptions& table_opt) + : policy_(table_opt.filter_policy.get()), + prefix_extractor_(prefix_extractor), + whole_key_filtering_(table_opt.whole_key_filtering), + prev_prefix_start_(0), + prev_prefix_size_(0) { + assert(policy_); +} + +void BlockBasedFilterBlockBuilder::StartBlock(uint64_t block_offset) { + uint64_t filter_index = (block_offset / kFilterBase); + assert(filter_index >= filter_offsets_.size()); + while (filter_index > filter_offsets_.size()) { + GenerateFilter(); + } +} + +void BlockBasedFilterBlockBuilder::Add(const Slice& key) { + if (prefix_extractor_ && prefix_extractor_->InDomain(key)) { + AddPrefix(key); + } + + if (whole_key_filtering_) { + AddKey(key); + } +} + +// Add key to filter if needed +inline void BlockBasedFilterBlockBuilder::AddKey(const Slice& key) { + start_.push_back(entries_.size()); + entries_.append(key.data(), key.size()); +} + +// Add prefix to filter if needed +inline void BlockBasedFilterBlockBuilder::AddPrefix(const Slice& key) { + // get slice for most recently added entry + Slice prev; + if (prev_prefix_size_ > 0) { + prev = Slice(entries_.data() + prev_prefix_start_, prev_prefix_size_); + } + + Slice prefix = prefix_extractor_->Transform(key); + // insert prefix only when it's different from the previous prefix. 
+ if (prev.size() == 0 || prefix != prev) { + start_.push_back(entries_.size()); + prev_prefix_start_ = entries_.size(); + prev_prefix_size_ = prefix.size(); + entries_.append(prefix.data(), prefix.size()); + } +} + +Slice BlockBasedFilterBlockBuilder::Finish() { + if (!start_.empty()) { + GenerateFilter(); + } + + // Append array of per-filter offsets + const uint32_t array_offset = static_cast<uint32_t>(result_.size()); + for (size_t i = 0; i < filter_offsets_.size(); i++) { + PutFixed32(&result_, filter_offsets_[i]); + } + + PutFixed32(&result_, array_offset); + result_.push_back(kFilterBaseLg); // Save encoding parameter in result + return Slice(result_); +} + +void BlockBasedFilterBlockBuilder::GenerateFilter() { + const size_t num_entries = start_.size(); + if (num_entries == 0) { + // Fast path if there are no keys for this filter + filter_offsets_.push_back(static_cast<uint32_t>(result_.size())); + return; + } + + // Make list of keys from flattened key structure + start_.push_back(entries_.size()); // Simplify length computation + tmp_entries_.resize(num_entries); + for (size_t i = 0; i < num_entries; i++) { + const char* base = entries_.data() + start_[i]; + size_t length = start_[i + 1] - start_[i]; + tmp_entries_[i] = Slice(base, length); + } + + // Generate filter for current set of keys and append to result_. + filter_offsets_.push_back(static_cast<uint32_t>(result_.size())); + policy_->CreateFilter(&tmp_entries_[0], static_cast<int>(num_entries), + &result_); + + tmp_entries_.clear(); + entries_.clear(); + start_.clear(); + prev_prefix_start_ = 0; + prev_prefix_size_ = 0; +} + +BlockBasedFilterBlockReader::BlockBasedFilterBlockReader( + const SliceTransform* prefix_extractor, + const BlockBasedTableOptions& table_opt, bool _whole_key_filtering, + BlockContents&& contents, Statistics* stats) + : FilterBlockReader(contents.data.size(), stats, _whole_key_filtering), + policy_(table_opt.filter_policy.get()), + prefix_extractor_(prefix_extractor), + data_(nullptr), + offset_(nullptr), + num_(0), + base_lg_(0), + contents_(std::move(contents)) { + assert(policy_); + size_t n = contents_.data.size(); + if (n < 5) return; // 1 byte for base_lg_ and 4 for start of offset array + base_lg_ = contents_.data[n - 1]; + uint32_t last_word = DecodeFixed32(contents_.data.data() + n - 5); + if (last_word > n - 5) return; + data_ = contents_.data.data(); + offset_ = data_ + last_word; + num_ = (n - 5 - last_word) / 4; +} + +bool BlockBasedFilterBlockReader::KeyMayMatch(const Slice& key, + uint64_t block_offset) { + assert(block_offset != kNotValid); + if (!whole_key_filtering_) { + return true; + } + return MayMatch(key, block_offset); +} + +bool BlockBasedFilterBlockReader::PrefixMayMatch(const Slice& prefix, + uint64_t block_offset) { + assert(block_offset != kNotValid); + if (!prefix_extractor_) { + return true; + } + return MayMatch(prefix, block_offset); +} + +bool BlockBasedFilterBlockReader::MayMatch(const Slice& entry, + uint64_t block_offset) { + uint64_t index = block_offset >> base_lg_; + if (index < num_) { + uint32_t start = DecodeFixed32(offset_ + index * 4); + uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4); + if (start <= limit && limit <= (uint32_t)(offset_ - data_)) { + Slice filter = Slice(data_ + start, limit - start); + bool const may_match = policy_->KeyMayMatch(entry, filter); + if (may_match) { + PERF_COUNTER_ADD(bloom_sst_hit_count, 1); + return true; + } else { + PERF_COUNTER_ADD(bloom_sst_miss_count, 1); + return false; + } + } else if (start == limit) { + // Empty filters do not match any
entries + return false; + } + } + return true; // Errors are treated as potential matches +} + +size_t BlockBasedFilterBlockReader::ApproximateMemoryUsage() const { + return num_ * 4 + 5 + (offset_ - data_); +} + +std::string BlockBasedFilterBlockReader::ToString() const { + std::string result, filter_meta; + result.reserve(1024); + + std::string s_bo("Block offset"), s_hd("Hex dump"), s_fb("# filter blocks"); + AppendItem(&result, s_fb, rocksdb::ToString(num_)); + AppendItem(&result, s_bo, s_hd); + + for (size_t index = 0; index < num_; index++) { + uint32_t start = DecodeFixed32(offset_ + index * 4); + uint32_t limit = DecodeFixed32(offset_ + index * 4 + 4); + + if (start != limit) { + result.append(" filter block # " + rocksdb::ToString(index + 1) + "\n"); + Slice filter = Slice(data_ + start, limit - start); + AppendItem(&result, start, filter.ToString(true)); + } + } + return result; +} +} // namespace rocksdb diff --git a/external/rocksdb/table/block_based_filter_block.h b/external/rocksdb/table/block_based_filter_block.h new file mode 100755 index 0000000000..ca3f10e782 --- /dev/null +++ b/external/rocksdb/table/block_based_filter_block.h @@ -0,0 +1,107 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2012 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +// A filter block is stored near the end of a Table file. It contains +// filters (e.g., bloom filters) for all data blocks in the table combined +// into a single filter block. + +#pragma once + +#include <stddef.h> +#include <stdint.h> +#include <memory> +#include <string> +#include <vector> +#include "rocksdb/options.h" +#include "rocksdb/slice.h" +#include "rocksdb/slice_transform.h" +#include "table/filter_block.h" +#include "util/hash.h" + +namespace rocksdb { + + +// A BlockBasedFilterBlockBuilder is used to construct all of the filters for a +// particular Table. It generates a single string which is stored as +// a special block in the Table. +// +// The sequence of calls to BlockBasedFilterBlockBuilder must match the regexp: +// (StartBlock Add*)* Finish +class BlockBasedFilterBlockBuilder : public FilterBlockBuilder { + public: + BlockBasedFilterBlockBuilder(const SliceTransform* prefix_extractor, + const BlockBasedTableOptions& table_opt); + + virtual bool IsBlockBased() override { return true; } + virtual void StartBlock(uint64_t block_offset) override; + virtual void Add(const Slice& key) override; + virtual Slice Finish() override; + + private: + void AddKey(const Slice& key); + void AddPrefix(const Slice& key); + void GenerateFilter(); + + // important: all of these might point to invalid addresses + // at the time of destruction of this filter block. destructor + // should NOT dereference them. + const FilterPolicy* policy_; + const SliceTransform* prefix_extractor_; + bool whole_key_filtering_; + + size_t prev_prefix_start_; // the position of the last appended prefix + // to "entries_". + size_t prev_prefix_size_; // the length of the last appended prefix to + // "entries_".
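+ // (Illustrative aside, not part of the upstream patch: keys live + // back-to-back in entries_ below, delimited by start_, so entry i spans + // entries_[start_[i] .. start_[i+1]); GenerateFilter() rebuilds the Slices + // from these offsets.)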
+ std::string entries_; // Flattened entry contents + std::vector<size_t> start_; // Starting index in entries_ of each entry + std::string result_; // Filter data computed so far + std::vector<Slice> tmp_entries_; // policy_->CreateFilter() argument + std::vector<uint32_t> filter_offsets_; + + // No copying allowed + BlockBasedFilterBlockBuilder(const BlockBasedFilterBlockBuilder&); + void operator=(const BlockBasedFilterBlockBuilder&); +}; + +// A BlockBasedFilterBlockReader is used to parse the filter from an SST table. +// KeyMayMatch and PrefixMayMatch trigger the filter checks. +class BlockBasedFilterBlockReader : public FilterBlockReader { + public: + // REQUIRES: "contents" and *policy must stay live while *this is live. + BlockBasedFilterBlockReader(const SliceTransform* prefix_extractor, + const BlockBasedTableOptions& table_opt, + bool whole_key_filtering, + BlockContents&& contents, Statistics* statistics); + virtual bool IsBlockBased() override { return true; } + virtual bool KeyMayMatch(const Slice& key, + uint64_t block_offset = kNotValid) override; + virtual bool PrefixMayMatch(const Slice& prefix, + uint64_t block_offset = kNotValid) override; + virtual size_t ApproximateMemoryUsage() const override; + + // convert this object to a human readable form + std::string ToString() const override; + + private: + const FilterPolicy* policy_; + const SliceTransform* prefix_extractor_; + const char* data_; // Pointer to filter data (at block-start) + const char* offset_; // Pointer to beginning of offset array (at block-end) + size_t num_; // Number of entries in offset array + size_t base_lg_; // Encoding parameter (see kFilterBaseLg in .cc file) + BlockContents contents_; + + bool MayMatch(const Slice& entry, uint64_t block_offset); + + // No copying allowed + BlockBasedFilterBlockReader(const BlockBasedFilterBlockReader&); + void operator=(const BlockBasedFilterBlockReader&); +}; +} // namespace rocksdb diff --git a/external/rocksdb/table/block_based_filter_block_test.cc b/external/rocksdb/table/block_based_filter_block_test.cc new file mode 100755 index 0000000000..c28b0008d5 --- /dev/null +++ b/external/rocksdb/table/block_based_filter_block_test.cc @@ -0,0 +1,248 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2012 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors.
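Before the tests: the EmptyBuilder cases below expect exactly five bytes from Finish(), because an empty builder writes only the fixed32 position of the (empty) offset array, which is 0, followed by the single encoding-parameter byte kFilterBaseLg (11 = 0x0b). A standalone check of that arithmetic, illustrative only and not part of the patch:

#include <cassert>
#include <string>

int main() {
  std::string result;
  // PutFixed32(&result, array_offset) with array_offset == 0:
  result.append(4, '\0');
  // result.push_back(kFilterBaseLg):
  result.push_back(static_cast<char>(11));
  assert(result.size() == 5);
  assert(result[4] == '\x0b');  // matches the expected "\x00\x00\x00\x00\x0b"
  return 0;
}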
+ +#include "table/block_based_filter_block.h" + +#include "rocksdb/filter_policy.h" +#include "util/coding.h" +#include "util/hash.h" +#include "util/logging.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { + +// For testing: emit an array with one hash value per key +class TestHashFilter : public FilterPolicy { + public: + virtual const char* Name() const override { return "TestHashFilter"; } + + virtual void CreateFilter(const Slice* keys, int n, + std::string* dst) const override { + for (int i = 0; i < n; i++) { + uint32_t h = Hash(keys[i].data(), keys[i].size(), 1); + PutFixed32(dst, h); + } + } + + virtual bool KeyMayMatch(const Slice& key, + const Slice& filter) const override { + uint32_t h = Hash(key.data(), key.size(), 1); + for (unsigned int i = 0; i + 4 <= filter.size(); i += 4) { + if (h == DecodeFixed32(filter.data() + i)) { + return true; + } + } + return false; + } +}; + +class FilterBlockTest : public testing::Test { + public: + TestHashFilter policy_; + BlockBasedTableOptions table_options_; + + FilterBlockTest() { + table_options_.filter_policy.reset(new TestHashFilter()); + } +}; + +TEST_F(FilterBlockTest, EmptyBuilder) { + BlockBasedFilterBlockBuilder builder(nullptr, table_options_); + BlockContents block(builder.Finish(), false, kNoCompression); + ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block.data)); + BlockBasedFilterBlockReader reader(nullptr, table_options_, true, + std::move(block), nullptr); + ASSERT_TRUE(reader.KeyMayMatch("foo", 0)); + ASSERT_TRUE(reader.KeyMayMatch("foo", 100000)); +} + +TEST_F(FilterBlockTest, SingleChunk) { + BlockBasedFilterBlockBuilder builder(nullptr, table_options_); + builder.StartBlock(100); + builder.Add("foo"); + builder.Add("bar"); + builder.Add("box"); + builder.StartBlock(200); + builder.Add("box"); + builder.StartBlock(300); + builder.Add("hello"); + BlockContents block(builder.Finish(), false, kNoCompression); + BlockBasedFilterBlockReader reader(nullptr, table_options_, true, + std::move(block), nullptr); + ASSERT_TRUE(reader.KeyMayMatch("foo", 100)); + ASSERT_TRUE(reader.KeyMayMatch("bar", 100)); + ASSERT_TRUE(reader.KeyMayMatch("box", 100)); + ASSERT_TRUE(reader.KeyMayMatch("hello", 100)); + ASSERT_TRUE(reader.KeyMayMatch("foo", 100)); + ASSERT_TRUE(!reader.KeyMayMatch("missing", 100)); + ASSERT_TRUE(!reader.KeyMayMatch("other", 100)); +} + +TEST_F(FilterBlockTest, MultiChunk) { + BlockBasedFilterBlockBuilder builder(nullptr, table_options_); + + // First filter + builder.StartBlock(0); + builder.Add("foo"); + builder.StartBlock(2000); + builder.Add("bar"); + + // Second filter + builder.StartBlock(3100); + builder.Add("box"); + + // Third filter is empty + + // Last filter + builder.StartBlock(9000); + builder.Add("box"); + builder.Add("hello"); + + BlockContents block(builder.Finish(), false, kNoCompression); + BlockBasedFilterBlockReader reader(nullptr, table_options_, true, + std::move(block), nullptr); + + // Check first filter + ASSERT_TRUE(reader.KeyMayMatch("foo", 0)); + ASSERT_TRUE(reader.KeyMayMatch("bar", 2000)); + ASSERT_TRUE(!reader.KeyMayMatch("box", 0)); + ASSERT_TRUE(!reader.KeyMayMatch("hello", 0)); + + // Check second filter + ASSERT_TRUE(reader.KeyMayMatch("box", 3100)); + ASSERT_TRUE(!reader.KeyMayMatch("foo", 3100)); + ASSERT_TRUE(!reader.KeyMayMatch("bar", 3100)); + ASSERT_TRUE(!reader.KeyMayMatch("hello", 3100)); + + // Check third filter (empty) + ASSERT_TRUE(!reader.KeyMayMatch("foo", 4100)); + ASSERT_TRUE(!reader.KeyMayMatch("bar", 4100)); + 
ASSERT_TRUE(!reader.KeyMayMatch("box", 4100)); + ASSERT_TRUE(!reader.KeyMayMatch("hello", 4100)); + + // Check last filter + ASSERT_TRUE(reader.KeyMayMatch("box", 9000)); + ASSERT_TRUE(reader.KeyMayMatch("hello", 9000)); + ASSERT_TRUE(!reader.KeyMayMatch("foo", 9000)); + ASSERT_TRUE(!reader.KeyMayMatch("bar", 9000)); +} + +// Test for block based filter block +// use new interface in FilterPolicy to create filter builder/reader +class BlockBasedFilterBlockTest : public testing::Test { + public: + BlockBasedTableOptions table_options_; + + BlockBasedFilterBlockTest() { + table_options_.filter_policy.reset(NewBloomFilterPolicy(10)); + } + + ~BlockBasedFilterBlockTest() {} +}; + +TEST_F(BlockBasedFilterBlockTest, BlockBasedEmptyBuilder) { + FilterBlockBuilder* builder = new BlockBasedFilterBlockBuilder( + nullptr, table_options_); + BlockContents block(builder->Finish(), false, kNoCompression); + ASSERT_EQ("\\x00\\x00\\x00\\x00\\x0b", EscapeString(block.data)); + FilterBlockReader* reader = new BlockBasedFilterBlockReader( + nullptr, table_options_, true, std::move(block), nullptr); + ASSERT_TRUE(reader->KeyMayMatch("foo", 0)); + ASSERT_TRUE(reader->KeyMayMatch("foo", 100000)); + + delete builder; + delete reader; +} + +TEST_F(BlockBasedFilterBlockTest, BlockBasedSingleChunk) { + FilterBlockBuilder* builder = new BlockBasedFilterBlockBuilder( + nullptr, table_options_); + builder->StartBlock(100); + builder->Add("foo"); + builder->Add("bar"); + builder->Add("box"); + builder->StartBlock(200); + builder->Add("box"); + builder->StartBlock(300); + builder->Add("hello"); + BlockContents block(builder->Finish(), false, kNoCompression); + FilterBlockReader* reader = new BlockBasedFilterBlockReader( + nullptr, table_options_, true, std::move(block), nullptr); + ASSERT_TRUE(reader->KeyMayMatch("foo", 100)); + ASSERT_TRUE(reader->KeyMayMatch("bar", 100)); + ASSERT_TRUE(reader->KeyMayMatch("box", 100)); + ASSERT_TRUE(reader->KeyMayMatch("hello", 100)); + ASSERT_TRUE(reader->KeyMayMatch("foo", 100)); + ASSERT_TRUE(!reader->KeyMayMatch("missing", 100)); + ASSERT_TRUE(!reader->KeyMayMatch("other", 100)); + + delete builder; + delete reader; +} + +TEST_F(BlockBasedFilterBlockTest, BlockBasedMultiChunk) { + FilterBlockBuilder* builder = new BlockBasedFilterBlockBuilder( + nullptr, table_options_); + + // First filter + builder->StartBlock(0); + builder->Add("foo"); + builder->StartBlock(2000); + builder->Add("bar"); + + // Second filter + builder->StartBlock(3100); + builder->Add("box"); + + // Third filter is empty + + // Last filter + builder->StartBlock(9000); + builder->Add("box"); + builder->Add("hello"); + + BlockContents block(builder->Finish(), false, kNoCompression); + FilterBlockReader* reader = new BlockBasedFilterBlockReader( + nullptr, table_options_, true, std::move(block), nullptr); + + // Check first filter + ASSERT_TRUE(reader->KeyMayMatch("foo", 0)); + ASSERT_TRUE(reader->KeyMayMatch("bar", 2000)); + ASSERT_TRUE(!reader->KeyMayMatch("box", 0)); + ASSERT_TRUE(!reader->KeyMayMatch("hello", 0)); + + // Check second filter + ASSERT_TRUE(reader->KeyMayMatch("box", 3100)); + ASSERT_TRUE(!reader->KeyMayMatch("foo", 3100)); + ASSERT_TRUE(!reader->KeyMayMatch("bar", 3100)); + ASSERT_TRUE(!reader->KeyMayMatch("hello", 3100)); + + // Check third filter (empty) + ASSERT_TRUE(!reader->KeyMayMatch("foo", 4100)); + ASSERT_TRUE(!reader->KeyMayMatch("bar", 4100)); + ASSERT_TRUE(!reader->KeyMayMatch("box", 4100)); + ASSERT_TRUE(!reader->KeyMayMatch("hello", 4100)); + + // Check last filter + 
ASSERT_TRUE(reader->KeyMayMatch("box", 9000)); + ASSERT_TRUE(reader->KeyMayMatch("hello", 9000)); + ASSERT_TRUE(!reader->KeyMayMatch("foo", 9000)); + ASSERT_TRUE(!reader->KeyMayMatch("bar", 9000)); + + delete builder; + delete reader; +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/table/block_based_table_builder.cc b/external/rocksdb/table/block_based_table_builder.cc new file mode 100755 index 0000000000..21720bdb74 --- /dev/null +++ b/external/rocksdb/table/block_based_table_builder.cc @@ -0,0 +1,1000 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "table/block_based_table_builder.h" + +#include +#include +#include + +#include +#include +#include +#include +#include + +#include "db/dbformat.h" + +#include "rocksdb/cache.h" +#include "rocksdb/comparator.h" +#include "rocksdb/env.h" +#include "rocksdb/filter_policy.h" +#include "rocksdb/flush_block_policy.h" +#include "rocksdb/merge_operator.h" +#include "rocksdb/table.h" + +#include "table/block.h" +#include "table/block_based_table_reader.h" +#include "table/block_builder.h" +#include "table/filter_block.h" +#include "table/block_based_filter_block.h" +#include "table/block_based_table_factory.h" +#include "table/full_filter_block.h" +#include "table/format.h" +#include "table/meta_blocks.h" +#include "table/table_builder.h" + +#include "util/string_util.h" +#include "util/coding.h" +#include "util/compression.h" +#include "util/crc32c.h" +#include "util/stop_watch.h" +#include "util/xxhash.h" + +namespace rocksdb { + +extern const std::string kHashIndexPrefixesBlock; +extern const std::string kHashIndexPrefixesMetadataBlock; + +typedef BlockBasedTableOptions::IndexType IndexType; + +// The interface for building index. +// Instruction for adding a new concrete IndexBuilder: +// 1. Create a subclass instantiated from IndexBuilder. +// 2. Add a new entry associated with that subclass in TableOptions::IndexType. +// 3. Add a create function for the new subclass in CreateIndexBuilder. +// Note: we can devise more advanced design to simplify the process for adding +// new subclass, which will, on the other hand, increase the code complexity and +// catch unwanted attention from readers. Given that we won't add/change +// indexes frequently, it makes sense to just embrace a more straightforward +// design that just works. +class IndexBuilder { + public: + // Index builder will construct a set of blocks which contain: + // 1. One primary index block. + // 2. (Optional) a set of metablocks that contains the metadata of the + // primary index. + struct IndexBlocks { + Slice index_block_contents; + std::unordered_map meta_blocks; + }; + explicit IndexBuilder(const Comparator* comparator) + : comparator_(comparator) {} + + virtual ~IndexBuilder() {} + + // Add a new index entry to index block. 
+ // To allow further optimization, we provide `last_key_in_current_block` and + // `first_key_in_next_block`, based on which the specific implementation can + // determine the best index key to be used for the index block. + // @last_key_in_current_block: this parameter may be overridden with the value + // "substitute key". + // @first_key_in_next_block: it will be nullptr if the entry being added is + // the last one in the table + // + // REQUIRES: Finish() has not yet been called. + virtual void AddIndexEntry(std::string* last_key_in_current_block, + const Slice* first_key_in_next_block, + const BlockHandle& block_handle) = 0; + + // This method will be called whenever a key is added. The subclasses may + // override OnKeyAdded() if they need to collect additional information. + virtual void OnKeyAdded(const Slice& key) {} + + // Inform the index builder that all entries have been written. The block + // builder may then perform any operation required for block finalization. + // + // REQUIRES: Finish() has not yet been called. + virtual Status Finish(IndexBlocks* index_blocks) = 0; + + // Get the estimated size for index block. + virtual size_t EstimatedSize() const = 0; + + protected: + const Comparator* comparator_; +}; + +// This index builder builds a space-efficient index block. +// +// Optimizations: +// 1. Set the block's `block_restart_interval` to 1, which avoids a linear +// search during index lookup (can be disabled by setting +// index_block_restart_interval). +// 2. Shorten the key length for the index block. Rather than honestly using the +// last key in the data block as the index key, we find a shortest +// substitute key that serves the same function. +class ShortenedIndexBuilder : public IndexBuilder { + public: + explicit ShortenedIndexBuilder(const Comparator* comparator, + int index_block_restart_interval) + : IndexBuilder(comparator), + index_block_builder_(index_block_restart_interval) {} + + virtual void AddIndexEntry(std::string* last_key_in_current_block, + const Slice* first_key_in_next_block, + const BlockHandle& block_handle) override { + if (first_key_in_next_block != nullptr) { + comparator_->FindShortestSeparator(last_key_in_current_block, + *first_key_in_next_block); + } else { + comparator_->FindShortSuccessor(last_key_in_current_block); + } + + std::string handle_encoding; + block_handle.EncodeTo(&handle_encoding); + index_block_builder_.Add(*last_key_in_current_block, handle_encoding); + } + + virtual Status Finish(IndexBlocks* index_blocks) override { + index_blocks->index_block_contents = index_block_builder_.Finish(); + return Status::OK(); + } + + virtual size_t EstimatedSize() const override { + return index_block_builder_.CurrentSizeEstimate(); + } + + private: + BlockBuilder index_block_builder_; +}; + +// HashIndexBuilder contains a binary-searchable primary index and the +// metadata for secondary hash index construction. +// The metadata for the hash index consists of two parts: +// - a metablock that compactly contains a sequence of prefixes. All prefixes +// are stored consecutively without any metadata (such as prefix sizes), +// which is kept in the other metablock. +// - a metablock that contains the metadata of the prefixes, including prefix size, +// restart index and number of blocks it spans.
The format looks like: +// +// +-----------------+---------------------------+---------------------+ <=prefix 1 +// | length: 4 bytes | restart interval: 4 bytes | num-blocks: 4 bytes | +// +-----------------+---------------------------+---------------------+ <=prefix 2 +// | length: 4 bytes | restart interval: 4 bytes | num-blocks: 4 bytes | +// +-----------------+---------------------------+---------------------+ +// | | +// | .... | +// | | +// +-----------------+---------------------------+---------------------+ <=prefix n +// | length: 4 bytes | restart interval: 4 bytes | num-blocks: 4 bytes | +// +-----------------+---------------------------+---------------------+ +// +// The reason for separating these two metablocks is to enable efficient reuse +// of the first metablock during hash index construction without unnecessary +// data copies or small heap allocations for prefixes. +class HashIndexBuilder : public IndexBuilder { + public: + explicit HashIndexBuilder(const Comparator* comparator, + const SliceTransform* hash_key_extractor, + int index_block_restart_interval) + : IndexBuilder(comparator), + primary_index_builder_(comparator, index_block_restart_interval), + hash_key_extractor_(hash_key_extractor) {} + + virtual void AddIndexEntry(std::string* last_key_in_current_block, + const Slice* first_key_in_next_block, + const BlockHandle& block_handle) override { + ++current_restart_index_; + primary_index_builder_.AddIndexEntry(last_key_in_current_block, + first_key_in_next_block, block_handle); + } + + virtual void OnKeyAdded(const Slice& key) override { + auto key_prefix = hash_key_extractor_->Transform(key); + bool is_first_entry = pending_block_num_ == 0; + + // Keys may share the prefix + if (is_first_entry || pending_entry_prefix_ != key_prefix) { + if (!is_first_entry) { + FlushPendingPrefix(); + } + + // need a hard copy otherwise the underlying data changes all the time. + // TODO(kailiu) ToString() is expensive. We may speed this up by avoiding + // the data copy. + pending_entry_prefix_ = key_prefix.ToString(); + pending_block_num_ = 1; + pending_entry_index_ = static_cast<uint32_t>(current_restart_index_); + } else { + // The block count increments when keys sharing the prefix reside in + // different data blocks.
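+ // (Illustrative aside, not part of the upstream patch: each flushed + // prefix becomes a <prefix size, restart index, block count> varint triple; + // a prefix first seen at restart entry 3 and spanning entries 3..5 is + // recorded as <size, 3, 3>.)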
+ auto last_restart_index = pending_entry_index_ + pending_block_num_ - 1; + assert(last_restart_index <= current_restart_index_); + if (last_restart_index != current_restart_index_) { + ++pending_block_num_; + } + } + } + + virtual Status Finish(IndexBlocks* index_blocks) override { + FlushPendingPrefix(); + primary_index_builder_.Finish(index_blocks); + index_blocks->meta_blocks.insert( + {kHashIndexPrefixesBlock.c_str(), prefix_block_}); + index_blocks->meta_blocks.insert( + {kHashIndexPrefixesMetadataBlock.c_str(), prefix_meta_block_}); + return Status::OK(); + } + + virtual size_t EstimatedSize() const override { + return primary_index_builder_.EstimatedSize() + prefix_block_.size() + + prefix_meta_block_.size(); + } + + private: + void FlushPendingPrefix() { + prefix_block_.append(pending_entry_prefix_.data(), + pending_entry_prefix_.size()); + PutVarint32Varint32Varint32( + &prefix_meta_block_, + static_cast<uint32_t>(pending_entry_prefix_.size()), + pending_entry_index_, pending_block_num_); + } + + ShortenedIndexBuilder primary_index_builder_; + const SliceTransform* hash_key_extractor_; + + // stores a sequence of prefixes + std::string prefix_block_; + // stores the metadata of prefixes + std::string prefix_meta_block_; + + // The following 3 variables keep the unflushed prefix and its metadata. + // The details of block_num and entry_index can be found in + // "block_hash_index.{h,cc}" + uint32_t pending_block_num_ = 0; + uint32_t pending_entry_index_ = 0; + std::string pending_entry_prefix_; + + uint64_t current_restart_index_ = 0; +}; + +// Without the anonymous namespace here, we would fail the +// -Wmissing-prototypes warning +namespace { + +// Create an index builder based on its type. +IndexBuilder* CreateIndexBuilder(IndexType type, const Comparator* comparator, + const SliceTransform* prefix_extractor, + int index_block_restart_interval) { + switch (type) { + case BlockBasedTableOptions::kBinarySearch: { + return new ShortenedIndexBuilder(comparator, + index_block_restart_interval); + } + case BlockBasedTableOptions::kHashSearch: { + return new HashIndexBuilder(comparator, prefix_extractor, + index_block_restart_interval); + } + default: { + assert(!"Do not recognize the index type "); + return nullptr; + } + } + // impossible. + assert(false); + return nullptr; +} + +// Create a filter block builder based on its type. +FilterBlockBuilder* CreateFilterBlockBuilder(const ImmutableCFOptions& opt, + const BlockBasedTableOptions& table_opt) { + if (table_opt.filter_policy == nullptr) return nullptr; + + FilterBitsBuilder* filter_bits_builder = + table_opt.filter_policy->GetFilterBitsBuilder(); + if (filter_bits_builder == nullptr) { + return new BlockBasedFilterBlockBuilder(opt.prefix_extractor, table_opt); + } else { + return new FullFilterBlockBuilder(opt.prefix_extractor, + table_opt.whole_key_filtering, + filter_bits_builder); + } +} + +bool GoodCompressionRatio(size_t compressed_size, size_t raw_size) { + // Require the compressed size to be at least 12.5% smaller than the raw size + return compressed_size < raw_size - (raw_size / 8u); +} + +// format_version is the block format as defined in include/rocksdb/table.h +Slice CompressBlock(const Slice& raw, + const CompressionOptions& compression_options, + CompressionType* type, uint32_t format_version, + const Slice& compression_dict, + std::string* compressed_output) { + if (*type == kNoCompression) { + return raw; + } + + // Will return compressed block contents if (1) the compression method is + // supported on this platform and (2) the compression rate is "good enough".
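+ // (Illustrative aside, not part of the upstream patch: + // GoodCompressionRatio() above demands compressed_size < raw_size - + // raw_size / 8, so a 4096-byte block stays compressed only if its + // compressed form is under 3584 bytes.)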
+ switch (*type) { + case kSnappyCompression: + if (Snappy_Compress(compression_options, raw.data(), raw.size(), + compressed_output) && + GoodCompressionRatio(compressed_output->size(), raw.size())) { + return *compressed_output; + } + break; // fall back to no compression. + case kZlibCompression: + if (Zlib_Compress( + compression_options, + GetCompressFormatForVersion(kZlibCompression, format_version), + raw.data(), raw.size(), compressed_output, compression_dict) && + GoodCompressionRatio(compressed_output->size(), raw.size())) { + return *compressed_output; + } + break; // fall back to no compression. + case kBZip2Compression: + if (BZip2_Compress( + compression_options, + GetCompressFormatForVersion(kBZip2Compression, format_version), + raw.data(), raw.size(), compressed_output) && + GoodCompressionRatio(compressed_output->size(), raw.size())) { + return *compressed_output; + } + break; // fall back to no compression. + case kLZ4Compression: + if (LZ4_Compress( + compression_options, + GetCompressFormatForVersion(kLZ4Compression, format_version), + raw.data(), raw.size(), compressed_output, compression_dict) && + GoodCompressionRatio(compressed_output->size(), raw.size())) { + return *compressed_output; + } + break; // fall back to no compression. + case kLZ4HCCompression: + if (LZ4HC_Compress( + compression_options, + GetCompressFormatForVersion(kLZ4HCCompression, format_version), + raw.data(), raw.size(), compressed_output, compression_dict) && + GoodCompressionRatio(compressed_output->size(), raw.size())) { + return *compressed_output; + } + break; // fall back to no compression. + case kXpressCompression: + if (XPRESS_Compress(raw.data(), raw.size(), + compressed_output) && + GoodCompressionRatio(compressed_output->size(), raw.size())) { + return *compressed_output; + } + break; + case kZSTDNotFinalCompression: + if (ZSTD_Compress(compression_options, raw.data(), raw.size(), + compressed_output, compression_dict) && + GoodCompressionRatio(compressed_output->size(), raw.size())) { + return *compressed_output; + } + break; // fall back to no compression. + default: {} // Do not recognize this compression type + } + + // The compression method is not supported, or the compression ratio was not + // good enough, so fall back to the uncompressed form. + *type = kNoCompression; + return raw; +} + +} // namespace + +// kBlockBasedTableMagicNumber was picked by running +// echo rocksdb.table.block_based | sha1sum +// and taking the leading 64 bits. +// Please note that kBlockBasedTableMagicNumber may also be accessed by other +// .cc files; +// for that reason we declare it extern in the header, but to get the space +// allocated +// it must be non-extern in exactly one place. +const uint64_t kBlockBasedTableMagicNumber = 0x88e241b785f4cff7ull; +// We also support reading and writing legacy block based table format (for +// backwards compatibility) +const uint64_t kLegacyBlockBasedTableMagicNumber = 0xdb4775248b80fb57ull; + +// A collector that collects properties of interest to the block-based table. +// For now this class looks heavy-weight since we only write one additional +// property. +// But in the foreseeable future, we will add more and more properties that are +// specific to the block-based table.
+class BlockBasedTableBuilder::BlockBasedTablePropertiesCollector + : public IntTblPropCollector { + public: + explicit BlockBasedTablePropertiesCollector( + BlockBasedTableOptions::IndexType index_type, bool whole_key_filtering, + bool prefix_filtering) + : index_type_(index_type), + whole_key_filtering_(whole_key_filtering), + prefix_filtering_(prefix_filtering) {} + + virtual Status InternalAdd(const Slice& key, const Slice& value, + uint64_t file_size) override { + // Intentionally left blank. We have no interest in collecting stats for + // individual key/value pairs. + return Status::OK(); + } + + virtual Status Finish(UserCollectedProperties* properties) override { + std::string val; + PutFixed32(&val, static_cast<uint32_t>(index_type_)); + properties->insert({BlockBasedTablePropertyNames::kIndexType, val}); + properties->insert({BlockBasedTablePropertyNames::kWholeKeyFiltering, + whole_key_filtering_ ? kPropTrue : kPropFalse}); + properties->insert({BlockBasedTablePropertyNames::kPrefixFiltering, + prefix_filtering_ ? kPropTrue : kPropFalse}); + return Status::OK(); + } + + // The name of the properties collector can be used for debugging purposes. + virtual const char* Name() const override { + return "BlockBasedTablePropertiesCollector"; + } + + virtual UserCollectedProperties GetReadableProperties() const override { + // Intentionally left blank. + return UserCollectedProperties(); + } + + private: + BlockBasedTableOptions::IndexType index_type_; + bool whole_key_filtering_; + bool prefix_filtering_; +}; + +struct BlockBasedTableBuilder::Rep { + const ImmutableCFOptions ioptions; + const BlockBasedTableOptions table_options; + const InternalKeyComparator& internal_comparator; + WritableFileWriter* file; + uint64_t offset = 0; + Status status; + BlockBuilder data_block; + + InternalKeySliceTransform internal_prefix_transform; + std::unique_ptr<IndexBuilder> index_builder; + + std::string last_key; + const CompressionType compression_type; + const CompressionOptions compression_opts; + // Data for presetting the compression library's dictionary, or nullptr. + const std::string* compression_dict; + TableProperties props; + + bool closed = false; // Either Finish() or Abandon() has been called.
+ std::unique_ptr<FilterBlockBuilder> filter_block; + char compressed_cache_key_prefix[BlockBasedTable::kMaxCacheKeyPrefixSize]; + size_t compressed_cache_key_prefix_size; + + BlockHandle pending_handle; // Handle to add to index block + + std::string compressed_output; + std::unique_ptr<FlushBlockPolicy> flush_block_policy; + uint32_t column_family_id; + const std::string& column_family_name; + + std::vector<std::unique_ptr<IntTblPropCollector>> table_properties_collectors; + + Rep(const ImmutableCFOptions& _ioptions, + const BlockBasedTableOptions& table_opt, + const InternalKeyComparator& icomparator, + const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>* + int_tbl_prop_collector_factories, + uint32_t _column_family_id, WritableFileWriter* f, + const CompressionType _compression_type, + const CompressionOptions& _compression_opts, + const std::string* _compression_dict, const bool skip_filters, + const std::string& _column_family_name) + : ioptions(_ioptions), + table_options(table_opt), + internal_comparator(icomparator), + file(f), + data_block(table_options.block_restart_interval, + table_options.use_delta_encoding), + internal_prefix_transform(_ioptions.prefix_extractor), + index_builder( + CreateIndexBuilder(table_options.index_type, &internal_comparator, + &this->internal_prefix_transform, + table_options.index_block_restart_interval)), + compression_type(_compression_type), + compression_opts(_compression_opts), + compression_dict(_compression_dict), + filter_block(skip_filters ? nullptr : CreateFilterBlockBuilder( + _ioptions, table_options)), + flush_block_policy( + table_options.flush_block_policy_factory->NewFlushBlockPolicy( + table_options, data_block)), + column_family_id(_column_family_id), + column_family_name(_column_family_name) { + for (auto& collector_factories : *int_tbl_prop_collector_factories) { + table_properties_collectors.emplace_back( + collector_factories->CreateIntTblPropCollector(column_family_id)); + } + table_properties_collectors.emplace_back( + new BlockBasedTablePropertiesCollector( + table_options.index_type, table_options.whole_key_filtering, + _ioptions.prefix_extractor != nullptr)); + } +}; + +BlockBasedTableBuilder::BlockBasedTableBuilder( + const ImmutableCFOptions& ioptions, + const BlockBasedTableOptions& table_options, + const InternalKeyComparator& internal_comparator, + const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>* + int_tbl_prop_collector_factories, + uint32_t column_family_id, WritableFileWriter* file, + const CompressionType compression_type, + const CompressionOptions& compression_opts, + const std::string* compression_dict, const bool skip_filters, + const std::string& column_family_name) { + BlockBasedTableOptions sanitized_table_options(table_options); + if (sanitized_table_options.format_version == 0 && + sanitized_table_options.checksum != kCRC32c) { + Log(InfoLogLevel::WARN_LEVEL, ioptions.info_log, + "Silently converting format_version to 1 because checksum is " + "non-default"); + // silently convert format_version to 1 to keep consistent with current + // behavior + sanitized_table_options.format_version = 1; + } + + rep_ = new Rep(ioptions, sanitized_table_options, internal_comparator, + int_tbl_prop_collector_factories, column_family_id, file, + compression_type, compression_opts, compression_dict, + skip_filters, column_family_name); + + if (rep_->filter_block != nullptr) { + rep_->filter_block->StartBlock(0); + } + if (table_options.block_cache_compressed.get() != nullptr) { + BlockBasedTable::GenerateCachePrefix( + table_options.block_cache_compressed.get(), file->writable_file(), + &rep_->compressed_cache_key_prefix[0], +
&rep_->compressed_cache_key_prefix_size); + } +} + +BlockBasedTableBuilder::~BlockBasedTableBuilder() { + assert(rep_->closed); // Catch errors where caller forgot to call Finish() + delete rep_; +} + +void BlockBasedTableBuilder::Add(const Slice& key, const Slice& value) { + Rep* r = rep_; + assert(!r->closed); + if (!ok()) return; + if (r->props.num_entries > 0) { + assert(r->internal_comparator.Compare(key, Slice(r->last_key)) > 0); + } + + auto should_flush = r->flush_block_policy->Update(key, value); + if (should_flush) { + assert(!r->data_block.empty()); + Flush(); + + // Add item to index block. + // We do not emit the index entry for a block until we have seen the + // first key for the next data block. This allows us to use shorter + // keys in the index block. For example, consider a block boundary + // between the keys "the quick brown fox" and "the who". We can use + // "the r" as the key for the index block entry since it is >= all + // entries in the first block and < all entries in subsequent + // blocks. + if (ok()) { + r->index_builder->AddIndexEntry(&r->last_key, &key, r->pending_handle); + } + } + + if (r->filter_block != nullptr) { + r->filter_block->Add(ExtractUserKey(key)); + } + + r->last_key.assign(key.data(), key.size()); + r->data_block.Add(key, value); + r->props.num_entries++; + r->props.raw_key_size += key.size(); + r->props.raw_value_size += value.size(); + + r->index_builder->OnKeyAdded(key); + NotifyCollectTableCollectorsOnAdd(key, value, r->offset, + r->table_properties_collectors, + r->ioptions.info_log); +} + +void BlockBasedTableBuilder::Flush() { + Rep* r = rep_; + assert(!r->closed); + if (!ok()) return; + if (r->data_block.empty()) return; + WriteBlock(&r->data_block, &r->pending_handle, true /* is_data_block */); + if (ok() && !r->table_options.skip_table_builder_flush) { + r->status = r->file->Flush(); + } + if (r->filter_block != nullptr) { + r->filter_block->StartBlock(r->offset); + } + r->props.data_size = r->offset; + ++r->props.num_data_blocks; +} + +void BlockBasedTableBuilder::WriteBlock(BlockBuilder* block, + BlockHandle* handle, + bool is_data_block) { + WriteBlock(block->Finish(), handle, is_data_block); + block->Reset(); +} + +void BlockBasedTableBuilder::WriteBlock(const Slice& raw_block_contents, + BlockHandle* handle, + bool is_data_block) { + // File format contains a sequence of blocks where each block has: + // block_data: uint8[n] + // type: uint8 + // crc: uint32 + assert(ok()); + Rep* r = rep_; + + auto type = r->compression_type; + Slice block_contents; + bool abort_compression = false; + + StopWatchNano timer(r->ioptions.env, + ShouldReportDetailedTime(r->ioptions.env, r->ioptions.statistics)); + + if (raw_block_contents.size() < kCompressionSizeLimit) { + Slice compression_dict; + if (is_data_block && r->compression_dict && r->compression_dict->size()) { + compression_dict = *r->compression_dict; + } + + block_contents = CompressBlock(raw_block_contents, r->compression_opts, + &type, r->table_options.format_version, + compression_dict, &r->compressed_output); + + // Some of the compression algorithms are known to be unreliable. If + // the verify_compression flag is set then try to de-compress the + // compressed data and compare to the input. 
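+ // (Illustrative aside, not part of the upstream patch: the block below + // decompresses the freshly compressed output and byte-compares it with + // raw_block_contents; any mismatch sets abort_compression and a + // Corruption status, so the block is then written uncompressed.)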
+ if (type != kNoCompression && r->table_options.verify_compression) { + // Retrieve the uncompressed contents into a new buffer + BlockContents contents; + Status stat = UncompressBlockContentsForCompressionType( + block_contents.data(), block_contents.size(), &contents, + r->table_options.format_version, compression_dict, type, + r->ioptions); + + if (stat.ok()) { + bool compressed_ok = contents.data.compare(raw_block_contents) == 0; + if (!compressed_ok) { + // The result of the compression was invalid. abort. + abort_compression = true; + Log(InfoLogLevel::ERROR_LEVEL, r->ioptions.info_log, + "Decompressed block did not match raw block"); + r->status = + Status::Corruption("Decompressed block did not match raw block"); + } + } else { + // Decompression reported an error. abort. + r->status = Status::Corruption("Could not decompress"); + abort_compression = true; + } + } + } else { + // Block is too big to be compressed. + abort_compression = true; + } + + // Abort compression if the block is too big, or did not pass + // verification. + if (abort_compression) { + RecordTick(r->ioptions.statistics, NUMBER_BLOCK_NOT_COMPRESSED); + type = kNoCompression; + block_contents = raw_block_contents; + } + else if (type != kNoCompression && + ShouldReportDetailedTime(r->ioptions.env, + r->ioptions.statistics)) { + MeasureTime(r->ioptions.statistics, COMPRESSION_TIMES_NANOS, + timer.ElapsedNanos()); + MeasureTime(r->ioptions.statistics, BYTES_COMPRESSED, + raw_block_contents.size()); + RecordTick(r->ioptions.statistics, NUMBER_BLOCK_COMPRESSED); + } + + WriteRawBlock(block_contents, type, handle); + r->compressed_output.clear(); +} + +void BlockBasedTableBuilder::WriteRawBlock(const Slice& block_contents, + CompressionType type, + BlockHandle* handle) { + Rep* r = rep_; + StopWatch sw(r->ioptions.env, r->ioptions.statistics, WRITE_RAW_BLOCK_MICROS); + handle->set_offset(r->offset); + handle->set_size(block_contents.size()); + r->status = r->file->Append(block_contents); + if (r->status.ok()) { + char trailer[kBlockTrailerSize]; + trailer[0] = type; + char* trailer_without_type = trailer + 1; + switch (r->table_options.checksum) { + case kNoChecksum: + // we don't support no checksum yet + assert(false); + // intentional fallthrough in release binary + case kCRC32c: { + auto crc = crc32c::Value(block_contents.data(), block_contents.size()); + crc = crc32c::Extend(crc, trailer, 1); // Extend to cover block type + EncodeFixed32(trailer_without_type, crc32c::Mask(crc)); + break; + } + case kxxHash: { + void* xxh = XXH32_init(0); + XXH32_update(xxh, block_contents.data(), + static_cast<uint32_t>(block_contents.size())); + XXH32_update(xxh, trailer, 1); // Extend to cover block type + EncodeFixed32(trailer_without_type, XXH32_digest(xxh)); + break; + } + } + + r->status = r->file->Append(Slice(trailer, kBlockTrailerSize)); + if (r->status.ok()) { + r->status = InsertBlockInCache(block_contents, type, handle); + } + if (r->status.ok()) { + r->offset += block_contents.size() + kBlockTrailerSize; + } + } +} + +Status BlockBasedTableBuilder::status() const { + return rep_->status; +} + +static void DeleteCachedBlock(const Slice& key, void* value) { + Block* block = reinterpret_cast<Block*>(value); + delete block; +} + +// +// Make a copy of the block contents and insert into compressed block cache +// +Status BlockBasedTableBuilder::InsertBlockInCache(const Slice& block_contents, + const CompressionType type, + const BlockHandle* handle) { + Rep* r = rep_; + Cache* block_cache_compressed =
r->table_options.block_cache_compressed.get(); + + if (type != kNoCompression && block_cache_compressed != nullptr) { + + size_t size = block_contents.size(); + + std::unique_ptr<char[]> ubuf(new char[size + 1]); + memcpy(ubuf.get(), block_contents.data(), size); + ubuf[size] = type; + + BlockContents results(std::move(ubuf), size, true, type); + + Block* block = new Block(std::move(results)); + + // make cache key by appending the file offset to the cache prefix id + char* end = EncodeVarint64( + r->compressed_cache_key_prefix + + r->compressed_cache_key_prefix_size, + handle->offset()); + Slice key(r->compressed_cache_key_prefix, static_cast<size_t> + (end - r->compressed_cache_key_prefix)); + + // Insert into compressed block cache. + block_cache_compressed->Insert(key, block, block->usable_size(), + &DeleteCachedBlock); + + // Invalidate OS cache. + r->file->InvalidateCache(static_cast<size_t>(r->offset), size); + } + return Status::OK(); +} + +Status BlockBasedTableBuilder::Finish() { + Rep* r = rep_; + bool empty_data_block = r->data_block.empty(); + Flush(); + assert(!r->closed); + r->closed = true; + + BlockHandle filter_block_handle, metaindex_block_handle, index_block_handle, + compression_dict_block_handle; + // Write filter block + if (ok() && r->filter_block != nullptr) { + auto filter_contents = r->filter_block->Finish(); + r->props.filter_size = filter_contents.size(); + WriteRawBlock(filter_contents, kNoCompression, &filter_block_handle); + } + + // To make sure properties block is able to keep the accurate size of index + // block, we will finish writing all index entries here and flush them + // to storage after metaindex block is written. + if (ok() && !empty_data_block) { + r->index_builder->AddIndexEntry( + &r->last_key, nullptr /* no next data block */, r->pending_handle); + } + + IndexBuilder::IndexBlocks index_blocks; + auto s = r->index_builder->Finish(&index_blocks); + if (!s.ok()) { + return s; + } + + // Write meta blocks and metaindex block with the following order. + // 1. [meta block: filter] + // 2. [other meta blocks] + // 3. [meta block: properties] + // 4. [metaindex block] + // write meta blocks + MetaIndexBuilder meta_index_builder; + for (const auto& item : index_blocks.meta_blocks) { + BlockHandle block_handle; + WriteBlock(item.second, &block_handle, false /* is_data_block */); + meta_index_builder.Add(item.first, block_handle); + } + + if (ok()) { + if (r->filter_block != nullptr) { + // Add mapping from "<filter_block_prefix>.Name" to location + // of filter data. + std::string key; + if (r->filter_block->IsBlockBased()) { + key = BlockBasedTable::kFilterBlockPrefix; + } else { + key = BlockBasedTable::kFullFilterBlockPrefix; + } + key.append(r->table_options.filter_policy->Name()); + meta_index_builder.Add(key, filter_block_handle); + } + + // Write properties and compression dictionary blocks. + { + PropertyBlockBuilder property_block_builder; + r->props.column_family_id = r->column_family_id; + r->props.column_family_name = r->column_family_name; + r->props.filter_policy_name = r->table_options.filter_policy != nullptr ? + r->table_options.filter_policy->Name() : ""; + r->props.index_size = + r->index_builder->EstimatedSize() + kBlockTrailerSize; + r->props.comparator_name = r->ioptions.comparator != nullptr + ? r->ioptions.comparator->Name() + : "nullptr"; + r->props.merge_operator_name = r->ioptions.merge_operator != nullptr + ?
r->ioptions.merge_operator->Name() + : "nullptr"; + r->props.compression_name = CompressionTypeToString(r->compression_type); + + std::string property_collectors_names = "["; + for (size_t i = 0; + i < r->ioptions.table_properties_collector_factories.size(); ++i) { + if (i != 0) { + property_collectors_names += ","; + } + property_collectors_names += + r->ioptions.table_properties_collector_factories[i]->Name(); + } + property_collectors_names += "]"; + r->props.property_collectors_names = property_collectors_names; + + // Add basic properties + property_block_builder.AddTableProperty(r->props); + + // Add user collected properties + NotifyCollectTableCollectorsOnFinish(r->table_properties_collectors, + r->ioptions.info_log, + &property_block_builder); + + BlockHandle properties_block_handle; + WriteRawBlock( + property_block_builder.Finish(), + kNoCompression, + &properties_block_handle + ); + meta_index_builder.Add(kPropertiesBlock, properties_block_handle); + + // Write compression dictionary block + if (r->compression_dict && r->compression_dict->size()) { + WriteRawBlock(*r->compression_dict, kNoCompression, + &compression_dict_block_handle); + meta_index_builder.Add(kCompressionDictBlock, + compression_dict_block_handle); + } + } // end of properties/compression dictionary block writing + } // meta blocks + + // Write index block + if (ok()) { + // flush the meta index block + WriteRawBlock(meta_index_builder.Finish(), kNoCompression, + &metaindex_block_handle); + WriteBlock(index_blocks.index_block_contents, &index_block_handle, + false /* is_data_block */); + } + + // Write footer + if (ok()) { + // No need to write out new footer if we're using default checksum. + // We're writing the legacy magic number because we want old versions of + // RocksDB to be able to read files generated with a new release (just in + // case somebody wants to roll back after an upgrade) + // TODO(icanadi) at some point in the future, when we're absolutely sure + // nobody will roll back to RocksDB 2.x versions, retire the legacy magic + // number and always write new table files with new magic number + bool legacy = (r->table_options.format_version == 0); + // this is guaranteed by BlockBasedTableBuilder's constructor + assert(r->table_options.checksum == kCRC32c || + r->table_options.format_version != 0); + Footer footer(legacy ?
kLegacyBlockBasedTableMagicNumber + : kBlockBasedTableMagicNumber, + r->table_options.format_version); + footer.set_metaindex_handle(metaindex_block_handle); + footer.set_index_handle(index_block_handle); + footer.set_checksum(r->table_options.checksum); + std::string footer_encoding; + footer.EncodeTo(&footer_encoding); + r->status = r->file->Append(footer_encoding); + if (r->status.ok()) { + r->offset += footer_encoding.size(); + } + } + + return r->status; +} + +void BlockBasedTableBuilder::Abandon() { + Rep* r = rep_; + assert(!r->closed); + r->closed = true; +} + +uint64_t BlockBasedTableBuilder::NumEntries() const { + return rep_->props.num_entries; +} + +uint64_t BlockBasedTableBuilder::FileSize() const { + return rep_->offset; +} + +bool BlockBasedTableBuilder::NeedCompact() const { + for (const auto& collector : rep_->table_properties_collectors) { + if (collector->NeedCompact()) { + return true; + } + } + return false; +} + +TableProperties BlockBasedTableBuilder::GetTableProperties() const { + TableProperties ret = rep_->props; + for (const auto& collector : rep_->table_properties_collectors) { + for (const auto& prop : collector->GetReadableProperties()) { + ret.readable_properties.insert(prop); + } + collector->Finish(&ret.user_collected_properties); + } + return ret; +} + +const std::string BlockBasedTable::kFilterBlockPrefix = "filter."; +const std::string BlockBasedTable::kFullFilterBlockPrefix = "fullfilter."; +} // namespace rocksdb diff --git a/external/rocksdb/table/block_based_table_builder.h b/external/rocksdb/table/block_based_table_builder.h new file mode 100755 index 0000000000..8172c238ec --- /dev/null +++ b/external/rocksdb/table/block_based_table_builder.h @@ -0,0 +1,120 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#include <stdint.h> +#include <limits> +#include <string> +#include <utility> +#include <vector> + +#include "rocksdb/flush_block_policy.h" +#include "rocksdb/options.h" +#include "rocksdb/status.h" +#include "table/table_builder.h" + +namespace rocksdb { + +class BlockBuilder; +class BlockHandle; +class WritableFile; +struct BlockBasedTableOptions; + +extern const uint64_t kBlockBasedTableMagicNumber; +extern const uint64_t kLegacyBlockBasedTableMagicNumber; + +class BlockBasedTableBuilder : public TableBuilder { + public: + // Create a builder that will store the contents of the table it is + // building in *file. Does not close the file. It is up to the + // caller to close the file after calling Finish(). + // @param compression_dict Data for presetting the compression library's + // dictionary, or nullptr.
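+ // (Illustrative usage note, not part of the upstream patch: construct the + // builder, call Add() with keys in comparator order, then exactly one of + // Finish() or Abandon() before destruction; the destructor asserts that + // one of them ran.)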
+ BlockBasedTableBuilder( + const ImmutableCFOptions& ioptions, + const BlockBasedTableOptions& table_options, + const InternalKeyComparator& internal_comparator, + const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>* + int_tbl_prop_collector_factories, + uint32_t column_family_id, WritableFileWriter* file, + const CompressionType compression_type, + const CompressionOptions& compression_opts, + const std::string* compression_dict, const bool skip_filters, + const std::string& column_family_name); + + // REQUIRES: Either Finish() or Abandon() has been called. + ~BlockBasedTableBuilder(); + + // Add key,value to the table being constructed. + // REQUIRES: key is after any previously added key according to comparator. + // REQUIRES: Finish(), Abandon() have not been called + void Add(const Slice& key, const Slice& value) override; + + // Return non-ok iff some error has been detected. + Status status() const override; + + // Finish building the table. Stops using the file passed to the + // constructor after this function returns. + // REQUIRES: Finish(), Abandon() have not been called + Status Finish() override; + + // Indicate that the contents of this builder should be abandoned. Stops + // using the file passed to the constructor after this function returns. + // If the caller is not going to call Finish(), it must call Abandon() + // before destroying this builder. + // REQUIRES: Finish(), Abandon() have not been called + void Abandon() override; + + // Number of calls to Add() so far. + uint64_t NumEntries() const override; + + // Size of the file generated so far. If invoked after a successful + // Finish() call, returns the size of the final generated file. + uint64_t FileSize() const override; + + bool NeedCompact() const override; + + // Get table properties + TableProperties GetTableProperties() const override; + + private: + bool ok() const { return status().ok(); } + + // Call block's Finish() method and then write the finalized block contents to + // file. + void WriteBlock(BlockBuilder* block, BlockHandle* handle, bool is_data_block); + + // Directly write block content to the file. + void WriteBlock(const Slice& block_contents, BlockHandle* handle, + bool is_data_block); + void WriteRawBlock(const Slice& data, CompressionType, BlockHandle* handle); + Status InsertBlockInCache(const Slice& block_contents, + const CompressionType type, + const BlockHandle* handle); + struct Rep; + class BlockBasedTablePropertiesCollectorFactory; + class BlockBasedTablePropertiesCollector; + Rep* rep_; + + // Advanced operation: flush any buffered key/value pairs to file. + // Can be used to ensure that two adjacent entries never live in + // the same data block. Most clients should not need to use this method. + // REQUIRES: Finish(), Abandon() have not been called + void Flush(); + + // Some compression libraries fail when the raw size is bigger than int. If + // uncompressed size is bigger than kCompressionSizeLimit, don't compress it. + const uint64_t kCompressionSizeLimit = std::numeric_limits<int>::max(); + + // No copying allowed + BlockBasedTableBuilder(const BlockBasedTableBuilder&) = delete; + void operator=(const BlockBasedTableBuilder&) = delete; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/table/block_based_table_factory.cc b/external/rocksdb/table/block_based_table_factory.cc new file mode 100755 index 0000000000..e9499cd676 --- /dev/null +++ b/external/rocksdb/table/block_based_table_factory.cc @@ -0,0 +1,200 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
diff --git a/external/rocksdb/table/block_based_table_factory.cc b/external/rocksdb/table/block_based_table_factory.cc
new file mode 100755
index 0000000000..e9499cd676
--- /dev/null
+++ b/external/rocksdb/table/block_based_table_factory.cc
@@ -0,0 +1,200 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+
+#include "table/block_based_table_factory.h"
+
+#include <memory>
+#include <string>
+#include <stdint.h>
+
+#include "port/port.h"
+#include "rocksdb/flush_block_policy.h"
+#include "rocksdb/cache.h"
+#include "table/block_based_table_builder.h"
+#include "table/block_based_table_reader.h"
+#include "table/format.h"
+
+namespace rocksdb {
+
+BlockBasedTableFactory::BlockBasedTableFactory(
+    const BlockBasedTableOptions& _table_options)
+    : table_options_(_table_options) {
+  if (table_options_.flush_block_policy_factory == nullptr) {
+    table_options_.flush_block_policy_factory.reset(
+        new FlushBlockBySizePolicyFactory());
+  }
+  if (table_options_.no_block_cache) {
+    table_options_.block_cache.reset();
+  } else if (table_options_.block_cache == nullptr) {
+    table_options_.block_cache = NewLRUCache(8 << 20);
+  }
+  if (table_options_.block_size_deviation < 0 ||
+      table_options_.block_size_deviation > 100) {
+    table_options_.block_size_deviation = 0;
+  }
+  if (table_options_.block_restart_interval < 1) {
+    table_options_.block_restart_interval = 1;
+  }
+  if (table_options_.index_block_restart_interval < 1) {
+    table_options_.index_block_restart_interval = 1;
+  }
+}
+
+Status BlockBasedTableFactory::NewTableReader(
+    const TableReaderOptions& table_reader_options,
+    unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
+    unique_ptr<TableReader>* table_reader,
+    bool prefetch_index_and_filter_in_cache) const {
+  return BlockBasedTable::Open(
+      table_reader_options.ioptions, table_reader_options.env_options,
+      table_options_, table_reader_options.internal_comparator,
+      std::move(file), file_size, table_reader,
+      prefetch_index_and_filter_in_cache,
+      table_reader_options.skip_filters, table_reader_options.level);
+}
+
+TableBuilder* BlockBasedTableFactory::NewTableBuilder(
+    const TableBuilderOptions& table_builder_options,
+    uint32_t column_family_id, WritableFileWriter* file) const {
+  auto table_builder = new BlockBasedTableBuilder(
+      table_builder_options.ioptions, table_options_,
+      table_builder_options.internal_comparator,
+      table_builder_options.int_tbl_prop_collector_factories,
+      column_family_id, file, table_builder_options.compression_type,
+      table_builder_options.compression_opts,
+      table_builder_options.compression_dict,
+      table_builder_options.skip_filters,
+      table_builder_options.column_family_name);
+
+  return table_builder;
+}
+
+Status BlockBasedTableFactory::SanitizeOptions(
+    const DBOptions& db_opts,
+    const ColumnFamilyOptions& cf_opts) const {
+  if (table_options_.index_type == BlockBasedTableOptions::kHashSearch &&
+      cf_opts.prefix_extractor == nullptr) {
+    return Status::InvalidArgument("Hash index is specified for block-based "
+        "table, but prefix_extractor is not given");
+  }
+  if (table_options_.cache_index_and_filter_blocks &&
+      table_options_.no_block_cache) {
+    return Status::InvalidArgument("Enable cache_index_and_filter_blocks, "
+        "but block cache is disabled");
+  }
+  if (table_options_.pin_l0_filter_and_index_blocks_in_cache &&
+      table_options_.no_block_cache) {
+    return Status::InvalidArgument(
+        "Enable pin_l0_filter_and_index_blocks_in_cache, "
+        "but block cache is disabled");
+  }
+  if (!BlockBasedTableSupportedVersion(table_options_.format_version)) {
+    return Status::InvalidArgument(
+        "Unsupported BlockBasedTable format_version. Please check "
+        "include/rocksdb/table.h for more info");
+  }
+  return Status::OK();
+}
+
+std::string BlockBasedTableFactory::GetPrintableTableOptions() const {
+  std::string ret;
+  ret.reserve(20000);
+  const int kBufferSize = 200;
+  char buffer[kBufferSize];
+
+  snprintf(buffer, kBufferSize, "  flush_block_policy_factory: %s (%p)\n",
+           table_options_.flush_block_policy_factory->Name(),
+           static_cast<void*>(table_options_.flush_block_policy_factory.get()));
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  cache_index_and_filter_blocks: %d\n",
+           table_options_.cache_index_and_filter_blocks);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize,
+           "  pin_l0_filter_and_index_blocks_in_cache: %d\n",
+           table_options_.pin_l0_filter_and_index_blocks_in_cache);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  index_type: %d\n",
+           table_options_.index_type);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  hash_index_allow_collision: %d\n",
+           table_options_.hash_index_allow_collision);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  checksum: %d\n",
+           table_options_.checksum);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  no_block_cache: %d\n",
+           table_options_.no_block_cache);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  block_cache: %p\n",
+           static_cast<void*>(table_options_.block_cache.get()));
+  ret.append(buffer);
+  if (table_options_.block_cache) {
+    snprintf(buffer, kBufferSize, "  block_cache_size: %" ROCKSDB_PRIszt "\n",
+             table_options_.block_cache->GetCapacity());
+    ret.append(buffer);
+  }
+  snprintf(buffer, kBufferSize, "  block_cache_compressed: %p\n",
+           static_cast<void*>(table_options_.block_cache_compressed.get()));
+  ret.append(buffer);
+  if (table_options_.block_cache_compressed) {
+    snprintf(buffer, kBufferSize,
+             "  block_cache_compressed_size: %" ROCKSDB_PRIszt "\n",
+             table_options_.block_cache_compressed->GetCapacity());
+    ret.append(buffer);
+  }
+  snprintf(buffer, kBufferSize, "  block_size: %" ROCKSDB_PRIszt "\n",
+           table_options_.block_size);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  block_size_deviation: %d\n",
+           table_options_.block_size_deviation);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  block_restart_interval: %d\n",
+           table_options_.block_restart_interval);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  index_block_restart_interval: %d\n",
+           table_options_.index_block_restart_interval);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  filter_policy: %s\n",
+           table_options_.filter_policy == nullptr ?
+               "nullptr" : table_options_.filter_policy->Name());
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  whole_key_filtering: %d\n",
+           table_options_.whole_key_filtering);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  skip_table_builder_flush: %d\n",
+           table_options_.skip_table_builder_flush);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  format_version: %d\n",
+           table_options_.format_version);
+  ret.append(buffer);
+  return ret;
+}
+
+const BlockBasedTableOptions& BlockBasedTableFactory::table_options() const {
+  return table_options_;
+}
+
+TableFactory* NewBlockBasedTableFactory(
+    const BlockBasedTableOptions& _table_options) {
+  return new BlockBasedTableFactory(_table_options);
+}
+
+const std::string BlockBasedTablePropertyNames::kIndexType =
+    "rocksdb.block.based.table.index.type";
+const std::string BlockBasedTablePropertyNames::kWholeKeyFiltering =
+    "rocksdb.block.based.table.whole.key.filtering";
+const std::string BlockBasedTablePropertyNames::kPrefixFiltering =
+    "rocksdb.block.based.table.prefix.filtering";
+const std::string kHashIndexPrefixesBlock = "rocksdb.hashindex.prefixes";
+const std::string kHashIndexPrefixesMetadataBlock =
+    "rocksdb.hashindex.metadata";
+const std::string kPropTrue = "1";
+const std::string kPropFalse = "0";
+
+} // namespace rocksdb
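For context, the factory defined above is normally installed through the public options API rather than constructed directly. A typical configuration (standard RocksDB usage; the cache and block sizes are arbitrary example values):

    #include "rocksdb/cache.h"
    #include "rocksdb/options.h"
    #include "rocksdb/table.h"

    rocksdb::Options MakeBlockBasedOptions() {
      rocksdb::BlockBasedTableOptions table_options;
      table_options.block_size = 16 * 1024;                        // 16 KiB data blocks
      table_options.block_cache = rocksdb::NewLRUCache(64 << 20);  // 64 MiB cache
      table_options.cache_index_and_filter_blocks = true;

      rocksdb::Options options;
      options.table_factory.reset(
          rocksdb::NewBlockBasedTableFactory(table_options));
      return options;
    }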
diff --git a/external/rocksdb/table/block_based_table_factory.h b/external/rocksdb/table/block_based_table_factory.h
new file mode 100755
index 0000000000..6ecb232e49
--- /dev/null
+++ b/external/rocksdb/table/block_based_table_factory.h
@@ -0,0 +1,65 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#include <stdint.h>
+
+#include <memory>
+#include <string>
+
+#include "rocksdb/flush_block_policy.h"
+#include "rocksdb/table.h"
+#include "db/dbformat.h"
+
+namespace rocksdb {
+
+struct EnvOptions;
+
+using std::unique_ptr;
+class BlockBasedTableBuilder;
+
+class BlockBasedTableFactory : public TableFactory {
+ public:
+  explicit BlockBasedTableFactory(
+      const BlockBasedTableOptions& table_options = BlockBasedTableOptions());
+
+  ~BlockBasedTableFactory() {}
+
+  const char* Name() const override { return "BlockBasedTable"; }
+
+  Status NewTableReader(
+      const TableReaderOptions& table_reader_options,
+      unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
+      unique_ptr<TableReader>* table_reader,
+      bool prefetch_index_and_filter_in_cache = true) const override;
+
+  TableBuilder* NewTableBuilder(
+      const TableBuilderOptions& table_builder_options,
+      uint32_t column_family_id, WritableFileWriter* file) const override;
+
+  // Sanitizes the specified DB Options.
+  Status SanitizeOptions(const DBOptions& db_opts,
+                         const ColumnFamilyOptions& cf_opts) const override;
+
+  std::string GetPrintableTableOptions() const override;
+
+  const BlockBasedTableOptions& table_options() const;
+
+  void* GetOptions() override { return &table_options_; }
+
+ private:
+  BlockBasedTableOptions table_options_;
+};
+
+extern const std::string kHashIndexPrefixesBlock;
+extern const std::string kHashIndexPrefixesMetadataBlock;
+extern const std::string kPropTrue;
+extern const std::string kPropFalse;
+
+} // namespace rocksdb
diff --git a/external/rocksdb/table/block_based_table_reader.cc b/external/rocksdb/table/block_based_table_reader.cc
new file mode 100755
index 0000000000..7b98a48889
--- /dev/null
+++ b/external/rocksdb/table/block_based_table_reader.cc
@@ -0,0 +1,1941 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+#include "table/block_based_table_reader.h"
+
+#include <string>
+#include <utility>
+
+#include "db/dbformat.h"
+#include "db/pinned_iterators_manager.h"
+
+#include "rocksdb/cache.h"
+#include "rocksdb/comparator.h"
+#include "rocksdb/env.h"
+#include "rocksdb/filter_policy.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/options.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/table.h"
+#include "rocksdb/table_properties.h"
+
+#include "table/block.h"
+#include "table/block_based_filter_block.h"
+#include "table/block_based_table_factory.h"
+#include "table/block_prefix_index.h"
+#include "table/filter_block.h"
+#include "table/format.h"
+#include "table/full_filter_block.h"
+#include "table/get_context.h"
+#include "table/internal_iterator.h"
+#include "table/meta_blocks.h"
+#include "table/persistent_cache_helper.h"
+#include "table/two_level_iterator.h"
+
+#include "util/coding.h"
+#include "util/file_reader_writer.h"
+#include "util/perf_context_imp.h"
+#include "util/stop_watch.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+
+namespace rocksdb {
+
+extern const uint64_t kBlockBasedTableMagicNumber;
+extern const std::string kHashIndexPrefixesBlock;
+extern const std::string kHashIndexPrefixesMetadataBlock;
+using std::unique_ptr;
+
+typedef BlockBasedTable::IndexReader IndexReader;
+
+namespace {
+// Read the block identified by "handle" from "file".
+// The only relevant option is options.verify_checksums for now.
+// On failure return non-OK.
+// On success fill *result and return OK - caller owns *result
+// @param compression_dict Data for presetting the compression library's
+//    dictionary.
+Status ReadBlockFromFile(RandomAccessFileReader* file, const Footer& footer,
+                         const ReadOptions& options, const BlockHandle& handle,
+                         std::unique_ptr<Block>* result,
+                         const ImmutableCFOptions &ioptions,
+                         bool do_uncompress, const Slice& compression_dict,
+                         const PersistentCacheOptions& cache_options) {
+  BlockContents contents;
+  Status s = ReadBlockContents(file, footer, options, handle, &contents,
+                               ioptions, do_uncompress, compression_dict,
+                               cache_options);
+  if (s.ok()) {
+    result->reset(new Block(std::move(contents)));
+  }
+
+  return s;
+}
+
+// Delete the resource that is held by the iterator.
+template <class ResourceType>
+void DeleteHeldResource(void* arg, void* ignored) {
+  delete reinterpret_cast<ResourceType*>(arg);
+}
+
+// Delete the entry resided in the cache.
+template <class Entry>
+void DeleteCachedEntry(const Slice& key, void* value) {
+  auto entry = reinterpret_cast<Entry*>(value);
+  delete entry;
+}
+
+void DeleteCachedFilterEntry(const Slice& key, void* value);
+void DeleteCachedIndexEntry(const Slice& key, void* value);
+
+// Release the cached entry and decrement its ref count.
+void ReleaseCachedEntry(void* arg, void* h) {
+  Cache* cache = reinterpret_cast<Cache*>(arg);
+  Cache::Handle* handle = reinterpret_cast<Cache::Handle*>(h);
+  cache->Release(handle);
+}
+
+Slice GetCacheKeyFromOffset(const char* cache_key_prefix,
+                            size_t cache_key_prefix_size, uint64_t offset,
+                            char* cache_key) {
+  assert(cache_key != nullptr);
+  assert(cache_key_prefix_size != 0);
+  assert(cache_key_prefix_size <= BlockBasedTable::kMaxCacheKeyPrefixSize);
+  memcpy(cache_key, cache_key_prefix, cache_key_prefix_size);
+  char* end = EncodeVarint64(cache_key + cache_key_prefix_size, offset);
+  return Slice(cache_key, static_cast<size_t>(end - cache_key));
+}
+
+Cache::Handle* GetEntryFromCache(Cache* block_cache, const Slice& key,
+                                 Tickers block_cache_miss_ticker,
+                                 Tickers block_cache_hit_ticker,
+                                 Statistics* statistics) {
+  auto cache_handle = block_cache->Lookup(key);
+  if (cache_handle != nullptr) {
+    PERF_COUNTER_ADD(block_cache_hit_count, 1);
+    // overall cache hit
+    RecordTick(statistics, BLOCK_CACHE_HIT);
+    // total bytes read from cache
+    RecordTick(statistics, BLOCK_CACHE_BYTES_READ,
+               block_cache->GetUsage(cache_handle));
+    // block-type specific cache hit
+    RecordTick(statistics, block_cache_hit_ticker);
+  } else {
+    // overall cache miss
+    RecordTick(statistics, BLOCK_CACHE_MISS);
+    // block-type specific cache miss
+    RecordTick(statistics, block_cache_miss_ticker);
+  }
+
+  return cache_handle;
+}
+
+} // namespace
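GetCacheKeyFromOffset() above builds block-cache keys by appending the block offset, encoded as a varint64, to an opaque per-file prefix. A self-contained sketch of that key layout, with a hand-rolled LEB128 encoder standing in for EncodeVarint64:

    #include <cstdint>
    #include <string>

    std::string MakeBlockCacheKey(const std::string& file_prefix,
                                  uint64_t offset) {
      std::string key = file_prefix;  // unique per file (or minted from the cache)
      while (offset >= 0x80) {        // varint64: 7 payload bits per byte
        key.push_back(static_cast<char>((offset & 0x7f) | 0x80));
        offset >>= 7;
      }
      key.push_back(static_cast<char>(offset));
      return key;  // distinct per (file, block offset) pair
    }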
+
+// -- IndexReader and its subclasses
+// IndexReader is the interface that provides the functionality for index
+// access.
+class BlockBasedTable::IndexReader {
+ public:
+  explicit IndexReader(const Comparator* comparator, Statistics* stats)
+      : comparator_(comparator), statistics_(stats) {}
+
+  virtual ~IndexReader() {}
+
+  // Create an iterator for index access.  If iter is null, a new iterator is
+  // created and returned; otherwise the passed-in iterator is updated and
+  // returned.
+  virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
+                                        bool total_order_seek = true) = 0;
+
+  // The size of the index.
+  virtual size_t size() const = 0;
+  // Memory usage of the index block
+  virtual size_t usable_size() const = 0;
+  // return the statistics pointer
+  virtual Statistics* statistics() const { return statistics_; }
+  // Report an approximation of how much memory has been used other than
+  // memory that was allocated in block cache.
+  virtual size_t ApproximateMemoryUsage() const = 0;
+
+ protected:
+  const Comparator* comparator_;
+
+ private:
+  Statistics* statistics_;
+};
+
+// Index that allows binary search lookup for the first key of each block.
+// This class can be viewed as a thin wrapper for `Block` class which already
+// supports binary search.
+class BinarySearchIndexReader : public IndexReader {
+ public:
+  // Read index from the file and create an instance for
+  // `BinarySearchIndexReader`.
+  // On success, index_reader will be populated; otherwise it will remain
+  // unmodified.
+  static Status Create(RandomAccessFileReader* file, const Footer& footer,
+                       const BlockHandle& index_handle,
+                       const ImmutableCFOptions &ioptions,
+                       const Comparator* comparator,
+                       IndexReader** index_reader,
+                       const PersistentCacheOptions& cache_options) {
+    std::unique_ptr<Block> index_block;
+    auto s = ReadBlockFromFile(file, footer, ReadOptions(), index_handle,
+                               &index_block, ioptions, true /* decompress */,
+                               Slice() /*compression dict*/, cache_options);
+
+    if (s.ok()) {
+      *index_reader = new BinarySearchIndexReader(
+          comparator, std::move(index_block), ioptions.statistics);
+    }
+
+    return s;
+  }
+
+  virtual InternalIterator* NewIterator(BlockIter* iter = nullptr,
+                                        bool dont_care = true) override {
+    return index_block_->NewIterator(comparator_, iter, true);
+  }
+
+  virtual size_t size() const override { return index_block_->size(); }
+  virtual size_t usable_size() const override {
+    return index_block_->usable_size();
+  }
+
+  virtual size_t ApproximateMemoryUsage() const override {
+    assert(index_block_);
+    return index_block_->ApproximateMemoryUsage();
+  }
+
+ private:
+  BinarySearchIndexReader(const Comparator* comparator,
+                          std::unique_ptr<Block>&& index_block,
+                          Statistics* stats)
+      : IndexReader(comparator, stats), index_block_(std::move(index_block)) {
+    assert(index_block_ != nullptr);
+  }
+  std::unique_ptr<Block> index_block_;
+};
+
+// Index that leverages an internal hash table to quicken the lookup for a
+// given key.
+class HashIndexReader : public IndexReader {
+ public:
+  static Status Create(
+      const SliceTransform* hash_key_extractor, const Footer& footer,
+      RandomAccessFileReader* file, const ImmutableCFOptions &ioptions,
+      const Comparator* comparator, const BlockHandle& index_handle,
+      InternalIterator* meta_index_iter, IndexReader** index_reader,
+      bool hash_index_allow_collision,
+      const PersistentCacheOptions& cache_options) {
+    std::unique_ptr<Block> index_block;
+    auto s = ReadBlockFromFile(file, footer, ReadOptions(), index_handle,
+                               &index_block, ioptions, true /* decompress */,
+                               Slice() /*compression dict*/, cache_options);
+
+    if (!s.ok()) {
+      return s;
+    }
+
+    // Note, failure to create prefix hash index does not need to be a
+    // hard error.  We can still fall back to the original binary search
+    // index.  So, Create will succeed regardless, from this point on.
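The "soft failure" contract spelled out in that comment (any problem while building the optional prefix accelerator degrades the reader to plain binary search instead of failing the open) boils down to the following pattern; this is a self-contained illustration, not RocksDB code:

    #include <iostream>
    #include <memory>

    struct PrefixIndex {};  // the optional accelerator

    struct Reader {
      std::unique_ptr<PrefixIndex> prefix_index;  // may legitimately stay null
      const char* Mode() const {
        return prefix_index ? "hash" : "binary-search";
      }
    };

    bool TryBuildPrefixIndex(std::unique_ptr<PrefixIndex>* out) {
      return false;  // pretend the prefixes meta block was missing
    }

    int main() {
      Reader reader;                              // usable as-is via binary search
      TryBuildPrefixIndex(&reader.prefix_index);  // failure deliberately ignored
      std::cout << "lookup mode: " << reader.Mode() << "\n";  // binary-search
    }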
+ + auto new_index_reader = + new HashIndexReader(comparator, std::move(index_block), + ioptions.statistics); + *index_reader = new_index_reader; + + // Get prefixes block + BlockHandle prefixes_handle; + s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesBlock, + &prefixes_handle); + if (!s.ok()) { + // TODO: log error + return Status::OK(); + } + + // Get index metadata block + BlockHandle prefixes_meta_handle; + s = FindMetaBlock(meta_index_iter, kHashIndexPrefixesMetadataBlock, + &prefixes_meta_handle); + if (!s.ok()) { + // TODO: log error + return Status::OK(); + } + + // Read contents for the blocks + BlockContents prefixes_contents; + s = ReadBlockContents(file, footer, ReadOptions(), prefixes_handle, + &prefixes_contents, ioptions, true /* decompress */, + Slice() /*compression dict*/, cache_options); + if (!s.ok()) { + return s; + } + BlockContents prefixes_meta_contents; + s = ReadBlockContents(file, footer, ReadOptions(), prefixes_meta_handle, + &prefixes_meta_contents, ioptions, true /* decompress */, + Slice() /*compression dict*/, cache_options); + if (!s.ok()) { + // TODO: log error + return Status::OK(); + } + + BlockPrefixIndex* prefix_index = nullptr; + s = BlockPrefixIndex::Create(hash_key_extractor, prefixes_contents.data, + prefixes_meta_contents.data, &prefix_index); + // TODO: log error + if (s.ok()) { + new_index_reader->index_block_->SetBlockPrefixIndex(prefix_index); + } + + return Status::OK(); + } + + virtual InternalIterator* NewIterator(BlockIter* iter = nullptr, + bool total_order_seek = true) override { + return index_block_->NewIterator(comparator_, iter, total_order_seek); + } + + virtual size_t size() const override { return index_block_->size(); } + virtual size_t usable_size() const override { + return index_block_->usable_size(); + } + + virtual size_t ApproximateMemoryUsage() const override { + assert(index_block_); + return index_block_->ApproximateMemoryUsage() + + prefixes_contents_.data.size(); + } + + private: + HashIndexReader(const Comparator* comparator, + std::unique_ptr&& index_block, Statistics* stats) + : IndexReader(comparator, stats), index_block_(std::move(index_block)) { + assert(index_block_ != nullptr); + } + + ~HashIndexReader() { + } + + std::unique_ptr index_block_; + BlockContents prefixes_contents_; +}; + +// CachableEntry represents the entries that *may* be fetched from block cache. +// field `value` is the item we want to get. +// field `cache_handle` is the cache handle to the block cache. If the value +// was not read from cache, `cache_handle` will be nullptr. +template +struct BlockBasedTable::CachableEntry { + CachableEntry(TValue* _value, Cache::Handle* _cache_handle) + : value(_value), cache_handle(_cache_handle) {} + CachableEntry() : CachableEntry(nullptr, nullptr) {} + void Release(Cache* cache) { + if (cache_handle) { + cache->Release(cache_handle); + value = nullptr; + cache_handle = nullptr; + } + } + bool IsSet() const { return cache_handle != nullptr; } + + TValue* value = nullptr; + // if the entry is from the cache, cache_handle will be populated. + Cache::Handle* cache_handle = nullptr; +}; + +struct BlockBasedTable::Rep { + Rep(const ImmutableCFOptions& _ioptions, const EnvOptions& _env_options, + const BlockBasedTableOptions& _table_opt, + const InternalKeyComparator& _internal_comparator, bool skip_filters) + : ioptions(_ioptions), + env_options(_env_options), + table_options(_table_opt), + filter_policy(skip_filters ? 
nullptr : _table_opt.filter_policy.get()),
+        internal_comparator(_internal_comparator),
+        filter_type(FilterType::kNoFilter),
+        whole_key_filtering(_table_opt.whole_key_filtering),
+        prefix_filtering(true) {}
+
+  const ImmutableCFOptions& ioptions;
+  const EnvOptions& env_options;
+  const BlockBasedTableOptions& table_options;
+  const FilterPolicy* const filter_policy;
+  const InternalKeyComparator& internal_comparator;
+  Status status;
+  unique_ptr<RandomAccessFileReader> file;
+  char cache_key_prefix[kMaxCacheKeyPrefixSize];
+  size_t cache_key_prefix_size = 0;
+  char persistent_cache_key_prefix[kMaxCacheKeyPrefixSize];
+  size_t persistent_cache_key_prefix_size = 0;
+  char compressed_cache_key_prefix[kMaxCacheKeyPrefixSize];
+  size_t compressed_cache_key_prefix_size = 0;
+  uint64_t dummy_index_reader_offset =
+      0;  // ID that is unique for the block cache.
+  PersistentCacheOptions persistent_cache_options;
+
+  // Footer contains the fixed table information
+  Footer footer;
+  // index_reader and filter will be populated and used only when
+  // options.block_cache is nullptr; otherwise we will get the index block via
+  // the block cache.
+  unique_ptr<IndexReader> index_reader;
+  unique_ptr<FilterBlockReader> filter;
+
+  enum class FilterType {
+    kNoFilter,
+    kFullFilter,
+    kBlockFilter,
+  };
+  FilterType filter_type;
+  BlockHandle filter_handle;
+
+  std::shared_ptr<const TableProperties> table_properties;
+  // Block containing the data for the compression dictionary.  We take
+  // ownership for the entire block struct, even though we only use its Slice
+  // member.  This is easier because the Slice member depends on the continued
+  // existence of another member ("allocation").
+  std::unique_ptr<const BlockContents> compression_dict_block;
+  BlockBasedTableOptions::IndexType index_type;
+  bool hash_index_allow_collision;
+  bool whole_key_filtering;
+  bool prefix_filtering;
+  // TODO(kailiu) It is very ugly to use internal key in table, since table
+  // module should not be relying on db module.  However to make things easier
+  // and compatible with existing code, we introduce a wrapper that allows
+  // block to extract prefix without knowing if a key is internal or not.
+  unique_ptr<SliceTransform> internal_prefix_transform;
+
+  // only used in level 0 files:
+  // when pin_l0_filter_and_index_blocks_in_cache is true, we do use the
+  // LRU cache, but we always keep the filter & index block's handle checked
+  // out here (i.e. we don't call Release()), plus the parsed-out objects.
+  // The LRU cache will therefore never evict them, hence they're pinned.
+  CachableEntry<FilterBlockReader> filter_entry;
+  CachableEntry<IndexReader> index_entry;
+};
+
+BlockBasedTable::~BlockBasedTable() {
+  Close();
+  delete rep_;
+}
+
+// Helper function to setup the cache key's prefix for the Table.
+void BlockBasedTable::SetupCacheKeyPrefix(Rep* rep, uint64_t file_size) {
+  assert(kMaxCacheKeyPrefixSize >= 10);
+  rep->cache_key_prefix_size = 0;
+  rep->compressed_cache_key_prefix_size = 0;
+  if (rep->table_options.block_cache != nullptr) {
+    GenerateCachePrefix(rep->table_options.block_cache.get(),
+                        rep->file->file(), &rep->cache_key_prefix[0],
+                        &rep->cache_key_prefix_size);
+    // Create dummy offset of index reader which is beyond the file size.
+ rep->dummy_index_reader_offset = + file_size + rep->table_options.block_cache->NewId(); + } + if (rep->table_options.persistent_cache != nullptr) { + GenerateCachePrefix(/*cache=*/nullptr, rep->file->file(), + &rep->persistent_cache_key_prefix[0], + &rep->persistent_cache_key_prefix_size); + } + if (rep->table_options.block_cache_compressed != nullptr) { + GenerateCachePrefix(rep->table_options.block_cache_compressed.get(), + rep->file->file(), &rep->compressed_cache_key_prefix[0], + &rep->compressed_cache_key_prefix_size); + } +} + +void BlockBasedTable::GenerateCachePrefix(Cache* cc, + RandomAccessFile* file, char* buffer, size_t* size) { + + // generate an id from the file + *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize); + + // If the prefix wasn't generated or was too long, + // create one from the cache. + if (cc && *size == 0) { + char* end = EncodeVarint64(buffer, cc->NewId()); + *size = static_cast(end - buffer); + } +} + +void BlockBasedTable::GenerateCachePrefix(Cache* cc, + WritableFile* file, char* buffer, size_t* size) { + + // generate an id from the file + *size = file->GetUniqueId(buffer, kMaxCacheKeyPrefixSize); + + // If the prefix wasn't generated or was too long, + // create one from the cache. + if (*size == 0) { + char* end = EncodeVarint64(buffer, cc->NewId()); + *size = static_cast(end - buffer); + } +} + +namespace { +// Return True if table_properties has `user_prop_name` has a `true` value +// or it doesn't contain this property (for backward compatible). +bool IsFeatureSupported(const TableProperties& table_properties, + const std::string& user_prop_name, Logger* info_log) { + auto& props = table_properties.user_collected_properties; + auto pos = props.find(user_prop_name); + // Older version doesn't have this value set. Skip this check. + if (pos != props.end()) { + if (pos->second == kPropFalse) { + return false; + } else if (pos->second != kPropTrue) { + Log(InfoLogLevel::WARN_LEVEL, info_log, + "Property %s has invalidate value %s", user_prop_name.c_str(), + pos->second.c_str()); + } + } + return true; +} +} // namespace + +Slice BlockBasedTable::GetCacheKey(const char* cache_key_prefix, + size_t cache_key_prefix_size, + const BlockHandle& handle, char* cache_key) { + assert(cache_key != nullptr); + assert(cache_key_prefix_size != 0); + assert(cache_key_prefix_size <= kMaxCacheKeyPrefixSize); + memcpy(cache_key, cache_key_prefix, cache_key_prefix_size); + char* end = + EncodeVarint64(cache_key + cache_key_prefix_size, handle.offset()); + return Slice(cache_key, static_cast(end - cache_key)); +} + +Status BlockBasedTable::Open(const ImmutableCFOptions& ioptions, + const EnvOptions& env_options, + const BlockBasedTableOptions& table_options, + const InternalKeyComparator& internal_comparator, + unique_ptr&& file, + uint64_t file_size, + unique_ptr* table_reader, + const bool prefetch_index_and_filter_in_cache, + const bool skip_filters, const int level) { + table_reader->reset(); + + Footer footer; + auto s = ReadFooterFromFile(file.get(), file_size, &footer, + kBlockBasedTableMagicNumber); + if (!s.ok()) { + return s; + } + if (!BlockBasedTableSupportedVersion(footer.version())) { + return Status::Corruption( + "Unknown Footer version. Maybe this file was created with newer " + "version of RocksDB?"); + } + + // We've successfully read the footer and the index block: we're + // ready to serve requests. + // Better not mutate rep_ after the creation. eg. 
internal_prefix_transform + // raw pointer will be used to create HashIndexReader, whose reset may + // access a dangling pointer. + Rep* rep = new BlockBasedTable::Rep(ioptions, env_options, table_options, + internal_comparator, skip_filters); + rep->file = std::move(file); + rep->footer = footer; + rep->index_type = table_options.index_type; + rep->hash_index_allow_collision = table_options.hash_index_allow_collision; + // We need to wrap data with internal_prefix_transform to make sure it can + // handle prefix correctly. + rep->internal_prefix_transform.reset( + new InternalKeySliceTransform(rep->ioptions.prefix_extractor)); + SetupCacheKeyPrefix(rep, file_size); + unique_ptr new_table(new BlockBasedTable(rep)); + + // page cache options + rep->persistent_cache_options = + PersistentCacheOptions(rep->table_options.persistent_cache, + std::string(rep->persistent_cache_key_prefix, + rep->persistent_cache_key_prefix_size), + rep->ioptions.statistics); + + // Read meta index + std::unique_ptr meta; + std::unique_ptr meta_iter; + s = ReadMetaBlock(rep, &meta, &meta_iter); + if (!s.ok()) { + return s; + } + + // Find filter handle and filter type + if (rep->filter_policy) { + for (auto prefix : {kFullFilterBlockPrefix, kFilterBlockPrefix}) { + std::string filter_block_key = prefix; + filter_block_key.append(rep->filter_policy->Name()); + if (FindMetaBlock(meta_iter.get(), filter_block_key, &rep->filter_handle) + .ok()) { + rep->filter_type = (prefix == kFullFilterBlockPrefix) + ? Rep::FilterType::kFullFilter + : Rep::FilterType::kBlockFilter; + break; + } + } + } + + // Read the properties + bool found_properties_block = true; + s = SeekToPropertiesBlock(meta_iter.get(), &found_properties_block); + + if (!s.ok()) { + Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log, + "Cannot seek to properties block from file: %s", + s.ToString().c_str()); + } else if (found_properties_block) { + s = meta_iter->status(); + TableProperties* table_properties = nullptr; + if (s.ok()) { + s = ReadProperties(meta_iter->value(), rep->file.get(), rep->footer, + rep->ioptions, &table_properties); + } + + if (!s.ok()) { + Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log, + "Encountered error while reading data from properties " + "block %s", s.ToString().c_str()); + } else { + rep->table_properties.reset(table_properties); + } + } else { + Log(InfoLogLevel::ERROR_LEVEL, rep->ioptions.info_log, + "Cannot find Properties block from file."); + } + + // Read the compression dictionary meta block + bool found_compression_dict; + s = SeekToCompressionDictBlock(meta_iter.get(), &found_compression_dict); + if (!s.ok()) { + Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log, + "Cannot seek to compression dictionary block from file: %s", + s.ToString().c_str()); + } else if (found_compression_dict) { + // TODO(andrewkr): Add to block cache if cache_index_and_filter_blocks is + // true. + unique_ptr compression_dict_block{new BlockContents()}; + s = rocksdb::ReadMetaBlock(rep->file.get(), file_size, + kBlockBasedTableMagicNumber, rep->ioptions, + rocksdb::kCompressionDictBlock, + compression_dict_block.get()); + if (!s.ok()) { + Log(InfoLogLevel::WARN_LEVEL, rep->ioptions.info_log, + "Encountered error while reading data from compression dictionary " + "block %s", + s.ToString().c_str()); + } else { + rep->compression_dict_block = std::move(compression_dict_block); + } + } + + // Determine whether whole key filtering is supported. 
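IsFeatureSupported() above encodes the backward-compatibility rule applied next: a property that is absent was written by an older version and therefore counts as enabled; only an explicit kPropFalse ("0") disables the feature. Restated as a self-contained snippet:

    #include <map>
    #include <string>

    bool FeatureEnabled(const std::map<std::string, std::string>& props,
                        const std::string& name) {
      auto pos = props.find(name);
      if (pos == props.end()) return true;  // old file: assume supported
      return pos->second != "0";            // kPropFalse == "0", kPropTrue == "1"
    }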
+ if (rep->table_properties) { + rep->whole_key_filtering &= + IsFeatureSupported(*(rep->table_properties), + BlockBasedTablePropertyNames::kWholeKeyFiltering, + rep->ioptions.info_log); + rep->prefix_filtering &= IsFeatureSupported( + *(rep->table_properties), + BlockBasedTablePropertyNames::kPrefixFiltering, rep->ioptions.info_log); + } + + // pre-fetching of blocks is turned on + // Will use block cache for index/filter blocks access + // Always prefetch index and filter for level 0 + if (table_options.cache_index_and_filter_blocks) { + if (prefetch_index_and_filter_in_cache || level == 0) { + assert(table_options.block_cache != nullptr); + // Hack: Call NewIndexIterator() to implicitly add index to the + // block_cache + + // if pin_l0_filter_and_index_blocks_in_cache is true and this is + // a level0 file, then we will pass in this pointer to rep->index + // to NewIndexIterator(), which will save the index block in there + // else it's a nullptr and nothing special happens + CachableEntry* index_entry = nullptr; + if (rep->table_options.pin_l0_filter_and_index_blocks_in_cache && + level == 0) { + index_entry = &rep->index_entry; + } + unique_ptr iter( + new_table->NewIndexIterator(ReadOptions(), nullptr, index_entry)); + s = iter->status(); + + if (s.ok()) { + // Hack: Call GetFilter() to implicitly add filter to the block_cache + auto filter_entry = new_table->GetFilter(); + // if pin_l0_filter_and_index_blocks_in_cache is true, and this is + // a level0 file, then save it in rep_->filter_entry; it will be + // released in the destructor only, hence it will be pinned in the + // cache while this reader is alive + if (rep->table_options.pin_l0_filter_and_index_blocks_in_cache && + level == 0) { + rep->filter_entry = filter_entry; + } else { + filter_entry.Release(table_options.block_cache.get()); + } + } + } + } else { + // If we don't use block cache for index/filter blocks access, we'll + // pre-load these blocks, which will kept in member variables in Rep + // and with a same life-time as this table object. + IndexReader* index_reader = nullptr; + s = new_table->CreateIndexReader(&index_reader, meta_iter.get()); + + if (s.ok()) { + rep->index_reader.reset(index_reader); + + // Set filter block + if (rep->filter_policy) { + rep->filter.reset(ReadFilter(rep)); + } + } else { + delete index_reader; + } + } + + if (s.ok()) { + *table_reader = std::move(new_table); + } + + return s; +} + +void BlockBasedTable::SetupForCompaction() { + switch (rep_->ioptions.access_hint_on_compaction_start) { + case Options::NONE: + break; + case Options::NORMAL: + rep_->file->file()->Hint(RandomAccessFile::NORMAL); + break; + case Options::SEQUENTIAL: + rep_->file->file()->Hint(RandomAccessFile::SEQUENTIAL); + break; + case Options::WILLNEED: + rep_->file->file()->Hint(RandomAccessFile::WILLNEED); + break; + default: + assert(false); + } + compaction_optimized_ = true; +} + +std::shared_ptr BlockBasedTable::GetTableProperties() + const { + return rep_->table_properties; +} + +size_t BlockBasedTable::ApproximateMemoryUsage() const { + size_t usage = 0; + if (rep_->filter) { + usage += rep_->filter->ApproximateMemoryUsage(); + } + if (rep_->index_reader) { + usage += rep_->index_reader->ApproximateMemoryUsage(); + } + return usage; +} + +// Load the meta-block from the file. On success, return the loaded meta block +// and its iterator. 
+Status BlockBasedTable::ReadMetaBlock(Rep* rep, + std::unique_ptr* meta_block, + std::unique_ptr* iter) { + // TODO(sanjay): Skip this if footer.metaindex_handle() size indicates + // it is an empty block. + // TODO: we never really verify check sum for meta index block + std::unique_ptr meta; + Status s = ReadBlockFromFile( + rep->file.get(), rep->footer, ReadOptions(), + rep->footer.metaindex_handle(), &meta, rep->ioptions, + true /* decompress */, Slice() /*compression dict*/, + rep->persistent_cache_options); + + if (!s.ok()) { + Log(InfoLogLevel::ERROR_LEVEL, rep->ioptions.info_log, + "Encountered error while reading data from properties" + " block %s", s.ToString().c_str()); + return s; + } + + *meta_block = std::move(meta); + // meta block uses bytewise comparator. + iter->reset(meta_block->get()->NewIterator(BytewiseComparator())); + return Status::OK(); +} + +Status BlockBasedTable::GetDataBlockFromCache( + const Slice& block_cache_key, const Slice& compressed_block_cache_key, + Cache* block_cache, Cache* block_cache_compressed, + const ImmutableCFOptions &ioptions, const ReadOptions& read_options, + BlockBasedTable::CachableEntry* block, uint32_t format_version, + const Slice& compression_dict) { + Status s; + Block* compressed_block = nullptr; + Cache::Handle* block_cache_compressed_handle = nullptr; + Statistics* statistics = ioptions.statistics; + + // Lookup uncompressed cache first + if (block_cache != nullptr) { + block->cache_handle = + GetEntryFromCache(block_cache, block_cache_key, BLOCK_CACHE_DATA_MISS, + BLOCK_CACHE_DATA_HIT, statistics); + if (block->cache_handle != nullptr) { + block->value = + reinterpret_cast(block_cache->Value(block->cache_handle)); + return s; + } + } + + // If not found, search from the compressed block cache. 
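The lookup order implemented here is: uncompressed block cache first, then the compressed block cache, promoting decompressed hits into the first tier. A minimal sketch of that two-tier policy, with ordinary maps standing in for rocksdb::Cache and an identity function standing in for decompression:

    #include <map>
    #include <optional>
    #include <string>

    std::map<std::string, std::string> tier1;  // uncompressed blocks
    std::map<std::string, std::string> tier2;  // compressed blocks

    std::string Uncompress(const std::string& raw) { return raw; }  // stand-in

    std::optional<std::string> GetBlock(const std::string& key) {
      auto hit1 = tier1.find(key);
      if (hit1 != tier1.end()) return hit1->second;   // tier-1 hit
      auto hit2 = tier2.find(key);
      if (hit2 != tier2.end()) {
        std::string block = Uncompress(hit2->second); // tier-2 hit: decompress
        tier1[key] = block;                           // ...and promote
        return block;
      }
      return std::nullopt;  // full miss: the caller must read the file
    }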
+ assert(block->cache_handle == nullptr && block->value == nullptr); + + if (block_cache_compressed == nullptr) { + return s; + } + + assert(!compressed_block_cache_key.empty()); + block_cache_compressed_handle = + block_cache_compressed->Lookup(compressed_block_cache_key); + // if we found in the compressed cache, then uncompress and insert into + // uncompressed cache + if (block_cache_compressed_handle == nullptr) { + RecordTick(statistics, BLOCK_CACHE_COMPRESSED_MISS); + return s; + } + + // found compressed block + RecordTick(statistics, BLOCK_CACHE_COMPRESSED_HIT); + compressed_block = reinterpret_cast( + block_cache_compressed->Value(block_cache_compressed_handle)); + assert(compressed_block->compression_type() != kNoCompression); + + // Retrieve the uncompressed contents into a new buffer + BlockContents contents; + s = UncompressBlockContents(compressed_block->data(), + compressed_block->size(), &contents, + format_version, compression_dict, + ioptions); + + // Insert uncompressed block into block cache + if (s.ok()) { + block->value = new Block(std::move(contents)); // uncompressed block + assert(block->value->compression_type() == kNoCompression); + if (block_cache != nullptr && block->value->cachable() && + read_options.fill_cache) { + s = block_cache->Insert( + block_cache_key, block->value, block->value->usable_size(), + &DeleteCachedEntry, &(block->cache_handle)); + if (s.ok()) { + RecordTick(statistics, BLOCK_CACHE_ADD); + } else { + RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES); + delete block->value; + block->value = nullptr; + } + } + } + + // Release hold on compressed cache entry + block_cache_compressed->Release(block_cache_compressed_handle); + return s; +} + +Status BlockBasedTable::PutDataBlockToCache( + const Slice& block_cache_key, const Slice& compressed_block_cache_key, + Cache* block_cache, Cache* block_cache_compressed, + const ReadOptions& read_options, const ImmutableCFOptions &ioptions, + CachableEntry* block, Block* raw_block, uint32_t format_version, + const Slice& compression_dict) { + assert(raw_block->compression_type() == kNoCompression || + block_cache_compressed != nullptr); + + Status s; + // Retrieve the uncompressed contents into a new buffer + BlockContents contents; + Statistics* statistics = ioptions.statistics; + if (raw_block->compression_type() != kNoCompression) { + s = UncompressBlockContents(raw_block->data(), raw_block->size(), &contents, + format_version, compression_dict, ioptions); + } + if (!s.ok()) { + delete raw_block; + return s; + } + + if (raw_block->compression_type() != kNoCompression) { + block->value = new Block(std::move(contents)); // uncompressed block + } else { + block->value = raw_block; + raw_block = nullptr; + } + + // Insert compressed block into compressed block cache. + // Release the hold on the compressed cache entry immediately. + if (block_cache_compressed != nullptr && raw_block != nullptr && + raw_block->cachable()) { + s = block_cache_compressed->Insert(compressed_block_cache_key, raw_block, + raw_block->usable_size(), + &DeleteCachedEntry); + if (s.ok()) { + // Avoid the following code to delete this cached block. 
+ raw_block = nullptr; + RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD); + } else { + RecordTick(statistics, BLOCK_CACHE_COMPRESSED_ADD_FAILURES); + } + } + delete raw_block; + + // insert into uncompressed block cache + assert((block->value->compression_type() == kNoCompression)); + if (block_cache != nullptr && block->value->cachable()) { + s = block_cache->Insert(block_cache_key, block->value, + block->value->usable_size(), + &DeleteCachedEntry, &(block->cache_handle)); + if (s.ok()) { + assert(block->cache_handle != nullptr); + RecordTick(statistics, BLOCK_CACHE_ADD); + RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, + block->value->usable_size()); + assert(reinterpret_cast( + block_cache->Value(block->cache_handle)) == block->value); + } else { + RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES); + delete block->value; + block->value = nullptr; + } + } + + return s; +} + +FilterBlockReader* BlockBasedTable::ReadFilter(Rep* rep) { + // TODO: We might want to unify with ReadBlockFromFile() if we start + // requiring checksum verification in Table::Open. + if (rep->filter_type == Rep::FilterType::kNoFilter) { + return nullptr; + } + BlockContents block; + if (!ReadBlockContents(rep->file.get(), rep->footer, ReadOptions(), + rep->filter_handle, &block, rep->ioptions, + false /* decompress */, Slice() /*compression dict*/, + rep->persistent_cache_options) + .ok()) { + // Error reading the block + return nullptr; + } + + assert(rep->filter_policy); + + if (rep->filter_type == Rep::FilterType::kBlockFilter) { + return new BlockBasedFilterBlockReader( + rep->prefix_filtering ? rep->ioptions.prefix_extractor : nullptr, + rep->table_options, rep->whole_key_filtering, std::move(block), + rep->ioptions.statistics); + } else if (rep->filter_type == Rep::FilterType::kFullFilter) { + auto filter_bits_reader = + rep->filter_policy->GetFilterBitsReader(block.data); + if (filter_bits_reader != nullptr) { + return new FullFilterBlockReader( + rep->prefix_filtering ? rep->ioptions.prefix_extractor : nullptr, + rep->whole_key_filtering, std::move(block), filter_bits_reader, + rep->ioptions.statistics); + } + } + + // filter_type is either kNoFilter (exited the function at the first if), + // kBlockFilter or kFullFilter. there is no way for the execution to come here + assert(false); + return nullptr; +} + +BlockBasedTable::CachableEntry BlockBasedTable::GetFilter( + bool no_io) const { + // If cache_index_and_filter_blocks is false, filter should be pre-populated. + // We will return rep_->filter anyway. rep_->filter can be nullptr if filter + // read fails at Open() time. We don't want to reload again since it will + // most probably fail again. 
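The cache_index_and_filter_blocks mode referenced in that comment, together with the full-filter variant handled by ReadFilter() above, is selected through user-facing options. An illustrative configuration (standard fields; 10 bits per key is the conventional example value):

    #include "rocksdb/filter_policy.h"
    #include "rocksdb/table.h"

    rocksdb::BlockBasedTableOptions FilteredAndCacheManaged() {
      rocksdb::BlockBasedTableOptions t;
      // false => one full filter per file (the kFullFilter branch above)
      t.filter_policy.reset(
          rocksdb::NewBloomFilterPolicy(10, /*use_block_based_builder=*/false));
      t.cache_index_and_filter_blocks = true;            // go through block cache
      t.pin_l0_filter_and_index_blocks_in_cache = true;  // but pin for level-0
      return t;
      // With cache_index_and_filter_blocks = false (the default), the filter
      // is preloaded at Open() and owned by the reader, as described above.
    }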
+ if (!rep_->table_options.cache_index_and_filter_blocks) { + return {rep_->filter.get(), nullptr /* cache handle */}; + } + + Cache* block_cache = rep_->table_options.block_cache.get(); + if (rep_->filter_policy == nullptr /* do not use filter */ || + block_cache == nullptr /* no block cache at all */) { + return {nullptr /* filter */, nullptr /* cache handle */}; + } + + // we have a pinned filter block + if (rep_->filter_entry.IsSet()) { + return rep_->filter_entry; + } + + PERF_TIMER_GUARD(read_filter_block_nanos); + + // Fetching from the cache + char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length]; + auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size, + rep_->footer.metaindex_handle(), + cache_key); + + Statistics* statistics = rep_->ioptions.statistics; + auto cache_handle = + GetEntryFromCache(block_cache, key, BLOCK_CACHE_FILTER_MISS, + BLOCK_CACHE_FILTER_HIT, statistics); + + FilterBlockReader* filter = nullptr; + if (cache_handle != nullptr) { + filter = reinterpret_cast( + block_cache->Value(cache_handle)); + } else if (no_io) { + // Do not invoke any io. + return CachableEntry(); + } else { + filter = ReadFilter(rep_); + if (filter != nullptr) { + assert(filter->size() > 0); + Status s = block_cache->Insert(key, filter, filter->size(), + &DeleteCachedFilterEntry, &cache_handle); + if (s.ok()) { + RecordTick(statistics, BLOCK_CACHE_ADD); + RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, filter->size()); + RecordTick(statistics, BLOCK_CACHE_FILTER_BYTES_INSERT, filter->size()); + } else { + RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES); + delete filter; + return CachableEntry(); + } + } + } + + return { filter, cache_handle }; +} + +InternalIterator* BlockBasedTable::NewIndexIterator( + const ReadOptions& read_options, BlockIter* input_iter, + CachableEntry* index_entry) { + // index reader has already been pre-populated. + if (rep_->index_reader) { + return rep_->index_reader->NewIterator( + input_iter, read_options.total_order_seek); + } + // we have a pinned index block + if (rep_->index_entry.IsSet()) { + return rep_->index_entry.value->NewIterator(input_iter, + read_options.total_order_seek); + } + + PERF_TIMER_GUARD(read_index_block_nanos); + + bool no_io = read_options.read_tier == kBlockCacheTier; + Cache* block_cache = rep_->table_options.block_cache.get(); + char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length]; + auto key = + GetCacheKeyFromOffset(rep_->cache_key_prefix, rep_->cache_key_prefix_size, + rep_->dummy_index_reader_offset, cache_key); + Statistics* statistics = rep_->ioptions.statistics; + auto cache_handle = + GetEntryFromCache(block_cache, key, BLOCK_CACHE_INDEX_MISS, + BLOCK_CACHE_INDEX_HIT, statistics); + + if (cache_handle == nullptr && no_io) { + if (input_iter != nullptr) { + input_iter->SetStatus(Status::Incomplete("no blocking io")); + return input_iter; + } else { + return NewErrorInternalIterator(Status::Incomplete("no blocking io")); + } + } + + IndexReader* index_reader = nullptr; + if (cache_handle != nullptr) { + index_reader = + reinterpret_cast(block_cache->Value(cache_handle)); + } else { + // Create index reader and put it in the cache. 
+ Status s; + TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread2:2"); + s = CreateIndexReader(&index_reader); + TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread1:1"); + TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread2:3"); + TEST_SYNC_POINT("BlockBasedTable::NewIndexIterator::thread1:4"); + if (s.ok()) { + assert(index_reader != nullptr); + s = block_cache->Insert(key, index_reader, index_reader->usable_size(), + &DeleteCachedIndexEntry, &cache_handle); + } + + if (s.ok()) { + size_t usable_size = index_reader->usable_size(); + RecordTick(statistics, BLOCK_CACHE_ADD); + RecordTick(statistics, BLOCK_CACHE_BYTES_WRITE, usable_size); + RecordTick(statistics, BLOCK_CACHE_INDEX_BYTES_INSERT, usable_size); + } else { + if (index_reader != nullptr) { + delete index_reader; + } + RecordTick(statistics, BLOCK_CACHE_ADD_FAILURES); + // make sure if something goes wrong, index_reader shall remain intact. + if (input_iter != nullptr) { + input_iter->SetStatus(s); + return input_iter; + } else { + return NewErrorInternalIterator(s); + } + } + + } + + assert(cache_handle); + auto* iter = index_reader->NewIterator( + input_iter, read_options.total_order_seek); + + // the caller would like to take ownership of the index block + // don't call RegisterCleanup() in this case, the caller will take care of it + if (index_entry != nullptr) { + *index_entry = {index_reader, cache_handle}; + } else { + iter->RegisterCleanup(&ReleaseCachedEntry, block_cache, cache_handle); + } + + return iter; +} + +// Convert an index iterator value (i.e., an encoded BlockHandle) +// into an iterator over the contents of the corresponding block. +// If input_iter is null, new a iterator +// If input_iter is not null, update this iter and return it +InternalIterator* BlockBasedTable::NewDataBlockIterator( + Rep* rep, const ReadOptions& ro, const Slice& index_value, + BlockIter* input_iter) { + PERF_TIMER_GUARD(new_table_block_iter_nanos); + + const bool no_io = (ro.read_tier == kBlockCacheTier); + Cache* block_cache = rep->table_options.block_cache.get(); + Cache* block_cache_compressed = + rep->table_options.block_cache_compressed.get(); + CachableEntry block; + + BlockHandle handle; + Slice input = index_value; + // We intentionally allow extra stuff in index_value so that we + // can add more features in the future. + Status s = handle.DecodeFrom(&input); + + if (!s.ok()) { + if (input_iter != nullptr) { + input_iter->SetStatus(s); + return input_iter; + } else { + return NewErrorInternalIterator(s); + } + } + + Slice compression_dict; + if (rep->compression_dict_block) { + compression_dict = rep->compression_dict_block->data; + } + // If either block cache is enabled, we'll try to read from it. 
+ if (block_cache != nullptr || block_cache_compressed != nullptr) { + Statistics* statistics = rep->ioptions.statistics; + char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length]; + char compressed_cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length]; + Slice key, /* key to the block cache */ + ckey /* key to the compressed block cache */; + + // create key for block cache + if (block_cache != nullptr) { + key = GetCacheKey(rep->cache_key_prefix, rep->cache_key_prefix_size, + handle, cache_key); + } + + if (block_cache_compressed != nullptr) { + ckey = GetCacheKey(rep->compressed_cache_key_prefix, + rep->compressed_cache_key_prefix_size, handle, + compressed_cache_key); + } + + s = GetDataBlockFromCache( + key, ckey, block_cache, block_cache_compressed, rep->ioptions, ro, &block, + rep->table_options.format_version, compression_dict); + + if (block.value == nullptr && !no_io && ro.fill_cache) { + std::unique_ptr raw_block; + { + StopWatch sw(rep->ioptions.env, statistics, READ_BLOCK_GET_MICROS); + s = ReadBlockFromFile(rep->file.get(), rep->footer, ro, handle, + &raw_block, rep->ioptions, + block_cache_compressed == nullptr, + compression_dict, rep->persistent_cache_options); + } + + if (s.ok()) { + s = PutDataBlockToCache(key, ckey, block_cache, block_cache_compressed, + ro, rep->ioptions, &block, raw_block.release(), + rep->table_options.format_version, + compression_dict); + } + } + } + + // Didn't get any data from block caches. + if (s.ok() && block.value == nullptr) { + if (no_io) { + // Could not read from block_cache and can't do IO + if (input_iter != nullptr) { + input_iter->SetStatus(Status::Incomplete("no blocking io")); + return input_iter; + } else { + return NewErrorInternalIterator(Status::Incomplete("no blocking io")); + } + } + std::unique_ptr block_value; + s = ReadBlockFromFile(rep->file.get(), rep->footer, ro, handle, + &block_value, rep->ioptions, true /* compress */, + compression_dict, rep->persistent_cache_options); + if (s.ok()) { + block.value = block_value.release(); + } + } + + InternalIterator* iter; + if (s.ok()) { + assert(block.value != nullptr); + iter = block.value->NewIterator(&rep->internal_comparator, input_iter); + if (block.cache_handle != nullptr) { + iter->RegisterCleanup(&ReleaseCachedEntry, block_cache, + block.cache_handle); + } else { + iter->RegisterCleanup(&DeleteHeldResource, block.value, nullptr); + } + } else { + assert(block.value == nullptr); + if (input_iter != nullptr) { + input_iter->SetStatus(s); + iter = input_iter; + } else { + iter = NewErrorInternalIterator(s); + } + } + return iter; +} + +class BlockBasedTable::BlockEntryIteratorState : public TwoLevelIteratorState { + public: + BlockEntryIteratorState(BlockBasedTable* table, + const ReadOptions& read_options, bool skip_filters) + : TwoLevelIteratorState(table->rep_->ioptions.prefix_extractor != + nullptr), + table_(table), + read_options_(read_options), + skip_filters_(skip_filters) {} + + InternalIterator* NewSecondaryIterator(const Slice& index_value) override { + return NewDataBlockIterator(table_->rep_, read_options_, index_value); + } + + bool PrefixMayMatch(const Slice& internal_key) override { + if (read_options_.total_order_seek || skip_filters_) { + return true; + } + return table_->PrefixMayMatch(internal_key); + } + + private: + // Don't own table_ + BlockBasedTable* table_; + const ReadOptions read_options_; + bool skip_filters_; +}; + +// This will be broken if the user specifies an unusual implementation +// of Options.comparator, or if the user specifies 
an unusual
+// definition of prefixes in BlockBasedTableOptions.filter_policy.
+// In particular, we require the following three properties:
+//
+// 1) key.starts_with(prefix(key))
+// 2) Compare(prefix(key), key) <= 0.
+// 3) If Compare(key1, key2) <= 0, then Compare(prefix(key1), prefix(key2)) <= 0
+//
+// Otherwise, this method guarantees no I/O will be incurred.
+//
+// REQUIRES: this method shouldn't be called while the DB lock is held.
+bool BlockBasedTable::PrefixMayMatch(const Slice& internal_key) {
+  if (!rep_->filter_policy) {
+    return true;
+  }
+
+  assert(rep_->ioptions.prefix_extractor != nullptr);
+  auto user_key = ExtractUserKey(internal_key);
+  if (!rep_->ioptions.prefix_extractor->InDomain(user_key)) {
+    return true;
+  }
+  auto prefix = rep_->ioptions.prefix_extractor->Transform(user_key);
+  InternalKey internal_key_prefix(prefix, kMaxSequenceNumber, kTypeValue);
+  auto internal_prefix = internal_key_prefix.Encode();
+
+  bool may_match = true;
+  Status s;
+
+  // To prevent any io operation in this method, we set `read_tier` to make
+  // sure we always read index or filter only when they have already been
+  // loaded to memory.
+  ReadOptions no_io_read_options;
+  no_io_read_options.read_tier = kBlockCacheTier;
+
+  // First, check with the full filter.
+  auto filter_entry = GetFilter(true /* no io */);
+  FilterBlockReader* filter = filter_entry.value;
+  if (filter != nullptr) {
+    if (!filter->IsBlockBased()) {
+      may_match = filter->PrefixMayMatch(prefix);
+    } else {
+      // Otherwise, try to find it within each block.
+      unique_ptr<InternalIterator> iiter(NewIndexIterator(no_io_read_options));
+      iiter->Seek(internal_prefix);
+
+      if (!iiter->Valid()) {
+        // we're past end of file
+        // if it's incomplete, it means that we avoided I/O
+        // and we're not really sure that we're past the end
+        // of the file
+        may_match = iiter->status().IsIncomplete();
+      } else if (ExtractUserKey(iiter->key())
+                     .starts_with(ExtractUserKey(internal_prefix))) {
+        // we need to check for this subtle case because our only
+        // guarantee is that "the key is a string >= last key in that data
+        // block" according to the doc/table_format.txt spec.
+        //
+        // Suppose iiter->key() starts with the desired prefix; it is not
+        // necessarily the case that the corresponding data block will
+        // contain the prefix, since iiter->key() need not be in the
+        // block.  However, the next data block may contain the prefix, so
+        // we return true to play it safe.
+        may_match = true;
+      } else if (filter->IsBlockBased()) {
+        // iiter->key() does NOT start with the desired prefix.  Because
+        // Seek() finds the first key that is >= the seek target, this
+        // means that iiter->key() > prefix.  Thus, any data blocks coming
+        // after the data block corresponding to iiter->key() cannot
+        // possibly contain the key.  Thus, the corresponding data block
+        // is the only one that could potentially contain the prefix.
+        Slice handle_value = iiter->value();
+        BlockHandle handle;
+        s = handle.DecodeFrom(&handle_value);
+        assert(s.ok());
+        may_match = filter->PrefixMayMatch(prefix, handle.offset());
+      }
+    }
+  }
+
+  Statistics* statistics = rep_->ioptions.statistics;
+  RecordTick(statistics, BLOOM_FILTER_PREFIX_CHECKED);
+  if (!may_match) {
+    RecordTick(statistics, BLOOM_FILTER_PREFIX_USEFUL);
+  }
+
+  // If rep_->filter_entry is not set, we should call Release(); otherwise
+  // don't call: in that case we have a local copy in rep_->filter_entry,
+  // it's pinned to the cache and will be released in the destructor.
+  if (!rep_->filter_entry.IsSet()) {
+    filter_entry.Release(rep_->table_options.block_cache.get());
+  }
+
+  return may_match;
+}
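A prefix extractor that satisfies the three properties listed at the top of PrefixMayMatch() is the fixed-length transform from the public API: fixed prefixes trivially give key.starts_with(prefix(key)) and preserve ordering. For illustration:

    #include "rocksdb/options.h"
    #include "rocksdb/slice_transform.h"

    void UseFixedPrefix(rocksdb::Options* options) {
      // prefix(key) = first 8 bytes; shorter keys are out of the domain
      options->prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(8));
    }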
+
+InternalIterator* BlockBasedTable::NewIterator(const ReadOptions& read_options,
+                                               Arena* arena,
+                                               bool skip_filters) {
+  return NewTwoLevelIterator(
+      new BlockEntryIteratorState(this, read_options, skip_filters),
+      NewIndexIterator(read_options), arena);
+}
+
+bool BlockBasedTable::FullFilterKeyMayMatch(const ReadOptions& read_options,
+                                            FilterBlockReader* filter,
+                                            const Slice& internal_key) const {
+  if (filter == nullptr || filter->IsBlockBased()) {
+    return true;
+  }
+  Slice user_key = ExtractUserKey(internal_key);
+  if (filter->whole_key_filtering()) {
+    return filter->KeyMayMatch(user_key);
+  }
+  if (!read_options.total_order_seek && rep_->ioptions.prefix_extractor &&
+      rep_->ioptions.prefix_extractor->InDomain(user_key) &&
+      !filter->PrefixMayMatch(
+          rep_->ioptions.prefix_extractor->Transform(user_key))) {
+    return false;
+  }
+  return true;
+}
+
+Status BlockBasedTable::Get(const ReadOptions& read_options, const Slice& key,
+                            GetContext* get_context, bool skip_filters) {
+  Status s;
+  CachableEntry<FilterBlockReader> filter_entry;
+  if (!skip_filters) {
+    filter_entry = GetFilter(read_options.read_tier == kBlockCacheTier);
+  }
+  FilterBlockReader* filter = filter_entry.value;
+
+  // First check the full filter; if the full filter is not useful, go into
+  // each block.
+  if (!FullFilterKeyMayMatch(read_options, filter, key)) {
+    RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL);
+  } else {
+    BlockIter iiter;
+    NewIndexIterator(read_options, &iiter);
+
+    PinnedIteratorsManager* pinned_iters_mgr = get_context->pinned_iters_mgr();
+    bool pin_blocks = pinned_iters_mgr && pinned_iters_mgr->PinningEnabled();
+    BlockIter* biter = nullptr;
+
+    bool done = false;
+    for (iiter.Seek(key); iiter.Valid() && !done; iiter.Next()) {
+      Slice handle_value = iiter.value();
+
+      BlockHandle handle;
+      bool not_exist_in_filter =
+          filter != nullptr && filter->IsBlockBased() == true &&
+          handle.DecodeFrom(&handle_value).ok() &&
+          !filter->KeyMayMatch(ExtractUserKey(key), handle.offset());
+
+      if (not_exist_in_filter) {
+        // Not found
+        // TODO: think about interaction with Merge.  If a user key cannot
+        // cross one data block, we should be fine.
+ RecordTick(rep_->ioptions.statistics, BLOOM_FILTER_USEFUL); + break; + } else { + BlockIter stack_biter; + if (pin_blocks) { + // We need to create the BlockIter on heap because we may need to + // pin it if we encounterd merge operands + biter = static_cast( + NewDataBlockIterator(rep_, read_options, iiter.value())); + } else { + biter = &stack_biter; + NewDataBlockIterator(rep_, read_options, iiter.value(), biter); + } + + if (read_options.read_tier == kBlockCacheTier && + biter->status().IsIncomplete()) { + // couldn't get block from block_cache + // Update Saver.state to Found because we are only looking for whether + // we can guarantee the key is not there when "no_io" is set + get_context->MarkKeyMayExist(); + break; + } + if (!biter->status().ok()) { + s = biter->status(); + break; + } + + // Call the *saver function on each entry/block until it returns false + for (biter->Seek(key); biter->Valid(); biter->Next()) { + ParsedInternalKey parsed_key; + if (!ParseInternalKey(biter->key(), &parsed_key)) { + s = Status::Corruption(Slice()); + } + + if (!get_context->SaveValue(parsed_key, biter->value(), pin_blocks)) { + done = true; + break; + } + } + s = biter->status(); + + if (pin_blocks) { + if (get_context->State() == GetContext::kMerge) { + // Pin blocks as long as we are merging + pinned_iters_mgr->PinIteratorIfNeeded(biter); + } else { + delete biter; + } + biter = nullptr; + } else { + // biter is on stack, Nothing to clean + } + } + } + if (pin_blocks && biter != nullptr) { + delete biter; + } + if (s.ok()) { + s = iiter.status(); + } + } + + // if rep_->filter_entry is not set, we should call Release(); otherwise + // don't call, in this case we have a local copy in rep_->filter_entry, + // it's pinned to the cache and will be released in the destructor + if (!rep_->filter_entry.IsSet()) { + filter_entry.Release(rep_->table_options.block_cache.get()); + } + return s; +} + +Status BlockBasedTable::Prefetch(const Slice* const begin, + const Slice* const end) { + auto& comparator = rep_->internal_comparator; + // pre-condition + if (begin && end && comparator.Compare(*begin, *end) > 0) { + return Status::InvalidArgument(*begin, *end); + } + + BlockIter iiter; + NewIndexIterator(ReadOptions(), &iiter); + + if (!iiter.status().ok()) { + // error opening index iterator + return iiter.status(); + } + + // indicates if we are on the last page that need to be pre-fetched + bool prefetching_boundary_page = false; + + for (begin ? iiter.Seek(*begin) : iiter.SeekToFirst(); iiter.Valid(); + iiter.Next()) { + Slice block_handle = iiter.value(); + + if (end && comparator.Compare(iiter.key(), *end) >= 0) { + if (prefetching_boundary_page) { + break; + } + + // The index entry represents the last key in the data block. 
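The boundary handling in Prefetch() deserves a standalone illustration: each index key is only an upper bound for its data block, so the first block whose index key compares >= *end can still hold keys inside the range and must be fetched before stopping. A toy model of that loop, using plain pairs in place of a real index iterator:

#include <cstdio>
#include <string>
#include <utility>
#include <vector>

int main() {
  // Hypothetical index: one (last key in block, block id) pair per block.
  std::vector<std::pair<std::string, int>> index = {
      {"d", 0}, {"h", 1}, {"p", 2}, {"z", 3}};
  const std::string begin = "e", end = "j";
  bool boundary_seen = false;
  for (const auto& entry : index) {
    if (entry.first < begin) continue;  // what Seek(begin) skips
    if (entry.first >= end) {
      if (boundary_seen) break;         // boundary already fetched
      boundary_seen = true;             // fetch this one block, then stop
    }
    std::printf("prefetch block %d\n", entry.second);  // prints blocks 1 and 2
  }
  return 0;
}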
+ // We should load this page into memory as well, but no more + prefetching_boundary_page = true; + } + + // Load the block specified by the block_handle into the block cache + BlockIter biter; + NewDataBlockIterator(rep_, ReadOptions(), block_handle, &biter); + + if (!biter.status().ok()) { + // there was an unexpected error while pre-fetching + return biter.status(); + } + } + + return Status::OK(); +} + +bool BlockBasedTable::TEST_KeyInCache(const ReadOptions& options, + const Slice& key) { + std::unique_ptr iiter(NewIndexIterator(options)); + iiter->Seek(key); + assert(iiter->Valid()); + CachableEntry block; + + BlockHandle handle; + Slice input = iiter->value(); + Status s = handle.DecodeFrom(&input); + assert(s.ok()); + Cache* block_cache = rep_->table_options.block_cache.get(); + assert(block_cache != nullptr); + + char cache_key_storage[kMaxCacheKeyPrefixSize + kMaxVarint64Length]; + Slice cache_key = + GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size, + handle, cache_key_storage); + Slice ckey; + + s = GetDataBlockFromCache(cache_key, ckey, block_cache, nullptr, + rep_->ioptions, options, &block, + rep_->table_options.format_version, + rep_->compression_dict_block + ? rep_->compression_dict_block->data + : Slice()); + assert(s.ok()); + bool in_cache = block.value != nullptr; + if (in_cache) { + ReleaseCachedEntry(block_cache, block.cache_handle); + } + return in_cache; +} + +// REQUIRES: The following fields of rep_ should have already been populated: +// 1. file +// 2. index_handle, +// 3. options +// 4. internal_comparator +// 5. index_type +Status BlockBasedTable::CreateIndexReader( + IndexReader** index_reader, InternalIterator* preloaded_meta_index_iter) { + // Some old version of block-based tables don't have index type present in + // table properties. If that's the case we can safely use the kBinarySearch. + auto index_type_on_file = BlockBasedTableOptions::kBinarySearch; + if (rep_->table_properties) { + auto& props = rep_->table_properties->user_collected_properties; + auto pos = props.find(BlockBasedTablePropertyNames::kIndexType); + if (pos != props.end()) { + index_type_on_file = static_cast( + DecodeFixed32(pos->second.c_str())); + } + } + + auto file = rep_->file.get(); + auto comparator = &rep_->internal_comparator; + const Footer& footer = rep_->footer; + if (index_type_on_file == BlockBasedTableOptions::kHashSearch && + rep_->ioptions.prefix_extractor == nullptr) { + Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log, + "BlockBasedTableOptions::kHashSearch requires " + "options.prefix_extractor to be set." + " Fall back to binary search index."); + index_type_on_file = BlockBasedTableOptions::kBinarySearch; + } + + switch (index_type_on_file) { + case BlockBasedTableOptions::kBinarySearch: { + return BinarySearchIndexReader::Create( + file, footer, footer.index_handle(), rep_->ioptions, comparator, + index_reader, rep_->persistent_cache_options); + } + case BlockBasedTableOptions::kHashSearch: { + std::unique_ptr meta_guard; + std::unique_ptr meta_iter_guard; + auto meta_index_iter = preloaded_meta_index_iter; + if (meta_index_iter == nullptr) { + auto s = ReadMetaBlock(rep_, &meta_guard, &meta_iter_guard); + if (!s.ok()) { + // we simply fall back to binary search in case there is any + // problem with prefix hash index loading. + Log(InfoLogLevel::WARN_LEVEL, rep_->ioptions.info_log, + "Unable to read the metaindex block." 
+ " Fall back to binary search index."); + return BinarySearchIndexReader::Create( + file, footer, footer.index_handle(), rep_->ioptions, comparator, + index_reader, rep_->persistent_cache_options); + } + meta_index_iter = meta_iter_guard.get(); + } + + return HashIndexReader::Create( + rep_->internal_prefix_transform.get(), footer, file, rep_->ioptions, + comparator, footer.index_handle(), meta_index_iter, index_reader, + rep_->hash_index_allow_collision, rep_->persistent_cache_options); + } + default: { + std::string error_message = + "Unrecognized index type: " + ToString(rep_->index_type); + return Status::InvalidArgument(error_message.c_str()); + } + } +} + +uint64_t BlockBasedTable::ApproximateOffsetOf(const Slice& key) { + unique_ptr index_iter(NewIndexIterator(ReadOptions())); + + index_iter->Seek(key); + uint64_t result; + if (index_iter->Valid()) { + BlockHandle handle; + Slice input = index_iter->value(); + Status s = handle.DecodeFrom(&input); + if (s.ok()) { + result = handle.offset(); + } else { + // Strange: we can't decode the block handle in the index block. + // We'll just return the offset of the metaindex block, which is + // close to the whole file size for this case. + result = rep_->footer.metaindex_handle().offset(); + } + } else { + // key is past the last key in the file. If table_properties is not + // available, approximate the offset by returning the offset of the + // metaindex block (which is right near the end of the file). + result = 0; + if (rep_->table_properties) { + result = rep_->table_properties->data_size; + } + // table_properties is not present in the table. + if (result == 0) { + result = rep_->footer.metaindex_handle().offset(); + } + } + return result; +} + +bool BlockBasedTable::TEST_filter_block_preloaded() const { + return rep_->filter != nullptr; +} + +bool BlockBasedTable::TEST_index_reader_preloaded() const { + return rep_->index_reader != nullptr; +} + +Status BlockBasedTable::DumpTable(WritableFile* out_file) { + // Output Footer + out_file->Append( + "Footer Details:\n" + "--------------------------------------\n" + " "); + out_file->Append(rep_->footer.ToString().c_str()); + out_file->Append("\n"); + + // Output MetaIndex + out_file->Append( + "Metaindex Details:\n" + "--------------------------------------\n"); + std::unique_ptr meta; + std::unique_ptr meta_iter; + Status s = ReadMetaBlock(rep_, &meta, &meta_iter); + if (s.ok()) { + for (meta_iter->SeekToFirst(); meta_iter->Valid(); meta_iter->Next()) { + s = meta_iter->status(); + if (!s.ok()) { + return s; + } + if (meta_iter->key() == rocksdb::kPropertiesBlock) { + out_file->Append(" Properties block handle: "); + out_file->Append(meta_iter->value().ToString(true).c_str()); + out_file->Append("\n"); + } else if (meta_iter->key() == rocksdb::kCompressionDictBlock) { + out_file->Append(" Compression dictionary block handle: "); + out_file->Append(meta_iter->value().ToString(true).c_str()); + out_file->Append("\n"); + } else if (strstr(meta_iter->key().ToString().c_str(), + "filter.rocksdb.") != nullptr) { + out_file->Append(" Filter block handle: "); + out_file->Append(meta_iter->value().ToString(true).c_str()); + out_file->Append("\n"); + } + } + out_file->Append("\n"); + } else { + return s; + } + + // Output TableProperties + const rocksdb::TableProperties* table_properties; + table_properties = rep_->table_properties.get(); + + if (table_properties != nullptr) { + out_file->Append( + "Table Properties:\n" + "--------------------------------------\n" + " "); + 
out_file->Append(table_properties->ToString("\n ", ": ").c_str()); + out_file->Append("\n"); + } + + // Output Filter blocks + if (!rep_->filter && !table_properties->filter_policy_name.empty()) { + // Support only BloomFilter as off now + rocksdb::BlockBasedTableOptions table_options; + table_options.filter_policy.reset(rocksdb::NewBloomFilterPolicy(1)); + if (table_properties->filter_policy_name.compare( + table_options.filter_policy->Name()) == 0) { + std::string filter_block_key = kFilterBlockPrefix; + filter_block_key.append(table_properties->filter_policy_name); + BlockHandle handle; + if (FindMetaBlock(meta_iter.get(), filter_block_key, &handle).ok()) { + BlockContents block; + if (ReadBlockContents( + rep_->file.get(), rep_->footer, ReadOptions(), handle, &block, + rep_->ioptions, false /*decompress*/, + Slice() /*compression dict*/, rep_->persistent_cache_options) + .ok()) { + rep_->filter.reset(new BlockBasedFilterBlockReader( + rep_->ioptions.prefix_extractor, table_options, + table_options.whole_key_filtering, std::move(block), + rep_->ioptions.statistics)); + } + } + } + } + if (rep_->filter) { + out_file->Append( + "Filter Details:\n" + "--------------------------------------\n" + " "); + out_file->Append(rep_->filter->ToString().c_str()); + out_file->Append("\n"); + } + + // Output Index block + s = DumpIndexBlock(out_file); + if (!s.ok()) { + return s; + } + // Output Data blocks + s = DumpDataBlocks(out_file); + + return s; +} + +void BlockBasedTable::Close() { + rep_->filter_entry.Release(rep_->table_options.block_cache.get()); + rep_->index_entry.Release(rep_->table_options.block_cache.get()); + // cleanup index and filter blocks to avoid accessing dangling pointer + if (!rep_->table_options.no_block_cache) { + char cache_key[kMaxCacheKeyPrefixSize + kMaxVarint64Length]; + // Get the filter block key + auto key = GetCacheKey(rep_->cache_key_prefix, rep_->cache_key_prefix_size, + rep_->footer.metaindex_handle(), cache_key); + rep_->table_options.block_cache.get()->Erase(key); + // Get the index block key + key = GetCacheKeyFromOffset(rep_->cache_key_prefix, + rep_->cache_key_prefix_size, + rep_->dummy_index_reader_offset, cache_key); + rep_->table_options.block_cache.get()->Erase(key); + } +} + +Status BlockBasedTable::DumpIndexBlock(WritableFile* out_file) { + out_file->Append( + "Index Details:\n" + "--------------------------------------\n"); + + std::unique_ptr blockhandles_iter( + NewIndexIterator(ReadOptions())); + Status s = blockhandles_iter->status(); + if (!s.ok()) { + out_file->Append("Can not read Index Block \n\n"); + return s; + } + + out_file->Append(" Block key hex dump: Data block handle\n"); + out_file->Append(" Block key ascii\n\n"); + for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid(); + blockhandles_iter->Next()) { + s = blockhandles_iter->status(); + if (!s.ok()) { + break; + } + Slice key = blockhandles_iter->key(); + InternalKey ikey; + ikey.DecodeFrom(key); + + out_file->Append(" HEX "); + out_file->Append(ikey.user_key().ToString(true).c_str()); + out_file->Append(": "); + out_file->Append(blockhandles_iter->value().ToString(true).c_str()); + out_file->Append("\n"); + + std::string str_key = ikey.user_key().ToString(); + std::string res_key(""); + char cspace = ' '; + for (size_t i = 0; i < str_key.size(); i++) { + res_key.append(&str_key[i], 1); + res_key.append(1, cspace); + } + out_file->Append(" ASCII "); + out_file->Append(res_key.c_str()); + out_file->Append("\n ------\n"); + } + out_file->Append("\n"); + return Status::OK(); 
+} + +Status BlockBasedTable::DumpDataBlocks(WritableFile* out_file) { + std::unique_ptr blockhandles_iter( + NewIndexIterator(ReadOptions())); + Status s = blockhandles_iter->status(); + if (!s.ok()) { + out_file->Append("Can not read Index Block \n\n"); + return s; + } + + size_t block_id = 1; + for (blockhandles_iter->SeekToFirst(); blockhandles_iter->Valid(); + block_id++, blockhandles_iter->Next()) { + s = blockhandles_iter->status(); + if (!s.ok()) { + break; + } + + out_file->Append("Data Block # "); + out_file->Append(rocksdb::ToString(block_id)); + out_file->Append(" @ "); + out_file->Append(blockhandles_iter->value().ToString(true).c_str()); + out_file->Append("\n"); + out_file->Append("--------------------------------------\n"); + + std::unique_ptr datablock_iter; + datablock_iter.reset( + NewDataBlockIterator(rep_, ReadOptions(), blockhandles_iter->value())); + s = datablock_iter->status(); + + if (!s.ok()) { + out_file->Append("Error reading the block - Skipped \n\n"); + continue; + } + + for (datablock_iter->SeekToFirst(); datablock_iter->Valid(); + datablock_iter->Next()) { + s = datablock_iter->status(); + if (!s.ok()) { + out_file->Append("Error reading the block - Skipped \n"); + break; + } + Slice key = datablock_iter->key(); + Slice value = datablock_iter->value(); + InternalKey ikey, iValue; + ikey.DecodeFrom(key); + iValue.DecodeFrom(value); + + out_file->Append(" HEX "); + out_file->Append(ikey.user_key().ToString(true).c_str()); + out_file->Append(": "); + out_file->Append(iValue.user_key().ToString(true).c_str()); + out_file->Append("\n"); + + std::string str_key = ikey.user_key().ToString(); + std::string str_value = iValue.user_key().ToString(); + std::string res_key(""), res_value(""); + char cspace = ' '; + for (size_t i = 0; i < str_key.size(); i++) { + res_key.append(&str_key[i], 1); + res_key.append(1, cspace); + } + for (size_t i = 0; i < str_value.size(); i++) { + res_value.append(&str_value[i], 1); + res_value.append(1, cspace); + } + + out_file->Append(" ASCII "); + out_file->Append(res_key.c_str()); + out_file->Append(": "); + out_file->Append(res_value.c_str()); + out_file->Append("\n ------\n"); + } + out_file->Append("\n"); + } + return Status::OK(); +} + +namespace { + +void DeleteCachedFilterEntry(const Slice& key, void* value) { + FilterBlockReader* filter = reinterpret_cast(value); + if (filter->statistics() != nullptr) { + RecordTick(filter->statistics(), BLOCK_CACHE_FILTER_BYTES_EVICT, + filter->size()); + } + delete filter; +} + +void DeleteCachedIndexEntry(const Slice& key, void* value) { + IndexReader* index_reader = reinterpret_cast(value); + if (index_reader->statistics() != nullptr) { + RecordTick(index_reader->statistics(), BLOCK_CACHE_INDEX_BYTES_EVICT, + index_reader->usable_size()); + } + delete index_reader; +} + +} // anonymous namespace + +} // namespace rocksdb diff --git a/external/rocksdb/table/block_based_table_reader.h b/external/rocksdb/table/block_based_table_reader.h new file mode 100755 index 0000000000..9c8b1b1148 --- /dev/null +++ b/external/rocksdb/table/block_based_table_reader.h @@ -0,0 +1,251 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once + +#include +#include +#include +#include + +#include "rocksdb/options.h" +#include "rocksdb/persistent_cache.h" +#include "rocksdb/statistics.h" +#include "rocksdb/status.h" +#include "rocksdb/table.h" +#include "table/table_properties_internal.h" +#include "table/table_reader.h" +#include "util/coding.h" +#include "util/file_reader_writer.h" + +namespace rocksdb { + +class Block; +class BlockIter; +class BlockHandle; +class Cache; +class FilterBlockReader; +class BlockBasedFilterBlockReader; +class FullFilterBlockReader; +class Footer; +class InternalKeyComparator; +class Iterator; +class RandomAccessFile; +class TableCache; +class TableReader; +class WritableFile; +struct BlockBasedTableOptions; +struct EnvOptions; +struct ReadOptions; +class GetContext; +class InternalIterator; + +using std::unique_ptr; + +// A Table is a sorted map from strings to strings. Tables are +// immutable and persistent. A Table may be safely accessed from +// multiple threads without external synchronization. +class BlockBasedTable : public TableReader { + public: + static const std::string kFilterBlockPrefix; + static const std::string kFullFilterBlockPrefix; + // The longest prefix of the cache key used to identify blocks. + // For Posix files the unique ID is three varints. + static const size_t kMaxCacheKeyPrefixSize = kMaxVarint64Length * 3 + 1; + + // Attempt to open the table that is stored in bytes [0..file_size) + // of "file", and read the metadata entries necessary to allow + // retrieving data from the table. + // + // If successful, returns ok and sets "*table_reader" to the newly opened + // table. The client should delete "*table_reader" when no longer needed. + // If there was an error while initializing the table, sets "*table_reader" + // to nullptr and returns a non-ok status. + // + // @param file must remain live while this Table is in use. + // @param prefetch_index_and_filter_in_cache can be used to disable + // prefetching of + // index and filter blocks into block cache at startup + // @param skip_filters Disables loading/accessing the filter block. Overrides + // prefetch_index_and_filter_in_cache, so filter will be skipped if both + // are set. + static Status Open(const ImmutableCFOptions& ioptions, + const EnvOptions& env_options, + const BlockBasedTableOptions& table_options, + const InternalKeyComparator& internal_key_comparator, + unique_ptr&& file, + uint64_t file_size, unique_ptr* table_reader, + bool prefetch_index_and_filter_in_cache = true, + bool skip_filters = false, int level = -1); + + bool PrefixMayMatch(const Slice& internal_key); + + // Returns a new iterator over the table contents. + // The result of NewIterator() is initially invalid (caller must + // call one of the Seek methods on the iterator before using it). + // @param skip_filters Disables loading/accessing the filter block + InternalIterator* NewIterator(const ReadOptions&, Arena* arena = nullptr, + bool skip_filters = false) override; + + // @param skip_filters Disables loading/accessing the filter block + Status Get(const ReadOptions& readOptions, const Slice& key, + GetContext* get_context, bool skip_filters = false) override; + + // Pre-fetch the disk blocks that correspond to the key range specified by + // (kbegin, kend). The call will return error status in the event of + // IO or iteration error. 
+ Status Prefetch(const Slice* begin, const Slice* end) override; + + // Given a key, return an approximate byte offset in the file where + // the data for that key begins (or would begin if the key were + // present in the file). The returned value is in terms of file + // bytes, and so includes effects like compression of the underlying data. + // E.g., the approximate offset of the last key in the table will + // be close to the file length. + uint64_t ApproximateOffsetOf(const Slice& key) override; + + // Returns true if the block for the specified key is in cache. + // REQUIRES: key is in this table && block cache enabled + bool TEST_KeyInCache(const ReadOptions& options, const Slice& key); + + // Set up the table for Compaction. Might change some parameters with + // posix_fadvise + void SetupForCompaction() override; + + std::shared_ptr GetTableProperties() const override; + + size_t ApproximateMemoryUsage() const override; + + // convert SST file to a human readable form + Status DumpTable(WritableFile* out_file) override; + + void Close() override; + + ~BlockBasedTable(); + + bool TEST_filter_block_preloaded() const; + bool TEST_index_reader_preloaded() const; + // Implementation of IndexReader will be exposed to internal cc file only. + class IndexReader; + + static Slice GetCacheKey(const char* cache_key_prefix, + size_t cache_key_prefix_size, + const BlockHandle& handle, char* cache_key); + + private: + template + struct CachableEntry; + + struct Rep; + Rep* rep_; + bool compaction_optimized_; + + class BlockEntryIteratorState; + // input_iter: if it is not null, update this one and return it as Iterator + static InternalIterator* NewDataBlockIterator( + Rep* rep, const ReadOptions& ro, const Slice& index_value, + BlockIter* input_iter = nullptr); + + // For the following two functions: + // if `no_io == true`, we will not try to read filter/index from sst file + // were they not present in cache yet. + CachableEntry GetFilter(bool no_io = false) const; + + // Get the iterator from the index reader. + // If input_iter is not set, return new Iterator + // If input_iter is set, update it and return it as Iterator + // + // Note: ErrorIterator with Status::Incomplete shall be returned if all the + // following conditions are met: + // 1. We enabled table_options.cache_index_and_filter_blocks. + // 2. index is not present in block cache. + // 3. We disallowed any io to be performed, that is, read_options == + // kBlockCacheTier + InternalIterator* NewIndexIterator( + const ReadOptions& read_options, BlockIter* input_iter = nullptr, + CachableEntry* index_entry = nullptr); + + // Read block cache from block caches (if set): block_cache and + // block_cache_compressed. + // On success, Status::OK with be returned and @block will be populated with + // pointer to the block as well as its block handle. + // @param compression_dict Data for presetting the compression library's + // dictionary. + static Status GetDataBlockFromCache( + const Slice& block_cache_key, const Slice& compressed_block_cache_key, + Cache* block_cache, Cache* block_cache_compressed, + const ImmutableCFOptions &ioptions, const ReadOptions& read_options, + BlockBasedTable::CachableEntry* block, uint32_t format_version, + const Slice& compression_dict); + + // Put a raw block (maybe compressed) to the corresponding block caches. + // This method will perform decompression against raw_block if needed and then + // populate the block caches. 
+ // On success, Status::OK will be returned; also @block will be populated with + // uncompressed block and its cache handle. + // + // REQUIRES: raw_block is heap-allocated. PutDataBlockToCache() will be + // responsible for releasing its memory if error occurs. + // @param compression_dict Data for presetting the compression library's + // dictionary. + static Status PutDataBlockToCache( + const Slice& block_cache_key, const Slice& compressed_block_cache_key, + Cache* block_cache, Cache* block_cache_compressed, + const ReadOptions& read_options, const ImmutableCFOptions &ioptions, + CachableEntry* block, Block* raw_block, uint32_t format_version, + const Slice& compression_dict); + + // Calls (*handle_result)(arg, ...) repeatedly, starting with the entry found + // after a call to Seek(key), until handle_result returns false. + // May not make such a call if filter policy says that key is not present. + friend class TableCache; + friend class BlockBasedTableBuilder; + + void ReadMeta(const Footer& footer); + + // Create a index reader based on the index type stored in the table. + // Optionally, user can pass a preloaded meta_index_iter for the index that + // need to access extra meta blocks for index construction. This parameter + // helps avoid re-reading meta index block if caller already created one. + Status CreateIndexReader( + IndexReader** index_reader, + InternalIterator* preloaded_meta_index_iter = nullptr); + + bool FullFilterKeyMayMatch(const ReadOptions& read_options, + FilterBlockReader* filter, + const Slice& user_key) const; + + // Read the meta block from sst. + static Status ReadMetaBlock(Rep* rep, std::unique_ptr* meta_block, + std::unique_ptr* iter); + + // Create the filter from the filter block. + static FilterBlockReader* ReadFilter(Rep* rep); + + static void SetupCacheKeyPrefix(Rep* rep, uint64_t file_size); + + explicit BlockBasedTable(Rep* rep) + : rep_(rep), compaction_optimized_(false) {} + + // Generate a cache key prefix from the file + static void GenerateCachePrefix(Cache* cc, + RandomAccessFile* file, char* buffer, size_t* size); + static void GenerateCachePrefix(Cache* cc, + WritableFile* file, char* buffer, size_t* size); + + // Helper functions for DumpTable() + Status DumpIndexBlock(WritableFile* out_file); + Status DumpDataBlocks(WritableFile* out_file); + + // No copying allowed + explicit BlockBasedTable(const TableReader&) = delete; + void operator=(const TableReader&) = delete; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/table/block_builder.cc b/external/rocksdb/table/block_builder.cc new file mode 100755 index 0000000000..10901aaa02 --- /dev/null +++ b/external/rocksdb/table/block_builder.cc @@ -0,0 +1,131 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +// BlockBuilder generates blocks where keys are prefix-compressed: +// +// When we store a key, we drop the prefix shared with the previous +// string. This helps reduce the space requirement significantly. 
+// Furthermore, once every K keys, we do not apply the prefix +// compression and store the entire key. We call this a "restart +// point". The tail end of the block stores the offsets of all of the +// restart points, and can be used to do a binary search when looking +// for a particular key. Values are stored as-is (without compression) +// immediately following the corresponding key. +// +// An entry for a particular key-value pair has the form: +// shared_bytes: varint32 +// unshared_bytes: varint32 +// value_length: varint32 +// key_delta: char[unshared_bytes] +// value: char[value_length] +// shared_bytes == 0 for restart points. +// +// The trailer of the block has the form: +// restarts: uint32[num_restarts] +// num_restarts: uint32 +// restarts[i] contains the offset within the block of the ith restart point. + +#include "table/block_builder.h" + +#include +#include +#include "rocksdb/comparator.h" +#include "db/dbformat.h" +#include "util/coding.h" + +namespace rocksdb { + +BlockBuilder::BlockBuilder(int block_restart_interval, bool use_delta_encoding) + : block_restart_interval_(block_restart_interval), + use_delta_encoding_(use_delta_encoding), + restarts_(), + counter_(0), + finished_(false) { + assert(block_restart_interval_ >= 1); + restarts_.push_back(0); // First restart point is at offset 0 + estimate_ = sizeof(uint32_t) + sizeof(uint32_t); +} + +void BlockBuilder::Reset() { + buffer_.clear(); + restarts_.clear(); + restarts_.push_back(0); // First restart point is at offset 0 + estimate_ = sizeof(uint32_t) + sizeof(uint32_t); + counter_ = 0; + finished_ = false; + last_key_.clear(); +} + +size_t BlockBuilder::EstimateSizeAfterKV(const Slice& key, const Slice& value) + const { + size_t estimate = CurrentSizeEstimate(); + estimate += key.size() + value.size(); + if (counter_ >= block_restart_interval_) { + estimate += sizeof(uint32_t); // a new restart entry. + } + + estimate += sizeof(int32_t); // varint for shared prefix length. + estimate += VarintLength(key.size()); // varint for key length. + estimate += VarintLength(value.size()); // varint for value length. + + return estimate; +} + +Slice BlockBuilder::Finish() { + // Append restart array + for (size_t i = 0; i < restarts_.size(); i++) { + PutFixed32(&buffer_, restarts_[i]); + } + PutFixed32(&buffer_, static_cast(restarts_.size())); + finished_ = true; + return Slice(buffer_); +} + +void BlockBuilder::Add(const Slice& key, const Slice& value) { + assert(!finished_); + assert(counter_ <= block_restart_interval_); + size_t shared = 0; // number of bytes shared with prev key + if (counter_ >= block_restart_interval_) { + // Restart compression + restarts_.push_back(static_cast(buffer_.size())); + estimate_ += sizeof(uint32_t); + counter_ = 0; + + if (use_delta_encoding_) { + // Update state + last_key_.assign(key.data(), key.size()); + } + } else if (use_delta_encoding_) { + Slice last_key_piece(last_key_); + // See how much sharing to do with previous string + shared = key.difference_offset(last_key_piece); + + // Update state + // We used to just copy the changed data here, but it appears to be + // faster to just copy the whole thing. 
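The entry layout documented at the top of this file can be decoded with nothing more than a varint reader. A minimal sketch; GetVarint32 here is a simplified stand-in for the helper of the same name in util/coding.h:

#include <cstdint>
#include <string>

// Simplified stand-in for rocksdb's GetVarint32.
static const char* GetVarint32(const char* p, const char* limit, uint32_t* v) {
  uint32_t result = 0;
  for (uint32_t shift = 0; shift <= 28 && p < limit; shift += 7) {
    uint32_t byte = static_cast<unsigned char>(*p++);
    result |= (byte & 0x7f) << shift;
    if ((byte & 0x80) == 0) {
      *v = result;
      return p;
    }
  }
  return nullptr;  // truncated or malformed varint
}

// Rebuilds the next full key (into *last_key) and its value from one entry.
const char* DecodeEntry(const char* p, const char* limit,
                        std::string* last_key, std::string* value) {
  uint32_t shared, non_shared, value_len;
  if ((p = GetVarint32(p, limit, &shared)) == nullptr) return nullptr;
  if ((p = GetVarint32(p, limit, &non_shared)) == nullptr) return nullptr;
  if ((p = GetVarint32(p, limit, &value_len)) == nullptr) return nullptr;
  if (shared > last_key->size()) return nullptr;  // corrupt entry
  if (static_cast<uint64_t>(limit - p) <
      static_cast<uint64_t>(non_shared) + value_len) {
    return nullptr;
  }
  last_key->resize(shared);         // keep the shared prefix of the prev key
  last_key->append(p, non_shared);  // append the unshared key delta
  value->assign(p + non_shared, value_len);
  return p + non_shared + value_len;
}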
+ last_key_.assign(key.data(), key.size()); + } + + const size_t non_shared = key.size() - shared; + const size_t curr_size = buffer_.size(); + + // Add "" to buffer_ + PutVarint32Varint32Varint32(&buffer_, static_cast(shared), + static_cast(non_shared), + static_cast(value.size())); + + // Add string delta to buffer_ followed by value + buffer_.append(key.data() + shared, non_shared); + buffer_.append(value.data(), value.size()); + + counter_++; + estimate_ += buffer_.size() - curr_size; +} + +} // namespace rocksdb diff --git a/external/rocksdb/table/block_builder.h b/external/rocksdb/table/block_builder.h new file mode 100755 index 0000000000..898e1ade21 --- /dev/null +++ b/external/rocksdb/table/block_builder.h @@ -0,0 +1,62 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#include + +#include +#include "rocksdb/slice.h" + +namespace rocksdb { + +class BlockBuilder { + public: + BlockBuilder(const BlockBuilder&) = delete; + void operator=(const BlockBuilder&) = delete; + + explicit BlockBuilder(int block_restart_interval, + bool use_delta_encoding = true); + + // Reset the contents as if the BlockBuilder was just constructed. + void Reset(); + + // REQUIRES: Finish() has not been callled since the last call to Reset(). + // REQUIRES: key is larger than any previously added key + void Add(const Slice& key, const Slice& value); + + // Finish building the block and return a slice that refers to the + // block contents. The returned slice will remain valid for the + // lifetime of this builder or until Reset() is called. + Slice Finish(); + + // Returns an estimate of the current (uncompressed) size of the block + // we are building. + inline size_t CurrentSizeEstimate() const { return estimate_; } + + // Returns an estimated block size after appending key and value. + size_t EstimateSizeAfterKV(const Slice& key, const Slice& value) const; + + // Return true iff no entries have been added since the last Reset() + bool empty() const { + return buffer_.empty(); + } + + private: + const int block_restart_interval_; + const bool use_delta_encoding_; + + std::string buffer_; // Destination buffer + std::vector restarts_; // Restart points + size_t estimate_; + int counter_; // Number of entries emitted since restart + bool finished_; // Has Finish() been called? + std::string last_key_; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/table/block_prefix_index.cc b/external/rocksdb/table/block_prefix_index.cc new file mode 100755 index 0000000000..10fcb05754 --- /dev/null +++ b/external/rocksdb/table/block_prefix_index.cc @@ -0,0 +1,237 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
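The BlockBuilder interface above is easiest to grasp from its call sequence: keys are Add()ed in sorted order and Finish() returns a slice that stays valid until Reset() or destruction. A small usage sketch; a restart interval of 16 mirrors the commonly used default:

#include <string>
#include "rocksdb/slice.h"
#include "table/block_builder.h"

std::string BuildSmallBlock() {
  rocksdb::BlockBuilder builder(16 /* block_restart_interval */);
  builder.Add("app", "1");    // keys arrive pre-sorted
  builder.Add("apple", "2");  // shares "app": stored as a 2-byte delta
  builder.Add("apply", "3");  // shares "appl": stored as a 1-byte delta
  rocksdb::Slice raw = builder.Finish();
  return raw.ToString();      // copy out before the builder goes away
}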
+ +#include "table/block_prefix_index.h" + +#include + +#include "rocksdb/comparator.h" +#include "rocksdb/slice.h" +#include "rocksdb/slice_transform.h" +#include "util/arena.h" +#include "util/coding.h" +#include "util/hash.h" + +namespace rocksdb { + +inline uint32_t Hash(const Slice& s) { + return rocksdb::Hash(s.data(), s.size(), 0); +} + +inline uint32_t PrefixToBucket(const Slice& prefix, uint32_t num_buckets) { + return Hash(prefix) % num_buckets; +} + +// The prefix block index is simply a bucket array, with each entry pointing to +// the blocks that span the prefixes hashed to this bucket. +// +// To reduce memory footprint, if there is only one block per bucket, the entry +// stores the block id directly. If there are more than one blocks per bucket, +// because of hash collision or a single prefix spanning multiple blocks, +// the entry points to an array of block ids. The block array is an array of +// uint32_t's. The first uint32_t indicates the total number of blocks, followed +// by the block ids. +// +// To differentiate the two cases, the high order bit of the entry indicates +// whether it is a 'pointer' into a separate block array. +// 0x7FFFFFFF is reserved for empty bucket. + +const uint32_t kNoneBlock = 0x7FFFFFFF; +const uint32_t kBlockArrayMask = 0x80000000; + +inline bool IsNone(uint32_t block_id) { + return block_id == kNoneBlock; +} + +inline bool IsBlockId(uint32_t block_id) { + return (block_id & kBlockArrayMask) == 0; +} + +inline uint32_t DecodeIndex(uint32_t block_id) { + uint32_t index = block_id ^ kBlockArrayMask; + assert(index < kBlockArrayMask); + return index; +} + +inline uint32_t EncodeIndex(uint32_t index) { + assert(index < kBlockArrayMask); + return index | kBlockArrayMask; +} + +// temporary storage for prefix information during index building +struct PrefixRecord { + Slice prefix; + uint32_t start_block; + uint32_t end_block; + uint32_t num_blocks; + PrefixRecord* next; +}; + +class BlockPrefixIndex::Builder { + public: + explicit Builder(const SliceTransform* internal_prefix_extractor) + : internal_prefix_extractor_(internal_prefix_extractor) {} + + void Add(const Slice& key_prefix, uint32_t start_block, + uint32_t num_blocks) { + PrefixRecord* record = reinterpret_cast( + arena_.AllocateAligned(sizeof(PrefixRecord))); + record->prefix = key_prefix; + record->start_block = start_block; + record->end_block = start_block + num_blocks - 1; + record->num_blocks = num_blocks; + prefixes_.push_back(record); + } + + BlockPrefixIndex* Finish() { + // For now, use roughly 1:1 prefix to bucket ratio. + uint32_t num_buckets = static_cast(prefixes_.size()) + 1; + + // Collect prefix records that hash to the same bucket, into a single + // linklist. + std::vector prefixes_per_bucket(num_buckets, nullptr); + std::vector num_blocks_per_bucket(num_buckets, 0); + for (PrefixRecord* current : prefixes_) { + uint32_t bucket = PrefixToBucket(current->prefix, num_buckets); + // merge the prefix block span if the first block of this prefix is + // connected to the last block of the previous prefix. 
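The bucket encoding just described is compact enough to verify by hand: 0x7FFFFFFF marks an empty bucket, a cleared high bit means the entry is the block id itself, and a set high bit means the low 31 bits index the shared block array. A self-contained check using the same constants:

#include <cassert>
#include <cstdint>

const uint32_t kNoneBlock = 0x7FFFFFFF;
const uint32_t kBlockArrayMask = 0x80000000;

int main() {
  uint32_t direct = 42;                    // single block: stored inline
  uint32_t pointer = 7 | kBlockArrayMask;  // what EncodeIndex(7) produces
  assert((direct & kBlockArrayMask) == 0);  // IsBlockId
  assert((pointer & kBlockArrayMask) != 0); // a 'pointer' into the array
  assert((pointer ^ kBlockArrayMask) == 7); // DecodeIndex recovers the index
  assert(direct != kNoneBlock);             // distinct from the empty sentinel
  return 0;
}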
+ PrefixRecord* prev = prefixes_per_bucket[bucket]; + if (prev) { + assert(current->start_block >= prev->end_block); + auto distance = current->start_block - prev->end_block; + if (distance <= 1) { + prev->end_block = current->end_block; + prev->num_blocks = prev->end_block - prev->start_block + 1; + num_blocks_per_bucket[bucket] += (current->num_blocks + distance - 1); + continue; + } + } + current->next = prev; + prefixes_per_bucket[bucket] = current; + num_blocks_per_bucket[bucket] += current->num_blocks; + } + + // Calculate the block array buffer size + uint32_t total_block_array_entries = 0; + for (uint32_t i = 0; i < num_buckets; i++) { + uint32_t num_blocks = num_blocks_per_bucket[i]; + if (num_blocks > 1) { + total_block_array_entries += (num_blocks + 1); + } + } + + // Populate the final prefix block index + uint32_t* block_array_buffer = new uint32_t[total_block_array_entries]; + uint32_t* buckets = new uint32_t[num_buckets]; + uint32_t offset = 0; + for (uint32_t i = 0; i < num_buckets; i++) { + uint32_t num_blocks = num_blocks_per_bucket[i]; + if (num_blocks == 0) { + assert(prefixes_per_bucket[i] == nullptr); + buckets[i] = kNoneBlock; + } else if (num_blocks == 1) { + assert(prefixes_per_bucket[i] != nullptr); + assert(prefixes_per_bucket[i]->next == nullptr); + buckets[i] = prefixes_per_bucket[i]->start_block; + } else { + assert(total_block_array_entries > 0); + assert(prefixes_per_bucket[i] != nullptr); + buckets[i] = EncodeIndex(offset); + block_array_buffer[offset] = num_blocks; + uint32_t* last_block = &block_array_buffer[offset + num_blocks]; + auto current = prefixes_per_bucket[i]; + // populate block ids from largest to smallest + while (current != nullptr) { + for (uint32_t iter = 0; iter < current->num_blocks; iter++) { + *last_block = current->end_block - iter; + last_block--; + } + current = current->next; + } + assert(last_block == &block_array_buffer[offset]); + offset += (num_blocks + 1); + } + } + + assert(offset == total_block_array_entries); + + return new BlockPrefixIndex(internal_prefix_extractor_, num_buckets, + buckets, total_block_array_entries, + block_array_buffer); + } + + private: + const SliceTransform* internal_prefix_extractor_; + + std::vector prefixes_; + Arena arena_; +}; + + +Status BlockPrefixIndex::Create(const SliceTransform* internal_prefix_extractor, + const Slice& prefixes, const Slice& prefix_meta, + BlockPrefixIndex** prefix_index) { + uint64_t pos = 0; + auto meta_pos = prefix_meta; + Status s; + Builder builder(internal_prefix_extractor); + + while (!meta_pos.empty()) { + uint32_t prefix_size = 0; + uint32_t entry_index = 0; + uint32_t num_blocks = 0; + if (!GetVarint32(&meta_pos, &prefix_size) || + !GetVarint32(&meta_pos, &entry_index) || + !GetVarint32(&meta_pos, &num_blocks)) { + s = Status::Corruption( + "Corrupted prefix meta block: unable to read from it."); + break; + } + if (pos + prefix_size > prefixes.size()) { + s = Status::Corruption( + "Corrupted prefix meta block: size inconsistency."); + break; + } + Slice prefix(prefixes.data() + pos, prefix_size); + builder.Add(prefix, entry_index, num_blocks); + + pos += prefix_size; + } + + if (s.ok() && pos != prefixes.size()) { + s = Status::Corruption("Corrupted prefix meta block"); + } + + if (s.ok()) { + *prefix_index = builder.Finish(); + } + + return s; +} + +uint32_t BlockPrefixIndex::GetBlocks(const Slice& key, + uint32_t** blocks) { + Slice prefix = internal_prefix_extractor_->Transform(key); + + uint32_t bucket = PrefixToBucket(prefix, num_buckets_); + uint32_t 
block_id = buckets_[bucket]; + + if (IsNone(block_id)) { + return 0; + } else if (IsBlockId(block_id)) { + *blocks = &buckets_[bucket]; + return 1; + } else { + uint32_t index = DecodeIndex(block_id); + assert(index < num_block_array_buffer_entries_); + *blocks = &block_array_buffer_[index+1]; + uint32_t num_blocks = block_array_buffer_[index]; + assert(num_blocks > 1); + assert(index + num_blocks < num_block_array_buffer_entries_); + return num_blocks; + } +} + +} // namespace rocksdb diff --git a/external/rocksdb/table/block_prefix_index.h b/external/rocksdb/table/block_prefix_index.h new file mode 100755 index 0000000000..d9c3b97e0a --- /dev/null +++ b/external/rocksdb/table/block_prefix_index.h @@ -0,0 +1,68 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#pragma once + +#include +#include "rocksdb/status.h" + +namespace rocksdb { + +class Comparator; +class Iterator; +class Slice; +class SliceTransform; + +// Build a hash-based index to speed up the lookup for "index block". +// BlockHashIndex accepts a key and, if found, returns its restart index within +// that index block. +class BlockPrefixIndex { + public: + + // Maps a key to a list of data blocks that could potentially contain + // the key, based on the prefix. + // Returns the total number of relevant blocks, 0 means the key does + // not exist. + uint32_t GetBlocks(const Slice& key, uint32_t** blocks); + + size_t ApproximateMemoryUsage() const { + return sizeof(BlockPrefixIndex) + + (num_block_array_buffer_entries_ + num_buckets_) * sizeof(uint32_t); + } + + // Create hash index by reading from the metadata blocks. + // @params prefixes: a sequence of prefixes. + // @params prefix_meta: contains the "metadata" to of the prefixes. + static Status Create(const SliceTransform* hash_key_extractor, + const Slice& prefixes, const Slice& prefix_meta, + BlockPrefixIndex** prefix_index); + + ~BlockPrefixIndex() { + delete[] buckets_; + delete[] block_array_buffer_; + } + + private: + class Builder; + friend Builder; + + BlockPrefixIndex(const SliceTransform* internal_prefix_extractor, + uint32_t num_buckets, + uint32_t* buckets, + uint32_t num_block_array_buffer_entries, + uint32_t* block_array_buffer) + : internal_prefix_extractor_(internal_prefix_extractor), + num_buckets_(num_buckets), + num_block_array_buffer_entries_(num_block_array_buffer_entries), + buckets_(buckets), + block_array_buffer_(block_array_buffer) {} + + const SliceTransform* internal_prefix_extractor_; + uint32_t num_buckets_; + uint32_t num_block_array_buffer_entries_; + uint32_t* buckets_; + uint32_t* block_array_buffer_; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/table/block_test.cc b/external/rocksdb/table/block_test.cc new file mode 100755 index 0000000000..424df87a3f --- /dev/null +++ b/external/rocksdb/table/block_test.cc @@ -0,0 +1,224 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
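Given the header above, the intended calling convention for GetBlocks() looks like the sketch below: the out-parameter ends up pointing either at the bucket itself or into the shared block array, and a return of 0 means the prefix is provably absent. SearchDataBlock is a hypothetical helper, declared only so the fragment compiles on its own:

#include <cstdint>
#include "rocksdb/slice.h"
#include "table/block_prefix_index.h"

// Hypothetical per-block search, declared only for the sketch.
void SearchDataBlock(uint32_t block_id, const rocksdb::Slice& key);

void LookupByPrefix(rocksdb::BlockPrefixIndex* index,
                    const rocksdb::Slice& internal_key) {
  uint32_t* blocks = nullptr;
  uint32_t num = index->GetBlocks(internal_key, &blocks);  // 0: prefix absent
  for (uint32_t i = 0; i < num; ++i) {
    SearchDataBlock(blocks[i], internal_key);  // only the candidate blocks
  }
}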
+// +#include +#include +#include + +#include "db/dbformat.h" +#include "db/memtable.h" +#include "db/write_batch_internal.h" +#include "rocksdb/db.h" +#include "rocksdb/env.h" +#include "rocksdb/iterator.h" +#include "rocksdb/table.h" +#include "rocksdb/slice_transform.h" +#include "table/block.h" +#include "table/block_builder.h" +#include "table/format.h" +#include "util/random.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { + +static std::string RandomString(Random* rnd, int len) { + std::string r; + test::RandomString(rnd, len, &r); + return r; +} +std::string GenerateKey(int primary_key, int secondary_key, int padding_size, + Random *rnd) { + char buf[50]; + char *p = &buf[0]; + snprintf(buf, sizeof(buf), "%6d%4d", primary_key, secondary_key); + std::string k(p); + if (padding_size) { + k += RandomString(rnd, padding_size); + } + + return k; +} + +// Generate random key value pairs. +// The generated key will be sorted. You can tune the parameters to generated +// different kinds of test key/value pairs for different scenario. +void GenerateRandomKVs(std::vector *keys, + std::vector *values, const int from, + const int len, const int step = 1, + const int padding_size = 0, + const int keys_share_prefix = 1) { + Random rnd(302); + + // generate different prefix + for (int i = from; i < from + len; i += step) { + // generating keys that shares the prefix + for (int j = 0; j < keys_share_prefix; ++j) { + keys->emplace_back(GenerateKey(i, j, padding_size, &rnd)); + + // 100 bytes values + values->emplace_back(RandomString(&rnd, 100)); + } + } +} + +class BlockTest : public testing::Test {}; + +// block test +TEST_F(BlockTest, SimpleTest) { + Random rnd(301); + Options options = Options(); + std::unique_ptr ic; + ic.reset(new test::PlainInternalKeyComparator(options.comparator)); + + std::vector keys; + std::vector values; + BlockBuilder builder(16); + int num_records = 100000; + + GenerateRandomKVs(&keys, &values, 0, num_records); + // add a bunch of records to a block + for (int i = 0; i < num_records; i++) { + builder.Add(keys[i], values[i]); + } + + // read serialized contents of the block + Slice rawblock = builder.Finish(); + + // create block reader + BlockContents contents; + contents.data = rawblock; + contents.cachable = false; + Block reader(std::move(contents)); + + // read contents of block sequentially + int count = 0; + InternalIterator *iter = reader.NewIterator(options.comparator); + for (iter->SeekToFirst();iter->Valid(); count++, iter->Next()) { + + // read kv from block + Slice k = iter->key(); + Slice v = iter->value(); + + // compare with lookaside array + ASSERT_EQ(k.ToString().compare(keys[count]), 0); + ASSERT_EQ(v.ToString().compare(values[count]), 0); + } + delete iter; + + // read block contents randomly + iter = reader.NewIterator(options.comparator); + for (int i = 0; i < num_records; i++) { + + // find a random key in the lookaside array + int index = rnd.Uniform(num_records); + Slice k(keys[index]); + + // search in block for this key + iter->Seek(k); + ASSERT_TRUE(iter->Valid()); + Slice v = iter->value(); + ASSERT_EQ(v.ToString().compare(values[index]), 0); + } + delete iter; +} + +// return the block contents +BlockContents GetBlockContents(std::unique_ptr *builder, + const std::vector &keys, + const std::vector &values, + const int prefix_group_size = 1) { + builder->reset(new BlockBuilder(1 /* restart interval */)); + + // Add only half of the keys + for (size_t i = 0; i < keys.size(); ++i) { + 
(*builder)->Add(keys[i], values[i]); + } + Slice rawblock = (*builder)->Finish(); + + BlockContents contents; + contents.data = rawblock; + contents.cachable = false; + + return contents; +} + +void CheckBlockContents(BlockContents contents, const int max_key, + const std::vector &keys, + const std::vector &values) { + const size_t prefix_size = 6; + // create block reader + BlockContents contents_ref(contents.data, contents.cachable, + contents.compression_type); + Block reader1(std::move(contents)); + Block reader2(std::move(contents_ref)); + + std::unique_ptr prefix_extractor( + NewFixedPrefixTransform(prefix_size)); + + std::unique_ptr regular_iter( + reader2.NewIterator(BytewiseComparator())); + + // Seek existent keys + for (size_t i = 0; i < keys.size(); i++) { + regular_iter->Seek(keys[i]); + ASSERT_OK(regular_iter->status()); + ASSERT_TRUE(regular_iter->Valid()); + + Slice v = regular_iter->value(); + ASSERT_EQ(v.ToString().compare(values[i]), 0); + } + + // Seek non-existent keys. + // For hash index, if no key with a given prefix is not found, iterator will + // simply be set as invalid; whereas the binary search based iterator will + // return the one that is closest. + for (int i = 1; i < max_key - 1; i += 2) { + auto key = GenerateKey(i, 0, 0, nullptr); + regular_iter->Seek(key); + ASSERT_TRUE(regular_iter->Valid()); + } +} + +// In this test case, no two key share same prefix. +TEST_F(BlockTest, SimpleIndexHash) { + const int kMaxKey = 100000; + std::vector keys; + std::vector values; + GenerateRandomKVs(&keys, &values, 0 /* first key id */, + kMaxKey /* last key id */, 2 /* step */, + 8 /* padding size (8 bytes randomly generated suffix) */); + + std::unique_ptr builder; + auto contents = GetBlockContents(&builder, keys, values); + + CheckBlockContents(std::move(contents), kMaxKey, keys, values); +} + +TEST_F(BlockTest, IndexHashWithSharedPrefix) { + const int kMaxKey = 100000; + // for each prefix, there will be 5 keys starts with it. + const int kPrefixGroup = 5; + std::vector keys; + std::vector values; + // Generate keys with same prefix. + GenerateRandomKVs(&keys, &values, 0, // first key id + kMaxKey, // last key id + 2, // step + 10, // padding size, + kPrefixGroup); + + std::unique_ptr builder; + auto contents = GetBlockContents(&builder, keys, values, kPrefixGroup); + + CheckBlockContents(std::move(contents), kMaxKey, keys, values); +} + +} // namespace rocksdb + +int main(int argc, char **argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/table/bloom_block.cc b/external/rocksdb/table/bloom_block.cc new file mode 100755 index 0000000000..7eef9cc05b --- /dev/null +++ b/external/rocksdb/table/bloom_block.cc @@ -0,0 +1,23 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
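The non-existent-key loop in CheckBlockContents above relies on binary-search seek semantics: Seek(k) lands on the smallest key >= k and only invalidates when k is past the last key, whereas a hash index would simply report the prefix missing. std::map::lower_bound shows the same contract in miniature:

#include <cassert>
#include <map>
#include <string>

int main() {
  std::map<std::string, int> block = {{"a", 0}, {"c", 1}, {"e", 2}};
  auto it = block.lower_bound("b");  // "b" absent: lands on "c"
  assert(it != block.end() && it->first == "c");
  it = block.lower_bound("f");       // past the last key: iterator invalid
  assert(it == block.end());
  return 0;
}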
+ +#include "table/bloom_block.h" + +#include +#include "rocksdb/slice.h" +#include "util/dynamic_bloom.h" + +namespace rocksdb { + +void BloomBlockBuilder::AddKeysHashes(const std::vector& keys_hashes) { + for (auto hash : keys_hashes) { + bloom_.AddHash(hash); + } +} + +Slice BloomBlockBuilder::Finish() { return bloom_.GetRawData(); } + +const std::string BloomBlockBuilder::kBloomBlock = "kBloomBlock"; +} // namespace rocksdb diff --git a/external/rocksdb/table/bloom_block.h b/external/rocksdb/table/bloom_block.h new file mode 100755 index 0000000000..5ba74601fc --- /dev/null +++ b/external/rocksdb/table/bloom_block.h @@ -0,0 +1,38 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#pragma once + +#include +#include +#include "util/dynamic_bloom.h" + +namespace rocksdb { +class Logger; + +class BloomBlockBuilder { + public: + static const std::string kBloomBlock; + + explicit BloomBlockBuilder(uint32_t num_probes = 6) + : bloom_(num_probes, nullptr) {} + + void SetTotalBits(Allocator* allocator, uint32_t total_bits, + uint32_t locality, size_t huge_page_tlb_size, + Logger* logger) { + bloom_.SetTotalBits(allocator, total_bits, locality, huge_page_tlb_size, + logger); + } + + uint32_t GetNumBlocks() const { return bloom_.GetNumBlocks(); } + + void AddKeysHashes(const std::vector& keys_hashes); + + Slice Finish(); + + private: + DynamicBloom bloom_; +}; + +}; // namespace rocksdb diff --git a/external/rocksdb/table/cuckoo_table_builder.cc b/external/rocksdb/table/cuckoo_table_builder.cc new file mode 100755 index 0000000000..0f93b54edf --- /dev/null +++ b/external/rocksdb/table/cuckoo_table_builder.cc @@ -0,0 +1,515 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
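The BloomBlockBuilder interface above suggests this call sequence: size the underlying DynamicBloom, feed it precomputed key hashes, and emit the raw bitmap as the block payload. A sketch under assumed but typical parameters (6 probes, roughly 10 bits per key), passing an Arena as the Allocator the way in-tree callers do; only calls visible in the header are used:

#include <cstdint>
#include <vector>
#include "rocksdb/slice.h"
#include "table/bloom_block.h"
#include "util/arena.h"

rocksdb::Slice BuildBloomBlock(const std::vector<uint32_t>& key_hashes,
                               rocksdb::Arena* arena) {
  rocksdb::BloomBlockBuilder builder(6 /* num_probes */);
  // ~10 bits per key, no locality grouping, no huge pages, no logger.
  builder.SetTotalBits(arena, static_cast<uint32_t>(key_hashes.size()) * 10,
                       0 /* locality */, 0 /* huge_page_tlb_size */, nullptr);
  builder.AddKeysHashes(key_hashes);
  return builder.Finish();  // references arena-owned memory
}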
+ +#ifndef ROCKSDB_LITE +#include "table/cuckoo_table_builder.h" + +#include +#include +#include +#include +#include + +#include "db/dbformat.h" +#include "rocksdb/env.h" +#include "rocksdb/table.h" +#include "table/block_builder.h" +#include "table/cuckoo_table_factory.h" +#include "table/format.h" +#include "table/meta_blocks.h" +#include "util/autovector.h" +#include "util/file_reader_writer.h" +#include "util/random.h" +#include "util/string_util.h" + +namespace rocksdb { +const std::string CuckooTablePropertyNames::kEmptyKey = + "rocksdb.cuckoo.bucket.empty.key"; +const std::string CuckooTablePropertyNames::kNumHashFunc = + "rocksdb.cuckoo.hash.num"; +const std::string CuckooTablePropertyNames::kHashTableSize = + "rocksdb.cuckoo.hash.size"; +const std::string CuckooTablePropertyNames::kValueLength = + "rocksdb.cuckoo.value.length"; +const std::string CuckooTablePropertyNames::kIsLastLevel = + "rocksdb.cuckoo.file.islastlevel"; +const std::string CuckooTablePropertyNames::kCuckooBlockSize = + "rocksdb.cuckoo.hash.cuckooblocksize"; +const std::string CuckooTablePropertyNames::kIdentityAsFirstHash = + "rocksdb.cuckoo.hash.identityfirst"; +const std::string CuckooTablePropertyNames::kUseModuleHash = + "rocksdb.cuckoo.hash.usemodule"; +const std::string CuckooTablePropertyNames::kUserKeyLength = + "rocksdb.cuckoo.hash.userkeylength"; + +// Obtained by running echo rocksdb.table.cuckoo | sha1sum +extern const uint64_t kCuckooTableMagicNumber = 0x926789d0c5f17873ull; + +CuckooTableBuilder::CuckooTableBuilder( + WritableFileWriter* file, double max_hash_table_ratio, + uint32_t max_num_hash_table, uint32_t max_search_depth, + const Comparator* user_comparator, uint32_t cuckoo_block_size, + bool use_module_hash, bool identity_as_first_hash, + uint64_t (*get_slice_hash)(const Slice&, uint32_t, uint64_t), + uint32_t column_family_id, const std::string& column_family_name) + : num_hash_func_(2), + file_(file), + max_hash_table_ratio_(max_hash_table_ratio), + max_num_hash_func_(max_num_hash_table), + max_search_depth_(max_search_depth), + cuckoo_block_size_(std::max(1U, cuckoo_block_size)), + hash_table_size_(use_module_hash ? 0 : 2), + is_last_level_file_(false), + has_seen_first_key_(false), + has_seen_first_value_(false), + key_size_(0), + value_size_(0), + num_entries_(0), + num_values_(0), + ucomp_(user_comparator), + use_module_hash_(use_module_hash), + identity_as_first_hash_(identity_as_first_hash), + get_slice_hash_(get_slice_hash), + closed_(false) { + // Data is in a huge block. + properties_.num_data_blocks = 1; + properties_.index_size = 0; + properties_.filter_size = 0; + properties_.column_family_id = column_family_id; + properties_.column_family_name = column_family_name; +} + +void CuckooTableBuilder::Add(const Slice& key, const Slice& value) { + if (num_entries_ >= kMaxVectorIdx - 1) { + status_ = Status::NotSupported("Number of keys in a file must be < 2^32-1"); + return; + } + ParsedInternalKey ikey; + if (!ParseInternalKey(key, &ikey)) { + status_ = Status::Corruption("Unable to parse key into inernal key."); + return; + } + if (ikey.type != kTypeDeletion && ikey.type != kTypeValue) { + status_ = Status::NotSupported("Unsupported key type " + + ToString(ikey.type)); + return; + } + + // Determine if we can ignore the sequence number and value type from + // internal keys by looking at sequence number from first key. We assume + // that if first key has a zero sequence number, then all the remaining + // keys will have zero seq. no. 
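The "zero sequence number" test described above reads the standard internal-key trailer: the last 8 bytes are a little-endian fixed64 packing of (sequence << 8 | type). A minimal stand-alone check, assuming a little-endian host as util/coding.h's fast path does; FirstKeyHasZeroSequence is a hypothetical name:

#include <cstdint>
#include <cstring>
#include <string>

bool FirstKeyHasZeroSequence(const std::string& internal_key) {
  if (internal_key.size() < 8) return false;  // corrupt: no trailer
  uint64_t trailer;
  std::memcpy(&trailer, internal_key.data() + internal_key.size() - 8, 8);
  // Low byte is the value type; the upper 56 bits are the sequence number.
  return (trailer >> 8) == 0;
}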
+ if (!has_seen_first_key_) { + is_last_level_file_ = ikey.sequence == 0; + has_seen_first_key_ = true; + smallest_user_key_.assign(ikey.user_key.data(), ikey.user_key.size()); + largest_user_key_.assign(ikey.user_key.data(), ikey.user_key.size()); + key_size_ = is_last_level_file_ ? ikey.user_key.size() : key.size(); + } + if (key_size_ != (is_last_level_file_ ? ikey.user_key.size() : key.size())) { + status_ = Status::NotSupported("all keys have to be the same size"); + return; + } + + if (ikey.type == kTypeValue) { + if (!has_seen_first_value_) { + has_seen_first_value_ = true; + value_size_ = value.size(); + } + if (value_size_ != value.size()) { + status_ = Status::NotSupported("all values have to be the same size"); + return; + } + + if (is_last_level_file_) { + kvs_.append(ikey.user_key.data(), ikey.user_key.size()); + } else { + kvs_.append(key.data(), key.size()); + } + kvs_.append(value.data(), value.size()); + ++num_values_; + } else { + if (is_last_level_file_) { + deleted_keys_.append(ikey.user_key.data(), ikey.user_key.size()); + } else { + deleted_keys_.append(key.data(), key.size()); + } + } + ++num_entries_; + + // In order to fill the empty buckets in the hash table, we identify a + // key which is not used so far (unused_user_key). We determine this by + // maintaining smallest and largest keys inserted so far in bytewise order + // and use them to find a key outside this range in Finish() operation. + // Note that this strategy is independent of user comparator used here. + if (ikey.user_key.compare(smallest_user_key_) < 0) { + smallest_user_key_.assign(ikey.user_key.data(), ikey.user_key.size()); + } else if (ikey.user_key.compare(largest_user_key_) > 0) { + largest_user_key_.assign(ikey.user_key.data(), ikey.user_key.size()); + } + if (!use_module_hash_) { + if (hash_table_size_ < num_entries_ / max_hash_table_ratio_) { + hash_table_size_ *= 2; + } + } +} + +bool CuckooTableBuilder::IsDeletedKey(uint64_t idx) const { + assert(closed_); + return idx >= num_values_; +} + +Slice CuckooTableBuilder::GetKey(uint64_t idx) const { + assert(closed_); + if (IsDeletedKey(idx)) { + return Slice(&deleted_keys_[(idx - num_values_) * key_size_], key_size_); + } + return Slice(&kvs_[idx * (key_size_ + value_size_)], key_size_); +} + +Slice CuckooTableBuilder::GetUserKey(uint64_t idx) const { + assert(closed_); + return is_last_level_file_ ? GetKey(idx) : ExtractUserKey(GetKey(idx)); +} + +Slice CuckooTableBuilder::GetValue(uint64_t idx) const { + assert(closed_); + if (IsDeletedKey(idx)) { + static std::string empty_value(value_size_, 'a'); + return Slice(empty_value); + } + return Slice(&kvs_[idx * (key_size_ + value_size_) + key_size_], value_size_); +} + +Status CuckooTableBuilder::MakeHashTable(std::vector* buckets) { + buckets->resize(hash_table_size_ + cuckoo_block_size_ - 1); + uint32_t make_space_for_key_call_id = 0; + for (uint32_t vector_idx = 0; vector_idx < num_entries_; vector_idx++) { + uint64_t bucket_id; + bool bucket_found = false; + autovector hash_vals; + Slice user_key = GetUserKey(vector_idx); + for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_ && !bucket_found; + ++hash_cnt) { + uint64_t hash_val = CuckooHash(user_key, hash_cnt, use_module_hash_, + hash_table_size_, identity_as_first_hash_, get_slice_hash_); + // If there is a collision, check next cuckoo_block_size_ locations for + // empty locations. While checking, if we reach end of the hash table, + // stop searching and proceed for next hash function. 
+      for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
+           ++block_idx, ++hash_val) {
+        if ((*buckets)[hash_val].vector_idx == kMaxVectorIdx) {
+          bucket_id = hash_val;
+          bucket_found = true;
+          break;
+        } else {
+          if (ucomp_->Compare(user_key,
+                GetUserKey((*buckets)[hash_val].vector_idx)) == 0) {
+            return Status::NotSupported("Same key is being inserted again.");
+          }
+          hash_vals.push_back(hash_val);
+        }
+      }
+    }
+    while (!bucket_found && !MakeSpaceForKey(hash_vals,
+          ++make_space_for_key_call_id, buckets, &bucket_id)) {
+      // Rehash by increasing the number of hash functions.
+      if (num_hash_func_ >= max_num_hash_func_) {
+        return Status::NotSupported("Too many collisions. Unable to hash.");
+      }
+      // We don't really need to rehash the entire table because old hashes are
+      // still valid and we only increased the number of hash functions.
+      uint64_t hash_val = CuckooHash(user_key, num_hash_func_, use_module_hash_,
+          hash_table_size_, identity_as_first_hash_, get_slice_hash_);
+      ++num_hash_func_;
+      for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
+           ++block_idx, ++hash_val) {
+        if ((*buckets)[hash_val].vector_idx == kMaxVectorIdx) {
+          bucket_found = true;
+          bucket_id = hash_val;
+          break;
+        } else {
+          hash_vals.push_back(hash_val);
+        }
+      }
+    }
+    (*buckets)[bucket_id].vector_idx = vector_idx;
+  }
+  return Status::OK();
+}
+
+Status CuckooTableBuilder::Finish() {
+  assert(!closed_);
+  closed_ = true;
+  std::vector<CuckooBucket> buckets;
+  Status s;
+  std::string unused_bucket;
+  if (num_entries_ > 0) {
+    // Calculate the real hash size if module hash is enabled.
+    if (use_module_hash_) {
+      hash_table_size_ =
+          static_cast<uint64_t>(num_entries_ / max_hash_table_ratio_);
+    }
+    s = MakeHashTable(&buckets);
+    if (!s.ok()) {
+      return s;
+    }
+    // Determine unused_user_key to fill empty buckets.
+    std::string unused_user_key = smallest_user_key_;
+    int curr_pos = static_cast<int>(unused_user_key.size()) - 1;
+    while (curr_pos >= 0) {
+      --unused_user_key[curr_pos];
+      if (Slice(unused_user_key).compare(smallest_user_key_) < 0) {
+        break;
+      }
+      --curr_pos;
+    }
+    if (curr_pos < 0) {
+      // Try using the largest key to identify an unused key.
+      unused_user_key = largest_user_key_;
+      curr_pos = static_cast<int>(unused_user_key.size()) - 1;
+      while (curr_pos >= 0) {
+        ++unused_user_key[curr_pos];
+        if (Slice(unused_user_key).compare(largest_user_key_) > 0) {
+          break;
+        }
+        --curr_pos;
+      }
+    }
+    if (curr_pos < 0) {
+      return Status::Corruption("Unable to find unused key");
+    }
+    if (is_last_level_file_) {
+      unused_bucket = unused_user_key;
+    } else {
+      ParsedInternalKey ikey(unused_user_key, 0, kTypeValue);
+      AppendInternalKey(&unused_bucket, ikey);
+    }
+  }
+  properties_.num_entries = num_entries_;
+  properties_.fixed_key_len = key_size_;
+  properties_.user_collected_properties[
+      CuckooTablePropertyNames::kValueLength].assign(
+      reinterpret_cast<const char*>(&value_size_), sizeof(value_size_));
+
+  uint64_t bucket_size = key_size_ + value_size_;
+  unused_bucket.resize(bucket_size, 'a');
+  // Write the table.
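+  // (On-disk layout from here: hash_table_size_ + cuckoo_block_size_ - 1
+  // fixed-width buckets of key_size_ + value_size_ bytes each; empty buckets
+  // are filled with unused_bucket so readers can recognize them.)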
+ uint32_t num_added = 0; + for (auto& bucket : buckets) { + if (bucket.vector_idx == kMaxVectorIdx) { + s = file_->Append(Slice(unused_bucket)); + } else { + ++num_added; + s = file_->Append(GetKey(bucket.vector_idx)); + if (s.ok()) { + if (value_size_ > 0) { + s = file_->Append(GetValue(bucket.vector_idx)); + } + } + } + if (!s.ok()) { + return s; + } + } + assert(num_added == NumEntries()); + properties_.raw_key_size = num_added * properties_.fixed_key_len; + properties_.raw_value_size = num_added * value_size_; + + uint64_t offset = buckets.size() * bucket_size; + properties_.data_size = offset; + unused_bucket.resize(properties_.fixed_key_len); + properties_.user_collected_properties[ + CuckooTablePropertyNames::kEmptyKey] = unused_bucket; + properties_.user_collected_properties[ + CuckooTablePropertyNames::kNumHashFunc].assign( + reinterpret_cast(&num_hash_func_), sizeof(num_hash_func_)); + + properties_.user_collected_properties[ + CuckooTablePropertyNames::kHashTableSize].assign( + reinterpret_cast(&hash_table_size_), + sizeof(hash_table_size_)); + properties_.user_collected_properties[ + CuckooTablePropertyNames::kIsLastLevel].assign( + reinterpret_cast(&is_last_level_file_), + sizeof(is_last_level_file_)); + properties_.user_collected_properties[ + CuckooTablePropertyNames::kCuckooBlockSize].assign( + reinterpret_cast(&cuckoo_block_size_), + sizeof(cuckoo_block_size_)); + properties_.user_collected_properties[ + CuckooTablePropertyNames::kIdentityAsFirstHash].assign( + reinterpret_cast(&identity_as_first_hash_), + sizeof(identity_as_first_hash_)); + properties_.user_collected_properties[ + CuckooTablePropertyNames::kUseModuleHash].assign( + reinterpret_cast(&use_module_hash_), + sizeof(use_module_hash_)); + uint32_t user_key_len = static_cast(smallest_user_key_.size()); + properties_.user_collected_properties[ + CuckooTablePropertyNames::kUserKeyLength].assign( + reinterpret_cast(&user_key_len), + sizeof(user_key_len)); + + // Write meta blocks. 
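+  // (After the data section, Finish() appends the properties block, the
+  // metaindex block, and a footer carrying the cuckoo magic number.)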
+  MetaIndexBuilder meta_index_builder;
+  PropertyBlockBuilder property_block_builder;
+
+  property_block_builder.AddTableProperty(properties_);
+  property_block_builder.Add(properties_.user_collected_properties);
+  Slice property_block = property_block_builder.Finish();
+  BlockHandle property_block_handle;
+  property_block_handle.set_offset(offset);
+  property_block_handle.set_size(property_block.size());
+  s = file_->Append(property_block);
+  offset += property_block.size();
+  if (!s.ok()) {
+    return s;
+  }
+
+  meta_index_builder.Add(kPropertiesBlock, property_block_handle);
+  Slice meta_index_block = meta_index_builder.Finish();
+
+  BlockHandle meta_index_block_handle;
+  meta_index_block_handle.set_offset(offset);
+  meta_index_block_handle.set_size(meta_index_block.size());
+  s = file_->Append(meta_index_block);
+  if (!s.ok()) {
+    return s;
+  }
+
+  Footer footer(kCuckooTableMagicNumber, 1);
+  footer.set_metaindex_handle(meta_index_block_handle);
+  footer.set_index_handle(BlockHandle::NullBlockHandle());
+  std::string footer_encoding;
+  footer.EncodeTo(&footer_encoding);
+  s = file_->Append(footer_encoding);
+  return s;
+}
+
+void CuckooTableBuilder::Abandon() {
+  assert(!closed_);
+  closed_ = true;
+}
+
+uint64_t CuckooTableBuilder::NumEntries() const {
+  return num_entries_;
+}
+
+uint64_t CuckooTableBuilder::FileSize() const {
+  if (closed_) {
+    return file_->GetFileSize();
+  } else if (num_entries_ == 0) {
+    return 0;
+  }
+
+  if (use_module_hash_) {
+    return static_cast<uint64_t>((key_size_ + value_size_) *
+        num_entries_ / max_hash_table_ratio_);
+  } else {
+    // Account for buckets being a power of two.
+    // As elements are added, file size remains constant for a while and then
+    // doubles. Since the compaction algorithm stops adding elements only
+    // after it exceeds the file limit, we account for the extra element
+    // being added here.
+    uint64_t expected_hash_table_size = hash_table_size_;
+    if (expected_hash_table_size < (num_entries_ + 1) / max_hash_table_ratio_) {
+      expected_hash_table_size *= 2;
+    }
+    return (key_size_ + value_size_) * expected_hash_table_size - 1;
+  }
+}
+
+// This method is invoked when there is no place to insert the target key.
+// It searches for a set of elements that can be moved to accommodate target
+// key. The search is a BFS graph traversal with first level (hash_vals)
+// being all the buckets target key could go to.
+// Then, from each node (curr_node), we find all the buckets that curr_node
+// could go to. They form the children of curr_node in the tree.
+// We continue the traversal until we find an empty bucket, in which case, we
+// move all elements along the path from first level to this empty bucket, to
+// make space for target key which is inserted at first level (*bucket_id).
+// If tree depth exceeds max depth, we return false indicating failure.
+bool CuckooTableBuilder::MakeSpaceForKey(
+    const autovector<uint64_t>& hash_vals,
+    const uint32_t make_space_for_key_call_id,
+    std::vector<CuckooBucket>* buckets, uint64_t* bucket_id) {
+  struct CuckooNode {
+    uint64_t bucket_id;
+    uint32_t depth;
+    uint32_t parent_pos;
+    CuckooNode(uint64_t _bucket_id, uint32_t _depth, int _parent_pos)
+      : bucket_id(_bucket_id), depth(_depth), parent_pos(_parent_pos) {}
+  };
+  // This is a BFS search tree that is stored simply as a vector.
+  // Each node stores the index of parent node in the vector.
+  std::vector<CuckooNode> tree;
+  // We want to identify already visited buckets in the current method call so
+  // that we don't add same buckets again for exploration in the tree.
+ // We do this by maintaining a count of current method call in + // make_space_for_key_call_id, which acts as a unique id for this invocation + // of the method. We store this number into the nodes that we explore in + // current method call. + // It is unlikely for the increment operation to overflow because the maximum + // no. of times this will be called is <= max_num_hash_func_ + num_entries_. + for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) { + uint64_t bid = hash_vals[hash_cnt]; + (*buckets)[bid].make_space_for_key_call_id = make_space_for_key_call_id; + tree.push_back(CuckooNode(bid, 0, 0)); + } + bool null_found = false; + uint32_t curr_pos = 0; + while (!null_found && curr_pos < tree.size()) { + CuckooNode& curr_node = tree[curr_pos]; + uint32_t curr_depth = curr_node.depth; + if (curr_depth >= max_search_depth_) { + break; + } + CuckooBucket& curr_bucket = (*buckets)[curr_node.bucket_id]; + for (uint32_t hash_cnt = 0; + hash_cnt < num_hash_func_ && !null_found; ++hash_cnt) { + uint64_t child_bucket_id = CuckooHash(GetUserKey(curr_bucket.vector_idx), + hash_cnt, use_module_hash_, hash_table_size_, identity_as_first_hash_, + get_slice_hash_); + // Iterate inside Cuckoo Block. + for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_; + ++block_idx, ++child_bucket_id) { + if ((*buckets)[child_bucket_id].make_space_for_key_call_id == + make_space_for_key_call_id) { + continue; + } + (*buckets)[child_bucket_id].make_space_for_key_call_id = + make_space_for_key_call_id; + tree.push_back(CuckooNode(child_bucket_id, curr_depth + 1, + curr_pos)); + if ((*buckets)[child_bucket_id].vector_idx == kMaxVectorIdx) { + null_found = true; + break; + } + } + } + ++curr_pos; + } + + if (null_found) { + // There is an empty node in tree.back(). Now, traverse the path from this + // empty node to top of the tree and at every node in the path, replace + // child with the parent. Stop when first level is reached in the tree + // (happens when 0 <= bucket_to_replace_pos < num_hash_func_) and return + // this location in first level for target key to be inserted. + uint32_t bucket_to_replace_pos = static_cast(tree.size()) - 1; + while (bucket_to_replace_pos >= num_hash_func_) { + CuckooNode& curr_node = tree[bucket_to_replace_pos]; + (*buckets)[curr_node.bucket_id] = + (*buckets)[tree[curr_node.parent_pos].bucket_id]; + bucket_to_replace_pos = curr_node.parent_pos; + } + *bucket_id = tree[bucket_to_replace_pos].bucket_id; + } + return null_found; +} + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/cuckoo_table_builder.h b/external/rocksdb/table/cuckoo_table_builder.h new file mode 100755 index 0000000000..266a71be04 --- /dev/null +++ b/external/rocksdb/table/cuckoo_table_builder.h @@ -0,0 +1,127 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+
+#pragma once
+#ifndef ROCKSDB_LITE
+#include <stdint.h>
+#include <limits>
+#include <string>
+#include <utility>
+#include <vector>
+#include "port/port.h"
+#include "rocksdb/status.h"
+#include "table/table_builder.h"
+#include "rocksdb/table.h"
+#include "rocksdb/table_properties.h"
+#include "util/autovector.h"
+
+namespace rocksdb {
+
+class CuckooTableBuilder: public TableBuilder {
+ public:
+  CuckooTableBuilder(WritableFileWriter* file, double max_hash_table_ratio,
+                     uint32_t max_num_hash_func, uint32_t max_search_depth,
+                     const Comparator* user_comparator,
+                     uint32_t cuckoo_block_size, bool use_module_hash,
+                     bool identity_as_first_hash,
+                     uint64_t (*get_slice_hash)(const Slice&, uint32_t,
+                                                uint64_t),
+                     uint32_t column_family_id,
+                     const std::string& column_family_name);
+
+  // REQUIRES: Either Finish() or Abandon() has been called.
+  ~CuckooTableBuilder() {}
+
+  // Add key,value to the table being constructed.
+  // REQUIRES: key is after any previously added key according to comparator.
+  // REQUIRES: Finish(), Abandon() have not been called
+  void Add(const Slice& key, const Slice& value) override;
+
+  // Return non-ok iff some error has been detected.
+  Status status() const override { return status_; }
+
+  // Finish building the table. Stops using the file passed to the
+  // constructor after this function returns.
+  // REQUIRES: Finish(), Abandon() have not been called
+  Status Finish() override;
+
+  // Indicate that the contents of this builder should be abandoned. Stops
+  // using the file passed to the constructor after this function returns.
+  // If the caller is not going to call Finish(), it must call Abandon()
+  // before destroying this builder.
+  // REQUIRES: Finish(), Abandon() have not been called
+  void Abandon() override;
+
+  // Number of calls to Add() so far.
+  uint64_t NumEntries() const override;
+
+  // Size of the file generated so far. If invoked after a successful
+  // Finish() call, returns the size of the final generated file.
+  uint64_t FileSize() const override;
+
+  TableProperties GetTableProperties() const override { return properties_; }
+
+ private:
+  struct CuckooBucket {
+    CuckooBucket()
+      : vector_idx(kMaxVectorIdx), make_space_for_key_call_id(0) {}
+    uint32_t vector_idx;
+    // This number will not exceed kvs_.size() + max_num_hash_func_.
+    // We assume number of items is <= 2^32.
+    uint32_t make_space_for_key_call_id;
+  };
+  static const uint32_t kMaxVectorIdx = port::kMaxInt32;
+
+  bool MakeSpaceForKey(const autovector<uint64_t>& hash_vals,
+                       const uint32_t call_id,
+                       std::vector<CuckooBucket>* buckets, uint64_t* bucket_id);
+  Status MakeHashTable(std::vector<CuckooBucket>* buckets);
+
+  inline bool IsDeletedKey(uint64_t idx) const;
+  inline Slice GetKey(uint64_t idx) const;
+  inline Slice GetUserKey(uint64_t idx) const;
+  inline Slice GetValue(uint64_t idx) const;
+
+  uint32_t num_hash_func_;
+  WritableFileWriter* file_;
+  const double max_hash_table_ratio_;
+  const uint32_t max_num_hash_func_;
+  const uint32_t max_search_depth_;
+  const uint32_t cuckoo_block_size_;
+  uint64_t hash_table_size_;
+  bool is_last_level_file_;
+  bool has_seen_first_key_;
+  bool has_seen_first_value_;
+  uint64_t key_size_;
+  uint64_t value_size_;
+  // A list of fixed-size key-value pairs concatenated into a string.
+ // Use GetKey(), GetUserKey(), and GetValue() to retrieve a specific + // key / value given an index + std::string kvs_; + std::string deleted_keys_; + // Number of key-value pairs stored in kvs_ + number of deleted keys + uint64_t num_entries_; + // Number of keys that contain value (non-deletion op) + uint64_t num_values_; + Status status_; + TableProperties properties_; + const Comparator* ucomp_; + bool use_module_hash_; + bool identity_as_first_hash_; + uint64_t (*get_slice_hash_)(const Slice& s, uint32_t index, + uint64_t max_num_buckets); + std::string largest_user_key_ = ""; + std::string smallest_user_key_ = ""; + + bool closed_; // Either Finish() or Abandon() has been called. + + // No copying allowed + CuckooTableBuilder(const CuckooTableBuilder&) = delete; + void operator=(const CuckooTableBuilder&) = delete; +}; + +} // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/cuckoo_table_builder_test.cc b/external/rocksdb/table/cuckoo_table_builder_test.cc new file mode 100755 index 0000000000..ac5bed1575 --- /dev/null +++ b/external/rocksdb/table/cuckoo_table_builder_test.cc @@ -0,0 +1,628 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef ROCKSDB_LITE + +#include +#include +#include +#include + +#include "table/meta_blocks.h" +#include "table/cuckoo_table_builder.h" +#include "util/file_reader_writer.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { +extern const uint64_t kCuckooTableMagicNumber; + +namespace { +std::unordered_map> hash_map; + +uint64_t GetSliceHash(const Slice& s, uint32_t index, + uint64_t max_num_buckets) { + return hash_map[s.ToString()][index]; +} +} // namespace + +class CuckooBuilderTest : public testing::Test { + public: + CuckooBuilderTest() { + env_ = Env::Default(); + Options options; + options.allow_mmap_reads = true; + env_options_ = EnvOptions(options); + } + + void CheckFileContents(const std::vector& keys, + const std::vector& values, + const std::vector& expected_locations, + std::string expected_unused_bucket, uint64_t expected_table_size, + uint32_t expected_num_hash_func, bool expected_is_last_level, + uint32_t expected_cuckoo_block_size = 1) { + // Read file + unique_ptr read_file; + ASSERT_OK(env_->NewRandomAccessFile(fname, &read_file, env_options_)); + uint64_t read_file_size; + ASSERT_OK(env_->GetFileSize(fname, &read_file_size)); + + Options options; + options.allow_mmap_reads = true; + ImmutableCFOptions ioptions(options); + + // Assert Table Properties. + TableProperties* props = nullptr; + unique_ptr file_reader( + new RandomAccessFileReader(std::move(read_file))); + ASSERT_OK(ReadTableProperties(file_reader.get(), read_file_size, + kCuckooTableMagicNumber, ioptions, + &props)); + // Check unused bucket. + std::string unused_key = props->user_collected_properties[ + CuckooTablePropertyNames::kEmptyKey]; + ASSERT_EQ(expected_unused_bucket.substr(0, + props->fixed_key_len), unused_key); + + uint32_t value_len_found = + *reinterpret_cast(props->user_collected_properties[ + CuckooTablePropertyNames::kValueLength].data()); + ASSERT_EQ(values.empty() ? 
0 : values[0].size(), value_len_found);
+    ASSERT_EQ(props->raw_value_size, values.size() * value_len_found);
+    const uint64_t table_size =
+        *reinterpret_cast<const uint64_t*>(props->user_collected_properties[
+            CuckooTablePropertyNames::kHashTableSize].data());
+    ASSERT_EQ(expected_table_size, table_size);
+    const uint32_t num_hash_func_found =
+        *reinterpret_cast<const uint32_t*>(props->user_collected_properties[
+            CuckooTablePropertyNames::kNumHashFunc].data());
+    ASSERT_EQ(expected_num_hash_func, num_hash_func_found);
+    const uint32_t cuckoo_block_size =
+        *reinterpret_cast<const uint32_t*>(props->user_collected_properties[
+            CuckooTablePropertyNames::kCuckooBlockSize].data());
+    ASSERT_EQ(expected_cuckoo_block_size, cuckoo_block_size);
+    const bool is_last_level_found =
+        *reinterpret_cast<const bool*>(props->user_collected_properties[
+            CuckooTablePropertyNames::kIsLastLevel].data());
+    ASSERT_EQ(expected_is_last_level, is_last_level_found);
+
+    ASSERT_EQ(props->num_entries, keys.size());
+    ASSERT_EQ(props->fixed_key_len, keys.empty() ? 0 : keys[0].size());
+    ASSERT_EQ(props->data_size, expected_unused_bucket.size() *
+        (expected_table_size + expected_cuckoo_block_size - 1));
+    ASSERT_EQ(props->raw_key_size, keys.size() * props->fixed_key_len);
+    ASSERT_EQ(props->column_family_id, 0);
+    ASSERT_EQ(props->column_family_name, kDefaultColumnFamilyName);
+    delete props;
+
+    // Check contents of the bucket.
+    std::vector<bool> keys_found(keys.size(), false);
+    size_t bucket_size = expected_unused_bucket.size();
+    for (uint32_t i = 0; i < table_size + cuckoo_block_size - 1; ++i) {
+      Slice read_slice;
+      ASSERT_OK(file_reader->Read(i * bucket_size, bucket_size, &read_slice,
+                                  nullptr));
+      size_t key_idx =
+          std::find(expected_locations.begin(), expected_locations.end(), i) -
+          expected_locations.begin();
+      if (key_idx == keys.size()) {
+        // i is not one of the expected locations. Empty bucket.
+        ASSERT_EQ(read_slice.compare(expected_unused_bucket), 0);
+      } else {
+        keys_found[key_idx] = true;
+        ASSERT_EQ(read_slice.compare(keys[key_idx] + values[key_idx]), 0);
+      }
+    }
+    for (auto key_found : keys_found) {
+      // Check that all keys were found.
+      ASSERT_TRUE(key_found);
+    }
+  }
+
+  std::string GetInternalKey(Slice user_key, bool zero_seqno) {
+    IterKey ikey;
+    ikey.SetInternalKey(user_key, zero_seqno ?
0 : 1000, kTypeValue); + return ikey.GetKey().ToString(); + } + + uint64_t NextPowOf2(uint64_t num) { + uint64_t n = 2; + while (n <= num) { + n *= 2; + } + return n; + } + + uint64_t GetExpectedTableSize(uint64_t num) { + return NextPowOf2(static_cast(num / kHashTableRatio)); + } + + + Env* env_; + EnvOptions env_options_; + std::string fname; + const double kHashTableRatio = 0.9; +}; + +TEST_F(CuckooBuilderTest, SuccessWithEmptyFile) { + unique_ptr writable_file; + fname = test::TmpDir() + "/EmptyFile"; + ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_)); + unique_ptr file_writer( + new WritableFileWriter(std::move(writable_file), EnvOptions())); + CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, 4, 100, + BytewiseComparator(), 1, false, false, + GetSliceHash, 0 /* column_family_id */, + kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + ASSERT_EQ(0UL, builder.FileSize()); + ASSERT_OK(builder.Finish()); + ASSERT_OK(file_writer->Close()); + CheckFileContents({}, {}, {}, "", 2, 2, false); +} + +TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionFullKey) { + uint32_t num_hash_fun = 4; + std::vector user_keys = {"key01", "key02", "key03", "key04"}; + std::vector values = {"v01", "v02", "v03", "v04"}; + // Need to have a temporary variable here as VS compiler does not currently + // support operator= with initializer_list as a parameter + std::unordered_map> hm = { + {user_keys[0], {0, 1, 2, 3}}, + {user_keys[1], {1, 2, 3, 4}}, + {user_keys[2], {2, 3, 4, 5}}, + {user_keys[3], {3, 4, 5, 6}}}; + hash_map = std::move(hm); + + std::vector expected_locations = {0, 1, 2, 3}; + std::vector keys; + for (auto& user_key : user_keys) { + keys.push_back(GetInternalKey(user_key, false)); + } + uint64_t expected_table_size = GetExpectedTableSize(keys.size()); + + unique_ptr writable_file; + fname = test::TmpDir() + "/NoCollisionFullKey"; + ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_)); + unique_ptr file_writer( + new WritableFileWriter(std::move(writable_file), EnvOptions())); + CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun, + 100, BytewiseComparator(), 1, false, false, + GetSliceHash, 0 /* column_family_id */, + kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + for (uint32_t i = 0; i < user_keys.size(); i++) { + builder.Add(Slice(keys[i]), Slice(values[i])); + ASSERT_EQ(builder.NumEntries(), i + 1); + ASSERT_OK(builder.status()); + } + size_t bucket_size = keys[0].size() + values[0].size(); + ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); + ASSERT_OK(builder.Finish()); + ASSERT_OK(file_writer->Close()); + ASSERT_LE(expected_table_size * bucket_size, builder.FileSize()); + + std::string expected_unused_bucket = GetInternalKey("key00", true); + expected_unused_bucket += std::string(values[0].size(), 'a'); + CheckFileContents(keys, values, expected_locations, + expected_unused_bucket, expected_table_size, 2, false); +} + +TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionFullKey) { + uint32_t num_hash_fun = 4; + std::vector user_keys = {"key01", "key02", "key03", "key04"}; + std::vector values = {"v01", "v02", "v03", "v04"}; + // Need to have a temporary variable here as VS compiler does not currently + // support operator= with initializer_list as a parameter + std::unordered_map> hm = { + {user_keys[0], {0, 1, 2, 3}}, + {user_keys[1], {0, 1, 2, 3}}, + {user_keys[2], {0, 1, 2, 3}}, + {user_keys[3], {0, 1, 2, 3}}, + }; + hash_map = std::move(hm); + + std::vector expected_locations 
= {0, 1, 2, 3}; + std::vector keys; + for (auto& user_key : user_keys) { + keys.push_back(GetInternalKey(user_key, false)); + } + uint64_t expected_table_size = GetExpectedTableSize(keys.size()); + + unique_ptr writable_file; + fname = test::TmpDir() + "/WithCollisionFullKey"; + ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_)); + unique_ptr file_writer( + new WritableFileWriter(std::move(writable_file), EnvOptions())); + CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun, + 100, BytewiseComparator(), 1, false, false, + GetSliceHash, 0 /* column_family_id */, + kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + for (uint32_t i = 0; i < user_keys.size(); i++) { + builder.Add(Slice(keys[i]), Slice(values[i])); + ASSERT_EQ(builder.NumEntries(), i + 1); + ASSERT_OK(builder.status()); + } + size_t bucket_size = keys[0].size() + values[0].size(); + ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); + ASSERT_OK(builder.Finish()); + ASSERT_OK(file_writer->Close()); + ASSERT_LE(expected_table_size * bucket_size, builder.FileSize()); + + std::string expected_unused_bucket = GetInternalKey("key00", true); + expected_unused_bucket += std::string(values[0].size(), 'a'); + CheckFileContents(keys, values, expected_locations, + expected_unused_bucket, expected_table_size, 4, false); +} + +TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionAndCuckooBlock) { + uint32_t num_hash_fun = 4; + std::vector user_keys = {"key01", "key02", "key03", "key04"}; + std::vector values = {"v01", "v02", "v03", "v04"}; + // Need to have a temporary variable here as VS compiler does not currently + // support operator= with initializer_list as a parameter + std::unordered_map> hm = { + {user_keys[0], {0, 1, 2, 3}}, + {user_keys[1], {0, 1, 2, 3}}, + {user_keys[2], {0, 1, 2, 3}}, + {user_keys[3], {0, 1, 2, 3}}, + }; + hash_map = std::move(hm); + + std::vector expected_locations = {0, 1, 2, 3}; + std::vector keys; + for (auto& user_key : user_keys) { + keys.push_back(GetInternalKey(user_key, false)); + } + uint64_t expected_table_size = GetExpectedTableSize(keys.size()); + + unique_ptr writable_file; + uint32_t cuckoo_block_size = 2; + fname = test::TmpDir() + "/WithCollisionFullKey2"; + ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_)); + unique_ptr file_writer( + new WritableFileWriter(std::move(writable_file), EnvOptions())); + CuckooTableBuilder builder( + file_writer.get(), kHashTableRatio, num_hash_fun, 100, + BytewiseComparator(), cuckoo_block_size, false, false, GetSliceHash, + 0 /* column_family_id */, kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + for (uint32_t i = 0; i < user_keys.size(); i++) { + builder.Add(Slice(keys[i]), Slice(values[i])); + ASSERT_EQ(builder.NumEntries(), i + 1); + ASSERT_OK(builder.status()); + } + size_t bucket_size = keys[0].size() + values[0].size(); + ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); + ASSERT_OK(builder.Finish()); + ASSERT_OK(file_writer->Close()); + ASSERT_LE(expected_table_size * bucket_size, builder.FileSize()); + + std::string expected_unused_bucket = GetInternalKey("key00", true); + expected_unused_bucket += std::string(values[0].size(), 'a'); + CheckFileContents(keys, values, expected_locations, + expected_unused_bucket, expected_table_size, 3, false, cuckoo_block_size); +} + +TEST_F(CuckooBuilderTest, WithCollisionPathFullKey) { + // Have two hash functions. Insert elements with overlapping hashes. 
+ // Finally insert an element with hash value somewhere in the middle + // so that it displaces all the elements after that. + uint32_t num_hash_fun = 2; + std::vector user_keys = {"key01", "key02", "key03", + "key04", "key05"}; + std::vector values = {"v01", "v02", "v03", "v04", "v05"}; + // Need to have a temporary variable here as VS compiler does not currently + // support operator= with initializer_list as a parameter + std::unordered_map> hm = { + {user_keys[0], {0, 1}}, + {user_keys[1], {1, 2}}, + {user_keys[2], {2, 3}}, + {user_keys[3], {3, 4}}, + {user_keys[4], {0, 2}}, + }; + hash_map = std::move(hm); + + std::vector expected_locations = {0, 1, 3, 4, 2}; + std::vector keys; + for (auto& user_key : user_keys) { + keys.push_back(GetInternalKey(user_key, false)); + } + uint64_t expected_table_size = GetExpectedTableSize(keys.size()); + + unique_ptr writable_file; + fname = test::TmpDir() + "/WithCollisionPathFullKey"; + ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_)); + unique_ptr file_writer( + new WritableFileWriter(std::move(writable_file), EnvOptions())); + CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun, + 100, BytewiseComparator(), 1, false, false, + GetSliceHash, 0 /* column_family_id */, + kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + for (uint32_t i = 0; i < user_keys.size(); i++) { + builder.Add(Slice(keys[i]), Slice(values[i])); + ASSERT_EQ(builder.NumEntries(), i + 1); + ASSERT_OK(builder.status()); + } + size_t bucket_size = keys[0].size() + values[0].size(); + ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); + ASSERT_OK(builder.Finish()); + ASSERT_OK(file_writer->Close()); + ASSERT_LE(expected_table_size * bucket_size, builder.FileSize()); + + std::string expected_unused_bucket = GetInternalKey("key00", true); + expected_unused_bucket += std::string(values[0].size(), 'a'); + CheckFileContents(keys, values, expected_locations, + expected_unused_bucket, expected_table_size, 2, false); +} + +TEST_F(CuckooBuilderTest, WithCollisionPathFullKeyAndCuckooBlock) { + uint32_t num_hash_fun = 2; + std::vector user_keys = {"key01", "key02", "key03", + "key04", "key05"}; + std::vector values = {"v01", "v02", "v03", "v04", "v05"}; + // Need to have a temporary variable here as VS compiler does not currently + // support operator= with initializer_list as a parameter + std::unordered_map> hm = { + {user_keys[0], {0, 1}}, + {user_keys[1], {1, 2}}, + {user_keys[2], {3, 4}}, + {user_keys[3], {4, 5}}, + {user_keys[4], {0, 3}}, + }; + hash_map = std::move(hm); + + std::vector expected_locations = {2, 1, 3, 4, 0}; + std::vector keys; + for (auto& user_key : user_keys) { + keys.push_back(GetInternalKey(user_key, false)); + } + uint64_t expected_table_size = GetExpectedTableSize(keys.size()); + + unique_ptr writable_file; + fname = test::TmpDir() + "/WithCollisionPathFullKeyAndCuckooBlock"; + ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_)); + unique_ptr file_writer( + new WritableFileWriter(std::move(writable_file), EnvOptions())); + CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun, + 100, BytewiseComparator(), 2, false, false, + GetSliceHash, 0 /* column_family_id */, + kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + for (uint32_t i = 0; i < user_keys.size(); i++) { + builder.Add(Slice(keys[i]), Slice(values[i])); + ASSERT_EQ(builder.NumEntries(), i + 1); + ASSERT_OK(builder.status()); + } + size_t bucket_size = keys[0].size() + values[0].size(); 
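+  // (Before Finish(), FileSize() returns the power-of-two size estimate
+  // minus one; see CuckooTableBuilder::FileSize() for the accounting.)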
+ ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); + ASSERT_OK(builder.Finish()); + ASSERT_OK(file_writer->Close()); + ASSERT_LE(expected_table_size * bucket_size, builder.FileSize()); + + std::string expected_unused_bucket = GetInternalKey("key00", true); + expected_unused_bucket += std::string(values[0].size(), 'a'); + CheckFileContents(keys, values, expected_locations, + expected_unused_bucket, expected_table_size, 2, false, 2); +} + +TEST_F(CuckooBuilderTest, WriteSuccessNoCollisionUserKey) { + uint32_t num_hash_fun = 4; + std::vector user_keys = {"key01", "key02", "key03", "key04"}; + std::vector values = {"v01", "v02", "v03", "v04"}; + // Need to have a temporary variable here as VS compiler does not currently + // support operator= with initializer_list as a parameter + std::unordered_map> hm = { + {user_keys[0], {0, 1, 2, 3}}, + {user_keys[1], {1, 2, 3, 4}}, + {user_keys[2], {2, 3, 4, 5}}, + {user_keys[3], {3, 4, 5, 6}}}; + hash_map = std::move(hm); + + std::vector expected_locations = {0, 1, 2, 3}; + uint64_t expected_table_size = GetExpectedTableSize(user_keys.size()); + + unique_ptr writable_file; + fname = test::TmpDir() + "/NoCollisionUserKey"; + ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_)); + unique_ptr file_writer( + new WritableFileWriter(std::move(writable_file), EnvOptions())); + CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun, + 100, BytewiseComparator(), 1, false, false, + GetSliceHash, 0 /* column_family_id */, + kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + for (uint32_t i = 0; i < user_keys.size(); i++) { + builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i])); + ASSERT_EQ(builder.NumEntries(), i + 1); + ASSERT_OK(builder.status()); + } + size_t bucket_size = user_keys[0].size() + values[0].size(); + ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); + ASSERT_OK(builder.Finish()); + ASSERT_OK(file_writer->Close()); + ASSERT_LE(expected_table_size * bucket_size, builder.FileSize()); + + std::string expected_unused_bucket = "key00"; + expected_unused_bucket += std::string(values[0].size(), 'a'); + CheckFileContents(user_keys, values, expected_locations, + expected_unused_bucket, expected_table_size, 2, true); +} + +TEST_F(CuckooBuilderTest, WriteSuccessWithCollisionUserKey) { + uint32_t num_hash_fun = 4; + std::vector user_keys = {"key01", "key02", "key03", "key04"}; + std::vector values = {"v01", "v02", "v03", "v04"}; + // Need to have a temporary variable here as VS compiler does not currently + // support operator= with initializer_list as a parameter + std::unordered_map> hm = { + {user_keys[0], {0, 1, 2, 3}}, + {user_keys[1], {0, 1, 2, 3}}, + {user_keys[2], {0, 1, 2, 3}}, + {user_keys[3], {0, 1, 2, 3}}, + }; + hash_map = std::move(hm); + + std::vector expected_locations = {0, 1, 2, 3}; + uint64_t expected_table_size = GetExpectedTableSize(user_keys.size()); + + unique_ptr writable_file; + fname = test::TmpDir() + "/WithCollisionUserKey"; + ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_)); + unique_ptr file_writer( + new WritableFileWriter(std::move(writable_file), EnvOptions())); + CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun, + 100, BytewiseComparator(), 1, false, false, + GetSliceHash, 0 /* column_family_id */, + kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + for (uint32_t i = 0; i < user_keys.size(); i++) { + builder.Add(Slice(GetInternalKey(user_keys[i], true)), 
Slice(values[i])); + ASSERT_EQ(builder.NumEntries(), i + 1); + ASSERT_OK(builder.status()); + } + size_t bucket_size = user_keys[0].size() + values[0].size(); + ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); + ASSERT_OK(builder.Finish()); + ASSERT_OK(file_writer->Close()); + ASSERT_LE(expected_table_size * bucket_size, builder.FileSize()); + + std::string expected_unused_bucket = "key00"; + expected_unused_bucket += std::string(values[0].size(), 'a'); + CheckFileContents(user_keys, values, expected_locations, + expected_unused_bucket, expected_table_size, 4, true); +} + +TEST_F(CuckooBuilderTest, WithCollisionPathUserKey) { + uint32_t num_hash_fun = 2; + std::vector user_keys = {"key01", "key02", "key03", + "key04", "key05"}; + std::vector values = {"v01", "v02", "v03", "v04", "v05"}; + // Need to have a temporary variable here as VS compiler does not currently + // support operator= with initializer_list as a parameter + std::unordered_map> hm = { + {user_keys[0], {0, 1}}, + {user_keys[1], {1, 2}}, + {user_keys[2], {2, 3}}, + {user_keys[3], {3, 4}}, + {user_keys[4], {0, 2}}, + }; + hash_map = std::move(hm); + + std::vector expected_locations = {0, 1, 3, 4, 2}; + uint64_t expected_table_size = GetExpectedTableSize(user_keys.size()); + + unique_ptr writable_file; + fname = test::TmpDir() + "/WithCollisionPathUserKey"; + ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_)); + unique_ptr file_writer( + new WritableFileWriter(std::move(writable_file), EnvOptions())); + CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun, + 2, BytewiseComparator(), 1, false, false, + GetSliceHash, 0 /* column_family_id */, + kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + for (uint32_t i = 0; i < user_keys.size(); i++) { + builder.Add(Slice(GetInternalKey(user_keys[i], true)), Slice(values[i])); + ASSERT_EQ(builder.NumEntries(), i + 1); + ASSERT_OK(builder.status()); + } + size_t bucket_size = user_keys[0].size() + values[0].size(); + ASSERT_EQ(expected_table_size * bucket_size - 1, builder.FileSize()); + ASSERT_OK(builder.Finish()); + ASSERT_OK(file_writer->Close()); + ASSERT_LE(expected_table_size * bucket_size, builder.FileSize()); + + std::string expected_unused_bucket = "key00"; + expected_unused_bucket += std::string(values[0].size(), 'a'); + CheckFileContents(user_keys, values, expected_locations, + expected_unused_bucket, expected_table_size, 2, true); +} + +TEST_F(CuckooBuilderTest, FailWhenCollisionPathTooLong) { + // Have two hash functions. Insert elements with overlapping hashes. + // Finally try inserting an element with hash value somewhere in the middle + // and it should fail because the no. of elements to displace is too high. 
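+  // (The builder below is constructed with max_search_depth == 2, so the
+  // displacement path needed for the final insert exceeds the limit and
+  // Finish() returns NotSupported.)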
+ uint32_t num_hash_fun = 2; + std::vector user_keys = {"key01", "key02", "key03", + "key04", "key05"}; + // Need to have a temporary variable here as VS compiler does not currently + // support operator= with initializer_list as a parameter + std::unordered_map> hm = { + {user_keys[0], {0, 1}}, + {user_keys[1], {1, 2}}, + {user_keys[2], {2, 3}}, + {user_keys[3], {3, 4}}, + {user_keys[4], {0, 1}}, + }; + hash_map = std::move(hm); + + unique_ptr writable_file; + fname = test::TmpDir() + "/WithCollisionPathUserKey"; + ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_)); + unique_ptr file_writer( + new WritableFileWriter(std::move(writable_file), EnvOptions())); + CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun, + 2, BytewiseComparator(), 1, false, false, + GetSliceHash, 0 /* column_family_id */, + kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + for (uint32_t i = 0; i < user_keys.size(); i++) { + builder.Add(Slice(GetInternalKey(user_keys[i], false)), Slice("value")); + ASSERT_EQ(builder.NumEntries(), i + 1); + ASSERT_OK(builder.status()); + } + ASSERT_TRUE(builder.Finish().IsNotSupported()); + ASSERT_OK(file_writer->Close()); +} + +TEST_F(CuckooBuilderTest, FailWhenSameKeyInserted) { + // Need to have a temporary variable here as VS compiler does not currently + // support operator= with initializer_list as a parameter + std::unordered_map> hm = { + {"repeatedkey", {0, 1, 2, 3}}}; + hash_map = std::move(hm); + uint32_t num_hash_fun = 4; + std::string user_key = "repeatedkey"; + + unique_ptr writable_file; + fname = test::TmpDir() + "/FailWhenSameKeyInserted"; + ASSERT_OK(env_->NewWritableFile(fname, &writable_file, env_options_)); + unique_ptr file_writer( + new WritableFileWriter(std::move(writable_file), EnvOptions())); + CuckooTableBuilder builder(file_writer.get(), kHashTableRatio, num_hash_fun, + 100, BytewiseComparator(), 1, false, false, + GetSliceHash, 0 /* column_family_id */, + kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + + builder.Add(Slice(GetInternalKey(user_key, false)), Slice("value1")); + ASSERT_EQ(builder.NumEntries(), 1u); + ASSERT_OK(builder.status()); + builder.Add(Slice(GetInternalKey(user_key, true)), Slice("value2")); + ASSERT_EQ(builder.NumEntries(), 2u); + ASSERT_OK(builder.status()); + + ASSERT_TRUE(builder.Finish().IsNotSupported()); + ASSERT_OK(file_writer->Close()); +} +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} + +#else +#include + +int main(int argc, char** argv) { + fprintf(stderr, "SKIPPED as Cuckoo table is not supported in ROCKSDB_LITE\n"); + return 0; +} + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/cuckoo_table_factory.cc b/external/rocksdb/table/cuckoo_table_factory.cc new file mode 100755 index 0000000000..87107ee7fc --- /dev/null +++ b/external/rocksdb/table/cuckoo_table_factory.cc @@ -0,0 +1,72 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
+
+#ifndef ROCKSDB_LITE
+#include "table/cuckoo_table_factory.h"
+
+#include "db/dbformat.h"
+#include "table/cuckoo_table_builder.h"
+#include "table/cuckoo_table_reader.h"
+
+namespace rocksdb {
+
+Status CuckooTableFactory::NewTableReader(
+    const TableReaderOptions& table_reader_options,
+    unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size,
+    std::unique_ptr<TableReader>* table,
+    bool prefetch_index_and_filter_in_cache) const {
+  std::unique_ptr<CuckooTableReader> new_reader(new CuckooTableReader(
+      table_reader_options.ioptions, std::move(file), file_size,
+      table_reader_options.internal_comparator.user_comparator(), nullptr));
+  Status s = new_reader->status();
+  if (s.ok()) {
+    *table = std::move(new_reader);
+  }
+  return s;
+}
+
+TableBuilder* CuckooTableFactory::NewTableBuilder(
+    const TableBuilderOptions& table_builder_options, uint32_t column_family_id,
+    WritableFileWriter* file) const {
+  // Ignore the skip_filters flag. It does not apply to this file format.
+
+  // TODO: change builder to take the option struct
+  return new CuckooTableBuilder(
+      file, table_options_.hash_table_ratio, 64,
+      table_options_.max_search_depth,
+      table_builder_options.internal_comparator.user_comparator(),
+      table_options_.cuckoo_block_size, table_options_.use_module_hash,
+      table_options_.identity_as_first_hash, nullptr /* get_slice_hash */,
+      column_family_id, table_builder_options.column_family_name);
+}
+
+std::string CuckooTableFactory::GetPrintableTableOptions() const {
+  std::string ret;
+  ret.reserve(2000);
+  const int kBufferSize = 200;
+  char buffer[kBufferSize];
+
+  snprintf(buffer, kBufferSize, "  hash_table_ratio: %lf\n",
+           table_options_.hash_table_ratio);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  max_search_depth: %u\n",
+           table_options_.max_search_depth);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  cuckoo_block_size: %u\n",
+           table_options_.cuckoo_block_size);
+  ret.append(buffer);
+  snprintf(buffer, kBufferSize, "  identity_as_first_hash: %d\n",
+           table_options_.identity_as_first_hash);
+  ret.append(buffer);
+  return ret;
+}
+
+TableFactory* NewCuckooTableFactory(const CuckooTableOptions& table_options) {
+  return new CuckooTableFactory(table_options);
+}
+
+}  // namespace rocksdb
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/table/cuckoo_table_factory.h b/external/rocksdb/table/cuckoo_table_factory.h
new file mode 100755
index 0000000000..ff67ae2476
--- /dev/null
+++ b/external/rocksdb/table/cuckoo_table_factory.h
@@ -0,0 +1,83 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+#ifndef ROCKSDB_LITE
+
+#include <string>
+#include "rocksdb/table.h"
+#include "util/murmurhash.h"
+#include "rocksdb/options.h"
+
+namespace rocksdb {
+
+const uint32_t kCuckooMurmurSeedMultiplier = 816922183;
+static inline uint64_t CuckooHash(
+    const Slice& user_key, uint32_t hash_cnt, bool use_module_hash,
+    uint64_t table_size_, bool identity_as_first_hash,
+    uint64_t (*get_slice_hash)(const Slice&, uint32_t, uint64_t)) {
+#if !defined NDEBUG || defined OS_WIN
+  // This part is used only in unit tests, but we have to keep it for the
+  // Windows build as we run tests in both debug and release modes there.
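+  // (When get_slice_hash is non-null, unit tests inject deterministic
+  // hash values in place of MurmurHash.)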
+ if (get_slice_hash != nullptr) { + return get_slice_hash(user_key, hash_cnt, table_size_); + } +#endif + + uint64_t value = 0; + if (hash_cnt == 0 && identity_as_first_hash) { + value = (*reinterpret_cast(user_key.data())); + } else { + value = MurmurHash(user_key.data(), static_cast(user_key.size()), + kCuckooMurmurSeedMultiplier * hash_cnt); + } + if (use_module_hash) { + return value % table_size_; + } else { + return value & (table_size_ - 1); + } +} + +// Cuckoo Table is designed for applications that require fast point lookups +// but not fast range scans. +// +// Some assumptions: +// - Key length and Value length are fixed. +// - Does not support Snapshot. +// - Does not support Merge operations. +class CuckooTableFactory : public TableFactory { + public: + explicit CuckooTableFactory(const CuckooTableOptions& table_options) + : table_options_(table_options) {} + ~CuckooTableFactory() {} + + const char* Name() const override { return "CuckooTable"; } + + Status NewTableReader( + const TableReaderOptions& table_reader_options, + unique_ptr&& file, uint64_t file_size, + unique_ptr* table, + bool prefetch_index_and_filter_in_cache = true) const override; + + TableBuilder* NewTableBuilder( + const TableBuilderOptions& table_builder_options, + uint32_t column_family_id, WritableFileWriter* file) const override; + + // Sanitizes the specified DB Options. + Status SanitizeOptions(const DBOptions& db_opts, + const ColumnFamilyOptions& cf_opts) const override { + return Status::OK(); + } + + std::string GetPrintableTableOptions() const override; + + void* GetOptions() override { return &table_options_; } + + private: + CuckooTableOptions table_options_; +}; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/cuckoo_table_reader.cc b/external/rocksdb/table/cuckoo_table_reader.cc new file mode 100755 index 0000000000..f6d69154ef --- /dev/null +++ b/external/rocksdb/table/cuckoo_table_reader.cc @@ -0,0 +1,383 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+ +#ifndef ROCKSDB_LITE +#include "table/cuckoo_table_reader.h" + +#include +#include +#include +#include +#include +#include "rocksdb/iterator.h" +#include "rocksdb/table.h" +#include "table/internal_iterator.h" +#include "table/meta_blocks.h" +#include "table/cuckoo_table_factory.h" +#include "table/get_context.h" +#include "util/arena.h" +#include "util/coding.h" + +namespace rocksdb { +namespace { +const uint64_t CACHE_LINE_MASK = ~((uint64_t)CACHE_LINE_SIZE - 1); +const uint32_t kInvalidIndex = std::numeric_limits::max(); +} + +extern const uint64_t kCuckooTableMagicNumber; + +CuckooTableReader::CuckooTableReader( + const ImmutableCFOptions& ioptions, + std::unique_ptr&& file, uint64_t file_size, + const Comparator* comparator, + uint64_t (*get_slice_hash)(const Slice&, uint32_t, uint64_t)) + : file_(std::move(file)), + ucomp_(comparator), + get_slice_hash_(get_slice_hash) { + if (!ioptions.allow_mmap_reads) { + status_ = Status::InvalidArgument("File is not mmaped"); + } + TableProperties* props = nullptr; + status_ = ReadTableProperties(file_.get(), file_size, kCuckooTableMagicNumber, + ioptions, &props); + if (!status_.ok()) { + return; + } + table_props_.reset(props); + auto& user_props = props->user_collected_properties; + auto hash_funs = user_props.find(CuckooTablePropertyNames::kNumHashFunc); + if (hash_funs == user_props.end()) { + status_ = Status::Corruption("Number of hash functions not found"); + return; + } + num_hash_func_ = *reinterpret_cast(hash_funs->second.data()); + auto unused_key = user_props.find(CuckooTablePropertyNames::kEmptyKey); + if (unused_key == user_props.end()) { + status_ = Status::Corruption("Empty bucket value not found"); + return; + } + unused_key_ = unused_key->second; + + key_length_ = static_cast(props->fixed_key_len); + auto user_key_len = user_props.find(CuckooTablePropertyNames::kUserKeyLength); + if (user_key_len == user_props.end()) { + status_ = Status::Corruption("User key length not found"); + return; + } + user_key_length_ = *reinterpret_cast( + user_key_len->second.data()); + + auto value_length = user_props.find(CuckooTablePropertyNames::kValueLength); + if (value_length == user_props.end()) { + status_ = Status::Corruption("Value length not found"); + return; + } + value_length_ = *reinterpret_cast( + value_length->second.data()); + bucket_length_ = key_length_ + value_length_; + + auto hash_table_size = user_props.find( + CuckooTablePropertyNames::kHashTableSize); + if (hash_table_size == user_props.end()) { + status_ = Status::Corruption("Hash table size not found"); + return; + } + table_size_ = *reinterpret_cast( + hash_table_size->second.data()); + + auto is_last_level = user_props.find(CuckooTablePropertyNames::kIsLastLevel); + if (is_last_level == user_props.end()) { + status_ = Status::Corruption("Is last level not found"); + return; + } + is_last_level_ = *reinterpret_cast(is_last_level->second.data()); + + auto identity_as_first_hash = user_props.find( + CuckooTablePropertyNames::kIdentityAsFirstHash); + if (identity_as_first_hash == user_props.end()) { + status_ = Status::Corruption("identity as first hash not found"); + return; + } + identity_as_first_hash_ = *reinterpret_cast( + identity_as_first_hash->second.data()); + + auto use_module_hash = user_props.find( + CuckooTablePropertyNames::kUseModuleHash); + if (use_module_hash == user_props.end()) { + status_ = Status::Corruption("hash type is not found"); + return; + } + use_module_hash_ = *reinterpret_cast( + use_module_hash->second.data()); + auto cuckoo_block_size = 
user_props.find(
+      CuckooTablePropertyNames::kCuckooBlockSize);
+  if (cuckoo_block_size == user_props.end()) {
+    status_ = Status::Corruption("Cuckoo block size not found");
+    return;
+  }
+  cuckoo_block_size_ = *reinterpret_cast<const uint32_t*>(
+      cuckoo_block_size->second.data());
+  cuckoo_block_bytes_minus_one_ = cuckoo_block_size_ * bucket_length_ - 1;
+  status_ = file_->Read(0, file_size, &file_data_, nullptr);
+}
+
+Status CuckooTableReader::Get(const ReadOptions& readOptions, const Slice& key,
+                              GetContext* get_context, bool skip_filters) {
+  assert(key.size() == key_length_ + (is_last_level_ ? 8 : 0));
+  Slice user_key = ExtractUserKey(key);
+  for (uint32_t hash_cnt = 0; hash_cnt < num_hash_func_; ++hash_cnt) {
+    uint64_t offset = bucket_length_ * CuckooHash(
+        user_key, hash_cnt, use_module_hash_, table_size_,
+        identity_as_first_hash_, get_slice_hash_);
+    const char* bucket = &file_data_.data()[offset];
+    for (uint32_t block_idx = 0; block_idx < cuckoo_block_size_;
+         ++block_idx, bucket += bucket_length_) {
+      if (ucomp_->Equal(Slice(unused_key_.data(), user_key.size()),
+                        Slice(bucket, user_key.size()))) {
+        return Status::OK();
+      }
+      // Here, we compare only the user key part as we support only one entry
+      // per user key and we don't support snapshot.
+      if (ucomp_->Equal(user_key, Slice(bucket, user_key.size()))) {
+        Slice value(bucket + key_length_, value_length_);
+        if (is_last_level_) {
+          // Sequence number is not stored at the last level, so we will use
+          // kMaxSequenceNumber since it is unknown. This could cause some
+          // transactions to fail to lock a key due to known sequence number.
+          // However, it is not expected for anyone to use a CuckooTable in a
+          // TransactionDB.
+          get_context->SaveValue(value, kMaxSequenceNumber);
+        } else {
+          Slice full_key(bucket, key_length_);
+          ParsedInternalKey found_ikey;
+          ParseInternalKey(full_key, &found_ikey);
+          get_context->SaveValue(found_ikey, value);
+        }
+        // We don't support merge operations. So, we return here.
+        return Status::OK();
+      }
+    }
+  }
+  return Status::OK();
+}
+
+void CuckooTableReader::Prepare(const Slice& key) {
+  // Prefetch the first Cuckoo Block.
+  Slice user_key = ExtractUserKey(key);
+  uint64_t addr = reinterpret_cast<uint64_t>(file_data_.data()) +
+      bucket_length_ * CuckooHash(user_key, 0, use_module_hash_, table_size_,
+                                  identity_as_first_hash_, nullptr);
+  uint64_t end_addr = addr + cuckoo_block_bytes_minus_one_;
+  for (addr &= CACHE_LINE_MASK; addr < end_addr; addr += CACHE_LINE_SIZE) {
+    PREFETCH(reinterpret_cast<const char*>(addr), 0, 3);
+  }
+}
+
+class CuckooTableIterator : public InternalIterator {
+ public:
+  explicit CuckooTableIterator(CuckooTableReader* reader);
+  ~CuckooTableIterator() {}
+  bool Valid() const override;
+  void SeekToFirst() override;
+  void SeekToLast() override;
+  void Seek(const Slice& target) override;
+  void Next() override;
+  void Prev() override;
+  Slice key() const override;
+  Slice value() const override;
+  Status status() const override { return status_; }
+  void InitIfNeeded();
+
+ private:
+  struct BucketComparator {
+    BucketComparator(const Slice& file_data, const Comparator* ucomp,
+                     uint32_t bucket_len, uint32_t user_key_len,
+                     const Slice& target = Slice())
+      : file_data_(file_data),
+        ucomp_(ucomp),
+        bucket_len_(bucket_len),
+        user_key_len_(user_key_len),
+        target_(target) {}
+    bool operator()(const uint32_t first, const uint32_t second) const {
+      const char* first_bucket =
+        (first == kInvalidIndex) ?
target_.data() : + &file_data_.data()[first * bucket_len_]; + const char* second_bucket = + (second == kInvalidIndex) ? target_.data() : + &file_data_.data()[second * bucket_len_]; + return ucomp_->Compare(Slice(first_bucket, user_key_len_), + Slice(second_bucket, user_key_len_)) < 0; + } + private: + const Slice file_data_; + const Comparator* ucomp_; + const uint32_t bucket_len_; + const uint32_t user_key_len_; + const Slice target_; + }; + + const BucketComparator bucket_comparator_; + void PrepareKVAtCurrIdx(); + CuckooTableReader* reader_; + bool initialized_; + Status status_; + // Contains a map of keys to bucket_id sorted in key order. + std::vector sorted_bucket_ids_; + // We assume that the number of items can be stored in uint32 (4 Billion). + uint32_t curr_key_idx_; + Slice curr_value_; + IterKey curr_key_; + // No copying allowed + CuckooTableIterator(const CuckooTableIterator&) = delete; + void operator=(const Iterator&) = delete; +}; + +CuckooTableIterator::CuckooTableIterator(CuckooTableReader* reader) + : bucket_comparator_(reader->file_data_, reader->ucomp_, + reader->bucket_length_, reader->user_key_length_), + reader_(reader), + initialized_(false), + curr_key_idx_(kInvalidIndex) { + sorted_bucket_ids_.clear(); + curr_value_.clear(); + curr_key_.Clear(); +} + +void CuckooTableIterator::InitIfNeeded() { + if (initialized_) { + return; + } + sorted_bucket_ids_.reserve(reader_->GetTableProperties()->num_entries); + uint64_t num_buckets = reader_->table_size_ + reader_->cuckoo_block_size_ - 1; + assert(num_buckets < kInvalidIndex); + const char* bucket = reader_->file_data_.data(); + for (uint32_t bucket_id = 0; bucket_id < num_buckets; ++bucket_id) { + if (Slice(bucket, reader_->key_length_) != Slice(reader_->unused_key_)) { + sorted_bucket_ids_.push_back(bucket_id); + } + bucket += reader_->bucket_length_; + } + assert(sorted_bucket_ids_.size() == + reader_->GetTableProperties()->num_entries); + std::sort(sorted_bucket_ids_.begin(), sorted_bucket_ids_.end(), + bucket_comparator_); + curr_key_idx_ = kInvalidIndex; + initialized_ = true; +} + +void CuckooTableIterator::SeekToFirst() { + InitIfNeeded(); + curr_key_idx_ = 0; + PrepareKVAtCurrIdx(); +} + +void CuckooTableIterator::SeekToLast() { + InitIfNeeded(); + curr_key_idx_ = static_cast(sorted_bucket_ids_.size()) - 1; + PrepareKVAtCurrIdx(); +} + +void CuckooTableIterator::Seek(const Slice& target) { + InitIfNeeded(); + const BucketComparator seek_comparator( + reader_->file_data_, reader_->ucomp_, + reader_->bucket_length_, reader_->user_key_length_, + ExtractUserKey(target)); + auto seek_it = std::lower_bound(sorted_bucket_ids_.begin(), + sorted_bucket_ids_.end(), + kInvalidIndex, + seek_comparator); + curr_key_idx_ = + static_cast(std::distance(sorted_bucket_ids_.begin(), seek_it)); + PrepareKVAtCurrIdx(); +} + +bool CuckooTableIterator::Valid() const { + return curr_key_idx_ < sorted_bucket_ids_.size(); +} + +void CuckooTableIterator::PrepareKVAtCurrIdx() { + if (!Valid()) { + curr_value_.clear(); + curr_key_.Clear(); + return; + } + uint32_t id = sorted_bucket_ids_[curr_key_idx_]; + const char* offset = reader_->file_data_.data() + + id * reader_->bucket_length_; + if (reader_->is_last_level_) { + // Always return internal key. 
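+    // (Last-level files store bare user keys, so a zero sequence number and
+    // kTypeValue are re-attached here to rebuild an internal key.)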
+ curr_key_.SetInternalKey(Slice(offset, reader_->user_key_length_), + 0, kTypeValue); + } else { + curr_key_.SetKey(Slice(offset, reader_->key_length_)); + } + curr_value_ = Slice(offset + reader_->key_length_, reader_->value_length_); +} + +void CuckooTableIterator::Next() { + if (!Valid()) { + curr_value_.clear(); + curr_key_.Clear(); + return; + } + ++curr_key_idx_; + PrepareKVAtCurrIdx(); +} + +void CuckooTableIterator::Prev() { + if (curr_key_idx_ == 0) { + curr_key_idx_ = static_cast(sorted_bucket_ids_.size()); + } + if (!Valid()) { + curr_value_.clear(); + curr_key_.Clear(); + return; + } + --curr_key_idx_; + PrepareKVAtCurrIdx(); +} + +Slice CuckooTableIterator::key() const { + assert(Valid()); + return curr_key_.GetKey(); +} + +Slice CuckooTableIterator::value() const { + assert(Valid()); + return curr_value_; +} + +extern InternalIterator* NewErrorInternalIterator(const Status& status, + Arena* arena); + +InternalIterator* CuckooTableReader::NewIterator( + const ReadOptions& read_options, Arena* arena, bool skip_filters) { + if (!status().ok()) { + return NewErrorInternalIterator( + Status::Corruption("CuckooTableReader status is not okay."), arena); + } + if (read_options.total_order_seek) { + return NewErrorInternalIterator( + Status::InvalidArgument("total_order_seek is not supported."), arena); + } + CuckooTableIterator* iter; + if (arena == nullptr) { + iter = new CuckooTableIterator(this); + } else { + auto iter_mem = arena->AllocateAligned(sizeof(CuckooTableIterator)); + iter = new (iter_mem) CuckooTableIterator(this); + } + return iter; +} + +size_t CuckooTableReader::ApproximateMemoryUsage() const { return 0; } + +} // namespace rocksdb +#endif diff --git a/external/rocksdb/table/cuckoo_table_reader.h b/external/rocksdb/table/cuckoo_table_reader.h new file mode 100755 index 0000000000..5e3e5528a9 --- /dev/null +++ b/external/rocksdb/table/cuckoo_table_reader.h @@ -0,0 +1,84 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#ifndef ROCKSDB_LITE +#include +#include +#include +#include + +#include "db/dbformat.h" +#include "rocksdb/env.h" +#include "rocksdb/options.h" +#include "table/table_reader.h" +#include "util/file_reader_writer.h" + +namespace rocksdb { + +class Arena; +class TableReader; +class InternalIterator; + +class CuckooTableReader: public TableReader { + public: + CuckooTableReader(const ImmutableCFOptions& ioptions, + std::unique_ptr&& file, + uint64_t file_size, const Comparator* user_comparator, + uint64_t (*get_slice_hash)(const Slice&, uint32_t, + uint64_t)); + ~CuckooTableReader() {} + + std::shared_ptr GetTableProperties() const override { + return table_props_; + } + + Status status() const { return status_; } + + Status Get(const ReadOptions& read_options, const Slice& key, + GetContext* get_context, bool skip_filters = false) override; + + InternalIterator* NewIterator(const ReadOptions&, Arena* arena = nullptr, + bool skip_filters = false) override; + void Prepare(const Slice& target) override; + + // Report an approximation of how much memory has been used. 
+ size_t ApproximateMemoryUsage() const override; + + // Following methods are not implemented for Cuckoo Table Reader + uint64_t ApproximateOffsetOf(const Slice& key) override { return 0; } + void SetupForCompaction() override {} + // End of methods not implemented. + + private: + friend class CuckooTableIterator; + void LoadAllKeys(std::vector<std::pair<Slice, uint32_t>>* key_to_bucket_id); + std::unique_ptr<RandomAccessFileReader> file_; + Slice file_data_; + bool is_last_level_; + bool identity_as_first_hash_; + bool use_module_hash_; + std::shared_ptr<const TableProperties> table_props_; + Status status_; + uint32_t num_hash_func_; + std::string unused_key_; + uint32_t key_length_; + uint32_t user_key_length_; + uint32_t value_length_; + uint32_t bucket_length_; + uint32_t cuckoo_block_size_; + uint32_t cuckoo_block_bytes_minus_one_; + uint64_t table_size_; + const Comparator* ucomp_; + uint64_t (*get_slice_hash_)(const Slice& s, uint32_t index, + uint64_t max_num_buckets); +}; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/cuckoo_table_reader_test.cc b/external/rocksdb/table/cuckoo_table_reader_test.cc new file mode 100755 index 0000000000..e440af90ab --- /dev/null +++ b/external/rocksdb/table/cuckoo_table_reader_test.cc @@ -0,0 +1,559 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef ROCKSDB_LITE + +#ifndef GFLAGS +#include <cstdio> +int main() { + fprintf(stderr, "Please install gflags to run this test... Skipping...\n"); + return 0; +} +#else + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include <inttypes.h> +#include <gflags/gflags.h> +#include <vector> +#include <string> +#include <map> + +#include "table/meta_blocks.h" +#include "table/cuckoo_table_builder.h" +#include "table/cuckoo_table_reader.h" +#include "table/cuckoo_table_factory.h" +#include "table/get_context.h" +#include "util/arena.h" +#include "util/random.h" +#include "util/string_util.h" +#include "util/testharness.h" +#include "util/testutil.h" + +using GFLAGS::ParseCommandLineFlags; +using GFLAGS::SetUsageMessage; + +DEFINE_string(file_dir, "", "Directory where the files will be created" + " for benchmark. Added for using tmpfs."); +DEFINE_bool(enable_perf, false, "Run Benchmark Tests too."); +DEFINE_bool(write, false, + "Should write new values to file in performance tests?"); +DEFINE_bool(identity_as_first_hash, true, "use identity as first hash"); + +namespace rocksdb { + +namespace { +const uint32_t kNumHashFunc = 10; +// Methods, variables related to Hash functions.
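+// Probe pattern of the mock: a key registered via AddHashLookups(s, b, n)
+// probes buckets b, b+1, ..., b+n-1 in order. For example,
+// AddHashLookups("k", 5, 3) makes GetSliceHash("k", i, max) return 5, 6
+// and 7 for i = 0, 1, 2, so a test can force disjoint placement or
+// deliberate collisions.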
+std::unordered_map<std::string, std::vector<uint64_t>> hash_map; + +void AddHashLookups(const std::string& s, uint64_t bucket_id, + uint32_t num_hash_fun) { + std::vector<uint64_t> v; + for (uint32_t i = 0; i < num_hash_fun; i++) { + v.push_back(bucket_id + i); + } + hash_map[s] = v; +} + +uint64_t GetSliceHash(const Slice& s, uint32_t index, + uint64_t max_num_buckets) { + return hash_map[s.ToString()][index]; +} +} // namespace + +class CuckooReaderTest : public testing::Test { + public: + using testing::Test::SetUp; + + CuckooReaderTest() { + options.allow_mmap_reads = true; + env = options.env; + env_options = EnvOptions(options); + } + + void SetUp(int num) { + num_items = num; + hash_map.clear(); + keys.clear(); + keys.resize(num_items); + user_keys.clear(); + user_keys.resize(num_items); + values.clear(); + values.resize(num_items); + } + + std::string NumToStr(int64_t i) { + return std::string(reinterpret_cast<char*>(&i), sizeof(i)); + } + + void CreateCuckooFileAndCheckReader( + const Comparator* ucomp = BytewiseComparator()) { + std::unique_ptr<WritableFile> writable_file; + ASSERT_OK(env->NewWritableFile(fname, &writable_file, env_options)); + unique_ptr<WritableFileWriter> file_writer( + new WritableFileWriter(std::move(writable_file), env_options)); + + CuckooTableBuilder builder( + file_writer.get(), 0.9, kNumHashFunc, 100, ucomp, 2, false, false, + GetSliceHash, 0 /* column_family_id */, kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + for (uint32_t key_idx = 0; key_idx < num_items; ++key_idx) { + builder.Add(Slice(keys[key_idx]), Slice(values[key_idx])); + ASSERT_OK(builder.status()); + ASSERT_EQ(builder.NumEntries(), key_idx + 1); + } + ASSERT_OK(builder.Finish()); + ASSERT_EQ(num_items, builder.NumEntries()); + file_size = builder.FileSize(); + ASSERT_OK(file_writer->Close()); + + // Check reader now. + std::unique_ptr<RandomAccessFile> read_file; + ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options)); + unique_ptr<RandomAccessFileReader> file_reader( + new RandomAccessFileReader(std::move(read_file))); + const ImmutableCFOptions ioptions(options); + CuckooTableReader reader(ioptions, std::move(file_reader), file_size, ucomp, + GetSliceHash); + ASSERT_OK(reader.status()); + // Assume no merge/deletion + for (uint32_t i = 0; i < num_items; ++i) { + std::string value; + GetContext get_context(ucomp, nullptr, nullptr, nullptr, + GetContext::kNotFound, Slice(user_keys[i]), &value, + nullptr, nullptr, nullptr); + ASSERT_OK(reader.Get(ReadOptions(), Slice(keys[i]), &get_context)); + ASSERT_EQ(values[i], value); + } + } + void UpdateKeys(bool with_zero_seqno) { + for (uint32_t i = 0; i < num_items; i++) { + ParsedInternalKey ikey(user_keys[i], + with_zero_seqno ?
0 : i + 1000, kTypeValue); + keys[i].clear(); + AppendInternalKey(&keys[i], ikey); + } + } + + void CheckIterator(const Comparator* ucomp = BytewiseComparator()) { + std::unique_ptr read_file; + ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options)); + unique_ptr file_reader( + new RandomAccessFileReader(std::move(read_file))); + const ImmutableCFOptions ioptions(options); + CuckooTableReader reader(ioptions, std::move(file_reader), file_size, ucomp, + GetSliceHash); + ASSERT_OK(reader.status()); + InternalIterator* it = reader.NewIterator(ReadOptions(), nullptr); + ASSERT_OK(it->status()); + ASSERT_TRUE(!it->Valid()); + it->SeekToFirst(); + int cnt = 0; + while (it->Valid()) { + ASSERT_OK(it->status()); + ASSERT_TRUE(Slice(keys[cnt]) == it->key()); + ASSERT_TRUE(Slice(values[cnt]) == it->value()); + ++cnt; + it->Next(); + } + ASSERT_EQ(static_cast(cnt), num_items); + + it->SeekToLast(); + cnt = static_cast(num_items) - 1; + ASSERT_TRUE(it->Valid()); + while (it->Valid()) { + ASSERT_OK(it->status()); + ASSERT_TRUE(Slice(keys[cnt]) == it->key()); + ASSERT_TRUE(Slice(values[cnt]) == it->value()); + --cnt; + it->Prev(); + } + ASSERT_EQ(cnt, -1); + + cnt = static_cast(num_items) / 2; + it->Seek(keys[cnt]); + while (it->Valid()) { + ASSERT_OK(it->status()); + ASSERT_TRUE(Slice(keys[cnt]) == it->key()); + ASSERT_TRUE(Slice(values[cnt]) == it->value()); + ++cnt; + it->Next(); + } + ASSERT_EQ(static_cast(cnt), num_items); + delete it; + + Arena arena; + it = reader.NewIterator(ReadOptions(), &arena); + ASSERT_OK(it->status()); + ASSERT_TRUE(!it->Valid()); + it->Seek(keys[num_items/2]); + ASSERT_TRUE(it->Valid()); + ASSERT_OK(it->status()); + ASSERT_TRUE(keys[num_items/2] == it->key()); + ASSERT_TRUE(values[num_items/2] == it->value()); + ASSERT_OK(it->status()); + it->~InternalIterator(); + } + + std::vector keys; + std::vector user_keys; + std::vector values; + uint64_t num_items; + std::string fname; + uint64_t file_size; + Options options; + Env* env; + EnvOptions env_options; +}; + +TEST_F(CuckooReaderTest, WhenKeyExists) { + SetUp(kNumHashFunc); + fname = test::TmpDir() + "/CuckooReader_WhenKeyExists"; + for (uint64_t i = 0; i < num_items; i++) { + user_keys[i] = "key" + NumToStr(i); + ParsedInternalKey ikey(user_keys[i], i + 1000, kTypeValue); + AppendInternalKey(&keys[i], ikey); + values[i] = "value" + NumToStr(i); + // Give disjoint hash values. + AddHashLookups(user_keys[i], i, kNumHashFunc); + } + CreateCuckooFileAndCheckReader(); + // Last level file. + UpdateKeys(true); + CreateCuckooFileAndCheckReader(); + // Test with collision. Make all hash values collide. + hash_map.clear(); + for (uint32_t i = 0; i < num_items; i++) { + AddHashLookups(user_keys[i], 0, kNumHashFunc); + } + UpdateKeys(false); + CreateCuckooFileAndCheckReader(); + // Last level file. + UpdateKeys(true); + CreateCuckooFileAndCheckReader(); +} + +TEST_F(CuckooReaderTest, WhenKeyExistsWithUint64Comparator) { + SetUp(kNumHashFunc); + fname = test::TmpDir() + "/CuckooReaderUint64_WhenKeyExists"; + for (uint64_t i = 0; i < num_items; i++) { + user_keys[i].resize(8); + memcpy(&user_keys[i][0], static_cast(&i), 8); + ParsedInternalKey ikey(user_keys[i], i + 1000, kTypeValue); + AppendInternalKey(&keys[i], ikey); + values[i] = "value" + NumToStr(i); + // Give disjoint hash values. + AddHashLookups(user_keys[i], i, kNumHashFunc); + } + CreateCuckooFileAndCheckReader(test::Uint64Comparator()); + // Last level file. 
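+ // UpdateKeys(true) rewrites every key with sequence number 0, which is
+ // what lets the builder emit a last-level (user-key-only) file below.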
+ UpdateKeys(true); + CreateCuckooFileAndCheckReader(test::Uint64Comparator()); + // Test with collision. Make all hash values collide. + hash_map.clear(); + for (uint32_t i = 0; i < num_items; i++) { + AddHashLookups(user_keys[i], 0, kNumHashFunc); + } + UpdateKeys(false); + CreateCuckooFileAndCheckReader(test::Uint64Comparator()); + // Last level file. + UpdateKeys(true); + CreateCuckooFileAndCheckReader(test::Uint64Comparator()); +} + +TEST_F(CuckooReaderTest, CheckIterator) { + SetUp(2*kNumHashFunc); + fname = test::TmpDir() + "/CuckooReader_CheckIterator"; + for (uint64_t i = 0; i < num_items; i++) { + user_keys[i] = "key" + NumToStr(i); + ParsedInternalKey ikey(user_keys[i], 1000, kTypeValue); + AppendInternalKey(&keys[i], ikey); + values[i] = "value" + NumToStr(i); + // Give disjoint hash values, in reverse order. + AddHashLookups(user_keys[i], num_items-i-1, kNumHashFunc); + } + CreateCuckooFileAndCheckReader(); + CheckIterator(); + // Last level file. + UpdateKeys(true); + CreateCuckooFileAndCheckReader(); + CheckIterator(); +} + +TEST_F(CuckooReaderTest, CheckIteratorUint64) { + SetUp(2*kNumHashFunc); + fname = test::TmpDir() + "/CuckooReader_CheckIterator"; + for (uint64_t i = 0; i < num_items; i++) { + user_keys[i].resize(8); + memcpy(&user_keys[i][0], static_cast(&i), 8); + ParsedInternalKey ikey(user_keys[i], 1000, kTypeValue); + AppendInternalKey(&keys[i], ikey); + values[i] = "value" + NumToStr(i); + // Give disjoint hash values, in reverse order. + AddHashLookups(user_keys[i], num_items-i-1, kNumHashFunc); + } + CreateCuckooFileAndCheckReader(test::Uint64Comparator()); + CheckIterator(test::Uint64Comparator()); + // Last level file. + UpdateKeys(true); + CreateCuckooFileAndCheckReader(test::Uint64Comparator()); + CheckIterator(test::Uint64Comparator()); +} + +TEST_F(CuckooReaderTest, WhenKeyNotFound) { + // Add keys with colliding hash values. + SetUp(kNumHashFunc); + fname = test::TmpDir() + "/CuckooReader_WhenKeyNotFound"; + for (uint64_t i = 0; i < num_items; i++) { + user_keys[i] = "key" + NumToStr(i); + ParsedInternalKey ikey(user_keys[i], i + 1000, kTypeValue); + AppendInternalKey(&keys[i], ikey); + values[i] = "value" + NumToStr(i); + // Make all hash values collide. + AddHashLookups(user_keys[i], 0, kNumHashFunc); + } + auto* ucmp = BytewiseComparator(); + CreateCuckooFileAndCheckReader(); + std::unique_ptr read_file; + ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options)); + unique_ptr file_reader( + new RandomAccessFileReader(std::move(read_file))); + const ImmutableCFOptions ioptions(options); + CuckooTableReader reader(ioptions, std::move(file_reader), file_size, ucmp, + GetSliceHash); + ASSERT_OK(reader.status()); + // Search for a key with colliding hash values. + std::string not_found_user_key = "key" + NumToStr(num_items); + std::string not_found_key; + AddHashLookups(not_found_user_key, 0, kNumHashFunc); + ParsedInternalKey ikey(not_found_user_key, 1000, kTypeValue); + AppendInternalKey(¬_found_key, ikey); + std::string value; + GetContext get_context(ucmp, nullptr, nullptr, nullptr, GetContext::kNotFound, + Slice(not_found_key), &value, nullptr, nullptr, + nullptr); + ASSERT_OK(reader.Get(ReadOptions(), Slice(not_found_key), &get_context)); + ASSERT_TRUE(value.empty()); + ASSERT_OK(reader.status()); + // Search for a key with an independent hash value. 
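+ // "Independent" means the mock maps this key to buckets
+ // kNumHashFunc..2*kNumHashFunc-1, which no stored key occupies, so the
+ // lookup should terminate on empty buckets rather than key mismatches.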
+ std::string not_found_user_key2 = "key" + NumToStr(num_items + 1); + AddHashLookups(not_found_user_key2, kNumHashFunc, kNumHashFunc); + ParsedInternalKey ikey2(not_found_user_key2, 1000, kTypeValue); + std::string not_found_key2; + AppendInternalKey(¬_found_key2, ikey2); + GetContext get_context2(ucmp, nullptr, nullptr, nullptr, + GetContext::kNotFound, Slice(not_found_key2), &value, + nullptr, nullptr, nullptr); + ASSERT_OK(reader.Get(ReadOptions(), Slice(not_found_key2), &get_context2)); + ASSERT_TRUE(value.empty()); + ASSERT_OK(reader.status()); + + // Test read when key is unused key. + std::string unused_key = + reader.GetTableProperties()->user_collected_properties.at( + CuckooTablePropertyNames::kEmptyKey); + // Add hash values that map to empty buckets. + AddHashLookups(ExtractUserKey(unused_key).ToString(), + kNumHashFunc, kNumHashFunc); + GetContext get_context3(ucmp, nullptr, nullptr, nullptr, + GetContext::kNotFound, Slice(unused_key), &value, + nullptr, nullptr, nullptr); + ASSERT_OK(reader.Get(ReadOptions(), Slice(unused_key), &get_context3)); + ASSERT_TRUE(value.empty()); + ASSERT_OK(reader.status()); +} + +// Performance tests +namespace { +void GetKeys(uint64_t num, std::vector* keys) { + keys->clear(); + IterKey k; + k.SetInternalKey("", 0, kTypeValue); + std::string internal_key_suffix = k.GetKey().ToString(); + ASSERT_EQ(static_cast(8), internal_key_suffix.size()); + for (uint64_t key_idx = 0; key_idx < num; ++key_idx) { + uint64_t value = 2 * key_idx; + std::string new_key(reinterpret_cast(&value), sizeof(value)); + new_key += internal_key_suffix; + keys->push_back(new_key); + } +} + +std::string GetFileName(uint64_t num) { + if (FLAGS_file_dir.empty()) { + FLAGS_file_dir = test::TmpDir(); + } + return FLAGS_file_dir + "/cuckoo_read_benchmark" + + ToString(num/1000000) + "Mkeys"; +} + +// Create last level file as we are interested in measuring performance of +// last level file only. +void WriteFile(const std::vector& keys, + const uint64_t num, double hash_ratio) { + Options options; + options.allow_mmap_reads = true; + Env* env = options.env; + EnvOptions env_options = EnvOptions(options); + std::string fname = GetFileName(num); + + std::unique_ptr writable_file; + ASSERT_OK(env->NewWritableFile(fname, &writable_file, env_options)); + unique_ptr file_writer( + new WritableFileWriter(std::move(writable_file), env_options)); + CuckooTableBuilder builder( + file_writer.get(), hash_ratio, 64, 1000, test::Uint64Comparator(), 5, + false, FLAGS_identity_as_first_hash, nullptr, 0 /* column_family_id */, + kDefaultColumnFamilyName); + ASSERT_OK(builder.status()); + for (uint64_t key_idx = 0; key_idx < num; ++key_idx) { + // Value is just a part of key. 
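+ // Each benchmark key is 16 bytes: an 8-byte little-endian integer user
+ // key plus the 8-byte internal-key trailer built in GetKeys(); reusing
+ // the first 4 key bytes as the value keeps the buckets compact.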
+ builder.Add(Slice(keys[key_idx]), Slice(&keys[key_idx][0], 4)); + ASSERT_EQ(builder.NumEntries(), key_idx + 1); + ASSERT_OK(builder.status()); + } + ASSERT_OK(builder.Finish()); + ASSERT_EQ(num, builder.NumEntries()); + ASSERT_OK(file_writer->Close()); + + uint64_t file_size; + env->GetFileSize(fname, &file_size); + std::unique_ptr read_file; + ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options)); + unique_ptr file_reader( + new RandomAccessFileReader(std::move(read_file))); + + const ImmutableCFOptions ioptions(options); + CuckooTableReader reader(ioptions, std::move(file_reader), file_size, + test::Uint64Comparator(), nullptr); + ASSERT_OK(reader.status()); + ReadOptions r_options; + std::string value; + // Assume only the fast path is triggered + GetContext get_context(nullptr, nullptr, nullptr, nullptr, + GetContext::kNotFound, Slice(), &value, nullptr, + nullptr, nullptr); + for (uint64_t i = 0; i < num; ++i) { + value.clear(); + ASSERT_OK(reader.Get(r_options, Slice(keys[i]), &get_context)); + ASSERT_TRUE(Slice(keys[i]) == Slice(&keys[i][0], 4)); + } +} + +void ReadKeys(uint64_t num, uint32_t batch_size) { + Options options; + options.allow_mmap_reads = true; + Env* env = options.env; + EnvOptions env_options = EnvOptions(options); + std::string fname = GetFileName(num); + + uint64_t file_size; + env->GetFileSize(fname, &file_size); + std::unique_ptr read_file; + ASSERT_OK(env->NewRandomAccessFile(fname, &read_file, env_options)); + unique_ptr file_reader( + new RandomAccessFileReader(std::move(read_file))); + + const ImmutableCFOptions ioptions(options); + CuckooTableReader reader(ioptions, std::move(file_reader), file_size, + test::Uint64Comparator(), nullptr); + ASSERT_OK(reader.status()); + const UserCollectedProperties user_props = + reader.GetTableProperties()->user_collected_properties; + const uint32_t num_hash_fun = *reinterpret_cast( + user_props.at(CuckooTablePropertyNames::kNumHashFunc).data()); + const uint64_t table_size = *reinterpret_cast( + user_props.at(CuckooTablePropertyNames::kHashTableSize).data()); + fprintf(stderr, "With %" PRIu64 " items, utilization is %.2f%%, number of" + " hash functions: %u.\n", num, num * 100.0 / (table_size), num_hash_fun); + ReadOptions r_options; + + std::vector keys; + keys.reserve(num); + for (uint64_t i = 0; i < num; ++i) { + keys.push_back(2 * i); + } + std::random_shuffle(keys.begin(), keys.end()); + + std::string value; + // Assume only the fast path is triggered + GetContext get_context(nullptr, nullptr, nullptr, nullptr, + GetContext::kNotFound, Slice(), &value, nullptr, + nullptr, nullptr); + uint64_t start_time = env->NowMicros(); + if (batch_size > 0) { + for (uint64_t i = 0; i < num; i += batch_size) { + for (uint64_t j = i; j < i+batch_size && j < num; ++j) { + reader.Prepare(Slice(reinterpret_cast(&keys[j]), 16)); + } + for (uint64_t j = i; j < i+batch_size && j < num; ++j) { + reader.Get(r_options, Slice(reinterpret_cast(&keys[j]), 16), + &get_context); + } + } + } else { + for (uint64_t i = 0; i < num; i++) { + reader.Get(r_options, Slice(reinterpret_cast(&keys[i]), 16), + &get_context); + } + } + float time_per_op = (env->NowMicros() - start_time) * 1.0f / num; + fprintf(stderr, + "Time taken per op is %.3fus (%.1f Mqps) with batch size of %u\n", + time_per_op, 1.0 / time_per_op, batch_size); +} +} // namespace. 
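+// A quick check of the utilization figures the next test's comment cites,
+// taking the fixed 128 Mi-bucket table it describes at face value:
+//   120 Mi / 128 Mi = 0.9375    100 Mi / 128 Mi = 0.78125
+//    80 Mi / 128 Mi = 0.6250     70 Mi / 128 Mi = 0.546875
+// A plausible invocation (binary name assumed; the flags are the
+// DEFINE_* declarations near the top of this file):
+//   ./cuckoo_table_reader_test --enable_perf --write \
+//       --file_dir=/dev/shm --identity_as_first_hash=true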
+ +TEST_F(CuckooReaderTest, TestReadPerformance) { + if (!FLAGS_enable_perf) { + return; + } + double hash_ratio = 0.95; + // These numbers are chosen to have a hash utilization % close to + // 0.9, 0.75, 0.6 and 0.5 respectively. + // They all create 128 M buckets. + std::vector<uint64_t> nums = {120*1024*1024, 100*1024*1024, 80*1024*1024, + 70*1024*1024}; +#ifndef NDEBUG + fprintf(stdout, + "WARNING: Not compiled with -DNDEBUG. Performance tests may be slow.\n"); +#endif + for (uint64_t num : nums) { + if (FLAGS_write || + Env::Default()->FileExists(GetFileName(num)).IsNotFound()) { + std::vector<std::string> all_keys; + GetKeys(num, &all_keys); + WriteFile(all_keys, num, hash_ratio); + } + ReadKeys(num, 0); + ReadKeys(num, 10); + ReadKeys(num, 25); + ReadKeys(num, 50); + ReadKeys(num, 100); + fprintf(stderr, "\n"); + } +} +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + ParseCommandLineFlags(&argc, &argv, true); + return RUN_ALL_TESTS(); +} + +#endif // GFLAGS. + +#else +#include <stdio.h> + +int main(int argc, char** argv) { + fprintf(stderr, "SKIPPED as Cuckoo table is not supported in ROCKSDB_LITE\n"); + return 0; +} + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/filter_block.h b/external/rocksdb/table/filter_block.h new file mode 100755 index 0000000000..1fe428ec59 --- /dev/null +++ b/external/rocksdb/table/filter_block.h @@ -0,0 +1,105 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2012 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// +// A filter block is stored near the end of a Table file. It contains +// filters (e.g., bloom filters) for all data blocks in the table combined +// into a single filter block. +// +// It is a base class for BlockBasedFilter and FullFilter. +// Both are used in BlockBasedTable. The first contains a filter for part +// of the keys in an SST file; the second contains a filter for all keys +// in the SST file. + +#pragma once + +#include <memory> +#include <stddef.h> +#include <stdint.h> +#include <string> +#include <vector> +#include "rocksdb/options.h" +#include "rocksdb/slice.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/table.h" +#include "util/hash.h" +#include "format.h" + +namespace rocksdb { + +const uint64_t kNotValid = ULLONG_MAX; +class FilterPolicy; + +// A FilterBlockBuilder is used to construct all of the filters for a +// particular Table. It generates a single string which is stored as +// a special block in the Table. +// +// The sequence of calls to FilterBlockBuilder must match the regexp: +// (StartBlock Add*)* Finish +// +// BlockBased/Full FilterBlock would be called in the same way.
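+// Schematically, for a block-based filter that call sequence is:
+//   builder->StartBlock(0);      // begin filter for data block at offset 0
+//   builder->Add(key1);
+//   builder->Add(key2);
+//   builder->StartBlock(4096);   // next data block
+//   builder->Add(key3);
+//   Slice filter_contents = builder->Finish();
+// A full filter simply ignores StartBlock() and folds every Add() into a
+// single filter for the whole file.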
+class FilterBlockBuilder { + public: + explicit FilterBlockBuilder() {} + virtual ~FilterBlockBuilder() {} + + virtual bool IsBlockBased() = 0; // If is blockbased filter + virtual void StartBlock(uint64_t block_offset) = 0; // Start new block filter + virtual void Add(const Slice& key) = 0; // Add a key to current filter + virtual Slice Finish() = 0; // Generate Filter + + private: + // No copying allowed + FilterBlockBuilder(const FilterBlockBuilder&); + void operator=(const FilterBlockBuilder&); +}; + +// A FilterBlockReader is used to parse filter from SST table. +// KeyMayMatch and PrefixMayMatch would trigger filter checking +// +// BlockBased/Full FilterBlock would be called in the same way. +class FilterBlockReader { + public: + explicit FilterBlockReader() + : whole_key_filtering_(true), size_(0), statistics_(nullptr) {} + explicit FilterBlockReader(size_t s, Statistics* stats, + bool _whole_key_filtering) + : whole_key_filtering_(_whole_key_filtering), + size_(s), + statistics_(stats) {} + virtual ~FilterBlockReader() {} + + virtual bool IsBlockBased() = 0; // If is blockbased filter + virtual bool KeyMayMatch(const Slice& key, + uint64_t block_offset = kNotValid) = 0; + virtual bool PrefixMayMatch(const Slice& prefix, + uint64_t block_offset = kNotValid) = 0; + virtual size_t ApproximateMemoryUsage() const = 0; + virtual size_t size() const { return size_; } + virtual Statistics* statistics() const { return statistics_; } + + bool whole_key_filtering() const { return whole_key_filtering_; } + + // convert this object to a human readable form + virtual std::string ToString() const { + std::string error_msg("Unsupported filter \n"); + return error_msg; + } + + protected: + bool whole_key_filtering_; + + private: + // No copying allowed + FilterBlockReader(const FilterBlockReader&); + void operator=(const FilterBlockReader&); + size_t size_; + Statistics* statistics_; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/table/flush_block_policy.cc b/external/rocksdb/table/flush_block_policy.cc new file mode 100755 index 0000000000..8fef4d9146 --- /dev/null +++ b/external/rocksdb/table/flush_block_policy.cc @@ -0,0 +1,74 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "rocksdb/options.h" +#include "rocksdb/flush_block_policy.h" +#include "rocksdb/slice.h" +#include "table/block_builder.h" + +#include + +namespace rocksdb { + +// Flush block by size +class FlushBlockBySizePolicy : public FlushBlockPolicy { + public: + // @params block_size: Approximate size of user data packed per + // block. 
+ // @params block_size_deviation: This is used to close a block before it + // reaches the configured + FlushBlockBySizePolicy(const uint64_t block_size, + const uint64_t block_size_deviation, + const BlockBuilder& data_block_builder) + : block_size_(block_size), + block_size_deviation_limit_( + ((block_size * (100 - block_size_deviation)) + 99) / 100), + data_block_builder_(data_block_builder) {} + + virtual bool Update(const Slice& key, + const Slice& value) override { + // it makes no sense to flush when the data block is empty + if (data_block_builder_.empty()) { + return false; + } + + auto curr_size = data_block_builder_.CurrentSizeEstimate(); + + // Do flush if one of the below two conditions is true: + // 1) if the current estimated size already exceeds the block size, + // 2) block_size_deviation is set and the estimated size after appending + // the kv will exceed the block size and the current size is under the + // the deviation. + return curr_size >= block_size_ || BlockAlmostFull(key, value); + } + + private: + bool BlockAlmostFull(const Slice& key, const Slice& value) const { + if (block_size_deviation_limit_ == 0) { + return false; + } + + const auto curr_size = data_block_builder_.CurrentSizeEstimate(); + const auto estimated_size_after = + data_block_builder_.EstimateSizeAfterKV(key, value); + + return estimated_size_after > block_size_ && + curr_size > block_size_deviation_limit_; + } + + const uint64_t block_size_; + const uint64_t block_size_deviation_limit_; + const BlockBuilder& data_block_builder_; +}; + +FlushBlockPolicy* FlushBlockBySizePolicyFactory::NewFlushBlockPolicy( + const BlockBasedTableOptions& table_options, + const BlockBuilder& data_block_builder) const { + return new FlushBlockBySizePolicy( + table_options.block_size, table_options.block_size_deviation, + data_block_builder); +} + +} // namespace rocksdb diff --git a/external/rocksdb/table/format.cc b/external/rocksdb/table/format.cc new file mode 100755 index 0000000000..3675bbadec --- /dev/null +++ b/external/rocksdb/table/format.cc @@ -0,0 +1,542 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
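+// A worked instance of FlushBlockBySizePolicy's deviation limit above,
+// assuming block_size = 4096 and block_size_deviation = 10:
+//   block_size_deviation_limit_ = ((4096 * 90) + 99) / 100 = 3687
+//   curr_size 3700, next KV would pass 4096  -> flush before adding it
+//   curr_size 3500, same KV                  -> let the block overflow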
+ +#include "table/format.h" + +#include +#include + +#include "rocksdb/env.h" +#include "table/block.h" +#include "table/block_based_table_reader.h" +#include "table/persistent_cache_helper.h" +#include "util/coding.h" +#include "util/compression.h" +#include "util/crc32c.h" +#include "util/file_reader_writer.h" +#include "util/perf_context_imp.h" +#include "util/string_util.h" +#include "util/xxhash.h" +#include "util/statistics.h" +#include "util/stop_watch.h" + + +namespace rocksdb { + +extern const uint64_t kLegacyBlockBasedTableMagicNumber; +extern const uint64_t kBlockBasedTableMagicNumber; + +#ifndef ROCKSDB_LITE +extern const uint64_t kLegacyPlainTableMagicNumber; +extern const uint64_t kPlainTableMagicNumber; +#else +// ROCKSDB_LITE doesn't have plain table +const uint64_t kLegacyPlainTableMagicNumber = 0; +const uint64_t kPlainTableMagicNumber = 0; +#endif +const uint32_t DefaultStackBufferSize = 5000; + +bool ShouldReportDetailedTime(Env* env, Statistics* stats) { + return env != nullptr && stats != nullptr && + stats->stats_level_ > kExceptDetailedTimers; +} + +void BlockHandle::EncodeTo(std::string* dst) const { + // Sanity check that all fields have been set + assert(offset_ != ~static_cast(0)); + assert(size_ != ~static_cast(0)); + PutVarint64Varint64(dst, offset_, size_); +} + +Status BlockHandle::DecodeFrom(Slice* input) { + if (GetVarint64(input, &offset_) && + GetVarint64(input, &size_)) { + return Status::OK(); + } else { + return Status::Corruption("bad block handle"); + } +} + +// Return a string that contains the copy of handle. +std::string BlockHandle::ToString(bool hex) const { + std::string handle_str; + EncodeTo(&handle_str); + if (hex) { + return Slice(handle_str).ToString(true); + } else { + return handle_str; + } +} + +const BlockHandle BlockHandle::kNullBlockHandle(0, 0); + +namespace { +inline bool IsLegacyFooterFormat(uint64_t magic_number) { + return magic_number == kLegacyBlockBasedTableMagicNumber || + magic_number == kLegacyPlainTableMagicNumber; +} +inline uint64_t UpconvertLegacyFooterFormat(uint64_t magic_number) { + if (magic_number == kLegacyBlockBasedTableMagicNumber) { + return kBlockBasedTableMagicNumber; + } + if (magic_number == kLegacyPlainTableMagicNumber) { + return kPlainTableMagicNumber; + } + assert(false); + return 0; +} +} // namespace + +// legacy footer format: +// metaindex handle (varint64 offset, varint64 size) +// index handle (varint64 offset, varint64 size) +// to make the total size 2 * BlockHandle::kMaxEncodedLength +// table_magic_number (8 bytes) +// new footer format: +// checksum (char, 1 byte) +// metaindex handle (varint64 offset, varint64 size) +// index handle (varint64 offset, varint64 size) +// to make the total size 2 * BlockHandle::kMaxEncodedLength + 1 +// footer version (4 bytes) +// table_magic_number (8 bytes) +void Footer::EncodeTo(std::string* dst) const { + assert(HasInitializedTableMagicNumber()); + if (IsLegacyFooterFormat(table_magic_number())) { + // has to be default checksum with legacy footer + assert(checksum_ == kCRC32c); + const size_t original_size = dst->size(); + metaindex_handle_.EncodeTo(dst); + index_handle_.EncodeTo(dst); + dst->resize(original_size + 2 * BlockHandle::kMaxEncodedLength); // Padding + PutFixed32(dst, static_cast(table_magic_number() & 0xffffffffu)); + PutFixed32(dst, static_cast(table_magic_number() >> 32)); + assert(dst->size() == original_size + kVersion0EncodedLength); + } else { + const size_t original_size = dst->size(); + dst->push_back(static_cast(checksum_)); + 
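+ // Bytes assembled here (new format, 53 in total; see
+ // kNewVersionsEncodedLength in format.h):
+ //   [checksum:1][metaindex handle][index handle][padding]
+ //   [version:4][magic lo:4][magic hi:4]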
metaindex_handle_.EncodeTo(dst); + index_handle_.EncodeTo(dst); + dst->resize(original_size + kNewVersionsEncodedLength - 12); // Padding + PutFixed32(dst, version()); + PutFixed32(dst, static_cast(table_magic_number() & 0xffffffffu)); + PutFixed32(dst, static_cast(table_magic_number() >> 32)); + assert(dst->size() == original_size + kNewVersionsEncodedLength); + } +} + +Footer::Footer(uint64_t _table_magic_number, uint32_t _version) + : version_(_version), + checksum_(kCRC32c), + table_magic_number_(_table_magic_number) { + // This should be guaranteed by constructor callers + assert(!IsLegacyFooterFormat(_table_magic_number) || version_ == 0); +} + +Status Footer::DecodeFrom(Slice* input) { + assert(!HasInitializedTableMagicNumber()); + assert(input != nullptr); + assert(input->size() >= kMinEncodedLength); + + const char *magic_ptr = + input->data() + input->size() - kMagicNumberLengthByte; + const uint32_t magic_lo = DecodeFixed32(magic_ptr); + const uint32_t magic_hi = DecodeFixed32(magic_ptr + 4); + uint64_t magic = ((static_cast(magic_hi) << 32) | + (static_cast(magic_lo))); + + // We check for legacy formats here and silently upconvert them + bool legacy = IsLegacyFooterFormat(magic); + if (legacy) { + magic = UpconvertLegacyFooterFormat(magic); + } + set_table_magic_number(magic); + + if (legacy) { + // The size is already asserted to be at least kMinEncodedLength + // at the beginning of the function + input->remove_prefix(input->size() - kVersion0EncodedLength); + version_ = 0 /* legacy */; + checksum_ = kCRC32c; + } else { + version_ = DecodeFixed32(magic_ptr - 4); + // Footer version 1 and higher will always occupy exactly this many bytes. + // It consists of the checksum type, two block handles, padding, + // a version number, and a magic number + if (input->size() < kNewVersionsEncodedLength) { + return Status::Corruption("input is too short to be an sstable"); + } else { + input->remove_prefix(input->size() - kNewVersionsEncodedLength); + } + uint32_t chksum; + if (!GetVarint32(input, &chksum)) { + return Status::Corruption("bad checksum type"); + } + checksum_ = static_cast(chksum); + } + + Status result = metaindex_handle_.DecodeFrom(input); + if (result.ok()) { + result = index_handle_.DecodeFrom(input); + } + if (result.ok()) { + // We skip over any leftover data (just padding for now) in "input" + const char* end = magic_ptr + kMagicNumberLengthByte; + *input = Slice(end, input->data() + input->size() - end); + } + return result; +} + +std::string Footer::ToString() const { + std::string result, handle_; + result.reserve(1024); + + bool legacy = IsLegacyFooterFormat(table_magic_number_); + if (legacy) { + result.append("metaindex handle: " + metaindex_handle_.ToString() + "\n "); + result.append("index handle: " + index_handle_.ToString() + "\n "); + result.append("table_magic_number: " + + rocksdb::ToString(table_magic_number_) + "\n "); + } else { + result.append("checksum: " + rocksdb::ToString(checksum_) + "\n "); + result.append("metaindex handle: " + metaindex_handle_.ToString() + "\n "); + result.append("index handle: " + index_handle_.ToString() + "\n "); + result.append("footer version: " + rocksdb::ToString(version_) + "\n "); + result.append("table_magic_number: " + + rocksdb::ToString(table_magic_number_) + "\n "); + } + return result; +} + +Status ReadFooterFromFile(RandomAccessFileReader* file, uint64_t file_size, + Footer* footer, uint64_t enforce_table_magic_number) { + if (file_size < Footer::kMinEncodedLength) { + return Status::Corruption("file is 
too short to be an sstable"); + } + + char footer_space[Footer::kMaxEncodedLength]; + Slice footer_input; + size_t read_offset = + (file_size > Footer::kMaxEncodedLength) + ? static_cast(file_size - Footer::kMaxEncodedLength) + : 0; + Status s = file->Read(read_offset, Footer::kMaxEncodedLength, &footer_input, + footer_space); + if (!s.ok()) return s; + + // Check that we actually read the whole footer from the file. It may be + // that size isn't correct. + if (footer_input.size() < Footer::kMinEncodedLength) { + return Status::Corruption("file is too short to be an sstable"); + } + + s = footer->DecodeFrom(&footer_input); + if (!s.ok()) { + return s; + } + if (enforce_table_magic_number != 0 && + enforce_table_magic_number != footer->table_magic_number()) { + return Status::Corruption("Bad table magic number"); + } + return Status::OK(); +} + +// Without anonymous namespace here, we fail the warning -Wmissing-prototypes +namespace { + +// Read a block and check its CRC +// contents is the result of reading. +// According to the implementation of file->Read, contents may not point to buf +Status ReadBlock(RandomAccessFileReader* file, const Footer& footer, + const ReadOptions& options, const BlockHandle& handle, + Slice* contents, /* result of reading */ char* buf) { + size_t n = static_cast(handle.size()); + Status s; + + { + PERF_TIMER_GUARD(block_read_time); + s = file->Read(handle.offset(), n + kBlockTrailerSize, contents, buf); + } + + PERF_COUNTER_ADD(block_read_count, 1); + PERF_COUNTER_ADD(block_read_byte, n + kBlockTrailerSize); + + if (!s.ok()) { + return s; + } + if (contents->size() != n + kBlockTrailerSize) { + return Status::Corruption("truncated block read"); + } + + // Check the crc of the type and the block contents + const char* data = contents->data(); // Pointer to where Read put the data + if (options.verify_checksums) { + PERF_TIMER_GUARD(block_checksum_time); + uint32_t value = DecodeFixed32(data + n + 1); + uint32_t actual = 0; + switch (footer.checksum()) { + case kCRC32c: + value = crc32c::Unmask(value); + actual = crc32c::Value(data, n + 1); + break; + case kxxHash: + actual = XXH32(data, static_cast(n) + 1, 0); + break; + default: + s = Status::Corruption("unknown checksum type"); + } + if (s.ok() && actual != value) { + s = Status::Corruption("block checksum mismatch"); + } + if (!s.ok()) { + return s; + } + } + return s; +} + +} // namespace + +Status ReadBlockContents(RandomAccessFileReader* file, const Footer& footer, + const ReadOptions& read_options, + const BlockHandle& handle, BlockContents* contents, + const ImmutableCFOptions &ioptions, + bool decompression_requested, + const Slice& compression_dict, + const PersistentCacheOptions& cache_options) { + Status status; + Slice slice; + size_t n = static_cast(handle.size()); + std::unique_ptr heap_buf; + char stack_buf[DefaultStackBufferSize]; + char* used_buf = nullptr; + rocksdb::CompressionType compression_type; + + if (cache_options.persistent_cache && + !cache_options.persistent_cache->IsCompressed()) { + status = PersistentCacheHelper::LookupUncompressedPage(cache_options, + handle, contents); + if (status.ok()) { + // uncompressed page is found for the block handle + return status; + } else { + // uncompressed page is not found + if (ioptions.info_log && !status.IsNotFound()) { + assert(!status.ok()); + Log(InfoLogLevel::INFO_LEVEL, ioptions.info_log, + "Error reading from persistent cache. 
%s", + status.ToString().c_str()); + } + } + } + + if (cache_options.persistent_cache && + cache_options.persistent_cache->IsCompressed()) { + // lookup uncompressed cache mode p-cache + status = PersistentCacheHelper::LookupRawPage( + cache_options, handle, &heap_buf, n + kBlockTrailerSize); + } else { + status = Status::NotFound(); + } + + if (status.ok()) { + // cache hit + used_buf = heap_buf.get(); + slice = Slice(heap_buf.get(), n); + } else { + if (ioptions.info_log && !status.IsNotFound()) { + assert(!status.ok()); + Log(InfoLogLevel::INFO_LEVEL, ioptions.info_log, + "Error reading from persistent cache. %s", status.ToString().c_str()); + } + // cache miss read from device + if (decompression_requested && + n + kBlockTrailerSize < DefaultStackBufferSize) { + // If we've got a small enough hunk of data, read it in to the + // trivially allocated stack buffer instead of needing a full malloc() + used_buf = &stack_buf[0]; + } else { + heap_buf = std::unique_ptr(new char[n + kBlockTrailerSize]); + used_buf = heap_buf.get(); + } + + status = ReadBlock(file, footer, read_options, handle, &slice, used_buf); + if (status.ok() && read_options.fill_cache && + cache_options.persistent_cache && + cache_options.persistent_cache->IsCompressed()) { + // insert to raw cache + PersistentCacheHelper::InsertRawPage(cache_options, handle, used_buf, + n + kBlockTrailerSize); + } + } + + if (!status.ok()) { + return status; + } + + PERF_TIMER_GUARD(block_decompress_time); + + compression_type = static_cast(slice.data()[n]); + + if (decompression_requested && compression_type != kNoCompression) { + // compressed page, uncompress, update cache + status = UncompressBlockContents(slice.data(), n, contents, + footer.version(), compression_dict, + ioptions); + } else if (slice.data() != used_buf) { + // the slice content is not the buffer provided + *contents = BlockContents(Slice(slice.data(), n), false, compression_type); + } else { + // page is uncompressed, the buffer either stack or heap provided + if (used_buf == &stack_buf[0]) { + heap_buf = std::unique_ptr(new char[n]); + memcpy(heap_buf.get(), stack_buf, n); + } + *contents = BlockContents(std::move(heap_buf), n, true, compression_type); + } + + if (status.ok() && read_options.fill_cache && + cache_options.persistent_cache && + !cache_options.persistent_cache->IsCompressed()) { + // insert to uncompressed cache + PersistentCacheHelper::InsertUncompressedPage(cache_options, handle, + *contents); + } + + return status; +} + +Status UncompressBlockContentsForCompressionType( + const char* data, size_t n, BlockContents* contents, + uint32_t format_version, const Slice& compression_dict, + CompressionType compression_type, const ImmutableCFOptions &ioptions) { + std::unique_ptr ubuf; + + assert(compression_type != kNoCompression && "Invalid compression type"); + + StopWatchNano timer(ioptions.env, + ShouldReportDetailedTime(ioptions.env, ioptions.statistics)); + int decompress_size = 0; + switch (compression_type) { + case kSnappyCompression: { + size_t ulength = 0; + static char snappy_corrupt_msg[] = + "Snappy not supported or corrupted Snappy compressed block contents"; + if (!Snappy_GetUncompressedLength(data, n, &ulength)) { + return Status::Corruption(snappy_corrupt_msg); + } + ubuf.reset(new char[ulength]); + if (!Snappy_Uncompress(data, n, ubuf.get())) { + return Status::Corruption(snappy_corrupt_msg); + } + *contents = BlockContents(std::move(ubuf), ulength, true, kNoCompression); + break; + } + case kZlibCompression: + 
ubuf.reset(Zlib_Uncompress( + data, n, &decompress_size, + GetCompressFormatForVersion(kZlibCompression, format_version), + compression_dict)); + if (!ubuf) { + static char zlib_corrupt_msg[] = + "Zlib not supported or corrupted Zlib compressed block contents"; + return Status::Corruption(zlib_corrupt_msg); + } + *contents = + BlockContents(std::move(ubuf), decompress_size, true, kNoCompression); + break; + case kBZip2Compression: + ubuf.reset(BZip2_Uncompress( + data, n, &decompress_size, + GetCompressFormatForVersion(kBZip2Compression, format_version))); + if (!ubuf) { + static char bzip2_corrupt_msg[] = + "Bzip2 not supported or corrupted Bzip2 compressed block contents"; + return Status::Corruption(bzip2_corrupt_msg); + } + *contents = + BlockContents(std::move(ubuf), decompress_size, true, kNoCompression); + break; + case kLZ4Compression: + ubuf.reset(LZ4_Uncompress( + data, n, &decompress_size, + GetCompressFormatForVersion(kLZ4Compression, format_version), + compression_dict)); + if (!ubuf) { + static char lz4_corrupt_msg[] = + "LZ4 not supported or corrupted LZ4 compressed block contents"; + return Status::Corruption(lz4_corrupt_msg); + } + *contents = + BlockContents(std::move(ubuf), decompress_size, true, kNoCompression); + break; + case kLZ4HCCompression: + ubuf.reset(LZ4_Uncompress( + data, n, &decompress_size, + GetCompressFormatForVersion(kLZ4HCCompression, format_version), + compression_dict)); + if (!ubuf) { + static char lz4hc_corrupt_msg[] = + "LZ4HC not supported or corrupted LZ4HC compressed block contents"; + return Status::Corruption(lz4hc_corrupt_msg); + } + *contents = + BlockContents(std::move(ubuf), decompress_size, true, kNoCompression); + break; + case kXpressCompression: + ubuf.reset(XPRESS_Uncompress(data, n, &decompress_size)); + if (!ubuf) { + static char xpress_corrupt_msg[] = + "XPRESS not supported or corrupted XPRESS compressed block contents"; + return Status::Corruption(xpress_corrupt_msg); + } + *contents = + BlockContents(std::move(ubuf), decompress_size, true, kNoCompression); + break; + case kZSTDNotFinalCompression: + ubuf.reset(ZSTD_Uncompress(data, n, &decompress_size, compression_dict)); + if (!ubuf) { + static char zstd_corrupt_msg[] = + "ZSTD not supported or corrupted ZSTD compressed block contents"; + return Status::Corruption(zstd_corrupt_msg); + } + *contents = + BlockContents(std::move(ubuf), decompress_size, true, kNoCompression); + break; + default: + return Status::Corruption("bad block type"); + } + + if(ShouldReportDetailedTime(ioptions.env, ioptions.statistics)){ + MeasureTime(ioptions.statistics, DECOMPRESSION_TIMES_NANOS, + timer.ElapsedNanos()); + MeasureTime(ioptions.statistics, BYTES_DECOMPRESSED, contents->data.size()); + RecordTick(ioptions.statistics, NUMBER_BLOCK_DECOMPRESSED); + } + + return Status::OK(); +} + +// +// The 'data' points to the raw block contents that was read in from file. +// This method allocates a new heap buffer and the raw block +// contents are uncompresed into this buffer. This +// buffer is returned via 'result' and it is upto the caller to +// free this buffer. 
+// format_version is the block format as defined in include/rocksdb/table.h +Status UncompressBlockContents(const char* data, size_t n, + BlockContents* contents, uint32_t format_version, + const Slice& compression_dict, + const ImmutableCFOptions &ioptions) { + assert(data[n] != kNoCompression); + return UncompressBlockContentsForCompressionType( + data, n, contents, format_version, compression_dict, + (CompressionType)data[n], ioptions); +} + +} // namespace rocksdb diff --git a/external/rocksdb/table/format.h b/external/rocksdb/table/format.h new file mode 100755 index 0000000000..571659d596 --- /dev/null +++ b/external/rocksdb/table/format.h @@ -0,0 +1,252 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#include +#include +#include "rocksdb/slice.h" +#include "rocksdb/status.h" +#include "rocksdb/options.h" +#include "rocksdb/table.h" + +#include "port/port.h" // noexcept +#include "table/persistent_cache_helper.h" + +namespace rocksdb { + +class Block; +class RandomAccessFile; +struct ReadOptions; + +extern bool ShouldReportDetailedTime(Env* env, Statistics* stats); + +// the length of the magic number in bytes. +const int kMagicNumberLengthByte = 8; + +// BlockHandle is a pointer to the extent of a file that stores a data +// block or a meta block. +class BlockHandle { + public: + BlockHandle(); + BlockHandle(uint64_t offset, uint64_t size); + + // The offset of the block in the file. + uint64_t offset() const { return offset_; } + void set_offset(uint64_t _offset) { offset_ = _offset; } + + // The size of the stored block + uint64_t size() const { return size_; } + void set_size(uint64_t _size) { size_ = _size; } + + void EncodeTo(std::string* dst) const; + Status DecodeFrom(Slice* input); + + // Return a string that contains the copy of handle. + std::string ToString(bool hex = true) const; + + // if the block handle's offset and size are both "0", we will view it + // as a null block handle that points to no where. + bool IsNull() const { + return offset_ == 0 && size_ == 0; + } + + static const BlockHandle& NullBlockHandle() { + return kNullBlockHandle; + } + + // Maximum encoding length of a BlockHandle + enum { kMaxEncodedLength = 10 + 10 }; + + private: + uint64_t offset_ = 0; + uint64_t size_ = 0; + + static const BlockHandle kNullBlockHandle; +}; + +inline uint32_t GetCompressFormatForVersion(CompressionType compression_type, + uint32_t version) { + // snappy is not versioned + assert(compression_type != kSnappyCompression && + compression_type != kXpressCompression && + compression_type != kNoCompression); + // As of version 2, we encode compressed block with + // compress_format_version == 2. Before that, the version is 1. + // DO NOT CHANGE THIS FUNCTION, it affects disk format + return version >= 2 ? 2 : 1; +} + +inline bool BlockBasedTableSupportedVersion(uint32_t version) { + return version <= 2; +} + +// Footer encapsulates the fixed information stored at the tail +// end of every table file. 
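+// On read, DecodeFrom() tells the two encodings apart purely by magic
+// number: a legacy magic implies version 0 and CRC32c, otherwise the four
+// bytes immediately before the magic carry the format version.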
+class Footer { + public: + // Constructs a footer without specifying its table magic number. + // In such case, the table magic number of such footer should be + // initialized via @ReadFooterFromFile(). + // Use this when you plan to load Footer with DecodeFrom(). Never use this + // when you plan to EncodeTo. + Footer() : Footer(kInvalidTableMagicNumber, 0) {} + + // Use this constructor when you plan to write out the footer using + // EncodeTo(). Never use this constructor with DecodeFrom(). + Footer(uint64_t table_magic_number, uint32_t version); + + // The version of the footer in this file + uint32_t version() const { return version_; } + + // The checksum type used in this file + ChecksumType checksum() const { return checksum_; } + void set_checksum(const ChecksumType c) { checksum_ = c; } + + // The block handle for the metaindex block of the table + const BlockHandle& metaindex_handle() const { return metaindex_handle_; } + void set_metaindex_handle(const BlockHandle& h) { metaindex_handle_ = h; } + + // The block handle for the index block of the table + const BlockHandle& index_handle() const { return index_handle_; } + + void set_index_handle(const BlockHandle& h) { index_handle_ = h; } + + uint64_t table_magic_number() const { return table_magic_number_; } + + void EncodeTo(std::string* dst) const; + + // Set the current footer based on the input slice. + // + // REQUIRES: table_magic_number_ is not set (i.e., + // HasInitializedTableMagicNumber() is true). The function will initialize the + // magic number + Status DecodeFrom(Slice* input); + + // Encoded length of a Footer. Note that the serialization of a Footer will + // always occupy at least kMinEncodedLength bytes. If fields are changed + // the version number should be incremented and kMaxEncodedLength should be + // increased accordingly. + enum { + // Footer version 0 (legacy) will always occupy exactly this many bytes. + // It consists of two block handles, padding, and a magic number. + kVersion0EncodedLength = 2 * BlockHandle::kMaxEncodedLength + 8, + // Footer of versions 1 and higher will always occupy exactly this many + // bytes. It consists of the checksum type, two block handles, padding, + // a version number (bigger than 1), and a magic number + kNewVersionsEncodedLength = 1 + 2 * BlockHandle::kMaxEncodedLength + 4 + 8, + kMinEncodedLength = kVersion0EncodedLength, + kMaxEncodedLength = kNewVersionsEncodedLength, + }; + + static const uint64_t kInvalidTableMagicNumber = 0; + + // convert this object to a human readable form + std::string ToString() const; + + private: + // REQUIRES: magic number wasn't initialized. + void set_table_magic_number(uint64_t magic_number) { + assert(!HasInitializedTableMagicNumber()); + table_magic_number_ = magic_number; + } + + // return true if @table_magic_number_ is set to a value different + // from @kInvalidTableMagicNumber. 
+ bool HasInitializedTableMagicNumber() const { + return (table_magic_number_ != kInvalidTableMagicNumber); + } + + uint32_t version_; + ChecksumType checksum_; + BlockHandle metaindex_handle_; + BlockHandle index_handle_; + uint64_t table_magic_number_ = 0; +}; + +// Read the footer from file +// If enforce_table_magic_number != 0, ReadFooterFromFile() will return +// corruption if table_magic number is not equal to enforce_table_magic_number +Status ReadFooterFromFile(RandomAccessFileReader* file, uint64_t file_size, + Footer* footer, + uint64_t enforce_table_magic_number = 0); + +// 1-byte type + 32-bit crc +static const size_t kBlockTrailerSize = 5; + +struct BlockContents { + Slice data; // Actual contents of data + bool cachable; // True iff data can be cached + CompressionType compression_type; + std::unique_ptr allocation; + + BlockContents() : cachable(false), compression_type(kNoCompression) {} + + BlockContents(const Slice& _data, bool _cachable, + CompressionType _compression_type) + : data(_data), cachable(_cachable), compression_type(_compression_type) {} + + BlockContents(std::unique_ptr&& _data, size_t _size, bool _cachable, + CompressionType _compression_type) + : data(_data.get(), _size), + cachable(_cachable), + compression_type(_compression_type), + allocation(std::move(_data)) {} + + BlockContents(BlockContents&& other) ROCKSDB_NOEXCEPT { *this = std::move(other); } + + BlockContents& operator=(BlockContents&& other) { + data = std::move(other.data); + cachable = other.cachable; + compression_type = other.compression_type; + allocation = std::move(other.allocation); + return *this; + } +}; + +// Read the block identified by "handle" from "file". On failure +// return non-OK. On success fill *result and return OK. +extern Status ReadBlockContents( + RandomAccessFileReader* file, const Footer& footer, + const ReadOptions& options, const BlockHandle& handle, + BlockContents* contents, const ImmutableCFOptions &ioptions, + bool do_uncompress = true, const Slice& compression_dict = Slice(), + const PersistentCacheOptions& cache_options = PersistentCacheOptions()); + +// The 'data' points to the raw block contents read in from file. +// This method allocates a new heap buffer and the raw block +// contents are uncompresed into this buffer. This buffer is +// returned via 'result' and it is upto the caller to +// free this buffer. +// For description of compress_format_version and possible values, see +// util/compression.h +extern Status UncompressBlockContents(const char* data, size_t n, + BlockContents* contents, + uint32_t compress_format_version, + const Slice& compression_dict, + const ImmutableCFOptions &ioptions); + +// This is an extension to UncompressBlockContents that accepts +// a specific compression type. This is used by un-wrapped blocks +// with no compression header. +extern Status UncompressBlockContentsForCompressionType( + const char* data, size_t n, BlockContents* contents, + uint32_t compress_format_version, const Slice& compression_dict, + CompressionType compression_type, const ImmutableCFOptions &ioptions); + +// Implementation details follow. 
Clients should ignore, + +inline BlockHandle::BlockHandle() + : BlockHandle(~static_cast(0), + ~static_cast(0)) { +} + +inline BlockHandle::BlockHandle(uint64_t _offset, uint64_t _size) + : offset_(_offset), size_(_size) {} + +} // namespace rocksdb diff --git a/external/rocksdb/table/full_filter_block.cc b/external/rocksdb/table/full_filter_block.cc new file mode 100755 index 0000000000..1c89cc1c4f --- /dev/null +++ b/external/rocksdb/table/full_filter_block.cc @@ -0,0 +1,109 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "table/full_filter_block.h" + +#include "rocksdb/filter_policy.h" +#include "port/port.h" +#include "util/coding.h" +#include "util/perf_context_imp.h" + +namespace rocksdb { + +FullFilterBlockBuilder::FullFilterBlockBuilder( + const SliceTransform* prefix_extractor, bool whole_key_filtering, + FilterBitsBuilder* filter_bits_builder) + : prefix_extractor_(prefix_extractor), + whole_key_filtering_(whole_key_filtering), + num_added_(0) { + assert(filter_bits_builder != nullptr); + filter_bits_builder_.reset(filter_bits_builder); +} + +void FullFilterBlockBuilder::Add(const Slice& key) { + if (whole_key_filtering_) { + AddKey(key); + } + if (prefix_extractor_ && prefix_extractor_->InDomain(key)) { + AddPrefix(key); + } +} + +// Add key to filter if needed +inline void FullFilterBlockBuilder::AddKey(const Slice& key) { + filter_bits_builder_->AddKey(key); + num_added_++; +} + +// Add prefix to filter if needed +inline void FullFilterBlockBuilder::AddPrefix(const Slice& key) { + Slice prefix = prefix_extractor_->Transform(key); + filter_bits_builder_->AddKey(prefix); + num_added_++; +} + +Slice FullFilterBlockBuilder::Finish() { + if (num_added_ != 0) { + num_added_ = 0; + return filter_bits_builder_->Finish(&filter_data_); + } + return Slice(); +} + +FullFilterBlockReader::FullFilterBlockReader( + const SliceTransform* prefix_extractor, bool _whole_key_filtering, + const Slice& contents, FilterBitsReader* filter_bits_reader, + Statistics* stats) + : FilterBlockReader(contents.size(), stats, _whole_key_filtering), + prefix_extractor_(prefix_extractor), + contents_(contents) { + assert(filter_bits_reader != nullptr); + filter_bits_reader_.reset(filter_bits_reader); +} + +FullFilterBlockReader::FullFilterBlockReader( + const SliceTransform* prefix_extractor, bool _whole_key_filtering, + BlockContents&& contents, FilterBitsReader* filter_bits_reader, + Statistics* stats) + : FullFilterBlockReader(prefix_extractor, _whole_key_filtering, + contents.data, filter_bits_reader, stats) { + block_contents_ = std::move(contents); +} + +bool FullFilterBlockReader::KeyMayMatch(const Slice& key, + uint64_t block_offset) { + assert(block_offset == kNotValid); + if (!whole_key_filtering_) { + return true; + } + return MayMatch(key); +} + +bool FullFilterBlockReader::PrefixMayMatch(const Slice& prefix, + uint64_t block_offset) { + assert(block_offset == kNotValid); + if (!prefix_extractor_) { + return true; + } + return MayMatch(prefix); +} + +bool FullFilterBlockReader::MayMatch(const Slice& entry) { + if (contents_.size() != 0) { + if (filter_bits_reader_->MayMatch(entry)) { + PERF_COUNTER_ADD(bloom_sst_hit_count, 1); + return true; + } else { + PERF_COUNTER_ADD(bloom_sst_miss_count, 1); + return false; + } + } + return true; // 
remain the same with block_based filter +} + +size_t FullFilterBlockReader::ApproximateMemoryUsage() const { + return contents_.size(); +} +} // namespace rocksdb diff --git a/external/rocksdb/table/full_filter_block.h b/external/rocksdb/table/full_filter_block.h new file mode 100755 index 0000000000..4aa357f8a8 --- /dev/null +++ b/external/rocksdb/table/full_filter_block.h @@ -0,0 +1,112 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#pragma once + +#include <stddef.h> +#include <stdint.h> +#include <memory> +#include <string> +#include <vector> +#include "rocksdb/options.h" +#include "rocksdb/slice.h" +#include "rocksdb/slice_transform.h" +#include "db/dbformat.h" +#include "util/hash.h" +#include "table/filter_block.h" + +namespace rocksdb { + +class FilterPolicy; +class FilterBitsBuilder; +class FilterBitsReader; + +// A FullFilterBlockBuilder is used to construct a full filter for a +// particular Table. It generates a single string which is stored as +// a special block in the Table. +// The format of full filter block is: +// +----------------------------------------------------------------+ +// | full filter for all keys in sst file | +// +----------------------------------------------------------------+ +// The full filter can be very large. At the end of it, we put +// num_probes: how many hash functions are used in the bloom filter +// +class FullFilterBlockBuilder : public FilterBlockBuilder { + public: + explicit FullFilterBlockBuilder(const SliceTransform* prefix_extractor, + bool whole_key_filtering, + FilterBitsBuilder* filter_bits_builder); + // bits_builder is created by the filter policy; it is passed in here + // directly and deleted by this class + ~FullFilterBlockBuilder() {} + + virtual bool IsBlockBased() override { return false; } + virtual void StartBlock(uint64_t block_offset) override {} + virtual void Add(const Slice& key) override; + virtual Slice Finish() override; + + private: + // important: all of these might point to invalid addresses + // at the time of destruction of this filter block. destructor + // should NOT dereference them. + const SliceTransform* prefix_extractor_; + bool whole_key_filtering_; + + uint32_t num_added_; + std::unique_ptr<FilterBitsBuilder> filter_bits_builder_; + std::unique_ptr<const char[]> filter_data_; + + void AddKey(const Slice& key); + void AddPrefix(const Slice& key); + + // No copying allowed + FullFilterBlockBuilder(const FullFilterBlockBuilder&); + void operator=(const FullFilterBlockBuilder&); +}; + +// A FullFilterBlockReader is used to parse a full filter from an SST table. +// KeyMayMatch and PrefixMayMatch trigger the filter check +class FullFilterBlockReader : public FilterBlockReader { + public: + // REQUIRES: "contents" and filter_bits_reader must stay live + // while *this is live. + explicit FullFilterBlockReader(const SliceTransform* prefix_extractor, + bool whole_key_filtering, + const Slice& contents, + FilterBitsReader* filter_bits_reader, + Statistics* statistics); + explicit FullFilterBlockReader(const SliceTransform* prefix_extractor, + bool whole_key_filtering, + BlockContents&& contents, + FilterBitsReader* filter_bits_reader, + Statistics* statistics); + + // bits_reader is created in filter_policy, it should be passed in here + // directly. 
and be deleted here + ~FullFilterBlockReader() {} + + virtual bool IsBlockBased() override { return false; } + virtual bool KeyMayMatch(const Slice& key, + uint64_t block_offset = kNotValid) override; + virtual bool PrefixMayMatch(const Slice& prefix, + uint64_t block_offset = kNotValid) override; + virtual size_t ApproximateMemoryUsage() const override; + + private: + const SliceTransform* prefix_extractor_; + + std::unique_ptr filter_bits_reader_; + Slice contents_; + BlockContents block_contents_; + std::unique_ptr filter_data_; + + bool MayMatch(const Slice& entry); + + // No copying allowed + FullFilterBlockReader(const FullFilterBlockReader&); + void operator=(const FullFilterBlockReader&); +}; + +} // namespace rocksdb diff --git a/external/rocksdb/table/full_filter_block_test.cc b/external/rocksdb/table/full_filter_block_test.cc new file mode 100755 index 0000000000..51ce1aaa99 --- /dev/null +++ b/external/rocksdb/table/full_filter_block_test.cc @@ -0,0 +1,189 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "table/full_filter_block.h" + +#include "rocksdb/filter_policy.h" +#include "util/coding.h" +#include "util/hash.h" +#include "util/logging.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { + +class TestFilterBitsBuilder : public FilterBitsBuilder { + public: + explicit TestFilterBitsBuilder() {} + + // Add Key to filter + virtual void AddKey(const Slice& key) override { + hash_entries_.push_back(Hash(key.data(), key.size(), 1)); + } + + // Generate the filter using the keys that are added + virtual Slice Finish(std::unique_ptr* buf) override { + uint32_t len = static_cast(hash_entries_.size()) * 4; + char* data = new char[len]; + for (size_t i = 0; i < hash_entries_.size(); i++) { + EncodeFixed32(data + i * 4, hash_entries_[i]); + } + const char* const_data = data; + buf->reset(const_data); + return Slice(data, len); + } + + private: + std::vector hash_entries_; +}; + +class TestFilterBitsReader : public FilterBitsReader { + public: + explicit TestFilterBitsReader(const Slice& contents) + : data_(contents.data()), len_(static_cast(contents.size())) {} + + virtual bool MayMatch(const Slice& entry) override { + uint32_t h = Hash(entry.data(), entry.size(), 1); + for (size_t i = 0; i + 4 <= len_; i += 4) { + if (h == DecodeFixed32(data_ + i)) { + return true; + } + } + return false; + } + + private: + const char* data_; + uint32_t len_; +}; + + +class TestHashFilter : public FilterPolicy { + public: + virtual const char* Name() const override { return "TestHashFilter"; } + + virtual void CreateFilter(const Slice* keys, int n, + std::string* dst) const override { + for (int i = 0; i < n; i++) { + uint32_t h = Hash(keys[i].data(), keys[i].size(), 1); + PutFixed32(dst, h); + } + } + + virtual bool KeyMayMatch(const Slice& key, + const Slice& filter) const override { + uint32_t h = Hash(key.data(), key.size(), 1); + for (unsigned int i = 0; i + 4 <= filter.size(); i += 4) { + if (h == DecodeFixed32(filter.data() + i)) { + return true; + } + } + return false; + } + + virtual FilterBitsBuilder* GetFilterBitsBuilder() const override { + return new TestFilterBitsBuilder(); + } + + virtual FilterBitsReader* GetFilterBitsReader(const Slice& contents) + const override { + return new 
TestFilterBitsReader(contents); + } +}; + +class PluginFullFilterBlockTest : public testing::Test { + public: + BlockBasedTableOptions table_options_; + + PluginFullFilterBlockTest() { + table_options_.filter_policy.reset(new TestHashFilter()); + } +}; + +TEST_F(PluginFullFilterBlockTest, PluginEmptyBuilder) { + FullFilterBlockBuilder builder( + nullptr, true, table_options_.filter_policy->GetFilterBitsBuilder()); + Slice block = builder.Finish(); + ASSERT_EQ("", EscapeString(block)); + + FullFilterBlockReader reader( + nullptr, true, block, + table_options_.filter_policy->GetFilterBitsReader(block), nullptr); + // Remain same symantic with blockbased filter + ASSERT_TRUE(reader.KeyMayMatch("foo")); +} + +TEST_F(PluginFullFilterBlockTest, PluginSingleChunk) { + FullFilterBlockBuilder builder( + nullptr, true, table_options_.filter_policy->GetFilterBitsBuilder()); + builder.Add("foo"); + builder.Add("bar"); + builder.Add("box"); + builder.Add("box"); + builder.Add("hello"); + Slice block = builder.Finish(); + FullFilterBlockReader reader( + nullptr, true, block, + table_options_.filter_policy->GetFilterBitsReader(block), nullptr); + ASSERT_TRUE(reader.KeyMayMatch("foo")); + ASSERT_TRUE(reader.KeyMayMatch("bar")); + ASSERT_TRUE(reader.KeyMayMatch("box")); + ASSERT_TRUE(reader.KeyMayMatch("hello")); + ASSERT_TRUE(reader.KeyMayMatch("foo")); + ASSERT_TRUE(!reader.KeyMayMatch("missing")); + ASSERT_TRUE(!reader.KeyMayMatch("other")); +} + +class FullFilterBlockTest : public testing::Test { + public: + BlockBasedTableOptions table_options_; + + FullFilterBlockTest() { + table_options_.filter_policy.reset(NewBloomFilterPolicy(10, false)); + } + + ~FullFilterBlockTest() {} +}; + +TEST_F(FullFilterBlockTest, EmptyBuilder) { + FullFilterBlockBuilder builder( + nullptr, true, table_options_.filter_policy->GetFilterBitsBuilder()); + Slice block = builder.Finish(); + ASSERT_EQ("", EscapeString(block)); + + FullFilterBlockReader reader( + nullptr, true, block, + table_options_.filter_policy->GetFilterBitsReader(block), nullptr); + // Remain same symantic with blockbased filter + ASSERT_TRUE(reader.KeyMayMatch("foo")); +} + +TEST_F(FullFilterBlockTest, SingleChunk) { + FullFilterBlockBuilder builder( + nullptr, true, table_options_.filter_policy->GetFilterBitsBuilder()); + builder.Add("foo"); + builder.Add("bar"); + builder.Add("box"); + builder.Add("box"); + builder.Add("hello"); + Slice block = builder.Finish(); + FullFilterBlockReader reader( + nullptr, true, block, + table_options_.filter_policy->GetFilterBitsReader(block), nullptr); + ASSERT_TRUE(reader.KeyMayMatch("foo")); + ASSERT_TRUE(reader.KeyMayMatch("bar")); + ASSERT_TRUE(reader.KeyMayMatch("box")); + ASSERT_TRUE(reader.KeyMayMatch("hello")); + ASSERT_TRUE(reader.KeyMayMatch("foo")); + ASSERT_TRUE(!reader.KeyMayMatch("missing")); + ASSERT_TRUE(!reader.KeyMayMatch("other")); +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/table/get_context.cc b/external/rocksdb/table/get_context.cc new file mode 100755 index 0000000000..4a7a9693b7 --- /dev/null +++ b/external/rocksdb/table/get_context.cc @@ -0,0 +1,180 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
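Before the GetContext implementation, it is worth pinning down the contract the full-filter code above relies on: FullFilterBlockBuilder only forwards keys (and extracted prefixes) to a FilterBitsBuilder supplied by the FilterPolicy, and the reader only forwards probes to the matching FilterBitsReader, as the tests' TestHashFilter demonstrates. The following self-contained sketch mirrors that contract with toy types (ToyBitsBuilder/ToyBitsReader are illustrative names, and FNV-1a stands in for rocksdb::Hash); it is a sketch of the plug-in pattern, not RocksDB code:

// Standalone sketch (not RocksDB code): mirrors the FilterBitsBuilder /
// FilterBitsReader contract that FullFilterBlockBuilder/Reader delegate to.
#include <cstdint>
#include <cstring>
#include <iostream>
#include <string>
#include <vector>

static uint32_t ToyHash(const std::string& key) {
  uint32_t h = 2166136261u;            // FNV-1a; stands in for rocksdb::Hash
  for (char c : key) { h ^= static_cast<uint8_t>(c); h *= 16777619u; }
  return h;
}

// Like TestFilterBitsBuilder above: records one 32-bit hash per added key.
struct ToyBitsBuilder {
  std::vector<uint32_t> hashes;
  void AddKey(const std::string& key) { hashes.push_back(ToyHash(key)); }
  std::string Finish() {               // serialize all hashes into one blob
    std::string out(hashes.size() * 4, '\0');
    if (!out.empty()) std::memcpy(&out[0], hashes.data(), out.size());
    return out;
  }
};

// Like TestFilterBitsReader above: probes by scanning the stored hashes.
struct ToyBitsReader {
  std::string data;
  bool MayMatch(const std::string& key) {
    uint32_t h = ToyHash(key), stored;
    for (size_t i = 0; i + 4 <= data.size(); i += 4) {
      std::memcpy(&stored, data.data() + i, 4);
      if (stored == h) return true;
    }
    return false;
  }
};

int main() {
  ToyBitsBuilder b;
  b.AddKey("foo");
  b.AddKey("bar");
  ToyBitsReader r{b.Finish()};
  std::cout << r.MayMatch("foo") << r.MayMatch("missing") << "\n";  // 10
  return 0;
}

As with the hash-list filter in the tests, MayMatch may answer true for an absent key but never false for a present one, which is exactly the one-sided error a bloom-style filter is allowed.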
+ +#include "table/get_context.h" +#include "db/merge_helper.h" +#include "db/pinned_iterators_manager.h" +#include "rocksdb/env.h" +#include "rocksdb/merge_operator.h" +#include "rocksdb/statistics.h" +#include "util/perf_context_imp.h" +#include "util/statistics.h" + +namespace rocksdb { + +namespace { + +void appendToReplayLog(std::string* replay_log, ValueType type, Slice value) { +#ifndef ROCKSDB_LITE + if (replay_log) { + if (replay_log->empty()) { + // Optimization: in the common case of only one operation in the + // log, we allocate the exact amount of space needed. + replay_log->reserve(1 + VarintLength(value.size()) + value.size()); + } + replay_log->push_back(type); + PutLengthPrefixedSlice(replay_log, value); + } +#endif // ROCKSDB_LITE +} + +} // namespace + +GetContext::GetContext(const Comparator* ucmp, + const MergeOperator* merge_operator, Logger* logger, + Statistics* statistics, GetState init_state, + const Slice& user_key, std::string* ret_value, + bool* value_found, MergeContext* merge_context, Env* env, + SequenceNumber* seq, + PinnedIteratorsManager* _pinned_iters_mgr) + : ucmp_(ucmp), + merge_operator_(merge_operator), + logger_(logger), + statistics_(statistics), + state_(init_state), + user_key_(user_key), + value_(ret_value), + value_found_(value_found), + merge_context_(merge_context), + env_(env), + seq_(seq), + replay_log_(nullptr), + pinned_iters_mgr_(_pinned_iters_mgr) { + if (seq_) { + *seq_ = kMaxSequenceNumber; + } +} + +// Called from TableCache::Get and Table::Get when file/block in which +// key may exist are not there in TableCache/BlockCache respectively. In this +// case we can't guarantee that key does not exist and are not permitted to do +// IO to be certain.Set the status=kFound and value_found=false to let the +// caller know that key may exist but is not there in memory +void GetContext::MarkKeyMayExist() { + state_ = kFound; + if (value_found_ != nullptr) { + *value_found_ = false; + } +} + +void GetContext::SaveValue(const Slice& value, SequenceNumber seq) { + assert(state_ == kNotFound); + appendToReplayLog(replay_log_, kTypeValue, value); + + state_ = kFound; + if (value_ != nullptr) { + value_->assign(value.data(), value.size()); + } +} + +bool GetContext::SaveValue(const ParsedInternalKey& parsed_key, + const Slice& value, bool value_pinned) { + assert((state_ != kMerge && parsed_key.type != kTypeMerge) || + merge_context_ != nullptr); + if (ucmp_->Equal(parsed_key.user_key, user_key_)) { + appendToReplayLog(replay_log_, parsed_key.type, value); + + if (seq_ != nullptr) { + // Set the sequence number if it is uninitialized + if (*seq_ == kMaxSequenceNumber) { + *seq_ = parsed_key.sequence; + } + } + + // Key matches. 
Process it + switch (parsed_key.type) { + case kTypeValue: + assert(state_ == kNotFound || state_ == kMerge); + if (kNotFound == state_) { + state_ = kFound; + if (value_ != nullptr) { + value_->assign(value.data(), value.size()); + } + } else if (kMerge == state_) { + assert(merge_operator_ != nullptr); + state_ = kFound; + if (value_ != nullptr) { + Status merge_status = + MergeHelper::TimedFullMerge(merge_operator_, user_key_, &value, + merge_context_->GetOperands(), + value_, logger_, statistics_, env_); + if (!merge_status.ok()) { + state_ = kCorrupt; + } + } + } + return false; + + case kTypeDeletion: + case kTypeSingleDeletion: + // TODO(noetzli): Verify correctness once merge of single-deletes + // is supported + assert(state_ == kNotFound || state_ == kMerge); + if (kNotFound == state_) { + state_ = kDeleted; + } else if (kMerge == state_) { + state_ = kFound; + if (value_ != nullptr) { + Status merge_status = + MergeHelper::TimedFullMerge(merge_operator_, user_key_, nullptr, + merge_context_->GetOperands(), + value_, logger_, statistics_, env_); + + if (!merge_status.ok()) { + state_ = kCorrupt; + } + } + } + return false; + + case kTypeMerge: + assert(state_ == kNotFound || state_ == kMerge); + state_ = kMerge; + merge_context_->PushOperand(value, value_pinned); + return true; + + default: + assert(false); + break; + } + } + + // state_ could be Corrupt, merge or notfound + return false; +} + +void replayGetContextLog(const Slice& replay_log, const Slice& user_key, + GetContext* get_context) { +#ifndef ROCKSDB_LITE + Slice s = replay_log; + while (s.size()) { + auto type = static_cast(*s.data()); + s.remove_prefix(1); + Slice value; + bool ret = GetLengthPrefixedSlice(&s, &value); + assert(ret); + (void)ret; + + // Since SequenceNumber is not stored and unknown, we will use + // kMaxSequenceNumber. + get_context->SaveValue( + ParsedInternalKey(user_key, kMaxSequenceNumber, type), value, true); + } +#else // ROCKSDB_LITE + assert(false); +#endif // ROCKSDB_LITE +} + +} // namespace rocksdb diff --git a/external/rocksdb/table/get_context.h b/external/rocksdb/table/get_context.h new file mode 100755 index 0000000000..4cee09a8d5 --- /dev/null +++ b/external/rocksdb/table/get_context.h @@ -0,0 +1,83 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#pragma once +#include +#include "db/merge_context.h" +#include "rocksdb/env.h" +#include "rocksdb/types.h" + +namespace rocksdb { +class MergeContext; +class PinnedIteratorsManager; + +class GetContext { + public: + enum GetState { + kNotFound, + kFound, + kDeleted, + kCorrupt, + kMerge // saver contains the current merge result (the operands) + }; + + GetContext(const Comparator* ucmp, const MergeOperator* merge_operator, + Logger* logger, Statistics* statistics, GetState init_state, + const Slice& user_key, std::string* ret_value, bool* value_found, + MergeContext* merge_context, Env* env, + SequenceNumber* seq = nullptr, + PinnedIteratorsManager* _pinned_iters_mgr = nullptr); + + void MarkKeyMayExist(); + + // Records this key, value, and any meta-data (such as sequence number and + // state) into this GetContext. + // + // Returns True if more keys need to be read (due to merges) or + // False if the complete value has been found. 
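The switch above (together with the declaration just below) implements a small state machine: merge operands accumulate while the scan keeps descending to older entries, and a Put or Delete terminates it. A standalone sketch of just the transitions, with toy enums in place of rocksdb::ValueType and the real merge plumbing:

// Standalone sketch of the lookup state machine above (toy enums; the real
// code also folds merge operands via MergeOperator and tracks sequence
// numbers). Save() returns true while older entries must still be read.
#include <cassert>
#include <iostream>

enum class State { kNotFound, kFound, kDeleted, kMerge, kCorrupt };
enum class Type { kValue, kDeletion, kMerge };

bool Save(State& s, Type t) {
  switch (t) {
    case Type::kValue:        // a Put ends the search; pending merge
      s = State::kFound;      // operands would be folded into the value
      return false;
    case Type::kDeletion:     // a Delete ends it too: a merge chain on top
      s = (s == State::kMerge) ? State::kFound : State::kDeleted;
      return false;
    case Type::kMerge:        // collect the operand, keep descending
      s = State::kMerge;
      return true;
  }
  return false;
}

int main() {
  State s = State::kNotFound;
  assert(Save(s, Type::kMerge));    // newest entry is a merge operand
  assert(Save(s, Type::kMerge));    // an older operand
  assert(!Save(s, Type::kValue));   // the base value: lookup is final
  std::cout << (s == State::kFound) << "\n";  // 1
  return 0;
}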
+ bool SaveValue(const ParsedInternalKey& parsed_key, const Slice& value, + bool value_pinned = false); + + // Simplified version of the previous function. Should only be used when we + // know that the operation is a Put. + void SaveValue(const Slice& value, SequenceNumber seq); + + GetState State() const { return state_; } + + PinnedIteratorsManager* pinned_iters_mgr() { return pinned_iters_mgr_; } + + // If a non-null string is passed, all the SaveValue calls will be + // logged into the string. The operations can then be replayed on + // another GetContext with replayGetContextLog. + void SetReplayLog(std::string* replay_log) { replay_log_ = replay_log; } + + // Do we need to fetch the SequenceNumber for this key? + bool NeedToReadSequence() const { return (seq_ != nullptr); } + + private: + const Comparator* ucmp_; + const MergeOperator* merge_operator_; + // the merge operations encountered; + Logger* logger_; + Statistics* statistics_; + + GetState state_; + Slice user_key_; + std::string* value_; + bool* value_found_; // Is value set correctly? Used by KeyMayExist + MergeContext* merge_context_; + Env* env_; + // If a key is found, seq_ will be set to the SequenceNumber of most recent + // write to the key or kMaxSequenceNumber if unknown + SequenceNumber* seq_; + std::string* replay_log_; + // Used to temporarily pin blocks when state_ == GetContext::kMerge + PinnedIteratorsManager* pinned_iters_mgr_; +}; + +void replayGetContextLog(const Slice& replay_log, const Slice& user_key, + GetContext* get_context); + +} // namespace rocksdb diff --git a/external/rocksdb/table/internal_iterator.h b/external/rocksdb/table/internal_iterator.h new file mode 100755 index 0000000000..f1f1e0bffc --- /dev/null +++ b/external/rocksdb/table/internal_iterator.h @@ -0,0 +1,104 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// + +#pragma once + +#include +#include "rocksdb/iterator.h" +#include "rocksdb/status.h" + +namespace rocksdb { + +class PinnedIteratorsManager; + +class InternalIterator : public Cleanable { + public: + InternalIterator() {} + virtual ~InternalIterator() {} + + // An iterator is either positioned at a key/value pair, or + // not valid. This method returns true iff the iterator is valid. + virtual bool Valid() const = 0; + + // Position at the first key in the source. The iterator is Valid() + // after this call iff the source is not empty. + virtual void SeekToFirst() = 0; + + // Position at the last key in the source. The iterator is + // Valid() after this call iff the source is not empty. + virtual void SeekToLast() = 0; + + // Position at the first key in the source that at or past target + // The iterator is Valid() after this call iff the source contains + // an entry that comes at or past target. + virtual void Seek(const Slice& target) = 0; + + // Moves to the next entry in the source. After this call, Valid() is + // true iff the iterator was not positioned at the last entry in the source. + // REQUIRES: Valid() + virtual void Next() = 0; + + // Moves to the previous entry in the source. After this call, Valid() is + // true iff the iterator was not positioned at the first entry in source. + // REQUIRES: Valid() + virtual void Prev() = 0; + + // Return the key for the current entry. 
The underlying storage for + // the returned slice is valid only until the next modification of + // the iterator. + // REQUIRES: Valid() + virtual Slice key() const = 0; + + // Return the value for the current entry. The underlying storage for + // the returned slice is valid only until the next modification of + // the iterator. + // REQUIRES: !AtEnd() && !AtStart() + virtual Slice value() const = 0; + + // If an error has occurred, return it. Else return an ok status. + // If non-blocking IO is requested and this operation cannot be + // satisfied without doing some IO, then this returns Status::Incomplete(). + virtual Status status() const = 0; + + // Pass the PinnedIteratorsManager to the Iterator, most Iterators dont + // communicate with PinnedIteratorsManager so default implementation is no-op + // but for Iterators that need to communicate with PinnedIteratorsManager + // they will implement this function and use the passed pointer to communicate + // with PinnedIteratorsManager. + virtual void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) {} + + // If true, this means that the Slice returned by key() is valid as long as + // PinnedIteratorsManager::ReleasePinnedIterators is not called and the + // Iterator is not deleted. + // + // IsKeyPinned() is guaranteed to always return true if + // - Iterator is created with ReadOptions::pin_data = true + // - DB tables were created with BlockBasedTableOptions::use_delta_encoding + // set to false. + virtual bool IsKeyPinned() const { return false; } + + // If true, this means that the Slice returned by value() is valid as long as + // PinnedIteratorsManager::ReleasePinnedIterators is not called and the + // Iterator is not deleted. + virtual bool IsValuePinned() const { return false; } + + virtual Status GetProperty(std::string prop_name, std::string* prop) { + return Status::NotSupported(""); + } + + private: + // No copying allowed + InternalIterator(const InternalIterator&) = delete; + InternalIterator& operator=(const InternalIterator&) = delete; +}; + +// Return an empty iterator (yields nothing). +extern InternalIterator* NewEmptyInternalIterator(); + +// Return an empty iterator with the specified status. +extern InternalIterator* NewErrorInternalIterator(const Status& status); + +} // namespace rocksdb diff --git a/external/rocksdb/table/iter_heap.h b/external/rocksdb/table/iter_heap.h new file mode 100755 index 0000000000..642383345c --- /dev/null +++ b/external/rocksdb/table/iter_heap.h @@ -0,0 +1,42 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// + +#pragma once + +#include "rocksdb/comparator.h" +#include "table/iterator_wrapper.h" + +namespace rocksdb { + +// When used with std::priority_queue, this comparison functor puts the +// iterator with the max/largest key on top. +class MaxIteratorComparator { + public: + MaxIteratorComparator(const Comparator* comparator) : + comparator_(comparator) {} + + bool operator()(IteratorWrapper* a, IteratorWrapper* b) const { + return comparator_->Compare(a->key(), b->key()) < 0; + } + private: + const Comparator* comparator_; +}; + +// When used with std::priority_queue, this comparison functor puts the +// iterator with the min/smallest key on top. 
+class MinIteratorComparator { + public: + MinIteratorComparator(const Comparator* comparator) : + comparator_(comparator) {} + + bool operator()(IteratorWrapper* a, IteratorWrapper* b) const { + return comparator_->Compare(a->key(), b->key()) > 0; + } + private: + const Comparator* comparator_; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/table/iterator.cc b/external/rocksdb/table/iterator.cc new file mode 100755 index 0000000000..09f7f8e687 --- /dev/null +++ b/external/rocksdb/table/iterator.cc @@ -0,0 +1,142 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "rocksdb/iterator.h" +#include "table/internal_iterator.h" +#include "table/iterator_wrapper.h" +#include "util/arena.h" + +namespace rocksdb { + +Cleanable::Cleanable() { + cleanup_.function = nullptr; + cleanup_.next = nullptr; +} + +Cleanable::~Cleanable() { + if (cleanup_.function != nullptr) { + (*cleanup_.function)(cleanup_.arg1, cleanup_.arg2); + for (Cleanup* c = cleanup_.next; c != nullptr; ) { + (*c->function)(c->arg1, c->arg2); + Cleanup* next = c->next; + delete c; + c = next; + } + } +} + +void Cleanable::RegisterCleanup(CleanupFunction func, void* arg1, void* arg2) { + assert(func != nullptr); + Cleanup* c; + if (cleanup_.function == nullptr) { + c = &cleanup_; + } else { + c = new Cleanup; + c->next = cleanup_.next; + cleanup_.next = c; + } + c->function = func; + c->arg1 = arg1; + c->arg2 = arg2; +} + +Status Iterator::GetProperty(std::string prop_name, std::string* prop) { + if (prop == nullptr) { + return Status::InvalidArgument("prop is nullptr"); + } + if (prop_name == "rocksdb.iterator.is-key-pinned") { + *prop = "0"; + return Status::OK(); + } + return Status::InvalidArgument("Undentified property."); +} + +namespace { +class EmptyIterator : public Iterator { + public: + explicit EmptyIterator(const Status& s) : status_(s) { } + virtual bool Valid() const override { return false; } + virtual void Seek(const Slice& target) override {} + virtual void SeekToFirst() override {} + virtual void SeekToLast() override {} + virtual void Next() override { assert(false); } + virtual void Prev() override { assert(false); } + Slice key() const override { + assert(false); + return Slice(); + } + Slice value() const override { + assert(false); + return Slice(); + } + virtual Status status() const override { return status_; } + + private: + Status status_; +}; + +class EmptyInternalIterator : public InternalIterator { + public: + explicit EmptyInternalIterator(const Status& s) : status_(s) {} + virtual bool Valid() const override { return false; } + virtual void Seek(const Slice& target) override {} + virtual void SeekToFirst() override {} + virtual void SeekToLast() override {} + virtual void Next() override { assert(false); } + virtual void Prev() override { assert(false); } + Slice key() const override { + assert(false); + return Slice(); + } + Slice value() const override { + assert(false); + return Slice(); + } + virtual Status status() const override { return status_; } + + private: + Status status_; +}; +} // namespace + 
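A note on the Cleanable implementation above: the first registered cleanup lives inline in the object and later ones are heap-allocated and chained behind it, so the common zero-or-one-cleanup case costs no allocation. A minimal standalone sketch of the same pattern (MiniCleanable is an illustrative name, not RocksDB code):

// Sketch of the Cleanable pattern: inline first node, heap-chained extras.
#include <cstdio>

struct MiniCleanable {
  typedef void (*Fn)(void*, void*);
  struct Node { Fn fn; void* a1; void* a2; Node* next; };
  Node head;                           // inline slot, no allocation needed

  MiniCleanable() : head{nullptr, nullptr, nullptr, nullptr} {}

  void Register(Fn fn, void* a1, void* a2) {
    if (head.fn == nullptr) {          // first cleanup: use the inline slot
      head.fn = fn; head.a1 = a1; head.a2 = a2;
      return;
    }
    head.next = new Node{fn, a1, a2, head.next};  // later ones: chained
  }

  ~MiniCleanable() {                   // run all cleanups, free the chain
    if (head.fn == nullptr) return;
    head.fn(head.a1, head.a2);
    for (Node* c = head.next; c != nullptr;) {
      c->fn(c->a1, c->a2);
      Node* next = c->next;
      delete c;
      c = next;
    }
  }
};

static void PrintMsg(void* msg, void*) {
  std::puts(static_cast<const char*>(msg));
}

int main() {
  MiniCleanable c;
  char msg[] = "release pinned block";
  c.Register(PrintMsg, msg, nullptr);
  return 0;                            // destructor runs the cleanup
}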
+Iterator* NewEmptyIterator() { + return new EmptyIterator(Status::OK()); +} + +Iterator* NewErrorIterator(const Status& status) { + return new EmptyIterator(status); +} + +InternalIterator* NewEmptyInternalIterator() { + return new EmptyInternalIterator(Status::OK()); +} + +InternalIterator* NewEmptyInternalIterator(Arena* arena) { + if (arena == nullptr) { + return NewEmptyInternalIterator(); + } else { + auto mem = arena->AllocateAligned(sizeof(EmptyIterator)); + return new (mem) EmptyInternalIterator(Status::OK()); + } +} + +InternalIterator* NewErrorInternalIterator(const Status& status) { + return new EmptyInternalIterator(status); +} + +InternalIterator* NewErrorInternalIterator(const Status& status, Arena* arena) { + if (arena == nullptr) { + return NewErrorInternalIterator(status); + } else { + auto mem = arena->AllocateAligned(sizeof(EmptyIterator)); + return new (mem) EmptyInternalIterator(status); + } +} + +} // namespace rocksdb diff --git a/external/rocksdb/table/iterator_wrapper.h b/external/rocksdb/table/iterator_wrapper.h new file mode 100755 index 0000000000..e68bbf3f05 --- /dev/null +++ b/external/rocksdb/table/iterator_wrapper.h @@ -0,0 +1,101 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once + +#include + +#include "table/internal_iterator.h" + +namespace rocksdb { + +// A internal wrapper class with an interface similar to Iterator that caches +// the valid() and key() results for an underlying iterator. +// This can help avoid virtual function calls and also gives better +// cache locality. +class IteratorWrapper { + public: + IteratorWrapper() : iter_(nullptr), valid_(false) {} + explicit IteratorWrapper(InternalIterator* _iter) : iter_(nullptr) { + Set(_iter); + } + ~IteratorWrapper() {} + InternalIterator* iter() const { return iter_; } + + // Set the underlying Iterator to _iter and return + // previous underlying Iterator. 
+ InternalIterator* Set(InternalIterator* _iter) { + InternalIterator* old_iter = iter_; + + iter_ = _iter; + if (iter_ == nullptr) { + valid_ = false; + } else { + Update(); + } + return old_iter; + } + + void DeleteIter(bool is_arena_mode) { + if (iter_) { + if (!is_arena_mode) { + delete iter_; + } else { + iter_->~InternalIterator(); + } + } + } + + // Iterator interface methods + bool Valid() const { return valid_; } + Slice key() const { assert(Valid()); return key_; } + Slice value() const { assert(Valid()); return iter_->value(); } + // Methods below require iter() != nullptr + Status status() const { assert(iter_); return iter_->status(); } + void Next() { assert(iter_); iter_->Next(); Update(); } + void Prev() { assert(iter_); iter_->Prev(); Update(); } + void Seek(const Slice& k) { assert(iter_); iter_->Seek(k); Update(); } + void SeekToFirst() { assert(iter_); iter_->SeekToFirst(); Update(); } + void SeekToLast() { assert(iter_); iter_->SeekToLast(); Update(); } + + void SetPinnedItersMgr(PinnedIteratorsManager* pinned_iters_mgr) { + assert(iter_); + iter_->SetPinnedItersMgr(pinned_iters_mgr); + } + bool IsKeyPinned() const { + assert(Valid()); + return iter_->IsKeyPinned(); + } + bool IsValuePinned() const { + assert(Valid()); + return iter_->IsValuePinned(); + } + + private: + void Update() { + valid_ = iter_->Valid(); + if (valid_) { + key_ = iter_->key(); + } + } + + InternalIterator* iter_; + bool valid_; + Slice key_; +}; + +class Arena; +// Return an empty iterator (yields nothing) allocated from arena. +extern InternalIterator* NewEmptyInternalIterator(Arena* arena); + +// Return an empty iterator with the specified status, allocated arena. +extern InternalIterator* NewErrorInternalIterator(const Status& status, + Arena* arena); + +} // namespace rocksdb diff --git a/external/rocksdb/table/merger.cc b/external/rocksdb/table/merger.cc new file mode 100755 index 0000000000..637959d9ac --- /dev/null +++ b/external/rocksdb/table/merger.cc @@ -0,0 +1,366 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
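The merging iterator that follows leans on IteratorWrapper above: heap comparisons read the cached valid_/key_ fields instead of making virtual calls into the child iterator. A self-contained sketch of that caching pattern (toy Iter/VecIter/Wrapper types, illustrative only):

// Self-contained sketch of the IteratorWrapper idea (toy types): pay the
// virtual calls once per movement, then let hot loops read plain fields.
#include <iostream>
#include <string>
#include <vector>

struct Iter {                          // stands in for InternalIterator
  virtual ~Iter() {}
  virtual bool Valid() const = 0;
  virtual std::string key() const = 0;
  virtual void Next() = 0;
};

struct VecIter : Iter {
  std::vector<std::string> v;
  size_t i;
  explicit VecIter(std::vector<std::string> d) : v(std::move(d)), i(0) {}
  bool Valid() const override { return i < v.size(); }
  std::string key() const override { return v[i]; }
  void Next() override { ++i; }
};

struct Wrapper {
  Iter* it;
  bool valid;                          // cached Valid()
  std::string k;                       // cached key()
  explicit Wrapper(Iter* p) : it(p), valid(false) { Update(); }
  void Update() { valid = it->Valid(); if (valid) k = it->key(); }
  void Next() { it->Next(); Update(); }
};

int main() {
  VecIter vi({"a", "b"});
  Wrapper w(&vi);
  while (w.valid) {                    // field reads, no virtual dispatch
    std::cout << w.k << "\n";
    w.Next();
  }
  return 0;
}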
+ +#include "table/merger.h" + +#include + +#include "db/pinned_iterators_manager.h" +#include "rocksdb/comparator.h" +#include "rocksdb/iterator.h" +#include "rocksdb/options.h" +#include "table/internal_iterator.h" +#include "table/iter_heap.h" +#include "table/iterator_wrapper.h" +#include "util/arena.h" +#include "util/autovector.h" +#include "util/heap.h" +#include "util/perf_context_imp.h" +#include "util/stop_watch.h" +#include "util/sync_point.h" + +namespace rocksdb { +// Without anonymous namespace here, we fail the warning -Wmissing-prototypes +namespace { +typedef BinaryHeap MergerMaxIterHeap; +typedef BinaryHeap MergerMinIterHeap; +} // namespace + +const size_t kNumIterReserve = 4; + +class MergingIterator : public InternalIterator { + public: + MergingIterator(const Comparator* comparator, InternalIterator** children, + int n, bool is_arena_mode) + : is_arena_mode_(is_arena_mode), + comparator_(comparator), + current_(nullptr), + direction_(kForward), + minHeap_(comparator_), + pinned_iters_mgr_(nullptr) { + children_.resize(n); + for (int i = 0; i < n; i++) { + children_[i].Set(children[i]); + } + for (auto& child : children_) { + if (child.Valid()) { + minHeap_.push(&child); + } + } + current_ = CurrentForward(); + } + + virtual void AddIterator(InternalIterator* iter) { + assert(direction_ == kForward); + children_.emplace_back(iter); + if (pinned_iters_mgr_) { + iter->SetPinnedItersMgr(pinned_iters_mgr_); + } + auto new_wrapper = children_.back(); + if (new_wrapper.Valid()) { + minHeap_.push(&new_wrapper); + current_ = CurrentForward(); + } + } + + virtual ~MergingIterator() { + for (auto& child : children_) { + child.DeleteIter(is_arena_mode_); + } + } + + virtual bool Valid() const override { return (current_ != nullptr); } + + virtual void SeekToFirst() override { + ClearHeaps(); + for (auto& child : children_) { + child.SeekToFirst(); + if (child.Valid()) { + minHeap_.push(&child); + } + } + direction_ = kForward; + current_ = CurrentForward(); + } + + virtual void SeekToLast() override { + ClearHeaps(); + InitMaxHeap(); + for (auto& child : children_) { + child.SeekToLast(); + if (child.Valid()) { + maxHeap_->push(&child); + } + } + direction_ = kReverse; + current_ = CurrentReverse(); + } + + virtual void Seek(const Slice& target) override { + ClearHeaps(); + for (auto& child : children_) { + { + PERF_TIMER_GUARD(seek_child_seek_time); + child.Seek(target); + } + PERF_COUNTER_ADD(seek_child_seek_count, 1); + + if (child.Valid()) { + PERF_TIMER_GUARD(seek_min_heap_time); + minHeap_.push(&child); + } + } + direction_ = kForward; + { + PERF_TIMER_GUARD(seek_min_heap_time); + current_ = CurrentForward(); + } + } + + virtual void Next() override { + assert(Valid()); + + // Ensure that all children are positioned after key(). + // If we are moving in the forward direction, it is already + // true for all of the non-current children since current_ is + // the smallest child and key() == current_->key(). + if (direction_ != kForward) { + // Otherwise, advance the non-current children. We advance current_ + // just after the if-block. + ClearHeaps(); + for (auto& child : children_) { + if (&child != current_) { + child.Seek(key()); + if (child.Valid() && comparator_->Equal(key(), child.key())) { + child.Next(); + } + } + if (child.Valid()) { + minHeap_.push(&child); + } + } + direction_ = kForward; + // The loop advanced all non-current children to be > key() so current_ + // should still be strictly the smallest key. 
+ assert(current_ == CurrentForward()); + } + + // For the heap modifications below to be correct, current_ must be the + // current top of the heap. + assert(current_ == CurrentForward()); + + // as the current points to the current record. move the iterator forward. + current_->Next(); + if (current_->Valid()) { + // current is still valid after the Next() call above. Call + // replace_top() to restore the heap property. When the same child + // iterator yields a sequence of keys, this is cheap. + minHeap_.replace_top(current_); + } else { + // current stopped being valid, remove it from the heap. + minHeap_.pop(); + } + current_ = CurrentForward(); + } + + virtual void Prev() override { + assert(Valid()); + // Ensure that all children are positioned before key(). + // If we are moving in the reverse direction, it is already + // true for all of the non-current children since current_ is + // the largest child and key() == current_->key(). + if (direction_ != kReverse) { + // Otherwise, retreat the non-current children. We retreat current_ + // just after the if-block. + ClearHeaps(); + InitMaxHeap(); + for (auto& child : children_) { + if (&child != current_) { + child.Seek(key()); + if (child.Valid()) { + // Child is at first entry >= key(). Step back one to be < key() + TEST_SYNC_POINT_CALLBACK("MergeIterator::Prev:BeforePrev", &child); + child.Prev(); + } else { + // Child has no entries >= key(). Position at last entry. + TEST_SYNC_POINT("MergeIterator::Prev:BeforeSeekToLast"); + child.SeekToLast(); + } + } + if (child.Valid()) { + maxHeap_->push(&child); + } + } + direction_ = kReverse; + // Note that we don't do assert(current_ == CurrentReverse()) here + // because it is possible to have some keys larger than the seek-key + // inserted between Seek() and SeekToLast(), which makes current_ not + // equal to CurrentReverse(). + current_ = CurrentReverse(); + } + + // For the heap modifications below to be correct, current_ must be the + // current top of the heap. + assert(current_ == CurrentReverse()); + + current_->Prev(); + if (current_->Valid()) { + // current is still valid after the Prev() call above. Call + // replace_top() to restore the heap property. When the same child + // iterator yields a sequence of keys, this is cheap. + maxHeap_->replace_top(current_); + } else { + // current stopped being valid, remove it from the heap. 
+ maxHeap_->pop(); + } + current_ = CurrentReverse(); + } + + virtual Slice key() const override { + assert(Valid()); + return current_->key(); + } + + virtual Slice value() const override { + assert(Valid()); + return current_->value(); + } + + virtual Status status() const override { + Status s; + for (auto& child : children_) { + s = child.status(); + if (!s.ok()) { + break; + } + } + return s; + } + + virtual void SetPinnedItersMgr( + PinnedIteratorsManager* pinned_iters_mgr) override { + pinned_iters_mgr_ = pinned_iters_mgr; + for (auto& child : children_) { + child.SetPinnedItersMgr(pinned_iters_mgr); + } + } + + virtual bool IsKeyPinned() const override { + assert(Valid()); + return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() && + current_->IsKeyPinned(); + } + + virtual bool IsValuePinned() const override { + assert(Valid()); + return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() && + current_->IsValuePinned(); + } + + private: + // Clears heaps for both directions, used when changing direction or seeking + void ClearHeaps(); + // Ensures that maxHeap_ is initialized when starting to go in the reverse + // direction + void InitMaxHeap(); + + bool is_arena_mode_; + const Comparator* comparator_; + autovector children_; + + // Cached pointer to child iterator with the current key, or nullptr if no + // child iterators are valid. This is the top of minHeap_ or maxHeap_ + // depending on the direction. + IteratorWrapper* current_; + // Which direction is the iterator moving? + enum Direction { + kForward, + kReverse + }; + Direction direction_; + MergerMinIterHeap minHeap_; + // Max heap is used for reverse iteration, which is way less common than + // forward. Lazily initialize it to save memory. + std::unique_ptr maxHeap_; + PinnedIteratorsManager* pinned_iters_mgr_; + + IteratorWrapper* CurrentForward() const { + assert(direction_ == kForward); + return !minHeap_.empty() ? minHeap_.top() : nullptr; + } + + IteratorWrapper* CurrentReverse() const { + assert(direction_ == kReverse); + assert(maxHeap_); + return !maxHeap_->empty() ? 
maxHeap_->top() : nullptr; + } +}; + +void MergingIterator::ClearHeaps() { + minHeap_.clear(); + if (maxHeap_) { + maxHeap_->clear(); + } +} + +void MergingIterator::InitMaxHeap() { + if (!maxHeap_) { + maxHeap_.reset(new MergerMaxIterHeap(comparator_)); + } +} + +InternalIterator* NewMergingIterator(const Comparator* cmp, + InternalIterator** list, int n, + Arena* arena) { + assert(n >= 0); + if (n == 0) { + return NewEmptyInternalIterator(arena); + } else if (n == 1) { + return list[0]; + } else { + if (arena == nullptr) { + return new MergingIterator(cmp, list, n, false); + } else { + auto mem = arena->AllocateAligned(sizeof(MergingIterator)); + return new (mem) MergingIterator(cmp, list, n, true); + } + } +} + +MergeIteratorBuilder::MergeIteratorBuilder(const Comparator* comparator, + Arena* a) + : first_iter(nullptr), use_merging_iter(false), arena(a) { + + auto mem = arena->AllocateAligned(sizeof(MergingIterator)); + merge_iter = new (mem) MergingIterator(comparator, nullptr, 0, true); +} + +void MergeIteratorBuilder::AddIterator(InternalIterator* iter) { + if (!use_merging_iter && first_iter != nullptr) { + merge_iter->AddIterator(first_iter); + use_merging_iter = true; + } + if (use_merging_iter) { + merge_iter->AddIterator(iter); + } else { + first_iter = iter; + } +} + +InternalIterator* MergeIteratorBuilder::Finish() { + if (!use_merging_iter) { + return first_iter; + } else { + auto ret = merge_iter; + merge_iter = nullptr; + return ret; + } +} + +} // namespace rocksdb diff --git a/external/rocksdb/table/merger.h b/external/rocksdb/table/merger.h new file mode 100755 index 0000000000..7291a03782 --- /dev/null +++ b/external/rocksdb/table/merger.h @@ -0,0 +1,60 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once + +#include "rocksdb/types.h" + +namespace rocksdb { + +class Comparator; +class InternalIterator; +class Env; +class Arena; + +// Return an iterator that provided the union of the data in +// children[0,n-1]. Takes ownership of the child iterators and +// will delete them when the result iterator is deleted. +// +// The result does no duplicate suppression. I.e., if a particular +// key is present in K child iterators, it will be yielded K times. +// +// REQUIRES: n >= 0 +extern InternalIterator* NewMergingIterator(const Comparator* comparator, + InternalIterator** children, int n, + Arena* arena = nullptr); + +class MergingIterator; + +// A builder class to build a merging iterator by adding iterators one by one. +class MergeIteratorBuilder { + public: + // comparator: the comparator used in merging comparator + // arena: where the merging iterator needs to be allocated from. + explicit MergeIteratorBuilder(const Comparator* comparator, Arena* arena); + ~MergeIteratorBuilder() {} + + // Add iter to the merging iterator. + void AddIterator(InternalIterator* iter); + + // Get arena used to build the merging iterator. It is called one a child + // iterator needs to be allocated. + Arena* GetArena() { return arena; } + + // Return the result merging iterator. 
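The forward path of MergingIterator above is a textbook k-way merge over a min-heap of child cursors; MinIteratorComparator's greater-than comparison is what puts the smallest key on top. A standalone sketch with std::priority_queue in place of BinaryHeap (toy Cursor type, illustrative only):

// Standalone sketch of the forward (min-heap) path of MergingIterator.
#include <iostream>
#include <queue>
#include <string>
#include <vector>

struct Cursor {
  const std::vector<std::string>* v;   // a sorted child "iterator"
  size_t i;
  bool valid() const { return i < v->size(); }
  const std::string& key() const { return (*v)[i]; }
};

struct Greater {                       // mirrors MinIteratorComparator:
  bool operator()(const Cursor* a, const Cursor* b) const {
    return a->key() > b->key();        // ">" puts the smallest key on top
  }
};

int main() {
  std::vector<std::string> a{"apple", "cherry"};
  std::vector<std::string> b{"banana", "date"};
  Cursor ca{&a, 0}, cb{&b, 0};
  std::priority_queue<Cursor*, std::vector<Cursor*>, Greater> heap;
  if (ca.valid()) heap.push(&ca);
  if (cb.valid()) heap.push(&cb);
  while (!heap.empty()) {              // Next(): emit top, advance, restore
    Cursor* top = heap.top();
    heap.pop();                        // the real code's replace_top()
    std::cout << top->key() << "\n";   // avoids this pop+push when the
    ++top->i;                          // same child stays on top
    if (top->valid()) heap.push(top);
  }
  return 0;                            // prints apple banana cherry date
}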
+ InternalIterator* Finish(); + + private: + MergingIterator* merge_iter; + InternalIterator* first_iter; + bool use_merging_iter; + Arena* arena; +}; + +} // namespace rocksdb diff --git a/external/rocksdb/table/merger_test.cc b/external/rocksdb/table/merger_test.cc new file mode 100755 index 0000000000..97979af7ce --- /dev/null +++ b/external/rocksdb/table/merger_test.cc @@ -0,0 +1,169 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include +#include + +#include "table/merger.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { + +class MergerTest : public testing::Test { + public: + MergerTest() + : rnd_(3), merging_iterator_(nullptr), single_iterator_(nullptr) {} + ~MergerTest() = default; + std::vector GenerateStrings(size_t len, int string_len) { + std::vector ret; + for (size_t i = 0; i < len; ++i) { + ret.push_back(test::RandomHumanReadableString(&rnd_, string_len)); + } + return ret; + } + + void AssertEquivalence() { + auto a = merging_iterator_.get(); + auto b = single_iterator_.get(); + if (!a->Valid()) { + ASSERT_TRUE(!b->Valid()); + } else { + ASSERT_TRUE(b->Valid()); + ASSERT_EQ(b->key().ToString(), a->key().ToString()); + ASSERT_EQ(b->value().ToString(), a->value().ToString()); + } + } + + void SeekToRandom() { Seek(test::RandomHumanReadableString(&rnd_, 5)); } + + void Seek(std::string target) { + merging_iterator_->Seek(target); + single_iterator_->Seek(target); + } + + void SeekToFirst() { + merging_iterator_->SeekToFirst(); + single_iterator_->SeekToFirst(); + } + + void SeekToLast() { + merging_iterator_->SeekToLast(); + single_iterator_->SeekToLast(); + } + + void Next(int times) { + for (int i = 0; i < times && merging_iterator_->Valid(); ++i) { + AssertEquivalence(); + merging_iterator_->Next(); + single_iterator_->Next(); + } + AssertEquivalence(); + } + + void Prev(int times) { + for (int i = 0; i < times && merging_iterator_->Valid(); ++i) { + AssertEquivalence(); + merging_iterator_->Prev(); + single_iterator_->Prev(); + } + AssertEquivalence(); + } + + void NextAndPrev(int times) { + for (int i = 0; i < times && merging_iterator_->Valid(); ++i) { + AssertEquivalence(); + if (rnd_.OneIn(2)) { + merging_iterator_->Prev(); + single_iterator_->Prev(); + } else { + merging_iterator_->Next(); + single_iterator_->Next(); + } + } + AssertEquivalence(); + } + + void Generate(size_t num_iterators, size_t strings_per_iterator, + int letters_per_string) { + std::vector small_iterators; + for (size_t i = 0; i < num_iterators; ++i) { + auto strings = GenerateStrings(strings_per_iterator, letters_per_string); + small_iterators.push_back(new test::VectorIterator(strings)); + all_keys_.insert(all_keys_.end(), strings.begin(), strings.end()); + } + + merging_iterator_.reset( + NewMergingIterator(BytewiseComparator(), &small_iterators[0], + static_cast(small_iterators.size()))); + single_iterator_.reset(new test::VectorIterator(all_keys_)); + } + + Random rnd_; + std::unique_ptr merging_iterator_; + std::unique_ptr single_iterator_; + std::vector all_keys_; +}; + +TEST_F(MergerTest, SeekToRandomNextTest) { + Generate(1000, 50, 50); + for (int i = 0; i < 10; ++i) { + SeekToRandom(); + AssertEquivalence(); + Next(50000); + } +} + +TEST_F(MergerTest, SeekToRandomNextSmallStringsTest) { + 
Generate(1000, 50, 2); + for (int i = 0; i < 10; ++i) { + SeekToRandom(); + AssertEquivalence(); + Next(50000); + } +} + +TEST_F(MergerTest, SeekToRandomPrevTest) { + Generate(1000, 50, 50); + for (int i = 0; i < 10; ++i) { + SeekToRandom(); + AssertEquivalence(); + Prev(50000); + } +} + +TEST_F(MergerTest, SeekToRandomRandomTest) { + Generate(200, 50, 50); + for (int i = 0; i < 3; ++i) { + SeekToRandom(); + AssertEquivalence(); + NextAndPrev(5000); + } +} + +TEST_F(MergerTest, SeekToFirstTest) { + Generate(1000, 50, 50); + for (int i = 0; i < 10; ++i) { + SeekToFirst(); + AssertEquivalence(); + Next(50000); + } +} + +TEST_F(MergerTest, SeekToLastTest) { + Generate(1000, 50, 50); + for (int i = 0; i < 10; ++i) { + SeekToLast(); + AssertEquivalence(); + Prev(50000); + } +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/table/meta_blocks.cc b/external/rocksdb/table/meta_blocks.cc new file mode 100755 index 0000000000..e98a638e04 --- /dev/null +++ b/external/rocksdb/table/meta_blocks.cc @@ -0,0 +1,378 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#include "table/meta_blocks.h" + +#include +#include + +#include "db/table_properties_collector.h" +#include "rocksdb/table.h" +#include "rocksdb/table_properties.h" +#include "table/block.h" +#include "table/format.h" +#include "table/internal_iterator.h" +#include "table/persistent_cache_helper.h" +#include "table/table_properties_internal.h" +#include "util/coding.h" + +namespace rocksdb { + +MetaIndexBuilder::MetaIndexBuilder() + : meta_index_block_(new BlockBuilder(1 /* restart interval */)) {} + +void MetaIndexBuilder::Add(const std::string& key, + const BlockHandle& handle) { + std::string handle_encoding; + handle.EncodeTo(&handle_encoding); + meta_block_handles_.insert({key, handle_encoding}); +} + +Slice MetaIndexBuilder::Finish() { + for (const auto& metablock : meta_block_handles_) { + meta_index_block_->Add(metablock.first, metablock.second); + } + return meta_index_block_->Finish(); +} + +PropertyBlockBuilder::PropertyBlockBuilder() + : properties_block_(new BlockBuilder(1 /* restart interval */)) {} + +void PropertyBlockBuilder::Add(const std::string& name, + const std::string& val) { + props_.insert({name, val}); +} + +void PropertyBlockBuilder::Add(const std::string& name, uint64_t val) { + assert(props_.find(name) == props_.end()); + + std::string dst; + PutVarint64(&dst, val); + + Add(name, dst); +} + +void PropertyBlockBuilder::Add( + const UserCollectedProperties& user_collected_properties) { + for (const auto& prop : user_collected_properties) { + Add(prop.first, prop.second); + } +} + +void PropertyBlockBuilder::AddTableProperty(const TableProperties& props) { + Add(TablePropertiesNames::kRawKeySize, props.raw_key_size); + Add(TablePropertiesNames::kRawValueSize, props.raw_value_size); + Add(TablePropertiesNames::kDataSize, props.data_size); + Add(TablePropertiesNames::kIndexSize, props.index_size); + Add(TablePropertiesNames::kNumEntries, props.num_entries); + Add(TablePropertiesNames::kNumDataBlocks, props.num_data_blocks); + Add(TablePropertiesNames::kFilterSize, props.filter_size); + Add(TablePropertiesNames::kFormatVersion, props.format_version); + 
Add(TablePropertiesNames::kFixedKeyLen, props.fixed_key_len); + Add(TablePropertiesNames::kColumnFamilyId, props.column_family_id); + + if (!props.filter_policy_name.empty()) { + Add(TablePropertiesNames::kFilterPolicy, props.filter_policy_name); + } + if (!props.comparator_name.empty()) { + Add(TablePropertiesNames::kComparator, props.comparator_name); + } + + if (!props.merge_operator_name.empty()) { + Add(TablePropertiesNames::kMergeOperator, props.merge_operator_name); + } + if (!props.property_collectors_names.empty()) { + Add(TablePropertiesNames::kPropertyCollectors, + props.property_collectors_names); + } + if (!props.column_family_name.empty()) { + Add(TablePropertiesNames::kColumnFamilyName, props.column_family_name); + } + + if (!props.compression_name.empty()) { + Add(TablePropertiesNames::kCompression, props.compression_name); + } +} + +Slice PropertyBlockBuilder::Finish() { + for (const auto& prop : props_) { + properties_block_->Add(prop.first, prop.second); + } + + return properties_block_->Finish(); +} + +void LogPropertiesCollectionError( + Logger* info_log, const std::string& method, const std::string& name) { + assert(method == "Add" || method == "Finish"); + + std::string msg = + "Encountered error when calling TablePropertiesCollector::" + + method + "() with collector name: " + name; + Log(InfoLogLevel::ERROR_LEVEL, info_log, "%s", msg.c_str()); +} + +bool NotifyCollectTableCollectorsOnAdd( + const Slice& key, const Slice& value, uint64_t file_size, + const std::vector>& collectors, + Logger* info_log) { + bool all_succeeded = true; + for (auto& collector : collectors) { + Status s = collector->InternalAdd(key, value, file_size); + all_succeeded = all_succeeded && s.ok(); + if (!s.ok()) { + LogPropertiesCollectionError(info_log, "Add" /* method */, + collector->Name()); + } + } + return all_succeeded; +} + +bool NotifyCollectTableCollectorsOnFinish( + const std::vector>& collectors, + Logger* info_log, PropertyBlockBuilder* builder) { + bool all_succeeded = true; + for (auto& collector : collectors) { + UserCollectedProperties user_collected_properties; + Status s = collector->Finish(&user_collected_properties); + + all_succeeded = all_succeeded && s.ok(); + if (!s.ok()) { + LogPropertiesCollectionError(info_log, "Finish" /* method */, + collector->Name()); + } else { + builder->Add(user_collected_properties); + } + } + + return all_succeeded; +} + +Status ReadProperties(const Slice& handle_value, RandomAccessFileReader* file, + const Footer& footer, const ImmutableCFOptions &ioptions, + TableProperties** table_properties) { + assert(table_properties); + + Slice v = handle_value; + BlockHandle handle; + if (!handle.DecodeFrom(&v).ok()) { + return Status::InvalidArgument("Failed to decode properties block handle"); + } + + BlockContents block_contents; + ReadOptions read_options; + read_options.verify_checksums = false; + Status s; + s = ReadBlockContents(file, footer, read_options, handle, &block_contents, + ioptions, false /* decompress */); + + if (!s.ok()) { + return s; + } + + Block properties_block(std::move(block_contents)); + std::unique_ptr iter( + properties_block.NewIterator(BytewiseComparator())); + + auto new_table_properties = new TableProperties(); + // All pre-defined properties of type uint64_t + std::unordered_map predefined_uint64_properties = { + {TablePropertiesNames::kDataSize, &new_table_properties->data_size}, + {TablePropertiesNames::kIndexSize, &new_table_properties->index_size}, + {TablePropertiesNames::kFilterSize, 
&new_table_properties->filter_size}, + {TablePropertiesNames::kRawKeySize, &new_table_properties->raw_key_size}, + {TablePropertiesNames::kRawValueSize, + &new_table_properties->raw_value_size}, + {TablePropertiesNames::kNumDataBlocks, + &new_table_properties->num_data_blocks}, + {TablePropertiesNames::kNumEntries, &new_table_properties->num_entries}, + {TablePropertiesNames::kFormatVersion, + &new_table_properties->format_version}, + {TablePropertiesNames::kFixedKeyLen, + &new_table_properties->fixed_key_len}, + {TablePropertiesNames::kColumnFamilyId, + &new_table_properties->column_family_id}, + }; + + std::string last_key; + for (iter->SeekToFirst(); iter->Valid(); iter->Next()) { + s = iter->status(); + if (!s.ok()) { + break; + } + + auto key = iter->key().ToString(); + // properties block is strictly sorted with no duplicate key. + assert(last_key.empty() || + BytewiseComparator()->Compare(key, last_key) > 0); + last_key = key; + + auto raw_val = iter->value(); + auto pos = predefined_uint64_properties.find(key); + + if (pos != predefined_uint64_properties.end()) { + // handle predefined rocksdb properties + uint64_t val; + if (!GetVarint64(&raw_val, &val)) { + // skip malformed value + auto error_msg = + "Detect malformed value in properties meta-block:" + "\tkey: " + key + "\tval: " + raw_val.ToString(); + Log(InfoLogLevel::ERROR_LEVEL, ioptions.info_log, "%s", + error_msg.c_str()); + continue; + } + *(pos->second) = val; + } else if (key == TablePropertiesNames::kFilterPolicy) { + new_table_properties->filter_policy_name = raw_val.ToString(); + } else if (key == TablePropertiesNames::kColumnFamilyName) { + new_table_properties->column_family_name = raw_val.ToString(); + } else if (key == TablePropertiesNames::kComparator) { + new_table_properties->comparator_name = raw_val.ToString(); + } else if (key == TablePropertiesNames::kMergeOperator) { + new_table_properties->merge_operator_name = raw_val.ToString(); + } else if (key == TablePropertiesNames::kPropertyCollectors) { + new_table_properties->property_collectors_names = raw_val.ToString(); + } else if (key == TablePropertiesNames::kCompression) { + new_table_properties->compression_name = raw_val.ToString(); + } else { + // handle user-collected properties + new_table_properties->user_collected_properties.insert( + {key, raw_val.ToString()}); + } + } + if (s.ok()) { + *table_properties = new_table_properties; + } else { + delete new_table_properties; + } + + return s; +} + +Status ReadTableProperties(RandomAccessFileReader* file, uint64_t file_size, + uint64_t table_magic_number, + const ImmutableCFOptions &ioptions, + TableProperties** properties) { + // -- Read metaindex block + Footer footer; + auto s = ReadFooterFromFile(file, file_size, &footer, table_magic_number); + if (!s.ok()) { + return s; + } + + auto metaindex_handle = footer.metaindex_handle(); + BlockContents metaindex_contents; + ReadOptions read_options; + read_options.verify_checksums = false; + s = ReadBlockContents(file, footer, read_options, metaindex_handle, + &metaindex_contents, ioptions, false /* decompress */); + if (!s.ok()) { + return s; + } + Block metaindex_block(std::move(metaindex_contents)); + std::unique_ptr meta_iter( + metaindex_block.NewIterator(BytewiseComparator())); + + // -- Read property block + bool found_properties_block = true; + s = SeekToPropertiesBlock(meta_iter.get(), &found_properties_block); + if (!s.ok()) { + return s; + } + + TableProperties table_properties; + if (found_properties_block == true) { + s = 
ReadProperties(meta_iter->value(), file, footer, ioptions, properties); + } else { + s = Status::NotFound(); + } + + return s; +} + +Status FindMetaBlock(InternalIterator* meta_index_iter, + const std::string& meta_block_name, + BlockHandle* block_handle) { + meta_index_iter->Seek(meta_block_name); + if (meta_index_iter->status().ok() && meta_index_iter->Valid() && + meta_index_iter->key() == meta_block_name) { + Slice v = meta_index_iter->value(); + return block_handle->DecodeFrom(&v); + } else { + return Status::Corruption("Cannot find the meta block", meta_block_name); + } +} + +Status FindMetaBlock(RandomAccessFileReader* file, uint64_t file_size, + uint64_t table_magic_number, + const ImmutableCFOptions &ioptions, + const std::string& meta_block_name, + BlockHandle* block_handle) { + Footer footer; + auto s = ReadFooterFromFile(file, file_size, &footer, table_magic_number); + if (!s.ok()) { + return s; + } + + auto metaindex_handle = footer.metaindex_handle(); + BlockContents metaindex_contents; + ReadOptions read_options; + read_options.verify_checksums = false; + s = ReadBlockContents(file, footer, read_options, metaindex_handle, + &metaindex_contents, ioptions, false /* do decompression */); + if (!s.ok()) { + return s; + } + Block metaindex_block(std::move(metaindex_contents)); + + std::unique_ptr meta_iter; + meta_iter.reset(metaindex_block.NewIterator(BytewiseComparator())); + + return FindMetaBlock(meta_iter.get(), meta_block_name, block_handle); +} + +Status ReadMetaBlock(RandomAccessFileReader* file, uint64_t file_size, + uint64_t table_magic_number, + const ImmutableCFOptions &ioptions, + const std::string& meta_block_name, + BlockContents* contents) { + Status status; + Footer footer; + status = ReadFooterFromFile(file, file_size, &footer, table_magic_number); + if (!status.ok()) { + return status; + } + + // Reading metaindex block + auto metaindex_handle = footer.metaindex_handle(); + BlockContents metaindex_contents; + ReadOptions read_options; + read_options.verify_checksums = false; + status = ReadBlockContents(file, footer, read_options, metaindex_handle, + &metaindex_contents, ioptions, + false /* decompress */); + if (!status.ok()) { + return status; + } + + // Finding metablock + Block metaindex_block(std::move(metaindex_contents)); + + std::unique_ptr meta_iter; + meta_iter.reset(metaindex_block.NewIterator(BytewiseComparator())); + + BlockHandle block_handle; + status = FindMetaBlock(meta_iter.get(), meta_block_name, &block_handle); + + if (!status.ok()) { + return status; + } + + // Reading metablock + return ReadBlockContents(file, footer, read_options, block_handle, contents, + ioptions, false /* decompress */); +} + +} // namespace rocksdb diff --git a/external/rocksdb/table/meta_blocks.h b/external/rocksdb/table/meta_blocks.h new file mode 100755 index 0000000000..99084d7902 --- /dev/null +++ b/external/rocksdb/table/meta_blocks.h @@ -0,0 +1,130 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
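Both the reader above and the builder declared below rely on the same on-disk contract: the properties block stores its keys in strictly sorted order (hence the assert in ReadProperties()), and every predefined numeric property is a varint-encoded uint64. A minimal, self-contained sketch of that encoding, assuming nothing beyond the standard library (PutVarint64 here is a local stand-in for RocksDB's util/coding helper of the same name, the property names are only examples, and the real block additionally stores length prefixes and restart points):

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <string>

    // Local stand-in for rocksdb::PutVarint64: little-endian base-128
    // with a continuation bit, matching what GetVarint64 decodes.
    static void PutVarint64(std::string* dst, uint64_t v) {
      while (v >= 0x80) {
        dst->push_back(static_cast<char>((v & 0x7F) | 0x80));
        v >>= 7;
      }
      dst->push_back(static_cast<char>(v));
    }

    int main() {
      // A std::map keeps keys sorted, which is why the reader can assert
      // that the properties block is strictly sorted with no duplicates.
      std::map<std::string, std::string> props;

      std::string num_entries;
      PutVarint64(&num_entries, 1000);  // a uint64_t property value
      props["rocksdb.num.entries"] = num_entries;
      props["rocksdb.comparator"] = "leveldb.BytewiseComparator";

      for (const auto& kv : props) {
        std::cout << kv.first << ": " << kv.second.size() << " byte value\n";
      }
      return 0;
    }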
+#pragma once + +#include <map> +#include <memory> +#include <string> +#include <vector> + +#include "db/builder.h" +#include "db/table_properties_collector.h" +#include "util/kv_map.h" +#include "rocksdb/comparator.h" +#include "rocksdb/options.h" +#include "rocksdb/slice.h" +#include "table/block_builder.h" +#include "table/format.h" + +namespace rocksdb { + +class BlockBuilder; +class BlockHandle; +class Env; +class Footer; +class Logger; +class RandomAccessFile; +struct TableProperties; +class InternalIterator; + +class MetaIndexBuilder { + public: + MetaIndexBuilder(const MetaIndexBuilder&) = delete; + MetaIndexBuilder& operator=(const MetaIndexBuilder&) = delete; + + MetaIndexBuilder(); + void Add(const std::string& key, const BlockHandle& handle); + + // Write all the added key/value pairs to the block and return the contents + // of the block. + Slice Finish(); + + private: + // store the sorted key/handle of the metablocks. + stl_wrappers::KVMap meta_block_handles_; + std::unique_ptr<BlockBuilder> meta_index_block_; +}; + +class PropertyBlockBuilder { + public: + PropertyBlockBuilder(const PropertyBlockBuilder&) = delete; + PropertyBlockBuilder& operator=(const PropertyBlockBuilder&) = delete; + + PropertyBlockBuilder(); + + void AddTableProperty(const TableProperties& props); + void Add(const std::string& key, uint64_t value); + void Add(const std::string& key, const std::string& value); + void Add(const UserCollectedProperties& user_collected_properties); + + // Write all the added entries to the block and return the block contents + Slice Finish(); + + private: + std::unique_ptr<BlockBuilder> properties_block_; + stl_wrappers::KVMap props_; +}; + +// If we encounter any error during user-defined statistics collection, +// we'll write a warning message to the info log. +void LogPropertiesCollectionError( + Logger* info_log, const std::string& method, const std::string& name); + +// Utility functions that help the table builder trigger batch events for +// user-defined property collectors. +// The return value indicates whether any error occurred; if an error +// occurred, a warning message will be logged. +// NotifyCollectTableCollectorsOnAdd() triggers the `Add` event for all +// property collectors. +bool NotifyCollectTableCollectorsOnAdd( + const Slice& key, const Slice& value, uint64_t file_size, + const std::vector<std::unique_ptr<IntTblPropCollector>>& collectors, + Logger* info_log); + +// NotifyCollectTableCollectorsOnFinish() triggers the `Finish` event for all +// property collectors. The collected properties will be added to `builder`. +bool NotifyCollectTableCollectorsOnFinish( + const std::vector<std::unique_ptr<IntTblPropCollector>>& collectors, + Logger* info_log, PropertyBlockBuilder* builder); + +// Read the properties from the table. +// @returns a status to indicate if the operation succeeded. On success, +// *table_properties will point to a heap-allocated TableProperties +// object, otherwise value of `table_properties` will not be modified.
+Status ReadTableProperties(RandomAccessFileReader* file, uint64_t file_size, + uint64_t table_magic_number, + const ImmutableCFOptions &ioptions, + TableProperties** properties); + +// Find the meta block from the meta index block. +Status FindMetaBlock(InternalIterator* meta_index_iter, + const std::string& meta_block_name, + BlockHandle* block_handle); + +// Find the meta block +Status FindMetaBlock(RandomAccessFileReader* file, uint64_t file_size, + uint64_t table_magic_number, + const ImmutableCFOptions &ioptions, + const std::string& meta_block_name, + BlockHandle* block_handle); + +// Read the specified meta block with name meta_block_name +// from `file` and initialize `contents` with contents of this block. +// Return Status::OK in case of success. +Status ReadMetaBlock(RandomAccessFileReader* file, uint64_t file_size, + uint64_t table_magic_number, + const ImmutableCFOptions &ioptions, + const std::string& meta_block_name, + BlockContents* contents); + +} // namespace rocksdb diff --git a/external/rocksdb/table/mock_table.cc b/external/rocksdb/table/mock_table.cc new file mode 100755 index 0000000000..2b311d0f8b --- /dev/null +++ b/external/rocksdb/table/mock_table.cc @@ -0,0 +1,143 @@ +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "table/mock_table.h" + +#include "db/dbformat.h" +#include "port/port.h" +#include "rocksdb/table_properties.h" +#include "table/get_context.h" +#include "util/coding.h" +#include "util/file_reader_writer.h" + +namespace rocksdb { +namespace mock { + +namespace { + +const InternalKeyComparator icmp_(BytewiseComparator()); + +} // namespace + +stl_wrappers::KVMap MakeMockFile( + std::initializer_list> l) { + return stl_wrappers::KVMap(l, stl_wrappers::LessOfComparator(&icmp_)); +} + +InternalIterator* MockTableReader::NewIterator(const ReadOptions&, Arena* arena, + bool skip_filters) { + return new MockTableIterator(table_); +} + +Status MockTableReader::Get(const ReadOptions&, const Slice& key, + GetContext* get_context, bool skip_filters) { + std::unique_ptr iter(new MockTableIterator(table_)); + for (iter->Seek(key); iter->Valid(); iter->Next()) { + ParsedInternalKey parsed_key; + if (!ParseInternalKey(iter->key(), &parsed_key)) { + return Status::Corruption(Slice()); + } + + if (!get_context->SaveValue(parsed_key, iter->value())) { + break; + } + } + return Status::OK(); +} + +std::shared_ptr MockTableReader::GetTableProperties() + const { + return std::shared_ptr(new TableProperties()); +} + +MockTableFactory::MockTableFactory() : next_id_(1) {} + +Status MockTableFactory::NewTableReader( + const TableReaderOptions& table_reader_options, + unique_ptr&& file, uint64_t file_size, + unique_ptr* table_reader, + bool prefetch_index_and_filter_in_cache) const { + uint32_t id = GetIDFromFile(file.get()); + + MutexLock lock_guard(&file_system_.mutex); + + auto it = file_system_.files.find(id); + if (it == file_system_.files.end()) { + return Status::IOError("Mock file not found"); + } + + table_reader->reset(new MockTableReader(it->second)); + + return Status::OK(); +} + +TableBuilder* MockTableFactory::NewTableBuilder( + const 
TableBuilderOptions& table_builder_options, uint32_t column_family_id, + WritableFileWriter* file) const { + uint32_t id = GetAndWriteNextID(file); + + return new MockTableBuilder(id, &file_system_); +} + +Status MockTableFactory::CreateMockTable(Env* env, const std::string& fname, + stl_wrappers::KVMap file_contents) { + std::unique_ptr file; + auto s = env->NewWritableFile(fname, &file, EnvOptions()); + if (!s.ok()) { + return s; + } + + WritableFileWriter file_writer(std::move(file), EnvOptions()); + + uint32_t id = GetAndWriteNextID(&file_writer); + file_system_.files.insert({id, std::move(file_contents)}); + return Status::OK(); +} + +uint32_t MockTableFactory::GetAndWriteNextID(WritableFileWriter* file) const { + uint32_t next_id = next_id_.fetch_add(1); + char buf[4]; + EncodeFixed32(buf, next_id); + file->Append(Slice(buf, 4)); + return next_id; +} + +uint32_t MockTableFactory::GetIDFromFile(RandomAccessFileReader* file) const { + char buf[4]; + Slice result; + file->Read(0, 4, &result, buf); + assert(result.size() == 4); + return DecodeFixed32(buf); +} + +void MockTableFactory::AssertSingleFile( + const stl_wrappers::KVMap& file_contents) { + ASSERT_EQ(file_system_.files.size(), 1U); + ASSERT_TRUE(file_contents == file_system_.files.begin()->second); +} + +void MockTableFactory::AssertLatestFile( + const stl_wrappers::KVMap& file_contents) { + ASSERT_GE(file_system_.files.size(), 1U); + auto latest = file_system_.files.end(); + --latest; + + if (file_contents != latest->second) { + std::cout << "Wrong content! Content of latest file:" << std::endl; + for (const auto& kv : latest->second) { + ParsedInternalKey ikey; + std::string key, value; + std::tie(key, value) = kv; + ParseInternalKey(Slice(key), &ikey); + std::cout << ikey.DebugString(false) << " -> " << value << std::endl; + } + ASSERT_TRUE(false); + } +} + +} // namespace mock +} // namespace rocksdb diff --git a/external/rocksdb/table/mock_table.h b/external/rocksdb/table/mock_table.h new file mode 100755 index 0000000000..d9afba46f8 --- /dev/null +++ b/external/rocksdb/table/mock_table.h @@ -0,0 +1,189 @@ +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
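The mock machinery in these two files never inspects real file contents: MockTableBuilder::Finish() publishes its in-memory KVMap into a shared MockTableFileSystem under a 32-bit id, and the factory's NewTableReader() recovers that id (the only bytes actually written to the file) to find the map again. A rough self-contained sketch of that round trip, using std:: types in place of the RocksDB ones:

    #include <cstdint>
    #include <iostream>
    #include <map>
    #include <mutex>
    #include <string>

    using KVMap = std::map<std::string, std::string>;

    // Toy stand-in for MockTableFileSystem: tables live in memory, keyed
    // by the 32-bit id that the builder writes as the file's only content.
    struct ToyMockFileSystem {
      std::mutex mutex;
      std::map<uint32_t, KVMap> files;
    };

    int main() {
      ToyMockFileSystem fs;
      uint32_t id = 1;  // what GetAndWriteNextID() would hand out

      // "Build" a table: Finish() publishes the KVMap under the id.
      {
        std::lock_guard<std::mutex> guard(fs.mutex);
        fs.files.insert({id, KVMap{{"k1", "v1"}, {"k2", "v2"}}});
      }

      // "Open" it again: NewTableReader() looks the id up.
      std::lock_guard<std::mutex> guard(fs.mutex);
      auto it = fs.files.find(id);
      if (it != fs.files.end()) {
        for (const auto& kv : it->second) {
          std::cout << kv.first << " -> " << kv.second << "\n";
        }
      }
      return 0;
    }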
+#pragma once + +#include <algorithm> +#include <atomic> +#include <map> +#include <memory> +#include <set> +#include <string> +#include <utility> + +#include "util/kv_map.h" +#include "port/port.h" +#include "rocksdb/comparator.h" +#include "rocksdb/table.h" +#include "table/internal_iterator.h" +#include "table/table_builder.h" +#include "table/table_reader.h" +#include "util/mutexlock.h" +#include "util/testharness.h" +#include "util/testutil.h" + +namespace rocksdb { +namespace mock { + +stl_wrappers::KVMap MakeMockFile( + std::initializer_list<std::pair<const std::string, std::string>> l = {}); + +struct MockTableFileSystem { + port::Mutex mutex; + std::map<uint32_t, stl_wrappers::KVMap> files; +}; + +class MockTableReader : public TableReader { + public: + explicit MockTableReader(const stl_wrappers::KVMap& table) : table_(table) {} + + InternalIterator* NewIterator(const ReadOptions&, Arena* arena, + bool skip_filters = false) override; + + Status Get(const ReadOptions&, const Slice& key, GetContext* get_context, + bool skip_filters = false) override; + + uint64_t ApproximateOffsetOf(const Slice& key) override { return 0; } + + virtual size_t ApproximateMemoryUsage() const override { return 0; } + + void SetupForCompaction() override {} + + std::shared_ptr<const TableProperties> GetTableProperties() const override; + + ~MockTableReader() {} + + private: + const stl_wrappers::KVMap& table_; +}; + +class MockTableIterator : public InternalIterator { + public: + explicit MockTableIterator(const stl_wrappers::KVMap& table) : table_(table) { + itr_ = table_.end(); + } + + bool Valid() const override { return itr_ != table_.end(); } + + void SeekToFirst() override { itr_ = table_.begin(); } + + void SeekToLast() override { + itr_ = table_.end(); + --itr_; + } + + void Seek(const Slice& target) override { + std::string str_target(target.data(), target.size()); + itr_ = table_.lower_bound(str_target); + } + + void Next() override { ++itr_; } + + void Prev() override { + if (itr_ == table_.begin()) { + itr_ = table_.end(); + } else { + --itr_; + } + } + + Slice key() const override { return Slice(itr_->first); } + + Slice value() const override { return Slice(itr_->second); } + + Status status() const override { return Status::OK(); } + + private: + const stl_wrappers::KVMap& table_; + stl_wrappers::KVMap::const_iterator itr_; +}; + +class MockTableBuilder : public TableBuilder { + public: + MockTableBuilder(uint32_t id, MockTableFileSystem* file_system) + : id_(id), file_system_(file_system) { + table_ = MakeMockFile({}); + } + + // REQUIRES: Either Finish() or Abandon() has been called. + ~MockTableBuilder() {} + + // Add key,value to the table being constructed. + // REQUIRES: key is after any previously added key according to comparator. + // REQUIRES: Finish(), Abandon() have not been called + void Add(const Slice& key, const Slice& value) override { + table_.insert({key.ToString(), value.ToString()}); + } + + // Return non-ok iff some error has been detected.
+ Status status() const override { return Status::OK(); } + + Status Finish() override { + MutexLock lock_guard(&file_system_->mutex); + file_system_->files.insert({id_, table_}); + return Status::OK(); + } + + void Abandon() override {} + + uint64_t NumEntries() const override { return table_.size(); } + + uint64_t FileSize() const override { return table_.size(); } + + TableProperties GetTableProperties() const override { + return TableProperties(); + } + + private: + uint32_t id_; + MockTableFileSystem* file_system_; + stl_wrappers::KVMap table_; +}; + +class MockTableFactory : public TableFactory { + public: + MockTableFactory(); + const char* Name() const override { return "MockTable"; } + Status NewTableReader( + const TableReaderOptions& table_reader_options, + unique_ptr<RandomAccessFileReader>&& file, uint64_t file_size, + unique_ptr<TableReader>* table_reader, + bool prefetch_index_and_filter_in_cache = true) const override; + TableBuilder* NewTableBuilder( + const TableBuilderOptions& table_builder_options, + uint32_t column_family_id, WritableFileWriter* file) const override; + + // This function will directly create a mock table instead of going through + // MockTableBuilder. file_contents has to have a format of + // <internal_key, value>. Those key-value pairs will then be inserted into + // the mock table. + Status CreateMockTable(Env* env, const std::string& fname, + stl_wrappers::KVMap file_contents); + + virtual Status SanitizeOptions( + const DBOptions& db_opts, + const ColumnFamilyOptions& cf_opts) const override { + return Status::OK(); + } + + virtual std::string GetPrintableTableOptions() const override { + return std::string(); + } + + // This function will assert that only a single file exists and that the + // contents are equal to file_contents + void AssertSingleFile(const stl_wrappers::KVMap& file_contents); + void AssertLatestFile(const stl_wrappers::KVMap& file_contents); + + private: + uint32_t GetAndWriteNextID(WritableFileWriter* file) const; + uint32_t GetIDFromFile(RandomAccessFileReader* file) const; + + mutable MockTableFileSystem file_system_; + mutable std::atomic<uint32_t> next_id_; +}; + +} // namespace mock +} // namespace rocksdb diff --git a/external/rocksdb/table/persistent_cache_helper.cc b/external/rocksdb/table/persistent_cache_helper.cc new file mode 100755 index 0000000000..d68c80735c --- /dev/null +++ b/external/rocksdb/table/persistent_cache_helper.cc @@ -0,0 +1,112 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory.
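The helper in this file addresses cached pages by a key derived from a per-table prefix plus the block handle; the cache_key buffer being sized kMaxCacheKeyPrefixSize + kMaxVarint64Length suggests the handle's offset is appended to the prefix as a varint. Treating that layout as an assumption, here is a toy version of the insert/lookup keying against an in-memory stand-in for the persistent cache:

    #include <cstdint>
    #include <iostream>
    #include <string>
    #include <unordered_map>

    // Local varint64 encoder standing in for rocksdb::EncodeVarint64.
    static void PutVarint64(std::string* dst, uint64_t v) {
      while (v >= 0x80) {
        dst->push_back(static_cast<char>((v & 0x7F) | 0x80));
        v >>= 7;
      }
      dst->push_back(static_cast<char>(v));
    }

    int main() {
      // The "persistent cache"; the real one is an abstract interface.
      std::unordered_map<std::string, std::string> cache;

      std::string key = "sst-42/";  // hypothetical per-table key prefix
      PutVarint64(&key, 8192);      // block handle offset (assumed layout)

      cache[key] = "<raw block bytes>";  // what InsertRawPage would store
      auto it = cache.find(key);         // what LookupRawPage would probe
      std::cout << (it != cache.end() ? "cache hit" : "cache miss") << "\n";
      return 0;
    }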
+#include "table/persistent_cache_helper.h" +#include "table/format.h" + +namespace rocksdb { + +void PersistentCacheHelper::InsertRawPage( + const PersistentCacheOptions& cache_options, const BlockHandle& handle, + const char* data, const size_t size) { + assert(cache_options.persistent_cache); + assert(cache_options.persistent_cache->IsCompressed()); + + // construct the page key + char cache_key[BlockBasedTable::kMaxCacheKeyPrefixSize + kMaxVarint64Length]; + auto key = BlockBasedTable::GetCacheKey(cache_options.key_prefix.c_str(), + cache_options.key_prefix.size(), + handle, cache_key); + // insert content to cache + cache_options.persistent_cache->Insert(key, data, size); +} + +void PersistentCacheHelper::InsertUncompressedPage( + const PersistentCacheOptions& cache_options, const BlockHandle& handle, + const BlockContents& contents) { + assert(cache_options.persistent_cache); + assert(!cache_options.persistent_cache->IsCompressed()); + if (!contents.cachable || contents.compression_type != kNoCompression) { + // We shouldn't cache this. Either + // (1) content is not cacheable + // (2) content is compressed + return; + } + + // construct the page key + char cache_key[BlockBasedTable::kMaxCacheKeyPrefixSize + kMaxVarint64Length]; + auto key = BlockBasedTable::GetCacheKey(cache_options.key_prefix.c_str(), + cache_options.key_prefix.size(), + handle, cache_key); + // insert block contents to page cache + cache_options.persistent_cache->Insert(key, contents.data.data(), + contents.data.size()); +} + +Status PersistentCacheHelper::LookupRawPage( + const PersistentCacheOptions& cache_options, const BlockHandle& handle, + std::unique_ptr* raw_data, const size_t raw_data_size) { + assert(cache_options.persistent_cache); + assert(cache_options.persistent_cache->IsCompressed()); + + // construct the page key + char cache_key[BlockBasedTable::kMaxCacheKeyPrefixSize + kMaxVarint64Length]; + auto key = BlockBasedTable::GetCacheKey(cache_options.key_prefix.c_str(), + cache_options.key_prefix.size(), + handle, cache_key); + // Lookup page + size_t size; + Status s = cache_options.persistent_cache->Lookup(key, raw_data, &size); + if (!s.ok()) { + // cache miss + RecordTick(cache_options.statistics, PERSISTENT_CACHE_MISS); + return s; + } + + // cache hit + assert(raw_data_size == handle.size() + kBlockTrailerSize); + assert(size == raw_data_size); + RecordTick(cache_options.statistics, PERSISTENT_CACHE_HIT); + return Status::OK(); +} + +Status PersistentCacheHelper::LookupUncompressedPage( + const PersistentCacheOptions& cache_options, const BlockHandle& handle, + BlockContents* contents) { + assert(cache_options.persistent_cache); + assert(!cache_options.persistent_cache->IsCompressed()); + if (!contents) { + // We shouldn't lookup in the cache. 
There is + // nowhere to store the result. + return Status::NotFound(); + } + + // construct the page key + char cache_key[BlockBasedTable::kMaxCacheKeyPrefixSize + kMaxVarint64Length]; + auto key = BlockBasedTable::GetCacheKey(cache_options.key_prefix.c_str(), + cache_options.key_prefix.size(), + handle, cache_key); + // Lookup page + std::unique_ptr<char[]> data; + size_t size; + Status s = cache_options.persistent_cache->Lookup(key, &data, &size); + if (!s.ok()) { + // cache miss + RecordTick(cache_options.statistics, PERSISTENT_CACHE_MISS); + return s; + } + + // please note we are potentially comparing compressed data size with + // uncompressed data size + assert(handle.size() <= size); + + // update stats + RecordTick(cache_options.statistics, PERSISTENT_CACHE_HIT); + // construct result and return + *contents = + BlockContents(std::move(data), size, false /*cacheable*/, kNoCompression); + return Status::OK(); +} + +} // namespace rocksdb diff --git a/external/rocksdb/table/persistent_cache_helper.h b/external/rocksdb/table/persistent_cache_helper.h new file mode 100755 index 0000000000..45a1f87d2a --- /dev/null +++ b/external/rocksdb/table/persistent_cache_helper.h @@ -0,0 +1,63 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +#pragma once + +#include <string> + +#include "table/block_based_table_reader.h" +#include "util/statistics.h" + +namespace rocksdb { + +struct BlockContents; + +// PersistentCacheOptions +// +// This describes the caching behavior for the page cache. +// It is used to pass the context for caching and the cache handle. +struct PersistentCacheOptions { + PersistentCacheOptions() {} + explicit PersistentCacheOptions( + const std::shared_ptr<PersistentCache>& _persistent_cache, + const std::string _key_prefix, Statistics* const _statistics) + : persistent_cache(_persistent_cache), + key_prefix(_key_prefix), + statistics(_statistics) {} + + virtual ~PersistentCacheOptions() {} + + std::shared_ptr<PersistentCache> persistent_cache; + std::string key_prefix; + Statistics* statistics = nullptr; +}; + +// PersistentCacheHelper +// +// Encapsulates some of the helper logic for reading from and writing to the +// cache +class PersistentCacheHelper { + public: + // insert block into raw page cache + static void InsertRawPage(const PersistentCacheOptions& cache_options, + const BlockHandle& handle, const char* data, + const size_t size); + + // insert block into uncompressed cache + static void InsertUncompressedPage( + const PersistentCacheOptions& cache_options, const BlockHandle& handle, + const BlockContents& contents); + + // lookup block from raw page cache + static Status LookupRawPage(const PersistentCacheOptions& cache_options, + const BlockHandle& handle, + std::unique_ptr<char[]>* raw_data, + const size_t raw_data_size); + + // lookup block from uncompressed cache + static Status LookupUncompressedPage( + const PersistentCacheOptions& cache_options, const BlockHandle& handle, + BlockContents* contents); +}; + +} // namespace rocksdb diff --git a/external/rocksdb/table/plain_table_builder.cc b/external/rocksdb/table/plain_table_builder.cc new file mode 100755 index 0000000000..b438ed86e2 --- /dev/null +++ b/external/rocksdb/table/plain_table_builder.cc @@ -0,0 +1,290 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#ifndef ROCKSDB_LITE +#include "table/plain_table_builder.h" + +#include + +#include +#include +#include + +#include "rocksdb/comparator.h" +#include "rocksdb/env.h" +#include "rocksdb/filter_policy.h" +#include "rocksdb/options.h" +#include "rocksdb/table.h" +#include "table/plain_table_factory.h" +#include "db/dbformat.h" +#include "table/block_builder.h" +#include "table/bloom_block.h" +#include "table/plain_table_index.h" +#include "table/format.h" +#include "table/meta_blocks.h" +#include "util/coding.h" +#include "util/crc32c.h" +#include "util/file_reader_writer.h" +#include "util/stop_watch.h" + +namespace rocksdb { + +namespace { + +// a utility that helps writing block content to the file +// @offset will advance if @block_contents was successfully written. +// @block_handle the block handle this particular block. +Status WriteBlock(const Slice& block_contents, WritableFileWriter* file, + uint64_t* offset, BlockHandle* block_handle) { + block_handle->set_offset(*offset); + block_handle->set_size(block_contents.size()); + Status s = file->Append(block_contents); + + if (s.ok()) { + *offset += block_contents.size(); + } + return s; +} + +} // namespace + +// kPlainTableMagicNumber was picked by running +// echo rocksdb.table.plain | sha1sum +// and taking the leading 64 bits. +extern const uint64_t kPlainTableMagicNumber = 0x8242229663bf9564ull; +extern const uint64_t kLegacyPlainTableMagicNumber = 0x4f3418eb7a8f13b8ull; + +PlainTableBuilder::PlainTableBuilder( + const ImmutableCFOptions& ioptions, + const std::vector>* + int_tbl_prop_collector_factories, + uint32_t column_family_id, WritableFileWriter* file, uint32_t user_key_len, + EncodingType encoding_type, size_t index_sparseness, + uint32_t bloom_bits_per_key, const std::string& column_family_name, + uint32_t num_probes, size_t huge_page_tlb_size, double hash_table_ratio, + bool store_index_in_file) + : ioptions_(ioptions), + bloom_block_(num_probes), + file_(file), + bloom_bits_per_key_(bloom_bits_per_key), + huge_page_tlb_size_(huge_page_tlb_size), + encoder_(encoding_type, user_key_len, ioptions.prefix_extractor, + index_sparseness), + store_index_in_file_(store_index_in_file), + prefix_extractor_(ioptions.prefix_extractor) { + // Build index block and save it in the file if hash_table_ratio > 0 + if (store_index_in_file_) { + assert(hash_table_ratio > 0 || IsTotalOrderMode()); + index_builder_.reset( + new PlainTableIndexBuilder(&arena_, ioptions, index_sparseness, + hash_table_ratio, huge_page_tlb_size_)); + assert(bloom_bits_per_key_ > 0); + properties_.user_collected_properties + [PlainTablePropertyNames::kBloomVersion] = "1"; // For future use + } + + properties_.fixed_key_len = user_key_len; + + // for plain table, we put all the data in a big chuck. + properties_.num_data_blocks = 1; + // Fill it later if store_index_in_file_ == true + properties_.index_size = 0; + properties_.filter_size = 0; + // To support roll-back to previous version, now still use version 0 for + // plain encoding. + properties_.format_version = (encoding_type == kPlain) ? 
0 : 1; + properties_.column_family_id = column_family_id; + properties_.column_family_name = column_family_name; + + if (ioptions_.prefix_extractor) { + properties_.user_collected_properties + [PlainTablePropertyNames::kPrefixExtractorName] = + ioptions_.prefix_extractor->Name(); + } + + std::string val; + PutFixed32(&val, static_cast(encoder_.GetEncodingType())); + properties_.user_collected_properties + [PlainTablePropertyNames::kEncodingType] = val; + + for (auto& collector_factories : *int_tbl_prop_collector_factories) { + table_properties_collectors_.emplace_back( + collector_factories->CreateIntTblPropCollector(column_family_id)); + } +} + +PlainTableBuilder::~PlainTableBuilder() { +} + +void PlainTableBuilder::Add(const Slice& key, const Slice& value) { + // temp buffer for metadata bytes between key and value. + char meta_bytes_buf[6]; + size_t meta_bytes_buf_size = 0; + + ParsedInternalKey internal_key; + ParseInternalKey(key, &internal_key); + + // Store key hash + if (store_index_in_file_) { + if (ioptions_.prefix_extractor == nullptr) { + keys_or_prefixes_hashes_.push_back(GetSliceHash(internal_key.user_key)); + } else { + Slice prefix = + ioptions_.prefix_extractor->Transform(internal_key.user_key); + keys_or_prefixes_hashes_.push_back(GetSliceHash(prefix)); + } + } + + // Write value + assert(offset_ <= std::numeric_limits::max()); + auto prev_offset = static_cast(offset_); + // Write out the key + encoder_.AppendKey(key, file_, &offset_, meta_bytes_buf, + &meta_bytes_buf_size); + if (SaveIndexInFile()) { + index_builder_->AddKeyPrefix(GetPrefix(internal_key), prev_offset); + } + + // Write value length + uint32_t value_size = static_cast(value.size()); + char* end_ptr = + EncodeVarint32(meta_bytes_buf + meta_bytes_buf_size, value_size); + assert(end_ptr <= meta_bytes_buf + sizeof(meta_bytes_buf)); + meta_bytes_buf_size = end_ptr - meta_bytes_buf; + file_->Append(Slice(meta_bytes_buf, meta_bytes_buf_size)); + + // Write value + file_->Append(value); + offset_ += value_size + meta_bytes_buf_size; + + properties_.num_entries++; + properties_.raw_key_size += key.size(); + properties_.raw_value_size += value.size(); + + // notify property collectors + NotifyCollectTableCollectorsOnAdd( + key, value, offset_, table_properties_collectors_, ioptions_.info_log); +} + +Status PlainTableBuilder::status() const { return status_; } + +Status PlainTableBuilder::Finish() { + assert(!closed_); + closed_ = true; + + properties_.data_size = offset_; + + // Write the following blocks + // 1. [meta block: bloom] - optional + // 2. [meta block: index] - optional + // 3. [meta block: properties] + // 4. [metaindex block] + // 5. 
[footer] + + MetaIndexBuilder meta_index_builer; + + if (store_index_in_file_ && (properties_.num_entries > 0)) { + assert(properties_.num_entries <= std::numeric_limits::max()); + bloom_block_.SetTotalBits( + &arena_, + static_cast(properties_.num_entries) * bloom_bits_per_key_, + ioptions_.bloom_locality, huge_page_tlb_size_, ioptions_.info_log); + + PutVarint32(&properties_.user_collected_properties + [PlainTablePropertyNames::kNumBloomBlocks], + bloom_block_.GetNumBlocks()); + + bloom_block_.AddKeysHashes(keys_or_prefixes_hashes_); + BlockHandle bloom_block_handle; + auto finish_result = bloom_block_.Finish(); + + properties_.filter_size = finish_result.size(); + auto s = WriteBlock(finish_result, file_, &offset_, &bloom_block_handle); + + if (!s.ok()) { + return s; + } + + BlockHandle index_block_handle; + finish_result = index_builder_->Finish(); + + properties_.index_size = finish_result.size(); + s = WriteBlock(finish_result, file_, &offset_, &index_block_handle); + + if (!s.ok()) { + return s; + } + + meta_index_builer.Add(BloomBlockBuilder::kBloomBlock, bloom_block_handle); + meta_index_builer.Add(PlainTableIndexBuilder::kPlainTableIndexBlock, + index_block_handle); + } + + // Calculate bloom block size and index block size + PropertyBlockBuilder property_block_builder; + // -- Add basic properties + property_block_builder.AddTableProperty(properties_); + + property_block_builder.Add(properties_.user_collected_properties); + + // -- Add user collected properties + NotifyCollectTableCollectorsOnFinish(table_properties_collectors_, + ioptions_.info_log, + &property_block_builder); + + // -- Write property block + BlockHandle property_block_handle; + auto s = WriteBlock( + property_block_builder.Finish(), + file_, + &offset_, + &property_block_handle + ); + if (!s.ok()) { + return s; + } + meta_index_builer.Add(kPropertiesBlock, property_block_handle); + + // -- write metaindex block + BlockHandle metaindex_block_handle; + s = WriteBlock( + meta_index_builer.Finish(), + file_, + &offset_, + &metaindex_block_handle + ); + if (!s.ok()) { + return s; + } + + // Write Footer + // no need to write out new footer if we're using default checksum + Footer footer(kLegacyPlainTableMagicNumber, 0); + footer.set_metaindex_handle(metaindex_block_handle); + footer.set_index_handle(BlockHandle::NullBlockHandle()); + std::string footer_encoding; + footer.EncodeTo(&footer_encoding); + s = file_->Append(footer_encoding); + if (s.ok()) { + offset_ += footer_encoding.size(); + } + + return s; +} + +void PlainTableBuilder::Abandon() { + closed_ = true; +} + +uint64_t PlainTableBuilder::NumEntries() const { + return properties_.num_entries; +} + +uint64_t PlainTableBuilder::FileSize() const { + return offset_; +} + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/plain_table_builder.h b/external/rocksdb/table/plain_table_builder.h new file mode 100755 index 0000000000..ba63a82f6f --- /dev/null +++ b/external/rocksdb/table/plain_table_builder.h @@ -0,0 +1,137 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
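Finish() above lays out the file by repeatedly appending a block, recording its (offset, size) handle, and advancing a running offset; the metaindex then maps block names to those handles and the footer points at the metaindex. A minimal sketch of that append-and-record pattern, where Handle and AppendBlock are illustrative stand-ins for BlockHandle and the file-local WriteBlock():

    #include <cstdint>
    #include <iostream>
    #include <string>

    struct Handle { uint64_t offset, size; };

    // Append a blob, remember where it landed, advance the running offset.
    static Handle AppendBlock(std::string* file, uint64_t* offset,
                              const std::string& contents) {
      Handle h{*offset, contents.size()};
      file->append(contents);
      *offset += contents.size();
      return h;
    }

    int main() {
      std::string file;
      uint64_t offset = 0;

      AppendBlock(&file, &offset, "<data rows>");  // the single data "block"
      Handle props = AppendBlock(&file, &offset, "<properties block>");
      Handle meta  = AppendBlock(&file, &offset, "<metaindex block>");

      std::cout << "properties @ " << props.offset
                << ", metaindex @ " << meta.offset
                << ", file size " << offset << "\n";
      return 0;
    }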
+ +#pragma once +#ifndef ROCKSDB_LITE +#include +#include +#include +#include "rocksdb/options.h" +#include "rocksdb/status.h" +#include "rocksdb/table.h" +#include "rocksdb/table_properties.h" +#include "table/bloom_block.h" +#include "table/plain_table_index.h" +#include "table/plain_table_key_coding.h" +#include "table/table_builder.h" + +namespace rocksdb { + +class BlockBuilder; +class BlockHandle; +class WritableFile; +class TableBuilder; + +class PlainTableBuilder: public TableBuilder { + public: + // Create a builder that will store the contents of the table it is + // building in *file. Does not close the file. It is up to the + // caller to close the file after calling Finish(). The output file + // will be part of level specified by 'level'. A value of -1 means + // that the caller does not know which level the output file will reside. + PlainTableBuilder( + const ImmutableCFOptions& ioptions, + const std::vector>* + int_tbl_prop_collector_factories, + uint32_t column_family_id, WritableFileWriter* file, + uint32_t user_key_size, EncodingType encoding_type, + size_t index_sparseness, uint32_t bloom_bits_per_key, + const std::string& column_family_name, uint32_t num_probes = 6, + size_t huge_page_tlb_size = 0, double hash_table_ratio = 0, + bool store_index_in_file = false); + + // REQUIRES: Either Finish() or Abandon() has been called. + ~PlainTableBuilder(); + + // Add key,value to the table being constructed. + // REQUIRES: key is after any previously added key according to comparator. + // REQUIRES: Finish(), Abandon() have not been called + void Add(const Slice& key, const Slice& value) override; + + // Return non-ok iff some error has been detected. + Status status() const override; + + // Finish building the table. Stops using the file passed to the + // constructor after this function returns. + // REQUIRES: Finish(), Abandon() have not been called + Status Finish() override; + + // Indicate that the contents of this builder should be abandoned. Stops + // using the file passed to the constructor after this function returns. + // If the caller is not going to call Finish(), it must call Abandon() + // before destroying this builder. + // REQUIRES: Finish(), Abandon() have not been called + void Abandon() override; + + // Number of calls to Add() so far. + uint64_t NumEntries() const override; + + // Size of the file generated so far. If invoked after a successful + // Finish() call, returns the size of the final generated file. + uint64_t FileSize() const override; + + TableProperties GetTableProperties() const override { return properties_; } + + bool SaveIndexInFile() const { return store_index_in_file_; } + + private: + Arena arena_; + const ImmutableCFOptions& ioptions_; + std::vector> + table_properties_collectors_; + + BloomBlockBuilder bloom_block_; + std::unique_ptr index_builder_; + + WritableFileWriter* file_; + uint64_t offset_ = 0; + uint32_t bloom_bits_per_key_; + size_t huge_page_tlb_size_; + Status status_; + TableProperties properties_; + PlainTableKeyEncoder encoder_; + + bool store_index_in_file_; + + std::vector keys_or_prefixes_hashes_; + bool closed_ = false; // Either Finish() or Abandon() has been called. 
+ + const SliceTransform* prefix_extractor_; + + Slice GetPrefix(const Slice& target) const { + assert(target.size() >= 8); // target is internal key + return GetPrefixFromUserKey(GetUserKey(target)); + } + + Slice GetPrefix(const ParsedInternalKey& target) const { + return GetPrefixFromUserKey(target.user_key); + } + + Slice GetUserKey(const Slice& key) const { + return Slice(key.data(), key.size() - 8); + } + + Slice GetPrefixFromUserKey(const Slice& user_key) const { + if (!IsTotalOrderMode()) { + return prefix_extractor_->Transform(user_key); + } else { + // Use empty slice as prefix if prefix_extractor is not set. + // In that case, + // it falls back to pure binary search and + // total iterator seek is supported. + return Slice(); + } + } + + bool IsTotalOrderMode() const { return (prefix_extractor_ == nullptr); } + + // No copying allowed + PlainTableBuilder(const PlainTableBuilder&) = delete; + void operator=(const PlainTableBuilder&) = delete; +}; + +} // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/plain_table_factory.cc b/external/rocksdb/table/plain_table_factory.cc new file mode 100755 index 0000000000..55a68fce17 --- /dev/null +++ b/external/rocksdb/table/plain_table_factory.cc @@ -0,0 +1,101 @@ +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#ifndef ROCKSDB_LITE +#include "table/plain_table_factory.h" + +#include +#include +#include "db/dbformat.h" +#include "table/plain_table_builder.h" +#include "table/plain_table_reader.h" +#include "port/port.h" + +namespace rocksdb { + +Status PlainTableFactory::NewTableReader( + const TableReaderOptions& table_reader_options, + unique_ptr&& file, uint64_t file_size, + unique_ptr* table, + bool prefetch_index_and_filter_in_cache) const { + return PlainTableReader::Open( + table_reader_options.ioptions, table_reader_options.env_options, + table_reader_options.internal_comparator, std::move(file), file_size, + table, table_options_.bloom_bits_per_key, table_options_.hash_table_ratio, + table_options_.index_sparseness, table_options_.huge_page_tlb_size, + table_options_.full_scan_mode); +} + +TableBuilder* PlainTableFactory::NewTableBuilder( + const TableBuilderOptions& table_builder_options, uint32_t column_family_id, + WritableFileWriter* file) const { + // Ignore the skip_filters flag. PlainTable format is optimized for small + // in-memory dbs. 
The skip_filters optimization is not useful for plain + // tables + // + return new PlainTableBuilder( + table_builder_options.ioptions, + table_builder_options.int_tbl_prop_collector_factories, column_family_id, + file, table_options_.user_key_len, table_options_.encoding_type, + table_options_.index_sparseness, table_options_.bloom_bits_per_key, + table_builder_options.column_family_name, 6, + table_options_.huge_page_tlb_size, table_options_.hash_table_ratio, + table_options_.store_index_in_file); +} + +std::string PlainTableFactory::GetPrintableTableOptions() const { + std::string ret; + ret.reserve(20000); + const int kBufferSize = 200; + char buffer[kBufferSize]; + + snprintf(buffer, kBufferSize, " user_key_len: %u\n", + table_options_.user_key_len); + ret.append(buffer); + snprintf(buffer, kBufferSize, " bloom_bits_per_key: %d\n", + table_options_.bloom_bits_per_key); + ret.append(buffer); + snprintf(buffer, kBufferSize, " hash_table_ratio: %lf\n", + table_options_.hash_table_ratio); + ret.append(buffer); + snprintf(buffer, kBufferSize, " index_sparseness: %" ROCKSDB_PRIszt "\n", + table_options_.index_sparseness); + ret.append(buffer); + snprintf(buffer, kBufferSize, " huge_page_tlb_size: %" ROCKSDB_PRIszt "\n", + table_options_.huge_page_tlb_size); + ret.append(buffer); + snprintf(buffer, kBufferSize, " encoding_type: %d\n", + table_options_.encoding_type); + ret.append(buffer); + snprintf(buffer, kBufferSize, " full_scan_mode: %d\n", + table_options_.full_scan_mode); + ret.append(buffer); + snprintf(buffer, kBufferSize, " store_index_in_file: %d\n", + table_options_.store_index_in_file); + ret.append(buffer); + return ret; +} + +const PlainTableOptions& PlainTableFactory::table_options() const { + return table_options_; +} + +extern TableFactory* NewPlainTableFactory(const PlainTableOptions& options) { + return new PlainTableFactory(options); +} + +const std::string PlainTablePropertyNames::kPrefixExtractorName = + "rocksdb.prefix.extractor.name"; + +const std::string PlainTablePropertyNames::kEncodingType = + "rocksdb.plain.table.encoding.type"; + +const std::string PlainTablePropertyNames::kBloomVersion = + "rocksdb.plain.table.bloom.version"; + +const std::string PlainTablePropertyNames::kNumBloomBlocks = + "rocksdb.plain.table.bloom.numblocks"; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/plain_table_factory.h b/external/rocksdb/table/plain_table_factory.h new file mode 100755 index 0000000000..33cd313471 --- /dev/null +++ b/external/rocksdb/table/plain_table_factory.h @@ -0,0 +1,178 @@ +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once + +#ifndef ROCKSDB_LITE +#include +#include +#include + +#include "rocksdb/options.h" +#include "rocksdb/table.h" + +namespace rocksdb { + +struct EnvOptions; + +using std::unique_ptr; +class Status; +class RandomAccessFile; +class WritableFile; +class Table; +class TableBuilder; + +// IndexedTable requires fixed length key, configured as a constructor +// parameter of the factory class. 
Output file format: +// +-------------+-----------------+ +// | version | user_key_length | +// +------------++------------+-----------------+ <= key1 offset +// | encoded key1 | value_size | | +// +------------+-------------+-------------+ | +// | value1 | +// | | +// +--------------------------+-------------+---+ <= key2 offset +// | encoded key2 | value_size | | +// +------------+-------------+-------------+ | +// | value2 | +// | | +// | ...... | +// +-----------------+--------------------------+ +// +// When the key encoding type is kPlain. Key part is encoded as: +// +------------+--------------------+ +// | [key_size] | internal key | +// +------------+--------------------+ +// for the case of user_key_len = kPlainTableVariableLength case, +// and simply: +// +----------------------+ +// | internal key | +// +----------------------+ +// for user_key_len != kPlainTableVariableLength case. +// +// If key encoding type is kPrefix. Keys are encoding in this format. +// There are three ways to encode a key: +// (1) Full Key +// +---------------+---------------+-------------------+ +// | Full Key Flag | Full Key Size | Full Internal Key | +// +---------------+---------------+-------------------+ +// which simply encodes a full key +// +// (2) A key shared the same prefix as the previous key, which is encoded as +// format of (1). +// +-------------+-------------+-------------+-------------+------------+ +// | Prefix Flag | Prefix Size | Suffix Flag | Suffix Size | Key Suffix | +// +-------------+-------------+-------------+-------------+------------+ +// where key is the suffix part of the key, including the internal bytes. +// the actual key will be constructed by concatenating prefix part of the +// previous key, with the suffix part of the key here, with sizes given here. +// +// (3) A key shared the same prefix as the previous key, which is encoded as +// the format of (2). +// +-----------------+-----------------+------------------------+ +// | Key Suffix Flag | Key Suffix Size | Suffix of Internal Key | +// +-----------------+-----------------+------------------------+ +// The key will be constructed by concatenating previous key's prefix (which is +// also a prefix which the last key encoded in the format of (1)) and the +// key given here. +// +// For example, we for following keys (prefix and suffix are separated by +// spaces): +// 0000 0001 +// 0000 00021 +// 0000 0002 +// 00011 00 +// 0002 0001 +// Will be encoded like this: +// FK 8 00000001 +// PF 4 SF 5 00021 +// SF 4 0002 +// FK 7 0001100 +// FK 8 00020001 +// (where FK means full key flag, PF means prefix flag and SF means suffix flag) +// +// All those "key flag + key size" shown above are in this format: +// The 8 bits of the first byte: +// +----+----+----+----+----+----+----+----+ +// | Type | Size | +// +----+----+----+----+----+----+----+----+ +// Type indicates: full key, prefix, or suffix. +// The last 6 bits are for size. If the size bits are not all 1, it means the +// size of the key. Otherwise, varint32 is read after this byte. This varint +// value + 0x3F (the value of all 1) will be the key size. +// +// For example, full key with length 16 will be encoded as (binary): +// 00 010000 +// (00 means full key) +// and a prefix with 100 bytes will be encoded as: +// 01 111111 00100101 +// (63) (37) +// (01 means key suffix) +// +// All the internal keys above (including kPlain and kPrefix) are encoded in +// this format: +// There are two types: +// (1) normal internal key format +// +----------- ...... 
-------------+----+---+---+---+---+---+---+---+ +// | user key |type| sequence ID | +// +----------- ..... --------------+----+---+---+---+---+---+---+---+ +// (2) Special case for keys whose sequence ID is 0 and is value type +// +----------- ...... -------------+----+ +// | user key |0x80| +// +----------- ..... --------------+----+ +// To save 7 bytes for the special case where sequence ID = 0. +// +// +class PlainTableFactory : public TableFactory { + public: + ~PlainTableFactory() {} + // user_key_len is the length of the user key. If it is set to be + // kPlainTableVariableLength, then it means variable length. Otherwise, all + // the keys need to have the fix length of this value. bloom_bits_per_key is + // number of bits used for bloom filer per key. hash_table_ratio is + // the desired utilization of the hash table used for prefix hashing. + // hash_table_ratio = number of prefixes / #buckets in the hash table + // hash_table_ratio = 0 means skip hash table but only replying on binary + // search. + // index_sparseness determines index interval for keys + // inside the same prefix. It will be the maximum number of linear search + // required after hash and binary search. + // index_sparseness = 0 means index for every key. + // huge_page_tlb_size determines whether to allocate hash indexes from huge + // page TLB and the page size if allocating from there. See comments of + // Arena::AllocateAligned() for details. + explicit PlainTableFactory( + const PlainTableOptions& _table_options = PlainTableOptions()) + : table_options_(_table_options) {} + + const char* Name() const override { return "PlainTable"; } + Status NewTableReader(const TableReaderOptions& table_reader_options, + unique_ptr&& file, + uint64_t file_size, unique_ptr* table, + bool prefetch_index_and_filter_in_cache) const override; + + TableBuilder* NewTableBuilder( + const TableBuilderOptions& table_builder_options, + uint32_t column_family_id, WritableFileWriter* file) const override; + + std::string GetPrintableTableOptions() const override; + + const PlainTableOptions& table_options() const; + + static const char kValueTypeSeqId0 = char(0xFF); + + // Sanitizes the specified DB Options. + Status SanitizeOptions(const DBOptions& db_opts, + const ColumnFamilyOptions& cf_opts) const override { + return Status::OK(); + } + + void* GetOptions() override { return &table_options_; } + + private: + PlainTableOptions table_options_; +}; + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/plain_table_index.cc b/external/rocksdb/table/plain_table_index.cc new file mode 100755 index 0000000000..c8081c006b --- /dev/null +++ b/external/rocksdb/table/plain_table_index.cc @@ -0,0 +1,216 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. 
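As the header further below spells out, each index bucket packs a flag bit and a 31-bit offset into one uint32_t: the high bit (kSubIndexMask) selects between a direct file offset and an offset into the sub-index, while the all-ones value (kMaxFileSize) marks an empty bucket. A self-contained sketch of the decode step, mirroring the logic of PlainTableIndex::GetOffset() in this file:

    #include <cstdint>
    #include <iostream>

    const uint32_t kSubIndexMask = 0x80000000;
    const uint32_t kMaxFileSize = (1u << 31) - 1;

    enum Result { kNoPrefixForBucket, kDirectToFile, kSubindex };

    static Result Decode(uint32_t bucket_value, uint32_t* out) {
      if ((bucket_value & kSubIndexMask) == kSubIndexMask) {
        *out = bucket_value ^ kSubIndexMask;  // strip the flag bit
        return kSubindex;
      }
      if (bucket_value >= kMaxFileSize) return kNoPrefixForBucket;
      *out = bucket_value;  // points straight at a row in the file
      return kDirectToFile;
    }

    int main() {
      uint32_t out = 0;
      // 1 (kDirectToFile): plain offset, points straight at a row.
      std::cout << Decode(4096, &out) << " offset=" << out << "\n";
      // 2 (kSubindex): flag stripped, offset into the sub-index buffer.
      std::cout << Decode(kSubIndexMask | 16, &out) << " offset=" << out << "\n";
      // 0 (kNoPrefixForBucket): the empty-bucket sentinel, out untouched.
      std::cout << Decode(kMaxFileSize, &out) << "\n";
      return 0;
    }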
+ +#ifndef ROCKSDB_LITE + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#include + +#include "table/plain_table_index.h" +#include "util/coding.h" +#include "util/hash.h" + +namespace rocksdb { + +namespace { +inline uint32_t GetBucketIdFromHash(uint32_t hash, uint32_t num_buckets) { + assert(num_buckets > 0); + return hash % num_buckets; +} +} + +Status PlainTableIndex::InitFromRawData(Slice data) { + if (!GetVarint32(&data, &index_size_)) { + return Status::Corruption("Couldn't read the index size!"); + } + assert(index_size_ > 0); + if (!GetVarint32(&data, &num_prefixes_)) { + return Status::Corruption("Couldn't read the index size!"); + } + sub_index_size_ = + static_cast(data.size()) - index_size_ * kOffsetLen; + + char* index_data_begin = const_cast(data.data()); + index_ = reinterpret_cast(index_data_begin); + sub_index_ = reinterpret_cast(index_ + index_size_); + return Status::OK(); +} + +PlainTableIndex::IndexSearchResult PlainTableIndex::GetOffset( + uint32_t prefix_hash, uint32_t* bucket_value) const { + int bucket = GetBucketIdFromHash(prefix_hash, index_size_); + *bucket_value = index_[bucket]; + if ((*bucket_value & kSubIndexMask) == kSubIndexMask) { + *bucket_value ^= kSubIndexMask; + return kSubindex; + } + if (*bucket_value >= kMaxFileSize) { + return kNoPrefixForBucket; + } else { + // point directly to the file + return kDirectToFile; + } +} + +void PlainTableIndexBuilder::IndexRecordList::AddRecord(uint32_t hash, + uint32_t offset) { + if (num_records_in_current_group_ == kNumRecordsPerGroup) { + current_group_ = AllocateNewGroup(); + num_records_in_current_group_ = 0; + } + auto& new_record = current_group_[num_records_in_current_group_++]; + new_record.hash = hash; + new_record.offset = offset; + new_record.next = nullptr; +} + +void PlainTableIndexBuilder::AddKeyPrefix(Slice key_prefix_slice, + uint32_t key_offset) { + if (is_first_record_ || prev_key_prefix_ != key_prefix_slice.ToString()) { + ++num_prefixes_; + if (!is_first_record_) { + keys_per_prefix_hist_.Add(num_keys_per_prefix_); + } + num_keys_per_prefix_ = 0; + prev_key_prefix_ = key_prefix_slice.ToString(); + prev_key_prefix_hash_ = GetSliceHash(key_prefix_slice); + due_index_ = true; + } + + if (due_index_) { + // Add an index key for every kIndexIntervalForSamePrefixKeys keys + record_list_.AddRecord(prev_key_prefix_hash_, key_offset); + due_index_ = false; + } + + num_keys_per_prefix_++; + if (index_sparseness_ == 0 || num_keys_per_prefix_ % index_sparseness_ == 0) { + due_index_ = true; + } + is_first_record_ = false; +} + +Slice PlainTableIndexBuilder::Finish() { + AllocateIndex(); + std::vector hash_to_offsets(index_size_, nullptr); + std::vector entries_per_bucket(index_size_, 0); + BucketizeIndexes(&hash_to_offsets, &entries_per_bucket); + + keys_per_prefix_hist_.Add(num_keys_per_prefix_); + Log(InfoLogLevel::INFO_LEVEL, ioptions_.info_log, + "Number of Keys per prefix Histogram: %s", + keys_per_prefix_hist_.ToString().c_str()); + + // From the temp data structure, populate indexes. + return FillIndexes(hash_to_offsets, entries_per_bucket); +} + +void PlainTableIndexBuilder::AllocateIndex() { + if (prefix_extractor_ == nullptr || hash_table_ratio_ <= 0) { + // Fall back to pure binary search if the user fails to specify a prefix + // extractor. 
+ index_size_ = 1; + } else { + double hash_table_size_multipier = 1.0 / hash_table_ratio_; + index_size_ = + static_cast(num_prefixes_ * hash_table_size_multipier) + 1; + assert(index_size_ > 0); + } +} + +void PlainTableIndexBuilder::BucketizeIndexes( + std::vector* hash_to_offsets, + std::vector* entries_per_bucket) { + bool first = true; + uint32_t prev_hash = 0; + size_t num_records = record_list_.GetNumRecords(); + for (size_t i = 0; i < num_records; i++) { + IndexRecord* index_record = record_list_.At(i); + uint32_t cur_hash = index_record->hash; + if (first || prev_hash != cur_hash) { + prev_hash = cur_hash; + first = false; + } + uint32_t bucket = GetBucketIdFromHash(cur_hash, index_size_); + IndexRecord* prev_bucket_head = (*hash_to_offsets)[bucket]; + index_record->next = prev_bucket_head; + (*hash_to_offsets)[bucket] = index_record; + (*entries_per_bucket)[bucket]++; + } + + sub_index_size_ = 0; + for (auto entry_count : *entries_per_bucket) { + if (entry_count <= 1) { + continue; + } + // Only buckets with more than 1 entry will have subindex. + sub_index_size_ += VarintLength(entry_count); + // total bytes needed to store these entries' in-file offsets. + sub_index_size_ += entry_count * PlainTableIndex::kOffsetLen; + } +} + +Slice PlainTableIndexBuilder::FillIndexes( + const std::vector& hash_to_offsets, + const std::vector& entries_per_bucket) { + Log(InfoLogLevel::DEBUG_LEVEL, ioptions_.info_log, + "Reserving %" PRIu32 " bytes for plain table's sub_index", + sub_index_size_); + auto total_allocate_size = GetTotalSize(); + char* allocated = arena_->AllocateAligned( + total_allocate_size, huge_page_tlb_size_, ioptions_.info_log); + + auto temp_ptr = EncodeVarint32(allocated, index_size_); + uint32_t* index = + reinterpret_cast(EncodeVarint32(temp_ptr, num_prefixes_)); + char* sub_index = reinterpret_cast(index + index_size_); + + uint32_t sub_index_offset = 0; + for (uint32_t i = 0; i < index_size_; i++) { + uint32_t num_keys_for_bucket = entries_per_bucket[i]; + switch (num_keys_for_bucket) { + case 0: + // No key for bucket + index[i] = PlainTableIndex::kMaxFileSize; + break; + case 1: + // point directly to the file offset + index[i] = hash_to_offsets[i]->offset; + break; + default: + // point to second level indexes. 
+ index[i] = sub_index_offset | PlainTableIndex::kSubIndexMask; + char* prev_ptr = &sub_index[sub_index_offset]; + char* cur_ptr = EncodeVarint32(prev_ptr, num_keys_for_bucket); + sub_index_offset += static_cast(cur_ptr - prev_ptr); + char* sub_index_pos = &sub_index[sub_index_offset]; + IndexRecord* record = hash_to_offsets[i]; + int j; + for (j = num_keys_for_bucket - 1; j >= 0 && record; + j--, record = record->next) { + EncodeFixed32(sub_index_pos + j * sizeof(uint32_t), record->offset); + } + assert(j == -1 && record == nullptr); + sub_index_offset += PlainTableIndex::kOffsetLen * num_keys_for_bucket; + assert(sub_index_offset <= sub_index_size_); + break; + } + } + assert(sub_index_offset == sub_index_size_); + + Log(InfoLogLevel::DEBUG_LEVEL, ioptions_.info_log, + "hash table size: %d, suffix_map length %" ROCKSDB_PRIszt, index_size_, + sub_index_size_); + return Slice(allocated, GetTotalSize()); +} + +const std::string PlainTableIndexBuilder::kPlainTableIndexBlock = + "PlainTableIndexBlock"; +}; // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/plain_table_index.h b/external/rocksdb/table/plain_table_index.h new file mode 100755 index 0000000000..ab2be3d1e2 --- /dev/null +++ b/external/rocksdb/table/plain_table_index.h @@ -0,0 +1,225 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#pragma once + +#ifndef ROCKSDB_LITE + +#include +#include + +#include "db/dbformat.h" +#include "rocksdb/options.h" +#include "util/murmurhash.h" +#include "util/hash.h" +#include "util/arena.h" +#include "util/histogram.h" + +namespace rocksdb { + +// PlainTableIndex contains buckets size of index_size_, each is a +// 32-bit integer. The lower 31 bits contain an offset value (explained below) +// and the first bit of the integer indicates type of the offset. +// +// +--------------+------------------------------------------------------+ +// | Flag (1 bit) | Offset to binary search buffer or file (31 bits) + +// +--------------+------------------------------------------------------+ +// +// Explanation for the "flag bit": +// +// 0 indicates that the bucket contains only one prefix (no conflict when +// hashing this prefix), whose first row starts from this offset of the +// file. +// 1 indicates that the bucket contains more than one prefixes, or there +// are too many rows for one prefix so we need a binary search for it. In +// this case, the offset indicates the offset of sub_index_ holding the +// binary search indexes of keys for those rows. Those binary search indexes +// are organized in this way: +// +// The first 4 bytes, indicate how many indexes (N) are stored after it. After +// it, there are N 32-bit integers, each points of an offset of the file, +// which +// points to starting of a row. Those offsets need to be guaranteed to be in +// ascending order so the keys they are pointing to are also in ascending +// order +// to make sure we can use them to do binary searches. Below is visual +// presentation of a bucket. +// +// +// number_of_records: varint32 +// record 1 file offset: fixedint32 +// record 2 file offset: fixedint32 +// .... 
+// record N file offset: fixedint32 +// +class PlainTableIndex { + public: + enum IndexSearchResult { + kNoPrefixForBucket = 0, + kDirectToFile = 1, + kSubindex = 2 + }; + + explicit PlainTableIndex(Slice data) { InitFromRawData(data); } + + PlainTableIndex() + : index_size_(0), + sub_index_size_(0), + num_prefixes_(0), + index_(nullptr), + sub_index_(nullptr) {} + + IndexSearchResult GetOffset(uint32_t prefix_hash, + uint32_t* bucket_value) const; + + Status InitFromRawData(Slice data); + + const char* GetSubIndexBasePtrAndUpperBound(uint32_t offset, + uint32_t* upper_bound) const { + const char* index_ptr = &sub_index_[offset]; + return GetVarint32Ptr(index_ptr, index_ptr + 4, upper_bound); + } + + uint32_t GetIndexSize() const { return index_size_; } + + uint32_t GetSubIndexSize() const { return sub_index_size_; } + + uint32_t GetNumPrefixes() const { return num_prefixes_; } + + static const uint64_t kMaxFileSize = (1u << 31) - 1; + static const uint32_t kSubIndexMask = 0x80000000; + static const size_t kOffsetLen = sizeof(uint32_t); + + private: + uint32_t index_size_; + uint32_t sub_index_size_; + uint32_t num_prefixes_; + + uint32_t* index_; + char* sub_index_; +}; + +// PlainTableIndexBuilder is used to create plain table index. +// After calling Finish(), it returns Slice, which is usually +// used either to initialize PlainTableIndex or +// to save index to sst file. +// For more details about the index, please refer to: +// https://github.com/facebook/rocksdb/wiki/PlainTable-Format +// #wiki-in-memory-index-format +class PlainTableIndexBuilder { + public: + PlainTableIndexBuilder(Arena* arena, const ImmutableCFOptions& ioptions, + size_t index_sparseness, double hash_table_ratio, + size_t huge_page_tlb_size) + : arena_(arena), + ioptions_(ioptions), + record_list_(kRecordsPerGroup), + is_first_record_(true), + due_index_(false), + num_prefixes_(0), + num_keys_per_prefix_(0), + prev_key_prefix_hash_(0), + index_sparseness_(index_sparseness), + prefix_extractor_(ioptions.prefix_extractor), + hash_table_ratio_(hash_table_ratio), + huge_page_tlb_size_(huge_page_tlb_size) {} + + void AddKeyPrefix(Slice key_prefix_slice, uint32_t key_offset); + + Slice Finish(); + + uint32_t GetTotalSize() const { + return VarintLength(index_size_) + VarintLength(num_prefixes_) + + PlainTableIndex::kOffsetLen * index_size_ + sub_index_size_; + } + + static const std::string kPlainTableIndexBlock; + + private: + struct IndexRecord { + uint32_t hash; // hash of the prefix + uint32_t offset; // offset of a row + IndexRecord* next; + }; + + // Helper class to track all the index records + class IndexRecordList { + public: + explicit IndexRecordList(size_t num_records_per_group) + : kNumRecordsPerGroup(num_records_per_group), + current_group_(nullptr), + num_records_in_current_group_(num_records_per_group) {} + + ~IndexRecordList() { + for (size_t i = 0; i < groups_.size(); i++) { + delete[] groups_[i]; + } + } + + void AddRecord(uint32_t hash, uint32_t offset); + + size_t GetNumRecords() const { + return (groups_.size() - 1) * kNumRecordsPerGroup + + num_records_in_current_group_; + } + IndexRecord* At(size_t index) { + return &(groups_[index / kNumRecordsPerGroup] + [index % kNumRecordsPerGroup]); + } + + private: + IndexRecord* AllocateNewGroup() { + IndexRecord* result = new IndexRecord[kNumRecordsPerGroup]; + groups_.push_back(result); + return result; + } + + // Each group in `groups_` contains fix-sized records (determined by + // kNumRecordsPerGroup). 
This helps minimize the cost when resizing occurs.
+    const size_t kNumRecordsPerGroup;
+    IndexRecord* current_group_;
+    // List of arrays allocated
+    std::vector<IndexRecord*> groups_;
+    size_t num_records_in_current_group_;
+  };
+
+  void AllocateIndex();
+
+  // Internal helper function to bucket the index record list into hash
+  // buckets.
+  void BucketizeIndexes(std::vector<IndexRecord*>* hash_to_offsets,
+                        std::vector<uint32_t>* entries_per_bucket);
+
+  // Internal helper function to fill the indexes and bloom filters to
+  // internal data structures.
+  Slice FillIndexes(const std::vector<IndexRecord*>& hash_to_offsets,
+                    const std::vector<uint32_t>& entries_per_bucket);
+
+  Arena* arena_;
+  const ImmutableCFOptions ioptions_;
+  HistogramImpl keys_per_prefix_hist_;
+  IndexRecordList record_list_;
+  bool is_first_record_;
+  bool due_index_;
+  uint32_t num_prefixes_;
+  uint32_t num_keys_per_prefix_;
+
+  uint32_t prev_key_prefix_hash_;
+  size_t index_sparseness_;
+  uint32_t index_size_;
+  uint32_t sub_index_size_;
+
+  const SliceTransform* prefix_extractor_;
+  double hash_table_ratio_;
+  size_t huge_page_tlb_size_;
+
+  std::string prev_key_prefix_;
+
+  static const size_t kRecordsPerGroup = 256;
+};
+
+}  // namespace rocksdb
+
+#endif  // ROCKSDB_LITE
diff --git a/external/rocksdb/table/plain_table_key_coding.cc b/external/rocksdb/table/plain_table_key_coding.cc
new file mode 100755
index 0000000000..8442f11295
--- /dev/null
+++ b/external/rocksdb/table/plain_table_key_coding.cc
@@ -0,0 +1,495 @@
+// Copyright (c) 2011-present, Facebook, Inc. All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef ROCKSDB_LITE
+#include "table/plain_table_key_coding.h"
+
+#include
+#include
+#include "db/dbformat.h"
+#include "table/plain_table_reader.h"
+#include "table/plain_table_factory.h"
+#include "util/file_reader_writer.h"
+
+namespace rocksdb {
+
+enum PlainTableEntryType : unsigned char {
+  kFullKey = 0,
+  kPrefixFromPreviousKey = 1,
+  kKeySuffix = 2,
+};
+
+namespace {
+
+// Control byte:
+// The first two bits indicate the type of entry.
+// The remaining six bits hold an inlined size. If they are all 1 (0x3F),
+// overflow bytes are used: key_size - 0x3F is encoded as a varint32 after
+// this byte.
+
+const unsigned char kSizeInlineLimit = 0x3F;
+
+// Return 0 for error.
+size_t EncodeSize(PlainTableEntryType type, uint32_t key_size,
+                  char* out_buffer) {
+  out_buffer[0] = type << 6;
+
+  if (key_size < static_cast<uint32_t>(kSizeInlineLimit)) {
+    // Size is inlined.
+    out_buffer[0] |= static_cast<char>(key_size);
+    return 1;
+  } else {
+    out_buffer[0] |= kSizeInlineLimit;
+    char* ptr = EncodeVarint32(out_buffer + 1, key_size - kSizeInlineLimit);
+    return ptr - out_buffer;
+  }
+}
+}  // namespace
+
+// Fills bytes_read with the number of bytes read.
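DecodeSize() below consumes this control byte. Round-tripping the scheme in isolation, with local varint32 stand-ins so the sketch compiles on its own (the real code uses EncodeVarint32/GetVarint32Ptr from util/coding.h):

#include <cassert>
#include <cstdint>

constexpr unsigned char kSizeInlineLimitToy = 0x3F;  // low 6 bits of the control byte

// Local varint32 helpers, illustrative only.
static char* PutVarint32(char* p, uint32_t v) {
  while (v >= 128) {
    *p++ = static_cast<char>(v | 128);
    v >>= 7;
  }
  *p++ = static_cast<char>(v);
  return p;
}

static const char* GetVarint32(const char* p, uint32_t* v) {
  uint32_t result = 0;
  for (int shift = 0; shift <= 28; shift += 7) {
    uint32_t byte = static_cast<unsigned char>(*p++);
    result |= (byte & 127) << shift;
    if ((byte & 128) == 0) {
      *v = result;
      return p;
    }
  }
  return nullptr;  // malformed varint
}

// Control byte: 2-bit entry type in the high bits, 6-bit inlined size below.
static size_t EncodeSizeToy(unsigned char type, uint32_t key_size, char* out) {
  out[0] = static_cast<char>(type << 6);
  if (key_size < kSizeInlineLimitToy) {
    out[0] |= static_cast<char>(key_size);  // size fits inline
    return 1;
  }
  out[0] |= static_cast<char>(kSizeInlineLimitToy);  // all-ones: varint overflow follows
  return static_cast<size_t>(PutVarint32(out + 1, key_size - kSizeInlineLimitToy) - out);
}

static size_t DecodeSizeToy(const char* in, unsigned char* type,
                            uint32_t* key_size) {
  unsigned char control = static_cast<unsigned char>(in[0]);
  *type = control >> 6;
  unsigned char inline_size = control & kSizeInlineLimitToy;
  if (inline_size < kSizeInlineLimitToy) {
    *key_size = inline_size;
    return 1;
  }
  uint32_t extra = 0;
  const char* end = GetVarint32(in + 1, &extra);
  *key_size = kSizeInlineLimitToy + extra;
  return static_cast<size_t>(end - in);
}

int main() {
  const uint32_t sizes[] = {0, 62, 63, 64, 1000, 1u << 20};
  char buf[6];
  for (uint32_t n : sizes) {
    unsigned char type = 0;
    uint32_t decoded = 0;
    size_t written = EncodeSizeToy(2 /* kKeySuffix */, n, buf);
    size_t read = DecodeSizeToy(buf, &type, &decoded);
    assert(written == read && type == 2 && decoded == n);
  }
}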
+inline Status PlainTableKeyDecoder::DecodeSize(uint32_t start_offset, + PlainTableEntryType* entry_type, + uint32_t* key_size, + uint32_t* bytes_read) { + Slice next_byte_slice; + bool success = file_reader_.Read(start_offset, 1, &next_byte_slice); + if (!success) { + return file_reader_.status(); + } + *entry_type = static_cast( + (static_cast(next_byte_slice[0]) & ~kSizeInlineLimit) >> + 6); + char inline_key_size = next_byte_slice[0] & kSizeInlineLimit; + if (inline_key_size < kSizeInlineLimit) { + *key_size = inline_key_size; + *bytes_read = 1; + return Status::OK(); + } else { + uint32_t extra_size; + uint32_t tmp_bytes_read; + success = file_reader_.ReadVarint32(start_offset + 1, &extra_size, + &tmp_bytes_read); + if (!success) { + return file_reader_.status(); + } + assert(tmp_bytes_read > 0); + *key_size = kSizeInlineLimit + extra_size; + *bytes_read = tmp_bytes_read + 1; + return Status::OK(); + } +} + +Status PlainTableKeyEncoder::AppendKey(const Slice& key, + WritableFileWriter* file, + uint64_t* offset, char* meta_bytes_buf, + size_t* meta_bytes_buf_size) { + ParsedInternalKey parsed_key; + if (!ParseInternalKey(key, &parsed_key)) { + return Status::Corruption(Slice()); + } + + Slice key_to_write = key; // Portion of internal key to write out. + + uint32_t user_key_size = static_cast(key.size() - 8); + if (encoding_type_ == kPlain) { + if (fixed_user_key_len_ == kPlainTableVariableLength) { + // Write key length + char key_size_buf[5]; // tmp buffer for key size as varint32 + char* ptr = EncodeVarint32(key_size_buf, user_key_size); + assert(ptr <= key_size_buf + sizeof(key_size_buf)); + auto len = ptr - key_size_buf; + Status s = file->Append(Slice(key_size_buf, len)); + if (!s.ok()) { + return s; + } + *offset += len; + } + } else { + assert(encoding_type_ == kPrefix); + char size_bytes[12]; + size_t size_bytes_pos = 0; + + Slice prefix = + prefix_extractor_->Transform(Slice(key.data(), user_key_size)); + if (key_count_for_prefix_ == 0 || prefix != pre_prefix_.GetKey() || + key_count_for_prefix_ % index_sparseness_ == 0) { + key_count_for_prefix_ = 1; + pre_prefix_.SetKey(prefix); + size_bytes_pos += EncodeSize(kFullKey, user_key_size, size_bytes); + Status s = file->Append(Slice(size_bytes, size_bytes_pos)); + if (!s.ok()) { + return s; + } + *offset += size_bytes_pos; + } else { + key_count_for_prefix_++; + if (key_count_for_prefix_ == 2) { + // For second key within a prefix, need to encode prefix length + size_bytes_pos += + EncodeSize(kPrefixFromPreviousKey, + static_cast(pre_prefix_.GetKey().size()), + size_bytes + size_bytes_pos); + } + uint32_t prefix_len = static_cast(pre_prefix_.GetKey().size()); + size_bytes_pos += EncodeSize(kKeySuffix, user_key_size - prefix_len, + size_bytes + size_bytes_pos); + Status s = file->Append(Slice(size_bytes, size_bytes_pos)); + if (!s.ok()) { + return s; + } + *offset += size_bytes_pos; + key_to_write = Slice(key.data() + prefix_len, key.size() - prefix_len); + } + } + + // Encode full key + // For value size as varint32 (up to 5 bytes). + // If the row is of value type with seqId 0, flush the special flag together + // in this buffer to safe one file append call, which takes 1 byte. 
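The branch that follows special-cases rows whose key carries sequence number 0 and type kTypeValue: the 8-byte internal-key tail is dropped and a 1-byte marker goes into the meta buffer instead. A minimal sketch of the saving, assuming the (sequence << 8) | type packing from db/dbformat.h and, for brevity, a little-endian host:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>

constexpr uint64_t kTypeValueToy = 1;  // kTypeValue per db/dbformat.h

static std::string MakeInternalKey(const std::string& user_key, uint64_t seq,
                                   uint64_t type) {
  uint64_t packed = (seq << 8) | type;
  char tail[8];
  std::memcpy(tail, &packed, 8);  // little-endian host assumed, for brevity
  return user_key + std::string(tail, 8);
}

// Bytes PlainTable emits for the key part of a row: the full internal key,
// unless the tail is (seq 0, kTypeValue), in which case the tail is dropped
// and a 1-byte marker goes into the row's meta bytes instead (not shown).
static std::string KeyBytesToWrite(const std::string& ikey) {
  uint64_t packed;
  std::memcpy(&packed, ikey.data() + ikey.size() - 8, 8);
  if ((packed >> 8) == 0 && (packed & 0xff) == kTypeValueToy) {
    return ikey.substr(0, ikey.size() - 8);  // saves 8 bytes per row
  }
  return ikey;
}

int main() {
  std::string k0 = MakeInternalKey("apple", 0, kTypeValueToy);
  std::string k9 = MakeInternalKey("apple", 9, kTypeValueToy);
  assert(KeyBytesToWrite(k0).size() == 5);   // tail elided
  assert(KeyBytesToWrite(k9).size() == 13);  // full internal key kept
}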
+ if (parsed_key.sequence == 0 && parsed_key.type == kTypeValue) { + Status s = + file->Append(Slice(key_to_write.data(), key_to_write.size() - 8)); + if (!s.ok()) { + return s; + } + *offset += key_to_write.size() - 8; + meta_bytes_buf[*meta_bytes_buf_size] = PlainTableFactory::kValueTypeSeqId0; + *meta_bytes_buf_size += 1; + } else { + file->Append(key_to_write); + *offset += key_to_write.size(); + } + + return Status::OK(); +} + +Slice PlainTableFileReader::GetFromBuffer(Buffer* buffer, uint32_t file_offset, + uint32_t len) { + assert(file_offset + len <= file_info_->data_end_offset); + return Slice(buffer->buf.get() + (file_offset - buffer->buf_start_offset), + len); +} + +bool PlainTableFileReader::ReadNonMmap(uint32_t file_offset, uint32_t len, + Slice* out) { + const uint32_t kPrefetchSize = 256u; + + // Try to read from buffers. + for (uint32_t i = 0; i < num_buf_; i++) { + Buffer* buffer = buffers_[num_buf_ - 1 - i].get(); + if (file_offset >= buffer->buf_start_offset && + file_offset + len <= buffer->buf_start_offset + buffer->buf_len) { + *out = GetFromBuffer(buffer, file_offset, len); + return true; + } + } + + Buffer* new_buffer; + // Data needed is not in any of the buffer. Allocate a new buffer. + if (num_buf_ < buffers_.size()) { + // Add a new buffer + new_buffer = new Buffer(); + buffers_[num_buf_++].reset(new_buffer); + } else { + // Now simply replace the last buffer. Can improve the placement policy + // if needed. + new_buffer = buffers_[num_buf_ - 1].get(); + } + + assert(file_offset + len <= file_info_->data_end_offset); + uint32_t size_to_read = std::min(file_info_->data_end_offset - file_offset, + std::max(kPrefetchSize, len)); + if (size_to_read > new_buffer->buf_capacity) { + new_buffer->buf.reset(new char[size_to_read]); + new_buffer->buf_capacity = size_to_read; + new_buffer->buf_len = 0; + } + Slice read_result; + Status s = file_info_->file->Read(file_offset, size_to_read, &read_result, + new_buffer->buf.get()); + if (!s.ok()) { + status_ = s; + return false; + } + new_buffer->buf_start_offset = file_offset; + new_buffer->buf_len = size_to_read; + *out = GetFromBuffer(new_buffer, file_offset, len); + return true; +} + +inline bool PlainTableFileReader::ReadVarint32(uint32_t offset, uint32_t* out, + uint32_t* bytes_read) { + if (file_info_->is_mmap_mode) { + const char* start = file_info_->file_data.data() + offset; + const char* limit = + file_info_->file_data.data() + file_info_->data_end_offset; + const char* key_ptr = GetVarint32Ptr(start, limit, out); + assert(key_ptr != nullptr); + *bytes_read = static_cast(key_ptr - start); + return true; + } else { + return ReadVarint32NonMmap(offset, out, bytes_read); + } +} + +bool PlainTableFileReader::ReadVarint32NonMmap(uint32_t offset, uint32_t* out, + uint32_t* bytes_read) { + const char* start; + const char* limit; + const uint32_t kMaxVarInt32Size = 6u; + uint32_t bytes_to_read = + std::min(file_info_->data_end_offset - offset, kMaxVarInt32Size); + Slice bytes; + if (!Read(offset, bytes_to_read, &bytes)) { + return false; + } + start = bytes.data(); + limit = bytes.data() + bytes.size(); + + const char* key_ptr = GetVarint32Ptr(start, limit, out); + *bytes_read = + (key_ptr != nullptr) ? 
static_cast(key_ptr - start) : 0; + return true; +} + +Status PlainTableKeyDecoder::ReadInternalKey( + uint32_t file_offset, uint32_t user_key_size, ParsedInternalKey* parsed_key, + uint32_t* bytes_read, bool* internal_key_valid, Slice* internal_key) { + Slice tmp_slice; + bool success = file_reader_.Read(file_offset, user_key_size + 1, &tmp_slice); + if (!success) { + return file_reader_.status(); + } + if (tmp_slice[user_key_size] == PlainTableFactory::kValueTypeSeqId0) { + // Special encoding for the row with seqID=0 + parsed_key->user_key = Slice(tmp_slice.data(), user_key_size); + parsed_key->sequence = 0; + parsed_key->type = kTypeValue; + *bytes_read += user_key_size + 1; + *internal_key_valid = false; + } else { + success = file_reader_.Read(file_offset, user_key_size + 8, internal_key); + if (!success) { + return file_reader_.status(); + } + *internal_key_valid = true; + if (!ParseInternalKey(*internal_key, parsed_key)) { + return Status::Corruption( + Slice("Incorrect value type found when reading the next key")); + } + *bytes_read += user_key_size + 8; + } + return Status::OK(); +} + +Status PlainTableKeyDecoder::NextPlainEncodingKey(uint32_t start_offset, + ParsedInternalKey* parsed_key, + Slice* internal_key, + uint32_t* bytes_read, + bool* seekable) { + uint32_t user_key_size = 0; + Status s; + if (fixed_user_key_len_ != kPlainTableVariableLength) { + user_key_size = fixed_user_key_len_; + } else { + uint32_t tmp_size = 0; + uint32_t tmp_read; + bool success = + file_reader_.ReadVarint32(start_offset, &tmp_size, &tmp_read); + if (!success) { + return file_reader_.status(); + } + assert(tmp_read > 0); + user_key_size = tmp_size; + *bytes_read = tmp_read; + } + // dummy initial value to avoid compiler complain + bool decoded_internal_key_valid = true; + Slice decoded_internal_key; + s = ReadInternalKey(start_offset + *bytes_read, user_key_size, parsed_key, + bytes_read, &decoded_internal_key_valid, + &decoded_internal_key); + if (!s.ok()) { + return s; + } + if (!file_reader_.file_info()->is_mmap_mode) { + cur_key_.SetInternalKey(*parsed_key); + parsed_key->user_key = Slice(cur_key_.GetKey().data(), user_key_size); + if (internal_key != nullptr) { + *internal_key = cur_key_.GetKey(); + } + } else if (internal_key != nullptr) { + if (decoded_internal_key_valid) { + *internal_key = decoded_internal_key; + } else { + // Need to copy out the internal key + cur_key_.SetInternalKey(*parsed_key); + *internal_key = cur_key_.GetKey(); + } + } + return Status::OK(); +} + +Status PlainTableKeyDecoder::NextPrefixEncodingKey( + uint32_t start_offset, ParsedInternalKey* parsed_key, Slice* internal_key, + uint32_t* bytes_read, bool* seekable) { + PlainTableEntryType entry_type; + + bool expect_suffix = false; + Status s; + do { + uint32_t size = 0; + // dummy initial value to avoid compiler complain + bool decoded_internal_key_valid = true; + uint32_t my_bytes_read = 0; + s = DecodeSize(start_offset + *bytes_read, &entry_type, &size, + &my_bytes_read); + if (!s.ok()) { + return s; + } + if (my_bytes_read == 0) { + return Status::Corruption("Unexpected EOF when reading size of the key"); + } + *bytes_read += my_bytes_read; + + switch (entry_type) { + case kFullKey: { + expect_suffix = false; + Slice decoded_internal_key; + s = ReadInternalKey(start_offset + *bytes_read, size, parsed_key, + bytes_read, &decoded_internal_key_valid, + &decoded_internal_key); + if (!s.ok()) { + return s; + } + if (!file_reader_.file_info()->is_mmap_mode || + (internal_key != nullptr && 
!decoded_internal_key_valid)) { + // In non-mmap mode, always need to make a copy of keys returned to + // users, because after reading value for the key, the key might + // be invalid. + cur_key_.SetInternalKey(*parsed_key); + saved_user_key_ = cur_key_.GetKey(); + if (!file_reader_.file_info()->is_mmap_mode) { + parsed_key->user_key = Slice(cur_key_.GetKey().data(), size); + } + if (internal_key != nullptr) { + *internal_key = cur_key_.GetKey(); + } + } else { + if (internal_key != nullptr) { + *internal_key = decoded_internal_key; + } + saved_user_key_ = parsed_key->user_key; + } + break; + } + case kPrefixFromPreviousKey: { + if (seekable != nullptr) { + *seekable = false; + } + prefix_len_ = size; + assert(prefix_extractor_ == nullptr || + prefix_extractor_->Transform(saved_user_key_).size() == + prefix_len_); + // Need read another size flag for suffix + expect_suffix = true; + break; + } + case kKeySuffix: { + expect_suffix = false; + if (seekable != nullptr) { + *seekable = false; + } + + Slice tmp_slice; + s = ReadInternalKey(start_offset + *bytes_read, size, parsed_key, + bytes_read, &decoded_internal_key_valid, + &tmp_slice); + if (!s.ok()) { + return s; + } + if (!file_reader_.file_info()->is_mmap_mode) { + // In non-mmap mode, we need to make a copy of keys returned to + // users, because after reading value for the key, the key might + // be invalid. + // saved_user_key_ points to cur_key_. We are making a copy of + // the prefix part to another string, and construct the current + // key from the prefix part and the suffix part back to cur_key_. + std::string tmp = + Slice(saved_user_key_.data(), prefix_len_).ToString(); + cur_key_.Reserve(prefix_len_ + size); + cur_key_.SetInternalKey(tmp, *parsed_key); + parsed_key->user_key = + Slice(cur_key_.GetKey().data(), prefix_len_ + size); + saved_user_key_ = cur_key_.GetKey(); + } else { + cur_key_.Reserve(prefix_len_ + size); + cur_key_.SetInternalKey(Slice(saved_user_key_.data(), prefix_len_), + *parsed_key); + } + parsed_key->user_key = ExtractUserKey(cur_key_.GetKey()); + if (internal_key != nullptr) { + *internal_key = cur_key_.GetKey(); + } + break; + } + default: + return Status::Corruption("Un-identified size flag."); + } + } while (expect_suffix); // Another round if suffix is expected. 
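Stripped of file I/O and error handling, the reconstruction loop above reduces to the following toy decoder; entries are assumed to be already split out of the byte stream:

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

enum EntryTypeToy { kFullKeyToy = 0, kPrefixFromPreviousKeyToy = 1, kKeySuffixToy = 2 };

struct EntryToy {
  EntryTypeToy type;
  uint32_t size;      // value decoded from the control byte / varint
  std::string bytes;  // key bytes for full-key / suffix entries; empty otherwise
};

// Rebuild user keys from a prefix-encoded stream: a prefix entry only
// records how many leading bytes to reuse; the suffix entries that follow
// complete the keys. prefix_len persists across entries, like the
// prefix_len_ member above.
static std::vector<std::string> DecodeStream(const std::vector<EntryToy>& in) {
  std::vector<std::string> keys;
  std::string prev;  // last fully reconstructed user key
  uint32_t prefix_len = 0;
  for (const EntryToy& e : in) {
    switch (e.type) {
      case kFullKeyToy:
        prev = e.bytes;
        keys.push_back(prev);
        break;
      case kPrefixFromPreviousKeyToy:
        prefix_len = e.size;  // a suffix entry is expected next
        break;
      case kKeySuffixToy:
        prev = prev.substr(0, prefix_len) + e.bytes;
        keys.push_back(prev);
        break;
    }
  }
  return keys;
}

int main() {
  // Keys "abc|1", "abc|2", "abc|3" sharing the 4-byte prefix "abc|"; the
  // prefix length is emitted once, before the second key of the run.
  std::vector<EntryToy> stream = {{kFullKeyToy, 5, "abc|1"},
                                  {kPrefixFromPreviousKeyToy, 4, ""},
                                  {kKeySuffixToy, 1, "2"},
                                  {kKeySuffixToy, 1, "3"}};
  assert(DecodeStream(stream) ==
         (std::vector<std::string>{"abc|1", "abc|2", "abc|3"}));
}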
+ return Status::OK(); +} + +Status PlainTableKeyDecoder::NextKey(uint32_t start_offset, + ParsedInternalKey* parsed_key, + Slice* internal_key, Slice* value, + uint32_t* bytes_read, bool* seekable) { + assert(value != nullptr); + Status s = NextKeyNoValue(start_offset, parsed_key, internal_key, bytes_read, + seekable); + if (s.ok()) { + assert(bytes_read != nullptr); + uint32_t value_size; + uint32_t value_size_bytes; + bool success = file_reader_.ReadVarint32(start_offset + *bytes_read, + &value_size, &value_size_bytes); + if (!success) { + return file_reader_.status(); + } + if (value_size_bytes == 0) { + return Status::Corruption( + "Unexpected EOF when reading the next value's size."); + } + *bytes_read += value_size_bytes; + success = file_reader_.Read(start_offset + *bytes_read, value_size, value); + if (!success) { + return file_reader_.status(); + } + *bytes_read += value_size; + } + return s; +} + +Status PlainTableKeyDecoder::NextKeyNoValue(uint32_t start_offset, + ParsedInternalKey* parsed_key, + Slice* internal_key, + uint32_t* bytes_read, + bool* seekable) { + *bytes_read = 0; + if (seekable != nullptr) { + *seekable = true; + } + Status s; + if (encoding_type_ == kPlain) { + return NextPlainEncodingKey(start_offset, parsed_key, internal_key, + bytes_read, seekable); + } else { + assert(encoding_type_ == kPrefix); + return NextPrefixEncodingKey(start_offset, parsed_key, internal_key, + bytes_read, seekable); + } +} + +} // namespace rocksdb +#endif // ROCKSDB_LIT diff --git a/external/rocksdb/table/plain_table_key_coding.h b/external/rocksdb/table/plain_table_key_coding.h new file mode 100755 index 0000000000..ed4ce5d387 --- /dev/null +++ b/external/rocksdb/table/plain_table_key_coding.h @@ -0,0 +1,183 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#pragma once +#ifndef ROCKSDB_LITE + +#include +#include "rocksdb/slice.h" +#include "db/dbformat.h" +#include "table/plain_table_reader.h" + +namespace rocksdb { + +class WritableFile; +struct ParsedInternalKey; +struct PlainTableReaderFileInfo; +enum PlainTableEntryType : unsigned char; + +// Helper class to write out a key to an output file +// Actual data format of the key is documented in plain_table_factory.h +class PlainTableKeyEncoder { + public: + explicit PlainTableKeyEncoder(EncodingType encoding_type, + uint32_t user_key_len, + const SliceTransform* prefix_extractor, + size_t index_sparseness) + : encoding_type_((prefix_extractor != nullptr) ? encoding_type : kPlain), + fixed_user_key_len_(user_key_len), + prefix_extractor_(prefix_extractor), + index_sparseness_((index_sparseness > 1) ? index_sparseness : 1), + key_count_for_prefix_(0) {} + // key: the key to write out, in the format of internal key. + // file: the output file to write out + // offset: offset in the file. Needs to be updated after appending bytes + // for the key + // meta_bytes_buf: buffer for extra meta bytes + // meta_bytes_buf_size: offset to append extra meta bytes. Will be updated + // if meta_bytes_buf is updated. 
+ Status AppendKey(const Slice& key, WritableFileWriter* file, uint64_t* offset, + char* meta_bytes_buf, size_t* meta_bytes_buf_size); + + // Return actual encoding type to be picked + EncodingType GetEncodingType() { return encoding_type_; } + + private: + EncodingType encoding_type_; + uint32_t fixed_user_key_len_; + const SliceTransform* prefix_extractor_; + const size_t index_sparseness_; + size_t key_count_for_prefix_; + IterKey pre_prefix_; +}; + +class PlainTableFileReader { + public: + explicit PlainTableFileReader(const PlainTableReaderFileInfo* _file_info) + : file_info_(_file_info), num_buf_(0) {} + // In mmaped mode, the results point to mmaped area of the file, which + // means it is always valid before closing the file. + // In non-mmap mode, the results point to an internal buffer. If the caller + // makes another read call, the results may not be valid. So callers should + // make a copy when needed. + // In order to save read calls to files, we keep two internal buffers: + // the first read and the most recent read. This is efficient because it + // columns these two common use cases: + // (1) hash index only identify one location, we read the key to verify + // the location, and read key and value if it is the right location. + // (2) after hash index checking, we identify two locations (because of + // hash bucket conflicts), we binary search the two location to see + // which one is what we need and start to read from the location. + // These two most common use cases will be covered by the two buffers + // so that we don't need to re-read the same location. + // Currently we keep a fixed size buffer. If a read doesn't exactly fit + // the buffer, we replace the second buffer with the location user reads. + // + // If return false, status code is stored in status_. + bool Read(uint32_t file_offset, uint32_t len, Slice* out) { + if (file_info_->is_mmap_mode) { + assert(file_offset + len <= file_info_->data_end_offset); + *out = Slice(file_info_->file_data.data() + file_offset, len); + return true; + } else { + return ReadNonMmap(file_offset, len, out); + } + } + + // If return false, status code is stored in status_. + bool ReadNonMmap(uint32_t file_offset, uint32_t len, Slice* output); + + // *bytes_read = 0 means eof. false means failure and status is saved + // in status_. Not directly returning Status to save copying status + // object to map previous performance of mmap mode. + inline bool ReadVarint32(uint32_t offset, uint32_t* output, + uint32_t* bytes_read); + + bool ReadVarint32NonMmap(uint32_t offset, uint32_t* output, + uint32_t* bytes_read); + + Status status() const { return status_; } + + const PlainTableReaderFileInfo* file_info() { return file_info_; } + + private: + const PlainTableReaderFileInfo* file_info_; + + struct Buffer { + Buffer() : buf_start_offset(0), buf_len(0), buf_capacity(0) {} + std::unique_ptr buf; + uint32_t buf_start_offset; + uint32_t buf_len; + uint32_t buf_capacity; + }; + + // Keep buffers for two recent reads. 
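The two buffers mentioned above implement a simple policy: the first read stays pinned, the most recent read recycles the second slot, and short reads are widened to a 256-byte prefetch. A toy model of that policy, with a std::string standing in for the file (the real reader returns Slices into these buffers):

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <string>

class TwoSlotReader {
 public:
  explicit TwoSlotReader(std::string file) : file_(std::move(file)) {}

  // Returns a pointer into an internal buffer covering [off, off + len).
  const char* Read(uint32_t off, uint32_t len) {
    for (size_t i = num_; i-- > 0;) {  // check the most recent buffer first
      const Buf& b = bufs_[i];
      if (off >= b.start && off + len <= b.start + b.data.size()) {
        ++hits_;
        return b.data.data() + (off - b.start);
      }
    }
    Buf& b = (num_ < 2) ? bufs_[num_++] : bufs_[1];  // pin slot 0, recycle slot 1
    uint32_t n = std::min<uint32_t>(static_cast<uint32_t>(file_.size()) - off,
                                    std::max<uint32_t>(256, len));  // prefetch
    b.start = off;
    b.data.assign(file_, off, n);
    return b.data.data();
  }

  int hits() const { return hits_; }

 private:
  struct Buf { uint32_t start = 0; std::string data; };
  std::string file_;
  Buf bufs_[2];
  size_t num_ = 0;
  int hits_ = 0;
};

int main() {
  TwoSlotReader r(std::string(4096, 'x'));
  r.Read(0, 16);     // cold read: fills slot 0 and prefetches 256 bytes
  r.Read(100, 32);   // served from slot 0's prefetch
  r.Read(3000, 16);  // cold read: fills slot 1
  r.Read(3100, 16);  // served from slot 1's prefetch
  assert(r.hits() == 2);
}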
+ std::array, 2> buffers_; + uint32_t num_buf_; + Status status_; + + Slice GetFromBuffer(Buffer* buf, uint32_t file_offset, uint32_t len); +}; + +// A helper class to decode keys from input buffer +// Actual data format of the key is documented in plain_table_factory.h +class PlainTableKeyDecoder { + public: + explicit PlainTableKeyDecoder(const PlainTableReaderFileInfo* file_info, + EncodingType encoding_type, + uint32_t user_key_len, + const SliceTransform* prefix_extractor) + : file_reader_(file_info), + encoding_type_(encoding_type), + prefix_len_(0), + fixed_user_key_len_(user_key_len), + prefix_extractor_(prefix_extractor), + in_prefix_(false) {} + // Find the next key. + // start: char array where the key starts. + // limit: boundary of the char array + // parsed_key: the output of the result key + // internal_key: if not null, fill with the output of the result key in + // un-parsed format + // bytes_read: how many bytes read from start. Output + // seekable: whether key can be read from this place. Used when building + // indexes. Output. + Status NextKey(uint32_t start_offset, ParsedInternalKey* parsed_key, + Slice* internal_key, Slice* value, uint32_t* bytes_read, + bool* seekable = nullptr); + + Status NextKeyNoValue(uint32_t start_offset, ParsedInternalKey* parsed_key, + Slice* internal_key, uint32_t* bytes_read, + bool* seekable = nullptr); + + PlainTableFileReader file_reader_; + EncodingType encoding_type_; + uint32_t prefix_len_; + uint32_t fixed_user_key_len_; + Slice saved_user_key_; + IterKey cur_key_; + const SliceTransform* prefix_extractor_; + bool in_prefix_; + + private: + Status NextPlainEncodingKey(uint32_t start_offset, + ParsedInternalKey* parsed_key, + Slice* internal_key, uint32_t* bytes_read, + bool* seekable = nullptr); + Status NextPrefixEncodingKey(uint32_t start_offset, + ParsedInternalKey* parsed_key, + Slice* internal_key, uint32_t* bytes_read, + bool* seekable = nullptr); + Status ReadInternalKey(uint32_t file_offset, uint32_t user_key_size, + ParsedInternalKey* parsed_key, uint32_t* bytes_read, + bool* internal_key_valid, Slice* internal_key); + inline Status DecodeSize(uint32_t start_offset, + PlainTableEntryType* entry_type, uint32_t* key_size, + uint32_t* bytes_read); +}; + +} // namespace rocksdb + +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/plain_table_reader.cc b/external/rocksdb/table/plain_table_reader.cc new file mode 100755 index 0000000000..a5155254b9 --- /dev/null +++ b/external/rocksdb/table/plain_table_reader.cc @@ -0,0 +1,722 @@ +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
+ +#ifndef ROCKSDB_LITE + +#include "table/plain_table_reader.h" + +#include +#include + +#include "db/dbformat.h" + +#include "rocksdb/cache.h" +#include "rocksdb/comparator.h" +#include "rocksdb/env.h" +#include "rocksdb/filter_policy.h" +#include "rocksdb/options.h" +#include "rocksdb/statistics.h" + +#include "table/block.h" +#include "table/bloom_block.h" +#include "table/filter_block.h" +#include "table/format.h" +#include "table/internal_iterator.h" +#include "table/meta_blocks.h" +#include "table/two_level_iterator.h" +#include "table/plain_table_factory.h" +#include "table/plain_table_key_coding.h" +#include "table/get_context.h" + +#include "util/arena.h" +#include "util/coding.h" +#include "util/dynamic_bloom.h" +#include "util/hash.h" +#include "util/histogram.h" +#include "util/murmurhash.h" +#include "util/perf_context_imp.h" +#include "util/stop_watch.h" +#include "util/string_util.h" + + +namespace rocksdb { + +namespace { + +// Safely getting a uint32_t element from a char array, where, starting from +// `base`, every 4 bytes are considered as an fixed 32 bit integer. +inline uint32_t GetFixed32Element(const char* base, size_t offset) { + return DecodeFixed32(base + offset * sizeof(uint32_t)); +} +} // namespace + +// Iterator to iterate IndexedTable +class PlainTableIterator : public InternalIterator { + public: + explicit PlainTableIterator(PlainTableReader* table, bool use_prefix_seek); + ~PlainTableIterator(); + + bool Valid() const override; + + void SeekToFirst() override; + + void SeekToLast() override; + + void Seek(const Slice& target) override; + + void Next() override; + + void Prev() override; + + Slice key() const override; + + Slice value() const override; + + Status status() const override; + + private: + PlainTableReader* table_; + PlainTableKeyDecoder decoder_; + bool use_prefix_seek_; + uint32_t offset_; + uint32_t next_offset_; + Slice key_; + Slice value_; + Status status_; + // No copying allowed + PlainTableIterator(const PlainTableIterator&) = delete; + void operator=(const Iterator&) = delete; +}; + +extern const uint64_t kPlainTableMagicNumber; +PlainTableReader::PlainTableReader(const ImmutableCFOptions& ioptions, + unique_ptr&& file, + const EnvOptions& storage_options, + const InternalKeyComparator& icomparator, + EncodingType encoding_type, + uint64_t file_size, + const TableProperties* table_properties) + : internal_comparator_(icomparator), + encoding_type_(encoding_type), + full_scan_mode_(false), + user_key_len_(static_cast(table_properties->fixed_key_len)), + prefix_extractor_(ioptions.prefix_extractor), + enable_bloom_(false), + bloom_(6, nullptr), + file_info_(std::move(file), storage_options, + static_cast(table_properties->data_size)), + ioptions_(ioptions), + file_size_(file_size), + table_properties_(nullptr) {} + +PlainTableReader::~PlainTableReader() { +} + +Status PlainTableReader::Open(const ImmutableCFOptions& ioptions, + const EnvOptions& env_options, + const InternalKeyComparator& internal_comparator, + unique_ptr&& file, + uint64_t file_size, + unique_ptr* table_reader, + const int bloom_bits_per_key, + double hash_table_ratio, size_t index_sparseness, + size_t huge_page_tlb_size, bool full_scan_mode) { + if (file_size > PlainTableIndex::kMaxFileSize) { + return Status::NotSupported("File is too large for PlainTableReader!"); + } + + TableProperties* props = nullptr; + auto s = ReadTableProperties(file.get(), file_size, kPlainTableMagicNumber, + ioptions, &props); + if (!s.ok()) { + return s; + } + + assert(hash_table_ratio 
>= 0.0); + auto& user_props = props->user_collected_properties; + auto prefix_extractor_in_file = + user_props.find(PlainTablePropertyNames::kPrefixExtractorName); + + if (!full_scan_mode && prefix_extractor_in_file != user_props.end()) { + if (!ioptions.prefix_extractor) { + return Status::InvalidArgument( + "Prefix extractor is missing when opening a PlainTable built " + "using a prefix extractor"); + } else if (prefix_extractor_in_file->second.compare( + ioptions.prefix_extractor->Name()) != 0) { + return Status::InvalidArgument( + "Prefix extractor given doesn't match the one used to build " + "PlainTable"); + } + } + + EncodingType encoding_type = kPlain; + auto encoding_type_prop = + user_props.find(PlainTablePropertyNames::kEncodingType); + if (encoding_type_prop != user_props.end()) { + encoding_type = static_cast( + DecodeFixed32(encoding_type_prop->second.c_str())); + } + + std::unique_ptr new_reader(new PlainTableReader( + ioptions, std::move(file), env_options, internal_comparator, + encoding_type, file_size, props)); + + s = new_reader->MmapDataIfNeeded(); + if (!s.ok()) { + return s; + } + + if (!full_scan_mode) { + s = new_reader->PopulateIndex(props, bloom_bits_per_key, hash_table_ratio, + index_sparseness, huge_page_tlb_size); + if (!s.ok()) { + return s; + } + } else { + // Flag to indicate it is a full scan mode so that none of the indexes + // can be used. + new_reader->full_scan_mode_ = true; + } + + *table_reader = std::move(new_reader); + return s; +} + +void PlainTableReader::SetupForCompaction() { +} + +InternalIterator* PlainTableReader::NewIterator(const ReadOptions& options, + Arena* arena, + bool skip_filters) { + if (options.total_order_seek && !IsTotalOrderMode()) { + return NewErrorInternalIterator( + Status::InvalidArgument("total_order_seek not supported"), arena); + } + if (arena == nullptr) { + return new PlainTableIterator(this, prefix_extractor_ != nullptr); + } else { + auto mem = arena->AllocateAligned(sizeof(PlainTableIterator)); + return new (mem) PlainTableIterator(this, prefix_extractor_ != nullptr); + } +} + +Status PlainTableReader::PopulateIndexRecordList( + PlainTableIndexBuilder* index_builder, vector* prefix_hashes) { + Slice prev_key_prefix_slice; + std::string prev_key_prefix_buf; + uint32_t pos = data_start_offset_; + + bool is_first_record = true; + Slice key_prefix_slice; + PlainTableKeyDecoder decoder(&file_info_, encoding_type_, user_key_len_, + ioptions_.prefix_extractor); + while (pos < file_info_.data_end_offset) { + uint32_t key_offset = pos; + ParsedInternalKey key; + Slice value_slice; + bool seekable = false; + Status s = Next(&decoder, &pos, &key, nullptr, &value_slice, &seekable); + if (!s.ok()) { + return s; + } + + key_prefix_slice = GetPrefix(key); + if (enable_bloom_) { + bloom_.AddHash(GetSliceHash(key.user_key)); + } else { + if (is_first_record || prev_key_prefix_slice != key_prefix_slice) { + if (!is_first_record) { + prefix_hashes->push_back(GetSliceHash(prev_key_prefix_slice)); + } + if (file_info_.is_mmap_mode) { + prev_key_prefix_slice = key_prefix_slice; + } else { + prev_key_prefix_buf = key_prefix_slice.ToString(); + prev_key_prefix_slice = prev_key_prefix_buf; + } + } + } + + index_builder->AddKeyPrefix(GetPrefix(key), key_offset); + + if (!seekable && is_first_record) { + return Status::Corruption("Key for a prefix is not seekable"); + } + + is_first_record = false; + } + + prefix_hashes->push_back(GetSliceHash(key_prefix_slice)); + auto s = index_.InitFromRawData(index_builder->Finish()); + return s; +} + 
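PopulateIndexRecordList() above emits one hash per distinct prefix: a hash is pushed whenever the prefix changes, plus once for the final run. The same logic over plain strings, with std::hash standing in for GetSliceHash and a fixed prefix length standing in for the extractor:

#include <algorithm>
#include <cassert>
#include <cstdint>
#include <functional>
#include <string>
#include <vector>

static std::vector<uint32_t> CollectPrefixHashes(
    const std::vector<std::string>& sorted_user_keys, size_t prefix_len) {
  std::vector<uint32_t> hashes;
  std::string prev;
  bool first = true;
  for (const std::string& k : sorted_user_keys) {
    std::string prefix = k.substr(0, std::min(prefix_len, k.size()));
    if (first || prefix != prev) {
      if (!first) {  // close out the previous prefix run
        hashes.push_back(static_cast<uint32_t>(std::hash<std::string>{}(prev)));
      }
      prev = prefix;
      first = false;
    }
  }
  if (!first) {  // final run, mirroring the trailing push_back above
    hashes.push_back(static_cast<uint32_t>(std::hash<std::string>{}(prev)));
  }
  return hashes;
}

int main() {
  // Three distinct 2-byte prefixes: "aa", "ab", "ac".
  assert(CollectPrefixHashes({"aa1", "aa2", "ab1", "ab2", "ac9"}, 2).size() == 3);
}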
+void PlainTableReader::AllocateAndFillBloom(int bloom_bits_per_key, + int num_prefixes, + size_t huge_page_tlb_size, + vector* prefix_hashes) { + if (!IsTotalOrderMode()) { + uint32_t bloom_total_bits = num_prefixes * bloom_bits_per_key; + if (bloom_total_bits > 0) { + enable_bloom_ = true; + bloom_.SetTotalBits(&arena_, bloom_total_bits, ioptions_.bloom_locality, + huge_page_tlb_size, ioptions_.info_log); + FillBloom(prefix_hashes); + } + } +} + +void PlainTableReader::FillBloom(vector* prefix_hashes) { + assert(bloom_.IsInitialized()); + for (auto prefix_hash : *prefix_hashes) { + bloom_.AddHash(prefix_hash); + } +} + +Status PlainTableReader::MmapDataIfNeeded() { + if (file_info_.is_mmap_mode) { + // Get mmapped memory. + return file_info_.file->Read(0, file_size_, &file_info_.file_data, nullptr); + } + return Status::OK(); +} + +Status PlainTableReader::PopulateIndex(TableProperties* props, + int bloom_bits_per_key, + double hash_table_ratio, + size_t index_sparseness, + size_t huge_page_tlb_size) { + assert(props != nullptr); + table_properties_.reset(props); + + BlockContents bloom_block_contents; + auto s = ReadMetaBlock(file_info_.file.get(), file_size_, + kPlainTableMagicNumber, ioptions_, + BloomBlockBuilder::kBloomBlock, &bloom_block_contents); + bool index_in_file = s.ok(); + + BlockContents index_block_contents; + s = ReadMetaBlock( + file_info_.file.get(), file_size_, kPlainTableMagicNumber, ioptions_, + PlainTableIndexBuilder::kPlainTableIndexBlock, &index_block_contents); + + index_in_file &= s.ok(); + + Slice* bloom_block; + if (index_in_file) { + // If bloom_block_contents.allocation is not empty (which will be the case + // for non-mmap mode), it holds the alloated memory for the bloom block. + // It needs to be kept alive to keep `bloom_block` valid. + bloom_block_alloc_ = std::move(bloom_block_contents.allocation); + bloom_block = &bloom_block_contents.data; + } else { + bloom_block = nullptr; + } + + // index_in_file == true only if there are kBloomBlock and + // kPlainTableIndexBlock in file + Slice* index_block; + if (index_in_file) { + // If index_block_contents.allocation is not empty (which will be the case + // for non-mmap mode), it holds the alloated memory for the index block. + // It needs to be kept alive to keep `index_block` valid. + index_block_alloc_ = std::move(index_block_contents.allocation); + index_block = &index_block_contents.data; + } else { + index_block = nullptr; + } + + if ((ioptions_.prefix_extractor == nullptr) && + (hash_table_ratio != 0)) { + // ioptions.prefix_extractor is requried for a hash-based look-up. + return Status::NotSupported( + "PlainTable requires a prefix extractor enable prefix hash mode."); + } + + // First, read the whole file, for every kIndexIntervalForSamePrefixKeys rows + // for a prefix (starting from the first one), generate a record of (hash, + // offset) and append it to IndexRecordList, which is a data structure created + // to store them. + + if (!index_in_file) { + // Allocate bloom filter here for total order mode. 
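The sizing rule used in the branch below is linear: total bloom bits = expected entries (or prefixes, in hash mode) times bloom_bits_per_key. A toy filter to make that concrete; it uses two derived probes, which is not how DynamicBloom actually probes:

#include <cassert>
#include <cstdint>
#include <vector>

struct TinyBloom {
  explicit TinyBloom(uint32_t total_bits) : bits_(total_bits ? total_bits : 1) {}
  void AddHash(uint32_t h) { Set(h); Set(h * 0x9e3779b9u); }
  bool MayContainHash(uint32_t h) const { return Get(h) && Get(h * 0x9e3779b9u); }

 private:
  void Set(uint32_t h) { bits_[h % bits_.size()] = true; }
  bool Get(uint32_t h) const { return bits_[h % bits_.size()]; }
  std::vector<bool> bits_;
};

int main() {
  const uint64_t num_entries = 10000;
  const int bloom_bits_per_key = 10;  // roughly a 1% false-positive rate
  TinyBloom bloom(static_cast<uint32_t>(num_entries * bloom_bits_per_key));
  bloom.AddHash(42);
  assert(bloom.MayContainHash(42));  // bloom filters have no false negatives
}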
+ if (IsTotalOrderMode()) { + uint32_t num_bloom_bits = + static_cast(table_properties_->num_entries) * + bloom_bits_per_key; + if (num_bloom_bits > 0) { + enable_bloom_ = true; + bloom_.SetTotalBits(&arena_, num_bloom_bits, ioptions_.bloom_locality, + huge_page_tlb_size, ioptions_.info_log); + } + } + } else { + enable_bloom_ = true; + auto num_blocks_property = props->user_collected_properties.find( + PlainTablePropertyNames::kNumBloomBlocks); + + uint32_t num_blocks = 0; + if (num_blocks_property != props->user_collected_properties.end()) { + Slice temp_slice(num_blocks_property->second); + if (!GetVarint32(&temp_slice, &num_blocks)) { + num_blocks = 0; + } + } + // cast away const qualifier, because bloom_ won't be changed + bloom_.SetRawData( + const_cast( + reinterpret_cast(bloom_block->data())), + static_cast(bloom_block->size()) * 8, num_blocks); + } + + PlainTableIndexBuilder index_builder(&arena_, ioptions_, index_sparseness, + hash_table_ratio, huge_page_tlb_size); + + std::vector prefix_hashes; + if (!index_in_file) { + s = PopulateIndexRecordList(&index_builder, &prefix_hashes); + if (!s.ok()) { + return s; + } + } else { + s = index_.InitFromRawData(*index_block); + if (!s.ok()) { + return s; + } + } + + if (!index_in_file) { + // Calculated bloom filter size and allocate memory for + // bloom filter based on the number of prefixes, then fill it. + AllocateAndFillBloom(bloom_bits_per_key, index_.GetNumPrefixes(), + huge_page_tlb_size, &prefix_hashes); + } + + // Fill two table properties. + if (!index_in_file) { + props->user_collected_properties["plain_table_hash_table_size"] = + ToString(index_.GetIndexSize() * PlainTableIndex::kOffsetLen); + props->user_collected_properties["plain_table_sub_index_size"] = + ToString(index_.GetSubIndexSize()); + } else { + props->user_collected_properties["plain_table_hash_table_size"] = + ToString(0); + props->user_collected_properties["plain_table_sub_index_size"] = + ToString(0); + } + + return Status::OK(); +} + +Status PlainTableReader::GetOffset(PlainTableKeyDecoder* decoder, + const Slice& target, const Slice& prefix, + uint32_t prefix_hash, bool& prefix_matched, + uint32_t* offset) const { + prefix_matched = false; + uint32_t prefix_index_offset; + auto res = index_.GetOffset(prefix_hash, &prefix_index_offset); + if (res == PlainTableIndex::kNoPrefixForBucket) { + *offset = file_info_.data_end_offset; + return Status::OK(); + } else if (res == PlainTableIndex::kDirectToFile) { + *offset = prefix_index_offset; + return Status::OK(); + } + + // point to sub-index, need to do a binary search + uint32_t upper_bound; + const char* base_ptr = + index_.GetSubIndexBasePtrAndUpperBound(prefix_index_offset, &upper_bound); + uint32_t low = 0; + uint32_t high = upper_bound; + ParsedInternalKey mid_key; + ParsedInternalKey parsed_target; + if (!ParseInternalKey(target, &parsed_target)) { + return Status::Corruption(Slice()); + } + + // The key is between [low, high). Do a binary search between it. + while (high - low > 1) { + uint32_t mid = (high + low) / 2; + uint32_t file_offset = GetFixed32Element(base_ptr, mid); + uint32_t tmp; + Status s = decoder->NextKeyNoValue(file_offset, &mid_key, nullptr, &tmp); + if (!s.ok()) { + return s; + } + int cmp_result = internal_comparator_.Compare(mid_key, parsed_target); + if (cmp_result < 0) { + low = mid; + } else { + if (cmp_result == 0) { + // Happen to have found the exact key or target is smaller than the + // first key after base_offset. 
+ prefix_matched = true; + *offset = file_offset; + return Status::OK(); + } else { + high = mid; + } + } + } + // Both of the key at the position low or low+1 could share the same + // prefix as target. We need to rule out one of them to avoid to go + // to the wrong prefix. + ParsedInternalKey low_key; + uint32_t tmp; + uint32_t low_key_offset = GetFixed32Element(base_ptr, low); + Status s = decoder->NextKeyNoValue(low_key_offset, &low_key, nullptr, &tmp); + if (!s.ok()) { + return s; + } + + if (GetPrefix(low_key) == prefix) { + prefix_matched = true; + *offset = low_key_offset; + } else if (low + 1 < upper_bound) { + // There is possible a next prefix, return it + prefix_matched = false; + *offset = GetFixed32Element(base_ptr, low + 1); + } else { + // target is larger than a key of the last prefix in this bucket + // but with a different prefix. Key does not exist. + *offset = file_info_.data_end_offset; + } + return Status::OK(); +} + +bool PlainTableReader::MatchBloom(uint32_t hash) const { + if (!enable_bloom_) { + return true; + } + + if (bloom_.MayContainHash(hash)) { + PERF_COUNTER_ADD(bloom_sst_hit_count, 1); + return true; + } else { + PERF_COUNTER_ADD(bloom_sst_miss_count, 1); + return false; + } +} + +Status PlainTableReader::Next(PlainTableKeyDecoder* decoder, uint32_t* offset, + ParsedInternalKey* parsed_key, + Slice* internal_key, Slice* value, + bool* seekable) const { + if (*offset == file_info_.data_end_offset) { + *offset = file_info_.data_end_offset; + return Status::OK(); + } + + if (*offset > file_info_.data_end_offset) { + return Status::Corruption("Offset is out of file size"); + } + + uint32_t bytes_read; + Status s = decoder->NextKey(*offset, parsed_key, internal_key, value, + &bytes_read, seekable); + if (!s.ok()) { + return s; + } + *offset = *offset + bytes_read; + return Status::OK(); +} + +void PlainTableReader::Prepare(const Slice& target) { + if (enable_bloom_) { + uint32_t prefix_hash = GetSliceHash(GetPrefix(target)); + bloom_.Prefetch(prefix_hash); + } +} + +Status PlainTableReader::Get(const ReadOptions& ro, const Slice& target, + GetContext* get_context, bool skip_filters) { + // Check bloom filter first. + Slice prefix_slice; + uint32_t prefix_hash; + if (IsTotalOrderMode()) { + if (full_scan_mode_) { + status_ = + Status::InvalidArgument("Get() is not allowed in full scan mode."); + } + // Match whole user key for bloom filter check. + if (!MatchBloom(GetSliceHash(GetUserKey(target)))) { + return Status::OK(); + } + // in total order mode, there is only one bucket 0, and we always use empty + // prefix. + prefix_slice = Slice(); + prefix_hash = 0; + } else { + prefix_slice = GetPrefix(target); + prefix_hash = GetSliceHash(prefix_slice); + if (!MatchBloom(prefix_hash)) { + return Status::OK(); + } + } + uint32_t offset; + bool prefix_match; + PlainTableKeyDecoder decoder(&file_info_, encoding_type_, user_key_len_, + ioptions_.prefix_extractor); + Status s = GetOffset(&decoder, target, prefix_slice, prefix_hash, + prefix_match, &offset); + + if (!s.ok()) { + return s; + } + ParsedInternalKey found_key; + ParsedInternalKey parsed_target; + if (!ParseInternalKey(target, &parsed_target)) { + return Status::Corruption(Slice()); + } + Slice found_value; + while (offset < file_info_.data_end_offset) { + s = Next(&decoder, &offset, &found_key, nullptr, &found_value); + if (!s.ok()) { + return s; + } + if (!prefix_match) { + // Need to verify prefix for the first key found if it is not yet + // checked. 
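GetOffset()'s sub-index search above keeps the invariant that the target lies in [low, high) over offsets whose keys are in ascending order. Reduced to an in-memory sketch, with a sorted vector standing in for decoding keys at file offsets:

#include <cassert>
#include <cstdint>
#include <string>
#include <vector>

static uint32_t LowerBoundIndex(const std::vector<std::string>& keys_at_offset,
                                const std::string& target) {
  uint32_t low = 0;
  uint32_t high = static_cast<uint32_t>(keys_at_offset.size());
  while (high - low > 1) {
    uint32_t mid = (high + low) / 2;
    if (keys_at_offset[mid] < target) {
      low = mid;
    } else if (keys_at_offset[mid] == target) {
      return mid;  // exact hit, as in the cmp_result == 0 branch above
    } else {
      high = mid;
    }
  }
  return low;  // the caller still verifies the prefix of keys_at_offset[low]
}

int main() {
  std::vector<std::string> keys = {"aa1", "aa4", "aa7", "ab2"};
  assert(LowerBoundIndex(keys, "aa4") == 1);  // exact match
  assert(LowerBoundIndex(keys, "aa5") == 1);  // low lands just before target
  assert(LowerBoundIndex(keys, "a") == 0);    // smaller than everything
}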
+ if (GetPrefix(found_key) != prefix_slice) { + return Status::OK(); + } + prefix_match = true; + } + // TODO(ljin): since we know the key comparison result here, + // can we enable the fast path? + if (internal_comparator_.Compare(found_key, parsed_target) >= 0) { + if (!get_context->SaveValue(found_key, found_value)) { + break; + } + } + } + return Status::OK(); +} + +uint64_t PlainTableReader::ApproximateOffsetOf(const Slice& key) { + return 0; +} + +PlainTableIterator::PlainTableIterator(PlainTableReader* table, + bool use_prefix_seek) + : table_(table), + decoder_(&table_->file_info_, table_->encoding_type_, + table_->user_key_len_, table_->prefix_extractor_), + use_prefix_seek_(use_prefix_seek) { + next_offset_ = offset_ = table_->file_info_.data_end_offset; +} + +PlainTableIterator::~PlainTableIterator() { +} + +bool PlainTableIterator::Valid() const { + return offset_ < table_->file_info_.data_end_offset && + offset_ >= table_->data_start_offset_; +} + +void PlainTableIterator::SeekToFirst() { + next_offset_ = table_->data_start_offset_; + if (next_offset_ >= table_->file_info_.data_end_offset) { + next_offset_ = offset_ = table_->file_info_.data_end_offset; + } else { + Next(); + } +} + +void PlainTableIterator::SeekToLast() { + assert(false); + status_ = Status::NotSupported("SeekToLast() is not supported in PlainTable"); +} + +void PlainTableIterator::Seek(const Slice& target) { + // If the user doesn't set prefix seek option and we are not able to do a + // total Seek(). assert failure. + if (!use_prefix_seek_) { + if (table_->full_scan_mode_) { + status_ = + Status::InvalidArgument("Seek() is not allowed in full scan mode."); + offset_ = next_offset_ = table_->file_info_.data_end_offset; + return; + } else if (table_->GetIndexSize() > 1) { + assert(false); + status_ = Status::NotSupported( + "PlainTable cannot issue non-prefix seek unless in total order " + "mode."); + offset_ = next_offset_ = table_->file_info_.data_end_offset; + return; + } + } + + Slice prefix_slice = table_->GetPrefix(target); + uint32_t prefix_hash = 0; + // Bloom filter is ignored in total-order mode. 
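Once GetOffset() returns, Get() above scans forward from that offset until it sees the first key greater than or equal to the target, verifying the prefix of the first decoded key. As a toy over already-decoded rows (the real code verifies the prefix only once, on the first row):

#include <cassert>
#include <string>
#include <vector>

struct RowToy { std::string key, value; };

// Returns the value for `target` or empty if absent; `start` plays the role
// of the offset the hash index produced, and rows are sorted by key.
static std::string ScanFrom(const std::vector<RowToy>& rows, size_t start,
                            const std::string& prefix,
                            const std::string& target) {
  for (size_t i = start; i < rows.size(); ++i) {
    if (rows[i].key.compare(0, prefix.size(), prefix) != 0) {
      return "";  // walked past the prefix run: not found
    }
    if (rows[i].key >= target) {
      return rows[i].key == target ? rows[i].value : "";
    }
  }
  return "";
}

int main() {
  std::vector<RowToy> rows = {{"aa1", "v1"}, {"aa3", "v3"}, {"ab1", "w1"}};
  assert(ScanFrom(rows, 0, "aa", "aa3") == "v3");
  assert(ScanFrom(rows, 0, "aa", "aa2") == "");  // stops at aa3 >= aa2, no match
  assert(ScanFrom(rows, 2, "ab", "ab1") == "w1");
}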
+ if (!table_->IsTotalOrderMode()) { + prefix_hash = GetSliceHash(prefix_slice); + if (!table_->MatchBloom(prefix_hash)) { + offset_ = next_offset_ = table_->file_info_.data_end_offset; + return; + } + } + bool prefix_match; + status_ = table_->GetOffset(&decoder_, target, prefix_slice, prefix_hash, + prefix_match, &next_offset_); + if (!status_.ok()) { + offset_ = next_offset_ = table_->file_info_.data_end_offset; + return; + } + + if (next_offset_ < table_->file_info_.data_end_offset) { + for (Next(); status_.ok() && Valid(); Next()) { + if (!prefix_match) { + // Need to verify the first key's prefix + if (table_->GetPrefix(key()) != prefix_slice) { + offset_ = next_offset_ = table_->file_info_.data_end_offset; + break; + } + prefix_match = true; + } + if (table_->internal_comparator_.Compare(key(), target) >= 0) { + break; + } + } + } else { + offset_ = table_->file_info_.data_end_offset; + } +} + +void PlainTableIterator::Next() { + offset_ = next_offset_; + if (offset_ < table_->file_info_.data_end_offset) { + Slice tmp_slice; + ParsedInternalKey parsed_key; + status_ = + table_->Next(&decoder_, &next_offset_, &parsed_key, &key_, &value_); + if (!status_.ok()) { + offset_ = next_offset_ = table_->file_info_.data_end_offset; + } + } +} + +void PlainTableIterator::Prev() { + assert(false); +} + +Slice PlainTableIterator::key() const { + assert(Valid()); + return key_; +} + +Slice PlainTableIterator::value() const { + assert(Valid()); + return value_; +} + +Status PlainTableIterator::status() const { + return status_; +} + +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/plain_table_reader.h b/external/rocksdb/table/plain_table_reader.h new file mode 100755 index 0000000000..baa156d798 --- /dev/null +++ b/external/rocksdb/table/plain_table_reader.h @@ -0,0 +1,233 @@ +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once + +#ifndef ROCKSDB_LITE +#include +#include +#include +#include +#include + +#include "db/dbformat.h" +#include "rocksdb/env.h" +#include "rocksdb/iterator.h" +#include "rocksdb/slice_transform.h" +#include "rocksdb/table.h" +#include "rocksdb/table_properties.h" +#include "table/table_reader.h" +#include "table/plain_table_factory.h" +#include "table/plain_table_index.h" +#include "util/arena.h" +#include "util/dynamic_bloom.h" +#include "util/file_reader_writer.h" + +namespace rocksdb { + +class Block; +struct BlockContents; +class BlockHandle; +class Footer; +struct Options; +class RandomAccessFile; +struct ReadOptions; +class TableCache; +class TableReader; +class InternalKeyComparator; +class PlainTableKeyDecoder; +class GetContext; +class InternalIterator; + +using std::unique_ptr; +using std::unordered_map; +using std::vector; +extern const uint32_t kPlainTableVariableLength; + +struct PlainTableReaderFileInfo { + bool is_mmap_mode; + Slice file_data; + uint32_t data_end_offset; + unique_ptr file; + + PlainTableReaderFileInfo(unique_ptr&& _file, + const EnvOptions& storage_options, + uint32_t _data_size_offset) + : is_mmap_mode(storage_options.use_mmap_reads), + data_end_offset(_data_size_offset), + file(std::move(_file)) {} +}; + +// Based on following output file format shown in plain_table_factory.h +// When opening the output file, IndexedTableReader creates a hash table +// from key prefixes to offset of the output file. 
IndexedTable will decide +// whether it points to the data offset of the first key with the key prefix +// or the offset of it. If there are too many keys share this prefix, it will +// create a binary search-able index from the suffix to offset on disk. +// +// The implementation of IndexedTableReader requires output file is mmaped +class PlainTableReader: public TableReader { + public: + static Status Open(const ImmutableCFOptions& ioptions, + const EnvOptions& env_options, + const InternalKeyComparator& internal_comparator, + unique_ptr&& file, + uint64_t file_size, unique_ptr* table, + const int bloom_bits_per_key, double hash_table_ratio, + size_t index_sparseness, size_t huge_page_tlb_size, + bool full_scan_mode); + + InternalIterator* NewIterator(const ReadOptions&, Arena* arena = nullptr, + bool skip_filters = false) override; + + void Prepare(const Slice& target) override; + + Status Get(const ReadOptions&, const Slice& key, GetContext* get_context, + bool skip_filters = false) override; + + uint64_t ApproximateOffsetOf(const Slice& key) override; + + uint32_t GetIndexSize() const { return index_.GetIndexSize(); } + void SetupForCompaction() override; + + std::shared_ptr GetTableProperties() const override { + return table_properties_; + } + + virtual size_t ApproximateMemoryUsage() const override { + return arena_.MemoryAllocatedBytes(); + } + + PlainTableReader(const ImmutableCFOptions& ioptions, + unique_ptr&& file, + const EnvOptions& env_options, + const InternalKeyComparator& internal_comparator, + EncodingType encoding_type, uint64_t file_size, + const TableProperties* table_properties); + virtual ~PlainTableReader(); + + protected: + // Check bloom filter to see whether it might contain this prefix. + // The hash of the prefix is given, since it can be reused for index lookup + // too. + virtual bool MatchBloom(uint32_t hash) const; + + // PopulateIndex() builds index of keys. It must be called before any query + // to the table. + // + // props: the table properties object that need to be stored. Ownership of + // the object will be passed. + // + + Status PopulateIndex(TableProperties* props, int bloom_bits_per_key, + double hash_table_ratio, size_t index_sparseness, + size_t huge_page_tlb_size); + + Status MmapDataIfNeeded(); + + private: + const InternalKeyComparator internal_comparator_; + EncodingType encoding_type_; + // represents plain table's current status. + Status status_; + + PlainTableIndex index_; + bool full_scan_mode_; + + // data_start_offset_ and data_end_offset_ defines the range of the + // sst file that stores data. 
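The prefix_extractor_ member declared below drives hash-mode lookups; when it is null the reader runs in total-order mode with an empty prefix. A fixed-length extractor such as the stock rocksdb::NewFixedPrefixTransform(n) behaves like this sketch (a toy stand-in, not the SliceTransform interface):

#include <cassert>
#include <string>

struct FixedPrefixToy {
  size_t n;
  bool InDomain(const std::string& user_key) const { return user_key.size() >= n; }
  std::string Transform(const std::string& user_key) const {
    assert(InDomain(user_key));  // Transform is only defined on in-domain keys
    return user_key.substr(0, n);
  }
};

int main() {
  FixedPrefixToy p{4};
  assert(p.Transform("user1234") == "user");  // keys sharing "user" hash to one bucket
  assert(!p.InDomain("usr"));                 // shorter keys are out of domain
}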
+ const uint32_t data_start_offset_ = 0; + const uint32_t user_key_len_; + const SliceTransform* prefix_extractor_; + + static const size_t kNumInternalBytes = 8; + + // Bloom filter is used to rule out non-existent key + bool enable_bloom_; + DynamicBloom bloom_; + PlainTableReaderFileInfo file_info_; + Arena arena_; + std::unique_ptr index_block_alloc_; + std::unique_ptr bloom_block_alloc_; + + const ImmutableCFOptions& ioptions_; + uint64_t file_size_; + std::shared_ptr table_properties_; + + bool IsFixedLength() const { + return user_key_len_ != kPlainTableVariableLength; + } + + size_t GetFixedInternalKeyLength() const { + return user_key_len_ + kNumInternalBytes; + } + + Slice GetPrefix(const Slice& target) const { + assert(target.size() >= 8); // target is internal key + return GetPrefixFromUserKey(GetUserKey(target)); + } + + Slice GetPrefix(const ParsedInternalKey& target) const { + return GetPrefixFromUserKey(target.user_key); + } + + Slice GetUserKey(const Slice& key) const { + return Slice(key.data(), key.size() - 8); + } + + Slice GetPrefixFromUserKey(const Slice& user_key) const { + if (!IsTotalOrderMode()) { + return prefix_extractor_->Transform(user_key); + } else { + // Use empty slice as prefix if prefix_extractor is not set. + // In that case, + // it falls back to pure binary search and + // total iterator seek is supported. + return Slice(); + } + } + + friend class TableCache; + friend class PlainTableIterator; + + // Internal helper function to generate an IndexRecordList object from all + // the rows, which contains index records as a list. + // If bloom_ is not null, all the keys' full-key hash will be added to the + // bloom filter. + Status PopulateIndexRecordList(PlainTableIndexBuilder* index_builder, + vector* prefix_hashes); + + // Internal helper function to allocate memory for bloom filter and fill it + void AllocateAndFillBloom(int bloom_bits_per_key, int num_prefixes, + size_t huge_page_tlb_size, + vector* prefix_hashes); + + void FillBloom(vector* prefix_hashes); + + // Read the key and value at `offset` to parameters for keys, the and + // `seekable`. + // On success, `offset` will be updated as the offset for the next key. + // `parsed_key` will be key in parsed format. + // if `internal_key` is not empty, it will be filled with key with slice + // format. + // if `seekable` is not null, it will return whether we can directly read + // data using this offset. + Status Next(PlainTableKeyDecoder* decoder, uint32_t* offset, + ParsedInternalKey* parsed_key, Slice* internal_key, Slice* value, + bool* seekable = nullptr) const; + // Get file offset for key target. + // return value prefix_matched is set to true if the offset is confirmed + // for a key with the same prefix as target. + Status GetOffset(PlainTableKeyDecoder* decoder, const Slice& target, + const Slice& prefix, uint32_t prefix_hash, + bool& prefix_matched, uint32_t* offset) const; + + bool IsTotalOrderMode() const { return (prefix_extractor_ == nullptr); } + + // No copying allowed + explicit PlainTableReader(const TableReader&) = delete; + void operator=(const TableReader&) = delete; +}; +} // namespace rocksdb +#endif // ROCKSDB_LITE diff --git a/external/rocksdb/table/scoped_arena_iterator.h b/external/rocksdb/table/scoped_arena_iterator.h new file mode 100755 index 0000000000..5629ba5aac --- /dev/null +++ b/external/rocksdb/table/scoped_arena_iterator.h @@ -0,0 +1,29 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. 
+// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. +#pragma once + +#include "table/internal_iterator.h" + +namespace rocksdb { +class ScopedArenaIterator { + public: + explicit ScopedArenaIterator(InternalIterator* iter = nullptr) + : iter_(iter) {} + + InternalIterator* operator->() { return iter_; } + + void set(InternalIterator* iter) { iter_ = iter; } + + InternalIterator* get() { return iter_; } + + ~ScopedArenaIterator() { iter_->~InternalIterator(); } + + private: + InternalIterator* iter_; +}; +} // namespace rocksdb diff --git a/external/rocksdb/table/sst_file_writer.cc b/external/rocksdb/table/sst_file_writer.cc new file mode 100755 index 0000000000..98f2d1f2dc --- /dev/null +++ b/external/rocksdb/table/sst_file_writer.cc @@ -0,0 +1,204 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. + +#include "rocksdb/sst_file_writer.h" + +#include +#include "db/dbformat.h" +#include "rocksdb/table.h" +#include "table/block_based_table_builder.h" +#include "util/file_reader_writer.h" +#include "util/string_util.h" + +namespace rocksdb { + +const std::string ExternalSstFilePropertyNames::kVersion = + "rocksdb.external_sst_file.version"; + +// PropertiesCollector used to add properties specific to tables +// generated by SstFileWriter +class SstFileWriter::SstFileWriterPropertiesCollector + : public IntTblPropCollector { + public: + explicit SstFileWriterPropertiesCollector(int32_t version) + : version_(version) {} + + virtual Status InternalAdd(const Slice& key, const Slice& value, + uint64_t file_size) override { + // Intentionally left blank. Have no interest in collecting stats for + // individual key/value pairs. 
diff --git a/external/rocksdb/table/sst_file_writer.cc b/external/rocksdb/table/sst_file_writer.cc
new file mode 100755
index 0000000000..98f2d1f2dc
--- /dev/null
+++ b/external/rocksdb/table/sst_file_writer.cc
@@ -0,0 +1,204 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "rocksdb/sst_file_writer.h"
+
+#include <vector>
+#include "db/dbformat.h"
+#include "rocksdb/table.h"
+#include "table/block_based_table_builder.h"
+#include "util/file_reader_writer.h"
+#include "util/string_util.h"
+
+namespace rocksdb {
+
+const std::string ExternalSstFilePropertyNames::kVersion =
+    "rocksdb.external_sst_file.version";
+
+// PropertiesCollector used to add properties specific to tables
+// generated by SstFileWriter
+class SstFileWriter::SstFileWriterPropertiesCollector
+    : public IntTblPropCollector {
+ public:
+  explicit SstFileWriterPropertiesCollector(int32_t version)
+      : version_(version) {}
+
+  virtual Status InternalAdd(const Slice& key, const Slice& value,
+                             uint64_t file_size) override {
+    // Intentionally left blank. Have no interest in collecting stats for
+    // individual key/value pairs.
+    return Status::OK();
+  }
+
+  virtual Status Finish(UserCollectedProperties* properties) override {
+    std::string version_val;
+    PutFixed32(&version_val, static_cast<uint32_t>(version_));
+    properties->insert({ExternalSstFilePropertyNames::kVersion, version_val});
+    return Status::OK();
+  }
+
+  virtual const char* Name() const override {
+    return "SstFileWriterPropertiesCollector";
+  }
+
+  virtual UserCollectedProperties GetReadableProperties() const override {
+    return {{ExternalSstFilePropertyNames::kVersion, ToString(version_)}};
+  }
+
+ private:
+  int32_t version_;
+};
+
+class SstFileWriter::SstFileWriterPropertiesCollectorFactory
+    : public IntTblPropCollectorFactory {
+ public:
+  explicit SstFileWriterPropertiesCollectorFactory(int32_t version)
+      : version_(version) {}
+
+  virtual IntTblPropCollector* CreateIntTblPropCollector(
+      uint32_t column_family_id) override {
+    return new SstFileWriterPropertiesCollector(version_);
+  }
+
+  virtual const char* Name() const override {
+    return "SstFileWriterPropertiesCollector";
+  }
+
+ private:
+  int32_t version_;
+};
+
+struct SstFileWriter::Rep {
+  Rep(const EnvOptions& _env_options, const Options& options,
+      const Comparator* _user_comparator)
+      : env_options(_env_options),
+        ioptions(options),
+        mutable_cf_options(options, ioptions),
+        internal_comparator(_user_comparator) {}
+
+  std::unique_ptr<WritableFileWriter> file_writer;
+  std::unique_ptr<TableBuilder> builder;
+  EnvOptions env_options;
+  ImmutableCFOptions ioptions;
+  MutableCFOptions mutable_cf_options;
+  InternalKeyComparator internal_comparator;
+  ExternalSstFileInfo file_info;
+  std::string column_family_name;
+  InternalKey ikey;
+};
+
+SstFileWriter::SstFileWriter(const EnvOptions& env_options,
+                             const Options& options,
+                             const Comparator* user_comparator)
+    : rep_(new Rep(env_options, options, user_comparator)) {}
+
+SstFileWriter::~SstFileWriter() { delete rep_; }
+
+Status SstFileWriter::Open(const std::string& file_path) {
+  Rep* r = rep_;
+  Status s;
+  std::unique_ptr<WritableFile> sst_file;
+  s = r->ioptions.env->NewWritableFile(file_path, &sst_file, r->env_options);
+  if (!s.ok()) {
+    return s;
+  }
+
+  CompressionType compression_type;
+  if (r->ioptions.bottommost_compression != kDisableCompressionOption) {
+    compression_type = r->ioptions.bottommost_compression;
+  } else if (!r->ioptions.compression_per_level.empty()) {
+    // Use the compression of the last level if we have per level compression
+    compression_type = *(r->ioptions.compression_per_level.rbegin());
+  } else {
+    compression_type = r->mutable_cf_options.compression;
+  }
+
+  std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
+      int_tbl_prop_collector_factories;
+  int_tbl_prop_collector_factories.emplace_back(
+      new SstFileWriterPropertiesCollectorFactory(1 /* version */));
+
+  TableBuilderOptions table_builder_options(
+      r->ioptions, r->internal_comparator, &int_tbl_prop_collector_factories,
+      compression_type, r->ioptions.compression_opts,
+      nullptr /* compression_dict */, false /* skip_filters */,
+      r->column_family_name);
+  r->file_writer.reset(
+      new WritableFileWriter(std::move(sst_file), r->env_options));
+  r->builder.reset(r->ioptions.table_factory->NewTableBuilder(
+      table_builder_options,
+      TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
+      r->file_writer.get()));
+
+  r->file_info.file_path = file_path;
+  r->file_info.file_size = 0;
+  r->file_info.num_entries = 0;
+  r->file_info.sequence_number = 0;
+  r->file_info.version = 1;
+  return s;
+}
+
+Status SstFileWriter::Add(const Slice& user_key, const Slice& value) {
+  Rep* r = rep_;
+  if (!r->builder) {
+    return Status::InvalidArgument("File is not opened");
Status::InvalidArgument("File is not opened"); + } + + if (r->file_info.num_entries == 0) { + r->file_info.smallest_key.assign(user_key.data(), user_key.size()); + } else { + if (r->internal_comparator.user_comparator()->Compare( + user_key, r->file_info.largest_key) <= 0) { + // Make sure that keys are added in order + return Status::InvalidArgument("Keys must be added in order"); + } + } + + // update file info + r->file_info.num_entries++; + r->file_info.largest_key.assign(user_key.data(), user_key.size()); + r->file_info.file_size = r->builder->FileSize(); + + r->ikey.Set(user_key, 0 /* Sequence Number */, + ValueType::kTypeValue /* Put */); + r->builder->Add(r->ikey.Encode(), value); + + return Status::OK(); +} + +Status SstFileWriter::Finish(ExternalSstFileInfo* file_info) { + Rep* r = rep_; + if (!r->builder) { + return Status::InvalidArgument("File is not opened"); + } + if (r->file_info.num_entries == 0) { + return Status::InvalidArgument("Cannot create sst file with no entries"); + } + + Status s = r->builder->Finish(); + if (s.ok()) { + if (!r->ioptions.disable_data_sync) { + s = r->file_writer->Sync(r->ioptions.use_fsync); + } + if (s.ok()) { + s = r->file_writer->Close(); + } + } else { + r->builder->Abandon(); + } + + if (!s.ok()) { + r->ioptions.env->DeleteFile(r->file_info.file_path); + } + + if (s.ok() && file_info != nullptr) { + r->file_info.file_size = r->builder->FileSize(); + *file_info = r->file_info; + } + + r->builder.reset(); + return s; +} +} // namespace rocksdb diff --git a/external/rocksdb/table/table_builder.h b/external/rocksdb/table/table_builder.h new file mode 100755 index 0000000000..be19636f6d --- /dev/null +++ b/external/rocksdb/table/table_builder.h @@ -0,0 +1,123 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. 
diff --git a/external/rocksdb/table/table_builder.h b/external/rocksdb/table/table_builder.h
new file mode 100755
index 0000000000..be19636f6d
--- /dev/null
+++ b/external/rocksdb/table/table_builder.h
@@ -0,0 +1,123 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+
+#include <stdint.h>
+#include <string>
+#include <utility>
+#include <vector>
+#include "db/table_properties_collector.h"
+#include "rocksdb/options.h"
+#include "rocksdb/table_properties.h"
+#include "util/file_reader_writer.h"
+#include "util/mutable_cf_options.h"
+
+namespace rocksdb {
+
+class Slice;
+class Status;
+
+struct TableReaderOptions {
+  // @param skip_filters Disables loading/accessing the filter block
+  TableReaderOptions(const ImmutableCFOptions& _ioptions,
+                     const EnvOptions& _env_options,
+                     const InternalKeyComparator& _internal_comparator,
+                     bool _skip_filters = false, int _level = -1)
+      : ioptions(_ioptions),
+        env_options(_env_options),
+        internal_comparator(_internal_comparator),
+        skip_filters(_skip_filters),
+        level(_level) {}
+
+  const ImmutableCFOptions& ioptions;
+  const EnvOptions& env_options;
+  const InternalKeyComparator& internal_comparator;
+  // This is only used for BlockBasedTable (reader)
+  bool skip_filters;
+  // what level this table/file is on, -1 for "not set, don't know"
+  int level;
+};
+
+struct TableBuilderOptions {
+  TableBuilderOptions(
+      const ImmutableCFOptions& _ioptions,
+      const InternalKeyComparator& _internal_comparator,
+      const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
+          _int_tbl_prop_collector_factories,
+      CompressionType _compression_type,
+      const CompressionOptions& _compression_opts,
+      const std::string* _compression_dict, bool _skip_filters,
+      const std::string& _column_family_name)
+      : ioptions(_ioptions),
+        internal_comparator(_internal_comparator),
+        int_tbl_prop_collector_factories(_int_tbl_prop_collector_factories),
+        compression_type(_compression_type),
+        compression_opts(_compression_opts),
+        compression_dict(_compression_dict),
+        skip_filters(_skip_filters),
+        column_family_name(_column_family_name) {}
+  const ImmutableCFOptions& ioptions;
+  const InternalKeyComparator& internal_comparator;
+  const std::vector<std::unique_ptr<IntTblPropCollectorFactory>>*
+      int_tbl_prop_collector_factories;
+  CompressionType compression_type;
+  const CompressionOptions& compression_opts;
+  // Data for presetting the compression library's dictionary, or nullptr.
+  const std::string* compression_dict;
+  bool skip_filters;  // only used by BlockBasedTableBuilder
+  const std::string& column_family_name;
+};
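A hypothetical sketch of how these option structs are threaded through a TableFactory, consumed by the TableBuilder interface declared just below (the names `ioptions`, `ikc`, `cf_id` and `writer` are assumed to be in scope, not defined in this diff):

// Sketch: build a TableBuilderOptions and obtain a builder from the factory.
std::vector<std::unique_ptr<IntTblPropCollectorFactory>> factories;
std::string cf_name = "default";  // made-up column family name
TableBuilderOptions tb_opts(ioptions, ikc, &factories,
                            kNoCompression, CompressionOptions(),
                            nullptr /* compression_dict */,
                            false /* skip_filters */, cf_name);
// The returned builder is fed key/value pairs in sorted order and then
// sealed with Finish(); see sst_file_writer.cc above for a real caller.
std::unique_ptr<TableBuilder> builder(
    ioptions.table_factory->NewTableBuilder(tb_opts, cf_id, writer));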
+
+// TableBuilder provides the interface used to build a Table
+// (an immutable and sorted map from keys to values).
+//
+// Multiple threads can invoke const methods on a TableBuilder without
+// external synchronization, but if any of the threads may call a
+// non-const method, all threads accessing the same TableBuilder must use
+// external synchronization.
+class TableBuilder {
+ public:
+  // REQUIRES: Either Finish() or Abandon() has been called.
+  virtual ~TableBuilder() {}
+
+  // Add key,value to the table being constructed.
+  // REQUIRES: key is after any previously added key according to comparator.
+  // REQUIRES: Finish(), Abandon() have not been called
+  virtual void Add(const Slice& key, const Slice& value) = 0;
+
+  // Return non-ok iff some error has been detected.
+  virtual Status status() const = 0;
+
+  // Finish building the table.
+  // REQUIRES: Finish(), Abandon() have not been called
+  virtual Status Finish() = 0;
+
+  // Indicate that the contents of this builder should be abandoned.
+  // If the caller is not going to call Finish(), it must call Abandon()
+  // before destroying this builder.
+  // REQUIRES: Finish(), Abandon() have not been called
+  virtual void Abandon() = 0;
+
+  // Number of calls to Add() so far.
+  virtual uint64_t NumEntries() const = 0;
+
+  // Size of the file generated so far.  If invoked after a successful
+  // Finish() call, returns the size of the final generated file.
+  virtual uint64_t FileSize() const = 0;
+
+  // If the user defined table properties collector suggest the file to
+  // be further compacted.
+  virtual bool NeedCompact() const { return false; }
+
+  // Returns table properties
+  virtual TableProperties GetTableProperties() const = 0;
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/table/table_properties.cc b/external/rocksdb/table/table_properties.cc
new file mode 100755
index 0000000000..12e9054ada
--- /dev/null
+++ b/external/rocksdb/table/table_properties.cc
@@ -0,0 +1,186 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#include "table/table_properties_internal.h"
+#include "rocksdb/table_properties.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/env.h"
+#include "port/port.h"
+#include "table/internal_iterator.h"
+#include "util/string_util.h"
+
+namespace rocksdb {
+
+const uint32_t TablePropertiesCollectorFactory::Context::kUnknownColumnFamily =
+    port::kMaxInt32;
+
+namespace {
+  void AppendProperty(
+      std::string& props,
+      const std::string& key,
+      const std::string& value,
+      const std::string& prop_delim,
+      const std::string& kv_delim) {
+    props.append(key);
+    props.append(kv_delim);
+    props.append(value);
+    props.append(prop_delim);
+  }
+
+  template <class TValue>
+  void AppendProperty(
+      std::string& props,
+      const std::string& key,
+      const TValue& value,
+      const std::string& prop_delim,
+      const std::string& kv_delim) {
+    AppendProperty(
+        props, key, ToString(value), prop_delim, kv_delim
+    );
+  }
+
+  // Seek to the specified meta block.
+  // Return true if it successfully seeks to that block.
+  Status SeekToMetaBlock(InternalIterator* meta_iter,
+                         const std::string& block_name, bool* is_found) {
+    *is_found = true;
+    meta_iter->Seek(block_name);
+    if (meta_iter->status().ok() &&
+        (!meta_iter->Valid() || meta_iter->key() != block_name)) {
+      *is_found = false;
+    }
+    return meta_iter->status();
+  }
+}
+
+std::string TableProperties::ToString(
+    const std::string& prop_delim,
+    const std::string& kv_delim) const {
+  std::string result;
+  result.reserve(1024);
+
+  // Basic Info
+  AppendProperty(result, "# data blocks", num_data_blocks, prop_delim,
+                 kv_delim);
+  AppendProperty(result, "# entries", num_entries, prop_delim, kv_delim);
+
+  AppendProperty(result, "raw key size", raw_key_size, prop_delim, kv_delim);
+  AppendProperty(result, "raw average key size",
+                 num_entries != 0 ? 1.0 * raw_key_size / num_entries : 0.0,
+                 prop_delim, kv_delim);
+  AppendProperty(result, "raw value size", raw_value_size, prop_delim,
+                 kv_delim);
+  AppendProperty(result, "raw average value size",
+                 num_entries != 0 ? 1.0 * raw_value_size / num_entries : 0.0,
+                 prop_delim, kv_delim);
+
+  AppendProperty(result, "data block size", data_size, prop_delim, kv_delim);
+  AppendProperty(result, "index block size", index_size, prop_delim, kv_delim);
+  AppendProperty(result, "filter block size", filter_size, prop_delim,
+                 kv_delim);
+  AppendProperty(result, "(estimated) table size",
+                 data_size + index_size + filter_size, prop_delim, kv_delim);
+
+  AppendProperty(
+      result, "filter policy name",
+      filter_policy_name.empty() ?
std::string("N/A") : filter_policy_name, + prop_delim, kv_delim); + + AppendProperty(result, "column family ID", + column_family_id == rocksdb::TablePropertiesCollectorFactory:: + Context::kUnknownColumnFamily + ? std::string("N/A") + : rocksdb::ToString(column_family_id), + prop_delim, kv_delim); + AppendProperty( + result, "column family name", + column_family_name.empty() ? std::string("N/A") : column_family_name, + prop_delim, kv_delim); + + AppendProperty(result, "comparator name", + comparator_name.empty() ? std::string("N/A") : comparator_name, + prop_delim, kv_delim); + + AppendProperty( + result, "merge operator name", + merge_operator_name.empty() ? std::string("N/A") : merge_operator_name, + prop_delim, kv_delim); + + AppendProperty(result, "property collectors names", + property_collectors_names.empty() ? std::string("N/A") + : property_collectors_names, + prop_delim, kv_delim); + + AppendProperty( + result, "SST file compression algo", + compression_name.empty() ? std::string("N/A") : compression_name, + prop_delim, kv_delim); + + return result; +} + +void TableProperties::Add(const TableProperties& tp) { + data_size += tp.data_size; + index_size += tp.index_size; + filter_size += tp.filter_size; + raw_key_size += tp.raw_key_size; + raw_value_size += tp.raw_value_size; + num_data_blocks += tp.num_data_blocks; + num_entries += tp.num_entries; +} + +const std::string TablePropertiesNames::kDataSize = + "rocksdb.data.size"; +const std::string TablePropertiesNames::kIndexSize = + "rocksdb.index.size"; +const std::string TablePropertiesNames::kFilterSize = + "rocksdb.filter.size"; +const std::string TablePropertiesNames::kRawKeySize = + "rocksdb.raw.key.size"; +const std::string TablePropertiesNames::kRawValueSize = + "rocksdb.raw.value.size"; +const std::string TablePropertiesNames::kNumDataBlocks = + "rocksdb.num.data.blocks"; +const std::string TablePropertiesNames::kNumEntries = + "rocksdb.num.entries"; +const std::string TablePropertiesNames::kFilterPolicy = + "rocksdb.filter.policy"; +const std::string TablePropertiesNames::kFormatVersion = + "rocksdb.format.version"; +const std::string TablePropertiesNames::kFixedKeyLen = + "rocksdb.fixed.key.length"; +const std::string TablePropertiesNames::kColumnFamilyId = + "rocksdb.column.family.id"; +const std::string TablePropertiesNames::kColumnFamilyName = + "rocksdb.column.family.name"; +const std::string TablePropertiesNames::kComparator = "rocksdb.comparator"; +const std::string TablePropertiesNames::kMergeOperator = + "rocksdb.merge.operator"; +const std::string TablePropertiesNames::kPropertyCollectors = + "rocksdb.property.collectors"; +const std::string TablePropertiesNames::kCompression = "rocksdb.compression"; + +extern const std::string kPropertiesBlock = "rocksdb.properties"; +// Old property block name for backward compatibility +extern const std::string kPropertiesBlockOldName = "rocksdb.stats"; +extern const std::string kCompressionDictBlock = "rocksdb.compression_dict"; + +// Seek to the properties block. +// Return true if it successfully seeks to the properties block. +Status SeekToPropertiesBlock(InternalIterator* meta_iter, bool* is_found) { + Status status = SeekToMetaBlock(meta_iter, kPropertiesBlock, is_found); + if (!*is_found && status.ok()) { + status = SeekToMetaBlock(meta_iter, kPropertiesBlockOldName, is_found); + } + return status; +} + +// Seek to the compression dictionary block. +// Return true if it successfully seeks to that block. 
+Status SeekToCompressionDictBlock(InternalIterator* meta_iter, bool* is_found) {
+  return SeekToMetaBlock(meta_iter, kCompressionDictBlock, is_found);
+}
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/table/table_properties_internal.h b/external/rocksdb/table/table_properties_internal.h
new file mode 100755
index 0000000000..3d3a4b5f8b
--- /dev/null
+++ b/external/rocksdb/table/table_properties_internal.h
@@ -0,0 +1,25 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#pragma once
+
+#include "rocksdb/status.h"
+#include "rocksdb/iterator.h"
+
+namespace rocksdb {
+
+class InternalIterator;
+
+// Seek to the properties block.
+// If it successfully seeks to the properties block, "is_found" will be
+// set to true.
+Status SeekToPropertiesBlock(InternalIterator* meta_iter, bool* is_found);
+
+// Seek to the compression dictionary block.
+// If it successfully seeks to the compression dictionary block, "is_found"
+// will be set to true.
+Status SeekToCompressionDictBlock(InternalIterator* meta_iter, bool* is_found);
+
+}  // namespace rocksdb
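These helpers are called while opening a table file. A sketch of the intended call pattern (my illustration; `meta_iter` is assumed to already iterate the table's metaindex block):

// Sketch only: locate the properties block via the metaindex iterator.
bool found_props = false;
rocksdb::Status s = rocksdb::SeekToPropertiesBlock(meta_iter, &found_props);
if (s.ok() && found_props) {
  // meta_iter->value() now holds the handle of the properties block; note
  // the implementation above falls back to the legacy "rocksdb.stats" name.
}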
diff --git a/external/rocksdb/table/table_reader.h b/external/rocksdb/table/table_reader.h
new file mode 100755
index 0000000000..c047bf8cb2
--- /dev/null
+++ b/external/rocksdb/table/table_reader.h
@@ -0,0 +1,98 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#pragma once
+#include <memory>
+
+namespace rocksdb {
+
+class Iterator;
+struct ParsedInternalKey;
+class Slice;
+class Arena;
+struct ReadOptions;
+struct TableProperties;
+class GetContext;
+class InternalIterator;
+
+// A Table is a sorted map from strings to strings.  Tables are
+// immutable and persistent.  A Table may be safely accessed from
+// multiple threads without external synchronization.
+class TableReader {
+ public:
+  virtual ~TableReader() {}
+
+  // Returns a new iterator over the table contents.
+  // The result of NewIterator() is initially invalid (caller must
+  // call one of the Seek methods on the iterator before using it).
+  // arena: If not null, the arena needs to be used to allocate the Iterator.
+  //        When destroying the iterator, the caller will not call "delete"
+  //        but Iterator::~Iterator() directly. The destructor needs to destroy
+  //        all the states but those allocated in arena.
+  // skip_filters: disables checking the bloom filters even if they exist. This
+  //               option is effective only for block-based table format.
+  virtual InternalIterator* NewIterator(const ReadOptions&,
+                                        Arena* arena = nullptr,
+                                        bool skip_filters = false) = 0;
+
+  // Given a key, return an approximate byte offset in the file where
+  // the data for that key begins (or would begin if the key were
+  // present in the file).  The returned value is in terms of file
+  // bytes, and so includes effects like compression of the underlying data.
+  // E.g., the approximate offset of the last key in the table will
+  // be close to the file length.
+  virtual uint64_t ApproximateOffsetOf(const Slice& key) = 0;
+
+  // Set up the table for Compaction. Might change some parameters with
+  // posix_fadvise
+  virtual void SetupForCompaction() = 0;
+
+  virtual std::shared_ptr<const TableProperties> GetTableProperties() const = 0;
+
+  // Prepare work that can be done before the real Get()
+  virtual void Prepare(const Slice& target) {}
+
+  // Report an approximation of how much memory has been used.
+  virtual size_t ApproximateMemoryUsage() const = 0;
+
+  // Calls get_context->SaveValue() repeatedly, starting with
+  // the entry found after a call to Seek(key), until it returns false.
+  // May not make such a call if filter policy says that key is not present.
+  //
+  // get_context->MarkKeyMayExist needs to be called when it is configured to be
+  // memory only and the key is not found in the block cache.
+  //
+  // readOptions is the options for the read
+  // key is the key to search for
+  // skip_filters: disables checking the bloom filters even if they exist. This
+  //               option is effective only for block-based table format.
+  virtual Status Get(const ReadOptions& readOptions, const Slice& key,
+                     GetContext* get_context, bool skip_filters = false) = 0;
+
+  // Prefetch data corresponding to a given range of keys.
+  // Typically this functionality is required for table implementations that
+  // persist the data on a non-volatile storage medium like disk/SSD.
+  virtual Status Prefetch(const Slice* begin = nullptr,
+                          const Slice* end = nullptr) {
+    (void) begin;
+    (void) end;
+    // Default implementation is NOOP.
+    // The child class should implement functionality when applicable
+    return Status::OK();
+  }
+
+  // convert db file to a human readable form
+  virtual Status DumpTable(WritableFile* out_file) {
+    return Status::NotSupported("DumpTable() not supported");
+  }
+
+  virtual void Close() {}
+};
+
+}  // namespace rocksdb
diff --git a/external/rocksdb/table/table_reader_bench.cc b/external/rocksdb/table/table_reader_bench.cc
new file mode 100755
index 0000000000..0a227d1e38
--- /dev/null
+++ b/external/rocksdb/table/table_reader_bench.cc
@@ -0,0 +1,338 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+
+#ifndef GFLAGS
+#include <cstdio>
+int main() {
+  fprintf(stderr, "Please install gflags to run rocksdb tools\n");
+  return 1;
+}
+#else
+
+#include <gflags/gflags.h>
+
+#include "rocksdb/db.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/table.h"
+#include "db/db_impl.h"
+#include "db/dbformat.h"
+#include "table/block_based_table_factory.h"
+#include "table/internal_iterator.h"
+#include "table/plain_table_factory.h"
+#include "table/table_builder.h"
+#include "table/get_context.h"
+#include "util/file_reader_writer.h"
+#include "util/histogram.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+
+using GFLAGS::ParseCommandLineFlags;
+using GFLAGS::SetUsageMessage;
+
+namespace rocksdb {
+
+namespace {
+// Make a key that i determines the first 4 characters and j determines the
+// last 4 characters.
+static std::string MakeKey(int i, int j, bool through_db) {
+  char buf[100];
+  snprintf(buf, sizeof(buf), "%04d__key___%04d", i, j);
+  if (through_db) {
+    return std::string(buf);
+  }
+  // If we directly query table, which operates on internal keys
+  // instead of user keys, we need to add 8 bytes of internal
+  // information (row type etc) to user key to make an internal
+  // key.
+  InternalKey key(std::string(buf), 0, ValueType::kTypeValue);
+  return key.Encode().ToString();
+}
+
+uint64_t Now(Env* env, bool measured_by_nanosecond) {
+  return measured_by_nanosecond ? env->NowNanos() : env->NowMicros();
+}
+}  // namespace
+
+// A very simple benchmark that:
+// Creates a table with roughly numKey1 * numKey2 keys,
+// where there are numKey1 prefixes of the key, each prefix having numKey2
+// distinct keys that differ in the suffix part.
+// If if_query_empty_keys = false, query the existing keys numKey1 * numKey2
+// times randomly.
+// If if_query_empty_keys = true, query numKey1 * numKey2 random empty keys.
+// Print out the total time.
+// If through_db=true, a full DB will be created and queries will be against
+// it. Otherwise, operations will be directly through table level.
+//
+// If for_iterator=true, instead of just querying one key each time, it queries
+// a range sharing the same prefix.
+namespace {
+void TableReaderBenchmark(Options& opts, EnvOptions& env_options,
+                          ReadOptions& read_options, int num_keys1,
+                          int num_keys2, int num_iter, int prefix_len,
+                          bool if_query_empty_keys, bool for_iterator,
+                          bool through_db, bool measured_by_nanosecond) {
+  rocksdb::InternalKeyComparator ikc(opts.comparator);
+
+  std::string file_name = test::TmpDir()
+      + "/rocksdb_table_reader_benchmark";
+  std::string dbname = test::TmpDir() + "/rocksdb_table_reader_bench_db";
+  WriteOptions wo;
+  Env* env = Env::Default();
+  TableBuilder* tb = nullptr;
+  DB* db = nullptr;
+  Status s;
+  const ImmutableCFOptions ioptions(opts);
+  unique_ptr<WritableFileWriter> file_writer;
+  if (!through_db) {
+    unique_ptr<WritableFile> file;
+    env->NewWritableFile(file_name, &file, env_options);
+
+    std::vector<std::unique_ptr<IntTblPropCollectorFactory> >
+        int_tbl_prop_collector_factories;
+
+    file_writer.reset(new WritableFileWriter(std::move(file), env_options));
+
+    tb = opts.table_factory->NewTableBuilder(
+        TableBuilderOptions(ioptions, ikc, &int_tbl_prop_collector_factories,
+                            CompressionType::kNoCompression,
+                            CompressionOptions(),
+                            nullptr /* compression_dict */,
+                            false /* skip_filters */, kDefaultColumnFamilyName),
+        0 /* column_family_id */, file_writer.get());
+  } else {
+    s = DB::Open(opts, dbname, &db);
+    ASSERT_OK(s);
+    ASSERT_TRUE(db != nullptr);
+  }
+  // Populate slightly more than 1M keys
+  for (int i = 0; i < num_keys1; i++) {
+    for (int j = 0; j < num_keys2; j++) {
+      std::string key = MakeKey(i * 2, j, through_db);
+      if (!through_db) {
+        tb->Add(key, key);
+      } else {
+        db->Put(wo, key, key);
+      }
+    }
+  }
+  if (!through_db) {
+    tb->Finish();
+    file_writer->Close();
+  } else {
+    db->Flush(FlushOptions());
+  }
+
+  unique_ptr<TableReader> table_reader;
+  if (!through_db) {
+    unique_ptr<RandomAccessFile> raf;
+    s = env->NewRandomAccessFile(file_name, &raf, env_options);
+    if (!s.ok()) {
+      fprintf(stderr, "Create File Error: %s\n", s.ToString().c_str());
+      exit(1);
+    }
+    uint64_t file_size;
+    env->GetFileSize(file_name, &file_size);
+    unique_ptr<RandomAccessFileReader> file_reader(
+        new RandomAccessFileReader(std::move(raf)));
+    s = opts.table_factory->NewTableReader(
+        TableReaderOptions(ioptions, env_options, ikc), std::move(file_reader),
+        file_size, &table_reader);
+    if (!s.ok()) {
+      fprintf(stderr, "Open Table Error:
%s\n", s.ToString().c_str()); + exit(1); + } + } + + Random rnd(301); + std::string result; + HistogramImpl hist; + + for (int it = 0; it < num_iter; it++) { + for (int i = 0; i < num_keys1; i++) { + for (int j = 0; j < num_keys2; j++) { + int r1 = rnd.Uniform(num_keys1) * 2; + int r2 = rnd.Uniform(num_keys2); + if (if_query_empty_keys) { + r1++; + r2 = num_keys2 * 2 - r2; + } + + if (!for_iterator) { + // Query one existing key; + std::string key = MakeKey(r1, r2, through_db); + uint64_t start_time = Now(env, measured_by_nanosecond); + if (!through_db) { + std::string value; + MergeContext merge_context; + GetContext get_context(ioptions.comparator, ioptions.merge_operator, + ioptions.info_log, ioptions.statistics, + GetContext::kNotFound, Slice(key), &value, + nullptr, &merge_context, env); + s = table_reader->Get(read_options, key, &get_context); + } else { + s = db->Get(read_options, key, &result); + } + hist.Add(Now(env, measured_by_nanosecond) - start_time); + } else { + int r2_len; + if (if_query_empty_keys) { + r2_len = 0; + } else { + r2_len = rnd.Uniform(num_keys2) + 1; + if (r2_len + r2 > num_keys2) { + r2_len = num_keys2 - r2; + } + } + std::string start_key = MakeKey(r1, r2, through_db); + std::string end_key = MakeKey(r1, r2 + r2_len, through_db); + uint64_t total_time = 0; + uint64_t start_time = Now(env, measured_by_nanosecond); + Iterator* iter = nullptr; + InternalIterator* iiter = nullptr; + if (!through_db) { + iiter = table_reader->NewIterator(read_options); + } else { + iter = db->NewIterator(read_options); + } + int count = 0; + for (through_db ? iter->Seek(start_key) : iiter->Seek(start_key); + through_db ? iter->Valid() : iiter->Valid(); + through_db ? iter->Next() : iiter->Next()) { + if (if_query_empty_keys) { + break; + } + // verify key; + total_time += Now(env, measured_by_nanosecond) - start_time; + assert(Slice(MakeKey(r1, r2 + count, through_db)) == + (through_db ? iter->key() : iiter->key())); + start_time = Now(env, measured_by_nanosecond); + if (++count >= r2_len) { + break; + } + } + if (count != r2_len) { + fprintf( + stderr, "Iterator cannot iterate expected number of entries. " + "Expected %d but got %d\n", r2_len, count); + assert(false); + } + delete iter; + total_time += Now(env, measured_by_nanosecond) - start_time; + hist.Add(total_time); + } + } + } + } + + fprintf( + stderr, + "===================================================" + "====================================================\n" + "InMemoryTableSimpleBenchmark: %20s num_key1: %5d " + "num_key2: %5d %10s\n" + "===================================================" + "====================================================" + "\nHistogram (unit: %s): \n%s", + opts.table_factory->Name(), num_keys1, num_keys2, + for_iterator ? "iterator" : (if_query_empty_keys ? "empty" : "non_empty"), + measured_by_nanosecond ? 
"nanosecond" : "microsecond", + hist.ToString().c_str()); + if (!through_db) { + env->DeleteFile(file_name); + } else { + delete db; + db = nullptr; + DestroyDB(dbname, opts); + } +} +} // namespace +} // namespace rocksdb + +DEFINE_bool(query_empty, false, "query non-existing keys instead of existing " + "ones."); +DEFINE_int32(num_keys1, 4096, "number of distinguish prefix of keys"); +DEFINE_int32(num_keys2, 512, "number of distinguish keys for each prefix"); +DEFINE_int32(iter, 3, "query non-existing keys instead of existing ones"); +DEFINE_int32(prefix_len, 16, "Prefix length used for iterators and indexes"); +DEFINE_bool(iterator, false, "For test iterator"); +DEFINE_bool(through_db, false, "If enable, a DB instance will be created and " + "the query will be against DB. Otherwise, will be directly against " + "a table reader."); +DEFINE_bool(mmap_read, true, "Whether use mmap read"); +DEFINE_string(table_factory, "block_based", + "Table factory to use: `block_based` (default), `plain_table` or " + "`cuckoo_hash`."); +DEFINE_string(time_unit, "microsecond", + "The time unit used for measuring performance. User can specify " + "`microsecond` (default) or `nanosecond`"); + +int main(int argc, char** argv) { + SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) + + " [OPTIONS]..."); + ParseCommandLineFlags(&argc, &argv, true); + + std::shared_ptr tf; + rocksdb::Options options; + if (FLAGS_prefix_len < 16) { + options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform( + FLAGS_prefix_len)); + } + rocksdb::ReadOptions ro; + rocksdb::EnvOptions env_options; + options.create_if_missing = true; + options.compression = rocksdb::CompressionType::kNoCompression; + + if (FLAGS_table_factory == "cuckoo_hash") { +#ifndef ROCKSDB_LITE + options.allow_mmap_reads = FLAGS_mmap_read; + env_options.use_mmap_reads = FLAGS_mmap_read; + rocksdb::CuckooTableOptions table_options; + table_options.hash_table_ratio = 0.75; + tf.reset(rocksdb::NewCuckooTableFactory(table_options)); +#else + fprintf(stderr, "Plain table is not supported in lite mode\n"); + exit(1); +#endif // ROCKSDB_LITE + } else if (FLAGS_table_factory == "plain_table") { +#ifndef ROCKSDB_LITE + options.allow_mmap_reads = FLAGS_mmap_read; + env_options.use_mmap_reads = FLAGS_mmap_read; + + rocksdb::PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 16; + plain_table_options.bloom_bits_per_key = (FLAGS_prefix_len == 16) ? 0 : 8; + plain_table_options.hash_table_ratio = 0.75; + + tf.reset(new rocksdb::PlainTableFactory(plain_table_options)); + options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform( + FLAGS_prefix_len)); +#else + fprintf(stderr, "Cuckoo table is not supported in lite mode\n"); + exit(1); +#endif // ROCKSDB_LITE + } else if (FLAGS_table_factory == "block_based") { + tf.reset(new rocksdb::BlockBasedTableFactory()); + } else { + fprintf(stderr, "Invalid table type %s\n", FLAGS_table_factory.c_str()); + } + + if (tf) { + // if user provides invalid options, just fall back to microsecond. 
+
+int main(int argc, char** argv) {
+  SetUsageMessage(std::string("\nUSAGE:\n") + std::string(argv[0]) +
+                  " [OPTIONS]...");
+  ParseCommandLineFlags(&argc, &argv, true);
+
+  std::shared_ptr<rocksdb::TableFactory> tf;
+  rocksdb::Options options;
+  if (FLAGS_prefix_len < 16) {
+    options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(
+        FLAGS_prefix_len));
+  }
+  rocksdb::ReadOptions ro;
+  rocksdb::EnvOptions env_options;
+  options.create_if_missing = true;
+  options.compression = rocksdb::CompressionType::kNoCompression;
+
+  if (FLAGS_table_factory == "cuckoo_hash") {
+#ifndef ROCKSDB_LITE
+    options.allow_mmap_reads = FLAGS_mmap_read;
+    env_options.use_mmap_reads = FLAGS_mmap_read;
+    rocksdb::CuckooTableOptions table_options;
+    table_options.hash_table_ratio = 0.75;
+    tf.reset(rocksdb::NewCuckooTableFactory(table_options));
+#else
+    fprintf(stderr, "Cuckoo table is not supported in lite mode\n");
+    exit(1);
+#endif  // ROCKSDB_LITE
+  } else if (FLAGS_table_factory == "plain_table") {
+#ifndef ROCKSDB_LITE
+    options.allow_mmap_reads = FLAGS_mmap_read;
+    env_options.use_mmap_reads = FLAGS_mmap_read;
+
+    rocksdb::PlainTableOptions plain_table_options;
+    plain_table_options.user_key_len = 16;
+    plain_table_options.bloom_bits_per_key = (FLAGS_prefix_len == 16) ? 0 : 8;
+    plain_table_options.hash_table_ratio = 0.75;
+
+    tf.reset(new rocksdb::PlainTableFactory(plain_table_options));
+    options.prefix_extractor.reset(rocksdb::NewFixedPrefixTransform(
+        FLAGS_prefix_len));
+#else
+    fprintf(stderr, "Plain table is not supported in lite mode\n");
+    exit(1);
+#endif  // ROCKSDB_LITE
+  } else if (FLAGS_table_factory == "block_based") {
+    tf.reset(new rocksdb::BlockBasedTableFactory());
+  } else {
+    fprintf(stderr, "Invalid table type %s\n", FLAGS_table_factory.c_str());
+  }
+
+  if (tf) {
+    // if user provides invalid options, just fall back to microsecond.
+    bool measured_by_nanosecond = FLAGS_time_unit == "nanosecond";
+
+    options.table_factory = tf;
+    rocksdb::TableReaderBenchmark(options, env_options, ro, FLAGS_num_keys1,
+                                  FLAGS_num_keys2, FLAGS_iter, FLAGS_prefix_len,
+                                  FLAGS_query_empty, FLAGS_iterator,
+                                  FLAGS_through_db, measured_by_nanosecond);
+  } else {
+    return 1;
+  }
+
+  return 0;
+}
+
+#endif  // GFLAGS
diff --git a/external/rocksdb/table/table_test.cc b/external/rocksdb/table/table_test.cc
new file mode 100755
index 0000000000..1f930f97e5
--- /dev/null
+++ b/external/rocksdb/table/table_test.cc
@@ -0,0 +1,2622 @@
+// Copyright (c) 2011-present, Facebook, Inc.  All rights reserved.
+// This source code is licensed under the BSD-style license found in the
+// LICENSE file in the root directory of this source tree. An additional grant
+// of patent rights can be found in the PATENTS file in the same directory.
+//
+// Copyright (c) 2011 The LevelDB Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file. See the AUTHORS file for names of contributors.
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <algorithm>
+#include <iostream>
+#include <map>
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "db/dbformat.h"
+#include "db/memtable.h"
+#include "db/write_batch_internal.h"
+#include "memtable/stl_wrappers.h"
+#include "rocksdb/cache.h"
+#include "rocksdb/db.h"
+#include "rocksdb/env.h"
+#include "rocksdb/iterator.h"
+#include "rocksdb/memtablerep.h"
+#include "rocksdb/perf_context.h"
+#include "rocksdb/slice_transform.h"
+#include "rocksdb/statistics.h"
+#include "rocksdb/write_buffer_manager.h"
+#include "table/block.h"
+#include "table/block_based_table_builder.h"
+#include "table/block_based_table_factory.h"
+#include "table/block_based_table_reader.h"
+#include "table/block_builder.h"
+#include "table/format.h"
+#include "table/get_context.h"
+#include "table/internal_iterator.h"
+#include "table/meta_blocks.h"
+#include "table/plain_table_factory.h"
+#include "table/scoped_arena_iterator.h"
+#include "util/compression.h"
+#include "util/random.h"
+#include "util/statistics.h"
+#include "util/string_util.h"
+#include "util/sync_point.h"
+#include "util/testharness.h"
+#include "util/testutil.h"
+#include "utilities/merge_operators.h"
+
+namespace rocksdb {
+
+extern const uint64_t kLegacyBlockBasedTableMagicNumber;
+extern const uint64_t kLegacyPlainTableMagicNumber;
+extern const uint64_t kBlockBasedTableMagicNumber;
+extern const uint64_t kPlainTableMagicNumber;
+
+namespace {
+
+// DummyPropertiesCollector used to test BlockBasedTableProperties
+class DummyPropertiesCollector : public TablePropertiesCollector {
+ public:
+  const char* Name() const { return ""; }
+
+  Status Finish(UserCollectedProperties* properties) { return Status::OK(); }
+
+  Status Add(const Slice& user_key, const Slice& value) { return Status::OK(); }
+
+  virtual UserCollectedProperties GetReadableProperties() const {
+    return UserCollectedProperties{};
+  }
+};
+
+class DummyPropertiesCollectorFactory1
+    : public TablePropertiesCollectorFactory {
+ public:
+  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
+      TablePropertiesCollectorFactory::Context context) {
+    return new DummyPropertiesCollector();
+  }
+  const char* Name() const { return "DummyPropertiesCollector1"; }
+};
+
+class DummyPropertiesCollectorFactory2
+    : public TablePropertiesCollectorFactory {
+ public:
+  virtual TablePropertiesCollector* CreateTablePropertiesCollector(
+      TablePropertiesCollectorFactory::Context context) {
+    return new
DummyPropertiesCollector(); + } + const char* Name() const { return "DummyPropertiesCollector2"; } +}; + +// Return reverse of "key". +// Used to test non-lexicographic comparators. +std::string Reverse(const Slice& key) { + auto rev = key.ToString(); + std::reverse(rev.begin(), rev.end()); + return rev; +} + +class ReverseKeyComparator : public Comparator { + public: + virtual const char* Name() const override { + return "rocksdb.ReverseBytewiseComparator"; + } + + virtual int Compare(const Slice& a, const Slice& b) const override { + return BytewiseComparator()->Compare(Reverse(a), Reverse(b)); + } + + virtual void FindShortestSeparator(std::string* start, + const Slice& limit) const override { + std::string s = Reverse(*start); + std::string l = Reverse(limit); + BytewiseComparator()->FindShortestSeparator(&s, l); + *start = Reverse(s); + } + + virtual void FindShortSuccessor(std::string* key) const override { + std::string s = Reverse(*key); + BytewiseComparator()->FindShortSuccessor(&s); + *key = Reverse(s); + } +}; + +ReverseKeyComparator reverse_key_comparator; + +void Increment(const Comparator* cmp, std::string* key) { + if (cmp == BytewiseComparator()) { + key->push_back('\0'); + } else { + assert(cmp == &reverse_key_comparator); + std::string rev = Reverse(*key); + rev.push_back('\0'); + *key = Reverse(rev); + } +} + +} // namespace + +// Helper class for tests to unify the interface between +// BlockBuilder/TableBuilder and Block/Table. +class Constructor { + public: + explicit Constructor(const Comparator* cmp) + : data_(stl_wrappers::LessOfComparator(cmp)) {} + virtual ~Constructor() { } + + void Add(const std::string& key, const Slice& value) { + data_[key] = value.ToString(); + } + + // Finish constructing the data structure with all the keys that have + // been added so far. 
+  // Returns the keys in sorted order in "*keys"
+  // and stores the key/value pairs in "*kvmap"
+  void Finish(const Options& options, const ImmutableCFOptions& ioptions,
+              const BlockBasedTableOptions& table_options,
+              const InternalKeyComparator& internal_comparator,
+              std::vector<std::string>* keys, stl_wrappers::KVMap* kvmap) {
+    last_internal_key_ = &internal_comparator;
+    *kvmap = data_;
+    keys->clear();
+    for (const auto& kv : data_) {
+      keys->push_back(kv.first);
+    }
+    data_.clear();
+    Status s = FinishImpl(options, ioptions, table_options,
+                          internal_comparator, *kvmap);
+    ASSERT_TRUE(s.ok()) << s.ToString();
+  }
+
+  // Construct the data structure from the data in "data"
+  virtual Status FinishImpl(const Options& options,
+                            const ImmutableCFOptions& ioptions,
+                            const BlockBasedTableOptions& table_options,
+                            const InternalKeyComparator& internal_comparator,
+                            const stl_wrappers::KVMap& data) = 0;
+
+  virtual InternalIterator* NewIterator() const = 0;
+
+  virtual const stl_wrappers::KVMap& data() { return data_; }
+
+  virtual bool IsArenaMode() const { return false; }
+
+  virtual DB* db() const { return nullptr; }  // Overridden in DBConstructor
+
+  virtual bool AnywayDeleteIterator() const { return false; }
+
+ protected:
+  const InternalKeyComparator* last_internal_key_;
+
+ private:
+  stl_wrappers::KVMap data_;
+};
+
+class BlockConstructor: public Constructor {
+ public:
+  explicit BlockConstructor(const Comparator* cmp)
+      : Constructor(cmp),
+        comparator_(cmp),
+        block_(nullptr) { }
+  ~BlockConstructor() {
+    delete block_;
+  }
+  virtual Status FinishImpl(const Options& options,
+                            const ImmutableCFOptions& ioptions,
+                            const BlockBasedTableOptions& table_options,
+                            const InternalKeyComparator& internal_comparator,
+                            const stl_wrappers::KVMap& kv_map) override {
+    delete block_;
+    block_ = nullptr;
+    BlockBuilder builder(table_options.block_restart_interval);
+
+    for (const auto kv : kv_map) {
+      builder.Add(kv.first, kv.second);
+    }
+    // Open the block
+    data_ = builder.Finish().ToString();
+    BlockContents contents;
+    contents.data = data_;
+    contents.cachable = false;
+    block_ = new Block(std::move(contents));
+    return Status::OK();
+  }
+  virtual InternalIterator* NewIterator() const override {
+    return block_->NewIterator(comparator_);
+  }
+
+ private:
+  const Comparator* comparator_;
+  std::string data_;
+  Block* block_;
+
+  BlockConstructor();
+};
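To make the Constructor contract concrete, a hypothetical snippet driving the BlockConstructor defined above (illustrative only; it assumes this test file's context, i.e. a gtest body inside namespace rocksdb with `options`, `ioptions` and `table_options` initialized as in HarnessTest::Init() further below):

// Sketch: add unsorted data, Finish() builds the block and sorts the keys.
BlockConstructor c(BytewiseComparator());
c.Add("abd", "v2");
c.Add("abc", "v1");
std::vector<std::string> keys;
stl_wrappers::KVMap kvmap;
InternalKeyComparator ikc(BytewiseComparator());
c.Finish(options, ioptions, table_options, ikc, &keys, &kvmap);
std::unique_ptr<InternalIterator> it(c.NewIterator());
for (it->SeekToFirst(); it->Valid(); it->Next()) {
  // keys come back in comparator order: "abc", then "abd"
}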
+
+// A helper class that converts internal format keys into user keys
+class KeyConvertingIterator : public InternalIterator {
+ public:
+  explicit KeyConvertingIterator(InternalIterator* iter,
+                                 bool arena_mode = false)
+      : iter_(iter), arena_mode_(arena_mode) {}
+  virtual ~KeyConvertingIterator() {
+    if (arena_mode_) {
+      iter_->~InternalIterator();
+    } else {
+      delete iter_;
+    }
+  }
+  virtual bool Valid() const override { return iter_->Valid(); }
+  virtual void Seek(const Slice& target) override {
+    ParsedInternalKey ikey(target, kMaxSequenceNumber, kTypeValue);
+    std::string encoded;
+    AppendInternalKey(&encoded, ikey);
+    iter_->Seek(encoded);
+  }
+  virtual void SeekToFirst() override { iter_->SeekToFirst(); }
+  virtual void SeekToLast() override { iter_->SeekToLast(); }
+  virtual void Next() override { iter_->Next(); }
+  virtual void Prev() override { iter_->Prev(); }
+
+  virtual Slice key() const override {
+    assert(Valid());
+    ParsedInternalKey parsed_key;
+    if (!ParseInternalKey(iter_->key(), &parsed_key)) {
+      status_ = Status::Corruption("malformed internal key");
+      return Slice("corrupted key");
+    }
+    return parsed_key.user_key;
+  }
+
+  virtual Slice value() const override { return iter_->value(); }
+  virtual Status status() const override {
+    return status_.ok() ? iter_->status() : status_;
+  }
+
+ private:
+  mutable Status status_;
+  InternalIterator* iter_;
+  bool arena_mode_;
+
+  // No copying allowed
+  KeyConvertingIterator(const KeyConvertingIterator&);
+  void operator=(const KeyConvertingIterator&);
+};
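For context, a sketch of the internal-key round trip that KeyConvertingIterator relies on (my illustration, assuming only db/dbformat.h): an internal key is the user key plus an 8-byte trailer packing the sequence number and value type, which is also why GetUserKey() in plain_table_reader.h strips the last 8 bytes and MakeKey() in the benchmark appends them.

// Sketch: user key <-> internal key round trip.
rocksdb::InternalKey ikey("user_key", /*sequence=*/42,
                          rocksdb::ValueType::kTypeValue);
rocksdb::Slice encoded = ikey.Encode();   // "user_key" + 8 trailer bytes
rocksdb::ParsedInternalKey parsed;
if (rocksdb::ParseInternalKey(encoded, &parsed)) {
  // parsed.user_key == "user_key", parsed.sequence == 42
}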
+
+class TableConstructor: public Constructor {
+ public:
+  explicit TableConstructor(const Comparator* cmp,
+                            bool convert_to_internal_key = false)
+      : Constructor(cmp),
+        convert_to_internal_key_(convert_to_internal_key) {}
+  ~TableConstructor() { Reset(); }
+
+  virtual Status FinishImpl(const Options& options,
+                            const ImmutableCFOptions& ioptions,
+                            const BlockBasedTableOptions& table_options,
+                            const InternalKeyComparator& internal_comparator,
+                            const stl_wrappers::KVMap& kv_map) override {
+    Reset();
+    soptions.use_mmap_reads = ioptions.allow_mmap_reads;
+    file_writer_.reset(test::GetWritableFileWriter(new test::StringSink()));
+    unique_ptr<TableBuilder> builder;
+    std::vector<std::unique_ptr<IntTblPropCollectorFactory>>
+        int_tbl_prop_collector_factories;
+    std::string column_family_name;
+    builder.reset(ioptions.table_factory->NewTableBuilder(
+        TableBuilderOptions(ioptions, internal_comparator,
+                            &int_tbl_prop_collector_factories,
+                            options.compression, CompressionOptions(),
+                            nullptr /* compression_dict */,
+                            false /* skip_filters */, column_family_name),
+        TablePropertiesCollectorFactory::Context::kUnknownColumnFamily,
+        file_writer_.get()));
+
+    for (const auto kv : kv_map) {
+      if (convert_to_internal_key_) {
+        ParsedInternalKey ikey(kv.first, kMaxSequenceNumber, kTypeValue);
+        std::string encoded;
+        AppendInternalKey(&encoded, ikey);
+        builder->Add(encoded, kv.second);
+      } else {
+        builder->Add(kv.first, kv.second);
+      }
+      EXPECT_TRUE(builder->status().ok());
+    }
+    Status s = builder->Finish();
+    file_writer_->Flush();
+    EXPECT_TRUE(s.ok()) << s.ToString();
+
+    EXPECT_EQ(GetSink()->contents().size(), builder->FileSize());
+
+    // Open the table
+    uniq_id_ = cur_uniq_id_++;
+    file_reader_.reset(test::GetRandomAccessFileReader(new test::StringSource(
+        GetSink()->contents(), uniq_id_, ioptions.allow_mmap_reads)));
+    return ioptions.table_factory->NewTableReader(
+        TableReaderOptions(ioptions, soptions, internal_comparator),
+        std::move(file_reader_), GetSink()->contents().size(), &table_reader_);
+  }
+
+  virtual InternalIterator* NewIterator() const override {
+    ReadOptions ro;
+    InternalIterator* iter = table_reader_->NewIterator(ro);
+    if (convert_to_internal_key_) {
+      return new KeyConvertingIterator(iter);
+    } else {
+      return iter;
+    }
+  }
+
+  uint64_t ApproximateOffsetOf(const Slice& key) const {
+    return table_reader_->ApproximateOffsetOf(key);
+  }
+
+  virtual Status Reopen(const ImmutableCFOptions& ioptions) {
+    file_reader_.reset(test::GetRandomAccessFileReader(new test::StringSource(
+        GetSink()->contents(), uniq_id_, ioptions.allow_mmap_reads)));
+    return ioptions.table_factory->NewTableReader(
+        TableReaderOptions(ioptions, soptions, *last_internal_key_),
+        std::move(file_reader_), GetSink()->contents().size(), &table_reader_);
+  }
+
+  virtual TableReader* GetTableReader() {
+    return table_reader_.get();
+  }
+
+  virtual bool AnywayDeleteIterator() const override {
+    return convert_to_internal_key_;
+  }
+
+  void ResetTableReader() { table_reader_.reset(); }
+
+ private:
+  void Reset() {
+    uniq_id_ = 0;
+    table_reader_.reset();
+    file_writer_.reset();
+    file_reader_.reset();
+  }
+
+  test::StringSink* GetSink() {
+    return static_cast<test::StringSink*>(file_writer_->writable_file());
+  }
+
+  uint64_t uniq_id_;
+  unique_ptr<WritableFileWriter> file_writer_;
+  unique_ptr<RandomAccessFileReader> file_reader_;
+  unique_ptr<TableReader> table_reader_;
+  bool convert_to_internal_key_;
+
+  TableConstructor();
+
+  static uint64_t cur_uniq_id_;
+  EnvOptions soptions;
+};
+uint64_t TableConstructor::cur_uniq_id_ = 1;
+
+class MemTableConstructor: public Constructor {
+ public:
+  explicit MemTableConstructor(const Comparator* cmp, WriteBufferManager* wb)
+      : Constructor(cmp),
+        internal_comparator_(cmp),
+        write_buffer_manager_(wb),
+        table_factory_(new SkipListFactory) {
+    options_.memtable_factory = table_factory_;
+    ImmutableCFOptions ioptions(options_);
+    memtable_ = new MemTable(internal_comparator_, ioptions,
+                             MutableCFOptions(options_, ioptions), wb,
+                             kMaxSequenceNumber);
+    memtable_->Ref();
+  }
+  ~MemTableConstructor() {
+    delete memtable_->Unref();
+  }
+  virtual Status FinishImpl(const Options&, const ImmutableCFOptions& ioptions,
+                            const BlockBasedTableOptions& table_options,
+                            const InternalKeyComparator& internal_comparator,
+                            const stl_wrappers::KVMap& kv_map) override {
+    delete memtable_->Unref();
+    ImmutableCFOptions mem_ioptions(ioptions);
+    memtable_ = new MemTable(internal_comparator_, mem_ioptions,
+                             MutableCFOptions(options_, mem_ioptions),
+                             write_buffer_manager_, kMaxSequenceNumber);
+    memtable_->Ref();
+    int seq = 1;
+    for (const auto kv : kv_map) {
+      memtable_->Add(seq, kTypeValue, kv.first, kv.second);
+      seq++;
+    }
+    return Status::OK();
+  }
+  virtual InternalIterator* NewIterator() const override {
+    return new KeyConvertingIterator(
+        memtable_->NewIterator(ReadOptions(), &arena_), true);
+  }
+
+  virtual bool AnywayDeleteIterator() const override { return true; }
+
+  virtual bool IsArenaMode() const override { return true; }
+
+ private:
+  mutable Arena arena_;
+  InternalKeyComparator internal_comparator_;
+  Options options_;
+  WriteBufferManager* write_buffer_manager_;
+  MemTable* memtable_;
+  std::shared_ptr<SkipListFactory> table_factory_;
+};
+
+class InternalIteratorFromIterator : public InternalIterator {
+ public:
+  explicit InternalIteratorFromIterator(Iterator* it) : it_(it) {}
+  virtual bool Valid() const override { return it_->Valid(); }
+  virtual void Seek(const Slice& target) override { it_->Seek(target); }
+  virtual void SeekToFirst() override { it_->SeekToFirst(); }
+  virtual void SeekToLast() override { it_->SeekToLast(); }
+  virtual void Next() override { it_->Next(); }
+  virtual void Prev() override { it_->Prev(); }
+  Slice key() const override { return it_->key(); }
+  Slice value() const override { return it_->value(); }
+  virtual Status status() const override { return it_->status(); }
+
+ private:
+  unique_ptr<Iterator> it_;
+};
+
+class DBConstructor: public Constructor {
+ public:
+  explicit DBConstructor(const Comparator* cmp)
+      : Constructor(cmp),
+        comparator_(cmp) {
+    db_ = nullptr;
+    NewDB();
+  }
+  ~DBConstructor() {
+    delete db_;
+  }
+  virtual Status FinishImpl(const Options& options,
+                            const ImmutableCFOptions& ioptions,
+                            const BlockBasedTableOptions& table_options,
+                            const InternalKeyComparator& internal_comparator,
+                            const stl_wrappers::KVMap& kv_map) override {
+    delete db_;
+    db_ = nullptr;
+    NewDB();
+    for (const auto kv : kv_map) {
+      WriteBatch batch;
+      batch.Put(kv.first, kv.second);
+      EXPECT_TRUE(db_->Write(WriteOptions(), &batch).ok());
+    }
+    return Status::OK();
+  }
+
+  virtual InternalIterator* NewIterator() const override {
+    return new InternalIteratorFromIterator(db_->NewIterator(ReadOptions()));
+  }
+
+  virtual DB* db() const override { return db_; }
+
+ private:
+  void NewDB() {
+    std::string name = test::TmpDir() + "/table_testdb";
+
+    Options options;
+    options.comparator = comparator_;
+    Status status = DestroyDB(name, options);
+    ASSERT_TRUE(status.ok()) << status.ToString();
+
+    options.create_if_missing = true;
+    options.error_if_exists = true;
+    options.write_buffer_size = 10000;  // Something small to force merging
+    status = DB::Open(options, name, &db_);
+    ASSERT_TRUE(status.ok()) << status.ToString();
+  }
+
+  const Comparator* comparator_;
+  DB* db_;
+};
+
+enum TestType {
+  BLOCK_BASED_TABLE_TEST,
+#ifndef ROCKSDB_LITE
+  PLAIN_TABLE_SEMI_FIXED_PREFIX,
+  PLAIN_TABLE_FULL_STR_PREFIX,
+  PLAIN_TABLE_TOTAL_ORDER,
+#endif  // !ROCKSDB_LITE
+  BLOCK_TEST,
+  MEMTABLE_TEST,
+  DB_TEST
+};
+
+struct TestArgs {
+  TestType type;
+  bool reverse_compare;
+  int restart_interval;
+  CompressionType compression;
+  uint32_t format_version;
+  bool use_mmap;
+};
+
+static std::vector<TestArgs> GenerateArgList() {
+  std::vector<TestArgs> test_args;
+  std::vector<TestType> test_types = {
+      BLOCK_BASED_TABLE_TEST,
+#ifndef ROCKSDB_LITE
+      PLAIN_TABLE_SEMI_FIXED_PREFIX,
+      PLAIN_TABLE_FULL_STR_PREFIX,
+      PLAIN_TABLE_TOTAL_ORDER,
+#endif  // !ROCKSDB_LITE
+      BLOCK_TEST,
+      MEMTABLE_TEST, DB_TEST};
+  std::vector<bool> reverse_compare_types = {false, true};
+  std::vector<int> restart_intervals = {16, 1, 1024};
+
+  // Only add compression if it is supported
+  std::vector<std::pair<CompressionType, bool>> compression_types;
+  compression_types.emplace_back(kNoCompression, false);
+  if (Snappy_Supported()) {
+    compression_types.emplace_back(kSnappyCompression, false);
+  }
+  if (Zlib_Supported()) {
+    compression_types.emplace_back(kZlibCompression, false);
+    compression_types.emplace_back(kZlibCompression, true);
+  }
+  if (BZip2_Supported()) {
+    compression_types.emplace_back(kBZip2Compression, false);
+    compression_types.emplace_back(kBZip2Compression, true);
+  }
+  if (LZ4_Supported()) {
+    compression_types.emplace_back(kLZ4Compression, false);
+    compression_types.emplace_back(kLZ4Compression, true);
+    compression_types.emplace_back(kLZ4HCCompression, false);
+    compression_types.emplace_back(kLZ4HCCompression, true);
+  }
+  if (XPRESS_Supported()) {
+    compression_types.emplace_back(kXpressCompression, false);
+    compression_types.emplace_back(kXpressCompression, true);
+  }
+  if (ZSTD_Supported()) {
+    compression_types.emplace_back(kZSTDNotFinalCompression, false);
+    compression_types.emplace_back(kZSTDNotFinalCompression, true);
+  }
+
+  for (auto test_type : test_types) {
+    for (auto reverse_compare : reverse_compare_types) {
+#ifndef ROCKSDB_LITE
+      if (test_type == PLAIN_TABLE_SEMI_FIXED_PREFIX ||
+          test_type == PLAIN_TABLE_FULL_STR_PREFIX ||
+          test_type == PLAIN_TABLE_TOTAL_ORDER) {
+        // Plain table doesn't use restart index or compression.
+        TestArgs one_arg;
+        one_arg.type = test_type;
+        one_arg.reverse_compare = reverse_compare;
+        one_arg.restart_interval = restart_intervals[0];
+        one_arg.compression = compression_types[0].first;
+        one_arg.use_mmap = true;
+        test_args.push_back(one_arg);
+        one_arg.use_mmap = false;
+        test_args.push_back(one_arg);
+        continue;
+      }
+#endif  // !ROCKSDB_LITE
+
+      for (auto restart_interval : restart_intervals) {
+        for (auto compression_type : compression_types) {
+          TestArgs one_arg;
+          one_arg.type = test_type;
+          one_arg.reverse_compare = reverse_compare;
+          one_arg.restart_interval = restart_interval;
+          one_arg.compression = compression_type.first;
+          one_arg.format_version = compression_type.second ?
2 : 1; + one_arg.use_mmap = false; + test_args.push_back(one_arg); + } + } + } + } + return test_args; +} + +// In order to make all tests run for plain table format, including +// those operating on empty keys, create a new prefix transformer which +// return fixed prefix if the slice is not shorter than the prefix length, +// and the full slice if it is shorter. +class FixedOrLessPrefixTransform : public SliceTransform { + private: + const size_t prefix_len_; + + public: + explicit FixedOrLessPrefixTransform(size_t prefix_len) : + prefix_len_(prefix_len) { + } + + virtual const char* Name() const override { return "rocksdb.FixedPrefix"; } + + virtual Slice Transform(const Slice& src) const override { + assert(InDomain(src)); + if (src.size() < prefix_len_) { + return src; + } + return Slice(src.data(), prefix_len_); + } + + virtual bool InDomain(const Slice& src) const override { return true; } + + virtual bool InRange(const Slice& dst) const override { + return (dst.size() <= prefix_len_); + } +}; + +class HarnessTest : public testing::Test { + public: + HarnessTest() + : ioptions_(options_), + constructor_(nullptr), + write_buffer_(options_.db_write_buffer_size) {} + + void Init(const TestArgs& args) { + delete constructor_; + constructor_ = nullptr; + options_ = Options(); + options_.compression = args.compression; + // Use shorter block size for tests to exercise block boundary + // conditions more. + if (args.reverse_compare) { + options_.comparator = &reverse_key_comparator; + } + + internal_comparator_.reset( + new test::PlainInternalKeyComparator(options_.comparator)); + + support_prev_ = true; + only_support_prefix_seek_ = false; + options_.allow_mmap_reads = args.use_mmap; + switch (args.type) { + case BLOCK_BASED_TABLE_TEST: + table_options_.flush_block_policy_factory.reset( + new FlushBlockBySizePolicyFactory()); + table_options_.block_size = 256; + table_options_.block_restart_interval = args.restart_interval; + table_options_.index_block_restart_interval = args.restart_interval; + table_options_.format_version = args.format_version; + options_.table_factory.reset( + new BlockBasedTableFactory(table_options_)); + constructor_ = new TableConstructor(options_.comparator); + break; +// Plain table is not supported in ROCKSDB_LITE +#ifndef ROCKSDB_LITE + case PLAIN_TABLE_SEMI_FIXED_PREFIX: + support_prev_ = false; + only_support_prefix_seek_ = true; + options_.prefix_extractor.reset(new FixedOrLessPrefixTransform(2)); + options_.table_factory.reset(NewPlainTableFactory()); + constructor_ = new TableConstructor(options_.comparator, true); + internal_comparator_.reset( + new InternalKeyComparator(options_.comparator)); + break; + case PLAIN_TABLE_FULL_STR_PREFIX: + support_prev_ = false; + only_support_prefix_seek_ = true; + options_.prefix_extractor.reset(NewNoopTransform()); + options_.table_factory.reset(NewPlainTableFactory()); + constructor_ = new TableConstructor(options_.comparator, true); + internal_comparator_.reset( + new InternalKeyComparator(options_.comparator)); + break; + case PLAIN_TABLE_TOTAL_ORDER: + support_prev_ = false; + only_support_prefix_seek_ = false; + options_.prefix_extractor = nullptr; + + { + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = kPlainTableVariableLength; + plain_table_options.bloom_bits_per_key = 0; + plain_table_options.hash_table_ratio = 0; + + options_.table_factory.reset( + NewPlainTableFactory(plain_table_options)); + } + constructor_ = new TableConstructor(options_.comparator, true); + 
internal_comparator_.reset(
+            new InternalKeyComparator(options_.comparator));
+        break;
+#endif  // !ROCKSDB_LITE
+      case BLOCK_TEST:
+        table_options_.block_size = 256;
+        options_.table_factory.reset(
+            new BlockBasedTableFactory(table_options_));
+        constructor_ = new BlockConstructor(options_.comparator);
+        break;
+      case MEMTABLE_TEST:
+        table_options_.block_size = 256;
+        options_.table_factory.reset(
+            new BlockBasedTableFactory(table_options_));
+        constructor_ = new MemTableConstructor(options_.comparator,
+                                               &write_buffer_);
+        break;
+      case DB_TEST:
+        table_options_.block_size = 256;
+        options_.table_factory.reset(
+            new BlockBasedTableFactory(table_options_));
+        constructor_ = new DBConstructor(options_.comparator);
+        break;
+    }
+    ioptions_ = ImmutableCFOptions(options_);
+  }
+
+  ~HarnessTest() { delete constructor_; }
+
+  void Add(const std::string& key, const std::string& value) {
+    constructor_->Add(key, value);
+  }
+
+  void Test(Random* rnd) {
+    std::vector<std::string> keys;
+    stl_wrappers::KVMap data;
+    constructor_->Finish(options_, ioptions_, table_options_,
+                         *internal_comparator_, &keys, &data);
+
+    TestForwardScan(keys, data);
+    if (support_prev_) {
+      TestBackwardScan(keys, data);
+    }
+    TestRandomAccess(rnd, keys, data);
+  }
+
+  void TestForwardScan(const std::vector<std::string>& keys,
+                       const stl_wrappers::KVMap& data) {
+    InternalIterator* iter = constructor_->NewIterator();
+    ASSERT_TRUE(!iter->Valid());
+    iter->SeekToFirst();
+    for (stl_wrappers::KVMap::const_iterator model_iter = data.begin();
+         model_iter != data.end(); ++model_iter) {
+      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
+      iter->Next();
+    }
+    ASSERT_TRUE(!iter->Valid());
+    if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
+      iter->~InternalIterator();
+    } else {
+      delete iter;
+    }
+  }
+
+  void TestBackwardScan(const std::vector<std::string>& keys,
+                        const stl_wrappers::KVMap& data) {
+    InternalIterator* iter = constructor_->NewIterator();
+    ASSERT_TRUE(!iter->Valid());
+    iter->SeekToLast();
+    for (stl_wrappers::KVMap::const_reverse_iterator model_iter = data.rbegin();
+         model_iter != data.rend(); ++model_iter) {
+      ASSERT_EQ(ToString(data, model_iter), ToString(iter));
+      iter->Prev();
+    }
+    ASSERT_TRUE(!iter->Valid());
+    if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) {
+      iter->~InternalIterator();
+    } else {
+      delete iter;
+    }
+  }
+
+  void TestRandomAccess(Random* rnd, const std::vector<std::string>& keys,
+                        const stl_wrappers::KVMap& data) {
+    static const bool kVerbose = false;
+    InternalIterator* iter = constructor_->NewIterator();
+    ASSERT_TRUE(!iter->Valid());
+    stl_wrappers::KVMap::const_iterator model_iter = data.begin();
+    if (kVerbose) fprintf(stderr, "---\n");
+    for (int i = 0; i < 200; i++) {
+      const int toss = rnd->Uniform(support_prev_ ? 5 : 3);
5 : 3); + switch (toss) { + case 0: { + if (iter->Valid()) { + if (kVerbose) fprintf(stderr, "Next\n"); + iter->Next(); + ++model_iter; + ASSERT_EQ(ToString(data, model_iter), ToString(iter)); + } + break; + } + + case 1: { + if (kVerbose) fprintf(stderr, "SeekToFirst\n"); + iter->SeekToFirst(); + model_iter = data.begin(); + ASSERT_EQ(ToString(data, model_iter), ToString(iter)); + break; + } + + case 2: { + std::string key = PickRandomKey(rnd, keys); + model_iter = data.lower_bound(key); + if (kVerbose) fprintf(stderr, "Seek '%s'\n", + EscapeString(key).c_str()); + iter->Seek(Slice(key)); + ASSERT_EQ(ToString(data, model_iter), ToString(iter)); + break; + } + + case 3: { + if (iter->Valid()) { + if (kVerbose) fprintf(stderr, "Prev\n"); + iter->Prev(); + if (model_iter == data.begin()) { + model_iter = data.end(); // Wrap around to invalid value + } else { + --model_iter; + } + ASSERT_EQ(ToString(data, model_iter), ToString(iter)); + } + break; + } + + case 4: { + if (kVerbose) fprintf(stderr, "SeekToLast\n"); + iter->SeekToLast(); + if (keys.empty()) { + model_iter = data.end(); + } else { + std::string last = data.rbegin()->first; + model_iter = data.lower_bound(last); + } + ASSERT_EQ(ToString(data, model_iter), ToString(iter)); + break; + } + } + } + if (constructor_->IsArenaMode() && !constructor_->AnywayDeleteIterator()) { + iter->~InternalIterator(); + } else { + delete iter; + } + } + + std::string ToString(const stl_wrappers::KVMap& data, + const stl_wrappers::KVMap::const_iterator& it) { + if (it == data.end()) { + return "END"; + } else { + return "'" + it->first + "->" + it->second + "'"; + } + } + + std::string ToString(const stl_wrappers::KVMap& data, + const stl_wrappers::KVMap::const_reverse_iterator& it) { + if (it == data.rend()) { + return "END"; + } else { + return "'" + it->first + "->" + it->second + "'"; + } + } + + std::string ToString(const InternalIterator* it) { + if (!it->Valid()) { + return "END"; + } else { + return "'" + it->key().ToString() + "->" + it->value().ToString() + "'"; + } + } + + std::string PickRandomKey(Random* rnd, const std::vector& keys) { + if (keys.empty()) { + return "foo"; + } else { + const int index = rnd->Uniform(static_cast(keys.size())); + std::string result = keys[index]; + switch (rnd->Uniform(support_prev_ ? 
3 : 1)) { + case 0: + // Return an existing key + break; + case 1: { + // Attempt to return something smaller than an existing key + if (result.size() > 0 && result[result.size() - 1] > '\0' + && (!only_support_prefix_seek_ + || options_.prefix_extractor->Transform(result).size() + < result.size())) { + result[result.size() - 1]--; + } + break; + } + case 2: { + // Return something larger than an existing key + Increment(options_.comparator, &result); + break; + } + } + return result; + } + } + + // Returns nullptr if not running against a DB + DB* db() const { return constructor_->db(); } + + private: + Options options_ = Options(); + ImmutableCFOptions ioptions_; + BlockBasedTableOptions table_options_ = BlockBasedTableOptions(); + Constructor* constructor_; + WriteBufferManager write_buffer_; + bool support_prev_; + bool only_support_prefix_seek_; + shared_ptr<InternalKeyComparator> internal_comparator_; +}; + +static bool Between(uint64_t val, uint64_t low, uint64_t high) { + bool result = (val >= low) && (val <= high); + if (!result) { + fprintf(stderr, "Value %llu is not in range [%llu, %llu]\n", + (unsigned long long)(val), + (unsigned long long)(low), + (unsigned long long)(high)); + } + return result; +} + +// Tests against all kinds of tables +class TableTest : public testing::Test { + public: + const InternalKeyComparator& GetPlainInternalComparator( + const Comparator* comp) { + if (!plain_internal_comparator) { + plain_internal_comparator.reset( + new test::PlainInternalKeyComparator(comp)); + } + return *plain_internal_comparator; + } + + private: + std::unique_ptr<test::PlainInternalKeyComparator> plain_internal_comparator; +}; + +class GeneralTableTest : public TableTest {}; +class BlockBasedTableTest : public TableTest {}; +class PlainTableTest : public TableTest {}; +class TablePropertyTest : public testing::Test {}; + +// This test serves as a living tutorial for prefix scans of user-collected +// properties. +TEST_F(TablePropertyTest, PrefixScanTest) { + UserCollectedProperties props{{"num.111.1", "1"}, + {"num.111.2", "2"}, + {"num.111.3", "3"}, + {"num.333.1", "1"}, + {"num.333.2", "2"}, + {"num.333.3", "3"}, + {"num.555.1", "1"}, + {"num.555.2", "2"}, + {"num.555.3", "3"}, }; + + // prefixes that exist + for (const std::string& prefix : {"num.111", "num.333", "num.555"}) { + int num = 0; + for (auto pos = props.lower_bound(prefix); + pos != props.end() && + pos->first.compare(0, prefix.size(), prefix) == 0; + ++pos) { + ++num; + auto key = prefix + "." + ToString(num); + ASSERT_EQ(key, pos->first); + ASSERT_EQ(ToString(num), pos->second); + } + ASSERT_EQ(3, num); + } + + // prefixes that don't exist + for (const std::string& prefix : + {"num.000", "num.222", "num.444", "num.666"}) { + auto pos = props.lower_bound(prefix); + ASSERT_TRUE(pos == props.end() || + pos->first.compare(0, prefix.size(), prefix) != 0); + } +} + +// This test includes all the basic checks except those for index size and block +// size, which will be conducted in separate unit tests.
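One step in the test below deserves a note before the code: the expected data_size is reconstructed by re-encoding the same entries with a BlockBuilder and adding kBlockTrailerSize, because in the block-based format every block on disk is followed by a five-byte trailer, a one-byte compression-type tag plus a four-byte checksum. A minimal sketch of that accounting, with illustrative helper names rather than anything from the test file:

    #include <cstddef>
    #include <cstdint>

    // Illustrative only: kBlockTrailerSize in the block-based format is
    // 1 (compression-type byte) + 4 (checksum) = 5 bytes per block.
    constexpr size_t kSketchBlockTrailerSize = 1 + sizeof(uint32_t);

    // On-disk footprint of one block given the raw contents produced by
    // the block builder; this is what props.data_size is compared against.
    size_t OnDiskBlockSize(size_t raw_contents_size) {
      return raw_contents_size + kSketchBlockTrailerSize;
    }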
+TEST_F(BlockBasedTableTest, BasicBlockBasedTableProperties) { + TableConstructor c(BytewiseComparator()); + + c.Add("a1", "val1"); + c.Add("b2", "val2"); + c.Add("c3", "val3"); + c.Add("d4", "val4"); + c.Add("e5", "val5"); + c.Add("f6", "val6"); + c.Add("g7", "val7"); + c.Add("h8", "val8"); + c.Add("j9", "val9"); + + std::vector keys; + stl_wrappers::KVMap kvmap; + Options options; + options.compression = kNoCompression; + BlockBasedTableOptions table_options; + table_options.block_restart_interval = 1; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, + GetPlainInternalComparator(options.comparator), &keys, &kvmap); + + auto& props = *c.GetTableReader()->GetTableProperties(); + ASSERT_EQ(kvmap.size(), props.num_entries); + + auto raw_key_size = kvmap.size() * 2ul; + auto raw_value_size = kvmap.size() * 4ul; + + ASSERT_EQ(raw_key_size, props.raw_key_size); + ASSERT_EQ(raw_value_size, props.raw_value_size); + ASSERT_EQ(1ul, props.num_data_blocks); + ASSERT_EQ("", props.filter_policy_name); // no filter policy is used + + // Verify data size. + BlockBuilder block_builder(1); + for (const auto& item : kvmap) { + block_builder.Add(item.first, item.second); + } + Slice content = block_builder.Finish(); + ASSERT_EQ(content.size() + kBlockTrailerSize, props.data_size); + c.ResetTableReader(); +} + +TEST_F(BlockBasedTableTest, BlockBasedTableProperties2) { + TableConstructor c(&reverse_key_comparator); + std::vector keys; + stl_wrappers::KVMap kvmap; + + { + Options options; + options.compression = CompressionType::kNoCompression; + BlockBasedTableOptions table_options; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, + GetPlainInternalComparator(options.comparator), &keys, &kvmap); + + auto& props = *c.GetTableReader()->GetTableProperties(); + + // Default comparator + ASSERT_EQ("leveldb.BytewiseComparator", props.comparator_name); + // No merge operator + ASSERT_EQ("nullptr", props.merge_operator_name); + // No property collectors + ASSERT_EQ("[]", props.property_collectors_names); + // No filter policy is used + ASSERT_EQ("", props.filter_policy_name); + // Compression type == that set: + ASSERT_EQ("NoCompression", props.compression_name); + c.ResetTableReader(); + } + + { + Options options; + BlockBasedTableOptions table_options; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + options.comparator = &reverse_key_comparator; + options.merge_operator = MergeOperators::CreateUInt64AddOperator(); + options.table_properties_collector_factories.emplace_back( + new DummyPropertiesCollectorFactory1()); + options.table_properties_collector_factories.emplace_back( + new DummyPropertiesCollectorFactory2()); + + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, + GetPlainInternalComparator(options.comparator), &keys, &kvmap); + + auto& props = *c.GetTableReader()->GetTableProperties(); + + ASSERT_EQ("rocksdb.ReverseBytewiseComparator", props.comparator_name); + ASSERT_EQ("UInt64AddOperator", props.merge_operator_name); + ASSERT_EQ("[DummyPropertiesCollector1,DummyPropertiesCollector2]", + props.property_collectors_names); + ASSERT_EQ("", props.filter_policy_name); // no filter policy is used + c.ResetTableReader(); + } +} + +TEST_F(BlockBasedTableTest, FilterPolicyNameProperties) { + TableConstructor 
c(BytewiseComparator(), true); + c.Add("a1", "val1"); + std::vector<std::string> keys; + stl_wrappers::KVMap kvmap; + BlockBasedTableOptions table_options; + table_options.filter_policy.reset(NewBloomFilterPolicy(10)); + Options options; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, + GetPlainInternalComparator(options.comparator), &keys, &kvmap); + auto& props = *c.GetTableReader()->GetTableProperties(); + ASSERT_EQ("rocksdb.BuiltinBloomFilter", props.filter_policy_name); + c.ResetTableReader(); +} + +// +// BlockBasedTableTest::PrefetchTest +// +void AssertKeysInCache(BlockBasedTable* table_reader, + const std::vector<std::string>& keys_in_cache, + const std::vector<std::string>& keys_not_in_cache) { + for (auto key : keys_in_cache) { + ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), key)); + } + + for (auto key : keys_not_in_cache) { + ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), key)); + } +} + +void PrefetchRange(TableConstructor* c, Options* opt, + BlockBasedTableOptions* table_options, + const std::vector<std::string>& keys, const char* key_begin, + const char* key_end, + const std::vector<std::string>& keys_in_cache, + const std::vector<std::string>& keys_not_in_cache, + const Status expected_status = Status::OK()) { + // reset the cache and reopen the table + table_options->block_cache = NewLRUCache(16 * 1024 * 1024, 4); + opt->table_factory.reset(NewBlockBasedTableFactory(*table_options)); + const ImmutableCFOptions ioptions2(*opt); + ASSERT_OK(c->Reopen(ioptions2)); + + // prefetch + auto* table_reader = dynamic_cast<BlockBasedTable*>(c->GetTableReader()); + // substituting an empty string is a trick so we don't crash the test + Slice begin(key_begin ? key_begin : ""); + Slice end(key_end ? key_end : ""); + Status s = table_reader->Prefetch(key_begin ? &begin : nullptr, + key_end ? &end : nullptr); + ASSERT_TRUE(s.code() == expected_status.code()); + + // assert our expectation of cache warmup + AssertKeysInCache(table_reader, keys_in_cache, keys_not_in_cache); + c->ResetTableReader(); +} + +TEST_F(BlockBasedTableTest, PrefetchTest) { + // The purpose of this test is to test the prefetching operation built into + // BlockBasedTable. + Options opt; + unique_ptr<InternalKeyComparator> ikc; + ikc.reset(new test::PlainInternalKeyComparator(opt.comparator)); + opt.compression = kNoCompression; + BlockBasedTableOptions table_options; + table_options.block_size = 1024; + // big enough so we don't ever lose cached values.
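The cache configured on the next line uses NewLRUCache(capacity, num_shard_bits), a real RocksDB factory: the cache is split into 2^num_shard_bits shards, each owning an equal slice of the capacity, and eviction happens per shard. A small sketch of that arithmetic (the helper name is illustrative, not a RocksDB API):

    #include <cstddef>

    // Illustrative: a 16 MiB cache with 4 shard bits has 2^4 = 16 shards
    // of 1 MiB each. Each shard must be large enough for the blocks a
    // test expects to stay resident, which is why these fixtures size
    // the cache far above the data they write.
    size_t PerShardCapacity(size_t capacity_bytes, int num_shard_bits) {
      return capacity_bytes >> num_shard_bits;
    }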
+ table_options.block_cache = NewLRUCache(16 * 1024 * 1024, 4); + opt.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + TableConstructor c(BytewiseComparator()); + c.Add("k01", "hello"); + c.Add("k02", "hello2"); + c.Add("k03", std::string(10000, 'x')); + c.Add("k04", std::string(200000, 'x')); + c.Add("k05", std::string(300000, 'x')); + c.Add("k06", "hello3"); + c.Add("k07", std::string(100000, 'x')); + std::vector keys; + stl_wrappers::KVMap kvmap; + const ImmutableCFOptions ioptions(opt); + c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap); + c.ResetTableReader(); + + // We get the following data spread : + // + // Data block Index + // ======================== + // [ k01 k02 k03 ] k03 + // [ k04 ] k04 + // [ k05 ] k05 + // [ k06 k07 ] k07 + + + // Simple + PrefetchRange(&c, &opt, &table_options, keys, + /*key_range=*/ "k01", "k05", + /*keys_in_cache=*/ {"k01", "k02", "k03", "k04", "k05"}, + /*keys_not_in_cache=*/ {"k06", "k07"}); + PrefetchRange(&c, &opt, &table_options, keys, + "k01", "k01", + {"k01", "k02", "k03"}, + {"k04", "k05", "k06", "k07"}); + // odd + PrefetchRange(&c, &opt, &table_options, keys, + "a", "z", + {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, + {}); + PrefetchRange(&c, &opt, &table_options, keys, + "k00", "k00", + {"k01", "k02", "k03"}, + {"k04", "k05", "k06", "k07"}); + // Edge cases + PrefetchRange(&c, &opt, &table_options, keys, + "k00", "k06", + {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, + {}); + PrefetchRange(&c, &opt, &table_options, keys, + "k00", "zzz", + {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, + {}); + // null keys + PrefetchRange(&c, &opt, &table_options, keys, + nullptr, nullptr, + {"k01", "k02", "k03", "k04", "k05", "k06", "k07"}, + {}); + PrefetchRange(&c, &opt, &table_options, keys, + "k04", nullptr, + {"k04", "k05", "k06", "k07"}, + {"k01", "k02", "k03"}); + PrefetchRange(&c, &opt, &table_options, keys, + nullptr, "k05", + {"k01", "k02", "k03", "k04", "k05"}, + {"k06", "k07"}); + // invalid + PrefetchRange(&c, &opt, &table_options, keys, + "k06", "k00", {}, {}, + Status::InvalidArgument(Slice("k06 "), Slice("k07"))); + c.ResetTableReader(); +} + +TEST_F(BlockBasedTableTest, TotalOrderSeekOnHashIndex) { + BlockBasedTableOptions table_options; + for (int i = 0; i < 4; ++i) { + Options options; + // Make each key/value an individual block + table_options.block_size = 64; + switch (i) { + case 0: + // Binary search index + table_options.index_type = BlockBasedTableOptions::kBinarySearch; + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + break; + case 1: + // Hash search index + table_options.index_type = BlockBasedTableOptions::kHashSearch; + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + options.prefix_extractor.reset(NewFixedPrefixTransform(4)); + break; + case 2: + // Hash search index with hash_index_allow_collision + table_options.index_type = BlockBasedTableOptions::kHashSearch; + table_options.hash_index_allow_collision = true; + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + options.prefix_extractor.reset(NewFixedPrefixTransform(4)); + break; + case 3: + default: + // Hash search index with filter policy + table_options.index_type = BlockBasedTableOptions::kHashSearch; + table_options.filter_policy.reset(NewBloomFilterPolicy(10)); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + options.prefix_extractor.reset(NewFixedPrefixTransform(4)); + break; + } + + TableConstructor 
c(BytewiseComparator(), true); + c.Add("aaaa1", std::string(56, 'a')); + c.Add("bbaa1", std::string(56, 'a')); + c.Add("cccc1", std::string(56, 'a')); + c.Add("bbbb1", std::string(56, 'a')); + c.Add("baaa1", std::string(56, 'a')); + c.Add("abbb1", std::string(56, 'a')); + c.Add("cccc2", std::string(56, 'a')); + std::vector<std::string> keys; + stl_wrappers::KVMap kvmap; + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, + GetPlainInternalComparator(options.comparator), &keys, &kvmap); + auto props = c.GetTableReader()->GetTableProperties(); + ASSERT_EQ(7u, props->num_data_blocks); + auto* reader = c.GetTableReader(); + ReadOptions ro; + ro.total_order_seek = true; + std::unique_ptr<InternalIterator> iter(reader->NewIterator(ro)); + + iter->Seek(InternalKey("b", 0, kTypeValue).Encode()); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("baaa1", ExtractUserKey(iter->key()).ToString()); + iter->Next(); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("bbaa1", ExtractUserKey(iter->key()).ToString()); + + iter->Seek(InternalKey("bb", 0, kTypeValue).Encode()); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("bbaa1", ExtractUserKey(iter->key()).ToString()); + iter->Next(); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("bbbb1", ExtractUserKey(iter->key()).ToString()); + + iter->Seek(InternalKey("bbb", 0, kTypeValue).Encode()); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("bbbb1", ExtractUserKey(iter->key()).ToString()); + iter->Next(); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("cccc1", ExtractUserKey(iter->key()).ToString()); + } +} + +TEST_F(BlockBasedTableTest, NoopTransformSeek) { + BlockBasedTableOptions table_options; + table_options.filter_policy.reset(NewBloomFilterPolicy(10)); + + Options options; + options.comparator = BytewiseComparator(); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + options.prefix_extractor.reset(NewNoopTransform()); + + TableConstructor c(options.comparator); + // To tickle the PrefixMayMatch bug it is important that the + // user key is a single byte so that the index key exactly matches + // the user key.
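A note on the key built right below: InternalKey("a", 1, kTypeValue).Encode() produces the user key followed by an eight-byte little-endian trailer packing (sequence << 8) | type. A standalone sketch of that layout, under that stated assumption (EncodeInternalKey is an illustrative name, not the RocksDB API):

    #include <cstdint>
    #include <string>

    // Illustrative re-derivation of InternalKey::Encode()'s layout: the
    // user key bytes, then a fixed 64-bit little-endian trailer of
    // (sequence << 8) | value_type.
    std::string EncodeInternalKey(const std::string& user_key,
                                  uint64_t sequence, uint8_t value_type) {
      uint64_t packed = (sequence << 8) | value_type;
      std::string result = user_key;
      for (int i = 0; i < 8; ++i) {
        result.push_back(static_cast<char>((packed >> (8 * i)) & 0xff));
      }
      return result;
    }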
+ InternalKey key("a", 1, kTypeValue); + c.Add(key.Encode().ToString(), "b"); + std::vector keys; + stl_wrappers::KVMap kvmap; + const ImmutableCFOptions ioptions(options); + const InternalKeyComparator internal_comparator(options.comparator); + c.Finish(options, ioptions, table_options, internal_comparator, &keys, + &kvmap); + + auto* reader = c.GetTableReader(); + for (int i = 0; i < 2; ++i) { + ReadOptions ro; + ro.total_order_seek = (i == 0); + std::unique_ptr iter(reader->NewIterator(ro)); + + iter->Seek(key.Encode()); + ASSERT_OK(iter->status()); + ASSERT_TRUE(iter->Valid()); + ASSERT_EQ("a", ExtractUserKey(iter->key()).ToString()); + } +} + +static std::string RandomString(Random* rnd, int len) { + std::string r; + test::RandomString(rnd, len, &r); + return r; +} + +void AddInternalKey(TableConstructor* c, const std::string& prefix, + int suffix_len = 800) { + static Random rnd(1023); + InternalKey k(prefix + RandomString(&rnd, 800), 0, kTypeValue); + c->Add(k.Encode().ToString(), "v"); +} + +TEST_F(TableTest, HashIndexTest) { + TableConstructor c(BytewiseComparator()); + + // keys with prefix length 3, make sure the key/value is big enough to fill + // one block + AddInternalKey(&c, "0015"); + AddInternalKey(&c, "0035"); + + AddInternalKey(&c, "0054"); + AddInternalKey(&c, "0055"); + + AddInternalKey(&c, "0056"); + AddInternalKey(&c, "0057"); + + AddInternalKey(&c, "0058"); + AddInternalKey(&c, "0075"); + + AddInternalKey(&c, "0076"); + AddInternalKey(&c, "0095"); + + std::vector keys; + stl_wrappers::KVMap kvmap; + Options options; + options.prefix_extractor.reset(NewFixedPrefixTransform(3)); + BlockBasedTableOptions table_options; + table_options.index_type = BlockBasedTableOptions::kHashSearch; + table_options.hash_index_allow_collision = true; + table_options.block_size = 1700; + table_options.block_cache = NewLRUCache(1024, 4); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + std::unique_ptr comparator( + new InternalKeyComparator(BytewiseComparator())); + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, *comparator, &keys, &kvmap); + auto reader = c.GetTableReader(); + + auto props = reader->GetTableProperties(); + ASSERT_EQ(5u, props->num_data_blocks); + + std::unique_ptr hash_iter( + reader->NewIterator(ReadOptions())); + + // -- Find keys do not exist, but have common prefix. + std::vector prefixes = {"001", "003", "005", "007", "009"}; + std::vector lower_bound = {keys[0], keys[1], keys[2], + keys[7], keys[9], }; + + // find the lower bound of the prefix + for (size_t i = 0; i < prefixes.size(); ++i) { + hash_iter->Seek(InternalKey(prefixes[i], 0, kTypeValue).Encode()); + ASSERT_OK(hash_iter->status()); + ASSERT_TRUE(hash_iter->Valid()); + + // seek the first element in the block + ASSERT_EQ(lower_bound[i], hash_iter->key().ToString()); + ASSERT_EQ("v", hash_iter->value().ToString()); + } + + // find the upper bound of prefixes + std::vector upper_bound = {keys[1], keys[2], keys[7], keys[9], }; + + // find existing keys + for (const auto& item : kvmap) { + auto ukey = ExtractUserKey(item.first).ToString(); + hash_iter->Seek(ukey); + + // ASSERT_OK(regular_iter->status()); + ASSERT_OK(hash_iter->status()); + + // ASSERT_TRUE(regular_iter->Valid()); + ASSERT_TRUE(hash_iter->Valid()); + + ASSERT_EQ(item.first, hash_iter->key().ToString()); + ASSERT_EQ(item.second, hash_iter->value().ToString()); + } + + for (size_t i = 0; i < prefixes.size(); ++i) { + // the key is greater than any existing keys. 
+ auto key = prefixes[i] + "9"; + hash_iter->Seek(InternalKey(key, 0, kTypeValue).Encode()); + + ASSERT_OK(hash_iter->status()); + if (i == prefixes.size() - 1) { + // last key + ASSERT_TRUE(!hash_iter->Valid()); + } else { + ASSERT_TRUE(hash_iter->Valid()); + // seek the first element in the block + ASSERT_EQ(upper_bound[i], hash_iter->key().ToString()); + ASSERT_EQ("v", hash_iter->value().ToString()); + } + } + + // find keys with prefixes that don't match any of the existing prefixes. + std::vector<std::string> non_exist_prefixes = {"002", "004", "006", "008"}; + for (const auto& prefix : non_exist_prefixes) { + hash_iter->Seek(InternalKey(prefix, 0, kTypeValue).Encode()); + // regular_iter->Seek(prefix); + + ASSERT_OK(hash_iter->status()); + // Seeking to a non-existing prefix should yield either an invalid + // iterator, or a key with a prefix greater than the target. + if (hash_iter->Valid()) { + Slice ukey = ExtractUserKey(hash_iter->key()); + Slice ukey_prefix = options.prefix_extractor->Transform(ukey); + ASSERT_TRUE(BytewiseComparator()->Compare(prefix, ukey_prefix) < 0); + } + } + c.ResetTableReader(); +} + +// It's very hard to figure out the index block size of a table accurately. +// To make sure we get the index size, we just make sure that as the key count +// grows, the index block size also grows. +TEST_F(BlockBasedTableTest, IndexSizeStat) { + uint64_t last_index_size = 0; + + // we need to use random keys since purely human-readable text + // may be well compressed, resulting in an insignificant change of index + // block size. + Random rnd(test::RandomSeed()); + std::vector<std::string> keys; + + for (int i = 0; i < 100; ++i) { + keys.push_back(RandomString(&rnd, 10000)); + } + + // Each time we load one more key into the table, the table index block + // size is expected to be larger than last time's. + for (size_t i = 1; i < keys.size(); ++i) { + TableConstructor c(BytewiseComparator()); + for (size_t j = 0; j < i; ++j) { + c.Add(keys[j], "val"); + } + + std::vector<std::string> ks; + stl_wrappers::KVMap kvmap; + Options options; + options.compression = kNoCompression; + BlockBasedTableOptions table_options; + table_options.block_restart_interval = 1; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, + GetPlainInternalComparator(options.comparator), &ks, &kvmap); + auto index_size = c.GetTableReader()->GetTableProperties()->index_size; + ASSERT_GT(index_size, last_index_size); + last_index_size = index_size; + c.ResetTableReader(); + } +} + +TEST_F(BlockBasedTableTest, NumBlockStat) { + Random rnd(test::RandomSeed()); + TableConstructor c(BytewiseComparator()); + Options options; + options.compression = kNoCompression; + BlockBasedTableOptions table_options; + table_options.block_restart_interval = 1; + table_options.block_size = 1000; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + for (int i = 0; i < 10; ++i) { + // the key/val are slightly smaller than the block size, so that each block + // holds roughly one key/value pair. + c.Add(RandomString(&rnd, 900), "val"); + } + + std::vector<std::string> ks; + stl_wrappers::KVMap kvmap; + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, + GetPlainInternalComparator(options.comparator), &ks, &kvmap); + ASSERT_EQ(kvmap.size(), + c.GetTableReader()->GetTableProperties()->num_data_blocks); + c.ResetTableReader(); +} + +// A simple tool that takes a snapshot of block cache statistics.
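The snapshot idiom the class below implements is worth calling out: read every relevant ticker once, up front, then assert on the captured values or their deltas, so later cache activity cannot race the checks. Statistics::getTickerCount() is the real API in use; the generic helper here is only an illustration of the pattern, not part of the test file:

    #include <cstdint>
    #include <functional>

    // Illustrative before/after wrapper: returns how much `ticker` grew
    // while `op` ran. The snapshot class below unrolls this pattern over
    // all of the block-cache tickers at once.
    template <typename Stats, typename Ticker>
    uint64_t TickerDelta(Stats* stats, Ticker ticker,
                         const std::function<void()>& op) {
      const uint64_t before = stats->getTickerCount(ticker);
      op();
      return stats->getTickerCount(ticker) - before;
    }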
+class BlockCachePropertiesSnapshot { + public: + explicit BlockCachePropertiesSnapshot(Statistics* statistics) { + block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_MISS); + block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_HIT); + index_block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_INDEX_MISS); + index_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_INDEX_HIT); + data_block_cache_miss = statistics->getTickerCount(BLOCK_CACHE_DATA_MISS); + data_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_DATA_HIT); + filter_block_cache_miss = + statistics->getTickerCount(BLOCK_CACHE_FILTER_MISS); + filter_block_cache_hit = statistics->getTickerCount(BLOCK_CACHE_FILTER_HIT); + block_cache_bytes_read = statistics->getTickerCount(BLOCK_CACHE_BYTES_READ); + block_cache_bytes_write = + statistics->getTickerCount(BLOCK_CACHE_BYTES_WRITE); + } + + void AssertIndexBlockStat(int64_t expected_index_block_cache_miss, + int64_t expected_index_block_cache_hit) { + ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss); + ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit); + } + + void AssertFilterBlockStat(int64_t expected_filter_block_cache_miss, + int64_t expected_filter_block_cache_hit) { + ASSERT_EQ(expected_filter_block_cache_miss, filter_block_cache_miss); + ASSERT_EQ(expected_filter_block_cache_hit, filter_block_cache_hit); + } + + // Check that the fetched stats match the expected ones. + // TODO(kailiu) Use this only when the filter policy is disabled! + void AssertEqual(int64_t expected_index_block_cache_miss, + int64_t expected_index_block_cache_hit, + int64_t expected_data_block_cache_miss, + int64_t expected_data_block_cache_hit) const { + ASSERT_EQ(expected_index_block_cache_miss, index_block_cache_miss); + ASSERT_EQ(expected_index_block_cache_hit, index_block_cache_hit); + ASSERT_EQ(expected_data_block_cache_miss, data_block_cache_miss); + ASSERT_EQ(expected_data_block_cache_hit, data_block_cache_hit); + ASSERT_EQ(expected_index_block_cache_miss + expected_data_block_cache_miss, + block_cache_miss); + ASSERT_EQ(expected_index_block_cache_hit + expected_data_block_cache_hit, + block_cache_hit); + } + + int64_t GetCacheBytesRead() { return block_cache_bytes_read; } + + int64_t GetCacheBytesWrite() { return block_cache_bytes_write; } + + private: + int64_t block_cache_miss = 0; + int64_t block_cache_hit = 0; + int64_t index_block_cache_miss = 0; + int64_t index_block_cache_hit = 0; + int64_t data_block_cache_miss = 0; + int64_t data_block_cache_hit = 0; + int64_t filter_block_cache_miss = 0; + int64_t filter_block_cache_hit = 0; + int64_t block_cache_bytes_read = 0; + int64_t block_cache_bytes_write = 0; +}; + +// Make sure that, by default, index/filter blocks are pre-loaded (meaning we +// won't use the block cache to store them). +TEST_F(BlockBasedTableTest, BlockCacheDisabledTest) { + Options options; + options.create_if_missing = true; + options.statistics = CreateDBStatistics(); + BlockBasedTableOptions table_options; + table_options.block_cache = NewLRUCache(1024, 4); + table_options.filter_policy.reset(NewBloomFilterPolicy(10)); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + std::vector<std::string> keys; + stl_wrappers::KVMap kvmap; + + TableConstructor c(BytewiseComparator(), true); + c.Add("key", "value"); + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, + GetPlainInternalComparator(options.comparator), &keys, &kvmap); + + // preloading filter/index blocks is enabled.
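The two asserts that follow pivot on one option. With cache_index_and_filter_blocks left at its default of false, the reader pins its index and filter blocks inside the table object at open time; with it set to true, as in the next test, they are fetched on demand through the block cache like any data block. A hedged sketch contrasting the two configurations (the option and factory names are the real BlockBasedTableOptions fields used throughout these tests; the helper itself is illustrative):

    // Illustrative helper, not part of the test file: build the table
    // options for either mode these cache tests exercise.
    BlockBasedTableOptions MakeTableOptions(bool index_and_filter_in_cache) {
      BlockBasedTableOptions opts;
      opts.block_cache = NewLRUCache(1024, 4);
      opts.filter_policy.reset(NewBloomFilterPolicy(10));
      // false: preload and pin index/filter in the reader (this test);
      // true: route them through block_cache (the next test).
      opts.cache_index_and_filter_blocks = index_and_filter_in_cache;
      return opts;
    }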
+ auto reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader()); + ASSERT_TRUE(reader->TEST_filter_block_preloaded()); + ASSERT_TRUE(reader->TEST_index_reader_preloaded()); + + { + // nothing happens in the beginning + BlockCachePropertiesSnapshot props(options.statistics.get()); + props.AssertIndexBlockStat(0, 0); + props.AssertFilterBlockStat(0, 0); + } + + { + GetContext get_context(options.comparator, nullptr, nullptr, nullptr, + GetContext::kNotFound, Slice(), nullptr, nullptr, + nullptr, nullptr); + // a hack just to trigger BlockBasedTable::GetFilter. + reader->Get(ReadOptions(), "non-exist-key", &get_context); + BlockCachePropertiesSnapshot props(options.statistics.get()); + props.AssertIndexBlockStat(0, 0); + props.AssertFilterBlockStat(0, 0); + } +} + +// Due to the difficulties of the interaction between statistics, this test +// only tests the case when "index block is put to block cache" +TEST_F(BlockBasedTableTest, FilterBlockInBlockCache) { + // -- Table construction + Options options; + options.create_if_missing = true; + options.statistics = CreateDBStatistics(); + + // Enable the cache for index/filter blocks + BlockBasedTableOptions table_options; + table_options.block_cache = NewLRUCache(1024, 4); + table_options.cache_index_and_filter_blocks = true; + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + std::vector<std::string> keys; + stl_wrappers::KVMap kvmap; + + TableConstructor c(BytewiseComparator()); + c.Add("key", "value"); + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, + GetPlainInternalComparator(options.comparator), &keys, &kvmap); + // preloading filter/index blocks is prohibited. + auto* reader = dynamic_cast<BlockBasedTable*>(c.GetTableReader()); + ASSERT_TRUE(!reader->TEST_filter_block_preloaded()); + ASSERT_TRUE(!reader->TEST_index_reader_preloaded()); + + // -- PART 1: Open with regular block cache. + // Since cache_index_and_filter_blocks is set, index/filter reads go + // through the block cache. + unique_ptr<InternalIterator> iter; + + int64_t last_cache_bytes_read = 0; + // At first, no block will be accessed. + { + BlockCachePropertiesSnapshot props(options.statistics.get()); + // index will be added to block cache. + props.AssertEqual(1, // index block miss + 0, 0, 0); + ASSERT_EQ(props.GetCacheBytesRead(), 0); + ASSERT_EQ(props.GetCacheBytesWrite(), + table_options.block_cache->GetUsage()); + last_cache_bytes_read = props.GetCacheBytesRead(); + } + + // Only index block will be accessed + { + iter.reset(c.NewIterator()); + BlockCachePropertiesSnapshot props(options.statistics.get()); + // NOTE: to help better highlight the "delta" of each ticker, I use + // <last_value> + <added_value> to indicate the increment of a changed + // value; other numbers remain the same.
+ props.AssertEqual(1, 0 + 1, // index block hit + 0, 0); + // Cache hit, bytes read from cache should increase + ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read); + ASSERT_EQ(props.GetCacheBytesWrite(), + table_options.block_cache->GetUsage()); + last_cache_bytes_read = props.GetCacheBytesRead(); + } + + // Only data block will be accessed + { + iter->SeekToFirst(); + BlockCachePropertiesSnapshot props(options.statistics.get()); + props.AssertEqual(1, 1, 0 + 1, // data block miss + 0); + // Cache miss, bytes read from cache should not change + ASSERT_EQ(props.GetCacheBytesRead(), last_cache_bytes_read); + ASSERT_EQ(props.GetCacheBytesWrite(), + table_options.block_cache->GetUsage()); + last_cache_bytes_read = props.GetCacheBytesRead(); + } + + // Data block will be in cache + { + iter.reset(c.NewIterator()); + iter->SeekToFirst(); + BlockCachePropertiesSnapshot props(options.statistics.get()); + props.AssertEqual(1, 1 + 1, /* index block hit */ + 1, 0 + 1 /* data block hit */); + // Cache hit, bytes read from cache should increase + ASSERT_GT(props.GetCacheBytesRead(), last_cache_bytes_read); + ASSERT_EQ(props.GetCacheBytesWrite(), + table_options.block_cache->GetUsage()); + } + // release the iterator so that the block cache can reset correctly. + iter.reset(); + + c.ResetTableReader(); + + // -- PART 2: Open with a very small block cache + // In this test, no block will ever get hit since the block cache is + // too small to fit even one entry. + table_options.block_cache = NewLRUCache(1, 4); + options.statistics = CreateDBStatistics(); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + const ImmutableCFOptions ioptions2(options); + c.Reopen(ioptions2); + { + BlockCachePropertiesSnapshot props(options.statistics.get()); + props.AssertEqual(1, // index block miss + 0, 0, 0); + // Cache miss, bytes read from cache should not change + ASSERT_EQ(props.GetCacheBytesRead(), 0); + } + + { + // Both the index and data blocks get accessed. + // It first caches the index block, then the data block. But since the + // cache size is only 1, the index block will be purged after the data + // block is inserted. + iter.reset(c.NewIterator()); + BlockCachePropertiesSnapshot props(options.statistics.get()); + props.AssertEqual(1 + 1, // index block miss + 0, 0, // data block miss + 0); + // Cache miss, bytes read from cache should not change + ASSERT_EQ(props.GetCacheBytesRead(), 0); + } + + { + // SeekToFirst() accesses the data block. For a similar reason, we expect + // a data block cache miss.
+ iter->SeekToFirst(); + BlockCachePropertiesSnapshot props(options.statistics.get()); + props.AssertEqual(2, 0, 0 + 1, // data block miss + 0); + // Cache miss, bytes read from cache should not change + ASSERT_EQ(props.GetCacheBytesRead(), 0); + } + iter.reset(); + c.ResetTableReader(); + + // -- PART 3: Open table with bloom filter enabled but not in SST file + table_options.block_cache = NewLRUCache(4096, 4); + table_options.cache_index_and_filter_blocks = false; + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + TableConstructor c3(BytewiseComparator()); + std::string user_key = "k01"; + InternalKey internal_key(user_key, 0, kTypeValue); + c3.Add(internal_key.Encode().ToString(), "hello"); + ImmutableCFOptions ioptions3(options); + // Generate a table without a filter policy + c3.Finish(options, ioptions3, table_options, + GetPlainInternalComparator(options.comparator), &keys, &kvmap); + c3.ResetTableReader(); + + // Open the table with a filter policy + table_options.filter_policy.reset(NewBloomFilterPolicy(1)); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + options.statistics = CreateDBStatistics(); + ImmutableCFOptions ioptions4(options); + ASSERT_OK(c3.Reopen(ioptions4)); + reader = dynamic_cast<BlockBasedTable*>(c3.GetTableReader()); + ASSERT_TRUE(!reader->TEST_filter_block_preloaded()); + std::string value; + GetContext get_context(options.comparator, nullptr, nullptr, nullptr, + GetContext::kNotFound, user_key, &value, nullptr, + nullptr, nullptr); + ASSERT_OK(reader->Get(ReadOptions(), user_key, &get_context)); + ASSERT_EQ(value, "hello"); + BlockCachePropertiesSnapshot props(options.statistics.get()); + props.AssertFilterBlockStat(0, 0); + c3.ResetTableReader(); +} + +void ValidateBlockSizeDeviation(int value, int expected) { + BlockBasedTableOptions table_options; + table_options.block_size_deviation = value; + BlockBasedTableFactory* factory = new BlockBasedTableFactory(table_options); + + const BlockBasedTableOptions* normalized_table_options = + (const BlockBasedTableOptions*)factory->GetOptions(); + ASSERT_EQ(normalized_table_options->block_size_deviation, expected); + + delete factory; +} + +void ValidateBlockRestartInterval(int value, int expected) { + BlockBasedTableOptions table_options; + table_options.block_restart_interval = value; + BlockBasedTableFactory* factory = new BlockBasedTableFactory(table_options); + + const BlockBasedTableOptions* normalized_table_options = + (const BlockBasedTableOptions*)factory->GetOptions(); + ASSERT_EQ(normalized_table_options->block_restart_interval, expected); + + delete factory; +} + +TEST_F(BlockBasedTableTest, InvalidOptions) { + // invalid values for block_size_deviation (<0 or >100) are silently set to 0 + ValidateBlockSizeDeviation(-10, 0); + ValidateBlockSizeDeviation(-1, 0); + ValidateBlockSizeDeviation(0, 0); + ValidateBlockSizeDeviation(1, 1); + ValidateBlockSizeDeviation(99, 99); + ValidateBlockSizeDeviation(100, 100); + ValidateBlockSizeDeviation(101, 0); + ValidateBlockSizeDeviation(1000, 0); + + // invalid values for block_restart_interval (<1) are silently set to 1 + ValidateBlockRestartInterval(-10, 1); + ValidateBlockRestartInterval(-1, 1); + ValidateBlockRestartInterval(0, 1); + ValidateBlockRestartInterval(1, 1); + ValidateBlockRestartInterval(2, 2); + ValidateBlockRestartInterval(1000, 1000); +} + +TEST_F(BlockBasedTableTest, BlockReadCountTest) { + // bloom_filter_type = 0 -- block-based filter + // bloom_filter_type = 1 -- full filter + for (int bloom_filter_type = 0;
bloom_filter_type < 2; ++bloom_filter_type) { + for (int index_and_filter_in_cache = 0; index_and_filter_in_cache < 2; + ++index_and_filter_in_cache) { + Options options; + options.create_if_missing = true; + + BlockBasedTableOptions table_options; + table_options.block_cache = NewLRUCache(1, 0); + table_options.cache_index_and_filter_blocks = index_and_filter_in_cache; + table_options.filter_policy.reset( + NewBloomFilterPolicy(10, bloom_filter_type == 0)); + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + std::vector keys; + stl_wrappers::KVMap kvmap; + + TableConstructor c(BytewiseComparator()); + std::string user_key = "k04"; + InternalKey internal_key(user_key, 0, kTypeValue); + std::string encoded_key = internal_key.Encode().ToString(); + c.Add(encoded_key, "hello"); + ImmutableCFOptions ioptions(options); + // Generate table with filter policy + c.Finish(options, ioptions, table_options, + GetPlainInternalComparator(options.comparator), &keys, &kvmap); + auto reader = c.GetTableReader(); + std::string value; + GetContext get_context(options.comparator, nullptr, nullptr, nullptr, + GetContext::kNotFound, user_key, &value, nullptr, + nullptr, nullptr); + perf_context.Reset(); + ASSERT_OK(reader->Get(ReadOptions(), encoded_key, &get_context)); + if (index_and_filter_in_cache) { + // data, index and filter block + ASSERT_EQ(perf_context.block_read_count, 3); + } else { + // just the data block + ASSERT_EQ(perf_context.block_read_count, 1); + } + ASSERT_EQ(get_context.State(), GetContext::kFound); + ASSERT_EQ(value, "hello"); + + // Get non-existing key + user_key = "does-not-exist"; + internal_key = InternalKey(user_key, 0, kTypeValue); + encoded_key = internal_key.Encode().ToString(); + + get_context = GetContext(options.comparator, nullptr, nullptr, nullptr, + GetContext::kNotFound, user_key, &value, nullptr, + nullptr, nullptr); + perf_context.Reset(); + ASSERT_OK(reader->Get(ReadOptions(), encoded_key, &get_context)); + ASSERT_EQ(get_context.State(), GetContext::kNotFound); + + if (index_and_filter_in_cache) { + if (bloom_filter_type == 0) { + // with block-based, we read index and then the filter + ASSERT_EQ(perf_context.block_read_count, 2); + } else { + // with full-filter, we read filter first and then we stop + ASSERT_EQ(perf_context.block_read_count, 1); + } + } else { + // filter is already in memory and it figures out that the key doesn't + // exist + ASSERT_EQ(perf_context.block_read_count, 0); + } + } + } +} + +TEST_F(BlockBasedTableTest, BlockCacheLeak) { + // Check that when we reopen a table we don't lose access to blocks already + // in the cache. This test checks whether the Table actually makes use of the + // unique ID from the file. + + Options opt; + unique_ptr ikc; + ikc.reset(new test::PlainInternalKeyComparator(opt.comparator)); + opt.compression = kNoCompression; + BlockBasedTableOptions table_options; + table_options.block_size = 1024; + // big enough so we don't ever lose cached values. 
+ table_options.block_cache = NewLRUCache(16 * 1024 * 1024, 4); + opt.table_factory.reset(NewBlockBasedTableFactory(table_options)); + + TableConstructor c(BytewiseComparator()); + c.Add("k01", "hello"); + c.Add("k02", "hello2"); + c.Add("k03", std::string(10000, 'x')); + c.Add("k04", std::string(200000, 'x')); + c.Add("k05", std::string(300000, 'x')); + c.Add("k06", "hello3"); + c.Add("k07", std::string(100000, 'x')); + std::vector keys; + stl_wrappers::KVMap kvmap; + const ImmutableCFOptions ioptions(opt); + c.Finish(opt, ioptions, table_options, *ikc, &keys, &kvmap); + + unique_ptr iter(c.NewIterator()); + iter->SeekToFirst(); + while (iter->Valid()) { + iter->key(); + iter->value(); + iter->Next(); + } + ASSERT_OK(iter->status()); + + const ImmutableCFOptions ioptions1(opt); + ASSERT_OK(c.Reopen(ioptions1)); + auto table_reader = dynamic_cast(c.GetTableReader()); + for (const std::string& key : keys) { + ASSERT_TRUE(table_reader->TEST_KeyInCache(ReadOptions(), key)); + } + c.ResetTableReader(); + + // rerun with different block cache + table_options.block_cache = NewLRUCache(16 * 1024 * 1024, 4); + opt.table_factory.reset(NewBlockBasedTableFactory(table_options)); + const ImmutableCFOptions ioptions2(opt); + ASSERT_OK(c.Reopen(ioptions2)); + table_reader = dynamic_cast(c.GetTableReader()); + for (const std::string& key : keys) { + ASSERT_TRUE(!table_reader->TEST_KeyInCache(ReadOptions(), key)); + } + c.ResetTableReader(); +} + +TEST_F(BlockBasedTableTest, NewIndexIteratorLeak) { + // A regression test to avoid data race described in + // https://github.com/facebook/rocksdb/issues/1267 + TableConstructor c(BytewiseComparator(), true /* convert_to_internal_key_ */); + std::vector keys; + stl_wrappers::KVMap kvmap; + c.Add("a1", "val1"); + Options options; + options.prefix_extractor.reset(NewFixedPrefixTransform(1)); + BlockBasedTableOptions table_options; + table_options.index_type = BlockBasedTableOptions::kHashSearch; + table_options.cache_index_and_filter_blocks = true; + table_options.block_cache = NewLRUCache(0); + options.table_factory.reset(NewBlockBasedTableFactory(table_options)); + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, + GetPlainInternalComparator(options.comparator), &keys, &kvmap); + + rocksdb::SyncPoint::GetInstance()->LoadDependencyAndMarkers( + { + {"BlockBasedTable::NewIndexIterator::thread1:1", + "BlockBasedTable::NewIndexIterator::thread2:2"}, + {"BlockBasedTable::NewIndexIterator::thread2:3", + "BlockBasedTable::NewIndexIterator::thread1:4"}, + }, + { + {"BlockBasedTableTest::NewIndexIteratorLeak:Thread1Marker", + "BlockBasedTable::NewIndexIterator::thread1:1"}, + {"BlockBasedTableTest::NewIndexIteratorLeak:Thread1Marker", + "BlockBasedTable::NewIndexIterator::thread1:4"}, + {"BlockBasedTableTest::NewIndexIteratorLeak:Thread2Marker", + "BlockBasedTable::NewIndexIterator::thread2:2"}, + {"BlockBasedTableTest::NewIndexIteratorLeak:Thread2Marker", + "BlockBasedTable::NewIndexIterator::thread2:3"}, + }); + + rocksdb::SyncPoint::GetInstance()->EnableProcessing(); + ReadOptions ro; + auto* reader = c.GetTableReader(); + + std::function func1 = [&]() { + TEST_SYNC_POINT("BlockBasedTableTest::NewIndexIteratorLeak:Thread1Marker"); + std::unique_ptr iter(reader->NewIterator(ro)); + iter->Seek(InternalKey("a1", 0, kTypeValue).Encode()); + }; + + std::function func2 = [&]() { + TEST_SYNC_POINT("BlockBasedTableTest::NewIndexIteratorLeak:Thread2Marker"); + std::unique_ptr iter(reader->NewIterator(ro)); + }; + + auto thread1 = 
std::thread(func1); + auto thread2 = std::thread(func2); + thread1.join(); + thread2.join(); + rocksdb::SyncPoint::GetInstance()->DisableProcessing(); + c.ResetTableReader(); +} + +// Plain table is not supported in ROCKSDB_LITE +#ifndef ROCKSDB_LITE +TEST_F(PlainTableTest, BasicPlainTableProperties) { + PlainTableOptions plain_table_options; + plain_table_options.user_key_len = 8; + plain_table_options.bloom_bits_per_key = 8; + plain_table_options.hash_table_ratio = 0; + + PlainTableFactory factory(plain_table_options); + test::StringSink sink; + unique_ptr<WritableFileWriter> file_writer( + test::GetWritableFileWriter(new test::StringSink())); + Options options; + const ImmutableCFOptions ioptions(options); + InternalKeyComparator ikc(options.comparator); + std::vector<std::unique_ptr<IntTblPropCollectorFactory>> + int_tbl_prop_collector_factories; + std::string column_family_name; + std::unique_ptr<TableBuilder> builder(factory.NewTableBuilder( + TableBuilderOptions(ioptions, ikc, &int_tbl_prop_collector_factories, + kNoCompression, CompressionOptions(), + nullptr /* compression_dict */, + false /* skip_filters */, column_family_name), + TablePropertiesCollectorFactory::Context::kUnknownColumnFamily, + file_writer.get())); + + for (char c = 'a'; c <= 'z'; ++c) { + std::string key(8, c); + key.append("\1       "); // PlainTable expects internal key structure + std::string value(28, c + 42); + builder->Add(key, value); + } + ASSERT_OK(builder->Finish()); + file_writer->Flush(); + + test::StringSink* ss = + static_cast<test::StringSink*>(file_writer->writable_file()); + unique_ptr<RandomAccessFileReader> file_reader( + test::GetRandomAccessFileReader( + new test::StringSource(ss->contents(), 72242, true))); + + TableProperties* props = nullptr; + auto s = ReadTableProperties(file_reader.get(), ss->contents().size(), + kPlainTableMagicNumber, ioptions, + &props); + std::unique_ptr<TableProperties> props_guard(props); + ASSERT_OK(s); + + ASSERT_EQ(0ul, props->index_size); + ASSERT_EQ(0ul, props->filter_size); + ASSERT_EQ(16ul * 26, props->raw_key_size); + ASSERT_EQ(28ul * 26, props->raw_value_size); + ASSERT_EQ(26ul, props->num_entries); + ASSERT_EQ(1ul, props->num_data_blocks); +} +#endif // !ROCKSDB_LITE + +TEST_F(GeneralTableTest, ApproximateOffsetOfPlain) { + TableConstructor c(BytewiseComparator()); + c.Add("k01", "hello"); + c.Add("k02", "hello2"); + c.Add("k03", std::string(10000, 'x')); + c.Add("k04", std::string(200000, 'x')); + c.Add("k05", std::string(300000, 'x')); + c.Add("k06", "hello3"); + c.Add("k07", std::string(100000, 'x')); + std::vector<std::string> keys; + stl_wrappers::KVMap kvmap; + Options options; + test::PlainInternalKeyComparator internal_comparator(options.comparator); + options.compression = kNoCompression; + BlockBasedTableOptions table_options; + table_options.block_size = 1024; + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, internal_comparator, + &keys, &kvmap); + + ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01a"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 10000, 11000)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04a"), 210000, 211000)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k05"), 210000, 211000)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k06"), 510000, 511000)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k07"), 510000, 511000)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 610000, 612000));
+ c.ResetTableReader(); +} + +static void DoCompressionTest(CompressionType comp) { + Random rnd(301); + TableConstructor c(BytewiseComparator()); + std::string tmp; + c.Add("k01", "hello"); + c.Add("k02", test::CompressibleString(&rnd, 0.25, 10000, &tmp)); + c.Add("k03", "hello3"); + c.Add("k04", test::CompressibleString(&rnd, 0.25, 10000, &tmp)); + std::vector<std::string> keys; + stl_wrappers::KVMap kvmap; + Options options; + test::PlainInternalKeyComparator ikc(options.comparator); + options.compression = comp; + BlockBasedTableOptions table_options; + table_options.block_size = 1024; + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, ikc, &keys, &kvmap); + + ASSERT_TRUE(Between(c.ApproximateOffsetOf("abc"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k01"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k02"), 0, 0)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k03"), 2000, 3000)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("k04"), 2000, 3000)); + ASSERT_TRUE(Between(c.ApproximateOffsetOf("xyz"), 4000, 6100)); + c.ResetTableReader(); +} + +TEST_F(GeneralTableTest, ApproximateOffsetOfCompressed) { + std::vector<CompressionType> compression_state; + if (!Snappy_Supported()) { + fprintf(stderr, "skipping snappy compression tests\n"); + } else { + compression_state.push_back(kSnappyCompression); + } + + if (!Zlib_Supported()) { + fprintf(stderr, "skipping zlib compression tests\n"); + } else { + compression_state.push_back(kZlibCompression); + } + + // TODO(kailiu) DoCompressionTest() doesn't work with BZip2. + /* + if (!BZip2_Supported()) { + fprintf(stderr, "skipping bzip2 compression tests\n"); + } else { + compression_state.push_back(kBZip2Compression); + } + */ + + if (!LZ4_Supported()) { + fprintf(stderr, "skipping lz4 and lz4hc compression tests\n"); + } else { + compression_state.push_back(kLZ4Compression); + compression_state.push_back(kLZ4HCCompression); + } + + if (!XPRESS_Supported()) { + fprintf(stderr, "skipping xpress compression tests\n"); + } else { + compression_state.push_back(kXpressCompression); + } + + for (auto state : compression_state) { + DoCompressionTest(state); + } +} + +TEST_F(HarnessTest, Randomized) { + std::vector<TestArgs> args = GenerateArgList(); + for (unsigned int i = 0; i < args.size(); i++) { + Init(args[i]); + Random rnd(test::RandomSeed() + 5); + for (int num_entries = 0; num_entries < 2000; + num_entries += (num_entries < 50 ?
1 : 200)) { + if ((num_entries % 10) == 0) { + fprintf(stderr, "case %d of %d: num_entries = %d\n", (i + 1), + static_cast(args.size()), num_entries); + } + for (int e = 0; e < num_entries; e++) { + std::string v; + Add(test::RandomKey(&rnd, rnd.Skewed(4)), + test::RandomString(&rnd, rnd.Skewed(5), &v).ToString()); + } + Test(&rnd); + } + } +} + +#ifndef ROCKSDB_LITE +TEST_F(HarnessTest, RandomizedLongDB) { + Random rnd(test::RandomSeed()); + TestArgs args = {DB_TEST, false, 16, kNoCompression, 0, false}; + Init(args); + int num_entries = 100000; + for (int e = 0; e < num_entries; e++) { + std::string v; + Add(test::RandomKey(&rnd, rnd.Skewed(4)), + test::RandomString(&rnd, rnd.Skewed(5), &v).ToString()); + } + Test(&rnd); + + // We must have created enough data to force merging + int files = 0; + for (int level = 0; level < db()->NumberLevels(); level++) { + std::string value; + char name[100]; + snprintf(name, sizeof(name), "rocksdb.num-files-at-level%d", level); + ASSERT_TRUE(db()->GetProperty(name, &value)); + files += atoi(value.c_str()); + } + ASSERT_GT(files, 0); +} +#endif // ROCKSDB_LITE + +class MemTableTest : public testing::Test {}; + +TEST_F(MemTableTest, Simple) { + InternalKeyComparator cmp(BytewiseComparator()); + auto table_factory = std::make_shared(); + Options options; + options.memtable_factory = table_factory; + ImmutableCFOptions ioptions(options); + WriteBufferManager wb(options.db_write_buffer_size); + MemTable* memtable = + new MemTable(cmp, ioptions, MutableCFOptions(options, ioptions), &wb, + kMaxSequenceNumber); + memtable->Ref(); + WriteBatch batch; + WriteBatchInternal::SetSequence(&batch, 100); + batch.Put(std::string("k1"), std::string("v1")); + batch.Put(std::string("k2"), std::string("v2")); + batch.Put(std::string("k3"), std::string("v3")); + batch.Put(std::string("largekey"), std::string("vlarge")); + ColumnFamilyMemTablesDefault cf_mems_default(memtable); + ASSERT_TRUE( + WriteBatchInternal::InsertInto(&batch, &cf_mems_default, nullptr).ok()); + + Arena arena; + ScopedArenaIterator iter(memtable->NewIterator(ReadOptions(), &arena)); + iter->SeekToFirst(); + while (iter->Valid()) { + fprintf(stderr, "key: '%s' -> '%s'\n", + iter->key().ToString().c_str(), + iter->value().ToString().c_str()); + iter->Next(); + } + + delete memtable->Unref(); +} + +// Test the empty key +TEST_F(HarnessTest, SimpleEmptyKey) { + auto args = GenerateArgList(); + for (const auto& arg : args) { + Init(arg); + Random rnd(test::RandomSeed() + 1); + Add("", "v"); + Test(&rnd); + } +} + +TEST_F(HarnessTest, SimpleSingle) { + auto args = GenerateArgList(); + for (const auto& arg : args) { + Init(arg); + Random rnd(test::RandomSeed() + 2); + Add("abc", "v"); + Test(&rnd); + } +} + +TEST_F(HarnessTest, SimpleMulti) { + auto args = GenerateArgList(); + for (const auto& arg : args) { + Init(arg); + Random rnd(test::RandomSeed() + 3); + Add("abc", "v"); + Add("abcd", "v"); + Add("ac", "v2"); + Test(&rnd); + } +} + +TEST_F(HarnessTest, SimpleSpecialKey) { + auto args = GenerateArgList(); + for (const auto& arg : args) { + Init(arg); + Random rnd(test::RandomSeed() + 4); + Add("\xff\xff", "v3"); + Test(&rnd); + } +} + +TEST_F(HarnessTest, FooterTests) { + { + // upconvert legacy block based + std::string encoded; + Footer footer(kLegacyBlockBasedTableMagicNumber, 0); + BlockHandle meta_index(10, 5), index(20, 15); + footer.set_metaindex_handle(meta_index); + footer.set_index_handle(index); + footer.EncodeTo(&encoded); + Footer decoded_footer; + Slice encoded_slice(encoded); + 
decoded_footer.DecodeFrom(&encoded_slice); + ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber); + ASSERT_EQ(decoded_footer.checksum(), kCRC32c); + ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset()); + ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size()); + ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset()); + ASSERT_EQ(decoded_footer.index_handle().size(), index.size()); + ASSERT_EQ(decoded_footer.version(), 0U); + } + { + // xxhash block based + std::string encoded; + Footer footer(kBlockBasedTableMagicNumber, 1); + BlockHandle meta_index(10, 5), index(20, 15); + footer.set_metaindex_handle(meta_index); + footer.set_index_handle(index); + footer.set_checksum(kxxHash); + footer.EncodeTo(&encoded); + Footer decoded_footer; + Slice encoded_slice(encoded); + decoded_footer.DecodeFrom(&encoded_slice); + ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber); + ASSERT_EQ(decoded_footer.checksum(), kxxHash); + ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset()); + ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size()); + ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset()); + ASSERT_EQ(decoded_footer.index_handle().size(), index.size()); + ASSERT_EQ(decoded_footer.version(), 1U); + } +// Plain table is not supported in ROCKSDB_LITE +#ifndef ROCKSDB_LITE + { + // upconvert legacy plain table + std::string encoded; + Footer footer(kLegacyPlainTableMagicNumber, 0); + BlockHandle meta_index(10, 5), index(20, 15); + footer.set_metaindex_handle(meta_index); + footer.set_index_handle(index); + footer.EncodeTo(&encoded); + Footer decoded_footer; + Slice encoded_slice(encoded); + decoded_footer.DecodeFrom(&encoded_slice); + ASSERT_EQ(decoded_footer.table_magic_number(), kPlainTableMagicNumber); + ASSERT_EQ(decoded_footer.checksum(), kCRC32c); + ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset()); + ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size()); + ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset()); + ASSERT_EQ(decoded_footer.index_handle().size(), index.size()); + ASSERT_EQ(decoded_footer.version(), 0U); + } + { + // xxhash block based + std::string encoded; + Footer footer(kPlainTableMagicNumber, 1); + BlockHandle meta_index(10, 5), index(20, 15); + footer.set_metaindex_handle(meta_index); + footer.set_index_handle(index); + footer.set_checksum(kxxHash); + footer.EncodeTo(&encoded); + Footer decoded_footer; + Slice encoded_slice(encoded); + decoded_footer.DecodeFrom(&encoded_slice); + ASSERT_EQ(decoded_footer.table_magic_number(), kPlainTableMagicNumber); + ASSERT_EQ(decoded_footer.checksum(), kxxHash); + ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset()); + ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size()); + ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset()); + ASSERT_EQ(decoded_footer.index_handle().size(), index.size()); + ASSERT_EQ(decoded_footer.version(), 1U); + } +#endif // !ROCKSDB_LITE + { + // version == 2 + std::string encoded; + Footer footer(kBlockBasedTableMagicNumber, 2); + BlockHandle meta_index(10, 5), index(20, 15); + footer.set_metaindex_handle(meta_index); + footer.set_index_handle(index); + footer.EncodeTo(&encoded); + Footer decoded_footer; + Slice encoded_slice(encoded); + decoded_footer.DecodeFrom(&encoded_slice); + ASSERT_EQ(decoded_footer.table_magic_number(), kBlockBasedTableMagicNumber); + 
ASSERT_EQ(decoded_footer.checksum(), kCRC32c); + ASSERT_EQ(decoded_footer.metaindex_handle().offset(), meta_index.offset()); + ASSERT_EQ(decoded_footer.metaindex_handle().size(), meta_index.size()); + ASSERT_EQ(decoded_footer.index_handle().offset(), index.offset()); + ASSERT_EQ(decoded_footer.index_handle().size(), index.size()); + ASSERT_EQ(decoded_footer.version(), 2U); + } +} + +class IndexBlockRestartIntervalTest + : public BlockBasedTableTest, + public ::testing::WithParamInterface { + public: + static std::vector GetRestartValues() { return {-1, 0, 1, 8, 16, 32}; } +}; + +INSTANTIATE_TEST_CASE_P( + IndexBlockRestartIntervalTest, IndexBlockRestartIntervalTest, + ::testing::ValuesIn(IndexBlockRestartIntervalTest::GetRestartValues())); + +TEST_P(IndexBlockRestartIntervalTest, IndexBlockRestartInterval) { + const int kKeysInTable = 10000; + const int kKeySize = 100; + const int kValSize = 500; + + int index_block_restart_interval = GetParam(); + + Options options; + BlockBasedTableOptions table_options; + table_options.block_size = 64; // small block size to get big index block + table_options.index_block_restart_interval = index_block_restart_interval; + options.table_factory.reset(new BlockBasedTableFactory(table_options)); + + TableConstructor c(BytewiseComparator()); + static Random rnd(301); + for (int i = 0; i < kKeysInTable; i++) { + InternalKey k(RandomString(&rnd, kKeySize), 0, kTypeValue); + c.Add(k.Encode().ToString(), RandomString(&rnd, kValSize)); + } + + std::vector keys; + stl_wrappers::KVMap kvmap; + std::unique_ptr comparator( + new InternalKeyComparator(BytewiseComparator())); + const ImmutableCFOptions ioptions(options); + c.Finish(options, ioptions, table_options, *comparator, &keys, &kvmap); + auto reader = c.GetTableReader(); + + std::unique_ptr db_iter(reader->NewIterator(ReadOptions())); + + // Test point lookup + for (auto& kv : kvmap) { + db_iter->Seek(kv.first); + + ASSERT_TRUE(db_iter->Valid()); + ASSERT_OK(db_iter->status()); + ASSERT_EQ(db_iter->key(), kv.first); + ASSERT_EQ(db_iter->value(), kv.second); + } + + // Test iterating + auto kv_iter = kvmap.begin(); + for (db_iter->SeekToFirst(); db_iter->Valid(); db_iter->Next()) { + ASSERT_EQ(db_iter->key(), kv_iter->first); + ASSERT_EQ(db_iter->value(), kv_iter->second); + kv_iter++; + } + ASSERT_EQ(kv_iter, kvmap.end()); + c.ResetTableReader(); +} + +class PrefixTest : public testing::Test { + public: + PrefixTest() : testing::Test() {} + ~PrefixTest() {} +}; + +namespace { +// A simple PrefixExtractor that only works for test PrefixAndWholeKeyTest +class TestPrefixExtractor : public rocksdb::SliceTransform { + public: + ~TestPrefixExtractor() override{}; + const char* Name() const override { return "TestPrefixExtractor"; } + + rocksdb::Slice Transform(const rocksdb::Slice& src) const override { + assert(IsValid(src)); + return rocksdb::Slice(src.data(), 3); + } + + bool InDomain(const rocksdb::Slice& src) const override { + assert(IsValid(src)); + return true; + } + + bool InRange(const rocksdb::Slice& dst) const override { return true; } + + bool IsValid(const rocksdb::Slice& src) const { + if (src.size() != 4) { + return false; + } + if (src[0] != '[') { + return false; + } + if (src[1] < '0' || src[1] > '9') { + return false; + } + if (src[2] != ']') { + return false; + } + if (src[3] < '0' || src[3] > '9') { + return false; + } + return true; + } +}; +} // namespace + +TEST_F(PrefixTest, PrefixAndWholeKeyTest) { + rocksdb::Options options; + options.compaction_style = rocksdb::kCompactionStyleUniversal; 
+ options.num_levels = 20; + options.create_if_missing = true; + options.optimize_filters_for_hits = false; + options.target_file_size_base = 268435456; + options.prefix_extractor = std::make_shared<TestPrefixExtractor>(); + rocksdb::BlockBasedTableOptions bbto; + bbto.filter_policy.reset(rocksdb::NewBloomFilterPolicy(10)); + bbto.block_size = 262144; + + bbto.whole_key_filtering = true; + + const std::string kDBPath = test::TmpDir() + "/table_prefix_test"; + options.table_factory.reset(NewBlockBasedTableFactory(bbto)); + DestroyDB(kDBPath, options); + rocksdb::DB* db; + ASSERT_OK(rocksdb::DB::Open(options, kDBPath, &db)); + + // Create a bunch of keys with 10 filters. + for (int i = 0; i < 10; i++) { + std::string prefix = "[" + std::to_string(i) + "]"; + for (int j = 0; j < 10; j++) { + std::string key = prefix + std::to_string(j); + db->Put(rocksdb::WriteOptions(), key, "1"); + } + } + + // Trigger compaction. + db->CompactRange(CompactRangeOptions(), nullptr, nullptr); + delete db; + // In the second round, turn whole_key_filtering off and expect + // rocksdb to keep working. +} + +} // namespace rocksdb + +int main(int argc, char** argv) { + ::testing::InitGoogleTest(&argc, argv); + return RUN_ALL_TESTS(); +} diff --git a/external/rocksdb/table/two_level_iterator.cc b/external/rocksdb/table/two_level_iterator.cc new file mode 100755 index 0000000000..81dc8792ba --- /dev/null +++ b/external/rocksdb/table/two_level_iterator.cc @@ -0,0 +1,245 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#include "table/two_level_iterator.h" + +#include "db/pinned_iterators_manager.h" +#include "rocksdb/options.h" +#include "rocksdb/table.h" +#include "table/block.h" +#include "table/format.h" +#include "util/arena.h" + +namespace rocksdb { + +namespace { + +class TwoLevelIterator : public InternalIterator { + public: + explicit TwoLevelIterator(TwoLevelIteratorState* state, + InternalIterator* first_level_iter, + bool need_free_iter_and_state); + + virtual ~TwoLevelIterator() { + // Assert that the TwoLevelIterator is never deleted while Pinning is + // Enabled.
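+ // (Sketch of that contract; StartPinning()/ReleasePinnedData() are assumed + // here from PinnedIteratorsManager's use elsewhere in RocksDB: + // PinnedIteratorsManager mgr; + // iter->SetPinnedItersMgr(&mgr); + // mgr.StartPinning(); /* iterate; blocks get pinned */ + // mgr.ReleasePinnedData(); // must run before `delete iter` + // otherwise the assert below fires.)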
+ assert(!pinned_iters_mgr_ || + (pinned_iters_mgr_ && !pinned_iters_mgr_->PinningEnabled())); + first_level_iter_.DeleteIter(!need_free_iter_and_state_); + second_level_iter_.DeleteIter(false); + if (need_free_iter_and_state_) { + delete state_; + } else { + state_->~TwoLevelIteratorState(); + } + } + + virtual void Seek(const Slice& target) override; + virtual void SeekToFirst() override; + virtual void SeekToLast() override; + virtual void Next() override; + virtual void Prev() override; + + virtual bool Valid() const override { return second_level_iter_.Valid(); } + virtual Slice key() const override { + assert(Valid()); + return second_level_iter_.key(); + } + virtual Slice value() const override { + assert(Valid()); + return second_level_iter_.value(); + } + virtual Status status() const override { + // It'd be nice if status() returned a const Status& instead of a Status + if (!first_level_iter_.status().ok()) { + return first_level_iter_.status(); + } else if (second_level_iter_.iter() != nullptr && + !second_level_iter_.status().ok()) { + return second_level_iter_.status(); + } else { + return status_; + } + } + virtual void SetPinnedItersMgr( + PinnedIteratorsManager* pinned_iters_mgr) override { + pinned_iters_mgr_ = pinned_iters_mgr; + first_level_iter_.SetPinnedItersMgr(pinned_iters_mgr); + if (second_level_iter_.iter()) { + second_level_iter_.SetPinnedItersMgr(pinned_iters_mgr); + } + } + virtual bool IsKeyPinned() const override { + return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() && + second_level_iter_.iter() && second_level_iter_.IsKeyPinned(); + } + virtual bool IsValuePinned() const override { + return pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled() && + second_level_iter_.iter() && second_level_iter_.IsValuePinned(); + } + + private: + void SaveError(const Status& s) { + if (status_.ok() && !s.ok()) status_ = s; + } + void SkipEmptyDataBlocksForward(); + void SkipEmptyDataBlocksBackward(); + void SetSecondLevelIterator(InternalIterator* iter); + void InitDataBlock(); + + TwoLevelIteratorState* state_; + IteratorWrapper first_level_iter_; + IteratorWrapper second_level_iter_; // May be nullptr + bool need_free_iter_and_state_; + PinnedIteratorsManager* pinned_iters_mgr_; + Status status_; + // If second_level_iter is non-nullptr, then "data_block_handle_" holds the + // "index_value" passed to block_function_ to create the second_level_iter. 
+ std::string data_block_handle_; +}; + +TwoLevelIterator::TwoLevelIterator(TwoLevelIteratorState* state, + InternalIterator* first_level_iter, + bool need_free_iter_and_state) + : state_(state), + first_level_iter_(first_level_iter), + need_free_iter_and_state_(need_free_iter_and_state), + pinned_iters_mgr_(nullptr) {} + +void TwoLevelIterator::Seek(const Slice& target) { + if (state_->check_prefix_may_match && + !state_->PrefixMayMatch(target)) { + SetSecondLevelIterator(nullptr); + return; + } + first_level_iter_.Seek(target); + + InitDataBlock(); + if (second_level_iter_.iter() != nullptr) { + second_level_iter_.Seek(target); + } + SkipEmptyDataBlocksForward(); +} + +void TwoLevelIterator::SeekToFirst() { + first_level_iter_.SeekToFirst(); + InitDataBlock(); + if (second_level_iter_.iter() != nullptr) { + second_level_iter_.SeekToFirst(); + } + SkipEmptyDataBlocksForward(); +} + +void TwoLevelIterator::SeekToLast() { + first_level_iter_.SeekToLast(); + InitDataBlock(); + if (second_level_iter_.iter() != nullptr) { + second_level_iter_.SeekToLast(); + } + SkipEmptyDataBlocksBackward(); +} + +void TwoLevelIterator::Next() { + assert(Valid()); + second_level_iter_.Next(); + SkipEmptyDataBlocksForward(); +} + +void TwoLevelIterator::Prev() { + assert(Valid()); + second_level_iter_.Prev(); + SkipEmptyDataBlocksBackward(); +} + + +void TwoLevelIterator::SkipEmptyDataBlocksForward() { + while (second_level_iter_.iter() == nullptr || + (!second_level_iter_.Valid() && + !second_level_iter_.status().IsIncomplete())) { + // Move to next block + if (!first_level_iter_.Valid()) { + SetSecondLevelIterator(nullptr); + return; + } + first_level_iter_.Next(); + InitDataBlock(); + if (second_level_iter_.iter() != nullptr) { + second_level_iter_.SeekToFirst(); + } + } +} + +void TwoLevelIterator::SkipEmptyDataBlocksBackward() { + while (second_level_iter_.iter() == nullptr || + (!second_level_iter_.Valid() && + !second_level_iter_.status().IsIncomplete())) { + // Move to next block + if (!first_level_iter_.Valid()) { + SetSecondLevelIterator(nullptr); + return; + } + first_level_iter_.Prev(); + InitDataBlock(); + if (second_level_iter_.iter() != nullptr) { + second_level_iter_.SeekToLast(); + } + } +} + +void TwoLevelIterator::SetSecondLevelIterator(InternalIterator* iter) { + if (second_level_iter_.iter() != nullptr) { + SaveError(second_level_iter_.status()); + } + + if (pinned_iters_mgr_ && iter) { + iter->SetPinnedItersMgr(pinned_iters_mgr_); + } + + InternalIterator* old_iter = second_level_iter_.Set(iter); + if (pinned_iters_mgr_ && pinned_iters_mgr_->PinningEnabled()) { + pinned_iters_mgr_->PinIteratorIfNeeded(old_iter); + } else { + delete old_iter; + } +} + +void TwoLevelIterator::InitDataBlock() { + if (!first_level_iter_.Valid()) { + SetSecondLevelIterator(nullptr); + } else { + Slice handle = first_level_iter_.value(); + if (second_level_iter_.iter() != nullptr && + !second_level_iter_.status().IsIncomplete() && + handle.compare(data_block_handle_) == 0) { + // second_level_iter is already constructed with this iterator, so + // no need to change anything + } else { + InternalIterator* iter = state_->NewSecondaryIterator(handle); + data_block_handle_.assign(handle.data(), handle.size()); + SetSecondLevelIterator(iter); + } + } +} + +} // namespace + +InternalIterator* NewTwoLevelIterator(TwoLevelIteratorState* state, + InternalIterator* first_level_iter, + Arena* arena, + bool need_free_iter_and_state) { + if (arena == nullptr) { + return new TwoLevelIterator(state, first_level_iter, + 
need_free_iter_and_state); + } else { + auto mem = arena->AllocateAligned(sizeof(TwoLevelIterator)); + return new (mem) + TwoLevelIterator(state, first_level_iter, need_free_iter_and_state); + } +} + +} // namespace rocksdb diff --git a/external/rocksdb/table/two_level_iterator.h b/external/rocksdb/table/two_level_iterator.h new file mode 100755 index 0000000000..d210132cbb --- /dev/null +++ b/external/rocksdb/table/two_level_iterator.h @@ -0,0 +1,52 @@ +// Copyright (c) 2011-present, Facebook, Inc. All rights reserved. +// This source code is licensed under the BSD-style license found in the +// LICENSE file in the root directory of this source tree. An additional grant +// of patent rights can be found in the PATENTS file in the same directory. +// +// Copyright (c) 2011 The LevelDB Authors. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be +// found in the LICENSE file. See the AUTHORS file for names of contributors. + +#pragma once +#include "rocksdb/iterator.h" +#include "rocksdb/env.h" +#include "table/iterator_wrapper.h" + +namespace rocksdb { + +struct ReadOptions; +class InternalKeyComparator; +class Arena; + +struct TwoLevelIteratorState { + explicit TwoLevelIteratorState(bool _check_prefix_may_match) + : check_prefix_may_match(_check_prefix_may_match) {} + + virtual ~TwoLevelIteratorState() {} + virtual InternalIterator* NewSecondaryIterator(const Slice& handle) = 0; + virtual bool PrefixMayMatch(const Slice& internal_key) = 0; + + // Whether Seek() should call PrefixMayMatch() + bool check_prefix_may_match; +}; + + +// Return a new two level iterator. A two-level iterator contains an +// index iterator whose values point to a sequence of blocks where +// each block is itself a sequence of key,value pairs. The returned +// two-level iterator yields the concatenation of all key/value pairs +// in the sequence of blocks. Takes ownership of "first_level_iter" and +// will delete it when no longer needed. +// +// Uses the supplied state object to convert a first_level_iter value into +// an iterator over the contents of the corresponding block. +// arena: If not null, the arena is used to allocate the Iterator. +// When destroying the iterator, the destructor will destroy +// all the states but those allocated in arena. +// need_free_iter_and_state: free `state` and `first_level_iter` if +// true. Otherwise, just call destructor. +extern InternalIterator* NewTwoLevelIterator( + TwoLevelIteratorState* state, InternalIterator* first_level_iter, + Arena* arena = nullptr, bool need_free_iter_and_state = true); + +} // namespace rocksdb diff --git a/external/rocksdb/third-party/fbson/COMMIT.md b/external/rocksdb/third-party/fbson/COMMIT.md new file mode 100755 index 0000000000..b38b5424d3 --- /dev/null +++ b/external/rocksdb/third-party/fbson/COMMIT.md @@ -0,0 +1,5 @@ +fbson commit: +https://github.com/facebook/mysql-5.6/commit/55ef9ff25c934659a70b4094e9b406c48e9dd43d + +# TODO. +* Had to convert zero-sized arrays to [1]-sized arrays because the MS compiler complains that they are not standard. At some point this change needs to be contributed back to MySQL, where this code was taken from. diff --git a/external/rocksdb/third-party/fbson/FbsonDocument.h b/external/rocksdb/third-party/fbson/FbsonDocument.h new file mode 100755 index 0000000000..9a00e24714 --- /dev/null +++ b/external/rocksdb/third-party/fbson/FbsonDocument.h @@ -0,0 +1,898 @@ +/* + * Copyright (c) 2011-present, Facebook, Inc. + * All rights reserved.
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + * + */ + +/* + * This header defines FbsonDocument, FbsonKeyValue, and various value classes + * which are derived from FbsonValue, and a forward iterator for container + * values - essentially everything that is related to FBSON binary data + * structures. + * + * Implementation notes: + * + * None of the classes in this header file can be instantiated directly (i.e. + * you cannot create a FbsonKeyValue or FbsonValue object - all constructors + * are declared non-public). We use the classes as wrappers on the packed FBSON + * bytes (serialized), and cast the classes (types) to the underlying packed + * byte array. + * + * For the same reason, we cannot define any FBSON value class to be virtual, + * since we never call constructors, and will not instantiate vtbl and vptrs. + * + * Therefore, the classes are defined as packed structures (i.e. no data + * alignment and padding), and the private member variables of the classes are + * defined precisely in the same order as the FBSON spec. This ensures we + * access the packed FBSON bytes correctly. + * + * The packed structures are highly optimized for in-place operations with low + * overhead. The reads (and in-place writes) are performed directly on packed + * bytes. There is no memory allocation at all at runtime. + * + * For updates/writes of values that will expand the original FBSON size, the + * write will fail, and the caller needs to handle buffer increase. + * + * ** Iterator ** + * Both the ObjectVal class and the ArrayVal class have an iterator type that + * you can use to declare an iterator on a container object to go through the + * key-value pairs or value list. The iterator has both non-const and const + * types. + * + * Note: iterators are forward direction only. + * + * ** Query ** + * Querying into containers is through the member functions find (for key/value + * pairs) and get (for array elements), and is in streaming style. We don't + * need to read/scan the whole FBSON packed bytes in order to return results. + * Once the key/index is found, we stop searching. You can use text to query + * both objects and arrays (for arrays, text is converted to an integer index), + * and use an index to retrieve from an array. Array indexes are 0-based. + * + * ** External dictionary ** + * During query processing, you can also pass a call-back function, so the + * search will first try to check if the key string exists in the dictionary. + * If so, the search will be based on the id instead of the key string. + * + * @author Tian Xia + */ + +#ifndef FBSON_FBSONDOCUMENT_H +#define FBSON_FBSONDOCUMENT_H + +#include <stdlib.h> +#include <string.h> +#include <assert.h> + +namespace fbson { + +#pragma pack(push, 1) + +#define FBSON_VER 1 + +// forward declaration +class FbsonValue; +class ObjectVal; + +/* + * FbsonDocument is the main object that accesses and queries FBSON packed + * bytes. NOTE: FbsonDocument only allows object container as the top level + * FBSON value. However, you can use the static method "createValue" to get any + * FbsonValue object from the packed bytes. + * + * FbsonDocument object also dereferences to an object container value + * (ObjectVal) once FBSON is loaded. + * + * ** Load ** + * FbsonDocument is usable after loading packed bytes (memory location) into + * the object.
We only need the header and first few bytes of the payload after the + * header to verify the FBSON. + * + * Note: creating an FbsonDocument (through createDocument) does not allocate + * any memory. The document object is an efficient wrapper on the packed bytes + * which is accessed directly. + * + * ** Query ** + * Query is through dereferencing into ObjectVal. + */ +class FbsonDocument { + public: + // create an FbsonDocument object from FBSON packed bytes + static FbsonDocument* createDocument(const char* pb, uint32_t size); + + // create an FbsonValue from FBSON packed bytes + static FbsonValue* createValue(const char* pb, uint32_t size); + + uint8_t version() { return header_.ver_; } + + FbsonValue* getValue() { return ((FbsonValue*)payload_); } + + ObjectVal* operator->() { return ((ObjectVal*)payload_); } + + const ObjectVal* operator->() const { return ((const ObjectVal*)payload_); } + + private: + /* + * FbsonHeader class defines FBSON header (internal to FbsonDocument). + * + * Currently it only contains version information (1-byte). We may expand the + * header to include checksum of the FBSON binary for more security. + */ + struct FbsonHeader { + uint8_t ver_; + } header_; + + char payload_[1]; + + FbsonDocument(); + + FbsonDocument(const FbsonDocument&) = delete; + FbsonDocument& operator=(const FbsonDocument&) = delete; +}; + +/* + * FbsonFwdIteratorT implements FBSON's iterator template. + * + * Note: it is a FORWARD iterator only due to the design of the FBSON format. + */ +template <class Iter_Type, class Cont_Type> +class FbsonFwdIteratorT { + typedef Iter_Type iterator; + typedef typename std::iterator_traits<iterator>::pointer pointer; + typedef typename std::iterator_traits<iterator>::reference reference; + + public: + explicit FbsonFwdIteratorT(const iterator& i) : current_(i) {} + + // allow non-const to const iterator conversion (same container type) + template <class T> + FbsonFwdIteratorT(const FbsonFwdIteratorT<T, Cont_Type>& rhs) + : current_(rhs.base()) {} + + bool operator==(const FbsonFwdIteratorT& rhs) const { + return (current_ == rhs.current_); + } + + bool operator!=(const FbsonFwdIteratorT& rhs) const { + return !operator==(rhs); + } + + bool operator<(const FbsonFwdIteratorT& rhs) const { + return (current_ < rhs.current_); + } + + bool operator>(const FbsonFwdIteratorT& rhs) const { return !operator<(rhs); } + + FbsonFwdIteratorT& operator++() { + current_ = (iterator)(((char*)current_) + current_->numPackedBytes()); + return *this; + } + + FbsonFwdIteratorT operator++(int) { + auto tmp = *this; + current_ = (iterator)(((char*)current_) + current_->numPackedBytes()); + return tmp; + } + + explicit operator pointer() { return current_; } + + reference operator*() const { return *current_; } + + pointer operator->() const { return current_; } + + iterator base() const { return current_; } + + private: + iterator current_; +}; + +typedef int (*hDictInsert)(const char* key, unsigned len); +typedef int (*hDictFind)(const char* key, unsigned len); + +/* + * FbsonType defines 10 primitive types and 2 container types, as described + * below.
+ * + * primitive_value ::= + * 0x00 //null value (0 byte) + * | 0x01 //boolean true (0 byte) + * | 0x02 //boolean false (0 byte) + * | 0x03 int8 //char/int8 (1 byte) + * | 0x04 int16 //int16 (2 bytes) + * | 0x05 int32 //int32 (4 bytes) + * | 0x06 int64 //int64 (8 bytes) + * | 0x07 double //floating point (8 bytes) + * | 0x08 string //variable length string + * | 0x09 binary //variable length binary + * + * container ::= + * 0x0A int32 key_value_list //object, int32 is the total bytes of the object + * | 0x0B int32 value_list //array, int32 is the total bytes of the array + */ +enum class FbsonType : char { + T_Null = 0x00, + T_True = 0x01, + T_False = 0x02, + T_Int8 = 0x03, + T_Int16 = 0x04, + T_Int32 = 0x05, + T_Int64 = 0x06, + T_Double = 0x07, + T_String = 0x08, + T_Binary = 0x09, + T_Object = 0x0A, + T_Array = 0x0B, + NUM_TYPES, +}; + +typedef std::underlying_type<FbsonType>::type FbsonTypeUnder; + +/* + * FbsonKeyValue class defines FBSON key type, as described below. + * + * key ::= + * 0x00 int8 //1-byte dictionary id + * | int8 (byte*) //int8 (>0) is the size of the key string + * + * value ::= primitive_value | container + * + * FbsonKeyValue can be either an id mapping to the key string in an external + * dictionary, or it is the original key string. Whether to read an id or a + * string is decided by the first byte (size_). + * + * Note: a key object must be followed by a value object. Therefore, a key + * object implicitly refers to a key-value pair, and you can get the value + * object right after the key object. The function numPackedBytes hence + * indicates the total size of the key-value pair, so that we are able to go + * to the next pair from the key. + * + * ** Dictionary size ** + * By default, the dictionary size is 255 (1-byte). Users can define + * "USE_LARGE_DICT" to increase the dictionary size to 65535 (2-byte). + */ +class FbsonKeyValue { + public: +#ifdef USE_LARGE_DICT + static const int sMaxKeyId = 65535; + typedef uint16_t keyid_type; +#else + static const int sMaxKeyId = 255; + typedef uint8_t keyid_type; +#endif // #ifdef USE_LARGE_DICT + + static const uint8_t sMaxKeyLen = 64; + + // size of the key. 0 indicates it is stored as id + uint8_t klen() const { return size_; } + + // get the key string. Note the string may not be null terminated. + const char* getKeyStr() const { return key_.str_; } + + keyid_type getKeyId() const { return key_.id_; } + + unsigned int keyPackedBytes() const { + return size_ ? (sizeof(size_) + size_) + : (sizeof(size_) + sizeof(keyid_type)); + } + + FbsonValue* value() const { + return (FbsonValue*)(((char*)this) + keyPackedBytes()); + } + + // size of the total packed bytes (key+value) + unsigned int numPackedBytes() const; + + private: + uint8_t size_; + + union key_ { + keyid_type id_; + char str_[1]; + } key_; + + FbsonKeyValue(); +}; + +/* + * FbsonValue is the base class of all FBSON types. It contains only one member + * variable - type info, which can be retrieved by member functions is[Type]() + * or type().
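+ * + * A minimal usage sketch (illustrative; `packed`/`packed_size` would come + * from FbsonJsonParser or FbsonWriter elsewhere in this diff), for the + * document {"a":{"b":[7]}}: + * FbsonValue* root = FbsonDocument::createValue(packed, packed_size); + * FbsonValue* v = root->findPath("a.b.0"); // object keys, then array index + * if (v && v->isInt8()) { int8_t n = ((Int8Val*)v)->val(); } // n == 7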
+ */ +class FbsonValue { + public: + static const uint32_t sMaxValueLen = 1 << 24; // 16M + + bool isNull() const { return (type_ == FbsonType::T_Null); } + bool isTrue() const { return (type_ == FbsonType::T_True); } + bool isFalse() const { return (type_ == FbsonType::T_False); } + bool isInt8() const { return (type_ == FbsonType::T_Int8); } + bool isInt16() const { return (type_ == FbsonType::T_Int16); } + bool isInt32() const { return (type_ == FbsonType::T_Int32); } + bool isInt64() const { return (type_ == FbsonType::T_Int64); } + bool isDouble() const { return (type_ == FbsonType::T_Double); } + bool isString() const { return (type_ == FbsonType::T_String); } + bool isBinary() const { return (type_ == FbsonType::T_Binary); } + bool isObject() const { return (type_ == FbsonType::T_Object); } + bool isArray() const { return (type_ == FbsonType::T_Array); } + + FbsonType type() const { return type_; } + + // size of the total packed bytes + unsigned int numPackedBytes() const; + + // size of the value in bytes + unsigned int size() const; + + // get the raw byte array of the value + const char* getValuePtr() const; + + // find the FBSON value by a key path string (null terminated) + FbsonValue* findPath(const char* key_path, + const char* delim = ".", + hDictFind handler = nullptr) { + return findPath(key_path, (unsigned int)strlen(key_path), delim, handler); + } + + // find the FBSON value by a key path string (with length) + FbsonValue* findPath(const char* key_path, + unsigned int len, + const char* delim, + hDictFind handler); + + protected: + FbsonType type_; // type info + + FbsonValue(); +}; + +/* + * NumberValT is the template class (derived from FbsonValue) of all number + * types (integers and double). + */ +template <class T> +class NumberValT : public FbsonValue { + public: + T val() const { return num_; } + + unsigned int numPackedBytes() const { return sizeof(FbsonValue) + sizeof(T); } + + // catch-all for unknown specializations of the template class + bool setVal(T value) { return false; } + + private: + T num_; + + NumberValT(); +}; + +typedef NumberValT<int8_t> Int8Val; + +// override setVal for Int8Val +template <> +inline bool Int8Val::setVal(int8_t value) { + if (!isInt8()) { + return false; + } + + num_ = value; + return true; +} + +typedef NumberValT<int16_t> Int16Val; + +// override setVal for Int16Val +template <> +inline bool Int16Val::setVal(int16_t value) { + if (!isInt16()) { + return false; + } + + num_ = value; + return true; +} + +typedef NumberValT<int32_t> Int32Val; + +// override setVal for Int32Val +template <> +inline bool Int32Val::setVal(int32_t value) { + if (!isInt32()) { + return false; + } + + num_ = value; + return true; +} + +typedef NumberValT<int64_t> Int64Val; + +// override setVal for Int64Val +template <> +inline bool Int64Val::setVal(int64_t value) { + if (!isInt64()) { + return false; + } + + num_ = value; + return true; +} + +typedef NumberValT<double> DoubleVal; + +// override setVal for DoubleVal +template <> +inline bool DoubleVal::setVal(double value) { + if (!isDouble()) { + return false; + } + + num_ = value; + return true; +} + +/* + * BlobVal is the base class (derived from FbsonValue) for string and binary + * types. The size_ indicates the total bytes of the payload_.
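+ * + * In-place writes never grow the packed bytes, e.g. (sketch, assuming val + * points at an FBSON string value): + * StringVal* s = (StringVal*)val; + * bool ok = s->setVal("hi", 2); // succeeds only if 2 <= s->getBlobLen(); + * // leftover payload bytes are zeroed + * To store a longer blob, the caller must rebuild the document in a bigger + * buffer.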
+ */ +class BlobVal : public FbsonValue { + public: + // size of the blob payload only + unsigned int getBlobLen() const { return size_; } + + // return the blob as byte array + const char* getBlob() const { return payload_; } + + // size of the total packed bytes + unsigned int numPackedBytes() const { + return sizeof(FbsonValue) + sizeof(size_) + size_; + } + + protected: + uint32_t size_; + char payload_[1]; + + // set new blob bytes + bool internalSetVal(const char* blob, uint32_t blobSize) { + // if we cannot fit the new blob, fail the operation + if (blobSize > size_) { + return false; + } + + memcpy(payload_, blob, blobSize); + + // Set the rest of the bytes to 0. Note we cannot change the size_ of the + // current payload, as all values are packed. + memset(payload_ + blobSize, 0, size_ - blobSize); + + return true; + } + + BlobVal(); + + private: + // Disable as this class can only be allocated dynamically + BlobVal(const BlobVal&) = delete; + BlobVal& operator=(const BlobVal&) = delete; +}; + +/* + * Binary type + */ +class BinaryVal : public BlobVal { + public: + bool setVal(const char* blob, uint32_t blobSize) { + if (!isBinary()) { + return false; + } + + return internalSetVal(blob, blobSize); + } + + private: + BinaryVal(); +}; + +/* + * String type + * Note: FBSON string may not be a c-string (NULL-terminated) + */ +class StringVal : public BlobVal { + public: + bool setVal(const char* str, uint32_t blobSize) { + if (!isString()) { + return false; + } + + return internalSetVal(str, blobSize); + } + + private: + StringVal(); +}; + +/* + * ContainerVal is the base class (derived from FbsonValue) for object and + * array types. The size_ indicates the total bytes of the payload_. + */ +class ContainerVal : public FbsonValue { + public: + // size of the container payload only + unsigned int getContainerSize() const { return size_; } + + // return the container payload as byte array + const char* getPayload() const { return payload_; } + + // size of the total packed bytes + unsigned int numPackedBytes() const { + return sizeof(FbsonValue) + sizeof(size_) + size_; + } + + protected: + uint32_t size_; + char payload_[1]; + + ContainerVal(); + + ContainerVal(const ContainerVal&) = delete; + ContainerVal& operator=(const ContainerVal&) = delete; +}; + +/* + * Object type + */ +class ObjectVal : public ContainerVal { + public: + // find the FBSON value by a key string (null terminated) + FbsonValue* find(const char* key, hDictFind handler = nullptr) const { + if (!key) + return nullptr; + + return find(key, (unsigned int)strlen(key), handler); + } + + // find the FBSON value by a key string (with length) + FbsonValue* find(const char* key, + unsigned int klen, + hDictFind handler = nullptr) const { + if (!key || !klen) + return nullptr; + + int key_id = -1; + if (handler && (key_id = handler(key, klen)) >= 0) { + return find(key_id); + } + + return internalFind(key, klen); + } + + // find the FBSON value by a key dictionary ID + FbsonValue* find(int key_id) const { + if (key_id < 0 || key_id > FbsonKeyValue::sMaxKeyId) + return nullptr; + + const char* pch = payload_; + const char* fence = payload_ + size_; + + while (pch < fence) { + FbsonKeyValue* pkey = (FbsonKeyValue*)(pch); + if (!pkey->klen() && key_id == pkey->getKeyId()) { + return pkey->value(); + } + pch += pkey->numPackedBytes(); + } + + assert(pch == fence); + + return nullptr; + } + + typedef FbsonKeyValue value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef FbsonFwdIteratorT<pointer, ObjectVal>
iterator; + typedef FbsonFwdIteratorT<const_pointer, ObjectVal> const_iterator; + + iterator begin() { return iterator((pointer)payload_); } + + const_iterator begin() const { return const_iterator((pointer)payload_); } + + iterator end() { return iterator((pointer)(payload_ + size_)); } + + const_iterator end() const { + return const_iterator((pointer)(payload_ + size_)); + } + + private: + FbsonValue* internalFind(const char* key, unsigned int klen) const { + const char* pch = payload_; + const char* fence = payload_ + size_; + + while (pch < fence) { + FbsonKeyValue* pkey = (FbsonKeyValue*)(pch); + if (klen == pkey->klen() && strncmp(key, pkey->getKeyStr(), klen) == 0) { + return pkey->value(); + } + pch += pkey->numPackedBytes(); + } + + assert(pch == fence); + + return nullptr; + } + + private: + ObjectVal(); +}; + +/* + * Array type + */ +class ArrayVal : public ContainerVal { + public: + // get the FBSON value at index + FbsonValue* get(int idx) const { + if (idx < 0) + return nullptr; + + const char* pch = payload_; + const char* fence = payload_ + size_; + + while (pch < fence && idx-- > 0) + pch += ((FbsonValue*)pch)->numPackedBytes(); + + if (idx == -1) + return (FbsonValue*)pch; + else { + assert(pch == fence); + return nullptr; + } + } + + // Get number of elements in array + unsigned int numElem() const { + const char* pch = payload_; + const char* fence = payload_ + size_; + + unsigned int num = 0; + while (pch < fence) { + ++num; + pch += ((FbsonValue*)pch)->numPackedBytes(); + } + + assert(pch == fence); + + return num; + } + + typedef FbsonValue value_type; + typedef value_type* pointer; + typedef const value_type* const_pointer; + typedef FbsonFwdIteratorT<pointer, ArrayVal> iterator; + typedef FbsonFwdIteratorT<const_pointer, ArrayVal> const_iterator; + + iterator begin() { return iterator((pointer)payload_); } + + const_iterator begin() const { return const_iterator((pointer)payload_); } + + iterator end() { return iterator((pointer)(payload_ + size_)); } + + const_iterator end() const { + return const_iterator((pointer)(payload_ + size_)); + } + + private: + ArrayVal(); +}; + +inline FbsonDocument* FbsonDocument::createDocument(const char* pb, + uint32_t size) { + if (!pb || size < sizeof(FbsonHeader) + sizeof(FbsonValue)) { + return nullptr; + } + + FbsonDocument* doc = (FbsonDocument*)pb; + if (doc->header_.ver_ != FBSON_VER) { + return nullptr; + } + + FbsonValue* val = (FbsonValue*)doc->payload_; + if (!val->isObject() || size != sizeof(FbsonHeader) + val->numPackedBytes()) { + return nullptr; + } + + return doc; +} + +inline FbsonValue* FbsonDocument::createValue(const char* pb, uint32_t size) { + if (!pb || size < sizeof(FbsonHeader) + sizeof(FbsonValue)) { + return nullptr; + } + + FbsonDocument* doc = (FbsonDocument*)pb; + if (doc->header_.ver_ != FBSON_VER) { + return nullptr; + } + + FbsonValue* val = (FbsonValue*)doc->payload_; + if (size != sizeof(FbsonHeader) + val->numPackedBytes()) { + return nullptr; + } + + return val; +} + +inline unsigned int FbsonKeyValue::numPackedBytes() const { + unsigned int ks = keyPackedBytes(); + FbsonValue* val = (FbsonValue*)(((char*)this) + ks); + return ks + val->numPackedBytes(); +} + +// Poor man's "virtual" function FbsonValue::numPackedBytes +inline unsigned int FbsonValue::numPackedBytes() const { + switch (type_) { + case FbsonType::T_Null: + case FbsonType::T_True: + case FbsonType::T_False: { + return sizeof(type_); + } + + case FbsonType::T_Int8: { + return sizeof(type_) + sizeof(int8_t); + } + case FbsonType::T_Int16: { + return sizeof(type_) + sizeof(int16_t); + } + case
FbsonType::T_Int32: { + return sizeof(type_) + sizeof(int32_t); + } + case FbsonType::T_Int64: { + return sizeof(type_) + sizeof(int64_t); + } + case FbsonType::T_Double: { + return sizeof(type_) + sizeof(double); + } + case FbsonType::T_String: + case FbsonType::T_Binary: { + return ((BlobVal*)(this))->numPackedBytes(); + } + + case FbsonType::T_Object: + case FbsonType::T_Array: { + return ((ContainerVal*)(this))->numPackedBytes(); + } + default: + return 0; + } +} + +inline unsigned int FbsonValue::size() const { + switch (type_) { + case FbsonType::T_Int8: { + return sizeof(int8_t); + } + case FbsonType::T_Int16: { + return sizeof(int16_t); + } + case FbsonType::T_Int32: { + return sizeof(int32_t); + } + case FbsonType::T_Int64: { + return sizeof(int64_t); + } + case FbsonType::T_Double: { + return sizeof(double); + } + case FbsonType::T_String: + case FbsonType::T_Binary: { + return ((BlobVal*)(this))->getBlobLen(); + } + + case FbsonType::T_Object: + case FbsonType::T_Array: { + return ((ContainerVal*)(this))->getContainerSize(); + } + case FbsonType::T_Null: + case FbsonType::T_True: + case FbsonType::T_False: + default: + return 0; + } +} + +inline const char* FbsonValue::getValuePtr() const { + switch (type_) { + case FbsonType::T_Int8: + case FbsonType::T_Int16: + case FbsonType::T_Int32: + case FbsonType::T_Int64: + case FbsonType::T_Double: + return ((char*)this) + sizeof(FbsonType); + + case FbsonType::T_String: + case FbsonType::T_Binary: + return ((BlobVal*)(this))->getBlob(); + + case FbsonType::T_Object: + case FbsonType::T_Array: + return ((ContainerVal*)(this))->getPayload(); + + case FbsonType::T_Null: + case FbsonType::T_True: + case FbsonType::T_False: + default: + return nullptr; + } +} + +inline FbsonValue* FbsonValue::findPath(const char* key_path, + unsigned int kp_len, + const char* delim = ".", + hDictFind handler = nullptr) { + if (!key_path || !kp_len) + return nullptr; + + if (!delim) + delim = "."; // default delimiter + + FbsonValue* pval = this; + const char* fence = key_path + kp_len; + char idx_buf[21]; // buffer to parse array index (integer value) + + while (pval && key_path < fence) { + const char* key = key_path; + unsigned int klen = 0; + // find the current key + for (; key_path != fence && *key_path != *delim; ++key_path, ++klen) + ; + + if (!klen) + return nullptr; + + switch (pval->type_) { + case FbsonType::T_Object: { + pval = ((ObjectVal*)pval)->find(key, klen, handler); + break; + } + + case FbsonType::T_Array: { + // parse string into an integer (array index) + if (klen >= sizeof(idx_buf)) + return nullptr; + + memcpy(idx_buf, key, klen); + idx_buf[klen] = 0; + + char* end = nullptr; + int index = (int)strtol(idx_buf, &end, 10); + if (end && !*end) + pval = ((fbson::ArrayVal*)pval)->get(index); + else + // incorrect index string + return nullptr; + break; + } + + default: + return nullptr; + } + + // skip the delimiter + if (key_path < fence) { + ++key_path; + if (key_path == fence) + // we have a trailing delimiter at the end + return nullptr; + } + } + + return pval; +} + +#pragma pack(pop) + +} // namespace fbson + +#endif // FBSON_FBSONDOCUMENT_H diff --git a/external/rocksdb/third-party/fbson/FbsonJsonParser.h b/external/rocksdb/third-party/fbson/FbsonJsonParser.h new file mode 100755 index 0000000000..678d970fb0 --- /dev/null +++ b/external/rocksdb/third-party/fbson/FbsonJsonParser.h @@ -0,0 +1,746 @@ +/* + * Copyright (c) 2011-present, Facebook, Inc. + * All rights reserved. 
+ * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + * + */ + +/* + * This file defines FbsonJsonParserT (template) and FbsonJsonParser. + * + * FbsonJsonParserT is a template class which implements a JSON parser. + * FbsonJsonParserT parses JSON text, and serializes it to FBSON binary format + * by using an FbsonWriterT object. By default, FbsonJsonParserT creates a new + * FbsonWriterT object with an output stream object. However, you can also + * pass in your FbsonWriterT or any stream object that implements some basic + * interface of std::ostream (see FbsonStream.h). + * + * FbsonJsonParser specializes FbsonJsonParserT with FbsonOutStream type (see + * FbsonStream.h). So unless you want to provide your own output stream + * type, use the FbsonJsonParser object. + * + * ** Parsing JSON ** + * FbsonJsonParserT parses a JSON string, and directly serializes it into FBSON + * packed bytes. There are three ways to parse a JSON string: (1) using a + * c-string, (2) using a string with length, (3) using a std::istream object. + * You can use a custom streambuf to redirect output. FbsonInBuffer is a + * streambuf used internally if the input is a raw character buffer. + * + * You can reuse an FbsonJsonParserT object to parse/serialize multiple JSON + * strings, and the previous FBSON will be overwritten. + * + * If parsing fails (returned false), the error code will be set to one of + * FbsonErrType, and can be retrieved by calling getErrorCode(). + * + * ** External dictionary ** + * During parsing a JSON string, you can pass a call-back function to map a key + * string to an id, and store the dictionary id in FBSON to save space. The + * purpose of using an external dictionary is more towards a collection of + * documents (which has common keys) rather than a single document, so that + * space saving will be significant. + * + * ** Endianness ** + * Note: FBSON serialization doesn't assume endianness of the server. However + * you will need to ensure that the endianness at the reader side is the same + * as that at the writer side (if they are on different machines). Otherwise, + * proper conversion is needed when a number value is returned to the + * caller/writer.
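+ * + * End-to-end sketch (getOutput() on the writer is an assumption based on + * the fbson writer API; getBuffer()/getSize() are from FbsonOutStream): + * fbson::FbsonJsonParser parser; + * if (parser.parse("{\"a\":1}")) { + * const char* packed = parser.getWriter().getOutput()->getBuffer(); + * uint32_t len = (uint32_t)parser.getWriter().getOutput()->getSize(); + * // packed/len can now be handed to FbsonDocument::createDocument() + * }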
+ * + * @author Tian Xia + */ + +#ifndef FBSON_FBSONPARSER_H +#define FBSON_FBSONPARSER_H + +#include <cmath> +#include <limits> +#include "FbsonDocument.h" +#include "FbsonWriter.h" + +namespace fbson { + +const char* const kJsonDelim = " ,]}\t\r\n"; +const char* const kWhiteSpace = " \t\n\r"; + +/* + * Error codes + */ +enum class FbsonErrType { + E_NONE = 0, + E_INVALID_VER, + E_EMPTY_STR, + E_OUTPUT_FAIL, + E_INVALID_DOCU, + E_INVALID_VALUE, + E_INVALID_KEY, + E_INVALID_STR, + E_INVALID_OBJ, + E_INVALID_ARR, + E_INVALID_HEX, + E_INVALID_OCTAL, + E_INVALID_DECIMAL, + E_INVALID_EXPONENT, + E_HEX_OVERFLOW, + E_OCTAL_OVERFLOW, + E_DECIMAL_OVERFLOW, + E_DOUBLE_OVERFLOW, + E_EXPONENT_OVERFLOW, +}; + +/* + * Template FbsonJsonParserT + */ +template <class OS_TYPE> +class FbsonJsonParserT { + public: + FbsonJsonParserT() : err_(FbsonErrType::E_NONE) {} + + explicit FbsonJsonParserT(OS_TYPE& os) + : writer_(os), err_(FbsonErrType::E_NONE) {} + + // parse a UTF-8 JSON string + bool parse(const std::string& str, hDictInsert handler = nullptr) { + return parse(str.c_str(), (unsigned int)str.size(), handler); + } + + // parse a UTF-8 JSON c-style string (NULL terminated) + bool parse(const char* c_str, hDictInsert handler = nullptr) { + return parse(c_str, (unsigned int)strlen(c_str), handler); + } + + // parse a UTF-8 JSON string with length + bool parse(const char* pch, unsigned int len, hDictInsert handler = nullptr) { + if (!pch || len == 0) { + err_ = FbsonErrType::E_EMPTY_STR; + return false; + } + + FbsonInBuffer sb(pch, len); + std::istream in(&sb); + return parse(in, handler); + } + + // parse UTF-8 JSON text from an input stream + bool parse(std::istream& in, hDictInsert handler = nullptr) { + bool res = false; + + // reset output stream + writer_.reset(); + + trim(in); + + if (in.peek() == '{') { + in.ignore(); + res = parseObject(in, handler); + } else if (in.peek() == '[') { + in.ignore(); + res = parseArray(in, handler); + } else { + err_ = FbsonErrType::E_INVALID_DOCU; + } + + trim(in); + if (res && !in.eof()) { + err_ = FbsonErrType::E_INVALID_DOCU; + return false; + } + + return res; + } + + FbsonWriterT<OS_TYPE>& getWriter() { return writer_; } + + FbsonErrType getErrorCode() { return err_; } + + // clear error code + void clearErr() { err_ = FbsonErrType::E_NONE; } + + private: + // parse a JSON object (comma-separated list of key-value pairs) + bool parseObject(std::istream& in, hDictInsert handler) { + if (!writer_.writeStartObject()) { + err_ = FbsonErrType::E_OUTPUT_FAIL; + return false; + } + + trim(in); + + if (in.peek() == '}') { + in.ignore(); + // empty object + if (!writer_.writeEndObject()) { + err_ = FbsonErrType::E_OUTPUT_FAIL; + return false; + } + return true; + } + + while (in.good()) { + if (in.get() != '"') { + err_ = FbsonErrType::E_INVALID_KEY; + return false; + } + + if (!parseKVPair(in, handler)) { + return false; + } + + trim(in); + + char ch = in.get(); + if (ch == '}') { + // end of the object + if (!writer_.writeEndObject()) { + err_ = FbsonErrType::E_OUTPUT_FAIL; + return false; + } + return true; + } else if (ch != ',') { + err_ = FbsonErrType::E_INVALID_OBJ; + return false; + } + + trim(in); + } + + err_ = FbsonErrType::E_INVALID_OBJ; + return false; + } + + // parse a JSON array (comma-separated list of values) + bool parseArray(std::istream& in, hDictInsert handler) { + if (!writer_.writeStartArray()) { + err_ = FbsonErrType::E_OUTPUT_FAIL; + return false; + } + + trim(in); + + if (in.peek() == ']') { + in.ignore(); + // empty array + if (!writer_.writeEndArray()) { + err_ =
FbsonErrType::E_OUTPUT_FAIL; + return false; + } + return true; + } + + while (in.good()) { + if (!parseValue(in, handler)) { + return false; + } + + trim(in); + + char ch = in.get(); + if (ch == ']') { + // end of the array + if (!writer_.writeEndArray()) { + err_ = FbsonErrType::E_OUTPUT_FAIL; + return false; + } + return true; + } else if (ch != ',') { + err_ = FbsonErrType::E_INVALID_ARR; + return false; + } + + trim(in); + } + + err_ = FbsonErrType::E_INVALID_ARR; + return false; + } + + // parse a key-value pair, separated by ":" + bool parseKVPair(std::istream& in, hDictInsert handler) { + if (parseKey(in, handler) && parseValue(in, handler)) { + return true; + } + + return false; + } + + // parse a key (must be string) + bool parseKey(std::istream& in, hDictInsert handler) { + char key[FbsonKeyValue::sMaxKeyLen]; + int i = 0; + while (in.good() && in.peek() != '"' && i < FbsonKeyValue::sMaxKeyLen) { + key[i++] = in.get(); + } + + if (!in.good() || in.peek() != '"' || i == 0) { + err_ = FbsonErrType::E_INVALID_KEY; + return false; + } + + in.ignore(); // discard '"' + + int key_id = -1; + if (handler) { + key_id = handler(key, i); + } + + if (key_id < 0) { + writer_.writeKey(key, i); + } else { + writer_.writeKey(key_id); + } + + trim(in); + + if (in.get() != ':') { + err_ = FbsonErrType::E_INVALID_OBJ; + return false; + } + + return true; + } + + // parse a value + bool parseValue(std::istream& in, hDictInsert handler) { + bool res = false; + + trim(in); + + switch (in.peek()) { + case 'N': + case 'n': { + in.ignore(); + res = parseNull(in); + break; + } + case 'T': + case 't': { + in.ignore(); + res = parseTrue(in); + break; + } + case 'F': + case 'f': { + in.ignore(); + res = parseFalse(in); + break; + } + case '"': { + in.ignore(); + res = parseString(in); + break; + } + case '{': { + in.ignore(); + res = parseObject(in, handler); + break; + } + case '[': { + in.ignore(); + res = parseArray(in, handler); + break; + } + default: { + res = parseNumber(in); + break; + } + } + + return res; + } + + // parse NULL value + bool parseNull(std::istream& in) { + if (tolower(in.get()) == 'u' && tolower(in.get()) == 'l' && + tolower(in.get()) == 'l') { + writer_.writeNull(); + return true; + } + + err_ = FbsonErrType::E_INVALID_VALUE; + return false; + } + + // parse TRUE value + bool parseTrue(std::istream& in) { + if (tolower(in.get()) == 'r' && tolower(in.get()) == 'u' && + tolower(in.get()) == 'e') { + writer_.writeBool(true); + return true; + } + + err_ = FbsonErrType::E_INVALID_VALUE; + return false; + } + + // parse FALSE value + bool parseFalse(std::istream& in) { + if (tolower(in.get()) == 'a' && tolower(in.get()) == 'l' && + tolower(in.get()) == 's' && tolower(in.get()) == 'e') { + writer_.writeBool(false); + return true; + } + + err_ = FbsonErrType::E_INVALID_VALUE; + return false; + } + + // parse a string + bool parseString(std::istream& in) { + if (!writer_.writeStartString()) { + err_ = FbsonErrType::E_OUTPUT_FAIL; + return false; + } + + bool escaped = false; + char buffer[4096]; // write 4KB at a time + int nread = 0; + while (in.good()) { + char ch = in.get(); + if (ch != '"' || escaped) { + buffer[nread++] = ch; + if (nread == 4096) { + // flush buffer + if (!writer_.writeString(buffer, nread)) { + err_ = FbsonErrType::E_OUTPUT_FAIL; + return false; + } + nread = 0; + } + // set/reset escape + if (ch == '\\' || escaped) { + escaped = !escaped; + } + } else { + // write all remaining bytes in the buffer + if (nread > 0) { + if (!writer_.writeString(buffer, nread)) { + err_ 
= FbsonErrType::E_OUTPUT_FAIL; + return false; + } + } + // end writing string + if (!writer_.writeEndString()) { + err_ = FbsonErrType::E_OUTPUT_FAIL; + return false; + } + return true; + } + } + + err_ = FbsonErrType::E_INVALID_STR; + return false; + } + + // parse a number + // Number format can be hex, octal, or decimal (including float). + // Only decimal can have (+/-) sign prefix. + bool parseNumber(std::istream& in) { + bool ret = false; + switch (in.peek()) { + case '0': { + in.ignore(); + + if (in.peek() == 'x' || in.peek() == 'X') { + in.ignore(); + ret = parseHex(in); + } else if (in.peek() == '.') { + in.ignore(); + ret = parseDouble(in, 0, 0, 1); + } else { + ret = parseOctal(in); + } + + break; + } + case '-': { + in.ignore(); + ret = parseDecimal(in, -1); + break; + } + case '+': + in.ignore(); + // fall through + default: + ret = parseDecimal(in, 1); + break; + } + + return ret; + } + + // parse a number in hex format + bool parseHex(std::istream& in) { + uint64_t val = 0; + int num_digits = 0; + char ch = tolower(in.peek()); + while (in.good() && !strchr(kJsonDelim, ch) && (++num_digits) <= 16) { + if (ch >= '0' && ch <= '9') { + val = (val << 4) + (ch - '0'); + } else if (ch >= 'a' && ch <= 'f') { + val = (val << 4) + (ch - 'a' + 10); + } else { // unrecognized hex digit + err_ = FbsonErrType::E_INVALID_HEX; + return false; + } + + in.ignore(); + ch = tolower(in.peek()); + } + + int size = 0; + if (num_digits <= 2) { + size = writer_.writeInt8((int8_t)val); + } else if (num_digits <= 4) { + size = writer_.writeInt16((int16_t)val); + } else if (num_digits <= 8) { + size = writer_.writeInt32((int32_t)val); + } else if (num_digits <= 16) { + size = writer_.writeInt64(val); + } else { + err_ = FbsonErrType::E_HEX_OVERFLOW; + return false; + } + + if (size == 0) { + err_ = FbsonErrType::E_OUTPUT_FAIL; + return false; + } + + return true; + } + + // parse a number in octal format + bool parseOctal(std::istream& in) { + int64_t val = 0; + char ch = in.peek(); + while (in.good() && !strchr(kJsonDelim, ch)) { + if (ch >= '0' && ch <= '7') { + val = val * 8 + (ch - '0'); + } else { + err_ = FbsonErrType::E_INVALID_OCTAL; + return false; + } + + // check if the number overflows + if (val < 0) { + err_ = FbsonErrType::E_OCTAL_OVERFLOW; + return false; + } + + in.ignore(); + ch = in.peek(); + } + + int size = 0; + if (val <= std::numeric_limits<int8_t>::max()) { + size = writer_.writeInt8((int8_t)val); + } else if (val <= std::numeric_limits<int16_t>::max()) { + size = writer_.writeInt16((int16_t)val); + } else if (val <= std::numeric_limits<int32_t>::max()) { + size = writer_.writeInt32((int32_t)val); + } else { // val <= INT64_MAX + size = writer_.writeInt64(val); + } + + if (size == 0) { + err_ = FbsonErrType::E_OUTPUT_FAIL; + return false; + } + + return true; + } + + // parse a number in decimal (including float) + bool parseDecimal(std::istream& in, int sign) { + int64_t val = 0; + int precision = 0; + + char ch = 0; + while (in.good() && (ch = in.peek()) == '0') + in.ignore(); + + while (in.good() && !strchr(kJsonDelim, ch)) { + if (ch >= '0' && ch <= '9') { + val = val * 10 + (ch - '0'); + ++precision; + } else if (ch == '.') { + // note we don't pop out '.' + return parseDouble(in, static_cast<double>(val), precision, sign); + } else { + err_ = FbsonErrType::E_INVALID_DECIMAL; + return false; + } + + in.ignore(); + + // if the number overflows int64_t, first parse it as double iff we see a + // decimal point later.
Otherwise, it is treated as overflow + if (val < 0 && val > std::numeric_limits<int64_t>::min()) { + return parseDouble(in, static_cast<double>(val), precision, sign); + } + + ch = in.peek(); + } + + if (sign < 0) { + val = -val; + } + + int size = 0; + if (val >= std::numeric_limits<int8_t>::min() && + val <= std::numeric_limits<int8_t>::max()) { + size = writer_.writeInt8((int8_t)val); + } else if (val >= std::numeric_limits<int16_t>::min() && + val <= std::numeric_limits<int16_t>::max()) { + size = writer_.writeInt16((int16_t)val); + } else if (val >= std::numeric_limits<int32_t>::min() && + val <= std::numeric_limits<int32_t>::max()) { + size = writer_.writeInt32((int32_t)val); + } else { // val <= INT64_MAX + size = writer_.writeInt64(val); + } + + if (size == 0) { + err_ = FbsonErrType::E_OUTPUT_FAIL; + return false; + } + + return true; + } + + // parse IEEE 754 double precision: + // Significand precision length - 15 + // Maximum exponent value - 308 + // + // "If a decimal string with at most 15 significant digits is converted to + // IEEE 754 double precision representation and then converted back to a + // string with the same number of significant digits, then the final string + // should match the original" + bool parseDouble(std::istream& in, double val, int precision, int sign) { + int integ = precision; + int frac = 0; + bool is_frac = false; + + char ch = in.peek(); + if (ch == '.') { + is_frac = true; + in.ignore(); + ch = in.peek(); + } + + int exp = 0; + while (in.good() && !strchr(kJsonDelim, ch)) { + if (ch >= '0' && ch <= '9') { + if (precision < 15) { + val = val * 10 + (ch - '0'); + if (is_frac) { + ++frac; + } else { + ++integ; + } + ++precision; + } else if (!is_frac) { + ++exp; + } + } else if (ch == 'e' || ch == 'E') { + in.ignore(); + int exp2; + if (!parseExponent(in, exp2)) { + return false; + } + + exp += exp2; + // check if exponent overflows + if (exp > 308 || exp < -308) { + err_ = FbsonErrType::E_EXPONENT_OVERFLOW; + return false; + } + + is_frac = true; + break; + } + + in.ignore(); + ch = in.peek(); + } + + if (!is_frac) { + err_ = FbsonErrType::E_DECIMAL_OVERFLOW; + return false; + } + + val *= std::pow(10, exp - frac); + if (std::isnan(val) || std::isinf(val)) { + err_ = FbsonErrType::E_DOUBLE_OVERFLOW; + return false; + } + + if (sign < 0) { + val = -val; + } + + if (writer_.writeDouble(val) == 0) { + err_ = FbsonErrType::E_OUTPUT_FAIL; + return false; + } + + return true; + } + + // parse the exponent part of a double number + bool parseExponent(std::istream& in, int& exp) { + bool neg = false; + + char ch = in.peek(); + if (ch == '+') { + in.ignore(); + ch = in.peek(); + } else if (ch == '-') { + neg = true; + in.ignore(); + ch = in.peek(); + } + + exp = 0; + while (in.good() && !strchr(kJsonDelim, ch)) { + if (ch >= '0' && ch <= '9') { + exp = exp * 10 + (ch - '0'); + } else { + err_ = FbsonErrType::E_INVALID_EXPONENT; + return false; + } + + if (exp > 308) { + err_ = FbsonErrType::E_EXPONENT_OVERFLOW; + return false; + } + + in.ignore(); + ch = in.peek(); + } + + if (neg) { + exp = -exp; + } + + return true; + } + + void trim(std::istream& in) { + while (in.good() && strchr(kWhiteSpace, in.peek())) { + in.ignore(); + } + } + + private: + FbsonWriterT<OS_TYPE> writer_; + FbsonErrType err_; +}; + +typedef FbsonJsonParserT<FbsonOutStream> FbsonJsonParser; + +} // namespace fbson + +#endif // FBSON_FBSONPARSER_H diff --git a/external/rocksdb/third-party/fbson/FbsonStream.h b/external/rocksdb/third-party/fbson/FbsonStream.h new file mode 100755 index 0000000000..5f70221db5 --- /dev/null +++ b/external/rocksdb/third-party/fbson/FbsonStream.h
@@ -0,0 +1,187 @@ +/* + * Copyright (c) 2011-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + * + */ + +/* + * This header file defines FbsonInBuffer and FbsonOutStream classes. + * + * ** Input Buffer ** + * FbsonInBuffer is a custom input buffer that wraps a raw character buffer. + * Its object instances are used to create std::istream objects internally. + * + * ** Output Stream ** + * FbsonOutStream is a custom output stream class that contains the FBSON + * serialized binary. The class is conveniently used to specialize templates of + * FbsonParser and FbsonWriter. + * + * @author Tian Xia + */ + +#ifndef FBSON_FBSONSTREAM_H +#define FBSON_FBSONSTREAM_H + +#ifndef __STDC_FORMAT_MACROS +#define __STDC_FORMAT_MACROS +#endif + +#if defined OS_WIN && !defined snprintf +#define snprintf _snprintf +#endif + +#include <inttypes.h> +#include <iostream> + +namespace fbson { + +// lengths include sign +#define MAX_INT_DIGITS 11 +#define MAX_INT64_DIGITS 20 +#define MAX_DOUBLE_DIGITS 23 // 1(sign)+16(significant)+1(decimal)+5(exponent) + +/* + * FBSON's implementation of input buffer + */ +class FbsonInBuffer : public std::streambuf { + public: + FbsonInBuffer(const char* str, uint32_t len) { + // this is read buffer and the str will not be changed + // so we use const_cast (ugly!) to remove constness + char* pch(const_cast<char*>(str)); + setg(pch, pch, pch + len); + } +}; + +/* + * FBSON's implementation of output stream. + * + * This is a wrapper of a char buffer. By default, the buffer capacity is 1024 + * bytes. We will double the buffer if realloc is needed for writes.
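+ * + * Usage sketch (values illustrative): + * fbson::FbsonOutStream os; // heap buffer, initial capacity 1024 + * os.write("key", 3); // raw bytes + * os.write((int64_t)42); // rendered via snprintf + * const char* bytes = os.getBuffer(); // valid until the next realloc + * uint32_t n = (uint32_t)os.getSize();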
+ */ +class FbsonOutStream : public std::ostream { + public: + explicit FbsonOutStream(uint32_t capacity = 1024) + : std::ostream(nullptr), + head_(nullptr), + size_(0), + capacity_(capacity), + alloc_(true) { + if (capacity_ == 0) { + capacity_ = 1024; + } + + head_ = (char*)malloc(capacity_); + } + + FbsonOutStream(char* buffer, uint32_t capacity) + : std::ostream(nullptr), + head_(buffer), + size_(0), + capacity_(capacity), + alloc_(false) { + assert(buffer && capacity_ > 0); + } + + ~FbsonOutStream() { + if (alloc_) { + free(head_); + } + } + + void put(char c) { write(&c, 1); } + + void write(const char* c_str) { write(c_str, (uint32_t)strlen(c_str)); } + + void write(const char* bytes, uint32_t len) { + if (len == 0) + return; + + if (size_ + len > capacity_) { + realloc(len); + } + + memcpy(head_ + size_, bytes, len); + size_ += len; + } + + // write the integer to string + void write(int i) { + // snprintf automatically adds a NULL, so we need one more char + if (size_ + MAX_INT_DIGITS + 1 > capacity_) { + realloc(MAX_INT_DIGITS + 1); + } + + int len = snprintf(head_ + size_, MAX_INT_DIGITS + 1, "%d", i); + assert(len > 0); + size_ += len; + } + + // write the 64bit integer to string + void write(int64_t l) { + // snprintf automatically adds a NULL, so we need one more char + if (size_ + MAX_INT64_DIGITS + 1 > capacity_) { + realloc(MAX_INT64_DIGITS + 1); + } + + int len = snprintf(head_ + size_, MAX_INT64_DIGITS + 1, "%" PRIi64, l); + assert(len > 0); + size_ += len; + } + + // write the double to string + void write(double d) { + // snprintf automatically adds a NULL, so we need one more char + if (size_ + MAX_DOUBLE_DIGITS + 1 > capacity_) { + realloc(MAX_DOUBLE_DIGITS + 1); + } + + int len = snprintf(head_ + size_, MAX_DOUBLE_DIGITS + 1, "%.15g", d); + assert(len > 0); + size_ += len; + } + + pos_type tellp() const { return size_; } + + void seekp(pos_type pos) { size_ = (uint32_t)pos; } + + const char* getBuffer() const { return head_; } + + pos_type getSize() const { return tellp(); } + + private: + void realloc(uint32_t len) { + assert(capacity_ > 0); + + capacity_ *= 2; + while (capacity_ < size_ + len) { + capacity_ *= 2; + } + + if (alloc_) { + char* new_buf = (char*)::realloc(head_, capacity_); + assert(new_buf); + head_ = new_buf; + } else { + char* new_buf = (char*)::malloc(capacity_); + assert(new_buf); + memcpy(new_buf, head_, size_); + head_ = new_buf; + alloc_ = true; + } + } + + private: + char* head_; + uint32_t size_; + uint32_t capacity_; + bool alloc_; +}; + +} // namespace fbson + +#endif // FBSON_FBSONSTREAM_H diff --git a/external/rocksdb/third-party/fbson/FbsonUtil.h b/external/rocksdb/third-party/fbson/FbsonUtil.h new file mode 100755 index 0000000000..2c41547699 --- /dev/null +++ b/external/rocksdb/third-party/fbson/FbsonUtil.h @@ -0,0 +1,168 @@ +/* + * Copyright (c) 2011-present, Facebook, Inc. + * All rights reserved. + * + * This source code is licensed under the BSD-style license found in the + * LICENSE file in the root directory of this source tree. An additional grant + * of patent rights can be found in the PATENTS file in the same directory. + * + */ + +/* + * This header file defines miscellaneous utility classes. + * + * @author Tian Xia + */ + +#ifndef FBSON_FBSONUTIL_H +#define FBSON_FBSONUTIL_H + +#include <sstream> +#include "FbsonDocument.h" + +namespace fbson { + +#define OUT_BUF_SIZE 1024 + +/* + * FbsonToJson converts an FbsonValue object to a JSON string.
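+ * + * e.g. (sketch; `packed`/`size` are FBSON bytes produced elsewhere): + * fbson::FbsonValue* v = fbson::FbsonDocument::createValue(packed, size); + * fbson::FbsonToJson conv; + * const char* json = conv.json(v); // buffer is reused by the next json() call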
diff --git a/external/rocksdb/third-party/fbson/FbsonUtil.h b/external/rocksdb/third-party/fbson/FbsonUtil.h
new file mode 100755
index 0000000000..2c41547699
--- /dev/null
+++ b/external/rocksdb/third-party/fbson/FbsonUtil.h
@@ -0,0 +1,168 @@
+/*
+ * Copyright (c) 2011-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+/*
+ * This header file defines miscellaneous utility classes.
+ *
+ * @author Tian Xia <tianx@fb.com>
+ */
+
+#ifndef FBSON_FBSONUTIL_H
+#define FBSON_FBSONUTIL_H
+
+#include <sstream>
+#include "FbsonDocument.h"
+
+namespace fbson {
+
+#define OUT_BUF_SIZE 1024
+
+/*
+ * FbsonToJson converts an FbsonValue object to a JSON string.
+ */
+class FbsonToJson {
+ public:
+  FbsonToJson() : os_(buffer_, OUT_BUF_SIZE) {}
+
+  // get json string
+  const char* json(const FbsonValue* pval) {
+    os_.clear();
+    os_.seekp(0);
+
+    if (pval) {
+      intern_json(pval);
+    }
+
+    os_.put(0);
+    return os_.getBuffer();
+  }
+
+ private:
+  // recursively convert FbsonValue
+  void intern_json(const FbsonValue* val) {
+    switch (val->type()) {
+      case FbsonType::T_Null: {
+        os_.write("null", 4);
+        break;
+      }
+      case FbsonType::T_True: {
+        os_.write("true", 4);
+        break;
+      }
+      case FbsonType::T_False: {
+        os_.write("false", 5);
+        break;
+      }
+      case FbsonType::T_Int8: {
+        os_.write(((Int8Val*)val)->val());
+        break;
+      }
+      case FbsonType::T_Int16: {
+        os_.write(((Int16Val*)val)->val());
+        break;
+      }
+      case FbsonType::T_Int32: {
+        os_.write(((Int32Val*)val)->val());
+        break;
+      }
+      case FbsonType::T_Int64: {
+        os_.write(((Int64Val*)val)->val());
+        break;
+      }
+      case FbsonType::T_Double: {
+        os_.write(((DoubleVal*)val)->val());
+        break;
+      }
+      case FbsonType::T_String: {
+        os_.put('"');
+        os_.write(((StringVal*)val)->getBlob(), ((StringVal*)val)->getBlobLen());
+        os_.put('"');
+        break;
+      }
+      case FbsonType::T_Binary: {
+        os_.write("\"<BINARY>", 9);
+        os_.write(((BinaryVal*)val)->getBlob(), ((BinaryVal*)val)->getBlobLen());
+        os_.write("<BINARY>\"", 9);
+        break;
+      }
+      case FbsonType::T_Object: {
+        object_to_json((ObjectVal*)val);
+        break;
+      }
+      case FbsonType::T_Array: {
+        array_to_json((ArrayVal*)val);
+        break;
+      }
+      default:
+        break;
+    }
+  }
+
+  // convert object
+  void object_to_json(const ObjectVal* val) {
+    os_.put('{');
+
+    auto iter = val->begin();
+    auto iter_fence = val->end();
+
+    while (iter < iter_fence) {
+      // write key
+      if (iter->klen()) {
+        os_.put('"');
+        os_.write(iter->getKeyStr(), iter->klen());
+        os_.put('"');
+      } else {
+        os_.write(iter->getKeyId());
+      }
+      os_.put(':');
+
+      // convert value
+      intern_json(iter->value());
+
+      ++iter;
+      if (iter != iter_fence) {
+        os_.put(',');
+      }
+    }
+
+    assert(iter == iter_fence);
+
+    os_.put('}');
+  }
+
+  // convert array to json
+  void array_to_json(const ArrayVal* val) {
+    os_.put('[');
+
+    auto iter = val->begin();
+    auto iter_fence = val->end();
+
+    while (iter != iter_fence) {
+      // convert value
+      intern_json((const FbsonValue*)iter);
+      ++iter;
+      if (iter != iter_fence) {
+        os_.put(',');
+      }
+    }
+
+    assert(iter == iter_fence);
+
+    os_.put(']');
+  }
+
+ private:
+  FbsonOutStream os_;
+  char buffer_[OUT_BUF_SIZE];
+};
+
+} // namespace fbson
+
+#endif // FBSON_FBSONUTIL_H
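A hedged sketch of how FbsonToJson is typically driven. It is illustrative rather than part of the diff: FbsonDocument::createDocument and getValue() are assumed to behave as declared in FbsonDocument.h (not shown here), and data/size stand in for FBSON bytes produced elsewhere.

#include <cstdio>
#include "FbsonUtil.h"

// Prints an FBSON packed buffer as JSON.
void dump_as_json(const char* data, unsigned size) {
  // createDocument validates the buffer and returns nullptr on bad input
  // (signature assumed from FbsonDocument.h).
  fbson::FbsonDocument* doc = fbson::FbsonDocument::createDocument(data, size);
  if (!doc) return;            // not a valid FBSON payload
  fbson::FbsonToJson to_json;  // owns a fixed 1024-byte output buffer
  // json() walks the value tree recursively and NUL-terminates the result.
  std::printf("%s\n", to_json.json(doc->getValue()));
}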
diff --git a/external/rocksdb/third-party/fbson/FbsonWriter.h b/external/rocksdb/third-party/fbson/FbsonWriter.h
new file mode 100755
index 0000000000..4efaf817c2
--- /dev/null
+++ b/external/rocksdb/third-party/fbson/FbsonWriter.h
@@ -0,0 +1,435 @@
+/*
+ * Copyright (c) 2011-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This source code is licensed under the BSD-style license found in the
+ * LICENSE file in the root directory of this source tree. An additional grant
+ * of patent rights can be found in the PATENTS file in the same directory.
+ *
+ */
+
+/*
+ * This file defines FbsonWriterT (template) and FbsonWriter.
+ *
+ * FbsonWriterT is a template class which implements an FBSON serializer.
+ * Users call various write functions of an FbsonWriterT object to write values
+ * directly to FBSON packed bytes. All write functions of value or key return
+ * the number of bytes written to FBSON, or 0 if there is an error. To write an
+ * object, an array, or a string, you must call writeStart[..] before writing
+ * values or keys, and call writeEnd[..] after finishing at the end.
+ *
+ * By default, an FbsonWriterT object creates an output stream buffer.
+ * Alternatively, you can also pass any output stream object to a writer, as
+ * long as the stream object implements some basic functions of std::ostream
+ * (such as FbsonOutStream, see FbsonStream.h).
+ *
+ * FbsonWriter specializes FbsonWriterT with the FbsonOutStream type (see
+ * FbsonStream.h). So unless you want to provide your own output stream
+ * type, use the FbsonWriter object.
+ *
+ * @author Tian Xia <tianx@fb.com>
+ */
+
+#ifndef FBSON_FBSONWRITER_H
+#define FBSON_FBSONWRITER_H
+
+#include <stack>
+#include "FbsonDocument.h"
+#include "FbsonStream.h"
+
+namespace fbson {
+
+template <class OS_TYPE>
+class FbsonWriterT {
+ public:
+  FbsonWriterT()
+      : alloc_(true), hasHdr_(false), kvState_(WS_Value), str_pos_(0) {
+    os_ = new OS_TYPE();
+  }
+
+  explicit FbsonWriterT(OS_TYPE& os)
+      : os_(&os),
+        alloc_(false),
+        hasHdr_(false),
+        kvState_(WS_Value),
+        str_pos_(0) {}
+
+  ~FbsonWriterT() {
+    if (alloc_) {
+      delete os_;
+    }
+  }
+
+  void reset() {
+    os_->clear();
+    os_->seekp(0);
+    hasHdr_ = false;
+    kvState_ = WS_Value;
+    for (; !stack_.empty(); stack_.pop())
+      ;
+  }
+
+  // write a key string (or key id if an external dict is provided)
+  uint32_t writeKey(const char* key,
+                    uint8_t len,
+                    hDictInsert handler = nullptr) {
+    if (len && !stack_.empty() && verifyKeyState()) {
+      int key_id = -1;
+      if (handler) {
+        key_id = handler(key, len);
+      }
+
+      uint32_t size = sizeof(uint8_t);
+      if (key_id < 0) {
+        os_->put(len);
+        os_->write(key, len);
+        size += len;
+      } else if (key_id <= FbsonKeyValue::sMaxKeyId) {
+        FbsonKeyValue::keyid_type idx = key_id;
+        os_->put(0);
+        os_->write((char*)&idx, sizeof(FbsonKeyValue::keyid_type));
+        size += sizeof(FbsonKeyValue::keyid_type);
+      } else { // key id overflow
+        assert(0);
+        return 0;
+      }
+
+      kvState_ = WS_Key;
+      return size;
+    }
+
+    return 0;
+  }
+
+  // write a key id
+  uint32_t writeKey(FbsonKeyValue::keyid_type idx) {
+    if (!stack_.empty() && verifyKeyState()) {
+      os_->put(0);
+      os_->write((char*)&idx, sizeof(FbsonKeyValue::keyid_type));
+      kvState_ = WS_Key;
+      return sizeof(uint8_t) + sizeof(FbsonKeyValue::keyid_type);
+    }
+
+    return 0;
+  }
+
+  uint32_t writeNull() {
+    if (!stack_.empty() && verifyValueState()) {
+      os_->put((FbsonTypeUnder)FbsonType::T_Null);
+      kvState_ = WS_Value;
+      return sizeof(FbsonValue);
+    }
+
+    return 0;
+  }
+
+  uint32_t writeBool(bool b) {
+    if (!stack_.empty() && verifyValueState()) {
+      if (b) {
+        os_->put((FbsonTypeUnder)FbsonType::T_True);
+      } else {
+        os_->put((FbsonTypeUnder)FbsonType::T_False);
+      }
+
+      kvState_ = WS_Value;
+      return sizeof(FbsonValue);
+    }
+
+    return 0;
+  }
+
+  uint32_t writeInt8(int8_t v) {
+    if (!stack_.empty() && verifyValueState()) {
+      os_->put((FbsonTypeUnder)FbsonType::T_Int8);
+      os_->put(v);
+      kvState_ = WS_Value;
+      return sizeof(Int8Val);
+    }
+
+    return 0;
+  }
+
+  uint32_t writeInt16(int16_t v) {
+    if (!stack_.empty() && verifyValueState()) {
+      os_->put((FbsonTypeUnder)FbsonType::T_Int16);
+      os_->write((char*)&v, sizeof(int16_t));
+      kvState_ = WS_Value;
+      return sizeof(Int16Val);
+    }
+
+    return 0;
+  }
+
+  uint32_t writeInt32(int32_t v) {
+    if (!stack_.empty() && verifyValueState()) {
+      os_->put((FbsonTypeUnder)FbsonType::T_Int32);
+      os_->write((char*)&v, sizeof(int32_t));
+      kvState_ = WS_Value;
+      return sizeof(Int32Val);
+    }
+
+    return 0;
+  }
+
+  uint32_t writeInt64(int64_t v) {
+    if (!stack_.empty() && verifyValueState()) {
+      os_->put((FbsonTypeUnder)FbsonType::T_Int64);
+      os_->write((char*)&v, sizeof(int64_t));
+      kvState_ = WS_Value;
+      return sizeof(Int64Val);
+    }
+
+    return 0;
+  }
+
+  uint32_t writeDouble(double v) {
+    if (!stack_.empty() && verifyValueState()) {
+      os_->put((FbsonTypeUnder)FbsonType::T_Double);
+      os_->write((char*)&v, sizeof(double));
+      kvState_ = WS_Value;
+      return sizeof(DoubleVal);
+    }
+
+    return 0;
+  }
+
+  // must call writeStartString before writing a string val
+  bool writeStartString() {
+    if (!stack_.empty() && verifyValueState()) {
+      os_->put((FbsonTypeUnder)FbsonType::T_String);
+      str_pos_ = os_->tellp();
+
+      // fill the size bytes with 0 for now
+      uint32_t size = 0;
+      os_->write((char*)&size, sizeof(uint32_t));
+
+      kvState_ = WS_String;
+      return true;
+    }
+
+    return false;
+  }
+
+  // finish writing a string val
+  bool writeEndString() {
+    if (kvState_ == WS_String) {
+      std::streampos cur_pos = os_->tellp();
+      int32_t size = (int32_t)(cur_pos - str_pos_ - sizeof(uint32_t));
+      assert(size >= 0);
+
+      os_->seekp(str_pos_);
+      os_->write((char*)&size, sizeof(uint32_t));
+      os_->seekp(cur_pos);
+
+      kvState_ = WS_Value;
+      return true;
+    }
+
+    return false;
+  }
+
+  uint32_t writeString(const char* str, uint32_t len) {
+    if (kvState_ == WS_String) {
+      os_->write(str, len);
+      return len;
+    }
+
+    return 0;
+  }
+
+  uint32_t writeString(char ch) {
+    if (kvState_ == WS_String) {
+      os_->put(ch);
+      return 1;
+    }
+
+    return 0;
+  }
+
+  // must call writeStartBinary before writing a binary val
+  bool writeStartBinary() {
+    if (!stack_.empty() && verifyValueState()) {
+      os_->put((FbsonTypeUnder)FbsonType::T_Binary);
+      str_pos_ = os_->tellp();
+
+      // fill the size bytes with 0 for now
+      uint32_t size = 0;
+      os_->write((char*)&size, sizeof(uint32_t));
+
+      kvState_ = WS_Binary;
+      return true;
+    }
+
+    return false;
+  }
+
+  // finish writing a binary val
+  bool writeEndBinary() {
+    if (kvState_ == WS_Binary) {
+      std::streampos cur_pos = os_->tellp();
+      int32_t size = (int32_t)(cur_pos - str_pos_ - sizeof(uint32_t));
+      assert(size >= 0);
+
+      os_->seekp(str_pos_);
+      os_->write((char*)&size, sizeof(uint32_t));
+      os_->seekp(cur_pos);
+
+      kvState_ = WS_Value;
+      return true;
+    }
+
+    return false;
+  }
+
+  uint32_t writeBinary(const char* bin, uint32_t len) {
+    if (kvState_ == WS_Binary) {
+      os_->write(bin, len);
+      return len;
+    }
+
+    return 0;
+  }
+
+  // must call writeStartObject before writing an object val
+  bool writeStartObject() {
+    if (stack_.empty() || verifyValueState()) {
+      if (stack_.empty()) {
+        // if this is a new FBSON, write the header
+        if (!hasHdr_) {
+          writeHeader();
+        } else
+          return false;
+      }
+
+      os_->put((FbsonTypeUnder)FbsonType::T_Object);
+      // save the size position
+      stack_.push(WriteInfo({WS_Object, os_->tellp()}));
+
+      // fill the size bytes with 0 for now
+      uint32_t size = 0;
+      os_->write((char*)&size, sizeof(uint32_t));
+
+      kvState_ = WS_Value;
+      return true;
+    }
+
+    return false;
+  }
+
+  // finish writing an object val
+  bool writeEndObject() {
+    if (!stack_.empty() && stack_.top().state == WS_Object &&
+        kvState_ == WS_Value) {
+      WriteInfo& ci = stack_.top();
+      std::streampos cur_pos = os_->tellp();
+      int32_t size = (int32_t)(cur_pos - ci.sz_pos - sizeof(uint32_t));
+      assert(size >= 0);
+
+      os_->seekp(ci.sz_pos);
+      os_->write((char*)&size, sizeof(uint32_t));
+      os_->seekp(cur_pos);
+      stack_.pop();
+
+      return true;
+    }
+
+    return false;
+  }
+
+  // must call writeStartArray before writing an array val
+  bool writeStartArray() {
+    if (stack_.empty() || verifyValueState()) {
+      if (stack_.empty()) {
+        // if this is a new FBSON, write the header
+        if (!hasHdr_) {
+          writeHeader();
+        } else
+          return false;
+      }
+
+      os_->put((FbsonTypeUnder)FbsonType::T_Array);
+      // save the size position
+      stack_.push(WriteInfo({WS_Array, os_->tellp()}));
+
+      // fill the size bytes with 0 for now
+      uint32_t size = 0;
+      os_->write((char*)&size, sizeof(uint32_t));
+
+      kvState_ = WS_Value;
+      return true;
+    }
+
+    return false;
+  }
+
+  // finish writing an array val
+  bool writeEndArray() {
+    if (!stack_.empty() && stack_.top().state == WS_Array &&
+        kvState_ == WS_Value) {
+      WriteInfo& ci = stack_.top();
+      std::streampos cur_pos = os_->tellp();
+      int32_t size = (int32_t)(cur_pos - ci.sz_pos - sizeof(uint32_t));
+      assert(size >= 0);
+
+      os_->seekp(ci.sz_pos);
+      os_->write((char*)&size, sizeof(uint32_t));
+      os_->seekp(cur_pos);
+      stack_.pop();
+
+      return true;
+    }
+
+    return false;
+  }
+
+  OS_TYPE* getOutput() { return os_; }
+
+ private:
+  // verify we are in the right state before writing a value
+  bool verifyValueState() {
+    assert(!stack_.empty());
+    return (stack_.top().state == WS_Object && kvState_ == WS_Key) ||
+           (stack_.top().state == WS_Array && kvState_ == WS_Value);
+  }
+
+  // verify we are in the right state before writing a key
+  bool verifyKeyState() {
+    assert(!stack_.empty());
+    return stack_.top().state == WS_Object && kvState_ == WS_Value;
+  }
+
+  void writeHeader() {
+    os_->put(FBSON_VER);
+    hasHdr_ = true;
+  }
+
+ private:
+  enum WriteState {
+    WS_NONE,
+    WS_Array,
+    WS_Object,
+    WS_Key,
+    WS_Value,
+    WS_String,
+    WS_Binary,
+  };
+
+  struct WriteInfo {
+    WriteState state;
+    std::streampos sz_pos;
+  };
+
+ private:
+  OS_TYPE* os_;
+  bool alloc_;
+  bool hasHdr_;
+  WriteState kvState_; // key or value state
+  std::streampos str_pos_;
+  std::stack<WriteInfo> stack_;
+};
+
+typedef FbsonWriterT<FbsonOutStream> FbsonWriter;
+
+} // namespace fbson
+
+#endif // FBSON_FBSONWRITER_H
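A short serialization sketch against the writer API above (illustrative, not from the diff; the keys and values are invented). A document is a single outer object or array, and the writeEnd calls back-patch the size fields:

#include "FbsonWriter.h"

// Builds {"id":7,"name":"tian"} as FBSON packed bytes and returns the
// writer's internal buffer (owned by its FbsonOutStream).
const char* build_sample(fbson::FbsonWriter& writer) {
  writer.writeStartObject();   // first call also emits the FBSON_VER header
  writer.writeKey("id", 2);    // length-prefixed key string
  writer.writeInt32(7);
  writer.writeKey("name", 4);
  writer.writeStartString();   // size field is patched in writeEndString()
  writer.writeString("tian", 4);
  writer.writeEndString();
  writer.writeEndObject();
  return writer.getOutput()->getBuffer();
}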
diff --git a/external/rocksdb/third-party/flashcache/flashcache_ioctl.h b/external/rocksdb/third-party/flashcache/flashcache_ioctl.h
new file mode 100755
index 0000000000..af111ab4d4
--- /dev/null
+++ b/external/rocksdb/third-party/flashcache/flashcache_ioctl.h
@@ -0,0 +1,55 @@
+/****************************************************************************
+ *  flashcache_ioctl.h
+ *  FlashCache: Device mapper target for block-level disk caching
+ *
+ *  Copyright 2010 Facebook, Inc.
+ *  Author: Mohan Srinivasan (mohan@facebook.com)
+ *
+ *  Based on DM-Cache:
+ *  Copyright (C) International Business Machines Corp., 2006
+ *  Author: Ming Zhao (mingzhao@ufl.edu)
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; under version 2 of the License.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program. If not, see <http://www.gnu.org/licenses/>.
+ ****************************************************************************/
+
+#ifdef OS_LINUX
+#ifndef FLASHCACHE_IOCTL_H
+#define FLASHCACHE_IOCTL_H
+
+#include <linux/types.h>
+
+#define FLASHCACHE_IOCTL 0xfe
+
+enum {
+	FLASHCACHEADDNCPID_CMD=200,
+	FLASHCACHEDELNCPID_CMD,
+	FLASHCACHEDELNCALL_CMD,
+	FLASHCACHEADDWHITELIST_CMD,
+	FLASHCACHEDELWHITELIST_CMD,
+	FLASHCACHEDELWHITELISTALL_CMD,
+};
+
+#define FLASHCACHEADDNCPID	_IOW(FLASHCACHE_IOCTL, FLASHCACHEADDNCPID_CMD, pid_t)
+#define FLASHCACHEDELNCPID	_IOW(FLASHCACHE_IOCTL, FLASHCACHEDELNCPID_CMD, pid_t)
+#define FLASHCACHEDELNCALL	_IOW(FLASHCACHE_IOCTL, FLASHCACHEDELNCALL_CMD, pid_t)
+
+#define FLASHCACHEADDBLACKLIST		FLASHCACHEADDNCPID
+#define FLASHCACHEDELBLACKLIST		FLASHCACHEDELNCPID
+#define FLASHCACHEDELALLBLACKLIST	FLASHCACHEDELNCALL
+
+#define FLASHCACHEADDWHITELIST		_IOW(FLASHCACHE_IOCTL, FLASHCACHEADDWHITELIST_CMD, pid_t)
+#define FLASHCACHEDELWHITELIST		_IOW(FLASHCACHE_IOCTL, FLASHCACHEDELWHITELIST_CMD, pid_t)
+#define FLASHCACHEDELALLWHITELIST	_IOW(FLASHCACHE_IOCTL, FLASHCACHEDELWHITELISTALL_CMD, pid_t)
+
+#endif /* FLASHCACHE_IOCTL_H */
+#endif /* OS_LINUX */
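An illustrative caller for these ioctls, in the spirit of how RocksDB uses them (the device path is a placeholder and error handling is trimmed): a process adds its own pid to the non-cacheable (blacklist) set so its bulk I/O bypasses the cache.

#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include "flashcache_ioctl.h"  // requires OS_LINUX to be defined

// Asks flashcache to stop caching I/O issued by this process.
// "/dev/mapper/cachedev" is a hypothetical mapped-device path.
bool blacklist_self() {
  int fd = open("/dev/mapper/cachedev", O_RDONLY);
  if (fd < 0) return false;
  pid_t pid = getpid();
  int rc = ioctl(fd, FLASHCACHEADDNCPID, &pid);  // NC = non-cacheable
  close(fd);
  return rc == 0;
}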
diff --git a/external/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/CMakeLists.txt b/external/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/CMakeLists.txt
new file mode 100755
index 0000000000..90cff08807
--- /dev/null
+++ b/external/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/CMakeLists.txt
@@ -0,0 +1 @@
+add_library(gtest gtest-all.cc)
diff --git a/external/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/gtest-all.cc b/external/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/gtest-all.cc
new file mode 100755
index 0000000000..154ec6f26b
--- /dev/null
+++ b/external/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/gtest-all.cc
@@ -0,0 +1,10261 @@
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+// Google C++ Testing Framework (Google Test)
+//
+// Sometimes it's desirable to build Google Test by compiling a single file.
+// This file serves this purpose.
+
+// Suppress clang analyzer warnings.
+#ifndef __clang_analyzer__
+
+// This line ensures that gtest.h can be compiled on its own, even
+// when it's fused.
+#include "gtest/gtest.h"
+
+// The following lines pull in the real gtest *.cc files.
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Utilities for testing Google Test itself and code that uses Google Test
+// (e.g. frameworks built on top of Google Test).
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+#define GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+namespace testing {
+
+// This helper class can be used to mock out Google Test failure reporting
+// so that we can test Google Test or code that builds on Google Test.
+//
+// An object of this class appends a TestPartResult object to the
+// TestPartResultArray object given in the constructor whenever a Google Test
+// failure is reported. It can either intercept only failures that are
+// generated in the same thread that created this object or it can intercept
+// all generated failures. The scope of this mock object can be controlled with
+// the second argument to the two arguments constructor.
+class GTEST_API_ ScopedFakeTestPartResultReporter
+    : public TestPartResultReporterInterface {
+ public:
+  // The two possible mocking modes of this object.
+  enum InterceptMode {
+    INTERCEPT_ONLY_CURRENT_THREAD,  // Intercepts only thread local failures.
+    INTERCEPT_ALL_THREADS           // Intercepts all failures.
+  };
+
+  // The c'tor sets this object as the test part result reporter used
+  // by Google Test.  The 'result' parameter specifies where to report the
+  // results. This reporter will only catch failures generated in the current
+  // thread. DEPRECATED
+  explicit ScopedFakeTestPartResultReporter(TestPartResultArray* result);
+
+  // Same as above, but you can choose the interception scope of this object.
+  ScopedFakeTestPartResultReporter(InterceptMode intercept_mode,
+                                   TestPartResultArray* result);
+
+  // The d'tor restores the previous test part result reporter.
+  virtual ~ScopedFakeTestPartResultReporter();
+
+  // Appends the TestPartResult object to the TestPartResultArray
+  // received in the constructor.
+  //
+  // This method is from the TestPartResultReporterInterface
+  // interface.
+  virtual void ReportTestPartResult(const TestPartResult& result);
+ private:
+  void Init();
+
+  const InterceptMode intercept_mode_;
+  TestPartResultReporterInterface* old_reporter_;
+  TestPartResultArray* const result_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedFakeTestPartResultReporter);
+};
+
+namespace internal {
+
+// A helper class for implementing EXPECT_FATAL_FAILURE() and
+// EXPECT_NONFATAL_FAILURE().  Its destructor verifies that the given
+// TestPartResultArray contains exactly one failure that has the given
+// type and contains the given substring.  If that's not the case, a
+// non-fatal failure will be generated.
+class GTEST_API_ SingleFailureChecker {
+ public:
+  // The constructor remembers the arguments.
+  SingleFailureChecker(const TestPartResultArray* results,
+                       TestPartResult::Type type,
+                       const string& substr);
+  ~SingleFailureChecker();
+ private:
+  const TestPartResultArray* const results_;
+  const TestPartResult::Type type_;
+  const string substr_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(SingleFailureChecker);
+};
+
+}  // namespace internal
+
+}  // namespace testing
+
+// A set of macros for testing Google Test assertions or code that's expected
+// to generate Google Test fatal failures.  It verifies that the given
+// statement will cause exactly one fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro.  EXPECT_FATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_FATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+//   - 'statement' cannot reference local non-static variables or
+//     non-static members of the current object.
+//   - 'statement' cannot return a value.
+//   - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works.  The AcceptsMacroThatExpandsToUnprotectedComma test in
+// gtest_unittest.cc will fail to compile if we do that.
+#define EXPECT_FATAL_FAILURE(statement, substr) \
+  do { \
+    class GTestExpectFatalFailureHelper {\
+     public:\
+      static void Execute() { statement; }\
+    };\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+      GTestExpectFatalFailureHelper::Execute();\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_FATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+  do { \
+    class GTestExpectFatalFailureHelper {\
+     public:\
+      static void Execute() { statement; }\
+    };\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kFatalFailure, (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ALL_THREADS, &gtest_failures);\
+      GTestExpectFatalFailureHelper::Execute();\
+    }\
+  } while (::testing::internal::AlwaysFalse())
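For orientation, a typical (illustrative, not from the diff) use of the macro defined above inside a Google Test self-test; the nonfatal variants defined next are used the same way:

static void Helper() { FAIL() << "Expected fatal failure."; }

TEST(FailureSpiTest, CatchesFatalFailure) {
  // Passes iff Helper() produces exactly one fatal failure whose message
  // contains the given substring; note the statement cannot reference locals.
  EXPECT_FATAL_FAILURE(Helper(), "Expected fatal failure.");
}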
+
+// A macro for testing Google Test assertions or code that's expected to
+// generate Google Test non-fatal failures.  It asserts that the given
+// statement will cause exactly one non-fatal Google Test failure with 'substr'
+// being part of the failure message.
+//
+// There are two different versions of this macro.  EXPECT_NONFATAL_FAILURE only
+// affects and considers failures generated in the current thread and
+// EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS does the same but for all threads.
+//
+// 'statement' is allowed to reference local variables and members of
+// the current object.
+//
+// The verification of the assertion is done correctly even when the statement
+// throws an exception or aborts the current function.
+//
+// Known restrictions:
+//   - You cannot stream a failure message to this macro.
+//
+// Note that even though the implementations of the following two
+// macros are much alike, we cannot refactor them to use a common
+// helper macro, due to some peculiarity in how the preprocessor
+// works.  If we do that, the code won't compile when the user gives
+// EXPECT_NONFATAL_FAILURE() a statement that contains a macro that
+// expands to code containing an unprotected comma.  The
+// AcceptsMacroThatExpandsToUnprotectedComma test in gtest_unittest.cc
+// catches that.
+//
+// For the same reason, we have to write
+//   if (::testing::internal::AlwaysTrue()) { statement; }
+// instead of
+//   GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+// to avoid an MSVC warning on unreachable code.
+#define EXPECT_NONFATAL_FAILURE(statement, substr) \
+  do {\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+        (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter:: \
+          INTERCEPT_ONLY_CURRENT_THREAD, &gtest_failures);\
+      if (::testing::internal::AlwaysTrue()) { statement; }\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#define EXPECT_NONFATAL_FAILURE_ON_ALL_THREADS(statement, substr) \
+  do {\
+    ::testing::TestPartResultArray gtest_failures;\
+    ::testing::internal::SingleFailureChecker gtest_checker(\
+        &gtest_failures, ::testing::TestPartResult::kNonFatalFailure, \
+        (substr));\
+    {\
+      ::testing::ScopedFakeTestPartResultReporter gtest_reporter(\
+          ::testing::ScopedFakeTestPartResultReporter::INTERCEPT_ALL_THREADS, \
+          &gtest_failures);\
+      if (::testing::internal::AlwaysTrue()) { statement; }\
+    }\
+  } while (::testing::internal::AlwaysFalse())
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_SPI_H_
+
+#include <ctype.h>
+#include <math.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <time.h>
+#include <wchar.h>
+#include <wctype.h>
+
+#include <algorithm>
+#include <iomanip>
+#include <limits>
+#include <list>
+#include <map>
+#include <ostream>  // NOLINT
+#include <sstream>
+#include <vector>
+
+#if GTEST_OS_LINUX
+
+// TODO(kenton@google.com): Use autoconf to detect availability of
+// gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+# include <fcntl.h>  // NOLINT
+# include <limits.h>  // NOLINT
+# include <sched.h>  // NOLINT
+// Declares vsnprintf().  This header is not available on Windows.
+# include <strings.h>  // NOLINT
+# include <sys/mman.h>  // NOLINT
+# include <sys/time.h>  // NOLINT
+# include <unistd.h>  // NOLINT
+# include <string>
+
+#elif GTEST_OS_SYMBIAN
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h>  // NOLINT
+
+#elif GTEST_OS_ZOS
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+# include <sys/time.h>  // NOLINT
+
+// On z/OS we additionally need strings.h for strcasecmp.
+# include <strings.h>  // NOLINT
+
+#elif GTEST_OS_WINDOWS_MOBILE  // We are on Windows CE.
+
+# include <windows.h>  // NOLINT
+# undef min
+
+#elif GTEST_OS_WINDOWS  // We are on Windows proper.
+
+# include <io.h>  // NOLINT
+# include <sys/timeb.h>  // NOLINT
+# include <sys/types.h>  // NOLINT
+# include <sys/stat.h>  // NOLINT
+
+# if GTEST_OS_WINDOWS_MINGW
+// MinGW has gettimeofday() but not _ftime64().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+//   gettimeofday().
+// TODO(kenton@google.com): There are other ways to get the time on
+//   Windows, like GetTickCount() or GetSystemTimeAsFileTime().  MinGW
+//   supports these.  consider using them instead.
+#  define GTEST_HAS_GETTIMEOFDAY_ 1
+#  include <sys/time.h>  // NOLINT
+# endif  // GTEST_OS_WINDOWS_MINGW
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <windows.h>  // NOLINT
+# undef min
+
+#else
+
+// Assume other platforms have gettimeofday().
+// TODO(kenton@google.com): Use autoconf to detect availability of
+//   gettimeofday().
+# define GTEST_HAS_GETTIMEOFDAY_ 1
+
+// cpplint thinks that the header is already included, so we want to
+// silence it.
+# include <sys/time.h>  // NOLINT
+# include <unistd.h>  // NOLINT
+
+#endif  // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include <arpa/inet.h>  // NOLINT
+# include <netdb.h>  // NOLINT
+# include <sys/socket.h>  // NOLINT
+# include <sys/types.h>  // NOLINT
+#endif
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick is to
+// prevent a user from accidentally including gtest-internal-inl.h in
+// his code.
+#define GTEST_IMPLEMENTATION_ 1
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+// Utility functions and classes used by the Google C++ testing framework.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// This file contains purely Google Test's internal implementation.  Please
+// DO NOT #INCLUDE IT IN A USER PROGRAM.
+
+#ifndef GTEST_SRC_GTEST_INTERNAL_INL_H_
+#define GTEST_SRC_GTEST_INTERNAL_INL_H_
+
+// GTEST_IMPLEMENTATION_ is defined to 1 iff the current translation unit is
+// part of Google Test's implementation; otherwise it's undefined.
+#if !GTEST_IMPLEMENTATION_
+// If this file is included from the user's code, just say no.
+# error "gtest-internal-inl.h is part of Google Test's internal implementation."
+# error "It must not be included except by Google Test itself."
+#endif  // GTEST_IMPLEMENTATION_
+
+#ifndef _WIN32_WCE
+# include <errno.h>
+#endif  // !_WIN32_WCE
+#include <stddef.h>
+#include <stdlib.h>  // For strtoll/_strtoul64/malloc/free.
+#include <string.h>  // For memmove.
+
+#include <algorithm>
+#include <string>
+#include <vector>
+
+#if GTEST_CAN_STREAM_RESULTS_
+# include <arpa/inet.h>  // NOLINT
+# include <netdb.h>  // NOLINT
+#endif
+
+#if GTEST_OS_WINDOWS
+# include <windows.h>  // NOLINT
+#endif  // GTEST_OS_WINDOWS
+
+namespace testing {
+
+// Declares the flags.
+//
+// We don't want the users to modify this flag in the code, but want
+// Google Test's own unit tests to be able to access it. Therefore we
+// declare it here as opposed to in gtest.h.
+GTEST_DECLARE_bool_(death_test_use_fork);
+
+namespace internal {
+
+// The value of GetTestTypeId() as seen from within the Google Test
+// library.  This is solely for testing GetTestTypeId().
+GTEST_API_ extern const TypeId kTestTypeIdInGoogleTest;
+
+// Names of the flags (needed for parsing Google Test flags).
+const char kAlsoRunDisabledTestsFlag[] = "also_run_disabled_tests";
+const char kBreakOnFailureFlag[] = "break_on_failure";
+const char kCatchExceptionsFlag[] = "catch_exceptions";
+const char kColorFlag[] = "color";
+const char kFilterFlag[] = "filter";
+const char kListTestsFlag[] = "list_tests";
+const char kOutputFlag[] = "output";
+const char kPrintTimeFlag[] = "print_time";
+const char kRandomSeedFlag[] = "random_seed";
+const char kRepeatFlag[] = "repeat";
+const char kShuffleFlag[] = "shuffle";
+const char kStackTraceDepthFlag[] = "stack_trace_depth";
+const char kStreamResultToFlag[] = "stream_result_to";
+const char kThrowOnFailureFlag[] = "throw_on_failure";
+
+// A valid random seed must be in [1, kMaxRandomSeed].
+const int kMaxRandomSeed = 99999;
+
+// g_help_flag is true iff the --help flag or an equivalent form is
+// specified on the command line.
+GTEST_API_ extern bool g_help_flag;
+
+// Returns the current time in milliseconds.
+GTEST_API_ TimeInMillis GetTimeInMillis();
+
+// Returns true iff Google Test should use colors in the output.
+GTEST_API_ bool ShouldUseColor(bool stdout_is_tty);
+
+// Formats the given time in milliseconds as seconds.
+GTEST_API_ std::string FormatTimeInMillisAsSeconds(TimeInMillis ms);
+
+// Converts the given time in milliseconds to a date string in the ISO 8601
+// format, without the timezone information.  N.B.: due to the use of the
+// non-reentrant localtime() function, this function is not thread safe.  Do
+// not use it in any code that can be called from multiple threads.
+GTEST_API_ std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms);
+
+// Parses a string for an Int32 flag, in the form of "--flag=value".
+//
+// On success, stores the value of the flag in *value, and returns
+// true.  On failure, returns false without changing *value.
+GTEST_API_ bool ParseInt32Flag(
+    const char* str, const char* flag, Int32* value);
+
+// Returns a random seed in range [1, kMaxRandomSeed] based on the
+// given --gtest_random_seed flag value.
+inline int GetRandomSeedFromFlag(Int32 random_seed_flag) {
+  const unsigned int raw_seed = (random_seed_flag == 0) ?
+      static_cast<unsigned int>(GetTimeInMillis()) :
+      static_cast<unsigned int>(random_seed_flag);
+
+  // Normalizes the actual seed to range [1, kMaxRandomSeed] such that
+  // it's easy to type.
+  const int normalized_seed =
+      static_cast<int>((raw_seed - 1U) %
+                       static_cast<unsigned int>(kMaxRandomSeed)) + 1;
+  return normalized_seed;
+}
+
+// Returns the first valid random seed after 'seed'.  The behavior is
+// undefined if 'seed' is invalid.  The seed after kMaxRandomSeed is
+// considered to be 1.
+inline int GetNextRandomSeed(int seed) {
+  GTEST_CHECK_(1 <= seed && seed <= kMaxRandomSeed)
+      << "Invalid random seed " << seed << " - must be in [1, "
+      << kMaxRandomSeed << "].";
+  const int next_seed = seed + 1;
+  return (next_seed > kMaxRandomSeed) ? 1 : next_seed;
+}
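A quick worked check of the two helpers above; the values follow directly from the arithmetic in the code:

// GetRandomSeedFromFlag normalizes to (raw_seed - 1) % kMaxRandomSeed + 1:
//   GetRandomSeedFromFlag(1)      == 1
//   GetRandomSeedFromFlag(99999)  == 99999
//   GetRandomSeedFromFlag(100000) == 1      // wraps past kMaxRandomSeed
//   GetRandomSeedFromFlag(0)      // derived from GetTimeInMillis(), never 0
// GetNextRandomSeed cycles within [1, kMaxRandomSeed]:
//   GetNextRandomSeed(99999)      == 1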
+// This class saves the values of all Google Test flags in its c'tor, and
+// restores them in its d'tor.
+class GTestFlagSaver {
+ public:
+  // The c'tor.
+  GTestFlagSaver() {
+    also_run_disabled_tests_ = GTEST_FLAG(also_run_disabled_tests);
+    break_on_failure_ = GTEST_FLAG(break_on_failure);
+    catch_exceptions_ = GTEST_FLAG(catch_exceptions);
+    color_ = GTEST_FLAG(color);
+    death_test_style_ = GTEST_FLAG(death_test_style);
+    death_test_use_fork_ = GTEST_FLAG(death_test_use_fork);
+    filter_ = GTEST_FLAG(filter);
+    internal_run_death_test_ = GTEST_FLAG(internal_run_death_test);
+    list_tests_ = GTEST_FLAG(list_tests);
+    output_ = GTEST_FLAG(output);
+    print_time_ = GTEST_FLAG(print_time);
+    random_seed_ = GTEST_FLAG(random_seed);
+    repeat_ = GTEST_FLAG(repeat);
+    shuffle_ = GTEST_FLAG(shuffle);
+    stack_trace_depth_ = GTEST_FLAG(stack_trace_depth);
+    stream_result_to_ = GTEST_FLAG(stream_result_to);
+    throw_on_failure_ = GTEST_FLAG(throw_on_failure);
+  }
+
+  // The d'tor is not virtual.  DO NOT INHERIT FROM THIS CLASS.
+  ~GTestFlagSaver() {
+    GTEST_FLAG(also_run_disabled_tests) = also_run_disabled_tests_;
+    GTEST_FLAG(break_on_failure) = break_on_failure_;
+    GTEST_FLAG(catch_exceptions) = catch_exceptions_;
+    GTEST_FLAG(color) = color_;
+    GTEST_FLAG(death_test_style) = death_test_style_;
+    GTEST_FLAG(death_test_use_fork) = death_test_use_fork_;
+    GTEST_FLAG(filter) = filter_;
+    GTEST_FLAG(internal_run_death_test) = internal_run_death_test_;
+    GTEST_FLAG(list_tests) = list_tests_;
+    GTEST_FLAG(output) = output_;
+    GTEST_FLAG(print_time) = print_time_;
+    GTEST_FLAG(random_seed) = random_seed_;
+    GTEST_FLAG(repeat) = repeat_;
+    GTEST_FLAG(shuffle) = shuffle_;
+    GTEST_FLAG(stack_trace_depth) = stack_trace_depth_;
+    GTEST_FLAG(stream_result_to) = stream_result_to_;
+    GTEST_FLAG(throw_on_failure) = throw_on_failure_;
+  }
+
+ private:
+  // Fields for saving the original values of flags.
+  bool also_run_disabled_tests_;
+  bool break_on_failure_;
+  bool catch_exceptions_;
+  std::string color_;
+  std::string death_test_style_;
+  bool death_test_use_fork_;
+  std::string filter_;
+  std::string internal_run_death_test_;
+  bool list_tests_;
+  std::string output_;
+  bool print_time_;
+  internal::Int32 random_seed_;
+  internal::Int32 repeat_;
+  bool shuffle_;
+  internal::Int32 stack_trace_depth_;
+  std::string stream_result_to_;
+  bool throw_on_failure_;
+} GTEST_ATTRIBUTE_UNUSED_;
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
+// to "(Invalid Unicode 0xXXXXXXXX)".
+GTEST_API_ std::string CodePointToUtf8(UInt32 code_point);
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+//   UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+//   UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed. -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from Basic Normal Plane.
+GTEST_API_ std::string WideStringToUtf8(const wchar_t* str, int num_chars);
+
+// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file
+// if the variable is present. If a file already exists at this location, this
+// function will write over it. If the variable is present, but the file cannot
+// be created, prints an error and exits.
+void WriteToShardStatusFileIfNeeded();
+
+// Checks whether sharding is enabled by examining the relevant
+// environment variable values. If the variables are present,
+// but inconsistent (e.g., shard_index >= total_shards), prints
+// an error and exits. If in_subprocess_for_death_test, sharding is
+// disabled because it must only be applied to the original test
+// process. Otherwise, we could filter out death tests we intended to execute.
+GTEST_API_ bool ShouldShard(const char* total_shards_str,
+                            const char* shard_index_str,
+                            bool in_subprocess_for_death_test);
+
+// Parses the environment variable var as an Int32. If it is unset,
+// returns default_val. If it is not an Int32, prints an error
+// and aborts.
+GTEST_API_ Int32 Int32FromEnvOrDie(const char* env_var, Int32 default_val);
+
+// Given the total number of shards, the shard index, and the test id,
+// returns true iff the test should be run on this shard. The test id is
+// some arbitrary but unique non-negative integer assigned to each test
+// method. Assumes that 0 <= shard_index < total_shards.
+GTEST_API_ bool ShouldRunTestOnShard(
+    int total_shards, int shard_index, int test_id);
+
+// STL container utilities.
+
+// Returns the number of elements in the given container that satisfy
+// the given predicate.
+template <class Container, class Predicate>
+inline int CountIf(const Container& c, Predicate predicate) {
+  // Implemented as an explicit loop since std::count_if() in libCstd on
+  // Solaris has a non-standard signature.
+  int count = 0;
+  for (typename Container::const_iterator it = c.begin(); it != c.end(); ++it) {
+    if (predicate(*it))
+      ++count;
+  }
+  return count;
+}
+
+// Applies a function/functor to each element in the container.
+template <class Container, class Functor>
+void ForEach(const Container& c, Functor functor) {
+  std::for_each(c.begin(), c.end(), functor);
+}
+
+// Returns the i-th element of the vector, or default_value if i is not
+// in range [0, v.size()).
+template <typename E>
+inline E GetElementOr(const std::vector<E>& v, int i, E default_value) {
+  return (i < 0 || i >= static_cast<int>(v.size())) ? default_value : v[i];
+}
+
+// Performs an in-place shuffle of a range of the vector's elements.
+// 'begin' and 'end' are element indices as an STL-style range;
+// i.e. [begin, end) are shuffled, where 'end' == size() means to
+// shuffle to the end of the vector.
+template <typename E>
+void ShuffleRange(internal::Random* random, int begin, int end,
+                  std::vector<E>* v) {
+  const int size = static_cast<int>(v->size());
+  GTEST_CHECK_(0 <= begin && begin <= size)
+      << "Invalid shuffle range start " << begin << ": must be in range [0, "
+      << size << "].";
+  GTEST_CHECK_(begin <= end && end <= size)
+      << "Invalid shuffle range finish " << end << ": must be in range ["
+      << begin << ", " << size << "].";
+
+  // Fisher-Yates shuffle, from
+  // http://en.wikipedia.org/wiki/Fisher-Yates_shuffle
+  for (int range_width = end - begin; range_width >= 2; range_width--) {
+    const int last_in_range = begin + range_width - 1;
+    const int selected = begin + random->Generate(range_width);
+    std::swap((*v)[selected], (*v)[last_in_range]);
+  }
+}
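An illustrative driver for the container helpers above (invented data; Random is gtest's internal PRNG declared elsewhere in these headers):

#include <vector>

void demo(testing::internal::Random* rng) {
  std::vector<int> v;
  for (int i = 0; i < 5; ++i) v.push_back(i);

  // Fisher-Yates-shuffle indices [1, 5) only: v[0] stays put, mirroring how
  // gtest keeps death test cases ordered in front.
  testing::internal::ShuffleRange(rng, 1, static_cast<int>(v.size()), &v);

  // Out-of-range reads fall back to the default instead of invoking UB.
  int d = testing::internal::GetElementOr(v, 99, -1);  // d == -1
  (void)d;
}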
+// Performs an in-place shuffle of the vector's elements.
+template <typename E>
+inline void Shuffle(internal::Random* random, std::vector<E>* v) {
+  ShuffleRange(random, 0, static_cast<int>(v->size()), v);
+}
+
+// A function for deleting an object.  Handy for being used as a
+// functor.
+template <typename T>
+static void Delete(T* x) {
+  delete x;
+}
+
+// A predicate that checks the key of a TestProperty against a known key.
+//
+// TestPropertyKeyIs is copyable.
+class TestPropertyKeyIs {
+ public:
+  // Constructor.
+  //
+  // TestPropertyKeyIs has NO default constructor.
+  explicit TestPropertyKeyIs(const std::string& key) : key_(key) {}
+
+  // Returns true iff the test name of test property matches on key_.
+  bool operator()(const TestProperty& test_property) const {
+    return test_property.key() == key_;
+  }
+
+ private:
+  std::string key_;
+};
+
+// Class UnitTestOptions.
+//
+// This class contains functions for processing options the user
+// specifies when running the tests.  It has only static members.
+//
+// In most cases, the user can specify an option using either an
+// environment variable or a command line flag.  E.g. you can set the
+// test filter using either GTEST_FILTER or --gtest_filter.  If both
+// the variable and the flag are present, the latter overrides the
+// former.
+class GTEST_API_ UnitTestOptions {
+ public:
+  // Functions for processing the gtest_output flag.
+
+  // Returns the output format, or "" for normal printed output.
+  static std::string GetOutputFormat();
+
+  // Returns the absolute path of the requested output file, or the
+  // default (test_detail.xml in the original working directory) if
+  // none was explicitly specified.
+  static std::string GetAbsolutePathToOutputFile();
+
+  // Functions for processing the gtest_filter flag.
+
+  // Returns true iff the wildcard pattern matches the string.  The
+  // first ':' or '\0' character in pattern marks the end of it.
+  //
+  // This recursive algorithm isn't very efficient, but is clear and
+  // works well enough for matching test names, which are short.
+  static bool PatternMatchesString(const char *pattern, const char *str);
+
+  // Returns true iff the user-specified filter matches the test case
+  // name and the test name.
+  static bool FilterMatchesTest(const std::string &test_case_name,
+                                const std::string &test_name);
+
+#if GTEST_OS_WINDOWS
+  // Function for supporting the gtest_catch_exception flag.
+
+  // Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the
+  // given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise.
+  // This function is useful as an __except condition.
+  static int GTestShouldProcessSEH(DWORD exception_code);
+#endif  // GTEST_OS_WINDOWS
+
+  // Returns true if "name" matches the ':' separated list of glob-style
+  // filters in "filter".
+  static bool MatchesFilter(const std::string& name, const char* filter);
+};
+
+// Returns the current application's name, removing directory path if that
+// is present.  Used by UnitTestOptions::GetOutputFile.
+GTEST_API_ FilePath GetCurrentExecutableName();
+
+// The role interface for getting the OS stack trace as a string.
+class OsStackTraceGetterInterface {
+ public:
+  OsStackTraceGetterInterface() {}
+  virtual ~OsStackTraceGetterInterface() {}
+
+  // Returns the current OS stack trace as an std::string.  Parameters:
+  //
+  //   max_depth  - the maximum number of stack frames to be included
+  //                in the trace.
+  //   skip_count - the number of top frames to be skipped; doesn't count
+  //                against max_depth.
+  virtual string CurrentStackTrace(int max_depth, int skip_count) = 0;
+
+  // UponLeavingGTest() should be called immediately before Google Test calls
+  // user code. It saves some information about the current stack that
+  // CurrentStackTrace() will use to find and hide Google Test stack frames.
+  virtual void UponLeavingGTest() = 0;
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetterInterface);
+};
+
+// A working implementation of the OsStackTraceGetterInterface interface.
+class OsStackTraceGetter : public OsStackTraceGetterInterface {
+ public:
+  OsStackTraceGetter() : caller_frame_(NULL) {}
+
+  virtual string CurrentStackTrace(int max_depth, int skip_count)
+      GTEST_LOCK_EXCLUDED_(mutex_);
+
+  virtual void UponLeavingGTest() GTEST_LOCK_EXCLUDED_(mutex_);
+
+  // This string is inserted in place of stack frames that are part of
+  // Google Test's implementation.
+  static const char* const kElidedFramesMarker;
+
+ private:
+  Mutex mutex_;  // protects all internal state
+
+  // We save the stack frame below the frame that calls user code.
+  // We do this because the address of the frame immediately below
+  // the user code changes between the call to UponLeavingGTest()
+  // and any calls to CurrentStackTrace() from within the user code.
+  void* caller_frame_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(OsStackTraceGetter);
+};
+
+// Information about a Google Test trace point.
+struct TraceInfo {
+  const char* file;
+  int line;
+  std::string message;
+};
+
+// This is the default global test part result reporter used in UnitTestImpl.
+// This class should only be used by UnitTestImpl.
+class DefaultGlobalTestPartResultReporter
+    : public TestPartResultReporterInterface {
+ public:
+  explicit DefaultGlobalTestPartResultReporter(UnitTestImpl* unit_test);
+  // Implements the TestPartResultReporterInterface. Reports the test part
+  // result in the current test.
+  virtual void ReportTestPartResult(const TestPartResult& result);
+
+ private:
+  UnitTestImpl* const unit_test_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultGlobalTestPartResultReporter);
+};
+
+// This is the default per thread test part result reporter used in
+// UnitTestImpl. This class should only be used by UnitTestImpl.
+class DefaultPerThreadTestPartResultReporter
+    : public TestPartResultReporterInterface {
+ public:
+  explicit DefaultPerThreadTestPartResultReporter(UnitTestImpl* unit_test);
+  // Implements the TestPartResultReporterInterface. The implementation just
+  // delegates to the current global test part result reporter of *unit_test_.
+  virtual void ReportTestPartResult(const TestPartResult& result);
+
+ private:
+  UnitTestImpl* const unit_test_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(DefaultPerThreadTestPartResultReporter);
+};
+
+// The private implementation of the UnitTest class.  We don't protect
+// the methods under a mutex, as this class is not accessible by a
+// user and the UnitTest class that delegates work to this class does
+// proper locking.
+class GTEST_API_ UnitTestImpl {
+ public:
+  explicit UnitTestImpl(UnitTest* parent);
+  virtual ~UnitTestImpl();
+
+  // There are two different ways to register your own TestPartResultReporter.
+  // You can register your own reporter to listen either only for test results
+  // from the current thread or for results from all threads.
+  // By default, each per-thread test result reporter just passes a new
+  // TestPartResult to the global test result reporter, which registers the
+  // test part result for the currently running test.
+
+  // Returns the global test part result reporter.
+  TestPartResultReporterInterface* GetGlobalTestPartResultReporter();
+
+  // Sets the global test part result reporter.
+  void SetGlobalTestPartResultReporter(
+      TestPartResultReporterInterface* reporter);
+
+  // Returns the test part result reporter for the current thread.
+  TestPartResultReporterInterface* GetTestPartResultReporterForCurrentThread();
+
+  // Sets the test part result reporter for the current thread.
+  void SetTestPartResultReporterForCurrentThread(
+      TestPartResultReporterInterface* reporter);
+
+  // Gets the number of successful test cases.
+  int successful_test_case_count() const;
+
+  // Gets the number of failed test cases.
+  int failed_test_case_count() const;
+
+  // Gets the number of all test cases.
+  int total_test_case_count() const;
+
+  // Gets the number of all test cases that contain at least one test
+  // that should run.
+  int test_case_to_run_count() const;
+
+  // Gets the number of successful tests.
+  int successful_test_count() const;
+
+  // Gets the number of failed tests.
+  int failed_test_count() const;
+
+  // Gets the number of disabled tests that will be reported in the XML report.
+  int reportable_disabled_test_count() const;
+
+  // Gets the number of disabled tests.
+  int disabled_test_count() const;
+
+  // Gets the number of tests to be printed in the XML report.
+  int reportable_test_count() const;
+
+  // Gets the number of all tests.
+  int total_test_count() const;
+
+  // Gets the number of tests that should run.
+  int test_to_run_count() const;
+
+  // Gets the time of the test program start, in ms from the start of the
+  // UNIX epoch.
+  TimeInMillis start_timestamp() const { return start_timestamp_; }
+
+  // Gets the elapsed time, in milliseconds.
+  TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+  // Returns true iff the unit test passed (i.e. all test cases passed).
+  bool Passed() const { return !Failed(); }
+
+  // Returns true iff the unit test failed (i.e. some test case failed
+  // or something outside of all tests failed).
+  bool Failed() const {
+    return failed_test_case_count() > 0 || ad_hoc_test_result()->Failed();
+  }
+
+  // Gets the i-th test case among all the test cases. i can range from 0 to
+  // total_test_case_count() - 1. If i is not in that range, returns NULL.
+  const TestCase* GetTestCase(int i) const {
+    const int index = GetElementOr(test_case_indices_, i, -1);
+    return index < 0 ? NULL : test_cases_[i];
+  }
+
+  // Gets the i-th test case among all the test cases. i can range from 0 to
+  // total_test_case_count() - 1. If i is not in that range, returns NULL.
+  TestCase* GetMutableTestCase(int i) {
+    const int index = GetElementOr(test_case_indices_, i, -1);
+    return index < 0 ? NULL : test_cases_[index];
+  }
+
+  // Provides access to the event listener list.
+  TestEventListeners* listeners() { return &listeners_; }
+
+  // Returns the TestResult for the test that's currently running, or
+  // the TestResult for the ad hoc test if no test is running.
+  TestResult* current_test_result();
+
+  // Returns the TestResult for the ad hoc test.
+  const TestResult* ad_hoc_test_result() const { return &ad_hoc_test_result_; }
+
+  // Sets the OS stack trace getter.
+  //
+  // Does nothing if the input and the current OS stack trace getter
+  // are the same; otherwise, deletes the old getter and makes the
+  // input the current getter.
+  void set_os_stack_trace_getter(OsStackTraceGetterInterface* getter);
+
+  // Returns the current OS stack trace getter if it is not NULL;
+  // otherwise, creates an OsStackTraceGetter, makes it the current
+  // getter, and returns it.
+  OsStackTraceGetterInterface* os_stack_trace_getter();
+
+  // Returns the current OS stack trace as an std::string.
+  //
+  // The maximum number of stack frames to be included is specified by
+  // the gtest_stack_trace_depth flag.  The skip_count parameter
+  // specifies the number of top frames to be skipped, which doesn't
+  // count against the number of frames to be included.
+  //
+  // For example, if Foo() calls Bar(), which in turn calls
+  // CurrentOsStackTraceExceptTop(1), Foo() will be included in the
+  // trace but Bar() and CurrentOsStackTraceExceptTop() won't.
+  std::string CurrentOsStackTraceExceptTop(int skip_count) GTEST_NO_INLINE_;
+
+  // Finds and returns a TestCase with the given name.  If one doesn't
+  // exist, creates one and returns it.
+  //
+  // Arguments:
+  //
+  //   test_case_name: name of the test case
+  //   type_param:     the name of the test's type parameter, or NULL if
+  //                   this is not a typed or a type-parameterized test.
+  //   set_up_tc:      pointer to the function that sets up the test case
+  //   tear_down_tc:   pointer to the function that tears down the test case
+  TestCase* GetTestCase(const char* test_case_name,
+                        const char* type_param,
+                        Test::SetUpTestCaseFunc set_up_tc,
+                        Test::TearDownTestCaseFunc tear_down_tc);
+
+  // Adds a TestInfo to the unit test.
+  //
+  // Arguments:
+  //
+  //   set_up_tc:    pointer to the function that sets up the test case
+  //   tear_down_tc: pointer to the function that tears down the test case
+  //   test_info:    the TestInfo object
+  void AddTestInfo(Test::SetUpTestCaseFunc set_up_tc,
+                   Test::TearDownTestCaseFunc tear_down_tc,
+                   TestInfo* test_info) {
+    // In order to support thread-safe death tests, we need to
+    // remember the original working directory when the test program
+    // was first invoked.  We cannot do this in RUN_ALL_TESTS(), as
+    // the user may have changed the current directory before calling
+    // RUN_ALL_TESTS().  Therefore we capture the current directory in
+    // AddTestInfo(), which is called to register a TEST or TEST_F
+    // before main() is reached.
+    if (original_working_dir_.IsEmpty()) {
+      original_working_dir_.Set(FilePath::GetCurrentDir());
+      GTEST_CHECK_(!original_working_dir_.IsEmpty())
+          << "Failed to get the current working directory.";
+    }
+
+    GetTestCase(test_info->test_case_name(),
+                test_info->type_param(),
+                set_up_tc,
+                tear_down_tc)->AddTestInfo(test_info);
+  }
+
+#if GTEST_HAS_PARAM_TEST
+  // Returns ParameterizedTestCaseRegistry object used to keep track of
+  // value-parameterized tests and instantiate and register them.
+  internal::ParameterizedTestCaseRegistry& parameterized_test_registry() {
+    return parameterized_test_registry_;
+  }
+#endif  // GTEST_HAS_PARAM_TEST
+
+  // Sets the TestCase object for the test that's currently running.
+  void set_current_test_case(TestCase* a_current_test_case) {
+    current_test_case_ = a_current_test_case;
+  }
+
+  // Sets the TestInfo object for the test that's currently running.  If
+  // current_test_info is NULL, the assertion results will be stored in
+  // ad_hoc_test_result_.
+ void set_current_test_info(TestInfo* a_current_test_info) { + current_test_info_ = a_current_test_info; + } + + // Registers all parameterized tests defined using TEST_P and + // INSTANTIATE_TEST_CASE_P, creating regular tests for each test/parameter + // combination. This method can be called more than once; it has guards + // protecting against registering the tests more than once. If + // value-parameterized tests are disabled, RegisterParameterizedTests is + // present but does nothing. + void RegisterParameterizedTests(); + + // Runs all tests in this UnitTest object, prints the result, and + // returns true if all tests are successful. If any exception is + // thrown during a test, this test is considered to be failed, but + // the rest of the tests will still be run. + bool RunAllTests(); + + // Clears the results of all tests, except the ad hoc tests. + void ClearNonAdHocTestResult() { + ForEach(test_cases_, TestCase::ClearTestCaseResult); + } + + // Clears the results of ad-hoc test assertions. + void ClearAdHocTestResult() { + ad_hoc_test_result_.Clear(); + } + + // Adds a TestProperty to the current TestResult object when invoked in a + // context of a test or a test case, or to the global property set. If the + // result already contains a property with the same key, the value will be + // updated. + void RecordProperty(const TestProperty& test_property); + + enum ReactionToSharding { + HONOR_SHARDING_PROTOCOL, + IGNORE_SHARDING_PROTOCOL + }; + + // Matches the full name of each test against the user-specified + // filter to decide whether the test should run, then records the + // result in each TestCase and TestInfo object. + // If shard_tests == HONOR_SHARDING_PROTOCOL, further filters tests + // based on sharding variables in the environment. + // Returns the number of tests that should run. + int FilterTests(ReactionToSharding shard_tests); + + // Prints the names of the tests matching the user-specified filter flag. + void ListTestsMatchingFilter(); + + const TestCase* current_test_case() const { return current_test_case_; } + TestInfo* current_test_info() { return current_test_info_; } + const TestInfo* current_test_info() const { return current_test_info_; } + + // Returns the vector of environments that need to be set-up/torn-down + // before/after the tests are run. + std::vector<Environment*>& environments() { return environments_; } + + // Getters for the per-thread Google Test trace stack. + std::vector<TraceInfo>& gtest_trace_stack() { + return *(gtest_trace_stack_.pointer()); + } + const std::vector<TraceInfo>& gtest_trace_stack() const { + return gtest_trace_stack_.get(); + } + +#if GTEST_HAS_DEATH_TEST + void InitDeathTestSubprocessControlInfo() { + internal_run_death_test_flag_.reset(ParseInternalRunDeathTestFlag()); + } + // Returns a pointer to the parsed --gtest_internal_run_death_test + // flag, or NULL if that flag was not specified. + // This information is useful only in a death test child process. + // Must not be called before a call to InitGoogleTest. + const InternalRunDeathTestFlag* internal_run_death_test_flag() const { + return internal_run_death_test_flag_.get(); + } + + // Returns a pointer to the current death test factory. + internal::DeathTestFactory* death_test_factory() { + return death_test_factory_.get(); + } + + void SuppressTestEventsIfInSubprocess(); + + friend class ReplaceDeathTestFactory; +#endif // GTEST_HAS_DEATH_TEST +
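// How AddTestInfo() gets reached before main(): the TEST/TEST_F macros define
// a static member whose initializer calls internal::MakeAndRegisterTestInfo(),
// which ends up in AddTestInfo() at static-initialization time. A simplified,
// hypothetical model of that registration pattern (names are illustrative
// only, not gtest identifiers):
//
//   struct Registrar {
//     Registrar() { /* runs before main(), like test registration */ }
//   };
//   static Registrar g_registrar;  // constructed during static init
+ + // Initializes the event listener performing XML output as specified by + // UnitTestOptions. Must not be called before InitGoogleTest.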
+ void ConfigureXmlOutput(); + +#if GTEST_CAN_STREAM_RESULTS_ + // Initializes the event listener for streaming test results to a socket. + // Must not be called before InitGoogleTest. + void ConfigureStreamingOutput(); +#endif + + // Performs initialization dependent upon flag values obtained in + // ParseGoogleTestFlagsOnly. Is called from InitGoogleTest after the call to + // ParseGoogleTestFlagsOnly. In case a user neglects to call InitGoogleTest + // this function is also called from RunAllTests. Since this function can be + // called more than once, it has to be idempotent. + void PostFlagParsingInit(); + + // Gets the random seed used at the start of the current test iteration. + int random_seed() const { return random_seed_; } + + // Gets the random number generator. + internal::Random* random() { return &random_; } + + // Shuffles all test cases, and the tests within each test case, + // making sure that death tests are still run first. + void ShuffleTests(); + + // Restores the test cases and tests to their order before the first shuffle. + void UnshuffleTests(); + + // Returns the value of GTEST_FLAG(catch_exceptions) at the moment + // UnitTest::Run() starts. + bool catch_exceptions() const { return catch_exceptions_; } + + private: + friend class ::testing::UnitTest; + + // Used by UnitTest::Run() to capture the state of + // GTEST_FLAG(catch_exceptions) at the moment it starts. + void set_catch_exceptions(bool value) { catch_exceptions_ = value; } + + // The UnitTest object that owns this implementation object. + UnitTest* const parent_; + + // The working directory when the first TEST() or TEST_F() was + // executed. + internal::FilePath original_working_dir_; + + // The default test part result reporters. + DefaultGlobalTestPartResultReporter default_global_test_part_result_reporter_; + DefaultPerThreadTestPartResultReporter + default_per_thread_test_part_result_reporter_; + + // Points to (but doesn't own) the global test part result reporter. + TestPartResultReporterInterface* global_test_part_result_repoter_; + + // Protects read and write access to global_test_part_result_reporter_. + internal::Mutex global_test_part_result_reporter_mutex_; + + // Points to (but doesn't own) the per-thread test part result reporter. + internal::ThreadLocal<TestPartResultReporterInterface*> + per_thread_test_part_result_reporter_; + + // The vector of environments that need to be set-up/torn-down + // before/after the tests are run. + std::vector<Environment*> environments_; + + // The vector of TestCases in their original order. It owns the + // elements in the vector. + std::vector<TestCase*> test_cases_; + + // Provides a level of indirection for the test case list to allow + // easy shuffling and restoring the test case order. The i-th + // element of this vector is the index of the i-th test case in the + // shuffled order. + std::vector<int> test_case_indices_; + +#if GTEST_HAS_PARAM_TEST + // ParameterizedTestRegistry object used to register value-parameterized + // tests. + internal::ParameterizedTestCaseRegistry parameterized_test_registry_; + + // Indicates whether RegisterParameterizedTests() has been called already. + bool parameterized_tests_registered_; +#endif // GTEST_HAS_PARAM_TEST + + // Index of the last death test case registered. Initially -1. + int last_death_test_case_; + + // This points to the TestCase for the currently running test. It + // changes as Google Test goes through one test case after another. + // When no test is running, this is set to NULL and Google Test + // stores assertion results in ad_hoc_test_result_. Initially NULL.
+ TestCase* current_test_case_; + + // This points to the TestInfo for the currently running test. It + // changes as Google Test goes through one test after another. When + // no test is running, this is set to NULL and Google Test stores + // assertion results in ad_hoc_test_result_. Initially NULL. + TestInfo* current_test_info_; + + // Normally, a user only writes assertions inside a TEST or TEST_F, + // or inside a function called by a TEST or TEST_F. Since Google + // Test keeps track of which test is currently running, it can + // associate such an assertion with the test it belongs to. + // + // If an assertion is encountered when no TEST or TEST_F is running, + // Google Test attributes the assertion result to an imaginary "ad hoc" + // test, and records the result in ad_hoc_test_result_. + TestResult ad_hoc_test_result_; + + // The list of event listeners that can be used to track events inside + // Google Test. + TestEventListeners listeners_; + + // The OS stack trace getter. Will be deleted when the UnitTest + // object is destructed. By default, an OsStackTraceGetter is used, + // but the user can set this field to use a custom getter if that is + // desired. + OsStackTraceGetterInterface* os_stack_trace_getter_; + + // True iff PostFlagParsingInit() has been called. + bool post_flag_parse_init_performed_; + + // The random number seed used at the beginning of the test run. + int random_seed_; + + // Our random number generator. + internal::Random random_; + + // The time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp_; + + // How long the test took to run, in milliseconds. + TimeInMillis elapsed_time_; + +#if GTEST_HAS_DEATH_TEST + // The decomposed components of the gtest_internal_run_death_test flag, + // parsed when RUN_ALL_TESTS is called. + internal::scoped_ptr<InternalRunDeathTestFlag> internal_run_death_test_flag_; + internal::scoped_ptr<internal::DeathTestFactory> death_test_factory_; +#endif // GTEST_HAS_DEATH_TEST + + // A per-thread stack of traces created by the SCOPED_TRACE() macro. + internal::ThreadLocal<std::vector<TraceInfo> > gtest_trace_stack_; + + // The value of GTEST_FLAG(catch_exceptions) at the moment RunAllTests() + // starts. + bool catch_exceptions_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTestImpl); +}; // class UnitTestImpl + +// Convenience function for accessing the global UnitTest +// implementation object. +inline UnitTestImpl* GetUnitTestImpl() { + return UnitTest::GetInstance()->impl(); +} + +#if GTEST_USES_SIMPLE_RE + +// Internal helper functions for implementing the simple regular +// expression matcher. +GTEST_API_ bool IsInSet(char ch, const char* str); +GTEST_API_ bool IsAsciiDigit(char ch); +GTEST_API_ bool IsAsciiPunct(char ch); +GTEST_API_ bool IsRepeat(char ch); +GTEST_API_ bool IsAsciiWhiteSpace(char ch); +GTEST_API_ bool IsAsciiWordChar(char ch); +GTEST_API_ bool IsValidEscape(char ch); +GTEST_API_ bool AtomMatchesChar(bool escaped, char pattern, char ch); +GTEST_API_ bool ValidateRegex(const char* regex); +GTEST_API_ bool MatchRegexAtHead(const char* regex, const char* str); +GTEST_API_ bool MatchRepetitionAndRegexAtHead( + bool escaped, char ch, char repeat, const char* regex, const char* str); +GTEST_API_ bool MatchRegexAnywhere(const char* regex, const char* str); + +#endif // GTEST_USES_SIMPLE_RE +
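// A minimal usage sketch (illustrative, not part of the patch): internal code
// reaches the singleton through the accessor above and then queries it, e.g.
//
//   internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
//   if (impl->failed_test_count() > 0) {
//     // ... react to failures ...
//   }
+ +// Parses the command line for Google Test flags, without initializing +// other parts of Google Test.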
+GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, char** argv); +GTEST_API_ void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv); + +#if GTEST_HAS_DEATH_TEST + +// Returns the message describing the last system error, regardless of the +// platform. +GTEST_API_ std::string GetLastErrnoDescription(); + +// Attempts to parse a string into a positive integer pointed to by the +// number parameter. Returns true if that is possible. +// GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we can use +// it here. +template <typename Integer> +bool ParseNaturalNumber(const ::std::string& str, Integer* number) { + // Fail fast if the given string does not begin with a digit; + // this bypasses strtoXXX's "optional leading whitespace and plus + // or minus sign" semantics, which are undesirable here. + if (str.empty() || !IsDigit(str[0])) { + return false; + } + errno = 0; + + char* end; + // BiggestConvertible is the largest integer type that system-provided + // string-to-number conversion routines can return. + +# if GTEST_OS_WINDOWS && !defined(__GNUC__) + + // MSVC and C++ Builder define __int64 instead of the standard long long. + typedef unsigned __int64 BiggestConvertible; + const BiggestConvertible parsed = _strtoui64(str.c_str(), &end, 10); + +# else + + typedef unsigned long long BiggestConvertible; // NOLINT + const BiggestConvertible parsed = strtoull(str.c_str(), &end, 10); + +# endif // GTEST_OS_WINDOWS && !defined(__GNUC__) + + const bool parse_success = *end == '\0' && errno == 0; + + // TODO(vladl@google.com): Convert this to compile time assertion when it is + // available. + GTEST_CHECK_(sizeof(Integer) <= sizeof(parsed)); + + const Integer result = static_cast<Integer>(parsed); + if (parse_success && static_cast<BiggestConvertible>(result) == parsed) { + *number = result; + return true; + } + return false; +} +#endif // GTEST_HAS_DEATH_TEST + +// TestResult contains some private methods that should be hidden from +// the Google Test user but are required for testing. This class allows our +// tests to access them. +// +// This class is supplied only for the purpose of testing Google Test's own +// constructs. Do not use it in user tests, either directly or indirectly. +class TestResultAccessor { + public: + static void RecordProperty(TestResult* test_result, + const std::string& xml_element, + const TestProperty& property) { + test_result->RecordProperty(xml_element, property); + } + + static void ClearTestPartResults(TestResult* test_result) { + test_result->ClearTestPartResults(); + } + + static const std::vector<TestPartResult>& test_part_results( + const TestResult& test_result) { + return test_result.test_part_results(); + } +}; + +#if GTEST_CAN_STREAM_RESULTS_ + +// Streams test results to the given port on the given host machine. +class StreamingListener : public EmptyTestEventListener { + public: + // Abstract base class for writing strings to a socket. + class AbstractSocketWriter { + public: + virtual ~AbstractSocketWriter() {} + + // Sends a string to the socket. + virtual void Send(const string& message) = 0; + + // Closes the socket. + virtual void CloseConnection() {} + + // Sends a string and a newline to the socket. + void SendLn(const string& message) { + Send(message + "\n"); + } + }; +
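// Behavior sketch for ParseNaturalNumber() above (illustrative values):
//
//   int n = 0;
//   ParseNaturalNumber(::std::string("123"), &n);   // true, n == 123
//   ParseNaturalNumber(::std::string(" 123"), &n);  // false: leading space
//   ParseNaturalNumber(::std::string("-5"), &n);    // false: not a digit
//
// The round-trip cast check (casting the result back to BiggestConvertible)
// is what rejects values that overflow the narrower Integer type.
+ + // Concrete class for actually writing strings to a socket.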
+ class SocketWriter : public AbstractSocketWriter { + public: + SocketWriter(const string& host, const string& port) + : sockfd_(-1), host_name_(host), port_num_(port) { + MakeConnection(); + } + + virtual ~SocketWriter() { + if (sockfd_ != -1) + CloseConnection(); + } + + // Sends a string to the socket. + virtual void Send(const string& message) { + GTEST_CHECK_(sockfd_ != -1) + << "Send() can be called only when there is a connection."; + + const int len = static_cast<int>(message.length()); + if (write(sockfd_, message.c_str(), len) != len) { + GTEST_LOG_(WARNING) + << "stream_result_to: failed to stream to " + << host_name_ << ":" << port_num_; + } + } + + private: + // Creates a client socket and connects to the server. + void MakeConnection(); + + // Closes the socket. + void CloseConnection() { + GTEST_CHECK_(sockfd_ != -1) + << "CloseConnection() can be called only when there is a connection."; + + close(sockfd_); + sockfd_ = -1; + } + + int sockfd_; // socket file descriptor + const string host_name_; + const string port_num_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(SocketWriter); + }; // class SocketWriter + + // Escapes '=', '&', '%', and '\n' characters in str as "%xx". + static string UrlEncode(const char* str); + + StreamingListener(const string& host, const string& port) + : socket_writer_(new SocketWriter(host, port)) { Start(); } + + explicit StreamingListener(AbstractSocketWriter* socket_writer) + : socket_writer_(socket_writer) { Start(); } + + void OnTestProgramStart(const UnitTest& /* unit_test */) { + SendLn("event=TestProgramStart"); + } + + void OnTestProgramEnd(const UnitTest& unit_test) { + // Note that Google Test currently only reports elapsed time for each + // test iteration, not for the entire test program. + SendLn("event=TestProgramEnd&passed=" + FormatBool(unit_test.Passed())); + + // Notify the streaming server to stop. + socket_writer_->CloseConnection(); + } + + void OnTestIterationStart(const UnitTest& /* unit_test */, int iteration) { + SendLn("event=TestIterationStart&iteration=" + + StreamableToString(iteration)); + } + + void OnTestIterationEnd(const UnitTest& unit_test, int /* iteration */) { + SendLn("event=TestIterationEnd&passed=" + + FormatBool(unit_test.Passed()) + "&elapsed_time=" + + StreamableToString(unit_test.elapsed_time()) + "ms"); + } + + void OnTestCaseStart(const TestCase& test_case) { + SendLn(std::string("event=TestCaseStart&name=") + test_case.name()); + } + + void OnTestCaseEnd(const TestCase& test_case) { + SendLn("event=TestCaseEnd&passed=" + FormatBool(test_case.Passed()) + + "&elapsed_time=" + StreamableToString(test_case.elapsed_time()) + + "ms"); + } + + void OnTestStart(const TestInfo& test_info) { + SendLn(std::string("event=TestStart&name=") + test_info.name()); + } + + void OnTestEnd(const TestInfo& test_info) { + SendLn("event=TestEnd&passed=" + + FormatBool((test_info.result())->Passed()) + + "&elapsed_time=" + + StreamableToString((test_info.result())->elapsed_time()) + "ms"); + } + + void OnTestPartResult(const TestPartResult& test_part_result) { + const char* file_name = test_part_result.file_name(); + if (file_name == NULL) + file_name = ""; + SendLn("event=TestPartResult&file=" + UrlEncode(file_name) + + "&line=" + StreamableToString(test_part_result.line_number()) + + "&message=" + UrlEncode(test_part_result.message())); + } + + private: + // Sends the given message and a newline to the socket.
+ void SendLn(const string& message) { socket_writer_->SendLn(message); } + + // Called at the start of streaming to notify the receiver what + // protocol we are using. + void Start() { SendLn("gtest_streaming_protocol_version=1.0"); } + + string FormatBool(bool value) { return value ? "1" : "0"; } + + const scoped_ptr<AbstractSocketWriter> socket_writer_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(StreamingListener); +}; // class StreamingListener + +#endif // GTEST_CAN_STREAM_RESULTS_ + +} // namespace internal +} // namespace testing + +#endif // GTEST_SRC_GTEST_INTERNAL_INL_H_ +#undef GTEST_IMPLEMENTATION_ + +#if GTEST_OS_WINDOWS +# define vsnprintf _vsnprintf +#endif // GTEST_OS_WINDOWS + +namespace testing { + +using internal::CountIf; +using internal::ForEach; +using internal::GetElementOr; +using internal::Shuffle; + +// Constants. + +// A test whose test case name or test name matches this filter is +// disabled and not run. +static const char kDisableTestFilter[] = "DISABLED_*:*/DISABLED_*"; + +// A test case whose name matches this filter is considered a death +// test case and will be run before test cases whose name doesn't +// match this filter. +static const char kDeathTestCaseFilter[] = "*DeathTest:*DeathTest/*"; + +// A test filter that matches everything. +static const char kUniversalFilter[] = "*"; + +// The default output file for XML output. +static const char kDefaultOutputFile[] = "test_detail.xml"; + +// The environment variable name for the test shard index. +static const char kTestShardIndex[] = "GTEST_SHARD_INDEX"; +// The environment variable name for the total number of test shards. +static const char kTestTotalShards[] = "GTEST_TOTAL_SHARDS"; +// The environment variable name for the test shard status file. +static const char kTestShardStatusFile[] = "GTEST_SHARD_STATUS_FILE"; + +namespace internal { + +// The text used in failure messages to indicate the start of the +// stack trace. +const char kStackTraceMarker[] = "\nStack trace:\n"; + +// g_help_flag is true iff the --help flag or an equivalent form is +// specified on the command line. +bool g_help_flag = false; + +} // namespace internal + +static const char* GetDefaultFilter() { + return kUniversalFilter; +} + +GTEST_DEFINE_bool_( + also_run_disabled_tests, + internal::BoolFromGTestEnv("also_run_disabled_tests", false), + "Run disabled tests too, in addition to the tests normally being run."); + +GTEST_DEFINE_bool_( + break_on_failure, + internal::BoolFromGTestEnv("break_on_failure", false), + "True iff a failed assertion should be a debugger break-point."); + +GTEST_DEFINE_bool_( + catch_exceptions, + internal::BoolFromGTestEnv("catch_exceptions", true), + "True iff " GTEST_NAME_ + " should catch exceptions and treat them as test failures."); + +GTEST_DEFINE_string_( + color, + internal::StringFromGTestEnv("color", "auto"), + "Whether to use colors in the output. Valid values: yes, no, " + "and auto. 'auto' means to use colors if the output is " + "being sent to a terminal and the TERM environment variable " + "is set to a terminal type that supports colors."); + +GTEST_DEFINE_string_( + filter, + internal::StringFromGTestEnv("filter", GetDefaultFilter()), + "A colon-separated list of glob (not regex) patterns " + "for filtering the tests to run, optionally followed by a " + "'-' and a : separated list of negative patterns (tests to " + "exclude).
A test is run if it matches one of the positive " + "patterns and does not match any of the negative patterns."); + +GTEST_DEFINE_bool_(list_tests, false, + "List all tests without running them."); + +GTEST_DEFINE_string_( + output, + internal::StringFromGTestEnv("output", ""), + "A format (currently must be \"xml\"), optionally followed " + "by a colon and an output file name or directory. A directory " + "is indicated by a trailing pathname separator. " + "Examples: \"xml:filename.xml\", \"xml::directoryname/\". " + "If a directory is specified, output files will be created " + "within that directory, with file-names based on the test " + "executable's name and, if necessary, made unique by adding " + "digits."); + +GTEST_DEFINE_bool_( + print_time, + internal::BoolFromGTestEnv("print_time", true), + "True iff " GTEST_NAME_ + " should display elapsed time in text output."); + +GTEST_DEFINE_int32_( + random_seed, + internal::Int32FromGTestEnv("random_seed", 0), + "Random number seed to use when shuffling test orders. Must be in range " + "[1, 99999], or 0 to use a seed based on the current time."); + +GTEST_DEFINE_int32_( + repeat, + internal::Int32FromGTestEnv("repeat", 1), + "How many times to repeat each test. Specify a negative number " + "for repeating forever. Useful for shaking out flaky tests."); + +GTEST_DEFINE_bool_( + show_internal_stack_frames, false, + "True iff " GTEST_NAME_ " should include internal stack frames when " + "printing test failure stack traces."); + +GTEST_DEFINE_bool_( + shuffle, + internal::BoolFromGTestEnv("shuffle", false), + "True iff " GTEST_NAME_ + " should randomize tests' order on every run."); + +GTEST_DEFINE_int32_( + stack_trace_depth, + internal::Int32FromGTestEnv("stack_trace_depth", kMaxStackTraceDepth), + "The maximum number of stack frames to print when an " + "assertion fails. The valid range is 0 through 100, inclusive."); + +GTEST_DEFINE_string_( + stream_result_to, + internal::StringFromGTestEnv("stream_result_to", ""), + "This flag specifies the host name and the port number on which to stream " + "test results. Example: \"localhost:555\". The flag is effective only on " + "Linux."); + +GTEST_DEFINE_bool_( + throw_on_failure, + internal::BoolFromGTestEnv("throw_on_failure", false), + "When this flag is specified, a failed assertion will throw an exception " + "if exceptions are enabled or exit the program with a non-zero code " + "otherwise."); + +namespace internal { + +// Generates a random number from [0, range), using a Linear +// Congruential Generator (LCG). Crashes if 'range' is 0 or greater +// than kMaxRange. +UInt32 Random::Generate(UInt32 range) { + // These constants are the same as are used in glibc's rand(3). + state_ = (1103515245U*state_ + 12345U) % kMaxRange; + + GTEST_CHECK_(range > 0) + << "Cannot generate a number in the range [0, 0)."; + GTEST_CHECK_(range <= kMaxRange) + << "Generation of a number in [0, " << range << ") was requested, " + << "but this can only generate numbers in [0, " << kMaxRange << ")."; + + // Converting via modulus introduces a bit of downward bias, but + // it's simple, and a linear congruential generator isn't too good + // to begin with. + return state_ % range; +} + +// GTestIsInitialized() returns true iff the user has initialized +// Google Test. Useful for catching the user mistake of not initializing +// Google Test before calling RUN_ALL_TESTS(). +// +// A user must call testing::InitGoogleTest() to initialize Google +// Test. 
g_init_gtest_count is set to the number of times +// InitGoogleTest() has been called. We don't protect this variable +// under a mutex as it is only accessed in the main thread. +GTEST_API_ int g_init_gtest_count = 0; +static bool GTestIsInitialized() { return g_init_gtest_count != 0; } + +// Iterates over a vector of TestCases, keeping a running sum of the +// results of calling a given int-returning method on each. +// Returns the sum. +static int SumOverTestCaseList(const std::vector<TestCase*>& case_list, + int (TestCase::*method)() const) { + int sum = 0; + for (size_t i = 0; i < case_list.size(); i++) { + sum += (case_list[i]->*method)(); + } + return sum; +} + +// Returns true iff the test case passed. +static bool TestCasePassed(const TestCase* test_case) { + return test_case->should_run() && test_case->Passed(); +} + +// Returns true iff the test case failed. +static bool TestCaseFailed(const TestCase* test_case) { + return test_case->should_run() && test_case->Failed(); +} + +// Returns true iff test_case contains at least one test that should +// run. +static bool ShouldRunTestCase(const TestCase* test_case) { + return test_case->should_run(); +} + +// AssertHelper constructor. +AssertHelper::AssertHelper(TestPartResult::Type type, + const char* file, + int line, + const char* message) + : data_(new AssertHelperData(type, file, line, message)) { +} + +AssertHelper::~AssertHelper() { + delete data_; +} + +// Message assignment, for assertion streaming support. +void AssertHelper::operator=(const Message& message) const { + UnitTest::GetInstance()-> + AddTestPartResult(data_->type, data_->file, data_->line, + AppendUserMessage(data_->message, message), + UnitTest::GetInstance()->impl() + ->CurrentOsStackTraceExceptTop(1) + // Skips the stack frame for this function itself. + ); // NOLINT +} + +// Mutex for linked pointers. +GTEST_API_ GTEST_DEFINE_STATIC_MUTEX_(g_linked_ptr_mutex); + +// Application pathname gotten in InitGoogleTest. +std::string g_executable_path; + +// Returns the current application's name, removing directory path if that +// is present. +FilePath GetCurrentExecutableName() { + FilePath result; + +#if GTEST_OS_WINDOWS + result.Set(FilePath(g_executable_path).RemoveExtension("exe")); +#else + result.Set(FilePath(g_executable_path)); +#endif // GTEST_OS_WINDOWS + + return result.RemoveDirectoryName(); +} + +// Functions for processing the gtest_output flag. + +// Returns the output format, or "" for normal printed output. +std::string UnitTestOptions::GetOutputFormat() { + const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); + if (gtest_output_flag == NULL) return std::string(""); + + const char* const colon = strchr(gtest_output_flag, ':'); + return (colon == NULL) ? + std::string(gtest_output_flag) : + std::string(gtest_output_flag, colon - gtest_output_flag); +} +
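// Worked example for GetOutputFormat() above (illustrative): with
// --gtest_output=xml:reports/ the flag value is "xml:reports/", the colon
// split yields the format "xml", and the remainder "reports/" is treated as
// a directory by GetAbsolutePathToOutputFile() below because of the trailing
// path separator.
+ +// Returns the name of the requested output file, or the default if none +// was explicitly specified.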
+std::string UnitTestOptions::GetAbsolutePathToOutputFile() { + const char* const gtest_output_flag = GTEST_FLAG(output).c_str(); + if (gtest_output_flag == NULL) + return ""; + + const char* const colon = strchr(gtest_output_flag, ':'); + if (colon == NULL) + return internal::FilePath::ConcatPaths( + internal::FilePath( + UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(kDefaultOutputFile)).string(); + + internal::FilePath output_name(colon + 1); + if (!output_name.IsAbsolutePath()) + // TODO(wan@google.com): on Windows \some\path is not an absolute + // path (as its meaning depends on the current drive), yet the + // following logic for turning it into an absolute path is wrong. + // Fix it. + output_name = internal::FilePath::ConcatPaths( + internal::FilePath(UnitTest::GetInstance()->original_working_dir()), + internal::FilePath(colon + 1)); + + if (!output_name.IsDirectory()) + return output_name.string(); + + internal::FilePath result(internal::FilePath::GenerateUniqueFileName( + output_name, internal::GetCurrentExecutableName(), + GetOutputFormat().c_str())); + return result.string(); +} + +// Returns true iff the wildcard pattern matches the string. The +// first ':' or '\0' character in pattern marks the end of it. +// +// This recursive algorithm isn't very efficient, but is clear and +// works well enough for matching test names, which are short. +bool UnitTestOptions::PatternMatchesString(const char *pattern, + const char *str) { + switch (*pattern) { + case '\0': + case ':': // Either ':' or '\0' marks the end of the pattern. + return *str == '\0'; + case '?': // Matches any single character. + return *str != '\0' && PatternMatchesString(pattern + 1, str + 1); + case '*': // Matches any string (possibly empty) of characters. + return (*str != '\0' && PatternMatchesString(pattern, str + 1)) || + PatternMatchesString(pattern + 1, str); + default: // Non-special character. Matches itself. + return *pattern == *str && + PatternMatchesString(pattern + 1, str + 1); + } +} + +bool UnitTestOptions::MatchesFilter( + const std::string& name, const char* filter) { + const char *cur_pattern = filter; + for (;;) { + if (PatternMatchesString(cur_pattern, name.c_str())) { + return true; + } + + // Finds the next pattern in the filter. + cur_pattern = strchr(cur_pattern, ':'); + + // Returns if no more pattern can be found. + if (cur_pattern == NULL) { + return false; + } + + // Skips the pattern separator (the ':' character). + cur_pattern++; + } +} + +// Returns true iff the user-specified filter matches the test case +// name and the test name. +bool UnitTestOptions::FilterMatchesTest(const std::string &test_case_name, + const std::string &test_name) { + const std::string& full_name = test_case_name + "." + test_name.c_str(); + + // Split --gtest_filter at '-', if there is one, to separate into + // positive filter and negative filter portions + const char* const p = GTEST_FLAG(filter).c_str(); + const char* const dash = strchr(p, '-'); + std::string positive; + std::string negative; + if (dash == NULL) { + positive = GTEST_FLAG(filter).c_str(); // Whole string is a positive filter + negative = ""; + } else { + positive = std::string(p, dash); // Everything up to the dash + negative = std::string(dash + 1); // Everything after the dash + if (positive.empty()) { + // Treat '-test1' as the same as '*-test1' + positive = kUniversalFilter; + } + } + + // A filter is a colon-separated list of patterns. It matches a + // test if any pattern in it matches the test.
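// Worked example (illustrative): with --gtest_filter=Foo.*:Bar.*-Foo.Bad,
// positive becomes "Foo.*:Bar.*" and negative becomes "Foo.Bad", so Foo.Ok
// and Bar.Ok run, while Foo.Bad (negative match) and Baz.Ok (no positive
// match) do not.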
+ return (MatchesFilter(full_name, positive.c_str()) && + !MatchesFilter(full_name, negative.c_str())); +} + +#if GTEST_HAS_SEH +// Returns EXCEPTION_EXECUTE_HANDLER if Google Test should handle the +// given SEH exception, or EXCEPTION_CONTINUE_SEARCH otherwise. +// This function is useful as an __except condition. +int UnitTestOptions::GTestShouldProcessSEH(DWORD exception_code) { + // Google Test should handle a SEH exception if: + // 1. the user wants it to, AND + // 2. this is not a breakpoint exception, AND + // 3. this is not a C++ exception (VC++ implements them via SEH, + // apparently). + // + // SEH exception code for C++ exceptions. + // (see http://support.microsoft.com/kb/185294 for more information). + const DWORD kCxxExceptionCode = 0xe06d7363; + + bool should_handle = true; + + if (!GTEST_FLAG(catch_exceptions)) + should_handle = false; + else if (exception_code == EXCEPTION_BREAKPOINT) + should_handle = false; + else if (exception_code == kCxxExceptionCode) + should_handle = false; + + return should_handle ? EXCEPTION_EXECUTE_HANDLER : EXCEPTION_CONTINUE_SEARCH; +} +#endif // GTEST_HAS_SEH + +} // namespace internal + +// The c'tor sets this object as the test part result reporter used by +// Google Test. The 'result' parameter specifies where to report the +// results. Intercepts only failures from the current thread. +ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( + TestPartResultArray* result) + : intercept_mode_(INTERCEPT_ONLY_CURRENT_THREAD), + result_(result) { + Init(); +} + +// The c'tor sets this object as the test part result reporter used by +// Google Test. The 'result' parameter specifies where to report the +// results. +ScopedFakeTestPartResultReporter::ScopedFakeTestPartResultReporter( + InterceptMode intercept_mode, TestPartResultArray* result) + : intercept_mode_(intercept_mode), + result_(result) { + Init(); +} + +void ScopedFakeTestPartResultReporter::Init() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + if (intercept_mode_ == INTERCEPT_ALL_THREADS) { + old_reporter_ = impl->GetGlobalTestPartResultReporter(); + impl->SetGlobalTestPartResultReporter(this); + } else { + old_reporter_ = impl->GetTestPartResultReporterForCurrentThread(); + impl->SetTestPartResultReporterForCurrentThread(this); + } +} + +// The d'tor restores the test part result reporter used by Google Test +// before. +ScopedFakeTestPartResultReporter::~ScopedFakeTestPartResultReporter() { + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + if (intercept_mode_ == INTERCEPT_ALL_THREADS) { + impl->SetGlobalTestPartResultReporter(old_reporter_); + } else { + impl->SetTestPartResultReporterForCurrentThread(old_reporter_); + } +} + +// Increments the test part result count and remembers the result. +// This method is from the TestPartResultReporterInterface interface. +void ScopedFakeTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + result_->Append(result); +} + +namespace internal { + +// Returns the type ID of ::testing::Test. We should always call this +// instead of GetTypeId< ::testing::Test>() to get the type ID of +// testing::Test. This is to work around a suspected linker bug when +// using Google Test as a framework on Mac OS X. The bug causes +// GetTypeId< ::testing::Test>() to return different values depending +// on whether the call is from the Google Test framework itself or +// from user test code. 
GetTestTypeId() is guaranteed to always +// return the same value, as it always calls GetTypeId<>() from the +// gtest.cc, which is within the Google Test framework. +TypeId GetTestTypeId() { + return GetTypeId<Test>(); +} + +// The value of GetTestTypeId() as seen from within the Google Test +// library. This is solely for testing GetTestTypeId(). +extern const TypeId kTestTypeIdInGoogleTest = GetTestTypeId(); + +// This predicate-formatter checks that 'results' contains a test part +// failure of the given type and that the failure message contains the +// given substring. +AssertionResult HasOneFailure(const char* /* results_expr */, + const char* /* type_expr */, + const char* /* substr_expr */, + const TestPartResultArray& results, + TestPartResult::Type type, + const string& substr) { + const std::string expected(type == TestPartResult::kFatalFailure ? + "1 fatal failure" : + "1 non-fatal failure"); + Message msg; + if (results.size() != 1) { + msg << "Expected: " << expected << "\n" + << " Actual: " << results.size() << " failures"; + for (int i = 0; i < results.size(); i++) { + msg << "\n" << results.GetTestPartResult(i); + } + return AssertionFailure() << msg; + } + + const TestPartResult& r = results.GetTestPartResult(0); + if (r.type() != type) { + return AssertionFailure() << "Expected: " << expected << "\n" + << " Actual:\n" + << r; + } + + if (strstr(r.message(), substr.c_str()) == NULL) { + return AssertionFailure() << "Expected: " << expected << " containing \"" + << substr << "\"\n" + << " Actual:\n" + << r; + } + + return AssertionSuccess(); +} + +// The constructor of SingleFailureChecker remembers where to look up +// test part results, what type of failure we expect, and what +// substring the failure message should contain. +SingleFailureChecker::SingleFailureChecker( + const TestPartResultArray* results, + TestPartResult::Type type, + const string& substr) + : results_(results), + type_(type), + substr_(substr) {} + +// The destructor of SingleFailureChecker verifies that the given +// TestPartResultArray contains exactly one failure that has the given +// type and contains the given substring. If that's not the case, a +// non-fatal failure will be generated. +SingleFailureChecker::~SingleFailureChecker() { + EXPECT_PRED_FORMAT3(HasOneFailure, *results_, type_, substr_); +} + +DefaultGlobalTestPartResultReporter::DefaultGlobalTestPartResultReporter( + UnitTestImpl* unit_test) : unit_test_(unit_test) {} + +void DefaultGlobalTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + unit_test_->current_test_result()->AddTestPartResult(result); + unit_test_->listeners()->repeater()->OnTestPartResult(result); +} + +DefaultPerThreadTestPartResultReporter::DefaultPerThreadTestPartResultReporter( + UnitTestImpl* unit_test) : unit_test_(unit_test) {} + +void DefaultPerThreadTestPartResultReporter::ReportTestPartResult( + const TestPartResult& result) { + unit_test_->GetGlobalTestPartResultReporter()->ReportTestPartResult(result); +} + +// Returns the global test part result reporter. +TestPartResultReporterInterface* +UnitTestImpl::GetGlobalTestPartResultReporter() { + internal::MutexLock lock(&global_test_part_result_reporter_mutex_); + return global_test_part_result_repoter_; +} +
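// These helpers are the guts of the EXPECT_{FATAL,NONFATAL}_FAILURE macros in
// gtest-spi.h. A compressed sketch of the pattern they implement:
//
//   {
//     TestPartResultArray results;
//     SingleFailureChecker checker(&results,
//                                  TestPartResult::kNonFatalFailure,
//                                  "oops");
//     ScopedFakeTestPartResultReporter reporter(
//         ScopedFakeTestPartResultReporter::INTERCEPT_ONLY_CURRENT_THREAD,
//         &results);
//     ADD_FAILURE() << "oops";  // captured into 'results', not reported
//   }  // ~SingleFailureChecker verifies exactly one matching failure
+ +// Sets the global test part result reporter.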
+void UnitTestImpl::SetGlobalTestPartResultReporter( + TestPartResultReporterInterface* reporter) { + internal::MutexLock lock(&global_test_part_result_reporter_mutex_); + global_test_part_result_repoter_ = reporter; +} + +// Returns the test part result reporter for the current thread. +TestPartResultReporterInterface* +UnitTestImpl::GetTestPartResultReporterForCurrentThread() { + return per_thread_test_part_result_reporter_.get(); +} + +// Sets the test part result reporter for the current thread. +void UnitTestImpl::SetTestPartResultReporterForCurrentThread( + TestPartResultReporterInterface* reporter) { + per_thread_test_part_result_reporter_.set(reporter); +} + +// Gets the number of successful test cases. +int UnitTestImpl::successful_test_case_count() const { + return CountIf(test_cases_, TestCasePassed); +} + +// Gets the number of failed test cases. +int UnitTestImpl::failed_test_case_count() const { + return CountIf(test_cases_, TestCaseFailed); +} + +// Gets the number of all test cases. +int UnitTestImpl::total_test_case_count() const { + return static_cast<int>(test_cases_.size()); +} + +// Gets the number of all test cases that contain at least one test +// that should run. +int UnitTestImpl::test_case_to_run_count() const { + return CountIf(test_cases_, ShouldRunTestCase); +} + +// Gets the number of successful tests. +int UnitTestImpl::successful_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::successful_test_count); +} + +// Gets the number of failed tests. +int UnitTestImpl::failed_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::failed_test_count); +} + +// Gets the number of disabled tests that will be reported in the XML report. +int UnitTestImpl::reportable_disabled_test_count() const { + return SumOverTestCaseList(test_cases_, + &TestCase::reportable_disabled_test_count); +} + +// Gets the number of disabled tests. +int UnitTestImpl::disabled_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::disabled_test_count); +} + +// Gets the number of tests to be printed in the XML report. +int UnitTestImpl::reportable_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::reportable_test_count); +} + +// Gets the number of all tests. +int UnitTestImpl::total_test_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::total_test_count); +} + +// Gets the number of tests that should run. +int UnitTestImpl::test_to_run_count() const { + return SumOverTestCaseList(test_cases_, &TestCase::test_to_run_count); +} + +// Returns the current OS stack trace as an std::string. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// CurrentOsStackTraceExceptTop(1), Foo() will be included in the +// trace but Bar() and CurrentOsStackTraceExceptTop() won't. +std::string UnitTestImpl::CurrentOsStackTraceExceptTop(int skip_count) { + (void)skip_count; + return ""; +} + +// Returns the current time in milliseconds. +TimeInMillis GetTimeInMillis() { +#if GTEST_OS_WINDOWS_MOBILE || defined(__BORLANDC__)
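// The constant below can be sanity-checked as follows (illustrative
// arithmetic): the 369 years from 1601-01-01 to 1970-01-01 span
// 11,644,473,600 seconds, i.e. 11,644,473,600,000 ms, which factors as
// 116444736 * 100000, exactly the two factors multiplied below.
+ + // Difference between 1970-01-01 and 1601-01-01 in milliseconds.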
+ // http://analogous.blogspot.com/2005/04/epoch.html + const TimeInMillis kJavaEpochToWinFileTimeDelta = + static_cast<TimeInMillis>(116444736UL) * 100000UL; + const DWORD kTenthMicrosInMilliSecond = 10000; + + SYSTEMTIME now_systime; + FILETIME now_filetime; + ULARGE_INTEGER now_int64; + // TODO(kenton@google.com): Shouldn't this just use + // GetSystemTimeAsFileTime()? + GetSystemTime(&now_systime); + if (SystemTimeToFileTime(&now_systime, &now_filetime)) { + now_int64.LowPart = now_filetime.dwLowDateTime; + now_int64.HighPart = now_filetime.dwHighDateTime; + now_int64.QuadPart = (now_int64.QuadPart / kTenthMicrosInMilliSecond) - + kJavaEpochToWinFileTimeDelta; + return now_int64.QuadPart; + } + return 0; +#elif GTEST_OS_WINDOWS && !GTEST_HAS_GETTIMEOFDAY_ + __timeb64 now; + + // MSVC 8 deprecates _ftime64(), so we want to suppress warning 4996 + // (deprecated function) there. + // TODO(kenton@google.com): Use GetTickCount()? Or use + // SystemTimeToFileTime() + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996) + _ftime64(&now); + GTEST_DISABLE_MSC_WARNINGS_POP_() + + return static_cast<TimeInMillis>(now.time) * 1000 + now.millitm; +#elif GTEST_HAS_GETTIMEOFDAY_ + struct timeval now; + gettimeofday(&now, NULL); + return static_cast<TimeInMillis>(now.tv_sec) * 1000 + now.tv_usec / 1000; +#else +# error "Don't know how to get the current time on your system." +#endif +} + +// Utilities + +// class String. + +#if GTEST_OS_WINDOWS_MOBILE +// Creates a UTF-16 wide string from the given ANSI string, allocating +// memory using new. The caller is responsible for deleting the return +// value using delete[]. Returns the wide string, or NULL if the +// input is NULL. +LPCWSTR String::AnsiToUtf16(const char* ansi) { + if (!ansi) return NULL; + const int length = strlen(ansi); + const int unicode_length = + MultiByteToWideChar(CP_ACP, 0, ansi, length, + NULL, 0); + WCHAR* unicode = new WCHAR[unicode_length + 1]; + MultiByteToWideChar(CP_ACP, 0, ansi, length, + unicode, unicode_length); + unicode[unicode_length] = 0; + return unicode; +} + +// Creates an ANSI string from the given wide string, allocating +// memory using new. The caller is responsible for deleting the return +// value using delete[]. Returns the ANSI string, or NULL if the +// input is NULL. +const char* String::Utf16ToAnsi(LPCWSTR utf16_str) { + if (!utf16_str) return NULL; + const int ansi_length = + WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, + NULL, 0, NULL, NULL); + char* ansi = new char[ansi_length + 1]; + WideCharToMultiByte(CP_ACP, 0, utf16_str, -1, + ansi, ansi_length, NULL, NULL); + ansi[ansi_length] = 0; + return ansi; +} + +#endif // GTEST_OS_WINDOWS_MOBILE + +// Compares two C strings. Returns true iff they have the same content. +// +// Unlike strcmp(), this function can handle NULL argument(s). A NULL +// C string is considered different to any non-NULL C string, +// including the empty string. +bool String::CStringEquals(const char * lhs, const char * rhs) { + if ( lhs == NULL ) return rhs == NULL; + + if ( rhs == NULL ) return false; + + return strcmp(lhs, rhs) == 0; +} + +#if GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING + +// Converts an array of wide chars to a narrow string using the UTF-8 +// encoding, and streams the result to the given Message object.
+static void StreamWideCharsToMessage(const wchar_t* wstr, size_t length, + Message* msg) { + for (size_t i = 0; i != length; ) { // NOLINT + if (wstr[i] != L'\0') { + *msg << WideStringToUtf8(wstr + i, static_cast<int>(length - i)); + while (i != length && wstr[i] != L'\0') + i++; + } else { + *msg << '\0'; + i++; + } + } +} + +#endif // GTEST_HAS_STD_WSTRING || GTEST_HAS_GLOBAL_WSTRING + +} // namespace internal + +// Constructs an empty Message. +// We allocate the stringstream separately because otherwise each use of +// ASSERT/EXPECT in a procedure adds over 200 bytes to the procedure's +// stack frame leading to huge stack frames in some cases; gcc does not reuse +// the stack space. +Message::Message() : ss_(new ::std::stringstream) { + // By default, we want there to be enough precision when printing + // a double to a Message. + *ss_ << std::setprecision(std::numeric_limits<double>::digits10 + 2); +} + +// These two overloads allow streaming a wide C string to a Message +// using the UTF-8 encoding. +Message& Message::operator <<(const wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); +} +Message& Message::operator <<(wchar_t* wide_c_str) { + return *this << internal::String::ShowWideCString(wide_c_str); +} + +#if GTEST_HAS_STD_WSTRING +// Converts the given wide string to a narrow string using the UTF-8 +// encoding, and streams the result to this Message object. +Message& Message::operator <<(const ::std::wstring& wstr) { + internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this); + return *this; +} +#endif // GTEST_HAS_STD_WSTRING + +#if GTEST_HAS_GLOBAL_WSTRING +// Converts the given wide string to a narrow string using the UTF-8 +// encoding, and streams the result to this Message object. +Message& Message::operator <<(const ::wstring& wstr) { + internal::StreamWideCharsToMessage(wstr.c_str(), wstr.length(), this); + return *this; +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +// Gets the text streamed to this object so far as an std::string. +// Each '\0' character in the buffer is replaced with "\\0". +std::string Message::GetString() const { + return internal::StringStreamToString(ss_.get()); +} + +// AssertionResult constructors. +// Used in EXPECT_TRUE/FALSE(assertion_result). +AssertionResult::AssertionResult(const AssertionResult& other) + : success_(other.success_), + message_(other.message_.get() != NULL ? + new ::std::string(*other.message_) : + static_cast< ::std::string*>(NULL)) { +} + +// Swaps two AssertionResults. +void AssertionResult::swap(AssertionResult& other) { + using std::swap; + swap(success_, other.success_); + swap(message_, other.message_); +} + +// Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE. +AssertionResult AssertionResult::operator!() const { + AssertionResult negation(!success_); + if (message_.get() != NULL) + negation << *message_; + return negation; +} + +// Makes a successful assertion result. +AssertionResult AssertionSuccess() { + return AssertionResult(true); +} + +// Makes a failed assertion result. +AssertionResult AssertionFailure() { + return AssertionResult(false); +} + +// Makes a failed assertion result with the given failure message. +// Deprecated; use AssertionFailure() << message.
+AssertionResult AssertionFailure(const Message& message) { + return AssertionFailure() << message; +} + +namespace internal { + +namespace edit_distance { +std::vector<EditType> CalculateOptimalEdits(const std::vector<size_t>& left, + const std::vector<size_t>& right) { + std::vector<std::vector<double> > costs( + left.size() + 1, std::vector<double>(right.size() + 1)); + std::vector<std::vector<EditType> > best_move( + left.size() + 1, std::vector<EditType>(right.size() + 1)); + + // Populate for empty right. + for (size_t l_i = 0; l_i < costs.size(); ++l_i) { + costs[l_i][0] = static_cast<double>(l_i); + best_move[l_i][0] = kRemove; + } + // Populate for empty left. + for (size_t r_i = 1; r_i < costs[0].size(); ++r_i) { + costs[0][r_i] = static_cast<double>(r_i); + best_move[0][r_i] = kAdd; + } + + for (size_t l_i = 0; l_i < left.size(); ++l_i) { + for (size_t r_i = 0; r_i < right.size(); ++r_i) { + if (left[l_i] == right[r_i]) { + // Found a match. Consume it. + costs[l_i + 1][r_i + 1] = costs[l_i][r_i]; + best_move[l_i + 1][r_i + 1] = kMatch; + continue; + } + + const double add = costs[l_i + 1][r_i]; + const double remove = costs[l_i][r_i + 1]; + const double replace = costs[l_i][r_i]; + if (add < remove && add < replace) { + costs[l_i + 1][r_i + 1] = add + 1; + best_move[l_i + 1][r_i + 1] = kAdd; + } else if (remove < add && remove < replace) { + costs[l_i + 1][r_i + 1] = remove + 1; + best_move[l_i + 1][r_i + 1] = kRemove; + } else { + // We make replace a little more expensive than add/remove to lower + // their priority. + costs[l_i + 1][r_i + 1] = replace + 1.00001; + best_move[l_i + 1][r_i + 1] = kReplace; + } + } + } + + // Reconstruct the best path. We do it in reverse order. + std::vector<EditType> best_path; + for (size_t l_i = left.size(), r_i = right.size(); l_i > 0 || r_i > 0;) { + EditType move = best_move[l_i][r_i]; + best_path.push_back(move); + l_i -= move != kAdd; + r_i -= move != kRemove; + } + std::reverse(best_path.begin(), best_path.end()); + return best_path; +} + +namespace { + +// Helper class to convert string into ids with deduplication. +class InternalStrings { + public: + size_t GetId(const std::string& str) { + IdMap::iterator it = ids_.find(str); + if (it != ids_.end()) return it->second; + size_t id = ids_.size(); + return ids_[str] = id; + } + + private: + typedef std::map<std::string, size_t> IdMap; + IdMap ids_; +}; + +} // namespace + +std::vector<EditType> CalculateOptimalEdits( + const std::vector<std::string>& left, + const std::vector<std::string>& right) { + std::vector<size_t> left_ids, right_ids; + { + InternalStrings intern_table; + for (size_t i = 0; i < left.size(); ++i) { + left_ids.push_back(intern_table.GetId(left[i])); + } + for (size_t i = 0; i < right.size(); ++i) { + right_ids.push_back(intern_table.GetId(right[i])); + } + } + return CalculateOptimalEdits(left_ids, right_ids); +} + +namespace { +
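// Worked example (illustrative): after interning, left = {"a","b","c"} and
// right = {"a","d","c"} become the id sequences {0,1,2} and {0,3,2}, and
// CalculateOptimalEdits() returns {kMatch, kReplace, kMatch}: the single
// replace costs 1.00001, cheaper than the 2.0 of an add/remove pair.
+ +// Helper class that holds the state for one hunk and prints it out to the +// stream. +// It reorders adds/removes when possible to group all removes before all +// adds. It also adds the hunk header before printing into the stream.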
+class Hunk { + public: + Hunk(size_t left_start, size_t right_start) + : left_start_(left_start), + right_start_(right_start), + adds_(), + removes_(), + common_() {} + + void PushLine(char edit, const char* line) { + switch (edit) { + case ' ': + ++common_; + FlushEdits(); + hunk_.push_back(std::make_pair(' ', line)); + break; + case '-': + ++removes_; + hunk_removes_.push_back(std::make_pair('-', line)); + break; + case '+': + ++adds_; + hunk_adds_.push_back(std::make_pair('+', line)); + break; + } + } + + void PrintTo(std::ostream* os) { + PrintHeader(os); + FlushEdits(); + for (std::list<std::pair<char, const char*> >::const_iterator it = + hunk_.begin(); + it != hunk_.end(); ++it) { + *os << it->first << it->second << "\n"; + } + } + + bool has_edits() const { return adds_ || removes_; } + + private: + void FlushEdits() { + hunk_.splice(hunk_.end(), hunk_removes_); + hunk_.splice(hunk_.end(), hunk_adds_); + } + + // Print a unified diff header for one hunk. + // The format is + // "@@ -<left_start>,<left_length> +<right_start>,<right_length> @@" + // where the left/right parts are omitted if unnecessary. + void PrintHeader(std::ostream* ss) const { + *ss << "@@ "; + if (removes_) { + *ss << "-" << left_start_ << "," << (removes_ + common_); + } + if (removes_ && adds_) { + *ss << " "; + } + if (adds_) { + *ss << "+" << right_start_ << "," << (adds_ + common_); + } + *ss << " @@\n"; + } + + size_t left_start_, right_start_; + size_t adds_, removes_, common_; + std::list<std::pair<char, const char*> > hunk_, hunk_adds_, hunk_removes_; +}; + +} // namespace + +// Create a list of diff hunks in Unified diff format. +// Each hunk has a header generated by PrintHeader above plus a body with +// lines prefixed with ' ' for no change, '-' for deletion and '+' for +// addition. +// 'context' represents the desired unchanged prefix/suffix around the diff. +// If two hunks are close enough that their contexts overlap, then they are +// joined into one hunk. +std::string CreateUnifiedDiff(const std::vector<std::string>& left, + const std::vector<std::string>& right, + size_t context) { + const std::vector<EditType> edits = CalculateOptimalEdits(left, right); + + size_t l_i = 0, r_i = 0, edit_i = 0; + std::stringstream ss; + while (edit_i < edits.size()) { + // Find first edit. + while (edit_i < edits.size() && edits[edit_i] == kMatch) { + ++l_i; + ++r_i; + ++edit_i; + } + + // Find the first line to include in the hunk. + const size_t prefix_context = std::min(l_i, context); + Hunk hunk(l_i - prefix_context + 1, r_i - prefix_context + 1); + for (size_t i = prefix_context; i > 0; --i) { + hunk.PushLine(' ', left[l_i - i].c_str()); + } + + // Iterate the edits until we have found enough suffix for the hunk or the + // input is over. + size_t n_suffix = 0; + for (; edit_i < edits.size(); ++edit_i) { + if (n_suffix >= context) { + // Continue only if the next hunk is very close. + std::vector<EditType>::const_iterator it = edits.begin() + edit_i; + while (it != edits.end() && *it == kMatch) ++it; + if (it == edits.end() || (it - edits.begin()) - edit_i >= context) { + // There is no next edit or it is too far away. + break; + } + } + + EditType edit = edits[edit_i]; + // Reset count when a non match is found. + n_suffix = edit == kMatch ? n_suffix + 1 : 0; + + if (edit == kMatch || edit == kRemove || edit == kReplace) { + hunk.PushLine(edit == kMatch ? ' ' : '-', left[l_i].c_str()); + } + if (edit == kAdd || edit == kReplace) { + hunk.PushLine('+', right[r_i].c_str()); + } + + // Advance indices, depending on edit type. + l_i += edit != kAdd; + r_i += edit != kRemove; + } + + if (!hunk.has_edits()) { + // We are done. We don't want this hunk.
+ + break; + } + + hunk.PrintTo(&ss); + } + return ss.str(); +} + +} // namespace edit_distance + +namespace { + +// The string representations of the values received in EqFailure() are +// already escaped. Split them on escaped '\n' boundaries. Leave all other +// escaped characters the same. +std::vector<std::string> SplitEscapedString(const std::string& str) { + std::vector<std::string> lines; + size_t start = 0, end = str.size(); + if (end > 2 && str[0] == '"' && str[end - 1] == '"') { + ++start; + --end; + } + bool escaped = false; + for (size_t i = start; i + 1 < end; ++i) { + if (escaped) { + escaped = false; + if (str[i] == 'n') { + lines.push_back(str.substr(start, i - start - 1)); + start = i + 1; + } + } else { + escaped = str[i] == '\\'; + } + } + lines.push_back(str.substr(start, end - start)); + return lines; +} + +} // namespace + +// Constructs and returns the message for an equality assertion +// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. +// +// The first four parameters are the expressions used in the assertion +// and their values, as strings. For example, for ASSERT_EQ(foo, bar) +// where foo is 5 and bar is 6, we have: +// +// expected_expression: "foo" +// actual_expression: "bar" +// expected_value: "5" +// actual_value: "6" +// +// The ignoring_case parameter is true iff the assertion is a +// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will +// be inserted into the message. +AssertionResult EqFailure(const char* expected_expression, + const char* actual_expression, + const std::string& expected_value, + const std::string& actual_value, + bool ignoring_case) { + Message msg; + msg << "Value of: " << actual_expression; + if (actual_value != actual_expression) { + msg << "\n Actual: " << actual_value; + } + + msg << "\nExpected: " << expected_expression; + if (ignoring_case) { + msg << " (ignoring case)"; + } + if (expected_value != expected_expression) { + msg << "\nWhich is: " << expected_value; + } + + if (!expected_value.empty() && !actual_value.empty()) { + const std::vector<std::string> expected_lines = + SplitEscapedString(expected_value); + const std::vector<std::string> actual_lines = + SplitEscapedString(actual_value); + if (expected_lines.size() > 1 || actual_lines.size() > 1) { + msg << "\nWith diff:\n" + << edit_distance::CreateUnifiedDiff(expected_lines, actual_lines); + } + } + + return AssertionFailure() << msg; +} + +// Constructs a failure message for Boolean assertions such as EXPECT_TRUE. +std::string GetBoolAssertionFailureMessage( + const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value) { + const char* actual_message = assertion_result.message(); + Message msg; + msg << "Value of: " << expression_text + << "\n Actual: " << actual_predicate_value; + if (actual_message[0] != '\0') + msg << " (" << actual_message << ")"; + msg << "\nExpected: " << expected_predicate_value; + return msg.GetString(); +} + +// Helper function for implementing ASSERT_NEAR. +AssertionResult DoubleNearPredFormat(const char* expr1, + const char* expr2, + const char* abs_error_expr, + double val1, + double val2, + double abs_error) { + const double diff = fabs(val1 - val2); + if (diff <= abs_error) return AssertionSuccess(); +
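// For example (illustrative): EXPECT_NEAR(2.00001, 2.0, 0.0001) succeeds
// because the difference, 1e-5, is within abs_error, while
// EXPECT_NEAR(2.001, 2.0, 0.0001) fails and reports a diff of 0.001.
+ + // TODO(wan): do not print the value of an expression if it's + // already a literal.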
+  return AssertionFailure()
+      << "The difference between " << expr1 << " and " << expr2
+      << " is " << diff << ", which exceeds " << abs_error_expr << ", where\n"
+      << expr1 << " evaluates to " << val1 << ",\n"
+      << expr2 << " evaluates to " << val2 << ", and\n"
+      << abs_error_expr << " evaluates to " << abs_error << ".";
+}
+
+
+// Helper template for implementing FloatLE() and DoubleLE().
+template <typename RawType>
+AssertionResult FloatingPointLE(const char* expr1,
+                                const char* expr2,
+                                RawType val1,
+                                RawType val2) {
+  // Returns success if val1 is less than val2,
+  if (val1 < val2) {
+    return AssertionSuccess();
+  }
+
+  // or if val1 is almost equal to val2.
+  const FloatingPoint<RawType> lhs(val1), rhs(val2);
+  if (lhs.AlmostEquals(rhs)) {
+    return AssertionSuccess();
+  }
+
+  // Note that the above two checks will both fail if either val1 or
+  // val2 is NaN, as the IEEE floating-point standard requires that
+  // any predicate involving a NaN must return false.
+
+  ::std::stringstream val1_ss;
+  val1_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+          << val1;
+
+  ::std::stringstream val2_ss;
+  val2_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+          << val2;
+
+  return AssertionFailure()
+      << "Expected: (" << expr1 << ") <= (" << expr2 << ")\n"
+      << "  Actual: " << StringStreamToString(&val1_ss) << " vs "
+      << StringStreamToString(&val2_ss);
+}
+
+}  // namespace internal
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+AssertionResult FloatLE(const char* expr1, const char* expr2,
+                        float val1, float val2) {
+  return internal::FloatingPointLE<float>(expr1, expr2, val1, val2);
+}
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+AssertionResult DoubleLE(const char* expr1, const char* expr2,
+                         double val1, double val2) {
+  return internal::FloatingPointLE<double>(expr1, expr2, val1, val2);
+}
+
+namespace internal {
+
+// The helper function for {ASSERT|EXPECT}_EQ with int or enum
+// arguments.
+AssertionResult CmpHelperEQ(const char* expected_expression,
+                            const char* actual_expression,
+                            BiggestInt expected,
+                            BiggestInt actual) {
+  if (expected == actual) {
+    return AssertionSuccess();
+  }
+
+  return EqFailure(expected_expression,
+                   actual_expression,
+                   FormatForComparisonFailureMessage(expected, actual),
+                   FormatForComparisonFailureMessage(actual, expected),
+                   false);
+}
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_?? with integer or enum arguments.  It is here
+// just to avoid copy-and-paste of similar code.
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+                                   BiggestInt val1, BiggestInt val2) {\
+  if (val1 op val2) {\
+    return AssertionSuccess();\
+  } else {\
+    return AssertionFailure() \
+        << "Expected: (" << expr1 << ") " #op " (" << expr2\
+        << "), actual: " << FormatForComparisonFailureMessage(val1, val2)\
+        << " vs " << FormatForComparisonFailureMessage(val2, val1);\
+  }\
+}
+
+// Implements the helper function for {ASSERT|EXPECT}_NE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(NE, !=)
+// Implements the helper function for {ASSERT|EXPECT}_LE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(LE, <=)
+// Implements the helper function for {ASSERT|EXPECT}_LT with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(LT, < )
+// Implements the helper function for {ASSERT|EXPECT}_GE with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(GE, >=)
+// Implements the helper function for {ASSERT|EXPECT}_GT with int or
+// enum arguments.
+GTEST_IMPL_CMP_HELPER_(GT, > )
+
+#undef GTEST_IMPL_CMP_HELPER_
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+AssertionResult CmpHelperSTREQ(const char* expected_expression,
+                               const char* actual_expression,
+                               const char* expected,
+                               const char* actual) {
+  if (String::CStringEquals(expected, actual)) {
+    return AssertionSuccess();
+  }
+
+  return EqFailure(expected_expression,
+                   actual_expression,
+                   PrintToString(expected),
+                   PrintToString(actual),
+                   false);
+}
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
+                                   const char* actual_expression,
+                                   const char* expected,
+                                   const char* actual) {
+  if (String::CaseInsensitiveCStringEquals(expected, actual)) {
+    return AssertionSuccess();
+  }
+
+  return EqFailure(expected_expression,
+                   actual_expression,
+                   PrintToString(expected),
+                   PrintToString(actual),
+                   true);
+}
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+AssertionResult CmpHelperSTRNE(const char* s1_expression,
+                               const char* s2_expression,
+                               const char* s1,
+                               const char* s2) {
+  if (!String::CStringEquals(s1, s2)) {
+    return AssertionSuccess();
+  } else {
+    return AssertionFailure() << "Expected: (" << s1_expression << ") != ("
+                              << s2_expression << "), actual: \""
+                              << s1 << "\" vs \"" << s2 << "\"";
+  }
+}
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+                                   const char* s2_expression,
+                                   const char* s1,
+                                   const char* s2) {
+  if (!String::CaseInsensitiveCStringEquals(s1, s2)) {
+    return AssertionSuccess();
+  } else {
+    return AssertionFailure()
+        << "Expected: (" << s1_expression << ") != ("
+        << s2_expression << ") (ignoring case), actual: \""
+        << s1 << "\" vs \"" << s2 << "\"";
+  }
+}
+
+}  // namespace internal
+
+namespace {
+
+// Helper functions for implementing IsSubString() and IsNotSubstring().
+
+// This group of overloaded functions return true iff needle is a
+// substring of haystack.  NULL is considered a substring of itself
+// only.
+
+bool IsSubstringPred(const char* needle, const char* haystack) {
+  if (needle == NULL || haystack == NULL)
+    return needle == haystack;
+
+  return strstr(haystack, needle) != NULL;
+}
+
+bool IsSubstringPred(const wchar_t* needle, const wchar_t* haystack) {
+  if (needle == NULL || haystack == NULL)
+    return needle == haystack;
+
+  return wcsstr(haystack, needle) != NULL;
+}
+
+// StringType here can be either ::std::string or ::std::wstring.
+template <typename StringType>
+bool IsSubstringPred(const StringType& needle,
+                     const StringType& haystack) {
+  return haystack.find(needle) != StringType::npos;
+}
+
+// This function implements either IsSubstring() or IsNotSubstring(),
+// depending on the value of the expected_to_be_substring parameter.
+// StringType here can be const char*, const wchar_t*, ::std::string,
+// or ::std::wstring.
+template <typename StringType>
+AssertionResult IsSubstringImpl(
+    bool expected_to_be_substring,
+    const char* needle_expr, const char* haystack_expr,
+    const StringType& needle, const StringType& haystack) {
+  if (IsSubstringPred(needle, haystack) == expected_to_be_substring)
+    return AssertionSuccess();
+
+  const bool is_wide_string = sizeof(needle[0]) > 1;
+  const char* const begin_string_quote = is_wide_string ?
"L\"" : "\""; + return AssertionFailure() + << "Value of: " << needle_expr << "\n" + << " Actual: " << begin_string_quote << needle << "\"\n" + << "Expected: " << (expected_to_be_substring ? "" : "not ") + << "a substring of " << haystack_expr << "\n" + << "Which is: " << begin_string_quote << haystack << "\""; +} + +} // namespace + +// IsSubstring() and IsNotSubstring() check whether needle is a +// substring of haystack (NULL is considered a substring of itself +// only), and return an appropriate error message when they fail. + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const char* needle, const char* haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const wchar_t* needle, const wchar_t* haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::string& needle, const ::std::string& haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} + +#if GTEST_HAS_STD_WSTRING +AssertionResult IsSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack) { + return IsSubstringImpl(true, needle_expr, haystack_expr, needle, haystack); +} + +AssertionResult IsNotSubstring( + const char* needle_expr, const char* haystack_expr, + const ::std::wstring& needle, const ::std::wstring& haystack) { + return IsSubstringImpl(false, needle_expr, haystack_expr, needle, haystack); +} +#endif // GTEST_HAS_STD_WSTRING + +namespace internal { + +#if GTEST_OS_WINDOWS + +namespace { + +// Helper function for IsHRESULT{SuccessFailure} predicates +AssertionResult HRESULTFailureHelper(const char* expr, + const char* expected, + long hr) { // NOLINT +# if GTEST_OS_WINDOWS_MOBILE + + // Windows CE doesn't support FormatMessage. + const char error_text[] = ""; + +# else + + // Looks up the human-readable system message for the HRESULT code + // and since we're not passing any params to FormatMessage, we don't + // want inserts expanded. + const DWORD kFlags = FORMAT_MESSAGE_FROM_SYSTEM | + FORMAT_MESSAGE_IGNORE_INSERTS; + const DWORD kBufSize = 4096; + // Gets the system's human readable message string for this HRESULT. 
+  char error_text[kBufSize] = { '\0' };
+  DWORD message_length = ::FormatMessageA(kFlags,
+                                          0,   // no source, we're asking system
+                                          hr,  // the error
+                                          0,   // no line width restrictions
+                                          error_text,  // output buffer
+                                          kBufSize,    // buf size
+                                          NULL);  // no arguments for inserts
+  // Trims trailing white space (FormatMessage leaves a trailing CR-LF)
+  for (; message_length && IsSpace(error_text[message_length - 1]);
+       --message_length) {
+    error_text[message_length - 1] = '\0';
+  }
+
+# endif  // GTEST_OS_WINDOWS_MOBILE
+
+  const std::string error_hex("0x" + String::FormatHexInt(hr));
+  return ::testing::AssertionFailure()
+      << "Expected: " << expr << " " << expected << ".\n"
+      << "  Actual: " << error_hex << " " << error_text << "\n";
+}
+
+}  // namespace
+
+AssertionResult IsHRESULTSuccess(const char* expr, long hr) {  // NOLINT
+  if (SUCCEEDED(hr)) {
+    return AssertionSuccess();
+  }
+  return HRESULTFailureHelper(expr, "succeeds", hr);
+}
+
+AssertionResult IsHRESULTFailure(const char* expr, long hr) {  // NOLINT
+  if (FAILED(hr)) {
+    return AssertionSuccess();
+  }
+  return HRESULTFailureHelper(expr, "fails", hr);
+}
+
+#endif  // GTEST_OS_WINDOWS
+
+// Utility functions for encoding Unicode text (wide strings) in
+// UTF-8.
+
+// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8
+// like this:
+//
+// Code-point length   Encoding
+//   0 -  7 bits       0xxxxxxx
+//   8 - 11 bits       110xxxxx 10xxxxxx
+//  12 - 16 bits       1110xxxx 10xxxxxx 10xxxxxx
+//  17 - 21 bits       11110xxx 10xxxxxx 10xxxxxx 10xxxxxx
+
+// The maximum code-point a one-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint1 = (static_cast<UInt32>(1) <<  7) - 1;
+
+// The maximum code-point a two-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint2 = (static_cast<UInt32>(1) << (5 + 6)) - 1;
+
+// The maximum code-point a three-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint3 = (static_cast<UInt32>(1) << (4 + 2*6)) - 1;
+
+// The maximum code-point a four-byte UTF-8 sequence can represent.
+const UInt32 kMaxCodePoint4 = (static_cast<UInt32>(1) << (3 + 3*6)) - 1;
+
+// Chops off the n lowest bits from a bit pattern.  Returns the n
+// lowest bits.  As a side effect, the original bit pattern will be
+// shifted to the right by n bits.
+inline UInt32 ChopLowBits(UInt32* bits, int n) {
+  const UInt32 low_bits = *bits & ((static_cast<UInt32>(1) << n) - 1);
+  *bits >>= n;
+  return low_bits;
+}
+
+// Converts a Unicode code point to a narrow string in UTF-8 encoding.
+// code_point parameter is of type UInt32 because wchar_t may not be
+// wide enough to contain a code point.
+// If the code_point is not a valid Unicode code point
+// (i.e. outside of Unicode range U+0 to U+10FFFF) it will be converted
+// to "(Invalid Unicode 0xXXXXXXXX)".
+std::string CodePointToUtf8(UInt32 code_point) {
+  if (code_point > kMaxCodePoint4) {
+    return "(Invalid Unicode 0x" + String::FormatHexInt(code_point) + ")";
+  }
+
+  char str[5];  // Big enough for the largest valid code point.
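+  // Worked example (illustrative): for U+00E9 ("é"), the two-byte branch
+  // below applies; ChopLowBits() peels off the low six bits into the
+  // trailing 10xxxxxx byte (0xA9) and the remaining bits fill the leading
+  // 110xxxxx byte (0xC3), yielding the UTF-8 sequence 0xC3 0xA9.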
+  if (code_point <= kMaxCodePoint1) {
+    str[1] = '\0';
+    str[0] = static_cast<char>(code_point);                          // 0xxxxxxx
+  } else if (code_point <= kMaxCodePoint2) {
+    str[2] = '\0';
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xC0 | code_point);                   // 110xxxxx
+  } else if (code_point <= kMaxCodePoint3) {
+    str[3] = '\0';
+    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xE0 | code_point);                   // 1110xxxx
+  } else {  // code_point <= kMaxCodePoint4
+    str[4] = '\0';
+    str[3] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[2] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[1] = static_cast<char>(0x80 | ChopLowBits(&code_point, 6));  // 10xxxxxx
+    str[0] = static_cast<char>(0xF0 | code_point);                   // 11110xxx
+  }
+  return str;
+}
+
+// The following two functions only make sense if the system
+// uses UTF-16 for wide string encoding.  All supported systems
+// with 16 bit wchar_t (Windows, Cygwin, Symbian OS) do use UTF-16.
+
+// Determines if the arguments constitute UTF-16 surrogate pair
+// and thus should be combined into a single Unicode code point
+// using CreateCodePointFromUtf16SurrogatePair.
+inline bool IsUtf16SurrogatePair(wchar_t first, wchar_t second) {
+  return sizeof(wchar_t) == 2 &&
+      (first & 0xFC00) == 0xD800 && (second & 0xFC00) == 0xDC00;
+}
+
+// Creates a Unicode code point from UTF16 surrogate pair.
+inline UInt32 CreateCodePointFromUtf16SurrogatePair(wchar_t first,
+                                                    wchar_t second) {
+  const UInt32 mask = (1 << 10) - 1;
+  return (sizeof(wchar_t) == 2) ?
+      (((first & mask) << 10) | (second & mask)) + 0x10000 :
+      // This function should not be called when the condition is
+      // false, but we provide a sensible default in case it is.
+      static_cast<UInt32>(first);
+}
+
+// Converts a wide string to a narrow string in UTF-8 encoding.
+// The wide string is assumed to have the following encoding:
+//   UTF-16 if sizeof(wchar_t) == 2 (on Windows, Cygwin, Symbian OS)
+//   UTF-32 if sizeof(wchar_t) == 4 (on Linux)
+// Parameter str points to a null-terminated wide string.
+// Parameter num_chars may additionally limit the number
+// of wchar_t characters processed.  -1 is used when the entire string
+// should be processed.
+// If the string contains code points that are not valid Unicode code points
+// (i.e. outside of Unicode range U+0 to U+10FFFF) they will be output
+// as '(Invalid Unicode 0xXXXXXXXX)'. If the string is in UTF16 encoding
+// and contains invalid UTF-16 surrogate pairs, values in those pairs
+// will be encoded as individual Unicode characters from the Basic
+// Multilingual Plane.
+std::string WideStringToUtf8(const wchar_t* str, int num_chars) {
+  if (num_chars == -1)
+    num_chars = static_cast<int>(wcslen(str));
+
+  ::std::stringstream stream;
+  for (int i = 0; i < num_chars; ++i) {
+    UInt32 unicode_code_point;
+
+    if (str[i] == L'\0') {
+      break;
+    } else if (i + 1 < num_chars && IsUtf16SurrogatePair(str[i], str[i + 1])) {
+      unicode_code_point = CreateCodePointFromUtf16SurrogatePair(str[i],
+                                                                 str[i + 1]);
+      i++;
+    } else {
+      unicode_code_point = static_cast<UInt32>(str[i]);
+    }
+
+    stream << CodePointToUtf8(unicode_code_point);
+  }
+  return StringStreamToString(&stream);
+}
+
+// Converts a wide C string to an std::string using the UTF-8 encoding.
+// NULL will be converted to "(null)".
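+// For example (illustrative), ShowWideCString(L"hi") returns "hi", while
+// ShowWideCString(NULL) returns "(null)".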
+std::string String::ShowWideCString(const wchar_t * wide_c_str) { + if (wide_c_str == NULL) return "(null)"; + + return internal::WideStringToUtf8(wide_c_str, -1); +} + +// Compares two wide C strings. Returns true iff they have the same +// content. +// +// Unlike wcscmp(), this function can handle NULL argument(s). A NULL +// C string is considered different to any non-NULL C string, +// including the empty string. +bool String::WideCStringEquals(const wchar_t * lhs, const wchar_t * rhs) { + if (lhs == NULL) return rhs == NULL; + + if (rhs == NULL) return false; + + return wcscmp(lhs, rhs) == 0; +} + +// Helper function for *_STREQ on wide strings. +AssertionResult CmpHelperSTREQ(const char* expected_expression, + const char* actual_expression, + const wchar_t* expected, + const wchar_t* actual) { + if (String::WideCStringEquals(expected, actual)) { + return AssertionSuccess(); + } + + return EqFailure(expected_expression, + actual_expression, + PrintToString(expected), + PrintToString(actual), + false); +} + +// Helper function for *_STRNE on wide strings. +AssertionResult CmpHelperSTRNE(const char* s1_expression, + const char* s2_expression, + const wchar_t* s1, + const wchar_t* s2) { + if (!String::WideCStringEquals(s1, s2)) { + return AssertionSuccess(); + } + + return AssertionFailure() << "Expected: (" << s1_expression << ") != (" + << s2_expression << "), actual: " + << PrintToString(s1) + << " vs " << PrintToString(s2); +} + +// Compares two C strings, ignoring case. Returns true iff they have +// the same content. +// +// Unlike strcasecmp(), this function can handle NULL argument(s). A +// NULL C string is considered different to any non-NULL C string, +// including the empty string. +bool String::CaseInsensitiveCStringEquals(const char * lhs, const char * rhs) { + if (lhs == NULL) + return rhs == NULL; + if (rhs == NULL) + return false; + return posix::StrCaseCmp(lhs, rhs) == 0; +} + + // Compares two wide C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike wcscasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL wide C string, + // including the empty string. + // NB: The implementations on different platforms slightly differ. + // On windows, this method uses _wcsicmp which compares according to LC_CTYPE + // environment variable. On GNU platform this method uses wcscasecmp + // which compares according to LC_CTYPE category of the current locale. + // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // current locale. +bool String::CaseInsensitiveWideCStringEquals(const wchar_t* lhs, + const wchar_t* rhs) { + if (lhs == NULL) return rhs == NULL; + + if (rhs == NULL) return false; + +#if GTEST_OS_WINDOWS + return _wcsicmp(lhs, rhs) == 0; +#elif GTEST_OS_LINUX && !GTEST_OS_LINUX_ANDROID + return wcscasecmp(lhs, rhs) == 0; +#else + // Android, Mac OS X and Cygwin don't define wcscasecmp. + // Other unknown OSes may not define it either. + wint_t left, right; + do { + left = towlower(*lhs++); + right = towlower(*rhs++); + } while (left && left == right); + return left == right; +#endif // OS selector +} + +// Returns true iff str ends with the given suffix, ignoring case. +// Any string is considered to end with an empty suffix. 
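+// For example (illustrative), EndsWithCaseInsensitive("foo.EXE", ".exe")
+// returns true.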
+bool String::EndsWithCaseInsensitive(
+    const std::string& str, const std::string& suffix) {
+  const size_t str_len = str.length();
+  const size_t suffix_len = suffix.length();
+  return (str_len >= suffix_len) &&
+         CaseInsensitiveCStringEquals(str.c_str() + str_len - suffix_len,
+                                      suffix.c_str());
+}
+
+// Formats an int value as "%02d".
+std::string String::FormatIntWidth2(int value) {
+  std::stringstream ss;
+  ss << std::setfill('0') << std::setw(2) << value;
+  return ss.str();
+}
+
+// Formats an int value as "%X".
+std::string String::FormatHexInt(int value) {
+  std::stringstream ss;
+  ss << std::hex << std::uppercase << value;
+  return ss.str();
+}
+
+// Formats a byte as "%02X".
+std::string String::FormatByte(unsigned char value) {
+  std::stringstream ss;
+  ss << std::setfill('0') << std::setw(2) << std::hex << std::uppercase
+     << static_cast<unsigned int>(value);
+  return ss.str();
+}
+
+// Converts the buffer in a stringstream to an std::string, converting NUL
+// bytes to "\\0" along the way.
+std::string StringStreamToString(::std::stringstream* ss) {
+  const ::std::string& str = ss->str();
+  const char* const start = str.c_str();
+  const char* const end = start + str.length();
+
+  std::string result;
+  result.reserve(2 * (end - start));
+  for (const char* ch = start; ch != end; ++ch) {
+    if (*ch == '\0') {
+      result += "\\0";  // Replaces NUL with "\\0";
+    } else {
+      result += *ch;
+    }
+  }
+
+  return result;
+}
+
+// Appends the user-supplied message to the Google-Test-generated message.
+std::string AppendUserMessage(const std::string& gtest_msg,
+                              const Message& user_msg) {
+  // Appends the user message if it's non-empty.
+  const std::string user_msg_string = user_msg.GetString();
+  if (user_msg_string.empty()) {
+    return gtest_msg;
+  }
+
+  return gtest_msg + "\n" + user_msg_string;
+}
+
+}  // namespace internal
+
+// class TestResult
+
+// Creates an empty TestResult.
+TestResult::TestResult()
+    : death_test_count_(0),
+      elapsed_time_(0) {
+}
+
+// D'tor.
+TestResult::~TestResult() {
+}
+
+// Returns the i-th test part result among all the results. i can
+// range from 0 to total_part_count() - 1. If i is not in that range,
+// aborts the program.
+const TestPartResult& TestResult::GetTestPartResult(int i) const {
+  if (i < 0 || i >= total_part_count())
+    internal::posix::Abort();
+  return test_part_results_.at(i);
+}
+
+// Returns the i-th test property. i can range from 0 to
+// test_property_count() - 1. If i is not in that range, aborts the
+// program.
+const TestProperty& TestResult::GetTestProperty(int i) const {
+  if (i < 0 || i >= test_property_count())
+    internal::posix::Abort();
+  return test_properties_.at(i);
+}
+
+// Clears the test part results.
+void TestResult::ClearTestPartResults() {
+  test_part_results_.clear();
+}
+
+// Adds a test part result to the list.
+void TestResult::AddTestPartResult(const TestPartResult& test_part_result) {
+  test_part_results_.push_back(test_part_result);
+}
+
+// Adds a test property to the list. If a property with the same key as the
+// supplied property is already represented, the value of this test_property
+// replaces the old value for that key.
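+// For example (illustrative), recording key "bug" with value "123" and then
+// key "bug" with value "456" leaves a single "bug" property whose value is
+// "456".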
+void TestResult::RecordProperty(const std::string& xml_element,
+                                const TestProperty& test_property) {
+  if (!ValidateTestProperty(xml_element, test_property)) {
+    return;
+  }
+  internal::MutexLock lock(&test_properites_mutex_);
+  const std::vector<TestProperty>::iterator property_with_matching_key =
+      std::find_if(test_properties_.begin(), test_properties_.end(),
+                   internal::TestPropertyKeyIs(test_property.key()));
+  if (property_with_matching_key == test_properties_.end()) {
+    test_properties_.push_back(test_property);
+    return;
+  }
+  property_with_matching_key->SetValue(test_property.value());
+}
+
+// The list of reserved attributes used in the <testsuites> element of XML
+// output.
+static const char* const kReservedTestSuitesAttributes[] = {
+  "disabled",
+  "errors",
+  "failures",
+  "name",
+  "random_seed",
+  "tests",
+  "time",
+  "timestamp"
+};
+
+// The list of reserved attributes used in the <testsuite> element of XML
+// output.
+static const char* const kReservedTestSuiteAttributes[] = {
+  "disabled",
+  "errors",
+  "failures",
+  "name",
+  "tests",
+  "time"
+};
+
+// The list of reserved attributes used in the <testcase> element of XML
+// output.
+static const char* const kReservedTestCaseAttributes[] = {
+  "classname",
+  "name",
+  "status",
+  "time",
+  "type_param",
+  "value_param"
+};
+
+template <int kSize>
+std::vector<std::string> ArrayAsVector(const char* const (&array)[kSize]) {
+  return std::vector<std::string>(array, array + kSize);
+}
+
+static std::vector<std::string> GetReservedAttributesForElement(
+    const std::string& xml_element) {
+  if (xml_element == "testsuites") {
+    return ArrayAsVector(kReservedTestSuitesAttributes);
+  } else if (xml_element == "testsuite") {
+    return ArrayAsVector(kReservedTestSuiteAttributes);
+  } else if (xml_element == "testcase") {
+    return ArrayAsVector(kReservedTestCaseAttributes);
+  } else {
+    GTEST_CHECK_(false) << "Unrecognized xml_element provided: " << xml_element;
+  }
+  // This code is unreachable but some compilers may not realize that.
+  return std::vector<std::string>();
+}
+
+static std::string FormatWordList(const std::vector<std::string>& words) {
+  Message word_list;
+  for (size_t i = 0; i < words.size(); ++i) {
+    if (i > 0 && words.size() > 2) {
+      word_list << ", ";
+    }
+    if (i == words.size() - 1) {
+      word_list << "and ";
+    }
+    word_list << "'" << words[i] << "'";
+  }
+  return word_list.GetString();
+}
+
+bool ValidateTestPropertyName(const std::string& property_name,
+                              const std::vector<std::string>& reserved_names) {
+  if (std::find(reserved_names.begin(), reserved_names.end(), property_name) !=
+          reserved_names.end()) {
+    ADD_FAILURE() << "Reserved key used in RecordProperty(): " << property_name
+                  << " (" << FormatWordList(reserved_names)
+                  << " are reserved by " << GTEST_NAME_ << ")";
+    return false;
+  }
+  return true;
+}
+
+// Adds a failure if the key is a reserved attribute of the element named
+// xml_element.  Returns true if the property is valid.
+bool TestResult::ValidateTestProperty(const std::string& xml_element,
+                                      const TestProperty& test_property) {
+  return ValidateTestPropertyName(test_property.key(),
+                                  GetReservedAttributesForElement(xml_element));
+}
+
+// Clears the object.
+void TestResult::Clear() {
+  test_part_results_.clear();
+  test_properties_.clear();
+  death_test_count_ = 0;
+  elapsed_time_ = 0;
+}
+
+// Returns true iff the test failed.
+bool TestResult::Failed() const {
+  for (int i = 0; i < total_part_count(); ++i) {
+    if (GetTestPartResult(i).failed())
+      return true;
+  }
+  return false;
+}
+
+// Returns true iff the test part fatally failed.
+static bool TestPartFatallyFailed(const TestPartResult& result) {
+  return result.fatally_failed();
+}
+
+// Returns true iff the test fatally failed.
+bool TestResult::HasFatalFailure() const {
+  return CountIf(test_part_results_, TestPartFatallyFailed) > 0;
+}
+
+// Returns true iff the test part non-fatally failed.
+static bool TestPartNonfatallyFailed(const TestPartResult& result) {
+  return result.nonfatally_failed();
+}
+
+// Returns true iff the test has a non-fatal failure.
+bool TestResult::HasNonfatalFailure() const {
+  return CountIf(test_part_results_, TestPartNonfatallyFailed) > 0;
+}
+
+// Gets the number of all test parts.  This is the sum of the number
+// of successful test parts and the number of failed test parts.
+int TestResult::total_part_count() const {
+  return static_cast<int>(test_part_results_.size());
+}
+
+// Returns the number of the test properties.
+int TestResult::test_property_count() const {
+  return static_cast<int>(test_properties_.size());
+}
+
+// class Test
+
+// Creates a Test object.
+
+// The c'tor saves the values of all Google Test flags.
+Test::Test()
+    : gtest_flag_saver_(new internal::GTestFlagSaver) {
+}
+
+// The d'tor restores the values of all Google Test flags.
+Test::~Test() {
+  delete gtest_flag_saver_;
+}
+
+// Sets up the test fixture.
+//
+// A sub-class may override this.
+void Test::SetUp() {
+}
+
+// Tears down the test fixture.
+//
+// A sub-class may override this.
+void Test::TearDown() {
+}
+
+// Allows user supplied key value pairs to be recorded for later output.
+void Test::RecordProperty(const std::string& key, const std::string& value) {
+  UnitTest::GetInstance()->RecordProperty(key, value);
+}
+
+// Allows user supplied key value pairs to be recorded for later output.
+void Test::RecordProperty(const std::string& key, int value) {
+  Message value_message;
+  value_message << value;
+  RecordProperty(key, value_message.GetString().c_str());
+}
+
+namespace internal {
+
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+                                    const std::string& message) {
+  // This function is a friend of UnitTest and as such has access to
+  // AddTestPartResult.
+  UnitTest::GetInstance()->AddTestPartResult(
+      result_type,
+      NULL,  // No info about the source file where the exception occurred.
+      -1,    // We have no info on which line caused the exception.
+      message,
+      "");   // No stack trace, either.
+}
+
+}  // namespace internal
+
+// Google Test requires all tests in the same test case to use the same test
+// fixture class.  This function checks if the current test has the
+// same fixture class as the first test in the current test case.  If
+// yes, it returns true; otherwise it generates a Google Test failure and
+// returns false.
+bool Test::HasSameFixtureClass() {
+  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+  const TestCase* const test_case = impl->current_test_case();
+
+  // Info about the first test in the current test case.
+  const TestInfo* const first_test_info = test_case->test_info_list()[0];
+  const internal::TypeId first_fixture_id = first_test_info->fixture_class_id_;
+  const char* const first_test_name = first_test_info->name();
+
+  // Info about the current test.
+  const TestInfo* const this_test_info = impl->current_test_info();
+  const internal::TypeId this_fixture_id = this_test_info->fixture_class_id_;
+  const char* const this_test_name = this_test_info->name();
+
+  if (this_fixture_id != first_fixture_id) {
+    // Is the first test defined using TEST?
+ const bool first_is_TEST = first_fixture_id == internal::GetTestTypeId(); + // Is this test defined using TEST? + const bool this_is_TEST = this_fixture_id == internal::GetTestTypeId(); + + if (first_is_TEST || this_is_TEST) { + // Both TEST and TEST_F appear in same test case, which is incorrect. + // Tell the user how to fix this. + + // Gets the name of the TEST and the name of the TEST_F. Note + // that first_is_TEST and this_is_TEST cannot both be true, as + // the fixture IDs are different for the two tests. + const char* const TEST_name = + first_is_TEST ? first_test_name : this_test_name; + const char* const TEST_F_name = + first_is_TEST ? this_test_name : first_test_name; + + ADD_FAILURE() + << "All tests in the same test case must use the same test fixture\n" + << "class, so mixing TEST_F and TEST in the same test case is\n" + << "illegal. In test case " << this_test_info->test_case_name() + << ",\n" + << "test " << TEST_F_name << " is defined using TEST_F but\n" + << "test " << TEST_name << " is defined using TEST. You probably\n" + << "want to change the TEST to TEST_F or move it to another test\n" + << "case."; + } else { + // Two fixture classes with the same name appear in two different + // namespaces, which is not allowed. Tell the user how to fix this. + ADD_FAILURE() + << "All tests in the same test case must use the same test fixture\n" + << "class. However, in test case " + << this_test_info->test_case_name() << ",\n" + << "you defined test " << first_test_name + << " and test " << this_test_name << "\n" + << "using two different test fixture classes. This can happen if\n" + << "the two classes are from different namespaces or translation\n" + << "units and have the same name. You should probably rename one\n" + << "of the classes to put the tests into different test cases."; + } + return false; + } + + return true; +} + +#if GTEST_HAS_SEH + +// Adds an "exception thrown" fatal failure to the current test. This +// function returns its result via an output parameter pointer because VC++ +// prohibits creation of objects with destructors on stack in functions +// using __try (see error C2712). +static std::string* FormatSehExceptionMessage(DWORD exception_code, + const char* location) { + Message message; + message << "SEH exception with code 0x" << std::setbase(16) << + exception_code << std::setbase(10) << " thrown in " << location << "."; + + return new std::string(message.GetString()); +} + +#endif // GTEST_HAS_SEH + +namespace internal { + +#if GTEST_HAS_EXCEPTIONS + +// Adds an "exception thrown" fatal failure to the current test. +static std::string FormatCxxExceptionMessage(const char* description, + const char* location) { + Message message; + if (description != NULL) { + message << "C++ exception with description \"" << description << "\""; + } else { + message << "Unknown C++ exception"; + } + message << " thrown in " << location << "."; + + return message.GetString(); +} + +static std::string PrintTestPartResultToString( + const TestPartResult& test_part_result); + +GoogleTestFailureException::GoogleTestFailureException( + const TestPartResult& failure) + : ::std::runtime_error(PrintTestPartResultToString(failure).c_str()) {} + +#endif // GTEST_HAS_EXCEPTIONS + +// We put these helper functions in the internal namespace as IBM's xlC +// compiler rejects the code if they were declared static. + +// Runs the given method and handles SEH exceptions it throws, when +// SEH is supported; returns the 0-value for type Result in case of an +// SEH exception. 
(Microsoft compilers cannot handle SEH and C++
+// exceptions in the same function.  Therefore, we provide a separate
+// wrapper function for handling SEH exceptions.)
+template <class T, typename Result>
+Result HandleSehExceptionsInMethodIfSupported(
+    T* object, Result (T::*method)(), const char* location) {
+#if GTEST_HAS_SEH
+  __try {
+    return (object->*method)();
+  } __except (internal::UnitTestOptions::GTestShouldProcessSEH(  // NOLINT
+      GetExceptionCode())) {
+    // We create the exception message on the heap because VC++ prohibits
+    // creation of objects with destructors on stack in functions using __try
+    // (see error C2712).
+    std::string* exception_message = FormatSehExceptionMessage(
+        GetExceptionCode(), location);
+    internal::ReportFailureInUnknownLocation(TestPartResult::kFatalFailure,
+                                             *exception_message);
+    delete exception_message;
+    return static_cast<Result>(0);
+  }
+#else
+  (void)location;
+  return (object->*method)();
+#endif  // GTEST_HAS_SEH
+}
+
+// Runs the given method and catches and reports C++ and/or SEH-style
+// exceptions, if they are supported; returns the 0-value for type
+// Result in case of an SEH exception.
+template <class T, typename Result>
+Result HandleExceptionsInMethodIfSupported(
+    T* object, Result (T::*method)(), const char* location) {
+  // NOTE: The user code can affect the way in which Google Test handles
+  // exceptions by setting GTEST_FLAG(catch_exceptions), but only before
+  // RUN_ALL_TESTS() starts.  It is technically possible to check the flag
+  // after the exception is caught and either report or re-throw the
+  // exception based on the flag's value:
+  //
+  // try {
+  //   // Perform the test method.
+  // } catch (...) {
+  //   if (GTEST_FLAG(catch_exceptions))
+  //     // Report the exception as failure.
+  //   else
+  //     throw;  // Re-throws the original exception.
+  // }
+  //
+  // However, the purpose of this flag is to allow the program to drop into
+  // the debugger when the exception is thrown.  On most platforms, once the
+  // control enters the catch block, the exception origin information is
+  // lost and the debugger will stop the program at the point of the
+  // re-throw in this function -- instead of at the point of the original
+  // throw statement in the code under test.  For this reason, we perform
+  // the check early, sacrificing the ability to affect Google Test's
+  // exception handling in the method where the exception is thrown.
+  if (internal::GetUnitTestImpl()->catch_exceptions()) {
+#if GTEST_HAS_EXCEPTIONS
+    try {
+      return HandleSehExceptionsInMethodIfSupported(object, method, location);
+    } catch (const internal::GoogleTestFailureException&) {  // NOLINT
+      // This exception type can only be thrown by a failed Google
+      // Test assertion with the intention of letting another testing
+      // framework catch it.  Therefore we just re-throw it.
+      throw;
+    } catch (const std::exception& e) {  // NOLINT
+      internal::ReportFailureInUnknownLocation(
+          TestPartResult::kFatalFailure,
+          FormatCxxExceptionMessage(e.what(), location));
+    } catch (...) {  // NOLINT
+      internal::ReportFailureInUnknownLocation(
+          TestPartResult::kFatalFailure,
+          FormatCxxExceptionMessage(NULL, location));
+    }
+    return static_cast<Result>(0);
+#else
+    return HandleSehExceptionsInMethodIfSupported(object, method, location);
+#endif  // GTEST_HAS_EXCEPTIONS
+  } else {
+    return (object->*method)();
+  }
+}
+
+}  // namespace internal
+
+// Runs the test and updates the test result.
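+// SetUp(), the test body and TearDown() below are each routed through
+// HandleExceptionsInMethodIfSupported(); a fatal failure in SetUp() skips
+// the body, but TearDown() is always attempted.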
+void Test::Run() { + if (!HasSameFixtureClass()) return; + + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported(this, &Test::SetUp, "SetUp()"); + // We will run the test only if SetUp() was successful. + if (!HasFatalFailure()) { + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &Test::TestBody, "the test body"); + } + + // However, we want to clean up as much as possible. Hence we will + // always call TearDown(), even if SetUp() or the test body has + // failed. + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + this, &Test::TearDown, "TearDown()"); +} + +// Returns true iff the current test has a fatal failure. +bool Test::HasFatalFailure() { + return internal::GetUnitTestImpl()->current_test_result()->HasFatalFailure(); +} + +// Returns true iff the current test has a non-fatal failure. +bool Test::HasNonfatalFailure() { + return internal::GetUnitTestImpl()->current_test_result()-> + HasNonfatalFailure(); +} + +// class TestInfo + +// Constructs a TestInfo object. It assumes ownership of the test factory +// object. +TestInfo::TestInfo(const std::string& a_test_case_name, + const std::string& a_name, + const char* a_type_param, + const char* a_value_param, + internal::TypeId fixture_class_id, + internal::TestFactoryBase* factory) + : test_case_name_(a_test_case_name), + name_(a_name), + type_param_(a_type_param ? new std::string(a_type_param) : NULL), + value_param_(a_value_param ? new std::string(a_value_param) : NULL), + fixture_class_id_(fixture_class_id), + should_run_(false), + is_disabled_(false), + matches_filter_(false), + factory_(factory), + result_() {} + +// Destructs a TestInfo object. +TestInfo::~TestInfo() { delete factory_; } + +namespace internal { + +// Creates a new TestInfo object and registers it with Google Test; +// returns the created object. +// +// Arguments: +// +// test_case_name: name of the test case +// name: name of the test +// type_param: the name of the test's type parameter, or NULL if +// this is not a typed or a type-parameterized test. +// value_param: text representation of the test's value parameter, +// or NULL if this is not a value-parameterized test. +// fixture_class_id: ID of the test fixture class +// set_up_tc: pointer to the function that sets up the test case +// tear_down_tc: pointer to the function that tears down the test case +// factory: pointer to the factory that creates a test object. +// The newly created TestInfo instance will assume +// ownership of the factory object. +TestInfo* MakeAndRegisterTestInfo( + const char* test_case_name, + const char* name, + const char* type_param, + const char* value_param, + TypeId fixture_class_id, + SetUpTestCaseFunc set_up_tc, + TearDownTestCaseFunc tear_down_tc, + TestFactoryBase* factory) { + TestInfo* const test_info = + new TestInfo(test_case_name, name, type_param, value_param, + fixture_class_id, factory); + GetUnitTestImpl()->AddTestInfo(set_up_tc, tear_down_tc, test_info); + return test_info; +} + +#if GTEST_HAS_PARAM_TEST +void ReportInvalidTestCaseType(const char* test_case_name, + const char* file, int line) { + Message errors; + errors + << "Attempted redefinition of test case " << test_case_name << ".\n" + << "All tests in the same test case must use the same test fixture\n" + << "class. 
However, in test case " << test_case_name << ", you tried\n" + << "to define a test using a fixture class different from the one\n" + << "used earlier. This can happen if the two fixture classes are\n" + << "from different namespaces and have the same name. You should\n" + << "probably rename one of the classes to put the tests into different\n" + << "test cases."; + + fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(), + errors.GetString().c_str()); +} +#endif // GTEST_HAS_PARAM_TEST + +} // namespace internal + +namespace { + +// A predicate that checks the test name of a TestInfo against a known +// value. +// +// This is used for implementation of the TestCase class only. We put +// it in the anonymous namespace to prevent polluting the outer +// namespace. +// +// TestNameIs is copyable. +class TestNameIs { + public: + // Constructor. + // + // TestNameIs has NO default constructor. + explicit TestNameIs(const char* name) + : name_(name) {} + + // Returns true iff the test name of test_info matches name_. + bool operator()(const TestInfo * test_info) const { + return test_info && test_info->name() == name_; + } + + private: + std::string name_; +}; + +} // namespace + +namespace internal { + +// This method expands all parameterized tests registered with macros TEST_P +// and INSTANTIATE_TEST_CASE_P into regular tests and registers those. +// This will be done just once during the program runtime. +void UnitTestImpl::RegisterParameterizedTests() { +#if GTEST_HAS_PARAM_TEST + if (!parameterized_tests_registered_) { + parameterized_test_registry_.RegisterTests(); + parameterized_tests_registered_ = true; + } +#endif +} + +} // namespace internal + +// Creates the test object, runs it, records its result, and then +// deletes it. +void TestInfo::Run() { + if (!should_run_) return; + + // Tells UnitTest where to store test result. + internal::UnitTestImpl* const impl = internal::GetUnitTestImpl(); + impl->set_current_test_info(this); + + TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater(); + + // Notifies the unit test event listeners that a test is about to start. + repeater->OnTestStart(*this); + + const TimeInMillis start = internal::GetTimeInMillis(); + + impl->os_stack_trace_getter()->UponLeavingGTest(); + + // Creates the test object. + Test* const test = internal::HandleExceptionsInMethodIfSupported( + factory_, &internal::TestFactoryBase::CreateTest, + "the test fixture's constructor"); + + // Runs the test only if the test object was created and its + // constructor didn't generate a fatal failure. + if ((test != NULL) && !Test::HasFatalFailure()) { + // This doesn't throw as all user code that can throw are wrapped into + // exception handling code. + test->Run(); + } + + // Deletes the test object. + impl->os_stack_trace_getter()->UponLeavingGTest(); + internal::HandleExceptionsInMethodIfSupported( + test, &Test::DeleteSelf_, "the test fixture's destructor"); + + result_.set_elapsed_time(internal::GetTimeInMillis() - start); + + // Notifies the unit test event listener that a test has just finished. + repeater->OnTestEnd(*this); + + // Tells UnitTest to stop associating assertion results to this + // test. + impl->set_current_test_info(NULL); +} + +// class TestCase + +// Gets the number of successful tests in this test case. +int TestCase::successful_test_count() const { + return CountIf(test_info_list_, TestPassed); +} + +// Gets the number of failed tests in this test case. 
+int TestCase::failed_test_count() const {
+  return CountIf(test_info_list_, TestFailed);
+}
+
+// Gets the number of disabled tests that will be reported in the XML report.
+int TestCase::reportable_disabled_test_count() const {
+  return CountIf(test_info_list_, TestReportableDisabled);
+}
+
+// Gets the number of disabled tests in this test case.
+int TestCase::disabled_test_count() const {
+  return CountIf(test_info_list_, TestDisabled);
+}
+
+// Gets the number of tests to be printed in the XML report.
+int TestCase::reportable_test_count() const {
+  return CountIf(test_info_list_, TestReportable);
+}
+
+// Get the number of tests in this test case that should run.
+int TestCase::test_to_run_count() const {
+  return CountIf(test_info_list_, ShouldRunTest);
+}
+
+// Gets the number of all tests.
+int TestCase::total_test_count() const {
+  return static_cast<int>(test_info_list_.size());
+}
+
+// Creates a TestCase with the given name.
+//
+// Arguments:
+//
+//   name:         name of the test case
+//   a_type_param: the name of the test case's type parameter, or NULL if
+//                 this is not a typed or a type-parameterized test case.
+//   set_up_tc:    pointer to the function that sets up the test case
+//   tear_down_tc: pointer to the function that tears down the test case
+TestCase::TestCase(const char* a_name, const char* a_type_param,
+                   Test::SetUpTestCaseFunc set_up_tc,
+                   Test::TearDownTestCaseFunc tear_down_tc)
+    : name_(a_name),
+      type_param_(a_type_param ? new std::string(a_type_param) : NULL),
+      set_up_tc_(set_up_tc),
+      tear_down_tc_(tear_down_tc),
+      should_run_(false),
+      elapsed_time_(0) {
+}
+
+// Destructor of TestCase.
+TestCase::~TestCase() {
+  // Deletes every Test in the collection.
+  ForEach(test_info_list_, internal::Delete<TestInfo>);
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+const TestInfo* TestCase::GetTestInfo(int i) const {
+  const int index = GetElementOr(test_indices_, i, -1);
+  return index < 0 ? NULL : test_info_list_[index];
+}
+
+// Returns the i-th test among all the tests. i can range from 0 to
+// total_test_count() - 1. If i is not in that range, returns NULL.
+TestInfo* TestCase::GetMutableTestInfo(int i) {
+  const int index = GetElementOr(test_indices_, i, -1);
+  return index < 0 ? NULL : test_info_list_[index];
+}
+
+// Adds a test to this test case.  Will delete the test upon
+// destruction of the TestCase object.
+void TestCase::AddTestInfo(TestInfo * test_info) {
+  test_info_list_.push_back(test_info);
+  test_indices_.push_back(static_cast<int>(test_indices_.size()));
+}
+
+// Runs every test in this TestCase.
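+// RunSetUpTestCase() is invoked once before the loop over the tests and
+// RunTearDownTestCase() once after it; elapsed_time_ covers only the loop.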
+void TestCase::Run() {
+  if (!should_run_) return;
+
+  internal::UnitTestImpl* const impl = internal::GetUnitTestImpl();
+  impl->set_current_test_case(this);
+
+  TestEventListener* repeater = UnitTest::GetInstance()->listeners().repeater();
+
+  repeater->OnTestCaseStart(*this);
+  impl->os_stack_trace_getter()->UponLeavingGTest();
+  internal::HandleExceptionsInMethodIfSupported(
+      this, &TestCase::RunSetUpTestCase, "SetUpTestCase()");
+
+  const internal::TimeInMillis start = internal::GetTimeInMillis();
+  for (int i = 0; i < total_test_count(); i++) {
+    GetMutableTestInfo(i)->Run();
+  }
+  elapsed_time_ = internal::GetTimeInMillis() - start;
+
+  impl->os_stack_trace_getter()->UponLeavingGTest();
+  internal::HandleExceptionsInMethodIfSupported(
+      this, &TestCase::RunTearDownTestCase, "TearDownTestCase()");
+
+  repeater->OnTestCaseEnd(*this);
+  impl->set_current_test_case(NULL);
+}
+
+// Clears the results of all tests in this test case.
+void TestCase::ClearResult() {
+  ad_hoc_test_result_.Clear();
+  ForEach(test_info_list_, TestInfo::ClearTestResult);
+}
+
+// Shuffles the tests in this test case.
+void TestCase::ShuffleTests(internal::Random* random) {
+  Shuffle(random, &test_indices_);
+}
+
+// Restores the test order to before the first shuffle.
+void TestCase::UnshuffleTests() {
+  for (size_t i = 0; i < test_indices_.size(); i++) {
+    test_indices_[i] = static_cast<int>(i);
+  }
+}
+
+// Formats a countable noun.  Depending on its quantity, either the
+// singular form or the plural form is used. e.g.
+//
+// FormatCountableNoun(1, "formula", "formuli") returns "1 formula".
+// FormatCountableNoun(5, "book", "books") returns "5 books".
+static std::string FormatCountableNoun(int count,
+                                       const char * singular_form,
+                                       const char * plural_form) {
+  return internal::StreamableToString(count) + " " +
+      (count == 1 ? singular_form : plural_form);
+}
+
+// Formats the count of tests.
+static std::string FormatTestCount(int test_count) {
+  return FormatCountableNoun(test_count, "test", "tests");
+}
+
+// Formats the count of test cases.
+static std::string FormatTestCaseCount(int test_case_count) {
+  return FormatCountableNoun(test_case_count, "test case", "test cases");
+}
+
+// Converts a TestPartResult::Type enum to human-friendly string
+// representation.  Both kNonFatalFailure and kFatalFailure are translated
+// to "Failure", as the user usually doesn't care about the difference
+// between the two when viewing the test result.
+static const char * TestPartResultTypeToString(TestPartResult::Type type) {
+  switch (type) {
+    case TestPartResult::kSuccess:
+      return "Success";
+
+    case TestPartResult::kNonFatalFailure:
+    case TestPartResult::kFatalFailure:
+#ifdef _MSC_VER
+      return "error: ";
+#else
+      return "Failure\n";
+#endif
+    default:
+      return "Unknown result type";
+  }
+}
+
+namespace internal {
+
+// Prints a TestPartResult to an std::string.
+static std::string PrintTestPartResultToString(
+    const TestPartResult& test_part_result) {
+  return (Message()
+          << internal::FormatFileLocation(test_part_result.file_name(),
+                                          test_part_result.line_number())
+          << " " << TestPartResultTypeToString(test_part_result.type())
+          << test_part_result.message()).GetString();
+}
+
+// Prints a TestPartResult.
+static void PrintTestPartResult(const TestPartResult& test_part_result) { + const std::string& result = + PrintTestPartResultToString(test_part_result); + printf("%s\n", result.c_str()); + fflush(stdout); + // If the test program runs in Visual Studio or a debugger, the + // following statements add the test part result message to the Output + // window such that the user can double-click on it to jump to the + // corresponding source code location; otherwise they do nothing. +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + // We don't call OutputDebugString*() on Windows Mobile, as printing + // to stdout is done by OutputDebugString() there already - we don't + // want the same message printed twice. + ::OutputDebugStringA(result.c_str()); + ::OutputDebugStringA("\n"); +#endif +} + +// class PrettyUnitTestResultPrinter + +enum GTestColor { + COLOR_DEFAULT, + COLOR_RED, + COLOR_GREEN, + COLOR_YELLOW +}; + +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \ + !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT + +// Returns the character attribute for the given color. +WORD GetColorAttribute(GTestColor color) { + switch (color) { + case COLOR_RED: return FOREGROUND_RED; + case COLOR_GREEN: return FOREGROUND_GREEN; + case COLOR_YELLOW: return FOREGROUND_RED | FOREGROUND_GREEN; + default: return 0; + } +} + +#else + +// Returns the ANSI color code for the given color. COLOR_DEFAULT is +// an invalid input. +const char* GetAnsiColorCode(GTestColor color) { + switch (color) { + case COLOR_RED: return "1"; + case COLOR_GREEN: return "2"; + case COLOR_YELLOW: return "3"; + default: return NULL; + }; +} + +#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + +// Returns true iff Google Test should use colors in the output. +bool ShouldUseColor(bool stdout_is_tty) { + const char* const gtest_color = GTEST_FLAG(color).c_str(); + + if (String::CaseInsensitiveCStringEquals(gtest_color, "auto")) { +#if GTEST_OS_WINDOWS + // On Windows the TERM variable is usually not set, but the + // console there does support colors. + return stdout_is_tty; +#else + // On non-Windows platforms, we rely on the TERM variable. + const char* const term = posix::GetEnv("TERM"); + const bool term_supports_color = + String::CStringEquals(term, "xterm") || + String::CStringEquals(term, "xterm-color") || + String::CStringEquals(term, "xterm-256color") || + String::CStringEquals(term, "screen") || + String::CStringEquals(term, "screen-256color") || + String::CStringEquals(term, "linux") || + String::CStringEquals(term, "cygwin"); + return stdout_is_tty && term_supports_color; +#endif // GTEST_OS_WINDOWS + } + + return String::CaseInsensitiveCStringEquals(gtest_color, "yes") || + String::CaseInsensitiveCStringEquals(gtest_color, "true") || + String::CaseInsensitiveCStringEquals(gtest_color, "t") || + String::CStringEquals(gtest_color, "1"); + // We take "yes", "true", "t", and "1" as meaning "yes". If the + // value is neither one of these nor "auto", we treat it as "no" to + // be conservative. +} + +// Helpers for printing colored strings to stdout. Note that on Windows, we +// cannot simply emit special characters and have the terminal change colors. +// This routine must actually emit the characters rather than return a string +// that would be colored when printed, as can be done on Linux. +void ColoredPrintf(GTestColor color, const char* fmt, ...) 
{ + va_list args; + va_start(args, fmt); + +#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS || \ + GTEST_OS_IOS || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT + const bool use_color = AlwaysFalse(); +#else + static const bool in_color_mode = + ShouldUseColor(posix::IsATTY(posix::FileNo(stdout)) != 0); + const bool use_color = in_color_mode && (color != COLOR_DEFAULT); +#endif // GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || GTEST_OS_ZOS + // The '!= 0' comparison is necessary to satisfy MSVC 7.1. + + if (!use_color) { + vprintf(fmt, args); + va_end(args); + return; + } + +#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE && \ + !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT + const HANDLE stdout_handle = GetStdHandle(STD_OUTPUT_HANDLE); + + // Gets the current text color. + CONSOLE_SCREEN_BUFFER_INFO buffer_info; + GetConsoleScreenBufferInfo(stdout_handle, &buffer_info); + const WORD old_color_attrs = buffer_info.wAttributes; + + // We need to flush the stream buffers into the console before each + // SetConsoleTextAttribute call lest it affect the text that is already + // printed but has not yet reached the console. + fflush(stdout); + SetConsoleTextAttribute(stdout_handle, + GetColorAttribute(color) | FOREGROUND_INTENSITY); + vprintf(fmt, args); + + fflush(stdout); + // Restores the text color. + SetConsoleTextAttribute(stdout_handle, old_color_attrs); +#else + printf("\033[0;3%sm", GetAnsiColorCode(color)); + vprintf(fmt, args); + printf("\033[m"); // Resets the terminal to default. +#endif // GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_MOBILE + va_end(args); +} + +// Text printed in Google Test's text output and --gunit_list_tests +// output to label the type parameter and value parameter for a test. +static const char kTypeParamLabel[] = "TypeParam"; +static const char kValueParamLabel[] = "GetParam()"; + +void PrintFullTestCommentIfPresent(const TestInfo& test_info) { + const char* const type_param = test_info.type_param(); + const char* const value_param = test_info.value_param(); + + if (type_param != NULL || value_param != NULL) { + printf(", where "); + if (type_param != NULL) { + printf("%s = %s", kTypeParamLabel, type_param); + if (value_param != NULL) + printf(" and "); + } + if (value_param != NULL) { + printf("%s = %s", kValueParamLabel, value_param); + } + } +} + +// This class implements the TestEventListener interface. +// +// Class PrettyUnitTestResultPrinter is copyable. +class PrettyUnitTestResultPrinter : public TestEventListener { + public: + PrettyUnitTestResultPrinter() {} + static void PrintTestName(const char * test_case, const char * test) { + printf("%s.%s", test_case, test); + } + + // The following methods override what's in the TestEventListener class. 
+ virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration); + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test); + virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestCaseStart(const TestCase& test_case); + virtual void OnTestStart(const TestInfo& test_info); + virtual void OnTestPartResult(const TestPartResult& result); + virtual void OnTestEnd(const TestInfo& test_info); + virtual void OnTestCaseEnd(const TestCase& test_case); + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test); + virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration); + virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {} + + private: + static void PrintFailedTests(const UnitTest& unit_test); +}; + + // Fired before each iteration of tests starts. +void PrettyUnitTestResultPrinter::OnTestIterationStart( + const UnitTest& unit_test, int iteration) { + if (GTEST_FLAG(repeat) != 1) + printf("\nRepeating all tests (iteration %d) . . .\n\n", iteration + 1); + + const char* const filter = GTEST_FLAG(filter).c_str(); + + // Prints the filter if it's not *. This reminds the user that some + // tests may be skipped. + if (!String::CStringEquals(filter, kUniversalFilter)) { + ColoredPrintf(COLOR_YELLOW, + "Note: %s filter = %s\n", GTEST_NAME_, filter); + } + + if (internal::ShouldShard(kTestTotalShards, kTestShardIndex, false)) { + const Int32 shard_index = Int32FromEnvOrDie(kTestShardIndex, -1); + ColoredPrintf(COLOR_YELLOW, + "Note: This is test shard %d of %s.\n", + static_cast(shard_index) + 1, + internal::posix::GetEnv(kTestTotalShards)); + } + + if (GTEST_FLAG(shuffle)) { + ColoredPrintf(COLOR_YELLOW, + "Note: Randomizing tests' orders with a seed of %d .\n", + unit_test.random_seed()); + } + + ColoredPrintf(COLOR_GREEN, "[==========] "); + printf("Running %s from %s.\n", + FormatTestCount(unit_test.test_to_run_count()).c_str(), + FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str()); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnEnvironmentsSetUpStart( + const UnitTest& /*unit_test*/) { + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("Global test environment set-up.\n"); + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestCaseStart(const TestCase& test_case) { + const std::string counts = + FormatCountableNoun(test_case.test_to_run_count(), "test", "tests"); + ColoredPrintf(COLOR_GREEN, "[----------] "); + printf("%s from %s", counts.c_str(), test_case.name()); + if (test_case.type_param() == NULL) { + printf("\n"); + } else { + printf(", where %s = %s\n", kTypeParamLabel, test_case.type_param()); + } + fflush(stdout); +} + +void PrettyUnitTestResultPrinter::OnTestStart(const TestInfo& test_info) { + ColoredPrintf(COLOR_GREEN, "[ RUN ] "); + PrintTestName(test_info.test_case_name(), test_info.name()); + printf("\n"); + fflush(stdout); +} + +// Called after an assertion failure. +void PrettyUnitTestResultPrinter::OnTestPartResult( + const TestPartResult& result) { + // If the test part succeeded, we don't need to do anything. + if (result.type() == TestPartResult::kSuccess) + return; + + // Print failure message from the assertion (e.g. expected this and got that). 
+
+// Called after an assertion failure.
+void PrettyUnitTestResultPrinter::OnTestPartResult(
+    const TestPartResult& result) {
+  // If the test part succeeded, we don't need to do anything.
+  if (result.type() == TestPartResult::kSuccess)
+    return;
+
+  // Print failure message from the assertion (e.g. expected this and got that).
+  PrintTestPartResult(result);
+  fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestEnd(const TestInfo& test_info) {
+  if (test_info.result()->Passed()) {
+    ColoredPrintf(COLOR_GREEN, "[       OK ] ");
+  } else {
+    ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
+  }
+  PrintTestName(test_info.test_case_name(), test_info.name());
+  if (test_info.result()->Failed())
+    PrintFullTestCommentIfPresent(test_info);
+
+  if (GTEST_FLAG(print_time)) {
+    printf(" (%s ms)\n", internal::StreamableToString(
+           test_info.result()->elapsed_time()).c_str());
+  } else {
+    printf("\n");
+  }
+  fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnTestCaseEnd(const TestCase& test_case) {
+  if (!GTEST_FLAG(print_time)) return;
+
+  const std::string counts =
+      FormatCountableNoun(test_case.test_to_run_count(), "test", "tests");
+  ColoredPrintf(COLOR_GREEN, "[----------] ");
+  printf("%s from %s (%s ms total)\n\n",
+         counts.c_str(), test_case.name(),
+         internal::StreamableToString(test_case.elapsed_time()).c_str());
+  fflush(stdout);
+}
+
+void PrettyUnitTestResultPrinter::OnEnvironmentsTearDownStart(
+    const UnitTest& /*unit_test*/) {
+  ColoredPrintf(COLOR_GREEN, "[----------] ");
+  printf("Global test environment tear-down\n");
+  fflush(stdout);
+}
+
+// Internal helper for printing the list of failed tests.
+void PrettyUnitTestResultPrinter::PrintFailedTests(const UnitTest& unit_test) {
+  const int failed_test_count = unit_test.failed_test_count();
+  if (failed_test_count == 0) {
+    return;
+  }
+
+  for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
+    const TestCase& test_case = *unit_test.GetTestCase(i);
+    if (!test_case.should_run() || (test_case.failed_test_count() == 0)) {
+      continue;
+    }
+    for (int j = 0; j < test_case.total_test_count(); ++j) {
+      const TestInfo& test_info = *test_case.GetTestInfo(j);
+      if (!test_info.should_run() || test_info.result()->Passed()) {
+        continue;
+      }
+      ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
+      printf("%s.%s", test_case.name(), test_info.name());
+      PrintFullTestCommentIfPresent(test_info);
+      printf("\n");
+    }
+  }
+}
+
+void PrettyUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test,
+                                                     int /*iteration*/) {
+  ColoredPrintf(COLOR_GREEN, "[==========] ");
+  printf("%s from %s ran.",
+         FormatTestCount(unit_test.test_to_run_count()).c_str(),
+         FormatTestCaseCount(unit_test.test_case_to_run_count()).c_str());
+  if (GTEST_FLAG(print_time)) {
+    printf(" (%s ms total)",
+           internal::StreamableToString(unit_test.elapsed_time()).c_str());
+  }
+  printf("\n");
+  ColoredPrintf(COLOR_GREEN, "[  PASSED  ] ");
+  printf("%s.\n", FormatTestCount(unit_test.successful_test_count()).c_str());
+
+  int num_failures = unit_test.failed_test_count();
+  if (!unit_test.Passed()) {
+    const int failed_test_count = unit_test.failed_test_count();
+    ColoredPrintf(COLOR_RED, "[  FAILED  ] ");
+    printf("%s, listed below:\n", FormatTestCount(failed_test_count).c_str());
+    PrintFailedTests(unit_test);
+    printf("\n%2d FAILED %s\n", num_failures,
+           num_failures == 1 ? "TEST" : "TESTS");
+  }
+
+  int num_disabled = unit_test.reportable_disabled_test_count();
+  if (num_disabled && !GTEST_FLAG(also_run_disabled_tests)) {
+    if (!num_failures) {
+      printf("\n");  // Add a spacer if no FAILURE banner is displayed.
+    }
+    ColoredPrintf(COLOR_YELLOW,
+                  "  YOU HAVE %d DISABLED %s\n\n",
+                  num_disabled,
+                  num_disabled == 1 ? "TEST" : "TESTS");
+  }
+  // Ensure that Google Test output is printed before, e.g., heapchecker output.
+  fflush(stdout);
+}
+
+// End PrettyUnitTestResultPrinter
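+
+// A minimal sketch of swapping this default printer for a user-defined
+// listener (MyPrinter is hypothetical; Release(), Append() and
+// default_result_printer() are the public TestEventListeners API used
+// later in this file):
+//
+//   testing::TestEventListeners& listeners =
+//       testing::UnitTest::GetInstance()->listeners();
+//   delete listeners.Release(listeners.default_result_printer());
+//   listeners.Append(new MyPrinter);  // the listener list takes ownership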
+
+// class TestEventRepeater
+//
+// This class forwards events to other event listeners.
+class TestEventRepeater : public TestEventListener {
+ public:
+  TestEventRepeater() : forwarding_enabled_(true) {}
+  virtual ~TestEventRepeater();
+  void Append(TestEventListener *listener);
+  TestEventListener* Release(TestEventListener* listener);
+
+  // Controls whether events will be forwarded to listeners_. Set to false
+  // in death test child processes.
+  bool forwarding_enabled() const { return forwarding_enabled_; }
+  void set_forwarding_enabled(bool enable) { forwarding_enabled_ = enable; }
+
+  virtual void OnTestProgramStart(const UnitTest& unit_test);
+  virtual void OnTestIterationStart(const UnitTest& unit_test, int iteration);
+  virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test);
+  virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test);
+  virtual void OnTestCaseStart(const TestCase& test_case);
+  virtual void OnTestStart(const TestInfo& test_info);
+  virtual void OnTestPartResult(const TestPartResult& result);
+  virtual void OnTestEnd(const TestInfo& test_info);
+  virtual void OnTestCaseEnd(const TestCase& test_case);
+  virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test);
+  virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test);
+  virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+  virtual void OnTestProgramEnd(const UnitTest& unit_test);
+
+ private:
+  // Controls whether events will be forwarded to listeners_. Set to false
+  // in death test child processes.
+  bool forwarding_enabled_;
+  // The list of listeners that receive events.
+  std::vector<TestEventListener*> listeners_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventRepeater);
+};
+
+TestEventRepeater::~TestEventRepeater() {
+  ForEach(listeners_, Delete<TestEventListener>);
+}
+
+void TestEventRepeater::Append(TestEventListener *listener) {
+  listeners_.push_back(listener);
+}
+
+// TODO(vladl@google.com): Factor the search functionality into Vector::Find.
+TestEventListener* TestEventRepeater::Release(TestEventListener *listener) {
+  for (size_t i = 0; i < listeners_.size(); ++i) {
+    if (listeners_[i] == listener) {
+      listeners_.erase(listeners_.begin() + i);
+      return listener;
+    }
+  }
+
+  return NULL;
+}
+
+// Since most methods are very similar, use macros to reduce boilerplate.
+// This defines a member that forwards the call to all listeners.
+#define GTEST_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+  if (forwarding_enabled_) { \
+    for (size_t i = 0; i < listeners_.size(); i++) { \
+      listeners_[i]->Name(parameter); \
+    } \
+  } \
+}
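+
+// For example, GTEST_REPEATER_METHOD_(OnTestStart, TestInfo) expands to:
+//
+//   void TestEventRepeater::OnTestStart(const TestInfo& parameter) {
+//     if (forwarding_enabled_) {
+//       for (size_t i = 0; i < listeners_.size(); i++) {
+//         listeners_[i]->OnTestStart(parameter);
+//       }
+//     }
+//   }
+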
+// This defines a member that forwards the call to all listeners in reverse
+// order.
+#define GTEST_REVERSE_REPEATER_METHOD_(Name, Type) \
+void TestEventRepeater::Name(const Type& parameter) { \
+  if (forwarding_enabled_) { \
+    for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) { \
+      listeners_[i]->Name(parameter); \
+    } \
+  } \
+}
+
+GTEST_REPEATER_METHOD_(OnTestProgramStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnEnvironmentsSetUpStart, UnitTest)
+GTEST_REPEATER_METHOD_(OnTestCaseStart, TestCase)
+GTEST_REPEATER_METHOD_(OnTestStart, TestInfo)
+GTEST_REPEATER_METHOD_(OnTestPartResult, TestPartResult)
+GTEST_REPEATER_METHOD_(OnEnvironmentsTearDownStart, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsSetUpEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnEnvironmentsTearDownEnd, UnitTest)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestEnd, TestInfo)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestCaseEnd, TestCase)
+GTEST_REVERSE_REPEATER_METHOD_(OnTestProgramEnd, UnitTest)
+
+#undef GTEST_REPEATER_METHOD_
+#undef GTEST_REVERSE_REPEATER_METHOD_
+
+void TestEventRepeater::OnTestIterationStart(const UnitTest& unit_test,
+                                             int iteration) {
+  if (forwarding_enabled_) {
+    for (size_t i = 0; i < listeners_.size(); i++) {
+      listeners_[i]->OnTestIterationStart(unit_test, iteration);
+    }
+  }
+}
+
+void TestEventRepeater::OnTestIterationEnd(const UnitTest& unit_test,
+                                           int iteration) {
+  if (forwarding_enabled_) {
+    for (int i = static_cast<int>(listeners_.size()) - 1; i >= 0; i--) {
+      listeners_[i]->OnTestIterationEnd(unit_test, iteration);
+    }
+  }
+}
+
+// End TestEventRepeater
+
+// This class generates an XML output file.
+class XmlUnitTestResultPrinter : public EmptyTestEventListener {
+ public:
+  explicit XmlUnitTestResultPrinter(const char* output_file);
+
+  virtual void OnTestIterationEnd(const UnitTest& unit_test, int iteration);
+
+ private:
+  // Is c a whitespace character that is normalized to a space character
+  // when it appears in an XML attribute value?
+  static bool IsNormalizableWhitespace(char c) {
+    return c == 0x9 || c == 0xA || c == 0xD;
+  }
+
+  // May c appear in a well-formed XML document?
+  static bool IsValidXmlCharacter(char c) {
+    return IsNormalizableWhitespace(c) || c >= 0x20;
+  }
+
+  // Returns an XML-escaped copy of the input string str.  If
+  // is_attribute is true, the text is meant to appear as an attribute
+  // value, and normalizable whitespace is preserved by replacing it
+  // with character references.
+  static std::string EscapeXml(const std::string& str, bool is_attribute);
+
+  // Returns the given string with all characters invalid in XML removed.
+  static std::string RemoveInvalidXmlCharacters(const std::string& str);
+
+  // Convenience wrapper around EscapeXml when str is an attribute value.
+  static std::string EscapeXmlAttribute(const std::string& str) {
+    return EscapeXml(str, true);
+  }
+
+  // Convenience wrapper around EscapeXml when str is not an attribute value.
+  static std::string EscapeXmlText(const char* str) {
+    return EscapeXml(str, false);
+  }
+
+  // Verifies that the given attribute belongs to the given element and
+  // streams the attribute as XML.
+  static void OutputXmlAttribute(std::ostream* stream,
+                                 const std::string& element_name,
+                                 const std::string& name,
+                                 const std::string& value);
+
+  // Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+  static void OutputXmlCDataSection(::std::ostream* stream, const char* data);
+
+  // Streams an XML representation of a TestInfo object.
+ static void OutputXmlTestInfo(::std::ostream* stream, + const char* test_case_name, + const TestInfo& test_info); + + // Prints an XML representation of a TestCase object + static void PrintXmlTestCase(::std::ostream* stream, + const TestCase& test_case); + + // Prints an XML summary of unit_test to output stream out. + static void PrintXmlUnitTest(::std::ostream* stream, + const UnitTest& unit_test); + + // Produces a string representing the test properties in a result as space + // delimited XML attributes based on the property key="value" pairs. + // When the std::string is not empty, it includes a space at the beginning, + // to delimit this attribute from prior attributes. + static std::string TestPropertiesAsXmlAttributes(const TestResult& result); + + // The output file. + const std::string output_file_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(XmlUnitTestResultPrinter); +}; + +// Creates a new XmlUnitTestResultPrinter. +XmlUnitTestResultPrinter::XmlUnitTestResultPrinter(const char* output_file) + : output_file_(output_file) { + if (output_file_.c_str() == NULL || output_file_.empty()) { + fprintf(stderr, "XML output file may not be null\n"); + fflush(stderr); + exit(EXIT_FAILURE); + } +} + +// Called after the unit test ends. +void XmlUnitTestResultPrinter::OnTestIterationEnd(const UnitTest& unit_test, + int /*iteration*/) { + FILE* xmlout = NULL; + FilePath output_file(output_file_); + FilePath output_dir(output_file.RemoveFileName()); + + if (output_dir.CreateDirectoriesRecursively()) { + xmlout = posix::FOpen(output_file_.c_str(), "w"); + } + if (xmlout == NULL) { + // TODO(wan): report the reason of the failure. + // + // We don't do it for now as: + // + // 1. There is no urgent need for it. + // 2. It's a bit involved to make the errno variable thread-safe on + // all three operating systems (Linux, Windows, and Mac OS). + // 3. To interpret the meaning of errno in a thread-safe way, + // we need the strerror_r() function, which is not available on + // Windows. + fprintf(stderr, + "Unable to open file \"%s\"\n", + output_file_.c_str()); + fflush(stderr); + exit(EXIT_FAILURE); + } + std::stringstream stream; + PrintXmlUnitTest(&stream, unit_test); + fprintf(xmlout, "%s", StringStreamToString(&stream).c_str()); + fclose(xmlout); +} + +// Returns an XML-escaped copy of the input string str. If is_attribute +// is true, the text is meant to appear as an attribute value, and +// normalizable whitespace is preserved by replacing it with character +// references. +// +// Invalid XML characters in str, if any, are stripped from the output. +// It is expected that most, if not all, of the text processed by this +// module will consist of ordinary English text. +// If this module is ever modified to produce version 1.1 XML output, +// most invalid characters can be retained using character references. +// TODO(wan): It might be nice to have a minimally invasive, human-readable +// escaping scheme for invalid characters, rather than dropping them. 
+std::string XmlUnitTestResultPrinter::EscapeXml(
+    const std::string& str, bool is_attribute) {
+  Message m;
+
+  for (size_t i = 0; i < str.size(); ++i) {
+    const char ch = str[i];
+    switch (ch) {
+      case '<':
+        m << "&lt;";
+        break;
+      case '>':
+        m << "&gt;";
+        break;
+      case '&':
+        m << "&amp;";
+        break;
+      case '\'':
+        if (is_attribute)
+          m << "&apos;";
+        else
+          m << '\'';
+        break;
+      case '"':
+        if (is_attribute)
+          m << "&quot;";
+        else
+          m << '"';
+        break;
+      default:
+        if (IsValidXmlCharacter(ch)) {
+          if (is_attribute && IsNormalizableWhitespace(ch))
+            m << "&#x" << String::FormatByte(static_cast<unsigned char>(ch))
+              << ";";
+          else
+            m << ch;
+        }
+        break;
+    }
+  }
+
+  return m.GetString();
+}
+
+// Returns the given string with all characters invalid in XML removed.
+// Currently invalid characters are dropped from the string. An
+// alternative is to replace them with certain characters such as . or ?.
+std::string XmlUnitTestResultPrinter::RemoveInvalidXmlCharacters(
+    const std::string& str) {
+  std::string output;
+  output.reserve(str.size());
+  for (std::string::const_iterator it = str.begin(); it != str.end(); ++it)
+    if (IsValidXmlCharacter(*it))
+      output.push_back(*it);
+
+  return output;
+}
+
+// The following routines generate an XML representation of a UnitTest
+// object.
+//
+// This is how Google Test concepts map to the DTD:
+//
+// <testsuites name="AllTests">        <-- corresponds to a UnitTest object
+//   <testsuite name="testcase-name">  <-- corresponds to a TestCase object
+//     <testcase name="test-name">     <-- corresponds to a TestInfo object
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//       <failure message="...">...</failure>
+//                                     <-- individual assertion failures
+//     </testcase>
+//   </testsuite>
+// </testsuites>
+
+// Formats the given time in milliseconds as seconds.
+std::string FormatTimeInMillisAsSeconds(TimeInMillis ms) {
+  ::std::stringstream ss;
+  ss << ms/1000.0;
+  return ss.str();
+}
+
+static bool PortableLocaltime(time_t seconds, struct tm* out) {
+#if defined(_MSC_VER)
+  return localtime_s(out, &seconds) == 0;
+#elif defined(__MINGW32__) || defined(__MINGW64__)
+  // MINGW provides neither localtime_r nor localtime_s, but uses
+  // Windows' localtime(), which has a thread-local tm buffer.
+  struct tm* tm_ptr = localtime(&seconds);  // NOLINT
+  if (tm_ptr == NULL)
+    return false;
+  *out = *tm_ptr;
+  return true;
+#else
+  return localtime_r(&seconds, out) != NULL;
+#endif
+}
+
+// Converts the given epoch time in milliseconds to a date string in the ISO
+// 8601 format, without the timezone information.
+std::string FormatEpochTimeInMillisAsIso8601(TimeInMillis ms) {
+  struct tm time_struct;
+  if (!PortableLocaltime(static_cast<time_t>(ms / 1000), &time_struct))
+    return "";
+  // YYYY-MM-DDThh:mm:ss
+  return StreamableToString(time_struct.tm_year + 1900) + "-" +
+      String::FormatIntWidth2(time_struct.tm_mon + 1) + "-" +
+      String::FormatIntWidth2(time_struct.tm_mday) + "T" +
+      String::FormatIntWidth2(time_struct.tm_hour) + ":" +
+      String::FormatIntWidth2(time_struct.tm_min) + ":" +
+      String::FormatIntWidth2(time_struct.tm_sec);
+}
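+
+// For illustration, given the escaping rules above (inputs are hypothetical):
+//
+//   EscapeXml("1 < 2 & \"three\"", true)  yields "1 &lt; 2 &amp; &quot;three&quot;"
+//   EscapeXml("1 < 2 & \"three\"", false) yields "1 &lt; 2 &amp; \"three\""
+//
+// and a tab (0x9) inside an attribute value becomes the character
+// reference "&#x09;".
+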
+// Streams an XML CDATA section, escaping invalid CDATA sequences as needed.
+void XmlUnitTestResultPrinter::OutputXmlCDataSection(::std::ostream* stream,
+                                                     const char* data) {
+  const char* segment = data;
+  *stream << "<![CDATA[";
+  for (;;) {
+    const char* const next_segment = strstr(segment, "]]>");
+    if (next_segment != NULL) {
+      stream->write(
+          segment, static_cast<std::streamsize>(next_segment - segment));
+      *stream << "]]>]]&gt;<![CDATA[";
+      segment = next_segment + strlen("]]>");
+    } else {
+      *stream << segment;
+      break;
+    }
+  }
+  *stream << "]]>";
+}
+
+void XmlUnitTestResultPrinter::OutputXmlAttribute(
+    std::ostream* stream,
+    const std::string& element_name,
+    const std::string& name,
+    const std::string& value) {
+  const std::vector<std::string>& allowed_names =
+      GetReservedAttributesForElement(element_name);
+
+  GTEST_CHECK_(std::find(allowed_names.begin(), allowed_names.end(), name) !=
+                   allowed_names.end())
+      << "Attribute " << name << " is not allowed for element <" << element_name
+      << ">.";
+
+  *stream << " " << name << "=\"" << EscapeXmlAttribute(value) << "\"";
+}
+
+// Prints an XML representation of a TestInfo object.
+// TODO(wan): There is also value in printing properties with the plain printer.
+void XmlUnitTestResultPrinter::OutputXmlTestInfo(::std::ostream* stream,
+                                                 const char* test_case_name,
+                                                 const TestInfo& test_info) {
+  const TestResult& result = *test_info.result();
+  const std::string kTestcase = "testcase";
+
+  *stream << "    <testcase";
+  OutputXmlAttribute(stream, kTestcase, "name", test_info.name());
+
+  if (test_info.value_param() != NULL) {
+    OutputXmlAttribute(stream, kTestcase, "value_param",
+                       test_info.value_param());
+  }
+  if (test_info.type_param() != NULL) {
+    OutputXmlAttribute(stream, kTestcase, "type_param", test_info.type_param());
+  }
+
+  OutputXmlAttribute(stream, kTestcase, "status",
+                     test_info.should_run() ? "run" : "notrun");
+  OutputXmlAttribute(stream, kTestcase, "time",
+                     FormatTimeInMillisAsSeconds(result.elapsed_time()));
+  OutputXmlAttribute(stream, kTestcase, "classname", test_case_name);
+  *stream << TestPropertiesAsXmlAttributes(result);
+
+  int failures = 0;
+  for (int i = 0; i < result.total_part_count(); ++i) {
+    const TestPartResult& part = result.GetTestPartResult(i);
+    if (part.failed()) {
+      if (++failures == 1) {
+        *stream << ">\n";
+      }
+      const string location = internal::FormatCompilerIndependentFileLocation(
+          part.file_name(), part.line_number());
+      const string summary = location + "\n" + part.summary();
+      *stream << "      <failure message=\""
+              << EscapeXmlAttribute(summary.c_str())
+              << "\" type=\"\">";
+      const string detail = location + "\n" + part.message();
+      OutputXmlCDataSection(stream, RemoveInvalidXmlCharacters(detail).c_str());
+      *stream << "</failure>\n";
+    }
+  }
+
+  if (failures == 0)
+    *stream << " />\n";
+  else
+    *stream << "    </testcase>\n";
+}
+
+// Prints an XML representation of a TestCase object
+void XmlUnitTestResultPrinter::PrintXmlTestCase(std::ostream* stream,
+                                                const TestCase& test_case) {
+  const std::string kTestsuite = "testsuite";
+  *stream << "  <" << kTestsuite;
+  OutputXmlAttribute(stream, kTestsuite, "name", test_case.name());
+  OutputXmlAttribute(stream, kTestsuite, "tests",
+                     StreamableToString(test_case.reportable_test_count()));
+  OutputXmlAttribute(stream, kTestsuite, "failures",
+                     StreamableToString(test_case.failed_test_count()));
+  OutputXmlAttribute(
+      stream, kTestsuite, "disabled",
+      StreamableToString(test_case.reportable_disabled_test_count()));
+  OutputXmlAttribute(stream, kTestsuite, "errors", "0");
+  OutputXmlAttribute(stream, kTestsuite, "time",
+                     FormatTimeInMillisAsSeconds(test_case.elapsed_time()));
+  *stream << TestPropertiesAsXmlAttributes(test_case.ad_hoc_test_result())
+          << ">\n";
+
+  for (int i = 0; i < test_case.total_test_count(); ++i) {
+    if (test_case.GetTestInfo(i)->is_reportable())
+      OutputXmlTestInfo(stream, test_case.name(), *test_case.GetTestInfo(i));
+  }
+  *stream << "  </" << kTestsuite << ">\n";
+}
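+
+// Taken together, the two routines above emit markup of the following shape
+// (names and timings are hypothetical):
+//
+//   <testsuite name="FooTest" tests="2" failures="0" disabled="0" errors="0"
+//              time="0.005">
+//     <testcase name="Bar" status="run" time="0.002" classname="FooTest" />
+//     <testcase name="Baz" status="run" time="0.003" classname="FooTest" />
+//   </testsuite>
+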
+// Prints an XML summary of unit_test to output stream out.
+void XmlUnitTestResultPrinter::PrintXmlUnitTest(std::ostream* stream,
+                                                const UnitTest& unit_test) {
+  const std::string kTestsuites = "testsuites";
+
+  *stream << "<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n";
+  *stream << "<" << kTestsuites;
+
+  OutputXmlAttribute(stream, kTestsuites, "tests",
+                     StreamableToString(unit_test.reportable_test_count()));
+  OutputXmlAttribute(stream, kTestsuites, "failures",
+                     StreamableToString(unit_test.failed_test_count()));
+  OutputXmlAttribute(
+      stream, kTestsuites, "disabled",
+      StreamableToString(unit_test.reportable_disabled_test_count()));
+  OutputXmlAttribute(stream, kTestsuites, "errors", "0");
+  OutputXmlAttribute(
+      stream, kTestsuites, "timestamp",
+      FormatEpochTimeInMillisAsIso8601(unit_test.start_timestamp()));
+  OutputXmlAttribute(stream, kTestsuites, "time",
+                     FormatTimeInMillisAsSeconds(unit_test.elapsed_time()));
+
+  if (GTEST_FLAG(shuffle)) {
+    OutputXmlAttribute(stream, kTestsuites, "random_seed",
+                       StreamableToString(unit_test.random_seed()));
+  }
+
+  *stream << TestPropertiesAsXmlAttributes(unit_test.ad_hoc_test_result());
+
+  OutputXmlAttribute(stream, kTestsuites, "name", "AllTests");
+  *stream << ">\n";
+
+  for (int i = 0; i < unit_test.total_test_case_count(); ++i) {
+    if (unit_test.GetTestCase(i)->reportable_test_count() > 0)
+      PrintXmlTestCase(stream, *unit_test.GetTestCase(i));
+  }
+  *stream << "</" << kTestsuites << ">\n";
+}
+
+// Produces a string representing the test properties in a result as space
+// delimited XML attributes based on the property key="value" pairs.
+std::string XmlUnitTestResultPrinter::TestPropertiesAsXmlAttributes(
+    const TestResult& result) {
+  Message attributes;
+  for (int i = 0; i < result.test_property_count(); ++i) {
+    const TestProperty& property = result.GetTestProperty(i);
+    attributes << " " << property.key() << "="
+        << "\"" << EscapeXmlAttribute(property.value()) << "\"";
+  }
+  return attributes.GetString();
+}
+
+// End XmlUnitTestResultPrinter
+
+#if GTEST_CAN_STREAM_RESULTS_
+
+// Checks if str contains '=', '&', '%' or '\n' characters. If yes,
+// replaces them by "%xx" where xx is their hexadecimal value. For
+// example, replaces "=" with "%3D". This algorithm is O(strlen(str))
+// in both time and space -- important as the input str may contain an
+// arbitrarily long test failure message and stack trace.
+string StreamingListener::UrlEncode(const char* str) {
+  string result;
+  result.reserve(strlen(str) + 1);
+  for (char ch = *str; ch != '\0'; ch = *++str) {
+    switch (ch) {
+      case '%':
+      case '=':
+      case '&':
+      case '\n':
+        result.append("%" + String::FormatByte(static_cast<unsigned char>(ch)));
+        break;
+      default:
+        result.push_back(ch);
+        break;
+    }
+  }
+  return result;
+}
+
+void StreamingListener::SocketWriter::MakeConnection() {
+  GTEST_CHECK_(sockfd_ == -1)
+      << "MakeConnection() can't be called when there is already a connection.";
+
+  addrinfo hints;
+  memset(&hints, 0, sizeof(hints));
+  hints.ai_family = AF_UNSPEC;    // To allow both IPv4 and IPv6 addresses.
+  hints.ai_socktype = SOCK_STREAM;
+  addrinfo* servinfo = NULL;
+
+  // Use the getaddrinfo() to get a linked list of IP addresses for
+  // the given host name.
+  const int error_num = getaddrinfo(
+      host_name_.c_str(), port_num_.c_str(), &hints, &servinfo);
+  if (error_num != 0) {
+    GTEST_LOG_(WARNING) << "stream_result_to: getaddrinfo() failed: "
+                        << gai_strerror(error_num);
+  }
+
+  // Loop through all the results and connect to the first we can.
+ for (addrinfo* cur_addr = servinfo; sockfd_ == -1 && cur_addr != NULL; + cur_addr = cur_addr->ai_next) { + sockfd_ = socket( + cur_addr->ai_family, cur_addr->ai_socktype, cur_addr->ai_protocol); + if (sockfd_ != -1) { + // Connect the client socket to the server socket. + if (connect(sockfd_, cur_addr->ai_addr, cur_addr->ai_addrlen) == -1) { + close(sockfd_); + sockfd_ = -1; + } + } + } + + freeaddrinfo(servinfo); // all done with this structure + + if (sockfd_ == -1) { + GTEST_LOG_(WARNING) << "stream_result_to: failed to connect to " + << host_name_ << ":" << port_num_; + } +} + +// End of class Streaming Listener +#endif // GTEST_CAN_STREAM_RESULTS__ + +// Class ScopedTrace + +// Pushes the given source file location and message onto a per-thread +// trace stack maintained by Google Test. +ScopedTrace::ScopedTrace(const char* file, int line, const Message& message) + GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) { + TraceInfo trace; + trace.file = file; + trace.line = line; + trace.message = message.GetString(); + + UnitTest::GetInstance()->PushGTestTrace(trace); +} + +// Pops the info pushed by the c'tor. +ScopedTrace::~ScopedTrace() + GTEST_LOCK_EXCLUDED_(&UnitTest::mutex_) { + UnitTest::GetInstance()->PopGTestTrace(); +} + + +// class OsStackTraceGetter + +// Returns the current OS stack trace as an std::string. Parameters: +// +// max_depth - the maximum number of stack frames to be included +// in the trace. +// skip_count - the number of top frames to be skipped; doesn't count +// against max_depth. +// +string OsStackTraceGetter::CurrentStackTrace(int /* max_depth */, + int /* skip_count */) + GTEST_LOCK_EXCLUDED_(mutex_) { + return ""; +} + +void OsStackTraceGetter::UponLeavingGTest() + GTEST_LOCK_EXCLUDED_(mutex_) { +} + +const char* const +OsStackTraceGetter::kElidedFramesMarker = + "... " GTEST_NAME_ " internal frames ..."; + +// A helper class that creates the premature-exit file in its +// constructor and deletes the file in its destructor. +class ScopedPrematureExitFile { + public: + explicit ScopedPrematureExitFile(const char* premature_exit_filepath) + : premature_exit_filepath_(premature_exit_filepath) { + // If a path to the premature-exit file is specified... + if (premature_exit_filepath != NULL && *premature_exit_filepath != '\0') { + // create the file with a single "0" character in it. I/O + // errors are ignored as there's nothing better we can do and we + // don't want to fail the test because of this. + FILE* pfile = posix::FOpen(premature_exit_filepath, "w"); + fwrite("0", 1, 1, pfile); + fclose(pfile); + } + } + + ~ScopedPrematureExitFile() { + if (premature_exit_filepath_ != NULL && *premature_exit_filepath_ != '\0') { + remove(premature_exit_filepath_); + } + } + + private: + const char* const premature_exit_filepath_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedPrematureExitFile); +}; + +} // namespace internal + +// class TestEventListeners + +TestEventListeners::TestEventListeners() + : repeater_(new internal::TestEventRepeater()), + default_result_printer_(NULL), + default_xml_generator_(NULL) { +} + +TestEventListeners::~TestEventListeners() { delete repeater_; } + +// Returns the standard listener responsible for the default console +// output. Can be removed from the listeners list to shut down default +// console output. Note that removing this object from the listener list +// with Release transfers its ownership to the user. 
+void TestEventListeners::Append(TestEventListener* listener) { + repeater_->Append(listener); +} + +// Removes the given event listener from the list and returns it. It then +// becomes the caller's responsibility to delete the listener. Returns +// NULL if the listener is not found in the list. +TestEventListener* TestEventListeners::Release(TestEventListener* listener) { + if (listener == default_result_printer_) + default_result_printer_ = NULL; + else if (listener == default_xml_generator_) + default_xml_generator_ = NULL; + return repeater_->Release(listener); +} + +// Returns repeater that broadcasts the TestEventListener events to all +// subscribers. +TestEventListener* TestEventListeners::repeater() { return repeater_; } + +// Sets the default_result_printer attribute to the provided listener. +// The listener is also added to the listener list and previous +// default_result_printer is removed from it and deleted. The listener can +// also be NULL in which case it will not be added to the list. Does +// nothing if the previous and the current listener objects are the same. +void TestEventListeners::SetDefaultResultPrinter(TestEventListener* listener) { + if (default_result_printer_ != listener) { + // It is an error to pass this method a listener that is already in the + // list. + delete Release(default_result_printer_); + default_result_printer_ = listener; + if (listener != NULL) + Append(listener); + } +} + +// Sets the default_xml_generator attribute to the provided listener. The +// listener is also added to the listener list and previous +// default_xml_generator is removed from it and deleted. The listener can +// also be NULL in which case it will not be added to the list. Does +// nothing if the previous and the current listener objects are the same. +void TestEventListeners::SetDefaultXmlGenerator(TestEventListener* listener) { + if (default_xml_generator_ != listener) { + // It is an error to pass this method a listener that is already in the + // list. + delete Release(default_xml_generator_); + default_xml_generator_ = listener; + if (listener != NULL) + Append(listener); + } +} + +// Controls whether events will be forwarded by the repeater to the +// listeners in the list. +bool TestEventListeners::EventForwardingEnabled() const { + return repeater_->forwarding_enabled(); +} + +void TestEventListeners::SuppressEventForwarding() { + repeater_->set_forwarding_enabled(false); +} + +// class UnitTest + +// Gets the singleton UnitTest object. The first time this method is +// called, a UnitTest object is constructed and returned. Consecutive +// calls will return the same object. +// +// We don't protect this under mutex_ as a user is not supposed to +// call this before main() starts, from which point on the return +// value will never change. +UnitTest* UnitTest::GetInstance() { + // When compiled with MSVC 7.1 in optimized mode, destroying the + // UnitTest object upon exiting the program messes up the exit code, + // causing successful tests to appear failed. We have to use a + // different implementation in this case to bypass the compiler bug. + // This implementation makes the compiler happy, at the cost of + // leaking the UnitTest object. + + // CodeGear C++Builder insists on a public destructor for the + // default implementation. Use this implementation to keep good OO + // design with private destructor. 
+ +#if (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__) + static UnitTest* const instance = new UnitTest; + return instance; +#else + static UnitTest instance; + return &instance; +#endif // (_MSC_VER == 1310 && !defined(_DEBUG)) || defined(__BORLANDC__) +} + +// Gets the number of successful test cases. +int UnitTest::successful_test_case_count() const { + return impl()->successful_test_case_count(); +} + +// Gets the number of failed test cases. +int UnitTest::failed_test_case_count() const { + return impl()->failed_test_case_count(); +} + +// Gets the number of all test cases. +int UnitTest::total_test_case_count() const { + return impl()->total_test_case_count(); +} + +// Gets the number of all test cases that contain at least one test +// that should run. +int UnitTest::test_case_to_run_count() const { + return impl()->test_case_to_run_count(); +} + +// Gets the number of successful tests. +int UnitTest::successful_test_count() const { + return impl()->successful_test_count(); +} + +// Gets the number of failed tests. +int UnitTest::failed_test_count() const { return impl()->failed_test_count(); } + +// Gets the number of disabled tests that will be reported in the XML report. +int UnitTest::reportable_disabled_test_count() const { + return impl()->reportable_disabled_test_count(); +} + +// Gets the number of disabled tests. +int UnitTest::disabled_test_count() const { + return impl()->disabled_test_count(); +} + +// Gets the number of tests to be printed in the XML report. +int UnitTest::reportable_test_count() const { + return impl()->reportable_test_count(); +} + +// Gets the number of all tests. +int UnitTest::total_test_count() const { return impl()->total_test_count(); } + +// Gets the number of tests that should run. +int UnitTest::test_to_run_count() const { return impl()->test_to_run_count(); } + +// Gets the time of the test program start, in ms from the start of the +// UNIX epoch. +internal::TimeInMillis UnitTest::start_timestamp() const { + return impl()->start_timestamp(); +} + +// Gets the elapsed time, in milliseconds. +internal::TimeInMillis UnitTest::elapsed_time() const { + return impl()->elapsed_time(); +} + +// Returns true iff the unit test passed (i.e. all test cases passed). +bool UnitTest::Passed() const { return impl()->Passed(); } + +// Returns true iff the unit test failed (i.e. some test case failed +// or something outside of all tests failed). +bool UnitTest::Failed() const { return impl()->Failed(); } + +// Gets the i-th test case among all the test cases. i can range from 0 to +// total_test_case_count() - 1. If i is not in that range, returns NULL. +const TestCase* UnitTest::GetTestCase(int i) const { + return impl()->GetTestCase(i); +} + +// Returns the TestResult containing information on test failures and +// properties logged outside of individual test cases. +const TestResult& UnitTest::ad_hoc_test_result() const { + return *impl()->ad_hoc_test_result(); +} + +// Gets the i-th test case among all the test cases. i can range from 0 to +// total_test_case_count() - 1. If i is not in that range, returns NULL. +TestCase* UnitTest::GetMutableTestCase(int i) { + return impl()->GetMutableTestCase(i); +} + +// Returns the list of event listeners that can be used to track events +// inside Google Test. +TestEventListeners& UnitTest::listeners() { + return *impl()->listeners(); +} + +// Registers and returns a global test environment. 
When a test
+// program is run, all global test environments will be set-up in the
+// order they were registered.  After all tests in the program have
+// finished, all global test environments will be torn-down in the
+// *reverse* order they were registered.
+//
+// The UnitTest object takes ownership of the given environment.
+//
+// We don't protect this under mutex_, as we only support calling it
+// from the main thread.
+Environment* UnitTest::AddEnvironment(Environment* env) {
+  if (env == NULL) {
+    return NULL;
+  }
+
+  impl_->environments().push_back(env);
+  return env;
+}
+
+// Adds a TestPartResult to the current TestResult object.  All Google Test
+// assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc) eventually call
+// this to report their results.  The user code should use the
+// assertion macros instead of calling this directly.
+void UnitTest::AddTestPartResult(
+    TestPartResult::Type result_type,
+    const char* file_name,
+    int line_number,
+    const std::string& message,
+    const std::string& os_stack_trace) GTEST_LOCK_EXCLUDED_(mutex_) {
+  Message msg;
+  msg << message;
+
+  internal::MutexLock lock(&mutex_);
+  if (impl_->gtest_trace_stack().size() > 0) {
+    msg << "\n" << GTEST_NAME_ << " trace:";
+
+    for (int i = static_cast<int>(impl_->gtest_trace_stack().size());
+         i > 0; --i) {
+      const internal::TraceInfo& trace = impl_->gtest_trace_stack()[i - 1];
+      msg << "\n" << internal::FormatFileLocation(trace.file, trace.line)
+          << " " << trace.message;
+    }
+  }
+
+  if (os_stack_trace.c_str() != NULL && !os_stack_trace.empty()) {
+    msg << internal::kStackTraceMarker << os_stack_trace;
+  }
+
+  const TestPartResult result =
+    TestPartResult(result_type, file_name, line_number,
+                   msg.GetString().c_str());
+  impl_->GetTestPartResultReporterForCurrentThread()->
+      ReportTestPartResult(result);
+
+  if (result_type != TestPartResult::kSuccess) {
+    // gtest_break_on_failure takes precedence over
+    // gtest_throw_on_failure.  This allows a user to set the latter
+    // in the code (perhaps in order to use Google Test assertions
+    // with another testing framework) and specify the former on the
+    // command line for debugging.
+    if (GTEST_FLAG(break_on_failure)) {
+#if GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+      // Using DebugBreak on Windows allows gtest to still break into a debugger
+      // when a failure happens and both the --gtest_break_on_failure and
+      // the --gtest_catch_exceptions flags are specified.
+      DebugBreak();
+#else
+      // Dereference NULL through a volatile pointer to prevent the compiler
+      // from removing. We use this rather than abort() or __builtin_trap() for
+      // portability: Symbian doesn't implement abort() well, and some debuggers
+      // don't correctly trap abort().
+      *static_cast<volatile int*>(NULL) = 1;
+#endif  // GTEST_OS_WINDOWS
+    } else if (GTEST_FLAG(throw_on_failure)) {
+#if GTEST_HAS_EXCEPTIONS
+      throw internal::GoogleTestFailureException(result);
+#else
+      // We cannot call abort() as it generates a pop-up in debug mode
+      // that cannot be suppressed in VC 7.1 or below.
+      exit(1);
+#endif
+    }
+  }
+}
+
+// Adds a TestProperty to the current TestResult object when invoked from
+// inside a test, to current TestCase's ad_hoc_test_result_ when invoked
+// from SetUpTestCase or TearDownTestCase, or to the global property set
+// when invoked elsewhere.  If the result already contains a property with
+// the same key, the value will be updated.
+void UnitTest::RecordProperty(const std::string& key, + const std::string& value) { + impl_->RecordProperty(TestProperty(key, value)); +} + +// Runs all tests in this UnitTest object and prints the result. +// Returns 0 if successful, or 1 otherwise. +// +// We don't protect this under mutex_, as we only support calling it +// from the main thread. +int UnitTest::Run() { + const bool in_death_test_child_process = + internal::GTEST_FLAG(internal_run_death_test).length() > 0; + + // Google Test implements this protocol for catching that a test + // program exits before returning control to Google Test: + // + // 1. Upon start, Google Test creates a file whose absolute path + // is specified by the environment variable + // TEST_PREMATURE_EXIT_FILE. + // 2. When Google Test has finished its work, it deletes the file. + // + // This allows a test runner to set TEST_PREMATURE_EXIT_FILE before + // running a Google-Test-based test program and check the existence + // of the file at the end of the test execution to see if it has + // exited prematurely. + + // If we are in the child process of a death test, don't + // create/delete the premature exit file, as doing so is unnecessary + // and will confuse the parent process. Otherwise, create/delete + // the file upon entering/leaving this function. If the program + // somehow exits before this function has a chance to return, the + // premature-exit file will be left undeleted, causing a test runner + // that understands the premature-exit-file protocol to report the + // test as having failed. + const internal::ScopedPrematureExitFile premature_exit_file( + in_death_test_child_process ? + NULL : internal::posix::GetEnv("TEST_PREMATURE_EXIT_FILE")); + + // Captures the value of GTEST_FLAG(catch_exceptions). This value will be + // used for the duration of the program. + impl()->set_catch_exceptions(GTEST_FLAG(catch_exceptions)); + +#if GTEST_HAS_SEH + // Either the user wants Google Test to catch exceptions thrown by the + // tests or this is executing in the context of death test child + // process. In either case the user does not want to see pop-up dialogs + // about crashes - they are expected. + if (impl()->catch_exceptions() || in_death_test_child_process) { +# if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT + // SetErrorMode doesn't exist on CE. + SetErrorMode(SEM_FAILCRITICALERRORS | SEM_NOALIGNMENTFAULTEXCEPT | + SEM_NOGPFAULTERRORBOX | SEM_NOOPENFILEERRORBOX); +# endif // !GTEST_OS_WINDOWS_MOBILE + +# if (defined(_MSC_VER) || GTEST_OS_WINDOWS_MINGW) && !GTEST_OS_WINDOWS_MOBILE + // Death test children can be terminated with _abort(). On Windows, + // _abort() can show a dialog with a warning message. This forces the + // abort message to go to stderr instead. + _set_error_mode(_OUT_TO_STDERR); +# endif + +# if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE + // In the debug version, Visual Studio pops up a separate dialog + // offering a choice to debug the aborted program. We need to suppress + // this dialog or it will pop up for every EXPECT/ASSERT_DEATH statement + // executed. Google Test will notify the user of any unexpected + // failure via stderr. + // + // VC++ doesn't define _set_abort_behavior() prior to the version 8.0. + // Users of prior VC versions shall suffer the agony and pain of + // clicking through the countless debug dialogs. + // TODO(vladl@google.com): find a way to suppress the abort dialog() in the + // debug mode when compiled with VC 7.1 or lower. 
+ if (!GTEST_FLAG(break_on_failure)) + _set_abort_behavior( + 0x0, // Clear the following flags: + _WRITE_ABORT_MSG | _CALL_REPORTFAULT); // pop-up window, core dump. +# endif + } +#endif // GTEST_HAS_SEH + + return internal::HandleExceptionsInMethodIfSupported( + impl(), + &internal::UnitTestImpl::RunAllTests, + "auxiliary test code (environments or event listeners)") ? 0 : 1; +} + +// Returns the working directory when the first TEST() or TEST_F() was +// executed. +const char* UnitTest::original_working_dir() const { + return impl_->original_working_dir_.c_str(); +} + +// Returns the TestCase object for the test that's currently running, +// or NULL if no test is running. +const TestCase* UnitTest::current_test_case() const + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + return impl_->current_test_case(); +} + +// Returns the TestInfo object for the test that's currently running, +// or NULL if no test is running. +const TestInfo* UnitTest::current_test_info() const + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + return impl_->current_test_info(); +} + +// Returns the random seed used at the start of the current test run. +int UnitTest::random_seed() const { return impl_->random_seed(); } + +#if GTEST_HAS_PARAM_TEST +// Returns ParameterizedTestCaseRegistry object used to keep track of +// value-parameterized tests and instantiate and register them. +internal::ParameterizedTestCaseRegistry& + UnitTest::parameterized_test_registry() + GTEST_LOCK_EXCLUDED_(mutex_) { + return impl_->parameterized_test_registry(); +} +#endif // GTEST_HAS_PARAM_TEST + +// Creates an empty UnitTest. +UnitTest::UnitTest() { + impl_ = new internal::UnitTestImpl(this); +} + +// Destructor of UnitTest. +UnitTest::~UnitTest() { + delete impl_; +} + +// Pushes a trace defined by SCOPED_TRACE() on to the per-thread +// Google Test trace stack. +void UnitTest::PushGTestTrace(const internal::TraceInfo& trace) + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + impl_->gtest_trace_stack().push_back(trace); +} + +// Pops a trace from the per-thread Google Test trace stack. +void UnitTest::PopGTestTrace() + GTEST_LOCK_EXCLUDED_(mutex_) { + internal::MutexLock lock(&mutex_); + impl_->gtest_trace_stack().pop_back(); +} + +namespace internal { + +UnitTestImpl::UnitTestImpl(UnitTest* parent) + : parent_(parent), + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4355 /* using this in initializer */) + default_global_test_part_result_reporter_(this), + default_per_thread_test_part_result_reporter_(this), + GTEST_DISABLE_MSC_WARNINGS_POP_() + global_test_part_result_repoter_( + &default_global_test_part_result_reporter_), + per_thread_test_part_result_reporter_( + &default_per_thread_test_part_result_reporter_), +#if GTEST_HAS_PARAM_TEST + parameterized_test_registry_(), + parameterized_tests_registered_(false), +#endif // GTEST_HAS_PARAM_TEST + last_death_test_case_(-1), + current_test_case_(NULL), + current_test_info_(NULL), + ad_hoc_test_result_(), + os_stack_trace_getter_(NULL), + post_flag_parse_init_performed_(false), + random_seed_(0), // Will be overridden by the flag before first use. + random_(0), // Will be reseeded before first use. + start_timestamp_(0), + elapsed_time_(0), +#if GTEST_HAS_DEATH_TEST + death_test_factory_(new DefaultDeathTestFactory), +#endif + // Will be overridden by the flag before first use. 
+      catch_exceptions_(false) {
+  listeners()->SetDefaultResultPrinter(new PrettyUnitTestResultPrinter);
+}
+
+UnitTestImpl::~UnitTestImpl() {
+  // Deletes every TestCase.
+  ForEach(test_cases_, internal::Delete<TestCase>);
+
+  // Deletes every Environment.
+  ForEach(environments_, internal::Delete<Environment>);
+
+  delete os_stack_trace_getter_;
+}
+
+// Adds a TestProperty to the current TestResult object when invoked in a
+// context of a test, to current test case's ad_hoc_test_result when invoked
+// from SetUpTestCase/TearDownTestCase, or to the global property set
+// otherwise.  If the result already contains a property with the same key,
+// the value will be updated.
+void UnitTestImpl::RecordProperty(const TestProperty& test_property) {
+  std::string xml_element;
+  TestResult* test_result;  // TestResult appropriate for property recording.
+
+  if (current_test_info_ != NULL) {
+    xml_element = "testcase";
+    test_result = &(current_test_info_->result_);
+  } else if (current_test_case_ != NULL) {
+    xml_element = "testsuite";
+    test_result = &(current_test_case_->ad_hoc_test_result_);
+  } else {
+    xml_element = "testsuites";
+    test_result = &ad_hoc_test_result_;
+  }
+  test_result->RecordProperty(xml_element, test_property);
+}
+
+#if GTEST_HAS_DEATH_TEST
+// Disables event forwarding if the control is currently in a death test
+// subprocess. Must not be called before InitGoogleTest.
+void UnitTestImpl::SuppressTestEventsIfInSubprocess() {
+  if (internal_run_death_test_flag_.get() != NULL)
+    listeners()->SuppressEventForwarding();
+}
+#endif  // GTEST_HAS_DEATH_TEST
+
+// Initializes event listeners performing XML output as specified by
+// UnitTestOptions. Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureXmlOutput() {
+  const std::string& output_format = UnitTestOptions::GetOutputFormat();
+  if (output_format == "xml") {
+    listeners()->SetDefaultXmlGenerator(new XmlUnitTestResultPrinter(
+        UnitTestOptions::GetAbsolutePathToOutputFile().c_str()));
+  } else if (output_format != "") {
+    printf("WARNING: unrecognized output format \"%s\" ignored.\n",
+           output_format.c_str());
+    fflush(stdout);
+  }
+}
+
+#if GTEST_CAN_STREAM_RESULTS_
+// Initializes event listeners for streaming test results in string form.
+// Must not be called before InitGoogleTest.
+void UnitTestImpl::ConfigureStreamingOutput() {
+  const std::string& target = GTEST_FLAG(stream_result_to);
+  if (!target.empty()) {
+    const size_t pos = target.find(':');
+    if (pos != std::string::npos) {
+      listeners()->Append(new StreamingListener(target.substr(0, pos),
+                                                target.substr(pos+1)));
+    } else {
+      printf("WARNING: unrecognized streaming target \"%s\" ignored.\n",
+             target.c_str());
+      fflush(stdout);
+    }
+  }
+}
+#endif  // GTEST_CAN_STREAM_RESULTS_
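+
+// The streaming target is expected to take the form "host:port", e.g.
+// (binary name, host and port below are hypothetical):
+//
+//   ./unit_tests --gtest_stream_result_to=localhost:9091
+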
+// Performs initialization dependent upon flag values obtained in
+// ParseGoogleTestFlagsOnly.  Is called from InitGoogleTest after the call to
+// ParseGoogleTestFlagsOnly.  In case a user neglects to call InitGoogleTest
+// this function is also called from RunAllTests.  Since this function can be
+// called more than once, it has to be idempotent.
+void UnitTestImpl::PostFlagParsingInit() {
+  // Ensures that this function does not execute more than once.
+  if (!post_flag_parse_init_performed_) {
+    post_flag_parse_init_performed_ = true;
+
+#if GTEST_HAS_DEATH_TEST
+    InitDeathTestSubprocessControlInfo();
+    SuppressTestEventsIfInSubprocess();
+#endif  // GTEST_HAS_DEATH_TEST
+
+    // Registers parameterized tests.  This makes parameterized tests
+    // available to the UnitTest reflection API without running
+    // RUN_ALL_TESTS.
+    RegisterParameterizedTests();
+
+    // Configures listeners for XML output.  This makes it possible for users
+    // to shut down the default XML output before invoking RUN_ALL_TESTS.
+    ConfigureXmlOutput();
+
+#if GTEST_CAN_STREAM_RESULTS_
+    // Configures listeners for streaming test results to the specified server.
+    ConfigureStreamingOutput();
+#endif  // GTEST_CAN_STREAM_RESULTS_
+  }
+}
+
+// A predicate that checks the name of a TestCase against a known
+// value.
+//
+// This is used for implementation of the UnitTest class only.  We put
+// it in the anonymous namespace to prevent polluting the outer
+// namespace.
+//
+// TestCaseNameIs is copyable.
+class TestCaseNameIs {
+ public:
+  // Constructor.
+  explicit TestCaseNameIs(const std::string& name)
+      : name_(name) {}
+
+  // Returns true iff the name of test_case matches name_.
+  bool operator()(const TestCase* test_case) const {
+    return test_case != NULL && strcmp(test_case->name(), name_.c_str()) == 0;
+  }
+
+ private:
+  std::string name_;
+};
+
+// Finds and returns a TestCase with the given name.  If one doesn't
+// exist, creates one and returns it.  It's the CALLER'S
+// RESPONSIBILITY to ensure that this function is only called WHEN THE
+// TESTS ARE NOT SHUFFLED.
+//
+// Arguments:
+//
+//   test_case_name: name of the test case
+//   type_param:     the name of the test case's type parameter, or NULL if
+//                   this is not a typed or a type-parameterized test case.
+//   set_up_tc:      pointer to the function that sets up the test case
+//   tear_down_tc:   pointer to the function that tears down the test case
+TestCase* UnitTestImpl::GetTestCase(const char* test_case_name,
+                                    const char* type_param,
+                                    Test::SetUpTestCaseFunc set_up_tc,
+                                    Test::TearDownTestCaseFunc tear_down_tc) {
+  // Can we find a TestCase with the given name?
+  const std::vector<TestCase*>::const_iterator test_case =
+      std::find_if(test_cases_.begin(), test_cases_.end(),
+                   TestCaseNameIs(test_case_name));
+
+  if (test_case != test_cases_.end())
+    return *test_case;
+
+  // No.  Let's create one.
+  TestCase* const new_test_case =
+      new TestCase(test_case_name, type_param, set_up_tc, tear_down_tc);
+
+  // Is this a death test case?
+  if (internal::UnitTestOptions::MatchesFilter(test_case_name,
+                                               kDeathTestCaseFilter)) {
+    // Yes.  Inserts the test case after the last death test case
+    // defined so far.  This only works when the test cases haven't
+    // been shuffled.  Otherwise we may end up running a death test
+    // after a non-death test.
+    ++last_death_test_case_;
+    test_cases_.insert(test_cases_.begin() + last_death_test_case_,
+                       new_test_case);
+  } else {
+    // No.  Appends to the end of the list.
+    test_cases_.push_back(new_test_case);
+  }
+
+  test_case_indices_.push_back(static_cast<int>(test_case_indices_.size()));
+  return new_test_case;
+}
+
+// Helpers for setting up / tearing down the given environment.  They
+// are for use in the ForEach() function.
+static void SetUpEnvironment(Environment* env) { env->SetUp(); }
+static void TearDownEnvironment(Environment* env) { env->TearDown(); }
+
+// Runs all tests in this UnitTest object, prints the result, and
+// returns true if all tests are successful.  If any exception is
+// thrown during a test, the test is considered to be failed, but the
+// rest of the tests will still be run.
+//
+// When parameterized tests are enabled, it expands and registers
+// parameterized tests first in RegisterParameterizedTests().
+// All other functions called from RunAllTests() may safely assume that +// parameterized tests are ready to be counted and run. +bool UnitTestImpl::RunAllTests() { + // Makes sure InitGoogleTest() was called. + if (!GTestIsInitialized()) { + printf("%s", + "\nThis test program did NOT call ::testing::InitGoogleTest " + "before calling RUN_ALL_TESTS(). Please fix it.\n"); + return false; + } + + // Do not run any test if the --help flag was specified. + if (g_help_flag) + return true; + + // Repeats the call to the post-flag parsing initialization in case the + // user didn't call InitGoogleTest. + PostFlagParsingInit(); + + // Even if sharding is not on, test runners may want to use the + // GTEST_SHARD_STATUS_FILE to query whether the test supports the sharding + // protocol. + internal::WriteToShardStatusFileIfNeeded(); + + // True iff we are in a subprocess for running a thread-safe-style + // death test. + bool in_subprocess_for_death_test = false; + +#if GTEST_HAS_DEATH_TEST + in_subprocess_for_death_test = (internal_run_death_test_flag_.get() != NULL); +#endif // GTEST_HAS_DEATH_TEST + + const bool should_shard = ShouldShard(kTestTotalShards, kTestShardIndex, + in_subprocess_for_death_test); + + // Compares the full test names with the filter to decide which + // tests to run. + const bool has_tests_to_run = FilterTests(should_shard + ? HONOR_SHARDING_PROTOCOL + : IGNORE_SHARDING_PROTOCOL) > 0; + + // Lists the tests and exits if the --gtest_list_tests flag was specified. + if (GTEST_FLAG(list_tests)) { + // This must be called *after* FilterTests() has been called. + ListTestsMatchingFilter(); + return true; + } + + random_seed_ = GTEST_FLAG(shuffle) ? + GetRandomSeedFromFlag(GTEST_FLAG(random_seed)) : 0; + + // True iff at least one test has failed. + bool failed = false; + + TestEventListener* repeater = listeners()->repeater(); + + start_timestamp_ = GetTimeInMillis(); + repeater->OnTestProgramStart(*parent_); + + // How many times to repeat the tests? We don't want to repeat them + // when we are inside the subprocess of a death test. + const int repeat = in_subprocess_for_death_test ? 1 : GTEST_FLAG(repeat); + // Repeats forever if the repeat count is negative. + const bool forever = repeat < 0; + for (int i = 0; forever || i != repeat; i++) { + // We want to preserve failures generated by ad-hoc test + // assertions executed before RUN_ALL_TESTS(). + ClearNonAdHocTestResult(); + + const TimeInMillis start = GetTimeInMillis(); + + // Shuffles test cases and tests if requested. + if (has_tests_to_run && GTEST_FLAG(shuffle)) { + random()->Reseed(random_seed_); + // This should be done before calling OnTestIterationStart(), + // such that a test event listener can see the actual test order + // in the event. + ShuffleTests(); + } + + // Tells the unit test event listeners that the tests are about to start. + repeater->OnTestIterationStart(*parent_, i); + + // Runs each test case if there is at least one test to run. + if (has_tests_to_run) { + // Sets up all environments beforehand. + repeater->OnEnvironmentsSetUpStart(*parent_); + ForEach(environments_, SetUpEnvironment); + repeater->OnEnvironmentsSetUpEnd(*parent_); + + // Runs the tests only if there was no fatal failure during global + // set-up. + if (!Test::HasFatalFailure()) { + for (int test_index = 0; test_index < total_test_case_count(); + test_index++) { + GetMutableTestCase(test_index)->Run(); + } + } + + // Tears down all environments in reverse order afterwards. 
+ repeater->OnEnvironmentsTearDownStart(*parent_); + std::for_each(environments_.rbegin(), environments_.rend(), + TearDownEnvironment); + repeater->OnEnvironmentsTearDownEnd(*parent_); + } + + elapsed_time_ = GetTimeInMillis() - start; + + // Tells the unit test event listener that the tests have just finished. + repeater->OnTestIterationEnd(*parent_, i); + + // Gets the result and clears it. + if (!Passed()) { + failed = true; + } + + // Restores the original test order after the iteration. This + // allows the user to quickly repro a failure that happens in the + // N-th iteration without repeating the first (N - 1) iterations. + // This is not enclosed in "if (GTEST_FLAG(shuffle)) { ... }", in + // case the user somehow changes the value of the flag somewhere + // (it's always safe to unshuffle the tests). + UnshuffleTests(); + + if (GTEST_FLAG(shuffle)) { + // Picks a new random seed for each iteration. + random_seed_ = GetNextRandomSeed(random_seed_); + } + } + + repeater->OnTestProgramEnd(*parent_); + + return !failed; +} + +// Reads the GTEST_SHARD_STATUS_FILE environment variable, and creates the file +// if the variable is present. If a file already exists at this location, this +// function will write over it. If the variable is present, but the file cannot +// be created, prints an error and exits. +void WriteToShardStatusFileIfNeeded() { + const char* const test_shard_file = posix::GetEnv(kTestShardStatusFile); + if (test_shard_file != NULL) { + FILE* const file = posix::FOpen(test_shard_file, "w"); + if (file == NULL) { + ColoredPrintf(COLOR_RED, + "Could not write to the test shard status file \"%s\" " + "specified by the %s environment variable.\n", + test_shard_file, kTestShardStatusFile); + fflush(stdout); + exit(EXIT_FAILURE); + } + fclose(file); + } +} + +// Checks whether sharding is enabled by examining the relevant +// environment variable values. If the variables are present, +// but inconsistent (i.e., shard_index >= total_shards), prints +// an error and exits. If in_subprocess_for_death_test, sharding is +// disabled because it must only be applied to the original test +// process. Otherwise, we could filter out death tests we intended to execute. 
+bool ShouldShard(const char* total_shards_env, + const char* shard_index_env, + bool in_subprocess_for_death_test) { + if (in_subprocess_for_death_test) { + return false; + } + + const Int32 total_shards = Int32FromEnvOrDie(total_shards_env, -1); + const Int32 shard_index = Int32FromEnvOrDie(shard_index_env, -1); + + if (total_shards == -1 && shard_index == -1) { + return false; + } else if (total_shards == -1 && shard_index != -1) { + const Message msg = Message() + << "Invalid environment variables: you have " + << kTestShardIndex << " = " << shard_index + << ", but have left " << kTestTotalShards << " unset.\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } else if (total_shards != -1 && shard_index == -1) { + const Message msg = Message() + << "Invalid environment variables: you have " + << kTestTotalShards << " = " << total_shards + << ", but have left " << kTestShardIndex << " unset.\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } else if (shard_index < 0 || shard_index >= total_shards) { + const Message msg = Message() + << "Invalid environment variables: we require 0 <= " + << kTestShardIndex << " < " << kTestTotalShards + << ", but you have " << kTestShardIndex << "=" << shard_index + << ", " << kTestTotalShards << "=" << total_shards << ".\n"; + ColoredPrintf(COLOR_RED, msg.GetString().c_str()); + fflush(stdout); + exit(EXIT_FAILURE); + } + + return total_shards > 1; +} + +// Parses the environment variable var as an Int32. If it is unset, +// returns default_val. If it is not an Int32, prints an error +// and aborts. +Int32 Int32FromEnvOrDie(const char* var, Int32 default_val) { + const char* str_val = posix::GetEnv(var); + if (str_val == NULL) { + return default_val; + } + + Int32 result; + if (!ParseInt32(Message() << "The value of environment variable " << var, + str_val, &result)) { + exit(EXIT_FAILURE); + } + return result; +} + +// Given the total number of shards, the shard index, and the test id, +// returns true iff the test should be run on this shard. The test id is +// some arbitrary but unique non-negative integer assigned to each test +// method. Assumes that 0 <= shard_index < total_shards. +bool ShouldRunTestOnShard(int total_shards, int shard_index, int test_id) { + return (test_id % total_shards) == shard_index; +} + +// Compares the name of each test with the user-specified filter to +// decide whether the test should be run, then records the result in +// each TestCase and TestInfo object. +// If shard_tests == true, further filters tests based on sharding +// variables in the environment - see +// http://code.google.com/p/googletest/wiki/GoogleTestAdvancedGuide. +// Returns the number of tests that should run. +int UnitTestImpl::FilterTests(ReactionToSharding shard_tests) { + const Int32 total_shards = shard_tests == HONOR_SHARDING_PROTOCOL ? + Int32FromEnvOrDie(kTestTotalShards, -1) : -1; + const Int32 shard_index = shard_tests == HONOR_SHARDING_PROTOCOL ? + Int32FromEnvOrDie(kTestShardIndex, -1) : -1; + + // num_runnable_tests are the number of tests that will + // run across all shards (i.e., match filter and are not disabled). + // num_selected_tests are the number of tests to be run on + // this shard. 
+ int num_runnable_tests = 0; + int num_selected_tests = 0; + for (size_t i = 0; i < test_cases_.size(); i++) { + TestCase* const test_case = test_cases_[i]; + const std::string &test_case_name = test_case->name(); + test_case->set_should_run(false); + + for (size_t j = 0; j < test_case->test_info_list().size(); j++) { + TestInfo* const test_info = test_case->test_info_list()[j]; + const std::string test_name(test_info->name()); + // A test is disabled if test case name or test name matches + // kDisableTestFilter. + const bool is_disabled = + internal::UnitTestOptions::MatchesFilter(test_case_name, + kDisableTestFilter) || + internal::UnitTestOptions::MatchesFilter(test_name, + kDisableTestFilter); + test_info->is_disabled_ = is_disabled; + + const bool matches_filter = + internal::UnitTestOptions::FilterMatchesTest(test_case_name, + test_name); + test_info->matches_filter_ = matches_filter; + + const bool is_runnable = + (GTEST_FLAG(also_run_disabled_tests) || !is_disabled) && + matches_filter; + + const bool is_selected = is_runnable && + (shard_tests == IGNORE_SHARDING_PROTOCOL || + ShouldRunTestOnShard(total_shards, shard_index, + num_runnable_tests)); + + num_runnable_tests += is_runnable; + num_selected_tests += is_selected; + + test_info->should_run_ = is_selected; + test_case->set_should_run(test_case->should_run() || is_selected); + } + } + return num_selected_tests; +} + +// Prints the given C-string on a single line by replacing all '\n' +// characters with string "\\n". If the output takes more than +// max_length characters, only prints the first max_length characters +// and "...". +static void PrintOnOneLine(const char* str, int max_length) { + if (str != NULL) { + for (int i = 0; *str != '\0'; ++str) { + if (i >= max_length) { + printf("..."); + break; + } + if (*str == '\n') { + printf("\\n"); + i += 2; + } else { + printf("%c", *str); + ++i; + } + } + } +} + +// Prints the names of the tests matching the user-specified filter flag. +void UnitTestImpl::ListTestsMatchingFilter() { + // Print at most this many characters for each type/value parameter. + const int kMaxParamLength = 250; + + for (size_t i = 0; i < test_cases_.size(); i++) { + const TestCase* const test_case = test_cases_[i]; + bool printed_test_case_name = false; + + for (size_t j = 0; j < test_case->test_info_list().size(); j++) { + const TestInfo* const test_info = + test_case->test_info_list()[j]; + if (test_info->matches_filter_) { + if (!printed_test_case_name) { + printed_test_case_name = true; + printf("%s.", test_case->name()); + if (test_case->type_param() != NULL) { + printf(" # %s = ", kTypeParamLabel); + // We print the type parameter on a single line to make + // the output easy to parse by a program. + PrintOnOneLine(test_case->type_param(), kMaxParamLength); + } + printf("\n"); + } + printf(" %s", test_info->name()); + if (test_info->value_param() != NULL) { + printf(" # %s = ", kValueParamLabel); + // We print the value parameter on a single line to make the + // output easy to parse by a program. + PrintOnOneLine(test_info->value_param(), kMaxParamLength); + } + printf("\n"); + } + } + } + fflush(stdout); +} + +// Sets the OS stack trace getter. +// +// Does nothing if the input and the current OS stack trace getter are +// the same; otherwise, deletes the old getter and makes the input the +// current getter. 
+void UnitTestImpl::set_os_stack_trace_getter(
+    OsStackTraceGetterInterface* getter) {
+  if (os_stack_trace_getter_ != getter) {
+    delete os_stack_trace_getter_;
+    os_stack_trace_getter_ = getter;
+  }
+}
+
+// Returns the current OS stack trace getter if it is not NULL;
+// otherwise, creates an OsStackTraceGetter, makes it the current
+// getter, and returns it.
+OsStackTraceGetterInterface* UnitTestImpl::os_stack_trace_getter() {
+  if (os_stack_trace_getter_ == NULL) {
+    os_stack_trace_getter_ = new OsStackTraceGetter;
+  }
+
+  return os_stack_trace_getter_;
+}
+
+// Returns the TestResult for the test that's currently running, or
+// the TestResult for the ad hoc test if no test is running.
+TestResult* UnitTestImpl::current_test_result() {
+  return current_test_info_ ?
+      &(current_test_info_->result_) : &ad_hoc_test_result_;
+}
+
+// Shuffles all test cases, and the tests within each test case,
+// making sure that death tests are still run first.
+void UnitTestImpl::ShuffleTests() {
+  // Shuffles the death test cases.
+  ShuffleRange(random(), 0, last_death_test_case_ + 1, &test_case_indices_);
+
+  // Shuffles the non-death test cases.
+  ShuffleRange(random(), last_death_test_case_ + 1,
+               static_cast<int>(test_cases_.size()), &test_case_indices_);
+
+  // Shuffles the tests inside each test case.
+  for (size_t i = 0; i < test_cases_.size(); i++) {
+    test_cases_[i]->ShuffleTests(random());
+  }
+}
+
+// Restores the test cases and tests to their order before the first shuffle.
+void UnitTestImpl::UnshuffleTests() {
+  for (size_t i = 0; i < test_cases_.size(); i++) {
+    // Unshuffles the tests in each test case.
+    test_cases_[i]->UnshuffleTests();
+    // Resets the index of each test case.
+    test_case_indices_[i] = static_cast<int>(i);
+  }
+}
+
+// Returns the current OS stack trace as an std::string.
+//
+// The maximum number of stack frames to be included is specified by
+// the gtest_stack_trace_depth flag.  The skip_count parameter
+// specifies the number of top frames to be skipped, which doesn't
+// count against the number of frames to be included.
+//
+// For example, if Foo() calls Bar(), which in turn calls
+// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in
+// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't.
+std::string GetCurrentOsStackTraceExceptTop(UnitTest* /*unit_test*/,
+                                            int skip_count) {
+  // We pass skip_count + 1 to skip this wrapper function in addition
+  // to what the user really wants to skip.
+  return GetUnitTestImpl()->CurrentOsStackTraceExceptTop(skip_count + 1);
+}
+
+// Used by the GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_ macro to
+// suppress unreachable code warnings.
+namespace {
+class ClassUniqueToAlwaysTrue {};
+}
+
+bool IsTrue(bool condition) { return condition; }
+
+bool AlwaysTrue() {
+#if GTEST_HAS_EXCEPTIONS
+  // This condition is always false so AlwaysTrue() never actually throws,
+  // but it makes the compiler think that it may throw.
+  if (IsTrue(false))
+    throw ClassUniqueToAlwaysTrue();
+#endif  // GTEST_HAS_EXCEPTIONS
+  return true;
+}
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false.  None of pstr, *pstr, and prefix can be NULL.
+bool SkipPrefix(const char* prefix, const char** pstr) {
+  const size_t prefix_len = strlen(prefix);
+  if (strncmp(*pstr, prefix, prefix_len) == 0) {
+    *pstr += prefix_len;
+    return true;
+  }
+  return false;
+}
+
+// Parses a string as a command line flag.
The string should have +// the format "--flag=value". When def_optional is true, the "=value" +// part can be omitted. +// +// Returns the value of the flag, or NULL if the parsing failed. +const char* ParseFlagValue(const char* str, + const char* flag, + bool def_optional) { + // str and flag must not be NULL. + if (str == NULL || flag == NULL) return NULL; + + // The flag must start with "--" followed by GTEST_FLAG_PREFIX_. + const std::string flag_str = std::string("--") + GTEST_FLAG_PREFIX_ + flag; + const size_t flag_len = flag_str.length(); + if (strncmp(str, flag_str.c_str(), flag_len) != 0) return NULL; + + // Skips the flag name. + const char* flag_end = str + flag_len; + + // When def_optional is true, it's OK to not have a "=value" part. + if (def_optional && (flag_end[0] == '\0')) { + return flag_end; + } + + // If def_optional is true and there are more characters after the + // flag name, or if def_optional is false, there must be a '=' after + // the flag name. + if (flag_end[0] != '=') return NULL; + + // Returns the string after "=". + return flag_end + 1; +} + +// Parses a string for a bool flag, in the form of either +// "--flag=value" or "--flag". +// +// In the former case, the value is taken as true as long as it does +// not start with '0', 'f', or 'F'. +// +// In the latter case, the value is taken as true. +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseBoolFlag(const char* str, const char* flag, bool* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, true); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Converts the string value to a bool. + *value = !(*value_str == '0' || *value_str == 'f' || *value_str == 'F'); + return true; +} + +// Parses a string for an Int32 flag, in the form of +// "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseInt32Flag(const char* str, const char* flag, Int32* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Sets *value to the value of the flag. + return ParseInt32(Message() << "The value of flag --" << flag, + value_str, value); +} + +// Parses a string for a string flag, in the form of +// "--flag=value". +// +// On success, stores the value of the flag in *value, and returns +// true. On failure, returns false without changing *value. +bool ParseStringFlag(const char* str, const char* flag, std::string* value) { + // Gets the value of the flag as a string. + const char* const value_str = ParseFlagValue(str, flag, false); + + // Aborts if the parsing failed. + if (value_str == NULL) return false; + + // Sets *value to the value of the flag. + *value = value_str; + return true; +} + +// Determines whether a string has a prefix that Google Test uses for its +// flags, i.e., starts with GTEST_FLAG_PREFIX_ or GTEST_FLAG_PREFIX_DASH_. +// If Google Test detects that a command line flag has its prefix but is not +// recognized, it will print its help message. Flags starting with +// GTEST_INTERNAL_PREFIX_ followed by "internal_" are considered Google Test +// internal flags and do not trigger the help message. 
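+//
+// Behavior sketch (editorial illustration, not from the original source),
+// assuming the default prefix "gtest_":
+//
+//   HasGoogleTestFlagPrefix("--gtest_color")                    // true
+//   HasGoogleTestFlagPrefix("-gtest_repeat=2")                  // true
+//   HasGoogleTestFlagPrefix("/gtest_filter=Foo.*")              // true
+//   HasGoogleTestFlagPrefix("--gtest_internal_run_death_test")  // false
+//   HasGoogleTestFlagPrefix("--verbose")                        // false
+//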
+static bool HasGoogleTestFlagPrefix(const char* str) { + return (SkipPrefix("--", &str) || + SkipPrefix("-", &str) || + SkipPrefix("/", &str)) && + !SkipPrefix(GTEST_FLAG_PREFIX_ "internal_", &str) && + (SkipPrefix(GTEST_FLAG_PREFIX_, &str) || + SkipPrefix(GTEST_FLAG_PREFIX_DASH_, &str)); +} + +// Prints a string containing code-encoded text. The following escape +// sequences can be used in the string to control the text color: +// +// @@ prints a single '@' character. +// @R changes the color to red. +// @G changes the color to green. +// @Y changes the color to yellow. +// @D changes to the default terminal text color. +// +// TODO(wan@google.com): Write tests for this once we add stdout +// capturing to Google Test. +static void PrintColorEncoded(const char* str) { + GTestColor color = COLOR_DEFAULT; // The current color. + + // Conceptually, we split the string into segments divided by escape + // sequences. Then we print one segment at a time. At the end of + // each iteration, the str pointer advances to the beginning of the + // next segment. + for (;;) { + const char* p = strchr(str, '@'); + if (p == NULL) { + ColoredPrintf(color, "%s", str); + return; + } + + ColoredPrintf(color, "%s", std::string(str, p).c_str()); + + const char ch = p[1]; + str = p + 2; + if (ch == '@') { + ColoredPrintf(color, "@"); + } else if (ch == 'D') { + color = COLOR_DEFAULT; + } else if (ch == 'R') { + color = COLOR_RED; + } else if (ch == 'G') { + color = COLOR_GREEN; + } else if (ch == 'Y') { + color = COLOR_YELLOW; + } else { + --str; + } + } +} + +static const char kColorEncodedHelpMessage[] = +"This program contains tests written using " GTEST_NAME_ ". You can use the\n" +"following command line flags to control its behavior:\n" +"\n" +"Test Selection:\n" +" @G--" GTEST_FLAG_PREFIX_ "list_tests@D\n" +" List the names of all tests instead of running them. The name of\n" +" TEST(Foo, Bar) is \"Foo.Bar\".\n" +" @G--" GTEST_FLAG_PREFIX_ "filter=@YPOSTIVE_PATTERNS" + "[@G-@YNEGATIVE_PATTERNS]@D\n" +" Run only the tests whose name matches one of the positive patterns but\n" +" none of the negative patterns. '?' matches any single character; '*'\n" +" matches any substring; ':' separates two patterns.\n" +" @G--" GTEST_FLAG_PREFIX_ "also_run_disabled_tests@D\n" +" Run all disabled tests too.\n" +"\n" +"Test Execution:\n" +" @G--" GTEST_FLAG_PREFIX_ "repeat=@Y[COUNT]@D\n" +" Run the tests repeatedly; use a negative count to repeat forever.\n" +" @G--" GTEST_FLAG_PREFIX_ "shuffle@D\n" +" Randomize tests' orders on every iteration.\n" +" @G--" GTEST_FLAG_PREFIX_ "random_seed=@Y[NUMBER]@D\n" +" Random number seed to use for shuffling test orders (between 1 and\n" +" 99999, or 0 to use a seed based on the current time).\n" +"\n" +"Test Output:\n" +" @G--" GTEST_FLAG_PREFIX_ "color=@Y(@Gyes@Y|@Gno@Y|@Gauto@Y)@D\n" +" Enable/disable colored output. The default is @Gauto@D.\n" +" -@G-" GTEST_FLAG_PREFIX_ "print_time=0@D\n" +" Don't print the elapsed time of each test.\n" +" @G--" GTEST_FLAG_PREFIX_ "output=xml@Y[@G:@YDIRECTORY_PATH@G" + GTEST_PATH_SEP_ "@Y|@G:@YFILE_PATH]@D\n" +" Generate an XML report in the given directory or with the given file\n" +" name. 
@YFILE_PATH@D defaults to @Gtest_details.xml@D.\n"
+#if GTEST_CAN_STREAM_RESULTS_
+"  @G--" GTEST_FLAG_PREFIX_ "stream_result_to=@YHOST@G:@YPORT@D\n"
+"      Stream test results to the given server.\n"
+#endif  // GTEST_CAN_STREAM_RESULTS_
+"\n"
+"Assertion Behavior:\n"
+#if GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+"  @G--" GTEST_FLAG_PREFIX_ "death_test_style=@Y(@Gfast@Y|@Gthreadsafe@Y)@D\n"
+"      Set the default death test style.\n"
+#endif  // GTEST_HAS_DEATH_TEST && !GTEST_OS_WINDOWS
+"  @G--" GTEST_FLAG_PREFIX_ "break_on_failure@D\n"
+"      Turn assertion failures into debugger break-points.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "throw_on_failure@D\n"
+"      Turn assertion failures into C++ exceptions.\n"
+"  @G--" GTEST_FLAG_PREFIX_ "catch_exceptions=0@D\n"
+"      Do not report exceptions as test failures. Instead, allow them\n"
+"      to crash the program or throw a pop-up (on Windows).\n"
+"\n"
+"Except for @G--" GTEST_FLAG_PREFIX_ "list_tests@D, you can alternatively set "
+    "the corresponding\n"
+"environment variable of a flag (all letters in upper-case). For example, to\n"
+"disable colored text output, you can either specify @G--" GTEST_FLAG_PREFIX_
+    "color=no@D or set\n"
+"the @G" GTEST_FLAG_PREFIX_UPPER_ "COLOR@D environment variable to @Gno@D.\n"
+"\n"
+"For more information, please read the " GTEST_NAME_ " documentation at\n"
+"@G" GTEST_PROJECT_URL_ "@D. If you find a bug in " GTEST_NAME_ "\n"
+"(not one in your own code or tests), please report it to\n"
+"@G<" GTEST_DEV_EMAIL_ ">@D.\n";
+
+// Parses the command line for Google Test flags, without initializing
+// other parts of Google Test.  The type parameter CharType can be
+// instantiated to either char or wchar_t.
+template <typename CharType>
+void ParseGoogleTestFlagsOnlyImpl(int* argc, CharType** argv) {
+  for (int i = 1; i < *argc; i++) {
+    const std::string arg_string = StreamableToString(argv[i]);
+    const char* const arg = arg_string.c_str();
+
+    using internal::ParseBoolFlag;
+    using internal::ParseInt32Flag;
+    using internal::ParseStringFlag;
+
+    // Do we see a Google Test flag?
+    if (ParseBoolFlag(arg, kAlsoRunDisabledTestsFlag,
+                      &GTEST_FLAG(also_run_disabled_tests)) ||
+        ParseBoolFlag(arg, kBreakOnFailureFlag,
+                      &GTEST_FLAG(break_on_failure)) ||
+        ParseBoolFlag(arg, kCatchExceptionsFlag,
+                      &GTEST_FLAG(catch_exceptions)) ||
+        ParseStringFlag(arg, kColorFlag, &GTEST_FLAG(color)) ||
+        ParseStringFlag(arg, kDeathTestStyleFlag,
+                        &GTEST_FLAG(death_test_style)) ||
+        ParseBoolFlag(arg, kDeathTestUseFork,
+                      &GTEST_FLAG(death_test_use_fork)) ||
+        ParseStringFlag(arg, kFilterFlag, &GTEST_FLAG(filter)) ||
+        ParseStringFlag(arg, kInternalRunDeathTestFlag,
+                        &GTEST_FLAG(internal_run_death_test)) ||
+        ParseBoolFlag(arg, kListTestsFlag, &GTEST_FLAG(list_tests)) ||
+        ParseStringFlag(arg, kOutputFlag, &GTEST_FLAG(output)) ||
+        ParseBoolFlag(arg, kPrintTimeFlag, &GTEST_FLAG(print_time)) ||
+        ParseInt32Flag(arg, kRandomSeedFlag, &GTEST_FLAG(random_seed)) ||
+        ParseInt32Flag(arg, kRepeatFlag, &GTEST_FLAG(repeat)) ||
+        ParseBoolFlag(arg, kShuffleFlag, &GTEST_FLAG(shuffle)) ||
+        ParseInt32Flag(arg, kStackTraceDepthFlag,
+                       &GTEST_FLAG(stack_trace_depth)) ||
+        ParseStringFlag(arg, kStreamResultToFlag,
+                        &GTEST_FLAG(stream_result_to)) ||
+        ParseBoolFlag(arg, kThrowOnFailureFlag,
+                      &GTEST_FLAG(throw_on_failure))
+        ) {
+      // Yes.  Shift the remainder of the argv list left by one.  Note
+      // that argv has (*argc + 1) elements, the last one always being
+      // NULL.  The following loop moves the trailing NULL element as
+      // well.
+ for (int j = i; j != *argc; j++) { + argv[j] = argv[j + 1]; + } + + // Decrements the argument count. + (*argc)--; + + // We also need to decrement the iterator as we just removed + // an element. + i--; + } else if (arg_string == "--help" || arg_string == "-h" || + arg_string == "-?" || arg_string == "/?" || + HasGoogleTestFlagPrefix(arg)) { + // Both help flag and unrecognized Google Test flags (excluding + // internal ones) trigger help display. + g_help_flag = true; + } + } + + if (g_help_flag) { + // We print the help here instead of in RUN_ALL_TESTS(), as the + // latter may not be called at all if the user is using Google + // Test with another testing framework. + PrintColorEncoded(kColorEncodedHelpMessage); + } +} + +// Parses the command line for Google Test flags, without initializing +// other parts of Google Test. +void ParseGoogleTestFlagsOnly(int* argc, char** argv) { + ParseGoogleTestFlagsOnlyImpl(argc, argv); +} +void ParseGoogleTestFlagsOnly(int* argc, wchar_t** argv) { + ParseGoogleTestFlagsOnlyImpl(argc, argv); +} + +// The internal implementation of InitGoogleTest(). +// +// The type parameter CharType can be instantiated to either char or +// wchar_t. +template +void InitGoogleTestImpl(int* argc, CharType** argv) { + g_init_gtest_count++; + + // We don't want to run the initialization code twice. + if (g_init_gtest_count != 1) return; + + if (*argc <= 0) return; + + internal::g_executable_path = internal::StreamableToString(argv[0]); + +#if GTEST_HAS_DEATH_TEST + + g_argvs.clear(); + for (int i = 0; i != *argc; i++) { + g_argvs.push_back(StreamableToString(argv[i])); + } + +#endif // GTEST_HAS_DEATH_TEST + + ParseGoogleTestFlagsOnly(argc, argv); + GetUnitTestImpl()->PostFlagParsingInit(); +} + +} // namespace internal + +// Initializes Google Test. This must be called before calling +// RUN_ALL_TESTS(). In particular, it parses a command line for the +// flags that Google Test recognizes. Whenever a Google Test flag is +// seen, it is removed from argv, and *argc is decremented. +// +// No value is returned. Instead, the Google Test flag variables are +// updated. +// +// Calling the function for the second time has no user-visible effect. +void InitGoogleTest(int* argc, char** argv) { + internal::InitGoogleTestImpl(argc, argv); +} + +// This overloaded version can be used in Windows programs compiled in +// UNICODE mode. +void InitGoogleTest(int* argc, wchar_t** argv) { + internal::InitGoogleTestImpl(argc, argv); +} + +} // namespace testing +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan), vladl@google.com (Vlad Losev)
+//
+// This file implements death tests.
+
+
+#if GTEST_HAS_DEATH_TEST
+
+# if GTEST_OS_MAC
+#  include <crt_externs.h>
+# endif  // GTEST_OS_MAC
+
+# include <errno.h>
+# include <fcntl.h>
+# include <limits.h>
+
+# if GTEST_OS_LINUX
+#  include <signal.h>
+# endif  // GTEST_OS_LINUX
+
+# include <stdarg.h>
+
+# if GTEST_OS_WINDOWS
+#  include <windows.h>
+# else
+#  include <sys/mman.h>
+#  include <sys/wait.h>
+# endif  // GTEST_OS_WINDOWS
+
+# if GTEST_OS_QNX
+#  include <spawn.h>
+# endif  // GTEST_OS_QNX
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick exists to
+// prevent the accidental inclusion of gtest-internal-inl.h in the
+// user's code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+
+// Constants.
+
+// The default death test style.
+static const char kDefaultDeathTestStyle[] = "fast";
+
+GTEST_DEFINE_string_(
+    death_test_style,
+    internal::StringFromGTestEnv("death_test_style", kDefaultDeathTestStyle),
+    "Indicates how to run a death test in a forked child process: "
+    "\"threadsafe\" (child process re-executes the test binary "
+    "from the beginning, running only the specific death test) or "
+    "\"fast\" (child process runs the death test immediately "
+    "after forking).");
+
+GTEST_DEFINE_bool_(
+    death_test_use_fork,
+    internal::BoolFromGTestEnv("death_test_use_fork", false),
+    "Instructs to use fork()/_exit() instead of clone() in death tests. "
+    "Ignored and always uses fork() on POSIX systems where clone() is not "
+    "implemented. Useful when running under valgrind or similar tools if "
+    "those do not support clone(). Valgrind 3.3.1 will just fail if "
+    "it sees an unsupported combination of clone() flags. "
+    "It is not recommended to use this flag w/o valgrind though it will "
+    "work in 99% of the cases. Once valgrind is fixed, this flag will "
+    "most likely be removed.");
+
+namespace internal {
+GTEST_DEFINE_string_(
+    internal_run_death_test, "",
+    "Indicates the file, line number, temporal index of "
+    "the single death test to run, and a file descriptor to "
+    "which a success code may be sent, all separated by "
+    "the '|' characters.  This flag is specified if and only if the current "
+    "process is a sub-process launched for running a thread-safe "
+    "death test.  FOR INTERNAL USE ONLY.");
+}  // namespace internal
+
+#if GTEST_HAS_DEATH_TEST
+
+namespace internal {
+
+// Valid only for fast death tests. Indicates the code is running in the
+// child process of a fast style death test.
+static bool g_in_fast_death_test_child = false;
+
+// Returns a Boolean value indicating whether the caller is currently
+// executing in the context of the death test child process.  Tools such as
+// Valgrind heap checkers may need this to modify their behavior in death
+// tests.  IMPORTANT: This is an internal utility.
Using it may break the +// implementation of death tests. User code MUST NOT use it. +bool InDeathTestChild() { +# if GTEST_OS_WINDOWS + + // On Windows, death tests are thread-safe regardless of the value of the + // death_test_style flag. + return !GTEST_FLAG(internal_run_death_test).empty(); + +# else + + if (GTEST_FLAG(death_test_style) == "threadsafe") + return !GTEST_FLAG(internal_run_death_test).empty(); + else + return g_in_fast_death_test_child; +#endif +} + +} // namespace internal + +// ExitedWithCode constructor. +ExitedWithCode::ExitedWithCode(int exit_code) : exit_code_(exit_code) { +} + +// ExitedWithCode function-call operator. +bool ExitedWithCode::operator()(int exit_status) const { +# if GTEST_OS_WINDOWS + + return exit_status == exit_code_; + +# else + + return WIFEXITED(exit_status) && WEXITSTATUS(exit_status) == exit_code_; + +# endif // GTEST_OS_WINDOWS +} + +# if !GTEST_OS_WINDOWS +// KilledBySignal constructor. +KilledBySignal::KilledBySignal(int signum) : signum_(signum) { +} + +// KilledBySignal function-call operator. +bool KilledBySignal::operator()(int exit_status) const { + return WIFSIGNALED(exit_status) && WTERMSIG(exit_status) == signum_; +} +# endif // !GTEST_OS_WINDOWS + +namespace internal { + +// Utilities needed for death tests. + +// Generates a textual description of a given exit code, in the format +// specified by wait(2). +static std::string ExitSummary(int exit_code) { + Message m; + +# if GTEST_OS_WINDOWS + + m << "Exited with exit status " << exit_code; + +# else + + if (WIFEXITED(exit_code)) { + m << "Exited with exit status " << WEXITSTATUS(exit_code); + } else if (WIFSIGNALED(exit_code)) { + m << "Terminated by signal " << WTERMSIG(exit_code); + } +# ifdef WCOREDUMP + if (WCOREDUMP(exit_code)) { + m << " (core dumped)"; + } +# endif +# endif // GTEST_OS_WINDOWS + + return m.GetString(); +} + +// Returns true if exit_status describes a process that was terminated +// by a signal, or exited normally with a nonzero exit code. +bool ExitedUnsuccessfully(int exit_status) { + return !ExitedWithCode(0)(exit_status); +} + +# if !GTEST_OS_WINDOWS +// Generates a textual failure message when a death test finds more than +// one thread running, or cannot determine the number of threads, prior +// to executing the given statement. It is the responsibility of the +// caller not to pass a thread_count of 1. +static std::string DeathTestThreadWarning(size_t thread_count) { + Message msg; + msg << "Death tests use fork(), which is unsafe particularly" + << " in a threaded context. For this test, " << GTEST_NAME_ << " "; + if (thread_count == 0) + msg << "couldn't detect the number of threads."; + else + msg << "detected " << thread_count << " threads."; + return msg.GetString(); +} +# endif // !GTEST_OS_WINDOWS + +// Flag characters for reporting a death test that did not die. +static const char kDeathTestLived = 'L'; +static const char kDeathTestReturned = 'R'; +static const char kDeathTestThrew = 'T'; +static const char kDeathTestInternalError = 'I'; + +// An enumeration describing all of the possible ways that a death test can +// conclude. DIED means that the process died while executing the test +// code; LIVED means that process lived beyond the end of the test code; +// RETURNED means that the test statement attempted to execute a return +// statement, which is not allowed; THREW means that the test statement +// returned control by throwing an exception. IN_PROGRESS means the test +// has not yet concluded. 
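+//
+// For reference (editorial summary, not from the original source), the
+// non-DIED outcomes are reported by the child as single status bytes on
+// the status pipe:
+//
+//   kDeathTestLived    ('L')  ->  LIVED     statement finished without dying
+//   kDeathTestReturned ('R')  ->  RETURNED  statement executed "return"
+//   kDeathTestThrew    ('T')  ->  THREW     statement threw an exception
+//
+// A clean death writes nothing; the parent sees EOF on the pipe and
+// records DIED (see ReadAndInterpretStatusByte() below).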
+// TODO(vladl@google.com): Unify names and possibly values for +// AbortReason, DeathTestOutcome, and flag characters above. +enum DeathTestOutcome { IN_PROGRESS, DIED, LIVED, RETURNED, THREW }; + +// Routine for aborting the program which is safe to call from an +// exec-style death test child process, in which case the error +// message is propagated back to the parent process. Otherwise, the +// message is simply printed to stderr. In either case, the program +// then exits with status 1. +void DeathTestAbort(const std::string& message) { + // On a POSIX system, this function may be called from a threadsafe-style + // death test child process, which operates on a very small stack. Use + // the heap for any additional non-minuscule memory requirements. + const InternalRunDeathTestFlag* const flag = + GetUnitTestImpl()->internal_run_death_test_flag(); + if (flag != NULL) { + FILE* parent = posix::FDOpen(flag->write_fd(), "w"); + fputc(kDeathTestInternalError, parent); + fprintf(parent, "%s", message.c_str()); + fflush(parent); + _exit(1); + } else { + fprintf(stderr, "%s", message.c_str()); + fflush(stderr); + posix::Abort(); + } +} + +// A replacement for CHECK that calls DeathTestAbort if the assertion +// fails. +# define GTEST_DEATH_TEST_CHECK_(expression) \ + do { \ + if (!::testing::internal::IsTrue(expression)) { \ + DeathTestAbort( \ + ::std::string("CHECK failed: File ") + __FILE__ + ", line " \ + + ::testing::internal::StreamableToString(__LINE__) + ": " \ + + #expression); \ + } \ + } while (::testing::internal::AlwaysFalse()) + +// This macro is similar to GTEST_DEATH_TEST_CHECK_, but it is meant for +// evaluating any system call that fulfills two conditions: it must return +// -1 on failure, and set errno to EINTR when it is interrupted and +// should be tried again. The macro expands to a loop that repeatedly +// evaluates the expression as long as it evaluates to -1 and sets +// errno to EINTR. If the expression evaluates to -1 but errno is +// something other than EINTR, DeathTestAbort is called. +# define GTEST_DEATH_TEST_CHECK_SYSCALL_(expression) \ + do { \ + int gtest_retval; \ + do { \ + gtest_retval = (expression); \ + } while (gtest_retval == -1 && errno == EINTR); \ + if (gtest_retval == -1) { \ + DeathTestAbort( \ + ::std::string("CHECK failed: File ") + __FILE__ + ", line " \ + + ::testing::internal::StreamableToString(__LINE__) + ": " \ + + #expression + " != -1"); \ + } \ + } while (::testing::internal::AlwaysFalse()) + +// Returns the message describing the last system error in errno. +std::string GetLastErrnoDescription() { + return errno == 0 ? "" : posix::StrError(errno); +} + +// This is called from a death test parent process to read a failure +// message from the death test child process and log it with the FATAL +// severity. On Windows, the message is read from a pipe handle. On other +// platforms, it is read from a file descriptor. +static void FailFromInternalError(int fd) { + Message error; + char buffer[256]; + int num_read; + + do { + while ((num_read = posix::Read(fd, buffer, 255)) > 0) { + buffer[num_read] = '\0'; + error << buffer; + } + } while (num_read == -1 && errno == EINTR); + + if (num_read == 0) { + GTEST_LOG_(FATAL) << error.GetString(); + } else { + const int last_error = errno; + GTEST_LOG_(FATAL) << "Error while reading death test internal: " + << GetLastErrnoDescription() << " [" << last_error << "]"; + } +} + +// Death test constructor. Increments the running death test count +// for the current test. 
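+//
+// Usage sketch for the two CHECK macros above (illustrative, not from the
+// original source).  GTEST_DEATH_TEST_CHECK_SYSCALL_ retries on EINTR, so
+// it is the right wrapper for interruptible system calls:
+//
+//   int fds[2];
+//   GTEST_DEATH_TEST_CHECK_(pipe(fds) != -1);        // aborts on failure
+//   GTEST_DEATH_TEST_CHECK_SYSCALL_(close(fds[0]));  // retried on EINTR
+//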
+DeathTest::DeathTest() { + TestInfo* const info = GetUnitTestImpl()->current_test_info(); + if (info == NULL) { + DeathTestAbort("Cannot run a death test outside of a TEST or " + "TEST_F construct"); + } +} + +// Creates and returns a death test by dispatching to the current +// death test factory. +bool DeathTest::Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test) { + return GetUnitTestImpl()->death_test_factory()->Create( + statement, regex, file, line, test); +} + +const char* DeathTest::LastMessage() { + return last_death_test_message_.c_str(); +} + +void DeathTest::set_last_death_test_message(const std::string& message) { + last_death_test_message_ = message; +} + +std::string DeathTest::last_death_test_message_; + +// Provides cross platform implementation for some death functionality. +class DeathTestImpl : public DeathTest { + protected: + DeathTestImpl(const char* a_statement, const RE* a_regex) + : statement_(a_statement), + regex_(a_regex), + spawned_(false), + status_(-1), + outcome_(IN_PROGRESS), + read_fd_(-1), + write_fd_(-1) {} + + // read_fd_ is expected to be closed and cleared by a derived class. + ~DeathTestImpl() { GTEST_DEATH_TEST_CHECK_(read_fd_ == -1); } + + void Abort(AbortReason reason); + virtual bool Passed(bool status_ok); + + const char* statement() const { return statement_; } + const RE* regex() const { return regex_; } + bool spawned() const { return spawned_; } + void set_spawned(bool is_spawned) { spawned_ = is_spawned; } + int status() const { return status_; } + void set_status(int a_status) { status_ = a_status; } + DeathTestOutcome outcome() const { return outcome_; } + void set_outcome(DeathTestOutcome an_outcome) { outcome_ = an_outcome; } + int read_fd() const { return read_fd_; } + void set_read_fd(int fd) { read_fd_ = fd; } + int write_fd() const { return write_fd_; } + void set_write_fd(int fd) { write_fd_ = fd; } + + // Called in the parent process only. Reads the result code of the death + // test child process via a pipe, interprets it to set the outcome_ + // member, and closes read_fd_. Outputs diagnostics and terminates in + // case of unexpected codes. + void ReadAndInterpretStatusByte(); + + private: + // The textual content of the code this object is testing. This class + // doesn't own this string and should not attempt to delete it. + const char* const statement_; + // The regular expression which test output must match. DeathTestImpl + // doesn't own this object and should not attempt to delete it. + const RE* const regex_; + // True if the death test child process has been successfully spawned. + bool spawned_; + // The exit status of the child process. + int status_; + // How the death test concluded. + DeathTestOutcome outcome_; + // Descriptor to the read end of the pipe to the child process. It is + // always -1 in the child process. The child keeps its write end of the + // pipe in write_fd_. + int read_fd_; + // Descriptor to the child's write end of the pipe to the parent process. + // It is always -1 in the parent process. The parent keeps its end of the + // pipe in read_fd_. + int write_fd_; +}; + +// Called in the parent process only. Reads the result code of the death +// test child process via a pipe, interprets it to set the outcome_ +// member, and closes read_fd_. Outputs diagnostics and terminates in +// case of unexpected codes. 
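+//
+// Pipe wiring sketch (editorial illustration, not from the original
+// source):
+//
+//   parent                          child
+//   ------                          -----
+//   read_fd_   <----- pipe -----    write_fd_
+//   blocks in posix::Read()         writes 'L', 'R' or 'T' via Abort(),
+//                                   or simply dies, closing its end so
+//                                   the parent sees EOF (outcome DIED)
+//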
+void DeathTestImpl::ReadAndInterpretStatusByte() { + char flag; + int bytes_read; + + // The read() here blocks until data is available (signifying the + // failure of the death test) or until the pipe is closed (signifying + // its success), so it's okay to call this in the parent before + // the child process has exited. + do { + bytes_read = posix::Read(read_fd(), &flag, 1); + } while (bytes_read == -1 && errno == EINTR); + + if (bytes_read == 0) { + set_outcome(DIED); + } else if (bytes_read == 1) { + switch (flag) { + case kDeathTestReturned: + set_outcome(RETURNED); + break; + case kDeathTestThrew: + set_outcome(THREW); + break; + case kDeathTestLived: + set_outcome(LIVED); + break; + case kDeathTestInternalError: + FailFromInternalError(read_fd()); // Does not return. + break; + default: + GTEST_LOG_(FATAL) << "Death test child process reported " + << "unexpected status byte (" + << static_cast(flag) << ")"; + } + } else { + GTEST_LOG_(FATAL) << "Read from death test child process failed: " + << GetLastErrnoDescription(); + } + GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Close(read_fd())); + set_read_fd(-1); +} + +// Signals that the death test code which should have exited, didn't. +// Should be called only in a death test child process. +// Writes a status byte to the child's status file descriptor, then +// calls _exit(1). +void DeathTestImpl::Abort(AbortReason reason) { + // The parent process considers the death test to be a failure if + // it finds any data in our pipe. So, here we write a single flag byte + // to the pipe, then exit. + const char status_ch = + reason == TEST_DID_NOT_DIE ? kDeathTestLived : + reason == TEST_THREW_EXCEPTION ? kDeathTestThrew : kDeathTestReturned; + + GTEST_DEATH_TEST_CHECK_SYSCALL_(posix::Write(write_fd(), &status_ch, 1)); + // We are leaking the descriptor here because on some platforms (i.e., + // when built as Windows DLL), destructors of global objects will still + // run after calling _exit(). On such systems, write_fd_ will be + // indirectly closed from the destructor of UnitTestImpl, causing double + // close if it is also closed here. On debug configurations, double close + // may assert. As there are no in-process buffers to flush here, we are + // relying on the OS to close the descriptor after the process terminates + // when the destructors are not run. + _exit(1); // Exits w/o any normal exit hooks (we were supposed to crash) +} + +// Returns an indented copy of stderr output for a death test. +// This makes distinguishing death test output lines from regular log lines +// much easier. +static ::std::string FormatDeathTestOutput(const ::std::string& output) { + ::std::string ret; + for (size_t at = 0; ; ) { + const size_t line_end = output.find('\n', at); + ret += "[ DEATH ] "; + if (line_end == ::std::string::npos) { + ret += output.substr(at); + break; + } + ret += output.substr(at, line_end + 1 - at); + at = line_end + 1; + } + return ret; +} + +// Assesses the success or failure of a death test, using both private +// members which have previously been set, and one argument: +// +// Private data members: +// outcome: An enumeration describing how the death test +// concluded: DIED, LIVED, THREW, or RETURNED. The death test +// fails in the latter three cases. +// status: The exit status of the child process. On *nix, it is in the +// in the format specified by wait(2). On Windows, this is the +// value supplied to the ExitProcess() API or a numeric code +// of the exception that terminated the program. 
+// regex: A regular expression object to be applied to +// the test's captured standard error output; the death test +// fails if it does not match. +// +// Argument: +// status_ok: true if exit_status is acceptable in the context of +// this particular death test, which fails if it is false +// +// Returns true iff all of the above conditions are met. Otherwise, the +// first failing condition, in the order given above, is the one that is +// reported. Also sets the last death test message string. +bool DeathTestImpl::Passed(bool status_ok) { + if (!spawned()) + return false; + + const std::string error_message = GetCapturedStderr(); + + bool success = false; + Message buffer; + + buffer << "Death test: " << statement() << "\n"; + switch (outcome()) { + case LIVED: + buffer << " Result: failed to die.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case THREW: + buffer << " Result: threw an exception.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case RETURNED: + buffer << " Result: illegal return in test statement.\n" + << " Error msg:\n" << FormatDeathTestOutput(error_message); + break; + case DIED: + if (status_ok) { + const bool matched = RE::PartialMatch(error_message.c_str(), *regex()); + if (matched) { + success = true; + } else { + buffer << " Result: died but not with expected error.\n" + << " Expected: " << regex()->pattern() << "\n" + << "Actual msg:\n" << FormatDeathTestOutput(error_message); + } + } else { + buffer << " Result: died but not with expected exit code:\n" + << " " << ExitSummary(status()) << "\n" + << "Actual msg:\n" << FormatDeathTestOutput(error_message); + } + break; + case IN_PROGRESS: + default: + GTEST_LOG_(FATAL) + << "DeathTest::Passed somehow called before conclusion of test"; + } + + DeathTest::set_last_death_test_message(buffer.GetString()); + return success; +} + +# if GTEST_OS_WINDOWS +// WindowsDeathTest implements death tests on Windows. Due to the +// specifics of starting new processes on Windows, death tests there are +// always threadsafe, and Google Test considers the +// --gtest_death_test_style=fast setting to be equivalent to +// --gtest_death_test_style=threadsafe there. +// +// A few implementation notes: Like the Linux version, the Windows +// implementation uses pipes for child-to-parent communication. But due to +// the specifics of pipes on Windows, some extra steps are required: +// +// 1. The parent creates a communication pipe and stores handles to both +// ends of it. +// 2. The parent starts the child and provides it with the information +// necessary to acquire the handle to the write end of the pipe. +// 3. The child acquires the write end of the pipe and signals the parent +// using a Windows event. +// 4. Now the parent can release the write end of the pipe on its side. If +// this is done before step 3, the object's reference count goes down to +// 0 and it is destroyed, preventing the child from acquiring it. The +// parent now has to release it, or read operations on the read end of +// the pipe will not return when the child terminates. +// 5. The parent reads child's output through the pipe (outcome code and +// any possible error messages) from the pipe, and its stderr and then +// determines whether to fail the test. +// +// Note: to distinguish Win32 API calls from the local method and function +// calls, the former are explicitly resolved in the global namespace. 
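+//
+// End-to-end sketch of what this machinery backs (illustrative, not from
+// the original source):
+//
+//   void Crash() { fprintf(stderr, "fatal: boom\n"); abort(); }
+//
+//   TEST(MyDeathTest, Crashes) {
+//     EXPECT_DEATH(Crash(), "boom");  // child must die, stderr must match
+//   }
+//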
+// +class WindowsDeathTest : public DeathTestImpl { + public: + WindowsDeathTest(const char* a_statement, + const RE* a_regex, + const char* file, + int line) + : DeathTestImpl(a_statement, a_regex), file_(file), line_(line) {} + + // All of these virtual functions are inherited from DeathTest. + virtual int Wait(); + virtual TestRole AssumeRole(); + + private: + // The name of the file in which the death test is located. + const char* const file_; + // The line number on which the death test is located. + const int line_; + // Handle to the write end of the pipe to the child process. + AutoHandle write_handle_; + // Child process handle. + AutoHandle child_handle_; + // Event the child process uses to signal the parent that it has + // acquired the handle to the write end of the pipe. After seeing this + // event the parent can release its own handles to make sure its + // ReadFile() calls return when the child terminates. + AutoHandle event_handle_; +}; + +// Waits for the child in a death test to exit, returning its exit +// status, or 0 if no child process exists. As a side effect, sets the +// outcome data member. +int WindowsDeathTest::Wait() { + if (!spawned()) + return 0; + + // Wait until the child either signals that it has acquired the write end + // of the pipe or it dies. + const HANDLE wait_handles[2] = { child_handle_.Get(), event_handle_.Get() }; + switch (::WaitForMultipleObjects(2, + wait_handles, + FALSE, // Waits for any of the handles. + INFINITE)) { + case WAIT_OBJECT_0: + case WAIT_OBJECT_0 + 1: + break; + default: + GTEST_DEATH_TEST_CHECK_(false); // Should not get here. + } + + // The child has acquired the write end of the pipe or exited. + // We release the handle on our side and continue. + write_handle_.Reset(); + event_handle_.Reset(); + + ReadAndInterpretStatusByte(); + + // Waits for the child process to exit if it haven't already. This + // returns immediately if the child has already exited, regardless of + // whether previous calls to WaitForMultipleObjects synchronized on this + // handle or not. + GTEST_DEATH_TEST_CHECK_( + WAIT_OBJECT_0 == ::WaitForSingleObject(child_handle_.Get(), + INFINITE)); + DWORD status_code; + GTEST_DEATH_TEST_CHECK_( + ::GetExitCodeProcess(child_handle_.Get(), &status_code) != FALSE); + child_handle_.Reset(); + set_status(static_cast(status_code)); + return status(); +} + +// The AssumeRole process for a Windows death test. It creates a child +// process with the same executable as the current process to run the +// death test. The child process is given the --gtest_filter and +// --gtest_internal_run_death_test flags such that it knows to run the +// current death test only. +DeathTest::TestRole WindowsDeathTest::AssumeRole() { + const UnitTestImpl* const impl = GetUnitTestImpl(); + const InternalRunDeathTestFlag* const flag = + impl->internal_run_death_test_flag(); + const TestInfo* const info = impl->current_test_info(); + const int death_test_index = info->result()->death_test_count(); + + if (flag != NULL) { + // ParseInternalRunDeathTestFlag() has performed all the necessary + // processing. + set_write_fd(flag->write_fd()); + return EXECUTE_TEST; + } + + // WindowsDeathTest uses an anonymous pipe to communicate results of + // a death test. + SECURITY_ATTRIBUTES handles_are_inheritable = { + sizeof(SECURITY_ATTRIBUTES), NULL, TRUE }; + HANDLE read_handle, write_handle; + GTEST_DEATH_TEST_CHECK_( + ::CreatePipe(&read_handle, &write_handle, &handles_are_inheritable, + 0) // Default buffer size. 
+              != FALSE);
+  set_read_fd(::_open_osfhandle(reinterpret_cast<intptr_t>(read_handle),
+                                O_RDONLY));
+  write_handle_.Reset(write_handle);
+  event_handle_.Reset(::CreateEvent(
+      &handles_are_inheritable,
+      TRUE,    // The event will automatically reset to non-signaled state.
+      FALSE,   // The initial state is non-signalled.
+      NULL));  // The event is unnamed.
+  GTEST_DEATH_TEST_CHECK_(event_handle_.Get() != NULL);
+  const std::string filter_flag =
+      std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "=" +
+      info->test_case_name() + "." + info->name();
+  const std::string internal_flag =
+      std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag +
+      "=" + file_ + "|" + StreamableToString(line_) + "|" +
+      StreamableToString(death_test_index) + "|" +
+      StreamableToString(static_cast<unsigned int>(::GetCurrentProcessId())) +
+      // size_t has the same width as pointers on both 32-bit and 64-bit
+      // Windows platforms.
+      // See http://msdn.microsoft.com/en-us/library/tcxf1dw6.aspx.
+      "|" + StreamableToString(reinterpret_cast<size_t>(write_handle)) +
+      "|" + StreamableToString(reinterpret_cast<size_t>(event_handle_.Get()));
+
+  char executable_path[_MAX_PATH + 1];  // NOLINT
+  GTEST_DEATH_TEST_CHECK_(
+      _MAX_PATH + 1 != ::GetModuleFileNameA(NULL,
+                                            executable_path,
+                                            _MAX_PATH));
+
+  std::string command_line =
+      std::string(::GetCommandLineA()) + " " + filter_flag + " \"" +
+      internal_flag + "\"";
+
+  DeathTest::set_last_death_test_message("");
+
+  CaptureStderr();
+  // Flush the log buffers since the log streams are shared with the child.
+  FlushInfoLog();
+
+  // The child process will share the standard handles with the parent.
+  STARTUPINFOA startup_info;
+  memset(&startup_info, 0, sizeof(STARTUPINFO));
+  startup_info.dwFlags = STARTF_USESTDHANDLES;
+  startup_info.hStdInput = ::GetStdHandle(STD_INPUT_HANDLE);
+  startup_info.hStdOutput = ::GetStdHandle(STD_OUTPUT_HANDLE);
+  startup_info.hStdError = ::GetStdHandle(STD_ERROR_HANDLE);
+
+  PROCESS_INFORMATION process_info;
+  GTEST_DEATH_TEST_CHECK_(::CreateProcessA(
+      executable_path,
+      const_cast<char*>(command_line.c_str()),
+      NULL,   // Returned process handle is not inheritable.
+      NULL,   // Returned thread handle is not inheritable.
+      TRUE,   // Child inherits all inheritable handles (for write_handle_).
+      0x0,    // Default creation flags.
+      NULL,   // Inherit the parent's environment.
+      UnitTest::GetInstance()->original_working_dir(),
+      &startup_info,
+      &process_info) != FALSE);
+  child_handle_.Reset(process_info.hProcess);
+  ::CloseHandle(process_info.hThread);
+  set_spawned(true);
+  return OVERSEE_TEST;
+}
+# else  // We are not on Windows.
+
+// ForkingDeathTest provides implementations for most of the abstract
+// methods of the DeathTest interface.  Only the AssumeRole method is
+// left undefined.
+class ForkingDeathTest : public DeathTestImpl {
+ public:
+  ForkingDeathTest(const char* statement, const RE* regex);
+
+  // All of these virtual functions are inherited from DeathTest.
+  virtual int Wait();
+
+ protected:
+  void set_child_pid(pid_t child_pid) { child_pid_ = child_pid; }
+
+ private:
+  // PID of child process during death test; 0 in the child process itself.
+  pid_t child_pid_;
+};
+
+// Constructs a ForkingDeathTest.
+ForkingDeathTest::ForkingDeathTest(const char* a_statement, const RE* a_regex)
+    : DeathTestImpl(a_statement, a_regex),
+      child_pid_(-1) {}
+
+// Waits for the child in a death test to exit, returning its exit
+// status, or 0 if no child process exists.  As a side effect, sets the
+// outcome data member.
+int ForkingDeathTest::Wait() { + if (!spawned()) + return 0; + + ReadAndInterpretStatusByte(); + + int status_value; + GTEST_DEATH_TEST_CHECK_SYSCALL_(waitpid(child_pid_, &status_value, 0)); + set_status(status_value); + return status_value; +} + +// A concrete death test class that forks, then immediately runs the test +// in the child process. +class NoExecDeathTest : public ForkingDeathTest { + public: + NoExecDeathTest(const char* a_statement, const RE* a_regex) : + ForkingDeathTest(a_statement, a_regex) { } + virtual TestRole AssumeRole(); +}; + +// The AssumeRole process for a fork-and-run death test. It implements a +// straightforward fork, with a simple pipe to transmit the status byte. +DeathTest::TestRole NoExecDeathTest::AssumeRole() { + const size_t thread_count = GetThreadCount(); + if (thread_count != 1) { + GTEST_LOG_(WARNING) << DeathTestThreadWarning(thread_count); + } + + int pipe_fd[2]; + GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1); + + DeathTest::set_last_death_test_message(""); + CaptureStderr(); + // When we fork the process below, the log file buffers are copied, but the + // file descriptors are shared. We flush all log files here so that closing + // the file descriptors in the child process doesn't throw off the + // synchronization between descriptors and buffers in the parent process. + // This is as close to the fork as possible to avoid a race condition in case + // there are multiple threads running before the death test, and another + // thread writes to the log file. + FlushInfoLog(); + + const pid_t child_pid = fork(); + GTEST_DEATH_TEST_CHECK_(child_pid != -1); + set_child_pid(child_pid); + if (child_pid == 0) { + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[0])); + set_write_fd(pipe_fd[1]); + // Redirects all logging to stderr in the child process to prevent + // concurrent writes to the log files. We capture stderr in the parent + // process and append the child process' output to a log. + LogToStderr(); + // Event forwarding to the listeners of event listener API mush be shut + // down in death test subprocesses. + GetUnitTestImpl()->listeners()->SuppressEventForwarding(); + g_in_fast_death_test_child = true; + return EXECUTE_TEST; + } else { + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1])); + set_read_fd(pipe_fd[0]); + set_spawned(true); + return OVERSEE_TEST; + } +} + +// A concrete death test class that forks and re-executes the main +// program from the beginning, with command-line flags set that cause +// only this specific death test to be run. +class ExecDeathTest : public ForkingDeathTest { + public: + ExecDeathTest(const char* a_statement, const RE* a_regex, + const char* file, int line) : + ForkingDeathTest(a_statement, a_regex), file_(file), line_(line) { } + virtual TestRole AssumeRole(); + private: + static ::std::vector + GetArgvsForDeathTestChildProcess() { + ::std::vector args = GetInjectableArgvs(); + return args; + } + // The name of the file in which the death test is located. + const char* const file_; + // The line number on which the death test is located. + const int line_; +}; + +// Utility class for accumulating command-line arguments. 
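+//
+// Usage sketch (illustrative, not from the original source): Arguments
+// keeps a NULL-terminated char* array in the shape execve() expects:
+//
+//   Arguments args;
+//   args.AddArguments(GetInjectableArgvs());      // copy of original argv
+//   args.AddArgument("--gtest_filter=Foo.Bar");   // narrow to one test
+//   execve(args.Argv()[0], args.Argv(), GetEnviron());
+//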
+class Arguments {
+ public:
+  Arguments() {
+    args_.push_back(NULL);
+  }
+
+  ~Arguments() {
+    for (std::vector<char*>::iterator i = args_.begin(); i != args_.end();
+         ++i) {
+      free(*i);
+    }
+  }
+  void AddArgument(const char* argument) {
+    args_.insert(args_.end() - 1, posix::StrDup(argument));
+  }
+
+  template <typename Str>
+  void AddArguments(const ::std::vector<Str>& arguments) {
+    for (typename ::std::vector<Str>::const_iterator i = arguments.begin();
+         i != arguments.end();
+         ++i) {
+      args_.insert(args_.end() - 1, posix::StrDup(i->c_str()));
+    }
+  }
+  char* const* Argv() {
+    return &args_[0];
+  }
+
+ private:
+  std::vector<char*> args_;
+};
+
+// A struct that encompasses the arguments to the child process of a
+// threadsafe-style death test process.
+struct ExecDeathTestArgs {
+  char* const* argv;  // Command-line arguments for the child's call to exec
+  int close_fd;       // File descriptor to close; the read end of a pipe
+};
+
+# if GTEST_OS_MAC
+inline char** GetEnviron() {
+  // When Google Test is built as a framework on MacOS X, the environ variable
+  // is unavailable. Apple's documentation (man environ) recommends using
+  // _NSGetEnviron() instead.
+  return *_NSGetEnviron();
+}
+# else
+// Some POSIX platforms expect you to declare environ. extern "C" makes
+// it reside in the global namespace.
+extern "C" char** environ;
+inline char** GetEnviron() { return environ; }
+# endif  // GTEST_OS_MAC
+
+# if !GTEST_OS_QNX
+// The main function for a threadsafe-style death test child process.
+// This function is called in a clone()-ed process and thus must avoid
+// any potentially unsafe operations like malloc or libc functions.
+static int ExecDeathTestChildMain(void* child_arg) {
+  ExecDeathTestArgs* const args = static_cast<ExecDeathTestArgs*>(child_arg);
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(args->close_fd));
+
+  // We need to execute the test program in the same environment where
+  // it was originally invoked.  Therefore we change to the original
+  // working directory first.
+  const char* const original_dir =
+      UnitTest::GetInstance()->original_working_dir();
+  // We can safely call chdir() as it's a direct system call.
+  if (chdir(original_dir) != 0) {
+    DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " +
+                   GetLastErrnoDescription());
+    return EXIT_FAILURE;
+  }
+
+  // We can safely call execve() as it's a direct system call.  We
+  // cannot use execvp() as it's a libc function and thus potentially
+  // unsafe.  Since execve() doesn't search the PATH, the user must
+  // invoke the test program via a valid path that contains at least
+  // one path separator.
+  execve(args->argv[0], args->argv, GetEnviron());
+  DeathTestAbort(std::string("execve(") + args->argv[0] + ", ...) in " +
+                 original_dir + " failed: " +
+                 GetLastErrnoDescription());
+  return EXIT_FAILURE;
+}
+# endif  // !GTEST_OS_QNX
+
+// Two utility routines that together determine the direction the stack
+// grows.
+// This could be accomplished more elegantly by a single recursive
+// function, but we want to guard against the unlikely possibility of
+// a smart compiler optimizing the recursion away.
+//
+// GTEST_NO_INLINE_ is required to prevent GCC 4.6 from inlining
+// StackLowerThanAddress into StackGrowsDown, which then doesn't give
+// correct answer.
+void StackLowerThanAddress(const void* ptr, bool* result) GTEST_NO_INLINE_;
+void StackLowerThanAddress(const void* ptr, bool* result) {
+  int dummy;
+  *result = (&dummy < ptr);
+}
+
+// Make sure AddressSanitizer does not tamper with the stack here.
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ +bool StackGrowsDown() { + int dummy; + bool result; + StackLowerThanAddress(&dummy, &result); + return result; +} + +// Spawns a child process with the same executable as the current process in +// a thread-safe manner and instructs it to run the death test. The +// implementation uses fork(2) + exec. On systems where clone(2) is +// available, it is used instead, being slightly more thread-safe. On QNX, +// fork supports only single-threaded environments, so this function uses +// spawn(2) there instead. The function dies with an error message if +// anything goes wrong. +static pid_t ExecDeathTestSpawnChild(char* const* argv, int close_fd) { + ExecDeathTestArgs args = { argv, close_fd }; + pid_t child_pid = -1; + +# if GTEST_OS_QNX + // Obtains the current directory and sets it to be closed in the child + // process. + const int cwd_fd = open(".", O_RDONLY); + GTEST_DEATH_TEST_CHECK_(cwd_fd != -1); + GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(cwd_fd, F_SETFD, FD_CLOEXEC)); + // We need to execute the test program in the same environment where + // it was originally invoked. Therefore we change to the original + // working directory first. + const char* const original_dir = + UnitTest::GetInstance()->original_working_dir(); + // We can safely call chdir() as it's a direct system call. + if (chdir(original_dir) != 0) { + DeathTestAbort(std::string("chdir(\"") + original_dir + "\") failed: " + + GetLastErrnoDescription()); + return EXIT_FAILURE; + } + + int fd_flags; + // Set close_fd to be closed after spawn. + GTEST_DEATH_TEST_CHECK_SYSCALL_(fd_flags = fcntl(close_fd, F_GETFD)); + GTEST_DEATH_TEST_CHECK_SYSCALL_(fcntl(close_fd, F_SETFD, + fd_flags | FD_CLOEXEC)); + struct inheritance inherit = {0}; + // spawn is a system call. + child_pid = spawn(args.argv[0], 0, NULL, &inherit, args.argv, GetEnviron()); + // Restores the current working directory. + GTEST_DEATH_TEST_CHECK_(fchdir(cwd_fd) != -1); + GTEST_DEATH_TEST_CHECK_SYSCALL_(close(cwd_fd)); + +# else // GTEST_OS_QNX +# if GTEST_OS_LINUX + // When a SIGPROF signal is received while fork() or clone() are executing, + // the process may hang. To avoid this, we ignore SIGPROF here and re-enable + // it after the call to fork()/clone() is complete. + struct sigaction saved_sigprof_action; + struct sigaction ignore_sigprof_action; + memset(&ignore_sigprof_action, 0, sizeof(ignore_sigprof_action)); + sigemptyset(&ignore_sigprof_action.sa_mask); + ignore_sigprof_action.sa_handler = SIG_IGN; + GTEST_DEATH_TEST_CHECK_SYSCALL_(sigaction( + SIGPROF, &ignore_sigprof_action, &saved_sigprof_action)); +# endif // GTEST_OS_LINUX + +# if GTEST_HAS_CLONE + const bool use_fork = GTEST_FLAG(death_test_use_fork); + + if (!use_fork) { + static const bool stack_grows_down = StackGrowsDown(); + const size_t stack_size = getpagesize(); + // MMAP_ANONYMOUS is not defined on Mac, so we use MAP_ANON instead. + void* const stack = mmap(NULL, stack_size, PROT_READ | PROT_WRITE, + MAP_ANON | MAP_PRIVATE, -1, 0); + GTEST_DEATH_TEST_CHECK_(stack != MAP_FAILED); + + // Maximum stack alignment in bytes: For a downward-growing stack, this + // amount is subtracted from size of the stack space to get an address + // that is within the stack space and is aligned on all systems we care + // about. As far as I know there is no ABI with stack alignment greater + // than 64. We assume stack and stack_size already have alignment of + // kMaxStackAlignment. 
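+    // Worked example (editorial note, not from the original source): with
+    // a 4096-byte stack and 64-byte maximum alignment, a downward-growing
+    // stack gets stack_top = stack + 4096 - 64 = stack + 4032, which stays
+    // inside the mapping and is 64-byte aligned whenever mmap() returns a
+    // page-aligned base address.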
+  const size_t kMaxStackAlignment = 64;
+  void* const stack_top =
+      static_cast<char*>(stack) +
+          (stack_grows_down ? stack_size - kMaxStackAlignment : 0);
+  GTEST_DEATH_TEST_CHECK_(stack_size > kMaxStackAlignment &&
+      reinterpret_cast<intptr_t>(stack_top) % kMaxStackAlignment == 0);
+
+  child_pid = clone(&ExecDeathTestChildMain, stack_top, SIGCHLD, &args);
+
+  GTEST_DEATH_TEST_CHECK_(munmap(stack, stack_size) != -1);
+  }
+# else
+  const bool use_fork = true;
+# endif  // GTEST_HAS_CLONE
+
+  if (use_fork && (child_pid = fork()) == 0) {
+    ExecDeathTestChildMain(&args);
+    _exit(0);
+  }
+# endif  // GTEST_OS_QNX
+# if GTEST_OS_LINUX
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(
+      sigaction(SIGPROF, &saved_sigprof_action, NULL));
+# endif  // GTEST_OS_LINUX
+
+  GTEST_DEATH_TEST_CHECK_(child_pid != -1);
+  return child_pid;
+}
+
+// The AssumeRole process for a fork-and-exec death test.  It re-executes the
+// main program from the beginning, setting the --gtest_filter
+// and --gtest_internal_run_death_test flags to cause only the current
+// death test to be re-run.
+DeathTest::TestRole ExecDeathTest::AssumeRole() {
+  const UnitTestImpl* const impl = GetUnitTestImpl();
+  const InternalRunDeathTestFlag* const flag =
+      impl->internal_run_death_test_flag();
+  const TestInfo* const info = impl->current_test_info();
+  const int death_test_index = info->result()->death_test_count();
+
+  if (flag != NULL) {
+    set_write_fd(flag->write_fd());
+    return EXECUTE_TEST;
+  }
+
+  int pipe_fd[2];
+  GTEST_DEATH_TEST_CHECK_(pipe(pipe_fd) != -1);
+  // Clear the close-on-exec flag on the write end of the pipe, lest
+  // it be closed when the child process does an exec:
+  GTEST_DEATH_TEST_CHECK_(fcntl(pipe_fd[1], F_SETFD, 0) != -1);
+
+  const std::string filter_flag =
+      std::string("--") + GTEST_FLAG_PREFIX_ + kFilterFlag + "="
+      + info->test_case_name() + "." + info->name();
+  const std::string internal_flag =
+      std::string("--") + GTEST_FLAG_PREFIX_ + kInternalRunDeathTestFlag + "="
+      + file_ + "|" + StreamableToString(line_) + "|"
+      + StreamableToString(death_test_index) + "|"
+      + StreamableToString(pipe_fd[1]);
+  Arguments args;
+  args.AddArguments(GetArgvsForDeathTestChildProcess());
+  args.AddArgument(filter_flag.c_str());
+  args.AddArgument(internal_flag.c_str());
+
+  DeathTest::set_last_death_test_message("");
+
+  CaptureStderr();
+  // See the comment in NoExecDeathTest::AssumeRole for why the next line
+  // is necessary.
+  FlushInfoLog();
+
+  const pid_t child_pid = ExecDeathTestSpawnChild(args.Argv(), pipe_fd[0]);
+  GTEST_DEATH_TEST_CHECK_SYSCALL_(close(pipe_fd[1]));
+  set_child_pid(child_pid);
+  set_read_fd(pipe_fd[0]);
+  set_spawned(true);
+  return OVERSEE_TEST;
+}
+
+# endif  // !GTEST_OS_WINDOWS
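+// For reference, the child spawned above ends up with a command line of
+// the following shape (values illustrative):
+//
+//   path/to/foo_test --gtest_filter=FooDeathTest.Bar \
+//       --gtest_internal_run_death_test=foo_test.cc|42|1|5
+//
+// i.e. file|line|death-test-index|write-fd -- the same fields that
+// ParseInternalRunDeathTestFlag() below splits on '|' (on Windows the
+// flag carries six fields, with a process id and handles in place of
+// the fd).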
+// Creates a concrete DeathTest-derived class that depends on the
+// --gtest_death_test_style flag, and sets the pointer pointed to
+// by the "test" argument to its address.  If the test should be
+// skipped, sets that pointer to NULL.  Returns true, unless the
+// flag is set to an invalid value.
+bool DefaultDeathTestFactory::Create(const char* statement, const RE* regex,
+                                     const char* file, int line,
+                                     DeathTest** test) {
+  UnitTestImpl* const impl = GetUnitTestImpl();
+  const InternalRunDeathTestFlag* const flag =
+      impl->internal_run_death_test_flag();
+  const int death_test_index = impl->current_test_info()
+      ->increment_death_test_count();
+
+  if (flag != NULL) {
+    if (death_test_index > flag->index()) {
+      DeathTest::set_last_death_test_message(
+          "Death test count (" + StreamableToString(death_test_index)
+          + ") somehow exceeded expected maximum ("
+          + StreamableToString(flag->index()) + ")");
+      return false;
+    }
+
+    if (!(flag->file() == file && flag->line() == line &&
+          flag->index() == death_test_index)) {
+      *test = NULL;
+      return true;
+    }
+  }
+
+# if GTEST_OS_WINDOWS
+
+  if (GTEST_FLAG(death_test_style) == "threadsafe" ||
+      GTEST_FLAG(death_test_style) == "fast") {
+    *test = new WindowsDeathTest(statement, regex, file, line);
+  }
+
+# else
+
+  if (GTEST_FLAG(death_test_style) == "threadsafe") {
+    *test = new ExecDeathTest(statement, regex, file, line);
+  } else if (GTEST_FLAG(death_test_style) == "fast") {
+    *test = new NoExecDeathTest(statement, regex);
+  }
+
+# endif  // GTEST_OS_WINDOWS
+
+  else {  // NOLINT - this is more readable than unbalanced brackets inside #if.
+    DeathTest::set_last_death_test_message(
+        "Unknown death test style \"" + GTEST_FLAG(death_test_style)
+        + "\" encountered");
+    return false;
+  }
+
+  return true;
+}
+
+// Splits a given string on a given delimiter, populating a given
+// vector with the fields.  GTEST_HAS_DEATH_TEST implies that we have
+// ::std::string, so we can use it here.
+static void SplitString(const ::std::string& str, char delimiter,
+                        ::std::vector< ::std::string>* dest) {
+  ::std::vector< ::std::string> parsed;
+  ::std::string::size_type pos = 0;
+  while (::testing::internal::AlwaysTrue()) {
+    const ::std::string::size_type colon = str.find(delimiter, pos);
+    if (colon == ::std::string::npos) {
+      parsed.push_back(str.substr(pos));
+      break;
+    } else {
+      parsed.push_back(str.substr(pos, colon - pos));
+      pos = colon + 1;
+    }
+  }
+  dest->swap(parsed);
+}
+
+# if GTEST_OS_WINDOWS
+// Recreates the pipe and event handles from the provided parameters,
+// signals the event, and returns a file descriptor wrapped around the pipe
+// handle. This function is called in the child process only.
+int GetStatusFileDescriptor(unsigned int parent_process_id,
+                            size_t write_handle_as_size_t,
+                            size_t event_handle_as_size_t) {
+  AutoHandle parent_process_handle(::OpenProcess(PROCESS_DUP_HANDLE,
+                                                 FALSE,  // Non-inheritable.
+                                                 parent_process_id));
+  if (parent_process_handle.Get() == INVALID_HANDLE_VALUE) {
+    DeathTestAbort("Unable to open parent process " +
+                   StreamableToString(parent_process_id));
+  }
+
+  // TODO(vladl@google.com): Replace the following check with a
+  // compile-time assertion when available.
+  GTEST_CHECK_(sizeof(HANDLE) <= sizeof(size_t));
+
+  const HANDLE write_handle =
+      reinterpret_cast<HANDLE>(write_handle_as_size_t);
+  HANDLE dup_write_handle;
+
+  // The newly initialized handle is accessible only in the parent
+  // process. To obtain one accessible within the child, we need to use
+  // DuplicateHandle.
+  if (!::DuplicateHandle(parent_process_handle.Get(), write_handle,
+                         ::GetCurrentProcess(), &dup_write_handle,
+                         0x0,    // Requested privileges ignored since
+                                 // DUPLICATE_SAME_ACCESS is used.
+                         FALSE,  // Request non-inheritable handler.
+                         DUPLICATE_SAME_ACCESS)) {
+    DeathTestAbort("Unable to duplicate the pipe handle " +
+                   StreamableToString(write_handle_as_size_t) +
+                   " from the parent process " +
+                   StreamableToString(parent_process_id));
+  }
+
+  const HANDLE event_handle = reinterpret_cast<HANDLE>(event_handle_as_size_t);
+  HANDLE dup_event_handle;
+
+  if (!::DuplicateHandle(parent_process_handle.Get(), event_handle,
+                         ::GetCurrentProcess(), &dup_event_handle,
+                         0x0,
+                         FALSE,
+                         DUPLICATE_SAME_ACCESS)) {
+    DeathTestAbort("Unable to duplicate the event handle " +
+                   StreamableToString(event_handle_as_size_t) +
+                   " from the parent process " +
+                   StreamableToString(parent_process_id));
+  }
+
+  const int write_fd =
+      ::_open_osfhandle(reinterpret_cast<intptr_t>(dup_write_handle), O_APPEND);
+  if (write_fd == -1) {
+    DeathTestAbort("Unable to convert pipe handle " +
+                   StreamableToString(write_handle_as_size_t) +
+                   " to a file descriptor");
+  }
+
+  // Signals the parent that the write end of the pipe has been acquired
+  // so the parent can release its own write end.
+  ::SetEvent(dup_event_handle);
+
+  return write_fd;
+}
+# endif  // GTEST_OS_WINDOWS
+
+// Returns a newly created InternalRunDeathTestFlag object with fields
+// initialized from the GTEST_FLAG(internal_run_death_test) flag if
+// the flag is specified; otherwise returns NULL.
+InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag() {
+  if (GTEST_FLAG(internal_run_death_test) == "") return NULL;
+
+  // GTEST_HAS_DEATH_TEST implies that we have ::std::string, so we
+  // can use it here.
+  int line = -1;
+  int index = -1;
+  ::std::vector< ::std::string> fields;
+  SplitString(GTEST_FLAG(internal_run_death_test).c_str(), '|', &fields);
+  int write_fd = -1;
+
+# if GTEST_OS_WINDOWS
+
+  unsigned int parent_process_id = 0;
+  size_t write_handle_as_size_t = 0;
+  size_t event_handle_as_size_t = 0;
+
+  if (fields.size() != 6
+      || !ParseNaturalNumber(fields[1], &line)
+      || !ParseNaturalNumber(fields[2], &index)
+      || !ParseNaturalNumber(fields[3], &parent_process_id)
+      || !ParseNaturalNumber(fields[4], &write_handle_as_size_t)
+      || !ParseNaturalNumber(fields[5], &event_handle_as_size_t)) {
+    DeathTestAbort("Bad --gtest_internal_run_death_test flag: " +
+                   GTEST_FLAG(internal_run_death_test));
+  }
+  write_fd = GetStatusFileDescriptor(parent_process_id,
+                                     write_handle_as_size_t,
+                                     event_handle_as_size_t);
+# else
+
+  if (fields.size() != 4
+      || !ParseNaturalNumber(fields[1], &line)
+      || !ParseNaturalNumber(fields[2], &index)
+      || !ParseNaturalNumber(fields[3], &write_fd)) {
+    DeathTestAbort("Bad --gtest_internal_run_death_test flag: "
+        + GTEST_FLAG(internal_run_death_test));
+  }
+
+# endif  // GTEST_OS_WINDOWS
+
+  return new InternalRunDeathTestFlag(fields[0], line, index, write_fd);
+}
+
+}  // namespace internal
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+}  // namespace testing
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: keith.ray@gmail.com (Keith Ray)
+
+
+#include <stdlib.h>
+
+#if GTEST_OS_WINDOWS_MOBILE
+# include <windows.h>
+#elif GTEST_OS_WINDOWS
+# include <direct.h>
+# include <io.h>
+#elif GTEST_OS_SYMBIAN
+// Symbian OpenC has PATH_MAX in sys/syslimits.h
+# include <sys/syslimits.h>
+#else
+# include <limits.h>
+# include <climits>  // Some Linux distributions define PATH_MAX here.
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+#if GTEST_OS_WINDOWS
+# define GTEST_PATH_MAX_ _MAX_PATH
+#elif defined(PATH_MAX)
+# define GTEST_PATH_MAX_ PATH_MAX
+#elif defined(_XOPEN_PATH_MAX)
+# define GTEST_PATH_MAX_ _XOPEN_PATH_MAX
+#else
+# define GTEST_PATH_MAX_ _POSIX_PATH_MAX
+#endif  // GTEST_OS_WINDOWS
+
+
+namespace testing {
+namespace internal {
+
+#if GTEST_OS_WINDOWS
+// On Windows, '\\' is the standard path separator, but many tools and the
+// Windows API also accept '/' as an alternate path separator. Unless otherwise
+// noted, a file path can contain either kind of path separators, or a mixture
+// of them.
+const char kPathSeparator = '\\';
+const char kAlternatePathSeparator = '/';
+const char kAlternatePathSeparatorString[] = "/";
+# if GTEST_OS_WINDOWS_MOBILE
+// Windows CE doesn't have a current directory. You should not use
+// the current directory in tests on Windows CE, but this at least
+// provides a reasonable fallback.
+const char kCurrentDirectoryString[] = "\\";
+// Windows CE doesn't define INVALID_FILE_ATTRIBUTES
+const DWORD kInvalidFileAttributes = 0xffffffff;
+# else
+const char kCurrentDirectoryString[] = ".\\";
+# endif  // GTEST_OS_WINDOWS_MOBILE
+#else
+const char kPathSeparator = '/';
+const char kCurrentDirectoryString[] = "./";
+#endif  // GTEST_OS_WINDOWS
+
+// Returns whether the given character is a valid path separator.
+static bool IsPathSeparator(char c) {
+#if GTEST_HAS_ALT_PATH_SEP_
+  return (c == kPathSeparator) || (c == kAlternatePathSeparator);
+#else
+  return c == kPathSeparator;
+#endif
+}
+
+// Returns the current working directory, or "" if unsuccessful.
+FilePath FilePath::GetCurrentDir() {
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
+  // Windows CE doesn't have a current directory, so we just return
+  // something reasonable.
+  return FilePath(kCurrentDirectoryString);
+#elif GTEST_OS_WINDOWS
+  char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+  return FilePath(_getcwd(cwd, sizeof(cwd)) == NULL ? "" : cwd);
+#else
+  char cwd[GTEST_PATH_MAX_ + 1] = { '\0' };
+  char* result = getcwd(cwd, sizeof(cwd));
+# if GTEST_OS_NACL
+  // getcwd will likely fail in NaCl due to the sandbox, so return something
+  // reasonable. The user may have provided a shim implementation for getcwd,
+  // however, so fallback only when failure is detected.
+  return FilePath(result == NULL ? kCurrentDirectoryString : cwd);
+# endif  // GTEST_OS_NACL
+  return FilePath(result == NULL ? "" : cwd);
+#endif  // GTEST_OS_WINDOWS_MOBILE
+}
+
+// Returns a copy of the FilePath with the case-insensitive extension removed.
+// Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns
+// FilePath("dir/file"). If a case-insensitive extension is not
+// found, returns a copy of the original FilePath.
+FilePath FilePath::RemoveExtension(const char* extension) const {
+  const std::string dot_extension = std::string(".") + extension;
+  if (String::EndsWithCaseInsensitive(pathname_, dot_extension)) {
+    return FilePath(pathname_.substr(
+        0, pathname_.length() - dot_extension.length()));
+  }
+  return *this;
+}
+
+// Returns a pointer to the last occurrence of a valid path separator in
+// the FilePath. On Windows, for example, both '/' and '\' are valid path
+// separators. Returns NULL if no path separator was found.
+const char* FilePath::FindLastPathSeparator() const {
+  const char* const last_sep = strrchr(c_str(), kPathSeparator);
+#if GTEST_HAS_ALT_PATH_SEP_
+  const char* const last_alt_sep = strrchr(c_str(), kAlternatePathSeparator);
+  // Comparing two pointers of which only one is NULL is undefined.
+  if (last_alt_sep != NULL &&
+      (last_sep == NULL || last_alt_sep > last_sep)) {
+    return last_alt_sep;
+  }
+#endif
+  return last_sep;
+}
+
+// Returns a copy of the FilePath with the directory part removed.
+// Example: FilePath("path/to/file").RemoveDirectoryName() returns
+// FilePath("file"). If there is no directory part ("just_a_file"), it returns
+// the FilePath unmodified. If there is no file part ("just_a_dir/") it
+// returns an empty FilePath ("").
+// On Windows platform, '\' is the path separator, otherwise it is '/'.
+FilePath FilePath::RemoveDirectoryName() const {
+  const char* const last_sep = FindLastPathSeparator();
+  return last_sep ? FilePath(last_sep + 1) : *this;
+}
+
+// RemoveFileName returns the directory path with the filename removed.
+// Example: FilePath("path/to/file").RemoveFileName() returns "path/to/".
+// If the FilePath is "a_file" or "/a_file", RemoveFileName returns
+// FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does
+// not have a file, like "just/a/dir/", it returns the FilePath unmodified.
+// On Windows platform, '\' is the path separator, otherwise it is '/'.
+FilePath FilePath::RemoveFileName() const {
+  const char* const last_sep = FindLastPathSeparator();
+  std::string dir;
+  if (last_sep) {
+    dir = std::string(c_str(), last_sep + 1 - c_str());
+  } else {
+    dir = kCurrentDirectoryString;
+  }
+  return FilePath(dir);
+}
+
+// Helper functions for naming files in a directory for xml output.
+
+// Given directory = "dir", base_name = "test", number = 0,
+// extension = "xml", returns "dir/test.xml". If number is greater
+// than zero (e.g., 12), returns "dir/test_12.xml".
+// On Windows platform, uses \ as the separator rather than /.
+FilePath FilePath::MakeFileName(const FilePath& directory,
+                                const FilePath& base_name,
+                                int number,
+                                const char* extension) {
+  std::string file;
+  if (number == 0) {
+    file = base_name.string() + "." + extension;
+  } else {
+    file = base_name.string() + "_" + StreamableToString(number)
+        + "."
+ extension; + } + return ConcatPaths(directory, FilePath(file)); +} + +// Given directory = "dir", relative_path = "test.xml", returns "dir/test.xml". +// On Windows, uses \ as the separator rather than /. +FilePath FilePath::ConcatPaths(const FilePath& directory, + const FilePath& relative_path) { + if (directory.IsEmpty()) + return relative_path; + const FilePath dir(directory.RemoveTrailingPathSeparator()); + return FilePath(dir.string() + kPathSeparator + relative_path.string()); +} + +// Returns true if pathname describes something findable in the file-system, +// either a file, directory, or whatever. +bool FilePath::FileOrDirectoryExists() const { +#if GTEST_OS_WINDOWS_MOBILE + LPCWSTR unicode = String::AnsiToUtf16(pathname_.c_str()); + const DWORD attributes = GetFileAttributes(unicode); + delete [] unicode; + return attributes != kInvalidFileAttributes; +#else + posix::StatStruct file_stat; + return posix::Stat(pathname_.c_str(), &file_stat) == 0; +#endif // GTEST_OS_WINDOWS_MOBILE +} + +// Returns true if pathname describes a directory in the file-system +// that exists. +bool FilePath::DirectoryExists() const { + bool result = false; +#if GTEST_OS_WINDOWS + // Don't strip off trailing separator if path is a root directory on + // Windows (like "C:\\"). + const FilePath& path(IsRootDirectory() ? *this : + RemoveTrailingPathSeparator()); +#else + const FilePath& path(*this); +#endif + +#if GTEST_OS_WINDOWS_MOBILE + LPCWSTR unicode = String::AnsiToUtf16(path.c_str()); + const DWORD attributes = GetFileAttributes(unicode); + delete [] unicode; + if ((attributes != kInvalidFileAttributes) && + (attributes & FILE_ATTRIBUTE_DIRECTORY)) { + result = true; + } +#else + posix::StatStruct file_stat; + result = posix::Stat(path.c_str(), &file_stat) == 0 && + posix::IsDir(file_stat); +#endif // GTEST_OS_WINDOWS_MOBILE + + return result; +} + +// Returns true if pathname describes a root directory. (Windows has one +// root directory per disk drive.) +bool FilePath::IsRootDirectory() const { +#if GTEST_OS_WINDOWS + // TODO(wan@google.com): on Windows a network share like + // \\server\share can be a root directory, although it cannot be the + // current directory. Handle this properly. + return pathname_.length() == 3 && IsAbsolutePath(); +#else + return pathname_.length() == 1 && IsPathSeparator(pathname_.c_str()[0]); +#endif +} + +// Returns true if pathname describes an absolute path. +bool FilePath::IsAbsolutePath() const { + const char* const name = pathname_.c_str(); +#if GTEST_OS_WINDOWS + return pathname_.length() >= 3 && + ((name[0] >= 'a' && name[0] <= 'z') || + (name[0] >= 'A' && name[0] <= 'Z')) && + name[1] == ':' && + IsPathSeparator(name[2]); +#else + return IsPathSeparator(name[0]); +#endif +} + +// Returns a pathname for a file that does not currently exist. The pathname +// will be directory/base_name.extension or +// directory/base_name_.extension if directory/base_name.extension +// already exists. The number will be incremented until a pathname is found +// that does not already exist. +// Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'. +// There could be a race condition if two or more processes are calling this +// function at the same time -- they could both pick the same filename. 
+FilePath FilePath::GenerateUniqueFileName(const FilePath& directory, + const FilePath& base_name, + const char* extension) { + FilePath full_pathname; + int number = 0; + do { + full_pathname.Set(MakeFileName(directory, base_name, number++, extension)); + } while (full_pathname.FileOrDirectoryExists()); + return full_pathname; +} + +// Returns true if FilePath ends with a path separator, which indicates that +// it is intended to represent a directory. Returns false otherwise. +// This does NOT check that a directory (or file) actually exists. +bool FilePath::IsDirectory() const { + return !pathname_.empty() && + IsPathSeparator(pathname_.c_str()[pathname_.length() - 1]); +} + +// Create directories so that path exists. Returns true if successful or if +// the directories already exist; returns false if unable to create directories +// for any reason. +bool FilePath::CreateDirectoriesRecursively() const { + if (!this->IsDirectory()) { + return false; + } + + if (pathname_.length() == 0 || this->DirectoryExists()) { + return true; + } + + const FilePath parent(this->RemoveTrailingPathSeparator().RemoveFileName()); + return parent.CreateDirectoriesRecursively() && this->CreateFolder(); +} + +// Create the directory so that path exists. Returns true if successful or +// if the directory already exists; returns false if unable to create the +// directory for any reason, including if the parent directory does not +// exist. Not named "CreateDirectory" because that's a macro on Windows. +bool FilePath::CreateFolder() const { +#if GTEST_OS_WINDOWS_MOBILE + FilePath removed_sep(this->RemoveTrailingPathSeparator()); + LPCWSTR unicode = String::AnsiToUtf16(removed_sep.c_str()); + int result = CreateDirectory(unicode, NULL) ? 0 : -1; + delete [] unicode; +#elif GTEST_OS_WINDOWS + int result = _mkdir(pathname_.c_str()); +#else + int result = mkdir(pathname_.c_str(), 0777); +#endif // GTEST_OS_WINDOWS_MOBILE + + if (result == -1) { + return this->DirectoryExists(); // An error is OK if the directory exists. + } + return true; // No error. +} + +// If input name has a trailing separator character, remove it and return the +// name, otherwise return the name string unmodified. +// On Windows platform, uses \ as the separator, other platforms use /. +FilePath FilePath::RemoveTrailingPathSeparator() const { + return IsDirectory() + ? FilePath(pathname_.substr(0, pathname_.length() - 1)) + : *this; +} + +// Removes any redundant separators that might be in the pathname. +// For example, "bar///foo" becomes "bar/foo". Does not eliminate other +// redundancies that might be in a pathname involving "." or "..". +// TODO(wan@google.com): handle Windows network shares (e.g. \\server\share). +void FilePath::Normalize() { + if (pathname_.c_str() == NULL) { + pathname_ = ""; + return; + } + const char* src = pathname_.c_str(); + char* const dest = new char[pathname_.length() + 1]; + char* dest_ptr = dest; + memset(dest_ptr, 0, pathname_.length() + 1); + + while (*src != '\0') { + *dest_ptr = *src; + if (!IsPathSeparator(*src)) { + src++; + } else { +#if GTEST_HAS_ALT_PATH_SEP_ + if (*dest_ptr == kAlternatePathSeparator) { + *dest_ptr = kPathSeparator; + } +#endif + while (IsPathSeparator(*src)) + src++; + } + dest_ptr++; + } + *dest_ptr = '\0'; + pathname_ = dest; + delete[] dest; +} + +} // namespace internal +} // namespace testing +// Copyright 2008, Google Inc. +// All rights reserved. 
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+#include <limits.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+
+#if GTEST_OS_WINDOWS
+# include <windows.h>
+# include <io.h>
+# include <sys/stat.h>
+# include <map>  // Used in ThreadLocal.
+#else
+# include <unistd.h>
+#endif  // GTEST_OS_WINDOWS
+
+#if GTEST_OS_MAC
+# include <mach/mach_init.h>
+# include <mach/task.h>
+# include <mach/vm_map.h>
+#endif  // GTEST_OS_MAC
+
+#if GTEST_OS_QNX
+# include <devctl.h>
+# include <fcntl.h>
+# include <sys/procfs.h>
+#endif  // GTEST_OS_QNX
+
+
+// Indicates that this translation unit is part of Google Test's
+// implementation.  It must come before gtest-internal-inl.h is
+// included, or there will be a compiler error.  This trick exists to
+// prevent the accidental inclusion of gtest-internal-inl.h in the
+// user's code.
+#define GTEST_IMPLEMENTATION_ 1
+#undef GTEST_IMPLEMENTATION_
+
+namespace testing {
+namespace internal {
+
+#if defined(_MSC_VER) || defined(__BORLANDC__)
+// MSVC and C++Builder do not provide a definition of STDERR_FILENO.
+const int kStdOutFileno = 1;
+const int kStdErrFileno = 2;
+#else
+const int kStdOutFileno = STDOUT_FILENO;
+const int kStdErrFileno = STDERR_FILENO;
+#endif  // _MSC_VER
+
+#if GTEST_OS_MAC
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+  const task_t task = mach_task_self();
+  mach_msg_type_number_t thread_count;
+  thread_act_array_t thread_list;
+  const kern_return_t status = task_threads(task, &thread_list, &thread_count);
+  if (status == KERN_SUCCESS) {
+    // task_threads allocates resources in thread_list and we need to free them
+    // to avoid leaks.
+    vm_deallocate(task,
+                  reinterpret_cast<vm_address_t>(thread_list),
+                  sizeof(thread_t) * thread_count);
+    return static_cast<size_t>(thread_count);
+  } else {
+    return 0;
+  }
+}
+
+#elif GTEST_OS_QNX
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+size_t GetThreadCount() {
+  const int fd = open("/proc/self/as", O_RDONLY);
+  if (fd < 0) {
+    return 0;
+  }
+  procfs_info process_info;
+  const int status =
+      devctl(fd, DCMD_PROC_INFO, &process_info, sizeof(process_info), NULL);
+  close(fd);
+  if (status == EOK) {
+    return static_cast<size_t>(process_info.num_threads);
+  } else {
+    return 0;
+  }
+}
+
+#else
+
+size_t GetThreadCount() {
+  // There's no portable way to detect the number of threads, so we just
+  // return 0 to indicate that we cannot detect it.
+  return 0;
+}
+
+#endif  // GTEST_OS_MAC
+
+#if GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS
+
+void SleepMilliseconds(int n) {
+  ::Sleep(n);
+}
+
+AutoHandle::AutoHandle()
+    : handle_(INVALID_HANDLE_VALUE) {}
+
+AutoHandle::AutoHandle(Handle handle)
+    : handle_(handle) {}
+
+AutoHandle::~AutoHandle() {
+  Reset();
+}
+
+AutoHandle::Handle AutoHandle::Get() const {
+  return handle_;
+}
+
+void AutoHandle::Reset() {
+  Reset(INVALID_HANDLE_VALUE);
+}
+
+void AutoHandle::Reset(HANDLE handle) {
+  // Resetting with the same handle we already own is invalid.
+  if (handle_ != handle) {
+    if (IsCloseable()) {
+      ::CloseHandle(handle_);
+    }
+    handle_ = handle;
+  } else {
+    GTEST_CHECK_(!IsCloseable())
+        << "Resetting a valid handle to itself is likely a programmer error "
+           "and thus not allowed.";
+  }
+}
+
+bool AutoHandle::IsCloseable() const {
+  // Different Windows APIs may use either of these values to represent an
+  // invalid handle.
+  return handle_ != NULL && handle_ != INVALID_HANDLE_VALUE;
+}
+
+Notification::Notification()
+    : event_(::CreateEvent(NULL,   // Default security attributes.
+                           TRUE,   // Do not reset automatically.
+                           FALSE,  // Initially unset.
+                           NULL)) {  // Anonymous event.
+  GTEST_CHECK_(event_.Get() != NULL);
+}
+
+void Notification::Notify() {
+  GTEST_CHECK_(::SetEvent(event_.Get()) != FALSE);
+}
+
+void Notification::WaitForNotification() {
+  GTEST_CHECK_(
+      ::WaitForSingleObject(event_.Get(), INFINITE) == WAIT_OBJECT_0);
+}
+
+Mutex::Mutex()
+    : type_(kDynamic),
+      owner_thread_id_(0),
+      critical_section_init_phase_(0),
+      critical_section_(new CRITICAL_SECTION) {
+  ::InitializeCriticalSection(critical_section_);
+}
+
+Mutex::~Mutex() {
+  // Static mutexes are leaked intentionally. It is not thread-safe to try
+  // to clean them up.
+  // TODO(yukawa): Switch to Slim Reader/Writer (SRW) Locks, which requires
+  // nothing to clean it up but is available only on Vista and later.
+  // http://msdn.microsoft.com/en-us/library/windows/desktop/aa904937.aspx
+  if (type_ == kDynamic) {
+    ::DeleteCriticalSection(critical_section_);
+    delete critical_section_;
+    critical_section_ = NULL;
+  }
+}
+
+void Mutex::Lock() {
+  ThreadSafeLazyInit();
+  ::EnterCriticalSection(critical_section_);
+  owner_thread_id_ = ::GetCurrentThreadId();
+}
+
+void Mutex::Unlock() {
+  ThreadSafeLazyInit();
+  // We don't protect writing to owner_thread_id_ here, as it's the
+  // caller's responsibility to ensure that the current thread holds the
+  // mutex when this is called.
+  owner_thread_id_ = 0;
+  ::LeaveCriticalSection(critical_section_);
+}
+
+// Does nothing if the current thread holds the mutex. Otherwise, crashes
+// with high probability.
+void Mutex::AssertHeld() {
+  ThreadSafeLazyInit();
+  GTEST_CHECK_(owner_thread_id_ == ::GetCurrentThreadId())
+      << "The current thread is not holding the mutex @" << this;
+}
+
+// Initializes owner_thread_id_ and critical_section_ in static mutexes.
+void Mutex::ThreadSafeLazyInit() {
+  // Dynamic mutexes are initialized in the constructor.
+  if (type_ == kStatic) {
+    switch (
+        ::InterlockedCompareExchange(&critical_section_init_phase_, 1L, 0L)) {
+      case 0:
+        // If critical_section_init_phase_ was 0 before the exchange, we
+        // are the first to test it and need to perform the initialization.
+        owner_thread_id_ = 0;
+        critical_section_ = new CRITICAL_SECTION;
+        ::InitializeCriticalSection(critical_section_);
+        // Updates the critical_section_init_phase_ to 2 to signal
+        // initialization complete.
+        GTEST_CHECK_(::InterlockedCompareExchange(
+                          &critical_section_init_phase_, 2L, 1L) ==
+                     1L);
+        break;
+      case 1:
+        // Somebody else is already initializing the mutex; spin until they
+        // are done.
+        while (::InterlockedCompareExchange(&critical_section_init_phase_,
+                                            2L,
+                                            2L) != 2L) {
+          // Possibly yields the rest of the thread's time slice to other
+          // threads.
+          ::Sleep(0);
+        }
+        break;
+
+      case 2:
+        break;  // The mutex is already initialized and ready for use.
+
+      default:
+        GTEST_CHECK_(false)
+            << "Unexpected value of critical_section_init_phase_ "
+            << "while initializing a static mutex.";
+    }
+  }
+}
+
+namespace {
+
+class ThreadWithParamSupport : public ThreadWithParamBase {
+ public:
+  static HANDLE CreateThread(Runnable* runnable,
+                             Notification* thread_can_start) {
+    ThreadMainParam* param = new ThreadMainParam(runnable, thread_can_start);
+    DWORD thread_id;
+    // TODO(yukawa): Consider to use _beginthreadex instead.
+    HANDLE thread_handle = ::CreateThread(
+        NULL,    // Default security.
+        0,       // Default stack size.
+        &ThreadWithParamSupport::ThreadMain,
+        param,   // Parameter to ThreadMainStatic
+        0x0,     // Default creation flags.
+        &thread_id);  // Need a valid pointer for the call to work under Win98.
+    GTEST_CHECK_(thread_handle != NULL) << "CreateThread failed with error "
+                                        << ::GetLastError() << ".";
+    if (thread_handle == NULL) {
+      delete param;
+    }
+    return thread_handle;
+  }
+
+ private:
+  struct ThreadMainParam {
+    ThreadMainParam(Runnable* runnable, Notification* thread_can_start)
+        : runnable_(runnable),
+          thread_can_start_(thread_can_start) {
+    }
+    scoped_ptr<Runnable> runnable_;
+    // Does not own.
+    Notification* thread_can_start_;
+  };
+
+  static DWORD WINAPI ThreadMain(void* ptr) {
+    // Transfers ownership.
+    scoped_ptr<ThreadMainParam> param(static_cast<ThreadMainParam*>(ptr));
+    if (param->thread_can_start_ != NULL)
+      param->thread_can_start_->WaitForNotification();
+    param->runnable_->Run();
+    return 0;
+  }
+
+  // Prohibit instantiation.
+  ThreadWithParamSupport();
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParamSupport);
+};
+
+}  // namespace
+
+ThreadWithParamBase::ThreadWithParamBase(Runnable *runnable,
+                                         Notification* thread_can_start)
+    : thread_(ThreadWithParamSupport::CreateThread(runnable,
+                                                   thread_can_start)) {
+}
+
+ThreadWithParamBase::~ThreadWithParamBase() {
+  Join();
+}
+
+void ThreadWithParamBase::Join() {
+  GTEST_CHECK_(::WaitForSingleObject(thread_.Get(), INFINITE) == WAIT_OBJECT_0)
+      << "Failed to join the thread with error " << ::GetLastError() << ".";
+}
+
+// Maps a thread to a set of ThreadIdToThreadLocals that have values
+// instantiated on that thread and notifies them when the thread exits.  A
+// ThreadLocal instance is expected to persist until all threads it has
+// values on have terminated.
+class ThreadLocalRegistryImpl {
+ public:
+  // Registers thread_local_instance as having value on the current thread.
+  // Returns a value that can be used to identify the thread from other threads.
+  static ThreadLocalValueHolderBase* GetValueOnCurrentThread(
+      const ThreadLocalBase* thread_local_instance) {
+    DWORD current_thread = ::GetCurrentThreadId;
+    MutexLock lock(&mutex_);
+    ThreadIdToThreadLocals* const thread_to_thread_locals =
+        GetThreadLocalsMapLocked();
+    ThreadIdToThreadLocals::iterator thread_local_pos =
+        thread_to_thread_locals->find(current_thread);
+    if (thread_local_pos == thread_to_thread_locals->end()) {
+      thread_local_pos = thread_to_thread_locals->insert(
+          std::make_pair(current_thread, ThreadLocalValues())).first;
+      StartWatcherThreadFor(current_thread);
+    }
+    ThreadLocalValues& thread_local_values = thread_local_pos->second;
+    ThreadLocalValues::iterator value_pos =
+        thread_local_values.find(thread_local_instance);
+    if (value_pos == thread_local_values.end()) {
+      value_pos =
+          thread_local_values
+              .insert(std::make_pair(
+                  thread_local_instance,
+                  linked_ptr<ThreadLocalValueHolderBase>(
+                      thread_local_instance->NewValueForCurrentThread())))
+              .first;
+    }
+    return value_pos->second.get();
+  }
+
+  static void OnThreadLocalDestroyed(
+      const ThreadLocalBase* thread_local_instance) {
+    std::vector<linked_ptr<ThreadLocalValueHolderBase> > value_holders;
+    // Clean up the ThreadLocalValues data structure while holding the lock, but
+    // defer the destruction of the ThreadLocalValueHolderBases.
+    {
+      MutexLock lock(&mutex_);
+      ThreadIdToThreadLocals* const thread_to_thread_locals =
+          GetThreadLocalsMapLocked();
+      for (ThreadIdToThreadLocals::iterator it =
+          thread_to_thread_locals->begin();
+          it != thread_to_thread_locals->end();
+          ++it) {
+        ThreadLocalValues& thread_local_values = it->second;
+        ThreadLocalValues::iterator value_pos =
+            thread_local_values.find(thread_local_instance);
+        if (value_pos != thread_local_values.end()) {
+          value_holders.push_back(value_pos->second);
+          thread_local_values.erase(value_pos);
+          // This 'if' can only be successful at most once, so theoretically we
+          // could break out of the loop here, but we don't bother doing so.
+        }
+      }
+    }
+    // Outside the lock, let the destructor for 'value_holders' deallocate the
+    // ThreadLocalValueHolderBases.
+  }
+
+  static void OnThreadExit(DWORD thread_id) {
+    GTEST_CHECK_(thread_id != 0) << ::GetLastError();
+    std::vector<linked_ptr<ThreadLocalValueHolderBase> > value_holders;
+    // Clean up the ThreadIdToThreadLocals data structure while holding the
+    // lock, but defer the destruction of the ThreadLocalValueHolderBases.
+    {
+      MutexLock lock(&mutex_);
+      ThreadIdToThreadLocals* const thread_to_thread_locals =
+          GetThreadLocalsMapLocked();
+      ThreadIdToThreadLocals::iterator thread_local_pos =
+          thread_to_thread_locals->find(thread_id);
+      if (thread_local_pos != thread_to_thread_locals->end()) {
+        ThreadLocalValues& thread_local_values = thread_local_pos->second;
+        for (ThreadLocalValues::iterator value_pos =
+            thread_local_values.begin();
+            value_pos != thread_local_values.end();
+            ++value_pos) {
+          value_holders.push_back(value_pos->second);
+        }
+        thread_to_thread_locals->erase(thread_local_pos);
+      }
+    }
+    // Outside the lock, let the destructor for 'value_holders' deallocate the
+    // ThreadLocalValueHolderBases.
+  }
+
+ private:
+  // In a particular thread, maps a ThreadLocal object to its value.
+  typedef std::map<const ThreadLocalBase*,
+                   linked_ptr<ThreadLocalValueHolderBase> > ThreadLocalValues;
+  // Stores all ThreadIdToThreadLocals having values in a thread, indexed by
+  // thread's ID.
+  typedef std::map<DWORD, ThreadLocalValues> ThreadIdToThreadLocals;
+
+  // Holds the thread id and thread handle that we pass from
+  // StartWatcherThreadFor to WatcherThreadFunc.
+  typedef std::pair<DWORD, HANDLE> ThreadIdAndHandle;
+
+  static void StartWatcherThreadFor(DWORD thread_id) {
+    // The returned handle will be kept in thread_map and closed by
+    // watcher_thread in WatcherThreadFunc.
+    HANDLE thread = ::OpenThread(SYNCHRONIZE | THREAD_QUERY_INFORMATION,
+                                 FALSE,
+                                 thread_id);
+    GTEST_CHECK_(thread != NULL);
+    // We need to pass a valid thread ID pointer into CreateThread for it
+    // to work correctly under Win98.
+    DWORD watcher_thread_id;
+    HANDLE watcher_thread = ::CreateThread(
+        NULL,   // Default security.
+        0,      // Default stack size
+        &ThreadLocalRegistryImpl::WatcherThreadFunc,
+        reinterpret_cast<LPVOID>(new ThreadIdAndHandle(thread_id, thread)),
+        CREATE_SUSPENDED,
+        &watcher_thread_id);
+    GTEST_CHECK_(watcher_thread != NULL);
+    // Give the watcher thread the same priority as ours to avoid being
+    // blocked by it.
+    ::SetThreadPriority(watcher_thread,
+                        ::GetThreadPriority(::GetCurrentThread()));
+    ::ResumeThread(watcher_thread);
+    ::CloseHandle(watcher_thread);
+  }
+
+  // Monitors exit from a given thread and notifies those
+  // ThreadIdToThreadLocals about thread termination.
+  static DWORD WINAPI WatcherThreadFunc(LPVOID param) {
+    const ThreadIdAndHandle* tah =
+        reinterpret_cast<const ThreadIdAndHandle*>(param);
+    GTEST_CHECK_(
+        ::WaitForSingleObject(tah->second, INFINITE) == WAIT_OBJECT_0);
+    OnThreadExit(tah->first);
+    ::CloseHandle(tah->second);
+    delete tah;
+    return 0;
+  }
+
+  // Returns map of thread local instances.
+  static ThreadIdToThreadLocals* GetThreadLocalsMapLocked() {
+    mutex_.AssertHeld();
+    static ThreadIdToThreadLocals* map = new ThreadIdToThreadLocals;
+    return map;
+  }
+
+  // Protects access to GetThreadLocalsMapLocked() and its return value.
+  static Mutex mutex_;
+  // Protects access to GetThreadMapLocked() and its return value.
+  static Mutex thread_map_mutex_;
+};
+
+Mutex ThreadLocalRegistryImpl::mutex_(Mutex::kStaticMutex);
+Mutex ThreadLocalRegistryImpl::thread_map_mutex_(Mutex::kStaticMutex);
+
+ThreadLocalValueHolderBase* ThreadLocalRegistry::GetValueOnCurrentThread(
+    const ThreadLocalBase* thread_local_instance) {
+  return ThreadLocalRegistryImpl::GetValueOnCurrentThread(
+      thread_local_instance);
+}
+
+void ThreadLocalRegistry::OnThreadLocalDestroyed(
+    const ThreadLocalBase* thread_local_instance) {
+  ThreadLocalRegistryImpl::OnThreadLocalDestroyed(thread_local_instance);
+}
+
+#endif  // GTEST_IS_THREADSAFE && GTEST_OS_WINDOWS
+
+#if GTEST_USES_POSIX_RE
+
+// Implements RE.  Currently only needed for death tests.
+
+RE::~RE() {
+  if (is_valid_) {
+    // regfree'ing an invalid regex might crash because the content
+    // of the regex is undefined. Since the regex's are essentially
+    // the same, one cannot be valid (or invalid) without the other
+    // being so too.
+    regfree(&partial_regex_);
+    regfree(&full_regex_);
+  }
+  free(const_cast<char*>(pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+  if (!re.is_valid_) return false;
+
+  regmatch_t match;
+  return regexec(&re.full_regex_, str, 1, &match, 0) == 0;
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+  if (!re.is_valid_) return false;
+
+  regmatch_t match;
+  return regexec(&re.partial_regex_, str, 1, &match, 0) == 0;
+}
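+// A minimal usage sketch of the two matchers above (pattern and strings
+// are illustrative):
+//
+//   const RE re("a.*z");
+//   RE::FullMatch("abcz", re);         // true: the entire string matches.
+//   RE::PartialMatch("xxabczyy", re);  // true: some substring matches.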
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+  pattern_ = posix::StrDup(regex);
+
+  // Reserves enough bytes to hold the regular expression used for a
+  // full match.
+  const size_t full_regex_len = strlen(regex) + 10;
+  char* const full_pattern = new char[full_regex_len];
+
+  snprintf(full_pattern, full_regex_len, "^(%s)$", regex);
+  is_valid_ = regcomp(&full_regex_, full_pattern, REG_EXTENDED) == 0;
+  // We want to call regcomp(&partial_regex_, ...) even if the
+  // previous expression returns false.  Otherwise partial_regex_ may
+  // not be properly initialized and may cause trouble when it's
+  // freed.
+  //
+  // Some implementation of POSIX regex (e.g. on at least some
+  // versions of Cygwin) doesn't accept the empty string as a valid
+  // regex.  We change it to an equivalent form "()" to be safe.
+  if (is_valid_) {
+    const char* const partial_regex = (*regex == '\0') ? "()" : regex;
+    is_valid_ = regcomp(&partial_regex_, partial_regex, REG_EXTENDED) == 0;
+  }
+  EXPECT_TRUE(is_valid_)
+      << "Regular expression \"" << regex
+      << "\" is not a valid POSIX Extended regular expression.";
+
+  delete[] full_pattern;
+}
+
+#elif GTEST_USES_SIMPLE_RE
+
+// Returns true iff ch appears anywhere in str (excluding the
+// terminating '\0' character).
+bool IsInSet(char ch, const char* str) {
+  return ch != '\0' && strchr(str, ch) != NULL;
+}
+
+// Returns true iff ch belongs to the given classification.  Unlike
+// similar functions in <ctype.h>, these aren't affected by the
+// current locale.
+bool IsAsciiDigit(char ch) { return '0' <= ch && ch <= '9'; }
+bool IsAsciiPunct(char ch) {
+  return IsInSet(ch, "^-!\"#$%&'()*+,./:;<=>?@[\\]_`{|}~");
+}
+bool IsRepeat(char ch) { return IsInSet(ch, "?*+"); }
+bool IsAsciiWhiteSpace(char ch) { return IsInSet(ch, " \f\n\r\t\v"); }
+bool IsAsciiWordChar(char ch) {
+  return ('a' <= ch && ch <= 'z') || ('A' <= ch && ch <= 'Z') ||
+      ('0' <= ch && ch <= '9') || ch == '_';
+}
+
+// Returns true iff "\\c" is a supported escape sequence.
+bool IsValidEscape(char c) {
+  return (IsAsciiPunct(c) || IsInSet(c, "dDfnrsStvwW"));
+}
+
+// Returns true iff the given atom (specified by escaped and pattern)
+// matches ch.  The result is undefined if the atom is invalid.
+bool AtomMatchesChar(bool escaped, char pattern_char, char ch) {
+  if (escaped) {  // "\\p" where p is pattern_char.
+    switch (pattern_char) {
+      case 'd': return IsAsciiDigit(ch);
+      case 'D': return !IsAsciiDigit(ch);
+      case 'f': return ch == '\f';
+      case 'n': return ch == '\n';
+      case 'r': return ch == '\r';
+      case 's': return IsAsciiWhiteSpace(ch);
+      case 'S': return !IsAsciiWhiteSpace(ch);
+      case 't': return ch == '\t';
+      case 'v': return ch == '\v';
+      case 'w': return IsAsciiWordChar(ch);
+      case 'W': return !IsAsciiWordChar(ch);
+    }
+    return IsAsciiPunct(pattern_char) && pattern_char == ch;
+  }
+
+  return (pattern_char == '.' && ch != '\n') || pattern_char == ch;
+}
+
+// Helper function used by ValidateRegex() to format error messages.
+std::string FormatRegexSyntaxError(const char* regex, int index) {
+  return (Message() << "Syntax error at index " << index
+          << " in simple regular expression \"" << regex << "\": ").GetString();
+}
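+// The simple regular expression dialect validated below supports only
+// single characters, '.', the escapes \d \D \f \n \r \s \S \t \v \w \W
+// and escaped punctuation, the anchors '^' (start) and '$' (end), and
+// the repeats '?', '*' and '+'.  Alternation, grouping and character
+// classes -- "()[]{}|" -- are rejected by ValidateRegex() below.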
+// Generates non-fatal failures and returns false if regex is invalid;
+// otherwise returns true.
+bool ValidateRegex(const char* regex) {
+  if (regex == NULL) {
+    // TODO(wan@google.com): fix the source file location in the
+    // assertion failures to match where the regex is used in user
+    // code.
+    ADD_FAILURE() << "NULL is not a valid simple regular expression.";
+    return false;
+  }
+
+  bool is_valid = true;
+
+  // True iff ?, *, or + can follow the previous atom.
+  bool prev_repeatable = false;
+  for (int i = 0; regex[i]; i++) {
+    if (regex[i] == '\\') {  // An escape sequence
+      i++;
+      if (regex[i] == '\0') {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+                      << "'\\' cannot appear at the end.";
+        return false;
+      }
+
+      if (!IsValidEscape(regex[i])) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i - 1)
+                      << "invalid escape sequence \"\\" << regex[i] << "\".";
+        is_valid = false;
+      }
+      prev_repeatable = true;
+    } else {  // Not an escape sequence.
+      const char ch = regex[i];
+
+      if (ch == '^' && i > 0) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'^' can only appear at the beginning.";
+        is_valid = false;
+      } else if (ch == '$' && regex[i + 1] != '\0') {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'$' can only appear at the end.";
+        is_valid = false;
+      } else if (IsInSet(ch, "()[]{}|")) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'" << ch << "' is unsupported.";
+        is_valid = false;
+      } else if (IsRepeat(ch) && !prev_repeatable) {
+        ADD_FAILURE() << FormatRegexSyntaxError(regex, i)
+                      << "'" << ch << "' can only follow a repeatable token.";
+        is_valid = false;
+      }
+
+      prev_repeatable = !IsInSet(ch, "^$?*+");
+    }
+  }
+
+  return is_valid;
+}
+
+// Matches a repeated regex atom followed by a valid simple regular
+// expression.  The regex atom is defined as c if escaped is false,
+// or \c otherwise.  repeat is the repetition meta character (?, *,
+// or +).  The behavior is undefined if str contains too many
+// characters to be indexable by size_t, in which case the test will
+// probably time out anyway.  We are fine with this limitation as
+// std::string has it too.
+bool MatchRepetitionAndRegexAtHead(
+    bool escaped, char c, char repeat, const char* regex,
+    const char* str) {
+  const size_t min_count = (repeat == '+') ? 1 : 0;
+  const size_t max_count = (repeat == '?') ? 1 :
+      static_cast<size_t>(-1) - 1;
+  // We cannot call numeric_limits<size_t>::max() as it conflicts with the
+  // max() macro on Windows.
+
+  for (size_t i = 0; i <= max_count; ++i) {
+    // We know that the atom matches each of the first i characters in str.
+    if (i >= min_count && MatchRegexAtHead(regex, str + i)) {
+      // We have enough matches at the head, and the tail matches too.
+      // Since we only care about *whether* the pattern matches str
+      // (as opposed to *how* it matches), there is no need to find a
+      // greedy match.
+      return true;
+    }
+    if (str[i] == '\0' || !AtomMatchesChar(escaped, c, str[i]))
+      return false;
+  }
+  return false;
+}
+
+// Returns true iff regex matches a prefix of str.  regex must be a
+// valid simple regular expression and not start with "^", or the
+// result is undefined.
+bool MatchRegexAtHead(const char* regex, const char* str) {
+  if (*regex == '\0')  // An empty regex matches a prefix of anything.
+    return true;
+
+  // "$" only matches the end of a string.  Note that regex being
+  // valid guarantees that there's nothing after "$" in it.
+  if (*regex == '$')
+    return *str == '\0';
+
+  // Is the first thing in regex an escape sequence?
+  const bool escaped = *regex == '\\';
+  if (escaped)
+    ++regex;
+  if (IsRepeat(regex[1])) {
+    // MatchRepetitionAndRegexAtHead() calls MatchRegexAtHead(), so
+    // here's an indirect recursion.  It terminates as the regex gets
+    // shorter in each recursion.
+    return MatchRepetitionAndRegexAtHead(
+        escaped, regex[0], regex[1], regex + 2, str);
+  } else {
+    // regex isn't empty, isn't "$", and doesn't start with a
+    // repetition.  We match the first atom of regex with the first
+    // character of str and recurse.
+    return (*str != '\0') && AtomMatchesChar(escaped, *regex, *str) &&
+        MatchRegexAtHead(regex + 1, str + 1);
+  }
+}
+
+// Returns true iff regex matches any substring of str.  regex must be
+// a valid simple regular expression, or the result is undefined.
+//
+// The algorithm is recursive, but the recursion depth doesn't exceed
+// the regex length, so we won't need to worry about running out of
+// stack space normally.  In rare cases the time complexity can be
+// exponential with respect to the regex length + the string length,
+// but usually it's much faster (often close to linear).
+bool MatchRegexAnywhere(const char* regex, const char* str) {
+  if (regex == NULL || str == NULL)
+    return false;
+
+  if (*regex == '^')
+    return MatchRegexAtHead(regex + 1, str);
+
+  // A successful match can be anywhere in str.
+  do {
+    if (MatchRegexAtHead(regex, str))
+      return true;
+  } while (*str++ != '\0');
+  return false;
+}
+
+// Implements the RE class.
+
+RE::~RE() {
+  free(const_cast<char*>(pattern_));
+  free(const_cast<char*>(full_pattern_));
+}
+
+// Returns true iff regular expression re matches the entire str.
+bool RE::FullMatch(const char* str, const RE& re) {
+  return re.is_valid_ && MatchRegexAnywhere(re.full_pattern_, str);
+}
+
+// Returns true iff regular expression re matches a substring of str
+// (including str itself).
+bool RE::PartialMatch(const char* str, const RE& re) {
+  return re.is_valid_ && MatchRegexAnywhere(re.pattern_, str);
+}
+
+// Initializes an RE from its string representation.
+void RE::Init(const char* regex) {
+  pattern_ = full_pattern_ = NULL;
+  if (regex != NULL) {
+    pattern_ = posix::StrDup(regex);
+  }
+
+  is_valid_ = ValidateRegex(regex);
+  if (!is_valid_) {
+    // No need to calculate the full pattern when the regex is invalid.
+    return;
+  }
+
+  const size_t len = strlen(regex);
+  // Reserves enough bytes to hold the regular expression used for a
+  // full match: we need space to prepend a '^', append a '$', and
+  // terminate the string with '\0'.
+  char* buffer = static_cast<char*>(malloc(len + 3));
+  full_pattern_ = buffer;
+
+  if (*regex != '^')
+    *buffer++ = '^';  // Makes sure full_pattern_ starts with '^'.
+
+  // We don't use snprintf or strncpy, as they trigger a warning when
+  // compiled with VC++ 8.0.
+  memcpy(buffer, regex, len);
+  buffer += len;
+
+  if (len == 0 || regex[len - 1] != '$')
+    *buffer++ = '$';  // Makes sure full_pattern_ ends with '$'.
+
+  *buffer = '\0';
+}
+
+#endif  // GTEST_USES_POSIX_RE
+
+const char kUnknownFile[] = "unknown file";
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line) {
+  const std::string file_name(file == NULL ? kUnknownFile : file);
+
+  if (line < 0) {
+    return file_name + ":";
+  }
+#ifdef _MSC_VER
+  return file_name + "(" + StreamableToString(line) + "):";
+#else
+  return file_name + ":" + StreamableToString(line) + ":";
+#endif  // _MSC_VER
+}
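+// For example (file name illustrative), FormatFileLocation("foo.cc", 42)
+// yields "foo.cc(42):" under MSVC and "foo.cc:42:" with other compilers,
+// matching each compiler's native diagnostic format.
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.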
+// Note that FormatCompilerIndependentFileLocation() does NOT append colon +// to the file location it produces, unlike FormatFileLocation(). +GTEST_API_ ::std::string FormatCompilerIndependentFileLocation( + const char* file, int line) { + const std::string file_name(file == NULL ? kUnknownFile : file); + + if (line < 0) + return file_name; + else + return file_name + ":" + StreamableToString(line); +} + + +GTestLog::GTestLog(GTestLogSeverity severity, const char* file, int line) + : severity_(severity) { + const char* const marker = + severity == GTEST_INFO ? "[ INFO ]" : + severity == GTEST_WARNING ? "[WARNING]" : + severity == GTEST_ERROR ? "[ ERROR ]" : "[ FATAL ]"; + GetStream() << ::std::endl << marker << " " + << FormatFileLocation(file, line).c_str() << ": "; +} + +// Flushes the buffers and, if severity is GTEST_FATAL, aborts the program. +GTestLog::~GTestLog() { + GetStream() << ::std::endl; + if (severity_ == GTEST_FATAL) { + fflush(stderr); + posix::Abort(); + } +} +// Disable Microsoft deprecation warnings for POSIX functions called from +// this class (creat, dup, dup2, and close) +GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996) + +#if GTEST_HAS_STREAM_REDIRECTION + +// Object that captures an output stream (stdout/stderr). +class CapturedStream { + public: + // The ctor redirects the stream to a temporary file. + explicit CapturedStream(int fd) : fd_(fd), uncaptured_fd_(dup(fd)) { +# if GTEST_OS_WINDOWS + char temp_dir_path[MAX_PATH + 1] = { '\0' }; // NOLINT + char temp_file_path[MAX_PATH + 1] = { '\0' }; // NOLINT + + ::GetTempPathA(sizeof(temp_dir_path), temp_dir_path); + const UINT success = ::GetTempFileNameA(temp_dir_path, + "gtest_redir", + 0, // Generate unique file name. + temp_file_path); + GTEST_CHECK_(success != 0) + << "Unable to create a temporary file in " << temp_dir_path; + const int captured_fd = creat(temp_file_path, _S_IREAD | _S_IWRITE); + GTEST_CHECK_(captured_fd != -1) << "Unable to open temporary file " + << temp_file_path; + filename_ = temp_file_path; +# else + // There's no guarantee that a test has write access to the current + // directory, so we create the temporary file in the /tmp directory + // instead. We use /tmp on most systems, and /sdcard on Android. + // That's because Android doesn't have /tmp. +# if GTEST_OS_LINUX_ANDROID + // Note: Android applications are expected to call the framework's + // Context.getExternalStorageDirectory() method through JNI to get + // the location of the world-writable SD Card directory. However, + // this requires a Context handle, which cannot be retrieved + // globally from native code. Doing so also precludes running the + // code as part of a regular standalone executable, which doesn't + // run in a Dalvik process (e.g. when running it through 'adb shell'). + // + // The location /sdcard is directly accessible from native code + // and is the only location (unofficially) supported by the Android + // team. It's generally a symlink to the real SD Card mount point + // which can be /mnt/sdcard, /mnt/sdcard0, /system/media/sdcard, or + // other OEM-customized locations. Never rely on these, and always + // use /sdcard. 
+ char name_template[] = "/sdcard/gtest_captured_stream.XXXXXX"; +# else + char name_template[] = "/tmp/captured_stream.XXXXXX"; +# endif // GTEST_OS_LINUX_ANDROID + const int captured_fd = mkstemp(name_template); + filename_ = name_template; +# endif // GTEST_OS_WINDOWS + fflush(NULL); + dup2(captured_fd, fd_); + close(captured_fd); + } + + ~CapturedStream() { + remove(filename_.c_str()); + } + + std::string GetCapturedString() { + if (uncaptured_fd_ != -1) { + // Restores the original stream. + fflush(NULL); + dup2(uncaptured_fd_, fd_); + close(uncaptured_fd_); + uncaptured_fd_ = -1; + } + + FILE* const file = posix::FOpen(filename_.c_str(), "r"); + const std::string content = ReadEntireFile(file); + posix::FClose(file); + return content; + } + + private: + // Reads the entire content of a file as an std::string. + static std::string ReadEntireFile(FILE* file); + + // Returns the size (in bytes) of a file. + static size_t GetFileSize(FILE* file); + + const int fd_; // A stream to capture. + int uncaptured_fd_; + // Name of the temporary file holding the stderr output. + ::std::string filename_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(CapturedStream); +}; + +// Returns the size (in bytes) of a file. +size_t CapturedStream::GetFileSize(FILE* file) { + fseek(file, 0, SEEK_END); + return static_cast(ftell(file)); +} + +// Reads the entire content of a file as a string. +std::string CapturedStream::ReadEntireFile(FILE* file) { + const size_t file_size = GetFileSize(file); + char* const buffer = new char[file_size]; + + size_t bytes_last_read = 0; // # of bytes read in the last fread() + size_t bytes_read = 0; // # of bytes read so far + + fseek(file, 0, SEEK_SET); + + // Keeps reading the file until we cannot read further or the + // pre-determined file size is reached. + do { + bytes_last_read = fread(buffer+bytes_read, 1, file_size-bytes_read, file); + bytes_read += bytes_last_read; + } while (bytes_last_read > 0 && bytes_read < file_size); + + const std::string content(buffer, bytes_read); + delete[] buffer; + + return content; +} + +GTEST_DISABLE_MSC_WARNINGS_POP_() + +static CapturedStream* g_captured_stderr = NULL; +static CapturedStream* g_captured_stdout = NULL; + +// Starts capturing an output stream (stdout/stderr). +void CaptureStream(int fd, const char* stream_name, CapturedStream** stream) { + if (*stream != NULL) { + GTEST_LOG_(FATAL) << "Only one " << stream_name + << " capturer can exist at a time."; + } + *stream = new CapturedStream(fd); +} + +// Stops capturing the output stream and returns the captured string. +std::string GetCapturedStream(CapturedStream** captured_stream) { + const std::string content = (*captured_stream)->GetCapturedString(); + + delete *captured_stream; + *captured_stream = NULL; + + return content; +} + +// Starts capturing stdout. +void CaptureStdout() { + CaptureStream(kStdOutFileno, "stdout", &g_captured_stdout); +} + +// Starts capturing stderr. +void CaptureStderr() { + CaptureStream(kStdErrFileno, "stderr", &g_captured_stderr); +} + +// Stops capturing stdout and returns the captured string. +std::string GetCapturedStdout() { + return GetCapturedStream(&g_captured_stdout); +} + +// Stops capturing stderr and returns the captured string. +std::string GetCapturedStderr() { + return GetCapturedStream(&g_captured_stderr); +} + +#endif // GTEST_HAS_STREAM_REDIRECTION + +#if GTEST_HAS_DEATH_TEST + +// A copy of all command line arguments. Set by InitGoogleTest(). 
+::std::vector<testing::internal::string> g_argvs;
+
+static const ::std::vector<testing::internal::string>* g_injected_test_argvs =
+    NULL;  // Owned.
+
+void SetInjectableArgvs(const ::std::vector<testing::internal::string>* argvs) {
+  if (g_injected_test_argvs != argvs)
+    delete g_injected_test_argvs;
+  g_injected_test_argvs = argvs;
+}
+
+const ::std::vector<testing::internal::string>& GetInjectableArgvs() {
+  if (g_injected_test_argvs != NULL) {
+    return *g_injected_test_argvs;
+  }
+  return g_argvs;
+}
+#endif  // GTEST_HAS_DEATH_TEST
+
+#if GTEST_OS_WINDOWS_MOBILE
+namespace posix {
+void Abort() {
+  DebugBreak();
+  TerminateProcess(GetCurrentProcess(), 1);
+}
+}  // namespace posix
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+// Returns the name of the environment variable corresponding to the
+// given flag.  For example, FlagToEnvVar("foo") will return
+// "GTEST_FOO" in the open-source version.
+static std::string FlagToEnvVar(const char* flag) {
+  const std::string full_flag =
+      (Message() << GTEST_FLAG_PREFIX_ << flag).GetString();
+
+  Message env_var;
+  for (size_t i = 0; i != full_flag.length(); i++) {
+    env_var << ToUpper(full_flag.c_str()[i]);
+  }
+
+  return env_var.GetString();
+}
+
+// Parses 'str' for a 32-bit signed integer.  If successful, writes
+// the result to *value and returns true; otherwise leaves *value
+// unchanged and returns false.
+bool ParseInt32(const Message& src_text, const char* str, Int32* value) {
+  // Parses the environment variable as a decimal integer.
+  char* end = NULL;
+  const long long_value = strtol(str, &end, 10);  // NOLINT
+
+  // Has strtol() consumed all characters in the string?
+  if (*end != '\0') {
+    // No - an invalid character was encountered.
+    Message msg;
+    msg << "WARNING: " << src_text
+        << " is expected to be a 32-bit integer, but actually"
+        << " has value \"" << str << "\".\n";
+    printf("%s", msg.GetString().c_str());
+    fflush(stdout);
+    return false;
+  }
+
+  // Is the parsed value in the range of an Int32?
+  const Int32 result = static_cast<Int32>(long_value);
+  if (long_value == LONG_MAX || long_value == LONG_MIN ||
+      // The parsed value overflows as a long.  (strtol() returns
+      // LONG_MAX or LONG_MIN when the input overflows.)
+      result != long_value
+      // The parsed value overflows as an Int32.
+      ) {
+    Message msg;
+    msg << "WARNING: " << src_text
+        << " is expected to be a 32-bit integer, but actually"
+        << " has value " << str << ", which overflows.\n";
+    printf("%s", msg.GetString().c_str());
+    fflush(stdout);
+    return false;
+  }
+
+  *value = result;
+  return true;
+}
+
+// Reads and returns the Boolean environment variable corresponding to
+// the given flag; if it's not set, returns default_value.
+//
+// The value is considered true iff it's not "0".
+bool BoolFromGTestEnv(const char* flag, bool default_value) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const string_value = posix::GetEnv(env_var.c_str());
+  return string_value == NULL ?
+      default_value : strcmp(string_value, "0") != 0;
+}
+
+// Reads and returns a 32-bit integer stored in the environment
+// variable corresponding to the given flag; if it isn't set or
+// doesn't represent a valid 32-bit integer, returns default_value.
+Int32 Int32FromGTestEnv(const char* flag, Int32 default_value) {
+  const std::string env_var = FlagToEnvVar(flag);
+  const char* const string_value = posix::GetEnv(env_var.c_str());
+  if (string_value == NULL) {
+    // The environment variable is not set.
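+    // (For a flag named "foo" this means GTEST_FOO is absent from the
+    // environment, so the caller-supplied default is used.)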
+ return default_value; + } + + Int32 result = default_value; + if (!ParseInt32(Message() << "Environment variable " << env_var, + string_value, &result)) { + printf("The default value %s is used.\n", + (Message() << default_value).GetString().c_str()); + fflush(stdout); + return default_value; + } + + return result; +} + +// Reads and returns the string environment variable corresponding to +// the given flag; if it's not set, returns default_value. +const char* StringFromGTestEnv(const char* flag, const char* default_value) { + const std::string env_var = FlagToEnvVar(flag); + const char* const value = posix::GetEnv(env_var.c_str()); + return value == NULL ? default_value : value; +} + +} // namespace internal +} // namespace testing +// Copyright 2007, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Google Test - The Google C++ Testing Framework +// +// This file implements a universal value printer that can print a +// value of any type T: +// +// void ::testing::internal::UniversalPrinter::Print(value, ostream_ptr); +// +// It uses the << operator when possible, and prints the bytes in the +// object otherwise. A user can override its behavior for a class +// type Foo by defining either operator<<(::std::ostream&, const Foo&) +// or void PrintTo(const Foo&, ::std::ostream*) in the namespace that +// defines Foo. + +#include +#include +#include +#include // NOLINT +#include + +namespace testing { + +namespace { + +using ::std::ostream; + +// Prints a segment of bytes in the given object. +GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ +GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ +GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ +void PrintByteSegmentInObjectTo(const unsigned char* obj_bytes, size_t start, + size_t count, ostream* os) { + char text[5] = ""; + for (size_t i = 0; i != count; i++) { + const size_t j = start + i; + if (i != 0) { + // Organizes the bytes into groups of 2 for easy parsing by + // human. 
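+      // For example, the four bytes {0x12, 0x34, 0x56, 0x78} are printed
+      // as "12-34 56-78".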
+      if ((j % 2) == 0)
+        *os << ' ';
+      else
+        *os << '-';
+    }
+    GTEST_SNPRINTF_(text, sizeof(text), "%02X", obj_bytes[j]);
+    *os << text;
+  }
+}
+
+// Prints the bytes in the given value to the given ostream.
+void PrintBytesInObjectToImpl(const unsigned char* obj_bytes, size_t count,
+                              ostream* os) {
+  // Tells the user how big the object is.
+  *os << count << "-byte object <";
+
+  const size_t kThreshold = 132;
+  const size_t kChunkSize = 64;
+  // If the object size is bigger than kThreshold, we'll have to omit
+  // some details by printing only the first and the last kChunkSize
+  // bytes.
+  // TODO(wan): let the user control the threshold using a flag.
+  if (count < kThreshold) {
+    PrintByteSegmentInObjectTo(obj_bytes, 0, count, os);
+  } else {
+    PrintByteSegmentInObjectTo(obj_bytes, 0, kChunkSize, os);
+    *os << " ... ";
+    // Rounds up to 2-byte boundary.
+    const size_t resume_pos = (count - kChunkSize + 1)/2*2;
+    PrintByteSegmentInObjectTo(obj_bytes, resume_pos, count - resume_pos, os);
+  }
+  *os << ">";
+}
+
+}  // namespace
+
+namespace internal2 {
+
+// Delegates to PrintBytesInObjectToImpl() to print the bytes in the
+// given object.  The delegation simplifies the implementation, which
+// uses the << operator and thus is easier done outside of the
+// ::testing::internal namespace, which contains a << operator that
+// sometimes conflicts with the one in STL.
+void PrintBytesInObjectTo(const unsigned char* obj_bytes, size_t count,
+                          ostream* os) {
+  PrintBytesInObjectToImpl(obj_bytes, count, os);
+}
+
+}  // namespace internal2
+
+namespace internal {
+
+// Depending on the value of a char (or wchar_t), we print it in one
+// of three formats:
+//   - as is if it's a printable ASCII (e.g. 'a', '2', ' '),
+//   - as a hexidecimal escape sequence (e.g. '\x7F'), or
+//   - as a special escape sequence (e.g. '\r', '\n').
+enum CharFormat {
+  kAsIs,
+  kHexEscape,
+  kSpecialEscape
+};
+
+// Returns true if c is a printable ASCII character.  We test the
+// value of c directly instead of calling isprint(), which is buggy on
+// Windows Mobile.
+inline bool IsPrintableAscii(wchar_t c) {
+  return 0x20 <= c && c <= 0x7E;
+}
+
+// Prints a wide or narrow char c as a character literal without the
+// quotes, escaping it when necessary; returns how c was formatted.
+// The template argument UnsignedChar is the unsigned version of Char,
+// which is the type of c.
+template <typename UnsignedChar, typename Char>
+static CharFormat PrintAsCharLiteralTo(Char c, ostream* os) {
+  switch (static_cast<wchar_t>(c)) {
+    case L'\0':
+      *os << "\\0";
+      break;
+    case L'\'':
+      *os << "\\'";
+      break;
+    case L'\\':
+      *os << "\\\\";
+      break;
+    case L'\a':
+      *os << "\\a";
+      break;
+    case L'\b':
+      *os << "\\b";
+      break;
+    case L'\f':
+      *os << "\\f";
+      break;
+    case L'\n':
+      *os << "\\n";
+      break;
+    case L'\r':
+      *os << "\\r";
+      break;
+    case L'\t':
+      *os << "\\t";
+      break;
+    case L'\v':
+      *os << "\\v";
+      break;
+    default:
+      if (IsPrintableAscii(c)) {
+        *os << static_cast<char>(c);
+        return kAsIs;
+      } else {
+        *os << "\\x" + String::FormatHexInt(static_cast<UnsignedChar>(c));
+        return kHexEscape;
+      }
+  }
+  return kSpecialEscape;
+}
+
+// Prints a wchar_t c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
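+// For example, L'a' is written as-is (kAsIs), L'"' becomes \" (kSpecialEscape),
+// and an unprintable character such as L'\x7F' falls through to the
+// hex-escape path in PrintAsCharLiteralTo() (kHexEscape).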
+static CharFormat PrintAsStringLiteralTo(wchar_t c, ostream* os) {
+  switch (c) {
+    case L'\'':
+      *os << "'";
+      return kAsIs;
+    case L'"':
+      *os << "\\\"";
+      return kSpecialEscape;
+    default:
+      return PrintAsCharLiteralTo<wchar_t>(c, os);
+  }
+}
+
+// Prints a char c as if it's part of a string literal, escaping it when
+// necessary; returns how c was formatted.
+static CharFormat PrintAsStringLiteralTo(char c, ostream* os) {
+  return PrintAsStringLiteralTo(
+      static_cast<wchar_t>(static_cast<unsigned char>(c)), os);
+}
+
+// Prints a wide or narrow character c and its code.  '\0' is printed
+// as "'\\0'", other unprintable characters are also properly escaped
+// using the standard C++ escape sequence.  The template argument
+// UnsignedChar is the unsigned version of Char, which is the type of c.
+template <typename UnsignedChar, typename Char>
+void PrintCharAndCodeTo(Char c, ostream* os) {
+  // First, print c as a literal in the most readable form we can find.
+  *os << ((sizeof(c) > 1) ? "L'" : "'");
+  const CharFormat format = PrintAsCharLiteralTo<UnsignedChar>(c, os);
+  *os << "'";
+
+  // To aid user debugging, we also print c's code in decimal, unless
+  // it's 0 (in which case c was printed as '\\0', making the code
+  // obvious).
+  if (c == 0)
+    return;
+  *os << " (" << static_cast<int>(c);
+
+  // For more convenience, we print c's code again in hexidecimal,
+  // unless c was already printed in the form '\x##' or the code is in
+  // [1, 9].
+  if (format == kHexEscape || (1 <= c && c <= 9)) {
+    // Do nothing.
+  } else {
+    *os << ", 0x" << String::FormatHexInt(static_cast<UnsignedChar>(c));
+  }
+  *os << ")";
+}
+
+void PrintTo(unsigned char c, ::std::ostream* os) {
+  PrintCharAndCodeTo<unsigned char>(c, os);
+}
+void PrintTo(signed char c, ::std::ostream* os) {
+  PrintCharAndCodeTo<unsigned char>(c, os);
+}
+
+// Prints a wchar_t as a symbol if it is printable or as its internal
+// code otherwise and also as its code.  L'\0' is printed as "L'\\0'".
+void PrintTo(wchar_t wc, ostream* os) {
+  PrintCharAndCodeTo<wchar_t>(wc, os);
+}
+
+// Prints the given array of characters to the ostream. CharType must be either
+//  char or wchar_t.
+// The array starts at begin, the length is len, it may include '\0' characters
+// and may not be NUL-terminated.
+template <typename CharType>
+GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+static void PrintCharsAsStringTo(
+    const CharType* begin, size_t len, ostream* os) {
+  const char* const kQuoteBegin = sizeof(CharType) == 1 ? "\"" : "L\"";
+  *os << kQuoteBegin;
+  bool is_previous_hex = false;
+  for (size_t index = 0; index < len; ++index) {
+    const CharType cur = begin[index];
+    if (is_previous_hex && IsXDigit(cur)) {
+      // Previous character is of '\x..' form and this character can be
+      // interpreted as another hexadecimal digit in its number. Break string to
+      // disambiguate.
+      *os << "\" " << kQuoteBegin;
+    }
+    is_previous_hex = PrintAsStringLiteralTo(cur, os) == kHexEscape;
+  }
+  *os << "\"";
+}
+
+// Prints a (const) char/wchar_t array of 'len' elements, starting at address
+// 'begin'.  CharType must be either char or wchar_t.
+template <typename CharType>
+GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+static void UniversalPrintCharArray(
+    const CharType* begin, size_t len, ostream* os) {
+  // The code
+  //   const char kFoo[] = "foo";
+  // generates an array of 4, not 3, elements, with the last one being '\0'.
+ // + // Therefore when printing a char array, we don't print the last element if + // it's '\0', such that the output matches the string literal as it's + // written in the source code. + if (len > 0 && begin[len - 1] == '\0') { + PrintCharsAsStringTo(begin, len - 1, os); + return; + } + + // If, however, the last element in the array is not '\0', e.g. + // const char kFoo[] = { 'f', 'o', 'o' }; + // we must print the entire array. We also print a message to indicate + // that the array is not NUL-terminated. + PrintCharsAsStringTo(begin, len, os); + *os << " (no terminating NUL)"; +} + +// Prints a (const) char array of 'len' elements, starting at address 'begin'. +void UniversalPrintArray(const char* begin, size_t len, ostream* os) { + UniversalPrintCharArray(begin, len, os); +} + +// Prints a (const) wchar_t array of 'len' elements, starting at address +// 'begin'. +void UniversalPrintArray(const wchar_t* begin, size_t len, ostream* os) { + UniversalPrintCharArray(begin, len, os); +} + +// Prints the given C string to the ostream. +void PrintTo(const char* s, ostream* os) { + if (s == NULL) { + *os << "NULL"; + } else { + *os << ImplicitCast_(s) << " pointing to "; + PrintCharsAsStringTo(s, strlen(s), os); + } +} + +// MSVC compiler can be configured to define whar_t as a typedef +// of unsigned short. Defining an overload for const wchar_t* in that case +// would cause pointers to unsigned shorts be printed as wide strings, +// possibly accessing more memory than intended and causing invalid +// memory accesses. MSVC defines _NATIVE_WCHAR_T_DEFINED symbol when +// wchar_t is implemented as a native type. +#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED) +// Prints the given wide C string to the ostream. +void PrintTo(const wchar_t* s, ostream* os) { + if (s == NULL) { + *os << "NULL"; + } else { + *os << ImplicitCast_(s) << " pointing to "; + PrintCharsAsStringTo(s, std::wcslen(s), os); + } +} +#endif // wchar_t is native + +// Prints a ::string object. +#if GTEST_HAS_GLOBAL_STRING +void PrintStringTo(const ::string& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_GLOBAL_STRING + +void PrintStringTo(const ::std::string& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} + +// Prints a ::wstring object. +#if GTEST_HAS_GLOBAL_WSTRING +void PrintWideStringTo(const ::wstring& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_GLOBAL_WSTRING + +#if GTEST_HAS_STD_WSTRING +void PrintWideStringTo(const ::std::wstring& s, ostream* os) { + PrintCharsAsStringTo(s.data(), s.size(), os); +} +#endif // GTEST_HAS_STD_WSTRING + +} // namespace internal + +} // namespace testing +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: mheule@google.com (Markus Heule) +// +// The Google C++ Testing Framework (Google Test) + + +// Indicates that this translation unit is part of Google Test's +// implementation. It must come before gtest-internal-inl.h is +// included, or there will be a compiler error. This trick exists to +// prevent the accidental inclusion of gtest-internal-inl.h in the +// user's code. +#define GTEST_IMPLEMENTATION_ 1 +#undef GTEST_IMPLEMENTATION_ + +namespace testing { + +using internal::GetUnitTestImpl; + +// Gets the summary of the failure message by omitting the stack trace +// in it. +std::string TestPartResult::ExtractSummary(const char* message) { + const char* const stack_trace = strstr(message, internal::kStackTraceMarker); + return stack_trace == NULL ? message : + std::string(message, stack_trace); +} + +// Prints a TestPartResult object. +std::ostream& operator<<(std::ostream& os, const TestPartResult& result) { + return os + << result.file_name() << ":" << result.line_number() << ": " + << (result.type() == TestPartResult::kSuccess ? "Success" : + result.type() == TestPartResult::kFatalFailure ? "Fatal failure" : + "Non-fatal failure") << ":\n" + << result.message() << std::endl; +} + +// Appends a TestPartResult to the array. +void TestPartResultArray::Append(const TestPartResult& result) { + array_.push_back(result); +} + +// Returns the TestPartResult at the given index (0-based). +const TestPartResult& TestPartResultArray::GetTestPartResult(int index) const { + if (index < 0 || index >= size()) { + printf("\nInvalid index (%d) into TestPartResultArray.\n", index); + internal::posix::Abort(); + } + + return array_[index]; +} + +// Returns the number of TestPartResult objects in the array. +int TestPartResultArray::size() const { + return static_cast(array_.size()); +} + +namespace internal { + +HasNewFatalFailureHelper::HasNewFatalFailureHelper() + : has_new_fatal_failure_(false), + original_reporter_(GetUnitTestImpl()-> + GetTestPartResultReporterForCurrentThread()) { + GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread(this); +} + +HasNewFatalFailureHelper::~HasNewFatalFailureHelper() { + GetUnitTestImpl()->SetTestPartResultReporterForCurrentThread( + original_reporter_); +} + +void HasNewFatalFailureHelper::ReportTestPartResult( + const TestPartResult& result) { + if (result.fatally_failed()) + has_new_fatal_failure_ = true; + original_reporter_->ReportTestPartResult(result); +} + +} // namespace internal + +} // namespace testing +// Copyright 2008 Google Inc. +// All Rights Reserved. 
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+
+namespace testing {
+namespace internal {
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// Skips to the first non-space char in str. Returns an empty string if str
+// contains only whitespace characters.
+static const char* SkipSpaces(const char* str) {
+  while (IsSpace(*str))
+    str++;
+  return str;
+}
+
+static std::vector<std::string> SplitIntoTestNames(const char* src) {
+  std::vector<std::string> name_vec;
+  src = SkipSpaces(src);
+  for (; src != NULL; src = SkipComma(src)) {
+    name_vec.push_back(StripTrailingSpaces(GetPrefixUntilComma(src)));
+  }
+  return name_vec;
+}
+
+// Verifies that registered_tests match the test names in
+// defined_test_names_; returns registered_tests if successful, or
+// aborts the program otherwise.
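+//
+// For example (the names here are illustrative),
+//   REGISTER_TYPED_TEST_CASE_P(FooTest, DoesA, DoesB);
+// passes the stringized list "DoesA, DoesB" as registered_tests; a
+// duplicated, unknown, or unlisted test name makes the checks below
+// abort with a diagnostic.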
+const char* TypedTestCasePState::VerifyRegisteredTestNames(
+    const char* file, int line, const char* registered_tests) {
+  typedef ::std::set<const char*>::const_iterator DefinedTestIter;
+  registered_ = true;
+
+  std::vector<std::string> name_vec = SplitIntoTestNames(registered_tests);
+
+  Message errors;
+
+  std::set<std::string> tests;
+  for (std::vector<std::string>::const_iterator name_it = name_vec.begin();
+       name_it != name_vec.end(); ++name_it) {
+    const std::string& name = *name_it;
+    if (tests.count(name) != 0) {
+      errors << "Test " << name << " is listed more than once.\n";
+      continue;
+    }
+
+    bool found = false;
+    for (DefinedTestIter it = defined_test_names_.begin();
+         it != defined_test_names_.end();
+         ++it) {
+      if (name == *it) {
+        found = true;
+        break;
+      }
+    }
+
+    if (found) {
+      tests.insert(name);
+    } else {
+      errors << "No test named " << name
+             << " can be found in this test case.\n";
+    }
+  }
+
+  for (DefinedTestIter it = defined_test_names_.begin();
+       it != defined_test_names_.end();
+       ++it) {
+    if (tests.count(*it) == 0) {
+      errors << "You forgot to list test " << *it << ".\n";
+    }
+  }
+
+  const std::string& errors_str = errors.GetString();
+  if (errors_str != "") {
+    fprintf(stderr, "%s %s", FormatFileLocation(file, line).c_str(),
+            errors_str.c_str());
+    fflush(stderr);
+    posix::Abort();
+  }
+
+  return registered_tests;
+}
+
+#endif  // GTEST_HAS_TYPED_TEST_P
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // __clang_analyzer__
diff --git a/external/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/gtest.h b/external/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/gtest.h
new file mode 100755
index 0000000000..e3f0cfb95c
--- /dev/null
+++ b/external/rocksdb/third-party/gtest-1.7.0/fused-src/gtest/gtest.h
@@ -0,0 +1,20725 @@
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file defines the public API for Google Test.  It should be
+// included by any test program that uses Google Test.
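+//
+// A minimal sketch of such a program (the test and fixture names below are
+// illustrative only):
+//
+//   #include "gtest/gtest.h"
+//
+//   TEST(FooTest, HandlesZeroInput) {
+//     EXPECT_EQ(0, 0);  // Passes; a failing expectation would be reported.
+//   }
+//
+//   int main(int argc, char** argv) {
+//     ::testing::InitGoogleTest(&argc, argv);  // Parses --gtest_* flags.
+//     return RUN_ALL_TESTS();  // Returns 0 iff all tests pass.
+//   }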
+// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. +// They are clearly marked by comments like this: +// +// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +// +// Such code is NOT meant to be used by a user directly, and is subject +// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user +// program! +// +// Acknowledgment: Google Test borrowed the idea of automatic test +// registration from Barthelemy Dagenais' (barthelemy@prologique.com) +// easyUnit framework. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_H_ + +#include +#include +#include + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file declares functions and macros used internally by +// Google Test. They are subject to change without notice. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_ + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan) +// +// Low-level types and utilities for porting Google Test to various +// platforms. All macros ending with _ and symbols defined in an +// internal namespace are subject to change without notice. Code +// outside Google Test MUST NOT USE THEM DIRECTLY. Macros that don't +// end with _ are part of Google Test's public API and can be used by +// code outside Google Test. +// +// This file is fundamental to Google Test. All other Google Test source +// files are expected to #include this. Therefore, it cannot #include +// any other Google Test header. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_ + +// Environment-describing macros +// ----------------------------- +// +// Google Test can be used in many different environments. Macros in +// this section tell Google Test what kind of environment it is being +// used in, such that Google Test can provide environment-specific +// features and implementations. +// +// Google Test tries to automatically detect the properties of its +// environment, so users usually don't need to worry about these +// macros. However, the automatic detection is not perfect. +// Sometimes it's necessary for a user to define some of the following +// macros in the build script to override Google Test's decisions. +// +// If the user doesn't define a macro in the list, Google Test will +// provide a default definition. After this header is #included, all +// macros in this list will be defined to either 1 or 0. +// +// Notes to maintainers: +// - Each macro here is a user-tweakable knob; do not grow the list +// lightly. +// - Use #if to key off these macros. Don't use #ifdef or "#if +// defined(...)", which will not work as these macros are ALWAYS +// defined. +// +// GTEST_HAS_CLONE - Define it to 1/0 to indicate that clone(2) +// is/isn't available. +// GTEST_HAS_EXCEPTIONS - Define it to 1/0 to indicate that exceptions +// are enabled. +// GTEST_HAS_GLOBAL_STRING - Define it to 1/0 to indicate that ::string +// is/isn't available (some systems define +// ::string, which is different to std::string). +// GTEST_HAS_GLOBAL_WSTRING - Define it to 1/0 to indicate that ::string +// is/isn't available (some systems define +// ::wstring, which is different to std::wstring). +// GTEST_HAS_POSIX_RE - Define it to 1/0 to indicate that POSIX regular +// expressions are/aren't available. +// GTEST_HAS_PTHREAD - Define it to 1/0 to indicate that +// is/isn't available. +// GTEST_HAS_RTTI - Define it to 1/0 to indicate that RTTI is/isn't +// enabled. 
+// GTEST_HAS_STD_WSTRING - Define it to 1/0 to indicate that +// std::wstring does/doesn't work (Google Test can +// be used where std::wstring is unavailable). +// GTEST_HAS_TR1_TUPLE - Define it to 1/0 to indicate tr1::tuple +// is/isn't available. +// GTEST_HAS_SEH - Define it to 1/0 to indicate whether the +// compiler supports Microsoft's "Structured +// Exception Handling". +// GTEST_HAS_STREAM_REDIRECTION +// - Define it to 1/0 to indicate whether the +// platform supports I/O stream redirection using +// dup() and dup2(). +// GTEST_USE_OWN_TR1_TUPLE - Define it to 1/0 to indicate whether Google +// Test's own tr1 tuple implementation should be +// used. Unused when the user sets +// GTEST_HAS_TR1_TUPLE to 0. +// GTEST_LANG_CXX11 - Define it to 1/0 to indicate that Google Test +// is building in C++11/C++98 mode. +// GTEST_LINKED_AS_SHARED_LIBRARY +// - Define to 1 when compiling tests that use +// Google Test as a shared library (known as +// DLL on Windows). +// GTEST_CREATE_SHARED_LIBRARY +// - Define to 1 when compiling Google Test itself +// as a shared library. + +// Platform-indicating macros +// -------------------------- +// +// Macros indicating the platform on which Google Test is being used +// (a macro is defined to 1 if compiled on the given platform; +// otherwise UNDEFINED -- it's never defined to 0.). Google Test +// defines these macros automatically. Code outside Google Test MUST +// NOT define them. +// +// GTEST_OS_AIX - IBM AIX +// GTEST_OS_CYGWIN - Cygwin +// GTEST_OS_FREEBSD - FreeBSD +// GTEST_OS_HPUX - HP-UX +// GTEST_OS_LINUX - Linux +// GTEST_OS_LINUX_ANDROID - Google Android +// GTEST_OS_MAC - Mac OS X +// GTEST_OS_IOS - iOS +// GTEST_OS_NACL - Google Native Client (NaCl) +// GTEST_OS_OPENBSD - OpenBSD +// GTEST_OS_QNX - QNX +// GTEST_OS_SOLARIS - Sun Solaris +// GTEST_OS_SYMBIAN - Symbian +// GTEST_OS_WINDOWS - Windows (Desktop, MinGW, or Mobile) +// GTEST_OS_WINDOWS_DESKTOP - Windows Desktop +// GTEST_OS_WINDOWS_MINGW - MinGW +// GTEST_OS_WINDOWS_MOBILE - Windows Mobile +// GTEST_OS_WINDOWS_PHONE - Windows Phone +// GTEST_OS_WINDOWS_RT - Windows Store App/WinRT +// GTEST_OS_ZOS - z/OS +// +// Among the platforms, Cygwin, Linux, Max OS X, and Windows have the +// most stable support. Since core members of the Google Test project +// don't have access to other platforms, support for them may be less +// stable. If you notice any problems on your platform, please notify +// googletestframework@googlegroups.com (patches for fixing them are +// even more welcome!). +// +// It is possible that none of the GTEST_OS_* macros are defined. + +// Feature-indicating macros +// ------------------------- +// +// Macros indicating which Google Test features are available (a macro +// is defined to 1 if the corresponding feature is supported; +// otherwise UNDEFINED -- it's never defined to 0.). Google Test +// defines these macros automatically. Code outside Google Test MUST +// NOT define them. +// +// These macros are public so that portable tests can be written. +// Such tests typically surround code using a feature with an #if +// which controls that code. 
For example: +// +// #if GTEST_HAS_DEATH_TEST +// EXPECT_DEATH(DoSomethingDeadly()); +// #endif +// +// GTEST_HAS_COMBINE - the Combine() function (for value-parameterized +// tests) +// GTEST_HAS_DEATH_TEST - death tests +// GTEST_HAS_PARAM_TEST - value-parameterized tests +// GTEST_HAS_TYPED_TEST - typed tests +// GTEST_HAS_TYPED_TEST_P - type-parameterized tests +// GTEST_IS_THREADSAFE - Google Test is thread-safe. +// GTEST_USES_POSIX_RE - enhanced POSIX regex is used. Do not confuse with +// GTEST_HAS_POSIX_RE (see above) which users can +// define themselves. +// GTEST_USES_SIMPLE_RE - our own simple regex is used; +// the above two are mutually exclusive. +// GTEST_CAN_COMPARE_NULL - accepts untyped NULL in EXPECT_EQ(). + +// Misc public macros +// ------------------ +// +// GTEST_FLAG(flag_name) - references the variable corresponding to +// the given Google Test flag. + +// Internal utilities +// ------------------ +// +// The following macros and utilities are for Google Test's INTERNAL +// use only. Code outside Google Test MUST NOT USE THEM DIRECTLY. +// +// Macros for basic C++ coding: +// GTEST_AMBIGUOUS_ELSE_BLOCKER_ - for disabling a gcc warning. +// GTEST_ATTRIBUTE_UNUSED_ - declares that a class' instances or a +// variable don't have to be used. +// GTEST_DISALLOW_ASSIGN_ - disables operator=. +// GTEST_DISALLOW_COPY_AND_ASSIGN_ - disables copy ctor and operator=. +// GTEST_MUST_USE_RESULT_ - declares that a function's result must be used. +// GTEST_INTENTIONAL_CONST_COND_PUSH_ - start code section where MSVC C4127 is +// suppressed (constant conditional). +// GTEST_INTENTIONAL_CONST_COND_POP_ - finish code section where MSVC C4127 +// is suppressed. +// +// C++11 feature wrappers: +// +// testing::internal::move - portability wrapper for std::move. +// +// Synchronization: +// Mutex, MutexLock, ThreadLocal, GetThreadCount() +// - synchronization primitives. +// +// Template meta programming: +// is_pointer - as in TR1; needed on Symbian and IBM XL C/C++ only. +// IteratorTraits - partial implementation of std::iterator_traits, which +// is not available in libCstd when compiled with Sun C++. +// +// Smart pointers: +// scoped_ptr - as in TR2. +// +// Regular expressions: +// RE - a simple regular expression class using the POSIX +// Extended Regular Expression syntax on UNIX-like +// platforms, or a reduced regular exception syntax on +// other platforms, including Windows. +// +// Logging: +// GTEST_LOG_() - logs messages at the specified severity level. +// LogToStderr() - directs all log messages to stderr. +// FlushInfoLog() - flushes informational log messages. +// +// Stdout and stderr capturing: +// CaptureStdout() - starts capturing stdout. +// GetCapturedStdout() - stops capturing stdout and returns the captured +// string. +// CaptureStderr() - starts capturing stderr. +// GetCapturedStderr() - stops capturing stderr and returns the captured +// string. +// +// Integer types: +// TypeWithSize - maps an integer to a int type. +// Int32, UInt32, Int64, UInt64, TimeInMillis +// - integers of known sizes. +// BiggestInt - the biggest signed integer type. +// +// Command-line utilities: +// GTEST_DECLARE_*() - declares a flag. +// GTEST_DEFINE_*() - defines a flag. +// GetInjectableArgvs() - returns the command line as a vector of strings. +// +// Environment variable utilities: +// GetEnv() - gets the value of an environment variable. +// BoolFromGTestEnv() - parses a bool environment variable. +// Int32FromGTestEnv() - parses an Int32 environment variable. 
+// StringFromGTestEnv() - parses a string environment variable. + +#include // for isspace, etc +#include // for ptrdiff_t +#include +#include +#include +#ifndef _WIN32_WCE +# include +# include +#endif // !_WIN32_WCE + +#if defined __APPLE__ +# include +# include +#endif + +#include // NOLINT +#include // NOLINT +#include // NOLINT +#include // NOLINT +#include + +#define GTEST_DEV_EMAIL_ "googletestframework@@googlegroups.com" +#define GTEST_FLAG_PREFIX_ "gtest_" +#define GTEST_FLAG_PREFIX_DASH_ "gtest-" +#define GTEST_FLAG_PREFIX_UPPER_ "GTEST_" +#define GTEST_NAME_ "Google Test" +#define GTEST_PROJECT_URL_ "http://code.google.com/p/googletest/" + +// Determines the version of gcc that is used to compile this. +#ifdef __GNUC__ +// 40302 means version 4.3.2. +# define GTEST_GCC_VER_ \ + (__GNUC__*10000 + __GNUC_MINOR__*100 + __GNUC_PATCHLEVEL__) +#endif // __GNUC__ + +// Determines the platform on which Google Test is compiled. +#ifdef __CYGWIN__ +# define GTEST_OS_CYGWIN 1 +#elif defined __SYMBIAN32__ +# define GTEST_OS_SYMBIAN 1 +#elif defined _WIN32 +# define GTEST_OS_WINDOWS 1 +# ifdef _WIN32_WCE +# define GTEST_OS_WINDOWS_MOBILE 1 +# elif defined(__MINGW__) || defined(__MINGW32__) +# define GTEST_OS_WINDOWS_MINGW 1 +# elif defined(WINAPI_FAMILY) +# include +# if WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_DESKTOP) +# define GTEST_OS_WINDOWS_DESKTOP 1 +# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_PHONE_APP) +# define GTEST_OS_WINDOWS_PHONE 1 +# elif WINAPI_FAMILY_PARTITION(WINAPI_PARTITION_APP) +# define GTEST_OS_WINDOWS_RT 1 +# else + // WINAPI_FAMILY defined but no known partition matched. + // Default to desktop. +# define GTEST_OS_WINDOWS_DESKTOP 1 +# endif +# else +# define GTEST_OS_WINDOWS_DESKTOP 1 +# endif // _WIN32_WCE +#elif defined __APPLE__ +# define GTEST_OS_MAC 1 +# if TARGET_OS_IPHONE +# define GTEST_OS_IOS 1 +# endif +#elif defined __FreeBSD__ +# define GTEST_OS_FREEBSD 1 +#elif defined __linux__ +# define GTEST_OS_LINUX 1 +# if defined __ANDROID__ +# define GTEST_OS_LINUX_ANDROID 1 +# endif +#elif defined __MVS__ +# define GTEST_OS_ZOS 1 +#elif defined(__sun) && defined(__SVR4) +# define GTEST_OS_SOLARIS 1 +#elif defined(_AIX) +# define GTEST_OS_AIX 1 +#elif defined(__hpux) +# define GTEST_OS_HPUX 1 +#elif defined __native_client__ +# define GTEST_OS_NACL 1 +#elif defined __OpenBSD__ +# define GTEST_OS_OPENBSD 1 +#elif defined __QNX__ +# define GTEST_OS_QNX 1 +#endif // __CYGWIN__ + +// Macros for disabling Microsoft Visual C++ warnings. +// +// GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 4385) +// /* code that triggers warnings C4800 and C4385 */ +// GTEST_DISABLE_MSC_WARNINGS_POP_() +#if _MSC_VER >= 1500 +# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) \ + __pragma(warning(push)) \ + __pragma(warning(disable: warnings)) +# define GTEST_DISABLE_MSC_WARNINGS_POP_() \ + __pragma(warning(pop)) +#else +// Older versions of MSVC don't have __pragma. +# define GTEST_DISABLE_MSC_WARNINGS_PUSH_(warnings) +# define GTEST_DISABLE_MSC_WARNINGS_POP_() +#endif + +#ifndef GTEST_LANG_CXX11 +// gcc and clang define __GXX_EXPERIMENTAL_CXX0X__ when +// -std={c,gnu}++{0x,11} is passed. The C++11 standard specifies a +// value for __cplusplus, and recent versions of clang, gcc, and +// probably other compilers set that too in C++11 mode. +# if __GXX_EXPERIMENTAL_CXX0X__ || __cplusplus >= 201103L +// Compiling in at least C++11 mode. 
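+// (For instance, g++ or clang++ invoked with -std=c++11 defines __cplusplus
+// as 201103L, so this branch is taken; under -std=c++98 neither condition
+// holds and GTEST_LANG_CXX11 ends up 0.)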
+# define GTEST_LANG_CXX11 1 +# else +# define GTEST_LANG_CXX11 0 +# endif +#endif + +// Distinct from C++11 language support, some environments don't provide +// proper C++11 library support. Notably, it's possible to build in +// C++11 mode when targeting Mac OS X 10.6, which has an old libstdc++ +// with no C++11 support. +// +// libstdc++ has sufficient C++11 support as of GCC 4.6.0, __GLIBCXX__ +// 20110325, but maintenance releases in the 4.4 and 4.5 series followed +// this date, so check for those versions by their date stamps. +// https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html#abi.versioning +#if GTEST_LANG_CXX11 && \ + (!defined(__GLIBCXX__) || ( \ + __GLIBCXX__ >= 20110325ul && /* GCC >= 4.6.0 */ \ + /* Blacklist of patch releases of older branches: */ \ + __GLIBCXX__ != 20110416ul && /* GCC 4.4.6 */ \ + __GLIBCXX__ != 20120313ul && /* GCC 4.4.7 */ \ + __GLIBCXX__ != 20110428ul && /* GCC 4.5.3 */ \ + __GLIBCXX__ != 20120702ul)) /* GCC 4.5.4 */ +# define GTEST_STDLIB_CXX11 1 +#endif + +// Only use C++11 library features if the library provides them. +#if GTEST_STDLIB_CXX11 +# define GTEST_HAS_STD_BEGIN_AND_END_ 1 +# define GTEST_HAS_STD_FORWARD_LIST_ 1 +# define GTEST_HAS_STD_FUNCTION_ 1 +# define GTEST_HAS_STD_INITIALIZER_LIST_ 1 +# define GTEST_HAS_STD_MOVE_ 1 +# define GTEST_HAS_STD_UNIQUE_PTR_ 1 +#endif + +// C++11 specifies that provides std::tuple. +// Some platforms still might not have it, however. +#if GTEST_LANG_CXX11 +# define GTEST_HAS_STD_TUPLE_ 1 +# if defined(__clang__) +// Inspired by http://clang.llvm.org/docs/LanguageExtensions.html#__has_include +# if defined(__has_include) && !__has_include() +# undef GTEST_HAS_STD_TUPLE_ +# endif +# elif defined(_MSC_VER) +// Inspired by boost/config/stdlib/dinkumware.hpp +# if defined(_CPPLIB_VER) && _CPPLIB_VER < 520 +# undef GTEST_HAS_STD_TUPLE_ +# endif +# elif defined(__GLIBCXX__) +// Inspired by boost/config/stdlib/libstdcpp3.hpp, +// http://gcc.gnu.org/gcc-4.2/changes.html and +// http://gcc.gnu.org/onlinedocs/libstdc++/manual/bk01pt01ch01.html#manual.intro.status.standard.200x +# if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 2) +# undef GTEST_HAS_STD_TUPLE_ +# endif +# endif +#endif + +// Brings in definitions for functions used in the testing::internal::posix +// namespace (read, write, close, chdir, isatty, stat). We do not currently +// use them on Windows Mobile. +#if GTEST_OS_WINDOWS +# if !GTEST_OS_WINDOWS_MOBILE +# include +# include +# endif +// In order to avoid having to include , use forward declaration +// assuming CRITICAL_SECTION is a typedef of _RTL_CRITICAL_SECTION. +// This assumption is verified by +// WindowsTypesTest.CRITICAL_SECTIONIs_RTL_CRITICAL_SECTION. +struct _RTL_CRITICAL_SECTION; +#else +// This assumes that non-Windows OSes provide unistd.h. For OSes where this +// is not the case, we need to include headers that provide the functions +// mentioned above. +# include +# include +#endif // GTEST_OS_WINDOWS + +#if GTEST_OS_LINUX_ANDROID +// Used to define __ANDROID_API__ matching the target NDK API level. +# include // NOLINT +#endif + +// Defines this to true iff Google Test can use POSIX regular expressions. +#ifndef GTEST_HAS_POSIX_RE +# if GTEST_OS_LINUX_ANDROID +// On Android, is only available starting with Gingerbread. +# define GTEST_HAS_POSIX_RE (__ANDROID_API__ >= 9) +# else +# define GTEST_HAS_POSIX_RE (!GTEST_OS_WINDOWS) +# endif +#endif + +#if GTEST_HAS_POSIX_RE + +// On some platforms, needs someone to define size_t, and +// won't compile otherwise. 
We can #include it here as we already +// included , which is guaranteed to define size_t through +// . +# include // NOLINT + +# define GTEST_USES_POSIX_RE 1 + +#elif GTEST_OS_WINDOWS + +// is not available on Windows. Use our own simple regex +// implementation instead. +# define GTEST_USES_SIMPLE_RE 1 + +#else + +// may not be available on this platform. Use our own +// simple regex implementation instead. +# define GTEST_USES_SIMPLE_RE 1 + +#endif // GTEST_HAS_POSIX_RE + +#ifndef GTEST_HAS_EXCEPTIONS +// The user didn't tell us whether exceptions are enabled, so we need +// to figure it out. +# if defined(_MSC_VER) || defined(__BORLANDC__) +// MSVC's and C++Builder's implementations of the STL use the _HAS_EXCEPTIONS +// macro to enable exceptions, so we'll do the same. +// Assumes that exceptions are enabled by default. +# ifndef _HAS_EXCEPTIONS +# define _HAS_EXCEPTIONS 1 +# endif // _HAS_EXCEPTIONS +# define GTEST_HAS_EXCEPTIONS _HAS_EXCEPTIONS +# elif defined(__clang__) +// clang defines __EXCEPTIONS iff exceptions are enabled before clang 220714, +// but iff cleanups are enabled after that. In Obj-C++ files, there can be +// cleanups for ObjC exceptions which also need cleanups, even if C++ exceptions +// are disabled. clang has __has_feature(cxx_exceptions) which checks for C++ +// exceptions starting at clang r206352, but which checked for cleanups prior to +// that. To reliably check for C++ exception availability with clang, check for +// __EXCEPTIONS && __has_feature(cxx_exceptions). +# define GTEST_HAS_EXCEPTIONS (__EXCEPTIONS && __has_feature(cxx_exceptions)) +# elif defined(__GNUC__) && __EXCEPTIONS +// gcc defines __EXCEPTIONS to 1 iff exceptions are enabled. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__SUNPRO_CC) +// Sun Pro CC supports exceptions. However, there is no compile-time way of +// detecting whether they are enabled or not. Therefore, we assume that +// they are enabled unless the user tells us otherwise. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__IBMCPP__) && __EXCEPTIONS +// xlC defines __EXCEPTIONS to 1 iff exceptions are enabled. +# define GTEST_HAS_EXCEPTIONS 1 +# elif defined(__HP_aCC) +// Exception handling is in effect by default in HP aCC compiler. It has to +// be turned of by +noeh compiler option if desired. +# define GTEST_HAS_EXCEPTIONS 1 +# else +// For other compilers, we assume exceptions are disabled to be +// conservative. +# define GTEST_HAS_EXCEPTIONS 0 +# endif // defined(_MSC_VER) || defined(__BORLANDC__) +#endif // GTEST_HAS_EXCEPTIONS + +#if !defined(GTEST_HAS_STD_STRING) +// Even though we don't use this macro any longer, we keep it in case +// some clients still depend on it. +# define GTEST_HAS_STD_STRING 1 +#elif !GTEST_HAS_STD_STRING +// The user told us that ::std::string isn't available. +# error "Google Test cannot be used where ::std::string isn't available." +#endif // !defined(GTEST_HAS_STD_STRING) + +#ifndef GTEST_HAS_GLOBAL_STRING +// The user didn't tell us whether ::string is available, so we need +// to figure it out. + +# define GTEST_HAS_GLOBAL_STRING 0 + +#endif // GTEST_HAS_GLOBAL_STRING + +#ifndef GTEST_HAS_STD_WSTRING +// The user didn't tell us whether ::std::wstring is available, so we need +// to figure it out. +// TODO(wan@google.com): uses autoconf to detect whether ::std::wstring +// is available. + +// Cygwin 1.7 and below doesn't support ::std::wstring. +// Solaris' libc++ doesn't support it either. Android has +// no support for it at least as recent as Froyo (2.2). 
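+// On those platforms the macro below therefore evaluates to 0, and the
+// ::std::wstring overloads guarded by GTEST_HAS_STD_WSTRING are compiled out.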
+# define GTEST_HAS_STD_WSTRING \ + (!(GTEST_OS_LINUX_ANDROID || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS)) + +#endif // GTEST_HAS_STD_WSTRING + +#ifndef GTEST_HAS_GLOBAL_WSTRING +// The user didn't tell us whether ::wstring is available, so we need +// to figure it out. +# define GTEST_HAS_GLOBAL_WSTRING \ + (GTEST_HAS_STD_WSTRING && GTEST_HAS_GLOBAL_STRING) +#endif // GTEST_HAS_GLOBAL_WSTRING + +// Determines whether RTTI is available. +#ifndef GTEST_HAS_RTTI +// The user didn't tell us whether RTTI is enabled, so we need to +// figure it out. + +# ifdef _MSC_VER + +# ifdef _CPPRTTI // MSVC defines this macro iff RTTI is enabled. +# define GTEST_HAS_RTTI 1 +# else +# define GTEST_HAS_RTTI 0 +# endif + +// Starting with version 4.3.2, gcc defines __GXX_RTTI iff RTTI is enabled. +# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40302) + +# ifdef __GXX_RTTI +// When building against STLport with the Android NDK and with +// -frtti -fno-exceptions, the build fails at link time with undefined +// references to __cxa_bad_typeid. Note sure if STL or toolchain bug, +// so disable RTTI when detected. +# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) && \ + !defined(__EXCEPTIONS) +# define GTEST_HAS_RTTI 0 +# else +# define GTEST_HAS_RTTI 1 +# endif // GTEST_OS_LINUX_ANDROID && __STLPORT_MAJOR && !__EXCEPTIONS +# else +# define GTEST_HAS_RTTI 0 +# endif // __GXX_RTTI + +// Clang defines __GXX_RTTI starting with version 3.0, but its manual recommends +// using has_feature instead. has_feature(cxx_rtti) is supported since 2.7, the +// first version with C++ support. +# elif defined(__clang__) + +# define GTEST_HAS_RTTI __has_feature(cxx_rtti) + +// Starting with version 9.0 IBM Visual Age defines __RTTI_ALL__ to 1 if +// both the typeid and dynamic_cast features are present. +# elif defined(__IBMCPP__) && (__IBMCPP__ >= 900) + +# ifdef __RTTI_ALL__ +# define GTEST_HAS_RTTI 1 +# else +# define GTEST_HAS_RTTI 0 +# endif + +# else + +// For all other compilers, we assume RTTI is enabled. +# define GTEST_HAS_RTTI 1 + +# endif // _MSC_VER + +#endif // GTEST_HAS_RTTI + +// It's this header's responsibility to #include when RTTI +// is enabled. +#if GTEST_HAS_RTTI +# include +#endif + +// Determines whether Google Test can use the pthreads library. +#ifndef GTEST_HAS_PTHREAD +// The user didn't tell us explicitly, so we make reasonable assumptions about +// which platforms have pthreads support. +// +// To disable threading support in Google Test, add -DGTEST_HAS_PTHREAD=0 +// to your compiler flags. +# define GTEST_HAS_PTHREAD (GTEST_OS_LINUX || GTEST_OS_MAC || GTEST_OS_HPUX \ + || GTEST_OS_QNX || GTEST_OS_FREEBSD || GTEST_OS_NACL) +#endif // GTEST_HAS_PTHREAD + +#if GTEST_HAS_PTHREAD +// gtest-port.h guarantees to #include when GTEST_HAS_PTHREAD is +// true. +# include // NOLINT + +// For timespec and nanosleep, used below. +# include // NOLINT +#endif + +// Determines whether Google Test can use tr1/tuple. You can define +// this macro to 0 to prevent Google Test from using tuple (any +// feature depending on tuple with be disabled in this mode). +#ifndef GTEST_HAS_TR1_TUPLE +# if GTEST_OS_LINUX_ANDROID && defined(_STLPORT_MAJOR) +// STLport, provided with the Android NDK, has neither or . +# define GTEST_HAS_TR1_TUPLE 0 +# else +// The user didn't tell us not to do it, so we assume it's OK. +# define GTEST_HAS_TR1_TUPLE 1 +# endif +#endif // GTEST_HAS_TR1_TUPLE + +// Determines whether Google Test's own tr1 tuple implementation +// should be used. 
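+// (As the macro list above notes, this detection can be bypassed by defining
+// GTEST_USE_OWN_TR1_TUPLE to 0 or 1 in the build script, e.g. with
+// -DGTEST_USE_OWN_TR1_TUPLE=0.)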
+#ifndef GTEST_USE_OWN_TR1_TUPLE +// The user didn't tell us, so we need to figure it out. + +// We use our own TR1 tuple if we aren't sure the user has an +// implementation of it already. At this time, libstdc++ 4.0.0+ and +// MSVC 2010 are the only mainstream standard libraries that come +// with a TR1 tuple implementation. NVIDIA's CUDA NVCC compiler +// pretends to be GCC by defining __GNUC__ and friends, but cannot +// compile GCC's tuple implementation. MSVC 2008 (9.0) provides TR1 +// tuple in a 323 MB Feature Pack download, which we cannot assume the +// user has. QNX's QCC compiler is a modified GCC but it doesn't +// support TR1 tuple. libc++ only provides std::tuple, in C++11 mode, +// and it can be used with some compilers that define __GNUC__. +# if (defined(__GNUC__) && !defined(__CUDACC__) && (GTEST_GCC_VER_ >= 40000) \ + && !GTEST_OS_QNX && !defined(_LIBCPP_VERSION)) || _MSC_VER >= 1600 +# define GTEST_ENV_HAS_TR1_TUPLE_ 1 +# endif + +// C++11 specifies that provides std::tuple. Use that if gtest is used +// in C++11 mode and libstdc++ isn't very old (binaries targeting OS X 10.6 +// can build with clang but need to use gcc4.2's libstdc++). +# if GTEST_LANG_CXX11 && (!defined(__GLIBCXX__) || __GLIBCXX__ > 20110325) +# define GTEST_ENV_HAS_STD_TUPLE_ 1 +# endif + +# if GTEST_ENV_HAS_TR1_TUPLE_ || GTEST_ENV_HAS_STD_TUPLE_ +# define GTEST_USE_OWN_TR1_TUPLE 0 +# else +# define GTEST_USE_OWN_TR1_TUPLE 1 +# endif + +#endif // GTEST_USE_OWN_TR1_TUPLE + +// To avoid conditional compilation everywhere, we make it +// gtest-port.h's responsibility to #include the header implementing +// tuple. +#if GTEST_HAS_STD_TUPLE_ +# include // IWYU pragma: export +# define GTEST_TUPLE_NAMESPACE_ ::std +#endif // GTEST_HAS_STD_TUPLE_ + +// We include tr1::tuple even if std::tuple is available to define printers for +// them. +#if GTEST_HAS_TR1_TUPLE +# ifndef GTEST_TUPLE_NAMESPACE_ +# define GTEST_TUPLE_NAMESPACE_ ::std::tr1 +# endif // GTEST_TUPLE_NAMESPACE_ + +# if GTEST_USE_OWN_TR1_TUPLE +// This file was GENERATED by command: +// pump.py gtest-tuple.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2009 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) + +// Implements a subset of TR1 tuple needed by Google Test and Google Mock. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ + +#include // For ::std::pair. + +// The compiler used in Symbian has a bug that prevents us from declaring the +// tuple template as a friend (it complains that tuple is redefined). This +// hack bypasses the bug by declaring the members that should otherwise be +// private as public. +// Sun Studio versions < 12 also have the above bug. +#if defined(__SYMBIAN32__) || (defined(__SUNPRO_CC) && __SUNPRO_CC < 0x590) +# define GTEST_DECLARE_TUPLE_AS_FRIEND_ public: +#else +# define GTEST_DECLARE_TUPLE_AS_FRIEND_ \ + template friend class tuple; \ + private: +#endif + +// Visual Studio 2010, 2012, and 2013 define symbols in std::tr1 that conflict +// with our own definitions. Therefore using our own tuple does not work on +// those compilers. +#if defined(_MSC_VER) && _MSC_VER >= 1600 /* 1600 is Visual Studio 2010 */ +# error "gtest's tuple doesn't compile on Visual Studio 2010 or later. \ +GTEST_USE_OWN_TR1_TUPLE must be set to 0 on those compilers." +#endif + +// GTEST_n_TUPLE_(T) is the type of an n-tuple. +#define GTEST_0_TUPLE_(T) tuple<> +#define GTEST_1_TUPLE_(T) tuple +#define GTEST_2_TUPLE_(T) tuple +#define GTEST_3_TUPLE_(T) tuple +#define GTEST_4_TUPLE_(T) tuple +#define GTEST_5_TUPLE_(T) tuple +#define GTEST_6_TUPLE_(T) tuple +#define GTEST_7_TUPLE_(T) tuple +#define GTEST_8_TUPLE_(T) tuple +#define GTEST_9_TUPLE_(T) tuple +#define GTEST_10_TUPLE_(T) tuple + +// GTEST_n_TYPENAMES_(T) declares a list of n typenames. +#define GTEST_0_TYPENAMES_(T) +#define GTEST_1_TYPENAMES_(T) typename T##0 +#define GTEST_2_TYPENAMES_(T) typename T##0, typename T##1 +#define GTEST_3_TYPENAMES_(T) typename T##0, typename T##1, typename T##2 +#define GTEST_4_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3 +#define GTEST_5_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4 +#define GTEST_6_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5 +#define GTEST_7_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6 +#define GTEST_8_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, typename T##7 +#define GTEST_9_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, \ + typename T##7, typename T##8 +#define GTEST_10_TYPENAMES_(T) typename T##0, typename T##1, typename T##2, \ + typename T##3, typename T##4, typename T##5, typename T##6, \ + typename T##7, typename T##8, typename T##9 + +// In theory, defining stuff in the ::std namespace is undefined +// behavior. 
We can do this as we are playing the role of a standard +// library vendor. +namespace std { +namespace tr1 { + +template +class tuple; + +// Anything in namespace gtest_internal is Google Test's INTERNAL +// IMPLEMENTATION DETAIL and MUST NOT BE USED DIRECTLY in user code. +namespace gtest_internal { + +// ByRef::type is T if T is a reference; otherwise it's const T&. +template +struct ByRef { typedef const T& type; }; // NOLINT +template +struct ByRef { typedef T& type; }; // NOLINT + +// A handy wrapper for ByRef. +#define GTEST_BY_REF_(T) typename ::std::tr1::gtest_internal::ByRef::type + +// AddRef::type is T if T is a reference; otherwise it's T&. This +// is the same as tr1::add_reference::type. +template +struct AddRef { typedef T& type; }; // NOLINT +template +struct AddRef { typedef T& type; }; // NOLINT + +// A handy wrapper for AddRef. +#define GTEST_ADD_REF_(T) typename ::std::tr1::gtest_internal::AddRef::type + +// A helper for implementing get(). +template class Get; + +// A helper for implementing tuple_element. kIndexValid is true +// iff k < the number of fields in tuple type T. +template +struct TupleElement; + +template +struct TupleElement { + typedef T0 type; +}; + +template +struct TupleElement { + typedef T1 type; +}; + +template +struct TupleElement { + typedef T2 type; +}; + +template +struct TupleElement { + typedef T3 type; +}; + +template +struct TupleElement { + typedef T4 type; +}; + +template +struct TupleElement { + typedef T5 type; +}; + +template +struct TupleElement { + typedef T6 type; +}; + +template +struct TupleElement { + typedef T7 type; +}; + +template +struct TupleElement { + typedef T8 type; +}; + +template +struct TupleElement { + typedef T9 type; +}; + +} // namespace gtest_internal + +template <> +class tuple<> { + public: + tuple() {} + tuple(const tuple& /* t */) {} + tuple& operator=(const tuple& /* t */) { return *this; } +}; + +template +class GTEST_1_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0) : f0_(f0) {} + + tuple(const tuple& t) : f0_(t.f0_) {} + + template + tuple(const GTEST_1_TUPLE_(U)& t) : f0_(t.f0_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_1_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_1_TUPLE_(U)& t) { + f0_ = t.f0_; + return *this; + } + + T0 f0_; +}; + +template +class GTEST_2_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1) : f0_(f0), + f1_(f1) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_) {} + + template + tuple(const GTEST_2_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_) {} + template + tuple(const ::std::pair& p) : f0_(p.first), f1_(p.second) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_2_TUPLE_(U)& t) { + return CopyFrom(t); + } + template + tuple& operator=(const ::std::pair& p) { + f0_ = p.first; + f1_ = p.second; + return *this; + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_2_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + return *this; + } + + T0 f0_; + T1 f1_; +}; + +template +class GTEST_3_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2) : 
f0_(f0), f1_(f1), f2_(f2) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {} + + template + tuple(const GTEST_3_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_3_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_3_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; +}; + +template +class GTEST_4_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_) {} + + template + tuple(const GTEST_4_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_4_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_4_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; +}; + +template +class GTEST_5_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, + GTEST_BY_REF_(T4) f4) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_) {} + + template + tuple(const GTEST_5_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_5_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_5_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; +}; + +template +class GTEST_6_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_) {} + + template + tuple(const GTEST_6_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_6_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_6_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; +}; + +template +class GTEST_7_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + 
GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3), f4_(f4), f5_(f5), f6_(f6) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {} + + template + tuple(const GTEST_7_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_7_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_7_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; +}; + +template +class GTEST_8_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, + GTEST_BY_REF_(T7) f7) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5), f6_(f6), f7_(f7) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {} + + template + tuple(const GTEST_8_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_8_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_8_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; +}; + +template +class GTEST_9_TUPLE_(T) { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_() {} + + explicit tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7, + GTEST_BY_REF_(T8) f8) : f0_(f0), f1_(f1), f2_(f2), f3_(f3), f4_(f4), + f5_(f5), f6_(f6), f7_(f7), f8_(f8) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {} + + template + tuple(const GTEST_9_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_9_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_9_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + f8_ = t.f8_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; + T8 f8_; +}; + +template +class tuple { + public: + template friend class gtest_internal::Get; + + tuple() : f0_(), f1_(), f2_(), f3_(), f4_(), f5_(), f6_(), f7_(), f8_(), + f9_() {} + + explicit 
tuple(GTEST_BY_REF_(T0) f0, GTEST_BY_REF_(T1) f1, + GTEST_BY_REF_(T2) f2, GTEST_BY_REF_(T3) f3, GTEST_BY_REF_(T4) f4, + GTEST_BY_REF_(T5) f5, GTEST_BY_REF_(T6) f6, GTEST_BY_REF_(T7) f7, + GTEST_BY_REF_(T8) f8, GTEST_BY_REF_(T9) f9) : f0_(f0), f1_(f1), f2_(f2), + f3_(f3), f4_(f4), f5_(f5), f6_(f6), f7_(f7), f8_(f8), f9_(f9) {} + + tuple(const tuple& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), f3_(t.f3_), + f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), f9_(t.f9_) {} + + template + tuple(const GTEST_10_TUPLE_(U)& t) : f0_(t.f0_), f1_(t.f1_), f2_(t.f2_), + f3_(t.f3_), f4_(t.f4_), f5_(t.f5_), f6_(t.f6_), f7_(t.f7_), f8_(t.f8_), + f9_(t.f9_) {} + + tuple& operator=(const tuple& t) { return CopyFrom(t); } + + template + tuple& operator=(const GTEST_10_TUPLE_(U)& t) { + return CopyFrom(t); + } + + GTEST_DECLARE_TUPLE_AS_FRIEND_ + + template + tuple& CopyFrom(const GTEST_10_TUPLE_(U)& t) { + f0_ = t.f0_; + f1_ = t.f1_; + f2_ = t.f2_; + f3_ = t.f3_; + f4_ = t.f4_; + f5_ = t.f5_; + f6_ = t.f6_; + f7_ = t.f7_; + f8_ = t.f8_; + f9_ = t.f9_; + return *this; + } + + T0 f0_; + T1 f1_; + T2 f2_; + T3 f3_; + T4 f4_; + T5 f5_; + T6 f6_; + T7 f7_; + T8 f8_; + T9 f9_; +}; + +// 6.1.3.2 Tuple creation functions. + +// Known limitations: we don't support passing an +// std::tr1::reference_wrapper to make_tuple(). And we don't +// implement tie(). + +inline tuple<> make_tuple() { return tuple<>(); } + +template +inline GTEST_1_TUPLE_(T) make_tuple(const T0& f0) { + return GTEST_1_TUPLE_(T)(f0); +} + +template +inline GTEST_2_TUPLE_(T) make_tuple(const T0& f0, const T1& f1) { + return GTEST_2_TUPLE_(T)(f0, f1); +} + +template +inline GTEST_3_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2) { + return GTEST_3_TUPLE_(T)(f0, f1, f2); +} + +template +inline GTEST_4_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3) { + return GTEST_4_TUPLE_(T)(f0, f1, f2, f3); +} + +template +inline GTEST_5_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4) { + return GTEST_5_TUPLE_(T)(f0, f1, f2, f3, f4); +} + +template +inline GTEST_6_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5) { + return GTEST_6_TUPLE_(T)(f0, f1, f2, f3, f4, f5); +} + +template +inline GTEST_7_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6) { + return GTEST_7_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6); +} + +template +inline GTEST_8_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7) { + return GTEST_8_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7); +} + +template +inline GTEST_9_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7, + const T8& f8) { + return GTEST_9_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8); +} + +template +inline GTEST_10_TUPLE_(T) make_tuple(const T0& f0, const T1& f1, const T2& f2, + const T3& f3, const T4& f4, const T5& f5, const T6& f6, const T7& f7, + const T8& f8, const T9& f9) { + return GTEST_10_TUPLE_(T)(f0, f1, f2, f3, f4, f5, f6, f7, f8, f9); +} + +// 6.1.3.3 Tuple helper classes. 
+ +template struct tuple_size; + +template +struct tuple_size { + static const int value = 0; +}; + +template +struct tuple_size { + static const int value = 1; +}; + +template +struct tuple_size { + static const int value = 2; +}; + +template +struct tuple_size { + static const int value = 3; +}; + +template +struct tuple_size { + static const int value = 4; +}; + +template +struct tuple_size { + static const int value = 5; +}; + +template +struct tuple_size { + static const int value = 6; +}; + +template +struct tuple_size { + static const int value = 7; +}; + +template +struct tuple_size { + static const int value = 8; +}; + +template +struct tuple_size { + static const int value = 9; +}; + +template +struct tuple_size { + static const int value = 10; +}; + +template +struct tuple_element { + typedef typename gtest_internal::TupleElement< + k < (tuple_size::value), k, Tuple>::type type; +}; + +#define GTEST_TUPLE_ELEMENT_(k, Tuple) typename tuple_element::type + +// 6.1.3.4 Element access. + +namespace gtest_internal { + +template <> +class Get<0> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple)) + Field(Tuple& t) { return t.f0_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(0, Tuple)) + ConstField(const Tuple& t) { return t.f0_; } +}; + +template <> +class Get<1> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple)) + Field(Tuple& t) { return t.f1_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(1, Tuple)) + ConstField(const Tuple& t) { return t.f1_; } +}; + +template <> +class Get<2> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple)) + Field(Tuple& t) { return t.f2_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(2, Tuple)) + ConstField(const Tuple& t) { return t.f2_; } +}; + +template <> +class Get<3> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple)) + Field(Tuple& t) { return t.f3_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(3, Tuple)) + ConstField(const Tuple& t) { return t.f3_; } +}; + +template <> +class Get<4> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple)) + Field(Tuple& t) { return t.f4_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(4, Tuple)) + ConstField(const Tuple& t) { return t.f4_; } +}; + +template <> +class Get<5> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple)) + Field(Tuple& t) { return t.f5_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(5, Tuple)) + ConstField(const Tuple& t) { return t.f5_; } +}; + +template <> +class Get<6> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple)) + Field(Tuple& t) { return t.f6_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(6, Tuple)) + ConstField(const Tuple& t) { return t.f6_; } +}; + +template <> +class Get<7> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple)) + Field(Tuple& t) { return t.f7_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(7, Tuple)) + ConstField(const Tuple& t) { return t.f7_; } +}; + +template <> +class Get<8> { + public: + template + static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple)) + Field(Tuple& t) { return t.f8_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(8, Tuple)) + ConstField(const Tuple& t) { return t.f8_; } +}; + +template <> +class Get<9> { + public: + template + 
static GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple)) + Field(Tuple& t) { return t.f9_; } // NOLINT + + template + static GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(9, Tuple)) + ConstField(const Tuple& t) { return t.f9_; } +}; + +} // namespace gtest_internal + +template +GTEST_ADD_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T))) +get(GTEST_10_TUPLE_(T)& t) { + return gtest_internal::Get::Field(t); +} + +template +GTEST_BY_REF_(GTEST_TUPLE_ELEMENT_(k, GTEST_10_TUPLE_(T))) +get(const GTEST_10_TUPLE_(T)& t) { + return gtest_internal::Get::ConstField(t); +} + +// 6.1.3.5 Relational operators + +// We only implement == and !=, as we don't have a need for the rest yet. + +namespace gtest_internal { + +// SameSizeTuplePrefixComparator::Eq(t1, t2) returns true if the +// first k fields of t1 equals the first k fields of t2. +// SameSizeTuplePrefixComparator(k1, k2) would be a compiler error if +// k1 != k2. +template +struct SameSizeTuplePrefixComparator; + +template <> +struct SameSizeTuplePrefixComparator<0, 0> { + template + static bool Eq(const Tuple1& /* t1 */, const Tuple2& /* t2 */) { + return true; + } +}; + +template +struct SameSizeTuplePrefixComparator { + template + static bool Eq(const Tuple1& t1, const Tuple2& t2) { + return SameSizeTuplePrefixComparator::Eq(t1, t2) && + ::std::tr1::get(t1) == ::std::tr1::get(t2); + } +}; + +} // namespace gtest_internal + +template +inline bool operator==(const GTEST_10_TUPLE_(T)& t, + const GTEST_10_TUPLE_(U)& u) { + return gtest_internal::SameSizeTuplePrefixComparator< + tuple_size::value, + tuple_size::value>::Eq(t, u); +} + +template +inline bool operator!=(const GTEST_10_TUPLE_(T)& t, + const GTEST_10_TUPLE_(U)& u) { return !(t == u); } + +// 6.1.4 Pairs. +// Unimplemented. + +} // namespace tr1 +} // namespace std + +#undef GTEST_0_TUPLE_ +#undef GTEST_1_TUPLE_ +#undef GTEST_2_TUPLE_ +#undef GTEST_3_TUPLE_ +#undef GTEST_4_TUPLE_ +#undef GTEST_5_TUPLE_ +#undef GTEST_6_TUPLE_ +#undef GTEST_7_TUPLE_ +#undef GTEST_8_TUPLE_ +#undef GTEST_9_TUPLE_ +#undef GTEST_10_TUPLE_ + +#undef GTEST_0_TYPENAMES_ +#undef GTEST_1_TYPENAMES_ +#undef GTEST_2_TYPENAMES_ +#undef GTEST_3_TYPENAMES_ +#undef GTEST_4_TYPENAMES_ +#undef GTEST_5_TYPENAMES_ +#undef GTEST_6_TYPENAMES_ +#undef GTEST_7_TYPENAMES_ +#undef GTEST_8_TYPENAMES_ +#undef GTEST_9_TYPENAMES_ +#undef GTEST_10_TYPENAMES_ + +#undef GTEST_DECLARE_TUPLE_AS_FRIEND_ +#undef GTEST_BY_REF_ +#undef GTEST_ADD_REF_ +#undef GTEST_TUPLE_ELEMENT_ + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TUPLE_H_ +# elif GTEST_ENV_HAS_STD_TUPLE_ +# include +// C++11 puts its tuple into the ::std namespace rather than +// ::std::tr1. gtest expects tuple to live in ::std::tr1, so put it there. +// This causes undefined behavior, but supported compilers react in +// the way we intend. +namespace std { +namespace tr1 { +using ::std::get; +using ::std::make_tuple; +using ::std::tuple; +using ::std::tuple_element; +using ::std::tuple_size; +} +} + +# elif GTEST_OS_SYMBIAN + +// On Symbian, BOOST_HAS_TR1_TUPLE causes Boost's TR1 tuple library to +// use STLport's tuple implementation, which unfortunately doesn't +// work as the copy of STLport distributed with Symbian is incomplete. +// By making sure BOOST_HAS_TR1_TUPLE is undefined, we force Boost to +// use its own tuple implementation. +# ifdef BOOST_HAS_TR1_TUPLE +# undef BOOST_HAS_TR1_TUPLE +# endif // BOOST_HAS_TR1_TUPLE + +// This prevents , which defines +// BOOST_HAS_TR1_TUPLE, from being #included by Boost's . 
+#  define BOOST_TR1_DETAIL_CONFIG_HPP_INCLUDED
+#  include <tuple>  // IWYU pragma: export  // NOLINT
+
+# elif defined(__GNUC__) && (GTEST_GCC_VER_ >= 40000)
+// GCC 4.0+ implements tr1/tuple in the <tr1/tuple> header.  This does
+// not conform to the TR1 spec, which requires the header to be <tuple>.
+
+#  if !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+// Until version 4.3.2, gcc has a bug that causes <tr1/functional>,
+// which is #included by <tr1/tuple>, to not compile when RTTI is
+// disabled.  _TR1_FUNCTIONAL is the header guard for
+// <tr1/functional>.  Hence the following #define is a hack to prevent
+// <tr1/functional> from being included.
+#   define _TR1_FUNCTIONAL 1
+#   include <tr1/tuple>
+#   undef _TR1_FUNCTIONAL  // Allows the user to #include
+                           // <tr1/functional> if he chooses to.
+#  else
+#   include <tr1/tuple>  // NOLINT
+#  endif  // !GTEST_HAS_RTTI && GTEST_GCC_VER_ < 40302
+
+# else
+// If the compiler is not GCC 4.0+, we assume the user is using a
+// spec-conforming TR1 implementation.
+#  include <tuple>  // IWYU pragma: export  // NOLINT
+# endif  // GTEST_USE_OWN_TR1_TUPLE
+
+#endif  // GTEST_HAS_TR1_TUPLE
+
+// Determines whether clone(2) is supported.
+// Usually it will only be available on Linux, excluding
+// Linux on the Itanium architecture.
+// Also see http://linux.die.net/man/2/clone.
+#ifndef GTEST_HAS_CLONE
+// The user didn't tell us, so we need to figure it out.
+
+# if GTEST_OS_LINUX && !defined(__ia64__)
+#  if GTEST_OS_LINUX_ANDROID
+// On Android, clone() is only available on ARM starting with Gingerbread.
+#   if defined(__arm__) && __ANDROID_API__ >= 9
+#    define GTEST_HAS_CLONE 1
+#   else
+#    define GTEST_HAS_CLONE 0
+#   endif
+#  else
+#   define GTEST_HAS_CLONE 1
+#  endif
+# else
+#  define GTEST_HAS_CLONE 0
+# endif  // GTEST_OS_LINUX && !defined(__ia64__)
+
+#endif  // GTEST_HAS_CLONE
+
+// Determines whether to support stream redirection. This is used to test
+// output correctness and to implement death tests.
+#ifndef GTEST_HAS_STREAM_REDIRECTION
+// By default, we assume that stream redirection is supported on all
+// platforms except known mobile ones.
+# if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_SYMBIAN || \
+    GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
+#  define GTEST_HAS_STREAM_REDIRECTION 0
+# else
+#  define GTEST_HAS_STREAM_REDIRECTION 1
+# endif  // !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_SYMBIAN
+#endif  // GTEST_HAS_STREAM_REDIRECTION
+
+// Determines whether to support death tests.
+// Google Test does not support death tests for VC 7.1 and earlier as
+// abort() in a VC 7.1 application compiled as GUI in debug config
+// pops up a dialog window that cannot be suppressed programmatically.
+#if (GTEST_OS_LINUX || GTEST_OS_CYGWIN || GTEST_OS_SOLARIS || \
+     (GTEST_OS_MAC && !GTEST_OS_IOS) || \
+     (GTEST_OS_WINDOWS_DESKTOP && _MSC_VER >= 1400) || \
+     GTEST_OS_WINDOWS_MINGW || GTEST_OS_AIX || GTEST_OS_HPUX || \
+     GTEST_OS_OPENBSD || GTEST_OS_QNX || GTEST_OS_FREEBSD)
+# define GTEST_HAS_DEATH_TEST 1
+# include <vector>  // NOLINT
+#endif
+
+// We don't support MSVC 7.1 with exceptions disabled now.  Therefore
+// all the compilers we care about are adequate for supporting
+// value-parameterized tests.
+#define GTEST_HAS_PARAM_TEST 1
+
+// Determines whether to support type-driven tests.
+
+// Typed tests need <typeinfo> and variadic macros, which GCC, VC++ 8.0,
+// Sun Pro CC, IBM Visual Age, and HP aCC support.
+#if defined(__GNUC__) || (_MSC_VER >= 1400) || defined(__SUNPRO_CC) || \
+    defined(__IBMCPP__) || defined(__HP_aCC)
+# define GTEST_HAS_TYPED_TEST 1
+# define GTEST_HAS_TYPED_TEST_P 1
+#endif
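When GTEST_HAS_TYPED_TEST ends up defined as 1, the public TYPED_TEST machinery
becomes usable. A minimal sketch of what that enables (fixture, type list, and
assertion are invented for illustration; assumes linking against gtest_main):

    #include "gtest/gtest.h"

    template <typename T>
    class ZeroInitTest : public ::testing::Test {};

    typedef ::testing::Types<int, long, double> MyTypes;
    TYPED_TEST_CASE(ZeroInitTest, MyTypes);  // instantiated once per type

    TYPED_TEST(ZeroInitTest, ValueInitIsZero) {
      TypeParam value = TypeParam();  // value-initialization
      EXPECT_EQ(TypeParam(0), value);
    }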
+// Determines whether to support Combine(). This only makes sense when
+// value-parameterized tests are enabled.  The implementation doesn't
+// work on Sun Studio since it doesn't understand templated conversion
+// operators.
+#if GTEST_HAS_PARAM_TEST && GTEST_HAS_TR1_TUPLE && !defined(__SUNPRO_CC)
+# define GTEST_HAS_COMBINE 1
+#endif
+
+// Determines whether the system compiler uses UTF-16 for encoding wide strings.
+#define GTEST_WIDE_STRING_USES_UTF16_ \
+    (GTEST_OS_WINDOWS || GTEST_OS_CYGWIN || GTEST_OS_SYMBIAN || GTEST_OS_AIX)
+
+// Determines whether test results can be streamed to a socket.
+#if GTEST_OS_LINUX
+# define GTEST_CAN_STREAM_RESULTS_ 1
+#endif
+
+// Defines some utility macros.
+
+// The GNU compiler emits a warning if nested "if" statements are followed by
+// an "else" statement and braces are not used to explicitly disambiguate the
+// "else" binding.  This leads to problems with code like:
+//
+//   if (gate)
+//     ASSERT_*(condition) << "Some message";
+//
+// The "switch (0) case 0:" idiom is used to suppress this.
+#ifdef __INTEL_COMPILER
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_
+#else
+# define GTEST_AMBIGUOUS_ELSE_BLOCKER_ switch (0) case 0: default:  // NOLINT
+#endif
+
+// Use this annotation at the end of a struct/class definition to
+// prevent the compiler from optimizing away instances that are never
+// used.  This is useful when all interesting logic happens inside the
+// c'tor and / or d'tor.  Example:
+//
+//   struct Foo {
+//     Foo() { ... }
+//   } GTEST_ATTRIBUTE_UNUSED_;
+//
+// Also use it after a variable or parameter declaration to tell the
+// compiler the variable/parameter does not have to be used.
+#if defined(__GNUC__) && !defined(COMPILER_ICC)
+# define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
+#elif defined(__clang__)
+# if __has_attribute(unused)
+#  define GTEST_ATTRIBUTE_UNUSED_ __attribute__ ((unused))
+# endif
+#endif
+#ifndef GTEST_ATTRIBUTE_UNUSED_
+# define GTEST_ATTRIBUTE_UNUSED_
+#endif
+
+// A macro to disallow operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_ASSIGN_(type)\
+  void operator=(type const &)
+
+// A macro to disallow copy constructor and operator=
+// This should be used in the private: declarations for a class.
+#define GTEST_DISALLOW_COPY_AND_ASSIGN_(type)\
+  type(type const &);\
+  GTEST_DISALLOW_ASSIGN_(type)
+
+// Tell the compiler to warn about unused return values for functions declared
+// with this macro.  The macro should be used on function declarations
+// following the argument list:
+//
+//   Sprocket* AllocateSprocket() GTEST_MUST_USE_RESULT_;
+#if defined(__GNUC__) && (GTEST_GCC_VER_ >= 30400) && !defined(COMPILER_ICC)
+# define GTEST_MUST_USE_RESULT_ __attribute__ ((warn_unused_result))
+#else
+# define GTEST_MUST_USE_RESULT_
+#endif  // __GNUC__ && (GTEST_GCC_VER_ >= 30400) && !COMPILER_ICC
+
+// MS C++ compiler emits warning when a conditional expression is compile time
+// constant. In some contexts this warning is false positive and needs to be
+// suppressed. Use the following two macros in such cases:
+//
+//   GTEST_INTENTIONAL_CONST_COND_PUSH_()
+//   while (true) {
+//   GTEST_INTENTIONAL_CONST_COND_POP_()
+//   }
+# define GTEST_INTENTIONAL_CONST_COND_PUSH_() \
+    GTEST_DISABLE_MSC_WARNINGS_PUSH_(4127)
+# define GTEST_INTENTIONAL_CONST_COND_POP_() \
+    GTEST_DISABLE_MSC_WARNINGS_POP_()
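To make the copy-control macros above concrete, here is a small fragment (class
name invented) showing the intended use from a private: section; under C++98,
declaring but never defining the copy operations turns accidental copies into
compile- or link-time errors:

    class Connection {
     public:
      Connection() {}

     private:
      GTEST_DISALLOW_COPY_AND_ASSIGN_(Connection);
    };

    // Connection a, b;
    // Connection c(a);  // error: copy constructor is private
    // b = a;            // error: operator= is private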
+// Determine whether the compiler supports Microsoft's Structured Exception
+// Handling.  This is supported by several Windows compilers but generally
+// does not exist on any other system.
+#ifndef GTEST_HAS_SEH
+// The user didn't tell us, so we need to figure it out.
+
+# if defined(_MSC_VER) || defined(__BORLANDC__)
+// These two compilers are known to support SEH.
+#  define GTEST_HAS_SEH 1
+# else
+// Assume no SEH.
+#  define GTEST_HAS_SEH 0
+# endif
+
+#define GTEST_IS_THREADSAFE \
+    (0 \
+     || (GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT) \
+     || GTEST_HAS_PTHREAD)
+
+#endif  // GTEST_HAS_SEH
+
+#ifdef _MSC_VER
+
+# if GTEST_LINKED_AS_SHARED_LIBRARY
+#  define GTEST_API_ __declspec(dllimport)
+# elif GTEST_CREATE_SHARED_LIBRARY
+#  define GTEST_API_ __declspec(dllexport)
+# endif
+
+#endif  // _MSC_VER
+
+#ifndef GTEST_API_
+# define GTEST_API_
+#endif
+
+#ifdef __GNUC__
+// Ask the compiler to never inline a given function.
+# define GTEST_NO_INLINE_ __attribute__((noinline))
+#else
+# define GTEST_NO_INLINE_
+#endif
+
+// _LIBCPP_VERSION is defined by the libc++ library from the LLVM project.
+#if defined(__GLIBCXX__) || defined(_LIBCPP_VERSION)
+# define GTEST_HAS_CXXABI_H_ 1
+#else
+# define GTEST_HAS_CXXABI_H_ 0
+#endif
+
+// A function level attribute to disable checking for use of uninitialized
+// memory when built with MemorySanitizer.
+#if defined(__clang__)
+# if __has_feature(memory_sanitizer)
+#  define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_ \
+       __attribute__((no_sanitize_memory))
+# else
+#  define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+# endif  // __has_feature(memory_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_MEMORY_
+#endif  // __clang__
+
+// A function level attribute to disable AddressSanitizer instrumentation.
+#if defined(__clang__)
+# if __has_feature(address_sanitizer)
+#  define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_ \
+       __attribute__((no_sanitize_address))
+# else
+#  define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+# endif  // __has_feature(address_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_ADDRESS_
+#endif  // __clang__
+
+// A function level attribute to disable ThreadSanitizer instrumentation.
+#if defined(__clang__)
+# if __has_feature(thread_sanitizer)
+#  define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_ \
+       __attribute__((no_sanitize_thread))
+# else
+#  define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+# endif  // __has_feature(thread_sanitizer)
+#else
+# define GTEST_ATTRIBUTE_NO_SANITIZE_THREAD_
+#endif  // __clang__
+
+namespace testing {
+
+class Message;
+
+#if defined(GTEST_TUPLE_NAMESPACE_)
+// Import tuple and friends into the ::testing namespace.
+// It is part of our interface, having them in ::testing allows us to change
+// their types as needed.
+using GTEST_TUPLE_NAMESPACE_::get;
+using GTEST_TUPLE_NAMESPACE_::make_tuple;
+using GTEST_TUPLE_NAMESPACE_::tuple;
+using GTEST_TUPLE_NAMESPACE_::tuple_size;
+using GTEST_TUPLE_NAMESPACE_::tuple_element;
+#endif  // defined(GTEST_TUPLE_NAMESPACE_)
+
+namespace internal {
+
+// A secret type that Google Test users don't know about.  It has no
+// definition on purpose.  Therefore it's impossible to create a
+// Secret object, which is what we want.
+class Secret;
+
+// The GTEST_COMPILE_ASSERT_ macro can be used to verify that a compile time
+// expression is true. For example, you could use it to verify the
+// size of a static array:
+//
+//   GTEST_COMPILE_ASSERT_(GTEST_ARRAY_SIZE_(names) == NUM_NAMES,
+//                         names_incorrect_size);
+//
+// or to make sure a struct is smaller than a certain size:
+//
+//   GTEST_COMPILE_ASSERT_(sizeof(foo) < 128, foo_too_large);
+//
+// The second argument to the macro is the name of the variable. If
+// the expression is false, most compilers will issue a warning/error
+// containing the name of the variable.
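A concrete fragment of the usage just described (array, enum, and message name
invented; GTEST_ARRAY_SIZE_ is defined further down in this header):

    static const char* const kColorNames[] = {"red", "green", "blue"};
    enum { NUM_COLORS = 3 };

    // Refuses to compile, in both implementation branches below, if the
    // array and the enum ever drift apart.
    GTEST_COMPILE_ASSERT_(GTEST_ARRAY_SIZE_(kColorNames) == NUM_COLORS,
                          color_names_incorrect_size);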
+ +#if GTEST_LANG_CXX11 +# define GTEST_COMPILE_ASSERT_(expr, msg) static_assert(expr, #msg) +#else // !GTEST_LANG_CXX11 +template + struct CompileAssert { +}; + +# define GTEST_COMPILE_ASSERT_(expr, msg) \ + typedef ::testing::internal::CompileAssert<(static_cast(expr))> \ + msg[static_cast(expr) ? 1 : -1] GTEST_ATTRIBUTE_UNUSED_ +#endif // !GTEST_LANG_CXX11 + +// Implementation details of GTEST_COMPILE_ASSERT_: +// +// (In C++11, we simply use static_assert instead of the following) +// +// - GTEST_COMPILE_ASSERT_ works by defining an array type that has -1 +// elements (and thus is invalid) when the expression is false. +// +// - The simpler definition +// +// #define GTEST_COMPILE_ASSERT_(expr, msg) typedef char msg[(expr) ? 1 : -1] +// +// does not work, as gcc supports variable-length arrays whose sizes +// are determined at run-time (this is gcc's extension and not part +// of the C++ standard). As a result, gcc fails to reject the +// following code with the simple definition: +// +// int foo; +// GTEST_COMPILE_ASSERT_(foo, msg); // not supposed to compile as foo is +// // not a compile-time constant. +// +// - By using the type CompileAssert<(bool(expr))>, we ensures that +// expr is a compile-time constant. (Template arguments must be +// determined at compile-time.) +// +// - The outter parentheses in CompileAssert<(bool(expr))> are necessary +// to work around a bug in gcc 3.4.4 and 4.0.1. If we had written +// +// CompileAssert +// +// instead, these compilers will refuse to compile +// +// GTEST_COMPILE_ASSERT_(5 > 0, some_message); +// +// (They seem to think the ">" in "5 > 0" marks the end of the +// template argument list.) +// +// - The array size is (bool(expr) ? 1 : -1), instead of simply +// +// ((expr) ? 1 : -1). +// +// This is to avoid running into a bug in MS VC 7.1, which +// causes ((0.0) ? 1 : -1) to incorrectly evaluate to 1. + +// StaticAssertTypeEqHelper is used by StaticAssertTypeEq defined in gtest.h. +// +// This template is declared, but intentionally undefined. +template +struct StaticAssertTypeEqHelper; + +template +struct StaticAssertTypeEqHelper { + enum { value = true }; +}; + +// Evaluates to the number of elements in 'array'. +#define GTEST_ARRAY_SIZE_(array) (sizeof(array) / sizeof(array[0])) + +#if GTEST_HAS_GLOBAL_STRING +typedef ::string string; +#else +typedef ::std::string string; +#endif // GTEST_HAS_GLOBAL_STRING + +#if GTEST_HAS_GLOBAL_WSTRING +typedef ::wstring wstring; +#elif GTEST_HAS_STD_WSTRING +typedef ::std::wstring wstring; +#endif // GTEST_HAS_GLOBAL_WSTRING + +// A helper for suppressing warnings on constant condition. It just +// returns 'condition'. +GTEST_API_ bool IsTrue(bool condition); + +// Defines scoped_ptr. + +// This implementation of scoped_ptr is PARTIAL - it only contains +// enough stuff to satisfy Google Test's need. +template +class scoped_ptr { + public: + typedef T element_type; + + explicit scoped_ptr(T* p = NULL) : ptr_(p) {} + ~scoped_ptr() { reset(); } + + T& operator*() const { return *ptr_; } + T* operator->() const { return ptr_; } + T* get() const { return ptr_; } + + T* release() { + T* const ptr = ptr_; + ptr_ = NULL; + return ptr; + } + + void reset(T* p = NULL) { + if (p != ptr_) { + if (IsTrue(sizeof(T) > 0)) { // Makes sure T is a complete type. + delete ptr_; + } + ptr_ = p; + } + } + + friend void swap(scoped_ptr& a, scoped_ptr& b) { + using std::swap; + swap(a.ptr_, b.ptr_); + } + + private: + T* ptr_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(scoped_ptr); +}; + +// Defines RE. 
+
+// A simple C++ wrapper for <regex.h>.  It uses the POSIX Extended
+// Regular Expression syntax.
+class GTEST_API_ RE {
+ public:
+  // A copy constructor is required by the Standard to initialize object
+  // references from r-values.
+  RE(const RE& other) { Init(other.pattern()); }
+
+  // Constructs an RE from a string.
+  RE(const ::std::string& regex) { Init(regex.c_str()); }  // NOLINT
+
+#if GTEST_HAS_GLOBAL_STRING
+
+  RE(const ::string& regex) { Init(regex.c_str()); }  // NOLINT
+
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+  RE(const char* regex) { Init(regex); }  // NOLINT
+  ~RE();
+
+  // Returns the string representation of the regex.
+  const char* pattern() const { return pattern_; }
+
+  // FullMatch(str, re) returns true iff regular expression re matches
+  // the entire str.
+  // PartialMatch(str, re) returns true iff regular expression re
+  // matches a substring of str (including str itself).
+  //
+  // TODO(wan@google.com): make FullMatch() and PartialMatch() work
+  // when str contains NUL characters.
+  static bool FullMatch(const ::std::string& str, const RE& re) {
+    return FullMatch(str.c_str(), re);
+  }
+  static bool PartialMatch(const ::std::string& str, const RE& re) {
+    return PartialMatch(str.c_str(), re);
+  }
+
+#if GTEST_HAS_GLOBAL_STRING
+
+  static bool FullMatch(const ::string& str, const RE& re) {
+    return FullMatch(str.c_str(), re);
+  }
+  static bool PartialMatch(const ::string& str, const RE& re) {
+    return PartialMatch(str.c_str(), re);
+  }
+
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+  static bool FullMatch(const char* str, const RE& re);
+  static bool PartialMatch(const char* str, const RE& re);
+
+ private:
+  void Init(const char* regex);
+
+  // We use a const char* instead of an std::string, as Google Test used to be
+  // used where std::string is not available.  TODO(wan@google.com): change to
+  // std::string.
+  const char* pattern_;
+  bool is_valid_;
+
+#if GTEST_USES_POSIX_RE
+
+  regex_t full_regex_;     // For FullMatch().
+  regex_t partial_regex_;  // For PartialMatch().
+
+#else  // GTEST_USES_SIMPLE_RE
+
+  const char* full_pattern_;  // For FullMatch();
+
+#endif
+
+  GTEST_DISALLOW_ASSIGN_(RE);
+};
+
+// Formats a source file path and a line number as they would appear
+// in an error message from the compiler used to compile this code.
+GTEST_API_ ::std::string FormatFileLocation(const char* file, int line);
+
+// Formats a file location for compiler-independent XML output.
+// Although this function is not platform dependent, we put it next to
+// FormatFileLocation in order to contrast the two functions.
+GTEST_API_ ::std::string FormatCompilerIndependentFileLocation(const char* file,
+                                                               int line);
+
+// Defines logging utilities:
+//   GTEST_LOG_(severity) - logs messages at the specified severity level. The
+//                          message itself is streamed into the macro.
+//   LogToStderr()  - directs all log messages to stderr.
+//   FlushInfoLog() - flushes informational log messages.
+
+enum GTestLogSeverity {
+  GTEST_INFO,
+  GTEST_WARNING,
+  GTEST_ERROR,
+  GTEST_FATAL
+};
+
+// Formats log entry severity, provides a stream object for streaming the
+// log message, and terminates the message with a newline when going out of
+// scope.
+class GTEST_API_ GTestLog {
+ public:
+  GTestLog(GTestLogSeverity severity, const char* file, int line);
+
+  // Flushes the buffers and, if severity is GTEST_FATAL, aborts the program.
+ ~GTestLog(); + + ::std::ostream& GetStream() { return ::std::cerr; } + + private: + const GTestLogSeverity severity_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestLog); +}; + +#define GTEST_LOG_(severity) \ + ::testing::internal::GTestLog(::testing::internal::GTEST_##severity, \ + __FILE__, __LINE__).GetStream() + +inline void LogToStderr() {} +inline void FlushInfoLog() { fflush(NULL); } + +// INTERNAL IMPLEMENTATION - DO NOT USE. +// +// GTEST_CHECK_ is an all-mode assert. It aborts the program if the condition +// is not satisfied. +// Synopsys: +// GTEST_CHECK_(boolean_condition); +// or +// GTEST_CHECK_(boolean_condition) << "Additional message"; +// +// This checks the condition and if the condition is not satisfied +// it prints message about the condition violation, including the +// condition itself, plus additional message streamed into it, if any, +// and then it aborts the program. It aborts the program irrespective of +// whether it is built in the debug mode or not. +#define GTEST_CHECK_(condition) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::IsTrue(condition)) \ + ; \ + else \ + GTEST_LOG_(FATAL) << "Condition " #condition " failed. " + +// An all-mode assert to verify that the given POSIX-style function +// call returns 0 (indicating success). Known limitation: this +// doesn't expand to a balanced 'if' statement, so enclose the macro +// in {} if you need to use it as the only statement in an 'if' +// branch. +#define GTEST_CHECK_POSIX_SUCCESS_(posix_call) \ + if (const int gtest_error = (posix_call)) \ + GTEST_LOG_(FATAL) << #posix_call << "failed with error " \ + << gtest_error + +#if GTEST_HAS_STD_MOVE_ +using std::move; +#else // GTEST_HAS_STD_MOVE_ +template +const T& move(const T& t) { + return t; +} +#endif // GTEST_HAS_STD_MOVE_ + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Use ImplicitCast_ as a safe version of static_cast for upcasting in +// the type hierarchy (e.g. casting a Foo* to a SuperclassOfFoo* or a +// const Foo*). When you use ImplicitCast_, the compiler checks that +// the cast is safe. Such explicit ImplicitCast_s are necessary in +// surprisingly many situations where C++ demands an exact type match +// instead of an argument type convertable to a target type. +// +// The syntax for using ImplicitCast_ is the same as for static_cast: +// +// ImplicitCast_(expr) +// +// ImplicitCast_ would have been part of the C++ standard library, +// but the proposal was submitted too late. It will probably make +// its way into the language in the future. +// +// This relatively ugly name is intentional. It prevents clashes with +// similar functions users may have (e.g., implicit_cast). The internal +// namespace alone is not enough because the function can be found by ADL. +template +inline To ImplicitCast_(To x) { return ::testing::internal::move(x); } + +// When you upcast (that is, cast a pointer from type Foo to type +// SuperclassOfFoo), it's fine to use ImplicitCast_<>, since upcasts +// always succeed. When you downcast (that is, cast a pointer from +// type Foo to type SubclassOfFoo), static_cast<> isn't safe, because +// how do you know the pointer is really of type SubclassOfFoo? It +// could be a bare Foo, or of type DifferentSubclassOfFoo. Thus, +// when you downcast, you should use this macro. In debug mode, we +// use dynamic_cast<> to double-check the downcast is legal (we die +// if it's not). In normal mode, we do the efficient static_cast<> +// instead. 
Thus, it's important to test in debug mode to make sure +// the cast is legal! +// This is the only place in the code we should use dynamic_cast<>. +// In particular, you SHOULDN'T be using dynamic_cast<> in order to +// do RTTI (eg code like this: +// if (dynamic_cast(foo)) HandleASubclass1Object(foo); +// if (dynamic_cast(foo)) HandleASubclass2Object(foo); +// You should design the code some other way not to need this. +// +// This relatively ugly name is intentional. It prevents clashes with +// similar functions users may have (e.g., down_cast). The internal +// namespace alone is not enough because the function can be found by ADL. +template // use like this: DownCast_(foo); +inline To DownCast_(From* f) { // so we only accept pointers + // Ensures that To is a sub-type of From *. This test is here only + // for compile-time type checking, and has no overhead in an + // optimized build at run-time, as it will be optimized away + // completely. + GTEST_INTENTIONAL_CONST_COND_PUSH_() + if (false) { + GTEST_INTENTIONAL_CONST_COND_POP_() + const To to = NULL; + ::testing::internal::ImplicitCast_(to); + } + +#if GTEST_HAS_RTTI + // RTTI: debug mode only! + GTEST_CHECK_(f == NULL || dynamic_cast(f) != NULL); +#endif + return static_cast(f); +} + +// Downcasts the pointer of type Base to Derived. +// Derived must be a subclass of Base. The parameter MUST +// point to a class of type Derived, not any subclass of it. +// When RTTI is available, the function performs a runtime +// check to enforce this. +template +Derived* CheckedDowncastToActualType(Base* base) { +#if GTEST_HAS_RTTI + GTEST_CHECK_(typeid(*base) == typeid(Derived)); + return dynamic_cast(base); // NOLINT +#else + return static_cast(base); // Poor man's downcast. +#endif +} + +#if GTEST_HAS_STREAM_REDIRECTION + +// Defines the stderr capturer: +// CaptureStdout - starts capturing stdout. +// GetCapturedStdout - stops capturing stdout and returns the captured string. +// CaptureStderr - starts capturing stderr. +// GetCapturedStderr - stops capturing stderr and returns the captured string. +// +GTEST_API_ void CaptureStdout(); +GTEST_API_ std::string GetCapturedStdout(); +GTEST_API_ void CaptureStderr(); +GTEST_API_ std::string GetCapturedStderr(); + +#endif // GTEST_HAS_STREAM_REDIRECTION + + +#if GTEST_HAS_DEATH_TEST + +const ::std::vector& GetInjectableArgvs(); +void SetInjectableArgvs(const ::std::vector* + new_argvs); + +// A copy of all command line arguments. Set by InitGoogleTest(). +extern ::std::vector g_argvs; + +#endif // GTEST_HAS_DEATH_TEST + +// Defines synchronization primitives. +#if GTEST_IS_THREADSAFE +# if GTEST_HAS_PTHREAD +// Sleeps for (roughly) n milliseconds. This function is only for testing +// Google Test's own constructs. Don't use it in user tests, either +// directly or indirectly. +inline void SleepMilliseconds(int n) { + const timespec time = { + 0, // 0 seconds. + n * 1000L * 1000L, // And n ms. + }; + nanosleep(&time, NULL); +} +# endif // GTEST_HAS_PTHREAD + +# if 0 // OS detection +# elif GTEST_HAS_PTHREAD +// Allows a controller thread to pause execution of newly created +// threads until notified. Instances of this class must be created +// and destroyed in the controller thread. +// +// This class is only for testing Google Test's own constructs. Do not +// use it in user tests, either directly or indirectly. 
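The class declared next packages this start-gate pattern behind a portable
interface. For orientation, a self-contained sketch of the same idea in plain
pthreads (note that gtest's pthread implementation below deliberately polls via
SleepMilliseconds() instead of using a condition variable):

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t g_mutex = PTHREAD_MUTEX_INITIALIZER;
    static pthread_cond_t g_cond = PTHREAD_COND_INITIALIZER;
    static bool g_notified = false;

    static void* Worker(void*) {
      pthread_mutex_lock(&g_mutex);
      while (!g_notified)  // guards against spurious wakeups
        pthread_cond_wait(&g_cond, &g_mutex);
      pthread_mutex_unlock(&g_mutex);
      printf("worker released\n");
      return NULL;
    }

    int main() {
      pthread_t tid;
      pthread_create(&tid, NULL, &Worker, NULL);
      pthread_mutex_lock(&g_mutex);  // the controller "notifies"
      g_notified = true;
      pthread_cond_broadcast(&g_cond);
      pthread_mutex_unlock(&g_mutex);
      pthread_join(tid, NULL);
      return 0;
    }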
+class Notification { + public: + Notification() : notified_(false) { + GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL)); + } + ~Notification() { + pthread_mutex_destroy(&mutex_); + } + + // Notifies all threads created with this notification to start. Must + // be called from the controller thread. + void Notify() { + pthread_mutex_lock(&mutex_); + notified_ = true; + pthread_mutex_unlock(&mutex_); + } + + // Blocks until the controller thread notifies. Must be called from a test + // thread. + void WaitForNotification() { + for (;;) { + pthread_mutex_lock(&mutex_); + const bool notified = notified_; + pthread_mutex_unlock(&mutex_); + if (notified) + break; + SleepMilliseconds(10); + } + } + + private: + pthread_mutex_t mutex_; + bool notified_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification); +}; + +# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT + +GTEST_API_ void SleepMilliseconds(int n); + +// Provides leak-safe Windows kernel handle ownership. +// Used in death tests and in threading support. +class GTEST_API_ AutoHandle { + public: + // Assume that Win32 HANDLE type is equivalent to void*. Doing so allows us to + // avoid including in this header file. Including is + // undesirable because it defines a lot of symbols and macros that tend to + // conflict with client code. This assumption is verified by + // WindowsTypesTest.HANDLEIsVoidStar. + typedef void* Handle; + AutoHandle(); + explicit AutoHandle(Handle handle); + + ~AutoHandle(); + + Handle Get() const; + void Reset(); + void Reset(Handle handle); + + private: + // Returns true iff the handle is a valid handle object that can be closed. + bool IsCloseable() const; + + Handle handle_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(AutoHandle); +}; + +// Allows a controller thread to pause execution of newly created +// threads until notified. Instances of this class must be created +// and destroyed in the controller thread. +// +// This class is only for testing Google Test's own constructs. Do not +// use it in user tests, either directly or indirectly. +class GTEST_API_ Notification { + public: + Notification(); + void Notify(); + void WaitForNotification(); + + private: + AutoHandle event_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(Notification); +}; +# endif // OS detection + +// On MinGW, we can have both GTEST_OS_WINDOWS and GTEST_HAS_PTHREAD +// defined, but we don't want to use MinGW's pthreads implementation, which +// has conformance problems with some versions of the POSIX standard. +# if GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW + +// As a C-function, ThreadFuncWithCLinkage cannot be templated itself. +// Consequently, it cannot select a correct instantiation of ThreadWithParam +// in order to call its Run(). Introducing ThreadWithParamBase as a +// non-templated base class for ThreadWithParam allows us to bypass this +// problem. +class ThreadWithParamBase { + public: + virtual ~ThreadWithParamBase() {} + virtual void Run() = 0; +}; + +// pthread_create() accepts a pointer to a function type with the C linkage. +// According to the Standard (7.5/1), function types with different linkages +// are different even if they are otherwise identical. Some compilers (for +// example, SunStudio) treat them as different types. Since class methods +// cannot be defined with C-linkage we need to define a free C-function to +// pass into pthread_create(). 
+extern "C" inline void* ThreadFuncWithCLinkage(void* thread) { + static_cast(thread)->Run(); + return NULL; +} + +// Helper class for testing Google Test's multi-threading constructs. +// To use it, write: +// +// void ThreadFunc(int param) { /* Do things with param */ } +// Notification thread_can_start; +// ... +// // The thread_can_start parameter is optional; you can supply NULL. +// ThreadWithParam thread(&ThreadFunc, 5, &thread_can_start); +// thread_can_start.Notify(); +// +// These classes are only for testing Google Test's own constructs. Do +// not use them in user tests, either directly or indirectly. +template +class ThreadWithParam : public ThreadWithParamBase { + public: + typedef void UserThreadFunc(T); + + ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start) + : func_(func), + param_(param), + thread_can_start_(thread_can_start), + finished_(false) { + ThreadWithParamBase* const base = this; + // The thread can be created only after all fields except thread_ + // have been initialized. + GTEST_CHECK_POSIX_SUCCESS_( + pthread_create(&thread_, 0, &ThreadFuncWithCLinkage, base)); + } + ~ThreadWithParam() { Join(); } + + void Join() { + if (!finished_) { + GTEST_CHECK_POSIX_SUCCESS_(pthread_join(thread_, 0)); + finished_ = true; + } + } + + virtual void Run() { + if (thread_can_start_ != NULL) + thread_can_start_->WaitForNotification(); + func_(param_); + } + + private: + UserThreadFunc* const func_; // User-supplied thread function. + const T param_; // User-supplied parameter to the thread function. + // When non-NULL, used to block execution until the controller thread + // notifies. + Notification* const thread_can_start_; + bool finished_; // true iff we know that the thread function has finished. + pthread_t thread_; // The native thread object. + + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam); +}; +# endif // GTEST_HAS_PTHREAD && !GTEST_OS_WINDOWS_MINGW + +# if 0 // OS detection +# elif GTEST_OS_WINDOWS && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT + +// Mutex implements mutex on Windows platforms. It is used in conjunction +// with class MutexLock: +// +// Mutex mutex; +// ... +// MutexLock lock(&mutex); // Acquires the mutex and releases it at the +// // end of the current scope. +// +// A static Mutex *must* be defined or declared using one of the following +// macros: +// GTEST_DEFINE_STATIC_MUTEX_(g_some_mutex); +// GTEST_DECLARE_STATIC_MUTEX_(g_some_mutex); +// +// (A non-static Mutex is defined/declared in the usual way). +class GTEST_API_ Mutex { + public: + enum MutexType { kStatic = 0, kDynamic = 1 }; + // We rely on kStaticMutex being 0 as it is to what the linker initializes + // type_ in static mutexes. critical_section_ will be initialized lazily + // in ThreadSafeLazyInit(). + enum StaticConstructorSelector { kStaticMutex = 0 }; + + // This constructor intentionally does nothing. It relies on type_ being + // statically initialized to 0 (effectively setting it to kStatic) and on + // ThreadSafeLazyInit() to lazily initialize the rest of the members. + explicit Mutex(StaticConstructorSelector /*dummy*/) {} + + Mutex(); + ~Mutex(); + + void Lock(); + + void Unlock(); + + // Does nothing if the current thread holds the mutex. Otherwise, crashes + // with high probability. + void AssertHeld(); + + private: + // Initializes owner_thread_id_ and critical_section_ in static mutexes. 
+ void ThreadSafeLazyInit(); + + // Per http://blogs.msdn.com/b/oldnewthing/archive/2004/02/23/78395.aspx, + // we assume that 0 is an invalid value for thread IDs. + unsigned int owner_thread_id_; + + // For static mutexes, we rely on these members being initialized to zeros + // by the linker. + MutexType type_; + long critical_section_init_phase_; // NOLINT + _RTL_CRITICAL_SECTION* critical_section_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex); +}; + +# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \ + extern ::testing::internal::Mutex mutex + +# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \ + ::testing::internal::Mutex mutex(::testing::internal::Mutex::kStaticMutex) + +// We cannot name this class MutexLock because the ctor declaration would +// conflict with a macro named MutexLock, which is defined on some +// platforms. That macro is used as a defensive measure to prevent against +// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than +// "MutexLock l(&mu)". Hence the typedef trick below. +class GTestMutexLock { + public: + explicit GTestMutexLock(Mutex* mutex) + : mutex_(mutex) { mutex_->Lock(); } + + ~GTestMutexLock() { mutex_->Unlock(); } + + private: + Mutex* const mutex_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock); +}; + +typedef GTestMutexLock MutexLock; + +// Base class for ValueHolder. Allows a caller to hold and delete a value +// without knowing its type. +class ThreadLocalValueHolderBase { + public: + virtual ~ThreadLocalValueHolderBase() {} +}; + +// Provides a way for a thread to send notifications to a ThreadLocal +// regardless of its parameter type. +class ThreadLocalBase { + public: + // Creates a new ValueHolder object holding a default value passed to + // this ThreadLocal's constructor and returns it. It is the caller's + // responsibility not to call this when the ThreadLocal instance already + // has a value on the current thread. + virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const = 0; + + protected: + ThreadLocalBase() {} + virtual ~ThreadLocalBase() {} + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocalBase); +}; + +// Maps a thread to a set of ThreadLocals that have values instantiated on that +// thread and notifies them when the thread exits. A ThreadLocal instance is +// expected to persist until all threads it has values on have terminated. +class GTEST_API_ ThreadLocalRegistry { + public: + // Registers thread_local_instance as having value on the current thread. + // Returns a value that can be used to identify the thread from other threads. + static ThreadLocalValueHolderBase* GetValueOnCurrentThread( + const ThreadLocalBase* thread_local_instance); + + // Invoked when a ThreadLocal instance is destroyed. + static void OnThreadLocalDestroyed( + const ThreadLocalBase* thread_local_instance); +}; + +class GTEST_API_ ThreadWithParamBase { + public: + void Join(); + + protected: + class Runnable { + public: + virtual ~Runnable() {} + virtual void Run() = 0; + }; + + ThreadWithParamBase(Runnable *runnable, Notification* thread_can_start); + virtual ~ThreadWithParamBase(); + + private: + AutoHandle thread_; +}; + +// Helper class for testing Google Test's multi-threading constructs. 
+template <typename T>
+class ThreadWithParam : public ThreadWithParamBase {
+ public:
+  typedef void UserThreadFunc(T);
+
+  ThreadWithParam(UserThreadFunc* func, T param, Notification* thread_can_start)
+      : ThreadWithParamBase(new RunnableImpl(func, param), thread_can_start) {
+  }
+  virtual ~ThreadWithParam() {}
+
+ private:
+  class RunnableImpl : public Runnable {
+   public:
+    RunnableImpl(UserThreadFunc* func, T param)
+        : func_(func),
+          param_(param) {
+    }
+    virtual ~RunnableImpl() {}
+    virtual void Run() {
+      func_(param_);
+    }
+
+   private:
+    UserThreadFunc* const func_;
+    const T param_;
+
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(RunnableImpl);
+  };
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadWithParam);
+};
+
+// Implements thread-local storage on Windows systems.
+//
+//   // Thread 1
+//   ThreadLocal<int> tl(100);  // 100 is the default value for each thread.
+//
+//   // Thread 2
+//   tl.set(150);               // Changes the value for thread 2 only.
+//   EXPECT_EQ(150, tl.get());
+//
+//   // Thread 1
+//   EXPECT_EQ(100, tl.get());  // In thread 1, tl has the original value.
+//   tl.set(200);
+//   EXPECT_EQ(200, tl.get());
+//
+// The template type argument T must have a public copy constructor.
+// In addition, the default ThreadLocal constructor requires T to have
+// a public default constructor.
+//
+// The users of a ThreadLocal instance have to make sure that all threads but
+// one (including the main one) using that instance have exited before
+// destroying it. Otherwise, the per-thread objects managed for them by the
+// ThreadLocal instance are not guaranteed to be destroyed on all platforms.
+//
+// Google Test only uses global ThreadLocal objects.  That means they
+// will die after main() has returned.  Therefore, no per-thread
+// object managed by Google Test will be leaked as long as all threads
+// using Google Test have exited when main() returns.
+template <typename T>
+class ThreadLocal : public ThreadLocalBase {
+ public:
+  ThreadLocal() : default_() {}
+  explicit ThreadLocal(const T& value) : default_(value) {}
+
+  ~ThreadLocal() { ThreadLocalRegistry::OnThreadLocalDestroyed(this); }
+
+  T* pointer() { return GetOrCreateValue(); }
+  const T* pointer() const { return GetOrCreateValue(); }
+  const T& get() const { return *pointer(); }
+  void set(const T& value) { *pointer() = value; }
+
+ private:
+  // Holds a value of T.  Can be deleted via its base class without the caller
+  // knowing the type of T.
+  class ValueHolder : public ThreadLocalValueHolderBase {
+   public:
+    explicit ValueHolder(const T& value) : value_(value) {}
+
+    T* pointer() { return &value_; }
+
+   private:
+    T value_;
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
+  };
+
+  T* GetOrCreateValue() const {
+    return static_cast<ValueHolder*>(
+        ThreadLocalRegistry::GetValueOnCurrentThread(this))->pointer();
+  }
+
+  virtual ThreadLocalValueHolderBase* NewValueForCurrentThread() const {
+    return new ValueHolder(default_);
+  }
+
+  const T default_;  // The default value for each thread.
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
+};
+
+# elif GTEST_HAS_PTHREAD
+
+// MutexBase and Mutex implement mutex on pthreads-based platforms.
+class MutexBase {
+ public:
+  // Acquires this mutex.
+  void Lock() {
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_lock(&mutex_));
+    owner_ = pthread_self();
+    has_owner_ = true;
+  }
+
+  // Releases this mutex.
+  void Unlock() {
+    // Since the lock is being released the owner_ field should no longer be
+    // considered valid.  We don't protect writing to has_owner_ here, as it's
+    // the caller's responsibility to ensure that the current thread holds the
+    // mutex when this is called.
+    has_owner_ = false;
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_unlock(&mutex_));
+  }
+
+  // Does nothing if the current thread holds the mutex.  Otherwise, crashes
+  // with high probability.
+  void AssertHeld() const {
+    GTEST_CHECK_(has_owner_ && pthread_equal(owner_, pthread_self()))
+        << "The current thread is not holding the mutex @" << this;
+  }
+
+  // A static mutex may be used before main() is entered.  It may even
+  // be used before the dynamic initialization stage.  Therefore we
+  // must be able to initialize a static mutex object at link time.
+  // This means MutexBase has to be a POD and its member variables
+  // have to be public.
+ public:
+  pthread_mutex_t mutex_;  // The underlying pthread mutex.
+  // has_owner_ indicates whether the owner_ field below contains a valid thread
+  // ID and is therefore safe to inspect (e.g., to use in pthread_equal()). All
+  // accesses to the owner_ field should be protected by a check of this field.
+  // An alternative might be to memset() owner_ to all zeros, but there's no
+  // guarantee that a zero'd pthread_t is necessarily invalid or even different
+  // from pthread_self().
+  bool has_owner_;
+  pthread_t owner_;  // The thread holding the mutex.
+};
+
+// Forward-declares a static mutex.
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+    extern ::testing::internal::MutexBase mutex
+
+// Defines and statically (i.e. at link time) initializes a static mutex.
+// The initialization list here does not explicitly initialize each field,
+// instead relying on default initialization for the unspecified fields. In
+// particular, the owner_ field (a pthread_t) is not explicitly initialized.
+// This allows initialization to work whether pthread_t is a scalar or struct.
+// The flag -Wmissing-field-initializers must not be specified for this to work.
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) \
+    ::testing::internal::MutexBase mutex = { PTHREAD_MUTEX_INITIALIZER, false }
+
+// The Mutex class can only be used for mutexes created at runtime.  It
+// shares its API with MutexBase otherwise.
+class Mutex : public MutexBase {
+ public:
+  Mutex() {
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_init(&mutex_, NULL));
+    has_owner_ = false;
+  }
+  ~Mutex() {
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_mutex_destroy(&mutex_));
+  }
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(Mutex);
+};
+
+// We cannot name this class MutexLock because the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. That macro is used as a defensive measure to prevent against
+// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
+// "MutexLock l(&mu)".  Hence the typedef trick below.
+class GTestMutexLock {
+ public:
+  explicit GTestMutexLock(MutexBase* mutex)
+      : mutex_(mutex) { mutex_->Lock(); }
+
+  ~GTestMutexLock() { mutex_->Unlock(); }
+
+ private:
+  MutexBase* const mutex_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(GTestMutexLock);
+};
+
+typedef GTestMutexLock MutexLock;
+
+// Helpers for ThreadLocal.
+
+// pthread_key_create() requires DeleteThreadLocalValue() to have
+// C-linkage.  Therefore it cannot be templatized to access
+// ThreadLocal<T>.  Hence the need for class
+// ThreadLocalValueHolderBase.
+class ThreadLocalValueHolderBase {
+ public:
+  virtual ~ThreadLocalValueHolderBase() {}
+};
+
+// Called by pthread to delete thread-local data stored by
+// pthread_setspecific().
+extern "C" inline void DeleteThreadLocalValue(void* value_holder) {
+  delete static_cast<ThreadLocalValueHolderBase*>(value_holder);
+}
+
+// Implements thread-local storage on pthreads-based systems.
+template <typename T>
+class ThreadLocal {
+ public:
+  ThreadLocal() : key_(CreateKey()),
+                  default_() {}
+  explicit ThreadLocal(const T& value) : key_(CreateKey()),
+                                         default_(value) {}
+
+  ~ThreadLocal() {
+    // Destroys the managed object for the current thread, if any.
+    DeleteThreadLocalValue(pthread_getspecific(key_));
+
+    // Releases resources associated with the key.  This will *not*
+    // delete managed objects for other threads.
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_key_delete(key_));
+  }
+
+  T* pointer() { return GetOrCreateValue(); }
+  const T* pointer() const { return GetOrCreateValue(); }
+  const T& get() const { return *pointer(); }
+  void set(const T& value) { *pointer() = value; }
+
+ private:
+  // Holds a value of type T.
+  class ValueHolder : public ThreadLocalValueHolderBase {
+   public:
+    explicit ValueHolder(const T& value) : value_(value) {}
+
+    T* pointer() { return &value_; }
+
+   private:
+    T value_;
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(ValueHolder);
+  };
+
+  static pthread_key_t CreateKey() {
+    pthread_key_t key;
+    // When a thread exits, DeleteThreadLocalValue() will be called on
+    // the object managed for that thread.
+    GTEST_CHECK_POSIX_SUCCESS_(
+        pthread_key_create(&key, &DeleteThreadLocalValue));
+    return key;
+  }
+
+  T* GetOrCreateValue() const {
+    ThreadLocalValueHolderBase* const holder =
+        static_cast<ThreadLocalValueHolderBase*>(pthread_getspecific(key_));
+    if (holder != NULL) {
+      return CheckedDowncastToActualType<ValueHolder>(holder)->pointer();
+    }
+
+    ValueHolder* const new_holder = new ValueHolder(default_);
+    ThreadLocalValueHolderBase* const holder_base = new_holder;
+    GTEST_CHECK_POSIX_SUCCESS_(pthread_setspecific(key_, holder_base));
+    return new_holder->pointer();
+  }
+
+  // A key pthreads uses for looking up per-thread values.
+  const pthread_key_t key_;
+  const T default_;  // The default value for each thread.
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ThreadLocal);
+};
+
+# endif  // OS detection
+
+#else  // GTEST_IS_THREADSAFE
+
+// A dummy implementation of synchronization primitives (mutex, lock,
+// and thread-local variable).  Necessary for compiling Google Test where
+// mutex is not supported - using Google Test in multiple threads is not
+// supported on such platforms.
+
+class Mutex {
+ public:
+  Mutex() {}
+  void Lock() {}
+  void Unlock() {}
+  void AssertHeld() const {}
+};
+
+# define GTEST_DECLARE_STATIC_MUTEX_(mutex) \
+  extern ::testing::internal::Mutex mutex
+
+# define GTEST_DEFINE_STATIC_MUTEX_(mutex) ::testing::internal::Mutex mutex
+
+// We cannot name this class MutexLock because the ctor declaration would
+// conflict with a macro named MutexLock, which is defined on some
+// platforms. That macro is used as a defensive measure to prevent against
+// inadvertent misuses of MutexLock like "MutexLock(&mu)" rather than
+// "MutexLock l(&mu)".  Hence the typedef trick below.
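+// For example (a minimal sketch; g_mu and Increment are made-up names):
+//
+//   GTEST_DEFINE_STATIC_MUTEX_(g_mu);
+//
+//   void Increment() {
+//     MutexLock lock(&g_mu);  // Acquires g_mu; releases it at end of scope.
+//     ...
+//   }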
+class GTestMutexLock {
+ public:
+  explicit GTestMutexLock(Mutex*) {}  // NOLINT
+};
+
+typedef GTestMutexLock MutexLock;
+
+template <typename T>
+class ThreadLocal {
+ public:
+  ThreadLocal() : value_() {}
+  explicit ThreadLocal(const T& value) : value_(value) {}
+  T* pointer() { return &value_; }
+  const T* pointer() const { return &value_; }
+  const T& get() const { return value_; }
+  void set(const T& value) { value_ = value; }
+ private:
+  T value_;
+};
+
+#endif  // GTEST_IS_THREADSAFE
+
+// Returns the number of threads running in the process, or 0 to indicate that
+// we cannot detect it.
+GTEST_API_ size_t GetThreadCount();
+
+// Passing non-POD classes through ellipsis (...) crashes the ARM
+// compiler and generates a warning in Sun Studio.  The Nokia Symbian
+// and the IBM XL C/C++ compiler try to instantiate a copy constructor
+// for objects passed through ellipsis (...), failing for uncopyable
+// objects.  We define this to ensure that only POD is passed through
+// ellipsis on these systems.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__) || defined(__SUNPRO_CC)
+// We lose support for NULL detection where the compiler doesn't like
+// passing non-POD classes through ellipsis (...).
+# define GTEST_ELLIPSIS_NEEDS_POD_ 1
+#else
+# define GTEST_CAN_COMPARE_NULL 1
+#endif
+
+// The Nokia Symbian and IBM XL C/C++ compilers cannot decide between
+// const T& and const T* in a function template.  These compilers
+// _can_ decide between class template specializations for T and T*,
+// so a tr1::type_traits-like is_pointer works.
+#if defined(__SYMBIAN32__) || defined(__IBMCPP__)
+# define GTEST_NEEDS_IS_POINTER_ 1
+#endif
+
+template <bool bool_value>
+struct bool_constant {
+  typedef bool_constant<bool_value> type;
+  static const bool value = bool_value;
+};
+template <bool bool_value> const bool bool_constant<bool_value>::value;
+
+typedef bool_constant<false> false_type;
+typedef bool_constant<true> true_type;
+
+template <typename T>
+struct is_pointer : public false_type {};
+
+template <typename T>
+struct is_pointer<T*> : public true_type {};
+
+template <typename Iterator>
+struct IteratorTraits {
+  typedef typename Iterator::value_type value_type;
+};
+
+template <typename T>
+struct IteratorTraits<T*> {
+  typedef T value_type;
+};
+
+template <typename T>
+struct IteratorTraits<const T*> {
+  typedef T value_type;
+};
+
+#if GTEST_OS_WINDOWS
+# define GTEST_PATH_SEP_ "\\"
+# define GTEST_HAS_ALT_PATH_SEP_ 1
+// The biggest signed integer type the compiler supports.
+typedef __int64 BiggestInt;
+#else
+# define GTEST_PATH_SEP_ "/"
+# define GTEST_HAS_ALT_PATH_SEP_ 0
+typedef long long BiggestInt;  // NOLINT
+#endif  // GTEST_OS_WINDOWS
+
+// Utilities for char.
+
+// isspace(int ch) and friends accept an unsigned char or EOF.  char
+// may be signed, depending on the compiler (or compiler flags).
+// Therefore we need to cast a char to unsigned char before calling
+// isspace(), etc.
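+//
+// For example (an illustrative sketch): on a platform where char is signed,
+//
+//   char ch = '\xE9';                         // e.g. Latin-1 'é'; the value is negative
+//   isspace(ch);                              // undefined behavior: the argument is
+//                                             // neither EOF nor representable as
+//                                             // unsigned char
+//   isspace(static_cast<unsigned char>(ch));  // well-defined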
+
+inline bool IsAlpha(char ch) {
+  return isalpha(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsAlNum(char ch) {
+  return isalnum(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsDigit(char ch) {
+  return isdigit(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsLower(char ch) {
+  return islower(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsSpace(char ch) {
+  return isspace(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsUpper(char ch) {
+  return isupper(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsXDigit(char ch) {
+  return isxdigit(static_cast<unsigned char>(ch)) != 0;
+}
+inline bool IsXDigit(wchar_t ch) {
+  const unsigned char low_byte = static_cast<unsigned char>(ch);
+  return ch == low_byte && isxdigit(low_byte) != 0;
+}
+
+inline char ToLower(char ch) {
+  return static_cast<char>(tolower(static_cast<unsigned char>(ch)));
+}
+inline char ToUpper(char ch) {
+  return static_cast<char>(toupper(static_cast<unsigned char>(ch)));
+}
+
+inline std::string StripTrailingSpaces(std::string str) {
+  std::string::iterator it = str.end();
+  while (it != str.begin() && IsSpace(*--it))
+    it = str.erase(it);
+  return str;
+}
+
+// The testing::internal::posix namespace holds wrappers for common
+// POSIX functions.  These wrappers hide the differences between
+// Windows/MSVC and POSIX systems.  Since some compilers define these
+// standard functions as macros, the wrapper cannot have the same name
+// as the wrapped function.
+
+namespace posix {
+
+// Functions with a different name on Windows.
+
+#if GTEST_OS_WINDOWS
+
+typedef struct _stat StatStruct;
+
+# ifdef __BORLANDC__
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+  return stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+# else  // !__BORLANDC__
+#  if GTEST_OS_WINDOWS_MOBILE
+inline int IsATTY(int /* fd */) { return 0; }
+#  else
+inline int IsATTY(int fd) { return _isatty(fd); }
+#  endif  // GTEST_OS_WINDOWS_MOBILE
+inline int StrCaseCmp(const char* s1, const char* s2) {
+  return _stricmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return _strdup(src); }
+# endif  // __BORLANDC__
+
+# if GTEST_OS_WINDOWS_MOBILE
+inline int FileNo(FILE* file) { return reinterpret_cast<int>(_fileno(file)); }
+// Stat(), RmDir(), and IsDir() are not needed on Windows CE at this
+// time and thus not defined there.
+# else
+inline int FileNo(FILE* file) { return _fileno(file); }
+inline int Stat(const char* path, StatStruct* buf) { return _stat(path, buf); }
+inline int RmDir(const char* dir) { return _rmdir(dir); }
+inline bool IsDir(const StatStruct& st) {
+  return (_S_IFDIR & st.st_mode) != 0;
+}
+# endif  // GTEST_OS_WINDOWS_MOBILE
+
+#else
+
+typedef struct stat StatStruct;
+
+inline int FileNo(FILE* file) { return fileno(file); }
+inline int IsATTY(int fd) { return isatty(fd); }
+inline int Stat(const char* path, StatStruct* buf) { return stat(path, buf); }
+inline int StrCaseCmp(const char* s1, const char* s2) {
+  return strcasecmp(s1, s2);
+}
+inline char* StrDup(const char* src) { return strdup(src); }
+inline int RmDir(const char* dir) { return rmdir(dir); }
+inline bool IsDir(const StatStruct& st) { return S_ISDIR(st.st_mode); }
+
+#endif  // GTEST_OS_WINDOWS
+
+// Functions deprecated by MSVC 8.0.
+
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4996 /* deprecated function */)
+
+inline const char* StrNCpy(char* dest, const char* src, size_t n) {
+  return strncpy(dest, src, n);
+}
+
+// ChDir(), FReopen(), FDOpen(), Read(), Write(), Close(), and
+// StrError() aren't needed on Windows CE at this time and thus not
+// defined there.
+
+#if !GTEST_OS_WINDOWS_MOBILE && !GTEST_OS_WINDOWS_PHONE && !GTEST_OS_WINDOWS_RT
+inline int ChDir(const char* dir) { return chdir(dir); }
+#endif
+inline FILE* FOpen(const char* path, const char* mode) {
+  return fopen(path, mode);
+}
+#if !GTEST_OS_WINDOWS_MOBILE
+inline FILE* FReopen(const char* path, const char* mode, FILE* stream) {
+  return freopen(path, mode, stream);
+}
+inline FILE* FDOpen(int fd, const char* mode) { return fdopen(fd, mode); }
+#endif
+inline int FClose(FILE* fp) { return fclose(fp); }
+#if !GTEST_OS_WINDOWS_MOBILE
+inline int Read(int fd, void* buf, unsigned int count) {
+  return static_cast<int>(read(fd, buf, count));
+}
+inline int Write(int fd, const void* buf, unsigned int count) {
+  return static_cast<int>(write(fd, buf, count));
+}
+inline int Close(int fd) { return close(fd); }
+inline const char* StrError(int errnum) { return strerror(errnum); }
+#endif
+inline const char* GetEnv(const char* name) {
+#if GTEST_OS_WINDOWS_MOBILE || GTEST_OS_WINDOWS_PHONE || GTEST_OS_WINDOWS_RT
+  // We are on Windows CE, which has no environment variables.
+  static_cast<void>(name);  // To prevent 'unused argument' warning.
+  return NULL;
+#elif defined(__BORLANDC__) || defined(__SunOS_5_8) || defined(__SunOS_5_9)
+  // Environment variables which we programmatically clear will be set to the
+  // empty string rather than unset (NULL).  Handle that case.
+  const char* const env = getenv(name);
+  return (env != NULL && env[0] != '\0') ? env : NULL;
+#else
+  return getenv(name);
+#endif
+}
+
+GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+#if GTEST_OS_WINDOWS_MOBILE
+// Windows CE has no C library.  The abort() function is used in
+// several places in Google Test.  This implementation provides a reasonable
+// imitation of standard behaviour.
+void Abort();
+#else
+inline void Abort() { abort(); }
+#endif  // GTEST_OS_WINDOWS_MOBILE
+
+}  // namespace posix
+
+// MSVC "deprecates" snprintf and issues warnings wherever it is used.  In
+// order to avoid these warnings, we need to use _snprintf or _snprintf_s on
+// MSVC-based platforms.  We map the GTEST_SNPRINTF_ macro to the appropriate
+// function in order to achieve that.  We use macro definition here because
+// snprintf is a variadic function.
+#if _MSC_VER >= 1400 && !GTEST_OS_WINDOWS_MOBILE
+// MSVC 2005 and above support variadic macros.
+# define GTEST_SNPRINTF_(buffer, size, format, ...) \
+     _snprintf_s(buffer, size, size, format, __VA_ARGS__)
+#elif defined(_MSC_VER)
+// Windows CE does not define _snprintf_s and MSVC prior to 2005 doesn't
+// complain about _snprintf.
+# define GTEST_SNPRINTF_ _snprintf
+#else
+# define GTEST_SNPRINTF_ snprintf
+#endif
+
+// The maximum number a BiggestInt can represent.  This definition
+// works no matter BiggestInt is represented in one's complement or
+// two's complement.
+//
+// We cannot rely on numeric_limits in STL, as __int64 and long long
+// are not part of standard C++ and numeric_limits doesn't need to be
+// defined for them.
+const BiggestInt kMaxBiggestInt =
+    ~(static_cast<BiggestInt>(1) << (8*sizeof(BiggestInt) - 1));
+
+// This template class serves as a compile-time function from size to
+// type.  It maps a size in bytes to a primitive type with that
+// size. e.g.
+//
+//   TypeWithSize<4>::UInt
+//
+// is typedef-ed to be unsigned int (unsigned integer made up of 4
+// bytes).
+//
+// Such functionality should belong to STL, but I cannot find it
+// there.
+//
+// Google Test uses this class in the implementation of floating-point
+// comparison.
+//
+// For now it only handles UInt (unsigned int) as that's all Google Test
+// needs.  Other types can be easily added in the future if need
+// arises.
+template <size_t size>
+class TypeWithSize {
+ public:
+  // This prevents the user from using TypeWithSize<N> with incorrect
+  // values of N.
+  typedef void UInt;
+};
+
+// The specialization for size 4.
+template <>
+class TypeWithSize<4> {
+ public:
+  // unsigned int has size 4 in both gcc and MSVC.
+  //
+  // As base/basictypes.h doesn't compile on Windows, we cannot use
+  // uint32, uint64, and etc here.
+  typedef int Int;
+  typedef unsigned int UInt;
+};
+
+// The specialization for size 8.
+template <>
+class TypeWithSize<8> {
+ public:
+#if GTEST_OS_WINDOWS
+  typedef __int64 Int;
+  typedef unsigned __int64 UInt;
+#else
+  typedef long long Int;  // NOLINT
+  typedef unsigned long long UInt;  // NOLINT
+#endif  // GTEST_OS_WINDOWS
+};
+
+// Integer types of known sizes.
+typedef TypeWithSize<4>::Int Int32;
+typedef TypeWithSize<4>::UInt UInt32;
+typedef TypeWithSize<8>::Int Int64;
+typedef TypeWithSize<8>::UInt UInt64;
+typedef TypeWithSize<8>::Int TimeInMillis;  // Represents time in milliseconds.
+
+// Utilities for command line flags and environment variables.
+
+// Macro for referencing flags.
+#define GTEST_FLAG(name) FLAGS_gtest_##name
+
+// Macros for declaring flags.
+#define GTEST_DECLARE_bool_(name) GTEST_API_ extern bool GTEST_FLAG(name)
+#define GTEST_DECLARE_int32_(name) \
+    GTEST_API_ extern ::testing::internal::Int32 GTEST_FLAG(name)
+#define GTEST_DECLARE_string_(name) \
+    GTEST_API_ extern ::std::string GTEST_FLAG(name)
+
+// Macros for defining flags.
+#define GTEST_DEFINE_bool_(name, default_val, doc) \
+    GTEST_API_ bool GTEST_FLAG(name) = (default_val)
+#define GTEST_DEFINE_int32_(name, default_val, doc) \
+    GTEST_API_ ::testing::internal::Int32 GTEST_FLAG(name) = (default_val)
+#define GTEST_DEFINE_string_(name, default_val, doc) \
+    GTEST_API_ ::std::string GTEST_FLAG(name) = (default_val)
+
+// Thread annotations
+#define GTEST_EXCLUSIVE_LOCK_REQUIRED_(locks)
+#define GTEST_LOCK_EXCLUDED_(locks)
+
+// Parses 'str' for a 32-bit signed integer.  If successful, writes the result
+// to *value and returns true; otherwise leaves *value unchanged and returns
+// false.
+// TODO(chandlerc): Find a better way to refactor flag and environment parsing
+// out of both gtest-port.cc and gtest.cc to avoid exporting this utility
+// function.
+bool ParseInt32(const Message& src_text, const char* str, Int32* value);
+
+// Parses a bool/Int32/string from the environment variable
+// corresponding to the given Google Test flag.
+bool BoolFromGTestEnv(const char* flag, bool default_val);
+GTEST_API_ Int32 Int32FromGTestEnv(const char* flag, Int32 default_val);
+const char* StringFromGTestEnv(const char* flag, const char* default_val);
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PORT_H_
+
+
+#if GTEST_OS_LINUX
+# include <stdlib.h>
+# include <sys/types.h>
+# include <sys/wait.h>
+# include <unistd.h>
+#endif  // GTEST_OS_LINUX
+
+#if GTEST_HAS_EXCEPTIONS
+# include <stdexcept>
+#endif
+
+#include <ctype.h>
+#include <float.h>
+#include <string.h>
+#include <iomanip>
+#include <limits>
+#include <set>
+#include <string>
+#include <vector>
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the Message class. +// +// IMPORTANT NOTE: Due to limitation of the C++ language, we have to +// leave some internal implementation details in this header file. +// They are clearly marked by comments like this: +// +// // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +// +// Such code is NOT meant to be used by a user directly, and is subject +// to CHANGE WITHOUT NOTICE. Therefore DO NOT DEPEND ON IT in a user +// program! + +#ifndef GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ +#define GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_ + +#include + + +// Ensures that there is at least one operator<< in the global namespace. +// See Message& operator<<(...) below for why. +void operator<<(const testing::internal::Secret&, int); + +namespace testing { + +// The Message class works like an ostream repeater. +// +// Typical usage: +// +// 1. You stream a bunch of values to a Message object. +// It will remember the text in a stringstream. +// 2. Then you stream the Message object to an ostream. +// This causes the text in the Message to be streamed +// to the ostream. +// +// For example; +// +// testing::Message foo; +// foo << 1 << " != " << 2; +// std::cout << foo; +// +// will print "1 != 2". +// +// Message is not intended to be inherited from. In particular, its +// destructor is not virtual. +// +// Note that stringstream behaves differently in gcc and in MSVC. You +// can stream a NULL char pointer to it in the former, but not in the +// latter (it causes an access violation if you do). The Message +// class hides this difference by treating a NULL char pointer as +// "(null)". +class GTEST_API_ Message { + private: + // The type of basic IO manipulators (endl, ends, and flush) for + // narrow streams. + typedef std::ostream& (*BasicNarrowIoManip)(std::ostream&); + + public: + // Constructs an empty Message. + Message(); + + // Copy constructor. + Message(const Message& msg) : ss_(new ::std::stringstream) { // NOLINT + *ss_ << msg.GetString(); + } + + // Constructs a Message from a C-string. + explicit Message(const char* str) : ss_(new ::std::stringstream) { + *ss_ << str; + } + +#if GTEST_OS_SYMBIAN + // Streams a value (either a pointer or not) to this object. 
+  template <typename T>
+  inline Message& operator <<(const T& value) {
+    StreamHelper(typename internal::is_pointer<T>::type(), value);
+    return *this;
+  }
+#else
+  // Streams a non-pointer value to this object.
+  template <typename T>
+  inline Message& operator <<(const T& val) {
+    // Some libraries overload << for STL containers.  These
+    // overloads are defined in the global namespace instead of ::std.
+    //
+    // C++'s symbol lookup rule (i.e. Koenig lookup) says that these
+    // overloads are visible in either the std namespace or the global
+    // namespace, but not other namespaces, including the testing
+    // namespace which Google Test's Message class is in.
+    //
+    // To allow STL containers (and other types that have a << operator
+    // defined in the global namespace) to be used in Google Test
+    // assertions, testing::Message must access the custom << operator
+    // from the global namespace.  With this using declaration,
+    // overloads of << defined in the global namespace and those
+    // visible via Koenig lookup are both exposed in this function.
+    using ::operator <<;
+    *ss_ << val;
+    return *this;
+  }
+
+  // Streams a pointer value to this object.
+  //
+  // This function is an overload of the previous one.  When you
+  // stream a pointer to a Message, this definition will be used as it
+  // is more specialized.  (The C++ Standard, section
+  // [temp.func.order].)  If you stream a non-pointer, then the
+  // previous definition will be used.
+  //
+  // The reason for this overload is that streaming a NULL pointer to
+  // ostream is undefined behavior.  Depending on the compiler, you
+  // may get "0", "(nil)", "(null)", or an access violation.  To
+  // ensure consistent result across compilers, we always treat NULL
+  // as "(null)".
+  template <typename T>
+  inline Message& operator <<(T* const& pointer) {  // NOLINT
+    if (pointer == NULL) {
+      *ss_ << "(null)";
+    } else {
+      *ss_ << pointer;
+    }
+    return *this;
+  }
+#endif  // GTEST_OS_SYMBIAN
+
+  // Since the basic IO manipulators are overloaded for both narrow
+  // and wide streams, we have to provide this specialized definition
+  // of operator <<, even though its body is the same as the
+  // templatized version above.  Without this definition, streaming
+  // endl or other basic IO manipulators to Message will confuse the
+  // compiler.
+  Message& operator <<(BasicNarrowIoManip val) {
+    *ss_ << val;
+    return *this;
+  }
+
+  // Instead of 1/0, we want to see true/false for bool values.
+  Message& operator <<(bool b) {
+    return *this << (b ? "true" : "false");
+  }
+
+  // These two overloads allow streaming a wide C string to a Message
+  // using the UTF-8 encoding.
+  Message& operator <<(const wchar_t* wide_c_str);
+  Message& operator <<(wchar_t* wide_c_str);
+
+#if GTEST_HAS_STD_WSTRING
+  // Converts the given wide string to a narrow string using the UTF-8
+  // encoding, and streams the result to this Message object.
+  Message& operator <<(const ::std::wstring& wstr);
+#endif  // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_GLOBAL_WSTRING
+  // Converts the given wide string to a narrow string using the UTF-8
+  // encoding, and streams the result to this Message object.
+  Message& operator <<(const ::wstring& wstr);
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+  // Gets the text streamed to this object so far as an std::string.
+  // Each '\0' character in the buffer is replaced with "\\0".
+  //
+  // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+  std::string GetString() const;
+
+ private:
+
+#if GTEST_OS_SYMBIAN
+  // These are needed as the Nokia Symbian Compiler cannot decide between
+  // const T& and const T* in a function template.  The Nokia compiler _can_
+  // decide between class template specializations for T and T*, so a
+  // tr1::type_traits-like is_pointer works, and we can overload on that.
+  template <typename T>
+  inline void StreamHelper(internal::true_type /*is_pointer*/, T* pointer) {
+    if (pointer == NULL) {
+      *ss_ << "(null)";
+    } else {
+      *ss_ << pointer;
+    }
+  }
+  template <typename T>
+  inline void StreamHelper(internal::false_type /*is_pointer*/,
+                           const T& value) {
+    // See the comments in Message& operator <<(const T&) above for why
+    // we need this using statement.
+    using ::operator <<;
+    *ss_ << value;
+  }
+#endif  // GTEST_OS_SYMBIAN
+
+  // We'll hold the text streamed to this object here.
+  const internal::scoped_ptr< ::std::stringstream> ss_;
+
+  // We declare (but don't implement) this to prevent the compiler
+  // from implementing the assignment operator.
+  void operator=(const Message&);
+};
+
+// Streams a Message to an ostream.
+inline std::ostream& operator <<(std::ostream& os, const Message& sb) {
+  return os << sb.GetString();
+}
+
+namespace internal {
+
+// Converts a streamable value to an std::string.  A NULL pointer is
+// converted to "(null)".  When the input value is a ::string,
+// ::std::string, ::wstring, or ::std::wstring object, each NUL
+// character in it is replaced with "\\0".
+template <typename T>
+std::string StreamableToString(const T& streamable) {
+  return (Message() << streamable).GetString();
+}
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_MESSAGE_H_
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee)
+//
+// The Google C++ Testing Framework (Google Test)
+//
+// This header file declares the String class and functions used internally by
+// Google Test.  They are subject to change without notice.  They should not be
They should not used +// by code external to Google Test. +// +// This header file is #included by . +// It should not be #included by other files. + +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ + +#ifdef __BORLANDC__ +// string.h is not guaranteed to provide strcpy on C++ Builder. +# include +#endif + +#include +#include + + +namespace testing { +namespace internal { + +// String - an abstract class holding static string utilities. +class GTEST_API_ String { + public: + // Static utility methods + + // Clones a 0-terminated C string, allocating memory using new. The + // caller is responsible for deleting the return value using + // delete[]. Returns the cloned string, or NULL if the input is + // NULL. + // + // This is different from strdup() in string.h, which allocates + // memory using malloc(). + static const char* CloneCString(const char* c_str); + +#if GTEST_OS_WINDOWS_MOBILE + // Windows CE does not have the 'ANSI' versions of Win32 APIs. To be + // able to pass strings to Win32 APIs on CE we need to convert them + // to 'Unicode', UTF-16. + + // Creates a UTF-16 wide string from the given ANSI string, allocating + // memory using new. The caller is responsible for deleting the return + // value using delete[]. Returns the wide string, or NULL if the + // input is NULL. + // + // The wide string is created using the ANSI codepage (CP_ACP) to + // match the behaviour of the ANSI versions of Win32 calls and the + // C runtime. + static LPCWSTR AnsiToUtf16(const char* c_str); + + // Creates an ANSI string from the given wide string, allocating + // memory using new. The caller is responsible for deleting the return + // value using delete[]. Returns the ANSI string, or NULL if the + // input is NULL. + // + // The returned string is created using the ANSI codepage (CP_ACP) to + // match the behaviour of the ANSI versions of Win32 calls and the + // C runtime. + static const char* Utf16ToAnsi(LPCWSTR utf16_str); +#endif + + // Compares two C strings. Returns true iff they have the same content. + // + // Unlike strcmp(), this function can handle NULL argument(s). A + // NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool CStringEquals(const char* lhs, const char* rhs); + + // Converts a wide C string to a String using the UTF-8 encoding. + // NULL will be converted to "(null)". If an error occurred during + // the conversion, "(failed to convert from wide string)" is + // returned. + static std::string ShowWideCString(const wchar_t* wide_c_str); + + // Compares two wide C strings. Returns true iff they have the same + // content. + // + // Unlike wcscmp(), this function can handle NULL argument(s). A + // NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool WideCStringEquals(const wchar_t* lhs, const wchar_t* rhs); + + // Compares two C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike strcasecmp(), this function can handle NULL argument(s). + // A NULL C string is considered different to any non-NULL C string, + // including the empty string. + static bool CaseInsensitiveCStringEquals(const char* lhs, + const char* rhs); + + // Compares two wide C strings, ignoring case. Returns true iff they + // have the same content. + // + // Unlike wcscasecmp(), this function can handle NULL argument(s). 
+ // A NULL C string is considered different to any non-NULL wide C string, + // including the empty string. + // NB: The implementations on different platforms slightly differ. + // On windows, this method uses _wcsicmp which compares according to LC_CTYPE + // environment variable. On GNU platform this method uses wcscasecmp + // which compares according to LC_CTYPE category of the current locale. + // On MacOS X, it uses towlower, which also uses LC_CTYPE category of the + // current locale. + static bool CaseInsensitiveWideCStringEquals(const wchar_t* lhs, + const wchar_t* rhs); + + // Returns true iff the given string ends with the given suffix, ignoring + // case. Any string is considered to end with an empty suffix. + static bool EndsWithCaseInsensitive( + const std::string& str, const std::string& suffix); + + // Formats an int value as "%02d". + static std::string FormatIntWidth2(int value); // "%02d" for width == 2 + + // Formats an int value as "%X". + static std::string FormatHexInt(int value); + + // Formats a byte as "%02X". + static std::string FormatByte(unsigned char value); + + private: + String(); // Not meant to be instantiated. +}; // class String + +// Gets the content of the stringstream's buffer as an std::string. Each '\0' +// character in the buffer is replaced with "\\0". +GTEST_API_ std::string StringStreamToString(::std::stringstream* stream); + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_STRING_H_ +// Copyright 2008, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: keith.ray@gmail.com (Keith Ray) +// +// Google Test filepath utilities +// +// This header file declares classes and functions used internally by +// Google Test. They are subject to change without notice. +// +// This file is #included in . +// Do not include this header file separately! 
+ +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_ + + +namespace testing { +namespace internal { + +// FilePath - a class for file and directory pathname manipulation which +// handles platform-specific conventions (like the pathname separator). +// Used for helper functions for naming files in a directory for xml output. +// Except for Set methods, all methods are const or static, which provides an +// "immutable value object" -- useful for peace of mind. +// A FilePath with a value ending in a path separator ("like/this/") represents +// a directory, otherwise it is assumed to represent a file. In either case, +// it may or may not represent an actual file or directory in the file system. +// Names are NOT checked for syntax correctness -- no checking for illegal +// characters, malformed paths, etc. + +class GTEST_API_ FilePath { + public: + FilePath() : pathname_("") { } + FilePath(const FilePath& rhs) : pathname_(rhs.pathname_) { } + + explicit FilePath(const std::string& pathname) : pathname_(pathname) { + Normalize(); + } + + FilePath& operator=(const FilePath& rhs) { + Set(rhs); + return *this; + } + + void Set(const FilePath& rhs) { + pathname_ = rhs.pathname_; + } + + const std::string& string() const { return pathname_; } + const char* c_str() const { return pathname_.c_str(); } + + // Returns the current working directory, or "" if unsuccessful. + static FilePath GetCurrentDir(); + + // Given directory = "dir", base_name = "test", number = 0, + // extension = "xml", returns "dir/test.xml". If number is greater + // than zero (e.g., 12), returns "dir/test_12.xml". + // On Windows platform, uses \ as the separator rather than /. + static FilePath MakeFileName(const FilePath& directory, + const FilePath& base_name, + int number, + const char* extension); + + // Given directory = "dir", relative_path = "test.xml", + // returns "dir/test.xml". + // On Windows, uses \ as the separator rather than /. + static FilePath ConcatPaths(const FilePath& directory, + const FilePath& relative_path); + + // Returns a pathname for a file that does not currently exist. The pathname + // will be directory/base_name.extension or + // directory/base_name_.extension if directory/base_name.extension + // already exists. The number will be incremented until a pathname is found + // that does not already exist. + // Examples: 'dir/foo_test.xml' or 'dir/foo_test_1.xml'. + // There could be a race condition if two or more processes are calling this + // function at the same time -- they could both pick the same filename. + static FilePath GenerateUniqueFileName(const FilePath& directory, + const FilePath& base_name, + const char* extension); + + // Returns true iff the path is "". + bool IsEmpty() const { return pathname_.empty(); } + + // If input name has a trailing separator character, removes it and returns + // the name, otherwise return the name string unmodified. + // On Windows platform, uses \ as the separator, other platforms use /. + FilePath RemoveTrailingPathSeparator() const; + + // Returns a copy of the FilePath with the directory part removed. + // Example: FilePath("path/to/file").RemoveDirectoryName() returns + // FilePath("file"). If there is no directory part ("just_a_file"), it returns + // the FilePath unmodified. If there is no file part ("just_a_dir/") it + // returns an empty FilePath (""). + // On Windows platform, '\' is the path separator, otherwise it is '/'. 
+ FilePath RemoveDirectoryName() const; + + // RemoveFileName returns the directory path with the filename removed. + // Example: FilePath("path/to/file").RemoveFileName() returns "path/to/". + // If the FilePath is "a_file" or "/a_file", RemoveFileName returns + // FilePath("./") or, on Windows, FilePath(".\\"). If the filepath does + // not have a file, like "just/a/dir/", it returns the FilePath unmodified. + // On Windows platform, '\' is the path separator, otherwise it is '/'. + FilePath RemoveFileName() const; + + // Returns a copy of the FilePath with the case-insensitive extension removed. + // Example: FilePath("dir/file.exe").RemoveExtension("EXE") returns + // FilePath("dir/file"). If a case-insensitive extension is not + // found, returns a copy of the original FilePath. + FilePath RemoveExtension(const char* extension) const; + + // Creates directories so that path exists. Returns true if successful or if + // the directories already exist; returns false if unable to create + // directories for any reason. Will also return false if the FilePath does + // not represent a directory (that is, it doesn't end with a path separator). + bool CreateDirectoriesRecursively() const; + + // Create the directory so that path exists. Returns true if successful or + // if the directory already exists; returns false if unable to create the + // directory for any reason, including if the parent directory does not + // exist. Not named "CreateDirectory" because that's a macro on Windows. + bool CreateFolder() const; + + // Returns true if FilePath describes something in the file-system, + // either a file, directory, or whatever, and that something exists. + bool FileOrDirectoryExists() const; + + // Returns true if pathname describes a directory in the file-system + // that exists. + bool DirectoryExists() const; + + // Returns true if FilePath ends with a path separator, which indicates that + // it is intended to represent a directory. Returns false otherwise. + // This does NOT check that a directory (or file) actually exists. + bool IsDirectory() const; + + // Returns true if pathname describes a root directory. (Windows has one + // root directory per disk drive.) + bool IsRootDirectory() const; + + // Returns true if pathname describes an absolute path. + bool IsAbsolutePath() const; + + private: + // Replaces multiple consecutive separators with a single separator. + // For example, "bar///foo" becomes "bar/foo". Does not eliminate other + // redundancies that might be in a pathname involving "." or "..". + // + // A pathname with multiple consecutive separators may occur either through + // user error or as a result of some scripts or APIs that generate a pathname + // with a trailing separator. On other platforms the same API or script + // may NOT generate a pathname with a trailing "/". Then elsewhere that + // pathname may have another "/" and pathname components added to it, + // without checking for the separator already being there. + // The script language and operating system may allow paths like "foo//bar" + // but some of the functions in FilePath will not handle that correctly. In + // particular, RemoveTrailingPathSeparator() only removes one separator, and + // it is called in CreateDirectoriesRecursively() assuming that it will change + // a pathname from directory syntax (trailing separator) to filename syntax. 
+  //
+  // On Windows this method also replaces the alternate path separator '/' with
+  // the primary path separator '\\', so that for example "bar\\/\\foo" becomes
+  // "bar\\foo".
+
+  void Normalize();
+
+  // Returns a pointer to the last occurrence of a valid path separator in
+  // the FilePath. On Windows, for example, both '/' and '\' are valid path
+  // separators. Returns NULL if no path separator was found.
+  const char* FindLastPathSeparator() const;
+
+  std::string pathname_;
+};  // class FilePath
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_FILEPATH_H_
+// This file was GENERATED by command:
+//     pump.py gtest-type-util.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Type utilities needed for implementing typed and type-parameterized
+// tests.  This file is generated by a SCRIPT.  DO NOT EDIT BY HAND!
+//
+// Currently we support at most 50 types in a list, and at most 50
+// type-parameterized tests in one type-parameterized test case.
+// Please contact googletestframework@googlegroups.com if you need
+// more.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_
+
+
+// #ifdef __GNUC__ is too general here.  It is possible to use gcc without using
+// libstdc++ (which is where cxxabi.h comes from).
+# if GTEST_HAS_CXXABI_H_
+#  include <cxxabi.h>
+# elif defined(__HP_aCC)
+#  include <acxx_demangle.h>
+# endif  // GTEST_HAS_CXXABI_H_
+
+namespace testing {
+namespace internal {
+
+// GetTypeName<T>() returns a human-readable name of type T.
+// NB: This function is also used in Google Mock, so don't move it inside of
+// the typed-test-only section below.
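+//
+// For example (an illustrative sketch, assuming RTTI and a demangler are
+// available):
+//
+//   GetTypeName<int>()    // returns "int"
+//   GetTypeName<bool>()   // returns "bool"
+//
+// Without RTTI it returns a fixed placeholder string rather than a real name.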
+template +std::string GetTypeName() { +# if GTEST_HAS_RTTI + + const char* const name = typeid(T).name(); +# if GTEST_HAS_CXXABI_H_ || defined(__HP_aCC) + int status = 0; + // gcc's implementation of typeid(T).name() mangles the type name, + // so we have to demangle it. +# if GTEST_HAS_CXXABI_H_ + using abi::__cxa_demangle; +# endif // GTEST_HAS_CXXABI_H_ + char* const readable_name = __cxa_demangle(name, 0, 0, &status); + const std::string name_str(status == 0 ? readable_name : name); + free(readable_name); + return name_str; +# else + return name; +# endif // GTEST_HAS_CXXABI_H_ || __HP_aCC + +# else + + return ""; + +# endif // GTEST_HAS_RTTI +} + +#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// AssertyTypeEq::type is defined iff T1 and T2 are the same +// type. This can be used as a compile-time assertion to ensure that +// two types are equal. + +template +struct AssertTypeEq; + +template +struct AssertTypeEq { + typedef bool type; +}; + +// A unique type used as the default value for the arguments of class +// template Types. This allows us to simulate variadic templates +// (e.g. Types, Type, and etc), which C++ doesn't +// support directly. +struct None {}; + +// The following family of struct and struct templates are used to +// represent type lists. In particular, TypesN +// represents a type list with N types (T1, T2, ..., and TN) in it. +// Except for Types0, every struct in the family has two member types: +// Head for the first type in the list, and Tail for the rest of the +// list. + +// The empty type list. +struct Types0 {}; + +// Type lists of length 1, 2, 3, and so on. + +template +struct Types1 { + typedef T1 Head; + typedef Types0 Tail; +}; +template +struct Types2 { + typedef T1 Head; + typedef Types1 Tail; +}; + +template +struct Types3 { + typedef T1 Head; + typedef Types2 Tail; +}; + +template +struct Types4 { + typedef T1 Head; + typedef Types3 Tail; +}; + +template +struct Types5 { + typedef T1 Head; + typedef Types4 Tail; +}; + +template +struct Types6 { + typedef T1 Head; + typedef Types5 Tail; +}; + +template +struct Types7 { + typedef T1 Head; + typedef Types6 Tail; +}; + +template +struct Types8 { + typedef T1 Head; + typedef Types7 Tail; +}; + +template +struct Types9 { + typedef T1 Head; + typedef Types8 Tail; +}; + +template +struct Types10 { + typedef T1 Head; + typedef Types9 Tail; +}; + +template +struct Types11 { + typedef T1 Head; + typedef Types10 Tail; +}; + +template +struct Types12 { + typedef T1 Head; + typedef Types11 Tail; +}; + +template +struct Types13 { + typedef T1 Head; + typedef Types12 Tail; +}; + +template +struct Types14 { + typedef T1 Head; + typedef Types13 Tail; +}; + +template +struct Types15 { + typedef T1 Head; + typedef Types14 Tail; +}; + +template +struct Types16 { + typedef T1 Head; + typedef Types15 Tail; +}; + +template +struct Types17 { + typedef T1 Head; + typedef Types16 Tail; +}; + +template +struct Types18 { + typedef T1 Head; + typedef Types17 Tail; +}; + +template +struct Types19 { + typedef T1 Head; + typedef Types18 Tail; +}; + +template +struct Types20 { + typedef T1 Head; + typedef Types19 Tail; +}; + +template +struct Types21 { + typedef T1 Head; + typedef Types20 Tail; +}; + +template +struct Types22 { + typedef T1 Head; + typedef Types21 Tail; +}; + +template +struct Types23 { + typedef T1 Head; + typedef Types22 Tail; +}; + +template +struct Types24 { + typedef T1 Head; + typedef Types23 Tail; +}; + +template +struct Types25 { + typedef T1 Head; + typedef Types24 Tail; +}; + +template +struct 
Types26 { + typedef T1 Head; + typedef Types25 Tail; +}; + +template +struct Types27 { + typedef T1 Head; + typedef Types26 Tail; +}; + +template +struct Types28 { + typedef T1 Head; + typedef Types27 Tail; +}; + +template +struct Types29 { + typedef T1 Head; + typedef Types28 Tail; +}; + +template +struct Types30 { + typedef T1 Head; + typedef Types29 Tail; +}; + +template +struct Types31 { + typedef T1 Head; + typedef Types30 Tail; +}; + +template +struct Types32 { + typedef T1 Head; + typedef Types31 Tail; +}; + +template +struct Types33 { + typedef T1 Head; + typedef Types32 Tail; +}; + +template +struct Types34 { + typedef T1 Head; + typedef Types33 Tail; +}; + +template +struct Types35 { + typedef T1 Head; + typedef Types34 Tail; +}; + +template +struct Types36 { + typedef T1 Head; + typedef Types35 Tail; +}; + +template +struct Types37 { + typedef T1 Head; + typedef Types36 Tail; +}; + +template +struct Types38 { + typedef T1 Head; + typedef Types37 Tail; +}; + +template +struct Types39 { + typedef T1 Head; + typedef Types38 Tail; +}; + +template +struct Types40 { + typedef T1 Head; + typedef Types39 Tail; +}; + +template +struct Types41 { + typedef T1 Head; + typedef Types40 Tail; +}; + +template +struct Types42 { + typedef T1 Head; + typedef Types41 Tail; +}; + +template +struct Types43 { + typedef T1 Head; + typedef Types42 Tail; +}; + +template +struct Types44 { + typedef T1 Head; + typedef Types43 Tail; +}; + +template +struct Types45 { + typedef T1 Head; + typedef Types44 Tail; +}; + +template +struct Types46 { + typedef T1 Head; + typedef Types45 Tail; +}; + +template +struct Types47 { + typedef T1 Head; + typedef Types46 Tail; +}; + +template +struct Types48 { + typedef T1 Head; + typedef Types47 Tail; +}; + +template +struct Types49 { + typedef T1 Head; + typedef Types48 Tail; +}; + +template +struct Types50 { + typedef T1 Head; + typedef Types49 Tail; +}; + + +} // namespace internal + +// We don't want to require the users to write TypesN<...> directly, +// as that would require them to count the length. Types<...> is much +// easier to write, but generates horrible messages when there is a +// compiler error, as gcc insists on printing out each template +// argument, even if it has the default value (this means Types +// will appear as Types in the compiler +// errors). +// +// Our solution is to combine the best part of the two approaches: a +// user would write Types, and Google Test will translate +// that to TypesN internally to make error messages +// readable. The translation is done by the 'type' member of the +// Types template. 
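+// For example (an illustrative sketch; FooTest is a made-up test fixture):
+//
+//   typedef testing::Types<char, int, unsigned int> MyTypes;
+//   TYPED_TEST_CASE(FooTest, MyTypes);
+//
+// Here MyTypes::type is internal::Types3<char, int, unsigned int>, the
+// Head/Tail representation the typed-test machinery actually walks.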
+template +struct Types { + typedef internal::Types50 type; +}; + +template <> +struct Types { + typedef internal::Types0 type; +}; +template +struct Types { + typedef internal::Types1 type; +}; +template +struct Types { + typedef internal::Types2 type; +}; +template +struct Types { + typedef internal::Types3 type; +}; +template +struct Types { + typedef internal::Types4 type; +}; +template +struct Types { + typedef internal::Types5 type; +}; +template +struct Types { + typedef internal::Types6 type; +}; +template +struct Types { + typedef internal::Types7 type; +}; +template +struct Types { + typedef internal::Types8 type; +}; +template +struct Types { + typedef internal::Types9 type; +}; +template +struct Types { + typedef internal::Types10 type; +}; +template +struct Types { + typedef internal::Types11 type; +}; +template +struct Types { + typedef internal::Types12 type; +}; +template +struct Types { + typedef internal::Types13 type; +}; +template +struct Types { + typedef internal::Types14 type; +}; +template +struct Types { + typedef internal::Types15 type; +}; +template +struct Types { + typedef internal::Types16 type; +}; +template +struct Types { + typedef internal::Types17 type; +}; +template +struct Types { + typedef internal::Types18 type; +}; +template +struct Types { + typedef internal::Types19 type; +}; +template +struct Types { + typedef internal::Types20 type; +}; +template +struct Types { + typedef internal::Types21 type; +}; +template +struct Types { + typedef internal::Types22 type; +}; +template +struct Types { + typedef internal::Types23 type; +}; +template +struct Types { + typedef internal::Types24 type; +}; +template +struct Types { + typedef internal::Types25 type; +}; +template +struct Types { + typedef internal::Types26 type; +}; +template +struct Types { + typedef internal::Types27 type; +}; +template +struct Types { + typedef internal::Types28 type; +}; +template +struct Types { + typedef internal::Types29 type; +}; +template +struct Types { + typedef internal::Types30 type; +}; +template +struct Types { + typedef internal::Types31 type; +}; +template +struct Types { + typedef internal::Types32 type; +}; +template +struct Types { + typedef internal::Types33 type; +}; +template +struct Types { + typedef internal::Types34 type; +}; +template +struct Types { + typedef internal::Types35 type; +}; +template +struct Types { + typedef internal::Types36 type; +}; +template +struct Types { + typedef internal::Types37 type; +}; +template +struct Types { + typedef internal::Types38 type; +}; +template +struct Types { + typedef internal::Types39 type; +}; +template +struct Types { + typedef internal::Types40 type; +}; +template +struct Types { + typedef internal::Types41 type; +}; +template +struct Types { + typedef internal::Types42 type; +}; +template +struct Types { + typedef internal::Types43 type; +}; +template +struct Types { + typedef internal::Types44 type; +}; +template +struct Types { + typedef internal::Types45 type; +}; +template +struct Types { + typedef internal::Types46 type; +}; +template +struct Types { + typedef internal::Types47 type; +}; +template +struct Types { + typedef internal::Types48 type; +}; +template +struct Types { + typedef internal::Types49 type; +}; + +namespace internal { + +# define GTEST_TEMPLATE_ template class + +// The template "selector" struct TemplateSel is used to +// represent Tmpl, which must be a class template with one type +// parameter, as a type. TemplateSel::Bind::type is defined +// as the type Tmpl. 
This allows us to actually instantiate the +// template "selected" by TemplateSel. +// +// This trick is necessary for simulating typedef for class templates, +// which C++ doesn't support directly. +template +struct TemplateSel { + template + struct Bind { + typedef Tmpl type; + }; +}; + +# define GTEST_BIND_(TmplSel, T) \ + TmplSel::template Bind::type + +// A unique struct template used as the default value for the +// arguments of class template Templates. This allows us to simulate +// variadic templates (e.g. Templates, Templates, +// and etc), which C++ doesn't support directly. +template +struct NoneT {}; + +// The following family of struct and struct templates are used to +// represent template lists. In particular, TemplatesN represents a list of N templates (T1, T2, ..., and TN). Except +// for Templates0, every struct in the family has two member types: +// Head for the selector of the first template in the list, and Tail +// for the rest of the list. + +// The empty template list. +struct Templates0 {}; + +// Template lists of length 1, 2, 3, and so on. + +template +struct Templates1 { + typedef TemplateSel Head; + typedef Templates0 Tail; +}; +template +struct Templates2 { + typedef TemplateSel Head; + typedef Templates1 Tail; +}; + +template +struct Templates3 { + typedef TemplateSel Head; + typedef Templates2 Tail; +}; + +template +struct Templates4 { + typedef TemplateSel Head; + typedef Templates3 Tail; +}; + +template +struct Templates5 { + typedef TemplateSel Head; + typedef Templates4 Tail; +}; + +template +struct Templates6 { + typedef TemplateSel Head; + typedef Templates5 Tail; +}; + +template +struct Templates7 { + typedef TemplateSel Head; + typedef Templates6 Tail; +}; + +template +struct Templates8 { + typedef TemplateSel Head; + typedef Templates7 Tail; +}; + +template +struct Templates9 { + typedef TemplateSel Head; + typedef Templates8 Tail; +}; + +template +struct Templates10 { + typedef TemplateSel Head; + typedef Templates9 Tail; +}; + +template +struct Templates11 { + typedef TemplateSel Head; + typedef Templates10 Tail; +}; + +template +struct Templates12 { + typedef TemplateSel Head; + typedef Templates11 Tail; +}; + +template +struct Templates13 { + typedef TemplateSel Head; + typedef Templates12 Tail; +}; + +template +struct Templates14 { + typedef TemplateSel Head; + typedef Templates13 Tail; +}; + +template +struct Templates15 { + typedef TemplateSel Head; + typedef Templates14 Tail; +}; + +template +struct Templates16 { + typedef TemplateSel Head; + typedef Templates15 Tail; +}; + +template +struct Templates17 { + typedef TemplateSel Head; + typedef Templates16 Tail; +}; + +template +struct Templates18 { + typedef TemplateSel Head; + typedef Templates17 Tail; +}; + +template +struct Templates19 { + typedef TemplateSel Head; + typedef Templates18 Tail; +}; + +template +struct Templates20 { + typedef TemplateSel Head; + typedef Templates19 Tail; +}; + +template +struct Templates21 { + typedef TemplateSel Head; + typedef Templates20 Tail; +}; + +template +struct Templates22 { + typedef TemplateSel Head; + typedef Templates21 Tail; +}; + +template +struct Templates23 { + typedef TemplateSel Head; + typedef Templates22 Tail; +}; + +template +struct Templates24 { + typedef TemplateSel Head; + typedef Templates23 Tail; +}; + +template +struct Templates25 { + typedef TemplateSel Head; + typedef Templates24 Tail; +}; + +template +struct Templates26 { + typedef TemplateSel Head; + typedef Templates25 Tail; +}; + +template +struct Templates27 { + typedef 
TemplateSel Head; + typedef Templates26 Tail; +}; + +template +struct Templates28 { + typedef TemplateSel Head; + typedef Templates27 Tail; +}; + +template +struct Templates29 { + typedef TemplateSel Head; + typedef Templates28 Tail; +}; + +template +struct Templates30 { + typedef TemplateSel Head; + typedef Templates29 Tail; +}; + +template +struct Templates31 { + typedef TemplateSel Head; + typedef Templates30 Tail; +}; + +template +struct Templates32 { + typedef TemplateSel Head; + typedef Templates31 Tail; +}; + +template +struct Templates33 { + typedef TemplateSel Head; + typedef Templates32 Tail; +}; + +template +struct Templates34 { + typedef TemplateSel Head; + typedef Templates33 Tail; +}; + +template +struct Templates35 { + typedef TemplateSel Head; + typedef Templates34 Tail; +}; + +template +struct Templates36 { + typedef TemplateSel Head; + typedef Templates35 Tail; +}; + +template +struct Templates37 { + typedef TemplateSel Head; + typedef Templates36 Tail; +}; + +template +struct Templates38 { + typedef TemplateSel Head; + typedef Templates37 Tail; +}; + +template +struct Templates39 { + typedef TemplateSel Head; + typedef Templates38 Tail; +}; + +template +struct Templates40 { + typedef TemplateSel Head; + typedef Templates39 Tail; +}; + +template +struct Templates41 { + typedef TemplateSel Head; + typedef Templates40 Tail; +}; + +template +struct Templates42 { + typedef TemplateSel Head; + typedef Templates41 Tail; +}; + +template +struct Templates43 { + typedef TemplateSel Head; + typedef Templates42 Tail; +}; + +template +struct Templates44 { + typedef TemplateSel Head; + typedef Templates43 Tail; +}; + +template +struct Templates45 { + typedef TemplateSel Head; + typedef Templates44 Tail; +}; + +template +struct Templates46 { + typedef TemplateSel Head; + typedef Templates45 Tail; +}; + +template +struct Templates47 { + typedef TemplateSel Head; + typedef Templates46 Tail; +}; + +template +struct Templates48 { + typedef TemplateSel Head; + typedef Templates47 Tail; +}; + +template +struct Templates49 { + typedef TemplateSel Head; + typedef Templates48 Tail; +}; + +template +struct Templates50 { + typedef TemplateSel Head; + typedef Templates49 Tail; +}; + + +// We don't want to require the users to write TemplatesN<...> directly, +// as that would require them to count the length. Templates<...> is much +// easier to write, but generates horrible messages when there is a +// compiler error, as gcc insists on printing out each template +// argument, even if it has the default value (this means Templates +// will appear as Templates in the compiler +// errors). +// +// Our solution is to combine the best part of the two approaches: a +// user would write Templates, and Google Test will translate +// that to TemplatesN internally to make error messages +// readable. The translation is done by the 'type' member of the +// Templates template. 
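+// As a minimal illustration (not part of gtest) of the selector trick
+// above: TemplateSel wraps a class template in an ordinary type, so it
+// can be stored in these lists and later re-applied to a type argument
+// through Bind (here MyVector is a hypothetical template):
+//
+//   template <typename T> class MyVector { /* ... */ };
+//
+//   typedef TemplateSel<MyVector> MyVectorSel;           // a plain type
+//   typedef GTEST_BIND_(MyVectorSel, int) MyVectorOfInt; // MyVector<int>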
+template +struct Templates { + typedef Templates50 type; +}; + +template <> +struct Templates { + typedef Templates0 type; +}; +template +struct Templates { + typedef Templates1 type; +}; +template +struct Templates { + typedef Templates2 type; +}; +template +struct Templates { + typedef Templates3 type; +}; +template +struct Templates { + typedef Templates4 type; +}; +template +struct Templates { + typedef Templates5 type; +}; +template +struct Templates { + typedef Templates6 type; +}; +template +struct Templates { + typedef Templates7 type; +}; +template +struct Templates { + typedef Templates8 type; +}; +template +struct Templates { + typedef Templates9 type; +}; +template +struct Templates { + typedef Templates10 type; +}; +template +struct Templates { + typedef Templates11 type; +}; +template +struct Templates { + typedef Templates12 type; +}; +template +struct Templates { + typedef Templates13 type; +}; +template +struct Templates { + typedef Templates14 type; +}; +template +struct Templates { + typedef Templates15 type; +}; +template +struct Templates { + typedef Templates16 type; +}; +template +struct Templates { + typedef Templates17 type; +}; +template +struct Templates { + typedef Templates18 type; +}; +template +struct Templates { + typedef Templates19 type; +}; +template +struct Templates { + typedef Templates20 type; +}; +template +struct Templates { + typedef Templates21 type; +}; +template +struct Templates { + typedef Templates22 type; +}; +template +struct Templates { + typedef Templates23 type; +}; +template +struct Templates { + typedef Templates24 type; +}; +template +struct Templates { + typedef Templates25 type; +}; +template +struct Templates { + typedef Templates26 type; +}; +template +struct Templates { + typedef Templates27 type; +}; +template +struct Templates { + typedef Templates28 type; +}; +template +struct Templates { + typedef Templates29 type; +}; +template +struct Templates { + typedef Templates30 type; +}; +template +struct Templates { + typedef Templates31 type; +}; +template +struct Templates { + typedef Templates32 type; +}; +template +struct Templates { + typedef Templates33 type; +}; +template +struct Templates { + typedef Templates34 type; +}; +template +struct Templates { + typedef Templates35 type; +}; +template +struct Templates { + typedef Templates36 type; +}; +template +struct Templates { + typedef Templates37 type; +}; +template +struct Templates { + typedef Templates38 type; +}; +template +struct Templates { + typedef Templates39 type; +}; +template +struct Templates { + typedef Templates40 type; +}; +template +struct Templates { + typedef Templates41 type; +}; +template +struct Templates { + typedef Templates42 type; +}; +template +struct Templates { + typedef Templates43 type; +}; +template +struct Templates { + typedef Templates44 type; +}; +template +struct Templates { + typedef Templates45 type; +}; +template +struct Templates { + typedef Templates46 type; +}; +template +struct Templates { + typedef Templates47 type; +}; +template +struct Templates { + typedef Templates48 type; +}; +template +struct Templates { + typedef Templates49 type; +}; + +// The TypeList template makes it possible to use either a single type +// or a Types<...> list in TYPED_TEST_CASE() and +// INSTANTIATE_TYPED_TEST_CASE_P(). 
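+// For reference, this is what the machinery above enables at the user
+// level (a sketch based on gtest's documented typed-test API):
+//
+//   template <typename T>
+//   class FooTest : public testing::Test {};
+//
+//   typedef testing::Types<char, int, unsigned int> MyTypes;
+//   TYPED_TEST_CASE(FooTest, MyTypes);  // a Types<...> list,
+//   // or TYPED_TEST_CASE(FooTest, int) for a single type
+//
+//   TYPED_TEST(FooTest, DoesBlah) {
+//     TypeParam x = TypeParam();  // TypeParam is the current type
+//     (void)x;
+//   }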
+ +template +struct TypeList { + typedef Types1 type; +}; + +template +struct TypeList > { + typedef typename Types::type type; +}; + +#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +} // namespace internal +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_TYPE_UTIL_H_ + +// Due to C++ preprocessor weirdness, we need double indirection to +// concatenate two tokens when one of them is __LINE__. Writing +// +// foo ## __LINE__ +// +// will result in the token foo__LINE__, instead of foo followed by +// the current line number. For more details, see +// http://www.parashift.com/c++-faq-lite/misc-technical-issues.html#faq-39.6 +#define GTEST_CONCAT_TOKEN_(foo, bar) GTEST_CONCAT_TOKEN_IMPL_(foo, bar) +#define GTEST_CONCAT_TOKEN_IMPL_(foo, bar) foo ## bar + +class ProtocolMessage; +namespace proto2 { class Message; } + +namespace testing { + +// Forward declarations. + +class AssertionResult; // Result of an assertion. +class Message; // Represents a failure message. +class Test; // Represents a test. +class TestInfo; // Information about a test. +class TestPartResult; // Result of a test part. +class UnitTest; // A collection of test cases. + +template +::std::string PrintToString(const T& value); + +namespace internal { + +struct TraceInfo; // Information about a trace point. +class ScopedTrace; // Implements scoped trace. +class TestInfoImpl; // Opaque implementation of TestInfo +class UnitTestImpl; // Opaque implementation of UnitTest + +// How many times InitGoogleTest() has been called. +GTEST_API_ extern int g_init_gtest_count; + +// The text used in failure messages to indicate the start of the +// stack trace. +GTEST_API_ extern const char kStackTraceMarker[]; + +// Two overloaded helpers for checking at compile time whether an +// expression is a null pointer literal (i.e. NULL or any 0-valued +// compile-time integral constant). Their return values have +// different sizes, so we can use sizeof() to test which version is +// picked by the compiler. These helpers have no implementations, as +// we only need their signatures. +// +// Given IsNullLiteralHelper(x), the compiler will pick the first +// version if x can be implicitly converted to Secret*, and pick the +// second version otherwise. Since Secret is a secret and incomplete +// type, the only expression a user can write that has type Secret* is +// a null pointer literal. Therefore, we know that x is a null +// pointer literal if and only if the first version is picked by the +// compiler. +char IsNullLiteralHelper(Secret* p); +char (&IsNullLiteralHelper(...))[2]; // NOLINT + +// A compile-time bool constant that is true if and only if x is a +// null pointer literal (i.e. NULL or any 0-valued compile-time +// integral constant). +#ifdef GTEST_ELLIPSIS_NEEDS_POD_ +// We lose support for NULL detection where the compiler doesn't like +// passing non-POD classes through ellipsis (...). +# define GTEST_IS_NULL_LITERAL_(x) false +#else +# define GTEST_IS_NULL_LITERAL_(x) \ + (sizeof(::testing::internal::IsNullLiteralHelper(x)) == 1) +#endif // GTEST_ELLIPSIS_NEEDS_POD_ + +// Appends the user-supplied message to the Google-Test-generated message. +GTEST_API_ std::string AppendUserMessage( + const std::string& gtest_msg, const Message& user_msg); + +#if GTEST_HAS_EXCEPTIONS + +// This exception is thrown by (and only by) a failed Google Test +// assertion when GTEST_FLAG(throw_on_failure) is true (if exceptions +// are enabled). 
We derive it from std::runtime_error, which is for +// errors presumably detectable only at run time. Since +// std::runtime_error inherits from std::exception, many testing +// frameworks know how to extract and print the message inside it. +class GTEST_API_ GoogleTestFailureException : public ::std::runtime_error { + public: + explicit GoogleTestFailureException(const TestPartResult& failure); +}; + +#endif // GTEST_HAS_EXCEPTIONS + +// A helper class for creating scoped traces in user programs. +class GTEST_API_ ScopedTrace { + public: + // The c'tor pushes the given source file location and message onto + // a trace stack maintained by Google Test. + ScopedTrace(const char* file, int line, const Message& message); + + // The d'tor pops the info pushed by the c'tor. + // + // Note that the d'tor is not virtual in order to be efficient. + // Don't inherit from ScopedTrace! + ~ScopedTrace(); + + private: + GTEST_DISALLOW_COPY_AND_ASSIGN_(ScopedTrace); +} GTEST_ATTRIBUTE_UNUSED_; // A ScopedTrace object does its job in its + // c'tor and d'tor. Therefore it doesn't + // need to be used otherwise. + +namespace edit_distance { +// Returns the optimal edits to go from 'left' to 'right'. +// All edits cost the same, with replace having lower priority than +// add/remove. +// Simple implementation of the Wagner-Fischer algorithm. +// See http://en.wikipedia.org/wiki/Wagner-Fischer_algorithm +enum EditType { kMatch, kAdd, kRemove, kReplace }; +GTEST_API_ std::vector CalculateOptimalEdits( + const std::vector& left, const std::vector& right); + +// Same as above, but the input is represented as strings. +GTEST_API_ std::vector CalculateOptimalEdits( + const std::vector& left, + const std::vector& right); + +// Create a diff of the input strings in Unified diff format. +GTEST_API_ std::string CreateUnifiedDiff(const std::vector& left, + const std::vector& right, + size_t context = 2); + +} // namespace edit_distance + +// Calculate the diff between 'left' and 'right' and return it in unified diff +// format. +// If not null, stores in 'total_line_count' the total number of lines found +// in left + right. +GTEST_API_ std::string DiffStrings(const std::string& left, + const std::string& right, + size_t* total_line_count); + +// Constructs and returns the message for an equality assertion +// (e.g. ASSERT_EQ, EXPECT_STREQ, etc) failure. +// +// The first four parameters are the expressions used in the assertion +// and their values, as strings. For example, for ASSERT_EQ(foo, bar) +// where foo is 5 and bar is 6, we have: +// +// expected_expression: "foo" +// actual_expression: "bar" +// expected_value: "5" +// actual_value: "6" +// +// The ignoring_case parameter is true iff the assertion is a +// *_STRCASEEQ*. When it's true, the string " (ignoring case)" will +// be inserted into the message. +GTEST_API_ AssertionResult EqFailure(const char* expected_expression, + const char* actual_expression, + const std::string& expected_value, + const std::string& actual_value, + bool ignoring_case); + +// Constructs a failure message for Boolean assertions such as EXPECT_TRUE. +GTEST_API_ std::string GetBoolAssertionFailureMessage( + const AssertionResult& assertion_result, + const char* expression_text, + const char* actual_predicate_value, + const char* expected_predicate_value); + +// This template class represents an IEEE floating-point number +// (either single-precision or double-precision, depending on the +// template parameters). 
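+//
+// (A concrete instance of the layout described below: the float 1.0f
+// is stored as the bit pattern 0x3F800000, i.e. sign bit 0, exponent
+// bits 0x7F (the bias 127 plus a true exponent of 0), and all fraction
+// bits 0.)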
+// +// The purpose of this class is to do more sophisticated number +// comparison. (Due to round-off error, etc, it's very unlikely that +// two floating-points will be equal exactly. Hence a naive +// comparison by the == operation often doesn't work.) +// +// Format of IEEE floating-point: +// +// The most-significant bit being the leftmost, an IEEE +// floating-point looks like +// +// sign_bit exponent_bits fraction_bits +// +// Here, sign_bit is a single bit that designates the sign of the +// number. +// +// For float, there are 8 exponent bits and 23 fraction bits. +// +// For double, there are 11 exponent bits and 52 fraction bits. +// +// More details can be found at +// http://en.wikipedia.org/wiki/IEEE_floating-point_standard. +// +// Template parameter: +// +// RawType: the raw floating-point type (either float or double) +template +class FloatingPoint { + public: + // Defines the unsigned integer type that has the same size as the + // floating point number. + typedef typename TypeWithSize::UInt Bits; + + // Constants. + + // # of bits in a number. + static const size_t kBitCount = 8*sizeof(RawType); + + // # of fraction bits in a number. + static const size_t kFractionBitCount = + std::numeric_limits::digits - 1; + + // # of exponent bits in a number. + static const size_t kExponentBitCount = kBitCount - 1 - kFractionBitCount; + + // The mask for the sign bit. + static const Bits kSignBitMask = static_cast(1) << (kBitCount - 1); + + // The mask for the fraction bits. + static const Bits kFractionBitMask = + ~static_cast(0) >> (kExponentBitCount + 1); + + // The mask for the exponent bits. + static const Bits kExponentBitMask = ~(kSignBitMask | kFractionBitMask); + + // How many ULP's (Units in the Last Place) we want to tolerate when + // comparing two numbers. The larger the value, the more error we + // allow. A 0 value means that two numbers must be exactly the same + // to be considered equal. + // + // The maximum error of a single floating-point operation is 0.5 + // units in the last place. On Intel CPU's, all floating-point + // calculations are done with 80-bit precision, while double has 64 + // bits. Therefore, 4 should be enough for ordinary use. + // + // See the following article for more details on ULP: + // http://randomascii.wordpress.com/2012/02/25/comparing-floating-point-numbers-2012-edition/ + static const size_t kMaxUlps = 4; + + // Constructs a FloatingPoint from a raw floating-point number. + // + // On an Intel CPU, passing a non-normalized NAN (Not a Number) + // around may change its bits, although the new value is guaranteed + // to be also a NAN. Therefore, don't expect this constructor to + // preserve the bits in x when x is a NAN. + explicit FloatingPoint(const RawType& x) { u_.value_ = x; } + + // Static methods + + // Reinterprets a bit pattern as a floating-point number. + // + // This function is needed to test the AlmostEquals() method. + static RawType ReinterpretBits(const Bits bits) { + FloatingPoint fp(0); + fp.u_.bits_ = bits; + return fp.u_.value_; + } + + // Returns the floating-point number that represent positive infinity. + static RawType Infinity() { + return ReinterpretBits(kExponentBitMask); + } + + // Returns the maximum representable finite floating-point number. + static RawType Max(); + + // Non-static methods + + // Returns the bits that represents this number. + const Bits &bits() const { return u_.bits_; } + + // Returns the exponent bits of this number. 
+ Bits exponent_bits() const { return kExponentBitMask & u_.bits_; } + + // Returns the fraction bits of this number. + Bits fraction_bits() const { return kFractionBitMask & u_.bits_; } + + // Returns the sign bit of this number. + Bits sign_bit() const { return kSignBitMask & u_.bits_; } + + // Returns true iff this is NAN (not a number). + bool is_nan() const { + // It's a NAN if the exponent bits are all ones and the fraction + // bits are not entirely zeros. + return (exponent_bits() == kExponentBitMask) && (fraction_bits() != 0); + } + + // Returns true iff this number is at most kMaxUlps ULP's away from + // rhs. In particular, this function: + // + // - returns false if either number is (or both are) NAN. + // - treats really large numbers as almost equal to infinity. + // - thinks +0.0 and -0.0 are 0 DLP's apart. + bool AlmostEquals(const FloatingPoint& rhs) const { + // The IEEE standard says that any comparison operation involving + // a NAN must return false. + if (is_nan() || rhs.is_nan()) return false; + + return DistanceBetweenSignAndMagnitudeNumbers(u_.bits_, rhs.u_.bits_) + <= kMaxUlps; + } + + private: + // The data type used to store the actual floating-point number. + union FloatingPointUnion { + RawType value_; // The raw floating-point number. + Bits bits_; // The bits that represent the number. + }; + + // Converts an integer from the sign-and-magnitude representation to + // the biased representation. More precisely, let N be 2 to the + // power of (kBitCount - 1), an integer x is represented by the + // unsigned number x + N. + // + // For instance, + // + // -N + 1 (the most negative number representable using + // sign-and-magnitude) is represented by 1; + // 0 is represented by N; and + // N - 1 (the biggest number representable using + // sign-and-magnitude) is represented by 2N - 1. + // + // Read http://en.wikipedia.org/wiki/Signed_number_representations + // for more details on signed number representations. + static Bits SignAndMagnitudeToBiased(const Bits &sam) { + if (kSignBitMask & sam) { + // sam represents a negative number. + return ~sam + 1; + } else { + // sam represents a positive number. + return kSignBitMask | sam; + } + } + + // Given two numbers in the sign-and-magnitude representation, + // returns the distance between them as an unsigned number. + static Bits DistanceBetweenSignAndMagnitudeNumbers(const Bits &sam1, + const Bits &sam2) { + const Bits biased1 = SignAndMagnitudeToBiased(sam1); + const Bits biased2 = SignAndMagnitudeToBiased(sam2); + return (biased1 >= biased2) ? (biased1 - biased2) : (biased2 - biased1); + } + + FloatingPointUnion u_; +}; + +// We cannot use std::numeric_limits::max() as it clashes with the max() +// macro defined by . +template <> +inline float FloatingPoint::Max() { return FLT_MAX; } +template <> +inline double FloatingPoint::Max() { return DBL_MAX; } + +// Typedefs the instances of the FloatingPoint template class that we +// care to use. +typedef FloatingPoint Float; +typedef FloatingPoint Double; + +// In order to catch the mistake of putting tests that use different +// test fixture classes in the same test case, we need to assign +// unique IDs to fixture classes and compare them. The TypeId type is +// used to hold such IDs. The user should treat TypeId as an opaque +// type: the only operation allowed on TypeId values is to compare +// them for equality using the == operator. +typedef const void* TypeId; + +template +class TypeIdHelper { + public: + // dummy_ must not have a const type. 
Otherwise an overly eager
+// compiler (e.g. MSVC 7.1 & 8.0) may try to merge
+// TypeIdHelper<T>::dummy_ for different Ts as an "optimization".
+  static bool dummy_;
+};
+
+template <typename T>
+bool TypeIdHelper<T>::dummy_ = false;
+
+// GetTypeId<T>() returns the ID of type T.  Different values will be
+// returned for different types.  Calling the function twice with the
+// same type argument is guaranteed to return the same ID.
+template <typename T>
+TypeId GetTypeId() {
+  // The compiler is required to allocate a different
+  // TypeIdHelper<T>::dummy_ variable for each T used to instantiate
+  // the template.  Therefore, the address of dummy_ is guaranteed to
+  // be unique.
+  return &(TypeIdHelper<T>::dummy_);
+}
+
+// Returns the type ID of ::testing::Test.  Always call this instead
+// of GetTypeId< ::testing::Test>() to get the type ID of
+// ::testing::Test, as the latter may give the wrong result due to a
+// suspected linker bug when compiling Google Test as a Mac OS X
+// framework.
+GTEST_API_ TypeId GetTestTypeId();
+
+// Defines the abstract factory interface that creates instances
+// of a Test object.
+class TestFactoryBase {
+ public:
+  virtual ~TestFactoryBase() {}
+
+  // Creates a test instance to run. The instance is both created and destroyed
+  // within TestInfoImpl::Run()
+  virtual Test* CreateTest() = 0;
+
+ protected:
+  TestFactoryBase() {}
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestFactoryBase);
+};
+
+// This class provides an implementation of the TestFactoryBase interface.
+// It is used in TEST and TEST_F macros.
+template <class TestClass>
+class TestFactoryImpl : public TestFactoryBase {
+ public:
+  virtual Test* CreateTest() { return new TestClass; }
+};
+
+#if GTEST_OS_WINDOWS
+
+// Predicate-formatters for implementing the HRESULT checking macros
+// {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}
+// We pass a long instead of HRESULT to avoid causing an
+// include dependency for the HRESULT type.
+GTEST_API_ AssertionResult IsHRESULTSuccess(const char* expr,
+                                            long hr);  // NOLINT
+GTEST_API_ AssertionResult IsHRESULTFailure(const char* expr,
+                                            long hr);  // NOLINT
+
+#endif  // GTEST_OS_WINDOWS
+
+// Types of SetUpTestCase() and TearDownTestCase() functions.
+typedef void (*SetUpTestCaseFunc)();
+typedef void (*TearDownTestCaseFunc)();
+
+// Creates a new TestInfo object and registers it with Google Test;
+// returns the created object.
+//
+// Arguments:
+//
+//   test_case_name:   name of the test case
+//   name:             name of the test
+//   type_param        the name of the test's type parameter, or NULL if
+//                     this is not a typed or a type-parameterized test.
+//   value_param       text representation of the test's value parameter,
+//                     or NULL if this is not a value-parameterized test.
+//   fixture_class_id: ID of the test fixture class
+//   set_up_tc:        pointer to the function that sets up the test case
+//   tear_down_tc:     pointer to the function that tears down the test case
+//   factory:          pointer to the factory that creates a test object.
+//                     The newly created TestInfo instance will assume
+//                     ownership of the factory object.
+GTEST_API_ TestInfo* MakeAndRegisterTestInfo(
+    const char* test_case_name,
+    const char* name,
+    const char* type_param,
+    const char* value_param,
+    TypeId fixture_class_id,
+    SetUpTestCaseFunc set_up_tc,
+    TearDownTestCaseFunc tear_down_tc,
+    TestFactoryBase* factory);
+
+// If *pstr starts with the given prefix, modifies *pstr to be right
+// past the prefix and returns true; otherwise leaves *pstr unchanged
+// and returns false.  None of pstr, *pstr, and prefix can be NULL.
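+// For example (illustrative): given
+//   const char* p = "--gtest_filter=*";
+// SkipPrefix("--gtest_", &p) returns true and leaves p pointing at
+// "filter=*", while SkipPrefix("--bar_", &p) would return false and
+// leave p unchanged.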
+GTEST_API_ bool SkipPrefix(const char* prefix, const char** pstr); + +#if GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// State of the definition of a type-parameterized test case. +class GTEST_API_ TypedTestCasePState { + public: + TypedTestCasePState() : registered_(false) {} + + // Adds the given test name to defined_test_names_ and return true + // if the test case hasn't been registered; otherwise aborts the + // program. + bool AddTestName(const char* file, int line, const char* case_name, + const char* test_name) { + if (registered_) { + fprintf(stderr, "%s Test %s must be defined before " + "REGISTER_TYPED_TEST_CASE_P(%s, ...).\n", + FormatFileLocation(file, line).c_str(), test_name, case_name); + fflush(stderr); + posix::Abort(); + } + defined_test_names_.insert(test_name); + return true; + } + + // Verifies that registered_tests match the test names in + // defined_test_names_; returns registered_tests if successful, or + // aborts the program otherwise. + const char* VerifyRegisteredTestNames( + const char* file, int line, const char* registered_tests); + + private: + bool registered_; + ::std::set defined_test_names_; +}; + +// Skips to the first non-space char after the first comma in 'str'; +// returns NULL if no comma is found in 'str'. +inline const char* SkipComma(const char* str) { + const char* comma = strchr(str, ','); + if (comma == NULL) { + return NULL; + } + while (IsSpace(*(++comma))) {} + return comma; +} + +// Returns the prefix of 'str' before the first comma in it; returns +// the entire string if it contains no comma. +inline std::string GetPrefixUntilComma(const char* str) { + const char* comma = strchr(str, ','); + return comma == NULL ? str : std::string(str, comma); +} + +// TypeParameterizedTest::Register() +// registers a list of type-parameterized tests with Google Test. The +// return value is insignificant - we just need to return something +// such that we can call this function in a namespace scope. +// +// Implementation note: The GTEST_TEMPLATE_ macro declares a template +// template parameter. It's defined in gtest-type-util.h. +template +class TypeParameterizedTest { + public: + // 'index' is the index of the test in the type list 'Types' + // specified in INSTANTIATE_TYPED_TEST_CASE_P(Prefix, TestCase, + // Types). Valid values for 'index' are [0, N - 1] where N is the + // length of Types. + static bool Register(const char* prefix, const char* case_name, + const char* test_names, int index) { + typedef typename Types::Head Type; + typedef Fixture FixtureClass; + typedef typename GTEST_BIND_(TestSel, Type) TestClass; + + // First, registers the first type-parameterized test in the type + // list. + MakeAndRegisterTestInfo( + (std::string(prefix) + (prefix[0] == '\0' ? "" : "/") + case_name + "/" + + StreamableToString(index)).c_str(), + StripTrailingSpaces(GetPrefixUntilComma(test_names)).c_str(), + GetTypeName().c_str(), + NULL, // No value parameter. + GetTypeId(), + TestClass::SetUpTestCase, + TestClass::TearDownTestCase, + new TestFactoryImpl); + + // Next, recurses (at compile time) with the tail of the type list. + return TypeParameterizedTest + ::Register(prefix, case_name, test_names, index + 1); + } +}; + +// The base case for the compile time recursion. 
+template +class TypeParameterizedTest { + public: + static bool Register(const char* /*prefix*/, const char* /*case_name*/, + const char* /*test_names*/, int /*index*/) { + return true; + } +}; + +// TypeParameterizedTestCase::Register() +// registers *all combinations* of 'Tests' and 'Types' with Google +// Test. The return value is insignificant - we just need to return +// something such that we can call this function in a namespace scope. +template +class TypeParameterizedTestCase { + public: + static bool Register(const char* prefix, const char* case_name, + const char* test_names) { + typedef typename Tests::Head Head; + + // First, register the first test in 'Test' for each type in 'Types'. + TypeParameterizedTest::Register( + prefix, case_name, test_names, 0); + + // Next, recurses (at compile time) with the tail of the test list. + return TypeParameterizedTestCase + ::Register(prefix, case_name, SkipComma(test_names)); + } +}; + +// The base case for the compile time recursion. +template +class TypeParameterizedTestCase { + public: + static bool Register(const char* /*prefix*/, const char* /*case_name*/, + const char* /*test_names*/) { + return true; + } +}; + +#endif // GTEST_HAS_TYPED_TEST || GTEST_HAS_TYPED_TEST_P + +// Returns the current OS stack trace as an std::string. +// +// The maximum number of stack frames to be included is specified by +// the gtest_stack_trace_depth flag. The skip_count parameter +// specifies the number of top frames to be skipped, which doesn't +// count against the number of frames to be included. +// +// For example, if Foo() calls Bar(), which in turn calls +// GetCurrentOsStackTraceExceptTop(..., 1), Foo() will be included in +// the trace but Bar() and GetCurrentOsStackTraceExceptTop() won't. +GTEST_API_ std::string GetCurrentOsStackTraceExceptTop( + UnitTest* unit_test, int skip_count); + +// Helpers for suppressing warnings on unreachable code or constant +// condition. + +// Always returns true. +GTEST_API_ bool AlwaysTrue(); + +// Always returns false. +inline bool AlwaysFalse() { return !AlwaysTrue(); } + +// Helper for suppressing false warning from Clang on a const char* +// variable declared in a conditional expression always being NULL in +// the else branch. +struct GTEST_API_ ConstCharPtr { + ConstCharPtr(const char* str) : value(str) {} + operator bool() const { return true; } + const char* value; +}; + +// A simple Linear Congruential Generator for generating random +// numbers with a uniform distribution. Unlike rand() and srand(), it +// doesn't use global state (and therefore can't interfere with user +// code). Unlike rand_r(), it's portable. An LCG isn't very random, +// but it's good enough for our purposes. +class GTEST_API_ Random { + public: + static const UInt32 kMaxRange = 1u << 31; + + explicit Random(UInt32 seed) : state_(seed) {} + + void Reseed(UInt32 seed) { state_ = seed; } + + // Generates a random number from [0, range). Crashes if 'range' is + // 0 or greater than kMaxRange. + UInt32 Generate(UInt32 range); + + private: + UInt32 state_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(Random); +}; + +// Defining a variable of type CompileAssertTypesEqual will cause a +// compiler error iff T1 and T2 are different types. +template +struct CompileAssertTypesEqual; + +template +struct CompileAssertTypesEqual { +}; + +// Removes the reference from a type if it is a reference type, +// otherwise leaves it unchanged. This is the same as +// tr1::remove_reference, which is not widely available yet. 
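+// For example (illustrative), RemoveReference<int&>::type and
+// RemoveReference<int>::type are both int.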
+template +struct RemoveReference { typedef T type; }; // NOLINT +template +struct RemoveReference { typedef T type; }; // NOLINT + +// A handy wrapper around RemoveReference that works when the argument +// T depends on template parameters. +#define GTEST_REMOVE_REFERENCE_(T) \ + typename ::testing::internal::RemoveReference::type + +// Removes const from a type if it is a const type, otherwise leaves +// it unchanged. This is the same as tr1::remove_const, which is not +// widely available yet. +template +struct RemoveConst { typedef T type; }; // NOLINT +template +struct RemoveConst { typedef T type; }; // NOLINT + +// MSVC 8.0, Sun C++, and IBM XL C++ have a bug which causes the above +// definition to fail to remove the const in 'const int[3]' and 'const +// char[3][4]'. The following specialization works around the bug. +template +struct RemoveConst { + typedef typename RemoveConst::type type[N]; +}; + +#if defined(_MSC_VER) && _MSC_VER < 1400 +// This is the only specialization that allows VC++ 7.1 to remove const in +// 'const int[3] and 'const int[3][4]'. However, it causes trouble with GCC +// and thus needs to be conditionally compiled. +template +struct RemoveConst { + typedef typename RemoveConst::type type[N]; +}; +#endif + +// A handy wrapper around RemoveConst that works when the argument +// T depends on template parameters. +#define GTEST_REMOVE_CONST_(T) \ + typename ::testing::internal::RemoveConst::type + +// Turns const U&, U&, const U, and U all into U. +#define GTEST_REMOVE_REFERENCE_AND_CONST_(T) \ + GTEST_REMOVE_CONST_(GTEST_REMOVE_REFERENCE_(T)) + +// Adds reference to a type if it is not a reference type, +// otherwise leaves it unchanged. This is the same as +// tr1::add_reference, which is not widely available yet. +template +struct AddReference { typedef T& type; }; // NOLINT +template +struct AddReference { typedef T& type; }; // NOLINT + +// A handy wrapper around AddReference that works when the argument T +// depends on template parameters. +#define GTEST_ADD_REFERENCE_(T) \ + typename ::testing::internal::AddReference::type + +// Adds a reference to const on top of T as necessary. For example, +// it transforms +// +// char ==> const char& +// const char ==> const char& +// char& ==> const char& +// const char& ==> const char& +// +// The argument T must depend on some template parameters. +#define GTEST_REFERENCE_TO_CONST_(T) \ + GTEST_ADD_REFERENCE_(const GTEST_REMOVE_REFERENCE_(T)) + +// ImplicitlyConvertible::value is a compile-time bool +// constant that's true iff type From can be implicitly converted to +// type To. +template +class ImplicitlyConvertible { + private: + // We need the following helper functions only for their types. + // They have no implementations. + + // MakeFrom() is an expression whose type is From. We cannot simply + // use From(), as the type From may not have a public default + // constructor. + static typename AddReference::type MakeFrom(); + + // These two functions are overloaded. Given an expression + // Helper(x), the compiler will pick the first version if x can be + // implicitly converted to type To; otherwise it will pick the + // second version. + // + // The first version returns a value of size 1, and the second + // version returns a value of size 2. Therefore, by checking the + // size of Helper(x), which can be done at compile time, we can tell + // which version of Helper() is used, and hence whether x can be + // implicitly converted to type To. 
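+  // For example (illustrative), ImplicitlyConvertible<int, double>::value
+  // is true, while ImplicitlyConvertible<double*, int*>::value is false.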
+ static char Helper(To); + static char (&Helper(...))[2]; // NOLINT + + // We have to put the 'public' section after the 'private' section, + // or MSVC refuses to compile the code. + public: +#if defined(__BORLANDC__) + // C++Builder cannot use member overload resolution during template + // instantiation. The simplest workaround is to use its C++0x type traits + // functions (C++Builder 2009 and above only). + static const bool value = __is_convertible(From, To); +#else + // MSVC warns about implicitly converting from double to int for + // possible loss of data, so we need to temporarily disable the + // warning. + GTEST_DISABLE_MSC_WARNINGS_PUSH_(4244) + static const bool value = + sizeof(Helper(ImplicitlyConvertible::MakeFrom())) == 1; + GTEST_DISABLE_MSC_WARNINGS_POP_() +#endif // __BORLANDC__ +}; +template +const bool ImplicitlyConvertible::value; + +// IsAProtocolMessage::value is a compile-time bool constant that's +// true iff T is type ProtocolMessage, proto2::Message, or a subclass +// of those. +template +struct IsAProtocolMessage + : public bool_constant< + ImplicitlyConvertible::value || + ImplicitlyConvertible::value> { +}; + +// When the compiler sees expression IsContainerTest(0), if C is an +// STL-style container class, the first overload of IsContainerTest +// will be viable (since both C::iterator* and C::const_iterator* are +// valid types and NULL can be implicitly converted to them). It will +// be picked over the second overload as 'int' is a perfect match for +// the type of argument 0. If C::iterator or C::const_iterator is not +// a valid type, the first overload is not viable, and the second +// overload will be picked. Therefore, we can determine whether C is +// a container class by checking the type of IsContainerTest(0). +// The value of the expression is insignificant. +// +// Note that we look for both C::iterator and C::const_iterator. The +// reason is that C++ injects the name of a class as a member of the +// class itself (e.g. you can refer to class iterator as either +// 'iterator' or 'iterator::iterator'). If we look for C::iterator +// only, for example, we would mistakenly think that a class named +// iterator is an STL container. +// +// Also note that the simpler approach of overloading +// IsContainerTest(typename C::const_iterator*) and +// IsContainerTest(...) doesn't work with Visual Age C++ and Sun C++. +typedef int IsContainer; +template +IsContainer IsContainerTest(int /* dummy */, + typename C::iterator* /* it */ = NULL, + typename C::const_iterator* /* const_it */ = NULL) { + return 0; +} + +typedef char IsNotContainer; +template +IsNotContainer IsContainerTest(long /* dummy */) { return '\0'; } + +// EnableIf::type is void when 'Cond' is true, and +// undefined when 'Cond' is false. To use SFINAE to make a function +// overload only apply when a particular expression is true, add +// "typename EnableIf::type* = 0" as the last parameter. +template struct EnableIf; +template<> struct EnableIf { typedef void type; }; // NOLINT + +// Utilities for native arrays. + +// ArrayEq() compares two k-dimensional native arrays using the +// elements' operator==, where k can be any integer >= 0. When k is +// 0, ArrayEq() degenerates into comparing a single pair of values. + +template +bool ArrayEq(const T* lhs, size_t size, const U* rhs); + +// This generic version is used when k is 0. +template +inline bool ArrayEq(const T& lhs, const U& rhs) { return lhs == rhs; } + +// This overload is used when k >= 1. 
+template +inline bool ArrayEq(const T(&lhs)[N], const U(&rhs)[N]) { + return internal::ArrayEq(lhs, N, rhs); +} + +// This helper reduces code bloat. If we instead put its logic inside +// the previous ArrayEq() function, arrays with different sizes would +// lead to different copies of the template code. +template +bool ArrayEq(const T* lhs, size_t size, const U* rhs) { + for (size_t i = 0; i != size; i++) { + if (!internal::ArrayEq(lhs[i], rhs[i])) + return false; + } + return true; +} + +// Finds the first element in the iterator range [begin, end) that +// equals elem. Element may be a native array type itself. +template +Iter ArrayAwareFind(Iter begin, Iter end, const Element& elem) { + for (Iter it = begin; it != end; ++it) { + if (internal::ArrayEq(*it, elem)) + return it; + } + return end; +} + +// CopyArray() copies a k-dimensional native array using the elements' +// operator=, where k can be any integer >= 0. When k is 0, +// CopyArray() degenerates into copying a single value. + +template +void CopyArray(const T* from, size_t size, U* to); + +// This generic version is used when k is 0. +template +inline void CopyArray(const T& from, U* to) { *to = from; } + +// This overload is used when k >= 1. +template +inline void CopyArray(const T(&from)[N], U(*to)[N]) { + internal::CopyArray(from, N, *to); +} + +// This helper reduces code bloat. If we instead put its logic inside +// the previous CopyArray() function, arrays with different sizes +// would lead to different copies of the template code. +template +void CopyArray(const T* from, size_t size, U* to) { + for (size_t i = 0; i != size; i++) { + internal::CopyArray(from[i], to + i); + } +} + +// The relation between an NativeArray object (see below) and the +// native array it represents. +// We use 2 different structs to allow non-copyable types to be used, as long +// as RelationToSourceReference() is passed. +struct RelationToSourceReference {}; +struct RelationToSourceCopy {}; + +// Adapts a native array to a read-only STL-style container. Instead +// of the complete STL container concept, this adaptor only implements +// members useful for Google Mock's container matchers. New members +// should be added as needed. To simplify the implementation, we only +// support Element being a raw type (i.e. having no top-level const or +// reference modifier). It's the client's responsibility to satisfy +// this requirement. Element can be an array type itself (hence +// multi-dimensional arrays are supported). +template +class NativeArray { + public: + // STL-style container typedefs. + typedef Element value_type; + typedef Element* iterator; + typedef const Element* const_iterator; + + // Constructs from a native array. References the source. + NativeArray(const Element* array, size_t count, RelationToSourceReference) { + InitRef(array, count); + } + + // Constructs from a native array. Copies the source. + NativeArray(const Element* array, size_t count, RelationToSourceCopy) { + InitCopy(array, count); + } + + // Copy constructor. + NativeArray(const NativeArray& rhs) { + (this->*rhs.clone_)(rhs.array_, rhs.size_); + } + + ~NativeArray() { + if (clone_ != &NativeArray::InitRef) + delete[] array_; + } + + // STL-style container methods. 
+ size_t size() const { return size_; } + const_iterator begin() const { return array_; } + const_iterator end() const { return array_ + size_; } + bool operator==(const NativeArray& rhs) const { + return size() == rhs.size() && + ArrayEq(begin(), size(), rhs.begin()); + } + + private: + enum { + kCheckTypeIsNotConstOrAReference = StaticAssertTypeEqHelper< + Element, GTEST_REMOVE_REFERENCE_AND_CONST_(Element)>::value, + }; + + // Initializes this object with a copy of the input. + void InitCopy(const Element* array, size_t a_size) { + Element* const copy = new Element[a_size]; + CopyArray(array, a_size, copy); + array_ = copy; + size_ = a_size; + clone_ = &NativeArray::InitCopy; + } + + // Initializes this object with a reference of the input. + void InitRef(const Element* array, size_t a_size) { + array_ = array; + size_ = a_size; + clone_ = &NativeArray::InitRef; + } + + const Element* array_; + size_t size_; + void (NativeArray::*clone_)(const Element*, size_t); + + GTEST_DISALLOW_ASSIGN_(NativeArray); +}; + +} // namespace internal +} // namespace testing + +#define GTEST_MESSAGE_AT_(file, line, message, result_type) \ + ::testing::internal::AssertHelper(result_type, file, line, message) \ + = ::testing::Message() + +#define GTEST_MESSAGE_(message, result_type) \ + GTEST_MESSAGE_AT_(__FILE__, __LINE__, message, result_type) + +#define GTEST_FATAL_FAILURE_(message) \ + return GTEST_MESSAGE_(message, ::testing::TestPartResult::kFatalFailure) + +#define GTEST_NONFATAL_FAILURE_(message) \ + GTEST_MESSAGE_(message, ::testing::TestPartResult::kNonFatalFailure) + +#define GTEST_SUCCESS_(message) \ + GTEST_MESSAGE_(message, ::testing::TestPartResult::kSuccess) + +// Suppresses MSVC warnings 4072 (unreachable code) for the code following +// statement if it returns or throws (or doesn't return or throw in some +// situations). +#define GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement) \ + if (::testing::internal::AlwaysTrue()) { statement; } + +#define GTEST_TEST_THROW_(statement, expected_exception, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::ConstCharPtr gtest_msg = "") { \ + bool gtest_caught_expected = false; \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (expected_exception const&) { \ + gtest_caught_expected = true; \ + } \ + catch (...) { \ + gtest_msg.value = \ + "Expected: " #statement " throws an exception of type " \ + #expected_exception ".\n Actual: it throws a different type."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \ + } \ + if (!gtest_caught_expected) { \ + gtest_msg.value = \ + "Expected: " #statement " throws an exception of type " \ + #expected_exception ".\n Actual: it throws nothing."; \ + goto GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__); \ + } \ + } else \ + GTEST_CONCAT_TOKEN_(gtest_label_testthrow_, __LINE__): \ + fail(gtest_msg.value) + +#define GTEST_TEST_NO_THROW_(statement, fail) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + try { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } \ + catch (...) 
{ \
+      goto GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__); \
+    } \
+  } else \
+    GTEST_CONCAT_TOKEN_(gtest_label_testnothrow_, __LINE__): \
+      fail("Expected: " #statement " doesn't throw an exception.\n" \
+           "  Actual: it throws.")
+
+#define GTEST_TEST_ANY_THROW_(statement, fail) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (::testing::internal::AlwaysTrue()) { \
+    bool gtest_caught_any = false; \
+    try { \
+      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+    } \
+    catch (...) { \
+      gtest_caught_any = true; \
+    } \
+    if (!gtest_caught_any) { \
+      goto GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__); \
+    } \
+  } else \
+    GTEST_CONCAT_TOKEN_(gtest_label_testanythrow_, __LINE__): \
+      fail("Expected: " #statement " throws an exception.\n" \
+           "  Actual: it doesn't.")
+
+
+// Implements Boolean test assertions such as EXPECT_TRUE.  expression can be
+// either a boolean expression or an AssertionResult.  text is a textual
+// representation of expression as it was passed into the EXPECT_TRUE.
+#define GTEST_TEST_BOOLEAN_(expression, text, actual, expected, fail) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (const ::testing::AssertionResult gtest_ar_ = \
+      ::testing::AssertionResult(expression)) \
+    ; \
+  else \
+    fail(::testing::internal::GetBoolAssertionFailureMessage(\
+        gtest_ar_, text, #actual, #expected).c_str())
+
+#define GTEST_TEST_NO_FATAL_FAILURE_(statement, fail) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (::testing::internal::AlwaysTrue()) { \
+    ::testing::internal::HasNewFatalFailureHelper gtest_fatal_failure_checker; \
+    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+    if (gtest_fatal_failure_checker.has_new_fatal_failure()) { \
+      goto GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__); \
+    } \
+  } else \
+    GTEST_CONCAT_TOKEN_(gtest_label_testnofatal_, __LINE__): \
+      fail("Expected: " #statement " doesn't generate new fatal " \
+           "failures in the current thread.\n" \
+           "  Actual: it does.")
+
+// Expands to the name of the class that implements the given test.
+#define GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+  test_case_name##_##test_name##_Test
+
+// Helper macro for defining tests.
+#define GTEST_TEST_(test_case_name, test_name, parent_class, parent_id)\
+class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) : public parent_class {\
+ public:\
+  GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {}\
+ private:\
+  virtual void TestBody();\
+  static ::testing::TestInfo* const test_info_ GTEST_ATTRIBUTE_UNUSED_;\
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+      GTEST_TEST_CLASS_NAME_(test_case_name, test_name));\
+};\
+\
+::testing::TestInfo* const GTEST_TEST_CLASS_NAME_(test_case_name, test_name)\
+  ::test_info_ =\
+    ::testing::internal::MakeAndRegisterTestInfo(\
+        #test_case_name, #test_name, NULL, NULL, \
+        (parent_id), \
+        parent_class::SetUpTestCase, \
+        parent_class::TearDownTestCase, \
+        new ::testing::internal::TestFactoryImpl<\
+            GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>);\
+void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_INTERNAL_H_
+
+// Copyright 2005, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Author: wan@google.com (Zhanyong Wan) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines the public API for death tests. It is +// #included by gtest.h so a user doesn't need to include this +// directly. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ +#define GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ + +// Copyright 2005, Google Inc. +// All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +// +// Authors: wan@google.com (Zhanyong Wan), eefacm@gmail.com (Sean Mcafee) +// +// The Google C++ Testing Framework (Google Test) +// +// This header file defines internal utilities needed for implementing +// death tests. They are subject to change without notice. 
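+// For orientation, the user-level API built on top of these internal
+// utilities looks like this (a sketch based on gtest's documented
+// death-test usage; CrashUnlessPositive is hypothetical):
+//
+//   TEST(MyDeathTest, RejectsNegativeInput) {
+//     EXPECT_DEATH(CrashUnlessPositive(-1), "negative input");
+//   }
+//
+// The statement must terminate the process (with an unsuccessful exit
+// status, by default) and its stderr output must match the given
+// regular expression for the assertion to pass.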
+ +#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ +#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_ + + +#include + +namespace testing { +namespace internal { + +GTEST_DECLARE_string_(internal_run_death_test); + +// Names of the flags (needed for parsing Google Test flags). +const char kDeathTestStyleFlag[] = "death_test_style"; +const char kDeathTestUseFork[] = "death_test_use_fork"; +const char kInternalRunDeathTestFlag[] = "internal_run_death_test"; + +#if GTEST_HAS_DEATH_TEST + +// DeathTest is a class that hides much of the complexity of the +// GTEST_DEATH_TEST_ macro. It is abstract; its static Create method +// returns a concrete class that depends on the prevailing death test +// style, as defined by the --gtest_death_test_style and/or +// --gtest_internal_run_death_test flags. + +// In describing the results of death tests, these terms are used with +// the corresponding definitions: +// +// exit status: The integer exit information in the format specified +// by wait(2) +// exit code: The integer code passed to exit(3), _exit(2), or +// returned from main() +class GTEST_API_ DeathTest { + public: + // Create returns false if there was an error determining the + // appropriate action to take for the current death test; for example, + // if the gtest_death_test_style flag is set to an invalid value. + // The LastMessage method will return a more detailed message in that + // case. Otherwise, the DeathTest pointer pointed to by the "test" + // argument is set. If the death test should be skipped, the pointer + // is set to NULL; otherwise, it is set to the address of a new concrete + // DeathTest object that controls the execution of the current test. + static bool Create(const char* statement, const RE* regex, + const char* file, int line, DeathTest** test); + DeathTest(); + virtual ~DeathTest() { } + + // A helper class that aborts a death test when it's deleted. + class ReturnSentinel { + public: + explicit ReturnSentinel(DeathTest* test) : test_(test) { } + ~ReturnSentinel() { test_->Abort(TEST_ENCOUNTERED_RETURN_STATEMENT); } + private: + DeathTest* const test_; + GTEST_DISALLOW_COPY_AND_ASSIGN_(ReturnSentinel); + } GTEST_ATTRIBUTE_UNUSED_; + + // An enumeration of possible roles that may be taken when a death + // test is encountered. EXECUTE means that the death test logic should + // be executed immediately. OVERSEE means that the program should prepare + // the appropriate environment for a child process to execute the death + // test, then wait for it to complete. + enum TestRole { OVERSEE_TEST, EXECUTE_TEST }; + + // An enumeration of the three reasons that a test might be aborted. + enum AbortReason { + TEST_ENCOUNTERED_RETURN_STATEMENT, + TEST_THREW_EXCEPTION, + TEST_DID_NOT_DIE + }; + + // Assumes one of the above roles. + virtual TestRole AssumeRole() = 0; + + // Waits for the death test to finish and returns its status. + virtual int Wait() = 0; + + // Returns true if the death test passed; that is, the test process + // exited during the test, its exit status matches a user-supplied + // predicate, and its stderr output matches a user-supplied regular + // expression. + // The user-supplied predicate may be a macro expression rather + // than a function pointer or functor, or else Wait and Passed could + // be combined. + virtual bool Passed(bool exit_status_ok) = 0; + + // Signals that the death test did not die as expected. 
+  virtual void Abort(AbortReason reason) = 0;
+
+  // Returns a human-readable outcome message regarding the outcome of
+  // the last death test.
+  static const char* LastMessage();
+
+  static void set_last_death_test_message(const std::string& message);
+
+ private:
+  // A string containing a description of the outcome of the last death test.
+  static std::string last_death_test_message_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(DeathTest);
+};
+
+// Factory interface for death tests. May be mocked out for testing.
+class DeathTestFactory {
+ public:
+  virtual ~DeathTestFactory() { }
+  virtual bool Create(const char* statement, const RE* regex,
+                      const char* file, int line, DeathTest** test) = 0;
+};
+
+// A concrete DeathTestFactory implementation for normal use.
+class DefaultDeathTestFactory : public DeathTestFactory {
+ public:
+  virtual bool Create(const char* statement, const RE* regex,
+                      const char* file, int line, DeathTest** test);
+};
+
+// Returns true if exit_status describes a process that was terminated
+// by a signal, or exited normally with a nonzero exit code.
+GTEST_API_ bool ExitedUnsuccessfully(int exit_status);
+
+// Traps C++ exceptions escaping statement and reports them as test
+// failures. Note that trapping SEH exceptions is not implemented here.
+# if GTEST_HAS_EXCEPTIONS
+#  define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+  try { \
+    GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+  } catch (const ::std::exception& gtest_exception) { \
+    fprintf(\
+        stderr, \
+        "\n%s: Caught std::exception-derived exception escaping the " \
+        "death test statement. Exception message: %s\n", \
+        ::testing::internal::FormatFileLocation(__FILE__, __LINE__).c_str(), \
+        gtest_exception.what()); \
+    fflush(stderr); \
+    death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+  } catch (...) { \
+    death_test->Abort(::testing::internal::DeathTest::TEST_THREW_EXCEPTION); \
+  }
+
+# else
+#  define GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, death_test) \
+  GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement)
+
+# endif
+
+// This macro is for implementing ASSERT_DEATH*, EXPECT_DEATH*,
+// ASSERT_EXIT*, and EXPECT_EXIT*.
+# define GTEST_DEATH_TEST_(statement, predicate, regex, fail) \
+  GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+  if (::testing::internal::AlwaysTrue()) { \
+    const ::testing::internal::RE& gtest_regex = (regex); \
+    ::testing::internal::DeathTest* gtest_dt; \
+    if (!::testing::internal::DeathTest::Create(#statement, &gtest_regex, \
+        __FILE__, __LINE__, &gtest_dt)) { \
+      goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+    } \
+    if (gtest_dt != NULL) { \
+      ::testing::internal::scoped_ptr< ::testing::internal::DeathTest> \
+          gtest_dt_ptr(gtest_dt); \
+      switch (gtest_dt->AssumeRole()) { \
+        case ::testing::internal::DeathTest::OVERSEE_TEST: \
+          if (!gtest_dt->Passed(predicate(gtest_dt->Wait()))) { \
+            goto GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__); \
+          } \
+          break; \
+        case ::testing::internal::DeathTest::EXECUTE_TEST: { \
+          ::testing::internal::DeathTest::ReturnSentinel \
+              gtest_sentinel(gtest_dt); \
+          GTEST_EXECUTE_DEATH_TEST_STATEMENT_(statement, gtest_dt); \
+          gtest_dt->Abort(::testing::internal::DeathTest::TEST_DID_NOT_DIE); \
+          break; \
+        } \
+        default: \
+          break; \
+      } \
+    } \
+  } else \
+    GTEST_CONCAT_TOKEN_(gtest_label_, __LINE__): \
+      fail(::testing::internal::DeathTest::LastMessage())
+// The symbol "fail" here expands to something into which a message
+// can be streamed.
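+// [Editorial sketch, not part of the upstream header] To make the
+// OVERSEE/EXECUTE state machine above concrete, here is what a
+// user-level death test built on this macro looks like; Divide() is a
+// hypothetical helper:
+//
+//   int Divide(int a, int b) {
+//     if (b == 0) abort();  // Dies via SIGABRT.
+//     return a / b;
+//   }
+//
+//   TEST(DivideDeathTest, DiesOnZeroDivisor) {
+//     // Expands to GTEST_DEATH_TEST_: the parent process forks/clones a
+//     // child, runs Divide(1, 0) there, then checks the child's exit
+//     // status and its stderr output against the regex.
+//     EXPECT_DEATH(Divide(1, 0), ".*");
+//   }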
+ +// This macro is for implementing ASSERT/EXPECT_DEBUG_DEATH when compiled in +// NDEBUG mode. In this case we need the statements to be executed, the regex is +// ignored, and the macro must accept a streamed message even though the message +// is never printed. +# define GTEST_EXECUTE_STATEMENT_(statement, regex) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (::testing::internal::AlwaysTrue()) { \ + GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \ + } else \ + ::testing::Message() + +// A class representing the parsed contents of the +// --gtest_internal_run_death_test flag, as it existed when +// RUN_ALL_TESTS was called. +class InternalRunDeathTestFlag { + public: + InternalRunDeathTestFlag(const std::string& a_file, + int a_line, + int an_index, + int a_write_fd) + : file_(a_file), line_(a_line), index_(an_index), + write_fd_(a_write_fd) {} + + ~InternalRunDeathTestFlag() { + if (write_fd_ >= 0) + posix::Close(write_fd_); + } + + const std::string& file() const { return file_; } + int line() const { return line_; } + int index() const { return index_; } + int write_fd() const { return write_fd_; } + + private: + std::string file_; + int line_; + int index_; + int write_fd_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(InternalRunDeathTestFlag); +}; + +// Returns a newly created InternalRunDeathTestFlag object with fields +// initialized from the GTEST_FLAG(internal_run_death_test) flag if +// the flag is specified; otherwise returns NULL. +InternalRunDeathTestFlag* ParseInternalRunDeathTestFlag(); + +#else // GTEST_HAS_DEATH_TEST + +// This macro is used for implementing macros such as +// EXPECT_DEATH_IF_SUPPORTED and ASSERT_DEATH_IF_SUPPORTED on systems where +// death tests are not supported. Those macros must compile on such systems +// iff EXPECT_DEATH and ASSERT_DEATH compile with the same parameters on +// systems that support death tests. This allows one to write such a macro +// on a system that does not support death tests and be sure that it will +// compile on a death-test supporting system. +// +// Parameters: +// statement - A statement that a macro such as EXPECT_DEATH would test +// for program termination. This macro has to make sure this +// statement is compiled but not executed, to ensure that +// EXPECT_DEATH_IF_SUPPORTED compiles with a certain +// parameter iff EXPECT_DEATH compiles with it. +// regex - A regex that a macro such as EXPECT_DEATH would use to test +// the output of statement. This parameter has to be +// compiled but not evaluated by this macro, to ensure that +// this macro only accepts expressions that a macro such as +// EXPECT_DEATH would accept. +// terminator - Must be an empty statement for EXPECT_DEATH_IF_SUPPORTED +// and a return statement for ASSERT_DEATH_IF_SUPPORTED. +// This ensures that ASSERT_DEATH_IF_SUPPORTED will not +// compile inside functions where ASSERT_DEATH doesn't +// compile. +// +// The branch that has an always false condition is used to ensure that +// statement and regex are compiled (and thus syntactically correct) but +// never executed. The unreachable code macro protects the terminator +// statement from generating an 'unreachable code' warning in case +// statement unconditionally returns or throws. The Message constructor at +// the end allows the syntax of streaming additional messages into the +// macro, for compilational compatibility with EXPECT_DEATH/ASSERT_DEATH. 
+# define GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, terminator) \
+    GTEST_AMBIGUOUS_ELSE_BLOCKER_ \
+    if (::testing::internal::AlwaysTrue()) { \
+      GTEST_LOG_(WARNING) \
+          << "Death tests are not supported on this platform.\n" \
+          << "Statement '" #statement "' cannot be verified."; \
+    } else if (::testing::internal::AlwaysFalse()) { \
+      ::testing::internal::RE::PartialMatch(".*", (regex)); \
+      GTEST_SUPPRESS_UNREACHABLE_CODE_WARNING_BELOW_(statement); \
+      terminator; \
+    } else \
+      ::testing::Message()
+
+#endif  // GTEST_HAS_DEATH_TEST
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_DEATH_TEST_INTERNAL_H_
+
+namespace testing {
+
+// This flag controls the style of death tests. Valid values are "threadsafe",
+// meaning that the death test child process will re-execute the test binary
+// from the start, running only a single death test, or "fast",
+// meaning that the child process will execute the test logic immediately
+// after forking.
+GTEST_DECLARE_string_(death_test_style);
+
+#if GTEST_HAS_DEATH_TEST
+
+namespace internal {
+
+// Returns a Boolean value indicating whether the caller is currently
+// executing in the context of the death test child process. Tools such as
+// Valgrind heap checkers may need this to modify their behavior in death
+// tests. IMPORTANT: This is an internal utility. Using it may break the
+// implementation of death tests. User code MUST NOT use it.
+GTEST_API_ bool InDeathTestChild();
+
+}  // namespace internal
+
+// The following macros are useful for writing death tests.
+
+// Here's what happens when an ASSERT_DEATH* or EXPECT_DEATH* is
+// executed:
+//
+// 1. It generates a warning if there is more than one active
+// thread. This is because it's safe to fork() or clone() only
+// when there is a single thread.
+//
+// 2. The parent process clone()s a sub-process and runs the death
+// test in it; the sub-process exits with code 0 at the end of the
+// death test, if it hasn't exited already.
+//
+// 3. The parent process waits for the sub-process to terminate.
+//
+// 4. The parent process checks the exit code and error message of
+// the sub-process.
+//
+// Examples:
+//
+//   ASSERT_DEATH(server.SendMessage(56, "Hello"), "Invalid port number");
+//   for (int i = 0; i < 5; i++) {
+//     EXPECT_DEATH(server.ProcessRequest(i),
+//                  "Invalid request .* in ProcessRequest()")
+//         << "Failed to die on request " << i;
+//   }
+//
+//   ASSERT_EXIT(server.ExitNow(), ::testing::ExitedWithCode(0), "Exiting");
+//
+//   bool KilledBySIGHUP(int exit_code) {
+//     return WIFSIGNALED(exit_code) && WTERMSIG(exit_code) == SIGHUP;
+//   }
+//
+//   ASSERT_EXIT(client.HangUpServer(), KilledBySIGHUP, "Hanging up!");
+//
+// On the regular expressions used in death tests:
+//
+// On POSIX-compliant systems (*nix), we use the <regex.h> library,
+// which uses the POSIX extended regex syntax.
+//
+// On other platforms (e.g. Windows), we only support a simple regex
+// syntax implemented as part of Google Test. This limited
+// implementation should be enough most of the time when writing
+// death tests; though it lacks many features you can find in PCRE
+// or POSIX extended regex syntax. For example, we don't support
+// union ("x|y"), grouping ("(xy)"), brackets ("[xy]"), and
+// repetition count ("x{5,7}"), among others.
+//
+// Below is the syntax that we do support. We chose it to be a
+// subset of both PCRE and POSIX extended regex, so it's easy to
+// learn wherever you come from.
In the following: 'A' denotes a +// literal character, period (.), or a single \\ escape sequence; +// 'x' and 'y' denote regular expressions; 'm' and 'n' are for +// natural numbers. +// +// c matches any literal character c +// \\d matches any decimal digit +// \\D matches any character that's not a decimal digit +// \\f matches \f +// \\n matches \n +// \\r matches \r +// \\s matches any ASCII whitespace, including \n +// \\S matches any character that's not a whitespace +// \\t matches \t +// \\v matches \v +// \\w matches any letter, _, or decimal digit +// \\W matches any character that \\w doesn't match +// \\c matches any literal character c, which must be a punctuation +// . matches any single character except \n +// A? matches 0 or 1 occurrences of A +// A* matches 0 or many occurrences of A +// A+ matches 1 or many occurrences of A +// ^ matches the beginning of a string (not that of each line) +// $ matches the end of a string (not that of each line) +// xy matches x followed by y +// +// If you accidentally use PCRE or POSIX extended regex features +// not implemented by us, you will get a run-time failure. In that +// case, please try to rewrite your regular expression within the +// above syntax. +// +// This implementation is *not* meant to be as highly tuned or robust +// as a compiled regex library, but should perform well enough for a +// death test, which already incurs significant overhead by launching +// a child process. +// +// Known caveats: +// +// A "threadsafe" style death test obtains the path to the test +// program from argv[0] and re-executes it in the sub-process. For +// simplicity, the current implementation doesn't search the PATH +// when launching the sub-process. This means that the user must +// invoke the test program via a path that contains at least one +// path separator (e.g. path/to/foo_test and +// /absolute/path/to/bar_test are fine, but foo_test is not). This +// is rarely a problem as people usually don't put the test binary +// directory in PATH. +// +// TODO(wan@google.com): make thread-safe death tests search the PATH. + +// Asserts that a given statement causes the program to exit, with an +// integer exit status that satisfies predicate, and emitting error output +// that matches regex. +# define ASSERT_EXIT(statement, predicate, regex) \ + GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_FATAL_FAILURE_) + +// Like ASSERT_EXIT, but continues on to successive tests in the +// test case, if any: +# define EXPECT_EXIT(statement, predicate, regex) \ + GTEST_DEATH_TEST_(statement, predicate, regex, GTEST_NONFATAL_FAILURE_) + +// Asserts that a given statement causes the program to exit, either by +// explicitly exiting with a nonzero exit code or being killed by a +// signal, and emitting error output that matches regex. +# define ASSERT_DEATH(statement, regex) \ + ASSERT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex) + +// Like ASSERT_DEATH, but continues on to successive tests in the +// test case, if any: +# define EXPECT_DEATH(statement, regex) \ + EXPECT_EXIT(statement, ::testing::internal::ExitedUnsuccessfully, regex) + +// Two predicate classes that can be used in {ASSERT,EXPECT}_EXIT*: + +// Tests that an exit code describes a normal exit with a given exit code. +class GTEST_API_ ExitedWithCode { + public: + explicit ExitedWithCode(int exit_code); + bool operator()(int exit_status) const; + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ExitedWithCode& other); + + const int exit_code_; +}; + +# if !GTEST_OS_WINDOWS +// Tests that an exit code describes an exit due to termination by a +// given signal. +class GTEST_API_ KilledBySignal { + public: + explicit KilledBySignal(int signum); + bool operator()(int exit_status) const; + private: + const int signum_; +}; +# endif // !GTEST_OS_WINDOWS + +// EXPECT_DEBUG_DEATH asserts that the given statements die in debug mode. +// The death testing framework causes this to have interesting semantics, +// since the sideeffects of the call are only visible in opt mode, and not +// in debug mode. +// +// In practice, this can be used to test functions that utilize the +// LOG(DFATAL) macro using the following style: +// +// int DieInDebugOr12(int* sideeffect) { +// if (sideeffect) { +// *sideeffect = 12; +// } +// LOG(DFATAL) << "death"; +// return 12; +// } +// +// TEST(TestCase, TestDieOr12WorksInDgbAndOpt) { +// int sideeffect = 0; +// // Only asserts in dbg. +// EXPECT_DEBUG_DEATH(DieInDebugOr12(&sideeffect), "death"); +// +// #ifdef NDEBUG +// // opt-mode has sideeffect visible. +// EXPECT_EQ(12, sideeffect); +// #else +// // dbg-mode no visible sideeffect. +// EXPECT_EQ(0, sideeffect); +// #endif +// } +// +// This will assert that DieInDebugReturn12InOpt() crashes in debug +// mode, usually due to a DCHECK or LOG(DFATAL), but returns the +// appropriate fallback value (12 in this case) in opt mode. If you +// need to test that a function has appropriate side-effects in opt +// mode, include assertions against the side-effects. A general +// pattern for this is: +// +// EXPECT_DEBUG_DEATH({ +// // Side-effects here will have an effect after this statement in +// // opt mode, but none in debug mode. +// EXPECT_EQ(12, DieInDebugOr12(&sideeffect)); +// }, "death"); +// +# ifdef NDEBUG + +# define EXPECT_DEBUG_DEATH(statement, regex) \ + GTEST_EXECUTE_STATEMENT_(statement, regex) + +# define ASSERT_DEBUG_DEATH(statement, regex) \ + GTEST_EXECUTE_STATEMENT_(statement, regex) + +# else + +# define EXPECT_DEBUG_DEATH(statement, regex) \ + EXPECT_DEATH(statement, regex) + +# define ASSERT_DEBUG_DEATH(statement, regex) \ + ASSERT_DEATH(statement, regex) + +# endif // NDEBUG for EXPECT_DEBUG_DEATH +#endif // GTEST_HAS_DEATH_TEST + +// EXPECT_DEATH_IF_SUPPORTED(statement, regex) and +// ASSERT_DEATH_IF_SUPPORTED(statement, regex) expand to real death tests if +// death tests are supported; otherwise they just issue a warning. This is +// useful when you are combining death test assertions with normal test +// assertions in one test. +#if GTEST_HAS_DEATH_TEST +# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + EXPECT_DEATH(statement, regex) +# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + ASSERT_DEATH(statement, regex) +#else +# define EXPECT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, ) +# define ASSERT_DEATH_IF_SUPPORTED(statement, regex) \ + GTEST_UNSUPPORTED_DEATH_TEST_(statement, regex, return) +#endif + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_DEATH_TEST_H_ +// This file was GENERATED by command: +// pump.py gtest-param-test.h.pump +// DO NOT EDIT BY HAND!!! + +// Copyright 2008, Google Inc. +// All rights reserved. 
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: vladl@google.com (Vlad Losev)
+//
+// Macros and functions for implementing parameterized tests
+// in Google C++ Testing Framework (Google Test)
+//
+// This file is generated by a SCRIPT. DO NOT EDIT BY HAND!
+//
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+
+
+// Value-parameterized tests allow you to test your code with different
+// parameters without writing multiple copies of the same test.
+//
+// Here is how you use value-parameterized tests:
+
+#if 0
+
+// To write value-parameterized tests, first you should define a fixture
+// class. It is usually derived from testing::TestWithParam<T> (see below for
+// another inheritance scheme that's sometimes useful in more complicated
+// class hierarchies), where T is the type of your parameter values.
+// TestWithParam<T> is itself derived from testing::Test. T can be any
+// copyable type. If it's a raw pointer, you are responsible for managing the
+// lifespan of the pointed values.
+
+class FooTest : public ::testing::TestWithParam<const char*> {
+  // You can implement all the usual class fixture members here.
+};
+
+// Then, use the TEST_P macro to define as many parameterized tests
+// for this fixture as you want. The _P suffix is for "parameterized"
+// or "pattern", whichever you prefer to think.
+
+TEST_P(FooTest, DoesBlah) {
+  // Inside a test, access the test parameter with the GetParam() method
+  // of the TestWithParam<T> class:
+  EXPECT_TRUE(foo.Blah(GetParam()));
+  ...
+}
+
+TEST_P(FooTest, HasBlahBlah) {
+  ...
+}
+
+// Finally, you can use INSTANTIATE_TEST_CASE_P to instantiate the test
+// case with any set of parameters you want. Google Test defines a number
+// of functions for generating test parameters. They return what we call
+// (surprise!) parameter generators. Here is a summary of them, which
+// are all in the testing namespace:
+//
+//
+//  Range(begin, end [, step]) - Yields values {begin, begin+step,
+//                               begin+step+step, ...}. The values do not
+//                               include end.
+//                               step defaults to 1.
+//  Values(v1, v2, ..., vN)    - Yields values {v1, v2, ..., vN}.
+//  ValuesIn(container)        - Yields values from a C-style array, an STL
+//  ValuesIn(begin,end)          container, or an iterator range [begin, end).
+//  Bool()                     - Yields sequence {false, true}.
+//  Combine(g1, g2, ..., gN)   - Yields all combinations (the Cartesian product
+//                               for the math savvy) of the values generated
+//                               by the N generators.
+//
+// For more details, see comments at the definitions of these functions below
+// in this file.
+//
+// The following statement will instantiate tests from the FooTest test case
+// each with parameter values "meeny", "miny", and "moe".
+
+INSTANTIATE_TEST_CASE_P(InstantiationName,
+                        FooTest,
+                        Values("meeny", "miny", "moe"));
+
+// To distinguish different instances of the pattern, (yes, you
+// can instantiate it more than once) the first argument to the
+// INSTANTIATE_TEST_CASE_P macro is a prefix that will be added to the
+// actual test case name. Remember to pick unique prefixes for different
+// instantiations. The tests from the instantiation above will have
+// these names:
+//
+// * InstantiationName/FooTest.DoesBlah/0 for "meeny"
+// * InstantiationName/FooTest.DoesBlah/1 for "miny"
+// * InstantiationName/FooTest.DoesBlah/2 for "moe"
+// * InstantiationName/FooTest.HasBlahBlah/0 for "meeny"
+// * InstantiationName/FooTest.HasBlahBlah/1 for "miny"
+// * InstantiationName/FooTest.HasBlahBlah/2 for "moe"
+//
+// You can use these names in --gtest_filter.
+//
+// This statement will instantiate all tests from FooTest again, each
+// with parameter values "cat" and "dog":
+
+const char* pets[] = {"cat", "dog"};
+INSTANTIATE_TEST_CASE_P(AnotherInstantiationName, FooTest, ValuesIn(pets));
+
+// The tests from the instantiation above will have these names:
+//
+// * AnotherInstantiationName/FooTest.DoesBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.DoesBlah/1 for "dog"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/0 for "cat"
+// * AnotherInstantiationName/FooTest.HasBlahBlah/1 for "dog"
+//
+// Please note that INSTANTIATE_TEST_CASE_P will instantiate all tests
+// in the given test case, whether their definitions come before or
+// AFTER the INSTANTIATE_TEST_CASE_P statement.
+//
+// Please also note that generator expressions (including parameters to the
+// generators) are evaluated in InitGoogleTest(), after main() has started.
+// This allows the user, on one hand, to adjust generator parameters in order
+// to dynamically determine a set of tests to run and, on the other hand, to
+// inspect the generated tests with the Google Test reflection API before
+// RUN_ALL_TESTS() is executed.
+//
+// You can see samples/sample7_unittest.cc and samples/sample8_unittest.cc
+// for more examples.
+//
+// In the future, we plan to publish the API for defining new parameter
+// generators. But for now this interface remains part of the internal
+// implementation and is subject to change.
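+// [Editorial sketch, not part of the upstream header] A minimal example
+// of the generators summarized above; PrimeTest is a hypothetical
+// fixture, and Range(1, 10, 2) yields 1, 3, 5, 7, 9 (10 is excluded):
+//
+//   class PrimeTest : public ::testing::TestWithParam<int> {};
+//
+//   TEST_P(PrimeTest, IsOdd) {
+//     EXPECT_EQ(1, GetParam() % 2);
+//   }
+//
+//   INSTANTIATE_TEST_CASE_P(OddNumbers, PrimeTest,
+//                           ::testing::Range(1, 10, 2));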
+//
+// A parameterized test fixture must be derived from testing::Test and from
+// testing::WithParamInterface<T>, where T is the type of the parameter
+// values. Inheriting from TestWithParam<T> satisfies that requirement because
+// TestWithParam<T> inherits from both Test and WithParamInterface<T>. In more
+// complicated hierarchies, however, it is occasionally useful to inherit
+// separately from Test and WithParamInterface. For example:
+
+class BaseTest : public ::testing::Test {
+  // You can inherit all the usual members for a non-parameterized test
+  // fixture here.
+};
+
+class DerivedTest : public BaseTest, public ::testing::WithParamInterface<int> {
+  // The usual test fixture members go here too.
+};
+
+TEST_F(BaseTest, HasFoo) {
+  // This is an ordinary non-parameterized test.
+}
+
+TEST_P(DerivedTest, DoesBlah) {
+  // GetParam works just the same here as if you inherit from TestWithParam<T>.
+  EXPECT_TRUE(foo.Blah(GetParam()));
+}
+
+#endif  // 0
+
+
+#if !GTEST_OS_SYMBIAN
+# include <utility>
+#endif
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+
+#include <iterator>
+#include <utility>
+#include <vector>
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*. Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+// Copyright 2003 Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Authors: Dan Egnor (egnor@google.com)
+//
+// A "smart" pointer type with reference tracking. Every pointer to a
+// particular object is kept on a circular linked list. When the last pointer
+// to an object is destroyed or reassigned, the object is deleted.
+//
+// Used properly, this deletes the object when the last reference goes away.
+// There are several caveats:
+// - Like all reference counting schemes, cycles lead to leaks.
+// - Each smart pointer is actually two pointers (8 bytes instead of 4).
+// - Every time a pointer is assigned, the entire list of pointers to that
+//   object is traversed. This class is therefore NOT SUITABLE when there
+//   will often be more than two or three pointers to a particular object.
+// - References are only tracked as long as linked_ptr<> objects are copied.
+//   If a linked_ptr<> is converted to a raw pointer and back, BAD THINGS
+//   will happen (double deletion).
+//
+// A good use of this class is storing object references in STL containers.
+// You can safely put linked_ptr<> in a vector<>.
+// Other uses may not be as good.
+//
+// Note: If you use an incomplete type with linked_ptr<>, the class
+// *containing* linked_ptr<> must have a constructor and destructor (even
+// if they do nothing!).
+//
+// Bill Gibbons suggested we use something like this.
+//
+// Thread Safety:
+//   Unlike other linked_ptr implementations, in this implementation
+//   a linked_ptr object is thread-safe in the sense that:
+//     - it's safe to copy linked_ptr objects concurrently,
+//     - it's safe to copy *from* a linked_ptr and read its underlying
+//       raw pointer (e.g. via get()) concurrently, and
+//     - it's safe to write to two linked_ptrs that point to the same
+//       shared object concurrently.
+// TODO(wan@google.com): rename this to safe_linked_ptr to avoid
+// confusion with normal linked_ptr.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+
+#include <stdlib.h>
+#include <assert.h>
+
+
+namespace testing {
+namespace internal {
+
+// Protects copying of all linked_ptr objects.
+GTEST_API_ GTEST_DECLARE_STATIC_MUTEX_(g_linked_ptr_mutex);
+
+// This is used internally by all instances of linked_ptr<>. It needs to be
+// a non-template class because different types of linked_ptr<> can refer to
+// the same object (linked_ptr<Superclass>(obj) vs linked_ptr<Subclass>(obj)).
+// So, it needs to be possible for different types of linked_ptr to participate
+// in the same circular linked list, so we need a single class type here.
+//
+// DO NOT USE THIS CLASS DIRECTLY YOURSELF. Use linked_ptr<T>.
+class linked_ptr_internal {
+ public:
+  // Create a new circle that includes only this instance.
+  void join_new() {
+    next_ = this;
+  }
+
+  // Many linked_ptr operations may change p.link_ for some linked_ptr
+  // variable p in the same circle as this object. Therefore we need
+  // to prevent two such operations from occurring concurrently.
+  //
+  // Note that different types of linked_ptr objects can coexist in a
+  // circle (e.g. linked_ptr<Base>, linked_ptr<Derived1>, and
+  // linked_ptr<Derived2>). Therefore we must use a single mutex to
+  // protect all linked_ptr objects. This can create serious
+  // contention in production code, but is acceptable in a testing
+  // framework.
+
+  // Join an existing circle.
+  void join(linked_ptr_internal const* ptr)
+      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
+    MutexLock lock(&g_linked_ptr_mutex);
+
+    linked_ptr_internal const* p = ptr;
+    while (p->next_ != ptr) {
+      assert(p->next_ != this &&
+             "Trying to join() a linked ring we are already in. "
+             "Is GMock thread safety enabled?");
+      p = p->next_;
+    }
+    p->next_ = this;
+    next_ = ptr;
+  }
+
+  // Leave whatever circle we're part of. Returns true if we were the
+  // last member of the circle. Once this is done, you can join() another.
+  bool depart()
+      GTEST_LOCK_EXCLUDED_(g_linked_ptr_mutex) {
+    MutexLock lock(&g_linked_ptr_mutex);
+
+    if (next_ == this) return true;
+    linked_ptr_internal const* p = next_;
+    while (p->next_ != this) {
+      assert(p->next_ != next_ &&
+             "Trying to depart() a linked ring we are not in. "
+             "Is GMock thread safety enabled?");
+      p = p->next_;
+    }
+    p->next_ = next_;
+    return false;
+  }
+
+ private:
+  mutable linked_ptr_internal const* next_;
+};
+
+template <typename T>
+class linked_ptr {
+ public:
+  typedef T element_type;
+
+  // Take over ownership of a raw pointer. This should happen as soon as
+  // possible after the object is created.
+  explicit linked_ptr(T* ptr = NULL) { capture(ptr); }
+  ~linked_ptr() { depart(); }
+
+  // Copy an existing linked_ptr<>, adding ourselves to the list of references.
+  template <typename U> linked_ptr(linked_ptr<U> const& ptr) { copy(&ptr); }
+  linked_ptr(linked_ptr const& ptr) {  // NOLINT
+    assert(&ptr != this);
+    copy(&ptr);
+  }
+
+  // Assignment releases the old value and acquires the new.
+  template <typename U> linked_ptr& operator=(linked_ptr<U> const& ptr) {
+    depart();
+    copy(&ptr);
+    return *this;
+  }
+
+  linked_ptr& operator=(linked_ptr const& ptr) {
+    if (&ptr != this) {
+      depart();
+      copy(&ptr);
+    }
+    return *this;
+  }
+
+  // Smart pointer members.
+  void reset(T* ptr = NULL) {
+    depart();
+    capture(ptr);
+  }
+  T* get() const { return value_; }
+  T* operator->() const { return value_; }
+  T& operator*() const { return *value_; }
+
+  bool operator==(T* p) const { return value_ == p; }
+  bool operator!=(T* p) const { return value_ != p; }
+  template <typename U>
+  bool operator==(linked_ptr<U> const& ptr) const {
+    return value_ == ptr.get();
+  }
+  template <typename U>
+  bool operator!=(linked_ptr<U> const& ptr) const {
+    return value_ != ptr.get();
+  }
+
+ private:
+  template <typename U>
+  friend class linked_ptr;
+
+  T* value_;
+  linked_ptr_internal link_;
+
+  void depart() {
+    if (link_.depart()) delete value_;
+  }
+
+  void capture(T* ptr) {
+    value_ = ptr;
+    link_.join_new();
+  }
+
+  template <typename U> void copy(linked_ptr<U> const* ptr) {
+    value_ = ptr->get();
+    if (value_)
+      link_.join(&ptr->link_);
+    else
+      link_.join_new();
+  }
+};
+
+template<typename T> inline
+bool operator==(T* ptr, const linked_ptr<T>& x) {
+  return ptr == x.get();
+}
+
+template<typename T> inline
+bool operator!=(T* ptr, const linked_ptr<T>& x) {
+  return ptr != x.get();
+}
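+// [Editorial sketch, not part of the upstream header] The container use
+// case described above, with hypothetical Shape/Circle types; copying a
+// linked_ptr joins the reference ring, and the object is deleted when
+// the last copy departs:
+//
+//   std::vector<testing::internal::linked_ptr<Shape> > shapes;
+//   shapes.push_back(testing::internal::make_linked_ptr(new Circle));
+//   testing::internal::linked_ptr<Shape> copy = shapes[0];
+//   // Once 'shapes' and 'copy' are both destroyed, the Circle is deleted.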
+// A function to convert T* into linked_ptr<T>
+// Doing e.g. make_linked_ptr(new FooBarBaz<type>(arg)) is a shorter notation
+// for linked_ptr<FooBarBaz<type> >(new FooBarBaz<type>(arg))
+template <typename T>
+linked_ptr<T> make_linked_ptr(T* ptr) {
+  return linked_ptr<T>(ptr);
+}
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_LINKED_PTR_H_
+// Copyright 2007, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+// * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+// * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+// * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+// Google Test - The Google C++ Testing Framework
+//
+// This file implements a universal value printer that can print a
+// value of any type T:
+//
+//   void ::testing::internal::UniversalPrinter<T>::Print(value, ostream_ptr);
+//
+// A user can teach this function how to print a class type T by
+// defining either operator<<() or PrintTo() in the namespace that
+// defines T. More specifically, the FIRST defined function in the
+// following list will be used (assuming T is defined in namespace
+// foo):
+//
+//   1. foo::PrintTo(const T&, ostream*)
+//   2. operator<<(ostream&, const T&) defined in either foo or the
+//      global namespace.
+//
+// If none of the above is defined, it will print the debug string of
+// the value if it is a protocol buffer, or print the raw bytes in the
+// value otherwise.
+//
+// To aid debugging: when T is a reference type, the address of the
+// value is also printed; when T is a (const) char pointer, both the
+// pointer value and the NUL-terminated string it points to are
+// printed.
+//
+// We also provide some convenient wrappers:
+//
+//   // Prints a value to a string. For a (const or not) char
+//   // pointer, the NUL-terminated string (but not the pointer) is
+//   // printed.
+//   std::string ::testing::PrintToString(const T& value);
+//
+//   // Prints a value tersely: for a reference type, the referenced
+//   // value (but not the address) is printed; for a (const or not) char
+//   // pointer, the NUL-terminated string (but not the pointer) is
+//   // printed.
+//   void ::testing::internal::UniversalTersePrint(const T& value, ostream*);
+//
+//   // Prints value using the type inferred by the compiler. The difference
+//   // from UniversalTersePrint() is that this function prints both the
+//   // pointer and the NUL-terminated string for a (const or not) char pointer.
+//   void ::testing::internal::UniversalPrint(const T& value, ostream*);
+//
+//   // Prints the fields of a tuple tersely to a string vector, one
+//   // element for each field. Tuple support must be enabled in
+//   // gtest-port.h.
+//   std::vector<string> UniversalTersePrintTupleFieldsToStrings(
+//       const Tuple& value);
+//
+// Known limitation:
+//
+// The print primitives print the elements of an STL-style container
+// using the compiler-inferred type of *iter where iter is a
+// const_iterator of the container. When const_iterator is an input
+// iterator but not a forward iterator, this inferred type may not
+// match value_type, and the print output may be incorrect. In
+// practice, this is rarely a problem as for most containers
+// const_iterator is a forward iterator. We'll fix this if there's an
+// actual need for it. Note that this fix cannot rely on value_type
+// being defined as many user-defined container types don't have
+// value_type.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
+
+#include <ostream>  // NOLINT
+#include <sstream>
+#include <string>
+#include <utility>
+#include <vector>
+
+#if GTEST_HAS_STD_TUPLE_
+# include <tuple>
+#endif
+
+namespace testing {
+
+// Definitions in the 'internal' and 'internal2' name spaces are
+// subject to change without notice. DO NOT USE THEM IN USER CODE!
+namespace internal2 {
+
+// Prints the given number of bytes in the given object to the given
+// ostream.
+GTEST_API_ void PrintBytesInObjectTo(const unsigned char* obj_bytes,
+                                     size_t count,
+                                     ::std::ostream* os);
+
+// For selecting which printer to use when a given type has neither <<
+// nor PrintTo().
+enum TypeKind {
+  kProtobuf,              // a protobuf type
+  kConvertibleToInteger,  // a type implicitly convertible to BiggestInt
+                          // (e.g. a named or unnamed enum type)
+  kOtherType              // anything else
+};
+
+// TypeWithoutFormatter<T, kTypeKind>::PrintValue(value, os) is called
+// by the universal printer to print a value of type T when neither
+// operator<< nor PrintTo() is defined for T, where kTypeKind is the
+// "kind" of T as defined by enum TypeKind.
+template <typename T, TypeKind kTypeKind>
+class TypeWithoutFormatter {
+ public:
+  // This default version is called when kTypeKind is kOtherType.
+  static void PrintValue(const T& value, ::std::ostream* os) {
+    PrintBytesInObjectTo(reinterpret_cast<const unsigned char*>(&value),
+                         sizeof(value), os);
+  }
+};
+
+// We print a protobuf using its ShortDebugString() when the string
+// doesn't exceed this many characters; otherwise we print it using
+// DebugString() for better readability.
+const size_t kProtobufOneLinerMaxLength = 50;
+
+template <typename T>
+class TypeWithoutFormatter<T, kProtobuf> {
+ public:
+  static void PrintValue(const T& value, ::std::ostream* os) {
+    const ::testing::internal::string short_str = value.ShortDebugString();
+    const ::testing::internal::string pretty_str =
+        short_str.length() <= kProtobufOneLinerMaxLength ?
+        short_str : ("\n" + value.DebugString());
+    *os << ("<" + pretty_str + ">");
+  }
+};
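+// [Editorial sketch, not part of the upstream header] The
+// kConvertibleToInteger specialization below is what makes a plain
+// enum with no operator<< print as its integer value; Color is a
+// hypothetical type:
+//
+//   enum Color { kRed, kGreen, kBlue };
+//   // ::testing::PrintToString(kBlue) yields "2".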
+template <typename T>
+class TypeWithoutFormatter<T, kConvertibleToInteger> {
+ public:
+  // Since T has no << operator or PrintTo() but can be implicitly
+  // converted to BiggestInt, we print it as a BiggestInt.
+  //
+  // Most likely T is an enum type (either named or unnamed), in which
+  // case printing it as an integer is the desired behavior. In case
+  // T is not an enum, printing it as an integer is the best we can do
+  // given that it has no user-defined printer.
+  static void PrintValue(const T& value, ::std::ostream* os) {
+    const internal::BiggestInt kBigInt = value;
+    *os << kBigInt;
+  }
+};
+
+// Prints the given value to the given ostream. If the value is a
+// protocol message, its debug string is printed; if it's an enum or
+// of a type implicitly convertible to BiggestInt, it's printed as an
+// integer; otherwise the bytes in the value are printed. This is
+// what UniversalPrinter<T>::Print() does when it knows nothing about
+// type T and T has neither << operator nor PrintTo().
+//
+// A user can override this behavior for a class type Foo by defining
+// a << operator in the namespace where Foo is defined.
+//
+// We put this operator in namespace 'internal2' instead of 'internal'
+// to simplify the implementation, as much code in 'internal' needs to
+// use << in STL, which would conflict with our own << were it defined
+// in 'internal'.
+//
+// Note that this operator<< takes a generic std::basic_ostream<Char,
+// CharTraits> type instead of the more restricted std::ostream. If
+// we define it to take an std::ostream instead, we'll get an
+// "ambiguous overloads" compiler error when trying to print a type
+// Foo that supports streaming to std::basic_ostream<Char,
+// CharTraits>, as the compiler cannot tell whether
+// operator<<(std::ostream&, const T&) or
+// operator<<(std::basic_ostream<Char, CharTraits>, const Foo&) is more
+// specific.
+template <typename Char, typename CharTraits, typename T>
+::std::basic_ostream<Char, CharTraits>& operator<<(
+    ::std::basic_ostream<Char, CharTraits>& os, const T& x) {
+  TypeWithoutFormatter<T,
+      (internal::IsAProtocolMessage<T>::value ? kProtobuf :
+       internal::ImplicitlyConvertible<const T&, internal::BiggestInt>::value ?
+       kConvertibleToInteger : kOtherType)>::PrintValue(x, &os);
+  return os;
+}
+
+}  // namespace internal2
+}  // namespace testing
+
+// This namespace MUST NOT BE NESTED IN ::testing, or the name look-up
+// magic needed for implementing UniversalPrinter won't work.
+namespace testing_internal {
+
+// Used to print a value that is not an STL-style container when the
+// user doesn't define PrintTo() for it.
+template <typename T>
+void DefaultPrintNonContainerTo(const T& value, ::std::ostream* os) {
+  // With the following statement, during unqualified name lookup,
+  // testing::internal2::operator<< appears as if it was declared in
+  // the nearest enclosing namespace that contains both
+  // ::testing_internal and ::testing::internal2, i.e. the global
+  // namespace. For more details, refer to the C++ Standard section
+  // 7.3.4-1 [namespace.udir]. This allows us to fall back onto
+  // testing::internal2::operator<< in case T doesn't come with a <<
+  // operator.
+  //
+  // We cannot write 'using ::testing::internal2::operator<<;', which
+  // gcc 3.3 fails to compile due to a compiler bug.
+  using namespace ::testing::internal2;  // NOLINT
+
+  // Assuming T is defined in namespace foo, in the next statement,
+  // the compiler will consider all of:
+  //
+  //   1. foo::operator<< (thanks to Koenig look-up),
+  //   2. ::operator<< (as the current namespace is enclosed in ::),
+  //   3. testing::internal2::operator<< (thanks to the using statement above).
+  //
+  // The operator<< whose type matches T best will be picked.
+  //
+  // We deliberately allow #2 to be a candidate, as sometimes it's
+  // impossible to define #1 (e.g. when foo is ::std, defining
+  // anything in it is undefined behavior unless you are a compiler
+  // vendor.).
+  *os << value;
+}
+
+}  // namespace testing_internal
+
+namespace testing {
+namespace internal {
+
+// UniversalPrinter<T>::Print(value, ostream_ptr) prints the given
+// value to the given ostream. The caller must ensure that
+// 'ostream_ptr' is not NULL, or the behavior is undefined.
+//
+// We define UniversalPrinter as a class template (as opposed to a
+// function template), as we need to partially specialize it for
+// reference types, which cannot be done with function templates.
+template <typename T>
+class UniversalPrinter;
+
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os);
+
+// Used to print an STL-style container when the user doesn't define
+// a PrintTo() for it.
+template <typename C>
+void DefaultPrintTo(IsContainer /* dummy */,
+                    false_type /* is not a pointer */,
+                    const C& container, ::std::ostream* os) {
+  const size_t kMaxCount = 32;  // The maximum number of elements to print.
+  *os << '{';
+  size_t count = 0;
+  for (typename C::const_iterator it = container.begin();
+       it != container.end(); ++it, ++count) {
+    if (count > 0) {
+      *os << ',';
+      if (count == kMaxCount) {  // Enough has been printed.
+        *os << " ...";
+        break;
+      }
+    }
+    *os << ' ';
+    // We cannot call PrintTo(*it, os) here as PrintTo() doesn't
+    // handle *it being a native array.
+    internal::UniversalPrint(*it, os);
+  }
+
+  if (count > 0) {
+    *os << ' ';
+  }
+  *os << '}';
+}
+
+// Used to print a pointer that is neither a char pointer nor a member
+// pointer, when the user doesn't define PrintTo() for it. (A member
+// variable pointer or member function pointer doesn't really point to
+// a location in the address space. Their representation is
+// implementation-defined. Therefore they will be printed as raw
+// bytes.)
+template <typename T>
+void DefaultPrintTo(IsNotContainer /* dummy */,
+                    true_type /* is a pointer */,
+                    T* p, ::std::ostream* os) {
+  if (p == NULL) {
+    *os << "NULL";
+  } else {
+    // C++ doesn't allow casting from a function pointer to any object
+    // pointer.
+    //
+    // IsTrue() silences warnings: "Condition is always true",
+    // "unreachable code".
+    if (IsTrue(ImplicitlyConvertible<T*, const void*>::value)) {
+      // T is not a function type. We just call << to print p,
+      // relying on ADL to pick up user-defined << for their pointer
+      // types, if any.
+      *os << p;
+    } else {
+      // T is a function type, so '*os << p' doesn't do what we want
+      // (it just prints p as bool). We want to print p as a const
+      // void*. However, we cannot cast it to const void* directly,
+      // even using reinterpret_cast, as earlier versions of gcc
+      // (e.g. 3.4.5) cannot compile the cast when p is a function
+      // pointer. Casting to UInt64 first solves the problem.
+      *os << reinterpret_cast<const void*>(
+          reinterpret_cast<internal::UInt64>(p));
+    }
+  }
+}
+
+// Used to print a non-container, non-pointer value when the user
+// doesn't define PrintTo() for it.
+template <typename T>
+void DefaultPrintTo(IsNotContainer /* dummy */,
+                    false_type /* is not a pointer */,
+                    const T& value, ::std::ostream* os) {
+  ::testing_internal::DefaultPrintNonContainerTo(value, os);
+}
+
+// Prints the given value using the << operator if it has one;
+// otherwise prints the bytes in it. This is what
+// UniversalPrinter<T>::Print() does when PrintTo() is not specialized
+// or overloaded for type T.
+//
+// A user can override this behavior for a class type Foo by defining
+// an overload of PrintTo() in the namespace where Foo is defined.
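+// [Editorial sketch, not part of the upstream header] What such a
+// PrintTo() override looks like in user code; bar::Point is a
+// hypothetical type:
+//
+//   namespace bar {
+//   struct Point { int x, y; };
+//   void PrintTo(const Point& p, ::std::ostream* os) {
+//     *os << "(" << p.x << ", " << p.y << ")";
+//   }
+//   }  // namespace bar
+//
+// Assertions that fail on a bar::Point then print "(1, 2)" instead of
+// the raw bytes of the object.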
+// We give the user this option as sometimes defining a << operator for
+// Foo is not desirable (e.g. the coding style may prevent doing it,
+// or there is already a << operator but it doesn't do what the user
+// wants).
+template <typename T>
+void PrintTo(const T& value, ::std::ostream* os) {
+  // DefaultPrintTo() is overloaded. The type of its first two
+  // arguments determine which version will be picked. If T is an
+  // STL-style container, the version for container will be called; if
+  // T is a pointer, the pointer version will be called; otherwise the
+  // generic version will be called.
+  //
+  // Note that we check for container types here, before we check
+  // for protocol message types in our operator<<. The rationale is:
+  //
+  // For protocol messages, we want to give people a chance to
+  // override Google Mock's format by defining a PrintTo() or
+  // operator<<. For STL containers, other formats can be
+  // incompatible with Google Mock's format for the container
+  // elements; therefore we check for container types here to ensure
+  // that our format is used.
+  //
+  // The second argument of DefaultPrintTo() is needed to bypass a bug
+  // in Symbian's C++ compiler that prevents it from picking the right
+  // overload between:
+  //
+  //   PrintTo(const T& x, ...);
+  //   PrintTo(T* x, ...);
+  DefaultPrintTo(IsContainerTest<T>(0), is_pointer<T>(), value, os);
+}
+
+// The following list of PrintTo() overloads tells
+// UniversalPrinter<T>::Print() how to print standard types (built-in
+// types, strings, plain arrays, and pointers).
+
+// Overloads for various char types.
+GTEST_API_ void PrintTo(unsigned char c, ::std::ostream* os);
+GTEST_API_ void PrintTo(signed char c, ::std::ostream* os);
+inline void PrintTo(char c, ::std::ostream* os) {
+  // When printing a plain char, we always treat it as unsigned. This
+  // way, the output won't be affected by whether the compiler thinks
+  // char is signed or not.
+  PrintTo(static_cast<unsigned char>(c), os);
+}
+
+// Overloads for other simple built-in types.
+inline void PrintTo(bool x, ::std::ostream* os) {
+  *os << (x ? "true" : "false");
+}
+
+// Overload for wchar_t type.
+// Prints a wchar_t as a symbol if it is printable or as its internal
+// code otherwise and also as its decimal code (except for L'\0').
+// The L'\0' char is printed as "L'\\0'". The decimal code is printed
+// as signed integer when wchar_t is implemented by the compiler
+// as a signed type and is printed as an unsigned integer when wchar_t
+// is implemented as an unsigned type.
+GTEST_API_ void PrintTo(wchar_t wc, ::std::ostream* os);
+
+// Overloads for C strings.
+GTEST_API_ void PrintTo(const char* s, ::std::ostream* os);
+inline void PrintTo(char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const char*>(s), os);
+}
+
+// signed/unsigned char is often used for representing binary data, so
+// we print pointers to it as void* to be safe.
+inline void PrintTo(const signed char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(signed char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(const unsigned char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const void*>(s), os);
+}
+inline void PrintTo(unsigned char* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const void*>(s), os);
+}
+
+// MSVC can be configured to define wchar_t as a typedef of unsigned
+// short. It defines _NATIVE_WCHAR_T_DEFINED when wchar_t is a native
+// type. When wchar_t is a typedef, defining an overload for const
+// wchar_t* would cause unsigned short* be printed as a wide string,
+// possibly causing invalid memory accesses.
+#if !defined(_MSC_VER) || defined(_NATIVE_WCHAR_T_DEFINED)
+// Overloads for wide C strings
+GTEST_API_ void PrintTo(const wchar_t* s, ::std::ostream* os);
+inline void PrintTo(wchar_t* s, ::std::ostream* os) {
+  PrintTo(ImplicitCast_<const wchar_t*>(s), os);
+}
+#endif
+
+// Overload for C arrays. Multi-dimensional arrays are printed
+// properly.
+
+// Prints the given number of elements in an array, without printing
+// the curly braces.
+template <typename T>
+void PrintRawArrayTo(const T a[], size_t count, ::std::ostream* os) {
+  UniversalPrint(a[0], os);
+  for (size_t i = 1; i != count; i++) {
+    *os << ", ";
+    UniversalPrint(a[i], os);
+  }
+}
+
+// Overloads for ::string and ::std::string.
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_API_ void PrintStringTo(const ::string&s, ::std::ostream* os);
+inline void PrintTo(const ::string& s, ::std::ostream* os) {
+  PrintStringTo(s, os);
+}
+#endif  // GTEST_HAS_GLOBAL_STRING
+
+GTEST_API_ void PrintStringTo(const ::std::string&s, ::std::ostream* os);
+inline void PrintTo(const ::std::string& s, ::std::ostream* os) {
+  PrintStringTo(s, os);
+}
+
+// Overloads for ::wstring and ::std::wstring.
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_API_ void PrintWideStringTo(const ::wstring&s, ::std::ostream* os);
+inline void PrintTo(const ::wstring& s, ::std::ostream* os) {
+  PrintWideStringTo(s, os);
+}
+#endif  // GTEST_HAS_GLOBAL_WSTRING
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ void PrintWideStringTo(const ::std::wstring&s, ::std::ostream* os);
+inline void PrintTo(const ::std::wstring& s, ::std::ostream* os) {
+  PrintWideStringTo(s, os);
+}
+#endif  // GTEST_HAS_STD_WSTRING
+
+#if GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
+// Helper function for printing a tuple. T must be instantiated with
+// a tuple type.
+template <typename T>
+void PrintTupleTo(const T& t, ::std::ostream* os);
+#endif  // GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
+
+#if GTEST_HAS_TR1_TUPLE
+// Overload for ::std::tr1::tuple. Needed for printing function arguments,
+// which are packed as tuples.
+
+// Overloaded PrintTo() for tuples of various arities. We support
+// tuples of up-to 10 fields. The following implementation works
+// regardless of whether tr1::tuple is implemented using the
+// non-standard variadic template feature or not.
+
+inline void PrintTo(const ::std::tr1::tuple<>& t, ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1>
+void PrintTo(const ::std::tr1::tuple<T1>& t, ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2>
+void PrintTo(const ::std::tr1::tuple<T1, T2>& t, ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3>& t, ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4>& t, ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5>& t,
+             ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+          typename T6>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6>& t,
+             ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+          typename T6, typename T7>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7>& t,
+             ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+          typename T6, typename T7, typename T8>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8>& t,
+             ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+          typename T6, typename T7, typename T8, typename T9>
+void PrintTo(const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9>& t,
+             ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+          typename T6, typename T7, typename T8, typename T9, typename T10>
+void PrintTo(
+    const ::std::tr1::tuple<T1, T2, T3, T4, T5, T6, T7, T8, T9, T10>& t,
+    ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+#endif  // GTEST_HAS_TR1_TUPLE
+
+#if GTEST_HAS_STD_TUPLE_
+template <typename... Types>
+void PrintTo(const ::std::tuple<Types...>& t, ::std::ostream* os) {
+  PrintTupleTo(t, os);
+}
+#endif  // GTEST_HAS_STD_TUPLE_
+
+// Overload for std::pair.
+template <typename T1, typename T2>
+void PrintTo(const ::std::pair<T1, T2>& value, ::std::ostream* os) {
+  *os << '(';
+  // We cannot use UniversalPrint(value.first, os) here, as T1 may be
+  // a reference type. The same for printing value.second.
+  UniversalPrinter<T1>::Print(value.first, os);
+  *os << ", ";
+  UniversalPrinter<T2>::Print(value.second, os);
+  *os << ')';
+}
+
+// Implements printing a non-reference type T by letting the compiler
+// pick the right overload of PrintTo() for T.
+template <typename T>
+class UniversalPrinter {
+ public:
+  // MSVC warns about adding const to a function type, so we want to
+  // disable the warning.
+  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)
+
+  // Note: we deliberately don't call this PrintTo(), as that name
+  // conflicts with ::testing::internal::PrintTo in the body of the
+  // function.
+  static void Print(const T& value, ::std::ostream* os) {
+    // By default, ::testing::internal::PrintTo() is used for printing
+    // the value.
+    //
+    // Thanks to Koenig look-up, if T is a class and has its own
+    // PrintTo() function defined in its namespace, that function will
+    // be visible here. Since it is more specific than the generic ones
+    // in ::testing::internal, it will be picked by the compiler in the
+    // following statement - exactly what we want.
+    PrintTo(value, os);
+  }
+
+  GTEST_DISABLE_MSC_WARNINGS_POP_()
+};
+
+// UniversalPrintArray(begin, len, os) prints an array of 'len'
+// elements, starting at address 'begin'.
+template <typename T>
+void UniversalPrintArray(const T* begin, size_t len, ::std::ostream* os) {
+  if (len == 0) {
+    *os << "{}";
+  } else {
+    *os << "{ ";
+    const size_t kThreshold = 18;
+    const size_t kChunkSize = 8;
+    // If the array has more than kThreshold elements, we'll have to
+    // omit some details by printing only the first and the last
+    // kChunkSize elements.
+    // TODO(wan@google.com): let the user control the threshold using a flag.
+    if (len <= kThreshold) {
+      PrintRawArrayTo(begin, len, os);
+    } else {
+      PrintRawArrayTo(begin, kChunkSize, os);
+      *os << ", ..., ";
+      PrintRawArrayTo(begin + len - kChunkSize, kChunkSize, os);
+    }
+    *os << " }";
+  }
+}
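+// [Editorial sketch, not part of the upstream header] With the
+// constants above, a 20-element int array {0, 1, ..., 19} prints the
+// first and last kChunkSize (8) elements with an ellipsis in between:
+//
+//   { 0, 1, 2, 3, 4, 5, 6, 7, ..., 12, 13, 14, 15, 16, 17, 18, 19 }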
+// This overload prints a (const) char array compactly.
+GTEST_API_ void UniversalPrintArray(
+    const char* begin, size_t len, ::std::ostream* os);
+
+// This overload prints a (const) wchar_t array compactly.
+GTEST_API_ void UniversalPrintArray(
+    const wchar_t* begin, size_t len, ::std::ostream* os);
+
+// Implements printing an array type T[N].
+template <typename T, size_t N>
+class UniversalPrinter<T[N]> {
+ public:
+  // Prints the given array, omitting some elements when there are too
+  // many.
+  static void Print(const T (&a)[N], ::std::ostream* os) {
+    UniversalPrintArray(a, N, os);
+  }
+};
+
+// Implements printing a reference type T&.
+template <typename T>
+class UniversalPrinter<T&> {
+ public:
+  // MSVC warns about adding const to a function type, so we want to
+  // disable the warning.
+  GTEST_DISABLE_MSC_WARNINGS_PUSH_(4180)
+
+  static void Print(const T& value, ::std::ostream* os) {
+    // Prints the address of the value.  We use reinterpret_cast here
+    // as static_cast doesn't compile when T is a function type.
+    *os << "@" << reinterpret_cast<const void*>(&value) << " ";
+
+    // Then prints the value itself.
+    UniversalPrint(value, os);
+  }
+
+  GTEST_DISABLE_MSC_WARNINGS_POP_()
+};
+
+// Prints a value tersely: for a reference type, the referenced value
+// (but not the address) is printed; for a (const) char pointer, the
+// NUL-terminated string (but not the pointer) is printed.
+
+template <typename T>
+class UniversalTersePrinter {
+ public:
+  static void Print(const T& value, ::std::ostream* os) {
+    UniversalPrint(value, os);
+  }
+};
+template <typename T>
+class UniversalTersePrinter<T&> {
+ public:
+  static void Print(const T& value, ::std::ostream* os) {
+    UniversalPrint(value, os);
+  }
+};
+template <typename T, size_t N>
+class UniversalTersePrinter<T[N]> {
+ public:
+  static void Print(const T (&value)[N], ::std::ostream* os) {
+    UniversalPrinter<T[N]>::Print(value, os);
+  }
+};
+template <>
+class UniversalTersePrinter<const char*> {
+ public:
+  static void Print(const char* str, ::std::ostream* os) {
+    if (str == NULL) {
+      *os << "NULL";
+    } else {
+      UniversalPrint(string(str), os);
+    }
+  }
+};
+template <>
+class UniversalTersePrinter<char*> {
+ public:
+  static void Print(char* str, ::std::ostream* os) {
+    UniversalTersePrinter<const char*>::Print(str, os);
+  }
+};
+
+#if GTEST_HAS_STD_WSTRING
+template <>
+class UniversalTersePrinter<const wchar_t*> {
+ public:
+  static void Print(const wchar_t* str, ::std::ostream* os) {
+    if (str == NULL) {
+      *os << "NULL";
+    } else {
+      UniversalPrint(::std::wstring(str), os);
+    }
+  }
+};
+#endif
+
+template <>
+class UniversalTersePrinter<wchar_t*> {
+ public:
+  static void Print(wchar_t* str, ::std::ostream* os) {
+    UniversalTersePrinter<const wchar_t*>::Print(str, os);
+  }
+};
+
+template <typename T>
+void UniversalTersePrint(const T& value, ::std::ostream* os) {
+  UniversalTersePrinter<T>::Print(value, os);
+}
+
+// Prints a value using the type inferred by the compiler.  The
+// difference between this and UniversalTersePrint() is that for a
+// (const) char pointer, this prints both the pointer and the
+// NUL-terminated string.
+template <typename T>
+void UniversalPrint(const T& value, ::std::ostream* os) {
+  // A workaround for the bug in VC++ 7.1 that prevents us from instantiating
+  // UniversalPrinter with T directly.
+  typedef T T1;
+  UniversalPrinter<T1>::Print(value, os);
+}
+
+typedef ::std::vector<string> Strings;
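+// NOTE (editorial sketch, not part of Google Test): the terse/non-terse
+// split in one picture.  For a const char* s pointing at "hi":
+//
+//   UniversalTersePrint(s, &ss);  // writes: "hi"
+//   UniversalPrint(s, &ss);       // writes the pointer first, roughly:
+//                                 //   0x7ffd1234 pointing to "hi"
+//
+// The address shown is illustrative, not a real value.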
+// TuplePolicy<TupleT> must provide:
+// - tuple_size
+//     size of tuple TupleT.
+// - get<size_t I>(const TupleT& t)
+//     static function extracting element I of tuple TupleT.
+// - tuple_element<size_t I>::type
+//     type of element I of tuple TupleT.
+template <typename TupleT>
+struct TuplePolicy;
+
+#if GTEST_HAS_TR1_TUPLE
+template <typename TupleT>
+struct TuplePolicy {
+  typedef TupleT Tuple;
+  static const size_t tuple_size = ::std::tr1::tuple_size<Tuple>::value;
+
+  template <size_t I>
+  struct tuple_element : ::std::tr1::tuple_element<I, Tuple> {};
+
+  template <size_t I>
+  static typename AddReference<
+      const typename ::std::tr1::tuple_element<I, Tuple>::type>::type get(
+      const Tuple& tuple) {
+    return ::std::tr1::get<I>(tuple);
+  }
+};
+template <typename TupleT>
+const size_t TuplePolicy<TupleT>::tuple_size;
+#endif  // GTEST_HAS_TR1_TUPLE
+
+#if GTEST_HAS_STD_TUPLE_
+template <typename... Types>
+struct TuplePolicy< ::std::tuple<Types...> > {
+  typedef ::std::tuple<Types...> Tuple;
+  static const size_t tuple_size = ::std::tuple_size<Tuple>::value;
+
+  template <size_t I>
+  struct tuple_element : ::std::tuple_element<I, Tuple> {};
+
+  template <size_t I>
+  static const typename ::std::tuple_element<I, Tuple>::type& get(
+      const Tuple& tuple) {
+    return ::std::get<I>(tuple);
+  }
+};
+template <typename... Types>
+const size_t TuplePolicy< ::std::tuple<Types...> >::tuple_size;
+#endif  // GTEST_HAS_STD_TUPLE_
+
+#if GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
+// This helper template allows PrintTo() for tuples and
+// UniversalTersePrintTupleFieldsToStrings() to be defined by
+// induction on the number of tuple fields.  The idea is that
+// TuplePrefixPrinter<N>::PrintPrefixTo(t, os) prints the first N
+// fields in tuple t, and can be defined in terms of
+// TuplePrefixPrinter<N - 1>.
+//
+// The inductive case.
+template <size_t N>
+struct TuplePrefixPrinter {
+  // Prints the first N fields of a tuple.
+  template <typename Tuple>
+  static void PrintPrefixTo(const Tuple& t, ::std::ostream* os) {
+    TuplePrefixPrinter<N - 1>::PrintPrefixTo(t, os);
+    GTEST_INTENTIONAL_CONST_COND_PUSH_()
+    if (N > 1) {
+    GTEST_INTENTIONAL_CONST_COND_POP_()
+      *os << ", ";
+    }
+    UniversalPrinter<
+        typename TuplePolicy<Tuple>::template tuple_element<N - 1>::type>
+        ::Print(TuplePolicy<Tuple>::template get<N - 1>(t), os);
+  }
+
+  // Tersely prints the first N fields of a tuple to a string vector,
+  // one element for each field.
+  template <typename Tuple>
+  static void TersePrintPrefixToStrings(const Tuple& t, Strings* strings) {
+    TuplePrefixPrinter<N - 1>::TersePrintPrefixToStrings(t, strings);
+    ::std::stringstream ss;
+    UniversalTersePrint(TuplePolicy<Tuple>::template get<N - 1>(t), &ss);
+    strings->push_back(ss.str());
+  }
+};
+
+// Base case.
+template <>
+struct TuplePrefixPrinter<0> {
+  template <typename Tuple>
+  static void PrintPrefixTo(const Tuple&, ::std::ostream*) {}
+
+  template <typename Tuple>
+  static void TersePrintPrefixToStrings(const Tuple&, Strings*) {}
+};
+
+// Helper function for printing a tuple.
+// Tuple must be either std::tr1::tuple or std::tuple type.
+template <typename Tuple>
+void PrintTupleTo(const Tuple& t, ::std::ostream* os) {
+  *os << "(";
+  TuplePrefixPrinter<TuplePolicy<Tuple>::tuple_size>::PrintPrefixTo(t, os);
+  *os << ")";
+}
+
+// Prints the fields of a tuple tersely to a string vector, one
+// element for each field.  See the comment before
+// UniversalTersePrint() for how we define "tersely".
+template <typename Tuple>
+Strings UniversalTersePrintTupleFieldsToStrings(const Tuple& value) {
+  Strings result;
+  TuplePrefixPrinter<TuplePolicy<Tuple>::tuple_size>::
+      TersePrintPrefixToStrings(value, &result);
+  return result;
+}
+#endif  // GTEST_HAS_TR1_TUPLE || GTEST_HAS_STD_TUPLE_
+
+}  // namespace internal
+
+template <typename T>
+::std::string PrintToString(const T& value) {
+  ::std::stringstream ss;
+  internal::UniversalTersePrinter<T>::Print(value, &ss);
+  return ss.str();
+}
+
+}  // namespace testing
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_PRINTERS_H_
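+// NOTE (editorial sketch, not part of Google Test): how the induction above
+// unrolls for a 3-tuple.  PrintTupleTo(t, os) calls
+// TuplePrefixPrinter<3>::PrintPrefixTo(t, os), which expands to:
+//
+//   TuplePrefixPrinter<0>::PrintPrefixTo(t, os);  // base case: nothing
+//   print field 0;                                // N == 1: no ", " prefix
+//   *os << ", "; print field 1;                   // N == 2
+//   *os << ", "; print field 2;                   // N == 3
+//
+// so ::testing::PrintToString(::std::make_tuple(1, 'a', 2.5)) would yield
+// something like "(1, 'a' (97, 0x61), 2.5)".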
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+namespace internal {
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Outputs a message explaining invalid registration of different
+// fixture class for the same test case.  This may happen when
+// TEST_P macro is used to define two tests with the same name
+// but in different namespaces.
+GTEST_API_ void ReportInvalidTestCaseType(const char* test_case_name,
+                                          const char* file, int line);
+
+template <typename> class ParamGeneratorInterface;
+template <typename> class ParamGenerator;
+
+// Interface for iterating over elements provided by an implementation
+// of ParamGeneratorInterface<T>.
+template <typename T>
+class ParamIteratorInterface {
+ public:
+  virtual ~ParamIteratorInterface() {}
+  // A pointer to the base generator instance.
+  // Used only for the purposes of iterator comparison
+  // to make sure that two iterators belong to the same generator.
+  virtual const ParamGeneratorInterface<T>* BaseGenerator() const = 0;
+  // Advances iterator to point to the next element
+  // provided by the generator.  The caller is responsible
+  // for not calling Advance() on an iterator equal to
+  // BaseGenerator()->End().
+  virtual void Advance() = 0;
+  // Clones the iterator object.  Used for implementing copy semantics
+  // of ParamIterator<T>.
+  virtual ParamIteratorInterface* Clone() const = 0;
+  // Dereferences the current iterator and provides (read-only) access
+  // to the pointed value.  It is the caller's responsibility not to call
+  // Current() on an iterator equal to BaseGenerator()->End().
+  // Used for implementing ParamGenerator<T>::operator*().
+  virtual const T* Current() const = 0;
+  // Determines whether the given iterator and other point to the same
+  // element in the sequence generated by the generator.
+  // Used for implementing ParamGenerator<T>::operator==().
+  virtual bool Equals(const ParamIteratorInterface& other) const = 0;
+};
+
+// Class iterating over elements provided by an implementation of
+// ParamGeneratorInterface<T>.  It wraps ParamIteratorInterface<T>
+// and implements the const forward iterator concept.
+template <typename T>
+class ParamIterator {
+ public:
+  typedef T value_type;
+  typedef const T& reference;
+  typedef ptrdiff_t difference_type;
+
+  // ParamIterator assumes ownership of the impl_ pointer.
+  ParamIterator(const ParamIterator& other) : impl_(other.impl_->Clone()) {}
+  ParamIterator& operator=(const ParamIterator& other) {
+    if (this != &other)
+      impl_.reset(other.impl_->Clone());
+    return *this;
+  }
+
+  const T& operator*() const { return *impl_->Current(); }
+  const T* operator->() const { return impl_->Current(); }
+  // Prefix version of operator++.
+  ParamIterator& operator++() {
+    impl_->Advance();
+    return *this;
+  }
+  // Postfix version of operator++.
+  ParamIterator operator++(int /*unused*/) {
+    ParamIteratorInterface<T>* clone = impl_->Clone();
+    impl_->Advance();
+    return ParamIterator(clone);
+  }
+  bool operator==(const ParamIterator& other) const {
+    return impl_.get() == other.impl_.get() || impl_->Equals(*other.impl_);
+  }
+  bool operator!=(const ParamIterator& other) const {
+    return !(*this == other);
+  }
+
+ private:
+  friend class ParamGenerator<T>;
+  explicit ParamIterator(ParamIteratorInterface<T>* impl) : impl_(impl) {}
+  scoped_ptr<ParamIteratorInterface<T> > impl_;
+};
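+// NOTE (editorial sketch, not part of Google Test): ParamIterator behaves
+// like a const forward iterator, so a generator can be walked with ordinary
+// loop code.  Assuming a ParamGenerator<int> named gen (defined below):
+#if 0
+for (ParamIterator<int> it = gen.begin(); it != gen.end(); ++it) {
+  const int& value = *it;  // read-only access to the current parameter
+  // ... use value ...
+}
+#endif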
+// ParamGeneratorInterface<T> is the binary interface to access generators
+// defined in other translation units.
+template <typename T>
+class ParamGeneratorInterface {
+ public:
+  typedef T ParamType;
+
+  virtual ~ParamGeneratorInterface() {}
+
+  // Generator interface definition
+  virtual ParamIteratorInterface<T>* Begin() const = 0;
+  virtual ParamIteratorInterface<T>* End() const = 0;
+};
+
+// Wraps ParamGeneratorInterface<T> and provides general generator syntax
+// compatible with the STL Container concept.
+// This class implements copy initialization semantics and the contained
+// ParamGeneratorInterface<T> instance is shared among all copies
+// of the original object.  This is possible because that instance is
+// immutable.
+template <typename T>
+class ParamGenerator {
+ public:
+  typedef ParamIterator<T> iterator;
+
+  explicit ParamGenerator(ParamGeneratorInterface<T>* impl) : impl_(impl) {}
+  ParamGenerator(const ParamGenerator& other) : impl_(other.impl_) {}
+
+  ParamGenerator& operator=(const ParamGenerator& other) {
+    impl_ = other.impl_;
+    return *this;
+  }
+
+  iterator begin() const { return iterator(impl_->Begin()); }
+  iterator end() const { return iterator(impl_->End()); }
+
+ private:
+  linked_ptr<const ParamGeneratorInterface<T> > impl_;
+};
+
+// Generates values from a range of two comparable values.  Can be used to
+// generate sequences of user-defined types that implement operator+() and
+// operator<().
+// This class is used in the Range() function.
+template <typename T, typename IncrementT>
+class RangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+  RangeGenerator(T begin, T end, IncrementT step)
+      : begin_(begin), end_(end),
+        step_(step), end_index_(CalculateEndIndex(begin, end, step)) {}
+  virtual ~RangeGenerator() {}
+
+  virtual ParamIteratorInterface<T>* Begin() const {
+    return new Iterator(this, begin_, 0, step_);
+  }
+  virtual ParamIteratorInterface<T>* End() const {
+    return new Iterator(this, end_, end_index_, step_);
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<T> {
+   public:
+    Iterator(const ParamGeneratorInterface<T>* base, T value, int index,
+             IncrementT step)
+        : base_(base), value_(value), index_(index), step_(step) {}
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+      return base_;
+    }
+    virtual void Advance() {
+      value_ = value_ + step_;
+      index_++;
+    }
+    virtual ParamIteratorInterface<T>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const T* Current() const { return &value_; }
+    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const int other_index =
+          CheckedDowncastToActualType<const Iterator>(&other)->index_;
+      return index_ == other_index;
+    }
+
+   private:
+    Iterator(const Iterator& other)
+        : ParamIteratorInterface<T>(),
+          base_(other.base_), value_(other.value_), index_(other.index_),
+          step_(other.step_) {}
+
+    // No implementation - assignment is unsupported.
+    void operator=(const Iterator& other);
+
+    const ParamGeneratorInterface<T>* const base_;
+    T value_;
+    int index_;
+    const IncrementT step_;
+  };  // class RangeGenerator::Iterator
+
+  static int CalculateEndIndex(const T& begin,
+                               const T& end,
+                               const IncrementT& step) {
+    int end_index = 0;
+    for (T i = begin; i < end; i = i + step)
+      end_index++;
+    return end_index;
+  }
+
+  // No implementation - assignment is unsupported.
+  void operator=(const RangeGenerator& other);
+
+  const T begin_;
+  const T end_;
+  const IncrementT step_;
+  // The index for the end() iterator.  All the elements in the generated
+  // sequence are indexed (0-based) to aid iterator comparison.
+  const int end_index_;
+};  // class RangeGenerator
+
+
+// Generates values from a pair of STL-style iterators.  Used in the
+// ValuesIn() function.  The elements are copied from the source range
+// since the source can be located on the stack, and the generator
+// is likely to persist beyond that stack frame.
+template <typename T>
+class ValuesInIteratorRangeGenerator : public ParamGeneratorInterface<T> {
+ public:
+  template <typename ForwardIterator>
+  ValuesInIteratorRangeGenerator(ForwardIterator begin, ForwardIterator end)
+      : container_(begin, end) {}
+  virtual ~ValuesInIteratorRangeGenerator() {}
+
+  virtual ParamIteratorInterface<T>* Begin() const {
+    return new Iterator(this, container_.begin());
+  }
+  virtual ParamIteratorInterface<T>* End() const {
+    return new Iterator(this, container_.end());
+  }
+
+ private:
+  typedef typename ::std::vector<T> ContainerType;
+
+  class Iterator : public ParamIteratorInterface<T> {
+   public:
+    Iterator(const ParamGeneratorInterface<T>* base,
+             typename ContainerType::const_iterator iterator)
+        : base_(base), iterator_(iterator) {}
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<T>* BaseGenerator() const {
+      return base_;
+    }
+    virtual void Advance() {
+      ++iterator_;
+      value_.reset();
+    }
+    virtual ParamIteratorInterface<T>* Clone() const {
+      return new Iterator(*this);
+    }
+    // We need to use cached value referenced by iterator_ because *iterator_
+    // can return a temporary object (and of type other than T), so just
+    // having "return &*iterator_;" doesn't work.
+    // value_ is updated here and not in Advance() because Advance()
+    // can advance iterator_ beyond the end of the range, and we cannot
+    // detect that fact.  The client code, on the other hand, is
+    // responsible for not calling Current() on an out-of-range iterator.
+    virtual const T* Current() const {
+      if (value_.get() == NULL)
+        value_.reset(new T(*iterator_));
+      return value_.get();
+    }
+    virtual bool Equals(const ParamIteratorInterface<T>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      return iterator_ ==
+          CheckedDowncastToActualType<const Iterator>(&other)->iterator_;
+    }
+
+   private:
+    Iterator(const Iterator& other)
+          // The explicit constructor call suppresses a false warning
+          // emitted by gcc when supplied with the -Wextra option.
+        : ParamIteratorInterface<T>(),
+          base_(other.base_),
+          iterator_(other.iterator_) {}
+
+    const ParamGeneratorInterface<T>* const base_;
+    typename ContainerType::const_iterator iterator_;
+    // A cached value of *iterator_.  We keep it here to allow access by
+    // pointer in the wrapping iterator's operator->().
+    // value_ needs to be mutable to be accessed in Current().
+    // Use of scoped_ptr helps manage cached value's lifetime,
+    // which is bound by the lifespan of the iterator itself.
+    mutable scoped_ptr<const T> value_;
+  };  // class ValuesInIteratorRangeGenerator::Iterator
+
+  // No implementation - assignment is unsupported.
+  void operator=(const ValuesInIteratorRangeGenerator& other);
+
+  const ContainerType container_;
+};  // class ValuesInIteratorRangeGenerator
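+// NOTE (editorial sketch, not part of Google Test): what the two generators
+// above produce.  Range() is half-open and ValuesIn() copies its input:
+#if 0
+// Yields 0, 3, 6, 9 -- the end value 10 itself is never generated.
+::testing::internal::ParamGenerator<int> g1 = ::testing::Range(0, 10, 3);
+
+// Copies the vector's elements, so the generator stays valid even after
+// the vector goes out of scope.
+std::vector<std::string> v;
+v.push_back("a");
+v.push_back("b");
+::testing::internal::ParamGenerator<std::string> g2 = ::testing::ValuesIn(v);
+#endif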
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Stores a parameter value and later creates tests parameterized with that
+// value.
+template <class TestClass>
+class ParameterizedTestFactory : public TestFactoryBase {
+ public:
+  typedef typename TestClass::ParamType ParamType;
+  explicit ParameterizedTestFactory(ParamType parameter) :
+      parameter_(parameter) {}
+  virtual Test* CreateTest() {
+    TestClass::SetParam(&parameter_);
+    return new TestClass();
+  }
+
+ private:
+  const ParamType parameter_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestFactory);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactoryBase is a base class for meta-factories that create
+// test factories for passing into MakeAndRegisterTestInfo function.
+template <class ParamType>
+class TestMetaFactoryBase {
+ public:
+  virtual ~TestMetaFactoryBase() {}
+
+  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) = 0;
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// TestMetaFactory creates test factories for passing into
+// MakeAndRegisterTestInfo function.  Since MakeAndRegisterTestInfo receives
+// ownership of test factory pointer, same factory object cannot be passed
+// into that method twice.  But ParameterizedTestCaseInfo is going to call
+// it for each Test/Parameter value combination.  Thus it needs meta factory
+// creator class.
+template <class TestCase>
+class TestMetaFactory
+    : public TestMetaFactoryBase<typename TestCase::ParamType> {
+ public:
+  typedef typename TestCase::ParamType ParamType;
+
+  TestMetaFactory() {}
+
+  virtual TestFactoryBase* CreateTestFactory(ParamType parameter) {
+    return new ParameterizedTestFactory<TestCase>(parameter);
+  }
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(TestMetaFactory);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfoBase is a generic interface
+// to ParameterizedTestCaseInfo classes.  ParameterizedTestCaseInfoBase
+// accumulates test information provided by TEST_P macro invocations
+// and generators provided by INSTANTIATE_TEST_CASE_P macro invocations
+// and uses that information to register all resulting test instances
+// in RegisterTests method.  The ParameterizedTestCaseRegistry class holds
+// a collection of pointers to the ParameterizedTestCaseInfo objects
+// and calls RegisterTests() on each of them when asked.
+class ParameterizedTestCaseInfoBase {
+ public:
+  virtual ~ParameterizedTestCaseInfoBase() {}
+
+  // Base part of test case name for display purposes.
+  virtual const string& GetTestCaseName() const = 0;
+  // Test case id to verify identity.
+  virtual TypeId GetTestCaseTypeId() const = 0;
+  // UnitTest class invokes this method to register tests in this
+  // test case right before running them in RUN_ALL_TESTS macro.
+  // This method should not be called more than once on any single
+  // instance of a ParameterizedTestCaseInfoBase derived class.
+  virtual void RegisterTests() = 0;
+
+ protected:
+  ParameterizedTestCaseInfoBase() {}
+
+ private:
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfoBase);
+};
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseInfo accumulates tests obtained from TEST_P
+// macro invocations for a particular test case and generators
+// obtained from INSTANTIATE_TEST_CASE_P macro invocations for that
+// test case.  It registers tests with all values generated by all
+// generators when asked.
+template <class TestCase>
+class ParameterizedTestCaseInfo : public ParameterizedTestCaseInfoBase {
+ public:
+  // ParamType and GeneratorCreationFunc are private types but are required
+  // for declarations of public methods AddTestPattern() and
+  // AddTestCaseInstantiation().
+  typedef typename TestCase::ParamType ParamType;
+  // A function that returns an instance of appropriate generator type.
+  typedef ParamGenerator<ParamType>(GeneratorCreationFunc)();
+
+  explicit ParameterizedTestCaseInfo(const char* name)
+      : test_case_name_(name) {}
+
+  // Test case base name for display purposes.
+  virtual const string& GetTestCaseName() const { return test_case_name_; }
+  // Test case id to verify identity.
+  virtual TypeId GetTestCaseTypeId() const { return GetTypeId<TestCase>(); }
+  // TEST_P macro uses AddTestPattern() to record information
+  // about a single test in a LocalTestInfo structure.
+  // test_case_name is the base name of the test case (without invocation
+  // prefix).  test_base_name is the name of an individual test without
+  // parameter index.  For the test SequenceA/FooTest.DoBar/1 FooTest is
+  // test case base name and DoBar is test base name.
+  void AddTestPattern(const char* test_case_name,
+                      const char* test_base_name,
+                      TestMetaFactoryBase<ParamType>* meta_factory) {
+    tests_.push_back(linked_ptr<TestInfo>(new TestInfo(test_case_name,
+                                                       test_base_name,
+                                                       meta_factory)));
+  }
+  // INSTANTIATE_TEST_CASE_P macro uses AddGenerator() to record information
+  // about a generator.
+  int AddTestCaseInstantiation(const string& instantiation_name,
+                               GeneratorCreationFunc* func,
+                               const char* /* file */,
+                               int /* line */) {
+    instantiations_.push_back(::std::make_pair(instantiation_name, func));
+    return 0;  // Return value used only to run this method in namespace scope.
+  }
+  // UnitTest class invokes this method to register tests in this test case
+  // right before running tests in RUN_ALL_TESTS macro.
+  // This method should not be called more than once on any single
+  // instance of a ParameterizedTestCaseInfoBase derived class.
+  // UnitTest has a guard to prevent from calling this method more than once.
+  virtual void RegisterTests() {
+    for (typename TestInfoContainer::iterator test_it = tests_.begin();
+         test_it != tests_.end(); ++test_it) {
+      linked_ptr<TestInfo> test_info = *test_it;
+      for (typename InstantiationContainer::iterator gen_it =
+               instantiations_.begin(); gen_it != instantiations_.end();
+               ++gen_it) {
+        const string& instantiation_name = gen_it->first;
+        ParamGenerator<ParamType> generator((*gen_it->second)());
+
+        string test_case_name;
+        if ( !instantiation_name.empty() )
+          test_case_name = instantiation_name + "/";
+        test_case_name += test_info->test_case_base_name;
+
+        int i = 0;
+        for (typename ParamGenerator<ParamType>::iterator param_it =
+                 generator.begin();
+             param_it != generator.end(); ++param_it, ++i) {
+          Message test_name_stream;
+          test_name_stream << test_info->test_base_name << "/" << i;
+          MakeAndRegisterTestInfo(
+              test_case_name.c_str(),
+              test_name_stream.GetString().c_str(),
+              NULL,  // No type parameter.
+              PrintToString(*param_it).c_str(),
+              GetTestCaseTypeId(),
+              TestCase::SetUpTestCase,
+              TestCase::TearDownTestCase,
+              test_info->test_meta_factory->CreateTestFactory(*param_it));
+        }  // for param_it
+      }  // for gen_it
+    }  // for test_it
+  }  // RegisterTests
+
+ private:
+  // LocalTestInfo structure keeps information about a single test registered
+  // with TEST_P macro.
+  struct TestInfo {
+    TestInfo(const char* a_test_case_base_name,
+             const char* a_test_base_name,
+             TestMetaFactoryBase<ParamType>* a_test_meta_factory) :
+        test_case_base_name(a_test_case_base_name),
+        test_base_name(a_test_base_name),
+        test_meta_factory(a_test_meta_factory) {}
+
+    const string test_case_base_name;
+    const string test_base_name;
+    const scoped_ptr<TestMetaFactoryBase<ParamType> > test_meta_factory;
+  };
+  typedef ::std::vector<linked_ptr<TestInfo> > TestInfoContainer;
+  // Keeps pairs of <Instantiation name, Single instantiation generator>
+  // received from INSTANTIATE_TEST_CASE_P macros.
+  typedef ::std::vector<std::pair<string, GeneratorCreationFunc*> >
+      InstantiationContainer;
+
+  const string test_case_name_;
+  TestInfoContainer tests_;
+  InstantiationContainer instantiations_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseInfo);
+};  // class ParameterizedTestCaseInfo
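+// NOTE (editorial sketch, not part of Google Test): the registration flow
+// above in user-level terms.  FooTest, DoBar and SequenceA are hypothetical
+// names:
+#if 0
+class FooTest : public ::testing::TestWithParam<int> {};
+
+TEST_P(FooTest, DoBar) {  // records a pattern via AddTestPattern()
+  EXPECT_GE(GetParam(), 0);
+}
+
+// Records a generator via AddTestCaseInstantiation(); at RegisterTests()
+// time this produces tests named SequenceA/FooTest.DoBar/0,
+// SequenceA/FooTest.DoBar/1 and SequenceA/FooTest.DoBar/2.
+INSTANTIATE_TEST_CASE_P(SequenceA, FooTest, ::testing::Values(1, 2, 3));
+#endif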
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// ParameterizedTestCaseRegistry contains a map of
+// ParameterizedTestCaseInfoBase classes accessed by test case names.  TEST_P
+// and INSTANTIATE_TEST_CASE_P macros use it to locate their corresponding
+// ParameterizedTestCaseInfo descriptors.
+class ParameterizedTestCaseRegistry {
+ public:
+  ParameterizedTestCaseRegistry() {}
+  ~ParameterizedTestCaseRegistry() {
+    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+         it != test_case_infos_.end(); ++it) {
+      delete *it;
+    }
+  }
+
+  // Looks up or creates and returns a structure containing information about
+  // tests and instantiations of a particular test case.
+  template <class TestCase>
+  ParameterizedTestCaseInfo<TestCase>* GetTestCasePatternHolder(
+      const char* test_case_name,
+      const char* file,
+      int line) {
+    ParameterizedTestCaseInfo<TestCase>* typed_test_info = NULL;
+    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+         it != test_case_infos_.end(); ++it) {
+      if ((*it)->GetTestCaseName() == test_case_name) {
+        if ((*it)->GetTestCaseTypeId() != GetTypeId<TestCase>()) {
+          // Complain about incorrect usage of Google Test facilities
+          // and terminate the program since we cannot guarantee correct
+          // test case setup and tear-down in this case.
+          ReportInvalidTestCaseType(test_case_name, file, line);
+          posix::Abort();
+        } else {
+          // At this point we are sure that the object we found is of the same
+          // type we are looking for, so we downcast it to that type
+          // without further checks.
+          typed_test_info = CheckedDowncastToActualType<
+              ParameterizedTestCaseInfo<TestCase> >(*it);
+        }
+        break;
+      }
+    }
+    if (typed_test_info == NULL) {
+      typed_test_info =
+          new ParameterizedTestCaseInfo<TestCase>(test_case_name);
+      test_case_infos_.push_back(typed_test_info);
+    }
+    return typed_test_info;
+  }
+  void RegisterTests() {
+    for (TestCaseInfoContainer::iterator it = test_case_infos_.begin();
+         it != test_case_infos_.end(); ++it) {
+      (*it)->RegisterTests();
+    }
+  }
+
+ private:
+  typedef ::std::vector<ParameterizedTestCaseInfoBase*> TestCaseInfoContainer;
+
+  TestCaseInfoContainer test_case_infos_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(ParameterizedTestCaseRegistry);
+};
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_HAS_PARAM_TEST
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_H_
+// This file was GENERATED by command:
+//     pump.py gtest-param-util-generated.h.pump
+// DO NOT EDIT BY HAND!!!
+
+// Copyright 2008 Google Inc.
+// All Rights Reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: vladl@google.com (Vlad Losev)
+
+// Type and function utilities for implementing parameterized tests.
+// This file is generated by a SCRIPT.  DO NOT EDIT BY HAND!
+//
+// Currently Google Test supports at most 50 arguments in Values,
+// and at most 10 arguments in Combine.  Please contact
+// googletestframework@googlegroups.com if you need more.
+// Please note that the number of arguments to Combine is limited
+// by the maximum arity of the implementation of tuple which is
+// currently set at 10.
+
+#ifndef GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+#define GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+// scripts/fuse_gtest.py depends on gtest's own header being #included
+// *unconditionally*.  Therefore these #includes cannot be moved
+// inside #if GTEST_HAS_PARAM_TEST.
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Forward declarations of ValuesIn(), which is implemented in
+// include/gtest/gtest-param-test.h.
+template <typename ForwardIterator>
+internal::ParamGenerator<
+    typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end);
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]);
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+    const Container& container);
+
+namespace internal {
+
+// Used in the Values() function to provide polymorphic capabilities.
+template <typename T1>
+class ValueArray1 {
+ public:
+  explicit ValueArray1(T1 v1) : v1_(v1) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const { return ValuesIn(&v1_, &v1_ + 1); }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray1& other);
+
+  const T1 v1_;
+};
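+// NOTE (editorial sketch, not part of Google Test): the "polymorphic" part
+// above.  A ValueArrayN object defers committing to an element type; its
+// conversion operator casts every stored value to the parameter type T the
+// test actually uses.  Hypothetical usage:
+#if 0
+// Values(1, 2.5) returns a ValueArray2<int, double>.  Binding it to a
+// ParamGenerator<double> converts both values via static_cast<double>.
+::testing::internal::ParamGenerator<double> gen = ::testing::Values(1, 2.5);
+#endif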
+template <typename T1, typename T2>
+class ValueArray2 {
+ public:
+  ValueArray2(T1 v1, T2 v2) : v1_(v1), v2_(v2) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray2& other);
+
+  const T1 v1_;
+  const T2 v2_;
+};
+
+template <typename T1, typename T2, typename T3>
+class ValueArray3 {
+ public:
+  ValueArray3(T1 v1, T2 v2, T3 v3) : v1_(v1), v2_(v2), v3_(v3) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray3& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4>
+class ValueArray4 {
+ public:
+  ValueArray4(T1 v1, T2 v2, T3 v3, T4 v4) : v1_(v1), v2_(v2), v3_(v3),
+      v4_(v4) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray4& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5>
+class ValueArray5 {
+ public:
+  ValueArray5(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5) : v1_(v1), v2_(v2), v3_(v3),
+      v4_(v4), v5_(v5) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray5& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6>
+class ValueArray6 {
+ public:
+  ValueArray6(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6) : v1_(v1), v2_(v2),
+      v3_(v3), v4_(v4), v5_(v5), v6_(v6) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray6& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7>
+class ValueArray7 {
+ public:
+  ValueArray7(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7) : v1_(v1),
+      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray7& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8>
+class ValueArray8 {
+ public:
+  ValueArray8(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7,
+      T8 v8) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray8& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9>
+class ValueArray9 {
+ public:
+  ValueArray9(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8,
+      T9 v9) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray9& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10>
+class ValueArray10 {
+ public:
+  ValueArray10(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray10& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11>
+class ValueArray11 {
+ public:
+  ValueArray11(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray11& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12>
+class ValueArray12 {
+ public:
+  ValueArray12(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray12& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13>
+class ValueArray13 {
+ public:
+  ValueArray13(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+      v12_(v12), v13_(v13) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray13& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14>
+class ValueArray14 {
+ public:
+  ValueArray14(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) : v1_(v1), v2_(v2), v3_(v3),
+      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray14& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15>
+class ValueArray15 {
+ public:
+  ValueArray15(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) : v1_(v1), v2_(v2),
+      v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray15& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16>
+class ValueArray16 {
+ public:
+  ValueArray16(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16) : v1_(v1),
+      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+      v16_(v16) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray16& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17>
+class ValueArray17 {
+ public:
+  ValueArray17(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16,
+      T17 v17) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray17& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18>
+class ValueArray18 {
+ public:
+  ValueArray18(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray18& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19>
+class ValueArray19 {
+ public:
+  ValueArray19(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray19& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20>
+class ValueArray20 {
+ public:
+  ValueArray20(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+      v19_(v19), v20_(v20) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray20& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21>
+class ValueArray21 {
+ public:
+  ValueArray21(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+      v18_(v18), v19_(v19), v20_(v20), v21_(v21) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray21& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22>
+class ValueArray22 {
+ public:
+  ValueArray22(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22) : v1_(v1), v2_(v2), v3_(v3),
+      v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10),
+      v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16),
+      v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray22& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23>
+class ValueArray23 {
+ public:
+  ValueArray23(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23) : v1_(v1),
+      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+      v22_(v22), v23_(v23) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray23& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24>
+class ValueArray24 {
+ public:
+  ValueArray24(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24) : v1_(v1),
+      v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9),
+      v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15),
+      v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21),
+      v22_(v22), v23_(v23), v24_(v24) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray24& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25>
+class ValueArray25 {
+ public:
+  ValueArray25(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24,
+      T25 v25) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray25& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26>
+class ValueArray26 {
+ public:
+  ValueArray26(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7),
+      v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14),
+      v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20),
+      v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray26& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27>
+class ValueArray27 {
+ public:
+  ValueArray27(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6),
+      v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13),
+      v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19),
+      v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25),
+      v26_(v26), v27_(v27) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray27& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28>
+class ValueArray28 {
+ public:
+  ValueArray28(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5),
+      v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12),
+      v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18),
+      v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24),
+      v25_(v25), v26_(v26), v27_(v27), v28_(v28) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+  void operator=(const ValueArray28& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+};
+
+template <typename T1, typename T2, typename T3, typename T4, typename T5,
+    typename T6, typename T7, typename T8, typename T9, typename T10,
+    typename T11, typename T12, typename T13, typename T14, typename T15,
+    typename T16, typename T17, typename T18, typename T19, typename T20,
+    typename T21, typename T22, typename T23, typename T24, typename T25,
+    typename T26, typename T27, typename T28, typename T29>
+class ValueArray29 {
+ public:
+  ValueArray29(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9,
+      T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17,
+      T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25,
+      T26 v26, T27 v27, T28 v28, T29 v29) : v1_(v1), v2_(v2), v3_(v3), v4_(v4),
+      v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11),
+      v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17),
+      v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23),
+      v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29) {}
+
+  template <typename T>
+  operator ParamGenerator<T>() const {
+    const T array[] = {static_cast<T>(v1_), static_cast<T>(v2_),
+        static_cast<T>(v3_), static_cast<T>(v4_), static_cast<T>(v5_),
+        static_cast<T>(v6_), static_cast<T>(v7_), static_cast<T>(v8_),
+        static_cast<T>(v9_), static_cast<T>(v10_), static_cast<T>(v11_),
+        static_cast<T>(v12_), static_cast<T>(v13_), static_cast<T>(v14_),
+        static_cast<T>(v15_), static_cast<T>(v16_), static_cast<T>(v17_),
+        static_cast<T>(v18_), static_cast<T>(v19_), static_cast<T>(v20_),
+        static_cast<T>(v21_), static_cast<T>(v22_), static_cast<T>(v23_),
+        static_cast<T>(v24_), static_cast<T>(v25_), static_cast<T>(v26_),
+        static_cast<T>(v27_), static_cast<T>(v28_), static_cast<T>(v29_)};
+    return ValuesIn(array);
+  }
+
+ private:
+  // No implementation - assignment is unsupported.
+ void operator=(const ValueArray29& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; +}; + +template +class ValueArray30 { + public: + ValueArray30(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray30& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; +}; + +template +class ValueArray31 { + public: + ValueArray31(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray31& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; +}; + +template +class ValueArray32 { + public: + ValueArray32(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray32& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; +}; + +template +class ValueArray33 { + public: + ValueArray33(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, + T33 v33) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray33& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; +}; + +template +class ValueArray34 { + public: + ValueArray34(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray34& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; +}; + +template +class ValueArray35 { + public: + ValueArray35(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), + v32_(v32), v33_(v33), v34_(v34), v35_(v35) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray35& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; +}; + +template +class ValueArray36 { + public: + ValueArray36(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), + v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray36& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; +}; + +template +class ValueArray37 { + public: + ValueArray37(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), + v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), + v36_(v36), v37_(v37) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray37& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; +}; + +template +class ValueArray38 { + public: + ValueArray38(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray38& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; +}; + +template +class ValueArray39 { + public: + ValueArray39(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray39& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; +}; + +template +class ValueArray40 { + public: + ValueArray40(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), + v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), + v40_(v40) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray40& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; +}; + +template +class ValueArray41 { + public: + ValueArray41(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, + T41 v41) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray41& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; +}; + +template +class ValueArray42 { + public: + ValueArray42(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray42& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; +}; + +template +class ValueArray43 { + public: + ValueArray43(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), + v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), + v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), + v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), + v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), + v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), + v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray43& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; +}; + +template +class ValueArray44 { + public: + ValueArray44(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), + v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), + v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), v18_(v18), + v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), v24_(v24), + v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), v30_(v30), + v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), v36_(v36), + v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), v42_(v42), + v43_(v43), v44_(v44) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray44& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; +}; + +template +class ValueArray45 { + public: + ValueArray45(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), + v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), v11_(v11), + v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), v17_(v17), + v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), v23_(v23), + v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), v29_(v29), + v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), v35_(v35), + v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), v41_(v41), + v42_(v42), v43_(v43), v44_(v44), v45_(v45) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray45& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; +}; + +template +class ValueArray46 { + public: + ValueArray46(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) : v1_(v1), v2_(v2), v3_(v3), + v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), + v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray46& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; +}; + +template +class ValueArray47 { + public: + ValueArray47(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) : v1_(v1), v2_(v2), + v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), v10_(v10), + v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), v16_(v16), + v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), v22_(v22), + v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), v28_(v28), + v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), v34_(v34), + v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), v40_(v40), + v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), v46_(v46), + v47_(v47) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_), static_cast(v47_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray47& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; +}; + +template +class ValueArray48 { + public: + ValueArray48(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48) : v1_(v1), + v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), v8_(v8), v9_(v9), + v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), v15_(v15), + v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), v21_(v21), + v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), v27_(v27), + v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), v33_(v33), + v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), v39_(v39), + v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), v45_(v45), + v46_(v46), v47_(v47), v48_(v48) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_), static_cast(v47_), + static_cast(v48_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray48& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; +}; + +template +class ValueArray49 { + public: + ValueArray49(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, + T49 v49) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), + v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_), static_cast(v47_), + static_cast(v48_), static_cast(v49_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const ValueArray49& other); + + const T1 v1_; + const T2 v2_; + const T3 v3_; + const T4 v4_; + const T5 v5_; + const T6 v6_; + const T7 v7_; + const T8 v8_; + const T9 v9_; + const T10 v10_; + const T11 v11_; + const T12 v12_; + const T13 v13_; + const T14 v14_; + const T15 v15_; + const T16 v16_; + const T17 v17_; + const T18 v18_; + const T19 v19_; + const T20 v20_; + const T21 v21_; + const T22 v22_; + const T23 v23_; + const T24 v24_; + const T25 v25_; + const T26 v26_; + const T27 v27_; + const T28 v28_; + const T29 v29_; + const T30 v30_; + const T31 v31_; + const T32 v32_; + const T33 v33_; + const T34 v34_; + const T35 v35_; + const T36 v36_; + const T37 v37_; + const T38 v38_; + const T39 v39_; + const T40 v40_; + const T41 v41_; + const T42 v42_; + const T43 v43_; + const T44 v44_; + const T45 v45_; + const T46 v46_; + const T47 v47_; + const T48 v48_; + const T49 v49_; +}; + +template +class ValueArray50 { + public: + ValueArray50(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, T48 v48, T49 v49, + T50 v50) : v1_(v1), v2_(v2), v3_(v3), v4_(v4), v5_(v5), v6_(v6), v7_(v7), + v8_(v8), v9_(v9), v10_(v10), v11_(v11), v12_(v12), v13_(v13), v14_(v14), + v15_(v15), v16_(v16), v17_(v17), v18_(v18), v19_(v19), v20_(v20), + v21_(v21), v22_(v22), v23_(v23), v24_(v24), v25_(v25), v26_(v26), + v27_(v27), v28_(v28), v29_(v29), v30_(v30), v31_(v31), v32_(v32), + v33_(v33), v34_(v34), v35_(v35), v36_(v36), v37_(v37), v38_(v38), + v39_(v39), v40_(v40), v41_(v41), v42_(v42), v43_(v43), v44_(v44), + v45_(v45), v46_(v46), v47_(v47), v48_(v48), v49_(v49), v50_(v50) {} + + template + operator ParamGenerator() const { + const T array[] = {static_cast(v1_), static_cast(v2_), + static_cast(v3_), static_cast(v4_), static_cast(v5_), + static_cast(v6_), static_cast(v7_), static_cast(v8_), + static_cast(v9_), static_cast(v10_), static_cast(v11_), + static_cast(v12_), static_cast(v13_), static_cast(v14_), + static_cast(v15_), static_cast(v16_), static_cast(v17_), + static_cast(v18_), static_cast(v19_), static_cast(v20_), + static_cast(v21_), static_cast(v22_), static_cast(v23_), + static_cast(v24_), static_cast(v25_), static_cast(v26_), + static_cast(v27_), static_cast(v28_), static_cast(v29_), + static_cast(v30_), static_cast(v31_), static_cast(v32_), + static_cast(v33_), static_cast(v34_), static_cast(v35_), + static_cast(v36_), static_cast(v37_), static_cast(v38_), + static_cast(v39_), static_cast(v40_), static_cast(v41_), + static_cast(v42_), static_cast(v43_), static_cast(v44_), + static_cast(v45_), static_cast(v46_), static_cast(v47_), + static_cast(v48_), static_cast(v49_), static_cast(v50_)}; + return ValuesIn(array); + } + + private: + // No implementation - assignment is unsupported. 
+  void operator=(const ValueArray50& other);
+
+  const T1 v1_;
+  const T2 v2_;
+  const T3 v3_;
+  const T4 v4_;
+  const T5 v5_;
+  const T6 v6_;
+  const T7 v7_;
+  const T8 v8_;
+  const T9 v9_;
+  const T10 v10_;
+  const T11 v11_;
+  const T12 v12_;
+  const T13 v13_;
+  const T14 v14_;
+  const T15 v15_;
+  const T16 v16_;
+  const T17 v17_;
+  const T18 v18_;
+  const T19 v19_;
+  const T20 v20_;
+  const T21 v21_;
+  const T22 v22_;
+  const T23 v23_;
+  const T24 v24_;
+  const T25 v25_;
+  const T26 v26_;
+  const T27 v27_;
+  const T28 v28_;
+  const T29 v29_;
+  const T30 v30_;
+  const T31 v31_;
+  const T32 v32_;
+  const T33 v33_;
+  const T34 v34_;
+  const T35 v35_;
+  const T36 v36_;
+  const T37 v37_;
+  const T38 v38_;
+  const T39 v39_;
+  const T40 v40_;
+  const T41 v41_;
+  const T42 v42_;
+  const T43 v43_;
+  const T44 v44_;
+  const T45 v45_;
+  const T46 v46_;
+  const T47 v47_;
+  const T48 v48_;
+  const T49 v49_;
+  const T50 v50_;
+};
+
+# if GTEST_HAS_COMBINE
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Generates values from the Cartesian product of values produced
+// by the argument generators.
+//
+template <typename T1, typename T2>
+class CartesianProductGenerator2
+    : public ParamGeneratorInterface< ::testing::tuple<T1, T2> > {
+ public:
+  typedef ::testing::tuple<T1, T2> ParamType;
+
+  CartesianProductGenerator2(const ParamGenerator<T1>& g1,
+      const ParamGenerator<T2>& g2)
+      : g1_(g1), g2_(g2) {}
+  virtual ~CartesianProductGenerator2() {}
+
+  virtual ParamIteratorInterface<ParamType>* Begin() const {
+    return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin());
+  }
+  virtual ParamIteratorInterface<ParamType>* End() const {
+    return new Iterator(this, g1_, g1_.end(), g2_, g2_.end());
+  }
+
+ private:
+  class Iterator : public ParamIteratorInterface<ParamType> {
+   public:
+    Iterator(const ParamGeneratorInterface<ParamType>* base,
+      const ParamGenerator<T1>& g1,
+      const typename ParamGenerator<T1>::iterator& current1,
+      const ParamGenerator<T2>& g2,
+      const typename ParamGenerator<T2>::iterator& current2)
+        : base_(base),
+          begin1_(g1.begin()), end1_(g1.end()), current1_(current1),
+          begin2_(g2.begin()), end2_(g2.end()), current2_(current2) {
+      ComputeCurrentValue();
+    }
+    virtual ~Iterator() {}
+
+    virtual const ParamGeneratorInterface<ParamType>* BaseGenerator() const {
+      return base_;
+    }
+    // Advance should not be called on beyond-of-range iterators
+    // so no component iterators must be beyond end of range, either.
+    virtual void Advance() {
+      assert(!AtEnd());
+      ++current2_;
+      if (current2_ == end2_) {
+        current2_ = begin2_;
+        ++current1_;
+      }
+      ComputeCurrentValue();
+    }
+    virtual ParamIteratorInterface<ParamType>* Clone() const {
+      return new Iterator(*this);
+    }
+    virtual const ParamType* Current() const { return &current_value_; }
+    virtual bool Equals(const ParamIteratorInterface<ParamType>& other) const {
+      // Having the same base generator guarantees that the other
+      // iterator is of the same type and we can downcast.
+      GTEST_CHECK_(BaseGenerator() == other.BaseGenerator())
+          << "The program attempted to compare iterators "
+          << "from different generators." << std::endl;
+      const Iterator* typed_other =
+          CheckedDowncastToActualType<const Iterator>(&other);
+      // We must report iterators equal if they both point beyond their
+      // respective ranges. That can happen in a variety of fashions,
+      // so we have to consult AtEnd().
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + ParamType current_value_; + }; // class CartesianProductGenerator2::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator2& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; +}; // class CartesianProductGenerator2 + + +template +class CartesianProductGenerator3 + : public ParamGeneratorInterface< ::testing::tuple > { + public: + typedef ::testing::tuple ParamType; + + CartesianProductGenerator3(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3) + : g1_(g1), g2_(g2), g3_(g3) {} + virtual ~CartesianProductGenerator3() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
+ virtual void Advance() { + assert(!AtEnd()); + ++current3_; + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + ParamType current_value_; + }; // class CartesianProductGenerator3::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator3& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; +}; // class CartesianProductGenerator3 + + +template +class CartesianProductGenerator4 + : public ParamGeneratorInterface< ::testing::tuple > { + public: + typedef ::testing::tuple ParamType; + + CartesianProductGenerator4(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {} + virtual ~CartesianProductGenerator4() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current4_; + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + ParamType current_value_; + }; // class CartesianProductGenerator4::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator4& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; +}; // class CartesianProductGenerator4 + + +template +class CartesianProductGenerator5 + : public ParamGeneratorInterface< ::testing::tuple > { + public: + typedef ::testing::tuple ParamType; + + CartesianProductGenerator5(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {} + virtual ~CartesianProductGenerator5() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current5_; + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + ParamType current_value_; + }; // class CartesianProductGenerator5::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator5& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; +}; // class CartesianProductGenerator5 + + +template +class CartesianProductGenerator6 + : public ParamGeneratorInterface< ::testing::tuple > { + public: + typedef ::testing::tuple ParamType; + + CartesianProductGenerator6(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {} + virtual ~CartesianProductGenerator6() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current6_; + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + ParamType current_value_; + }; // class CartesianProductGenerator6::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator6& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; +}; // class CartesianProductGenerator6 + + +template +class CartesianProductGenerator7 + : public ParamGeneratorInterface< ::testing::tuple > { + public: + typedef ::testing::tuple ParamType; + + CartesianProductGenerator7(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {} + virtual ~CartesianProductGenerator7() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current7_; + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." 
<< std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + ParamType current_value_; + }; // class CartesianProductGenerator7::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator7& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; +}; // class CartesianProductGenerator7 + + +template +class CartesianProductGenerator8 + : public ParamGeneratorInterface< ::testing::tuple > { + public: + typedef ::testing::tuple ParamType; + + CartesianProductGenerator8(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), + g8_(g8) {} + virtual ~CartesianProductGenerator8() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
+ virtual void Advance() { + assert(!AtEnd()); + ++current8_; + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. 
+ const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + ParamType current_value_; + }; // class CartesianProductGenerator8::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator8& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; +}; // class CartesianProductGenerator8 + + +template +class CartesianProductGenerator9 + : public ParamGeneratorInterface< ::testing::tuple > { + public: + typedef ::testing::tuple ParamType; + + CartesianProductGenerator9(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8, const ParamGenerator& g9) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9) {} + virtual ~CartesianProductGenerator9() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end(), g9_, g9_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8, + const ParamGenerator& g9, + const typename ParamGenerator::iterator& current9) + : base_(base), + 
begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8), + begin9_(g9.begin()), end9_(g9.end()), current9_(current9) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. + virtual void Advance() { + assert(!AtEnd()); + ++current9_; + if (current9_ == end9_) { + current9_ = begin9_; + ++current8_; + } + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). 
+ return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_ && + current9_ == typed_other->current9_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_), + begin9_(other.begin9_), + end9_(other.end9_), + current9_(other.current9_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_, + *current9_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_ || + current9_ == end9_; + } + + // No implementation - assignment is unsupported. + void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + const typename ParamGenerator::iterator begin9_; + const typename ParamGenerator::iterator end9_; + typename ParamGenerator::iterator current9_; + ParamType current_value_; + }; // class CartesianProductGenerator9::Iterator + + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductGenerator9& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; + const ParamGenerator g9_; +}; // class CartesianProductGenerator9 + + +template +class CartesianProductGenerator10 + : public ParamGeneratorInterface< ::testing::tuple > { + public: + typedef ::testing::tuple ParamType; + + CartesianProductGenerator10(const ParamGenerator& g1, + const ParamGenerator& g2, const ParamGenerator& g3, + const ParamGenerator& g4, const ParamGenerator& g5, + const ParamGenerator& g6, const ParamGenerator& g7, + const ParamGenerator& g8, const ParamGenerator& g9, + const ParamGenerator& g10) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9), g10_(g10) {} + virtual ~CartesianProductGenerator10() {} + + virtual ParamIteratorInterface* Begin() const { + return new Iterator(this, g1_, g1_.begin(), g2_, g2_.begin(), g3_, + g3_.begin(), g4_, g4_.begin(), g5_, g5_.begin(), g6_, g6_.begin(), g7_, + g7_.begin(), g8_, g8_.begin(), g9_, g9_.begin(), g10_, g10_.begin()); + } + virtual ParamIteratorInterface* End() const { + return new Iterator(this, g1_, g1_.end(), g2_, g2_.end(), g3_, g3_.end(), + g4_, g4_.end(), g5_, g5_.end(), g6_, g6_.end(), g7_, g7_.end(), g8_, + g8_.end(), g9_, g9_.end(), g10_, g10_.end()); + } + + private: + class Iterator : public ParamIteratorInterface { + public: + Iterator(const ParamGeneratorInterface* base, + const ParamGenerator& g1, + const typename ParamGenerator::iterator& current1, + const ParamGenerator& g2, + const typename ParamGenerator::iterator& current2, + const ParamGenerator& g3, + const typename ParamGenerator::iterator& current3, + const ParamGenerator& g4, + const typename ParamGenerator::iterator& current4, + const ParamGenerator& g5, + const typename ParamGenerator::iterator& current5, + const ParamGenerator& g6, + const typename ParamGenerator::iterator& current6, + const ParamGenerator& g7, + const typename ParamGenerator::iterator& current7, + const ParamGenerator& g8, + const typename ParamGenerator::iterator& current8, + const ParamGenerator& g9, + const typename ParamGenerator::iterator& current9, + const ParamGenerator& g10, + const typename ParamGenerator::iterator& current10) + : base_(base), + begin1_(g1.begin()), end1_(g1.end()), current1_(current1), + begin2_(g2.begin()), end2_(g2.end()), current2_(current2), + begin3_(g3.begin()), end3_(g3.end()), current3_(current3), + begin4_(g4.begin()), end4_(g4.end()), current4_(current4), + begin5_(g5.begin()), end5_(g5.end()), current5_(current5), + begin6_(g6.begin()), end6_(g6.end()), current6_(current6), + begin7_(g7.begin()), end7_(g7.end()), current7_(current7), + begin8_(g8.begin()), end8_(g8.end()), current8_(current8), + begin9_(g9.begin()), end9_(g9.end()), current9_(current9), + begin10_(g10.begin()), end10_(g10.end()), current10_(current10) { + ComputeCurrentValue(); + } + virtual ~Iterator() {} + + virtual const ParamGeneratorInterface* BaseGenerator() const { + return base_; + } + // Advance should not be called on beyond-of-range iterators + // so no component iterators must be beyond end of range, either. 
+ virtual void Advance() { + assert(!AtEnd()); + ++current10_; + if (current10_ == end10_) { + current10_ = begin10_; + ++current9_; + } + if (current9_ == end9_) { + current9_ = begin9_; + ++current8_; + } + if (current8_ == end8_) { + current8_ = begin8_; + ++current7_; + } + if (current7_ == end7_) { + current7_ = begin7_; + ++current6_; + } + if (current6_ == end6_) { + current6_ = begin6_; + ++current5_; + } + if (current5_ == end5_) { + current5_ = begin5_; + ++current4_; + } + if (current4_ == end4_) { + current4_ = begin4_; + ++current3_; + } + if (current3_ == end3_) { + current3_ = begin3_; + ++current2_; + } + if (current2_ == end2_) { + current2_ = begin2_; + ++current1_; + } + ComputeCurrentValue(); + } + virtual ParamIteratorInterface* Clone() const { + return new Iterator(*this); + } + virtual const ParamType* Current() const { return ¤t_value_; } + virtual bool Equals(const ParamIteratorInterface& other) const { + // Having the same base generator guarantees that the other + // iterator is of the same type and we can downcast. + GTEST_CHECK_(BaseGenerator() == other.BaseGenerator()) + << "The program attempted to compare iterators " + << "from different generators." << std::endl; + const Iterator* typed_other = + CheckedDowncastToActualType(&other); + // We must report iterators equal if they both point beyond their + // respective ranges. That can happen in a variety of fashions, + // so we have to consult AtEnd(). + return (AtEnd() && typed_other->AtEnd()) || + ( + current1_ == typed_other->current1_ && + current2_ == typed_other->current2_ && + current3_ == typed_other->current3_ && + current4_ == typed_other->current4_ && + current5_ == typed_other->current5_ && + current6_ == typed_other->current6_ && + current7_ == typed_other->current7_ && + current8_ == typed_other->current8_ && + current9_ == typed_other->current9_ && + current10_ == typed_other->current10_); + } + + private: + Iterator(const Iterator& other) + : base_(other.base_), + begin1_(other.begin1_), + end1_(other.end1_), + current1_(other.current1_), + begin2_(other.begin2_), + end2_(other.end2_), + current2_(other.current2_), + begin3_(other.begin3_), + end3_(other.end3_), + current3_(other.current3_), + begin4_(other.begin4_), + end4_(other.end4_), + current4_(other.current4_), + begin5_(other.begin5_), + end5_(other.end5_), + current5_(other.current5_), + begin6_(other.begin6_), + end6_(other.end6_), + current6_(other.current6_), + begin7_(other.begin7_), + end7_(other.end7_), + current7_(other.current7_), + begin8_(other.begin8_), + end8_(other.end8_), + current8_(other.current8_), + begin9_(other.begin9_), + end9_(other.end9_), + current9_(other.current9_), + begin10_(other.begin10_), + end10_(other.end10_), + current10_(other.current10_) { + ComputeCurrentValue(); + } + + void ComputeCurrentValue() { + if (!AtEnd()) + current_value_ = ParamType(*current1_, *current2_, *current3_, + *current4_, *current5_, *current6_, *current7_, *current8_, + *current9_, *current10_); + } + bool AtEnd() const { + // We must report iterator past the end of the range when either of the + // component iterators has reached the end of its range. + return + current1_ == end1_ || + current2_ == end2_ || + current3_ == end3_ || + current4_ == end4_ || + current5_ == end5_ || + current6_ == end6_ || + current7_ == end7_ || + current8_ == end8_ || + current9_ == end9_ || + current10_ == end10_; + } + + // No implementation - assignment is unsupported. 
+ void operator=(const Iterator& other); + + const ParamGeneratorInterface* const base_; + // begin[i]_ and end[i]_ define the i-th range that Iterator traverses. + // current[i]_ is the actual traversing iterator. + const typename ParamGenerator::iterator begin1_; + const typename ParamGenerator::iterator end1_; + typename ParamGenerator::iterator current1_; + const typename ParamGenerator::iterator begin2_; + const typename ParamGenerator::iterator end2_; + typename ParamGenerator::iterator current2_; + const typename ParamGenerator::iterator begin3_; + const typename ParamGenerator::iterator end3_; + typename ParamGenerator::iterator current3_; + const typename ParamGenerator::iterator begin4_; + const typename ParamGenerator::iterator end4_; + typename ParamGenerator::iterator current4_; + const typename ParamGenerator::iterator begin5_; + const typename ParamGenerator::iterator end5_; + typename ParamGenerator::iterator current5_; + const typename ParamGenerator::iterator begin6_; + const typename ParamGenerator::iterator end6_; + typename ParamGenerator::iterator current6_; + const typename ParamGenerator::iterator begin7_; + const typename ParamGenerator::iterator end7_; + typename ParamGenerator::iterator current7_; + const typename ParamGenerator::iterator begin8_; + const typename ParamGenerator::iterator end8_; + typename ParamGenerator::iterator current8_; + const typename ParamGenerator::iterator begin9_; + const typename ParamGenerator::iterator end9_; + typename ParamGenerator::iterator current9_; + const typename ParamGenerator::iterator begin10_; + const typename ParamGenerator::iterator end10_; + typename ParamGenerator::iterator current10_; + ParamType current_value_; + }; // class CartesianProductGenerator10::Iterator + + // No implementation - assignment is unsupported. + void operator=(const CartesianProductGenerator10& other); + + const ParamGenerator g1_; + const ParamGenerator g2_; + const ParamGenerator g3_; + const ParamGenerator g4_; + const ParamGenerator g5_; + const ParamGenerator g6_; + const ParamGenerator g7_; + const ParamGenerator g8_; + const ParamGenerator g9_; + const ParamGenerator g10_; +}; // class CartesianProductGenerator10 + + +// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE. +// +// Helper classes providing Combine() with polymorphic features. They allow +// casting CartesianProductGeneratorN to ParamGenerator if T is +// convertible to U. +// +template +class CartesianProductHolder2 { + public: +CartesianProductHolder2(const Generator1& g1, const Generator2& g2) + : g1_(g1), g2_(g2) {} + template + operator ParamGenerator< ::testing::tuple >() const { + return ParamGenerator< ::testing::tuple >( + new CartesianProductGenerator2( + static_cast >(g1_), + static_cast >(g2_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder2& other); + + const Generator1 g1_; + const Generator2 g2_; +}; // class CartesianProductHolder2 + +template +class CartesianProductHolder3 { + public: +CartesianProductHolder3(const Generator1& g1, const Generator2& g2, + const Generator3& g3) + : g1_(g1), g2_(g2), g3_(g3) {} + template + operator ParamGenerator< ::testing::tuple >() const { + return ParamGenerator< ::testing::tuple >( + new CartesianProductGenerator3( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder3& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; +}; // class CartesianProductHolder3 + +template +class CartesianProductHolder4 { + public: +CartesianProductHolder4(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4) {} + template + operator ParamGenerator< ::testing::tuple >() const { + return ParamGenerator< ::testing::tuple >( + new CartesianProductGenerator4( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder4& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; +}; // class CartesianProductHolder4 + +template +class CartesianProductHolder5 { + public: +CartesianProductHolder5(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5) {} + template + operator ParamGenerator< ::testing::tuple >() const { + return ParamGenerator< ::testing::tuple >( + new CartesianProductGenerator5( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder5& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; +}; // class CartesianProductHolder5 + +template +class CartesianProductHolder6 { + public: +CartesianProductHolder6(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6) {} + template + operator ParamGenerator< ::testing::tuple >() const { + return ParamGenerator< ::testing::tuple >( + new CartesianProductGenerator6( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder6& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; +}; // class CartesianProductHolder6 + +template +class CartesianProductHolder7 { + public: +CartesianProductHolder7(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7) {} + template + operator ParamGenerator< ::testing::tuple >() const { + return ParamGenerator< ::testing::tuple >( + new CartesianProductGenerator7( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_))); + } + + private: + // No implementation - assignment is unsupported. 
+ void operator=(const CartesianProductHolder7& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; +}; // class CartesianProductHolder7 + +template +class CartesianProductHolder8 { + public: +CartesianProductHolder8(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), + g8_(g8) {} + template + operator ParamGenerator< ::testing::tuple >() const { + return ParamGenerator< ::testing::tuple >( + new CartesianProductGenerator8( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder8& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; +}; // class CartesianProductHolder8 + +template +class CartesianProductHolder9 { + public: +CartesianProductHolder9(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8, + const Generator9& g9) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9) {} + template + operator ParamGenerator< ::testing::tuple >() const { + return ParamGenerator< ::testing::tuple >( + new CartesianProductGenerator9( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_), + static_cast >(g9_))); + } + + private: + // No implementation - assignment is unsupported. + void operator=(const CartesianProductHolder9& other); + + const Generator1 g1_; + const Generator2 g2_; + const Generator3 g3_; + const Generator4 g4_; + const Generator5 g5_; + const Generator6 g6_; + const Generator7 g7_; + const Generator8 g8_; + const Generator9 g9_; +}; // class CartesianProductHolder9 + +template +class CartesianProductHolder10 { + public: +CartesianProductHolder10(const Generator1& g1, const Generator2& g2, + const Generator3& g3, const Generator4& g4, const Generator5& g5, + const Generator6& g6, const Generator7& g7, const Generator8& g8, + const Generator9& g9, const Generator10& g10) + : g1_(g1), g2_(g2), g3_(g3), g4_(g4), g5_(g5), g6_(g6), g7_(g7), g8_(g8), + g9_(g9), g10_(g10) {} + template + operator ParamGenerator< ::testing::tuple >() const { + return ParamGenerator< ::testing::tuple >( + new CartesianProductGenerator10( + static_cast >(g1_), + static_cast >(g2_), + static_cast >(g3_), + static_cast >(g4_), + static_cast >(g5_), + static_cast >(g6_), + static_cast >(g7_), + static_cast >(g8_), + static_cast >(g9_), + static_cast >(g10_))); + } + + private: + // No implementation - assignment is unsupported. 
+  void operator=(const CartesianProductHolder10& other);
+
+  const Generator1 g1_;
+  const Generator2 g2_;
+  const Generator3 g3_;
+  const Generator4 g4_;
+  const Generator5 g5_;
+  const Generator6 g6_;
+  const Generator7 g7_;
+  const Generator8 g8_;
+  const Generator9 g9_;
+  const Generator10 g10_;
+};  // class CartesianProductHolder10
+
+# endif  // GTEST_HAS_COMBINE
+
+}  // namespace internal
+}  // namespace testing
+
+#endif  // GTEST_HAS_PARAM_TEST
+
+#endif  // GTEST_INCLUDE_GTEST_INTERNAL_GTEST_PARAM_UTIL_GENERATED_H_
+
+#if GTEST_HAS_PARAM_TEST
+
+namespace testing {
+
+// Functions producing parameter generators.
+//
+// Google Test uses these generators to produce parameters for value-
+// parameterized tests. When a parameterized test case is instantiated
+// with a particular generator, Google Test creates and runs tests
+// for each element in the sequence produced by the generator.
+//
+// In the following sample, tests from test case FooTest are instantiated
+// three times each, with parameter values 3, 5, and 8:
+//
+// class FooTest : public TestWithParam<int> { ... };
+//
+// TEST_P(FooTest, TestThis) {
+// }
+// TEST_P(FooTest, TestThat) {
+// }
+// INSTANTIATE_TEST_CASE_P(TestSequence, FooTest, Values(3, 5, 8));
+//
+
+// Range() returns generators providing sequences of values in a range.
+//
+// Synopsis:
+// Range(start, end)
+//   - returns a generator producing a sequence of values {start, start+1,
+//     start+2, ..., }.
+// Range(start, end, step)
+//   - returns a generator producing a sequence of values {start, start+step,
+//     start+step+step, ..., }.
+// Notes:
+//   * The generated sequences never include end. For example, Range(1, 5)
+//     returns a generator producing a sequence {1, 2, 3, 4}. Range(1, 9, 2)
+//     returns a generator producing {1, 3, 5, 7}.
+//   * start and end must have the same type. That type may be any integral or
+//     floating-point type or a user defined type satisfying these conditions:
+//     * It must be assignable (have operator=() defined).
+//     * It must have operator+() (operator+(int-compatible type) for
+//       two-operand version).
+//     * It must have operator<() defined.
+//     Elements in the resulting sequences will also have that type.
+//   * Condition start < end must be satisfied in order for resulting sequences
+//     to contain any elements.
+//
+template <typename T, typename IncrementT>
+internal::ParamGenerator<T> Range(T start, T end, IncrementT step) {
+  return internal::ParamGenerator<T>(
+      new internal::RangeGenerator<T, IncrementT>(start, end, step));
+}
+
+template <typename T>
+internal::ParamGenerator<T> Range(T start, T end) {
+  return Range(start, end, 1);
+}
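+
+// As an illustrative sketch (the fixture name below is hypothetical and not
+// part of the original header), a stepped Range() instantiates a test once
+// per generated value, here 0, 2, 4, 6, and 8:
+//
+// class StepTest : public testing::TestWithParam<int> { ... };
+//
+// TEST_P(StepTest, HandlesEvenInput) {
+//   int n = GetParam();  // receives 0, 2, 4, 6, then 8
+//   ...
+// }
+// INSTANTIATE_TEST_CASE_P(EvenSteps, StepTest, Range(0, 10, 2));
+//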
+// ValuesIn() function allows generation of tests with parameters coming from
+// a container.
+//
+// Synopsis:
+// ValuesIn(const T (&array)[N])
+//   - returns a generator producing sequences with elements from
+//     a C-style array.
+// ValuesIn(const Container& container)
+//   - returns a generator producing sequences with elements from
+//     an STL-style container.
+// ValuesIn(Iterator begin, Iterator end)
+//   - returns a generator producing sequences with elements from
+//     a range [begin, end) defined by a pair of STL-style iterators. These
+//     iterators can also be plain C pointers.
+//
+// Please note that ValuesIn copies the values from the containers
+// passed in and keeps them to generate tests in RUN_ALL_TESTS().
+//
+// Examples:
+//
+// This instantiates tests from test case StringTest
+// each with C-string values of "foo", "bar", and "baz":
+//
+// const char* strings[] = {"foo", "bar", "baz"};
+// INSTANTIATE_TEST_CASE_P(StringSequence, StringTest, ValuesIn(strings));
+//
+// This instantiates tests from test case StlStringTest
+// each with STL strings with values "a" and "b":
+//
+// ::std::vector< ::std::string> GetParameterStrings() {
+//   ::std::vector< ::std::string> v;
+//   v.push_back("a");
+//   v.push_back("b");
+//   return v;
+// }
+//
+// INSTANTIATE_TEST_CASE_P(CharSequence,
+//                         StlStringTest,
+//                         ValuesIn(GetParameterStrings()));
+//
+//
+// This will also instantiate tests from CharTest
+// each with parameter values 'a' and 'b':
+//
+// ::std::list<char> GetParameterChars() {
+//   ::std::list<char> list;
+//   list.push_back('a');
+//   list.push_back('b');
+//   return list;
+// }
+// ::std::list<char> l = GetParameterChars();
+// INSTANTIATE_TEST_CASE_P(CharSequence2,
+//                         CharTest,
+//                         ValuesIn(l.begin(), l.end()));
+//
+template <typename ForwardIterator>
+internal::ParamGenerator<
+  typename ::testing::internal::IteratorTraits<ForwardIterator>::value_type>
+ValuesIn(ForwardIterator begin, ForwardIterator end) {
+  typedef typename ::testing::internal::IteratorTraits<ForwardIterator>
+      ::value_type ParamType;
+  return internal::ParamGenerator<ParamType>(
+      new internal::ValuesInIteratorRangeGenerator<ParamType>(begin, end));
+}
+
+template <typename T, size_t N>
+internal::ParamGenerator<T> ValuesIn(const T (&array)[N]) {
+  return ValuesIn(array, array + N);
+}
+
+template <class Container>
+internal::ParamGenerator<typename Container::value_type> ValuesIn(
+    const Container& container) {
+  return ValuesIn(container.begin(), container.end());
+}
+
+// Values() allows generating tests from explicitly specified list of
+// parameters.
+//
+// Synopsis:
+// Values(T v1, T v2, ..., T vN)
+//   - returns a generator producing sequences with elements v1, v2, ..., vN.
+//
+// For example, this instantiates tests from test case BarTest each
+// with values "one", "two", and "three":
+//
+// INSTANTIATE_TEST_CASE_P(NumSequence, BarTest, Values("one", "two", "three"));
+//
+// This instantiates tests from test case BazTest each with values 1, 2, 3.5.
+// The exact type of values will depend on the type of parameter in BazTest.
+//
+// INSTANTIATE_TEST_CASE_P(FloatingNumbers, BazTest, Values(1, 2, 3.5));
+//
+// Currently, Values() supports from 1 to 50 parameters.
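+//
+// As a further illustrative sketch (the fixture name is hypothetical, not
+// part of the original header): each argument keeps its own type until the
+// fixture's parameter type is applied, so integer literals are converted to
+// double for a TestWithParam<double> fixture:
+//
+// class ScaleTest : public testing::TestWithParam<double> { ... };
+//
+// TEST_P(ScaleTest, StaysFinite) {
+//   double scale = GetParam();  // receives 1.0, 2.0, then 3.5
+//   ...
+// }
+// INSTANTIATE_TEST_CASE_P(Scales, ScaleTest, Values(1, 2, 3.5));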
+// +template +internal::ValueArray1 Values(T1 v1) { + return internal::ValueArray1(v1); +} + +template +internal::ValueArray2 Values(T1 v1, T2 v2) { + return internal::ValueArray2(v1, v2); +} + +template +internal::ValueArray3 Values(T1 v1, T2 v2, T3 v3) { + return internal::ValueArray3(v1, v2, v3); +} + +template +internal::ValueArray4 Values(T1 v1, T2 v2, T3 v3, T4 v4) { + return internal::ValueArray4(v1, v2, v3, v4); +} + +template +internal::ValueArray5 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5) { + return internal::ValueArray5(v1, v2, v3, v4, v5); +} + +template +internal::ValueArray6 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6) { + return internal::ValueArray6(v1, v2, v3, v4, v5, v6); +} + +template +internal::ValueArray7 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7) { + return internal::ValueArray7(v1, v2, v3, v4, v5, + v6, v7); +} + +template +internal::ValueArray8 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8) { + return internal::ValueArray8(v1, v2, v3, v4, + v5, v6, v7, v8); +} + +template +internal::ValueArray9 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9) { + return internal::ValueArray9(v1, v2, v3, + v4, v5, v6, v7, v8, v9); +} + +template +internal::ValueArray10 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10) { + return internal::ValueArray10(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10); +} + +template +internal::ValueArray11 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11) { + return internal::ValueArray11(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11); +} + +template +internal::ValueArray12 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12) { + return internal::ValueArray12(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12); +} + +template +internal::ValueArray13 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13) { + return internal::ValueArray13(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13); +} + +template +internal::ValueArray14 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14) { + return internal::ValueArray14(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14); +} + +template +internal::ValueArray15 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15) { + return internal::ValueArray15(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15); +} + +template +internal::ValueArray16 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16) { + return internal::ValueArray16(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16); +} + +template +internal::ValueArray17 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17) { + return internal::ValueArray17(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17); +} + +template +internal::ValueArray18 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18) { + return internal::ValueArray18(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18); +} + +template +internal::ValueArray19 
Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19) { + return internal::ValueArray19(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19); +} + +template +internal::ValueArray20 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20) { + return internal::ValueArray20(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20); +} + +template +internal::ValueArray21 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21) { + return internal::ValueArray21(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21); +} + +template +internal::ValueArray22 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22) { + return internal::ValueArray22(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22); +} + +template +internal::ValueArray23 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23) { + return internal::ValueArray23(v1, v2, v3, + v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23); +} + +template +internal::ValueArray24 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24) { + return internal::ValueArray24(v1, v2, + v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, + v19, v20, v21, v22, v23, v24); +} + +template +internal::ValueArray25 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, + T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, + T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25) { + return internal::ValueArray25(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, + v18, v19, v20, v21, v22, v23, v24, v25); +} + +template +internal::ValueArray26 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26) { + return internal::ValueArray26(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, + v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26); +} + +template +internal::ValueArray27 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27) { + return internal::ValueArray27(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, + v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27); +} + +template +internal::ValueArray28 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 
v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28) { + return internal::ValueArray28(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, + v28); +} + +template +internal::ValueArray29 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29) { + return internal::ValueArray29(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, + v27, v28, v29); +} + +template +internal::ValueArray30 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30) { + return internal::ValueArray30(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, + v26, v27, v28, v29, v30); +} + +template +internal::ValueArray31 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31) { + return internal::ValueArray31(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, + v25, v26, v27, v28, v29, v30, v31); +} + +template +internal::ValueArray32 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32) { + return internal::ValueArray32(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32); +} + +template +internal::ValueArray33 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33) { + return internal::ValueArray33(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33); +} + +template +internal::ValueArray34 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, + T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, + T31 v31, T32 v32, T33 v33, T34 v34) { + return internal::ValueArray34(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, + v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34); +} + +template +internal::ValueArray35 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, 
T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35) { + return internal::ValueArray35(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, + v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35); +} + +template +internal::ValueArray36 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36) { + return internal::ValueArray36(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36); +} + +template +internal::ValueArray37 Values(T1 v1, T2 v2, T3 v3, + T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37) { + return internal::ValueArray37(v1, v2, v3, + v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36, v37); +} + +template +internal::ValueArray38 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37, T38 v38) { + return internal::ValueArray38(v1, v2, + v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, + v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, + v33, v34, v35, v36, v37, v38); +} + +template +internal::ValueArray39 Values(T1 v1, T2 v2, + T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, + T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, + T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, + T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, + T37 v37, T38 v38, T39 v39) { + return internal::ValueArray39(v1, + v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, + v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, + v32, v33, v34, v35, v36, v37, v38, v39); +} + +template +internal::ValueArray40 Values(T1 v1, + T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, + T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, + T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, + T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, + T36 v36, T37 v37, T38 v38, T39 v39, T40 v40) { + return internal::ValueArray40(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, + v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, + v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40); +} + +template +internal::ValueArray41 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + 
T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41) { + return internal::ValueArray41(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, + v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, v28, + v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41); +} + +template +internal::ValueArray42 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42) { + return internal::ValueArray42(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, v13, + v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, v27, + v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, v41, + v42); +} + +template +internal::ValueArray43 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43) { + return internal::ValueArray43(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, v12, + v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, v26, + v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, v40, + v41, v42, v43); +} + +template +internal::ValueArray44 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, + T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, T17 v17, + T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, T25 v25, + T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, T33 v33, + T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, T41 v41, + T42 v42, T43 v43, T44 v44) { + return internal::ValueArray44(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, v11, + v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, v25, + v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, v39, + v40, v41, v42, v43, v44); +} + +template +internal::ValueArray45 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, T8 v8, + T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, T16 v16, + T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, T24 v24, + T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, T32 v32, + T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, T40 v40, + T41 v41, T42 v42, T43 v43, T44 v44, T45 v45) { + return internal::ValueArray45(v1, v2, v3, v4, v5, v6, v7, v8, v9, v10, + v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, v24, + v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, v38, + v39, v40, v41, v42, v43, v44, v45); +} + +template +internal::ValueArray46 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 
v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46) { + return internal::ValueArray46(v1, v2, v3, v4, v5, v6, v7, v8, v9, + v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, + v38, v39, v40, v41, v42, v43, v44, v45, v46); +} + +template +internal::ValueArray47 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, T7 v7, + T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47) { + return internal::ValueArray47(v1, v2, v3, v4, v5, v6, v7, v8, + v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, v23, + v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, v37, + v38, v39, v40, v41, v42, v43, v44, v45, v46, v47); +} + +template +internal::ValueArray48 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, T6 v6, + T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, T15 v15, + T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, T23 v23, + T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, T31 v31, + T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, T39 v39, + T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, T47 v47, + T48 v48) { + return internal::ValueArray48(v1, v2, v3, v4, v5, v6, v7, + v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, v22, + v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, v36, + v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48); +} + +template +internal::ValueArray49 Values(T1 v1, T2 v2, T3 v3, T4 v4, T5 v5, + T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, T14 v14, + T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, T22 v22, + T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, T30 v30, + T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, T38 v38, + T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, T46 v46, + T47 v47, T48 v48, T49 v49) { + return internal::ValueArray49(v1, v2, v3, v4, v5, v6, + v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, v20, v21, + v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, v34, v35, + v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, v48, v49); +} + +template +internal::ValueArray50 Values(T1 v1, T2 v2, T3 v3, T4 v4, + T5 v5, T6 v6, T7 v7, T8 v8, T9 v9, T10 v10, T11 v11, T12 v12, T13 v13, + T14 v14, T15 v15, T16 v16, T17 v17, T18 v18, T19 v19, T20 v20, T21 v21, + T22 v22, T23 v23, T24 v24, T25 v25, T26 v26, T27 v27, T28 v28, T29 v29, + T30 v30, T31 v31, T32 v32, T33 v33, T34 v34, T35 v35, T36 v36, T37 v37, + T38 v38, T39 v39, T40 v40, T41 v41, T42 v42, T43 v43, T44 v44, T45 v45, + T46 v46, T47 v47, T48 v48, T49 v49, T50 v50) { + return internal::ValueArray50(v1, v2, v3, v4, + v5, v6, v7, v8, v9, v10, v11, v12, v13, v14, v15, v16, v17, v18, v19, + v20, v21, v22, v23, v24, v25, v26, v27, v28, v29, v30, v31, v32, v33, + v34, v35, v36, v37, v38, v39, v40, v41, v42, v43, v44, v45, v46, v47, + v48, v49, v50); +} + +// Bool() allows generating tests with parameters in a set of (false, true). +// +// Synopsis: +// Bool() +// - returns a generator producing sequences with elements {false, true}. 
+//
+// It is useful when testing code that depends on Boolean flags. Combinations
+// of multiple flags can be tested when several Bool()'s are combined using
+// the Combine() function.
+//
+// In the following example all tests in the test case FlagDependentTest
+// will be instantiated twice with parameters false and true.
+//
+// class FlagDependentTest : public testing::TestWithParam<bool> {
+//   virtual void SetUp() {
+//     external_flag = GetParam();
+//   }
+// };
+// INSTANTIATE_TEST_CASE_P(BoolSequence, FlagDependentTest, Bool());
+//
+inline internal::ParamGenerator<bool> Bool() {
+  return Values(false, true);
+}
+
+# if GTEST_HAS_COMBINE
+// Combine() allows the user to combine two or more sequences to produce
+// values of a Cartesian product of those sequences' elements.
+//
+// Synopsis:
+// Combine(gen1, gen2, ..., genN)
+//   - returns a generator producing sequences with elements coming from
+//     the Cartesian product of elements from the sequences generated by
+//     gen1, gen2, ..., genN. The sequence elements will have a type of
+//     tuple<T1, T2, ..., TN> where T1, T2, ..., TN are the types
+//     of elements from sequences produced by gen1, gen2, ..., genN.
+//
+// Combine can have up to 10 arguments. This number is currently limited
+// by the maximum number of elements in the tuple implementation used by Google
+// Test.
+//
+// Example:
+//
+// This will instantiate tests in test case AnimalTest each one with
+// the parameter values tuple("cat", BLACK), tuple("cat", WHITE),
+// tuple("dog", BLACK), and tuple("dog", WHITE):
+//
+// enum Color { BLACK, GRAY, WHITE };
+// class AnimalTest
+//     : public testing::TestWithParam<tuple<const char*, Color> > {...};
+//
+// TEST_P(AnimalTest, AnimalLooksNice) {...}
+//
+// INSTANTIATE_TEST_CASE_P(AnimalVariations, AnimalTest,
+//                         Combine(Values("cat", "dog"),
+//                                 Values(BLACK, WHITE)));
+//
+// This will instantiate tests in FlagDependentTest with all variations of two
+// Boolean flags:
+//
+// class FlagDependentTest
+//     : public testing::TestWithParam<tuple<bool, bool> > {
+//   virtual void SetUp() {
+//     // Assigns external_flag_1 and external_flag_2 values from the tuple.
+//     tie(external_flag_1, external_flag_2) = GetParam();
+//   }
+// };
+//
+// TEST_P(FlagDependentTest, TestFeature1) {
+//   // Test your code using external_flag_1 and external_flag_2 here.
+// }
+// INSTANTIATE_TEST_CASE_P(TwoBoolSequence, FlagDependentTest,
+//                         Combine(Bool(), Bool()));
+//
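+// As a further illustrative sketch (hypothetical names, not part of the
+// original header), generators of different kinds can be mixed freely, e.g.
+// every (backend, verbose) pair from a ValuesIn() sequence and Bool():
+//
+// class BackendTest
+//     : public testing::TestWithParam<tuple<const char*, bool> > {...};
+//
+// const char* backends[] = {"sqlite", "memory"};
+// INSTANTIATE_TEST_CASE_P(AllBackends, BackendTest,
+//                         Combine(ValuesIn(backends), Bool()));
+//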
+template <typename Generator1, typename Generator2>
+internal::CartesianProductHolder2<Generator1, Generator2> Combine(
+    const Generator1& g1, const Generator2& g2) {
+  return internal::CartesianProductHolder2<Generator1, Generator2>(
+      g1, g2);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3>
+internal::CartesianProductHolder3<Generator1, Generator2, Generator3> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3) {
+  return internal::CartesianProductHolder3<Generator1, Generator2, Generator3>(
+      g1, g2, g3);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4>
+internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
+    Generator4> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4) {
+  return internal::CartesianProductHolder4<Generator1, Generator2, Generator3,
+      Generator4>(
+      g1, g2, g3, g4);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4, typename Generator5>
+internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
+    Generator4, Generator5> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4, const Generator5& g5) {
+  return internal::CartesianProductHolder5<Generator1, Generator2, Generator3,
+      Generator4, Generator5>(
+      g1, g2, g3, g4, g5);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4, typename Generator5, typename Generator6>
+internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
+    Generator4, Generator5, Generator6> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4, const Generator5& g5, const Generator6& g6) {
+  return internal::CartesianProductHolder6<Generator1, Generator2, Generator3,
+      Generator4, Generator5, Generator6>(
+      g1, g2, g3, g4, g5, g6);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4, typename Generator5, typename Generator6,
+    typename Generator7>
+internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
+    Generator4, Generator5, Generator6, Generator7> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4, const Generator5& g5, const Generator6& g6,
+        const Generator7& g7) {
+  return internal::CartesianProductHolder7<Generator1, Generator2, Generator3,
+      Generator4, Generator5, Generator6, Generator7>(
+      g1, g2, g3, g4, g5, g6, g7);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4, typename Generator5, typename Generator6,
+    typename Generator7, typename Generator8>
+internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
+    Generator4, Generator5, Generator6, Generator7, Generator8> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4, const Generator5& g5, const Generator6& g6,
+        const Generator7& g7, const Generator8& g8) {
+  return internal::CartesianProductHolder8<Generator1, Generator2, Generator3,
+      Generator4, Generator5, Generator6, Generator7, Generator8>(
+      g1, g2, g3, g4, g5, g6, g7, g8);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4, typename Generator5, typename Generator6,
+    typename Generator7, typename Generator8, typename Generator9>
+internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
+    Generator4, Generator5, Generator6, Generator7, Generator8,
+    Generator9> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4, const Generator5& g5, const Generator6& g6,
+        const Generator7& g7, const Generator8& g8, const Generator9& g9) {
+  return internal::CartesianProductHolder9<Generator1, Generator2, Generator3,
+      Generator4, Generator5, Generator6, Generator7, Generator8, Generator9>(
+      g1, g2, g3, g4, g5, g6, g7, g8, g9);
+}
+
+template <typename Generator1, typename Generator2, typename Generator3,
+    typename Generator4, typename Generator5, typename Generator6,
+    typename Generator7, typename Generator8, typename Generator9,
+    typename Generator10>
+internal::CartesianProductHolder10<Generator1, Generator2, Generator3,
+    Generator4, Generator5, Generator6, Generator7, Generator8, Generator9,
+    Generator10> Combine(
+    const Generator1& g1, const Generator2& g2, const Generator3& g3,
+        const Generator4& g4, const Generator5& g5, const Generator6& g6,
+        const Generator7& g7, const Generator8& g8, const Generator9& g9,
+        const Generator10& g10) {
+  return internal::CartesianProductHolder10<Generator1, Generator2,
+      Generator3, Generator4, Generator5, Generator6, Generator7, Generator8,
+      Generator9, Generator10>(
+      g1, g2, g3, g4, g5, g6, g7, g8, g9, g10);
+}
+# endif  // GTEST_HAS_COMBINE
+
+
+
+# define TEST_P(test_case_name, test_name) \
+  class GTEST_TEST_CLASS_NAME_(test_case_name, test_name) \
+      : public test_case_name { \
+   public: \
+    GTEST_TEST_CLASS_NAME_(test_case_name, test_name)() {} \
+    virtual void TestBody(); \
+   private: \
+    static int AddToRegistry() { \
+      ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+          GetTestCasePatternHolder<test_case_name>(\
+              #test_case_name, __FILE__, __LINE__)->AddTestPattern(\
+                  #test_case_name, \
+                  #test_name, \
+                  new ::testing::internal::TestMetaFactory< \
+                      GTEST_TEST_CLASS_NAME_(test_case_name, test_name)>()); \
+      return 0; \
+    } \
+    static int gtest_registering_dummy_ GTEST_ATTRIBUTE_UNUSED_; \
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(\
+        GTEST_TEST_CLASS_NAME_(test_case_name, test_name)); \
+  }; \
+  int GTEST_TEST_CLASS_NAME_(test_case_name, \
+                             test_name)::gtest_registering_dummy_ = \
+      GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::AddToRegistry(); \
+  void GTEST_TEST_CLASS_NAME_(test_case_name, test_name)::TestBody()
+
+# define INSTANTIATE_TEST_CASE_P(prefix, test_case_name, generator) \
+  ::testing::internal::ParamGenerator<test_case_name::ParamType> \
+      gtest_##prefix##test_case_name##_EvalGenerator_() { return generator; } \
+  int gtest_##prefix##test_case_name##_dummy_ = \
+      ::testing::UnitTest::GetInstance()->parameterized_test_registry(). \
+          GetTestCasePatternHolder<test_case_name>(\
+              #test_case_name, __FILE__, __LINE__)->AddTestCaseInstantiation(\
+                  #prefix, \
+                  &gtest_##prefix##test_case_name##_EvalGenerator_, \
+                  __FILE__, __LINE__)
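+
+// As an illustrative sketch (the fixture name is hypothetical, not part of
+// the original header), a complete value-parameterized test using these two
+// macros reads:
+//
+// class PrimeTest : public testing::TestWithParam<int> {};
+//
+// TEST_P(PrimeTest, IsOddUnlessTwo) {
+//   int n = GetParam();
+//   if (n != 2) EXPECT_NE(n % 2, 0);
+// }
+//
+// INSTANTIATE_TEST_CASE_P(SmallPrimes, PrimeTest, Values(2, 3, 5, 7, 11));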
+
+}  // namespace testing
+
+#endif  // GTEST_HAS_PARAM_TEST
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_PARAM_TEST_H_
+// Copyright 2006, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+//
+// Google C++ Testing Framework definitions useful in production code.
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+#define GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+
+// When you need to test the private or protected members of a class,
+// use the FRIEND_TEST macro to declare your tests as friends of the
+// class. For example:
+//
+// class MyClass {
+//  private:
+//   void MyMethod();
+//   FRIEND_TEST(MyClassTest, MyMethod);
+// };
+//
+// class MyClassTest : public testing::Test {
+//   // ...
+// };
+//
+// TEST_F(MyClassTest, MyMethod) {
+//   // Can call MyClass::MyMethod() here.
+// }
+
+#define FRIEND_TEST(test_case_name, test_name)\
+friend class test_case_name##_##test_name##_Test
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_PROD_H_
+// Copyright 2008, Google Inc.
+// All rights reserved.
+//
+// Redistribution and use in source and binary forms, with or without
+// modification, are permitted provided that the following conditions are
+// met:
+//
+//     * Redistributions of source code must retain the above copyright
+// notice, this list of conditions and the following disclaimer.
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: mheule@google.com (Markus Heule)
+//
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_
+
+#include <iosfwd>
+#include <vector>
+
+namespace testing {
+
+// A copyable object representing the result of a test part (i.e. an
+// assertion or an explicit FAIL(), ADD_FAILURE(), or SUCCEED()).
+//
+// Don't inherit from TestPartResult as its destructor is not virtual.
+class GTEST_API_ TestPartResult {
+ public:
+  // The possible outcomes of a test part (i.e. an assertion or an
+  // explicit SUCCEED(), FAIL(), or ADD_FAILURE()).
+  enum Type {
+    kSuccess,          // Succeeded.
+    kNonFatalFailure,  // Failed but the test can continue.
+    kFatalFailure      // Failed and the test should be terminated.
+  };
+
+  // C'tor. TestPartResult does NOT have a default constructor.
+  // Always use this constructor (with parameters) to create a
+  // TestPartResult object.
+  TestPartResult(Type a_type,
+                 const char* a_file_name,
+                 int a_line_number,
+                 const char* a_message)
+      : type_(a_type),
+        file_name_(a_file_name == NULL ? "" : a_file_name),
+        line_number_(a_line_number),
+        summary_(ExtractSummary(a_message)),
+        message_(a_message) {
+  }
+
+  // Gets the outcome of the test part.
+  Type type() const { return type_; }
+
+  // Gets the name of the source file where the test part took place, or
+  // NULL if it's unknown.
+  const char* file_name() const {
+    return file_name_.empty() ? NULL : file_name_.c_str();
+  }
+
+  // Gets the line in the source file where the test part took place,
+  // or -1 if it's unknown.
+  int line_number() const { return line_number_; }
+
+  // Gets the summary of the failure message.
+  const char* summary() const { return summary_.c_str(); }
+
+  // Gets the message associated with the test part.
+ const char* message() const { return message_.c_str(); } + + // Returns true iff the test part passed. + bool passed() const { return type_ == kSuccess; } + + // Returns true iff the test part failed. + bool failed() const { return type_ != kSuccess; } + + // Returns true iff the test part non-fatally failed. + bool nonfatally_failed() const { return type_ == kNonFatalFailure; } + + // Returns true iff the test part fatally failed. + bool fatally_failed() const { return type_ == kFatalFailure; } + + private: + Type type_; + + // Gets the summary of the failure message by omitting the stack + // trace in it. + static std::string ExtractSummary(const char* message); + + // The name of the source file where the test part took place, or + // "" if the source file is unknown. + std::string file_name_; + // The line in the source file where the test part took place, or -1 + // if the line number is unknown. + int line_number_; + std::string summary_; // The test failure summary. + std::string message_; // The test failure message. +}; + +// Prints a TestPartResult object. +std::ostream& operator<<(std::ostream& os, const TestPartResult& result); + +// An array of TestPartResult objects. +// +// Don't inherit from TestPartResultArray as its destructor is not +// virtual. +class GTEST_API_ TestPartResultArray { + public: + TestPartResultArray() {} + + // Appends the given TestPartResult to the array. + void Append(const TestPartResult& result); + + // Returns the TestPartResult at the given index (0-based). + const TestPartResult& GetTestPartResult(int index) const; + + // Returns the number of TestPartResult objects in the array. + int size() const; + + private: + std::vector array_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(TestPartResultArray); +}; + +// This interface knows how to report a test part result. +class TestPartResultReporterInterface { + public: + virtual ~TestPartResultReporterInterface() {} + + virtual void ReportTestPartResult(const TestPartResult& result) = 0; +}; + +namespace internal { + +// This helper class is used by {ASSERT|EXPECT}_NO_FATAL_FAILURE to check if a +// statement generates new fatal failures. To do so it registers itself as the +// current test part result reporter. Besides checking if fatal failures were +// reported, it only delegates the reporting to the former result reporter. +// The original result reporter is restored in the destructor. +// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. +class GTEST_API_ HasNewFatalFailureHelper + : public TestPartResultReporterInterface { + public: + HasNewFatalFailureHelper(); + virtual ~HasNewFatalFailureHelper(); + virtual void ReportTestPartResult(const TestPartResult& result); + bool has_new_fatal_failure() const { return has_new_fatal_failure_; } + private: + bool has_new_fatal_failure_; + TestPartResultReporterInterface* original_reporter_; + + GTEST_DISALLOW_COPY_AND_ASSIGN_(HasNewFatalFailureHelper); +}; + +} // namespace internal + +} // namespace testing + +#endif // GTEST_INCLUDE_GTEST_GTEST_TEST_PART_H_ +// Copyright 2008 Google Inc. +// All Rights Reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+//     * Redistributions in binary form must reproduce the above
+// copyright notice, this list of conditions and the following disclaimer
+// in the documentation and/or other materials provided with the
+// distribution.
+//     * Neither the name of Google Inc. nor the names of its
+// contributors may be used to endorse or promote products derived from
+// this software without specific prior written permission.
+//
+// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+//
+// Author: wan@google.com (Zhanyong Wan)
+
+#ifndef GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+#define GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// This header implements typed tests and type-parameterized tests.
+
+// Typed (aka type-driven) tests repeat the same test for types in a
+// list. You must know which types you want to test with when writing
+// typed tests. Here's how you do it:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+ public:
+  ...
+  typedef std::list<T> List;
+  static T shared_;
+  T value_;
+};
+
+// Next, associate a list of types with the test case, which will be
+// repeated for each type in the list. The typedef is necessary for
+// the macro to parse correctly.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+TYPED_TEST_CASE(FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+//   TYPED_TEST_CASE(FooTest, int);
+
+// Then, use TYPED_TEST() instead of TEST_F() to define as many typed
+// tests for this test case as you want.
+TYPED_TEST(FooTest, DoesBlah) {
+  // Inside a test, refer to TypeParam to get the type parameter.
+  // Since we are inside a derived class template, C++ requires us to
+  // visit the members of FooTest via 'this'.
+  TypeParam n = this->value_;
+
+  // To visit static members of the fixture, add the TestFixture::
+  // prefix.
+  n += TestFixture::shared_;
+
+  // To refer to typedefs in the fixture, add the "typename
+  // TestFixture::" prefix.
+  typename TestFixture::List values;
+  values.push_back(n);
+  ...
+}
+
+TYPED_TEST(FooTest, HasPropertyA) { ... }
+
+#endif  // 0
+
+// Type-parameterized tests are abstract test patterns parameterized
+// by a type. Compared with typed tests, type-parameterized tests
+// allow you to define the test pattern without knowing what the type
+// parameters are. The defined pattern can be instantiated with
+// different types any number of times, in any number of translation
+// units.
+//
+// If you are designing an interface or concept, you can define a
+// suite of type-parameterized tests to verify properties that any
+// valid implementation of the interface/concept should have. Then,
+// each implementation can easily instantiate the test suite to verify
+// that it conforms to the requirements, without having to write
+// similar tests repeatedly. Here's an example:
+
+#if 0
+
+// First, define a fixture class template. It should be parameterized
+// by a type. Remember to derive it from testing::Test.
+template <typename T>
+class FooTest : public testing::Test {
+  ...
+};
+
+// Next, declare that you will define a type-parameterized test case
+// (the _P suffix is for "parameterized" or "pattern", whichever you
+// prefer):
+TYPED_TEST_CASE_P(FooTest);
+
+// Then, use TYPED_TEST_P() to define as many type-parameterized tests
+// for this type-parameterized test case as you want.
+TYPED_TEST_P(FooTest, DoesBlah) {
+  // Inside a test, refer to TypeParam to get the type parameter.
+  TypeParam n = 0;
+  ...
+}
+
+TYPED_TEST_P(FooTest, HasPropertyA) { ... }
+
+// Now the tricky part: you need to register all test patterns before
+// you can instantiate them. The first argument of the macro is the
+// test case name; the rest are the names of the tests in this test
+// case.
+REGISTER_TYPED_TEST_CASE_P(FooTest,
+                           DoesBlah, HasPropertyA);
+
+// Finally, you are free to instantiate the pattern with the types you
+// want. If you put the above code in a header file, you can #include
+// it in multiple C++ source files and instantiate it multiple times.
+//
+// To distinguish different instances of the pattern, the first
+// argument to the INSTANTIATE_* macro is a prefix that will be added
+// to the actual test case name. Remember to pick unique prefixes for
+// different instances.
+typedef testing::Types<char, int, unsigned int> MyTypes;
+INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, MyTypes);
+
+// If the type list contains only one type, you can write that type
+// directly without Types<...>:
+//   INSTANTIATE_TYPED_TEST_CASE_P(My, FooTest, int);
+
+#endif  // 0
+
+
+// Implements typed tests.
+
+#if GTEST_HAS_TYPED_TEST
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the typedef for the type parameters of the
+// given test case.
+# define GTEST_TYPE_PARAMS_(TestCaseName) gtest_type_params_##TestCaseName##_
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+# define TYPED_TEST_CASE(CaseName, Types) \
+  typedef ::testing::internal::TypeList< Types >::type \
+      GTEST_TYPE_PARAMS_(CaseName)
+
+# define TYPED_TEST(CaseName, TestName) \
+  template <typename gtest_TypeParam_> \
+  class GTEST_TEST_CLASS_NAME_(CaseName, TestName) \
+      : public CaseName<gtest_TypeParam_> { \
+   private: \
+    typedef CaseName<gtest_TypeParam_> TestFixture; \
+    typedef gtest_TypeParam_ TypeParam; \
+    virtual void TestBody(); \
+  }; \
+  bool gtest_##CaseName##_##TestName##_registered_ GTEST_ATTRIBUTE_UNUSED_ = \
+      ::testing::internal::TypeParameterizedTest< \
+          CaseName, \
+          ::testing::internal::TemplateSel< \
+              GTEST_TEST_CLASS_NAME_(CaseName, TestName)>, \
+          GTEST_TYPE_PARAMS_(CaseName)>::Register(\
+              "", #CaseName, #TestName, 0); \
+  template <typename gtest_TypeParam_> \
+  void GTEST_TEST_CLASS_NAME_(CaseName, TestName)<gtest_TypeParam_>::TestBody()
+
+#endif  // GTEST_HAS_TYPED_TEST
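+
+// As an illustrative sketch (hypothetical names, not part of the original
+// header), a minimal concrete typed test built on the macros above:
+//
+//   template <typename T>
+//   class ZeroInitTest : public testing::Test {};
+//
+//   typedef testing::Types<int, long, float> NumericTypes;
+//   TYPED_TEST_CASE(ZeroInitTest, NumericTypes);
+//
+//   TYPED_TEST(ZeroInitTest, DefaultsToZero) {
+//     TypeParam value = TypeParam();  // value-initialization yields zero
+//     EXPECT_EQ(TypeParam(0), value);
+//   }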
+
+// Implements type-parameterized tests.
+
+#if GTEST_HAS_TYPED_TEST_P
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the namespace name that the type-parameterized tests for
+// the given type-parameterized test case are defined in. The exact
+// name of the namespace is subject to change without notice.
+# define GTEST_CASE_NAMESPACE_(TestCaseName) \
+  gtest_case_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+//
+// Expands to the name of the variable used to remember the names of
+// the defined tests in the given test case.
+# define GTEST_TYPED_TEST_CASE_P_STATE_(TestCaseName) \
+  gtest_typed_test_case_p_state_##TestCaseName##_
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE DIRECTLY.
+//
+// Expands to the name of the variable used to remember the names of
+// the registered tests in the given test case.
+# define GTEST_REGISTERED_TEST_NAMES_(TestCaseName) \
+  gtest_registered_test_names_##TestCaseName##_
+
+// The variables defined in the type-parameterized test macros are
+// static as typically these macros are used in a .h file that can be
+// #included in multiple translation units linked together.
+# define TYPED_TEST_CASE_P(CaseName) \
+  static ::testing::internal::TypedTestCasePState \
+      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName)
+
+# define TYPED_TEST_P(CaseName, TestName) \
+  namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+  template <typename gtest_TypeParam_> \
+  class TestName : public CaseName<gtest_TypeParam_> { \
+   private: \
+    typedef CaseName<gtest_TypeParam_> TestFixture; \
+    typedef gtest_TypeParam_ TypeParam; \
+    virtual void TestBody(); \
+  }; \
+  static bool gtest_##TestName##_defined_ GTEST_ATTRIBUTE_UNUSED_ = \
+      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).AddTestName(\
+          __FILE__, __LINE__, #CaseName, #TestName); \
+  } \
+  template <typename gtest_TypeParam_> \
+  void GTEST_CASE_NAMESPACE_(CaseName)::TestName<gtest_TypeParam_>::TestBody()
+
+# define REGISTER_TYPED_TEST_CASE_P(CaseName, ...) \
+  namespace GTEST_CASE_NAMESPACE_(CaseName) { \
+  typedef ::testing::internal::Templates<__VA_ARGS__>::type gtest_AllTests_; \
+  } \
+  static const char* const GTEST_REGISTERED_TEST_NAMES_(CaseName) = \
+      GTEST_TYPED_TEST_CASE_P_STATE_(CaseName).VerifyRegisteredTestNames(\
+          __FILE__, __LINE__, #__VA_ARGS__)
+
+// The 'Types' template argument below must have spaces around it
+// since some compilers may choke on '>>' when passing a template
+// instance (e.g. Types<int>)
+# define INSTANTIATE_TYPED_TEST_CASE_P(Prefix, CaseName, Types) \
+  bool gtest_##Prefix##_##CaseName GTEST_ATTRIBUTE_UNUSED_ = \
+      ::testing::internal::TypeParameterizedTestCase<CaseName, \
+          GTEST_CASE_NAMESPACE_(CaseName)::gtest_AllTests_, \
+          ::testing::internal::TypeList< Types >::type>::Register(\
+              #Prefix, #CaseName, GTEST_REGISTERED_TEST_NAMES_(CaseName))
+
+#endif  // GTEST_HAS_TYPED_TEST_P
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_TYPED_TEST_H_
+
+// Depending on the platform, different string classes are available.
+// On Linux, in addition to ::std::string, Google also makes use of
+// class ::string, which has the same interface as ::std::string, but
+// has a different implementation.
+//
+// You can define GTEST_HAS_GLOBAL_STRING to 1 to indicate that
+// ::string is available AND is a distinct type to ::std::string, or
+// define it to 0 to indicate otherwise.
+//
+// If ::std::string and ::string are the same class on your platform
+// due to aliasing, you should define GTEST_HAS_GLOBAL_STRING to 0.
+//
+// If you do not define GTEST_HAS_GLOBAL_STRING, it is defined
+// heuristically.
+
+namespace testing {
+
+// Declares the flags.
+
+// This flag temporarily enables disabled tests.
+GTEST_DECLARE_bool_(also_run_disabled_tests);
+
+// This flag breaks into the debugger on an assertion failure.
+GTEST_DECLARE_bool_(break_on_failure);
+
+// This flag controls whether Google Test catches all test-thrown exceptions
+// and logs them as failures.
+GTEST_DECLARE_bool_(catch_exceptions);
+
+// This flag enables using colors in terminal output. Available values are
+// "yes" to enable colors, "no" (disable colors), or "auto" (the default)
+// to let Google Test decide.
+GTEST_DECLARE_string_(color);
+
+// This flag sets up the filter to select by name using a glob pattern
+// the tests to run. If the filter is not given, all tests are executed.
+GTEST_DECLARE_string_(filter);
+
+// This flag causes Google Test to list tests. None of the tests listed
+// are actually run if the flag is provided.
+GTEST_DECLARE_bool_(list_tests);
+
+// This flag controls whether Google Test emits a detailed XML report to a file
+// in addition to its normal textual output.
+GTEST_DECLARE_string_(output);
+
+// This flag controls whether Google Test prints the elapsed time for each
+// test.
+GTEST_DECLARE_bool_(print_time);
+
+// This flag specifies the random number seed.
+GTEST_DECLARE_int32_(random_seed);
+
+// This flag sets how many times the tests are repeated. The default value
+// is 1. If the value is -1, the tests repeat forever.
+GTEST_DECLARE_int32_(repeat);
+
+// This flag controls whether Google Test includes Google Test internal
+// stack frames in failure stack traces.
+GTEST_DECLARE_bool_(show_internal_stack_frames);
+
+// When this flag is specified, tests' order is randomized on every iteration.
+GTEST_DECLARE_bool_(shuffle);
+
+// This flag specifies the maximum number of stack frames to be
+// printed in a failure message.
+GTEST_DECLARE_int32_(stack_trace_depth);
+
+// When this flag is specified, a failed assertion will throw an
+// exception if exceptions are enabled, or exit the program with a
+// non-zero code otherwise.
+GTEST_DECLARE_bool_(throw_on_failure);
+
+// When this flag is set with a "host:port" string, on supported
+// platforms test results are streamed to the specified port on
+// the specified host machine.
+GTEST_DECLARE_string_(stream_result_to);
+
+// The upper limit for valid stack trace depths.
+const int kMaxStackTraceDepth = 100;
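+
+// As an illustrative sketch (the test binary name below is hypothetical and
+// not part of the original header), these flags map to command-line options
+// and can also be set programmatically before RUN_ALL_TESTS():
+//
+//   ./unit_tests --gtest_filter=FooTest.* --gtest_repeat=3 --gtest_shuffle
+//
+// or, equivalently, in code:
+//
+//   ::testing::GTEST_FLAG(filter) = "FooTest.*";
+//   ::testing::GTEST_FLAG(repeat) = 3;
+//   ::testing::GTEST_FLAG(shuffle) = true;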
+
+namespace internal {
+
+class AssertHelper;
+class DefaultGlobalTestPartResultReporter;
+class ExecDeathTest;
+class NoExecDeathTest;
+class FinalSuccessChecker;
+class GTestFlagSaver;
+class StreamingListenerTest;
+class TestResultAccessor;
+class TestEventListenersAccessor;
+class TestEventRepeater;
+class UnitTestRecordPropertyTestHelper;
+class WindowsDeathTest;
+class UnitTestImpl* GetUnitTestImpl();
+void ReportFailureInUnknownLocation(TestPartResult::Type result_type,
+                                    const std::string& message);
+
+}  // namespace internal
+
+// The friend relationship of some of these classes is cyclic.
+// If we don't forward declare them, the compiler might confuse the classes
+// in friendship clauses with identically named classes in the enclosing scope.
+class Test;
+class TestCase;
+class TestInfo;
+class UnitTest;
+
+// A class for indicating whether an assertion was successful. When
+// the assertion wasn't successful, the AssertionResult object
+// remembers a non-empty message that describes how it failed.
+//
+// To create an instance of this class, use one of the factory functions
+// (AssertionSuccess() and AssertionFailure()).
+//
+// This class is useful for two purposes:
+//   1. Defining predicate functions to be used with Boolean test assertions
+//      EXPECT_TRUE/EXPECT_FALSE and their ASSERT_ counterparts
+//   2. Defining predicate-format functions to be
+//      used with predicate assertions (ASSERT_PRED_FORMAT*, etc).
+//
+// For example, if you define IsEven predicate:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then the failed expectation EXPECT_TRUE(IsEven(Fib(5)))
+// will print the message
+//
+// Value of: IsEven(Fib(5))
+// Actual: false (5 is odd)
+// Expected: true
+//
+// instead of a more opaque
+//
+// Value of: IsEven(Fib(5))
+// Actual: false
+// Expected: true
+//
+// in case IsEven is a simple Boolean predicate.
+//
+// If you expect your predicate to be reused and want to support informative
+// messages in EXPECT_FALSE and ASSERT_FALSE (negative assertions show up
+// about half as often as positive ones in our tests), supply messages for
+// both success and failure cases:
+//
+// testing::AssertionResult IsEven(int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess() << n << " is even";
+// else
+// return testing::AssertionFailure() << n << " is odd";
+// }
+//
+// Then a statement EXPECT_FALSE(IsEven(Fib(6))) will print
+//
+// Value of: IsEven(Fib(6))
+// Actual: true (8 is even)
+// Expected: false
+//
+// NB: Predicates that support negative Boolean assertions have reduced
+// performance in positive ones, so be careful not to use them in tests
+// that have lots (tens of thousands) of positive Boolean assertions.
+//
+// To use this class with EXPECT_PRED_FORMAT assertions such as:
+//
+// // Verifies that Foo() returns an even number.
+// EXPECT_PRED_FORMAT1(IsEven, Foo());
+//
+// you need to define:
+//
+// testing::AssertionResult IsEven(const char* expr, int n) {
+// if ((n % 2) == 0)
+// return testing::AssertionSuccess();
+// else
+// return testing::AssertionFailure()
+// << "Expected: " << expr << " is even\n Actual: it's " << n;
+// }
+//
+// If Foo() returns 5, you will see the following message:
+//
+// Expected: Foo() is even
+// Actual: it's 5
+//
+class GTEST_API_ AssertionResult {
+ public:
+ // Copy constructor.
+ // Used in EXPECT_TRUE/FALSE(assertion_result).
+ AssertionResult(const AssertionResult& other);
+
+ GTEST_DISABLE_MSC_WARNINGS_PUSH_(4800 /* forcing value to bool */)
+
+ // Used in the EXPECT_TRUE/FALSE(bool_expression).
+ //
+ // T must be contextually convertible to bool.
+ //
+ // The second parameter prevents this overload from being considered if
+ // the argument is implicitly convertible to AssertionResult. In that case
+ // we want AssertionResult's copy constructor to be used.
+ template <typename T>
+ explicit AssertionResult(
+ const T& success,
+ typename internal::EnableIf<
+ !internal::ImplicitlyConvertible<T, AssertionResult>::value>::type*
+ /*enabler*/ = NULL)
+ : success_(success) {}
+
+ GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+ // Assignment operator.
+ AssertionResult& operator=(AssertionResult other) {
+ swap(other);
+ return *this;
+ }
+
+ // Returns true iff the assertion succeeded.
+ operator bool() const { return success_; } // NOLINT
+
+ // Returns the assertion's negation. Used with EXPECT/ASSERT_FALSE.
+ AssertionResult operator!() const;
+
+ // Returns the text streamed into this AssertionResult. Test assertions
+ // use it when they fail (i.e., the predicate's outcome doesn't match the
+ // assertion's expectation). When nothing has been streamed into the
+ // object, returns an empty string.
+ const char* message() const {
+ return message_.get() != NULL ? message_->c_str() : "";
+ }
+ // TODO(vladl@google.com): Remove this after making sure no clients use it.
+ // Deprecated; please use message() instead.
+ const char* failure_message() const { return message(); }
+
+ // Streams a custom failure message into this object.
+ template <typename T> AssertionResult& operator<<(const T& value) {
+ AppendMessage(Message() << value);
+ return *this;
+ }
+
+ // Allows streaming basic output manipulators such as endl or flush into
+ // this object.
+ AssertionResult& operator<<(
+ ::std::ostream& (*basic_manipulator)(::std::ostream& stream)) {
+ AppendMessage(Message() << basic_manipulator);
+ return *this;
+ }
+
+ private:
+ // Appends the contents of message to message_.
+ void AppendMessage(const Message& a_message) {
+ if (message_.get() == NULL)
+ message_.reset(new ::std::string);
+ message_->append(a_message.GetString().c_str());
+ }
+
+ // Swap the contents of this AssertionResult with other.
+ void swap(AssertionResult& other);
+
+ // Stores result of the assertion predicate.
+ bool success_;
+ // Stores the message describing the condition in case the expectation
+ // construct is not satisfied with the predicate's outcome.
+ // Referenced via a pointer to avoid taking too much stack frame space
+ // with test assertions.
+ internal::scoped_ptr< ::std::string> message_;
+};
+
+// Makes a successful assertion result.
+GTEST_API_ AssertionResult AssertionSuccess();
+
+// Makes a failed assertion result.
+GTEST_API_ AssertionResult AssertionFailure();
+
+// Makes a failed assertion result with the given failure message.
+// Deprecated; use AssertionFailure() << msg.
+GTEST_API_ AssertionResult AssertionFailure(const Message& msg);
+
+// The abstract class that all tests inherit from.
+//
+// In Google Test, a unit test program contains one or many TestCases, and
+// each TestCase contains one or many Tests.
+//
+// When you define a test using the TEST macro, you don't need to
+// explicitly derive from Test - the TEST macro automatically does
+// this for you.
+//
+// The only time you derive from Test is when defining a test fixture
+// to be used in TEST_F. For example:
+//
+// class FooTest : public testing::Test {
+// protected:
+// void SetUp() override { ... }
+// void TearDown() override { ... }
+// ...
+// };
+//
+// TEST_F(FooTest, Bar) { ... }
+// TEST_F(FooTest, Baz) { ... }
+//
+// Test is not copyable.
+class GTEST_API_ Test {
+ public:
+ friend class TestInfo;
+
+ // Defines types for pointers to functions that set up and tear down
+ // a test case.
+ typedef internal::SetUpTestCaseFunc SetUpTestCaseFunc;
+ typedef internal::TearDownTestCaseFunc TearDownTestCaseFunc;
+
+ // The d'tor is virtual as we intend to inherit from Test.
+ virtual ~Test();
+
+ // Sets up the stuff shared by all tests in this test case.
+ //
+ // Google Test will call Foo::SetUpTestCase() before running the first
+ // test in test case Foo. Hence a sub-class can define its own
+ // SetUpTestCase() method to shadow the one defined in the super
+ // class.
+ static void SetUpTestCase() {}
+
+ // Tears down the stuff shared by all tests in this test case.
+ //
+ // Google Test will call Foo::TearDownTestCase() after running the last
+ // test in test case Foo. Hence a sub-class can define its own
+ // TearDownTestCase() method to shadow the one defined in the super
+ // class.
+ static void TearDownTestCase() {}
+
+ // Returns true iff the current test has a fatal failure.
+ static bool HasFatalFailure();
+
+ // Returns true iff the current test has a non-fatal failure.
+ static bool HasNonfatalFailure();
+
+ // Returns true iff the current test has a (either fatal or
+ // non-fatal) failure.
+ static bool HasFailure() { return HasFatalFailure() || HasNonfatalFailure(); }
+
+ // Logs a property for the current test, test case, or for the entire
+ // invocation of the test program when used outside of the context of a
+ // test case. Only the last value for a given key is remembered. These
+ // are public static so they can be called from utility functions that are
+ // not members of the test fixture. Calls to RecordProperty made during
+ // lifespan of the test (from the moment its constructor starts to the
+ // moment its destructor finishes) will be output in XML as attributes of
+ // the <testcase> element. Properties recorded from fixture's
+ // SetUpTestCase or TearDownTestCase are logged as attributes of the
+ // corresponding <testsuite> element. Calls to RecordProperty made in the
+ // global context (before or after invocation of RUN_ALL_TESTS and from
+ // SetUp/TearDown method of Environment objects registered with Google
+ // Test) will be output as attributes of the <testsuites> element.
+ static void RecordProperty(const std::string& key, const std::string& value);
+ static void RecordProperty(const std::string& key, int value);
+
+ protected:
+ // Creates a Test object.
+ Test();
+
+ // Sets up the test fixture.
+ virtual void SetUp();
+
+ // Tears down the test fixture.
+ virtual void TearDown();
+
+ private:
+ // Returns true iff the current test has the same fixture class as
+ // the first test in the current test case.
+ static bool HasSameFixtureClass();
+
+ // Runs the test after the test fixture has been set up.
+ //
+ // A sub-class must implement this to define the test logic.
+ //
+ // DO NOT OVERRIDE THIS FUNCTION DIRECTLY IN A USER PROGRAM.
+ // Instead, use the TEST or TEST_F macro.
+ virtual void TestBody() = 0;
+
+ // Sets up, executes, and tears down the test.
+ void Run();
+
+ // Deletes self. We deliberately pick an unusual name for this
+ // internal method to avoid clashing with names used in user TESTs.
+ void DeleteSelf_() { delete this; }
+
+ // Uses a GTestFlagSaver to save and restore all Google Test flags.
+ const internal::GTestFlagSaver* const gtest_flag_saver_;
+
+ // Often a user misspells SetUp() as Setup() and spends a long time
+ // wondering why it is never called by Google Test. The declaration of
+ // the following method is solely for catching such an error at
+ // compile time:
+ //
+ // - The return type is deliberately chosen to be not void, so it
+ // will be a conflict if void Setup() is declared in the user's
+ // test fixture.
+ //
+ // - This method is private, so it will be another compiler error
+ // if the method is called from the user's test fixture.
+ //
+ // DO NOT OVERRIDE THIS FUNCTION.
+ //
+ // If you see an error about overriding the following function or
+ // about it being private, you have mis-spelled SetUp() as Setup().
+ struct Setup_should_be_spelled_SetUp {};
+ virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
+
+ // We disallow copying Tests.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(Test);
+};
+
+typedef internal::TimeInMillis TimeInMillis;
+
+// A copyable object representing a user-specified test property which can be
+// output as a key/value string pair.
+//
+// Don't inherit from TestProperty as its destructor is not virtual.
+class TestProperty {
+ public:
+ // C'tor. TestProperty does NOT have a default constructor.
+ // Always use this constructor (with parameters) to create a
+ // TestProperty object.
+ TestProperty(const std::string& a_key, const std::string& a_value) :
+ key_(a_key), value_(a_value) {
+ }
+
+ // Gets the user supplied key.
+ const char* key() const {
+ return key_.c_str();
+ }
+
+ // Gets the user supplied value.
+ const char* value() const {
+ return value_.c_str();
+ }
+
+ // Sets a new value, overriding the one supplied in the constructor.
+ void SetValue(const std::string& new_value) {
+ value_ = new_value;
+ }
+
+ private:
+ // The key supplied by the user.
+ std::string key_;
+ // The value supplied by the user.
+ std::string value_;
+};
+
+// The result of a single Test. This includes a list of
+// TestPartResults, a list of TestProperties, a count of how many
+// death tests there are in the Test, and how much time it took to run
+// the Test.
+//
+// TestResult is not copyable.
+class GTEST_API_ TestResult {
+ public:
+ // Creates an empty TestResult.
+ TestResult();
+
+ // D'tor. Do not inherit from TestResult.
+ ~TestResult();
+
+ // Gets the number of all test parts. This is the sum of the number
+ // of successful test parts and the number of failed test parts.
+ int total_part_count() const;
+
+ // Returns the number of the test properties.
+ int test_property_count() const;
+
+ // Returns true iff the test passed (i.e. no test part failed).
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the test failed.
+ bool Failed() const;
+
+ // Returns true iff the test fatally failed.
+ bool HasFatalFailure() const;
+
+ // Returns true iff the test has a non-fatal failure.
+ bool HasNonfatalFailure() const;
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns the i-th test part result among all the results. i can range
+ // from 0 to total_part_count() - 1. If i is not in that range, aborts
+ // the program.
+ const TestPartResult& GetTestPartResult(int i) const;
+
+ // Returns the i-th test property. i can range from 0 to
+ // test_property_count() - 1. If i is not in that range, aborts the
+ // program.
+ const TestProperty& GetTestProperty(int i) const;
+
+ private:
+ friend class TestInfo;
+ friend class TestCase;
+ friend class UnitTest;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::ExecDeathTest;
+ friend class internal::TestResultAccessor;
+ friend class internal::UnitTestImpl;
+ friend class internal::WindowsDeathTest;
+
+ // Gets the vector of TestPartResults.
+ const std::vector<TestPartResult>& test_part_results() const {
+ return test_part_results_;
+ }
+
+ // Gets the vector of TestProperties.
+ const std::vector<TestProperty>& test_properties() const {
+ return test_properties_;
+ }
+
+ // Sets the elapsed time.
+ void set_elapsed_time(TimeInMillis elapsed) { elapsed_time_ = elapsed; }
+
+ // Adds a test property to the list. The property is validated and may add
+ // a non-fatal failure if invalid (e.g., if it conflicts with reserved
+ // key names). If a property is already recorded for the same key, the
+ // value will be updated, rather than storing multiple values for the same
+ // key. xml_element specifies the element for which the property is being
+ // recorded and is used for validation.
+ void RecordProperty(const std::string& xml_element,
+ const TestProperty& test_property);
+
+ // Adds a failure if the key is a reserved attribute of Google Test
+ // testcase tags. Returns true if the property is valid.
+ // TODO(russr): Validate attribute names are legal and human readable.
+ static bool ValidateTestProperty(const std::string& xml_element,
+ const TestProperty& test_property);
+
+ // Adds a test part result to the list.
+ void AddTestPartResult(const TestPartResult& test_part_result);
+
+ // Returns the death test count.
+ int death_test_count() const { return death_test_count_; }
+
+ // Increments the death test count, returning the new count.
+ int increment_death_test_count() { return ++death_test_count_; }
+
+ // Clears the test part results.
+ void ClearTestPartResults();
+
+ // Clears the object.
+ void Clear();
+
+ // Protects mutable state of the property vector and of owned
+ // properties, whose values may be updated.
+ internal::Mutex test_properites_mutex_;
+
+ // The vector of TestPartResults.
+ std::vector<TestPartResult> test_part_results_;
+ // The vector of TestProperties.
+ std::vector<TestProperty> test_properties_;
+ // Running count of death tests.
+ int death_test_count_;
+ // The elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+
+ // We disallow copying TestResult.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestResult);
+}; // class TestResult
+
+// A TestInfo object stores the following information about a test:
+//
+// Test case name
+// Test name
+// Whether the test should be run
+// A function pointer that creates the test object when invoked
+// Test result
+//
+// The constructor of TestInfo registers itself with the UnitTest
+// singleton such that the RUN_ALL_TESTS() macro knows which tests to
+// run.
+class GTEST_API_ TestInfo {
+ public:
+ // Destructs a TestInfo object. This function is not virtual, so
+ // don't inherit from TestInfo.
+ ~TestInfo();
+
+ // Returns the test case name.
+ const char* test_case_name() const { return test_case_name_.c_str(); }
+
+ // Returns the test name.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the name of the parameter type, or NULL if this is not a typed
+ // or a type-parameterized test.
+ const char* type_param() const {
+ if (type_param_.get() != NULL)
+ return type_param_->c_str();
+ return NULL;
+ }
+
+ // Returns the text representation of the value parameter, or NULL if this
+ // is not a value-parameterized test.
+ const char* value_param() const {
+ if (value_param_.get() != NULL)
+ return value_param_->c_str();
+ return NULL;
+ }
+
+ // Returns true if this test should run, that is if the test is not
+ // disabled (or it is disabled but the also_run_disabled_tests flag has
+ // been specified) and its full name matches the user-specified filter.
+ //
+ // Google Test allows the user to filter the tests by their full names.
+ // The full name of a test Bar in test case Foo is defined as
+ // "Foo.Bar". Only the tests that match the filter will run.
+ //
+ // A filter is a colon-separated list of glob (not regex) patterns,
+ // optionally followed by a '-' and a colon-separated list of
+ // negative patterns (tests to exclude). A test is run if it
+ // matches one of the positive patterns and does not match any of
+ // the negative patterns.
+ //
+ // For example, *A*:Foo.* is a filter that matches any string that
+ // contains the character 'A' or starts with "Foo.".
+ bool should_run() const { return should_run_; }
+
+ // Returns true iff this test will appear in the XML report.
+ bool is_reportable() const {
+ // For now, the XML report includes all tests matching the filter.
+ // In the future, we may trim tests that are excluded because of
+ // sharding.
+ return matches_filter_;
+ }
+
+ // Returns the result of the test.
+ const TestResult* result() const { return &result_; }
+
+ private:
+#if GTEST_HAS_DEATH_TEST
+ friend class internal::DefaultDeathTestFactory;
+#endif // GTEST_HAS_DEATH_TEST
+ friend class Test;
+ friend class TestCase;
+ friend class internal::UnitTestImpl;
+ friend class internal::StreamingListenerTest;
+ friend TestInfo* internal::MakeAndRegisterTestInfo(
+ const char* test_case_name,
+ const char* name,
+ const char* type_param,
+ const char* value_param,
+ internal::TypeId fixture_class_id,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc,
+ internal::TestFactoryBase* factory);
+
+ // Constructs a TestInfo object. The newly constructed instance assumes
+ // ownership of the factory object.
+ TestInfo(const std::string& test_case_name,
+ const std::string& name,
+ const char* a_type_param, // NULL if not a type-parameterized test
+ const char* a_value_param, // NULL if not a value-parameterized test
+ internal::TypeId fixture_class_id,
+ internal::TestFactoryBase* factory);
+
+ // Increments the number of death tests encountered in this test so
+ // far.
+ int increment_death_test_count() {
+ return result_.increment_death_test_count();
+ }
+
+ // Creates the test object, runs it, records its result, and then
+ // deletes it.
+ void Run();
+
+ static void ClearTestResult(TestInfo* test_info) {
+ test_info->result_.Clear();
+ }
+
+ // These fields are immutable properties of the test.
+ const std::string test_case_name_; // Test case name
+ const std::string name_; // Test name
+ // Name of the parameter type, or NULL if this is not a typed or a
+ // type-parameterized test.
+ const internal::scoped_ptr<const ::std::string> type_param_;
+ // Text representation of the value parameter, or NULL if this is not a
+ // value-parameterized test.
+ const internal::scoped_ptr<const ::std::string> value_param_;
+ const internal::TypeId fixture_class_id_; // ID of the test fixture class
+ bool should_run_; // True iff this test should run
+ bool is_disabled_; // True iff this test is disabled
+ bool matches_filter_; // True if this test matches the
+ // user-specified filter.
+ internal::TestFactoryBase* const factory_; // The factory that creates
+ // the test object
+
+ // This field is mutable and needs to be reset before running the
+ // test for the second time.
+ TestResult result_;
+
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestInfo);
+};
+
+// A test case, which consists of a vector of TestInfos.
+//
+// TestCase is not copyable.
+class GTEST_API_ TestCase {
+ public:
+ // Creates a TestCase with the given name.
+ //
+ // TestCase does NOT have a default constructor. Always use this
+ // constructor to create a TestCase object.
+ //
+ // Arguments:
+ //
+ // name: name of the test case
+ // a_type_param: the name of the test's type parameter, or NULL if
+ // this is not a type-parameterized test.
+ // set_up_tc: pointer to the function that sets up the test case
+ // tear_down_tc: pointer to the function that tears down the test case
+ TestCase(const char* name, const char* a_type_param,
+ Test::SetUpTestCaseFunc set_up_tc,
+ Test::TearDownTestCaseFunc tear_down_tc);
+
+ // Destructor of TestCase.
+ virtual ~TestCase();
+
+ // Gets the name of the TestCase.
+ const char* name() const { return name_.c_str(); }
+
+ // Returns the name of the parameter type, or NULL if this is not a
+ // type-parameterized test case.
+ const char* type_param() const {
+ if (type_param_.get() != NULL)
+ return type_param_->c_str();
+ return NULL;
+ }
+
+ // Returns true if any test in this test case should run.
+ bool should_run() const { return should_run_; }
+
+ // Gets the number of successful tests in this test case.
+ int successful_test_count() const;
+
+ // Gets the number of failed tests in this test case.
+ int failed_test_count() const;
+
+ // Gets the number of disabled tests that will be reported in the XML report.
+ int reportable_disabled_test_count() const;
+
+ // Gets the number of disabled tests in this test case.
+ int disabled_test_count() const;
+
+ // Gets the number of tests to be printed in the XML report.
+ int reportable_test_count() const;
+
+ // Gets the number of tests in this test case that should run.
+ int test_to_run_count() const;
+
+ // Gets the number of all tests in this test case.
+ int total_test_count() const;
+
+ // Returns true iff the test case passed.
+ bool Passed() const { return !Failed(); }
+
+ // Returns true iff the test case failed.
+ bool Failed() const { return failed_test_count() > 0; }
+
+ // Returns the elapsed time, in milliseconds.
+ TimeInMillis elapsed_time() const { return elapsed_time_; }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ const TestInfo* GetTestInfo(int i) const;
+
+ // Returns the TestResult that holds test properties recorded during
+ // execution of SetUpTestCase and TearDownTestCase.
+ const TestResult& ad_hoc_test_result() const { return ad_hoc_test_result_; }
+
+ private:
+ friend class Test;
+ friend class internal::UnitTestImpl;
+
+ // Gets the (mutable) vector of TestInfos in this TestCase.
+ std::vector<TestInfo*>& test_info_list() { return test_info_list_; }
+
+ // Gets the (immutable) vector of TestInfos in this TestCase.
+ const std::vector<TestInfo*>& test_info_list() const {
+ return test_info_list_;
+ }
+
+ // Returns the i-th test among all the tests. i can range from 0 to
+ // total_test_count() - 1. If i is not in that range, returns NULL.
+ TestInfo* GetMutableTestInfo(int i);
+
+ // Sets the should_run member.
+ void set_should_run(bool should) { should_run_ = should; }
+
+ // Adds a TestInfo to this test case. Will delete the TestInfo upon
+ // destruction of the TestCase object.
+ void AddTestInfo(TestInfo * test_info);
+
+ // Clears the results of all tests in this test case.
+ void ClearResult();
+
+ // Clears the results of all tests in the given test case.
+ static void ClearTestCaseResult(TestCase* test_case) {
+ test_case->ClearResult();
+ }
+
+ // Runs every test in this TestCase.
+ void Run();
+
+ // Runs SetUpTestCase() for this TestCase. This wrapper is needed
+ // for catching exceptions thrown from SetUpTestCase().
+ void RunSetUpTestCase() { (*set_up_tc_)(); }
+
+ // Runs TearDownTestCase() for this TestCase. This wrapper is
+ // needed for catching exceptions thrown from TearDownTestCase().
+ void RunTearDownTestCase() { (*tear_down_tc_)(); }
+
+ // Returns true iff test passed.
+ static bool TestPassed(const TestInfo* test_info) {
+ return test_info->should_run() && test_info->result()->Passed();
+ }
+
+ // Returns true iff test failed.
+ static bool TestFailed(const TestInfo* test_info) {
+ return test_info->should_run() && test_info->result()->Failed();
+ }
+
+ // Returns true iff the test is disabled and will be reported in the XML
+ // report.
+ static bool TestReportableDisabled(const TestInfo* test_info) {
+ return test_info->is_reportable() && test_info->is_disabled_;
+ }
+
+ // Returns true iff test is disabled.
+ static bool TestDisabled(const TestInfo* test_info) {
+ return test_info->is_disabled_;
+ }
+
+ // Returns true iff this test will appear in the XML report.
+ static bool TestReportable(const TestInfo* test_info) {
+ return test_info->is_reportable();
+ }
+
+ // Returns true if the given test should run.
+ static bool ShouldRunTest(const TestInfo* test_info) {
+ return test_info->should_run();
+ }
+
+ // Shuffles the tests in this test case.
+ void ShuffleTests(internal::Random* random);
+
+ // Restores the test order to before the first shuffle.
+ void UnshuffleTests();
+
+ // Name of the test case.
+ std::string name_;
+ // Name of the parameter type, or NULL if this is not a typed or a
+ // type-parameterized test.
+ const internal::scoped_ptr<const ::std::string> type_param_;
+ // The vector of TestInfos in their original order. It owns the
+ // elements in the vector.
+ std::vector<TestInfo*> test_info_list_;
+ // Provides a level of indirection for the test list to allow easy
+ // shuffling and restoring the test order. The i-th element in this
+ // vector is the index of the i-th test in the shuffled test list.
+ std::vector<int> test_indices_;
+ // Pointer to the function that sets up the test case.
+ Test::SetUpTestCaseFunc set_up_tc_;
+ // Pointer to the function that tears down the test case.
+ Test::TearDownTestCaseFunc tear_down_tc_;
+ // True iff any test in this test case should run.
+ bool should_run_;
+ // Elapsed time, in milliseconds.
+ TimeInMillis elapsed_time_;
+ // Holds test properties recorded during execution of SetUpTestCase and
+ // TearDownTestCase.
+ TestResult ad_hoc_test_result_;
+
+ // We disallow copying TestCases.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestCase);
+};
+
+// An Environment object is capable of setting up and tearing down an
+// environment. You should subclass this to define your own
+// environment(s).
+//
+// An Environment object does the set-up and tear-down in virtual
+// methods SetUp() and TearDown() instead of the constructor and the
+// destructor, as:
+//
+// 1. You cannot safely throw from a destructor. This is a problem
+// as in some cases Google Test is used where exceptions are enabled, and
+// we may want to implement ASSERT_* using exceptions where they are
+// available.
+// 2. You cannot use ASSERT_* directly in a constructor or
+// destructor.
+class Environment {
+ public:
+ // The d'tor is virtual as we need to subclass Environment.
+ virtual ~Environment() {}
+
+ // Override this to define how to set up the environment.
+ virtual void SetUp() {}
+
+ // Override this to define how to tear down the environment.
+ virtual void TearDown() {}
+ private:
+ // If you see an error about overriding the following function or
+ // about it being private, you have mis-spelled SetUp() as Setup().
+ struct Setup_should_be_spelled_SetUp {};
+ virtual Setup_should_be_spelled_SetUp* Setup() { return NULL; }
+};
+
+// The interface for tracing execution of tests. The methods are organized in
+// the order the corresponding events are fired.
+class TestEventListener {
+ public:
+ virtual ~TestEventListener() {}
+
+ // Fired before any test activity starts.
+ virtual void OnTestProgramStart(const UnitTest& unit_test) = 0;
+
+ // Fired before each iteration of tests starts. There may be more than
+ // one iteration if GTEST_FLAG(repeat) is set. iteration is the iteration
+ // index, starting from 0.
+ virtual void OnTestIterationStart(const UnitTest& unit_test, + int iteration) = 0; + + // Fired before environment set-up for each iteration of tests starts. + virtual void OnEnvironmentsSetUpStart(const UnitTest& unit_test) = 0; + + // Fired after environment set-up for each iteration of tests ends. + virtual void OnEnvironmentsSetUpEnd(const UnitTest& unit_test) = 0; + + // Fired before the test case starts. + virtual void OnTestCaseStart(const TestCase& test_case) = 0; + + // Fired before the test starts. + virtual void OnTestStart(const TestInfo& test_info) = 0; + + // Fired after a failed assertion or a SUCCEED() invocation. + virtual void OnTestPartResult(const TestPartResult& test_part_result) = 0; + + // Fired after the test ends. + virtual void OnTestEnd(const TestInfo& test_info) = 0; + + // Fired after the test case ends. + virtual void OnTestCaseEnd(const TestCase& test_case) = 0; + + // Fired before environment tear-down for each iteration of tests starts. + virtual void OnEnvironmentsTearDownStart(const UnitTest& unit_test) = 0; + + // Fired after environment tear-down for each iteration of tests ends. + virtual void OnEnvironmentsTearDownEnd(const UnitTest& unit_test) = 0; + + // Fired after each iteration of tests finishes. + virtual void OnTestIterationEnd(const UnitTest& unit_test, + int iteration) = 0; + + // Fired after all test activities have ended. + virtual void OnTestProgramEnd(const UnitTest& unit_test) = 0; +}; + +// The convenience class for users who need to override just one or two +// methods and are not concerned that a possible change to a signature of +// the methods they override will not be caught during the build. For +// comments about each method please see the definition of TestEventListener +// above. +class EmptyTestEventListener : public TestEventListener { + public: + virtual void OnTestProgramStart(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationStart(const UnitTest& /*unit_test*/, + int /*iteration*/) {} + virtual void OnEnvironmentsSetUpStart(const UnitTest& /*unit_test*/) {} + virtual void OnEnvironmentsSetUpEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestCaseStart(const TestCase& /*test_case*/) {} + virtual void OnTestStart(const TestInfo& /*test_info*/) {} + virtual void OnTestPartResult(const TestPartResult& /*test_part_result*/) {} + virtual void OnTestEnd(const TestInfo& /*test_info*/) {} + virtual void OnTestCaseEnd(const TestCase& /*test_case*/) {} + virtual void OnEnvironmentsTearDownStart(const UnitTest& /*unit_test*/) {} + virtual void OnEnvironmentsTearDownEnd(const UnitTest& /*unit_test*/) {} + virtual void OnTestIterationEnd(const UnitTest& /*unit_test*/, + int /*iteration*/) {} + virtual void OnTestProgramEnd(const UnitTest& /*unit_test*/) {} +}; + +// TestEventListeners lets users add listeners to track events in Google Test. +class GTEST_API_ TestEventListeners { + public: + TestEventListeners(); + ~TestEventListeners(); + + // Appends an event listener to the end of the list. Google Test assumes + // the ownership of the listener (i.e. it will delete the listener when + // the test program finishes). + void Append(TestEventListener* listener); + + // Removes the given event listener from the list and returns it. It then + // becomes the caller's responsibility to delete the listener. Returns + // NULL if the listener is not found in the list. + TestEventListener* Release(TestEventListener* listener); + + // Returns the standard listener responsible for the default console + // output. 
+ // Can be removed from the listeners list to shut down default
+ // console output. Note that removing this object from the listener list
+ // with Release transfers its ownership to the caller and makes this
+ // function return NULL the next time.
+ TestEventListener* default_result_printer() const {
+ return default_result_printer_;
+ }
+
+ // Returns the standard listener responsible for the default XML output
+ // controlled by the --gtest_output=xml flag. Can be removed from the
+ // listeners list by users who want to shut down the default XML output
+ // controlled by this flag and substitute it with a custom one. Note that
+ // removing this object from the listener list with Release transfers its
+ // ownership to the caller and makes this function return NULL the next
+ // time.
+ TestEventListener* default_xml_generator() const {
+ return default_xml_generator_;
+ }
+
+ private:
+ friend class TestCase;
+ friend class TestInfo;
+ friend class internal::DefaultGlobalTestPartResultReporter;
+ friend class internal::NoExecDeathTest;
+ friend class internal::TestEventListenersAccessor;
+ friend class internal::UnitTestImpl;
+
+ // Returns the repeater that broadcasts the TestEventListener events to all
+ // subscribers.
+ TestEventListener* repeater();
+
+ // Sets the default_result_printer attribute to the provided listener.
+ // The listener is also added to the listener list and previous
+ // default_result_printer is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultResultPrinter(TestEventListener* listener);
+
+ // Sets the default_xml_generator attribute to the provided listener. The
+ // listener is also added to the listener list and previous
+ // default_xml_generator is removed from it and deleted. The listener can
+ // also be NULL in which case it will not be added to the list. Does
+ // nothing if the previous and the current listener objects are the same.
+ void SetDefaultXmlGenerator(TestEventListener* listener);
+
+ // Controls whether events will be forwarded by the repeater to the
+ // listeners in the list.
+ bool EventForwardingEnabled() const;
+ void SuppressEventForwarding();
+
+ // The actual list of listeners.
+ internal::TestEventRepeater* repeater_;
+ // Listener responsible for the standard result output.
+ TestEventListener* default_result_printer_;
+ // Listener responsible for the creation of the XML output file.
+ TestEventListener* default_xml_generator_;
+
+ // We disallow copying TestEventListeners.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(TestEventListeners);
+};
+
+// A UnitTest consists of a vector of TestCases.
+//
+// This is a singleton class. The only instance of UnitTest is
+// created when UnitTest::GetInstance() is first called. This
+// instance is never deleted.
+//
+// UnitTest is not copyable.
+//
+// This class is thread-safe as long as the methods are called
+// according to their specification.
+class GTEST_API_ UnitTest {
+ public:
+ // Gets the singleton UnitTest object. The first time this method
+ // is called, a UnitTest object is constructed and returned.
+ // Consecutive calls will return the same object.
+ static UnitTest* GetInstance();
+
+ // Runs all tests in this UnitTest object and prints the result.
+ // Returns 0 if successful, or 1 otherwise.
+ //
+ // This method can only be called from the main thread.
+ //
+ // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+ int Run() GTEST_MUST_USE_RESULT_; + + // Returns the working directory when the first TEST() or TEST_F() + // was executed. The UnitTest object owns the string. + const char* original_working_dir() const; + + // Returns the TestCase object for the test that's currently running, + // or NULL if no test is running. + const TestCase* current_test_case() const + GTEST_LOCK_EXCLUDED_(mutex_); + + // Returns the TestInfo object for the test that's currently running, + // or NULL if no test is running. + const TestInfo* current_test_info() const + GTEST_LOCK_EXCLUDED_(mutex_); + + // Returns the random seed used at the start of the current test run. + int random_seed() const; + +#if GTEST_HAS_PARAM_TEST + // Returns the ParameterizedTestCaseRegistry object used to keep track of + // value-parameterized tests and instantiate and register them. + // + // INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM. + internal::ParameterizedTestCaseRegistry& parameterized_test_registry() + GTEST_LOCK_EXCLUDED_(mutex_); +#endif // GTEST_HAS_PARAM_TEST + + // Gets the number of successful test cases. + int successful_test_case_count() const; + + // Gets the number of failed test cases. + int failed_test_case_count() const; + + // Gets the number of all test cases. + int total_test_case_count() const; + + // Gets the number of all test cases that contain at least one test + // that should run. + int test_case_to_run_count() const; + + // Gets the number of successful tests. + int successful_test_count() const; + + // Gets the number of failed tests. + int failed_test_count() const; + + // Gets the number of disabled tests that will be reported in the XML report. + int reportable_disabled_test_count() const; + + // Gets the number of disabled tests. + int disabled_test_count() const; + + // Gets the number of tests to be printed in the XML report. + int reportable_test_count() const; + + // Gets the number of all tests. + int total_test_count() const; + + // Gets the number of tests that should run. + int test_to_run_count() const; + + // Gets the time of the test program start, in ms from the start of the + // UNIX epoch. + TimeInMillis start_timestamp() const; + + // Gets the elapsed time, in milliseconds. + TimeInMillis elapsed_time() const; + + // Returns true iff the unit test passed (i.e. all test cases passed). + bool Passed() const; + + // Returns true iff the unit test failed (i.e. some test case failed + // or something outside of all tests failed). + bool Failed() const; + + // Gets the i-th test case among all the test cases. i can range from 0 to + // total_test_case_count() - 1. If i is not in that range, returns NULL. + const TestCase* GetTestCase(int i) const; + + // Returns the TestResult containing information on test failures and + // properties logged outside of individual test cases. + const TestResult& ad_hoc_test_result() const; + + // Returns the list of event listeners that can be used to track events + // inside Google Test. + TestEventListeners& listeners(); + + private: + // Registers and returns a global test environment. When a test + // program is run, all global test environments will be set-up in + // the order they were registered. After all tests in the program + // have finished, all global test environments will be torn-down in + // the *reverse* order they were registered. + // + // The UnitTest object takes ownership of the given environment. + // + // This method can only be called from the main thread. 
+ Environment* AddEnvironment(Environment* env);
+
+ // Adds a TestPartResult to the current TestResult object. All
+ // Google Test assertion macros (e.g. ASSERT_TRUE, EXPECT_EQ, etc)
+ // eventually call this to report their results. The user code
+ // should use the assertion macros instead of calling this directly.
+ void AddTestPartResult(TestPartResult::Type result_type,
+ const char* file_name,
+ int line_number,
+ const std::string& message,
+ const std::string& os_stack_trace)
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Adds a TestProperty to the current TestResult object when invoked from
+ // inside a test, to current TestCase's ad_hoc_test_result_ when invoked
+ // from SetUpTestCase or TearDownTestCase, or to the global property set
+ // when invoked elsewhere. If the result already contains a property with
+ // the same key, the value will be updated.
+ void RecordProperty(const std::string& key, const std::string& value);
+
+ // Gets the i-th test case among all the test cases. i can range from 0 to
+ // total_test_case_count() - 1. If i is not in that range, returns NULL.
+ TestCase* GetMutableTestCase(int i);
+
+ // Accessors for the implementation object.
+ internal::UnitTestImpl* impl() { return impl_; }
+ const internal::UnitTestImpl* impl() const { return impl_; }
+
+ // These classes and functions are friends as they need to access private
+ // members of UnitTest.
+ friend class Test;
+ friend class internal::AssertHelper;
+ friend class internal::ScopedTrace;
+ friend class internal::StreamingListenerTest;
+ friend class internal::UnitTestRecordPropertyTestHelper;
+ friend Environment* AddGlobalTestEnvironment(Environment* env);
+ friend internal::UnitTestImpl* internal::GetUnitTestImpl();
+ friend void internal::ReportFailureInUnknownLocation(
+ TestPartResult::Type result_type,
+ const std::string& message);
+
+ // Creates an empty UnitTest.
+ UnitTest();
+
+ // D'tor
+ virtual ~UnitTest();
+
+ // Pushes a trace defined by SCOPED_TRACE() on to the per-thread
+ // Google Test trace stack.
+ void PushGTestTrace(const internal::TraceInfo& trace)
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Pops a trace from the per-thread Google Test trace stack.
+ void PopGTestTrace()
+ GTEST_LOCK_EXCLUDED_(mutex_);
+
+ // Protects mutable state in *impl_. This is mutable as some const
+ // methods need to lock it too.
+ mutable internal::Mutex mutex_;
+
+ // Opaque implementation object. This field is never changed once
+ // the object is constructed. We don't mark it as const here, as
+ // doing so will cause a warning in the constructor of UnitTest.
+ // Mutable state in *impl_ is protected by mutex_.
+ internal::UnitTestImpl* impl_;
+
+ // We disallow copying UnitTest.
+ GTEST_DISALLOW_COPY_AND_ASSIGN_(UnitTest);
+};
+
+// A convenient wrapper for adding an environment for the test
+// program.
+//
+// You should call this before RUN_ALL_TESTS() is called, probably in
+// main(). If you use gtest_main, you need to call this before main()
+// starts for it to take effect.
+// For example, you can define a global
+// variable like this:
+//
+// testing::Environment* const foo_env =
+// testing::AddGlobalTestEnvironment(new FooEnvironment);
+//
+// However, we strongly recommend you write your own main() and
+// call AddGlobalTestEnvironment() there, as relying on initialization
+// of global variables makes the code harder to read and may cause
+// problems when you register multiple environments from different
+// translation units and the environments have dependencies among them
+// (remember that the compiler doesn't guarantee the order in which
+// global variables from different translation units are initialized).
+inline Environment* AddGlobalTestEnvironment(Environment* env) {
+ return UnitTest::GetInstance()->AddEnvironment(env);
+}
+
+// Initializes Google Test. This must be called before calling
+// RUN_ALL_TESTS(). In particular, it parses a command line for the
+// flags that Google Test recognizes. Whenever a Google Test flag is
+// seen, it is removed from argv, and *argc is decremented.
+//
+// No value is returned. Instead, the Google Test flag variables are
+// updated.
+//
+// Calling the function a second time has no user-visible effect.
+GTEST_API_ void InitGoogleTest(int* argc, char** argv);
+
+// This overloaded version can be used in Windows programs compiled in
+// UNICODE mode.
+GTEST_API_ void InitGoogleTest(int* argc, wchar_t** argv);
+
+namespace internal {
+
+// FormatForComparison<ToPrint, OtherOperand>::Format(value) formats a
+// value of type ToPrint that is an operand of a comparison assertion
+// (e.g. ASSERT_EQ). OtherOperand is the type of the other operand in
+// the comparison, and is used to help determine the best way to
+// format the value. In particular, when the value is a C string
+// (char pointer) and the other operand is an STL string object, we
+// want to format the C string as a string, since we know it is
+// compared by value with the string object. If the value is a char
+// pointer but the other operand is not an STL string object, we don't
+// know whether the pointer is supposed to point to a NUL-terminated
+// string, and thus want to print it as a pointer to be safe.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// The default case.
+template <typename ToPrint, typename OtherOperand>
+class FormatForComparison {
+ public:
+ static ::std::string Format(const ToPrint& value) {
+ return ::testing::PrintToString(value);
+ }
+};
+
+// Array.
+template <typename ToPrint, size_t N, typename OtherOperand>
+class FormatForComparison<ToPrint[N], OtherOperand> {
+ public:
+ static ::std::string Format(const ToPrint* value) {
+ return FormatForComparison<const ToPrint*, OtherOperand>::Format(value);
+ }
+};
+
+// By default, print C strings as pointers to be safe, as we don't know
+// whether they actually point to a NUL-terminated string.
+
+#define GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(CharType) \
+ template <typename OtherOperand> \
+ class FormatForComparison<CharType*, OtherOperand> { \
+ public: \
+ static ::std::string Format(CharType* value) { \
+ return ::testing::PrintToString(static_cast<const void*>(value)); \
+ } \
+ }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const char);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(wchar_t);
+GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_(const wchar_t);
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_POINTER_
+
+// If a C string is compared with an STL string object, we know it's meant
+// to point to a NUL-terminated string, and thus can print it as a string.
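+//
+// For instance (an illustrative sketch; the variable name is made up),
+// with the specializations above and below:
+//
+//   const char* p = "hello";
+//   EXPECT_EQ(::std::string("hello"), p);  // a failure prints p as "hello"
+//   EXPECT_EQ(p, p);                       // a failure would print addresses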
+
+#define GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(CharType, OtherStringType) \
+ template <> \
+ class FormatForComparison<CharType*, OtherStringType> { \
+ public: \
+ static ::std::string Format(CharType* value) { \
+ return ::testing::PrintToString(value); \
+ } \
+ }
+
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::std::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::std::string);
+
+#if GTEST_HAS_GLOBAL_STRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(char, ::string);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const char, ::string);
+#endif
+
+#if GTEST_HAS_GLOBAL_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::wstring);
+#endif
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(wchar_t, ::std::wstring);
+GTEST_IMPL_FORMAT_C_STRING_AS_STRING_(const wchar_t, ::std::wstring);
+#endif
+
+#undef GTEST_IMPL_FORMAT_C_STRING_AS_STRING_
+
+// Formats a comparison assertion (e.g. ASSERT_EQ, EXPECT_LT, etc.)
+// operand to be used in a failure message. The type (but not value)
+// of the other operand may affect the format. This allows us to
+// print a char* as a raw pointer when it is compared against another
+// char* or void*, and print it as a C string when it is compared
+// against an std::string object, for example.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename T1, typename T2>
+std::string FormatForComparisonFailureMessage(
+ const T1& value, const T2& /* other_operand */) {
+ return FormatForComparison<T1, T2>::Format(value);
+}
+
+// Separate the error generating code from the code path to reduce the stack
+// frame size of CmpHelperEQ. This helps reduce the overhead of some sanitizers
+// when calling EXPECT_* in a tight loop.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQFailure(const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected, const T2& actual) {
+ return EqFailure(expected_expression,
+ actual_expression,
+ FormatForComparisonFailureMessage(expected, actual),
+ FormatForComparisonFailureMessage(actual, expected),
+ false);
+}
+
+// The helper function for {ASSERT|EXPECT}_EQ.
+template <typename T1, typename T2>
+AssertionResult CmpHelperEQ(const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected,
+ const T2& actual) {
+GTEST_DISABLE_MSC_WARNINGS_PUSH_(4389 /* signed/unsigned mismatch */)
+ if (expected == actual) {
+ return AssertionSuccess();
+ }
+GTEST_DISABLE_MSC_WARNINGS_POP_()
+
+ return CmpHelperEQFailure(expected_expression, actual_expression, expected,
+ actual);
+}
+
+// With this overloaded version, we allow anonymous enums to be used
+// in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous enums
+// can be implicitly cast to BiggestInt.
+GTEST_API_ AssertionResult CmpHelperEQ(const char* expected_expression,
+ const char* actual_expression,
+ BiggestInt expected,
+ BiggestInt actual);
+
+// The helper class for {ASSERT|EXPECT}_EQ. The template argument
+// lhs_is_null_literal is true iff the first argument to ASSERT_EQ()
+// is a null pointer literal. The following default implementation is
+// for lhs_is_null_literal being false.
+template <bool lhs_is_null_literal>
+class EqHelper {
+ public:
+ // This templatized version is for the general case.
+ template <typename T1, typename T2>
+ static AssertionResult Compare(const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected,
+ const T2& actual) {
+ return CmpHelperEQ(expected_expression, actual_expression, expected,
+ actual);
+ }
+
+ // With this overloaded version, we allow anonymous enums to be used
+ // in {ASSERT|EXPECT}_EQ when compiled with gcc 4, as anonymous
+ // enums can be implicitly cast to BiggestInt.
+ //
+ // Even though its body looks the same as the above version, we
+ // cannot merge the two, as it will make anonymous enums unhappy.
+ static AssertionResult Compare(const char* expected_expression,
+ const char* actual_expression,
+ BiggestInt expected,
+ BiggestInt actual) {
+ return CmpHelperEQ(expected_expression, actual_expression, expected,
+ actual);
+ }
+};
+
+// This specialization is used when the first argument to ASSERT_EQ()
+// is a null pointer literal, like NULL, false, or 0.
+template <>
+class EqHelper<true> {
+ public:
+ // We define two overloaded versions of Compare(). The first
+ // version will be picked when the second argument to ASSERT_EQ() is
+ // NOT a pointer, e.g. ASSERT_EQ(0, AnIntFunction()) or
+ // EXPECT_EQ(false, a_bool).
+ template <typename T1, typename T2>
+ static AssertionResult Compare(
+ const char* expected_expression,
+ const char* actual_expression,
+ const T1& expected,
+ const T2& actual,
+ // The following line prevents this overload from being considered if T2
+ // is not a pointer type. We need this because ASSERT_EQ(NULL, my_ptr)
+ // expands to Compare("", "", NULL, my_ptr), which requires a conversion
+ // to match the Secret* in the other overload, which would otherwise make
+ // this template match better.
+ typename EnableIf<!is_pointer<T2>::value>::type* = 0) {
+ return CmpHelperEQ(expected_expression, actual_expression, expected,
+ actual);
+ }
+
+ // This version will be picked when the second argument to ASSERT_EQ() is a
+ // pointer, e.g. ASSERT_EQ(NULL, a_pointer).
+ template <typename T>
+ static AssertionResult Compare(
+ const char* expected_expression,
+ const char* actual_expression,
+ // We used to have a second template parameter instead of Secret*. That
+ // template parameter would deduce to 'long', making this a better match
+ // than the first overload even without the first overload's EnableIf.
+ // Unfortunately, gcc with -Wconversion-null warns when "passing NULL to
+ // non-pointer argument" (even a deduced integral argument), so the old
+ // implementation caused warnings in user code.
+ Secret* /* expected (NULL) */,
+ T* actual) {
+ // We already know that 'expected' is a null pointer.
+ return CmpHelperEQ(expected_expression, actual_expression,
+ static_cast<T*>(NULL), actual);
+ }
+};
+
+// Separate the error generating code from the code path to reduce the stack
+// frame size of CmpHelperOP. This helps reduce the overhead of some sanitizers
+// when calling EXPECT_OP in a tight loop.
+template <typename T1, typename T2>
+AssertionResult CmpHelperOpFailure(const char* expr1, const char* expr2,
+ const T1& val1, const T2& val2,
+ const char* op) {
+ return AssertionFailure()
+ << "Expected: (" << expr1 << ") " << op << " (" << expr2
+ << "), actual: " << FormatForComparisonFailureMessage(val1, val2)
+ << " vs " << FormatForComparisonFailureMessage(val2, val1);
+}
+
+// A macro for implementing the helper functions needed to implement
+// ASSERT_?? and EXPECT_??. It is here just to avoid copy-and-paste
+// of similar code.
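+// As an illustration, GTEST_IMPL_CMP_HELPER_(LE, <=) below defines
+// CmpHelperLE(expr1, expr2, val1, val2), which {ASSERT|EXPECT}_LE expand
+// to; when val1 <= val2 does not hold, it produces a failure message
+// such as (values illustrative):
+//
+//   Expected: (a) <= (b), actual: 3 vs 2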
+//
+// For each templatized helper function, we also define an overloaded
+// version for BiggestInt in order to reduce code bloat and allow
+// anonymous enums to be used with {ASSERT|EXPECT}_?? when compiled
+// with gcc 4.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+#define GTEST_IMPL_CMP_HELPER_(op_name, op)\
+template <typename T1, typename T2>\
+AssertionResult CmpHelper##op_name(const char* expr1, const char* expr2, \
+ const T1& val1, const T2& val2) {\
+ if (val1 op val2) {\
+ return AssertionSuccess();\
+ } else {\
+ return CmpHelperOpFailure(expr1, expr2, val1, val2, #op);\
+ }\
+}\
+GTEST_API_ AssertionResult CmpHelper##op_name(\
+ const char* expr1, const char* expr2, BiggestInt val1, BiggestInt val2)
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+
+// Implements the helper function for {ASSERT|EXPECT}_NE
+GTEST_IMPL_CMP_HELPER_(NE, !=);
+// Implements the helper function for {ASSERT|EXPECT}_LE
+GTEST_IMPL_CMP_HELPER_(LE, <=);
+// Implements the helper function for {ASSERT|EXPECT}_LT
+GTEST_IMPL_CMP_HELPER_(LT, <);
+// Implements the helper function for {ASSERT|EXPECT}_GE
+GTEST_IMPL_CMP_HELPER_(GE, >=);
+// Implements the helper function for {ASSERT|EXPECT}_GT
+GTEST_IMPL_CMP_HELPER_(GT, >);
+
+#undef GTEST_IMPL_CMP_HELPER_
+
+// The helper function for {ASSERT|EXPECT}_STREQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual);
+
+// The helper function for {ASSERT|EXPECT}_STRCASEEQ.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASEEQ(const char* expected_expression,
+ const char* actual_expression,
+ const char* expected,
+ const char* actual);
+
+// The helper function for {ASSERT|EXPECT}_STRNE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+// The helper function for {ASSERT|EXPECT}_STRCASENE.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRCASENE(const char* s1_expression,
+ const char* s2_expression,
+ const char* s1,
+ const char* s2);
+
+
+// Helper function for *_STREQ on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTREQ(const char* expected_expression,
+ const char* actual_expression,
+ const wchar_t* expected,
+ const wchar_t* actual);
+
+// Helper function for *_STRNE on wide strings.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult CmpHelperSTRNE(const char* s1_expression,
+ const char* s2_expression,
+ const wchar_t* s1,
+ const wchar_t* s2);
+
+} // namespace internal
+
+// IsSubstring() and IsNotSubstring() are intended to be used as the
+// first argument to {EXPECT,ASSERT}_PRED_FORMAT2(), not by
+// themselves. They check whether needle is a substring of haystack
+// (NULL is considered a substring of itself only), and return an
+// appropriate error message when they fail.
+//
+// The {needle,haystack}_expr arguments are the stringified
+// expressions that generated the two real arguments.
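+//
+// A typical use, with illustrative string values:
+//
+//   EXPECT_PRED_FORMAT2(::testing::IsSubstring, "needle", "two needles");
+//
+// On failure, the message shows both expressions and their values.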
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const char* needle, const char* haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const wchar_t* needle, const wchar_t* haystack);
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::string& needle, const ::std::string& haystack);
+
+#if GTEST_HAS_STD_WSTRING
+GTEST_API_ AssertionResult IsSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack);
+GTEST_API_ AssertionResult IsNotSubstring(
+ const char* needle_expr, const char* haystack_expr,
+ const ::std::wstring& needle, const ::std::wstring& haystack);
+#endif // GTEST_HAS_STD_WSTRING
+
+namespace internal {
+
+// Helper template function for comparing floating-points.
+//
+// Template parameter:
+//
+// RawType: the raw floating-point type (either float or double)
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+template <typename RawType>
+AssertionResult CmpHelperFloatingPointEQ(const char* expected_expression,
+ const char* actual_expression,
+ RawType expected,
+ RawType actual) {
+ const FloatingPoint<RawType> lhs(expected), rhs(actual);
+
+ if (lhs.AlmostEquals(rhs)) {
+ return AssertionSuccess();
+ }
+
+ ::std::stringstream expected_ss;
+ expected_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << expected;
+
+ ::std::stringstream actual_ss;
+ actual_ss << std::setprecision(std::numeric_limits<RawType>::digits10 + 2)
+ << actual;
+
+ return EqFailure(expected_expression,
+ actual_expression,
+ StringStreamToString(&expected_ss),
+ StringStreamToString(&actual_ss),
+ false);
+}
+
+// Helper function for implementing ASSERT_NEAR.
+//
+// INTERNAL IMPLEMENTATION - DO NOT USE IN A USER PROGRAM.
+GTEST_API_ AssertionResult DoubleNearPredFormat(const char* expr1,
+ const char* expr2,
+ const char* abs_error_expr,
+ double val1,
+ double val2,
+ double abs_error);
+
+// INTERNAL IMPLEMENTATION - DO NOT USE IN USER CODE.
+// A class that enables one to stream messages to assertion macros.
+class GTEST_API_ AssertHelper {
+ public:
+ // Constructor.
+ AssertHelper(TestPartResult::Type type,
+ const char* file,
+ int line,
+ const char* message);
+ ~AssertHelper();
+
+ // Message assignment is a semantic trick to enable assertion
+ // streaming; see the GTEST_MESSAGE_ macro below.
+ void operator=(const Message& message) const;
+
+ private:
+ // We put our data in a struct so that the size of the AssertHelper class can
+ // be as small as possible. This is important because gcc is incapable of
+ // re-using stack space even for temporary variables, so every EXPECT_EQ
+ // reserves stack space for another AssertHelper.
+  struct AssertHelperData {
+    AssertHelperData(TestPartResult::Type t,
+                     const char* srcfile,
+                     int line_num,
+                     const char* msg)
+        : type(t), file(srcfile), line(line_num), message(msg) { }
+
+    TestPartResult::Type const type;
+    const char* const file;
+    int const line;
+    std::string const message;
+
+   private:
+    GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelperData);
+  };
+
+  AssertHelperData* const data_;
+
+  GTEST_DISALLOW_COPY_AND_ASSIGN_(AssertHelper);
+};
+
+}  // namespace internal
+
+#if GTEST_HAS_PARAM_TEST
+// The pure interface class that all value-parameterized tests inherit from.
+// A value-parameterized class must inherit from both ::testing::Test and
+// ::testing::WithParamInterface.  In most cases that just means inheriting
+// from ::testing::TestWithParam, but more complicated test hierarchies
+// may need to inherit from Test and WithParamInterface at different levels.
+//
+// This interface has support for accessing the test parameter value via
+// the GetParam() method.
+//
+// Use it with one of the parameter generator defining functions, like Range(),
+// Values(), ValuesIn(), Bool(), and Combine().
+//
+// class FooTest : public ::testing::TestWithParam<int> {
+//  protected:
+//   FooTest() {
+//     // Can use GetParam() here.
+//   }
+//   virtual ~FooTest() {
+//     // Can use GetParam() here.
+//   }
+//   virtual void SetUp() {
+//     // Can use GetParam() here.
+//   }
+//   virtual void TearDown() {
+//     // Can use GetParam() here.
+//   }
+// };
+// TEST_P(FooTest, DoesBar) {
+//   // Can use GetParam() method here.
+//   Foo foo;
+//   ASSERT_TRUE(foo.DoesBar(GetParam()));
+// }
+// INSTANTIATE_TEST_CASE_P(OneToTenRange, FooTest, ::testing::Range(1, 10));
+
+template <typename T>
+class WithParamInterface {
+ public:
+  typedef T ParamType;
+  virtual ~WithParamInterface() {}
+
+  // The current parameter value.  Is also available in the test fixture's
+  // constructor.  This member function is non-static, even though it only
+  // references static data, to reduce the opportunity for incorrect uses
+  // like writing 'WithParamInterface<bool>::GetParam()' for a test that
+  // uses a fixture whose parameter type is int.
+  const ParamType& GetParam() const {
+    GTEST_CHECK_(parameter_ != NULL)
+        << "GetParam() can only be called inside a value-parameterized test "
+        << "-- did you intend to write TEST_P instead of TEST_F?";
+    return *parameter_;
+  }
+
+ private:
+  // Sets parameter value.  The caller is responsible for making sure the value
+  // remains alive and unchanged throughout the current test.
+  static void SetParam(const ParamType* parameter) {
+    parameter_ = parameter;
+  }
+
+  // Static value used for accessing parameter during a test lifetime.
+  static const ParamType* parameter_;
+
+  // TestClass must be a subclass of WithParamInterface<T> and Test.
+  template <class TestClass> friend class internal::ParameterizedTestFactory;
+};
+
+template <typename T>
+const T* WithParamInterface<T>::parameter_ = NULL;
+
+// Most value-parameterized classes can ignore the existence of
+// WithParamInterface, and can just inherit from ::testing::TestWithParam.
+
+template <typename T>
+class TestWithParam : public Test, public WithParamInterface<T> {
+};
+
+#endif  // GTEST_HAS_PARAM_TEST
+
+// Macros for indicating success/failure in test code.
+
+// ADD_FAILURE unconditionally adds a failure to the current test.
+// SUCCEED generates a success - it doesn't automatically make the
+// current test successful, as a test is only successful when it has
+// no failure.
+//
+// EXPECT_* verifies that a certain condition is satisfied.  If not,
+// it behaves like ADD_FAILURE.
In particular: +// +// EXPECT_TRUE verifies that a Boolean condition is true. +// EXPECT_FALSE verifies that a Boolean condition is false. +// +// FAIL and ASSERT_* are similar to ADD_FAILURE and EXPECT_*, except +// that they will also abort the current function on failure. People +// usually want the fail-fast behavior of FAIL and ASSERT_*, but those +// writing data-driven tests often find themselves using ADD_FAILURE +// and EXPECT_* more. + +// Generates a nonfatal failure with a generic message. +#define ADD_FAILURE() GTEST_NONFATAL_FAILURE_("Failed") + +// Generates a nonfatal failure at the given source file location with +// a generic message. +#define ADD_FAILURE_AT(file, line) \ + GTEST_MESSAGE_AT_(file, line, "Failed", \ + ::testing::TestPartResult::kNonFatalFailure) + +// Generates a fatal failure with a generic message. +#define GTEST_FAIL() GTEST_FATAL_FAILURE_("Failed") + +// Define this macro to 1 to omit the definition of FAIL(), which is a +// generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_FAIL +# define FAIL() GTEST_FAIL() +#endif + +// Generates a success with a generic message. +#define GTEST_SUCCEED() GTEST_SUCCESS_("Succeeded") + +// Define this macro to 1 to omit the definition of SUCCEED(), which +// is a generic name and clashes with some other libraries. +#if !GTEST_DONT_DEFINE_SUCCEED +# define SUCCEED() GTEST_SUCCEED() +#endif + +// Macros for testing exceptions. +// +// * {ASSERT|EXPECT}_THROW(statement, expected_exception): +// Tests that the statement throws the expected exception. +// * {ASSERT|EXPECT}_NO_THROW(statement): +// Tests that the statement doesn't throw any exception. +// * {ASSERT|EXPECT}_ANY_THROW(statement): +// Tests that the statement throws an exception. + +#define EXPECT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_NONFATAL_FAILURE_) +#define EXPECT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define EXPECT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_NONFATAL_FAILURE_) +#define ASSERT_THROW(statement, expected_exception) \ + GTEST_TEST_THROW_(statement, expected_exception, GTEST_FATAL_FAILURE_) +#define ASSERT_NO_THROW(statement) \ + GTEST_TEST_NO_THROW_(statement, GTEST_FATAL_FAILURE_) +#define ASSERT_ANY_THROW(statement) \ + GTEST_TEST_ANY_THROW_(statement, GTEST_FATAL_FAILURE_) + +// Boolean assertions. Condition can be either a Boolean expression or an +// AssertionResult. For more information on how to use AssertionResult with +// these macros see comments on that class. +#define EXPECT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_NONFATAL_FAILURE_) +#define EXPECT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_NONFATAL_FAILURE_) +#define ASSERT_TRUE(condition) \ + GTEST_TEST_BOOLEAN_(condition, #condition, false, true, \ + GTEST_FATAL_FAILURE_) +#define ASSERT_FALSE(condition) \ + GTEST_TEST_BOOLEAN_(!(condition), #condition, true, false, \ + GTEST_FATAL_FAILURE_) + +// Includes the auto-generated header that implements a family of +// generic predicate assertion macros. +// Copyright 2006, Google Inc. +// All rights reserved. 
+// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This file is AUTOMATICALLY GENERATED on 10/31/2011 by command +// 'gen_gtest_pred_impl.py 5'. DO NOT EDIT BY HAND! +// +// Implements a family of generic predicate assertion macros. + +#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ +#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ + +// Makes sure this header is not included before gtest.h. +#ifndef GTEST_INCLUDE_GTEST_GTEST_H_ +# error Do not include gtest_pred_impl.h directly. Include gtest.h instead. +#endif // GTEST_INCLUDE_GTEST_GTEST_H_ + +// This header implements a family of generic predicate assertion +// macros: +// +// ASSERT_PRED_FORMAT1(pred_format, v1) +// ASSERT_PRED_FORMAT2(pred_format, v1, v2) +// ... +// +// where pred_format is a function or functor that takes n (in the +// case of ASSERT_PRED_FORMATn) values and their source expression +// text, and returns a testing::AssertionResult. See the definition +// of ASSERT_EQ in gtest.h for an example. +// +// If you don't care about formatting, you can use the more +// restrictive version: +// +// ASSERT_PRED1(pred, v1) +// ASSERT_PRED2(pred, v1, v2) +// ... +// +// where pred is an n-ary function or functor that returns bool, +// and the values v1, v2, ..., must support the << operator for +// streaming to std::ostream. +// +// We also define the EXPECT_* variations. +// +// For now we only support predicates whose arity is at most 5. +// Please email googletestframework@googlegroups.com if you need +// support for higher arities. + +// GTEST_ASSERT_ is the basic statement to which all of the assertions +// in this file reduce. Don't use this in your code. + +#define GTEST_ASSERT_(expression, on_failure) \ + GTEST_AMBIGUOUS_ELSE_BLOCKER_ \ + if (const ::testing::AssertionResult gtest_ar = (expression)) \ + ; \ + else \ + on_failure(gtest_ar.failure_message()) + + +// Helper function for implementing {EXPECT|ASSERT}_PRED1. Don't use +// this in your code. 
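+// [Editor's example -- not part of the original header.  'IsPositive' and
+// 'IsEvenFormat' are hypothetical; they show the two user-facing flavors
+// these helpers implement:]
+//
+//   bool IsPositive(int n) { return n > 0; }
+//   EXPECT_PRED1(IsPositive, Foo());   // prints Foo()'s value on failure
+//
+//   ::testing::AssertionResult IsEvenFormat(const char* expr, int n) {
+//     if (n % 2 == 0) return ::testing::AssertionSuccess();
+//     return ::testing::AssertionFailure()
+//            << expr << " evaluates to " << n << ", which is not even";
+//   }
+//   EXPECT_PRED_FORMAT1(IsEvenFormat, Foo());  // custom failure message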
+template <typename Pred,
+          typename T1>
+AssertionResult AssertPred1Helper(const char* pred_text,
+                                  const char* e1,
+                                  Pred pred,
+                                  const T1& v1) {
+  if (pred(v1)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT1.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT1_(pred_format, v1, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, v1), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED1.  Don't use
+// this in your code.
+#define GTEST_PRED1_(pred, v1, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred1Helper(#pred, \
+                                             #v1, \
+                                             pred, \
+                                             v1), on_failure)
+
+// Unary predicate assertion macros.
+#define EXPECT_PRED_FORMAT1(pred_format, v1) \
+  GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED1(pred, v1) \
+  GTEST_PRED1_(pred, v1, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT1(pred_format, v1) \
+  GTEST_PRED_FORMAT1_(pred_format, v1, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED1(pred, v1) \
+  GTEST_PRED1_(pred, v1, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED2.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2>
+AssertionResult AssertPred2Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2) {
+  if (pred(v1, v2)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT2.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT2_(pred_format, v1, v2, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, v1, v2), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED2.  Don't use
+// this in your code.
+#define GTEST_PRED2_(pred, v1, v2, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred2Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             pred, \
+                                             v1, \
+                                             v2), on_failure)
+
+// Binary predicate assertion macros.
+#define EXPECT_PRED_FORMAT2(pred_format, v1, v2) \
+  GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED2(pred, v1, v2) \
+  GTEST_PRED2_(pred, v1, v2, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT2(pred_format, v1, v2) \
+  GTEST_PRED_FORMAT2_(pred_format, v1, v2, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED2(pred, v1, v2) \
+  GTEST_PRED2_(pred, v1, v2, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED3.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3>
+AssertionResult AssertPred3Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3) {
+  if (pred(v1, v2, v3)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ", "
+                            << e3 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2
+                            << "\n" << e3 << " evaluates to " << v3;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT3.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, v1, v2, v3), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED3.  Don't use
+// this in your code.
+#define GTEST_PRED3_(pred, v1, v2, v3, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred3Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             #v3, \
+                                             pred, \
+                                             v1, \
+                                             v2, \
+                                             v3), on_failure)
+
+// Ternary predicate assertion macros.
+#define EXPECT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED3(pred, v1, v2, v3) \
+  GTEST_PRED3_(pred, v1, v2, v3, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT3(pred_format, v1, v2, v3) \
+  GTEST_PRED_FORMAT3_(pred_format, v1, v2, v3, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED3(pred, v1, v2, v3) \
+  GTEST_PRED3_(pred, v1, v2, v3, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED4.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3,
+          typename T4>
+AssertionResult AssertPred4Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  const char* e4,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3,
+                                  const T4& v4) {
+  if (pred(v1, v2, v3, v4)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ", "
+                            << e3 << ", "
+                            << e4 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2
+                            << "\n" << e3 << " evaluates to " << v3
+                            << "\n" << e4 << " evaluates to " << v4;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT4.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, v1, v2, v3, v4), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED4.  Don't use
+// this in your code.
+#define GTEST_PRED4_(pred, v1, v2, v3, v4, on_failure)\
+  GTEST_ASSERT_(::testing::AssertPred4Helper(#pred, \
+                                             #v1, \
+                                             #v2, \
+                                             #v3, \
+                                             #v4, \
+                                             pred, \
+                                             v1, \
+                                             v2, \
+                                             v3, \
+                                             v4), on_failure)
+
+// 4-ary predicate assertion macros.
+#define EXPECT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define EXPECT_PRED4(pred, v1, v2, v3, v4) \
+  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_NONFATAL_FAILURE_)
+#define ASSERT_PRED_FORMAT4(pred_format, v1, v2, v3, v4) \
+  GTEST_PRED_FORMAT4_(pred_format, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+#define ASSERT_PRED4(pred, v1, v2, v3, v4) \
+  GTEST_PRED4_(pred, v1, v2, v3, v4, GTEST_FATAL_FAILURE_)
+
+
+
+// Helper function for implementing {EXPECT|ASSERT}_PRED5.  Don't use
+// this in your code.
+template <typename Pred,
+          typename T1,
+          typename T2,
+          typename T3,
+          typename T4,
+          typename T5>
+AssertionResult AssertPred5Helper(const char* pred_text,
+                                  const char* e1,
+                                  const char* e2,
+                                  const char* e3,
+                                  const char* e4,
+                                  const char* e5,
+                                  Pred pred,
+                                  const T1& v1,
+                                  const T2& v2,
+                                  const T3& v3,
+                                  const T4& v4,
+                                  const T5& v5) {
+  if (pred(v1, v2, v3, v4, v5)) return AssertionSuccess();
+
+  return AssertionFailure() << pred_text << "("
+                            << e1 << ", "
+                            << e2 << ", "
+                            << e3 << ", "
+                            << e4 << ", "
+                            << e5 << ") evaluates to false, where"
+                            << "\n" << e1 << " evaluates to " << v1
+                            << "\n" << e2 << " evaluates to " << v2
+                            << "\n" << e3 << " evaluates to " << v3
+                            << "\n" << e4 << " evaluates to " << v4
+                            << "\n" << e5 << " evaluates to " << v5;
+}
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT5.
+// Don't use this in your code.
+#define GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, on_failure)\
+  GTEST_ASSERT_(pred_format(#v1, #v2, #v3, #v4, #v5, v1, v2, v3, v4, v5), \
+                on_failure)
+
+// Internal macro for implementing {EXPECT|ASSERT}_PRED5.
Don't use +// this in your code. +#define GTEST_PRED5_(pred, v1, v2, v3, v4, v5, on_failure)\ + GTEST_ASSERT_(::testing::AssertPred5Helper(#pred, \ + #v1, \ + #v2, \ + #v3, \ + #v4, \ + #v5, \ + pred, \ + v1, \ + v2, \ + v3, \ + v4, \ + v5), on_failure) + +// 5-ary predicate assertion macros. +#define EXPECT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ + GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) +#define EXPECT_PRED5(pred, v1, v2, v3, v4, v5) \ + GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_NONFATAL_FAILURE_) +#define ASSERT_PRED_FORMAT5(pred_format, v1, v2, v3, v4, v5) \ + GTEST_PRED_FORMAT5_(pred_format, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) +#define ASSERT_PRED5(pred, v1, v2, v3, v4, v5) \ + GTEST_PRED5_(pred, v1, v2, v3, v4, v5, GTEST_FATAL_FAILURE_) + + + +#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_ + +// Macros for testing equalities and inequalities. +// +// * {ASSERT|EXPECT}_EQ(expected, actual): Tests that expected == actual +// * {ASSERT|EXPECT}_NE(v1, v2): Tests that v1 != v2 +// * {ASSERT|EXPECT}_LT(v1, v2): Tests that v1 < v2 +// * {ASSERT|EXPECT}_LE(v1, v2): Tests that v1 <= v2 +// * {ASSERT|EXPECT}_GT(v1, v2): Tests that v1 > v2 +// * {ASSERT|EXPECT}_GE(v1, v2): Tests that v1 >= v2 +// +// When they are not, Google Test prints both the tested expressions and +// their actual values. The values must be compatible built-in types, +// or you will get a compiler error. By "compatible" we mean that the +// values can be compared by the respective operator. +// +// Note: +// +// 1. It is possible to make a user-defined type work with +// {ASSERT|EXPECT}_??(), but that requires overloading the +// comparison operators and is thus discouraged by the Google C++ +// Usage Guide. Therefore, you are advised to use the +// {ASSERT|EXPECT}_TRUE() macro to assert that two objects are +// equal. +// +// 2. The {ASSERT|EXPECT}_??() macros do pointer comparisons on +// pointers (in particular, C strings). Therefore, if you use it +// with two C strings, you are testing how their locations in memory +// are related, not how their content is related. To compare two C +// strings by content, use {ASSERT|EXPECT}_STR*(). +// +// 3. {ASSERT|EXPECT}_EQ(expected, actual) is preferred to +// {ASSERT|EXPECT}_TRUE(expected == actual), as the former tells you +// what the actual value is when it fails, and similarly for the +// other comparisons. +// +// 4. Do not depend on the order in which {ASSERT|EXPECT}_??() +// evaluate their arguments, which is undefined. +// +// 5. These macros evaluate their arguments exactly once. 
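+// [Editor's aside -- not part of the original header.]  Note 2 in practice,
+// with hypothetical buffers:
+//
+//   char lhs[] = "hello";
+//   char rhs[] = "hello";
+//   EXPECT_EQ(lhs, rhs);     // Fails: compares the two pointers.
+//   EXPECT_STREQ(lhs, rhs);  // Passes: compares the contents.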
+//
+// Examples:
+//
+//   EXPECT_NE(5, Foo());
+//   EXPECT_EQ(NULL, a_pointer);
+//   ASSERT_LT(i, array_size);
+//   ASSERT_GT(records.size(), 0) << "There is no record left.";
+
+#define EXPECT_EQ(expected, actual) \
+  EXPECT_PRED_FORMAT2(::testing::internal:: \
+                      EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
+                      expected, actual)
+#define EXPECT_NE(expected, actual) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperNE, expected, actual)
+#define EXPECT_LE(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define EXPECT_LT(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define EXPECT_GE(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define EXPECT_GT(val1, val2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+#define GTEST_ASSERT_EQ(expected, actual) \
+  ASSERT_PRED_FORMAT2(::testing::internal:: \
+                      EqHelper<GTEST_IS_NULL_LITERAL_(expected)>::Compare, \
+                      expected, actual)
+#define GTEST_ASSERT_NE(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperNE, val1, val2)
+#define GTEST_ASSERT_LE(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLE, val1, val2)
+#define GTEST_ASSERT_LT(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperLT, val1, val2)
+#define GTEST_ASSERT_GE(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGE, val1, val2)
+#define GTEST_ASSERT_GT(val1, val2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperGT, val1, val2)
+
+// Define macro GTEST_DONT_DEFINE_ASSERT_XY to 1 to omit the definition of
+// ASSERT_XY(), which clashes with some users' own code.
+
+#if !GTEST_DONT_DEFINE_ASSERT_EQ
+# define ASSERT_EQ(val1, val2) GTEST_ASSERT_EQ(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_NE
+# define ASSERT_NE(val1, val2) GTEST_ASSERT_NE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LE
+# define ASSERT_LE(val1, val2) GTEST_ASSERT_LE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_LT
+# define ASSERT_LT(val1, val2) GTEST_ASSERT_LT(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GE
+# define ASSERT_GE(val1, val2) GTEST_ASSERT_GE(val1, val2)
+#endif
+
+#if !GTEST_DONT_DEFINE_ASSERT_GT
+# define ASSERT_GT(val1, val2) GTEST_ASSERT_GT(val1, val2)
+#endif
+
+// C-string Comparisons.  All tests treat NULL and any non-NULL string
+// as different.  Two NULLs are equal.
+//
+//    * {ASSERT|EXPECT}_STREQ(s1, s2):     Tests that s1 == s2
+//    * {ASSERT|EXPECT}_STRNE(s1, s2):     Tests that s1 != s2
+//    * {ASSERT|EXPECT}_STRCASEEQ(s1, s2): Tests that s1 == s2, ignoring case
+//    * {ASSERT|EXPECT}_STRCASENE(s1, s2): Tests that s1 != s2, ignoring case
+//
+// For wide or narrow string objects, you can use the
+// {ASSERT|EXPECT}_??() macros.
+//
+// Don't depend on the order in which the arguments are evaluated,
+// which is undefined.
+//
+// These macros evaluate their arguments exactly once.
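+// [Editor's example -- not part of the original header; 'c_str' is a
+// hypothetical char pointer:]
+//
+//   EXPECT_STREQ("hello", c_str);        // content comparison
+//   EXPECT_STRCASEEQ("HELLO", "hello");  // passes, case is ignored
+//   EXPECT_STRNE("", c_str);             // "" and NULL compare unequal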
+
+#define EXPECT_STREQ(expected, actual) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
+#define EXPECT_STRNE(s1, s2) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define EXPECT_STRCASEEQ(expected, actual) \
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
+#define EXPECT_STRCASENE(s1, s2)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+#define ASSERT_STREQ(expected, actual) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTREQ, expected, actual)
+#define ASSERT_STRNE(s1, s2) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRNE, s1, s2)
+#define ASSERT_STRCASEEQ(expected, actual) \
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASEEQ, expected, actual)
+#define ASSERT_STRCASENE(s1, s2)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperSTRCASENE, s1, s2)
+
+// Macros for comparing floating-point numbers.
+//
+//    * {ASSERT|EXPECT}_FLOAT_EQ(expected, actual):
+//         Tests that two float values are almost equal.
+//    * {ASSERT|EXPECT}_DOUBLE_EQ(expected, actual):
+//         Tests that two double values are almost equal.
+//    * {ASSERT|EXPECT}_NEAR(v1, v2, abs_error):
+//         Tests that v1 and v2 are within the given distance to each other.
+//
+// Google Test uses ULP-based comparison to automatically pick a default
+// error bound that is appropriate for the operands.  See the
+// FloatingPoint template class in gtest-internal.h if you are
+// interested in the implementation details.
+
+#define EXPECT_FLOAT_EQ(expected, actual)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+                      expected, actual)
+
+#define EXPECT_DOUBLE_EQ(expected, actual)\
+  EXPECT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+                      expected, actual)
+
+#define ASSERT_FLOAT_EQ(expected, actual)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<float>, \
+                      expected, actual)
+
+#define ASSERT_DOUBLE_EQ(expected, actual)\
+  ASSERT_PRED_FORMAT2(::testing::internal::CmpHelperFloatingPointEQ<double>, \
+                      expected, actual)
+
+#define EXPECT_NEAR(val1, val2, abs_error)\
+  EXPECT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+                      val1, val2, abs_error)
+
+#define ASSERT_NEAR(val1, val2, abs_error)\
+  ASSERT_PRED_FORMAT3(::testing::internal::DoubleNearPredFormat, \
+                      val1, val2, abs_error)
+
+// These predicate format functions work on floating-point values, and
+// can be used in {ASSERT|EXPECT}_PRED_FORMAT2*(), e.g.
+//
+//   EXPECT_PRED_FORMAT2(testing::DoubleLE, Foo(), 5.0);
+
+// Asserts that val1 is less than, or almost equal to, val2.  Fails
+// otherwise.  In particular, it fails if either val1 or val2 is NaN.
+GTEST_API_ AssertionResult FloatLE(const char* expr1, const char* expr2,
+                                   float val1, float val2);
+GTEST_API_ AssertionResult DoubleLE(const char* expr1, const char* expr2,
+                                    double val1, double val2);
+
+
+#if GTEST_OS_WINDOWS
+
+// Macros that test for HRESULT failure and success, these are only useful
+// on Windows, and rely on Windows SDK macros and APIs to compile.
+//
+//    * {ASSERT|EXPECT}_HRESULT_{SUCCEEDED|FAILED}(expr)
+//
+// When expr unexpectedly fails or succeeds, Google Test prints the
+// expected result and the actual result with both a human-readable
+// string representation of the error, if available, as well as the
+// hex result code.
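+// [Editor's example -- not part of the original header; the COM call is
+// only an illustration:]
+//
+//   ASSERT_HRESULT_SUCCEEDED(::CoInitializeEx(NULL, COINIT_MULTITHREADED));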
+# define EXPECT_HRESULT_SUCCEEDED(expr) \
+    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define ASSERT_HRESULT_SUCCEEDED(expr) \
+    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTSuccess, (expr))
+
+# define EXPECT_HRESULT_FAILED(expr) \
+    EXPECT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+# define ASSERT_HRESULT_FAILED(expr) \
+    ASSERT_PRED_FORMAT1(::testing::internal::IsHRESULTFailure, (expr))
+
+#endif  // GTEST_OS_WINDOWS
+
+// Macros that execute statement and check that it doesn't generate new fatal
+// failures in the current thread.
+//
+//   * {ASSERT|EXPECT}_NO_FATAL_FAILURE(statement);
+//
+// Examples:
+//
+//   EXPECT_NO_FATAL_FAILURE(Process());
+//   ASSERT_NO_FATAL_FAILURE(Process()) << "Process() failed";
+//
+#define ASSERT_NO_FATAL_FAILURE(statement) \
+    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_FATAL_FAILURE_)
+#define EXPECT_NO_FATAL_FAILURE(statement) \
+    GTEST_TEST_NO_FATAL_FAILURE_(statement, GTEST_NONFATAL_FAILURE_)
+
+// Causes a trace (including the source file path, the current line
+// number, and the given message) to be included in every test failure
+// message generated by code in the current scope.  The effect is
+// undone when the control leaves the current scope.
+//
+// The message argument can be anything streamable to std::ostream.
+//
+// In the implementation, we include the current line number as part
+// of the dummy variable name, thus allowing multiple SCOPED_TRACE()s
+// to appear in the same block - as long as they are on different
+// lines.
+#define SCOPED_TRACE(message) \
+  ::testing::internal::ScopedTrace GTEST_CONCAT_TOKEN_(gtest_trace_, __LINE__)(\
+    __FILE__, __LINE__, ::testing::Message() << (message))
+
+// Compile-time assertion for type equality.
+// StaticAssertTypeEq<type1, type2>() compiles iff type1 and type2 are
+// the same type.  The value it returns is not interesting.
+//
+// Instead of making StaticAssertTypeEq a class template, we make it a
+// function template that invokes a helper class template.  This
+// prevents a user from misusing StaticAssertTypeEq<T1, T2> by
+// defining objects of that type.
+//
+// CAVEAT:
+//
+// When used inside a method of a class template,
+// StaticAssertTypeEq<T1, T2>() is effective ONLY IF the method is
+// instantiated.  For example, given:
+//
+//   template <typename T> class Foo {
+//    public:
+//     void Bar() { testing::StaticAssertTypeEq<int, T>(); }
+//   };
+//
+// the code:
+//
+//   void Test1() { Foo<bool> foo; }
+//
+// will NOT generate a compiler error, as Foo<bool>::Bar() is never
+// actually instantiated.  Instead, you need:
+//
+//   void Test2() { Foo<bool> foo; foo.Bar(); }
+//
+// to cause a compiler error.
+template <typename T1, typename T2>
+bool StaticAssertTypeEq() {
+  (void)internal::StaticAssertTypeEqHelper<T1, T2>();
+  return true;
+}
+
+// Defines a test.
+//
+// The first parameter is the name of the test case, and the second
+// parameter is the name of the test within the test case.
+//
+// The convention is to end the test case name with "Test".  For
+// example, a test case for the Foo class can be named FooTest.
+//
+// Test code should appear between braces after an invocation of
+// this macro.  Example:
+//
+//   TEST(FooTest, InitializesCorrectly) {
+//     Foo foo;
+//     EXPECT_TRUE(foo.StatusIsOK());
+//   }
+
+// Note that we call GetTestTypeId() instead of GetTypeId<
+// ::testing::Test>() here to get the type ID of testing::Test.  This
+// is to work around a suspected linker bug when using Google Test as
+// a framework on Mac OS X.
The bug causes GetTypeId<
+// ::testing::Test>() to return different values depending on whether
+// the call is from the Google Test framework itself or from user test
+// code.  GetTestTypeId() is guaranteed to always return the same
+// value, as it always calls GetTypeId<>() from the Google Test
+// framework.
+#define GTEST_TEST(test_case_name, test_name)\
+  GTEST_TEST_(test_case_name, test_name, \
+              ::testing::Test, ::testing::internal::GetTestTypeId())
+
+// Define this macro to 1 to omit the definition of TEST(), which
+// is a generic name and clashes with some other libraries.
+#if !GTEST_DONT_DEFINE_TEST
+# define TEST(test_case_name, test_name) GTEST_TEST(test_case_name, test_name)
+#endif
+
+// Defines a test that uses a test fixture.
+//
+// The first parameter is the name of the test fixture class, which
+// also doubles as the test case name.  The second parameter is the
+// name of the test within the test case.
+//
+// A test fixture class must be declared earlier.  The user should put
+// his test code between braces after using this macro.  Example:
+//
+//   class FooTest : public testing::Test {
+//    protected:
+//     virtual void SetUp() { b_.AddElement(3); }
+//
+//     Foo a_;
+//     Foo b_;
+//   };
+//
+//   TEST_F(FooTest, InitializesCorrectly) {
+//     EXPECT_TRUE(a_.StatusIsOK());
+//   }
+//
+//   TEST_F(FooTest, ReturnsElementCountCorrectly) {
+//     EXPECT_EQ(0, a_.size());
+//     EXPECT_EQ(1, b_.size());
+//   }
+
+#define TEST_F(test_fixture, test_name)\
+  GTEST_TEST_(test_fixture, test_name, test_fixture, \
+              ::testing::internal::GetTypeId<test_fixture>())
+
+}  // namespace testing
+
+// Use this function in main() to run all tests.  It returns 0 if all
+// tests are successful, or 1 otherwise.
+//
+// RUN_ALL_TESTS() should be invoked after the command line has been
+// parsed by InitGoogleTest().
+//
+// This function was formerly a macro; thus, it is in the global
+// namespace and has an all-caps name.
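+// [Editor's example -- the conventional main() that goes with this, not
+// part of the original header:]
+//
+//   int main(int argc, char** argv) {
+//     ::testing::InitGoogleTest(&argc, argv);
+//     return RUN_ALL_TESTS();
+//   }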
+int RUN_ALL_TESTS() GTEST_MUST_USE_RESULT_;
+
+inline int RUN_ALL_TESTS() {
+  return ::testing::UnitTest::GetInstance()->Run();
+}
+
+#endif  // GTEST_INCLUDE_GTEST_GTEST_H_
diff --git a/external/rocksdb/thirdparty.inc b/external/rocksdb/thirdparty.inc
new file mode 100755
index 0000000000..9fffd9bff0
--- /dev/null
+++ b/external/rocksdb/thirdparty.inc
@@ -0,0 +1,265 @@
+# Edit definitions below to specify paths to include files and libraries of all 3rd party libraries
+
+#
+# Edit these lines to set defaults for use of external libraries
+#
+set(USE_GFLAGS_DEFAULT 0)     # GFLAGS is disabled by default, enable with -DGFLAGS=1 cmake command line argument
+set(USE_SNAPPY_DEFAULT 0)     # SNAPPY is disabled by default, enable with -DSNAPPY=1 cmake command line argument
+set(USE_LZ4_DEFAULT 0)        # LZ4 is disabled by default, enable with -DLZ4=1 cmake command line argument
+set(USE_ZLIB_DEFAULT 0)       # ZLIB is disabled by default, enable with -DZLIB=1 cmake command line argument
+set(USE_XPRESS_DEFAULT 0)     # XPRESS is disabled by default, enable with -DXPRESS=1 cmake command line argument
+set(USE_JEMALLOC_DEFAULT 0)   # JEMALLOC is disabled by default, enable with -DJEMALLOC=1 cmake command line argument
+set(USE_JENONINIT_DEFAULT 1)  # Enabled by default: do not call je_init/je_uninit, as the newer versions do not have them.  Disable with -DJENONINIT=0
+
+#
+# This example assumes all the libraries are located in directories under the THIRDPARTY_HOME environment variable.
+# Set the environment variable THIRDPARTY_HOME to point to your third party libraries home (Unix style dir separators),
+# or change the paths below to reflect where the libraries actually reside.
+#
+set (THIRDPARTY_LIBS "")      # Initialization, don't touch
+
+#
+# Edit these 4 lines to define paths to GFLAGS
+#
+set(GFLAGS_HOME $ENV{THIRDPARTY_HOME}/Gflags.Library)
+set(GFLAGS_INCLUDE ${GFLAGS_HOME}/inc/include)
+set(GFLAGS_LIB_DEBUG ${GFLAGS_HOME}/bin/debug/amd64/gflags.lib)
+set(GFLAGS_LIB_RELEASE ${GFLAGS_HOME}/bin/retail/amd64/gflags.lib)
+
+# ================================================== GFLAGS ==================================================
+#
+# Don't touch these lines
+#
+if (DEFINED GFLAGS)
+  set(USE_GFLAGS ${GFLAGS})
+else ()
+  set(USE_GFLAGS ${USE_GFLAGS_DEFAULT})
+endif ()
+
+if (${USE_GFLAGS} EQUAL 1)
+  message(STATUS "GFLAGS library is enabled")
+
+  if(DEFINED ENV{GFLAGS_INCLUDE})
+    set(GFLAGS_INCLUDE $ENV{GFLAGS_INCLUDE})
+  endif()
+
+  if(DEFINED ENV{GFLAGS_LIB_DEBUG})
+    set(GFLAGS_LIB_DEBUG $ENV{GFLAGS_LIB_DEBUG})
+  endif()
+
+  if(DEFINED ENV{GFLAGS_LIB_RELEASE})
+    set(GFLAGS_LIB_RELEASE $ENV{GFLAGS_LIB_RELEASE})
+  endif()
+
+  set(GFLAGS_CXX_FLAGS -DGFLAGS=gflags)
+  set(GFLAGS_LIBS debug ${GFLAGS_LIB_DEBUG} optimized ${GFLAGS_LIB_RELEASE})
+
+  add_definitions(${GFLAGS_CXX_FLAGS})
+  include_directories(${GFLAGS_INCLUDE})
+  set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${GFLAGS_LIBS})
+else ()
+  message(STATUS "GFLAGS library is disabled")
+endif ()
+
+# ================================================== SNAPPY ==================================================
+#
+# Edit these 4 lines to define paths to Snappy
+#
+set(SNAPPY_HOME $ENV{THIRDPARTY_HOME}/Snappy.Library)
+set(SNAPPY_INCLUDE ${SNAPPY_HOME}/inc/inc)
+set(SNAPPY_LIB_DEBUG ${SNAPPY_HOME}/bin/debug/amd64/snappy.lib)
+set(SNAPPY_LIB_RELEASE ${SNAPPY_HOME}/bin/retail/amd64/snappy.lib)
+
+#
+# Don't touch these lines
+#
+if (DEFINED SNAPPY)
+  set(USE_SNAPPY ${SNAPPY})
+else ()
+  set(USE_SNAPPY ${USE_SNAPPY_DEFAULT})
+endif ()
+
+if (${USE_SNAPPY} EQUAL 1)
+  message(STATUS "SNAPPY library is
enabled") + + if(DEFINED ENV{SNAPPY_INCLUDE}) + set(SNAPPY_INCLUDE $ENV{SNAPPY_INCLUDE}) + endif() + + if(DEFINED ENV{SNAPPY_LIB_DEBUG}) + set(SNAPPY_LIB_DEBUG $ENV{SNAPPY_LIB_DEBUG}) + endif() + + if(DEFINED ENV{SNAPPY_LIB_RELEASE}) + set(SNAPPY_LIB_RELEASE $ENV{SNAPPY_LIB_RELEASE}) + endif() + + set(SNAPPY_CXX_FLAGS -DSNAPPY) + set(SNAPPY_LIBS debug ${SNAPPY_LIB_DEBUG} optimized ${SNAPPY_LIB_RELEASE}) + + add_definitions(${SNAPPY_CXX_FLAGS}) + include_directories(${SNAPPY_INCLUDE}) + set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${SNAPPY_LIBS}) +else () + message(STATUS "SNAPPY library is disabled") +endif () + +# ================================================== LZ4 ================================================== +# +# Edit these 4 lines to define paths to LZ4 +# +set(LZ4_HOME $ENV{THIRDPARTY_HOME}/LZ4.Library) +set(LZ4_INCLUDE ${LZ4_HOME}/inc/include) +set(LZ4_LIB_DEBUG ${LZ4_HOME}/bin/debug/amd64/lz4.lib) +set(LZ4_LIB_RELEASE ${LZ4_HOME}/bin/retail/amd64/lz4.lib) + +# +# Don't touch these lines +# +if (DEFINED LZ4) + set(USE_LZ4 ${LZ4}) +else () + set(USE_LZ4 ${USE_LZ4_DEFAULT}) +endif () + +if (${USE_LZ4} EQUAL 1) + message(STATUS "LZ4 library is enabled") + + if(DEFINED ENV{LZ4_INCLUDE}) + set(LZ4_INCLUDE $ENV{LZ4_INCLUDE}) + endif() + + if(DEFINED ENV{LZ4_LIB_DEBUG}) + set(LZ4_LIB_DEBUG $ENV{LZ4_LIB_DEBUG}) + endif() + + if(DEFINED ENV{LZ4_LIB_RELEASE}) + set(LZ4_LIB_RELEASE $ENV{LZ4_LIB_RELEASE}) + endif() + + set(LZ4_CXX_FLAGS -DLZ4) + set(LZ4_LIBS debug ${LZ4_LIB_DEBUG} optimized ${LZ4_LIB_RELEASE}) + + add_definitions(${LZ4_CXX_FLAGS}) + include_directories(${LZ4_INCLUDE}) + set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${LZ4_LIBS}) +else () + message(STATUS "LZ4 library is disabled") +endif () + +# ================================================== ZLIB ================================================== +# +# Edit these 4 lines to define paths to ZLIB +# +set(ZLIB_HOME $ENV{THIRDPARTY_HOME}/ZLIB.Library) +set(ZLIB_INCLUDE ${ZLIB_HOME}/inc/include) +set(ZLIB_LIB_DEBUG ${ZLIB_HOME}/bin/debug/amd64/zlib.lib) +set(ZLIB_LIB_RELEASE ${ZLIB_HOME}/bin/retail/amd64/zlib.lib) + +# +# Don't touch these lines +# +if (DEFINED ZLIB) + set(USE_ZLIB ${ZLIB}) +else () + set(USE_ZLIB ${USE_ZLIB_DEFAULT}) +endif () + +if (${USE_ZLIB} EQUAL 1) + message(STATUS "ZLIB library is enabled") + + if(DEFINED ENV{ZLIB_INCLUDE}) + set(ZLIB_INCLUDE $ENV{ZLIB_INCLUDE}) + endif() + + if(DEFINED ENV{ZLIB_LIB_DEBUG}) + set(ZLIB_LIB_DEBUG $ENV{ZLIB_LIB_DEBUG}) + endif() + + if(DEFINED ENV{ZLIB_LIB_RELEASE}) + set(ZLIB_LIB_RELEASE $ENV{ZLIB_LIB_RELEASE}) + endif() + + set(ZLIB_CXX_FLAGS -DZLIB) + set(ZLIB_LIBS debug ${ZLIB_LIB_DEBUG} optimized ${ZLIB_LIB_RELEASE}) + + add_definitions(${ZLIB_CXX_FLAGS}) + include_directories(${ZLIB_INCLUDE}) + set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${ZLIB_LIBS}) +else () + message(STATUS "ZLIB library is disabled") +endif () + +if (DEFINED XPRESS) + set(USE_XPRESS ${XPRESS}) +else () + set(USE_XPRESS ${USE_XPRESS_DEFAULT}) +endif () + +if (${USE_XPRESS} EQUAL 1) + message(STATUS "XPRESS is enabled") + + add_definitions(-DXPRESS) + + # We are using the implementation provided by the system + set (SYSTEM_LIBS ${SYSTEM_LIBS} Cabinet.lib) +else () + message(STATUS "XPRESS is disabled") +endif () + +# +# Edit these 4 lines to define paths to Jemalloc +# +set(JEMALLOC_HOME $ENV{THIRDPARTY_HOME}/Jemalloc.Library) +set(JEMALLOC_INCLUDE ${JEMALLOC_HOME}/inc/include) +set(JEMALLOC_LIB_DEBUG ${JEMALLOC_HOME}/bin/debug/amd64/jemalloc.lib) +set(JEMALLOC_LIB_RELEASE 
${JEMALLOC_HOME}/bin/retail/amd64/jemalloc.lib)
+
+# ================================================== JEMALLOC ==================================================
+#
+# Don't touch these lines
+#
+if (DEFINED JEMALLOC)
+  set(USE_JEMALLOC ${JEMALLOC})
+else ()
+  set(USE_JEMALLOC ${USE_JEMALLOC_DEFAULT})
+endif ()
+
+if (${USE_JEMALLOC} EQUAL 1)
+  message(STATUS "JEMALLOC library is enabled")
+  set(JEMALLOC_CXX_FLAGS "-DJEMALLOC -DJEMALLOC_EXPORT= ")
+
+  if(DEFINED ENV{JEMALLOC_INCLUDE})
+    set(JEMALLOC_INCLUDE $ENV{JEMALLOC_INCLUDE})
+  endif()
+
+  if(DEFINED ENV{JEMALLOC_LIB_DEBUG})
+    set(JEMALLOC_LIB_DEBUG $ENV{JEMALLOC_LIB_DEBUG})
+  endif()
+
+  if(DEFINED ENV{JEMALLOC_LIB_RELEASE})
+    set(JEMALLOC_LIB_RELEASE $ENV{JEMALLOC_LIB_RELEASE})
+  endif()
+
+  set(JEMALLOC_LIBS debug ${JEMALLOC_LIB_DEBUG} optimized ${JEMALLOC_LIB_RELEASE})
+
+  add_definitions(${JEMALLOC_CXX_FLAGS})
+  include_directories(${JEMALLOC_INCLUDE})
+  set (THIRDPARTY_LIBS ${THIRDPARTY_LIBS} ${JEMALLOC_LIBS})
+  set (ARTIFACT_SUFFIX "_je")
+
+  set(USE_JENONINIT ${USE_JENONINIT_DEFAULT})
+
+  if(JENONINIT)
+    set(USE_JENONINIT ${JENONINIT})
+  endif()
+
+  if(${USE_JENONINIT} EQUAL 1)
+    add_definitions(-DJEMALLOC_NON_INIT)
+    message(STATUS "JEMALLOC NONINIT version")
+  endif()
+
+else ()
+  set (ARTIFACT_SUFFIX "")
+  message(STATUS "JEMALLOC library is disabled")
+endif ()
diff --git a/external/rocksdb/tools/Dockerfile b/external/rocksdb/tools/Dockerfile
new file mode 100755
index 0000000000..1d5ead7fdb
--- /dev/null
+++ b/external/rocksdb/tools/Dockerfile
@@ -0,0 +1,5 @@
+FROM buildpack-deps:wheezy
+
+ADD ./ldb /rocksdb/tools/ldb
+
+CMD /rocksdb/tools/ldb
diff --git a/external/rocksdb/tools/auto_sanity_test.sh b/external/rocksdb/tools/auto_sanity_test.sh
new file mode 100755
index 0000000000..07d444efb1
--- /dev/null
+++ b/external/rocksdb/tools/auto_sanity_test.sh
@@ -0,0 +1,91 @@
+TMP_DIR="/tmp/rocksdb-sanity-test"
+
+if [ "$#" -lt 2 ]; then
+  echo "usage: ./auto_sanity_test.sh [new_commit] [old_commit]"
+  echo "Missing either [new_commit] or [old_commit], perform sanity check with the latest and 10th latest commits."
+  recent_commits=`git log | grep -e "^commit [a-z0-9]\+$"| head -n10 | sed -e 's/commit //g'`
+  commit_new=`echo "$recent_commits" | head -n1`
+  commit_old=`echo "$recent_commits" | tail -n1`
+  echo "the most recent commits are:"
+  echo "$recent_commits"
+else
+  commit_new=$1
+  commit_old=$2
+fi
+
+if [ ! -d $TMP_DIR ]; then
+  mkdir $TMP_DIR
+fi
+dir_new="${TMP_DIR}/${commit_new}"
+dir_old="${TMP_DIR}/${commit_old}"
+
+function makestuff() {
+  echo "make clean"
+  make clean > /dev/null
+  echo "make db_sanity_test -j32"
+  make db_sanity_test -j32 > /dev/null
+  if [ $? -ne 0 ]; then
+    echo "[ERROR] Failed to perform 'make db_sanity_test'"
+    exit 1
+  fi
+}
+
+rm -r -f $dir_new
+rm -r -f $dir_old
+
+echo "Running db sanity check with commits $commit_new and $commit_old."
+
+echo "============================================================="
+echo "Making build $commit_new"
+git checkout $commit_new
+if [ $? -ne 0 ]; then
+  echo "[ERROR] Can't checkout $commit_new"
+  exit 1
+fi
+makestuff
+mv db_sanity_test new_db_sanity_test
+echo "Creating db based on the new commit --- $commit_new"
+./new_db_sanity_test $dir_new create
+cp ./tools/db_sanity_test.cc $dir_new
+cp ./tools/auto_sanity_test.sh $dir_new
+
+echo "============================================================="
+echo "Making build $commit_old"
+git checkout $commit_old
+if [ $?
-ne 0 ]; then + echo "[ERROR] Can't checkout $commit_old" + exit 1 +fi +cp -f $dir_new/db_sanity_test.cc ./tools/. +cp -f $dir_new/auto_sanity_test.sh ./tools/. +makestuff +mv db_sanity_test old_db_sanity_test +echo "Creating db based on the old commit --- $commit_old" +./old_db_sanity_test $dir_old create + +echo "=============================================================" +echo "[Backward Compatibility Check]" +echo "Verifying old db $dir_old using the new commit --- $commit_new" +./new_db_sanity_test $dir_old verify +if [ $? -ne 0 ]; then + echo "[ERROR] Backward Compatibility Check fails:" + echo " Verification of $dir_old using commit $commit_new failed." + exit 2 +fi + +echo "=============================================================" +echo "[Forward Compatibility Check]" +echo "Verifying new db $dir_new using the old commit --- $commit_old" +./old_db_sanity_test $dir_new verify +if [ $? -ne 0 ]; then + echo "[ERROR] Forward Compatibility Check fails:" + echo " $dir_new using commit $commit_old failed." + exit 2 +fi + +rm old_db_sanity_test +rm new_db_sanity_test +rm -rf $dir_new +rm -rf $dir_old + +echo "Auto sanity test passed!" diff --git a/external/rocksdb/tools/benchmark.sh b/external/rocksdb/tools/benchmark.sh new file mode 100755 index 0000000000..5298bbe07c --- /dev/null +++ b/external/rocksdb/tools/benchmark.sh @@ -0,0 +1,513 @@ +#!/bin/bash +# REQUIRE: db_bench binary exists in the current directory + +if [ $# -ne 1 ]; then + echo -n "./benchmark.sh [bulkload/fillseq/overwrite/filluniquerandom/" + echo "readrandom/readwhilewriting/readwhilemerging/updaterandom/" + echo "mergerandom/randomtransaction/compact]" + exit 0 +fi + +# Make it easier to run only the compaction test. Getting valid data requires +# a number of iterations and having an ability to run the test separately from +# rest of the benchmarks helps. +if [ "$COMPACTION_TEST" == "1" -a "$1" != "universal_compaction" ]; then + echo "Skipping $1 because it's not a compaction test." + exit 0 +fi + +# size constants +K=1024 +M=$((1024 * K)) +G=$((1024 * M)) + +if [ -z $DB_DIR ]; then + echo "DB_DIR is not defined" + exit 0 +fi + +if [ -z $WAL_DIR ]; then + echo "WAL_DIR is not defined" + exit 0 +fi + +output_dir=${OUTPUT_DIR:-/tmp/} +if [ ! -d $output_dir ]; then + mkdir -p $output_dir +fi + +# all multithreaded tests run with sync=1 unless +# $DB_BENCH_NO_SYNC is defined +syncval="1" +if [ ! 
-z $DB_BENCH_NO_SYNC ]; then + echo "Turning sync off for all multithreaded tests" + syncval="0"; +fi + +num_threads=${NUM_THREADS:-16} +mb_written_per_sec=${MB_WRITE_PER_SEC:-0} +# Only for tests that do range scans +num_nexts_per_seek=${NUM_NEXTS_PER_SEEK:-10} +cache_size=${CACHE_SIZE:-$((1 * G))} +compression_max_dict_bytes=${COMPRESSION_MAX_DICT_BYTES:-0} +compression_type=${COMPRESSION_TYPE:-snappy} +duration=${DURATION:-0} + +num_keys=${NUM_KEYS:-$((1 * G))} +key_size=${KEY_SIZE:-20} +value_size=${VALUE_SIZE:-400} +block_size=${BLOCK_SIZE:-8192} + +const_params=" + --db=$DB_DIR \ + --wal_dir=$WAL_DIR \ + --disable_data_sync=0 \ + \ + --num=$num_keys \ + --num_levels=6 \ + --key_size=$key_size \ + --value_size=$value_size \ + --block_size=$block_size \ + --cache_size=$cache_size \ + --cache_numshardbits=6 \ + --compression_max_dict_bytes=$compression_max_dict_bytes \ + --compression_ratio=0.5 \ + --compression_type=$compression_type \ + --level_compaction_dynamic_level_bytes=true \ + --bytes_per_sync=$((8 * M)) \ + --cache_index_and_filter_blocks=0 \ + --pin_l0_filter_and_index_blocks_in_cache=1 \ + --benchmark_write_rate_limit=$(( 1024 * 1024 * $mb_written_per_sec )) \ + \ + --hard_rate_limit=3 \ + --rate_limit_delay_max_milliseconds=1000000 \ + --write_buffer_size=$((128 * M)) \ + --target_file_size_base=$((128 * M)) \ + --max_bytes_for_level_base=$((1 * G)) \ + \ + --verify_checksum=1 \ + --delete_obsolete_files_period_micros=$((60 * M)) \ + --max_grandparent_overlap_factor=8 \ + --max_bytes_for_level_multiplier=8 \ + \ + --statistics=0 \ + --stats_per_interval=1 \ + --stats_interval_seconds=60 \ + --histogram=1 \ + \ + --memtablerep=skip_list \ + --bloom_bits=10 \ + --open_files=-1" + +l0_config=" + --level0_file_num_compaction_trigger=4 \ + --level0_slowdown_writes_trigger=12 \ + --level0_stop_writes_trigger=20" + +if [ $duration -gt 0 ]; then + const_params="$const_params --duration=$duration" +fi + +params_w="$const_params \ + $l0_config \ + --max_background_compactions=16 \ + --max_write_buffer_number=8 \ + --max_background_flushes=7" + +params_bulkload="$const_params \ + --max_background_compactions=16 \ + --max_write_buffer_number=8 \ + --max_background_flushes=7 \ + --level0_file_num_compaction_trigger=$((10 * M)) \ + --level0_slowdown_writes_trigger=$((10 * M)) \ + --level0_stop_writes_trigger=$((10 * M))" + +# +# Tune values for level and universal compaction. +# For universal compaction, these level0_* options mean total sorted of runs in +# LSM. In level-based compaction, it means number of L0 files. +# +params_level_compact="$const_params \ + --max_background_flushes=4 \ + --max_write_buffer_number=4 \ + --level0_file_num_compaction_trigger=4 \ + --level0_slowdown_writes_trigger=16 \ + --level0_stop_writes_trigger=20" + +params_univ_compact="$const_params \ + --max_background_flushes=4 \ + --max_write_buffer_number=4 \ + --level0_file_num_compaction_trigger=8 \ + --level0_slowdown_writes_trigger=16 \ + --level0_stop_writes_trigger=20" + +function summarize_result { + test_out=$1 + test_name=$2 + bench_name=$3 + + # Note that this function assumes that the benchmark executes long enough so + # that "Compaction Stats" is written to stdout at least once. If it won't + # happen then empty output from grep when searching for "Sum" will cause + # syntax errors. 
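+  # [Editor's note, not part of the original script: a typical call, as made
+  # by the run_* helpers below, is
+  #   summarize_result $output_dir/benchmark_readrandom.t${num_threads}.log \
+  #                    readrandom.t${num_threads} readrandom
+  # which appends one tab-separated row to $output_dir/report.txt.]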
+ uptime=$( grep ^Uptime\(secs $test_out | tail -1 | awk '{ printf "%.0f", $2 }' ) + stall_time=$( grep "^Cumulative stall" $test_out | tail -1 | awk '{ print $3 }' ) + stall_pct=$( grep "^Cumulative stall" $test_out| tail -1 | awk '{ print $5 }' ) + ops_sec=$( grep ^${bench_name} $test_out | awk '{ print $5 }' ) + mb_sec=$( grep ^${bench_name} $test_out | awk '{ print $7 }' ) + lo_wgb=$( grep "^ L0" $test_out | tail -1 | awk '{ print $8 }' ) + sum_wgb=$( grep "^ Sum" $test_out | tail -1 | awk '{ print $8 }' ) + sum_size=$( grep "^ Sum" $test_out | tail -1 | awk '{ printf "%.1f", $3 / 1024.0 }' ) + wamp=$( echo "scale=1; $sum_wgb / $lo_wgb" | bc ) + wmb_ps=$( echo "scale=1; ( $sum_wgb * 1024.0 ) / $uptime" | bc ) + usecs_op=$( grep ^${bench_name} $test_out | awk '{ printf "%.1f", $3 }' ) + p50=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.1f", $3 }' ) + p75=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.1f", $5 }' ) + p99=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $7 }' ) + p999=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $9 }' ) + p9999=$( grep "^Percentiles:" $test_out | tail -1 | awk '{ printf "%.0f", $11 }' ) + echo -e "$ops_sec\t$mb_sec\t$sum_size\t$lo_wgb\t$sum_wgb\t$wamp\t$wmb_ps\t$usecs_op\t$p50\t$p75\t$p99\t$p999\t$p9999\t$uptime\t$stall_time\t$stall_pct\t$test_name" \ + >> $output_dir/report.txt +} + +function run_bulkload { + # This runs with a vector memtable and the WAL disabled to load faster. It is still crash safe and the + # client can discover where to restart a load after a crash. I think this is a good way to load. + echo "Bulk loading $num_keys random keys" + cmd="./db_bench --benchmarks=fillrandom \ + --use_existing_db=0 \ + --disable_auto_compactions=1 \ + --sync=0 \ + $params_bulkload \ + --threads=1 \ + --memtablerep=vector \ + --disable_wal=1 \ + --seed=$( date +%s ) \ + 2>&1 | tee -a $output_dir/benchmark_bulkload_fillrandom.log" + echo $cmd | tee $output_dir/benchmark_bulkload_fillrandom.log + eval $cmd + summarize_result $output_dir/benchmark_bulkload_fillrandom.log bulkload fillrandom + echo "Compacting..." + cmd="./db_bench --benchmarks=compact \ + --use_existing_db=1 \ + --disable_auto_compactions=1 \ + --sync=0 \ + $params_w \ + --threads=1 \ + 2>&1 | tee -a $output_dir/benchmark_bulkload_compact.log" + echo $cmd | tee $output_dir/benchmark_bulkload_compact.log + eval $cmd +} + +# +# Parameter description: +# +# $1 - 1 if I/O statistics should be collected. +# $2 - compaction type to use (level=0, universal=1). +# $3 - number of subcompactions. +# $4 - number of maximum background compactions. +# +function run_manual_compaction_worker { + # This runs with a vector memtable and the WAL disabled to load faster. + # It is still crash safe and the client can discover where to restart a + # load after a crash. I think this is a good way to load. + echo "Bulk loading $num_keys random keys for manual compaction." + + fillrandom_output_file=$output_dir/benchmark_man_compact_fillrandom_$3.log + man_compact_output_log=$output_dir/benchmark_man_compact_$3.log + + if [ "$2" == "1" ]; then + extra_params=$params_univ_compact + else + extra_params=$params_level_compact + fi + + # Make sure that fillrandom uses the same compaction options as compact. 
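+  # [Editor's note, not part of the original script: with the parameter list
+  # documented above, run_univ_compaction below ends up issuing calls such as
+  #   run_manual_compaction_worker 1 1 4 8
+  # i.e. collect I/O stats, use universal compaction, 4 subcompactions, at
+  # most 8 background compactions.]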
+ cmd="./db_bench --benchmarks=fillrandom \ + --use_existing_db=0 \ + --disable_auto_compactions=0 \ + --sync=0 \ + $extra_params \ + --threads=$num_threads \ + --compaction_measure_io_stats=$1 \ + --compaction_style=$2 \ + --subcompactions=$3 \ + --memtablerep=vector \ + --disable_wal=1 \ + --max_background_compactions=$4 \ + --seed=$( date +%s ) \ + 2>&1 | tee -a $fillrandom_output_file" + + echo $cmd | tee $fillrandom_output_file + eval $cmd + + summarize_result $fillrandom_output_file man_compact_fillrandom_$3 fillrandom + + echo "Compacting with $3 subcompactions specified ..." + + # This is the part we're really interested in. Given that compact benchmark + # doesn't output regular statistics then we'll just use the time command to + # measure how long this step takes. + cmd="{ \ + time ./db_bench --benchmarks=compact \ + --use_existing_db=1 \ + --disable_auto_compactions=0 \ + --sync=0 \ + $extra_params \ + --threads=$num_threads \ + --compaction_measure_io_stats=$1 \ + --compaction_style=$2 \ + --subcompactions=$3 \ + --max_background_compactions=$4 \ + ;} + 2>&1 | tee -a $man_compact_output_log" + + echo $cmd | tee $man_compact_output_log + eval $cmd + + # Can't use summarize_result here. One way to analyze the results is to run + # "grep real" on the resulting log files. +} + +function run_univ_compaction { + # Always ask for I/O statistics to be measured. + io_stats=1 + + # Values: kCompactionStyleLevel = 0x0, kCompactionStyleUniversal = 0x1. + compaction_style=1 + + # Define a set of benchmarks. + subcompactions=(1 2 4 8 16) + max_background_compactions=(16 16 8 4 2) + + i=0 + total=${#subcompactions[@]} + + # Execute a set of benchmarks to cover variety of scenarios. + while [ "$i" -lt "$total" ] + do + run_manual_compaction_worker $io_stats $compaction_style ${subcompactions[$i]} \ + ${max_background_compactions[$i]} + ((i++)) + done +} + +function run_fillseq { + # This runs with a vector memtable. WAL can be either disabled or enabled + # depending on the input parameter (1 for disabled, 0 for enabled). The main + # benefit behind disabling WAL is to make loading faster. It is still crash + # safe and the client can discover where to restart a load after a crash. I + # think this is a good way to load. + + # Make sure that we'll have unique names for all the files so that data won't + # be overwritten. + if [ $1 == 1 ]; then + log_file_name=$output_dir/benchmark_fillseq.wal_disabled.v${value_size}.log + test_name=fillseq.wal_disabled.v${value_size} + else + log_file_name=$output_dir/benchmark_fillseq.wal_enabled.v${value_size}.log + test_name=fillseq.wal_enabled.v${value_size} + fi + + echo "Loading $num_keys keys sequentially" + cmd="./db_bench --benchmarks=fillseq \ + --use_existing_db=0 \ + --sync=0 \ + $params_w \ + --min_level_to_compress=0 \ + --threads=1 \ + --memtablerep=vector \ + --disable_wal=$1 \ + --seed=$( date +%s ) \ + 2>&1 | tee -a $log_file_name" + echo $cmd | tee $log_file_name + eval $cmd + + # The constant "fillseq" which we pass to db_bench is the benchmark name. 
+ summarize_result $log_file_name $test_name fillseq +} + +function run_change { + operation=$1 + echo "Do $num_keys random $operation" + out_name="benchmark_${operation}.t${num_threads}.s${syncval}.log" + cmd="./db_bench --benchmarks=$operation \ + --use_existing_db=1 \ + --sync=$syncval \ + $params_w \ + --threads=$num_threads \ + --merge_operator=\"put\" \ + --seed=$( date +%s ) \ + 2>&1 | tee -a $output_dir/${out_name}" + echo $cmd | tee $output_dir/${out_name} + eval $cmd + summarize_result $output_dir/${out_name} ${operation}.t${num_threads}.s${syncval} $operation +} + +function run_filluniquerandom { + echo "Loading $num_keys unique keys randomly" + cmd="./db_bench --benchmarks=filluniquerandom \ + --use_existing_db=0 \ + --sync=0 \ + $params_w \ + --threads=1 \ + --seed=$( date +%s ) \ + 2>&1 | tee -a $output_dir/benchmark_filluniquerandom.log" + echo $cmd | tee $output_dir/benchmark_filluniquerandom.log + eval $cmd + summarize_result $output_dir/benchmark_filluniquerandom.log filluniquerandom filluniquerandom +} + +function run_readrandom { + echo "Reading $num_keys random keys" + out_name="benchmark_readrandom.t${num_threads}.log" + cmd="./db_bench --benchmarks=readrandom \ + --use_existing_db=1 \ + $params_w \ + --threads=$num_threads \ + --seed=$( date +%s ) \ + 2>&1 | tee -a $output_dir/${out_name}" + echo $cmd | tee $output_dir/${out_name} + eval $cmd + summarize_result $output_dir/${out_name} readrandom.t${num_threads} readrandom +} + +function run_readwhile { + operation=$1 + echo "Reading $num_keys random keys while $operation" + out_name="benchmark_readwhile${operation}.t${num_threads}.log" + cmd="./db_bench --benchmarks=readwhile${operation} \ + --use_existing_db=1 \ + --sync=$syncval \ + $params_w \ + --threads=$num_threads \ + --merge_operator=\"put\" \ + --seed=$( date +%s ) \ + 2>&1 | tee -a $output_dir/${out_name}" + echo $cmd | tee $output_dir/${out_name} + eval $cmd + summarize_result $output_dir/${out_name} readwhile${operation}.t${num_threads} readwhile${operation} +} + +function run_rangewhile { + operation=$1 + full_name=$2 + reverse_arg=$3 + out_name="benchmark_${full_name}.t${num_threads}.log" + echo "Range scan $num_keys random keys while ${operation} for reverse_iter=${reverse_arg}" + cmd="./db_bench --benchmarks=seekrandomwhile${operation} \ + --use_existing_db=1 \ + --sync=$syncval \ + $params_w \ + --threads=$num_threads \ + --merge_operator=\"put\" \ + --seek_nexts=$num_nexts_per_seek \ + --reverse_iterator=$reverse_arg \ + --seed=$( date +%s ) \ + 2>&1 | tee -a $output_dir/${out_name}" + echo $cmd | tee $output_dir/${out_name} + eval $cmd + summarize_result $output_dir/${out_name} ${full_name}.t${num_threads} seekrandomwhile${operation} +} + +function run_range { + full_name=$1 + reverse_arg=$2 + out_name="benchmark_${full_name}.t${num_threads}.log" + echo "Range scan $num_keys random keys for reverse_iter=${reverse_arg}" + cmd="./db_bench --benchmarks=seekrandom \ + --use_existing_db=1 \ + $params_w \ + --threads=$num_threads \ + --seek_nexts=$num_nexts_per_seek \ + --reverse_iterator=$reverse_arg \ + --seed=$( date +%s ) \ + 2>&1 | tee -a $output_dir/${out_name}" + echo $cmd | tee $output_dir/${out_name} + eval $cmd + summarize_result $output_dir/${out_name} ${full_name}.t${num_threads} seekrandom +} + +function run_randomtransaction { + echo "..." 
+ cmd="./db_bench $params_r --benchmarks=randomtransaction \ + --num=$num_keys \ + --transaction_db \ + --threads=5 \ + --transaction_sets=5 \ + 2>&1 | tee $output_dir/benchmark_randomtransaction.log" + echo $cmd | tee $output_dir/benchmark_rangescanwhilewriting.log + eval $cmd +} + +function now() { + echo `date +"%s"` +} + +report="$output_dir/report.txt" +schedule="$output_dir/schedule.txt" + +echo "===== Benchmark =====" + +# Run!!! +IFS=',' read -a jobs <<< $1 +for job in ${jobs[@]}; do + + if [ $job != debug ]; then + echo "Start $job at `date`" | tee -a $schedule + fi + + start=$(now) + if [ $job = bulkload ]; then + run_bulkload + elif [ $job = fillseq_disable_wal ]; then + run_fillseq 1 + elif [ $job = fillseq_enable_wal ]; then + run_fillseq 0 + elif [ $job = overwrite ]; then + run_change overwrite + elif [ $job = updaterandom ]; then + run_change updaterandom + elif [ $job = mergerandom ]; then + run_change mergerandom + elif [ $job = filluniquerandom ]; then + run_filluniquerandom + elif [ $job = readrandom ]; then + run_readrandom + elif [ $job = fwdrange ]; then + run_range $job false + elif [ $job = revrange ]; then + run_range $job true + elif [ $job = readwhilewriting ]; then + run_readwhile writing + elif [ $job = readwhilemerging ]; then + run_readwhile merging + elif [ $job = fwdrangewhilewriting ]; then + run_rangewhile writing $job false + elif [ $job = revrangewhilewriting ]; then + run_rangewhile writing $job true + elif [ $job = fwdrangewhilemerging ]; then + run_rangewhile merging $job false + elif [ $job = revrangewhilemerging ]; then + run_rangewhile merging $job true + elif [ $job = randomtransaction ]; then + run_randomtransaction + elif [ $job = universal_compaction ]; then + run_univ_compaction + elif [ $job = debug ]; then + num_keys=1000; # debug + echo "Setting num_keys to $num_keys" + else + echo "unknown job $job" + exit + fi + end=$(now) + + if [ $job != debug ]; then + echo "Complete $job in $((end-start)) seconds" | tee -a $schedule + fi + + echo -e "ops/sec\tmb/sec\tSize-GB\tL0_GB\tSum_GB\tW-Amp\tW-MB/s\tusec/op\tp50\tp75\tp99\tp99.9\tp99.99\tUptime\tStall-time\tStall%\tTest" + tail -1 $output_dir/report.txt + +done diff --git a/external/rocksdb/tools/benchmark_leveldb.sh b/external/rocksdb/tools/benchmark_leveldb.sh new file mode 100755 index 0000000000..dce66d47ad --- /dev/null +++ b/external/rocksdb/tools/benchmark_leveldb.sh @@ -0,0 +1,185 @@ +#!/bin/bash +# REQUIRE: db_bench binary exists in the current directory +# +# This should be used with the LevelDB fork listed here to use additional test options. +# For more details on the changes see the blog post listed below. +# https://github.com/mdcallag/leveldb-1 +# http://smalldatum.blogspot.com/2015/04/comparing-leveldb-and-rocksdb-take-2.html + +if [ $# -ne 1 ]; then + echo -n "./benchmark.sh [fillseq/overwrite/readrandom/readwhilewriting]" + exit 0 +fi + +# size constants +K=1024 +M=$((1024 * K)) +G=$((1024 * M)) + +if [ -z $DB_DIR ]; then + echo "DB_DIR is not defined" + exit 0 +fi + +output_dir=${OUTPUT_DIR:-/tmp/} +if [ ! -d $output_dir ]; then + mkdir -p $output_dir +fi + +# all multithreaded tests run with sync=1 unless +# $DB_BENCH_NO_SYNC is defined +syncval="1" +if [ ! 
+num_threads=${NUM_THREADS:-16}
+# Only for *whilewriting, *whilemerging
+writes_per_second=${WRITES_PER_SECOND:-$((10 * K))}
+cache_size=${CACHE_SIZE:-$((1 * G))}
+
+num_keys=${NUM_KEYS:-$((1 * G))}
+key_size=20
+value_size=${VALUE_SIZE:-400}
+block_size=${BLOCK_SIZE:-4096}
+
+const_params="
+  --db=$DB_DIR \
+  \
+  --num=$num_keys \
+  --value_size=$value_size \
+  --cache_size=$cache_size \
+  --compression_ratio=0.5 \
+  \
+  --write_buffer_size=$((2 * M)) \
+  \
+  --histogram=1 \
+  \
+  --bloom_bits=10 \
+  --open_files=$((20 * K))"
+
+params_w="$const_params "
+
+function summarize_result {
+  test_out=$1
+  test_name=$2
+  bench_name=$3
+  nthr=$4
+
+  usecs_op=$( grep ^${bench_name} $test_out | awk '{ printf "%.1f", $3 }' )
+  mb_sec=$( grep ^${bench_name} $test_out | awk '{ printf "%.1f", $5 }' )
+  ops=$( grep "^Count:" $test_out | awk '{ print $2 }' )
+  ops_sec=$( echo "scale=0; (1000000.0 * $nthr) / $usecs_op" | bc )
+  avg=$( grep "^Count:" $test_out | awk '{ printf "%.1f", $4 }' )
+  p50=$( grep "^Min:" $test_out | awk '{ printf "%.1f", $4 }' )
+  echo -e "$ops_sec\t$mb_sec\t$usecs_op\t$avg\t$p50\t$test_name" \
+    >> $output_dir/report.txt
+}
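+# My reading of summarize_result above: usecs_op and mb_sec are parsed from
+# the per-benchmark line of the db_bench output, avg and p50 come from the
+# histogram's "Count:" and "Min:" lines, and ops/sec is derived as
+#   ops_sec = (1000000 * nthr) / usecs_op
+# rather than being read directly from the output.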
+function run_fillseq {
+  # This runs with a vector memtable and the WAL disabled to load faster.
+  # It is still crash safe and the client can discover where to restart a
+  # load after a crash. I think this is a good way to load.
+  echo "Loading $num_keys keys sequentially"
+  cmd="./db_bench --benchmarks=fillseq \
+       --use_existing_db=0 \
+       --sync=0 \
+       $params_w \
+       --threads=1 \
+       --seed=$( date +%s ) \
+       2>&1 | tee -a $output_dir/benchmark_fillseq.v${value_size}.log"
+  echo $cmd | tee $output_dir/benchmark_fillseq.v${value_size}.log
+  eval $cmd
+  summarize_result $output_dir/benchmark_fillseq.v${value_size}.log fillseq.v${value_size} fillseq 1
+}
+
+function run_change {
+  operation=$1
+  echo "Do $num_keys random $operation"
+  out_name="benchmark_${operation}.t${num_threads}.s${syncval}.log"
+  cmd="./db_bench --benchmarks=$operation \
+       --use_existing_db=1 \
+       --sync=$syncval \
+       $params_w \
+       --threads=$num_threads \
+       --seed=$( date +%s ) \
+       2>&1 | tee -a $output_dir/${out_name}"
+  echo $cmd | tee $output_dir/${out_name}
+  eval $cmd
+  summarize_result $output_dir/${out_name} ${operation}.t${num_threads}.s${syncval} $operation $num_threads
+}
+
+function run_readrandom {
+  echo "Reading $num_keys random keys"
+  out_name="benchmark_readrandom.t${num_threads}.log"
+  cmd="./db_bench --benchmarks=readrandom \
+       --use_existing_db=1 \
+       $params_w \
+       --threads=$num_threads \
+       --seed=$( date +%s ) \
+       2>&1 | tee -a $output_dir/${out_name}"
+  echo $cmd | tee $output_dir/${out_name}
+  eval $cmd
+  summarize_result $output_dir/${out_name} readrandom.t${num_threads} readrandom $num_threads
+}
+
+function run_readwhile {
+  operation=$1
+  echo "Reading $num_keys random keys while $operation"
+  out_name="benchmark_readwhile${operation}.t${num_threads}.log"
+  cmd="./db_bench --benchmarks=readwhile${operation} \
+       --use_existing_db=1 \
+       --sync=$syncval \
+       $params_w \
+       --threads=$num_threads \
+       --writes_per_second=$writes_per_second \
+       --seed=$( date +%s ) \
+       2>&1 | tee -a $output_dir/${out_name}"
+  echo $cmd | tee $output_dir/${out_name}
+  eval $cmd
+  summarize_result $output_dir/${out_name} readwhile${operation}.t${num_threads} readwhile${operation} $num_threads
+}
+
+function now() {
+  echo `date +"%s"`
+}
+
+report="$output_dir/report.txt"
+schedule="$output_dir/schedule.txt"
+
+echo "===== Benchmark ====="
+
+# Run!!!
+IFS=',' read -a jobs <<< $1
+for job in ${jobs[@]}; do
+
+  if [ $job != debug ]; then
+    echo "Start $job at `date`" | tee -a $schedule
+  fi
+
+  start=$(now)
+  if [ $job = fillseq ]; then
+    run_fillseq
+  elif [ $job = overwrite ]; then
+    run_change overwrite
+  elif [ $job = readrandom ]; then
+    run_readrandom
+  elif [ $job = readwhilewriting ]; then
+    run_readwhile writing
+  elif [ $job = debug ]; then
+    num_keys=1000; # debug
+    echo "Setting num_keys to $num_keys"
+  else
+    echo "unknown job $job"
+    exit
+  fi
+  end=$(now)
+
+  if [ $job != debug ]; then
+    echo "Complete $job in $((end-start)) seconds" | tee -a $schedule
+  fi
+
+  echo -e "ops/sec\tmb/sec\tusec/op\tavg\tp50\tTest"
+  tail -1 $output_dir/report.txt
+
+done
diff --git a/external/rocksdb/tools/check_format_compatible.sh b/external/rocksdb/tools/check_format_compatible.sh
new file mode 100755
index 0000000000..e3353bfaeb
--- /dev/null
+++ b/external/rocksdb/tools/check_format_compatible.sh
@@ -0,0 +1,110 @@
+#!/bin/bash
+#
+# A shell script to load some pre-generated data files into a DB using the ldb tool.
+# ./ldb needs to be available to be executed.
+#
+# Usage: